 include/linux/memcontrol.h |  5 -----
 mm/ksm.c                   | 11 -----------
 mm/memcontrol.c            | 30 +++++++++++++-----------------
 mm/migrate.c               |  2 --
 mm/swap.c                  |  8 +++++---
 mm/swap_state.c            | 10 ----------
 6 files changed, 18 insertions(+), 48 deletions(-)
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 4d34356..b80de52 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -129,7 +129,6 @@ extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
 extern void mem_cgroup_replace_page_cache(struct page *oldpage,
 					struct page *newpage);
 
-extern void mem_cgroup_reset_owner(struct page *page);
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
 extern int do_swap_account;
 #endif
@@ -392,10 +391,6 @@ static inline void mem_cgroup_replace_page_cache(struct page *oldpage,
 				struct page *newpage)
 {
 }
-
-static inline void mem_cgroup_reset_owner(struct page *page)
-{
-}
 #endif /* CONFIG_CGROUP_MEM_CONT */
 
 #if !defined(CONFIG_CGROUP_MEM_RES_CTLR) || !defined(CONFIG_DEBUG_VM)
diff --git a/mm/ksm.c b/mm/ksm.c
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -28,7 +28,6 @@
 #include <linux/kthread.h>
 #include <linux/wait.h>
 #include <linux/slab.h>
-#include <linux/memcontrol.h>
 #include <linux/rbtree.h>
 #include <linux/memory.h>
 #include <linux/mmu_notifier.h>
@@ -1572,16 +1571,6 @@ struct page *ksm_does_need_to_copy(struct page *page,
 
 	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
 	if (new_page) {
-		/*
-		 * The memcg-specific accounting when moving
-		 * pages around the LRU lists relies on the
-		 * page's owner (memcg) to be valid.  Usually,
-		 * pages are assigned to a new owner before
-		 * being put on the LRU list, but since this
-		 * is not the case here, the stale owner from
-		 * a previous allocation cycle must be reset.
-		 */
-		mem_cgroup_reset_owner(new_page);
 		copy_user_highpage(new_page, page, address, vma);
 
 		SetPageDirty(new_page);
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 1097d80..d0e57a3 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1042,6 +1042,19 @@ struct lruvec *mem_cgroup_lru_add_list(struct zone *zone, struct page *page,
 
 	pc = lookup_page_cgroup(page);
 	memcg = pc->mem_cgroup;
+
+	/*
+	 * Surreptitiously switch any uncharged page to root:
+	 * an uncharged page off lru does nothing to secure
+	 * its former mem_cgroup from sudden removal.
+	 *
+	 * Our caller holds lru_lock, and PageCgroupUsed is
+	 * updated under page_cgroup lock: between them, they
+	 * make all uses of pc->mem_cgroup safe.
+	 */
+	if (!PageCgroupUsed(pc) && memcg != root_mem_cgroup)
+		pc->mem_cgroup = memcg = root_mem_cgroup;
+
 	mz = page_cgroup_zoneinfo(memcg, page);
 	/* compound_order() is stabilized through lru_lock */
 	MEM_CGROUP_ZSTAT(mz, lru) += 1 << compound_order(page);
@@ -3029,23 +3042,6 @@ void mem_cgroup_uncharge_end(void)
 	batch->memcg = NULL;
 }
 
-/*
- * A function for resetting pc->mem_cgroup for newly allocated pages.
- * This function should be called if the newpage will be added to LRU
- * before start accounting.
- */
-void mem_cgroup_reset_owner(struct page *newpage)
-{
-	struct page_cgroup *pc;
-
-	if (mem_cgroup_disabled())
-		return;
-
-	pc = lookup_page_cgroup(newpage);
-	VM_BUG_ON(PageCgroupUsed(pc));
-	pc->mem_cgroup = root_mem_cgroup;
-}
-
 #ifdef CONFIG_SWAP
 /*
  * called after __delete_from_swap_cache() and drop "page" account.
diff --git a/mm/migrate.c b/mm/migrate.c
index df141f6..1503b6b 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -839,8 +839,6 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 	if (!newpage)
 		return -ENOMEM;
 
-	mem_cgroup_reset_owner(newpage);
-
 	if (page_count(page) == 1) {
 		/* page was freed from under us. So we are done. */
 		goto out;
diff --git a/mm/swap.c b/mm/swap.c
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -652,7 +652,7 @@ EXPORT_SYMBOL(__pagevec_release);
 void lru_add_page_tail(struct zone* zone,
 		       struct page *page, struct page *page_tail)
 {
-	int active;
+	int uninitialized_var(active);
 	enum lru_list lru;
 	const int file = 0;
 
@@ -672,7 +672,6 @@ void lru_add_page_tail(struct zone* zone,
 			active = 0;
 			lru = LRU_INACTIVE_ANON;
 		}
-		update_page_reclaim_stat(zone, page_tail, file, active);
 	} else {
 		SetPageUnevictable(page_tail);
 		lru = LRU_UNEVICTABLE;
@@ -693,6 +692,9 @@ void lru_add_page_tail(struct zone* zone,
 		list_head = page_tail->lru.prev;
 		list_move_tail(&page_tail->lru, list_head);
 	}
+
+	if (!PageUnevictable(page))
+		update_page_reclaim_stat(zone, page_tail, file, active);
 }
 
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
@@ -710,8 +712,8 @@ static void __pagevec_lru_add_fn(struct page *page, void *arg)
 	SetPageLRU(page);
 	if (active)
 		SetPageActive(page);
-	update_page_reclaim_stat(zone, page, file, active);
 	add_page_to_lru_list(zone, page, lru);
+	update_page_reclaim_stat(zone, page, file, active);
 }
 
 /*
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 470038a..ea6b32d 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -300,16 +300,6 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 		new_page = alloc_page_vma(gfp_mask, vma, addr);
 		if (!new_page)
 			break;		/* Out of memory */
-		/*
-		 * The memcg-specific accounting when moving
-		 * pages around the LRU lists relies on the
-		 * page's owner (memcg) to be valid.  Usually,
-		 * pages are assigned to a new owner before
-		 * being put on the LRU list, but since this
-		 * is not the case here, the stale owner from
-		 * a previous allocation cycle must be reset.
-		 */
-		mem_cgroup_reset_owner(new_page);
 	}
 
 	/*
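
The mm/memcontrol.c hunk is the heart of the patch: rather than requiring every allocator to call mem_cgroup_reset_owner() before a fresh page reaches the LRU, mem_cgroup_lru_add_list() now lazily redirects any uncharged page to root_mem_cgroup, because a stale pc->mem_cgroup left over from a previous allocation cycle could point at a group about to be removed. Below is a minimal userspace sketch of that pattern, not kernel code: every name in it (toy_memcg, toy_page, lru_add, ...) is invented for illustration, and a pthread mutex merely stands in for the zone's lru_lock.

/* toy_lru.c -- userspace model of the lazy "switch uncharged pages to
 * root" fixup added to mem_cgroup_lru_add_list() above.  Toy names
 * throughout; build with: cc toy_lru.c -lpthread
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_memcg { const char *name; };

static struct toy_memcg toy_root = { "root" };

struct toy_page {
	bool used;			/* stands in for PageCgroupUsed */
	struct toy_memcg *memcg;	/* stands in for pc->mem_cgroup */
};

static pthread_mutex_t toy_lru_lock = PTHREAD_MUTEX_INITIALIZER;

/* Models the fixup: called with toy_lru_lock held by the caller, it
 * quietly hands any uncharged page to the root group, so a stale
 * owner pointer is replaced before anyone can dereference it. */
static struct toy_memcg *lru_add(struct toy_page *page)
{
	struct toy_memcg *memcg = page->memcg;

	if (!page->used && memcg != &toy_root)
		page->memcg = memcg = &toy_root;
	return memcg;
}

int main(void)
{
	struct toy_memcg doomed = { "doomed" };
	struct toy_page page = { .used = false, .memcg = &doomed };

	pthread_mutex_lock(&toy_lru_lock);
	printf("page accounted to: %s\n", lru_add(&page)->name);
	pthread_mutex_unlock(&toy_lru_lock);
	return 0;	/* prints "root": the stale owner is never used */
}

The appeal of doing the fixup here rather than at allocation time is that the LRU add is the single choke point all such pages pass through under lru_lock, so callers like ksm_does_need_to_copy(), unmap_and_move() and read_swap_cache_async() no longer need to remember the reset themselves.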
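
The mm/swap.c reordering appears to follow from the same scheme: update_page_reclaim_stat() accounts against the page's owner, so it is moved after add_page_to_lru_list() (respectively, after the THP tail page is actually on a list, and never for unevictable tails, which is presumably why active may legitimately stay unset and gains the uninitialized_var() annotation). A self-contained sketch of that ordering constraint, again with invented names:

/* toy_stat_order.c -- why the stat update moves below the LRU add in
 * the mm/swap.c hunks above.  Illustration only; not kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_memcg {
	const char *name;
	int recent_scanned;	/* stands in for a per-memcg reclaim stat */
};

static struct toy_memcg toy_root = { "root", 0 };

struct toy_page {
	bool used;		/* stands in for PageCgroupUsed */
	struct toy_memcg *memcg;
};

/* The LRU add is where an uncharged page's owner is made safe (see
 * the previous sketch), so it must run first. */
static void toy_add_to_lru(struct toy_page *page)
{
	if (!page->used && page->memcg != &toy_root)
		page->memcg = &toy_root;
}

/* The stat update dereferences the owner; running it before the add
 * would account against a stale, possibly vanishing group. */
static void toy_update_reclaim_stat(struct toy_page *page)
{
	page->memcg->recent_scanned++;
}

int main(void)
{
	struct toy_memcg stale = { "stale", 0 };
	struct toy_page page = { .used = false, .memcg = &stale };

	toy_add_to_lru(&page);		/* fixup first */
	toy_update_reclaim_stat(&page);	/* then account */

	printf("root=%d stale=%d\n",	/* prints "root=1 stale=0" */
	       toy_root.recent_scanned, stale.recent_scanned);
	return 0;
}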