author     Johannes Weiner <hannes@cmpxchg.org>           2012-07-31 16:45:31 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org> 2012-07-31 18:42:48 -0700
commit     0c59b89c81eab7fe7dcf08f2252ae22a6c081ce8 (patch)
tree       b6b6510704282d59dd875fed59c999c93455c973 /mm/memcontrol.c
parent     5d84c7766e8aacc6e3477bdf02fdb417163cf89b (diff)
mm: memcg: push down PageSwapCache check into uncharge entry functions
Not all uncharge paths need to check if the page is swapcache; some of
them can know for sure.

Push down the check into all callsites of uncharge_common() so that the
patch that removes some of them is more obvious.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Michal Hocko <mhocko@suse.cz>
Cc: David Rientjes <rientjes@google.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Wanpeng Li <liwp.linux@gmail.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
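As an illustration of the pattern only, here is a minimal, self-contained C
sketch. All names in it (sketch_page, uncharge_common, uncharge_anon,
uncharge_cache) are hypothetical stand-ins, not the memcg API; the real
change is in the diff below. The shared helper stops bailing out on the
special case and instead asserts the precondition, while the one caller
that cannot rule the case out checks it itself before calling in.

/* Sketch of "push the check down into the callers", hypothetical names. */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct sketch_page {
        bool swapcache;  /* stand-in for PageSwapCache() */
        bool charged;    /* stand-in for the memcg charge state */
};

/* Shared helper: callers must already have excluded swap-cache pages. */
static void uncharge_common(struct sketch_page *page)
{
        assert(!page->swapcache);  /* mirrors VM_BUG_ON(PageSwapCache(page)) */
        page->charged = false;
}

/* Caller that cannot rule out swap cache: the check lives here now. */
static void uncharge_anon(struct sketch_page *page)
{
        if (page->swapcache)
                return;
        uncharge_common(page);
}

/* Caller that knows for sure the page is not in swap cache. */
static void uncharge_cache(struct sketch_page *page)
{
        uncharge_common(page);
}

int main(void)
{
        struct sketch_page anon = { .swapcache = true,  .charged = true };
        struct sketch_page file = { .swapcache = false, .charged = true };

        uncharge_anon(&anon);   /* skipped: page still charged */
        uncharge_cache(&file);  /* uncharged via the common helper */

        printf("anon charged=%d, file charged=%d\n", anon.charged, file.charged);
        return 0;
}

The design point is the same as in the patch: the precondition check moves
to the call sites that actually need it, and the shared helper documents
the invariant with an assertion instead of silently returning.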
Diffstat (limited to 'mm/memcontrol.c')
 mm/memcontrol.c | 18 ++++++++++++------
 1 file changed, 12 insertions(+), 6 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 7eadcda..87c2ec4 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2987,8 +2987,7 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype,
 	if (mem_cgroup_disabled())
 		return NULL;
 
-	if (PageSwapCache(page))
-		return NULL;
+	VM_BUG_ON(PageSwapCache(page));
 
 	if (PageTransHuge(page)) {
 		nr_pages <<= compound_order(page);
@@ -3085,6 +3084,8 @@ void mem_cgroup_uncharge_page(struct page *page)
 	if (page_mapped(page))
 		return;
 	VM_BUG_ON(page->mapping && !PageAnon(page));
+	if (PageSwapCache(page))
+		return;
 	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_ANON, false);
 }
 
@@ -3092,6 +3093,8 @@ void mem_cgroup_uncharge_cache_page(struct page *page)
 {
 	VM_BUG_ON(page_mapped(page));
 	VM_BUG_ON(page->mapping);
+	if (PageSwapCache(page))
+		return;
 	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE, false);
 }
 
@@ -3156,6 +3159,8 @@ mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
 	if (!swapout) /* this was a swap cache but the swap is unused ! */
 		ctype = MEM_CGROUP_CHARGE_TYPE_DROP;
 
+	if (PageSwapCache(page))
+		return;
 	memcg = __mem_cgroup_uncharge_common(page, ctype, false);
 
 	/*
@@ -3345,10 +3350,11 @@ void mem_cgroup_end_migration(struct mem_cgroup *memcg,
 		unused = oldpage;
 	}
 	anon = PageAnon(used);
-	__mem_cgroup_uncharge_common(unused,
-		anon ? MEM_CGROUP_CHARGE_TYPE_ANON
-		     : MEM_CGROUP_CHARGE_TYPE_CACHE,
-		true);
+	if (!PageSwapCache(unused))
+		__mem_cgroup_uncharge_common(unused,
+			anon ? MEM_CGROUP_CHARGE_TYPE_ANON
+			     : MEM_CGROUP_CHARGE_TYPE_CACHE,
+			true);
 	css_put(&memcg->css);
 	/*
 	 * We disallowed uncharge of pages under migration because mapcount