author		Hugh Dickins <hugh@veritas.com>	2008-02-07 00:14:22 -0800
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2008-02-07 08:42:20 -0800
commit		82369553d6d3bc67c54129a02e0bc0b5b88f3045 (patch)
tree		1d80a6cc9f5840550ad025b32ac8ef8fd915fd98 /mm/memcontrol.c
parent		3be91277e754c7db04eae145ba622b3a3e3ad96d (diff)
memcgroup: fix hang with shmem/tmpfs
The memcgroup regime relies upon a cgroup reclaiming pages from itself within add_to_page_cache, which may involve some waiting; whereas shmem and tmpfs rely upon using add_to_page_cache while holding a spinlock, when they cannot wait.  The consequence is that when a cgroup reaches its limit, shmem_getpage just hangs: unless there is outside memory pressure too, neither kswapd nor radix_tree_preload gets it out of the retry loop.

In most cases we can mem_cgroup_cache_charge the page waitably first, to attach the page_cgroup in advance, so that add_to_page_cache then does no more than increment a count; then mem_cgroup_uncharge_page afterwards (in both the success and the failure case) to balance the books again.

And where there used to be a congestion_wait for kswapd (recently made redundant by radix_tree_preload), use mem_cgroup_cache_charge with a NULL page to go through a cycle of allocation and freeing, without accounting to any particular page, and without updating the statistics vector.  This brings the cgroup below its limit, so the next try usually succeeds.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Cc: Balbir Singh <balbir@linux.vnet.ibm.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
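[Editorial note: to make the ordering concrete, here is a minimal caller-side sketch of the pre-charge pattern described above.  It is an illustration only, not the shmem.c hunk (which lies outside this diffstat); page, mapping, idx, gfp and info->lock are assumed from a shmem_getpage-like context.]

	/*
	 * Sketch only: charge the cgroup while we are still allowed
	 * to wait and reclaim, so the page_cgroup is attached before
	 * we take the spinlock.
	 */
	error = mem_cgroup_cache_charge(page, current->mm,
					gfp & ~__GFP_HIGHMEM);
	if (error)
		goto failed;

	spin_lock(&info->lock);
	/*
	 * Under the spinlock we cannot wait: add_to_page_cache's own
	 * charge now does no more than increment the pre-attached count.
	 */
	error = add_to_page_cache(page, mapping, idx, GFP_NOWAIT);
	spin_unlock(&info->lock);

	/* Balance the books again, in both success and failure cases. */
	mem_cgroup_uncharge_page(page);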
Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--	mm/memcontrol.c	37
1 file changed, 21 insertions(+), 16 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index dbf5715..11b23f2 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -329,23 +329,26 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
 	 * with it
 	 */
 retry:
-	lock_page_cgroup(page);
-	pc = page_get_page_cgroup(page);
-	/*
-	 * The page_cgroup exists and the page has already been accounted
-	 */
-	if (pc) {
-		if (unlikely(!atomic_inc_not_zero(&pc->ref_cnt))) {
-			/* this page is under being uncharged ? */
-			unlock_page_cgroup(page);
-			cpu_relax();
-			goto retry;
-		} else {
-			unlock_page_cgroup(page);
-			goto done;
+	if (page) {
+		lock_page_cgroup(page);
+		pc = page_get_page_cgroup(page);
+		/*
+		 * The page_cgroup exists and
+		 * the page has already been accounted.
+		 */
+		if (pc) {
+			if (unlikely(!atomic_inc_not_zero(&pc->ref_cnt))) {
+				/* this page is under being uncharged ? */
+				unlock_page_cgroup(page);
+				cpu_relax();
+				goto retry;
+			} else {
+				unlock_page_cgroup(page);
+				goto done;
+			}
 		}
+		unlock_page_cgroup(page);
 	}
-	unlock_page_cgroup(page);
 
 	pc = kzalloc(sizeof(struct page_cgroup), gfp_mask);
 	if (pc == NULL)
@@ -404,7 +407,7 @@ retry:
 	if (ctype == MEM_CGROUP_CHARGE_TYPE_CACHE)
 		pc->flags |= PAGE_CGROUP_FLAG_CACHE;
 
-	if (page_cgroup_assign_new_page_cgroup(page, pc)) {
+	if (!page || page_cgroup_assign_new_page_cgroup(page, pc)) {
 		/*
 		 * Another charge has been added to this page already.
 		 * We take lock_page_cgroup(page) again and read
@@ -413,6 +416,8 @@ retry:
 		res_counter_uncharge(&mem->res, PAGE_SIZE);
 		css_put(&mem->css);
 		kfree(pc);
+		if (!page)
+			goto done;
 		goto retry;
 	}
 
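[Editorial note: a companion sketch of the NULL-page cycle that replaces the old congestion_wait.  Again an illustration under an assumed shmem-like caller context; gfp and the -ENOMEM framing are hypothetical.]

	/*
	 * Sketch only: charging a NULL page runs the same
	 * allocation/reclaim cycle, then the new "if (!page) goto done"
	 * path above frees the charge without accounting it to any page,
	 * bringing the cgroup below its limit for the next try.
	 */
	if (error == -ENOMEM) {
		error = mem_cgroup_cache_charge(NULL, current->mm,
						gfp & ~__GFP_HIGHMEM);
		if (error)
			goto failed;
	}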