author    | Joonsoo Kim <iamjoonsoo.kim@lge.com> | 2013-09-11 14:21:54 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-09-11 15:57:44 -0700
commit    | 8bb3f12e7d4f7b043a7c5aa3831e72041e80dc4a (patch)
tree      | be5c4ff792b8555a8d5fd3286d2714bff1d01a48 /mm/hugetlb.c
parent    | f522c3ac00a49128115f99a5fcb95a447601c1c3 (diff)
mm, hugetlb: fix subpool accounting handling
If we allocate a hugepage with avoid_reserve, we don't dequeue a reserved one,
so the subpool counter should also be charged in the avoid_reserve case. This
patch implements that.
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Aneesh Kumar <aneesh.kumar@linux.vnet.ibm.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Davidlohr Bueso <davidlohr@hp.com>
Cc: David Gibson <david@gibson.dropbear.id.au>
Cc: Wanpeng Li <liwanp@linux.vnet.ibm.com>
Cc: Hillf Danton <dhillf@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r-- | mm/hugetlb.c | 10
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 204550a..dec5772 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1164,13 +1164,14 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
 	chg = vma_needs_reservation(h, vma, addr);
 	if (chg < 0)
 		return ERR_PTR(-ENOMEM);
-	if (chg)
-		if (hugepage_subpool_get_pages(spool, chg))
+	if (chg || avoid_reserve)
+		if (hugepage_subpool_get_pages(spool, 1))
 			return ERR_PTR(-ENOSPC);
 
 	ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
 	if (ret) {
-		hugepage_subpool_put_pages(spool, chg);
+		if (chg || avoid_reserve)
+			hugepage_subpool_put_pages(spool, 1);
 		return ERR_PTR(-ENOSPC);
 	}
 	spin_lock(&hugetlb_lock);
@@ -1182,7 +1183,8 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
 			hugetlb_cgroup_uncharge_cgroup(idx,
 						       pages_per_huge_page(h),
 						       h_cg);
-			hugepage_subpool_put_pages(spool, chg);
+			if (chg || avoid_reserve)
+				hugepage_subpool_put_pages(spool, 1);
 			return ERR_PTR(-ENOSPC);
 		}
 		spin_lock(&hugetlb_lock);
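
The rule the patch enforces: charge the subpool for one page whenever a new
reservation is needed (chg != 0) or avoid_reserve forces allocation outside the
reserve, and undo that charge under exactly the same condition on every failure
path. The following is a minimal standalone sketch of that invariant, not kernel
code; subpool_get()/subpool_put() and alloc_huge_page_sketch() are hypothetical
stand-ins for hugepage_subpool_get_pages()/hugepage_subpool_put_pages() and the
real allocator.

/* Standalone sketch of the subpool accounting invariant (not kernel code). */
#include <stdbool.h>
#include <stdio.h>

static long subpool_pages = 8;		/* pages the subpool may still hand out */

/* Stand-in for hugepage_subpool_get_pages(): charge n pages if available. */
static int subpool_get(long n)
{
	if (subpool_pages < n)
		return -1;		/* would be -ENOSPC in the kernel */
	subpool_pages -= n;
	return 0;
}

/* Stand-in for hugepage_subpool_put_pages(): return n pages to the subpool. */
static void subpool_put(long n)
{
	subpool_pages += n;
}

/*
 * Charge the subpool when chg != 0 or avoid_reserve is set, and unwind the
 * charge under the same condition if a later step (here, a simulated cgroup
 * charge failure) aborts the allocation.
 */
static int alloc_huge_page_sketch(long chg, bool avoid_reserve, bool cgroup_fails)
{
	if (chg || avoid_reserve)
		if (subpool_get(1))
			return -1;

	if (cgroup_fails) {
		if (chg || avoid_reserve)
			subpool_put(1);	/* keep get/put balanced on failure */
		return -1;
	}
	return 0;
}

int main(void)
{
	/* avoid_reserve path with a later failure: the charge must be undone. */
	alloc_huge_page_sketch(0, true, true);
	printf("subpool pages after unwind: %ld\n", subpool_pages);	/* 8 again */
	return 0;
}

Before the fix, the avoid_reserve case with chg == 0 skipped the subpool charge
but still consumed a fresh page, and the error paths unconditionally called
hugepage_subpool_put_pages(spool, chg), which is a no-op when chg == 0; guarding
both sides with the same (chg || avoid_reserve) condition keeps the counter
balanced.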