-rw-r--r--  include/linux/memcontrol.h | 10
-rw-r--r--  mm/filemap.c               |  2
-rw-r--r--  mm/memcontrol.c            | 10
-rw-r--r--  mm/memory.c                | 10
-rw-r--r--  mm/shmem.c                 |  8
-rw-r--r--  mm/swapfile.c              |  3
6 files changed, 25 insertions(+), 18 deletions(-)
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 2fdd138..59ac95a 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -26,6 +26,16 @@ struct page;
struct mm_struct;
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+/*
+ * All "charge" functions with a gfp_mask should use GFP_KERNEL or
+ * (gfp_mask & GFP_RECLAIM_MASK). In the current implementation, memcg does
+ * not allocate memory itself but only reclaims memory from all available
+ * zones, so the "where do I want memory from" bits of gfp_mask have no
+ * meaning. Any bits of that field could be passed, but having a rule avoids
+ * ambiguity: a charge function's gfp_mask should be set to GFP_KERNEL or
+ * (gfp_mask & GFP_RECLAIM_MASK).
+ * (Of course, if memcg does memory allocation in the future, GFP_KERNEL is sane.)
+ */
extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm,
gfp_t gfp_mask);
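
The convention documented above can be illustrated with a short caller-side sketch. This is not part of the patch: my_add_to_cache() and its body are hypothetical, and only mem_cgroup_cache_charge() and GFP_RECLAIM_MASK, both referenced by this patch, are assumed to exist.

/*
 * Hypothetical caller following the rule above: strip the placement
 * hints (highmem/movable zone bits) from the caller's gfp_mask and
 * pass only the reclaim-related bits to the charge function.
 */
static int my_add_to_cache(struct page *page, struct mm_struct *mm,
			   gfp_t gfp_mask)
{
	int error;

	/* memcg only reclaims, so only GFP_RECLAIM_MASK bits matter */
	error = mem_cgroup_cache_charge(page, mm,
					gfp_mask & GFP_RECLAIM_MASK);
	if (error)
		return error;

	/* ... insert the page into the cache proper ... */
	return 0;
}

The filemap.c hunk below is the in-tree instance of this pattern: the old code masked out only __GFP_HIGHMEM, while the new code keeps just the reclaim bits.
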
diff --git a/mm/filemap.c b/mm/filemap.c
index 2f55a1e..ceba0bd 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -460,7 +460,7 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
VM_BUG_ON(!PageLocked(page));
error = mem_cgroup_cache_charge(page, current->mm,
- gfp_mask & ~__GFP_HIGHMEM);
+ gfp_mask & GFP_RECLAIM_MASK);
if (error)
goto out;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 9bf5d7c..b9cd57b 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1248,7 +1248,7 @@ int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
unlock_page_cgroup(pc);
if (mem) {
- ret = mem_cgroup_try_charge(NULL, GFP_HIGHUSER_MOVABLE, &mem);
+ ret = mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem);
css_put(&mem->css);
}
*ptr = mem;
@@ -1378,7 +1378,7 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
break;
progress = try_to_free_mem_cgroup_pages(memcg,
- GFP_HIGHUSER_MOVABLE, false);
+ GFP_KERNEL, false);
if (!progress) retry_count--;
}
return ret;
@@ -1418,7 +1418,7 @@ int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
break;
oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
- try_to_free_mem_cgroup_pages(memcg, GFP_HIGHUSER_MOVABLE, true);
+ try_to_free_mem_cgroup_pages(memcg, GFP_KERNEL, true);
curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
if (curusage >= oldusage)
retry_count--;
@@ -1464,7 +1464,7 @@ static int mem_cgroup_force_empty_list(struct mem_cgroup *mem,
}
spin_unlock_irqrestore(&zone->lru_lock, flags);
- ret = mem_cgroup_move_parent(pc, mem, GFP_HIGHUSER_MOVABLE);
+ ret = mem_cgroup_move_parent(pc, mem, GFP_KERNEL);
if (ret == -ENOMEM)
break;
@@ -1550,7 +1550,7 @@ try_to_free:
goto out;
}
progress = try_to_free_mem_cgroup_pages(mem,
- GFP_HIGHUSER_MOVABLE, false);
+ GFP_KERNEL, false);
if (!progress) {
nr_retries--;
/* maybe some writeback is necessary */
diff --git a/mm/memory.c b/mm/memory.c
index 1358012..e5bfbe6 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2000,7 +2000,7 @@ gotten:
cow_user_page(new_page, old_page, address, vma);
__SetPageUptodate(new_page);
- if (mem_cgroup_newpage_charge(new_page, mm, GFP_HIGHUSER_MOVABLE))
+ if (mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))
goto oom_free_new;
/*
@@ -2431,8 +2431,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
lock_page(page);
delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
- if (mem_cgroup_try_charge_swapin(mm, page,
- GFP_HIGHUSER_MOVABLE, &ptr) == -ENOMEM) {
+ if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr)) {
ret = VM_FAULT_OOM;
unlock_page(page);
goto out;
@@ -2524,7 +2523,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
goto oom;
__SetPageUptodate(page);
- if (mem_cgroup_newpage_charge(page, mm, GFP_HIGHUSER_MOVABLE))
+ if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))
goto oom_free_page;
entry = mk_pte(page, vma->vm_page_prot);
@@ -2615,8 +2614,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
ret = VM_FAULT_OOM;
goto out;
}
- if (mem_cgroup_newpage_charge(page,
- mm, GFP_HIGHUSER_MOVABLE)) {
+ if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL)) {
ret = VM_FAULT_OOM;
page_cache_release(page);
goto out;
diff --git a/mm/shmem.c b/mm/shmem.c
index adf5c3e..bbb7b04 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -932,8 +932,8 @@ found:
* Charge page using GFP_HIGHUSER_MOVABLE while we can wait.
* charged back to the user(not to caller) when swap account is used.
*/
- error = mem_cgroup_cache_charge_swapin(page,
- current->mm, GFP_HIGHUSER_MOVABLE, true);
+ error = mem_cgroup_cache_charge_swapin(page, current->mm, GFP_KERNEL,
+ true);
if (error)
goto out;
error = radix_tree_preload(GFP_KERNEL);
@@ -1275,7 +1275,7 @@ repeat:
* charge against this swap cache here.
*/
if (mem_cgroup_cache_charge_swapin(swappage,
- current->mm, gfp, false)) {
+ current->mm, gfp & GFP_RECLAIM_MASK, false)) {
page_cache_release(swappage);
error = -ENOMEM;
goto failed;
@@ -1393,7 +1393,7 @@ repeat:
/* Precharge page while we can wait, compensate after */
error = mem_cgroup_cache_charge(filepage, current->mm,
- GFP_HIGHUSER_MOVABLE);
+ GFP_KERNEL);
if (error) {
page_cache_release(filepage);
shmem_unacct_blocks(info->flags, 1);
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 0579d90..da422c4 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -698,8 +698,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
pte_t *pte;
int ret = 1;
- if (mem_cgroup_try_charge_swapin(vma->vm_mm, page,
- GFP_HIGHUSER_MOVABLE, &ptr))
+ if (mem_cgroup_try_charge_swapin(vma->vm_mm, page, GFP_KERNEL, &ptr))
ret = -ENOMEM;
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);