author     Hugh Dickins <hugh@veritas.com>                          2008-03-04 14:29:08 -0800
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>    2008-03-04 16:35:15 -0800
commit     8289546e573d5ff681cdf0fc7a1184cca66fdb55 (patch)
tree       9cf874b55eb9a6c97233d137278c7b7c89a5f4a1
parent     7e924aafa4b03ff71de34af8553d9a1ebc86c071 (diff)
memcg: remove mem_cgroup_uncharge
Nothing uses mem_cgroup_uncharge apart from mem_cgroup_uncharge_page (a trivial wrapper around it) and mem_cgroup_end_migration (which does the same as mem_cgroup_uncharge_page). And it often ends up having to lock just to let its caller unlock. Remove it (but leave the silly locking until a later patch).

Moved mem_cgroup_cache_charge next to mem_cgroup_charge in memcontrol.h.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Cc: David Rientjes <rientjes@google.com>
Acked-by: Balbir Singh <balbir@linux.vnet.ibm.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Hirokazu Takahashi <taka@valinux.co.jp>
Cc: YAMAMOTO Takashi <yamamoto@valinux.co.jp>
Cc: Paul Menage <menage@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
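To illustrate the locking point, here is a sketch of the caller-side pattern before and after this patch, based on the mem_cgroup_end_migration hunk in the diff below (not additional code from the patch itself):

	/* Old pattern: the caller takes lock_page_cgroup() only so that
	 * mem_cgroup_uncharge() can operate on the page_cgroup it looked up. */
	lock_page_cgroup(page);
	pc = page_get_page_cgroup(page);
	mem_cgroup_uncharge(pc);
	unlock_page_cgroup(page);

	/* New pattern: the single remaining entry point locks, looks up the
	 * page_cgroup, uncharges, and unlocks internally. */
	mem_cgroup_uncharge_page(page);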
-rw-r--r--  include/linux/memcontrol.h  20
-rw-r--r--  mm/memcontrol.c             23
2 files changed, 15 insertions, 28 deletions
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 70789df..8b1c429 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -35,7 +35,8 @@ extern void mm_free_cgroup(struct mm_struct *mm);
extern struct page_cgroup *page_get_page_cgroup(struct page *page);
extern int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
gfp_t gfp_mask);
-extern void mem_cgroup_uncharge(struct page_cgroup *pc);
+extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
+ gfp_t gfp_mask);
extern void mem_cgroup_uncharge_page(struct page *page);
extern void mem_cgroup_move_lists(struct page *page, bool active);
extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
@@ -45,8 +46,6 @@ extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
struct mem_cgroup *mem_cont,
int active);
extern void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask);
-extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
- gfp_t gfp_mask);
int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem);
#define mm_match_cgroup(mm, cgroup) \
@@ -92,14 +91,16 @@ static inline struct page_cgroup *page_get_page_cgroup(struct page *page)
return NULL;
}
-static inline int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
- gfp_t gfp_mask)
+static inline int mem_cgroup_charge(struct page *page,
+ struct mm_struct *mm, gfp_t gfp_mask)
{
return 0;
}
-static inline void mem_cgroup_uncharge(struct page_cgroup *pc)
+static inline int mem_cgroup_cache_charge(struct page *page,
+ struct mm_struct *mm, gfp_t gfp_mask)
{
+ return 0;
}
static inline void mem_cgroup_uncharge_page(struct page *page)
@@ -110,13 +111,6 @@ static inline void mem_cgroup_move_lists(struct page *page, bool active)
{
}
-static inline int mem_cgroup_cache_charge(struct page *page,
- struct mm_struct *mm,
- gfp_t gfp_mask)
-{
- return 0;
-}
-
static inline int mm_match_cgroup(struct mm_struct *mm, struct mem_cgroup *mem)
{
return 1;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 83ba13a..1333d25 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -697,20 +697,22 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
/*
* Uncharging is always a welcome operation, we never complain, simply
- * uncharge. This routine should be called with lock_page_cgroup held
+ * uncharge.
*/
-void mem_cgroup_uncharge(struct page_cgroup *pc)
+void mem_cgroup_uncharge_page(struct page *page)
{
+ struct page_cgroup *pc;
struct mem_cgroup *mem;
struct mem_cgroup_per_zone *mz;
- struct page *page;
unsigned long flags;
/*
* Check if our page_cgroup is valid
*/
+ lock_page_cgroup(page);
+ pc = page_get_page_cgroup(page);
if (!pc)
- return;
+ goto unlock;
if (atomic_dec_and_test(&pc->ref_cnt)) {
page = pc->page;
@@ -731,12 +733,8 @@ void mem_cgroup_uncharge(struct page_cgroup *pc)
}
lock_page_cgroup(page);
}
-}
-void mem_cgroup_uncharge_page(struct page *page)
-{
- lock_page_cgroup(page);
- mem_cgroup_uncharge(page_get_page_cgroup(page));
+unlock:
unlock_page_cgroup(page);
}
@@ -759,12 +757,7 @@ int mem_cgroup_prepare_migration(struct page *page)
void mem_cgroup_end_migration(struct page *page)
{
- struct page_cgroup *pc;
-
- lock_page_cgroup(page);
- pc = page_get_page_cgroup(page);
- mem_cgroup_uncharge(pc);
- unlock_page_cgroup(page);
+ mem_cgroup_uncharge_page(page);
}
/*
* We know both *page* and *newpage* are now not-on-LRU and Pg_locked.