author     Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>   2011-03-23 16:42:25 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>    2011-03-23 19:46:25 -0700
commit     f212ad7cf9c73f8a7fa160e223dcb3f074441a72 (patch)
tree       95789ef7ce662e86a3e4aded5dfb97c51dc7b0a0
parent     af4a662144884a7dbb19acbef70878b3b955f928 (diff)
memcg: add memcg sanity checks at allocating and freeing pages
Add checks at page allocation and freeing time for whether the page is in
use (i.e. charged) from the memcg point of view.
These checks can be useful when debugging a problem; we did similar checks
before commit 52d4b9ac ("memcg: allocate all page_cgroup at boot").
The checks add some overhead to allocating and freeing memory, so they are
enabled only when CONFIG_DEBUG_VM is set.
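As a rough illustration of the CONFIG_DEBUG_VM gating described above, here is a
minimal userspace sketch (not the kernel code): when the debug option or the memcg
controller is compiled out, mem_cgroup_bad_page_check() collapses to a static inline
stub that returns false, so the extra condition OR'ed into the page checks costs
nothing in non-debug builds. The DEBUG_VM_MEMCG macro, the cut-down struct page and
the toy free_pages_check() below are illustrative stand-ins only.

/* build: cc -O2 -o gating_sketch gating_sketch.c */
#include <stdbool.h>
#include <stdio.h>

/* Toggle to model CONFIG_DEBUG_VM + CONFIG_CGROUP_MEM_RES_CTLR (stand-in name). */
#define DEBUG_VM_MEMCG 1

struct page {
	unsigned long flags;	/* stand-in for PAGE_FLAGS_CHECK_AT_FREE bits */
	bool charged;		/* stand-in for "page_cgroup still marked Used" */
};

#if DEBUG_VM_MEMCG
/* Debug build: flag a page that is still charged to a memcg. */
static bool mem_cgroup_bad_page_check(struct page *page)
{
	return page->charged;
}
#else
/* Non-debug build: a static inline stub the compiler folds away. */
static inline bool mem_cgroup_bad_page_check(struct page *page)
{
	return false;
}
#endif

/*
 * Simplified model of free_pages_check(): the memcg check is OR'ed into
 * the existing bitwise-OR'ed conditions, so there is still a single branch.
 */
static int free_pages_check(struct page *page)
{
	if (page->flags | mem_cgroup_bad_page_check(page)) {
		printf("bad page %p\n", (void *)page);
		return 1;
	}
	return 0;
}

int main(void)
{
	struct page leaked = { .flags = 0, .charged = true };
	struct page clean = { .flags = 0, .charged = false };

	free_pages_check(&leaked);	/* flags look clean, but still charged */
	printf("clean page passes: %s\n",
	       free_pages_check(&clean) == 0 ? "yes" : "no");
	return 0;
}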
Signed-off-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Balbir Singh <balbir@linux.vnet.ibm.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--   include/linux/memcontrol.h | 17
-rw-r--r--   mm/memcontrol.c            | 46
-rw-r--r--   mm/page_alloc.c            |  8
3 files changed, 69 insertions, 2 deletions
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 5bb7be2..5a5ce70 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -151,6 +151,10 @@ u64 mem_cgroup_get_limit(struct mem_cgroup *mem);
 void mem_cgroup_split_huge_fixup(struct page *head, struct page *tail);
 #endif
 
+#ifdef CONFIG_DEBUG_VM
+bool mem_cgroup_bad_page_check(struct page *page);
+void mem_cgroup_print_bad_page(struct page *page);
+#endif
 #else /* CONFIG_CGROUP_MEM_RES_CTLR */
 struct mem_cgroup;
 
@@ -352,5 +356,18 @@ static inline void mem_cgroup_split_huge_fixup(struct page *head,
 }
 
 #endif /* CONFIG_CGROUP_MEM_CONT */
 
+#if !defined(CONFIG_CGROUP_MEM_RES_CTLR) || !defined(CONFIG_DEBUG_VM)
+static inline bool
+mem_cgroup_bad_page_check(struct page *page)
+{
+	return false;
+}
+
+static inline void
+mem_cgroup_print_bad_page(struct page *page)
+{
+}
+#endif
+
 #endif /* _LINUX_MEMCONTROL_H */
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 3a2d54b..0356cb6 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3046,6 +3046,52 @@ int mem_cgroup_shmem_charge_fallback(struct page *page,
 	return ret;
 }
 
+#ifdef CONFIG_DEBUG_VM
+static struct page_cgroup *lookup_page_cgroup_used(struct page *page)
+{
+	struct page_cgroup *pc;
+
+	pc = lookup_page_cgroup(page);
+	if (likely(pc) && PageCgroupUsed(pc))
+		return pc;
+	return NULL;
+}
+
+bool mem_cgroup_bad_page_check(struct page *page)
+{
+	if (mem_cgroup_disabled())
+		return false;
+
+	return lookup_page_cgroup_used(page) != NULL;
+}
+
+void mem_cgroup_print_bad_page(struct page *page)
+{
+	struct page_cgroup *pc;
+
+	pc = lookup_page_cgroup_used(page);
+	if (pc) {
+		int ret = -1;
+		char *path;
+
+		printk(KERN_ALERT "pc:%p pc->flags:%lx pc->mem_cgroup:%p",
+		       pc, pc->flags, pc->mem_cgroup);
+
+		path = kmalloc(PATH_MAX, GFP_KERNEL);
+		if (path) {
+			rcu_read_lock();
+			ret = cgroup_path(pc->mem_cgroup->css.cgroup,
+					path, PATH_MAX);
+			rcu_read_unlock();
+		}
+
+		printk(KERN_CONT "(%s)\n",
+			(ret < 0) ? "cannot get the path" : path);
+		kfree(path);
+	}
+}
+#endif
+
 static DEFINE_MUTEX(set_limit_mutex);
 
 static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 3a58221..8e5726a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -53,6 +53,7 @@
 #include <linux/compaction.h>
 #include <trace/events/kmem.h>
 #include <linux/ftrace_event.h>
+#include <linux/memcontrol.h>
 
 #include <asm/tlbflush.h>
 #include <asm/div64.h>
@@ -565,7 +566,8 @@ static inline int free_pages_check(struct page *page)
 	if (unlikely(page_mapcount(page) |
 		(page->mapping != NULL) |
 		(atomic_read(&page->_count) != 0) |
-		(page->flags & PAGE_FLAGS_CHECK_AT_FREE))) {
+		(page->flags & PAGE_FLAGS_CHECK_AT_FREE) |
+		(mem_cgroup_bad_page_check(page)))) {
 		bad_page(page);
 		return 1;
 	}
@@ -754,7 +756,8 @@ static inline int check_new_page(struct page *page)
 	if (unlikely(page_mapcount(page) |
 		(page->mapping != NULL) |
 		(atomic_read(&page->_count) != 0) |
-		(page->flags & PAGE_FLAGS_CHECK_AT_PREP))) {
+		(page->flags & PAGE_FLAGS_CHECK_AT_PREP) |
+		(mem_cgroup_bad_page_check(page)))) {
 		bad_page(page);
 		return 1;
 	}
@@ -5684,4 +5687,5 @@ void dump_page(struct page *page)
 		page, atomic_read(&page->_count), page_mapcount(page),
 		page->mapping, page->index);
 	dump_page_flags(page->flags);
+	mem_cgroup_print_bad_page(page);
 }
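For completeness, a rough userspace model of the reporting flow in
mem_cgroup_print_bad_page() as added above: dump the page_cgroup state, then make a
best-effort attempt to resolve the owning cgroup's path into a PATH_MAX buffer,
falling back to "cannot get the path" when that fails. The struct page_cgroup layout,
the cgroup_path() signature and the cgroup_name field below are illustrative stand-ins
and do not match the kernel's; exercising the real checks also requires a kernel built
with CONFIG_DEBUG_VM and the memory controller enabled.

/* build: cc -O2 -o print_bad_page_sketch print_bad_page_sketch.c */
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#ifndef PATH_MAX
#define PATH_MAX 4096
#endif

/* Illustrative stand-in for struct page_cgroup -- not the kernel layout. */
struct page_cgroup {
	unsigned long flags;
	const char *cgroup_name;	/* stand-in for pc->mem_cgroup */
};

/* Stand-in for cgroup_path(): 0 on success, negative when unresolvable. */
static int cgroup_path(const struct page_cgroup *pc, char *buf, size_t len)
{
	if (!pc->cgroup_name)
		return -1;
	snprintf(buf, len, "/sys/fs/cgroup/memory/%s", pc->cgroup_name);
	return 0;
}

/* Mirrors the flow of mem_cgroup_print_bad_page(): state dump, then a
 * best-effort cgroup path with a fixed fallback message. */
static void print_bad_page(const struct page_cgroup *pc)
{
	int ret = -1;
	char *path = malloc(PATH_MAX);

	printf("pc:%p pc->flags:%lx ", (void *)pc, pc->flags);
	if (path)
		ret = cgroup_path(pc, path, PATH_MAX);
	printf("(%s)\n", (ret < 0) ? "cannot get the path" : path);
	free(path);
}

int main(void)
{
	struct page_cgroup leaked = { .flags = 0x1, .cgroup_name = "leaky-job" };
	struct page_cgroup orphan = { .flags = 0x1, .cgroup_name = NULL };

	print_bad_page(&leaked);	/* prints the resolved path */
	print_bad_page(&orphan);	/* falls back to "cannot get the path" */
	return 0;
}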