author		Roman Bobniev <Roman.Bobniev@sonymobile.com>	2013-10-08 15:58:57 -0700
committer	Pekka Enberg <penberg@iki.fi>			2013-10-24 20:25:10 +0300
commit		d56791b38e34e480d869d1b88735df16c81aa684
tree		b1333007313610a469f6ab6fa23c4f65e51a370e
parent		6e4664525b1db28f8c4e1130957f70a94c19213e
slub: proper kmemleak tracking if CONFIG_SLUB_DEBUG disabled
Move all kmemleak calls into hook functions, and make it so
that all hooks (both inside and outside of #ifdef CONFIG_SLUB_DEBUG)
call the appropriate kmemleak routines. This allows for kmemleak
to be configured independently of slub debug features.
It also fixes a bug where kmemleak was only partially enabled in some
configurations.
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Roman Bobniev <Roman.Bobniev@sonymobile.com>
Signed-off-by: Tim Bird <tim.bird@sonymobile.com>
Signed-off-by: Pekka Enberg <penberg@iki.fi>
-rw-r--r--	mm/slub.c	35
1 file changed, 31 insertions, 4 deletions
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -928,6 +928,16 @@ static void trace(struct kmem_cache *s, struct page *page, void *object,
  * Hooks for other subsystems that check memory allocations. In a typical
  * production configuration these hooks all should produce no code at all.
  */
+static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
+{
+	kmemleak_alloc(ptr, size, 1, flags);
+}
+
+static inline void kfree_hook(const void *x)
+{
+	kmemleak_free(x);
+}
+
 static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
 {
 	flags &= gfp_allowed_mask;
@@ -1253,13 +1263,30 @@ static inline void inc_slabs_node(struct kmem_cache *s, int node,
 static inline void dec_slabs_node(struct kmem_cache *s, int node,
 							int objects) {}
 
+static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
+{
+	kmemleak_alloc(ptr, size, 1, flags);
+}
+
+static inline void kfree_hook(const void *x)
+{
+	kmemleak_free(x);
+}
+
 static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
 							{ return 0; }
 static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
-		void *object) {}
+		void *object)
+{
+	kmemleak_alloc_recursive(object, s->object_size, 1, s->flags,
+		flags & gfp_allowed_mask);
+}
 
-static inline void slab_free_hook(struct kmem_cache *s, void *x) {}
+static inline void slab_free_hook(struct kmem_cache *s, void *x)
+{
+	kmemleak_free_recursive(x, s->flags);
+}
 
 #endif /* CONFIG_SLUB_DEBUG */
 
@@ -3265,7 +3292,7 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 	if (page)
 		ptr = page_address(page);
 
-	kmemleak_alloc(ptr, size, 1, flags);
+	kmalloc_large_node_hook(ptr, size, flags);
 	return ptr;
 }
 
@@ -3365,7 +3392,7 @@ void kfree(const void *x)
 	page = virt_to_head_page(x);
 	if (unlikely(!PageSlab(page))) {
 		BUG_ON(!PageCompound(page));
-		kmemleak_free(x);
+		kfree_hook(x);
 		__free_memcg_kmem_pages(page, compound_order(page));
 		return;
 	}
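
Note: the heart of the patch is that both halves of the CONFIG_SLUB_DEBUG conditional now define identical kmemleak-calling hooks, so the call sites in kmalloc_large_node() and kfree() need no #ifdef of their own. Below is a minimal, self-contained sketch of that pattern, for illustration only: it is not kernel code, and kmemleak_alloc_stub()/kmemleak_free_stub() are printf stand-ins invented here, not the real kmemleak API.

/*
 * Sketch of the hook pattern used by this patch: the same static inline
 * hooks are defined whether or not the debug option is set, so callers
 * never reference the leak tracker directly and tracking survives when
 * the debug half is compiled out.
 */
#include <stddef.h>
#include <stdio.h>

#define CONFIG_SLUB_DEBUG 1	/* toggle to mimic the Kconfig option */

/* Invented stand-ins for kmemleak_alloc()/kmemleak_free(). */
static void kmemleak_alloc_stub(const void *ptr, size_t size)
{
	printf("kmemleak: track %p (%zu bytes)\n", (void *)ptr, size);
}

static void kmemleak_free_stub(const void *ptr)
{
	printf("kmemleak: untrack %p\n", (void *)ptr);
}

#ifdef CONFIG_SLUB_DEBUG
/* Debug build: hooks could do extra validation, but still track leaks. */
static inline void kmalloc_large_node_hook(void *ptr, size_t size)
{
	kmemleak_alloc_stub(ptr, size);
}

static inline void kfree_hook(const void *x)
{
	kmemleak_free_stub(x);
}
#else
/* Non-debug build: the hooks make the very same tracking calls. */
static inline void kmalloc_large_node_hook(void *ptr, size_t size)
{
	kmemleak_alloc_stub(ptr, size);
}

static inline void kfree_hook(const void *x)
{
	kmemleak_free_stub(x);
}
#endif

int main(void)
{
	char buf[64];

	/* Call sites are identical under either configuration. */
	kmalloc_large_node_hook(buf, sizeof(buf));
	kfree_hook(buf);
	return 0;
}

Before this change, the kmemleak_alloc()/kmemleak_free() calls for large allocations lived only on the CONFIG_SLUB_DEBUG side, so a build with SLUB debugging disabled silently lost part of the leak tracking; duplicating the hooks on both sides, as in the patch and the sketch above, removes that coupling.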