author		Christoph Lameter <cl@linux.com>	2012-05-30 12:54:46 -0500
committer	Pekka Enberg <penberg@kernel.org>	2012-08-16 09:45:04 +0300
commit		19c7ff9ecd89441096dab6a56f926f7df8ba850a
tree		61bc27b47eb4867252b4d6aaa9c0ae99994b7556
parent		455ce9eb1cfa083da0def023094190aeb133855a
slub: Take node lock during object free checks
This only applies when debugging is enabled:
Validation of slabs can currently occur while debugging
information is updated from the fast paths of the allocator.
This results in various races where we get false reports about
slab metadata not being in order.
This patch makes the fast paths take the node lock so that they
serialize with slab validation. This causes additional slowdown
in debug scenarios.
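
To make the race concrete, here is a minimal sketch with hypothetical,
simplified names (the bare list_lock stands in for the per-node
kmem_cache_node->list_lock; the real change is in the diff below).
Disabling local interrupts only excludes code on the same CPU, so a
validator running on another CPU under the node lock could observe the
debug metadata mid-update:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(list_lock);	/* stand-in for kmem_cache_node->list_lock */

static void validator(void)		/* e.g. a slab validation pass */
{
	unsigned long flags;

	spin_lock_irqsave(&list_lock, flags);
	/* walk free lists, check red zones and tracking info ... */
	spin_unlock_irqrestore(&list_lock, flags);
}

static void free_path_before(void)	/* old: no exclusion across CPUs */
{
	unsigned long flags;

	local_irq_save(flags);
	/* set_track(), init_object(), ... can race with validator() */
	local_irq_restore(flags);
}

static void free_path_after(void)	/* new: serialized with validator() */
{
	unsigned long flags;

	spin_lock_irqsave(&list_lock, flags);
	/* same metadata updates, now under the node lock */
	spin_unlock_irqrestore(&list_lock, flags);
}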
Reported-by: Waiman Long <Waiman.Long@hp.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
-rw-r--r--	mm/slub.c	30
1 file changed, 18 insertions(+), 12 deletions(-)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1069,13 +1069,13 @@ bad:
 	return 0;
 }
 
-static noinline int free_debug_processing(struct kmem_cache *s,
-		struct page *page, void *object, unsigned long addr)
+static noinline struct kmem_cache_node *free_debug_processing(
+	struct kmem_cache *s, struct page *page, void *object,
+	unsigned long addr, unsigned long *flags)
 {
-	unsigned long flags;
-	int rc = 0;
+	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
 
-	local_irq_save(flags);
+	spin_lock_irqsave(&n->list_lock, *flags);
 	slab_lock(page);
 
 	if (!check_slab(s, page))
@@ -1113,15 +1113,19 @@ static noinline int free_debug_processing(struct kmem_cache *s,
 	set_track(s, object, TRACK_FREE, addr);
 	trace(s, page, object, 0);
 	init_object(s, object, SLUB_RED_INACTIVE);
-	rc = 1;
 out:
 	slab_unlock(page);
-	local_irq_restore(flags);
-	return rc;
+	/*
+	 * Keep node_lock to preserve integrity
+	 * until the object is actually freed
+	 */
+	return n;
 
 fail:
+	slab_unlock(page);
+	spin_unlock_irqrestore(&n->list_lock, *flags);
 	slab_fix(s, "Object at 0x%p not freed", object);
-	goto out;
+	return NULL;
 }
 
 static int __init setup_slub_debug(char *str)
@@ -1214,8 +1218,9 @@ static inline void setup_object_debug(struct kmem_cache *s,
 static inline int alloc_debug_processing(struct kmem_cache *s,
 	struct page *page, void *object, unsigned long addr) { return 0; }
 
-static inline int free_debug_processing(struct kmem_cache *s,
-	struct page *page, void *object, unsigned long addr) { return 0; }
+static inline struct kmem_cache_node *free_debug_processing(
+	struct kmem_cache *s, struct page *page, void *object,
+	unsigned long addr, unsigned long *flags) { return NULL; }
 
 static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
 			{ return 1; }
@@ -2452,7 +2457,8 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 
 	stat(s, FREE_SLOWPATH);
 
-	if (kmem_cache_debug(s) && !free_debug_processing(s, page, x, addr))
+	if (kmem_cache_debug(s) &&
+		!(n = free_debug_processing(s, page, x, addr, &flags)))
 		return;
 
 	do {
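
The shape of the change is a lock handoff: on success,
free_debug_processing() now returns the kmem_cache_node with
n->list_lock still held (note the comment "Keep node_lock to preserve
integrity until the object is actually freed"), and the caller in
__slab_free() releases it later; on failure it unlocks itself and
returns NULL. A minimal sketch of the idiom, with hypothetical names:

#include <linux/spinlock.h>
#include <linux/types.h>

struct node {
	spinlock_t list_lock;
	/* per-node slab lists ... */
};

/*
 * On success: returns n with list_lock held and the IRQ state saved in
 * *flags; the caller must unlock. On failure: unlocks internally and
 * returns NULL, so the caller can simply bail out.
 */
static struct node *lock_and_check(struct node *n, bool ok,
				   unsigned long *flags)
{
	spin_lock_irqsave(&n->list_lock, *flags);
	if (!ok) {
		spin_unlock_irqrestore(&n->list_lock, *flags);
		return NULL;
	}
	return n;	/* lock intentionally left held */
}

static void caller(struct node *node, bool ok)
{
	unsigned long flags;
	struct node *n = lock_and_check(node, ok, &flags);

	if (!n)
		return;	/* helper already dropped the lock */
	/* ... actually free the object while validation is locked out ... */
	spin_unlock_irqrestore(&n->list_lock, flags);
}

Returning with the lock held is also why the helper takes an unsigned
long *flags: the IRQ state saved at lock time has to survive into the
caller so the eventual spin_unlock_irqrestore() can restore it.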