author | Ingo Molnar <mingo@kernel.org> | 2016-03-08 12:26:07 +0100 |
committer | Ingo Molnar <mingo@kernel.org> | 2016-03-08 12:26:07 +0100 |
commit | fe36d8912c8e402c62ce5a8712b6a47baae1ceaa (patch) |
tree | f8394fd1dbb10b2d075e74d0f6b09ec29c795fe6 /mm/slub.c | |
parent | 16a8083cedbe628228dbb08fc1469c70e6208619 (diff) | |
parent | e2857b8f11a289ed2b61d18d0665e05c1053c446 (diff) | |
download | op-kernel-dev-fe36d8912c8e402c62ce5a8712b6a47baae1ceaa.zip op-kernel-dev-fe36d8912c8e402c62ce5a8712b6a47baae1ceaa.tar.gz |
Merge branch 'linus' into irq/core, to pick up fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'mm/slub.c')
-rw-r--r-- | mm/slub.c | 38 |
1 file changed, 17 insertions, 21 deletions
```diff
@@ -1592,18 +1592,12 @@ static inline void add_partial(struct kmem_cache_node *n,
 	__add_partial(n, page, tail);
 }
 
-static inline void
-__remove_partial(struct kmem_cache_node *n, struct page *page)
-{
-	list_del(&page->lru);
-	n->nr_partial--;
-}
-
 static inline void remove_partial(struct kmem_cache_node *n,
 					struct page *page)
 {
 	lockdep_assert_held(&n->list_lock);
-	__remove_partial(n, page);
+	list_del(&page->lru);
+	n->nr_partial--;
 }
 
 /*
@@ -3184,6 +3178,12 @@ static void free_kmem_cache_nodes(struct kmem_cache *s)
 	}
 }
 
+void __kmem_cache_release(struct kmem_cache *s)
+{
+	free_percpu(s->cpu_slab);
+	free_kmem_cache_nodes(s);
+}
+
 static int init_kmem_cache_nodes(struct kmem_cache *s)
 {
 	int node;
@@ -3443,28 +3443,31 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
 
 /*
  * Attempt to free all partial slabs on a node.
- * This is called from kmem_cache_close(). We must be the last thread
- * using the cache and therefore we do not need to lock anymore.
+ * This is called from __kmem_cache_shutdown(). We must take list_lock
+ * because sysfs file might still access partial list after the shutdowning.
  */
 static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
 {
 	struct page *page, *h;
 
+	BUG_ON(irqs_disabled());
+	spin_lock_irq(&n->list_lock);
 	list_for_each_entry_safe(page, h, &n->partial, lru) {
 		if (!page->inuse) {
-			__remove_partial(n, page);
+			remove_partial(n, page);
 			discard_slab(s, page);
 		} else {
 			list_slab_objects(s, page,
-			"Objects remaining in %s on kmem_cache_close()");
+			"Objects remaining in %s on __kmem_cache_shutdown()");
 		}
 	}
+	spin_unlock_irq(&n->list_lock);
 }
 
 /*
  * Release all resources used by a slab cache.
  */
-static inline int kmem_cache_close(struct kmem_cache *s)
+int __kmem_cache_shutdown(struct kmem_cache *s)
 {
 	int node;
 	struct kmem_cache_node *n;
@@ -3476,16 +3479,9 @@ static inline int kmem_cache_close(struct kmem_cache *s)
 		if (n->nr_partial || slabs_node(s, node))
 			return 1;
 	}
-	free_percpu(s->cpu_slab);
-	free_kmem_cache_nodes(s);
 	return 0;
 }
 
-int __kmem_cache_shutdown(struct kmem_cache *s)
-{
-	return kmem_cache_close(s);
-}
-
 /********************************************************************
  *		Kmalloc subsystem
  *******************************************************************/
@@ -3980,7 +3976,7 @@ int __kmem_cache_create(struct kmem_cache *s, unsigned long flags)
 	memcg_propagate_slab_attrs(s);
 	err = sysfs_slab_add(s);
 	if (err)
-		kmem_cache_close(s);
+		__kmem_cache_release(s);
 
 	return err;
 }
```
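The net effect of these hunks is a split of the old kmem_cache_close(): the fail-able check ("are objects still allocated on any node?") stays in __kmem_cache_shutdown(), while the actual freeing of per-cpu and per-node data moves into the new __kmem_cache_release(), and free_partial() now takes list_lock because, per the updated comment, a sysfs reader may still walk the partial list during shutdown. The sketch below is only a user-space analogy of that ordering, not kernel code: the types and names (fake_cache, fake_shutdown, fake_release) and the pthread mutex standing in for n->list_lock are invented for illustration.

```c
/*
 * User-space sketch of the shutdown/release split shown in the patch.
 * All identifiers here are hypothetical stand-ins, not SLUB internals.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_node {
	pthread_mutex_t list_lock;	/* stands in for n->list_lock */
	int nr_partial;			/* slabs left on the partial list */
};

struct fake_cache {
	struct fake_node *node;
	int *cpu_slab;			/* stands in for the per-cpu data */
};

/* Analogue of __kmem_cache_shutdown(): drain the partial list under the
 * lock and report non-zero if objects are still in use.  It frees no
 * cache metadata, so failing here leaves the cache fully intact. */
static int fake_shutdown(struct fake_cache *s)
{
	int busy;

	pthread_mutex_lock(&s->node->list_lock);
	busy = s->node->nr_partial;
	pthread_mutex_unlock(&s->node->list_lock);
	return busy;
}

/* Analogue of __kmem_cache_release(): free the backing memory.  Called
 * only once nothing (e.g. a sysfs reader) can reach the cache any more. */
static void fake_release(struct fake_cache *s)
{
	free(s->cpu_slab);
	free(s->node);
}

int main(void)
{
	struct fake_cache s;

	s.node = calloc(1, sizeof(*s.node));
	s.cpu_slab = calloc(1, sizeof(*s.cpu_slab));
	pthread_mutex_init(&s.node->list_lock, NULL);

	if (fake_shutdown(&s)) {	/* step 1: may fail, frees nothing */
		fprintf(stderr, "cache still has objects\n");
		return 1;
	}
	/* step 2: here the kernel would tear down the sysfs file */
	fake_release(&s);		/* step 3: only now free node/per-cpu data */
	return 0;
}
```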