author    Linus Torvalds <torvalds@linux-foundation.org>  2014-02-02 11:30:08 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-02-02 11:30:08 -0800
commit    7b383bef25e493cc4f047e44ebd6c3ccfd6d1cc5 (patch)
tree      4a8379bb6d5929cf72c916da8e5bc7532aa43841 /mm
parent    87af5e5c22568201dfbda5cac9c76e96982adc9c (diff)
parent    cb8ee1a3d429f8898972c869dd4792afb04e961a (diff)
Merge branch 'slab/next' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux
Pull SLAB changes from Pekka Enberg:
 "Random bug fixes that have accumulated in my inbox over the past few months"

* 'slab/next' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux:
  mm: Fix warning on make htmldocs caused by slab.c
  mm: slub: work around unneeded lockdep warning
  mm: sl[uo]b: fix misleading comments
  slub: Fix possible format string bug.
  slub: use lockdep_assert_held
  slub: Fix calculation of cpu slabs
  slab.h: remove duplicate kmalloc declaration and fix kernel-doc warnings
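The common thread in the locking changes below is replacing "list_lock must be held" comments with lockdep_assert_held(), which turns a documented precondition into one that lockdep-enabled kernels verify at runtime. A minimal sketch of the pattern (illustrative names, not the kernel's actual code):

    #include <linux/spinlock.h>
    #include <linux/lockdep.h>
    #include <linux/list.h>

    static DEFINE_SPINLOCK(demo_lock);
    static LIST_HEAD(demo_list);

    /* Before this series the rule "demo_lock must be held" lived only
     * in a comment; lockdep_assert_held() makes lockdep check it. */
    static void demo_add(struct list_head *entry)
    {
            lockdep_assert_held(&demo_lock);
            list_add(entry, &demo_list);
    }

On kernels built without lockdep the assertion compiles away, so the checks cost nothing in production builds.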
Diffstat (limited to 'mm')
-rw-r--r--  mm/slab.c   2
-rw-r--r--  mm/slub.c   56
2 files changed, 35 insertions(+), 23 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index eb043bf..b264214 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1946,7 +1946,7 @@ static void slab_destroy_debugcheck(struct kmem_cache *cachep,
/**
* slab_destroy - destroy and release all objects in a slab
* @cachep: cache pointer being destroyed
- * @slabp: slab pointer being destroyed
+ * @page: page pointer being destroyed
*
* Destroy all the objs in a slab, and release the mem back to the system.
* Before calling the slab must have been unlinked from the cache. The
diff --git a/mm/slub.c b/mm/slub.c
index 2b1a697..7e3e045 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1000,23 +1000,22 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x)
/*
* Tracking of fully allocated slabs for debugging purposes.
- *
- * list_lock must be held.
*/
static void add_full(struct kmem_cache *s,
struct kmem_cache_node *n, struct page *page)
{
+ lockdep_assert_held(&n->list_lock);
+
if (!(s->flags & SLAB_STORE_USER))
return;
list_add(&page->lru, &n->full);
}
-/*
- * list_lock must be held.
- */
-static void remove_full(struct kmem_cache *s, struct page *page)
+static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page)
{
+ lockdep_assert_held(&n->list_lock);
+
if (!(s->flags & SLAB_STORE_USER))
return;
@@ -1265,7 +1264,8 @@ static inline int check_object(struct kmem_cache *s, struct page *page,
void *object, u8 val) { return 1; }
static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
struct page *page) {}
-static inline void remove_full(struct kmem_cache *s, struct page *page) {}
+static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
+ struct page *page) {}
static inline unsigned long kmem_cache_flags(unsigned long object_size,
unsigned long flags, const char *name,
void (*ctor)(void *))
@@ -1519,12 +1519,12 @@ static void discard_slab(struct kmem_cache *s, struct page *page)
/*
* Management of partially allocated slabs.
- *
- * list_lock must be held.
*/
static inline void add_partial(struct kmem_cache_node *n,
struct page *page, int tail)
{
+ lockdep_assert_held(&n->list_lock);
+
n->nr_partial++;
if (tail == DEACTIVATE_TO_TAIL)
list_add_tail(&page->lru, &n->partial);
@@ -1532,12 +1532,11 @@ static inline void add_partial(struct kmem_cache_node *n,
list_add(&page->lru, &n->partial);
}
-/*
- * list_lock must be held.
- */
static inline void remove_partial(struct kmem_cache_node *n,
struct page *page)
{
+ lockdep_assert_held(&n->list_lock);
+
list_del(&page->lru);
n->nr_partial--;
}
@@ -1547,8 +1546,6 @@ static inline void remove_partial(struct kmem_cache_node *n,
* return the pointer to the freelist.
*
* Returns a list of objects or NULL if it fails.
- *
- * Must hold list_lock since we modify the partial list.
*/
static inline void *acquire_slab(struct kmem_cache *s,
struct kmem_cache_node *n, struct page *page,
@@ -1558,6 +1555,8 @@ static inline void *acquire_slab(struct kmem_cache *s,
unsigned long counters;
struct page new;
+ lockdep_assert_held(&n->list_lock);
+
/*
* Zap the freelist and set the frozen bit.
* The old freelist is the list of objects for the
@@ -1902,7 +1901,7 @@ redo:
else if (l == M_FULL)
- remove_full(s, page);
+ remove_full(s, n, page);
if (m == M_PARTIAL) {
@@ -2556,7 +2555,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
new.inuse--;
if ((!new.inuse || !prior) && !was_frozen) {
- if (kmem_cache_has_cpu_partial(s) && !prior)
+ if (kmem_cache_has_cpu_partial(s) && !prior) {
/*
* Slab was on no list before and will be
@@ -2566,7 +2565,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
*/
new.frozen = 1;
- else { /* Needs to be taken off a list */
+ } else { /* Needs to be taken off a list */
n = get_node(s, page_to_nid(page));
/*
@@ -2615,7 +2614,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
*/
if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) {
if (kmem_cache_debug(s))
- remove_full(s, page);
+ remove_full(s, n, page);
add_partial(n, page, DEACTIVATE_TO_TAIL);
stat(s, FREE_ADD_PARTIAL);
}
@@ -2629,9 +2628,10 @@ slab_empty:
*/
remove_partial(n, page);
stat(s, FREE_REMOVE_PARTIAL);
- } else
+ } else {
/* Slab must be on the full list */
- remove_full(s, page);
+ remove_full(s, n, page);
+ }
spin_unlock_irqrestore(&n->list_lock, flags);
stat(s, FREE_SLAB);
@@ -2905,7 +2905,13 @@ static void early_kmem_cache_node_alloc(int node)
init_kmem_cache_node(n);
inc_slabs_node(kmem_cache_node, node, page->objects);
+ /*
+ * the lock is for lockdep's sake, not for any actual
+ * race protection
+ */
+ spin_lock(&n->list_lock);
add_partial(n, page, DEACTIVATE_TO_HEAD);
+ spin_unlock(&n->list_lock);
}
static void free_kmem_cache_nodes(struct kmem_cache *s)
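As the comment in the hunk above notes, early_kmem_cache_node_alloc() runs during boot before any other CPU can touch the node, so the list manipulation needs no locking for correctness; the lock is taken purely so that add_partial()'s new lockdep_assert_held() is satisfied. The idiom, sketched by extending the hypothetical demo_lock/demo_add example after the commit message:

    #include <linux/init.h>

    static struct list_head demo_entry = LIST_HEAD_INIT(demo_entry);

    static void __init demo_early_setup(void)
    {
            /* Single-threaded early init: no real race, but demo_add()
             * asserts demo_lock, so take it for lockdep's sake only. */
            spin_lock(&demo_lock);
            demo_add(&demo_entry);
            spin_unlock(&demo_lock);
    }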
@@ -4314,7 +4320,13 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
page = ACCESS_ONCE(c->partial);
if (page) {
- x = page->pobjects;
+ node = page_to_nid(page);
+ if (flags & SO_TOTAL)
+ WARN_ON_ONCE(1);
+ else if (flags & SO_OBJECTS)
+ WARN_ON_ONCE(1);
+ else
+ x = page->pages;
total += x;
nodes[node] += x;
}
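For SLUB's per-cpu partial lists, the head page's pages field counts the slabs on the list, while pobjects is only an approximate count of free objects, so summing page->pobjects here inflated the "cpu slabs" statistic. The fix reports page counts via page->pages and warns (WARN_ON_ONCE) when an exact total or object count is requested, since no accurate number is available on this path.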
@@ -5178,7 +5190,7 @@ static int sysfs_slab_add(struct kmem_cache *s)
}
s->kobj.kset = slab_kset;
- err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, name);
+ err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
if (err) {
kobject_put(&s->kobj);
return err;
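The final hunk closes a possible format-string bug: the cache name was being passed to kobject_init_and_add() as its printf-style format argument, so a name containing '%' conversions could be misparsed and, in the worst case, dereference bogus varargs. Passing a literal "%s" with the name as data is the standard fix for any printf-style API; a minimal illustration (not the kernel's actual code):

    #include <linux/printk.h>

    static void log_cache_name(const char *name)
    {
            pr_info(name);        /* BUG: '%' in name parsed as format */
            pr_info("%s", name);  /* safe: name treated as plain data  */
    }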