author		Christoph Lameter <cl@linux.com>	2011-11-11 14:07:14 -0600
committer	Pekka Enberg <penberg@kernel.org>	2011-12-13 22:17:10 +0200
commit		213eeb9fd9d66c33109e2ace242df214dc3a653d (patch)
tree		caaecaf19dd13f10a1704aa6b2a20fa900fb3d85 /mm
parent		73736e0387ba0e6d2b703407b4d26168d31516a7 (diff)
slub: Extract get_freelist from __slab_alloc
get_freelist retrieves free objects from the page freelist (put there by
remote frees) or deactivates a slab page if no more objects are available.

Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
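For orientation, a minimal sketch of the caller-side contract the new helper
exposes (it mirrors the __slab_alloc hunk further down; the fallback label and
comments are illustrative, not quoted from the patch):

	object = get_freelist(s, c->page);

	if (!object) {
		/*
		 * get_freelist() unfroze the page: no objects were left,
		 * so the slab is no longer owned by this CPU.  Drop the
		 * reference and fall back to acquiring another slab.
		 */
		c->page = NULL;
		goto new_slab;		/* illustrative label */
	}

	/*
	 * Non-NULL return: the page is still frozen and 'object' heads the
	 * freelist that has just been transferred to this CPU.
	 */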
Diffstat (limited to 'mm')
-rw-r--r--	mm/slub.c	57
1 file changed, 32 insertions(+), 25 deletions(-)
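The retry loop in the new helper relies on SLUB packing its slab bookkeeping
into a single word of struct page.  As a rough sketch of the layout assumed
here (field names as in struct page of this kernel era, shown for
illustration only):

	/*
	 * Reading page->counters once snapshots all three bitfields.  The
	 * snapshot is edited field-by-field in a local 'struct page new'
	 * and written back, together with the freelist pointer, in one
	 * cmpxchg_double_slab(), so concurrent remote frees observe either
	 * the old state or the new one, never a mix.
	 */
	struct {
		unsigned inuse:16;	/* objects handed out from this slab */
		unsigned objects:15;	/* total objects in the slab */
		unsigned frozen:1;	/* slab is cached by a CPU */
	};
	unsigned long counters;		/* the same bits viewed as one word */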
diff --git a/mm/slub.c b/mm/slub.c
index 5e410a9..6dc79f8 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2127,6 +2127,37 @@ static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
}
/*
+ * Check the page->freelist of a page and either transfer the freelist to the per cpu freelist
+ * or deactivate the page.
+ *
+ * The page is still frozen if the return value is not NULL.
+ *
+ * If this function returns NULL then the page has been unfrozen.
+ */
+static inline void *get_freelist(struct kmem_cache *s, struct page *page)
+{
+ struct page new;
+ unsigned long counters;
+ void *freelist;
+
+ do {
+ freelist = page->freelist;
+ counters = page->counters;
+ new.counters = counters;
+ VM_BUG_ON(!new.frozen);
+
+ new.inuse = page->objects;
+ new.frozen = freelist != NULL;
+
+ } while (!cmpxchg_double_slab(s, page,
+ freelist, counters,
+ NULL, new.counters,
+ "get_freelist"));
+
+ return freelist;
+}
+
+/*
* Slow path. The lockless freelist is empty or we need to perform
* debugging duties.
*
@@ -2147,8 +2178,6 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
{
void **object;
unsigned long flags;
- struct page new;
- unsigned long counters;
local_irq_save(flags);
#ifdef CONFIG_PREEMPT
@@ -2176,29 +2205,7 @@ redo:
stat(s, ALLOC_SLOWPATH);
- do {
- object = c->page->freelist;
- counters = c->page->counters;
- new.counters = counters;
- VM_BUG_ON(!new.frozen);
-
- /*
- * If there is no object left then we use this loop to
- * deactivate the slab which is simple since no objects
- * are left in the slab and therefore we do not need to
- * put the page back onto the partial list.
- *
- * If there are objects left then we retrieve them
- * and use them to refill the per cpu queue.
- */
-
- new.inuse = c->page->objects;
- new.frozen = object != NULL;
-
- } while (!__cmpxchg_double_slab(s, c->page,
- object, counters,
- NULL, new.counters,
- "__slab_alloc"));
+ object = get_freelist(s, c->page);
if (!object) {
c->page = NULL;