author     Christoph Lameter <cl@linux.com>    2011-06-01 12:25:58 -0500
committer  Pekka Enberg <penberg@kernel.org>   2011-07-02 13:26:57 +0300
commit     03e404af26dc2ea0d278d7a342de0aab394793ce (patch)
tree       1290b42700767c661125aaf584253bdeb98b7afd
parent     e36a2652d7d1ad97f7636a39bdd8654d296cc36b (diff)
download   op-kernel-dev-03e404af26dc2ea0d278d7a342de0aab394793ce.zip
           op-kernel-dev-03e404af26dc2ea0d278d7a342de0aab394793ce.tar.gz
slub: fast release on full slab
Make deactivation occur implicitly while checking out the current
freelist.

This avoids one cmpxchg operation on a slab that is now fully in use.

Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
-rw-r--r--  include/linux/slub_def.h |  1
-rw-r--r--  mm/slub.c                | 21
2 files changed, 20 insertions, 2 deletions
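Before the diff itself, a minimal single-threaded userspace sketch of the idea.
Everything in it (struct slab, fake_cmpxchg_double(), checkout_freelist()) is an
illustrative stand-in, not kernel API, and it deliberately drops the atomicity
of the real primitive: it only models how folding `frozen` into the same
compare-and-exchange that claims the freelist makes deactivation of a fully
used slab free.

/*
 * Minimal userspace model of the idea (illustrative only; not kernel API).
 * Build: cc -o model model.c
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct slab {
        void *freelist;         /* first free object, NULL when exhausted */
        unsigned int inuse;     /* objects handed out */
        unsigned int objects;   /* total objects in the slab */
        bool frozen;            /* slab is checked out by a cpu */
};

/*
 * Stand-in for cmpxchg_double_slab(): install the new freelist and
 * counter values only if the old ones still match.  The real primitive
 * does this atomically with a double-word cmpxchg; this single-threaded
 * model just compares and assigns.
 */
static bool fake_cmpxchg_double(struct slab *s,
                void *old_free, unsigned int old_inuse, bool old_frozen,
                void *new_free, unsigned int new_inuse, bool new_frozen)
{
        if (s->freelist != old_free || s->inuse != old_inuse ||
            s->frozen != old_frozen)
                return false;
        s->freelist = new_free;
        s->inuse = new_inuse;
        s->frozen = new_frozen;
        return true;
}

/*
 * Check out the entire freelist.  The point of the patch is the last
 * argument: when the freelist is already empty, the same cmpxchg that
 * claims it also clears `frozen`, so the slab is deactivated without
 * a second atomic operation.
 */
static void *checkout_freelist(struct slab *s)
{
        void *object;
        unsigned int inuse;

        do {
                object = s->freelist;
                inuse = s->inuse;
                /* a cpu slab must be frozen here, cf. VM_BUG_ON(!new.frozen) */
        } while (!fake_cmpxchg_double(s, object, inuse, true,
                                      NULL, s->objects, object != NULL));

        return object;  /* NULL: slab was full and is now unfrozen */
}

int main(void)
{
        char obj;
        struct slab partial = { &obj, 3, 4, true }; /* one object left */
        struct slab full = { NULL, 4, 4, true };    /* fully in use */

        printf("partial: object=%p frozen=%d\n",
               checkout_freelist(&partial), partial.frozen);
        printf("full:    object=%p frozen=%d\n",
               checkout_freelist(&full), full.frozen);
        return 0;
}

In the `full` case the caller gets NULL back and only has to drop its
reference, which is exactly what the load_freelist hunk below does with
c->page.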
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 5b228b7..71441f8 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -32,6 +32,7 @@ enum stat_item {
         DEACTIVATE_TO_HEAD,     /* Cpu slab was moved to the head of partials */
         DEACTIVATE_TO_TAIL,     /* Cpu slab was moved to the tail of partials */
         DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
+        DEACTIVATE_BYPASS,      /* Implicit deactivation */
         ORDER_FALLBACK,         /* Number of times fallback was necessary */
         CMPXCHG_DOUBLE_CPU_FAIL,/* Failure of this_cpu_cmpxchg_double */
         CMPXCHG_DOUBLE_FAIL,    /* Number of times that cmpxchg double did not match */
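The enum entry only names the event; the counting happens at the new
stat(s, DEACTIVATE_BYPASS) call in the allocation slow path below, and the
last two hunks of the patch add the matching STAT_ATTR and attribute-table
lines that export the counter.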
diff --git a/mm/slub.c b/mm/slub.c
index e00b773..25dac48 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1977,9 +1977,21 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
                 object = page->freelist;
                 counters = page->counters;
                 new.counters = counters;
-                new.inuse = page->objects;
                 VM_BUG_ON(!new.frozen);
 
+                /*
+                 * If there is no object left then we use this loop to
+                 * deactivate the slab which is simple since no objects
+                 * are left in the slab and therefore we do not need to
+                 * put the page back onto the partial list.
+                 *
+                 * If there are objects left then we retrieve them
+                 * and use them to refill the per cpu queue.
+                 */
+
+                new.inuse = page->objects;
+                new.frozen = object != NULL;
+
         } while (!cmpxchg_double_slab(s, page,
                         object, counters,
                         NULL, new.counters,
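Why can `frozen` ride along in the same cmpxchg? Because SLUB packs it into
the same word as the object counts, so `freelist` plus `counters` cover the
whole slab state. A compilable sketch of that overlay follows; the field
widths (inuse:16, objects:15, frozen:1) are an assumption based on the
struct page rearrangement that preceded this series, not taken from this
patch.

#include <stdio.h>

/* Sketch of the SLUB-relevant overlay in struct page.  Assumed layout:
 * inuse:16, objects:15, frozen:1 sharing one word with `counters`. */
struct page_sketch {
        void *freelist;                 /* first free object */
        union {
                unsigned long counters; /* whole word, what cmpxchg swaps */
                struct {
                        unsigned inuse:16;   /* allocated objects */
                        unsigned objects:15; /* total objects */
                        unsigned frozen:1;   /* checked out by a cpu */
                };
        };
};

int main(void)
{
        struct page_sketch p = { 0 };

        p.inuse = 4;
        p.objects = 4;
        p.frozen = 1;
        /* one word carries all three fields, so a freelist+counters
         * double cmpxchg can update them together */
        printf("counters word: %#lx\n", p.counters);
        return 0;
}

Seen through this overlay, moving `new.inuse = page->objects;` below the
comment is cosmetic: `new.counters = counters;` has already snapshotted all
three fields, and the loop then overwrites the two it wants to change,
`inuse` and `frozen`.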
@@ -1988,8 +2000,11 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 
 load_freelist:
         VM_BUG_ON(!page->frozen);
-        if (unlikely(!object))
+        if (unlikely(!object)) {
+                c->page = NULL;
+                stat(s, DEACTIVATE_BYPASS);
                 goto new_slab;
+        }
 
         stat(s, ALLOC_REFILL);
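This is where the saving shows up: an exhausted slab leaves the cmpxchg loop
above already unfrozen, so the slow path merely drops the per-cpu reference
and counts a DEACTIVATE_BYPASS event before going on to acquire a new slab.
There is no separate deactivation step, with its own cmpxchg, for the fully
allocated page.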
@@ -4680,6 +4695,7 @@ STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty);
 STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
 STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
 STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
+STAT_ATTR(DEACTIVATE_BYPASS, deactivate_bypass);
 STAT_ATTR(ORDER_FALLBACK, order_fallback);
 STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail);
 STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail);
@@ -4740,6 +4756,7 @@ static struct attribute *slab_attrs[] = {
         &deactivate_to_head_attr.attr,
         &deactivate_to_tail_attr.attr,
         &deactivate_remote_frees_attr.attr,
+        &deactivate_bypass_attr.attr,
         &order_fallback_attr.attr,
         &cmpxchg_double_fail_attr.attr,
         &cmpxchg_double_cpu_fail_attr.attr,
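With this plumbing in place (and assuming the usual CONFIG_SLUB_STATS build
option, which gates SLUB's event counters), the new counter should appear as
a deactivate_bypass file under each cache's sysfs directory,
/sys/kernel/slab/<cache>/, alongside the other deactivate_* statistics.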