author     Christoph Lameter <cl@linux.com>      2011-06-01 12:25:44 -0500
committer  Pekka Enberg <penberg@kernel.org>     2011-07-02 13:26:52 +0300
commit     7e0528dadc9f8b04e4de0dba48a075100c2afe75 (patch)
tree       708bd8384d867d276d36faa6bfc33fb41fe051f3 /mm/slub.c
parent     e4a46182e1bcc2ddacff5a35f6b52398b51f1b11 (diff)
slub: Push irq disable into allocate_slab()
Do the irq handling in allocate_slab() instead of __slab_alloc().

__slab_alloc() is already cluttered and allocate_slab() is already
fiddling around with gfp flags.

v6->v7: Only increment ORDER_FALLBACK if we get a page during fallback

Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
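For orientation, a condensed sketch of the irq handling that allocate_slab() is left with after this change. This is a simplified illustration of the pattern in the diff below, not the full function: the first-attempt gfp tweaks, the lower-order fallback and the kmemcheck/page-setup tail are elided, and it assumes the existing mm/slub.c helpers alloc_slab_page(), local_irq_enable() and local_irq_disable().

/*
 * Simplified sketch of allocate_slab() after this patch.  The caller,
 * __slab_alloc(), runs with irqs disabled; a __GFP_WAIT allocation may
 * sleep, so irqs are enabled only around the page allocation and
 * disabled again before returning, even when the allocation fails.
 */
static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
{
        struct page *page;
        struct kmem_cache_order_objects oo = s->oo;

        flags &= gfp_allowed_mask;

        if (flags & __GFP_WAIT)
                local_irq_enable();

        flags |= s->allocflags;

        page = alloc_slab_page(flags, node, oo);
        /* ... lower-order fallback elided ... */

        if (flags & __GFP_WAIT)
                local_irq_disable();

        if (!page)
                return NULL;

        /* ... kmemcheck handling and page setup continue here ... */
        return page;
}

Note the ordering on the failure path: interrupts are disabled again before the !page check, so __slab_alloc() gets back the irq-off state it expects whether or not the allocation succeeded.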
Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c  23
1 file changed, 13 insertions(+), 10 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 35f351f..add2ae7 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1187,6 +1187,11 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
         struct kmem_cache_order_objects oo = s->oo;
         gfp_t alloc_gfp;
 
+        flags &= gfp_allowed_mask;
+
+        if (flags & __GFP_WAIT)
+                local_irq_enable();
+
         flags |= s->allocflags;
 
         /*
@@ -1203,12 +1208,17 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
                  * Try a lower order alloc if possible
                  */
                 page = alloc_slab_page(flags, node, oo);
-                if (!page)
-                        return NULL;
 
-                stat(s, ORDER_FALLBACK);
+                if (page)
+                        stat(s, ORDER_FALLBACK);
         }
 
+        if (flags & __GFP_WAIT)
+                local_irq_disable();
+
+        if (!page)
+                return NULL;
+
         if (kmemcheck_enabled
                 && !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
                 int pages = 1 << oo_order(oo);
@@ -1849,15 +1859,8 @@ new_slab:
                 goto load_freelist;
         }
 
-        gfpflags &= gfp_allowed_mask;
-        if (gfpflags & __GFP_WAIT)
-                local_irq_enable();
-
         page = new_slab(s, gfpflags, node);
 
-        if (gfpflags & __GFP_WAIT)
-                local_irq_disable();
-
         if (page) {
                 c = __this_cpu_ptr(s->cpu_slab);
                 stat(s, ALLOC_SLAB);