From 97d06609158e61f6bdf538c4a6788e2de492236f Mon Sep 17 00:00:00 2001
From: Christoph Lameter
Date: Fri, 6 Jul 2012 15:25:11 -0500
Subject: mm, sl[aou]b: Common definition for boot state of the slab allocators

All allocators have some sort of support for the bootstrap status.

Set up a common definition for the boot states and make all slab
allocators use that definition.

Reviewed-by: Glauber Costa
Reviewed-by: Joonsoo Kim
Signed-off-by: Christoph Lameter
Signed-off-by: Pekka Enberg
---
 mm/slab.c | 45 ++++++++++++++-------------------------------
 1 file changed, 14 insertions(+), 31 deletions(-)

(limited to 'mm/slab.c')

diff --git a/mm/slab.c b/mm/slab.c
index 10c821e..59a466b 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -87,6 +87,7 @@
  */

 #include	<linux/slab.h>
+#include	"slab.h"
 #include	<linux/mm.h>
 #include	<linux/poison.h>
 #include	<linux/swap.h>
@@ -565,27 +566,6 @@ static struct kmem_cache cache_cache = {

 #define BAD_ALIEN_MAGIC 0x01020304ul

-/*
- * chicken and egg problem: delay the per-cpu array allocation
- * until the general caches are up.
- */
-static enum {
-	NONE,
-	PARTIAL_AC,
-	PARTIAL_L3,
-	EARLY,
-	LATE,
-	FULL
-} g_cpucache_up;
-
-/*
- * used by boot code to determine if it can use slab based allocator
- */
-int slab_is_available(void)
-{
-	return g_cpucache_up >= EARLY;
-}
-
 #ifdef CONFIG_LOCKDEP

 /*
@@ -651,7 +631,7 @@ static void init_node_lock_keys(int q)
 {
 	struct cache_sizes *s = malloc_sizes;

-	if (g_cpucache_up < LATE)
+	if (slab_state < UP)
 		return;

 	for (s = malloc_sizes; s->cs_size != ULONG_MAX; s++) {
@@ -1649,14 +1629,14 @@ void __init kmem_cache_init(void)
 		}
 	}

-	g_cpucache_up = EARLY;
+	slab_state = UP;
 }

 void __init kmem_cache_init_late(void)
 {
 	struct kmem_cache *cachep;

-	g_cpucache_up = LATE;
+	slab_state = UP;

 	/* Annotate slab for lockdep -- annotate the malloc caches */
 	init_lock_keys();
@@ -1668,6 +1648,9 @@ void __init kmem_cache_init_late(void)
 		BUG();
 	mutex_unlock(&cache_chain_mutex);

+	/* Done! */
+	slab_state = FULL;
+
 	/*
 	 * Register a cpu startup notifier callback that initializes
 	 * cpu_cache_get for all new cpus
@@ -1699,7 +1682,7 @@
 		start_cpu_timer(cpu);

 	/* Done! */
-	g_cpucache_up = FULL;
+	slab_state = FULL;
 	return 0;
 }
 __initcall(cpucache_init);
@@ -2167,10 +2150,10 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,

 static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
 {
-	if (g_cpucache_up >= LATE)
+	if (slab_state >= FULL)
 		return enable_cpucache(cachep, gfp);

-	if (g_cpucache_up == NONE) {
+	if (slab_state == DOWN) {
 		/*
 		 * Note: the first kmem_cache_create must create the cache
 		 * that's used by kmalloc(24), otherwise the creation of
@@ -2185,16 +2168,16 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
 	 */
 		set_up_list3s(cachep, SIZE_AC);
 		if (INDEX_AC == INDEX_L3)
-			g_cpucache_up = PARTIAL_L3;
+			slab_state = PARTIAL_L3;
 		else
-			g_cpucache_up = PARTIAL_AC;
+			slab_state = PARTIAL_ARRAYCACHE;
 	} else {
 		cachep->array[smp_processor_id()] =
 			kmalloc(sizeof(struct arraycache_init), gfp);

-		if (g_cpucache_up == PARTIAL_AC) {
+		if (slab_state == PARTIAL_ARRAYCACHE) {
 			set_up_list3s(cachep, SIZE_L3);
-			g_cpucache_up = PARTIAL_L3;
+			slab_state = PARTIAL_L3;
 		} else {
 			int node;
 			for_each_online_node(node) {
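
Note: this view is limited to mm/slab.c, so the common definition itself is
not visible above; it lives in the newly included "slab.h". Pieced together
from the states actually used in the hunks, the shared header plausibly looks
like the sketch below. The enumerator comments, the exact set of states
(SLUB/SLOB may add intermediate ones), and the placement of the relocated
slab_is_available() in common code are assumptions, not a verbatim copy of
the merged header; only the names DOWN, PARTIAL_ARRAYCACHE, PARTIAL_L3, UP,
and FULL are taken from the diff itself.

	/*
	 * Sketch of the common boot-state definition shared by the
	 * sl[aou]b allocators, assumed to live in mm/slab.h.  The
	 * enumerators must stay in boot order, because callers compare
	 * with < and >= (see slab_state < UP and slab_state >= FULL in
	 * the hunks above).
	 */
	enum slab_state {
		DOWN,			/* no slab functionality yet */
		PARTIAL_ARRAYCACHE,	/* SLAB: arraycache kmalloc size up */
		PARTIAL_L3,		/* SLAB: list3 kmalloc size up */
		UP,			/* slab caches usable */
		FULL			/* everything fully initialized */
	};

	extern enum slab_state slab_state;

	/*
	 * Replaces the copy removed from mm/slab.c; >= UP matches the
	 * old g_cpucache_up >= EARLY check, given the EARLY -> UP
	 * mapping visible in the kmem_cache_init() hunk.
	 */
	int slab_is_available(void)
	{
		return slab_state >= UP;
	}

With that ordering, the renamed checks read naturally: slab_state < UP means
the general kmalloc caches are not up yet, while slab_state >= FULL means
setup_cpu_cache() can hand new caches straight to enable_cpucache().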