author      Christoph Lameter <cl@linux.com>      2013-01-10 19:14:19 +0000
committer   Pekka Enberg <penberg@kernel.org>     2013-02-01 12:32:07 +0200
commit      95a05b428cc675694321c8f762591984f3fd2b1e
tree        3a74205955201dd5e1abb0a85104d95cafa49df6
parent      6a67368c36e2c0c2578ba62f6264ab739af08cce
slab: Common constants for kmalloc boundaries
Standardize the constants that describe the smallest and largest
object kept in the kmalloc arrays for SLAB and SLUB.
Differentiate between the maximum size for which a slab cache is used
(KMALLOC_MAX_CACHE_SIZE) and the maximum allocatable size
(KMALLOC_MAX_SIZE, KMALLOC_MAX_ORDER).
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
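
To make the new boundaries concrete, here is a small userspace sketch (not kernel code) that mirrors the SLUB branch of the definitions introduced by this patch. PAGE_SHIFT and MAX_ORDER below are assumed example values (4 KiB pages, the common default MAX_ORDER of 11), not taken from the patch; the program simply evaluates the macros.

/* Illustrative userspace mock of the SLUB-side constants added by this
 * patch. PAGE_SHIFT and MAX_ORDER are assumed example values; the real
 * values come from the kernel configuration. */
#include <stdio.h>

#define PAGE_SHIFT 12          /* assumed: 4 KiB pages */
#define MAX_ORDER  11          /* assumed: default page allocator limit */

/* SLUB branch of the new definitions in include/linux/slab.h */
#define KMALLOC_SHIFT_HIGH (PAGE_SHIFT + 1)
#define KMALLOC_SHIFT_MAX  (MAX_ORDER + PAGE_SHIFT)
#define KMALLOC_SHIFT_LOW  3

#define KMALLOC_MAX_SIZE       (1UL << KMALLOC_SHIFT_MAX)   /* largest allocatable size */
#define KMALLOC_MAX_CACHE_SIZE (1UL << KMALLOC_SHIFT_HIGH)  /* largest slab-backed size */
#define KMALLOC_MAX_ORDER      (KMALLOC_SHIFT_MAX - PAGE_SHIFT)
#define KMALLOC_MIN_SIZE       (1 << KMALLOC_SHIFT_LOW)

int main(void)
{
	printf("min object size      : %d bytes\n", KMALLOC_MIN_SIZE);
	printf("max slab cache size  : %lu bytes\n", KMALLOC_MAX_CACHE_SIZE);
	printf("max allocatable size : %lu bytes (order %d)\n",
	       KMALLOC_MAX_SIZE, KMALLOC_MAX_ORDER);
	return 0;
}

With these assumed values SLUB backs requests up to two pages with kmalloc slab caches and hands larger ones to the page allocator, while under CONFIG_SLAB KMALLOC_SHIFT_MAX equals KMALLOC_SHIFT_HIGH, so every supported size remains slab-backed.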
-rw-r--r--  include/linux/slab.h      | 34
-rw-r--r--  include/linux/slub_def.h  | 19
-rw-r--r--  mm/slub.c                 | 22
3 files changed, 38 insertions(+), 37 deletions(-)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index c97fe92..c017805 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -163,7 +163,12 @@ struct kmem_cache {
 #else /* CONFIG_SLOB */
 
 /*
- * The largest kmalloc size supported by the slab allocators is
+ * Kmalloc array related definitions
+ */
+
+#ifdef CONFIG_SLAB
+/*
+ * The largest kmalloc size supported by the SLAB allocators is
  * 32 megabyte (2^25) or the maximum allocatable page order if that is
  * less than 32 MB.
  *
@@ -173,9 +178,24 @@ struct kmem_cache {
  */
 #define KMALLOC_SHIFT_HIGH	((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
 				(MAX_ORDER + PAGE_SHIFT - 1) : 25)
+#define KMALLOC_SHIFT_MAX	KMALLOC_SHIFT_HIGH
+#define KMALLOC_SHIFT_LOW	5
+#else
+/*
+ * SLUB allocates up to order 2 pages directly and otherwise
+ * passes the request to the page allocator.
+ */
+#define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
+#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT)
+#define KMALLOC_SHIFT_LOW	3
+#endif
 
-#define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
-#define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_HIGH - PAGE_SHIFT)
+/* Maximum allocatable size */
+#define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_MAX)
+/* Maximum size for which we actually use a slab cache */
+#define KMALLOC_MAX_CACHE_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
+/* Maximum order allocatable via the slab allocagtor */
+#define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_MAX - PAGE_SHIFT)
 
 /*
  * Kmalloc subsystem.
@@ -183,15 +203,9 @@ struct kmem_cache {
 #if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
 #define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
 #else
-#ifdef CONFIG_SLAB
-#define KMALLOC_MIN_SIZE 32
-#else
-#define KMALLOC_MIN_SIZE 8
-#endif
+#define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
 #endif
 
-#define KMALLOC_SHIFT_LOW	ilog2(KMALLOC_MIN_SIZE)
-
 /*
  * Figure out which kmalloc slab an allocation of a certain size
  * belongs to.
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 99c3e05..032028e 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -115,19 +115,6 @@ struct kmem_cache {
 	struct kmem_cache_node *node[MAX_NUMNODES];
 };
 
-/*
- * Maximum kmalloc object size handled by SLUB. Larger object allocations
- * are passed through to the page allocator. The page allocator "fastpath"
- * is relatively slow so we need this value sufficiently high so that
- * performance critical objects are allocated through the SLUB fastpath.
- *
- * This should be dropped to PAGE_SIZE / 2 once the page allocator
- * "fastpath" becomes competitive with the slab allocator fastpaths.
- */
-#define SLUB_MAX_SIZE (2 * PAGE_SIZE)
-
-#define SLUB_PAGE_SHIFT (PAGE_SHIFT + 2)
-
 #ifdef CONFIG_ZONE_DMA
 #define SLUB_DMA __GFP_DMA
 #else
@@ -139,7 +126,7 @@ struct kmem_cache {
  * We keep the general caches in an array of slab caches that are used for
  * 2^x bytes of allocations.
  */
-extern struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
+extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
 
 /*
  * Find the slab cache for a given combination of allocation flags and size.
@@ -211,7 +198,7 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
 static __always_inline void *kmalloc(size_t size, gfp_t flags)
 {
 	if (__builtin_constant_p(size)) {
-		if (size > SLUB_MAX_SIZE)
+		if (size > KMALLOC_MAX_CACHE_SIZE)
 			return kmalloc_large(size, flags);
 
 		if (!(flags & SLUB_DMA)) {
@@ -247,7 +234,7 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s,
 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	if (__builtin_constant_p(size) &&
-		size <= SLUB_MAX_SIZE && !(flags & SLUB_DMA)) {
+		size <= KMALLOC_MAX_CACHE_SIZE && !(flags & SLUB_DMA)) {
 		struct kmem_cache *s = kmalloc_slab(size);
 
 		if (!s)
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2775,7 +2775,7 @@ init_kmem_cache_node(struct kmem_cache_node *n)
 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
 {
 	BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
-			SLUB_PAGE_SHIFT * sizeof(struct kmem_cache_cpu));
+			KMALLOC_SHIFT_HIGH * sizeof(struct kmem_cache_cpu));
 
 	/*
 	 * Must align to double word boundary for the double cmpxchg
@@ -3174,11 +3174,11 @@ int __kmem_cache_shutdown(struct kmem_cache *s)
  *		Kmalloc subsystem
  *******************************************************************/
 
-struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
+struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
 EXPORT_SYMBOL(kmalloc_caches);
 
 #ifdef CONFIG_ZONE_DMA
-static struct kmem_cache *kmalloc_dma_caches[SLUB_PAGE_SHIFT];
+static struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
 #endif
 
 static int __init setup_slub_min_order(char *str)
@@ -3280,7 +3280,7 @@ void *__kmalloc(size_t size, gfp_t flags)
 	struct kmem_cache *s;
 	void *ret;
 
-	if (unlikely(size > SLUB_MAX_SIZE))
+	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
 		return kmalloc_large(size, flags);
 
 	s = get_slab(size, flags);
@@ -3316,7 +3316,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 	struct kmem_cache *s;
 	void *ret;
 
-	if (unlikely(size > SLUB_MAX_SIZE)) {
+	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
 		ret = kmalloc_large_node(size, flags, node);
 
 		trace_kmalloc_node(_RET_IP_, ret,
@@ -3721,7 +3721,7 @@ void __init kmem_cache_init(void)
 		caches++;
 	}
 
-	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
+	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
 		kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
 		caches++;
 	}
@@ -3739,7 +3739,7 @@ void __init kmem_cache_init(void)
 		BUG_ON(!kmalloc_caches[2]->name);
 	}
 
-	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
+	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
 		char *s = kasprintf(GFP_NOWAIT, "kmalloc-%d", 1 << i);
 
 		BUG_ON(!s);
@@ -3751,7 +3751,7 @@ void __init kmem_cache_init(void)
 #endif
 
 #ifdef CONFIG_ZONE_DMA
-	for (i = 0; i < SLUB_PAGE_SHIFT; i++) {
+	for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
 		struct kmem_cache *s = kmalloc_caches[i];
 
 		if (s && s->size) {
@@ -3930,7 +3930,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 	struct kmem_cache *s;
 	void *ret;
 
-	if (unlikely(size > SLUB_MAX_SIZE))
+	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
 		return kmalloc_large(size, gfpflags);
 
 	s = get_slab(size, gfpflags);
@@ -3953,7 +3953,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 	struct kmem_cache *s;
 	void *ret;
 
-	if (unlikely(size > SLUB_MAX_SIZE)) {
+	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
 		ret = kmalloc_large_node(size, gfpflags, node);
 
 		trace_kmalloc_node(caller, ret,
@@ -4312,7 +4312,7 @@ static void resiliency_test(void)
 {
 	u8 *p;
 
-	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 16 || SLUB_PAGE_SHIFT < 10);
+	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 16 || KMALLOC_SHIFT_HIGH < 10);
 
 	printk(KERN_ERR "SLUB resiliency testing\n");
 	printk(KERN_ERR "-----------------------\n");
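
For readers tracing the SLUB call sites above, the sketch below mimics the new dispatch boundary in userspace: sizes up to KMALLOC_MAX_CACHE_SIZE resolve to one of the per-size caches (an array now dimensioned KMALLOC_SHIFT_HIGH + 1), while anything larger takes the kmalloc_large()/page-allocator path. The mock_* names and the PAGE_SHIFT value are illustrative assumptions, not kernel API.

/* Userspace sketch of the new SLUB kmalloc() dispatch boundary.
 * PAGE_SHIFT is an assumed example value and the mock_* names are
 * invented for illustration; only the boundary test mirrors the patch. */
#include <stdio.h>
#include <stddef.h>

#define PAGE_SHIFT 12                       /* assumed: 4 KiB pages */
#define KMALLOC_SHIFT_HIGH (PAGE_SHIFT + 1) /* SLUB: up to two pages via slabs */
#define KMALLOC_MAX_CACHE_SIZE (1UL << KMALLOC_SHIFT_HIGH)

/* Mirrors the resized cache array: indices 0..KMALLOC_SHIFT_HIGH are valid,
 * hence KMALLOC_SHIFT_HIGH + 1 entries instead of the old SLUB_PAGE_SHIFT. */
static const char *mock_kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];

static const char *mock_kmalloc_path(size_t size)
{
	/* Same test kmalloc() now performs: beyond the largest slab-backed
	 * size, fall through to the large-allocation (page allocator) path. */
	if (size > KMALLOC_MAX_CACHE_SIZE)
		return "kmalloc_large / page allocator";
	return "kmalloc slab cache";
}

int main(void)
{
	size_t sizes[] = { 8, 4096, 8192, 8193, 65536 };

	for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("%6zu bytes -> %s\n", sizes[i], mock_kmalloc_path(sizes[i]));

	printf("cache array entries: %zu\n",
	       sizeof(mock_kmalloc_caches) / sizeof(mock_kmalloc_caches[0]));
	return 0;
}

With the assumed 4 KiB page size, 8192 bytes still maps to a slab cache while 8193 bytes falls through to the large-allocation path; the boundary value is unchanged from the old 2 * PAGE_SIZE, the patch only renames the comparison from SLUB_MAX_SIZE to the common KMALLOC_MAX_CACHE_SIZE.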