author:    Christoph Lameter <cl@linux.com>   2013-01-10 19:14:19 +0000
committer: Pekka Enberg <penberg@kernel.org>  2013-02-01 12:32:07 +0200
commit:    95a05b428cc675694321c8f762591984f3fd2b1e (patch)
tree:      3a74205955201dd5e1abb0a85104d95cafa49df6 /mm
parent:    6a67368c36e2c0c2578ba62f6264ab739af08cce (diff)
slab: Common constants for kmalloc boundaries
Standardize the constants that describe the smallest and largest
object kept in the kmalloc arrays for SLAB and SLUB.
Differentiate between the maximum size for which a slab cache is used
(KMALLOC_MAX_CACHE_SIZE) and the maximum allocatable size
(KMALLOC_MAX_SIZE, KMALLOC_MAX_ORDER).
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
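The split the message describes can be sketched in a few lines of user-space C. The numeric values below are illustrative assumptions (4 KiB pages, an 8 KiB largest cache, a 4 MiB allocation ceiling), not the kernel's actual configuration, and demo_kmalloc() is a hypothetical stand-in for the real kmalloc() path rather than the implementation in mm/slub.c:

#include <stdio.h>
#include <stddef.h>

/* Illustrative values only; the real constants live in include/linux/slab.h. */
#define PAGE_SHIFT              12                          /* assume 4 KiB pages */
#define KMALLOC_SHIFT_HIGH      (PAGE_SHIFT + 1)            /* largest slab-backed size: 8 KiB */
#define KMALLOC_SHIFT_MAX       (PAGE_SHIFT + 10)           /* largest allocatable size: 4 MiB */
#define KMALLOC_MAX_CACHE_SIZE  (1UL << KMALLOC_SHIFT_HIGH) /* served from kmalloc_caches[] */
#define KMALLOC_MAX_SIZE        (1UL << KMALLOC_SHIFT_MAX)  /* served by the page allocator */

/* Hypothetical dispatcher mirroring the size checks that the patch renames. */
static const char *demo_kmalloc(size_t size)
{
	if (size > KMALLOC_MAX_SIZE)
		return "too large: allocation fails";
	if (size > KMALLOC_MAX_CACHE_SIZE)
		return "kmalloc_large(): pages taken straight from the page allocator";
	return "slab cache: object served from kmalloc_caches[]";
}

int main(void)
{
	size_t sizes[] = { 64, 4096, 16384, 8 << 20 };

	for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("%8zu bytes -> %s\n", sizes[i], demo_kmalloc(sizes[i]));
	return 0;
}

Under these assumed values, requests up to 8 KiB hit a kmalloc slab cache, requests between 8 KiB and 4 MiB bypass the caches entirely, and anything larger cannot be satisfied; that is exactly the boundary KMALLOC_MAX_CACHE_SIZE now names in the checks below.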
Diffstat (limited to 'mm')
-rw-r--r--  mm/slub.c | 22
1 file changed, 11 insertions(+), 11 deletions(-)
@@ -2775,7 +2775,7 @@ init_kmem_cache_node(struct kmem_cache_node *n)
 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
 {
 	BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
-			SLUB_PAGE_SHIFT * sizeof(struct kmem_cache_cpu));
+			KMALLOC_SHIFT_HIGH * sizeof(struct kmem_cache_cpu));
 
 	/*
 	 * Must align to double word boundary for the double cmpxchg
@@ -3174,11 +3174,11 @@ int __kmem_cache_shutdown(struct kmem_cache *s)
  *		Kmalloc subsystem
  *******************************************************************/
 
-struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
+struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
 EXPORT_SYMBOL(kmalloc_caches);
 
 #ifdef CONFIG_ZONE_DMA
-static struct kmem_cache *kmalloc_dma_caches[SLUB_PAGE_SHIFT];
+static struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
 #endif
 
 static int __init setup_slub_min_order(char *str)
@@ -3280,7 +3280,7 @@ void *__kmalloc(size_t size, gfp_t flags)
 	struct kmem_cache *s;
 	void *ret;
 
-	if (unlikely(size > SLUB_MAX_SIZE))
+	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
 		return kmalloc_large(size, flags);
 
 	s = get_slab(size, flags);
@@ -3316,7 +3316,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 	struct kmem_cache *s;
 	void *ret;
 
-	if (unlikely(size > SLUB_MAX_SIZE)) {
+	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
 		ret = kmalloc_large_node(size, flags, node);
 
 		trace_kmalloc_node(_RET_IP_, ret,
@@ -3721,7 +3721,7 @@ void __init kmem_cache_init(void)
 		caches++;
 	}
 
-	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
+	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
 		kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
 		caches++;
 	}
@@ -3739,7 +3739,7 @@ void __init kmem_cache_init(void)
 		BUG_ON(!kmalloc_caches[2]->name);
 	}
 
-	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
+	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
 		char *s = kasprintf(GFP_NOWAIT, "kmalloc-%d", 1 << i);
 
 		BUG_ON(!s);
@@ -3751,7 +3751,7 @@
 #endif
 
 #ifdef CONFIG_ZONE_DMA
-	for (i = 0; i < SLUB_PAGE_SHIFT; i++) {
+	for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
 		struct kmem_cache *s = kmalloc_caches[i];
 
 		if (s && s->size) {
@@ -3930,7 +3930,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 	struct kmem_cache *s;
 	void *ret;
 
-	if (unlikely(size > SLUB_MAX_SIZE))
+	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
 		return kmalloc_large(size, gfpflags);
 
 	s = get_slab(size, gfpflags);
@@ -3953,7 +3953,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 	struct kmem_cache *s;
 	void *ret;
 
-	if (unlikely(size > SLUB_MAX_SIZE)) {
+	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
 		ret = kmalloc_large_node(size, gfpflags, node);
 
 		trace_kmalloc_node(caller, ret,
@@ -4312,7 +4312,7 @@ static void resiliency_test(void)
 {
 	u8 *p;
 
-	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 16 || SLUB_PAGE_SHIFT < 10);
+	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 16 || KMALLOC_SHIFT_HIGH < 10);
 
 	printk(KERN_ERR "SLUB resiliency testing\n");
 	printk(KERN_ERR "-----------------------\n");
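The array-bound and loop-bound changes above follow from how the caches are indexed: each kmalloc cache sits at the slot given by the power-of-two shift of its object size, so the slot at index KMALLOC_SHIFT_HIGH itself must be addressable. That is why the array gains "+ 1" and the loops switch from "<" to "<=". A minimal user-space sketch of that indexing, using illustrative shift values rather than the kernel's real ones:

#include <stdio.h>

/* Illustrative values; see include/linux/slab.h for the real definitions. */
#define KMALLOC_SHIFT_LOW   3   /* smallest cache: 1 << 3  = 8 bytes    */
#define KMALLOC_SHIFT_HIGH  13  /* largest cache:  1 << 13 = 8192 bytes */

/* Index KMALLOC_SHIFT_HIGH itself must be valid, hence the "+ 1". */
static const char *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
static char names[KMALLOC_SHIFT_HIGH + 1][32];

int main(void)
{
	/* Mirrors the "<=" loops in kmem_cache_init(): one cache per power of two. */
	for (int i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
		snprintf(names[i], sizeof(names[i]), "kmalloc-%d", 1 << i);
		kmalloc_caches[i] = names[i];
		printf("index %2d -> %s\n", i, kmalloc_caches[i]);
	}
	return 0;
}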