 include/linux/slub_def.h |  2 ++
 mm/slub.c                | 12 ++++++++++--
 2 files changed, 12 insertions(+), 2 deletions(-)
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 71e43a1..cef6f8f 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -137,10 +137,12 @@ static __always_inline int kmalloc_index(size_t size)
 	if (size <= KMALLOC_MIN_SIZE)
 		return KMALLOC_SHIFT_LOW;
 
+#if KMALLOC_MIN_SIZE <= 64
 	if (size > 64 && size <= 96)
 		return 1;
 	if (size > 128 && size <= 192)
 		return 2;
+#endif
 	if (size <= 8) return 3;
 	if (size <= 16) return 4;
 	if (size <= 32) return 5;
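
The hunk above compiles the 96 and 192 byte branches out of kmalloc_index() whenever KMALLOC_MIN_SIZE is larger than 64, since those caches are never created on such configurations (see the mm/slub.c changes below). What follows is a minimal user-space sketch of the resulting mapping, assuming KMALLOC_MIN_SIZE == 128 and KMALLOC_SHIFT_LOW == 7; the truncated size ladder and the main() driver are illustrative additions, not kernel code.

#include <stddef.h>
#include <stdio.h>

#define KMALLOC_MIN_SIZE  128	/* assumed: 128 byte minimum alignment */
#define KMALLOC_SHIFT_LOW 7	/* 2^7 == 128, the smallest cache */

static int kmalloc_index(size_t size)
{
	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;
#if KMALLOC_MIN_SIZE <= 64
	/* Compiled out here: no kmalloc-96 or kmalloc-192 exists. */
	if (size > 64 && size <= 96)
		return 1;
	if (size > 128 && size <= 192)
		return 2;
#endif
	if (size <= 256) return 8;
	if (size <= 512) return 9;
	return -1;	/* larger sizes elided from this sketch */
}

int main(void)
{
	/* With the special-case branches compiled out, 150 bytes
	 * falls through to the 256 byte cache (index 8) instead of
	 * kmalloc-192. */
	printf("kmalloc_index(150) = %d\n", kmalloc_index(150));
	return 0;
}
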
diff --git a/mm/slub.c b/mm/slub.c
index 0987d1c..2c9a62d 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2995,8 +2995,6 @@ void __init kmem_cache_init(void)
 		create_kmalloc_cache(&kmalloc_caches[1],
 				"kmalloc-96", 96, GFP_KERNEL);
 		caches++;
-	}
-	if (KMALLOC_MIN_SIZE <= 128) {
 		create_kmalloc_cache(&kmalloc_caches[2],
 				"kmalloc-192", 192, GFP_KERNEL);
 		caches++;
@@ -3026,6 +3024,16 @@ void __init kmem_cache_init(void)
 	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8)
 		size_index[(i - 1) / 8] = KMALLOC_SHIFT_LOW;
 
+	if (KMALLOC_MIN_SIZE == 128) {
+		/*
+		 * The 192 byte sized cache is not used if the alignment
+		 * is 128 byte. Redirect kmalloc to use the 256 byte cache
+		 * instead.
+		 */
+		for (i = 128 + 8; i <= 192; i += 8)
+			size_index[(i - 1) / 8] = 8;
+	}
+
 	slab_state = UP;
 
 	/* Provide the correct kmalloc names now that the caches are up */
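
The last hunk patches size_index[], the lookup table that maps small allocation sizes (in 8 byte steps) to a kmalloc cache index; the stored values are power-of-two exponents, so index 8 selects the 256 byte cache. Below is a user-space sketch of that redirection, again assuming KMALLOC_MIN_SIZE == 128; the initialization loop and the lookup in main() are simplified stand-ins for the kernel's statically initialized table and its lookup path.

#include <stdio.h>

#define KMALLOC_MIN_SIZE  128	/* assumed configuration */
#define KMALLOC_SHIFT_LOW 7

/* One entry per 8 byte step, covering requests of 1..192 bytes. */
static unsigned char size_index[24];

int main(void)
{
	int i;

	/* Requests up to KMALLOC_MIN_SIZE use the smallest cache.
	 * This simplified loop also covers the 128 byte entry that
	 * the kernel's static initializer handles. */
	for (i = 8; i <= KMALLOC_MIN_SIZE; i += 8)
		size_index[(i - 1) / 8] = KMALLOC_SHIFT_LOW;

	/* The change above: with 128 byte alignment there is no
	 * kmalloc-192, so sizes 129..192 go to the 256 byte cache. */
	for (i = 128 + 8; i <= 192; i += 8)
		size_index[(i - 1) / 8] = 8;

	/* A 150 byte request now lands in kmalloc-256. */
	i = size_index[(150 - 1) / 8];
	printf("kmalloc(150) -> kmalloc-%d\n", 1 << i);
	return 0;
}

The net effect: on configurations with 128 byte minimum alignment, requests between 129 and 192 bytes go straight to kmalloc-256 rather than to a 192 byte cache whose objects would have to be padded out to 256 bytes by the alignment requirement anyway.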