path: root/mm/slub.c
author    Jonathan Corbet <corbet@lwn.net>  2008-07-14 15:29:34 -0600
committer Jonathan Corbet <corbet@lwn.net>  2008-07-14 15:29:34 -0600
commit    2fceef397f9880b212a74c418290ce69e7ac00eb
tree      d9cc09ab992825ef7fede4a688103503e3caf655
parent    feae1ef116ed381625d3731c5ae4f4ebcb3fa302
parent    bce7f793daec3e65ec5c5705d2457b81fe7b5725
Merge commit 'v2.6.26' into bkl-removal
Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c | 23 +++++++++++++++++------
1 file changed, 17 insertions(+), 6 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index a505a82..315c392 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -5,7 +5,7 @@
  * The allocator synchronizes using per slab locks and only
  * uses a centralized lock to manage a pool of partial slabs.
  *
- * (C) 2007 SGI, Christoph Lameter <clameter@sgi.com>
+ * (C) 2007 SGI, Christoph Lameter
  */
 
 #include <linux/mm.h>
@@ -1628,9 +1628,11 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 	void **object;
 	struct kmem_cache_cpu *c;
 	unsigned long flags;
+	unsigned int objsize;
 
 	local_irq_save(flags);
 	c = get_cpu_slab(s, smp_processor_id());
+	objsize = c->objsize;
 	if (unlikely(!c->freelist || !node_match(c, node)))
 
 		object = __slab_alloc(s, gfpflags, node, addr, c);
@@ -1643,7 +1645,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 	local_irq_restore(flags);
 
 	if (unlikely((gfpflags & __GFP_ZERO) && object))
-		memset(object, 0, c->objsize);
+		memset(object, 0, objsize);
 
 	return object;
 }
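
The two hunks above are a use-after-preempt fix: __slab_alloc() re-enables interrupts on its slow path, so by the time the memset() runs the task may have migrated and `c` may point at another CPU's kmem_cache_cpu. Snapshotting c->objsize into a local variable up front means the zeroing path never dereferences the possibly stale per-CPU pointer. A minimal userspace sketch of the same pattern (the struct and function names here are illustrative, not kernel API):

/* Sketch: read the per-CPU field once, up front, so nothing that runs
 * after a possible migration point touches the per-CPU pointer again. */
#include <stddef.h>
#include <string.h>

struct cpu_cache {
	size_t objsize;		/* object size, cached per CPU */
	void *freelist;		/* fast-path free object, if any */
};

static void *alloc_zeroed(struct cpu_cache *c,
			  void *(*slow_path)(struct cpu_cache *))
{
	size_t objsize = c->objsize;	/* snapshot before the slow path */
	void *object;

	if (c->freelist)
		object = c->freelist;
	else
		object = slow_path(c);	/* task may "migrate"; c goes stale */

	if (object)
		memset(object, 0, objsize);	/* not c->objsize */
	return object;
}

static void *dummy_slow(struct cpu_cache *c)
{
	static char buf[64];	/* stand-in for a freshly allocated object */
	(void)c;
	return buf;
}

int main(void)
{
	struct cpu_cache c = { .objsize = 64, .freelist = NULL };
	return alloc_zeroed(&c, dummy_slow) == NULL;
}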
@@ -2726,9 +2728,10 @@ size_t ksize(const void *object)
 
 	page = virt_to_head_page(object);
 
-	if (unlikely(!PageSlab(page)))
+	if (unlikely(!PageSlab(page))) {
+		WARN_ON(!PageCompound(page));
 		return PAGE_SIZE << compound_order(page);
-
+	}
 	s = page->slab;
 
 #ifdef CONFIG_SLUB_DEBUG
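
This hunk covers objects that were never slab-allocated: large kmalloc() requests fall back to the page allocator and return a compound (higher-order) page, so the usable size is the base page size shifted by the compound order. The new WARN_ON() asserts that any non-slab page reaching ksize() really is such a compound page. A small sketch of the size computation (the PAGE_SIZE value and the order used are illustrative):

#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL	/* illustrative; arch-dependent in the kernel */

/* Usable size of a page-allocator-backed allocation: 2^order pages. */
static unsigned long compound_ksize(unsigned int order)
{
	return PAGE_SIZE << order;
}

int main(void)
{
	/* e.g. a 10000-byte kmalloc() would be backed by an order-2 page */
	assert(compound_ksize(2) == 16384);
	printf("order-2 compound page: %lu bytes\n", compound_ksize(2));
	return 0;
}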
@@ -2994,8 +2997,6 @@ void __init kmem_cache_init(void)
 		create_kmalloc_cache(&kmalloc_caches[1],
 				"kmalloc-96", 96, GFP_KERNEL);
 		caches++;
-	}
-	if (KMALLOC_MIN_SIZE <= 128) {
 		create_kmalloc_cache(&kmalloc_caches[2],
 				"kmalloc-192", 192, GFP_KERNEL);
 		caches++;
@@ -3025,6 +3026,16 @@ void __init kmem_cache_init(void)
 	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8)
 		size_index[(i - 1) / 8] = KMALLOC_SHIFT_LOW;
 
+	if (KMALLOC_MIN_SIZE == 128) {
+		/*
+		 * The 192 byte sized cache is not used if the alignment
+		 * is 128 byte. Redirect kmalloc to use the 256 byte cache
+		 * instead.
+		 */
+		for (i = 128 + 8; i <= 192; i += 8)
+			size_index[(i - 1) / 8] = 8;
+	}
+
 	slab_state = UP;
 
 	/* Provide the correct kmalloc names now that the caches are up */
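
The last two hunks belong together. kmalloc() resolves requests of up to 192 bytes through the size_index[] table, indexed by (size - 1) / 8; the stored value is a kmalloc cache index, where indices 1 and 2 name the 96- and 192-byte caches and index n >= 3 names the 2^n-byte cache, so index 8 is kmalloc-256. With a 128-byte minimum alignment the odd-sized caches cannot be used, so the removed `KMALLOC_MIN_SIZE <= 128` branch no longer creates kmalloc-192 on its own, and the new loop points every 129..192-byte request at the 256-byte cache instead. A userspace model of the table setup (helper names are illustrative; the loop bodies mirror the hunks above):

#include <stdio.h>

#define KMALLOC_MIN_SIZE   128	/* modeling the 128-byte-alignment case */
#define KMALLOC_SHIFT_LOW  7	/* log2(KMALLOC_MIN_SIZE): smallest cache index */

static unsigned char size_index[24];	/* covers request sizes 1..192 */

static void init_size_index(void)
{
	int i;

	/* requests below the minimum size all map to the smallest cache */
	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8)
		size_index[(i - 1) / 8] = KMALLOC_SHIFT_LOW;

	if (KMALLOC_MIN_SIZE == 128) {
		/* 192-byte cache unusable at 128-byte alignment:
		 * redirect 129..192-byte requests to kmalloc-256 */
		for (i = 128 + 8; i <= 192; i += 8)
			size_index[(i - 1) / 8] = 8;
	}
}

int main(void)
{
	init_size_index();
	/* a 160-byte request lands in the 256-byte cache, index 8 */
	printf("160-byte request -> cache index %u (kmalloc-256)\n",
	       size_index[(160 - 1) / 8]);
	return 0;
}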