author    | Christoph Lameter <clameter@sgi.com>                 | 2007-07-17 04:03:30 -0700
committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-07-17 10:23:02 -0700
commit    | 434e245ddd3f14aa8eef97cae16c71b863ab092a (patch)
tree      | bbfd9d012416e6882fd714650435a78ce4f9da9b /mm/slub.c
parent    | 94f6030ca792c57422f04a73e7a872d8325946d3 (diff)
SLUB: Do not allocate object bit array on stack
The number of objects per slab increases with the current patches in mm, since we now
allow allocations of up to order 3 by default. Further patches in mm allow slabs of 2M
or larger. Slab validation needs a per-object bitmap in order to check a slab, so we can
end up with up to 64k objects per slab, which would require up to 8K of stack space for
the bitmap. That is too much to put on the stack.
Allocate the bit arrays via kmalloc instead.
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
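For context, here is a minimal userspace sketch of the sizing arithmetic behind the change: BITS_TO_LONGS() rounds the object count up to whole longs, so a 64k-object slab needs an 8KB bitmap, which is why the map moves from DECLARE_BITMAP() on the stack to kmalloc(). The macros below are local stand-ins defined only for illustration; they are not taken from the kernel headers.

```c
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* Local stand-ins for the kernel's BITS_PER_LONG / BITS_TO_LONGS macros. */
#define BITS_PER_LONG	(CHAR_BIT * sizeof(unsigned long))
#define BITS_TO_LONGS(nr) (((nr) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
	unsigned long objects = 64 * 1024;	/* worst case cited in the changelog */
	size_t bytes = BITS_TO_LONGS(objects) * sizeof(unsigned long);

	/* 65536 bits round up to 8192 bytes -- too big for an on-stack bitmap. */
	printf("bitmap for %lu objects: %zu bytes\n", objects, bytes);

	/* Heap allocation, mirroring the kmalloc(..., GFP_KERNEL) in the patch. */
	unsigned long *map = calloc(BITS_TO_LONGS(objects), sizeof(unsigned long));
	if (!map)
		return 1;	/* the patch returns -ENOMEM at this point */

	free(map);		/* corresponds to the kfree(map) after validation */
	return 0;
}
```

On an LP64 system this prints 8192 bytes for 65536 objects, the 8K figure quoted above.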
Diffstat (limited to 'mm/slub.c')
-rw-r--r-- | mm/slub.c | 39
1 file changed, 25 insertions(+), 14 deletions(-)
@@ -2764,11 +2764,11 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 }
 
 #if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)
-static int validate_slab(struct kmem_cache *s, struct page *page)
+static int validate_slab(struct kmem_cache *s, struct page *page,
+						unsigned long *map)
 {
 	void *p;
 	void *addr = page_address(page);
-	DECLARE_BITMAP(map, s->objects);
 
 	if (!check_slab(s, page) ||
 			!on_freelist(s, page, NULL))
@@ -2790,10 +2790,11 @@ static int validate_slab(struct kmem_cache *s, struct page *page)
 	return 1;
 }
 
-static void validate_slab_slab(struct kmem_cache *s, struct page *page)
+static void validate_slab_slab(struct kmem_cache *s, struct page *page,
+						unsigned long *map)
 {
 	if (slab_trylock(page)) {
-		validate_slab(s, page);
+		validate_slab(s, page, map);
 		slab_unlock(page);
 	} else
 		printk(KERN_INFO "SLUB %s: Skipped busy slab 0x%p\n",
@@ -2810,7 +2811,8 @@ static void validate_slab_slab(struct kmem_cache *s, struct page *page)
 	}
 }
 
-static int validate_slab_node(struct kmem_cache *s, struct kmem_cache_node *n)
+static int validate_slab_node(struct kmem_cache *s,
+		struct kmem_cache_node *n, unsigned long *map)
 {
 	unsigned long count = 0;
 	struct page *page;
@@ -2819,7 +2821,7 @@ static int validate_slab_node(struct kmem_cache *s, struct kmem_cache_node *n)
 	spin_lock_irqsave(&n->list_lock, flags);
 
 	list_for_each_entry(page, &n->partial, lru) {
-		validate_slab_slab(s, page);
+		validate_slab_slab(s, page, map);
 		count++;
 	}
 	if (count != n->nr_partial)
@@ -2830,7 +2832,7 @@ static int validate_slab_node(struct kmem_cache *s, struct kmem_cache_node *n)
 		goto out;
 
 	list_for_each_entry(page, &n->full, lru) {
-		validate_slab_slab(s, page);
+		validate_slab_slab(s, page, map);
 		count++;
 	}
 	if (count != atomic_long_read(&n->nr_slabs))
@@ -2843,17 +2845,23 @@ out:
 	return count;
 }
 
-static unsigned long validate_slab_cache(struct kmem_cache *s)
+static long validate_slab_cache(struct kmem_cache *s)
 {
 	int node;
 	unsigned long count = 0;
+	unsigned long *map = kmalloc(BITS_TO_LONGS(s->objects) *
+				sizeof(unsigned long), GFP_KERNEL);
+
+	if (!map)
+		return -ENOMEM;
 
 	flush_all(s);
 	for_each_online_node(node) {
 		struct kmem_cache_node *n = get_node(s, node);
 
-		count += validate_slab_node(s, n);
+		count += validate_slab_node(s, n, map);
 	}
+	kfree(map);
 	return count;
 }
 
@@ -3467,11 +3475,14 @@ static ssize_t validate_show(struct kmem_cache *s, char *buf)
 static ssize_t validate_store(struct kmem_cache *s,
 				const char *buf, size_t length)
 {
-	if (buf[0] == '1')
-		validate_slab_cache(s);
-	else
-		return -EINVAL;
-	return length;
+	int ret = -EINVAL;
+
+	if (buf[0] == '1') {
+		ret = validate_slab_cache(s);
+		if (ret >= 0)
+			ret = length;
+	}
+	return ret;
 }
 SLAB_ATTR(validate);
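The validate_store() hunk also tightens the sysfs convention: instead of discarding the result of validate_slab_cache(), the store handler now propagates a negative errno (such as the new -ENOMEM) and only returns the consumed length on success. Below is a rough userspace analogue of that return-value pattern; the fake_* names are illustrative stubs, not kernel API.

```c
#include <errno.h>
#include <stdio.h>

/* Stand-in for validate_slab_cache(): returns a count >= 0, or -ENOMEM. */
static long fake_validate_cache(int fail)
{
	return fail ? -ENOMEM : 42;
}

/*
 * Mirrors the patched validate_store(): return the number of bytes
 * consumed on success, or the negative error code from validation.
 */
static long fake_validate_store(const char *buf, size_t length, int fail)
{
	long ret = -EINVAL;

	if (buf[0] == '1') {
		ret = fake_validate_cache(fail);
		if (ret >= 0)
			ret = length;
	}
	return ret;
}

int main(void)
{
	printf("ok:    %ld\n", fake_validate_store("1", 1, 0));	/* length (1) */
	printf("nomem: %ld\n", fake_validate_store("1", 1, 1));	/* -ENOMEM */
	printf("bad:   %ld\n", fake_validate_store("0", 1, 0));	/* -EINVAL */
	return 0;
}
```

With the usual Linux errno values this prints 1, -12 and -22, matching the three paths through the patched handler.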