Diffstat (limited to 'mm/slub.c')
-rw-r--r--   mm/slub.c   141
1 file changed, 115 insertions, 26 deletions
diff --git a/mm/slub.c b/mm/slub.c
index 30354bf..4c64493 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -18,6 +18,7 @@
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kmemtrace.h>
+#include <linux/kmemcheck.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/kmemleak.h>
@@ -147,7 +148,7 @@
SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE)
#define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
- SLAB_CACHE_DMA)
+ SLAB_CACHE_DMA | SLAB_NOTRACK)
#ifndef ARCH_KMALLOC_MINALIGN
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
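
For context, SLAB_NOTRACK marks a cache whose objects kmemcheck should not track; adding it to SLUB_MERGE_SAME means a tracked and an untracked cache are never merged into one. A minimal sketch of a cache that opts out of tracking (the cache name, object size, and function names are illustrative, not taken from this patch):

#include <linux/init.h>
#include <linux/slab.h>

static struct kmem_cache *untracked_cachep;	/* illustrative name */

static int __init untracked_cache_init(void)
{
	/*
	 * SLAB_NOTRACK tells kmemcheck to leave objects from this cache
	 * alone; size and alignment here are made up for the example.
	 */
	untracked_cachep = kmem_cache_create("untracked-example", 256, 0,
					     SLAB_NOTRACK, NULL);
	return untracked_cachep ? 0 : -ENOMEM;
}
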
@@ -839,6 +840,11 @@ static inline unsigned long slabs_node(struct kmem_cache *s, int node)
return atomic_long_read(&n->nr_slabs);
}
+static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
+{
+ return atomic_long_read(&n->nr_slabs);
+}
+
static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
{
struct kmem_cache_node *n = get_node(s, node);
@@ -1057,6 +1063,8 @@ static inline unsigned long kmem_cache_flags(unsigned long objsize,
static inline unsigned long slabs_node(struct kmem_cache *s, int node)
{ return 0; }
+static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
+ { return 0; }
static inline void inc_slabs_node(struct kmem_cache *s, int node,
int objects) {}
static inline void dec_slabs_node(struct kmem_cache *s, int node,
@@ -1071,6 +1079,8 @@ static inline struct page *alloc_slab_page(gfp_t flags, int node,
{
int order = oo_order(oo);
+ flags |= __GFP_NOTRACK;
+
if (node == -1)
return alloc_pages(flags, order);
else
@@ -1098,6 +1108,24 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
stat(get_cpu_slab(s, raw_smp_processor_id()), ORDER_FALLBACK);
}
+
+ if (kmemcheck_enabled
+ && !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS)))
+ {
+ int pages = 1 << oo_order(oo);
+
+ kmemcheck_alloc_shadow(page, oo_order(oo), flags, node);
+
+ /*
+ * Objects from caches that have a constructor don't get
+ * cleared when they're allocated, so we need to do it here.
+ */
+ if (s->ctor)
+ kmemcheck_mark_uninitialized_pages(page, pages);
+ else
+ kmemcheck_mark_unallocated_pages(page, pages);
+ }
+
page->objects = oo_objects(oo);
mod_zone_page_state(page_zone(page),
(s->flags & SLAB_RECLAIM_ACCOUNT) ?
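
For reference, the s->ctor branch above is taken by caches created with a constructor. SLUB runs the constructor while laying out objects in a freshly allocated slab, which is why such pages are marked uninitialized rather than unallocated. A sketch of such a cache (the struct, names, and sizes are illustrative only):

#include <linux/init.h>
#include <linux/slab.h>

struct foo {				/* made-up object type */
	int refcount;
	void *data;
};

/* Constructor run by SLUB when objects are set up in a new slab. */
static void foo_ctor(void *obj)
{
	struct foo *f = obj;

	f->refcount = 0;
	f->data = NULL;
}

static struct kmem_cache *foo_cachep;

static int __init foo_cache_init(void)
{
	foo_cachep = kmem_cache_create("foo-example", sizeof(struct foo),
				       0, 0, foo_ctor);
	return foo_cachep ? 0 : -ENOMEM;
}
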
@@ -1171,6 +1199,8 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
__ClearPageSlubDebug(page);
}
+ kmemcheck_free_shadow(page, compound_order(page));
+
mod_zone_page_state(page_zone(page),
(s->flags & SLAB_RECLAIM_ACCOUNT) ?
NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
@@ -1491,6 +1521,65 @@ static inline int node_match(struct kmem_cache_cpu *c, int node)
return 1;
}
+static int count_free(struct page *page)
+{
+ return page->objects - page->inuse;
+}
+
+static unsigned long count_partial(struct kmem_cache_node *n,
+ int (*get_count)(struct page *))
+{
+ unsigned long flags;
+ unsigned long x = 0;
+ struct page *page;
+
+ spin_lock_irqsave(&n->list_lock, flags);
+ list_for_each_entry(page, &n->partial, lru)
+ x += get_count(page);
+ spin_unlock_irqrestore(&n->list_lock, flags);
+ return x;
+}
+
+static inline unsigned long node_nr_objs(struct kmem_cache_node *n)
+{
+#ifdef CONFIG_SLUB_DEBUG
+ return atomic_long_read(&n->total_objects);
+#else
+ return 0;
+#endif
+}
+
+static noinline void
+slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
+{
+ int node;
+
+ printk(KERN_WARNING
+ "SLUB: Unable to allocate memory on node %d (gfp=0x%x)\n",
+ nid, gfpflags);
+ printk(KERN_WARNING " cache: %s, object size: %d, buffer size: %d, "
+ "default order: %d, min order: %d\n", s->name, s->objsize,
+ s->size, oo_order(s->oo), oo_order(s->min));
+
+ for_each_online_node(node) {
+ struct kmem_cache_node *n = get_node(s, node);
+ unsigned long nr_slabs;
+ unsigned long nr_objs;
+ unsigned long nr_free;
+
+ if (!n)
+ continue;
+
+ nr_free = count_partial(n, count_free);
+ nr_slabs = node_nr_slabs(n);
+ nr_objs = node_nr_objs(n);
+
+ printk(KERN_WARNING
+ " node %d: slabs: %ld, objs: %ld, free: %ld\n",
+ node, nr_slabs, nr_objs, nr_free);
+ }
+}
+
/*
* Slow path. The lockless freelist is empty or we need to perform
* debugging duties.
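
Rendered through the printk() format strings in slab_out_of_memory() above, the new diagnostic looks roughly like the following; the cache name and counts are placeholders, not output captured from a real system:

SLUB: Unable to allocate memory on node -1 (gfp=0x20)
 cache: kmalloc-256, object size: 256, buffer size: 256, default order: 0, min order: 0
 node 0: slabs: 148, objs: 2368, free: 0
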
@@ -1572,6 +1661,8 @@ new_slab:
c->page = new;
goto load_freelist;
}
+ if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit())
+ slab_out_of_memory(s, gfpflags, node);
return NULL;
debug:
if (!alloc_debug_processing(s, c->page, object, addr))
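
As the hunk above shows, the report is rate-limited and skipped entirely for __GFP_NOWARN allocations, so a caller that copes with failure on its own stays quiet. A minimal illustration (the function name is made up):

#include <linux/slab.h>

/* Illustrative caller that handles allocation failure itself and
 * therefore suppresses the slab_out_of_memory() report. */
static void *grab_buffer(size_t len)
{
	return kmalloc(len, GFP_KERNEL | __GFP_NOWARN);
}
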
@@ -1626,7 +1717,9 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
if (unlikely((gfpflags & __GFP_ZERO) && object))
memset(object, 0, objsize);
+ kmemcheck_slab_alloc(s, gfpflags, object, c->objsize);
kmemleak_alloc_recursive(object, objsize, 1, s->flags, gfpflags);
+
return object;
}
@@ -1759,6 +1852,7 @@ static __always_inline void slab_free(struct kmem_cache *s,
kmemleak_free_recursive(x, s->flags);
local_irq_save(flags);
c = get_cpu_slab(s, smp_processor_id());
+ kmemcheck_slab_free(s, object, c->objsize);
debug_check_no_locks_freed(object, c->objsize);
if (!(s->flags & SLAB_DEBUG_OBJECTS))
debug_check_no_obj_freed(object, c->objsize);
@@ -2610,6 +2704,7 @@ static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
struct kmem_cache *s;
char *text;
size_t realsize;
+ unsigned long slabflags;
s = kmalloc_caches_dma[index];
if (s)
@@ -2631,9 +2726,18 @@ static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
(unsigned int)realsize);
s = kmalloc(kmem_size, flags & ~SLUB_DMA);
+ /*
+ * Must defer sysfs creation to a workqueue because we don't know
+ * what context we are called from. Before sysfs comes up, we don't
+ * need to do anything because our sysfs initcall will start by
+ * adding all existing slabs to sysfs.
+ */
+ slabflags = SLAB_CACHE_DMA|SLAB_NOTRACK;
+ if (slab_state >= SYSFS)
+ slabflags |= __SYSFS_ADD_DEFERRED;
+
if (!s || !text || !kmem_cache_open(s, flags, text,
- realsize, ARCH_KMALLOC_MINALIGN,
- SLAB_CACHE_DMA|__SYSFS_ADD_DEFERRED, NULL)) {
+ realsize, ARCH_KMALLOC_MINALIGN, slabflags, NULL)) {
kfree(s);
kfree(text);
goto unlock_out;
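
The comment above relies on the deferred-add machinery that already exists in slub.c for DMA kmalloc caches; roughly, that helper and its work item look like this (paraphrased from the surrounding file, not part of this diff):

static void sysfs_add_func(struct work_struct *w)
{
	struct kmem_cache *s;

	down_write(&slub_lock);
	list_for_each_entry(s, &slab_caches, list) {
		if (s->flags & __SYSFS_ADD_DEFERRED) {
			s->flags &= ~__SYSFS_ADD_DEFERRED;
			sysfs_slab_add(s);
		}
	}
	up_write(&slub_lock);
}

static DECLARE_WORK(sysfs_add_work, sysfs_add_func);
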
@@ -2642,7 +2746,8 @@ static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
list_add(&s->list, &slab_caches);
kmalloc_caches_dma[index] = s;
- schedule_work(&sysfs_add_work);
+ if (slab_state >= SYSFS)
+ schedule_work(&sysfs_add_work);
unlock_out:
up_write(&slub_lock);
@@ -2727,9 +2832,10 @@ EXPORT_SYMBOL(__kmalloc);
static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
{
- struct page *page = alloc_pages_node(node, flags | __GFP_COMP,
- get_order(size));
+ struct page *page;
+ flags |= __GFP_COMP | __GFP_NOTRACK;
+ page = alloc_pages_node(node, flags, get_order(size));
if (page)
return page_address(page);
else
@@ -3340,20 +3446,6 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
}
#ifdef CONFIG_SLUB_DEBUG
-static unsigned long count_partial(struct kmem_cache_node *n,
- int (*get_count)(struct page *))
-{
- unsigned long flags;
- unsigned long x = 0;
- struct page *page;
-
- spin_lock_irqsave(&n->list_lock, flags);
- list_for_each_entry(page, &n->partial, lru)
- x += get_count(page);
- spin_unlock_irqrestore(&n->list_lock, flags);
- return x;
-}
-
static int count_inuse(struct page *page)
{
return page->inuse;
@@ -3364,11 +3456,6 @@ static int count_total(struct page *page)
return page->objects;
}
-static int count_free(struct page *page)
-{
- return page->objects - page->inuse;
-}
-
static int validate_slab(struct kmem_cache *s, struct page *page,
unsigned long *map)
{
@@ -3737,7 +3824,7 @@ static int list_locations(struct kmem_cache *s, char *buf,
to_cpumask(l->cpus));
}
- if (num_online_nodes() > 1 && !nodes_empty(l->nodes) &&
+ if (nr_online_nodes > 1 && !nodes_empty(l->nodes) &&
len < PAGE_SIZE - 60) {
len += sprintf(buf + len, " nodes=");
len += nodelist_scnprintf(buf + len, PAGE_SIZE - len - 50,
@@ -4412,6 +4499,8 @@ static char *create_unique_id(struct kmem_cache *s)
*p++ = 'a';
if (s->flags & SLAB_DEBUG_FREE)
*p++ = 'F';
+ if (!(s->flags & SLAB_NOTRACK))
+ *p++ = 't';
if (p != name + 1)
*p++ = '-';
p += sprintf(p, "%07d", s->size);