Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c  22
1 file changed, 16 insertions, 6 deletions
diff --git a/mm/slub.c b/mm/slub.c
index ce62b77..b9f1491 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -21,7 +21,6 @@
 #include <linux/kmemcheck.h>
 #include <linux/cpu.h>
 #include <linux/cpuset.h>
-#include <linux/kmemleak.h>
 #include <linux/mempolicy.h>
 #include <linux/ctype.h>
 #include <linux/debugobjects.h>
@@ -1085,11 +1084,17 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 {
 	struct page *page;
 	struct kmem_cache_order_objects oo = s->oo;
+	gfp_t alloc_gfp;
 
 	flags |= s->allocflags;
 
-	page = alloc_slab_page(flags | __GFP_NOWARN | __GFP_NORETRY, node,
-									oo);
+	/*
+	 * Let the initial higher-order allocation fail under memory pressure
+	 * so we fall-back to the minimum order allocation.
+	 */
+	alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
+
+	page = alloc_slab_page(alloc_gfp, node, oo);
 	if (unlikely(!page)) {
 		oo = s->min;
 		/*
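
The hunk above only works because the optimistic higher-order attempt is allowed to fail: if the caller's __GFP_NOFAIL were left in, the page allocator would keep trying rather than return NULL, and the minimum-order fallback in the if (unlikely(!page)) branch would never be reached. A rough userspace sketch of the same two-step pattern (the FAKE_* flags and the try_alloc_order() helper are invented for illustration, not kernel APIs):

/*
 * Userspace analogue of the fallback above.  The first, higher-order
 * attempt has NOFAIL stripped so it may fail quickly; only then is the
 * minimum order tried with the caller's original flags.
 */
#include <stdio.h>
#include <stdlib.h>

#define FAKE_NOWARN	0x1	/* stand-in for __GFP_NOWARN  */
#define FAKE_NORETRY	0x2	/* stand-in for __GFP_NORETRY */
#define FAKE_NOFAIL	0x4	/* stand-in for __GFP_NOFAIL  */

/* Pretend higher-order allocations fail whenever "pressure" is set. */
static void *try_alloc_order(unsigned int order, unsigned int flags, int pressure)
{
	if (order > 0 && pressure && !(flags & FAKE_NOFAIL))
		return NULL;
	return malloc(4096u << order);
}

int main(void)
{
	unsigned int flags = FAKE_NOFAIL;	/* caller insists on success */
	unsigned int alloc_flags;
	void *p;

	/* First try: higher order, may fail, no retries, no warning. */
	alloc_flags = (flags | FAKE_NOWARN | FAKE_NORETRY) & ~FAKE_NOFAIL;
	p = try_alloc_order(3, alloc_flags, 1);
	if (!p)		/* fall back to the minimum order */
		p = try_alloc_order(0, flags, 1);

	printf("allocation %s\n", p ? "succeeded" : "failed");
	free(p);
	return 0;
}
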
@@ -2589,6 +2594,8 @@ static inline int kmem_cache_close(struct kmem_cache *s)
  */
 void kmem_cache_destroy(struct kmem_cache *s)
 {
+	if (s->flags & SLAB_DESTROY_BY_RCU)
+		rcu_barrier();
 	down_write(&slub_lock);
 	s->refcount--;
 	if (!s->refcount) {
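
The rcu_barrier() matters because a SLAB_DESTROY_BY_RCU cache returns its slab pages to the page allocator from an RCU callback; destroying the cache before those callbacks have run would let them touch a kmem_cache that no longer exists. A minimal, hypothetical user of such a cache (the struct foo module below is made up, not part of this patch), showing where the wait now happens on the caller's behalf:

#include <linux/module.h>
#include <linux/slab.h>

struct foo {
	int val;
};

static struct kmem_cache *foo_cache;

static int __init foo_init(void)
{
	/* Objects may be reused at once, but slab pages are freed via RCU. */
	foo_cache = kmem_cache_create("foo_cache", sizeof(struct foo), 0,
				      SLAB_DESTROY_BY_RCU, NULL);
	return foo_cache ? 0 : -ENOMEM;
}

static void __exit foo_exit(void)
{
	/* With the hunk above, this waits for pending RCU slab frees itself. */
	kmem_cache_destroy(foo_cache);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");
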
@@ -2827,13 +2834,15 @@ EXPORT_SYMBOL(__kmalloc);
 static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 {
 	struct page *page;
+	void *ptr = NULL;
 
 	flags |= __GFP_COMP | __GFP_NOTRACK;
 	page = alloc_pages_node(node, flags, get_order(size));
 	if (page)
-		return page_address(page);
-	else
-		return NULL;
+		ptr = page_address(page);
+
+	kmemleak_alloc(ptr, size, 1, flags);
+	return ptr;
 }
 
 #ifdef CONFIG_NUMA
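
Requests too large for the kmalloc caches are handed straight to the page allocator, so without this hook kmemleak would never see the block: it would neither scan it for pointers to other objects nor report it if it leaked. The same alloc/free pairing applies to any code that takes pages directly; a rough sketch (the my_buf_* helpers are hypothetical, not slub.c code):

#include <linux/gfp.h>
#include <linux/kmemleak.h>
#include <linux/mm.h>

static void *my_buf_alloc(size_t size, gfp_t gfp)
{
	struct page *page = alloc_pages(gfp | __GFP_COMP, get_order(size));
	void *ptr = page ? page_address(page) : NULL;

	/* min_count == 1: report the block if no other pointer to it is found. */
	kmemleak_alloc(ptr, size, 1, gfp);
	return ptr;
}

static void my_buf_free(void *ptr, size_t size)
{
	if (!ptr)
		return;
	kmemleak_free(ptr);
	__free_pages(virt_to_page(ptr), get_order(size));
}
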
@@ -2918,6 +2927,7 @@ void kfree(const void *x)
 	page = virt_to_head_page(x);
 	if (unlikely(!PageSlab(page))) {
 		BUG_ON(!PageCompound(page));
+		kmemleak_free(x);
 		put_page(page);
 		return;
 	}
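
This is the free side of the same bookkeeping: a pointer that came from kmalloc_large_node() is not backed by a slab, so kfree() takes the !PageSlab() branch and has to unregister the block with kmemleak before dropping the compound page. A hypothetical caller that exercises both hooks:

#include <linux/slab.h>

static int big_buffer_roundtrip(void)
{
	/* Well past the largest kmalloc cache, so SLUB takes the
	 * kmalloc_large()/kmalloc_large_node() path above. */
	void *buf = kmalloc(4 * PAGE_SIZE, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;
	/* ... use buf ... */
	kfree(buf);	/* !PageSlab(): kmemleak_free(), then put_page() */
	return 0;
}
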