author     Linus Torvalds <torvalds@linux-foundation.org>  2013-09-15 07:15:06 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-09-15 07:15:06 -0400
commit     bff157b3ad4b9f6be0af6987fcd62deaf0f2b799 (patch)
tree       02ae68620a40fefd9ffc2de739a8bb362baa3f08 /mm/slob.c
parent     8bf5e36d0429e9b8fc2c84966577f10386bd7195 (diff)
parent     23774a2f6fee0848503bfb8004eeeb5adef94f5c (diff)
Merge branch 'slab/next' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux
Pull SLAB update from Pekka Enberg:
"Nothing terribly exciting here apart from Christoph's kmalloc
unification patches that brings sl[aou]b implementations closer to
each other"
* 'slab/next' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux:
slab: Use correct GFP_DMA constant
slub: remove verify_mem_not_deleted()
mm/sl[aou]b: Move kmallocXXX functions to common code
mm, slab_common: add 'unlikely' to size check of kmalloc_slab()
mm/slub.c: beautify code for removing redundancy 'break' statement.
slub: Remove unnecessary page NULL check
slub: don't use cpu partial pages on UP
mm/slub: beautify code for 80 column limitation and tab alignment
mm/slub: remove 'per_cpu' which is useless variable
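
The unification the pull message refers to means the kmallocXXX entry points now live in common code, and each of sl[aou]b only supplies the underlying workers. A minimal sketch of that shape, assuming the hypothetical name kmalloc_node_sketch() (the real shared wrappers live in include/linux/slab.h and also do compile-time size checks and tracing):

#include <linux/slab.h>

/* Hypothetical illustration of the pattern, not the actual kernel
 * wrapper. */
static inline void *kmalloc_node_sketch(size_t size, gfp_t flags, int node)
{
#ifdef CONFIG_NUMA
	/* NUMA build: honor the caller's node preference. */
	return __kmalloc_node(size, flags, node);
#else
	/* !NUMA build: a node hint is meaningless, so allocators can
	 * compile out their node-aware entry points entirely. */
	return __kmalloc(size, flags);
#endif
}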
Diffstat (limited to 'mm/slob.c')
-rw-r--r--  mm/slob.c  28
1 file changed, 24 insertions(+), 4 deletions(-)
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -462,11 +462,11 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
 	return ret;
 }
 
-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
+void *__kmalloc(size_t size, gfp_t gfp)
 {
-	return __do_kmalloc_node(size, gfp, node, _RET_IP_);
+	return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, _RET_IP_);
 }
-EXPORT_SYMBOL(__kmalloc_node);
+EXPORT_SYMBOL(__kmalloc);
 
 #ifdef CONFIG_TRACING
 void *__kmalloc_track_caller(size_t size, gfp_t gfp, unsigned long caller)
@@ -534,7 +534,7 @@ int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
 	return 0;
 }
 
-void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
+void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
 {
 	void *b;
 
@@ -560,7 +560,27 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
 	kmemleak_alloc_recursive(b, c->size, 1, c->flags, flags);
 	return b;
 }
+EXPORT_SYMBOL(slob_alloc_node);
+
+void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
+{
+	return slob_alloc_node(cachep, flags, NUMA_NO_NODE);
+}
+EXPORT_SYMBOL(kmem_cache_alloc);
+
+#ifdef CONFIG_NUMA
+void *__kmalloc_node(size_t size, gfp_t gfp, int node)
+{
+	return __do_kmalloc_node(size, gfp, node, _RET_IP_);
+}
+EXPORT_SYMBOL(__kmalloc_node);
+
+void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t gfp, int node)
+{
+	return slob_alloc_node(cachep, gfp, node);
+}
 EXPORT_SYMBOL(kmem_cache_alloc_node);
+#endif
 
 static void __kmem_cache_free(void *b, int size)
 {
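
For SLOB itself, the net effect of the hunks above is that kmem_cache_alloc() becomes a thin wrapper passing NUMA_NO_NODE to the renamed slob_alloc_node() worker, and the node-aware entry points (__kmalloc_node(), kmem_cache_alloc_node()) are only built under CONFIG_NUMA. Call sites are unaffected; a hypothetical one (foo_cache, foo_init(), and foo_new() are made up for this sketch) looks the same under any sl[aou]b backend:

#include <linux/slab.h>

struct foo {
	int x;
};

static struct kmem_cache *foo_cache;

static int foo_init(void)
{
	/* KMEM_CACHE() derives the cache name and object size from
	 * the struct definition. */
	foo_cache = KMEM_CACHE(foo, 0);
	return foo_cache ? 0 : -ENOMEM;
}

static struct foo *foo_new(gfp_t gfp)
{
	/* With SLOB configured, this now delegates to
	 * slob_alloc_node(foo_cache, gfp, NUMA_NO_NODE). */
	return kmem_cache_alloc(foo_cache, gfp);
}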