path: root/mm/slab.c
author	Zhouping Liu <zliu@redhat.com>	2013-05-16 11:36:23 +0800
committer	Pekka Enberg <penberg@kernel.org>	2013-06-08 14:30:42 +0300
commit	d0d04b78f403b0bcfe03315e16b50d196610720d (patch)
tree	e37ddc853888ebee6dcdc0dee75c10139171ad30 /mm/slab.c
parent	8a965b3baa89ffedc73c0fbc750006c631012ced (diff)
mm, slab: moved kmem_cache_alloc_node comment to correct place
After several fixes to kmem_cache_alloc_node(), its kernel-doc comment ended up detached from the function it documents. This patch moves the comment back on top of the kmem_cache_alloc_node() definition.

Signed-off-by: Zhouping Liu <zliu@redhat.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
Diffstat (limited to 'mm/slab.c')
-rw-r--r--	mm/slab.c	23
1 file changed, 11 insertions(+), 12 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index a98f8db..273a5ac 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3340,18 +3340,6 @@ done:
return obj;
}
-/**
- * kmem_cache_alloc_node - Allocate an object on the specified node
- * @cachep: The cache to allocate from.
- * @flags: See kmalloc().
- * @nodeid: node number of the target node.
- * @caller: return address of caller, used for debug information
- *
- * Identical to kmem_cache_alloc but it will allocate memory on the given
- * node, which can improve the performance for cpu bound structures.
- *
- * Fallback to other node is possible if __GFP_THISNODE is not set.
- */
static __always_inline void *
slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
unsigned long caller)
@@ -3645,6 +3633,17 @@ EXPORT_SYMBOL(kmem_cache_alloc_trace);
#endif
#ifdef CONFIG_NUMA
+/**
+ * kmem_cache_alloc_node - Allocate an object on the specified node
+ * @cachep: The cache to allocate from.
+ * @flags: See kmalloc().
+ * @nodeid: node number of the target node.
+ *
+ * Identical to kmem_cache_alloc but it will allocate memory on the given
+ * node, which can improve the performance for cpu bound structures.
+ *
+ * Fallback to other node is possible if __GFP_THISNODE is not set.
+ */
void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
{
void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
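The kernel-doc comment restored above describes the kmem_cache_alloc_node() API: it behaves like kmem_cache_alloc() but allocates the object on the requested node, and may fall back to another node unless __GFP_THISNODE is passed. The snippet below is a minimal usage sketch and is not part of this patch; the module, the cache name "demo_cache", struct demo_obj, and the target node 0 are illustrative assumptions.

/*
 * Illustrative module only: allocate one object from a private cache on a
 * chosen NUMA node. All names, sizes, and the node id are assumptions for
 * this sketch, not taken from the patch above.
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/gfp.h>

struct demo_obj {
	unsigned long payload[8];
};

static struct kmem_cache *demo_cache;

static int __init demo_init(void)
{
	struct demo_obj *obj;
	int nid = 0;	/* assumed target node for the example */

	demo_cache = kmem_cache_create("demo_cache", sizeof(struct demo_obj),
				       0, SLAB_HWCACHE_ALIGN, NULL);
	if (!demo_cache)
		return -ENOMEM;

	/* Without __GFP_THISNODE this may fall back to another node. */
	obj = kmem_cache_alloc_node(demo_cache, GFP_KERNEL, nid);
	if (!obj) {
		kmem_cache_destroy(demo_cache);
		return -ENOMEM;
	}
	kmem_cache_free(demo_cache, obj);

	/*
	 * With __GFP_THISNODE the allocation is pinned to 'nid' and returns
	 * NULL instead of falling back; treat that as non-fatal here.
	 */
	obj = kmem_cache_alloc_node(demo_cache, GFP_KERNEL | __GFP_THISNODE, nid);
	if (obj)
		kmem_cache_free(demo_cache, obj);

	return 0;
}

static void __exit demo_exit(void)
{
	kmem_cache_destroy(demo_cache);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");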