author    Christoph Lameter <cl@linux.com>   2012-05-09 10:09:56 -0500
committer Pekka Enberg <penberg@kernel.org>  2012-06-01 09:25:41 +0300
commit    ec3ab083a7a004282ee374bdaeb0aa603521b8eb (patch)
tree      8c570d6bd284ca8357675a4e7c8409dec691f91c /mm/slub.c
parent    188fd063208942a4681d8e8a4484ad0d4ae0fda1 (diff)
slub: Get rid of the node field
The node field is always page_to_nid(c->page), so it is rather easy to replace. Note that there may be slightly more overhead in various hot paths due to the need to shift the bits out of page->flags. However, that is mostly compensated for by the smaller footprint of the kmem_cache_cpu structure (this patch reduces it to 3 words per cache), which allows better caching.

Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
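For illustration, a minimal sketch of the idea in C follows. It is not the exact kernel code: the placeholder NUMA_NO_NODE value, the opaque struct page, and the trimmed kmem_cache_cpu layout are simplifying assumptions; the real definitions live in the kernel headers and in mm/slub.c, and config-dependent members (the per-cpu partial list, the statistics array) are omitted.

/*
 * Sketch only -- placeholder stand-ins for the kernel definitions.
 */
#define NUMA_NO_NODE	(-1)
struct page;					/* opaque in this sketch */
int page_to_nid(const struct page *page);	/* derives the node from page->flags */

/*
 * Per-cpu slab state after the patch: three words (freelist, tid, page).
 * Config-dependent members are left out of this sketch.
 */
struct kmem_cache_cpu {
	void **freelist;	/* next free object in the cpu slab */
	unsigned long tid;	/* globally unique transaction id */
	struct page *page;	/* slab page we are allocating from */
	/* int node; -- removed: always equal to page_to_nid(page) */
};

/* The NUMA check now recomputes the node from the page on each call. */
static inline int node_match(struct kmem_cache_cpu *c, int node)
{
	return node == NUMA_NO_NODE || page_to_nid(c->page) == node;
}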
Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c  35
1 file changed, 16 insertions(+), 19 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index b29246b..aed8792 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1561,7 +1561,6 @@ static void *get_partial_node(struct kmem_cache *s,
if (!object) {
c->page = page;
- c->node = page_to_nid(page);
stat(s, ALLOC_FROM_PARTIAL);
object = t;
available = page->objects - page->inuse;
@@ -2057,7 +2056,7 @@ static void flush_all(struct kmem_cache *s)
static inline int node_match(struct kmem_cache_cpu *c, int node)
{
#ifdef CONFIG_NUMA
- if (node != NUMA_NO_NODE && c->node != node)
+ if (node != NUMA_NO_NODE && page_to_nid(c->page) != node)
return 0;
#endif
return 1;
@@ -2152,7 +2151,6 @@ static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
page->freelist = NULL;
stat(s, ALLOC_SLAB);
- c->node = page_to_nid(page);
c->page = page;
*pc = c;
} else
@@ -2269,7 +2267,6 @@ new_slab:
if (c->partial) {
c->page = c->partial;
c->partial = c->page->next;
- c->node = page_to_nid(c->page);
stat(s, CPU_PARTIAL_ALLOC);
c->freelist = NULL;
goto redo;
@@ -2294,7 +2291,6 @@ new_slab:
c->freelist = get_freepointer(s, freelist);
deactivate_slab(s, c);
- c->node = NUMA_NO_NODE;
local_irq_restore(flags);
return freelist;
}
@@ -4507,30 +4503,31 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
for_each_possible_cpu(cpu) {
struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
- int node = ACCESS_ONCE(c->node);
+ int node;
struct page *page;
- if (node < 0)
- continue;
page = ACCESS_ONCE(c->page);
- if (page) {
- if (flags & SO_TOTAL)
- x = page->objects;
- else if (flags & SO_OBJECTS)
- x = page->inuse;
- else
- x = 1;
+ if (!page)
+ continue;
- total += x;
- nodes[node] += x;
- }
- page = c->partial;
+ node = page_to_nid(page);
+ if (flags & SO_TOTAL)
+ x = page->objects;
+ else if (flags & SO_OBJECTS)
+ x = page->inuse;
+ else
+ x = 1;
+
+ total += x;
+ nodes[node] += x;
+ page = ACCESS_ONCE(c->partial);
if (page) {
x = page->pobjects;
total += x;
nodes[node] += x;
}
+
per_cpu[node]++;
}
}
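The hot-path overhead the commit message mentions comes from node_match() now calling page_to_nid(), which on common configurations extracts the node id from the upper bits of page->flags. A simplified sketch of that extraction is below; the shift and mask values are illustrative placeholders, not the real memory-model-dependent constants.

/*
 * Sketch: on configurations that keep the node id in page->flags,
 * page_to_nid() is a load plus a shift and a mask.
 */
#define SKETCH_NODES_PGSHIFT	54		/* placeholder */
#define SKETCH_NODES_MASK	0x3ffUL		/* placeholder */

static inline int sketch_page_to_nid(unsigned long page_flags)
{
	return (int)((page_flags >> SKETCH_NODES_PGSHIFT) & SKETCH_NODES_MASK);
}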