author     Alex Shi <alex.shi@intel.com>            2011-09-07 10:26:36 +0800
committer  Pekka Enberg <penberg@kernel.org>        2011-09-13 20:41:25 +0300
commit     12d79634f8d7af5229b7d21143d50e7cf7d94177 (patch)
tree       06096e3fdcc4c7fe84b9111d2f6274514bfb72b0
parent     aca726a07a71ff7aedc0e90a91f80a2701adcca5 (diff)
slub: Code optimization in get_partial_node()
I found a way to eliminate a variable in get_partial_node(), which also
makes the code easier to understand.

Acked-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Alex Shi <alex.shi@intel.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
-rw-r--r--  mm/slub.c  6
1 file changed, 2 insertions(+), 4 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 0e286ac..4982fb5 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1600,7 +1600,6 @@ static void *get_partial_node(struct kmem_cache *s,
 {
 	struct page *page, *page2;
 	void *object = NULL;
-	int count = 0;
 
 	/*
 	 * Racy check. If we mistakenly see no partial slabs then we
@@ -1613,17 +1612,16 @@ static void *get_partial_node(struct kmem_cache *s,
 
 	spin_lock(&n->list_lock);
 	list_for_each_entry_safe(page, page2, &n->partial, lru) {
-		void *t = acquire_slab(s, n, page, count == 0);
+		void *t = acquire_slab(s, n, page, object == NULL);
 		int available;
 
 		if (!t)
			break;
 
-		if (!count) {
+		if (!object) {
 			c->page = page;
 			c->node = page_to_nid(page);
 			stat(s, ALLOC_FROM_PARTIAL);
-			count++;
 			object = t;
 			available = page->objects - page->inuse;
 		} else {
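For illustration only (not part of the commit): a minimal userspace sketch of the pattern the patch applies. The idea is that the first successful acquisition can be detected by object still being NULL, so the separate count flag becomes redundant. fake_acquire() and the slots array are hypothetical stand-ins for acquire_slab() and the n->partial list.

/* Sketch of the "pointer doubles as the first-hit flag" pattern. */
#include <stdio.h>
#include <stddef.h>

/* Hypothetical stand-in for acquire_slab(): "acquires" only even slots. */
static void *fake_acquire(int *slot, int want_first)
{
	(void)want_first;	/* the real function uses this to decide how to take the slab */
	return (*slot % 2 == 0) ? slot : NULL;
}

int main(void)
{
	int slots[] = { 2, 4, 6, 5, 8 };	/* stand-in for the partial list */
	void *object = NULL;

	for (size_t i = 0; i < sizeof(slots) / sizeof(slots[0]); i++) {
		/* "object == NULL" carries the information "count == 0" used to. */
		void *t = fake_acquire(&slots[i], object == NULL);

		if (!t)
			break;

		if (!object) {
			/* First successful acquisition; no separate counter needed. */
			object = t;
			printf("first object taken from slot %zu\n", i);
		} else {
			printf("additional object taken from slot %zu\n", i);
		}
	}
	return object ? 0 : 1;
}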