path: root/mm/page_alloc.c
author:    Yinghai Lu <yinghai@kernel.org>                 2013-09-11 14:20:37 -0700
committer: Linus Torvalds <torvalds@linux-foundation.org> 2013-09-11 15:57:19 -0700
commit:    e2d0bd2b924d74d5e0d4f395f8f4730d125e198c (patch)
tree:      1ed63051163b55dce3491cb6cee65d621a80c923 /mm/page_alloc.c
parent:    f92310c1877fc73470bdcd9228758fa3713c191b (diff)
mm: kill one if loop in __free_pages_bootmem()
We should not check loop + 1 against the loop end inside the loop body. Just duplicate the two lines of code to avoid it. That will help a bit when we have a huge number of pages, e.g. on a system with 16TiB of memory.

Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Cc: Mel Gorman <mgorman@suse.de>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
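The change is ordinary last-iteration peeling: the final element is handled once after the loop so the remaining iterations can prefetch the next element without a bounds check. As a rough, standalone illustration of the same transformation (not kernel code: touch_next() and handle() are hypothetical stand-ins for prefetchw() and __ClearPageReserved()/set_page_count()):

/* Before: every iteration re-checks the bound just to decide whether a
 * "next" element exists. */
static void touch_next(int *p) { (void)p; }	/* pretend prefetch */
static void handle(int *p)     { *p = 0; }	/* pretend per-element work */

static void walk_checked(int *items, unsigned int n)
{
	unsigned int i;

	touch_next(items);
	for (i = 0; i < n; i++) {
		if (i + 1 < n)
			touch_next(&items[i + 1]);
		handle(&items[i]);
	}
}

/* After: peel the last element out of the loop so the remaining iterations
 * can assume a next element unconditionally.  Assumes n >= 1, which always
 * holds for nr_pages = 1 << order in the patched function. */
static void walk_peeled(int *items, unsigned int n)
{
	int *p = items;
	unsigned int i;

	touch_next(p);
	for (i = 0; i < n - 1; i++, p++) {
		touch_next(p + 1);
		handle(p);
	}
	handle(p);
}

int main(void)
{
	int a[8];

	walk_checked(a, 8);
	walk_peeled(a, 8);
	return 0;
}

The per-iteration branch is cheap, but with order-10 bootmem frees on a 16TiB machine the loop runs billions of times in total, so dropping the compare is a small measurable win.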
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2748fc6a..8c68ef1 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -751,19 +751,19 @@ static void __free_pages_ok(struct page *page, unsigned int order)
 void __init __free_pages_bootmem(struct page *page, unsigned int order)
 {
 	unsigned int nr_pages = 1 << order;
+	struct page *p = page;
 	unsigned int loop;
 
-	prefetchw(page);
-	for (loop = 0; loop < nr_pages; loop++) {
-		struct page *p = &page[loop];
-
-		if (loop + 1 < nr_pages)
-			prefetchw(p + 1);
+	prefetchw(p);
+	for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
+		prefetchw(p + 1);
 		__ClearPageReserved(p);
 		set_page_count(p, 0);
 	}
+	__ClearPageReserved(p);
+	set_page_count(p, 0);
 
-	page_zone(page)->managed_pages += 1 << order;
+	page_zone(page)->managed_pages += nr_pages;
 	set_page_refcounted(page);
 	__free_pages(page, order);
 }
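For reference, the function as it reads after applying the hunk, reconstructed only from the context and added lines above:

void __init __free_pages_bootmem(struct page *page, unsigned int order)
{
	unsigned int nr_pages = 1 << order;
	struct page *p = page;
	unsigned int loop;

	prefetchw(p);
	for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
		prefetchw(p + 1);
		__ClearPageReserved(p);
		set_page_count(p, 0);
	}
	__ClearPageReserved(p);
	set_page_count(p, 0);

	page_zone(page)->managed_pages += nr_pages;
	set_page_refcounted(page);
	__free_pages(page, order);
}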