| author | Johannes Weiner <hannes@cmpxchg.org> | 2012-01-10 15:08:10 -0800 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-01-10 16:30:44 -0800 |
| commit | c3993076f842de3754360e5b998d6657a9d30303 (patch) | |
| tree | 78c1ca3d031483932e2f236706b20064742c0b0c /mm/page_alloc.c | |
| parent | 43d2b113241d6797b890318767e0af78e313414b (diff) | |
mm: page_alloc: generalize order handling in __free_pages_bootmem()
__free_pages_bootmem() used to special-case higher-order frees to save
individual page checking with free_pages_bulk().

Nowadays, both zero-order and non-zero-order frees use free_pages(), which
checks each individual page anyway, so there is little point in making
the distinction anymore. The higher-order loop works just fine for
zero-order pages.
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
Cc: Tejun Heo <tj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/page_alloc.c')
| -rw-r--r-- | mm/page_alloc.c | 33 |

1 file changed, 12 insertions(+), 21 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 59153da..794e671 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -730,32 +730,23 @@ static void __free_pages_ok(struct page *page, unsigned int order)
         local_irq_restore(flags);
 }
 
-/*
- * permit the bootmem allocator to evade page validation on high-order frees
- */
 void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
 {
-        if (order == 0) {
-                __ClearPageReserved(page);
-                set_page_count(page, 0);
-                set_page_refcounted(page);
-                __free_page(page);
-        } else {
-                int loop;
-
-                prefetchw(page);
-                for (loop = 0; loop < (1 << order); loop++) {
-                        struct page *p = &page[loop];
+        unsigned int nr_pages = 1 << order;
+        unsigned int loop;
 
-                        if (loop + 1 < (1 << order))
-                                prefetchw(p + 1);
-                        __ClearPageReserved(p);
-                        set_page_count(p, 0);
-                }
+        prefetchw(page);
+        for (loop = 0; loop < nr_pages; loop++) {
+                struct page *p = &page[loop];
 
-                set_page_refcounted(page);
-                __free_pages(page, order);
+                if (loop + 1 < nr_pages)
+                        prefetchw(p + 1);
+                __ClearPageReserved(p);
+                set_page_count(p, 0);
         }
+
+        set_page_refcounted(page);
+        __free_pages(page, order);
 }
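
As a quick illustration of why the removed order-0 branch was redundant: for order 0, `1 << order` evaluates to 1, so the generalized loop runs exactly once and performs the same per-page work (`__ClearPageReserved()`, `set_page_count()`) that the old special case did before handing the block to `__free_pages()`. The sketch below mirrors that loop shape in plain, standalone C; it is not kernel code, and `struct fake_page`, `free_pages_bootmem_sketch()` and the field assignments are hypothetical stand-ins for `struct page` and the kernel helpers.

```c
/*
 * Standalone sketch (not kernel code): mirrors the generalized loop in
 * __free_pages_bootmem() to show that an order-0 free is just the
 * nr_pages == 1 degenerate case of the higher-order path.
 * struct fake_page and the assignments below are hypothetical stand-ins
 * for struct page, __ClearPageReserved(), set_page_count() and prefetchw().
 */
#include <stdio.h>

struct fake_page {
        int reserved;
        int count;
};

static void free_pages_bootmem_sketch(struct fake_page *page, unsigned int order)
{
        unsigned int nr_pages = 1 << order;     /* order 0 => exactly one iteration */
        unsigned int loop;

        for (loop = 0; loop < nr_pages; loop++) {
                struct fake_page *p = &page[loop];

                if (loop + 1 < nr_pages)
                        __builtin_prefetch(p + 1, 1);   /* stands in for prefetchw(p + 1) */
                p->reserved = 0;                        /* stands in for __ClearPageReserved(p) */
                p->count = 0;                           /* stands in for set_page_count(p, 0) */
        }

        page->count = 1;        /* stands in for set_page_refcounted(page) */
        printf("freed %u page(s) at order %u\n", nr_pages, order);
}

int main(void)
{
        struct fake_page pages[4] = { {1, 1}, {1, 1}, {1, 1}, {1, 1} };

        free_pages_bootmem_sketch(pages, 0);    /* the old special case: a single page */
        free_pages_bootmem_sketch(pages, 2);    /* a higher-order free: 4 pages */
        return 0;
}
```

Hoisting `1 << order` into `nr_pages`, as the patch does, also makes the loop bound explicit and avoids recomputing the shift in the prefetch check.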