author     Mel Gorman <mel@csn.ul.ie>                            2007-10-16 01:26:00 -0700
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-10-16 09:43:00 -0700
commit     d100313fd615cc30374ff92e0b3facb053838330 (patch)
tree       f0bcd5e3b07bee40a65182c63b54baceca366849 /mm/page_alloc.c
parent     64c5e135bf5a2a7f0ededb3435a31adbe0202f0c (diff)
Fix calculation in move_freepages_block for counting pages
move_freepages_block() returns the number of blocks moved.  This value is used
to determine if a block of pages should be stolen for the exclusive use of a
migrate type or not.  However, the value returned is not being used correctly.
This patch fixes the calculation to return the number of base pages that have
been moved.

This should be considered a fix to the patch
move-free-pages-between-lists-on-steal.patch

Credit to Andy Whitcroft for spotting the problem.

Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Acked-by: Andy Whitcroft <apw@shadowen.org>
Acked-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
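
A note on the counting change: each buddy free-list entry of order N covers
1 << N base pages, so summing 1 << order per moved entry yields base pages,
while a bare increment only counts free-list entries (blocks).  A minimal
standalone C sketch of the difference (not kernel code; the orders array is
an invented example):

/*
 * Hypothetical standalone sketch, not kernel code: shows why summing
 * "1 << order" per moved free-list entry counts base pages, while a
 * plain increment only counts buddy blocks (free-list entries).
 */
#include <stdio.h>

int main(void)
{
	/* Invented example: orders of three free buddy blocks being moved. */
	unsigned long orders[] = { 0, 3, 5 };
	int blocks_moved = 0;
	int pages_moved = 0;
	int i;

	for (i = 0; i < 3; i++) {
		blocks_moved++;                 /* old calculation: entries moved */
		pages_moved += 1 << orders[i];  /* fixed calculation: base pages  */
	}

	/* Prints "blocks=3 pages=41" (1 + 8 + 32 base pages). */
	printf("blocks=%d pages=%d\n", blocks_moved, pages_moved);
	return 0;
}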
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c  8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ac8fc51..942498f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -718,7 +718,7 @@ int move_freepages(struct zone *zone,
 {
 	struct page *page;
 	unsigned long order;
-	int blocks_moved = 0;
+	int pages_moved = 0;
 
 #ifndef CONFIG_HOLES_IN_ZONE
 	/*
@@ -747,10 +747,10 @@ int move_freepages(struct zone *zone,
 		list_add(&page->lru,
 			&zone->free_area[order].free_list[migratetype]);
 		page += 1 << order;
-		blocks_moved++;
+		pages_moved += 1 << order;
 	}
 
-	return blocks_moved;
+	return pages_moved;
 }
 
 int move_freepages_block(struct zone *zone, struct page *page, int migratetype)
@@ -833,7 +833,7 @@ static struct page *__rmqueue_fallback(struct zone *zone, int order,
 								start_migratetype);
 
 				/* Claim the whole block if over half of it is free */
-				if ((pages << current_order) >= (1 << (MAX_ORDER-2)))
+				if (pages >= (1 << (MAX_ORDER-2)))
 					set_pageblock_migratetype(page,
 								start_migratetype);
 
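
On the threshold in the last hunk: a block here spans 1 << (MAX_ORDER-1) base
pages, so half of it is 1 << (MAX_ORDER-2).  With pages now counted in base
pages the direct comparison is meaningful, whereas the old
(pages << current_order) scaled a block count by the order of the page being
stolen.  A minimal standalone sketch of the check, assuming the common
MAX_ORDER of 11 (1024-page block, 512-page threshold) and an invented pages
value:

/*
 * Hypothetical sketch of the "over half of the block is free" test,
 * assuming MAX_ORDER == 11: a block spans 1 << (MAX_ORDER-1) = 1024
 * base pages, so the halfway mark is 1 << (MAX_ORDER-2) = 512 pages.
 */
#include <stdio.h>

#define MAX_ORDER 11	/* assumed value for illustration */

int main(void)
{
	int pages = 600;	/* invented base-page count from move_freepages_block() */

	if (pages >= (1 << (MAX_ORDER-2)))
		printf("claim whole block: %d >= %d\n", pages, 1 << (MAX_ORDER-2));
	else
		printf("leave block's migratetype unchanged\n");
	return 0;
}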