author | KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com> | 2010-05-24 14:32:37 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2010-05-25 08:07:00 -0700
commit | 5f53e76299ceebd68bdf9495e8ff80db77711236 (patch)
tree | 2ecb8324a6593a49868161d85511cc14d474900a /mm/vmscan.c
parent | bf8abe8b926f7546eb763fd2a088fe461dde6317 (diff)
vmscan: page_check_references(): check low order lumpy reclaim properly
If vmscan is in lumpy reclaim mode, it has to ignore the referenced bit
so that it can build contiguous runs of free pages, but the current
page_check_references() doesn't.

Fix it.
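
For context, a minimal user-space sketch of the mismatch this patch fixes, not the kernel code itself; the helper names are illustrative, and the constants are assumptions matching kernels of this era (PAGE_ALLOC_COSTLY_ORDER is 3, DEF_PRIORITY is 12):

	#include <stdbool.h>

	#define PAGE_ALLOC_COSTLY_ORDER 3	/* assumed value for this era */
	#define DEF_PRIORITY 12

	/*
	 * Lumpy reclaim as shrink_inactive_list() enabled it before this
	 * patch: either the allocation is costly, or it is a low-order
	 * allocation and reclaim is already struggling (priority dropped).
	 */
	static bool lumpy_reclaim_enabled(int order, int priority)
	{
		if (order > PAGE_ALLOC_COSTLY_ORDER)
			return true;
		if (order && priority < DEF_PRIORITY - 2)
			return true;
		return false;
	}

	/*
	 * What page_check_references() tested before the fix: only the
	 * costly-order case, so low-order lumpy reclaim kept honouring
	 * the referenced bit.
	 */
	static bool old_check_ignored_references(int order, int priority)
	{
		(void)priority;		/* old check never looked at it */
		return order > PAGE_ALLOC_COSTLY_ORDER;
	}

For order 2 at priority 9, lumpy_reclaim_enabled() returns true while old_check_ignored_references() returns false: exactly the low-order lumpy case the patch makes page_check_references() handle.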
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r-- | mm/vmscan.c | 41
1 file changed, 26 insertions(+), 15 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 8e1d723..cd4a5ed 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -77,6 +77,12 @@ struct scan_control {
 	int order;
 
+	/*
+	 * Intend to reclaim enough contenious memory rather than to reclaim
+	 * enough amount memory. I.e, it's the mode for high order allocation.
+	 */
+	bool lumpy_reclaim_mode;
+
 	/* Which cgroup do we reclaim from */
 	struct mem_cgroup *mem_cgroup;
 
@@ -575,7 +581,7 @@ static enum page_references page_check_references(struct page *page,
 	referenced_page = TestClearPageReferenced(page);
 
 	/* Lumpy reclaim - ignore references */
-	if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
+	if (sc->lumpy_reclaim_mode)
 		return PAGEREF_RECLAIM;
 
 	/*
@@ -1125,7 +1131,6 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
 	unsigned long nr_scanned = 0;
 	unsigned long nr_reclaimed = 0;
 	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
-	int lumpy_reclaim = 0;
 
 	while (unlikely(too_many_isolated(zone, file, sc))) {
 		congestion_wait(BLK_RW_ASYNC, HZ/10);
@@ -1135,17 +1140,6 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
 			return SWAP_CLUSTER_MAX;
 	}
 
-	/*
-	 * If we need a large contiguous chunk of memory, or have
-	 * trouble getting a small set of contiguous pages, we
-	 * will reclaim both active and inactive pages.
-	 *
-	 * We use the same threshold as pageout congestion_wait below.
-	 */
-	if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
-		lumpy_reclaim = 1;
-	else if (sc->order && priority < DEF_PRIORITY - 2)
-		lumpy_reclaim = 1;
-
 	pagevec_init(&pvec, 1);
 
@@ -1158,7 +1152,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
 		unsigned long nr_freed;
 		unsigned long nr_active;
 		unsigned int count[NR_LRU_LISTS] = { 0, };
-		int mode = lumpy_reclaim ? ISOLATE_BOTH : ISOLATE_INACTIVE;
+		int mode = sc->lumpy_reclaim_mode ? ISOLATE_BOTH : ISOLATE_INACTIVE;
 		unsigned long nr_anon;
 		unsigned long nr_file;
 
@@ -1211,7 +1205,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
 		 * but that should be acceptable to the caller
 		 */
 		if (nr_freed < nr_taken && !current_is_kswapd() &&
-		    lumpy_reclaim) {
+		    sc->lumpy_reclaim_mode) {
 			congestion_wait(BLK_RW_ASYNC, HZ/10);
 
 			/*
@@ -1639,6 +1633,21 @@ out:
 	}
 }
 
+static void set_lumpy_reclaim_mode(int priority, struct scan_control *sc)
+{
+	/*
+	 * If we need a large contiguous chunk of memory, or have
+	 * trouble getting a small set of contiguous pages, we
+	 * will reclaim both active and inactive pages.
+	 */
+	if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
+		sc->lumpy_reclaim_mode = 1;
+	else if (sc->order && priority < DEF_PRIORITY - 2)
+		sc->lumpy_reclaim_mode = 1;
+	else
+		sc->lumpy_reclaim_mode = 0;
+}
+
 /*
  * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
  */
@@ -1653,6 +1662,8 @@ static void shrink_zone(int priority, struct zone *zone,
 
 	get_scan_count(zone, sc, nr, priority);
 
+	set_lumpy_reclaim_mode(priority, sc);
+
 	while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
 					nr[LRU_INACTIVE_FILE]) {
 		for_each_evictable_lru(l) {
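
To see the consolidated decision in action, here is a stand-alone harness around the set_lumpy_reclaim_mode() body from the patch. The struct is trimmed to the two fields the decision needs, and the constants are assumptions matching this era's kernel; it is a sketch, not the kernel build:

	#include <stdio.h>
	#include <stdbool.h>

	#define PAGE_ALLOC_COSTLY_ORDER 3	/* assumed value for this era */
	#define DEF_PRIORITY 12

	/* Trimmed to the fields set_lumpy_reclaim_mode() consults. */
	struct scan_control {
		int order;
		bool lumpy_reclaim_mode;
	};

	/* Body lifted from the patch above. */
	static void set_lumpy_reclaim_mode(int priority, struct scan_control *sc)
	{
		if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
			sc->lumpy_reclaim_mode = 1;
		else if (sc->order && priority < DEF_PRIORITY - 2)
			sc->lumpy_reclaim_mode = 1;
		else
			sc->lumpy_reclaim_mode = 0;
	}

	int main(void)
	{
		struct scan_control sc = { .order = 2 };	/* low-order allocation */

		/* shrink_zone() is driven from DEF_PRIORITY down toward 0. */
		for (int priority = DEF_PRIORITY; priority >= 0; priority--) {
			set_lumpy_reclaim_mode(priority, &sc);
			printf("priority %2d: lumpy=%d (old order-only check: %d)\n",
			       priority, sc.lumpy_reclaim_mode,
			       sc.order > PAGE_ALLOC_COSTLY_ORDER);
		}
		return 0;
	}

From priority 9 downward the flag turns on while the old order-only test stays 0; that window is where low-order lumpy reclaim previously failed to ignore the referenced bit, and it is why the patch has page_check_references() test sc->lumpy_reclaim_mode instead of recomputing the order threshold.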