author     Mel Gorman <mel@csn.ul.ie>  2010-08-09 17:19:30 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2010-08-09 20:45:00 -0700
commit     666356297ec4e9e6594c6008803f2b1403ff7950 (patch)
tree       aaa1e60f81588d0d90c2279c9812a32e5a085a27 /mm
parent     d4debc66d1fc1b98a68081c4c8156f171841dca8 (diff)
vmscan: set up pagevec as late as possible in shrink_inactive_list()
shrink_inactive_list() sets up a pagevec to release unfreeable pages.  It
uses a significant amount of stack doing this.  This patch splits
shrink_inactive_list() so that the stack usage is taken out of the main
path and callers into writepage() do not carry an unused pagevec on the
stack.

Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Reviewed-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Rik van Riel <riel@redhat.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Chris Mason <chris.mason@oracle.com>
Cc: Nick Piggin <npiggin@suse.de>
Cc: Rik van Riel <riel@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Michael Rubin <mrubin@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
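Background for readers unfamiliar with the trick: a non-inlined helper keeps its locals in its own stack frame, so the pagevec (a batch of roughly a dozen page pointers, on the order of a hundred bytes on 64-bit) only occupies stack while the put-back runs, not across the deep call chain into ->writepage().  Below is a minimal userspace sketch of that idea, not kernel code: noinline_for_stack is approximated with a GCC/Clang noinline attribute, and struct batch, put_back_items() and shrink_list() are hypothetical stand-ins for the pagevec, putback_lru_pages() and shrink_inactive_list().

/* Hypothetical sketch; build with: gcc -O2 -o stacksplit stacksplit.c */
#include <stdio.h>

/*
 * Userspace stand-in for the kernel's noinline_for_stack annotation:
 * keep the helper's large frame out of the caller by forbidding inlining
 * (GCC/Clang attribute).
 */
#define noinline_for_stack __attribute__((noinline))

/* Rough analogue of a pagevec: a small fixed-size batch of pointers. */
struct batch {
	void *slots[14];
	int nr;
};

/* Only this helper pays the stack cost of the batch. */
static noinline_for_stack void put_back_items(int *items, int count)
{
	struct batch b = { .nr = 0 };
	int i;

	for (i = 0; i < count; i++) {
		b.slots[b.nr++] = &items[i];	/* collect items into the batch */
		if (b.nr == 14) {		/* batch full: release in one go */
			printf("releasing a batch of %d items\n", b.nr);
			b.nr = 0;
		}
	}
	if (b.nr)
		printf("releasing final batch of %d items\n", b.nr);
}

/*
 * The hot path: its own frame no longer holds the batch, so the deeper
 * calls it makes (writepage() in the kernel case) see a smaller stack.
 */
static int shrink_list(int *items, int count)
{
	int i, reclaimed = 0;

	for (i = 0; i < count; i++)
		if (items[i] % 2 == 0)		/* pretend even items are freed */
			reclaimed++;

	put_back_items(items, count);		/* batched put-back moved out of line */
	return reclaimed;
}

int main(void)
{
	int items[40], i;

	for (i = 0; i < 40; i++)
		items[i] = i;
	printf("reclaimed %d of 40\n", shrink_list(items, 40));
	return 0;
}

The design point mirrors the patch: the batch object exists only inside the noinline helper's frame, so the stack seen by anything called from the hot path stays small.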
Diffstat (limited to 'mm')
-rw-r--r--  mm/vmscan.c  | 99
1 file changed, 56 insertions(+), 43 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 7f25f33..12b6921 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1133,19 +1133,65 @@ static int too_many_isolated(struct zone *zone, int file,
 }
 /*
+ * TODO: Try merging with migrations version of putback_lru_pages
+ */
+static noinline_for_stack void
+putback_lru_pages(struct zone *zone, struct zone_reclaim_stat *reclaim_stat,
+		  unsigned long nr_anon, unsigned long nr_file,
+		  struct list_head *page_list)
+{
+	struct page *page;
+	struct pagevec pvec;
+
+	pagevec_init(&pvec, 1);
+
+	/*
+	 * Put back any unfreeable pages.
+	 */
+	spin_lock(&zone->lru_lock);
+	while (!list_empty(page_list)) {
+		int lru;
+		page = lru_to_page(page_list);
+		VM_BUG_ON(PageLRU(page));
+		list_del(&page->lru);
+		if (unlikely(!page_evictable(page, NULL))) {
+			spin_unlock_irq(&zone->lru_lock);
+			putback_lru_page(page);
+			spin_lock_irq(&zone->lru_lock);
+			continue;
+		}
+		SetPageLRU(page);
+		lru = page_lru(page);
+		add_page_to_lru_list(zone, page, lru);
+		if (is_active_lru(lru)) {
+			int file = is_file_lru(lru);
+			reclaim_stat->recent_rotated[file]++;
+		}
+		if (!pagevec_add(&pvec, page)) {
+			spin_unlock_irq(&zone->lru_lock);
+			__pagevec_release(&pvec);
+			spin_lock_irq(&zone->lru_lock);
+		}
+	}
+	__mod_zone_page_state(zone, NR_ISOLATED_ANON, -nr_anon);
+	__mod_zone_page_state(zone, NR_ISOLATED_FILE, -nr_file);
+
+	spin_unlock_irq(&zone->lru_lock);
+	pagevec_release(&pvec);
+}
+
+/*
  * shrink_inactive_list() is a helper for shrink_zone(). It returns the number
  * of reclaimed pages
  */
-static unsigned long shrink_inactive_list(unsigned long nr_to_scan,
-			struct zone *zone, struct scan_control *sc,
-			int priority, int file)
+static noinline_for_stack unsigned long
+shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
+		     struct scan_control *sc, int priority, int file)
 {
 	LIST_HEAD(page_list);
-	struct pagevec pvec;
 	unsigned long nr_scanned;
 	unsigned long nr_reclaimed = 0;
 	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
-	struct page *page;
 	unsigned long nr_taken;
 	unsigned long nr_active;
 	unsigned int count[NR_LRU_LISTS] = { 0, };
@@ -1161,8 +1207,6 @@ static unsigned long shrink_inactive_list(unsigned long nr_to_scan,
 	}
-	pagevec_init(&pvec, 1);
-
 	lru_add_drain();
 	spin_lock_irq(&zone->lru_lock);
@@ -1192,8 +1236,10 @@ static unsigned long shrink_inactive_list(unsigned long nr_to_scan,
 		 */
 	}
-	if (nr_taken == 0)
-		goto done;
+	if (nr_taken == 0) {
+		spin_unlock_irq(&zone->lru_lock);
+		return 0;
+	}
 	nr_active = clear_active_flags(&page_list, count);
 	__count_vm_events(PGDEACTIVATE, nr_active);
@@ -1244,40 +1290,7 @@ static unsigned long shrink_inactive_list(unsigned long nr_to_scan,
 		__count_vm_events(KSWAPD_STEAL, nr_reclaimed);
 	__count_zone_vm_events(PGSTEAL, zone, nr_reclaimed);
-	spin_lock(&zone->lru_lock);
-	/*
-	 * Put back any unfreeable pages.
-	 */
-	while (!list_empty(&page_list)) {
-		int lru;
-		page = lru_to_page(&page_list);
-		VM_BUG_ON(PageLRU(page));
-		list_del(&page->lru);
-		if (unlikely(!page_evictable(page, NULL))) {
-			spin_unlock_irq(&zone->lru_lock);
-			putback_lru_page(page);
-			spin_lock_irq(&zone->lru_lock);
-			continue;
-		}
-		SetPageLRU(page);
-		lru = page_lru(page);
-		add_page_to_lru_list(zone, page, lru);
-		if (is_active_lru(lru)) {
-			int file = is_file_lru(lru);
-			reclaim_stat->recent_rotated[file]++;
-		}
-		if (!pagevec_add(&pvec, page)) {
-			spin_unlock_irq(&zone->lru_lock);
-			__pagevec_release(&pvec);
-			spin_lock_irq(&zone->lru_lock);
-		}
-	}
-	__mod_zone_page_state(zone, NR_ISOLATED_ANON, -nr_anon);
-	__mod_zone_page_state(zone, NR_ISOLATED_FILE, -nr_file);
-
-done:
-	spin_unlock_irq(&zone->lru_lock);
-	pagevec_release(&pvec);
+	putback_lru_pages(zone, reclaim_stat, nr_anon, nr_file, &page_list);
 	return nr_reclaimed;
 }