| author | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2012-11-16 18:21:36 -0800 |
|---|---|---|
| committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2012-11-16 18:21:36 -0800 |
| commit | 98c4514ff6e3072288770db66f91bdb15af8b433 (patch) | |
| tree | eb5f2541e70d27144720e1735b463471025908f0 /mm/vmscan.c | |
| parent | 644a9d3b66e6983c2c1f3b24c3006d49b184c871 (diff) | |
| parent | f4a75d2eb7b1e2206094b901be09adb31ba63681 (diff) | |
| download | op-kernel-dev-98c4514ff6e3072288770db66f91bdb15af8b433.zip op-kernel-dev-98c4514ff6e3072288770db66f91bdb15af8b433.tar.gz | |
Merge 3.7-rc6 into char-misc-next
Diffstat (limited to 'mm/vmscan.c')
| -rw-r--r-- | mm/vmscan.c | 27 |
|---|---|---|
1 file changed, 2 insertions(+), 25 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 2624edc..48550c6 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1760,28 +1760,6 @@ static bool in_reclaim_compaction(struct scan_control *sc)
 	return false;
 }
 
-#ifdef CONFIG_COMPACTION
-/*
- * If compaction is deferred for sc->order then scale the number of pages
- * reclaimed based on the number of consecutive allocation failures
- */
-static unsigned long scale_for_compaction(unsigned long pages_for_compaction,
-			struct lruvec *lruvec, struct scan_control *sc)
-{
-	struct zone *zone = lruvec_zone(lruvec);
-
-	if (zone->compact_order_failed <= sc->order)
-		pages_for_compaction <<= zone->compact_defer_shift;
-	return pages_for_compaction;
-}
-#else
-static unsigned long scale_for_compaction(unsigned long pages_for_compaction,
-			struct lruvec *lruvec, struct scan_control *sc)
-{
-	return pages_for_compaction;
-}
-#endif
-
 /*
  * Reclaim/compaction is used for high-order allocation requests. It reclaims
  * order-0 pages before compacting the zone. should_continue_reclaim() returns
@@ -1829,9 +1807,6 @@ static inline bool should_continue_reclaim(struct lruvec *lruvec,
	 * inactive lists are large enough, continue reclaiming
	 */
 	pages_for_compaction = (2UL << sc->order);
-
-	pages_for_compaction = scale_for_compaction(pages_for_compaction,
-						lruvec, sc);
 	inactive_lru_pages = get_lru_size(lruvec, LRU_INACTIVE_FILE);
 	if (nr_swap_pages > 0)
 		inactive_lru_pages += get_lru_size(lruvec, LRU_INACTIVE_ANON);
@@ -3017,6 +2992,8 @@ static int kswapd(void *p)
						&balanced_classzone_idx);
 		}
 	}
+
+	current->reclaim_state = NULL;
 	return 0;
 }
 
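For context, the first two hunks drop the deferred-compaction scaling, so should_continue_reclaim() is back to comparing a fixed order-based threshold against the size of the inactive LRU lists. Below is a minimal userspace sketch of just that comparison as it reads after the merge; the names pages_for_compaction and inactive_lru_pages come from the diff, while the standalone should_continue() wrapper, its parameters, and the sample values in main() are hypothetical and only illustrate the arithmetic, not the full kernel function.

```c
#include <stdbool.h>
#include <stdio.h>

/*
 * Sketch of the reclaim/compaction continuation threshold after this
 * merge: no scale_for_compaction() adjustment, just 2^(order+1) pages
 * compared against the inactive LRU size. Standalone illustration only.
 */
static bool should_continue(unsigned int order,
			    unsigned long inactive_file,
			    unsigned long inactive_anon,
			    bool swap_available)
{
	unsigned long pages_for_compaction = 2UL << order;	/* as in the diff */
	unsigned long inactive_lru_pages = inactive_file;

	/* anon pages only count if there is swap to reclaim them to */
	if (swap_available)
		inactive_lru_pages += inactive_anon;

	/* keep reclaiming while the inactive lists are still "large enough" */
	return inactive_lru_pages > pages_for_compaction;
}

int main(void)
{
	/* hypothetical sample: order-3 request, small inactive lists */
	printf("continue reclaim: %d\n",
	       should_continue(3, 10, 4, true));	/* 14 <= 16 -> 0 */
	return 0;
}
```

The third hunk is independent of the revert: kswapd now resets current->reclaim_state to NULL before the thread function returns, presumably so the task is not left pointing at the on-stack reclaim_state once kswapd exits.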