Diffstat (limited to 'mm')
-rw-r--r--  mm/page-writeback.c    4
-rw-r--r--  mm/vmscan.c           29
2 files changed, 18 insertions, 15 deletions
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index b289310..5fa63bd 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -597,7 +597,7 @@ static void balance_dirty_pages(struct address_space *mapping,
(!laptop_mode && ((global_page_state(NR_FILE_DIRTY)
+ global_page_state(NR_UNSTABLE_NFS))
> background_thresh)))
- bdi_start_writeback(bdi, NULL, 0, 0);
+ bdi_start_writeback(bdi, NULL, 0);
}
void set_page_dirty_balance(struct page *page, int page_mkwrite)
@@ -707,7 +707,7 @@ void laptop_mode_timer_fn(unsigned long data)
*/
if (bdi_has_dirty_io(&q->backing_dev_info))
- bdi_start_writeback(&q->backing_dev_info, NULL, nr_pages, 0);
+ bdi_start_writeback(&q->backing_dev_info, NULL, nr_pages);
}
/*
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 915dceb..9c7e57c 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1724,13 +1724,13 @@ static void shrink_zone(int priority, struct zone *zone,
* If a zone is deemed to be full of pinned pages then just give it a light
* scan then give up on it.
*/
-static int shrink_zones(int priority, struct zonelist *zonelist,
+static bool shrink_zones(int priority, struct zonelist *zonelist,
struct scan_control *sc)
{
enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask);
struct zoneref *z;
struct zone *zone;
- int progress = 0;
+ bool all_unreclaimable = true;
for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
sc->nodemask) {
@@ -1757,9 +1757,9 @@ static int shrink_zones(int priority, struct zonelist *zonelist,
}
shrink_zone(priority, zone, sc);
- progress = 1;
+ all_unreclaimable = false;
}
- return progress;
+ return all_unreclaimable;
}
/*
@@ -1782,7 +1782,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
struct scan_control *sc)
{
int priority;
- unsigned long ret = 0;
+ bool all_unreclaimable;
unsigned long total_scanned = 0;
struct reclaim_state *reclaim_state = current->reclaim_state;
unsigned long lru_pages = 0;
@@ -1813,7 +1813,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
sc->nr_scanned = 0;
if (!priority)
disable_swap_token();
- ret = shrink_zones(priority, zonelist, sc);
+ all_unreclaimable = shrink_zones(priority, zonelist, sc);
/*
* Don't shrink slabs when reclaiming memory from
* over limit cgroups
@@ -1826,10 +1826,8 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
}
}
total_scanned += sc->nr_scanned;
- if (sc->nr_reclaimed >= sc->nr_to_reclaim) {
- ret = sc->nr_reclaimed;
+ if (sc->nr_reclaimed >= sc->nr_to_reclaim)
goto out;
- }
/*
* Try to write back as many pages as we just scanned. This
@@ -1849,9 +1847,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
priority < DEF_PRIORITY - 2)
congestion_wait(BLK_RW_ASYNC, HZ/10);
}
- /* top priority shrink_zones still had more to do? don't OOM, then */
- if (ret && scanning_global_lru(sc))
- ret = sc->nr_reclaimed;
+
out:
/*
* Now that we've scanned all the zones at this priority level, note
@@ -1877,7 +1873,14 @@ out:
delayacct_freepages_end();
put_mems_allowed();
- return ret;
+ if (sc->nr_reclaimed)
+ return sc->nr_reclaimed;
+
+ /* top priority shrink_zones still had more to do? don't OOM, then */
+ if (scanning_global_lru(sc) && !all_unreclaimable)
+ return 1;
+
+ return 0;
}
unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
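
The net effect of the vmscan.c change is a new return-value convention for do_try_to_free_pages(): the function now reports the number of pages it actually reclaimed whenever that is non-zero, returns 1 when a global-LRU scan still found reclaimable zones (so callers do not declare OOM prematurely), and returns 0 only when every zone was unreclaimable. Below is a minimal standalone C sketch of that decision logic, not the kernel function itself; reclaim_result() and its parameter names are illustrative choices for this example.

/*
 * Userspace sketch of the return-value convention introduced above.
 * Build with: cc -o reclaim_sketch reclaim_sketch.c
 */
#include <stdbool.h>
#include <stdio.h>

static unsigned long reclaim_result(unsigned long nr_reclaimed,
				    bool global_lru_scan,
				    bool all_unreclaimable)
{
	/* Real progress: report how many pages were freed. */
	if (nr_reclaimed)
		return nr_reclaimed;

	/* top priority shrink_zones still had more to do? don't OOM, then */
	if (global_lru_scan && !all_unreclaimable)
		return 1;

	/* Nothing reclaimable anywhere: let the caller consider OOM. */
	return 0;
}

int main(void)
{
	printf("%lu\n", reclaim_result(32, true, false)); /* 32: pages freed */
	printf("%lu\n", reclaim_result(0, true, false));  /* 1: keep trying, no OOM */
	printf("%lu\n", reclaim_result(0, true, true));   /* 0: genuinely stuck */
	return 0;
}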