author		Ingo Molnar <mingo@elte.hu>	2010-06-18 10:53:12 +0200
committer	Ingo Molnar <mingo@elte.hu>	2010-06-18 10:53:19 +0200
commit		646b1db4956ba8bf748b835b5eba211133d91c2e (patch)
tree		061166d873d9da9cf83044a7593ad111787076c5 /mm
parent		0f2c3de2ba110626515234d5d584fb1b0c0749a2 (diff)
parent		7e27d6e778cd87b6f2415515d7127eba53fe5d02 (diff)
Merge commit 'v2.6.35-rc3' into perf/core
Merge reason: Go from -rc1 base to -rc3 base, merge in fixes.
Diffstat (limited to 'mm')
-rw-r--r--	mm/page-writeback.c	34
-rw-r--r--	mm/shmem.c		5
-rw-r--r--	mm/vmscan.c		29
3 files changed, 41 insertions, 27 deletions
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index b289310..bbd396a 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -597,7 +597,7 @@ static void balance_dirty_pages(struct address_space *mapping,
 	    (!laptop_mode && ((global_page_state(NR_FILE_DIRTY)
 			       + global_page_state(NR_UNSTABLE_NFS))
 					  > background_thresh)))
-		bdi_start_writeback(bdi, NULL, 0, 0);
+		bdi_start_writeback(bdi, NULL, 0);
 }
 
 void set_page_dirty_balance(struct page *page, int page_mkwrite)
@@ -707,7 +707,7 @@ void laptop_mode_timer_fn(unsigned long data)
	 */
 
 	if (bdi_has_dirty_io(&q->backing_dev_info))
-		bdi_start_writeback(&q->backing_dev_info, NULL, nr_pages, 0);
+		bdi_start_writeback(&q->backing_dev_info, NULL, nr_pages);
 }
 
 /*
@@ -835,7 +835,6 @@ int write_cache_pages(struct address_space *mapping,
 	pgoff_t done_index;
 	int cycled;
 	int range_whole = 0;
-	long nr_to_write = wbc->nr_to_write;
 
 	pagevec_init(&pvec, 0);
 	if (wbc->range_cyclic) {
@@ -852,7 +851,22 @@ int write_cache_pages(struct address_space *mapping,
 		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
 			range_whole = 1;
 		cycled = 1; /* ignore range_cyclic tests */
+
+		/*
+		 * If this is a data integrity sync, cap the writeback to the
+		 * current end of file. Any extension to the file that occurs
+		 * after this is a new write and we don't need to write those
+		 * pages out to fulfil our data integrity requirements. If we
+		 * try to write them out, we can get stuck in this scan until
+		 * the concurrent writer stops adding dirty pages and extending
+		 * EOF.
+		 */
+		if (wbc->sync_mode == WB_SYNC_ALL &&
+		    wbc->range_end == LLONG_MAX) {
+			end = i_size_read(mapping->host) >> PAGE_CACHE_SHIFT;
+		}
 	}
+
 retry:
 	done_index = index;
 	while (!done && (index <= end)) {
@@ -935,11 +949,10 @@ continue_unlock:
 					done = 1;
 					break;
 				}
-			}
+			}
 
-			if (nr_to_write > 0) {
-				nr_to_write--;
-				if (nr_to_write == 0 &&
+			if (wbc->nr_to_write > 0) {
+				if (--wbc->nr_to_write == 0 &&
 				    wbc->sync_mode == WB_SYNC_NONE) {
 					/*
 					 * We stop writing back only if we are
@@ -970,11 +983,8 @@ continue_unlock:
 		end = writeback_index - 1;
 		goto retry;
 	}
-	if (!wbc->no_nrwrite_index_update) {
-		if (wbc->range_cyclic || (range_whole && nr_to_write > 0))
-			mapping->writeback_index = done_index;
-		wbc->nr_to_write = nr_to_write;
-	}
+	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
+		mapping->writeback_index = done_index;
 
 	return ret;
 }
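
The write_cache_pages() hunks above move the writeback budget into the
writeback_control itself: the function now decrements wbc->nr_to_write
directly (the no_nrwrite_index_update opt-out is gone), so a caller can read
how much of its budget was consumed after the call returns. A minimal caller
sketch, assuming the stock generic_writepages() helper; example_flush() and
its accounting are hypothetical, not part of this patch:

static long example_flush(struct address_space *mapping, long budget)
{
	struct writeback_control wbc = {
		.sync_mode	= WB_SYNC_NONE,	/* best-effort, not data integrity */
		.nr_to_write	= budget,	/* consumed in place by writeback */
		.range_start	= 0,
		.range_end	= LLONG_MAX,
	};

	generic_writepages(mapping, &wbc);

	/* wbc.nr_to_write was decremented directly, so the number of
	 * pages actually written is the budget minus what is left. */
	return budget - wbc.nr_to_write;
}
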
diff --git a/mm/shmem.c b/mm/shmem.c
index 7e5030a..f65f840 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -764,10 +764,11 @@ done2:
 static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
 {
 	struct inode *inode = dentry->d_inode;
+	loff_t newsize = attr->ia_size;
 	int error;
 
-	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
-		loff_t newsize = attr->ia_size;
+	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)
+	    && newsize != inode->i_size) {
 		struct page *page = NULL;
 
 		if (newsize < inode->i_size) {
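
The shmem_notify_change() hunk hoists newsize out of the branch and adds a
newsize != inode->i_size test, so a truncate to the file's current size no
longer walks the truncation path at all. A condensed restatement of the new
guard as a hypothetical helper (not in the patch):

/* Hypothetical helper: true only when a setattr actually changes the
 * size of a regular file, i.e. when the truncate work is worth doing. */
static bool size_really_changes(struct inode *inode, struct iattr *attr)
{
	return S_ISREG(inode->i_mode) &&
	       (attr->ia_valid & ATTR_SIZE) &&
	       attr->ia_size != inode->i_size;
}
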
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 915dceb..9c7e57c 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1724,13 +1724,13 @@ static void shrink_zone(int priority, struct zone *zone,
  * If a zone is deemed to be full of pinned pages then just give it a light
  * scan then give up on it.
  */
-static int shrink_zones(int priority, struct zonelist *zonelist,
+static bool shrink_zones(int priority, struct zonelist *zonelist,
 					struct scan_control *sc)
 {
 	enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask);
 	struct zoneref *z;
 	struct zone *zone;
-	int progress = 0;
+	bool all_unreclaimable = true;
 
 	for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
 					sc->nodemask) {
@@ -1757,9 +1757,9 @@ static int shrink_zones(int priority, struct zonelist *zonelist,
 		}
 
 		shrink_zone(priority, zone, sc);
-		progress = 1;
+		all_unreclaimable = false;
 	}
-	return progress;
+	return all_unreclaimable;
 }
 
 /*
@@ -1782,7 +1782,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 					struct scan_control *sc)
 {
 	int priority;
-	unsigned long ret = 0;
+	bool all_unreclaimable;
 	unsigned long total_scanned = 0;
 	struct reclaim_state *reclaim_state = current->reclaim_state;
 	unsigned long lru_pages = 0;
@@ -1813,7 +1813,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 		sc->nr_scanned = 0;
 		if (!priority)
 			disable_swap_token();
-		ret = shrink_zones(priority, zonelist, sc);
+		all_unreclaimable = shrink_zones(priority, zonelist, sc);
 		/*
 		 * Don't shrink slabs when reclaiming memory from
 		 * over limit cgroups
@@ -1826,10 +1826,8 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 			}
 		}
 		total_scanned += sc->nr_scanned;
-		if (sc->nr_reclaimed >= sc->nr_to_reclaim) {
-			ret = sc->nr_reclaimed;
+		if (sc->nr_reclaimed >= sc->nr_to_reclaim)
 			goto out;
-		}
 
 		/*
 		 * Try to write back as many pages as we just scanned. This
@@ -1849,9 +1847,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 		    priority < DEF_PRIORITY - 2)
 			congestion_wait(BLK_RW_ASYNC, HZ/10);
 	}
-	/* top priority shrink_zones still had more to do? don't OOM, then */
-	if (ret && scanning_global_lru(sc))
-		ret = sc->nr_reclaimed;
+
 out:
 	/*
 	 * Now that we've scanned all the zones at this priority level, note
@@ -1877,7 +1873,14 @@ out:
 	delayacct_freepages_end();
 	put_mems_allowed();
 
-	return ret;
+	if (sc->nr_reclaimed)
+		return sc->nr_reclaimed;
+
+	/* top priority shrink_zones still had more to do? don't OOM, then */
+	if (scanning_global_lru(sc) && !all_unreclaimable)
+		return 1;
+
+	return 0;
 }
 
 unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
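
The vmscan.c hunks stop threading reclaim progress through ret and instead
compute the return value once, at the end of do_try_to_free_pages():
reclaimed pages are returned if there are any; otherwise a global scan that
still found reclaimable zones returns 1 so the caller does not declare OOM;
only a fully unreclaimable zonelist yields 0. A sketch of that exit logic
with hypothetical names (reclaim_result() is illustrative, not from the
patch):

/* Hypothetical restatement of the new exit logic of do_try_to_free_pages(). */
static unsigned long reclaim_result(unsigned long nr_reclaimed,
				    bool global_scan, bool all_unreclaimable)
{
	if (nr_reclaimed)
		return nr_reclaimed;	/* real progress was made */
	if (global_scan && !all_unreclaimable)
		return 1;		/* zones still scannable: don't OOM */
	return 0;			/* nothing reclaimable anywhere */
}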