author    Tejun Heo <tj@kernel.org>    2015-05-22 18:23:34 -0400
committer Jens Axboe <axboe@fb.com>    2015-06-02 08:38:13 -0600
commit    2529bb3aadc40a93e642f5f3650f63379a964467 (patch)
tree      ced5264ace40ca3cbb6899233f7caa1bd79a0903
parent    841710aa6e4acd066ab9fe8c8cb6f4e4e6709d83 (diff)
writeback: reset wb_domain->dirty_limit[_tstmp] when memcg domain size changes
The amount of memory available to a memcg wb_domain can change as memcg
configuration changes. A domain's ->dirty_limit exists to smooth out sudden
drops in the dirty threshold; however, when a domain's size actually drops
significantly, it hinders the dirty throttling from adjusting to the new
configuration, leading to unexpected behaviors including unnecessary OOM kills.

This patch resolves the issue by adding wb_domain_size_changed(), which resets
->dirty_limit[_tstmp], and making memcg call it on configuration changes.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Jan Kara <jack@suse.cz>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Cc: Greg Thelen <gthelen@google.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
-rw-r--r--  include/linux/writeback.h  | 20 ++++++++++++++++++++
-rw-r--r--  mm/memcontrol.c            | 12 ++++++++++++
2 files changed, 32 insertions(+), 0 deletions(-)
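Before the diff itself, a minimal userspace sketch of the pattern the patch introduces: when the memory backing a writeback domain shrinks, the cached dirty_limit and its timestamp are cleared under the domain lock so throttling recomputes them from the new size instead of smoothing from stale, larger values. This is an illustration only, not kernel code; the struct name, the pthread mutex standing in for the kernel spinlock, and time() standing in for jiffies are all assumptions of the sketch.

/*
 * Userspace sketch only -- mirrors the reset pattern added by this patch.
 * pthread_mutex_t stands in for the kernel spinlock and time() for jiffies.
 */
#include <pthread.h>
#include <stdio.h>
#include <time.h>

struct wb_domain_sketch {
	pthread_mutex_t lock;
	unsigned long dirty_limit;	/* cached, smoothed dirty limit */
	time_t dirty_limit_tstamp;	/* when the cached limit was last updated */
};

static void wb_domain_size_changed_sketch(struct wb_domain_sketch *dom)
{
	pthread_mutex_lock(&dom->lock);
	dom->dirty_limit_tstamp = time(NULL);	/* restart the smoothing window */
	dom->dirty_limit = 0;			/* force recomputation from the new size */
	pthread_mutex_unlock(&dom->lock);
}

int main(void)
{
	struct wb_domain_sketch dom = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.dirty_limit = 262144,		/* stale limit from a larger configuration */
	};

	/* e.g. the cgroup's memory.high was lowered: drop the stale cache */
	wb_domain_size_changed_sketch(&dom);
	printf("dirty_limit after reset: %lu\n", dom.dirty_limit);
	return 0;
}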
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 04a3786..3b73e97 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -132,6 +132,26 @@ struct wb_domain {
unsigned long dirty_limit;
};
+/**
+ * wb_domain_size_changed - memory available to a wb_domain has changed
+ * @dom: wb_domain of interest
+ *
+ * This function should be called when the amount of memory available to
+ * @dom has changed. It resets @dom's dirty limit parameters to prevent
+ * the past values which don't match the current configuration from skewing
+ * dirty throttling. Without this, when memory size of a wb_domain is
+ * greatly reduced, the dirty throttling logic may allow too many pages to
+ * be dirtied leading to consecutive unnecessary OOMs and may get stuck in
+ * that situation.
+ */
+static inline void wb_domain_size_changed(struct wb_domain *dom)
+{
+ spin_lock(&dom->lock);
+ dom->dirty_limit_tstamp = jiffies;
+ dom->dirty_limit = 0;
+ spin_unlock(&dom->lock);
+}
+
/*
* fs/fs-writeback.c
*/
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index ce113dd..c0b0406 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -4005,6 +4005,11 @@ static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
wb_domain_exit(&memcg->cgwb_domain);
}
+static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
+{
+ wb_domain_size_changed(&memcg->cgwb_domain);
+}
+
struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
{
struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
@@ -4026,6 +4031,10 @@ static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
{
}
+static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
+{
+}
+
#endif /* CONFIG_CGROUP_WRITEBACK */
/*
@@ -4624,6 +4633,7 @@ static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
memcg->low = 0;
memcg->high = PAGE_COUNTER_MAX;
memcg->soft_limit = PAGE_COUNTER_MAX;
+ memcg_wb_domain_size_changed(memcg);
}
#ifdef CONFIG_MMU
@@ -5361,6 +5371,7 @@ static ssize_t memory_high_write(struct kernfs_open_file *of,
memcg->high = high;
+ memcg_wb_domain_size_changed(memcg);
return nbytes;
}
@@ -5393,6 +5404,7 @@ static ssize_t memory_max_write(struct kernfs_open_file *of,
if (err)
return err;
+ memcg_wb_domain_size_changed(memcg);
return nbytes;
}
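As a usage note on the memory_high_write() hook above, a hypothetical userspace snippet that exercises the new reset path by lowering memory.high on a cgroup2 group; the cgroup path is an assumption and depends on where the cgroup2 hierarchy is mounted and which group is used.

/*
 * Hypothetical trigger from userspace: writing a lower value to a cgroup2
 * group's memory.high lands in memory_high_write() above, which now also
 * calls memcg_wb_domain_size_changed(). The path below is an assumption;
 * adjust it to the local cgroup2 mount point and group name.
 */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/fs/cgroup/mygroup/memory.high";	/* assumed path */
	FILE *f = fopen(path, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fprintf(f, "%llu\n", 256ULL << 20);	/* shrink the limit to 256 MiB */
	fclose(f);
	return 0;
}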