-rw-r--r--  include/linux/init_task.h |  1
-rw-r--r--  include/linux/sched.h     |  2
-rw-r--r--  kernel/fork.c             | 10
-rw-r--r--  mm/page-writeback.c       | 50
4 files changed, 62 insertions(+), 1 deletion(-)
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 513bc3e..3a619f5 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -171,6 +171,7 @@ extern struct group_info init_groups;
[PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \
[PIDTYPE_SID] = INIT_PID_LINK(PIDTYPE_SID), \
}, \
+ .dirties = INIT_PROP_LOCAL_SINGLE(dirties), \
INIT_TRACE_IRQFLAGS \
INIT_LOCKDEP \
}
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 592e3a5..59738ef 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -74,6 +74,7 @@ struct sched_param {
#include <linux/pid.h>
#include <linux/percpu.h>
#include <linux/topology.h>
+#include <linux/proportions.h>
#include <linux/seccomp.h>
#include <linux/rcupdate.h>
#include <linux/futex.h>
@@ -1149,6 +1150,7 @@ struct task_struct {
#ifdef CONFIG_FAULT_INJECTION
int make_it_fail;
#endif
+ struct prop_local_single dirties;
};
/*
diff --git a/kernel/fork.c b/kernel/fork.c
index 3fc3c13..163325a 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -107,6 +107,7 @@ static struct kmem_cache *mm_cachep;
void free_task(struct task_struct *tsk)
{
+ prop_local_destroy_single(&tsk->dirties);
free_thread_info(tsk->stack);
rt_mutex_debug_task_free(tsk);
free_task_struct(tsk);
@@ -163,6 +164,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
{
struct task_struct *tsk;
struct thread_info *ti;
+ int err;
prepare_to_copy(orig);
@@ -178,6 +180,14 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
*tsk = *orig;
tsk->stack = ti;
+
+ err = prop_local_init_single(&tsk->dirties);
+ if (err) {
+ free_thread_info(ti);
+ free_task_struct(tsk);
+ return NULL;
+ }
+
setup_thread_stack(tsk, orig);
#ifdef CONFIG_CC_STACKPROTECTOR
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index b036054..4073d53 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -118,6 +118,7 @@ static void background_writeout(unsigned long _min_pages);
*
*/
static struct prop_descriptor vm_completions;
+static struct prop_descriptor vm_dirties;
static unsigned long determine_dirtyable_memory(void);
@@ -146,6 +147,7 @@ int dirty_ratio_handler(struct ctl_table *table, int write,
if (ret == 0 && write && vm_dirty_ratio != old_ratio) {
int shift = calc_period_shift();
prop_change_shift(&vm_completions, shift);
+ prop_change_shift(&vm_dirties, shift);
}
return ret;
}
@@ -159,6 +161,11 @@ static inline void __bdi_writeout_inc(struct backing_dev_info *bdi)
__prop_inc_percpu(&vm_completions, &bdi->completions);
}
+static inline void task_dirty_inc(struct task_struct *tsk)
+{
+ prop_inc_single(&vm_dirties, &tsk->dirties);
+}
+
/*
* Obtain an accurate fraction of the BDI's portion.
*/
@@ -198,6 +205,37 @@ clip_bdi_dirty_limit(struct backing_dev_info *bdi, long dirty, long *pbdi_dirty)
*pbdi_dirty = min(*pbdi_dirty, avail_dirty);
}
+static inline void task_dirties_fraction(struct task_struct *tsk,
+ long *numerator, long *denominator)
+{
+ prop_fraction_single(&vm_dirties, &tsk->dirties,
+ numerator, denominator);
+}
+
+/*
+ * scale the dirty limit
+ *
+ * task specific dirty limit:
+ *
+ * dirty -= (dirty/8) * p_{t}
+ */
+void task_dirty_limit(struct task_struct *tsk, long *pdirty)
+{
+ long numerator, denominator;
+ long dirty = *pdirty;
+ u64 inv = dirty >> 3;
+
+ task_dirties_fraction(tsk, &numerator, &denominator);
+ inv *= numerator;
+ do_div(inv, denominator);
+
+ dirty -= inv;
+ if (dirty < *pdirty/2)
+ dirty = *pdirty/2;
+
+ *pdirty = dirty;
+}
+
/*
* Work out the current dirty-memory clamping and background writeout
* thresholds.
@@ -304,6 +342,7 @@ get_dirty_limits(long *pbackground, long *pdirty, long *pbdi_dirty,
*pbdi_dirty = bdi_dirty;
clip_bdi_dirty_limit(bdi, dirty, pbdi_dirty);
+ task_dirty_limit(current, pbdi_dirty);
}
}
@@ -720,6 +759,7 @@ void __init page_writeback_init(void)
shift = calc_period_shift();
prop_descriptor_init(&vm_completions, shift);
+ prop_descriptor_init(&vm_dirties, shift);
}
/**
@@ -998,7 +1038,7 @@ EXPORT_SYMBOL(redirty_page_for_writepage);
* If the mapping doesn't provide a set_page_dirty a_op, then
* just fall through and assume that it wants buffer_heads.
*/
-int fastcall set_page_dirty(struct page *page)
+static int __set_page_dirty(struct page *page)
{
struct address_space *mapping = page_mapping(page);
@@ -1016,6 +1056,14 @@ int fastcall set_page_dirty(struct page *page)
}
return 0;
}
+
+int fastcall set_page_dirty(struct page *page)
+{
+ int ret = __set_page_dirty(page);
+ if (ret)
+ task_dirty_inc(current);
+ return ret;
+}
EXPORT_SYMBOL(set_page_dirty);
/*
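
A minimal userspace sketch (not part of the patch) of the arithmetic that the new task_dirty_limit() performs above; the numerator/denominator pair stands in for what prop_fraction_single(&vm_dirties, &tsk->dirties, ...) would report for the task, and all input values are made up for illustration.

/*
 * Sketch of the per-task dirty limit scaling:
 *
 *	dirty -= (dirty/8) * p_{t}
 *
 * clamped so no task is pushed below half the global limit.
 */
#include <stdio.h>

static long task_dirty_limit_sketch(long dirty, long numerator, long denominator)
{
	long limit = dirty;
	unsigned long long inv = (unsigned long long)(dirty >> 3);

	inv *= numerator;		/* dirty/8 scaled by the task's share p_t */
	inv /= denominator;		/* plain division stands in for do_div() */

	limit -= (long)inv;
	if (limit < dirty / 2)		/* never throttle a task below half the limit */
		limit = dirty / 2;

	return limit;
}

int main(void)
{
	long dirty = 8000;		/* hypothetical global dirty limit, in pages */

	/* a task that did ~all of the recent dirtying loses the full dirty/8 */
	printf("heavy dirtier: %ld\n", task_dirty_limit_sketch(dirty, 1, 1));
	/* a task responsible for ~10% of recent dirtying loses far less */
	printf("light dirtier: %ld\n", task_dirty_limit_sketch(dirty, 1, 10));
	return 0;
}

With these inputs the heavy dirtier ends up with a limit of 7000 pages and the light dirtier with 7900, which is the intended effect: tasks that dirty the most pages hit their threshold first.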