author		Peter Zijlstra <peterz@infradead.org>	2009-09-16 12:31:31 +0200
committer	Ingo Molnar <mingo@elte.hu>	2009-09-17 10:17:25 +0200
commit		ad4b78bbcbab66998b05d422ac6106b645796e54 (patch)
tree		45f3561f4bd6b886948a3b0eea64edab9bab9eda /kernel/sched.c
parent		eb24073bc1fe3e569a855cf38d529fb650c35524 (diff)
sched: Add new wakeup preemption mode: WAKEUP_RUNNING
Create a new wakeup preemption mode, preempt towards tasks that run
shorter on avg. It sets next buddy to be sure we actually run the task
we preempted for.

Test results:

root@twins:~# while :; do :; done &
[1] 6537
root@twins:~# while :; do :; done &
[2] 6538
root@twins:~# while :; do :; done &
[3] 6539
root@twins:~# while :; do :; done &
[4] 6540
root@twins:/home/peter# ./latt -c4 sleep 4
Entries: 48 (clients=4)

Averages:
------------------------------
	Max	4750 usec
	Avg	 497 usec
	Stdev	 737 usec

root@twins:/home/peter# echo WAKEUP_RUNNING > /debug/sched_features

root@twins:/home/peter# ./latt -c4 sleep 4
Entries: 48 (clients=4)

Averages:
------------------------------
	Max	  14 usec
	Avg	   5 usec
	Stdev	   3 usec

Disabled by default - needs more testing.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
LKML-Reference: <new-submission>
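The kernel/sched.c diff below only shows the bookkeeping half of the change;
the preemption decision itself lands in kernel/sched_fair.c, which this page
does not display. As a rough sketch of the policy the message describes,
assuming the usual naming where se is the current task's scheduling entity
and pse the freshly woken one (the helper function is hypothetical;
WAKEUP_RUNNING, avg_running and set_next_buddy() are named by the commit):

/*
 * Sketch of the WAKEUP_RUNNING wakeup-preemption test (hypothetical
 * helper, not the verbatim kernel/sched_fair.c hunk): preempt when the
 * woken entity runs shorter on average than the current one, and mark
 * it as next buddy so it is actually the one picked next.
 */
static int wakeup_running_preempt(struct sched_entity *se,
				  struct sched_entity *pse)
{
	if (!sched_feat(WAKEUP_RUNNING))
		return 0;

	if (pse->avg_running < se->avg_running) {
		set_next_buddy(pse);	/* run the task we preempted for */
		return 1;
	}

	return 0;
}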
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	17
1 file changed, 10 insertions(+), 7 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 969dfae..3bb4ea2 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2458,6 +2458,7 @@ static void __sched_fork(struct task_struct *p)
 	p->se.avg_overlap		= 0;
 	p->se.start_runtime		= 0;
 	p->se.avg_wakeup		= sysctl_sched_wakeup_granularity;
+	p->se.avg_running		= 0;
 
 #ifdef CONFIG_SCHEDSTATS
 	p->se.wait_start		= 0;
@@ -5310,14 +5311,13 @@ static inline void schedule_debug(struct task_struct *prev)
 #endif
 }
 
-static void put_prev_task(struct rq *rq, struct task_struct *prev)
+static void put_prev_task(struct rq *rq, struct task_struct *p)
 {
-	if (prev->state == TASK_RUNNING) {
-		u64 runtime = prev->se.sum_exec_runtime;
+	u64 runtime = p->se.sum_exec_runtime - p->se.prev_sum_exec_runtime;
 
-		runtime -= prev->se.prev_sum_exec_runtime;
-		runtime = min_t(u64, runtime, 2*sysctl_sched_migration_cost);
+	update_avg(&p->se.avg_running, runtime);
 
+	if (p->state == TASK_RUNNING) {
 		/*
 		 * In order to avoid avg_overlap growing stale when we are
 		 * indeed overlapping and hence not getting put to sleep, grow
@@ -5327,9 +5327,12 @@ static void put_prev_task(struct rq *rq, struct task_struct *prev)
 		 * correlates to the amount of cache footprint a task can
 		 * build up.
 		 */
-		update_avg(&prev->se.avg_overlap, runtime);
+		runtime = min_t(u64, runtime, 2*sysctl_sched_migration_cost);
+		update_avg(&p->se.avg_overlap, runtime);
+	} else {
+		update_avg(&p->se.avg_running, 0);
 	}
-	prev->sched_class->put_prev_task(rq, prev);
+	p->sched_class->put_prev_task(rq, p);
 }
 
 /*
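Both update_avg() calls in the new put_prev_task() feed a simple
exponentially decaying average that weights each new sample 1/8; the helper
predates this patch in kernel/sched.c and looks essentially as follows
(reproduced from the contemporaneous source as a sketch, not quoted from
this diff):

/* Decaying average: *avg moves 1/8 of the way towards each sample. */
static void update_avg(u64 *avg, u64 sample)
{
	s64 diff = sample - *avg;

	*avg += diff >> 3;
}

So avg_running is now sampled on every deschedule: with the slice just
consumed while the task is still runnable, and with a 0 sample when it
blocks, which decays the average of frequently sleeping tasks towards zero
and lets them win the avg_running comparison sketched above. The min_t()
clamp moves inside the TASK_RUNNING branch because it should only bound the
avg_overlap sample, not the raw runtime fed to avg_running.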