author     Ingo Molnar <mingo@kernel.org>  2015-02-26 12:24:50 +0100
committer  Ingo Molnar <mingo@kernel.org>  2015-02-26 12:24:50 +0100
commit     e9e4e44309f866b115d08ab4a54834008c50a8a4
tree       ae9f91e682a4d6592ef263f30a4a0b1a862b7987 /kernel/sched/rt.c
parent     8a26ce4e544659256349551283414df504889a59
parent     c517d838eb7d07bbe9507871fab3931deccff539
Merge tag 'v4.0-rc1' into perf/core, to refresh the tree
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched/rt.c')
 kernel/sched/rt.c | 26 ++++++++++++++++++++++----
 1 file changed, 22 insertions(+), 4 deletions(-)
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index ee15f5a..f4d4b07 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -831,11 +831,14 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 				enqueue = 1;
 
 				/*
-				 * Force a clock update if the CPU was idle,
-				 * lest wakeup -> unthrottle time accumulate.
+				 * When we're idle and a woken (rt) task is
+				 * throttled check_preempt_curr() will set
+				 * skip_update and the time between the wakeup
+				 * and this unthrottle will get accounted as
+				 * 'runtime'.
 				 */
 				if (rt_rq->rt_nr_running && rq->curr == rq->idle)
-					rq->skip_clock_update = -1;
+					rq_clock_skip_update(rq, false);
 			}
 			if (rt_rq->rt_time || rt_rq->rt_nr_running)
 				idle = 0;
@@ -1337,7 +1340,12 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
 	    curr->prio <= p->prio)) {
 		int target = find_lowest_rq(p);
 
-		if (target != -1)
+		/*
+		 * Don't bother moving it if the destination CPU is
+		 * not running a lower priority task.
+		 */
+		if (target != -1 &&
+		    p->prio < cpu_rq(target)->rt.highest_prio.curr)
 			cpu = target;
 	}
 	rcu_read_unlock();
@@ -1614,6 +1622,16 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
 
 		lowest_rq = cpu_rq(cpu);
 
+		if (lowest_rq->rt.highest_prio.curr <= task->prio) {
+			/*
+			 * Target rq has tasks of equal or higher priority,
+			 * retrying does not release any lock and is unlikely
+			 * to yield a different result.
+			 */
+			lowest_rq = NULL;
+			break;
+		}
+
 		/* if the prio of this runqueue changed, try again */
 		if (double_lock_balance(rq, lowest_rq)) {
 			/*
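The first hunk is about clock accounting: when the CPU is idle and a throttled RT task has been woken, check_preempt_curr() leaves a pending "skip the next clock update" request, so the time between the wakeup and the unthrottle would otherwise be charged as runtime. Clearing the request with rq_clock_skip_update(rq, false) forces a fresh clock sample at the unthrottle. Below is a toy userspace model of that hazard; every name in it (toy_rq, toy_clock_update, the constants in main) is invented for illustration and is not a kernel interface:

#include <stdio.h>
#include <stdbool.h>

struct toy_rq {
	unsigned long clock;		/* last sampled time */
	bool skip_next_update;		/* analogue of a pending skip request */
};

static void toy_clock_update(struct toy_rq *rq, unsigned long now)
{
	if (rq->skip_next_update) {
		rq->skip_next_update = false;	/* swallow exactly one update */
		return;				/* clock stays stale */
	}
	rq->clock = now;
}

int main(void)
{
	/* a wakeup at time 100 left a skip request pending; unthrottle at 160 */
	struct toy_rq stale = { .clock = 100, .skip_next_update = true };
	struct toy_rq fixed = { .clock = 100, .skip_next_update = true };

	toy_clock_update(&stale, 160);	/* old behaviour: the update is swallowed */

	fixed.skip_next_update = false;	/* the rq_clock_skip_update(rq, false) analogue */
	toy_clock_update(&fixed, 160);

	printf("gap mis-charged as runtime: %lu\n", 160 - stale.clock);	/* 60 */
	printf("gap charged after the fix:  %lu\n", 160 - fixed.clock);	/* 0 */
	return 0;
}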
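The other two hunks apply one idea in two places: only push an RT task to another CPU when that CPU's runqueue is running something of lower priority. A minimal sketch of the comparison follows, remembering that kernel RT priorities are inverted (a lower prio value means a higher priority); struct model_rq and should_push() are invented for the sketch, only the comparison mirrors the patch:

#include <stdio.h>
#include <stdbool.h>

struct model_rq {
	int highest_prio;	/* prio value of the highest-priority queued task */
};

static bool should_push(const struct model_rq *target, int task_prio)
{
	/* lower numeric value == higher RT priority in the kernel */
	return task_prio < target->highest_prio;
}

int main(void)
{
	struct model_rq busy = { .highest_prio = 10 };	/* running a hot RT task */
	struct model_rq calm = { .highest_prio = 99 };	/* effectively idle */

	printf("push prio-20 task to busy CPU: %s\n",
	       should_push(&busy, 20) ? "yes" : "no");	/* no */
	printf("push prio-20 task to calm CPU: %s\n",
	       should_push(&calm, 20) ? "yes" : "no");	/* yes */
	return 0;
}

find_lock_lowest_rq() uses the inverted test (<=) to break out of its retry loop early: since no lock is dropped between retries, repeating find_lowest_rq() is unlikely to produce a better answer.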