author		Peter Zijlstra <peterz@infradead.org>	2014-01-23 20:32:21 +0100
committer	Ingo Molnar <mingo@kernel.org>	2014-02-11 09:58:10 +0100
commit		38033c37faab850ed5d33bb675c4de6c66be84d8 (patch)
tree		7a00530a9a1346f29f8899ff949bf07a9e7db7ee /kernel/sched/core.c
parent		6c3b4d44ba2838f00614a5a2d777d4401e0bfd71 (diff)
sched: Push down pre_schedule() and idle_balance()
This patch merges idle_balance() and pre_schedule() and pushes both of them into pick_next_task().

Conceptually pre_schedule() and idle_balance() are rather similar: both are used to pull more work onto the current CPU.

We cannot, however, simply move idle_balance() into pre_schedule_fair(), since there is no guarantee the last runnable task is a fair task, and thus we would miss newidle balances.

Similarly, the dl and rt pre_schedule calls must be run before idle_balance(), since their respective tasks have higher priority and it would not do to delay their execution by searching for less important tasks first.

However, by noticing that pick_next_task() already traverses the sched_class hierarchy in the right order, we can get the right behaviour and do away with both calls.

We must, however, change the special-case optimization to also require that prev is of the fair sched class (fair_sched_class), otherwise we can miss doing a dl or rt pull where we needed one.

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/n/tip-a8k6vvaebtn64nie345kx1je@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
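For illustration, a small self-contained userspace C model (not kernel code; the class names and the pick_next_task() name are only borrowed from the kernel) of why traversing the scheduling classes in priority order makes separate pre_schedule()/idle_balance() calls unnecessary: each class's picker can do its own pull work before answering, and higher-priority classes are always asked first.

/*
 * Toy userspace model of the idea in this commit: walk the classes
 * from highest to lowest priority and let each ->pick_next_task()
 * do its own "pull more work onto this CPU" step.
 */
#include <stdio.h>
#include <stddef.h>

struct task { const char *name; };

struct sched_class {
	const char *name;
	/* may pull work, then return a task or NULL if the class is empty */
	struct task *(*pick_next_task)(void);
};

static struct task *pick_dl(void)   { /* a real dl class would pull dl work here   */ return NULL; }
static struct task *pick_rt(void)   { /* a real rt class would pull rt work here   */ return NULL; }
static struct task *pick_fair(void) { /* a real fair class would newidle-balance here */ return NULL; }

static struct task idle_task = { "swapper" };
static struct task *pick_idle(void) { return &idle_task; }

/* highest to lowest priority, mirroring the kernel's class ordering */
static const struct sched_class classes[] = {
	{ "dl",   pick_dl },
	{ "rt",   pick_rt },
	{ "fair", pick_fair },
	{ "idle", pick_idle },
};

static struct task *pick_next_task(void)
{
	for (size_t i = 0; i < sizeof(classes) / sizeof(classes[0]); i++) {
		struct task *p = classes[i].pick_next_task();
		if (p)
			return p;
	}
	return NULL; /* unreachable: the idle class always has a task */
}

int main(void)
{
	printf("next task: %s\n", pick_next_task()->name);
	return 0;
}

In the real kernel the equivalent loop is the for_each_class() walk inside pick_next_task(), with the stop and idle classes at the two ends of the hierarchy.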
Diffstat (limited to 'kernel/sched/core.c')
-rw-r--r--	kernel/sched/core.c	26
1 file changed, 2 insertions(+), 24 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index dedb5f0..3068f37 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2169,13 +2169,6 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
#ifdef CONFIG_SMP
-/* assumes rq->lock is held */
-static inline void pre_schedule(struct rq *rq, struct task_struct *prev)
-{
- if (prev->sched_class->pre_schedule)
- prev->sched_class->pre_schedule(rq, prev);
-}
-
/* rq->lock is NOT held, but preemption is disabled */
static inline void post_schedule(struct rq *rq)
{
@@ -2193,10 +2186,6 @@ static inline void post_schedule(struct rq *rq)
#else
-static inline void pre_schedule(struct rq *rq, struct task_struct *p)
-{
-}
-
static inline void post_schedule(struct rq *rq)
{
}
@@ -2592,7 +2581,8 @@ pick_next_task(struct rq *rq, struct task_struct *prev)
* Optimization: we know that if all tasks are in
* the fair class we can call that function directly:
*/
- if (likely(rq->nr_running == rq->cfs.h_nr_running)) {
+ if (likely(prev->sched_class == &fair_sched_class &&
+ rq->nr_running == rq->cfs.h_nr_running)) {
p = fair_sched_class.pick_next_task(rq, prev);
if (likely(p))
return p;
@@ -2695,18 +2685,6 @@ need_resched:
switch_count = &prev->nvcsw;
}
- pre_schedule(rq, prev);
-
- if (unlikely(!rq->nr_running)) {
- /*
- * We must set idle_stamp _before_ calling idle_balance(), such
- * that we measure the duration of idle_balance() as idle time.
- */
- rq->idle_stamp = rq_clock(rq);
- if (idle_balance(rq))
- rq->idle_stamp = 0;
- }
-
if (prev->on_rq || rq->skip_clock_update < 0)
update_rq_clock(rq);
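A second toy model (again userspace C, not the kernel source; the rq fields and class names merely mirror the kernel's) of the fast-path change in the pick_next_task() hunk above: the short-circuit to the fair class is now taken only when prev itself was a fair task, because an rt or dl prev needs its class's pick_next_task() to run so the pull that used to live in pre_schedule() still happens.

/*
 * Toy model of the new fast-path condition:
 *   prev->sched_class == &fair_sched_class &&
 *   rq->nr_running == rq->cfs.h_nr_running
 */
#include <stdbool.h>
#include <stdio.h>

enum sched_class_id { CLASS_DL, CLASS_RT, CLASS_FAIR, CLASS_IDLE };

struct rq_model {
	int nr_running;       /* all runnable tasks on this runqueue        */
	int cfs_h_nr_running; /* runnable tasks belonging to the fair class */
};

static bool can_use_fair_fast_path(const struct rq_model *rq,
				   enum sched_class_id prev_class)
{
	return prev_class == CLASS_FAIR &&
	       rq->nr_running == rq->cfs_h_nr_running;
}

int main(void)
{
	struct rq_model rq = { .nr_running = 3, .cfs_h_nr_running = 3 };

	/* prev was fair: safe to call the fair picker directly */
	printf("prev fair: fast path = %d\n", can_use_fair_fast_path(&rq, CLASS_FAIR));

	/* prev was rt: must walk all classes so rt/dl can pull work first */
	printf("prev rt:   fast path = %d\n", can_use_fair_fast_path(&rq, CLASS_RT));
	return 0;
}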