| author | Ingo Molnar <mingo@elte.hu> | 2008-07-24 08:09:26 +0200 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2008-07-24 08:09:26 +0200 |
| commit | 28afe961a18f77b2249062499bdbf70fd2ec6bba (patch) | |
| tree | 71a5cb32924b8c8256bbc0f2f81c6b8c2ac79108 /kernel/sched_fair.c | |
| parent | 1e01cb0c6ff7e9ddb6547551794c6aa82785a7cb (diff) | |
| parent | 338b9bb3adac0d2c5a1e180491d9b001d624c402 (diff) | |
| download | op-kernel-dev-28afe961a18f77b2249062499bdbf70fd2ec6bba.zip op-kernel-dev-28afe961a18f77b2249062499bdbf70fd2ec6bba.tar.gz | |
Merge branch 'linus' into tracing/urgent
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r-- | kernel/sched_fair.c | 10
1 file changed, 6 insertions, 4 deletions
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index f2aa987..cf2cd6c 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -878,7 +878,6 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
 #ifdef CONFIG_SCHED_HRTICK
 static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
 {
-	int requeue = rq->curr == p;
 	struct sched_entity *se = &p->se;
 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
 
@@ -899,10 +898,10 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
 		 * Don't schedule slices shorter than 10000ns, that just
 		 * doesn't make sense. Rely on vruntime for fairness.
 		 */
-		if (!requeue)
+		if (rq->curr != p)
 			delta = max(10000LL, delta);
 
-		hrtick_start(rq, delta, requeue);
+		hrtick_start(rq, delta);
 	}
 }
 #else /* !CONFIG_SCHED_HRTICK */
@@ -1004,6 +1003,8 @@ static void yield_task_fair(struct rq *rq)
  * not idle and an idle cpu is available. The span of cpus to
  * search starts with cpus closest then further out as needed,
  * so we always favor a closer, idle cpu.
+ * Domains may include CPUs that are not usable for migration,
+ * hence we need to mask them out (cpu_active_map)
  *
  * Returns the CPU we should wake onto.
  */
@@ -1031,7 +1032,8 @@ static int wake_idle(int cpu, struct task_struct *p)
 		    || ((sd->flags & SD_WAKE_IDLE_FAR)
 			&& !task_hot(p, task_rq(p)->clock, sd))) {
 			cpus_and(tmp, sd->span, p->cpus_allowed);
-			for_each_cpu_mask(i, tmp) {
+			cpus_and(tmp, tmp, cpu_active_map);
+			for_each_cpu_mask_nr(i, tmp) {
 				if (idle_cpu(i)) {
 					if (i != task_cpu(p)) {
 						schedstat_inc(p,
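For context on the wake_idle() hunks above: the candidate cpumask is now additionally ANDed with cpu_active_map before the idle scan, so a CPU that still appears in the domain span but is on its way offline is never picked as a wake target. Below is a minimal userspace sketch of that mask-then-iterate pattern (plain C, not kernel code; pick_idle_cpu, the for_each_set_cpu macro, and the example masks are all hypothetical names made up for illustration).

```c
#include <stdio.h>
#include <stdint.h>

#define MAX_CPUS 64

/* Loosely mirrors for_each_cpu_mask_nr(): visit each set bit in the mask. */
#define for_each_set_cpu(cpu, mask) \
	for ((cpu) = 0; (cpu) < MAX_CPUS; (cpu)++) \
		if ((mask) & (1ULL << (cpu)))

/* Pretend CPUs 2 and 5 are idle. */
static int idle_cpu(int cpu)
{
	return cpu == 2 || cpu == 5;
}

/*
 * Hypothetical stand-in for the wake_idle() scan: span is the sched-domain
 * mask, allowed is the task's affinity mask, active is the set of CPUs that
 * may still receive tasks (the role cpu_active_map plays in the patch).
 */
static int pick_idle_cpu(uint64_t span, uint64_t allowed, uint64_t active,
			 int current_cpu)
{
	uint64_t tmp = span & allowed;	/* cpus_and(tmp, sd->span, p->cpus_allowed) */
	int i;

	tmp &= active;			/* cpus_and(tmp, tmp, cpu_active_map) */

	for_each_set_cpu(i, tmp) {
		if (idle_cpu(i) && i != current_cpu)
			return i;
	}
	return current_cpu;
}

int main(void)
{
	uint64_t span    = 0xffULL;		   /* CPUs 0-7 in the domain  */
	uint64_t allowed = 0xffULL;		   /* task may run on any CPU */
	uint64_t active  = 0xffULL & ~(1ULL << 2); /* CPU 2 is going offline  */

	/* CPU 2 is idle but not active, so the scan settles on CPU 5. */
	printf("wake onto CPU %d\n", pick_idle_cpu(span, allowed, active, 0));
	return 0;
}
```

In the kernel itself the extra masking is the added cpus_and(tmp, tmp, cpu_active_map) line, and the switch from for_each_cpu_mask() to for_each_cpu_mask_nr() bounds the walk by nr_cpu_ids rather than the compile-time NR_CPUS.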