| field | value | date |
|---|---|---|
| author | Paul Mackerras <paulus@samba.org> | 2008-11-12 08:43:22 +1100 |
| committer | Paul Mackerras <paulus@samba.org> | 2008-11-12 08:43:22 +1100 |
| commit | 486936cd93e99c802153b3f2f629c5ce62b8c0d4 | |
| tree | 51e261a96e1fb6b51d4a6afb92bfc2480e150b6c /kernel/sched.c | |
| parent | 1c1b777a5673b57a6c0377ba60a790d05e4a0676 | |
| parent | f21f237cf55494c3a4209de323281a3b0528da10 | |
Merge branch 'linux-2.6' into next
Diffstat (limited to 'kernel/sched.c')
-rw-r--r-- | kernel/sched.c | 23

1 file changed, 18 insertions, 5 deletions
```diff
diff --git a/kernel/sched.c b/kernel/sched.c
index e8819bc..50a21f9 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -397,9 +397,9 @@ struct cfs_rq {
 	 * 'curr' points to currently running entity on this cfs_rq.
 	 * It is set to NULL otherwise (i.e when none are currently running).
 	 */
-	struct sched_entity *curr, *next;
+	struct sched_entity *curr, *next, *last;
 
-	unsigned long nr_spread_over;
+	unsigned int nr_spread_over;
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	struct rq *rq;	/* cpu runqueue to which this cfs_rq is attached */
@@ -969,6 +969,14 @@ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
 	}
 }
 
+void task_rq_unlock_wait(struct task_struct *p)
+{
+	struct rq *rq = task_rq(p);
+
+	smp_mb(); /* spin-unlock-wait is not a full memory barrier */
+	spin_unlock_wait(&rq->lock);
+}
+
 static void __task_rq_unlock(struct rq *rq)
 	__releases(rq->lock)
 {
@@ -1805,7 +1813,9 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
 	/*
 	 * Buddy candidates are cache hot:
 	 */
-	if (sched_feat(CACHE_HOT_BUDDY) && (&p->se == cfs_rq_of(&p->se)->next))
+	if (sched_feat(CACHE_HOT_BUDDY) &&
+			(&p->se == cfs_rq_of(&p->se)->next ||
+			 &p->se == cfs_rq_of(&p->se)->last))
 		return 1;
 
 	if (p->sched_class != &fair_sched_class)
@@ -6875,15 +6885,17 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
 	struct sched_domain *tmp;
 
 	/* Remove the sched domains which do not contribute to scheduling. */
-	for (tmp = sd; tmp; tmp = tmp->parent) {
+	for (tmp = sd; tmp; ) {
 		struct sched_domain *parent = tmp->parent;
 		if (!parent)
 			break;
+
 		if (sd_parent_degenerate(tmp, parent)) {
 			tmp->parent = parent->parent;
 			if (parent->parent)
 				parent->parent->child = tmp;
-		}
+		} else
+			tmp = tmp->parent;
 	}
 
 	if (sd && sd_degenerate(sd)) {
@@ -7672,6 +7684,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 error:
 	free_sched_groups(cpu_map, tmpmask);
 	SCHED_CPUMASK_FREE((void *)allmasks);
+	kfree(rd);
 	return -ENOMEM;
 #endif
 }
```
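Two hunks here are worth unpacking. The new `task_rq_unlock_wait()` helper lets code outside the scheduler wait until no CPU still holds a task's runqueue lock before tearing down per-task state; the explicit `smp_mb()` is needed because, as the in-diff comment notes, `spin_unlock_wait()` is not a full memory barrier. Below is a minimal caller sketch; `cleanup_task_state()` and `free_task_state()` are hypothetical names for illustration, not functions added by this patch:

```c
#include <linux/sched.h>

/*
 * Hypothetical caller sketch (not part of this patch): before freeing
 * per-task state that another CPU may still reference under rq->lock,
 * wait for any such critical section to finish.
 */
static void cleanup_task_state(struct task_struct *tsk)
{
	/*
	 * A remote CPU in schedule() may hold the task's runqueue lock and
	 * still be touching tsk's accounting fields. task_rq_unlock_wait()
	 * spins until that lock is released; its internal smp_mb() orders
	 * our prior stores against the spin_unlock_wait().
	 */
	task_rq_unlock_wait(tsk);

	free_task_state(tsk);	/* hypothetical: now safe to free */
}
```

The `cpu_attach_domain()` hunk fixes the loop that prunes degenerate sched domains. Previously, after a degenerate parent was spliced out, the loop advanced anyway, so the newly attached parent was never re-tested against the same node and a run of consecutive degenerate parents could survive. The new form advances only when nothing was removed. Here is the same pattern reduced to a standalone toy chain; `struct node` and its fields are illustrative stand-ins, not the kernel's `struct sched_domain`:

```c
/* Toy stand-in for a parent-linked sched_domain chain. */
struct node {
	struct node *parent;
	int degenerate;		/* nonzero if this node contributes nothing */
};

/* Splice every degenerate parent out of the chain above n. */
static void collapse_chain(struct node *n)
{
	struct node *tmp;

	for (tmp = n; tmp; ) {
		struct node *parent = tmp->parent;

		if (!parent)
			break;

		if (parent->degenerate)
			/* Unlink parent, then re-test tmp's new parent. */
			tmp->parent = parent->parent;
		else
			/* Advance only past nodes we decided to keep. */
			tmp = tmp->parent;
	}
}
```

The final hunk is a straightforward leak fix: the error path of `__build_sched_domains()` now frees the `root_domain` (`rd`) allocated earlier in the function before returning `-ENOMEM`, instead of leaking it.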