author    J. Bruce Fields <bfields@redhat.com>  2010-12-17 13:29:07 -0500
committer J. Bruce Fields <bfields@redhat.com>  2010-12-17 13:29:07 -0500
commit    ec66ee3797e5848356cf593c6ec7aabf30a00cf1 (patch)
tree      7ed5c84cc914644ffa1cd1b6a2b45db53fc224e8 /kernel/sched.c
parent    1205065764f2eda3216ebe213143f69891ee3460 (diff)
parent    b0c3844d8af6b9f3f18f31e1b0502fbefa2166be (diff)
Merge commit 'v2.6.37-rc6' into for-2.6.38
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  39
1 file changed, 28 insertions(+), 11 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index aa14a56..dc91a4d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -560,18 +560,8 @@ struct rq {
static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
-static inline
-void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
-{
- rq->curr->sched_class->check_preempt_curr(rq, p, flags);
- /*
- * A queue event has occurred, and we're going to schedule. In
- * this case, we can save a useless back to back clock update.
- */
- if (test_tsk_need_resched(p))
- rq->skip_clock_update = 1;
-}
+static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
static inline int cpu_of(struct rq *rq)
{
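(Note: this hunk leaves only a forward declaration behind; the full definition is re-added further down, after check_class_changed(), presumably so the new body can use for_each_class(), which sched.c defines later in the file.)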
@@ -2118,6 +2108,31 @@ static inline void check_class_changed(struct rq *rq, struct task_struct *p,
p->sched_class->prio_changed(rq, p, oldprio, running);
}
+static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
+{
+ const struct sched_class *class;
+
+ if (p->sched_class == rq->curr->sched_class) {
+ rq->curr->sched_class->check_preempt_curr(rq, p, flags);
+ } else {
+ for_each_class(class) {
+ if (class == rq->curr->sched_class)
+ break;
+ if (class == p->sched_class) {
+ resched_task(rq->curr);
+ break;
+ }
+ }
+ }
+
+ /*
+ * A queue event has occurred, and we're going to schedule. In
+ * this case, we can save a useless back to back clock update.
+ */
+ if (test_tsk_need_resched(rq->curr))
+ rq->skip_clock_update = 1;
+}
+
#ifdef CONFIG_SMP
/*
* Is this task likely cache-hot:
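For context, the cross-class branch added above relies on for_each_class() walking the scheduling classes from highest to lowest priority, so preemption is signalled only when the waking task's class is reached before the current task's; note also that the final check now tests rq->curr rather than p. Below is a minimal, self-contained user-space sketch of that priority walk, assuming a simplified three-class chain; the class list, the names, and the higher_class() helper are hypothetical stand-ins for illustration, not kernel code.

/*
 * Sketch of the class-priority walk: classes are chained from highest
 * to lowest priority, mirroring for_each_class() in sched.c.
 */
#include <stdio.h>

struct sched_class {
	const char *name;
	const struct sched_class *next;	/* next lower-priority class */
};

static const struct sched_class idle_class = { "idle", NULL };
static const struct sched_class fair_class = { "fair", &idle_class };
static const struct sched_class rt_class   = { "rt",   &fair_class };
static const struct sched_class *highest   = &rt_class;

#define for_each_class(class) \
	for (class = highest; class; class = (class)->next)

/* Returns 1 if a task of class p should preempt a task of class curr. */
static int higher_class(const struct sched_class *p,
			const struct sched_class *curr)
{
	const struct sched_class *class;

	for_each_class(class) {
		if (class == curr)
			return 0;	/* current's class found first: no preemption */
		if (class == p)
			return 1;	/* waking task's class ranks higher */
	}
	return 0;
}

int main(void)
{
	printf("rt preempts fair: %d\n", higher_class(&rt_class, &fair_class));
	printf("fair preempts rt: %d\n", higher_class(&fair_class, &rt_class));
	return 0;
}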
@@ -6960,6 +6975,8 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd)
if (cpu != group_first_cpu(sd->groups))
return;
+ sd->groups->group_weight = cpumask_weight(sched_group_cpus(sd->groups));
+
child = sd->child;
sd->groups->cpu_power = 0;
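The last hunk caches the group's CPU count in sd->groups->group_weight at domain-init time, which in effect is a population count over the group's CPU mask, computed once here instead of on every scheduling decision. A minimal sketch of that idea, assuming a plain 64-bit mask in place of struct cpumask and a hand-rolled popcount in place of cpumask_weight():

/*
 * Count the set bits in a CPU mask (Kernighan's method: each pass
 * clears the lowest set bit).
 */
#include <stdint.h>
#include <stdio.h>

static unsigned int mask_weight(uint64_t mask)
{
	unsigned int w = 0;

	while (mask) {
		mask &= mask - 1;	/* clear lowest set bit */
		w++;
	}
	return w;
}

int main(void)
{
	uint64_t group_cpus = 0xF0;	/* CPUs 4-7 in this group */
	unsigned int group_weight = mask_weight(group_cpus);

	printf("group_weight = %u\n", group_weight);	/* prints 4 */
	return 0;
}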