author     Ingo Molnar <mingo@elte.hu>    2007-08-02 17:41:40 +0200
committer  Ingo Molnar <mingo@elte.hu>    2007-08-02 17:41:40 +0200
commit     cad60d93e18ba52b6f069b2edb031c89bf603b07 (patch)
tree       dfe74c165e7607c233d223614ef400163c6ba44c /kernel
parent     4e6f96f313561d86d248edf0eaff2336d8217e1b (diff)
[PATCH] sched: ->task_new cleanup
make sched_class.task_new == NULL a 'default method'; this allows the removal of task_new_rt().

Signed-off-by: Ingo Molnar <mingo@elte.hu>
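The 'default method' idiom here means the core scheduler treats a NULL
->task_new pointer as "no class-specific new-task handling" and falls back to
plain activation, so a scheduling class that needs nothing special (like the
RT class below) can simply omit the hook. A minimal standalone sketch of that
dispatch pattern, with simplified stand-in types rather than the kernel's
real ones:

#include <stdio.h>

struct rq { int nr_running; };
struct task { const char *name; };

struct sched_class {
        /* optional hook; leaving it NULL selects the default path */
        void (*task_new)(struct rq *rq, struct task *p);
};

static void activate_task(struct rq *rq, struct task *p)
{
        rq->nr_running++;                       /* default activation */
        printf("%s: default activation\n", p->name);
}

static void wake_up_new_task(struct sched_class *class,
                             struct rq *rq, struct task *p)
{
        if (!class->task_new) {
                activate_task(rq, p);           /* NULL == 'default method' */
        } else {
                class->task_new(rq, p);         /* class-specific startup */
                rq->nr_running++;               /* caller owns the accounting */
        }
}

int main(void)
{
        struct rq rq = { 0 };
        struct task child = { "child" };
        struct sched_class rt_like = { .task_new = NULL }; /* hook omitted */

        wake_up_new_task(&rt_like, &rq, &child);
        return 0;
}

Note how the sketch mirrors a second aspect of the patch: inc_nr_running() is
hoisted out of task_new_fair() into the caller, so the runqueue count is
maintained in one place on both branches.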
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched.c       11 ++++++++---
-rw-r--r--  kernel/sched_fair.c   4 +---
-rw-r--r--  kernel/sched_rt.c    10 ----------
3 files changed, 9 insertions(+), 16 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 7bed2c5..915c75e 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1641,22 +1641,27 @@ void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
         unsigned long flags;
         struct rq *rq;
         int this_cpu;
+        u64 now;
 
         rq = task_rq_lock(p, &flags);
         BUG_ON(p->state != TASK_RUNNING);
         this_cpu = smp_processor_id(); /* parent's CPU */
+        now = rq_clock(rq);
 
         p->prio = effective_prio(p);
 
-        if (!sysctl_sched_child_runs_first || (clone_flags & CLONE_VM) ||
-                        task_cpu(p) != this_cpu || !current->se.on_rq) {
+        if (!p->sched_class->task_new || !sysctl_sched_child_runs_first ||
+                        (clone_flags & CLONE_VM) || task_cpu(p) != this_cpu ||
+                        !current->se.on_rq) {
+
                 activate_task(rq, p, 0);
         } else {
                 /*
                  * Let the scheduling class do new task startup
                  * management (if any):
                  */
-                p->sched_class->task_new(rq, p);
+                p->sched_class->task_new(rq, p, now);
+                inc_nr_running(p, rq, now);
         }
         check_preempt_curr(rq, p);
         task_rq_unlock(rq, &flags);
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 6971db0..243da6c 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1041,11 +1041,10 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr)
  * monopolize the CPU. Note: the parent runqueue is locked,
  * the child is not running yet.
  */
-static void task_new_fair(struct rq *rq, struct task_struct *p)
+static void task_new_fair(struct rq *rq, struct task_struct *p, u64 now)
 {
         struct cfs_rq *cfs_rq = task_cfs_rq(p);
         struct sched_entity *se = &p->se;
-        u64 now = rq_clock(rq);
 
         sched_info_queued(p);
@@ -1072,7 +1071,6 @@ static void task_new_fair(struct rq *rq, struct task_struct *p)
         p->se.wait_runtime = -(sysctl_sched_granularity / 2);
 
         __enqueue_entity(cfs_rq, se);
-        inc_nr_running(p, rq, now);
 }
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 1192a27..ade20dc 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -229,15 +229,6 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p)
         requeue_task_rt(rq, p);
 }
 
-/*
- * No parent/child timeslice management necessary for RT tasks,
- * just activate them:
- */
-static void task_new_rt(struct rq *rq, struct task_struct *p)
-{
-        activate_task(rq, p, 1);
-}
-
 static struct sched_class rt_sched_class __read_mostly = {
         .enqueue_task           = enqueue_task_rt,
         .dequeue_task           = dequeue_task_rt,
@@ -251,5 +242,4 @@ static struct sched_class rt_sched_class __read_mostly = {
         .load_balance           = load_balance_rt,
 
         .task_tick              = task_tick_rt,
-        .task_new               = task_new_rt,
 };
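Dropping the .task_new line from the initializer is all the RT class needs:
rt_sched_class uses C designated initializers, and members omitted from such
an initializer are implicitly zero-initialized (C99 6.7.8p21), so ->task_new
is left NULL, which is precisely the condition wake_up_new_task() now treats
as "use the default". A small standalone illustration of that guarantee (the
ops struct below is a hypothetical stand-in, not a kernel type):

#include <stdio.h>

struct ops {
        void (*task_tick)(void);
        void (*task_new)(void);         /* deliberately not initialized */
};

static void tick(void) { }

/* members omitted from a designated initializer are zeroed, so
 * .task_new ends up NULL without being mentioned at all */
static struct ops rt_like_ops = {
        .task_tick = tick,
};

int main(void)
{
        printf(".task_new is %s\n",
               rt_like_ops.task_new ? "set" : "NULL (the default)");
        return 0;
}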