Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched.c          | 2
-rw-r--r--  kernel/sched_fair.c     | 2
-rw-r--r--  kernel/sched_idletask.c | 2
-rw-r--r--  kernel/sched_rt.c       | 2
4 files changed, 4 insertions, 4 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 4f9f9e9..6644401 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3470,7 +3470,7 @@ need_resched_nonpreemptible:
 	if (unlikely(!rq->nr_running))
 		idle_balance(cpu, rq);
 
-	prev->sched_class->put_prev_task(rq, prev, now);
+	prev->sched_class->put_prev_task(rq, prev);
 	next = pick_next_task(rq, prev);
 
 	sched_info_switch(prev, next);
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 0b23aaf..103327b 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -878,7 +878,7 @@ static struct task_struct *pick_next_task_fair(struct rq *rq)
 /*
  * Account for a descheduled task:
  */
-static void put_prev_task_fair(struct rq *rq, struct task_struct *prev, u64 now)
+static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
 {
 	struct sched_entity *se = &prev->se;
 	struct cfs_rq *cfs_rq;
diff --git a/kernel/sched_idletask.c b/kernel/sched_idletask.c
index 9f4c28f..3503fb2 100644
--- a/kernel/sched_idletask.c
+++ b/kernel/sched_idletask.c
@@ -33,7 +33,7 @@ dequeue_task_idle(struct rq *rq, struct task_struct *p, int sleep)
 	spin_lock_irq(&rq->lock);
 }
 
-static void put_prev_task_idle(struct rq *rq, struct task_struct *prev, u64 now)
+static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
 {
 }
 
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index c0b0d62..dcdcad6 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -92,7 +92,7 @@ static struct task_struct *pick_next_task_rt(struct rq *rq)
 	return next;
 }
 
-static void put_prev_task_rt(struct rq *rq, struct task_struct *p, u64 now)
+static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 {
 	update_curr_rt(rq);
 	p->se.exec_start = 0;
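
For reference, a minimal user-space sketch of the calling convention after this change. Only the put_prev_task() hook shape and the sched_class/task_struct names mirror the diff above; the struct contents, the demo class, and main() are illustrative stand-ins, not kernel code. The core scheduler now calls the hook with just the runqueue and the task being descheduled; each scheduling class does its own time accounting internally instead of receiving a u64 now argument.

/* Illustrative sketch only -- compiles as plain C, not kernel code. */
#include <stdio.h>

struct rq;                               /* stand-in for the kernel's runqueue */
struct task_struct { const char *comm; };

struct sched_class {
	/* new hook signature: no u64 now parameter */
	void (*put_prev_task)(struct rq *rq, struct task_struct *prev);
};

static void put_prev_task_demo(struct rq *rq, struct task_struct *prev)
{
	(void)rq;
	/* a real class would update its own runtime accounting here */
	printf("descheduling %s\n", prev->comm);
}

static const struct sched_class demo_class = {
	.put_prev_task = put_prev_task_demo,
};

int main(void)
{
	struct task_struct prev = { .comm = "demo" };

	/* call site shaped like the one in kernel/sched.c after this commit */
	demo_class.put_prev_task(NULL, &prev);
	return 0;
}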