author     Peter Zijlstra <peterz@infradead.org>     2017-02-21 14:23:38 +0100
committer  Ingo Molnar <mingo@kernel.org>            2017-03-16 09:46:23 +0100
commit     7134b3e941613dcb959b4b178cc4a35e45cbbc0d
tree       e7f3c680ae086a6631c003afd41599ee11a1a2ff /kernel/sched
parent     0a67d1ee30ef1efe6a412b3590e08734902aed43
sched/core: Add ENQUEUE_NOCLOCK to ENQUEUE_RESTORE
In all cases, ENQUEUE_RESTORE should also have ENQUEUE_NOCLOCK because
DEQUEUE_SAVE will have done an update_rq_clock().
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched')
-rw-r--r--  kernel/sched/core.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index ce363bd..247d0a0 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1070,7 +1070,7 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
 	p->sched_class->set_cpus_allowed(p, new_mask);
 
 	if (queued)
-		enqueue_task(rq, p, ENQUEUE_RESTORE);
+		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
 	if (running)
 		set_curr_task(rq, p);
 }
@@ -3815,7 +3815,7 @@ void set_user_nice(struct task_struct *p, long nice)
 	delta = p->prio - old_prio;
 
 	if (queued) {
-		enqueue_task(rq, p, ENQUEUE_RESTORE);
+		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
 		/*
 		 * If the task increased its priority or is running and
 		 * lowered its priority, then reschedule its CPU:
@@ -5517,7 +5517,7 @@ void sched_setnuma(struct task_struct *p, int nid)
 	p->numa_preferred_nid = nid;
 
 	if (queued)
-		enqueue_task(rq, p, ENQUEUE_RESTORE);
+		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
 	if (running)
 		set_curr_task(rq, p);
 	task_rq_unlock(rq, p, &rf);
@@ -6431,7 +6431,7 @@ void sched_move_task(struct task_struct *tsk)
 	sched_change_group(tsk, TASK_MOVE_GROUP);
 
 	if (queued)
-		enqueue_task(rq, tsk, ENQUEUE_RESTORE | ENQUEUE_MOVE);
+		enqueue_task(rq, tsk, ENQUEUE_RESTORE | ENQUEUE_MOVE | ENQUEUE_NOCLOCK);
 	if (running)
 		set_curr_task(rq, tsk);
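All four hunks touch the same dequeue/modify/enqueue pattern done under the rq lock. The sketch below is illustrative only, not part of the patch: the wrapper name change_task_attr() and the elided attribute update are made up for the example. It shows where the redundant clock update came from: dequeue_task() called without DEQUEUE_NOCLOCK already does an update_rq_clock(), and since the rq lock is never dropped before the matching enqueue, the restore side can pass ENQUEUE_NOCLOCK instead of updating the clock a second time.

/* Illustrative sketch of the pattern shared by the four call sites above. */
static void change_task_attr(struct task_struct *p)
{
	struct rq_flags rf;
	struct rq *rq;
	int queued, running;

	rq = task_rq_lock(p, &rf);

	queued = task_on_rq_queued(p);
	running = task_current(rq, p);

	if (queued)
		/* No DEQUEUE_NOCLOCK: dequeue_task() does update_rq_clock(). */
		dequeue_task(rq, p, DEQUEUE_SAVE);
	if (running)
		put_prev_task(rq, p);

	/* ... modify the task's scheduling attributes here ... */

	if (queued)
		/*
		 * The clock is still fresh from the DEQUEUE_SAVE above (the
		 * rq lock was never dropped), so ENQUEUE_NOCLOCK skips the
		 * second update_rq_clock().
		 */
		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
	if (running)
		set_curr_task(rq, p);

	task_rq_unlock(rq, p, &rf);
}

The sched_move_task() hunk follows the same reasoning; it merely OR's in ENQUEUE_MOVE as well, because the task is being moved between task groups.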