author		Peter Zijlstra <peterz@infradead.org>	2016-09-20 22:06:01 +0200
committer	Ingo Molnar <mingo@kernel.org>	2016-09-30 11:03:28 +0200
commit		49bd21efe7fc84f9c82c8475b8ff6f8b865b1692 (patch)
tree		93f1620565f6479e5dceb3d1133dba73390edebb /kernel/sched
parent		b2bf6c314e3a9e227925240d92ecd6e9b0110170 (diff)
sched/core: Fix set_user_nice()
Almost all scheduler functions update state with the following pattern:

	if (queued)
		dequeue_task(rq, p, DEQUEUE_SAVE);
	if (running)
		put_prev_task(rq, p);

	/* update state */

	if (queued)
		enqueue_task(rq, p, ENQUEUE_RESTORE);
	if (running)
		set_curr_task(rq, p);

set_user_nice() however misses the running part, cure this.

This was found by asserting we never enqueue 'current'.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
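[Editor's note] The pattern in the message above is a save/restore bracket around any
state change that affects a task's placement. As a minimal sketch of the same
discipline packaged into one place (illustrative only: the helper name
change_task_state and the update callback are hypothetical, not a kernel API
at this revision; the bracketing calls are the real ones from the pattern):

	/*
	 * Minimal sketch, not kernel code: dequeue a queued task and put a
	 * running task before mutating state, then restore both afterwards.
	 */
	static void change_task_state(struct rq *rq, struct task_struct *p,
				      void (*update)(struct task_struct *p))
	{
		bool queued  = task_on_rq_queued(p);
		bool running = task_current(rq, p);

		if (queued)
			dequeue_task(rq, p, DEQUEUE_SAVE);
		if (running)
			put_prev_task(rq, p);

		update(p);	/* e.g. change static_prio and load weight */

		if (queued)
			enqueue_task(rq, p, ENQUEUE_RESTORE);
		if (running)
			set_curr_task(rq, p);
	}

Forgetting the 'running' half, as set_user_nice() did, means the scheduling
class is never told that 'current' was taken out of and put back into the
runqueue around the state change.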
Diffstat (limited to 'kernel/sched')
-rw-r--r--	kernel/sched/core.c | 8
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index ce69fc7..aae08ce 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3724,7 +3724,8 @@ out_unlock:
 
 void set_user_nice(struct task_struct *p, long nice)
 {
-	int old_prio, delta, queued;
+	bool queued, running;
+	int old_prio, delta;
 	struct rq_flags rf;
 	struct rq *rq;
 
@@ -3746,8 +3747,11 @@ void set_user_nice(struct task_struct *p, long nice)
 		goto out_unlock;
 	}
 	queued = task_on_rq_queued(p);
+	running = task_current(rq, p);
 	if (queued)
 		dequeue_task(rq, p, DEQUEUE_SAVE);
+	if (running)
+		put_prev_task(rq, p);
 
 	p->static_prio = NICE_TO_PRIO(nice);
 	set_load_weight(p);
@@ -3764,6 +3768,8 @@ void set_user_nice(struct task_struct *p, long nice)
 		if (delta < 0 || (delta > 0 && task_running(rq, p)))
 			resched_curr(rq);
 	}
+	if (running)
+		set_curr_task(rq, p);
 out_unlock:
 	task_rq_unlock(rq, p, &rf);
 }
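[Editor's note] For reference, a sketch of how the touched region of
set_user_nice() reads with this patch applied. The lines between the hunks
are reconstructed from the surrounding kernel source at this revision and
are an approximation, not part of the diff itself:

	void set_user_nice(struct task_struct *p, long nice)
	{
		bool queued, running;
		int old_prio, delta;
		struct rq_flags rf;
		struct rq *rq;

		if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
			return;
		rq = task_rq_lock(p, &rf);

		/* RT/DL tasks keep the nice value but it has no scheduling effect. */
		if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
			p->static_prio = NICE_TO_PRIO(nice);
			goto out_unlock;
		}

		queued = task_on_rq_queued(p);
		running = task_current(rq, p);
		if (queued)
			dequeue_task(rq, p, DEQUEUE_SAVE);
		if (running)
			put_prev_task(rq, p);	/* new: tell the class 'current' is going away */

		p->static_prio = NICE_TO_PRIO(nice);
		set_load_weight(p);
		old_prio = p->prio;
		p->prio = effective_prio(p);
		delta = p->prio - old_prio;

		if (queued) {
			enqueue_task(rq, p, ENQUEUE_RESTORE);
			/* Reschedule if the effective priority moved against us. */
			if (delta < 0 || (delta > 0 && task_running(rq, p)))
				resched_curr(rq);
		}
		if (running)
			set_curr_task(rq, p);	/* new: reinstate p as the rq's current task */
	out_unlock:
		task_rq_unlock(rq, p, &rf);
	}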