author	Linus Torvalds <torvalds@linux-foundation.org>	2012-09-17 16:05:23 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-09-17 16:05:23 -0700
commit	4651afbbae968772efd6dc4ba461cba9b49bb9d8 (patch)
tree	486bfb263e8b7ad09c29db16f3d2925fc6c03555
parent	08077ca849ced0306ce842ed597b0d80434a8bf0 (diff)
parent	960bd11bf2daf669d0d910428fd9ef5a15c3d7cb (diff)
Merge branch 'for-3.6-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq
Pull another workqueue fix from Tejun Heo:
 "Unfortunately, yet another late fix. This too is discovered and fixed
  by Lai. This bug was introduced during this merge window by commit
  25511a477657 ("workqueue: reimplement CPU online rebinding to handle
  idle workers") which started using WORKER_REBIND flag for idle rebind
  too.

  The bug is relatively easy to trigger if the CPU rapidly goes through
  off, on and then off (and stay off). The fix is on the safer side.
  This hasn't been on linux-next yet but I'm pushing early so that it
  can get more exposure before v3.6 release."

* 'for-3.6-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq:
  workqueue: always clear WORKER_REBIND in busy_worker_rebind_fn()
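To make the failure mode concrete, below is a minimal userspace model of the
flag handling described above (a sketch only: fake_worker, try_rebind, and the
flag value are illustrative stand-ins, not the kernel's types or API). It shows
how the old conditional clear leaves WORKER_REBIND set when the CPU has gone
back down, while the patched behaviour clears the flag unconditionally.

/* model.c -- illustrative only; build with: cc -o model model.c */
#include <stdio.h>
#include <stdbool.h>

#define WORKER_REBIND 0x1            /* stand-in for the real flag bit */

struct fake_worker {
	unsigned int flags;
};

/* Stand-in for worker_maybe_bind_and_lock(): rebinding to the target CPU
 * fails if that CPU has gone offline again in the meantime. */
static bool try_rebind(bool cpu_is_online)
{
	return cpu_is_online;
}

int main(void)
{
	struct fake_worker w = { .flags = WORKER_REBIND };
	bool cpu_is_online = false;   /* CPU went off -> on -> off again */

	/* Old behaviour: clear the flag only when rebinding succeeded.
	 * With the CPU offline again, WORKER_REBIND stays set and can
	 * confuse the next CPU_UP cycle. */
	if (try_rebind(cpu_is_online))
		w.flags &= ~WORKER_REBIND;
	printf("old: REBIND still set? %s\n",
	       (w.flags & WORKER_REBIND) ? "yes (bug)" : "no");

	/* Patched behaviour: attempt the rebind, then clear the flag
	 * unconditionally, as busy_worker_rebind_fn() now does. */
	w.flags = WORKER_REBIND;
	try_rebind(cpu_is_online);
	w.flags &= ~WORKER_REBIND;
	printf("new: REBIND still set? %s\n",
	       (w.flags & WORKER_REBIND) ? "yes" : "no (fixed)");

	return 0;
}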
-rw-r--r--	kernel/workqueue.c	12
1 file changed, 10 insertions(+), 2 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 1e1373b..b80065a 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1349,8 +1349,16 @@ static void busy_worker_rebind_fn(struct work_struct *work)
 	struct worker *worker = container_of(work, struct worker, rebind_work);
 	struct global_cwq *gcwq = worker->pool->gcwq;
 
-	if (worker_maybe_bind_and_lock(worker))
-		worker_clr_flags(worker, WORKER_REBIND);
+	worker_maybe_bind_and_lock(worker);
+
+	/*
+	 * %WORKER_REBIND must be cleared even if the above binding failed;
+	 * otherwise, we may confuse the next CPU_UP cycle or oops / get
+	 * stuck by calling idle_worker_rebind() prematurely. If CPU went
+	 * down again inbetween, %WORKER_UNBOUND would be set, so clearing
+	 * %WORKER_REBIND is always safe.
+	 */
+	worker_clr_flags(worker, WORKER_REBIND);
 
 	spin_unlock_irq(&gcwq->lock);
 }