author    Tejun Heo <tj@kernel.org>    2010-06-29 10:07:13 +0200
committer Tejun Heo <tj@kernel.org>    2010-06-29 10:07:13 +0200
commit    18aa9effad4adb2c1efe123af4eb24fec9f59b30 (patch)
tree      76975b37ae0ef1ebf460aaab68359ed7a12fe37a /kernel
parent    7a22ad757ec75186ad43a5b4670fa7423ee8f480 (diff)
workqueue: implement WQ_NON_REENTRANT
With gcwq managing all the workers and work->data pointing to the last
gcwq it was on, non-reentrance can be easily implemented by checking
whether the work is still running on the previous gcwq on queueing.
Implement it.

Signed-off-by: Tejun Heo <tj@kernel.org>
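The decision this patch adds to __queue_work() can be modeled outside the
kernel. Below is a minimal user-space sketch of that check; struct work,
still_running_on() and pick_cpu() here are illustrative stand-ins for the
kernel's work_struct, find_worker_executing_work() and the queueing logic
in the diff, not real API.

#include <stdbool.h>
#include <stdio.h>

struct work {
	int last_cpu;	/* last CPU this work ran on, -1 if none */
};

/* stand-in for find_worker_executing_work(): in the kernel this looks
 * up the gcwq's busy-worker hash; this model simply pretends the
 * previous instance is still executing there. */
static bool still_running_on(struct work *w, int cpu)
{
	return w->last_cpu == cpu;
}

/* mirrors the patch: prefer the requested CPU, but if the work may
 * still be running on its previous CPU, queue it there so two
 * instances never execute concurrently */
static int pick_cpu(struct work *w, int req_cpu, bool non_reentrant)
{
	if (non_reentrant && w->last_cpu >= 0 && w->last_cpu != req_cpu &&
	    still_running_on(w, w->last_cpu))
		return w->last_cpu;	/* queue behind the running instance */
	return req_cpu;			/* not running elsewhere, queue here */
}

int main(void)
{
	struct work w = { .last_cpu = 2 };

	printf("non-reentrant: queued on CPU %d\n", pick_cpu(&w, 0, true));
	printf("reentrant:     queued on CPU %d\n", pick_cpu(&w, 0, false));
	return 0;
}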
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/workqueue.c	32	+++++++++++++++++++++++++++++---
1 file changed, 29 insertions(+), 3 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index c68277c..bce1074 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -534,11 +534,37 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
debug_work_activate(work);
- /* determine gcwq to use */
+ /*
+ * Determine gcwq to use. SINGLE_CPU is inherently
+ * NON_REENTRANT, so test it first.
+ */
if (!(wq->flags & WQ_SINGLE_CPU)) {
- /* just use the requested cpu for multicpu workqueues */
+ struct global_cwq *last_gcwq;
+
+ /*
+ * It's multi cpu. If @wq is non-reentrant and @work
+ * was previously on a different cpu, it might still
+ * be running there, in which case the work needs to
+ * be queued on that cpu to guarantee non-reentrance.
+ */
gcwq = get_gcwq(cpu);
- spin_lock_irqsave(&gcwq->lock, flags);
+ if (wq->flags & WQ_NON_REENTRANT &&
+ (last_gcwq = get_work_gcwq(work)) && last_gcwq != gcwq) {
+ struct worker *worker;
+
+ spin_lock_irqsave(&last_gcwq->lock, flags);
+
+ worker = find_worker_executing_work(last_gcwq, work);
+
+ if (worker && worker->current_cwq->wq == wq)
+ gcwq = last_gcwq;
+ else {
+ /* meh... not running there, queue here */
+ spin_unlock_irqrestore(&last_gcwq->lock, flags);
+ spin_lock_irqsave(&gcwq->lock, flags);
+ }
+ } else
+ spin_lock_irqsave(&gcwq->lock, flags);
} else {
unsigned int req_cpu = cpu;
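For context, a user of this flag would pass WQ_NON_REENTRANT when
creating its workqueue. The sketch below assumes the alloc_workqueue()
interface from the same cmwq series (its availability at exactly this
commit is an assumption); my_wq, my_work and my_work_fn are hypothetical
names.

#include <linux/init.h>
#include <linux/workqueue.h>

static void my_work_fn(struct work_struct *work)
{
	/* without WQ_NON_REENTRANT, this function could run
	 * concurrently on two CPUs for the same work item */
}

static DECLARE_WORK(my_work, my_work_fn);
static struct workqueue_struct *my_wq;

static int __init my_init(void)
{
	/* WQ_NON_REENTRANT: a given work item never runs on two CPUs
	 * at once, per the check added by this patch */
	my_wq = alloc_workqueue("my_wq", WQ_NON_REENTRANT, 1);
	if (!my_wq)
		return -ENOMEM;

	queue_work(my_wq, &my_work);
	return 0;
}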