From 18aa9effad4adb2c1efe123af4eb24fec9f59b30 Mon Sep 17 00:00:00 2001
From: Tejun Heo <tj@kernel.org>
Date: Tue, 29 Jun 2010 10:07:13 +0200
Subject: workqueue: implement WQ_NON_REENTRANT

With gcwq managing all the workers and work->data pointing to the
last gcwq it was on, non-reentrance can be easily implemented by
checking whether the work is still running on the previous gcwq on
queueing.

Implement it.

Signed-off-by: Tejun Heo <tj@kernel.org>
---
 kernel/workqueue.c | 32 +++++++++++++++++++++++++++++---
 1 file changed, 29 insertions(+), 3 deletions(-)

diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index c68277c..bce1074 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -534,11 +534,37 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
 
 	debug_work_activate(work);
 
-	/* determine gcwq to use */
+	/*
+	 * Determine gcwq to use.  SINGLE_CPU is inherently
+	 * NON_REENTRANT, so test it first.
+	 */
 	if (!(wq->flags & WQ_SINGLE_CPU)) {
-		/* just use the requested cpu for multicpu workqueues */
+		struct global_cwq *last_gcwq;
+
+		/*
+		 * It's multi cpu.  If @wq is non-reentrant and @work
+		 * was previously on a different cpu, it might still
+		 * be running there, in which case the work needs to
+		 * be queued on that cpu to guarantee non-reentrance.
+		 */
 		gcwq = get_gcwq(cpu);
-		spin_lock_irqsave(&gcwq->lock, flags);
+		if (wq->flags & WQ_NON_REENTRANT &&
+		    (last_gcwq = get_work_gcwq(work)) && last_gcwq != gcwq) {
+			struct worker *worker;
+
+			spin_lock_irqsave(&last_gcwq->lock, flags);
+
+			worker = find_worker_executing_work(last_gcwq, work);
+
+			if (worker && worker->current_cwq->wq == wq)
+				gcwq = last_gcwq;
+			else {
+				/* meh... not running there, queue here */
+				spin_unlock_irqrestore(&last_gcwq->lock, flags);
+				spin_lock_irqsave(&gcwq->lock, flags);
+			}
+		} else
+			spin_lock_irqsave(&gcwq->lock, flags);
 	} else {
 		unsigned int req_cpu = cpu;
 
-- 
cgit v1.1
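
A minimal usage sketch for context (illustrative only, not part of this
patch): a workqueue user opts into the guarantee added here by passing
WQ_NON_REENTRANT when creating the workqueue.  The names my_wq, my_work
and my_work_fn are hypothetical, and the sketch assumes the
alloc_workqueue() interface that this flag ships with in mainline.

	#include <linux/workqueue.h>

	static struct workqueue_struct *my_wq;
	static struct work_struct my_work;

	/*
	 * On a WQ_NON_REENTRANT workqueue, this callback never runs
	 * concurrently with itself on another CPU for the same work
	 * item; if the item is still executing on its previous CPU,
	 * __queue_work() requeues it there instead.
	 */
	static void my_work_fn(struct work_struct *work)
	{
		/* non-reentrant processing goes here */
	}

	static int __init my_init(void)
	{
		/* max_active of 0 requests the default limit */
		my_wq = alloc_workqueue("my_wq", WQ_NON_REENTRANT, 0);
		if (!my_wq)
			return -ENOMEM;

		INIT_WORK(&my_work, my_work_fn);
		queue_work(my_wq, &my_work);
		return 0;
	}

Note the trade-off visible in the diff above: the guarantee costs an
extra lock/unlock of the last gcwq on the queueing path, but only for
workqueues that set WQ_NON_REENTRANT and only when the work item last
ran on a different CPU.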