| author | Tejun Heo <tj@kernel.org> | 2010-07-02 10:03:51 +0200 |
| committer | Tejun Heo <tj@kernel.org> | 2010-07-02 11:00:08 +0200 |
| commit | c7fc77f78f16d138ca997ce096a62f46e2e9420a (patch) |
| tree | 0478e5dde66f6ff86d4baa0fe541748e1a6f1ed2 /kernel/workqueue.c |
| parent | f34217977d717385a3e9fd7018ac39fade3964c0 (diff) |
| download | op-kernel-dev-c7fc77f78f16d138ca997ce096a62f46e2e9420a.zip, op-kernel-dev-c7fc77f78f16d138ca997ce096a62f46e2e9420a.tar.gz |
workqueue: remove WQ_SINGLE_CPU and use WQ_UNBOUND instead
WQ_SINGLE_CPU combined with @max_active of 1 is used to achieve full
ordering among works queued to a workqueue. The same can be achieved
using WQ_UNBOUND, as unbound workqueues always use the gcwq for
WORK_CPU_UNBOUND. Since @max_active is always one and the benefit of
cpu locality isn't attainable anyway, serving these workqueues as
unbound workqueues should be fine.
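To illustrate the equivalence, a minimal sketch of how a caller gets full ordering after this change; the workqueue name, work function and init hook below are made-up examples, not part of this patch:

```c
#include <linux/module.h>
#include <linux/workqueue.h>

static void frob_fn(struct work_struct *work)
{
	/* works on this wq run strictly one at a time, in queueing order */
}

static DECLARE_WORK(frob_work, frob_fn);
static struct workqueue_struct *frob_wq;

static int __init frob_init(void)
{
	/*
	 * WQ_UNBOUND + @max_active of 1 replaces the old
	 * WQ_SINGLE_CPU + @max_active of 1 combination: every work goes
	 * through the WORK_CPU_UNBOUND gcwq and at most one is active
	 * at any time, so full ordering is preserved.
	 */
	frob_wq = alloc_workqueue("frob", WQ_UNBOUND, 1);
	if (!frob_wq)
		return -ENOMEM;

	queue_work(frob_wq, &frob_work);
	return 0;
}
module_init(frob_init);
```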
Drop WQ_SINGLE_CPU support and use WQ_UNBOUND instead. Note that most
single thread workqueue users will be converted to multithreaded or
non-reentrant workqueues instead, and only the ones which require
strict ordering will keep using WQ_UNBOUND + @max_active of 1.
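As a rough sketch of those other conversion paths (the names below are hypothetical; WQ_NON_REENTRANT comes from the same workqueue rework and only guarantees that a given work item never runs concurrently with itself, not ordering):

```c
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *frob_mt_wq;	/* plain multithreaded, per-cpu */
static struct workqueue_struct *frob_nr_wq;	/* non-reentrant, still unordered */

static int __init frob_convert_init(void)
{
	frob_mt_wq = alloc_workqueue("frob_mt", 0, 0);
	frob_nr_wq = alloc_workqueue("frob_nr", WQ_NON_REENTRANT, 0);

	if (!frob_mt_wq || !frob_nr_wq) {
		if (frob_mt_wq)
			destroy_workqueue(frob_mt_wq);
		if (frob_nr_wq)
			destroy_workqueue(frob_nr_wq);
		return -ENOMEM;
	}
	return 0;
}
module_init(frob_convert_init);
```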
Signed-off-by: Tejun Heo <tj@kernel.org>
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r-- | kernel/workqueue.c | 100 |
1 file changed, 18 insertions(+), 82 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 4608563..20d6237 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -206,8 +206,6 @@ struct workqueue_struct {
 	struct list_head	flusher_queue;	/* F: flush waiters */
 	struct list_head	flusher_overflow; /* F: flush overflow list */
 
-	unsigned long		single_cpu;	/* cpu for single cpu wq */
-
 	cpumask_var_t		mayday_mask;	/* cpus requesting rescue */
 	struct worker		*rescuer;	/* I: rescue worker */
 
@@ -889,34 +887,6 @@ static void insert_work(struct cpu_workqueue_struct *cwq,
 		wake_up_worker(gcwq);
 }
 
-/**
- * cwq_unbind_single_cpu - unbind cwq from single cpu workqueue processing
- * @cwq: cwq to unbind
- *
- * Try to unbind @cwq from single cpu workqueue processing. If
- * @cwq->wq is frozen, unbind is delayed till the workqueue is thawed.
- *
- * CONTEXT:
- * spin_lock_irq(gcwq->lock).
- */
-static void cwq_unbind_single_cpu(struct cpu_workqueue_struct *cwq)
-{
-	struct workqueue_struct *wq = cwq->wq;
-	struct global_cwq *gcwq = cwq->gcwq;
-
-	BUG_ON(wq->single_cpu != gcwq->cpu);
-	/*
-	 * Unbind from workqueue if @cwq is not frozen. If frozen,
-	 * thaw_workqueues() will either restart processing on this
-	 * cpu or unbind if empty. This keeps works queued while
-	 * frozen fully ordered and flushable.
-	 */
-	if (likely(!(gcwq->flags & GCWQ_FREEZING))) {
-		smp_wmb();	/* paired with cmpxchg() in __queue_work() */
-		wq->single_cpu = WORK_CPU_NONE;
-	}
-}
-
 static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
 			 struct work_struct *work)
 {
@@ -924,20 +894,16 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
 	struct cpu_workqueue_struct *cwq;
 	struct list_head *worklist;
 	unsigned long flags;
-	bool arbitrate;
 
 	debug_work_activate(work);
 
-	if (unlikely(cpu == WORK_CPU_UNBOUND))
-		cpu = raw_smp_processor_id();
-
-	/*
-	 * Determine gcwq to use. SINGLE_CPU is inherently
-	 * NON_REENTRANT, so test it first.
-	 */
-	if (!(wq->flags & (WQ_SINGLE_CPU | WQ_UNBOUND))) {
+	/* determine gcwq to use */
+	if (!(wq->flags & WQ_UNBOUND)) {
 		struct global_cwq *last_gcwq;
 
+		if (unlikely(cpu == WORK_CPU_UNBOUND))
+			cpu = raw_smp_processor_id();
+
 		/*
 		 * It's multi cpu. If @wq is non-reentrant and @work
 		 * was previously on a different cpu, it might still
@@ -962,38 +928,6 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
 			}
 		} else
 			spin_lock_irqsave(&gcwq->lock, flags);
-	} else if (!(wq->flags & WQ_UNBOUND)) {
-		unsigned int req_cpu = cpu;
-
-		/*
-		 * It's a bit more complex for single cpu workqueues.
-		 * We first need to determine which cpu is going to be
-		 * used. If no cpu is currently serving this
-		 * workqueue, arbitrate using atomic accesses to
-		 * wq->single_cpu; otherwise, use the current one.
-		 */
-	retry:
-		cpu = wq->single_cpu;
-		arbitrate = cpu == WORK_CPU_NONE;
-		if (arbitrate)
-			cpu = req_cpu;
-
-		gcwq = get_gcwq(cpu);
-		spin_lock_irqsave(&gcwq->lock, flags);
-
-		/*
-		 * The following cmpxchg() is a full barrier paired
-		 * with smp_wmb() in cwq_unbind_single_cpu() and
-		 * guarantees that all changes to wq->st_* fields are
-		 * visible on the new cpu after this point.
-		 */
-		if (arbitrate)
-			cmpxchg(&wq->single_cpu, WORK_CPU_NONE, cpu);
-
-		if (unlikely(wq->single_cpu != cpu)) {
-			spin_unlock_irqrestore(&gcwq->lock, flags);
-			goto retry;
-		}
 	} else {
 		gcwq = get_gcwq(WORK_CPU_UNBOUND);
 		spin_lock_irqsave(&gcwq->lock, flags);
@@ -1105,19 +1039,30 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
 	struct work_struct *work = &dwork->work;
 
 	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
-		struct global_cwq *gcwq = get_work_gcwq(work);
-		unsigned int lcpu = gcwq ? gcwq->cpu : raw_smp_processor_id();
+		unsigned int lcpu;
 
 		BUG_ON(timer_pending(timer));
 		BUG_ON(!list_empty(&work->entry));
 
 		timer_stats_timer_set_start_info(&dwork->timer);
+
 		/*
 		 * This stores cwq for the moment, for the timer_fn.
 		 * Note that the work's gcwq is preserved to allow
 		 * reentrance detection for delayed works.
 		 */
+		if (!(wq->flags & WQ_UNBOUND)) {
+			struct global_cwq *gcwq = get_work_gcwq(work);
+
+			if (gcwq && gcwq->cpu != WORK_CPU_UNBOUND)
+				lcpu = gcwq->cpu;
+			else
+				lcpu = raw_smp_processor_id();
+		} else
+			lcpu = WORK_CPU_UNBOUND;
+
 		set_work_cwq(work, get_cwq(lcpu, wq), 0);
+
 		timer->expires = jiffies + delay;
 		timer->data = (unsigned long)dwork;
 		timer->function = delayed_work_timer_fn;
@@ -1696,9 +1641,6 @@ static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
 		/* one down, submit a delayed one */
 		if (cwq->nr_active < cwq->max_active)
 			cwq_activate_first_delayed(cwq);
-	} else if (!cwq->nr_active && cwq->wq->flags & WQ_SINGLE_CPU) {
-		/* this was the last work, unbind from single cpu */
-		cwq_unbind_single_cpu(cwq);
 	}
 
 	/* is flush in progress and are we at the flushing tip? */
@@ -2751,7 +2693,6 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name,
 	atomic_set(&wq->nr_cwqs_to_flush, 0);
 	INIT_LIST_HEAD(&wq->flusher_queue);
 	INIT_LIST_HEAD(&wq->flusher_overflow);
-	wq->single_cpu = WORK_CPU_NONE;
 
 	wq->name = name;
 	lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
@@ -3513,11 +3454,6 @@ void thaw_workqueues(void)
 			while (!list_empty(&cwq->delayed_works) &&
 			       cwq->nr_active < cwq->max_active)
 				cwq_activate_first_delayed(cwq);
-
-			/* perform delayed unbind from single cpu if empty */
-			if (wq->single_cpu == gcwq->cpu &&
-			    !cwq->nr_active && list_empty(&cwq->delayed_works))
-				cwq_unbind_single_cpu(cwq);
 		}
 
 		wake_up_worker(gcwq);