author     Tejun Heo <tj@kernel.org>    2010-09-16 10:42:16 +0200
committer  Tejun Heo <tj@kernel.org>    2010-09-19 17:51:05 +0200
commit     baf59022c37d43f202e62d5130e4bac5e825b426 (patch)
tree       43eea7aac112b2ee07b195e00bce4b14465d1183 /kernel
parent     401a8d048eadfbe1b1c1bf53d3b614fcc894c61a (diff)
workqueue: factor out start_flush_work()
Factor out start_flush_work() from flush_work().  start_flush_work() takes a
@wait_executing argument which controls whether the barrier is queued only if
the work is pending, or also if it is currently executing.  As flush_work()
needs to wait for execution too, it uses %true.

This commit doesn't cause any behavior difference.  start_flush_work() will
be used to implement flush_work_sync().

Signed-off-by: Tejun Heo <tj@kernel.org>
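The message above says start_flush_work() will be used to implement
flush_work_sync().  As a rough sketch only (this is not part of this commit,
and the eventual implementation may differ), flush_work_sync() could be built
on top of it inside kernel/workqueue.c roughly like this, assuming the
existing wait_on_work() helper is what waits for all currently executing
instances of @work:

bool flush_work_sync(struct work_struct *work)
{
        struct wq_barrier barr;
        bool pending, waited;

        /* queue the flush barrier only if @work is still pending */
        pending = start_flush_work(work, &barr, false);

        /* separately wait for any instance that is already executing */
        waited = wait_on_work(work);

        /* then wait for the pending instance, if any, to reach the barrier */
        if (pending) {
                wait_for_completion(&barr.done);
                destroy_work_on_stack(&barr.work);
        }

        return pending || waited;
}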
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/workqueue.c   64
1 file changed, 37 insertions(+), 27 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 1240b9d..33d31d7 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2326,35 +2326,17 @@ out_unlock:
 }
 EXPORT_SYMBOL_GPL(flush_workqueue);
 
-/**
- * flush_work - wait for a work to finish executing the last queueing instance
- * @work: the work to flush
- *
- * Wait until @work has finished execution. This function considers
- * only the last queueing instance of @work. If @work has been
- * enqueued across different CPUs on a non-reentrant workqueue or on
- * multiple workqueues, @work might still be executing on return on
- * some of the CPUs from earlier queueing.
- *
- * If @work was queued only on a non-reentrant, ordered or unbound
- * workqueue, @work is guaranteed to be idle on return if it hasn't
- * been requeued since flush started.
- *
- * RETURNS:
- * %true if flush_work() waited for the work to finish execution,
- * %false if it was already idle.
- */
-bool flush_work(struct work_struct *work)
+static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
+                             bool wait_executing)
 {
         struct worker *worker = NULL;
         struct global_cwq *gcwq;
         struct cpu_workqueue_struct *cwq;
-        struct wq_barrier barr;
 
         might_sleep();
         gcwq = get_work_gcwq(work);
         if (!gcwq)
-                return 0;
+                return false;
 
         spin_lock_irq(&gcwq->lock);
         if (!list_empty(&work->entry)) {
@@ -2367,26 +2349,54 @@ bool flush_work(struct work_struct *work)
                 cwq = get_work_cwq(work);
                 if (unlikely(!cwq || gcwq != cwq->gcwq))
                         goto already_gone;
-        } else {
+        } else if (wait_executing) {
                 worker = find_worker_executing_work(gcwq, work);
                 if (!worker)
                         goto already_gone;
                 cwq = worker->current_cwq;
-        }
+        } else
+                goto already_gone;
 
-        insert_wq_barrier(cwq, &barr, work, worker);
+        insert_wq_barrier(cwq, barr, work, worker);
         spin_unlock_irq(&gcwq->lock);
 
         lock_map_acquire(&cwq->wq->lockdep_map);
         lock_map_release(&cwq->wq->lockdep_map);
-
-        wait_for_completion(&barr.done);
-        destroy_work_on_stack(&barr.work);
         return true;
 already_gone:
         spin_unlock_irq(&gcwq->lock);
         return false;
 }
+
+/**
+ * flush_work - wait for a work to finish executing the last queueing instance
+ * @work: the work to flush
+ *
+ * Wait until @work has finished execution. This function considers
+ * only the last queueing instance of @work. If @work has been
+ * enqueued across different CPUs on a non-reentrant workqueue or on
+ * multiple workqueues, @work might still be executing on return on
+ * some of the CPUs from earlier queueing.
+ *
+ * If @work was queued only on a non-reentrant, ordered or unbound
+ * workqueue, @work is guaranteed to be idle on return if it hasn't
+ * been requeued since flush started.
+ *
+ * RETURNS:
+ * %true if flush_work() waited for the work to finish execution,
+ * %false if it was already idle.
+ */
+bool flush_work(struct work_struct *work)
+{
+        struct wq_barrier barr;
+
+        if (start_flush_work(work, &barr, true)) {
+                wait_for_completion(&barr.done);
+                destroy_work_on_stack(&barr.work);
+                return true;
+        } else
+                return false;
+}
 EXPORT_SYMBOL_GPL(flush_work);
 
 static bool wait_on_cpu_work(struct global_cwq *gcwq, struct work_struct *work)
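Caller-side usage, as a hypothetical sketch (the my_device structure and
functions below are invented for illustration, not taken from this commit): a
driver that embeds a work item can use flush_work() to make sure the handler
has finished before the containing object is freed, relying on the return
value documented in the kernel-doc above.

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_device {
        struct work_struct irq_work;
        /* ... state touched by the irq_work handler ... */
};

static void my_device_teardown(struct my_device *dev)
{
        /*
         * Wait for the last queueing instance of irq_work to finish so
         * the handler can no longer touch *dev; flush_work() returns
         * %false if the work was already idle.
         */
        if (flush_work(&dev->irq_work))
                pr_debug("irq_work was still pending or executing\n");

        kfree(dev);
}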