author | Tejun Heo <tj@kernel.org> | 2013-01-17 17:16:24 -0800 |
---|---|---|
committer | Tejun Heo <tj@kernel.org> | 2013-01-17 17:19:58 -0800 |
commit | 111c225a5f8d872bc9327ada18d13b75edaa34be | |
tree | 8bb9e31b8345f67c50f5370e6ba03f613afd5b65 | |
parent | 023f27d3d6fcc9048754d879fe5e7d63402a5b16 | |
workqueue: set PF_WQ_WORKER on rescuers
PF_WQ_WORKER is used to tell the scheduler that the task is a workqueue
worker and needs wq_worker_sleeping/waking_up() invoked on it for
concurrency management. As rescuers never participate in concurrency
management, PF_WQ_WORKER wasn't set on them.
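For context, the scheduler consults this flag on the sleep path roughly as in the sketch below. This approximates the __schedule() hook of kernels from this era and is not code touched by this patch.

```c
/*
 * Sketch (approximate): fragment of the scheduler's sleep path in this
 * era.  When a task carrying PF_WQ_WORKER is about to sleep, the
 * workqueue code is asked whether another worker on the same CPU should
 * be woken up so the CPU keeps executing pending work items.
 */
if (prev->flags & PF_WQ_WORKER) {
	struct task_struct *to_wakeup;

	to_wakeup = wq_worker_sleeping(prev, cpu);
	if (to_wakeup)
		try_to_wake_up_local(to_wakeup);
}
```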
There's a need for an interface which can query whether %current is
executing a work item and, if so, which one. Such an interface requires
a way to identify all tasks which may execute work items, and
PF_WQ_WORKER will be used for that. As all normal workers always have
PF_WQ_WORKER set, we only need to add it to rescuers.
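As an illustration, such a query could look like the sketch below; the helpers current_wq_worker() and current_work() are hypothetical here and not part of this patch.

```c
/*
 * Hypothetical sketch of the query interface this enables: once every
 * task that may execute work items has PF_WQ_WORKER set and keeps its
 * struct worker in kthread_data(), %current can be inspected safely.
 */
static struct worker *current_wq_worker(void)
{
	if (current->flags & PF_WQ_WORKER)
		return kthread_data(current);
	return NULL;
}

/* Return the work item %current is executing, or NULL if none. */
struct work_struct *current_work(void)
{
	struct worker *worker = current_wq_worker();

	return worker ? worker->current_work : NULL;
}
```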
As rescuers start with WORKER_PREP and never clear it, they are always
NOT_RUNNING and there's no need to worry about them interfering with
concurrency management even if PF_WQ_WORKER is set; however, unlike
normal workers, rescuers currently don't have their worker struct as
kthread_data(). They use the associated workqueue_struct instead.
This is problematic as wq_worker_sleeping/waking_up() expect a struct
worker at kthread_data().
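For reference, both hooks treat kthread_data() as a struct worker from their first line; an abridged sketch of the wake-up hook as it looks in this era:

```c
/*
 * Abridged sketch of wq_worker_waking_up() in this era: kthread_data()
 * is assumed to hold a struct worker, so a rescuer whose kthread_data()
 * is the workqueue_struct would be misinterpreted here.
 */
void wq_worker_waking_up(struct task_struct *task, unsigned int cpu)
{
	struct worker *worker = kthread_data(task);

	if (!(worker->flags & WORKER_NOT_RUNNING))
		atomic_inc(get_pool_nr_running(worker->pool));
}
```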
This patch adds worker->rescue_wq, starts rescuer kthreads with the
worker struct as kthread_data(), and sets PF_WQ_WORKER on rescuers.
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r-- | kernel/workqueue.c | 35 |
1 file changed, 28 insertions, 7 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 7967f34..6b99ac7 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -149,6 +149,9 @@ struct worker {
 	/* for rebinding worker to CPU */
 	struct work_struct	rebind_work;	/* L: for busy worker */
+
+	/* used only by rescuers to point to the target workqueue */
+	struct workqueue_struct	*rescue_wq;	/* I: the workqueue to rescue */
 };
 
 struct worker_pool {
@@ -763,12 +766,20 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task,
 				       unsigned int cpu)
 {
 	struct worker *worker = kthread_data(task), *to_wakeup = NULL;
-	struct worker_pool *pool = worker->pool;
-	atomic_t *nr_running = get_pool_nr_running(pool);
+	struct worker_pool *pool;
+	atomic_t *nr_running;
 
+	/*
+	 * Rescuers, which may not have all the fields set up like normal
+	 * workers, also reach here, let's not access anything before
+	 * checking NOT_RUNNING.
+	 */
 	if (worker->flags & WORKER_NOT_RUNNING)
 		return NULL;
 
+	pool = worker->pool;
+	nr_running = get_pool_nr_running(pool);
+
 	/* this can only happen on the local cpu */
 	BUG_ON(cpu != raw_smp_processor_id());
 
@@ -2357,7 +2368,7 @@ sleep:
 
 /**
  * rescuer_thread - the rescuer thread function
- * @__wq: the associated workqueue
+ * @__rescuer: self
  *
  * Workqueue rescuer thread function.  There's one rescuer for each
  * workqueue which has WQ_RESCUER set.
@@ -2374,20 +2385,27 @@ sleep:
  *
  * This should happen rarely.
  */
-static int rescuer_thread(void *__wq)
+static int rescuer_thread(void *__rescuer)
 {
-	struct workqueue_struct *wq = __wq;
-	struct worker *rescuer = wq->rescuer;
+	struct worker *rescuer = __rescuer;
+	struct workqueue_struct *wq = rescuer->rescue_wq;
 	struct list_head *scheduled = &rescuer->scheduled;
 	bool is_unbound = wq->flags & WQ_UNBOUND;
 	unsigned int cpu;
 
 	set_user_nice(current, RESCUER_NICE_LEVEL);
+
+	/*
+	 * Mark rescuer as worker too.  As WORKER_PREP is never cleared, it
+	 * doesn't participate in concurrency management.
+	 */
+	rescuer->task->flags |= PF_WQ_WORKER;
 repeat:
 	set_current_state(TASK_INTERRUPTIBLE);
 
 	if (kthread_should_stop()) {
 		__set_current_state(TASK_RUNNING);
+		rescuer->task->flags &= ~PF_WQ_WORKER;
 		return 0;
 	}
 
@@ -2431,6 +2449,8 @@ repeat:
 		spin_unlock_irq(&gcwq->lock);
 	}
 
+	/* rescuers should never participate in concurrency management */
+	WARN_ON_ONCE(!(rescuer->flags & WORKER_NOT_RUNNING));
 	schedule();
 	goto repeat;
 }
@@ -3266,7 +3286,8 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
 		if (!rescuer)
 			goto err;
 
-		rescuer->task = kthread_create(rescuer_thread, wq, "%s",
+		rescuer->rescue_wq = wq;
+		rescuer->task = kthread_create(rescuer_thread, rescuer, "%s",
 					       wq->name);
 		if (IS_ERR(rescuer->task))
 			goto err;