author		Lai Jiangshan <laijs@cn.fujitsu.com>	2013-02-19 12:17:02 -0800
committer	Tejun Heo <tj@kernel.org>		2013-03-04 09:44:58 -0800
commit		f36dc67b27a689eeb3631b11ebef17bbff257fbb (patch)
tree		d93c4310742fae4633af6ebddf53fe649ca4965d /kernel/workqueue.c
parent		f5faa0774e07eada85b0c55ec789b3f337d01412 (diff)
workqueue: change argument of worker_maybe_bind_and_lock() to @pool
worker_maybe_bind_and_lock() currently takes @worker but only cares
about @worker->pool. This patch updates worker_maybe_bind_and_lock()
to take @pool instead of @worker. This will be used to better define
synchronization rules regarding rescuer->pool updates.
This doesn't introduce any functional change.
tj: Updated the comments and description.
Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
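For illustration, a minimal stand-alone sketch of the shape of this change
(simplified stand-in types, a pthread mutex in place of the pool spinlock, and
no real CPU binding; maybe_bind_and_lock_old/_new are invented names, not the
kernel functions):

#include <stdbool.h>
#include <pthread.h>

struct worker_pool {
	pthread_mutex_t lock;		/* stand-in for the pool spinlock */
	int cpu;			/* CPU this pool is associated with */
};

struct worker {
	struct worker_pool *pool;	/* a rescuer's ->pool changes over time */
};

/* Old shape: the pool is an implicit input, re-read from @worker. */
static bool maybe_bind_and_lock_old(struct worker *worker)
{
	struct worker_pool *pool = worker->pool;

	/* ... bind to pool->cpu if the pool is associated ... */
	pthread_mutex_lock(&pool->lock);
	return true;			/* pretend the pool was online */
}

/* New shape: the caller names the pool explicitly. */
static bool maybe_bind_and_lock_new(struct worker_pool *pool)
{
	/* ... bind to pool->cpu if the pool is associated ... */
	pthread_mutex_lock(&pool->lock);
	return true;
}

In the old shape the result depends on when worker->pool is read inside the
function; in the new shape that read is hoisted into the caller, which is what
allows the synchronization rules for rescuer->pool updates to be defined
separately.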
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--	kernel/workqueue.c	18
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index f456433..09545d4 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1504,8 +1504,10 @@ static void worker_leave_idle(struct worker *worker)
 }
 
 /**
- * worker_maybe_bind_and_lock - bind worker to its cpu if possible and lock pool
- * @worker: self
+ * worker_maybe_bind_and_lock - try to bind %current to worker_pool and lock it
+ * @pool: target worker_pool
+ *
+ * Bind %current to the cpu of @pool if it is associated and lock @pool.
  *
  * Works which are scheduled while the cpu is online must at least be
  * scheduled to a worker which is bound to the cpu so that if they are
@@ -1533,11 +1535,9 @@ static void worker_leave_idle(struct worker *worker)
  * %true if the associated pool is online (@worker is successfully
  * bound), %false if offline.
  */
-static bool worker_maybe_bind_and_lock(struct worker *worker)
+static bool worker_maybe_bind_and_lock(struct worker_pool *pool)
 __acquires(&pool->lock)
 {
-	struct worker_pool *pool = worker->pool;
-
 	while (true) {
 		/*
 		 * The following call may fail, succeed or succeed
@@ -1575,7 +1575,7 @@ __acquires(&pool->lock)
 static void idle_worker_rebind(struct worker *worker)
 {
 	/* CPU may go down again inbetween, clear UNBOUND only on success */
-	if (worker_maybe_bind_and_lock(worker))
+	if (worker_maybe_bind_and_lock(worker->pool))
 		worker_clr_flags(worker, WORKER_UNBOUND);
 
 	/* rebind complete, become available again */
@@ -1593,7 +1593,7 @@ static void busy_worker_rebind_fn(struct work_struct *work)
 {
 	struct worker *worker = container_of(work, struct worker, rebind_work);
 
-	if (worker_maybe_bind_and_lock(worker))
+	if (worker_maybe_bind_and_lock(worker->pool))
 		worker_clr_flags(worker, WORKER_UNBOUND);
 
 	spin_unlock_irq(&worker->pool->lock);
@@ -2038,7 +2038,7 @@ static bool manage_workers(struct worker *worker)
 	 * on @pool's current state. Try it and adjust
 	 * %WORKER_UNBOUND accordingly.
 	 */
-	if (worker_maybe_bind_and_lock(worker))
+	if (worker_maybe_bind_and_lock(pool))
		worker->flags &= ~WORKER_UNBOUND;
 	else
 		worker->flags |= WORKER_UNBOUND;
@@ -2358,7 +2358,7 @@ repeat:
 
 		/* migrate to the target cpu if possible */
 		rescuer->pool = pool;
-		worker_maybe_bind_and_lock(rescuer);
+		worker_maybe_bind_and_lock(pool);
 
 		/*
 		 * Slurp in all works issued via this workqueue and
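The rescuer hunk at the end is the motivating caller. A hypothetical usage
sketch, reusing the simplified types from the sketch above (rescue_one_pool is
an invented name, not a kernel function):

static void rescue_one_pool(struct worker *rescuer, struct worker_pool *pool)
{
	rescuer->pool = pool;		/* publish which pool this rescue serves */
	maybe_bind_and_lock_new(pool);	/* bind and lock the pool named above */

	/* ... process the pool's works ... */

	pthread_mutex_unlock(&pool->lock);
}

Because the bind-and-lock step no longer re-reads rescuer->pool, the
assignment above can be governed by its own synchronization rules, independent
of the binding.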