| author | Tejun Heo <tj@kernel.org> | 2012-07-17 12:39:28 -0700 |
|---|---|---|
| committer | Tejun Heo <tj@kernel.org> | 2012-07-17 12:39:28 -0700 |
| commit | 8db25e7891a47e03db6f04344a9c92be16e391bb (patch) | |
| tree | e093119c71e655b54b159fed76b654a437b1ff30 | |
| parent | 628c78e7ea19d5b70d2b6a59030362168cdbe1ad (diff) | |
workqueue: simplify CPU hotplug code
With trustee gone, CPU hotplug code can be simplified.
* gcwq_claim/release_management() now also grab and release the gcwq lock, respectively, and have gained _and_lock and _and_unlock suffixes (the first sketch below illustrates the resulting lock ordering).
* All CPU hotplug logic used to live in workqueue_cpu_callback(), which was invoked through workqueue_cpu_up/down_callback() so that each path ran at the correct notifier priority. That indirection existed because the up and down paths shared a lot of logic, which is no longer true. Remove workqueue_cpu_callback() and move all hotplug logic into the two actual callbacks (the second sketch below shows the resulting shape).
This patch doesn't make any functional changes.
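For illustration only, here is a minimal, self-contained userspace sketch of the pairing the first bullet describes. The demo_* names, NR_POOLS, and the use of pthreads are inventions of this sketch, not kernel code; it mirrors only the acquire/release ordering of the new helpers: every pool's manager mutex is taken first, in pool order, the gcwq-style lock is taken last, and release happens in reverse.

```c
#include <pthread.h>
#include <stdio.h>

#define NR_POOLS 2	/* hypothetical; stands in for the gcwq's worker pools */

/* Invented stand-ins for worker_pool and global_cwq. */
struct demo_pool {
	pthread_mutex_t manager_mutex;
};

struct demo_gcwq {
	pthread_mutex_t lock;		/* plays the role of gcwq->lock */
	struct demo_pool pools[NR_POOLS];
};

/* Claim every pool's manager position in pool order, then lock the gcwq. */
static void demo_claim_management_and_lock(struct demo_gcwq *gcwq)
{
	for (int i = 0; i < NR_POOLS; i++)
		pthread_mutex_lock(&gcwq->pools[i].manager_mutex);
	pthread_mutex_lock(&gcwq->lock);
}

/* Unlock the gcwq first, then release the manager positions in reverse. */
static void demo_release_management_and_unlock(struct demo_gcwq *gcwq)
{
	pthread_mutex_unlock(&gcwq->lock);
	for (int i = 0; i < NR_POOLS; i++)
		pthread_mutex_unlock(&gcwq->pools[i].manager_mutex);
}

int main(void)
{
	struct demo_gcwq gcwq = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.pools = {
			{ PTHREAD_MUTEX_INITIALIZER },
			{ PTHREAD_MUTEX_INITIALIZER },
		},
	};

	demo_claim_management_and_lock(&gcwq);
	puts("all managers claimed and gcwq locked");
	demo_release_management_and_unlock(&gcwq);
	return 0;
}
```

In the patch itself the inner lock is a spinlock taken with spin_lock_irq() and the manager mutexes are acquired with mutex_lock_nested() so lockdep can distinguish the per-pool lock classes; the sketch reproduces only the ordering.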
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: "Rafael J. Wysocki" <rjw@sisk.pl>
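Also for illustration, a sketch of the shape the second bullet describes: two independent callbacks, each filtering the actions it handles and returning an OK status, with no shared dispatcher between them. Every constant and name here is an invented demo value, not the kernel's CPU-notifier API; in the kernel, the up callback is additionally registered at high priority so workqueues come up before normal-priority notifiers.

```c
#include <stdio.h>

/* Demo action values; the kernel's CPU_* constants differ. */
enum {
	DEMO_CPU_UP_PREPARE	= 0x1,
	DEMO_CPU_DOWN_PREPARE	= 0x2,
	DEMO_CPU_TASKS_FROZEN	= 0x10,	/* modifier bit, masked off below */
};

#define DEMO_NOTIFY_OK 0

/* Up path: handles only the actions it cares about, nothing shared. */
static int demo_cpu_up_callback(unsigned long action)
{
	switch (action & ~DEMO_CPU_TASKS_FROZEN) {
	case DEMO_CPU_UP_PREPARE:
		puts("up: create and start a worker per pool");
		break;
	}
	return DEMO_NOTIFY_OK;
}

/* Down path: kept separate now that it shares no logic with the up path. */
static int demo_cpu_down_callback(unsigned long action)
{
	switch (action & ~DEMO_CPU_TASKS_FROZEN) {
	case DEMO_CPU_DOWN_PREPARE:
		puts("down: run the unbind function on the outgoing CPU");
		break;
	}
	return DEMO_NOTIFY_OK;
}

int main(void)
{
	demo_cpu_up_callback(DEMO_CPU_UP_PREPARE);
	/* The FROZEN modifier is masked off, as in the patch. */
	demo_cpu_down_callback(DEMO_CPU_DOWN_PREPARE | DEMO_CPU_TASKS_FROZEN);
	return 0;
}
```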
-rw-r--r-- | kernel/workqueue.c | 79
1 file changed, 25 insertions(+), 54 deletions(-)
```diff
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index d1545da..471996a 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -3358,19 +3358,21 @@ EXPORT_SYMBOL_GPL(work_busy);
  */
 
 /* claim manager positions of all pools */
-static void gcwq_claim_management(struct global_cwq *gcwq)
+static void gcwq_claim_management_and_lock(struct global_cwq *gcwq)
 {
 	struct worker_pool *pool;
 
 	for_each_worker_pool(pool, gcwq)
 		mutex_lock_nested(&pool->manager_mutex, pool - gcwq->pools);
+	spin_lock_irq(&gcwq->lock);
 }
 
 /* release manager positions */
-static void gcwq_release_management(struct global_cwq *gcwq)
+static void gcwq_release_management_and_unlock(struct global_cwq *gcwq)
 {
 	struct worker_pool *pool;
 
+	spin_unlock_irq(&gcwq->lock);
 	for_each_worker_pool(pool, gcwq)
 		mutex_unlock(&pool->manager_mutex);
 }
@@ -3385,8 +3387,7 @@ static void gcwq_unbind_fn(struct work_struct *work)
 
 	BUG_ON(gcwq->cpu != smp_processor_id());
 
-	gcwq_claim_management(gcwq);
-	spin_lock_irq(&gcwq->lock);
+	gcwq_claim_management_and_lock(gcwq);
 
 	/*
 	 * We've claimed all manager positions.  Make all workers unbound
@@ -3403,8 +3404,7 @@ static void gcwq_unbind_fn(struct work_struct *work)
 
 	gcwq->flags |= GCWQ_DISASSOCIATED;
 
-	spin_unlock_irq(&gcwq->lock);
-	gcwq_release_management(gcwq);
+	gcwq_release_management_and_unlock(gcwq);
 
 	/*
 	 * Call schedule() so that we cross rq->lock and thus can guarantee
@@ -3428,26 +3428,19 @@ static void gcwq_unbind_fn(struct work_struct *work)
 		atomic_set(get_pool_nr_running(pool), 0);
 }
 
-static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
-					    unsigned long action,
-					    void *hcpu)
+/*
+ * Workqueues should be brought up before normal priority CPU notifiers.
+ * This will be registered high priority CPU notifier.
+ */
+static int __devinit workqueue_cpu_up_callback(struct notifier_block *nfb,
+					       unsigned long action,
+					       void *hcpu)
 {
 	unsigned int cpu = (unsigned long)hcpu;
 	struct global_cwq *gcwq = get_gcwq(cpu);
 	struct worker_pool *pool;
-	struct work_struct unbind_work;
-	unsigned long flags;
-
-	action &= ~CPU_TASKS_FROZEN;
-
-	switch (action) {
-	case CPU_DOWN_PREPARE:
-		/* unbinding should happen on the local CPU */
-		INIT_WORK_ONSTACK(&unbind_work, gcwq_unbind_fn);
-		schedule_work_on(cpu, &unbind_work);
-		flush_work(&unbind_work);
-		break;
 
+	switch (action & ~CPU_TASKS_FROZEN) {
 	case CPU_UP_PREPARE:
 		for_each_worker_pool(pool, gcwq) {
 			struct worker *worker;
@@ -3463,45 +3456,16 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 			start_worker(worker);
 			spin_unlock_irq(&gcwq->lock);
 		}
-	}
-
-	/* some are called w/ irq disabled, don't disturb irq status */
-	spin_lock_irqsave(&gcwq->lock, flags);
+		break;
 
-	switch (action) {
 	case CPU_DOWN_FAILED:
 	case CPU_ONLINE:
-		spin_unlock_irq(&gcwq->lock);
-		gcwq_claim_management(gcwq);
-		spin_lock_irq(&gcwq->lock);
-
+		gcwq_claim_management_and_lock(gcwq);
 		gcwq->flags &= ~GCWQ_DISASSOCIATED;
-
 		rebind_workers(gcwq);
-
-		gcwq_release_management(gcwq);
+		gcwq_release_management_and_unlock(gcwq);
 		break;
 	}
-
-	spin_unlock_irqrestore(&gcwq->lock, flags);
-
-	return notifier_from_errno(0);
-}
-
-/*
- * Workqueues should be brought up before normal priority CPU notifiers.
- * This will be registered high priority CPU notifier.
- */
-static int __devinit workqueue_cpu_up_callback(struct notifier_block *nfb,
-					       unsigned long action,
-					       void *hcpu)
-{
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_UP_PREPARE:
-	case CPU_DOWN_FAILED:
-	case CPU_ONLINE:
-		return workqueue_cpu_callback(nfb, action, hcpu);
-	}
 	return NOTIFY_OK;
 }
 
@@ -3513,9 +3477,16 @@ static int __devinit workqueue_cpu_down_callback(struct notifier_block *nfb,
 						 unsigned long action,
 						 void *hcpu)
 {
+	unsigned int cpu = (unsigned long)hcpu;
+	struct work_struct unbind_work;
+
 	switch (action & ~CPU_TASKS_FROZEN) {
 	case CPU_DOWN_PREPARE:
-		return workqueue_cpu_callback(nfb, action, hcpu);
+		/* unbinding should happen on the local CPU */
+		INIT_WORK_ONSTACK(&unbind_work, gcwq_unbind_fn);
+		schedule_work_on(cpu, &unbind_work);
+		flush_work(&unbind_work);
+		break;
 	}
 	return NOTIFY_OK;
 }
```