author    Tejun Heo <tj@kernel.org>    2009-11-17 14:06:20 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>    2009-11-17 17:40:33 -0800
commit    9398180097e359646d46083c3e079a54e20bee82 (patch)
tree      633933ddd86e3dba7c0970eea5768867849842c9
parent    e13193319d3a5545c82ed4b724bffd16f87873e3 (diff)
workqueue: fix race condition in schedule_on_each_cpu()
Commit 65a64464349883891e21e74af16c05d6e1eeb4e9 ("HWPOISON: Allow schedule_on_each_cpu() from keventd"), which allows schedule_on_each_cpu() to be called from keventd, added a race condition: schedule_on_each_cpu() may race with cpu hotplug and end up executing the function twice on a cpu.

Fix it by moving direct execution into the section protected with get/put_online_cpus(). While at it, update the code so that direct execution happens after works have been scheduled for all other cpus, and drop the unnecessary cpu != orig test from the flush loop.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Andi Kleen <ak@linux.intel.com>
Acked-by: Oleg Nesterov <oleg@redhat.com>
Cc: Ingo Molnar <mingo@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
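To make the race concrete: before this patch the direct call ran before get_online_cpus(), so nothing stopped a concurrent CPU-down from migrating the (normally CPU-bound) keventd worker in between. One plausible interleaving, written as an illustrative timeline rather than kernel source (the CPU numbers are assumptions):

/*
 * Illustrative pre-patch interleaving (CPU numbers made up):
 *
 *   keventd worker bound to CPU1          concurrent cpu hotplug
 *   ----------------------------          ----------------------
 *   orig = raw_smp_processor_id();  // reads 1
 *                                         CPU1 goes down; the worker
 *                                         loses its binding and is
 *                                         migrated to, say, CPU2
 *   func(per_cpu_ptr(works, orig)); // the "direct" call now actually
 *                                   // executes on CPU2
 *   get_online_cpus();
 *   for_each_online_cpu(cpu) {      // CPU1 is offline, so the
 *       if (cpu == orig)            // cpu == orig skip never fires;
 *           continue;               // CPU2 gets a scheduled work
 *       schedule_work_on(cpu, work);// item as well, and func() ends
 *   }                               // up running twice on CPU2
 *
 * With the patch, get_online_cpus() is taken before the CPU id is
 * sampled and before the direct call, closing this window.
 */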
-rw-r--r--    kernel/workqueue.c    28
1 file changed, 13 insertions(+), 15 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 1232814..67e526b 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -692,31 +692,29 @@ int schedule_on_each_cpu(work_func_t func)
 	if (!works)
 		return -ENOMEM;
 
+	get_online_cpus();
+
 	/*
-	 * when running in keventd don't schedule a work item on itself.
-	 * Can just call directly because the work queue is already bound.
-	 * This also is faster.
-	 * Make this a generic parameter for other workqueues?
+	 * When running in keventd don't schedule a work item on
+	 * itself.  Can just call directly because the work queue is
+	 * already bound.  This also is faster.
 	 */
-	if (current_is_keventd()) {
+	if (current_is_keventd())
 		orig = raw_smp_processor_id();
-		INIT_WORK(per_cpu_ptr(works, orig), func);
-		func(per_cpu_ptr(works, orig));
-	}
 
-	get_online_cpus();
 	for_each_online_cpu(cpu) {
 		struct work_struct *work = per_cpu_ptr(works, cpu);
 
-		if (cpu == orig)
-			continue;
 		INIT_WORK(work, func);
-		schedule_work_on(cpu, work);
-	}
-	for_each_online_cpu(cpu) {
 		if (cpu != orig)
-			flush_work(per_cpu_ptr(works, cpu));
+			schedule_work_on(cpu, work);
 	}
+	if (orig >= 0)
+		func(per_cpu_ptr(works, orig));
+
+	for_each_online_cpu(cpu)
+		flush_work(per_cpu_ptr(works, cpu));
+
 	put_online_cpus();
 	free_percpu(works);
 	return 0;
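For reference, here is schedule_on_each_cpu() as it reads with the patch applied. The hunk above covers everything except the declarations at the top of the function, which are reconstructed from the surrounding 2.6.32-era kernel/workqueue.c, and the comments inside the loops are mine, so treat this as an illustrative sketch rather than verbatim source:

int schedule_on_each_cpu(work_func_t func)
{
	int cpu;
	int orig = -1;
	struct work_struct *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	get_online_cpus();

	/*
	 * When running in keventd don't schedule a work item on
	 * itself.  Can just call directly because the work queue is
	 * already bound.  This also is faster.
	 */
	if (current_is_keventd())
		orig = raw_smp_processor_id();

	/* Schedule the work on every other online cpu first ... */
	for_each_online_cpu(cpu) {
		struct work_struct *work = per_cpu_ptr(works, cpu);

		INIT_WORK(work, func);
		if (cpu != orig)
			schedule_work_on(cpu, work);
	}
	/* ... then execute directly on this cpu, in parallel. */
	if (orig >= 0)
		func(per_cpu_ptr(works, orig));

	for_each_online_cpu(cpu)
		flush_work(per_cpu_ptr(works, cpu));

	put_online_cpus();
	free_percpu(works);
	return 0;
}

Note the ordering: scheduling the other cpus' work items before making the direct call lets them run concurrently with it, which is the "while at it" improvement mentioned in the commit message.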