author    Tejun Heo <tj@kernel.org>    2016-02-09 17:59:38 -0500
committer Tejun Heo <tj@kernel.org>    2016-02-09 17:59:38 -0500
commit    f303fccb82928790ec58eea82722bd5c54d300b3 (patch)
tree      20030c728af441a4efa01c2612be4883bd431115 /kernel/workqueue.c
parent    ef557180447fa9a7a0affd3abb21ecceb4b5e125 (diff)
workqueue: implement "workqueue.debug_force_rr_cpu" debug feature
Workqueue used to guarantee local execution for work items queued without
explicit target CPU.  The guarantee is gone now which can break some usages
in subtle ways.  To flush out those cases, this patch implements a debug
feature which forces round-robin CPU selection for all such work items.

The debug feature defaults to off and can be enabled with a kernel
parameter.  The default can be flipped with a debug config option.

If you hit this commit during bisection, please refer to 041bd12e272c
("Revert "workqueue: make sure delayed work run in local cpu"") for more
information and ping me.

Signed-off-by: Tejun Heo <tj@kernel.org>
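For context, the commit message implies two ways to turn the debug feature
on once the patch is applied.  The boot parameter name comes from the
commit title and module_param_named() below; the sysfs path follows from
the 0644 permission on that parameter and is illustrative rather than
quoted from this commit:

    workqueue.debug_force_rr_cpu=1                                  # on the kernel command line, at boot
    echo 1 > /sys/module/workqueue/parameters/debug_force_rr_cpu    # at runtime, as root

Building with CONFIG_DEBUG_WQ_FORCE_RR_CPU=y flips the default to enabled.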
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--    kernel/workqueue.c    23
1 file changed, 21 insertions(+), 2 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 0547746..51d77e7 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -307,6 +307,18 @@ static cpumask_var_t wq_unbound_cpumask;
 /* CPU where unbound work was last round robin scheduled from this CPU */
 static DEFINE_PER_CPU(int, wq_rr_cpu_last);

+/*
+ * Local execution of unbound work items is no longer guaranteed.  The
+ * following always forces round-robin CPU selection on unbound work items
+ * to uncover usages which depend on it.
+ */
+#ifdef CONFIG_DEBUG_WQ_FORCE_RR_CPU
+static bool wq_debug_force_rr_cpu = true;
+#else
+static bool wq_debug_force_rr_cpu = false;
+#endif
+module_param_named(debug_force_rr_cpu, wq_debug_force_rr_cpu, bool, 0644);
+
 /* the per-cpu worker pools */
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS],
                                      cpu_worker_pools);
@@ -1309,10 +1321,17 @@ static bool is_chained_work(struct workqueue_struct *wq)
  */
 static int wq_select_unbound_cpu(int cpu)
 {
+        static bool printed_dbg_warning;
         int new_cpu;

-        if (cpumask_test_cpu(cpu, wq_unbound_cpumask))
-                return cpu;
+        if (likely(!wq_debug_force_rr_cpu)) {
+                if (cpumask_test_cpu(cpu, wq_unbound_cpumask))
+                        return cpu;
+        } else if (!printed_dbg_warning) {
+                pr_warn("workqueue: round-robin CPU selection forced, expect performance impact\n");
+                printed_dbg_warning = true;
+        }
+
         if (cpumask_empty(wq_unbound_cpumask))
                 return cpu;
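The hunk is cut off at this point.  As a rough, hypothetical userspace
model of the round-robin walk that the forced path falls through to (not
the remainder of the kernel function, which is not shown in this diff),
the selection logic can be pictured as follows; the names pick_next_cpu,
rr_last and NR_CPUS are made up for illustration:

/*
 * Hypothetical userspace sketch of round-robin CPU selection: remember
 * the last CPU handed out, advance to the next CPU allowed by the mask,
 * and wrap around when the end of the mask is reached.
 */
#include <stdio.h>

#define NR_CPUS 8

static int rr_last = -1;        /* analogous to the per-CPU wq_rr_cpu_last */

static int pick_next_cpu(unsigned int allowed_mask, int this_cpu)
{
        int i, cpu;

        /* advance past the previous pick, wrapping around the mask */
        for (i = 1; i <= NR_CPUS; i++) {
                cpu = (rr_last + i) % NR_CPUS;
                if (allowed_mask & (1u << cpu)) {
                        rr_last = cpu;
                        return cpu;
                }
        }
        return this_cpu;        /* empty mask: keep the local CPU */
}

int main(void)
{
        unsigned int mask = 0x0d;       /* CPUs 0, 2 and 3 allowed */
        int i;

        /* prints 0, 2, 3, 0, 2, 3: successive picks cycle through the mask */
        for (i = 0; i < 6; i++)
                printf("pick %d -> cpu %d\n", i, pick_next_cpu(mask, 0));
        return 0;
}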