author    Takashi Iwai <tiwai@suse.de>    2016-03-07 11:06:46 +0100
committer Takashi Iwai <tiwai@suse.de>    2016-03-07 11:06:46 +0100
commit    ad09ef2cce91e3a98a32e3bb0a5982a6e8920aa1 (patch)
tree      6d46455cb546b65b041acc1293043c71ef93da40 /kernel/workqueue.c
parent    790b415c98de62602810b0eedce26f0f9d6ddd78 (diff)
parent    02db17ff376893f65f45974b0897e70e6695034e (diff)
Merge tag 'asoc-fix-v4.5-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound into for-linus
ASoC: Fixes for v4.5

This is far too big a set of fixes for this late in the release cycle,
but the overwhelming bulk is essentially the same simple fix from
Takashi for a cut'n'pasted 64-bit cleanliness issue in the userspace
interface: drivers were accessing things through the wrong element of a
union. This worked OK on 32-bit platforms, where the correct element
happened to be aligned the same way, but 64-bit ABIs lay the two union
members out in different places. Not all of the fixes are tagged for
stable, since some of these chips have vanishingly little chance of
being used in 64-bit systems.

The other changes are:

 - A fix for Qualcomm devices to work on big endian systems. The
   original change is actually correct, but it triggered a bug in
   regmap which is too invasive to fix for this cycle and can be worked
   around by just letting regmap pick the default.
 - A fix for the Samsung I2S driver locking, which wasn't using IRQ
   safe spinlocks when it needed to.
 - A fix for the new Intel Sky Lake driver, which forgot that C pointer
   arithmetic takes the type of the pointer into consideration.
 - A revert of a change to the FSL SSI driver that broke some systems.
 - A fix for the cleanup path of the wm9713 driver.
 - A fix for some incorrect register definitions in the ADAU17x1 driver
   that caused misclocking in some configurations.
 - A fix for the tracepoints for jack detection, to avoid using an
   internal field of the core jack structure which is no longer present
   in all configurations.
 - A fix for another of the new Intel drivers, which tried to write to
   a string literal.
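As an illustration of the 64-bit union issue described above, here is a
minimal userspace sketch. The union and its member names are
hypothetical, not the real ALSA control ABI: an enum value stored as
unsigned int is read back through a long member of the same union. On
ILP32 both members are 4 bytes, so the indices happen to line up; on
LP64 long is 8 bytes and every index past 0 lands at a different
offset.

#include <stdio.h>

union ctl_value {
	unsigned int item[4];	/* correct member for enum controls */
	long integer[4];	/* wrong member some drivers were using */
};

int main(void)
{
	union ctl_value v = { .item = { 10, 20, 30, 40 } };

	/* Matches on 32-bit; reads garbage on 64-bit, where integer[1]
	 * overlays item[2] and item[3] instead of item[1]. */
	printf("item[1] = %u, via wrong member: %ld\n",
	       v.item[1], v.integer[1]);
	return 0;
}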
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--    kernel/workqueue.c    74
1 file changed, 67 insertions(+), 7 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 61a0264..7ff5dc7 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -301,7 +301,23 @@ static DEFINE_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */
static LIST_HEAD(workqueues); /* PR: list of all workqueues */
static bool workqueue_freezing; /* PL: have wqs started freezing? */
-static cpumask_var_t wq_unbound_cpumask; /* PL: low level cpumask for all unbound wqs */
+/* PL: allowable cpus for unbound wqs and work items */
+static cpumask_var_t wq_unbound_cpumask;
+
+/* CPU where unbound work was last round robin scheduled from this CPU */
+static DEFINE_PER_CPU(int, wq_rr_cpu_last);
+
+/*
+ * Local execution of unbound work items is no longer guaranteed. The
+ * following always forces round-robin CPU selection on unbound work items
+ * to uncover usages which depend on it.
+ */
+#ifdef CONFIG_DEBUG_WQ_FORCE_RR_CPU
+static bool wq_debug_force_rr_cpu = true;
+#else
+static bool wq_debug_force_rr_cpu = false;
+#endif
+module_param_named(debug_force_rr_cpu, wq_debug_force_rr_cpu, bool, 0644);
/* the per-cpu worker pools */
static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS],
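The debug knob added above follows a common kernel pattern: pick the
boolean's default from a Kconfig symbol, then expose it as a writable
module parameter. A minimal sketch of that pattern (the CONFIG symbol,
variable, and module are hypothetical, not the patch itself):

#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

#ifdef CONFIG_MY_DEBUG_DEFAULT_ON
static bool my_debug_knob = true;
#else
static bool my_debug_knob = false;
#endif
/* 0644: readable by everyone, root-writable at runtime via
 * /sys/module/<module>/parameters/my_debug_knob */
module_param(my_debug_knob, bool, 0644);

static int __init my_init(void)
{
	pr_info("my_debug_knob=%d\n", my_debug_knob);
	return 0;
}
module_init(my_init);
MODULE_LICENSE("GPL");

Since workqueue.c is built in, the real parameter appears as
workqueue.debug_force_rr_cpu on the kernel command line and under
/sys/module/workqueue/parameters/ at runtime.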
@@ -570,6 +586,16 @@ static struct pool_workqueue *unbound_pwq_by_node(struct workqueue_struct *wq,
int node)
{
assert_rcu_or_wq_mutex_or_pool_mutex(wq);
+
+ /*
+ * XXX: @node can be NUMA_NO_NODE if CPU goes offline while a
+ * delayed item is pending. The plan is to keep CPU -> NODE
+ * mapping valid and stable across CPU on/offlines. Once that
+ * happens, this workaround can be removed.
+ */
+ if (unlikely(node == NUMA_NO_NODE))
+ return wq->dfl_pwq;
+
return rcu_dereference_raw(wq->numa_pwq_tbl[node]);
}
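NUMA_NO_NODE is -1, so without the fallback above the lookup would
index one element before the start of numa_pwq_tbl. A minimal
userspace sketch of the same sentinel-and-fallback shape (all names
hypothetical):

#include <stdio.h>

#define NO_NODE (-1)	/* mirrors NUMA_NO_NODE */

static const char *tbl[] = { "node0", "node1" };
static const char *dfl = "default";

static const char *lookup(int node)
{
	if (node == NO_NODE)	/* sentinel: mapping unknown */
		return dfl;	/* fall back instead of reading tbl[-1] */
	return tbl[node];
}

int main(void)
{
	printf("%s %s\n", lookup(1), lookup(NO_NODE)); /* node1 default */
	return 0;
}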
@@ -1298,6 +1324,39 @@ static bool is_chained_work(struct workqueue_struct *wq)
return worker && worker->current_pwq->wq == wq;
}
+/*
+ * When queueing an unbound work item to a wq, prefer local CPU if allowed
+ * by wq_unbound_cpumask. Otherwise, round robin among the allowed ones to
+ * avoid perturbing sensitive tasks.
+ */
+static int wq_select_unbound_cpu(int cpu)
+{
+ static bool printed_dbg_warning;
+ int new_cpu;
+
+ if (likely(!wq_debug_force_rr_cpu)) {
+ if (cpumask_test_cpu(cpu, wq_unbound_cpumask))
+ return cpu;
+ } else if (!printed_dbg_warning) {
+ pr_warn("workqueue: round-robin CPU selection forced, expect performance impact\n");
+ printed_dbg_warning = true;
+ }
+
+ if (cpumask_empty(wq_unbound_cpumask))
+ return cpu;
+
+ new_cpu = __this_cpu_read(wq_rr_cpu_last);
+ new_cpu = cpumask_next_and(new_cpu, wq_unbound_cpumask, cpu_online_mask);
+ if (unlikely(new_cpu >= nr_cpu_ids)) {
+ new_cpu = cpumask_first_and(wq_unbound_cpumask, cpu_online_mask);
+ if (unlikely(new_cpu >= nr_cpu_ids))
+ return cpu;
+ }
+ __this_cpu_write(wq_rr_cpu_last, new_cpu);
+
+ return new_cpu;
+}
+
static void __queue_work(int cpu, struct workqueue_struct *wq,
struct work_struct *work)
{
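The core of wq_select_unbound_cpu() above is a "next set bit after the
last pick, wrapping around once" scan over the intersection of the
allowed and online masks. A compact userspace sketch of that scan,
with plain bitmasks standing in for cpumasks (all names hypothetical):

#include <stdio.h>

static int last_pick = -1;	/* per-CPU in the kernel; global here */

static int rr_next(unsigned int allowed, unsigned int online,
		   int ncpus, int fallback)
{
	unsigned int usable = allowed & online;
	int i;

	if (!usable)		/* no usable CPU: keep the caller's */
		return fallback;
	for (i = last_pick + 1; i < ncpus; i++)	/* scan past last pick */
		if (usable & (1u << i))
			return last_pick = i;
	for (i = 0; i < ncpus; i++)		/* wrap to the start */
		if (usable & (1u << i))
			return last_pick = i;
	return fallback;
}

int main(void)
{
	int n;

	/* allowed = CPUs 0, 2, 3; online = all four */
	for (n = 0; n < 6; n++)
		printf("%d ", rr_next(0xD, 0xF, 4, 0));
	printf("\n");	/* prints: 0 2 3 0 2 3 */
	return 0;
}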
@@ -1323,7 +1382,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
return;
retry:
if (req_cpu == WORK_CPU_UNBOUND)
- cpu = raw_smp_processor_id();
+ cpu = wq_select_unbound_cpu(raw_smp_processor_id());
/* pwq which will be used unless @work is executing elsewhere */
if (!(wq->flags & WQ_UNBOUND))
@@ -1464,13 +1523,13 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
timer_stats_timer_set_start_info(&dwork->timer);
dwork->wq = wq;
- /* timer isn't guaranteed to run in this cpu, record earlier */
- if (cpu == WORK_CPU_UNBOUND)
- cpu = raw_smp_processor_id();
dwork->cpu = cpu;
timer->expires = jiffies + delay;
- add_timer_on(timer, cpu);
+ if (unlikely(cpu != WORK_CPU_UNBOUND))
+ add_timer_on(timer, cpu);
+ else
+ add_timer(timer);
}
/**
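The effect of the hunk above is to defer CPU selection for
WORK_CPU_UNBOUND delayed work from queue time to timer expiry: the
unresolved value is kept in dwork->cpu, and __queue_work() resolves it
via wq_select_unbound_cpu() when the timer fires. A simplified sketch
of the timer path (close to, but not verbatim, the v4.5 code):

void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;

	/* dwork->cpu may still be WORK_CPU_UNBOUND here; __queue_work()
	 * picks the actual CPU now, at expiry, not at queue time. */
	__queue_work(dwork->cpu, dwork->wq, &dwork->work);
}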
@@ -2355,7 +2414,8 @@ static void check_flush_dependency(struct workqueue_struct *target_wq,
WARN_ONCE(current->flags & PF_MEMALLOC,
"workqueue: PF_MEMALLOC task %d(%s) is flushing !WQ_MEM_RECLAIM %s:%pf",
current->pid, current->comm, target_wq->name, target_func);
- WARN_ONCE(worker && (worker->current_pwq->wq->flags & WQ_MEM_RECLAIM),
+ WARN_ONCE(worker && ((worker->current_pwq->wq->flags &
+ (WQ_MEM_RECLAIM | __WQ_LEGACY)) == WQ_MEM_RECLAIM),
"workqueue: WQ_MEM_RECLAIM %s:%pf is flushing !WQ_MEM_RECLAIM %s:%pf",
worker->current_pwq->wq->name, worker->current_func,
target_wq->name, target_func);
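The rewritten condition above is a masked-equality test: it warns only
when WQ_MEM_RECLAIM is set and __WQ_LEGACY is clear, in a single
comparison. A standalone sketch of the idiom (bit positions are
illustrative, in the spirit of the real flags):

#include <stdio.h>

#define WQ_MEM_RECLAIM	(1 << 3)
#define __WQ_LEGACY	(1 << 18)

/* true iff WQ_MEM_RECLAIM is set AND __WQ_LEGACY is clear */
static int reclaim_but_not_legacy(unsigned int flags)
{
	return (flags & (WQ_MEM_RECLAIM | __WQ_LEGACY)) == WQ_MEM_RECLAIM;
}

int main(void)
{
	printf("%d\n", reclaim_but_not_legacy(WQ_MEM_RECLAIM));		   /* 1 */
	printf("%d\n", reclaim_but_not_legacy(WQ_MEM_RECLAIM | __WQ_LEGACY)); /* 0 */
	return 0;
}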