author    Steven Rostedt (Red Hat) <rostedt@goodmis.org>  2016-07-15 15:48:56 -0400
committer Steven Rostedt <rostedt@goodmis.org>  2016-09-02 12:47:54 -0400
commit    0330f7aa8ee63d0c435c0cb4e47ea06235ee4b7f (patch)
tree      67fa852b52a5c4c09ec74f6ba38c7b44f14af180 /kernel/trace
parent    c850ed38db5f46441565174ef57c271124cce568 (diff)
tracing: Have hwlat trace migrate across tracing_cpumask CPUs
Instead of having the hwlat detector thread stay on one CPU, have it migrate across all the CPUs specified by tracing_cpumask. If the user modifies the thread's CPU affinity, the migration will stop until the tracer is instantiated again. The migration happens at the end of each window (period).

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
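In practice the sampling thread hops to the next allowed CPU once per pass through its loop, i.e. at the boundary between windows. A condensed sketch of the flow the hunks below add (the window/width sleep bookkeeping already in kthread_fn is omitted here and is untouched by this patch):

    while (!kthread_should_stop()) {
            move_to_next_cpu();     /* new: hop to the next CPU in tracing_cpumask */
            local_irq_disable();
            get_sample();           /* busy-poll for the sample width */
            local_irq_enable();
            /* ... sleep out the remainder of the window ... */
    }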
Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/trace_hwlat.c | 55
1 file changed, 55 insertions(+), 0 deletions(-)
diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c
index 08dfabe..65aab39 100644
--- a/kernel/trace/trace_hwlat.c
+++ b/kernel/trace/trace_hwlat.c
@@ -42,6 +42,7 @@
#include <linux/kthread.h>
#include <linux/tracefs.h>
#include <linux/uaccess.h>
+#include <linux/cpumask.h>
#include <linux/delay.h>
#include "trace.h"
@@ -211,6 +212,57 @@ out:
return ret;
}
+static struct cpumask save_cpumask;
+static bool disable_migrate;
+
+static void move_to_next_cpu(void)
+{
+ static struct cpumask *current_mask;
+ int next_cpu;
+
+ if (disable_migrate)
+ return;
+
+ /* Just pick the first CPU on first iteration */
+ if (!current_mask) {
+ current_mask = &save_cpumask;
+ get_online_cpus();
+ cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask);
+ put_online_cpus();
+ next_cpu = cpumask_first(current_mask);
+ goto set_affinity;
+ }
+
+ /*
+ * If for some reason the user modifies the CPU affinity
+ * of this thread, then stop migrating for the duration
+ * of the current test.
+ */
+ if (!cpumask_equal(current_mask, &current->cpus_allowed))
+ goto disable;
+
+ get_online_cpus();
+ cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask);
+ next_cpu = cpumask_next(smp_processor_id(), current_mask);
+ put_online_cpus();
+
+ if (next_cpu >= nr_cpu_ids)
+ next_cpu = cpumask_first(current_mask);
+
+ set_affinity:
+ if (next_cpu >= nr_cpu_ids) /* Shouldn't happen! */
+ goto disable;
+
+ cpumask_clear(current_mask);
+ cpumask_set_cpu(next_cpu, current_mask);
+
+ sched_setaffinity(0, current_mask);
+ return;
+
+ disable:
+ disable_migrate = true;
+}
+
/*
* kthread_fn - The CPU time sampling/hardware latency detection kernel thread
*
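The round-robin wrap above relies on the cpumask convention that a search past the last set bit returns a value >= nr_cpu_ids, at which point the code restarts from cpumask_first(). A minimal userspace model of just that selection logic (demo_next_cpu, demo_first_cpu, and the 8-CPU mask are invented here for illustration; this is not kernel code):

    #include <stdio.h>

    #define NR_CPUS 8

    /* Next set bit strictly after 'cpu', or NR_CPUS if none remain
     * (mirrors cpumask_next() running off the end of the mask). */
    static int demo_next_cpu(unsigned int mask, int cpu)
    {
            for (int i = cpu + 1; i < NR_CPUS; i++)
                    if (mask & (1u << i))
                            return i;
            return NR_CPUS;
    }

    /* First set bit, or NR_CPUS for an empty mask (the "Shouldn't
     * happen!" case guarded against at set_affinity above). */
    static int demo_first_cpu(unsigned int mask)
    {
            for (int i = 0; i < NR_CPUS; i++)
                    if (mask & (1u << i))
                            return i;
            return NR_CPUS;
    }

    int main(void)
    {
            unsigned int mask = 0x16;       /* CPUs 1, 2 and 4 allowed */
            int cpu = demo_first_cpu(mask);

            for (int window = 0; window < 6; window++) {
                    printf("window %d runs on CPU %d\n", window, cpu);
                    cpu = demo_next_cpu(mask, cpu);
                    if (cpu >= NR_CPUS)     /* walked off the end: wrap */
                            cpu = demo_first_cpu(mask);
            }
            return 0;
    }

Running this prints CPUs 1, 2, 4, 1, 2, 4: the same end-of-window rotation the thread performs, with the extra kernel-side detail that the mask is re-intersected with cpu_online_mask each time, so CPUs taken offline drop out of the rotation.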
@@ -230,6 +282,8 @@ static int kthread_fn(void *data)
while (!kthread_should_stop()) {
+ move_to_next_cpu();
+
local_irq_disable();
get_sample();
local_irq_enable();
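Since move_to_next_cpu() runs at the top of each pass, while interrupts are still enabled, any scheduling cost of sched_setaffinity() is paid before get_sample() starts busy-polling, so the hop itself should not show up as measured hardware latency.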
@@ -473,6 +527,7 @@ static int hwlat_tracer_init(struct trace_array *tr)
hwlat_trace = tr;
+ disable_migrate = false;
hwlat_data.count = 0;
tr->max_latency = 0;
save_tracing_thresh = tracing_thresh;
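Clearing disable_migrate here matches the commit message: a user-forced affinity change pins the thread only for the current session, and migration is re-armed the next time the hwlat tracer is instantiated.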