-rw-r--r--  kernel/trace/trace.c              | 7 ++++++-
-rw-r--r--  kernel/trace/trace.h              | 2 ++
-rw-r--r--  kernel/trace/trace_functions.c    | 2 ++
-rw-r--r--  kernel/trace/trace_sched_switch.c | 7 +++++--
4 files changed, 15 insertions(+), 3 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index a102b11..1281969 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -620,7 +620,12 @@ static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
static int cmdline_idx;
static DEFINE_SPINLOCK(trace_cmdline_lock);
-atomic_t trace_record_cmdline_disabled;
+
+/* trace in all context switches */
+atomic_t trace_record_cmdline_enabled __read_mostly;
+
+/* temporarily disable recording */
+atomic_t trace_record_cmdline_disabled __read_mostly;
static void trace_init_cmdlines(void)
{
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 21c29ee..8991c5e 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -216,6 +216,8 @@ extern unsigned long nsecs_to_usecs(unsigned long nsecs);
extern unsigned long tracing_max_latency;
extern unsigned long tracing_thresh;
+extern atomic_t trace_record_cmdline_enabled;
+
void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
void update_max_tr_single(struct trace_array *tr,
struct task_struct *tsk, int cpu);
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 4165d34..0a08465 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -29,12 +29,14 @@ static void function_reset(struct trace_array *tr)
static void start_function_trace(struct trace_array *tr)
{
function_reset(tr);
+ atomic_inc(&trace_record_cmdline_enabled);
tracing_start_function_trace();
}
static void stop_function_trace(struct trace_array *tr)
{
tracing_stop_function_trace();
+ atomic_dec(&trace_record_cmdline_enabled);
}
static void function_trace_init(struct trace_array *tr)
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index 5671db0..a337647 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -29,8 +29,6 @@ ctx_switch_func(void *__rq, struct task_struct *prev, struct task_struct *next)
if (!tracer_enabled)
return;
- tracing_record_cmdline(prev);
-
local_irq_save(flags);
cpu = raw_smp_processor_id();
data = tr->data[cpu];
@@ -73,6 +71,9 @@ void
ftrace_ctx_switch(void *__rq, struct task_struct *prev,
struct task_struct *next)
{
+ if (unlikely(atomic_read(&trace_record_cmdline_enabled)))
+ tracing_record_cmdline(prev);
+
/*
* If tracer_switch_func only points to the local
* switch func, it still needs the ptr passed to it.
@@ -134,11 +135,13 @@ static void sched_switch_reset(struct trace_array *tr)
static void start_sched_trace(struct trace_array *tr)
{
sched_switch_reset(tr);
+ atomic_inc(&trace_record_cmdline_enabled);
tracer_enabled = 1;
}
static void stop_sched_trace(struct trace_array *tr)
{
+ atomic_dec(&trace_record_cmdline_enabled);
tracer_enabled = 0;
}
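
The hunks above all follow one pattern: each tracer bumps trace_record_cmdline_enabled in its start routine and drops it in its stop routine, and the context-switch hook records the previous task's comm only while that counter is nonzero. Below is a minimal userspace C sketch of the same reference-count idea, not part of the commit; every name in it is an illustrative stand-in for the kernel symbols, using C11 atomics in place of the kernel's atomic_t helpers.

/* Standalone illustration only; compile with a C11 compiler. */
#include <stdatomic.h>
#include <stdio.h>

/* stand-in for trace_record_cmdline_enabled */
static atomic_int record_cmdline_enabled;

/* stand-in for tracing_record_cmdline() */
static void record_cmdline(const char *comm)
{
	printf("saving comm: %s\n", comm);
}

/* like start_sched_trace()/start_function_trace(): enable recording */
static void start_tracer(void)
{
	atomic_fetch_add(&record_cmdline_enabled, 1);
}

/* like stop_sched_trace()/stop_function_trace(): disable recording */
static void stop_tracer(void)
{
	atomic_fetch_sub(&record_cmdline_enabled, 1);
}

/* like ftrace_ctx_switch(): record only while some tracer holds the count */
static void on_context_switch(const char *prev_comm)
{
	if (atomic_load(&record_cmdline_enabled))
		record_cmdline(prev_comm);
}

int main(void)
{
	on_context_switch("idle");   /* no tracer active: nothing recorded */
	start_tracer();
	on_context_switch("bash");   /* recorded */
	stop_tracer();
	on_context_switch("cc1");    /* nothing recorded again */
	return 0;
}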