author     Dmitry Safonov <0x7f454c46@gmail.com>   2015-11-06 22:07:26 +0300
committer  Steven Rostedt <rostedt@goodmis.org>    2015-11-07 13:25:14 -0500
commit     03e88ae6b369da2a26a6e09ad165e57d210789cd (patch)
tree       263c536f2003d6cd3779b85cdf4988ab9b88c0ca /kernel/trace
parent     8b1291994d8e5e621a8af7e165b106e50d04bbf1 (diff)
tracing: Remove unused ftrace_cpu_disabled per cpu variable
Since the ring buffer is lockless, there is no need to disable ftrace per
CPU, and nothing does so anymore: after commit 68179686ac67cb ("tracing:
Remove ftrace_disable/enable_cpu()"), ftrace_cpu_disabled stays the same
after initialization; nothing ever changes it.
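For reference, this is the (now dead) per-CPU gate the patch deletes, shown
as a self-contained sketch rather than the exact kernel code:
example_trace_write() is a hypothetical stand-in for trace_function() and
the function_graph helpers, while DEFINE_PER_CPU(), __this_cpu_read() and
unlikely() are the real kernel macros that appear in the hunks below.

#include <linux/percpu.h>
#include <linux/compiler.h>

/* Defined once at boot and, since 68179686ac67cb, never written again. */
DEFINE_PER_CPU(int, ftrace_cpu_disabled);

static void example_trace_write(void)
{
	/* Early-out that guarded every function/function_graph event;
	 * with the flag permanently zero, this branch can never fire. */
	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return;

	/* ... reserve and commit a ring buffer event as usual ... */
}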
ftrace_cpu_disabled shouldn't be used by any external module either, since
it disables only the function and function_graph tracers, not any other
tracer.
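A minimal sketch of what remains on the hot path once the gate is gone
(abridged from the trace_function() hunk below): trace_buffer_lock_reserve()
already returns NULL when the ring buffer refuses a write, so callers bail
out on !event with or without a per-CPU flag.

	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
					  flags, pc);
	if (!event)
		return;		/* nothing reserved, nothing traced */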
Link: http://lkml.kernel.org/r/1446836846-22239-1-git-send-email-0x7f454c46@gmail.com
Signed-off-by: Dmitry Safonov <0x7f454c46@gmail.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/trace.c                  |  6 -
-rw-r--r--  kernel/trace/trace.h                  |  1 -
-rw-r--r--  kernel/trace/trace_functions_graph.c  |  6 -
3 files changed, 0 insertions, 13 deletions
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 08af79c..b115826 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -100,8 +100,6 @@ static DEFINE_PER_CPU(bool, trace_cmdline_save);
  */
 static int tracing_disabled = 1;
 
-DEFINE_PER_CPU(int, ftrace_cpu_disabled);
-
 cpumask_var_t __read_mostly tracing_buffer_mask;
 
 /*
@@ -1775,10 +1773,6 @@ trace_function(struct trace_array *tr,
 	struct ring_buffer_event *event;
 	struct ftrace_entry *entry;
 
-	/* If we are reading the ring buffer, don't trace */
-	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
-		return;
-
 	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
 					  flags, pc);
 	if (!event)
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index dd76208..919d9d0 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -667,7 +667,6 @@ extern int DYN_FTRACE_TEST_NAME2(void);
 
 extern bool ring_buffer_expanded;
 extern bool tracing_selftest_disabled;
-DECLARE_PER_CPU(int, ftrace_cpu_disabled);
 
 #ifdef CONFIG_FTRACE_STARTUP_TEST
 extern int trace_selftest_startup_function(struct tracer *trace,
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 92382af..a663cbb 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -288,9 +288,6 @@ int __trace_graph_entry(struct trace_array *tr,
 	struct ring_buffer *buffer = tr->trace_buffer.buffer;
 	struct ftrace_graph_ent_entry *entry;
 
-	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
-		return 0;
-
 	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
 					  sizeof(*entry), flags, pc);
 	if (!event)
@@ -403,9 +400,6 @@ void __trace_graph_return(struct trace_array *tr,
 	struct ring_buffer *buffer = tr->trace_buffer.buffer;
 	struct ftrace_graph_ret_entry *entry;
 
-	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
-		return;
-
 	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
 					  sizeof(*entry), flags, pc);
 	if (!event)