author     Ingo Molnar <mingo@elte.hu>    2008-07-14 15:58:35 +0200
committer  Ingo Molnar <mingo@elte.hu>    2008-07-14 15:58:35 +0200
commit     6712e299b7dc78aa4971b85e803435ee6d49a9dd (patch)
tree       b3d17a2d068737ec07727b28e93c7d374c27721b /kernel/trace
parent     ec1bb60bbff0386c3ec25360e7a8c72f467a6ff1 (diff)
parent     b2613e370dbeb69edbff989382fa54f2395aa471 (diff)
Merge branch 'tracing/ftrace' into auto-ftrace-next
Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/ftrace.c              17
-rw-r--r--  kernel/trace/trace.c               24
-rw-r--r--  kernel/trace/trace.h               10
-rw-r--r--  kernel/trace/trace_functions.c      3
-rw-r--r--  kernel/trace/trace_sched_switch.c   4
-rw-r--r--  kernel/trace/trace_sched_wakeup.c   3
6 files changed, 52 insertions, 9 deletions
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 0f271c4..4231a3d 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1602,6 +1602,23 @@ core_initcall(ftrace_dynamic_init);
#endif /* CONFIG_DYNAMIC_FTRACE */
/**
+ * ftrace_kill_atomic - kill ftrace from critical sections
+ *
+ * This function should be used by panic code. It stops ftrace
+ * but in a not so nice way. If you need to simply kill ftrace
+ * from a non-atomic section, use ftrace_kill.
+ */
+void ftrace_kill_atomic(void)
+{
+ ftrace_disabled = 1;
+ ftrace_enabled = 0;
+#ifdef CONFIG_DYNAMIC_FTRACE
+ ftraced_suspend = -1;
+#endif
+ clear_ftrace_function();
+}
+
+/**
* ftrace_kill - totally shutdown ftrace
*
* This is a safety measure. If something was detected that seems
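The new ftrace_kill_atomic() is intended for code that must stop tracing from atomic or panic context, where the gentler ftrace_kill() is not safe to call. A minimal sketch of such a caller follows; the panic notifier below is hypothetical, not part of this commit, and it assumes ftrace_kill_atomic() is declared in <linux/ftrace.h>.

/*
 * Illustrative sketch only: a hypothetical panic notifier that shuts
 * ftrace down via the new atomic-safe entry point.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/ftrace.h>

static int example_panic_cb(struct notifier_block *nb,
                            unsigned long event, void *unused)
{
        /* Stop ftrace immediately; per the comment above this is usable
         * from panic/atomic context. */
        ftrace_kill_atomic();
        return NOTIFY_DONE;
}

static struct notifier_block example_panic_nb = {
        .notifier_call = example_panic_cb,
};

static int __init example_panic_init(void)
{
        atomic_notifier_chain_register(&panic_notifier_list,
                                       &example_panic_nb);
        return 0;
}
core_initcall(example_panic_init);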
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index e46de64..868e121 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -96,6 +96,9 @@ static DEFINE_PER_CPU(struct trace_array_cpu, max_data);
/* tracer_enabled is used to toggle activation of a tracer */
static int tracer_enabled = 1;
+/* function tracing enabled */
+int ftrace_function_enabled;
+
/*
* trace_nr_entries is the number of entries that is allocated
* for a buffer. Note, the number of entries is always rounded
@@ -134,6 +137,7 @@ static notrace void no_trace_init(struct trace_array *tr)
{
int cpu;
+ ftrace_function_enabled = 0;
if(tr->ctrl)
for_each_online_cpu(cpu)
tracing_reset(tr->data[cpu]);
@@ -1027,7 +1031,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
long disabled;
int cpu;
- if (unlikely(!tracer_enabled))
+ if (unlikely(!ftrace_function_enabled))
return;
if (skip_trace(ip))
@@ -1052,11 +1056,15 @@ static struct ftrace_ops trace_ops __read_mostly =
void tracing_start_function_trace(void)
{
+ ftrace_function_enabled = 0;
register_ftrace_function(&trace_ops);
+ if (tracer_enabled)
+ ftrace_function_enabled = 1;
}
void tracing_stop_function_trace(void)
{
+ ftrace_function_enabled = 0;
unregister_ftrace_function(&trace_ops);
}
#endif
@@ -1383,7 +1391,7 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter)
"server",
#elif defined(CONFIG_PREEMPT_VOLUNTARY)
"desktop",
-#elif defined(CONFIG_PREEMPT_DESKTOP)
+#elif defined(CONFIG_PREEMPT)
"preempt",
#else
"unknown",
@@ -1892,8 +1900,10 @@ __tracing_open(struct inode *inode, struct file *file, int *ret)
m->private = iter;
/* stop the trace while dumping */
- if (iter->tr->ctrl)
+ if (iter->tr->ctrl) {
tracer_enabled = 0;
+ ftrace_function_enabled = 0;
+ }
if (iter->trace && iter->trace->open)
iter->trace->open(iter);
@@ -1926,8 +1936,14 @@ int tracing_release(struct inode *inode, struct file *file)
iter->trace->close(iter);
/* reenable tracing if it was previously enabled */
- if (iter->tr->ctrl)
+ if (iter->tr->ctrl) {
tracer_enabled = 1;
+ /*
+ * It is safe to enable function tracing even if it
+ * isn't used
+ */
+ ftrace_function_enabled = 1;
+ }
mutex_unlock(&trace_types_lock);
seq_release(inode, file);
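The trace.c changes decouple function tracing from the global tracer_enabled flag: the hot callback now checks ftrace_function_enabled, and the start/stop paths clear that flag before touching registration so events are only recorded once the hook is fully installed. A simplified illustration of the pattern (not the kernel code itself; all names below are hypothetical):

/* Illustrative enable-flag pattern, analogous to ftrace_function_enabled */
static int example_enabled;

static void example_callback(unsigned long ip, unsigned long parent_ip)
{
        if (!example_enabled)          /* cheap early-out in the hot path */
                return;
        /* ... record the event ... */
}

static void example_start(void)
{
        example_enabled = 0;           /* keep the callback quiet while installing */
        /* install the hook here, e.g. register_ftrace_function(&ops) */
        example_enabled = 1;           /* only now allow events to be recorded */
}

static void example_stop(void)
{
        example_enabled = 0;           /* silence the callback first */
        /* remove the hook here, e.g. unregister_ftrace_function(&ops) */
}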
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 8cb215b..f69f867 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -223,8 +223,6 @@ void trace_function(struct trace_array *tr,
unsigned long parent_ip,
unsigned long flags);
-void tracing_start_function_trace(void);
-void tracing_stop_function_trace(void);
void tracing_start_cmdline_record(void);
void tracing_stop_cmdline_record(void);
int register_tracer(struct tracer *type);
@@ -241,6 +239,14 @@ void update_max_tr_single(struct trace_array *tr,
extern cycle_t ftrace_now(int cpu);
+#ifdef CONFIG_FTRACE
+void tracing_start_function_trace(void);
+void tracing_stop_function_trace(void);
+#else
+# define tracing_start_function_trace() do { } while (0)
+# define tracing_stop_function_trace() do { } while (0)
+#endif
+
#ifdef CONFIG_CONTEXT_SWITCH_TRACER
typedef void
(*tracer_switch_func_t)(void *private,
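The trace.h change guards the function-trace start/stop prototypes with CONFIG_FTRACE and provides no-op macro stubs otherwise, so other tracers can call them unconditionally. An illustrative (hypothetical) in-tree caller, assuming the local "trace.h" header:

/* Illustrative caller -- compiles whether or not CONFIG_FTRACE is set */
#include "trace.h"

static void example_tracer_start(struct trace_array *tr)
{
        tracing_start_cmdline_record();
        tracing_start_function_trace();   /* no-op macro if !CONFIG_FTRACE */
}

static void example_tracer_stop(struct trace_array *tr)
{
        tracing_stop_function_trace();    /* no-op macro if !CONFIG_FTRACE */
        tracing_stop_cmdline_record();
}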
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 7ee7dcd..3121448 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -28,7 +28,10 @@ static void function_reset(struct trace_array *tr)
static void start_function_trace(struct trace_array *tr)
{
+ tr->cpu = get_cpu();
function_reset(tr);
+ put_cpu();
+
tracing_start_cmdline_record();
tracing_start_function_trace();
}
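get_cpu() disables preemption and returns the current CPU id, and put_cpu() re-enables preemption, so recording tr->cpu inside that window ties the reset to a stable CPU. A generic sketch of that pattern (illustrative, not the tracer code itself):

/* Illustrative get_cpu()/put_cpu() pattern */
#include <linux/smp.h>

static void example_per_cpu_section(int *cpu_out)
{
        int cpu = get_cpu();    /* disables preemption, returns current CPU */

        *cpu_out = cpu;
        /* ... work that must not migrate to another CPU ... */

        put_cpu();              /* re-enables preemption */
}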
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index 93a6620..cb817a2 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -227,14 +227,14 @@ void tracing_stop_cmdline_record(void)
static void start_sched_trace(struct trace_array *tr)
{
sched_switch_reset(tr);
- tracer_enabled = 1;
tracing_start_cmdline_record();
+ tracer_enabled = 1;
}
static void stop_sched_trace(struct trace_array *tr)
{
- tracing_stop_cmdline_record();
tracer_enabled = 0;
+ tracing_stop_cmdline_record();
}
static void sched_switch_trace_init(struct trace_array *tr)
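Both scheduler tracers are reordered so that tracer_enabled is set only after the supporting machinery (cmdline recording here, the ftrace hook in the wakeup tracer below) is active, and cleared before that machinery is torn down, so a probe never records into half-initialized state. Schematically (illustrative, condensed from the two diffs; names are hypothetical):

/* Illustrative "enable last, disable first" ordering */
static int tracer_active;

static void example_setup_resources(void)    { /* e.g. tracing_start_cmdline_record() */ }
static void example_teardown_resources(void) { /* e.g. tracing_stop_cmdline_record() */ }

static void example_start(void)
{
        example_setup_resources();
        tracer_active = 1;             /* enable last: probes may fire from here on */
}

static void example_stop(void)
{
        tracer_active = 0;             /* disable first: probes stop recording */
        example_teardown_resources();
}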
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index bf7e91c..3c8d61d 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -352,9 +352,10 @@ static void start_wakeup_tracer(struct trace_array *tr)
*/
smp_wmb();
- tracer_enabled = 1;
register_ftrace_function(&trace_ops);
+ tracer_enabled = 1;
+
return;
fail_deprobe_wake_new:
marker_probe_unregister("kernel_sched_wakeup_new",