Diffstat (limited to 'kernel/trace/trace_sysprof.c')
-rw-r--r--  kernel/trace/trace_sysprof.c | 67
1 file changed, 63 insertions(+), 4 deletions(-)
diff --git a/kernel/trace/trace_sysprof.c b/kernel/trace/trace_sysprof.c
index 6c139bc..ba55b87 100644
--- a/kernel/trace/trace_sysprof.c
+++ b/kernel/trace/trace_sysprof.c
@@ -5,19 +5,76 @@
* Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
*
*/
-#include <linux/module.h>
-#include <linux/fs.h>
-#include <linux/debugfs.h>
#include <linux/kallsyms.h>
+#include <linux/debugfs.h>
+#include <linux/hrtimer.h>
#include <linux/uaccess.h>
-#include <linux/marker.h>
#include <linux/ftrace.h>
+#include <linux/module.h>
+#include <linux/fs.h>
#include "trace.h"
static struct trace_array *ctx_trace;
static int __read_mostly tracer_enabled;
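+/* Sampling period of the profiling hrtimers: 1000000 ns, i.e. one sample per millisecond per CPU */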
+static const unsigned long sample_period = 1000000;
+
+/*
+ * Per CPU hrtimers that do the profiling:
+ */
+static DEFINE_PER_CPU(struct hrtimer, stack_trace_hrtimer);
+
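+/*
+ * Timer callback: for now it only bumps a dummy counter where the
+ * stack-trace sampling will hook in, then re-arms itself one
+ * sample_period into the future so it keeps firing periodically.
+ */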
+static enum hrtimer_restart stack_trace_timer_fn(struct hrtimer *hrtimer)
+{
+ /* trace here */
+ panic_timeout++;
+
+ hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));
+
+ return HRTIMER_RESTART;
+}
+
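+/*
+ * Set up and arm the profiling hrtimer of one CPU. The callback mode
+ * asks for the handler to run directly from hard-irq context, so the
+ * sample is taken against the context that the timer interrupted.
+ */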
+static void start_stack_timer(int cpu)
+{
+ struct hrtimer *hrtimer = &per_cpu(stack_trace_hrtimer, cpu);
+
+ hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hrtimer->function = stack_trace_timer_fn;
+ hrtimer->cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ;
+
+ hrtimer_start(hrtimer, ns_to_ktime(sample_period), HRTIMER_MODE_REL);
+}
+
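+/*
+ * hrtimer_start() queues the timer on the current CPU, so temporarily
+ * bind this task to each online CPU in turn to arm that CPU's timer,
+ * then restore the original CPU affinity.
+ */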
+static void start_stack_timers(void)
+{
+ cpumask_t saved_mask = current->cpus_allowed;
+ int cpu;
+
+ for_each_online_cpu(cpu) {
+ set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
+ start_stack_timer(cpu);
+ printk("started timer on cpu%d\n", cpu);
+ }
+ set_cpus_allowed_ptr(current, &saved_mask);
+}
+
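+/* hrtimer_cancel() works from any CPU, so no affinity juggling is needed here */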
+static void stop_stack_timer(int cpu)
+{
+ struct hrtimer *hrtimer = &per_cpu(stack_trace_hrtimer, cpu);
+
+ hrtimer_cancel(hrtimer);
+ printk("cancelled timer on cpu%d\n", cpu);
+}
+
+static void stop_stack_timers(void)
+{
+ int cpu;
+
+ for_each_online_cpu(cpu)
+ stop_stack_timer(cpu);
+}
+
static notrace void stack_reset(struct trace_array *tr)
{
int cpu;
@@ -31,11 +88,13 @@ static notrace void stack_reset(struct trace_array *tr)
static notrace void start_stack_trace(struct trace_array *tr)
{
stack_reset(tr);
+ start_stack_timers();
tracer_enabled = 1;
}
static notrace void stop_stack_trace(struct trace_array *tr)
{
+ stop_stack_timers();
tracer_enabled = 0;
}