author	Ingo Molnar <mingo@elte.hu>	2009-03-18 06:59:56 +0100
committer	Ingo Molnar <mingo@elte.hu>	2009-03-18 06:59:56 +0100
commit	327019b01e068d66dada6a8b2571180ab3674d20
tree	c81354a509d6962f6878145fcf3cdbe50a000a89 /kernel
parent	03418c7efaa429dc7647ac93e3862e3fe1816873
parent	62524d55e5b9ffe36e3bf3dd7a594114f150b449
Merge branch 'tip/tracing/ftrace' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace into tracing/ftrace
Diffstat (limited to 'kernel')
 kernel/trace/ring_buffer.c        | 65
 kernel/trace/trace.c              | 55
 kernel/trace/trace.h              |  1
 kernel/trace/trace_power.c        |  8
 kernel/trace/trace_sched_switch.c |  9
 5 files changed, 94 insertions(+), 44 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 58128ad..bbf5192 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -180,29 +180,6 @@ EXPORT_SYMBOL_GPL(tracing_is_on);
#include "trace.h"
-/* Up this if you want to test the TIME_EXTENTS and normalization */
-#define DEBUG_SHIFT 0
-
-u64 ring_buffer_time_stamp(int cpu)
-{
- u64 time;
-
- preempt_disable_notrace();
- /* shift to debug/test normalization and TIME_EXTENTS */
- time = trace_clock_local() << DEBUG_SHIFT;
- preempt_enable_no_resched_notrace();
-
- return time;
-}
-EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
-
-void ring_buffer_normalize_time_stamp(int cpu, u64 *ts)
-{
- /* Just stupid testing the normalize function and deltas */
- *ts >>= DEBUG_SHIFT;
-}
-EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
-
#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
#define RB_ALIGNMENT 4U
#define RB_MAX_SMALL_DATA 28
@@ -374,6 +351,7 @@ struct ring_buffer {
#ifdef CONFIG_HOTPLUG_CPU
struct notifier_block cpu_notify;
#endif
+ u64 (*clock)(void);
};
struct ring_buffer_iter {
@@ -394,6 +372,30 @@ struct ring_buffer_iter {
_____ret; \
})
+/* Up this if you want to test the TIME_EXTENTS and normalization */
+#define DEBUG_SHIFT 0
+
+u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
+{
+ u64 time;
+
+ preempt_disable_notrace();
+ /* shift to debug/test normalization and TIME_EXTENTS */
+ time = buffer->clock() << DEBUG_SHIFT;
+ preempt_enable_no_resched_notrace();
+
+ return time;
+}
+EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
+
+void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
+ int cpu, u64 *ts)
+{
+ /* Just stupid testing the normalize function and deltas */
+ *ts >>= DEBUG_SHIFT;
+}
+EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
+
/**
* check_pages - integrity check of buffer pages
* @cpu_buffer: CPU buffer with pages to test
@@ -569,6 +571,7 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
buffer->flags = flags;
+ buffer->clock = trace_clock_local;
/* need at least two pages */
if (buffer->pages == 1)
@@ -645,6 +648,12 @@ ring_buffer_free(struct ring_buffer *buffer)
}
EXPORT_SYMBOL_GPL(ring_buffer_free);
+void ring_buffer_set_clock(struct ring_buffer *buffer,
+ u64 (*clock)(void))
+{
+ buffer->clock = clock;
+}
+
static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
static void
@@ -1191,7 +1200,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
cpu_buffer->tail_page = next_page;
/* reread the time stamp */
- *ts = ring_buffer_time_stamp(cpu_buffer->cpu);
+ *ts = ring_buffer_time_stamp(buffer, cpu_buffer->cpu);
cpu_buffer->tail_page->page->time_stamp = *ts;
}
@@ -1334,7 +1343,7 @@ rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
return NULL;
- ts = ring_buffer_time_stamp(cpu_buffer->cpu);
+ ts = ring_buffer_time_stamp(cpu_buffer->buffer, cpu_buffer->cpu);
/*
* Only the first commit can update the timestamp.
@@ -2051,7 +2060,8 @@ rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
case RINGBUF_TYPE_DATA:
if (ts) {
*ts = cpu_buffer->read_stamp + event->time_delta;
- ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts);
+ ring_buffer_normalize_time_stamp(buffer,
+ cpu_buffer->cpu, ts);
}
return event;
@@ -2112,7 +2122,8 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
case RINGBUF_TYPE_DATA:
if (ts) {
*ts = iter->read_stamp + event->time_delta;
- ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts);
+ ring_buffer_normalize_time_stamp(buffer,
+ cpu_buffer->cpu, ts);
}
return event;
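Taken together, the ring_buffer.c hunks make time stamping a per-buffer property: the two time-stamp helpers now take a struct ring_buffer *, each buffer carries a clock callback (initialized to trace_clock_local at allocation), and the new ring_buffer_set_clock() lets a tracer swap clocks at run time. A minimal sketch of a built-in caller follows; my_switch_clock() is hypothetical, and it assumes ring_buffer_set_clock() is declared in linux/ring_buffer.h and the trace clocks in linux/trace_clock.h, as in this patch series.

#include <linux/ring_buffer.h>
#include <linux/trace_clock.h>

/*
 * Hypothetical helper, not part of the patch: flip an existing ring
 * buffer between the cheap per-CPU clock and the globally ordered one.
 * Events recorded after the call are stamped by the new clock.
 */
static void my_switch_clock(struct ring_buffer *buffer, int global)
{
	ring_buffer_set_clock(buffer, global ? trace_clock_global
					     : trace_clock_local);
}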
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 1ce6208..a2d13e8 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -155,13 +155,6 @@ ns2usecs(cycle_t nsec)
return nsec;
}
-cycle_t ftrace_now(int cpu)
-{
- u64 ts = ring_buffer_time_stamp(cpu);
- ring_buffer_normalize_time_stamp(cpu, &ts);
- return ts;
-}
-
/*
* The global_trace is the descriptor that holds the tracing
* buffers for the live tracing. For each CPU, it contains
@@ -178,6 +171,20 @@ static struct trace_array global_trace;
static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);
+cycle_t ftrace_now(int cpu)
+{
+ u64 ts;
+
+ /* Early boot up does not have a buffer yet */
+ if (!global_trace.buffer)
+ return trace_clock_local();
+
+ ts = ring_buffer_time_stamp(global_trace.buffer, cpu);
+ ring_buffer_normalize_time_stamp(global_trace.buffer, cpu, &ts);
+
+ return ts;
+}
+
/*
* The max_tr is used to snapshot the global_trace when a maximum
* latency is reached. Some tracers will use this to store a maximum
@@ -308,6 +315,7 @@ static const char *trace_options[] = {
"printk-msg-only",
"context-info",
"latency-format",
+ "global-clock",
NULL
};
@@ -2244,6 +2252,34 @@ static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
return 0;
}
+static void set_tracer_flags(unsigned int mask, int enabled)
+{
+ /* do nothing if flag is already set */
+ if (!!(trace_flags & mask) == !!enabled)
+ return;
+
+ if (enabled)
+ trace_flags |= mask;
+ else
+ trace_flags &= ~mask;
+
+ if (mask == TRACE_ITER_GLOBAL_CLK) {
+ u64 (*func)(void);
+
+ if (enabled)
+ func = trace_clock_global;
+ else
+ func = trace_clock_local;
+
+ mutex_lock(&trace_types_lock);
+ ring_buffer_set_clock(global_trace.buffer, func);
+
+ if (max_tr.buffer)
+ ring_buffer_set_clock(max_tr.buffer, func);
+ mutex_unlock(&trace_types_lock);
+ }
+}
+
static ssize_t
tracing_trace_options_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
@@ -2271,10 +2307,7 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
int len = strlen(trace_options[i]);
if (strncmp(cmp, trace_options[i], len) == 0) {
- if (neg)
- trace_flags &= ~(1 << i);
- else
- trace_flags |= (1 << i);
+ set_tracer_flags(1 << i, !neg);
break;
}
}
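The new set_tracer_flags() gives option toggling a single choke point so that side effects, here swapping the buffer clock under trace_types_lock, run only on an actual state change; writing "global-clock" or "noglobal-clock" to the tracing trace_options file ends up in this path. The guard relies on double negation to normalize both sides to 0/1. A standalone userspace sketch of that toggle-only-on-change idiom, with hypothetical names:

#include <stdio.h>

static unsigned int flags;

/* Mirror of the guard in set_tracer_flags(): !! collapses any nonzero
 * value to 1, so "already set" and "enable" compare equal and we bail
 * out before running (possibly expensive) side effects twice. */
static void set_flag(unsigned int mask, int enabled)
{
	if (!!(flags & mask) == !!enabled)
		return;				/* no state change */
	if (enabled)
		flags |= mask;
	else
		flags &= ~mask;
	printf("flag 0x%x -> %d\n", mask, !!enabled);
}

int main(void)
{
	set_flag(0x80000, 1);	/* sets the bit, prints */
	set_flag(0x80000, 2);	/* nonzero == already set: silent no-op */
	set_flag(0x80000, 0);	/* clears the bit, prints */
	return 0;
}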
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 546bcbd..38276d1 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -684,6 +684,7 @@ enum trace_iterator_flags {
TRACE_ITER_PRINTK_MSGONLY = 0x10000,
TRACE_ITER_CONTEXT_INFO = 0x20000, /* Print pid/cpu/time */
TRACE_ITER_LATENCY_FMT = 0x40000,
+ TRACE_ITER_GLOBAL_CLK = 0x80000,
};
/*
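One coupling worth calling out: tracing_trace_options_write() computes the mask as 1 << i from the index of the matched string in trace_options[], so "global-clock", appended right after "latency-format" (bit 18), must stay at bit 19, which is exactly TRACE_ITER_GLOBAL_CLK = 0x80000. A hypothetical compile-time check of that invariant, not in the patch:

/* Negative array size breaks the build if the flag drifts from bit 19. */
#define TRACE_ITER_GLOBAL_CLK 0x80000
typedef char global_clk_is_bit_19[(TRACE_ITER_GLOBAL_CLK == (1 << 19)) ? 1 : -1];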
diff --git a/kernel/trace/trace_power.c b/kernel/trace/trace_power.c
index 91ce672..bae791e 100644
--- a/kernel/trace/trace_power.c
+++ b/kernel/trace/trace_power.c
@@ -122,12 +122,16 @@ fail_start:
static void start_power_trace(struct trace_array *tr)
{
trace_power_enabled = 1;
- tracing_power_register();
}
static void stop_power_trace(struct trace_array *tr)
{
trace_power_enabled = 0;
+}
+
+static void power_trace_reset(struct trace_array *tr)
+{
+ trace_power_enabled = 0;
unregister_trace_power_start(probe_power_start);
unregister_trace_power_end(probe_power_end);
unregister_trace_power_mark(probe_power_mark);
@@ -188,7 +192,7 @@ static struct tracer power_tracer __read_mostly =
.init = power_trace_init,
.start = start_power_trace,
.stop = stop_power_trace,
- .reset = stop_power_trace,
+ .reset = power_trace_reset,
.print_line = power_print_line,
};
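The trace_power.c change separates two lifecycles that previously shared stop_power_trace(): .start/.stop, which fire whenever tracing is paused and resumed, now only flip trace_power_enabled, while the tracepoint probes stay registered and are torn down solely by .reset when the tracer is switched away. A generic sketch of the resulting pattern, with hypothetical names (the real probes already check the enable flag, as probe_power_start() does):

static int my_enabled;

static void probe_my_event(void *ignore)
{
	if (!my_enabled)	/* stopped: probe registered but quiet */
		return;
	/* ... record the event ... */
}

static void my_start(struct trace_array *tr) { my_enabled = 1; }
static void my_stop(struct trace_array *tr)  { my_enabled = 0; }

static void my_reset(struct trace_array *tr)
{
	my_enabled = 0;
	/* unregister_trace_...() teardown belongs here only */
}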
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index 77132c2..de35f20 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -18,6 +18,7 @@ static struct trace_array *ctx_trace;
static int __read_mostly tracer_enabled;
static int sched_ref;
static DEFINE_MUTEX(sched_register_mutex);
+static int sched_stopped;
static void
probe_sched_switch(struct rq *__rq, struct task_struct *prev,
@@ -28,7 +29,7 @@ probe_sched_switch(struct rq *__rq, struct task_struct *prev,
int cpu;
int pc;
- if (!sched_ref)
+ if (!sched_ref || sched_stopped)
return;
tracing_record_cmdline(prev);
@@ -193,6 +194,7 @@ static void stop_sched_trace(struct trace_array *tr)
static int sched_switch_trace_init(struct trace_array *tr)
{
ctx_trace = tr;
+ tracing_reset_online_cpus(tr);
tracing_start_sched_switch_record();
return 0;
}
@@ -205,13 +207,12 @@ static void sched_switch_trace_reset(struct trace_array *tr)
static void sched_switch_trace_start(struct trace_array *tr)
{
- tracing_reset_online_cpus(tr);
- tracing_start_sched_switch();
+ sched_stopped = 0;
}
static void sched_switch_trace_stop(struct trace_array *tr)
{
- tracing_stop_sched_switch();
+ sched_stopped = 1;
}
static struct tracer sched_switch_trace __read_mostly =
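The sched_switch hunks apply the same idea one level down: instead of dropping and retaking the tracepoint reference (tracing_stop_sched_switch()/tracing_start_sched_switch()), stop and start just toggle sched_stopped, which the probe checks first thing; and since the buffer reset moved from .start to .init, a stop/start cycle no longer discards events already recorded. Condensed sketch, using the function names from this patch with bodies abbreviated:

static void
probe_sched_switch(struct rq *__rq, struct task_struct *prev,
		   struct task_struct *next)
{
	if (!sched_ref || sched_stopped)	/* cheap bail while stopped */
		return;
	/* ... record the context switch ... */
}

static int sched_switch_trace_init(struct trace_array *tr)
{
	ctx_trace = tr;
	tracing_reset_online_cpus(tr);	/* clear once at init, not on start */
	tracing_start_sched_switch_record();
	return 0;
}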