author	Linus Torvalds <torvalds@linux-foundation.org>	2013-04-29 13:55:38 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-04-29 13:55:38 -0700
commit	9e8529afc4518f4e5d610001545ebc97e1333c79 (patch)
tree	26e1aa2cbb50f3f511cfa7d8e39e6b7bd9221b68 /include/trace
parent	ec25e246b94a3233ab064994ef05a170bdba0e7c (diff)
parent	4c69e6ea415a35eb7f0fc8ee9390c8f7436492a2 (diff)
Merge tag 'trace-3.10' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull tracing updates from Steven Rostedt:
 "Along with the usual minor fixes and clean ups there are a few major
  changes with this pull request.

  1) Multiple buffers for the ftrace facility

  This feature has been requested by many people over the last few
  years.  I even heard that Google was about to implement it themselves.
  I finally had time and cleaned up the code such that you can now
  create multiple instances of the ftrace buffer and have different
  events go to different buffers.  This way, a low frequency event will
  not be lost in the noise of a high frequency event.

  Note, currently only events can go to different buffers, the tracers
  (ie function, function_graph and the latency tracers) still can only
  be written to the main buffer.

  2) The function tracer triggers have now been extended.

  The function tracer had two triggers.  One to enable tracing when a
  function is hit, and one to disable tracing.  Now you can record a
  stack trace on a single (or many) function(s), take a snapshot of the
  buffer (copy it to the snapshot buffer), and you can enable or disable
  an event to be traced when a function is hit.

  3) A perf clock has been added.

  A "perf" clock can be chosen to be used when tracing.  This will cause
  ftrace to use the same clock as perf uses, and hopefully this will
  make it easier to interleave the perf and ftrace data for analysis."

* tag 'trace-3.10' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (82 commits)
  tracepoints: Prevent null probe from being added
  tracing: Compare to 1 instead of zero for is_signed_type()
  tracing: Remove obsolete macro guard _TRACE_PROFILE_INIT
  ftrace: Get rid of ftrace_profile_bits
  tracing: Check return value of tracing_init_dentry()
  tracing: Get rid of unneeded key calculation in ftrace_hash_move()
  tracing: Reset ftrace_graph_filter_enabled if count is zero
  tracing: Fix off-by-one on allocating stat->pages
  kernel: tracing: Use strlcpy instead of strncpy
  tracing: Update debugfs README file
  tracing: Fix ftrace_dump()
  tracing: Rename trace_event_mutex to trace_event_sem
  tracing: Fix comment about prefix in arch_syscall_match_sym_name()
  tracing: Convert trace_destroy_fields() to static
  tracing: Move find_event_field() into trace_events.c
  tracing: Use TRACE_MAX_PRINT instead of constant
  tracing: Use pr_warn_once instead of open coded implementation
  ring-buffer: Add ring buffer startup selftest
  tracing: Bring Documentation/trace/ftrace.txt up to date
  tracing: Add "perf" trace_clock
  ...

Conflicts:
	kernel/trace/ftrace.c
	kernel/trace/trace.c
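As context for the features described in the pull message, the sketch below shows how they are typically exercised from user space through the tracing debugfs interface. It is a hedged illustration, not part of this commit: it assumes debugfs is mounted at /sys/kernel/debug on a kernel containing these changes, and the instance name "foo", the schedule() trigger target, and the sched_switch event are arbitrary examples.

/* Hedged sketch (not from the commit): create an ftrace instance, enable one
 * event in it, set a function trigger, and select the new "perf" trace clock. */
#include <errno.h>
#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>

#define TRACEFS "/sys/kernel/debug/tracing"

static int write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return -1;
	}
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	/* 1) Multiple buffers: a mkdir under instances/ creates a separate
	 *    ring buffer with its own events/ control files. */
	if (mkdir(TRACEFS "/instances/foo", 0750) && errno != EEXIST)
		perror("mkdir instance");

	/* Route a low-frequency event into the new buffer only, so it is
	 * not drowned out by high-frequency events in the main buffer. */
	write_str(TRACEFS "/instances/foo/events/sched/sched_switch/enable", "1");

	/* 2) Extended function triggers: record a stack trace whenever
	 *    schedule() is hit (the snapshot, enable_event and disable_event
	 *    commands are written the same way). */
	write_str(TRACEFS "/set_ftrace_filter", "schedule:stacktrace");

	/* 3) "perf" trace clock: timestamp ftrace events with the clock perf
	 *    uses, to ease interleaving the two data streams. */
	write_str(TRACEFS "/trace_clock", "perf");

	return 0;
}

Output for the new instance then appears under instances/foo/trace, while the main buffer is left untouched.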
Diffstat (limited to 'include/trace')
-rw-r--r--	include/trace/ftrace.h	49
1 file changed, 23 insertions(+), 26 deletions(-)
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 40dc5e8..19edd7f 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -227,29 +227,18 @@ static notrace enum print_line_t \
ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \
struct trace_event *trace_event) \
{ \
- struct ftrace_event_call *event; \
struct trace_seq *s = &iter->seq; \
+ struct trace_seq __maybe_unused *p = &iter->tmp_seq; \
struct ftrace_raw_##call *field; \
- struct trace_entry *entry; \
- struct trace_seq *p = &iter->tmp_seq; \
int ret; \
\
- event = container_of(trace_event, struct ftrace_event_call, \
- event); \
- \
- entry = iter->ent; \
- \
- if (entry->type != event->event.type) { \
- WARN_ON_ONCE(1); \
- return TRACE_TYPE_UNHANDLED; \
- } \
- \
- field = (typeof(field))entry; \
+ field = (typeof(field))iter->ent; \
\
- trace_seq_init(p); \
- ret = trace_seq_printf(s, "%s: ", event->name); \
+ ret = ftrace_raw_output_prep(iter, trace_event); \
if (ret) \
- ret = trace_seq_printf(s, print); \
+ return ret; \
+ \
+ ret = trace_seq_printf(s, print); \
if (!ret) \
return TRACE_TYPE_PARTIAL_LINE; \
\
@@ -335,7 +324,7 @@ static struct trace_event_functions ftrace_event_type_funcs_##call = { \
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print) \
-static int notrace \
+static int notrace __init \
ftrace_define_fields_##call(struct ftrace_event_call *event_call) \
{ \
struct ftrace_raw_##call field; \
@@ -414,7 +403,8 @@ static inline notrace int ftrace_get_offsets_##call( \
*
* static void ftrace_raw_event_<call>(void *__data, proto)
* {
- * struct ftrace_event_call *event_call = __data;
+ * struct ftrace_event_file *ftrace_file = __data;
+ * struct ftrace_event_call *event_call = ftrace_file->event_call;
* struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
* struct ring_buffer_event *event;
* struct ftrace_raw_<call> *entry; <-- defined in stage 1
@@ -423,12 +413,16 @@ static inline notrace int ftrace_get_offsets_##call( \
* int __data_size;
* int pc;
*
+ * if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT,
+ * &ftrace_file->flags))
+ * return;
+ *
* local_save_flags(irq_flags);
* pc = preempt_count();
*
* __data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
*
- * event = trace_current_buffer_lock_reserve(&buffer,
+ * event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
* event_<call>->event.type,
* sizeof(*entry) + __data_size,
* irq_flags, pc);
@@ -440,7 +434,7 @@ static inline notrace int ftrace_get_offsets_##call( \
* __array macros.
*
* if (!filter_current_check_discard(buffer, event_call, entry, event))
- * trace_current_buffer_unlock_commit(buffer,
+ * trace_nowake_buffer_unlock_commit(buffer,
* event, irq_flags, pc);
* }
*
@@ -518,7 +512,8 @@ static inline notrace int ftrace_get_offsets_##call( \
static notrace void \
ftrace_raw_event_##call(void *__data, proto) \
{ \
- struct ftrace_event_call *event_call = __data; \
+ struct ftrace_event_file *ftrace_file = __data; \
+ struct ftrace_event_call *event_call = ftrace_file->event_call; \
struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
struct ring_buffer_event *event; \
struct ftrace_raw_##call *entry; \
@@ -527,12 +522,16 @@ ftrace_raw_event_##call(void *__data, proto) \
int __data_size; \
int pc; \
\
+ if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, \
+ &ftrace_file->flags)) \
+ return; \
+ \
local_save_flags(irq_flags); \
pc = preempt_count(); \
\
__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
\
- event = trace_current_buffer_lock_reserve(&buffer, \
+ event = trace_event_buffer_lock_reserve(&buffer, ftrace_file, \
event_call->event.type, \
sizeof(*entry) + __data_size, \
irq_flags, pc); \
@@ -581,7 +580,7 @@ static inline void ftrace_test_probe_##call(void) \
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
_TRACE_PERF_PROTO(call, PARAMS(proto)); \
static const char print_fmt_##call[] = print; \
-static struct ftrace_event_class __used event_class_##call = { \
+static struct ftrace_event_class __used __refdata event_class_##call = { \
.system = __stringify(TRACE_SYSTEM), \
.define_fields = ftrace_define_fields_##call, \
.fields = LIST_HEAD_INIT(event_class_##call.fields),\
@@ -705,5 +704,3 @@ static inline void perf_test_probe_##call(void) \
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
#endif /* CONFIG_PERF_EVENTS */
-#undef _TRACE_PROFILE_INIT
-