author | Frederic Weisbecker <fweisbec@gmail.com> | 2009-09-23 23:08:43 +0200
---|---|---
committer | Frederic Weisbecker <fweisbec@gmail.com> | 2009-09-23 23:08:43 +0200
commit | d7a4b414eed51f1653bb05ebe84122bf9a7ae18b (patch) |
tree | bd6603a0c27de4c138a1767871897e9cd3e1a1d2 /include/trace/ftrace.h |
parent | 1f0ab40976460bc4673fa204ce917a725185d8f2 (diff) |
parent | a724eada8c2a7b62463b73ccf73fd0bb6e928aeb (diff) |
download | op-kernel-dev-d7a4b414eed51f1653bb05ebe84122bf9a7ae18b.zip op-kernel-dev-d7a4b414eed51f1653bb05ebe84122bf9a7ae18b.tar.gz |
Merge commit 'linus/master' into tracing/kprobes
Conflicts:
kernel/trace/Makefile
kernel/trace/trace.h
kernel/trace/trace_event_types.h
kernel/trace/trace_export.c
Merge reason:
Sync with latest significant tracing core changes.
Diffstat (limited to 'include/trace/ftrace.h')
-rw-r--r-- | include/trace/ftrace.h | 125
1 file changed, 70 insertions, 55 deletions
```diff
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 5d3df2a..54d02c0 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -239,9 +239,9 @@ ftrace_format_##call(struct ftrace_event_call *unused,	\
 #undef __print_flags
 #define __print_flags(flag, delim, flag_array...)			\
 	({								\
-		static const struct trace_print_flags flags[] =		\
+		static const struct trace_print_flags __flags[] =	\
 		  { flag_array, { -1, NULL }};				\
-		ftrace_print_flags_seq(p, delim, flag, flags);		\
+		ftrace_print_flags_seq(p, delim, flag, __flags);	\
 	})
 
 #undef __print_symbolic
@@ -254,7 +254,7 @@ ftrace_format_##call(struct ftrace_event_call *unused,	\
 
 #undef TRACE_EVENT
 #define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
-enum print_line_t							\
+static enum print_line_t						\
 ftrace_raw_output_##call(struct trace_iterator *iter, int flags)	\
 {									\
 	struct trace_seq *s = &iter->seq;				\
@@ -317,7 +317,7 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags)	\
 
 #undef TRACE_EVENT
 #define TRACE_EVENT(call, proto, args, tstruct, func, print)		\
-int									\
+static int								\
 ftrace_define_fields_##call(struct ftrace_event_call *event_call)	\
 {									\
 	struct ftrace_raw_##call field;					\
@@ -378,24 +378,18 @@ static inline int ftrace_get_offsets_##call(			\
 #ifdef CONFIG_EVENT_PROFILE
 
 /*
- * Generate the functions needed for tracepoint perf_counter support.
+ * Generate the functions needed for tracepoint perf_event support.
 *
 * NOTE: The insertion profile callback (ftrace_profile_<call>) is defined later
 *
- * static int ftrace_profile_enable_<call>(struct ftrace_event_call *event_call)
+ * static int ftrace_profile_enable_<call>(void)
 * {
- * 	int ret = 0;
- *
- * 	if (!atomic_inc_return(&event_call->profile_count))
- * 		ret = register_trace_<call>(ftrace_profile_<call>);
- *
- * 	return ret;
+ * 	return register_trace_<call>(ftrace_profile_<call>);
 * }
 *
- * static void ftrace_profile_disable_<call>(struct ftrace_event_call *event_call)
+ * static void ftrace_profile_disable_<call>(void)
 * {
- * 	if (atomic_add_negative(-1, &event->call->profile_count))
- * 		unregister_trace_<call>(ftrace_profile_<call>);
+ * 	unregister_trace_<call>(ftrace_profile_<call>);
 * }
 *
 */
@@ -405,20 +399,14 @@ static inline int ftrace_get_offsets_##call(			\
 									\
 static void ftrace_profile_##call(proto);				\
 									\
-static int ftrace_profile_enable_##call(struct ftrace_event_call *event_call) \
+static int ftrace_profile_enable_##call(struct ftrace_event_call *unused)\
 {									\
-	int ret = 0;							\
-									\
-	if (!atomic_inc_return(&event_call->profile_count))		\
-		ret = register_trace_##call(ftrace_profile_##call);	\
-									\
-	return ret;							\
+	return register_trace_##call(ftrace_profile_##call);		\
 }									\
 									\
-static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
+static void ftrace_profile_disable_##call(struct ftrace_event_call *unused)\
 {									\
-	if (atomic_add_negative(-1, &event_call->profile_count))	\
-		unregister_trace_##call(ftrace_profile_##call);		\
+	unregister_trace_##call(ftrace_profile_##call);			\
 }
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
@@ -656,15 +644,16 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
 * {
 *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
 *	struct ftrace_event_call *event_call = &event_<call>;
- *	extern void perf_tpcounter_event(int, u64, u64, void *, int);
+ *	extern void perf_tp_event(int, u64, u64, void *, int);
 *	struct ftrace_raw_##call *entry;
 *	u64 __addr = 0, __count = 1;
 *	unsigned long irq_flags;
+ *	struct trace_entry *ent;
 *	int __entry_size;
 *	int __data_size;
+ *	int __cpu
 *	int pc;
 *
- *	local_save_flags(irq_flags);
 *	pc = preempt_count();
 *
 *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
@@ -675,25 +664,34 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
 *			     sizeof(u64));
 *	__entry_size -= sizeof(u32);
 *
- *	do {
- *		char raw_data[__entry_size]; <- allocate our sample in the stack
- *		struct trace_entry *ent;
+ *	// Protect the non nmi buffer
+ *	// This also protects the rcu read side
+ *	local_irq_save(irq_flags);
+ *	__cpu = smp_processor_id();
+ *
+ *	if (in_nmi())
+ *		raw_data = rcu_dereference(trace_profile_buf_nmi);
+ *	else
+ *		raw_data = rcu_dereference(trace_profile_buf);
+ *
+ *	if (!raw_data)
+ *		goto end;
 *
- *		zero dead bytes from alignment to avoid stack leak to userspace:
+ *	raw_data = per_cpu_ptr(raw_data, __cpu);
 *
- *		*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
- *		entry = (struct ftrace_raw_<call> *)raw_data;
- *		ent = &entry->ent;
- *		tracing_generic_entry_update(ent, irq_flags, pc);
- *		ent->type = event_call->id;
+ *	//zero dead bytes from alignment to avoid stack leak to userspace:
+ *	*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
+ *	entry = (struct ftrace_raw_<call> *)raw_data;
+ *	ent = &entry->ent;
+ *	tracing_generic_entry_update(ent, irq_flags, pc);
+ *	ent->type = event_call->id;
 *
- *	<tstruct> <- do some jobs with dynamic arrays
+ *	<tstruct> <- do some jobs with dynamic arrays
 *
- *	<assign>  <- affect our values
+ *	<assign>  <- affect our values
 *
- *	perf_tpcounter_event(event_call->id, __addr, __count, entry,
- *		     __entry_size);  <- submit them to perf counter
- *	} while (0);
+ *	perf_tp_event(event_call->id, __addr, __count, entry,
+ *		       __entry_size);  <- submit them to perf counter
 *
 * }
 */
@@ -712,15 +710,17 @@ static void ftrace_profile_##call(proto)			\
 {									\
 	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
 	struct ftrace_event_call *event_call = &event_##call;		\
-	extern void perf_tpcounter_event(int, u64, u64, void *, int);	\
+	extern void perf_tp_event(int, u64, u64, void *, int);		\
 	struct ftrace_raw_##call *entry;				\
 	u64 __addr = 0, __count = 1;					\
 	unsigned long irq_flags;					\
+	struct trace_entry *ent;					\
 	int __entry_size;						\
 	int __data_size;						\
+	char *raw_data;							\
+	int __cpu;							\
 	int pc;								\
 									\
-	local_save_flags(irq_flags);					\
 	pc = preempt_count();						\
 									\
 	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
@@ -728,23 +728,38 @@ static void ftrace_profile_##call(proto)			\
 			     sizeof(u64));				\
 	__entry_size -= sizeof(u32);					\
 									\
-	do {								\
-		char raw_data[__entry_size];				\
-		struct trace_entry *ent;				\
+	if (WARN_ONCE(__entry_size > FTRACE_MAX_PROFILE_SIZE,		\
+		      "profile buffer not large enough"))		\
+		return;							\
+									\
+	local_irq_save(irq_flags);					\
+	__cpu = smp_processor_id();					\
 									\
-		*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;	\
-		entry = (struct ftrace_raw_##call *)raw_data;		\
-		ent = &entry->ent;					\
-		tracing_generic_entry_update(ent, irq_flags, pc);	\
-		ent->type = event_call->id;				\
+	if (in_nmi())							\
+		raw_data = rcu_dereference(trace_profile_buf_nmi);	\
+	else								\
+		raw_data = rcu_dereference(trace_profile_buf);		\
 									\
-		tstruct							\
+	if (!raw_data)							\
+		goto end;						\
 									\
-		{ assign; }						\
+	raw_data = per_cpu_ptr(raw_data, __cpu);			\
 									\
-		perf_tpcounter_event(event_call->id, __addr, __count, entry,\
+	*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;	\
+	entry = (struct ftrace_raw_##call *)raw_data;			\
+	ent = &entry->ent;						\
+	tracing_generic_entry_update(ent, irq_flags, pc);		\
+	ent->type = event_call->id;					\
+									\
+	tstruct								\
+									\
+	{ assign; }							\
+									\
+	perf_tp_event(event_call->id, __addr, __count, entry,		\
 			     __entry_size);				\
-	} while (0);							\
+									\
+end:									\
+	local_irq_restore(irq_flags);					\
 									\
 }
```
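The generated-code hunks above are hard to read through the macro backslashes, so here is a minimal sketch, not taken verbatim from the patch, of roughly what the rewritten ftrace_profile_<call>() expands to for a hypothetical event "foo" carrying a single "int arg" field. event_foo, struct ftrace_raw_foo and the entry->arg assignment are illustrative stand-ins for the <call>, <tstruct> and <assign> template parameters; trace_profile_buf, trace_profile_buf_nmi and FTRACE_MAX_PROFILE_SIZE are the symbols the generated code references from the tracing core changes this merge pulls in.

```c
/* Sketch only: "foo", event_foo and struct ftrace_raw_foo are made-up
 * stand-ins for one TRACE_EVENT() instantiation; the event has no
 * dynamic arrays, so the ftrace_get_offsets_<call>() step is folded
 * directly into the __entry_size computation. */
static void ftrace_profile_foo(int arg)
{
	struct ftrace_event_call *event_call = &event_foo;
	extern void perf_tp_event(int, u64, u64, void *, int);
	struct ftrace_raw_foo *entry;
	u64 __addr = 0, __count = 1;
	unsigned long irq_flags;
	struct trace_entry *ent;
	/* round the sample up so the trailing u64 zeroing stays in bounds */
	int __entry_size = ALIGN(sizeof(*entry) + sizeof(u32), sizeof(u64))
			   - sizeof(u32);
	char *raw_data;
	int __cpu;
	int pc = preempt_count();

	if (WARN_ONCE(__entry_size > FTRACE_MAX_PROFILE_SIZE,
		      "profile buffer not large enough"))
		return;

	/* disabling IRQs pins us to one CPU and doubles as the RCU read side */
	local_irq_save(irq_flags);
	__cpu = smp_processor_id();

	/* an NMI can interrupt a half-written sample, so NMIs get their own buffer */
	if (in_nmi())
		raw_data = rcu_dereference(trace_profile_buf_nmi);
	else
		raw_data = rcu_dereference(trace_profile_buf);

	if (!raw_data)		/* profiling inactive: buffers not allocated */
		goto end;

	raw_data = per_cpu_ptr(raw_data, __cpu);

	/* zero the alignment padding so no stale bytes leak to userspace */
	*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
	entry = (struct ftrace_raw_foo *)raw_data;
	ent = &entry->ent;
	tracing_generic_entry_update(ent, irq_flags, pc);
	ent->type = event_call->id;

	entry->arg = arg;	/* the <assign> step for this made-up event */

	perf_tp_event(event_call->id, __addr, __count, entry, __entry_size);
end:
	local_irq_restore(irq_flags);
}
```

The design point of the rework: the old code allocated raw_data as a variable-length array on the stack, which for large events could overflow deep call chains; the new code borrows a core-owned per-CPU buffer instead, and the local_irq_save()/local_irq_restore() pair both keeps the per_cpu_ptr() result valid and protects the rcu_dereference() against the buffers being freed when profiling is switched off.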