-rw-r--r--  include/linux/ftrace_event.h        |  5
-rw-r--r--  include/trace/ftrace.h              | 12
-rw-r--r--  kernel/trace/trace_event_perf.c     |  4
-rw-r--r--  kernel/trace/trace_events.c         |  7
-rw-r--r--  kernel/trace/trace_events_filter.c  |  2
-rw-r--r--  kernel/trace/trace_export.c         |  4
-rw-r--r--  kernel/trace/trace_kprobe.c         | 18
-rw-r--r--  kernel/trace/trace_syscalls.c       | 14
8 files changed, 34 insertions(+), 32 deletions(-)
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index b1a007d..0be0285 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -149,14 +149,13 @@ struct ftrace_event_call {
char *name;
struct dentry *dir;
struct trace_event event;
- int enabled;
- int id;
const char *print_fmt;
- int filter_active;
struct event_filter *filter;
void *mod;
void *data;
+ int enabled;
+ int filter_active;
int perf_refcount;
};
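
The core of the change: struct ftrace_event_call no longer keeps its own id. The number already lives in the embedded struct trace_event, which register_ftrace_event() fills in, so callers read call->event.type instead. A minimal sketch of the relationship, with simplified and partly assumed field layout:

/* Sketch only -- simplified from the real headers; surrounding fields elided. */
struct trace_event {
	int			type;	/* event number, assigned by register_ftrace_event() */
	/* ... output callbacks, hash list linkage ... */
};

struct ftrace_event_call {
	char			*name;
	struct trace_event	event;	/* embedded; event.type replaces the old call->id */
	/* ... */
};

/* old: if (entry->type != call->id)         */
/* new: if (entry->type != call->event.type) */
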
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 824141d..4866c10 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -150,7 +150,7 @@
*
* entry = iter->ent;
*
- * if (entry->type != event_<call>.id) {
+ * if (entry->type != event_<call>->event.type) {
* WARN_ON_ONCE(1);
* return TRACE_TYPE_UNHANDLED;
* }
@@ -221,7 +221,7 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \
\
entry = iter->ent; \
\
- if (entry->type != event->id) { \
+ if (entry->type != event->event.type) { \
WARN_ON_ONCE(1); \
return TRACE_TYPE_UNHANDLED; \
} \
@@ -257,7 +257,7 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \
\
entry = iter->ent; \
\
- if (entry->type != event_##call.id) { \
+ if (entry->type != event_##call.event.type) { \
WARN_ON_ONCE(1); \
return TRACE_TYPE_UNHANDLED; \
} \
@@ -409,7 +409,7 @@ static inline notrace int ftrace_get_offsets_##call( \
* __data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
*
* event = trace_current_buffer_lock_reserve(&buffer,
- * event_<call>.id,
+ * event_<call>->event.type,
* sizeof(*entry) + __data_size,
* irq_flags, pc);
* if (!event)
@@ -510,7 +510,7 @@ ftrace_raw_event_##call(void *__data, proto) \
__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
\
event = trace_current_buffer_lock_reserve(&buffer, \
- event_call->id, \
+ event_call->event.type, \
sizeof(*entry) + __data_size, \
irq_flags, pc); \
if (!event) \
@@ -711,7 +711,7 @@ perf_trace_##call(void *__data, proto) \
"profile buffer not large enough")) \
return; \
entry = (struct ftrace_raw_##call *)perf_trace_buf_prepare( \
- __entry_size, event_call->id, &rctx, &irq_flags); \
+ __entry_size, event_call->event.type, &rctx, &irq_flags); \
if (!entry) \
return; \
tstruct \
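
Every generated handler in this header switches from event_call->id to event_call->event.type when it reserves ring-buffer or perf-buffer space. Roughly what the expanded fast path looks like after the patch (a sketch, not the literal macro expansion; the entry filling, size computation, and commit are abridged):

/* Sketch of a generated ftrace_raw_event_<call>() after this patch. */
static void ftrace_raw_event_sample(void *__data)
{
	struct ftrace_event_call *event_call = __data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	unsigned long irq_flags;
	int pc, size;

	local_save_flags(irq_flags);
	pc = preempt_count();
	size = 0;	/* sizeof(*entry) + __data_size in the real expansion */

	event = trace_current_buffer_lock_reserve(&buffer,
						  event_call->event.type, /* was event_call->id */
						  size, irq_flags, pc);
	if (!event)
		return;
	/* ... fill the entry, run filters, then commit the event ... */
}
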
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index 196fe9d..0a47e8d 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -80,7 +80,7 @@ int perf_trace_enable(int event_id)
mutex_lock(&event_mutex);
list_for_each_entry(event, &ftrace_events, list) {
- if (event->id == event_id &&
+ if (event->event.type == event_id &&
event->class && event->class->perf_probe &&
try_module_get(event->mod)) {
ret = perf_trace_event_enable(event);
@@ -128,7 +128,7 @@ void perf_trace_disable(int event_id)
mutex_lock(&event_mutex);
list_for_each_entry(event, &ftrace_events, list) {
- if (event->id == event_id) {
+ if (event->event.type == event_id) {
perf_trace_event_disable(event);
module_put(event->mod);
break;
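
perf_trace_enable()/perf_trace_disable() resolve a numeric event id to its ftrace_event_call by walking ftrace_events; the comparison now goes through the embedded event. A hedged sketch of that lookup, factored into a hypothetical helper for illustration (the real code open-codes the walk):

/* Hypothetical helper; caller must hold event_mutex. */
static struct ftrace_event_call *find_event_by_id(int event_id)
{
	struct ftrace_event_call *event;

	list_for_each_entry(event, &ftrace_events, list) {
		if (event->event.type == event_id)	/* was event->id */
			return event;
	}
	return NULL;
}
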
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index aafe5bf..8daaca5 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -125,7 +125,6 @@ int trace_event_raw_init(struct ftrace_event_call *call)
id = register_ftrace_event(&call->event);
if (!id)
return -ENODEV;
- call->id = id;
return 0;
}
@@ -567,7 +566,7 @@ event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
trace_seq_init(s);
trace_seq_printf(s, "name: %s\n", call->name);
- trace_seq_printf(s, "ID: %d\n", call->id);
+ trace_seq_printf(s, "ID: %d\n", call->event.type);
trace_seq_printf(s, "format:\n");
head = trace_get_fields(call);
@@ -641,7 +640,7 @@ event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
return -ENOMEM;
trace_seq_init(s);
- trace_seq_printf(s, "%d\n", call->id);
+ trace_seq_printf(s, "%d\n", call->event.type);
r = simple_read_from_buffer(ubuf, cnt, ppos,
s->buffer, s->len);
@@ -969,7 +968,7 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
enable);
#ifdef CONFIG_PERF_EVENTS
- if (call->id && (call->class->perf_probe || call->class->reg))
+ if (call->event.type && (call->class->perf_probe || call->class->reg))
trace_create_file("id", 0444, call->dir, call,
id);
#endif
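
trace_event_raw_init() drops the extra store because register_ftrace_event() already records the assigned number in call->event.type, which the "format" and "id" files now print. A sketch of the function as it reads after the patch, mirroring the hunk above:

int trace_event_raw_init(struct ftrace_event_call *call)
{
	int id;

	id = register_ftrace_event(&call->event);	/* leaves the id in call->event.type */
	if (!id)
		return -ENODEV;

	/* old: call->id = id;  -- redundant now that event.type carries it */
	return 0;
}
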
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 961f99b..2702d6b 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -1395,7 +1395,7 @@ int ftrace_profile_set_filter(struct perf_event *event, int event_id,
mutex_lock(&event_mutex);
list_for_each_entry(call, &ftrace_events, list) {
- if (call->id == event_id)
+ if (call->event.type == event_id)
break;
}
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c
index e878d06..8536e2a 100644
--- a/kernel/trace/trace_export.c
+++ b/kernel/trace/trace_export.c
@@ -153,7 +153,7 @@ static int ftrace_raw_init_event(struct ftrace_event_call *call)
#define F_printk(fmt, args...) #fmt ", " __stringify(args)
#undef FTRACE_ENTRY
-#define FTRACE_ENTRY(call, struct_name, type, tstruct, print) \
+#define FTRACE_ENTRY(call, struct_name, etype, tstruct, print) \
\
struct ftrace_event_class event_class_ftrace_##call = { \
.system = __stringify(TRACE_SYSTEM), \
@@ -165,7 +165,7 @@ struct ftrace_event_call __used \
__attribute__((__aligned__(4))) \
__attribute__((section("_ftrace_events"))) event_##call = { \
.name = #call, \
- .id = type, \
+ .event.type = etype, \
.class = &event_class_ftrace_##call, \
.print_fmt = print, \
}; \
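
The macro argument is renamed from type to etype so the initializer can name the nested member without clashing, and a C99 designated initializer can set a field of an embedded struct directly. A hypothetical expansion of what the macro emits for an event "foo" (attributes and section placement omitted; TRACE_FOO and the print format are placeholders):

/* Hypothetical expansion of FTRACE_ENTRY(foo, ..., TRACE_FOO, ...). */
struct ftrace_event_call event_foo = {
	.name		= "foo",
	.event.type	= TRACE_FOO,		/* nested designator; was .id = TRACE_FOO */
	.class		= &event_class_ftrace_foo,
	.print_fmt	= "...",		/* F_printk() string */
};
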
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index d8061c3..934078b 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -960,8 +960,8 @@ static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args);
- event = trace_current_buffer_lock_reserve(&buffer, call->id, size,
- irq_flags, pc);
+ event = trace_current_buffer_lock_reserve(&buffer, call->event.type,
+ size, irq_flags, pc);
if (!event)
return;
@@ -992,8 +992,8 @@ static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri,
size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args);
- event = trace_current_buffer_lock_reserve(&buffer, call->id, size,
- irq_flags, pc);
+ event = trace_current_buffer_lock_reserve(&buffer, call->event.type,
+ size, irq_flags, pc);
if (!event)
return;
@@ -1228,7 +1228,8 @@ static __kprobes void kprobe_perf_func(struct kprobe *kp,
"profile buffer not large enough"))
return;
- entry = perf_trace_buf_prepare(size, call->id, &rctx, &irq_flags);
+ entry = perf_trace_buf_prepare(size, call->event.type,
+ &rctx, &irq_flags);
if (!entry)
return;
@@ -1258,7 +1259,8 @@ static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri,
"profile buffer not large enough"))
return;
- entry = perf_trace_buf_prepare(size, call->id, &rctx, &irq_flags);
+ entry = perf_trace_buf_prepare(size, call->event.type,
+ &rctx, &irq_flags);
if (!entry)
return;
@@ -1375,8 +1377,8 @@ static int register_probe_event(struct trace_probe *tp)
}
if (set_print_fmt(tp) < 0)
return -ENOMEM;
- call->id = register_ftrace_event(&call->event);
- if (!call->id) {
+ ret = register_ftrace_event(&call->event);
+ if (!ret) {
kfree(call->print_fmt);
return -ENODEV;
}
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 7c7cfe9..9d35830 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -117,7 +117,7 @@ print_syscall_enter(struct trace_iterator *iter, int flags,
if (!entry)
goto end;
- if (entry->enter_event->id != ent->type) {
+ if (entry->enter_event->event.type != ent->type) {
WARN_ON_ONCE(1);
goto end;
}
@@ -173,7 +173,7 @@ print_syscall_exit(struct trace_iterator *iter, int flags,
return TRACE_TYPE_HANDLED;
}
- if (entry->exit_event->id != ent->type) {
+ if (entry->exit_event->event.type != ent->type) {
WARN_ON_ONCE(1);
return TRACE_TYPE_UNHANDLED;
}
@@ -315,7 +315,7 @@ void ftrace_syscall_enter(void *ignore, struct pt_regs *regs, long id)
size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;
event = trace_current_buffer_lock_reserve(&buffer,
- sys_data->enter_event->id, size, 0, 0);
+ sys_data->enter_event->event.type, size, 0, 0);
if (!event)
return;
@@ -347,7 +347,7 @@ void ftrace_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
return;
event = trace_current_buffer_lock_reserve(&buffer,
- sys_data->exit_event->id, sizeof(*entry), 0, 0);
+ sys_data->exit_event->event.type, sizeof(*entry), 0, 0);
if (!event)
return;
@@ -511,7 +511,8 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
return;
rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size,
- sys_data->enter_event->id, &rctx, &flags);
+ sys_data->enter_event->event.type,
+ &rctx, &flags);
if (!rec)
return;
@@ -586,7 +587,8 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
return;
rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size,
- sys_data->exit_event->id, &rctx, &flags);
+ sys_data->exit_event->event.type,
+ &rctx, &flags);
if (!rec)
return;
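
The syscall paths make the same substitution on both the ftrace ring-buffer and the perf side. A sketch of the perf syscall-enter path after the change (size computation and the sys_data lookup are abridged; the submit step is only hinted at):

/* Sketch of the perf syscall-enter path; 'size' is assumed precomputed. */
static void perf_syscall_enter_sketch(struct syscall_metadata *sys_data,
				      struct pt_regs *regs, int size)
{
	struct syscall_trace_enter *rec;
	unsigned long flags;
	int rctx;

	rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size,
				sys_data->enter_event->event.type,	/* was ->id */
				&rctx, &flags);
	if (!rec)
		return;

	/* ... fill rec with the syscall number and arguments, then submit ... */
}
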