author    Steven Rostedt <srostedt@redhat.com>  2010-04-23 11:12:36 -0400
committer Steven Rostedt <rostedt@goodmis.org>  2010-05-14 14:33:22 -0400
commit    553552ce1796c32cf4e3d4f45cd5b537de91dd1d (patch)
tree      a65defc1055bcc3e9f34327d2cc59704e536948b
parent    32c0edaeaad74a7883e736ae0f3798784cfc2a80 (diff)
tracing: Combine event filter_active and enable into single flags field
The filter_active and enabled fields each use an int (4 bytes) to set a
single flag. We can save 4 bytes per event by combining the two into a
single integer.

   text    data     bss     dec     hex filename
4913961 1088356  861512 6863829  68bbd5 vmlinux.orig
4894944 1018052  861512 6774508  675eec vmlinux.id
4894871 1012292  861512 6768675  674823 vmlinux.flags

This gives us another 5K in savings.

Modification of both the enable and filter fields is done under the
event_mutex, so it is still safe to combine the two.

Note: Although Mathieu gave his Acked-by, he would like it documented that
the reads of flags are not protected by the mutex. The way the code works,
these reads will not break anything, but will have a residual effect. Since
this behavior is the same even before this patch, describing this situation
is left to another patch, as this patch does not change the behavior, but
just brought it to Mathieu's attention.

v2: Updated the event trace self test for this change.

Acked-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Acked-by: Masami Hiramatsu <mhiramat@redhat.com>
Acked-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Tom Zanussi <tzanussi@gmail.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
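To make the space-saving pattern concrete, the following is a minimal, self-contained
C sketch of the same technique: two separate int flag fields are folded into one
32-bit flags word, with bitwise test/set/clear replacing direct assignments. The
demo_event struct and demo_* helpers are hypothetical names used only for
illustration, not the kernel's actual definitions; as in the patch, writers are
assumed to serialize updates (the kernel uses event_mutex), while readers may test
bits without holding it.

	#include <stdio.h>

	/* Illustrative flag bits, mirroring the TRACE_EVENT_FL_* enums added by the patch. */
	enum {
		DEMO_FL_ENABLED_BIT,
		DEMO_FL_FILTERED_BIT,
	};

	enum {
		DEMO_FL_ENABLED  = (1 << DEMO_FL_ENABLED_BIT),
		DEMO_FL_FILTERED = (1 << DEMO_FL_FILTERED_BIT),
	};

	/* Hypothetical event: one 32-bit flags word replaces two separate int fields. */
	struct demo_event {
		unsigned int flags;	/* writers are assumed to hold a mutex */
	};

	static void demo_enable(struct demo_event *ev)  { ev->flags |= DEMO_FL_ENABLED; }
	static void demo_disable(struct demo_event *ev) { ev->flags &= ~DEMO_FL_ENABLED; }

	int main(void)
	{
		struct demo_event ev = { .flags = 0 };

		demo_enable(&ev);
		ev.flags |= DEMO_FL_FILTERED;

		/* Readers test individual bits, as filter_check_discard() does after the patch. */
		printf("enabled=%d filtered=%d\n",
		       !!(ev.flags & DEMO_FL_ENABLED),
		       !!(ev.flags & DEMO_FL_FILTERED));

		demo_disable(&ev);
		printf("enabled=%d\n", !!(ev.flags & DEMO_FL_ENABLED));
		return 0;
	}

Built with a plain C compiler, this prints "enabled=1 filtered=1" and then
"enabled=0", mirroring how TRACE_EVENT_FL_ENABLED and TRACE_EVENT_FL_FILTERED
are set, cleared, and queried throughout the diff below.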
-rw-r--r--   include/linux/ftrace_event.h         21
-rw-r--r--   kernel/trace/trace.h                   2
-rw-r--r--   kernel/trace/trace_events.c           16
-rw-r--r--   kernel/trace/trace_events_filter.c    10
-rw-r--r--   kernel/trace/trace_kprobe.c            2
5 files changed, 34 insertions, 17 deletions
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 0be0285..5ac97a4 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -143,6 +143,16 @@ struct ftrace_event_class {
int (*raw_init)(struct ftrace_event_call *);
};
+enum {
+ TRACE_EVENT_FL_ENABLED_BIT,
+ TRACE_EVENT_FL_FILTERED_BIT,
+};
+
+enum {
+ TRACE_EVENT_FL_ENABLED = (1 << TRACE_EVENT_FL_ENABLED_BIT),
+ TRACE_EVENT_FL_FILTERED = (1 << TRACE_EVENT_FL_FILTERED_BIT),
+};
+
struct ftrace_event_call {
struct list_head list;
struct ftrace_event_class *class;
@@ -154,8 +164,15 @@ struct ftrace_event_call {
void *mod;
void *data;
- int enabled;
- int filter_active;
+ /*
+ * 32 bit flags:
+ * bit 1: enabled
+ * bit 2: filter_active
+ *
+ * Must hold event_mutex to change.
+ */
+ unsigned int flags;
+
int perf_refcount;
};
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index c88c563..6356259 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -802,7 +802,7 @@ filter_check_discard(struct ftrace_event_call *call, void *rec,
struct ring_buffer *buffer,
struct ring_buffer_event *event)
{
- if (unlikely(call->filter_active) &&
+ if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
!filter_match_preds(call->filter, rec)) {
ring_buffer_discard_commit(buffer, event);
return 1;
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 8daaca5..53cffc0 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -137,8 +137,8 @@ static int ftrace_event_enable_disable(struct ftrace_event_call *call,
switch (enable) {
case 0:
- if (call->enabled) {
- call->enabled = 0;
+ if (call->flags & TRACE_EVENT_FL_ENABLED) {
+ call->flags &= ~TRACE_EVENT_FL_ENABLED;
tracing_stop_cmdline_record();
if (call->class->reg)
call->class->reg(call, TRACE_REG_UNREGISTER);
@@ -149,7 +149,7 @@ static int ftrace_event_enable_disable(struct ftrace_event_call *call,
}
break;
case 1:
- if (!call->enabled) {
+ if (!(call->flags & TRACE_EVENT_FL_ENABLED)) {
tracing_start_cmdline_record();
if (call->class->reg)
ret = call->class->reg(call, TRACE_REG_REGISTER);
@@ -163,7 +163,7 @@ static int ftrace_event_enable_disable(struct ftrace_event_call *call,
"%s\n", call->name);
break;
}
- call->enabled = 1;
+ call->flags |= TRACE_EVENT_FL_ENABLED;
}
break;
}
@@ -352,7 +352,7 @@ s_next(struct seq_file *m, void *v, loff_t *pos)
(*pos)++;
list_for_each_entry_continue(call, &ftrace_events, list) {
- if (call->enabled)
+ if (call->flags & TRACE_EVENT_FL_ENABLED)
return call;
}
@@ -411,7 +411,7 @@ event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
struct ftrace_event_call *call = filp->private_data;
char *buf;
- if (call->enabled)
+ if (call->flags & TRACE_EVENT_FL_ENABLED)
buf = "1\n";
else
buf = "0\n";
@@ -486,7 +486,7 @@ system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
* or if all events or cleared, or if we have
* a mixture.
*/
- set |= (1 << !!call->enabled);
+ set |= (1 << !!(call->flags & TRACE_EVENT_FL_ENABLED));
/*
* If we have a mixture, no need to look further.
@@ -1447,7 +1447,7 @@ static __init void event_trace_self_tests(void)
* If an event is already enabled, someone is using
* it and the self test should not be on.
*/
- if (call->enabled) {
+ if (call->flags & TRACE_EVENT_FL_ENABLED) {
pr_warning("Enabled event during self test!\n");
WARN_ON_ONCE(1);
continue;
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 2702d6b..239ea5d 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -547,7 +547,7 @@ static void filter_disable_preds(struct ftrace_event_call *call)
struct event_filter *filter = call->filter;
int i;
- call->filter_active = 0;
+ call->flags &= ~TRACE_EVENT_FL_FILTERED;
filter->n_preds = 0;
for (i = 0; i < MAX_FILTER_PRED; i++)
@@ -574,7 +574,7 @@ void destroy_preds(struct ftrace_event_call *call)
{
__free_preds(call->filter);
call->filter = NULL;
- call->filter_active = 0;
+ call->flags &= ~TRACE_EVENT_FL_FILTERED;
}
static struct event_filter *__alloc_preds(void)
@@ -613,7 +613,7 @@ static int init_preds(struct ftrace_event_call *call)
if (call->filter)
return 0;
- call->filter_active = 0;
+ call->flags &= ~TRACE_EVENT_FL_FILTERED;
call->filter = __alloc_preds();
if (IS_ERR(call->filter))
return PTR_ERR(call->filter);
@@ -1268,7 +1268,7 @@ static int replace_system_preds(struct event_subsystem *system,
if (err)
filter_disable_preds(call);
else {
- call->filter_active = 1;
+ call->flags |= TRACE_EVENT_FL_FILTERED;
replace_filter_string(filter, filter_string);
}
fail = false;
@@ -1317,7 +1317,7 @@ int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
if (err)
append_filter_err(ps, call->filter);
else
- call->filter_active = 1;
+ call->flags |= TRACE_EVENT_FL_FILTERED;
out:
filter_opstack_clear(ps);
postfix_clear(ps);
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 934078b..0e3ded6 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -1382,7 +1382,7 @@ static int register_probe_event(struct trace_probe *tp)
kfree(call->print_fmt);
return -ENODEV;
}
- call->enabled = 0;
+ call->flags = 0;
call->class->reg = kprobe_register;
call->data = tp;
ret = trace_add_event_call(call);