author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2010-05-18 18:08:32 +0200
committer	Ingo Molnar <mingo@elte.hu>	2010-05-18 18:35:46 +0200
commit	4f41c013f553957765902fb01475972f0af3e8e7 (patch)
tree	ddaa54947cc990094a4b270f2f8b3d6da195044f /kernel
parent	ef4f30f54e265c2f6f9ac9eda4db158a4e16050b (diff)
perf/ftrace: Optimize perf/tracepoint interaction for single events
When we've got but a single event per tracepoint there is no reason
to try and multiplex it so don't.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Tested-by: Ingo Molnar <mingo@elte.hu>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
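The idea behind the fast path can be illustrated outside the kernel. The sketch below is a minimal userspace analogue, not the patch itself: the names (struct tp_consumer, struct tracepoint_sim, tp_dispatch, deliver) are invented for illustration and are not kernel APIs. When a tracepoint has exactly one consumer cached, the sample is handed straight to it; otherwise we fall back to walking every consumer, which is what the multiplexed do_perf_sw_event() path does.

/*
 * Minimal userspace analogue of the single-event fast path.  All names
 * here are made up for illustration; they are not kernel interfaces.
 */
#include <stdio.h>
#include <stddef.h>

struct tp_consumer {
	const char *name;
	struct tp_consumer *next;	/* list walked by the slow path */
};

struct tracepoint_sim {
	struct tp_consumer *consumers;	/* all attached consumers */
	struct tp_consumer *single;	/* cached when exactly one is attached */
};

static void deliver(struct tp_consumer *c, int sample)
{
	printf("sample %d -> %s\n", sample, c->name);
}

static void tp_dispatch(struct tracepoint_sim *tp, int sample)
{
	struct tp_consumer *c;

	/* Fast path: one known consumer, no matching or multiplexing. */
	if (tp->single) {
		deliver(tp->single, sample);
		return;
	}

	/* Slow path: walk every attached consumer. */
	for (c = tp->consumers; c; c = c->next)
		deliver(c, sample);
}

int main(void)
{
	struct tp_consumer a = { .name = "perf-event-A", .next = NULL };
	struct tracepoint_sim tp = { .consumers = &a, .single = &a };

	tp_dispatch(&tp, 1);		/* fast path: direct delivery */

	struct tp_consumer b = { .name = "perf-event-B", .next = &a };
	tp.consumers = &b;
	tp.single = NULL;		/* second consumer attached: drop the cache */

	tp_dispatch(&tp, 2);		/* slow path: both consumers see the sample */
	return 0;
}

This mirrors the structure of the patch below: perf_trace_event_enable() caches the sole event in event->perf_data on the first enable and clears it when a second user shows up, and perf_tp_event() takes the direct perf_swevent_add() path only when that cached event is passed in.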
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/perf_event.c	15
-rw-r--r--	kernel/trace/trace_event_perf.c	11
-rw-r--r--	kernel/trace/trace_kprobe.c	4
-rw-r--r--	kernel/trace/trace_syscalls.c	6
4 files changed, 23 insertions, 13 deletions
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index a4fa381..17ac47f 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -4468,8 +4468,9 @@ static int swevent_hlist_get(struct perf_event *event)
#ifdef CONFIG_EVENT_TRACING
void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
- int entry_size, struct pt_regs *regs)
+ int entry_size, struct pt_regs *regs, void *event)
{
+ const int type = PERF_TYPE_TRACEPOINT;
struct perf_sample_data data;
struct perf_raw_record raw = {
.size = entry_size,
@@ -4479,9 +4480,13 @@ void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
perf_sample_data_init(&data, addr);
data.raw = &raw;
-	/* Trace events already protected against recursion */
-	do_perf_sw_event(PERF_TYPE_TRACEPOINT, event_id, count, 1,
-			 &data, regs);
+	if (!event) {
+		do_perf_sw_event(type, event_id, count, 1, &data, regs);
+		return;
+	}
+
+	if (perf_swevent_match(event, type, event_id, &data, regs))
+		perf_swevent_add(event, count, 1, &data, regs);
}
EXPORT_SYMBOL_GPL(perf_tp_event);
@@ -4514,7 +4519,7 @@ static const struct pmu *tp_perf_event_init(struct perf_event *event)
!capable(CAP_SYS_ADMIN))
return ERR_PTR(-EPERM);
- if (perf_trace_enable(event->attr.config))
+ if (perf_trace_enable(event->attr.config, event))
return NULL;
event->destroy = tp_perf_event_destroy;
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index 0565bb4..89b780a 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -27,13 +27,15 @@ typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
/* Count the events in use (per event id, not per instance) */
static int total_ref_count;
-static int perf_trace_event_enable(struct ftrace_event_call *event)
+static int perf_trace_event_enable(struct ftrace_event_call *event, void *data)
{
char *buf;
int ret = -ENOMEM;
-	if (event->perf_refcount++ > 0)
+	if (event->perf_refcount++ > 0) {
+		event->perf_data = NULL;
 		return 0;
+	}
if (!total_ref_count) {
buf = (char *)alloc_percpu(perf_trace_t);
@@ -51,6 +53,7 @@ static int perf_trace_event_enable(struct ftrace_event_call *event)
ret = event->perf_event_enable(event);
if (!ret) {
+ event->perf_data = data;
total_ref_count++;
return 0;
}
@@ -68,7 +71,7 @@ fail_buf:
return ret;
}
-int perf_trace_enable(int event_id)
+int perf_trace_enable(int event_id, void *data)
{
struct ftrace_event_call *event;
int ret = -EINVAL;
@@ -77,7 +80,7 @@ int perf_trace_enable(int event_id)
list_for_each_entry(event, &ftrace_events, list) {
if (event->id == event_id && event->perf_event_enable &&
try_module_get(event->mod)) {
- ret = perf_trace_event_enable(event);
+ ret = perf_trace_event_enable(event, data);
break;
}
}
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index a751432..2d7bf41 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -1362,7 +1362,7 @@ static __kprobes void kprobe_perf_func(struct kprobe *kp,
for (i = 0; i < tp->nr_args; i++)
call_fetch(&tp->args[i].fetch, regs, data + tp->args[i].offset);
- perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, irq_flags, regs);
+ perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, irq_flags, regs, call->perf_data);
}
/* Kretprobe profile handler */
@@ -1395,7 +1395,7 @@ static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri,
call_fetch(&tp->args[i].fetch, regs, data + tp->args[i].offset);
perf_trace_buf_submit(entry, size, rctx, entry->ret_ip, 1,
- irq_flags, regs);
+ irq_flags, regs, call->perf_data);
}
static int probe_perf_enable(struct ftrace_event_call *call)
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 4d6d711..9eff1a4 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -468,7 +468,8 @@ static void perf_syscall_enter(struct pt_regs *regs, long id)
rec->nr = syscall_nr;
syscall_get_arguments(current, regs, 0, sys_data->nb_args,
(unsigned long *)&rec->args);
- perf_trace_buf_submit(rec, size, rctx, 0, 1, flags, regs);
+ perf_trace_buf_submit(rec, size, rctx, 0, 1, flags, regs,
+ sys_data->enter_event->perf_data);
}
int perf_sysenter_enable(struct ftrace_event_call *call)
@@ -543,7 +544,8 @@ static void perf_syscall_exit(struct pt_regs *regs, long ret)
rec->nr = syscall_nr;
rec->ret = syscall_get_return_value(current, regs);
- perf_trace_buf_submit(rec, size, rctx, 0, 1, flags, regs);
+ perf_trace_buf_submit(rec, size, rctx, 0, 1, flags, regs,
+ sys_data->exit_event->perf_data);
}
int perf_sysexit_enable(struct ftrace_event_call *call)