path: root/include
author		Frederic Weisbecker <fweisbec@gmail.com>	2010-03-22 19:40:03 +0100
committer	Frederic Weisbecker <fweisbec@gmail.com>	2010-04-01 08:26:31 +0200
commit		e49a5bd38159dfb1928fd25b173bc9de4bbadb21 (patch)
tree		85c7c5fcce6df6a6ab6c119c40f6652d7783ec0b /include
parent		eb1e79611cc9bfe21978230e3521e77ea2d7874a (diff)
download	op-kernel-dev-e49a5bd38159dfb1928fd25b173bc9de4bbadb21.zip
		op-kernel-dev-e49a5bd38159dfb1928fd25b173bc9de4bbadb21.tar.gz
perf: Use hot regs with software sched switch/migrate events
Scheduler's task migration events don't work because they always pass
NULL regs to perf_sw_event(). The event hence gets filtered in
perf_swevent_add().

Scheduler's context switch events use task_pt_regs() to get the context
when the event occurred, which is the wrong thing to do as this won't
give us the place in the kernel where we went to sleep but the place
where we left userspace. The result is even more wrong if we switch
from a kernel thread.

Use the hot regs snapshot for both events as they belong to the
non-interrupt/exception based events family. Unlike page faults and the
like, which provide regs matching the exact origin of the event, we
need to save the current context.

This makes the task migration event work and fixes the context switch
callchains and origin ip.

Example: perf record -a -e cs

Before:
    10.91%  ksoftirqd/0  0  [k] 0000000000000000
            |
            --- (nil)
                perf_callchain
                perf_prepare_sample
                __perf_event_overflow
                perf_swevent_overflow
                perf_swevent_add
                perf_swevent_ctx_event
                do_perf_sw_event
                __perf_sw_event
                perf_event_task_sched_out
                schedule
                run_ksoftirqd
                kthread
                kernel_thread_helper

After:
    23.77%  hald-addon-stor  [kernel.kallsyms]  [k] schedule
            |
            --- schedule
               |
               |--60.00%-- schedule_timeout
               |           wait_for_common
               |           wait_for_completion
               |           blk_execute_rq
               |           scsi_execute
               |           scsi_execute_req
               |           sr_test_unit_ready
               |           |
               |           |--66.67%-- sr_media_change
               |           |           media_changed
               |           |           cdrom_media_changed
               |           |           sr_block_media_changed
               |           |           check_disk_change
               |           |           cdrom_open

v2: Always build perf_arch_fetch_caller_regs() now that software events
need it too. They don't need it from modules, unlike trace events, so
we keep the EXPORT_SYMBOL in trace_event_perf.c

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: David Miller <davem@davemloft.net>
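For illustration only (this call site is hypothetical and not part of
the patch), a minimal sketch of what a scheduler-side caller looks like
under this scheme; the arguments follow the perf_sw_event() signature
shown in the diff below:

	/*
	 * Raising a software event from scheduler context. With this
	 * patch, passing NULL regs no longer gets the event filtered
	 * out in perf_swevent_add(): perf_sw_event() snapshots the hot
	 * (current) registers at the call site instead.
	 */
	perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 1, NULL, 0);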
Diffstat (limited to 'include')
-rw-r--r--	include/linux/perf_event.h | 21
1 file changed, 14 insertions(+), 7 deletions(-)
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 9547703..c8e375440 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -842,13 +842,6 @@ extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
 
 extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64);
 
-static inline void
-perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
-{
-	if (atomic_read(&perf_swevent_enabled[event_id]))
-		__perf_sw_event(event_id, nr, nmi, regs, addr);
-}
-
 extern void
 perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip);
 
@@ -887,6 +880,20 @@ static inline void perf_fetch_caller_regs(struct pt_regs *regs, int skip)
 	return perf_arch_fetch_caller_regs(regs, ip, skip);
 }
 
+static inline void
+perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
+{
+	if (atomic_read(&perf_swevent_enabled[event_id])) {
+		struct pt_regs hot_regs;
+
+		if (!regs) {
+			perf_fetch_caller_regs(&hot_regs, 1);
+			regs = &hot_regs;
+		}
+		__perf_sw_event(event_id, nr, nmi, regs, addr);
+	}
+}
+
 extern void __perf_event_mmap(struct vm_area_struct *vma);
 
 static inline void perf_event_mmap(struct vm_area_struct *vma)
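For context (not part of this diff), a rough sketch of what an
architecture's perf_arch_fetch_caller_regs() has to record, loosely
modeled on the x86 implementation of this era; rewind_frame_pointer()
is assumed here as the arch's frame-walking helper, and the field names
assume the x86 struct pt_regs:

void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip)
{
	/*
	 * Capture the current ("hot") kernel context rather than the
	 * regs of an interrupt or exception origin. ip is the caller's
	 * instruction pointer; skip unwinds past the perf helpers so
	 * the callchain starts at the real call site.
	 */
	regs->ip = ip;				/* origin ip for the sample */
	regs->bp = rewind_frame_pointer(skip);	/* assumed frame walker */
	regs->cs = __KERNEL_CS;			/* sampling kernel mode */
	local_save_flags(regs->flags);		/* current irq flags */
}

The v2 note above is why this helper is now built unconditionally:
software events need the snapshot even when trace events are compiled
out.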