author	Peter Zijlstra <peterz@infradead.org>	2010-01-29 09:04:26 +0100
committer	Ingo Molnar <mingo@elte.hu>	2010-01-29 09:15:26 +0100
commit	75c9f3284a7ff957829f44baace82406a6354ceb (patch)
tree	8e8903b75bbecbee85f0a30743f049e39b08d15d /kernel
parent	18c01f8abff51e4910cc5ffb4b710e8c6eea60c9 (diff)
perf_events: Fix sample_period transfer on inherit
One problem with frequency-driven counters is that we cannot predict the
rate at which they trigger; therefore we have to start them at period=1,
which causes a ramp-up effect. However, if we fail to propagate the
stable state on fork, each new child will have to ramp up again. This
can lead to significant artifacts in sample data.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: eranian@google.com
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
LKML-Reference: <1264752266.4283.2121.camel@laptop>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
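For illustration only (not part of the patch): a minimal userspace sketch
of the scenario this fix addresses, i.e. a frequency-driven counter opened
with inherit set so that fork()ed children receive a copy of the event.
The 4000 Hz sample frequency and the cycle-counter choice are arbitrary
example values.

	/* sketch: frequency-driven, inheritable perf event */
	#include <linux/perf_event.h>
	#include <sys/syscall.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		struct perf_event_attr attr;
		int fd;

		memset(&attr, 0, sizeof(attr));
		attr.size        = sizeof(attr);
		attr.type        = PERF_TYPE_HARDWARE;
		attr.config      = PERF_COUNT_HW_CPU_CYCLES;
		attr.freq        = 1;    /* frequency-driven: kernel adapts the period */
		attr.sample_freq = 4000; /* target samples per second (example value) */
		attr.inherit     = 1;    /* children inherit the event on fork() */

		/* pid = 0, cpu = -1: measure this task on any CPU */
		fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
		if (fd < 0) {
			perror("perf_event_open");
			return 1;
		}
		/*
		 * Before this fix, each fork()ed child restarted its period
		 * ramp-up from period=1; with it, the parent's stable
		 * sample_period is copied into the child event.
		 */
		close(fd);
		return 0;
	}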
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/perf_event.c | 11 +++++++++--
1 files changed, 9 insertions(+), 2 deletions(-)
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 251fb95..53dc2a3 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -5002,8 +5002,15 @@ inherit_event(struct perf_event *parent_event,
 	else
 		child_event->state = PERF_EVENT_STATE_OFF;
 
-	if (parent_event->attr.freq)
-		child_event->hw.sample_period = parent_event->hw.sample_period;
+	if (parent_event->attr.freq) {
+		u64 sample_period = parent_event->hw.sample_period;
+		struct hw_perf_event *hwc = &child_event->hw;
+
+		hwc->sample_period = sample_period;
+		hwc->last_period = sample_period;
+
+		atomic64_set(&hwc->period_left, sample_period);
+	}
 
 	child_event->overflow_handler = parent_event->overflow_handler;