author    Frederic Weisbecker <fweisbec@gmail.com>  2013-07-23 02:31:05 +0200
committer Ingo Molnar <mingo@kernel.org>            2013-07-30 22:29:15 +0200
commit    d84153d6c96f61aa06429586284639f32debf03e (patch)
tree      3aab6dd1374fa5f9beb6ea0ad65bcbcf25798faa /kernel/events
parent    ba8a75c16e292c0a3a87406a77508cbbc6cf4ee2 (diff)
perf: Implement finer grained full dynticks kick
Currently the full dynticks subsystem keeps the
tick alive as long as there are perf events running.
This prevents the tick from being stopped while features
such as the lockup detectors are running. As a temporary fix,
the lockup detector is disabled by default when full dynticks
is built, but this is not a viable long-term solution.
To fix this, only keep the tick alive when an event configured
with a frequency rather than a period is running on the CPU,
or when an event throttles on the CPU.
These are the only purposes of the perf tick, especially now that
the rotation of flexible events is handled from a separate hrtimer.
The tick can be shut down the rest of the time.
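
[Editor's illustration] The hunks below read a per-CPU perf_freq_events
counter, but the accounting that maintains it is not part of this diff.
A hypothetical sketch of what that bookkeeping could look like; the
helper names account_freq_event()/unaccount_freq_event() are made up
for illustration, not taken from the patch:

/*
 * Hypothetical sketch -- not code from this patch.  It illustrates
 * the bookkeeping the tick-stop check relies on: a per-CPU count
 * of events that use frequency-based sampling (attr.freq).
 */
static DEFINE_PER_CPU(atomic_t, perf_freq_events);

static void account_freq_event(struct perf_event *event, int cpu)
{
	/* Period-based events don't need the tick; only freq events do. */
	if (event->attr.freq)
		atomic_inc(&per_cpu(perf_freq_events, cpu));
}

static void unaccount_freq_event(struct perf_event *event, int cpu)
{
	if (event->attr.freq)
		atomic_dec(&per_cpu(perf_freq_events, cpu));
}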
Original-patch-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Stephane Eranian <eranian@google.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1374539466-4799-8-git-send-email-fweisbec@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/events')
-rw-r--r--  kernel/events/core.c | 17
1 file changed, 9 insertions(+), 8 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 3fe385a..916cf1f 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -870,12 +870,8 @@ static void perf_pmu_rotate_start(struct pmu *pmu)
 
 	WARN_ON(!irqs_disabled());
 
-	if (list_empty(&cpuctx->rotation_list)) {
-		int was_empty = list_empty(head);
+	if (list_empty(&cpuctx->rotation_list))
 		list_add(&cpuctx->rotation_list, head);
-		if (was_empty)
-			tick_nohz_full_kick();
-	}
 }
 
 static void get_ctx(struct perf_event_context *ctx)
@@ -1875,6 +1871,9 @@ static int __perf_install_in_context(void *info)
 	perf_pmu_enable(cpuctx->ctx.pmu);
 	perf_ctx_unlock(cpuctx, task_ctx);
 
+	if (atomic_read(&__get_cpu_var(perf_freq_events)))
+		tick_nohz_full_kick();
+
 	return 0;
 }
 
@@ -2812,10 +2811,11 @@ done:
 
 #ifdef CONFIG_NO_HZ_FULL
 bool perf_event_can_stop_tick(void)
 {
-	if (list_empty(&__get_cpu_var(rotation_list)))
-		return true;
-	else
+	if (atomic_read(&__get_cpu_var(perf_freq_events)) ||
+	    __this_cpu_read(perf_throttled_count))
 		return false;
+	else
+		return true;
 }
 #endif
@@ -5202,6 +5202,7 @@ static int __perf_event_overflow(struct perf_event *event,
 		__this_cpu_inc(perf_throttled_count);
 		hwc->interrupts = MAX_INTERRUPTS;
 		perf_log_throttle(event, 0);
+		tick_nohz_full_kick();
 		ret = 1;
 	}
 }
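
[Editor's illustration] For context, perf_event_can_stop_tick() is one
of the per-subsystem checks the full-dynticks core consults before it
stops the tick. A simplified sketch of that caller, assuming the
3.11-era can_stop_full_tick() in kernel/time/tick-sched.c; the real
function also consults the scheduler and posix CPU timers:

/*
 * Simplified sketch of the nohz-full caller; only the perf check is
 * shown here.  If any subsystem still needs the tick, it cannot be
 * stopped on this CPU.
 */
static bool can_stop_full_tick(void)
{
	WARN_ON_ONCE(!irqs_disabled());

	/* After this patch: only freq events or throttling keep the tick. */
	if (!perf_event_can_stop_tick())
		return false;

	return true;
}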