author     Peter Zijlstra <a.p.zijlstra@chello.nl>   2009-05-13 13:21:36 +0200
committer  Ingo Molnar <mingo@elte.hu>               2009-05-15 09:47:01 +0200
commit     962bf7a66edca4d36a730a38ff8410a67f560e40 (patch)
tree       86a22c33a9daed37db6afccfa5ed01e06ea5c00e /arch/x86
parent     53020fe81eecd0b7be295868ce5850ef8f41074e (diff)
perf_counter: x86: Fix up the amd NMI/INT throttle
perf_counter_unthrottle() restores throttle_ctrl, but it's never set. Also, we fail to disable all counters when throttling.

[ Impact: fix rare stuck perf-counters when they are throttled ]

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
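To see why counters could get stuck: the tick-side unthrottle path re-enables the PMU from the saved throttle_ctrl value, but the pre-patch AMD NMI handler never stored anything there. A minimal userspace model of that mismatch (illustrative only, not the kernel source; PERFMON_MAX_INTERRUPTS is shrunk for the demo and the unthrottle body is paraphrased from the description above):

	#include <stdio.h>

	#define PERFMON_MAX_INTERRUPTS 4	/* shrunk for the demo */

	struct cpu_hw_counters {
		int enabled;			/* PMU globally enabled? */
		int throttle_ctrl;		/* what unthrottle restores */
		unsigned int interrupts;	/* NMIs since the last tick */
	};

	/* Pre-patch AMD handler: counts interrupts but never sets throttle_ctrl. */
	static void handle_irq_old(struct cpu_hw_counters *c)
	{
		if (++c->interrupts >= PERFMON_MAX_INTERRUPTS)
			c->enabled = 0;			/* throttle: PMU off */
	}

	/* Tick-side unthrottle: "restores" a value the handler never saved. */
	static void unthrottle(struct cpu_hw_counters *c)
	{
		if (c->interrupts >= PERFMON_MAX_INTERRUPTS)
			c->enabled = c->throttle_ctrl;	/* stale: still 0 */
		c->interrupts = 0;
	}

	int main(void)
	{
		struct cpu_hw_counters c = { .enabled = 1 };	/* throttle_ctrl == 0 */
		int i;

		for (i = 0; i < PERFMON_MAX_INTERRUPTS; i++)
			handle_irq_old(&c);
		unthrottle(&c);
		printf("enabled after unthrottle: %d\n", c.enabled);	/* 0: stuck */
		return 0;
	}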
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/kernel/cpu/perf_counter.c | 38 ++++++++++++++++++++++++++------------
 1 file changed, 26 insertions(+), 12 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index c19e927..7601c01 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -334,6 +334,8 @@ static u64 amd_pmu_save_disable_all(void)
 	 * right thing.
 	 */
 	barrier();
+	if (!enabled)
+		goto out;

 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
 		u64 val;
@@ -347,6 +349,7 @@ static u64 amd_pmu_save_disable_all(void)
 		wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
 	}

+out:
 	return enabled;
 }
@@ -787,32 +790,43 @@ static int amd_pmu_handle_irq(struct pt_regs *regs, int nmi)
 	int handled = 0;
 	struct perf_counter *counter;
 	struct hw_perf_counter *hwc;
-	int idx;
+	int idx, throttle = 0;
+
+	cpuc->throttle_ctrl = cpuc->enabled;
+	cpuc->enabled = 0;
+	barrier();
+
+	if (cpuc->throttle_ctrl) {
+		if (++cpuc->interrupts >= PERFMON_MAX_INTERRUPTS)
+			throttle = 1;
+	}

-	++cpuc->interrupts;
 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+		int disable = 0;
+
 		if (!test_bit(idx, cpuc->active_mask))
 			continue;
+
 		counter = cpuc->counters[idx];
 		hwc = &counter->hw;
 		val = x86_perf_counter_update(counter, hwc, idx);
 		if (val & (1ULL << (x86_pmu.counter_bits - 1)))
-			continue;
+			goto next;
+
 		/* counter overflow */
 		x86_perf_counter_set_period(counter, hwc, idx);
 		handled = 1;
 		inc_irq_stat(apic_perf_irqs);
-		if (perf_counter_overflow(counter, nmi, regs, 0))
-			amd_pmu_disable_counter(hwc, idx);
-		else if (cpuc->interrupts >= PERFMON_MAX_INTERRUPTS)
-			/*
-			 * do not reenable when throttled, but reload
-			 * the register
-			 */
+		disable = perf_counter_overflow(counter, nmi, regs, 0);
+
+next:
+		if (disable || throttle)
 			amd_pmu_disable_counter(hwc, idx);
-		else if (counter->state == PERF_COUNTER_STATE_ACTIVE)
-			amd_pmu_enable_counter(hwc, idx);
 	}
+
+	if (cpuc->throttle_ctrl && !throttle)
+		cpuc->enabled = 1;
+
 	return handled;
 }
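Reduced to its control logic, the patched handler follows a save / scan / conditional-re-enable protocol: record the enabled state up front, make the throttle decision once per NMI, disable every active counter while throttled (not only the one that overflowed), and re-enable the PMU only when not throttled. A compressed userspace model of that protocol, under the same caveats as the sketch above (names mirror the diff; thresholds shrunk for the demo):

	#include <stdio.h>

	#define PERFMON_MAX_INTERRUPTS 4	/* shrunk for the demo */
	#define NUM_COUNTERS 2

	struct cpu_hw_counters {
		int enabled, throttle_ctrl;
		unsigned int interrupts;
		int active[NUM_COUNTERS];	/* per-counter enable bits */
	};

	/* Post-patch shape of amd_pmu_handle_irq(): returns 1 when throttled. */
	static int handle_irq(struct cpu_hw_counters *c)
	{
		int idx, throttle = 0;

		c->throttle_ctrl = c->enabled;	/* the fix: actually save the state */
		c->enabled = 0;

		if (c->throttle_ctrl && ++c->interrupts >= PERFMON_MAX_INTERRUPTS)
			throttle = 1;

		/* Throttled: disable *all* active counters, not just overflowed ones. */
		for (idx = 0; idx < NUM_COUNTERS; idx++)
			if (throttle)
				c->active[idx] = 0;

		if (c->throttle_ctrl && !throttle)
			c->enabled = 1;		/* not throttled: put the PMU back on */
		return throttle;
	}

	/* Tick-side unthrottle now restores a value that really was saved. */
	static void unthrottle(struct cpu_hw_counters *c)
	{
		if (c->interrupts >= PERFMON_MAX_INTERRUPTS)
			c->enabled = c->throttle_ctrl;	/* 1: PMU comes back */
		c->interrupts = 0;
	}

	int main(void)
	{
		struct cpu_hw_counters c = { .enabled = 1, .active = { 1, 1 } };

		while (!handle_irq(&c))		/* NMIs stop once we throttle */
			;
		printf("throttled: enabled=%d\n", c.enabled);		/* 0 */
		unthrottle(&c);
		printf("after unthrottle: enabled=%d\n", c.enabled);	/* 1 */
		return 0;
	}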