author    Ingo Molnar <mingo@elte.hu>  2009-05-26 08:10:00 +0200
committer Ingo Molnar <mingo@elte.hu>  2009-05-26 09:49:28 +0200
commit    79202ba9ff8cf570a75596f42e011167734d1c4b (patch)
tree      a7a2ebd002db9a212319da0bdc82609fe47e49e1 /arch
parent    8a7b8cb91f26a671f22cedc7fd54508667f2d9b9 (diff)
perf_counter, x86: Fix APIC NMI programming
My Nehalem box locks up in certain situations (with an always-asserted
NMI causing a lockup) if the PMU LVT entry is switched between NMI and
IRQ mode at high frequency. Standardize exclusively on NMIs instead.

[ Impact: fix lockup ]

Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: John Kacur <jkacur@redhat.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
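For context, the NMI/IRQ mode switching the changelog describes happens in the local APIC's performance-counter LVT entry. The sketch below is an illustrative reconstruction, not part of this patch: it assumes the apic_read()/apic_write() accessors and the APIC_LVTPC, APIC_LVTERR, APIC_LVT_MASKED, APIC_DM_NMI and LOCAL_PERF_VECTOR definitions from the x86 APIC headers of this era, and the function name is hypothetical (loosely modeled on the perf_counters_lapic_init() helper in this file).

/*
 * Illustrative sketch, not part of this patch: program the local APIC
 * performance-counter LVT entry for either NMI or regular IRQ
 * delivery. Rapidly flipping between these two modes is what locked
 * up the Nehalem box described above; after this patch only the NMI
 * mode is ever used.
 */
static void example_lapic_pmu_setup(int nmi)    /* hypothetical name */
{
        u32 lvterr;

        /* Mask the error LVT entry while the PMU entry is reprogrammed: */
        lvterr = apic_read(APIC_LVTERR);
        apic_write(APIC_LVTERR, lvterr | APIC_LVT_MASKED);

        if (nmi)
                /* Deliver counter overflow as a non-maskable interrupt: */
                apic_write(APIC_LVTPC, APIC_DM_NMI);
        else
                /* Deliver it via the regular local performance vector: */
                apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR);

        apic_write(APIC_LVTERR, lvterr);
}

With hwc->nmi forced to 1 by the change below, such a helper is only ever invoked in NMI mode, so the LVT entry is never toggled back and forth.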
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/kernel/cpu/perf_counter.c | 16
1 file changed, 3 insertions(+), 13 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 189bf9d..ece3813 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -285,14 +285,10 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
 		hwc->config |= ARCH_PERFMON_EVENTSEL_OS;
 
 	/*
-	 * If privileged enough, allow NMI events:
+	 * Use NMI events all the time:
 	 */
-	hwc->nmi = 0;
-	if (hw_event->nmi) {
-		if (sysctl_perf_counter_priv && !capable(CAP_SYS_ADMIN))
-			return -EACCES;
-		hwc->nmi = 1;
-	}
+	hwc->nmi = 1;
+	hw_event->nmi = 1;
 
 	if (!hwc->irq_period)
 		hwc->irq_period = x86_pmu.max_period;
@@ -553,9 +549,6 @@ fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc)
 	if (!x86_pmu.num_counters_fixed)
 		return -1;
 
-	if (unlikely(hwc->nmi))
-		return -1;
-
 	event = hwc->config & ARCH_PERFMON_EVENT_MASK;
 
 	if (unlikely(event == x86_pmu.event_map(PERF_COUNT_INSTRUCTIONS)))
@@ -806,9 +799,6 @@ static int amd_pmu_handle_irq(struct pt_regs *regs, int nmi)
 		counter = cpuc->counters[idx];
 		hwc = &counter->hw;
 
-		if (counter->hw_event.nmi != nmi)
-			continue;
-
 		val = x86_perf_counter_update(counter, hwc, idx);
 		if (val & (1ULL << (x86_pmu.counter_bits - 1)))
 			continue;