author    Peter Zijlstra <a.p.zijlstra@chello.nl>  2009-07-10 09:59:56 +0200
committer Ingo Molnar <mingo@elte.hu>              2009-07-10 10:28:29 +0200
commit    984b838ce69c063a91b87550598ab7f3439dd94a (patch)
tree      70654d87c9964a16642ef6d5d9d3c420f788bfae
parent    9c74fb50867e8fb5f3be3be06716492c0f79309e (diff)
perf_counter: Clean up global vs counter enable
Ingo noticed that both AMD and P6 call x86_pmu_disable_counter() on
*_pmu_enable_counter(). This is because we rely on the side effect of
that call to program the event config but not touch the EN bit.

We change that for AMD by having enable_all() simply write the full
config in, and for P6 by explicitly coding it.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--  arch/x86/kernel/cpu/perf_counter.c | 16
1 file changed, 8 insertions(+), 8 deletions(-)
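
For readers who want the logic in one place before reading the hunks, below is a minimal
user-space sketch of the enable paths as they behave after this change. Everything in it is
a hypothetical stand-in (a plain array instead of the EVNTSEL MSRs, made-up event values),
not the kernel's code; it only models the control flow described above, where the enable
paths write the counter's saved config with the EN bit ORed in, instead of relying on the
disable path's side effect of programming the config without EN.

/*
 * User-space model of the post-patch enable logic. All names and values
 * are stand-ins for illustration, not kernel definitions.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_COUNTERS	4
#define EVNTSEL_ENABLE	(1ULL << 22)	/* stands in for ARCH_PERFMON_EVENTSEL0_ENABLE */

static uint64_t evntsel[NUM_COUNTERS];	/* stands in for MSR_K7_EVNTSEL0 + idx */

struct counter {
	uint64_t config;	/* stands in for counter->hw.config */
	bool	 active;	/* stands in for the bit in cpuc->active_mask */
};

/* AMD-style enable_all(): write the full saved config, plus EN, per active counter. */
static void enable_all(const struct counter *counters)
{
	for (int idx = 0; idx < NUM_COUNTERS; idx++) {
		uint64_t val;

		if (!counters[idx].active)
			continue;

		val = counters[idx].config;
		val |= EVNTSEL_ENABLE;
		evntsel[idx] = val;
	}
}

/* P6-style enable_counter(): set EN only if the PMU as a whole is enabled. */
static void enable_counter(const struct counter *c, int idx, bool pmu_enabled)
{
	uint64_t val = c->config;

	if (pmu_enabled)
		val |= EVNTSEL_ENABLE;

	evntsel[idx] = val;
}

int main(void)
{
	struct counter counters[NUM_COUNTERS] = {
		{ .config = 0x76, .active = true },	/* arbitrary event selects */
		{ .config = 0xc0, .active = true },
	};

	enable_all(counters);
	enable_counter(&counters[1], 1, false);	/* PMU disabled: config lands, EN stays clear */

	for (int idx = 0; idx < NUM_COUNTERS; idx++)
		printf("evntsel[%d] = %#llx\n", idx,
		       (unsigned long long)evntsel[idx]);
	return 0;
}

Note that when the PMU is globally disabled the P6-style path still writes the config, just
with EN clear, which is the behaviour the old code obtained indirectly by calling
x86_pmu_disable_counter() from the enable path.
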
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index c7cc6ea..bed1c4c 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -874,13 +874,13 @@ static void amd_pmu_enable_all(void)
 	barrier();
 
 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+		struct perf_counter *counter = cpuc->counters[idx];
 		u64 val;
 
 		if (!test_bit(idx, cpuc->active_mask))
 			continue;
-		rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
-		if (val & ARCH_PERFMON_EVENTSEL0_ENABLE)
-			continue;
+
+		val = counter->hw.config;
 		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
 		wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
 	}
@@ -1044,11 +1044,13 @@ intel_pmu_enable_fixed(struct hw_perf_counter *hwc, int __idx)
 static void p6_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
 {
 	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+	u64 val;
 
+	val = hwc->config;
 	if (cpuc->enabled)
-		x86_pmu_enable_counter(hwc, idx);
-	else
-		x86_pmu_disable_counter(hwc, idx);
+		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+
+	(void)checking_wrmsrl(hwc->config_base + idx, val);
 }
@@ -1068,8 +1070,6 @@ static void amd_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
 	if (cpuc->enabled)
 		x86_pmu_enable_counter(hwc, idx);
-	else
-		x86_pmu_disable_counter(hwc, idx);
 }
 
 static int