path: root/arch/arm/kernel/perf_event_v6.c
author		Mark Rutland <mark.rutland@arm.com>	2011-04-28 10:17:04 +0100
committer	Will Deacon <will.deacon@arm.com>	2011-08-31 10:50:07 +0100
commit		0f78d2d5ccf72ec834da6901886a40fd8e3b7615 (patch)
tree		da1262d040b2c10d95c6fc313b44e18801bcb4a3 /arch/arm/kernel/perf_event_v6.c
parent		1b69beb7684c79673995607939d8acab51056b63 (diff)
ARM: perf: lock PMU registers per-CPU
Currently, a single lock serialises access to CPU PMU registers. This
global locking is unnecessary as PMU registers are local to the CPU
they monitor.

This patch replaces the global lock with a per-CPU lock. As the lock is
in struct cpu_hw_events, PMUs providing a single cpu_hw_events instance
can be locked globally.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Will Deacon <will.deacon@arm.com>
Reviewed-by: Jamie Iles <jamie@jamieiles.com>
Reviewed-by: Ashwin Chaugule <ashwinc@codeaurora.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
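For context before reading the diff: the change works because each CPU owns a cpu_hw_events instance carrying its own raw spinlock, and callers reach it through the PMU's get_hw_events() hook instead of a file-scope pmu_lock. The supporting definitions live outside this file and are not part of this diff, so the sketch below is illustrative only; the accessor name and field layout are assumptions, not lifted from the patch.

	struct cpu_hw_events {
		/* Serialises access to this CPU's PMU control registers. */
		raw_spinlock_t		pmu_lock;
		/* ... per-CPU counter/event bookkeeping elided ... */
	};

	static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);

	/*
	 * Hook of the kind installed as armpmu->get_hw_events(): return
	 * the instance belonging to the CPU we are currently running on.
	 */
	static struct cpu_hw_events *armpmu_get_cpu_events(void)
	{
		return this_cpu_ptr(&cpu_hw_events);
	}

Because the lock is only ever taken for the local CPU's registers, two CPUs programming their own PMUs no longer contend with each other.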
Diffstat (limited to 'arch/arm/kernel/perf_event_v6.c')
-rw-r--r--	arch/arm/kernel/perf_event_v6.c | 25 +++++++++++++----------
1 file changed, 15 insertions(+), 10 deletions(-)
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index 8390128..68cf704 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -433,6 +433,7 @@ armv6pmu_enable_event(struct hw_perf_event *hwc,
 		       int idx)
 {
 	unsigned long val, mask, evt, flags;
+	struct cpu_hw_events *events = armpmu->get_hw_events();
 
 	if (ARMV6_CYCLE_COUNTER == idx) {
 		mask	= 0;
@@ -454,12 +455,12 @@ armv6pmu_enable_event(struct hw_perf_event *hwc,
 	 * Mask out the current event and set the counter to count the event
 	 * that we're interested in.
 	 */
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = armv6_pmcr_read();
 	val &= ~mask;
 	val |= evt;
 	armv6_pmcr_write(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static int counter_is_active(unsigned long pmcr, int idx)
@@ -544,24 +545,26 @@ static void
 armv6pmu_start(void)
 {
 	unsigned long flags, val;
+	struct cpu_hw_events *events = armpmu->get_hw_events();
 
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = armv6_pmcr_read();
 	val |= ARMV6_PMCR_ENABLE;
 	armv6_pmcr_write(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static void
 armv6pmu_stop(void)
 {
 	unsigned long flags, val;
+	struct cpu_hw_events *events = armpmu->get_hw_events();
 
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = armv6_pmcr_read();
 	val &= ~ARMV6_PMCR_ENABLE;
 	armv6_pmcr_write(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static int
@@ -595,6 +598,7 @@ armv6pmu_disable_event(struct hw_perf_event *hwc,
 			int idx)
 {
 	unsigned long val, mask, evt, flags;
+	struct cpu_hw_events *events = armpmu->get_hw_events();
 
 	if (ARMV6_CYCLE_COUNTER == idx) {
 		mask	= ARMV6_PMCR_CCOUNT_IEN;
@@ -615,12 +619,12 @@ armv6pmu_disable_event(struct hw_perf_event *hwc,
 	 * of ETM bus signal assertion cycles. The external reporting should
 	 * be disabled and so this should never increment.
 	 */
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = armv6_pmcr_read();
 	val &= ~mask;
 	val |= evt;
 	armv6_pmcr_write(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static void
@@ -628,6 +632,7 @@ armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc,
 			      int idx)
 {
 	unsigned long val, mask, flags, evt = 0;
+	struct cpu_hw_events *events = armpmu->get_hw_events();
 
 	if (ARMV6_CYCLE_COUNTER == idx) {
 		mask	= ARMV6_PMCR_CCOUNT_IEN;
@@ -644,12 +649,12 @@ armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc,
 	 * Unlike UP ARMv6, we don't have a way of stopping the counters. We
 	 * simply disable the interrupt reporting.
 	 */
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = armv6_pmcr_read();
 	val &= ~mask;
 	val |= evt;
 	armv6_pmcr_write(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static struct arm_pmu armv6pmu = {
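The commit message's closing point, that a PMU providing a single cpu_hw_events instance can still be locked globally, falls out of the same indirection: if get_hw_events() always returns one shared instance, every raw_spin_lock_irqsave(&events->pmu_lock, ...) in the diff above contends on the same lock. A minimal sketch of that special case, with hypothetical names not taken from this patch:

	/*
	 * One shared instance: the per-CPU code paths above then behave
	 * exactly like the old global pmu_lock.
	 */
	static struct cpu_hw_events sys_pmu_events = {
		.pmu_lock = __RAW_SPIN_LOCK_UNLOCKED(sys_pmu_events.pmu_lock),
	};

	static struct cpu_hw_events *sys_pmu_get_hw_events(void)
	{
		return &sys_pmu_events;
	}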