From 0f78d2d5ccf72ec834da6901886a40fd8e3b7615 Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Thu, 28 Apr 2011 10:17:04 +0100
Subject: ARM: perf: lock PMU registers per-CPU

Currently, a single lock serialises access to CPU PMU registers. This
global locking is unnecessary as PMU registers are local to the CPU
they monitor.

This patch replaces the global lock with a per-CPU lock. As the lock is
in struct cpu_hw_events, PMUs providing a single cpu_hw_events instance
can be locked globally.

Signed-off-by: Mark Rutland
Reviewed-by: Will Deacon
Reviewed-by: Jamie Iles
Reviewed-by: Ashwin Chaugule
Signed-off-by: Will Deacon
---
 arch/arm/kernel/perf_event_v6.c | 25 +++++++++++++++----------
 1 file changed, 15 insertions(+), 10 deletions(-)

(limited to 'arch/arm/kernel/perf_event_v6.c')

diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index 8390128..68cf704 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -433,6 +433,7 @@ armv6pmu_enable_event(struct hw_perf_event *hwc,
 		       int idx)
 {
 	unsigned long val, mask, evt, flags;
+	struct cpu_hw_events *events = armpmu->get_hw_events();
 
 	if (ARMV6_CYCLE_COUNTER == idx) {
 		mask	= 0;
@@ -454,12 +455,12 @@ armv6pmu_enable_event(struct hw_perf_event *hwc,
 	 * Mask out the current event and set the counter to count the event
 	 * that we're interested in.
 	 */
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = armv6_pmcr_read();
 	val &= ~mask;
 	val |= evt;
 	armv6_pmcr_write(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static int counter_is_active(unsigned long pmcr, int idx)
@@ -544,24 +545,26 @@ static void
 armv6pmu_start(void)
 {
 	unsigned long flags, val;
+	struct cpu_hw_events *events = armpmu->get_hw_events();
 
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = armv6_pmcr_read();
 	val |= ARMV6_PMCR_ENABLE;
 	armv6_pmcr_write(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static void
 armv6pmu_stop(void)
 {
 	unsigned long flags, val;
+	struct cpu_hw_events *events = armpmu->get_hw_events();
 
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = armv6_pmcr_read();
 	val &= ~ARMV6_PMCR_ENABLE;
 	armv6_pmcr_write(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static int
@@ -595,6 +598,7 @@ armv6pmu_disable_event(struct hw_perf_event *hwc,
 			int idx)
 {
 	unsigned long val, mask, evt, flags;
+	struct cpu_hw_events *events = armpmu->get_hw_events();
 
 	if (ARMV6_CYCLE_COUNTER == idx) {
 		mask	= ARMV6_PMCR_CCOUNT_IEN;
@@ -615,12 +619,12 @@ armv6pmu_disable_event(struct hw_perf_event *hwc,
 	 * of ETM bus signal assertion cycles. The external reporting should
 	 * be disabled and so this should never increment.
 	 */
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = armv6_pmcr_read();
 	val &= ~mask;
 	val |= evt;
 	armv6_pmcr_write(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static void
@@ -628,6 +632,7 @@ armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc,
 			      int idx)
 {
 	unsigned long val, mask, flags, evt = 0;
+	struct cpu_hw_events *events = armpmu->get_hw_events();
 
 	if (ARMV6_CYCLE_COUNTER == idx) {
 		mask	= ARMV6_PMCR_CCOUNT_IEN;
@@ -644,12 +649,12 @@ armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc,
 	 * Unlike UP ARMv6, we don't have a way of stopping the counters. We
 	 * simply disable the interrupt reporting.
 	 */
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = armv6_pmcr_read();
 	val &= ~mask;
 	val |= evt;
 	armv6_pmcr_write(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static struct arm_pmu armv6pmu = {
--
cgit v1.1
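
Note (not part of the patch): for readers unfamiliar with the pattern the commit
message describes, the sketch below shows the per-CPU locking idea in isolation.
The struct and field names mirror the diff, but the DEFINE_PER_CPU layout, the
helper name armpmu_get_cpu_events(), and the use of this_cpu_ptr() are
illustrative assumptions, not the kernel's exact definitions.

	/*
	 * Illustrative sketch only. Each CPU owns one cpu_hw_events
	 * instance, so PMU register read-modify-write sequences take
	 * that CPU's lock rather than one global pmu_lock.
	 */
	#include <linux/percpu.h>
	#include <linux/spinlock.h>

	struct cpu_hw_events {
		/* Serialises access to this CPU's PMU registers. */
		raw_spinlock_t	pmu_lock;
		/* ... per-CPU counter/event state ... */
	};

	static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);

	/*
	 * Hypothetical helper: CPU PMU registers are local to the CPU
	 * they monitor, so hand back the current CPU's instance.
	 * Callers are assumed to run with preemption disabled.
	 */
	static struct cpu_hw_events *armpmu_get_cpu_events(void)
	{
		return this_cpu_ptr(&cpu_hw_events);
	}

	static void example_enable(void)
	{
		struct cpu_hw_events *events = armpmu_get_cpu_events();
		unsigned long flags;

		/* Only code running on this CPU contends for this lock. */
		raw_spin_lock_irqsave(&events->pmu_lock, flags);
		/* read-modify-write of the PMU control register here */
		raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
	}

This is also why the commit message notes that a PMU providing a single
cpu_hw_events instance is still locked globally: the same events->pmu_lock
access pattern then degenerates to one shared lock.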