author	Mark Rutland <mark.rutland@arm.com>	2016-08-11 10:50:43 +0100
committer	Pawel Moll <pawel.moll@arm.com>	2016-08-26 09:16:18 +0100
commit	d662ed2e50c9dab1d4c25e80fa3e01ebe257bd65 (patch)
tree	6d156228a6d37b892cc2c401752666dd435f162b /drivers/bus
parent	5b1e01f3ce15d3a8f2af5d38cc31f0d5c3c11dae (diff)
bus: arm-ccn: make event groups reliable
The CCN PMU driver leaves the counting logic always enabled, and thus events are enabled while groups are manipulated. As each event is stopped and read individually, this leads to arbitrary skew across event groups, which can be seen if counting several identical events.

To avoid this, implement pmu_{enable,disable} callbacks to stop and start all counters atomically around event manipulation.

As the counters are now stopped, we cannot poll the cycle counter to wait for events to drain from the bus. However, as the counters are stopped and the events will not be read regardless, we can simply allow the bus to drain naturally.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Pawel Moll <pawel.moll@arm.com>
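The fix works because the perf core brackets event-group manipulation with the pmu_disable()/pmu_enable() callbacks. The snippet below is an illustrative sketch of that calling pattern, not actual kernel scheduler code: sched_group_example() and its argument list are hypothetical, while perf_pmu_disable()/perf_pmu_enable(), the start/stop callbacks and the PERF_EF_* flags are the real perf interfaces the driver now hooks into.

/*
 * Illustrative sketch only -- not the real perf core code.
 * All CCN counters are frozen via pmu_disable() before the group's
 * events are stopped and read, and unfrozen via pmu_enable()
 * afterwards, so sibling events no longer drift apart while the
 * group is being manipulated.
 */
static void sched_group_example(struct pmu *pmu, struct perf_event **events,
				int nr_events)
{
	int i;

	perf_pmu_disable(pmu);		/* -> arm_ccn_pmu_disable() */

	for (i = 0; i < nr_events; i++)
		pmu->stop(events[i], PERF_EF_UPDATE);	/* arm_ccn_pmu_event_stop() */

	/* ... read or reprogram counters while everything is frozen ... */

	for (i = 0; i < nr_events; i++)
		pmu->start(events[i], PERF_EF_RELOAD);	/* arm_ccn_pmu_event_start() */

	perf_pmu_enable(pmu);		/* -> arm_ccn_pmu_enable() */
}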
Diffstat (limited to 'drivers/bus')
-rw-r--r--	drivers/bus/arm-ccn.c | 29
1 file changed, 20 insertions, 9 deletions
diff --git a/drivers/bus/arm-ccn.c b/drivers/bus/arm-ccn.c
index 12c1fd1..884c030 100644
--- a/drivers/bus/arm-ccn.c
+++ b/drivers/bus/arm-ccn.c
@@ -946,20 +946,11 @@ static void arm_ccn_pmu_event_start(struct perf_event *event, int flags)
static void arm_ccn_pmu_event_stop(struct perf_event *event, int flags)
{
- struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
struct hw_perf_event *hw = &event->hw;
- u64 timeout;
/* Disable counting, setting the DT bus to pass-through mode */
arm_ccn_pmu_xp_dt_config(event, 0);
- /* Let the DT bus drain */
- timeout = arm_ccn_pmu_read_counter(ccn, CCN_IDX_PMU_CYCLE_COUNTER) +
- ccn->num_xps;
- while (arm_ccn_pmu_read_counter(ccn, CCN_IDX_PMU_CYCLE_COUNTER) <
- timeout)
- cpu_relax();
-
if (flags & PERF_EF_UPDATE)
arm_ccn_pmu_event_update(event);
@@ -1162,6 +1153,24 @@ static void arm_ccn_pmu_event_read(struct perf_event *event)
arm_ccn_pmu_event_update(event);
}
+static void arm_ccn_pmu_enable(struct pmu *pmu)
+{
+ struct arm_ccn *ccn = pmu_to_arm_ccn(pmu);
+
+ u32 val = readl(ccn->dt.base + CCN_DT_PMCR);
+ val |= CCN_DT_PMCR__PMU_EN;
+ writel(val, ccn->dt.base + CCN_DT_PMCR);
+}
+
+static void arm_ccn_pmu_disable(struct pmu *pmu)
+{
+ struct arm_ccn *ccn = pmu_to_arm_ccn(pmu);
+
+ u32 val = readl(ccn->dt.base + CCN_DT_PMCR);
+ val &= ~CCN_DT_PMCR__PMU_EN;
+ writel(val, ccn->dt.base + CCN_DT_PMCR);
+}
+
static irqreturn_t arm_ccn_pmu_overflow_handler(struct arm_ccn_dt *dt)
{
u32 pmovsr = readl(dt->base + CCN_DT_PMOVSR);
@@ -1284,6 +1293,8 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn)
.start = arm_ccn_pmu_event_start,
.stop = arm_ccn_pmu_event_stop,
.read = arm_ccn_pmu_event_read,
+ .pmu_enable = arm_ccn_pmu_enable,
+ .pmu_disable = arm_ccn_pmu_disable,
};
/* No overflow interrupt? Have to use a timer instead. */