author     br <br@FreeBSD.org>    2015-01-28 16:08:07 +0000
committer  br <br@FreeBSD.org>    2015-01-28 16:08:07 +0000
commit     236c1b153d8f9cb45e001cab10a5f7cde815ca08 (patch)
tree       fffab9a0d051c34807598c60199e574f6c344eec /sys
parent     b7bb1dabe49776494d20ef55f89858d44829945d (diff)
Add ARMv7 performance monitoring counters.
Differential Revision:	https://reviews.freebsd.org/D1687
Reviewed by:	rpaulo
Sponsored by:	DARPA, AFRL
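A minimal sketch of how these counters can be exercised once the change is in place: hwpmc has to be compiled into the kernel (or loaded as a module) with the hook points enabled. The pmcstat(8) event spelling below is illustrative only, since the matching libpmc/userland update is outside this 'sys'-limited diff.

    # kernel configuration
    options 	HWPMC_HOOKS
    device		hwpmc

    # or load the driver at runtime
    kldload hwpmc

    # system-wide sampling on the cycle counter (event name illustrative)
    pmcstat -S ARMV7-CPU_CYCLES -O /tmp/samples.out sleep 10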
Diffstat (limited to 'sys')
-rw-r--r--  sys/arm/arm/intr.c             7
-rw-r--r--  sys/arm/include/pmc_mdep.h     6
-rw-r--r--  sys/arm/ti/files.ti            1
-rw-r--r--  sys/conf/files.arm             1
-rw-r--r--  sys/dev/hwpmc/hwpmc_arm.c     11
-rw-r--r--  sys/dev/hwpmc/hwpmc_armv7.c  652
-rw-r--r--  sys/dev/hwpmc/hwpmc_armv7.h   61
-rw-r--r--  sys/dev/hwpmc/pmc_events.h    45
-rw-r--r--  sys/sys/pmc.h                  2
9 files changed, 783 insertions(+), 3 deletions(-)
diff --git a/sys/arm/arm/intr.c b/sys/arm/arm/intr.c
index 030407a..e83bca9 100644
--- a/sys/arm/arm/intr.c
+++ b/sys/arm/arm/intr.c
@@ -37,6 +37,7 @@
*/
#include "opt_platform.h"
+#include "opt_hwpmc_hooks.h"
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
@@ -50,6 +51,8 @@ __FBSDID("$FreeBSD$");
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/conf.h>
+#include <sys/pmc.h>
+#include <sys/pmckern.h>
#include <machine/atomic.h>
#include <machine/intr.h>
@@ -190,6 +193,10 @@ arm_irq_handler(struct trapframe *frame)
arm_mask_irq(i);
}
}
+#ifdef HWPMC_HOOKS
+ if (pmc_hook && (PCPU_GET(curthread)->td_pflags & TDP_CALLCHAIN))
+ pmc_hook(PCPU_GET(curthread), PMC_FN_USER_CALLCHAIN, frame);
+#endif
}
/*
diff --git a/sys/arm/include/pmc_mdep.h b/sys/arm/include/pmc_mdep.h
index 6153df0..43546b8 100644
--- a/sys/arm/include/pmc_mdep.h
+++ b/sys/arm/include/pmc_mdep.h
@@ -30,12 +30,15 @@
#define _MACHINE_PMC_MDEP_H_
#define PMC_MDEP_CLASS_INDEX_XSCALE 1
+#define PMC_MDEP_CLASS_INDEX_ARMV7 1
/*
* On the ARM platform we support the following PMCs.
*
* XSCALE Intel XScale processors
+ * ARMV7 ARM Cortex-A processors
*/
#include <dev/hwpmc/hwpmc_xscale.h>
+#include <dev/hwpmc/hwpmc_armv7.h>
union pmc_md_op_pmcallocate {
uint64_t __pad[4];
@@ -48,6 +51,7 @@ union pmc_md_op_pmcallocate {
#ifdef _KERNEL
union pmc_md_pmc {
struct pmc_md_xscale_pmc pm_xscale;
+ struct pmc_md_armv7_pmc pm_armv7;
};
#define PMC_IN_KERNEL_STACK(S,START,END) \
@@ -73,6 +77,8 @@ union pmc_md_pmc {
*/
struct pmc_mdep *pmc_xscale_initialize(void);
void pmc_xscale_finalize(struct pmc_mdep *_md);
+struct pmc_mdep *pmc_armv7_initialize(void);
+void pmc_armv7_finalize(struct pmc_mdep *_md);
#endif /* _KERNEL */
#endif /* !_MACHINE_PMC_MDEP_H_ */
diff --git a/sys/arm/ti/files.ti b/sys/arm/ti/files.ti
index 52ee22b..3cbab91 100644
--- a/sys/arm/ti/files.ti
+++ b/sys/arm/ti/files.ti
@@ -9,6 +9,7 @@ arm/arm/cpufunc_asm_armv5.S standard
arm/arm/cpufunc_asm_arm10.S standard
arm/arm/cpufunc_asm_arm11.S standard
arm/arm/cpufunc_asm_armv7.S standard
+arm/arm/pmu.c optional hwpmc
arm/ti/ti_common.c standard
arm/ti/ti_cpuid.c standard
diff --git a/sys/conf/files.arm b/sys/conf/files.arm
index c2d773b..8ed4a7e 100644
--- a/sys/conf/files.arm
+++ b/sys/conf/files.arm
@@ -70,6 +70,7 @@ crypto/des/des_enc.c optional crypto | ipsec | netsmb
dev/fb/fb.c optional sc
dev/fdt/fdt_arm_platform.c optional platform fdt
dev/hwpmc/hwpmc_arm.c optional hwpmc
+dev/hwpmc/hwpmc_armv7.c optional hwpmc
dev/kbd/kbd.c optional sc | vt
dev/syscons/scgfbrndr.c optional sc
dev/syscons/scterm-teken.c optional sc
diff --git a/sys/dev/hwpmc/hwpmc_arm.c b/sys/dev/hwpmc/hwpmc_arm.c
index 654b949..492695a 100644
--- a/sys/dev/hwpmc/hwpmc_arm.c
+++ b/sys/dev/hwpmc/hwpmc_arm.c
@@ -47,9 +47,12 @@ pmc_md_initialize()
#ifdef CPU_XSCALE_IXP425
if (cpu_class == CPU_CLASS_XSCALE)
return pmc_xscale_initialize();
- else
#endif
- return NULL;
+#ifdef CPU_CORTEXA
+ if (cpu_class == CPU_CLASS_CORTEXA)
+ return pmc_armv7_initialize();
+#endif
+ return NULL;
}
void
@@ -62,6 +65,10 @@ pmc_md_finalize(struct pmc_mdep *md)
KASSERT(0, ("[arm,%d] Unknown CPU Class 0x%x", __LINE__,
cpu_class));
#endif
+#ifdef CPU_CORTEXA
+ if (cpu_class == CPU_CLASS_CORTEXA)
+ pmc_armv7_finalize(md);
+#endif
}
int
diff --git a/sys/dev/hwpmc/hwpmc_armv7.c b/sys/dev/hwpmc/hwpmc_armv7.c
new file mode 100644
index 0000000..d58aca5
--- /dev/null
+++ b/sys/dev/hwpmc/hwpmc_armv7.c
@@ -0,0 +1,652 @@
+/*-
+ * Copyright (c) 2015 Ruslan Bukin <br@bsdpad.com>
+ * All rights reserved.
+ *
+ * This software was developed by SRI International and the University of
+ * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
+ * ("CTSRD"), as part of the DARPA CRASH research programme.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/pmc.h>
+#include <sys/pmckern.h>
+
+#include <machine/pmc_mdep.h>
+#include <machine/cpu.h>
+
+#define CPU_ID_CORTEX_VER_MASK 0xff
+#define CPU_ID_CORTEX_VER_SHIFT 4
+
+static int armv7_npmcs;
+
+struct armv7_event_code_map {
+ enum pmc_event pe_ev;
+ uint8_t pe_code;
+};
+
+const struct armv7_event_code_map armv7_event_codes[] = {
+ { PMC_EV_ARMV7_PMNC_SW_INCR, 0x00 },
+ { PMC_EV_ARMV7_L1_ICACHE_REFILL, 0x01 },
+ { PMC_EV_ARMV7_ITLB_REFILL, 0x02 },
+ { PMC_EV_ARMV7_L1_DCACHE_REFILL, 0x03 },
+ { PMC_EV_ARMV7_L1_DCACHE_ACCESS, 0x04 },
+ { PMC_EV_ARMV7_DTLB_REFILL, 0x05 },
+ { PMC_EV_ARMV7_MEM_READ, 0x06 },
+ { PMC_EV_ARMV7_MEM_WRITE, 0x07 },
+ { PMC_EV_ARMV7_INSTR_EXECUTED, 0x08 },
+ { PMC_EV_ARMV7_EXC_TAKEN, 0x09 },
+ { PMC_EV_ARMV7_EXC_EXECUTED, 0x0A },
+ { PMC_EV_ARMV7_CID_WRITE, 0x0B },
+ { PMC_EV_ARMV7_PC_WRITE, 0x0C },
+ { PMC_EV_ARMV7_PC_IMM_BRANCH, 0x0D },
+ { PMC_EV_ARMV7_PC_PROC_RETURN, 0x0E },
+ { PMC_EV_ARMV7_MEM_UNALIGNED_ACCESS, 0x0F },
+ { PMC_EV_ARMV7_PC_BRANCH_MIS_PRED, 0x10 },
+ { PMC_EV_ARMV7_CLOCK_CYCLES, 0x11 },
+ { PMC_EV_ARMV7_PC_BRANCH_PRED, 0x12 },
+ { PMC_EV_ARMV7_MEM_ACCESS, 0x13 },
+ { PMC_EV_ARMV7_L1_ICACHE_ACCESS, 0x14 },
+ { PMC_EV_ARMV7_L1_DCACHE_WB, 0x15 },
+ { PMC_EV_ARMV7_L2_CACHE_ACCESS, 0x16 },
+ { PMC_EV_ARMV7_L2_CACHE_REFILL, 0x17 },
+ { PMC_EV_ARMV7_L2_CACHE_WB, 0x18 },
+ { PMC_EV_ARMV7_BUS_ACCESS, 0x19 },
+ { PMC_EV_ARMV7_MEM_ERROR, 0x1A },
+ { PMC_EV_ARMV7_INSTR_SPEC, 0x1B },
+ { PMC_EV_ARMV7_TTBR_WRITE, 0x1C },
+ { PMC_EV_ARMV7_BUS_CYCLES, 0x1D },
+ { PMC_EV_ARMV7_CPU_CYCLES, 0xFF },
+};
+
+const int armv7_event_codes_size =
+ sizeof(armv7_event_codes) / sizeof(armv7_event_codes[0]);
+
+/*
+ * Per-processor information.
+ */
+struct armv7_cpu {
+ struct pmc_hw *pc_armv7pmcs;
+ int cortex_ver;
+};
+
+static struct armv7_cpu **armv7_pcpu;
+
+/*
+ * Performance Monitor Control Register
+ */
+static __inline uint32_t
+armv7_pmnc_read(void)
+{
+ uint32_t reg;
+
+ __asm __volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (reg));
+
+ return (reg);
+}
+
+static __inline void
+armv7_pmnc_write(uint32_t reg)
+{
+
+ __asm __volatile("mcr p15, 0, %0, c9, c12, 0" : : "r" (reg));
+}
+
+/*
+ * Clock Counter Register (PMCCNTR)
+ * Counts processor clock cycles.
+ */
+static __inline uint32_t
+armv7_ccnt_read(void)
+{
+ uint32_t reg;
+
+ __asm __volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (reg));
+
+ return (reg);
+}
+
+static __inline void
+armv7_ccnt_write(uint32_t reg)
+{
+
+ __asm __volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (reg));
+}
+
+/*
+ * Interrupt Enable Set Register
+ */
+static __inline void
+armv7_interrupt_enable(uint32_t pmc)
+{
+ uint32_t reg;
+
+ reg = (1 << pmc);
+
+ __asm __volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (reg));
+}
+
+/*
+ * Interrupt Clear Set Register
+ */
+static __inline void
+armv7_interrupt_disable(uint32_t pmc)
+{
+ uint32_t reg;
+
+ reg = (1 << pmc);
+
+ __asm __volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (reg));
+}
+
+/*
+ * Overflow Flag Register
+ */
+static __inline uint32_t
+armv7_flag_read(void)
+{
+ uint32_t reg;
+
+ __asm __volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (reg));
+
+ return (reg);
+}
+
+static __inline void
+armv7_flag_write(uint32_t reg)
+{
+
+ __asm __volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (reg));
+}
+
+/*
+ * Event Selection Register
+ */
+static __inline void
+armv7_evtsel_write(uint32_t reg)
+{
+
+ __asm __volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (reg));
+}
+
+/*
+ * PMSELR
+ */
+static __inline void
+armv7_select_counter(unsigned int pmc)
+{
+
+ __asm __volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (pmc));
+}
+
+/*
+ * Counter Set Enable Register
+ */
+static __inline void
+armv7_counter_enable(unsigned int pmc)
+{
+ uint32_t reg;
+
+ reg = (1 << pmc);
+
+ __asm __volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (reg));
+}
+
+/*
+ * Counter Clear Enable Register
+ */
+static __inline void
+armv7_counter_disable(unsigned int pmc)
+{
+ uint32_t reg;
+
+ reg = (1 << pmc);
+
+ __asm __volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (reg));
+}
+
+/*
+ * Performance Count Register N
+ */
+static uint32_t
+armv7_pmcn_read(unsigned int pmc)
+{
+ uint32_t reg = 0;
+
+ KASSERT(pmc < 4, ("[armv7,%d] illegal PMC number %d", __LINE__, pmc));
+
+ armv7_select_counter(pmc);
+ __asm __volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (reg));
+
+ return (reg);
+}
+
+static uint32_t
+armv7_pmcn_write(unsigned int pmc, uint32_t reg)
+{
+
+ KASSERT(pmc < 4, ("[armv7,%d] illegal PMC number %d", __LINE__, pmc));
+
+ armv7_select_counter(pmc);
+ __asm __volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (reg));
+
+ return (reg);
+}
+
+static int
+armv7_allocate_pmc(int cpu, int ri, struct pmc *pm,
+ const struct pmc_op_pmcallocate *a)
+{
+ uint32_t caps, config;
+ struct armv7_cpu *pac;
+ enum pmc_event pe;
+ int i;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[armv7,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < armv7_npmcs,
+ ("[armv7,%d] illegal row index %d", __LINE__, ri));
+
+ pac = armv7_pcpu[cpu];
+
+ caps = a->pm_caps;
+ if (a->pm_class != PMC_CLASS_ARMV7)
+ return (EINVAL);
+ pe = a->pm_ev;
+
+ for (i = 0; i < armv7_event_codes_size; i++) {
+ if (armv7_event_codes[i].pe_ev == pe) {
+ config = armv7_event_codes[i].pe_code;
+ break;
+ }
+ }
+ if (i == armv7_event_codes_size)
+ return EINVAL;
+
+ pm->pm_md.pm_armv7.pm_armv7_evsel = config;
+
+ PMCDBG(MDP,ALL,2,"armv7-allocate ri=%d -> config=0x%x", ri, config);
+
+ return 0;
+}
+
+
+static int
+armv7_read_pmc(int cpu, int ri, pmc_value_t *v)
+{
+ pmc_value_t tmp;
+ struct pmc *pm;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[armv7,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < armv7_npmcs,
+ ("[armv7,%d] illegal row index %d", __LINE__, ri));
+
+ pm = armv7_pcpu[cpu]->pc_armv7pmcs[ri].phw_pmc;
+
+ if (pm->pm_md.pm_armv7.pm_armv7_evsel == 0xFF)
+ tmp = armv7_ccnt_read();
+ else
+ tmp = armv7_pmcn_read(ri);
+
+ PMCDBG(MDP,REA,2,"armv7-read id=%d -> %jd", ri, tmp);
+ if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
+ *v = ARMV7_PERFCTR_VALUE_TO_RELOAD_COUNT(tmp);
+ else
+ *v = tmp;
+
+ return 0;
+}
+
+static int
+armv7_write_pmc(int cpu, int ri, pmc_value_t v)
+{
+ struct pmc *pm;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[armv7,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < armv7_npmcs,
+ ("[armv7,%d] illegal row-index %d", __LINE__, ri));
+
+ pm = armv7_pcpu[cpu]->pc_armv7pmcs[ri].phw_pmc;
+
+ if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
+ v = ARMV7_RELOAD_COUNT_TO_PERFCTR_VALUE(v);
+
+ PMCDBG(MDP,WRI,1,"armv7-write cpu=%d ri=%d v=%jx", cpu, ri, v);
+
+ if (pm->pm_md.pm_armv7.pm_armv7_evsel == 0xFF)
+ armv7_ccnt_write(v);
+ else
+ armv7_pmcn_write(ri, v);
+
+ return 0;
+}
+
+static int
+armv7_config_pmc(int cpu, int ri, struct pmc *pm)
+{
+ struct pmc_hw *phw;
+
+ PMCDBG(MDP,CFG,1, "cpu=%d ri=%d pm=%p", cpu, ri, pm);
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[armv7,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < armv7_npmcs,
+ ("[armv7,%d] illegal row-index %d", __LINE__, ri));
+
+ phw = &armv7_pcpu[cpu]->pc_armv7pmcs[ri];
+
+ KASSERT(pm == NULL || phw->phw_pmc == NULL,
+ ("[armv7,%d] pm=%p phw->pm=%p hwpmc not unconfigured",
+ __LINE__, pm, phw->phw_pmc));
+
+ phw->phw_pmc = pm;
+
+ return 0;
+}
+
+static int
+armv7_start_pmc(int cpu, int ri)
+{
+ struct pmc_hw *phw;
+ uint32_t config;
+ struct pmc *pm;
+
+ phw = &armv7_pcpu[cpu]->pc_armv7pmcs[ri];
+ pm = phw->phw_pmc;
+ config = pm->pm_md.pm_armv7.pm_armv7_evsel;
+
+ /*
+ * Configure the event selection.
+ */
+ armv7_select_counter(ri);
+ armv7_evtsel_write(config);
+
+ /*
+ * Enable the PMC.
+ */
+ armv7_interrupt_enable(ri);
+ armv7_counter_enable(ri);
+
+ return 0;
+}
+
+static int
+armv7_stop_pmc(int cpu, int ri)
+{
+ struct pmc_hw *phw;
+ struct pmc *pm;
+
+ phw = &armv7_pcpu[cpu]->pc_armv7pmcs[ri];
+ pm = phw->phw_pmc;
+
+ /*
+ * Disable the PMCs.
+ */
+ armv7_counter_disable(ri);
+ armv7_interrupt_disable(ri);
+
+ return 0;
+}
+
+static int
+armv7_release_pmc(int cpu, int ri, struct pmc *pmc)
+{
+ struct pmc_hw *phw;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[armv7,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < armv7_npmcs,
+ ("[armv7,%d] illegal row-index %d", __LINE__, ri));
+
+ phw = &armv7_pcpu[cpu]->pc_armv7pmcs[ri];
+ KASSERT(phw->phw_pmc == NULL,
+ ("[armv7,%d] PHW pmc %p non-NULL", __LINE__, phw->phw_pmc));
+
+ return 0;
+}
+
+static int
+armv7_intr(int cpu, struct trapframe *tf)
+{
+ struct armv7_cpu *pc;
+ int retval, ri;
+ struct pmc *pm;
+ int error;
+ int reg;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[armv7,%d] CPU %d out of range", __LINE__, cpu));
+
+ retval = 0;
+ pc = armv7_pcpu[cpu];
+
+ for (ri = 0; ri < armv7_npmcs; ri++) {
+ pm = armv7_pcpu[cpu]->pc_armv7pmcs[ri].phw_pmc;
+ if (pm == NULL)
+ continue;
+ if (!PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
+ continue;
+
+ /* Check if counter has overflowed */
+ if (pm->pm_md.pm_armv7.pm_armv7_evsel == 0xFF)
+ reg = (1 << 31);
+ else
+ reg = (1 << ri);
+
+ if ((armv7_flag_read() & reg) == 0) {
+ continue;
+ }
+
+ /* Clear Overflow Flag */
+ armv7_flag_write(reg);
+
+ retval = 1; /* Found an interrupting PMC. */
+ if (pm->pm_state != PMC_STATE_RUNNING)
+ continue;
+
+ error = pmc_process_interrupt(cpu, PMC_HR, pm, tf,
+ TRAPF_USERMODE(tf));
+ if (error)
+ armv7_stop_pmc(cpu, ri);
+
+ /* Reload sampling count */
+ armv7_write_pmc(cpu, ri, pm->pm_sc.pm_reloadcount);
+ }
+
+ return (retval);
+}
+
+static int
+armv7_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc)
+{
+ char armv7_name[PMC_NAME_MAX];
+ struct pmc_hw *phw;
+ int error;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[armv7,%d], illegal CPU %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < armv7_npmcs,
+ ("[armv7,%d] row-index %d out of range", __LINE__, ri));
+
+ phw = &armv7_pcpu[cpu]->pc_armv7pmcs[ri];
+ snprintf(armv7_name, sizeof(armv7_name), "ARMV7-%d", ri);
+ if ((error = copystr(armv7_name, pi->pm_name, PMC_NAME_MAX,
+ NULL)) != 0)
+ return error;
+ pi->pm_class = PMC_CLASS_ARMV7;
+ if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) {
+ pi->pm_enabled = TRUE;
+ *ppmc = phw->phw_pmc;
+ } else {
+ pi->pm_enabled = FALSE;
+ *ppmc = NULL;
+ }
+
+ return (0);
+}
+
+static int
+armv7_get_config(int cpu, int ri, struct pmc **ppm)
+{
+
+ *ppm = armv7_pcpu[cpu]->pc_armv7pmcs[ri].phw_pmc;
+
+ return 0;
+}
+
+/*
+ * XXX don't know what we should do here.
+ */
+static int
+armv7_switch_in(struct pmc_cpu *pc, struct pmc_process *pp)
+{
+
+ return 0;
+}
+
+static int
+armv7_switch_out(struct pmc_cpu *pc, struct pmc_process *pp)
+{
+
+ return 0;
+}
+
+static int
+armv7_pcpu_init(struct pmc_mdep *md, int cpu)
+{
+ struct armv7_cpu *pac;
+ struct pmc_hw *phw;
+ struct pmc_cpu *pc;
+ uint32_t pmnc;
+ int first_ri;
+ int cpuid;
+ int i;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[armv7,%d] wrong cpu number %d", __LINE__, cpu));
+ PMCDBG(MDP,INI,1,"armv7-init cpu=%d", cpu);
+
+ armv7_pcpu[cpu] = pac = malloc(sizeof(struct armv7_cpu), M_PMC,
+ M_WAITOK|M_ZERO);
+
+ cpuid = cpu_id();
+ pac->cortex_ver = (cpuid >> CPU_ID_CORTEX_VER_SHIFT) & \
+ CPU_ID_CORTEX_VER_MASK;
+
+ pac->pc_armv7pmcs = malloc(sizeof(struct pmc_hw) * armv7_npmcs,
+ M_PMC, M_WAITOK|M_ZERO);
+ pc = pmc_pcpu[cpu];
+ first_ri = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_ARMV7].pcd_ri;
+ KASSERT(pc != NULL, ("[armv7,%d] NULL per-cpu pointer", __LINE__));
+
+ for (i = 0, phw = pac->pc_armv7pmcs; i < armv7_npmcs; i++, phw++) {
+ phw->phw_state = PMC_PHW_FLAG_IS_ENABLED |
+ PMC_PHW_CPU_TO_STATE(cpu) | PMC_PHW_INDEX_TO_STATE(i);
+ phw->phw_pmc = NULL;
+ pc->pc_hwpmcs[i + first_ri] = phw;
+ }
+
+ /* Enable unit */
+ pmnc = armv7_pmnc_read();
+ pmnc |= ARMV7_PMNC_ENABLE;
+ armv7_pmnc_write(pmnc);
+
+ return 0;
+}
+
+static int
+armv7_pcpu_fini(struct pmc_mdep *md, int cpu)
+{
+ uint32_t pmnc;
+
+ pmnc = armv7_pmnc_read();
+ pmnc &= ~ARMV7_PMNC_ENABLE;
+ armv7_pmnc_write(pmnc);
+
+ return 0;
+}
+
+struct pmc_mdep *
+pmc_armv7_initialize()
+{
+ struct pmc_mdep *pmc_mdep;
+ struct pmc_classdep *pcd;
+ int reg;
+
+ reg = armv7_pmnc_read();
+
+ armv7_npmcs = (reg >> ARMV7_PMNC_N_SHIFT) & \
+ ARMV7_PMNC_N_MASK;
+
+ PMCDBG(MDP,INI,1,"armv7-init npmcs=%d", armv7_npmcs);
+
+ /*
+ * Allocate space for pointers to PMC HW descriptors and for
+ * the MDEP structure used by MI code.
+ */
+ armv7_pcpu = malloc(sizeof(struct armv7_cpu *) * pmc_cpu_max(),
+ M_PMC, M_WAITOK | M_ZERO);
+
+ /* Just one class */
+ pmc_mdep = pmc_mdep_alloc(1);
+ pmc_mdep->pmd_cputype = PMC_CPU_ARMV7;
+
+ pcd = &pmc_mdep->pmd_classdep[PMC_MDEP_CLASS_INDEX_ARMV7];
+ pcd->pcd_caps = ARMV7_PMC_CAPS;
+ pcd->pcd_class = PMC_CLASS_ARMV7;
+ pcd->pcd_num = armv7_npmcs;
+ pcd->pcd_ri = pmc_mdep->pmd_npmc;
+ pcd->pcd_width = 32;
+
+ pcd->pcd_allocate_pmc = armv7_allocate_pmc;
+ pcd->pcd_config_pmc = armv7_config_pmc;
+ pcd->pcd_pcpu_fini = armv7_pcpu_fini;
+ pcd->pcd_pcpu_init = armv7_pcpu_init;
+ pcd->pcd_describe = armv7_describe;
+ pcd->pcd_get_config = armv7_get_config;
+ pcd->pcd_read_pmc = armv7_read_pmc;
+ pcd->pcd_release_pmc = armv7_release_pmc;
+ pcd->pcd_start_pmc = armv7_start_pmc;
+ pcd->pcd_stop_pmc = armv7_stop_pmc;
+ pcd->pcd_write_pmc = armv7_write_pmc;
+
+ pmc_mdep->pmd_intr = armv7_intr;
+ pmc_mdep->pmd_switch_in = armv7_switch_in;
+ pmc_mdep->pmd_switch_out = armv7_switch_out;
+
+ pmc_mdep->pmd_npmc += armv7_npmcs;
+
+ return (pmc_mdep);
+}
+
+void
+pmc_armv7_finalize(struct pmc_mdep *md)
+{
+
+}
diff --git a/sys/dev/hwpmc/hwpmc_armv7.h b/sys/dev/hwpmc/hwpmc_armv7.h
new file mode 100644
index 0000000..35f7d5b
--- /dev/null
+++ b/sys/dev/hwpmc/hwpmc_armv7.h
@@ -0,0 +1,61 @@
+/*-
+ * Copyright (c) 2015 Ruslan Bukin <br@bsdpad.com>
+ * All rights reserved.
+ *
+ * This software was developed by SRI International and the University of
+ * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
+ * ("CTSRD"), as part of the DARPA CRASH research programme.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _DEV_HWPMC_ARMV7_H_
+#define _DEV_HWPMC_ARMV7_H_
+
+#define ARMV7_PMC_CAPS (PMC_CAP_INTERRUPT | PMC_CAP_USER | \
+ PMC_CAP_SYSTEM | PMC_CAP_EDGE | \
+ PMC_CAP_THRESHOLD | PMC_CAP_READ | \
+ PMC_CAP_WRITE | PMC_CAP_INVERT | \
+ PMC_CAP_QUALIFIER)
+
+#define ARMV7_PMNC_ENABLE (1 << 0) /* Enable all counters */
+#define ARMV7_PMNC_P (1 << 1) /* Reset all counters */
+#define ARMV7_PMNC_C (1 << 2) /* Cycle counter reset */
+#define ARMV7_PMNC_D (1 << 3) /* CCNT counts every 64th cpu cycle */
+#define ARMV7_PMNC_X (1 << 4) /* Export to ext. monitoring (ETM) */
+#define ARMV7_PMNC_DP (1 << 5) /* Disable CCNT if non-invasive debug*/
+#define ARMV7_PMNC_N_SHIFT 11 /* Number of counters implemented */
+#define ARMV7_PMNC_N_MASK 0x1f
+#define ARMV7_PMNC_MASK 0x3f /* Writable bits */
+
+#define ARMV7_RELOAD_COUNT_TO_PERFCTR_VALUE(R) (-(R))
+#define ARMV7_PERFCTR_VALUE_TO_RELOAD_COUNT(P) (-(P))
+
+#ifdef _KERNEL
+/* MD extension for 'struct pmc' */
+struct pmc_md_armv7_pmc {
+ uint32_t pm_armv7_evsel;
+};
+#endif /* _KERNEL */
+#endif /* _DEV_HWPMC_ARMV7_H_ */
diff --git a/sys/dev/hwpmc/pmc_events.h b/sys/dev/hwpmc/pmc_events.h
index 4953f19..5ecfd81 100644
--- a/sys/dev/hwpmc/pmc_events.h
+++ b/sys/dev/hwpmc/pmc_events.h
@@ -4757,6 +4757,46 @@ __PMC_EV_ALIAS("IMPC_C0H_TRK_REQUEST.ALL", UCP_EVENT_84H_01H)
#define PMC_EV_XSCALE_LAST PMC_EV_XSCALE_DATA_BUS_TRANS
/*
+ * ARMv7 Events
+ */
+
+#define __PMC_EV_ARMV7() \
+ __PMC_EV(ARMV7, PMNC_SW_INCR) \
+ __PMC_EV(ARMV7, L1_ICACHE_REFILL) \
+ __PMC_EV(ARMV7, ITLB_REFILL) \
+ __PMC_EV(ARMV7, L1_DCACHE_REFILL) \
+ __PMC_EV(ARMV7, L1_DCACHE_ACCESS) \
+ __PMC_EV(ARMV7, DTLB_REFILL) \
+ __PMC_EV(ARMV7, MEM_READ) \
+ __PMC_EV(ARMV7, MEM_WRITE) \
+ __PMC_EV(ARMV7, INSTR_EXECUTED) \
+ __PMC_EV(ARMV7, EXC_TAKEN) \
+ __PMC_EV(ARMV7, EXC_EXECUTED) \
+ __PMC_EV(ARMV7, CID_WRITE) \
+ __PMC_EV(ARMV7, PC_WRITE) \
+ __PMC_EV(ARMV7, PC_IMM_BRANCH) \
+ __PMC_EV(ARMV7, PC_PROC_RETURN) \
+ __PMC_EV(ARMV7, MEM_UNALIGNED_ACCESS) \
+ __PMC_EV(ARMV7, PC_BRANCH_MIS_PRED) \
+ __PMC_EV(ARMV7, CLOCK_CYCLES) \
+ __PMC_EV(ARMV7, PC_BRANCH_PRED) \
+ __PMC_EV(ARMV7, MEM_ACCESS) \
+ __PMC_EV(ARMV7, L1_ICACHE_ACCESS) \
+ __PMC_EV(ARMV7, L1_DCACHE_WB) \
+ __PMC_EV(ARMV7, L2_CACHE_ACCESS) \
+ __PMC_EV(ARMV7, L2_CACHE_REFILL) \
+ __PMC_EV(ARMV7, L2_CACHE_WB) \
+ __PMC_EV(ARMV7, BUS_ACCESS) \
+ __PMC_EV(ARMV7, MEM_ERROR) \
+ __PMC_EV(ARMV7, INSTR_SPEC) \
+ __PMC_EV(ARMV7, TTBR_WRITE) \
+ __PMC_EV(ARMV7, BUS_CYCLES) \
+ __PMC_EV(ARMV7, CPU_CYCLES)
+
+#define PMC_EV_ARMV7_FIRST PMC_EV_ARMV7_PMNC_SW_INCR
+#define PMC_EV_ARMV7_LAST PMC_EV_ARMV7_CPU_CYCLES
+
+/*
* MIPS Events from "Programming the MIPS32 24K Core Family",
* Document Number: MD00355 Revision 04.63 December 19, 2008
* These events are kept in the order found in Table 7.4.
@@ -5219,7 +5259,8 @@ __PMC_EV_ALIAS("IMPC_C0H_TRK_REQUEST.ALL", UCP_EVENT_84H_01H)
* 0x11080 0x0080 INTEL Pentium MMX events
* 0x11100 0x0100 INTEL Pentium Pro/P-II/P-III/Pentium-M events
* 0x11200 0x00FF INTEL XScale events
- * 0x11300 0x00FF MIPS 24K events
+ * 0x11300 0x00FF MIPS 24K events
+ * 0x14000 0x0100 ARMv7 events
* 0x20000 0x1000 Software events
*/
#define __PMC_EVENTS() \
@@ -5253,6 +5294,8 @@ __PMC_EV_ALIAS("IMPC_C0H_TRK_REQUEST.ALL", UCP_EVENT_84H_01H)
__PMC_EV_PPC7450() \
__PMC_EV_BLOCK(PPC970, 0x13100) \
__PMC_EV_PPC970() \
+ __PMC_EV_BLOCK(ARMV7, 0x14000) \
+ __PMC_EV_ARMV7() \
#define PMC_EVENT_FIRST PMC_EV_TSC_TSC
#define PMC_EVENT_LAST PMC_EV_SOFT_LAST
diff --git a/sys/sys/pmc.h b/sys/sys/pmc.h
index 76f000a..ca0d076 100644
--- a/sys/sys/pmc.h
+++ b/sys/sys/pmc.h
@@ -73,6 +73,7 @@
#define __PMC_CPUS() \
__PMC_CPU(AMD_K7, 0x00, "AMD K7") \
__PMC_CPU(AMD_K8, 0x01, "AMD K8") \
+ __PMC_CPU(ARMV7, 0x500, "ARMv7") \
__PMC_CPU(INTEL_P5, 0x80, "Intel Pentium") \
__PMC_CPU(INTEL_P6, 0x81, "Intel Pentium Pro") \
__PMC_CPU(INTEL_CL, 0x82, "Intel Celeron") \
@@ -127,6 +128,7 @@ enum pmc_cputype {
__PMC_CLASS(UCF) /* Intel Uncore fixed function */ \
__PMC_CLASS(UCP) /* Intel Uncore programmable */ \
__PMC_CLASS(XSCALE) /* Intel XScale counters */ \
+ __PMC_CLASS(ARMV7) /* ARMv7 */ \
__PMC_CLASS(MIPS24K) /* MIPS 24K */ \
__PMC_CLASS(OCTEON) /* Cavium Octeon */ \
__PMC_CLASS(PPC7450) /* Motorola MPC7450 class */ \