-rw-r--r--  lib/libpmc/libpmc.c             |  77
-rw-r--r--  sys/arm/arm/pmu.c               |  30
-rw-r--r--  sys/arm64/arm64/intr_machdep.c  |   4
-rw-r--r--  sys/arm64/include/armreg.h      |  18
-rw-r--r--  sys/arm64/include/pmc_mdep.h    |  23
-rw-r--r--  sys/conf/files.arm64            |   3
-rw-r--r--  sys/dev/hwpmc/hwpmc_arm64.c     | 544
-rw-r--r--  sys/dev/hwpmc/hwpmc_arm64.h     |  51
-rw-r--r--  sys/dev/hwpmc/hwpmc_arm64_md.c  | 154
-rw-r--r--  sys/dev/hwpmc/pmc_events.h      | 366
-rw-r--r--  sys/sys/pmc.h                   |   7
11 files changed, 1265 insertions(+), 12 deletions(-)
diff --git a/lib/libpmc/libpmc.c b/lib/libpmc/libpmc.c
index 2ad2268..c4482c8 100644
--- a/lib/libpmc/libpmc.c
+++ b/lib/libpmc/libpmc.c
@@ -82,6 +82,10 @@ static int xscale_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
static int armv7_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
struct pmc_op_pmcallocate *_pmc_config);
#endif
+#if defined(__aarch64__)
+static int arm64_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
+ struct pmc_op_pmcallocate *_pmc_config);
+#endif
#if defined(__mips__)
static int mips_allocate_pmc(enum pmc_event _pe, char* ctrspec,
struct pmc_op_pmcallocate *_pmc_config);
@@ -158,6 +162,7 @@ PMC_CLASSDEP_TABLE(p5, P5);
PMC_CLASSDEP_TABLE(p6, P6);
PMC_CLASSDEP_TABLE(xscale, XSCALE);
PMC_CLASSDEP_TABLE(armv7, ARMV7);
+PMC_CLASSDEP_TABLE(armv8, ARMV8);
PMC_CLASSDEP_TABLE(mips24k, MIPS24K);
PMC_CLASSDEP_TABLE(mips74k, MIPS74K);
PMC_CLASSDEP_TABLE(octeon, OCTEON);
@@ -263,6 +268,16 @@ static const struct pmc_event_descr westmereuc_event_table[] =
__PMC_EV_ALIAS_WESTMEREUC()
};
+static const struct pmc_event_descr cortex_a53_event_table[] =
+{
+ __PMC_EV_ALIAS_ARMV8_CORTEX_A53()
+};
+
+static const struct pmc_event_descr cortex_a57_event_table[] =
+{
+ __PMC_EV_ALIAS_ARMV8_CORTEX_A57()
+};
+
/*
* PMC_MDEP_TABLE(NAME, PRIMARYCLASS, ADDITIONAL_CLASSES...)
*
@@ -294,6 +309,8 @@ PMC_MDEP_TABLE(p5, P5, PMC_CLASS_SOFT, PMC_CLASS_TSC);
PMC_MDEP_TABLE(p6, P6, PMC_CLASS_SOFT, PMC_CLASS_TSC);
PMC_MDEP_TABLE(xscale, XSCALE, PMC_CLASS_SOFT, PMC_CLASS_XSCALE);
PMC_MDEP_TABLE(armv7, ARMV7, PMC_CLASS_SOFT, PMC_CLASS_ARMV7);
+PMC_MDEP_TABLE(cortex_a53, ARMV8, PMC_CLASS_SOFT, PMC_CLASS_ARMV8);
+PMC_MDEP_TABLE(cortex_a57, ARMV8, PMC_CLASS_SOFT, PMC_CLASS_ARMV8);
PMC_MDEP_TABLE(mips24k, MIPS24K, PMC_CLASS_SOFT, PMC_CLASS_MIPS24K);
PMC_MDEP_TABLE(mips74k, MIPS74K, PMC_CLASS_SOFT, PMC_CLASS_MIPS74K);
PMC_MDEP_TABLE(octeon, OCTEON, PMC_CLASS_SOFT, PMC_CLASS_OCTEON);
@@ -362,6 +379,10 @@ PMC_CLASS_TABLE_DESC(xscale, XSCALE, xscale, xscale);
#endif
PMC_CLASS_TABLE_DESC(armv7, ARMV7, armv7, armv7);
#endif
+#if defined(__aarch64__)
+PMC_CLASS_TABLE_DESC(cortex_a53, ARMV8, cortex_a53, arm64);
+PMC_CLASS_TABLE_DESC(cortex_a57, ARMV8, cortex_a57, arm64);
+#endif
#if defined(__mips__)
PMC_CLASS_TABLE_DESC(mips24k, MIPS24K, mips24k, mips);
PMC_CLASS_TABLE_DESC(mips74k, MIPS74K, mips74k, mips);
@@ -2429,6 +2450,26 @@ armv7_allocate_pmc(enum pmc_event pe, char *ctrspec __unused,
}
#endif
+#if defined(__aarch64__)
+static struct pmc_event_alias cortex_a53_aliases[] = {
+ EV_ALIAS(NULL, NULL)
+};
+static struct pmc_event_alias cortex_a57_aliases[] = {
+ EV_ALIAS(NULL, NULL)
+};
+static int
+arm64_allocate_pmc(enum pmc_event pe, char *ctrspec __unused,
+ struct pmc_op_pmcallocate *pmc_config __unused)
+{
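+ /* No ARMv8 event requires event-specific allocation handling yet. */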
+ switch (pe) {
+ default:
+ break;
+ }
+
+ return (0);
+}
+#endif
+
#if defined(__mips__)
static struct pmc_event_alias mips24k_aliases[] = {
@@ -2938,6 +2979,19 @@ pmc_event_names_of_class(enum pmc_class cl, const char ***eventnames,
ev = armv7_event_table;
count = PMC_EVENT_TABLE_SIZE(armv7);
break;
+ case PMC_CLASS_ARMV8:
+ switch (cpu_info.pm_cputype) {
+ default:
+ case PMC_CPU_ARMV8_CORTEX_A53:
+ ev = cortex_a53_event_table;
+ count = PMC_EVENT_TABLE_SIZE(cortex_a53);
+ break;
+ case PMC_CPU_ARMV8_CORTEX_A57:
+ ev = cortex_a57_event_table;
+ count = PMC_EVENT_TABLE_SIZE(cortex_a57);
+ break;
+ }
+ break;
case PMC_CLASS_MIPS24K:
ev = mips24k_event_table;
count = PMC_EVENT_TABLE_SIZE(mips24k);
@@ -3235,6 +3289,16 @@ pmc_init(void)
pmc_class_table[n] = &armv7_class_table_descr;
break;
#endif
+#if defined(__aarch64__)
+ case PMC_CPU_ARMV8_CORTEX_A53:
+ PMC_MDEP_INIT(cortex_a53);
+ pmc_class_table[n] = &cortex_a53_class_table_descr;
+ break;
+ case PMC_CPU_ARMV8_CORTEX_A57:
+ PMC_MDEP_INIT(cortex_a57);
+ pmc_class_table[n] = &cortex_a57_class_table_descr;
+ break;
+#endif
#if defined(__mips__)
case PMC_CPU_MIPS_24K:
PMC_MDEP_INIT(mips24k);
@@ -3446,6 +3510,19 @@ _pmc_name_of_event(enum pmc_event pe, enum pmc_cputype cpu)
} else if (pe >= PMC_EV_ARMV7_FIRST && pe <= PMC_EV_ARMV7_LAST) {
ev = armv7_event_table;
evfence = armv7_event_table + PMC_EVENT_TABLE_SIZE(armv7);
+ } else if (pe >= PMC_EV_ARMV8_FIRST && pe <= PMC_EV_ARMV8_LAST) {
+ switch (cpu) {
+ case PMC_CPU_ARMV8_CORTEX_A53:
+ ev = cortex_a53_event_table;
+ evfence = cortex_a53_event_table + PMC_EVENT_TABLE_SIZE(cortex_a53);
+ break;
+ case PMC_CPU_ARMV8_CORTEX_A57:
+ ev = cortex_a57_event_table;
+ evfence = cortex_a57_event_table + PMC_EVENT_TABLE_SIZE(cortex_a57);
+ break;
+ default: /* Unknown CPU type. */
+ break;
+ }
} else if (pe >= PMC_EV_MIPS24K_FIRST && pe <= PMC_EV_MIPS24K_LAST) {
ev = mips24k_event_table;
evfence = mips24k_event_table + PMC_EVENT_TABLE_SIZE(mips24k);
diff --git a/sys/arm/arm/pmu.c b/sys/arm/arm/pmu.c
index c224525..1b079c0 100644
--- a/sys/arm/arm/pmu.c
+++ b/sys/arm/arm/pmu.c
@@ -58,13 +58,16 @@ __FBSDID("$FreeBSD$");
#include <machine/cpu.h>
#include <machine/intr.h>
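+/* Some systems provide one PMU interrupt per core; handle up to 8. */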
+#define MAX_RLEN 8
+
struct pmu_softc {
- struct resource *res[1];
+ struct resource *res[MAX_RLEN];
device_t dev;
- void *ih;
+ void *ih[MAX_RLEN];
};
static struct ofw_compat_data compat_data[] = {
+ {"arm,armv8-pmuv3", 1},
{"arm,cortex-a17-pmu", 1},
{"arm,cortex-a15-pmu", 1},
{"arm,cortex-a12-pmu", 1},
@@ -81,6 +84,13 @@ static struct ofw_compat_data compat_data[] = {
static struct resource_spec pmu_spec[] = {
{ SYS_RES_IRQ, 0, RF_ACTIVE },
+ { SYS_RES_IRQ, 1, RF_ACTIVE | RF_OPTIONAL },
+ { SYS_RES_IRQ, 2, RF_ACTIVE | RF_OPTIONAL },
+ { SYS_RES_IRQ, 3, RF_ACTIVE | RF_OPTIONAL },
+ { SYS_RES_IRQ, 4, RF_ACTIVE | RF_OPTIONAL },
+ { SYS_RES_IRQ, 5, RF_ACTIVE | RF_OPTIONAL },
+ { SYS_RES_IRQ, 6, RF_ACTIVE | RF_OPTIONAL },
+ { SYS_RES_IRQ, 7, RF_ACTIVE | RF_OPTIONAL },
{ -1, 0 }
};
@@ -119,6 +129,7 @@ pmu_attach(device_t dev)
{
struct pmu_softc *sc;
int err;
+ int i;
sc = device_get_softc(dev);
sc->dev = dev;
@@ -129,11 +140,16 @@ pmu_attach(device_t dev)
}
/* Setup interrupt handler */
- err = bus_setup_intr(dev, sc->res[0], INTR_MPSAFE | INTR_TYPE_MISC,
- pmu_intr, NULL, NULL, &sc->ih);
- if (err) {
- device_printf(dev, "Unable to setup interrupt handler.\n");
- return (ENXIO);
+ for (i = 0; i < MAX_RLEN; i++) {
+ if (sc->res[i] == NULL)
+ break;
+
+ err = bus_setup_intr(dev, sc->res[i], INTR_MPSAFE | INTR_TYPE_MISC,
+ pmu_intr, NULL, NULL, &sc->ih[i]);
+ if (err) {
+ device_printf(dev, "Unable to setup interrupt handler.\n");
+ return (ENXIO);
+ }
}
return (0);
diff --git a/sys/arm64/arm64/intr_machdep.c b/sys/arm64/arm64/intr_machdep.c
index 6f1c7bb..7ff6f11 100644
--- a/sys/arm64/arm64/intr_machdep.c
+++ b/sys/arm64/arm64/intr_machdep.c
@@ -430,6 +430,10 @@ stray:
if (intr != NULL)
PIC_MASK(root_pic, intr->i_hw_irq);
+#ifdef HWPMC_HOOKS
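+ /* Let hwpmc(4) capture a user callchain if one was requested. */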
+ if (pmc_hook && (PCPU_GET(curthread)->td_pflags & TDP_CALLCHAIN))
+ pmc_hook(PCPU_GET(curthread), PMC_FN_USER_CALLCHAIN, tf);
+#endif
}
void
diff --git a/sys/arm64/include/armreg.h b/sys/arm64/include/armreg.h
index 499ad7e..ab1de97 100644
--- a/sys/arm64/include/armreg.h
+++ b/sys/arm64/include/armreg.h
@@ -212,4 +212,22 @@
#define DBG_MDSCR_KDE (0x1 << 13)
#define DBG_MDSCR_MDE (0x1 << 15)
+/* Performance Monitoring Counters */
+#define PMCR_E (1 << 0) /* Enable all counters */
+#define PMCR_P (1 << 1) /* Reset all counters */
+#define PMCR_C (1 << 2) /* Clock counter reset */
+#define PMCR_D (1 << 3) /* CNTR counts every 64 clk cycles */
+#define PMCR_X (1 << 4) /* Export to ext. monitoring (ETM) */
+#define PMCR_DP (1 << 5) /* Disable CCNT if non-invasive debug */
+#define PMCR_LC (1 << 6) /* Long cycle count enable */
+#define PMCR_IMP_SHIFT 24 /* Implementer code */
+#define PMCR_IMP_MASK (0xff << PMCR_IMP_SHIFT)
+#define PMCR_IDCODE_SHIFT 16 /* Identification code */
+#define PMCR_IDCODE_MASK (0xff << PMCR_IDCODE_SHIFT)
+#define PMCR_IDCODE_CORTEX_A57 0x01
+#define PMCR_IDCODE_CORTEX_A72 0x02
+#define PMCR_IDCODE_CORTEX_A53 0x03
+#define PMCR_N_SHIFT 11 /* Number of counters implemented */
+#define PMCR_N_MASK (0x1f << PMCR_N_SHIFT)
+
#endif /* !_MACHINE_ARMREG_H_ */
diff --git a/sys/arm64/include/pmc_mdep.h b/sys/arm64/include/pmc_mdep.h
index 455f7a1..5d6f40d 100644
--- a/sys/arm64/include/pmc_mdep.h
+++ b/sys/arm64/include/pmc_mdep.h
@@ -29,6 +29,14 @@
#ifndef _MACHINE_PMC_MDEP_H_
#define _MACHINE_PMC_MDEP_H_
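+/* Class index 0 is reserved for the software PMC class. */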
+#define PMC_MDEP_CLASS_INDEX_ARMV8 1
+/*
+ * On the ARMv8 platform we support the following PMCs.
+ *
+ * ARMV8 ARM Cortex-A53/57/72 processors
+ */
+#include <dev/hwpmc/hwpmc_arm64.h>
+
union pmc_md_op_pmcallocate {
uint64_t __pad[4];
};
@@ -39,12 +47,21 @@ union pmc_md_op_pmcallocate {
#ifdef _KERNEL
union pmc_md_pmc {
+ struct pmc_md_arm64_pmc pm_arm64;
};
-#define PMC_TRAPFRAME_TO_PC(TF) (0) /* Stubs */
-#define PMC_TRAPFRAME_TO_FP(TF) (0)
-#define PMC_TRAPFRAME_TO_SP(TF) (0)
+#define PMC_IN_KERNEL_STACK(S,START,END) \
+ ((S) >= (START) && (S) < (END))
+#define PMC_IN_KERNEL(va) INKERNEL((va))
+#define PMC_IN_USERSPACE(va) ((va) <= VM_MAXUSER_ADDRESS)
+#define PMC_TRAPFRAME_TO_PC(TF) ((TF)->tf_lr)
+#define PMC_TRAPFRAME_TO_FP(TF) ((TF)->tf_x[29])
+/*
+ * Prototypes
+ */
+struct pmc_mdep *pmc_arm64_initialize(void);
+void pmc_arm64_finalize(struct pmc_mdep *_md);
#endif /* _KERNEL */
#endif /* !_MACHINE_PMC_MDEP_H_ */
diff --git a/sys/conf/files.arm64 b/sys/conf/files.arm64
index 26d974e..8bdc93a 100644
--- a/sys/conf/files.arm64
+++ b/sys/conf/files.arm64
@@ -1,6 +1,7 @@
# $FreeBSD$
arm/arm/devmap.c standard
arm/arm/generic_timer.c standard
+arm/arm/pmu.c standard
arm64/arm64/autoconf.c standard
arm64/arm64/bcopy.c standard
arm64/arm64/bus_machdep.c standard
@@ -40,6 +41,8 @@ arm64/arm64/uio_machdep.c standard
arm64/arm64/vfp.c standard
arm64/arm64/vm_machdep.c standard
dev/fdt/fdt_arm64.c optional fdt
+dev/hwpmc/hwpmc_arm64.c optional hwpmc
+dev/hwpmc/hwpmc_arm64_md.c optional hwpmc
dev/ofw/ofw_cpu.c optional fdt
dev/psci/psci.c optional psci
dev/psci/psci_arm64.S optional psci
diff --git a/sys/dev/hwpmc/hwpmc_arm64.c b/sys/dev/hwpmc/hwpmc_arm64.c
new file mode 100644
index 0000000..2e54e38
--- /dev/null
+++ b/sys/dev/hwpmc/hwpmc_arm64.c
@@ -0,0 +1,544 @@
+/*-
+ * Copyright (c) 2015 Ruslan Bukin <br@bsdpad.com>
+ * All rights reserved.
+ *
+ * This software was developed by the University of Cambridge Computer
+ * Laboratory with support from ARM Ltd.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/pmc.h>
+#include <sys/pmckern.h>
+
+#include <machine/pmc_mdep.h>
+#include <machine/cpu.h>
+
+static int arm64_npmcs;
+
+struct arm64_event_code_map {
+ enum pmc_event pe_ev;
+ uint8_t pe_code;
+};
+
+/*
+ * Per-processor information.
+ */
+struct arm64_cpu {
+ struct pmc_hw *pc_arm64pmcs;
+};
+
+static struct arm64_cpu **arm64_pcpu;
+
+/*
+ * Interrupt Enable Set Register
+ */
+static __inline void
+arm64_interrupt_enable(uint32_t pmc)
+{
+ uint32_t reg;
+
+ reg = (1 << pmc);
+ WRITE_SPECIALREG(PMINTENSET_EL1, reg);
+
+ isb();
+}
+
+/*
+ * Interrupt Enable Clear Register
+ */
+static __inline void
+arm64_interrupt_disable(uint32_t pmc)
+{
+ uint32_t reg;
+
+ reg = (1 << pmc);
+ WRITE_SPECIALREG(PMINTENCLR_EL1, reg);
+
+ isb();
+}
+
+/*
+ * Counter Enable Set Register
+ */
+static __inline void
+arm64_counter_enable(unsigned int pmc)
+{
+ uint32_t reg;
+
+ reg = (1 << pmc);
+ WRITE_SPECIALREG(PMCNTENSET_EL0, reg);
+
+ isb();
+}
+
+/*
+ * Counter Enable Clear Register
+ */
+static __inline void
+arm64_counter_disable(unsigned int pmc)
+{
+ uint32_t reg;
+
+ reg = (1 << pmc);
+ WRITE_SPECIALREG(PMCNTENCLR_EL0, reg);
+
+ isb();
+}
+
+/*
+ * Performance Monitors Control Register
+ */
+static uint32_t
+arm64_pmcr_read(void)
+{
+ uint32_t reg;
+
+ reg = READ_SPECIALREG(PMCR_EL0);
+
+ return (reg);
+}
+
+static void
+arm64_pmcr_write(uint32_t reg)
+{
+
+ WRITE_SPECIALREG(PMCR_EL0, reg);
+
+ isb();
+}
+
+/*
+ * Performance Count Register N
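+ *
+ * Counters are accessed indirectly: PMSELR_EL0 selects the counter,
+ * then PMXEVCNTR_EL0 reads or writes the selected counter.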
+ */
+static uint32_t
+arm64_pmcn_read(unsigned int pmc)
+{
+
+ KASSERT(pmc < arm64_npmcs, ("%s: illegal PMC number %d", __func__, pmc));
+
+ WRITE_SPECIALREG(PMSELR_EL0, pmc);
+
+ isb();
+
+ return (READ_SPECIALREG(PMXEVCNTR_EL0));
+}
+
+static void
+arm64_pmcn_write(unsigned int pmc, uint32_t reg)
+{
+
+ KASSERT(pmc < arm64_npmcs, ("%s: illegal PMC number %d", __func__, pmc));
+
+ WRITE_SPECIALREG(PMSELR_EL0, pmc);
+ WRITE_SPECIALREG(PMXEVCNTR_EL0, reg);
+
+ isb();
+}
+
+static int
+arm64_allocate_pmc(int cpu, int ri, struct pmc *pm,
+ const struct pmc_op_pmcallocate *a)
+{
+ uint32_t caps, config;
+ struct arm64_cpu *pac;
+ enum pmc_event pe;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[arm64,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < arm64_npmcs,
+ ("[arm64,%d] illegal row index %d", __LINE__, ri));
+
+ pac = arm64_pcpu[cpu];
+
+ caps = a->pm_caps;
+ if (a->pm_class != PMC_CLASS_ARMV8) {
+ return (EINVAL);
+ }
+ pe = a->pm_ev;
+
+ config = (pe & EVENT_ID_MASK);
+ pm->pm_md.pm_arm64.pm_arm64_evsel = config;
+
+ PMCDBG2(MDP, ALL, 2, "arm64-allocate ri=%d -> config=0x%x", ri, config);
+
+ return (0);
+}
+
+static int
+arm64_read_pmc(int cpu, int ri, pmc_value_t *v)
+{
+ pmc_value_t tmp;
+ struct pmc *pm;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[arm64,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < arm64_npmcs,
+ ("[arm64,%d] illegal row index %d", __LINE__, ri));
+
+ pm = arm64_pcpu[cpu]->pc_arm64pmcs[ri].phw_pmc;
+
+ tmp = arm64_pmcn_read(ri);
+
+ PMCDBG2(MDP, REA, 2, "arm64-read id=%d -> %jd", ri, tmp);
+ if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
+ *v = ARMV8_PERFCTR_VALUE_TO_RELOAD_COUNT(tmp);
+ else
+ *v = tmp;
+
+ return (0);
+}
+
+static int
+arm64_write_pmc(int cpu, int ri, pmc_value_t v)
+{
+ struct pmc *pm;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[arm64,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < arm64_npmcs,
+ ("[arm64,%d] illegal row-index %d", __LINE__, ri));
+
+ pm = arm64_pcpu[cpu]->pc_arm64pmcs[ri].phw_pmc;
+
+ if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
+ v = ARMV8_RELOAD_COUNT_TO_PERFCTR_VALUE(v);
+
+ PMCDBG3(MDP, WRI, 1, "arm64-write cpu=%d ri=%d v=%jx", cpu, ri, v);
+
+ arm64_pmcn_write(ri, v);
+
+ return (0);
+}
+
+static int
+arm64_config_pmc(int cpu, int ri, struct pmc *pm)
+{
+ struct pmc_hw *phw;
+
+ PMCDBG3(MDP, CFG, 1, "cpu=%d ri=%d pm=%p", cpu, ri, pm);
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[arm64,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < arm64_npmcs,
+ ("[arm64,%d] illegal row-index %d", __LINE__, ri));
+
+ phw = &arm64_pcpu[cpu]->pc_arm64pmcs[ri];
+
+ KASSERT(pm == NULL || phw->phw_pmc == NULL,
+ ("[arm64,%d] pm=%p phw->pm=%p hwpmc not unconfigured",
+ __LINE__, pm, phw->phw_pmc));
+
+ phw->phw_pmc = pm;
+
+ return (0);
+}
+
+static int
+arm64_start_pmc(int cpu, int ri)
+{
+ struct pmc_hw *phw;
+ uint32_t config;
+ struct pmc *pm;
+
+ phw = &arm64_pcpu[cpu]->pc_arm64pmcs[ri];
+ pm = phw->phw_pmc;
+ config = pm->pm_md.pm_arm64.pm_arm64_evsel;
+
+ /*
+ * Configure the event selection.
+ */
+ WRITE_SPECIALREG(PMSELR_EL0, ri);
+ WRITE_SPECIALREG(PMXEVTYPER_EL0, config);
+
+ isb();
+
+ /*
+ * Enable the PMC.
+ */
+ arm64_interrupt_enable(ri);
+ arm64_counter_enable(ri);
+
+ return (0);
+}
+
+static int
+arm64_stop_pmc(int cpu, int ri)
+{
+ struct pmc_hw *phw;
+ struct pmc *pm;
+
+ phw = &arm64_pcpu[cpu]->pc_arm64pmcs[ri];
+ pm = phw->phw_pmc;
+
+ /*
+ * Disable the PMCs.
+ */
+ arm64_counter_disable(ri);
+ arm64_interrupt_disable(ri);
+
+ return (0);
+}
+
+static int
+arm64_release_pmc(int cpu, int ri, struct pmc *pmc)
+{
+ struct pmc_hw *phw;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[arm64,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < arm64_npmcs,
+ ("[arm64,%d] illegal row-index %d", __LINE__, ri));
+
+ phw = &arm64_pcpu[cpu]->pc_arm64pmcs[ri];
+ KASSERT(phw->phw_pmc == NULL,
+ ("[arm64,%d] PHW pmc %p non-NULL", __LINE__, phw->phw_pmc));
+
+ return (0);
+}
+
+static int
+arm64_intr(int cpu, struct trapframe *tf)
+{
+ struct arm64_cpu *pc;
+ int retval, ri;
+ struct pmc *pm;
+ int error;
+ int reg;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[arm64,%d] CPU %d out of range", __LINE__, cpu));
+
+ retval = 0;
+ pc = arm64_pcpu[cpu];
+
+ for (ri = 0; ri < arm64_npmcs; ri++) {
+ pm = arm64_pcpu[cpu]->pc_arm64pmcs[ri].phw_pmc;
+ if (pm == NULL)
+ continue;
+ if (!PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
+ continue;
+
+ /* Check if counter is overflowed */
+ reg = (1 << ri);
+ if ((READ_SPECIALREG(PMOVSCLR_EL0) & reg) == 0)
+ continue;
+ /* Clear Overflow Flag */
+ WRITE_SPECIALREG(PMOVSCLR_EL0, reg);
+
+ isb();
+
+ retval = 1; /* Found an interrupting PMC. */
+ if (pm->pm_state != PMC_STATE_RUNNING)
+ continue;
+
+ error = pmc_process_interrupt(cpu, PMC_HR, pm, tf,
+ TRAPF_USERMODE(tf));
+ if (error)
+ arm64_stop_pmc(cpu, ri);
+
+ /* Reload sampling count */
+ arm64_write_pmc(cpu, ri, pm->pm_sc.pm_reloadcount);
+ }
+
+ return (retval);
+}
+
+static int
+arm64_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc)
+{
+ char arm64_name[PMC_NAME_MAX];
+ struct pmc_hw *phw;
+ int error;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[arm64,%d] illegal CPU %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < arm64_npmcs,
+ ("[arm64,%d] row-index %d out of range", __LINE__, ri));
+
+ phw = &arm64_pcpu[cpu]->pc_arm64pmcs[ri];
+ snprintf(arm64_name, sizeof(arm64_name), "ARMV8-%d", ri);
+ if ((error = copystr(arm64_name, pi->pm_name, PMC_NAME_MAX,
+ NULL)) != 0)
+ return (error);
+ pi->pm_class = PMC_CLASS_ARMV8;
+ if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) {
+ pi->pm_enabled = TRUE;
+ *ppmc = phw->phw_pmc;
+ } else {
+ pi->pm_enabled = FALSE;
+ *ppmc = NULL;
+ }
+
+ return (0);
+}
+
+static int
+arm64_get_config(int cpu, int ri, struct pmc **ppm)
+{
+
+ *ppm = arm64_pcpu[cpu]->pc_arm64pmcs[ri].phw_pmc;
+
+ return (0);
+}
+
+/*
+ * XXX: no ARMv8-specific work to do on a context switch yet.
+ */
+static int
+arm64_switch_in(struct pmc_cpu *pc, struct pmc_process *pp)
+{
+
+ return (0);
+}
+
+static int
+arm64_switch_out(struct pmc_cpu *pc, struct pmc_process *pp)
+{
+
+ return (0);
+}
+
+static int
+arm64_pcpu_init(struct pmc_mdep *md, int cpu)
+{
+ struct arm64_cpu *pac;
+ struct pmc_hw *phw;
+ struct pmc_cpu *pc;
+ uint64_t pmcr;
+ int first_ri;
+ int i;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[arm64,%d] wrong cpu number %d", __LINE__, cpu));
+ PMCDBG1(MDP, INI, 1, "arm64-init cpu=%d", cpu);
+
+ arm64_pcpu[cpu] = pac = malloc(sizeof(struct arm64_cpu), M_PMC,
+ M_WAITOK | M_ZERO);
+
+ pac->pc_arm64pmcs = malloc(sizeof(struct pmc_hw) * arm64_npmcs,
+ M_PMC, M_WAITOK | M_ZERO);
+ pc = pmc_pcpu[cpu];
+ first_ri = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_ARMV8].pcd_ri;
+ KASSERT(pc != NULL, ("[arm64,%d] NULL per-cpu pointer", __LINE__));
+
+ for (i = 0, phw = pac->pc_arm64pmcs; i < arm64_npmcs; i++, phw++) {
+ phw->phw_state = PMC_PHW_FLAG_IS_ENABLED |
+ PMC_PHW_CPU_TO_STATE(cpu) | PMC_PHW_INDEX_TO_STATE(i);
+ phw->phw_pmc = NULL;
+ pc->pc_hwpmcs[i + first_ri] = phw;
+ }
+
+ /* Enable unit */
+ pmcr = arm64_pmcr_read();
+ pmcr |= PMCR_E;
+ arm64_pmcr_write(pmcr);
+
+ return (0);
+}
+
+static int
+arm64_pcpu_fini(struct pmc_mdep *md, int cpu)
+{
+ uint32_t pmcr;
+
+ pmcr = arm64_pmcr_read();
+ pmcr &= ~PMCR_E;
+ arm64_pmcr_write(pmcr);
+
+ return (0);
+}
+
+struct pmc_mdep *
+pmc_arm64_initialize(void)
+{
+ struct pmc_mdep *pmc_mdep;
+ struct pmc_classdep *pcd;
+ int idcode;
+ int reg;
+
+ reg = arm64_pmcr_read();
+ arm64_npmcs = (reg & PMCR_N_MASK) >> PMCR_N_SHIFT;
+ idcode = (reg & PMCR_IDCODE_MASK) >> PMCR_IDCODE_SHIFT;
+
+ PMCDBG1(MDP, INI, 1, "arm64-init npmcs=%d", arm64_npmcs);
+
+ /*
+ * Allocate space for pointers to PMC HW descriptors and for
+ * the MDEP structure used by MI code.
+ */
+ arm64_pcpu = malloc(sizeof(struct arm64_cpu *) * pmc_cpu_max(),
+ M_PMC, M_WAITOK | M_ZERO);
+
+ /* Just one class */
+ pmc_mdep = pmc_mdep_alloc(1);
+
+ switch (idcode) {
+ case PMCR_IDCODE_CORTEX_A57:
+ case PMCR_IDCODE_CORTEX_A72:
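+ /* The Cortex-A72 PMU is A57-compatible; use the A57 event set. */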
+ pmc_mdep->pmd_cputype = PMC_CPU_ARMV8_CORTEX_A57;
+ break;
+ default:
+ case PMCR_IDCODE_CORTEX_A53:
+ pmc_mdep->pmd_cputype = PMC_CPU_ARMV8_CORTEX_A53;
+ break;
+ }
+
+ pcd = &pmc_mdep->pmd_classdep[PMC_MDEP_CLASS_INDEX_ARMV8];
+ pcd->pcd_caps = ARMV8_PMC_CAPS;
+ pcd->pcd_class = PMC_CLASS_ARMV8;
+ pcd->pcd_num = arm64_npmcs;
+ pcd->pcd_ri = pmc_mdep->pmd_npmc;
+ pcd->pcd_width = 32;
+
+ pcd->pcd_allocate_pmc = arm64_allocate_pmc;
+ pcd->pcd_config_pmc = arm64_config_pmc;
+ pcd->pcd_pcpu_fini = arm64_pcpu_fini;
+ pcd->pcd_pcpu_init = arm64_pcpu_init;
+ pcd->pcd_describe = arm64_describe;
+ pcd->pcd_get_config = arm64_get_config;
+ pcd->pcd_read_pmc = arm64_read_pmc;
+ pcd->pcd_release_pmc = arm64_release_pmc;
+ pcd->pcd_start_pmc = arm64_start_pmc;
+ pcd->pcd_stop_pmc = arm64_stop_pmc;
+ pcd->pcd_write_pmc = arm64_write_pmc;
+
+ pmc_mdep->pmd_intr = arm64_intr;
+ pmc_mdep->pmd_switch_in = arm64_switch_in;
+ pmc_mdep->pmd_switch_out = arm64_switch_out;
+
+ pmc_mdep->pmd_npmc += arm64_npmcs;
+
+ return (pmc_mdep);
+}
+
+void
+pmc_arm64_finalize(struct pmc_mdep *md)
+{
+
+}
diff --git a/sys/dev/hwpmc/hwpmc_arm64.h b/sys/dev/hwpmc/hwpmc_arm64.h
new file mode 100644
index 0000000..f0d43aa
--- /dev/null
+++ b/sys/dev/hwpmc/hwpmc_arm64.h
@@ -0,0 +1,51 @@
+/*-
+ * Copyright (c) 2015 Ruslan Bukin <br@bsdpad.com>
+ * All rights reserved.
+ *
+ * This software was developed by the University of Cambridge Computer
+ * Laboratory with support from ARM Ltd.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _DEV_HWPMC_ARMV8_H_
+#define _DEV_HWPMC_ARMV8_H_
+
+#define ARMV8_PMC_CAPS (PMC_CAP_INTERRUPT | PMC_CAP_USER | \
+ PMC_CAP_SYSTEM | PMC_CAP_EDGE | \
+ PMC_CAP_THRESHOLD | PMC_CAP_READ | \
+ PMC_CAP_WRITE | PMC_CAP_INVERT | \
+ PMC_CAP_QUALIFIER)
+
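+/*
+ * Counters are 32 bits wide, count up and interrupt on overflow: a
+ * sampling PMC that should fire after R more events is therefore
+ * preloaded with 2^32 - R, i.e. -R in two's complement.
+ */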
+#define ARMV8_RELOAD_COUNT_TO_PERFCTR_VALUE(R) (-(R))
+#define ARMV8_PERFCTR_VALUE_TO_RELOAD_COUNT(P) (-(P))
+#define EVENT_ID_MASK 0xFF
+
+#ifdef _KERNEL
+/* MD extension for 'struct pmc' */
+struct pmc_md_arm64_pmc {
+ uint32_t pm_arm64_evsel;
+};
+#endif /* _KERNEL */
+#endif /* _DEV_HWPMC_ARMV8_H_ */
diff --git a/sys/dev/hwpmc/hwpmc_arm64_md.c b/sys/dev/hwpmc/hwpmc_arm64_md.c
new file mode 100644
index 0000000..9037c46
--- /dev/null
+++ b/sys/dev/hwpmc/hwpmc_arm64_md.c
@@ -0,0 +1,154 @@
+/*-
+ * Copyright (c) 2015 Ruslan Bukin <br@bsdpad.com>
+ * All rights reserved.
+ *
+ * This software was developed by the University of Cambridge Computer
+ * Laboratory with support from ARM Ltd.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/pmc.h>
+#include <sys/proc.h>
+#include <sys/systm.h>
+
+#include <machine/cpu.h>
+#include <machine/md_var.h>
+#include <machine/pmc_mdep.h>
+#include <machine/stack.h>
+
+#include <vm/vm.h>
+#include <vm/vm_param.h>
+#include <vm/pmap.h>
+
+struct pmc_mdep *
+pmc_md_initialize(void)
+{
+
+ return (pmc_arm64_initialize());
+}
+
+void
+pmc_md_finalize(struct pmc_mdep *md)
+{
+
+ pmc_arm64_finalize(md);
+}
+
+int
+pmc_save_kernel_callchain(uintptr_t *cc, int maxsamples,
+ struct trapframe *tf)
+{
+ uintptr_t pc, r, stackstart, stackend, fp;
+ struct thread *td;
+ int count;
+
+ KASSERT(TRAPF_USERMODE(tf) == 0, ("[arm64,%d] not a kernel backtrace",
+ __LINE__));
+
+ td = curthread;
+ pc = PMC_TRAPFRAME_TO_PC(tf);
+ *cc++ = pc;
+
+ if (maxsamples <= 1)
+ return (1);
+
+ stackstart = (uintptr_t) td->td_kstack;
+ stackend = (uintptr_t) td->td_kstack + td->td_kstack_pages * PAGE_SIZE;
+ fp = PMC_TRAPFRAME_TO_FP(tf);
+
+ if (!PMC_IN_KERNEL(pc) ||
+ !PMC_IN_KERNEL_STACK(fp, stackstart, stackend))
+ return (1);
+
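+ /*
+  * Walk the chain of AAPCS64 frame records: the frame pointer
+  * addresses a (previous fp, saved lr) pair, so the return address
+  * lives at fp + sizeof(uintptr_t) and the caller's frame pointer
+  * is at fp itself.
+  */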
+ for (count = 1; count < maxsamples; count++) {
+ /* Use saved lr as pc. */
+ r = fp + sizeof(uintptr_t);
+ if (!PMC_IN_KERNEL_STACK(r, stackstart, stackend))
+ break;
+ pc = *(uintptr_t *)r;
+ if (!PMC_IN_KERNEL(pc))
+ break;
+
+ *cc++ = pc;
+
+ /* Switch to next frame up */
+ r = fp;
+ if (!PMC_IN_KERNEL_STACK(r, stackstart, stackend))
+ break;
+ fp = *(uintptr_t *)r;
+ if (!PMC_IN_KERNEL_STACK(fp, stackstart, stackend))
+ break;
+ }
+
+ return (count);
+}
+
+int
+pmc_save_user_callchain(uintptr_t *cc, int maxsamples,
+ struct trapframe *tf)
+{
+ uintptr_t pc, r, oldfp, fp;
+ struct thread *td;
+ int count;
+
+ KASSERT(TRAPF_USERMODE(tf), ("[arm64,%d] not a user trap frame tf=%p",
+ __LINE__, (void *) tf));
+
+ td = curthread;
+ pc = PMC_TRAPFRAME_TO_PC(tf);
+ *cc++ = pc;
+
+ if (maxsamples <= 1)
+ return (1);
+
+ oldfp = fp = PMC_TRAPFRAME_TO_FP(tf);
+
+ if (!PMC_IN_USERSPACE(pc) ||
+ !PMC_IN_USERSPACE(fp))
+ return (1);
+
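+ /*
+  * Same frame-record walk as the kernel case, except that user
+  * memory is fetched with copyin() and the walk stops if the frame
+  * pointer fails to move up the stack.
+  */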
+ for (count = 1; count < maxsamples; count++) {
+ /* Use saved lr as pc. */
+ r = fp + sizeof(uintptr_t);
+ if (copyin((void *)r, &pc, sizeof(pc)) != 0)
+ break;
+ if (!PMC_IN_USERSPACE(pc))
+ break;
+
+ *cc++ = pc;
+
+ /* Switch to next frame up */
+ oldfp = fp;
+ r = fp;
+ if (copyin((void *)r, &fp, sizeof(fp)) != 0)
+ break;
+ if (fp < oldfp || !PMC_IN_USERSPACE(fp))
+ break;
+ }
+
+ return (count);
+}
diff --git a/sys/dev/hwpmc/pmc_events.h b/sys/dev/hwpmc/pmc_events.h
index 7f878cb..467367f 100644
--- a/sys/dev/hwpmc/pmc_events.h
+++ b/sys/dev/hwpmc/pmc_events.h
@@ -4803,6 +4803,369 @@ __PMC_EV_ALIAS("IMPC_C0H_TRK_REQUEST.ALL", UCP_EVENT_84H_01H)
#define PMC_EV_ARMV7_LAST PMC_EV_ARMV7_CPU_CYCLES
/*
+ * ARMv8 Events
+ */
+
+#define __PMC_EV_ARMV8() \
+ __PMC_EV(ARMV8, EVENT_00H) \
+ __PMC_EV(ARMV8, EVENT_01H) \
+ __PMC_EV(ARMV8, EVENT_02H) \
+ __PMC_EV(ARMV8, EVENT_03H) \
+ __PMC_EV(ARMV8, EVENT_04H) \
+ __PMC_EV(ARMV8, EVENT_05H) \
+ __PMC_EV(ARMV8, EVENT_06H) \
+ __PMC_EV(ARMV8, EVENT_07H) \
+ __PMC_EV(ARMV8, EVENT_08H) \
+ __PMC_EV(ARMV8, EVENT_09H) \
+ __PMC_EV(ARMV8, EVENT_0AH) \
+ __PMC_EV(ARMV8, EVENT_0BH) \
+ __PMC_EV(ARMV8, EVENT_0CH) \
+ __PMC_EV(ARMV8, EVENT_0DH) \
+ __PMC_EV(ARMV8, EVENT_0EH) \
+ __PMC_EV(ARMV8, EVENT_0FH) \
+ __PMC_EV(ARMV8, EVENT_10H) \
+ __PMC_EV(ARMV8, EVENT_11H) \
+ __PMC_EV(ARMV8, EVENT_12H) \
+ __PMC_EV(ARMV8, EVENT_13H) \
+ __PMC_EV(ARMV8, EVENT_14H) \
+ __PMC_EV(ARMV8, EVENT_15H) \
+ __PMC_EV(ARMV8, EVENT_16H) \
+ __PMC_EV(ARMV8, EVENT_17H) \
+ __PMC_EV(ARMV8, EVENT_18H) \
+ __PMC_EV(ARMV8, EVENT_19H) \
+ __PMC_EV(ARMV8, EVENT_1AH) \
+ __PMC_EV(ARMV8, EVENT_1BH) \
+ __PMC_EV(ARMV8, EVENT_1CH) \
+ __PMC_EV(ARMV8, EVENT_1DH) \
+ __PMC_EV(ARMV8, EVENT_1EH) \
+ __PMC_EV(ARMV8, EVENT_1FH) \
+ __PMC_EV(ARMV8, EVENT_20H) \
+ __PMC_EV(ARMV8, EVENT_21H) \
+ __PMC_EV(ARMV8, EVENT_22H) \
+ __PMC_EV(ARMV8, EVENT_23H) \
+ __PMC_EV(ARMV8, EVENT_24H) \
+ __PMC_EV(ARMV8, EVENT_25H) \
+ __PMC_EV(ARMV8, EVENT_26H) \
+ __PMC_EV(ARMV8, EVENT_27H) \
+ __PMC_EV(ARMV8, EVENT_28H) \
+ __PMC_EV(ARMV8, EVENT_29H) \
+ __PMC_EV(ARMV8, EVENT_2AH) \
+ __PMC_EV(ARMV8, EVENT_2BH) \
+ __PMC_EV(ARMV8, EVENT_2CH) \
+ __PMC_EV(ARMV8, EVENT_2DH) \
+ __PMC_EV(ARMV8, EVENT_2EH) \
+ __PMC_EV(ARMV8, EVENT_2FH) \
+ __PMC_EV(ARMV8, EVENT_30H) \
+ __PMC_EV(ARMV8, EVENT_31H) \
+ __PMC_EV(ARMV8, EVENT_32H) \
+ __PMC_EV(ARMV8, EVENT_33H) \
+ __PMC_EV(ARMV8, EVENT_34H) \
+ __PMC_EV(ARMV8, EVENT_35H) \
+ __PMC_EV(ARMV8, EVENT_36H) \
+ __PMC_EV(ARMV8, EVENT_37H) \
+ __PMC_EV(ARMV8, EVENT_38H) \
+ __PMC_EV(ARMV8, EVENT_39H) \
+ __PMC_EV(ARMV8, EVENT_3AH) \
+ __PMC_EV(ARMV8, EVENT_3BH) \
+ __PMC_EV(ARMV8, EVENT_3CH) \
+ __PMC_EV(ARMV8, EVENT_3DH) \
+ __PMC_EV(ARMV8, EVENT_3EH) \
+ __PMC_EV(ARMV8, EVENT_3FH) \
+ __PMC_EV(ARMV8, EVENT_40H) \
+ __PMC_EV(ARMV8, EVENT_41H) \
+ __PMC_EV(ARMV8, EVENT_42H) \
+ __PMC_EV(ARMV8, EVENT_43H) \
+ __PMC_EV(ARMV8, EVENT_44H) \
+ __PMC_EV(ARMV8, EVENT_45H) \
+ __PMC_EV(ARMV8, EVENT_46H) \
+ __PMC_EV(ARMV8, EVENT_47H) \
+ __PMC_EV(ARMV8, EVENT_48H) \
+ __PMC_EV(ARMV8, EVENT_49H) \
+ __PMC_EV(ARMV8, EVENT_4AH) \
+ __PMC_EV(ARMV8, EVENT_4BH) \
+ __PMC_EV(ARMV8, EVENT_4CH) \
+ __PMC_EV(ARMV8, EVENT_4DH) \
+ __PMC_EV(ARMV8, EVENT_4EH) \
+ __PMC_EV(ARMV8, EVENT_4FH) \
+ __PMC_EV(ARMV8, EVENT_50H) \
+ __PMC_EV(ARMV8, EVENT_51H) \
+ __PMC_EV(ARMV8, EVENT_52H) \
+ __PMC_EV(ARMV8, EVENT_53H) \
+ __PMC_EV(ARMV8, EVENT_54H) \
+ __PMC_EV(ARMV8, EVENT_55H) \
+ __PMC_EV(ARMV8, EVENT_56H) \
+ __PMC_EV(ARMV8, EVENT_57H) \
+ __PMC_EV(ARMV8, EVENT_58H) \
+ __PMC_EV(ARMV8, EVENT_59H) \
+ __PMC_EV(ARMV8, EVENT_5AH) \
+ __PMC_EV(ARMV8, EVENT_5BH) \
+ __PMC_EV(ARMV8, EVENT_5CH) \
+ __PMC_EV(ARMV8, EVENT_5DH) \
+ __PMC_EV(ARMV8, EVENT_5EH) \
+ __PMC_EV(ARMV8, EVENT_5FH) \
+ __PMC_EV(ARMV8, EVENT_60H) \
+ __PMC_EV(ARMV8, EVENT_61H) \
+ __PMC_EV(ARMV8, EVENT_62H) \
+ __PMC_EV(ARMV8, EVENT_63H) \
+ __PMC_EV(ARMV8, EVENT_64H) \
+ __PMC_EV(ARMV8, EVENT_65H) \
+ __PMC_EV(ARMV8, EVENT_66H) \
+ __PMC_EV(ARMV8, EVENT_67H) \
+ __PMC_EV(ARMV8, EVENT_68H) \
+ __PMC_EV(ARMV8, EVENT_69H) \
+ __PMC_EV(ARMV8, EVENT_6AH) \
+ __PMC_EV(ARMV8, EVENT_6BH) \
+ __PMC_EV(ARMV8, EVENT_6CH) \
+ __PMC_EV(ARMV8, EVENT_6DH) \
+ __PMC_EV(ARMV8, EVENT_6EH) \
+ __PMC_EV(ARMV8, EVENT_6FH) \
+ __PMC_EV(ARMV8, EVENT_70H) \
+ __PMC_EV(ARMV8, EVENT_71H) \
+ __PMC_EV(ARMV8, EVENT_72H) \
+ __PMC_EV(ARMV8, EVENT_73H) \
+ __PMC_EV(ARMV8, EVENT_74H) \
+ __PMC_EV(ARMV8, EVENT_75H) \
+ __PMC_EV(ARMV8, EVENT_76H) \
+ __PMC_EV(ARMV8, EVENT_77H) \
+ __PMC_EV(ARMV8, EVENT_78H) \
+ __PMC_EV(ARMV8, EVENT_79H) \
+ __PMC_EV(ARMV8, EVENT_7AH) \
+ __PMC_EV(ARMV8, EVENT_7BH) \
+ __PMC_EV(ARMV8, EVENT_7CH) \
+ __PMC_EV(ARMV8, EVENT_7DH) \
+ __PMC_EV(ARMV8, EVENT_7EH) \
+ __PMC_EV(ARMV8, EVENT_7FH) \
+ __PMC_EV(ARMV8, EVENT_80H) \
+ __PMC_EV(ARMV8, EVENT_81H) \
+ __PMC_EV(ARMV8, EVENT_82H) \
+ __PMC_EV(ARMV8, EVENT_83H) \
+ __PMC_EV(ARMV8, EVENT_84H) \
+ __PMC_EV(ARMV8, EVENT_85H) \
+ __PMC_EV(ARMV8, EVENT_86H) \
+ __PMC_EV(ARMV8, EVENT_87H) \
+ __PMC_EV(ARMV8, EVENT_88H) \
+ __PMC_EV(ARMV8, EVENT_89H) \
+ __PMC_EV(ARMV8, EVENT_8AH) \
+ __PMC_EV(ARMV8, EVENT_8BH) \
+ __PMC_EV(ARMV8, EVENT_8CH) \
+ __PMC_EV(ARMV8, EVENT_8DH) \
+ __PMC_EV(ARMV8, EVENT_8EH) \
+ __PMC_EV(ARMV8, EVENT_8FH) \
+ __PMC_EV(ARMV8, EVENT_90H) \
+ __PMC_EV(ARMV8, EVENT_91H) \
+ __PMC_EV(ARMV8, EVENT_92H) \
+ __PMC_EV(ARMV8, EVENT_93H) \
+ __PMC_EV(ARMV8, EVENT_94H) \
+ __PMC_EV(ARMV8, EVENT_95H) \
+ __PMC_EV(ARMV8, EVENT_96H) \
+ __PMC_EV(ARMV8, EVENT_97H) \
+ __PMC_EV(ARMV8, EVENT_98H) \
+ __PMC_EV(ARMV8, EVENT_99H) \
+ __PMC_EV(ARMV8, EVENT_9AH) \
+ __PMC_EV(ARMV8, EVENT_9BH) \
+ __PMC_EV(ARMV8, EVENT_9CH) \
+ __PMC_EV(ARMV8, EVENT_9DH) \
+ __PMC_EV(ARMV8, EVENT_9EH) \
+ __PMC_EV(ARMV8, EVENT_9FH) \
+ __PMC_EV(ARMV8, EVENT_A0H) \
+ __PMC_EV(ARMV8, EVENT_A1H) \
+ __PMC_EV(ARMV8, EVENT_A2H) \
+ __PMC_EV(ARMV8, EVENT_A3H) \
+ __PMC_EV(ARMV8, EVENT_A4H) \
+ __PMC_EV(ARMV8, EVENT_A5H) \
+ __PMC_EV(ARMV8, EVENT_A6H) \
+ __PMC_EV(ARMV8, EVENT_A7H) \
+ __PMC_EV(ARMV8, EVENT_A8H) \
+ __PMC_EV(ARMV8, EVENT_A9H) \
+ __PMC_EV(ARMV8, EVENT_AAH) \
+ __PMC_EV(ARMV8, EVENT_ABH) \
+ __PMC_EV(ARMV8, EVENT_ACH) \
+ __PMC_EV(ARMV8, EVENT_ADH) \
+ __PMC_EV(ARMV8, EVENT_AEH) \
+ __PMC_EV(ARMV8, EVENT_AFH) \
+ __PMC_EV(ARMV8, EVENT_B0H) \
+ __PMC_EV(ARMV8, EVENT_B1H) \
+ __PMC_EV(ARMV8, EVENT_B2H) \
+ __PMC_EV(ARMV8, EVENT_B3H) \
+ __PMC_EV(ARMV8, EVENT_B4H) \
+ __PMC_EV(ARMV8, EVENT_B5H) \
+ __PMC_EV(ARMV8, EVENT_B6H) \
+ __PMC_EV(ARMV8, EVENT_B7H) \
+ __PMC_EV(ARMV8, EVENT_B8H) \
+ __PMC_EV(ARMV8, EVENT_B9H) \
+ __PMC_EV(ARMV8, EVENT_BAH) \
+ __PMC_EV(ARMV8, EVENT_BBH) \
+ __PMC_EV(ARMV8, EVENT_BCH) \
+ __PMC_EV(ARMV8, EVENT_BDH) \
+ __PMC_EV(ARMV8, EVENT_BEH) \
+ __PMC_EV(ARMV8, EVENT_BFH) \
+ __PMC_EV(ARMV8, EVENT_C0H) \
+ __PMC_EV(ARMV8, EVENT_C1H) \
+ __PMC_EV(ARMV8, EVENT_C2H) \
+ __PMC_EV(ARMV8, EVENT_C3H) \
+ __PMC_EV(ARMV8, EVENT_C4H) \
+ __PMC_EV(ARMV8, EVENT_C5H) \
+ __PMC_EV(ARMV8, EVENT_C6H) \
+ __PMC_EV(ARMV8, EVENT_C7H) \
+ __PMC_EV(ARMV8, EVENT_C8H) \
+ __PMC_EV(ARMV8, EVENT_C9H) \
+ __PMC_EV(ARMV8, EVENT_CAH) \
+ __PMC_EV(ARMV8, EVENT_CBH) \
+ __PMC_EV(ARMV8, EVENT_CCH) \
+ __PMC_EV(ARMV8, EVENT_CDH) \
+ __PMC_EV(ARMV8, EVENT_CEH) \
+ __PMC_EV(ARMV8, EVENT_CFH) \
+ __PMC_EV(ARMV8, EVENT_D0H) \
+ __PMC_EV(ARMV8, EVENT_D1H) \
+ __PMC_EV(ARMV8, EVENT_D2H) \
+ __PMC_EV(ARMV8, EVENT_D3H) \
+ __PMC_EV(ARMV8, EVENT_D4H) \
+ __PMC_EV(ARMV8, EVENT_D5H) \
+ __PMC_EV(ARMV8, EVENT_D6H) \
+ __PMC_EV(ARMV8, EVENT_D7H) \
+ __PMC_EV(ARMV8, EVENT_D8H) \
+ __PMC_EV(ARMV8, EVENT_D9H) \
+ __PMC_EV(ARMV8, EVENT_DAH) \
+ __PMC_EV(ARMV8, EVENT_DBH) \
+ __PMC_EV(ARMV8, EVENT_DCH) \
+ __PMC_EV(ARMV8, EVENT_DDH) \
+ __PMC_EV(ARMV8, EVENT_DEH) \
+ __PMC_EV(ARMV8, EVENT_DFH) \
+ __PMC_EV(ARMV8, EVENT_E0H) \
+ __PMC_EV(ARMV8, EVENT_E1H) \
+ __PMC_EV(ARMV8, EVENT_E2H) \
+ __PMC_EV(ARMV8, EVENT_E3H) \
+ __PMC_EV(ARMV8, EVENT_E4H) \
+ __PMC_EV(ARMV8, EVENT_E5H) \
+ __PMC_EV(ARMV8, EVENT_E6H) \
+ __PMC_EV(ARMV8, EVENT_E7H) \
+ __PMC_EV(ARMV8, EVENT_E8H) \
+ __PMC_EV(ARMV8, EVENT_E9H) \
+ __PMC_EV(ARMV8, EVENT_EAH) \
+ __PMC_EV(ARMV8, EVENT_EBH) \
+ __PMC_EV(ARMV8, EVENT_ECH) \
+ __PMC_EV(ARMV8, EVENT_EDH) \
+ __PMC_EV(ARMV8, EVENT_EEH) \
+ __PMC_EV(ARMV8, EVENT_EFH) \
+ __PMC_EV(ARMV8, EVENT_F0H) \
+ __PMC_EV(ARMV8, EVENT_F1H) \
+ __PMC_EV(ARMV8, EVENT_F2H) \
+ __PMC_EV(ARMV8, EVENT_F3H) \
+ __PMC_EV(ARMV8, EVENT_F4H) \
+ __PMC_EV(ARMV8, EVENT_F5H) \
+ __PMC_EV(ARMV8, EVENT_F6H) \
+ __PMC_EV(ARMV8, EVENT_F7H) \
+ __PMC_EV(ARMV8, EVENT_F8H) \
+ __PMC_EV(ARMV8, EVENT_F9H) \
+ __PMC_EV(ARMV8, EVENT_FAH) \
+ __PMC_EV(ARMV8, EVENT_FBH) \
+ __PMC_EV(ARMV8, EVENT_FCH) \
+ __PMC_EV(ARMV8, EVENT_FDH) \
+ __PMC_EV(ARMV8, EVENT_FEH) \
+ __PMC_EV(ARMV8, EVENT_FFH)
+
+#define PMC_EV_ARMV8_FIRST PMC_EV_ARMV8_EVENT_00H
+#define PMC_EV_ARMV8_LAST PMC_EV_ARMV8_EVENT_FFH
+
+#define __PMC_EV_ALIAS_ARMV8_COMMON() \
+ __PMC_EV_ALIAS("SW_INCR", ARMV8_EVENT_00H) \
+ __PMC_EV_ALIAS("L1I_CACHE_REFILL", ARMV8_EVENT_01H) \
+ __PMC_EV_ALIAS("L1I_TLB_REFILL", ARMV8_EVENT_02H) \
+ __PMC_EV_ALIAS("L1D_CACHE_REFILL", ARMV8_EVENT_03H) \
+ __PMC_EV_ALIAS("L1D_CACHE", ARMV8_EVENT_04H) \
+ __PMC_EV_ALIAS("L1D_TLB_REFILL", ARMV8_EVENT_05H) \
+ __PMC_EV_ALIAS("INST_RETIRED", ARMV8_EVENT_08H) \
+ __PMC_EV_ALIAS("EXC_TAKEN", ARMV8_EVENT_09H) \
+ __PMC_EV_ALIAS("EXC_RETURN", ARMV8_EVENT_0AH) \
+ __PMC_EV_ALIAS("CID_WRITE_RETIRED", ARMV8_EVENT_0BH) \
+ __PMC_EV_ALIAS("BR_MIS_PRED", ARMV8_EVENT_10H) \
+ __PMC_EV_ALIAS("CPU_CYCLES", ARMV8_EVENT_11H) \
+ __PMC_EV_ALIAS("BR_PRED", ARMV8_EVENT_12H) \
+ __PMC_EV_ALIAS("MEM_ACCESS", ARMV8_EVENT_13H) \
+ __PMC_EV_ALIAS("L1I_CACHE", ARMV8_EVENT_14H) \
+ __PMC_EV_ALIAS("L1D_CACHE_WB", ARMV8_EVENT_15H) \
+ __PMC_EV_ALIAS("L2D_CACHE", ARMV8_EVENT_16H) \
+ __PMC_EV_ALIAS("L2D_CACHE_REFILL", ARMV8_EVENT_17H) \
+ __PMC_EV_ALIAS("L2D_CACHE_WB", ARMV8_EVENT_18H) \
+ __PMC_EV_ALIAS("BUS_ACCESS", ARMV8_EVENT_19H) \
+ __PMC_EV_ALIAS("MEMORY_ERROR", ARMV8_EVENT_1AH) \
+ __PMC_EV_ALIAS("BUS_CYCLES", ARMV8_EVENT_1DH) \
+ __PMC_EV_ALIAS("CHAIN", ARMV8_EVENT_1EH) \
+ __PMC_EV_ALIAS("BUS_ACCESS_LD", ARMV8_EVENT_60H) \
+ __PMC_EV_ALIAS("BUS_ACCESS_ST", ARMV8_EVENT_61H) \
+ __PMC_EV_ALIAS("BR_INDIRECT_SPEC", ARMV8_EVENT_7AH) \
+ __PMC_EV_ALIAS("EXC_IRQ", ARMV8_EVENT_86H) \
+ __PMC_EV_ALIAS("EXC_FIQ", ARMV8_EVENT_87H)
+
+#define __PMC_EV_ALIAS_ARMV8_CORTEX_A53() \
+ __PMC_EV_ALIAS_ARMV8_COMMON() \
+ __PMC_EV_ALIAS("LD_RETIRED", ARMV8_EVENT_06H) \
+ __PMC_EV_ALIAS("ST_RETIRED", ARMV8_EVENT_07H) \
+ __PMC_EV_ALIAS("PC_WRITE_RETIRED", ARMV8_EVENT_0CH) \
+ __PMC_EV_ALIAS("BR_IMMED_RETIRED", ARMV8_EVENT_0DH) \
+ __PMC_EV_ALIAS("BR_RETURN_RETIRED", ARMV8_EVENT_0EH) \
+ __PMC_EV_ALIAS("UNALIGNED_LDST_RETIRED", ARMV8_EVENT_0FH)
+
+#define __PMC_EV_ALIAS_ARMV8_CORTEX_A57() \
+ __PMC_EV_ALIAS_ARMV8_COMMON() \
+ __PMC_EV_ALIAS("INST_SPEC", ARMV8_EVENT_1BH) \
+ __PMC_EV_ALIAS("TTBR_WRITE_RETIRED", ARMV8_EVENT_1CH) \
+ __PMC_EV_ALIAS("L1D_CACHE_LD", ARMV8_EVENT_40H) \
+ __PMC_EV_ALIAS("L1D_CACHE_ST", ARMV8_EVENT_41H) \
+ __PMC_EV_ALIAS("L1D_CACHE_REFILL_LD", ARMV8_EVENT_42H) \
+ __PMC_EV_ALIAS("L1D_CACHE_REFILL_ST", ARMV8_EVENT_43H) \
+ __PMC_EV_ALIAS("L1D_CACHE_WB_VICTIM", ARMV8_EVENT_46H) \
+ __PMC_EV_ALIAS("L1D_CACHE_WB_CLEAN", ARMV8_EVENT_47H) \
+ __PMC_EV_ALIAS("L1D_CACHE_INVAL", ARMV8_EVENT_48H) \
+ __PMC_EV_ALIAS("L1D_TLB_REFILL_LD", ARMV8_EVENT_4CH) \
+ __PMC_EV_ALIAS("L1D_TLB_REFILL_ST", ARMV8_EVENT_4DH) \
+ __PMC_EV_ALIAS("L2D_CACHE_LD", ARMV8_EVENT_50H) \
+ __PMC_EV_ALIAS("L2D_CACHE_ST", ARMV8_EVENT_51H) \
+ __PMC_EV_ALIAS("L2D_CACHE_REFILL_LD", ARMV8_EVENT_52H) \
+ __PMC_EV_ALIAS("L2D_CACHE_REFILL_ST", ARMV8_EVENT_53H) \
+ __PMC_EV_ALIAS("L2D_CACHE_WB_VICTIM", ARMV8_EVENT_56H) \
+ __PMC_EV_ALIAS("L2D_CACHE_WB_CLEAN", ARMV8_EVENT_57H) \
+ __PMC_EV_ALIAS("L2D_CACHE_INVAL", ARMV8_EVENT_58H) \
+ __PMC_EV_ALIAS("BUS_ACCESS_SHARED", ARMV8_EVENT_62H) \
+ __PMC_EV_ALIAS("BUS_ACCESS_NOT_SHARED", ARMV8_EVENT_63H) \
+ __PMC_EV_ALIAS("BUS_ACCESS_NORMAL", ARMV8_EVENT_64H) \
+ __PMC_EV_ALIAS("BUS_ACCESS_PERIPH", ARMV8_EVENT_65H) \
+ __PMC_EV_ALIAS("MEM_ACCESS_LD", ARMV8_EVENT_66H) \
+ __PMC_EV_ALIAS("MEM_ACCESS_ST", ARMV8_EVENT_67H) \
+ __PMC_EV_ALIAS("UNALIGNED_LD_SPEC", ARMV8_EVENT_68H) \
+ __PMC_EV_ALIAS("UNALIGNED_ST_SPEC", ARMV8_EVENT_69H) \
+ __PMC_EV_ALIAS("UNALIGNED_LDST_SPEC", ARMV8_EVENT_6AH) \
+ __PMC_EV_ALIAS("LDREX_SPEC", ARMV8_EVENT_6CH) \
+ __PMC_EV_ALIAS("STREX_PASS_SPEC", ARMV8_EVENT_6DH) \
+ __PMC_EV_ALIAS("STREX_FAIL_SPEC", ARMV8_EVENT_6EH) \
+ __PMC_EV_ALIAS("LD_SPEC", ARMV8_EVENT_70H) \
+ __PMC_EV_ALIAS("ST_SPEC", ARMV8_EVENT_71H) \
+ __PMC_EV_ALIAS("LDST_SPEC", ARMV8_EVENT_72H) \
+ __PMC_EV_ALIAS("DP_SPEC", ARMV8_EVENT_73H) \
+ __PMC_EV_ALIAS("ASE_SPEC", ARMV8_EVENT_74H) \
+ __PMC_EV_ALIAS("VFP_SPEC", ARMV8_EVENT_75H) \
+ __PMC_EV_ALIAS("PC_WRITE_SPEC", ARMV8_EVENT_76H) \
+ __PMC_EV_ALIAS("CRYPTO_SPEC", ARMV8_EVENT_77H) \
+ __PMC_EV_ALIAS("BR_IMMED_SPEC", ARMV8_EVENT_78H) \
+ __PMC_EV_ALIAS("BR_RETURN_SPEC", ARMV8_EVENT_79H) \
+ __PMC_EV_ALIAS("ISB_SPEC", ARMV8_EVENT_7CH) \
+ __PMC_EV_ALIAS("DSB_SPEC", ARMV8_EVENT_7DH) \
+ __PMC_EV_ALIAS("DMB_SPEC", ARMV8_EVENT_7EH) \
+ __PMC_EV_ALIAS("EXC_UNDEF", ARMV8_EVENT_81H) \
+ __PMC_EV_ALIAS("EXC_SVC", ARMV8_EVENT_82H) \
+ __PMC_EV_ALIAS("EXC_PABORT", ARMV8_EVENT_83H) \
+ __PMC_EV_ALIAS("EXC_DABORT", ARMV8_EVENT_84H) \
+ __PMC_EV_ALIAS("EXC_SMC", ARMV8_EVENT_88H) \
+ __PMC_EV_ALIAS("EXC_HVC", ARMV8_EVENT_8AH) \
+ __PMC_EV_ALIAS("EXC_TRAP_PABORT", ARMV8_EVENT_8BH) \
+ __PMC_EV_ALIAS("EXC_TRAP_DABORT", ARMV8_EVENT_8CH) \
+ __PMC_EV_ALIAS("EXC_TRAP_OTHER", ARMV8_EVENT_8DH) \
+ __PMC_EV_ALIAS("EXC_TRAP_IRQ", ARMV8_EVENT_8EH) \
+ __PMC_EV_ALIAS("EXC_TRAP_FIQ", ARMV8_EVENT_8FH) \
+ __PMC_EV_ALIAS("RC_LD_SPEC", ARMV8_EVENT_90H) \
+ __PMC_EV_ALIAS("RC_ST_SPEC", ARMV8_EVENT_91H)
+
+/*
* MIPS Events from "Programming the MIPS32 24K Core Family",
* Document Number: MD00355 Revision 04.63 December 19, 2008
* These events are kept in the order found in Table 7.4.
@@ -5566,6 +5929,7 @@ __PMC_EV_ALIAS("IMPC_C0H_TRK_REQUEST.ALL", UCP_EVENT_84H_01H)
* 0x13100 0x00FF IBM PPC970 events
* 0x13300 0x00FF Freescale e500 events
* 0x14000 0x0100 ARMv7 events
+ * 0x14100 0x0100 ARMv8 events
* 0x20000 0x1000 Software events
*/
#define __PMC_EVENTS() \
@@ -5605,6 +5969,8 @@ __PMC_EV_ALIAS("IMPC_C0H_TRK_REQUEST.ALL", UCP_EVENT_84H_01H)
__PMC_EV_E500() \
__PMC_EV_BLOCK(ARMV7, 0x14000) \
__PMC_EV_ARMV7() \
+ __PMC_EV_BLOCK(ARMV8, 0x14100) \
+ __PMC_EV_ARMV8()
#define PMC_EVENT_FIRST PMC_EV_TSC_TSC
#define PMC_EVENT_LAST PMC_EV_SOFT_LAST
diff --git a/sys/sys/pmc.h b/sys/sys/pmc.h
index cc67d37..6c1517b 100644
--- a/sys/sys/pmc.h
+++ b/sys/sys/pmc.h
@@ -73,7 +73,6 @@
#define __PMC_CPUS() \
__PMC_CPU(AMD_K7, 0x00, "AMD K7") \
__PMC_CPU(AMD_K8, 0x01, "AMD K8") \
- __PMC_CPU(ARMV7, 0x500, "ARMv7") \
__PMC_CPU(INTEL_P5, 0x80, "Intel Pentium") \
__PMC_CPU(INTEL_P6, 0x81, "Intel Pentium Pro") \
__PMC_CPU(INTEL_CL, 0x82, "Intel Celeron") \
@@ -105,7 +104,10 @@
__PMC_CPU(PPC_E500, 0x340, "PowerPC e500 Core") \
__PMC_CPU(PPC_MPC85XX, 0x340, "Freescale PowerPC MPC85XX") \
__PMC_CPU(PPC_970, 0x380, "IBM PowerPC 970") \
- __PMC_CPU(GENERIC, 0x400, "Generic")
+ __PMC_CPU(GENERIC, 0x400, "Generic") \
+ __PMC_CPU(ARMV7, 0x500, "ARMv7") \
+ __PMC_CPU(ARMV8_CORTEX_A53, 0x600, "ARMv8 Cortex A53") \
+ __PMC_CPU(ARMV8_CORTEX_A57, 0x601, "ARMv8 Cortex A57")
enum pmc_cputype {
#undef __PMC_CPU
@@ -133,6 +135,7 @@ enum pmc_cputype {
__PMC_CLASS(UCP) /* Intel Uncore programmable */ \
__PMC_CLASS(XSCALE) /* Intel XScale counters */ \
__PMC_CLASS(ARMV7) /* ARMv7 */ \
+ __PMC_CLASS(ARMV8) /* ARMv8 */ \
__PMC_CLASS(MIPS24K) /* MIPS 24K */ \
__PMC_CLASS(OCTEON) /* Cavium Octeon */ \
__PMC_CLASS(MIPS74K) /* MIPS 74K */ \