path: root/sys/dev/hwpmc
Diffstat (limited to 'sys/dev/hwpmc')
-rw-r--r--  sys/dev/hwpmc/hwpmc_amd.c      1033
-rw-r--r--  sys/dev/hwpmc/hwpmc_amd.h        97
-rw-r--r--  sys/dev/hwpmc/hwpmc_arm.c       160
-rw-r--r--  sys/dev/hwpmc/hwpmc_core.c     2641
-rw-r--r--  sys/dev/hwpmc/hwpmc_core.h      191
-rw-r--r--  sys/dev/hwpmc/hwpmc_ia64.c       66
-rw-r--r--  sys/dev/hwpmc/hwpmc_intel.c     326
-rw-r--r--  sys/dev/hwpmc/hwpmc_logging.c  1070
-rw-r--r--  sys/dev/hwpmc/hwpmc_mips.c      807
-rw-r--r--  sys/dev/hwpmc/hwpmc_mips24k.c   229
-rw-r--r--  sys/dev/hwpmc/hwpmc_mod.c      5139
-rw-r--r--  sys/dev/hwpmc/hwpmc_octeon.c    195
-rw-r--r--  sys/dev/hwpmc/hwpmc_pentium.c    57
-rw-r--r--  sys/dev/hwpmc/hwpmc_pentium.h    73
-rw-r--r--  sys/dev/hwpmc/hwpmc_piv.c      1698
-rw-r--r--  sys/dev/hwpmc/hwpmc_piv.h       125
-rw-r--r--  sys/dev/hwpmc/hwpmc_powerpc.c   852
-rw-r--r--  sys/dev/hwpmc/hwpmc_ppro.c      866
-rw-r--r--  sys/dev/hwpmc/hwpmc_ppro.h       84
-rw-r--r--  sys/dev/hwpmc/hwpmc_soft.c      485
-rw-r--r--  sys/dev/hwpmc/hwpmc_soft.h       48
-rw-r--r--  sys/dev/hwpmc/hwpmc_sparc64.c    66
-rw-r--r--  sys/dev/hwpmc/hwpmc_tsc.c       385
-rw-r--r--  sys/dev/hwpmc/hwpmc_tsc.h        43
-rw-r--r--  sys/dev/hwpmc/hwpmc_uncore.c   1208
-rw-r--r--  sys/dev/hwpmc/hwpmc_uncore.h    128
-rw-r--r--  sys/dev/hwpmc/hwpmc_x86.c       274
-rw-r--r--  sys/dev/hwpmc/hwpmc_xscale.c    676
-rw-r--r--  sys/dev/hwpmc/hwpmc_xscale.h     73
-rw-r--r--  sys/dev/hwpmc/pmc_events.h     4388
30 files changed, 23483 insertions, 0 deletions
diff --git a/sys/dev/hwpmc/hwpmc_amd.c b/sys/dev/hwpmc/hwpmc_amd.c
new file mode 100644
index 0000000..1a8398c
--- /dev/null
+++ b/sys/dev/hwpmc/hwpmc_amd.c
@@ -0,0 +1,1033 @@
+/*-
+ * Copyright (c) 2003-2008 Joseph Koshy
+ * Copyright (c) 2007 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * Portions of this software were developed by A. Joseph Koshy under
+ * sponsorship from the FreeBSD Foundation and Google, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/* Support for the AMD K7 and later processors */
+
+#include <sys/param.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/mutex.h>
+#include <sys/pmc.h>
+#include <sys/pmckern.h>
+#include <sys/smp.h>
+#include <sys/systm.h>
+
+#include <machine/cpu.h>
+#include <machine/cpufunc.h>
+#include <machine/md_var.h>
+#include <machine/specialreg.h>
+
+#ifdef DEBUG
+enum pmc_class amd_pmc_class;
+#endif
+
+/* AMD K7 & K8 PMCs */
+struct amd_descr {
+ struct pmc_descr pm_descr; /* "base class" */
+ uint32_t pm_evsel; /* address of EVSEL register */
+ uint32_t pm_perfctr; /* address of PERFCTR register */
+};
+
+static struct amd_descr amd_pmcdesc[AMD_NPMCS] =
+{
+ {
+ .pm_descr =
+ {
+ .pd_name = "",
+ .pd_class = -1,
+ .pd_caps = AMD_PMC_CAPS,
+ .pd_width = 48
+ },
+ .pm_evsel = AMD_PMC_EVSEL_0,
+ .pm_perfctr = AMD_PMC_PERFCTR_0
+ },
+ {
+ .pm_descr =
+ {
+ .pd_name = "",
+ .pd_class = -1,
+ .pd_caps = AMD_PMC_CAPS,
+ .pd_width = 48
+ },
+ .pm_evsel = AMD_PMC_EVSEL_1,
+ .pm_perfctr = AMD_PMC_PERFCTR_1
+ },
+ {
+ .pm_descr =
+ {
+ .pd_name = "",
+ .pd_class = -1,
+ .pd_caps = AMD_PMC_CAPS,
+ .pd_width = 48
+ },
+ .pm_evsel = AMD_PMC_EVSEL_2,
+ .pm_perfctr = AMD_PMC_PERFCTR_2
+ },
+ {
+ .pm_descr =
+ {
+ .pd_name = "",
+ .pd_class = -1,
+ .pd_caps = AMD_PMC_CAPS,
+ .pd_width = 48
+ },
+ .pm_evsel = AMD_PMC_EVSEL_3,
+ .pm_perfctr = AMD_PMC_PERFCTR_3
+ }
+};
+
+struct amd_event_code_map {
+ enum pmc_event pe_ev; /* enum value */
+ uint8_t pe_code; /* encoded event mask */
+ uint8_t pe_mask; /* bits allowed in unit mask */
+};
+
+const struct amd_event_code_map amd_event_codes[] = {
+#if defined(__i386__) /* 32 bit Athlon (K7) only */
+ { PMC_EV_K7_DC_ACCESSES, 0x40, 0 },
+ { PMC_EV_K7_DC_MISSES, 0x41, 0 },
+ { PMC_EV_K7_DC_REFILLS_FROM_L2, 0x42, AMD_PMC_UNITMASK_MOESI },
+ { PMC_EV_K7_DC_REFILLS_FROM_SYSTEM, 0x43, AMD_PMC_UNITMASK_MOESI },
+ { PMC_EV_K7_DC_WRITEBACKS, 0x44, AMD_PMC_UNITMASK_MOESI },
+ { PMC_EV_K7_L1_DTLB_MISS_AND_L2_DTLB_HITS, 0x45, 0 },
+ { PMC_EV_K7_L1_AND_L2_DTLB_MISSES, 0x46, 0 },
+ { PMC_EV_K7_MISALIGNED_REFERENCES, 0x47, 0 },
+
+ { PMC_EV_K7_IC_FETCHES, 0x80, 0 },
+ { PMC_EV_K7_IC_MISSES, 0x81, 0 },
+
+ { PMC_EV_K7_L1_ITLB_MISSES, 0x84, 0 },
+ { PMC_EV_K7_L1_L2_ITLB_MISSES, 0x85, 0 },
+
+ { PMC_EV_K7_RETIRED_INSTRUCTIONS, 0xC0, 0 },
+ { PMC_EV_K7_RETIRED_OPS, 0xC1, 0 },
+ { PMC_EV_K7_RETIRED_BRANCHES, 0xC2, 0 },
+ { PMC_EV_K7_RETIRED_BRANCHES_MISPREDICTED, 0xC3, 0 },
+ { PMC_EV_K7_RETIRED_TAKEN_BRANCHES, 0xC4, 0 },
+ { PMC_EV_K7_RETIRED_TAKEN_BRANCHES_MISPREDICTED, 0xC5, 0 },
+ { PMC_EV_K7_RETIRED_FAR_CONTROL_TRANSFERS, 0xC6, 0 },
+ { PMC_EV_K7_RETIRED_RESYNC_BRANCHES, 0xC7, 0 },
+ { PMC_EV_K7_INTERRUPTS_MASKED_CYCLES, 0xCD, 0 },
+ { PMC_EV_K7_INTERRUPTS_MASKED_WHILE_PENDING_CYCLES, 0xCE, 0 },
+ { PMC_EV_K7_HARDWARE_INTERRUPTS, 0xCF, 0 },
+#endif
+
+ { PMC_EV_K8_FP_DISPATCHED_FPU_OPS, 0x00, 0x3F },
+ { PMC_EV_K8_FP_CYCLES_WITH_NO_FPU_OPS_RETIRED, 0x01, 0x00 },
+ { PMC_EV_K8_FP_DISPATCHED_FPU_FAST_FLAG_OPS, 0x02, 0x00 },
+
+ { PMC_EV_K8_LS_SEGMENT_REGISTER_LOAD, 0x20, 0x7F },
+ { PMC_EV_K8_LS_MICROARCHITECTURAL_RESYNC_BY_SELF_MODIFYING_CODE,
+ 0x21, 0x00 },
+ { PMC_EV_K8_LS_MICROARCHITECTURAL_RESYNC_BY_SNOOP, 0x22, 0x00 },
+ { PMC_EV_K8_LS_BUFFER2_FULL, 0x23, 0x00 },
+ { PMC_EV_K8_LS_LOCKED_OPERATION, 0x24, 0x07 },
+ { PMC_EV_K8_LS_MICROARCHITECTURAL_LATE_CANCEL, 0x25, 0x00 },
+ { PMC_EV_K8_LS_RETIRED_CFLUSH_INSTRUCTIONS, 0x26, 0x00 },
+ { PMC_EV_K8_LS_RETIRED_CPUID_INSTRUCTIONS, 0x27, 0x00 },
+
+ { PMC_EV_K8_DC_ACCESS, 0x40, 0x00 },
+ { PMC_EV_K8_DC_MISS, 0x41, 0x00 },
+ { PMC_EV_K8_DC_REFILL_FROM_L2, 0x42, 0x1F },
+ { PMC_EV_K8_DC_REFILL_FROM_SYSTEM, 0x43, 0x1F },
+ { PMC_EV_K8_DC_COPYBACK, 0x44, 0x1F },
+ { PMC_EV_K8_DC_L1_DTLB_MISS_AND_L2_DTLB_HIT, 0x45, 0x00 },
+ { PMC_EV_K8_DC_L1_DTLB_MISS_AND_L2_DTLB_MISS, 0x46, 0x00 },
+ { PMC_EV_K8_DC_MISALIGNED_DATA_REFERENCE, 0x47, 0x00 },
+ { PMC_EV_K8_DC_MICROARCHITECTURAL_LATE_CANCEL, 0x48, 0x00 },
+ { PMC_EV_K8_DC_MICROARCHITECTURAL_EARLY_CANCEL, 0x49, 0x00 },
+ { PMC_EV_K8_DC_ONE_BIT_ECC_ERROR, 0x4A, 0x03 },
+ { PMC_EV_K8_DC_DISPATCHED_PREFETCH_INSTRUCTIONS, 0x4B, 0x07 },
+ { PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS, 0x4C, 0x03 },
+
+ { PMC_EV_K8_BU_CPU_CLK_UNHALTED, 0x76, 0x00 },
+ { PMC_EV_K8_BU_INTERNAL_L2_REQUEST, 0x7D, 0x1F },
+ { PMC_EV_K8_BU_FILL_REQUEST_L2_MISS, 0x7E, 0x07 },
+ { PMC_EV_K8_BU_FILL_INTO_L2, 0x7F, 0x03 },
+
+ { PMC_EV_K8_IC_FETCH, 0x80, 0x00 },
+ { PMC_EV_K8_IC_MISS, 0x81, 0x00 },
+ { PMC_EV_K8_IC_REFILL_FROM_L2, 0x82, 0x00 },
+ { PMC_EV_K8_IC_REFILL_FROM_SYSTEM, 0x83, 0x00 },
+ { PMC_EV_K8_IC_L1_ITLB_MISS_AND_L2_ITLB_HIT, 0x84, 0x00 },
+ { PMC_EV_K8_IC_L1_ITLB_MISS_AND_L2_ITLB_MISS, 0x85, 0x00 },
+ { PMC_EV_K8_IC_MICROARCHITECTURAL_RESYNC_BY_SNOOP, 0x86, 0x00 },
+ { PMC_EV_K8_IC_INSTRUCTION_FETCH_STALL, 0x87, 0x00 },
+ { PMC_EV_K8_IC_RETURN_STACK_HIT, 0x88, 0x00 },
+ { PMC_EV_K8_IC_RETURN_STACK_OVERFLOW, 0x89, 0x00 },
+
+ { PMC_EV_K8_FR_RETIRED_X86_INSTRUCTIONS, 0xC0, 0x00 },
+ { PMC_EV_K8_FR_RETIRED_UOPS, 0xC1, 0x00 },
+ { PMC_EV_K8_FR_RETIRED_BRANCHES, 0xC2, 0x00 },
+ { PMC_EV_K8_FR_RETIRED_BRANCHES_MISPREDICTED, 0xC3, 0x00 },
+ { PMC_EV_K8_FR_RETIRED_TAKEN_BRANCHES, 0xC4, 0x00 },
+ { PMC_EV_K8_FR_RETIRED_TAKEN_BRANCHES_MISPREDICTED, 0xC5, 0x00 },
+ { PMC_EV_K8_FR_RETIRED_FAR_CONTROL_TRANSFERS, 0xC6, 0x00 },
+ { PMC_EV_K8_FR_RETIRED_RESYNCS, 0xC7, 0x00 },
+ { PMC_EV_K8_FR_RETIRED_NEAR_RETURNS, 0xC8, 0x00 },
+ { PMC_EV_K8_FR_RETIRED_NEAR_RETURNS_MISPREDICTED, 0xC9, 0x00 },
+ { PMC_EV_K8_FR_RETIRED_TAKEN_BRANCHES_MISPREDICTED_BY_ADDR_MISCOMPARE,
+ 0xCA, 0x00 },
+ { PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS, 0xCB, 0x0F },
+ { PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS,
+ 0xCC, 0x07 },
+ { PMC_EV_K8_FR_INTERRUPTS_MASKED_CYCLES, 0xCD, 0x00 },
+ { PMC_EV_K8_FR_INTERRUPTS_MASKED_WHILE_PENDING_CYCLES, 0xCE, 0x00 },
+ { PMC_EV_K8_FR_TAKEN_HARDWARE_INTERRUPTS, 0xCF, 0x00 },
+
+ { PMC_EV_K8_FR_DECODER_EMPTY, 0xD0, 0x00 },
+ { PMC_EV_K8_FR_DISPATCH_STALLS, 0xD1, 0x00 },
+ { PMC_EV_K8_FR_DISPATCH_STALL_FROM_BRANCH_ABORT_TO_RETIRE,
+ 0xD2, 0x00 },
+ { PMC_EV_K8_FR_DISPATCH_STALL_FOR_SERIALIZATION, 0xD3, 0x00 },
+ { PMC_EV_K8_FR_DISPATCH_STALL_FOR_SEGMENT_LOAD, 0xD4, 0x00 },
+ { PMC_EV_K8_FR_DISPATCH_STALL_WHEN_REORDER_BUFFER_IS_FULL,
+ 0xD5, 0x00 },
+ { PMC_EV_K8_FR_DISPATCH_STALL_WHEN_RESERVATION_STATIONS_ARE_FULL,
+ 0xD6, 0x00 },
+ { PMC_EV_K8_FR_DISPATCH_STALL_WHEN_FPU_IS_FULL, 0xD7, 0x00 },
+ { PMC_EV_K8_FR_DISPATCH_STALL_WHEN_LS_IS_FULL, 0xD8, 0x00 },
+ { PMC_EV_K8_FR_DISPATCH_STALL_WHEN_WAITING_FOR_ALL_TO_BE_QUIET,
+ 0xD9, 0x00 },
+ { PMC_EV_K8_FR_DISPATCH_STALL_WHEN_FAR_XFER_OR_RESYNC_BRANCH_PENDING,
+ 0xDA, 0x00 },
+ { PMC_EV_K8_FR_FPU_EXCEPTIONS, 0xDB, 0x0F },
+ { PMC_EV_K8_FR_NUMBER_OF_BREAKPOINTS_FOR_DR0, 0xDC, 0x00 },
+ { PMC_EV_K8_FR_NUMBER_OF_BREAKPOINTS_FOR_DR1, 0xDD, 0x00 },
+ { PMC_EV_K8_FR_NUMBER_OF_BREAKPOINTS_FOR_DR2, 0xDE, 0x00 },
+ { PMC_EV_K8_FR_NUMBER_OF_BREAKPOINTS_FOR_DR3, 0xDF, 0x00 },
+
+ { PMC_EV_K8_NB_MEMORY_CONTROLLER_PAGE_ACCESS_EVENT, 0xE0, 0x7 },
+ { PMC_EV_K8_NB_MEMORY_CONTROLLER_PAGE_TABLE_OVERFLOW, 0xE1, 0x00 },
+ { PMC_EV_K8_NB_MEMORY_CONTROLLER_DRAM_COMMAND_SLOTS_MISSED,
+ 0xE2, 0x00 },
+ { PMC_EV_K8_NB_MEMORY_CONTROLLER_TURNAROUND, 0xE3, 0x07 },
+ { PMC_EV_K8_NB_MEMORY_CONTROLLER_BYPASS_SATURATION, 0xE4, 0x0F },
+ { PMC_EV_K8_NB_SIZED_COMMANDS, 0xEB, 0x7F },
+ { PMC_EV_K8_NB_PROBE_RESULT, 0xEC, 0x0F },
+ { PMC_EV_K8_NB_HT_BUS0_BANDWIDTH, 0xF6, 0x0F },
+ { PMC_EV_K8_NB_HT_BUS1_BANDWIDTH, 0xF7, 0x0F },
+ { PMC_EV_K8_NB_HT_BUS2_BANDWIDTH, 0xF8, 0x0F }
+
+};
+
+const int amd_event_codes_size =
+ sizeof(amd_event_codes) / sizeof(amd_event_codes[0]);
+
+/*
+ * Per-processor information
+ */
+
+struct amd_cpu {
+ struct pmc_hw pc_amdpmcs[AMD_NPMCS];
+};
+
+static struct amd_cpu **amd_pcpu;
+
+/*
+ * read a pmc register
+ */
+
+static int
+amd_read_pmc(int cpu, int ri, pmc_value_t *v)
+{
+ enum pmc_mode mode;
+ const struct amd_descr *pd;
+ struct pmc *pm;
+ pmc_value_t tmp;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < AMD_NPMCS,
+ ("[amd,%d] illegal row-index %d", __LINE__, ri));
+ KASSERT(amd_pcpu[cpu],
+ ("[amd,%d] null per-cpu, cpu %d", __LINE__, cpu));
+
+ pm = amd_pcpu[cpu]->pc_amdpmcs[ri].phw_pmc;
+ pd = &amd_pmcdesc[ri];
+
+ KASSERT(pm != NULL,
+ ("[amd,%d] No owner for HWPMC [cpu%d,pmc%d]", __LINE__,
+ cpu, ri));
+
+ mode = PMC_TO_MODE(pm);
+
+ PMCDBG(MDP,REA,1,"amd-read id=%d class=%d", ri, pd->pm_descr.pd_class);
+
+#ifdef DEBUG
+ KASSERT(pd->pm_descr.pd_class == amd_pmc_class,
+ ("[amd,%d] unknown PMC class (%d)", __LINE__,
+ pd->pm_descr.pd_class));
+#endif
+
+ tmp = rdmsr(pd->pm_perfctr); /* RDMSR serializes */
+ PMCDBG(MDP,REA,2,"amd-read (pre-munge) id=%d -> %jd", ri, tmp);
+ if (PMC_IS_SAMPLING_MODE(mode)) {
+ /* Sign extend 48 bit value to 64 bits. */
+ tmp = (pmc_value_t) (((int64_t) tmp << 16) >> 16);
+ tmp = AMD_PERFCTR_VALUE_TO_RELOAD_COUNT(tmp);
+ }
+ *v = tmp;
+
+ PMCDBG(MDP,REA,2,"amd-read (post-munge) id=%d -> %jd", ri, *v);
+
+ return 0;
+}
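
The shift pair in amd_read_pmc() is the standard idiom for sign-extending a 48-bit counter value to 64 bits. A standalone sketch of just that step (48 bits per the pd_width fields above; the arithmetic right shift of a negative value is implementation-defined in standard C, but is arithmetic on the compilers FreeBSD supports):

	#include <stdint.h>

	/* Sign-extend bit 47 of 'ctr' through bits 63..48. */
	static inline int64_t
	sext48(uint64_t ctr)
	{
		return (((int64_t)(ctr << 16)) >> 16);	/* 16 == 64 - 48 */
	}
	/* sext48(0xFFFFFFFFFFFF) == -1; sext48(0x7FFFFFFFFFFF) stays positive. */
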
+
+/*
+ * Write a PMC MSR.
+ */
+
+static int
+amd_write_pmc(int cpu, int ri, pmc_value_t v)
+{
+ const struct amd_descr *pd;
+ enum pmc_mode mode;
+ struct pmc *pm;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < AMD_NPMCS,
+ ("[amd,%d] illegal row-index %d", __LINE__, ri));
+
+ pm = amd_pcpu[cpu]->pc_amdpmcs[ri].phw_pmc;
+ pd = &amd_pmcdesc[ri];
+
+ KASSERT(pm != NULL,
+ ("[amd,%d] PMC not owned (cpu%d,pmc%d)", __LINE__,
+ cpu, ri));
+
+ mode = PMC_TO_MODE(pm);
+
+#ifdef DEBUG
+ KASSERT(pd->pm_descr.pd_class == amd_pmc_class,
+ ("[amd,%d] unknown PMC class (%d)", __LINE__,
+ pd->pm_descr.pd_class));
+#endif
+
+ /* use 2's complement of the count for sampling mode PMCs */
+ if (PMC_IS_SAMPLING_MODE(mode))
+ v = AMD_RELOAD_COUNT_TO_PERFCTR_VALUE(v);
+
+ PMCDBG(MDP,WRI,1,"amd-write cpu=%d ri=%d v=%jx", cpu, ri, v);
+
+ /* write the PMC value */
+ wrmsr(pd->pm_perfctr, v);
+ return 0;
+}
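
The reload conversion used above is plain two's-complement arithmetic: arming the counter with -(count) leaves it exactly count increments below the overflow point. A worked illustration (the count here is an invented example value):

	uint64_t reload  = 100000;		/* sample every 100000 events */
	uint64_t perfctr = (uint64_t)-reload;	/* AMD_RELOAD_COUNT_TO_PERFCTR_VALUE(reload) */
	/*
	 * The hardware truncates the write to the 48-bit counter width;
	 * after 100000 increments the counter wraps through zero and
	 * raises the overflow interrupt.
	 */
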
+
+/*
+ * configure hardware pmc according to the configuration recorded in
+ * pmc 'pm'.
+ */
+
+static int
+amd_config_pmc(int cpu, int ri, struct pmc *pm)
+{
+ struct pmc_hw *phw;
+
+ PMCDBG(MDP,CFG,1, "cpu=%d ri=%d pm=%p", cpu, ri, pm);
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < AMD_NPMCS,
+ ("[amd,%d] illegal row-index %d", __LINE__, ri));
+
+ phw = &amd_pcpu[cpu]->pc_amdpmcs[ri];
+
+ KASSERT(pm == NULL || phw->phw_pmc == NULL,
+ ("[amd,%d] pm=%p phw->pm=%p hwpmc not unconfigured",
+ __LINE__, pm, phw->phw_pmc));
+
+ phw->phw_pmc = pm;
+ return 0;
+}
+
+/*
+ * Retrieve a configured PMC pointer from hardware state.
+ */
+
+static int
+amd_get_config(int cpu, int ri, struct pmc **ppm)
+{
+ *ppm = amd_pcpu[cpu]->pc_amdpmcs[ri].phw_pmc;
+
+ return 0;
+}
+
+/*
+ * Machine dependent actions taken during the context switch in of a
+ * thread.
+ */
+
+static int
+amd_switch_in(struct pmc_cpu *pc, struct pmc_process *pp)
+{
+ (void) pc;
+
+ PMCDBG(MDP,SWI,1, "pc=%p pp=%p enable-msr=%d", pc, pp,
+ (pp->pp_flags & PMC_PP_ENABLE_MSR_ACCESS) != 0);
+
+ /* enable the RDPMC instruction if needed */
+ if (pp->pp_flags & PMC_PP_ENABLE_MSR_ACCESS)
+ load_cr4(rcr4() | CR4_PCE);
+
+ return 0;
+}
+
+/*
+ * Machine dependent actions taken during the context switch out of a
+ * thread.
+ */
+
+static int
+amd_switch_out(struct pmc_cpu *pc, struct pmc_process *pp)
+{
+ (void) pc;
+ (void) pp; /* can be NULL */
+
+ PMCDBG(MDP,SWO,1, "pc=%p pp=%p enable-msr=%d", pc, pp, pp ?
+	    (pp->pp_flags & PMC_PP_ENABLE_MSR_ACCESS) != 0 : 0);
+
+ /* always turn off the RDPMC instruction */
+ load_cr4(rcr4() & ~CR4_PCE);
+
+ return 0;
+}
+
+/*
+ * Check if a given allocation is feasible.
+ */
+
+static int
+amd_allocate_pmc(int cpu, int ri, struct pmc *pm,
+ const struct pmc_op_pmcallocate *a)
+{
+ int i;
+ uint32_t allowed_unitmask, caps, config, unitmask;
+ enum pmc_event pe;
+ const struct pmc_descr *pd;
+
+ (void) cpu;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < AMD_NPMCS,
+ ("[amd,%d] illegal row index %d", __LINE__, ri));
+
+ pd = &amd_pmcdesc[ri].pm_descr;
+
+ /* check class match */
+ if (pd->pd_class != a->pm_class)
+ return EINVAL;
+
+ caps = pm->pm_caps;
+
+ PMCDBG(MDP,ALL,1,"amd-allocate ri=%d caps=0x%x", ri, caps);
+
+ if ((pd->pd_caps & caps) != caps)
+ return EPERM;
+
+ pe = a->pm_ev;
+
+ /* map ev to the correct event mask code */
+ config = allowed_unitmask = 0;
+ for (i = 0; i < amd_event_codes_size; i++)
+ if (amd_event_codes[i].pe_ev == pe) {
+ config =
+ AMD_PMC_TO_EVENTMASK(amd_event_codes[i].pe_code);
+ allowed_unitmask =
+ AMD_PMC_TO_UNITMASK(amd_event_codes[i].pe_mask);
+ break;
+ }
+ if (i == amd_event_codes_size)
+ return EINVAL;
+
+ unitmask = a->pm_md.pm_amd.pm_amd_config & AMD_PMC_UNITMASK;
+ if (unitmask & ~allowed_unitmask) /* disallow reserved bits */
+ return EINVAL;
+
+ if (unitmask && (caps & PMC_CAP_QUALIFIER))
+ config |= unitmask;
+
+ if (caps & PMC_CAP_THRESHOLD)
+ config |= a->pm_md.pm_amd.pm_amd_config & AMD_PMC_COUNTERMASK;
+
+ /* set at least one of the 'usr' or 'os' caps */
+ if (caps & PMC_CAP_USER)
+ config |= AMD_PMC_USR;
+ if (caps & PMC_CAP_SYSTEM)
+ config |= AMD_PMC_OS;
+ if ((caps & (PMC_CAP_USER|PMC_CAP_SYSTEM)) == 0)
+ config |= (AMD_PMC_USR|AMD_PMC_OS);
+
+ if (caps & PMC_CAP_EDGE)
+ config |= AMD_PMC_EDGE;
+ if (caps & PMC_CAP_INVERT)
+ config |= AMD_PMC_INVERT;
+ if (caps & PMC_CAP_INTERRUPT)
+ config |= AMD_PMC_INT;
+
+ pm->pm_md.pm_amd.pm_amd_evsel = config; /* save config value */
+
+ PMCDBG(MDP,ALL,2,"amd-allocate ri=%d -> config=0x%x", ri, config);
+
+ return 0;
+}
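
For reference, the EVSEL image assembled here carries the event code in bits 7:0 and the unit mask in bits 15:8 (see AMD_PMC_TO_EVENTMASK() and AMD_PMC_TO_UNITMASK() in hwpmc_amd.h). A hand-composed sketch for event code 0x42 (DC refills from L2), counting all MOESI states in both user and kernel mode with overflow interrupts:

	uint32_t config;

	config = AMD_PMC_TO_EVENTMASK(0x42) |	/* bits 7:0  = event code */
	    AMD_PMC_TO_UNITMASK(0x1F) |		/* bits 15:8 = MOESI mask */
	    AMD_PMC_USR | AMD_PMC_OS |		/* count user and kernel */
	    AMD_PMC_INT;			/* interrupt on overflow */
	/* config == 0x00131F42 */
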
+
+/*
+ * Release machine dependent state associated with a PMC. This is a
+ * no-op on this architecture.
+ */
+
+/* ARGSUSED0 */
+static int
+amd_release_pmc(int cpu, int ri, struct pmc *pmc)
+{
+#ifdef DEBUG
+ const struct amd_descr *pd;
+#endif
+ struct pmc_hw *phw;
+
+ (void) pmc;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < AMD_NPMCS,
+ ("[amd,%d] illegal row-index %d", __LINE__, ri));
+
+ phw = &amd_pcpu[cpu]->pc_amdpmcs[ri];
+
+ KASSERT(phw->phw_pmc == NULL,
+ ("[amd,%d] PHW pmc %p non-NULL", __LINE__, phw->phw_pmc));
+
+#ifdef DEBUG
+ pd = &amd_pmcdesc[ri];
+ if (pd->pm_descr.pd_class == amd_pmc_class)
+ KASSERT(AMD_PMC_IS_STOPPED(pd->pm_evsel),
+ ("[amd,%d] PMC %d released while active", __LINE__, ri));
+#endif
+
+ return 0;
+}
+
+/*
+ * start a PMC.
+ */
+
+static int
+amd_start_pmc(int cpu, int ri)
+{
+ uint32_t config;
+ struct pmc *pm;
+ struct pmc_hw *phw;
+ const struct amd_descr *pd;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < AMD_NPMCS,
+ ("[amd,%d] illegal row-index %d", __LINE__, ri));
+
+ phw = &amd_pcpu[cpu]->pc_amdpmcs[ri];
+ pm = phw->phw_pmc;
+ pd = &amd_pmcdesc[ri];
+
+ KASSERT(pm != NULL,
+ ("[amd,%d] starting cpu%d,pmc%d with null pmc record", __LINE__,
+ cpu, ri));
+
+ PMCDBG(MDP,STA,1,"amd-start cpu=%d ri=%d", cpu, ri);
+
+ KASSERT(AMD_PMC_IS_STOPPED(pd->pm_evsel),
+ ("[amd,%d] pmc%d,cpu%d: Starting active PMC \"%s\"", __LINE__,
+ ri, cpu, pd->pm_descr.pd_name));
+
+ /* turn on the PMC ENABLE bit */
+ config = pm->pm_md.pm_amd.pm_amd_evsel | AMD_PMC_ENABLE;
+
+ PMCDBG(MDP,STA,2,"amd-start config=0x%x", config);
+
+ wrmsr(pd->pm_evsel, config);
+ return 0;
+}
+
+/*
+ * Stop a PMC.
+ */
+
+static int
+amd_stop_pmc(int cpu, int ri)
+{
+ struct pmc *pm;
+ struct pmc_hw *phw;
+ const struct amd_descr *pd;
+ uint64_t config;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < AMD_NPMCS,
+ ("[amd,%d] illegal row-index %d", __LINE__, ri));
+
+ phw = &amd_pcpu[cpu]->pc_amdpmcs[ri];
+ pm = phw->phw_pmc;
+ pd = &amd_pmcdesc[ri];
+
+ KASSERT(pm != NULL,
+ ("[amd,%d] cpu%d,pmc%d no PMC to stop", __LINE__,
+ cpu, ri));
+ KASSERT(!AMD_PMC_IS_STOPPED(pd->pm_evsel),
+ ("[amd,%d] PMC%d, CPU%d \"%s\" already stopped",
+ __LINE__, ri, cpu, pd->pm_descr.pd_name));
+
+ PMCDBG(MDP,STO,1,"amd-stop ri=%d", ri);
+
+ /* turn off the PMC ENABLE bit */
+ config = pm->pm_md.pm_amd.pm_amd_evsel & ~AMD_PMC_ENABLE;
+ wrmsr(pd->pm_evsel, config);
+ return 0;
+}
+
+/*
+ * Interrupt handler.  This function returns '1' if the interrupt was
+ * caused by this CPU's PMCs and '0' otherwise.  It must not sleep or
+ * do anything that a 'fast' interrupt handler is not allowed to do.
+ */
+
+static int
+amd_intr(int cpu, struct trapframe *tf)
+{
+ int i, error, retval;
+ uint32_t config, evsel, perfctr;
+ struct pmc *pm;
+ struct amd_cpu *pac;
+ pmc_value_t v;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[amd,%d] out of range CPU %d", __LINE__, cpu));
+
+ PMCDBG(MDP,INT,1, "cpu=%d tf=%p um=%d", cpu, (void *) tf,
+ TRAPF_USERMODE(tf));
+
+ retval = 0;
+
+ pac = amd_pcpu[cpu];
+
+ /*
+ * look for all PMCs that have interrupted:
+ * - look for a running, sampling PMC which has overflowed
+ * and which has a valid 'struct pmc' association
+ *
+ * If found, we call a helper to process the interrupt.
+ *
+ * If multiple PMCs interrupt at the same time, the AMD64
+ * processor appears to deliver as many NMIs as there are
+ * outstanding PMC interrupts. So we process only one NMI
+ * interrupt at a time.
+ */
+
+ for (i = 0; retval == 0 && i < AMD_NPMCS; i++) {
+
+ if ((pm = pac->pc_amdpmcs[i].phw_pmc) == NULL ||
+ !PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) {
+ continue;
+ }
+
+ if (!AMD_PMC_HAS_OVERFLOWED(i))
+ continue;
+
+ retval = 1; /* Found an interrupting PMC. */
+
+ if (pm->pm_state != PMC_STATE_RUNNING)
+ continue;
+
+ /* Stop the PMC, reload count. */
+ evsel = AMD_PMC_EVSEL_0 + i;
+ perfctr = AMD_PMC_PERFCTR_0 + i;
+ v = pm->pm_sc.pm_reloadcount;
+ config = rdmsr(evsel);
+
+ KASSERT((config & ~AMD_PMC_ENABLE) ==
+ (pm->pm_md.pm_amd.pm_amd_evsel & ~AMD_PMC_ENABLE),
+ ("[amd,%d] config mismatch reg=0x%x pm=0x%x", __LINE__,
+ config, pm->pm_md.pm_amd.pm_amd_evsel));
+
+ wrmsr(evsel, config & ~AMD_PMC_ENABLE);
+ wrmsr(perfctr, AMD_RELOAD_COUNT_TO_PERFCTR_VALUE(v));
+
+ /* Restart the counter if logging succeeded. */
+ error = pmc_process_interrupt(cpu, PMC_HR, pm, tf,
+ TRAPF_USERMODE(tf));
+ if (error == 0)
+ wrmsr(evsel, config | AMD_PMC_ENABLE);
+ }
+
+ atomic_add_int(retval ? &pmc_stats.pm_intr_processed :
+ &pmc_stats.pm_intr_ignored, 1);
+
+ return (retval);
+}
+
+/*
+ * describe a PMC
+ */
+static int
+amd_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc)
+{
+ int error;
+ size_t copied;
+ const struct amd_descr *pd;
+ struct pmc_hw *phw;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[amd,%d] illegal CPU %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < AMD_NPMCS,
+ ("[amd,%d] row-index %d out of range", __LINE__, ri));
+
+ phw = &amd_pcpu[cpu]->pc_amdpmcs[ri];
+ pd = &amd_pmcdesc[ri];
+
+ if ((error = copystr(pd->pm_descr.pd_name, pi->pm_name,
+ PMC_NAME_MAX, &copied)) != 0)
+ return error;
+
+ pi->pm_class = pd->pm_descr.pd_class;
+
+ if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) {
+ pi->pm_enabled = TRUE;
+ *ppmc = phw->phw_pmc;
+ } else {
+ pi->pm_enabled = FALSE;
+ *ppmc = NULL;
+ }
+
+ return 0;
+}
+
+/*
+ * i386 specific entry points
+ */
+
+/*
+ * return the MSR address of the given PMC.
+ */
+
+static int
+amd_get_msr(int ri, uint32_t *msr)
+{
+ KASSERT(ri >= 0 && ri < AMD_NPMCS,
+ ("[amd,%d] ri %d out of range", __LINE__, ri));
+
+ *msr = amd_pmcdesc[ri].pm_perfctr - AMD_PMC_PERFCTR_0;
+
+ return (0);
+}
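
The value returned is the counter index relative to PERFCTR0, which is what the RDPMC instruction expects in %ecx. A hypothetical userland sketch (the helper name is illustrative only; it assumes CR4.PCE has been enabled for the process via PMC_PP_ENABLE_MSR_ACCESS, as handled in amd_switch_in() above):

	/* Read an AMD performance counter directly from userland. */
	static inline uint64_t
	read_pmc_direct(uint32_t index)		/* index from amd_get_msr() */
	{
		uint32_t lo, hi;

		__asm __volatile("rdpmc" : "=a" (lo), "=d" (hi) : "c" (index));
		return (((uint64_t)hi << 32) | lo);
	}
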
+
+/*
+ * processor dependent initialization.
+ */
+
+static int
+amd_pcpu_init(struct pmc_mdep *md, int cpu)
+{
+ int classindex, first_ri, n;
+ struct pmc_cpu *pc;
+ struct amd_cpu *pac;
+ struct pmc_hw *phw;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[amd,%d] insane cpu number %d", __LINE__, cpu));
+
+ PMCDBG(MDP,INI,1,"amd-init cpu=%d", cpu);
+
+ amd_pcpu[cpu] = pac = malloc(sizeof(struct amd_cpu), M_PMC,
+ M_WAITOK|M_ZERO);
+
+ /*
+ * Set the content of the hardware descriptors to a known
+ * state and initialize pointers in the MI per-cpu descriptor.
+ */
+ pc = pmc_pcpu[cpu];
+#if defined(__amd64__)
+ classindex = PMC_MDEP_CLASS_INDEX_K8;
+#elif defined(__i386__)
+ classindex = md->pmd_cputype == PMC_CPU_AMD_K8 ?
+ PMC_MDEP_CLASS_INDEX_K8 : PMC_MDEP_CLASS_INDEX_K7;
+#endif
+ first_ri = md->pmd_classdep[classindex].pcd_ri;
+
+ KASSERT(pc != NULL, ("[amd,%d] NULL per-cpu pointer", __LINE__));
+
+ for (n = 0, phw = pac->pc_amdpmcs; n < AMD_NPMCS; n++, phw++) {
+ phw->phw_state = PMC_PHW_FLAG_IS_ENABLED |
+ PMC_PHW_CPU_TO_STATE(cpu) | PMC_PHW_INDEX_TO_STATE(n);
+ phw->phw_pmc = NULL;
+ pc->pc_hwpmcs[n + first_ri] = phw;
+ }
+
+ return (0);
+}
+
+
+/*
+ * processor dependent cleanup prior to the KLD
+ * being unloaded
+ */
+
+static int
+amd_pcpu_fini(struct pmc_mdep *md, int cpu)
+{
+ int classindex, first_ri, i;
+ uint32_t evsel;
+ struct pmc_cpu *pc;
+ struct amd_cpu *pac;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[amd,%d] insane cpu number (%d)", __LINE__, cpu));
+
+ PMCDBG(MDP,INI,1,"amd-cleanup cpu=%d", cpu);
+
+ /*
+ * First, turn off all PMCs on this CPU.
+ */
+	for (i = 0; i < AMD_NPMCS; i++) { /* XXX this loop is now not needed */
+ evsel = rdmsr(AMD_PMC_EVSEL_0 + i);
+ evsel &= ~AMD_PMC_ENABLE;
+ wrmsr(AMD_PMC_EVSEL_0 + i, evsel);
+ }
+
+ /*
+ * Next, free up allocated space.
+ */
+ if ((pac = amd_pcpu[cpu]) == NULL)
+ return (0);
+
+ amd_pcpu[cpu] = NULL;
+
+#ifdef DEBUG
+ for (i = 0; i < AMD_NPMCS; i++) {
+ KASSERT(pac->pc_amdpmcs[i].phw_pmc == NULL,
+ ("[amd,%d] CPU%d/PMC%d in use", __LINE__, cpu, i));
+ KASSERT(AMD_PMC_IS_STOPPED(AMD_PMC_EVSEL_0 + i),
+ ("[amd,%d] CPU%d/PMC%d not stopped", __LINE__, cpu, i));
+ }
+#endif
+
+ pc = pmc_pcpu[cpu];
+ KASSERT(pc != NULL, ("[amd,%d] NULL per-cpu state", __LINE__));
+
+#if defined(__amd64__)
+ classindex = PMC_MDEP_CLASS_INDEX_K8;
+#elif defined(__i386__)
+ classindex = md->pmd_cputype == PMC_CPU_AMD_K8 ? PMC_MDEP_CLASS_INDEX_K8 :
+ PMC_MDEP_CLASS_INDEX_K7;
+#endif
+ first_ri = md->pmd_classdep[classindex].pcd_ri;
+
+ /*
+ * Reset pointers in the MI 'per-cpu' state.
+ */
+ for (i = 0; i < AMD_NPMCS; i++) {
+ pc->pc_hwpmcs[i + first_ri] = NULL;
+ }
+
+ free(pac, M_PMC);
+
+ return (0);
+}
+
+/*
+ * Initialize ourselves.
+ */
+
+struct pmc_mdep *
+pmc_amd_initialize(void)
+{
+ int classindex, error, i, ncpus;
+ struct pmc_classdep *pcd;
+ enum pmc_cputype cputype;
+ struct pmc_mdep *pmc_mdep;
+ enum pmc_class class;
+ char *name;
+
+ /*
+	 * The presence of hardware performance counters on AMD
+	 * Athlon, Duron and later processors is _not_ indicated by
+	 * any of the processor feature flags set by the 'CPUID'
+	 * instruction, so we simply check for an 'instruction family'
+	 * of 6 or greater as reported by CPUID.
+ */
+
+ name = NULL;
+ switch (cpu_id & 0xF00) {
+#if defined(__i386__)
+ case 0x600: /* Athlon(tm) processor */
+ classindex = PMC_MDEP_CLASS_INDEX_K7;
+ cputype = PMC_CPU_AMD_K7;
+ class = PMC_CLASS_K7;
+ name = "K7";
+ break;
+#endif
+ case 0xF00: /* Athlon64/Opteron processor */
+ classindex = PMC_MDEP_CLASS_INDEX_K8;
+ cputype = PMC_CPU_AMD_K8;
+ class = PMC_CLASS_K8;
+ name = "K8";
+ break;
+
+ default:
+ (void) printf("pmc: Unknown AMD CPU.\n");
+ return NULL;
+ }
+
+#ifdef DEBUG
+ amd_pmc_class = class;
+#endif
+
+ /*
+ * Allocate space for pointers to PMC HW descriptors and for
+ * the MDEP structure used by MI code.
+ */
+ amd_pcpu = malloc(sizeof(struct amd_cpu *) * pmc_cpu_max(), M_PMC,
+ M_WAITOK|M_ZERO);
+
+ /*
+ * These processors have two classes of PMCs: the TSC and
+ * programmable PMCs.
+ */
+ pmc_mdep = pmc_mdep_alloc(2);
+
+ pmc_mdep->pmd_cputype = cputype;
+
+ ncpus = pmc_cpu_max();
+
+ /* Initialize the TSC. */
+ error = pmc_tsc_initialize(pmc_mdep, ncpus);
+ if (error)
+ goto error;
+
+ /* Initialize AMD K7 and K8 PMC handling. */
+ pcd = &pmc_mdep->pmd_classdep[classindex];
+
+ pcd->pcd_caps = AMD_PMC_CAPS;
+ pcd->pcd_class = class;
+ pcd->pcd_num = AMD_NPMCS;
+ pcd->pcd_ri = pmc_mdep->pmd_npmc;
+ pcd->pcd_width = 48;
+
+ /* fill in the correct pmc name and class */
+ for (i = 0; i < AMD_NPMCS; i++) {
+ (void) snprintf(amd_pmcdesc[i].pm_descr.pd_name,
+ sizeof(amd_pmcdesc[i].pm_descr.pd_name), "%s-%d",
+ name, i);
+ amd_pmcdesc[i].pm_descr.pd_class = class;
+ }
+
+ pcd->pcd_allocate_pmc = amd_allocate_pmc;
+ pcd->pcd_config_pmc = amd_config_pmc;
+ pcd->pcd_describe = amd_describe;
+ pcd->pcd_get_config = amd_get_config;
+ pcd->pcd_get_msr = amd_get_msr;
+ pcd->pcd_pcpu_fini = amd_pcpu_fini;
+ pcd->pcd_pcpu_init = amd_pcpu_init;
+ pcd->pcd_read_pmc = amd_read_pmc;
+ pcd->pcd_release_pmc = amd_release_pmc;
+ pcd->pcd_start_pmc = amd_start_pmc;
+ pcd->pcd_stop_pmc = amd_stop_pmc;
+ pcd->pcd_write_pmc = amd_write_pmc;
+
+ pmc_mdep->pmd_pcpu_init = NULL;
+ pmc_mdep->pmd_pcpu_fini = NULL;
+ pmc_mdep->pmd_intr = amd_intr;
+ pmc_mdep->pmd_switch_in = amd_switch_in;
+ pmc_mdep->pmd_switch_out = amd_switch_out;
+
+ pmc_mdep->pmd_npmc += AMD_NPMCS;
+
+ PMCDBG(MDP,INI,0,"%s","amd-initialize");
+
+ return (pmc_mdep);
+
+  error:
+	/* 'error' is always non-zero when we arrive here. */
+	free(pmc_mdep, M_PMC);
+
+ return (NULL);
+}
+
+/*
+ * Finalization code for AMD CPUs.
+ */
+
+void
+pmc_amd_finalize(struct pmc_mdep *md)
+{
+#if defined(INVARIANTS)
+ int classindex, i, ncpus, pmcclass;
+#endif
+
+ pmc_tsc_finalize(md);
+
+ KASSERT(amd_pcpu != NULL, ("[amd,%d] NULL per-cpu array pointer",
+ __LINE__));
+
+#if defined(INVARIANTS)
+ switch (md->pmd_cputype) {
+#if defined(__i386__)
+ case PMC_CPU_AMD_K7:
+ classindex = PMC_MDEP_CLASS_INDEX_K7;
+ pmcclass = PMC_CLASS_K7;
+ break;
+#endif
+ default:
+ classindex = PMC_MDEP_CLASS_INDEX_K8;
+ pmcclass = PMC_CLASS_K8;
+ }
+
+ KASSERT(md->pmd_classdep[classindex].pcd_class == pmcclass,
+ ("[amd,%d] pmc class mismatch", __LINE__));
+
+ ncpus = pmc_cpu_max();
+
+ for (i = 0; i < ncpus; i++)
+ KASSERT(amd_pcpu[i] == NULL, ("[amd,%d] non-null pcpu",
+ __LINE__));
+#endif
+
+ free(amd_pcpu, M_PMC);
+ amd_pcpu = NULL;
+}
diff --git a/sys/dev/hwpmc/hwpmc_amd.h b/sys/dev/hwpmc/hwpmc_amd.h
new file mode 100644
index 0000000..b995dbe
--- /dev/null
+++ b/sys/dev/hwpmc/hwpmc_amd.h
@@ -0,0 +1,97 @@
+/*-
+ * Copyright (c) 2005, Joseph Koshy
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/* Machine dependent interfaces */
+
+#ifndef _DEV_HWPMC_AMD_H_
+#define _DEV_HWPMC_AMD_H_ 1
+
+/* AMD K7 and K8 PMCs */
+
+#define AMD_PMC_EVSEL_0 0xC0010000
+#define AMD_PMC_EVSEL_1 0xC0010001
+#define AMD_PMC_EVSEL_2 0xC0010002
+#define AMD_PMC_EVSEL_3 0xC0010003
+
+#define AMD_PMC_PERFCTR_0 0xC0010004
+#define AMD_PMC_PERFCTR_1 0xC0010005
+#define AMD_PMC_PERFCTR_2 0xC0010006
+#define AMD_PMC_PERFCTR_3 0xC0010007
+
+
+#define AMD_NPMCS 4
+
+#define AMD_PMC_COUNTERMASK 0xFF000000
+#define AMD_PMC_TO_COUNTER(x) (((x) << 24) & AMD_PMC_COUNTERMASK)
+#define AMD_PMC_INVERT (1 << 23)
+#define AMD_PMC_ENABLE (1 << 22)
+#define AMD_PMC_INT (1 << 20)
+#define AMD_PMC_PC (1 << 19)
+#define AMD_PMC_EDGE (1 << 18)
+#define AMD_PMC_OS (1 << 17)
+#define AMD_PMC_USR (1 << 16)
+
+#define AMD_PMC_UNITMASK_M 0x10
+#define AMD_PMC_UNITMASK_O 0x08
+#define AMD_PMC_UNITMASK_E 0x04
+#define AMD_PMC_UNITMASK_S 0x02
+#define AMD_PMC_UNITMASK_I 0x01
+#define AMD_PMC_UNITMASK_MOESI 0x1F
+
+#define AMD_PMC_UNITMASK 0xFF00
+#define AMD_PMC_EVENTMASK 0x00FF
+
+#define AMD_PMC_TO_UNITMASK(x) (((x) << 8) & AMD_PMC_UNITMASK)
+#define AMD_PMC_TO_EVENTMASK(x) ((x) & 0xFF)
+#define AMD_VALID_BITS (AMD_PMC_COUNTERMASK | AMD_PMC_INVERT | \
+ AMD_PMC_ENABLE | AMD_PMC_INT | AMD_PMC_PC | AMD_PMC_EDGE | \
+ AMD_PMC_OS | AMD_PMC_USR | AMD_PMC_UNITMASK | AMD_PMC_EVENTMASK)
+
+#define AMD_PMC_CAPS (PMC_CAP_INTERRUPT | PMC_CAP_USER | \
+ PMC_CAP_SYSTEM | PMC_CAP_EDGE | PMC_CAP_THRESHOLD | \
+ PMC_CAP_READ | PMC_CAP_WRITE | PMC_CAP_INVERT | PMC_CAP_QUALIFIER)
+
+#define AMD_PMC_IS_STOPPED(evsel) ((rdmsr((evsel)) & AMD_PMC_ENABLE) == 0)
+#define AMD_PMC_HAS_OVERFLOWED(pmc) ((rdpmc(pmc) & (1ULL << 47)) == 0)
+
+#define AMD_RELOAD_COUNT_TO_PERFCTR_VALUE(V) (-(V))
+#define AMD_PERFCTR_VALUE_TO_RELOAD_COUNT(P) (-(P))
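
Taken together, these macros explain the bit-47 overflow test above: arming a sampling PMC with the two's complement of the reload count sets bit 47, and it stays set until the counter wraps through zero. A worked illustration (the reload count is an invented example, assumed well below 2^47):

	/*
	 * Example: reload count N = 0x1000.
	 *	armed value = -N, truncated to 48 bits = 0xFFFFFFFFF000
	 *	(bit 47 set); after N increments the counter wraps to 0,
	 *	bit 47 reads clear, and AMD_PMC_HAS_OVERFLOWED() reports
	 *	the overflow.
	 */
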
+
+struct pmc_md_amd_op_pmcallocate {
+ uint32_t pm_amd_config;
+};
+
+#ifdef _KERNEL
+
+/* MD extension for 'struct pmc' */
+struct pmc_md_amd_pmc {
+ uint32_t pm_amd_evsel;
+};
+
+#endif /* _KERNEL */
+#endif /* _DEV_HWPMC_AMD_H_ */
diff --git a/sys/dev/hwpmc/hwpmc_arm.c b/sys/dev/hwpmc/hwpmc_arm.c
new file mode 100644
index 0000000..654b949
--- /dev/null
+++ b/sys/dev/hwpmc/hwpmc_arm.c
@@ -0,0 +1,160 @@
+/*-
+ * Copyright (c) 2005, Joseph Koshy
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/pmc.h>
+#include <sys/proc.h>
+#include <sys/systm.h>
+
+#include <machine/cpu.h>
+#include <machine/md_var.h>
+#include <machine/pmc_mdep.h>
+
+#include <vm/vm.h>
+#include <vm/vm_param.h>
+#include <vm/pmap.h>
+
+struct pmc_mdep *
+pmc_md_initialize(void)
+{
+#ifdef CPU_XSCALE_IXP425
+ if (cpu_class == CPU_CLASS_XSCALE)
+ return pmc_xscale_initialize();
+ else
+#endif
+ return NULL;
+}
+
+void
+pmc_md_finalize(struct pmc_mdep *md)
+{
+#ifdef CPU_XSCALE_IXP425
+ if (cpu_class == CPU_CLASS_XSCALE)
+ pmc_xscale_finalize(md);
+ else
+ KASSERT(0, ("[arm,%d] Unknown CPU Class 0x%x", __LINE__,
+ cpu_class));
+#endif
+}
+
+int
+pmc_save_kernel_callchain(uintptr_t *cc, int maxsamples,
+ struct trapframe *tf)
+{
+ uintptr_t pc, r, stackstart, stackend, fp;
+ struct thread *td;
+ int count;
+
+ KASSERT(TRAPF_USERMODE(tf) == 0,("[arm,%d] not a kernel backtrace",
+ __LINE__));
+
+ td = curthread;
+ pc = PMC_TRAPFRAME_TO_PC(tf);
+ *cc++ = pc;
+
+ if (maxsamples <= 1)
+ return (1);
+
+ stackstart = (uintptr_t) td->td_kstack;
+ stackend = (uintptr_t) td->td_kstack + td->td_kstack_pages * PAGE_SIZE;
+ fp = PMC_TRAPFRAME_TO_FP(tf);
+
+ if (!PMC_IN_KERNEL(pc) ||
+ !PMC_IN_KERNEL_STACK(fp, stackstart, stackend))
+ return (1);
+
+ for (count = 1; count < maxsamples; count++) {
+ /* Use saved lr as pc. */
+ r = fp - sizeof(uintptr_t);
+ if (!PMC_IN_KERNEL_STACK(r, stackstart, stackend))
+ break;
+ pc = *(uintptr_t *)r;
+ if (!PMC_IN_KERNEL(pc))
+ break;
+
+ *cc++ = pc;
+
+ /* Switch to next frame up */
+ r = fp - 3 * sizeof(uintptr_t);
+ if (!PMC_IN_KERNEL_STACK(r, stackstart, stackend))
+ break;
+ fp = *(uintptr_t *)r;
+ if (!PMC_IN_KERNEL_STACK(fp, stackstart, stackend))
+ break;
+ }
+
+ return (count);
+}
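
The fixed offsets used by this walker encode the APCS frame layout it assumes; nothing in the code verifies the convention, so a frame built differently simply ends the walk at the range checks. The assumed layout, as a sketch:

	/*
	 * Assumed APCS stack frame (the stack grows downward):
	 *
	 *	fp      -> saved pc
	 *	fp - 4  -> saved lr	(used as the caller's return address)
	 *	fp - 8  -> saved sp
	 *	fp - 12 -> saved fp	(frame pointer of the next frame up)
	 */
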
+
+int
+pmc_save_user_callchain(uintptr_t *cc, int maxsamples,
+ struct trapframe *tf)
+{
+ uintptr_t pc, r, oldfp, fp;
+ struct thread *td;
+ int count;
+
+	KASSERT(TRAPF_USERMODE(tf), ("[arm,%d] Not a user trap frame tf=%p",
+ __LINE__, (void *) tf));
+
+ td = curthread;
+ pc = PMC_TRAPFRAME_TO_PC(tf);
+ *cc++ = pc;
+
+ if (maxsamples <= 1)
+ return (1);
+
+ oldfp = fp = PMC_TRAPFRAME_TO_FP(tf);
+
+ if (!PMC_IN_USERSPACE(pc) ||
+ !PMC_IN_USERSPACE(fp))
+ return (1);
+
+ for (count = 1; count < maxsamples; count++) {
+ /* Use saved lr as pc. */
+ r = fp - sizeof(uintptr_t);
+ if (copyin((void *)r, &pc, sizeof(pc)) != 0)
+ break;
+ if (!PMC_IN_USERSPACE(pc))
+ break;
+
+ *cc++ = pc;
+
+ /* Switch to next frame up */
+ oldfp = fp;
+ r = fp - 3 * sizeof(uintptr_t);
+ if (copyin((void *)r, &fp, sizeof(fp)) != 0)
+ break;
+ if (fp < oldfp || !PMC_IN_USERSPACE(fp))
+ break;
+ }
+
+ return (count);
+}
diff --git a/sys/dev/hwpmc/hwpmc_core.c b/sys/dev/hwpmc/hwpmc_core.c
new file mode 100644
index 0000000..77eb93b
--- /dev/null
+++ b/sys/dev/hwpmc/hwpmc_core.c
@@ -0,0 +1,2641 @@
+/*-
+ * Copyright (c) 2008 Joseph Koshy
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * Intel Core, Core 2 and Atom PMCs.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/pmc.h>
+#include <sys/pmckern.h>
+#include <sys/systm.h>
+
+#include <machine/intr_machdep.h>
+#include <machine/apicvar.h>
+#include <machine/cpu.h>
+#include <machine/cpufunc.h>
+#include <machine/md_var.h>
+#include <machine/specialreg.h>
+
+#define CORE_CPUID_REQUEST 0xA
+#define CORE_CPUID_REQUEST_SIZE 0x4
+#define CORE_CPUID_EAX 0x0
+#define CORE_CPUID_EBX 0x1
+#define CORE_CPUID_ECX 0x2
+#define CORE_CPUID_EDX 0x3
+
+#define IAF_PMC_CAPS \
+ (PMC_CAP_READ | PMC_CAP_WRITE | PMC_CAP_INTERRUPT | \
+ PMC_CAP_USER | PMC_CAP_SYSTEM)
+#define IAF_RI_TO_MSR(RI) ((RI) + (1 << 30))
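
The (1 << 30) offset in IAF_RI_TO_MSR() matches the RDPMC index encoding on these CPUs: bit 30 of the index selects the fixed-function counter file and the low bits select the counter. The same encoded value is later passed straight to rdpmc() in iaf_read_pmc(). For instance:

	/*
	 * IAF_RI_TO_MSR(1) == 0x40000001; rdpmc(0x40000001) reads fixed
	 * counter 1 (CPU_CLK_UNHALTED.CORE on these processors).
	 */
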
+
+#define IAP_PMC_CAPS (PMC_CAP_INTERRUPT | PMC_CAP_USER | PMC_CAP_SYSTEM | \
+ PMC_CAP_EDGE | PMC_CAP_THRESHOLD | PMC_CAP_READ | PMC_CAP_WRITE | \
+ PMC_CAP_INVERT | PMC_CAP_QUALIFIER | PMC_CAP_PRECISE)
+
+/*
+ * "Architectural" events defined by Intel. The values of these
+ * symbols correspond to positions in the bitmask returned by
+ * the CPUID.0AH instruction.
+ */
+enum core_arch_events {
+ CORE_AE_BRANCH_INSTRUCTION_RETIRED = 5,
+ CORE_AE_BRANCH_MISSES_RETIRED = 6,
+ CORE_AE_INSTRUCTION_RETIRED = 1,
+ CORE_AE_LLC_MISSES = 4,
+ CORE_AE_LLC_REFERENCE = 3,
+ CORE_AE_UNHALTED_REFERENCE_CYCLES = 2,
+ CORE_AE_UNHALTED_CORE_CYCLES = 0
+};
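
These positions index the architectural-events bitmask that CPUID leaf 0AH returns in %ebx, where a set bit means the event is *not* available on the running CPU. A minimal probe sketch, assuming FreeBSD's cpuid_count() from <machine/cpufunc.h>:

	uint32_t regs[CORE_CPUID_REQUEST_SIZE];

	cpuid_count(CORE_CPUID_REQUEST, 0, regs);
	if ((regs[CORE_CPUID_EBX] &
	    (1 << CORE_AE_INSTRUCTION_RETIRED)) == 0)
		printf("INSTRUCTION_RETIRED is supported\n");
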
+
+static enum pmc_cputype core_cputype;
+
+struct core_cpu {
+ volatile uint32_t pc_resync;
+ volatile uint32_t pc_iafctrl; /* Fixed function control. */
+ volatile uint64_t pc_globalctrl; /* Global control register. */
+ struct pmc_hw pc_corepmcs[];
+};
+
+static struct core_cpu **core_pcpu;
+
+static uint32_t core_architectural_events;
+static uint64_t core_pmcmask;
+
+static int core_iaf_ri; /* relative index of fixed counters */
+static int core_iaf_width;
+static int core_iaf_npmc;
+
+static int core_iap_width;
+static int core_iap_npmc;
+
+static int
+core_pcpu_noop(struct pmc_mdep *md, int cpu)
+{
+ (void) md;
+ (void) cpu;
+ return (0);
+}
+
+static int
+core_pcpu_init(struct pmc_mdep *md, int cpu)
+{
+ struct pmc_cpu *pc;
+ struct core_cpu *cc;
+ struct pmc_hw *phw;
+ int core_ri, n, npmc;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[iaf,%d] insane cpu number %d", __LINE__, cpu));
+
+ PMCDBG(MDP,INI,1,"core-init cpu=%d", cpu);
+
+ core_ri = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_IAP].pcd_ri;
+ npmc = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_IAP].pcd_num;
+
+ if (core_cputype != PMC_CPU_INTEL_CORE)
+ npmc += md->pmd_classdep[PMC_MDEP_CLASS_INDEX_IAF].pcd_num;
+
+ cc = malloc(sizeof(struct core_cpu) + npmc * sizeof(struct pmc_hw),
+ M_PMC, M_WAITOK | M_ZERO);
+
+ core_pcpu[cpu] = cc;
+ pc = pmc_pcpu[cpu];
+
+ KASSERT(pc != NULL && cc != NULL,
+ ("[core,%d] NULL per-cpu structures cpu=%d", __LINE__, cpu));
+
+ for (n = 0, phw = cc->pc_corepmcs; n < npmc; n++, phw++) {
+ phw->phw_state = PMC_PHW_FLAG_IS_ENABLED |
+ PMC_PHW_CPU_TO_STATE(cpu) |
+ PMC_PHW_INDEX_TO_STATE(n + core_ri);
+ phw->phw_pmc = NULL;
+ pc->pc_hwpmcs[n + core_ri] = phw;
+ }
+
+ return (0);
+}
+
+static int
+core_pcpu_fini(struct pmc_mdep *md, int cpu)
+{
+ int core_ri, n, npmc;
+ struct pmc_cpu *pc;
+ struct core_cpu *cc;
+ uint64_t msr = 0;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[core,%d] insane cpu number (%d)", __LINE__, cpu));
+
+ PMCDBG(MDP,INI,1,"core-pcpu-fini cpu=%d", cpu);
+
+ if ((cc = core_pcpu[cpu]) == NULL)
+ return (0);
+
+ core_pcpu[cpu] = NULL;
+
+ pc = pmc_pcpu[cpu];
+
+ KASSERT(pc != NULL, ("[core,%d] NULL per-cpu %d state", __LINE__,
+ cpu));
+
+ npmc = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_IAP].pcd_num;
+ core_ri = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_IAP].pcd_ri;
+
+ for (n = 0; n < npmc; n++) {
+ msr = rdmsr(IAP_EVSEL0 + n) & ~IAP_EVSEL_MASK;
+ wrmsr(IAP_EVSEL0 + n, msr);
+ }
+
+ if (core_cputype != PMC_CPU_INTEL_CORE) {
+ msr = rdmsr(IAF_CTRL) & ~IAF_CTRL_MASK;
+ wrmsr(IAF_CTRL, msr);
+ npmc += md->pmd_classdep[PMC_MDEP_CLASS_INDEX_IAF].pcd_num;
+ }
+
+ for (n = 0; n < npmc; n++)
+ pc->pc_hwpmcs[n + core_ri] = NULL;
+
+ free(cc, M_PMC);
+
+ return (0);
+}
+
+/*
+ * Fixed function counters.
+ */
+
+static pmc_value_t
+iaf_perfctr_value_to_reload_count(pmc_value_t v)
+{
+ v &= (1ULL << core_iaf_width) - 1;
+ return (1ULL << core_iaf_width) - v;
+}
+
+static pmc_value_t
+iaf_reload_count_to_perfctr_value(pmc_value_t rlc)
+{
+ return (1ULL << core_iaf_width) - rlc;
+}
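
Unlike the AMD code, which negates in 64 bits and lets the hardware truncate, these helpers mask explicitly to the probed counter width. With a width of 40 bits, say (the width is CPU-reported; 40 is used here purely for illustration):

	/*
	 * iaf_reload_count_to_perfctr_value(1000) == (1ULL << 40) - 1000,
	 * so the fixed counter overflows after exactly 1000 increments,
	 * and iaf_perfctr_value_to_reload_count() recovers 1000 from a
	 * raw counter read.
	 */
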
+
+static int
+iaf_allocate_pmc(int cpu, int ri, struct pmc *pm,
+ const struct pmc_op_pmcallocate *a)
+{
+ enum pmc_event ev;
+ uint32_t caps, flags, validflags;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[core,%d] illegal CPU %d", __LINE__, cpu));
+
+ PMCDBG(MDP,ALL,1, "iaf-allocate ri=%d reqcaps=0x%x", ri, pm->pm_caps);
+
+	if (ri < 0 || ri >= core_iaf_npmc)
+ return (EINVAL);
+
+ caps = a->pm_caps;
+
+ if (a->pm_class != PMC_CLASS_IAF ||
+ (caps & IAF_PMC_CAPS) != caps)
+ return (EINVAL);
+
+ ev = pm->pm_event;
+ if (ev < PMC_EV_IAF_FIRST || ev > PMC_EV_IAF_LAST)
+ return (EINVAL);
+
+ if (ev == PMC_EV_IAF_INSTR_RETIRED_ANY && ri != 0)
+ return (EINVAL);
+ if (ev == PMC_EV_IAF_CPU_CLK_UNHALTED_CORE && ri != 1)
+ return (EINVAL);
+ if (ev == PMC_EV_IAF_CPU_CLK_UNHALTED_REF && ri != 2)
+ return (EINVAL);
+
+ flags = a->pm_md.pm_iaf.pm_iaf_flags;
+
+ validflags = IAF_MASK;
+
+ if (core_cputype != PMC_CPU_INTEL_ATOM)
+ validflags &= ~IAF_ANY;
+
+ if ((flags & ~validflags) != 0)
+ return (EINVAL);
+
+ if (caps & PMC_CAP_INTERRUPT)
+ flags |= IAF_PMI;
+ if (caps & PMC_CAP_SYSTEM)
+ flags |= IAF_OS;
+ if (caps & PMC_CAP_USER)
+ flags |= IAF_USR;
+ if ((caps & (PMC_CAP_USER | PMC_CAP_SYSTEM)) == 0)
+ flags |= (IAF_OS | IAF_USR);
+
+ pm->pm_md.pm_iaf.pm_iaf_ctrl = (flags << (ri * 4));
+
+ PMCDBG(MDP,ALL,2, "iaf-allocate config=0x%jx",
+ (uintmax_t) pm->pm_md.pm_iaf.pm_iaf_ctrl);
+
+ return (0);
+}
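
The `flags << (ri * 4)` shift reflects the layout of the fixed-counter control MSR (IAF_CTRL): each fixed counter owns a four-bit control nibble. A sketch of that nibble, using this driver's IAF_* flag names:

	/*
	 * IAF_CTRL bits 4*ri .. 4*ri+3 control fixed counter 'ri':
	 *	bit 0	IAF_OS	- count while in ring 0
	 *	bit 1	IAF_USR	- count while in rings > 0
	 *	bit 2	IAF_ANY	- any-thread counting (not on all models)
	 *	bit 3	IAF_PMI	- raise an interrupt on overflow
	 */
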
+
+static int
+iaf_config_pmc(int cpu, int ri, struct pmc *pm)
+{
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[core,%d] illegal CPU %d", __LINE__, cpu));
+
+ KASSERT(ri >= 0 && ri < core_iaf_npmc,
+ ("[core,%d] illegal row-index %d", __LINE__, ri));
+
+ PMCDBG(MDP,CFG,1, "iaf-config cpu=%d ri=%d pm=%p", cpu, ri, pm);
+
+ KASSERT(core_pcpu[cpu] != NULL, ("[core,%d] null per-cpu %d", __LINE__,
+ cpu));
+
+ core_pcpu[cpu]->pc_corepmcs[ri + core_iaf_ri].phw_pmc = pm;
+
+ return (0);
+}
+
+static int
+iaf_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc)
+{
+ int error;
+ struct pmc_hw *phw;
+ char iaf_name[PMC_NAME_MAX];
+
+ phw = &core_pcpu[cpu]->pc_corepmcs[ri + core_iaf_ri];
+
+ (void) snprintf(iaf_name, sizeof(iaf_name), "IAF-%d", ri);
+ if ((error = copystr(iaf_name, pi->pm_name, PMC_NAME_MAX,
+ NULL)) != 0)
+ return (error);
+
+ pi->pm_class = PMC_CLASS_IAF;
+
+ if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) {
+ pi->pm_enabled = TRUE;
+ *ppmc = phw->phw_pmc;
+ } else {
+ pi->pm_enabled = FALSE;
+ *ppmc = NULL;
+ }
+
+ return (0);
+}
+
+static int
+iaf_get_config(int cpu, int ri, struct pmc **ppm)
+{
+ *ppm = core_pcpu[cpu]->pc_corepmcs[ri + core_iaf_ri].phw_pmc;
+
+ return (0);
+}
+
+static int
+iaf_get_msr(int ri, uint32_t *msr)
+{
+ KASSERT(ri >= 0 && ri < core_iaf_npmc,
+ ("[iaf,%d] ri %d out of range", __LINE__, ri));
+
+ *msr = IAF_RI_TO_MSR(ri);
+
+ return (0);
+}
+
+static int
+iaf_read_pmc(int cpu, int ri, pmc_value_t *v)
+{
+ struct pmc *pm;
+ pmc_value_t tmp;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[core,%d] illegal cpu value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < core_iaf_npmc,
+ ("[core,%d] illegal row-index %d", __LINE__, ri));
+
+ pm = core_pcpu[cpu]->pc_corepmcs[ri + core_iaf_ri].phw_pmc;
+
+ KASSERT(pm,
+ ("[core,%d] cpu %d ri %d(%d) pmc not configured", __LINE__, cpu,
+ ri, ri + core_iaf_ri));
+
+ tmp = rdpmc(IAF_RI_TO_MSR(ri));
+
+ if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
+ *v = iaf_perfctr_value_to_reload_count(tmp);
+ else
+ *v = tmp;
+
+ PMCDBG(MDP,REA,1, "iaf-read cpu=%d ri=%d msr=0x%x -> v=%jx", cpu, ri,
+ IAF_RI_TO_MSR(ri), *v);
+
+ return (0);
+}
+
+static int
+iaf_release_pmc(int cpu, int ri, struct pmc *pmc)
+{
+ PMCDBG(MDP,REL,1, "iaf-release cpu=%d ri=%d pm=%p", cpu, ri, pmc);
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[core,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < core_iaf_npmc,
+ ("[core,%d] illegal row-index %d", __LINE__, ri));
+
+ KASSERT(core_pcpu[cpu]->pc_corepmcs[ri + core_iaf_ri].phw_pmc == NULL,
+ ("[core,%d] PHW pmc non-NULL", __LINE__));
+
+ return (0);
+}
+
+static int
+iaf_start_pmc(int cpu, int ri)
+{
+ struct pmc *pm;
+ struct core_cpu *iafc;
+ uint64_t msr = 0;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[core,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < core_iaf_npmc,
+ ("[core,%d] illegal row-index %d", __LINE__, ri));
+
+ PMCDBG(MDP,STA,1,"iaf-start cpu=%d ri=%d", cpu, ri);
+
+ iafc = core_pcpu[cpu];
+ pm = iafc->pc_corepmcs[ri + core_iaf_ri].phw_pmc;
+
+ iafc->pc_iafctrl |= pm->pm_md.pm_iaf.pm_iaf_ctrl;
+
+ msr = rdmsr(IAF_CTRL) & ~IAF_CTRL_MASK;
+ wrmsr(IAF_CTRL, msr | (iafc->pc_iafctrl & IAF_CTRL_MASK));
+
+ do {
+ iafc->pc_resync = 0;
+ iafc->pc_globalctrl |= (1ULL << (ri + IAF_OFFSET));
+ msr = rdmsr(IA_GLOBAL_CTRL) & ~IAF_GLOBAL_CTRL_MASK;
+ wrmsr(IA_GLOBAL_CTRL, msr | (iafc->pc_globalctrl &
+ IAF_GLOBAL_CTRL_MASK));
+ } while (iafc->pc_resync != 0);
+
+ PMCDBG(MDP,STA,1,"iafctrl=%x(%x) globalctrl=%jx(%jx)",
+ iafc->pc_iafctrl, (uint32_t) rdmsr(IAF_CTRL),
+ iafc->pc_globalctrl, rdmsr(IA_GLOBAL_CTRL));
+
+ return (0);
+}
+
+static int
+iaf_stop_pmc(int cpu, int ri)
+{
+ uint32_t fc;
+ struct core_cpu *iafc;
+ uint64_t msr = 0;
+
+ PMCDBG(MDP,STO,1,"iaf-stop cpu=%d ri=%d", cpu, ri);
+
+ iafc = core_pcpu[cpu];
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[core,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < core_iaf_npmc,
+ ("[core,%d] illegal row-index %d", __LINE__, ri));
+
+ fc = (IAF_MASK << (ri * 4));
+
+ if (core_cputype != PMC_CPU_INTEL_ATOM)
+ fc &= ~IAF_ANY;
+
+ iafc->pc_iafctrl &= ~fc;
+
+ PMCDBG(MDP,STO,1,"iaf-stop iafctrl=%x", iafc->pc_iafctrl);
+ msr = rdmsr(IAF_CTRL) & ~IAF_CTRL_MASK;
+ wrmsr(IAF_CTRL, msr | (iafc->pc_iafctrl & IAF_CTRL_MASK));
+
+ do {
+ iafc->pc_resync = 0;
+ iafc->pc_globalctrl &= ~(1ULL << (ri + IAF_OFFSET));
+ msr = rdmsr(IA_GLOBAL_CTRL) & ~IAF_GLOBAL_CTRL_MASK;
+ wrmsr(IA_GLOBAL_CTRL, msr | (iafc->pc_globalctrl &
+ IAF_GLOBAL_CTRL_MASK));
+ } while (iafc->pc_resync != 0);
+
+ PMCDBG(MDP,STO,1,"iafctrl=%x(%x) globalctrl=%jx(%jx)",
+ iafc->pc_iafctrl, (uint32_t) rdmsr(IAF_CTRL),
+ iafc->pc_globalctrl, rdmsr(IA_GLOBAL_CTRL));
+
+ return (0);
+}
+
+static int
+iaf_write_pmc(int cpu, int ri, pmc_value_t v)
+{
+ struct core_cpu *cc;
+ struct pmc *pm;
+ uint64_t msr;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[core,%d] illegal cpu value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < core_iaf_npmc,
+ ("[core,%d] illegal row-index %d", __LINE__, ri));
+
+ cc = core_pcpu[cpu];
+ pm = cc->pc_corepmcs[ri + core_iaf_ri].phw_pmc;
+
+ KASSERT(pm,
+ ("[core,%d] cpu %d ri %d pmc not configured", __LINE__, cpu, ri));
+
+ if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
+ v = iaf_reload_count_to_perfctr_value(v);
+
+ /* Turn off fixed counters */
+ msr = rdmsr(IAF_CTRL) & ~IAF_CTRL_MASK;
+ wrmsr(IAF_CTRL, msr);
+
+ wrmsr(IAF_CTR0 + ri, v & ((1ULL << core_iaf_width) - 1));
+
+ /* Turn on fixed counters */
+ msr = rdmsr(IAF_CTRL) & ~IAF_CTRL_MASK;
+ wrmsr(IAF_CTRL, msr | (cc->pc_iafctrl & IAF_CTRL_MASK));
+
+ PMCDBG(MDP,WRI,1, "iaf-write cpu=%d ri=%d msr=0x%x v=%jx iafctrl=%jx "
+ "pmc=%jx", cpu, ri, IAF_RI_TO_MSR(ri), v,
+ (uintmax_t) rdmsr(IAF_CTRL),
+ (uintmax_t) rdpmc(IAF_RI_TO_MSR(ri)));
+
+ return (0);
+}
+
+
+static void
+iaf_initialize(struct pmc_mdep *md, int maxcpu, int npmc, int pmcwidth)
+{
+ struct pmc_classdep *pcd;
+
+ KASSERT(md != NULL, ("[iaf,%d] md is NULL", __LINE__));
+
+ PMCDBG(MDP,INI,1, "%s", "iaf-initialize");
+
+ pcd = &md->pmd_classdep[PMC_MDEP_CLASS_INDEX_IAF];
+
+ pcd->pcd_caps = IAF_PMC_CAPS;
+ pcd->pcd_class = PMC_CLASS_IAF;
+ pcd->pcd_num = npmc;
+ pcd->pcd_ri = md->pmd_npmc;
+ pcd->pcd_width = pmcwidth;
+
+ pcd->pcd_allocate_pmc = iaf_allocate_pmc;
+ pcd->pcd_config_pmc = iaf_config_pmc;
+ pcd->pcd_describe = iaf_describe;
+ pcd->pcd_get_config = iaf_get_config;
+ pcd->pcd_get_msr = iaf_get_msr;
+ pcd->pcd_pcpu_fini = core_pcpu_noop;
+ pcd->pcd_pcpu_init = core_pcpu_noop;
+ pcd->pcd_read_pmc = iaf_read_pmc;
+ pcd->pcd_release_pmc = iaf_release_pmc;
+ pcd->pcd_start_pmc = iaf_start_pmc;
+ pcd->pcd_stop_pmc = iaf_stop_pmc;
+ pcd->pcd_write_pmc = iaf_write_pmc;
+
+ md->pmd_npmc += npmc;
+}
+
+/*
+ * Intel programmable PMCs.
+ */
+
+/*
+ * Event descriptor tables.
+ *
+ * For each event id, we track:
+ *
+ * 1. The CPUs that the event is valid for.
+ *
+ * 2. If the event uses a fixed UMASK, the value of the umask field.
+ * If the event doesn't use a fixed UMASK, a mask of legal bits
+ * to check against.
+ */
+
+struct iap_event_descr {
+ enum pmc_event iap_ev;
+ unsigned char iap_evcode;
+ unsigned char iap_umask;
+ unsigned int iap_flags;
+};
+
+#define IAP_F_CC (1 << 0) /* CPU: Core */
+#define IAP_F_CC2 (1 << 1) /* CPU: Core2 family */
+#define IAP_F_CC2E (1 << 2) /* CPU: Core2 Extreme only */
+#define IAP_F_CA (1 << 3) /* CPU: Atom */
+#define IAP_F_I7 (1 << 4) /* CPU: Core i7 */
+#define IAP_F_I7O (1 << 4) /* CPU: Core i7 (old events; same bit as IAP_F_I7) */
+#define IAP_F_WM (1 << 5) /* CPU: Westmere */
+#define IAP_F_SB (1 << 6) /* CPU: Sandy Bridge */
+#define IAP_F_IB (1 << 7) /* CPU: Ivy Bridge */
+#define IAP_F_SBX (1 << 8) /* CPU: Sandy Bridge Xeon */
+#define IAP_F_IBX (1 << 9) /* CPU: Ivy Bridge Xeon */
+#define IAP_F_FM (1 << 10) /* Fixed mask */
+
+#define IAP_F_ALLCPUSCORE2 \
+ (IAP_F_CC | IAP_F_CC2 | IAP_F_CC2E | IAP_F_CA)
+
+/* Sub fields of UMASK that this event supports. */
+#define IAP_M_CORE (1 << 0) /* Core specificity */
+#define IAP_M_AGENT (1 << 1) /* Agent specificity */
+#define IAP_M_PREFETCH (1 << 2) /* Prefetch */
+#define IAP_M_MESI (1 << 3) /* MESI */
+#define IAP_M_SNOOPRESPONSE (1 << 4) /* Snoop response */
+#define IAP_M_SNOOPTYPE (1 << 5) /* Snoop type */
+#define IAP_M_TRANSITION (1 << 6) /* Transition */
+
+#define IAP_F_CORE (0x3 << 14) /* Core specificity */
+#define IAP_F_AGENT (0x1 << 13) /* Agent specificity */
+#define IAP_F_PREFETCH (0x3 << 12) /* Prefetch */
+#define IAP_F_MESI (0xF << 8) /* MESI */
+#define IAP_F_SNOOPRESPONSE (0xB << 8) /* Snoop response */
+#define IAP_F_SNOOPTYPE (0x3 << 8) /* Snoop type */
+#define IAP_F_TRANSITION (0x1 << 12) /* Transition */
+
+#define IAP_PREFETCH_RESERVED (0x2 << 12)
+#define IAP_CORE_THIS (0x1 << 14)
+#define IAP_CORE_ALL (0x3 << 14)
+#define IAP_F_CMASK 0xFF000000
+
+static struct iap_event_descr iap_events[] = {
+#undef IAPDESCR
+#define IAPDESCR(N,EV,UM,FLAGS) { \
+ .iap_ev = PMC_EV_IAP_EVENT_##N, \
+ .iap_evcode = (EV), \
+ .iap_umask = (UM), \
+ .iap_flags = (FLAGS) \
+ }
+
+ IAPDESCR(02H_01H, 0x02, 0x01, IAP_F_FM | IAP_F_I7O),
+ IAPDESCR(02H_81H, 0x02, 0x81, IAP_F_FM | IAP_F_CA),
+
+ IAPDESCR(03H_00H, 0x03, 0x00, IAP_F_FM | IAP_F_CC),
+ IAPDESCR(03H_01H, 0x03, 0x01, IAP_F_FM | IAP_F_I7O | IAP_F_SB |
+ IAP_F_SBX),
+ IAPDESCR(03H_02H, 0x03, 0x02, IAP_F_FM | IAP_F_CA | IAP_F_CC2 |
+ IAP_F_WM | IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(03H_04H, 0x03, 0x04, IAP_F_FM | IAP_F_CA | IAP_F_CC2 | IAP_F_I7O),
+ IAPDESCR(03H_08H, 0x03, 0x08, IAP_F_FM | IAP_F_CA | IAP_F_CC2 | IAP_F_SB |
+ IAP_F_SBX),
+ IAPDESCR(03H_10H, 0x03, 0x10, IAP_F_FM | IAP_F_CA | IAP_F_CC2 | IAP_F_SB |
+ IAP_F_SBX),
+ IAPDESCR(03H_20H, 0x03, 0x20, IAP_F_FM | IAP_F_CA | IAP_F_CC2),
+
+ IAPDESCR(04H_00H, 0x04, 0x00, IAP_F_FM | IAP_F_CC),
+ IAPDESCR(04H_01H, 0x04, 0x01, IAP_F_FM | IAP_F_CA | IAP_F_CC2 | IAP_F_I7O),
+ IAPDESCR(04H_02H, 0x04, 0x02, IAP_F_FM | IAP_F_CA | IAP_F_CC2),
+ IAPDESCR(04H_07H, 0x04, 0x07, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(04H_08H, 0x04, 0x08, IAP_F_FM | IAP_F_CA | IAP_F_CC2),
+
+ IAPDESCR(05H_00H, 0x05, 0x00, IAP_F_FM | IAP_F_CC),
+ IAPDESCR(05H_01H, 0x05, 0x01, IAP_F_FM | IAP_F_I7O | IAP_F_SB | IAP_F_IB |
+ IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(05H_02H, 0x05, 0x02, IAP_F_FM | IAP_F_I7O | IAP_F_WM | IAP_F_SB | IAP_F_IB |
+ IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(05H_03H, 0x05, 0x03, IAP_F_FM | IAP_F_I7O),
+
+ IAPDESCR(06H_00H, 0x06, 0x00, IAP_F_FM | IAP_F_CC | IAP_F_CC2 |
+ IAP_F_CC2E | IAP_F_CA),
+ IAPDESCR(06H_01H, 0x06, 0x01, IAP_F_FM | IAP_F_I7O),
+ IAPDESCR(06H_02H, 0x06, 0x02, IAP_F_FM | IAP_F_I7O),
+ IAPDESCR(06H_04H, 0x06, 0x04, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(06H_08H, 0x06, 0x08, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(06H_0FH, 0x06, 0x0F, IAP_F_FM | IAP_F_I7O),
+
+ IAPDESCR(07H_00H, 0x07, 0x00, IAP_F_FM | IAP_F_CC | IAP_F_CC2),
+ IAPDESCR(07H_01H, 0x07, 0x01, IAP_F_FM | IAP_F_ALLCPUSCORE2 |
+ IAP_F_I7 | IAP_F_WM | IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(07H_02H, 0x07, 0x02, IAP_F_FM | IAP_F_ALLCPUSCORE2),
+ IAPDESCR(07H_03H, 0x07, 0x03, IAP_F_FM | IAP_F_ALLCPUSCORE2),
+ IAPDESCR(07H_06H, 0x07, 0x06, IAP_F_FM | IAP_F_CA),
+ IAPDESCR(07H_08H, 0x07, 0x08, IAP_F_FM | IAP_F_CA | IAP_F_SB |
+ IAP_F_SBX),
+
+ IAPDESCR(08H_01H, 0x08, 0x01, IAP_F_FM | IAP_F_CA | IAP_F_CC2 |
+ IAP_F_I7 | IAP_F_WM | IAP_F_SB | IAP_F_SBX),
+ IAPDESCR(08H_02H, 0x08, 0x02, IAP_F_FM | IAP_F_CA | IAP_F_CC2 |
+ IAP_F_I7 | IAP_F_WM | IAP_F_SB | IAP_F_SBX),
+ IAPDESCR(08H_04H, 0x08, 0x04, IAP_F_FM | IAP_F_CA | IAP_F_CC2 |
+ IAP_F_WM | IAP_F_SB | IAP_F_SBX),
+ IAPDESCR(08H_05H, 0x08, 0x05, IAP_F_FM | IAP_F_CA),
+ IAPDESCR(08H_06H, 0x08, 0x06, IAP_F_FM | IAP_F_CA),
+ IAPDESCR(08H_07H, 0x08, 0x07, IAP_F_FM | IAP_F_CA),
+ IAPDESCR(08H_08H, 0x08, 0x08, IAP_F_FM | IAP_F_CA | IAP_F_CC2),
+ IAPDESCR(08H_09H, 0x08, 0x09, IAP_F_FM | IAP_F_CA),
+ IAPDESCR(08H_10H, 0x08, 0x10, IAP_F_FM | IAP_F_I7 | IAP_F_WM | IAP_F_SB |
+ IAP_F_SBX),
+ IAPDESCR(08H_20H, 0x08, 0x20, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(08H_40H, 0x08, 0x40, IAP_F_FM | IAP_F_I7O),
+ IAPDESCR(08H_80H, 0x08, 0x80, IAP_F_FM | IAP_F_I7),
+ IAPDESCR(08H_81H, 0x08, 0x81, IAP_F_FM | IAP_F_IB | IAP_F_IBX),
+ IAPDESCR(08H_82H, 0x08, 0x82, IAP_F_FM | IAP_F_IB | IAP_F_IBX),
+ IAPDESCR(08H_84H, 0x08, 0x84, IAP_F_FM | IAP_F_IB | IAP_F_IBX),
+
+ IAPDESCR(09H_01H, 0x09, 0x01, IAP_F_FM | IAP_F_CA | IAP_F_CC2 | IAP_F_I7O),
+ IAPDESCR(09H_02H, 0x09, 0x02, IAP_F_FM | IAP_F_CA | IAP_F_CC2 | IAP_F_I7O),
+ IAPDESCR(09H_04H, 0x09, 0x04, IAP_F_FM | IAP_F_I7O),
+ IAPDESCR(09H_08H, 0x09, 0x08, IAP_F_FM | IAP_F_I7O),
+
+ IAPDESCR(0BH_01H, 0x0B, 0x01, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(0BH_02H, 0x0B, 0x02, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(0BH_10H, 0x0B, 0x10, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+
+ IAPDESCR(0CH_01H, 0x0C, 0x01, IAP_F_FM | IAP_F_CC2 | IAP_F_I7 |
+ IAP_F_WM),
+ IAPDESCR(0CH_02H, 0x0C, 0x02, IAP_F_FM | IAP_F_CC2),
+ IAPDESCR(0CH_03H, 0x0C, 0x03, IAP_F_FM | IAP_F_CA),
+
+ IAPDESCR(0DH_03H, 0x0D, 0x03, IAP_F_FM | IAP_F_SB | IAP_F_SBX),
+ IAPDESCR(0DH_40H, 0x0D, 0x40, IAP_F_FM | IAP_F_SB | IAP_F_SBX),
+
+ IAPDESCR(0EH_01H, 0x0E, 0x01, IAP_F_FM | IAP_F_I7 | IAP_F_WM | IAP_F_SB | IAP_F_IB |
+ IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(0EH_02H, 0x0E, 0x02, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(0EH_10H, 0x0E, 0x10, IAP_F_FM | IAP_F_IB | IAP_F_IBX),
+ IAPDESCR(0EH_20H, 0x0E, 0x20, IAP_F_FM | IAP_F_IB | IAP_F_IBX),
+ IAPDESCR(0EH_40H, 0x0E, 0x40, IAP_F_FM | IAP_F_IB | IAP_F_IBX),
+
+ IAPDESCR(0FH_01H, 0x0F, 0x01, IAP_F_FM | IAP_F_I7),
+ IAPDESCR(0FH_02H, 0x0F, 0x02, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(0FH_08H, 0x0F, 0x08, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(0FH_10H, 0x0F, 0x10, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(0FH_20H, 0x0F, 0x20, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(0FH_80H, 0x0F, 0x80, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+
+ IAPDESCR(10H_00H, 0x10, 0x00, IAP_F_FM | IAP_F_ALLCPUSCORE2),
+ IAPDESCR(10H_01H, 0x10, 0x01, IAP_F_FM | IAP_F_CA | IAP_F_I7 |
+ IAP_F_WM | IAP_F_SB | IAP_F_SBX),
+ IAPDESCR(10H_02H, 0x10, 0x02, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(10H_04H, 0x10, 0x04, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(10H_08H, 0x10, 0x08, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(10H_10H, 0x10, 0x10, IAP_F_FM | IAP_F_I7 | IAP_F_WM | IAP_F_SB |
+ IAP_F_SBX),
+ IAPDESCR(10H_20H, 0x10, 0x20, IAP_F_FM | IAP_F_I7 | IAP_F_WM | IAP_F_SB |
+ IAP_F_SBX),
+ IAPDESCR(10H_40H, 0x10, 0x40, IAP_F_FM | IAP_F_I7 | IAP_F_WM | IAP_F_SB |
+ IAP_F_SBX),
+ IAPDESCR(10H_80H, 0x10, 0x80, IAP_F_FM | IAP_F_I7 | IAP_F_WM | IAP_F_SB |
+ IAP_F_SBX),
+ IAPDESCR(10H_81H, 0x10, 0x81, IAP_F_FM | IAP_F_CA),
+
+ IAPDESCR(11H_00H, 0x11, 0x00, IAP_F_FM | IAP_F_CC | IAP_F_CC2),
+ IAPDESCR(11H_01H, 0x11, 0x01, IAP_F_FM | IAP_F_CA | IAP_F_SB |
+ IAP_F_SBX),
+ IAPDESCR(11H_02H, 0x11, 0x02, IAP_F_FM | IAP_F_SB | IAP_F_SBX),
+ IAPDESCR(11H_81H, 0x11, 0x81, IAP_F_FM | IAP_F_CA),
+
+ IAPDESCR(12H_00H, 0x12, 0x00, IAP_F_FM | IAP_F_ALLCPUSCORE2),
+ IAPDESCR(12H_01H, 0x12, 0x01, IAP_F_FM | IAP_F_CA | IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(12H_02H, 0x12, 0x02, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(12H_04H, 0x12, 0x04, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(12H_08H, 0x12, 0x08, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(12H_10H, 0x12, 0x10, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(12H_20H, 0x12, 0x20, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(12H_40H, 0x12, 0x40, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(12H_81H, 0x12, 0x81, IAP_F_FM | IAP_F_CA),
+
+ IAPDESCR(13H_00H, 0x13, 0x00, IAP_F_FM | IAP_F_ALLCPUSCORE2),
+ IAPDESCR(13H_01H, 0x13, 0x01, IAP_F_FM | IAP_F_CA | IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(13H_02H, 0x13, 0x02, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(13H_04H, 0x13, 0x04, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(13H_07H, 0x13, 0x07, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(13H_81H, 0x13, 0x81, IAP_F_FM | IAP_F_CA),
+
+ IAPDESCR(14H_00H, 0x14, 0x00, IAP_F_FM | IAP_F_CC | IAP_F_CC2),
+ IAPDESCR(14H_01H, 0x14, 0x01, IAP_F_FM | IAP_F_CA | IAP_F_I7 |
+ IAP_F_WM | IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(14H_02H, 0x14, 0x02, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+
+ IAPDESCR(17H_01H, 0x17, 0x01, IAP_F_FM | IAP_F_I7 | IAP_F_WM | IAP_F_SB |
+ IAP_F_SBX),
+
+ IAPDESCR(18H_00H, 0x18, 0x00, IAP_F_FM | IAP_F_CA | IAP_F_CC2),
+ IAPDESCR(18H_01H, 0x18, 0x01, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+
+ IAPDESCR(19H_00H, 0x19, 0x00, IAP_F_FM | IAP_F_CA | IAP_F_CC2),
+ IAPDESCR(19H_01H, 0x19, 0x01, IAP_F_FM | IAP_F_CA | IAP_F_CC2 |
+ IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(19H_02H, 0x19, 0x02, IAP_F_FM | IAP_F_CA | IAP_F_CC2),
+
+ IAPDESCR(1DH_01H, 0x1D, 0x01, IAP_F_FM | IAP_F_I7O),
+ IAPDESCR(1DH_02H, 0x1D, 0x02, IAP_F_FM | IAP_F_I7O),
+ IAPDESCR(1DH_04H, 0x1D, 0x04, IAP_F_FM | IAP_F_I7O),
+
+ IAPDESCR(1EH_01H, 0x1E, 0x01, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+
+ IAPDESCR(20H_01H, 0x20, 0x01, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(21H, 0x21, IAP_M_CORE, IAP_F_ALLCPUSCORE2),
+ IAPDESCR(22H, 0x22, IAP_M_CORE, IAP_F_CC2),
+ IAPDESCR(23H, 0x23, IAP_M_CORE, IAP_F_ALLCPUSCORE2),
+
+ IAPDESCR(24H, 0x24, IAP_M_CORE | IAP_M_PREFETCH, IAP_F_ALLCPUSCORE2),
+ IAPDESCR(24H_01H, 0x24, 0x01, IAP_F_FM | IAP_F_I7 | IAP_F_WM | IAP_F_SB | IAP_F_IB |
+ IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(24H_02H, 0x24, 0x02, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(24H_03H, 0x24, 0x03, IAP_F_FM | IAP_F_I7 | IAP_F_WM | IAP_F_SB | IAP_F_IB |
+ IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(24H_04H, 0x24, 0x04, IAP_F_FM | IAP_F_I7 | IAP_F_WM | IAP_F_SB | IAP_F_IB |
+ IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(24H_08H, 0x24, 0x08, IAP_F_FM | IAP_F_I7 | IAP_F_WM | IAP_F_SB | IAP_F_IB |
+ IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(24H_0CH, 0x24, 0x0C, IAP_F_FM | IAP_F_I7 | IAP_F_WM | IAP_F_SB | IAP_F_IB |
+ IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(24H_10H, 0x24, 0x10, IAP_F_FM | IAP_F_I7 | IAP_F_WM | IAP_F_SB | IAP_F_IB |
+ IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(24H_20H, 0x24, 0x20, IAP_F_FM | IAP_F_I7 | IAP_F_WM | IAP_F_SB | IAP_F_IB |
+ IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(24H_30H, 0x24, 0x30, IAP_F_FM | IAP_F_I7 | IAP_F_WM | IAP_F_SB | IAP_F_IB |
+ IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(24H_40H, 0x24, 0x40, IAP_F_FM | IAP_F_I7 | IAP_F_WM | IAP_F_SB | IAP_F_IB |
+ IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(24H_80H, 0x24, 0x80, IAP_F_FM | IAP_F_I7 | IAP_F_WM | IAP_F_SB | IAP_F_IB |
+ IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(24H_C0H, 0x24, 0xC0, IAP_F_FM | IAP_F_I7 | IAP_F_WM | IAP_F_SB | IAP_F_IB |
+ IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(24H_AAH, 0x24, 0xAA, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(24H_FFH, 0x24, 0xFF, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+
+ IAPDESCR(25H, 0x25, IAP_M_CORE, IAP_F_ALLCPUSCORE2),
+
+ IAPDESCR(26H, 0x26, IAP_M_CORE | IAP_M_PREFETCH, IAP_F_ALLCPUSCORE2),
+ IAPDESCR(26H_01H, 0x26, 0x01, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(26H_02H, 0x26, 0x02, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(26H_04H, 0x26, 0x04, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(26H_08H, 0x26, 0x08, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(26H_0FH, 0x26, 0x0F, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(26H_10H, 0x26, 0x10, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(26H_20H, 0x26, 0x20, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(26H_40H, 0x26, 0x40, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(26H_80H, 0x26, 0x80, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(26H_F0H, 0x26, 0xF0, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(26H_FFH, 0x26, 0xFF, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+
+ IAPDESCR(27H, 0x27, IAP_M_CORE | IAP_M_PREFETCH, IAP_F_ALLCPUSCORE2),
+ IAPDESCR(27H_01H, 0x27, 0x01, IAP_F_FM | IAP_F_I7 | IAP_F_WM | IAP_F_SB | IAP_F_IB |
+ IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(27H_02H, 0x27, 0x02, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(27H_04H, 0x27, 0x04, IAP_F_FM | IAP_F_I7O | IAP_F_SB |
+ IAP_F_SBX),
+ IAPDESCR(27H_08H, 0x27, 0x08, IAP_F_FM | IAP_F_I7 | IAP_F_WM | IAP_F_SB | IAP_F_IB |
+ IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(27H_0EH, 0x27, 0x0E, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(27H_0FH, 0x27, 0x0F, IAP_F_FM | IAP_F_I7 | IAP_F_WM | IAP_F_SB | IAP_F_IB |
+ IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(27H_10H, 0x27, 0x10, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(27H_20H, 0x27, 0x20, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(27H_40H, 0x27, 0x40, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(27H_80H, 0x27, 0x80, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(27H_E0H, 0x27, 0xE0, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(27H_F0H, 0x27, 0xF0, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+
+ IAPDESCR(28H, 0x28, IAP_M_CORE | IAP_M_MESI, IAP_F_ALLCPUSCORE2),
+ IAPDESCR(28H_01H, 0x28, 0x01, IAP_F_FM | IAP_F_I7 | IAP_F_WM | IAP_F_IB | IAP_F_SBX |
+ IAP_F_IBX),
+ IAPDESCR(28H_02H, 0x28, 0x02, IAP_F_FM | IAP_F_I7 | IAP_F_WM | IAP_F_SBX),
+ IAPDESCR(28H_04H, 0x28, 0x04, IAP_F_FM | IAP_F_I7 | IAP_F_WM | IAP_F_SB | IAP_F_IB |
+ IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(28H_08H, 0x28, 0x08, IAP_F_FM | IAP_F_I7 | IAP_F_WM | IAP_F_SB | IAP_F_IB |
+ IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(28H_0FH, 0x28, 0x0F, IAP_F_FM | IAP_F_I7 | IAP_F_WM | IAP_F_IB | IAP_F_SBX |
+ IAP_F_IBX),
+
+ IAPDESCR(29H, 0x29, IAP_M_CORE | IAP_M_MESI, IAP_F_CC),
+ IAPDESCR(29H, 0x29, IAP_M_CORE | IAP_M_MESI | IAP_M_PREFETCH,
+ IAP_F_CA | IAP_F_CC2),
+ IAPDESCR(2AH, 0x2A, IAP_M_CORE | IAP_M_MESI, IAP_F_ALLCPUSCORE2),
+ IAPDESCR(2BH, 0x2B, IAP_M_CORE | IAP_M_MESI, IAP_F_CA | IAP_F_CC2),
+
+ IAPDESCR(2EH, 0x2E, IAP_M_CORE | IAP_M_MESI | IAP_M_PREFETCH,
+ IAP_F_ALLCPUSCORE2),
+ IAPDESCR(2EH_01H, 0x2E, 0x01, IAP_F_FM | IAP_F_WM),
+ IAPDESCR(2EH_02H, 0x2E, 0x02, IAP_F_FM | IAP_F_WM),
+ IAPDESCR(2EH_41H, 0x2E, 0x41, IAP_F_FM | IAP_F_ALLCPUSCORE2 | IAP_F_I7 |
+ IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(2EH_4FH, 0x2E, 0x4F, IAP_F_FM | IAP_F_ALLCPUSCORE2 | IAP_F_I7 |
+ IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+
+ IAPDESCR(30H, 0x30, IAP_M_CORE | IAP_M_MESI | IAP_M_PREFETCH,
+ IAP_F_ALLCPUSCORE2),
+ IAPDESCR(32H, 0x32, IAP_M_CORE | IAP_M_MESI | IAP_M_PREFETCH, IAP_F_CC),
+ IAPDESCR(32H, 0x32, IAP_M_CORE, IAP_F_CA | IAP_F_CC2),
+
+ IAPDESCR(3AH, 0x3A, IAP_M_TRANSITION, IAP_F_CC),
+ IAPDESCR(3AH_00H, 0x3A, 0x00, IAP_F_FM | IAP_F_CA | IAP_F_CC2),
+
+ IAPDESCR(3BH_C0H, 0x3B, 0xC0, IAP_F_FM | IAP_F_ALLCPUSCORE2),
+
+ IAPDESCR(3CH_00H, 0x3C, 0x00, IAP_F_FM | IAP_F_ALLCPUSCORE2 |
+ IAP_F_I7 | IAP_F_WM | IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(3CH_01H, 0x3C, 0x01, IAP_F_FM | IAP_F_ALLCPUSCORE2 |
+ IAP_F_I7 | IAP_F_WM | IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(3CH_02H, 0x3C, 0x02, IAP_F_FM | IAP_F_ALLCPUSCORE2),
+
+ IAPDESCR(3DH_01H, 0x3D, 0x01, IAP_F_FM | IAP_F_I7O),
+
+ IAPDESCR(40H, 0x40, IAP_M_MESI, IAP_F_CC | IAP_F_CC2),
+ IAPDESCR(40H_01H, 0x40, 0x01, IAP_F_FM | IAP_F_I7),
+ IAPDESCR(40H_02H, 0x40, 0x02, IAP_F_FM | IAP_F_I7),
+ IAPDESCR(40H_04H, 0x40, 0x04, IAP_F_FM | IAP_F_I7),
+ IAPDESCR(40H_08H, 0x40, 0x08, IAP_F_FM | IAP_F_I7),
+ IAPDESCR(40H_0FH, 0x40, 0x0F, IAP_F_FM | IAP_F_I7),
+ IAPDESCR(40H_21H, 0x40, 0x21, IAP_F_FM | IAP_F_CA),
+
+ IAPDESCR(41H, 0x41, IAP_M_MESI, IAP_F_CC | IAP_F_CC2),
+ IAPDESCR(41H_01H, 0x41, 0x01, IAP_F_FM | IAP_F_I7O),
+ IAPDESCR(41H_02H, 0x41, 0x02, IAP_F_FM | IAP_F_I7),
+ IAPDESCR(41H_04H, 0x41, 0x04, IAP_F_FM | IAP_F_I7),
+ IAPDESCR(41H_08H, 0x41, 0x08, IAP_F_FM | IAP_F_I7),
+ IAPDESCR(41H_0FH, 0x41, 0x0F, IAP_F_FM | IAP_F_I7O),
+ IAPDESCR(41H_22H, 0x41, 0x22, IAP_F_FM | IAP_F_CA),
+
+ IAPDESCR(42H, 0x42, IAP_M_MESI, IAP_F_ALLCPUSCORE2),
+ IAPDESCR(42H_01H, 0x42, 0x01, IAP_F_FM | IAP_F_I7),
+ IAPDESCR(42H_02H, 0x42, 0x02, IAP_F_FM | IAP_F_I7),
+ IAPDESCR(42H_04H, 0x42, 0x04, IAP_F_FM | IAP_F_I7),
+ IAPDESCR(42H_08H, 0x42, 0x08, IAP_F_FM | IAP_F_I7),
+ IAPDESCR(42H_10H, 0x42, 0x10, IAP_F_FM | IAP_F_CA | IAP_F_CC2),
+
+ IAPDESCR(43H_01H, 0x43, 0x01, IAP_F_FM | IAP_F_ALLCPUSCORE2 |
+ IAP_F_I7),
+ IAPDESCR(43H_02H, 0x43, 0x02, IAP_F_FM | IAP_F_CA |
+ IAP_F_CC2 | IAP_F_I7),
+
+ IAPDESCR(44H_02H, 0x44, 0x02, IAP_F_FM | IAP_F_CC),
+
+ IAPDESCR(45H_0FH, 0x45, 0x0F, IAP_F_FM | IAP_F_ALLCPUSCORE2),
+
+ IAPDESCR(46H_00H, 0x46, 0x00, IAP_F_FM | IAP_F_ALLCPUSCORE2),
+ IAPDESCR(47H_00H, 0x47, 0x00, IAP_F_FM | IAP_F_ALLCPUSCORE2),
+
+ IAPDESCR(48H_00H, 0x48, 0x00, IAP_F_FM | IAP_F_ALLCPUSCORE2),
+ IAPDESCR(48H_01H, 0x48, 0x01, IAP_F_FM | IAP_F_SB | IAP_F_IB |
+ IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(48H_02H, 0x48, 0x02, IAP_F_FM | IAP_F_I7O),
+
+ IAPDESCR(49H_00H, 0x49, 0x00, IAP_F_FM | IAP_F_CC),
+ IAPDESCR(49H_01H, 0x49, 0x01, IAP_F_FM | IAP_F_CA | IAP_F_CC2 |
+ IAP_F_I7 | IAP_F_WM | IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(49H_02H, 0x49, 0x02, IAP_F_FM | IAP_F_CA | IAP_F_CC2 |
+ IAP_F_I7 | IAP_F_WM | IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(49H_04H, 0x49, 0x04, IAP_F_FM | IAP_F_WM | IAP_F_SB | IAP_F_IB |
+ IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(49H_10H, 0x49, 0x10, IAP_F_FM | IAP_F_I7 | IAP_F_WM |
+ IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(49H_20H, 0x49, 0x20, IAP_F_FM | IAP_F_I7),
+ IAPDESCR(49H_40H, 0x49, 0x40, IAP_F_FM | IAP_F_I7O),
+ IAPDESCR(49H_80H, 0x49, 0x80, IAP_F_FM | IAP_F_WM | IAP_F_I7),
+
+ IAPDESCR(4BH_00H, 0x4B, 0x00, IAP_F_FM | IAP_F_ALLCPUSCORE2),
+ IAPDESCR(4BH_01H, 0x4B, 0x01, IAP_F_FM | IAP_F_ALLCPUSCORE2 | IAP_F_I7O),
+ IAPDESCR(4BH_02H, 0x4B, 0x02, IAP_F_FM | IAP_F_ALLCPUSCORE2),
+ IAPDESCR(4BH_03H, 0x4B, 0x03, IAP_F_FM | IAP_F_CC),
+ IAPDESCR(4BH_08H, 0x4B, 0x08, IAP_F_FM | IAP_F_I7O),
+
+ IAPDESCR(4CH_00H, 0x4C, 0x00, IAP_F_FM | IAP_F_CA | IAP_F_CC2),
+ IAPDESCR(4CH_01H, 0x4C, 0x01, IAP_F_FM | IAP_F_I7 | IAP_F_WM |
+ IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(4CH_02H, 0x4C, 0x02, IAP_F_FM | IAP_F_SB | IAP_F_IB |
+ IAP_F_SBX | IAP_F_IBX),
+
+ IAPDESCR(4DH_01H, 0x4D, 0x01, IAP_F_FM | IAP_F_I7O),
+
+ IAPDESCR(4EH_01H, 0x4E, 0x01, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(4EH_02H, 0x4E, 0x02, IAP_F_FM | IAP_F_I7 | IAP_F_WM |
+ IAP_F_SB | IAP_F_SBX),
+ IAPDESCR(4EH_04H, 0x4E, 0x04, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(4EH_10H, 0x4E, 0x10, IAP_F_FM | IAP_F_CA | IAP_F_CC2),
+
+ IAPDESCR(4FH_00H, 0x4F, 0x00, IAP_F_FM | IAP_F_CC),
+ IAPDESCR(4FH_02H, 0x4F, 0x02, IAP_F_FM | IAP_F_I7O),
+ IAPDESCR(4FH_04H, 0x4F, 0x04, IAP_F_FM | IAP_F_I7O),
+ IAPDESCR(4FH_08H, 0x4F, 0x08, IAP_F_FM | IAP_F_I7O),
+ IAPDESCR(4FH_10H, 0x4F, 0x10, IAP_F_FM | IAP_F_WM),
+
+ IAPDESCR(51H_01H, 0x51, 0x01, IAP_F_FM | IAP_F_I7 | IAP_F_WM |
+ IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(51H_02H, 0x51, 0x02, IAP_F_FM | IAP_F_I7 | IAP_F_WM |
+ IAP_F_SB | IAP_F_SBX),
+ IAPDESCR(51H_04H, 0x51, 0x04, IAP_F_FM | IAP_F_I7 | IAP_F_WM |
+ IAP_F_SB | IAP_F_SBX),
+ IAPDESCR(51H_08H, 0x51, 0x08, IAP_F_FM | IAP_F_I7 | IAP_F_WM |
+ IAP_F_SB | IAP_F_SBX),
+
+ IAPDESCR(52H_01H, 0x52, 0x01, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+
+ IAPDESCR(53H_01H, 0x53, 0x01, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+
+ IAPDESCR(58H_01H, 0x58, 0x01, IAP_F_FM | IAP_F_IB | IAP_F_IBX),
+ IAPDESCR(58H_02H, 0x58, 0x02, IAP_F_FM | IAP_F_IB | IAP_F_IBX),
+ IAPDESCR(58H_04H, 0x58, 0x04, IAP_F_FM | IAP_F_IB | IAP_F_IBX),
+ IAPDESCR(58H_08H, 0x58, 0x08, IAP_F_FM | IAP_F_IB | IAP_F_IBX),
+
+ IAPDESCR(59H_20H, 0x59, 0x20, IAP_F_FM | IAP_F_SB | IAP_F_SBX),
+ IAPDESCR(59H_40H, 0x59, 0x40, IAP_F_FM | IAP_F_SB | IAP_F_SBX),
+ IAPDESCR(59H_80H, 0x59, 0x80, IAP_F_FM | IAP_F_SB | IAP_F_SBX),
+
+ IAPDESCR(5BH_0CH, 0x5B, 0x0C, IAP_F_FM | IAP_F_SB | IAP_F_SBX),
+ IAPDESCR(5BH_0FH, 0x5B, 0x0F, IAP_F_FM | IAP_F_SB | IAP_F_SBX),
+ IAPDESCR(5BH_40H, 0x5B, 0x40, IAP_F_FM | IAP_F_SB | IAP_F_SBX),
+ IAPDESCR(5BH_4FH, 0x5B, 0x4F, IAP_F_FM | IAP_F_SB | IAP_F_SBX),
+
+ IAPDESCR(5CH_01H, 0x5C, 0x01, IAP_F_FM | IAP_F_SB | IAP_F_IB |
+ IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(5CH_02H, 0x5C, 0x02, IAP_F_FM | IAP_F_SB | IAP_F_IB |
+ IAP_F_SBX | IAP_F_IBX),
+
+ IAPDESCR(5EH_01H, 0x5E, 0x01, IAP_F_FM | IAP_F_SB | IAP_F_IB |
+ IAP_F_SBX | IAP_F_IBX),
+
+ IAPDESCR(5FH_01H, 0x5F, 0x01, IAP_F_FM | IAP_F_IB),
+ IAPDESCR(5FH_04H, 0x5F, 0x04, IAP_F_IBX),
+
+ IAPDESCR(60H, 0x60, IAP_M_AGENT | IAP_M_CORE, IAP_F_ALLCPUSCORE2),
+ IAPDESCR(60H_01H, 0x60, 0x01, IAP_F_FM | IAP_F_WM | IAP_F_I7O |
+ IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(60H_02H, 0x60, 0x02, IAP_F_FM | IAP_F_WM | IAP_F_I7O | IAP_F_IB |
+ IAP_F_IBX),
+ IAPDESCR(60H_04H, 0x60, 0x04, IAP_F_FM | IAP_F_WM | IAP_F_I7O |
+ IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(60H_08H, 0x60, 0x08, IAP_F_FM | IAP_F_WM | IAP_F_I7O |
+ IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+
+ IAPDESCR(61H, 0x61, IAP_M_AGENT, IAP_F_CA | IAP_F_CC2),
+ IAPDESCR(61H_00H, 0x61, 0x00, IAP_F_FM | IAP_F_CC),
+
+ IAPDESCR(62H, 0x62, IAP_M_AGENT, IAP_F_ALLCPUSCORE2),
+ IAPDESCR(62H_00H, 0x62, 0x00, IAP_F_FM | IAP_F_CC),
+
+ IAPDESCR(63H, 0x63, IAP_M_AGENT | IAP_M_CORE,
+ IAP_F_CA | IAP_F_CC2),
+ IAPDESCR(63H, 0x63, IAP_M_CORE, IAP_F_CC),
+ IAPDESCR(63H_01H, 0x63, 0x01, IAP_F_FM | IAP_F_I7 | IAP_F_WM |
+ IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(63H_02H, 0x63, 0x02, IAP_F_FM | IAP_F_I7 | IAP_F_WM |
+ IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+
+ IAPDESCR(64H, 0x64, IAP_M_CORE, IAP_F_CA | IAP_F_CC2),
+ IAPDESCR(64H_40H, 0x64, 0x40, IAP_F_FM | IAP_F_CC),
+
+ IAPDESCR(65H, 0x65, IAP_M_AGENT | IAP_M_CORE,
+ IAP_F_CA | IAP_F_CC2),
+ IAPDESCR(65H, 0x65, IAP_M_CORE, IAP_F_CC),
+
+ IAPDESCR(66H, 0x66, IAP_M_AGENT | IAP_M_CORE, IAP_F_ALLCPUSCORE2),
+
+ IAPDESCR(67H, 0x67, IAP_M_AGENT | IAP_M_CORE, IAP_F_CA | IAP_F_CC2),
+ IAPDESCR(67H, 0x67, IAP_M_AGENT, IAP_F_CC),
+
+ IAPDESCR(68H, 0x68, IAP_M_AGENT | IAP_M_CORE, IAP_F_ALLCPUSCORE2),
+ IAPDESCR(69H, 0x69, IAP_M_AGENT | IAP_M_CORE, IAP_F_ALLCPUSCORE2),
+ IAPDESCR(6AH, 0x6A, IAP_M_AGENT | IAP_M_CORE, IAP_F_ALLCPUSCORE2),
+ IAPDESCR(6BH, 0x6B, IAP_M_AGENT | IAP_M_CORE, IAP_F_ALLCPUSCORE2),
+
+ IAPDESCR(6CH, 0x6C, IAP_M_AGENT | IAP_M_CORE, IAP_F_ALLCPUSCORE2),
+ IAPDESCR(6CH_01H, 0x6C, 0x01, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+
+ IAPDESCR(6DH, 0x6D, IAP_M_AGENT | IAP_M_CORE, IAP_F_CA | IAP_F_CC2),
+ IAPDESCR(6DH, 0x6D, IAP_M_CORE, IAP_F_CC),
+
+ IAPDESCR(6EH, 0x6E, IAP_M_AGENT | IAP_M_CORE, IAP_F_CA | IAP_F_CC2),
+ IAPDESCR(6EH, 0x6E, IAP_M_CORE, IAP_F_CC),
+
+ IAPDESCR(6FH, 0x6F, IAP_M_AGENT | IAP_M_CORE, IAP_F_CA | IAP_F_CC2),
+ IAPDESCR(6FH, 0x6F, IAP_M_CORE, IAP_F_CC),
+
+ IAPDESCR(70H, 0x70, IAP_M_AGENT | IAP_M_CORE, IAP_F_CA | IAP_F_CC2),
+ IAPDESCR(70H, 0x70, IAP_M_CORE, IAP_F_CC),
+
+ IAPDESCR(77H, 0x77, IAP_M_AGENT | IAP_M_SNOOPRESPONSE,
+ IAP_F_CA | IAP_F_CC2),
+ IAPDESCR(77H, 0x77, IAP_M_AGENT | IAP_M_MESI, IAP_F_CC),
+
+ IAPDESCR(78H, 0x78, IAP_M_CORE, IAP_F_CC),
+ IAPDESCR(78H, 0x78, IAP_M_CORE | IAP_M_SNOOPTYPE, IAP_F_CA | IAP_F_CC2),
+
+ IAPDESCR(79H_02H, 0x79, 0x02, IAP_F_FM | IAP_F_SB | IAP_F_IB |
+ IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(79H_04H, 0x79, 0x04, IAP_F_FM | IAP_F_SB | IAP_F_IB |
+ IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(79H_08H, 0x79, 0x08, IAP_F_FM | IAP_F_SB | IAP_F_IB |
+ IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(79H_10H, 0x79, 0x10, IAP_F_FM | IAP_F_SB | IAP_F_IB |
+ IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(79H_20H, 0x79, 0x20, IAP_F_FM | IAP_F_SB | IAP_F_IB |
+ IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(79H_30H, 0x79, 0x30, IAP_F_FM | IAP_F_SB | IAP_F_IB |
+ IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(79H_18H, 0x79, 0x18, IAP_F_FM | IAP_F_IB | IAP_F_IBX),
+ IAPDESCR(79H_24H, 0x79, 0x24, IAP_F_FM | IAP_F_IB | IAP_F_IBX),
+ IAPDESCR(79H_3CH, 0x79, 0x3C, IAP_F_FM | IAP_F_IB | IAP_F_IBX),
+
+ IAPDESCR(7AH, 0x7A, IAP_M_AGENT, IAP_F_CA | IAP_F_CC2),
+
+ IAPDESCR(7BH, 0x7B, IAP_M_AGENT, IAP_F_CA | IAP_F_CC2),
+
+ IAPDESCR(7DH, 0x7D, IAP_M_CORE, IAP_F_ALLCPUSCORE2),
+
+ IAPDESCR(7EH, 0x7E, IAP_M_AGENT | IAP_M_CORE, IAP_F_CA | IAP_F_CC2),
+ IAPDESCR(7EH_00H, 0x7E, 0x00, IAP_F_FM | IAP_F_CC),
+
+ IAPDESCR(7FH, 0x7F, IAP_M_CORE, IAP_F_CA | IAP_F_CC2),
+
+ IAPDESCR(80H_00H, 0x80, 0x00, IAP_F_FM | IAP_F_ALLCPUSCORE2),
+ IAPDESCR(80H_01H, 0x80, 0x01, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(80H_02H, 0x80, 0x02, IAP_F_FM | IAP_F_CA | IAP_F_I7 |
+ IAP_F_WM | IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(80H_03H, 0x80, 0x03, IAP_F_FM | IAP_F_CA | IAP_F_I7 |
+ IAP_F_WM),
+ IAPDESCR(80H_04H, 0x80, 0x04, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+
+ IAPDESCR(81H_00H, 0x81, 0x00, IAP_F_FM | IAP_F_ALLCPUSCORE2),
+ IAPDESCR(81H_01H, 0x81, 0x01, IAP_F_FM | IAP_F_I7O),
+ IAPDESCR(81H_02H, 0x81, 0x02, IAP_F_FM | IAP_F_I7O),
+
+ IAPDESCR(82H_01H, 0x82, 0x01, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(82H_02H, 0x82, 0x02, IAP_F_FM | IAP_F_CA | IAP_F_CC2),
+ IAPDESCR(82H_04H, 0x82, 0x04, IAP_F_FM | IAP_F_CA),
+ IAPDESCR(82H_10H, 0x82, 0x10, IAP_F_FM | IAP_F_CA | IAP_F_CC2),
+ IAPDESCR(82H_12H, 0x82, 0x12, IAP_F_FM | IAP_F_CC2),
+ IAPDESCR(82H_40H, 0x82, 0x40, IAP_F_FM | IAP_F_CC2),
+
+ IAPDESCR(83H_01H, 0x83, 0x01, IAP_F_FM | IAP_F_I7O),
+ IAPDESCR(83H_02H, 0x83, 0x02, IAP_F_FM | IAP_F_CA | IAP_F_CC2),
+
+ IAPDESCR(85H_00H, 0x85, 0x00, IAP_F_FM | IAP_F_CC),
+ IAPDESCR(85H_01H, 0x85, 0x01, IAP_F_FM | IAP_F_I7 | IAP_F_WM |
+ IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(85H_02H, 0x85, 0x02, IAP_F_FM | IAP_F_I7 | IAP_F_WM |
+ IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(85H_04H, 0x85, 0x04, IAP_F_FM | IAP_F_WM | IAP_F_I7O |
+ IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(85H_10H, 0x85, 0x10, IAP_F_FM | IAP_F_I7O | IAP_F_SB | IAP_F_IB |
+ IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(85H_20H, 0x85, 0x20, IAP_F_FM | IAP_F_I7O),
+ IAPDESCR(85H_40H, 0x85, 0x40, IAP_F_FM | IAP_F_I7O),
+ IAPDESCR(85H_80H, 0x85, 0x80, IAP_F_FM | IAP_F_WM | IAP_F_I7O),
+
+ IAPDESCR(86H_00H, 0x86, 0x00, IAP_F_FM | IAP_F_ALLCPUSCORE2),
+
+ IAPDESCR(87H_00H, 0x87, 0x00, IAP_F_FM | IAP_F_ALLCPUSCORE2),
+ IAPDESCR(87H_01H, 0x87, 0x01, IAP_F_FM | IAP_F_I7 | IAP_F_WM |
+ IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(87H_02H, 0x87, 0x02, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(87H_04H, 0x87, 0x04, IAP_F_FM | IAP_F_I7 | IAP_F_WM |
+ IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(87H_08H, 0x87, 0x08, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(87H_0FH, 0x87, 0x0F, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+
+ IAPDESCR(88H_00H, 0x88, 0x00, IAP_F_FM | IAP_F_ALLCPUSCORE2),
+ IAPDESCR(88H_01H, 0x88, 0x01, IAP_F_FM | IAP_F_I7 | IAP_F_WM |
+ IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(88H_02H, 0x88, 0x02, IAP_F_FM | IAP_F_I7 | IAP_F_WM |
+ IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(88H_04H, 0x88, 0x04, IAP_F_FM | IAP_F_I7 | IAP_F_WM |
+ IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(88H_07H, 0x88, 0x07, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(88H_08H, 0x88, 0x08, IAP_F_FM | IAP_F_I7 | IAP_F_WM |
+ IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(88H_10H, 0x88, 0x10, IAP_F_FM | IAP_F_I7 | IAP_F_WM |
+ IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(88H_20H, 0x88, 0x20, IAP_F_FM | IAP_F_I7 | IAP_F_WM |
+ IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(88H_30H, 0x88, 0x30, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(88H_40H, 0x88, 0x40, IAP_F_FM | IAP_F_I7 | IAP_F_WM |
+ IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(88H_7FH, 0x88, 0x7F, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(88H_80H, 0x88, 0x80, IAP_F_FM | IAP_F_SB | IAP_F_IB |
+ IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(88H_FFH, 0x88, 0xFF, IAP_F_FM | IAP_F_SB | IAP_F_IB |
+ IAP_F_SBX | IAP_F_IBX),
+
+ IAPDESCR(89H_00H, 0x89, 0x00, IAP_F_FM | IAP_F_ALLCPUSCORE2),
+ IAPDESCR(89H_01H, 0x89, 0x01, IAP_F_FM | IAP_F_I7 | IAP_F_WM |
+ IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(89H_02H, 0x89, 0x02, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(89H_04H, 0x89, 0x04, IAP_F_FM | IAP_F_I7 | IAP_F_WM |
+ IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(89H_07H, 0x89, 0x07, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(89H_08H, 0x89, 0x08, IAP_F_FM | IAP_F_I7 | IAP_F_WM |
+ IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(89H_10H, 0x89, 0x10, IAP_F_FM | IAP_F_I7 | IAP_F_WM |
+ IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(89H_20H, 0x89, 0x20, IAP_F_FM | IAP_F_I7 | IAP_F_WM |
+ IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(89H_30H, 0x89, 0x30, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(89H_40H, 0x89, 0x40, IAP_F_FM | IAP_F_I7 | IAP_F_WM |
+ IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(89H_7FH, 0x89, 0x7F, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(89H_80H, 0x89, 0x80, IAP_F_FM | IAP_F_SB | IAP_F_IB |
+ IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(89H_FFH, 0x89, 0xFF, IAP_F_FM | IAP_F_SB | IAP_F_IB |
+ IAP_F_SBX | IAP_F_IBX),
+
+ IAPDESCR(8AH_00H, 0x8A, 0x00, IAP_F_FM | IAP_F_ALLCPUSCORE2),
+ IAPDESCR(8BH_00H, 0x8B, 0x00, IAP_F_FM | IAP_F_ALLCPUSCORE2),
+ IAPDESCR(8CH_00H, 0x8C, 0x00, IAP_F_FM | IAP_F_ALLCPUSCORE2),
+ IAPDESCR(8DH_00H, 0x8D, 0x00, IAP_F_FM | IAP_F_ALLCPUSCORE2),
+ IAPDESCR(8EH_00H, 0x8E, 0x00, IAP_F_FM | IAP_F_ALLCPUSCORE2),
+ IAPDESCR(8FH_00H, 0x8F, 0x00, IAP_F_FM | IAP_F_ALLCPUSCORE2),
+
+ IAPDESCR(90H_00H, 0x90, 0x00, IAP_F_FM | IAP_F_ALLCPUSCORE2),
+ IAPDESCR(91H_00H, 0x91, 0x00, IAP_F_FM | IAP_F_ALLCPUSCORE2),
+ IAPDESCR(92H_00H, 0x92, 0x00, IAP_F_FM | IAP_F_ALLCPUSCORE2),
+ IAPDESCR(93H_00H, 0x93, 0x00, IAP_F_FM | IAP_F_ALLCPUSCORE2),
+ IAPDESCR(94H_00H, 0x94, 0x00, IAP_F_FM | IAP_F_ALLCPUSCORE2),
+
+ IAPDESCR(9CH_01H, 0x9C, 0x01, IAP_F_FM | IAP_F_SB | IAP_F_IB |
+ IAP_F_SBX | IAP_F_IBX),
+
+ IAPDESCR(97H_00H, 0x97, 0x00, IAP_F_FM | IAP_F_CA | IAP_F_CC2),
+ IAPDESCR(98H_00H, 0x98, 0x00, IAP_F_FM | IAP_F_CA | IAP_F_CC2),
+ IAPDESCR(A0H_00H, 0xA0, 0x00, IAP_F_FM | IAP_F_CA | IAP_F_CC2),
+
+ IAPDESCR(A1H_01H, 0xA1, 0x01, IAP_F_FM | IAP_F_CA | IAP_F_CC2 |
+ IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(A1H_02H, 0xA1, 0x02, IAP_F_FM | IAP_F_CA | IAP_F_CC2 |
+ IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(A1H_04H, 0xA1, 0x04, IAP_F_FM | IAP_F_CA | IAP_F_CC2 |
+ IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(A1H_08H, 0xA1, 0x08, IAP_F_FM | IAP_F_CA | IAP_F_CC2 |
+ IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(A1H_0CH, 0xA1, 0x0C, IAP_F_FM | IAP_F_SB | IAP_F_IB |
+ IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(A1H_10H, 0xA1, 0x10, IAP_F_FM | IAP_F_CA | IAP_F_CC2 |
+ IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(A1H_20H, 0xA1, 0x20, IAP_F_FM | IAP_F_CA | IAP_F_CC2 |
+ IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(A1H_30H, 0xA1, 0x30, IAP_F_FM | IAP_F_SB | IAP_F_IB |
+ IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(A1H_40H, 0xA1, 0x40, IAP_F_FM | IAP_F_SB | IAP_F_IB |
+ IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(A1H_80H, 0xA1, 0x80, IAP_F_FM | IAP_F_SB | IAP_F_IB |
+ IAP_F_SBX | IAP_F_IBX),
+
+ IAPDESCR(A2H_00H, 0xA2, 0x00, IAP_F_FM | IAP_F_CC),
+ IAPDESCR(A2H_01H, 0xA2, 0x01, IAP_F_FM | IAP_F_I7 | IAP_F_WM |
+ IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(A2H_02H, 0xA2, 0x02, IAP_F_FM | IAP_F_I7 | IAP_F_WM |
+ IAP_F_SB | IAP_F_SBX),
+ IAPDESCR(A2H_04H, 0xA2, 0x04, IAP_F_FM | IAP_F_I7 | IAP_F_WM |
+ IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(A2H_08H, 0xA2, 0x08, IAP_F_FM | IAP_F_I7 | IAP_F_WM |
+ IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(A2H_10H, 0xA2, 0x10, IAP_F_FM | IAP_F_I7 | IAP_F_WM |
+ IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(A2H_20H, 0xA2, 0x20, IAP_F_FM | IAP_F_I7 | IAP_F_WM |
+ IAP_F_SB | IAP_F_SBX),
+ IAPDESCR(A2H_40H, 0xA2, 0x40, IAP_F_FM | IAP_F_I7 | IAP_F_WM |
+ IAP_F_SB | IAP_F_SBX),
+ IAPDESCR(A2H_80H, 0xA2, 0x80, IAP_F_FM | IAP_F_I7 | IAP_F_WM |
+ IAP_F_SB | IAP_F_SBX),
+
+ IAPDESCR(A3H_01H, 0xA3, 0x01, IAP_F_FM | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(A3H_02H, 0xA3, 0x02, IAP_F_FM | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(A3H_04H, 0xA3, 0x04, IAP_F_FM | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(A3H_08H, 0xA3, 0x08, IAP_F_IBX),
+
+ IAPDESCR(A6H_01H, 0xA6, 0x01, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(A7H_01H, 0xA7, 0x01, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(A8H_01H, 0xA8, 0x01, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+
+ IAPDESCR(AAH_01H, 0xAA, 0x01, IAP_F_FM | IAP_F_CC2),
+ IAPDESCR(AAH_02H, 0xAA, 0x02, IAP_F_FM | IAP_F_CA),
+ IAPDESCR(AAH_03H, 0xAA, 0x03, IAP_F_FM | IAP_F_CA),
+ IAPDESCR(AAH_08H, 0xAA, 0x08, IAP_F_FM | IAP_F_CC2),
+
+ IAPDESCR(ABH_01H, 0xAB, 0x01, IAP_F_FM | IAP_F_CA | IAP_F_CC2 |
+ IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(ABH_02H, 0xAB, 0x02, IAP_F_FM | IAP_F_CA | IAP_F_CC2 |
+ IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+
+ IAPDESCR(ACH_02H, 0xAC, 0x02, IAP_F_FM | IAP_F_SB | IAP_F_SBX),
+ IAPDESCR(ACH_08H, 0xAC, 0x08, IAP_F_FM | IAP_F_SB | IAP_F_IB |
+ IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(ACH_0AH, 0xAC, 0x0A, IAP_F_FM | IAP_F_SB | IAP_F_SBX),
+
+ IAPDESCR(AEH_01H, 0xAE, 0x01, IAP_F_FM | IAP_F_I7 | IAP_F_WM |
+ IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+
+ IAPDESCR(B0H_00H, 0xB0, 0x00, IAP_F_FM | IAP_F_ALLCPUSCORE2),
+ IAPDESCR(B0H_01H, 0xB0, 0x01, IAP_F_FM | IAP_F_WM | IAP_F_I7O |
+ IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(B0H_02H, 0xB0, 0x02, IAP_F_FM | IAP_F_WM | IAP_F_I7O | IAP_F_IB |
+ IAP_F_IBX),
+ IAPDESCR(B0H_04H, 0xB0, 0x04, IAP_F_FM | IAP_F_WM | IAP_F_I7O |
+ IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(B0H_08H, 0xB0, 0x08, IAP_F_FM | IAP_F_WM | IAP_F_I7O |
+ IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(B0H_10H, 0xB0, 0x10, IAP_F_FM | IAP_F_WM | IAP_F_I7O),
+ IAPDESCR(B0H_20H, 0xB0, 0x20, IAP_F_FM | IAP_F_I7O),
+ IAPDESCR(B0H_40H, 0xB0, 0x40, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(B0H_80H, 0xB0, 0x80, IAP_F_FM | IAP_F_CA | IAP_F_WM | IAP_F_I7O),
+
+ IAPDESCR(B1H_00H, 0xB1, 0x00, IAP_F_FM | IAP_F_ALLCPUSCORE2),
+ IAPDESCR(B1H_01H, 0xB1, 0x01, IAP_F_FM | IAP_F_I7 | IAP_F_WM |
+ IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(B1H_02H, 0xB1, 0x02, IAP_F_FM | IAP_F_I7 | IAP_F_WM |
+ IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(B1H_04H, 0xB1, 0x04, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(B1H_08H, 0xB1, 0x08, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(B1H_10H, 0xB1, 0x10, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(B1H_1FH, 0xB1, 0x1F, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(B1H_20H, 0xB1, 0x20, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(B1H_3FH, 0xB1, 0x3F, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(B1H_40H, 0xB1, 0x40, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(B1H_80H, 0xB1, 0x80, IAP_F_FM | IAP_F_CA | IAP_F_I7 |
+ IAP_F_WM),
+
+ IAPDESCR(B2H_01H, 0xB2, 0x01, IAP_F_FM | IAP_F_I7 | IAP_F_WM |
+ IAP_F_SB | IAP_F_SBX),
+
+ IAPDESCR(B3H_01H, 0xB3, 0x01, IAP_F_FM | IAP_F_ALLCPUSCORE2 |
+ IAP_F_WM | IAP_F_I7O),
+ IAPDESCR(B3H_02H, 0xB3, 0x02, IAP_F_FM | IAP_F_ALLCPUSCORE2 |
+ IAP_F_WM | IAP_F_I7O),
+ IAPDESCR(B3H_04H, 0xB3, 0x04, IAP_F_FM | IAP_F_ALLCPUSCORE2 |
+ IAP_F_WM | IAP_F_I7O),
+ IAPDESCR(B3H_08H, 0xB3, 0x08, IAP_F_FM | IAP_F_ALLCPUSCORE2),
+ IAPDESCR(B3H_10H, 0xB3, 0x10, IAP_F_FM | IAP_F_ALLCPUSCORE2),
+ IAPDESCR(B3H_20H, 0xB3, 0x20, IAP_F_FM | IAP_F_ALLCPUSCORE2),
+ IAPDESCR(B3H_81H, 0xB3, 0x81, IAP_F_FM | IAP_F_CA),
+ IAPDESCR(B3H_82H, 0xB3, 0x82, IAP_F_FM | IAP_F_CA),
+ IAPDESCR(B3H_84H, 0xB3, 0x84, IAP_F_FM | IAP_F_CA),
+ IAPDESCR(B3H_88H, 0xB3, 0x88, IAP_F_FM | IAP_F_CA),
+ IAPDESCR(B3H_90H, 0xB3, 0x90, IAP_F_FM | IAP_F_CA),
+ IAPDESCR(B3H_A0H, 0xB3, 0xA0, IAP_F_FM | IAP_F_CA),
+
+ IAPDESCR(B4H_01H, 0xB4, 0x01, IAP_F_FM | IAP_F_WM),
+ IAPDESCR(B4H_02H, 0xB4, 0x02, IAP_F_FM | IAP_F_WM),
+ IAPDESCR(B4H_04H, 0xB4, 0x04, IAP_F_FM | IAP_F_WM),
+
+ IAPDESCR(B6H_01H, 0xB6, 0x01, IAP_F_FM | IAP_F_SB | IAP_F_SBX),
+
+ IAPDESCR(B7H_01H, 0xB7, 0x01, IAP_F_FM | IAP_F_I7 | IAP_F_WM |
+ IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+
+ IAPDESCR(B8H_01H, 0xB8, 0x01, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(B8H_02H, 0xB8, 0x02, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(B8H_04H, 0xB8, 0x04, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+
+ IAPDESCR(BAH_01H, 0xBA, 0x01, IAP_F_FM | IAP_F_I7O),
+ IAPDESCR(BAH_02H, 0xBA, 0x02, IAP_F_FM | IAP_F_I7O),
+
+ IAPDESCR(BBH_01H, 0xBB, 0x01, IAP_F_FM | IAP_F_I7 | IAP_F_WM |
+ IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+
+ IAPDESCR(BDH_01H, 0xBD, 0x01, IAP_F_FM | IAP_F_SB | IAP_F_IB |
+ IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(BDH_20H, 0xBD, 0x20, IAP_F_FM | IAP_F_SB | IAP_F_IB |
+ IAP_F_SBX | IAP_F_IBX),
+
+ IAPDESCR(BFH_05H, 0xBF, 0x05, IAP_F_FM | IAP_F_SB | IAP_F_SBX),
+
+ IAPDESCR(C0H_00H, 0xC0, 0x00, IAP_F_FM | IAP_F_ALLCPUSCORE2 |
+ IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(C0H_01H, 0xC0, 0x01, IAP_F_FM | IAP_F_CA | IAP_F_CC2 |
+ IAP_F_I7 | IAP_F_WM | IAP_F_SB | IAP_F_IB | IAP_F_SBX |
+ IAP_F_IBX),
+ IAPDESCR(C0H_02H, 0xC0, 0x02, IAP_F_FM | IAP_F_CA | IAP_F_CC2 |
+ IAP_F_I7 | IAP_F_WM | IAP_F_SB),
+ IAPDESCR(C0H_04H, 0xC0, 0x04, IAP_F_FM | IAP_F_CA | IAP_F_CC2 |
+ IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(C0H_08H, 0xC0, 0x08, IAP_F_FM | IAP_F_CC2E),
+
+ IAPDESCR(C1H_00H, 0xC1, 0x00, IAP_F_FM | IAP_F_CC),
+ IAPDESCR(C1H_01H, 0xC1, 0x01, IAP_F_FM | IAP_F_CA | IAP_F_CC2),
+ IAPDESCR(C1H_02H, 0xC1, 0x02, IAP_F_FM | IAP_F_SB | IAP_F_SBX),
+ IAPDESCR(C1H_08H, 0xC1, 0x08, IAP_F_FM | IAP_F_SB | IAP_F_IB |
+ IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(C1H_10H, 0xC1, 0x10, IAP_F_FM | IAP_F_SB | IAP_F_IB |
+ IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(C1H_20H, 0xC1, 0x20, IAP_F_FM | IAP_F_SB | IAP_F_IB |
+ IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(C1H_FEH, 0xC1, 0xFE, IAP_F_FM | IAP_F_CA | IAP_F_CC2),
+
+ IAPDESCR(C2H_00H, 0xC2, 0x00, IAP_F_FM | IAP_F_CC),
+ IAPDESCR(C2H_01H, 0xC2, 0x01, IAP_F_FM | IAP_F_CA | IAP_F_CC2 |
+ IAP_F_I7 | IAP_F_WM | IAP_F_SB | IAP_F_IB | IAP_F_SBX |
+ IAP_F_IBX),
+ IAPDESCR(C2H_02H, 0xC2, 0x02, IAP_F_FM | IAP_F_CA | IAP_F_CC2 |
+ IAP_F_I7 | IAP_F_WM | IAP_F_SB | IAP_F_IB | IAP_F_SBX |
+ IAP_F_IBX),
+ IAPDESCR(C2H_04H, 0xC2, 0x04, IAP_F_FM | IAP_F_CA | IAP_F_CC2 |
+ IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(C2H_07H, 0xC2, 0x07, IAP_F_FM | IAP_F_CA | IAP_F_CC2),
+ IAPDESCR(C2H_08H, 0xC2, 0x08, IAP_F_FM | IAP_F_CA | IAP_F_CC2),
+ IAPDESCR(C2H_0FH, 0xC2, 0x0F, IAP_F_FM | IAP_F_CC2),
+ IAPDESCR(C2H_10H, 0xC2, 0x10, IAP_F_FM | IAP_F_CA),
+
+ IAPDESCR(C3H_00H, 0xC3, 0x00, IAP_F_FM | IAP_F_CC),
+ IAPDESCR(C3H_01H, 0xC3, 0x01, IAP_F_FM | IAP_F_CA | IAP_F_CC2 |
+ IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(C3H_02H, 0xC3, 0x02, IAP_F_FM | IAP_F_I7 | IAP_F_WM |
+ IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(C3H_04H, 0xC3, 0x04, IAP_F_FM | IAP_F_CA | IAP_F_CC2 |
+ IAP_F_I7 | IAP_F_WM | IAP_F_SB | IAP_F_IB | IAP_F_SBX |
+ IAP_F_IBX),
+ IAPDESCR(C3H_10H, 0xC3, 0x10, IAP_F_FM | IAP_F_I7O),
+ IAPDESCR(C3H_20H, 0xC3, 0x20, IAP_F_FM | IAP_F_SB | IAP_F_IB |
+ IAP_F_SBX | IAP_F_IBX),
+
+ IAPDESCR(C4H_00H, 0xC4, 0x00, IAP_F_FM | IAP_F_ALLCPUSCORE2 |
+ IAP_F_I7 | IAP_F_WM | IAP_F_SB | IAP_F_IB | IAP_F_SBX |
+ IAP_F_IBX),
+ IAPDESCR(C4H_01H, 0xC4, 0x01, IAP_F_FM | IAP_F_CA | IAP_F_CC2 |
+ IAP_F_I7 | IAP_F_WM | IAP_F_SB | IAP_F_IB | IAP_F_SBX |
+ IAP_F_IBX),
+ IAPDESCR(C4H_02H, 0xC4, 0x02, IAP_F_FM | IAP_F_CA | IAP_F_CC2 |
+ IAP_F_I7 | IAP_F_WM | IAP_F_SB | IAP_F_IB | IAP_F_SBX |
+ IAP_F_IBX),
+ IAPDESCR(C4H_04H, 0xC4, 0x04, IAP_F_FM | IAP_F_CA | IAP_F_CC2 |
+ IAP_F_I7 | IAP_F_WM | IAP_F_SB | IAP_F_IB | IAP_F_SBX |
+ IAP_F_IBX),
+ IAPDESCR(C4H_08H, 0xC4, 0x08, IAP_F_FM | IAP_F_CA | IAP_F_CC2 |
+ IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(C4H_0CH, 0xC4, 0x0C, IAP_F_FM | IAP_F_CA | IAP_F_CC2),
+ IAPDESCR(C4H_0FH, 0xC4, 0x0F, IAP_F_FM | IAP_F_CA),
+ IAPDESCR(C4H_10H, 0xC4, 0x10, IAP_F_FM | IAP_F_SB | IAP_F_IB |
+ IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(C4H_20H, 0xC4, 0x20, IAP_F_FM | IAP_F_SB | IAP_F_IB |
+ IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(C4H_40H, 0xC4, 0x40, IAP_F_FM | IAP_F_SB | IAP_F_IB |
+ IAP_F_SBX | IAP_F_IBX),
+
+ IAPDESCR(C5H_00H, 0xC5, 0x00, IAP_F_FM | IAP_F_ALLCPUSCORE2 |
+ IAP_F_I7 | IAP_F_WM | IAP_F_SB | IAP_F_IB | IAP_F_SBX |
+ IAP_F_IBX),
+ IAPDESCR(C5H_01H, 0xC5, 0x01, IAP_F_FM | IAP_F_WM | IAP_F_SB |
+ IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(C5H_02H, 0xC5, 0x02, IAP_F_FM | IAP_F_I7 | IAP_F_WM |
+ IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(C5H_04H, 0xC5, 0x04, IAP_F_FM | IAP_F_WM | IAP_F_SB |
+ IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(C5H_10H, 0xC5, 0x10, IAP_F_FM | IAP_F_SB | IAP_F_IB |
+ IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(C5H_20H, 0xC5, 0x20, IAP_F_FM | IAP_F_SB | IAP_F_IB |
+ IAP_F_SBX | IAP_F_IBX),
+
+ IAPDESCR(C6H_00H, 0xC6, 0x00, IAP_F_FM | IAP_F_CC),
+ IAPDESCR(C6H_01H, 0xC6, 0x01, IAP_F_FM | IAP_F_CA | IAP_F_CC2),
+ IAPDESCR(C6H_02H, 0xC6, 0x02, IAP_F_FM | IAP_F_CA | IAP_F_CC2),
+
+ IAPDESCR(C7H_00H, 0xC7, 0x00, IAP_F_FM | IAP_F_CC),
+ IAPDESCR(C7H_01H, 0xC7, 0x01, IAP_F_FM | IAP_F_CA | IAP_F_CC2 |
+ IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(C7H_02H, 0xC7, 0x02, IAP_F_FM | IAP_F_CA | IAP_F_CC2 |
+ IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(C7H_04H, 0xC7, 0x04, IAP_F_FM | IAP_F_CA | IAP_F_CC2 |
+ IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(C7H_08H, 0xC7, 0x08, IAP_F_FM | IAP_F_CA | IAP_F_CC2 |
+ IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(C7H_10H, 0xC7, 0x10, IAP_F_FM | IAP_F_CA | IAP_F_CC2 |
+ IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(C7H_1FH, 0xC7, 0x1F, IAP_F_FM | IAP_F_CA | IAP_F_CC2),
+
+ IAPDESCR(C8H_00H, 0xC8, 0x00, IAP_F_FM | IAP_F_ALLCPUSCORE2),
+ IAPDESCR(C8H_20H, 0xC8, 0x20, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+
+ IAPDESCR(C9H_00H, 0xC9, 0x00, IAP_F_FM | IAP_F_ALLCPUSCORE2),
+
+ IAPDESCR(CAH_00H, 0xCA, 0x00, IAP_F_FM | IAP_F_CC),
+ IAPDESCR(CAH_01H, 0xCA, 0x01, IAP_F_FM | IAP_F_CA | IAP_F_CC2),
+ IAPDESCR(CAH_02H, 0xCA, 0x02, IAP_F_FM | IAP_F_CA | IAP_F_CC2 |
+ IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(CAH_04H, 0xCA, 0x04, IAP_F_FM | IAP_F_CA | IAP_F_CC2 |
+ IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(CAH_08H, 0xCA, 0x08, IAP_F_FM | IAP_F_CA | IAP_F_CC2 |
+ IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(CAH_10H, 0xCA, 0x10, IAP_F_FM | IAP_F_SB | IAP_F_IB |
+ IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(CAH_1EH, 0xCA, 0x1E, IAP_F_FM | IAP_F_SB | IAP_F_IB |
+ IAP_F_SBX | IAP_F_IBX),
+
+ IAPDESCR(CBH_01H, 0xCB, 0x01, IAP_F_FM | IAP_F_CA | IAP_F_CC2 |
+ IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(CBH_02H, 0xCB, 0x02, IAP_F_FM | IAP_F_CA | IAP_F_CC2 |
+ IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(CBH_04H, 0xCB, 0x04, IAP_F_FM | IAP_F_CA | IAP_F_CC2 |
+ IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(CBH_08H, 0xCB, 0x08, IAP_F_FM | IAP_F_CA | IAP_F_CC2 |
+ IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(CBH_10H, 0xCB, 0x10, IAP_F_FM | IAP_F_CC2 | IAP_F_I7 |
+ IAP_F_WM),
+ IAPDESCR(CBH_40H, 0xCB, 0x40, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(CBH_80H, 0xCB, 0x80, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+
+ IAPDESCR(CCH_00H, 0xCC, 0x00, IAP_F_FM | IAP_F_CC),
+ IAPDESCR(CCH_01H, 0xCC, 0x01, IAP_F_FM | IAP_F_ALLCPUSCORE2 |
+ IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(CCH_02H, 0xCC, 0x02, IAP_F_FM | IAP_F_CA | IAP_F_CC2 |
+ IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(CCH_03H, 0xCC, 0x03, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(CCH_20H, 0xCC, 0x20, IAP_F_FM | IAP_F_SB | IAP_F_IB |
+ IAP_F_SBX | IAP_F_IBX),
+
+ IAPDESCR(CDH_00H, 0xCD, 0x00, IAP_F_FM | IAP_F_ALLCPUSCORE2),
+ IAPDESCR(CDH_01H, 0xCD, 0x01, IAP_F_FM | IAP_F_SB | IAP_F_IB |
+ IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(CDH_02H, 0xCD, 0x02, IAP_F_FM | IAP_F_SB | IAP_F_IB |
+ IAP_F_SBX | IAP_F_IBX),
+
+ IAPDESCR(CEH_00H, 0xCE, 0x00, IAP_F_FM | IAP_F_ALLCPUSCORE2),
+ IAPDESCR(CFH_00H, 0xCF, 0x00, IAP_F_FM | IAP_F_CA | IAP_F_CC2),
+
+ IAPDESCR(D0H_00H, 0xD0, 0x00, IAP_F_FM | IAP_F_CC),
+ IAPDESCR(D0H_01H, 0xD0, 0x01, IAP_F_FM | IAP_F_I7 | IAP_F_WM |
+ IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(D0H_02H, 0xD0, 0x02, IAP_F_FM | IAP_F_SB | IAP_F_IB |
+ IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(D0H_10H, 0xD0, 0x10, IAP_F_FM | IAP_F_SB | IAP_F_IB |
+ IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(D0H_20H, 0xD0, 0x20, IAP_F_FM | IAP_F_SB | IAP_F_IB |
+ IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(D0H_40H, 0xD0, 0x40, IAP_F_FM | IAP_F_SB | IAP_F_IB |
+ IAP_F_SBX | IAP_F_IBX),
+	IAPDESCR(D0H_80H, 0xD0, 0x80, IAP_F_FM | IAP_F_SB | IAP_F_IB |
+ IAP_F_SBX | IAP_F_IBX),
+
+ IAPDESCR(D1H_01H, 0xD1, 0x01, IAP_F_FM | IAP_F_WM | IAP_F_SB |
+ IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(D1H_02H, 0xD1, 0x02, IAP_F_FM | IAP_F_I7 | IAP_F_WM |
+ IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(D1H_04H, 0xD1, 0x04, IAP_F_FM | IAP_F_I7 | IAP_F_WM |
+ IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(D1H_08H, 0xD1, 0x08, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(D1H_20H, 0xD1, 0x20, IAP_F_FM | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(D1H_40H, 0xD1, 0x40, IAP_F_FM | IAP_F_SB | IAP_F_IB |
+ IAP_F_SBX | IAP_F_IBX),
+
+ IAPDESCR(D2H_01H, 0xD2, 0x01, IAP_F_FM | IAP_F_CA | IAP_F_CC2 |
+ IAP_F_I7 | IAP_F_WM | IAP_F_SB | IAP_F_IB | IAP_F_IBX),
+ IAPDESCR(D2H_02H, 0xD2, 0x02, IAP_F_FM | IAP_F_CA | IAP_F_CC2 |
+ IAP_F_I7 | IAP_F_WM | IAP_F_SB | IAP_F_IB | IAP_F_IBX),
+ IAPDESCR(D2H_04H, 0xD2, 0x04, IAP_F_FM | IAP_F_CA | IAP_F_CC2 |
+ IAP_F_I7 | IAP_F_WM | IAP_F_SB | IAP_F_IB | IAP_F_IBX),
+ IAPDESCR(D2H_08H, 0xD2, 0x08, IAP_F_FM | IAP_F_CA | IAP_F_CC2 |
+ IAP_F_I7 | IAP_F_WM | IAP_F_SB | IAP_F_IB | IAP_F_IBX),
+ IAPDESCR(D2H_0FH, 0xD2, 0x0F, IAP_F_FM | IAP_F_CA | IAP_F_CC2 |
+ IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(D2H_10H, 0xD2, 0x10, IAP_F_FM | IAP_F_CC2E),
+
+ IAPDESCR(D3H_01H, 0xD3, 0x01, IAP_F_FM | IAP_F_IB | IAP_F_SBX |
+ IAP_F_IBX),
+ IAPDESCR(D3H_04H, 0xD3, 0x04, IAP_F_FM | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(D3H_10H, 0xD3, 0x10, IAP_F_IBX),
+ IAPDESCR(D3H_20H, 0xD3, 0x20, IAP_F_IBX),
+
+ IAPDESCR(D4H_01H, 0xD4, 0x01, IAP_F_FM | IAP_F_CA | IAP_F_CC2 |
+ IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(D4H_02H, 0xD4, 0x02, IAP_F_FM | IAP_F_CA | IAP_F_CC2 |
+ IAP_F_SB | IAP_F_SBX),
+ IAPDESCR(D4H_04H, 0xD4, 0x04, IAP_F_FM | IAP_F_CA | IAP_F_CC2),
+ IAPDESCR(D4H_08H, 0xD4, 0x08, IAP_F_FM | IAP_F_CA | IAP_F_CC2),
+ IAPDESCR(D4H_0FH, 0xD4, 0x0F, IAP_F_FM | IAP_F_CA | IAP_F_CC2),
+
+ IAPDESCR(D5H_01H, 0xD5, 0x01, IAP_F_FM | IAP_F_CA | IAP_F_CC2 |
+ IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(D5H_02H, 0xD5, 0x02, IAP_F_FM | IAP_F_CA | IAP_F_CC2),
+ IAPDESCR(D5H_04H, 0xD5, 0x04, IAP_F_FM | IAP_F_CA | IAP_F_CC2),
+ IAPDESCR(D5H_08H, 0xD5, 0x08, IAP_F_FM | IAP_F_CA | IAP_F_CC2),
+ IAPDESCR(D5H_0FH, 0xD5, 0x0F, IAP_F_FM | IAP_F_CA | IAP_F_CC2),
+
+ IAPDESCR(D7H_00H, 0xD7, 0x00, IAP_F_FM | IAP_F_CC),
+
+ IAPDESCR(D8H_00H, 0xD8, 0x00, IAP_F_FM | IAP_F_CC),
+ IAPDESCR(D8H_01H, 0xD8, 0x01, IAP_F_FM | IAP_F_CC),
+ IAPDESCR(D8H_02H, 0xD8, 0x02, IAP_F_FM | IAP_F_CC),
+ IAPDESCR(D8H_03H, 0xD8, 0x03, IAP_F_FM | IAP_F_CC),
+ IAPDESCR(D8H_04H, 0xD8, 0x04, IAP_F_FM | IAP_F_CC),
+
+ IAPDESCR(D9H_00H, 0xD9, 0x00, IAP_F_FM | IAP_F_CC),
+ IAPDESCR(D9H_01H, 0xD9, 0x01, IAP_F_FM | IAP_F_CC),
+ IAPDESCR(D9H_02H, 0xD9, 0x02, IAP_F_FM | IAP_F_CC),
+ IAPDESCR(D9H_03H, 0xD9, 0x03, IAP_F_FM | IAP_F_CC),
+
+ IAPDESCR(DAH_00H, 0xDA, 0x00, IAP_F_FM | IAP_F_CC),
+ IAPDESCR(DAH_01H, 0xDA, 0x01, IAP_F_FM | IAP_F_CC),
+ IAPDESCR(DAH_02H, 0xDA, 0x02, IAP_F_FM | IAP_F_CC),
+
+ IAPDESCR(DBH_00H, 0xDB, 0x00, IAP_F_FM | IAP_F_CC),
+ IAPDESCR(DBH_01H, 0xDB, 0x01, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+
+ IAPDESCR(DCH_01H, 0xDC, 0x01, IAP_F_FM | IAP_F_CA | IAP_F_CC2),
+ IAPDESCR(DCH_02H, 0xDC, 0x02, IAP_F_FM | IAP_F_CA | IAP_F_CC2),
+ IAPDESCR(DCH_04H, 0xDC, 0x04, IAP_F_FM | IAP_F_CA | IAP_F_CC2),
+ IAPDESCR(DCH_08H, 0xDC, 0x08, IAP_F_FM | IAP_F_CA | IAP_F_CC2),
+ IAPDESCR(DCH_10H, 0xDC, 0x10, IAP_F_FM | IAP_F_CA | IAP_F_CC2),
+ IAPDESCR(DCH_1FH, 0xDC, 0x1F, IAP_F_FM | IAP_F_CA | IAP_F_CC2),
+
+ IAPDESCR(E0H_00H, 0xE0, 0x00, IAP_F_FM | IAP_F_CC | IAP_F_CC2),
+ IAPDESCR(E0H_01H, 0xE0, 0x01, IAP_F_FM | IAP_F_CA | IAP_F_I7 |
+ IAP_F_WM),
+
+ IAPDESCR(E2H_00H, 0xE2, 0x00, IAP_F_FM | IAP_F_CC),
+
+ IAPDESCR(E4H_00H, 0xE4, 0x00, IAP_F_FM | IAP_F_ALLCPUSCORE2),
+ IAPDESCR(E4H_01H, 0xE4, 0x01, IAP_F_FM | IAP_F_I7O),
+
+ IAPDESCR(E5H_01H, 0xE5, 0x01, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+
+ IAPDESCR(E6H_00H, 0xE6, 0x00, IAP_F_FM | IAP_F_CC | IAP_F_CC2),
+ IAPDESCR(E6H_01H, 0xE6, 0x01, IAP_F_FM | IAP_F_CA | IAP_F_I7 |
+ IAP_F_WM | IAP_F_SBX),
+ IAPDESCR(E6H_02H, 0xE6, 0x02, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(E6H_1FH, 0xE6, 0x1F, IAP_F_IBX),
+
+ IAPDESCR(E8H_01H, 0xE8, 0x01, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(E8H_02H, 0xE8, 0x02, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+ IAPDESCR(E8H_03H, 0xE8, 0x03, IAP_F_FM | IAP_F_I7O),
+
+ IAPDESCR(ECH_01H, 0xEC, 0x01, IAP_F_FM | IAP_F_WM),
+
+ IAPDESCR(F0H_00H, 0xF0, 0x00, IAP_F_FM | IAP_F_ALLCPUSCORE2),
+ IAPDESCR(F0H_01H, 0xF0, 0x01, IAP_F_FM | IAP_F_I7 | IAP_F_WM |
+ IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(F0H_02H, 0xF0, 0x02, IAP_F_FM | IAP_F_I7 | IAP_F_WM |
+ IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(F0H_04H, 0xF0, 0x04, IAP_F_FM | IAP_F_I7 | IAP_F_WM |
+ IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(F0H_08H, 0xF0, 0x08, IAP_F_FM | IAP_F_I7 | IAP_F_WM |
+ IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(F0H_10H, 0xF0, 0x10, IAP_F_FM | IAP_F_I7 | IAP_F_WM |
+ IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(F0H_20H, 0xF0, 0x20, IAP_F_FM | IAP_F_I7 | IAP_F_WM |
+ IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(F0H_40H, 0xF0, 0x40, IAP_F_FM | IAP_F_I7 | IAP_F_WM |
+ IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(F0H_80H, 0xF0, 0x80, IAP_F_FM | IAP_F_I7 | IAP_F_WM |
+ IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+
+ IAPDESCR(F1H_01H, 0xF1, 0x01, IAP_F_FM | IAP_F_SB | IAP_F_IB |
+ IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(F1H_02H, 0xF1, 0x02, IAP_F_FM | IAP_F_I7 | IAP_F_WM |
+ IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(F1H_04H, 0xF1, 0x04, IAP_F_FM | IAP_F_I7 | IAP_F_WM |
+ IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(F1H_07H, 0xF1, 0x07, IAP_F_FM | IAP_F_I7 | IAP_F_WM |
+ IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+
+ IAPDESCR(F2H_01H, 0xF2, 0x01, IAP_F_FM | IAP_F_I7 | IAP_F_WM |
+ IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(F2H_02H, 0xF2, 0x02, IAP_F_FM | IAP_F_I7 | IAP_F_WM |
+ IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(F2H_04H, 0xF2, 0x04, IAP_F_FM | IAP_F_I7 | IAP_F_WM |
+ IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(F2H_08H, 0xF2, 0x08, IAP_F_FM | IAP_F_I7 | IAP_F_WM |
+ IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX),
+ IAPDESCR(F2H_0AH, 0xF2, 0x0A, IAP_F_FM | IAP_F_SB | IAP_F_SBX |
+ IAP_F_IBX),
+ IAPDESCR(F2H_0FH, 0xF2, 0x0F, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+
+ IAPDESCR(F3H_01H, 0xF3, 0x01, IAP_F_FM | IAP_F_I7O),
+ IAPDESCR(F3H_02H, 0xF3, 0x02, IAP_F_FM | IAP_F_I7O),
+ IAPDESCR(F3H_04H, 0xF3, 0x04, IAP_F_FM | IAP_F_I7O),
+ IAPDESCR(F3H_08H, 0xF3, 0x08, IAP_F_FM | IAP_F_I7O),
+ IAPDESCR(F3H_10H, 0xF3, 0x10, IAP_F_FM | IAP_F_I7O),
+ IAPDESCR(F3H_20H, 0xF3, 0x20, IAP_F_FM | IAP_F_I7O),
+
+ IAPDESCR(F4H_01H, 0xF4, 0x01, IAP_F_FM | IAP_F_I7O),
+ IAPDESCR(F4H_02H, 0xF4, 0x02, IAP_F_FM | IAP_F_I7O),
+ IAPDESCR(F4H_04H, 0xF4, 0x04, IAP_F_FM | IAP_F_WM | IAP_F_I7O),
+ IAPDESCR(F4H_08H, 0xF4, 0x08, IAP_F_FM | IAP_F_I7O),
+ IAPDESCR(F4H_10H, 0xF4, 0x10, IAP_F_FM | IAP_F_I7 | IAP_F_WM |
+ IAP_F_SB | IAP_F_SBX),
+
+ IAPDESCR(F6H_01H, 0xF6, 0x01, IAP_F_FM | IAP_F_I7 | IAP_F_WM),
+
+ IAPDESCR(F7H_01H, 0xF7, 0x01, IAP_F_FM | IAP_F_WM | IAP_F_I7),
+ IAPDESCR(F7H_02H, 0xF7, 0x02, IAP_F_FM | IAP_F_WM | IAP_F_I7),
+ IAPDESCR(F7H_04H, 0xF7, 0x04, IAP_F_FM | IAP_F_WM | IAP_F_I7),
+
+ IAPDESCR(F8H_00H, 0xF8, 0x00, IAP_F_FM | IAP_F_ALLCPUSCORE2),
+ IAPDESCR(F8H_01H, 0xF8, 0x01, IAP_F_FM | IAP_F_I7O),
+
+ IAPDESCR(FDH_01H, 0xFD, 0x01, IAP_F_FM | IAP_F_WM | IAP_F_I7),
+ IAPDESCR(FDH_02H, 0xFD, 0x02, IAP_F_FM | IAP_F_WM | IAP_F_I7),
+ IAPDESCR(FDH_04H, 0xFD, 0x04, IAP_F_FM | IAP_F_WM | IAP_F_I7),
+ IAPDESCR(FDH_08H, 0xFD, 0x08, IAP_F_FM | IAP_F_WM | IAP_F_I7),
+ IAPDESCR(FDH_10H, 0xFD, 0x10, IAP_F_FM | IAP_F_WM | IAP_F_I7),
+ IAPDESCR(FDH_20H, 0xFD, 0x20, IAP_F_FM | IAP_F_WM | IAP_F_I7),
+ IAPDESCR(FDH_40H, 0xFD, 0x40, IAP_F_FM | IAP_F_WM | IAP_F_I7),
+};
+
+static const int niap_events = sizeof(iap_events) / sizeof(iap_events[0]);
+
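+/*
+ * A sampling PMC is programmed with (2^width - reload_count) and
+ * interrupts on overflow, so the two conversions below are inverses
+ * of each other: with a 48-bit counter, for example, a reload count
+ * of 1000 is stored as 2^48 - 1000.
+ */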
+static pmc_value_t
+iap_perfctr_value_to_reload_count(pmc_value_t v)
+{
+ v &= (1ULL << core_iap_width) - 1;
+ return (1ULL << core_iap_width) - v;
+}
+
+static pmc_value_t
+iap_reload_count_to_perfctr_value(pmc_value_t rlc)
+{
+ return (1ULL << core_iap_width) - rlc;
+}
+
+static int
+iap_pmc_has_overflowed(int ri)
+{
+ uint64_t v;
+
+ /*
+	 * We treat a Core (i.e., Intel architecture v1) PMC as having
+	 * overflowed if its MSB is zero.
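+	 *
+	 * Sampling counters are loaded with (2^width - reload), which
+	 * has its MSB set for the reload counts in normal use, so a
+	 * clear MSB indicates the counter has wrapped past zero.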
+ */
+ v = rdpmc(ri);
+ return ((v & (1ULL << (core_iap_width - 1))) == 0);
+}
+
+/*
+ * Check an event against the set of supported architectural events.
+ *
+ * Returns 1 if the event is architectural and unsupported on this
+ * CPU. Returns 0 otherwise.
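+ *
+ * core_architectural_events is assumed to be populated at module
+ * initialization (from CPUID leaf 0AH) with one bit set for each
+ * architectural event this CPU implements.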
+ */
+
+static int
+iap_architectural_event_is_unsupported(enum pmc_event pe)
+{
+ enum core_arch_events ae;
+
+ switch (pe) {
+ case PMC_EV_IAP_EVENT_3CH_00H:
+ ae = CORE_AE_UNHALTED_CORE_CYCLES;
+ break;
+ case PMC_EV_IAP_EVENT_C0H_00H:
+ ae = CORE_AE_INSTRUCTION_RETIRED;
+ break;
+ case PMC_EV_IAP_EVENT_3CH_01H:
+ ae = CORE_AE_UNHALTED_REFERENCE_CYCLES;
+ break;
+ case PMC_EV_IAP_EVENT_2EH_4FH:
+ ae = CORE_AE_LLC_REFERENCE;
+ break;
+ case PMC_EV_IAP_EVENT_2EH_41H:
+ ae = CORE_AE_LLC_MISSES;
+ break;
+ case PMC_EV_IAP_EVENT_C4H_00H:
+ ae = CORE_AE_BRANCH_INSTRUCTION_RETIRED;
+ break;
+ case PMC_EV_IAP_EVENT_C5H_00H:
+ ae = CORE_AE_BRANCH_MISSES_RETIRED;
+ break;
+
+	default:	/* Non-architectural event. */
+ return (0);
+ }
+
+ return ((core_architectural_events & (1 << ae)) == 0);
+}
+
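+/*
+ * The iap_event_*_ok_on_counter() helpers return non-zero if event
+ * 'pe' may be bound to counter (row index) 'ri'.  Each builds a mask
+ * with bit 'i' set if counter 'i' can count the event; a mask of
+ * 0x3, for example, restricts an event to counters 0 and 1.
+ */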
+static int
+iap_event_corei7_ok_on_counter(enum pmc_event pe, int ri)
+{
+ uint32_t mask;
+
+ switch (pe) {
+ /*
+ * Events valid only on counter 0, 1.
+ */
+ case PMC_EV_IAP_EVENT_40H_01H:
+ case PMC_EV_IAP_EVENT_40H_02H:
+ case PMC_EV_IAP_EVENT_40H_04H:
+ case PMC_EV_IAP_EVENT_40H_08H:
+ case PMC_EV_IAP_EVENT_40H_0FH:
+ case PMC_EV_IAP_EVENT_41H_02H:
+ case PMC_EV_IAP_EVENT_41H_04H:
+ case PMC_EV_IAP_EVENT_41H_08H:
+ case PMC_EV_IAP_EVENT_42H_01H:
+ case PMC_EV_IAP_EVENT_42H_02H:
+ case PMC_EV_IAP_EVENT_42H_04H:
+ case PMC_EV_IAP_EVENT_42H_08H:
+ case PMC_EV_IAP_EVENT_43H_01H:
+ case PMC_EV_IAP_EVENT_43H_02H:
+ case PMC_EV_IAP_EVENT_51H_01H:
+ case PMC_EV_IAP_EVENT_51H_02H:
+ case PMC_EV_IAP_EVENT_51H_04H:
+ case PMC_EV_IAP_EVENT_51H_08H:
+ case PMC_EV_IAP_EVENT_63H_01H:
+ case PMC_EV_IAP_EVENT_63H_02H:
+ mask = 0x3;
+ break;
+
+ default:
+ mask = ~0; /* Any row index is ok. */
+ }
+
+ return (mask & (1 << ri));
+}
+
+static int
+iap_event_westmere_ok_on_counter(enum pmc_event pe, int ri)
+{
+ uint32_t mask;
+
+ switch (pe) {
+ /*
+ * Events valid only on counter 0.
+ */
+ case PMC_EV_IAP_EVENT_60H_01H:
+ case PMC_EV_IAP_EVENT_60H_02H:
+ case PMC_EV_IAP_EVENT_60H_04H:
+ case PMC_EV_IAP_EVENT_60H_08H:
+ case PMC_EV_IAP_EVENT_B3H_01H:
+ case PMC_EV_IAP_EVENT_B3H_02H:
+ case PMC_EV_IAP_EVENT_B3H_04H:
+ mask = 0x1;
+ break;
+
+ /*
+ * Events valid only on counter 0, 1.
+ */
+ case PMC_EV_IAP_EVENT_4CH_01H:
+ case PMC_EV_IAP_EVENT_4EH_01H:
+ case PMC_EV_IAP_EVENT_4EH_02H:
+ case PMC_EV_IAP_EVENT_4EH_04H:
+ case PMC_EV_IAP_EVENT_51H_01H:
+ case PMC_EV_IAP_EVENT_51H_02H:
+ case PMC_EV_IAP_EVENT_51H_04H:
+ case PMC_EV_IAP_EVENT_51H_08H:
+ case PMC_EV_IAP_EVENT_63H_01H:
+ case PMC_EV_IAP_EVENT_63H_02H:
+ mask = 0x3;
+ break;
+
+ default:
+ mask = ~0; /* Any row index is ok. */
+ }
+
+ return (mask & (1 << ri));
+}
+
+static int
+iap_event_sb_sbx_ib_ibx_ok_on_counter(enum pmc_event pe, int ri)
+{
+ uint32_t mask;
+
+ switch (pe) {
+ /* Events valid only on counter 0. */
+ case PMC_EV_IAP_EVENT_B7H_01H:
+ mask = 0x1;
+ break;
+ /* Events valid only on counter 1. */
+ case PMC_EV_IAP_EVENT_C0H_01H:
+		mask = 0x2;
+ break;
+ /* Events valid only on counter 2. */
+ case PMC_EV_IAP_EVENT_48H_01H:
+ case PMC_EV_IAP_EVENT_A2H_02H:
+ mask = 0x4;
+ break;
+ /* Events valid only on counter 3. */
+ case PMC_EV_IAP_EVENT_A3H_08H:
+ case PMC_EV_IAP_EVENT_BBH_01H:
+ case PMC_EV_IAP_EVENT_CDH_01H:
+ case PMC_EV_IAP_EVENT_CDH_02H:
+ mask = 0x8;
+ break;
+ default:
+ mask = ~0; /* Any row index is ok. */
+ }
+
+ return (mask & (1 << ri));
+}
+
+static int
+iap_event_ok_on_counter(enum pmc_event pe, int ri)
+{
+ uint32_t mask;
+
+ switch (pe) {
+ /*
+ * Events valid only on counter 0.
+ */
+ case PMC_EV_IAP_EVENT_10H_00H:
+ case PMC_EV_IAP_EVENT_14H_00H:
+ case PMC_EV_IAP_EVENT_18H_00H:
+ case PMC_EV_IAP_EVENT_B3H_01H:
+ case PMC_EV_IAP_EVENT_B3H_02H:
+ case PMC_EV_IAP_EVENT_B3H_04H:
+ case PMC_EV_IAP_EVENT_C1H_00H:
+ case PMC_EV_IAP_EVENT_CBH_01H:
+ case PMC_EV_IAP_EVENT_CBH_02H:
+ mask = (1 << 0);
+ break;
+
+ /*
+ * Events valid only on counter 1.
+ */
+ case PMC_EV_IAP_EVENT_11H_00H:
+ case PMC_EV_IAP_EVENT_12H_00H:
+ case PMC_EV_IAP_EVENT_13H_00H:
+ mask = (1 << 1);
+ break;
+
+ default:
+ mask = ~0; /* Any row index is ok. */
+ }
+
+ return (mask & (1 << ri));
+}
+
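+/*
+ * Allocate a PMC: validate the requested capabilities and event,
+ * check the counter-placement constraints of the current CPU model,
+ * and assemble the event select value that iap_start_pmc() will
+ * write to the hardware.
+ */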
+static int
+iap_allocate_pmc(int cpu, int ri, struct pmc *pm,
+ const struct pmc_op_pmcallocate *a)
+{
+ int n, model;
+ enum pmc_event ev;
+ struct iap_event_descr *ie;
+ uint32_t c, caps, config, cpuflag, evsel, mask;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[core,%d] illegal CPU %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < core_iap_npmc,
+ ("[core,%d] illegal row-index value %d", __LINE__, ri));
+
+ /* check requested capabilities */
+ caps = a->pm_caps;
+ if ((IAP_PMC_CAPS & caps) != caps)
+ return (EPERM);
+
+ ev = pm->pm_event;
+
+ if (iap_architectural_event_is_unsupported(ev))
+ return (EOPNOTSUPP);
+
+ /*
+ * A small number of events are not supported in all the
+ * processors based on a given microarchitecture.
+ */
+ if (ev == PMC_EV_IAP_EVENT_0FH_01H || ev == PMC_EV_IAP_EVENT_0FH_80H) {
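+		/*
+		 * 'model' combines the CPUID extended-model (bits
+		 * 19:16) and model (bits 7:4) signature fields; model
+		 * 0x2E is believed to denote the Nehalem-EX parts.
+		 */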
+ model = ((cpu_id & 0xF0000) >> 12) | ((cpu_id & 0xF0) >> 4);
+ if (core_cputype == PMC_CPU_INTEL_COREI7 && model != 0x2E)
+ return (EINVAL);
+ }
+
+ switch (core_cputype) {
+ case PMC_CPU_INTEL_COREI7:
+ if (iap_event_corei7_ok_on_counter(ev, ri) == 0)
+ return (EINVAL);
+ break;
+ case PMC_CPU_INTEL_SANDYBRIDGE:
+ case PMC_CPU_INTEL_SANDYBRIDGE_XEON:
+ case PMC_CPU_INTEL_IVYBRIDGE:
+ case PMC_CPU_INTEL_IVYBRIDGE_XEON:
+ if (iap_event_sb_sbx_ib_ibx_ok_on_counter(ev, ri) == 0)
+ return (EINVAL);
+ break;
+ case PMC_CPU_INTEL_WESTMERE:
+ if (iap_event_westmere_ok_on_counter(ev, ri) == 0)
+ return (EINVAL);
+ break;
+ default:
+ if (iap_event_ok_on_counter(ev, ri) == 0)
+ return (EINVAL);
+ }
+
+ /*
+ * Look for an event descriptor with matching CPU and event id
+ * fields.
+ */
+
+ switch (core_cputype) {
+ default:
+ case PMC_CPU_INTEL_ATOM:
+ cpuflag = IAP_F_CA;
+ break;
+ case PMC_CPU_INTEL_CORE:
+ cpuflag = IAP_F_CC;
+ break;
+ case PMC_CPU_INTEL_CORE2:
+ cpuflag = IAP_F_CC2;
+ break;
+ case PMC_CPU_INTEL_CORE2EXTREME:
+ cpuflag = IAP_F_CC2 | IAP_F_CC2E;
+ break;
+ case PMC_CPU_INTEL_COREI7:
+ cpuflag = IAP_F_I7;
+ break;
+ case PMC_CPU_INTEL_IVYBRIDGE:
+ cpuflag = IAP_F_IB;
+ break;
+ case PMC_CPU_INTEL_IVYBRIDGE_XEON:
+ cpuflag = IAP_F_IBX;
+ break;
+ case PMC_CPU_INTEL_SANDYBRIDGE:
+ cpuflag = IAP_F_SB;
+ break;
+ case PMC_CPU_INTEL_SANDYBRIDGE_XEON:
+ cpuflag = IAP_F_SBX;
+ break;
+ case PMC_CPU_INTEL_WESTMERE:
+ cpuflag = IAP_F_WM;
+ break;
+ }
+
+ for (n = 0, ie = iap_events; n < niap_events; n++, ie++)
+ if (ie->iap_ev == ev && ie->iap_flags & cpuflag)
+ break;
+
+ if (n == niap_events)
+ return (EINVAL);
+
+ /*
+ * A matching event descriptor has been found, so start
+ * assembling the contents of the event select register.
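+	 * The event code fills bits 7:0 of the event select register;
+	 * the remaining fields are merged in below.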
+ */
+ evsel = ie->iap_evcode;
+
+ config = a->pm_md.pm_iap.pm_iap_config & ~IAP_F_CMASK;
+
+ /*
+ * If the event uses a fixed umask value, reject any umask
+ * bits set by the user.
+ */
+ if (ie->iap_flags & IAP_F_FM) {
+
+ if (IAP_UMASK(config) != 0)
+ return (EINVAL);
+
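+		/* A fixed unit mask occupies bits 15:8 of the event select. */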
+ evsel |= (ie->iap_umask << 8);
+
+ } else {
+
+ /*
+ * Otherwise, the UMASK value needs to be taken from
+ * the MD fields of the allocation request. Reject
+ * requests that specify reserved bits.
+ */
+
+ mask = 0;
+
+ if (ie->iap_umask & IAP_M_CORE) {
+ if ((c = (config & IAP_F_CORE)) != IAP_CORE_ALL &&
+ c != IAP_CORE_THIS)
+ return (EINVAL);
+ mask |= IAP_F_CORE;
+ }
+
+ if (ie->iap_umask & IAP_M_AGENT)
+ mask |= IAP_F_AGENT;
+
+ if (ie->iap_umask & IAP_M_PREFETCH) {
+
+ if ((c = (config & IAP_F_PREFETCH)) ==
+ IAP_PREFETCH_RESERVED)
+ return (EINVAL);
+
+ mask |= IAP_F_PREFETCH;
+ }
+
+ if (ie->iap_umask & IAP_M_MESI)
+ mask |= IAP_F_MESI;
+
+ if (ie->iap_umask & IAP_M_SNOOPRESPONSE)
+ mask |= IAP_F_SNOOPRESPONSE;
+
+ if (ie->iap_umask & IAP_M_SNOOPTYPE)
+ mask |= IAP_F_SNOOPTYPE;
+
+ if (ie->iap_umask & IAP_M_TRANSITION)
+ mask |= IAP_F_TRANSITION;
+
+ /*
+ * If bits outside of the allowed set of umask bits
+ * are set, reject the request.
+ */
+ if (config & ~mask)
+ return (EINVAL);
+
+ evsel |= (config & mask);
+
+ }
+
+ /*
+ * Only Atom and SandyBridge CPUs support the 'ANY' qualifier.
+ */
+ if (core_cputype == PMC_CPU_INTEL_ATOM ||
+ core_cputype == PMC_CPU_INTEL_SANDYBRIDGE ||
+ core_cputype == PMC_CPU_INTEL_SANDYBRIDGE_XEON)
+ evsel |= (config & IAP_ANY);
+ else if (config & IAP_ANY)
+ return (EINVAL);
+
+ /*
+ * Check offcore response configuration.
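+	 *
+	 * B7H_01H and BBH_01H are the OFFCORE_RESPONSE events; the
+	 * auxiliary request/response mask supplied here is written to
+	 * the matching OFFCORE_RSP MSR when the PMC is started.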
+ */
+ if (a->pm_md.pm_iap.pm_iap_rsp != 0) {
+ if (ev != PMC_EV_IAP_EVENT_B7H_01H &&
+ ev != PMC_EV_IAP_EVENT_BBH_01H)
+ return (EINVAL);
+ if (core_cputype == PMC_CPU_INTEL_COREI7 &&
+ ev == PMC_EV_IAP_EVENT_BBH_01H)
+ return (EINVAL);
+ if ((core_cputype == PMC_CPU_INTEL_COREI7 ||
+ core_cputype == PMC_CPU_INTEL_WESTMERE) &&
+ a->pm_md.pm_iap.pm_iap_rsp & ~IA_OFFCORE_RSP_MASK_I7WM)
+ return (EINVAL);
+ else if ((core_cputype == PMC_CPU_INTEL_SANDYBRIDGE ||
+ core_cputype == PMC_CPU_INTEL_SANDYBRIDGE_XEON ||
+ core_cputype == PMC_CPU_INTEL_IVYBRIDGE ||
+ core_cputype == PMC_CPU_INTEL_IVYBRIDGE_XEON) &&
+ a->pm_md.pm_iap.pm_iap_rsp & ~IA_OFFCORE_RSP_MASK_SBIB)
+ return (EINVAL);
+ pm->pm_md.pm_iap.pm_iap_rsp = a->pm_md.pm_iap.pm_iap_rsp;
+ }
+
+ if (caps & PMC_CAP_THRESHOLD)
+ evsel |= (a->pm_md.pm_iap.pm_iap_config & IAP_F_CMASK);
+ if (caps & PMC_CAP_USER)
+ evsel |= IAP_USR;
+ if (caps & PMC_CAP_SYSTEM)
+ evsel |= IAP_OS;
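+	/* Count in both rings if neither USR nor OS was requested. */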
+ if ((caps & (PMC_CAP_USER | PMC_CAP_SYSTEM)) == 0)
+ evsel |= (IAP_OS | IAP_USR);
+ if (caps & PMC_CAP_EDGE)
+ evsel |= IAP_EDGE;
+ if (caps & PMC_CAP_INVERT)
+ evsel |= IAP_INV;
+ if (caps & PMC_CAP_INTERRUPT)
+ evsel |= IAP_INT;
+
+ pm->pm_md.pm_iap.pm_iap_evsel = evsel;
+
+ return (0);
+}
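+
+/*
+ * Illustrative sketch (event encoding and caps are hypothetical, not
+ * taken from the event table): for an event with code 0x3C, a fixed
+ * umask of 0x01, and caps PMC_CAP_USER|PMC_CAP_SYSTEM|PMC_CAP_INTERRUPT,
+ * the routine above assembles:
+ *
+ *	evsel = IAP_EVSEL(0x3C) | (0x01 << 8) | IAP_USR | IAP_OS | IAP_INT;
+ *
+ * iap_start_pmc() later ORs in IAP_EN before writing the value to
+ * IAP_EVSEL0+ri.
+ */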
+
+static int
+iap_config_pmc(int cpu, int ri, struct pmc *pm)
+{
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[core,%d] illegal CPU %d", __LINE__, cpu));
+
+ KASSERT(ri >= 0 && ri < core_iap_npmc,
+ ("[core,%d] illegal row-index %d", __LINE__, ri));
+
+ PMCDBG(MDP,CFG,1, "iap-config cpu=%d ri=%d pm=%p", cpu, ri, pm);
+
+ KASSERT(core_pcpu[cpu] != NULL, ("[core,%d] null per-cpu %d", __LINE__,
+ cpu));
+
+ core_pcpu[cpu]->pc_corepmcs[ri].phw_pmc = pm;
+
+ return (0);
+}
+
+static int
+iap_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc)
+{
+ int error;
+ struct pmc_hw *phw;
+ char iap_name[PMC_NAME_MAX];
+
+ phw = &core_pcpu[cpu]->pc_corepmcs[ri];
+
+ (void) snprintf(iap_name, sizeof(iap_name), "IAP-%d", ri);
+ if ((error = copystr(iap_name, pi->pm_name, PMC_NAME_MAX,
+ NULL)) != 0)
+ return (error);
+
+ pi->pm_class = PMC_CLASS_IAP;
+
+ if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) {
+ pi->pm_enabled = TRUE;
+ *ppmc = phw->phw_pmc;
+ } else {
+ pi->pm_enabled = FALSE;
+ *ppmc = NULL;
+ }
+
+ return (0);
+}
+
+static int
+iap_get_config(int cpu, int ri, struct pmc **ppm)
+{
+ *ppm = core_pcpu[cpu]->pc_corepmcs[ri].phw_pmc;
+
+ return (0);
+}
+
+static int
+iap_get_msr(int ri, uint32_t *msr)
+{
+ KASSERT(ri >= 0 && ri < core_iap_npmc,
+ ("[iap,%d] ri %d out of range", __LINE__, ri));
+
+ *msr = ri;
+
+ return (0);
+}
+
+static int
+iap_read_pmc(int cpu, int ri, pmc_value_t *v)
+{
+ struct pmc *pm;
+ pmc_value_t tmp;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[core,%d] illegal cpu value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < core_iap_npmc,
+ ("[core,%d] illegal row-index %d", __LINE__, ri));
+
+ pm = core_pcpu[cpu]->pc_corepmcs[ri].phw_pmc;
+
+ KASSERT(pm,
+ ("[core,%d] cpu %d ri %d pmc not configured", __LINE__, cpu,
+ ri));
+
+ tmp = rdpmc(ri);
+ if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
+ *v = iap_perfctr_value_to_reload_count(tmp);
+ else
+ *v = tmp & ((1ULL << core_iap_width) - 1);
+
+	PMCDBG(MDP,REA,1, "iap-read cpu=%d ri=%d msr=0x%x -> v=%jx", cpu, ri,
+	    IAP_PMC0 + ri, *v);
+
+ return (0);
+}
+
+static int
+iap_release_pmc(int cpu, int ri, struct pmc *pm)
+{
+ (void) pm;
+
+ PMCDBG(MDP,REL,1, "iap-release cpu=%d ri=%d pm=%p", cpu, ri,
+ pm);
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[core,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < core_iap_npmc,
+ ("[core,%d] illegal row-index %d", __LINE__, ri));
+
+ KASSERT(core_pcpu[cpu]->pc_corepmcs[ri].phw_pmc
+ == NULL, ("[core,%d] PHW pmc non-NULL", __LINE__));
+
+ return (0);
+}
+
+static int
+iap_start_pmc(int cpu, int ri)
+{
+ struct pmc *pm;
+ uint32_t evsel;
+ struct core_cpu *cc;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[core,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < core_iap_npmc,
+ ("[core,%d] illegal row-index %d", __LINE__, ri));
+
+ cc = core_pcpu[cpu];
+ pm = cc->pc_corepmcs[ri].phw_pmc;
+
+ KASSERT(pm,
+ ("[core,%d] starting cpu%d,ri%d with no pmc configured",
+ __LINE__, cpu, ri));
+
+ PMCDBG(MDP,STA,1, "iap-start cpu=%d ri=%d", cpu, ri);
+
+ evsel = pm->pm_md.pm_iap.pm_iap_evsel;
+
+ PMCDBG(MDP,STA,2, "iap-start/2 cpu=%d ri=%d evselmsr=0x%x evsel=0x%x",
+ cpu, ri, IAP_EVSEL0 + ri, evsel);
+
+ /* Event specific configuration. */
+ switch (pm->pm_event) {
+ case PMC_EV_IAP_EVENT_B7H_01H:
+ wrmsr(IA_OFFCORE_RSP0, pm->pm_md.pm_iap.pm_iap_rsp);
+ break;
+ case PMC_EV_IAP_EVENT_BBH_01H:
+ wrmsr(IA_OFFCORE_RSP1, pm->pm_md.pm_iap.pm_iap_rsp);
+ break;
+ default:
+ break;
+ }
+
+ wrmsr(IAP_EVSEL0 + ri, evsel | IAP_EN);
+
+ if (core_cputype == PMC_CPU_INTEL_CORE)
+ return (0);
+
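+	/*
+	 * The loop below guards against a PMI racing with this
+	 * update: core2_intr() sets pc_resync after it rewrites
+	 * IA_GLOBAL_CTRL, forcing the write of the cached
+	 * pc_globalctrl value to be retried.
+	 */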
+ do {
+ cc->pc_resync = 0;
+ cc->pc_globalctrl |= (1ULL << ri);
+ wrmsr(IA_GLOBAL_CTRL, cc->pc_globalctrl);
+ } while (cc->pc_resync != 0);
+
+ return (0);
+}
+
+static int
+iap_stop_pmc(int cpu, int ri)
+{
+ struct pmc *pm;
+ struct core_cpu *cc;
+ uint64_t msr;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[core,%d] illegal cpu value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < core_iap_npmc,
+ ("[core,%d] illegal row index %d", __LINE__, ri));
+
+ cc = core_pcpu[cpu];
+ pm = cc->pc_corepmcs[ri].phw_pmc;
+
+ KASSERT(pm,
+ ("[core,%d] cpu%d ri%d no configured PMC to stop", __LINE__,
+ cpu, ri));
+
+ PMCDBG(MDP,STO,1, "iap-stop cpu=%d ri=%d", cpu, ri);
+
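+	/*
+	 * Clearing every bit covered by IAP_EVSEL_MASK (including
+	 * IAP_EN) is what stops the counter; only the register's
+	 * reserved bits survive the write below.
+	 */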
+ msr = rdmsr(IAP_EVSEL0 + ri) & ~IAP_EVSEL_MASK;
+ wrmsr(IAP_EVSEL0 + ri, msr); /* stop hw */
+
+ if (core_cputype == PMC_CPU_INTEL_CORE)
+ return (0);
+
+	do {
+		cc->pc_resync = 0;
+		cc->pc_globalctrl &= ~(1ULL << ri);
+		msr = rdmsr(IA_GLOBAL_CTRL) & ~IA_GLOBAL_CTRL_MASK;
+		wrmsr(IA_GLOBAL_CTRL, msr |
+		    (cc->pc_globalctrl & IA_GLOBAL_CTRL_MASK));
+	} while (cc->pc_resync != 0);
+
+ return (0);
+}
+
+static int
+iap_write_pmc(int cpu, int ri, pmc_value_t v)
+{
+ struct pmc *pm;
+ struct core_cpu *cc;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[core,%d] illegal cpu value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < core_iap_npmc,
+ ("[core,%d] illegal row index %d", __LINE__, ri));
+
+ cc = core_pcpu[cpu];
+ pm = cc->pc_corepmcs[ri].phw_pmc;
+
+ KASSERT(pm,
+ ("[core,%d] cpu%d ri%d no configured PMC to stop", __LINE__,
+ cpu, ri));
+
+ PMCDBG(MDP,WRI,1, "iap-write cpu=%d ri=%d msr=0x%x v=%jx", cpu, ri,
+ IAP_PMC0 + ri, v);
+
+ if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
+ v = iap_reload_count_to_perfctr_value(v);
+
+ /*
+ * Write the new value to the counter. The counter will be in
+ * a stopped state when the pcd_write() entry point is called.
+ */
+
+ wrmsr(IAP_PMC0 + ri, v & ((1ULL << core_iap_width) - 1));
+
+ return (0);
+}
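+
+/*
+ * A worked example of the sampling-mode conversion used above,
+ * assuming a 40-bit counter (core_iap_width == 40) and a reload
+ * count of 100000:
+ *
+ *	v = (1ULL << 40) - 100000;
+ *
+ * The counter then overflows, raising a PMI, after exactly 100000
+ * events; the interrupt handler writes the same value back to
+ * re-arm it.
+ */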
+
+
+static void
+iap_initialize(struct pmc_mdep *md, int maxcpu, int npmc, int pmcwidth,
+ int flags)
+{
+ struct pmc_classdep *pcd;
+
+ KASSERT(md != NULL, ("[iap,%d] md is NULL", __LINE__));
+
+ PMCDBG(MDP,INI,1, "%s", "iap-initialize");
+
+	/*
+	 * Remember the set of architectural events supported; a set
+	 * bit in CPUID.0AH:EBX marks an event that is _not_
+	 * available, hence the complement.
+	 */
+	core_architectural_events = ~flags;
+
+ pcd = &md->pmd_classdep[PMC_MDEP_CLASS_INDEX_IAP];
+
+ pcd->pcd_caps = IAP_PMC_CAPS;
+ pcd->pcd_class = PMC_CLASS_IAP;
+ pcd->pcd_num = npmc;
+ pcd->pcd_ri = md->pmd_npmc;
+ pcd->pcd_width = pmcwidth;
+
+ pcd->pcd_allocate_pmc = iap_allocate_pmc;
+ pcd->pcd_config_pmc = iap_config_pmc;
+ pcd->pcd_describe = iap_describe;
+ pcd->pcd_get_config = iap_get_config;
+ pcd->pcd_get_msr = iap_get_msr;
+ pcd->pcd_pcpu_fini = core_pcpu_fini;
+ pcd->pcd_pcpu_init = core_pcpu_init;
+ pcd->pcd_read_pmc = iap_read_pmc;
+ pcd->pcd_release_pmc = iap_release_pmc;
+ pcd->pcd_start_pmc = iap_start_pmc;
+ pcd->pcd_stop_pmc = iap_stop_pmc;
+ pcd->pcd_write_pmc = iap_write_pmc;
+
+ md->pmd_npmc += npmc;
+}
+
+static int
+core_intr(int cpu, struct trapframe *tf)
+{
+ pmc_value_t v;
+ struct pmc *pm;
+ struct core_cpu *cc;
+ int error, found_interrupt, ri;
+ uint64_t msr;
+
+ PMCDBG(MDP,INT, 1, "cpu=%d tf=0x%p um=%d", cpu, (void *) tf,
+ TRAPF_USERMODE(tf));
+
+ found_interrupt = 0;
+ cc = core_pcpu[cpu];
+
+ for (ri = 0; ri < core_iap_npmc; ri++) {
+
+ if ((pm = cc->pc_corepmcs[ri].phw_pmc) == NULL ||
+ !PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
+ continue;
+
+ if (!iap_pmc_has_overflowed(ri))
+ continue;
+
+ found_interrupt = 1;
+
+ if (pm->pm_state != PMC_STATE_RUNNING)
+ continue;
+
+ error = pmc_process_interrupt(cpu, PMC_HR, pm, tf,
+ TRAPF_USERMODE(tf));
+
+ v = pm->pm_sc.pm_reloadcount;
+		v = iap_reload_count_to_perfctr_value(v);
+
+ /*
+ * Stop the counter, reload it but only restart it if
+ * the PMC is not stalled.
+ */
+ msr = rdmsr(IAP_EVSEL0 + ri) & ~IAP_EVSEL_MASK;
+ wrmsr(IAP_EVSEL0 + ri, msr);
+ wrmsr(IAP_PMC0 + ri, v);
+
+ if (error)
+ continue;
+
+ wrmsr(IAP_EVSEL0 + ri, msr | (pm->pm_md.pm_iap.pm_iap_evsel |
+ IAP_EN));
+ }
+
+ if (found_interrupt)
+ lapic_reenable_pmc();
+
+ atomic_add_int(found_interrupt ? &pmc_stats.pm_intr_processed :
+ &pmc_stats.pm_intr_ignored, 1);
+
+ return (found_interrupt);
+}
+
+static int
+core2_intr(int cpu, struct trapframe *tf)
+{
+ int error, found_interrupt, n;
+ uint64_t flag, intrstatus, intrenable, msr;
+ struct pmc *pm;
+ struct core_cpu *cc;
+ pmc_value_t v;
+
+ PMCDBG(MDP,INT, 1, "cpu=%d tf=0x%p um=%d", cpu, (void *) tf,
+ TRAPF_USERMODE(tf));
+
+ /*
+ * The IA_GLOBAL_STATUS (MSR 0x38E) register indicates which
+ * PMCs have a pending PMI interrupt. We take a 'snapshot' of
+ * the current set of interrupting PMCs and process these
+ * after stopping them.
+ */
+ intrstatus = rdmsr(IA_GLOBAL_STATUS);
+ intrenable = intrstatus & core_pmcmask;
+
+ PMCDBG(MDP,INT, 1, "cpu=%d intrstatus=%jx", cpu,
+ (uintmax_t) intrstatus);
+
+ found_interrupt = 0;
+ cc = core_pcpu[cpu];
+
+ KASSERT(cc != NULL, ("[core,%d] null pcpu", __LINE__));
+
+ cc->pc_globalctrl &= ~intrenable;
+ cc->pc_resync = 1; /* MSRs now potentially out of sync. */
+
+ /*
+ * Stop PMCs and clear overflow status bits.
+ */
+ msr = rdmsr(IA_GLOBAL_CTRL) & ~IA_GLOBAL_CTRL_MASK;
+ wrmsr(IA_GLOBAL_CTRL, msr);
+ wrmsr(IA_GLOBAL_OVF_CTRL, intrenable |
+ IA_GLOBAL_STATUS_FLAG_OVFBUF |
+ IA_GLOBAL_STATUS_FLAG_CONDCHG);
+
+ /*
+ * Look for interrupts from fixed function PMCs.
+ */
+ for (n = 0, flag = (1ULL << IAF_OFFSET); n < core_iaf_npmc;
+ n++, flag <<= 1) {
+
+ if ((intrstatus & flag) == 0)
+ continue;
+
+ found_interrupt = 1;
+
+ pm = cc->pc_corepmcs[n + core_iaf_ri].phw_pmc;
+ if (pm == NULL || pm->pm_state != PMC_STATE_RUNNING ||
+ !PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
+ continue;
+
+ error = pmc_process_interrupt(cpu, PMC_HR, pm, tf,
+ TRAPF_USERMODE(tf));
+ if (error)
+ intrenable &= ~flag;
+
+ v = iaf_reload_count_to_perfctr_value(pm->pm_sc.pm_reloadcount);
+
+ /* Reload sampling count. */
+ wrmsr(IAF_CTR0 + n, v);
+
+ PMCDBG(MDP,INT, 1, "iaf-intr cpu=%d error=%d v=%jx(%jx)", cpu, error,
+ (uintmax_t) v, (uintmax_t) rdpmc(IAF_RI_TO_MSR(n)));
+ }
+
+ /*
+ * Process interrupts from the programmable counters.
+ */
+ for (n = 0, flag = 1; n < core_iap_npmc; n++, flag <<= 1) {
+ if ((intrstatus & flag) == 0)
+ continue;
+
+ found_interrupt = 1;
+
+ pm = cc->pc_corepmcs[n].phw_pmc;
+ if (pm == NULL || pm->pm_state != PMC_STATE_RUNNING ||
+ !PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
+ continue;
+
+ error = pmc_process_interrupt(cpu, PMC_HR, pm, tf,
+ TRAPF_USERMODE(tf));
+ if (error)
+ intrenable &= ~flag;
+
+ v = iap_reload_count_to_perfctr_value(pm->pm_sc.pm_reloadcount);
+
+ PMCDBG(MDP,INT, 1, "iap-intr cpu=%d error=%d v=%jx", cpu, error,
+ (uintmax_t) v);
+
+ /* Reload sampling count. */
+ wrmsr(IAP_PMC0 + n, v);
+ }
+
+ /*
+ * Reenable all non-stalled PMCs.
+ */
+ PMCDBG(MDP,INT, 1, "cpu=%d intrenable=%jx", cpu,
+ (uintmax_t) intrenable);
+
+ cc->pc_globalctrl |= intrenable;
+
+ wrmsr(IA_GLOBAL_CTRL, cc->pc_globalctrl & IA_GLOBAL_CTRL_MASK);
+
+ PMCDBG(MDP,INT, 1, "cpu=%d fixedctrl=%jx globalctrl=%jx status=%jx "
+ "ovf=%jx", cpu, (uintmax_t) rdmsr(IAF_CTRL),
+ (uintmax_t) rdmsr(IA_GLOBAL_CTRL),
+ (uintmax_t) rdmsr(IA_GLOBAL_STATUS),
+ (uintmax_t) rdmsr(IA_GLOBAL_OVF_CTRL));
+
+ if (found_interrupt)
+ lapic_reenable_pmc();
+
+ atomic_add_int(found_interrupt ? &pmc_stats.pm_intr_processed :
+ &pmc_stats.pm_intr_ignored, 1);
+
+ return (found_interrupt);
+}
+
+int
+pmc_core_initialize(struct pmc_mdep *md, int maxcpu)
+{
+ int cpuid[CORE_CPUID_REQUEST_SIZE];
+ int ipa_version, flags, nflags;
+
+ do_cpuid(CORE_CPUID_REQUEST, cpuid);
+
+ ipa_version = cpuid[CORE_CPUID_EAX] & 0xFF;
+
+ PMCDBG(MDP,INI,1,"core-init cputype=%d ncpu=%d ipa-version=%d",
+ md->pmd_cputype, maxcpu, ipa_version);
+
+ if (ipa_version < 1 || ipa_version > 3) {
+ /* Unknown PMC architecture. */
+ printf("hwpc_core: unknown PMC architecture: %d\n",
+ ipa_version);
+ return (EPROGMISMATCH);
+ }
+
+ core_cputype = md->pmd_cputype;
+
+ core_pmcmask = 0;
+
+ /*
+ * Initialize programmable counters.
+ */
+ KASSERT(ipa_version >= 1,
+ ("[core,%d] ipa_version %d too small", __LINE__, ipa_version));
+
+ core_iap_npmc = (cpuid[CORE_CPUID_EAX] >> 8) & 0xFF;
+ core_iap_width = (cpuid[CORE_CPUID_EAX] >> 16) & 0xFF;
+
+ core_pmcmask |= ((1ULL << core_iap_npmc) - 1);
+
+ nflags = (cpuid[CORE_CPUID_EAX] >> 24) & 0xFF;
+ flags = cpuid[CORE_CPUID_EBX] & ((1 << nflags) - 1);
+
+ iap_initialize(md, maxcpu, core_iap_npmc, core_iap_width, flags);
+
+ /*
+ * Initialize fixed function counters, if present.
+ */
+ if (core_cputype != PMC_CPU_INTEL_CORE) {
+ KASSERT(ipa_version >= 2,
+ ("[core,%d] ipa_version %d too small", __LINE__,
+ ipa_version));
+
+ core_iaf_ri = core_iap_npmc;
+ core_iaf_npmc = cpuid[CORE_CPUID_EDX] & 0x1F;
+ core_iaf_width = (cpuid[CORE_CPUID_EDX] >> 5) & 0xFF;
+
+ iaf_initialize(md, maxcpu, core_iaf_npmc, core_iaf_width);
+ core_pmcmask |= ((1ULL << core_iaf_npmc) - 1) << IAF_OFFSET;
+ }
+
+ PMCDBG(MDP,INI,1,"core-init pmcmask=0x%jx iafri=%d", core_pmcmask,
+ core_iaf_ri);
+
+	core_pcpu = malloc(sizeof(struct core_cpu *) * maxcpu, M_PMC,
+ M_ZERO | M_WAITOK);
+
+ /*
+ * Choose the appropriate interrupt handler.
+ */
+ if (ipa_version == 1)
+ md->pmd_intr = core_intr;
+ else
+ md->pmd_intr = core2_intr;
+
+ md->pmd_pcpu_fini = NULL;
+ md->pmd_pcpu_init = NULL;
+
+ return (0);
+}
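+
+/*
+ * For reference, the CPUID.0AH fields decoded above (layout per the
+ * Intel SDM):
+ *
+ *	EAX[7:0]	architectural performance monitoring version
+ *	EAX[15:8]	number of programmable counters per core
+ *	EAX[23:16]	width of the programmable counters
+ *	EAX[31:24]	number of valid EBX flag bits
+ *	EBX		a set bit marks an unsupported arch. event
+ *	EDX[4:0]	number of fixed-function counters (v2 and later)
+ *	EDX[12:5]	width of the fixed-function counters
+ */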
+
+void
+pmc_core_finalize(struct pmc_mdep *md)
+{
+ PMCDBG(MDP,INI,1, "%s", "core-finalize");
+
+ free(core_pcpu, M_PMC);
+ core_pcpu = NULL;
+}
diff --git a/sys/dev/hwpmc/hwpmc_core.h b/sys/dev/hwpmc/hwpmc_core.h
new file mode 100644
index 0000000..334bab7
--- /dev/null
+++ b/sys/dev/hwpmc/hwpmc_core.h
@@ -0,0 +1,191 @@
+/*-
+ * Copyright (c) 2008 Joseph Koshy
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _DEV_HWPMC_CORE_H_
+#define _DEV_HWPMC_CORE_H_ 1
+
+/*
+ * Fixed-function PMCs.
+ */
+struct pmc_md_iaf_op_pmcallocate {
+ uint16_t pm_iaf_flags; /* additional flags */
+};
+
+#define IAF_OS 0x1
+#define IAF_USR 0x2
+#define IAF_ANY 0x4
+#define IAF_PMI 0x8
+
+/*
+ * Programmable PMCs.
+ */
+struct pmc_md_iap_op_pmcallocate {
+ uint32_t pm_iap_config;
+ uint64_t pm_iap_rsp;
+};
+
+#define IAP_EVSEL(C) ((C) & 0xFF)
+#define IAP_UMASK(C) ((C) & 0xFF00)
+#define IAP_USR (1 << 16)
+#define IAP_OS (1 << 17)
+#define IAP_EDGE (1 << 18)
+#define IAP_INT (1 << 20)
+#define IAP_ANY (1 << 21)
+#define IAP_EN (1 << 22)
+#define IAP_INV (1 << 23)
+#define IAP_CMASK(C) (((C) & 0xFF) << 24)
+
+#define IA_OFFCORE_RSP_MASK_I7WM 0x000000F7FF
+#define IA_OFFCORE_RSP_MASK_SBIB 0x3F807F8FFF
+
+#ifdef _KERNEL
+
+/*
+ * Fixed-function counters.
+ */
+
+#define IAF_MASK 0xF
+
+#define IAF_COUNTER_MASK 0x0000ffffffffffff
+#define IAF_CTR0 0x309
+#define IAF_CTR1 0x30A
+#define IAF_CTR2 0x30B
+
+/*
+ * The IAF_CTRL MSR is laid out in the following way.
+ *
+ * Bit Position Use
+ * 63-12	Reserved (do not touch)
+ * 11 Ctr 2 PMI
+ * 10 Reserved (do not touch)
+ * 9-8 Ctr 2 Enable
+ * 7 Ctr 1 PMI
+ * 6 Reserved (do not touch)
+ * 5-4 Ctr 1 Enable
+ * 3 Ctr 0 PMI
+ * 2 Reserved (do not touch)
+ * 1-0 Ctr 0 Enable (3: All Levels, 2: User, 1: OS, 0: Disable)
+ */
+
+#define IAF_OFFSET 32
+#define IAF_CTRL 0x38D
+#define IAF_CTRL_MASK 0x0000000000000bbb
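+
+/*
+ * Example (illustrative): enabling fixed-function counter 0 at all
+ * privilege levels with a PMI on overflow writes 0xB to IAF_CTRL
+ * (enable field, bits 1-0, = 3; PMI bit 3 = 1).
+ */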
+
+/*
+ * Programmable counters.
+ */
+
+#define IAP_PMC0 0x0C1
+
+/*
+ * IAP_EVSEL(n) is laid out in the following way.
+ *
+ * Bit Position Use
+ * 63-32	Reserved (do not touch)
+ * 31-24 Counter Mask
+ * 23 Invert
+ * 22 Enable
+ * 21 Reserved (do not touch)
+ * 20 APIC Interrupt Enable
+ * 19 Pin Control
+ * 18 Edge Detect
+ * 17 OS
+ * 16 User
+ * 15-8 Unit Mask
+ * 7-0 Event Select
+ */
+
+#define IAP_EVSEL_MASK 0x00000000ffdfffff
+#define IAP_EVSEL0 0x186
+
+/*
+ * Simplified programming interface in Intel Performance Architecture
+ * v2 and later.
+ */
+
+#define IA_GLOBAL_STATUS 0x38E
+#define IA_GLOBAL_CTRL 0x38F
+
+/*
+ * IA_GLOBAL_CTRL is laid out in the following way.
+ *
+ * Bit Position Use
+ * 63-35 Reserved (do not touch)
+ * 34 IAF Counter 2 Enable
+ * 33 IAF Counter 1 Enable
+ * 32 IAF Counter 0 Enable
+ * 31-0 Depends on programmable counters
+ */
+
+/* The mask is only for the fixed portion of the register. */
+#define IAF_GLOBAL_CTRL_MASK 0x0000000700000000
+
+/* The mask is only for the programmable portion of the register. */
+#define IAP_GLOBAL_CTRL_MASK 0x00000000ffffffff
+
+/* The mask is for both the fixed and programmable portions of the register. */
+#define IA_GLOBAL_CTRL_MASK 0x00000007ffffffff
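+
+/*
+ * Example (illustrative): enabling fixed-function counter 0 together
+ * with programmable counter 0 sets bit 32 and bit 0:
+ *
+ *	wrmsr(IA_GLOBAL_CTRL, (1ULL << IAF_OFFSET) | (1ULL << 0));
+ */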
+
+#define IA_GLOBAL_OVF_CTRL 0x390
+
+#define IA_GLOBAL_STATUS_FLAG_CONDCHG (1ULL << 63)
+#define IA_GLOBAL_STATUS_FLAG_OVFBUF (1ULL << 62)
+
+/*
+ * Offcore response configuration.
+ */
+#define IA_OFFCORE_RSP0 0x1A6
+#define IA_OFFCORE_RSP1 0x1A7
+
+struct pmc_md_iaf_pmc {
+ uint64_t pm_iaf_ctrl;
+};
+
+struct pmc_md_iap_pmc {
+ uint32_t pm_iap_evsel;
+ uint64_t pm_iap_rsp;
+};
+
+/*
+ * Prototypes.
+ */
+
+int pmc_core_initialize(struct pmc_mdep *_md, int _maxcpu);
+void pmc_core_finalize(struct pmc_mdep *_md);
+
+void pmc_core_mark_started(int _cpu, int _pmc);
+
+int pmc_iaf_initialize(struct pmc_mdep *_md, int _maxcpu, int _npmc, int _width);
+void pmc_iaf_finalize(struct pmc_mdep *_md);
+
+int pmc_iap_initialize(struct pmc_mdep *_md, int _maxcpu, int _npmc, int _width,
+ int _flags);
+void pmc_iap_finalize(struct pmc_mdep *_md);
+
+#endif /* _KERNEL */
+#endif /* _DEV_HWPMC_CORE_H_ */
diff --git a/sys/dev/hwpmc/hwpmc_ia64.c b/sys/dev/hwpmc/hwpmc_ia64.c
new file mode 100644
index 0000000..ce1caf6
--- /dev/null
+++ b/sys/dev/hwpmc/hwpmc_ia64.c
@@ -0,0 +1,66 @@
+/*-
+ * Copyright (c) 2005, Joseph Koshy
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/pmc.h>
+
+#include <machine/pmc_mdep.h>
+
+struct pmc_mdep *
+pmc_md_initialize()
+{
+ return NULL;
+}
+
+void
+pmc_md_finalize(struct pmc_mdep *md)
+{
+ (void) md;
+}
+
+int
+pmc_save_kernel_callchain(uintptr_t *cc, int maxsamples,
+ struct trapframe *tf)
+{
+ (void) cc;
+ (void) maxsamples;
+ (void) tf;
+ return (0);
+}
+
+int
+pmc_save_user_callchain(uintptr_t *cc, int maxsamples,
+ struct trapframe *tf)
+{
+ (void) cc;
+ (void) maxsamples;
+ (void) tf;
+ return (0);
+}
diff --git a/sys/dev/hwpmc/hwpmc_intel.c b/sys/dev/hwpmc/hwpmc_intel.c
new file mode 100644
index 0000000..00ec29e
--- /dev/null
+++ b/sys/dev/hwpmc/hwpmc_intel.c
@@ -0,0 +1,326 @@
+/*-
+ * Copyright (c) 2008 Joseph Koshy
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * Common code for handling Intel CPUs.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/pmc.h>
+#include <sys/pmckern.h>
+#include <sys/systm.h>
+
+#include <machine/cpu.h>
+#include <machine/cputypes.h>
+#include <machine/md_var.h>
+#include <machine/specialreg.h>
+
+static int
+intel_switch_in(struct pmc_cpu *pc, struct pmc_process *pp)
+{
+ (void) pc;
+
+ PMCDBG(MDP,SWI,1, "pc=%p pp=%p enable-msr=%d", pc, pp,
+ pp->pp_flags & PMC_PP_ENABLE_MSR_ACCESS);
+
+ /* allow the RDPMC instruction if needed */
+ if (pp->pp_flags & PMC_PP_ENABLE_MSR_ACCESS)
+ load_cr4(rcr4() | CR4_PCE);
+
+ PMCDBG(MDP,SWI,1, "cr4=0x%jx", (uintmax_t) rcr4());
+
+ return 0;
+}
+
+static int
+intel_switch_out(struct pmc_cpu *pc, struct pmc_process *pp)
+{
+ (void) pc;
+ (void) pp; /* can be NULL */
+
+ PMCDBG(MDP,SWO,1, "pc=%p pp=%p cr4=0x%jx", pc, pp,
+ (uintmax_t) rcr4());
+
+ /* always turn off the RDPMC instruction */
+ load_cr4(rcr4() & ~CR4_PCE);
+
+ return 0;
+}
+
+struct pmc_mdep *
+pmc_intel_initialize(void)
+{
+ struct pmc_mdep *pmc_mdep;
+ enum pmc_cputype cputype;
+ int error, model, nclasses, ncpus;
+
+ KASSERT(cpu_vendor_id == CPU_VENDOR_INTEL,
+ ("[intel,%d] Initializing non-intel processor", __LINE__));
+
+ PMCDBG(MDP,INI,0, "intel-initialize cpuid=0x%x", cpu_id);
+
+ cputype = -1;
+ nclasses = 2;
+
+ model = ((cpu_id & 0xF0000) >> 12) | ((cpu_id & 0xF0) >> 4);
+
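+	/*
+	 * Worked example: a Sandy Bridge part reporting cpu_id
+	 * 0x206A7 has extended model 0x2 (bits 19:16) and model 0xA
+	 * (bits 7:4), so the computed 'model' is 0x2A, matching the
+	 * PMC_CPU_INTEL_SANDYBRIDGE case below.
+	 */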
+ switch (cpu_id & 0xF00) {
+#if defined(__i386__)
+ case 0x500: /* Pentium family processors */
+ cputype = PMC_CPU_INTEL_P5;
+ break;
+#endif
+ case 0x600: /* Pentium Pro, Celeron, Pentium II & III */
+ switch (model) {
+#if defined(__i386__)
+ case 0x1:
+ cputype = PMC_CPU_INTEL_P6;
+ break;
+ case 0x3: case 0x5:
+ cputype = PMC_CPU_INTEL_PII;
+ break;
+ case 0x6: case 0x16:
+ cputype = PMC_CPU_INTEL_CL;
+ break;
+ case 0x7: case 0x8: case 0xA: case 0xB:
+ cputype = PMC_CPU_INTEL_PIII;
+ break;
+ case 0x9: case 0xD:
+ cputype = PMC_CPU_INTEL_PM;
+ break;
+#endif
+ case 0xE:
+ cputype = PMC_CPU_INTEL_CORE;
+ break;
+ case 0xF:
+ cputype = PMC_CPU_INTEL_CORE2;
+ nclasses = 3;
+ break;
+ case 0x17:
+ cputype = PMC_CPU_INTEL_CORE2EXTREME;
+ nclasses = 3;
+ break;
+ case 0x1C: /* Per Intel document 320047-002. */
+ cputype = PMC_CPU_INTEL_ATOM;
+ nclasses = 3;
+ break;
+ case 0x1A:
+ case 0x1E: /* Per Intel document 253669-032 9/2009, pages A-2 and A-57 */
+ case 0x1F: /* Per Intel document 253669-032 9/2009, pages A-2 and A-57 */
+ case 0x2E:
+ cputype = PMC_CPU_INTEL_COREI7;
+ nclasses = 5;
+ break;
+ case 0x25: /* Per Intel document 253669-033US 12/2009. */
+ case 0x2C: /* Per Intel document 253669-033US 12/2009. */
+ cputype = PMC_CPU_INTEL_WESTMERE;
+ nclasses = 5;
+ break;
+ case 0x2A: /* Per Intel document 253669-039US 05/2011. */
+ cputype = PMC_CPU_INTEL_SANDYBRIDGE;
+ nclasses = 5;
+ break;
+ case 0x2D: /* Per Intel document 253669-044US 08/2012. */
+ cputype = PMC_CPU_INTEL_SANDYBRIDGE_XEON;
+ nclasses = 3;
+ break;
+ case 0x3A: /* Per Intel document 253669-043US 05/2012. */
+ cputype = PMC_CPU_INTEL_IVYBRIDGE;
+ nclasses = 3;
+ break;
+ case 0x3E: /* Per Intel document 325462-045US 01/2013. */
+ cputype = PMC_CPU_INTEL_IVYBRIDGE_XEON;
+ nclasses = 3;
+ break;
+ }
+ break;
+#if defined(__i386__) || defined(__amd64__)
+ case 0xF00: /* P4 */
+ if (model >= 0 && model <= 6) /* known models */
+ cputype = PMC_CPU_INTEL_PIV;
+ break;
+#endif
+	}
+
+ if ((int) cputype == -1) {
+ printf("pmc: Unknown Intel CPU.\n");
+ return (NULL);
+ }
+
+ /* Allocate base class and initialize machine dependent struct */
+ pmc_mdep = pmc_mdep_alloc(nclasses);
+
+ pmc_mdep->pmd_cputype = cputype;
+ pmc_mdep->pmd_switch_in = intel_switch_in;
+ pmc_mdep->pmd_switch_out = intel_switch_out;
+
+ ncpus = pmc_cpu_max();
+
+ error = pmc_tsc_initialize(pmc_mdep, ncpus);
+ if (error)
+ goto error;
+
+ switch (cputype) {
+#if defined(__i386__) || defined(__amd64__)
+ /*
+ * Intel Core, Core 2 and Atom processors.
+ */
+ case PMC_CPU_INTEL_ATOM:
+ case PMC_CPU_INTEL_CORE:
+ case PMC_CPU_INTEL_CORE2:
+ case PMC_CPU_INTEL_CORE2EXTREME:
+ case PMC_CPU_INTEL_COREI7:
+ case PMC_CPU_INTEL_IVYBRIDGE:
+ case PMC_CPU_INTEL_SANDYBRIDGE:
+ case PMC_CPU_INTEL_WESTMERE:
+ case PMC_CPU_INTEL_SANDYBRIDGE_XEON:
+ case PMC_CPU_INTEL_IVYBRIDGE_XEON:
+ error = pmc_core_initialize(pmc_mdep, ncpus);
+ break;
+
+ /*
+ * Intel Pentium 4 Processors, and P4/EMT64 processors.
+ */
+
+ case PMC_CPU_INTEL_PIV:
+ error = pmc_p4_initialize(pmc_mdep, ncpus);
+ break;
+#endif
+
+#if defined(__i386__)
+ /*
+ * P6 Family Processors
+ */
+
+ case PMC_CPU_INTEL_P6:
+ case PMC_CPU_INTEL_CL:
+ case PMC_CPU_INTEL_PII:
+ case PMC_CPU_INTEL_PIII:
+ case PMC_CPU_INTEL_PM:
+ error = pmc_p6_initialize(pmc_mdep, ncpus);
+ break;
+
+ /*
+ * Intel Pentium PMCs.
+ */
+
+ case PMC_CPU_INTEL_P5:
+ error = pmc_p5_initialize(pmc_mdep, ncpus);
+ break;
+#endif
+
+ default:
+ KASSERT(0, ("[intel,%d] Unknown CPU type", __LINE__));
+ }
+
+ if (error)
+ goto error;
+
+ /*
+ * Init the uncore class.
+ */
+#if defined(__i386__) || defined(__amd64__)
+ switch (cputype) {
+ /*
+	 * Intel Core i7, Sandy Bridge and Westmere processors.
+ */
+ case PMC_CPU_INTEL_COREI7:
+ case PMC_CPU_INTEL_SANDYBRIDGE:
+ case PMC_CPU_INTEL_WESTMERE:
+ error = pmc_uncore_initialize(pmc_mdep, ncpus);
+ break;
+ default:
+ break;
+ }
+#endif
+
+ error:
+ if (error) {
+ free(pmc_mdep, M_PMC);
+ pmc_mdep = NULL;
+ }
+
+ return (pmc_mdep);
+}
+
+void
+pmc_intel_finalize(struct pmc_mdep *md)
+{
+ pmc_tsc_finalize(md);
+
+ switch (md->pmd_cputype) {
+#if defined(__i386__) || defined(__amd64__)
+ case PMC_CPU_INTEL_ATOM:
+ case PMC_CPU_INTEL_CORE:
+ case PMC_CPU_INTEL_CORE2:
+ case PMC_CPU_INTEL_CORE2EXTREME:
+ case PMC_CPU_INTEL_COREI7:
+ case PMC_CPU_INTEL_IVYBRIDGE:
+ case PMC_CPU_INTEL_SANDYBRIDGE:
+ case PMC_CPU_INTEL_WESTMERE:
+ case PMC_CPU_INTEL_SANDYBRIDGE_XEON:
+ case PMC_CPU_INTEL_IVYBRIDGE_XEON:
+ pmc_core_finalize(md);
+ break;
+
+ case PMC_CPU_INTEL_PIV:
+ pmc_p4_finalize(md);
+ break;
+#endif
+#if defined(__i386__)
+ case PMC_CPU_INTEL_P6:
+ case PMC_CPU_INTEL_CL:
+ case PMC_CPU_INTEL_PII:
+ case PMC_CPU_INTEL_PIII:
+ case PMC_CPU_INTEL_PM:
+ pmc_p6_finalize(md);
+ break;
+ case PMC_CPU_INTEL_P5:
+ pmc_p5_finalize(md);
+ break;
+#endif
+ default:
+ KASSERT(0, ("[intel,%d] unknown CPU type", __LINE__));
+ }
+
+ /*
+ * Uncore.
+ */
+#if defined(__i386__) || defined(__amd64__)
+ switch (md->pmd_cputype) {
+ case PMC_CPU_INTEL_COREI7:
+ case PMC_CPU_INTEL_SANDYBRIDGE:
+ case PMC_CPU_INTEL_WESTMERE:
+ pmc_uncore_finalize(md);
+ break;
+ default:
+ break;
+ }
+#endif
+}
diff --git a/sys/dev/hwpmc/hwpmc_logging.c b/sys/dev/hwpmc/hwpmc_logging.c
new file mode 100644
index 0000000..880bcaa
--- /dev/null
+++ b/sys/dev/hwpmc/hwpmc_logging.c
@@ -0,0 +1,1070 @@
+/*-
+ * Copyright (c) 2005-2007 Joseph Koshy
+ * Copyright (c) 2007 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * Portions of this software were developed by A. Joseph Koshy under
+ * sponsorship from the FreeBSD Foundation and Google, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+/*
+ * Logging code for hwpmc(4)
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/capability.h>
+#include <sys/file.h>
+#include <sys/kernel.h>
+#include <sys/kthread.h>
+#include <sys/lock.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+#include <sys/pmc.h>
+#include <sys/pmckern.h>
+#include <sys/pmclog.h>
+#include <sys/proc.h>
+#include <sys/signalvar.h>
+#include <sys/sysctl.h>
+#include <sys/systm.h>
+#include <sys/uio.h>
+#include <sys/unistd.h>
+#include <sys/vnode.h>
+
+/*
+ * Sysctl tunables
+ */
+
+SYSCTL_DECL(_kern_hwpmc);
+
+/*
+ * kern.hwpmc.logbuffersize -- size of the per-cpu owner buffers.
+ */
+
+static int pmclog_buffer_size = PMC_LOG_BUFFER_SIZE;
+TUNABLE_INT(PMC_SYSCTL_NAME_PREFIX "logbuffersize", &pmclog_buffer_size);
+SYSCTL_INT(_kern_hwpmc, OID_AUTO, logbuffersize, CTLFLAG_TUN|CTLFLAG_RD,
+ &pmclog_buffer_size, 0, "size of log buffers in kilobytes");
+
+/*
+ * kern.hwpmc.nbuffers -- number of global log buffers
+ */
+
+static int pmc_nlogbuffers = PMC_NLOGBUFFERS;
+TUNABLE_INT(PMC_SYSCTL_NAME_PREFIX "nbuffers", &pmc_nlogbuffers);
+SYSCTL_INT(_kern_hwpmc, OID_AUTO, nbuffers, CTLFLAG_TUN|CTLFLAG_RD,
+ &pmc_nlogbuffers, 0, "number of global log buffers");
+
+/*
+ * Global log buffer list and associated spin lock.
+ */
+
+TAILQ_HEAD(, pmclog_buffer) pmc_bufferlist =
+ TAILQ_HEAD_INITIALIZER(pmc_bufferlist);
+static struct mtx pmc_bufferlist_mtx; /* spin lock */
+static struct mtx pmc_kthread_mtx; /* sleep lock */
+
+#define PMCLOG_INIT_BUFFER_DESCRIPTOR(D) do { \
+ const int __roundup = roundup(sizeof(*D), \
+ sizeof(uint32_t)); \
+ (D)->plb_fence = ((char *) (D)) + \
+ 1024*pmclog_buffer_size; \
+ (D)->plb_base = (D)->plb_ptr = ((char *) (D)) + \
+ __roundup; \
+ } while (0)
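+
+/*
+ * Each log buffer is a single allocation of 1024*pmclog_buffer_size
+ * bytes; the struct pmclog_buffer descriptor sits at the front of
+ * the allocation and the usable data area follows it:
+ *
+ *	| descriptor | data area .......................... |
+ *	^plb          ^plb_base == plb_ptr (empty)  plb_fence^
+ */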
+
+
+/*
+ * Log file record constructors.
+ */
+#define _PMCLOG_TO_HEADER(T,L) \
+ ((PMCLOG_HEADER_MAGIC << 24) | \
+ (PMCLOG_TYPE_ ## T << 16) | \
+ ((L) & 0xFFFF))
+
+/* reserve LEN bytes of space and initialize the entry header */
+#define _PMCLOG_RESERVE(PO,TYPE,LEN,ACTION) do { \
+ uint32_t *_le; \
+ int _len = roundup((LEN), sizeof(uint32_t)); \
+ if ((_le = pmclog_reserve((PO), _len)) == NULL) { \
+ ACTION; \
+ } \
+ *_le = _PMCLOG_TO_HEADER(TYPE,_len); \
+ _le += 3 /* skip over timestamp */
+
+#define PMCLOG_RESERVE(P,T,L) _PMCLOG_RESERVE(P,T,L,return)
+#define PMCLOG_RESERVE_WITH_ERROR(P,T,L) _PMCLOG_RESERVE(P,T,L, \
+ error=ENOMEM;goto error)
+
+#define PMCLOG_EMIT32(V) do { *_le++ = (V); } while (0)
+#define PMCLOG_EMIT64(V) do { \
+ *_le++ = (uint32_t) ((V) & 0xFFFFFFFF); \
+ *_le++ = (uint32_t) (((V) >> 32) & 0xFFFFFFFF); \
+ } while (0)
+
+
+/* Emit a string. Caution: does NOT update _le, so needs to be last */
+#define PMCLOG_EMITSTRING(S,L) do { bcopy((S), _le, (L)); } while (0)
+#define PMCLOG_EMITNULLSTRING(L) do { bzero(_le, (L)); } while (0)
+
+#define PMCLOG_DESPATCH(PO) \
+ pmclog_release((PO)); \
+ } while (0)
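+
+/*
+ * Note that _PMCLOG_RESERVE() opens a 'do {' block that only
+ * PMCLOG_DESPATCH() closes, so every RESERVE must be paired with a
+ * DESPATCH.  A typical record writer (for a hypothetical record type
+ * FOO carrying one 32-bit payload word) looks like:
+ *
+ *	PMCLOG_RESERVE(po, FOO, sizeof(struct pmclog_foo));
+ *	PMCLOG_EMIT32(foo_value);
+ *	PMCLOG_DESPATCH(po);
+ *
+ * RESERVE returns with the owner's spin mutex held and '_le' pointing
+ * past the record header and timestamp; DESPATCH releases the buffer,
+ * scheduling I/O if it has filled up.
+ */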
+
+
+/*
+ * Assertions about the log file format.
+ */
+
+CTASSERT(sizeof(struct pmclog_callchain) == 6*4 +
+ PMC_CALLCHAIN_DEPTH_MAX*sizeof(uintfptr_t));
+CTASSERT(sizeof(struct pmclog_closelog) == 3*4);
+CTASSERT(sizeof(struct pmclog_dropnotify) == 3*4);
+CTASSERT(sizeof(struct pmclog_map_in) == PATH_MAX +
+ 4*4 + sizeof(uintfptr_t));
+CTASSERT(offsetof(struct pmclog_map_in,pl_pathname) ==
+ 4*4 + sizeof(uintfptr_t));
+CTASSERT(sizeof(struct pmclog_map_out) == 4*4 + 2*sizeof(uintfptr_t));
+CTASSERT(sizeof(struct pmclog_pcsample) == 6*4 + sizeof(uintfptr_t));
+CTASSERT(sizeof(struct pmclog_pmcallocate) == 6*4);
+CTASSERT(sizeof(struct pmclog_pmcattach) == 5*4 + PATH_MAX);
+CTASSERT(offsetof(struct pmclog_pmcattach,pl_pathname) == 5*4);
+CTASSERT(sizeof(struct pmclog_pmcdetach) == 5*4);
+CTASSERT(sizeof(struct pmclog_proccsw) == 5*4 + 8);
+CTASSERT(sizeof(struct pmclog_procexec) == 5*4 + PATH_MAX +
+ sizeof(uintfptr_t));
+CTASSERT(offsetof(struct pmclog_procexec,pl_pathname) == 5*4 +
+ sizeof(uintfptr_t));
+CTASSERT(sizeof(struct pmclog_procexit) == 5*4 + 8);
+CTASSERT(sizeof(struct pmclog_procfork) == 5*4);
+CTASSERT(sizeof(struct pmclog_sysexit) == 4*4);
+CTASSERT(sizeof(struct pmclog_userdata) == 4*4);
+
+/*
+ * Log buffer structure
+ */
+
+struct pmclog_buffer {
+ TAILQ_ENTRY(pmclog_buffer) plb_next;
+ char *plb_base;
+ char *plb_ptr;
+ char *plb_fence;
+};
+
+/*
+ * Prototypes
+ */
+
+static int pmclog_get_buffer(struct pmc_owner *po);
+static void pmclog_loop(void *arg);
+static void pmclog_release(struct pmc_owner *po);
+static uint32_t *pmclog_reserve(struct pmc_owner *po, int length);
+static void pmclog_schedule_io(struct pmc_owner *po);
+static void pmclog_stop_kthread(struct pmc_owner *po);
+
+/*
+ * Helper functions
+ */
+
+/*
+ * Get a log buffer
+ */
+
+static int
+pmclog_get_buffer(struct pmc_owner *po)
+{
+ struct pmclog_buffer *plb;
+
+ mtx_assert(&po->po_mtx, MA_OWNED);
+
+ KASSERT(po->po_curbuf == NULL,
+ ("[pmclog,%d] po=%p current buffer still valid", __LINE__, po));
+
+ mtx_lock_spin(&pmc_bufferlist_mtx);
+ if ((plb = TAILQ_FIRST(&pmc_bufferlist)) != NULL)
+ TAILQ_REMOVE(&pmc_bufferlist, plb, plb_next);
+ mtx_unlock_spin(&pmc_bufferlist_mtx);
+
+ PMCDBG(LOG,GTB,1, "po=%p plb=%p", po, plb);
+
+#ifdef DEBUG
+ if (plb)
+ KASSERT(plb->plb_ptr == plb->plb_base &&
+ plb->plb_base < plb->plb_fence,
+ ("[pmclog,%d] po=%p buffer invariants: ptr=%p "
+ "base=%p fence=%p", __LINE__, po, plb->plb_ptr,
+ plb->plb_base, plb->plb_fence));
+#endif
+
+ po->po_curbuf = plb;
+
+ /* update stats */
+ atomic_add_int(&pmc_stats.pm_buffer_requests, 1);
+ if (plb == NULL)
+ atomic_add_int(&pmc_stats.pm_buffer_requests_failed, 1);
+
+ return (plb ? 0 : ENOMEM);
+}
+
+/*
+ * Log handler loop.
+ *
+ * This function is executed by each pmc owner's helper thread.
+ */
+
+static void
+pmclog_loop(void *arg)
+{
+ int error;
+ struct pmc_owner *po;
+ struct pmclog_buffer *lb;
+ struct proc *p;
+ struct ucred *ownercred;
+ struct ucred *mycred;
+ struct thread *td;
+ struct uio auio;
+ struct iovec aiov;
+ size_t nbytes;
+
+ po = (struct pmc_owner *) arg;
+ p = po->po_owner;
+ td = curthread;
+ mycred = td->td_ucred;
+
+ PROC_LOCK(p);
+ ownercred = crhold(p->p_ucred);
+ PROC_UNLOCK(p);
+
+ PMCDBG(LOG,INI,1, "po=%p kt=%p", po, po->po_kthread);
+ KASSERT(po->po_kthread == curthread->td_proc,
+ ("[pmclog,%d] proc mismatch po=%p po/kt=%p curproc=%p", __LINE__,
+ po, po->po_kthread, curthread->td_proc));
+
+ lb = NULL;
+
+
+ /*
+ * Loop waiting for I/O requests to be added to the owner
+ * struct's queue. The loop is exited when the log file
+ * is deconfigured.
+ */
+
+ mtx_lock(&pmc_kthread_mtx);
+
+ for (;;) {
+
+ /* check if we've been asked to exit */
+ if ((po->po_flags & PMC_PO_OWNS_LOGFILE) == 0)
+ break;
+
+ if (lb == NULL) { /* look for a fresh buffer to write */
+ mtx_lock_spin(&po->po_mtx);
+ if ((lb = TAILQ_FIRST(&po->po_logbuffers)) == NULL) {
+ mtx_unlock_spin(&po->po_mtx);
+
+ /* No more buffers and shutdown required. */
+ if (po->po_flags & PMC_PO_SHUTDOWN) {
+ mtx_unlock(&pmc_kthread_mtx);
+ /*
+ * Close the file to get PMCLOG_EOF
+ * error in pmclog(3).
+ */
+ fo_close(po->po_file, curthread);
+ mtx_lock(&pmc_kthread_mtx);
+ break;
+ }
+
+ (void) msleep(po, &pmc_kthread_mtx, PWAIT,
+ "pmcloop", 0);
+ continue;
+ }
+
+ TAILQ_REMOVE(&po->po_logbuffers, lb, plb_next);
+ mtx_unlock_spin(&po->po_mtx);
+ }
+
+ mtx_unlock(&pmc_kthread_mtx);
+
+ /* process the request */
+ PMCDBG(LOG,WRI,2, "po=%p base=%p ptr=%p", po,
+ lb->plb_base, lb->plb_ptr);
+ /* change our thread's credentials before issuing the I/O */
+
+ aiov.iov_base = lb->plb_base;
+ aiov.iov_len = nbytes = lb->plb_ptr - lb->plb_base;
+
+ auio.uio_iov = &aiov;
+ auio.uio_iovcnt = 1;
+ auio.uio_offset = -1;
+ auio.uio_resid = nbytes;
+ auio.uio_rw = UIO_WRITE;
+ auio.uio_segflg = UIO_SYSSPACE;
+ auio.uio_td = td;
+
+ /* switch thread credentials -- see kern_ktrace.c */
+ td->td_ucred = ownercred;
+ error = fo_write(po->po_file, &auio, ownercred, 0, td);
+ td->td_ucred = mycred;
+
+ if (error) {
+ /* XXX some errors are recoverable */
+ /* send a SIGIO to the owner and exit */
+ PROC_LOCK(p);
+ kern_psignal(p, SIGIO);
+ PROC_UNLOCK(p);
+
+ mtx_lock(&pmc_kthread_mtx);
+
+ po->po_error = error; /* save for flush log */
+
+ PMCDBG(LOG,WRI,2, "po=%p error=%d", po, error);
+
+ break;
+ }
+
+ mtx_lock(&pmc_kthread_mtx);
+
+ /* put the used buffer back into the global pool */
+ PMCLOG_INIT_BUFFER_DESCRIPTOR(lb);
+
+ mtx_lock_spin(&pmc_bufferlist_mtx);
+ TAILQ_INSERT_HEAD(&pmc_bufferlist, lb, plb_next);
+ mtx_unlock_spin(&pmc_bufferlist_mtx);
+
+ lb = NULL;
+ }
+
+ wakeup_one(po->po_kthread);
+ po->po_kthread = NULL;
+
+ mtx_unlock(&pmc_kthread_mtx);
+
+ /* return the current I/O buffer to the global pool */
+ if (lb) {
+ PMCLOG_INIT_BUFFER_DESCRIPTOR(lb);
+
+ mtx_lock_spin(&pmc_bufferlist_mtx);
+ TAILQ_INSERT_HEAD(&pmc_bufferlist, lb, plb_next);
+ mtx_unlock_spin(&pmc_bufferlist_mtx);
+ }
+
+ /*
+ * Exit this thread, signalling the waiter
+ */
+
+ crfree(ownercred);
+
+ kproc_exit(0);
+}
+
+/*
+ * Release a log entry and schedule an I/O if needed.
+ */
+
+static void
+pmclog_release(struct pmc_owner *po)
+{
+ KASSERT(po->po_curbuf->plb_ptr >= po->po_curbuf->plb_base,
+ ("[pmclog,%d] buffer invariants po=%p ptr=%p base=%p", __LINE__,
+ po, po->po_curbuf->plb_ptr, po->po_curbuf->plb_base));
+ KASSERT(po->po_curbuf->plb_ptr <= po->po_curbuf->plb_fence,
+ ("[pmclog,%d] buffer invariants po=%p ptr=%p fenc=%p", __LINE__,
+ po, po->po_curbuf->plb_ptr, po->po_curbuf->plb_fence));
+
+ /* schedule an I/O if we've filled a buffer */
+ if (po->po_curbuf->plb_ptr >= po->po_curbuf->plb_fence)
+ pmclog_schedule_io(po);
+
+ mtx_unlock_spin(&po->po_mtx);
+
+ PMCDBG(LOG,REL,1, "po=%p", po);
+}
+
+
+/*
+ * Attempt to reserve 'length' bytes of space in an owner's log
+ * buffer.  Returns a pointer to the reserved space if enough was
+ * available and NULL otherwise.  A non-NULL return is made with the
+ * owner's spin mutex held; the caller must invoke pmclog_release()
+ * on the pmc owner structure when done.
+ */
+
+static uint32_t *
+pmclog_reserve(struct pmc_owner *po, int length)
+{
+ uintptr_t newptr, oldptr;
+ uint32_t *lh;
+ struct timespec ts;
+
+ PMCDBG(LOG,ALL,1, "po=%p len=%d", po, length);
+
+ KASSERT(length % sizeof(uint32_t) == 0,
+ ("[pmclog,%d] length not a multiple of word size", __LINE__));
+
+ mtx_lock_spin(&po->po_mtx);
+
+ /* No more data when shutdown in progress. */
+ if (po->po_flags & PMC_PO_SHUTDOWN) {
+ mtx_unlock_spin(&po->po_mtx);
+ return (NULL);
+ }
+
+ if (po->po_curbuf == NULL)
+ if (pmclog_get_buffer(po) != 0) {
+ mtx_unlock_spin(&po->po_mtx);
+ return (NULL);
+ }
+
+ KASSERT(po->po_curbuf != NULL,
+ ("[pmclog,%d] po=%p no current buffer", __LINE__, po));
+
+ KASSERT(po->po_curbuf->plb_ptr >= po->po_curbuf->plb_base &&
+ po->po_curbuf->plb_ptr <= po->po_curbuf->plb_fence,
+ ("[pmclog,%d] po=%p buffer invariants: ptr=%p base=%p fence=%p",
+ __LINE__, po, po->po_curbuf->plb_ptr, po->po_curbuf->plb_base,
+ po->po_curbuf->plb_fence));
+
+ oldptr = (uintptr_t) po->po_curbuf->plb_ptr;
+ newptr = oldptr + length;
+
+ KASSERT(oldptr != (uintptr_t) NULL,
+ ("[pmclog,%d] po=%p Null log buffer pointer", __LINE__, po));
+
+ /*
+ * If we have space in the current buffer, return a pointer to
+ * available space with the PO structure locked.
+ */
+ if (newptr <= (uintptr_t) po->po_curbuf->plb_fence) {
+ po->po_curbuf->plb_ptr = (char *) newptr;
+ goto done;
+ }
+
+ /*
+ * Otherwise, schedule the current buffer for output and get a
+ * fresh buffer.
+ */
+ pmclog_schedule_io(po);
+
+ if (pmclog_get_buffer(po) != 0) {
+ mtx_unlock_spin(&po->po_mtx);
+ return (NULL);
+ }
+
+ KASSERT(po->po_curbuf != NULL,
+ ("[pmclog,%d] po=%p no current buffer", __LINE__, po));
+
+ KASSERT(po->po_curbuf->plb_ptr != NULL,
+ ("[pmclog,%d] null return from pmc_get_log_buffer", __LINE__));
+
+ KASSERT(po->po_curbuf->plb_ptr == po->po_curbuf->plb_base &&
+ po->po_curbuf->plb_ptr <= po->po_curbuf->plb_fence,
+ ("[pmclog,%d] po=%p buffer invariants: ptr=%p base=%p fence=%p",
+ __LINE__, po, po->po_curbuf->plb_ptr, po->po_curbuf->plb_base,
+ po->po_curbuf->plb_fence));
+
+ oldptr = (uintptr_t) po->po_curbuf->plb_ptr;
+
+ done:
+ lh = (uint32_t *) oldptr;
+ lh++; /* skip header */
+ getnanotime(&ts); /* fill in the timestamp */
+ *lh++ = ts.tv_sec & 0xFFFFFFFF;
+	*lh++ = ts.tv_nsec & 0xFFFFFFFF;
+ return ((uint32_t *) oldptr);
+}
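+
+/*
+ * Every record therefore begins with three 32-bit words:
+ *
+ *	word 0: (PMCLOG_HEADER_MAGIC << 24) | (type << 16) | length
+ *	word 1: timestamp seconds (low 32 bits)
+ *	word 2: timestamp nanoseconds
+ *
+ * which is why _PMCLOG_RESERVE() advances '_le' past the first three
+ * words before the EMIT macros run.
+ */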
+
+/*
+ * Schedule an I/O.
+ *
+ * Transfer the current buffer to the helper kthread.
+ */
+
+static void
+pmclog_schedule_io(struct pmc_owner *po)
+{
+ KASSERT(po->po_curbuf != NULL,
+ ("[pmclog,%d] schedule_io with null buffer po=%p", __LINE__, po));
+
+ KASSERT(po->po_curbuf->plb_ptr >= po->po_curbuf->plb_base,
+ ("[pmclog,%d] buffer invariants po=%p ptr=%p base=%p", __LINE__,
+ po, po->po_curbuf->plb_ptr, po->po_curbuf->plb_base));
+ KASSERT(po->po_curbuf->plb_ptr <= po->po_curbuf->plb_fence,
+ ("[pmclog,%d] buffer invariants po=%p ptr=%p fenc=%p", __LINE__,
+ po, po->po_curbuf->plb_ptr, po->po_curbuf->plb_fence));
+
+ PMCDBG(LOG,SIO, 1, "po=%p", po);
+
+ mtx_assert(&po->po_mtx, MA_OWNED);
+
+ /*
+ * Add the current buffer to the tail of the buffer list and
+ * wakeup the helper.
+ */
+ TAILQ_INSERT_TAIL(&po->po_logbuffers, po->po_curbuf, plb_next);
+ po->po_curbuf = NULL;
+ wakeup_one(po);
+}
+
+/*
+ * Stop the helper kthread.
+ */
+
+static void
+pmclog_stop_kthread(struct pmc_owner *po)
+{
+ /*
+	 * Close the file to force the thread out of fo_write(),
+	 * clear the PMC_PO_OWNS_LOGFILE flag, wake up the helper
+	 * thread, and wait for it to exit.
+ */
+
+ if (po->po_file != NULL)
+ fo_close(po->po_file, curthread);
+
+ mtx_lock(&pmc_kthread_mtx);
+ po->po_flags &= ~PMC_PO_OWNS_LOGFILE;
+ wakeup_one(po);
+ if (po->po_kthread)
+ msleep(po->po_kthread, &pmc_kthread_mtx, PPAUSE, "pmckstp", 0);
+ mtx_unlock(&pmc_kthread_mtx);
+}
+
+/*
+ * Public functions
+ */
+
+/*
+ * Configure a log file for pmc owner 'po'.
+ *
+ * Parameter 'logfd' is a file handle referencing an open file in the
+ * owner process. This file needs to have been opened for writing.
+ */
+
+int
+pmclog_configure_log(struct pmc_mdep *md, struct pmc_owner *po, int logfd)
+{
+ int error;
+ struct proc *p;
+
+ /*
+ * As long as it is possible to get a LOR between pmc_sx lock and
+ * proctree/allproc sx locks used for adding a new process, assure
+ * the former is not held here.
+ */
+ sx_assert(&pmc_sx, SA_UNLOCKED);
+ PMCDBG(LOG,CFG,1, "config po=%p logfd=%d", po, logfd);
+
+ p = po->po_owner;
+
+ /* return EBUSY if a log file was already present */
+ if (po->po_flags & PMC_PO_OWNS_LOGFILE)
+ return (EBUSY);
+
+ KASSERT(po->po_kthread == NULL,
+ ("[pmclog,%d] po=%p kthread (%p) already present", __LINE__, po,
+ po->po_kthread));
+ KASSERT(po->po_file == NULL,
+ ("[pmclog,%d] po=%p file (%p) already present", __LINE__, po,
+ po->po_file));
+
+ /* get a reference to the file state */
+ error = fget_write(curthread, logfd, CAP_WRITE, &po->po_file);
+ if (error)
+ goto error;
+
+ /* mark process as owning a log file */
+ po->po_flags |= PMC_PO_OWNS_LOGFILE;
+ error = kproc_create(pmclog_loop, po, &po->po_kthread,
+ RFHIGHPID, 0, "hwpmc: proc(%d)", p->p_pid);
+ if (error)
+ goto error;
+
+ /* mark process as using HWPMCs */
+ PROC_LOCK(p);
+ p->p_flag |= P_HWPMC;
+ PROC_UNLOCK(p);
+
+ /* create a log initialization entry */
+ PMCLOG_RESERVE_WITH_ERROR(po, INITIALIZE,
+ sizeof(struct pmclog_initialize));
+ PMCLOG_EMIT32(PMC_VERSION);
+ PMCLOG_EMIT32(md->pmd_cputype);
+ PMCLOG_DESPATCH(po);
+
+ return (0);
+
+ error:
+ /* shutdown the thread */
+ if (po->po_kthread)
+ pmclog_stop_kthread(po);
+
+ KASSERT(po->po_kthread == NULL, ("[pmclog,%d] po=%p kthread not "
+ "stopped", __LINE__, po));
+
+ if (po->po_file)
+ (void) fdrop(po->po_file, curthread);
+ po->po_file = NULL; /* clear file and error state */
+ po->po_error = 0;
+
+ return (error);
+}
+
+
+/*
+ * De-configure a log file. This will throw away any buffers queued
+ * for this owner process.
+ */
+
+int
+pmclog_deconfigure_log(struct pmc_owner *po)
+{
+ int error;
+ struct pmclog_buffer *lb;
+
+ PMCDBG(LOG,CFG,1, "de-config po=%p", po);
+
+ if ((po->po_flags & PMC_PO_OWNS_LOGFILE) == 0)
+ return (EINVAL);
+
+ KASSERT(po->po_sscount == 0,
+ ("[pmclog,%d] po=%p still owning SS PMCs", __LINE__, po));
+ KASSERT(po->po_file != NULL,
+ ("[pmclog,%d] po=%p no log file", __LINE__, po));
+
+ /* stop the kthread, this will reset the 'OWNS_LOGFILE' flag */
+ pmclog_stop_kthread(po);
+
+ KASSERT(po->po_kthread == NULL,
+ ("[pmclog,%d] po=%p kthread not stopped", __LINE__, po));
+
+ /* return all queued log buffers to the global pool */
+ while ((lb = TAILQ_FIRST(&po->po_logbuffers)) != NULL) {
+ TAILQ_REMOVE(&po->po_logbuffers, lb, plb_next);
+ PMCLOG_INIT_BUFFER_DESCRIPTOR(lb);
+ mtx_lock_spin(&pmc_bufferlist_mtx);
+ TAILQ_INSERT_HEAD(&pmc_bufferlist, lb, plb_next);
+ mtx_unlock_spin(&pmc_bufferlist_mtx);
+ }
+
+ /* return the 'current' buffer to the global pool */
+ if ((lb = po->po_curbuf) != NULL) {
+ PMCLOG_INIT_BUFFER_DESCRIPTOR(lb);
+ mtx_lock_spin(&pmc_bufferlist_mtx);
+ TAILQ_INSERT_HEAD(&pmc_bufferlist, lb, plb_next);
+ mtx_unlock_spin(&pmc_bufferlist_mtx);
+ }
+
+ /* drop a reference to the fd */
+ error = fdrop(po->po_file, curthread);
+ po->po_file = NULL;
+ po->po_error = 0;
+
+ return (error);
+}
+
+/*
+ * Flush a process' log buffer.
+ */
+
+int
+pmclog_flush(struct pmc_owner *po)
+{
+ int error;
+ struct pmclog_buffer *lb;
+
+ PMCDBG(LOG,FLS,1, "po=%p", po);
+
+ /*
+ * If there is a pending error recorded by the logger thread,
+ * return that.
+ */
+ if (po->po_error)
+ return (po->po_error);
+
+ error = 0;
+
+ /*
+ * Check that we do have an active log file.
+ */
+ mtx_lock(&pmc_kthread_mtx);
+ if ((po->po_flags & PMC_PO_OWNS_LOGFILE) == 0) {
+ error = EINVAL;
+ goto error;
+ }
+
+ /*
+	 * Schedule the current buffer for I/O if one exists and is
+	 * not empty.
+ */
+ mtx_lock_spin(&po->po_mtx);
+ lb = po->po_curbuf;
+ if (lb && lb->plb_ptr != lb->plb_base) {
+ pmclog_schedule_io(po);
+ } else
+ error = ENOBUFS;
+ mtx_unlock_spin(&po->po_mtx);
+
+ error:
+ mtx_unlock(&pmc_kthread_mtx);
+
+ return (error);
+}
+
+int
+pmclog_close(struct pmc_owner *po)
+{
+
+ PMCDBG(LOG,CLO,1, "po=%p", po);
+
+ mtx_lock(&pmc_kthread_mtx);
+
+ /*
+ * Schedule the current buffer.
+ */
+ mtx_lock_spin(&po->po_mtx);
+ if (po->po_curbuf)
+ pmclog_schedule_io(po);
+ else
+ wakeup_one(po);
+ mtx_unlock_spin(&po->po_mtx);
+
+ /*
+ * Initiate shutdown: no new data queued,
+ * thread will close file on last block.
+ */
+ po->po_flags |= PMC_PO_SHUTDOWN;
+
+ mtx_unlock(&pmc_kthread_mtx);
+
+ return (0);
+}
+
+void
+pmclog_process_callchain(struct pmc *pm, struct pmc_sample *ps)
+{
+ int n, recordlen;
+ uint32_t flags;
+ struct pmc_owner *po;
+
+ PMCDBG(LOG,SAM,1,"pm=%p pid=%d n=%d", pm, ps->ps_pid,
+ ps->ps_nsamples);
+
+ recordlen = offsetof(struct pmclog_callchain, pl_pc) +
+ ps->ps_nsamples * sizeof(uintfptr_t);
+ po = pm->pm_owner;
+ flags = PMC_CALLCHAIN_TO_CPUFLAGS(ps->ps_cpu,ps->ps_flags);
+ PMCLOG_RESERVE(po, CALLCHAIN, recordlen);
+ PMCLOG_EMIT32(ps->ps_pid);
+ PMCLOG_EMIT32(pm->pm_id);
+ PMCLOG_EMIT32(flags);
+ for (n = 0; n < ps->ps_nsamples; n++)
+ PMCLOG_EMITADDR(ps->ps_pc[n]);
+ PMCLOG_DESPATCH(po);
+}
+
+void
+pmclog_process_closelog(struct pmc_owner *po)
+{
+ PMCLOG_RESERVE(po,CLOSELOG,sizeof(struct pmclog_closelog));
+ PMCLOG_DESPATCH(po);
+}
+
+void
+pmclog_process_dropnotify(struct pmc_owner *po)
+{
+ PMCLOG_RESERVE(po,DROPNOTIFY,sizeof(struct pmclog_dropnotify));
+ PMCLOG_DESPATCH(po);
+}
+
+void
+pmclog_process_map_in(struct pmc_owner *po, pid_t pid, uintfptr_t start,
+ const char *path)
+{
+ int pathlen, recordlen;
+
+ KASSERT(path != NULL, ("[pmclog,%d] map-in, null path", __LINE__));
+
+ pathlen = strlen(path) + 1; /* #bytes for path name */
+ recordlen = offsetof(struct pmclog_map_in, pl_pathname) +
+ pathlen;
+
+ PMCLOG_RESERVE(po, MAP_IN, recordlen);
+ PMCLOG_EMIT32(pid);
+ PMCLOG_EMITADDR(start);
+ PMCLOG_EMITSTRING(path,pathlen);
+ PMCLOG_DESPATCH(po);
+}
+
+void
+pmclog_process_map_out(struct pmc_owner *po, pid_t pid, uintfptr_t start,
+ uintfptr_t end)
+{
+ KASSERT(start <= end, ("[pmclog,%d] start > end", __LINE__));
+
+ PMCLOG_RESERVE(po, MAP_OUT, sizeof(struct pmclog_map_out));
+ PMCLOG_EMIT32(pid);
+ PMCLOG_EMITADDR(start);
+ PMCLOG_EMITADDR(end);
+ PMCLOG_DESPATCH(po);
+}
+
+void
+pmclog_process_pmcallocate(struct pmc *pm)
+{
+ struct pmc_owner *po;
+ struct pmc_soft *ps;
+
+ po = pm->pm_owner;
+
+ PMCDBG(LOG,ALL,1, "pm=%p", pm);
+
+ if (PMC_TO_CLASS(pm) == PMC_CLASS_SOFT) {
+ PMCLOG_RESERVE(po, PMCALLOCATEDYN,
+ sizeof(struct pmclog_pmcallocatedyn));
+ PMCLOG_EMIT32(pm->pm_id);
+ PMCLOG_EMIT32(pm->pm_event);
+ PMCLOG_EMIT32(pm->pm_flags);
+ ps = pmc_soft_ev_acquire(pm->pm_event);
+ if (ps != NULL)
+ PMCLOG_EMITSTRING(ps->ps_ev.pm_ev_name,PMC_NAME_MAX);
+ else
+ PMCLOG_EMITNULLSTRING(PMC_NAME_MAX);
+ pmc_soft_ev_release(ps);
+ PMCLOG_DESPATCH(po);
+ } else {
+ PMCLOG_RESERVE(po, PMCALLOCATE,
+ sizeof(struct pmclog_pmcallocate));
+ PMCLOG_EMIT32(pm->pm_id);
+ PMCLOG_EMIT32(pm->pm_event);
+ PMCLOG_EMIT32(pm->pm_flags);
+ PMCLOG_DESPATCH(po);
+ }
+}
+
+void
+pmclog_process_pmcattach(struct pmc *pm, pid_t pid, char *path)
+{
+ int pathlen, recordlen;
+ struct pmc_owner *po;
+
+ PMCDBG(LOG,ATT,1,"pm=%p pid=%d", pm, pid);
+
+ po = pm->pm_owner;
+
+ pathlen = strlen(path) + 1; /* #bytes for the string */
+ recordlen = offsetof(struct pmclog_pmcattach, pl_pathname) + pathlen;
+
+ PMCLOG_RESERVE(po, PMCATTACH, recordlen);
+ PMCLOG_EMIT32(pm->pm_id);
+ PMCLOG_EMIT32(pid);
+ PMCLOG_EMITSTRING(path, pathlen);
+ PMCLOG_DESPATCH(po);
+}
+
+void
+pmclog_process_pmcdetach(struct pmc *pm, pid_t pid)
+{
+ struct pmc_owner *po;
+
+ PMCDBG(LOG,ATT,1,"!pm=%p pid=%d", pm, pid);
+
+ po = pm->pm_owner;
+
+ PMCLOG_RESERVE(po, PMCDETACH, sizeof(struct pmclog_pmcdetach));
+ PMCLOG_EMIT32(pm->pm_id);
+ PMCLOG_EMIT32(pid);
+ PMCLOG_DESPATCH(po);
+}
+
+/*
+ * Log a context switch event to the log file.
+ */
+
+void
+pmclog_process_proccsw(struct pmc *pm, struct pmc_process *pp, pmc_value_t v)
+{
+ struct pmc_owner *po;
+
+ KASSERT(pm->pm_flags & PMC_F_LOG_PROCCSW,
+ ("[pmclog,%d] log-process-csw called gratuitously", __LINE__));
+
+ PMCDBG(LOG,SWO,1,"pm=%p pid=%d v=%jx", pm, pp->pp_proc->p_pid,
+ v);
+
+ po = pm->pm_owner;
+
+ PMCLOG_RESERVE(po, PROCCSW, sizeof(struct pmclog_proccsw));
+ PMCLOG_EMIT32(pm->pm_id);
+ PMCLOG_EMIT64(v);
+ PMCLOG_EMIT32(pp->pp_proc->p_pid);
+ PMCLOG_DESPATCH(po);
+}
+
+void
+pmclog_process_procexec(struct pmc_owner *po, pmc_id_t pmid, pid_t pid,
+ uintfptr_t startaddr, char *path)
+{
+ int pathlen, recordlen;
+
+ PMCDBG(LOG,EXC,1,"po=%p pid=%d path=\"%s\"", po, pid, path);
+
+ pathlen = strlen(path) + 1; /* #bytes for the path */
+ recordlen = offsetof(struct pmclog_procexec, pl_pathname) + pathlen;
+
+ PMCLOG_RESERVE(po, PROCEXEC, recordlen);
+ PMCLOG_EMIT32(pid);
+ PMCLOG_EMITADDR(startaddr);
+ PMCLOG_EMIT32(pmid);
+ PMCLOG_EMITSTRING(path,pathlen);
+ PMCLOG_DESPATCH(po);
+}
+
+/*
+ * Log a process exit event (and accumulated pmc value) to the log file.
+ */
+
+void
+pmclog_process_procexit(struct pmc *pm, struct pmc_process *pp)
+{
+ int ri;
+ struct pmc_owner *po;
+
+ ri = PMC_TO_ROWINDEX(pm);
+ PMCDBG(LOG,EXT,1,"pm=%p pid=%d v=%jx", pm, pp->pp_proc->p_pid,
+ pp->pp_pmcs[ri].pp_pmcval);
+
+ po = pm->pm_owner;
+
+ PMCLOG_RESERVE(po, PROCEXIT, sizeof(struct pmclog_procexit));
+ PMCLOG_EMIT32(pm->pm_id);
+ PMCLOG_EMIT64(pp->pp_pmcs[ri].pp_pmcval);
+ PMCLOG_EMIT32(pp->pp_proc->p_pid);
+ PMCLOG_DESPATCH(po);
+}
+
+/*
+ * Log a fork event.
+ */
+
+void
+pmclog_process_procfork(struct pmc_owner *po, pid_t oldpid, pid_t newpid)
+{
+ PMCLOG_RESERVE(po, PROCFORK, sizeof(struct pmclog_procfork));
+ PMCLOG_EMIT32(oldpid);
+ PMCLOG_EMIT32(newpid);
+ PMCLOG_DESPATCH(po);
+}
+
+/*
+ * Log a process exit event of the form suitable for system-wide PMCs.
+ */
+
+void
+pmclog_process_sysexit(struct pmc_owner *po, pid_t pid)
+{
+ PMCLOG_RESERVE(po, SYSEXIT, sizeof(struct pmclog_sysexit));
+ PMCLOG_EMIT32(pid);
+ PMCLOG_DESPATCH(po);
+}
+
+/*
+ * Write a user log entry.
+ */
+
+int
+pmclog_process_userlog(struct pmc_owner *po, struct pmc_op_writelog *wl)
+{
+ int error;
+
+ PMCDBG(LOG,WRI,1, "writelog po=%p ud=0x%x", po, wl->pm_userdata);
+
+ error = 0;
+
+ PMCLOG_RESERVE_WITH_ERROR(po, USERDATA,
+ sizeof(struct pmclog_userdata));
+ PMCLOG_EMIT32(wl->pm_userdata);
+ PMCLOG_DESPATCH(po);
+
+ error:
+ return (error);
+}
+
+/*
+ * Initialization.
+ *
+ * Create a pool of log buffers and initialize mutexes.
+ */
+
+void
+pmclog_initialize()
+{
+ int n;
+ struct pmclog_buffer *plb;
+
+ if (pmclog_buffer_size <= 0) {
+ (void) printf("hwpmc: tunable logbuffersize=%d must be "
+ "greater than zero.\n", pmclog_buffer_size);
+ pmclog_buffer_size = PMC_LOG_BUFFER_SIZE;
+ }
+
+ if (pmc_nlogbuffers <= 0) {
+ (void) printf("hwpmc: tunable nlogbuffers=%d must be greater "
+ "than zero.\n", pmc_nlogbuffers);
+ pmc_nlogbuffers = PMC_NLOGBUFFERS;
+ }
+
+ /* create global pool of log buffers */
+ for (n = 0; n < pmc_nlogbuffers; n++) {
+ plb = malloc(1024 * pmclog_buffer_size, M_PMC,
+ M_WAITOK|M_ZERO);
+ PMCLOG_INIT_BUFFER_DESCRIPTOR(plb);
+ TAILQ_INSERT_HEAD(&pmc_bufferlist, plb, plb_next);
+ }
+ mtx_init(&pmc_bufferlist_mtx, "pmc-buffer-list", "pmc-leaf",
+ MTX_SPIN);
+ mtx_init(&pmc_kthread_mtx, "pmc-kthread", "pmc-sleep", MTX_DEF);
+}
+
+/*
+ * Shutdown logging.
+ *
+ * Destroy mutexes and release memory back the to free pool.
+ */
+
+void
+pmclog_shutdown()
+{
+ struct pmclog_buffer *plb;
+
+ mtx_destroy(&pmc_kthread_mtx);
+ mtx_destroy(&pmc_bufferlist_mtx);
+
+ while ((plb = TAILQ_FIRST(&pmc_bufferlist)) != NULL) {
+ TAILQ_REMOVE(&pmc_bufferlist, plb, plb_next);
+ free(plb, M_PMC);
+ }
+}
diff --git a/sys/dev/hwpmc/hwpmc_mips.c b/sys/dev/hwpmc/hwpmc_mips.c
new file mode 100644
index 0000000..68a81e0
--- /dev/null
+++ b/sys/dev/hwpmc/hwpmc_mips.c
@@ -0,0 +1,807 @@
+/*-
+ * Copyright (c) 2010, George V. Neville-Neil <gnn@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_hwpmc_hooks.h"
+
+#include <sys/param.h>
+#include <sys/pmc.h>
+#include <sys/pmckern.h>
+#include <sys/systm.h>
+
+#include <machine/pmc_mdep.h>
+#include <machine/md_var.h>
+#include <machine/mips_opcode.h>
+#include <machine/vmparam.h>
+
+int mips_npmcs;
+
+/*
+ * Per-processor information.
+ */
+struct mips_cpu {
+ struct pmc_hw *pc_mipspmcs;
+};
+
+static struct mips_cpu **mips_pcpu;
+
+#if defined(__mips_n64)
+# define MIPS_IS_VALID_KERNELADDR(reg) ((((reg) & 3) == 0) && \
+ ((vm_offset_t)(reg) >= MIPS_XKPHYS_START))
+#else
+# define MIPS_IS_VALID_KERNELADDR(reg) ((((reg) & 3) == 0) && \
+ ((vm_offset_t)(reg) >= MIPS_KSEG0_START))
+#endif
+
+/*
+ * We need some reasonable default to prevent backtrace code
+ * from wandering too far
+ */
+#define MAX_FUNCTION_SIZE 0x10000
+#define MAX_PROLOGUE_SIZE 0x100
+
+static int
+mips_allocate_pmc(int cpu, int ri, struct pmc *pm,
+ const struct pmc_op_pmcallocate *a)
+{
+ enum pmc_event pe;
+ uint32_t caps, config, counter;
+ uint32_t event;
+ int i;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[mips,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < mips_npmcs,
+ ("[mips,%d] illegal row index %d", __LINE__, ri));
+
+ caps = a->pm_caps;
+ if (a->pm_class != mips_pmc_spec.ps_cpuclass)
+ return (EINVAL);
+ pe = a->pm_ev;
+ counter = MIPS_CTR_ALL;
+ event = 0;
+ for (i = 0; i < mips_event_codes_size; i++) {
+ if (mips_event_codes[i].pe_ev == pe) {
+ event = mips_event_codes[i].pe_code;
+ counter = mips_event_codes[i].pe_counter;
+ break;
+ }
+ }
+
+ if (i == mips_event_codes_size)
+ return (EINVAL);
+
+ if ((counter != MIPS_CTR_ALL) && (counter != ri))
+ return (EINVAL);
+
+ config = mips_get_perfctl(cpu, ri, event, caps);
+
+ pm->pm_md.pm_mips_evsel = config;
+
+ PMCDBG(MDP,ALL,2,"mips-allocate ri=%d -> config=0x%x", ri, config);
+
+ return 0;
+}
+
+
+static int
+mips_read_pmc(int cpu, int ri, pmc_value_t *v)
+{
+ struct pmc *pm;
+ pmc_value_t tmp;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[mips,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < mips_npmcs,
+ ("[mips,%d] illegal row index %d", __LINE__, ri));
+
+ pm = mips_pcpu[cpu]->pc_mipspmcs[ri].phw_pmc;
+ tmp = mips_pmcn_read(ri);
+ PMCDBG(MDP,REA,2,"mips-read id=%d -> %jd", ri, tmp);
+
+ if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
+ *v = tmp - (1UL << (mips_pmc_spec.ps_counter_width - 1));
+ else
+ *v = tmp;
+
+ return 0;
+}
+
+static int
+mips_write_pmc(int cpu, int ri, pmc_value_t v)
+{
+ struct pmc *pm;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[mips,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < mips_npmcs,
+ ("[mips,%d] illegal row-index %d", __LINE__, ri));
+
+ pm = mips_pcpu[cpu]->pc_mipspmcs[ri].phw_pmc;
+
+ if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
+ v = (1UL << (mips_pmc_spec.ps_counter_width - 1)) - v;
+
+ PMCDBG(MDP,WRI,1,"mips-write cpu=%d ri=%d v=%jx", cpu, ri, v);
+
+ mips_pmcn_write(ri, v);
+
+ return 0;
+}
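+
+/*
+ * For a W-bit sampling counter, the write path above programs the
+ * hardware with 2^(W-1) - v so that after 'v' events the most
+ * significant bit (the overflow/interrupt bit) becomes set, and the
+ * read path inverts the mapping by subtracting 2^(W-1).  A
+ * self-contained check of this arithmetic for W = 32 (illustrative
+ * only):
+ */
+#if 0	/* illustrative sketch only */
+#include <assert.h>
+#include <stdint.h>
+
+int
+main(void)
+{
+	const uint64_t half = 1ULL << (32 - 1);	/* 2^(W-1), W = 32 */
+	uint64_t reload = 100000;	/* desired sampling period */
+	uint64_t hw = half - reload;	/* value written to hardware */
+
+	/* One event before the period expires, the MSB is still clear. */
+	assert(((hw + reload - 1) & half) == 0);
+	/* After exactly 'reload' events the MSB sets and interrupts. */
+	assert((hw + reload) & half);
+	return (0);
+}
+#endif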
+
+static int
+mips_config_pmc(int cpu, int ri, struct pmc *pm)
+{
+ struct pmc_hw *phw;
+
+ PMCDBG(MDP,CFG,1, "cpu=%d ri=%d pm=%p", cpu, ri, pm);
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[mips,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < mips_npmcs,
+ ("[mips,%d] illegal row-index %d", __LINE__, ri));
+
+ phw = &mips_pcpu[cpu]->pc_mipspmcs[ri];
+
+ KASSERT(pm == NULL || phw->phw_pmc == NULL,
+ ("[mips,%d] pm=%p phw->pm=%p hwpmc not unconfigured",
+ __LINE__, pm, phw->phw_pmc));
+
+ phw->phw_pmc = pm;
+
+ return 0;
+}
+
+static int
+mips_start_pmc(int cpu, int ri)
+{
+ uint32_t config;
+ struct pmc *pm;
+ struct pmc_hw *phw;
+
+ phw = &mips_pcpu[cpu]->pc_mipspmcs[ri];
+ pm = phw->phw_pmc;
+ config = pm->pm_md.pm_mips_evsel;
+
+ /* Enable the PMC. */
+ switch (ri) {
+ case 0:
+ mips_wr_perfcnt0(config);
+ break;
+ case 1:
+ mips_wr_perfcnt2(config);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int
+mips_stop_pmc(int cpu, int ri)
+{
+ struct pmc *pm;
+ struct pmc_hw *phw;
+
+ phw = &mips_pcpu[cpu]->pc_mipspmcs[ri];
+ pm = phw->phw_pmc;
+
+ /*
+ * Disable the PMCs.
+ *
+ * Clearing the entire register turns the counter off as well
+ * as removes the previously sampled event.
+ */
+ switch (ri) {
+ case 0:
+ mips_wr_perfcnt0(0);
+ break;
+ case 1:
+ mips_wr_perfcnt2(0);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int
+mips_release_pmc(int cpu, int ri, struct pmc *pmc)
+{
+ struct pmc_hw *phw;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[mips,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < mips_npmcs,
+ ("[mips,%d] illegal row-index %d", __LINE__, ri));
+
+ phw = &mips_pcpu[cpu]->pc_mipspmcs[ri];
+ KASSERT(phw->phw_pmc == NULL,
+ ("[mips,%d] PHW pmc %p non-NULL", __LINE__, phw->phw_pmc));
+
+ return 0;
+}
+
+static int
+mips_pmc_intr(int cpu, struct trapframe *tf)
+{
+ int error;
+ int retval, ri;
+ struct pmc *pm;
+ struct mips_cpu *pc;
+ uint32_t r0, r2;
+ pmc_value_t r;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[mips,%d] CPU %d out of range", __LINE__, cpu));
+
+ retval = 0;
+ pc = mips_pcpu[cpu];
+
+	/*
+	 * Stop the PMCs without clearing the counters: masking off the
+	 * low five bits clears the interrupt-enable and mode-enable
+	 * bits while preserving the event selection.
+	 */
+ r0 = mips_rd_perfcnt0();
+ mips_wr_perfcnt0(r0 & ~(0x1f));
+ r2 = mips_rd_perfcnt2();
+ mips_wr_perfcnt2(r2 & ~(0x1f));
+
+ for (ri = 0; ri < mips_npmcs; ri++) {
+ pm = mips_pcpu[cpu]->pc_mipspmcs[ri].phw_pmc;
+ if (pm == NULL)
+ continue;
+ if (! PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
+ continue;
+
+ r = mips_pmcn_read(ri);
+
+		/*
+		 * If the most significant bit (bit 31 for a 32-bit
+		 * counter) is set, the counter has overflowed.
+		 */
+ if ((r & (1UL << (mips_pmc_spec.ps_counter_width - 1))) == 0)
+ continue;
+
+ retval = 1;
+ if (pm->pm_state != PMC_STATE_RUNNING)
+ continue;
+ error = pmc_process_interrupt(cpu, PMC_HR, pm, tf,
+ TRAPF_USERMODE(tf));
+ if (error) {
+ /* Clear/disable the relevant counter */
+ if (ri == 0)
+ r0 = 0;
+ else if (ri == 1)
+ r2 = 0;
+ mips_stop_pmc(cpu, ri);
+ }
+
+ /* Reload sampling count */
+ mips_write_pmc(cpu, ri, pm->pm_sc.pm_reloadcount);
+ }
+
+ /*
+ * Re-enable the PMC counters where they left off.
+ *
+ * Any counter which overflowed will have its sample count
+ * reloaded in the loop above.
+ */
+ mips_wr_perfcnt0(r0);
+ mips_wr_perfcnt2(r2);
+
+ return retval;
+}
+
+static int
+mips_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc)
+{
+ int error;
+ struct pmc_hw *phw;
+ char mips_name[PMC_NAME_MAX];
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[mips,%d], illegal CPU %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < mips_npmcs,
+ ("[mips,%d] row-index %d out of range", __LINE__, ri));
+
+ phw = &mips_pcpu[cpu]->pc_mipspmcs[ri];
+ snprintf(mips_name, sizeof(mips_name), "MIPS-%d", ri);
+ if ((error = copystr(mips_name, pi->pm_name, PMC_NAME_MAX,
+ NULL)) != 0)
+ return error;
+ pi->pm_class = mips_pmc_spec.ps_cpuclass;
+ if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) {
+ pi->pm_enabled = TRUE;
+ *ppmc = phw->phw_pmc;
+ } else {
+ pi->pm_enabled = FALSE;
+ *ppmc = NULL;
+ }
+
+ return (0);
+}
+
+static int
+mips_get_config(int cpu, int ri, struct pmc **ppm)
+{
+ *ppm = mips_pcpu[cpu]->pc_mipspmcs[ri].phw_pmc;
+
+ return 0;
+}
+
+/*
+ * XXX don't know what we should do here.
+ */
+static int
+mips_pmc_switch_in(struct pmc_cpu *pc, struct pmc_process *pp)
+{
+ return 0;
+}
+
+static int
+mips_pmc_switch_out(struct pmc_cpu *pc, struct pmc_process *pp)
+{
+ return 0;
+}
+
+static int
+mips_pcpu_init(struct pmc_mdep *md, int cpu)
+{
+ int first_ri, i;
+ struct pmc_cpu *pc;
+ struct mips_cpu *pac;
+ struct pmc_hw *phw;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[mips,%d] wrong cpu number %d", __LINE__, cpu));
+ PMCDBG(MDP,INI,1,"mips-init cpu=%d", cpu);
+
+ mips_pcpu[cpu] = pac = malloc(sizeof(struct mips_cpu), M_PMC,
+ M_WAITOK|M_ZERO);
+ pac->pc_mipspmcs = malloc(sizeof(struct pmc_hw) * mips_npmcs,
+ M_PMC, M_WAITOK|M_ZERO);
+ pc = pmc_pcpu[cpu];
+ first_ri = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_MIPS].pcd_ri;
+ KASSERT(pc != NULL, ("[mips,%d] NULL per-cpu pointer", __LINE__));
+
+ for (i = 0, phw = pac->pc_mipspmcs; i < mips_npmcs; i++, phw++) {
+ phw->phw_state = PMC_PHW_FLAG_IS_ENABLED |
+ PMC_PHW_CPU_TO_STATE(cpu) | PMC_PHW_INDEX_TO_STATE(i);
+ phw->phw_pmc = NULL;
+ pc->pc_hwpmcs[i + first_ri] = phw;
+ }
+
+ /*
+	 * Clear the counter registers; counting is not enabled
+	 * until the control registers are written in mips_start_pmc().
+ */
+ for (i = 0; i < mips_npmcs; i++)
+ mips_pmcn_write(i, 0);
+
+ return 0;
+}
+
+static int
+mips_pcpu_fini(struct pmc_mdep *md, int cpu)
+{
+ return 0;
+}
+
+struct pmc_mdep *
+pmc_mips_initialize()
+{
+ struct pmc_mdep *pmc_mdep;
+ struct pmc_classdep *pcd;
+
+ /*
+	 * TODO: Use the 'M' (more counters) bit of the PerfCntlX
+	 * registers to detect the actual number of counters.
+ */
+ mips_npmcs = 2;
+
+ PMCDBG(MDP,INI,1,"mips-init npmcs=%d", mips_npmcs);
+
+ /*
+ * Allocate space for pointers to PMC HW descriptors and for
+ * the MDEP structure used by MI code.
+ */
+ mips_pcpu = malloc(sizeof(struct mips_cpu *) * pmc_cpu_max(), M_PMC,
+ M_WAITOK|M_ZERO);
+
+ /* Just one class */
+ pmc_mdep = pmc_mdep_alloc(1);
+
+ pmc_mdep->pmd_cputype = mips_pmc_spec.ps_cputype;
+
+ pcd = &pmc_mdep->pmd_classdep[PMC_MDEP_CLASS_INDEX_MIPS];
+ pcd->pcd_caps = mips_pmc_spec.ps_capabilities;
+ pcd->pcd_class = mips_pmc_spec.ps_cpuclass;
+ pcd->pcd_num = mips_npmcs;
+ pcd->pcd_ri = pmc_mdep->pmd_npmc;
+ pcd->pcd_width = mips_pmc_spec.ps_counter_width;
+
+ pcd->pcd_allocate_pmc = mips_allocate_pmc;
+ pcd->pcd_config_pmc = mips_config_pmc;
+ pcd->pcd_pcpu_fini = mips_pcpu_fini;
+ pcd->pcd_pcpu_init = mips_pcpu_init;
+ pcd->pcd_describe = mips_describe;
+ pcd->pcd_get_config = mips_get_config;
+ pcd->pcd_read_pmc = mips_read_pmc;
+ pcd->pcd_release_pmc = mips_release_pmc;
+ pcd->pcd_start_pmc = mips_start_pmc;
+ pcd->pcd_stop_pmc = mips_stop_pmc;
+ pcd->pcd_write_pmc = mips_write_pmc;
+
+ pmc_mdep->pmd_intr = mips_pmc_intr;
+ pmc_mdep->pmd_switch_in = mips_pmc_switch_in;
+ pmc_mdep->pmd_switch_out = mips_pmc_switch_out;
+
+ pmc_mdep->pmd_npmc += mips_npmcs;
+
+ return (pmc_mdep);
+}
+
+void
+pmc_mips_finalize(struct pmc_mdep *md)
+{
+ (void) md;
+}
+
+#ifdef HWPMC_MIPS_BACKTRACE
+
+static int
+pmc_next_frame(register_t *pc, register_t *sp)
+{
+ InstFmt i;
+ uintptr_t va;
+ uint32_t instr, mask;
+ int more, stksize;
+ register_t ra = 0;
+
+ /* Jump here after a nonstandard (interrupt handler) frame */
+ stksize = 0;
+
+ /* check for bad SP: could foul up next frame */
+ if (!MIPS_IS_VALID_KERNELADDR(*sp)) {
+ goto error;
+ }
+
+ /* check for bad PC */
+ if (!MIPS_IS_VALID_KERNELADDR(*pc)) {
+ goto error;
+ }
+
+ /*
+ * Find the beginning of the current subroutine by scanning
+ * backwards from the current PC for the end of the previous
+ * subroutine.
+ */
+ va = *pc - sizeof(int);
+ while (1) {
+ instr = *((uint32_t *)va);
+
+ /* [d]addiu sp,sp,-X */
+ if (((instr & 0xffff8000) == 0x27bd8000)
+ || ((instr & 0xffff8000) == 0x67bd8000))
+ break;
+
+ /* jr ra */
+ if (instr == 0x03e00008) {
+ /* skip over branch-delay slot instruction */
+ va += 2 * sizeof(int);
+ break;
+ }
+
+ va -= sizeof(int);
+ }
+
+ /* skip over nulls which might separate .o files */
+ while ((instr = *((uint32_t *)va)) == 0)
+ va += sizeof(int);
+
+ /* scan forwards to find stack size and any saved registers */
+ stksize = 0;
+ more = 3;
+ mask = 0;
+ for (; more; va += sizeof(int),
+ more = (more == 3) ? 3 : more - 1) {
+ /* stop if hit our current position */
+ if (va >= *pc)
+ break;
+ instr = *((uint32_t *)va);
+ i.word = instr;
+ switch (i.JType.op) {
+ case OP_SPECIAL:
+ switch (i.RType.func) {
+ case OP_JR:
+ case OP_JALR:
+ more = 2; /* stop after next instruction */
+ break;
+
+ case OP_SYSCALL:
+ case OP_BREAK:
+ more = 1; /* stop now */
+ };
+ break;
+
+ case OP_BCOND:
+ case OP_J:
+ case OP_JAL:
+ case OP_BEQ:
+ case OP_BNE:
+ case OP_BLEZ:
+ case OP_BGTZ:
+ more = 2; /* stop after next instruction */
+ break;
+
+ case OP_COP0:
+ case OP_COP1:
+ case OP_COP2:
+ case OP_COP3:
+ switch (i.RType.rs) {
+ case OP_BCx:
+ case OP_BCy:
+ more = 2; /* stop after next instruction */
+ };
+ break;
+
+ case OP_SW:
+ case OP_SD:
+ /*
+			 * SP is being saved using S8 (FP), which most
+			 * likely means SP is modified in the function
+			 * and we cannot recover its value safely
+			 * without emulating the code backwards.  So
+			 * just bail out on functions like this.
+			 */
+			if ((i.IType.rs == 30) && (i.IType.rt == 29))
+ return (-1);
+
+ /* look for saved registers on the stack */
+ if (i.IType.rs != 29)
+ break;
+ /* only restore the first one */
+ if (mask & (1 << i.IType.rt))
+ break;
+ mask |= (1 << i.IType.rt);
+ if (i.IType.rt == 31)
+ ra = *((register_t *)(*sp + (short)i.IType.imm));
+ break;
+
+ case OP_ADDI:
+ case OP_ADDIU:
+ case OP_DADDI:
+ case OP_DADDIU:
+ /* look for stack pointer adjustment */
+ if (i.IType.rs != 29 || i.IType.rt != 29)
+ break;
+ stksize = -((short)i.IType.imm);
+ }
+ }
+
+ if (!MIPS_IS_VALID_KERNELADDR(ra))
+ return (-1);
+
+ *pc = ra;
+ *sp += stksize;
+
+ return (0);
+
+error:
+ return (-1);
+}
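+
+/*
+ * The magic constants matched above are MIPS I-type encodings:
+ * 0x27bd8000 under mask 0xffff8000 is "addiu sp,sp,-X" (opcode 9,
+ * rs = rt = 29, i.e. sp, with a negative 16-bit immediate),
+ * 0x67bd8000 is the 64-bit "daddiu" form, and 0x03e00008 is "jr ra".
+ * A self-contained sketch extracting the frame size from such a word:
+ */
+#if 0	/* illustrative sketch only */
+#include <assert.h>
+#include <stdint.h>
+
+int
+main(void)
+{
+	/* Encode "addiu sp,sp,-64": base pattern plus the immediate. */
+	uint32_t insn = 0x27bd0000U | (uint32_t)(uint16_t)-64;
+	/* I-type fields: op(31:26) rs(25:21) rt(20:16) imm(15:0). */
+	unsigned op = insn >> 26;
+	unsigned rs = (insn >> 21) & 0x1f;
+	unsigned rt = (insn >> 16) & 0x1f;
+	int16_t imm = (int16_t)(insn & 0xffff);
+
+	assert((insn & 0xffff8000) == 0x27bd8000);	/* prologue match */
+	assert(op == 9 && rs == 29 && rt == 29);	/* addiu sp,sp,imm */
+	assert(-imm == 64);				/* frame size */
+	return (0);
+}
+#endif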
+
+static int
+pmc_next_uframe(register_t *pc, register_t *sp, register_t *ra)
+{
+ int offset, registers_on_stack;
+ uint32_t opcode, mask;
+ register_t function_start;
+ int stksize;
+ InstFmt i;
+
+ registers_on_stack = 0;
+ mask = 0;
+ function_start = 0;
+ offset = 0;
+ stksize = 0;
+
+ while (offset < MAX_FUNCTION_SIZE) {
+ opcode = fuword32((void *)(*pc - offset));
+
+ /* [d]addiu sp, sp, -X*/
+ if (((opcode & 0xffff8000) == 0x27bd8000)
+ || ((opcode & 0xffff8000) == 0x67bd8000)) {
+ function_start = *pc - offset;
+ registers_on_stack = 1;
+ break;
+ }
+
+ /* lui gp, X */
+ if ((opcode & 0xffff8000) == 0x3c1c0000) {
+ /*
+			 * The function might start with this
+			 * instruction.  Keep an eye out for a "jr ra"
+			 * or an sp correction with a positive value
+			 * further on.
+ */
+ function_start = *pc - offset;
+ }
+
+ if (function_start) {
+ /*
+			 * Stop looking further: a possible
+			 * end-of-function instruction here means there
+			 * are no stack modifications and sp is
+			 * unchanged.
+ */
+
+ /* [d]addiu sp,sp,X */
+ if (((opcode & 0xffff8000) == 0x27bd0000)
+ || ((opcode & 0xffff8000) == 0x67bd0000))
+ break;
+
+ if (opcode == 0x03e00008)
+ break;
+ }
+
+ offset += sizeof(int);
+ }
+
+ if (!function_start)
+ return (-1);
+
+ if (registers_on_stack) {
+ offset = 0;
+ while ((offset < MAX_PROLOGUE_SIZE)
+ && ((function_start + offset) < *pc)) {
+ i.word = fuword32((void *)(function_start + offset));
+ switch (i.JType.op) {
+ case OP_SW:
+ /* look for saved registers on the stack */
+ if (i.IType.rs != 29)
+ break;
+ /* only restore the first one */
+ if (mask & (1 << i.IType.rt))
+ break;
+ mask |= (1 << i.IType.rt);
+ if (i.IType.rt == 31)
+ *ra = fuword32((void *)(*sp + (short)i.IType.imm));
+ break;
+
+#if defined(__mips_n64)
+ case OP_SD:
+ /* look for saved registers on the stack */
+ if (i.IType.rs != 29)
+ break;
+ /* only restore the first one */
+ if (mask & (1 << i.IType.rt))
+ break;
+ mask |= (1 << i.IType.rt);
+ /* ra */
+ if (i.IType.rt == 31)
+ *ra = fuword64((void *)(*sp + (short)i.IType.imm));
+ break;
+#endif
+
+ case OP_ADDI:
+ case OP_ADDIU:
+ case OP_DADDI:
+ case OP_DADDIU:
+ /* look for stack pointer adjustment */
+ if (i.IType.rs != 29 || i.IType.rt != 29)
+ break;
+ stksize = -((short)i.IType.imm);
+ }
+
+ offset += sizeof(int);
+ }
+ }
+
+ /*
+	 * We have reached the end of the backtrace.
+ */
+ if (*pc == *ra)
+ return (-1);
+
+ *pc = *ra;
+ *sp += stksize;
+
+ return (0);
+}
+
+#endif /* HWPMC_MIPS_BACKTRACE */
+
+struct pmc_mdep *
+pmc_md_initialize()
+{
+ return pmc_mips_initialize();
+}
+
+void
+pmc_md_finalize(struct pmc_mdep *md)
+{
+	pmc_mips_finalize(md);
+}
+
+int
+pmc_save_kernel_callchain(uintptr_t *cc, int nframes,
+ struct trapframe *tf)
+{
+ register_t pc, ra, sp;
+ int frames = 0;
+
+ pc = tf->pc;
+ sp = tf->sp;
+ ra = tf->ra;
+
+ cc[frames++] = pc;
+
+#ifdef HWPMC_MIPS_BACKTRACE
+ /*
+ * Unwind, and unwind, and unwind
+ */
+ while (1) {
+ if (frames >= nframes)
+ break;
+
+ if (pmc_next_frame(&pc, &sp) < 0)
+ break;
+
+ cc[frames++] = pc;
+ }
+#endif
+
+ return (frames);
+}
+
+int
+pmc_save_user_callchain(uintptr_t *cc, int nframes,
+ struct trapframe *tf)
+{
+ register_t pc, ra, sp;
+ int frames = 0;
+
+ pc = tf->pc;
+ sp = tf->sp;
+ ra = tf->ra;
+
+ cc[frames++] = pc;
+
+#ifdef HWPMC_MIPS_BACKTRACE
+
+ /*
+ * Unwind, and unwind, and unwind
+ */
+ while (1) {
+ if (frames >= nframes)
+ break;
+
+ if (pmc_next_uframe(&pc, &sp, &ra) < 0)
+ break;
+
+ cc[frames++] = pc;
+ }
+#endif
+
+ return (frames);
+}
diff --git a/sys/dev/hwpmc/hwpmc_mips24k.c b/sys/dev/hwpmc/hwpmc_mips24k.c
new file mode 100644
index 0000000..18d7f6c
--- /dev/null
+++ b/sys/dev/hwpmc/hwpmc_mips24k.c
@@ -0,0 +1,229 @@
+/*-
+ * Copyright (c) 2010 George V. Neville-Neil <gnn@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/pmc.h>
+#include <sys/pmckern.h>
+
+#include <machine/cpu.h>
+#include <machine/cpufunc.h>
+#include <machine/pmc_mdep.h>
+
+#define MIPS24K_PMC_CAPS (PMC_CAP_INTERRUPT | PMC_CAP_USER | \
+ PMC_CAP_SYSTEM | PMC_CAP_EDGE | \
+ PMC_CAP_THRESHOLD | PMC_CAP_READ | \
+ PMC_CAP_WRITE | PMC_CAP_INVERT | \
+ PMC_CAP_QUALIFIER)
+
+#define MIPS24K_PMC_INTERRUPT_ENABLE 0x10 /* Enable interrupts */
+#define MIPS24K_PMC_USER_ENABLE 0x08 /* Count in USER mode */
+#define MIPS24K_PMC_SUPER_ENABLE 0x04 /* Count in SUPERVISOR mode */
+#define MIPS24K_PMC_KERNEL_ENABLE 0x02 /* Count in KERNEL mode */
+#define MIPS24K_PMC_ENABLE (MIPS24K_PMC_USER_ENABLE | \
+ MIPS24K_PMC_SUPER_ENABLE | \
+ MIPS24K_PMC_KERNEL_ENABLE)
+
+#define MIPS24K_PMC_SELECT 5 /* Bit position at which the event selector starts. */
+
+const struct mips_event_code_map mips_event_codes[] = {
+ { PMC_EV_MIPS24K_CYCLE, MIPS_CTR_ALL, 0},
+ { PMC_EV_MIPS24K_INSTR_EXECUTED, MIPS_CTR_ALL, 1},
+ { PMC_EV_MIPS24K_BRANCH_COMPLETED, MIPS_CTR_0, 2},
+ { PMC_EV_MIPS24K_BRANCH_MISPRED, MIPS_CTR_1, 2},
+ { PMC_EV_MIPS24K_RETURN, MIPS_CTR_0, 3},
+ { PMC_EV_MIPS24K_RETURN_MISPRED, MIPS_CTR_1, 3},
+ { PMC_EV_MIPS24K_RETURN_NOT_31, MIPS_CTR_0, 4},
+ { PMC_EV_MIPS24K_RETURN_NOTPRED, MIPS_CTR_1, 4},
+ { PMC_EV_MIPS24K_ITLB_ACCESS, MIPS_CTR_0, 5},
+ { PMC_EV_MIPS24K_ITLB_MISS, MIPS_CTR_1, 5},
+ { PMC_EV_MIPS24K_DTLB_ACCESS, MIPS_CTR_0, 6},
+ { PMC_EV_MIPS24K_DTLB_MISS, MIPS_CTR_1, 6},
+ { PMC_EV_MIPS24K_JTLB_IACCESS, MIPS_CTR_0, 7},
+ { PMC_EV_MIPS24K_JTLB_IMISS, MIPS_CTR_1, 7},
+ { PMC_EV_MIPS24K_JTLB_DACCESS, MIPS_CTR_0, 8},
+ { PMC_EV_MIPS24K_JTLB_DMISS, MIPS_CTR_1, 8},
+ { PMC_EV_MIPS24K_IC_FETCH, MIPS_CTR_0, 9},
+ { PMC_EV_MIPS24K_IC_MISS, MIPS_CTR_1, 9},
+ { PMC_EV_MIPS24K_DC_LOADSTORE, MIPS_CTR_0, 10},
+ { PMC_EV_MIPS24K_DC_WRITEBACK, MIPS_CTR_1, 10},
+ { PMC_EV_MIPS24K_DC_MISS, MIPS_CTR_ALL, 11},
+ /* 12 reserved */
+ { PMC_EV_MIPS24K_STORE_MISS, MIPS_CTR_0, 13},
+ { PMC_EV_MIPS24K_LOAD_MISS, MIPS_CTR_1, 13},
+ { PMC_EV_MIPS24K_INTEGER_COMPLETED, MIPS_CTR_0, 14},
+ { PMC_EV_MIPS24K_FP_COMPLETED, MIPS_CTR_1, 14},
+ { PMC_EV_MIPS24K_LOAD_COMPLETED, MIPS_CTR_0, 15},
+ { PMC_EV_MIPS24K_STORE_COMPLETED, MIPS_CTR_1, 15},
+ { PMC_EV_MIPS24K_BARRIER_COMPLETED, MIPS_CTR_0, 16},
+ { PMC_EV_MIPS24K_MIPS16_COMPLETED, MIPS_CTR_1, 16},
+ { PMC_EV_MIPS24K_NOP_COMPLETED, MIPS_CTR_0, 17},
+ { PMC_EV_MIPS24K_INTEGER_MULDIV_COMPLETED, MIPS_CTR_1, 17},
+ { PMC_EV_MIPS24K_RF_STALL, MIPS_CTR_0, 18},
+ { PMC_EV_MIPS24K_INSTR_REFETCH, MIPS_CTR_1, 18},
+ { PMC_EV_MIPS24K_STORE_COND_COMPLETED, MIPS_CTR_0, 19},
+ { PMC_EV_MIPS24K_STORE_COND_FAILED, MIPS_CTR_1, 19},
+ { PMC_EV_MIPS24K_ICACHE_REQUESTS, MIPS_CTR_0, 20},
+ { PMC_EV_MIPS24K_ICACHE_HIT, MIPS_CTR_1, 20},
+ { PMC_EV_MIPS24K_L2_WRITEBACK, MIPS_CTR_0, 21},
+ { PMC_EV_MIPS24K_L2_ACCESS, MIPS_CTR_1, 21},
+ { PMC_EV_MIPS24K_L2_MISS, MIPS_CTR_0, 22},
+ { PMC_EV_MIPS24K_L2_ERR_CORRECTED, MIPS_CTR_1, 22},
+ { PMC_EV_MIPS24K_EXCEPTIONS, MIPS_CTR_0, 23},
+	/* Event 23 on counters 1/3 is undefined */
+ { PMC_EV_MIPS24K_RF_CYCLES_STALLED, MIPS_CTR_0, 24},
+ { PMC_EV_MIPS24K_IFU_CYCLES_STALLED, MIPS_CTR_0, 25},
+ { PMC_EV_MIPS24K_ALU_CYCLES_STALLED, MIPS_CTR_1, 25},
+ /* Events 26 through 32 undefined or reserved to customers */
+ { PMC_EV_MIPS24K_UNCACHED_LOAD, MIPS_CTR_0, 33},
+ { PMC_EV_MIPS24K_UNCACHED_STORE, MIPS_CTR_1, 33},
+ { PMC_EV_MIPS24K_CP2_REG_TO_REG_COMPLETED, MIPS_CTR_0, 35},
+ { PMC_EV_MIPS24K_MFTC_COMPLETED, MIPS_CTR_1, 35},
+ /* Event 36 reserved */
+ { PMC_EV_MIPS24K_IC_BLOCKED_CYCLES, MIPS_CTR_0, 37},
+ { PMC_EV_MIPS24K_DC_BLOCKED_CYCLES, MIPS_CTR_1, 37},
+ { PMC_EV_MIPS24K_L2_IMISS_STALL_CYCLES, MIPS_CTR_0, 38},
+ { PMC_EV_MIPS24K_L2_DMISS_STALL_CYCLES, MIPS_CTR_1, 38},
+ { PMC_EV_MIPS24K_DMISS_CYCLES, MIPS_CTR_0, 39},
+ { PMC_EV_MIPS24K_L2_MISS_CYCLES, MIPS_CTR_1, 39},
+ { PMC_EV_MIPS24K_UNCACHED_BLOCK_CYCLES, MIPS_CTR_0, 40},
+ { PMC_EV_MIPS24K_MDU_STALL_CYCLES, MIPS_CTR_0, 41},
+ { PMC_EV_MIPS24K_FPU_STALL_CYCLES, MIPS_CTR_1, 41},
+ { PMC_EV_MIPS24K_CP2_STALL_CYCLES, MIPS_CTR_0, 42},
+ { PMC_EV_MIPS24K_COREXTEND_STALL_CYCLES, MIPS_CTR_1, 42},
+ { PMC_EV_MIPS24K_ISPRAM_STALL_CYCLES, MIPS_CTR_0, 43},
+ { PMC_EV_MIPS24K_DSPRAM_STALL_CYCLES, MIPS_CTR_1, 43},
+ { PMC_EV_MIPS24K_CACHE_STALL_CYCLES, MIPS_CTR_0, 44},
+ /* Event 44 undefined on 1/3 */
+ { PMC_EV_MIPS24K_LOAD_TO_USE_STALLS, MIPS_CTR_0, 45},
+ { PMC_EV_MIPS24K_BASE_MISPRED_STALLS, MIPS_CTR_1, 45},
+ { PMC_EV_MIPS24K_CPO_READ_STALLS, MIPS_CTR_0, 46},
+ { PMC_EV_MIPS24K_BRANCH_MISPRED_CYCLES, MIPS_CTR_1, 46},
+ /* Event 47 reserved */
+ { PMC_EV_MIPS24K_IFETCH_BUFFER_FULL, MIPS_CTR_0, 48},
+ { PMC_EV_MIPS24K_FETCH_BUFFER_ALLOCATED, MIPS_CTR_1, 48},
+ { PMC_EV_MIPS24K_EJTAG_ITRIGGER, MIPS_CTR_0, 49},
+ { PMC_EV_MIPS24K_EJTAG_DTRIGGER, MIPS_CTR_1, 49},
+ { PMC_EV_MIPS24K_FSB_LT_QUARTER, MIPS_CTR_0, 50},
+ { PMC_EV_MIPS24K_FSB_QUARTER_TO_HALF, MIPS_CTR_1, 50},
+ { PMC_EV_MIPS24K_FSB_GT_HALF, MIPS_CTR_0, 51},
+ { PMC_EV_MIPS24K_FSB_FULL_PIPELINE_STALLS, MIPS_CTR_1, 51},
+ { PMC_EV_MIPS24K_LDQ_LT_QUARTER, MIPS_CTR_0, 52},
+ { PMC_EV_MIPS24K_LDQ_QUARTER_TO_HALF, MIPS_CTR_1, 52},
+ { PMC_EV_MIPS24K_LDQ_GT_HALF, MIPS_CTR_0, 53},
+ { PMC_EV_MIPS24K_LDQ_FULL_PIPELINE_STALLS, MIPS_CTR_1, 53},
+ { PMC_EV_MIPS24K_WBB_LT_QUARTER, MIPS_CTR_0, 54},
+ { PMC_EV_MIPS24K_WBB_QUARTER_TO_HALF, MIPS_CTR_1, 54},
+ { PMC_EV_MIPS24K_WBB_GT_HALF, MIPS_CTR_0, 55},
+ { PMC_EV_MIPS24K_WBB_FULL_PIPELINE_STALLS, MIPS_CTR_1, 55},
+	/* Events 56-60 and 62-63 reserved */
+ { PMC_EV_MIPS24K_REQUEST_LATENCY, MIPS_CTR_0, 61},
+ { PMC_EV_MIPS24K_REQUEST_COUNT, MIPS_CTR_1, 61}
+
+};
+
+const int mips_event_codes_size =
+ sizeof(mips_event_codes) / sizeof(mips_event_codes[0]);
+
+struct mips_pmc_spec mips_pmc_spec = {
+ .ps_cpuclass = PMC_CLASS_MIPS24K,
+ .ps_cputype = PMC_CPU_MIPS_24K,
+ .ps_capabilities = MIPS24K_PMC_CAPS,
+ .ps_counter_width = 32
+};
+
+/*
+ * Performance Count Register N
+ */
+uint64_t
+mips_pmcn_read(unsigned int pmc)
+{
+ uint32_t reg = 0;
+
+ KASSERT(pmc < mips_npmcs, ("[mips24k,%d] illegal PMC number %d",
+ __LINE__, pmc));
+
+	/* Each counter register immediately follows its control register. */
+ switch (pmc) {
+ case 0:
+ reg = mips_rd_perfcnt1();
+ break;
+ case 1:
+ reg = mips_rd_perfcnt3();
+ break;
+ default:
+ return 0;
+ }
+ return (reg);
+}
+
+uint64_t
+mips_pmcn_write(unsigned int pmc, uint64_t reg)
+{
+
+ KASSERT(pmc < mips_npmcs, ("[mips24k,%d] illegal PMC number %d",
+ __LINE__, pmc));
+
+ switch (pmc) {
+ case 0:
+ mips_wr_perfcnt1(reg);
+ break;
+ case 1:
+ mips_wr_perfcnt3(reg);
+ break;
+ default:
+ return 0;
+ }
+ return (reg);
+}
+
+uint32_t
+mips_get_perfctl(int cpu, int ri, uint32_t event, uint32_t caps)
+{
+ uint32_t config;
+
+ config = event;
+
+ config <<= MIPS24K_PMC_SELECT;
+
+ if (caps & PMC_CAP_SYSTEM)
+ config |= (MIPS24K_PMC_SUPER_ENABLE |
+ MIPS24K_PMC_KERNEL_ENABLE);
+ if (caps & PMC_CAP_USER)
+ config |= MIPS24K_PMC_USER_ENABLE;
+ if ((caps & (PMC_CAP_USER | PMC_CAP_SYSTEM)) == 0)
+ config |= MIPS24K_PMC_ENABLE;
+ if (caps & PMC_CAP_INTERRUPT)
+ config |= MIPS24K_PMC_INTERRUPT_ENABLE;
+
+ PMCDBG(MDP,ALL,2,"mips24k-get_perfctl ri=%d -> config=0x%x", ri, config);
+
+ return (config);
+}
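+
+/*
+ * A worked example of the control word layout built above: counting
+ * event 1 (instructions executed) in user mode with overflow
+ * interrupts yields (1 << MIPS24K_PMC_SELECT) |
+ * MIPS24K_PMC_USER_ENABLE | MIPS24K_PMC_INTERRUPT_ENABLE == 0x38.
+ * A self-contained check (constants duplicated from the definitions
+ * above):
+ */
+#if 0	/* illustrative sketch only */
+#include <assert.h>
+#include <stdint.h>
+
+int
+main(void)
+{
+	const uint32_t intr = 0x10, user = 0x08, select = 5;
+	uint32_t config = (1U << select) | user | intr;
+
+	assert(config == 0x38);
+	return (0);
+}
+#endif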
diff --git a/sys/dev/hwpmc/hwpmc_mod.c b/sys/dev/hwpmc/hwpmc_mod.c
new file mode 100644
index 0000000..2f2f05a
--- /dev/null
+++ b/sys/dev/hwpmc/hwpmc_mod.c
@@ -0,0 +1,5139 @@
+/*-
+ * Copyright (c) 2003-2008 Joseph Koshy
+ * Copyright (c) 2007 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * Portions of this software were developed by A. Joseph Koshy under
+ * sponsorship from the FreeBSD Foundation and Google, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/eventhandler.h>
+#include <sys/jail.h>
+#include <sys/kernel.h>
+#include <sys/kthread.h>
+#include <sys/limits.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <sys/mount.h>
+#include <sys/mutex.h>
+#include <sys/pmc.h>
+#include <sys/pmckern.h>
+#include <sys/pmclog.h>
+#include <sys/priv.h>
+#include <sys/proc.h>
+#include <sys/queue.h>
+#include <sys/resourcevar.h>
+#include <sys/sched.h>
+#include <sys/signalvar.h>
+#include <sys/smp.h>
+#include <sys/sx.h>
+#include <sys/sysctl.h>
+#include <sys/sysent.h>
+#include <sys/systm.h>
+#include <sys/vnode.h>
+
+#include <sys/linker.h> /* needs to be after <sys/malloc.h> */
+
+#include <machine/atomic.h>
+#include <machine/md_var.h>
+
+#include <vm/vm.h>
+#include <vm/vm_extern.h>
+#include <vm/pmap.h>
+#include <vm/vm_map.h>
+#include <vm/vm_object.h>
+
+#include "hwpmc_soft.h"
+
+/*
+ * Types
+ */
+
+enum pmc_flags {
+ PMC_FLAG_NONE = 0x00, /* do nothing */
+ PMC_FLAG_REMOVE = 0x01, /* atomically remove entry from hash */
+ PMC_FLAG_ALLOCATE = 0x02, /* add entry to hash if not found */
+};
+
+/*
+ * The offset in sysent where the syscall is allocated.
+ */
+
+static int pmc_syscall_num = NO_SYSCALL;
+struct pmc_cpu **pmc_pcpu; /* per-cpu state */
+pmc_value_t *pmc_pcpu_saved; /* saved PMC values: CSW handling */
+
+#define PMC_PCPU_SAVED(C,R) pmc_pcpu_saved[(R) + md->pmd_npmc*(C)]
+
+struct mtx_pool *pmc_mtxpool;
+static int *pmc_pmcdisp; /* PMC row dispositions */
+
+#define PMC_ROW_DISP_IS_FREE(R) (pmc_pmcdisp[(R)] == 0)
+#define PMC_ROW_DISP_IS_THREAD(R) (pmc_pmcdisp[(R)] > 0)
+#define PMC_ROW_DISP_IS_STANDALONE(R) (pmc_pmcdisp[(R)] < 0)
+
+#define PMC_MARK_ROW_FREE(R) do { \
+ pmc_pmcdisp[(R)] = 0; \
+} while (0)
+
+#define PMC_MARK_ROW_STANDALONE(R) do { \
+ KASSERT(pmc_pmcdisp[(R)] <= 0, ("[pmc,%d] row disposition error", \
+ __LINE__)); \
+ atomic_add_int(&pmc_pmcdisp[(R)], -1); \
+ KASSERT(pmc_pmcdisp[(R)] >= (-pmc_cpu_max_active()), \
+ ("[pmc,%d] row disposition error", __LINE__)); \
+} while (0)
+
+#define PMC_UNMARK_ROW_STANDALONE(R) do { \
+ atomic_add_int(&pmc_pmcdisp[(R)], 1); \
+ KASSERT(pmc_pmcdisp[(R)] <= 0, ("[pmc,%d] row disposition error", \
+ __LINE__)); \
+} while (0)
+
+#define PMC_MARK_ROW_THREAD(R) do { \
+ KASSERT(pmc_pmcdisp[(R)] >= 0, ("[pmc,%d] row disposition error", \
+ __LINE__)); \
+ atomic_add_int(&pmc_pmcdisp[(R)], 1); \
+} while (0)
+
+#define PMC_UNMARK_ROW_THREAD(R) do { \
+ atomic_add_int(&pmc_pmcdisp[(R)], -1); \
+ KASSERT(pmc_pmcdisp[(R)] >= 0, ("[pmc,%d] row disposition error", \
+ __LINE__)); \
+} while (0)
+
+
+/* various event handlers */
+static eventhandler_tag pmc_exit_tag, pmc_fork_tag;
+
+/* Module statistics */
+struct pmc_op_getdriverstats pmc_stats;
+
+/* Machine/processor dependent operations */
+static struct pmc_mdep *md;
+
+/*
+ * Hash tables mapping owner processes and target threads to PMCs.
+ */
+
+struct mtx pmc_processhash_mtx; /* spin mutex */
+static u_long pmc_processhashmask;
+static LIST_HEAD(pmc_processhash, pmc_process) *pmc_processhash;
+
+/*
+ * Hash table of PMC owner descriptors. This table is protected by
+ * the shared PMC "sx" lock.
+ */
+
+static u_long pmc_ownerhashmask;
+static LIST_HEAD(pmc_ownerhash, pmc_owner) *pmc_ownerhash;
+
+/*
+ * List of PMC owners with system-wide sampling PMCs.
+ */
+
+static LIST_HEAD(, pmc_owner) pmc_ss_owners;
+
+
+/*
+ * A map of row indices to classdep structures.
+ */
+static struct pmc_classdep **pmc_rowindex_to_classdep;
+
+/*
+ * Prototypes
+ */
+
+#ifdef DEBUG
+static int pmc_debugflags_sysctl_handler(SYSCTL_HANDLER_ARGS);
+static int pmc_debugflags_parse(char *newstr, char *fence);
+#endif
+
+static int load(struct module *module, int cmd, void *arg);
+static int pmc_attach_process(struct proc *p, struct pmc *pm);
+static struct pmc *pmc_allocate_pmc_descriptor(void);
+static struct pmc_owner *pmc_allocate_owner_descriptor(struct proc *p);
+static int pmc_attach_one_process(struct proc *p, struct pmc *pm);
+static int pmc_can_allocate_rowindex(struct proc *p, unsigned int ri,
+ int cpu);
+static int pmc_can_attach(struct pmc *pm, struct proc *p);
+static void pmc_capture_user_callchain(int cpu, int soft, struct trapframe *tf);
+static void pmc_cleanup(void);
+static int pmc_detach_process(struct proc *p, struct pmc *pm);
+static int pmc_detach_one_process(struct proc *p, struct pmc *pm,
+ int flags);
+static void pmc_destroy_owner_descriptor(struct pmc_owner *po);
+static struct pmc_owner *pmc_find_owner_descriptor(struct proc *p);
+static int pmc_find_pmc(pmc_id_t pmcid, struct pmc **pm);
+static struct pmc *pmc_find_pmc_descriptor_in_process(struct pmc_owner *po,
+ pmc_id_t pmc);
+static struct pmc_process *pmc_find_process_descriptor(struct proc *p,
+ uint32_t mode);
+static void pmc_force_context_switch(void);
+static void pmc_link_target_process(struct pmc *pm,
+ struct pmc_process *pp);
+static void pmc_log_all_process_mappings(struct pmc_owner *po);
+static void pmc_log_kernel_mappings(struct pmc *pm);
+static void pmc_log_process_mappings(struct pmc_owner *po, struct proc *p);
+static void pmc_maybe_remove_owner(struct pmc_owner *po);
+static void pmc_process_csw_in(struct thread *td);
+static void pmc_process_csw_out(struct thread *td);
+static void pmc_process_exit(void *arg, struct proc *p);
+static void pmc_process_fork(void *arg, struct proc *p1,
+ struct proc *p2, int n);
+static void pmc_process_samples(int cpu, int soft);
+static void pmc_release_pmc_descriptor(struct pmc *pmc);
+static void pmc_remove_owner(struct pmc_owner *po);
+static void pmc_remove_process_descriptor(struct pmc_process *pp);
+static void pmc_restore_cpu_binding(struct pmc_binding *pb);
+static void pmc_save_cpu_binding(struct pmc_binding *pb);
+static void pmc_select_cpu(int cpu);
+static int pmc_start(struct pmc *pm);
+static int pmc_stop(struct pmc *pm);
+static int pmc_syscall_handler(struct thread *td, void *syscall_args);
+static void pmc_unlink_target_process(struct pmc *pmc,
+ struct pmc_process *pp);
+static int generic_switch_in(struct pmc_cpu *pc, struct pmc_process *pp);
+static int generic_switch_out(struct pmc_cpu *pc, struct pmc_process *pp);
+static struct pmc_mdep *pmc_generic_cpu_initialize(void);
+static void pmc_generic_cpu_finalize(struct pmc_mdep *md);
+
+/*
+ * Kernel tunables and sysctl(8) interface.
+ */
+
+SYSCTL_DECL(_kern_hwpmc);
+
+static int pmc_callchaindepth = PMC_CALLCHAIN_DEPTH;
+TUNABLE_INT(PMC_SYSCTL_NAME_PREFIX "callchaindepth", &pmc_callchaindepth);
+SYSCTL_INT(_kern_hwpmc, OID_AUTO, callchaindepth, CTLFLAG_TUN|CTLFLAG_RD,
+ &pmc_callchaindepth, 0, "depth of call chain records");
+
+#ifdef DEBUG
+struct pmc_debugflags pmc_debugflags = PMC_DEBUG_DEFAULT_FLAGS;
+char pmc_debugstr[PMC_DEBUG_STRSIZE];
+TUNABLE_STR(PMC_SYSCTL_NAME_PREFIX "debugflags", pmc_debugstr,
+ sizeof(pmc_debugstr));
+SYSCTL_PROC(_kern_hwpmc, OID_AUTO, debugflags,
+ CTLTYPE_STRING|CTLFLAG_RW|CTLFLAG_TUN,
+ 0, 0, pmc_debugflags_sysctl_handler, "A", "debug flags");
+#endif
+
+/*
+ * kern.hwpmc.hashrows -- determines the number of rows in the
+ * of the hash table used to look up threads
+ */
+
+static int pmc_hashsize = PMC_HASH_SIZE;
+TUNABLE_INT(PMC_SYSCTL_NAME_PREFIX "hashsize", &pmc_hashsize);
+SYSCTL_INT(_kern_hwpmc, OID_AUTO, hashsize, CTLFLAG_TUN|CTLFLAG_RD,
+ &pmc_hashsize, 0, "rows in hash tables");
+
+/*
+ * kern.hwpmc.nsamples --- number of PC samples/callchain stacks per CPU
+ */
+
+static int pmc_nsamples = PMC_NSAMPLES;
+TUNABLE_INT(PMC_SYSCTL_NAME_PREFIX "nsamples", &pmc_nsamples);
+SYSCTL_INT(_kern_hwpmc, OID_AUTO, nsamples, CTLFLAG_TUN|CTLFLAG_RD,
+ &pmc_nsamples, 0, "number of PC samples per CPU");
+
+
+/*
+ * kern.hwpmc.mtxpoolsize -- number of mutexes in the mutex pool.
+ */
+
+static int pmc_mtxpool_size = PMC_MTXPOOL_SIZE;
+TUNABLE_INT(PMC_SYSCTL_NAME_PREFIX "mtxpoolsize", &pmc_mtxpool_size);
+SYSCTL_INT(_kern_hwpmc, OID_AUTO, mtxpoolsize, CTLFLAG_TUN|CTLFLAG_RD,
+ &pmc_mtxpool_size, 0, "size of spin mutex pool");
+
+
+/*
+ * security.bsd.unprivileged_syspmcs -- allow non-root processes to
+ * allocate system-wide PMCs.
+ *
+ * Allowing unprivileged processes to allocate system PMCs is convenient
+ * if system-wide measurements need to be taken concurrently with other
+ * per-process measurements. This feature is turned off by default.
+ */
+
+static int pmc_unprivileged_syspmcs = 0;
+TUNABLE_INT("security.bsd.unprivileged_syspmcs", &pmc_unprivileged_syspmcs);
+SYSCTL_INT(_security_bsd, OID_AUTO, unprivileged_syspmcs, CTLFLAG_RW,
+ &pmc_unprivileged_syspmcs, 0,
+ "allow unprivileged process to allocate system PMCs");
+
+/*
+ * Hash function. Discard the lower 2 bits of the pointer since
+ * these are always zero for our uses. The hash multiplier is
+ * round((2^LONG_BIT) * ((sqrt(5)-1)/2)).
+ */
+
+#if LONG_BIT == 64
+#define _PMC_HM 11400714819323198486u
+#elif LONG_BIT == 32
+#define _PMC_HM 2654435769u
+#else
+#error Must know the size of 'long' to compile
+#endif
+
+#define PMC_HASH_PTR(P,M) ((((unsigned long) (P) >> 2) * _PMC_HM) & (M))
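+
+/*
+ * PMC_HASH_PTR() is Fibonacci hashing: drop the two always-zero low
+ * bits of the pointer, multiply by 2^LONG_BIT divided by the golden
+ * ratio, and mask with (nbuckets - 1), the bucket count being a
+ * power of two.  A self-contained userland sketch of the same
+ * computation (assumes a 64-bit 'long', as in the LONG_BIT == 64
+ * case above):
+ */
+#if 0	/* illustrative sketch only */
+#include <stdio.h>
+
+#define HM	11400714819323198486UL	/* round(2^64 / phi) */
+#define HASH(P, M) ((((unsigned long)(P) >> 2) * HM) & (M))
+
+int
+main(void)
+{
+	int x;
+	unsigned long mask = 64 - 1;	/* 64 buckets, a power of two */
+
+	printf("bucket = %lu\n", HASH(&x, mask));
+	return (0);
+}
+#endif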
+
+/*
+ * Syscall structures
+ */
+
+/* The `sysent' for the new syscall */
+static struct sysent pmc_sysent = {
+ 2, /* sy_narg */
+ pmc_syscall_handler /* sy_call */
+};
+
+static struct syscall_module_data pmc_syscall_mod = {
+ load,
+ NULL,
+ &pmc_syscall_num,
+ &pmc_sysent,
+ { 0, NULL }
+};
+
+static moduledata_t pmc_mod = {
+ PMC_MODULE_NAME,
+ syscall_module_handler,
+ &pmc_syscall_mod
+};
+
+DECLARE_MODULE(pmc, pmc_mod, SI_SUB_SMP, SI_ORDER_ANY);
+MODULE_VERSION(pmc, PMC_VERSION);
+
+#ifdef DEBUG
+enum pmc_dbgparse_state {
+ PMCDS_WS, /* in whitespace */
+ PMCDS_MAJOR, /* seen a major keyword */
+ PMCDS_MINOR
+};
+
+static int
+pmc_debugflags_parse(char *newstr, char *fence)
+{
+ char c, *p, *q;
+ struct pmc_debugflags *tmpflags;
+ int error, found, *newbits, tmp;
+ size_t kwlen;
+
+ tmpflags = malloc(sizeof(*tmpflags), M_PMC, M_WAITOK|M_ZERO);
+
+ p = newstr;
+ error = 0;
+
+ for (; p < fence && (c = *p); p++) {
+
+ /* skip white space */
+ if (c == ' ' || c == '\t')
+ continue;
+
+ /* look for a keyword followed by "=" */
+ for (q = p; p < fence && (c = *p) && c != '='; p++)
+ ;
+ if (c != '=') {
+ error = EINVAL;
+ goto done;
+ }
+
+ kwlen = p - q;
+ newbits = NULL;
+
+ /* lookup flag group name */
+#define DBG_SET_FLAG_MAJ(S,F) \
+ if (kwlen == sizeof(S)-1 && strncmp(q, S, kwlen) == 0) \
+ newbits = &tmpflags->pdb_ ## F;
+
+ DBG_SET_FLAG_MAJ("cpu", CPU);
+ DBG_SET_FLAG_MAJ("csw", CSW);
+ DBG_SET_FLAG_MAJ("logging", LOG);
+ DBG_SET_FLAG_MAJ("module", MOD);
+ DBG_SET_FLAG_MAJ("md", MDP);
+ DBG_SET_FLAG_MAJ("owner", OWN);
+ DBG_SET_FLAG_MAJ("pmc", PMC);
+ DBG_SET_FLAG_MAJ("process", PRC);
+ DBG_SET_FLAG_MAJ("sampling", SAM);
+
+ if (newbits == NULL) {
+ error = EINVAL;
+ goto done;
+ }
+
+ p++; /* skip the '=' */
+
+ /* Now parse the individual flags */
+ tmp = 0;
+ newflag:
+ for (q = p; p < fence && (c = *p); p++)
+ if (c == ' ' || c == '\t' || c == ',')
+ break;
+
+ /* p == fence or c == ws or c == "," or c == 0 */
+
+ if ((kwlen = p - q) == 0) {
+ *newbits = tmp;
+ continue;
+ }
+
+ found = 0;
+#define DBG_SET_FLAG_MIN(S,F) \
+ if (kwlen == sizeof(S)-1 && strncmp(q, S, kwlen) == 0) \
+ tmp |= found = (1 << PMC_DEBUG_MIN_ ## F)
+
+ /* a '*' denotes all possible flags in the group */
+ if (kwlen == 1 && *q == '*')
+ tmp = found = ~0;
+ /* look for individual flag names */
+ DBG_SET_FLAG_MIN("allocaterow", ALR);
+ DBG_SET_FLAG_MIN("allocate", ALL);
+ DBG_SET_FLAG_MIN("attach", ATT);
+ DBG_SET_FLAG_MIN("bind", BND);
+ DBG_SET_FLAG_MIN("config", CFG);
+ DBG_SET_FLAG_MIN("exec", EXC);
+ DBG_SET_FLAG_MIN("exit", EXT);
+ DBG_SET_FLAG_MIN("find", FND);
+ DBG_SET_FLAG_MIN("flush", FLS);
+ DBG_SET_FLAG_MIN("fork", FRK);
+ DBG_SET_FLAG_MIN("getbuf", GTB);
+ DBG_SET_FLAG_MIN("hook", PMH);
+ DBG_SET_FLAG_MIN("init", INI);
+ DBG_SET_FLAG_MIN("intr", INT);
+ DBG_SET_FLAG_MIN("linktarget", TLK);
+ DBG_SET_FLAG_MIN("mayberemove", OMR);
+ DBG_SET_FLAG_MIN("ops", OPS);
+ DBG_SET_FLAG_MIN("read", REA);
+ DBG_SET_FLAG_MIN("register", REG);
+ DBG_SET_FLAG_MIN("release", REL);
+ DBG_SET_FLAG_MIN("remove", ORM);
+ DBG_SET_FLAG_MIN("sample", SAM);
+ DBG_SET_FLAG_MIN("scheduleio", SIO);
+ DBG_SET_FLAG_MIN("select", SEL);
+ DBG_SET_FLAG_MIN("signal", SIG);
+ DBG_SET_FLAG_MIN("swi", SWI);
+ DBG_SET_FLAG_MIN("swo", SWO);
+ DBG_SET_FLAG_MIN("start", STA);
+ DBG_SET_FLAG_MIN("stop", STO);
+ DBG_SET_FLAG_MIN("syscall", PMS);
+ DBG_SET_FLAG_MIN("unlinktarget", TUL);
+ DBG_SET_FLAG_MIN("write", WRI);
+ if (found == 0) {
+ /* unrecognized flag name */
+ error = EINVAL;
+ goto done;
+ }
+
+ if (c == 0 || c == ' ' || c == '\t') { /* end of flag group */
+ *newbits = tmp;
+ continue;
+ }
+
+ p++;
+ goto newflag;
+ }
+
+ /* save the new flag set */
+ bcopy(tmpflags, &pmc_debugflags, sizeof(pmc_debugflags));
+
+ done:
+ free(tmpflags, M_PMC);
+ return error;
+}
+
+static int
+pmc_debugflags_sysctl_handler(SYSCTL_HANDLER_ARGS)
+{
+ char *fence, *newstr;
+ int error;
+ unsigned int n;
+
+ (void) arg1; (void) arg2; /* unused parameters */
+
+ n = sizeof(pmc_debugstr);
+ newstr = malloc(n, M_PMC, M_WAITOK|M_ZERO);
+ (void) strlcpy(newstr, pmc_debugstr, n);
+
+ error = sysctl_handle_string(oidp, newstr, n, req);
+
+ /* if there is a new string, parse and copy it */
+ if (error == 0 && req->newptr != NULL) {
+ fence = newstr + (n < req->newlen ? n : req->newlen + 1);
+ if ((error = pmc_debugflags_parse(newstr, fence)) == 0)
+ (void) strlcpy(pmc_debugstr, newstr,
+ sizeof(pmc_debugstr));
+ }
+
+ free(newstr, M_PMC);
+
+ return error;
+}
+#endif
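+
+/*
+ * The parser above accepts a whitespace-separated list of
+ * "major=minor[,minor...]" groups: the major keywords come from the
+ * DBG_SET_FLAG_MAJ() calls, the minor flag names from
+ * DBG_SET_FLAG_MIN(), and a '*' selects every flag in a group.  A
+ * hypothetical invocation:
+ *
+ *   # sysctl kern.hwpmc.debugflags="pmc=allocate,attach logging=*"
+ *
+ * enables the 'allocate' and 'attach' flags of the "pmc" group and
+ * all flags of the "logging" group.
+ */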
+
+/*
+ * Map a row index to a classdep structure and return the adjusted row
+ * index for the PMC class index.
+ */
+static struct pmc_classdep *
+pmc_ri_to_classdep(struct pmc_mdep *md, int ri, int *adjri)
+{
+ struct pmc_classdep *pcd;
+
+ (void) md;
+
+ KASSERT(ri >= 0 && ri < md->pmd_npmc,
+ ("[pmc,%d] illegal row-index %d", __LINE__, ri));
+
+ pcd = pmc_rowindex_to_classdep[ri];
+
+ KASSERT(pcd != NULL,
+ ("[pmc,%d] ri %d null pcd", __LINE__, ri));
+
+ *adjri = ri - pcd->pcd_ri;
+
+ KASSERT(*adjri >= 0 && *adjri < pcd->pcd_num,
+ ("[pmc,%d] adjusted row-index %d", __LINE__, *adjri));
+
+ return (pcd);
+}
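+
+/*
+ * A worked example of the mapping above: with a hypothetical layout
+ * of two classes, class A at pcd_ri = 0 with pcd_num = 4 and class B
+ * at pcd_ri = 4 with pcd_num = 2, the global row index ri = 5
+ * resolves to class B with adjusted index adjri = 5 - 4 = 1.
+ */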
+
+/*
+ * Concurrency Control
+ *
+ * The driver manages the following data structures:
+ *
+ * - target process descriptors, one per target process
+ * - owner process descriptors (and attached lists), one per owner process
+ * - lookup hash tables for owner and target processes
+ * - PMC descriptors (and attached lists)
+ * - per-cpu hardware state
+ * - the 'hook' variable through which the kernel calls into
+ * this module
+ * - the machine hardware state (managed by the MD layer)
+ *
+ * These data structures are accessed from:
+ *
+ * - thread context-switch code
+ * - interrupt handlers (possibly on multiple cpus)
+ * - kernel threads on multiple cpus running on behalf of user
+ * processes doing system calls
+ * - this driver's private kernel threads
+ *
+ * = Locks and Locking strategy =
+ *
+ * The driver uses four locking strategies for its operation:
+ *
+ * - The global SX lock "pmc_sx" is used to protect internal
+ * data structures.
+ *
+ * Calls into the module by syscall() start with this lock being
+ * held in exclusive mode. Depending on the requested operation,
+ * the lock may be downgraded to 'shared' mode to allow more
+ * concurrent readers into the module. Calls into the module from
+ * other parts of the kernel acquire the lock in shared mode.
+ *
+ * This SX lock is held in exclusive mode for any operations that
+ * modify the linkages between the driver's internal data structures.
+ *
+ * The 'pmc_hook' function pointer is also protected by this lock.
+ * It is only examined with the sx lock held in exclusive mode. The
+ * kernel module is allowed to be unloaded only with the sx lock held
+ * in exclusive mode. In normal syscall handling, after acquiring the
+ * pmc_sx lock we first check that 'pmc_hook' is non-null before
+ * proceeding. This prevents races between the thread unloading the module
+ * and other threads seeking to use the module.
+ *
+ * - Lookups of target process structures and owner process structures
+ * cannot use the global "pmc_sx" SX lock because these lookups need
+ * to happen during context switches and in other critical sections
+ * where sleeping is not allowed. We protect these lookup tables
+ * with their own private spin-mutexes, "pmc_processhash_mtx" and
+ * "pmc_ownerhash_mtx".
+ *
+ * - Interrupt handlers work in a lock free manner. At interrupt
+ * time, handlers look at the PMC pointer (phw->phw_pmc) configured
+ * when the PMC was started. If this pointer is NULL, the interrupt
+ * is ignored after updating driver statistics. We ensure that this
+ * pointer is set (using an atomic operation if necessary) before the
+ * PMC hardware is started. Conversely, this pointer is unset atomically
+ * only after the PMC hardware is stopped.
+ *
+ * We ensure that everything needed for the operation of an
+ * interrupt handler is available without it needing to acquire any
+ * locks. We also ensure that a PMC's software state is destroyed only
+ * after the PMC is taken off hardware (on all CPUs).
+ *
+ * - Context-switch handling with process-private PMCs needs more
+ * care.
+ *
+ * A given process may be the target of multiple PMCs. For example,
+ * PMCATTACH and PMCDETACH may be requested by a process on one CPU
+ * while the target process is running on another. A PMC could also
+ * be getting released because its owner is exiting. We tackle
+ * these situations in the following manner:
+ *
+ * - each target process structure 'pmc_process' has an array
+ * of 'struct pmc *' pointers, one for each hardware PMC.
+ *
+ * - At context switch IN time, each "target" PMC in RUNNING state
+ * gets started on hardware and a pointer to each PMC is copied into
+ * the per-cpu phw array. The 'runcount' for the PMC is
+ * incremented.
+ *
+ * - At context switch OUT time, all process-virtual PMCs are stopped
+ * on hardware. The saved value is added to the PMCs value field
+ * only if the PMC is in a non-deleted state (the PMCs state could
+ * have changed during the current time slice).
+ *
+ *   Note that in between a switch IN on a processor and a switch
+ *   OUT, the PMC could have been released on another CPU.  Context
+ *   switch OUT therefore always looks at the hardware state to turn
+ *   OFF PMCs and will update a PMC's saved value only if it is
+ *   reachable from the target process record.
+ *
+ * - OP PMCRELEASE could be called on a PMC at any time (the PMC could
+ * be attached to many processes at the time of the call and could
+ * be active on multiple CPUs).
+ *
+ * We prevent further scheduling of the PMC by marking it as in
+ * state 'DELETED'. If the runcount of the PMC is non-zero then
+ * this PMC is currently running on a CPU somewhere. The thread
+ *   doing the PMCRELEASE operation waits by repeatedly doing a
+ *   pause() until the runcount drops to zero.
+ *
+ * The contents of a PMC descriptor (struct pmc) are protected using
+ * a spin-mutex. In order to save space, we use a mutex pool.
+ *
+ * In terms of lock types used by witness(4), we use:
+ * - Type "pmc-sx", used by the global SX lock.
+ * - Type "pmc-sleep", for sleep mutexes used by logger threads.
+ * - Type "pmc-per-proc", for protecting PMC owner descriptors.
+ * - Type "pmc-leaf", used for all other spin mutexes.
+ */
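+
+/*
+ * A minimal sketch of the PMCRELEASE wait described above, assuming
+ * a 'pm_runcount' field that the context-switch and interrupt paths
+ * keep up to date (the field and wmesg names here are illustrative):
+ *
+ *	pm->pm_state = PMC_STATE_DELETED;
+ *	while (atomic_load_acq_32(&pm->pm_runcount) != 0)
+ *		pause("pmcrm", 1);
+ *
+ * Marking the PMC DELETED first prevents further scheduling; the
+ * pause() loop then waits for any CPUs still running it to drain.
+ */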
+
+/*
+ * save the cpu binding of the current kthread
+ */
+
+static void
+pmc_save_cpu_binding(struct pmc_binding *pb)
+{
+ PMCDBG(CPU,BND,2, "%s", "save-cpu");
+ thread_lock(curthread);
+ pb->pb_bound = sched_is_bound(curthread);
+ pb->pb_cpu = curthread->td_oncpu;
+ thread_unlock(curthread);
+ PMCDBG(CPU,BND,2, "save-cpu cpu=%d", pb->pb_cpu);
+}
+
+/*
+ * restore the cpu binding of the current thread
+ */
+
+static void
+pmc_restore_cpu_binding(struct pmc_binding *pb)
+{
+ PMCDBG(CPU,BND,2, "restore-cpu curcpu=%d restore=%d",
+ curthread->td_oncpu, pb->pb_cpu);
+ thread_lock(curthread);
+ if (pb->pb_bound)
+ sched_bind(curthread, pb->pb_cpu);
+ else
+ sched_unbind(curthread);
+ thread_unlock(curthread);
+ PMCDBG(CPU,BND,2, "%s", "restore-cpu done");
+}
+
+/*
+ * move execution over to the specified cpu and bind the current thread there.
+ */
+
+static void
+pmc_select_cpu(int cpu)
+{
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[pmc,%d] bad cpu number %d", __LINE__, cpu));
+
+ /* Never move to an inactive CPU. */
+ KASSERT(pmc_cpu_is_active(cpu), ("[pmc,%d] selecting inactive "
+ "CPU %d", __LINE__, cpu));
+
+ PMCDBG(CPU,SEL,2, "select-cpu cpu=%d", cpu);
+ thread_lock(curthread);
+ sched_bind(curthread, cpu);
+ thread_unlock(curthread);
+
+ KASSERT(curthread->td_oncpu == cpu,
+ ("[pmc,%d] CPU not bound [cpu=%d, curr=%d]", __LINE__,
+ cpu, curthread->td_oncpu));
+
+ PMCDBG(CPU,SEL,2, "select-cpu cpu=%d ok", cpu);
+}
+
+/*
+ * Force a context switch.
+ *
+ * We do this by pause'ing for 1 tick -- invoking mi_switch() is not
+ * guaranteed to force a context switch.
+ */
+
+static void
+pmc_force_context_switch(void)
+{
+
+ pause("pmcctx", 1);
+}
+
+/*
+ * Get the file name for an executable. This is a simple wrapper
+ * around vn_fullpath(9).
+ */
+
+static void
+pmc_getfilename(struct vnode *v, char **fullpath, char **freepath)
+{
+
+ *fullpath = "unknown";
+ *freepath = NULL;
+ vn_fullpath(curthread, v, fullpath, freepath);
+}
+
+/*
+ * remove a process owning PMCs
+ */
+
+void
+pmc_remove_owner(struct pmc_owner *po)
+{
+ struct pmc *pm, *tmp;
+
+ sx_assert(&pmc_sx, SX_XLOCKED);
+
+ PMCDBG(OWN,ORM,1, "remove-owner po=%p", po);
+
+ /* Remove descriptor from the owner hash table */
+ LIST_REMOVE(po, po_next);
+
+ /* release all owned PMC descriptors */
+ LIST_FOREACH_SAFE(pm, &po->po_pmcs, pm_next, tmp) {
+ PMCDBG(OWN,ORM,2, "pmc=%p", pm);
+ KASSERT(pm->pm_owner == po,
+ ("[pmc,%d] owner %p != po %p", __LINE__, pm->pm_owner, po));
+
+ pmc_release_pmc_descriptor(pm); /* will unlink from the list */
+ }
+
+ KASSERT(po->po_sscount == 0,
+ ("[pmc,%d] SS count not zero", __LINE__));
+ KASSERT(LIST_EMPTY(&po->po_pmcs),
+ ("[pmc,%d] PMC list not empty", __LINE__));
+
+ /* de-configure the log file if present */
+ if (po->po_flags & PMC_PO_OWNS_LOGFILE)
+ pmclog_deconfigure_log(po);
+}
+
+/*
+ * remove an owner process record if all conditions are met.
+ */
+
+static void
+pmc_maybe_remove_owner(struct pmc_owner *po)
+{
+
+ PMCDBG(OWN,OMR,1, "maybe-remove-owner po=%p", po);
+
+ /*
+	 * Remove the owner record if
+	 * - this process does not own any PMCs
+	 * - this process has not configured a log file
+ */
+
+ if (LIST_EMPTY(&po->po_pmcs) &&
+ ((po->po_flags & PMC_PO_OWNS_LOGFILE) == 0)) {
+ pmc_remove_owner(po);
+ pmc_destroy_owner_descriptor(po);
+ }
+}
+
+/*
+ * Add an association between a target process and a PMC.
+ */
+
+static void
+pmc_link_target_process(struct pmc *pm, struct pmc_process *pp)
+{
+ int ri;
+ struct pmc_target *pt;
+
+ sx_assert(&pmc_sx, SX_XLOCKED);
+
+ KASSERT(pm != NULL && pp != NULL,
+ ("[pmc,%d] Null pm %p or pp %p", __LINE__, pm, pp));
+ KASSERT(PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)),
+ ("[pmc,%d] Attaching a non-process-virtual pmc=%p to pid=%d",
+ __LINE__, pm, pp->pp_proc->p_pid));
+ KASSERT(pp->pp_refcnt >= 0 && pp->pp_refcnt <= ((int) md->pmd_npmc - 1),
+ ("[pmc,%d] Illegal reference count %d for process record %p",
+ __LINE__, pp->pp_refcnt, (void *) pp));
+
+ ri = PMC_TO_ROWINDEX(pm);
+
+ PMCDBG(PRC,TLK,1, "link-target pmc=%p ri=%d pmc-process=%p",
+ pm, ri, pp);
+
+#ifdef DEBUG
+ LIST_FOREACH(pt, &pm->pm_targets, pt_next)
+ if (pt->pt_process == pp)
+ KASSERT(0, ("[pmc,%d] pp %p already in pmc %p targets",
+ __LINE__, pp, pm));
+#endif
+
+ pt = malloc(sizeof(struct pmc_target), M_PMC, M_WAITOK|M_ZERO);
+ pt->pt_process = pp;
+
+ LIST_INSERT_HEAD(&pm->pm_targets, pt, pt_next);
+
+ atomic_store_rel_ptr((uintptr_t *)&pp->pp_pmcs[ri].pp_pmc,
+ (uintptr_t)pm);
+
+ if (pm->pm_owner->po_owner == pp->pp_proc)
+ pm->pm_flags |= PMC_F_ATTACHED_TO_OWNER;
+
+ /*
+ * Initialize the per-process values at this row index.
+ */
+ pp->pp_pmcs[ri].pp_pmcval = PMC_TO_MODE(pm) == PMC_MODE_TS ?
+ pm->pm_sc.pm_reloadcount : 0;
+
+ pp->pp_refcnt++;
+
+}
+
+/*
+ * Removes the association between a target process and a PMC.
+ */
+
+static void
+pmc_unlink_target_process(struct pmc *pm, struct pmc_process *pp)
+{
+ int ri;
+ struct proc *p;
+ struct pmc_target *ptgt;
+
+ sx_assert(&pmc_sx, SX_XLOCKED);
+
+ KASSERT(pm != NULL && pp != NULL,
+ ("[pmc,%d] Null pm %p or pp %p", __LINE__, pm, pp));
+
+ KASSERT(pp->pp_refcnt >= 1 && pp->pp_refcnt <= (int) md->pmd_npmc,
+ ("[pmc,%d] Illegal ref count %d on process record %p",
+ __LINE__, pp->pp_refcnt, (void *) pp));
+
+ ri = PMC_TO_ROWINDEX(pm);
+
+ PMCDBG(PRC,TUL,1, "unlink-target pmc=%p ri=%d pmc-process=%p",
+ pm, ri, pp);
+
+ KASSERT(pp->pp_pmcs[ri].pp_pmc == pm,
+ ("[pmc,%d] PMC ri %d mismatch pmc %p pp->[ri] %p", __LINE__,
+ ri, pm, pp->pp_pmcs[ri].pp_pmc));
+
+ pp->pp_pmcs[ri].pp_pmc = NULL;
+ pp->pp_pmcs[ri].pp_pmcval = (pmc_value_t) 0;
+
+ /* Remove owner-specific flags */
+ if (pm->pm_owner->po_owner == pp->pp_proc) {
+ pp->pp_flags &= ~PMC_PP_ENABLE_MSR_ACCESS;
+ pm->pm_flags &= ~PMC_F_ATTACHED_TO_OWNER;
+ }
+
+ pp->pp_refcnt--;
+
+ /* Remove the target process from the PMC structure */
+ LIST_FOREACH(ptgt, &pm->pm_targets, pt_next)
+ if (ptgt->pt_process == pp)
+ break;
+
+ KASSERT(ptgt != NULL, ("[pmc,%d] process %p (pp: %p) not found "
+ "in pmc %p", __LINE__, pp->pp_proc, pp, pm));
+
+ LIST_REMOVE(ptgt, pt_next);
+ free(ptgt, M_PMC);
+
+ /* if the PMC now lacks targets, send the owner a SIGIO */
+ if (LIST_EMPTY(&pm->pm_targets)) {
+ p = pm->pm_owner->po_owner;
+ PROC_LOCK(p);
+ kern_psignal(p, SIGIO);
+ PROC_UNLOCK(p);
+
+ PMCDBG(PRC,SIG,2, "signalling proc=%p signal=%d", p,
+ SIGIO);
+ }
+}
+
+/*
+ * Check if PMC 'pm' may be attached to target process 't'.
+ */
+
+static int
+pmc_can_attach(struct pmc *pm, struct proc *t)
+{
+ struct proc *o; /* pmc owner */
+ struct ucred *oc, *tc; /* owner, target credentials */
+ int decline_attach, i;
+
+ /*
+ * A PMC's owner can always attach that PMC to itself.
+ */
+
+ if ((o = pm->pm_owner->po_owner) == t)
+ return 0;
+
+ PROC_LOCK(o);
+ oc = o->p_ucred;
+ crhold(oc);
+ PROC_UNLOCK(o);
+
+ PROC_LOCK(t);
+ tc = t->p_ucred;
+ crhold(tc);
+ PROC_UNLOCK(t);
+
+ /*
+ * The effective uid of the PMC owner should match at least one
+ * of the {effective,real,saved} uids of the target process.
+ */
+
+ decline_attach = oc->cr_uid != tc->cr_uid &&
+ oc->cr_uid != tc->cr_svuid &&
+ oc->cr_uid != tc->cr_ruid;
+
+ /*
+	 * Every one of the target's group ids must be in the owner's
+ * group list.
+ */
+ for (i = 0; !decline_attach && i < tc->cr_ngroups; i++)
+ decline_attach = !groupmember(tc->cr_groups[i], oc);
+
+	/* check the real and saved gids too */
+ if (decline_attach == 0)
+ decline_attach = !groupmember(tc->cr_rgid, oc) ||
+ !groupmember(tc->cr_svgid, oc);
+
+ crfree(tc);
+ crfree(oc);
+
+ return !decline_attach;
+}
+
+/*
+ * Attach a process to a PMC.
+ */
+
+static int
+pmc_attach_one_process(struct proc *p, struct pmc *pm)
+{
+ int ri;
+ char *fullpath, *freepath;
+ struct pmc_process *pp;
+
+ sx_assert(&pmc_sx, SX_XLOCKED);
+
+ PMCDBG(PRC,ATT,2, "attach-one pm=%p ri=%d proc=%p (%d, %s)", pm,
+ PMC_TO_ROWINDEX(pm), p, p->p_pid, p->p_comm);
+
+ /*
+ * Locate the process descriptor corresponding to process 'p',
+ * allocating space as needed.
+ *
+	 * Verify that row index 'pm_rowindex' is free in the process
+	 * descriptor, and if so, link the process descriptor and the
+	 * PMC.
+ */
+ ri = PMC_TO_ROWINDEX(pm);
+
+ if ((pp = pmc_find_process_descriptor(p, PMC_FLAG_ALLOCATE)) == NULL)
+ return ENOMEM;
+
+ if (pp->pp_pmcs[ri].pp_pmc == pm) /* already present at slot [ri] */
+ return EEXIST;
+
+ if (pp->pp_pmcs[ri].pp_pmc != NULL)
+ return EBUSY;
+
+ pmc_link_target_process(pm, pp);
+
+ if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)) &&
+ (pm->pm_flags & PMC_F_ATTACHED_TO_OWNER) == 0)
+ pm->pm_flags |= PMC_F_NEEDS_LOGFILE;
+
+ pm->pm_flags |= PMC_F_ATTACH_DONE; /* mark as attached */
+
+ /* issue an attach event to a configured log file */
+ if (pm->pm_owner->po_flags & PMC_PO_OWNS_LOGFILE) {
+ pmc_getfilename(p->p_textvp, &fullpath, &freepath);
+ if (p->p_flag & P_KTHREAD) {
+ fullpath = kernelname;
+ freepath = NULL;
+ } else
+ pmclog_process_pmcattach(pm, p->p_pid, fullpath);
+ if (freepath)
+ free(freepath, M_TEMP);
+ if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
+ pmc_log_process_mappings(pm->pm_owner, p);
+ }
+ /* mark process as using HWPMCs */
+ PROC_LOCK(p);
+ p->p_flag |= P_HWPMC;
+ PROC_UNLOCK(p);
+
+ return 0;
+}
+
+/*
+ * Attach a process and optionally its children
+ */
+
+static int
+pmc_attach_process(struct proc *p, struct pmc *pm)
+{
+ int error;
+ struct proc *top;
+
+ sx_assert(&pmc_sx, SX_XLOCKED);
+
+ PMCDBG(PRC,ATT,1, "attach pm=%p ri=%d proc=%p (%d, %s)", pm,
+ PMC_TO_ROWINDEX(pm), p, p->p_pid, p->p_comm);
+
+
+ /*
+ * If this PMC successfully allowed a GETMSR operation
+ * in the past, disallow further ATTACHes.
+ */
+
+ if ((pm->pm_flags & PMC_PP_ENABLE_MSR_ACCESS) != 0)
+ return EPERM;
+
+ if ((pm->pm_flags & PMC_F_DESCENDANTS) == 0)
+ return pmc_attach_one_process(p, pm);
+
+ /*
+ * Traverse all child processes, attaching them to
+ * this PMC.
+ */
+
+ sx_slock(&proctree_lock);
+
+ top = p;
+
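+	/*
+	 * Iterative pre-order walk of the process subtree rooted at
+	 * 'top': descend to a first child when one exists, otherwise
+	 * move to the next sibling, backtracking through parents
+	 * until we return to 'top'.
+	 */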
+ for (;;) {
+ if ((error = pmc_attach_one_process(p, pm)) != 0)
+ break;
+ if (!LIST_EMPTY(&p->p_children))
+ p = LIST_FIRST(&p->p_children);
+ else for (;;) {
+ if (p == top)
+ goto done;
+ if (LIST_NEXT(p, p_sibling)) {
+ p = LIST_NEXT(p, p_sibling);
+ break;
+ }
+ p = p->p_pptr;
+ }
+ }
+
+ if (error)
+ (void) pmc_detach_process(top, pm);
+
+ done:
+ sx_sunlock(&proctree_lock);
+ return error;
+}
+
+/*
+ * Detach a process from a PMC. If there are no other PMCs tracking
+ * this process, remove the process structure from its hash table. If
+ * 'flags' contains PMC_FLAG_REMOVE, then free the process structure.
+ */
+
+static int
+pmc_detach_one_process(struct proc *p, struct pmc *pm, int flags)
+{
+ int ri;
+ struct pmc_process *pp;
+
+ sx_assert(&pmc_sx, SX_XLOCKED);
+
+ KASSERT(pm != NULL,
+ ("[pmc,%d] null pm pointer", __LINE__));
+
+ ri = PMC_TO_ROWINDEX(pm);
+
+ PMCDBG(PRC,ATT,2, "detach-one pm=%p ri=%d proc=%p (%d, %s) flags=0x%x",
+ pm, ri, p, p->p_pid, p->p_comm, flags);
+
+ if ((pp = pmc_find_process_descriptor(p, 0)) == NULL)
+ return ESRCH;
+
+ if (pp->pp_pmcs[ri].pp_pmc != pm)
+ return EINVAL;
+
+ pmc_unlink_target_process(pm, pp);
+
+ /* Issue a detach entry if a log file is configured */
+ if (pm->pm_owner->po_flags & PMC_PO_OWNS_LOGFILE)
+ pmclog_process_pmcdetach(pm, p->p_pid);
+
+ /*
+	 * If there are no PMCs targeting this process, we remove its
+ * descriptor from the target hash table and unset the P_HWPMC
+ * flag in the struct proc.
+ */
+ KASSERT(pp->pp_refcnt >= 0 && pp->pp_refcnt <= (int) md->pmd_npmc,
+ ("[pmc,%d] Illegal refcnt %d for process struct %p",
+ __LINE__, pp->pp_refcnt, pp));
+
+ if (pp->pp_refcnt != 0) /* still a target of some PMC */
+ return 0;
+
+ pmc_remove_process_descriptor(pp);
+
+ if (flags & PMC_FLAG_REMOVE)
+ free(pp, M_PMC);
+
+ PROC_LOCK(p);
+ p->p_flag &= ~P_HWPMC;
+ PROC_UNLOCK(p);
+
+ return 0;
+}
+
+/*
+ * Detach a process and optionally its descendants from a PMC.
+ */
+
+static int
+pmc_detach_process(struct proc *p, struct pmc *pm)
+{
+ struct proc *top;
+
+ sx_assert(&pmc_sx, SX_XLOCKED);
+
+ PMCDBG(PRC,ATT,1, "detach pm=%p ri=%d proc=%p (%d, %s)", pm,
+ PMC_TO_ROWINDEX(pm), p, p->p_pid, p->p_comm);
+
+ if ((pm->pm_flags & PMC_F_DESCENDANTS) == 0)
+ return pmc_detach_one_process(p, pm, PMC_FLAG_REMOVE);
+
+ /*
+ * Traverse all children, detaching them from this PMC. We
+ * ignore errors since we could be detaching a PMC from a
+ * partially attached proc tree.
+ */
+
+ sx_slock(&proctree_lock);
+
+ top = p;
+
+ for (;;) {
+ (void) pmc_detach_one_process(p, pm, PMC_FLAG_REMOVE);
+
+ if (!LIST_EMPTY(&p->p_children))
+ p = LIST_FIRST(&p->p_children);
+ else for (;;) {
+ if (p == top)
+ goto done;
+ if (LIST_NEXT(p, p_sibling)) {
+ p = LIST_NEXT(p, p_sibling);
+ break;
+ }
+ p = p->p_pptr;
+ }
+ }
+
+ done:
+ sx_sunlock(&proctree_lock);
+
+ if (LIST_EMPTY(&pm->pm_targets))
+ pm->pm_flags &= ~PMC_F_ATTACH_DONE;
+
+ return 0;
+}
+
+
+/*
+ * Thread context switch IN
+ */
+
+static void
+pmc_process_csw_in(struct thread *td)
+{
+ int cpu;
+ unsigned int adjri, ri;
+ struct pmc *pm;
+ struct proc *p;
+ struct pmc_cpu *pc;
+ struct pmc_hw *phw;
+ pmc_value_t newvalue;
+ struct pmc_process *pp;
+ struct pmc_classdep *pcd;
+
+ p = td->td_proc;
+
+ if ((pp = pmc_find_process_descriptor(p, PMC_FLAG_NONE)) == NULL)
+ return;
+
+ KASSERT(pp->pp_proc == td->td_proc,
+ ("[pmc,%d] not my thread state", __LINE__));
+
+ critical_enter(); /* no preemption from this point */
+
+ cpu = PCPU_GET(cpuid); /* td->td_oncpu is invalid */
+
+ PMCDBG(CSW,SWI,1, "cpu=%d proc=%p (%d, %s) pp=%p", cpu, p,
+ p->p_pid, p->p_comm, pp);
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+	    ("[pmc,%d] weird CPU id %d", __LINE__, cpu));
+
+ pc = pmc_pcpu[cpu];
+
+ for (ri = 0; ri < md->pmd_npmc; ri++) {
+
+ if ((pm = pp->pp_pmcs[ri].pp_pmc) == NULL)
+ continue;
+
+ KASSERT(PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)),
+ ("[pmc,%d] Target PMC in non-virtual mode (%d)",
+ __LINE__, PMC_TO_MODE(pm)));
+
+ KASSERT(PMC_TO_ROWINDEX(pm) == ri,
+ ("[pmc,%d] Row index mismatch pmc %d != ri %d",
+ __LINE__, PMC_TO_ROWINDEX(pm), ri));
+
+ /*
+ * Only PMCs that are marked as 'RUNNING' need
+ * be placed on hardware.
+ */
+
+ if (pm->pm_state != PMC_STATE_RUNNING)
+ continue;
+
+ /* increment PMC runcount */
+ atomic_add_rel_int(&pm->pm_runcount, 1);
+
+ /* configure the HWPMC we are going to use. */
+ pcd = pmc_ri_to_classdep(md, ri, &adjri);
+ pcd->pcd_config_pmc(cpu, adjri, pm);
+
+ phw = pc->pc_hwpmcs[ri];
+
+ KASSERT(phw != NULL,
+ ("[pmc,%d] null hw pointer", __LINE__));
+
+ KASSERT(phw->phw_pmc == pm,
+ ("[pmc,%d] hw->pmc %p != pmc %p", __LINE__,
+ phw->phw_pmc, pm));
+
+ /*
+ * Write out saved value and start the PMC.
+ *
+ * Sampling PMCs use a per-process value, while
+ * counting mode PMCs use a per-pmc value that is
+ * inherited across descendants.
+ */
+ if (PMC_TO_MODE(pm) == PMC_MODE_TS) {
+ mtx_pool_lock_spin(pmc_mtxpool, pm);
+ newvalue = PMC_PCPU_SAVED(cpu,ri) =
+ pp->pp_pmcs[ri].pp_pmcval;
+ mtx_pool_unlock_spin(pmc_mtxpool, pm);
+ } else {
+ KASSERT(PMC_TO_MODE(pm) == PMC_MODE_TC,
+ ("[pmc,%d] illegal mode=%d", __LINE__,
+ PMC_TO_MODE(pm)));
+ mtx_pool_lock_spin(pmc_mtxpool, pm);
+ newvalue = PMC_PCPU_SAVED(cpu, ri) =
+ pm->pm_gv.pm_savedvalue;
+ mtx_pool_unlock_spin(pmc_mtxpool, pm);
+ }
+
+ PMCDBG(CSW,SWI,1,"cpu=%d ri=%d new=%jd", cpu, ri, newvalue);
+
+ pcd->pcd_write_pmc(cpu, adjri, newvalue);
+ pcd->pcd_start_pmc(cpu, adjri);
+ }
+
+ /*
+ * perform any other architecture/cpu dependent thread
+ * switch-in actions.
+ */
+
+ (void) (*md->pmd_switch_in)(pc, pp);
+
+ critical_exit();
+
+}
+
+/*
+ * Thread context switch OUT.
+ */
+
+static void
+pmc_process_csw_out(struct thread *td)
+{
+ int cpu;
+ int64_t tmp;
+ struct pmc *pm;
+ struct proc *p;
+ enum pmc_mode mode;
+ struct pmc_cpu *pc;
+ pmc_value_t newvalue;
+ unsigned int adjri, ri;
+ struct pmc_process *pp;
+ struct pmc_classdep *pcd;
+
+
+ /*
+ * Locate our process descriptor; this may be NULL if
+ * this process is exiting and we have already removed
+ * the process from the target process table.
+ *
+ * Note that due to kernel preemption, multiple
+ * context switches may happen while the process is
+ * exiting.
+ *
+ * Note also that if the target process cannot be
+ * found we still need to deconfigure any PMCs that
+ * are currently running on hardware.
+ */
+
+ p = td->td_proc;
+ pp = pmc_find_process_descriptor(p, PMC_FLAG_NONE);
+
+ /*
+ * save PMCs
+ */
+
+ critical_enter();
+
+ cpu = PCPU_GET(cpuid); /* td->td_oncpu is invalid */
+
+ PMCDBG(CSW,SWO,1, "cpu=%d proc=%p (%d, %s) pp=%p", cpu, p,
+ p->p_pid, p->p_comm, pp);
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+	    ("[pmc,%d] weird CPU id %d", __LINE__, cpu));
+
+ pc = pmc_pcpu[cpu];
+
+ /*
+	 * When a PMC gets unlinked from a target process, it will
+	 * be removed from the target's pp_pmcs[] array.
+ *
+ * However, on a MP system, the target could have been
+ * executing on another CPU at the time of the unlink.
+ * So, at context switch OUT time, we need to look at
+ * the hardware to determine if a PMC is scheduled on
+ * it.
+ */
+
+ for (ri = 0; ri < md->pmd_npmc; ri++) {
+
+ pcd = pmc_ri_to_classdep(md, ri, &adjri);
+ pm = NULL;
+ (void) (*pcd->pcd_get_config)(cpu, adjri, &pm);
+
+ if (pm == NULL) /* nothing at this row index */
+ continue;
+
+ mode = PMC_TO_MODE(pm);
+ if (!PMC_IS_VIRTUAL_MODE(mode))
+ continue; /* not a process virtual PMC */
+
+ KASSERT(PMC_TO_ROWINDEX(pm) == ri,
+ ("[pmc,%d] ri mismatch pmc(%d) ri(%d)",
+ __LINE__, PMC_TO_ROWINDEX(pm), ri));
+
+ /* Stop hardware if not already stopped */
+ if (pm->pm_stalled == 0)
+ pcd->pcd_stop_pmc(cpu, adjri);
+
+ /* reduce this PMC's runcount */
+ atomic_subtract_rel_int(&pm->pm_runcount, 1);
+
+ /*
+ * If this PMC is associated with this process,
+ * save the reading.
+ */
+
+ if (pp != NULL && pp->pp_pmcs[ri].pp_pmc != NULL) {
+
+ KASSERT(pm == pp->pp_pmcs[ri].pp_pmc,
+ ("[pmc,%d] pm %p != pp_pmcs[%d] %p", __LINE__,
+ pm, ri, pp->pp_pmcs[ri].pp_pmc));
+
+ KASSERT(pp->pp_refcnt > 0,
+ ("[pmc,%d] pp refcnt = %d", __LINE__,
+ pp->pp_refcnt));
+
+ pcd->pcd_read_pmc(cpu, adjri, &newvalue);
+
+ tmp = newvalue - PMC_PCPU_SAVED(cpu,ri);
+
+ PMCDBG(CSW,SWO,1,"cpu=%d ri=%d tmp=%jd", cpu, ri,
+ tmp);
+
+ if (mode == PMC_MODE_TS) {
+
+ /*
+ * For sampling process-virtual PMCs,
+ * we expect the count to be
+ * decreasing as the 'value'
+ * programmed into the PMC is the
+ * number of events to be seen till
+ * the next sampling interrupt.
+ */
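+				/*
+				 * Example: with a reload count of
+				 * 10000, a raw delta of -3000 means
+				 * the counter wrapped through an
+				 * interrupt, so the adjusted event
+				 * count is 7000.
+				 */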
+ if (tmp < 0)
+ tmp += pm->pm_sc.pm_reloadcount;
+ mtx_pool_lock_spin(pmc_mtxpool, pm);
+ pp->pp_pmcs[ri].pp_pmcval -= tmp;
+ if ((int64_t) pp->pp_pmcs[ri].pp_pmcval < 0)
+ pp->pp_pmcs[ri].pp_pmcval +=
+ pm->pm_sc.pm_reloadcount;
+ mtx_pool_unlock_spin(pmc_mtxpool, pm);
+
+ } else {
+
+ /*
+ * For counting process-virtual PMCs,
+ * we expect the count to be
+ * increasing monotonically, modulo a 64
+ * bit wraparound.
+ */
+ KASSERT((int64_t) tmp >= 0,
+ ("[pmc,%d] negative increment cpu=%d "
+ "ri=%d newvalue=%jx saved=%jx "
+ "incr=%jx", __LINE__, cpu, ri,
+ newvalue, PMC_PCPU_SAVED(cpu,ri), tmp));
+
+ mtx_pool_lock_spin(pmc_mtxpool, pm);
+ pm->pm_gv.pm_savedvalue += tmp;
+ pp->pp_pmcs[ri].pp_pmcval += tmp;
+ mtx_pool_unlock_spin(pmc_mtxpool, pm);
+
+ if (pm->pm_flags & PMC_F_LOG_PROCCSW)
+ pmclog_process_proccsw(pm, pp, tmp);
+ }
+ }
+
+ /* mark hardware as free */
+ pcd->pcd_config_pmc(cpu, adjri, NULL);
+ }
+
+ /*
+ * perform any other architecture/cpu dependent thread
+ * switch out functions.
+ */
+
+ (void) (*md->pmd_switch_out)(pc, pp);
+
+ critical_exit();
+}
+
+/*
+ * Log a KLD operation.
+ */
+
+static void
+pmc_process_kld_load(struct pmckern_map_in *pkm)
+{
+ struct pmc_owner *po;
+
+ sx_assert(&pmc_sx, SX_LOCKED);
+
+ /*
+ * Notify owners of system sampling PMCs about KLD operations.
+ */
+
+ LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
+ if (po->po_flags & PMC_PO_OWNS_LOGFILE)
+ pmclog_process_map_in(po, (pid_t) -1, pkm->pm_address,
+ (char *) pkm->pm_file);
+
+ /*
+ * TODO: Notify owners of (all) process-sampling PMCs too.
+ */
+
+ return;
+}
+
+static void
+pmc_process_kld_unload(struct pmckern_map_out *pkm)
+{
+ struct pmc_owner *po;
+
+ sx_assert(&pmc_sx, SX_LOCKED);
+
+ LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
+ if (po->po_flags & PMC_PO_OWNS_LOGFILE)
+ pmclog_process_map_out(po, (pid_t) -1,
+ pkm->pm_address, pkm->pm_address + pkm->pm_size);
+
+ /*
+ * TODO: Notify owners of process-sampling PMCs.
+ */
+}
+
+/*
+ * A mapping change for a process.
+ */
+
+static void
+pmc_process_mmap(struct thread *td, struct pmckern_map_in *pkm)
+{
+ int ri;
+ pid_t pid;
+ char *fullpath, *freepath;
+ const struct pmc *pm;
+ struct pmc_owner *po;
+ const struct pmc_process *pp;
+
+ freepath = fullpath = NULL;
+ pmc_getfilename((struct vnode *) pkm->pm_file, &fullpath, &freepath);
+
+ pid = td->td_proc->p_pid;
+
+ /* Inform owners of all system-wide sampling PMCs. */
+ LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
+ if (po->po_flags & PMC_PO_OWNS_LOGFILE)
+ pmclog_process_map_in(po, pid, pkm->pm_address, fullpath);
+
+ if ((pp = pmc_find_process_descriptor(td->td_proc, 0)) == NULL)
+ goto done;
+
+ /*
+ * Inform sampling PMC owners tracking this process.
+ */
+ for (ri = 0; ri < md->pmd_npmc; ri++)
+ if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL &&
+ PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
+ pmclog_process_map_in(pm->pm_owner,
+ pid, pkm->pm_address, fullpath);
+
+ done:
+ if (freepath)
+ free(freepath, M_TEMP);
+}
+
+
+/*
+ * Log an munmap request.
+ */
+
+static void
+pmc_process_munmap(struct thread *td, struct pmckern_map_out *pkm)
+{
+ int ri;
+ pid_t pid;
+ struct pmc_owner *po;
+ const struct pmc *pm;
+ const struct pmc_process *pp;
+
+ pid = td->td_proc->p_pid;
+
+ LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
+ if (po->po_flags & PMC_PO_OWNS_LOGFILE)
+ pmclog_process_map_out(po, pid, pkm->pm_address,
+ pkm->pm_address + pkm->pm_size);
+
+ if ((pp = pmc_find_process_descriptor(td->td_proc, 0)) == NULL)
+ return;
+
+ for (ri = 0; ri < md->pmd_npmc; ri++)
+ if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL &&
+ PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
+ pmclog_process_map_out(pm->pm_owner, pid,
+ pkm->pm_address, pkm->pm_address + pkm->pm_size);
+}
+
+/*
+ * Log mapping information about the kernel.
+ */
+
+static void
+pmc_log_kernel_mappings(struct pmc *pm)
+{
+ struct pmc_owner *po;
+ struct pmckern_map_in *km, *kmbase;
+
+ sx_assert(&pmc_sx, SX_LOCKED);
+ KASSERT(PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)),
+ ("[pmc,%d] non-sampling PMC (%p) desires mapping information",
+ __LINE__, (void *) pm));
+
+ po = pm->pm_owner;
+
+ if (po->po_flags & PMC_PO_INITIAL_MAPPINGS_DONE)
+ return;
+
+ /*
+ * Log the current set of kernel modules.
+ */
+ kmbase = linker_hwpmc_list_objects();
+ for (km = kmbase; km->pm_file != NULL; km++) {
+ PMCDBG(LOG,REG,1,"%s %p", (char *) km->pm_file,
+ (void *) km->pm_address);
+ pmclog_process_map_in(po, (pid_t) -1, km->pm_address,
+ km->pm_file);
+ }
+ free(kmbase, M_LINKER);
+
+ po->po_flags |= PMC_PO_INITIAL_MAPPINGS_DONE;
+}
+
+/*
+ * Log the mappings for a single process.
+ */
+
+static void
+pmc_log_process_mappings(struct pmc_owner *po, struct proc *p)
+{
+ vm_map_t map;
+ struct vnode *vp;
+ struct vmspace *vm;
+ vm_map_entry_t entry;
+ vm_offset_t last_end;
+ u_int last_timestamp;
+ struct vnode *last_vp;
+ vm_offset_t start_addr;
+ vm_object_t obj, lobj, tobj;
+ char *fullpath, *freepath;
+
+ last_vp = NULL;
+ last_end = (vm_offset_t) 0;
+ fullpath = freepath = NULL;
+
+ if ((vm = vmspace_acquire_ref(p)) == NULL)
+ return;
+
+ map = &vm->vm_map;
+ vm_map_lock_read(map);
+
+ for (entry = map->header.next; entry != &map->header; entry = entry->next) {
+
+ if (entry == NULL) {
+ PMCDBG(LOG,OPS,2, "hwpmc: vm_map entry unexpectedly "
+ "NULL! pid=%d vm_map=%p\n", p->p_pid, map);
+ break;
+ }
+
+ /*
+ * We only care about executable map entries.
+ */
+ if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) ||
+ !(entry->protection & VM_PROT_EXECUTE) ||
+ (entry->object.vm_object == NULL)) {
+ continue;
+ }
+
+ obj = entry->object.vm_object;
+ VM_OBJECT_LOCK(obj);
+
+ /*
+ * Walk the backing_object list to find the base
+ * (non-shadowed) vm_object.
+ */
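+		/*
+		 * Hand-over-hand locking: each shadow object is
+		 * locked before its predecessor is unlocked, so the
+		 * chain cannot change underneath us.
+		 */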
+ for (lobj = tobj = obj; tobj != NULL; tobj = tobj->backing_object) {
+ if (tobj != obj)
+ VM_OBJECT_LOCK(tobj);
+ if (lobj != obj)
+ VM_OBJECT_UNLOCK(lobj);
+ lobj = tobj;
+ }
+
+ /*
+ * At this point lobj is the base vm_object and it is locked.
+ */
+ if (lobj == NULL) {
+ PMCDBG(LOG,OPS,2, "hwpmc: lobj unexpectedly NULL! pid=%d "
+ "vm_map=%p vm_obj=%p\n", p->p_pid, map, obj);
+ VM_OBJECT_UNLOCK(obj);
+ continue;
+ }
+
+ if (lobj->type != OBJT_VNODE || lobj->handle == NULL) {
+ if (lobj != obj)
+ VM_OBJECT_UNLOCK(lobj);
+ VM_OBJECT_UNLOCK(obj);
+ continue;
+ }
+
+ /*
+ * Skip contiguous regions that point to the same
+ * vnode, so we don't emit redundant MAP-IN
+ * directives.
+ */
+ if (entry->start == last_end && lobj->handle == last_vp) {
+ last_end = entry->end;
+ if (lobj != obj)
+ VM_OBJECT_UNLOCK(lobj);
+ VM_OBJECT_UNLOCK(obj);
+ continue;
+ }
+
+ /*
+ * We don't want to keep the proc's vm_map or this
+ * vm_object locked while we walk the pathname, since
+ * vn_fullpath() can sleep. However, if we drop the
+ * lock, it's possible for concurrent activity to
+ * modify the vm_map list. To protect against this,
+ * we save the vm_map timestamp before we release the
+ * lock, and check it after we reacquire the lock
+ * below.
+ */
+ start_addr = entry->start;
+ last_end = entry->end;
+ last_timestamp = map->timestamp;
+ vm_map_unlock_read(map);
+
+ vp = lobj->handle;
+ vref(vp);
+ if (lobj != obj)
+ VM_OBJECT_UNLOCK(lobj);
+
+ VM_OBJECT_UNLOCK(obj);
+
+ freepath = NULL;
+ pmc_getfilename(vp, &fullpath, &freepath);
+ last_vp = vp;
+
+ vrele(vp);
+
+ vp = NULL;
+ pmclog_process_map_in(po, p->p_pid, start_addr, fullpath);
+ if (freepath)
+ free(freepath, M_TEMP);
+
+ vm_map_lock_read(map);
+
+ /*
+ * If our saved timestamp doesn't match, this means
+ * that the vm_map was modified out from under us and
+ * we can't trust our current "entry" pointer. Do a
+ * new lookup for this entry. If there is no entry
+ * for this address range, vm_map_lookup_entry() will
+ * return the previous one, so we always want to go to
+ * entry->next on the next loop iteration.
+ *
+ * There is an edge condition here that can occur if
+ * there is no entry at or before this address. In
+ * this situation, vm_map_lookup_entry returns
+ * &map->header, which would cause our loop to abort
+ * without processing the rest of the map. However,
+ * in practice this will never happen for process
+ * vm_map. This is because the executable's text
+ * segment is the first mapping in the proc's address
+ * space, and this mapping is never removed until the
+ * process exits, so there will always be a non-header
+ * entry at or before the requested address for
+ * vm_map_lookup_entry to return.
+ */
+ if (map->timestamp != last_timestamp)
+ vm_map_lookup_entry(map, last_end - 1, &entry);
+ }
+
+ vm_map_unlock_read(map);
+ vmspace_free(vm);
+ return;
+}
+
+/*
+ * Log mappings for all processes in the system.
+ */
+
+static void
+pmc_log_all_process_mappings(struct pmc_owner *po)
+{
+ struct proc *p, *top;
+
+ sx_assert(&pmc_sx, SX_XLOCKED);
+
+ if ((p = pfind(1)) == NULL)
+ panic("[pmc,%d] Cannot find init", __LINE__);
+
+ PROC_UNLOCK(p);
+
+ sx_slock(&proctree_lock);
+
+ top = p;
+
+ for (;;) {
+ pmc_log_process_mappings(po, p);
+ if (!LIST_EMPTY(&p->p_children))
+ p = LIST_FIRST(&p->p_children);
+ else for (;;) {
+ if (p == top)
+ goto done;
+ if (LIST_NEXT(p, p_sibling)) {
+ p = LIST_NEXT(p, p_sibling);
+ break;
+ }
+ p = p->p_pptr;
+ }
+ }
+ done:
+ sx_sunlock(&proctree_lock);
+}
+
+/*
+ * The 'hook' invoked from the kernel proper
+ */
+
+
+#ifdef DEBUG
+const char *pmc_hooknames[] = {
+ /* these strings correspond to PMC_FN_* in <sys/pmckern.h> */
+ "",
+ "EXEC",
+ "CSW-IN",
+ "CSW-OUT",
+ "SAMPLE",
+ "KLDLOAD",
+ "KLDUNLOAD",
+ "MMAP",
+ "MUNMAP",
+ "CALLCHAIN-NMI",
+ "CALLCHAIN-SOFT",
+ "SOFTSAMPLING"
+};
+#endif
+
+static int
+pmc_hook_handler(struct thread *td, int function, void *arg)
+{
+
+ PMCDBG(MOD,PMH,1, "hook td=%p func=%d \"%s\" arg=%p", td, function,
+ pmc_hooknames[function], arg);
+
+ switch (function)
+ {
+
+ /*
+ * Process exec()
+ */
+
+ case PMC_FN_PROCESS_EXEC:
+ {
+ char *fullpath, *freepath;
+ unsigned int ri;
+ int is_using_hwpmcs;
+ struct pmc *pm;
+ struct proc *p;
+ struct pmc_owner *po;
+ struct pmc_process *pp;
+ struct pmckern_procexec *pk;
+
+ sx_assert(&pmc_sx, SX_XLOCKED);
+
+ p = td->td_proc;
+ pmc_getfilename(p->p_textvp, &fullpath, &freepath);
+
+ pk = (struct pmckern_procexec *) arg;
+
+ /* Inform owners of SS mode PMCs of the exec event. */
+ LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
+ if (po->po_flags & PMC_PO_OWNS_LOGFILE)
+ pmclog_process_procexec(po, PMC_ID_INVALID,
+ p->p_pid, pk->pm_entryaddr, fullpath);
+
+ PROC_LOCK(p);
+ is_using_hwpmcs = p->p_flag & P_HWPMC;
+ PROC_UNLOCK(p);
+
+ if (!is_using_hwpmcs) {
+ if (freepath)
+ free(freepath, M_TEMP);
+ break;
+ }
+
+ /*
+ * PMCs are not inherited across an exec(): remove any
+ * PMCs that this process is the owner of.
+ */
+
+ if ((po = pmc_find_owner_descriptor(p)) != NULL) {
+ pmc_remove_owner(po);
+ pmc_destroy_owner_descriptor(po);
+ }
+
+ /*
+ * If the process being exec'ed is not the target of any
+ * PMC, we are done.
+ */
+ if ((pp = pmc_find_process_descriptor(p, 0)) == NULL) {
+ if (freepath)
+ free(freepath, M_TEMP);
+ break;
+ }
+
+ /*
+ * Log the exec event to all monitoring owners. Skip
+		 * owners who have already received the event because
+ * they had system sampling PMCs active.
+ */
+ for (ri = 0; ri < md->pmd_npmc; ri++)
+ if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL) {
+ po = pm->pm_owner;
+ if (po->po_sscount == 0 &&
+ po->po_flags & PMC_PO_OWNS_LOGFILE)
+ pmclog_process_procexec(po, pm->pm_id,
+ p->p_pid, pk->pm_entryaddr,
+ fullpath);
+ }
+
+ if (freepath)
+ free(freepath, M_TEMP);
+
+
+ PMCDBG(PRC,EXC,1, "exec proc=%p (%d, %s) cred-changed=%d",
+ p, p->p_pid, p->p_comm, pk->pm_credentialschanged);
+
+ if (pk->pm_credentialschanged == 0) /* no change */
+ break;
+
+ /*
+ * If the newly exec()'ed process has a different credential
+ * than before, allow it to be the target of a PMC only if
+		 * the PMC's owner has sufficient privilege.
+ */
+
+ for (ri = 0; ri < md->pmd_npmc; ri++)
+ if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL)
+ if (pmc_can_attach(pm, td->td_proc) != 0)
+ pmc_detach_one_process(td->td_proc,
+ pm, PMC_FLAG_NONE);
+
+ KASSERT(pp->pp_refcnt >= 0 && pp->pp_refcnt <= (int) md->pmd_npmc,
+ ("[pmc,%d] Illegal ref count %d on pp %p", __LINE__,
+ pp->pp_refcnt, pp));
+
+ /*
+ * If this process is no longer the target of any
+ * PMCs, we can remove the process entry and free
+ * up space.
+ */
+
+ if (pp->pp_refcnt == 0) {
+ pmc_remove_process_descriptor(pp);
+ free(pp, M_PMC);
+ break;
+ }
+
+ }
+ break;
+
+ case PMC_FN_CSW_IN:
+ pmc_process_csw_in(td);
+ break;
+
+ case PMC_FN_CSW_OUT:
+ pmc_process_csw_out(td);
+ break;
+
+ /*
+ * Process accumulated PC samples.
+ *
+ * This function is expected to be called by hardclock() for
+ * each CPU that has accumulated PC samples.
+ *
+ * This function is to be executed on the CPU whose samples
+ * are being processed.
+ */
+ case PMC_FN_DO_SAMPLES:
+
+ /*
+		 * Clear the cpu-specific bit in the CPU mask before
+		 * doing the rest of the processing. If the NMI handler
+ * gets invoked after the "atomic_clear_int()" call
+ * below but before "pmc_process_samples()" gets
+ * around to processing the interrupt, then we will
+ * come back here at the next hardclock() tick (and
+ * may find nothing to do if "pmc_process_samples()"
+ * had already processed the interrupt). We don't
+ * lose the interrupt sample.
+ */
+ CPU_CLR_ATOMIC(PCPU_GET(cpuid), &pmc_cpumask);
+ pmc_process_samples(PCPU_GET(cpuid), PMC_HR);
+ pmc_process_samples(PCPU_GET(cpuid), PMC_SR);
+ break;
+
+
+ case PMC_FN_KLD_LOAD:
+ sx_assert(&pmc_sx, SX_LOCKED);
+ pmc_process_kld_load((struct pmckern_map_in *) arg);
+ break;
+
+ case PMC_FN_KLD_UNLOAD:
+ sx_assert(&pmc_sx, SX_LOCKED);
+ pmc_process_kld_unload((struct pmckern_map_out *) arg);
+ break;
+
+ case PMC_FN_MMAP:
+ sx_assert(&pmc_sx, SX_LOCKED);
+ pmc_process_mmap(td, (struct pmckern_map_in *) arg);
+ break;
+
+ case PMC_FN_MUNMAP:
+ sx_assert(&pmc_sx, SX_LOCKED);
+ pmc_process_munmap(td, (struct pmckern_map_out *) arg);
+ break;
+
+ case PMC_FN_USER_CALLCHAIN:
+ /*
+ * Record a call chain.
+ */
+ KASSERT(td == curthread, ("[pmc,%d] td != curthread",
+ __LINE__));
+
+ pmc_capture_user_callchain(PCPU_GET(cpuid), PMC_HR,
+ (struct trapframe *) arg);
+ td->td_pflags &= ~TDP_CALLCHAIN;
+ break;
+
+ case PMC_FN_USER_CALLCHAIN_SOFT:
+ /*
+ * Record a call chain.
+ */
+ KASSERT(td == curthread, ("[pmc,%d] td != curthread",
+ __LINE__));
+ pmc_capture_user_callchain(PCPU_GET(cpuid), PMC_SR,
+ (struct trapframe *) arg);
+ td->td_pflags &= ~TDP_CALLCHAIN;
+ break;
+
+ case PMC_FN_SOFT_SAMPLING:
+ /*
+ * Call soft PMC sampling intr.
+ */
+ pmc_soft_intr((struct pmckern_soft *) arg);
+ break;
+
+ default:
+#ifdef DEBUG
+ KASSERT(0, ("[pmc,%d] unknown hook %d\n", __LINE__, function));
+#endif
+ break;
+
+ }
+
+ return 0;
+}
+
+/*
+ * allocate a 'struct pmc_owner' descriptor in the owner hash table.
+ */
+
+static struct pmc_owner *
+pmc_allocate_owner_descriptor(struct proc *p)
+{
+ uint32_t hindex;
+ struct pmc_owner *po;
+ struct pmc_ownerhash *poh;
+
+ hindex = PMC_HASH_PTR(p, pmc_ownerhashmask);
+ poh = &pmc_ownerhash[hindex];
+
+	/* allocate space for the owner descriptor */
+ po = malloc(sizeof(struct pmc_owner), M_PMC, M_WAITOK|M_ZERO);
+ po->po_sscount = po->po_error = po->po_flags = po->po_logprocmaps = 0;
+ po->po_file = NULL;
+ po->po_owner = p;
+ po->po_kthread = NULL;
+ LIST_INIT(&po->po_pmcs);
+ LIST_INSERT_HEAD(poh, po, po_next); /* insert into hash table */
+
+ TAILQ_INIT(&po->po_logbuffers);
+ mtx_init(&po->po_mtx, "pmc-owner-mtx", "pmc-per-proc", MTX_SPIN);
+
+ PMCDBG(OWN,ALL,1, "allocate-owner proc=%p (%d, %s) pmc-owner=%p",
+ p, p->p_pid, p->p_comm, po);
+
+ return po;
+}
+
+static void
+pmc_destroy_owner_descriptor(struct pmc_owner *po)
+{
+
+ PMCDBG(OWN,REL,1, "destroy-owner po=%p proc=%p (%d, %s)",
+ po, po->po_owner, po->po_owner->p_pid, po->po_owner->p_comm);
+
+ mtx_destroy(&po->po_mtx);
+ free(po, M_PMC);
+}
+
+/*
+ * find the descriptor corresponding to process 'p', adding or removing it
+ * as specified by 'mode'.
+ */
+
+static struct pmc_process *
+pmc_find_process_descriptor(struct proc *p, uint32_t mode)
+{
+ uint32_t hindex;
+ struct pmc_process *pp, *ppnew;
+ struct pmc_processhash *pph;
+
+ hindex = PMC_HASH_PTR(p, pmc_processhashmask);
+ pph = &pmc_processhash[hindex];
+
+ ppnew = NULL;
+
+ /*
+ * Pre-allocate memory in the FIND_ALLOCATE case since we
+ * cannot call malloc(9) once we hold a spin lock.
+ */
+ if (mode & PMC_FLAG_ALLOCATE)
+ ppnew = malloc(sizeof(struct pmc_process) + md->pmd_npmc *
+ sizeof(struct pmc_targetstate), M_PMC, M_WAITOK|M_ZERO);
+
+ mtx_lock_spin(&pmc_processhash_mtx);
+ LIST_FOREACH(pp, pph, pp_next)
+ if (pp->pp_proc == p)
+ break;
+
+ if ((mode & PMC_FLAG_REMOVE) && pp != NULL)
+ LIST_REMOVE(pp, pp_next);
+
+ if ((mode & PMC_FLAG_ALLOCATE) && pp == NULL &&
+ ppnew != NULL) {
+ ppnew->pp_proc = p;
+ LIST_INSERT_HEAD(pph, ppnew, pp_next);
+ pp = ppnew;
+ ppnew = NULL;
+ }
+ mtx_unlock_spin(&pmc_processhash_mtx);
+
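+	/*
+	 * If the descriptor already existed, the pre-allocated one is
+	 * unused; free it now that the spin lock has been dropped,
+	 * since free(9) cannot be called while holding it.
+	 */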
+ if (pp != NULL && ppnew != NULL)
+ free(ppnew, M_PMC);
+
+ return pp;
+}
+
+/*
+ * remove a process descriptor from the process hash table.
+ */
+
+static void
+pmc_remove_process_descriptor(struct pmc_process *pp)
+{
+ KASSERT(pp->pp_refcnt == 0,
+ ("[pmc,%d] Removing process descriptor %p with count %d",
+ __LINE__, pp, pp->pp_refcnt));
+
+ mtx_lock_spin(&pmc_processhash_mtx);
+ LIST_REMOVE(pp, pp_next);
+ mtx_unlock_spin(&pmc_processhash_mtx);
+}
+
+
+/*
+ * find an owner descriptor corresponding to proc 'p'
+ */
+
+static struct pmc_owner *
+pmc_find_owner_descriptor(struct proc *p)
+{
+ uint32_t hindex;
+ struct pmc_owner *po;
+ struct pmc_ownerhash *poh;
+
+ hindex = PMC_HASH_PTR(p, pmc_ownerhashmask);
+ poh = &pmc_ownerhash[hindex];
+
+ po = NULL;
+ LIST_FOREACH(po, poh, po_next)
+ if (po->po_owner == p)
+ break;
+
+ PMCDBG(OWN,FND,1, "find-owner proc=%p (%d, %s) hindex=0x%x -> "
+ "pmc-owner=%p", p, p->p_pid, p->p_comm, hindex, po);
+
+ return po;
+}
+
+/*
+ * pmc_allocate_pmc_descriptor
+ *
+ * Allocate a pmc descriptor and initialize its
+ * fields.
+ */
+
+static struct pmc *
+pmc_allocate_pmc_descriptor(void)
+{
+ struct pmc *pmc;
+
+ pmc = malloc(sizeof(struct pmc), M_PMC, M_WAITOK|M_ZERO);
+
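+	/*
+	 * With M_WAITOK, malloc(9) never returns NULL, so the check
+	 * below is purely defensive.
+	 */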
+ if (pmc != NULL) {
+ pmc->pm_owner = NULL;
+ LIST_INIT(&pmc->pm_targets);
+ }
+
+ PMCDBG(PMC,ALL,1, "allocate-pmc -> pmc=%p", pmc);
+
+ return pmc;
+}
+
+/*
+ * Destroy a pmc descriptor.
+ */
+
+static void
+pmc_destroy_pmc_descriptor(struct pmc *pm)
+{
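+	/* 'pm' is only examined in DEBUG builds; the cast quiets the compiler. */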
+ (void) pm;
+
+#ifdef DEBUG
+ KASSERT(pm->pm_state == PMC_STATE_DELETED ||
+ pm->pm_state == PMC_STATE_FREE,
+ ("[pmc,%d] destroying non-deleted PMC", __LINE__));
+ KASSERT(LIST_EMPTY(&pm->pm_targets),
+ ("[pmc,%d] destroying pmc with targets", __LINE__));
+ KASSERT(pm->pm_owner == NULL,
+ ("[pmc,%d] destroying pmc attached to an owner", __LINE__));
+ KASSERT(pm->pm_runcount == 0,
+ ("[pmc,%d] pmc has non-zero run count %d", __LINE__,
+ pm->pm_runcount));
+#endif
+}
+
+static void
+pmc_wait_for_pmc_idle(struct pmc *pm)
+{
+#ifdef DEBUG
+ volatile int maxloop;
+
+ maxloop = 100 * pmc_cpu_max();
+#endif
+ /*
+ * Loop (with a forced context switch) till the PMC's runcount
+ * comes down to zero.
+ */
+ while (atomic_load_acq_32(&pm->pm_runcount) > 0) {
+#ifdef DEBUG
+ maxloop--;
+ KASSERT(maxloop > 0,
+ ("[pmc,%d] (ri%d, rc%d) waiting too long for "
+ "pmc to be free", __LINE__,
+ PMC_TO_ROWINDEX(pm), pm->pm_runcount));
+#endif
+ pmc_force_context_switch();
+ }
+}
+
+/*
+ * This function does the following things:
+ *
+ * - detaches the PMC from hardware
+ * - unlinks all target threads that were attached to it
+ * - removes the PMC from its owner's list
+ * - destroys the PMC's private mutex
+ *
+ * Once this function completes, the given pmc pointer can be safely
+ * FREE'd by the caller.
+ */
+
+static void
+pmc_release_pmc_descriptor(struct pmc *pm)
+{
+ enum pmc_mode mode;
+ struct pmc_hw *phw;
+ u_int adjri, ri, cpu;
+ struct pmc_owner *po;
+ struct pmc_binding pb;
+ struct pmc_process *pp;
+ struct pmc_classdep *pcd;
+ struct pmc_target *ptgt, *tmp;
+
+ sx_assert(&pmc_sx, SX_XLOCKED);
+
+ KASSERT(pm, ("[pmc,%d] null pmc", __LINE__));
+
+ ri = PMC_TO_ROWINDEX(pm);
+ pcd = pmc_ri_to_classdep(md, ri, &adjri);
+ mode = PMC_TO_MODE(pm);
+
+ PMCDBG(PMC,REL,1, "release-pmc pmc=%p ri=%d mode=%d", pm, ri,
+ mode);
+
+ /*
+ * First, we take the PMC off hardware.
+ */
+ cpu = 0;
+ if (PMC_IS_SYSTEM_MODE(mode)) {
+
+ /*
+ * A system mode PMC runs on a specific CPU. Switch
+ * to this CPU and turn hardware off.
+ */
+ pmc_save_cpu_binding(&pb);
+
+ cpu = PMC_TO_CPU(pm);
+
+ pmc_select_cpu(cpu);
+
+		/* switch off the PMC unless it is already stalled */
+ if (pm->pm_state == PMC_STATE_RUNNING &&
+ pm->pm_stalled == 0) {
+
+ phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
+
+ KASSERT(phw->phw_pmc == pm,
+ ("[pmc, %d] pmc ptr ri(%d) hw(%p) pm(%p)",
+ __LINE__, ri, phw->phw_pmc, pm));
+ PMCDBG(PMC,REL,2, "stopping cpu=%d ri=%d", cpu, ri);
+
+ critical_enter();
+ pcd->pcd_stop_pmc(cpu, adjri);
+ critical_exit();
+ }
+
+ PMCDBG(PMC,REL,2, "decfg cpu=%d ri=%d", cpu, ri);
+
+ critical_enter();
+ pcd->pcd_config_pmc(cpu, adjri, NULL);
+ critical_exit();
+
+ /* adjust the global and process count of SS mode PMCs */
+ if (mode == PMC_MODE_SS && pm->pm_state == PMC_STATE_RUNNING) {
+ po = pm->pm_owner;
+ po->po_sscount--;
+ if (po->po_sscount == 0) {
+ atomic_subtract_rel_int(&pmc_ss_count, 1);
+ LIST_REMOVE(po, po_ssnext);
+ }
+ }
+
+ pm->pm_state = PMC_STATE_DELETED;
+
+ pmc_restore_cpu_binding(&pb);
+
+ /*
+ * We could have references to this PMC structure in
+ * the per-cpu sample queues. Wait for the queue to
+ * drain.
+ */
+ pmc_wait_for_pmc_idle(pm);
+
+ } else if (PMC_IS_VIRTUAL_MODE(mode)) {
+
+ /*
+ * A virtual PMC could be running on multiple CPUs at
+ * a given instant.
+ *
+ * By marking its state as DELETED, we ensure that
+ * this PMC is never further scheduled on hardware.
+ *
+ * Then we wait till all CPUs are done with this PMC.
+ */
+ pm->pm_state = PMC_STATE_DELETED;
+
+
+		/* Wait for the PMC's runcount to come to zero. */
+ pmc_wait_for_pmc_idle(pm);
+
+ /*
+ * At this point the PMC is off all CPUs and cannot be
+ * freshly scheduled onto a CPU. It is now safe to
+ * unlink all targets from this PMC. If a
+ * process-record's refcount falls to zero, we remove
+ * it from the hash table. The module-wide SX lock
+ * protects us from races.
+ */
+ LIST_FOREACH_SAFE(ptgt, &pm->pm_targets, pt_next, tmp) {
+ pp = ptgt->pt_process;
+ pmc_unlink_target_process(pm, pp); /* frees 'ptgt' */
+
+ PMCDBG(PMC,REL,3, "pp->refcnt=%d", pp->pp_refcnt);
+
+ /*
+ * If the target process record shows that no
+ * PMCs are attached to it, reclaim its space.
+ */
+
+ if (pp->pp_refcnt == 0) {
+ pmc_remove_process_descriptor(pp);
+ free(pp, M_PMC);
+ }
+ }
+
+ cpu = curthread->td_oncpu; /* setup cpu for pmd_release() */
+
+ }
+
+ /*
+ * Release any MD resources
+ */
+ (void) pcd->pcd_release_pmc(cpu, adjri, pm);
+
+ /*
+ * Update row disposition
+ */
+
+ if (PMC_IS_SYSTEM_MODE(PMC_TO_MODE(pm)))
+ PMC_UNMARK_ROW_STANDALONE(ri);
+ else
+ PMC_UNMARK_ROW_THREAD(ri);
+
+ /* unlink from the owner's list */
+ if (pm->pm_owner) {
+ LIST_REMOVE(pm, pm_next);
+ pm->pm_owner = NULL;
+ }
+
+ pmc_destroy_pmc_descriptor(pm);
+}
+
+/*
+ * Register an owner and a pmc.
+ */
+
+static int
+pmc_register_owner(struct proc *p, struct pmc *pmc)
+{
+ struct pmc_owner *po;
+
+ sx_assert(&pmc_sx, SX_XLOCKED);
+
+ if ((po = pmc_find_owner_descriptor(p)) == NULL)
+ if ((po = pmc_allocate_owner_descriptor(p)) == NULL)
+ return ENOMEM;
+
+ KASSERT(pmc->pm_owner == NULL,
+ ("[pmc,%d] attempting to own an initialized PMC", __LINE__));
+ pmc->pm_owner = po;
+
+ LIST_INSERT_HEAD(&po->po_pmcs, pmc, pm_next);
+
+ PROC_LOCK(p);
+ p->p_flag |= P_HWPMC;
+ PROC_UNLOCK(p);
+
+ if (po->po_flags & PMC_PO_OWNS_LOGFILE)
+ pmclog_process_pmcallocate(pmc);
+
+ PMCDBG(PMC,REG,1, "register-owner pmc-owner=%p pmc=%p",
+ po, pmc);
+
+ return 0;
+}
+
+/*
+ * Return the current row disposition:
+ * == 0 => FREE
+ * > 0 => PROCESS MODE
+ * < 0 => SYSTEM MODE
+ */
+
+int
+pmc_getrowdisp(int ri)
+{
+ return pmc_pmcdisp[ri];
+}
+
+/*
+ * Check if a PMC at row index 'ri' can be allocated to the current
+ * process.
+ *
+ * Allocation can fail if:
+ * - the current process is already being profiled by a PMC at index 'ri',
+ * attached to it via OP_PMCATTACH.
+ * - the current process has already allocated a PMC at index 'ri'
+ * via OP_ALLOCATE.
+ */
+
+static int
+pmc_can_allocate_rowindex(struct proc *p, unsigned int ri, int cpu)
+{
+ enum pmc_mode mode;
+ struct pmc *pm;
+ struct pmc_owner *po;
+ struct pmc_process *pp;
+
+ PMCDBG(PMC,ALR,1, "can-allocate-rowindex proc=%p (%d, %s) ri=%d "
+ "cpu=%d", p, p->p_pid, p->p_comm, ri, cpu);
+
+ /*
+ * We shouldn't have already allocated a process-mode PMC at
+ * row index 'ri'.
+ *
+ * We shouldn't have allocated a system-wide PMC on the same
+ * CPU and same RI.
+ */
+ if ((po = pmc_find_owner_descriptor(p)) != NULL)
+ LIST_FOREACH(pm, &po->po_pmcs, pm_next) {
+ if (PMC_TO_ROWINDEX(pm) == ri) {
+ mode = PMC_TO_MODE(pm);
+ if (PMC_IS_VIRTUAL_MODE(mode))
+ return EEXIST;
+ if (PMC_IS_SYSTEM_MODE(mode) &&
+ (int) PMC_TO_CPU(pm) == cpu)
+ return EEXIST;
+ }
+ }
+
+ /*
+ * We also shouldn't be the target of any PMC at this index
+ * since otherwise a PMC_ATTACH to ourselves will fail.
+ */
+ if ((pp = pmc_find_process_descriptor(p, 0)) != NULL)
+ if (pp->pp_pmcs[ri].pp_pmc)
+ return EEXIST;
+
+ PMCDBG(PMC,ALR,2, "can-allocate-rowindex proc=%p (%d, %s) ri=%d ok",
+ p, p->p_pid, p->p_comm, ri);
+
+ return 0;
+}
+
+/*
+ * Check if a given PMC at row index 'ri' can be currently used in
+ * mode 'mode'.
+ */
+
+static int
+pmc_can_allocate_row(int ri, enum pmc_mode mode)
+{
+ enum pmc_disp disp;
+
+ sx_assert(&pmc_sx, SX_XLOCKED);
+
+ PMCDBG(PMC,ALR,1, "can-allocate-row ri=%d mode=%d", ri, mode);
+
+ if (PMC_IS_SYSTEM_MODE(mode))
+ disp = PMC_DISP_STANDALONE;
+ else
+ disp = PMC_DISP_THREAD;
+
+ /*
+ * check disposition for PMC row 'ri':
+ *
+ * Expected disposition Row-disposition Result
+ *
+ * STANDALONE STANDALONE or FREE proceed
+ * STANDALONE THREAD fail
+ * THREAD THREAD or FREE proceed
+ * THREAD STANDALONE fail
+ */
+
+ if (!PMC_ROW_DISP_IS_FREE(ri) &&
+ !(disp == PMC_DISP_THREAD && PMC_ROW_DISP_IS_THREAD(ri)) &&
+ !(disp == PMC_DISP_STANDALONE && PMC_ROW_DISP_IS_STANDALONE(ri)))
+ return EBUSY;
+
+ /*
+ * All OK
+ */
+
+ PMCDBG(PMC,ALR,2, "can-allocate-row ri=%d mode=%d ok", ri, mode);
+
+ return 0;
+
+}
+
+/*
+ * Find a PMC descriptor with user handle 'pmcid' among those owned by 'po'.
+ */
+
+static struct pmc *
+pmc_find_pmc_descriptor_in_process(struct pmc_owner *po, pmc_id_t pmcid)
+{
+ struct pmc *pm;
+
+ KASSERT(PMC_ID_TO_ROWINDEX(pmcid) < md->pmd_npmc,
+ ("[pmc,%d] Illegal pmc index %d (max %d)", __LINE__,
+ PMC_ID_TO_ROWINDEX(pmcid), md->pmd_npmc));
+
+ LIST_FOREACH(pm, &po->po_pmcs, pm_next)
+ if (pm->pm_id == pmcid)
+ return pm;
+
+ return NULL;
+}
+
+static int
+pmc_find_pmc(pmc_id_t pmcid, struct pmc **pmc)
+{
+
+ struct pmc *pm;
+ struct pmc_owner *po;
+
+ PMCDBG(PMC,FND,1, "find-pmc id=%d", pmcid);
+
+ if ((po = pmc_find_owner_descriptor(curthread->td_proc)) == NULL)
+ return ESRCH;
+
+ if ((pm = pmc_find_pmc_descriptor_in_process(po, pmcid)) == NULL)
+ return EINVAL;
+
+ PMCDBG(PMC,FND,2, "find-pmc id=%d -> pmc=%p", pmcid, pm);
+
+ *pmc = pm;
+ return 0;
+}
+
+/*
+ * Start a PMC.
+ */
+
+static int
+pmc_start(struct pmc *pm)
+{
+ enum pmc_mode mode;
+ struct pmc_owner *po;
+ struct pmc_binding pb;
+ struct pmc_classdep *pcd;
+ int adjri, error, cpu, ri;
+
+ KASSERT(pm != NULL,
+ ("[pmc,%d] null pm", __LINE__));
+
+ mode = PMC_TO_MODE(pm);
+ ri = PMC_TO_ROWINDEX(pm);
+ pcd = pmc_ri_to_classdep(md, ri, &adjri);
+
+ error = 0;
+
+ PMCDBG(PMC,OPS,1, "start pmc=%p mode=%d ri=%d", pm, mode, ri);
+
+ po = pm->pm_owner;
+
+ /*
+ * Disallow PMCSTART if a logfile is required but has not been
+ * configured yet.
+ */
+ if ((pm->pm_flags & PMC_F_NEEDS_LOGFILE) &&
+ (po->po_flags & PMC_PO_OWNS_LOGFILE) == 0)
+ return (EDOOFUS); /* programming error */
+
+ /*
+ * If this is a sampling mode PMC, log mapping information for
+ * the kernel modules that are currently loaded.
+ */
+ if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
+ pmc_log_kernel_mappings(pm);
+
+ if (PMC_IS_VIRTUAL_MODE(mode)) {
+
+ /*
+ * If a PMCATTACH has never been done on this PMC,
+ * attach it to its owner process.
+ */
+
+ if (LIST_EMPTY(&pm->pm_targets))
+ error = (pm->pm_flags & PMC_F_ATTACH_DONE) ? ESRCH :
+ pmc_attach_process(po->po_owner, pm);
+
+ /*
+ * If the PMC is attached to its owner, then force a context
+ * switch to ensure that the MD state gets set correctly.
+ */
+
+ if (error == 0) {
+ pm->pm_state = PMC_STATE_RUNNING;
+ if (pm->pm_flags & PMC_F_ATTACHED_TO_OWNER)
+ pmc_force_context_switch();
+ }
+
+ return (error);
+ }
+
+
+ /*
+ * A system-wide PMC.
+ *
+ * Add the owner to the global list if this is a system-wide
+ * sampling PMC.
+ */
+
+ if (mode == PMC_MODE_SS) {
+ if (po->po_sscount == 0) {
+ LIST_INSERT_HEAD(&pmc_ss_owners, po, po_ssnext);
+ atomic_add_rel_int(&pmc_ss_count, 1);
+ PMCDBG(PMC,OPS,1, "po=%p in global list", po);
+ }
+ po->po_sscount++;
+
+ /*
+ * Log mapping information for all existing processes in the
+ * system. Subsequent mappings are logged as they happen;
+ * see pmc_process_mmap().
+ */
+ if (po->po_logprocmaps == 0) {
+ pmc_log_all_process_mappings(po);
+ po->po_logprocmaps = 1;
+ }
+ }
+
+ /*
+ * Move to the CPU associated with this
+ * PMC, and start the hardware.
+ */
+
+ pmc_save_cpu_binding(&pb);
+
+ cpu = PMC_TO_CPU(pm);
+
+ if (!pmc_cpu_is_active(cpu))
+ return (ENXIO);
+
+ pmc_select_cpu(cpu);
+
+ /*
+ * global PMCs are configured at allocation time
+ * so write out the initial value and start the PMC.
+ */
+
+ pm->pm_state = PMC_STATE_RUNNING;
+
+ critical_enter();
+ if ((error = pcd->pcd_write_pmc(cpu, adjri,
+ PMC_IS_SAMPLING_MODE(mode) ?
+ pm->pm_sc.pm_reloadcount :
+ pm->pm_sc.pm_initial)) == 0)
+ error = pcd->pcd_start_pmc(cpu, adjri);
+ critical_exit();
+
+ pmc_restore_cpu_binding(&pb);
+
+ return (error);
+}
+
+/*
+ * Stop a PMC.
+ */
+
+static int
+pmc_stop(struct pmc *pm)
+{
+ struct pmc_owner *po;
+ struct pmc_binding pb;
+ struct pmc_classdep *pcd;
+ int adjri, cpu, error, ri;
+
+ KASSERT(pm != NULL, ("[pmc,%d] null pmc", __LINE__));
+
+ PMCDBG(PMC,OPS,1, "stop pmc=%p mode=%d ri=%d", pm,
+ PMC_TO_MODE(pm), PMC_TO_ROWINDEX(pm));
+
+ pm->pm_state = PMC_STATE_STOPPED;
+
+ /*
+ * If the PMC is a virtual mode one, changing the state to
+ * non-RUNNING is enough to ensure that the PMC never gets
+ * scheduled.
+ *
+	 * If this PMC is currently running on a CPU, it will be
+	 * handled correctly when its target process is context
+	 * switched out.
+ */
+
+ if (PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)))
+ return 0;
+
+ /*
+ * A system-mode PMC. Move to the CPU associated with
+ * this PMC, and stop the hardware. We update the
+ * 'initial count' so that a subsequent PMCSTART will
+ * resume counting from the current hardware count.
+ */
+
+ pmc_save_cpu_binding(&pb);
+
+ cpu = PMC_TO_CPU(pm);
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[pmc,%d] illegal cpu=%d", __LINE__, cpu));
+
+ if (!pmc_cpu_is_active(cpu))
+ return ENXIO;
+
+ pmc_select_cpu(cpu);
+
+ ri = PMC_TO_ROWINDEX(pm);
+ pcd = pmc_ri_to_classdep(md, ri, &adjri);
+
+ critical_enter();
+ if ((error = pcd->pcd_stop_pmc(cpu, adjri)) == 0)
+ error = pcd->pcd_read_pmc(cpu, adjri, &pm->pm_sc.pm_initial);
+ critical_exit();
+
+ pmc_restore_cpu_binding(&pb);
+
+ po = pm->pm_owner;
+
+ /* remove this owner from the global list of SS PMC owners */
+ if (PMC_TO_MODE(pm) == PMC_MODE_SS) {
+ po->po_sscount--;
+ if (po->po_sscount == 0) {
+ atomic_subtract_rel_int(&pmc_ss_count, 1);
+ LIST_REMOVE(po, po_ssnext);
+ PMCDBG(PMC,OPS,2,"po=%p removed from global list", po);
+ }
+ }
+
+ return (error);
+}
+
+
+#ifdef DEBUG
+static const char *pmc_op_to_name[] = {
+#undef __PMC_OP
+#define __PMC_OP(N, D) #N ,
+ __PMC_OPS()
+ NULL
+};
+#endif
+
+/*
+ * The syscall interface
+ */
+
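+/*
+ * PMC_GET_SX_XLOCK() takes the module's sx lock exclusively and bails
+ * out with the supplied return value if the module is not active
+ * (i.e., 'pmc_hook' is NULL).  PMC_DOWNGRADE_SX() converts the
+ * exclusive lock to a shared one for read-mostly operations.
+ */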
+#define PMC_GET_SX_XLOCK(...) do { \
+ sx_xlock(&pmc_sx); \
+ if (pmc_hook == NULL) { \
+ sx_xunlock(&pmc_sx); \
+ return __VA_ARGS__; \
+ } \
+} while (0)
+
+#define PMC_DOWNGRADE_SX() do { \
+ sx_downgrade(&pmc_sx); \
+ is_sx_downgraded = 1; \
+} while (0)
+
+static int
+pmc_syscall_handler(struct thread *td, void *syscall_args)
+{
+ int error, is_sx_downgraded, is_sx_locked, op;
+ struct pmc_syscall_args *c;
+ void *arg;
+
+ PMC_GET_SX_XLOCK(ENOSYS);
+
+ DROP_GIANT();
+
+ is_sx_downgraded = 0;
+ is_sx_locked = 1;
+
+ c = (struct pmc_syscall_args *) syscall_args;
+
+ op = c->pmop_code;
+ arg = c->pmop_data;
+
+ PMCDBG(MOD,PMS,1, "syscall op=%d \"%s\" arg=%p", op,
+ pmc_op_to_name[op], arg);
+
+ error = 0;
+ atomic_add_int(&pmc_stats.pm_syscalls, 1);
+
+ switch(op)
+ {
+
+
+ /*
+ * Configure a log file.
+ *
+ * XXX This OP will be reworked.
+ */
+
+ case PMC_OP_CONFIGURELOG:
+ {
+ struct proc *p;
+ struct pmc *pm;
+ struct pmc_owner *po;
+ struct pmc_op_configurelog cl;
+
+ sx_assert(&pmc_sx, SX_XLOCKED);
+
+ if ((error = copyin(arg, &cl, sizeof(cl))) != 0)
+ break;
+
+ /* mark this process as owning a log file */
+ p = td->td_proc;
+ if ((po = pmc_find_owner_descriptor(p)) == NULL)
+ if ((po = pmc_allocate_owner_descriptor(p)) == NULL) {
+ error = ENOMEM;
+ break;
+ }
+
+ /*
+		 * If a valid fd was passed in, try to configure it;
+		 * otherwise, if 'fd' was negative and a log file was
+		 * already configured, flush its buffers and
+		 * de-configure it.
+ */
+ if (cl.pm_logfd >= 0) {
+ sx_xunlock(&pmc_sx);
+ is_sx_locked = 0;
+ error = pmclog_configure_log(md, po, cl.pm_logfd);
+ } else if (po->po_flags & PMC_PO_OWNS_LOGFILE) {
+ pmclog_process_closelog(po);
+ error = pmclog_close(po);
+ if (error == 0) {
+ LIST_FOREACH(pm, &po->po_pmcs, pm_next)
+ if (pm->pm_flags & PMC_F_NEEDS_LOGFILE &&
+ pm->pm_state == PMC_STATE_RUNNING)
+ pmc_stop(pm);
+ error = pmclog_deconfigure_log(po);
+ }
+ } else
+ error = EINVAL;
+
+ if (error)
+ break;
+ }
+ break;
+
+ /*
+ * Flush a log file.
+ */
+
+ case PMC_OP_FLUSHLOG:
+ {
+ struct pmc_owner *po;
+
+ sx_assert(&pmc_sx, SX_XLOCKED);
+
+ if ((po = pmc_find_owner_descriptor(td->td_proc)) == NULL) {
+ error = EINVAL;
+ break;
+ }
+
+ error = pmclog_flush(po);
+ }
+ break;
+
+ /*
+ * Close a log file.
+ */
+
+ case PMC_OP_CLOSELOG:
+ {
+ struct pmc_owner *po;
+
+ sx_assert(&pmc_sx, SX_XLOCKED);
+
+ if ((po = pmc_find_owner_descriptor(td->td_proc)) == NULL) {
+ error = EINVAL;
+ break;
+ }
+
+ error = pmclog_close(po);
+ }
+ break;
+
+ /*
+ * Retrieve hardware configuration.
+ */
+
+ case PMC_OP_GETCPUINFO: /* CPU information */
+ {
+ struct pmc_op_getcpuinfo gci;
+ struct pmc_classinfo *pci;
+ struct pmc_classdep *pcd;
+ int cl;
+
+ gci.pm_cputype = md->pmd_cputype;
+ gci.pm_ncpu = pmc_cpu_max();
+ gci.pm_npmc = md->pmd_npmc;
+ gci.pm_nclass = md->pmd_nclass;
+ pci = gci.pm_classes;
+ pcd = md->pmd_classdep;
+ for (cl = 0; cl < md->pmd_nclass; cl++, pci++, pcd++) {
+ pci->pm_caps = pcd->pcd_caps;
+ pci->pm_class = pcd->pcd_class;
+ pci->pm_width = pcd->pcd_width;
+ pci->pm_num = pcd->pcd_num;
+ }
+ error = copyout(&gci, arg, sizeof(gci));
+ }
+ break;
+
+ /*
+ * Retrieve soft events list.
+ */
+ case PMC_OP_GETDYNEVENTINFO:
+ {
+ enum pmc_class cl;
+ enum pmc_event ev;
+ struct pmc_op_getdyneventinfo *gei;
+ struct pmc_dyn_event_descr dev;
+ struct pmc_soft *ps;
+ uint32_t nevent;
+
+ sx_assert(&pmc_sx, SX_LOCKED);
+
+ gei = (struct pmc_op_getdyneventinfo *) arg;
+
+ if ((error = copyin(&gei->pm_class, &cl, sizeof(cl))) != 0)
+ break;
+
+ /* Only SOFT class is dynamic. */
+ if (cl != PMC_CLASS_SOFT) {
+ error = EINVAL;
+ break;
+ }
+
+ nevent = 0;
+ for (ev = PMC_EV_SOFT_FIRST; (int)ev <= PMC_EV_SOFT_LAST; ev++) {
+ ps = pmc_soft_ev_acquire(ev);
+ if (ps == NULL)
+ continue;
+ bcopy(&ps->ps_ev, &dev, sizeof(dev));
+ pmc_soft_ev_release(ps);
+
+ error = copyout(&dev,
+ &gei->pm_events[nevent],
+ sizeof(struct pmc_dyn_event_descr));
+ if (error != 0)
+ break;
+ nevent++;
+ }
+ if (error != 0)
+ break;
+
+ error = copyout(&nevent, &gei->pm_nevent,
+ sizeof(nevent));
+ }
+ break;
+
+ /*
+ * Get module statistics
+ */
+
+ case PMC_OP_GETDRIVERSTATS:
+ {
+ struct pmc_op_getdriverstats gms;
+
+ bcopy(&pmc_stats, &gms, sizeof(gms));
+ error = copyout(&gms, arg, sizeof(gms));
+ }
+ break;
+
+
+ /*
+ * Retrieve module version number
+ */
+
+ case PMC_OP_GETMODULEVERSION:
+ {
+ uint32_t cv, modv;
+
+ /* retrieve the client's idea of the ABI version */
+ if ((error = copyin(arg, &cv, sizeof(uint32_t))) != 0)
+ break;
+		/* refuse clients whose major version (upper 16 bits) is newer */
+ modv = PMC_VERSION;
+ if ((cv & 0xFFFF0000) > (modv & 0xFFFF0000)) {
+ error = EPROGMISMATCH;
+ break;
+ }
+		error = copyout(&modv, arg, sizeof(modv));
+ }
+ break;
+
+
+ /*
+ * Retrieve the state of all the PMCs on a given
+ * CPU.
+ */
+
+ case PMC_OP_GETPMCINFO:
+ {
+ int ari;
+ struct pmc *pm;
+ size_t pmcinfo_size;
+ uint32_t cpu, n, npmc;
+ struct pmc_owner *po;
+ struct pmc_binding pb;
+ struct pmc_classdep *pcd;
+ struct pmc_info *p, *pmcinfo;
+ struct pmc_op_getpmcinfo *gpi;
+
+ PMC_DOWNGRADE_SX();
+
+ gpi = (struct pmc_op_getpmcinfo *) arg;
+
+ if ((error = copyin(&gpi->pm_cpu, &cpu, sizeof(cpu))) != 0)
+ break;
+
+ if (cpu >= pmc_cpu_max()) {
+ error = EINVAL;
+ break;
+ }
+
+ if (!pmc_cpu_is_active(cpu)) {
+ error = ENXIO;
+ break;
+ }
+
+ /* switch to CPU 'cpu' */
+ pmc_save_cpu_binding(&pb);
+ pmc_select_cpu(cpu);
+
+ npmc = md->pmd_npmc;
+
+ pmcinfo_size = npmc * sizeof(struct pmc_info);
+ pmcinfo = malloc(pmcinfo_size, M_PMC, M_WAITOK);
+
+ p = pmcinfo;
+
+ for (n = 0; n < md->pmd_npmc; n++, p++) {
+
+ pcd = pmc_ri_to_classdep(md, n, &ari);
+
+ KASSERT(pcd != NULL,
+ ("[pmc,%d] null pcd ri=%d", __LINE__, n));
+
+ if ((error = pcd->pcd_describe(cpu, ari, p, &pm)) != 0)
+ break;
+
+ if (PMC_ROW_DISP_IS_STANDALONE(n))
+ p->pm_rowdisp = PMC_DISP_STANDALONE;
+ else if (PMC_ROW_DISP_IS_THREAD(n))
+ p->pm_rowdisp = PMC_DISP_THREAD;
+ else
+ p->pm_rowdisp = PMC_DISP_FREE;
+
+ p->pm_ownerpid = -1;
+
+ if (pm == NULL) /* no PMC associated */
+ continue;
+
+ po = pm->pm_owner;
+
+ KASSERT(po->po_owner != NULL,
+ ("[pmc,%d] pmc_owner had a null proc pointer",
+ __LINE__));
+
+ p->pm_ownerpid = po->po_owner->p_pid;
+ p->pm_mode = PMC_TO_MODE(pm);
+ p->pm_event = pm->pm_event;
+ p->pm_flags = pm->pm_flags;
+
+ if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
+ p->pm_reloadcount =
+ pm->pm_sc.pm_reloadcount;
+ }
+
+ pmc_restore_cpu_binding(&pb);
+
+ /* now copy out the PMC info collected */
+ if (error == 0)
+ error = copyout(pmcinfo, &gpi->pm_pmcs, pmcinfo_size);
+
+ free(pmcinfo, M_PMC);
+ }
+ break;
+
+
+ /*
+	 * Set the administrative state of a PMC, i.e., whether
+	 * the PMC is to be used or not.
+ */
+
+ case PMC_OP_PMCADMIN:
+ {
+ int cpu, ri;
+ enum pmc_state request;
+ struct pmc_cpu *pc;
+ struct pmc_hw *phw;
+ struct pmc_op_pmcadmin pma;
+ struct pmc_binding pb;
+
+ sx_assert(&pmc_sx, SX_XLOCKED);
+
+ KASSERT(td == curthread,
+ ("[pmc,%d] td != curthread", __LINE__));
+
+ error = priv_check(td, PRIV_PMC_MANAGE);
+ if (error)
+ break;
+
+ if ((error = copyin(arg, &pma, sizeof(pma))) != 0)
+ break;
+
+ cpu = pma.pm_cpu;
+
+ if (cpu < 0 || cpu >= (int) pmc_cpu_max()) {
+ error = EINVAL;
+ break;
+ }
+
+ if (!pmc_cpu_is_active(cpu)) {
+ error = ENXIO;
+ break;
+ }
+
+ request = pma.pm_state;
+
+ if (request != PMC_STATE_DISABLED &&
+ request != PMC_STATE_FREE) {
+ error = EINVAL;
+ break;
+ }
+
+ ri = pma.pm_pmc; /* pmc id == row index */
+ if (ri < 0 || ri >= (int) md->pmd_npmc) {
+ error = EINVAL;
+ break;
+ }
+
+ /*
+ * We can't disable a PMC with a row-index allocated
+ * for process virtual PMCs.
+ */
+
+ if (PMC_ROW_DISP_IS_THREAD(ri) &&
+ request == PMC_STATE_DISABLED) {
+ error = EBUSY;
+ break;
+ }
+
+ /*
+ * otherwise, this PMC on this CPU is either free or
+ * in system-wide mode.
+ */
+
+ pmc_save_cpu_binding(&pb);
+ pmc_select_cpu(cpu);
+
+ pc = pmc_pcpu[cpu];
+ phw = pc->pc_hwpmcs[ri];
+
+ /*
+ * XXX do we need some kind of 'forced' disable?
+ */
+
+ if (phw->phw_pmc == NULL) {
+ if (request == PMC_STATE_DISABLED &&
+ (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED)) {
+ phw->phw_state &= ~PMC_PHW_FLAG_IS_ENABLED;
+ PMC_MARK_ROW_STANDALONE(ri);
+ } else if (request == PMC_STATE_FREE &&
+ (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) == 0) {
+ phw->phw_state |= PMC_PHW_FLAG_IS_ENABLED;
+ PMC_UNMARK_ROW_STANDALONE(ri);
+ }
+ /* other cases are a no-op */
+ } else
+ error = EBUSY;
+
+ pmc_restore_cpu_binding(&pb);
+ }
+ break;
+
+
+ /*
+ * Allocate a PMC.
+ */
+
+ case PMC_OP_PMCALLOCATE:
+ {
+ int adjri, n;
+ u_int cpu;
+ uint32_t caps;
+ struct pmc *pmc;
+ enum pmc_mode mode;
+ struct pmc_hw *phw;
+ struct pmc_binding pb;
+ struct pmc_classdep *pcd;
+ struct pmc_op_pmcallocate pa;
+
+ if ((error = copyin(arg, &pa, sizeof(pa))) != 0)
+ break;
+
+ caps = pa.pm_caps;
+ mode = pa.pm_mode;
+ cpu = pa.pm_cpu;
+
+ if ((mode != PMC_MODE_SS && mode != PMC_MODE_SC &&
+ mode != PMC_MODE_TS && mode != PMC_MODE_TC) ||
+ (cpu != (u_int) PMC_CPU_ANY && cpu >= pmc_cpu_max())) {
+ error = EINVAL;
+ break;
+ }
+
+ /*
+ * Virtual PMCs should only ask for a default CPU.
+ * System mode PMCs need to specify a non-default CPU.
+ */
+
+ if ((PMC_IS_VIRTUAL_MODE(mode) && cpu != (u_int) PMC_CPU_ANY) ||
+ (PMC_IS_SYSTEM_MODE(mode) && cpu == (u_int) PMC_CPU_ANY)) {
+ error = EINVAL;
+ break;
+ }
+
+ /*
+ * Check that an inactive CPU is not being asked for.
+ */
+
+ if (PMC_IS_SYSTEM_MODE(mode) && !pmc_cpu_is_active(cpu)) {
+ error = ENXIO;
+ break;
+ }
+
+ /*
+ * Refuse an allocation for a system-wide PMC if this
+ * process has been jailed, or if this process lacks
+ * super-user credentials and the sysctl tunable
+ * 'security.bsd.unprivileged_syspmcs' is zero.
+ */
+
+ if (PMC_IS_SYSTEM_MODE(mode)) {
+ if (jailed(curthread->td_ucred)) {
+ error = EPERM;
+ break;
+ }
+ if (!pmc_unprivileged_syspmcs) {
+ error = priv_check(curthread,
+ PRIV_PMC_SYSTEM);
+ if (error)
+ break;
+ }
+ }
+
+ /*
+ * Look for valid values for 'pm_flags'
+ */
+
+ if ((pa.pm_flags & ~(PMC_F_DESCENDANTS | PMC_F_LOG_PROCCSW |
+ PMC_F_LOG_PROCEXIT | PMC_F_CALLCHAIN)) != 0) {
+ error = EINVAL;
+ break;
+ }
+
+ /* process logging options are not allowed for system PMCs */
+ if (PMC_IS_SYSTEM_MODE(mode) && (pa.pm_flags &
+ (PMC_F_LOG_PROCCSW | PMC_F_LOG_PROCEXIT))) {
+ error = EINVAL;
+ break;
+ }
+
+ /*
+ * All sampling mode PMCs need to be able to interrupt the
+ * CPU.
+ */
+ if (PMC_IS_SAMPLING_MODE(mode))
+ caps |= PMC_CAP_INTERRUPT;
+
+ /* A valid class specifier should have been passed in. */
+ for (n = 0; n < md->pmd_nclass; n++)
+ if (md->pmd_classdep[n].pcd_class == pa.pm_class)
+ break;
+ if (n == md->pmd_nclass) {
+ error = EINVAL;
+ break;
+ }
+
+ /* The requested PMC capabilities should be feasible. */
+ if ((md->pmd_classdep[n].pcd_caps & caps) != caps) {
+ error = EOPNOTSUPP;
+ break;
+ }
+
+ PMCDBG(PMC,ALL,2, "event=%d caps=0x%x mode=%d cpu=%d",
+ pa.pm_ev, caps, mode, cpu);
+
+ pmc = pmc_allocate_pmc_descriptor();
+ pmc->pm_id = PMC_ID_MAKE_ID(cpu,pa.pm_mode,pa.pm_class,
+ PMC_ID_INVALID);
+ pmc->pm_event = pa.pm_ev;
+ pmc->pm_state = PMC_STATE_FREE;
+ pmc->pm_caps = caps;
+ pmc->pm_flags = pa.pm_flags;
+
+ /* switch thread to CPU 'cpu' */
+ pmc_save_cpu_binding(&pb);
+
+#define PMC_IS_SHAREABLE_PMC(cpu, n) \
+ (pmc_pcpu[(cpu)]->pc_hwpmcs[(n)]->phw_state & \
+ PMC_PHW_FLAG_IS_SHAREABLE)
+#define PMC_IS_UNALLOCATED(cpu, n) \
+ (pmc_pcpu[(cpu)]->pc_hwpmcs[(n)]->phw_pmc == NULL)
+
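+		/*
+		 * Scan the row indices for the first PMC that has a
+		 * compatible row disposition, is not already claimed
+		 * by this process, and whose class-dependent
+		 * allocation succeeds.
+		 */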
+ if (PMC_IS_SYSTEM_MODE(mode)) {
+ pmc_select_cpu(cpu);
+ for (n = 0; n < (int) md->pmd_npmc; n++) {
+ pcd = pmc_ri_to_classdep(md, n, &adjri);
+ if (pmc_can_allocate_row(n, mode) == 0 &&
+ pmc_can_allocate_rowindex(
+ curthread->td_proc, n, cpu) == 0 &&
+ (PMC_IS_UNALLOCATED(cpu, n) ||
+ PMC_IS_SHAREABLE_PMC(cpu, n)) &&
+ pcd->pcd_allocate_pmc(cpu, adjri, pmc,
+ &pa) == 0)
+ break;
+ }
+ } else {
+ /* Process virtual mode */
+ for (n = 0; n < (int) md->pmd_npmc; n++) {
+ pcd = pmc_ri_to_classdep(md, n, &adjri);
+ if (pmc_can_allocate_row(n, mode) == 0 &&
+ pmc_can_allocate_rowindex(
+ curthread->td_proc, n,
+ PMC_CPU_ANY) == 0 &&
+ pcd->pcd_allocate_pmc(curthread->td_oncpu,
+ adjri, pmc, &pa) == 0)
+ break;
+ }
+ }
+
+#undef PMC_IS_UNALLOCATED
+#undef PMC_IS_SHAREABLE_PMC
+
+ pmc_restore_cpu_binding(&pb);
+
+ if (n == (int) md->pmd_npmc) {
+ pmc_destroy_pmc_descriptor(pmc);
+ free(pmc, M_PMC);
+ pmc = NULL;
+ error = EINVAL;
+ break;
+ }
+
+ /* Fill in the correct value in the ID field */
+ pmc->pm_id = PMC_ID_MAKE_ID(cpu,mode,pa.pm_class,n);
+
+ PMCDBG(PMC,ALL,2, "ev=%d class=%d mode=%d n=%d -> pmcid=%x",
+ pmc->pm_event, pa.pm_class, mode, n, pmc->pm_id);
+
+ /* Process mode PMCs with logging enabled need log files */
+ if (pmc->pm_flags & (PMC_F_LOG_PROCEXIT | PMC_F_LOG_PROCCSW))
+ pmc->pm_flags |= PMC_F_NEEDS_LOGFILE;
+
+ /* All system mode sampling PMCs require a log file */
+ if (PMC_IS_SAMPLING_MODE(mode) && PMC_IS_SYSTEM_MODE(mode))
+ pmc->pm_flags |= PMC_F_NEEDS_LOGFILE;
+
+ /*
+		 * Configure global PMCs immediately.
+ */
+
+ if (PMC_IS_SYSTEM_MODE(PMC_TO_MODE(pmc))) {
+
+ pmc_save_cpu_binding(&pb);
+ pmc_select_cpu(cpu);
+
+ phw = pmc_pcpu[cpu]->pc_hwpmcs[n];
+ pcd = pmc_ri_to_classdep(md, n, &adjri);
+
+ if ((phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) == 0 ||
+ (error = pcd->pcd_config_pmc(cpu, adjri, pmc)) != 0) {
+ (void) pcd->pcd_release_pmc(cpu, adjri, pmc);
+ pmc_destroy_pmc_descriptor(pmc);
+ free(pmc, M_PMC);
+ pmc = NULL;
+ pmc_restore_cpu_binding(&pb);
+ error = EPERM;
+ break;
+ }
+
+ pmc_restore_cpu_binding(&pb);
+ }
+
+ pmc->pm_state = PMC_STATE_ALLOCATED;
+
+ /*
+ * mark row disposition
+ */
+
+ if (PMC_IS_SYSTEM_MODE(mode))
+ PMC_MARK_ROW_STANDALONE(n);
+ else
+ PMC_MARK_ROW_THREAD(n);
+
+ /*
+ * Register this PMC with the current thread as its owner.
+ */
+
+ if ((error =
+ pmc_register_owner(curthread->td_proc, pmc)) != 0) {
+ pmc_release_pmc_descriptor(pmc);
+ free(pmc, M_PMC);
+ pmc = NULL;
+ break;
+ }
+
+ /*
+ * Return the allocated index.
+ */
+
+ pa.pm_pmcid = pmc->pm_id;
+
+ error = copyout(&pa, arg, sizeof(pa));
+ }
+ break;
+
+
+ /*
+ * Attach a PMC to a process.
+ */
+
+ case PMC_OP_PMCATTACH:
+ {
+ struct pmc *pm;
+ struct proc *p;
+ struct pmc_op_pmcattach a;
+
+ sx_assert(&pmc_sx, SX_XLOCKED);
+
+ if ((error = copyin(arg, &a, sizeof(a))) != 0)
+ break;
+
+ if (a.pm_pid < 0) {
+ error = EINVAL;
+ break;
+ } else if (a.pm_pid == 0)
+ a.pm_pid = td->td_proc->p_pid;
+
+ if ((error = pmc_find_pmc(a.pm_pmc, &pm)) != 0)
+ break;
+
+ if (PMC_IS_SYSTEM_MODE(PMC_TO_MODE(pm))) {
+ error = EINVAL;
+ break;
+ }
+
+ /* PMCs may be (re)attached only when allocated or stopped */
+ if (pm->pm_state == PMC_STATE_RUNNING) {
+ error = EBUSY;
+ break;
+ } else if (pm->pm_state != PMC_STATE_ALLOCATED &&
+ pm->pm_state != PMC_STATE_STOPPED) {
+ error = EINVAL;
+ break;
+ }
+
+ /* lookup pid */
+ if ((p = pfind(a.pm_pid)) == NULL) {
+ error = ESRCH;
+ break;
+ }
+
+ /*
+ * Ignore processes that are working on exiting.
+ */
+ if (p->p_flag & P_WEXIT) {
+ error = ESRCH;
+ PROC_UNLOCK(p); /* pfind() returns a locked process */
+ break;
+ }
+
+ /*
+ * we are allowed to attach a PMC to a process if
+ * we can debug it.
+ */
+ error = p_candebug(curthread, p);
+
+ PROC_UNLOCK(p);
+
+ if (error == 0)
+ error = pmc_attach_process(p, pm);
+ }
+ break;
+
+
+ /*
+ * Detach an attached PMC from a process.
+ */
+
+ case PMC_OP_PMCDETACH:
+ {
+ struct pmc *pm;
+ struct proc *p;
+ struct pmc_op_pmcattach a;
+
+ if ((error = copyin(arg, &a, sizeof(a))) != 0)
+ break;
+
+ if (a.pm_pid < 0) {
+ error = EINVAL;
+ break;
+ } else if (a.pm_pid == 0)
+ a.pm_pid = td->td_proc->p_pid;
+
+ if ((error = pmc_find_pmc(a.pm_pmc, &pm)) != 0)
+ break;
+
+ if ((p = pfind(a.pm_pid)) == NULL) {
+ error = ESRCH;
+ break;
+ }
+
+ /*
+ * Treat processes that are in the process of exiting
+ * as if they were not present.
+ */
+
+ if (p->p_flag & P_WEXIT)
+ error = ESRCH;
+
+ PROC_UNLOCK(p); /* pfind() returns a locked process */
+
+ if (error == 0)
+ error = pmc_detach_process(p, pm);
+ }
+ break;
+
+
+ /*
+ * Retrieve the MSR number associated with the counter
+ * 'pmc_id'. This allows processes to directly use RDPMC
+ * instructions to read their PMCs, without the overhead of a
+ * system call.
+ */
+
+ case PMC_OP_PMCGETMSR:
+ {
+ int adjri, ri;
+ struct pmc *pm;
+ struct pmc_target *pt;
+ struct pmc_op_getmsr gm;
+ struct pmc_classdep *pcd;
+
+ PMC_DOWNGRADE_SX();
+
+ if ((error = copyin(arg, &gm, sizeof(gm))) != 0)
+ break;
+
+ if ((error = pmc_find_pmc(gm.pm_pmcid, &pm)) != 0)
+ break;
+
+ /*
+ * The allocated PMC has to be a process virtual PMC,
+ * i.e., of type MODE_T[CS]. Global PMCs can only be
+ * read using the PMCREAD operation since they may be
+ * allocated on a different CPU than the one we could
+ * be running on at the time of the RDPMC instruction.
+ *
+ * The GETMSR operation is not allowed for PMCs that
+ * are inherited across processes.
+ */
+
+ if (!PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)) ||
+ (pm->pm_flags & PMC_F_DESCENDANTS)) {
+ error = EINVAL;
+ break;
+ }
+
+ /*
+		 * It only makes sense to use RDPMC (or its
+		 * equivalent instruction on non-x86 architectures) in
+		 * a process that has allocated and attached a PMC to
+		 * itself.  Conversely, the PMC is only allowed to have
+		 * one process attached to it -- its owner.
+ */
+
+ if ((pt = LIST_FIRST(&pm->pm_targets)) == NULL ||
+ LIST_NEXT(pt, pt_next) != NULL ||
+ pt->pt_process->pp_proc != pm->pm_owner->po_owner) {
+ error = EINVAL;
+ break;
+ }
+
+ ri = PMC_TO_ROWINDEX(pm);
+ pcd = pmc_ri_to_classdep(md, ri, &adjri);
+
+ /* PMC class has no 'GETMSR' support */
+ if (pcd->pcd_get_msr == NULL) {
+ error = ENOSYS;
+ break;
+ }
+
+		if ((error = (*pcd->pcd_get_msr)(adjri, &gm.pm_msr)) != 0)
+ break;
+
+		if ((error = copyout(&gm, arg, sizeof(gm))) != 0)
+ break;
+
+ /*
+ * Mark our process as using MSRs. Update machine
+ * state using a forced context switch.
+ */
+
+ pt->pt_process->pp_flags |= PMC_PP_ENABLE_MSR_ACCESS;
+ pmc_force_context_switch();
+
+ }
+ break;
+
+ /*
+ * Release an allocated PMC
+ */
+
+ case PMC_OP_PMCRELEASE:
+ {
+ pmc_id_t pmcid;
+ struct pmc *pm;
+ struct pmc_owner *po;
+ struct pmc_op_simple sp;
+
+ /*
+ * Find PMC pointer for the named PMC.
+ *
+ * Use pmc_release_pmc_descriptor() to switch off the
+ * PMC, remove all its target threads, and remove the
+ * PMC from its owner's list.
+ *
+ * Remove the owner record if this is the last PMC
+ * owned.
+ *
+ * Free up space.
+ */
+
+ if ((error = copyin(arg, &sp, sizeof(sp))) != 0)
+ break;
+
+ pmcid = sp.pm_pmcid;
+
+ if ((error = pmc_find_pmc(pmcid, &pm)) != 0)
+ break;
+
+ po = pm->pm_owner;
+ pmc_release_pmc_descriptor(pm);
+ pmc_maybe_remove_owner(po);
+
+ free(pm, M_PMC);
+ }
+ break;
+
+
+ /*
+ * Read and/or write a PMC.
+ */
+
+ case PMC_OP_PMCRW:
+ {
+ int adjri;
+ struct pmc *pm;
+ uint32_t cpu, ri;
+ pmc_value_t oldvalue;
+ struct pmc_binding pb;
+ struct pmc_op_pmcrw prw;
+ struct pmc_classdep *pcd;
+ struct pmc_op_pmcrw *pprw;
+
+ PMC_DOWNGRADE_SX();
+
+ if ((error = copyin(arg, &prw, sizeof(prw))) != 0)
+ break;
+
+ ri = 0;
+ PMCDBG(PMC,OPS,1, "rw id=%d flags=0x%x", prw.pm_pmcid,
+ prw.pm_flags);
+
+ /* must have at least one flag set */
+ if ((prw.pm_flags & (PMC_F_OLDVALUE|PMC_F_NEWVALUE)) == 0) {
+ error = EINVAL;
+ break;
+ }
+
+ /* locate pmc descriptor */
+ if ((error = pmc_find_pmc(prw.pm_pmcid, &pm)) != 0)
+ break;
+
+ /* Can't read a PMC that hasn't been started. */
+ if (pm->pm_state != PMC_STATE_ALLOCATED &&
+ pm->pm_state != PMC_STATE_STOPPED &&
+ pm->pm_state != PMC_STATE_RUNNING) {
+ error = EINVAL;
+ break;
+ }
+
+ /* writing a new value is allowed only for 'STOPPED' pmcs */
+ if (pm->pm_state == PMC_STATE_RUNNING &&
+ (prw.pm_flags & PMC_F_NEWVALUE)) {
+ error = EBUSY;
+ break;
+ }
+
+ if (PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm))) {
+
+ /*
+ * If this PMC is attached to its owner (i.e.,
+ * the process requesting this operation) and
+ * is running, then attempt to get an
+			 * up-to-date reading from hardware for a READ.
+ * Writes are only allowed when the PMC is
+ * stopped, so only update the saved value
+ * field.
+ *
+ * If the PMC is not running, or is not
+ * attached to its owner, read/write to the
+ * savedvalue field.
+ */
+
+ ri = PMC_TO_ROWINDEX(pm);
+ pcd = pmc_ri_to_classdep(md, ri, &adjri);
+
+ mtx_pool_lock_spin(pmc_mtxpool, pm);
+ cpu = curthread->td_oncpu;
+
+ if (prw.pm_flags & PMC_F_OLDVALUE) {
+ if ((pm->pm_flags & PMC_F_ATTACHED_TO_OWNER) &&
+ (pm->pm_state == PMC_STATE_RUNNING))
+ error = (*pcd->pcd_read_pmc)(cpu, adjri,
+ &oldvalue);
+ else
+ oldvalue = pm->pm_gv.pm_savedvalue;
+ }
+ if (prw.pm_flags & PMC_F_NEWVALUE)
+ pm->pm_gv.pm_savedvalue = prw.pm_value;
+
+ mtx_pool_unlock_spin(pmc_mtxpool, pm);
+
+ } else { /* System mode PMCs */
+ cpu = PMC_TO_CPU(pm);
+ ri = PMC_TO_ROWINDEX(pm);
+ pcd = pmc_ri_to_classdep(md, ri, &adjri);
+
+ if (!pmc_cpu_is_active(cpu)) {
+ error = ENXIO;
+ break;
+ }
+
+ /* move this thread to CPU 'cpu' */
+ pmc_save_cpu_binding(&pb);
+ pmc_select_cpu(cpu);
+
+ critical_enter();
+ /* save old value */
+ if (prw.pm_flags & PMC_F_OLDVALUE)
+ if ((error = (*pcd->pcd_read_pmc)(cpu, adjri,
+ &oldvalue)))
+ goto error;
+ /* write out new value */
+ if (prw.pm_flags & PMC_F_NEWVALUE)
+ error = (*pcd->pcd_write_pmc)(cpu, adjri,
+ prw.pm_value);
+ error:
+ critical_exit();
+ pmc_restore_cpu_binding(&pb);
+ if (error)
+ break;
+ }
+
+ pprw = (struct pmc_op_pmcrw *) arg;
+
+#ifdef DEBUG
+ if (prw.pm_flags & PMC_F_NEWVALUE)
+ PMCDBG(PMC,OPS,2, "rw id=%d new %jx -> old %jx",
+ ri, prw.pm_value, oldvalue);
+ else if (prw.pm_flags & PMC_F_OLDVALUE)
+ PMCDBG(PMC,OPS,2, "rw id=%d -> old %jx", ri, oldvalue);
+#endif
+
+ /* return old value if requested */
+ if (prw.pm_flags & PMC_F_OLDVALUE)
+ if ((error = copyout(&oldvalue, &pprw->pm_value,
+ sizeof(prw.pm_value))))
+ break;
+
+ }
+ break;
+
+
+ /*
+ * Set the sampling rate for a sampling mode PMC and the
+ * initial count for a counting mode PMC.
+ */
+
+ case PMC_OP_PMCSETCOUNT:
+ {
+ struct pmc *pm;
+ struct pmc_op_pmcsetcount sc;
+
+ PMC_DOWNGRADE_SX();
+
+ if ((error = copyin(arg, &sc, sizeof(sc))) != 0)
+ break;
+
+ if ((error = pmc_find_pmc(sc.pm_pmcid, &pm)) != 0)
+ break;
+
+ if (pm->pm_state == PMC_STATE_RUNNING) {
+ error = EBUSY;
+ break;
+ }
+
+ if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
+ pm->pm_sc.pm_reloadcount = sc.pm_count;
+ else
+ pm->pm_sc.pm_initial = sc.pm_count;
+ }
+ break;
+
+
+ /*
+ * Start a PMC.
+ */
+
+ case PMC_OP_PMCSTART:
+ {
+ pmc_id_t pmcid;
+ struct pmc *pm;
+ struct pmc_op_simple sp;
+
+ sx_assert(&pmc_sx, SX_XLOCKED);
+
+ if ((error = copyin(arg, &sp, sizeof(sp))) != 0)
+ break;
+
+ pmcid = sp.pm_pmcid;
+
+ if ((error = pmc_find_pmc(pmcid, &pm)) != 0)
+ break;
+
+ KASSERT(pmcid == pm->pm_id,
+ ("[pmc,%d] pmcid %x != id %x", __LINE__,
+ pm->pm_id, pmcid));
+
+ if (pm->pm_state == PMC_STATE_RUNNING) /* already running */
+ break;
+ else if (pm->pm_state != PMC_STATE_STOPPED &&
+ pm->pm_state != PMC_STATE_ALLOCATED) {
+ error = EINVAL;
+ break;
+ }
+
+ error = pmc_start(pm);
+ }
+ break;
+
+
+ /*
+ * Stop a PMC.
+ */
+
+ case PMC_OP_PMCSTOP:
+ {
+ pmc_id_t pmcid;
+ struct pmc *pm;
+ struct pmc_op_simple sp;
+
+ PMC_DOWNGRADE_SX();
+
+ if ((error = copyin(arg, &sp, sizeof(sp))) != 0)
+ break;
+
+ pmcid = sp.pm_pmcid;
+
+ /*
+ * Mark the PMC as inactive and invoke the MD stop
+ * routines if needed.
+ */
+
+ if ((error = pmc_find_pmc(pmcid, &pm)) != 0)
+ break;
+
+ KASSERT(pmcid == pm->pm_id,
+ ("[pmc,%d] pmc id %x != pmcid %x", __LINE__,
+ pm->pm_id, pmcid));
+
+ if (pm->pm_state == PMC_STATE_STOPPED) /* already stopped */
+ break;
+ else if (pm->pm_state != PMC_STATE_RUNNING) {
+ error = EINVAL;
+ break;
+ }
+
+ error = pmc_stop(pm);
+ }
+ break;
+
+
+ /*
+	 * Write a user-supplied value to the log file.
+ */
+
+ case PMC_OP_WRITELOG:
+ {
+ struct pmc_op_writelog wl;
+ struct pmc_owner *po;
+
+ PMC_DOWNGRADE_SX();
+
+ if ((error = copyin(arg, &wl, sizeof(wl))) != 0)
+ break;
+
+ if ((po = pmc_find_owner_descriptor(td->td_proc)) == NULL) {
+ error = EINVAL;
+ break;
+ }
+
+ if ((po->po_flags & PMC_PO_OWNS_LOGFILE) == 0) {
+ error = EINVAL;
+ break;
+ }
+
+ error = pmclog_process_userlog(po, &wl);
+ }
+ break;
+
+
+ default:
+ error = EINVAL;
+ break;
+ }
+
+ if (is_sx_locked != 0) {
+ if (is_sx_downgraded)
+ sx_sunlock(&pmc_sx);
+ else
+ sx_xunlock(&pmc_sx);
+ }
+
+ if (error)
+ atomic_add_int(&pmc_stats.pm_syscall_errors, 1);
+
+ PICKUP_GIANT();
+
+ return error;
+}
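+
+/*
+ * Illustrative sketch (not part of this driver): how a userspace
+ * process might use the counter number returned by PMC_OP_PMCGETMSR
+ * above to read its attached PMC directly with RDPMC.  The helper
+ * name, and the assumption that 'pm_msr' is directly usable as an
+ * RDPMC counter index, are ours.
+ */
+#if 0	/* userspace example, x86 */
+#include <stdint.h>
+
+static inline uint64_t
+read_pmc_direct(uint32_t pm_msr)
+{
+	uint32_t lo, hi;
+
+	/* RDPMC reads the counter selected by %ecx into %edx:%eax. */
+	__asm__ __volatile__("rdpmc" : "=a" (lo), "=d" (hi) : "c" (pm_msr));
+	return (((uint64_t)hi << 32) | lo);
+}
+#endif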
+
+/*
+ * Helper functions
+ */
+
+
+/*
+ * Mark the thread as needing callchain capture and post an AST. The
+ * actual callchain capture will be done in a context where it is safe
+ * to take page faults.
+ */
+
+static void
+pmc_post_callchain_callback(void)
+{
+ struct thread *td;
+
+ td = curthread;
+
+	/*
+	 * If there are multiple PMCs for the same interrupt, ignore
+	 * this new posting; a callchain capture is already pending.
+	 */
+ if (td->td_pflags & TDP_CALLCHAIN)
+ return;
+
+ /*
+ * Mark this thread as needing callchain capture.
+ * `td->td_pflags' will be safe to touch because this thread
+ * was in user space when it was interrupted.
+ */
+ td->td_pflags |= TDP_CALLCHAIN;
+
+ /*
+ * Don't let this thread migrate between CPUs until callchain
+ * capture completes.
+ */
+ sched_pin();
+
+ return;
+}
+
+/*
+ * Interrupt processing.
+ *
+ * Find a free slot in the per-cpu array of samples and capture the
+ * current callchain there. If a sample was successfully added, a bit
+ * is set in mask 'pmc_cpumask' denoting that the DO_SAMPLES hook
+ * needs to be invoked from the clock handler.
+ *
+ * This function is meant to be called from an NMI handler. It cannot
+ * use any of the locking primitives supplied by the OS.
+ */
+
+int
+pmc_process_interrupt(int cpu, int ring, struct pmc *pm, struct trapframe *tf,
+ int inuserspace)
+{
+ int error, callchaindepth;
+ struct thread *td;
+ struct pmc_sample *ps;
+ struct pmc_samplebuffer *psb;
+
+ error = 0;
+
+	/*
+	 * Locate this CPU's sample buffer for the given ring.
+	 */
+ psb = pmc_pcpu[cpu]->pc_sb[ring];
+
+ ps = psb->ps_write;
+ if (ps->ps_nsamples) { /* in use, reader hasn't caught up */
+ pm->pm_stalled = 1;
+ atomic_add_int(&pmc_stats.pm_intr_bufferfull, 1);
+ PMCDBG(SAM,INT,1,"(spc) cpu=%d pm=%p tf=%p um=%d wr=%d rd=%d",
+ cpu, pm, (void *) tf, inuserspace,
+ (int) (psb->ps_write - psb->ps_samples),
+ (int) (psb->ps_read - psb->ps_samples));
+ error = ENOMEM;
+ goto done;
+ }
+
+
+ /* Fill in entry. */
+ PMCDBG(SAM,INT,1,"cpu=%d pm=%p tf=%p um=%d wr=%d rd=%d", cpu, pm,
+ (void *) tf, inuserspace,
+ (int) (psb->ps_write - psb->ps_samples),
+ (int) (psb->ps_read - psb->ps_samples));
+
+ KASSERT(pm->pm_runcount >= 0,
+ ("[pmc,%d] pm=%p runcount %d", __LINE__, (void *) pm,
+ pm->pm_runcount));
+
+ atomic_add_rel_int(&pm->pm_runcount, 1); /* hold onto PMC */
+
+ ps->ps_pmc = pm;
+ if ((td = curthread) && td->td_proc)
+ ps->ps_pid = td->td_proc->p_pid;
+ else
+ ps->ps_pid = -1;
+ ps->ps_cpu = cpu;
+ ps->ps_td = td;
+ ps->ps_flags = inuserspace ? PMC_CC_F_USERSPACE : 0;
+
+ callchaindepth = (pm->pm_flags & PMC_F_CALLCHAIN) ?
+ pmc_callchaindepth : 1;
+
+ if (callchaindepth == 1)
+ ps->ps_pc[0] = PMC_TRAPFRAME_TO_PC(tf);
+ else {
+ /*
+ * Kernel stack traversals can be done immediately,
+ * while we defer to an AST for user space traversals.
+ */
+ if (!inuserspace) {
+ callchaindepth =
+ pmc_save_kernel_callchain(ps->ps_pc,
+ callchaindepth, tf);
+ } else {
+ pmc_post_callchain_callback();
+ callchaindepth = PMC_SAMPLE_INUSE;
+ }
+ }
+
+ ps->ps_nsamples = callchaindepth; /* mark entry as in use */
+
+ /* increment write pointer, modulo ring buffer size */
+ ps++;
+ if (ps == psb->ps_fence)
+ psb->ps_write = psb->ps_samples;
+ else
+ psb->ps_write = ps;
+
+ done:
+ /* mark CPU as needing processing */
+ CPU_SET_ATOMIC(cpu, &pmc_cpumask);
+
+ return (error);
+}
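+
+/*
+ * A minimal sketch of the ring buffer discipline used above,
+ * assuming a single NMI-context producer and a single timer-tick
+ * consumer per buffer: each slot's 'ps_nsamples' field doubles as
+ * its full/empty marker, so neither side needs a lock.  The function
+ * name is illustrative only.
+ */
+#if 0	/* illustrative sketch */
+static struct pmc_sample *
+ring_reserve_slot(struct pmc_samplebuffer *psb)
+{
+	struct pmc_sample *ps;
+
+	ps = psb->ps_write;
+	if (ps->ps_nsamples != PMC_SAMPLE_FREE)
+		return (NULL);	/* full: the reader hasn't caught up */
+
+	/* Advance the write pointer, wrapping at the fence. */
+	if (ps + 1 == psb->ps_fence)
+		psb->ps_write = psb->ps_samples;
+	else
+		psb->ps_write = ps + 1;
+
+	/* The caller fills '*ps' and sets 'ps_nsamples' last. */
+	return (ps);
+}
+#endif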
+
+/*
+ * Capture a user call chain. This function will be called from ast()
+ * before control returns to userland and before the process gets
+ * rescheduled.
+ */
+
+static void
+pmc_capture_user_callchain(int cpu, int ring, struct trapframe *tf)
+{
+ int i;
+ struct pmc *pm;
+ struct thread *td;
+ struct pmc_sample *ps;
+ struct pmc_samplebuffer *psb;
+#ifdef INVARIANTS
+ int ncallchains;
+#endif
+
+ psb = pmc_pcpu[cpu]->pc_sb[ring];
+ td = curthread;
+
+ KASSERT(td->td_pflags & TDP_CALLCHAIN,
+ ("[pmc,%d] Retrieving callchain for thread that doesn't want it",
+ __LINE__));
+
+#ifdef INVARIANTS
+ ncallchains = 0;
+#endif
+
+ /*
+ * Iterate through all deferred callchain requests.
+ */
+
+ ps = psb->ps_samples;
+ for (i = 0; i < pmc_nsamples; i++, ps++) {
+
+ if (ps->ps_nsamples != PMC_SAMPLE_INUSE)
+ continue;
+ if (ps->ps_td != td)
+ continue;
+
+ KASSERT(ps->ps_cpu == cpu,
+ ("[pmc,%d] cpu mismatch ps_cpu=%d pcpu=%d", __LINE__,
+ ps->ps_cpu, PCPU_GET(cpuid)));
+
+ pm = ps->ps_pmc;
+
+ KASSERT(pm->pm_flags & PMC_F_CALLCHAIN,
+ ("[pmc,%d] Retrieving callchain for PMC that doesn't "
+ "want it", __LINE__));
+
+ KASSERT(pm->pm_runcount > 0,
+ ("[pmc,%d] runcount %d", __LINE__, pm->pm_runcount));
+
+ /*
+ * Retrieve the callchain and mark the sample buffer
+ * as 'processable' by the timer tick sweep code.
+ */
+ ps->ps_nsamples = pmc_save_user_callchain(ps->ps_pc,
+ pmc_callchaindepth, tf);
+
+#ifdef INVARIANTS
+ ncallchains++;
+#endif
+ }
+
+ KASSERT(ncallchains > 0,
+ ("[pmc,%d] cpu %d didn't find a sample to collect", __LINE__,
+ cpu));
+
+ KASSERT(td->td_pinned == 1,
+ ("[pmc,%d] invalid td_pinned value", __LINE__));
+ sched_unpin(); /* Can migrate safely now. */
+
+ return;
+}
+
+/*
+ * Process saved PC samples.
+ */
+
+static void
+pmc_process_samples(int cpu, int ring)
+{
+ struct pmc *pm;
+ int adjri, n;
+ struct thread *td;
+ struct pmc_owner *po;
+ struct pmc_sample *ps;
+ struct pmc_classdep *pcd;
+ struct pmc_samplebuffer *psb;
+
+ KASSERT(PCPU_GET(cpuid) == cpu,
+ ("[pmc,%d] not on the correct CPU pcpu=%d cpu=%d", __LINE__,
+ PCPU_GET(cpuid), cpu));
+
+ psb = pmc_pcpu[cpu]->pc_sb[ring];
+
+ for (n = 0; n < pmc_nsamples; n++) { /* bound on #iterations */
+
+ ps = psb->ps_read;
+ if (ps->ps_nsamples == PMC_SAMPLE_FREE)
+ break;
+
+ pm = ps->ps_pmc;
+
+ KASSERT(pm->pm_runcount > 0,
+ ("[pmc,%d] pm=%p runcount %d", __LINE__, (void *) pm,
+ pm->pm_runcount));
+
+ po = pm->pm_owner;
+
+ KASSERT(PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)),
+ ("[pmc,%d] pmc=%p non-sampling mode=%d", __LINE__,
+ pm, PMC_TO_MODE(pm)));
+
+ /* Ignore PMCs that have been switched off */
+ if (pm->pm_state != PMC_STATE_RUNNING)
+ goto entrydone;
+
+ /* If there is a pending AST wait for completion */
+ if (ps->ps_nsamples == PMC_SAMPLE_INUSE) {
+ /* Need a rescan at a later time. */
+ CPU_SET_ATOMIC(cpu, &pmc_cpumask);
+ break;
+ }
+
+ PMCDBG(SAM,OPS,1,"cpu=%d pm=%p n=%d fl=%x wr=%d rd=%d", cpu,
+ pm, ps->ps_nsamples, ps->ps_flags,
+ (int) (psb->ps_write - psb->ps_samples),
+ (int) (psb->ps_read - psb->ps_samples));
+
+ /*
+ * If this is a process-mode PMC that is attached to
+ * its owner, and if the PC is in user mode, update
+ * profiling statistics like timer-based profiling
+ * would have done.
+ */
+ if (pm->pm_flags & PMC_F_ATTACHED_TO_OWNER) {
+ if (ps->ps_flags & PMC_CC_F_USERSPACE) {
+ td = FIRST_THREAD_IN_PROC(po->po_owner);
+ addupc_intr(td, ps->ps_pc[0], 1);
+ }
+ goto entrydone;
+ }
+
+ /*
+ * Otherwise, this is either a sampling mode PMC that
+ * is attached to a different process than its owner,
+ * or a system-wide sampling PMC. Dispatch a log
+ * entry to the PMC's owner process.
+ */
+ pmclog_process_callchain(pm, ps);
+
+ entrydone:
+ ps->ps_nsamples = 0; /* mark entry as free */
+ atomic_subtract_rel_int(&pm->pm_runcount, 1);
+
+		/* increment read pointer, modulo ring buffer size */
+ if (++ps == psb->ps_fence)
+ psb->ps_read = psb->ps_samples;
+ else
+ psb->ps_read = ps;
+ }
+
+ atomic_add_int(&pmc_stats.pm_log_sweeps, 1);
+
+ /* Do not re-enable stalled PMCs if we failed to process any samples */
+ if (n == 0)
+ return;
+
+ /*
+ * Restart any stalled sampling PMCs on this CPU.
+ *
+ * If the NMI handler sets the pm_stalled field of a PMC after
+ * the check below, we'll end up processing the stalled PMC at
+ * the next hardclock tick.
+ */
+ for (n = 0; n < md->pmd_npmc; n++) {
+ pcd = pmc_ri_to_classdep(md, n, &adjri);
+ KASSERT(pcd != NULL,
+ ("[pmc,%d] null pcd ri=%d", __LINE__, n));
+ (void) (*pcd->pcd_get_config)(cpu,adjri,&pm);
+
+ if (pm == NULL || /* !cfg'ed */
+ pm->pm_state != PMC_STATE_RUNNING || /* !active */
+ !PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)) || /* !sampling */
+ pm->pm_stalled == 0) /* !stalled */
+ continue;
+
+ pm->pm_stalled = 0;
+ (*pcd->pcd_start_pmc)(cpu, adjri);
+ }
+}
+
+/*
+ * Event handlers.
+ */
+
+/*
+ * Handle a process exit.
+ *
+ * Remove this process from all hash tables. If this process
+ * owned any PMCs, turn off those PMCs and deallocate them,
+ * removing any associations with target processes.
+ *
+ * This function will be called by the last 'thread' of a
+ * process.
+ *
+ * XXX This eventhandler gets called early in the exit process.
+ * Consider using a 'hook' invocation from thread_exit() or equivalent
+ * spot. Another negative is that kse_exit doesn't seem to call
+ * exit1() [??].
+ *
+ */
+
+static void
+pmc_process_exit(void *arg __unused, struct proc *p)
+{
+ struct pmc *pm;
+ int adjri, cpu;
+ unsigned int ri;
+ int is_using_hwpmcs;
+ struct pmc_owner *po;
+ struct pmc_process *pp;
+ struct pmc_classdep *pcd;
+ pmc_value_t newvalue, tmp;
+
+ PROC_LOCK(p);
+ is_using_hwpmcs = p->p_flag & P_HWPMC;
+ PROC_UNLOCK(p);
+
+ /*
+ * Log a sysexit event to all SS PMC owners.
+ */
+ LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
+ if (po->po_flags & PMC_PO_OWNS_LOGFILE)
+ pmclog_process_sysexit(po, p->p_pid);
+
+ if (!is_using_hwpmcs)
+ return;
+
+ PMC_GET_SX_XLOCK();
+ PMCDBG(PRC,EXT,1,"process-exit proc=%p (%d, %s)", p, p->p_pid,
+ p->p_comm);
+
+ /*
+ * Since this code is invoked by the last thread in an exiting
+ * process, we would have context switched IN at some prior
+ * point. However, with PREEMPTION, kernel mode context
+	 * switches may happen at any time, so we want to disable a
+	 * context switch OUT until we get any PMCs targeting this
+	 * process off the hardware.
+ *
+ * We also need to atomically remove this process'
+ * entry from our target process hash table, using
+ * PMC_FLAG_REMOVE.
+ */
+
+ critical_enter(); /* no preemption */
+
+ cpu = curthread->td_oncpu;
+
+ if ((pp = pmc_find_process_descriptor(p,
+ PMC_FLAG_REMOVE)) != NULL) {
+
+ PMCDBG(PRC,EXT,2,
+ "process-exit proc=%p pmc-process=%p", p, pp);
+
+ /*
+		 * The exiting process could be the target of
+		 * some PMCs that are currently running on
+		 * the executing CPU.
+ *
+ * We need to turn these PMCs off like we
+ * would do at context switch OUT time.
+ */
+ for (ri = 0; ri < md->pmd_npmc; ri++) {
+
+ /*
+ * Pick up the pmc pointer from hardware
+ * state similar to the CSW_OUT code.
+ */
+ pm = NULL;
+
+ pcd = pmc_ri_to_classdep(md, ri, &adjri);
+
+ (void) (*pcd->pcd_get_config)(cpu, adjri, &pm);
+
+ PMCDBG(PRC,EXT,2, "ri=%d pm=%p", ri, pm);
+
+ if (pm == NULL ||
+ !PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)))
+ continue;
+
+ PMCDBG(PRC,EXT,2, "ppmcs[%d]=%p pm=%p "
+ "state=%d", ri, pp->pp_pmcs[ri].pp_pmc,
+ pm, pm->pm_state);
+
+ KASSERT(PMC_TO_ROWINDEX(pm) == ri,
+ ("[pmc,%d] ri mismatch pmc(%d) ri(%d)",
+ __LINE__, PMC_TO_ROWINDEX(pm), ri));
+
+ KASSERT(pm == pp->pp_pmcs[ri].pp_pmc,
+ ("[pmc,%d] pm %p != pp_pmcs[%d] %p",
+ __LINE__, pm, ri, pp->pp_pmcs[ri].pp_pmc));
+
+ (void) pcd->pcd_stop_pmc(cpu, adjri);
+
+ KASSERT(pm->pm_runcount > 0,
+ ("[pmc,%d] bad runcount ri %d rc %d",
+ __LINE__, ri, pm->pm_runcount));
+
+			/*
+			 * Accumulate the final count only if the PMC
+			 * was actually running (i.e., not stalled).
+			 */
+ if (pm->pm_state == PMC_STATE_RUNNING &&
+ pm->pm_stalled == 0) {
+ pcd->pcd_read_pmc(cpu, adjri, &newvalue);
+ tmp = newvalue -
+ PMC_PCPU_SAVED(cpu,ri);
+
+ mtx_pool_lock_spin(pmc_mtxpool, pm);
+ pm->pm_gv.pm_savedvalue += tmp;
+ pp->pp_pmcs[ri].pp_pmcval += tmp;
+ mtx_pool_unlock_spin(pmc_mtxpool, pm);
+ }
+
+ atomic_subtract_rel_int(&pm->pm_runcount,1);
+
+ KASSERT((int) pm->pm_runcount >= 0,
+			    ("[pmc,%d] runcount is %d", __LINE__, pm->pm_runcount));
+
+ (void) pcd->pcd_config_pmc(cpu, adjri, NULL);
+ }
+
+ /*
+ * Inform the MD layer of this pseudo "context switch
+ * out"
+ */
+ (void) md->pmd_switch_out(pmc_pcpu[cpu], pp);
+
+ critical_exit(); /* ok to be pre-empted now */
+
+ /*
+ * Unlink this process from the PMCs that are
+		 * targeting it.  This will send a signal to
+		 * all PMC owners whose PMCs are orphaned.
+ *
+ * Log PMC value at exit time if requested.
+ */
+ for (ri = 0; ri < md->pmd_npmc; ri++)
+ if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL) {
+ if (pm->pm_flags & PMC_F_NEEDS_LOGFILE &&
+ PMC_IS_COUNTING_MODE(PMC_TO_MODE(pm)))
+ pmclog_process_procexit(pm, pp);
+ pmc_unlink_target_process(pm, pp);
+ }
+ free(pp, M_PMC);
+
+ } else
+ critical_exit(); /* pp == NULL */
+
+
+ /*
+ * If the process owned PMCs, free them up and free up
+ * memory.
+ */
+ if ((po = pmc_find_owner_descriptor(p)) != NULL) {
+ pmc_remove_owner(po);
+ pmc_destroy_owner_descriptor(po);
+ }
+
+ sx_xunlock(&pmc_sx);
+}
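+
+/*
+ * The counter arithmetic used above when taking a virtual PMC off
+ * the hardware, as a worked example (values illustrative):
+ *
+ *	hw count at context switch IN:	PMC_PCPU_SAVED(cpu,ri) = 1000
+ *	hw count at process exit:	newvalue               = 1750
+ *	delta accumulated:		tmp = 1750 - 1000      =  750
+ *
+ * The delta is credited both to the PMC's global saved value
+ * (pm_gv.pm_savedvalue) and to the per-target count
+ * (pp_pmcs[ri].pp_pmcval), under the PMC's spin mutex.
+ */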
+
+/*
+ * Handle a process fork.
+ *
+ * If the parent process 'p1' is under HWPMC monitoring, then copy
+ * over any attached PMCs that have 'do_descendants' semantics.
+ */
+
+static void
+pmc_process_fork(void *arg __unused, struct proc *p1, struct proc *newproc,
+ int flags)
+{
+ int is_using_hwpmcs;
+ unsigned int ri;
+ uint32_t do_descendants;
+ struct pmc *pm;
+ struct pmc_owner *po;
+ struct pmc_process *ppnew, *ppold;
+
+ (void) flags; /* unused parameter */
+
+ PROC_LOCK(p1);
+ is_using_hwpmcs = p1->p_flag & P_HWPMC;
+ PROC_UNLOCK(p1);
+
+ /*
+ * If there are system-wide sampling PMCs active, we need to
+ * log all fork events to their owner's logs.
+ */
+
+ LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
+ if (po->po_flags & PMC_PO_OWNS_LOGFILE)
+ pmclog_process_procfork(po, p1->p_pid, newproc->p_pid);
+
+ if (!is_using_hwpmcs)
+ return;
+
+ PMC_GET_SX_XLOCK();
+ PMCDBG(PMC,FRK,1, "process-fork proc=%p (%d, %s) -> %p", p1,
+ p1->p_pid, p1->p_comm, newproc);
+
+ /*
+ * If the parent process (curthread->td_proc) is a
+ * target of any PMCs, look for PMCs that are to be
+ * inherited, and link these into the new process
+ * descriptor.
+ */
+ if ((ppold = pmc_find_process_descriptor(curthread->td_proc,
+ PMC_FLAG_NONE)) == NULL)
+ goto done; /* nothing to do */
+
+ do_descendants = 0;
+ for (ri = 0; ri < md->pmd_npmc; ri++)
+ if ((pm = ppold->pp_pmcs[ri].pp_pmc) != NULL)
+ do_descendants |= pm->pm_flags & PMC_F_DESCENDANTS;
+ if (do_descendants == 0) /* nothing to do */
+ goto done;
+
+ /* allocate a descriptor for the new process */
+ if ((ppnew = pmc_find_process_descriptor(newproc,
+ PMC_FLAG_ALLOCATE)) == NULL)
+ goto done;
+
+ /*
+ * Run through all PMCs that were targeting the old process
+ * and which specified F_DESCENDANTS and attach them to the
+ * new process.
+ *
+ * Log the fork event to all owners of PMCs attached to this
+ * process, if not already logged.
+ */
+ for (ri = 0; ri < md->pmd_npmc; ri++)
+ if ((pm = ppold->pp_pmcs[ri].pp_pmc) != NULL &&
+ (pm->pm_flags & PMC_F_DESCENDANTS)) {
+ pmc_link_target_process(pm, ppnew);
+ po = pm->pm_owner;
+ if (po->po_sscount == 0 &&
+ po->po_flags & PMC_PO_OWNS_LOGFILE)
+ pmclog_process_procfork(po, p1->p_pid,
+ newproc->p_pid);
+ }
+
+ /*
+ * Now mark the new process as being tracked by this driver.
+ */
+ PROC_LOCK(newproc);
+ newproc->p_flag |= P_HWPMC;
+ PROC_UNLOCK(newproc);
+
+ done:
+ sx_xunlock(&pmc_sx);
+}
+
+
+/*
+ * initialization
+ */
+
+static const char *pmc_name_of_pmcclass[] = {
+#undef __PMC_CLASS
+#define __PMC_CLASS(N) #N ,
+ __PMC_CLASSES()
+};
+
+/*
+ * Base class initializer: allocate structure and set default classes.
+ */
+struct pmc_mdep *
+pmc_mdep_alloc(int nclasses)
+{
+ struct pmc_mdep *md;
+ int n;
+
+ /* SOFT + md classes */
+ n = 1 + nclasses;
+ md = malloc(sizeof(struct pmc_mdep) + n *
+ sizeof(struct pmc_classdep), M_PMC, M_WAITOK|M_ZERO);
+ if (md != NULL) {
+ md->pmd_nclass = n;
+
+ /* Add base class. */
+ pmc_soft_initialize(md);
+ }
+
+ return md;
+}
+
+void
+pmc_mdep_free(struct pmc_mdep *md)
+{
+ pmc_soft_finalize(md);
+ free(md, M_PMC);
+}
+
+static int
+generic_switch_in(struct pmc_cpu *pc, struct pmc_process *pp)
+{
+ (void) pc; (void) pp;
+
+ return (0);
+}
+
+static int
+generic_switch_out(struct pmc_cpu *pc, struct pmc_process *pp)
+{
+ (void) pc; (void) pp;
+
+ return (0);
+}
+
+static struct pmc_mdep *
+pmc_generic_cpu_initialize(void)
+{
+ struct pmc_mdep *md;
+
+ md = pmc_mdep_alloc(0);
+
+ md->pmd_cputype = PMC_CPU_GENERIC;
+
+ md->pmd_pcpu_init = NULL;
+ md->pmd_pcpu_fini = NULL;
+ md->pmd_switch_in = generic_switch_in;
+ md->pmd_switch_out = generic_switch_out;
+
+ return (md);
+}
+
+static void
+pmc_generic_cpu_finalize(struct pmc_mdep *md)
+{
+ (void) md;
+}
+
+
+static int
+pmc_initialize(void)
+{
+ int c, cpu, error, n, ri;
+ unsigned int maxcpu;
+ struct pmc_binding pb;
+ struct pmc_sample *ps;
+ struct pmc_classdep *pcd;
+ struct pmc_samplebuffer *sb;
+
+ md = NULL;
+ error = 0;
+
+#ifdef DEBUG
+ /* parse debug flags first */
+ if (TUNABLE_STR_FETCH(PMC_SYSCTL_NAME_PREFIX "debugflags",
+ pmc_debugstr, sizeof(pmc_debugstr)))
+ pmc_debugflags_parse(pmc_debugstr,
+ pmc_debugstr+strlen(pmc_debugstr));
+#endif
+
+ PMCDBG(MOD,INI,0, "PMC Initialize (version %x)", PMC_VERSION);
+
+ /* check kernel version */
+ if (pmc_kernel_version != PMC_VERSION) {
+ if (pmc_kernel_version == 0)
+ printf("hwpmc: this kernel has not been compiled with "
+ "'options HWPMC_HOOKS'.\n");
+ else
+ printf("hwpmc: kernel version (0x%x) does not match "
+ "module version (0x%x).\n", pmc_kernel_version,
+ PMC_VERSION);
+ return EPROGMISMATCH;
+ }
+
+ /*
+ * check sysctl parameters
+ */
+
+ if (pmc_hashsize <= 0) {
+ (void) printf("hwpmc: tunable \"hashsize\"=%d must be "
+ "greater than zero.\n", pmc_hashsize);
+ pmc_hashsize = PMC_HASH_SIZE;
+ }
+
+ if (pmc_nsamples <= 0 || pmc_nsamples > 65535) {
+ (void) printf("hwpmc: tunable \"nsamples\"=%d out of "
+ "range.\n", pmc_nsamples);
+ pmc_nsamples = PMC_NSAMPLES;
+ }
+
+ if (pmc_callchaindepth <= 0 ||
+ pmc_callchaindepth > PMC_CALLCHAIN_DEPTH_MAX) {
+ (void) printf("hwpmc: tunable \"callchaindepth\"=%d out of "
+ "range.\n", pmc_callchaindepth);
+ pmc_callchaindepth = PMC_CALLCHAIN_DEPTH;
+ }
+
+ md = pmc_md_initialize();
+ if (md == NULL) {
+ /* Default to generic CPU. */
+ md = pmc_generic_cpu_initialize();
+ if (md == NULL)
+ return (ENOSYS);
+ }
+
+ KASSERT(md->pmd_nclass >= 1 && md->pmd_npmc >= 1,
+ ("[pmc,%d] no classes or pmcs", __LINE__));
+
+ /* Compute the map from row-indices to classdep pointers. */
+ pmc_rowindex_to_classdep = malloc(sizeof(struct pmc_classdep *) *
+ md->pmd_npmc, M_PMC, M_WAITOK|M_ZERO);
+
+ for (n = 0; n < md->pmd_npmc; n++)
+ pmc_rowindex_to_classdep[n] = NULL;
+ for (ri = c = 0; c < md->pmd_nclass; c++) {
+ pcd = &md->pmd_classdep[c];
+ for (n = 0; n < pcd->pcd_num; n++, ri++)
+ pmc_rowindex_to_classdep[ri] = pcd;
+ }
+
+ KASSERT(ri == md->pmd_npmc,
+ ("[pmc,%d] npmc miscomputed: ri=%d, md->npmc=%d", __LINE__,
+ ri, md->pmd_npmc));
+
+ maxcpu = pmc_cpu_max();
+
+ /* allocate space for the per-cpu array */
+ pmc_pcpu = malloc(maxcpu * sizeof(struct pmc_cpu *), M_PMC,
+ M_WAITOK|M_ZERO);
+
+ /* per-cpu 'saved values' for managing process-mode PMCs */
+ pmc_pcpu_saved = malloc(sizeof(pmc_value_t) * maxcpu * md->pmd_npmc,
+ M_PMC, M_WAITOK);
+
+ /* Perform CPU-dependent initialization. */
+ pmc_save_cpu_binding(&pb);
+ error = 0;
+ for (cpu = 0; error == 0 && cpu < maxcpu; cpu++) {
+ if (!pmc_cpu_is_active(cpu))
+ continue;
+ pmc_select_cpu(cpu);
+ pmc_pcpu[cpu] = malloc(sizeof(struct pmc_cpu) +
+ md->pmd_npmc * sizeof(struct pmc_hw *), M_PMC,
+ M_WAITOK|M_ZERO);
+ if (md->pmd_pcpu_init)
+ error = md->pmd_pcpu_init(md, cpu);
+ for (n = 0; error == 0 && n < md->pmd_nclass; n++)
+ error = md->pmd_classdep[n].pcd_pcpu_init(md, cpu);
+ }
+ pmc_restore_cpu_binding(&pb);
+
+ if (error)
+ return (error);
+
+ /* allocate space for the sample array */
+ for (cpu = 0; cpu < maxcpu; cpu++) {
+ if (!pmc_cpu_is_active(cpu))
+ continue;
+
+ sb = malloc(sizeof(struct pmc_samplebuffer) +
+ pmc_nsamples * sizeof(struct pmc_sample), M_PMC,
+ M_WAITOK|M_ZERO);
+ sb->ps_read = sb->ps_write = sb->ps_samples;
+ sb->ps_fence = sb->ps_samples + pmc_nsamples;
+
+ KASSERT(pmc_pcpu[cpu] != NULL,
+ ("[pmc,%d] cpu=%d Null per-cpu data", __LINE__, cpu));
+
+ sb->ps_callchains = malloc(pmc_callchaindepth * pmc_nsamples *
+ sizeof(uintptr_t), M_PMC, M_WAITOK|M_ZERO);
+
+ for (n = 0, ps = sb->ps_samples; n < pmc_nsamples; n++, ps++)
+ ps->ps_pc = sb->ps_callchains +
+ (n * pmc_callchaindepth);
+
+ pmc_pcpu[cpu]->pc_sb[PMC_HR] = sb;
+
+ sb = malloc(sizeof(struct pmc_samplebuffer) +
+ pmc_nsamples * sizeof(struct pmc_sample), M_PMC,
+ M_WAITOK|M_ZERO);
+ sb->ps_read = sb->ps_write = sb->ps_samples;
+ sb->ps_fence = sb->ps_samples + pmc_nsamples;
+
+ KASSERT(pmc_pcpu[cpu] != NULL,
+ ("[pmc,%d] cpu=%d Null per-cpu data", __LINE__, cpu));
+
+ sb->ps_callchains = malloc(pmc_callchaindepth * pmc_nsamples *
+ sizeof(uintptr_t), M_PMC, M_WAITOK|M_ZERO);
+
+ for (n = 0, ps = sb->ps_samples; n < pmc_nsamples; n++, ps++)
+ ps->ps_pc = sb->ps_callchains +
+ (n * pmc_callchaindepth);
+
+ pmc_pcpu[cpu]->pc_sb[PMC_SR] = sb;
+ }
+
+ /* allocate space for the row disposition array */
+ pmc_pmcdisp = malloc(sizeof(enum pmc_mode) * md->pmd_npmc,
+ M_PMC, M_WAITOK|M_ZERO);
+
+ KASSERT(pmc_pmcdisp != NULL,
+ ("[pmc,%d] pmcdisp allocation returned NULL", __LINE__));
+
+ /* mark all PMCs as available */
+ for (n = 0; n < (int) md->pmd_npmc; n++)
+ PMC_MARK_ROW_FREE(n);
+
+ /* allocate thread hash tables */
+ pmc_ownerhash = hashinit(pmc_hashsize, M_PMC,
+ &pmc_ownerhashmask);
+
+ pmc_processhash = hashinit(pmc_hashsize, M_PMC,
+ &pmc_processhashmask);
+ mtx_init(&pmc_processhash_mtx, "pmc-process-hash", "pmc-leaf",
+ MTX_SPIN);
+
+ LIST_INIT(&pmc_ss_owners);
+ pmc_ss_count = 0;
+
+ /* allocate a pool of spin mutexes */
+ pmc_mtxpool = mtx_pool_create("pmc-leaf", pmc_mtxpool_size,
+ MTX_SPIN);
+
+ PMCDBG(MOD,INI,1, "pmc_ownerhash=%p, mask=0x%lx "
+ "targethash=%p mask=0x%lx", pmc_ownerhash, pmc_ownerhashmask,
+ pmc_processhash, pmc_processhashmask);
+
+ /* register process {exit,fork,exec} handlers */
+ pmc_exit_tag = EVENTHANDLER_REGISTER(process_exit,
+ pmc_process_exit, NULL, EVENTHANDLER_PRI_ANY);
+ pmc_fork_tag = EVENTHANDLER_REGISTER(process_fork,
+ pmc_process_fork, NULL, EVENTHANDLER_PRI_ANY);
+
+ /* initialize logging */
+ pmclog_initialize();
+
+ /* set hook functions */
+ pmc_intr = md->pmd_intr;
+ pmc_hook = pmc_hook_handler;
+
+ if (error == 0) {
+ printf(PMC_MODULE_NAME ":");
+ for (n = 0; n < (int) md->pmd_nclass; n++) {
+ pcd = &md->pmd_classdep[n];
+ printf(" %s/%d/%d/0x%b",
+ pmc_name_of_pmcclass[pcd->pcd_class],
+ pcd->pcd_num,
+ pcd->pcd_width,
+ pcd->pcd_caps,
+ "\20"
+ "\1INT\2USR\3SYS\4EDG\5THR"
+ "\6REA\7WRI\10INV\11QUA\12PRC"
+ "\13TAG\14CSC");
+ }
+ printf("\n");
+ }
+
+ return (error);
+}
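+
+/*
+ * For reference, a plausible shape (a sketch, not necessarily the
+ * exact definition used earlier in this file) of the lookup helper
+ * that consumes the 'pmc_rowindex_to_classdep[]' table built in
+ * pmc_initialize() above, assuming each class descriptor records its
+ * first row index in 'pcd_ri':
+ */
+#if 0	/* illustrative sketch */
+static struct pmc_classdep *
+ri_to_classdep_sketch(struct pmc_mdep *md, int ri, int *adjri)
+{
+	struct pmc_classdep *pcd;
+
+	pcd = pmc_rowindex_to_classdep[ri];
+	/* Convert the global row index to a class-relative index. */
+	*adjri = ri - pcd->pcd_ri;
+	return (pcd);
+}
+#endif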
+
+/* prepare to be unloaded */
+static void
+pmc_cleanup(void)
+{
+ int c, cpu;
+ unsigned int maxcpu;
+ struct pmc_ownerhash *ph;
+ struct pmc_owner *po, *tmp;
+ struct pmc_binding pb;
+#ifdef DEBUG
+ struct pmc_processhash *prh;
+#endif
+
+ PMCDBG(MOD,INI,0, "%s", "cleanup");
+
+ /* switch off sampling */
+ CPU_ZERO(&pmc_cpumask);
+ pmc_intr = NULL;
+
+ sx_xlock(&pmc_sx);
+ if (pmc_hook == NULL) { /* being unloaded already */
+ sx_xunlock(&pmc_sx);
+ return;
+ }
+
+ pmc_hook = NULL; /* prevent new threads from entering module */
+
+ /* deregister event handlers */
+ EVENTHANDLER_DEREGISTER(process_fork, pmc_fork_tag);
+ EVENTHANDLER_DEREGISTER(process_exit, pmc_exit_tag);
+
+ /* send SIGBUS to all owner threads, free up allocations */
+ if (pmc_ownerhash)
+ for (ph = pmc_ownerhash;
+ ph <= &pmc_ownerhash[pmc_ownerhashmask];
+ ph++) {
+ LIST_FOREACH_SAFE(po, ph, po_next, tmp) {
+ pmc_remove_owner(po);
+
+ /* send SIGBUS to owner processes */
+ PMCDBG(MOD,INI,2, "cleanup signal proc=%p "
+ "(%d, %s)", po->po_owner,
+ po->po_owner->p_pid,
+ po->po_owner->p_comm);
+
+ PROC_LOCK(po->po_owner);
+ kern_psignal(po->po_owner, SIGBUS);
+ PROC_UNLOCK(po->po_owner);
+
+ pmc_destroy_owner_descriptor(po);
+ }
+ }
+
+ /* reclaim allocated data structures */
+ if (pmc_mtxpool)
+ mtx_pool_destroy(&pmc_mtxpool);
+
+ mtx_destroy(&pmc_processhash_mtx);
+ if (pmc_processhash) {
+#ifdef DEBUG
+ struct pmc_process *pp;
+
+ PMCDBG(MOD,INI,3, "%s", "destroy process hash");
+ for (prh = pmc_processhash;
+ prh <= &pmc_processhash[pmc_processhashmask];
+ prh++)
+ LIST_FOREACH(pp, prh, pp_next)
+ PMCDBG(MOD,INI,3, "pid=%d", pp->pp_proc->p_pid);
+#endif
+
+ hashdestroy(pmc_processhash, M_PMC, pmc_processhashmask);
+ pmc_processhash = NULL;
+ }
+
+ if (pmc_ownerhash) {
+ PMCDBG(MOD,INI,3, "%s", "destroy owner hash");
+ hashdestroy(pmc_ownerhash, M_PMC, pmc_ownerhashmask);
+ pmc_ownerhash = NULL;
+ }
+
+ KASSERT(LIST_EMPTY(&pmc_ss_owners),
+ ("[pmc,%d] Global SS owner list not empty", __LINE__));
+ KASSERT(pmc_ss_count == 0,
+ ("[pmc,%d] Global SS count not empty", __LINE__));
+
+ /* do processor and pmc-class dependent cleanup */
+ maxcpu = pmc_cpu_max();
+
+ PMCDBG(MOD,INI,3, "%s", "md cleanup");
+ if (md) {
+ pmc_save_cpu_binding(&pb);
+ for (cpu = 0; cpu < maxcpu; cpu++) {
+ PMCDBG(MOD,INI,1,"pmc-cleanup cpu=%d pcs=%p",
+ cpu, pmc_pcpu[cpu]);
+ if (!pmc_cpu_is_active(cpu) || pmc_pcpu[cpu] == NULL)
+ continue;
+ pmc_select_cpu(cpu);
+ for (c = 0; c < md->pmd_nclass; c++)
+ md->pmd_classdep[c].pcd_pcpu_fini(md, cpu);
+ if (md->pmd_pcpu_fini)
+ md->pmd_pcpu_fini(md, cpu);
+ }
+
+ if (md->pmd_cputype == PMC_CPU_GENERIC)
+ pmc_generic_cpu_finalize(md);
+ else
+ pmc_md_finalize(md);
+
+ pmc_mdep_free(md);
+ md = NULL;
+ pmc_restore_cpu_binding(&pb);
+ }
+
+ /* Free per-cpu descriptors. */
+ for (cpu = 0; cpu < maxcpu; cpu++) {
+ if (!pmc_cpu_is_active(cpu))
+ continue;
+ KASSERT(pmc_pcpu[cpu]->pc_sb[PMC_HR] != NULL,
+ ("[pmc,%d] Null hw cpu sample buffer cpu=%d", __LINE__,
+ cpu));
+ KASSERT(pmc_pcpu[cpu]->pc_sb[PMC_SR] != NULL,
+ ("[pmc,%d] Null sw cpu sample buffer cpu=%d", __LINE__,
+ cpu));
+ free(pmc_pcpu[cpu]->pc_sb[PMC_HR]->ps_callchains, M_PMC);
+ free(pmc_pcpu[cpu]->pc_sb[PMC_HR], M_PMC);
+ free(pmc_pcpu[cpu]->pc_sb[PMC_SR]->ps_callchains, M_PMC);
+ free(pmc_pcpu[cpu]->pc_sb[PMC_SR], M_PMC);
+ free(pmc_pcpu[cpu], M_PMC);
+ }
+
+ free(pmc_pcpu, M_PMC);
+ pmc_pcpu = NULL;
+
+ free(pmc_pcpu_saved, M_PMC);
+ pmc_pcpu_saved = NULL;
+
+ if (pmc_pmcdisp) {
+ free(pmc_pmcdisp, M_PMC);
+ pmc_pmcdisp = NULL;
+ }
+
+ if (pmc_rowindex_to_classdep) {
+ free(pmc_rowindex_to_classdep, M_PMC);
+ pmc_rowindex_to_classdep = NULL;
+ }
+
+ pmclog_shutdown();
+
+ sx_xunlock(&pmc_sx); /* we are done */
+}
+
+/*
+ * The function called at load/unload.
+ */
+
+static int
+load (struct module *module __unused, int cmd, void *arg __unused)
+{
+ int error;
+
+ error = 0;
+
+ switch (cmd) {
+ case MOD_LOAD :
+ /* initialize the subsystem */
+ error = pmc_initialize();
+ if (error != 0)
+ break;
+ PMCDBG(MOD,INI,1, "syscall=%d maxcpu=%d",
+ pmc_syscall_num, pmc_cpu_max());
+ break;
+
+
+ case MOD_UNLOAD :
+ case MOD_SHUTDOWN:
+ pmc_cleanup();
+ PMCDBG(MOD,INI,1, "%s", "unloaded");
+ break;
+
+ default :
+ error = EINVAL; /* XXX should panic(9) */
+ break;
+ }
+
+ return error;
+}
+
+/* memory pool */
+MALLOC_DEFINE(M_PMC, "pmc", "Memory space for the PMC module");
diff --git a/sys/dev/hwpmc/hwpmc_octeon.c b/sys/dev/hwpmc/hwpmc_octeon.c
new file mode 100644
index 0000000..824a7b0
--- /dev/null
+++ b/sys/dev/hwpmc/hwpmc_octeon.c
@@ -0,0 +1,195 @@
+/*-
+ * Copyright (c) 2012 Oleksandr Tymoshenko <gonzo@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/pmc.h>
+#include <sys/pmckern.h>
+
+#include <machine/cpu.h>
+#include <machine/cpufunc.h>
+#include <machine/pmc_mdep.h>
+
+#include <contrib/octeon-sdk/cvmx.h>
+#include <contrib/octeon-sdk/cvmx-core.h>
+
+#define OCTEON_PMC_CAPS (PMC_CAP_INTERRUPT | PMC_CAP_USER | \
+ PMC_CAP_SYSTEM | PMC_CAP_EDGE | \
+ PMC_CAP_THRESHOLD | PMC_CAP_READ | \
+ PMC_CAP_WRITE | PMC_CAP_INVERT | \
+ PMC_CAP_QUALIFIER)
+
+const struct mips_event_code_map mips_event_codes[] =
+{
+ { PMC_EV_OCTEON_CLK, MIPS_CTR_ALL, CVMX_CORE_PERF_CLK },
+ { PMC_EV_OCTEON_ISSUE, MIPS_CTR_ALL, CVMX_CORE_PERF_ISSUE },
+ { PMC_EV_OCTEON_RET, MIPS_CTR_ALL, CVMX_CORE_PERF_RET },
+ { PMC_EV_OCTEON_NISSUE, MIPS_CTR_ALL, CVMX_CORE_PERF_NISSUE },
+ { PMC_EV_OCTEON_SISSUE, MIPS_CTR_ALL, CVMX_CORE_PERF_SISSUE },
+ { PMC_EV_OCTEON_DISSUE, MIPS_CTR_ALL, CVMX_CORE_PERF_DISSUE },
+ { PMC_EV_OCTEON_IFI, MIPS_CTR_ALL, CVMX_CORE_PERF_IFI },
+ { PMC_EV_OCTEON_BR, MIPS_CTR_ALL, CVMX_CORE_PERF_BR },
+ { PMC_EV_OCTEON_BRMIS, MIPS_CTR_ALL, CVMX_CORE_PERF_BRMIS },
+ { PMC_EV_OCTEON_J, MIPS_CTR_ALL, CVMX_CORE_PERF_J },
+ { PMC_EV_OCTEON_JMIS, MIPS_CTR_ALL, CVMX_CORE_PERF_JMIS },
+ { PMC_EV_OCTEON_REPLAY, MIPS_CTR_ALL, CVMX_CORE_PERF_REPLAY },
+ { PMC_EV_OCTEON_IUNA, MIPS_CTR_ALL, CVMX_CORE_PERF_IUNA },
+ { PMC_EV_OCTEON_TRAP, MIPS_CTR_ALL, CVMX_CORE_PERF_TRAP },
+ { PMC_EV_OCTEON_UULOAD, MIPS_CTR_ALL, CVMX_CORE_PERF_UULOAD },
+ { PMC_EV_OCTEON_UUSTORE, MIPS_CTR_ALL, CVMX_CORE_PERF_UUSTORE },
+ { PMC_EV_OCTEON_ULOAD, MIPS_CTR_ALL, CVMX_CORE_PERF_ULOAD },
+ { PMC_EV_OCTEON_USTORE, MIPS_CTR_ALL, CVMX_CORE_PERF_USTORE },
+ { PMC_EV_OCTEON_EC, MIPS_CTR_ALL, CVMX_CORE_PERF_EC },
+ { PMC_EV_OCTEON_MC, MIPS_CTR_ALL, CVMX_CORE_PERF_MC },
+ { PMC_EV_OCTEON_CC, MIPS_CTR_ALL, CVMX_CORE_PERF_CC },
+ { PMC_EV_OCTEON_CSRC, MIPS_CTR_ALL, CVMX_CORE_PERF_CSRC },
+ { PMC_EV_OCTEON_CFETCH, MIPS_CTR_ALL, CVMX_CORE_PERF_CFETCH },
+ { PMC_EV_OCTEON_CPREF, MIPS_CTR_ALL, CVMX_CORE_PERF_CPREF },
+ { PMC_EV_OCTEON_ICA, MIPS_CTR_ALL, CVMX_CORE_PERF_ICA },
+ { PMC_EV_OCTEON_II, MIPS_CTR_ALL, CVMX_CORE_PERF_II },
+ { PMC_EV_OCTEON_IP, MIPS_CTR_ALL, CVMX_CORE_PERF_IP },
+ { PMC_EV_OCTEON_CIMISS, MIPS_CTR_ALL, CVMX_CORE_PERF_CIMISS },
+ { PMC_EV_OCTEON_WBUF, MIPS_CTR_ALL, CVMX_CORE_PERF_WBUF },
+ { PMC_EV_OCTEON_WDAT, MIPS_CTR_ALL, CVMX_CORE_PERF_WDAT },
+ { PMC_EV_OCTEON_WBUFLD, MIPS_CTR_ALL, CVMX_CORE_PERF_WBUFLD },
+ { PMC_EV_OCTEON_WBUFFL, MIPS_CTR_ALL, CVMX_CORE_PERF_WBUFFL },
+ { PMC_EV_OCTEON_WBUFTR, MIPS_CTR_ALL, CVMX_CORE_PERF_WBUFTR },
+ { PMC_EV_OCTEON_BADD, MIPS_CTR_ALL, CVMX_CORE_PERF_BADD },
+ { PMC_EV_OCTEON_BADDL2, MIPS_CTR_ALL, CVMX_CORE_PERF_BADDL2 },
+ { PMC_EV_OCTEON_BFILL, MIPS_CTR_ALL, CVMX_CORE_PERF_BFILL },
+ { PMC_EV_OCTEON_DDIDS, MIPS_CTR_ALL, CVMX_CORE_PERF_DDIDS },
+ { PMC_EV_OCTEON_IDIDS, MIPS_CTR_ALL, CVMX_CORE_PERF_IDIDS },
+ { PMC_EV_OCTEON_DIDNA, MIPS_CTR_ALL, CVMX_CORE_PERF_DIDNA },
+ { PMC_EV_OCTEON_LDS, MIPS_CTR_ALL, CVMX_CORE_PERF_LDS },
+ { PMC_EV_OCTEON_LMLDS, MIPS_CTR_ALL, CVMX_CORE_PERF_LMLDS },
+ { PMC_EV_OCTEON_IOLDS, MIPS_CTR_ALL, CVMX_CORE_PERF_IOLDS },
+ { PMC_EV_OCTEON_DMLDS, MIPS_CTR_ALL, CVMX_CORE_PERF_DMLDS },
+ { PMC_EV_OCTEON_STS, MIPS_CTR_ALL, CVMX_CORE_PERF_STS },
+ { PMC_EV_OCTEON_LMSTS, MIPS_CTR_ALL, CVMX_CORE_PERF_LMSTS },
+ { PMC_EV_OCTEON_IOSTS, MIPS_CTR_ALL, CVMX_CORE_PERF_IOSTS },
+ { PMC_EV_OCTEON_IOBDMA, MIPS_CTR_ALL, CVMX_CORE_PERF_IOBDMA },
+ { PMC_EV_OCTEON_DTLB, MIPS_CTR_ALL, CVMX_CORE_PERF_DTLB },
+ { PMC_EV_OCTEON_DTLBAD, MIPS_CTR_ALL, CVMX_CORE_PERF_DTLBAD },
+ { PMC_EV_OCTEON_ITLB, MIPS_CTR_ALL, CVMX_CORE_PERF_ITLB },
+ { PMC_EV_OCTEON_SYNC, MIPS_CTR_ALL, CVMX_CORE_PERF_SYNC },
+ { PMC_EV_OCTEON_SYNCIOB, MIPS_CTR_ALL, CVMX_CORE_PERF_SYNCIOB },
+ { PMC_EV_OCTEON_SYNCW, MIPS_CTR_ALL, CVMX_CORE_PERF_SYNCW },
+};
+
+const int mips_event_codes_size =
+ sizeof(mips_event_codes) / sizeof(mips_event_codes[0]);
+
+struct mips_pmc_spec mips_pmc_spec = {
+ .ps_cpuclass = PMC_CLASS_OCTEON,
+ .ps_cputype = PMC_CPU_MIPS_OCTEON,
+ .ps_capabilities = OCTEON_PMC_CAPS,
+ .ps_counter_width = 64
+};
+
+/*
+ * Performance Count Register N
+ */
+uint64_t
+mips_pmcn_read(unsigned int pmc)
+{
+ uint64_t reg = 0;
+
+ KASSERT(pmc < mips_npmcs, ("[mips,%d] illegal PMC number %d",
+ __LINE__, pmc));
+
+ /* The counter value is the next value after the control register. */
+ switch (pmc) {
+ case 0:
+ CVMX_MF_COP0(reg, COP0_PERFVALUE0);
+ break;
+ case 1:
+ CVMX_MF_COP0(reg, COP0_PERFVALUE1);
+ break;
+ default:
+ return 0;
+ }
+ return (reg);
+}
+
+uint64_t
+mips_pmcn_write(unsigned int pmc, uint64_t reg)
+{
+
+ KASSERT(pmc < mips_npmcs, ("[mips,%d] illegal PMC number %d",
+ __LINE__, pmc));
+
+ switch (pmc) {
+ case 0:
+ CVMX_MT_COP0(reg, COP0_PERFVALUE0);
+ break;
+ case 1:
+ CVMX_MT_COP0(reg, COP0_PERFVALUE1);
+ break;
+ default:
+ return 0;
+ }
+ return (reg);
+}
+
+uint32_t
+mips_get_perfctl(int cpu, int ri, uint32_t event, uint32_t caps)
+{
+ cvmx_core_perf_control_t control;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[mips,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < mips_npmcs,
+ ("[mips,%d] illegal row index %d", __LINE__, ri));
+
+	control.u32 = 0;	/* start from a known-clear control word */
+	control.s.event = event;
+
+ if (caps & PMC_CAP_SYSTEM) {
+ control.s.k = 1;
+ control.s.s = 1;
+ control.s.ex = 1;
+ }
+
+ if (caps & PMC_CAP_USER)
+ control.s.u = 1;
+
+ if ((caps & (PMC_CAP_USER | PMC_CAP_SYSTEM)) == 0) {
+ control.s.k = 1;
+ control.s.s = 1;
+ control.s.u = 1;
+ control.s.ex = 1;
+ }
+
+ if (caps & PMC_CAP_INTERRUPT)
+ control.s.ie = 1;
+
+ PMCDBG(MDP,ALL,2,"mips-allocate ri=%d -> config=0x%x", ri, control.u32);
+
+ return (control.u32);
+}
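+
+/*
+ * Example usage (illustrative): building the control word for a
+ * counter that counts core clocks in user mode and interrupts on
+ * overflow.
+ */
+#if 0
+	uint32_t config;
+
+	config = mips_get_perfctl(cpu, ri, CVMX_CORE_PERF_CLK,
+	    PMC_CAP_USER | PMC_CAP_INTERRUPT);
+	/* 'config' now has s.u and s.ie set, plus the event code. */
+#endif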
diff --git a/sys/dev/hwpmc/hwpmc_pentium.c b/sys/dev/hwpmc/hwpmc_pentium.c
new file mode 100644
index 0000000..0084fa8
--- /dev/null
+++ b/sys/dev/hwpmc/hwpmc_pentium.c
@@ -0,0 +1,57 @@
+/*-
+ * Copyright (c) 2003-2005 Joseph Koshy
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/pmc.h>
+#include <sys/pmckern.h>
+#include <sys/smp.h>
+#include <sys/systm.h>
+
+#include <machine/cpufunc.h>
+#include <machine/md_var.h>
+#include <machine/pmc_mdep.h>
+
+/*
+ * Intel Pentium PMCs
+ */
+
+int
+pmc_p5_initialize(struct pmc_mdep *pmc_mdep, int ncpus)
+{
+ (void) pmc_mdep; (void) ncpus;
+ return (ENOSYS); /* nothing here yet */
+}
+
+void
+pmc_p5_finalize(struct pmc_mdep *pmc_mdep)
+{
+ (void) pmc_mdep;
+}
diff --git a/sys/dev/hwpmc/hwpmc_pentium.h b/sys/dev/hwpmc/hwpmc_pentium.h
new file mode 100644
index 0000000..9bb8e78
--- /dev/null
+++ b/sys/dev/hwpmc/hwpmc_pentium.h
@@ -0,0 +1,73 @@
+/*-
+ * Copyright (c) 2005, Joseph Koshy
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/* Machine dependent interfaces */
+
+#ifndef _DEV_HWPMC_PENTIUM_H_
+#define _DEV_HWPMC_PENTIUM_H_ 1
+
+/* Intel Pentium PMCs */
+
+#define PENTIUM_NPMCS 2
+#define PENTIUM_CESR_PC1 (1 << 25)
+#define PENTIUM_CESR_CC1_MASK 0x01C00000
+#define PENTIUM_CESR_TO_CC1(C) (((C) & 0x07) << 22)
+#define PENTIUM_CESR_ES1_MASK 0x003F0000
+#define PENTIUM_CESR_TO_ES1(E) (((E) & 0x3F) << 16)
+#define PENTIUM_CESR_PC0 (1 << 9)
+#define PENTIUM_CESR_CC0_MASK 0x000001C0
+#define PENTIUM_CESR_TO_CC0(C) (((C) & 0x07) << 6)
+#define PENTIUM_CESR_ES0_MASK 0x0000003F
+#define PENTIUM_CESR_TO_ES0(E) ((E) & 0x3F)
+#define PENTIUM_CESR_RESERVED 0xFC00FC00
+
+#define PENTIUM_MSR_CESR 0x11
+#define PENTIUM_MSR_CTR0 0x12
+#define PENTIUM_MSR_CTR1 0x13
+
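+/*
+ * Example (illustrative only): composing a CESR value that counts an
+ * event E on counter 0 at any privilege level.  The CC0 value 0x3
+ * (011b) is assumed, per the P5 CESR encoding, to select event
+ * counting regardless of CPL; 'E' is a placeholder event number.
+ */
+#if 0
+	uint32_t cesr;
+
+	cesr = PENTIUM_CESR_TO_ES0(E) | PENTIUM_CESR_TO_CC0(0x3);
+	wrmsr(PENTIUM_MSR_CESR, cesr);
+#endif
+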
+struct pmc_md_pentium_op_pmcallocate {
+ uint32_t pm_pentium_config;
+};
+
+#ifdef _KERNEL
+
+/* MD extension for 'struct pmc' */
+struct pmc_md_pentium_pmc {
+ uint32_t pm_pentium_cesr;
+};
+
+
+/*
+ * Prototypes
+ */
+
+int pmc_p5_initialize(struct pmc_mdep *_md, int _ncpus);
+void pmc_p5_finalize(struct pmc_mdep *_md);
+
+#endif /* _KERNEL */
+#endif /* _DEV_HWPMC_PENTIUM_H_ */
diff --git a/sys/dev/hwpmc/hwpmc_piv.c b/sys/dev/hwpmc/hwpmc_piv.c
new file mode 100644
index 0000000..26b23a1
--- /dev/null
+++ b/sys/dev/hwpmc/hwpmc_piv.c
@@ -0,0 +1,1698 @@
+/*-
+ * Copyright (c) 2003-2007 Joseph Koshy
+ * Copyright (c) 2007 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * Portions of this software were developed by A. Joseph Koshy under
+ * sponsorship from the FreeBSD Foundation and Google, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/pmc.h>
+#include <sys/pmckern.h>
+#include <sys/smp.h>
+#include <sys/systm.h>
+
+#include <machine/intr_machdep.h>
+#include <machine/apicvar.h>
+#include <machine/cpu.h>
+#include <machine/cpufunc.h>
+#include <machine/cputypes.h>
+#include <machine/md_var.h>
+#include <machine/specialreg.h>
+
+/*
+ * PENTIUM 4 SUPPORT
+ *
+ * The P4 has 18 PMCs, divided into 4 groups with 4,4,4 and 6 PMCs
+ * respectively.  Each PMC consists of two model-specific registers:
+ * a counter configuration control register (CCCR) and a counter
+ * register that holds the actual event counts.
+ *
+ * Configuring an event requires the use of one of 45 event selection
+ * control registers (ESCR). Events are associated with specific
+ * ESCRs. Each PMC group has a set of ESCRs it can use.
+ *
+ * - The BPU counter group (4 PMCs) can use the 16 ESCRs:
+ * BPU_ESCR{0,1}, IS_ESCR{0,1}, MOB_ESCR{0,1}, ITLB_ESCR{0,1},
+ * PMH_ESCR{0,1}, IX_ESCR{0,1}, FSB_ESCR{0,1}, BSU_ESCR{0,1}.
+ *
+ * - The MS counter group (4 PMCs) can use the 6 ESCRs: MS_ESCR{0,1},
+ * TC_ESCR{0,1}, TBPU_ESCR{0,1}.
+ *
+ * - The FLAME counter group (4 PMCs) can use the 10 ESCRs:
+ * FLAME_ESCR{0,1}, FIRM_ESCR{0,1}, SAAT_ESCR{0,1}, U2L_ESCR{0,1},
+ * DAC_ESCR{0,1}.
+ *
+ * - The IQ counter group (6 PMCs) can use the 13 ESCRs: IQ_ESCR{0,1},
+ * ALF_ESCR{0,1}, RAT_ESCR{0,1}, SSU_ESCR0, CRU_ESCR{0,1,2,3,4,5}.
+ *
+ * Even-numbered ESCRs can be used with counters 0, 1 and 4 (if
+ * present) of a counter group.  Odd-numbered ESCRs can be used with
+ * counters 2, 3 and 5 (if present) of a counter group. The
+ * 'p4_escrs[]' table describes these restrictions in a form that
+ * function 'p4_allocate()' uses for making allocation decisions.
+ *
+ * SYSTEM-MODE AND THREAD-MODE ALLOCATION
+ *
+ * In addition to remembering the state of PMC rows
+ * ('FREE', 'STANDALONE', or 'THREAD'), we similarly need to track the
+ * state of ESCR rows. If an ESCR is allocated to a system-mode PMC
+ * on a CPU we cannot allocate this to a thread-mode PMC. On a
+ * multi-cpu (multiple physical CPUs) system, ESCR allocation on each
+ * CPU is tracked by the pc_escrs[] array.
+ *
+ * Each system-mode PMC that is using an ESCR records its row-index in
+ * the appropriate entry and system-mode allocation attempts check
+ * that an ESCR is available using this array. Process-mode PMCs do
+ * not use the pc_escrs[] array, since the ESCR row itself would have
+ * been marked as being in 'THREAD' mode.
+ *
+ * HYPERTHREADING SUPPORT
+ *
+ * When HTT is enabled, the FreeBSD kernel treats the two 'logical'
+ * cpus as independent CPUs and can schedule kernel threads on them
+ * independently. However, the two logical CPUs share the same set of
+ * PMC resources. We need to ensure that:
+ * - PMCs that use the PMC_F_DESCENDANTS semantics are handled correctly,
+ * and,
+ * - Threads of multi-threaded processes that get scheduled on the same
+ * physical CPU are handled correctly.
+ *
+ * HTT Detection
+ *
+ * Not all HTT-capable systems will have HTT enabled.  We detect the
+ * presence of HTT by checking whether 'p4_init()' was called for a
+ * secondary CPU in an HTT pair.
+ *
+ * Note that hwpmc(4) cannot currently deal with a change in HTT status once
+ * loaded.
+ *
+ * Handling HTT READ / WRITE / START / STOP
+ *
+ * PMC resources are shared across the CPUs in an HTT pair. We
+ * designate the lower numbered CPU in a HTT pair as the 'primary'
+ * CPU. In each primary CPU's state we keep track of a 'runcount'
+ * which reflects the number of PMC-using processes that have been
+ * scheduled on its secondary CPU. Process-mode PMC operations will
+ * actually 'start' or 'stop' hardware only if these are the first or
+ * last processes respectively to use the hardware. PMC values
+ * written by a 'write' operation are saved and are transferred to
+ * hardware at PMC 'start' time if the runcount is 0. If the runcount
+ * is greater than 0 at the time of a 'start' operation, we keep track
+ * of the actual hardware value at the time of the 'start' operation
+ * and use this to adjust the final readings at PMC 'stop' or 'read'
+ * time.
+ *
+ * Execution sequences:
+ *
+ * Case 1: CPUx +...- (no overlap)
+ * CPUy +...-
+ * RC 0 1 0 1 0
+ *
+ * Case 2: CPUx +........- (partial overlap)
+ * CPUy +........-
+ * RC 0 1 2 1 0
+ *
+ * Case 3: CPUx +..............- (fully overlapped)
+ * CPUy +.....-
+ * RC 0 1 2 1 0
+ *
+ * Key:
+ * 'CPU[xy]' : one of the two logical processors on a HTT CPU.
+ * 'RC' : run count (#threads per physical core).
+ * '+' : point in time when a thread is put on a CPU.
+ * '-' : point in time where a thread is taken off a CPU.
+ *
+ * Handling HTT CONFIG
+ *
+ * Different processes attached to the same PMC may get scheduled on
+ * the two logical processors in the package. We keep track of config
+ * and de-config operations using the CFGFLAGS fields of the per-physical
+ * cpu state.
+ */
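+
+/*
+ * A minimal sketch of the run count bookkeeping described above, for
+ * one PMC on one HTT pair.  All names are illustrative; the
+ * hw_read()/hw_write()/hw_start()/hw_stop() helpers stand in for the
+ * CCCR/counter MSR accesses, and in the driver the baseline would be
+ * kept in the per-physical-cpu state.
+ */
+#if 0	/* illustrative sketch */
+struct htt_rc_sketch {
+	int		rc;		/* threads using the hardware */
+	pmc_value_t	saved;		/* accumulated count */
+	pmc_value_t	baseline;	/* hw value at our 'start' */
+};
+
+static void
+rc_start(struct htt_rc_sketch *s)
+{
+	if (s->rc++ == 0) {		/* 0 -> 1: program the hardware */
+		hw_write(s->saved);
+		hw_start();
+		s->baseline = s->saved;
+	} else				/* already running: remember where */
+		s->baseline = hw_read();
+}
+
+static void
+rc_stop(struct htt_rc_sketch *s)
+{
+	s->saved += hw_read() - s->baseline;	/* credit our delta */
+	if (--s->rc == 0)		/* 1 -> 0: release the hardware */
+		hw_stop();
+}
+#endif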
+
+#define P4_PMCS() \
+ P4_PMC(BPU_COUNTER0) \
+ P4_PMC(BPU_COUNTER1) \
+ P4_PMC(BPU_COUNTER2) \
+ P4_PMC(BPU_COUNTER3) \
+ P4_PMC(MS_COUNTER0) \
+ P4_PMC(MS_COUNTER1) \
+ P4_PMC(MS_COUNTER2) \
+ P4_PMC(MS_COUNTER3) \
+ P4_PMC(FLAME_COUNTER0) \
+ P4_PMC(FLAME_COUNTER1) \
+ P4_PMC(FLAME_COUNTER2) \
+ P4_PMC(FLAME_COUNTER3) \
+ P4_PMC(IQ_COUNTER0) \
+ P4_PMC(IQ_COUNTER1) \
+ P4_PMC(IQ_COUNTER2) \
+ P4_PMC(IQ_COUNTER3) \
+ P4_PMC(IQ_COUNTER4) \
+ P4_PMC(IQ_COUNTER5) \
+ P4_PMC(NONE)
+
+enum pmc_p4pmc {
+#undef P4_PMC
+#define P4_PMC(N) P4_PMC_##N ,
+ P4_PMCS()
+};
+
+/*
+ * P4 ESCR descriptors
+ */
+
+#define P4_ESCRS() \
+ P4_ESCR(BSU_ESCR0, 0x3A0, BPU_COUNTER0, BPU_COUNTER1, NONE) \
+ P4_ESCR(BSU_ESCR1, 0x3A1, BPU_COUNTER2, BPU_COUNTER3, NONE) \
+ P4_ESCR(FSB_ESCR0, 0x3A2, BPU_COUNTER0, BPU_COUNTER1, NONE) \
+ P4_ESCR(FSB_ESCR1, 0x3A3, BPU_COUNTER2, BPU_COUNTER3, NONE) \
+ P4_ESCR(FIRM_ESCR0, 0x3A4, FLAME_COUNTER0, FLAME_COUNTER1, NONE) \
+ P4_ESCR(FIRM_ESCR1, 0x3A5, FLAME_COUNTER2, FLAME_COUNTER3, NONE) \
+ P4_ESCR(FLAME_ESCR0, 0x3A6, FLAME_COUNTER0, FLAME_COUNTER1, NONE) \
+ P4_ESCR(FLAME_ESCR1, 0x3A7, FLAME_COUNTER2, FLAME_COUNTER3, NONE) \
+ P4_ESCR(DAC_ESCR0, 0x3A8, FLAME_COUNTER0, FLAME_COUNTER1, NONE) \
+ P4_ESCR(DAC_ESCR1, 0x3A9, FLAME_COUNTER2, FLAME_COUNTER3, NONE) \
+ P4_ESCR(MOB_ESCR0, 0x3AA, BPU_COUNTER0, BPU_COUNTER1, NONE) \
+ P4_ESCR(MOB_ESCR1, 0x3AB, BPU_COUNTER2, BPU_COUNTER3, NONE) \
+ P4_ESCR(PMH_ESCR0, 0x3AC, BPU_COUNTER0, BPU_COUNTER1, NONE) \
+ P4_ESCR(PMH_ESCR1, 0x3AD, BPU_COUNTER2, BPU_COUNTER3, NONE) \
+ P4_ESCR(SAAT_ESCR0, 0x3AE, FLAME_COUNTER0, FLAME_COUNTER1, NONE) \
+ P4_ESCR(SAAT_ESCR1, 0x3AF, FLAME_COUNTER2, FLAME_COUNTER3, NONE) \
+ P4_ESCR(U2L_ESCR0, 0x3B0, FLAME_COUNTER0, FLAME_COUNTER1, NONE) \
+ P4_ESCR(U2L_ESCR1, 0x3B1, FLAME_COUNTER2, FLAME_COUNTER3, NONE) \
+ P4_ESCR(BPU_ESCR0, 0x3B2, BPU_COUNTER0, BPU_COUNTER1, NONE) \
+ P4_ESCR(BPU_ESCR1, 0x3B3, BPU_COUNTER2, BPU_COUNTER3, NONE) \
+ P4_ESCR(IS_ESCR0, 0x3B4, BPU_COUNTER0, BPU_COUNTER1, NONE) \
+ P4_ESCR(IS_ESCR1, 0x3B5, BPU_COUNTER2, BPU_COUNTER3, NONE) \
+ P4_ESCR(ITLB_ESCR0, 0x3B6, BPU_COUNTER0, BPU_COUNTER1, NONE) \
+ P4_ESCR(ITLB_ESCR1, 0x3B7, BPU_COUNTER2, BPU_COUNTER3, NONE) \
+ P4_ESCR(CRU_ESCR0, 0x3B8, IQ_COUNTER0, IQ_COUNTER1, IQ_COUNTER4) \
+ P4_ESCR(CRU_ESCR1, 0x3B9, IQ_COUNTER2, IQ_COUNTER3, IQ_COUNTER5) \
+ P4_ESCR(IQ_ESCR0, 0x3BA, IQ_COUNTER0, IQ_COUNTER1, IQ_COUNTER4) \
+	P4_ESCR(IQ_ESCR1,	0x3BB, IQ_COUNTER2, IQ_COUNTER3, IQ_COUNTER5)	\
+ P4_ESCR(RAT_ESCR0, 0x3BC, IQ_COUNTER0, IQ_COUNTER1, IQ_COUNTER4) \
+ P4_ESCR(RAT_ESCR1, 0x3BD, IQ_COUNTER2, IQ_COUNTER3, IQ_COUNTER5) \
+ P4_ESCR(SSU_ESCR0, 0x3BE, IQ_COUNTER0, IQ_COUNTER2, IQ_COUNTER4) \
+ P4_ESCR(MS_ESCR0, 0x3C0, MS_COUNTER0, MS_COUNTER1, NONE) \
+ P4_ESCR(MS_ESCR1, 0x3C1, MS_COUNTER2, MS_COUNTER3, NONE) \
+ P4_ESCR(TBPU_ESCR0, 0x3C2, MS_COUNTER0, MS_COUNTER1, NONE) \
+ P4_ESCR(TBPU_ESCR1, 0x3C3, MS_COUNTER2, MS_COUNTER3, NONE) \
+ P4_ESCR(TC_ESCR0, 0x3C4, MS_COUNTER0, MS_COUNTER1, NONE) \
+ P4_ESCR(TC_ESCR1, 0x3C5, MS_COUNTER2, MS_COUNTER3, NONE) \
+ P4_ESCR(IX_ESCR0, 0x3C8, BPU_COUNTER0, BPU_COUNTER1, NONE) \
+ P4_ESCR(IX_ESCR1, 0x3C9, BPU_COUNTER2, BPU_COUNTER3, NONE) \
+ P4_ESCR(ALF_ESCR0, 0x3CA, IQ_COUNTER0, IQ_COUNTER1, IQ_COUNTER4) \
+ P4_ESCR(ALF_ESCR1, 0x3CB, IQ_COUNTER2, IQ_COUNTER3, IQ_COUNTER5) \
+ P4_ESCR(CRU_ESCR2, 0x3CC, IQ_COUNTER0, IQ_COUNTER1, IQ_COUNTER4) \
+ P4_ESCR(CRU_ESCR3, 0x3CD, IQ_COUNTER2, IQ_COUNTER3, IQ_COUNTER5) \
+ P4_ESCR(CRU_ESCR4, 0x3E0, IQ_COUNTER0, IQ_COUNTER1, IQ_COUNTER4) \
+ P4_ESCR(CRU_ESCR5, 0x3E1, IQ_COUNTER2, IQ_COUNTER3, IQ_COUNTER5) \
+ P4_ESCR(NONE, ~0, NONE, NONE, NONE)
+
+enum pmc_p4escr {
+#define P4_ESCR(N, MSR, P1, P2, P3) P4_ESCR_##N ,
+ P4_ESCRS()
+#undef P4_ESCR
+};
+
+struct pmc_p4escr_descr {
+ const char pm_escrname[PMC_NAME_MAX];
+ u_short pm_escr_msr;
+ const enum pmc_p4pmc pm_pmcs[P4_MAX_PMC_PER_ESCR];
+};
+
+static struct pmc_p4escr_descr p4_escrs[] =
+{
+#define P4_ESCR(N, MSR, P1, P2, P3) \
+ { \
+ .pm_escrname = #N, \
+ .pm_escr_msr = (MSR), \
+ .pm_pmcs = \
+ { \
+ P4_PMC_##P1, \
+ P4_PMC_##P2, \
+ P4_PMC_##P3 \
+ } \
+ } ,
+
+ P4_ESCRS()
+
+#undef P4_ESCR
+};
+
+/*
+ * P4 Event descriptor
+ */
+
+struct p4_event_descr {
+ const enum pmc_event pm_event;
+ const uint32_t pm_escr_eventselect;
+ const uint32_t pm_cccr_select;
+ const char pm_is_ti_event;
+ enum pmc_p4escr pm_escrs[P4_MAX_ESCR_PER_EVENT];
+};
+
+static struct p4_event_descr p4_events[] = {
+
+#define P4_EVDESCR(NAME, ESCREVENTSEL, CCCRSEL, TI_EVENT, ESCR0, ESCR1) \
+ { \
+ .pm_event = PMC_EV_P4_##NAME, \
+ .pm_escr_eventselect = (ESCREVENTSEL), \
+ .pm_cccr_select = (CCCRSEL), \
+ .pm_is_ti_event = (TI_EVENT), \
+ .pm_escrs = \
+ { \
+ P4_ESCR_##ESCR0, \
+ P4_ESCR_##ESCR1 \
+ } \
+ }
+
+P4_EVDESCR(TC_DELIVER_MODE, 0x01, 0x01, TRUE, TC_ESCR0, TC_ESCR1),
+P4_EVDESCR(BPU_FETCH_REQUEST, 0x03, 0x00, FALSE, BPU_ESCR0, BPU_ESCR1),
+P4_EVDESCR(ITLB_REFERENCE, 0x18, 0x03, FALSE, ITLB_ESCR0, ITLB_ESCR1),
+P4_EVDESCR(MEMORY_CANCEL, 0x02, 0x05, FALSE, DAC_ESCR0, DAC_ESCR1),
+P4_EVDESCR(MEMORY_COMPLETE, 0x08, 0x02, FALSE, SAAT_ESCR0, SAAT_ESCR1),
+P4_EVDESCR(LOAD_PORT_REPLAY, 0x04, 0x02, FALSE, SAAT_ESCR0, SAAT_ESCR1),
+P4_EVDESCR(STORE_PORT_REPLAY, 0x05, 0x02, FALSE, SAAT_ESCR0, SAAT_ESCR1),
+P4_EVDESCR(MOB_LOAD_REPLAY, 0x03, 0x02, FALSE, MOB_ESCR0, MOB_ESCR1),
+P4_EVDESCR(PAGE_WALK_TYPE, 0x01, 0x04, TRUE, PMH_ESCR0, PMH_ESCR1),
+P4_EVDESCR(BSQ_CACHE_REFERENCE, 0x0C, 0x07, FALSE, BSU_ESCR0, BSU_ESCR1),
+P4_EVDESCR(IOQ_ALLOCATION, 0x03, 0x06, FALSE, FSB_ESCR0, FSB_ESCR1),
+P4_EVDESCR(IOQ_ACTIVE_ENTRIES, 0x1A, 0x06, FALSE, FSB_ESCR1, NONE),
+P4_EVDESCR(FSB_DATA_ACTIVITY, 0x17, 0x06, TRUE, FSB_ESCR0, FSB_ESCR1),
+P4_EVDESCR(BSQ_ALLOCATION, 0x05, 0x07, FALSE, BSU_ESCR0, NONE),
+P4_EVDESCR(BSQ_ACTIVE_ENTRIES, 0x06, 0x07, FALSE, BSU_ESCR1, NONE),
+ /* BSQ_ACTIVE_ENTRIES inherits CPU specificity from BSQ_ALLOCATION */
+P4_EVDESCR(SSE_INPUT_ASSIST, 0x34, 0x01, TRUE, FIRM_ESCR0, FIRM_ESCR1),
+P4_EVDESCR(PACKED_SP_UOP, 0x08, 0x01, TRUE, FIRM_ESCR0, FIRM_ESCR1),
+P4_EVDESCR(PACKED_DP_UOP, 0x0C, 0x01, TRUE, FIRM_ESCR0, FIRM_ESCR1),
+P4_EVDESCR(SCALAR_SP_UOP, 0x0A, 0x01, TRUE, FIRM_ESCR0, FIRM_ESCR1),
+P4_EVDESCR(SCALAR_DP_UOP, 0x0E, 0x01, TRUE, FIRM_ESCR0, FIRM_ESCR1),
+P4_EVDESCR(64BIT_MMX_UOP, 0x02, 0x01, TRUE, FIRM_ESCR0, FIRM_ESCR1),
+P4_EVDESCR(128BIT_MMX_UOP, 0x1A, 0x01, TRUE, FIRM_ESCR0, FIRM_ESCR1),
+P4_EVDESCR(X87_FP_UOP, 0x04, 0x01, TRUE, FIRM_ESCR0, FIRM_ESCR1),
+P4_EVDESCR(X87_SIMD_MOVES_UOP, 0x2E, 0x01, TRUE, FIRM_ESCR0, FIRM_ESCR1),
+P4_EVDESCR(GLOBAL_POWER_EVENTS, 0x13, 0x06, FALSE, FSB_ESCR0, FSB_ESCR1),
+P4_EVDESCR(TC_MS_XFER, 0x05, 0x00, FALSE, MS_ESCR0, MS_ESCR1),
+P4_EVDESCR(UOP_QUEUE_WRITES, 0x09, 0x00, FALSE, MS_ESCR0, MS_ESCR1),
+P4_EVDESCR(RETIRED_MISPRED_BRANCH_TYPE,
+ 0x05, 0x02, FALSE, TBPU_ESCR0, TBPU_ESCR1),
+P4_EVDESCR(RETIRED_BRANCH_TYPE, 0x04, 0x02, FALSE, TBPU_ESCR0, TBPU_ESCR1),
+P4_EVDESCR(RESOURCE_STALL, 0x01, 0x01, FALSE, ALF_ESCR0, ALF_ESCR1),
+P4_EVDESCR(WC_BUFFER, 0x05, 0x05, TRUE, DAC_ESCR0, DAC_ESCR1),
+P4_EVDESCR(B2B_CYCLES, 0x16, 0x03, TRUE, FSB_ESCR0, FSB_ESCR1),
+P4_EVDESCR(BNR, 0x08, 0x03, TRUE, FSB_ESCR0, FSB_ESCR1),
+P4_EVDESCR(SNOOP, 0x06, 0x03, TRUE, FSB_ESCR0, FSB_ESCR1),
+P4_EVDESCR(RESPONSE, 0x04, 0x03, TRUE, FSB_ESCR0, FSB_ESCR1),
+P4_EVDESCR(FRONT_END_EVENT, 0x08, 0x05, FALSE, CRU_ESCR2, CRU_ESCR3),
+P4_EVDESCR(EXECUTION_EVENT, 0x0C, 0x05, FALSE, CRU_ESCR2, CRU_ESCR3),
+P4_EVDESCR(REPLAY_EVENT, 0x09, 0x05, FALSE, CRU_ESCR2, CRU_ESCR3),
+P4_EVDESCR(INSTR_RETIRED, 0x02, 0x04, FALSE, CRU_ESCR0, CRU_ESCR1),
+P4_EVDESCR(UOPS_RETIRED, 0x01, 0x04, FALSE, CRU_ESCR0, CRU_ESCR1),
+P4_EVDESCR(UOP_TYPE, 0x02, 0x02, FALSE, RAT_ESCR0, RAT_ESCR1),
+P4_EVDESCR(BRANCH_RETIRED, 0x06, 0x05, FALSE, CRU_ESCR2, CRU_ESCR3),
+P4_EVDESCR(MISPRED_BRANCH_RETIRED, 0x03, 0x04, FALSE, CRU_ESCR0, CRU_ESCR1),
+P4_EVDESCR(X87_ASSIST, 0x03, 0x05, FALSE, CRU_ESCR2, CRU_ESCR3),
+P4_EVDESCR(MACHINE_CLEAR, 0x02, 0x05, FALSE, CRU_ESCR2, CRU_ESCR3)
+
+#undef P4_EVDESCR
+};
+
+#define P4_EVENT_IS_TI(E) ((E)->pm_is_ti_event == TRUE)
+
+#define P4_NEVENTS (PMC_EV_P4_LAST - PMC_EV_P4_FIRST + 1)
+
+/*
+ * P4 PMC descriptors
+ */
+
+struct p4pmc_descr {
+ struct pmc_descr pm_descr; /* common information */
+ enum pmc_p4pmc pm_pmcnum; /* PMC number */
+ uint32_t pm_pmc_msr; /* PERFCTR MSR address */
+ uint32_t pm_cccr_msr; /* CCCR MSR address */
+};
+
+static struct p4pmc_descr p4_pmcdesc[P4_NPMCS] = {
+#define P4_PMC_CAPS (PMC_CAP_INTERRUPT | PMC_CAP_USER | PMC_CAP_SYSTEM | \
+ PMC_CAP_EDGE | PMC_CAP_THRESHOLD | PMC_CAP_READ | PMC_CAP_WRITE | \
+ PMC_CAP_INVERT | PMC_CAP_QUALIFIER | PMC_CAP_PRECISE | \
+ PMC_CAP_TAGGING | PMC_CAP_CASCADE)
+
+#define P4_PMCDESCR(N, PMC, CCCR) \
+ { \
+ .pm_descr = \
+ { \
+ .pd_name = #N, \
+ .pd_class = PMC_CLASS_P4, \
+ .pd_caps = P4_PMC_CAPS, \
+ .pd_width = 40 \
+ }, \
+ .pm_pmcnum = P4_PMC_##N, \
+ .pm_cccr_msr = (CCCR), \
+ .pm_pmc_msr = (PMC) \
+ }
+
+ P4_PMCDESCR(BPU_COUNTER0, 0x300, 0x360),
+ P4_PMCDESCR(BPU_COUNTER1, 0x301, 0x361),
+ P4_PMCDESCR(BPU_COUNTER2, 0x302, 0x362),
+ P4_PMCDESCR(BPU_COUNTER3, 0x303, 0x363),
+ P4_PMCDESCR(MS_COUNTER0, 0x304, 0x364),
+ P4_PMCDESCR(MS_COUNTER1, 0x305, 0x365),
+ P4_PMCDESCR(MS_COUNTER2, 0x306, 0x366),
+ P4_PMCDESCR(MS_COUNTER3, 0x307, 0x367),
+ P4_PMCDESCR(FLAME_COUNTER0, 0x308, 0x368),
+ P4_PMCDESCR(FLAME_COUNTER1, 0x309, 0x369),
+ P4_PMCDESCR(FLAME_COUNTER2, 0x30A, 0x36A),
+ P4_PMCDESCR(FLAME_COUNTER3, 0x30B, 0x36B),
+ P4_PMCDESCR(IQ_COUNTER0, 0x30C, 0x36C),
+ P4_PMCDESCR(IQ_COUNTER1, 0x30D, 0x36D),
+ P4_PMCDESCR(IQ_COUNTER2, 0x30E, 0x36E),
+ P4_PMCDESCR(IQ_COUNTER3, 0x30F, 0x36F),
+ P4_PMCDESCR(IQ_COUNTER4, 0x310, 0x370),
+ P4_PMCDESCR(IQ_COUNTER5, 0x311, 0x371),
+
+#undef P4_PMCDESCR
+};
+
+/* HTT support */
+#define P4_NHTT 2 /* logical processors/chip */
+
+static int p4_system_has_htt;
+
+/*
+ * Per-CPU data structure for P4 class CPUs
+ *
+ * [18 struct pmc_hw structures]
+ * [45 ESCR status bytes]
+ * [per-cpu spin mutex]
+ * [18 flag fields for holding config flags and a runcount]
+ * [18*2 hw value fields]	(Thread mode PMC support)
+ *    or
+ * [18*2 EIP values]		(Sampling mode PMCs)
+ * [18*2 pmc value fields]	(Thread mode PMC support)
+ */
+
+struct p4_cpu {
+ struct pmc_hw pc_p4pmcs[P4_NPMCS];
+ char pc_escrs[P4_NESCR];
+ struct mtx pc_mtx; /* spin lock */
+ uint32_t pc_intrflag; /* NMI handler flags */
+ unsigned int pc_intrlock; /* NMI handler spin lock */
+ unsigned char pc_flags[P4_NPMCS]; /* 4 bits each: {cfg,run}count */
+ union {
+ pmc_value_t pc_hw[P4_NPMCS * P4_NHTT];
+ uintptr_t pc_ip[P4_NPMCS * P4_NHTT];
+ } pc_si;
+ pmc_value_t pc_pmc_values[P4_NPMCS * P4_NHTT];
+};
+
+static struct p4_cpu **p4_pcpu;
+
+#define P4_PCPU_PMC_VALUE(PC,RI,CPU)	(PC)->pc_pmc_values[(RI) * P4_NHTT + ((CPU) & 1)]
+#define P4_PCPU_HW_VALUE(PC,RI,CPU)	(PC)->pc_si.pc_hw[(RI) * P4_NHTT + ((CPU) & 1)]
+#define P4_PCPU_SAVED_IP(PC,RI,CPU)	(PC)->pc_si.pc_ip[(RI) * P4_NHTT + ((CPU) & 1)]
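+
+/*
+ * Illustrative example (assuming the layout above): on an HTT system,
+ * P4_PCPU_PMC_VALUE(pc, 3, 5) refers to pc->pc_pmc_values[3 * 2 + 1],
+ * the slot for row-index 3 on the secondary logical CPU, while
+ * P4_PCPU_PMC_VALUE(pc, 3, 4) uses the adjacent primary-CPU slot.
+ */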
+
+#define P4_PCPU_GET_FLAGS(PC,RI,MASK) ((PC)->pc_flags[(RI)] & (MASK))
+#define P4_PCPU_SET_FLAGS(PC,RI,MASK,VAL) do { \
+ char _tmp; \
+ _tmp = (PC)->pc_flags[(RI)]; \
+ _tmp &= ~(MASK); \
+ _tmp |= (VAL) & (MASK); \
+ (PC)->pc_flags[(RI)] = _tmp; \
+} while (0)
+
+#define P4_PCPU_GET_RUNCOUNT(PC,RI) P4_PCPU_GET_FLAGS(PC,RI,0x0F)
+#define P4_PCPU_SET_RUNCOUNT(PC,RI,V) P4_PCPU_SET_FLAGS(PC,RI,0x0F,V)
+
+#define P4_PCPU_GET_CFGFLAGS(PC,RI) (P4_PCPU_GET_FLAGS(PC,RI,0xF0) >> 4)
+#define P4_PCPU_SET_CFGFLAGS(PC,RI,C) P4_PCPU_SET_FLAGS(PC,RI,0xF0,((C) <<4))
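+
+/*
+ * Worked example: pc_flags[ri] == 0x21 packs CFGFLAGS == 0x2 in the
+ * high nibble (the PMC is configured on the secondary logical CPU
+ * only) and a RUNCOUNT of 1 in the low nibble (one thread is
+ * currently using the PMC).
+ */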
+
+#define P4_CPU_TO_FLAG(C)		(P4_CPU_IS_HTT_SECONDARY(C) ? 0x2 : 0x1)
+
+#define P4_PCPU_GET_INTRFLAG(PC,I) ((PC)->pc_intrflag & (1 << (I)))
+#define P4_PCPU_SET_INTRFLAG(PC,I,V) do { \
+ uint32_t __mask; \
+ __mask = 1 << (I); \
+ if ((V)) \
+ (PC)->pc_intrflag |= __mask; \
+ else \
+ (PC)->pc_intrflag &= ~__mask; \
+ } while (0)
+
+/*
+ * A minimal spin lock implementation for use inside the NMI handler.
+ *
+ * We don't want to use a regular spin lock here, because curthread
+ * may not be consistent at the time the handler is invoked.
+ */
+#define P4_PCPU_ACQ_INTR_SPINLOCK(PC) do {				\
+	while (!atomic_cmpset_acq_int(&(PC)->pc_intrlock, 0, 1))	\
+		ia32_pause();						\
+	} while (0)
+#define P4_PCPU_REL_INTR_SPINLOCK(PC)					\
+	atomic_store_rel_int(&(PC)->pc_intrlock, 0)
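+
+/*
+ * Usage sketch: on HTT systems the NMI handler brackets its critical
+ * section as
+ *
+ *	P4_PCPU_ACQ_INTR_SPINLOCK(pc);
+ *	... examine and update pc->pc_intrflag ...
+ *	P4_PCPU_REL_INTR_SPINLOCK(pc);
+ *
+ * which is how p4_intr() below serializes the two logical CPUs.
+ */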
+
+/* ESCR row disposition */
+static int p4_escrdisp[P4_NESCR];
+
+#define P4_ESCR_ROW_DISP_IS_THREAD(E) (p4_escrdisp[(E)] > 0)
+#define P4_ESCR_ROW_DISP_IS_STANDALONE(E) (p4_escrdisp[(E)] < 0)
+#define P4_ESCR_ROW_DISP_IS_FREE(E) (p4_escrdisp[(E)] == 0)
+
+#define P4_ESCR_MARK_ROW_STANDALONE(E) do { \
+ KASSERT(p4_escrdisp[(E)] <= 0, ("[p4,%d] row disposition error",\
+ __LINE__)); \
+ atomic_add_int(&p4_escrdisp[(E)], -1); \
+ KASSERT(p4_escrdisp[(E)] >= (-pmc_cpu_max_active()), \
+ ("[p4,%d] row disposition error", __LINE__)); \
+} while (0)
+
+#define P4_ESCR_UNMARK_ROW_STANDALONE(E) do { \
+ atomic_add_int(&p4_escrdisp[(E)], 1); \
+ KASSERT(p4_escrdisp[(E)] <= 0, ("[p4,%d] row disposition error",\
+ __LINE__)); \
+} while (0)
+
+#define P4_ESCR_MARK_ROW_THREAD(E) do { \
+ KASSERT(p4_escrdisp[(E)] >= 0, ("[p4,%d] row disposition error", \
+ __LINE__)); \
+ atomic_add_int(&p4_escrdisp[(E)], 1); \
+} while (0)
+
+#define P4_ESCR_UNMARK_ROW_THREAD(E) do { \
+ atomic_add_int(&p4_escrdisp[(E)], -1); \
+ KASSERT(p4_escrdisp[(E)] >= 0, ("[p4,%d] row disposition error", \
+ __LINE__)); \
+} while (0)
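+
+/*
+ * Example of the disposition counts: two process-mode PMCs sharing an
+ * ESCR row leave p4_escrdisp[] at +2 (THREAD), while a system-mode
+ * allocation on each of two CPUs leaves it at -2 (STANDALONE); only a
+ * count of zero (FREE) permits the other kind of allocation.
+ */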
+
+#define P4_PMC_IS_STOPPED(cccr) ((rdmsr(cccr) & P4_CCCR_ENABLE) == 0)
+
+#define P4_CPU_IS_HTT_SECONDARY(cpu) \
+ (p4_system_has_htt ? ((cpu) & 1) : 0)
+#define P4_TO_HTT_PRIMARY(cpu) \
+ (p4_system_has_htt ? ((cpu) & ~1) : (cpu))
+
+#define P4_CCCR_Tx_MASK (~(P4_CCCR_OVF_PMI_T0|P4_CCCR_OVF_PMI_T1| \
+ P4_CCCR_ENABLE|P4_CCCR_OVF))
+#define P4_ESCR_Tx_MASK (~(P4_ESCR_T0_OS|P4_ESCR_T0_USR|P4_ESCR_T1_OS| \
+ P4_ESCR_T1_USR))
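+
+/*
+ * The Tx masks strip the per-logical-CPU and status bits, so a value
+ * read back from a CCCR/ESCR MSR can be compared against the stored
+ * software configuration; see the KASSERTs in p4_start_pmc().
+ */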
+
+/*
+ * support routines
+ */
+
+static struct p4_event_descr *
+p4_find_event(enum pmc_event ev)
+{
+ int n;
+
+ for (n = 0; n < P4_NEVENTS; n++)
+ if (p4_events[n].pm_event == ev)
+ break;
+ if (n == P4_NEVENTS)
+ return (NULL);
+ return (&p4_events[n]);
+}
+
+/*
+ * Initialize per-cpu state
+ */
+
+static int
+p4_pcpu_init(struct pmc_mdep *md, int cpu)
+{
+ char *pescr;
+ int n, first_ri, phycpu;
+ struct pmc_hw *phw;
+ struct p4_cpu *p4c;
+ struct pmc_cpu *pc, *plc;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[p4,%d] insane cpu number %d", __LINE__, cpu));
+
+ PMCDBG(MDP,INI,0, "p4-init cpu=%d is-primary=%d", cpu,
+ pmc_cpu_is_primary(cpu) != 0);
+
+ first_ri = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_P4].pcd_ri;
+
+ /*
+ * The two CPUs in an HT pair share their per-cpu state.
+ *
+ * For HT capable CPUs, we assume that the two logical
+ * processors in the HT pair get two consecutive CPU ids
+ * starting with an even id #.
+ *
+ * The primary CPU (the even numbered CPU of the pair) would
+ * have been initialized prior to the initialization for the
+ * secondary.
+ */
+
+ if (!pmc_cpu_is_primary(cpu) && (cpu & 1)) {
+
+ p4_system_has_htt = 1;
+
+ phycpu = P4_TO_HTT_PRIMARY(cpu);
+ pc = pmc_pcpu[phycpu];
+ plc = pmc_pcpu[cpu];
+
+ KASSERT(plc != pc, ("[p4,%d] per-cpu config error", __LINE__));
+
+ PMCDBG(MDP,INI,1, "p4-init cpu=%d phycpu=%d pc=%p", cpu,
+ phycpu, pc);
+ KASSERT(pc, ("[p4,%d] Null Per-Cpu state cpu=%d phycpu=%d",
+ __LINE__, cpu, phycpu));
+
+ /* PMCs are shared with the physical CPU. */
+ for (n = 0; n < P4_NPMCS; n++)
+ plc->pc_hwpmcs[n + first_ri] =
+ pc->pc_hwpmcs[n + first_ri];
+
+ return (0);
+ }
+
+ p4c = malloc(sizeof(struct p4_cpu), M_PMC, M_WAITOK|M_ZERO);
+
+ if (p4c == NULL)
+ return (ENOMEM);
+
+ pc = pmc_pcpu[cpu];
+
+ KASSERT(pc != NULL, ("[p4,%d] cpu %d null per-cpu", __LINE__, cpu));
+
+ p4_pcpu[cpu] = p4c;
+ phw = p4c->pc_p4pmcs;
+
+ for (n = 0; n < P4_NPMCS; n++, phw++) {
+ phw->phw_state = PMC_PHW_FLAG_IS_ENABLED |
+ PMC_PHW_CPU_TO_STATE(cpu) | PMC_PHW_INDEX_TO_STATE(n);
+ phw->phw_pmc = NULL;
+ pc->pc_hwpmcs[n + first_ri] = phw;
+ }
+
+ pescr = p4c->pc_escrs;
+ for (n = 0; n < P4_NESCR; n++)
+ *pescr++ = P4_INVALID_PMC_INDEX;
+
+ mtx_init(&p4c->pc_mtx, "p4-pcpu", "pmc-leaf", MTX_SPIN);
+
+ return (0);
+}
+
+/*
+ * Destroy per-cpu state.
+ */
+
+static int
+p4_pcpu_fini(struct pmc_mdep *md, int cpu)
+{
+ int first_ri, i;
+ struct p4_cpu *p4c;
+ struct pmc_cpu *pc;
+
+ PMCDBG(MDP,INI,0, "p4-cleanup cpu=%d", cpu);
+
+ pc = pmc_pcpu[cpu];
+ first_ri = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_P4].pcd_ri;
+
+ for (i = 0; i < P4_NPMCS; i++)
+ pc->pc_hwpmcs[i + first_ri] = NULL;
+
+ if (!pmc_cpu_is_primary(cpu) && (cpu & 1))
+ return (0);
+
+ p4c = p4_pcpu[cpu];
+
+ KASSERT(p4c != NULL, ("[p4,%d] NULL pcpu", __LINE__));
+
+ /* Turn off all PMCs on this CPU */
+ for (i = 0; i < P4_NPMCS - 1; i++)
+ wrmsr(P4_CCCR_MSR_FIRST + i,
+ rdmsr(P4_CCCR_MSR_FIRST + i) & ~P4_CCCR_ENABLE);
+
+ mtx_destroy(&p4c->pc_mtx);
+
+ free(p4c, M_PMC);
+
+ p4_pcpu[cpu] = NULL;
+
+ return (0);
+}
+
+/*
+ * Read a PMC
+ */
+
+static int
+p4_read_pmc(int cpu, int ri, pmc_value_t *v)
+{
+ struct pmc *pm;
+ pmc_value_t tmp;
+ struct p4_cpu *pc;
+ enum pmc_mode mode;
+ struct p4pmc_descr *pd;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[p4,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < P4_NPMCS,
+ ("[p4,%d] illegal row-index %d", __LINE__, ri));
+
+ pc = p4_pcpu[P4_TO_HTT_PRIMARY(cpu)];
+ pm = pc->pc_p4pmcs[ri].phw_pmc;
+ pd = &p4_pmcdesc[ri];
+
+ KASSERT(pm != NULL,
+ ("[p4,%d] No owner for HWPMC [cpu%d,pmc%d]", __LINE__, cpu, ri));
+
+ KASSERT(pd->pm_descr.pd_class == PMC_TO_CLASS(pm),
+ ("[p4,%d] class mismatch pd %d != id class %d", __LINE__,
+ pd->pm_descr.pd_class, PMC_TO_CLASS(pm)));
+
+ mode = PMC_TO_MODE(pm);
+
+ PMCDBG(MDP,REA,1, "p4-read cpu=%d ri=%d mode=%d", cpu, ri, mode);
+
+ KASSERT(pd->pm_descr.pd_class == PMC_CLASS_P4,
+ ("[p4,%d] unknown PMC class %d", __LINE__, pd->pm_descr.pd_class));
+
+ tmp = rdmsr(p4_pmcdesc[ri].pm_pmc_msr);
+
+ if (PMC_IS_VIRTUAL_MODE(mode)) {
+ if (tmp < P4_PCPU_HW_VALUE(pc,ri,cpu)) /* 40 bit overflow */
+ tmp += (P4_PERFCTR_MASK + 1) -
+ P4_PCPU_HW_VALUE(pc,ri,cpu);
+ else
+ tmp -= P4_PCPU_HW_VALUE(pc,ri,cpu);
+ tmp += P4_PCPU_PMC_VALUE(pc,ri,cpu);
+ }
+
+ if (PMC_IS_SAMPLING_MODE(mode)) /* undo transformation */
+ *v = P4_PERFCTR_VALUE_TO_RELOAD_COUNT(tmp);
+ else
+ *v = tmp;
+
+ PMCDBG(MDP,REA,2, "p4-read -> %jx", *v);
+
+ return (0);
+}
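+
+/*
+ * Worked example of the 40-bit wraparound adjustment above: if the
+ * hardware value saved at start time was 0xFFFFFFFFF0 and the counter
+ * now reads 0x10, the accumulated delta is
+ * (2^40 - 0xFFFFFFFFF0) + 0x10 = 0x20 events.
+ */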
+
+/*
+ * Write a PMC
+ */
+
+static int
+p4_write_pmc(int cpu, int ri, pmc_value_t v)
+{
+ enum pmc_mode mode;
+ struct pmc *pm;
+ struct p4_cpu *pc;
+ const struct pmc_hw *phw;
+ const struct p4pmc_descr *pd;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[amd,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < P4_NPMCS,
+ ("[amd,%d] illegal row-index %d", __LINE__, ri));
+
+ pc = p4_pcpu[P4_TO_HTT_PRIMARY(cpu)];
+ phw = &pc->pc_p4pmcs[ri];
+ pm = phw->phw_pmc;
+ pd = &p4_pmcdesc[ri];
+
+ KASSERT(pm != NULL,
+ ("[p4,%d] No owner for HWPMC [cpu%d,pmc%d]", __LINE__,
+ cpu, ri));
+
+ mode = PMC_TO_MODE(pm);
+
+ PMCDBG(MDP,WRI,1, "p4-write cpu=%d ri=%d mode=%d v=%jx", cpu, ri,
+ mode, v);
+
+ /*
+	 * Write the PMC value to the register/saved value: for
+	 * sampling mode PMCs, the value programmed into the counter
+	 * is (1 - C), i.e., the two's complement of (C - 1), where
+	 * 'C' is the requested sample count; see
+	 * P4_RELOAD_COUNT_TO_PERFCTR_VALUE().
+ */
+ if (PMC_IS_SAMPLING_MODE(mode))
+ v = P4_RELOAD_COUNT_TO_PERFCTR_VALUE(v);
+
+ if (PMC_IS_SYSTEM_MODE(mode))
+ wrmsr(pd->pm_pmc_msr, v);
+ else
+ P4_PCPU_PMC_VALUE(pc,ri,cpu) = v;
+
+ return (0);
+}
+
+/*
+ * Configure a PMC 'pm' on the given CPU and row-index.
+ *
+ * 'pm' may be NULL to indicate de-configuration.
+ *
+ * On HTT systems, a PMC may get configured twice, once for each
+ * "logical" CPU. We track this using the CFGFLAGS field of the
+ * per-cpu state; this field is a bit mask with one bit each for
+ * logical CPUs 0 & 1.
+ */
+
+static int
+p4_config_pmc(int cpu, int ri, struct pmc *pm)
+{
+ struct pmc_hw *phw;
+ struct p4_cpu *pc;
+ int cfgflags, cpuflag;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[p4,%d] illegal CPU %d", __LINE__, cpu));
+
+ KASSERT(ri >= 0 && ri < P4_NPMCS,
+ ("[p4,%d] illegal row-index %d", __LINE__, ri));
+
+ PMCDBG(MDP,CFG,1, "cpu=%d ri=%d pm=%p", cpu, ri, pm);
+
+ pc = p4_pcpu[P4_TO_HTT_PRIMARY(cpu)];
+ phw = &pc->pc_p4pmcs[ri];
+
+ KASSERT(pm == NULL || phw->phw_pmc == NULL ||
+ (p4_system_has_htt && phw->phw_pmc == pm),
+ ("[p4,%d] hwpmc not unconfigured before re-config", __LINE__));
+
+ mtx_lock_spin(&pc->pc_mtx);
+ cfgflags = P4_PCPU_GET_CFGFLAGS(pc,ri);
+
+	KASSERT(cfgflags >= 0 && cfgflags <= 3,
+ ("[p4,%d] illegal cfgflags cfg=%d on cpu=%d ri=%d", __LINE__,
+ cfgflags, cpu, ri));
+
+ KASSERT(cfgflags == 0 || phw->phw_pmc,
+ ("[p4,%d] cpu=%d ri=%d pmc configured with zero cfg count",
+ __LINE__, cpu, ri));
+
+ cpuflag = P4_CPU_TO_FLAG(cpu);
+
+ if (pm) { /* config */
+ if (cfgflags == 0)
+ phw->phw_pmc = pm;
+
+ KASSERT(phw->phw_pmc == pm,
+ ("[p4,%d] cpu=%d ri=%d config %p != hw %p",
+ __LINE__, cpu, ri, pm, phw->phw_pmc));
+
+ cfgflags |= cpuflag;
+ } else { /* unconfig */
+ cfgflags &= ~cpuflag;
+
+ if (cfgflags == 0)
+ phw->phw_pmc = NULL;
+ }
+
+	KASSERT(cfgflags >= 0 && cfgflags <= 3,
+	    ("[p4,%d] illegal cfgflags cfg=%d on cpu=%d ri=%d", __LINE__,
+ cfgflags, cpu, ri));
+
+ P4_PCPU_SET_CFGFLAGS(pc,ri,cfgflags);
+
+ mtx_unlock_spin(&pc->pc_mtx);
+
+ return (0);
+}
+
+/*
+ * Retrieve a configured PMC pointer from hardware state.
+ */
+
+static int
+p4_get_config(int cpu, int ri, struct pmc **ppm)
+{
+ int cfgflags;
+ struct p4_cpu *pc;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[p4,%d] illegal CPU %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < P4_NPMCS,
+ ("[p4,%d] illegal row-index %d", __LINE__, ri));
+
+ pc = p4_pcpu[P4_TO_HTT_PRIMARY(cpu)];
+
+ mtx_lock_spin(&pc->pc_mtx);
+ cfgflags = P4_PCPU_GET_CFGFLAGS(pc,ri);
+ mtx_unlock_spin(&pc->pc_mtx);
+
+ if (cfgflags & P4_CPU_TO_FLAG(cpu))
+ *ppm = pc->pc_p4pmcs[ri].phw_pmc; /* PMC config'ed on this CPU */
+ else
+ *ppm = NULL;
+
+	return (0);
+}
+
+/*
+ * Allocate a PMC.
+ *
+ * The allocation strategy differs between HTT and non-HTT systems.
+ *
+ * The non-HTT case:
+ * - Given the desired event and the PMC row-index, lookup the
+ * list of valid ESCRs for the event.
+ * - For each valid ESCR:
+ * - Check if the ESCR is free and the ESCR row is in a compatible
+ *     mode (i.e., system or process).
+ * - Check if the ESCR is usable with a P4 PMC at the desired row-index.
+ * If everything matches, we determine the appropriate bit values for the
+ * ESCR and CCCR registers.
+ *
+ * The HTT case:
+ *
+ * - Process mode PMCs require special care. The FreeBSD scheduler could
+ * schedule any two processes on the same physical CPU. We need to ensure
+ * that a given PMC row-index is never allocated to two different
+ * PMCs owned by different user-processes.
+ * This is ensured by always allocating a PMC from a 'FREE' PMC row
+ * if the system has HTT active.
+ * - A similar check needs to be done for ESCRs; we do not want two PMCs
+ * using the same ESCR to be scheduled at the same time. Thus ESCR
+ * allocation is also restricted to FREE rows if the system has HTT
+ * enabled.
+ * - Thirdly, some events are 'thread-independent' (TI), i.e.,
+ * the PMC hardware cannot distinguish between events caused by
+ * different logical CPUs. This makes it impossible to assign events
+ * to a given thread of execution. If the system has HTT enabled,
+ * these events are not allowed for process-mode PMCs.
+ */
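+
+/*
+ * For example, PMC_EV_P4_TC_DELIVER_MODE is marked TI in p4_events[],
+ * so on an HTT system p4_allocate_pmc() below rejects it for
+ * process-private modes and it can only be used system-wide.
+ */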
+
+static int
+p4_allocate_pmc(int cpu, int ri, struct pmc *pm,
+ const struct pmc_op_pmcallocate *a)
+{
+ int found, n, m;
+ uint32_t caps, cccrvalue, escrvalue, tflags;
+ enum pmc_p4escr escr;
+ struct p4_cpu *pc;
+ struct p4_event_descr *pevent;
+ const struct p4pmc_descr *pd;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[p4,%d] illegal CPU %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < P4_NPMCS,
+ ("[p4,%d] illegal row-index value %d", __LINE__, ri));
+
+ pd = &p4_pmcdesc[ri];
+
+ PMCDBG(MDP,ALL,1, "p4-allocate ri=%d class=%d pmccaps=0x%x "
+ "reqcaps=0x%x", ri, pd->pm_descr.pd_class, pd->pm_descr.pd_caps,
+ pm->pm_caps);
+
+ /* check class */
+ if (pd->pm_descr.pd_class != a->pm_class)
+ return (EINVAL);
+
+ /* check requested capabilities */
+ caps = a->pm_caps;
+ if ((pd->pm_descr.pd_caps & caps) != caps)
+ return (EPERM);
+
+ /*
+ * If the system has HTT enabled, and the desired allocation
+ * mode is process-private, and the PMC row disposition is not
+ * FREE (0), decline the allocation.
+ */
+
+ if (p4_system_has_htt &&
+ PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)) &&
+ pmc_getrowdisp(ri) != 0)
+ return (EBUSY);
+
+ KASSERT(pd->pm_descr.pd_class == PMC_CLASS_P4,
+ ("[p4,%d] unknown PMC class %d", __LINE__,
+ pd->pm_descr.pd_class));
+
+ if (pm->pm_event < PMC_EV_P4_FIRST ||
+ pm->pm_event > PMC_EV_P4_LAST)
+ return (EINVAL);
+
+ if ((pevent = p4_find_event(pm->pm_event)) == NULL)
+ return (ESRCH);
+
+ PMCDBG(MDP,ALL,2, "pevent={ev=%d,escrsel=0x%x,cccrsel=0x%x,isti=%d}",
+ pevent->pm_event, pevent->pm_escr_eventselect,
+ pevent->pm_cccr_select, pevent->pm_is_ti_event);
+
+ /*
+	 * Some PMC events are 'thread independent' and therefore
+ * cannot be used for process-private modes if HTT is being
+ * used.
+ */
+
+ if (P4_EVENT_IS_TI(pevent) &&
+ PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm)) &&
+ p4_system_has_htt)
+ return (EINVAL);
+
+ pc = p4_pcpu[P4_TO_HTT_PRIMARY(cpu)];
+
+ found = 0;
+
+ /* look for a suitable ESCR for this event */
+ for (n = 0; n < P4_MAX_ESCR_PER_EVENT && !found; n++) {
+ if ((escr = pevent->pm_escrs[n]) == P4_ESCR_NONE)
+ break; /* out of ESCRs */
+ /*
+ * Check ESCR row disposition.
+ *
+ * If the request is for a system-mode PMC, then the
+ * ESCR row should not be in process-virtual mode, and
+ * should also be free on the current CPU.
+ */
+
+ if (PMC_IS_SYSTEM_MODE(PMC_TO_MODE(pm))) {
+ if (P4_ESCR_ROW_DISP_IS_THREAD(escr) ||
+ pc->pc_escrs[escr] != P4_INVALID_PMC_INDEX)
+ continue;
+ }
+
+ /*
+ * If the request is for a process-virtual PMC, and if
+ * HTT is not enabled, we can use an ESCR row that is
+ * either FREE or already in process mode.
+ *
+ * If HTT is enabled, then we need to ensure that a
+ * given ESCR is never allocated to two PMCS that
+ * could run simultaneously on the two logical CPUs of
+ * a CPU package. We ensure this be only allocating
+ * ESCRs from rows marked as 'FREE'.
+ */
+
+ if (PMC_IS_VIRTUAL_MODE(PMC_TO_MODE(pm))) {
+ if (p4_system_has_htt) {
+ if (!P4_ESCR_ROW_DISP_IS_FREE(escr))
+ continue;
+ } else
+ if (P4_ESCR_ROW_DISP_IS_STANDALONE(escr))
+ continue;
+ }
+
+ /*
+ * We found a suitable ESCR for this event. Now check if
+ * this escr can work with the PMC at row-index 'ri'.
+ */
+
+ for (m = 0; m < P4_MAX_PMC_PER_ESCR; m++)
+ if (p4_escrs[escr].pm_pmcs[m] == pd->pm_pmcnum) {
+ found = 1;
+ break;
+ }
+ }
+
+ if (found == 0)
+ return (ESRCH);
+
+ KASSERT((int) escr >= 0 && escr < P4_NESCR,
+ ("[p4,%d] illegal ESCR value %d", __LINE__, escr));
+
+ /* mark ESCR row mode */
+ if (PMC_IS_SYSTEM_MODE(PMC_TO_MODE(pm))) {
+ pc->pc_escrs[escr] = ri; /* mark ESCR as in use on this cpu */
+ P4_ESCR_MARK_ROW_STANDALONE(escr);
+ } else {
+ KASSERT(pc->pc_escrs[escr] == P4_INVALID_PMC_INDEX,
+ ("[p4,%d] escr[%d] already in use", __LINE__, escr));
+ P4_ESCR_MARK_ROW_THREAD(escr);
+ }
+
+ pm->pm_md.pm_p4.pm_p4_escrmsr = p4_escrs[escr].pm_escr_msr;
+ pm->pm_md.pm_p4.pm_p4_escr = escr;
+
+ cccrvalue = P4_CCCR_TO_ESCR_SELECT(pevent->pm_cccr_select);
+ escrvalue = P4_ESCR_TO_EVENT_SELECT(pevent->pm_escr_eventselect);
+
+ /* CCCR fields */
+ if (caps & PMC_CAP_THRESHOLD)
+ cccrvalue |= (a->pm_md.pm_p4.pm_p4_cccrconfig &
+ P4_CCCR_THRESHOLD_MASK) | P4_CCCR_COMPARE;
+
+ if (caps & PMC_CAP_EDGE)
+ cccrvalue |= P4_CCCR_EDGE;
+
+ if (caps & PMC_CAP_INVERT)
+ cccrvalue |= P4_CCCR_COMPLEMENT;
+
+ if (p4_system_has_htt)
+ cccrvalue |= a->pm_md.pm_p4.pm_p4_cccrconfig &
+ P4_CCCR_ACTIVE_THREAD_MASK;
+ else /* no HTT; thread field should be '11b' */
+ cccrvalue |= P4_CCCR_TO_ACTIVE_THREAD(0x3);
+
+ if (caps & PMC_CAP_CASCADE)
+ cccrvalue |= P4_CCCR_CASCADE;
+
+ /* On HTT systems the PMI T0 field may get moved to T1 at pmc start */
+ if (caps & PMC_CAP_INTERRUPT)
+ cccrvalue |= P4_CCCR_OVF_PMI_T0;
+
+ /* ESCR fields */
+ if (caps & PMC_CAP_QUALIFIER)
+ escrvalue |= a->pm_md.pm_p4.pm_p4_escrconfig &
+ P4_ESCR_EVENT_MASK_MASK;
+ if (caps & PMC_CAP_TAGGING)
+ escrvalue |= (a->pm_md.pm_p4.pm_p4_escrconfig &
+ P4_ESCR_TAG_VALUE_MASK) | P4_ESCR_TAG_ENABLE;
+
+ /* HTT: T0_{OS,USR} bits may get moved to T1 at pmc start */
+ tflags = 0;
+ if (caps & PMC_CAP_SYSTEM)
+ tflags |= P4_ESCR_T0_OS;
+ if (caps & PMC_CAP_USER)
+ tflags |= P4_ESCR_T0_USR;
+ if (tflags == 0)
+ tflags = (P4_ESCR_T0_OS|P4_ESCR_T0_USR);
+ escrvalue |= tflags;
+
+ pm->pm_md.pm_p4.pm_p4_cccrvalue = cccrvalue;
+ pm->pm_md.pm_p4.pm_p4_escrvalue = escrvalue;
+
+ PMCDBG(MDP,ALL,2, "p4-allocate cccrsel=0x%x cccrval=0x%x "
+ "escr=%d escrmsr=0x%x escrval=0x%x", pevent->pm_cccr_select,
+ cccrvalue, escr, pm->pm_md.pm_p4.pm_p4_escrmsr, escrvalue);
+
+ return (0);
+}
+
+/*
+ * release a PMC.
+ */
+
+static int
+p4_release_pmc(int cpu, int ri, struct pmc *pm)
+{
+ enum pmc_p4escr escr;
+ struct p4_cpu *pc;
+
+ KASSERT(ri >= 0 && ri < P4_NPMCS,
+ ("[p4,%d] illegal row-index %d", __LINE__, ri));
+
+ escr = pm->pm_md.pm_p4.pm_p4_escr;
+
+ PMCDBG(MDP,REL,1, "p4-release cpu=%d ri=%d escr=%d", cpu, ri, escr);
+
+ if (PMC_IS_SYSTEM_MODE(PMC_TO_MODE(pm))) {
+ pc = p4_pcpu[P4_TO_HTT_PRIMARY(cpu)];
+
+ KASSERT(pc->pc_p4pmcs[ri].phw_pmc == NULL,
+ ("[p4,%d] releasing configured PMC ri=%d", __LINE__, ri));
+
+ P4_ESCR_UNMARK_ROW_STANDALONE(escr);
+ KASSERT(pc->pc_escrs[escr] == ri,
+ ("[p4,%d] escr[%d] not allocated to ri %d", __LINE__,
+ escr, ri));
+ pc->pc_escrs[escr] = P4_INVALID_PMC_INDEX; /* mark as free */
+ } else
+ P4_ESCR_UNMARK_ROW_THREAD(escr);
+
+ return (0);
+}
+
+/*
+ * Start a PMC
+ */
+
+static int
+p4_start_pmc(int cpu, int ri)
+{
+ int rc;
+ struct pmc *pm;
+ struct p4_cpu *pc;
+ struct p4pmc_descr *pd;
+ uint32_t cccrvalue, cccrtbits, escrvalue, escrmsr, escrtbits;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[p4,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < P4_NPMCS,
+ ("[p4,%d] illegal row-index %d", __LINE__, ri));
+
+ pc = p4_pcpu[P4_TO_HTT_PRIMARY(cpu)];
+ pm = pc->pc_p4pmcs[ri].phw_pmc;
+ pd = &p4_pmcdesc[ri];
+
+ KASSERT(pm != NULL,
+ ("[p4,%d] starting cpu%d,pmc%d with null pmc", __LINE__, cpu, ri));
+
+ PMCDBG(MDP,STA,1, "p4-start cpu=%d ri=%d", cpu, ri);
+
+ KASSERT(pd->pm_descr.pd_class == PMC_CLASS_P4,
+ ("[p4,%d] wrong PMC class %d", __LINE__,
+ pd->pm_descr.pd_class));
+
+ /* retrieve the desired CCCR/ESCR values from the PMC */
+ cccrvalue = pm->pm_md.pm_p4.pm_p4_cccrvalue;
+ escrvalue = pm->pm_md.pm_p4.pm_p4_escrvalue;
+ escrmsr = pm->pm_md.pm_p4.pm_p4_escrmsr;
+
+ /* extract and zero the logical processor selection bits */
+ cccrtbits = cccrvalue & P4_CCCR_OVF_PMI_T0;
+ escrtbits = escrvalue & (P4_ESCR_T0_OS|P4_ESCR_T0_USR);
+ cccrvalue &= ~P4_CCCR_OVF_PMI_T0;
+ escrvalue &= ~(P4_ESCR_T0_OS|P4_ESCR_T0_USR);
+
+ if (P4_CPU_IS_HTT_SECONDARY(cpu)) { /* shift T0 bits to T1 position */
+ cccrtbits <<= 1;
+ escrtbits >>= 2;
+ }
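+	/*
+	 * For example, with the bit layout in hwpmc_piv.h:
+	 * P4_CCCR_OVF_PMI_T0 (bit 26) becomes P4_CCCR_OVF_PMI_T1
+	 * (bit 27), and the ESCR T0_OS/T0_USR bits (bits 3:2) move
+	 * down to T1_OS/T1_USR (bits 1:0).
+	 */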
+
+ /* start system mode PMCs directly */
+ if (PMC_IS_SYSTEM_MODE(PMC_TO_MODE(pm))) {
+ wrmsr(escrmsr, escrvalue | escrtbits);
+ wrmsr(pd->pm_cccr_msr, cccrvalue | cccrtbits | P4_CCCR_ENABLE);
+		return (0);
+ }
+
+ /*
+ * Thread mode PMCs
+ *
+ * On HTT machines, the same PMC could be scheduled on the
+ * same physical CPU twice (once for each logical CPU), for
+ * example, if two threads of a multi-threaded process get
+ * scheduled on the same CPU.
+ *
+ */
+
+ mtx_lock_spin(&pc->pc_mtx);
+
+ rc = P4_PCPU_GET_RUNCOUNT(pc,ri);
+ KASSERT(rc == 0 || rc == 1,
+ ("[p4,%d] illegal runcount cpu=%d ri=%d rc=%d", __LINE__, cpu, ri,
+ rc));
+
+ if (rc == 0) { /* 1st CPU and the non-HTT case */
+
+ KASSERT(P4_PMC_IS_STOPPED(pd->pm_cccr_msr),
+ ("[p4,%d] cpu=%d ri=%d cccr=0x%x not stopped", __LINE__,
+ cpu, ri, pd->pm_cccr_msr));
+
+ /* write out the low 40 bits of the saved value to hardware */
+ wrmsr(pd->pm_pmc_msr,
+ P4_PCPU_PMC_VALUE(pc,ri,cpu) & P4_PERFCTR_MASK);
+
+ } else if (rc == 1) { /* 2nd CPU */
+
+ /*
+ * Stop the PMC and retrieve the CCCR and ESCR values
+ * from their MSRs, and turn on the additional T[0/1]
+ * bits for the 2nd CPU.
+ */
+
+ cccrvalue = rdmsr(pd->pm_cccr_msr);
+ wrmsr(pd->pm_cccr_msr, cccrvalue & ~P4_CCCR_ENABLE);
+
+ /* check that the configuration bits read back match the PMC */
+ KASSERT((cccrvalue & P4_CCCR_Tx_MASK) ==
+ (pm->pm_md.pm_p4.pm_p4_cccrvalue & P4_CCCR_Tx_MASK),
+ ("[p4,%d] Extra CCCR bits cpu=%d rc=%d ri=%d "
+ "cccr=0x%x PMC=0x%x", __LINE__, cpu, rc, ri,
+ cccrvalue & P4_CCCR_Tx_MASK,
+ pm->pm_md.pm_p4.pm_p4_cccrvalue & P4_CCCR_Tx_MASK));
+ KASSERT(cccrvalue & P4_CCCR_ENABLE,
+ ("[p4,%d] 2nd cpu rc=%d cpu=%d ri=%d not running",
+ __LINE__, rc, cpu, ri));
+ KASSERT((cccrvalue & cccrtbits) == 0,
+ ("[p4,%d] CCCR T0/T1 mismatch rc=%d cpu=%d ri=%d"
+ "cccrvalue=0x%x tbits=0x%x", __LINE__, rc, cpu, ri,
+ cccrvalue, cccrtbits));
+
+ escrvalue = rdmsr(escrmsr);
+
+ KASSERT((escrvalue & P4_ESCR_Tx_MASK) ==
+ (pm->pm_md.pm_p4.pm_p4_escrvalue & P4_ESCR_Tx_MASK),
+ ("[p4,%d] Extra ESCR bits cpu=%d rc=%d ri=%d "
+ "escr=0x%x pm=0x%x", __LINE__, cpu, rc, ri,
+ escrvalue & P4_ESCR_Tx_MASK,
+ pm->pm_md.pm_p4.pm_p4_escrvalue & P4_ESCR_Tx_MASK));
+ KASSERT((escrvalue & escrtbits) == 0,
+ ("[p4,%d] ESCR T0/T1 mismatch rc=%d cpu=%d ri=%d "
+ "escrmsr=0x%x escrvalue=0x%x tbits=0x%x", __LINE__,
+ rc, cpu, ri, escrmsr, escrvalue, escrtbits));
+ }
+
+ /* Enable the correct bits for this CPU. */
+ escrvalue |= escrtbits;
+ cccrvalue |= cccrtbits | P4_CCCR_ENABLE;
+
+ /* Save HW value at the time of starting hardware */
+ P4_PCPU_HW_VALUE(pc,ri,cpu) = rdmsr(pd->pm_pmc_msr);
+
+ /* Program the ESCR and CCCR and start the PMC */
+ wrmsr(escrmsr, escrvalue);
+ wrmsr(pd->pm_cccr_msr, cccrvalue);
+
+ ++rc;
+ P4_PCPU_SET_RUNCOUNT(pc,ri,rc);
+
+ mtx_unlock_spin(&pc->pc_mtx);
+
+ PMCDBG(MDP,STA,2,"p4-start cpu=%d rc=%d ri=%d escr=%d "
+ "escrmsr=0x%x escrvalue=0x%x cccr_config=0x%x v=%jx", cpu, rc,
+ ri, pm->pm_md.pm_p4.pm_p4_escr, escrmsr, escrvalue,
+ cccrvalue, P4_PCPU_HW_VALUE(pc,ri,cpu));
+
+ return (0);
+}
+
+/*
+ * Stop a PMC.
+ */
+
+static int
+p4_stop_pmc(int cpu, int ri)
+{
+ int rc;
+ uint32_t cccrvalue, cccrtbits, escrvalue, escrmsr, escrtbits;
+ struct pmc *pm;
+ struct p4_cpu *pc;
+ struct p4pmc_descr *pd;
+ pmc_value_t tmp;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[p4,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < P4_NPMCS,
+ ("[p4,%d] illegal row index %d", __LINE__, ri));
+
+ pd = &p4_pmcdesc[ri];
+ pc = p4_pcpu[P4_TO_HTT_PRIMARY(cpu)];
+ pm = pc->pc_p4pmcs[ri].phw_pmc;
+
+ KASSERT(pm != NULL,
+ ("[p4,%d] null pmc for cpu%d, ri%d", __LINE__, cpu, ri));
+
+ PMCDBG(MDP,STO,1, "p4-stop cpu=%d ri=%d", cpu, ri);
+
+ if (PMC_IS_SYSTEM_MODE(PMC_TO_MODE(pm))) {
+ wrmsr(pd->pm_cccr_msr,
+ pm->pm_md.pm_p4.pm_p4_cccrvalue & ~P4_CCCR_ENABLE);
+ return (0);
+ }
+
+ /*
+ * Thread mode PMCs.
+ *
+ * On HTT machines, this PMC may be in use by two threads
+ * running on two logical CPUS. Thus we look at the
+	 * 'runcount' field and only turn off the appropriate T0/T1
+ * bits (and keep the PMC running) if two logical CPUs were
+ * using the PMC.
+ *
+ */
+
+ /* bits to mask */
+ cccrtbits = P4_CCCR_OVF_PMI_T0;
+ escrtbits = P4_ESCR_T0_OS | P4_ESCR_T0_USR;
+ if (P4_CPU_IS_HTT_SECONDARY(cpu)) {
+ cccrtbits <<= 1;
+ escrtbits >>= 2;
+ }
+
+ mtx_lock_spin(&pc->pc_mtx);
+
+ rc = P4_PCPU_GET_RUNCOUNT(pc,ri);
+
+ KASSERT(rc == 2 || rc == 1,
+ ("[p4,%d] illegal runcount cpu=%d ri=%d rc=%d", __LINE__, cpu, ri,
+ rc));
+
+ --rc;
+
+ P4_PCPU_SET_RUNCOUNT(pc,ri,rc);
+
+ /* Stop this PMC */
+ cccrvalue = rdmsr(pd->pm_cccr_msr);
+ wrmsr(pd->pm_cccr_msr, cccrvalue & ~P4_CCCR_ENABLE);
+
+ escrmsr = pm->pm_md.pm_p4.pm_p4_escrmsr;
+ escrvalue = rdmsr(escrmsr);
+
+ /* The current CPU should be running on this PMC */
+ KASSERT(escrvalue & escrtbits,
+ ("[p4,%d] ESCR T0/T1 mismatch cpu=%d rc=%d ri=%d escrmsr=0x%x "
+ "escrvalue=0x%x tbits=0x%x", __LINE__, cpu, rc, ri, escrmsr,
+ escrvalue, escrtbits));
+ KASSERT(PMC_IS_COUNTING_MODE(PMC_TO_MODE(pm)) ||
+ (cccrvalue & cccrtbits),
+ ("[p4,%d] CCCR T0/T1 mismatch cpu=%d ri=%d cccrvalue=0x%x "
+ "tbits=0x%x", __LINE__, cpu, ri, cccrvalue, cccrtbits));
+
+ /* get the current hardware reading */
+ tmp = rdmsr(pd->pm_pmc_msr);
+
+ if (rc == 1) { /* need to keep the PMC running */
+ escrvalue &= ~escrtbits;
+ cccrvalue &= ~cccrtbits;
+ wrmsr(escrmsr, escrvalue);
+ wrmsr(pd->pm_cccr_msr, cccrvalue);
+ }
+
+ mtx_unlock_spin(&pc->pc_mtx);
+
+ PMCDBG(MDP,STO,2, "p4-stop cpu=%d rc=%d ri=%d escrmsr=0x%x "
+ "escrval=0x%x cccrval=0x%x v=%jx", cpu, rc, ri, escrmsr,
+ escrvalue, cccrvalue, tmp);
+
+ if (tmp < P4_PCPU_HW_VALUE(pc,ri,cpu)) /* 40 bit counter overflow */
+ tmp += (P4_PERFCTR_MASK + 1) - P4_PCPU_HW_VALUE(pc,ri,cpu);
+ else
+ tmp -= P4_PCPU_HW_VALUE(pc,ri,cpu);
+
+ P4_PCPU_PMC_VALUE(pc,ri,cpu) += tmp;
+
+	return (0);
+}
+
+/*
+ * Handle an interrupt.
+ *
+ * The hardware sets the CCCR_OVF flag whenever a counter overflows,
+ * so the handler examines all 18 CCCR registers, processing the
+ * counters that have overflowed.
+ *
+ * On HTT machines, the CCCR register is shared and will interrupt
+ * both logical processors if so configured. Thus multiple logical
+ * CPUs could enter the NMI service routine at the same time. These
+ * will get serialized using a per-cpu spinlock dedicated for use in
+ * the NMI handler.
+ */
+
+static int
+p4_intr(int cpu, struct trapframe *tf)
+{
+ uint32_t cccrval, ovf_mask, ovf_partner;
+ int did_interrupt, error, ri;
+ struct p4_cpu *pc;
+ struct pmc *pm;
+ pmc_value_t v;
+
+ PMCDBG(MDP,INT, 1, "cpu=%d tf=0x%p um=%d", cpu, (void *) tf,
+ TRAPF_USERMODE(tf));
+
+ pc = p4_pcpu[P4_TO_HTT_PRIMARY(cpu)];
+
+ ovf_mask = P4_CPU_IS_HTT_SECONDARY(cpu) ?
+ P4_CCCR_OVF_PMI_T1 : P4_CCCR_OVF_PMI_T0;
+ ovf_mask |= P4_CCCR_OVF;
+ if (p4_system_has_htt)
+ ovf_partner = P4_CPU_IS_HTT_SECONDARY(cpu) ?
+ P4_CCCR_OVF_PMI_T0 : P4_CCCR_OVF_PMI_T1;
+ else
+ ovf_partner = 0;
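+	/*
+	 * For example, on the secondary logical CPU of an HTT pair,
+	 * ovf_mask is P4_CCCR_OVF | P4_CCCR_OVF_PMI_T1 (bits 31 and
+	 * 27) and ovf_partner is P4_CCCR_OVF_PMI_T0 (bit 26).
+	 */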
+ did_interrupt = 0;
+
+ if (p4_system_has_htt)
+ P4_PCPU_ACQ_INTR_SPINLOCK(pc);
+
+ /*
+ * Loop through all CCCRs, looking for ones that have
+ * interrupted this CPU.
+ */
+ for (ri = 0; ri < P4_NPMCS; ri++) {
+
+ /*
+ * Check if our partner logical CPU has already marked
+		 * this PMC as having interrupted it.  If so, reset
+ * the flag and process the interrupt, but leave the
+ * hardware alone.
+ */
+ if (p4_system_has_htt && P4_PCPU_GET_INTRFLAG(pc,ri)) {
+ P4_PCPU_SET_INTRFLAG(pc,ri,0);
+ did_interrupt = 1;
+
+ /*
+ * Ignore de-configured or stopped PMCs.
+ * Ignore PMCs not in sampling mode.
+ */
+ pm = pc->pc_p4pmcs[ri].phw_pmc;
+ if (pm == NULL ||
+ pm->pm_state != PMC_STATE_RUNNING ||
+ !PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) {
+ continue;
+ }
+ (void) pmc_process_interrupt(cpu, PMC_HR, pm, tf,
+ TRAPF_USERMODE(tf));
+ continue;
+ }
+
+ /*
+ * Fresh interrupt. Look for the CCCR_OVF bit
+ * and the OVF_Tx bit for this logical
+ * processor being set.
+ */
+ cccrval = rdmsr(P4_CCCR_MSR_FIRST + ri);
+
+ if ((cccrval & ovf_mask) != ovf_mask)
+ continue;
+
+ /*
+ * If the other logical CPU would also have been
+ * interrupted due to the PMC being shared, record
+ * this fact in the per-cpu saved interrupt flag
+ * bitmask.
+ */
+ if (p4_system_has_htt && (cccrval & ovf_partner))
+ P4_PCPU_SET_INTRFLAG(pc, ri, 1);
+
+ v = rdmsr(P4_PERFCTR_MSR_FIRST + ri);
+
+ PMCDBG(MDP,INT, 2, "ri=%d v=%jx", ri, v);
+
+ /* Stop the counter, and reset the overflow bit */
+ cccrval &= ~(P4_CCCR_OVF | P4_CCCR_ENABLE);
+ wrmsr(P4_CCCR_MSR_FIRST + ri, cccrval);
+
+ did_interrupt = 1;
+
+ /*
+ * Ignore de-configured or stopped PMCs. Ignore PMCs
+ * not in sampling mode.
+ */
+ pm = pc->pc_p4pmcs[ri].phw_pmc;
+
+ if (pm == NULL ||
+ pm->pm_state != PMC_STATE_RUNNING ||
+ !PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) {
+ continue;
+ }
+
+ /*
+ * Process the interrupt. Re-enable the PMC if
+ * processing was successful.
+ */
+ error = pmc_process_interrupt(cpu, PMC_HR, pm, tf,
+ TRAPF_USERMODE(tf));
+
+ /*
+		 * Only the first logical processor of an HTT pair to
+		 * execute the NMI handler restarts a PMC, and then
+		 * only if the interrupt was processed without error.
+ */
+ v = P4_RELOAD_COUNT_TO_PERFCTR_VALUE(
+ pm->pm_sc.pm_reloadcount);
+ wrmsr(P4_PERFCTR_MSR_FIRST + ri, v);
+ if (error == 0)
+ wrmsr(P4_CCCR_MSR_FIRST + ri,
+ cccrval | P4_CCCR_ENABLE);
+ }
+
+ /* allow the other CPU to proceed */
+ if (p4_system_has_htt)
+ P4_PCPU_REL_INTR_SPINLOCK(pc);
+
+ /*
+ * On Intel P4 CPUs, the PMC 'pcint' entry in the LAPIC gets
+ * masked when a PMC interrupts the CPU. We need to unmask
+ * the interrupt source explicitly.
+ */
+
+ if (did_interrupt)
+ lapic_reenable_pmc();
+
+ atomic_add_int(did_interrupt ? &pmc_stats.pm_intr_processed :
+ &pmc_stats.pm_intr_ignored, 1);
+
+ return (did_interrupt);
+}
+
+/*
+ * Describe a CPU's PMC state.
+ */
+
+static int
+p4_describe(int cpu, int ri, struct pmc_info *pi,
+ struct pmc **ppmc)
+{
+ int error;
+ size_t copied;
+ const struct p4pmc_descr *pd;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[p4,%d] illegal CPU %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < P4_NPMCS,
+ ("[p4,%d] row-index %d out of range", __LINE__, ri));
+
+ PMCDBG(MDP,OPS,1,"p4-describe cpu=%d ri=%d", cpu, ri);
+
+ if (P4_CPU_IS_HTT_SECONDARY(cpu))
+ return (EINVAL);
+
+ pd = &p4_pmcdesc[ri];
+
+ if ((error = copystr(pd->pm_descr.pd_name, pi->pm_name,
+ PMC_NAME_MAX, &copied)) != 0)
+ return (error);
+
+ pi->pm_class = pd->pm_descr.pd_class;
+
+ if (p4_pcpu[cpu]->pc_p4pmcs[ri].phw_state & PMC_PHW_FLAG_IS_ENABLED) {
+ pi->pm_enabled = TRUE;
+ *ppmc = p4_pcpu[cpu]->pc_p4pmcs[ri].phw_pmc;
+ } else {
+ pi->pm_enabled = FALSE;
+ *ppmc = NULL;
+ }
+
+ return (0);
+}
+
+/*
+ * Get MSR# for use with RDPMC.
+ */
+
+static int
+p4_get_msr(int ri, uint32_t *msr)
+{
+ KASSERT(ri >= 0 && ri < P4_NPMCS,
+ ("[p4,%d] ri %d out of range", __LINE__, ri));
+
+ *msr = p4_pmcdesc[ri].pm_pmc_msr - P4_PERFCTR_MSR_FIRST;
+
+ PMCDBG(MDP,OPS, 1, "ri=%d getmsr=0x%x", ri, *msr);
+
+	return (0);
+}
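+
+/*
+ * Example: for row-index 0 (BPU_COUNTER0, MSR 0x300) the computed
+ * value is 0, the index userland passes to the RDPMC instruction.
+ */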
+
+
+int
+pmc_p4_initialize(struct pmc_mdep *md, int ncpus)
+{
+ struct pmc_classdep *pcd;
+ struct p4_event_descr *pe;
+
+ KASSERT(md != NULL, ("[p4,%d] md is NULL", __LINE__));
+ KASSERT(cpu_vendor_id == CPU_VENDOR_INTEL,
+ ("[p4,%d] Initializing non-intel processor", __LINE__));
+
+ PMCDBG(MDP,INI,1, "%s", "p4-initialize");
+
+ /* Allocate space for pointers to per-cpu descriptors. */
+	p4_pcpu = malloc(sizeof(struct p4_cpu *) * ncpus, M_PMC,
+	    M_ZERO|M_WAITOK);
+
+ /* Fill in the class dependent descriptor. */
+ pcd = &md->pmd_classdep[PMC_MDEP_CLASS_INDEX_P4];
+
+ switch (md->pmd_cputype) {
+ case PMC_CPU_INTEL_PIV:
+
+ pcd->pcd_caps = P4_PMC_CAPS;
+ pcd->pcd_class = PMC_CLASS_P4;
+ pcd->pcd_num = P4_NPMCS;
+ pcd->pcd_ri = md->pmd_npmc;
+ pcd->pcd_width = 40;
+
+ pcd->pcd_allocate_pmc = p4_allocate_pmc;
+ pcd->pcd_config_pmc = p4_config_pmc;
+ pcd->pcd_describe = p4_describe;
+ pcd->pcd_get_config = p4_get_config;
+ pcd->pcd_get_msr = p4_get_msr;
+ pcd->pcd_pcpu_fini = p4_pcpu_fini;
+ pcd->pcd_pcpu_init = p4_pcpu_init;
+ pcd->pcd_read_pmc = p4_read_pmc;
+ pcd->pcd_release_pmc = p4_release_pmc;
+ pcd->pcd_start_pmc = p4_start_pmc;
+ pcd->pcd_stop_pmc = p4_stop_pmc;
+ pcd->pcd_write_pmc = p4_write_pmc;
+
+ md->pmd_pcpu_fini = NULL;
+ md->pmd_pcpu_init = NULL;
+ md->pmd_intr = p4_intr;
+ md->pmd_npmc += P4_NPMCS;
+
+ /* model specific configuration */
+ if ((cpu_id & 0xFFF) < 0xF27) {
+
+ /*
+ * On P4 and Xeon with CPUID < (Family 15,
+ * Model 2, Stepping 7), only one ESCR is
+ * available for the IOQ_ALLOCATION event.
+ */
+
+ pe = p4_find_event(PMC_EV_P4_IOQ_ALLOCATION);
+ pe->pm_escrs[1] = P4_ESCR_NONE;
+ }
+
+ break;
+
+ default:
+ KASSERT(0,("[p4,%d] Unknown CPU type", __LINE__));
+		return (ENOSYS);
+ }
+
+ return (0);
+}
+
+void
+pmc_p4_finalize(struct pmc_mdep *md)
+{
+#if defined(INVARIANTS)
+ int i, ncpus;
+#endif
+
+ KASSERT(p4_pcpu != NULL,
+ ("[p4,%d] NULL p4_pcpu", __LINE__));
+
+#if defined(INVARIANTS)
+ ncpus = pmc_cpu_max();
+ for (i = 0; i < ncpus; i++)
+ KASSERT(p4_pcpu[i] == NULL, ("[p4,%d] non-null pcpu %d",
+ __LINE__, i));
+#endif
+
+ free(p4_pcpu, M_PMC);
+ p4_pcpu = NULL;
+}
diff --git a/sys/dev/hwpmc/hwpmc_piv.h b/sys/dev/hwpmc/hwpmc_piv.h
new file mode 100644
index 0000000..ebde966
--- /dev/null
+++ b/sys/dev/hwpmc/hwpmc_piv.h
@@ -0,0 +1,125 @@
+/*-
+ * Copyright (c) 2005, Joseph Koshy
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/* Machine dependent interfaces */
+
+#ifndef _DEV_HWPMC_PIV_H_
+#define _DEV_HWPMC_PIV_H_ 1
+
+/* Intel P4 PMCs */
+
+#define P4_NPMCS 18
+#define P4_NESCR 45
+#define P4_INVALID_PMC_INDEX -1
+#define P4_MAX_ESCR_PER_EVENT 2
+#define P4_MAX_PMC_PER_ESCR 3
+
+#define P4_CCCR_OVF (1 << 31)
+#define P4_CCCR_CASCADE (1 << 30)
+#define P4_CCCR_OVF_PMI_T1 (1 << 27)
+#define P4_CCCR_OVF_PMI_T0 (1 << 26)
+#define P4_CCCR_FORCE_OVF (1 << 25)
+#define P4_CCCR_EDGE (1 << 24)
+#define P4_CCCR_THRESHOLD_SHIFT 20
+#define P4_CCCR_THRESHOLD_MASK 0x00F00000
+#define P4_CCCR_TO_THRESHOLD(C) (((C) << P4_CCCR_THRESHOLD_SHIFT) & \
+ P4_CCCR_THRESHOLD_MASK)
+#define P4_CCCR_COMPLEMENT (1 << 19)
+#define P4_CCCR_COMPARE (1 << 18)
+#define P4_CCCR_ACTIVE_THREAD_SHIFT 16
+#define P4_CCCR_ACTIVE_THREAD_MASK 0x00030000
+#define P4_CCCR_TO_ACTIVE_THREAD(T) (((T) << P4_CCCR_ACTIVE_THREAD_SHIFT) & \
+ P4_CCCR_ACTIVE_THREAD_MASK)
+#define P4_CCCR_ESCR_SELECT_SHIFT 13
+#define P4_CCCR_ESCR_SELECT_MASK 0x0000E000
+#define P4_CCCR_TO_ESCR_SELECT(E) (((E) << P4_CCCR_ESCR_SELECT_SHIFT) & \
+ P4_CCCR_ESCR_SELECT_MASK)
+#define P4_CCCR_ENABLE (1 << 12)
+#define P4_CCCR_VALID_BITS (P4_CCCR_OVF | P4_CCCR_CASCADE | \
+ P4_CCCR_OVF_PMI_T1 | P4_CCCR_OVF_PMI_T0 | P4_CCCR_FORCE_OVF | \
+ P4_CCCR_EDGE | P4_CCCR_THRESHOLD_MASK | P4_CCCR_COMPLEMENT | \
+ P4_CCCR_COMPARE | P4_CCCR_ESCR_SELECT_MASK | P4_CCCR_ENABLE)
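+
+/*
+ * Example: P4_CCCR_TO_ESCR_SELECT(0x7) yields 0xE000 (ESCR select 7
+ * in bits 15:13); OR-ing in P4_CCCR_ENABLE and
+ * P4_CCCR_TO_ACTIVE_THREAD(0x3) produces the kind of minimal CCCR
+ * value that p4_allocate_pmc() builds in hwpmc_piv.c.
+ */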
+
+#define P4_ESCR_EVENT_SELECT_SHIFT 25
+#define P4_ESCR_EVENT_SELECT_MASK 0x7E000000
+#define P4_ESCR_TO_EVENT_SELECT(E) (((E) << P4_ESCR_EVENT_SELECT_SHIFT) & \
+ P4_ESCR_EVENT_SELECT_MASK)
+#define P4_ESCR_EVENT_MASK_SHIFT 9
+#define P4_ESCR_EVENT_MASK_MASK 0x01FFFE00
+#define P4_ESCR_TO_EVENT_MASK(M) (((M) << P4_ESCR_EVENT_MASK_SHIFT) & \
+ P4_ESCR_EVENT_MASK_MASK)
+#define P4_ESCR_TAG_VALUE_SHIFT 5
+#define P4_ESCR_TAG_VALUE_MASK 0x000001E0
+#define P4_ESCR_TO_TAG_VALUE(T) (((T) << P4_ESCR_TAG_VALUE_SHIFT) & \
+ P4_ESCR_TAG_VALUE_MASK)
+#define P4_ESCR_TAG_ENABLE 0x00000010
+#define P4_ESCR_T0_OS 0x00000008
+#define P4_ESCR_T0_USR 0x00000004
+#define P4_ESCR_T1_OS 0x00000002
+#define P4_ESCR_T1_USR 0x00000001
+#define P4_ESCR_OS P4_ESCR_T0_OS
+#define P4_ESCR_USR P4_ESCR_T0_USR
+#define P4_ESCR_VALID_BITS (P4_ESCR_EVENT_SELECT_MASK | \
+ P4_ESCR_EVENT_MASK_MASK | P4_ESCR_TAG_VALUE_MASK | \
+    P4_ESCR_TAG_ENABLE | P4_ESCR_T0_OS | P4_ESCR_T0_USR | P4_ESCR_T1_OS |	\
+    P4_ESCR_T1_USR)
+
+#define P4_PERFCTR_MASK 0xFFFFFFFFFFLL /* 40 bits */
+#define P4_PERFCTR_OVERFLOWED(PMC) ((rdpmc(PMC) & (1LL << 39)) == 0)
+
+#define P4_CCCR_MSR_FIRST 0x360 /* MSR_BPU_CCCR0 */
+#define P4_PERFCTR_MSR_FIRST 0x300 /* MSR_BPU_COUNTER0 */
+
+#define P4_RELOAD_COUNT_TO_PERFCTR_VALUE(V) (1 - (V))
+#define P4_PERFCTR_VALUE_TO_RELOAD_COUNT(P) (1 - (P))
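+
+/*
+ * Illustration: for a reload count V, (1 - V) masked to 40 bits (as
+ * done when the counter is started) equals 2^40 - (V - 1), so the
+ * counter wraps to zero, clearing bit 39, after roughly V events, at
+ * which point P4_PERFCTR_OVERFLOWED() above reports the overflow.
+ */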
+
+struct pmc_md_p4_op_pmcallocate {
+ uint32_t pm_p4_cccrconfig;
+ uint32_t pm_p4_escrconfig;
+};
+
+#ifdef _KERNEL
+
+/* MD extension for 'struct pmc' */
+struct pmc_md_p4_pmc {
+ uint32_t pm_p4_cccrvalue;
+ uint32_t pm_p4_escrvalue;
+ uint32_t pm_p4_escr;
+ uint32_t pm_p4_escrmsr;
+};
+
+
+/*
+ * Prototypes
+ */
+
+int pmc_p4_initialize(struct pmc_mdep *_md, int _ncpus);
+void pmc_p4_finalize(struct pmc_mdep *md);
+
+#endif /* _KERNEL */
+#endif /* _DEV_HWPMC_PIV_H_ */
diff --git a/sys/dev/hwpmc/hwpmc_powerpc.c b/sys/dev/hwpmc/hwpmc_powerpc.c
new file mode 100644
index 0000000..a54ee62
--- /dev/null
+++ b/sys/dev/hwpmc/hwpmc_powerpc.c
@@ -0,0 +1,852 @@
+/*-
+ * Copyright (c) 2011 Justin Hibbits
+ * Copyright (c) 2005, Joseph Koshy
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/pmc.h>
+#include <sys/pmckern.h>
+#include <sys/systm.h>
+
+#include <machine/pmc_mdep.h>
+#include <machine/spr.h>
+#include <machine/cpu.h>
+
+#define POWERPC_PMC_CAPS (PMC_CAP_INTERRUPT | PMC_CAP_USER | \
+ PMC_CAP_SYSTEM | PMC_CAP_EDGE | \
+ PMC_CAP_THRESHOLD | PMC_CAP_READ | \
+ PMC_CAP_WRITE | PMC_CAP_INVERT | \
+ PMC_CAP_QUALIFIER)
+
+#define PPC_SET_PMC1SEL(r, x)	(((r) & ~(SPR_MMCR0_PMC1SEL(0x3f))) | SPR_MMCR0_PMC1SEL(x))
+#define PPC_SET_PMC2SEL(r, x)	(((r) & ~(SPR_MMCR0_PMC2SEL(0x3f))) | SPR_MMCR0_PMC2SEL(x))
+#define PPC_SET_PMC3SEL(r, x)	(((r) & ~(SPR_MMCR1_PMC3SEL(0x1f))) | SPR_MMCR1_PMC3SEL(x))
+#define PPC_SET_PMC4SEL(r, x)	(((r) & ~(SPR_MMCR1_PMC4SEL(0x1f))) | SPR_MMCR1_PMC4SEL(x))
+#define PPC_SET_PMC5SEL(r, x)	(((r) & ~(SPR_MMCR1_PMC5SEL(0x1f))) | SPR_MMCR1_PMC5SEL(x))
+#define PPC_SET_PMC6SEL(r, x)	(((r) & ~(SPR_MMCR1_PMC6SEL(0x3f))) | SPR_MMCR1_PMC6SEL(x))
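+
+/*
+ * Illustrative use: PPC_SET_PMC1SEL(mmcr0, 2) clears the 6-bit
+ * PMC1SEL field of a software copy of MMCR0 and selects event 2 for
+ * PMC1; the result must still be written back with mtspr().
+ */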
+
+/* Change this when we support more than just the 7450. */
+#define PPC_MAX_PMCS 6
+
+#define POWERPC_PMC_KERNEL_ENABLE (0x1 << 30)
+#define POWERPC_PMC_USER_ENABLE (0x1 << 31)
+
+#define POWERPC_PMC_ENABLE (POWERPC_PMC_KERNEL_ENABLE | POWERPC_PMC_USER_ENABLE)
+#define POWERPC_RELOAD_COUNT_TO_PERFCTR_VALUE(V) (0x80000000-(V))
+#define POWERPC_PERFCTR_VALUE_TO_RELOAD_COUNT(P) ((P)-0x80000000)
+#define POWERPC_PMC_HAS_OVERFLOWED(x) (powerpc_pmcn_read(x) & (0x1 << 31))
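+
+/*
+ * Example: a reload count V is programmed as 0x80000000 - V, so the
+ * 32-bit counter sets bit 31 after V increments, which is exactly
+ * what POWERPC_PMC_HAS_OVERFLOWED() tests.
+ */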
+
+
+/*
+ * This should work for every 32-bit PowerPC implementation I know of
+ * (G3 and G4 specifically).  PowerPC 970 will take more work.
+ */
+
+/*
+ * Per-processor information.
+ */
+struct powerpc_cpu {
+ struct pmc_hw *pc_ppcpmcs;
+};
+
+static struct powerpc_cpu **powerpc_pcpu;
+
+struct powerpc_event_code_map {
+ enum pmc_event pe_ev; /* enum value */
+ uint8_t pe_counter_mask; /* Which counter this can be counted in. */
+ uint8_t pe_code; /* numeric code */
+};
+
+#define PPC_PMC_MASK1 0
+#define PPC_PMC_MASK2 1
+#define PPC_PMC_MASK3 2
+#define PPC_PMC_MASK4 3
+#define PPC_PMC_MASK5 4
+#define PPC_PMC_MASK6 5
+#define PPC_PMC_MASK_ALL 0x3f
+
+#define PMC_POWERPC_EVENT(id, mask, number) \
+ { .pe_ev = PMC_EV_PPC7450_##id, .pe_counter_mask = mask, .pe_code = number }
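+
+/*
+ * Example row: PMC_POWERPC_EVENT(CYCLE, PPC_PMC_MASK_ALL, 1) maps
+ * the CYCLE event to hardware event code 1, countable on any of the
+ * six PMCs; a mask of 0x03 would restrict an event to PMC1 or PMC2.
+ */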
+
+static struct powerpc_event_code_map powerpc_event_codes[] = {
+ PMC_POWERPC_EVENT(CYCLE,PPC_PMC_MASK_ALL, 1),
+ PMC_POWERPC_EVENT(INSTR_COMPLETED, 0x0f, 2),
+ PMC_POWERPC_EVENT(TLB_BIT_TRANSITIONS, 0x0f, 3),
+ PMC_POWERPC_EVENT(INSTR_DISPATCHED, 0x0f, 4),
+ PMC_POWERPC_EVENT(PMON_EXCEPT, 0x0f, 5),
+ PMC_POWERPC_EVENT(PMON_SIG, 0x0f, 7),
+ PMC_POWERPC_EVENT(VPU_INSTR_COMPLETED, 0x03, 8),
+ PMC_POWERPC_EVENT(VFPU_INSTR_COMPLETED, 0x03, 9),
+ PMC_POWERPC_EVENT(VIU1_INSTR_COMPLETED, 0x03, 10),
+ PMC_POWERPC_EVENT(VIU2_INSTR_COMPLETED, 0x03, 11),
+ PMC_POWERPC_EVENT(MTVSCR_INSTR_COMPLETED, 0x03, 12),
+ PMC_POWERPC_EVENT(MTVRSAVE_INSTR_COMPLETED, 0x03, 13),
+ PMC_POWERPC_EVENT(VPU_INSTR_WAIT_CYCLES, 0x03, 14),
+ PMC_POWERPC_EVENT(VFPU_INSTR_WAIT_CYCLES, 0x03, 15),
+ PMC_POWERPC_EVENT(VIU1_INSTR_WAIT_CYCLES, 0x03, 16),
+ PMC_POWERPC_EVENT(VIU2_INSTR_WAIT_CYCLES, 0x03, 17),
+ PMC_POWERPC_EVENT(MFVSCR_SYNC_CYCLES, 0x03, 18),
+ PMC_POWERPC_EVENT(VSCR_SAT_SET, 0x03, 19),
+ PMC_POWERPC_EVENT(STORE_INSTR_COMPLETED, 0x03, 20),
+ PMC_POWERPC_EVENT(L1_INSTR_CACHE_MISSES, 0x03, 21),
+ PMC_POWERPC_EVENT(L1_DATA_SNOOPS, 0x03, 22),
+ PMC_POWERPC_EVENT(UNRESOLVED_BRANCHES, 0x01, 23),
+ PMC_POWERPC_EVENT(SPEC_BUFFER_CYCLES, 0x01, 24),
+ PMC_POWERPC_EVENT(BRANCH_UNIT_STALL_CYCLES, 0x01, 25),
+ PMC_POWERPC_EVENT(TRUE_BRANCH_TARGET_HITS, 0x01, 26),
+ PMC_POWERPC_EVENT(BRANCH_LINK_STAC_PREDICTED, 0x01, 27),
+ PMC_POWERPC_EVENT(GPR_ISSUE_QUEUE_DISPATCHES, 0x01, 28),
+ PMC_POWERPC_EVENT(CYCLES_THREE_INSTR_DISPATCHED, 0x01, 29),
+ PMC_POWERPC_EVENT(THRESHOLD_INSTR_QUEUE_ENTRIES_CYCLES, 0x01, 30),
+ PMC_POWERPC_EVENT(THRESHOLD_VEC_INSTR_QUEUE_ENTRIES_CYCLES, 0x01, 31),
+ PMC_POWERPC_EVENT(CYCLES_NO_COMPLETED_INSTRS, 0x01, 32),
+ PMC_POWERPC_EVENT(IU2_INSTR_COMPLETED, 0x01, 33),
+ PMC_POWERPC_EVENT(BRANCHES_COMPLETED, 0x01, 34),
+ PMC_POWERPC_EVENT(EIEIO_INSTR_COMPLETED, 0x01, 35),
+ PMC_POWERPC_EVENT(MTSPR_INSTR_COMPLETED, 0x01, 36),
+ PMC_POWERPC_EVENT(SC_INSTR_COMPLETED, 0x01, 37),
+ PMC_POWERPC_EVENT(LS_LM_COMPLETED, 0x01, 38),
+ PMC_POWERPC_EVENT(ITLB_HW_TABLE_SEARCH_CYCLES, 0x01, 39),
+ PMC_POWERPC_EVENT(DTLB_HW_SEARCH_CYCLES_OVER_THRESHOLD, 0x01, 40),
+ PMC_POWERPC_EVENT(L1_INSTR_CACHE_ACCESSES, 0x01, 41),
+ PMC_POWERPC_EVENT(INSTR_BKPT_MATCHES, 0x01, 42),
+ PMC_POWERPC_EVENT(L1_DATA_CACHE_LOAD_MISS_CYCLES_OVER_THRESHOLD, 0x01, 43),
+ PMC_POWERPC_EVENT(L1_DATA_SNOOP_HIT_ON_MODIFIED, 0x01, 44),
+ PMC_POWERPC_EVENT(LOAD_MISS_ALIAS, 0x01, 45),
+ PMC_POWERPC_EVENT(LOAD_MISS_ALIAS_ON_TOUCH, 0x01, 46),
+ PMC_POWERPC_EVENT(TOUCH_ALIAS, 0x01, 47),
+ PMC_POWERPC_EVENT(L1_DATA_SNOOP_HIT_CASTOUT_QUEUE, 0x01, 48),
+ PMC_POWERPC_EVENT(L1_DATA_SNOOP_HIT_CASTOUT, 0x01, 49),
+ PMC_POWERPC_EVENT(L1_DATA_SNOOP_HITS, 0x01, 50),
+ PMC_POWERPC_EVENT(WRITE_THROUGH_STORES, 0x01, 51),
+ PMC_POWERPC_EVENT(CACHE_INHIBITED_STORES, 0x01, 52),
+ PMC_POWERPC_EVENT(L1_DATA_LOAD_HIT, 0x01, 53),
+ PMC_POWERPC_EVENT(L1_DATA_TOUCH_HIT, 0x01, 54),
+ PMC_POWERPC_EVENT(L1_DATA_STORE_HIT, 0x01, 55),
+ PMC_POWERPC_EVENT(L1_DATA_TOTAL_HITS, 0x01, 56),
+ PMC_POWERPC_EVENT(DST_INSTR_DISPATCHED, 0x01, 57),
+ PMC_POWERPC_EVENT(REFRESHED_DSTS, 0x01, 58),
+ PMC_POWERPC_EVENT(SUCCESSFUL_DST_TABLE_SEARCHES, 0x01, 59),
+ PMC_POWERPC_EVENT(DSS_INSTR_COMPLETED, 0x01, 60),
+ PMC_POWERPC_EVENT(DST_STREAM_0_CACHE_LINE_FETCHES, 0x01, 61),
+ PMC_POWERPC_EVENT(VTQ_SUSPENDS_DUE_TO_CTX_CHANGE, 0x01, 62),
+ PMC_POWERPC_EVENT(VTQ_LINE_FETCH_HIT, 0x01, 63),
+ PMC_POWERPC_EVENT(VEC_LOAD_INSTR_COMPLETED, 0x01, 64),
+ PMC_POWERPC_EVENT(FP_STORE_INSTR_COMPLETED_IN_LSU, 0x01, 65),
+ PMC_POWERPC_EVENT(FPU_RENORMALIZATION, 0x01, 66),
+ PMC_POWERPC_EVENT(FPU_DENORMALIZATION, 0x01, 67),
+ PMC_POWERPC_EVENT(FP_STORE_CAUSES_STALL_IN_LSU, 0x01, 68),
+ PMC_POWERPC_EVENT(LD_ST_TRUE_ALIAS_STALL, 0x01, 70),
+ PMC_POWERPC_EVENT(LSU_INDEXED_ALIAS_STALL, 0x01, 71),
+ PMC_POWERPC_EVENT(LSU_ALIAS_VS_FSQ_WB0_WB1, 0x01, 72),
+ PMC_POWERPC_EVENT(LSU_ALIAS_VS_CSQ, 0x01, 73),
+ PMC_POWERPC_EVENT(LSU_LOAD_HIT_LINE_ALIAS_VS_CSQ0, 0x01, 74),
+ PMC_POWERPC_EVENT(LSU_LOAD_MISS_LINE_ALIAS_VS_CSQ0, 0x01, 75),
+ PMC_POWERPC_EVENT(LSU_TOUCH_LINE_ALIAS_VS_FSQ_WB0_WB1, 0x01, 76),
+ PMC_POWERPC_EVENT(LSU_TOUCH_ALIAS_VS_CSQ, 0x01, 77),
+ PMC_POWERPC_EVENT(LSU_LMQ_FULL_STALL, 0x01, 78),
+ PMC_POWERPC_EVENT(FP_LOAD_INSTR_COMPLETED_IN_LSU, 0x01, 79),
+ PMC_POWERPC_EVENT(FP_LOAD_SINGLE_INSTR_COMPLETED_IN_LSU, 0x01, 80),
+ PMC_POWERPC_EVENT(FP_LOAD_DOUBLE_COMPLETED_IN_LSU, 0x01, 81),
+ PMC_POWERPC_EVENT(LSU_RA_LATCH_STALL, 0x01, 82),
+ PMC_POWERPC_EVENT(LSU_LOAD_VS_STORE_QUEUE_ALIAS_STALL, 0x01, 83),
+ PMC_POWERPC_EVENT(LSU_LMQ_INDEX_ALIAS, 0x01, 84),
+ PMC_POWERPC_EVENT(LSU_STORE_QUEUE_INDEX_ALIAS, 0x01, 85),
+ PMC_POWERPC_EVENT(LSU_CSQ_FORWARDING, 0x01, 86),
+ PMC_POWERPC_EVENT(LSU_MISALIGNED_LOAD_FINISH, 0x01, 87),
+ PMC_POWERPC_EVENT(LSU_MISALIGN_STORE_COMPLETED, 0x01, 88),
+ PMC_POWERPC_EVENT(LSU_MISALIGN_STALL, 0x01, 89),
+ PMC_POWERPC_EVENT(FP_ONE_QUARTER_FPSCR_RENAMES_BUSY, 0x01, 90),
+ PMC_POWERPC_EVENT(FP_ONE_HALF_FPSCR_RENAMES_BUSY, 0x01, 91),
+ PMC_POWERPC_EVENT(FP_THREE_QUARTERS_FPSCR_RENAMES_BUSY, 0x01, 92),
+ PMC_POWERPC_EVENT(FP_ALL_FPSCR_RENAMES_BUSY, 0x01, 93),
+ PMC_POWERPC_EVENT(FP_DENORMALIZED_RESULT, 0x01, 94),
+ PMC_POWERPC_EVENT(L1_DATA_TOTAL_MISSES, 0x02, 23),
+ PMC_POWERPC_EVENT(DISPATCHES_TO_FPR_ISSUE_QUEUE, 0x02, 24),
+ PMC_POWERPC_EVENT(LSU_INSTR_COMPLETED, 0x02, 25),
+ PMC_POWERPC_EVENT(LOAD_INSTR_COMPLETED, 0x02, 26),
+ PMC_POWERPC_EVENT(SS_SM_INSTR_COMPLETED, 0x02, 27),
+ PMC_POWERPC_EVENT(TLBIE_INSTR_COMPLETED, 0x02, 28),
+ PMC_POWERPC_EVENT(LWARX_INSTR_COMPLETED, 0x02, 29),
+ PMC_POWERPC_EVENT(MFSPR_INSTR_COMPLETED, 0x02, 30),
+ PMC_POWERPC_EVENT(REFETCH_SERIALIZATION, 0x02, 31),
+ PMC_POWERPC_EVENT(COMPLETION_QUEUE_ENTRIES_OVER_THRESHOLD, 0x02, 32),
+ PMC_POWERPC_EVENT(CYCLES_ONE_INSTR_DISPATCHED, 0x02, 33),
+ PMC_POWERPC_EVENT(CYCLES_TWO_INSTR_COMPLETED, 0x02, 34),
+ PMC_POWERPC_EVENT(ITLB_NON_SPECULATIVE_MISSES, 0x02, 35),
+ PMC_POWERPC_EVENT(CYCLES_WAITING_FROM_L1_INSTR_CACHE_MISS, 0x02, 36),
+ PMC_POWERPC_EVENT(L1_DATA_LOAD_ACCESS_MISS, 0x02, 37),
+ PMC_POWERPC_EVENT(L1_DATA_TOUCH_MISS, 0x02, 38),
+ PMC_POWERPC_EVENT(L1_DATA_STORE_MISS, 0x02, 39),
+ PMC_POWERPC_EVENT(L1_DATA_TOUCH_MISS_CYCLES, 0x02, 40),
+ PMC_POWERPC_EVENT(L1_DATA_CYCLES_USED, 0x02, 41),
+ PMC_POWERPC_EVENT(DST_STREAM_1_CACHE_LINE_FETCHES, 0x02, 42),
+ PMC_POWERPC_EVENT(VTQ_STREAM_CANCELED_PREMATURELY, 0x02, 43),
+ PMC_POWERPC_EVENT(VTQ_RESUMES_DUE_TO_CTX_CHANGE, 0x02, 44),
+ PMC_POWERPC_EVENT(VTQ_LINE_FETCH_MISS, 0x02, 45),
+ PMC_POWERPC_EVENT(VTQ_LINE_FETCH, 0x02, 46),
+ PMC_POWERPC_EVENT(TLBIE_SNOOPS, 0x02, 47),
+ PMC_POWERPC_EVENT(L1_INSTR_CACHE_RELOADS, 0x02, 48),
+ PMC_POWERPC_EVENT(L1_DATA_CACHE_RELOADS, 0x02, 49),
+ PMC_POWERPC_EVENT(L1_DATA_CACHE_CASTOUTS_TO_L2, 0x02, 50),
+ PMC_POWERPC_EVENT(STORE_MERGE_GATHER, 0x02, 51),
+ PMC_POWERPC_EVENT(CACHEABLE_STORE_MERGE_TO_32_BYTES, 0x02, 52),
+ PMC_POWERPC_EVENT(DATA_BKPT_MATCHES, 0x02, 53),
+ PMC_POWERPC_EVENT(FALL_THROUGH_BRANCHES_PROCESSED, 0x02, 54),
+ PMC_POWERPC_EVENT(FIRST_SPECULATIVE_BRANCH_BUFFER_RESOLVED_CORRECTLY, 0x02, 55),
+ PMC_POWERPC_EVENT(SECOND_SPECULATION_BUFFER_ACTIVE, 0x02, 56),
+ PMC_POWERPC_EVENT(BPU_STALL_ON_LR_DEPENDENCY, 0x02, 57),
+ PMC_POWERPC_EVENT(BTIC_MISS, 0x02, 58),
+ PMC_POWERPC_EVENT(BRANCH_LINK_STACK_CORRECTLY_RESOLVED, 0x02, 59),
+ PMC_POWERPC_EVENT(FPR_ISSUE_STALLED, 0x02, 60),
+ PMC_POWERPC_EVENT(SWITCHES_BETWEEN_PRIV_USER, 0x02, 61),
+ PMC_POWERPC_EVENT(LSU_COMPLETES_FP_STORE_SINGLE, 0x02, 62),
+ PMC_POWERPC_EVENT(CYCLES_TWO_INSTR_COMPLETED, 0x04, 8),
+ PMC_POWERPC_EVENT(CYCLES_ONE_INSTR_DISPATCHED, 0x04, 9),
+ PMC_POWERPC_EVENT(VR_ISSUE_QUEUE_DISPATCHES, 0x04, 10),
+ PMC_POWERPC_EVENT(VR_STALLS, 0x04, 11),
+ PMC_POWERPC_EVENT(GPR_RENAME_BUFFER_ENTRIES_OVER_THRESHOLD, 0x04, 12),
+ PMC_POWERPC_EVENT(FPR_ISSUE_QUEUE_ENTRIES, 0x04, 13),
+ PMC_POWERPC_EVENT(FPU_INSTR_COMPLETED, 0x04, 14),
+ PMC_POWERPC_EVENT(STWCX_INSTR_COMPLETED, 0x04, 15),
+ PMC_POWERPC_EVENT(LS_LM_INSTR_PIECES, 0x04, 16),
+ PMC_POWERPC_EVENT(ITLB_HW_SEARCH_CYCLES_OVER_THRESHOLD, 0x04, 17),
+ PMC_POWERPC_EVENT(DTLB_MISSES, 0x04, 18),
+ PMC_POWERPC_EVENT(CANCELLED_L1_INSTR_CACHE_MISSES, 0x04, 19),
+ PMC_POWERPC_EVENT(L1_DATA_CACHE_OP_HIT, 0x04, 20),
+ PMC_POWERPC_EVENT(L1_DATA_LOAD_MISS_CYCLES, 0x04, 21),
+ PMC_POWERPC_EVENT(L1_DATA_PUSHES, 0x04, 22),
+ PMC_POWERPC_EVENT(L1_DATA_TOTAL_MISS, 0x04, 23),
+ PMC_POWERPC_EVENT(VT2_FETCHES, 0x04, 24),
+ PMC_POWERPC_EVENT(TAKEN_BRANCHES_PROCESSED, 0x04, 25),
+ PMC_POWERPC_EVENT(BRANCH_FLUSHES, 0x04, 26),
+ PMC_POWERPC_EVENT(SECOND_SPECULATIVE_BRANCH_BUFFER_RESOLVED_CORRECTLY, 0x04, 27),
+ PMC_POWERPC_EVENT(THIRD_SPECULATION_BUFFER_ACTIVE, 0x04, 28),
+ PMC_POWERPC_EVENT(BRANCH_UNIT_STALL_ON_CTR_DEPENDENCY, 0x04, 29),
+ PMC_POWERPC_EVENT(FAST_BTIC_HIT, 0x04, 30),
+ PMC_POWERPC_EVENT(BRANCH_LINK_STACK_MISPREDICTED, 0x04, 31),
+ PMC_POWERPC_EVENT(CYCLES_THREE_INSTR_COMPLETED, 0x08, 14),
+ PMC_POWERPC_EVENT(CYCLES_NO_INSTR_DISPATCHED, 0x08, 15),
+ PMC_POWERPC_EVENT(GPR_ISSUE_QUEUE_ENTRIES_OVER_THRESHOLD, 0x08, 16),
+ PMC_POWERPC_EVENT(GPR_ISSUE_QUEUE_STALLED, 0x08, 17),
+ PMC_POWERPC_EVENT(IU1_INSTR_COMPLETED, 0x08, 18),
+ PMC_POWERPC_EVENT(DSSALL_INSTR_COMPLETED, 0x08, 19),
+ PMC_POWERPC_EVENT(TLBSYNC_INSTR_COMPLETED, 0x08, 20),
+ PMC_POWERPC_EVENT(SYNC_INSTR_COMPLETED, 0x08, 21),
+ PMC_POWERPC_EVENT(SS_SM_INSTR_PIECES, 0x08, 22),
+ PMC_POWERPC_EVENT(DTLB_HW_SEARCH_CYCLES, 0x08, 23),
+ PMC_POWERPC_EVENT(SNOOP_RETRIES, 0x08, 24),
+ PMC_POWERPC_EVENT(SUCCESSFUL_STWCX, 0x08, 25),
+ PMC_POWERPC_EVENT(DST_STREAM_3_CACHE_LINE_FETCHES, 0x08, 26),
+ PMC_POWERPC_EVENT(THIRD_SPECULATIVE_BRANCH_BUFFER_RESOLVED_CORRECTLY, 0x08, 27),
+ PMC_POWERPC_EVENT(MISPREDICTED_BRANCHES, 0x08, 28),
+ PMC_POWERPC_EVENT(FOLDED_BRANCHES, 0x08, 29),
+ PMC_POWERPC_EVENT(FP_STORE_DOUBLE_COMPLETES_IN_LSU, 0x08, 30),
+ PMC_POWERPC_EVENT(L2_CACHE_HITS, 0x30, 2),
+ PMC_POWERPC_EVENT(L3_CACHE_HITS, 0x30, 3),
+ PMC_POWERPC_EVENT(L2_INSTR_CACHE_MISSES, 0x30, 4),
+ PMC_POWERPC_EVENT(L3_INSTR_CACHE_MISSES, 0x30, 5),
+ PMC_POWERPC_EVENT(L2_DATA_CACHE_MISSES, 0x30, 6),
+ PMC_POWERPC_EVENT(L3_DATA_CACHE_MISSES, 0x30, 7),
+ PMC_POWERPC_EVENT(L2_LOAD_HITS, 0x10, 8),
+ PMC_POWERPC_EVENT(L2_STORE_HITS, 0x10, 9),
+ PMC_POWERPC_EVENT(L3_LOAD_HITS, 0x10, 10),
+ PMC_POWERPC_EVENT(L3_STORE_HITS, 0x10, 11),
+ PMC_POWERPC_EVENT(L2_TOUCH_HITS, 0x30, 13),
+ PMC_POWERPC_EVENT(L3_TOUCH_HITS, 0x30, 14),
+ PMC_POWERPC_EVENT(SNOOP_RETRIES, 0x30, 15),
+ PMC_POWERPC_EVENT(SNOOP_MODIFIED, 0x10, 16),
+ PMC_POWERPC_EVENT(SNOOP_VALID, 0x10, 17),
+ PMC_POWERPC_EVENT(INTERVENTION, 0x30, 18),
+ PMC_POWERPC_EVENT(L2_CACHE_MISSES, 0x10, 19),
+ PMC_POWERPC_EVENT(L3_CACHE_MISSES, 0x10, 20),
+ PMC_POWERPC_EVENT(L2_CACHE_CASTOUTS, 0x20, 8),
+ PMC_POWERPC_EVENT(L3_CACHE_CASTOUTS, 0x20, 9),
+ PMC_POWERPC_EVENT(L2SQ_FULL_CYCLES, 0x20, 10),
+ PMC_POWERPC_EVENT(L3SQ_FULL_CYCLES, 0x20, 11),
+ PMC_POWERPC_EVENT(RAQ_FULL_CYCLES, 0x20, 16),
+ PMC_POWERPC_EVENT(WAQ_FULL_CYCLES, 0x20, 17),
+ PMC_POWERPC_EVENT(L1_EXTERNAL_INTERVENTIONS, 0x20, 19),
+ PMC_POWERPC_EVENT(L2_EXTERNAL_INTERVENTIONS, 0x20, 20),
+ PMC_POWERPC_EVENT(L3_EXTERNAL_INTERVENTIONS, 0x20, 21),
+ PMC_POWERPC_EVENT(EXTERNAL_INTERVENTIONS, 0x20, 22),
+ PMC_POWERPC_EVENT(EXTERNAL_PUSHES, 0x20, 23),
+ PMC_POWERPC_EVENT(EXTERNAL_SNOOP_RETRY, 0x20, 24),
+ PMC_POWERPC_EVENT(DTQ_FULL_CYCLES, 0x20, 25),
+ PMC_POWERPC_EVENT(BUS_RETRY, 0x20, 26),
+ PMC_POWERPC_EVENT(L2_VALID_REQUEST, 0x20, 27),
+ PMC_POWERPC_EVENT(BORDQ_FULL, 0x20, 28),
+ PMC_POWERPC_EVENT(BUS_TAS_FOR_READS, 0x20, 42),
+ PMC_POWERPC_EVENT(BUS_TAS_FOR_WRITES, 0x20, 43),
+ PMC_POWERPC_EVENT(BUS_READS_NOT_RETRIED, 0x20, 44),
+ PMC_POWERPC_EVENT(BUS_WRITES_NOT_RETRIED, 0x20, 45),
+ PMC_POWERPC_EVENT(BUS_READS_WRITES_NOT_RETRIED, 0x20, 46),
+ PMC_POWERPC_EVENT(BUS_RETRY_DUE_TO_L1_RETRY, 0x20, 47),
+ PMC_POWERPC_EVENT(BUS_RETRY_DUE_TO_PREVIOUS_ADJACENT, 0x20, 48),
+ PMC_POWERPC_EVENT(BUS_RETRY_DUE_TO_COLLISION, 0x20, 49),
+ PMC_POWERPC_EVENT(BUS_RETRY_DUE_TO_INTERVENTION_ORDERING, 0x20, 50),
+ PMC_POWERPC_EVENT(SNOOP_REQUESTS, 0x20, 51),
+ PMC_POWERPC_EVENT(PREFETCH_ENGINE_REQUEST, 0x20, 52),
+ PMC_POWERPC_EVENT(PREFETCH_ENGINE_COLLISION_VS_LOAD, 0x20, 53),
+ PMC_POWERPC_EVENT(PREFETCH_ENGINE_COLLISION_VS_STORE, 0x20, 54),
+ PMC_POWERPC_EVENT(PREFETCH_ENGINE_COLLISION_VS_INSTR_FETCH, 0x20, 55),
+ PMC_POWERPC_EVENT(PREFETCH_ENGINE_COLLISION_VS_LOAD_STORE_INSTR_FETCH, 0x20, 56),
+ PMC_POWERPC_EVENT(PREFETCH_ENGINE_FULL, 0x20, 57)
+};
+
+const size_t powerpc_event_codes_size =
+ sizeof(powerpc_event_codes) / sizeof(powerpc_event_codes[0]);
+
+int
+pmc_save_kernel_callchain(uintptr_t *cc, int maxsamples,
+ struct trapframe *tf)
+{
+ (void) cc;
+ (void) maxsamples;
+ (void) tf;
+ return (0);
+}
+
+static pmc_value_t
+powerpc_pmcn_read(unsigned int pmc)
+{
+	switch (pmc) {
+	case 0:
+		return (mfspr(SPR_PMC1));
+	case 1:
+		return (mfspr(SPR_PMC2));
+	case 2:
+		return (mfspr(SPR_PMC3));
+	case 3:
+		return (mfspr(SPR_PMC4));
+	case 4:
+		return (mfspr(SPR_PMC5));
+	case 5:
+		return (mfspr(SPR_PMC6));
+	default:
+		panic("Invalid PMC number: %d\n", pmc);
+	}
+}
+
+static void
+powerpc_pmcn_write(unsigned int pmc, uint32_t val)
+{
+ switch (pmc) {
+ case 0:
+ mtspr(SPR_PMC1, val);
+ break;
+ case 1:
+ mtspr(SPR_PMC2, val);
+ break;
+ case 2:
+ mtspr(SPR_PMC3, val);
+ break;
+ case 3:
+ mtspr(SPR_PMC4, val);
+ break;
+ case 4:
+ mtspr(SPR_PMC5, val);
+ break;
+ case 5:
+ mtspr(SPR_PMC6, val);
+ break;
+ default:
+ panic("Invalid PMC number: %d\n", pmc);
+ }
+}
+
+static int
+powerpc_allocate_pmc(int cpu, int ri, struct pmc *pm,
+ const struct pmc_op_pmcallocate *a)
+{
+ enum pmc_event pe;
+ uint32_t caps, config, counter;
+ int i;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[powerpc,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < PPC_MAX_PMCS,
+ ("[powerpc,%d] illegal row index %d", __LINE__, ri));
+
+ caps = a->pm_caps;
+
+ /*
+ * TODO: Check actual class for different generations.
+ */
+ if (a->pm_class != PMC_CLASS_PPC7450)
+ return (EINVAL);
+ pe = a->pm_ev;
+ for (i = 0; i < powerpc_event_codes_size; i++) {
+ if (powerpc_event_codes[i].pe_ev == pe) {
+ config = powerpc_event_codes[i].pe_code;
+ counter = powerpc_event_codes[i].pe_counter_mask;
+ break;
+ }
+ }
+ if (i == powerpc_event_codes_size)
+ return (EINVAL);
+
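+	/*
+	 * The event table's counter mask has bit 'ri' set for each
+	 * hardware counter able to count this event; e.g. a mask of
+	 * 0x01 restricts an event to SPR_PMC1 (row index 0).
+	 */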
+ if ((counter & (1 << ri)) == 0)
+ return (EINVAL);
+
+ if (caps & PMC_CAP_SYSTEM)
+ config |= POWERPC_PMC_KERNEL_ENABLE;
+ if (caps & PMC_CAP_USER)
+ config |= POWERPC_PMC_USER_ENABLE;
+ if ((caps & (PMC_CAP_USER | PMC_CAP_SYSTEM)) == 0)
+ config |= POWERPC_PMC_ENABLE;
+
+ pm->pm_md.pm_powerpc.pm_powerpc_evsel = config;
+
+ PMCDBG(MDP,ALL,2,"powerpc-allocate ri=%d -> config=0x%x", ri, config);
+
+ return 0;
+}
+
+static int
+powerpc_read_pmc(int cpu, int ri, pmc_value_t *v)
+{
+ struct pmc *pm;
+ pmc_value_t tmp;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[powerpc,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < PPC_MAX_PMCS,
+ ("[powerpc,%d] illegal row index %d", __LINE__, ri));
+
+ pm = powerpc_pcpu[cpu]->pc_ppcpmcs[ri].phw_pmc;
+ tmp = powerpc_pmcn_read(ri);
+ PMCDBG(MDP,REA,2,"ppc-read id=%d -> %jd", ri, tmp);
+ if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
+ *v = POWERPC_PERFCTR_VALUE_TO_RELOAD_COUNT(tmp);
+ else
+ *v = tmp;
+
+ return 0;
+}
+
+static int
+powerpc_write_pmc(int cpu, int ri, pmc_value_t v)
+{
+ struct pmc *pm;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[powerpc,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < PPC_MAX_PMCS,
+ ("[powerpc,%d] illegal row-index %d", __LINE__, ri));
+
+ pm = powerpc_pcpu[cpu]->pc_ppcpmcs[ri].phw_pmc;
+
+ if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
+ v = POWERPC_RELOAD_COUNT_TO_PERFCTR_VALUE(v);
+
+ PMCDBG(MDP,WRI,1,"powerpc-write cpu=%d ri=%d v=%jx", cpu, ri, v);
+
+ powerpc_pmcn_write(ri, v);
+
+ return 0;
+}
+
+static int
+powerpc_config_pmc(int cpu, int ri, struct pmc *pm)
+{
+ struct pmc_hw *phw;
+
+ PMCDBG(MDP,CFG,1, "cpu=%d ri=%d pm=%p", cpu, ri, pm);
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[powerpc,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < PPC_MAX_PMCS,
+ ("[powerpc,%d] illegal row-index %d", __LINE__, ri));
+
+ phw = &powerpc_pcpu[cpu]->pc_ppcpmcs[ri];
+
+ KASSERT(pm == NULL || phw->phw_pmc == NULL,
+ ("[powerpc,%d] pm=%p phw->pm=%p hwpmc not unconfigured",
+ __LINE__, pm, phw->phw_pmc));
+
+ phw->phw_pmc = pm;
+
+ return 0;
+}
+
+static int
+powerpc_start_pmc(int cpu, int ri)
+{
+ uint32_t config;
+ struct pmc *pm;
+ struct pmc_hw *phw;
+ register_t pmc_mmcr;
+
+ phw = &powerpc_pcpu[cpu]->pc_ppcpmcs[ri];
+ pm = phw->phw_pmc;
+ config = pm->pm_md.pm_powerpc.pm_powerpc_evsel & ~POWERPC_PMC_ENABLE;
+
+ /* Enable the PMC. */
+ switch (ri) {
+ case 0:
+ pmc_mmcr = mfspr(SPR_MMCR0);
+ pmc_mmcr = PPC_SET_PMC1SEL(pmc_mmcr, config);
+ mtspr(SPR_MMCR0, pmc_mmcr);
+ break;
+ case 1:
+ pmc_mmcr = mfspr(SPR_MMCR0);
+ pmc_mmcr = PPC_SET_PMC2SEL(pmc_mmcr, config);
+ mtspr(SPR_MMCR0, pmc_mmcr);
+ break;
+ case 2:
+ pmc_mmcr = mfspr(SPR_MMCR1);
+ pmc_mmcr = PPC_SET_PMC3SEL(pmc_mmcr, config);
+ mtspr(SPR_MMCR1, pmc_mmcr);
+ break;
+ case 3:
+ pmc_mmcr = mfspr(SPR_MMCR0);
+ pmc_mmcr = PPC_SET_PMC4SEL(pmc_mmcr, config);
+ mtspr(SPR_MMCR0, pmc_mmcr);
+ break;
+ case 4:
+ pmc_mmcr = mfspr(SPR_MMCR1);
+ pmc_mmcr = PPC_SET_PMC5SEL(pmc_mmcr, config);
+ mtspr(SPR_MMCR1, pmc_mmcr);
+ break;
+ case 5:
+ pmc_mmcr = mfspr(SPR_MMCR1);
+ pmc_mmcr = PPC_SET_PMC6SEL(pmc_mmcr, config);
+ mtspr(SPR_MMCR1, pmc_mmcr);
+ break;
+ default:
+ break;
+ }
+
+	/*
+	 * The mask is inverted (enable is 1) compared to the flags in
+	 * MMCR0, which are freeze flags.
+	 */
+ config = ~pm->pm_md.pm_powerpc.pm_powerpc_evsel & POWERPC_PMC_ENABLE;
+
+ pmc_mmcr = mfspr(SPR_MMCR0);
+ pmc_mmcr &= ~SPR_MMCR0_FC;
+ pmc_mmcr |= config;
+ mtspr(SPR_MMCR0, pmc_mmcr);
+
+ return 0;
+}
+
+static int
+powerpc_stop_pmc(int cpu, int ri)
+{
+ struct pmc *pm;
+ struct pmc_hw *phw;
+ register_t pmc_mmcr;
+
+ phw = &powerpc_pcpu[cpu]->pc_ppcpmcs[ri];
+ pm = phw->phw_pmc;
+
+ /*
+ * Disable the PMCs.
+ */
+ switch (ri) {
+ case 0:
+ pmc_mmcr = mfspr(SPR_MMCR0);
+ pmc_mmcr = PPC_SET_PMC1SEL(pmc_mmcr, 0);
+ mtspr(SPR_MMCR0, pmc_mmcr);
+ break;
+ case 1:
+ pmc_mmcr = mfspr(SPR_MMCR0);
+ pmc_mmcr = PPC_SET_PMC2SEL(pmc_mmcr, 0);
+ mtspr(SPR_MMCR0, pmc_mmcr);
+ break;
+ case 2:
+ pmc_mmcr = mfspr(SPR_MMCR1);
+ pmc_mmcr = PPC_SET_PMC3SEL(pmc_mmcr, 0);
+ mtspr(SPR_MMCR1, pmc_mmcr);
+ break;
+ case 3:
+ pmc_mmcr = mfspr(SPR_MMCR0);
+ pmc_mmcr = PPC_SET_PMC4SEL(pmc_mmcr, 0);
+ mtspr(SPR_MMCR0, pmc_mmcr);
+ break;
+ case 4:
+ pmc_mmcr = mfspr(SPR_MMCR1);
+ pmc_mmcr = PPC_SET_PMC5SEL(pmc_mmcr, 0);
+ mtspr(SPR_MMCR1, pmc_mmcr);
+ break;
+ case 5:
+ pmc_mmcr = mfspr(SPR_MMCR1);
+ pmc_mmcr = PPC_SET_PMC6SEL(pmc_mmcr, 0);
+ mtspr(SPR_MMCR1, pmc_mmcr);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int
+powerpc_release_pmc(int cpu, int ri, struct pmc *pmc)
+{
+ struct pmc_hw *phw;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[powerpc,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < PPC_MAX_PMCS,
+ ("[powerpc,%d] illegal row-index %d", __LINE__, ri));
+
+ phw = &powerpc_pcpu[cpu]->pc_ppcpmcs[ri];
+ KASSERT(phw->phw_pmc == NULL,
+ ("[powerpc,%d] PHW pmc %p non-NULL", __LINE__, phw->phw_pmc));
+
+ return 0;
+}
+
+static int
+powerpc_switch_in(struct pmc_cpu *pc, struct pmc_process *pp)
+{
+ return 0;
+}
+
+static int
+powerpc_switch_out(struct pmc_cpu *pc, struct pmc_process *pp)
+{
+ return 0;
+}
+
+static int
+powerpc_intr(int cpu, struct trapframe *tf)
+{
+ int i, error, retval;
+ uint32_t config;
+ struct pmc *pm;
+ struct powerpc_cpu *pac;
+ pmc_value_t v;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[powerpc,%d] out of range CPU %d", __LINE__, cpu));
+
+ PMCDBG(MDP,INT,1, "cpu=%d tf=%p um=%d", cpu, (void *) tf,
+ TRAPF_USERMODE(tf));
+
+ retval = 0;
+
+ pac = powerpc_pcpu[cpu];
+
+ /*
+ * look for all PMCs that have interrupted:
+ * - look for a running, sampling PMC which has overflowed
+ * and which has a valid 'struct pmc' association
+ *
+ * If found, we call a helper to process the interrupt.
+ */
+
+ for (i = 0; i < PPC_MAX_PMCS; i++) {
+ if ((pm = pac->pc_ppcpmcs[i].phw_pmc) == NULL ||
+ !PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) {
+ continue;
+ }
+
+ if (!POWERPC_PMC_HAS_OVERFLOWED(i))
+ continue;
+
+ retval = 1; /* Found an interrupting PMC. */
+
+ if (pm->pm_state != PMC_STATE_RUNNING)
+ continue;
+
+ /* Stop the PMC, reload count. */
+ v = pm->pm_sc.pm_reloadcount;
+ config = mfspr(SPR_MMCR0);
+
+ mtspr(SPR_MMCR0, config | SPR_MMCR0_FC);
+ powerpc_pmcn_write(i, v);
+
+ /* Restart the counter if logging succeeded. */
+ error = pmc_process_interrupt(cpu, PMC_HR, pm, tf,
+ TRAPF_USERMODE(tf));
+ mtspr(SPR_MMCR0, config);
+ if (error != 0)
+ powerpc_stop_pmc(cpu, i);
+ atomic_add_int(retval ? &pmc_stats.pm_intr_processed :
+ &pmc_stats.pm_intr_ignored, 1);
+
+ }
+
+ /* Re-enable PERF exceptions. */
+ mtspr(SPR_MMCR0, mfspr(SPR_MMCR0) | SPR_MMCR0_PMXE);
+
+ return (retval);
+}
+
+static int
+powerpc_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc)
+{
+ int error;
+ struct pmc_hw *phw;
+ char powerpc_name[PMC_NAME_MAX];
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+	    ("[powerpc,%d] illegal CPU %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < PPC_MAX_PMCS,
+ ("[powerpc,%d] row-index %d out of range", __LINE__, ri));
+
+ phw = &powerpc_pcpu[cpu]->pc_ppcpmcs[ri];
+ snprintf(powerpc_name, sizeof(powerpc_name), "POWERPC-%d", ri);
+ if ((error = copystr(powerpc_name, pi->pm_name, PMC_NAME_MAX,
+ NULL)) != 0)
+ return error;
+ pi->pm_class = PMC_CLASS_PPC7450;
+ if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) {
+ pi->pm_enabled = TRUE;
+ *ppmc = phw->phw_pmc;
+ } else {
+ pi->pm_enabled = FALSE;
+ *ppmc = NULL;
+ }
+
+ return (0);
+}
+
+static int
+powerpc_get_config(int cpu, int ri, struct pmc **ppm)
+{
+ *ppm = powerpc_pcpu[cpu]->pc_ppcpmcs[ri].phw_pmc;
+
+ return 0;
+}
+
+static int
+powerpc_pcpu_init(struct pmc_mdep *md, int cpu)
+{
+ int first_ri, i;
+ struct pmc_cpu *pc;
+ struct powerpc_cpu *pac;
+ struct pmc_hw *phw;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[powerpc,%d] wrong cpu number %d", __LINE__, cpu));
+ PMCDBG(MDP,INI,1,"powerpc-init cpu=%d", cpu);
+
+ powerpc_pcpu[cpu] = pac = malloc(sizeof(struct powerpc_cpu), M_PMC,
+ M_WAITOK|M_ZERO);
+ pac->pc_ppcpmcs = malloc(sizeof(struct pmc_hw) * PPC_MAX_PMCS,
+ M_PMC, M_WAITOK|M_ZERO);
+ pc = pmc_pcpu[cpu];
+ first_ri = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_PPC7450].pcd_ri;
+ KASSERT(pc != NULL, ("[powerpc,%d] NULL per-cpu pointer", __LINE__));
+
+ for (i = 0, phw = pac->pc_ppcpmcs; i < PPC_MAX_PMCS; i++, phw++) {
+ phw->phw_state = PMC_PHW_FLAG_IS_ENABLED |
+ PMC_PHW_CPU_TO_STATE(cpu) | PMC_PHW_INDEX_TO_STATE(i);
+ phw->phw_pmc = NULL;
+ pc->pc_hwpmcs[i + first_ri] = phw;
+ }
+
+ /* Clear the MMCRs, and set FC, to disable all PMCs. */
+ mtspr(SPR_MMCR0, SPR_MMCR0_FC | SPR_MMCR0_PMXE | SPR_MMCR0_PMC1CE | SPR_MMCR0_PMCNCE);
+ mtspr(SPR_MMCR1, 0);
+
+ return 0;
+}
+
+static int
+powerpc_pcpu_fini(struct pmc_mdep *md, int cpu)
+{
+ uint32_t mmcr0 = mfspr(SPR_MMCR0);
+
+ mmcr0 |= SPR_MMCR0_FC;
+ mtspr(SPR_MMCR0, mmcr0);
+ free(powerpc_pcpu[cpu]->pc_ppcpmcs, M_PMC);
+ free(powerpc_pcpu[cpu], M_PMC);
+ return 0;
+}
+
+struct pmc_mdep *
+pmc_md_initialize(void)
+{
+ struct pmc_mdep *pmc_mdep;
+ struct pmc_classdep *pcd;
+
+ /*
+ * Allocate space for pointers to PMC HW descriptors and for
+ * the MDEP structure used by MI code.
+ */
+ powerpc_pcpu = malloc(sizeof(struct powerpc_cpu *) * pmc_cpu_max(), M_PMC,
+ M_WAITOK|M_ZERO);
+
+ /* Just one class */
+ pmc_mdep = pmc_mdep_alloc(1);
+
+ pmc_mdep->pmd_cputype = PMC_CPU_PPC_7450;
+
+ pcd = &pmc_mdep->pmd_classdep[PMC_MDEP_CLASS_INDEX_PPC7450];
+ pcd->pcd_caps = POWERPC_PMC_CAPS;
+ pcd->pcd_class = PMC_CLASS_PPC7450;
+ pcd->pcd_num = PPC_MAX_PMCS;
+ pcd->pcd_ri = pmc_mdep->pmd_npmc;
+ pcd->pcd_width = 32; /* All PMCs, even in ppc970, are 32-bit */
+
+ pcd->pcd_allocate_pmc = powerpc_allocate_pmc;
+ pcd->pcd_config_pmc = powerpc_config_pmc;
+ pcd->pcd_pcpu_fini = powerpc_pcpu_fini;
+ pcd->pcd_pcpu_init = powerpc_pcpu_init;
+ pcd->pcd_describe = powerpc_describe;
+ pcd->pcd_get_config = powerpc_get_config;
+ pcd->pcd_read_pmc = powerpc_read_pmc;
+ pcd->pcd_release_pmc = powerpc_release_pmc;
+ pcd->pcd_start_pmc = powerpc_start_pmc;
+ pcd->pcd_stop_pmc = powerpc_stop_pmc;
+ pcd->pcd_write_pmc = powerpc_write_pmc;
+
+ pmc_mdep->pmd_intr = powerpc_intr;
+ pmc_mdep->pmd_switch_in = powerpc_switch_in;
+ pmc_mdep->pmd_switch_out = powerpc_switch_out;
+
+ pmc_mdep->pmd_npmc += PPC_MAX_PMCS;
+
+ return (pmc_mdep);
+}
+
+void
+pmc_md_finalize(struct pmc_mdep *md)
+{
+ free(md, M_PMC);
+}
+
+int
+pmc_save_user_callchain(uintptr_t *cc, int maxsamples,
+ struct trapframe *tf)
+{
+ (void) cc;
+ (void) maxsamples;
+ (void) tf;
+ return (0);
+}
diff --git a/sys/dev/hwpmc/hwpmc_ppro.c b/sys/dev/hwpmc/hwpmc_ppro.c
new file mode 100644
index 0000000..416a540
--- /dev/null
+++ b/sys/dev/hwpmc/hwpmc_ppro.c
@@ -0,0 +1,866 @@
+/*-
+ * Copyright (c) 2003-2005,2008 Joseph Koshy
+ * Copyright (c) 2007 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * Portions of this software were developed by A. Joseph Koshy under
+ * sponsorship from the FreeBSD Foundation and Google, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/pmc.h>
+#include <sys/pmckern.h>
+#include <sys/smp.h>
+#include <sys/systm.h>
+
+#include <machine/intr_machdep.h>
+#include <machine/apicvar.h>
+#include <machine/cpu.h>
+#include <machine/cpufunc.h>
+#include <machine/cputypes.h>
+#include <machine/md_var.h>
+#include <machine/pmc_mdep.h>
+#include <machine/specialreg.h>
+
+/*
+ * PENTIUM PRO SUPPORT
+ *
+ * Quirks:
+ *
+ * - Both PMCs are enabled by a single bit P6_EVSEL_EN in performance
+ *   counter '0'.  This bit needs to be '1' if either of the two
+ *   performance counters is in use.  Perf counters can also be
+ *   switched off by writing zeros to their EVSEL register.
+ *
+ * - While the width of these counters is 40 bits, we do not appear to
+ *   have a way of writing 40 bits to the counter MSRs.  A WRMSR
+ *   instruction will sign extend bit 31 of the value being written to
+ *   the perf counter -- a value of 0x80000000 written to a perf
+ *   counter register will be sign extended to 0xFF80000000.
+ *
+ * This quirk primarily affects thread-mode PMCs in counting mode, as
+ * these PMCs have their registers read and written at every context
+ * switch.
+ */
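+
+/*
+ * A worked example of the second quirk (illustrative only, values
+ * assumed): to arm a sampling PMC with a reload count of 0x100000,
+ *
+ *	v = P6_RELOAD_COUNT_TO_PERFCTR_VALUE(0x100000);	  // -0x100000
+ *	wrmsr(pd->pm_pmc_msr, v & P6_PERFCTR_WRITE_MASK); // 0xFFF00000
+ *
+ * the CPU sign extends bit 31, so the 40 bit counter starts at
+ * 0xFFFFF00000 and overflows after 0x100000 events.
+ */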
+
+struct p6pmc_descr {
+ struct pmc_descr pm_descr; /* common information */
+ uint32_t pm_pmc_msr;
+ uint32_t pm_evsel_msr;
+};
+
+static struct p6pmc_descr p6_pmcdesc[P6_NPMCS] = {
+
+#define P6_PMC_CAPS (PMC_CAP_INTERRUPT | PMC_CAP_USER | PMC_CAP_SYSTEM | \
+ PMC_CAP_EDGE | PMC_CAP_THRESHOLD | PMC_CAP_READ | PMC_CAP_WRITE | \
+ PMC_CAP_INVERT | PMC_CAP_QUALIFIER)
+
+ /* PMC 0 */
+ {
+ .pm_descr =
+ {
+ .pd_name ="P6-0",
+ .pd_class = PMC_CLASS_P6,
+ .pd_caps = P6_PMC_CAPS,
+ .pd_width = 40
+ },
+ .pm_pmc_msr = P6_MSR_PERFCTR0,
+ .pm_evsel_msr = P6_MSR_EVSEL0
+ },
+
+ /* PMC 1 */
+ {
+ .pm_descr =
+ {
+ .pd_name ="P6-1",
+ .pd_class = PMC_CLASS_P6,
+ .pd_caps = P6_PMC_CAPS,
+ .pd_width = 40
+ },
+ .pm_pmc_msr = P6_MSR_PERFCTR1,
+ .pm_evsel_msr = P6_MSR_EVSEL1
+ }
+};
+
+static enum pmc_cputype p6_cputype;
+
+/*
+ * P6 Event descriptor
+ *
+ * The 'pm_flags' field has the following structure:
+ * - The upper 4 bits are used to track which counters an event is valid on.
+ * - The lower bits form a bitmask of flags indicating support for the event
+ * on a given CPU.
+ */
+
+struct p6_event_descr {
+ const enum pmc_event pm_event;
+ uint32_t pm_evsel;
+ uint32_t pm_flags;
+ uint32_t pm_unitmask;
+};
+
+#define P6F_CTR(C) (1 << (28 + (C)))
+#define P6F_CTR0 P6F_CTR(0)
+#define P6F_CTR1 P6F_CTR(1)
+#define P6F(CPU) (1 << ((CPU) - PMC_CPU_INTEL_P6))
+#define _P6F(C) P6F(PMC_CPU_INTEL_##C)
+#define P6F_P6 _P6F(P6)
+#define P6F_CL _P6F(CL)
+#define P6F_PII _P6F(PII)
+#define P6F_PIII _P6F(PIII)
+#define P6F_PM _P6F(PM)
+#define P6F_ALL_CPUS (P6F_P6 | P6F_PII | P6F_CL | P6F_PIII | P6F_PM)
+#define P6F_ALL_CTRS (P6F_CTR0 | P6F_CTR1)
+#define P6F_ALL (P6F_ALL_CPUS | P6F_ALL_CTRS)
+
+#define P6_EVENT_VALID_FOR_CPU(P,CPU) ((P)->pm_flags & P6F(CPU))
+#define P6_EVENT_VALID_FOR_CTR(P,CTR) ((P)->pm_flags & P6F_CTR(CTR))
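+
+/*
+ * Example (illustrative): the FLOPS event below carries the flags
+ * (P6F_ALL_CPUS | P6F_CTR0), so
+ *
+ *	P6_EVENT_VALID_FOR_CPU(p, PMC_CPU_INTEL_PIII)	!= 0
+ *	P6_EVENT_VALID_FOR_CTR(p, 0)			!= 0
+ *	P6_EVENT_VALID_FOR_CTR(p, 1)			== 0
+ *
+ * i.e. FLOPS may be allocated on counter 0 of any P6 family CPU, but
+ * never on counter 1.
+ */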
+
+static const struct p6_event_descr p6_events[] = {
+
+#define P6_EVDESCR(NAME, EVSEL, FLAGS, UMASK) \
+ { \
+ .pm_event = PMC_EV_P6_##NAME, \
+ .pm_evsel = (EVSEL), \
+ .pm_flags = (FLAGS), \
+ .pm_unitmask = (UMASK) \
+ }
+
+P6_EVDESCR(DATA_MEM_REFS, 0x43, P6F_ALL, 0x00),
+P6_EVDESCR(DCU_LINES_IN, 0x45, P6F_ALL, 0x00),
+P6_EVDESCR(DCU_M_LINES_IN, 0x46, P6F_ALL, 0x00),
+P6_EVDESCR(DCU_M_LINES_OUT, 0x47, P6F_ALL, 0x00),
+P6_EVDESCR(DCU_MISS_OUTSTANDING, 0x48, P6F_ALL, 0x00),
+P6_EVDESCR(IFU_FETCH, 0x80, P6F_ALL, 0x00),
+P6_EVDESCR(IFU_FETCH_MISS, 0x81, P6F_ALL, 0x00),
+P6_EVDESCR(ITLB_MISS, 0x85, P6F_ALL, 0x00),
+P6_EVDESCR(IFU_MEM_STALL, 0x86, P6F_ALL, 0x00),
+P6_EVDESCR(ILD_STALL, 0x87, P6F_ALL, 0x00),
+P6_EVDESCR(L2_IFETCH, 0x28, P6F_ALL, 0x0F),
+P6_EVDESCR(L2_LD, 0x29, P6F_ALL, 0x0F),
+P6_EVDESCR(L2_ST, 0x2A, P6F_ALL, 0x0F),
+P6_EVDESCR(L2_LINES_IN, 0x24, P6F_ALL, 0x0F),
+P6_EVDESCR(L2_LINES_OUT, 0x26, P6F_ALL, 0x0F),
+P6_EVDESCR(L2_M_LINES_INM, 0x25, P6F_ALL, 0x00),
+P6_EVDESCR(L2_M_LINES_OUTM, 0x27, P6F_ALL, 0x0F),
+P6_EVDESCR(L2_RQSTS, 0x2E, P6F_ALL, 0x0F),
+P6_EVDESCR(L2_ADS, 0x21, P6F_ALL, 0x00),
+P6_EVDESCR(L2_DBUS_BUSY, 0x22, P6F_ALL, 0x00),
+P6_EVDESCR(L2_DBUS_BUSY_RD, 0x23, P6F_ALL, 0x00),
+P6_EVDESCR(BUS_DRDY_CLOCKS, 0x62, P6F_ALL, 0x20),
+P6_EVDESCR(BUS_LOCK_CLOCKS, 0x63, P6F_ALL, 0x20),
+P6_EVDESCR(BUS_REQ_OUTSTANDING, 0x60, P6F_ALL, 0x00),
+P6_EVDESCR(BUS_TRAN_BRD, 0x65, P6F_ALL, 0x20),
+P6_EVDESCR(BUS_TRAN_RFO, 0x66, P6F_ALL, 0x20),
+P6_EVDESCR(BUS_TRANS_WB, 0x67, P6F_ALL, 0x20),
+P6_EVDESCR(BUS_TRAN_IFETCH, 0x68, P6F_ALL, 0x20),
+P6_EVDESCR(BUS_TRAN_INVAL, 0x69, P6F_ALL, 0x20),
+P6_EVDESCR(BUS_TRAN_PWR, 0x6A, P6F_ALL, 0x20),
+P6_EVDESCR(BUS_TRANS_P, 0x6B, P6F_ALL, 0x20),
+P6_EVDESCR(BUS_TRANS_IO, 0x6C, P6F_ALL, 0x20),
+P6_EVDESCR(BUS_TRAN_DEF, 0x6D, P6F_ALL, 0x20),
+P6_EVDESCR(BUS_TRAN_BURST, 0x6E, P6F_ALL, 0x20),
+P6_EVDESCR(BUS_TRAN_ANY, 0x70, P6F_ALL, 0x20),
+P6_EVDESCR(BUS_TRAN_MEM, 0x6F, P6F_ALL, 0x20),
+P6_EVDESCR(BUS_DATA_RCV, 0x64, P6F_ALL, 0x00),
+P6_EVDESCR(BUS_BNR_DRV, 0x61, P6F_ALL, 0x00),
+P6_EVDESCR(BUS_HIT_DRV, 0x7A, P6F_ALL, 0x00),
+P6_EVDESCR(BUS_HITM_DRV, 0x7B, P6F_ALL, 0x00),
+P6_EVDESCR(BUS_SNOOP_STALL, 0x7E, P6F_ALL, 0x00),
+P6_EVDESCR(FLOPS, 0xC1, P6F_ALL_CPUS | P6F_CTR0, 0x00),
+P6_EVDESCR(FP_COMPS_OPS_EXE, 0x10, P6F_ALL_CPUS | P6F_CTR0, 0x00),
+P6_EVDESCR(FP_ASSIST, 0x11, P6F_ALL_CPUS | P6F_CTR1, 0x00),
+P6_EVDESCR(MUL, 0x12, P6F_ALL_CPUS | P6F_CTR1, 0x00),
+P6_EVDESCR(DIV, 0x13, P6F_ALL_CPUS | P6F_CTR1, 0x00),
+P6_EVDESCR(CYCLES_DIV_BUSY, 0x14, P6F_ALL_CPUS | P6F_CTR0, 0x00),
+P6_EVDESCR(LD_BLOCKS, 0x03, P6F_ALL, 0x00),
+P6_EVDESCR(SB_DRAINS, 0x04, P6F_ALL, 0x00),
+P6_EVDESCR(MISALIGN_MEM_REF, 0x05, P6F_ALL, 0x00),
+P6_EVDESCR(EMON_KNI_PREF_DISPATCHED, 0x07, P6F_PIII | P6F_ALL_CTRS, 0x03),
+P6_EVDESCR(EMON_KNI_PREF_MISS, 0x4B, P6F_PIII | P6F_ALL_CTRS, 0x03),
+P6_EVDESCR(INST_RETIRED, 0xC0, P6F_ALL, 0x00),
+P6_EVDESCR(UOPS_RETIRED, 0xC2, P6F_ALL, 0x00),
+P6_EVDESCR(INST_DECODED, 0xD0, P6F_ALL, 0x00),
+P6_EVDESCR(EMON_KNI_INST_RETIRED, 0xD8, P6F_PIII | P6F_ALL_CTRS, 0x01),
+P6_EVDESCR(EMON_KNI_COMP_INST_RET, 0xD9, P6F_PIII | P6F_ALL_CTRS, 0x01),
+P6_EVDESCR(HW_INT_RX, 0xC8, P6F_ALL, 0x00),
+P6_EVDESCR(CYCLES_INT_MASKED, 0xC6, P6F_ALL, 0x00),
+P6_EVDESCR(CYCLES_INT_PENDING_AND_MASKED, 0xC7, P6F_ALL, 0x00),
+P6_EVDESCR(BR_INST_RETIRED, 0xC4, P6F_ALL, 0x00),
+P6_EVDESCR(BR_MISS_PRED_RETIRED, 0xC5, P6F_ALL, 0x00),
+P6_EVDESCR(BR_TAKEN_RETIRED, 0xC9, P6F_ALL, 0x00),
+P6_EVDESCR(BR_MISS_PRED_TAKEN_RET, 0xCA, P6F_ALL, 0x00),
+P6_EVDESCR(BR_INST_DECODED, 0xE0, P6F_ALL, 0x00),
+P6_EVDESCR(BTB_MISSES, 0xE2, P6F_ALL, 0x00),
+P6_EVDESCR(BR_BOGUS, 0xE4, P6F_ALL, 0x00),
+P6_EVDESCR(BACLEARS, 0xE6, P6F_ALL, 0x00),
+P6_EVDESCR(RESOURCE_STALLS, 0xA2, P6F_ALL, 0x00),
+P6_EVDESCR(PARTIAL_RAT_STALLS, 0xD2, P6F_ALL, 0x00),
+P6_EVDESCR(SEGMENT_REG_LOADS, 0x06, P6F_ALL, 0x00),
+P6_EVDESCR(CPU_CLK_UNHALTED, 0x79, P6F_ALL, 0x00),
+P6_EVDESCR(MMX_INSTR_EXEC, 0xB0,
+ P6F_ALL_CTRS | P6F_CL | P6F_PII, 0x00),
+P6_EVDESCR(MMX_SAT_INSTR_EXEC, 0xB1,
+ P6F_ALL_CTRS | P6F_PII | P6F_PIII, 0x00),
+P6_EVDESCR(MMX_UOPS_EXEC, 0xB2,
+ P6F_ALL_CTRS | P6F_PII | P6F_PIII, 0x0F),
+P6_EVDESCR(MMX_INSTR_TYPE_EXEC, 0xB3,
+ P6F_ALL_CTRS | P6F_PII | P6F_PIII, 0x3F),
+P6_EVDESCR(FP_MMX_TRANS, 0xCC,
+ P6F_ALL_CTRS | P6F_PII | P6F_PIII, 0x01),
+P6_EVDESCR(MMX_ASSIST, 0xCD,
+ P6F_ALL_CTRS | P6F_PII | P6F_PIII, 0x00),
+P6_EVDESCR(MMX_INSTR_RET, 0xCE, P6F_ALL_CTRS | P6F_PII, 0x00),
+P6_EVDESCR(SEG_RENAME_STALLS, 0xD4,
+ P6F_ALL_CTRS | P6F_PII | P6F_PIII, 0x0F),
+P6_EVDESCR(SEG_REG_RENAMES, 0xD5,
+ P6F_ALL_CTRS | P6F_PII | P6F_PIII, 0x0F),
+P6_EVDESCR(RET_SEG_RENAMES, 0xD6,
+ P6F_ALL_CTRS | P6F_PII | P6F_PIII, 0x00),
+P6_EVDESCR(EMON_EST_TRANS, 0x58, P6F_ALL_CTRS | P6F_PM, 0x02),
+P6_EVDESCR(EMON_THERMAL_TRIP, 0x59, P6F_ALL_CTRS | P6F_PM, 0x00),
+P6_EVDESCR(BR_INST_EXEC, 0x88, P6F_ALL_CTRS | P6F_PM, 0x00),
+P6_EVDESCR(BR_MISSP_EXEC, 0x89, P6F_ALL_CTRS | P6F_PM, 0x00),
+P6_EVDESCR(BR_BAC_MISSP_EXEC, 0x8A, P6F_ALL_CTRS | P6F_PM, 0x00),
+P6_EVDESCR(BR_CND_EXEC, 0x8B, P6F_ALL_CTRS | P6F_PM, 0x00),
+P6_EVDESCR(BR_CND_MISSP_EXEC, 0x8C, P6F_ALL_CTRS | P6F_PM, 0x00),
+P6_EVDESCR(BR_IND_EXEC, 0x8D, P6F_ALL_CTRS | P6F_PM, 0x00),
+P6_EVDESCR(BR_IND_MISSP_EXEC, 0x8E, P6F_ALL_CTRS | P6F_PM, 0x00),
+P6_EVDESCR(BR_RET_EXEC, 0x8F, P6F_ALL_CTRS | P6F_PM, 0x00),
+P6_EVDESCR(BR_RET_MISSP_EXEC, 0x90, P6F_ALL_CTRS | P6F_PM, 0x00),
+P6_EVDESCR(BR_RET_BAC_MISSP_EXEC, 0x91, P6F_ALL_CTRS | P6F_PM, 0x00),
+P6_EVDESCR(BR_CALL_EXEC, 0x92, P6F_ALL_CTRS | P6F_PM, 0x00),
+P6_EVDESCR(BR_CALL_MISSP_EXEC, 0x93, P6F_ALL_CTRS | P6F_PM, 0x00),
+P6_EVDESCR(BR_IND_CALL_EXEC, 0x94, P6F_ALL_CTRS | P6F_PM, 0x00),
+P6_EVDESCR(EMON_SIMD_INSTR_RETIRED, 0xCE, P6F_ALL_CTRS | P6F_PM, 0x00),
+P6_EVDESCR(EMON_SYNCH_UOPS, 0xD3, P6F_ALL_CTRS | P6F_PM, 0x00),
+P6_EVDESCR(EMON_ESP_UOPS, 0xD7, P6F_ALL_CTRS | P6F_PM, 0x00),
+P6_EVDESCR(EMON_FUSED_UOPS_RET, 0xDA, P6F_ALL_CTRS | P6F_PM, 0x03),
+P6_EVDESCR(EMON_UNFUSION, 0xDB, P6F_ALL_CTRS | P6F_PM, 0x00),
+P6_EVDESCR(EMON_PREF_RQSTS_UP, 0xF0, P6F_ALL_CTRS | P6F_PM, 0x00),
+P6_EVDESCR(EMON_PREF_RQSTS_DN, 0xD8, P6F_ALL_CTRS | P6F_PM, 0x00),
+P6_EVDESCR(EMON_SSE_SSE2_INST_RETIRED, 0xD8, P6F_ALL_CTRS | P6F_PM, 0x03),
+P6_EVDESCR(EMON_SSE_SSE2_COMP_INST_RETIRED, 0xD9, P6F_ALL_CTRS | P6F_PM, 0x03)
+
+#undef P6_EVDESCR
+};
+
+#define P6_NEVENTS (PMC_EV_P6_LAST - PMC_EV_P6_FIRST + 1)
+
+static const struct p6_event_descr *
+p6_find_event(enum pmc_event ev)
+{
+ int n;
+
+ for (n = 0; n < P6_NEVENTS; n++)
+ if (p6_events[n].pm_event == ev)
+ break;
+ if (n == P6_NEVENTS)
+		return (NULL);
+	return (&p6_events[n]);
+}
+
+/*
+ * Per-CPU data structure for P6 class CPUs
+ *
+ * [2 struct pmc_hw structures, one per counter]
+ * [flags for maintaining PMC start/stop state]
+ */
+
+struct p6_cpu {
+ struct pmc_hw pc_p6pmcs[P6_NPMCS];
+ uint32_t pc_state;
+};
+
+static struct p6_cpu **p6_pcpu;
+
+/*
+ * If CTR1 is active, we need to keep the 'EN' bit of CTR0 set,
+ * with the rest of CTR0 being zeroed out.
+ */
+#define P6_SYNC_CTR_STATE(PC) do { \
+ uint32_t _config, _enable; \
+ _enable = 0; \
+ if ((PC)->pc_state & 0x02) \
+ _enable |= P6_EVSEL_EN; \
+ if ((PC)->pc_state & 0x01) \
+ _config = rdmsr(P6_MSR_EVSEL0) | \
+ P6_EVSEL_EN; \
+ else \
+ _config = 0; \
+ wrmsr(P6_MSR_EVSEL0, _config | _enable); \
+ } while (0)
+
+#define	P6_MARK_STARTED(PC,RI) do {				\
+	(PC)->pc_state |= (1 << (RI));				\
+	} while (0)
+
+#define	P6_MARK_STOPPED(PC,RI) do {				\
+	(PC)->pc_state &= ~(1 << (RI));				\
+	} while (0)
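+
+/*
+ * Worked example (illustrative): if only CTR1 is running, pc_state is
+ * 0x02 and P6_SYNC_CTR_STATE() writes just P6_EVSEL_EN to EVSEL0;
+ * counter 0 then counts nothing, but its EN bit keeps counter 1
+ * running.  If CTR0 is running, the current EVSEL0 contents are
+ * rewritten with EN forced on.
+ */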
+
+static int
+p6_pcpu_init(struct pmc_mdep *md, int cpu)
+{
+ int first_ri, n;
+ struct p6_cpu *p6c;
+ struct pmc_cpu *pc;
+ struct pmc_hw *phw;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[p6,%d] bad cpu %d", __LINE__, cpu));
+
+ PMCDBG(MDP,INI,0,"p6-init cpu=%d", cpu);
+
+ p6c = malloc(sizeof (struct p6_cpu), M_PMC, M_WAITOK|M_ZERO);
+ pc = pmc_pcpu[cpu];
+
+ KASSERT(pc != NULL, ("[p6,%d] cpu %d null per-cpu", __LINE__, cpu));
+
+ phw = p6c->pc_p6pmcs;
+ p6_pcpu[cpu] = p6c;
+
+ first_ri = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_P6].pcd_ri;
+
+ for (n = 0; n < P6_NPMCS; n++, phw++) {
+ phw->phw_state = PMC_PHW_FLAG_IS_ENABLED |
+ PMC_PHW_CPU_TO_STATE(cpu) | PMC_PHW_INDEX_TO_STATE(n);
+ phw->phw_pmc = NULL;
+ pc->pc_hwpmcs[n + first_ri] = phw;
+ }
+
+ return (0);
+}
+
+static int
+p6_pcpu_fini(struct pmc_mdep *md, int cpu)
+{
+ int first_ri, n;
+ struct p6_cpu *p6c;
+ struct pmc_cpu *pc;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[p6,%d] bad cpu %d", __LINE__, cpu));
+
+ PMCDBG(MDP,INI,0,"p6-cleanup cpu=%d", cpu);
+
+ p6c = p6_pcpu[cpu];
+ p6_pcpu[cpu] = NULL;
+
+ KASSERT(p6c != NULL, ("[p6,%d] null pcpu", __LINE__));
+
+ free(p6c, M_PMC);
+
+ first_ri = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_P6].pcd_ri;
+ pc = pmc_pcpu[cpu];
+ for (n = 0; n < P6_NPMCS; n++)
+ pc->pc_hwpmcs[n + first_ri] = NULL;
+
+ return (0);
+}
+
+static int
+p6_read_pmc(int cpu, int ri, pmc_value_t *v)
+{
+ struct pmc *pm;
+ struct p6pmc_descr *pd;
+ pmc_value_t tmp;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[p6,%d] illegal cpu value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < P6_NPMCS,
+ ("[p6,%d] illegal row-index %d", __LINE__, ri));
+
+ pm = p6_pcpu[cpu]->pc_p6pmcs[ri].phw_pmc;
+ pd = &p6_pmcdesc[ri];
+
+ KASSERT(pm,
+ ("[p6,%d] cpu %d ri %d pmc not configured", __LINE__, cpu, ri));
+
+ tmp = rdmsr(pd->pm_pmc_msr) & P6_PERFCTR_READ_MASK;
+ if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
+ *v = P6_PERFCTR_VALUE_TO_RELOAD_COUNT(tmp);
+ else
+ *v = tmp;
+
+ PMCDBG(MDP,REA,1, "p6-read cpu=%d ri=%d msr=0x%x -> v=%jx", cpu, ri,
+ pd->pm_pmc_msr, *v);
+
+ return (0);
+}
+
+static int
+p6_write_pmc(int cpu, int ri, pmc_value_t v)
+{
+ struct pmc *pm;
+ struct p6pmc_descr *pd;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[p6,%d] illegal cpu value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < P6_NPMCS,
+ ("[p6,%d] illegal row-index %d", __LINE__, ri));
+
+ pm = p6_pcpu[cpu]->pc_p6pmcs[ri].phw_pmc;
+ pd = &p6_pmcdesc[ri];
+
+ KASSERT(pm,
+ ("[p6,%d] cpu %d ri %d pmc not configured", __LINE__, cpu, ri));
+
+ PMCDBG(MDP,WRI,1, "p6-write cpu=%d ri=%d msr=0x%x v=%jx", cpu, ri,
+ pd->pm_pmc_msr, v);
+
+ if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
+ v = P6_RELOAD_COUNT_TO_PERFCTR_VALUE(v);
+
+ wrmsr(pd->pm_pmc_msr, v & P6_PERFCTR_WRITE_MASK);
+
+ return (0);
+}
+
+static int
+p6_config_pmc(int cpu, int ri, struct pmc *pm)
+{
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[p6,%d] illegal CPU %d", __LINE__, cpu));
+
+ KASSERT(ri >= 0 && ri < P6_NPMCS,
+ ("[p6,%d] illegal row-index %d", __LINE__, ri));
+
+ PMCDBG(MDP,CFG,1, "p6-config cpu=%d ri=%d pm=%p", cpu, ri, pm);
+
+ KASSERT(p6_pcpu[cpu] != NULL, ("[p6,%d] null per-cpu %d", __LINE__,
+ cpu));
+
+ p6_pcpu[cpu]->pc_p6pmcs[ri].phw_pmc = pm;
+
+ return (0);
+}
+
+/*
+ * Retrieve a configured PMC pointer from hardware state.
+ */
+
+static int
+p6_get_config(int cpu, int ri, struct pmc **ppm)
+{
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[p6,%d] illegal CPU %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < P6_NPMCS,
+ ("[p6,%d] illegal row-index %d", __LINE__, ri));
+
+ *ppm = p6_pcpu[cpu]->pc_p6pmcs[ri].phw_pmc;
+
+ return (0);
+}
+
+
+/*
+ * A pmc may be allocated to a given row index if:
+ * - the event is valid for this CPU
+ * - the event is valid for this counter index
+ */
+
+static int
+p6_allocate_pmc(int cpu, int ri, struct pmc *pm,
+ const struct pmc_op_pmcallocate *a)
+{
+ uint32_t allowed_unitmask, caps, config, unitmask;
+ const struct p6pmc_descr *pd;
+ const struct p6_event_descr *pevent;
+ enum pmc_event ev;
+
+ (void) cpu;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[p6,%d] illegal CPU %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < P6_NPMCS,
+ ("[p6,%d] illegal row-index value %d", __LINE__, ri));
+
+ pd = &p6_pmcdesc[ri];
+
+ PMCDBG(MDP,ALL,1, "p6-allocate ri=%d class=%d pmccaps=0x%x "
+ "reqcaps=0x%x", ri, pd->pm_descr.pd_class, pd->pm_descr.pd_caps,
+ pm->pm_caps);
+
+ /* check class */
+ if (pd->pm_descr.pd_class != a->pm_class)
+ return (EINVAL);
+
+ /* check requested capabilities */
+ caps = a->pm_caps;
+ if ((pd->pm_descr.pd_caps & caps) != caps)
+ return (EPERM);
+
+ ev = pm->pm_event;
+
+ if (ev < PMC_EV_P6_FIRST || ev > PMC_EV_P6_LAST)
+ return (EINVAL);
+
+ if ((pevent = p6_find_event(ev)) == NULL)
+ return (ESRCH);
+
+	if (!P6_EVENT_VALID_FOR_CPU(pevent, p6_cputype) ||
+	    !P6_EVENT_VALID_FOR_CTR(pevent, ri))
+ return (EINVAL);
+
+ /* For certain events, Pentium M differs from the stock P6 */
+ allowed_unitmask = 0;
+ if (p6_cputype == PMC_CPU_INTEL_PM) {
+ if (ev == PMC_EV_P6_L2_LD || ev == PMC_EV_P6_L2_LINES_IN ||
+ ev == PMC_EV_P6_L2_LINES_OUT)
+ allowed_unitmask = P6_EVSEL_TO_UMASK(0x3F);
+ else if (ev == PMC_EV_P6_L2_M_LINES_OUTM)
+ allowed_unitmask = P6_EVSEL_TO_UMASK(0x30);
+ } else
+ allowed_unitmask = P6_EVSEL_TO_UMASK(pevent->pm_unitmask);
+
+ unitmask = a->pm_md.pm_ppro.pm_ppro_config & P6_EVSEL_UMASK_MASK;
+ if (unitmask & ~allowed_unitmask) /* disallow reserved bits */
+ return (EINVAL);
+
+ if (ev == PMC_EV_P6_MMX_UOPS_EXEC) /* hardcoded mask */
+ unitmask = P6_EVSEL_TO_UMASK(0x0F);
+
+ config = 0;
+
+ config |= P6_EVSEL_EVENT_SELECT(pevent->pm_evsel);
+
+ if (unitmask & (caps & PMC_CAP_QUALIFIER))
+ config |= unitmask;
+
+ if (caps & PMC_CAP_THRESHOLD)
+ config |= a->pm_md.pm_ppro.pm_ppro_config &
+ P6_EVSEL_CMASK_MASK;
+
+ /* set at least one of the 'usr' or 'os' caps */
+ if (caps & PMC_CAP_USER)
+ config |= P6_EVSEL_USR;
+ if (caps & PMC_CAP_SYSTEM)
+ config |= P6_EVSEL_OS;
+ if ((caps & (PMC_CAP_USER|PMC_CAP_SYSTEM)) == 0)
+ config |= (P6_EVSEL_USR|P6_EVSEL_OS);
+
+ if (caps & PMC_CAP_EDGE)
+ config |= P6_EVSEL_E;
+ if (caps & PMC_CAP_INVERT)
+ config |= P6_EVSEL_INV;
+ if (caps & PMC_CAP_INTERRUPT)
+ config |= P6_EVSEL_INT;
+
+ pm->pm_md.pm_ppro.pm_ppro_evsel = config;
+
+ PMCDBG(MDP,ALL,2, "p6-allocate config=0x%x", config);
+
+ return (0);
+}
+
+static int
+p6_release_pmc(int cpu, int ri, struct pmc *pm)
+{
+ (void) pm;
+
+ PMCDBG(MDP,REL,1, "p6-release cpu=%d ri=%d pm=%p", cpu, ri, pm);
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[p6,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < P6_NPMCS,
+ ("[p6,%d] illegal row-index %d", __LINE__, ri));
+
+ KASSERT(p6_pcpu[cpu]->pc_p6pmcs[ri].phw_pmc == NULL,
+ ("[p6,%d] PHW pmc non-NULL", __LINE__));
+
+ return (0);
+}
+
+static int
+p6_start_pmc(int cpu, int ri)
+{
+ uint32_t config;
+ struct pmc *pm;
+ struct p6_cpu *pc;
+ const struct p6pmc_descr *pd;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[p6,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < P6_NPMCS,
+ ("[p6,%d] illegal row-index %d", __LINE__, ri));
+
+ pc = p6_pcpu[cpu];
+ pm = pc->pc_p6pmcs[ri].phw_pmc;
+ pd = &p6_pmcdesc[ri];
+
+ KASSERT(pm,
+ ("[p6,%d] starting cpu%d,ri%d with no pmc configured",
+ __LINE__, cpu, ri));
+
+ PMCDBG(MDP,STA,1, "p6-start cpu=%d ri=%d", cpu, ri);
+
+ config = pm->pm_md.pm_ppro.pm_ppro_evsel;
+
+ PMCDBG(MDP,STA,2, "p6-start/2 cpu=%d ri=%d evselmsr=0x%x config=0x%x",
+ cpu, ri, pd->pm_evsel_msr, config);
+
+ P6_MARK_STARTED(pc, ri);
+ wrmsr(pd->pm_evsel_msr, config);
+
+ P6_SYNC_CTR_STATE(pc);
+
+ return (0);
+}
+
+static int
+p6_stop_pmc(int cpu, int ri)
+{
+ struct pmc *pm;
+ struct p6_cpu *pc;
+ struct p6pmc_descr *pd;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[p6,%d] illegal cpu value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < P6_NPMCS,
+ ("[p6,%d] illegal row index %d", __LINE__, ri));
+
+ pc = p6_pcpu[cpu];
+ pm = pc->pc_p6pmcs[ri].phw_pmc;
+ pd = &p6_pmcdesc[ri];
+
+ KASSERT(pm,
+ ("[p6,%d] cpu%d ri%d no configured PMC to stop", __LINE__,
+ cpu, ri));
+
+ PMCDBG(MDP,STO,1, "p6-stop cpu=%d ri=%d", cpu, ri);
+
+ wrmsr(pd->pm_evsel_msr, 0); /* stop hw */
+ P6_MARK_STOPPED(pc, ri); /* update software state */
+
+ P6_SYNC_CTR_STATE(pc); /* restart CTR1 if need be */
+
+ PMCDBG(MDP,STO,2, "p6-stop/2 cpu=%d ri=%d", cpu, ri);
+
+ return (0);
+}
+
+static int
+p6_intr(int cpu, struct trapframe *tf)
+{
+ int error, retval, ri;
+ uint32_t perf0cfg;
+ struct pmc *pm;
+ struct p6_cpu *pc;
+ pmc_value_t v;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[p6,%d] CPU %d out of range", __LINE__, cpu));
+
+ retval = 0;
+ pc = p6_pcpu[cpu];
+
+ /* stop both PMCs */
+ perf0cfg = rdmsr(P6_MSR_EVSEL0);
+ wrmsr(P6_MSR_EVSEL0, perf0cfg & ~P6_EVSEL_EN);
+
+ for (ri = 0; ri < P6_NPMCS; ri++) {
+
+ if ((pm = pc->pc_p6pmcs[ri].phw_pmc) == NULL ||
+ !PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) {
+ continue;
+ }
+
+ if (!P6_PMC_HAS_OVERFLOWED(ri))
+ continue;
+
+ retval = 1;
+
+ if (pm->pm_state != PMC_STATE_RUNNING)
+ continue;
+
+ error = pmc_process_interrupt(cpu, PMC_HR, pm, tf,
+ TRAPF_USERMODE(tf));
+ if (error)
+ P6_MARK_STOPPED(pc,ri);
+
+ /* reload sampling count */
+ v = pm->pm_sc.pm_reloadcount;
+ wrmsr(P6_MSR_PERFCTR0 + ri,
+ P6_RELOAD_COUNT_TO_PERFCTR_VALUE(v));
+
+ }
+
+ /*
+ * On P6 processors, the LAPIC needs to have its PMC interrupt
+ * unmasked after a PMC interrupt.
+ */
+ if (retval)
+ lapic_reenable_pmc();
+
+ atomic_add_int(retval ? &pmc_stats.pm_intr_processed :
+ &pmc_stats.pm_intr_ignored, 1);
+
+ /* restart counters that can be restarted */
+ P6_SYNC_CTR_STATE(pc);
+
+ return (retval);
+}
+
+static int
+p6_describe(int cpu, int ri, struct pmc_info *pi,
+ struct pmc **ppmc)
+{
+ int error;
+ size_t copied;
+ struct pmc_hw *phw;
+ struct p6pmc_descr *pd;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[p6,%d] illegal CPU %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < P6_NPMCS,
+ ("[p6,%d] row-index %d out of range", __LINE__, ri));
+
+ phw = pmc_pcpu[cpu]->pc_hwpmcs[ri];
+ pd = &p6_pmcdesc[ri];
+
+ KASSERT(phw == &p6_pcpu[cpu]->pc_p6pmcs[ri],
+ ("[p6,%d] phw mismatch", __LINE__));
+
+ if ((error = copystr(pd->pm_descr.pd_name, pi->pm_name,
+ PMC_NAME_MAX, &copied)) != 0)
+ return (error);
+
+ pi->pm_class = pd->pm_descr.pd_class;
+
+ if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) {
+ pi->pm_enabled = TRUE;
+ *ppmc = phw->phw_pmc;
+ } else {
+ pi->pm_enabled = FALSE;
+ *ppmc = NULL;
+ }
+
+ return (0);
+}
+
+static int
+p6_get_msr(int ri, uint32_t *msr)
+{
+ KASSERT(ri >= 0 && ri < P6_NPMCS,
+	    ("[p6,%d] ri %d out of range", __LINE__, ri));
+
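+	/*
+	 * Report the counter's offset from PERFCTR0; this is the
+	 * counter index a user process can pass to RDPMC to read the
+	 * PMC directly (an inference from the subtraction below).
+	 */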
+ *msr = p6_pmcdesc[ri].pm_pmc_msr - P6_MSR_PERFCTR0;
+
+ return (0);
+}
+
+int
+pmc_p6_initialize(struct pmc_mdep *md, int ncpus)
+{
+ struct pmc_classdep *pcd;
+
+ KASSERT(cpu_vendor_id == CPU_VENDOR_INTEL,
+ ("[p6,%d] Initializing non-intel processor", __LINE__));
+
+ PMCDBG(MDP,INI,1, "%s", "p6-initialize");
+
+ /* Allocate space for pointers to per-cpu descriptors. */
+	p6_pcpu = malloc(sizeof(struct p6_cpu *) * ncpus, M_PMC,
+ M_ZERO|M_WAITOK);
+
+ /* Fill in the class dependent descriptor. */
+ pcd = &md->pmd_classdep[PMC_MDEP_CLASS_INDEX_P6];
+
+ switch (md->pmd_cputype) {
+
+ /*
+ * P6 Family Processors
+ */
+ case PMC_CPU_INTEL_P6:
+ case PMC_CPU_INTEL_CL:
+ case PMC_CPU_INTEL_PII:
+ case PMC_CPU_INTEL_PIII:
+ case PMC_CPU_INTEL_PM:
+
+ p6_cputype = md->pmd_cputype;
+
+ pcd->pcd_caps = P6_PMC_CAPS;
+ pcd->pcd_class = PMC_CLASS_P6;
+ pcd->pcd_num = P6_NPMCS;
+ pcd->pcd_ri = md->pmd_npmc;
+ pcd->pcd_width = 40;
+
+ pcd->pcd_allocate_pmc = p6_allocate_pmc;
+ pcd->pcd_config_pmc = p6_config_pmc;
+ pcd->pcd_describe = p6_describe;
+ pcd->pcd_get_config = p6_get_config;
+ pcd->pcd_get_msr = p6_get_msr;
+ pcd->pcd_pcpu_fini = p6_pcpu_fini;
+ pcd->pcd_pcpu_init = p6_pcpu_init;
+ pcd->pcd_read_pmc = p6_read_pmc;
+ pcd->pcd_release_pmc = p6_release_pmc;
+ pcd->pcd_start_pmc = p6_start_pmc;
+ pcd->pcd_stop_pmc = p6_stop_pmc;
+ pcd->pcd_write_pmc = p6_write_pmc;
+
+ md->pmd_pcpu_fini = NULL;
+ md->pmd_pcpu_init = NULL;
+ md->pmd_intr = p6_intr;
+
+ md->pmd_npmc += P6_NPMCS;
+
+ break;
+
+ default:
+ KASSERT(0,("[p6,%d] Unknown CPU type", __LINE__));
+		return (ENOSYS);
+ }
+
+ return (0);
+}
+
+void
+pmc_p6_finalize(struct pmc_mdep *md)
+{
+#if defined(INVARIANTS)
+ int i, ncpus;
+#endif
+
+ KASSERT(p6_pcpu != NULL, ("[p6,%d] NULL p6_pcpu", __LINE__));
+
+#if defined(INVARIANTS)
+ ncpus = pmc_cpu_max();
+ for (i = 0; i < ncpus; i++)
+ KASSERT(p6_pcpu[i] == NULL, ("[p6,%d] non-null pcpu %d",
+ __LINE__, i));
+#endif
+
+ free(p6_pcpu, M_PMC);
+ p6_pcpu = NULL;
+}
diff --git a/sys/dev/hwpmc/hwpmc_ppro.h b/sys/dev/hwpmc/hwpmc_ppro.h
new file mode 100644
index 0000000..c42a2b4
--- /dev/null
+++ b/sys/dev/hwpmc/hwpmc_ppro.h
@@ -0,0 +1,84 @@
+/*-
+ * Copyright (c) 2005, Joseph Koshy
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/* Machine dependent interfaces */
+
+#ifndef _DEV_HWPMC_PPRO_H_
+#define _DEV_HWPMC_PPRO_H_
+
+/* Intel PPro, Celeron, P-II, P-III, Pentium-M PMCS */
+
+#define P6_NPMCS 2 /* 2 PMCs */
+
+#define P6_EVSEL_CMASK_MASK 0xFF000000
+#define P6_EVSEL_TO_CMASK(C) (((C) & 0xFF) << 24)
+#define P6_EVSEL_INV (1 << 23)
+#define P6_EVSEL_EN (1 << 22)
+#define P6_EVSEL_INT (1 << 20)
+#define P6_EVSEL_PC (1 << 19)
+#define P6_EVSEL_E (1 << 18)
+#define P6_EVSEL_OS (1 << 17)
+#define P6_EVSEL_USR (1 << 16)
+#define P6_EVSEL_UMASK_MASK 0x0000FF00
+#define P6_EVSEL_TO_UMASK(U) (((U) & 0xFF) << 8)
+#define P6_EVSEL_EVENT_SELECT(ES) ((ES) & 0xFF)
+#define P6_EVSEL_RESERVED (1 << 21)
+
+#define P6_MSR_EVSEL0 0x0186
+#define P6_MSR_EVSEL1 0x0187
+#define P6_MSR_PERFCTR0 0x00C1
+#define P6_MSR_PERFCTR1 0x00C2
+
+#define P6_PERFCTR_READ_MASK 0xFFFFFFFFFFLL /* 40 bits */
+#define P6_PERFCTR_WRITE_MASK 0xFFFFFFFFU /* 32 bits */
+
+#define P6_RELOAD_COUNT_TO_PERFCTR_VALUE(R) (-(R))
+#define P6_PERFCTR_VALUE_TO_RELOAD_COUNT(P) (-(P))
+
+#define P6_PMC_HAS_OVERFLOWED(P) ((rdpmc(P) & (1LL << 39)) == 0)
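+
+/*
+ * Note (illustrative): sampling PMCs are loaded with the negated
+ * reload count, so bit 39 of the 40 bit counter starts out set.  An
+ * RDPMC value with bit 39 clear therefore means the counter has
+ * counted up past zero, i.e. it has overflowed.
+ */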
+
+struct pmc_md_ppro_op_pmcallocate {
+ uint32_t pm_ppro_config;
+};
+
+#ifdef _KERNEL
+
+/* MD extension for 'struct pmc' */
+struct pmc_md_ppro_pmc {
+ uint32_t pm_ppro_evsel;
+};
+
+/*
+ * Prototypes
+ */
+
+int pmc_p6_initialize(struct pmc_mdep *_md, int _ncpus);
+void pmc_p6_finalize(struct pmc_mdep *_md);
+
+#endif /* _KERNEL */
+#endif /* _DEV_HWPMC_PPRO_H_ */
diff --git a/sys/dev/hwpmc/hwpmc_soft.c b/sys/dev/hwpmc/hwpmc_soft.c
new file mode 100644
index 0000000..dac3612
--- /dev/null
+++ b/sys/dev/hwpmc/hwpmc_soft.c
@@ -0,0 +1,485 @@
+/*-
+ * Copyright (c) 2012 Fabien Thomas
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/pmc.h>
+#include <sys/pmckern.h>
+#include <sys/systm.h>
+#include <sys/mutex.h>
+
+#include <machine/cpu.h>
+#include <machine/cpufunc.h>
+
+#include "hwpmc_soft.h"
+
+/*
+ * Software PMC support.
+ */
+
+#define SOFT_CAPS (PMC_CAP_READ | PMC_CAP_WRITE | PMC_CAP_INTERRUPT | \
+ PMC_CAP_USER | PMC_CAP_SYSTEM)
+
+struct soft_descr {
+ struct pmc_descr pm_descr; /* "base class" */
+};
+
+static struct soft_descr soft_pmcdesc[SOFT_NPMCS] =
+{
+#define SOFT_PMCDESCR(N) \
+ { \
+ .pm_descr = \
+ { \
+ .pd_name = #N, \
+ .pd_class = PMC_CLASS_SOFT, \
+ .pd_caps = SOFT_CAPS, \
+ .pd_width = 64 \
+ }, \
+ }
+
+ SOFT_PMCDESCR(SOFT0),
+ SOFT_PMCDESCR(SOFT1),
+ SOFT_PMCDESCR(SOFT2),
+ SOFT_PMCDESCR(SOFT3),
+ SOFT_PMCDESCR(SOFT4),
+ SOFT_PMCDESCR(SOFT5),
+ SOFT_PMCDESCR(SOFT6),
+ SOFT_PMCDESCR(SOFT7),
+ SOFT_PMCDESCR(SOFT8),
+ SOFT_PMCDESCR(SOFT9),
+ SOFT_PMCDESCR(SOFT10),
+ SOFT_PMCDESCR(SOFT11),
+ SOFT_PMCDESCR(SOFT12),
+ SOFT_PMCDESCR(SOFT13),
+ SOFT_PMCDESCR(SOFT14),
+ SOFT_PMCDESCR(SOFT15)
+};
+
+/*
+ * Per-CPU data structure.
+ */
+
+struct soft_cpu {
+ struct pmc_hw soft_hw[SOFT_NPMCS];
+ pmc_value_t soft_values[SOFT_NPMCS];
+};
+
+static struct soft_cpu **soft_pcpu;
+
+static int
+soft_allocate_pmc(int cpu, int ri, struct pmc *pm,
+ const struct pmc_op_pmcallocate *a)
+{
+ enum pmc_event ev;
+ struct pmc_soft *ps;
+
+ (void) cpu;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[soft,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < SOFT_NPMCS,
+ ("[soft,%d] illegal row-index %d", __LINE__, ri));
+
+ if (a->pm_class != PMC_CLASS_SOFT)
+ return (EINVAL);
+
+ if ((pm->pm_caps & SOFT_CAPS) == 0)
+ return (EINVAL);
+
+ if ((pm->pm_caps & ~SOFT_CAPS) != 0)
+ return (EPERM);
+
+ ev = pm->pm_event;
+ if ((int)ev < PMC_EV_SOFT_FIRST || (int)ev > PMC_EV_SOFT_LAST)
+ return (EINVAL);
+
+ /* Check if event is registered. */
+ ps = pmc_soft_ev_acquire(ev);
+ if (ps == NULL)
+ return (EINVAL);
+ pmc_soft_ev_release(ps);
+
+ return (0);
+}
+
+static int
+soft_config_pmc(int cpu, int ri, struct pmc *pm)
+{
+ struct pmc_hw *phw;
+
+ PMCDBG(MDP,CFG,1, "cpu=%d ri=%d pm=%p", cpu, ri, pm);
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[soft,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < SOFT_NPMCS,
+ ("[soft,%d] illegal row-index %d", __LINE__, ri));
+
+ phw = &soft_pcpu[cpu]->soft_hw[ri];
+
+ KASSERT(pm == NULL || phw->phw_pmc == NULL,
+ ("[soft,%d] pm=%p phw->pm=%p hwpmc not unconfigured", __LINE__,
+ pm, phw->phw_pmc));
+
+ phw->phw_pmc = pm;
+
+ return (0);
+}
+
+static int
+soft_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc)
+{
+ int error;
+ size_t copied;
+ const struct soft_descr *pd;
+ struct pmc_hw *phw;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[soft,%d] illegal CPU %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < SOFT_NPMCS,
+ ("[soft,%d] illegal row-index %d", __LINE__, ri));
+
+ phw = &soft_pcpu[cpu]->soft_hw[ri];
+ pd = &soft_pmcdesc[ri];
+
+ if ((error = copystr(pd->pm_descr.pd_name, pi->pm_name,
+ PMC_NAME_MAX, &copied)) != 0)
+ return (error);
+
+ pi->pm_class = pd->pm_descr.pd_class;
+
+ if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) {
+ pi->pm_enabled = TRUE;
+ *ppmc = phw->phw_pmc;
+ } else {
+ pi->pm_enabled = FALSE;
+ *ppmc = NULL;
+ }
+
+ return (0);
+}
+
+static int
+soft_get_config(int cpu, int ri, struct pmc **ppm)
+{
+ (void) ri;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[soft,%d] illegal CPU %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < SOFT_NPMCS,
+ ("[soft,%d] illegal row-index %d", __LINE__, ri));
+
+ *ppm = soft_pcpu[cpu]->soft_hw[ri].phw_pmc;
+ return (0);
+}
+
+static int
+soft_pcpu_fini(struct pmc_mdep *md, int cpu)
+{
+ int ri;
+ struct pmc_cpu *pc;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[soft,%d] illegal cpu %d", __LINE__, cpu));
+ KASSERT(soft_pcpu[cpu] != NULL, ("[soft,%d] null pcpu", __LINE__));
+
+ free(soft_pcpu[cpu], M_PMC);
+ soft_pcpu[cpu] = NULL;
+
+ ri = md->pmd_classdep[PMC_CLASS_INDEX_SOFT].pcd_ri;
+
+ KASSERT(ri >= 0 && ri < SOFT_NPMCS,
+ ("[soft,%d] ri=%d", __LINE__, ri));
+
+ pc = pmc_pcpu[cpu];
+ pc->pc_hwpmcs[ri] = NULL;
+
+ return (0);
+}
+
+static int
+soft_pcpu_init(struct pmc_mdep *md, int cpu)
+{
+ int first_ri, n;
+ struct pmc_cpu *pc;
+ struct soft_cpu *soft_pc;
+ struct pmc_hw *phw;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[soft,%d] illegal cpu %d", __LINE__, cpu));
+ KASSERT(soft_pcpu, ("[soft,%d] null pcpu", __LINE__));
+ KASSERT(soft_pcpu[cpu] == NULL, ("[soft,%d] non-null per-cpu",
+ __LINE__));
+
+ soft_pc = malloc(sizeof(struct soft_cpu), M_PMC, M_WAITOK|M_ZERO);
+
+ pc = pmc_pcpu[cpu];
+
+ KASSERT(pc != NULL, ("[soft,%d] cpu %d null per-cpu", __LINE__, cpu));
+
+ soft_pcpu[cpu] = soft_pc;
+ phw = soft_pc->soft_hw;
+ first_ri = md->pmd_classdep[PMC_CLASS_INDEX_SOFT].pcd_ri;
+
+ for (n = 0; n < SOFT_NPMCS; n++, phw++) {
+ phw->phw_state = PMC_PHW_FLAG_IS_ENABLED |
+ PMC_PHW_CPU_TO_STATE(cpu) | PMC_PHW_INDEX_TO_STATE(n);
+ phw->phw_pmc = NULL;
+ pc->pc_hwpmcs[n + first_ri] = phw;
+ }
+
+ return (0);
+}
+
+static int
+soft_read_pmc(int cpu, int ri, pmc_value_t *v)
+{
+ struct pmc *pm;
+ const struct pmc_hw *phw;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[soft,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < SOFT_NPMCS,
+ ("[soft,%d] illegal row-index %d", __LINE__, ri));
+
+ phw = &soft_pcpu[cpu]->soft_hw[ri];
+ pm = phw->phw_pmc;
+
+ KASSERT(pm != NULL,
+ ("[soft,%d] no owner for PHW [cpu%d,pmc%d]", __LINE__, cpu, ri));
+
+ PMCDBG(MDP,REA,1,"soft-read id=%d", ri);
+
+ *v = soft_pcpu[cpu]->soft_values[ri];
+
+ return (0);
+}
+
+static int
+soft_write_pmc(int cpu, int ri, pmc_value_t v)
+{
+ struct pmc *pm;
+ const struct soft_descr *pd;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[soft,%d] illegal cpu value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < SOFT_NPMCS,
+ ("[soft,%d] illegal row-index %d", __LINE__, ri));
+
+ pm = soft_pcpu[cpu]->soft_hw[ri].phw_pmc;
+ pd = &soft_pmcdesc[ri];
+
+ KASSERT(pm,
+ ("[soft,%d] cpu %d ri %d pmc not configured", __LINE__, cpu, ri));
+
+ PMCDBG(MDP,WRI,1, "soft-write cpu=%d ri=%d v=%jx", cpu, ri, v);
+
+ soft_pcpu[cpu]->soft_values[ri] = v;
+
+ return (0);
+}
+
+static int
+soft_release_pmc(int cpu, int ri, struct pmc *pmc)
+{
+ struct pmc_hw *phw;
+
+ (void) pmc;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[soft,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < SOFT_NPMCS,
+ ("[soft,%d] illegal row-index %d", __LINE__, ri));
+
+ phw = &soft_pcpu[cpu]->soft_hw[ri];
+
+ KASSERT(phw->phw_pmc == NULL,
+ ("[soft,%d] PHW pmc %p non-NULL", __LINE__, phw->phw_pmc));
+
+ /*
+ * Nothing to do.
+ */
+ return (0);
+}
+
+static int
+soft_start_pmc(int cpu, int ri)
+{
+ struct pmc *pm;
+ struct soft_cpu *pc;
+ struct pmc_soft *ps;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[soft,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < SOFT_NPMCS,
+ ("[soft,%d] illegal row-index %d", __LINE__, ri));
+
+ pc = soft_pcpu[cpu];
+ pm = pc->soft_hw[ri].phw_pmc;
+
+ KASSERT(pm,
+ ("[soft,%d] cpu %d ri %d pmc not configured", __LINE__, cpu, ri));
+
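+	/*
+	 * Bump the event's running count; this is assumed to let the
+	 * event source cheaply test whether any PMC is consuming the
+	 * event before taking the pmc_soft_intr() path.
+	 */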
+ ps = pmc_soft_ev_acquire(pm->pm_event);
+ if (ps == NULL)
+ return (EINVAL);
+ atomic_add_int(&ps->ps_running, 1);
+ pmc_soft_ev_release(ps);
+
+ return (0);
+}
+
+static int
+soft_stop_pmc(int cpu, int ri)
+{
+ struct pmc *pm;
+ struct soft_cpu *pc;
+ struct pmc_soft *ps;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[soft,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < SOFT_NPMCS,
+ ("[soft,%d] illegal row-index %d", __LINE__, ri));
+
+ pc = soft_pcpu[cpu];
+ pm = pc->soft_hw[ri].phw_pmc;
+
+ KASSERT(pm,
+ ("[soft,%d] cpu %d ri %d pmc not configured", __LINE__, cpu, ri));
+
+ ps = pmc_soft_ev_acquire(pm->pm_event);
+	/* Ignore if the event was unregistered in the meantime. */
+ if (ps != NULL) {
+ atomic_subtract_int(&ps->ps_running, 1);
+ pmc_soft_ev_release(ps);
+ }
+
+ return (0);
+}
+
+int
+pmc_soft_intr(struct pmckern_soft *ks)
+{
+ struct pmc *pm;
+ struct soft_cpu *pc;
+ int ri, processed, error, user_mode;
+
+ KASSERT(ks->pm_cpu >= 0 && ks->pm_cpu < pmc_cpu_max(),
+ ("[soft,%d] CPU %d out of range", __LINE__, ks->pm_cpu));
+
+ processed = 0;
+ pc = soft_pcpu[ks->pm_cpu];
+
+ for (ri = 0; ri < SOFT_NPMCS; ri++) {
+
+ pm = pc->soft_hw[ri].phw_pmc;
+ if (pm == NULL ||
+ pm->pm_state != PMC_STATE_RUNNING ||
+ pm->pm_event != ks->pm_ev) {
+ continue;
+ }
+
+ processed = 1;
+ pc->soft_values[ri]++;
+ if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) {
+ user_mode = TRAPF_USERMODE(ks->pm_tf);
+ error = pmc_process_interrupt(ks->pm_cpu, PMC_SR, pm,
+ ks->pm_tf, user_mode);
+ if (error) {
+ soft_stop_pmc(ks->pm_cpu, ri);
+ continue;
+ }
+
+ if (user_mode) {
+				/*
+				 * If in user mode, set up an AST to
+				 * process the callchain out of
+				 * interrupt context.
+				 */
+ curthread->td_flags |= TDF_ASTPENDING;
+ }
+ }
+ }
+
+ atomic_add_int(processed ? &pmc_stats.pm_intr_processed :
+ &pmc_stats.pm_intr_ignored, 1);
+
+ return (processed);
+}
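+
+/*
+ * Usage sketch (hypothetical caller, not part of this file): an event
+ * source fires a software PMC event by filling in a 'struct
+ * pmckern_soft' and invoking pmc_soft_intr():
+ *
+ *	struct pmckern_soft ks;
+ *
+ *	ks.pm_ev = PMC_EV_SOFT_FIRST;	// event being signalled
+ *	ks.pm_cpu = PCPU_GET(cpuid);	// current CPU
+ *	ks.pm_tf = tf;			// trapframe at the event site
+ *	(void) pmc_soft_intr(&ks);
+ *
+ * Every running SOFT PMC bound to 'pm_ev' has its 64 bit count bumped;
+ * sampling-mode PMCs additionally log a sample via
+ * pmc_process_interrupt().
+ */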
+
+void
+pmc_soft_initialize(struct pmc_mdep *md)
+{
+ struct pmc_classdep *pcd;
+
+ /* Add SOFT PMCs. */
+ soft_pcpu = malloc(sizeof(struct soft_cpu *) * pmc_cpu_max(), M_PMC,
+ M_ZERO|M_WAITOK);
+
+ pcd = &md->pmd_classdep[PMC_CLASS_INDEX_SOFT];
+
+ pcd->pcd_caps = SOFT_CAPS;
+ pcd->pcd_class = PMC_CLASS_SOFT;
+ pcd->pcd_num = SOFT_NPMCS;
+ pcd->pcd_ri = md->pmd_npmc;
+ pcd->pcd_width = 64;
+
+ pcd->pcd_allocate_pmc = soft_allocate_pmc;
+ pcd->pcd_config_pmc = soft_config_pmc;
+ pcd->pcd_describe = soft_describe;
+ pcd->pcd_get_config = soft_get_config;
+ pcd->pcd_get_msr = NULL;
+ pcd->pcd_pcpu_init = soft_pcpu_init;
+ pcd->pcd_pcpu_fini = soft_pcpu_fini;
+ pcd->pcd_read_pmc = soft_read_pmc;
+ pcd->pcd_write_pmc = soft_write_pmc;
+ pcd->pcd_release_pmc = soft_release_pmc;
+ pcd->pcd_start_pmc = soft_start_pmc;
+ pcd->pcd_stop_pmc = soft_stop_pmc;
+
+ md->pmd_npmc += SOFT_NPMCS;
+}
+
+void
+pmc_soft_finalize(struct pmc_mdep *md)
+{
+#ifdef INVARIANTS
+ int i, ncpus;
+
+ ncpus = pmc_cpu_max();
+ for (i = 0; i < ncpus; i++)
+ KASSERT(soft_pcpu[i] == NULL, ("[soft,%d] non-null pcpu cpu %d",
+ __LINE__, i));
+
+ KASSERT(md->pmd_classdep[PMC_CLASS_INDEX_SOFT].pcd_class ==
+ PMC_CLASS_SOFT, ("[soft,%d] class mismatch", __LINE__));
+#endif
+ free(soft_pcpu, M_PMC);
+ soft_pcpu = NULL;
+}
diff --git a/sys/dev/hwpmc/hwpmc_soft.h b/sys/dev/hwpmc/hwpmc_soft.h
new file mode 100644
index 0000000..f82baff
--- /dev/null
+++ b/sys/dev/hwpmc/hwpmc_soft.h
@@ -0,0 +1,48 @@
+/*-
+ * Copyright (c) 2012 Fabien Thomas
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _DEV_HWPMC_SOFT_H_
+#define _DEV_HWPMC_SOFT_H_ 1
+
+#include <sys/pmckern.h>
+
+#ifdef _KERNEL
+
+#define PMC_CLASS_INDEX_SOFT 0
+#define SOFT_NPMCS 16
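+
+/*
+ * PMC_CLASS_INDEX_SOFT pins the soft class at slot 0 of the
+ * pmd_classdep[] array and SOFT_NPMCS sets the number of per-CPU
+ * software counter rows; pmc_soft_initialize() indexes the classdep
+ * array with this constant directly.
+ */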
+
+/*
+ * Prototypes.
+ */
+
+void pmc_soft_initialize(struct pmc_mdep *md);
+void pmc_soft_finalize(struct pmc_mdep *md);
+int pmc_soft_intr(struct pmckern_soft *ks);
+
+#endif /* _KERNEL */
+#endif /* _DEV_HWPMC_SOFT_H_ */
diff --git a/sys/dev/hwpmc/hwpmc_sparc64.c b/sys/dev/hwpmc/hwpmc_sparc64.c
new file mode 100644
index 0000000..ce1caf6
--- /dev/null
+++ b/sys/dev/hwpmc/hwpmc_sparc64.c
@@ -0,0 +1,66 @@
+/*-
+ * Copyright (c) 2005, Joseph Koshy
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/pmc.h>
+
+#include <machine/pmc_mdep.h>
+
+struct pmc_mdep *
+pmc_md_initialize(void)
+{
+	return (NULL);
+}
+
+void
+pmc_md_finalize(struct pmc_mdep *md)
+{
+ (void) md;
+}
+
+int
+pmc_save_kernel_callchain(uintptr_t *cc, int maxsamples,
+ struct trapframe *tf)
+{
+ (void) cc;
+ (void) maxsamples;
+ (void) tf;
+ return (0);
+}
+
+int
+pmc_save_user_callchain(uintptr_t *cc, int maxsamples,
+ struct trapframe *tf)
+{
+ (void) cc;
+ (void) maxsamples;
+ (void) tf;
+ return (0);
+}
diff --git a/sys/dev/hwpmc/hwpmc_tsc.c b/sys/dev/hwpmc/hwpmc_tsc.c
new file mode 100644
index 0000000..237b7a1
--- /dev/null
+++ b/sys/dev/hwpmc/hwpmc_tsc.c
@@ -0,0 +1,385 @@
+/*-
+ * Copyright (c) 2008 Joseph Koshy
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/pmc.h>
+#include <sys/pmckern.h>
+#include <sys/systm.h>
+
+#include <machine/specialreg.h>
+
+/*
+ * TSC support.
+ */
+
+#define TSC_CAPS PMC_CAP_READ
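+
+/*
+ * Only PMC_CAP_READ is advertised: the TSC is a free-running counter
+ * that cannot be started, stopped or written through this driver
+ * (see tsc_start_pmc(), tsc_stop_pmc() and tsc_write_pmc() below).
+ */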
+
+struct tsc_descr {
+ struct pmc_descr pm_descr; /* "base class" */
+};
+
+static struct tsc_descr tsc_pmcdesc[TSC_NPMCS] =
+{
+ {
+ .pm_descr =
+ {
+ .pd_name = "TSC",
+ .pd_class = PMC_CLASS_TSC,
+ .pd_caps = TSC_CAPS,
+ .pd_width = 64
+ }
+ }
+};
+
+/*
+ * Per-CPU data structure for TSCs.
+ */
+
+struct tsc_cpu {
+ struct pmc_hw tc_hw;
+};
+
+static struct tsc_cpu **tsc_pcpu;
+
+static int
+tsc_allocate_pmc(int cpu, int ri, struct pmc *pm,
+ const struct pmc_op_pmcallocate *a)
+{
+ (void) cpu;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[tsc,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < TSC_NPMCS,
+ ("[tsc,%d] illegal row index %d", __LINE__, ri));
+
+ if (a->pm_class != PMC_CLASS_TSC)
+ return (EINVAL);
+
+ if ((pm->pm_caps & TSC_CAPS) == 0)
+ return (EINVAL);
+
+ if ((pm->pm_caps & ~TSC_CAPS) != 0)
+ return (EPERM);
+
+ if (a->pm_ev != PMC_EV_TSC_TSC ||
+ a->pm_mode != PMC_MODE_SC)
+ return (EINVAL);
+
+ return (0);
+}
+
+static int
+tsc_config_pmc(int cpu, int ri, struct pmc *pm)
+{
+ struct pmc_hw *phw;
+
+ PMCDBG(MDP,CFG,1, "cpu=%d ri=%d pm=%p", cpu, ri, pm);
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[tsc,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri == 0, ("[tsc,%d] illegal row-index %d", __LINE__, ri));
+
+ phw = &tsc_pcpu[cpu]->tc_hw;
+
+ KASSERT(pm == NULL || phw->phw_pmc == NULL,
+ ("[tsc,%d] pm=%p phw->pm=%p hwpmc not unconfigured", __LINE__,
+ pm, phw->phw_pmc));
+
+ phw->phw_pmc = pm;
+
+ return (0);
+}
+
+static int
+tsc_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc)
+{
+ int error;
+ size_t copied;
+ const struct tsc_descr *pd;
+ struct pmc_hw *phw;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[tsc,%d] illegal CPU %d", __LINE__, cpu));
+ KASSERT(ri == 0, ("[tsc,%d] illegal row-index %d", __LINE__, ri));
+
+ phw = &tsc_pcpu[cpu]->tc_hw;
+ pd = &tsc_pmcdesc[ri];
+
+ if ((error = copystr(pd->pm_descr.pd_name, pi->pm_name,
+ PMC_NAME_MAX, &copied)) != 0)
+ return (error);
+
+ pi->pm_class = pd->pm_descr.pd_class;
+
+ if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) {
+ pi->pm_enabled = TRUE;
+ *ppmc = phw->phw_pmc;
+ } else {
+ pi->pm_enabled = FALSE;
+ *ppmc = NULL;
+ }
+
+ return (0);
+}
+
+static int
+tsc_get_config(int cpu, int ri, struct pmc **ppm)
+{
+ (void) ri;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[tsc,%d] illegal CPU %d", __LINE__, cpu));
+ KASSERT(ri == 0, ("[tsc,%d] illegal row-index %d", __LINE__, ri));
+
+ *ppm = tsc_pcpu[cpu]->tc_hw.phw_pmc;
+
+ return (0);
+}
+
+static int
+tsc_get_msr(int ri, uint32_t *msr)
+{
+ (void) ri;
+
+ KASSERT(ri >= 0 && ri < TSC_NPMCS,
+ ("[tsc,%d] ri %d out of range", __LINE__, ri));
+
+ *msr = MSR_TSC;
+
+ return (0);
+}
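+
+/*
+ * Returning MSR_TSC through the pcd_get_msr hook lets the
+ * machine-independent layer give user space direct read access to the
+ * counter, presumably so that a read does not require a system call;
+ * the row index is ignored since there is only one TSC per CPU.
+ */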
+
+static int
+tsc_pcpu_fini(struct pmc_mdep *md, int cpu)
+{
+ int ri;
+ struct pmc_cpu *pc;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[tsc,%d] illegal cpu %d", __LINE__, cpu));
+ KASSERT(tsc_pcpu[cpu] != NULL, ("[tsc,%d] null pcpu", __LINE__));
+
+ free(tsc_pcpu[cpu], M_PMC);
+ tsc_pcpu[cpu] = NULL;
+
+ ri = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_TSC].pcd_ri;
+
+ pc = pmc_pcpu[cpu];
+ pc->pc_hwpmcs[ri] = NULL;
+
+ return (0);
+}
+
+static int
+tsc_pcpu_init(struct pmc_mdep *md, int cpu)
+{
+ int ri;
+ struct pmc_cpu *pc;
+ struct tsc_cpu *tsc_pc;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[tsc,%d] illegal cpu %d", __LINE__, cpu));
+ KASSERT(tsc_pcpu, ("[tsc,%d] null pcpu", __LINE__));
+ KASSERT(tsc_pcpu[cpu] == NULL, ("[tsc,%d] non-null per-cpu",
+ __LINE__));
+
+ tsc_pc = malloc(sizeof(struct tsc_cpu), M_PMC, M_WAITOK|M_ZERO);
+
+ tsc_pc->tc_hw.phw_state = PMC_PHW_FLAG_IS_ENABLED |
+ PMC_PHW_CPU_TO_STATE(cpu) | PMC_PHW_INDEX_TO_STATE(0) |
+ PMC_PHW_FLAG_IS_SHAREABLE;
+
+ tsc_pcpu[cpu] = tsc_pc;
+
+ ri = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_TSC].pcd_ri;
+
+ KASSERT(pmc_pcpu, ("[tsc,%d] null generic pcpu", __LINE__));
+
+ pc = pmc_pcpu[cpu];
+
+ KASSERT(pc, ("[tsc,%d] null generic per-cpu", __LINE__));
+
+ pc->pc_hwpmcs[ri] = &tsc_pc->tc_hw;
+
+ return (0);
+}
+
+static int
+tsc_read_pmc(int cpu, int ri, pmc_value_t *v)
+{
+ struct pmc *pm;
+ enum pmc_mode mode;
+ const struct pmc_hw *phw;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[tsc,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri == 0, ("[tsc,%d] illegal ri %d", __LINE__, ri));
+
+ phw = &tsc_pcpu[cpu]->tc_hw;
+ pm = phw->phw_pmc;
+
+ KASSERT(pm != NULL,
+ ("[tsc,%d] no owner for PHW [cpu%d,pmc%d]", __LINE__, cpu, ri));
+
+ mode = PMC_TO_MODE(pm);
+
+ KASSERT(mode == PMC_MODE_SC,
+ ("[tsc,%d] illegal pmc mode %d", __LINE__, mode));
+
+ PMCDBG(MDP,REA,1,"tsc-read id=%d", ri);
+
+ *v = rdtsc();
+
+ return (0);
+}
+
+static int
+tsc_release_pmc(int cpu, int ri, struct pmc *pmc)
+{
+ struct pmc_hw *phw;
+
+ (void) pmc;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[tsc,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri == 0,
+ ("[tsc,%d] illegal row-index %d", __LINE__, ri));
+
+ phw = &tsc_pcpu[cpu]->tc_hw;
+
+ KASSERT(phw->phw_pmc == NULL,
+ ("[tsc,%d] PHW pmc %p non-NULL", __LINE__, phw->phw_pmc));
+
+ /*
+ * Nothing to do.
+ */
+ return (0);
+}
+
+static int
+tsc_start_pmc(int cpu, int ri)
+{
+ (void) cpu;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[tsc,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri == 0, ("[tsc,%d] illegal row-index %d", __LINE__, ri));
+
+ return (0); /* TSCs are always running. */
+}
+
+static int
+tsc_stop_pmc(int cpu, int ri)
+{
+ (void) cpu; (void) ri;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[tsc,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri == 0, ("[tsc,%d] illegal row-index %d", __LINE__, ri));
+
+ return (0); /* Cannot actually stop a TSC. */
+}
+
+static int
+tsc_write_pmc(int cpu, int ri, pmc_value_t v)
+{
+ (void) cpu; (void) ri; (void) v;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[tsc,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri == 0, ("[tsc,%d] illegal row-index %d", __LINE__, ri));
+
+	/*
+	 * The kernel uses the TSC as a timecounter, so even though
+	 * some i386 CPUs support writable TSCs, we do not allow TSC
+	 * values to be changed through the HWPMC API.
+	 */
+ return (0);
+}
+
+int
+pmc_tsc_initialize(struct pmc_mdep *md, int maxcpu)
+{
+ struct pmc_classdep *pcd;
+
+ KASSERT(md != NULL, ("[tsc,%d] md is NULL", __LINE__));
+ KASSERT(md->pmd_nclass >= 1, ("[tsc,%d] dubious md->nclass %d",
+ __LINE__, md->pmd_nclass));
+
+ tsc_pcpu = malloc(sizeof(struct tsc_cpu *) * maxcpu, M_PMC,
+ M_ZERO|M_WAITOK);
+
+ pcd = &md->pmd_classdep[PMC_MDEP_CLASS_INDEX_TSC];
+
+ pcd->pcd_caps = PMC_CAP_READ;
+ pcd->pcd_class = PMC_CLASS_TSC;
+ pcd->pcd_num = TSC_NPMCS;
+ pcd->pcd_ri = md->pmd_npmc;
+ pcd->pcd_width = 64;
+
+ pcd->pcd_allocate_pmc = tsc_allocate_pmc;
+ pcd->pcd_config_pmc = tsc_config_pmc;
+ pcd->pcd_describe = tsc_describe;
+ pcd->pcd_get_config = tsc_get_config;
+ pcd->pcd_get_msr = tsc_get_msr;
+ pcd->pcd_pcpu_init = tsc_pcpu_init;
+ pcd->pcd_pcpu_fini = tsc_pcpu_fini;
+ pcd->pcd_read_pmc = tsc_read_pmc;
+ pcd->pcd_release_pmc = tsc_release_pmc;
+ pcd->pcd_start_pmc = tsc_start_pmc;
+ pcd->pcd_stop_pmc = tsc_stop_pmc;
+ pcd->pcd_write_pmc = tsc_write_pmc;
+
+ md->pmd_npmc += TSC_NPMCS;
+
+ return (0);
+}
+
+void
+pmc_tsc_finalize(struct pmc_mdep *md)
+{
+#ifdef INVARIANTS
+ int i, ncpus;
+
+ ncpus = pmc_cpu_max();
+ for (i = 0; i < ncpus; i++)
+ KASSERT(tsc_pcpu[i] == NULL, ("[tsc,%d] non-null pcpu cpu %d",
+ __LINE__, i));
+
+ KASSERT(md->pmd_classdep[PMC_MDEP_CLASS_INDEX_TSC].pcd_class ==
+ PMC_CLASS_TSC, ("[tsc,%d] class mismatch", __LINE__));
+
+#else
+ (void) md;
+#endif
+
+ free(tsc_pcpu, M_PMC);
+ tsc_pcpu = NULL;
+}
diff --git a/sys/dev/hwpmc/hwpmc_tsc.h b/sys/dev/hwpmc/hwpmc_tsc.h
new file mode 100644
index 0000000..a8b011e
--- /dev/null
+++ b/sys/dev/hwpmc/hwpmc_tsc.h
@@ -0,0 +1,43 @@
+/*-
+ * Copyright (c) 2008 Joseph Koshy
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _DEV_HWPMC_TSC_H_
+#define _DEV_HWPMC_TSC_H_ 1
+
+#ifdef _KERNEL
+
+#define TSC_NPMCS 1
+
+/*
+ * Prototypes.
+ */
+
+int pmc_tsc_initialize(struct pmc_mdep *_md, int _maxcpu);
+void pmc_tsc_finalize(struct pmc_mdep *_md);
+
+#endif /* _KERNEL */
+#endif /* _DEV_HWPMC_TSC_H_ */
diff --git a/sys/dev/hwpmc/hwpmc_uncore.c b/sys/dev/hwpmc/hwpmc_uncore.c
new file mode 100644
index 0000000..007aa6a
--- /dev/null
+++ b/sys/dev/hwpmc/hwpmc_uncore.c
@@ -0,0 +1,1208 @@
+/*-
+ * Copyright (c) 2010 Fabien Thomas
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * Intel Uncore PMCs.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/pmc.h>
+#include <sys/pmckern.h>
+#include <sys/systm.h>
+
+#include <machine/intr_machdep.h>
+#include <machine/apicvar.h>
+#include <machine/cpu.h>
+#include <machine/cpufunc.h>
+#include <machine/specialreg.h>
+
+#define UCF_PMC_CAPS \
+ (PMC_CAP_READ | PMC_CAP_WRITE)
+
+#define UCP_PMC_CAPS \
+ (PMC_CAP_EDGE | PMC_CAP_THRESHOLD | PMC_CAP_READ | PMC_CAP_WRITE | \
+ PMC_CAP_INVERT | PMC_CAP_QUALIFIER | PMC_CAP_PRECISE)
+
+#define SELECTSEL(x) \
+ (((x) == PMC_CPU_INTEL_SANDYBRIDGE) ? UCP_CB0_EVSEL0 : UCP_EVSEL0)
+
+#define SELECTOFF(x) \
+ (((x) == PMC_CPU_INTEL_SANDYBRIDGE) ? UCF_OFFSET_SB : UCF_OFFSET)
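+
+/*
+ * Sandy Bridge places the uncore event select registers at a
+ * different MSR base (UCP_CB0_EVSEL0) and uses a different bit offset
+ * for the fixed counter enable in UC_GLOBAL_CTRL; the two macros
+ * above pick the constants appropriate for the detected CPU model.
+ */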
+
+static enum pmc_cputype uncore_cputype;
+
+struct uncore_cpu {
+ volatile uint32_t pc_resync;
+ volatile uint32_t pc_ucfctrl; /* Fixed function control. */
+ volatile uint64_t pc_globalctrl; /* Global control register. */
+ struct pmc_hw pc_uncorepmcs[];
+};
+
+static struct uncore_cpu **uncore_pcpu;
+
+static uint64_t uncore_pmcmask;
+
+static int uncore_ucf_ri; /* relative index of fixed counters */
+static int uncore_ucf_width;
+static int uncore_ucf_npmc;
+
+static int uncore_ucp_width;
+static int uncore_ucp_npmc;
+
+static int
+uncore_pcpu_noop(struct pmc_mdep *md, int cpu)
+{
+ (void) md;
+ (void) cpu;
+ return (0);
+}
+
+static int
+uncore_pcpu_init(struct pmc_mdep *md, int cpu)
+{
+ struct pmc_cpu *pc;
+ struct uncore_cpu *cc;
+ struct pmc_hw *phw;
+ int uncore_ri, n, npmc;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[ucf,%d] insane cpu number %d", __LINE__, cpu));
+
+ PMCDBG(MDP,INI,1,"uncore-init cpu=%d", cpu);
+
+ uncore_ri = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCP].pcd_ri;
+ npmc = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCP].pcd_num;
+ npmc += md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCF].pcd_num;
+
+ cc = malloc(sizeof(struct uncore_cpu) + npmc * sizeof(struct pmc_hw),
+ M_PMC, M_WAITOK | M_ZERO);
+
+ uncore_pcpu[cpu] = cc;
+ pc = pmc_pcpu[cpu];
+
+ KASSERT(pc != NULL && cc != NULL,
+ ("[uncore,%d] NULL per-cpu structures cpu=%d", __LINE__, cpu));
+
+ for (n = 0, phw = cc->pc_uncorepmcs; n < npmc; n++, phw++) {
+ phw->phw_state = PMC_PHW_FLAG_IS_ENABLED |
+ PMC_PHW_CPU_TO_STATE(cpu) |
+ PMC_PHW_INDEX_TO_STATE(n + uncore_ri);
+ phw->phw_pmc = NULL;
+ pc->pc_hwpmcs[n + uncore_ri] = phw;
+ }
+
+ return (0);
+}
+
+static int
+uncore_pcpu_fini(struct pmc_mdep *md, int cpu)
+{
+ int uncore_ri, n, npmc;
+ struct pmc_cpu *pc;
+ struct uncore_cpu *cc;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[uncore,%d] insane cpu number (%d)", __LINE__, cpu));
+
+ PMCDBG(MDP,INI,1,"uncore-pcpu-fini cpu=%d", cpu);
+
+ if ((cc = uncore_pcpu[cpu]) == NULL)
+ return (0);
+
+ uncore_pcpu[cpu] = NULL;
+
+ pc = pmc_pcpu[cpu];
+
+ KASSERT(pc != NULL, ("[uncore,%d] NULL per-cpu %d state", __LINE__,
+ cpu));
+
+ npmc = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCP].pcd_num;
+ uncore_ri = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCP].pcd_ri;
+
+ for (n = 0; n < npmc; n++)
+ wrmsr(SELECTSEL(uncore_cputype) + n, 0);
+
+ wrmsr(UCF_CTRL, 0);
+ npmc += md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCF].pcd_num;
+
+ for (n = 0; n < npmc; n++)
+ pc->pc_hwpmcs[n + uncore_ri] = NULL;
+
+ free(cc, M_PMC);
+
+ return (0);
+}
+
+/*
+ * Fixed function counters.
+ */
+
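+/*
+ * Sampling PMCs count upward and interrupt on overflow, so a reload
+ * count R is programmed into the counter as (2^width - R).  For
+ * example, with the 48-bit fixed counter and R = 100000, the value
+ * written is 2^48 - 100000 and the counter overflows after exactly R
+ * increments.  The two helpers below convert in both directions.
+ */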
+static pmc_value_t
+ucf_perfctr_value_to_reload_count(pmc_value_t v)
+{
+ v &= (1ULL << uncore_ucf_width) - 1;
+ return (1ULL << uncore_ucf_width) - v;
+}
+
+static pmc_value_t
+ucf_reload_count_to_perfctr_value(pmc_value_t rlc)
+{
+ return (1ULL << uncore_ucf_width) - rlc;
+}
+
+static int
+ucf_allocate_pmc(int cpu, int ri, struct pmc *pm,
+ const struct pmc_op_pmcallocate *a)
+{
+ enum pmc_event ev;
+ uint32_t caps, flags;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[uncore,%d] illegal CPU %d", __LINE__, cpu));
+
+ PMCDBG(MDP,ALL,1, "ucf-allocate ri=%d reqcaps=0x%x", ri, pm->pm_caps);
+
+ if (ri < 0 || ri > uncore_ucf_npmc)
+ return (EINVAL);
+
+ caps = a->pm_caps;
+
+ if (a->pm_class != PMC_CLASS_UCF ||
+ (caps & UCF_PMC_CAPS) != caps)
+ return (EINVAL);
+
+ ev = pm->pm_event;
+ if (ev < PMC_EV_UCF_FIRST || ev > PMC_EV_UCF_LAST)
+ return (EINVAL);
+
+ flags = UCF_EN;
+
+ pm->pm_md.pm_ucf.pm_ucf_ctrl = (flags << (ri * 4));
+
+ PMCDBG(MDP,ALL,2, "ucf-allocate config=0x%jx",
+ (uintmax_t) pm->pm_md.pm_ucf.pm_ucf_ctrl);
+
+ return (0);
+}
+
+static int
+ucf_config_pmc(int cpu, int ri, struct pmc *pm)
+{
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[uncore,%d] illegal CPU %d", __LINE__, cpu));
+
+ KASSERT(ri >= 0 && ri < uncore_ucf_npmc,
+ ("[uncore,%d] illegal row-index %d", __LINE__, ri));
+
+ PMCDBG(MDP,CFG,1, "ucf-config cpu=%d ri=%d pm=%p", cpu, ri, pm);
+
+ KASSERT(uncore_pcpu[cpu] != NULL, ("[uncore,%d] null per-cpu %d", __LINE__,
+ cpu));
+
+ uncore_pcpu[cpu]->pc_uncorepmcs[ri + uncore_ucf_ri].phw_pmc = pm;
+
+ return (0);
+}
+
+static int
+ucf_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc)
+{
+ int error;
+ struct pmc_hw *phw;
+ char ucf_name[PMC_NAME_MAX];
+
+ phw = &uncore_pcpu[cpu]->pc_uncorepmcs[ri + uncore_ucf_ri];
+
+ (void) snprintf(ucf_name, sizeof(ucf_name), "UCF-%d", ri);
+ if ((error = copystr(ucf_name, pi->pm_name, PMC_NAME_MAX,
+ NULL)) != 0)
+ return (error);
+
+ pi->pm_class = PMC_CLASS_UCF;
+
+ if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) {
+ pi->pm_enabled = TRUE;
+ *ppmc = phw->phw_pmc;
+ } else {
+ pi->pm_enabled = FALSE;
+ *ppmc = NULL;
+ }
+
+ return (0);
+}
+
+static int
+ucf_get_config(int cpu, int ri, struct pmc **ppm)
+{
+ *ppm = uncore_pcpu[cpu]->pc_uncorepmcs[ri + uncore_ucf_ri].phw_pmc;
+
+ return (0);
+}
+
+static int
+ucf_read_pmc(int cpu, int ri, pmc_value_t *v)
+{
+ struct pmc *pm;
+ pmc_value_t tmp;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[uncore,%d] illegal cpu value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < uncore_ucf_npmc,
+ ("[uncore,%d] illegal row-index %d", __LINE__, ri));
+
+ pm = uncore_pcpu[cpu]->pc_uncorepmcs[ri + uncore_ucf_ri].phw_pmc;
+
+ KASSERT(pm,
+ ("[uncore,%d] cpu %d ri %d(%d) pmc not configured", __LINE__, cpu,
+ ri, ri + uncore_ucf_ri));
+
+ tmp = rdmsr(UCF_CTR0 + ri);
+
+ if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
+ *v = ucf_perfctr_value_to_reload_count(tmp);
+ else
+ *v = tmp;
+
+ PMCDBG(MDP,REA,1, "ucf-read cpu=%d ri=%d -> v=%jx", cpu, ri, *v);
+
+ return (0);
+}
+
+static int
+ucf_release_pmc(int cpu, int ri, struct pmc *pmc)
+{
+ PMCDBG(MDP,REL,1, "ucf-release cpu=%d ri=%d pm=%p", cpu, ri, pmc);
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[uncore,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < uncore_ucf_npmc,
+ ("[uncore,%d] illegal row-index %d", __LINE__, ri));
+
+ KASSERT(uncore_pcpu[cpu]->pc_uncorepmcs[ri + uncore_ucf_ri].phw_pmc == NULL,
+ ("[uncore,%d] PHW pmc non-NULL", __LINE__));
+
+ return (0);
+}
+
+static int
+ucf_start_pmc(int cpu, int ri)
+{
+ struct pmc *pm;
+ struct uncore_cpu *ucfc;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[uncore,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < uncore_ucf_npmc,
+ ("[uncore,%d] illegal row-index %d", __LINE__, ri));
+
+ PMCDBG(MDP,STA,1,"ucf-start cpu=%d ri=%d", cpu, ri);
+
+ ucfc = uncore_pcpu[cpu];
+ pm = ucfc->pc_uncorepmcs[ri + uncore_ucf_ri].phw_pmc;
+
+ ucfc->pc_ucfctrl |= pm->pm_md.pm_ucf.pm_ucf_ctrl;
+
+ wrmsr(UCF_CTRL, ucfc->pc_ucfctrl);
+
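+	/*
+	 * Enable the counter in UC_GLOBAL_CTRL.  pc_resync is cleared
+	 * before each MSR write and the write is retried if it was set
+	 * again in the interim; this appears intended to keep the
+	 * cached pc_globalctrl value and the hardware in sync across
+	 * concurrent updates.
+	 */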
+ do {
+ ucfc->pc_resync = 0;
+ ucfc->pc_globalctrl |= (1ULL << (ri + SELECTOFF(uncore_cputype)));
+ wrmsr(UC_GLOBAL_CTRL, ucfc->pc_globalctrl);
+ } while (ucfc->pc_resync != 0);
+
+ PMCDBG(MDP,STA,1,"ucfctrl=%x(%x) globalctrl=%jx(%jx)",
+ ucfc->pc_ucfctrl, (uint32_t) rdmsr(UCF_CTRL),
+ ucfc->pc_globalctrl, rdmsr(UC_GLOBAL_CTRL));
+
+ return (0);
+}
+
+static int
+ucf_stop_pmc(int cpu, int ri)
+{
+ uint32_t fc;
+ struct uncore_cpu *ucfc;
+
+ PMCDBG(MDP,STO,1,"ucf-stop cpu=%d ri=%d", cpu, ri);
+
+ ucfc = uncore_pcpu[cpu];
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[uncore,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < uncore_ucf_npmc,
+ ("[uncore,%d] illegal row-index %d", __LINE__, ri));
+
+ fc = (UCF_MASK << (ri * 4));
+
+ ucfc->pc_ucfctrl &= ~fc;
+
+ PMCDBG(MDP,STO,1,"ucf-stop ucfctrl=%x", ucfc->pc_ucfctrl);
+ wrmsr(UCF_CTRL, ucfc->pc_ucfctrl);
+
+ do {
+ ucfc->pc_resync = 0;
+ ucfc->pc_globalctrl &= ~(1ULL << (ri + SELECTOFF(uncore_cputype)));
+ wrmsr(UC_GLOBAL_CTRL, ucfc->pc_globalctrl);
+ } while (ucfc->pc_resync != 0);
+
+ PMCDBG(MDP,STO,1,"ucfctrl=%x(%x) globalctrl=%jx(%jx)",
+ ucfc->pc_ucfctrl, (uint32_t) rdmsr(UCF_CTRL),
+ ucfc->pc_globalctrl, rdmsr(UC_GLOBAL_CTRL));
+
+ return (0);
+}
+
+static int
+ucf_write_pmc(int cpu, int ri, pmc_value_t v)
+{
+ struct uncore_cpu *cc;
+ struct pmc *pm;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[uncore,%d] illegal cpu value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < uncore_ucf_npmc,
+ ("[uncore,%d] illegal row-index %d", __LINE__, ri));
+
+ cc = uncore_pcpu[cpu];
+ pm = cc->pc_uncorepmcs[ri + uncore_ucf_ri].phw_pmc;
+
+ KASSERT(pm,
+ ("[uncore,%d] cpu %d ri %d pmc not configured", __LINE__, cpu, ri));
+
+ if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
+ v = ucf_reload_count_to_perfctr_value(v);
+
+ wrmsr(UCF_CTRL, 0); /* Turn off fixed counters */
+ wrmsr(UCF_CTR0 + ri, v);
+ wrmsr(UCF_CTRL, cc->pc_ucfctrl);
+
+ PMCDBG(MDP,WRI,1, "ucf-write cpu=%d ri=%d v=%jx ucfctrl=%jx ",
+ cpu, ri, v, (uintmax_t) rdmsr(UCF_CTRL));
+
+ return (0);
+}
+
+static void
+ucf_initialize(struct pmc_mdep *md, int maxcpu, int npmc, int pmcwidth)
+{
+ struct pmc_classdep *pcd;
+
+ KASSERT(md != NULL, ("[ucf,%d] md is NULL", __LINE__));
+
+ PMCDBG(MDP,INI,1, "%s", "ucf-initialize");
+
+ pcd = &md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCF];
+
+ pcd->pcd_caps = UCF_PMC_CAPS;
+ pcd->pcd_class = PMC_CLASS_UCF;
+ pcd->pcd_num = npmc;
+ pcd->pcd_ri = md->pmd_npmc;
+ pcd->pcd_width = pmcwidth;
+
+ pcd->pcd_allocate_pmc = ucf_allocate_pmc;
+ pcd->pcd_config_pmc = ucf_config_pmc;
+ pcd->pcd_describe = ucf_describe;
+ pcd->pcd_get_config = ucf_get_config;
+ pcd->pcd_get_msr = NULL;
+ pcd->pcd_pcpu_fini = uncore_pcpu_noop;
+ pcd->pcd_pcpu_init = uncore_pcpu_noop;
+ pcd->pcd_read_pmc = ucf_read_pmc;
+ pcd->pcd_release_pmc = ucf_release_pmc;
+ pcd->pcd_start_pmc = ucf_start_pmc;
+ pcd->pcd_stop_pmc = ucf_stop_pmc;
+ pcd->pcd_write_pmc = ucf_write_pmc;
+
+ md->pmd_npmc += npmc;
+}
+
+/*
+ * Intel programmable PMCs.
+ */
+
+/*
+ * Event descriptor tables.
+ *
+ * For each event id, we track:
+ *
+ * 1. The CPUs that the event is valid for.
+ *
+ * 2. If the event uses a fixed UMASK, the value of the umask field.
+ * If the event doesn't use a fixed UMASK, a mask of legal bits
+ * to check against.
+ */
+
+struct ucp_event_descr {
+ enum pmc_event ucp_ev;
+ unsigned char ucp_evcode;
+ unsigned char ucp_umask;
+ unsigned char ucp_flags;
+};
+
+#define UCP_F_I7 (1 << 0) /* CPU: Core i7 */
+#define UCP_F_WM (1 << 1) /* CPU: Westmere */
+#define UCP_F_SB (1 << 2) /* CPU: Sandy Bridge */
+#define UCP_F_FM (1 << 3) /* Fixed mask */
+
+#define UCP_F_ALLCPUS \
+ (UCP_F_I7 | UCP_F_WM)
+
+#define UCP_F_CMASK 0xFF000000
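+
+/*
+ * Worked example: the 20H_01H descriptor below carries evcode 0x20
+ * and umask 0x01.  ucp_allocate_pmc() assembles evsel = 0x20 | UCP_EN
+ * and then ORs in (0x01 << 8), yielding 0x00400120 before any edge,
+ * invert or counter-mask bits requested by the caller are added.
+ */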
+
+static struct ucp_event_descr ucp_events[] = {
+#undef UCPDESCR
+#define UCPDESCR(N,EV,UM,FLAGS) { \
+ .ucp_ev = PMC_EV_UCP_EVENT_##N, \
+ .ucp_evcode = (EV), \
+ .ucp_umask = (UM), \
+ .ucp_flags = (FLAGS) \
+ }
+
+ UCPDESCR(00H_01H, 0x00, 0x01, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(00H_02H, 0x00, 0x02, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(00H_04H, 0x00, 0x04, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+
+ UCPDESCR(01H_01H, 0x01, 0x01, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(01H_02H, 0x01, 0x02, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(01H_04H, 0x01, 0x04, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+
+ UCPDESCR(02H_01H, 0x02, 0x01, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(03H_01H, 0x03, 0x01, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(03H_02H, 0x03, 0x02, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(03H_04H, 0x03, 0x04, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(03H_08H, 0x03, 0x08, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(03H_10H, 0x03, 0x10, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(03H_20H, 0x03, 0x20, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(03H_40H, 0x03, 0x40, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+
+ UCPDESCR(04H_01H, 0x04, 0x01, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(04H_02H, 0x04, 0x02, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(04H_04H, 0x04, 0x04, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(04H_08H, 0x04, 0x08, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(04H_10H, 0x04, 0x10, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+
+ UCPDESCR(05H_01H, 0x05, 0x01, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(05H_02H, 0x05, 0x02, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(05H_04H, 0x05, 0x04, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+
+ UCPDESCR(06H_01H, 0x06, 0x01, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(06H_02H, 0x06, 0x02, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(06H_04H, 0x06, 0x04, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(06H_08H, 0x06, 0x08, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(06H_10H, 0x06, 0x10, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(06H_20H, 0x06, 0x20, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+
+ UCPDESCR(07H_01H, 0x07, 0x01, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(07H_02H, 0x07, 0x02, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(07H_04H, 0x07, 0x04, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(07H_08H, 0x07, 0x08, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(07H_10H, 0x07, 0x10, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(07H_20H, 0x07, 0x20, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(07H_24H, 0x07, 0x24, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+
+ UCPDESCR(08H_01H, 0x08, 0x01, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(08H_02H, 0x08, 0x02, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(08H_04H, 0x08, 0x04, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(08H_03H, 0x08, 0x03, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+
+ UCPDESCR(09H_01H, 0x09, 0x01, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(09H_02H, 0x09, 0x02, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(09H_04H, 0x09, 0x04, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(09H_03H, 0x09, 0x03, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+
+ UCPDESCR(0AH_01H, 0x0A, 0x01, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(0AH_02H, 0x0A, 0x02, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(0AH_04H, 0x0A, 0x04, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(0AH_08H, 0x0A, 0x08, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(0AH_0FH, 0x0A, 0x0F, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+
+ UCPDESCR(0BH_01H, 0x0B, 0x01, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(0BH_02H, 0x0B, 0x02, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(0BH_04H, 0x0B, 0x04, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(0BH_08H, 0x0B, 0x08, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(0BH_10H, 0x0B, 0x10, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(0BH_1FH, 0x0B, 0x1F, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+
+ UCPDESCR(0CH_01H, 0x0C, 0x01, UCP_F_FM | UCP_F_WM),
+ UCPDESCR(0CH_02H, 0x0C, 0x02, UCP_F_FM | UCP_F_WM),
+ UCPDESCR(0CH_04H_E, 0x0C, 0x04, UCP_F_FM | UCP_F_WM),
+ UCPDESCR(0CH_04H_F, 0x0C, 0x04, UCP_F_FM | UCP_F_WM),
+ UCPDESCR(0CH_04H_M, 0x0C, 0x04, UCP_F_FM | UCP_F_WM),
+ UCPDESCR(0CH_04H_S, 0x0C, 0x04, UCP_F_FM | UCP_F_WM),
+ UCPDESCR(0CH_08H_E, 0x0C, 0x08, UCP_F_FM | UCP_F_WM),
+ UCPDESCR(0CH_08H_F, 0x0C, 0x08, UCP_F_FM | UCP_F_WM),
+ UCPDESCR(0CH_08H_M, 0x0C, 0x08, UCP_F_FM | UCP_F_WM),
+ UCPDESCR(0CH_08H_S, 0x0C, 0x08, UCP_F_FM | UCP_F_WM),
+
+ UCPDESCR(20H_01H, 0x20, 0x01, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(20H_02H, 0x20, 0x02, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(20H_04H, 0x20, 0x04, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(20H_08H, 0x20, 0x08, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(20H_10H, 0x20, 0x10, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(20H_20H, 0x20, 0x20, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+
+ UCPDESCR(21H_01H, 0x21, 0x01, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(21H_02H, 0x21, 0x02, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(21H_04H, 0x21, 0x04, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+
+ UCPDESCR(22H_01H, 0x22, 0x01, UCP_F_FM | UCP_F_I7 | UCP_F_WM |
+ UCP_F_SB),
+ UCPDESCR(22H_02H, 0x22, 0x02, UCP_F_FM | UCP_F_I7 | UCP_F_WM |
+ UCP_F_SB),
+ UCPDESCR(22H_04H, 0x22, 0x04, UCP_F_FM | UCP_F_I7 | UCP_F_WM |
+ UCP_F_SB),
+ UCPDESCR(22H_08H, 0x22, 0x08, UCP_F_FM | UCP_F_SB),
+ UCPDESCR(22H_20H, 0x22, 0x20, UCP_F_FM | UCP_F_SB),
+ UCPDESCR(22H_40H, 0x22, 0x40, UCP_F_FM | UCP_F_SB),
+ UCPDESCR(22H_80H, 0x22, 0x80, UCP_F_FM | UCP_F_SB),
+
+ UCPDESCR(23H_01H, 0x23, 0x01, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(23H_02H, 0x23, 0x02, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(23H_04H, 0x23, 0x04, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+
+ UCPDESCR(24H_02H, 0x24, 0x02, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(24H_04H, 0x24, 0x04, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+
+ UCPDESCR(25H_01H, 0x25, 0x01, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(25H_02H, 0x25, 0x02, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(25H_04H, 0x25, 0x04, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+
+ UCPDESCR(26H_01H, 0x26, 0x01, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+
+ UCPDESCR(27H_01H, 0x27, 0x01, UCP_F_FM | UCP_F_I7),
+ UCPDESCR(27H_02H, 0x27, 0x02, UCP_F_FM | UCP_F_I7),
+ UCPDESCR(27H_04H, 0x27, 0x04, UCP_F_FM | UCP_F_I7),
+ UCPDESCR(27H_08H, 0x27, 0x08, UCP_F_FM | UCP_F_I7),
+ UCPDESCR(27H_10H, 0x27, 0x10, UCP_F_FM | UCP_F_I7),
+ UCPDESCR(27H_20H, 0x27, 0x20, UCP_F_FM | UCP_F_I7),
+
+ UCPDESCR(28H_01H, 0x28, 0x01, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(28H_02H, 0x28, 0x02, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(28H_04H, 0x28, 0x04, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(28H_08H, 0x28, 0x08, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(28H_10H, 0x28, 0x10, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(28H_20H, 0x28, 0x20, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+
+ UCPDESCR(29H_01H, 0x29, 0x01, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(29H_02H, 0x29, 0x02, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(29H_04H, 0x29, 0x04, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(29H_08H, 0x29, 0x08, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(29H_10H, 0x29, 0x10, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(29H_20H, 0x29, 0x20, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+
+ UCPDESCR(2AH_01H, 0x2A, 0x01, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(2AH_02H, 0x2A, 0x02, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(2AH_04H, 0x2A, 0x04, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(2AH_07H, 0x2A, 0x07, UCP_F_FM | UCP_F_WM),
+
+ UCPDESCR(2BH_01H, 0x2B, 0x01, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(2BH_02H, 0x2B, 0x02, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(2BH_04H, 0x2B, 0x04, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(2BH_07H, 0x2B, 0x07, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+
+ UCPDESCR(2CH_01H, 0x2C, 0x01, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(2CH_02H, 0x2C, 0x02, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(2CH_04H, 0x2C, 0x04, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(2CH_07H, 0x2C, 0x07, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+
+ UCPDESCR(2DH_01H, 0x2D, 0x01, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(2DH_02H, 0x2D, 0x02, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(2DH_04H, 0x2D, 0x04, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(2DH_07H, 0x2D, 0x07, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+
+ UCPDESCR(2EH_01H, 0x2E, 0x01, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(2EH_02H, 0x2E, 0x02, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(2EH_04H, 0x2E, 0x04, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(2EH_07H, 0x2E, 0x07, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+
+ UCPDESCR(2FH_01H, 0x2F, 0x01, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(2FH_02H, 0x2F, 0x02, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(2FH_04H, 0x2F, 0x04, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(2FH_07H, 0x2F, 0x07, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(2FH_08H, 0x2F, 0x08, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(2FH_10H, 0x2F, 0x10, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(2FH_20H, 0x2F, 0x20, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(2FH_38H, 0x2F, 0x38, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+
+ UCPDESCR(30H_01H, 0x30, 0x01, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(30H_02H, 0x30, 0x02, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(30H_04H, 0x30, 0x04, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(30H_07H, 0x30, 0x07, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+
+ UCPDESCR(31H_01H, 0x31, 0x01, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(31H_02H, 0x31, 0x02, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(31H_04H, 0x31, 0x04, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(31H_07H, 0x31, 0x07, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+
+ UCPDESCR(32H_01H, 0x32, 0x01, UCP_F_FM | UCP_F_WM),
+ UCPDESCR(32H_02H, 0x32, 0x02, UCP_F_FM | UCP_F_WM),
+ UCPDESCR(32H_04H, 0x32, 0x04, UCP_F_FM | UCP_F_WM),
+ UCPDESCR(32H_07H, 0x32, 0x07, UCP_F_FM | UCP_F_WM),
+
+ UCPDESCR(33H_01H, 0x33, 0x01, UCP_F_FM | UCP_F_WM),
+ UCPDESCR(33H_02H, 0x33, 0x02, UCP_F_FM | UCP_F_WM),
+ UCPDESCR(33H_04H, 0x33, 0x04, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(33H_07H, 0x33, 0x07, UCP_F_FM | UCP_F_WM),
+
+ UCPDESCR(34H_01H, 0x34, 0x01, UCP_F_FM | UCP_F_WM | UCP_F_SB),
+ UCPDESCR(34H_02H, 0x34, 0x02, UCP_F_FM | UCP_F_WM | UCP_F_SB),
+ UCPDESCR(34H_04H, 0x34, 0x04, UCP_F_FM | UCP_F_WM | UCP_F_SB),
+ UCPDESCR(34H_08H, 0x34, 0x08, UCP_F_FM | UCP_F_WM | UCP_F_SB),
+ UCPDESCR(34H_10H, 0x34, 0x10, UCP_F_FM | UCP_F_WM | UCP_F_SB),
+ UCPDESCR(34H_20H, 0x34, 0x20, UCP_F_FM | UCP_F_WM | UCP_F_SB),
+ UCPDESCR(34H_40H, 0x34, 0x40, UCP_F_FM | UCP_F_SB),
+ UCPDESCR(34H_80H, 0x34, 0x80, UCP_F_FM | UCP_F_SB),
+
+ UCPDESCR(35H_01H, 0x35, 0x01, UCP_F_FM | UCP_F_WM),
+ UCPDESCR(35H_02H, 0x35, 0x02, UCP_F_FM | UCP_F_WM),
+ UCPDESCR(35H_04H, 0x35, 0x04, UCP_F_FM | UCP_F_WM),
+
+ UCPDESCR(40H_01H, 0x40, 0x01, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(40H_02H, 0x40, 0x02, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(40H_04H, 0x40, 0x04, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(40H_08H, 0x40, 0x08, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(40H_10H, 0x40, 0x10, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(40H_20H, 0x40, 0x20, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(40H_07H, 0x40, 0x07, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(40H_38H, 0x40, 0x38, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+
+ UCPDESCR(41H_01H, 0x41, 0x01, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(41H_02H, 0x41, 0x02, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(41H_04H, 0x41, 0x04, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(41H_08H, 0x41, 0x08, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(41H_10H, 0x41, 0x10, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(41H_20H, 0x41, 0x20, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(41H_07H, 0x41, 0x07, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(41H_38H, 0x41, 0x38, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+
+ UCPDESCR(42H_01H, 0x42, 0x01, UCP_F_FM | UCP_F_WM),
+ UCPDESCR(42H_02H, 0x42, 0x02, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(42H_04H, 0x42, 0x04, UCP_F_FM | UCP_F_WM),
+ UCPDESCR(42H_08H, 0x42, 0x08, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+
+ UCPDESCR(43H_01H, 0x43, 0x01, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(43H_02H, 0x43, 0x02, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+
+ UCPDESCR(60H_01H, 0x60, 0x01, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(60H_02H, 0x60, 0x02, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(60H_04H, 0x60, 0x04, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+
+ UCPDESCR(61H_01H, 0x61, 0x01, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(61H_02H, 0x61, 0x02, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(61H_04H, 0x61, 0x04, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+
+ UCPDESCR(62H_01H, 0x62, 0x01, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(62H_02H, 0x62, 0x02, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(62H_04H, 0x62, 0x04, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+
+ UCPDESCR(63H_01H, 0x63, 0x01, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(63H_02H, 0x63, 0x02, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(63H_04H, 0x63, 0x04, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(63H_08H, 0x63, 0x08, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(63H_10H, 0x63, 0x10, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(63H_20H, 0x63, 0x20, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+
+ UCPDESCR(64H_01H, 0x64, 0x01, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(64H_02H, 0x64, 0x02, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(64H_04H, 0x64, 0x04, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(64H_08H, 0x64, 0x08, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(64H_10H, 0x64, 0x10, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(64H_20H, 0x64, 0x20, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+
+ UCPDESCR(65H_01H, 0x65, 0x01, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(65H_02H, 0x65, 0x02, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(65H_04H, 0x65, 0x04, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+
+ UCPDESCR(66H_01H, 0x66, 0x01, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(66H_02H, 0x66, 0x02, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+ UCPDESCR(66H_04H, 0x66, 0x04, UCP_F_FM | UCP_F_I7 | UCP_F_WM),
+
+ UCPDESCR(67H_01H, 0x67, 0x01, UCP_F_FM | UCP_F_WM),
+
+ UCPDESCR(80H_01H, 0x80, 0x01, UCP_F_FM | UCP_F_WM | UCP_F_SB),
+ UCPDESCR(80H_02H, 0x80, 0x02, UCP_F_FM | UCP_F_WM),
+ UCPDESCR(80H_04H, 0x80, 0x04, UCP_F_FM | UCP_F_WM),
+ UCPDESCR(80H_08H, 0x80, 0x08, UCP_F_FM | UCP_F_WM),
+
+ UCPDESCR(81H_01H, 0x81, 0x01, UCP_F_FM | UCP_F_WM | UCP_F_SB),
+ UCPDESCR(81H_02H, 0x81, 0x02, UCP_F_FM | UCP_F_WM),
+ UCPDESCR(81H_04H, 0x81, 0x04, UCP_F_FM | UCP_F_WM),
+ UCPDESCR(81H_08H, 0x81, 0x08, UCP_F_FM | UCP_F_WM),
+ UCPDESCR(81H_20H, 0x81, 0x20, UCP_F_FM | UCP_F_SB),
+ UCPDESCR(81H_80H, 0x81, 0x80, UCP_F_FM | UCP_F_SB),
+
+ UCPDESCR(82H_01H, 0x82, 0x01, UCP_F_FM | UCP_F_WM),
+
+ UCPDESCR(83H_01H, 0x83, 0x01, UCP_F_FM | UCP_F_WM | UCP_F_SB),
+ UCPDESCR(83H_02H, 0x83, 0x02, UCP_F_FM | UCP_F_WM),
+ UCPDESCR(83H_04H, 0x83, 0x04, UCP_F_FM | UCP_F_WM),
+ UCPDESCR(83H_08H, 0x83, 0x08, UCP_F_FM | UCP_F_WM),
+
+ UCPDESCR(84H_01H, 0x84, 0x01, UCP_F_FM | UCP_F_WM | UCP_F_SB),
+ UCPDESCR(84H_02H, 0x84, 0x02, UCP_F_FM | UCP_F_WM),
+ UCPDESCR(84H_04H, 0x84, 0x04, UCP_F_FM | UCP_F_WM),
+ UCPDESCR(84H_08H, 0x84, 0x08, UCP_F_FM | UCP_F_WM),
+ UCPDESCR(85H_02H, 0x85, 0x02, UCP_F_FM | UCP_F_WM),
+ UCPDESCR(86H_01H, 0x86, 0x01, UCP_F_FM | UCP_F_WM)
+};
+
+static const int nucp_events = sizeof(ucp_events) / sizeof(ucp_events[0]);
+
+static pmc_value_t
+ucp_perfctr_value_to_reload_count(pmc_value_t v)
+{
+ v &= (1ULL << uncore_ucp_width) - 1;
+ return (1ULL << uncore_ucp_width) - v;
+}
+
+static pmc_value_t
+ucp_reload_count_to_perfctr_value(pmc_value_t rlc)
+{
+ return (1ULL << uncore_ucp_width) - rlc;
+}
+
+static int
+ucp_event_sandybridge_ok_on_counter(enum pmc_event pe, int ri)
+{
+ uint32_t mask;
+
+ switch (pe) {
+ /*
+ * Events valid only on counter 0.
+ */
+ case PMC_EV_UCP_EVENT_80H_01H:
+ case PMC_EV_UCP_EVENT_83H_01H:
+ mask = (1 << 0);
+ break;
+
+ default:
+ mask = ~0; /* Any row index is ok. */
+ }
+
+ return (mask & (1 << ri));
+}
+
+static int
+ucp_allocate_pmc(int cpu, int ri, struct pmc *pm,
+ const struct pmc_op_pmcallocate *a)
+{
+ int n;
+ enum pmc_event ev;
+ struct ucp_event_descr *ie;
+ uint32_t caps, config, cpuflag, evsel;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[uncore,%d] illegal CPU %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < uncore_ucp_npmc,
+ ("[uncore,%d] illegal row-index value %d", __LINE__, ri));
+
+ /* check requested capabilities */
+ caps = a->pm_caps;
+ if ((UCP_PMC_CAPS & caps) != caps)
+ return (EPERM);
+
+ ev = pm->pm_event;
+
+ switch (uncore_cputype) {
+ case PMC_CPU_INTEL_SANDYBRIDGE:
+ if (ucp_event_sandybridge_ok_on_counter(ev, ri) == 0)
+ return (EINVAL);
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * Look for an event descriptor with matching CPU and event id
+ * fields.
+ */
+
+ switch (uncore_cputype) {
+ case PMC_CPU_INTEL_COREI7:
+ cpuflag = UCP_F_I7;
+ break;
+ case PMC_CPU_INTEL_SANDYBRIDGE:
+ cpuflag = UCP_F_SB;
+ break;
+ case PMC_CPU_INTEL_WESTMERE:
+ cpuflag = UCP_F_WM;
+ break;
+ default:
+ return (EINVAL);
+ }
+
+ for (n = 0, ie = ucp_events; n < nucp_events; n++, ie++)
+ if (ie->ucp_ev == ev && ie->ucp_flags & cpuflag)
+ break;
+
+ if (n == nucp_events)
+ return (EINVAL);
+
+ /*
+ * A matching event descriptor has been found, so start
+ * assembling the contents of the event select register.
+ */
+ evsel = ie->ucp_evcode | UCP_EN;
+
+ config = a->pm_md.pm_ucp.pm_ucp_config & ~UCP_F_CMASK;
+
+ /*
+ * If the event uses a fixed umask value, reject any umask
+ * bits set by the user.
+ */
+ if (ie->ucp_flags & UCP_F_FM) {
+
+ if (UCP_UMASK(config) != 0)
+ return (EINVAL);
+
+ evsel |= (ie->ucp_umask << 8);
+
+ } else
+ return (EINVAL);
+
+ if (caps & PMC_CAP_THRESHOLD)
+ evsel |= (a->pm_md.pm_ucp.pm_ucp_config & UCP_F_CMASK);
+ if (caps & PMC_CAP_EDGE)
+ evsel |= UCP_EDGE;
+ if (caps & PMC_CAP_INVERT)
+ evsel |= UCP_INV;
+
+ pm->pm_md.pm_ucp.pm_ucp_evsel = evsel;
+
+ return (0);
+}
+
+static int
+ucp_config_pmc(int cpu, int ri, struct pmc *pm)
+{
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[uncore,%d] illegal CPU %d", __LINE__, cpu));
+
+ KASSERT(ri >= 0 && ri < uncore_ucp_npmc,
+ ("[uncore,%d] illegal row-index %d", __LINE__, ri));
+
+ PMCDBG(MDP,CFG,1, "ucp-config cpu=%d ri=%d pm=%p", cpu, ri, pm);
+
+ KASSERT(uncore_pcpu[cpu] != NULL, ("[uncore,%d] null per-cpu %d", __LINE__,
+ cpu));
+
+ uncore_pcpu[cpu]->pc_uncorepmcs[ri].phw_pmc = pm;
+
+ return (0);
+}
+
+static int
+ucp_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc)
+{
+ int error;
+ struct pmc_hw *phw;
+ char ucp_name[PMC_NAME_MAX];
+
+ phw = &uncore_pcpu[cpu]->pc_uncorepmcs[ri];
+
+ (void) snprintf(ucp_name, sizeof(ucp_name), "UCP-%d", ri);
+ if ((error = copystr(ucp_name, pi->pm_name, PMC_NAME_MAX,
+ NULL)) != 0)
+ return (error);
+
+ pi->pm_class = PMC_CLASS_UCP;
+
+ if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) {
+ pi->pm_enabled = TRUE;
+ *ppmc = phw->phw_pmc;
+ } else {
+ pi->pm_enabled = FALSE;
+ *ppmc = NULL;
+ }
+
+ return (0);
+}
+
+static int
+ucp_get_config(int cpu, int ri, struct pmc **ppm)
+{
+ *ppm = uncore_pcpu[cpu]->pc_uncorepmcs[ri].phw_pmc;
+
+ return (0);
+}
+
+static int
+ucp_read_pmc(int cpu, int ri, pmc_value_t *v)
+{
+ struct pmc *pm;
+ pmc_value_t tmp;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[uncore,%d] illegal cpu value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < uncore_ucp_npmc,
+ ("[uncore,%d] illegal row-index %d", __LINE__, ri));
+
+ pm = uncore_pcpu[cpu]->pc_uncorepmcs[ri].phw_pmc;
+
+ KASSERT(pm,
+ ("[uncore,%d] cpu %d ri %d pmc not configured", __LINE__, cpu,
+ ri));
+
+ tmp = rdmsr(UCP_PMC0 + ri);
+ if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
+ *v = ucp_perfctr_value_to_reload_count(tmp);
+ else
+ *v = tmp;
+
+ PMCDBG(MDP,REA,1, "ucp-read cpu=%d ri=%d msr=0x%x -> v=%jx", cpu, ri,
+ ri, *v);
+
+ return (0);
+}
+
+static int
+ucp_release_pmc(int cpu, int ri, struct pmc *pm)
+{
+ (void) pm;
+
+ PMCDBG(MDP,REL,1, "ucp-release cpu=%d ri=%d pm=%p", cpu, ri,
+ pm);
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[uncore,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < uncore_ucp_npmc,
+ ("[uncore,%d] illegal row-index %d", __LINE__, ri));
+
+ KASSERT(uncore_pcpu[cpu]->pc_uncorepmcs[ri].phw_pmc
+ == NULL, ("[uncore,%d] PHW pmc non-NULL", __LINE__));
+
+ return (0);
+}
+
+static int
+ucp_start_pmc(int cpu, int ri)
+{
+ struct pmc *pm;
+ uint32_t evsel;
+ struct uncore_cpu *cc;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[uncore,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < uncore_ucp_npmc,
+ ("[uncore,%d] illegal row-index %d", __LINE__, ri));
+
+ cc = uncore_pcpu[cpu];
+ pm = cc->pc_uncorepmcs[ri].phw_pmc;
+
+ KASSERT(pm,
+ ("[uncore,%d] starting cpu%d,ri%d with no pmc configured",
+ __LINE__, cpu, ri));
+
+ PMCDBG(MDP,STA,1, "ucp-start cpu=%d ri=%d", cpu, ri);
+
+ evsel = pm->pm_md.pm_ucp.pm_ucp_evsel;
+
+ PMCDBG(MDP,STA,2,
+ "ucp-start/2 cpu=%d ri=%d evselmsr=0x%x evsel=0x%x",
+ cpu, ri, SELECTSEL(uncore_cputype) + ri, evsel);
+
+	/* Event-specific configuration. */
+	switch (pm->pm_event) {
+	case PMC_EV_UCP_EVENT_0CH_04H_E:
+	case PMC_EV_UCP_EVENT_0CH_08H_E:
+		wrmsr(MSR_GQ_SNOOP_MESF, 0x2);
+		break;
+	case PMC_EV_UCP_EVENT_0CH_04H_F:
+	case PMC_EV_UCP_EVENT_0CH_08H_F:
+		wrmsr(MSR_GQ_SNOOP_MESF, 0x8);
+		break;
+	case PMC_EV_UCP_EVENT_0CH_04H_M:
+	case PMC_EV_UCP_EVENT_0CH_08H_M:
+		wrmsr(MSR_GQ_SNOOP_MESF, 0x1);
+		break;
+	case PMC_EV_UCP_EVENT_0CH_04H_S:
+	case PMC_EV_UCP_EVENT_0CH_08H_S:
+		wrmsr(MSR_GQ_SNOOP_MESF, 0x4);
+		break;
+	default:
+		break;
+	}
+
+ wrmsr(SELECTSEL(uncore_cputype) + ri, evsel);
+
+ do {
+ cc->pc_resync = 0;
+ cc->pc_globalctrl |= (1ULL << ri);
+ wrmsr(UC_GLOBAL_CTRL, cc->pc_globalctrl);
+ } while (cc->pc_resync != 0);
+
+ return (0);
+}
+
+static int
+ucp_stop_pmc(int cpu, int ri)
+{
+ struct pmc *pm;
+ struct uncore_cpu *cc;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[uncore,%d] illegal cpu value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < uncore_ucp_npmc,
+ ("[uncore,%d] illegal row index %d", __LINE__, ri));
+
+ cc = uncore_pcpu[cpu];
+ pm = cc->pc_uncorepmcs[ri].phw_pmc;
+
+ KASSERT(pm,
+ ("[uncore,%d] cpu%d ri%d no configured PMC to stop", __LINE__,
+ cpu, ri));
+
+ PMCDBG(MDP,STO,1, "ucp-stop cpu=%d ri=%d", cpu, ri);
+
+ /* stop hw. */
+ wrmsr(SELECTSEL(uncore_cputype) + ri, 0);
+
+ do {
+ cc->pc_resync = 0;
+ cc->pc_globalctrl &= ~(1ULL << ri);
+ wrmsr(UC_GLOBAL_CTRL, cc->pc_globalctrl);
+ } while (cc->pc_resync != 0);
+
+ return (0);
+}
+
+static int
+ucp_write_pmc(int cpu, int ri, pmc_value_t v)
+{
+ struct pmc *pm;
+ struct uncore_cpu *cc;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[uncore,%d] illegal cpu value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < uncore_ucp_npmc,
+ ("[uncore,%d] illegal row index %d", __LINE__, ri));
+
+ cc = uncore_pcpu[cpu];
+ pm = cc->pc_uncorepmcs[ri].phw_pmc;
+
+	KASSERT(pm,
+	    ("[uncore,%d] cpu%d ri%d no configured PMC to write", __LINE__,
+	    cpu, ri));
+
+ PMCDBG(MDP,WRI,1, "ucp-write cpu=%d ri=%d msr=0x%x v=%jx", cpu, ri,
+ UCP_PMC0 + ri, v);
+
+ if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
+ v = ucp_reload_count_to_perfctr_value(v);
+
+ /*
+ * Write the new value to the counter. The counter will be in
+ * a stopped state when the pcd_write() entry point is called.
+ */
+
+ wrmsr(UCP_PMC0 + ri, v);
+
+ return (0);
+}
+
+static void
+ucp_initialize(struct pmc_mdep *md, int maxcpu, int npmc, int pmcwidth)
+{
+ struct pmc_classdep *pcd;
+
+ KASSERT(md != NULL, ("[ucp,%d] md is NULL", __LINE__));
+
+ PMCDBG(MDP,INI,1, "%s", "ucp-initialize");
+
+ pcd = &md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCP];
+
+ pcd->pcd_caps = UCP_PMC_CAPS;
+ pcd->pcd_class = PMC_CLASS_UCP;
+ pcd->pcd_num = npmc;
+ pcd->pcd_ri = md->pmd_npmc;
+ pcd->pcd_width = pmcwidth;
+
+ pcd->pcd_allocate_pmc = ucp_allocate_pmc;
+ pcd->pcd_config_pmc = ucp_config_pmc;
+ pcd->pcd_describe = ucp_describe;
+ pcd->pcd_get_config = ucp_get_config;
+ pcd->pcd_get_msr = NULL;
+ pcd->pcd_pcpu_fini = uncore_pcpu_fini;
+ pcd->pcd_pcpu_init = uncore_pcpu_init;
+ pcd->pcd_read_pmc = ucp_read_pmc;
+ pcd->pcd_release_pmc = ucp_release_pmc;
+ pcd->pcd_start_pmc = ucp_start_pmc;
+ pcd->pcd_stop_pmc = ucp_stop_pmc;
+ pcd->pcd_write_pmc = ucp_write_pmc;
+
+ md->pmd_npmc += npmc;
+}
+
+int
+pmc_uncore_initialize(struct pmc_mdep *md, int maxcpu)
+{
+ uncore_cputype = md->pmd_cputype;
+ uncore_pmcmask = 0;
+
+ /*
+ * Initialize programmable counters.
+ */
+
+ uncore_ucp_npmc = 8;
+ uncore_ucp_width = 48;
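+
+	/*
+	 * These values match the Nehalem/Westmere-class uncore PMU
+	 * (eight 48-bit general-purpose counters plus one fixed UCLK
+	 * counter); applying them to every supported model is an
+	 * approximation.
+	 */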
+
+ uncore_pmcmask |= ((1ULL << uncore_ucp_npmc) - 1);
+
+ ucp_initialize(md, maxcpu, uncore_ucp_npmc, uncore_ucp_width);
+
+ /*
+ * Initialize fixed function counters, if present.
+ */
+ uncore_ucf_ri = uncore_ucp_npmc;
+ uncore_ucf_npmc = 1;
+ uncore_ucf_width = 48;
+
+ ucf_initialize(md, maxcpu, uncore_ucf_npmc, uncore_ucf_width);
+ uncore_pmcmask |= ((1ULL << uncore_ucf_npmc) - 1) << SELECTOFF(uncore_cputype);
+
+ PMCDBG(MDP,INI,1,"uncore-init pmcmask=0x%jx ucfri=%d", uncore_pmcmask,
+ uncore_ucf_ri);
+
+	uncore_pcpu = malloc(sizeof(struct uncore_cpu *) * maxcpu, M_PMC,
+ M_ZERO | M_WAITOK);
+
+ return (0);
+}
+
+void
+pmc_uncore_finalize(struct pmc_mdep *md)
+{
+ PMCDBG(MDP,INI,1, "%s", "uncore-finalize");
+
+ free(uncore_pcpu, M_PMC);
+ uncore_pcpu = NULL;
+}
diff --git a/sys/dev/hwpmc/hwpmc_uncore.h b/sys/dev/hwpmc/hwpmc_uncore.h
new file mode 100644
index 0000000..c1f8259
--- /dev/null
+++ b/sys/dev/hwpmc/hwpmc_uncore.h
@@ -0,0 +1,128 @@
+/*-
+ * Copyright (c) 2010 Fabien Thomas
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _DEV_HWPMC_UNCORE_H_
+#define _DEV_HWPMC_UNCORE_H_ 1
+
+/*
+ * Fixed-function PMCs.
+ */
+struct pmc_md_ucf_op_pmcallocate {
+ uint16_t pm_ucf_flags; /* additional flags */
+};
+
+#define UCF_EN 0x1
+#define UCF_PMI 0x4
+
+/*
+ * Programmable PMCs.
+ */
+struct pmc_md_ucp_op_pmcallocate {
+ uint32_t pm_ucp_config;
+};
+
+#define UCP_EVSEL(C) ((C) & 0xFF)
+#define UCP_UMASK(C) ((C) & 0xFF00)
+#define UCP_CTRR (1 << 17)
+#define UCP_EDGE (1 << 18)
+#define UCP_INT (1 << 20)
+#define UCP_EN (1 << 22)
+#define UCP_INV (1 << 23)
+#define UCP_CMASK(C) (((C) & 0xFF) << 24)
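+
+/*
+ * The bit positions above follow the familiar IA32_PERFEVTSEL layout:
+ * event code in bits 7:0, unit mask in bits 15:8, edge detect in bit
+ * 18, interrupt enable in bit 20, enable in bit 22, invert in bit 23
+ * and the counter mask in bits 31:24.
+ */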
+
+#ifdef _KERNEL
+
+#define DCTL_FLAG_UNC_PMI (1ULL << 13)
+
+/*
+ * Fixed-function counters.
+ */
+
+#define UCF_MASK 0xF
+
+#define UCF_CTR0 0x394
+
+#define UCF_OFFSET 32
+#define UCF_OFFSET_SB 29
+#define UCF_CTRL 0x395
+
+/*
+ * Programmable counters.
+ */
+
+#define UCP_PMC0 0x3B0
+#define UCP_EVSEL0 0x3C0
+#define UCP_OPCODE_MATCH 0x396
+#define UCP_CB0_EVSEL0 0x700
+
+/*
+ * Simplified programming interface in Intel Performance Architecture
+ * v2 and later.
+ */
+
+#define UC_GLOBAL_STATUS 0x392
+#define UC_GLOBAL_CTRL 0x391
+#define UC_GLOBAL_OVF_CTRL 0x393
+
+#define UC_GLOBAL_STATUS_FLAG_CLRCHG (1ULL << 63)
+#define UC_GLOBAL_STATUS_FLAG_OVFPMI (1ULL << 61)
+#define UC_GLOBAL_CTRL_FLAG_FRZ (1ULL << 63)
+#define UC_GLOBAL_CTRL_FLAG_ENPMICORE0 (1ULL << 48)
+
+/*
+ * Model specific registers.
+ */
+
+#define MSR_GQ_SNOOP_MESF 0x301
+
+struct pmc_md_ucf_pmc {
+ uint64_t pm_ucf_ctrl;
+};
+
+struct pmc_md_ucp_pmc {
+ uint32_t pm_ucp_evsel;
+};
+
+/*
+ * Prototypes.
+ */
+
+int pmc_uncore_initialize(struct pmc_mdep *_md, int _maxcpu);
+void pmc_uncore_finalize(struct pmc_mdep *_md);
+
+void pmc_uncore_mark_started(int _cpu, int _pmc);
+
+int pmc_ucf_initialize(struct pmc_mdep *_md, int _maxcpu, int _npmc, int _width);
+void pmc_ucf_finalize(struct pmc_mdep *_md);
+
+int pmc_ucp_initialize(struct pmc_mdep *_md, int _maxcpu, int _npmc, int _width,
+ int _flags);
+void pmc_ucp_finalize(struct pmc_mdep *_md);
+
+#endif /* _KERNEL */
+#endif /* _DEV_HWPMC_UNCORE_H_ */
diff --git a/sys/dev/hwpmc/hwpmc_x86.c b/sys/dev/hwpmc/hwpmc_x86.c
new file mode 100644
index 0000000..f4c6c94
--- /dev/null
+++ b/sys/dev/hwpmc/hwpmc_x86.c
@@ -0,0 +1,274 @@
+/*-
+ * Copyright (c) 2005,2008 Joseph Koshy
+ * Copyright (c) 2007 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * Portions of this software were developed by A. Joseph Koshy under
+ * sponsorship from the FreeBSD Foundation and Google, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/pmc.h>
+#include <sys/proc.h>
+#include <sys/systm.h>
+
+#include <machine/cpu.h>
+#include <machine/cputypes.h>
+#include <machine/intr_machdep.h>
+#include <machine/apicvar.h>
+#include <machine/pmc_mdep.h>
+#include <machine/md_var.h>
+
+#include <vm/vm.h>
+#include <vm/vm_param.h>
+#include <vm/pmap.h>
+
+#include "hwpmc_soft.h"
+
+/*
+ * Attempt to walk a user call stack using a too-simple algorithm.
+ * In the general case we need unwind information associated with
+ * the executable to be able to walk the user stack.
+ *
+ * We are handed a trap frame laid down at the time the PMC interrupt
+ * was taken. If the application is using frame pointers, the saved
+ * PC value could be:
+ * a. at the beginning of a function before the stack frame is laid
+ * down,
+ * b. just before a 'ret', after the stack frame has been taken off, or
+ * c. somewhere else in the function with a valid stack frame being
+ * present.
+ *
+ * If the application is not using frame pointers, this algorithm will
+ * fail to yield an interesting call chain.
+ *
+ * TODO: figure out a way to use unwind information.
+ */
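+
+/*
+ * For example, a function built with frame pointers typically begins
+ * with "push %ebp/%rbp" (0x55) followed by "mov %esp/%rsp, %ebp/%rbp"
+ * and ends in "ret" (0xc3); the PMC_AT_FUNCTION_* macros used below
+ * match the instruction bytes of these sequences to detect cases (a)
+ * and (b).
+ */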
+
+int
+pmc_save_user_callchain(uintptr_t *cc, int nframes, struct trapframe *tf)
+{
+ int n;
+ uint32_t instr;
+ uintptr_t fp, oldfp, pc, r, sp;
+
+ KASSERT(TRAPF_USERMODE(tf), ("[x86,%d] Not a user trap frame tf=%p",
+ __LINE__, (void *) tf));
+
+ pc = PMC_TRAPFRAME_TO_PC(tf);
+ oldfp = fp = PMC_TRAPFRAME_TO_FP(tf);
+ sp = PMC_TRAPFRAME_TO_USER_SP(tf);
+
+ *cc++ = pc; n = 1;
+
+ r = fp + sizeof(uintptr_t); /* points to return address */
+
+ if (!PMC_IN_USERSPACE(pc))
+ return (n);
+
+ if (copyin((void *) pc, &instr, sizeof(instr)) != 0)
+ return (n);
+
+ if (PMC_AT_FUNCTION_PROLOGUE_PUSH_BP(instr) ||
+ PMC_AT_FUNCTION_EPILOGUE_RET(instr)) { /* ret */
+ if (copyin((void *) sp, &pc, sizeof(pc)) != 0)
+ return (n);
+ } else if (PMC_AT_FUNCTION_PROLOGUE_MOV_SP_BP(instr)) {
+ sp += sizeof(uintptr_t);
+ if (copyin((void *) sp, &pc, sizeof(pc)) != 0)
+ return (n);
+ } else if (copyin((void *) r, &pc, sizeof(pc)) != 0 ||
+ copyin((void *) fp, &fp, sizeof(fp)) != 0)
+ return (n);
+
+ for (; n < nframes;) {
+ if (pc == 0 || !PMC_IN_USERSPACE(pc))
+ break;
+
+ *cc++ = pc; n++;
+
+ if (fp < oldfp)
+ break;
+
+ r = fp + sizeof(uintptr_t); /* address of return address */
+ oldfp = fp;
+
+ if (copyin((void *) r, &pc, sizeof(pc)) != 0 ||
+ copyin((void *) fp, &fp, sizeof(fp)) != 0)
+ break;
+ }
+
+ return (n);
+}
+
+/*
+ * Walking the kernel call stack.
+ *
+ * We are handed the trap frame laid down at the time the PMC
+ * interrupt was taken. The saved PC could be:
+ * a. in the lowlevel trap handler, meaning that there isn't a C stack
+ * to traverse,
+ * b. at the beginning of a function before the stack frame is laid
+ * down,
+ * c. just before a 'ret', after the stack frame has been taken off,
+ * d. somewhere else in a function with a valid stack frame being
+ * present.
+ *
+ * In case (d), the previous frame pointer is at [%ebp]/[%rbp] and
+ * the return address is at [%ebp+4]/[%rbp+8].
+ *
+ * For cases (b) and (c), the return address is at [%esp]/[%rsp] and
+ * the frame pointer doesn't need to be changed when going up one
+ * level in the stack.
+ *
+ * For case (a), we check if the PC lies in low-level trap handling
+ * code, and if so we terminate our trace.
+ */
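+
+/*
+ * The case (d) frame layout, for example on amd64 (i386 is analogous
+ * with 4-byte words):
+ *
+ * fp     -> saved caller frame pointer
+ * fp + 8 -> return address, i.e. "r = fp + sizeof(uintptr_t)" below
+ */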
+
+int
+pmc_save_kernel_callchain(uintptr_t *cc, int nframes, struct trapframe *tf)
+{
+ int n;
+ uint32_t instr;
+ uintptr_t fp, pc, r, sp, stackstart, stackend;
+ struct thread *td;
+
+ KASSERT(TRAPF_USERMODE(tf) == 0,("[x86,%d] not a kernel backtrace",
+ __LINE__));
+
+ td = curthread;
+ pc = PMC_TRAPFRAME_TO_PC(tf);
+ fp = PMC_TRAPFRAME_TO_FP(tf);
+ sp = PMC_TRAPFRAME_TO_KERNEL_SP(tf);
+
+ *cc++ = pc;
+ r = fp + sizeof(uintptr_t); /* points to return address */
+
+ if (nframes <= 1)
+ return (1);
+
+ stackstart = (uintptr_t) td->td_kstack;
+ stackend = (uintptr_t) td->td_kstack + td->td_kstack_pages * PAGE_SIZE;
+
+ if (PMC_IN_TRAP_HANDLER(pc) ||
+ !PMC_IN_KERNEL(pc) ||
+ !PMC_IN_KERNEL_STACK(r, stackstart, stackend) ||
+ !PMC_IN_KERNEL_STACK(sp, stackstart, stackend) ||
+ !PMC_IN_KERNEL_STACK(fp, stackstart, stackend))
+ return (1);
+
+ instr = *(uint32_t *) pc;
+
+ /*
+ * Determine whether the interrupted function was in the
+ * process of either laying down its stack frame or taking
+ * it off.
+ *
+ * If we haven't started laying down a stack frame, or are
+ * just about to return, then our caller's address is at
+ * *sp, and we don't have a frame to unwind.
+ */
+ if (PMC_AT_FUNCTION_PROLOGUE_PUSH_BP(instr) ||
+ PMC_AT_FUNCTION_EPILOGUE_RET(instr))
+ pc = *(uintptr_t *) sp;
+ else if (PMC_AT_FUNCTION_PROLOGUE_MOV_SP_BP(instr)) {
+ /*
+ * The code was midway through laying down a frame.
+ * At this point sp[0] has a frame back pointer,
+ * and the caller's address is therefore at sp[1].
+ */
+ sp += sizeof(uintptr_t);
+ if (!PMC_IN_KERNEL_STACK(sp, stackstart, stackend))
+ return (1);
+ pc = *(uintptr_t *) sp;
+ } else {
+ /*
+ * Not in the function prologue or epilogue.
+ */
+ pc = *(uintptr_t *) r;
+ fp = *(uintptr_t *) fp;
+ }
+
+ for (n = 1; n < nframes; n++) {
+ *cc++ = pc;
+
+ if (PMC_IN_TRAP_HANDLER(pc))
+ break;
+
+ r = fp + sizeof(uintptr_t);
+ if (!PMC_IN_KERNEL_STACK(fp, stackstart, stackend) ||
+ !PMC_IN_KERNEL_STACK(r, stackstart, stackend))
+ break;
+ pc = *(uintptr_t *) r;
+ fp = *(uintptr_t *) fp;
+ }
+
+ return (n);
+}
+
+/*
+ * Machine dependent initialization for x86 class platforms.
+ */
+
+struct pmc_mdep *
+pmc_md_initialize(void)
+{
+ int i;
+ struct pmc_mdep *md;
+
+ /* determine the CPU kind */
+ if (cpu_vendor_id == CPU_VENDOR_AMD)
+ md = pmc_amd_initialize();
+ else if (cpu_vendor_id == CPU_VENDOR_INTEL)
+ md = pmc_intel_initialize();
+ else
+ return (NULL);
+
+ /* disallow sampling if we do not have an LAPIC */
+ if (md != NULL && !lapic_enable_pmc())
+ for (i = 0; i < md->pmd_nclass; i++) {
+ if (i == PMC_CLASS_INDEX_SOFT)
+ continue;
+ md->pmd_classdep[i].pcd_caps &= ~PMC_CAP_INTERRUPT;
+ }
+
+ return (md);
+}
+
+void
+pmc_md_finalize(struct pmc_mdep *md)
+{
+
+ lapic_disable_pmc();
+ if (cpu_vendor_id == CPU_VENDOR_AMD)
+ pmc_amd_finalize(md);
+ else if (cpu_vendor_id == CPU_VENDOR_INTEL)
+ pmc_intel_finalize(md);
+ else
+ KASSERT(0, ("[x86,%d] Unknown vendor", __LINE__));
+}
diff --git a/sys/dev/hwpmc/hwpmc_xscale.c b/sys/dev/hwpmc/hwpmc_xscale.c
new file mode 100644
index 0000000..9b73337
--- /dev/null
+++ b/sys/dev/hwpmc/hwpmc_xscale.c
@@ -0,0 +1,676 @@
+/*-
+ * Copyright (c) 2009 Rui Paulo <rpaulo@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/pmc.h>
+#include <sys/pmckern.h>
+
+#include <machine/pmc_mdep.h>
+/*
+ * Support for the Intel XScale network processors.
+ *
+ * Three generations of XScale processors exist to date.
+ *
+ * The first generation has two PMCs; event selection, interrupt
+ * configuration and overflow flag setup are all done by writing the
+ * PMNC register. It also supports fewer monitoring events than the
+ * later generations.
+ *
+ * The second and third generations have four PMCs, with one register
+ * for event selection, one register for interrupt configuration and
+ * one register for the overflow flags.
+ */
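+
+/*
+ * On the first generation, for instance, the two event selectors are
+ * packed into PMNC next to the control bits (see the EVT0/EVT1 masks
+ * in hwpmc_xscale.h), so a sketch of a fully programmed PMNC is:
+ *
+ * pmnc = (evt1 << 20) | (evt0 << 12) | XSCALE_PMNC_ENABLE;
+ */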
+static int xscale_npmcs;
+static int xscale_gen; /* XScale Core generation */
+
+struct xscale_event_code_map {
+ enum pmc_event pe_ev;
+ uint8_t pe_code;
+};
+
+const struct xscale_event_code_map xscale_event_codes[] = {
+ /* 1st and 2nd Generation XScale cores */
+ { PMC_EV_XSCALE_IC_FETCH, 0x00 },
+ { PMC_EV_XSCALE_IC_MISS, 0x01 },
+ { PMC_EV_XSCALE_DATA_DEPENDENCY_STALLED,0x02 },
+ { PMC_EV_XSCALE_ITLB_MISS, 0x03 },
+ { PMC_EV_XSCALE_DTLB_MISS, 0x04 },
+ { PMC_EV_XSCALE_BRANCH_RETIRED, 0x05 },
+ { PMC_EV_XSCALE_BRANCH_MISPRED, 0x06 },
+ { PMC_EV_XSCALE_INSTR_RETIRED, 0x07 },
+ { PMC_EV_XSCALE_DC_FULL_CYCLE, 0x08 },
+ { PMC_EV_XSCALE_DC_FULL_CONTIG, 0x09 },
+ { PMC_EV_XSCALE_DC_ACCESS, 0x0a },
+ { PMC_EV_XSCALE_DC_MISS, 0x0b },
+ { PMC_EV_XSCALE_DC_WRITEBACK, 0x0c },
+ { PMC_EV_XSCALE_PC_CHANGE, 0x0d },
+ /* 3rd Generation XScale cores */
+ { PMC_EV_XSCALE_BRANCH_RETIRED_ALL, 0x0e },
+ { PMC_EV_XSCALE_INSTR_CYCLE, 0x0f },
+ { PMC_EV_XSCALE_CP_STALL, 0x17 },
+ { PMC_EV_XSCALE_PC_CHANGE_ALL, 0x18 },
+ { PMC_EV_XSCALE_PIPELINE_FLUSH, 0x19 },
+ { PMC_EV_XSCALE_BACKEND_STALL, 0x1a },
+ { PMC_EV_XSCALE_MULTIPLIER_USE, 0x1b },
+ { PMC_EV_XSCALE_MULTIPLIER_STALLED, 0x1c },
+ { PMC_EV_XSCALE_DATA_CACHE_STALLED, 0x1e },
+ { PMC_EV_XSCALE_L2_CACHE_REQ, 0x20 },
+ { PMC_EV_XSCALE_L2_CACHE_MISS, 0x23 },
+ { PMC_EV_XSCALE_ADDRESS_BUS_TRANS, 0x40 },
+ { PMC_EV_XSCALE_SELF_ADDRESS_BUS_TRANS, 0x41 },
+ { PMC_EV_XSCALE_DATA_BUS_TRANS, 0x48 },
+};
+
+const int xscale_event_codes_size =
+ sizeof(xscale_event_codes) / sizeof(xscale_event_codes[0]);
+
+/*
+ * Per-processor information.
+ */
+struct xscale_cpu {
+ struct pmc_hw *pc_xscalepmcs;
+};
+
+static struct xscale_cpu **xscale_pcpu;
+
+/*
+ * Performance Monitor Control Register
+ */
+static __inline uint32_t
+xscale_pmnc_read(void)
+{
+ uint32_t reg;
+
+ __asm __volatile("mrc p14, 0, %0, c0, c1, 0" : "=r" (reg));
+
+ return (reg);
+}
+
+static __inline void
+xscale_pmnc_write(uint32_t reg)
+{
+ __asm __volatile("mcr p14, 0, %0, c0, c1, 0" : : "r" (reg));
+}
+
+/*
+ * Clock Counter Register
+ */
+static __inline uint32_t
+xscale_ccnt_read(void)
+{
+ uint32_t reg;
+
+ __asm __volatile("mrc p14, 0, %0, c1, c1, 0" : "=r" (reg));
+
+ return (reg);
+}
+
+static __inline void
+xscale_ccnt_write(uint32_t reg)
+{
+ __asm __volatile("mcr p14, 0, %0, c1, c1, 0" : : "r" (reg));
+}
+
+/*
+ * Interrupt Enable Register
+ */
+static __inline uint32_t
+xscale_inten_read(void)
+{
+ uint32_t reg;
+
+ __asm __volatile("mrc p14, 0, %0, c4, c1, 0" : "=r" (reg));
+
+ return (reg);
+}
+
+static __inline void
+xscale_inten_write(uint32_t reg)
+{
+ __asm __volatile("mcr p14, 0, %0, c4, c1, 0" : : "r" (reg));
+}
+
+/*
+ * Overflow Flag Register
+ */
+static __inline uint32_t
+xscale_flag_read(void)
+{
+ uint32_t reg;
+
+ __asm __volatile("mrc p14, 0, %0, c5, c1, 0" : "=r" (reg));
+
+ return (reg);
+}
+
+static __inline void
+xscale_flag_write(uint32_t reg)
+{
+ __asm __volatile("mcr p14, 0, %0, c5, c1, 0" : : "r" (reg));
+}
+
+/*
+ * Event Selection Register
+ */
+static __inline uint32_t
+xscale_evtsel_read(void)
+{
+ uint32_t reg;
+
+ __asm __volatile("mrc p14, 0, %0, c8, c1, 0" : "=r" (reg));
+
+ return (reg);
+}
+
+static __inline void
+xscale_evtsel_write(uint32_t reg)
+{
+ __asm __volatile("mcr p14, 0, %0, c8, c1, 0" : : "r" (reg));
+}
+
+/*
+ * Performance Count Register N
+ */
+static uint32_t
+xscale_pmcn_read(unsigned int pmc)
+{
+ uint32_t reg = 0;
+
+ KASSERT(pmc < 4, ("[xscale,%d] illegal PMC number %d", __LINE__, pmc));
+
+ switch (pmc) {
+ case 0:
+ __asm __volatile("mrc p14, 0, %0, c0, c2, 0" : "=r" (reg));
+ break;
+ case 1:
+ __asm __volatile("mrc p14, 0, %0, c1, c2, 0" : "=r" (reg));
+ break;
+ case 2:
+ __asm __volatile("mrc p14, 0, %0, c2, c2, 0" : "=r" (reg));
+ break;
+ case 3:
+ __asm __volatile("mrc p14, 0, %0, c3, c2, 0" : "=r" (reg));
+ break;
+ }
+
+ return (reg);
+}
+
+static uint32_t
+xscale_pmcn_write(unsigned int pmc, uint32_t reg)
+{
+
+ KASSERT(pmc < 4, ("[xscale,%d] illegal PMC number %d", __LINE__, pmc));
+
+ switch (pmc) {
+ case 0:
+ __asm __volatile("mcr p14, 0, %0, c0, c2, 0" : : "r" (reg));
+ break;
+ case 1:
+ __asm __volatile("mcr p14, 0, %0, c1, c2, 0" : : "r" (reg));
+ break;
+ case 2:
+ __asm __volatile("mcr p14, 0, %0, c2, c2, 0" : : "r" (reg));
+ break;
+ case 3:
+ __asm __volatile("mcr p14, 0, %0, c3, c2, 0" : : "r" (reg));
+ break;
+ }
+
+ return (reg);
+}
+
+static int
+xscale_allocate_pmc(int cpu, int ri, struct pmc *pm,
+ const struct pmc_op_pmcallocate *a)
+{
+ enum pmc_event pe;
+ uint32_t caps, config;
+ int i;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[xscale,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < xscale_npmcs,
+ ("[xscale,%d] illegal row index %d", __LINE__, ri));
+
+ caps = a->pm_caps;
+ if (a->pm_class != PMC_CLASS_XSCALE)
+ return (EINVAL);
+ pe = a->pm_ev;
+ for (i = 0; i < xscale_event_codes_size; i++) {
+ if (xscale_event_codes[i].pe_ev == pe) {
+ config = xscale_event_codes[i].pe_code;
+ break;
+ }
+ }
+ if (i == xscale_event_codes_size)
+ return EINVAL;
+ /* Generation 1 supports only the events up to PC_CHANGE. */
+ if (xscale_gen == 1 && pe > PMC_EV_XSCALE_PC_CHANGE)
+ return EINVAL;
+ pm->pm_md.pm_xscale.pm_xscale_evsel = config;
+
+ PMCDBG(MDP,ALL,2,"xscale-allocate ri=%d -> config=0x%x", ri, config);
+
+ return 0;
+}
+
+static int
+xscale_read_pmc(int cpu, int ri, pmc_value_t *v)
+{
+ struct pmc *pm;
+ pmc_value_t tmp;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[xscale,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < xscale_npmcs,
+ ("[xscale,%d] illegal row index %d", __LINE__, ri));
+
+ pm = xscale_pcpu[cpu]->pc_xscalepmcs[ri].phw_pmc;
+ tmp = xscale_pmcn_read(ri);
+ PMCDBG(MDP,REA,2,"xscale-read id=%d -> %jd", ri, tmp);
+ if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
+ *v = XSCALE_PERFCTR_VALUE_TO_RELOAD_COUNT(tmp);
+ else
+ *v = tmp;
+
+ return 0;
+}
+
+static int
+xscale_write_pmc(int cpu, int ri, pmc_value_t v)
+{
+ struct pmc *pm;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[xscale,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < xscale_npmcs,
+ ("[xscale,%d] illegal row-index %d", __LINE__, ri));
+
+ pm = xscale_pcpu[cpu]->pc_xscalepmcs[ri].phw_pmc;
+
+ if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
+ v = XSCALE_RELOAD_COUNT_TO_PERFCTR_VALUE(v);
+
+ PMCDBG(MDP,WRI,1,"xscale-write cpu=%d ri=%d v=%jx", cpu, ri, v);
+
+ xscale_pmcn_write(ri, v);
+
+ return 0;
+}
+
+static int
+xscale_config_pmc(int cpu, int ri, struct pmc *pm)
+{
+ struct pmc_hw *phw;
+
+ PMCDBG(MDP,CFG,1, "cpu=%d ri=%d pm=%p", cpu, ri, pm);
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[xscale,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < xscale_npmcs,
+ ("[xscale,%d] illegal row-index %d", __LINE__, ri));
+
+ phw = &xscale_pcpu[cpu]->pc_xscalepmcs[ri];
+
+ KASSERT(pm == NULL || phw->phw_pmc == NULL,
+ ("[xscale,%d] pm=%p phw->pm=%p hwpmc not unconfigured",
+ __LINE__, pm, phw->phw_pmc));
+
+ phw->phw_pmc = pm;
+
+ return 0;
+}
+
+static int
+xscale_start_pmc(int cpu, int ri)
+{
+ uint32_t pmnc, config, evtsel;
+ struct pmc *pm;
+ struct pmc_hw *phw;
+
+ phw = &xscale_pcpu[cpu]->pc_xscalepmcs[ri];
+ pm = phw->phw_pmc;
+ config = pm->pm_md.pm_xscale.pm_xscale_evsel;
+
+ /*
+ * Configure the event selection.
+ *
+ * First generation cores have no EVTSEL register; their event
+ * selectors are fields within the PMNC register itself.
+ */
+ if (xscale_npmcs == 2) {
+ pmnc = xscale_pmnc_read();
+ switch (ri) {
+ case 0:
+ pmnc &= ~XSCALE_PMNC_EVT0_MASK;
+ pmnc |= (config << 12) & XSCALE_PMNC_EVT0_MASK;
+ break;
+ case 1:
+ pmnc &= ~XSCALE_PMNC_EVT1_MASK;
+ pmnc |= (config << 20) & XSCALE_PMNC_EVT1_MASK;
+ break;
+ default:
+ /* XXX */
+ break;
+ }
+ xscale_pmnc_write(pmnc);
+ } else {
+ evtsel = xscale_evtsel_read();
+ switch (ri) {
+ case 0:
+ evtsel &= ~XSCALE_EVTSEL_EVT0_MASK;
+ evtsel |= config & XSCALE_EVTSEL_EVT0_MASK;
+ break;
+ case 1:
+ evtsel &= ~XSCALE_EVTSEL_EVT1_MASK;
+ evtsel |= (config << 8) & XSCALE_EVTSEL_EVT1_MASK;
+ break;
+ case 2:
+ evtsel &= ~XSCALE_EVTSEL_EVT2_MASK;
+ evtsel |= (config << 16) & XSCALE_EVTSEL_EVT2_MASK;
+ break;
+ case 3:
+ evtsel &= ~XSCALE_EVTSEL_EVT3_MASK;
+ evtsel |= (config << 24) & XSCALE_EVTSEL_EVT3_MASK;
+ break;
+ default:
+ /* XXX */
+ break;
+ }
+ xscale_evtsel_write(evtsel);
+ }
+ /*
+ * Enable the PMC.
+ *
+ * Note that XScale provides only one bit to enable/disable _all_
+ * performance monitoring units.
+ */
+ pmnc = xscale_pmnc_read();
+ pmnc |= XSCALE_PMNC_ENABLE;
+ xscale_pmnc_write(pmnc);
+
+ return 0;
+}
+
+static int
+xscale_stop_pmc(int cpu, int ri)
+{
+ uint32_t pmnc, evtsel;
+ struct pmc *pm;
+ struct pmc_hw *phw;
+
+ phw = &xscale_pcpu[cpu]->pc_xscalepmcs[ri];
+ pm = phw->phw_pmc;
+
+ /*
+ * Disable the PMCs.
+ *
+ * Note that XScale provides only one bit to enable/disable _all_
+ * performance monitoring units.
+ */
+ pmnc = xscale_pmnc_read();
+ pmnc &= ~XSCALE_PMNC_ENABLE;
+ xscale_pmnc_write(pmnc);
+ /*
+ * A value of 0xff makes the corresponding PMU go into
+ * power saving mode.
+ */
+ if (xscale_npmcs == 2) {
+ pmnc = xscale_pmnc_read();
+ switch (ri) {
+ case 0:
+ pmnc |= XSCALE_PMNC_EVT0_MASK;
+ break;
+ case 1:
+ pmnc |= XSCALE_PMNC_EVT1_MASK;
+ break;
+ default:
+ /* XXX */
+ break;
+ }
+ xscale_pmnc_write(pmnc);
+ } else {
+ evtsel = xscale_evtsel_read();
+ switch (ri) {
+ case 0:
+ evtsel |= XSCALE_EVTSEL_EVT0_MASK;
+ break;
+ case 1:
+ evtsel |= XSCALE_EVTSEL_EVT1_MASK;
+ break;
+ case 2:
+ evtsel |= XSCALE_EVTSEL_EVT2_MASK;
+ break;
+ case 3:
+ evtsel |= XSCALE_EVTSEL_EVT3_MASK;
+ break;
+ default:
+ /* XXX */
+ break;
+ }
+ xscale_evtsel_write(evtsel);
+ }
+
+ return 0;
+}
+
+static int
+xscale_release_pmc(int cpu, int ri, struct pmc *pmc)
+{
+ struct pmc_hw *phw;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[xscale,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < xscale_npmcs,
+ ("[xscale,%d] illegal row-index %d", __LINE__, ri));
+
+ phw = &xscale_pcpu[cpu]->pc_xscalepmcs[ri];
+ KASSERT(phw->phw_pmc == NULL,
+ ("[xscale,%d] PHW pmc %p non-NULL", __LINE__, phw->phw_pmc));
+
+ return 0;
+}
+
+static int
+xscale_intr(int cpu, struct trapframe *tf)
+{
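+ /* XXX: sampling interrupts are not handled yet; just note them. */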
+ printf("intr\n");
+ return 0;
+}
+
+static int
+xscale_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc)
+{
+ int error;
+ struct pmc_hw *phw;
+ char xscale_name[PMC_NAME_MAX];
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[xscale,%d], illegal CPU %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < xscale_npmcs,
+ ("[xscale,%d] row-index %d out of range", __LINE__, ri));
+
+ phw = &xscale_pcpu[cpu]->pc_xscalepmcs[ri];
+ snprintf(xscale_name, sizeof(xscale_name), "XSCALE-%d", ri);
+ if ((error = copystr(xscale_name, pi->pm_name, PMC_NAME_MAX,
+ NULL)) != 0)
+ return error;
+ pi->pm_class = PMC_CLASS_XSCALE;
+ if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) {
+ pi->pm_enabled = TRUE;
+ *ppmc = phw->phw_pmc;
+ } else {
+ pi->pm_enabled = FALSE;
+ *ppmc = NULL;
+ }
+
+ return (0);
+}
+
+static int
+xscale_get_config(int cpu, int ri, struct pmc **ppm)
+{
+ *ppm = xscale_pcpu[cpu]->pc_xscalepmcs[ri].phw_pmc;
+
+ return 0;
+}
+
+/*
+ * XXX don't know what we should do here.
+ */
+static int
+xscale_switch_in(struct pmc_cpu *pc, struct pmc_process *pp)
+{
+ return 0;
+}
+
+static int
+xscale_switch_out(struct pmc_cpu *pc, struct pmc_process *pp)
+{
+ return 0;
+}
+
+static int
+xscale_pcpu_init(struct pmc_mdep *md, int cpu)
+{
+ int first_ri, i;
+ struct pmc_cpu *pc;
+ struct xscale_cpu *pac;
+ struct pmc_hw *phw;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[xscale,%d] wrong cpu number %d", __LINE__, cpu));
+ PMCDBG(MDP,INI,1,"xscale-init cpu=%d", cpu);
+
+ xscale_pcpu[cpu] = pac = malloc(sizeof(struct xscale_cpu), M_PMC,
+ M_WAITOK|M_ZERO);
+ pac->pc_xscalepmcs = malloc(sizeof(struct pmc_hw) * xscale_npmcs,
+ M_PMC, M_WAITOK|M_ZERO);
+ pc = pmc_pcpu[cpu];
+ first_ri = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_XSCALE].pcd_ri;
+ KASSERT(pc != NULL, ("[xscale,%d] NULL per-cpu pointer", __LINE__));
+
+ for (i = 0, phw = pac->pc_xscalepmcs; i < xscale_npmcs; i++, phw++) {
+ phw->phw_state = PMC_PHW_FLAG_IS_ENABLED |
+ PMC_PHW_CPU_TO_STATE(cpu) | PMC_PHW_INDEX_TO_STATE(i);
+ phw->phw_pmc = NULL;
+ pc->pc_hwpmcs[i + first_ri] = phw;
+ }
+
+ /*
+ * Disable and put the PMUs into power save mode.
+ */
+ if (xscale_npmcs == 2) {
+ xscale_pmnc_write(XSCALE_PMNC_EVT1_MASK |
+ XSCALE_PMNC_EVT0_MASK);
+ } else {
+ xscale_evtsel_write(XSCALE_EVTSEL_EVT3_MASK |
+ XSCALE_EVTSEL_EVT2_MASK | XSCALE_EVTSEL_EVT1_MASK |
+ XSCALE_EVTSEL_EVT0_MASK);
+ }
+
+ return 0;
+}
+
+static int
+xscale_pcpu_fini(struct pmc_mdep *md, int cpu)
+{
+ return 0;
+}
+
+struct pmc_mdep *
+pmc_xscale_initialize(void)
+{
+ struct pmc_mdep *pmc_mdep;
+ struct pmc_classdep *pcd;
+ uint32_t idreg;
+
+ /* Get the Core Generation from CP15 */
+ __asm __volatile("mrc p15, 0, %0, c0, c0, 0" : "=r" (idreg));
+ xscale_gen = (idreg >> 13) & 0x3;
+ switch (xscale_gen) {
+ case 1:
+ xscale_npmcs = 2;
+ break;
+ case 2:
+ case 3:
+ xscale_npmcs = 4;
+ break;
+ default:
+ printf("%s: unknown XScale core generation\n", __func__);
+ return (NULL);
+ }
+ PMCDBG(MDP,INI,1,"xscale-init npmcs=%d", xscale_npmcs);
+
+ /*
+ * Allocate space for pointers to PMC HW descriptors and for
+ * the MDEP structure used by MI code.
+ */
+ xscale_pcpu = malloc(sizeof(struct xscale_cpu *) * pmc_cpu_max(), M_PMC,
+ M_WAITOK|M_ZERO);
+
+ /* Just one class */
+ pmc_mdep = pmc_mdep_alloc(1);
+
+ pmc_mdep->pmd_cputype = PMC_CPU_INTEL_XSCALE;
+
+ pcd = &pmc_mdep->pmd_classdep[PMC_MDEP_CLASS_INDEX_XSCALE];
+ pcd->pcd_caps = XSCALE_PMC_CAPS;
+ pcd->pcd_class = PMC_CLASS_XSCALE;
+ pcd->pcd_num = xscale_npmcs;
+ pcd->pcd_ri = pmc_mdep->pmd_npmc;
+ pcd->pcd_width = 32;
+
+ pcd->pcd_allocate_pmc = xscale_allocate_pmc;
+ pcd->pcd_config_pmc = xscale_config_pmc;
+ pcd->pcd_pcpu_fini = xscale_pcpu_fini;
+ pcd->pcd_pcpu_init = xscale_pcpu_init;
+ pcd->pcd_describe = xscale_describe;
+ pcd->pcd_get_config = xscale_get_config;
+ pcd->pcd_read_pmc = xscale_read_pmc;
+ pcd->pcd_release_pmc = xscale_release_pmc;
+ pcd->pcd_start_pmc = xscale_start_pmc;
+ pcd->pcd_stop_pmc = xscale_stop_pmc;
+ pcd->pcd_write_pmc = xscale_write_pmc;
+
+ pmc_mdep->pmd_intr = xscale_intr;
+ pmc_mdep->pmd_switch_in = xscale_switch_in;
+ pmc_mdep->pmd_switch_out = xscale_switch_out;
+
+ pmc_mdep->pmd_npmc += xscale_npmcs;
+
+ return (pmc_mdep);
+}
+
+void
+pmc_xscale_finalize(struct pmc_mdep *md)
+{
+}
diff --git a/sys/dev/hwpmc/hwpmc_xscale.h b/sys/dev/hwpmc/hwpmc_xscale.h
new file mode 100644
index 0000000..80e2d23
--- /dev/null
+++ b/sys/dev/hwpmc/hwpmc_xscale.h
@@ -0,0 +1,73 @@
+/*-
+ * Copyright (c) 2009 Rui Paulo <rpaulo@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _DEV_HWPMC_XSCALE_H_
+#define _DEV_HWPMC_XSCALE_H_
+
+#define XSCALE_PMC_CAPS (PMC_CAP_INTERRUPT | PMC_CAP_USER | \
+ PMC_CAP_SYSTEM | PMC_CAP_EDGE | \
+ PMC_CAP_THRESHOLD | PMC_CAP_READ | \
+ PMC_CAP_WRITE | PMC_CAP_INVERT | \
+ PMC_CAP_QUALIFIER)
+
+
+#define XSCALE_PMNC_ENABLE 0x01 /* Enable all Counters */
+#define XSCALE_PMNC_PMNRESET 0x02 /* Performance Counter Reset */
+#define XSCALE_PMNC_CCNTRESET 0x04 /* Clock Counter Reset */
+#define XSCALE_PMNC_CCNTDIV 0x08 /* Clock Counter Divider */
+/* IXP425 only -- first generation */
+#define XSCALE_PMNC_EVT0_MASK 0x00ff000
+#define XSCALE_PMNC_EVT1_MASK 0xff00000
+
+#define XSCALE_INTEN_CCNT 0x01 /* Enable Clock Counter Int. */
+#define XSCALE_INTEN_PMN0 0x02 /* Enable PMN0 Interrupts */
+#define XSCALE_INTEN_PMN1 0x04 /* Enable PMN1 Interrupts */
+#define XSCALE_INTEN_PMN2 0x08 /* Enable PMN2 Interrupts */
+#define XSCALE_INTEN_PMN3 0x10 /* Enable PMN3 Interrupts */
+
+#define XSCALE_EVTSEL_EVT0_MASK 0x000000ff
+#define XSCALE_EVTSEL_EVT1_MASK 0x0000ff00
+#define XSCALE_EVTSEL_EVT2_MASK 0x00ff0000
+#define XSCALE_EVTSEL_EVT3_MASK 0xff000000
+
+#define XSCALE_FLAG_CCNT_OVERFLOW 0x01
+#define XSCALE_FLAG_PMN0_OVERFLOW 0x02
+#define XSCALE_FLAG_PMN1_OVERFLOW 0x04
+#define XSCALE_FLAG_PMN2_OVERFLOW 0x08
+#define XSCALE_FLAG_PMN3_OVERFLOW 0x10
+
+#define XSCALE_RELOAD_COUNT_TO_PERFCTR_VALUE(R) (-(R))
+#define XSCALE_PERFCTR_VALUE_TO_RELOAD_COUNT(P) (-(P))
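+
+/*
+ * The counters interrupt on overflow, so a sampling PMC that should
+ * fire after R more events is loaded with the 32-bit value 2^32 - R,
+ * i.e. -R in two's complement; a reload count of 0x1000, for example,
+ * is written to the counter as 0xfffff000.
+ */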
+
+#ifdef _KERNEL
+/* MD extension for 'struct pmc' */
+struct pmc_md_xscale_pmc {
+ uint32_t pm_xscale_evsel;
+};
+#endif /* _KERNEL */
+#endif /* _DEV_HWPMC_XSCALE_H_ */
diff --git a/sys/dev/hwpmc/pmc_events.h b/sys/dev/hwpmc/pmc_events.h
new file mode 100644
index 0000000..8f246f6
--- /dev/null
+++ b/sys/dev/hwpmc/pmc_events.h
@@ -0,0 +1,4388 @@
+/*-
+ * Copyright (c) 2005 Joseph Koshy
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _DEV_HWPMC_PMC_EVENTS_H_
+#define _DEV_HWPMC_PMC_EVENTS_H_
+
+/*
+ * Note: Documentation on adding events can be found both in
+ * the source tree at src/share/doc/papers/hwpmc/hwpmc.ms
+ * as well as on-line at:
+ *
+ * http://wiki.freebsd.org/PmcTools/PmcHardwareHowTo
+ *
+ * Please refer to those resources before you attempt to modify
+ * this file or the hwpmc driver/subsystem.
+ */
+
+/*
+ * PMC event codes.
+ *
+ * __PMC_EV(CLASS, SYMBOLIC-NAME)
+ */
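+
+/*
+ * Consumers expand these lists with an X-macro; a minimal sketch of
+ * building an enumeration from the K7 list below, for example:
+ *
+ * #define __PMC_EV(C, N) PMC_EV_ ## C ## _ ## N,
+ * enum { __PMC_EV_K7() };
+ * #undef __PMC_EV
+ */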
+
+/*
+ * AMD K7 Events, from "The AMD Athlon(tm) Processor x86 Code
+ * Optimization Guide" [Doc#22007K, Feb 2002]
+ */
+
+#define __PMC_EV_K7() \
+__PMC_EV(K7, DC_ACCESSES) \
+__PMC_EV(K7, DC_MISSES) \
+__PMC_EV(K7, DC_REFILLS_FROM_L2) \
+__PMC_EV(K7, DC_REFILLS_FROM_SYSTEM) \
+__PMC_EV(K7, DC_WRITEBACKS) \
+__PMC_EV(K7, L1_DTLB_MISS_AND_L2_DTLB_HITS) \
+__PMC_EV(K7, L1_AND_L2_DTLB_MISSES) \
+__PMC_EV(K7, MISALIGNED_REFERENCES) \
+__PMC_EV(K7, IC_FETCHES) \
+__PMC_EV(K7, IC_MISSES) \
+__PMC_EV(K7, L1_ITLB_MISSES) \
+__PMC_EV(K7, L1_L2_ITLB_MISSES) \
+__PMC_EV(K7, RETIRED_INSTRUCTIONS) \
+__PMC_EV(K7, RETIRED_OPS) \
+__PMC_EV(K7, RETIRED_BRANCHES) \
+__PMC_EV(K7, RETIRED_BRANCHES_MISPREDICTED) \
+__PMC_EV(K7, RETIRED_TAKEN_BRANCHES) \
+__PMC_EV(K7, RETIRED_TAKEN_BRANCHES_MISPREDICTED) \
+__PMC_EV(K7, RETIRED_FAR_CONTROL_TRANSFERS) \
+__PMC_EV(K7, RETIRED_RESYNC_BRANCHES) \
+__PMC_EV(K7, INTERRUPTS_MASKED_CYCLES) \
+__PMC_EV(K7, INTERRUPTS_MASKED_WHILE_PENDING_CYCLES) \
+__PMC_EV(K7, HARDWARE_INTERRUPTS)
+
+#define PMC_EV_K7_FIRST PMC_EV_K7_DC_ACCESSES
+#define PMC_EV_K7_LAST PMC_EV_K7_HARDWARE_INTERRUPTS
+
+
+/*
+ * Intel P4 Events, from "IA-32 Intel(r) Architecture Software
+ * Developer's Manual, Volume 3: System Programming Guide" [245472-012]
+ */
+
+#define __PMC_EV_P4() \
+__PMC_EV(P4, TC_DELIVER_MODE) \
+__PMC_EV(P4, BPU_FETCH_REQUEST) \
+__PMC_EV(P4, ITLB_REFERENCE) \
+__PMC_EV(P4, MEMORY_CANCEL) \
+__PMC_EV(P4, MEMORY_COMPLETE) \
+__PMC_EV(P4, LOAD_PORT_REPLAY) \
+__PMC_EV(P4, STORE_PORT_REPLAY) \
+__PMC_EV(P4, MOB_LOAD_REPLAY) \
+__PMC_EV(P4, PAGE_WALK_TYPE) \
+__PMC_EV(P4, BSQ_CACHE_REFERENCE) \
+__PMC_EV(P4, IOQ_ALLOCATION) \
+__PMC_EV(P4, IOQ_ACTIVE_ENTRIES) \
+__PMC_EV(P4, FSB_DATA_ACTIVITY) \
+__PMC_EV(P4, BSQ_ALLOCATION) \
+__PMC_EV(P4, BSQ_ACTIVE_ENTRIES) \
+__PMC_EV(P4, SSE_INPUT_ASSIST) \
+__PMC_EV(P4, PACKED_SP_UOP) \
+__PMC_EV(P4, PACKED_DP_UOP) \
+__PMC_EV(P4, SCALAR_SP_UOP) \
+__PMC_EV(P4, SCALAR_DP_UOP) \
+__PMC_EV(P4, 64BIT_MMX_UOP) \
+__PMC_EV(P4, 128BIT_MMX_UOP) \
+__PMC_EV(P4, X87_FP_UOP) \
+__PMC_EV(P4, X87_SIMD_MOVES_UOP) \
+__PMC_EV(P4, GLOBAL_POWER_EVENTS) \
+__PMC_EV(P4, TC_MS_XFER) \
+__PMC_EV(P4, UOP_QUEUE_WRITES) \
+__PMC_EV(P4, RETIRED_MISPRED_BRANCH_TYPE) \
+__PMC_EV(P4, RETIRED_BRANCH_TYPE) \
+__PMC_EV(P4, RESOURCE_STALL) \
+__PMC_EV(P4, WC_BUFFER) \
+__PMC_EV(P4, B2B_CYCLES) \
+__PMC_EV(P4, BNR) \
+__PMC_EV(P4, SNOOP) \
+__PMC_EV(P4, RESPONSE) \
+__PMC_EV(P4, FRONT_END_EVENT) \
+__PMC_EV(P4, EXECUTION_EVENT) \
+__PMC_EV(P4, REPLAY_EVENT) \
+__PMC_EV(P4, INSTR_RETIRED) \
+__PMC_EV(P4, UOPS_RETIRED) \
+__PMC_EV(P4, UOP_TYPE) \
+__PMC_EV(P4, BRANCH_RETIRED) \
+__PMC_EV(P4, MISPRED_BRANCH_RETIRED) \
+__PMC_EV(P4, X87_ASSIST) \
+__PMC_EV(P4, MACHINE_CLEAR)
+
+#define PMC_EV_P4_FIRST PMC_EV_P4_TC_DELIVER_MODE
+#define PMC_EV_P4_LAST PMC_EV_P4_MACHINE_CLEAR
+
+/* Intel Pentium Pro, P-II, P-III and Pentium-M style events */
+
+#define __PMC_EV_P6() \
+__PMC_EV(P6, DATA_MEM_REFS) \
+__PMC_EV(P6, DCU_LINES_IN) \
+__PMC_EV(P6, DCU_M_LINES_IN) \
+__PMC_EV(P6, DCU_M_LINES_OUT) \
+__PMC_EV(P6, DCU_MISS_OUTSTANDING) \
+__PMC_EV(P6, IFU_FETCH) \
+__PMC_EV(P6, IFU_FETCH_MISS) \
+__PMC_EV(P6, ITLB_MISS) \
+__PMC_EV(P6, IFU_MEM_STALL) \
+__PMC_EV(P6, ILD_STALL) \
+__PMC_EV(P6, L2_IFETCH) \
+__PMC_EV(P6, L2_LD) \
+__PMC_EV(P6, L2_ST) \
+__PMC_EV(P6, L2_LINES_IN) \
+__PMC_EV(P6, L2_LINES_OUT) \
+__PMC_EV(P6, L2_M_LINES_INM) \
+__PMC_EV(P6, L2_M_LINES_OUTM) \
+__PMC_EV(P6, L2_RQSTS) \
+__PMC_EV(P6, L2_ADS) \
+__PMC_EV(P6, L2_DBUS_BUSY) \
+__PMC_EV(P6, L2_DBUS_BUSY_RD) \
+__PMC_EV(P6, BUS_DRDY_CLOCKS) \
+__PMC_EV(P6, BUS_LOCK_CLOCKS) \
+__PMC_EV(P6, BUS_REQ_OUTSTANDING) \
+__PMC_EV(P6, BUS_TRAN_BRD) \
+__PMC_EV(P6, BUS_TRAN_RFO) \
+__PMC_EV(P6, BUS_TRANS_WB) \
+__PMC_EV(P6, BUS_TRAN_IFETCH) \
+__PMC_EV(P6, BUS_TRAN_INVAL) \
+__PMC_EV(P6, BUS_TRAN_PWR) \
+__PMC_EV(P6, BUS_TRANS_P) \
+__PMC_EV(P6, BUS_TRANS_IO) \
+__PMC_EV(P6, BUS_TRAN_DEF) \
+__PMC_EV(P6, BUS_TRAN_BURST) \
+__PMC_EV(P6, BUS_TRAN_ANY) \
+__PMC_EV(P6, BUS_TRAN_MEM) \
+__PMC_EV(P6, BUS_DATA_RCV) \
+__PMC_EV(P6, BUS_BNR_DRV) \
+__PMC_EV(P6, BUS_HIT_DRV) \
+__PMC_EV(P6, BUS_HITM_DRV) \
+__PMC_EV(P6, BUS_SNOOP_STALL) \
+__PMC_EV(P6, FLOPS) \
+__PMC_EV(P6, FP_COMPS_OPS_EXE) \
+__PMC_EV(P6, FP_ASSIST) \
+__PMC_EV(P6, MUL) \
+__PMC_EV(P6, DIV) \
+__PMC_EV(P6, CYCLES_DIV_BUSY) \
+__PMC_EV(P6, LD_BLOCKS) \
+__PMC_EV(P6, SB_DRAINS) \
+__PMC_EV(P6, MISALIGN_MEM_REF) \
+__PMC_EV(P6, EMON_KNI_PREF_DISPATCHED) \
+__PMC_EV(P6, EMON_KNI_PREF_MISS) \
+__PMC_EV(P6, INST_RETIRED) \
+__PMC_EV(P6, UOPS_RETIRED) \
+__PMC_EV(P6, INST_DECODED) \
+__PMC_EV(P6, EMON_KNI_INST_RETIRED) \
+__PMC_EV(P6, EMON_KNI_COMP_INST_RET) \
+__PMC_EV(P6, HW_INT_RX) \
+__PMC_EV(P6, CYCLES_INT_MASKED) \
+__PMC_EV(P6, CYCLES_INT_PENDING_AND_MASKED) \
+__PMC_EV(P6, BR_INST_RETIRED) \
+__PMC_EV(P6, BR_MISS_PRED_RETIRED) \
+__PMC_EV(P6, BR_TAKEN_RETIRED) \
+__PMC_EV(P6, BR_MISS_PRED_TAKEN_RET) \
+__PMC_EV(P6, BR_INST_DECODED) \
+__PMC_EV(P6, BTB_MISSES) \
+__PMC_EV(P6, BR_BOGUS) \
+__PMC_EV(P6, BACLEARS) \
+__PMC_EV(P6, RESOURCE_STALLS) \
+__PMC_EV(P6, PARTIAL_RAT_STALLS) \
+__PMC_EV(P6, SEGMENT_REG_LOADS) \
+__PMC_EV(P6, CPU_CLK_UNHALTED) \
+__PMC_EV(P6, MMX_INSTR_EXEC) \
+__PMC_EV(P6, MMX_SAT_INSTR_EXEC) \
+__PMC_EV(P6, MMX_UOPS_EXEC) \
+__PMC_EV(P6, MMX_INSTR_TYPE_EXEC) \
+__PMC_EV(P6, FP_MMX_TRANS) \
+__PMC_EV(P6, MMX_ASSIST) \
+__PMC_EV(P6, MMX_INSTR_RET) \
+__PMC_EV(P6, SEG_RENAME_STALLS) \
+__PMC_EV(P6, SEG_REG_RENAMES) \
+__PMC_EV(P6, RET_SEG_RENAMES) \
+__PMC_EV(P6, EMON_EST_TRANS) \
+__PMC_EV(P6, EMON_THERMAL_TRIP) \
+__PMC_EV(P6, BR_INST_EXEC) \
+__PMC_EV(P6, BR_MISSP_EXEC) \
+__PMC_EV(P6, BR_BAC_MISSP_EXEC) \
+__PMC_EV(P6, BR_CND_EXEC) \
+__PMC_EV(P6, BR_CND_MISSP_EXEC) \
+__PMC_EV(P6, BR_IND_EXEC) \
+__PMC_EV(P6, BR_IND_MISSP_EXEC) \
+__PMC_EV(P6, BR_RET_EXEC) \
+__PMC_EV(P6, BR_RET_MISSP_EXEC) \
+__PMC_EV(P6, BR_RET_BAC_MISSP_EXEC) \
+__PMC_EV(P6, BR_CALL_EXEC) \
+__PMC_EV(P6, BR_CALL_MISSP_EXEC) \
+__PMC_EV(P6, BR_IND_CALL_EXEC) \
+__PMC_EV(P6, EMON_SIMD_INSTR_RETIRED) \
+__PMC_EV(P6, EMON_SYNCH_UOPS) \
+__PMC_EV(P6, EMON_ESP_UOPS) \
+__PMC_EV(P6, EMON_FUSED_UOPS_RET) \
+__PMC_EV(P6, EMON_UNFUSION) \
+__PMC_EV(P6, EMON_PREF_RQSTS_UP) \
+__PMC_EV(P6, EMON_PREF_RQSTS_DN) \
+__PMC_EV(P6, EMON_SSE_SSE2_INST_RETIRED) \
+__PMC_EV(P6, EMON_SSE_SSE2_COMP_INST_RETIRED)
+
+
+#define PMC_EV_P6_FIRST PMC_EV_P6_DATA_MEM_REFS
+#define PMC_EV_P6_LAST PMC_EV_P6_EMON_SSE_SSE2_COMP_INST_RETIRED
+
+/* AMD K8 PMCs */
+
+#define __PMC_EV_K8() \
+__PMC_EV(K8, FP_DISPATCHED_FPU_OPS) \
+__PMC_EV(K8, FP_CYCLES_WITH_NO_FPU_OPS_RETIRED) \
+__PMC_EV(K8, FP_DISPATCHED_FPU_FAST_FLAG_OPS) \
+__PMC_EV(K8, LS_SEGMENT_REGISTER_LOAD) \
+__PMC_EV(K8, LS_MICROARCHITECTURAL_RESYNC_BY_SELF_MODIFYING_CODE) \
+__PMC_EV(K8, LS_MICROARCHITECTURAL_RESYNC_BY_SNOOP) \
+__PMC_EV(K8, LS_BUFFER2_FULL) \
+__PMC_EV(K8, LS_LOCKED_OPERATION) \
+__PMC_EV(K8, LS_MICROARCHITECTURAL_LATE_CANCEL) \
+__PMC_EV(K8, LS_RETIRED_CFLUSH_INSTRUCTIONS) \
+__PMC_EV(K8, LS_RETIRED_CPUID_INSTRUCTIONS) \
+__PMC_EV(K8, DC_ACCESS) \
+__PMC_EV(K8, DC_MISS) \
+__PMC_EV(K8, DC_REFILL_FROM_L2) \
+__PMC_EV(K8, DC_REFILL_FROM_SYSTEM) \
+__PMC_EV(K8, DC_COPYBACK) \
+__PMC_EV(K8, DC_L1_DTLB_MISS_AND_L2_DTLB_HIT) \
+__PMC_EV(K8, DC_L1_DTLB_MISS_AND_L2_DTLB_MISS) \
+__PMC_EV(K8, DC_MISALIGNED_DATA_REFERENCE) \
+__PMC_EV(K8, DC_MICROARCHITECTURAL_LATE_CANCEL) \
+__PMC_EV(K8, DC_MICROARCHITECTURAL_EARLY_CANCEL) \
+__PMC_EV(K8, DC_ONE_BIT_ECC_ERROR) \
+__PMC_EV(K8, DC_DISPATCHED_PREFETCH_INSTRUCTIONS) \
+__PMC_EV(K8, DC_DCACHE_ACCESSES_BY_LOCKS) \
+__PMC_EV(K8, BU_CPU_CLK_UNHALTED) \
+__PMC_EV(K8, BU_INTERNAL_L2_REQUEST) \
+__PMC_EV(K8, BU_FILL_REQUEST_L2_MISS) \
+__PMC_EV(K8, BU_FILL_INTO_L2) \
+__PMC_EV(K8, IC_FETCH) \
+__PMC_EV(K8, IC_MISS) \
+__PMC_EV(K8, IC_REFILL_FROM_L2) \
+__PMC_EV(K8, IC_REFILL_FROM_SYSTEM) \
+__PMC_EV(K8, IC_L1_ITLB_MISS_AND_L2_ITLB_HIT) \
+__PMC_EV(K8, IC_L1_ITLB_MISS_AND_L2_ITLB_MISS) \
+__PMC_EV(K8, IC_MICROARCHITECTURAL_RESYNC_BY_SNOOP) \
+__PMC_EV(K8, IC_INSTRUCTION_FETCH_STALL) \
+__PMC_EV(K8, IC_RETURN_STACK_HIT) \
+__PMC_EV(K8, IC_RETURN_STACK_OVERFLOW) \
+__PMC_EV(K8, FR_RETIRED_X86_INSTRUCTIONS) \
+__PMC_EV(K8, FR_RETIRED_UOPS) \
+__PMC_EV(K8, FR_RETIRED_BRANCHES) \
+__PMC_EV(K8, FR_RETIRED_BRANCHES_MISPREDICTED) \
+__PMC_EV(K8, FR_RETIRED_TAKEN_BRANCHES) \
+__PMC_EV(K8, FR_RETIRED_TAKEN_BRANCHES_MISPREDICTED) \
+__PMC_EV(K8, FR_RETIRED_FAR_CONTROL_TRANSFERS) \
+__PMC_EV(K8, FR_RETIRED_RESYNCS) \
+__PMC_EV(K8, FR_RETIRED_NEAR_RETURNS) \
+__PMC_EV(K8, FR_RETIRED_NEAR_RETURNS_MISPREDICTED) \
+__PMC_EV(K8, FR_RETIRED_TAKEN_BRANCHES_MISPREDICTED_BY_ADDR_MISCOMPARE) \
+__PMC_EV(K8, FR_RETIRED_FPU_INSTRUCTIONS) \
+__PMC_EV(K8, FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS) \
+__PMC_EV(K8, FR_INTERRUPTS_MASKED_CYCLES) \
+__PMC_EV(K8, FR_INTERRUPTS_MASKED_WHILE_PENDING_CYCLES) \
+__PMC_EV(K8, FR_TAKEN_HARDWARE_INTERRUPTS) \
+__PMC_EV(K8, FR_DECODER_EMPTY) \
+__PMC_EV(K8, FR_DISPATCH_STALLS) \
+__PMC_EV(K8, FR_DISPATCH_STALL_FROM_BRANCH_ABORT_TO_RETIRE) \
+__PMC_EV(K8, FR_DISPATCH_STALL_FOR_SERIALIZATION) \
+__PMC_EV(K8, FR_DISPATCH_STALL_FOR_SEGMENT_LOAD) \
+__PMC_EV(K8, FR_DISPATCH_STALL_WHEN_REORDER_BUFFER_IS_FULL) \
+__PMC_EV(K8, FR_DISPATCH_STALL_WHEN_RESERVATION_STATIONS_ARE_FULL) \
+__PMC_EV(K8, FR_DISPATCH_STALL_WHEN_FPU_IS_FULL) \
+__PMC_EV(K8, FR_DISPATCH_STALL_WHEN_LS_IS_FULL) \
+__PMC_EV(K8, FR_DISPATCH_STALL_WHEN_WAITING_FOR_ALL_TO_BE_QUIET) \
+__PMC_EV(K8, FR_DISPATCH_STALL_WHEN_FAR_XFER_OR_RESYNC_BRANCH_PENDING) \
+__PMC_EV(K8, FR_FPU_EXCEPTIONS) \
+__PMC_EV(K8, FR_NUMBER_OF_BREAKPOINTS_FOR_DR0) \
+__PMC_EV(K8, FR_NUMBER_OF_BREAKPOINTS_FOR_DR1) \
+__PMC_EV(K8, FR_NUMBER_OF_BREAKPOINTS_FOR_DR2) \
+__PMC_EV(K8, FR_NUMBER_OF_BREAKPOINTS_FOR_DR3) \
+__PMC_EV(K8, NB_MEMORY_CONTROLLER_PAGE_ACCESS_EVENT) \
+__PMC_EV(K8, NB_MEMORY_CONTROLLER_PAGE_TABLE_OVERFLOW) \
+__PMC_EV(K8, NB_MEMORY_CONTROLLER_DRAM_COMMAND_SLOTS_MISSED) \
+__PMC_EV(K8, NB_MEMORY_CONTROLLER_TURNAROUND) \
+__PMC_EV(K8, NB_MEMORY_CONTROLLER_BYPASS_SATURATION) \
+__PMC_EV(K8, NB_SIZED_COMMANDS) \
+__PMC_EV(K8, NB_PROBE_RESULT) \
+__PMC_EV(K8, NB_HT_BUS0_BANDWIDTH) \
+__PMC_EV(K8, NB_HT_BUS1_BANDWIDTH) \
+__PMC_EV(K8, NB_HT_BUS2_BANDWIDTH)
+
+#define PMC_EV_K8_FIRST PMC_EV_K8_FP_DISPATCHED_FPU_OPS
+#define PMC_EV_K8_LAST PMC_EV_K8_NB_HT_BUS2_BANDWIDTH
+
+
+/*
+ * Intel Pentium and Pentium MMX events, from the "Intel 64 and IA-32
+ * Architectures Software Developer's Manual, Volume 3B: System Programming
+ * Guide, Part 2, August 2007".
+ */
+#define __PMC_EV_P5() \
+__PMC_EV(P5, DATA_READ) \
+__PMC_EV(P5, DATA_WRITE) \
+__PMC_EV(P5, DATA_TLB_MISS) \
+__PMC_EV(P5, DATA_READ_MISS) \
+__PMC_EV(P5, DATA_WRITE_MISS) \
+__PMC_EV(P5, WRITE_HIT_TO_M_OR_E_STATE_LINES) \
+__PMC_EV(P5, DATA_CACHE_LINES_WRITTEN_BACK) \
+__PMC_EV(P5, EXTERNAL_SNOOPS) \
+__PMC_EV(P5, EXTERNAL_DATA_CACHE_SNOOP_HITS) \
+__PMC_EV(P5, MEMORY_ACCESSES_IN_BOTH_PIPES) \
+__PMC_EV(P5, BANK_CONFLICTS) \
+__PMC_EV(P5, MISALIGNED_DATA_OR_IO_REFERENCES) \
+__PMC_EV(P5, CODE_READ) \
+__PMC_EV(P5, CODE_TLB_MISS) \
+__PMC_EV(P5, CODE_CACHE_MISS) \
+__PMC_EV(P5, ANY_SEGMENT_REGISTER_LOADED) \
+__PMC_EV(P5, BRANCHES) \
+__PMC_EV(P5, BTB_HITS) \
+__PMC_EV(P5, TAKEN_BRANCH_OR_BTB_HIT) \
+__PMC_EV(P5, PIPELINE_FLUSHES) \
+__PMC_EV(P5, INSTRUCTIONS_EXECUTED) \
+__PMC_EV(P5, INSTRUCTIONS_EXECUTED_V_PIPE) \
+__PMC_EV(P5, BUS_CYCLE_DURATION) \
+__PMC_EV(P5, WRITE_BUFFER_FULL_STALL_DURATION) \
+__PMC_EV(P5, WAITING_FOR_DATA_MEMORY_READ_STALL_DURATION) \
+__PMC_EV(P5, STALL_ON_WRITE_TO_AN_E_OR_M_STATE_LINE) \
+__PMC_EV(P5, LOCKED_BUS_CYCLE) \
+__PMC_EV(P5, IO_READ_OR_WRITE_CYCLE) \
+__PMC_EV(P5, NONCACHEABLE_MEMORY_READS) \
+__PMC_EV(P5, PIPELINE_AGI_STALLS) \
+__PMC_EV(P5, FLOPS) \
+__PMC_EV(P5, BREAKPOINT_MATCH_ON_DR0_REGISTER) \
+__PMC_EV(P5, BREAKPOINT_MATCH_ON_DR1_REGISTER) \
+__PMC_EV(P5, BREAKPOINT_MATCH_ON_DR2_REGISTER) \
+__PMC_EV(P5, BREAKPOINT_MATCH_ON_DR3_REGISTER) \
+__PMC_EV(P5, HARDWARE_INTERRUPTS) \
+__PMC_EV(P5, DATA_READ_OR_WRITE) \
+__PMC_EV(P5, DATA_READ_MISS_OR_WRITE_MISS) \
+__PMC_EV(P5, BUS_OWNERSHIP_LATENCY) \
+__PMC_EV(P5, BUS_OWNERSHIP_TRANSFERS) \
+__PMC_EV(P5, MMX_INSTRUCTIONS_EXECUTED_U_PIPE) \
+__PMC_EV(P5, MMX_INSTRUCTIONS_EXECUTED_V_PIPE) \
+__PMC_EV(P5, CACHE_M_LINE_SHARING) \
+__PMC_EV(P5, CACHE_LINE_SHARING) \
+__PMC_EV(P5, EMMS_INSTRUCTIONS_EXECUTED) \
+__PMC_EV(P5, TRANSITIONS_BETWEEN_MMX_AND_FP_INSTRUCTIONS) \
+__PMC_EV(P5, BUS_UTILIZATION_DUE_TO_PROCESSOR_ACTIVITY) \
+__PMC_EV(P5, WRITES_TO_NONCACHEABLE_MEMORY) \
+__PMC_EV(P5, SATURATING_MMX_INSTRUCTIONS_EXECUTED) \
+__PMC_EV(P5, SATURATIONS_PERFORMED) \
+__PMC_EV(P5, NUMBER_OF_CYCLES_NOT_IN_HALT_STATE) \
+__PMC_EV(P5, DATA_CACHE_TLB_MISS_STALL_DURATION) \
+__PMC_EV(P5, MMX_INSTRUCTION_DATA_READS) \
+__PMC_EV(P5, MMX_INSTRUCTION_DATA_READ_MISSES) \
+__PMC_EV(P5, FLOATING_POINT_STALLS_DURATION) \
+__PMC_EV(P5, TAKEN_BRANCHES) \
+__PMC_EV(P5, D1_STARVATION_AND_FIFO_IS_EMPTY) \
+__PMC_EV(P5, D1_STARVATION_AND_ONLY_ONE_INSTRUCTION_IN_FIFO) \
+__PMC_EV(P5, MMX_INSTRUCTION_DATA_WRITES) \
+__PMC_EV(P5, MMX_INSTRUCTION_DATA_WRITE_MISSES) \
+__PMC_EV(P5, PIPELINE_FLUSHES_DUE_TO_WRONG_BRANCH_PREDICTIONS) \
+__PMC_EV(P5, \
+ PIPELINE_FLUSHES_DUE_TO_WRONG_BRANCH_PREDICTIONS_RESOLVED_IN_WB_STAGE) \
+__PMC_EV(P5, MISALIGNED_DATA_MEMORY_REFERENCE_ON_MMX_INSTRUCTIONS) \
+__PMC_EV(P5, PIPELINE_STALL_FOR_MMX_INSTRUCTION_DATA_MEMORY_READS) \
+__PMC_EV(P5, MISPREDICTED_OR_UNPREDICTED_RETURNS) \
+__PMC_EV(P5, PREDICTED_RETURNS) \
+__PMC_EV(P5, MMX_MULTIPLY_UNIT_INTERLOCK) \
+__PMC_EV(P5, MOVD_MOVQ_STORE_STALL_DUE_TO_PREVIOUS_MMX_OPERATION) \
+__PMC_EV(P5, RETURNS) \
+__PMC_EV(P5, BTB_FALSE_ENTRIES) \
+__PMC_EV(P5, BTB_MISS_PREDICTION_ON_NOT_TAKEN_BRANCH) \
+__PMC_EV(P5, \
+ FULL_WRITE_BUFFER_STALL_DURATION_WHILE_EXECUTING_MMX_INSTRUCTIONS) \
+__PMC_EV(P5, STALL_ON_MMX_INSTRUCTION_WRITE_TO_E_OR_M_STATE_LINE)
+
+#define PMC_EV_P5_FIRST PMC_EV_P5_DATA_READ
+#define PMC_EV_P5_LAST \
+ PMC_EV_P5_STALL_ON_MMX_INSTRUCTION_WRITE_TO_E_OR_M_STATE_LINE
+
+/*
+ * Events supported by Intel architectural fixed function counters,
+ * from the "Intel 64 and IA-32 Architectures Software Developer's
+ * Manual Volume 3B: System Programming Guide, Part 2", July 2008.
+ */
+#define __PMC_EV_IAF() \
+__PMC_EV(IAF, INSTR_RETIRED_ANY) \
+__PMC_EV(IAF, CPU_CLK_UNHALTED_CORE) \
+__PMC_EV(IAF, CPU_CLK_UNHALTED_REF)
+
+#define PMC_EV_IAF_FIRST PMC_EV_IAF_INSTR_RETIRED_ANY
+#define PMC_EV_IAF_LAST PMC_EV_IAF_CPU_CLK_UNHALTED_REF
+
+#define __PMC_EV_ALIAS_IAF() \
+__PMC_EV_ALIAS("instruction-retired", IAF_INSTR_RETIRED_ANY) \
+__PMC_EV_ALIAS("unhalted-core-cycles", IAF_CPU_CLK_UNHALTED_CORE) \
+__PMC_EV_ALIAS("unhalted-reference-cycles", IAF_CPU_CLK_UNHALTED_REF)
+
+/*
+ * Events supported by programmable function counters present in
+ * Intel Atom, Core and Core2 CPUs, from the "Intel 64 and IA-32
+ * Architectures Software Developer's Manual Volume 3B: System Programming
+ * Guide, Part 2", July 2008.
+ *
+ * These PMCs select events with a combination of an event code and
+ * unit mask. Quirks that need to be taken care of include:
+ * - The set of (event code, umask) combinations supported by a processor
+ * varies according to the processor model.
+ * - A given (event code, umask) combination need not measure the same
+ * hardware event in all processor models.
+ * - Event names in vendor documentation for an (event code, umask) pair
+ * may vary according to the CPU model.
+ * - Identically named events can map to different (event code, umask)
+ * pairs on different CPUs.
+ * - New (event code, umask) combinations continue to be added as CPUs
+ * evolve. The interface between hwpmc(4) and libpmc(3) needs to be
+ * robust with respect to ABI changes.
+ *
+ * The IAP_EVENT_* symbols below define the ABI between userland and kernel.
+ * New (event code, umask) combinations used in new CPUs are added
+ * to the end of the list. Vendor names for events are mapped to IAP_EVENT_*
+ * symbols using aliases. The final disambiguation of semantics based on
+ * the CPU model happens inside hwpmc(4).
+ */
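+
+/*
+ * For example, the architectural unhalted-core-cycles event (event
+ * code 3CH, umask 00H) is carried by the EVENT_3CH_00H entry below;
+ * the vendor documentation names for it on particular CPU models are
+ * mapped onto that symbol with aliases.
+ */
+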
+#define __PMC_EV_IAP() \
+__PMC_EV(IAP, EVENT_02H_01H) \
+__PMC_EV(IAP, EVENT_02H_81H) \
+__PMC_EV(IAP, EVENT_03H_00H) \
+__PMC_EV(IAP, EVENT_03H_01H) \
+__PMC_EV(IAP, EVENT_03H_02H) \
+__PMC_EV(IAP, EVENT_03H_04H) \
+__PMC_EV(IAP, EVENT_03H_08H) \
+__PMC_EV(IAP, EVENT_03H_10H) \
+__PMC_EV(IAP, EVENT_03H_20H) \
+__PMC_EV(IAP, EVENT_04H_00H) \
+__PMC_EV(IAP, EVENT_04H_01H) \
+__PMC_EV(IAP, EVENT_04H_02H) \
+__PMC_EV(IAP, EVENT_04H_07H) \
+__PMC_EV(IAP, EVENT_04H_08H) \
+__PMC_EV(IAP, EVENT_05H_00H) \
+__PMC_EV(IAP, EVENT_05H_01H) \
+__PMC_EV(IAP, EVENT_05H_02H) \
+__PMC_EV(IAP, EVENT_05H_03H) \
+__PMC_EV(IAP, EVENT_06H_00H) \
+__PMC_EV(IAP, EVENT_06H_01H) \
+__PMC_EV(IAP, EVENT_06H_02H) \
+__PMC_EV(IAP, EVENT_06H_04H) \
+__PMC_EV(IAP, EVENT_06H_08H) \
+__PMC_EV(IAP, EVENT_06H_0FH) \
+__PMC_EV(IAP, EVENT_07H_00H) \
+__PMC_EV(IAP, EVENT_07H_01H) \
+__PMC_EV(IAP, EVENT_07H_02H) \
+__PMC_EV(IAP, EVENT_07H_03H) \
+__PMC_EV(IAP, EVENT_07H_06H) \
+__PMC_EV(IAP, EVENT_07H_08H) \
+__PMC_EV(IAP, EVENT_08H_01H) \
+__PMC_EV(IAP, EVENT_08H_02H) \
+__PMC_EV(IAP, EVENT_08H_04H) \
+__PMC_EV(IAP, EVENT_08H_05H) \
+__PMC_EV(IAP, EVENT_08H_06H) \
+__PMC_EV(IAP, EVENT_08H_07H) \
+__PMC_EV(IAP, EVENT_08H_08H) \
+__PMC_EV(IAP, EVENT_08H_09H) \
+__PMC_EV(IAP, EVENT_08H_10H) \
+__PMC_EV(IAP, EVENT_08H_20H) \
+__PMC_EV(IAP, EVENT_08H_40H) \
+__PMC_EV(IAP, EVENT_08H_80H) \
+__PMC_EV(IAP, EVENT_08H_81H) \
+__PMC_EV(IAP, EVENT_08H_82H) \
+__PMC_EV(IAP, EVENT_08H_84H) \
+__PMC_EV(IAP, EVENT_09H_01H) \
+__PMC_EV(IAP, EVENT_09H_02H) \
+__PMC_EV(IAP, EVENT_09H_04H) \
+__PMC_EV(IAP, EVENT_09H_08H) \
+__PMC_EV(IAP, EVENT_0BH_01H) \
+__PMC_EV(IAP, EVENT_0BH_02H) \
+__PMC_EV(IAP, EVENT_0BH_10H) \
+__PMC_EV(IAP, EVENT_0CH_01H) \
+__PMC_EV(IAP, EVENT_0CH_02H) \
+__PMC_EV(IAP, EVENT_0CH_03H) \
+__PMC_EV(IAP, EVENT_0DH_03H) \
+__PMC_EV(IAP, EVENT_0DH_40H) \
+__PMC_EV(IAP, EVENT_0EH_01H) \
+__PMC_EV(IAP, EVENT_0EH_02H) \
+__PMC_EV(IAP, EVENT_0EH_10H) \
+__PMC_EV(IAP, EVENT_0EH_20H) \
+__PMC_EV(IAP, EVENT_0EH_40H) \
+__PMC_EV(IAP, EVENT_0FH_01H) \
+__PMC_EV(IAP, EVENT_0FH_02H) \
+__PMC_EV(IAP, EVENT_0FH_08H) \
+__PMC_EV(IAP, EVENT_0FH_10H) \
+__PMC_EV(IAP, EVENT_0FH_20H) \
+__PMC_EV(IAP, EVENT_0FH_80H) \
+__PMC_EV(IAP, EVENT_10H_00H) \
+__PMC_EV(IAP, EVENT_10H_01H) \
+__PMC_EV(IAP, EVENT_10H_02H) \
+__PMC_EV(IAP, EVENT_10H_04H) \
+__PMC_EV(IAP, EVENT_10H_08H) \
+__PMC_EV(IAP, EVENT_10H_10H) \
+__PMC_EV(IAP, EVENT_10H_20H) \
+__PMC_EV(IAP, EVENT_10H_40H) \
+__PMC_EV(IAP, EVENT_10H_80H) \
+__PMC_EV(IAP, EVENT_10H_81H) \
+__PMC_EV(IAP, EVENT_11H_00H) \
+__PMC_EV(IAP, EVENT_11H_01H) \
+__PMC_EV(IAP, EVENT_11H_02H) \
+__PMC_EV(IAP, EVENT_11H_81H) \
+__PMC_EV(IAP, EVENT_12H_00H) \
+__PMC_EV(IAP, EVENT_12H_01H) \
+__PMC_EV(IAP, EVENT_12H_02H) \
+__PMC_EV(IAP, EVENT_12H_04H) \
+__PMC_EV(IAP, EVENT_12H_08H) \
+__PMC_EV(IAP, EVENT_12H_10H) \
+__PMC_EV(IAP, EVENT_12H_20H) \
+__PMC_EV(IAP, EVENT_12H_40H) \
+__PMC_EV(IAP, EVENT_12H_81H) \
+__PMC_EV(IAP, EVENT_13H_00H) \
+__PMC_EV(IAP, EVENT_13H_01H) \
+__PMC_EV(IAP, EVENT_13H_02H) \
+__PMC_EV(IAP, EVENT_13H_04H) \
+__PMC_EV(IAP, EVENT_13H_07H) \
+__PMC_EV(IAP, EVENT_13H_81H) \
+__PMC_EV(IAP, EVENT_14H_00H) \
+__PMC_EV(IAP, EVENT_14H_01H) \
+__PMC_EV(IAP, EVENT_14H_02H) \
+__PMC_EV(IAP, EVENT_17H_01H) \
+__PMC_EV(IAP, EVENT_18H_00H) \
+__PMC_EV(IAP, EVENT_18H_01H) \
+__PMC_EV(IAP, EVENT_19H_00H) \
+__PMC_EV(IAP, EVENT_19H_01H) \
+__PMC_EV(IAP, EVENT_19H_02H) \
+__PMC_EV(IAP, EVENT_1DH_01H) \
+__PMC_EV(IAP, EVENT_1DH_02H) \
+__PMC_EV(IAP, EVENT_1DH_04H) \
+__PMC_EV(IAP, EVENT_1EH_01H) \
+__PMC_EV(IAP, EVENT_20H_01H) \
+__PMC_EV(IAP, EVENT_21H) \
+__PMC_EV(IAP, EVENT_22H) \
+__PMC_EV(IAP, EVENT_23H) \
+__PMC_EV(IAP, EVENT_24H) \
+__PMC_EV(IAP, EVENT_24H_01H) \
+__PMC_EV(IAP, EVENT_24H_02H) \
+__PMC_EV(IAP, EVENT_24H_03H) \
+__PMC_EV(IAP, EVENT_24H_04H) \
+__PMC_EV(IAP, EVENT_24H_08H) \
+__PMC_EV(IAP, EVENT_24H_0CH) \
+__PMC_EV(IAP, EVENT_24H_10H) \
+__PMC_EV(IAP, EVENT_24H_20H) \
+__PMC_EV(IAP, EVENT_24H_30H) \
+__PMC_EV(IAP, EVENT_24H_40H) \
+__PMC_EV(IAP, EVENT_24H_80H) \
+__PMC_EV(IAP, EVENT_24H_AAH) \
+__PMC_EV(IAP, EVENT_24H_C0H) \
+__PMC_EV(IAP, EVENT_24H_FFH) \
+__PMC_EV(IAP, EVENT_25H) \
+__PMC_EV(IAP, EVENT_26H) \
+__PMC_EV(IAP, EVENT_26H_01H) \
+__PMC_EV(IAP, EVENT_26H_02H) \
+__PMC_EV(IAP, EVENT_26H_04H) \
+__PMC_EV(IAP, EVENT_26H_08H) \
+__PMC_EV(IAP, EVENT_26H_0FH) \
+__PMC_EV(IAP, EVENT_26H_10H) \
+__PMC_EV(IAP, EVENT_26H_20H) \
+__PMC_EV(IAP, EVENT_26H_40H) \
+__PMC_EV(IAP, EVENT_26H_80H) \
+__PMC_EV(IAP, EVENT_26H_F0H) \
+__PMC_EV(IAP, EVENT_26H_FFH) \
+__PMC_EV(IAP, EVENT_27H) \
+__PMC_EV(IAP, EVENT_27H_01H) \
+__PMC_EV(IAP, EVENT_27H_02H) \
+__PMC_EV(IAP, EVENT_27H_04H) \
+__PMC_EV(IAP, EVENT_27H_08H) \
+__PMC_EV(IAP, EVENT_27H_0EH) \
+__PMC_EV(IAP, EVENT_27H_0FH) \
+__PMC_EV(IAP, EVENT_27H_10H) \
+__PMC_EV(IAP, EVENT_27H_20H) \
+__PMC_EV(IAP, EVENT_27H_40H) \
+__PMC_EV(IAP, EVENT_27H_80H) \
+__PMC_EV(IAP, EVENT_27H_E0H) \
+__PMC_EV(IAP, EVENT_27H_F0H) \
+__PMC_EV(IAP, EVENT_28H) \
+__PMC_EV(IAP, EVENT_28H_01H) \
+__PMC_EV(IAP, EVENT_28H_02H) \
+__PMC_EV(IAP, EVENT_28H_04H) \
+__PMC_EV(IAP, EVENT_28H_08H) \
+__PMC_EV(IAP, EVENT_28H_0FH) \
+__PMC_EV(IAP, EVENT_29H) \
+__PMC_EV(IAP, EVENT_2AH) \
+__PMC_EV(IAP, EVENT_2BH) \
+__PMC_EV(IAP, EVENT_2EH) \
+__PMC_EV(IAP, EVENT_2EH_01H) \
+__PMC_EV(IAP, EVENT_2EH_02H) \
+__PMC_EV(IAP, EVENT_2EH_41H) \
+__PMC_EV(IAP, EVENT_2EH_4FH) \
+__PMC_EV(IAP, EVENT_30H) \
+__PMC_EV(IAP, EVENT_32H) \
+__PMC_EV(IAP, EVENT_3AH) \
+__PMC_EV(IAP, EVENT_3AH_00H) \
+__PMC_EV(IAP, EVENT_3BH_C0H) \
+__PMC_EV(IAP, EVENT_3CH_00H) \
+__PMC_EV(IAP, EVENT_3CH_01H) \
+__PMC_EV(IAP, EVENT_3CH_02H) \
+__PMC_EV(IAP, EVENT_3DH_01H) \
+__PMC_EV(IAP, EVENT_40H) \
+__PMC_EV(IAP, EVENT_40H_01H) \
+__PMC_EV(IAP, EVENT_40H_02H) \
+__PMC_EV(IAP, EVENT_40H_04H) \
+__PMC_EV(IAP, EVENT_40H_08H) \
+__PMC_EV(IAP, EVENT_40H_0FH) \
+__PMC_EV(IAP, EVENT_40H_21H) \
+__PMC_EV(IAP, EVENT_41H) \
+__PMC_EV(IAP, EVENT_41H_01H) \
+__PMC_EV(IAP, EVENT_41H_02H) \
+__PMC_EV(IAP, EVENT_41H_04H) \
+__PMC_EV(IAP, EVENT_41H_08H) \
+__PMC_EV(IAP, EVENT_41H_0FH) \
+__PMC_EV(IAP, EVENT_41H_22H) \
+__PMC_EV(IAP, EVENT_42H) \
+__PMC_EV(IAP, EVENT_42H_01H) \
+__PMC_EV(IAP, EVENT_42H_02H) \
+__PMC_EV(IAP, EVENT_42H_04H) \
+__PMC_EV(IAP, EVENT_42H_08H) \
+__PMC_EV(IAP, EVENT_42H_10H) \
+__PMC_EV(IAP, EVENT_43H_01H) \
+__PMC_EV(IAP, EVENT_43H_02H) \
+__PMC_EV(IAP, EVENT_44H_02H) \
+__PMC_EV(IAP, EVENT_45H_0FH) \
+__PMC_EV(IAP, EVENT_46H_00H) \
+__PMC_EV(IAP, EVENT_47H_00H) \
+__PMC_EV(IAP, EVENT_48H_00H) \
+__PMC_EV(IAP, EVENT_48H_01H) \
+__PMC_EV(IAP, EVENT_48H_02H) \
+__PMC_EV(IAP, EVENT_49H_00H) \
+__PMC_EV(IAP, EVENT_49H_01H) \
+__PMC_EV(IAP, EVENT_49H_02H) \
+__PMC_EV(IAP, EVENT_49H_04H) \
+__PMC_EV(IAP, EVENT_49H_10H) \
+__PMC_EV(IAP, EVENT_49H_20H) \
+__PMC_EV(IAP, EVENT_49H_40H) \
+__PMC_EV(IAP, EVENT_49H_80H) \
+__PMC_EV(IAP, EVENT_4BH_00H) \
+__PMC_EV(IAP, EVENT_4BH_01H) \
+__PMC_EV(IAP, EVENT_4BH_02H) \
+__PMC_EV(IAP, EVENT_4BH_03H) \
+__PMC_EV(IAP, EVENT_4BH_08H) \
+__PMC_EV(IAP, EVENT_4CH_00H) \
+__PMC_EV(IAP, EVENT_4CH_01H) \
+__PMC_EV(IAP, EVENT_4CH_02H) \
+__PMC_EV(IAP, EVENT_4DH_01H) \
+__PMC_EV(IAP, EVENT_4EH_01H) \
+__PMC_EV(IAP, EVENT_4EH_02H) \
+__PMC_EV(IAP, EVENT_4EH_04H) \
+__PMC_EV(IAP, EVENT_4EH_10H) \
+__PMC_EV(IAP, EVENT_4FH_00H) \
+__PMC_EV(IAP, EVENT_4FH_02H) \
+__PMC_EV(IAP, EVENT_4FH_04H) \
+__PMC_EV(IAP, EVENT_4FH_08H) \
+__PMC_EV(IAP, EVENT_4FH_10H) \
+__PMC_EV(IAP, EVENT_51H_01H) \
+__PMC_EV(IAP, EVENT_51H_02H) \
+__PMC_EV(IAP, EVENT_51H_04H) \
+__PMC_EV(IAP, EVENT_51H_08H) \
+__PMC_EV(IAP, EVENT_52H_01H) \
+__PMC_EV(IAP, EVENT_53H_01H) \
+__PMC_EV(IAP, EVENT_58H_01H) \
+__PMC_EV(IAP, EVENT_58H_02H) \
+__PMC_EV(IAP, EVENT_58H_04H) \
+__PMC_EV(IAP, EVENT_58H_08H) \
+__PMC_EV(IAP, EVENT_59H_20H) \
+__PMC_EV(IAP, EVENT_59H_40H) \
+__PMC_EV(IAP, EVENT_59H_80H) \
+__PMC_EV(IAP, EVENT_5BH_0CH) \
+__PMC_EV(IAP, EVENT_5BH_0FH) \
+__PMC_EV(IAP, EVENT_5BH_40H) \
+__PMC_EV(IAP, EVENT_5BH_4FH) \
+__PMC_EV(IAP, EVENT_5CH_01H) \
+__PMC_EV(IAP, EVENT_5CH_02H) \
+__PMC_EV(IAP, EVENT_5EH_01H) \
+__PMC_EV(IAP, EVENT_5FH_01H) \
+__PMC_EV(IAP, EVENT_5FH_04H) \
+__PMC_EV(IAP, EVENT_60H) \
+__PMC_EV(IAP, EVENT_60H_01H) \
+__PMC_EV(IAP, EVENT_60H_02H) \
+__PMC_EV(IAP, EVENT_60H_04H) \
+__PMC_EV(IAP, EVENT_60H_08H) \
+__PMC_EV(IAP, EVENT_61H) \
+__PMC_EV(IAP, EVENT_61H_00H) \
+__PMC_EV(IAP, EVENT_62H) \
+__PMC_EV(IAP, EVENT_62H_00H) \
+__PMC_EV(IAP, EVENT_63H) \
+__PMC_EV(IAP, EVENT_63H_01H) \
+__PMC_EV(IAP, EVENT_63H_02H) \
+__PMC_EV(IAP, EVENT_64H) \
+__PMC_EV(IAP, EVENT_64H_40H) \
+__PMC_EV(IAP, EVENT_65H) \
+__PMC_EV(IAP, EVENT_66H) \
+__PMC_EV(IAP, EVENT_67H) \
+__PMC_EV(IAP, EVENT_68H) \
+__PMC_EV(IAP, EVENT_69H) \
+__PMC_EV(IAP, EVENT_6AH) \
+__PMC_EV(IAP, EVENT_6BH) \
+__PMC_EV(IAP, EVENT_6CH) \
+__PMC_EV(IAP, EVENT_6CH_01H) \
+__PMC_EV(IAP, EVENT_6DH) \
+__PMC_EV(IAP, EVENT_6EH) \
+__PMC_EV(IAP, EVENT_6FH) \
+__PMC_EV(IAP, EVENT_70H) \
+__PMC_EV(IAP, EVENT_77H) \
+__PMC_EV(IAP, EVENT_78H) \
+__PMC_EV(IAP, EVENT_79H_02H) \
+__PMC_EV(IAP, EVENT_79H_04H) \
+__PMC_EV(IAP, EVENT_79H_08H) \
+__PMC_EV(IAP, EVENT_79H_10H) \
+__PMC_EV(IAP, EVENT_79H_20H) \
+__PMC_EV(IAP, EVENT_79H_30H) \
+__PMC_EV(IAP, EVENT_79H_18H) \
+__PMC_EV(IAP, EVENT_79H_24H) \
+__PMC_EV(IAP, EVENT_79H_3CH) \
+__PMC_EV(IAP, EVENT_7AH) \
+__PMC_EV(IAP, EVENT_7BH) \
+__PMC_EV(IAP, EVENT_7DH) \
+__PMC_EV(IAP, EVENT_7EH) \
+__PMC_EV(IAP, EVENT_7EH_00H) \
+__PMC_EV(IAP, EVENT_7FH) \
+__PMC_EV(IAP, EVENT_80H_00H) \
+__PMC_EV(IAP, EVENT_80H_01H) \
+__PMC_EV(IAP, EVENT_80H_02H) \
+__PMC_EV(IAP, EVENT_80H_03H) \
+__PMC_EV(IAP, EVENT_80H_04H) \
+__PMC_EV(IAP, EVENT_80H_10H) \
+__PMC_EV(IAP, EVENT_81H_00H) \
+__PMC_EV(IAP, EVENT_81H_01H) \
+__PMC_EV(IAP, EVENT_81H_02H) \
+__PMC_EV(IAP, EVENT_82H_01H) \
+__PMC_EV(IAP, EVENT_82H_02H) \
+__PMC_EV(IAP, EVENT_82H_04H) \
+__PMC_EV(IAP, EVENT_82H_10H) \
+__PMC_EV(IAP, EVENT_82H_12H) \
+__PMC_EV(IAP, EVENT_82H_40H) \
+__PMC_EV(IAP, EVENT_83H_01H) \
+__PMC_EV(IAP, EVENT_83H_02H) \
+__PMC_EV(IAP, EVENT_85H_00H) \
+__PMC_EV(IAP, EVENT_85H_01H) \
+__PMC_EV(IAP, EVENT_85H_02H) \
+__PMC_EV(IAP, EVENT_85H_04H) \
+__PMC_EV(IAP, EVENT_85H_10H) \
+__PMC_EV(IAP, EVENT_85H_20H) \
+__PMC_EV(IAP, EVENT_85H_40H) \
+__PMC_EV(IAP, EVENT_85H_80H) \
+__PMC_EV(IAP, EVENT_86H_00H) \
+__PMC_EV(IAP, EVENT_87H_00H) \
+__PMC_EV(IAP, EVENT_87H_01H) \
+__PMC_EV(IAP, EVENT_87H_02H) \
+__PMC_EV(IAP, EVENT_87H_04H) \
+__PMC_EV(IAP, EVENT_87H_08H) \
+__PMC_EV(IAP, EVENT_87H_0FH) \
+__PMC_EV(IAP, EVENT_88H_00H) \
+__PMC_EV(IAP, EVENT_88H_01H) \
+__PMC_EV(IAP, EVENT_88H_02H) \
+__PMC_EV(IAP, EVENT_88H_04H) \
+__PMC_EV(IAP, EVENT_88H_07H) \
+__PMC_EV(IAP, EVENT_88H_08H) \
+__PMC_EV(IAP, EVENT_88H_10H) \
+__PMC_EV(IAP, EVENT_88H_20H) \
+__PMC_EV(IAP, EVENT_88H_30H) \
+__PMC_EV(IAP, EVENT_88H_40H) \
+__PMC_EV(IAP, EVENT_88H_80H) \
+__PMC_EV(IAP, EVENT_88H_7FH) \
+__PMC_EV(IAP, EVENT_88H_FFH) \
+__PMC_EV(IAP, EVENT_89H_00H) \
+__PMC_EV(IAP, EVENT_89H_01H) \
+__PMC_EV(IAP, EVENT_89H_02H) \
+__PMC_EV(IAP, EVENT_89H_04H) \
+__PMC_EV(IAP, EVENT_89H_07H) \
+__PMC_EV(IAP, EVENT_89H_08H) \
+__PMC_EV(IAP, EVENT_89H_10H) \
+__PMC_EV(IAP, EVENT_89H_20H) \
+__PMC_EV(IAP, EVENT_89H_30H) \
+__PMC_EV(IAP, EVENT_89H_40H) \
+__PMC_EV(IAP, EVENT_89H_80H) \
+__PMC_EV(IAP, EVENT_89H_7FH) \
+__PMC_EV(IAP, EVENT_89H_FFH) \
+__PMC_EV(IAP, EVENT_8AH_00H) \
+__PMC_EV(IAP, EVENT_8BH_00H) \
+__PMC_EV(IAP, EVENT_8CH_00H) \
+__PMC_EV(IAP, EVENT_8DH_00H) \
+__PMC_EV(IAP, EVENT_8EH_00H) \
+__PMC_EV(IAP, EVENT_8FH_00H) \
+__PMC_EV(IAP, EVENT_90H_00H) \
+__PMC_EV(IAP, EVENT_91H_00H) \
+__PMC_EV(IAP, EVENT_92H_00H) \
+__PMC_EV(IAP, EVENT_93H_00H) \
+__PMC_EV(IAP, EVENT_94H_00H) \
+__PMC_EV(IAP, EVENT_97H_00H) \
+__PMC_EV(IAP, EVENT_98H_00H) \
+__PMC_EV(IAP, EVENT_9CH_01H) \
+__PMC_EV(IAP, EVENT_A0H_00H) \
+__PMC_EV(IAP, EVENT_A1H_01H) \
+__PMC_EV(IAP, EVENT_A1H_02H) \
+__PMC_EV(IAP, EVENT_A1H_04H) \
+__PMC_EV(IAP, EVENT_A1H_08H) \
+__PMC_EV(IAP, EVENT_A1H_0CH) \
+__PMC_EV(IAP, EVENT_A1H_10H) \
+__PMC_EV(IAP, EVENT_A1H_20H) \
+__PMC_EV(IAP, EVENT_A1H_30H) \
+__PMC_EV(IAP, EVENT_A1H_40H) \
+__PMC_EV(IAP, EVENT_A1H_80H) \
+__PMC_EV(IAP, EVENT_A2H_00H) \
+__PMC_EV(IAP, EVENT_A2H_01H) \
+__PMC_EV(IAP, EVENT_A2H_02H) \
+__PMC_EV(IAP, EVENT_A2H_04H) \
+__PMC_EV(IAP, EVENT_A2H_08H) \
+__PMC_EV(IAP, EVENT_A2H_10H) \
+__PMC_EV(IAP, EVENT_A2H_20H) \
+__PMC_EV(IAP, EVENT_A2H_40H) \
+__PMC_EV(IAP, EVENT_A2H_80H) \
+__PMC_EV(IAP, EVENT_A3H_01H) \
+__PMC_EV(IAP, EVENT_A3H_02H) \
+__PMC_EV(IAP, EVENT_A3H_04H) \
+__PMC_EV(IAP, EVENT_A3H_08H) \
+__PMC_EV(IAP, EVENT_A6H_01H) \
+__PMC_EV(IAP, EVENT_A7H_01H) \
+__PMC_EV(IAP, EVENT_A8H_01H) \
+__PMC_EV(IAP, EVENT_AAH_01H) \
+__PMC_EV(IAP, EVENT_AAH_02H) \
+__PMC_EV(IAP, EVENT_AAH_03H) \
+__PMC_EV(IAP, EVENT_AAH_08H) \
+__PMC_EV(IAP, EVENT_ABH_01H) \
+__PMC_EV(IAP, EVENT_ABH_02H) \
+__PMC_EV(IAP, EVENT_ACH_02H) \
+__PMC_EV(IAP, EVENT_ACH_08H) \
+__PMC_EV(IAP, EVENT_ACH_0AH) \
+__PMC_EV(IAP, EVENT_AEH_01H) \
+__PMC_EV(IAP, EVENT_B0H_00H) \
+__PMC_EV(IAP, EVENT_B0H_01H) \
+__PMC_EV(IAP, EVENT_B0H_02H) \
+__PMC_EV(IAP, EVENT_B0H_04H) \
+__PMC_EV(IAP, EVENT_B0H_08H) \
+__PMC_EV(IAP, EVENT_B0H_10H) \
+__PMC_EV(IAP, EVENT_B0H_20H) \
+__PMC_EV(IAP, EVENT_B0H_40H) \
+__PMC_EV(IAP, EVENT_B0H_80H) \
+__PMC_EV(IAP, EVENT_B1H_00H) \
+__PMC_EV(IAP, EVENT_B1H_01H) \
+__PMC_EV(IAP, EVENT_B1H_02H) \
+__PMC_EV(IAP, EVENT_B1H_04H) \
+__PMC_EV(IAP, EVENT_B1H_08H) \
+__PMC_EV(IAP, EVENT_B1H_10H) \
+__PMC_EV(IAP, EVENT_B1H_1FH) \
+__PMC_EV(IAP, EVENT_B1H_20H) \
+__PMC_EV(IAP, EVENT_B1H_3FH) \
+__PMC_EV(IAP, EVENT_B1H_40H) \
+__PMC_EV(IAP, EVENT_B1H_80H) \
+__PMC_EV(IAP, EVENT_B2H_01H) \
+__PMC_EV(IAP, EVENT_B3H_01H) \
+__PMC_EV(IAP, EVENT_B3H_02H) \
+__PMC_EV(IAP, EVENT_B3H_04H) \
+__PMC_EV(IAP, EVENT_B3H_08H) \
+__PMC_EV(IAP, EVENT_B3H_10H) \
+__PMC_EV(IAP, EVENT_B3H_20H) \
+__PMC_EV(IAP, EVENT_B3H_81H) \
+__PMC_EV(IAP, EVENT_B3H_82H) \
+__PMC_EV(IAP, EVENT_B3H_84H) \
+__PMC_EV(IAP, EVENT_B3H_88H) \
+__PMC_EV(IAP, EVENT_B3H_90H) \
+__PMC_EV(IAP, EVENT_B3H_A0H) \
+__PMC_EV(IAP, EVENT_B4H_01H) \
+__PMC_EV(IAP, EVENT_B4H_02H) \
+__PMC_EV(IAP, EVENT_B4H_04H) \
+__PMC_EV(IAP, EVENT_B6H_01H) \
+__PMC_EV(IAP, EVENT_B7H_01H) \
+__PMC_EV(IAP, EVENT_B8H_01H) \
+__PMC_EV(IAP, EVENT_B8H_02H) \
+__PMC_EV(IAP, EVENT_B8H_04H) \
+__PMC_EV(IAP, EVENT_BAH_01H) \
+__PMC_EV(IAP, EVENT_BAH_02H) \
+__PMC_EV(IAP, EVENT_BBH_01H) \
+__PMC_EV(IAP, EVENT_BDH_01H) \
+__PMC_EV(IAP, EVENT_BDH_20H) \
+__PMC_EV(IAP, EVENT_BFH_05H) \
+__PMC_EV(IAP, EVENT_C0H_00H) \
+__PMC_EV(IAP, EVENT_C0H_01H) \
+__PMC_EV(IAP, EVENT_C0H_02H) \
+__PMC_EV(IAP, EVENT_C0H_04H) \
+__PMC_EV(IAP, EVENT_C0H_08H) \
+__PMC_EV(IAP, EVENT_C1H_00H) \
+__PMC_EV(IAP, EVENT_C1H_01H) \
+__PMC_EV(IAP, EVENT_C1H_02H) \
+__PMC_EV(IAP, EVENT_C1H_08H) \
+__PMC_EV(IAP, EVENT_C1H_10H) \
+__PMC_EV(IAP, EVENT_C1H_20H) \
+__PMC_EV(IAP, EVENT_C1H_FEH) \
+__PMC_EV(IAP, EVENT_C2H_00H) \
+__PMC_EV(IAP, EVENT_C2H_01H) \
+__PMC_EV(IAP, EVENT_C2H_02H) \
+__PMC_EV(IAP, EVENT_C2H_04H) \
+__PMC_EV(IAP, EVENT_C2H_07H) \
+__PMC_EV(IAP, EVENT_C2H_08H) \
+__PMC_EV(IAP, EVENT_C2H_0FH) \
+__PMC_EV(IAP, EVENT_C2H_10H) \
+__PMC_EV(IAP, EVENT_C3H_00H) \
+__PMC_EV(IAP, EVENT_C3H_01H) \
+__PMC_EV(IAP, EVENT_C3H_02H) \
+__PMC_EV(IAP, EVENT_C3H_04H) \
+__PMC_EV(IAP, EVENT_C3H_10H) \
+__PMC_EV(IAP, EVENT_C3H_20H) \
+__PMC_EV(IAP, EVENT_C4H_00H) \
+__PMC_EV(IAP, EVENT_C4H_01H) \
+__PMC_EV(IAP, EVENT_C4H_02H) \
+__PMC_EV(IAP, EVENT_C4H_04H) \
+__PMC_EV(IAP, EVENT_C4H_08H) \
+__PMC_EV(IAP, EVENT_C4H_0CH) \
+__PMC_EV(IAP, EVENT_C4H_0FH) \
+__PMC_EV(IAP, EVENT_C4H_10H) \
+__PMC_EV(IAP, EVENT_C4H_20H) \
+__PMC_EV(IAP, EVENT_C4H_40H) \
+__PMC_EV(IAP, EVENT_C5H_00H) \
+__PMC_EV(IAP, EVENT_C5H_01H) \
+__PMC_EV(IAP, EVENT_C5H_02H) \
+__PMC_EV(IAP, EVENT_C5H_04H) \
+__PMC_EV(IAP, EVENT_C5H_10H) \
+__PMC_EV(IAP, EVENT_C5H_20H) \
+__PMC_EV(IAP, EVENT_C6H_00H) \
+__PMC_EV(IAP, EVENT_C6H_01H) \
+__PMC_EV(IAP, EVENT_C6H_02H) \
+__PMC_EV(IAP, EVENT_C7H_00H) \
+__PMC_EV(IAP, EVENT_C7H_01H) \
+__PMC_EV(IAP, EVENT_C7H_02H) \
+__PMC_EV(IAP, EVENT_C7H_04H) \
+__PMC_EV(IAP, EVENT_C7H_08H) \
+__PMC_EV(IAP, EVENT_C7H_10H) \
+__PMC_EV(IAP, EVENT_C7H_1FH) \
+__PMC_EV(IAP, EVENT_C8H_00H) \
+__PMC_EV(IAP, EVENT_C8H_20H) \
+__PMC_EV(IAP, EVENT_C9H_00H) \
+__PMC_EV(IAP, EVENT_CAH_00H) \
+__PMC_EV(IAP, EVENT_CAH_01H) \
+__PMC_EV(IAP, EVENT_CAH_02H) \
+__PMC_EV(IAP, EVENT_CAH_04H) \
+__PMC_EV(IAP, EVENT_CAH_08H) \
+__PMC_EV(IAP, EVENT_CAH_10H) \
+__PMC_EV(IAP, EVENT_CAH_1EH) \
+__PMC_EV(IAP, EVENT_CBH_01H) \
+__PMC_EV(IAP, EVENT_CBH_02H) \
+__PMC_EV(IAP, EVENT_CBH_04H) \
+__PMC_EV(IAP, EVENT_CBH_08H) \
+__PMC_EV(IAP, EVENT_CBH_10H) \
+__PMC_EV(IAP, EVENT_CBH_40H) \
+__PMC_EV(IAP, EVENT_CBH_80H) \
+__PMC_EV(IAP, EVENT_CCH_00H) \
+__PMC_EV(IAP, EVENT_CCH_01H) \
+__PMC_EV(IAP, EVENT_CCH_02H) \
+__PMC_EV(IAP, EVENT_CCH_03H) \
+__PMC_EV(IAP, EVENT_CCH_20H) \
+__PMC_EV(IAP, EVENT_CDH_00H) \
+__PMC_EV(IAP, EVENT_CDH_01H) \
+__PMC_EV(IAP, EVENT_CDH_02H) \
+__PMC_EV(IAP, EVENT_CEH_00H) \
+__PMC_EV(IAP, EVENT_CFH_00H) \
+__PMC_EV(IAP, EVENT_D0H_00H) \
+__PMC_EV(IAP, EVENT_D0H_01H) \
+__PMC_EV(IAP, EVENT_D0H_02H) \
+__PMC_EV(IAP, EVENT_D0H_10H) \
+__PMC_EV(IAP, EVENT_D0H_20H) \
+__PMC_EV(IAP, EVENT_D0H_40H) \
+__PMC_EV(IAP, EVENT_D0H_80H) \
+__PMC_EV(IAP, EVENT_D1H_01H) \
+__PMC_EV(IAP, EVENT_D1H_02H) \
+__PMC_EV(IAP, EVENT_D1H_04H) \
+__PMC_EV(IAP, EVENT_D1H_08H) \
+__PMC_EV(IAP, EVENT_D1H_20H) \
+__PMC_EV(IAP, EVENT_D1H_40H) \
+__PMC_EV(IAP, EVENT_D2H_01H) \
+__PMC_EV(IAP, EVENT_D2H_02H) \
+__PMC_EV(IAP, EVENT_D2H_04H) \
+__PMC_EV(IAP, EVENT_D2H_08H) \
+__PMC_EV(IAP, EVENT_D2H_0FH) \
+__PMC_EV(IAP, EVENT_D2H_10H) \
+__PMC_EV(IAP, EVENT_D3H_01H) \
+__PMC_EV(IAP, EVENT_D3H_04H) \
+__PMC_EV(IAP, EVENT_D3H_10H) \
+__PMC_EV(IAP, EVENT_D3H_20H) \
+__PMC_EV(IAP, EVENT_D4H_01H) \
+__PMC_EV(IAP, EVENT_D4H_02H) \
+__PMC_EV(IAP, EVENT_D4H_04H) \
+__PMC_EV(IAP, EVENT_D4H_08H) \
+__PMC_EV(IAP, EVENT_D4H_0FH) \
+__PMC_EV(IAP, EVENT_D5H_01H) \
+__PMC_EV(IAP, EVENT_D5H_02H) \
+__PMC_EV(IAP, EVENT_D5H_04H) \
+__PMC_EV(IAP, EVENT_D5H_08H) \
+__PMC_EV(IAP, EVENT_D5H_0FH) \
+__PMC_EV(IAP, EVENT_D7H_00H) \
+__PMC_EV(IAP, EVENT_D8H_00H) \
+__PMC_EV(IAP, EVENT_D8H_01H) \
+__PMC_EV(IAP, EVENT_D8H_02H) \
+__PMC_EV(IAP, EVENT_D8H_03H) \
+__PMC_EV(IAP, EVENT_D8H_04H) \
+__PMC_EV(IAP, EVENT_D9H_00H) \
+__PMC_EV(IAP, EVENT_D9H_01H) \
+__PMC_EV(IAP, EVENT_D9H_02H) \
+__PMC_EV(IAP, EVENT_D9H_03H) \
+__PMC_EV(IAP, EVENT_DAH_00H) \
+__PMC_EV(IAP, EVENT_DAH_01H) \
+__PMC_EV(IAP, EVENT_DAH_02H) \
+__PMC_EV(IAP, EVENT_DBH_00H) \
+__PMC_EV(IAP, EVENT_DBH_01H) \
+__PMC_EV(IAP, EVENT_DCH_01H) \
+__PMC_EV(IAP, EVENT_DCH_02H) \
+__PMC_EV(IAP, EVENT_DCH_04H) \
+__PMC_EV(IAP, EVENT_DCH_08H) \
+__PMC_EV(IAP, EVENT_DCH_10H) \
+__PMC_EV(IAP, EVENT_DCH_1FH) \
+__PMC_EV(IAP, EVENT_E0H_00H) \
+__PMC_EV(IAP, EVENT_E0H_01H) \
+__PMC_EV(IAP, EVENT_E2H_00H) \
+__PMC_EV(IAP, EVENT_E4H_00H) \
+__PMC_EV(IAP, EVENT_E4H_01H) \
+__PMC_EV(IAP, EVENT_E5H_01H) \
+__PMC_EV(IAP, EVENT_E6H_00H) \
+__PMC_EV(IAP, EVENT_E6H_01H) \
+__PMC_EV(IAP, EVENT_E6H_02H) \
+__PMC_EV(IAP, EVENT_E6H_1FH) \
+__PMC_EV(IAP, EVENT_E8H_01H) \
+__PMC_EV(IAP, EVENT_E8H_02H) \
+__PMC_EV(IAP, EVENT_E8H_03H) \
+__PMC_EV(IAP, EVENT_ECH_01H) \
+__PMC_EV(IAP, EVENT_F0H_00H) \
+__PMC_EV(IAP, EVENT_F0H_01H) \
+__PMC_EV(IAP, EVENT_F0H_02H) \
+__PMC_EV(IAP, EVENT_F0H_04H) \
+__PMC_EV(IAP, EVENT_F0H_08H) \
+__PMC_EV(IAP, EVENT_F0H_10H) \
+__PMC_EV(IAP, EVENT_F0H_20H) \
+__PMC_EV(IAP, EVENT_F0H_40H) \
+__PMC_EV(IAP, EVENT_F0H_80H) \
+__PMC_EV(IAP, EVENT_F1H_01H) \
+__PMC_EV(IAP, EVENT_F1H_02H) \
+__PMC_EV(IAP, EVENT_F1H_04H) \
+__PMC_EV(IAP, EVENT_F1H_07H) \
+__PMC_EV(IAP, EVENT_F2H_01H) \
+__PMC_EV(IAP, EVENT_F2H_02H) \
+__PMC_EV(IAP, EVENT_F2H_04H) \
+__PMC_EV(IAP, EVENT_F2H_08H) \
+__PMC_EV(IAP, EVENT_F2H_0AH) \
+__PMC_EV(IAP, EVENT_F2H_0FH) \
+__PMC_EV(IAP, EVENT_F3H_01H) \
+__PMC_EV(IAP, EVENT_F3H_02H) \
+__PMC_EV(IAP, EVENT_F3H_04H) \
+__PMC_EV(IAP, EVENT_F3H_08H) \
+__PMC_EV(IAP, EVENT_F3H_10H) \
+__PMC_EV(IAP, EVENT_F3H_20H) \
+__PMC_EV(IAP, EVENT_F4H_01H) \
+__PMC_EV(IAP, EVENT_F4H_02H) \
+__PMC_EV(IAP, EVENT_F4H_04H) \
+__PMC_EV(IAP, EVENT_F4H_08H) \
+__PMC_EV(IAP, EVENT_F4H_10H) \
+__PMC_EV(IAP, EVENT_F6H_01H) \
+__PMC_EV(IAP, EVENT_F7H_01H) \
+__PMC_EV(IAP, EVENT_F7H_02H) \
+__PMC_EV(IAP, EVENT_F7H_04H) \
+__PMC_EV(IAP, EVENT_F8H_00H) \
+__PMC_EV(IAP, EVENT_F8H_01H) \
+__PMC_EV(IAP, EVENT_FDH_01H) \
+__PMC_EV(IAP, EVENT_FDH_02H) \
+__PMC_EV(IAP, EVENT_FDH_04H) \
+__PMC_EV(IAP, EVENT_FDH_08H) \
+__PMC_EV(IAP, EVENT_FDH_10H) \
+__PMC_EV(IAP, EVENT_FDH_20H) \
+__PMC_EV(IAP, EVENT_FDH_40H)
+
+#define PMC_EV_IAP_FIRST PMC_EV_IAP_EVENT_02H_01H
+#define PMC_EV_IAP_LAST PMC_EV_IAP_EVENT_FDH_40H
+
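+/*
+ * Editor's illustration (not part of the original header): the event
+ * list above is an X-macro; a consumer re-defines __PMC_EV and then
+ * expands the list macro.  A minimal sketch of the enum-building idiom,
+ * assuming the list macro is named __PMC_EV_IAP():
+ *
+ *	#define __PMC_EV(CLASS, NAME)	PMC_EV_ ## CLASS ## _ ## NAME,
+ *	enum pmc_event {
+ *		__PMC_EV_IAP()
+ *	};
+ *	#undef __PMC_EV
+ *
+ * Each __PMC_EV(IAP, EVENT_xxH_yyH) line then contributes one
+ * PMC_EV_IAP_EVENT_xxH_yyH enumerator, which is what lets the
+ * PMC_EV_IAP_FIRST/PMC_EV_IAP_LAST macros above delimit the range.
+ */
+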
+/*
+ * Map "architectural" event names to event ids.
+ */
+#define __PMC_EV_ALIAS_INTEL_ARCHITECTURAL() \
+__PMC_EV_ALIAS("branch-instruction-retired", IAP_EVENT_C4H_00H) \
+__PMC_EV_ALIAS("branch-misses-retired", IAP_EVENT_C5H_00H) \
+__PMC_EV_ALIAS("instruction-retired", IAP_EVENT_C0H_00H) \
+__PMC_EV_ALIAS("llc-misses", IAP_EVENT_2EH_41H) \
+__PMC_EV_ALIAS("llc-reference", IAP_EVENT_2EH_4FH) \
+__PMC_EV_ALIAS("unhalted-reference-cycles", IAP_EVENT_3CH_01H) \
+__PMC_EV_ALIAS("unhalted-core-cycles", IAP_EVENT_3CH_00H)
+
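+/*
+ * Editor's illustration (not part of the original header): the
+ * __PMC_EV_ALIAS lists below map user-visible event-name strings to
+ * the symbolic event ids above.  A string-to-event lookup table can be
+ * generated the same X-macro way; the struct and table names here are
+ * hypothetical:
+ *
+ *	struct pmc_ev_alias {
+ *		const char	*pm_alias;
+ *		enum pmc_event	 pm_event;
+ *	};
+ *	#define __PMC_EV_ALIAS(NAME, EV)	\
+ *		{ .pm_alias = NAME, .pm_event = PMC_EV_ ## EV },
+ *	static const struct pmc_ev_alias arch_aliases[] = {
+ *		__PMC_EV_ALIAS_INTEL_ARCHITECTURAL()
+ *	};
+ *	#undef __PMC_EV_ALIAS
+ */
+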
+/*
+ * Aliases for Atom PMCs.
+ */
+#define __PMC_EV_ALIAS_ATOM() \
+__PMC_EV_ALIAS_INTEL_ARCHITECTURAL() \
+__PMC_EV_ALIAS("BACLEARS", IAP_EVENT_E6H_01H) \
+__PMC_EV_ALIAS("BOGUS_BR", IAP_EVENT_E4H_00H) \
+__PMC_EV_ALIAS("BR_BAC_MISSP_EXEC", IAP_EVENT_8AH_00H) \
+__PMC_EV_ALIAS("BR_CALL_EXEC", IAP_EVENT_92H_00H) \
+__PMC_EV_ALIAS("BR_CALL_MISSP_EXEC", IAP_EVENT_93H_00H) \
+__PMC_EV_ALIAS("BR_CND_EXEC", IAP_EVENT_8BH_00H) \
+__PMC_EV_ALIAS("BR_CND_MISSP_EXEC", IAP_EVENT_8CH_00H) \
+__PMC_EV_ALIAS("BR_IND_CALL_EXEC", IAP_EVENT_94H_00H) \
+__PMC_EV_ALIAS("BR_IND_EXEC", IAP_EVENT_8DH_00H) \
+__PMC_EV_ALIAS("BR_IND_MISSP_EXEC", IAP_EVENT_8EH_00H) \
+__PMC_EV_ALIAS("BR_INST_DECODED", IAP_EVENT_E0H_01H) \
+__PMC_EV_ALIAS("BR_INST_EXEC", IAP_EVENT_88H_00H) \
+__PMC_EV_ALIAS("BR_INST_RETIRED.ANY", IAP_EVENT_C4H_00H) \
+__PMC_EV_ALIAS("BR_INST_RETIRED.ANY1", IAP_EVENT_C4H_0FH) \
+__PMC_EV_ALIAS("BR_INST_RETIRED.MISPRED", IAP_EVENT_C5H_00H) \
+__PMC_EV_ALIAS("BR_INST_RETIRED.MISPRED_NOT_TAKEN", \
+ IAP_EVENT_C4H_02H) \
+__PMC_EV_ALIAS("BR_INST_RETIRED.MISPRED_TAKEN", IAP_EVENT_C4H_08H) \
+__PMC_EV_ALIAS("BR_INST_RETIRED.PRED_NOT_TAKEN",IAP_EVENT_C4H_01H) \
+__PMC_EV_ALIAS("BR_INST_RETIRED.PRED_TAKEN", IAP_EVENT_C4H_04H) \
+__PMC_EV_ALIAS("BR_INST_RETIRED.TAKEN", IAP_EVENT_C4H_0CH) \
+__PMC_EV_ALIAS("BR_MISSP_EXEC", IAP_EVENT_89H_00H) \
+__PMC_EV_ALIAS("BR_RET_BAC_MISSP_EXEC", IAP_EVENT_91H_00H) \
+__PMC_EV_ALIAS("BR_RET_EXEC", IAP_EVENT_8FH_00H) \
+__PMC_EV_ALIAS("BR_RET_MISSP_EXEC", IAP_EVENT_90H_00H) \
+__PMC_EV_ALIAS("BR_TKN_BUBBLE_1", IAP_EVENT_97H_00H) \
+__PMC_EV_ALIAS("BR_TKN_BUBBLE_2", IAP_EVENT_98H_00H) \
+__PMC_EV_ALIAS("BUSQ_EMPTY", IAP_EVENT_7DH) \
+__PMC_EV_ALIAS("BUS_BNR_DRV", IAP_EVENT_61H) \
+__PMC_EV_ALIAS("BUS_DATA_RCV", IAP_EVENT_64H) \
+__PMC_EV_ALIAS("BUS_DRDY_CLOCKS", IAP_EVENT_62H) \
+__PMC_EV_ALIAS("BUS_HITM_DRV", IAP_EVENT_7BH) \
+__PMC_EV_ALIAS("BUS_HIT_DRV", IAP_EVENT_7AH) \
+__PMC_EV_ALIAS("BUS_IO_WAIT", IAP_EVENT_7FH) \
+__PMC_EV_ALIAS("BUS_LOCK_CLOCKS", IAP_EVENT_63H) \
+__PMC_EV_ALIAS("BUS_REQUEST_OUTSTANDING", IAP_EVENT_60H) \
+__PMC_EV_ALIAS("BUS_TRANS_ANY", IAP_EVENT_70H) \
+__PMC_EV_ALIAS("BUS_TRANS_BRD", IAP_EVENT_65H) \
+__PMC_EV_ALIAS("BUS_TRANS_BURST", IAP_EVENT_6EH) \
+__PMC_EV_ALIAS("BUS_TRANS_DEF", IAP_EVENT_6DH) \
+__PMC_EV_ALIAS("BUS_TRANS_IFETCH", IAP_EVENT_68H) \
+__PMC_EV_ALIAS("BUS_TRANS_INVAL", IAP_EVENT_69H) \
+__PMC_EV_ALIAS("BUS_TRANS_IO", IAP_EVENT_6CH) \
+__PMC_EV_ALIAS("BUS_TRANS_MEM", IAP_EVENT_6FH) \
+__PMC_EV_ALIAS("BUS_TRANS_P", IAP_EVENT_6BH) \
+__PMC_EV_ALIAS("BUS_TRANS_PWR", IAP_EVENT_6AH) \
+__PMC_EV_ALIAS("BUS_TRANS_RFO", IAP_EVENT_66H) \
+__PMC_EV_ALIAS("BUS_TRANS_WB", IAP_EVENT_67H) \
+__PMC_EV_ALIAS("CMP_SNOOP", IAP_EVENT_78H) \
+__PMC_EV_ALIAS("CPU_CLK_UNHALTED.BUS", IAP_EVENT_3CH_01H) \
+__PMC_EV_ALIAS("CPU_CLK_UNHALTED.CORE_P", IAP_EVENT_3CH_00H) \
+__PMC_EV_ALIAS("CPU_CLK_UNHALTED.NO_OTHER", IAP_EVENT_3CH_02H) \
+__PMC_EV_ALIAS("CYCLES_DIV_BUSY", IAP_EVENT_14H_01H) \
+__PMC_EV_ALIAS("CYCLES_INT_MASKED.CYCLES_INT_MASKED", \
+ IAP_EVENT_C6H_01H) \
+__PMC_EV_ALIAS("CYCLES_INT_MASKED.CYCLES_INT_PENDING_AND_MASKED", \
+ IAP_EVENT_C6H_02H) \
+__PMC_EV_ALIAS("CYCLES_L1I_MEM_STALLED", IAP_EVENT_86H_00H) \
+__PMC_EV_ALIAS("DATA_TLB_MISSES.DTLB_MISS", IAP_EVENT_08H_07H) \
+__PMC_EV_ALIAS("DATA_TLB_MISSES.DTLB_MISS_LD", IAP_EVENT_08H_05H) \
+__PMC_EV_ALIAS("DATA_TLB_MISSES.DTLB_MISS_ST", IAP_EVENT_08H_06H) \
+__PMC_EV_ALIAS("DATA_TLB_MISSES.UTLB_MISS_LD", IAP_EVENT_08H_09H) \
+__PMC_EV_ALIAS("DELAYED_BYPASS.FP", IAP_EVENT_19H_00H) \
+__PMC_EV_ALIAS("DELAYED_BYPASS.LOAD", IAP_EVENT_19H_01H) \
+__PMC_EV_ALIAS("DELAYED_BYPASS.SIMD", IAP_EVENT_19H_02H) \
+__PMC_EV_ALIAS("DIV", IAP_EVENT_13H_00H) \
+__PMC_EV_ALIAS("DIV.AR", IAP_EVENT_13H_81H) \
+__PMC_EV_ALIAS("DIV.S", IAP_EVENT_13H_01H) \
+__PMC_EV_ALIAS("DTLB_MISSES.ANY", IAP_EVENT_08H_01H) \
+__PMC_EV_ALIAS("DTLB_MISSES.L0_MISS_LD", IAP_EVENT_08H_04H) \
+__PMC_EV_ALIAS("DTLB_MISSES.MISS_LD", IAP_EVENT_08H_02H) \
+__PMC_EV_ALIAS("DTLB_MISSES.MISS_ST", IAP_EVENT_08H_08H) \
+__PMC_EV_ALIAS("EIST_TRANS", IAP_EVENT_3AH_00H) \
+__PMC_EV_ALIAS("ESP.ADDITIONS", IAP_EVENT_ABH_02H) \
+__PMC_EV_ALIAS("ESP.SYNCH", IAP_EVENT_ABH_01H) \
+__PMC_EV_ALIAS("EXT_SNOOP", IAP_EVENT_77H) \
+__PMC_EV_ALIAS("FP_ASSIST", IAP_EVENT_11H_01H) \
+__PMC_EV_ALIAS("FP_ASSIST.AR", IAP_EVENT_11H_81H) \
+__PMC_EV_ALIAS("FP_COMP_OPS_EXE", IAP_EVENT_10H_00H) \
+__PMC_EV_ALIAS("FP_MMX_TRANS_TO_FP", IAP_EVENT_CCH_02H) \
+__PMC_EV_ALIAS("FP_MMX_TRANS_TO_MMX", IAP_EVENT_CCH_01H) \
+__PMC_EV_ALIAS("HW_INT_RCV", IAP_EVENT_C8H_00H) \
+__PMC_EV_ALIAS("ICACHE.ACCESSES", IAP_EVENT_80H_03H) \
+__PMC_EV_ALIAS("ICACHE.MISSES", IAP_EVENT_80H_02H) \
+__PMC_EV_ALIAS("IDLE_DURING_DIV", IAP_EVENT_18H_00H) \
+__PMC_EV_ALIAS("ILD_STALL", IAP_EVENT_87H_00H) \
+__PMC_EV_ALIAS("INST_QUEUE.FULL", IAP_EVENT_83H_02H) \
+__PMC_EV_ALIAS("INST_RETIRED.ANY_P", IAP_EVENT_C0H_00H) \
+__PMC_EV_ALIAS("INST_RETIRED.LOADS", IAP_EVENT_C0H_01H) \
+__PMC_EV_ALIAS("INST_RETIRED.OTHER", IAP_EVENT_C0H_04H) \
+__PMC_EV_ALIAS("INST_RETIRED.STORES", IAP_EVENT_C0H_02H) \
+__PMC_EV_ALIAS("ITLB.FLUSH", IAP_EVENT_82H_04H) \
+__PMC_EV_ALIAS("ITLB.LARGE_MISS", IAP_EVENT_82H_10H) \
+__PMC_EV_ALIAS("ITLB.MISSES", IAP_EVENT_82H_02H) \
+__PMC_EV_ALIAS("ITLB.SMALL_MISS", IAP_EVENT_82H_02H) \
+__PMC_EV_ALIAS("ITLB_MISS_RETIRED", IAP_EVENT_C9H_00H) \
+__PMC_EV_ALIAS("L1D_ALL_CACHE_REF", IAP_EVENT_43H_02H) \
+__PMC_EV_ALIAS("L1D_ALL_REF", IAP_EVENT_43H_01H) \
+__PMC_EV_ALIAS("L1D_CACHE.LD", IAP_EVENT_40H_21H) \
+__PMC_EV_ALIAS("L1D_CACHE.ST", IAP_EVENT_41H_22H) \
+__PMC_EV_ALIAS("L1D_CACHE_LOCK", IAP_EVENT_42H) \
+__PMC_EV_ALIAS("L1D_CACHE_LOCK_DURATION", IAP_EVENT_42H_10H) \
+__PMC_EV_ALIAS("L1D_M_EVICT", IAP_EVENT_47H_00H) \
+__PMC_EV_ALIAS("L1D_M_REPL", IAP_EVENT_46H_00H) \
+__PMC_EV_ALIAS("L1D_PEND_MISS", IAP_EVENT_48H_00H) \
+__PMC_EV_ALIAS("L1D_PREFETCH.REQUESTS", IAP_EVENT_4EH_10H) \
+__PMC_EV_ALIAS("L1D_REPL", IAP_EVENT_45H_0FH) \
+__PMC_EV_ALIAS("L1D_SPLIT.LOADS", IAP_EVENT_49H_01H) \
+__PMC_EV_ALIAS("L1D_SPLIT.STORES", IAP_EVENT_49H_02H) \
+__PMC_EV_ALIAS("L1I_MISSES", IAP_EVENT_81H_00H) \
+__PMC_EV_ALIAS("L1I_READS", IAP_EVENT_80H_00H) \
+__PMC_EV_ALIAS("L2_ADS", IAP_EVENT_21H) \
+__PMC_EV_ALIAS("L2_DBUS_BUSY_RD", IAP_EVENT_23H) \
+__PMC_EV_ALIAS("L2_IFETCH", IAP_EVENT_28H) \
+__PMC_EV_ALIAS("L2_LD", IAP_EVENT_29H) \
+__PMC_EV_ALIAS("L2_LINES_IN", IAP_EVENT_24H) \
+__PMC_EV_ALIAS("L2_LINES_OUT", IAP_EVENT_26H) \
+__PMC_EV_ALIAS("L2_LOCK", IAP_EVENT_2BH) \
+__PMC_EV_ALIAS("L2_M_LINES_IN", IAP_EVENT_25H) \
+__PMC_EV_ALIAS("L2_M_LINES_OUT", IAP_EVENT_27H) \
+__PMC_EV_ALIAS("L2_NO_REQ", IAP_EVENT_32H) \
+__PMC_EV_ALIAS("L2_REJECT_BUSQ", IAP_EVENT_30H) \
+__PMC_EV_ALIAS("L2_RQSTS", IAP_EVENT_2EH) \
+__PMC_EV_ALIAS("L2_RQSTS.SELF.DEMAND.I_STATE", IAP_EVENT_2EH_41H) \
+__PMC_EV_ALIAS("L2_RQSTS.SELF.DEMAND.MESI", IAP_EVENT_2EH_4FH) \
+__PMC_EV_ALIAS("L2_ST", IAP_EVENT_2AH) \
+__PMC_EV_ALIAS("LOAD_BLOCK.L1D", IAP_EVENT_03H_20H) \
+__PMC_EV_ALIAS("LOAD_BLOCK.OVERLAP_STORE", IAP_EVENT_03H_08H) \
+__PMC_EV_ALIAS("LOAD_BLOCK.STA", IAP_EVENT_03H_02H) \
+__PMC_EV_ALIAS("LOAD_BLOCK.STD", IAP_EVENT_03H_04H) \
+__PMC_EV_ALIAS("LOAD_BLOCK.UNTIL_RETIRE", IAP_EVENT_03H_10H) \
+__PMC_EV_ALIAS("LOAD_HIT_PRE", IAP_EVENT_4CH_00H) \
+__PMC_EV_ALIAS("MACHINE_CLEARS.SMC", IAP_EVENT_C3H_01H) \
+__PMC_EV_ALIAS("MACHINE_NUKES.MEM_ORDER", IAP_EVENT_C3H_04H) \
+__PMC_EV_ALIAS("MACRO_INSTS.ALL_DECODED", IAP_EVENT_AAH_03H) \
+__PMC_EV_ALIAS("MACRO_INSTS.CISC_DECODED", IAP_EVENT_AAH_02H) \
+__PMC_EV_ALIAS("MEMORY_DISAMBIGUATION.RESET", IAP_EVENT_09H_01H) \
+__PMC_EV_ALIAS("MEMORY_DISAMBIGUATION.SUCCESS", IAP_EVENT_09H_02H) \
+__PMC_EV_ALIAS("MEM_LOAD_RETIRED.DTLB_MISS", IAP_EVENT_CBH_04H) \
+__PMC_EV_ALIAS("MEM_LOAD_RETIRED.L2_HIT", IAP_EVENT_CBH_01H) \
+__PMC_EV_ALIAS("MEM_LOAD_RETIRED.L2_LINE_MISS", IAP_EVENT_CBH_08H) \
+__PMC_EV_ALIAS("MEM_LOAD_RETIRED.L2_MISS", IAP_EVENT_CBH_02H) \
+__PMC_EV_ALIAS("MUL", IAP_EVENT_12H_00H) \
+__PMC_EV_ALIAS("MUL.AR", IAP_EVENT_12H_81H) \
+__PMC_EV_ALIAS("MUL.S", IAP_EVENT_12H_01H) \
+__PMC_EV_ALIAS("PAGE_WALKS.CYCLES", IAP_EVENT_0CH_03H) \
+__PMC_EV_ALIAS("PAGE_WALKS.WALKS", IAP_EVENT_0CH_03H) \
+__PMC_EV_ALIAS("PREFETCH.PREFETCHNTA", IAP_EVENT_07H_08H) \
+__PMC_EV_ALIAS("PREFETCH.PREFETCHT0", IAP_EVENT_07H_01H) \
+__PMC_EV_ALIAS("PREFETCH.SW_L2", IAP_EVENT_07H_06H) \
+__PMC_EV_ALIAS("PREF_RQSTS_DN", IAP_EVENT_F8H_00H) \
+__PMC_EV_ALIAS("PREF_RQSTS_UP", IAP_EVENT_F0H_00H) \
+__PMC_EV_ALIAS("RAT_STALLS.ANY", IAP_EVENT_D2H_0FH) \
+__PMC_EV_ALIAS("RAT_STALLS.FLAGS", IAP_EVENT_D2H_04H) \
+__PMC_EV_ALIAS("RAT_STALLS.FPSW", IAP_EVENT_D2H_08H) \
+__PMC_EV_ALIAS("RAT_STALLS.PARTIAL_CYCLES", IAP_EVENT_D2H_02H) \
+__PMC_EV_ALIAS("RAT_STALLS.ROB_READ_PORT", IAP_EVENT_D2H_01H) \
+__PMC_EV_ALIAS("RESOURCE_STALLS.ANY", IAP_EVENT_DCH_1FH) \
+__PMC_EV_ALIAS("RESOURCE_STALLS.BR_MISS_CLEAR", IAP_EVENT_DCH_10H) \
+__PMC_EV_ALIAS("RESOURCE_STALLS.FPCW", IAP_EVENT_DCH_08H) \
+__PMC_EV_ALIAS("RESOURCE_STALLS.LD_ST", IAP_EVENT_DCH_04H) \
+__PMC_EV_ALIAS("RESOURCE_STALLS.ROB_FULL", IAP_EVENT_DCH_01H) \
+__PMC_EV_ALIAS("RESOURCE_STALLS.RS_FULL", IAP_EVENT_DCH_02H) \
+__PMC_EV_ALIAS("RS_UOPS_DISPATCHED", IAP_EVENT_A0H_00H) \
+__PMC_EV_ALIAS("RS_UOPS_DISPATCHED.PORT0", IAP_EVENT_A1H_01H) \
+__PMC_EV_ALIAS("RS_UOPS_DISPATCHED.PORT1", IAP_EVENT_A1H_02H) \
+__PMC_EV_ALIAS("RS_UOPS_DISPATCHED.PORT2", IAP_EVENT_A1H_04H) \
+__PMC_EV_ALIAS("RS_UOPS_DISPATCHED.PORT3", IAP_EVENT_A1H_08H) \
+__PMC_EV_ALIAS("RS_UOPS_DISPATCHED.PORT4", IAP_EVENT_A1H_10H) \
+__PMC_EV_ALIAS("RS_UOPS_DISPATCHED.PORT5", IAP_EVENT_A1H_20H) \
+__PMC_EV_ALIAS("SB_DRAIN_CYCLES", IAP_EVENT_04H_01H) \
+__PMC_EV_ALIAS("SEGMENT_REG_LOADS.ANY", IAP_EVENT_06H_00H) \
+__PMC_EV_ALIAS("SEG_REG_RENAMES.ANY", IAP_EVENT_D5H_0FH) \
+__PMC_EV_ALIAS("SEG_REG_RENAMES.DS", IAP_EVENT_D5H_02H) \
+__PMC_EV_ALIAS("SEG_REG_RENAMES.ES", IAP_EVENT_D5H_01H) \
+__PMC_EV_ALIAS("SEG_REG_RENAMES.FS", IAP_EVENT_D5H_04H) \
+__PMC_EV_ALIAS("SEG_REG_RENAMES.GS", IAP_EVENT_D5H_08H) \
+__PMC_EV_ALIAS("SEG_RENAME_STALLS.ANY", IAP_EVENT_D4H_0FH) \
+__PMC_EV_ALIAS("SEG_RENAME_STALLS.DS", IAP_EVENT_D4H_02H) \
+__PMC_EV_ALIAS("SEG_RENAME_STALLS.ES", IAP_EVENT_D4H_01H) \
+__PMC_EV_ALIAS("SEG_RENAME_STALLS.FS", IAP_EVENT_D4H_04H) \
+__PMC_EV_ALIAS("SEG_RENAME_STALLS.GS", IAP_EVENT_D4H_08H) \
+__PMC_EV_ALIAS("SIMD_ASSIST", IAP_EVENT_CDH_00H) \
+__PMC_EV_ALIAS("SIMD_COMP_INST_RETIRED.PACKED_DOUBLE", \
+ IAP_EVENT_CAH_04H) \
+__PMC_EV_ALIAS("SIMD_COMP_INST_RETIRED.PACKED_SINGLE", \
+ IAP_EVENT_CAH_01H) \
+__PMC_EV_ALIAS("SIMD_COMP_INST_RETIRED.SCALAR_DOUBLE", \
+ IAP_EVENT_CAH_08H) \
+__PMC_EV_ALIAS("SIMD_COMP_INST_RETIRED.SCALAR_SINGLE", \
+ IAP_EVENT_CAH_02H) \
+__PMC_EV_ALIAS("SIMD_INSTR_RETIRED", IAP_EVENT_CEH_00H) \
+__PMC_EV_ALIAS("SIMD_INST_RETIRED.ANY", IAP_EVENT_C7H_1FH) \
+__PMC_EV_ALIAS("SIMD_INST_RETIRED.PACKED_DOUBLE", IAP_EVENT_C7H_04H) \
+__PMC_EV_ALIAS("SIMD_INST_RETIRED.PACKED_SINGLE", IAP_EVENT_C7H_01H) \
+__PMC_EV_ALIAS("SIMD_INST_RETIRED.SCALAR_DOUBLE", IAP_EVENT_C7H_08H) \
+__PMC_EV_ALIAS("SIMD_INST_RETIRED.SCALAR_SINGLE", IAP_EVENT_C7H_02H) \
+__PMC_EV_ALIAS("SIMD_INST_RETIRED.VECTOR", IAP_EVENT_C7H_10H) \
+__PMC_EV_ALIAS("SIMD_SAT_INSTR_RETIRED", IAP_EVENT_CFH_00H) \
+__PMC_EV_ALIAS("SIMD_SAT_UOP_EXEC.AR", IAP_EVENT_B1H_80H) \
+__PMC_EV_ALIAS("SIMD_SAT_UOP_EXEC.S", IAP_EVENT_B1H_00H) \
+__PMC_EV_ALIAS("SIMD_UOPS_EXEC.AR", IAP_EVENT_B0H_80H) \
+__PMC_EV_ALIAS("SIMD_UOPS_EXEC.S", IAP_EVENT_B0H_00H) \
+__PMC_EV_ALIAS("SIMD_UOP_TYPE_EXEC.ARITHMETIC.AR", IAP_EVENT_B3H_A0H) \
+__PMC_EV_ALIAS("SIMD_UOP_TYPE_EXEC.ARITHMETIC.S", IAP_EVENT_B3H_20H) \
+__PMC_EV_ALIAS("SIMD_UOP_TYPE_EXEC.LOGICAL.AR", IAP_EVENT_B3H_90H) \
+__PMC_EV_ALIAS("SIMD_UOP_TYPE_EXEC.LOGICAL.S", IAP_EVENT_B3H_10H) \
+__PMC_EV_ALIAS("SIMD_UOP_TYPE_EXEC.MUL.AR", IAP_EVENT_B3H_81H) \
+__PMC_EV_ALIAS("SIMD_UOP_TYPE_EXEC.MUL.S", IAP_EVENT_B3H_01H) \
+__PMC_EV_ALIAS("SIMD_UOP_TYPE_EXEC.PACK.AR", IAP_EVENT_B3H_84H) \
+__PMC_EV_ALIAS("SIMD_UOP_TYPE_EXEC.PACK.S", IAP_EVENT_B3H_04H) \
+__PMC_EV_ALIAS("SIMD_UOP_TYPE_EXEC.SHIFT.AR", IAP_EVENT_B3H_82H) \
+__PMC_EV_ALIAS("SIMD_UOP_TYPE_EXEC.SHIFT.S", IAP_EVENT_B3H_02H) \
+__PMC_EV_ALIAS("SIMD_UOP_TYPE_EXEC.UNPACK.AR", IAP_EVENT_B3H_88H) \
+__PMC_EV_ALIAS("SIMD_UOP_TYPE_EXEC.UNPACK.S", IAP_EVENT_B3H_08H) \
+__PMC_EV_ALIAS("SNOOP_STALL_DRV", IAP_EVENT_7EH) \
+__PMC_EV_ALIAS("SSE_PRE_EXEC.L2", IAP_EVENT_07H_02H) \
+__PMC_EV_ALIAS("SSE_PRE_EXEC.STORES", IAP_EVENT_07H_03H) \
+__PMC_EV_ALIAS("SSE_PRE_MISS.L1", IAP_EVENT_4BH_01H) \
+__PMC_EV_ALIAS("SSE_PRE_MISS.L2", IAP_EVENT_4BH_02H) \
+__PMC_EV_ALIAS("SSE_PRE_MISS.NTA", IAP_EVENT_4BH_00H) \
+__PMC_EV_ALIAS("STORE_BLOCK.ORDER", IAP_EVENT_04H_02H) \
+__PMC_EV_ALIAS("STORE_BLOCK.SNOOP", IAP_EVENT_04H_08H) \
+__PMC_EV_ALIAS("STORE_FORWARDS.GOOD", IAP_EVENT_02H_81H) \
+__PMC_EV_ALIAS("THERMAL_TRIP", IAP_EVENT_3BH_C0H) \
+__PMC_EV_ALIAS("UOPS_RETIRED.ANY", IAP_EVENT_C2H_10H) \
+__PMC_EV_ALIAS("UOPS_RETIRED.FUSED", IAP_EVENT_C2H_07H) \
+__PMC_EV_ALIAS("UOPS_RETIRED.LD_IND_BR", IAP_EVENT_C2H_01H) \
+__PMC_EV_ALIAS("UOPS_RETIRED.MACRO_FUSION", IAP_EVENT_C2H_04H) \
+__PMC_EV_ALIAS("UOPS_RETIRED.NON_FUSED", IAP_EVENT_C2H_08H) \
+__PMC_EV_ALIAS("UOPS_RETIRED.STD_STA", IAP_EVENT_C2H_02H) \
+__PMC_EV_ALIAS("X87_COMP_OPS_EXE.ANY.AR", IAP_EVENT_10H_81H) \
+__PMC_EV_ALIAS("X87_COMP_OPS_EXE.ANY.S", IAP_EVENT_10H_01H) \
+__PMC_EV_ALIAS("X87_OPS_RETIRED.ANY", IAP_EVENT_C1H_FEH) \
+__PMC_EV_ALIAS("X87_OPS_RETIRED.FXCH", IAP_EVENT_C1H_01H)
+
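+/*
+ * Editor's illustration (not part of the original header): the symbolic
+ * ids encode the Intel event-select and unit-mask bytes directly, as
+ * EVENT_<evsel>H[_<umask>H].  A userland tool could recover the two
+ * bytes from such a name with a parser along these lines (iap_parse is
+ * a hypothetical helper, shown as plain C):
+ *
+ *	#include <stdio.h>
+ *
+ *	static int
+ *	iap_parse(const char *name, unsigned *evsel, unsigned *umask)
+ *	{
+ *		if (sscanf(name, "EVENT_%2xH_%2xH", evsel, umask) == 2)
+ *			return (0);
+ *		*umask = 0;
+ *		if (sscanf(name, "EVENT_%2xH", evsel) == 1)
+ *			return (0);
+ *		return (-1);
+ *	}
+ */
+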
+/*
+ * Aliases for Core PMC events.
+ */
+#define __PMC_EV_ALIAS_CORE() \
+__PMC_EV_ALIAS_INTEL_ARCHITECTURAL() \
+__PMC_EV_ALIAS("BAClears", IAP_EVENT_E6H_00H) \
+__PMC_EV_ALIAS("BTB_Misses", IAP_EVENT_E2H_00H) \
+__PMC_EV_ALIAS("Br_BAC_Missp_Exec", IAP_EVENT_8AH_00H) \
+__PMC_EV_ALIAS("Br_Bogus", IAP_EVENT_E4H_00H) \
+__PMC_EV_ALIAS("Br_Call_Exec", IAP_EVENT_92H_00H) \
+__PMC_EV_ALIAS("Br_Call_Missp_Exec", IAP_EVENT_93H_00H) \
+__PMC_EV_ALIAS("Br_Cnd_Exec", IAP_EVENT_8BH_00H) \
+__PMC_EV_ALIAS("Br_Cnd_Missp_Exec", IAP_EVENT_8CH_00H) \
+__PMC_EV_ALIAS("Br_Ind_Call_Exec", IAP_EVENT_94H_00H) \
+__PMC_EV_ALIAS("Br_Ind_Exec", IAP_EVENT_8DH_00H) \
+__PMC_EV_ALIAS("Br_Ind_Missp_Exec", IAP_EVENT_8EH_00H) \
+__PMC_EV_ALIAS("Br_Inst_Exec", IAP_EVENT_88H_00H) \
+__PMC_EV_ALIAS("Br_Instr_Decoded", IAP_EVENT_E0H_00H) \
+__PMC_EV_ALIAS("Br_Instr_Ret", IAP_EVENT_C4H_00H) \
+__PMC_EV_ALIAS("Br_MisPred_Ret", IAP_EVENT_C5H_00H) \
+__PMC_EV_ALIAS("Br_MisPred_Taken_Ret", IAP_EVENT_CAH_00H) \
+__PMC_EV_ALIAS("Br_Missp_Exec", IAP_EVENT_89H_00H) \
+__PMC_EV_ALIAS("Br_Ret_BAC_Missp_Exec", IAP_EVENT_91H_00H) \
+__PMC_EV_ALIAS("Br_Ret_Exec", IAP_EVENT_8FH_00H) \
+__PMC_EV_ALIAS("Br_Ret_Missp_Exec", IAP_EVENT_90H_00H) \
+__PMC_EV_ALIAS("Br_Taken_Ret", IAP_EVENT_C9H_00H) \
+__PMC_EV_ALIAS("Bus_BNR_Clocks", IAP_EVENT_61H_00H) \
+__PMC_EV_ALIAS("Bus_DRDY_Clocks", IAP_EVENT_62H_00H) \
+__PMC_EV_ALIAS("Bus_Data_Rcv", IAP_EVENT_64H_40H) \
+__PMC_EV_ALIAS("Bus_Locks_Clocks", IAP_EVENT_63H) \
+__PMC_EV_ALIAS("Bus_Not_In_Use", IAP_EVENT_7DH) \
+__PMC_EV_ALIAS("Bus_Req_Outstanding", IAP_EVENT_60H) \
+__PMC_EV_ALIAS("Bus_Snoop_Stall", IAP_EVENT_7EH_00H) \
+__PMC_EV_ALIAS("Bus_Snoops", IAP_EVENT_77H) \
+__PMC_EV_ALIAS("Bus_Trans_Any", IAP_EVENT_70H) \
+__PMC_EV_ALIAS("Bus_Trans_Brd", IAP_EVENT_65H) \
+__PMC_EV_ALIAS("Bus_Trans_Burst", IAP_EVENT_6EH) \
+__PMC_EV_ALIAS("Bus_Trans_Def", IAP_EVENT_6DH) \
+__PMC_EV_ALIAS("Bus_Trans_IO", IAP_EVENT_6CH) \
+__PMC_EV_ALIAS("Bus_Trans_Ifetch", IAP_EVENT_68H) \
+__PMC_EV_ALIAS("Bus_Trans_Inval", IAP_EVENT_69H) \
+__PMC_EV_ALIAS("Bus_Trans_Mem", IAP_EVENT_6FH) \
+__PMC_EV_ALIAS("Bus_Trans_P", IAP_EVENT_6BH) \
+__PMC_EV_ALIAS("Bus_Trans_Pwr", IAP_EVENT_6AH) \
+__PMC_EV_ALIAS("Bus_Trans_RFO", IAP_EVENT_66H) \
+__PMC_EV_ALIAS("Bus_Trans_WB", IAP_EVENT_67H) \
+__PMC_EV_ALIAS("Cycles_Div_Busy", IAP_EVENT_14H_00H) \
+__PMC_EV_ALIAS("Cycles_Int_Masked", IAP_EVENT_C6H_00H) \
+__PMC_EV_ALIAS("Cycles_Int_Pending_Masked", IAP_EVENT_C7H_00H) \
+__PMC_EV_ALIAS("DCU_Snoop_To_Share", IAP_EVENT_78H) \
+__PMC_EV_ALIAS("DCache_Cache_LD", IAP_EVENT_40H) \
+__PMC_EV_ALIAS("DCache_Cache_Lock", IAP_EVENT_42H) \
+__PMC_EV_ALIAS("DCache_Cache_ST", IAP_EVENT_41H) \
+__PMC_EV_ALIAS("DCache_M_Evict", IAP_EVENT_47H_00H) \
+__PMC_EV_ALIAS("DCache_M_Repl", IAP_EVENT_46H_00H) \
+__PMC_EV_ALIAS("DCache_Pend_Miss", IAP_EVENT_48H_00H) \
+__PMC_EV_ALIAS("DCache_Repl", IAP_EVENT_45H_0FH) \
+__PMC_EV_ALIAS("Data_Mem_Cache_Ref", IAP_EVENT_44H_02H) \
+__PMC_EV_ALIAS("Data_Mem_Ref", IAP_EVENT_43H_01H) \
+__PMC_EV_ALIAS("Dbus_Busy", IAP_EVENT_22H) \
+__PMC_EV_ALIAS("Dbus_Busy_Rd", IAP_EVENT_23H) \
+__PMC_EV_ALIAS("Div", IAP_EVENT_13H_00H) \
+__PMC_EV_ALIAS("Dtlb_Miss", IAP_EVENT_49H_00H) \
+__PMC_EV_ALIAS("ESP_Uops", IAP_EVENT_D7H_00H) \
+__PMC_EV_ALIAS("EST_Trans", IAP_EVENT_3AH) \
+__PMC_EV_ALIAS("FP_Assist", IAP_EVENT_11H_00H) \
+__PMC_EV_ALIAS("FP_Comp_Instr_Ret", IAP_EVENT_C1H_00H) \
+__PMC_EV_ALIAS("FP_Comps_Op_Exe", IAP_EVENT_10H_00H) \
+__PMC_EV_ALIAS("FP_MMX_Trans", IAP_EVENT_CCH_01H) \
+__PMC_EV_ALIAS("Fused_Ld_Uops_Ret", IAP_EVENT_DAH_01H) \
+__PMC_EV_ALIAS("Fused_St_Uops_Ret", IAP_EVENT_DAH_02H) \
+__PMC_EV_ALIAS("Fused_Uops_Ret", IAP_EVENT_DAH_00H) \
+__PMC_EV_ALIAS("HW_Int_Rx", IAP_EVENT_C8H_00H) \
+__PMC_EV_ALIAS("ICache_Misses", IAP_EVENT_81H_00H) \
+__PMC_EV_ALIAS("ICache_Reads", IAP_EVENT_80H_00H) \
+__PMC_EV_ALIAS("IFU_Mem_Stall", IAP_EVENT_86H_00H) \
+__PMC_EV_ALIAS("ILD_Stall", IAP_EVENT_87H_00H) \
+__PMC_EV_ALIAS("ITLB_Misses", IAP_EVENT_85H_00H) \
+__PMC_EV_ALIAS("Instr_Decoded", IAP_EVENT_D0H_00H) \
+__PMC_EV_ALIAS("Instr_Ret", IAP_EVENT_C0H_00H) \
+__PMC_EV_ALIAS("L1_Pref_Req", IAP_EVENT_4FH_00H) \
+__PMC_EV_ALIAS("L2_ADS", IAP_EVENT_21H) \
+__PMC_EV_ALIAS("L2_IFetch", IAP_EVENT_28H) \
+__PMC_EV_ALIAS("L2_LD", IAP_EVENT_29H) \
+__PMC_EV_ALIAS("L2_Lines_In", IAP_EVENT_24H) \
+__PMC_EV_ALIAS("L2_Lines_Out", IAP_EVENT_26H) \
+__PMC_EV_ALIAS("L2_M_Lines_In", IAP_EVENT_25H) \
+__PMC_EV_ALIAS("L2_M_Lines_Out", IAP_EVENT_27H) \
+__PMC_EV_ALIAS("L2_No_Request_Cycles", IAP_EVENT_32H) \
+__PMC_EV_ALIAS("L2_Reject_Cycles", IAP_EVENT_30H) \
+__PMC_EV_ALIAS("L2_Rqsts", IAP_EVENT_2EH) \
+__PMC_EV_ALIAS("L2_ST", IAP_EVENT_2AH) \
+__PMC_EV_ALIAS("LD_Blocks", IAP_EVENT_03H_00H) \
+__PMC_EV_ALIAS("LLC_Misses", IAP_EVENT_2EH_41H) \
+__PMC_EV_ALIAS("LLC_Reference", IAP_EVENT_2EH_4FH) \
+__PMC_EV_ALIAS("MMX_Assist", IAP_EVENT_CDH_00H) \
+__PMC_EV_ALIAS("MMX_FP_Trans", IAP_EVENT_CCH_00H) \
+__PMC_EV_ALIAS("MMX_Instr_Exec", IAP_EVENT_B0H_00H) \
+__PMC_EV_ALIAS("MMX_Instr_Ret", IAP_EVENT_CEH_00H) \
+__PMC_EV_ALIAS("Misalign_Mem_Ref", IAP_EVENT_05H_00H) \
+__PMC_EV_ALIAS("Mul", IAP_EVENT_12H_00H) \
+__PMC_EV_ALIAS("NonHlt_Ref_Cycles", IAP_EVENT_3CH_01H) \
+__PMC_EV_ALIAS("Pref_Rqsts_Dn", IAP_EVENT_F8H_00H) \
+__PMC_EV_ALIAS("Pref_Rqsts_Up", IAP_EVENT_F0H_00H) \
+__PMC_EV_ALIAS("Resource_Stall", IAP_EVENT_A2H_00H) \
+__PMC_EV_ALIAS("SD_Drains", IAP_EVENT_04H_00H) \
+__PMC_EV_ALIAS("SIMD_FP_DP_P_Comp_Ret", IAP_EVENT_D9H_02H) \
+__PMC_EV_ALIAS("SIMD_FP_DP_P_Ret", IAP_EVENT_D8H_02H) \
+__PMC_EV_ALIAS("SIMD_FP_DP_S_Comp_Ret", IAP_EVENT_D9H_03H) \
+__PMC_EV_ALIAS("SIMD_FP_DP_S_Ret", IAP_EVENT_D8H_03H) \
+__PMC_EV_ALIAS("SIMD_FP_SP_P_Comp_Ret", IAP_EVENT_D9H_00H) \
+__PMC_EV_ALIAS("SIMD_FP_SP_Ret", IAP_EVENT_D8H_00H) \
+__PMC_EV_ALIAS("SIMD_FP_SP_S_Comp_Ret", IAP_EVENT_D9H_01H) \
+__PMC_EV_ALIAS("SIMD_FP_SP_S_Ret", IAP_EVENT_D8H_01H) \
+__PMC_EV_ALIAS("SIMD_Int_128_Ret", IAP_EVENT_D8H_04H) \
+__PMC_EV_ALIAS("SIMD_Int_Pari_Exec", IAP_EVENT_B3H_20H) \
+__PMC_EV_ALIAS("SIMD_Int_Pck_Exec", IAP_EVENT_B3H_04H) \
+__PMC_EV_ALIAS("SIMD_Int_Plog_Exec", IAP_EVENT_B3H_10H) \
+__PMC_EV_ALIAS("SIMD_Int_Pmul_Exec", IAP_EVENT_B3H_01H) \
+__PMC_EV_ALIAS("SIMD_Int_Psft_Exec", IAP_EVENT_B3H_02H) \
+__PMC_EV_ALIAS("SIMD_Int_Sat_Exec", IAP_EVENT_B1H_00H) \
+__PMC_EV_ALIAS("SIMD_Int_Upck_Exec", IAP_EVENT_B3H_08H) \
+__PMC_EV_ALIAS("SMC_Detected", IAP_EVENT_C3H_00H) \
+__PMC_EV_ALIAS("SSE_NTStores_Miss", IAP_EVENT_4BH_03H) \
+__PMC_EV_ALIAS("SSE_NTStores_Ret", IAP_EVENT_07H_03H) \
+__PMC_EV_ALIAS("SSE_PrefNta_Miss", IAP_EVENT_4BH_00H) \
+__PMC_EV_ALIAS("SSE_PrefNta_Ret", IAP_EVENT_07H_00H) \
+__PMC_EV_ALIAS("SSE_PrefT1_Miss", IAP_EVENT_4BH_01H) \
+__PMC_EV_ALIAS("SSE_PrefT1_Ret", IAP_EVENT_07H_01H) \
+__PMC_EV_ALIAS("SSE_PrefT2_Miss", IAP_EVENT_4BH_02H) \
+__PMC_EV_ALIAS("SSE_PrefT2_Ret", IAP_EVENT_07H_02H) \
+__PMC_EV_ALIAS("Seg_Reg_Loads", IAP_EVENT_06H_00H) \
+__PMC_EV_ALIAS("Serial_Execution_Cycles", IAP_EVENT_3CH_02H) \
+__PMC_EV_ALIAS("Thermal_Trip", IAP_EVENT_3BH_C0H) \
+__PMC_EV_ALIAS("Unfusion", IAP_EVENT_DBH_00H) \
+__PMC_EV_ALIAS("Unhalted_Core_Cycles", IAP_EVENT_3CH_00H) \
+__PMC_EV_ALIAS("Uops_Ret", IAP_EVENT_C2H_00H)
+
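+/*
+ * Editor's illustration (not part of the original header): these alias
+ * strings are what userland passes to libpmc when allocating a counter.
+ * A minimal system-scope counting sequence, assuming the documented
+ * pmc_init()/pmc_allocate()/pmc_start()/pmc_read() interfaces and using
+ * the Core alias "Instr_Ret" from the list above:
+ *
+ *	#include <pmc.h>
+ *	#include <err.h>
+ *
+ *	pmc_id_t id;
+ *	pmc_value_t count;
+ *
+ *	if (pmc_init() < 0)
+ *		err(1, "pmc_init");
+ *	if (pmc_allocate("Instr_Ret", PMC_MODE_SC, 0, 0, &id) < 0)
+ *		err(1, "pmc_allocate");
+ *	if (pmc_start(id) < 0)
+ *		err(1, "pmc_start");
+ *	(... run the workload ...)
+ *	if (pmc_read(id, &count) < 0)
+ *		err(1, "pmc_read");
+ */
+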
+/*
+ * Aliases for Core2 PMC events.
+ */
+#define __PMC_EV_ALIAS_CORE2() \
+__PMC_EV_ALIAS_INTEL_ARCHITECTURAL() \
+__PMC_EV_ALIAS("BACLEARS", IAP_EVENT_E6H_00H) \
+__PMC_EV_ALIAS("BOGUS_BR", IAP_EVENT_E4H_00H) \
+__PMC_EV_ALIAS("BR_BAC_MISSP_EXEC", IAP_EVENT_8AH_00H) \
+__PMC_EV_ALIAS("BR_CALL_EXEC", IAP_EVENT_92H_00H) \
+__PMC_EV_ALIAS("BR_CALL_MISSP_EXEC", IAP_EVENT_93H_00H) \
+__PMC_EV_ALIAS("BR_CND_EXEC", IAP_EVENT_8BH_00H) \
+__PMC_EV_ALIAS("BR_CND_MISSP_EXEC", IAP_EVENT_8CH_00H) \
+__PMC_EV_ALIAS("BR_IND_CALL_EXEC", IAP_EVENT_94H_00H) \
+__PMC_EV_ALIAS("BR_IND_EXEC", IAP_EVENT_8DH_00H) \
+__PMC_EV_ALIAS("BR_IND_MISSP_EXEC", IAP_EVENT_8EH_00H) \
+__PMC_EV_ALIAS("BR_INST_DECODED", IAP_EVENT_E0H_00H) \
+__PMC_EV_ALIAS("BR_INST_EXEC", IAP_EVENT_88H_00H) \
+__PMC_EV_ALIAS("BR_INST_RETIRED.ANY", IAP_EVENT_C4H_00H) \
+__PMC_EV_ALIAS("BR_INST_RETIRED.MISPRED", IAP_EVENT_C5H_00H) \
+__PMC_EV_ALIAS("BR_INST_RETIRED.MISPRED_NOT_TAKEN", \
+ IAP_EVENT_C4H_02H) \
+__PMC_EV_ALIAS("BR_INST_RETIRED.MISPRED_TAKEN", \
+ IAP_EVENT_C4H_08H) \
+__PMC_EV_ALIAS("BR_INST_RETIRED.PRED_NOT_TAKEN", \
+ IAP_EVENT_C4H_01H) \
+__PMC_EV_ALIAS("BR_INST_RETIRED.PRED_TAKEN", \
+ IAP_EVENT_C4H_04H) \
+__PMC_EV_ALIAS("BR_INST_RETIRED.TAKEN", IAP_EVENT_C4H_0CH) \
+__PMC_EV_ALIAS("BR_MISSP_EXEC", IAP_EVENT_89H_00H) \
+__PMC_EV_ALIAS("BR_RET_BAC_MISSP_EXEC", IAP_EVENT_91H_00H) \
+__PMC_EV_ALIAS("BR_RET_EXEC", IAP_EVENT_8FH_00H) \
+__PMC_EV_ALIAS("BR_RET_MISSP_EXEC", IAP_EVENT_90H_00H) \
+__PMC_EV_ALIAS("BR_TKN_BUBBLE_1", IAP_EVENT_97H_00H) \
+__PMC_EV_ALIAS("BR_TKN_BUBBLE_2", IAP_EVENT_98H_00H) \
+__PMC_EV_ALIAS("BUSQ_EMPTY", IAP_EVENT_7DH) \
+__PMC_EV_ALIAS("BUS_BNR_DRV", IAP_EVENT_61H) \
+__PMC_EV_ALIAS("BUS_DATA_RCV", IAP_EVENT_64H) \
+__PMC_EV_ALIAS("BUS_DRDY_CLOCKS", IAP_EVENT_62H) \
+__PMC_EV_ALIAS("BUS_HITM_DRV", IAP_EVENT_7BH) \
+__PMC_EV_ALIAS("BUS_HIT_DRV", IAP_EVENT_7AH) \
+__PMC_EV_ALIAS("BUS_IO_WAIT", IAP_EVENT_7FH) \
+__PMC_EV_ALIAS("BUS_LOCK_CLOCKS", IAP_EVENT_63H) \
+__PMC_EV_ALIAS("BUS_REQUEST_OUTSTANDING", \
+ IAP_EVENT_60H) \
+__PMC_EV_ALIAS("BUS_TRANS_ANY", IAP_EVENT_70H) \
+__PMC_EV_ALIAS("BUS_TRANS_BRD", IAP_EVENT_65H) \
+__PMC_EV_ALIAS("BUS_TRANS_BURST", IAP_EVENT_6EH) \
+__PMC_EV_ALIAS("BUS_TRANS_DEF", IAP_EVENT_6DH) \
+__PMC_EV_ALIAS("BUS_TRANS_IFETCH", IAP_EVENT_68H) \
+__PMC_EV_ALIAS("BUS_TRANS_INVAL", IAP_EVENT_69H) \
+__PMC_EV_ALIAS("BUS_TRANS_IO", IAP_EVENT_6CH) \
+__PMC_EV_ALIAS("BUS_TRANS_MEM", IAP_EVENT_6FH) \
+__PMC_EV_ALIAS("BUS_TRANS_P", IAP_EVENT_6BH) \
+__PMC_EV_ALIAS("BUS_TRANS_PWR", IAP_EVENT_6AH) \
+__PMC_EV_ALIAS("BUS_TRANS_RFO", IAP_EVENT_66H) \
+__PMC_EV_ALIAS("BUS_TRANS_WB", IAP_EVENT_67H) \
+__PMC_EV_ALIAS("CMP_SNOOP", IAP_EVENT_78H) \
+__PMC_EV_ALIAS("CPU_CLK_UNHALTED.BUS", IAP_EVENT_3CH_01H) \
+__PMC_EV_ALIAS("CPU_CLK_UNHALTED.CORE_P", \
+ IAP_EVENT_3CH_00H) \
+__PMC_EV_ALIAS("CPU_CLK_UNHALTED.NO_OTHER", \
+ IAP_EVENT_3CH_02H) \
+__PMC_EV_ALIAS("CYCLES_DIV_BUSY", IAP_EVENT_14H_00H) \
+__PMC_EV_ALIAS("CYCLES_INT_MASKED", IAP_EVENT_C6H_01H) \
+__PMC_EV_ALIAS("CYCLES_INT_PENDING_AND_MASKED", \
+ IAP_EVENT_C6H_02H) \
+__PMC_EV_ALIAS("CYCLES_L1I_MEM_STALLED", IAP_EVENT_86H_00H) \
+__PMC_EV_ALIAS("DELAYED_BYPASS.FP", IAP_EVENT_19H_00H) \
+__PMC_EV_ALIAS("DELAYED_BYPASS.LOAD", IAP_EVENT_19H_01H) \
+__PMC_EV_ALIAS("DELAYED_BYPASS.SIMD", IAP_EVENT_19H_02H) \
+__PMC_EV_ALIAS("DIV", IAP_EVENT_13H_00H) \
+__PMC_EV_ALIAS("DTLB_MISSES.ANY", IAP_EVENT_08H_01H) \
+__PMC_EV_ALIAS("DTLB_MISSES.L0_MISS_LD", IAP_EVENT_08H_04H) \
+__PMC_EV_ALIAS("DTLB_MISSES.MISS_LD", IAP_EVENT_08H_02H) \
+__PMC_EV_ALIAS("DTLB_MISSES.MISS_ST", IAP_EVENT_08H_08H) \
+__PMC_EV_ALIAS("EIST_TRANS", IAP_EVENT_3AH_00H) \
+__PMC_EV_ALIAS("ESP.ADDITIONS", IAP_EVENT_ABH_02H) \
+__PMC_EV_ALIAS("ESP.SYNCH", IAP_EVENT_ABH_01H) \
+__PMC_EV_ALIAS("EXT_SNOOP", IAP_EVENT_77H) \
+__PMC_EV_ALIAS("FP_ASSIST", IAP_EVENT_11H_00H) \
+__PMC_EV_ALIAS("FP_COMP_OPS_EXE", IAP_EVENT_10H_00H) \
+__PMC_EV_ALIAS("FP_MMX_TRANS_TO_FP", IAP_EVENT_CCH_02H) \
+__PMC_EV_ALIAS("FP_MMX_TRANS_TO_MMX", IAP_EVENT_CCH_01H) \
+__PMC_EV_ALIAS("HW_INT_RCV", IAP_EVENT_C8H_00H) \
+__PMC_EV_ALIAS("IDLE_DURING_DIV", IAP_EVENT_18H_00H) \
+__PMC_EV_ALIAS("ILD_STALL", IAP_EVENT_87H_00H) \
+__PMC_EV_ALIAS("INST_QUEUE.FULL", IAP_EVENT_83H_02H) \
+__PMC_EV_ALIAS("INST_RETIRED.ANY_P", IAP_EVENT_C0H_00H) \
+__PMC_EV_ALIAS("INST_RETIRED.LOADS", IAP_EVENT_C0H_01H) \
+__PMC_EV_ALIAS("INST_RETIRED.OTHER", IAP_EVENT_C0H_04H) \
+__PMC_EV_ALIAS("INST_RETIRED.STORES", IAP_EVENT_C0H_02H) \
+__PMC_EV_ALIAS("INST_RETIRED.VM_H", IAP_EVENT_C0H_08H) \
+__PMC_EV_ALIAS("ITLB.FLUSH", IAP_EVENT_82H_40H) \
+__PMC_EV_ALIAS("ITLB.LARGE_MISS", IAP_EVENT_82H_10H) \
+__PMC_EV_ALIAS("ITLB.MISSES", IAP_EVENT_82H_12H) \
+__PMC_EV_ALIAS("ITLB.SMALL_MISS", IAP_EVENT_82H_02H) \
+__PMC_EV_ALIAS("ITLB_MISS_RETIRED", IAP_EVENT_C9H_00H) \
+__PMC_EV_ALIAS("L1D_ALL_CACHE_REF", IAP_EVENT_43H_02H) \
+__PMC_EV_ALIAS("L1D_ALL_REF", IAP_EVENT_43H_01H) \
+__PMC_EV_ALIAS("L1D_CACHE_LD", IAP_EVENT_40H) \
+__PMC_EV_ALIAS("L1D_CACHE_LOCK", IAP_EVENT_42H) \
+__PMC_EV_ALIAS("L1D_CACHE_LOCK_DURATION", IAP_EVENT_42H_10H) \
+__PMC_EV_ALIAS("L1D_CACHE_ST", IAP_EVENT_41H) \
+__PMC_EV_ALIAS("L1D_M_EVICT", IAP_EVENT_47H_00H) \
+__PMC_EV_ALIAS("L1D_M_REPL", IAP_EVENT_46H_00H) \
+__PMC_EV_ALIAS("L1D_PEND_MISS", IAP_EVENT_48H_00H) \
+__PMC_EV_ALIAS("L1D_PREFETCH.REQUESTS", IAP_EVENT_4EH_10H) \
+__PMC_EV_ALIAS("L1D_REPL", IAP_EVENT_45H_0FH) \
+__PMC_EV_ALIAS("L1D_SPLIT.LOADS", IAP_EVENT_49H_01H) \
+__PMC_EV_ALIAS("L1D_SPLIT.STORES", IAP_EVENT_49H_02H) \
+__PMC_EV_ALIAS("L1I_MISSES", IAP_EVENT_81H_00H) \
+__PMC_EV_ALIAS("L1I_READS", IAP_EVENT_80H_00H) \
+__PMC_EV_ALIAS("L2_ADS", IAP_EVENT_21H) \
+__PMC_EV_ALIAS("L2_DBUS_BUSY_RD", IAP_EVENT_23H) \
+__PMC_EV_ALIAS("L2_IFETCH", IAP_EVENT_28H) \
+__PMC_EV_ALIAS("L2_LD", IAP_EVENT_29H) \
+__PMC_EV_ALIAS("L2_LINES_IN", IAP_EVENT_24H) \
+__PMC_EV_ALIAS("L2_LINES_OUT", IAP_EVENT_26H) \
+__PMC_EV_ALIAS("L2_LOCK", IAP_EVENT_2BH) \
+__PMC_EV_ALIAS("L2_M_LINES_IN", IAP_EVENT_25H) \
+__PMC_EV_ALIAS("L2_M_LINES_OUT", IAP_EVENT_27H) \
+__PMC_EV_ALIAS("L2_NO_REQ", IAP_EVENT_32H) \
+__PMC_EV_ALIAS("L2_REJECT_BUSQ", IAP_EVENT_30H) \
+__PMC_EV_ALIAS("L2_RQSTS", IAP_EVENT_2EH) \
+__PMC_EV_ALIAS("L2_RQSTS.SELF.DEMAND.I_STATE", \
+ IAP_EVENT_2EH_41H) \
+__PMC_EV_ALIAS("L2_RQSTS.SELF.DEMAND.MESI", \
+ IAP_EVENT_2EH_4FH) \
+__PMC_EV_ALIAS("L2_ST", IAP_EVENT_2AH) \
+__PMC_EV_ALIAS("LOAD_BLOCK.L1D", IAP_EVENT_03H_20H) \
+__PMC_EV_ALIAS("LOAD_BLOCK.OVERLAP_STORE", \
+ IAP_EVENT_03H_08H) \
+__PMC_EV_ALIAS("LOAD_BLOCK.STA", IAP_EVENT_03H_02H) \
+__PMC_EV_ALIAS("LOAD_BLOCK.STD", IAP_EVENT_03H_04H) \
+__PMC_EV_ALIAS("LOAD_BLOCK.UNTIL_RETIRE", IAP_EVENT_03H_10H) \
+__PMC_EV_ALIAS("LOAD_HIT_PRE", IAP_EVENT_4CH_00H) \
+__PMC_EV_ALIAS("MACHINE_NUKES.MEM_ORDER", IAP_EVENT_C3H_04H) \
+__PMC_EV_ALIAS("MACHINE_NUKES.SMC", IAP_EVENT_C3H_01H) \
+__PMC_EV_ALIAS("MACRO_INSTS.CISC_DECODED", IAP_EVENT_AAH_08H) \
+__PMC_EV_ALIAS("MACRO_INSTS.DECODED", IAP_EVENT_AAH_01H) \
+__PMC_EV_ALIAS("MEMORY_DISAMBIGUATION.RESET", \
+ IAP_EVENT_09H_01H) \
+__PMC_EV_ALIAS("MEMORY_DISAMBIGUATION.SUCCESS", \
+ IAP_EVENT_09H_02H) \
+__PMC_EV_ALIAS("MEM_LOAD_RETIRED.DTLB_MISS", \
+ IAP_EVENT_CBH_10H) \
+__PMC_EV_ALIAS("MEM_LOAD_RETIRED.L1D_LINE_MISS", \
+ IAP_EVENT_CBH_02H) \
+__PMC_EV_ALIAS("MEM_LOAD_RETIRED.L1D_MISS", \
+ IAP_EVENT_CBH_01H) \
+__PMC_EV_ALIAS("MEM_LOAD_RETIRED.L2_LINE_MISS", \
+ IAP_EVENT_CBH_08H) \
+__PMC_EV_ALIAS("MEM_LOAD_RETIRED.L2_MISS", \
+ IAP_EVENT_CBH_04H) \
+__PMC_EV_ALIAS("MUL", IAP_EVENT_12H_00H) \
+__PMC_EV_ALIAS("PAGE_WALKS.COUNT", IAP_EVENT_0CH_01H) \
+__PMC_EV_ALIAS("PAGE_WALKS.CYCLES", IAP_EVENT_0CH_02H) \
+__PMC_EV_ALIAS("PREF_RQSTS_DN", IAP_EVENT_F8H_00H) \
+__PMC_EV_ALIAS("PREF_RQSTS_UP", IAP_EVENT_F0H_00H) \
+__PMC_EV_ALIAS("RAT_STALLS.ANY", IAP_EVENT_D2H_0FH) \
+__PMC_EV_ALIAS("RAT_STALLS.FLAGS", IAP_EVENT_D2H_04H) \
+__PMC_EV_ALIAS("RAT_STALLS.FPSW", IAP_EVENT_D2H_08H) \
+__PMC_EV_ALIAS("RAT_STALLS.OTHER_SERIALIZATION_STALLS", \
+ IAP_EVENT_D2H_10H) \
+__PMC_EV_ALIAS("RAT_STALLS.PARTIAL_CYCLES", \
+ IAP_EVENT_D2H_02H) \
+__PMC_EV_ALIAS("RAT_STALLS.ROB_READ_PORT", \
+ IAP_EVENT_D2H_01H) \
+__PMC_EV_ALIAS("RESOURCE_STALLS.ANY", IAP_EVENT_DCH_1FH) \
+__PMC_EV_ALIAS("RESOURCE_STALLS.BR_MISS_CLEAR", \
+ IAP_EVENT_DCH_10H) \
+__PMC_EV_ALIAS("RESOURCE_STALLS.FPCW", IAP_EVENT_DCH_08H) \
+__PMC_EV_ALIAS("RESOURCE_STALLS.LD_ST", IAP_EVENT_DCH_04H) \
+__PMC_EV_ALIAS("RESOURCE_STALLS.ROB_FULL", \
+ IAP_EVENT_DCH_01H) \
+__PMC_EV_ALIAS("RESOURCE_STALLS.RS_FULL", IAP_EVENT_DCH_02H) \
+__PMC_EV_ALIAS("RS_UOPS_DISPATCHED", IAP_EVENT_A0H_00H) \
+__PMC_EV_ALIAS("RS_UOPS_DISPATCHED.PORT0", IAP_EVENT_A1H_01H) \
+__PMC_EV_ALIAS("RS_UOPS_DISPATCHED.PORT1", IAP_EVENT_A1H_02H) \
+__PMC_EV_ALIAS("RS_UOPS_DISPATCHED.PORT2", IAP_EVENT_A1H_04H) \
+__PMC_EV_ALIAS("RS_UOPS_DISPATCHED.PORT3", IAP_EVENT_A1H_08H) \
+__PMC_EV_ALIAS("RS_UOPS_DISPATCHED.PORT4", IAP_EVENT_A1H_10H) \
+__PMC_EV_ALIAS("RS_UOPS_DISPATCHED.PORT5", IAP_EVENT_A1H_20H) \
+__PMC_EV_ALIAS("SB_DRAIN_CYCLES", IAP_EVENT_04H_01H) \
+__PMC_EV_ALIAS("SEGMENT_REG_LOADS", IAP_EVENT_06H_00H) \
+__PMC_EV_ALIAS("SEG_REG_RENAMES.ANY", IAP_EVENT_D5H_0FH) \
+__PMC_EV_ALIAS("SEG_REG_RENAMES.DS", IAP_EVENT_D5H_02H) \
+__PMC_EV_ALIAS("SEG_REG_RENAMES.ES", IAP_EVENT_D5H_01H) \
+__PMC_EV_ALIAS("SEG_REG_RENAMES.FS", IAP_EVENT_D5H_04H) \
+__PMC_EV_ALIAS("SEG_REG_RENAMES.GS", IAP_EVENT_D5H_08H) \
+__PMC_EV_ALIAS("SEG_RENAME_STALLS.ANY", IAP_EVENT_D4H_0FH) \
+__PMC_EV_ALIAS("SEG_RENAME_STALLS.DS", IAP_EVENT_D4H_02H) \
+__PMC_EV_ALIAS("SEG_RENAME_STALLS.ES", IAP_EVENT_D4H_01H) \
+__PMC_EV_ALIAS("SEG_RENAME_STALLS.FS", IAP_EVENT_D4H_04H) \
+__PMC_EV_ALIAS("SEG_RENAME_STALLS.GS", IAP_EVENT_D4H_08H) \
+__PMC_EV_ALIAS("SIMD_ASSIST", IAP_EVENT_CDH_00H) \
+__PMC_EV_ALIAS("SIMD_COMP_INST_RETIRED.PACKED_DOUBLE", \
+ IAP_EVENT_CAH_04H) \
+__PMC_EV_ALIAS("SIMD_COMP_INST_RETIRED.PACKED_SINGLE", \
+ IAP_EVENT_CAH_01H) \
+__PMC_EV_ALIAS("SIMD_COMP_INST_RETIRED.SCALAR_DOUBLE", \
+ IAP_EVENT_CAH_08H) \
+__PMC_EV_ALIAS("SIMD_COMP_INST_RETIRED.SCALAR_SINGLE", \
+ IAP_EVENT_CAH_02H) \
+__PMC_EV_ALIAS("SIMD_INSTR_RETIRED", IAP_EVENT_CEH_00H) \
+__PMC_EV_ALIAS("SIMD_INST_RETIRED.ANY", IAP_EVENT_C7H_1FH) \
+__PMC_EV_ALIAS("SIMD_INST_RETIRED.PACKED_DOUBLE", \
+ IAP_EVENT_C7H_04H) \
+__PMC_EV_ALIAS("SIMD_INST_RETIRED.PACKED_SINGLE", \
+ IAP_EVENT_C7H_01H) \
+__PMC_EV_ALIAS("SIMD_INST_RETIRED.SCALAR_DOUBLE", \
+ IAP_EVENT_C7H_08H) \
+__PMC_EV_ALIAS("SIMD_INST_RETIRED.SCALAR_SINGLE", \
+ IAP_EVENT_C7H_02H) \
+__PMC_EV_ALIAS("SIMD_INST_RETIRED.VECTOR", IAP_EVENT_C7H_10H) \
+__PMC_EV_ALIAS("SIMD_SAT_INSTR_RETIRED", IAP_EVENT_CFH_00H) \
+__PMC_EV_ALIAS("SIMD_SAT_UOP_EXEC", IAP_EVENT_B1H_00H) \
+__PMC_EV_ALIAS("SIMD_UOPS_EXEC", IAP_EVENT_B0H_00H) \
+__PMC_EV_ALIAS("SIMD_UOP_TYPE_EXEC.ARITHMETIC", IAP_EVENT_B3H_20H) \
+__PMC_EV_ALIAS("SIMD_UOP_TYPE_EXEC.LOGICAL", IAP_EVENT_B3H_10H) \
+__PMC_EV_ALIAS("SIMD_UOP_TYPE_EXEC.MUL", IAP_EVENT_B3H_01H) \
+__PMC_EV_ALIAS("SIMD_UOP_TYPE_EXEC.PACK", IAP_EVENT_B3H_04H) \
+__PMC_EV_ALIAS("SIMD_UOP_TYPE_EXEC.SHIFT", IAP_EVENT_B3H_02H) \
+__PMC_EV_ALIAS("SIMD_UOP_TYPE_EXEC.UNPACK", IAP_EVENT_B3H_08H) \
+__PMC_EV_ALIAS("SNOOP_STALL_DRV", IAP_EVENT_7EH) \
+__PMC_EV_ALIAS("SSE_PRE_EXEC.L1", IAP_EVENT_07H_01H) \
+__PMC_EV_ALIAS("SSE_PRE_EXEC.L2", IAP_EVENT_07H_02H) \
+__PMC_EV_ALIAS("SSE_PRE_EXEC.NTA", IAP_EVENT_07H_00H) \
+__PMC_EV_ALIAS("SSE_PRE_EXEC.STORES", IAP_EVENT_07H_03H) \
+__PMC_EV_ALIAS("SSE_PRE_MISS.L1", IAP_EVENT_4BH_01H) \
+__PMC_EV_ALIAS("SSE_PRE_MISS.L2", IAP_EVENT_4BH_02H) \
+__PMC_EV_ALIAS("SSE_PRE_MISS.NTA", IAP_EVENT_4BH_00H) \
+__PMC_EV_ALIAS("STORE_BLOCK.ORDER", IAP_EVENT_04H_02H) \
+__PMC_EV_ALIAS("STORE_BLOCK.SNOOP", IAP_EVENT_04H_08H) \
+__PMC_EV_ALIAS("THERMAL_TRIP", IAP_EVENT_3BH_C0H) \
+__PMC_EV_ALIAS("UOPS_RETIRED.ANY", IAP_EVENT_C2H_0FH) \
+__PMC_EV_ALIAS("UOPS_RETIRED.FUSED", IAP_EVENT_C2H_07H) \
+__PMC_EV_ALIAS("UOPS_RETIRED.LD_IND_BR", IAP_EVENT_C2H_01H) \
+__PMC_EV_ALIAS("UOPS_RETIRED.MACRO_FUSION", IAP_EVENT_C2H_04H) \
+__PMC_EV_ALIAS("UOPS_RETIRED.NON_FUSED", IAP_EVENT_C2H_08H) \
+__PMC_EV_ALIAS("UOPS_RETIRED.STD_STA", IAP_EVENT_C2H_02H) \
+__PMC_EV_ALIAS("X87_OPS_RETIRED.ANY", IAP_EVENT_C1H_FEH) \
+__PMC_EV_ALIAS("X87_OPS_RETIRED.FXCH", IAP_EVENT_C1H_01H)
+
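+/*
+ * Editor's illustration (not part of the original header): each
+ * EVENT_<evsel>H_<umask>H id ultimately becomes the low bytes of an
+ * IA32_PERFEVTSELx MSR write.  A sketch of the architectural layout,
+ * with bit names modeled on those in hwpmc_core.h:
+ *
+ *	#define	IAP_EVSEL(x)	((x) & 0xFF)
+ *	#define	IAP_UMASK(x)	(((x) & 0xFF) << 8)
+ *	#define	IAP_USR		(1 << 16)
+ *	#define	IAP_OS		(1 << 17)
+ *	#define	IAP_EN		(1 << 22)
+ *
+ * so counting Core2's UOPS_RETIRED.ANY (EVENT_C2H_0FH above) in both
+ * rings amounts to:
+ *
+ *	wrmsr(IA32_PERFEVTSEL0,
+ *	    IAP_EVSEL(0xC2) | IAP_UMASK(0x0F) | IAP_USR | IAP_OS | IAP_EN);
+ */
+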
+/*
+ * Core i7 and Xeon 5500 events that were removed from the Intel SDM
+ * (document 253669) between revisions -031US (June 2009) and -040US
+ * (October 2011).
+ */
+#define __PMC_EV_ALIAS_COREI7_OLD() \
+__PMC_EV_ALIAS("SB_FORWARD.ANY", IAP_EVENT_02H_01H) \
+__PMC_EV_ALIAS("LOAD_BLOCK.STD", IAP_EVENT_03H_01H) \
+__PMC_EV_ALIAS("LOAD_BLOCK.ADDRESS_OFFSET", IAP_EVENT_03H_04H) \
+__PMC_EV_ALIAS("SB_DRAIN.CYCLES", IAP_EVENT_04H_01H) \
+__PMC_EV_ALIAS("MISALIGN_MEM_REF.LOAD", IAP_EVENT_05H_01H) \
+__PMC_EV_ALIAS("MISALIGN_MEM_REF.STORE", IAP_EVENT_05H_02H) \
+__PMC_EV_ALIAS("MISALIGN_MEM_REF.ANY", IAP_EVENT_05H_03H) \
+__PMC_EV_ALIAS("STORE_BLOCKS.NOT_STA", IAP_EVENT_06H_01H) \
+__PMC_EV_ALIAS("STORE_BLOCKS.STA", IAP_EVENT_06H_02H) \
+__PMC_EV_ALIAS("STORE_BLOCKS.ANY", IAP_EVENT_06H_0FH) \
+__PMC_EV_ALIAS("DTLB_LOAD_MISSES.PDP_MISS", IAP_EVENT_08H_40H) \
+__PMC_EV_ALIAS("MEMORY_DISAMBIGURATION.RESET", IAP_EVENT_09H_01H) \
+__PMC_EV_ALIAS("MEMORY_DISAMBIGURATION.SUCCESS", IAP_EVENT_09H_02H) \
+__PMC_EV_ALIAS("MEMORY_DISAMBIGURATION.WATCHDOG", IAP_EVENT_09H_04H) \
+__PMC_EV_ALIAS("MEMORY_DISAMBIGURATION.WATCH_CYCLES", IAP_EVENT_09H_08H)\
+__PMC_EV_ALIAS("HW_INT.RCV", IAP_EVENT_1DH_01H) \
+__PMC_EV_ALIAS("HW_INT.CYCLES_MASKED", IAP_EVENT_1DH_02H) \
+__PMC_EV_ALIAS("HW_INT.CYCLES_PENDING_AND_MASKED", IAP_EVENT_1DH_04H) \
+__PMC_EV_ALIAS("L2_WRITE.RFO.E_STATE", IAP_EVENT_27H_04H) \
+__PMC_EV_ALIAS("UOPS_DECODED.DEC0", IAP_EVENT_3DH_01H) \
+__PMC_EV_ALIAS("L1D_CACHE_ST.I_STATE", IAP_EVENT_41H_01H) \
+__PMC_EV_ALIAS("L1D_CACHE_ST.MESI", IAP_EVENT_41H_0FH) \
+__PMC_EV_ALIAS("L1D_PEND_MISS.LOAD_BUFFERS_FULL", IAP_EVENT_48H_02H) \
+__PMC_EV_ALIAS("DTLB_MISSES.PDP_MISS", IAP_EVENT_49H_40H) \
+__PMC_EV_ALIAS("SSE_MEM_EXEC.NTA", IAP_EVENT_4BH_01H) \
+__PMC_EV_ALIAS("SSE_MEM_EXEC.STREAMING_STORES", IAP_EVENT_4BH_08H) \
+__PMC_EV_ALIAS("SFENCE_CYCLES", IAP_EVENT_4DH_01H) \
+__PMC_EV_ALIAS("EPT.EPDE_MISS", IAP_EVENT_4FH_02H) \
+__PMC_EV_ALIAS("EPT.EPDPE_HIT", IAP_EVENT_4FH_04H) \
+__PMC_EV_ALIAS("EPT.EPDPE_MISS", IAP_EVENT_4FH_08H) \
+__PMC_EV_ALIAS("OFFCORE_REQUESTS_OUTSTANDING.DEMAND.READ_DATA", \
+ IAP_EVENT_60H_01H) \
+__PMC_EV_ALIAS("OFFCORE_REQUESTS_OUTSTANDING.DEMAND.READ_CODE", \
+ IAP_EVENT_60H_02H) \
+__PMC_EV_ALIAS("OFFCORE_REQUESTS_OUTSTANDING.DEMAND.RFO", \
+ IAP_EVENT_60H_04H) \
+__PMC_EV_ALIAS("OFFCORE_REQUESTS_OUTSTANDING.ANY.READ", \
+ IAP_EVENT_60H_08H) \
+__PMC_EV_ALIAS("IFU_IVC.FULL", IAP_EVENT_81H_01H) \
+__PMC_EV_ALIAS("IFU_IVC.L1I_EVICTION", IAP_EVENT_81H_02H) \
+__PMC_EV_ALIAS("L1I_OPPORTUNISTIC_HITS", IAP_EVENT_83H_01H) \
+__PMC_EV_ALIAS("ITLB_MISSES.WALK_CYCLES", IAP_EVENT_85H_04H) \
+__PMC_EV_ALIAS("ITLB_MISSES.PMH_BUSY_CYCLES", IAP_EVENT_85H_04H) \
+__PMC_EV_ALIAS("ITLB_MISSES.STLB_HIT", IAP_EVENT_85H_10H) \
+__PMC_EV_ALIAS("ITLB_MISSES.PDE_MISS", IAP_EVENT_85H_20H) \
+__PMC_EV_ALIAS("ITLB_MISSES.PDP_MISS", IAP_EVENT_85H_40H) \
+__PMC_EV_ALIAS("ITLB_MISSES.LARGE_WALK_COMPLETED", IAP_EVENT_85H_80H) \
+__PMC_EV_ALIAS("OFFCORE_REQUESTS.DEMAND.READ_DATA", IAP_EVENT_B0H_01H) \
+__PMC_EV_ALIAS("OFFCORE_REQUESTS.DEMAND.READ_CODE", IAP_EVENT_B0H_02H) \
+__PMC_EV_ALIAS("OFFCORE_REQUESTS.DEMAND.RFO", IAP_EVENT_B0H_04H) \
+__PMC_EV_ALIAS("OFFCORE_REQUESTS.ANY.READ", IAP_EVENT_B0H_08H) \
+__PMC_EV_ALIAS("OFFCORE_REQUESTS.ANY.RFO", IAP_EVENT_B0H_10H) \
+__PMC_EV_ALIAS("OFFCORE_REQUESTS.UNCACHED_MEM", IAP_EVENT_B0H_20H) \
+__PMC_EV_ALIAS("OFFCORE_REQUESTS.ANY", IAP_EVENT_B0H_80H) \
+__PMC_EV_ALIAS("SNOOPQ_REQUESTS_OUTSTANDING.DATA", IAP_EVENT_B3H_01H) \
+__PMC_EV_ALIAS("SNOOPQ_REQUESTS_OUTSTANDING.INVALIDATE", \
+ IAP_EVENT_B3H_02H) \
+__PMC_EV_ALIAS("SNOOPQ_REQUESTS_OUTSTANDING.CODE", IAP_EVENT_B3H_04H) \
+__PMC_EV_ALIAS("PIC_ACCESSES.TPR_READS", IAP_EVENT_BAH_01H) \
+__PMC_EV_ALIAS("PIC_ACCESSES.TPR_WRITES", IAP_EVENT_BAH_02H) \
+__PMC_EV_ALIAS("MACHINE_CLEARS.FUSION_ASSIST", IAP_EVENT_C3H_10H) \
+__PMC_EV_ALIAS("BOGUS_BR", IAP_EVENT_E4H_01H) \
+__PMC_EV_ALIAS("BPU_CLEARS.ANY", IAP_EVENT_E8H_03H) \
+__PMC_EV_ALIAS("L2_HW_PREFETCH.HIT", IAP_EVENT_F3H_01H) \
+__PMC_EV_ALIAS("L2_HW_PREFETCH.ALLOC", IAP_EVENT_F3H_02H) \
+__PMC_EV_ALIAS("L2_HW_PREFETCH.DATA_TRIGGER", IAP_EVENT_F3H_04H) \
+__PMC_EV_ALIAS("L2_HW_PREFETCH.CODE_TRIGGER", IAP_EVENT_F3H_08H) \
+__PMC_EV_ALIAS("L2_HW_PREFETCH.DCA_TRIGGER", IAP_EVENT_F3H_10H) \
+__PMC_EV_ALIAS("L2_HW_PREFETCH.KICK_START", IAP_EVENT_F3H_20H) \
+__PMC_EV_ALIAS("SQ_MISC.PROMOTION", IAP_EVENT_F4H_01H) \
+__PMC_EV_ALIAS("SQ_MISC.PROMOTION_POST_GO", IAP_EVENT_F4H_02H) \
+__PMC_EV_ALIAS("SQ_MISC.LRU_HINTS", IAP_EVENT_F4H_04H) \
+__PMC_EV_ALIAS("SQ_MISC.FILL_DROPPED", IAP_EVENT_F4H_08H) \
+__PMC_EV_ALIAS("SEGMENT_REG_LOADS", IAP_EVENT_F8H_01H)
+
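+/*
+ * Editor's illustration (not part of the original header): since the
+ * withdrawn names above overlap with the current set that follows, a
+ * lookup wanting to honor both can consult two generated tables in
+ * order; the table and function names here are hypothetical:
+ *
+ *	static const struct pmc_ev_alias corei7_aliases[] = {
+ *		__PMC_EV_ALIAS_COREI7()
+ *	};
+ *	static const struct pmc_ev_alias corei7_aliases_old[] = {
+ *		__PMC_EV_ALIAS_COREI7_OLD()
+ *	};
+ *
+ *	i = alias_lookup(corei7_aliases, nitems(corei7_aliases), name);
+ *	if (i < 0)
+ *		i = alias_lookup(corei7_aliases_old,
+ *		    nitems(corei7_aliases_old), name);
+ */
+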
+/*
+ * Aliases for Core i7 and Xeon 5500 PMC events, per Intel SDM revision
+ * 253669-033US (December 2009).
+ */
+#define __PMC_EV_ALIAS_COREI7() \
+__PMC_EV_ALIAS_INTEL_ARCHITECTURAL() \
+__PMC_EV_ALIAS("SB_DRAIN.ANY", IAP_EVENT_04H_07H) \
+__PMC_EV_ALIAS("STORE_BLOCKS.AT_RET", IAP_EVENT_06H_04H) \
+__PMC_EV_ALIAS("STORE_BLOCKS.L1D_BLOCK", IAP_EVENT_06H_08H) \
+__PMC_EV_ALIAS("PARTIAL_ADDRESS_ALIAS", IAP_EVENT_07H_01H) \
+__PMC_EV_ALIAS("DTLB_LOAD_MISSES.ANY", IAP_EVENT_08H_01H) \
+__PMC_EV_ALIAS("DTLB_LOAD_MISSES.WALK_COMPLETED", IAP_EVENT_08H_02H) \
+__PMC_EV_ALIAS("DTLB_LOAD_MISSES.STLB_HIT", IAP_EVENT_08H_10H) \
+__PMC_EV_ALIAS("DTLB_LOAD_MISSES.PDE_MISS", IAP_EVENT_08H_20H) \
+__PMC_EV_ALIAS("DTLB_LOAD_MISSES.LARGE_WALK_COMPLETED", \
+ IAP_EVENT_08H_80H) \
+__PMC_EV_ALIAS("MEM_INST_RETIRED.LOADS", IAP_EVENT_0BH_01H) \
+__PMC_EV_ALIAS("MEM_INST_RETIRED.STORES", IAP_EVENT_0BH_02H) \
+__PMC_EV_ALIAS("MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD", \
+ IAP_EVENT_0BH_10H) \
+__PMC_EV_ALIAS("MEM_STORE_RETIRED.DTLB_MISS", IAP_EVENT_0CH_01H) \
+__PMC_EV_ALIAS("UOPS_ISSUED.ANY", IAP_EVENT_0EH_01H) \
+__PMC_EV_ALIAS("UOPS_ISSUED.STALLED_CYCLES", IAP_EVENT_0EH_01H) \
+__PMC_EV_ALIAS("UOPS_ISSUED.FUSED", IAP_EVENT_0EH_02H) \
+__PMC_EV_ALIAS("MEM_UNCORE_RETIRED.L3_DATA_MISS_UNKNOWN", \
+ IAP_EVENT_0FH_01H) \
+__PMC_EV_ALIAS("MEM_UNCORE_RETIRED.OTHER_CORE_L2_HITM", \
+ IAP_EVENT_0FH_02H) \
+__PMC_EV_ALIAS("MEM_UNCORE_RETIRED.REMOTE_CACHE_LOCAL_HOME_HIT", \
+ IAP_EVENT_0FH_08H) \
+__PMC_EV_ALIAS("MEM_UNCORE_RETIRED.REMOTE_DRAM", \
+ IAP_EVENT_0FH_10H) \
+__PMC_EV_ALIAS("MEM_UNCORE_RETIRED.LOCAL_DRAM", IAP_EVENT_0FH_20H) \
+__PMC_EV_ALIAS("MEM_UNCORE_RETIRED.UNCACHEABLE", IAP_EVENT_0FH_80H) \
+__PMC_EV_ALIAS("FP_COMP_OPS_EXE.X87", IAP_EVENT_10H_01H) \
+__PMC_EV_ALIAS("FP_COMP_OPS_EXE.MMX", IAP_EVENT_10H_02H) \
+__PMC_EV_ALIAS("FP_COMP_OPS_EXE.SSE_FP", IAP_EVENT_10H_04H) \
+__PMC_EV_ALIAS("FP_COMP_OPS_EXE.SSE2_INTEGER", IAP_EVENT_10H_08H) \
+__PMC_EV_ALIAS("FP_COMP_OPS_EXE.SSE_FP_PACKED", IAP_EVENT_10H_10H) \
+__PMC_EV_ALIAS("FP_COMP_OPS_EXE.SSE_FP_SCALAR", IAP_EVENT_10H_20H) \
+__PMC_EV_ALIAS("FP_COMP_OPS_EXE.SSE_SINGLE_PRECISION", \
+ IAP_EVENT_10H_40H) \
+__PMC_EV_ALIAS("FP_COMP_OPS_EXE.SSE_DOUBLE_PRECISION", \
+ IAP_EVENT_10H_80H) \
+__PMC_EV_ALIAS("SIMD_INT_128.PACKED_MPY", IAP_EVENT_12H_01H) \
+__PMC_EV_ALIAS("SIMD_INT_128.PACKED_SHIFT", IAP_EVENT_12H_02H) \
+__PMC_EV_ALIAS("SIMD_INT_128.PACK", IAP_EVENT_12H_04H) \
+__PMC_EV_ALIAS("SIMD_INT_128.UNPACK", IAP_EVENT_12H_08H) \
+__PMC_EV_ALIAS("SIMD_INT_128.PACKED_LOGICAL", IAP_EVENT_12H_10H) \
+__PMC_EV_ALIAS("SIMD_INT_128.PACKED_ARITH", IAP_EVENT_12H_20H) \
+__PMC_EV_ALIAS("SIMD_INT_128.SHUFFLE_MOVE", IAP_EVENT_12H_40H) \
+__PMC_EV_ALIAS("LOAD_DISPATCH.RS", IAP_EVENT_13H_01H) \
+__PMC_EV_ALIAS("LOAD_DISPATCH.RS_DELAYED", IAP_EVENT_13H_02H) \
+__PMC_EV_ALIAS("LOAD_DISPATCH.MOB", IAP_EVENT_13H_04H) \
+__PMC_EV_ALIAS("LOAD_DISPATCH.ANY", IAP_EVENT_13H_07H) \
+__PMC_EV_ALIAS("ARITH.CYCLES_DIV_BUSY", IAP_EVENT_14H_01H) \
+__PMC_EV_ALIAS("ARITH.MUL", IAP_EVENT_14H_02H) \
+__PMC_EV_ALIAS("INST_QUEUE_WRITES", IAP_EVENT_17H_01H) \
+__PMC_EV_ALIAS("INST_DECODED.DEC0", IAP_EVENT_18H_01H) \
+__PMC_EV_ALIAS("TWO_UOP_INSTS_DECODED", IAP_EVENT_19H_01H) \
+__PMC_EV_ALIAS("INST_QUEUE_WRITE_CYCLES", IAP_EVENT_1EH_01H) \
+__PMC_EV_ALIAS("LSD_OVERFLOW", IAP_EVENT_20H_01H) \
+__PMC_EV_ALIAS("L2_RQSTS.LD_HIT", IAP_EVENT_24H_01H) \
+__PMC_EV_ALIAS("L2_RQSTS.LD_MISS", IAP_EVENT_24H_02H) \
+__PMC_EV_ALIAS("L2_RQSTS.LOADS", IAP_EVENT_24H_03H) \
+__PMC_EV_ALIAS("L2_RQSTS.RFO_HIT", IAP_EVENT_24H_04H) \
+__PMC_EV_ALIAS("L2_RQSTS.RFO_MISS", IAP_EVENT_24H_08H) \
+__PMC_EV_ALIAS("L2_RQSTS.RFOS", IAP_EVENT_24H_0CH) \
+__PMC_EV_ALIAS("L2_RQSTS.IFETCH_HIT", IAP_EVENT_24H_10H) \
+__PMC_EV_ALIAS("L2_RQSTS.IFETCH_MISS", IAP_EVENT_24H_20H) \
+__PMC_EV_ALIAS("L2_RQSTS.IFETCHES", IAP_EVENT_24H_30H) \
+__PMC_EV_ALIAS("L2_RQSTS.PREFETCH_HIT", IAP_EVENT_24H_40H) \
+__PMC_EV_ALIAS("L2_RQSTS.PREFETCH_MISS", IAP_EVENT_24H_80H) \
+__PMC_EV_ALIAS("L2_RQSTS.PREFETCHES", IAP_EVENT_24H_C0H) \
+__PMC_EV_ALIAS("L2_RQSTS.MISS", IAP_EVENT_24H_AAH) \
+__PMC_EV_ALIAS("L2_RQSTS.REFERENCES", IAP_EVENT_24H_FFH) \
+__PMC_EV_ALIAS("L2_DATA_RQSTS.DEMAND.I_STATE", IAP_EVENT_26H_01H) \
+__PMC_EV_ALIAS("L2_DATA_RQSTS.DEMAND.S_STATE", IAP_EVENT_26H_02H) \
+__PMC_EV_ALIAS("L2_DATA_RQSTS.DEMAND.E_STATE", IAP_EVENT_26H_04H) \
+__PMC_EV_ALIAS("L2_DATA_RQSTS.DEMAND.M_STATE", IAP_EVENT_26H_08H) \
+__PMC_EV_ALIAS("L2_DATA_RQSTS.DEMAND.MESI", IAP_EVENT_26H_0FH) \
+__PMC_EV_ALIAS("L2_DATA_RQSTS.PREFETCH.I_STATE", IAP_EVENT_26H_10H) \
+__PMC_EV_ALIAS("L2_DATA_RQSTS.PREFETCH.S_STATE", IAP_EVENT_26H_20H) \
+__PMC_EV_ALIAS("L2_DATA_RQSTS.PREFETCH.E_STATE", IAP_EVENT_26H_40H) \
+__PMC_EV_ALIAS("L2_DATA_RQSTS.PREFETCH.M_STATE", IAP_EVENT_26H_80H) \
+__PMC_EV_ALIAS("L2_DATA_RQSTS.PREFETCH.MESI", IAP_EVENT_26H_F0H) \
+__PMC_EV_ALIAS("L2_DATA_RQSTS.ANY", IAP_EVENT_26H_FFH) \
+__PMC_EV_ALIAS("L2_WRITE.RFO.I_STATE", IAP_EVENT_27H_01H) \
+__PMC_EV_ALIAS("L2_WRITE.RFO.S_STATE", IAP_EVENT_27H_02H) \
+__PMC_EV_ALIAS("L2_WRITE.RFO.M_STATE", IAP_EVENT_27H_08H) \
+__PMC_EV_ALIAS("L2_WRITE.RFO.HIT", IAP_EVENT_27H_0EH) \
+__PMC_EV_ALIAS("L2_WRITE.RFO.MESI", IAP_EVENT_27H_0FH) \
+__PMC_EV_ALIAS("L2_WRITE.LOCK.I_STATE", IAP_EVENT_27H_10H) \
+__PMC_EV_ALIAS("L2_WRITE.LOCK.S_STATE", IAP_EVENT_27H_20H) \
+__PMC_EV_ALIAS("L2_WRITE.LOCK.E_STATE", IAP_EVENT_27H_40H) \
+__PMC_EV_ALIAS("L2_WRITE.LOCK.M_STATE", IAP_EVENT_27H_80H) \
+__PMC_EV_ALIAS("L2_WRITE.LOCK.HIT", IAP_EVENT_27H_E0H) \
+__PMC_EV_ALIAS("L2_WRITE.LOCK.MESI", IAP_EVENT_27H_F0H) \
+__PMC_EV_ALIAS("L1D_WB_L2.I_STATE", IAP_EVENT_28H_01H) \
+__PMC_EV_ALIAS("L1D_WB_L2.S_STATE", IAP_EVENT_28H_02H) \
+__PMC_EV_ALIAS("L1D_WB_L2.E_STATE", IAP_EVENT_28H_04H) \
+__PMC_EV_ALIAS("L1D_WB_L2.M_STATE", IAP_EVENT_28H_08H) \
+__PMC_EV_ALIAS("L1D_WB_L2.MESI", IAP_EVENT_28H_0FH) \
+__PMC_EV_ALIAS("L3_LAT_CACHE.REFERENCE", IAP_EVENT_2EH_4FH) \
+__PMC_EV_ALIAS("L3_LAT_CACHE.MISS", IAP_EVENT_2EH_41H) \
+__PMC_EV_ALIAS("CPU_CLK_UNHALTED.THREAD_P", IAP_EVENT_3CH_00H) \
+__PMC_EV_ALIAS("CPU_CLK_UNHALTED.REF_P", IAP_EVENT_3CH_01H) \
+__PMC_EV_ALIAS("L1D_CACHE_LD.I_STATE", IAP_EVENT_40H_01H) \
+__PMC_EV_ALIAS("L1D_CACHE_LD.S_STATE", IAP_EVENT_40H_02H) \
+__PMC_EV_ALIAS("L1D_CACHE_LD.E_STATE", IAP_EVENT_40H_04H) \
+__PMC_EV_ALIAS("L1D_CACHE_LD.M_STATE", IAP_EVENT_40H_08H) \
+__PMC_EV_ALIAS("L1D_CACHE_LD.MESI", IAP_EVENT_40H_0FH) \
+__PMC_EV_ALIAS("L1D_CACHE_ST.S_STATE", IAP_EVENT_41H_02H) \
+__PMC_EV_ALIAS("L1D_CACHE_ST.E_STATE", IAP_EVENT_41H_04H) \
+__PMC_EV_ALIAS("L1D_CACHE_ST.M_STATE", IAP_EVENT_41H_08H) \
+__PMC_EV_ALIAS("L1D_CACHE_LOCK.HIT", IAP_EVENT_42H_01H) \
+__PMC_EV_ALIAS("L1D_CACHE_LOCK.S_STATE", IAP_EVENT_42H_02H) \
+__PMC_EV_ALIAS("L1D_CACHE_LOCK.E_STATE", IAP_EVENT_42H_04H) \
+__PMC_EV_ALIAS("L1D_CACHE_LOCK.M_STATE", IAP_EVENT_42H_08H) \
+__PMC_EV_ALIAS("L1D_ALL_REF.ANY", IAP_EVENT_43H_01H) \
+__PMC_EV_ALIAS("L1D_ALL_REF.CACHEABLE", IAP_EVENT_43H_02H) \
+__PMC_EV_ALIAS("DTLB_MISSES.ANY", IAP_EVENT_49H_01H) \
+__PMC_EV_ALIAS("DTLB_MISSES.WALK_COMPLETED", IAP_EVENT_49H_02H) \
+__PMC_EV_ALIAS("DTLB_MISSES.STLB_HIT", IAP_EVENT_49H_10H) \
+__PMC_EV_ALIAS("DTLB_MISSES.PDE_MISS", IAP_EVENT_49H_20H) \
+__PMC_EV_ALIAS("DTLB_MISSES.LARGE_WALK_COMPLETED", IAP_EVENT_49H_80H) \
+__PMC_EV_ALIAS("LOAD_HIT_PRE", IAP_EVENT_4CH_01H) \
+__PMC_EV_ALIAS("L1D_PREFETCH.REQUESTS", IAP_EVENT_4EH_01H) \
+__PMC_EV_ALIAS("L1D_PREFETCH.MISS", IAP_EVENT_4EH_02H) \
+__PMC_EV_ALIAS("L1D_PREFETCH.TRIGGERS", IAP_EVENT_4EH_04H) \
+__PMC_EV_ALIAS("L1D.REPL", IAP_EVENT_51H_01H) \
+__PMC_EV_ALIAS("L1D.M_REPL", IAP_EVENT_51H_02H) \
+__PMC_EV_ALIAS("L1D.M_EVICT", IAP_EVENT_51H_04H) \
+__PMC_EV_ALIAS("L1D.M_SNOOP_EVICT", IAP_EVENT_51H_08H) \
+__PMC_EV_ALIAS("L1D_CACHE_PREFETCH_LOCK_FB_HIT", IAP_EVENT_52H_01H) \
+__PMC_EV_ALIAS("L1D_CACHE_LOCK_FB_HIT", IAP_EVENT_53H_01H) \
+__PMC_EV_ALIAS("CACHE_LOCK_CYCLES.L1D_L2", IAP_EVENT_63H_01H) \
+__PMC_EV_ALIAS("CACHE_LOCK_CYCLES.L1D", IAP_EVENT_63H_02H) \
+__PMC_EV_ALIAS("IO_TRANSACTIONS", IAP_EVENT_6CH_01H) \
+__PMC_EV_ALIAS("L1I.HITS", IAP_EVENT_80H_01H) \
+__PMC_EV_ALIAS("L1I.MISSES", IAP_EVENT_80H_02H) \
+__PMC_EV_ALIAS("L1I.READS", IAP_EVENT_80H_03H) \
+__PMC_EV_ALIAS("L1I.CYCLES_STALLED", IAP_EVENT_80H_04H) \
+__PMC_EV_ALIAS("LARGE_ITLB.HIT", IAP_EVENT_82H_01H) \
+__PMC_EV_ALIAS("ITLB_MISSES.ANY", IAP_EVENT_85H_01H) \
+__PMC_EV_ALIAS("ITLB_MISSES.WALK_COMPLETED", IAP_EVENT_85H_02H) \
+__PMC_EV_ALIAS("ILD_STALL.LCP", IAP_EVENT_87H_01H) \
+__PMC_EV_ALIAS("ILD_STALL.MRU", IAP_EVENT_87H_02H) \
+__PMC_EV_ALIAS("ILD_STALL.IQ_FULL", IAP_EVENT_87H_04H) \
+__PMC_EV_ALIAS("ILD_STALL.REGEN", IAP_EVENT_87H_08H) \
+__PMC_EV_ALIAS("ILD_STALL.ANY", IAP_EVENT_87H_0FH) \
+__PMC_EV_ALIAS("BR_INST_EXEC.COND", IAP_EVENT_88H_01H) \
+__PMC_EV_ALIAS("BR_INST_EXEC.DIRECT", IAP_EVENT_88H_02H) \
+__PMC_EV_ALIAS("BR_INST_EXEC.INDIRECT_NON_CALL", IAP_EVENT_88H_04H) \
+__PMC_EV_ALIAS("BR_INST_EXEC.NON_CALLS", IAP_EVENT_88H_07H) \
+__PMC_EV_ALIAS("BR_INST_EXEC.RETURN_NEAR", IAP_EVENT_88H_08H) \
+__PMC_EV_ALIAS("BR_INST_EXEC.DIRECT_NEAR_CALL", IAP_EVENT_88H_10H) \
+__PMC_EV_ALIAS("BR_INST_EXEC.INDIRECT_NEAR_CALL", IAP_EVENT_88H_20H) \
+__PMC_EV_ALIAS("BR_INST_EXEC.NEAR_CALLS", IAP_EVENT_88H_30H) \
+__PMC_EV_ALIAS("BR_INST_EXEC.TAKEN", IAP_EVENT_88H_40H) \
+__PMC_EV_ALIAS("BR_INST_EXEC.ANY", IAP_EVENT_88H_7FH) \
+__PMC_EV_ALIAS("BR_MISP_EXEC.COND", IAP_EVENT_89H_01H) \
+__PMC_EV_ALIAS("BR_MISP_EXEC.DIRECT", IAP_EVENT_89H_02H) \
+__PMC_EV_ALIAS("BR_MISP_EXEC.INDIRECT_NON_CALL", IAP_EVENT_89H_04H) \
+__PMC_EV_ALIAS("BR_MISP_EXEC.NON_CALLS", IAP_EVENT_89H_07H) \
+__PMC_EV_ALIAS("BR_MISP_EXEC.RETURN_NEAR", IAP_EVENT_89H_08H) \
+__PMC_EV_ALIAS("BR_MISP_EXEC.DIRECT_NEAR_CALL", IAP_EVENT_89H_10H) \
+__PMC_EV_ALIAS("BR_MISP_EXEC.INDIRECT_NEAR_CALL", IAP_EVENT_89H_20H) \
+__PMC_EV_ALIAS("BR_MISP_EXEC.NEAR_CALLS", IAP_EVENT_89H_30H) \
+__PMC_EV_ALIAS("BR_MISP_EXEC.TAKEN", IAP_EVENT_89H_40H) \
+__PMC_EV_ALIAS("BR_MISP_EXEC.ANY", IAP_EVENT_89H_7FH) \
+__PMC_EV_ALIAS("RESOURCE_STALLS.ANY", IAP_EVENT_A2H_01H) \
+__PMC_EV_ALIAS("RESOURCE_STALLS.LOAD", IAP_EVENT_A2H_02H) \
+__PMC_EV_ALIAS("RESOURCE_STALLS.RS_FULL", IAP_EVENT_A2H_04H) \
+__PMC_EV_ALIAS("RESOURCE_STALLS.STORE", IAP_EVENT_A2H_08H) \
+__PMC_EV_ALIAS("RESOURCE_STALLS.ROB_FULL", IAP_EVENT_A2H_10H) \
+__PMC_EV_ALIAS("RESOURCE_STALLS.FPCW", IAP_EVENT_A2H_20H) \
+__PMC_EV_ALIAS("RESOURCE_STALLS.MXCSR", IAP_EVENT_A2H_40H) \
+__PMC_EV_ALIAS("RESOURCE_STALLS.OTHER", IAP_EVENT_A2H_80H) \
+__PMC_EV_ALIAS("MACRO_INSTS.FUSIONS_DECODED", IAP_EVENT_A6H_01H) \
+__PMC_EV_ALIAS("BACLEAR_FORCE_IQ", IAP_EVENT_A7H_01H) \
+__PMC_EV_ALIAS("LSD.UOPS", IAP_EVENT_A8H_01H) \
+__PMC_EV_ALIAS("ITLB_FLUSH", IAP_EVENT_AEH_01H) \
+__PMC_EV_ALIAS("OFFCORE_REQUESTS.L1D_WRITEBACK", IAP_EVENT_B0H_40H) \
+__PMC_EV_ALIAS("UOPS_EXECUTED.PORT0", IAP_EVENT_B1H_01H) \
+__PMC_EV_ALIAS("UOPS_EXECUTED.PORT1", IAP_EVENT_B1H_02H) \
+__PMC_EV_ALIAS("UOPS_EXECUTED.PORT2_CORE", IAP_EVENT_B1H_04H) \
+__PMC_EV_ALIAS("UOPS_EXECUTED.PORT3_CORE", IAP_EVENT_B1H_08H) \
+__PMC_EV_ALIAS("UOPS_EXECUTED.PORT4_CORE", IAP_EVENT_B1H_10H) \
+__PMC_EV_ALIAS("UOPS_EXECUTED.CORE_ACTIVE_CYCLES_NO_PORT5", \
+ IAP_EVENT_B1H_1FH) \
+__PMC_EV_ALIAS("UOPS_EXECUTED.PORT5", IAP_EVENT_B1H_20H) \
+__PMC_EV_ALIAS("UOPS_EXECUTED.CORE_ACTIVE_CYCLES", IAP_EVENT_B1H_3FH) \
+__PMC_EV_ALIAS("UOPS_EXECUTED.PORT015", IAP_EVENT_B1H_40H) \
+__PMC_EV_ALIAS("UOPS_EXECUTED.PORT234", IAP_EVENT_B1H_80H) \
+__PMC_EV_ALIAS("OFFCORE_REQUESTS_SQ_FULL", IAP_EVENT_B2H_01H) \
+__PMC_EV_ALIAS("OFF_CORE_RESPONSE_0", IAP_EVENT_B7H_01H) \
+__PMC_EV_ALIAS("SNOOP_RESPONSE.HIT", IAP_EVENT_B8H_01H) \
+__PMC_EV_ALIAS("SNOOP_RESPONSE.HITE", IAP_EVENT_B8H_02H) \
+__PMC_EV_ALIAS("SNOOP_RESPONSE.HITM", IAP_EVENT_B8H_04H) \
+__PMC_EV_ALIAS("OFF_CORE_RESPONSE_1", IAP_EVENT_BBH_01H) \
+__PMC_EV_ALIAS("INST_RETIRED.ANY_P", IAP_EVENT_C0H_01H) \
+__PMC_EV_ALIAS("INST_RETIRED.X87", IAP_EVENT_C0H_02H) \
+__PMC_EV_ALIAS("INST_RETIRED.MMX", IAP_EVENT_C0H_04H) \
+__PMC_EV_ALIAS("UOPS_RETIRED.ANY", IAP_EVENT_C2H_01H) \
+__PMC_EV_ALIAS("UOPS_RETIRED.RETIRE_SLOTS", IAP_EVENT_C2H_02H) \
+__PMC_EV_ALIAS("UOPS_RETIRED.MACRO_FUSED", IAP_EVENT_C2H_04H) \
+__PMC_EV_ALIAS("MACHINE_CLEARS.CYCLES", IAP_EVENT_C3H_01H) \
+__PMC_EV_ALIAS("MACHINE_CLEARS.MEM_ORDER", IAP_EVENT_C3H_02H) \
+__PMC_EV_ALIAS("MACHINE_CLEARS.SMC", IAP_EVENT_C3H_04H) \
+__PMC_EV_ALIAS("BR_INST_RETIRED.ALL_BRANCHES", IAP_EVENT_C4H_00H) \
+__PMC_EV_ALIAS("BR_INST_RETIRED.CONDITIONAL", IAP_EVENT_C4H_01H) \
+__PMC_EV_ALIAS("BR_INST_RETIRED.NEAR_CALL", IAP_EVENT_C4H_02H) \
+__PMC_EV_ALIAS("BR_INST_RETIRED.ALL_BRANCHES", IAP_EVENT_C4H_04H) \
+__PMC_EV_ALIAS("BR_MISP_RETIRED.ALL_BRANCHES", IAP_EVENT_C5H_00H) \
+__PMC_EV_ALIAS("BR_MISP_RETIRED.NEAR_CALL", IAP_EVENT_C5H_02H) \
+__PMC_EV_ALIAS("SSEX_UOPS_RETIRED.PACKED_SINGLE", IAP_EVENT_C7H_01H) \
+__PMC_EV_ALIAS("SSEX_UOPS_RETIRED.SCALAR_SINGLE", IAP_EVENT_C7H_02H) \
+__PMC_EV_ALIAS("SSEX_UOPS_RETIRED.PACKED_DOUBLE", IAP_EVENT_C7H_04H) \
+__PMC_EV_ALIAS("SSEX_UOPS_RETIRED.SCALAR_DOUBLE", IAP_EVENT_C7H_08H) \
+__PMC_EV_ALIAS("SSEX_UOPS_RETIRED.VECTOR_INTEGER", IAP_EVENT_C7H_10H) \
+__PMC_EV_ALIAS("ITLB_MISS_RETIRED", IAP_EVENT_C8H_20H) \
+__PMC_EV_ALIAS("MEM_LOAD_RETIRED.L1D_HIT", IAP_EVENT_CBH_01H) \
+__PMC_EV_ALIAS("MEM_LOAD_RETIRED.L2_HIT", IAP_EVENT_CBH_02H) \
+__PMC_EV_ALIAS("MEM_LOAD_RETIRED.L3_UNSHARED_HIT", IAP_EVENT_CBH_04H) \
+__PMC_EV_ALIAS("MEM_LOAD_RETIRED.OTHER_CORE_L2_HIT_HITM", \
+ IAP_EVENT_CBH_08H) \
+__PMC_EV_ALIAS("MEM_LOAD_RETIRED.L3_MISS", IAP_EVENT_CBH_10H) \
+__PMC_EV_ALIAS("MEM_LOAD_RETIRED.HIT_LFB", IAP_EVENT_CBH_40H) \
+__PMC_EV_ALIAS("MEM_LOAD_RETIRED.DTLB_MISS", IAP_EVENT_CBH_80H) \
+__PMC_EV_ALIAS("FP_MMX_TRANS.TO_FP", IAP_EVENT_CCH_01H) \
+__PMC_EV_ALIAS("FP_MMX_TRANS.TO_MMX", IAP_EVENT_CCH_02H) \
+__PMC_EV_ALIAS("FP_MMX_TRANS.ANY", IAP_EVENT_CCH_03H) \
+__PMC_EV_ALIAS("MACRO_INSTS.DECODED", IAP_EVENT_D0H_01H) \
+__PMC_EV_ALIAS("UOPS_DECODED.MS", IAP_EVENT_D1H_02H) \
+__PMC_EV_ALIAS("UOPS_DECODED.ESP_FOLDING", IAP_EVENT_D1H_04H) \
+__PMC_EV_ALIAS("UOPS_DECODED.ESP_SYNC", IAP_EVENT_D1H_08H) \
+__PMC_EV_ALIAS("RAT_STALLS.FLAGS", IAP_EVENT_D2H_01H) \
+__PMC_EV_ALIAS("RAT_STALLS.REGISTERS", IAP_EVENT_D2H_02H) \
+__PMC_EV_ALIAS("RAT_STALLS.ROB_READ_PORT", IAP_EVENT_D2H_04H) \
+__PMC_EV_ALIAS("RAT_STALLS.SCOREBOARD", IAP_EVENT_D2H_08H) \
+__PMC_EV_ALIAS("RAT_STALLS.ANY", IAP_EVENT_D2H_0FH) \
+__PMC_EV_ALIAS("SEG_RENAME_STALLS", IAP_EVENT_D4H_01H) \
+__PMC_EV_ALIAS("ES_REG_RENAMES", IAP_EVENT_D5H_01H) \
+__PMC_EV_ALIAS("UOP_UNFUSION", IAP_EVENT_DBH_01H) \
+__PMC_EV_ALIAS("BR_INST_DECODED", IAP_EVENT_E0H_01H) \
+__PMC_EV_ALIAS("BPU_MISSED_CALL_RET", IAP_EVENT_E5H_01H) \
+__PMC_EV_ALIAS("BACLEAR.CLEAR", IAP_EVENT_E6H_01H) \
+__PMC_EV_ALIAS("BACLEAR.BAD_TARGET", IAP_EVENT_E6H_02H) \
+__PMC_EV_ALIAS("BPU_CLEARS.EARLY", IAP_EVENT_E8H_01H) \
+__PMC_EV_ALIAS("BPU_CLEARS.LATE", IAP_EVENT_E8H_02H) \
+__PMC_EV_ALIAS("L2_TRANSACTIONS.LOAD", IAP_EVENT_F0H_01H) \
+__PMC_EV_ALIAS("L2_TRANSACTIONS.RFO", IAP_EVENT_F0H_02H) \
+__PMC_EV_ALIAS("L2_TRANSACTIONS.IFETCH", IAP_EVENT_F0H_04H) \
+__PMC_EV_ALIAS("L2_TRANSACTIONS.PREFETCH", IAP_EVENT_F0H_08H) \
+__PMC_EV_ALIAS("L2_TRANSACTIONS.L1D_WB", IAP_EVENT_F0H_10H) \
+__PMC_EV_ALIAS("L2_TRANSACTIONS.FILL", IAP_EVENT_F0H_20H) \
+__PMC_EV_ALIAS("L2_TRANSACTIONS.WB", IAP_EVENT_F0H_40H) \
+__PMC_EV_ALIAS("L2_TRANSACTIONS.ANY", IAP_EVENT_F0H_80H) \
+__PMC_EV_ALIAS("L2_LINES_IN.S_STATE", IAP_EVENT_F1H_02H) \
+__PMC_EV_ALIAS("L2_LINES_IN.E_STATE", IAP_EVENT_F1H_04H) \
+__PMC_EV_ALIAS("L2_LINES_IN.ANY", IAP_EVENT_F1H_07H) \
+__PMC_EV_ALIAS("L2_LINES_OUT.DEMAND_CLEAN", IAP_EVENT_F2H_01H) \
+__PMC_EV_ALIAS("L2_LINES_OUT.DEMAND_DIRTY", IAP_EVENT_F2H_02H) \
+__PMC_EV_ALIAS("L2_LINES_OUT.PREFETCH_CLEAN", IAP_EVENT_F2H_04H) \
+__PMC_EV_ALIAS("L2_LINES_OUT.PREFETCH_DIRTY", IAP_EVENT_F2H_08H) \
+__PMC_EV_ALIAS("L2_LINES_OUT.ANY", IAP_EVENT_F2H_0FH) \
+__PMC_EV_ALIAS("SQ_MISC.SPLIT_LOCK", IAP_EVENT_F4H_10H) \
+__PMC_EV_ALIAS("SQ_FULL_STALL_CYCLES", IAP_EVENT_F6H_01H) \
+__PMC_EV_ALIAS("FP_ASSIST.ALL", IAP_EVENT_F7H_01H) \
+__PMC_EV_ALIAS("FP_ASSIST.OUTPUT", IAP_EVENT_F7H_02H) \
+__PMC_EV_ALIAS("FP_ASSIST.INPUT", IAP_EVENT_F7H_04H) \
+__PMC_EV_ALIAS("SIMD_INT_64.PACKED_MPY", IAP_EVENT_FDH_01H) \
+__PMC_EV_ALIAS("SIMD_INT_64.PACKED_SHIFT", IAP_EVENT_FDH_02H) \
+__PMC_EV_ALIAS("SIMD_INT_64.PACK", IAP_EVENT_FDH_04H) \
+__PMC_EV_ALIAS("SIMD_INT_64.UNPACK", IAP_EVENT_FDH_08H) \
+__PMC_EV_ALIAS("SIMD_INT_64.PACKED_LOGICAL", IAP_EVENT_FDH_10H) \
+__PMC_EV_ALIAS("SIMD_INT_64.PACKED_ARITH", IAP_EVENT_FDH_20H) \
+__PMC_EV_ALIAS("SIMD_INT_64.SHUFFLE_MOVE", IAP_EVENT_FDH_40H) \
+__PMC_EV_ALIAS_COREI7_OLD()
+
+/*
+ * Aliases for Westmere PMC events (253669-033US December 2009)
+ */
+#define __PMC_EV_ALIAS_WESTMERE() \
+__PMC_EV_ALIAS_INTEL_ARCHITECTURAL() \
+__PMC_EV_ALIAS("LOAD_BLOCK.OVERLAP_STORE", IAP_EVENT_03H_02H) \
+__PMC_EV_ALIAS("SB_DRAIN.ANY", IAP_EVENT_04H_07H) \
+__PMC_EV_ALIAS("MISALIGN_MEMORY.STORE", IAP_EVENT_05H_02H) \
+__PMC_EV_ALIAS("STORE_BLOCKS.AT_RET", IAP_EVENT_06H_04H) \
+__PMC_EV_ALIAS("STORE_BLOCKS.L1D_BLOCK", IAP_EVENT_06H_08H) \
+__PMC_EV_ALIAS("PARTIAL_ADDRESS_ALIAS", IAP_EVENT_07H_01H) \
+__PMC_EV_ALIAS("DTLB_LOAD_MISSES.ANY", IAP_EVENT_08H_01H) \
+__PMC_EV_ALIAS("DTLB_LOAD_MISSES.WALK_COMPLETED", IAP_EVENT_08H_02H) \
+__PMC_EV_ALIAS("DTLB_LOAD_MISSES.WALK_CYCLES", IAP_EVENT_08H_04H) \
+__PMC_EV_ALIAS("DTLB_LOAD_MISSES.STLB_HIT", IAP_EVENT_08H_10H) \
+__PMC_EV_ALIAS("DTLB_LOAD_MISSES.PDE_MISS", IAP_EVENT_08H_20H) \
+__PMC_EV_ALIAS("MEM_INST_RETIRED.LOADS", IAP_EVENT_0BH_01H) \
+__PMC_EV_ALIAS("MEM_INST_RETIRED.STORES", IAP_EVENT_0BH_02H) \
+__PMC_EV_ALIAS("MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD", \
+ IAP_EVENT_0BH_10H) \
+__PMC_EV_ALIAS("MEM_STORE_RETIRED.DTLB_MISS", IAP_EVENT_0CH_01H) \
+__PMC_EV_ALIAS("UOPS_ISSUED.ANY", IAP_EVENT_0EH_01H) \
+__PMC_EV_ALIAS("UOPS_ISSUED.STALLED_CYCLES", IAP_EVENT_0EH_01H) \
+__PMC_EV_ALIAS("UOPS_ISSUED.FUSED", IAP_EVENT_0EH_02H) \
+__PMC_EV_ALIAS("MEM_UNCORE_RETIRED.LOCAL_HITM", IAP_EVENT_0FH_02H) \
+__PMC_EV_ALIAS("MEM_UNCORE_RETIRED.LOCAL_DRAM_AND_REMOTE_CACHE_HIT", \
+ IAP_EVENT_0FH_08H) \
+__PMC_EV_ALIAS("MEM_UNCORE_RETIRED.LOCAL_DRAM", IAP_EVENT_0FH_10H) \
+__PMC_EV_ALIAS("MEM_UNCORE_RETIRED.REMOTE_DRAM", IAP_EVENT_0FH_20H) \
+__PMC_EV_ALIAS("MEM_UNCORE_RETIRED.UNCACHEABLE", IAP_EVENT_0FH_80H) \
+__PMC_EV_ALIAS("FP_COMP_OPS_EXE.X87", IAP_EVENT_10H_01H) \
+__PMC_EV_ALIAS("FP_COMP_OPS_EXE.MMX", IAP_EVENT_10H_02H) \
+__PMC_EV_ALIAS("FP_COMP_OPS_EXE.SSE_FP", IAP_EVENT_10H_04H) \
+__PMC_EV_ALIAS("FP_COMP_OPS_EXE.SSE2_INTEGER", IAP_EVENT_10H_08H) \
+__PMC_EV_ALIAS("FP_COMP_OPS_EXE.SSE_FP_PACKED", IAP_EVENT_10H_10H) \
+__PMC_EV_ALIAS("FP_COMP_OPS_EXE.SSE_FP_SCALAR", IAP_EVENT_10H_20H) \
+__PMC_EV_ALIAS("FP_COMP_OPS_EXE.SSE_SINGLE_PRECISION", \
+ IAP_EVENT_10H_40H) \
+__PMC_EV_ALIAS("FP_COMP_OPS_EXE.SSE_DOUBLE_PRECISION", \
+ IAP_EVENT_10H_80H) \
+__PMC_EV_ALIAS("SIMD_INT_128.PACKED_MPY", IAP_EVENT_12H_01H) \
+__PMC_EV_ALIAS("SIMD_INT_128.PACKED_SHIFT", IAP_EVENT_12H_02H) \
+__PMC_EV_ALIAS("SIMD_INT_128.PACK", IAP_EVENT_12H_04H) \
+__PMC_EV_ALIAS("SIMD_INT_128.UNPACK", IAP_EVENT_12H_08H) \
+__PMC_EV_ALIAS("SIMD_INT_128.PACKED_LOGICAL", IAP_EVENT_12H_10H) \
+__PMC_EV_ALIAS("SIMD_INT_128.PACKED_ARITH", IAP_EVENT_12H_20H) \
+__PMC_EV_ALIAS("SIMD_INT_128.SHUFFLE_MOVE", IAP_EVENT_12H_40H) \
+__PMC_EV_ALIAS("LOAD_DISPATCH.RS", IAP_EVENT_13H_01H) \
+__PMC_EV_ALIAS("LOAD_DISPATCH.RS_DELAYED", IAP_EVENT_13H_02H) \
+__PMC_EV_ALIAS("LOAD_DISPATCH.MOB", IAP_EVENT_13H_04H) \
+__PMC_EV_ALIAS("LOAD_DISPATCH.ANY", IAP_EVENT_13H_07H) \
+__PMC_EV_ALIAS("ARITH.CYCLES_DIV_BUSY", IAP_EVENT_14H_01H) \
+__PMC_EV_ALIAS("ARITH.MUL", IAP_EVENT_14H_02H) \
+__PMC_EV_ALIAS("INST_QUEUE_WRITES", IAP_EVENT_17H_01H) \
+__PMC_EV_ALIAS("INST_DECODED.DEC0", IAP_EVENT_18H_01H) \
+__PMC_EV_ALIAS("TWO_UOP_INSTS_DECODED", IAP_EVENT_19H_01H) \
+__PMC_EV_ALIAS("INST_QUEUE_WRITE_CYCLES", IAP_EVENT_1EH_01H) \
+__PMC_EV_ALIAS("LSD_OVERFLOW", IAP_EVENT_20H_01H) \
+__PMC_EV_ALIAS("L2_RQSTS.LD_HIT", IAP_EVENT_24H_01H) \
+__PMC_EV_ALIAS("L2_RQSTS.LD_MISS", IAP_EVENT_24H_02H) \
+__PMC_EV_ALIAS("L2_RQSTS.LOADS", IAP_EVENT_24H_03H) \
+__PMC_EV_ALIAS("L2_RQSTS.RFO_HIT", IAP_EVENT_24H_04H) \
+__PMC_EV_ALIAS("L2_RQSTS.RFO_MISS", IAP_EVENT_24H_08H) \
+__PMC_EV_ALIAS("L2_RQSTS.RFOS", IAP_EVENT_24H_0CH) \
+__PMC_EV_ALIAS("L2_RQSTS.IFETCH_HIT", IAP_EVENT_24H_10H) \
+__PMC_EV_ALIAS("L2_RQSTS.IFETCH_MISS", IAP_EVENT_24H_20H) \
+__PMC_EV_ALIAS("L2_RQSTS.IFETCHES", IAP_EVENT_24H_30H) \
+__PMC_EV_ALIAS("L2_RQSTS.PREFETCH_HIT", IAP_EVENT_24H_40H) \
+__PMC_EV_ALIAS("L2_RQSTS.PREFETCH_MISS", IAP_EVENT_24H_80H) \
+__PMC_EV_ALIAS("L2_RQSTS.PREFETCHES", IAP_EVENT_24H_C0H) \
+__PMC_EV_ALIAS("L2_RQSTS.MISS", IAP_EVENT_24H_AAH) \
+__PMC_EV_ALIAS("L2_RQSTS.REFERENCES", IAP_EVENT_24H_FFH) \
+__PMC_EV_ALIAS("L2_DATA_RQSTS.DEMAND.I_STATE", IAP_EVENT_26H_01H) \
+__PMC_EV_ALIAS("L2_DATA_RQSTS.DEMAND.S_STATE", IAP_EVENT_26H_02H) \
+__PMC_EV_ALIAS("L2_DATA_RQSTS.DEMAND.E_STATE", IAP_EVENT_26H_04H) \
+__PMC_EV_ALIAS("L2_DATA_RQSTS.DEMAND.M_STATE", IAP_EVENT_26H_08H) \
+__PMC_EV_ALIAS("L2_DATA_RQSTS.DEMAND.MESI", IAP_EVENT_26H_0FH) \
+__PMC_EV_ALIAS("L2_DATA_RQSTS.PREFETCH.I_STATE", IAP_EVENT_26H_10H) \
+__PMC_EV_ALIAS("L2_DATA_RQSTS.PREFETCH.S_STATE", IAP_EVENT_26H_20H) \
+__PMC_EV_ALIAS("L2_DATA_RQSTS.PREFETCH.E_STATE", IAP_EVENT_26H_40H) \
+__PMC_EV_ALIAS("L2_DATA_RQSTS.PREFETCH.M_STATE", IAP_EVENT_26H_80H) \
+__PMC_EV_ALIAS("L2_DATA_RQSTS.PREFETCH.MESI", IAP_EVENT_26H_F0H) \
+__PMC_EV_ALIAS("L2_DATA_RQSTS.ANY", IAP_EVENT_26H_FFH) \
+__PMC_EV_ALIAS("L2_WRITE.RFO.I_STATE", IAP_EVENT_27H_01H) \
+__PMC_EV_ALIAS("L2_WRITE.RFO.S_STATE", IAP_EVENT_27H_02H) \
+__PMC_EV_ALIAS("L2_WRITE.RFO.M_STATE", IAP_EVENT_27H_08H) \
+__PMC_EV_ALIAS("L2_WRITE.RFO.HIT", IAP_EVENT_27H_0EH) \
+__PMC_EV_ALIAS("L2_WRITE.RFO.MESI", IAP_EVENT_27H_0FH) \
+__PMC_EV_ALIAS("L2_WRITE.LOCK.I_STATE", IAP_EVENT_27H_10H) \
+__PMC_EV_ALIAS("L2_WRITE.LOCK.S_STATE", IAP_EVENT_27H_20H) \
+__PMC_EV_ALIAS("L2_WRITE.LOCK.E_STATE", IAP_EVENT_27H_40H) \
+__PMC_EV_ALIAS("L2_WRITE.LOCK.M_STATE", IAP_EVENT_27H_80H) \
+__PMC_EV_ALIAS("L2_WRITE.LOCK.HIT", IAP_EVENT_27H_E0H) \
+__PMC_EV_ALIAS("L2_WRITE.LOCK.MESI", IAP_EVENT_27H_F0H) \
+__PMC_EV_ALIAS("L1D_WB_L2.I_STATE", IAP_EVENT_28H_01H) \
+__PMC_EV_ALIAS("L1D_WB_L2.S_STATE", IAP_EVENT_28H_02H) \
+__PMC_EV_ALIAS("L1D_WB_L2.E_STATE", IAP_EVENT_28H_04H) \
+__PMC_EV_ALIAS("L1D_WB_L2.M_STATE", IAP_EVENT_28H_08H) \
+__PMC_EV_ALIAS("L1D_WB_L2.MESI", IAP_EVENT_28H_0FH) \
+__PMC_EV_ALIAS("L3_LAT_CACHE.REFERENCE", IAP_EVENT_2EH_02H) \
+__PMC_EV_ALIAS("L3_LAT_CACHE.MISS", IAP_EVENT_2EH_01H) \
+__PMC_EV_ALIAS("CPU_CLK_UNHALTED.THREAD_P", IAP_EVENT_3CH_00H) \
+__PMC_EV_ALIAS("CPU_CLK_UNHALTED.REF_P", IAP_EVENT_3CH_01H) \
+__PMC_EV_ALIAS("DTLB_MISSES.ANY", IAP_EVENT_49H_01H) \
+__PMC_EV_ALIAS("DTLB_MISSES.WALK_COMPLETED", IAP_EVENT_49H_02H) \
+__PMC_EV_ALIAS("DTLB_MISSES.WALK_CYCLES", IAP_EVENT_49H_04H) \
+__PMC_EV_ALIAS("DTLB_MISSES.STLB_HIT", IAP_EVENT_49H_10H) \
+__PMC_EV_ALIAS("DTLB_MISSES.LARGE_WALK_COMPLETED", IAP_EVENT_49H_80H) \
+__PMC_EV_ALIAS("LOAD_HIT_PRE", IAP_EVENT_4CH_01H) \
+__PMC_EV_ALIAS("L1D_PREFETCH.REQUESTS", IAP_EVENT_4EH_01H) \
+__PMC_EV_ALIAS("L1D_PREFETCH.MISS", IAP_EVENT_4EH_02H) \
+__PMC_EV_ALIAS("L1D_PREFETCH.TRIGGERS", IAP_EVENT_4EH_04H) \
+__PMC_EV_ALIAS("EPT.WALK_CYCLES", IAP_EVENT_4FH_10H) \
+__PMC_EV_ALIAS("L1D.REPL", IAP_EVENT_51H_01H) \
+__PMC_EV_ALIAS("L1D.M_REPL", IAP_EVENT_51H_02H) \
+__PMC_EV_ALIAS("L1D.M_EVICT", IAP_EVENT_51H_04H) \
+__PMC_EV_ALIAS("L1D.M_SNOOP_EVICT", IAP_EVENT_51H_08H) \
+__PMC_EV_ALIAS("L1D_CACHE_PREFETCH_LOCK_FB_HIT", IAP_EVENT_52H_01H) \
+__PMC_EV_ALIAS("L1D_CACHE_LOCK_FB_HIT", IAP_EVENT_53H_01H) \
+__PMC_EV_ALIAS("OFFCORE_REQUESTS_OUTSTANDING.DEMAND.READ_DATA", \
+ IAP_EVENT_60H_01H) \
+__PMC_EV_ALIAS("OFFCORE_REQUESTS_OUTSTANDING.DEMAND.READ_CODE", \
+ IAP_EVENT_60H_02H) \
+__PMC_EV_ALIAS("OFFCORE_REQUESTS_OUTSTANDING.DEMAND.RFO", \
+ IAP_EVENT_60H_04H) \
+__PMC_EV_ALIAS("OFFCORE_REQUESTS_OUTSTANDING.ANY.READ", \
+ IAP_EVENT_60H_08H) \
+__PMC_EV_ALIAS("CACHE_LOCK_CYCLES.L1D_L2", IAP_EVENT_63H_01H) \
+__PMC_EV_ALIAS("CACHE_LOCK_CYCLES.L1D", IAP_EVENT_63H_02H) \
+__PMC_EV_ALIAS("IO_TRANSACTIONS", IAP_EVENT_6CH_01H) \
+__PMC_EV_ALIAS("L1I.HITS", IAP_EVENT_80H_01H) \
+__PMC_EV_ALIAS("L1I.MISSES", IAP_EVENT_80H_02H) \
+__PMC_EV_ALIAS("L1I.READS", IAP_EVENT_80H_03H) \
+__PMC_EV_ALIAS("L1I.CYCLES_STALLED", IAP_EVENT_80H_04H) \
+__PMC_EV_ALIAS("LARGE_ITLB.HIT", IAP_EVENT_82H_01H) \
+__PMC_EV_ALIAS("ITLB_MISSES.ANY", IAP_EVENT_85H_01H) \
+__PMC_EV_ALIAS("ITLB_MISSES.WALK_COMPLETED", IAP_EVENT_85H_02H) \
+__PMC_EV_ALIAS("ITLB_MISSES.WALK_CYCLES", IAP_EVENT_85H_04H) \
+__PMC_EV_ALIAS("ITLB_MISSES.LARGE_WALK_COMPLETED", IAP_EVENT_85H_80H) \
+__PMC_EV_ALIAS("ILD_STALL.LCP", IAP_EVENT_87H_01H) \
+__PMC_EV_ALIAS("ILD_STALL.MRU", IAP_EVENT_87H_02H) \
+__PMC_EV_ALIAS("ILD_STALL.IQ_FULL", IAP_EVENT_87H_04H) \
+__PMC_EV_ALIAS("ILD_STALL.REGEN", IAP_EVENT_87H_08H) \
+__PMC_EV_ALIAS("ILD_STALL.ANY", IAP_EVENT_87H_0FH) \
+__PMC_EV_ALIAS("BR_INST_EXEC.COND", IAP_EVENT_88H_01H) \
+__PMC_EV_ALIAS("BR_INST_EXEC.DIRECT", IAP_EVENT_88H_02H) \
+__PMC_EV_ALIAS("BR_INST_EXEC.INDIRECT_NON_CALL", IAP_EVENT_88H_04H) \
+__PMC_EV_ALIAS("BR_INST_EXEC.NON_CALLS", IAP_EVENT_88H_07H) \
+__PMC_EV_ALIAS("BR_INST_EXEC.RETURN_NEAR", IAP_EVENT_88H_08H) \
+__PMC_EV_ALIAS("BR_INST_EXEC.DIRECT_NEAR_CALL", IAP_EVENT_88H_10H) \
+__PMC_EV_ALIAS("BR_INST_EXEC.INDIRECT_NEAR_CALL", IAP_EVENT_88H_20H) \
+__PMC_EV_ALIAS("BR_INST_EXEC.NEAR_CALLS", IAP_EVENT_88H_30H) \
+__PMC_EV_ALIAS("BR_INST_EXEC.TAKEN", IAP_EVENT_88H_40H) \
+__PMC_EV_ALIAS("BR_INST_EXEC.ANY", IAP_EVENT_88H_7FH) \
+__PMC_EV_ALIAS("BR_MISP_EXEC.COND", IAP_EVENT_89H_01H) \
+__PMC_EV_ALIAS("BR_MISP_EXEC.DIRECT", IAP_EVENT_89H_02H) \
+__PMC_EV_ALIAS("BR_MISP_EXEC.INDIRECT_NON_CALL", IAP_EVENT_89H_04H) \
+__PMC_EV_ALIAS("BR_MISP_EXEC.NON_CALLS", IAP_EVENT_89H_07H) \
+__PMC_EV_ALIAS("BR_MISP_EXEC.RETURN_NEAR", IAP_EVENT_89H_08H) \
+__PMC_EV_ALIAS("BR_MISP_EXEC.DIRECT_NEAR_CALL", IAP_EVENT_89H_10H) \
+__PMC_EV_ALIAS("BR_MISP_EXEC.INDIRECT_NEAR_CALL", IAP_EVENT_89H_20H) \
+__PMC_EV_ALIAS("BR_MISP_EXEC.NEAR_CALLS", IAP_EVENT_89H_30H) \
+__PMC_EV_ALIAS("BR_MISP_EXEC.TAKEN", IAP_EVENT_89H_40H) \
+__PMC_EV_ALIAS("BR_MISP_EXEC.ANY", IAP_EVENT_89H_7FH) \
+__PMC_EV_ALIAS("RESOURCE_STALLS.ANY", IAP_EVENT_A2H_01H) \
+__PMC_EV_ALIAS("RESOURCE_STALLS.LOAD", IAP_EVENT_A2H_02H) \
+__PMC_EV_ALIAS("RESOURCE_STALLS.RS_FULL", IAP_EVENT_A2H_04H) \
+__PMC_EV_ALIAS("RESOURCE_STALLS.STORE", IAP_EVENT_A2H_08H) \
+__PMC_EV_ALIAS("RESOURCE_STALLS.ROB_FULL", IAP_EVENT_A2H_10H) \
+__PMC_EV_ALIAS("RESOURCE_STALLS.FPCW", IAP_EVENT_A2H_20H) \
+__PMC_EV_ALIAS("RESOURCE_STALLS.MXCSR", IAP_EVENT_A2H_40H) \
+__PMC_EV_ALIAS("RESOURCE_STALLS.OTHER", IAP_EVENT_A2H_80H) \
+__PMC_EV_ALIAS("MACRO_INSTS.FUSIONS_DECODED", IAP_EVENT_A6H_01H) \
+__PMC_EV_ALIAS("BACLEAR_FORCE_IQ", IAP_EVENT_A7H_01H) \
+__PMC_EV_ALIAS("LSD.UOPS", IAP_EVENT_A8H_01H) \
+__PMC_EV_ALIAS("ITLB_FLUSH", IAP_EVENT_AEH_01H) \
+__PMC_EV_ALIAS("OFFCORE_REQUESTS.DEMAND.READ_DATA", IAP_EVENT_B0H_01H) \
+__PMC_EV_ALIAS("OFFCORE_REQUESTS.DEMAND.READ_CODE", IAP_EVENT_B0H_02H) \
+__PMC_EV_ALIAS("OFFCORE_REQUESTS.DEMAND.RFO", IAP_EVENT_B0H_04H) \
+__PMC_EV_ALIAS("OFFCORE_REQUESTS.ANY.READ", IAP_EVENT_B0H_08H) \
+__PMC_EV_ALIAS("OFFCORE_REQUESTS.ANY.RFO", IAP_EVENT_B0H_10H) \
+__PMC_EV_ALIAS("OFFCORE_REQUESTS.L1D_WRITEBACK", IAP_EVENT_B0H_40H) \
+__PMC_EV_ALIAS("OFFCORE_REQUESTS.ANY", IAP_EVENT_B0H_80H) \
+__PMC_EV_ALIAS("UOPS_EXECUTED.PORT0", IAP_EVENT_B1H_01H) \
+__PMC_EV_ALIAS("UOPS_EXECUTED.PORT1", IAP_EVENT_B1H_02H) \
+__PMC_EV_ALIAS("UOPS_EXECUTED.PORT2_CORE", IAP_EVENT_B1H_04H) \
+__PMC_EV_ALIAS("UOPS_EXECUTED.PORT3_CORE", IAP_EVENT_B1H_08H) \
+__PMC_EV_ALIAS("UOPS_EXECUTED.PORT4_CORE", IAP_EVENT_B1H_10H) \
+__PMC_EV_ALIAS("UOPS_EXECUTED.CORE_ACTIVE_CYCLES_NO_PORT5", \
+ IAP_EVENT_B1H_1FH) \
+__PMC_EV_ALIAS("UOPS_EXECUTED.PORT5", IAP_EVENT_B1H_20H) \
+__PMC_EV_ALIAS("UOPS_EXECUTED.CORE_ACTIVE_CYCLES", IAP_EVENT_B1H_3FH) \
+__PMC_EV_ALIAS("UOPS_EXECUTED.PORT015", IAP_EVENT_B1H_40H) \
+__PMC_EV_ALIAS("UOPS_EXECUTED.PORT234", IAP_EVENT_B1H_80H) \
+__PMC_EV_ALIAS("OFFCORE_REQUESTS_SQ_FULL", IAP_EVENT_B2H_01H) \
+__PMC_EV_ALIAS("SNOOPQ_REQUESTS_OUTSTANDING.DATA", IAP_EVENT_B3H_01H) \
+__PMC_EV_ALIAS("SNOOPQ_REQUESTS_OUTSTANDING.INVALIDATE", \
+ IAP_EVENT_B3H_02H) \
+__PMC_EV_ALIAS("SNOOPQ_REQUESTS_OUTSTANDING.CODE", IAP_EVENT_B3H_04H) \
+__PMC_EV_ALIAS("SNOOPQ_REQUESTS.CODE", IAP_EVENT_B4H_01H) \
+__PMC_EV_ALIAS("SNOOPQ_REQUESTS.DATA", IAP_EVENT_B4H_02H) \
+__PMC_EV_ALIAS("SNOOPQ_REQUESTS.INVALIDATE", IAP_EVENT_B4H_04H) \
+__PMC_EV_ALIAS("OFF_CORE_RESPONSE_0", IAP_EVENT_B7H_01H) \
+__PMC_EV_ALIAS("SNOOP_RESPONSE.HIT", IAP_EVENT_B8H_01H) \
+__PMC_EV_ALIAS("SNOOP_RESPONSE.HITE", IAP_EVENT_B8H_02H) \
+__PMC_EV_ALIAS("SNOOP_RESPONSE.HITM", IAP_EVENT_B8H_04H) \
+__PMC_EV_ALIAS("OFF_CORE_RESPONSE_1", IAP_EVENT_BBH_01H) \
+__PMC_EV_ALIAS("INST_RETIRED.ANY_P", IAP_EVENT_C0H_01H) \
+__PMC_EV_ALIAS("INST_RETIRED.X87", IAP_EVENT_C0H_02H) \
+__PMC_EV_ALIAS("INST_RETIRED.MMX", IAP_EVENT_C0H_04H) \
+__PMC_EV_ALIAS("UOPS_RETIRED.ANY", IAP_EVENT_C2H_01H) \
+__PMC_EV_ALIAS("UOPS_RETIRED.RETIRE_SLOTS", IAP_EVENT_C2H_02H) \
+__PMC_EV_ALIAS("UOPS_RETIRED.MACRO_FUSED", IAP_EVENT_C2H_04H) \
+__PMC_EV_ALIAS("MACHINE_CLEARS.CYCLES", IAP_EVENT_C3H_01H) \
+__PMC_EV_ALIAS("MACHINE_CLEARS.MEM_ORDER", IAP_EVENT_C3H_02H) \
+__PMC_EV_ALIAS("MACHINE_CLEARS.SMC", IAP_EVENT_C3H_04H) \
+__PMC_EV_ALIAS("BR_INST_RETIRED.ANY_P", IAP_EVENT_C4H_00H) \
+__PMC_EV_ALIAS("BR_INST_RETIRED.CONDITIONAL", IAP_EVENT_C4H_01H) \
+__PMC_EV_ALIAS("BR_INST_RETIRED.NEAR_CALL", IAP_EVENT_C4H_02H) \
+__PMC_EV_ALIAS("BR_INST_RETIRED.ALL_BRANCHES", IAP_EVENT_C4H_04H) \
+__PMC_EV_ALIAS("BR_MISP_RETIRED.ANY_P", IAP_EVENT_C5H_00H) \
+__PMC_EV_ALIAS("BR_MISP_RETIRED.CONDITIONAL", IAP_EVENT_C5H_01H) \
+__PMC_EV_ALIAS("BR_MISP_RETIRED.NEAR_CALL", IAP_EVENT_C5H_02H) \
+__PMC_EV_ALIAS("BR_MISP_RETIRED.ALL_BRANCHES", IAP_EVENT_C5H_04H) \
+__PMC_EV_ALIAS("SSEX_UOPS_RETIRED.PACKED_SINGLE", IAP_EVENT_C7H_01H) \
+__PMC_EV_ALIAS("SSEX_UOPS_RETIRED.SCALAR_SINGLE", IAP_EVENT_C7H_02H) \
+__PMC_EV_ALIAS("SSEX_UOPS_RETIRED.PACKED_DOUBLE", IAP_EVENT_C7H_04H) \
+__PMC_EV_ALIAS("SSEX_UOPS_RETIRED.SCALAR_DOUBLE", IAP_EVENT_C7H_08H) \
+__PMC_EV_ALIAS("SSEX_UOPS_RETIRED.VECTOR_INTEGER", IAP_EVENT_C7H_10H) \
+__PMC_EV_ALIAS("ITLB_MISS_RETIRED", IAP_EVENT_C8H_20H) \
+__PMC_EV_ALIAS("MEM_LOAD_RETIRED.L1D_HIT", IAP_EVENT_CBH_01H) \
+__PMC_EV_ALIAS("MEM_LOAD_RETIRED.L2_HIT", IAP_EVENT_CBH_02H) \
+__PMC_EV_ALIAS("MEM_LOAD_RETIRED.L3_UNSHARED_HIT", IAP_EVENT_CBH_04H) \
+__PMC_EV_ALIAS("MEM_LOAD_RETIRED.OTHER_CORE_L2_HIT_HITM", \
+ IAP_EVENT_CBH_08H) \
+__PMC_EV_ALIAS("MEM_LOAD_RETIRED.L3_MISS", IAP_EVENT_CBH_10H) \
+__PMC_EV_ALIAS("MEM_LOAD_RETIRED.HIT_LFB", IAP_EVENT_CBH_40H) \
+__PMC_EV_ALIAS("MEM_LOAD_RETIRED.DTLB_MISS", IAP_EVENT_CBH_80H) \
+__PMC_EV_ALIAS("FP_MMX_TRANS.TO_FP", IAP_EVENT_CCH_01H) \
+__PMC_EV_ALIAS("FP_MMX_TRANS.TO_MMX", IAP_EVENT_CCH_02H) \
+__PMC_EV_ALIAS("FP_MMX_TRANS.ANY", IAP_EVENT_CCH_03H) \
+__PMC_EV_ALIAS("MACRO_INSTS.DECODED", IAP_EVENT_D0H_01H) \
+__PMC_EV_ALIAS("UOPS_DECODED.STALL_CYCLES", IAP_EVENT_D1H_01H) \
+__PMC_EV_ALIAS("UOPS_DECODED.MS", IAP_EVENT_D1H_02H) \
+__PMC_EV_ALIAS("UOPS_DECODED.ESP_FOLDING", IAP_EVENT_D1H_04H) \
+__PMC_EV_ALIAS("UOPS_DECODED.ESP_SYNC", IAP_EVENT_D1H_08H) \
+__PMC_EV_ALIAS("RAT_STALLS.FLAGS", IAP_EVENT_D2H_01H) \
+__PMC_EV_ALIAS("RAT_STALLS.REGISTERS", IAP_EVENT_D2H_02H) \
+__PMC_EV_ALIAS("RAT_STALLS.ROB_READ_PORT", IAP_EVENT_D2H_04H) \
+__PMC_EV_ALIAS("RAT_STALLS.SCOREBOARD", IAP_EVENT_D2H_08H) \
+__PMC_EV_ALIAS("RAT_STALLS.ANY", IAP_EVENT_D2H_0FH) \
+__PMC_EV_ALIAS("SEG_RENAME_STALLS", IAP_EVENT_D4H_01H) \
+__PMC_EV_ALIAS("ES_REG_RENAMES", IAP_EVENT_D5H_01H) \
+__PMC_EV_ALIAS("UOP_UNFUSION", IAP_EVENT_DBH_01H) \
+__PMC_EV_ALIAS("BR_INST_DECODED", IAP_EVENT_E0H_01H) \
+__PMC_EV_ALIAS("BPU_MISSED_CALL_RET", IAP_EVENT_E5H_01H) \
+__PMC_EV_ALIAS("BACLEAR.CLEAR", IAP_EVENT_E6H_01H) \
+__PMC_EV_ALIAS("BACLEAR.BAD_TARGET", IAP_EVENT_E6H_02H) \
+__PMC_EV_ALIAS("BPU_CLEARS.EARLY", IAP_EVENT_E8H_01H) \
+__PMC_EV_ALIAS("BPU_CLEARS.LATE", IAP_EVENT_E8H_02H) \
+__PMC_EV_ALIAS("THREAD_ACTIVE", IAP_EVENT_ECH_01H) \
+__PMC_EV_ALIAS("L2_TRANSACTIONS.LOAD", IAP_EVENT_F0H_01H) \
+__PMC_EV_ALIAS("L2_TRANSACTIONS.RFO", IAP_EVENT_F0H_02H) \
+__PMC_EV_ALIAS("L2_TRANSACTIONS.IFETCH", IAP_EVENT_F0H_04H) \
+__PMC_EV_ALIAS("L2_TRANSACTIONS.PREFETCH", IAP_EVENT_F0H_08H) \
+__PMC_EV_ALIAS("L2_TRANSACTIONS.L1D_WB", IAP_EVENT_F0H_10H) \
+__PMC_EV_ALIAS("L2_TRANSACTIONS.FILL", IAP_EVENT_F0H_20H) \
+__PMC_EV_ALIAS("L2_TRANSACTIONS.WB", IAP_EVENT_F0H_40H) \
+__PMC_EV_ALIAS("L2_TRANSACTIONS.ANY", IAP_EVENT_F0H_80H) \
+__PMC_EV_ALIAS("L2_LINES_IN.S_STATE", IAP_EVENT_F1H_02H) \
+__PMC_EV_ALIAS("L2_LINES_IN.E_STATE", IAP_EVENT_F1H_04H) \
+__PMC_EV_ALIAS("L2_LINES_IN.ANY", IAP_EVENT_F1H_07H) \
+__PMC_EV_ALIAS("L2_LINES_OUT.DEMAND_CLEAN", IAP_EVENT_F2H_01H) \
+__PMC_EV_ALIAS("L2_LINES_OUT.DEMAND_DIRTY", IAP_EVENT_F2H_02H) \
+__PMC_EV_ALIAS("L2_LINES_OUT.PREFETCH_CLEAN", IAP_EVENT_F2H_04H) \
+__PMC_EV_ALIAS("L2_LINES_OUT.PREFETCH_DIRTY", IAP_EVENT_F2H_08H) \
+__PMC_EV_ALIAS("L2_LINES_OUT.ANY", IAP_EVENT_F2H_0FH) \
+__PMC_EV_ALIAS("SQ_MISC.LRU_HINTS", IAP_EVENT_F4H_04H) \
+__PMC_EV_ALIAS("SQ_MISC.SPLIT_LOCK", IAP_EVENT_F4H_10H) \
+__PMC_EV_ALIAS("SQ_FULL_STALL_CYCLES", IAP_EVENT_F6H_01H) \
+__PMC_EV_ALIAS("FP_ASSIST.ALL", IAP_EVENT_F7H_01H) \
+__PMC_EV_ALIAS("FP_ASSIST.OUTPUT", IAP_EVENT_F7H_02H) \
+__PMC_EV_ALIAS("FP_ASSIST.INPUT", IAP_EVENT_F7H_04H) \
+__PMC_EV_ALIAS("SIMD_INT_64.PACKED_MPY", IAP_EVENT_FDH_01H) \
+__PMC_EV_ALIAS("SIMD_INT_64.PACKED_SHIFT", IAP_EVENT_FDH_02H) \
+__PMC_EV_ALIAS("SIMD_INT_64.PACK", IAP_EVENT_FDH_04H) \
+__PMC_EV_ALIAS("SIMD_INT_64.UNPACK", IAP_EVENT_FDH_08H) \
+__PMC_EV_ALIAS("SIMD_INT_64.PACKED_LOGICAL", IAP_EVENT_FDH_10H) \
+__PMC_EV_ALIAS("SIMD_INT_64.PACKED_ARITH", IAP_EVENT_FDH_20H) \
+__PMC_EV_ALIAS("SIMD_INT_64.SHUFFLE_MOVE", IAP_EVENT_FDH_40H)
+
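+/*
+ * Aliases for Ivy Bridge PMC events
+ */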
+#define __PMC_EV_ALIAS_IVYBRIDGE() \
+__PMC_EV_ALIAS("LD_BLOCKS.STORE_FORWARD", IAP_EVENT_03H_02H) \
+__PMC_EV_ALIAS("MISALIGN_MEM_REF.LOADS", IAP_EVENT_05H_01H) \
+__PMC_EV_ALIAS("MISALIGN_MEM_REF.STORES", IAP_EVENT_05H_02H) \
+__PMC_EV_ALIAS("LD_BLOCKS_PARTIAL.ADDRESS_ALIAS", IAP_EVENT_07H_01H) \
+__PMC_EV_ALIAS("DTLB_LOAD_MISSES.DEMAND_LD_MISS_CAUSES_A_WALK", IAP_EVENT_08H_81H) \
+__PMC_EV_ALIAS("DTLB_LOAD_MISSES.DEMAND_LD_WALK_COMPLETED", IAP_EVENT_08H_82H) \
+__PMC_EV_ALIAS("DTLB_LOAD_MISSES.DEMAND_LD_WALK_DURATION", IAP_EVENT_08H_84H) \
+__PMC_EV_ALIAS("UOPS_ISSUED.ANY", IAP_EVENT_0EH_01H) \
+__PMC_EV_ALIAS("UOPS_ISSUED.FLAGS_MERGE", IAP_EVENT_0EH_10H) \
+__PMC_EV_ALIAS("UOPS_ISSUED.SLOW_LEA", IAP_EVENT_0EH_20H) \
+__PMC_EV_ALIAS("UOPS_ISSUED.SINGLE_MUL", IAP_EVENT_0EH_40H) \
+__PMC_EV_ALIAS("ARITH.FPU_DIV_ACTIVE", IAP_EVENT_14H_01H) \
+__PMC_EV_ALIAS("L2_RQSTS.DEMAND_DATA_RD_HIT", IAP_EVENT_24H_01H) \
+__PMC_EV_ALIAS("L2_RQSTS.ALL_DEMAND_DATA_RD", IAP_EVENT_24H_03H) \
+__PMC_EV_ALIAS("L2_RQSTS.RFO_HITS", IAP_EVENT_24H_04H) \
+__PMC_EV_ALIAS("L2_RQSTS.RFO_MISS", IAP_EVENT_24H_08H) \
+__PMC_EV_ALIAS("L2_RQSTS.ALL_RFO", IAP_EVENT_24H_0CH) \
+__PMC_EV_ALIAS("L2_RQSTS.CODE_RD_HIT", IAP_EVENT_24H_10H) \
+__PMC_EV_ALIAS("L2_RQSTS.CODE_RD_MISS", IAP_EVENT_24H_20H) \
+__PMC_EV_ALIAS("L2_RQSTS.ALL_CODE_RD", IAP_EVENT_24H_30H) \
+__PMC_EV_ALIAS("L2_RQSTS.PF_HIT", IAP_EVENT_24H_40H) \
+__PMC_EV_ALIAS("L2_RQSTS.PF_MISS", IAP_EVENT_24H_80H) \
+__PMC_EV_ALIAS("L2_RQSTS.ALL_PF", IAP_EVENT_24H_C0H) \
+__PMC_EV_ALIAS("L2_STORE_LOCK_RQSTS.MISS", IAP_EVENT_27H_01H) \
+__PMC_EV_ALIAS("L2_STORE_LOCK_RQSTS.HIT_M", IAP_EVENT_27H_08H) \
+__PMC_EV_ALIAS("L2_STORE_LOCK_RQSTS.ALL", IAP_EVENT_27H_0FH) \
+__PMC_EV_ALIAS("L2_L1D_WB_RQSTS.MISS", IAP_EVENT_28H_01H) \
+__PMC_EV_ALIAS("L2_L1D_WB_RQSTS.HIT_E", IAP_EVENT_28H_04H) \
+__PMC_EV_ALIAS("L2_L1D_WB_RQSTS.HIT_M", IAP_EVENT_28H_08H) \
+__PMC_EV_ALIAS("L2_L1D_WB_RQSTS.ALL", IAP_EVENT_28H_0FH) \
+__PMC_EV_ALIAS("LONGEST_LAT_CACHE.REFERENCE", IAP_EVENT_2EH_4FH) \
+__PMC_EV_ALIAS("LONGEST_LAT_CACHE.MISS", IAP_EVENT_2EH_41H) \
+__PMC_EV_ALIAS("CPU_CLK_UNHALTED.THREAD_P", IAP_EVENT_3CH_00H) \
+__PMC_EV_ALIAS("CPU_CLK_THREAD_UNHALTED.REF_XCLK", IAP_EVENT_3CH_01H) \
+__PMC_EV_ALIAS("L1D_PEND_MISS.PENDING", IAP_EVENT_48H_01H) \
+__PMC_EV_ALIAS("DTLB_STORE_MISSES.MISS_CAUSES_A_WALK", IAP_EVENT_49H_01H) \
+__PMC_EV_ALIAS("DTLB_STORE_MISSES.WALK_COMPLETED", IAP_EVENT_49H_02H) \
+__PMC_EV_ALIAS("DTLB_STORE_MISSES.WALK_DURATION", IAP_EVENT_49H_04H) \
+__PMC_EV_ALIAS("DTLB_STORE_MISSES.STLB_HIT", IAP_EVENT_49H_10H) \
+__PMC_EV_ALIAS("LOAD_HIT_PRE.SW_PF", IAP_EVENT_4CH_01H) \
+__PMC_EV_ALIAS("LOAD_HIT_PRE.HW_PF", IAP_EVENT_4CH_02H) \
+__PMC_EV_ALIAS("L1D.REPLACEMENT", IAP_EVENT_51H_01H) \
+__PMC_EV_ALIAS("MOVE_ELIMINATION.INT_NOT_ELIMINATED", IAP_EVENT_58H_01H) \
+__PMC_EV_ALIAS("MOVE_ELIMINATION.SIMD_NOT_ELIMINATED", IAP_EVENT_58H_02H) \
+__PMC_EV_ALIAS("MOVE_ELIMINATION.INT_ELIMINATED", IAP_EVENT_58H_04H) \
+__PMC_EV_ALIAS("MOVE_ELIMINATION.SIMD_ELIMINATED", IAP_EVENT_58H_08H) \
+__PMC_EV_ALIAS("CPL_CYCLES.RING0", IAP_EVENT_5CH_01H) \
+__PMC_EV_ALIAS("CPL_CYCLES.RING123", IAP_EVENT_5CH_02H) \
+__PMC_EV_ALIAS("RS_EVENTS.EMPTY_CYCLES", IAP_EVENT_5EH_01H) \
+__PMC_EV_ALIAS("TLB_ACCESS.LOAD_STLB_HIT", IAP_EVENT_5FH_01H) \
+__PMC_EV_ALIAS("OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD", IAP_EVENT_60H_01H) \
+__PMC_EV_ALIAS("OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD", IAP_EVENT_60H_02H) \
+__PMC_EV_ALIAS("OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO", IAP_EVENT_60H_04H) \
+__PMC_EV_ALIAS("OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD", IAP_EVENT_60H_08H) \
+__PMC_EV_ALIAS("LOCK_CYCLES.SPLIT_LOCK_UC_LOCK_DURATION", IAP_EVENT_63H_01H) \
+__PMC_EV_ALIAS("LOCK_CYCLES.CACHE_LOCK_DURATION", IAP_EVENT_63H_02H) \
+__PMC_EV_ALIAS("IDQ.EMPTY", IAP_EVENT_79H_02H) \
+__PMC_EV_ALIAS("IDQ.MITE_UOPS", IAP_EVENT_79H_04H) \
+__PMC_EV_ALIAS("IDQ.DSB_UOPS", IAP_EVENT_79H_08H) \
+__PMC_EV_ALIAS("IDQ.MS_DSB_UOPS", IAP_EVENT_79H_10H) \
+__PMC_EV_ALIAS("IDQ.MS_MITE_UOPS", IAP_EVENT_79H_20H) \
+__PMC_EV_ALIAS("IDQ.MS_UOPS", IAP_EVENT_79H_30H) \
+__PMC_EV_ALIAS("IDQ.ALL_DSB_CYCLES_ANY_UOPS", IAP_EVENT_79H_18H) \
+__PMC_EV_ALIAS("IDQ.ALL_DSB_CYCLES_4_UOPS", IAP_EVENT_79H_18H) \
+__PMC_EV_ALIAS("IDQ.ALL_MITE_CYCLES_ANY_UOPS", IAP_EVENT_79H_24H) \
+__PMC_EV_ALIAS("IDQ.ALL_MITE_CYCLES_4_UOPS", IAP_EVENT_79H_24H) \
+__PMC_EV_ALIAS("IDQ.MITE_ALL_UOPS", IAP_EVENT_79H_3CH) \
+__PMC_EV_ALIAS("ICACHE.MISSES", IAP_EVENT_80H_02H) \
+__PMC_EV_ALIAS("ITLB_MISSES.MISS_CAUSES_A_WALK", IAP_EVENT_85H_01H) \
+__PMC_EV_ALIAS("ITLB_MISSES.WALK_COMPLETED", IAP_EVENT_85H_02H) \
+__PMC_EV_ALIAS("ITLB_MISSES.WALK_DURATION", IAP_EVENT_85H_04H) \
+__PMC_EV_ALIAS("ITLB_MISSES.STLB_HIT", IAP_EVENT_85H_10H) \
+__PMC_EV_ALIAS("ILD_STALL.LCP", IAP_EVENT_87H_01H) \
+__PMC_EV_ALIAS("ILD_STALL.IQ_FULL", IAP_EVENT_87H_04H) \
+__PMC_EV_ALIAS("BR_INST_EXEC.COND", IAP_EVENT_88H_01H) \
+__PMC_EV_ALIAS("BR_INST_EXEC.DIRECT_JMP", IAP_EVENT_88H_02H) \
+__PMC_EV_ALIAS("BR_INST_EXEC.INDIRECT_JMP_NON_CALL_RET", IAP_EVENT_88H_04H) \
+__PMC_EV_ALIAS("BR_INST_EXEC.RETURN_NEAR", IAP_EVENT_88H_08H) \
+__PMC_EV_ALIAS("BR_INST_EXEC.DIRECT_NEAR_CALL", IAP_EVENT_88H_10H) \
+__PMC_EV_ALIAS("BR_INST_EXEC.INDIRECT_NEAR_CALL", IAP_EVENT_88H_20H) \
+__PMC_EV_ALIAS("BR_INST_EXEC.NONTAKEN", IAP_EVENT_88H_40H) \
+__PMC_EV_ALIAS("BR_INST_EXEC.TAKEN", IAP_EVENT_88H_80H) \
+__PMC_EV_ALIAS("BR_INST_EXEC.ALL_BRANCHES", IAP_EVENT_88H_FFH) \
+__PMC_EV_ALIAS("BR_MISP_EXEC.COND", IAP_EVENT_89H_01H) \
+__PMC_EV_ALIAS("BR_MISP_EXEC.INDIRECT_JMP_NON_CALL_RET", IAP_EVENT_89H_04H) \
+__PMC_EV_ALIAS("BR_MISP_EXEC.RETURN_NEAR", IAP_EVENT_89H_08H) \
+__PMC_EV_ALIAS("BR_MISP_EXEC.DIRECT_NEAR_CALL", IAP_EVENT_89H_10H) \
+__PMC_EV_ALIAS("BR_MISP_EXEC.INDIRECT_NEAR_CALL", IAP_EVENT_89H_20H) \
+__PMC_EV_ALIAS("BR_MISP_EXEC.NONTAKEN", IAP_EVENT_89H_40H) \
+__PMC_EV_ALIAS("BR_MISP_EXEC.TAKEN", IAP_EVENT_89H_80H) \
+__PMC_EV_ALIAS("BR_MISP_EXEC.ALL_BRANCHES", IAP_EVENT_89H_FFH) \
+__PMC_EV_ALIAS("IDQ_UOPS_NOT_DELIVERED.CORE", IAP_EVENT_9CH_01H) \
+__PMC_EV_ALIAS("UOPS_DISPATCHED_PORT.PORT_0", IAP_EVENT_A1H_01H) \
+__PMC_EV_ALIAS("UOPS_DISPATCHED_PORT.PORT_1", IAP_EVENT_A1H_02H) \
+__PMC_EV_ALIAS("UOPS_DISPATCHED_PORT.PORT_2_LD", IAP_EVENT_A1H_04H) \
+__PMC_EV_ALIAS("UOPS_DISPATCHED_PORT.PORT_2_STA", IAP_EVENT_A1H_08H) \
+__PMC_EV_ALIAS("UOPS_DISPATCHED_PORT.PORT_2", IAP_EVENT_A1H_0CH) \
+__PMC_EV_ALIAS("UOPS_DISPATCHED_PORT.PORT_3_LD", IAP_EVENT_A1H_10H) \
+__PMC_EV_ALIAS("UOPS_DISPATCHED_PORT.PORT_3_STA", IAP_EVENT_A1H_20H) \
+__PMC_EV_ALIAS("UOPS_DISPATCHED_PORT.PORT_3", IAP_EVENT_A1H_30H) \
+__PMC_EV_ALIAS("UOPS_DISPATCHED_PORT.PORT_4", IAP_EVENT_A1H_40H) \
+__PMC_EV_ALIAS("UOPS_DISPATCHED_PORT.PORT_5", IAP_EVENT_A1H_80H) \
+__PMC_EV_ALIAS("RESOURCE_STALLS.ANY", IAP_EVENT_A2H_01H) \
+__PMC_EV_ALIAS("RESOURCE_STALLS.RS", IAP_EVENT_A2H_04H) \
+__PMC_EV_ALIAS("RESOURCE_STALLS.SB", IAP_EVENT_A2H_08H) \
+__PMC_EV_ALIAS("RESOURCE_STALLS.ROB", IAP_EVENT_A2H_10H) \
+__PMC_EV_ALIAS("DSB2MITE_SWITCHES.COUNT", IAP_EVENT_ABH_01H) \
+__PMC_EV_ALIAS("DSB2MITE_SWITCHES.PENALTY_CYCLES", IAP_EVENT_ABH_02H) \
+__PMC_EV_ALIAS("DSB_FILL.EXCEED_DSB_LINES", IAP_EVENT_ACH_08H) \
+__PMC_EV_ALIAS("ITLB.ITLB_FLUSH", IAP_EVENT_AEH_01H) \
+__PMC_EV_ALIAS("OFFCORE_REQUESTS.DEMAND_DATA_RD", IAP_EVENT_B0H_01H) \
+__PMC_EV_ALIAS("OFFCORE_REQUESTS.DEMAND_CODE_RD", IAP_EVENT_B0H_02H) \
+__PMC_EV_ALIAS("OFFCORE_REQUESTS.DEMAND_RFO", IAP_EVENT_B0H_04H) \
+__PMC_EV_ALIAS("OFFCORE_REQUESTS.ALL_DATA_RD", IAP_EVENT_B0H_08H) \
+__PMC_EV_ALIAS("UOPS_EXECUTED.THREAD", IAP_EVENT_B1H_01H) \
+__PMC_EV_ALIAS("UOPS_EXECUTED.CORE", IAP_EVENT_B1H_02H) \
+__PMC_EV_ALIAS("OFF_CORE_RESPONSE_0", IAP_EVENT_B7H_01H) \
+__PMC_EV_ALIAS("OFF_CORE_RESPONSE_1", IAP_EVENT_BBH_01H) \
+__PMC_EV_ALIAS("TLB_FLUSH.DTLB_THREAD", IAP_EVENT_BDH_01H) \
+__PMC_EV_ALIAS("TLB_FLUSH.STLB_ANY", IAP_EVENT_BDH_20H) \
+__PMC_EV_ALIAS("INST_RETIRED.ANY_P", IAP_EVENT_C0H_00H) \
+__PMC_EV_ALIAS("INST_RETIRED.ALL", IAP_EVENT_C0H_01H) \
+__PMC_EV_ALIAS("OTHER_ASSISTS.AVX_STORE", IAP_EVENT_C1H_08H) \
+__PMC_EV_ALIAS("OTHER_ASSISTS.AVX_TO_SSE", IAP_EVENT_C1H_10H) \
+__PMC_EV_ALIAS("OTHER_ASSISTS.SSE_TO_AVX", IAP_EVENT_C1H_20H) \
+__PMC_EV_ALIAS("UOPS_RETIRED.ALL", IAP_EVENT_C2H_01H) \
+__PMC_EV_ALIAS("UOPS_RETIRED.RETIRE_SLOTS", IAP_EVENT_C2H_02H) \
+__PMC_EV_ALIAS("MACHINE_CLEARS.MEMORY_ORDERING", IAP_EVENT_C3H_02H) \
+__PMC_EV_ALIAS("MACHINE_CLEARS.SMC", IAP_EVENT_C3H_04H) \
+__PMC_EV_ALIAS("MACHINE_CLEARS.MASKMOV", IAP_EVENT_C3H_20H) \
+__PMC_EV_ALIAS("BR_INST_RETIRED.ALL_BRANCHES", IAP_EVENT_C4H_00H) \
+__PMC_EV_ALIAS("BR_INST_RETIRED.CONDITIONAL", IAP_EVENT_C4H_01H) \
+__PMC_EV_ALIAS("BR_INST_RETIRED.NEAR_CALL", IAP_EVENT_C4H_02H) \
+__PMC_EV_ALIAS("BR_INST_RETIRED.ALL_BRANCHES", IAP_EVENT_C4H_04H) \
+__PMC_EV_ALIAS("BR_INST_RETIRED.NEAR_RETURN", IAP_EVENT_C4H_08H) \
+__PMC_EV_ALIAS("BR_INST_RETIRED.NOT_TAKEN", IAP_EVENT_C4H_10H) \
+__PMC_EV_ALIAS("BR_INST_RETIRED.NEAR_TAKEN", IAP_EVENT_C4H_20H) \
+__PMC_EV_ALIAS("BR_INST_RETIRED.FAR_BRANCH", IAP_EVENT_C4H_40H) \
+__PMC_EV_ALIAS("BR_MISP_RETIRED.ALL_BRANCHES", IAP_EVENT_C5H_00H) \
+__PMC_EV_ALIAS("BR_MISP_RETIRED.CONDITIONAL", IAP_EVENT_C5H_01H) \
+__PMC_EV_ALIAS("BR_MISP_RETIRED.NEAR_CALL", IAP_EVENT_C5H_02H) \
+__PMC_EV_ALIAS("BR_MISP_RETIRED.ALL_BRANCHES", IAP_EVENT_C5H_04H) \
+__PMC_EV_ALIAS("BR_MISP_RETIRED.NOT_TAKEN", IAP_EVENT_C5H_10H) \
+__PMC_EV_ALIAS("BR_MISP_RETIRED.TAKEN", IAP_EVENT_C5H_20H) \
+__PMC_EV_ALIAS("FP_ASSIST.X87_OUTPUT", IAP_EVENT_CAH_02H) \
+__PMC_EV_ALIAS("FP_ASSIST.X87_INPUT", IAP_EVENT_CAH_04H) \
+__PMC_EV_ALIAS("FP_ASSIST.SIMD_OUTPUT", IAP_EVENT_CAH_08H) \
+__PMC_EV_ALIAS("FP_ASSIST.SIMD_INPUT", IAP_EVENT_CAH_10H) \
+__PMC_EV_ALIAS("FP_ASSIST.ANY", IAP_EVENT_CAH_1EH) \
+__PMC_EV_ALIAS("ROB_MISC_EVENTS.LBR_INSERTS", IAP_EVENT_CCH_20H) \
+__PMC_EV_ALIAS("MEM_TRANS_RETIRED.LOAD_LATENCY", IAP_EVENT_CDH_01H) \
+__PMC_EV_ALIAS("MEM_TRANS_RETIRED.PRECISE_STORE", IAP_EVENT_CDH_02H) \
+__PMC_EV_ALIAS("MEM_UOP_RETIRED.LOADS", IAP_EVENT_D0H_01H) \
+__PMC_EV_ALIAS("MEM_UOP_RETIRED.STORES", IAP_EVENT_D0H_02H) \
+__PMC_EV_ALIAS("MEM_UOP_RETIRED.STLB_MISS", IAP_EVENT_D0H_10H) \
+__PMC_EV_ALIAS("MEM_UOP_RETIRED.LOCK", IAP_EVENT_D0H_20H) \
+__PMC_EV_ALIAS("MEM_UOP_RETIRED.SPLIT", IAP_EVENT_D0H_40H) \
+__PMC_EV_ALIAS("MEM_UOP_RETIRED.ALL", IAP_EVENT_D0H_80H) \
+__PMC_EV_ALIAS("MEM_LOAD_UOPS_RETIRED.L1_HIT", IAP_EVENT_D1H_01H) \
+__PMC_EV_ALIAS("MEM_LOAD_UOPS_RETIRED.L2_HIT", IAP_EVENT_D1H_02H) \
+__PMC_EV_ALIAS("MEM_LOAD_UOPS_RETIRED.LLC_HIT", IAP_EVENT_D1H_04H) \
+__PMC_EV_ALIAS("MEM_LOAD_UOPS_RETIRED.HIT_LFB", IAP_EVENT_D1H_40H) \
+__PMC_EV_ALIAS("MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS", IAP_EVENT_D2H_01H) \
+__PMC_EV_ALIAS("MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT", IAP_EVENT_D2H_02H) \
+__PMC_EV_ALIAS("MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM", IAP_EVENT_D2H_04H) \
+__PMC_EV_ALIAS("MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_NONE", IAP_EVENT_D2H_08H) \
+__PMC_EV_ALIAS("MEM_LOAD_UOPS_LLC_MISS_RETIRED.LOCAL_DRAM", IAP_EVENT_D3H_01H) \
+__PMC_EV_ALIAS("L2_TRANS.DEMAND_DATA_RD", IAP_EVENT_F0H_01H) \
+__PMC_EV_ALIAS("L2_TRANS.RFO", IAP_EVENT_F0H_02H) \
+__PMC_EV_ALIAS("L2_TRANS.CODE_RD", IAP_EVENT_F0H_04H) \
+__PMC_EV_ALIAS("L2_TRANS.ALL_PF", IAP_EVENT_F0H_08H) \
+__PMC_EV_ALIAS("L2_TRANS.L1D_WB", IAP_EVENT_F0H_10H) \
+__PMC_EV_ALIAS("L2_TRANS.L2_FILL", IAP_EVENT_F0H_20H) \
+__PMC_EV_ALIAS("L2_TRANS.L2_WB", IAP_EVENT_F0H_40H) \
+__PMC_EV_ALIAS("L2_TRANS.ALL_REQUESTS", IAP_EVENT_F0H_80H) \
+__PMC_EV_ALIAS("L2_LINES_IN.I", IAP_EVENT_F1H_01H) \
+__PMC_EV_ALIAS("L2_LINES_IN.S", IAP_EVENT_F1H_02H) \
+__PMC_EV_ALIAS("L2_LINES_IN.E", IAP_EVENT_F1H_04H) \
+__PMC_EV_ALIAS("L2_LINES_IN.ALL", IAP_EVENT_F1H_07H) \
+__PMC_EV_ALIAS("L2_LINES_OUT.DEMAND_CLEAN", IAP_EVENT_F2H_01H) \
+__PMC_EV_ALIAS("L2_LINES_OUT.DEMAND_DIRTY", IAP_EVENT_F2H_02H) \
+__PMC_EV_ALIAS("L2_LINES_OUT.PF_CLEAN", IAP_EVENT_F2H_04H) \
+__PMC_EV_ALIAS("L2_LINES_OUT.PF_DIRTY", IAP_EVENT_F2H_08H)
+
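+/*
+ * Note that a few aliases above share an event/umask encoding: for
+ * example, IDQ.ALL_DSB_CYCLES_ANY_UOPS and IDQ.ALL_DSB_CYCLES_4_UOPS
+ * are both IAP_EVENT_79H_18H.  In the Intel documentation such event
+ * pairs differ only in the counter-mask value programmed along with
+ * the event (cmask=1 versus cmask=4), which this table cannot
+ * express; the counter mask is expected to be supplied as an event
+ * qualifier when the counter is configured.
+ */
+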
+/*
+ * Aliases for Ivy Bridge Xeon PMC events (325462-045US January 2013)
+ */
+
+#define __PMC_EV_ALIAS_IVYBRIDGE_XEON() \
+__PMC_EV_ALIAS("LD_BLOCKS.STORE_FORWARD", IAP_EVENT_03H_02H) \
+__PMC_EV_ALIAS("MISALIGN_MEM_REF.LOADS", IAP_EVENT_05H_01H) \
+__PMC_EV_ALIAS("MISALIGN_MEM_REF.STORES", IAP_EVENT_05H_02H) \
+__PMC_EV_ALIAS("LD_BLOCKS_PARTIAL.ADDRESS_ALIAS", IAP_EVENT_07H_01H) \
+__PMC_EV_ALIAS("DTLB_LOAD_MISSES.DEMAND_LD_MISS_CAUSES_A_WALK", IAP_EVENT_08H_81H) \
+__PMC_EV_ALIAS("DTLB_LOAD_MISSES.DEMAND_LD_WALK_COMPLETED", IAP_EVENT_08H_82H) \
+__PMC_EV_ALIAS("DTLB_LOAD_MISSES.DEMAND_LD_WALK_DURATION", IAP_EVENT_08H_84H) \
+__PMC_EV_ALIAS("UOPS_ISSUED.ANY", IAP_EVENT_0EH_01H) \
+__PMC_EV_ALIAS("UOPS_ISSUED.FLAGS_MERGE", IAP_EVENT_0EH_10H) \
+__PMC_EV_ALIAS("UOPS_ISSUED.SLOW_LEA", IAP_EVENT_0EH_20H) \
+__PMC_EV_ALIAS("UOPS_ISSUED.SINGLE_MUL", IAP_EVENT_0EH_40H) \
+__PMC_EV_ALIAS("ARITH.FPU_DIV_ACTIVE", IAP_EVENT_14H_01H) \
+__PMC_EV_ALIAS("L2_RQSTS.DEMAND_DATA_RD_HIT", IAP_EVENT_24H_01H) \
+__PMC_EV_ALIAS("L2_RQSTS.ALL_DEMAND_DATA_RD", IAP_EVENT_24H_03H) \
+__PMC_EV_ALIAS("L2_RQSTS.RFO_HITS", IAP_EVENT_24H_04H) \
+__PMC_EV_ALIAS("L2_RQSTS.RFO_MISS", IAP_EVENT_24H_08H) \
+__PMC_EV_ALIAS("L2_RQSTS.ALL_RFO", IAP_EVENT_24H_0CH) \
+__PMC_EV_ALIAS("L2_RQSTS.CODE_RD_HIT", IAP_EVENT_24H_10H) \
+__PMC_EV_ALIAS("L2_RQSTS.CODE_RD_MISS", IAP_EVENT_24H_20H) \
+__PMC_EV_ALIAS("L2_RQSTS.ALL_CODE_RD", IAP_EVENT_24H_30H) \
+__PMC_EV_ALIAS("L2_RQSTS.PF_HIT", IAP_EVENT_24H_40H) \
+__PMC_EV_ALIAS("L2_RQSTS.PF_MISS", IAP_EVENT_24H_80H) \
+__PMC_EV_ALIAS("L2_RQSTS.ALL_PF", IAP_EVENT_24H_C0H) \
+__PMC_EV_ALIAS("L2_STORE_LOCK_RQSTS.MISS", IAP_EVENT_27H_01H) \
+__PMC_EV_ALIAS("L2_STORE_LOCK_RQSTS.HIT_M", IAP_EVENT_27H_08H) \
+__PMC_EV_ALIAS("L2_STORE_LOCK_RQSTS.ALL", IAP_EVENT_27H_0FH) \
+__PMC_EV_ALIAS("L2_L1D_WB_RQSTS.MISS", IAP_EVENT_28H_01H) \
+__PMC_EV_ALIAS("L2_L1D_WB_RQSTS.HIT_E", IAP_EVENT_28H_04H) \
+__PMC_EV_ALIAS("L2_L1D_WB_RQSTS.HIT_M", IAP_EVENT_28H_08H) \
+__PMC_EV_ALIAS("L2_L1D_WB_RQSTS.ALL", IAP_EVENT_28H_0FH) \
+__PMC_EV_ALIAS("LONGEST_LAT_CACHE.REFERENCE", IAP_EVENT_2EH_4FH) \
+__PMC_EV_ALIAS("LONGEST_LAT_CACHE.MISS", IAP_EVENT_2EH_41H) \
+__PMC_EV_ALIAS("CPU_CLK_UNHALTED.THREAD_P", IAP_EVENT_3CH_00H) \
+__PMC_EV_ALIAS("CPU_CLK_THREAD_UNHALTED.REF_XCLK", IAP_EVENT_3CH_01H) \
+__PMC_EV_ALIAS("L1D_PEND_MISS.PENDING", IAP_EVENT_48H_01H) \
+__PMC_EV_ALIAS("DTLB_STORE_MISSES.MISS_CAUSES_A_WALK", IAP_EVENT_49H_01H) \
+__PMC_EV_ALIAS("DTLB_STORE_MISSES.WALK_COMPLETED", IAP_EVENT_49H_02H) \
+__PMC_EV_ALIAS("DTLB_STORE_MISSES.WALK_DURATION", IAP_EVENT_49H_04H) \
+__PMC_EV_ALIAS("DTLB_STORE_MISSES.STLB_HIT", IAP_EVENT_49H_10H) \
+__PMC_EV_ALIAS("LOAD_HIT_PRE.SW_PF", IAP_EVENT_4CH_01H) \
+__PMC_EV_ALIAS("LOAD_HIT_PRE.HW_PF", IAP_EVENT_4CH_02H) \
+__PMC_EV_ALIAS("L1D.REPLACEMENT", IAP_EVENT_51H_01H) \
+__PMC_EV_ALIAS("MOVE_ELIMINATION.INT_NOT_ELIMINATED", IAP_EVENT_58H_01H) \
+__PMC_EV_ALIAS("MOVE_ELIMINATION.SIMD_NOT_ELIMINATED", IAP_EVENT_58H_02H) \
+__PMC_EV_ALIAS("MOVE_ELIMINATION.INT_ELIMINATED", IAP_EVENT_58H_04H) \
+__PMC_EV_ALIAS("MOVE_ELIMINATION.SIMD_ELIMINATED", IAP_EVENT_58H_08H) \
+__PMC_EV_ALIAS("CPL_CYCLES.RING0", IAP_EVENT_5CH_01H) \
+__PMC_EV_ALIAS("CPL_CYCLES.RING123", IAP_EVENT_5CH_02H) \
+__PMC_EV_ALIAS("RS_EVENTS.EMPTY_CYCLES", IAP_EVENT_5EH_01H) \
+__PMC_EV_ALIAS("DTLB_LOAD_MISSES.STLB_HIT", IAP_EVENT_5FH_04H) \
+__PMC_EV_ALIAS("OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD", IAP_EVENT_60H_01H) \
+__PMC_EV_ALIAS("OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD", IAP_EVENT_60H_02H) \
+__PMC_EV_ALIAS("OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO", IAP_EVENT_60H_04H) \
+__PMC_EV_ALIAS("OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD", IAP_EVENT_60H_08H) \
+__PMC_EV_ALIAS("LOCK_CYCLES.SPLIT_LOCK_UC_LOCK_DURATION", IAP_EVENT_63H_01H) \
+__PMC_EV_ALIAS("LOCK_CYCLES.CACHE_LOCK_DURATION", IAP_EVENT_63H_02H) \
+__PMC_EV_ALIAS("IDQ.EMPTY", IAP_EVENT_79H_02H) \
+__PMC_EV_ALIAS("IDQ.MITE_UOPS", IAP_EVENT_79H_04H) \
+__PMC_EV_ALIAS("IDQ.DSB_UOPS", IAP_EVENT_79H_08H) \
+__PMC_EV_ALIAS("IDQ.MS_DSB_UOPS", IAP_EVENT_79H_10H) \
+__PMC_EV_ALIAS("IDQ.MS_MITE_UOPS", IAP_EVENT_79H_20H) \
+__PMC_EV_ALIAS("IDQ.MS_UOPS", IAP_EVENT_79H_30H) \
+__PMC_EV_ALIAS("IDQ.ALL_DSB_CYCLES_ANY_UOPS", IAP_EVENT_79H_18H) \
+__PMC_EV_ALIAS("IDQ.ALL_DSB_CYCLES_4_UOPS", IAP_EVENT_79H_18H) \
+__PMC_EV_ALIAS("IDQ.ALL_MITE_CYCLES_ANY_UOPS", IAP_EVENT_79H_24H) \
+__PMC_EV_ALIAS("IDQ.ALL_MITE_CYCLES_4_UOPS", IAP_EVENT_79H_24H) \
+__PMC_EV_ALIAS("IDQ.MITE_ALL_UOPS", IAP_EVENT_79H_3CH) \
+__PMC_EV_ALIAS("ICACHE.MISSES", IAP_EVENT_80H_02H) \
+__PMC_EV_ALIAS("ITLB_MISSES.MISS_CAUSES_A_WALK", IAP_EVENT_85H_01H) \
+__PMC_EV_ALIAS("ITLB_MISSES.WALK_COMPLETED", IAP_EVENT_85H_02H) \
+__PMC_EV_ALIAS("ITLB_MISSES.WALK_DURATION", IAP_EVENT_85H_04H) \
+__PMC_EV_ALIAS("ITLB_MISSES.STLB_HIT", IAP_EVENT_85H_10H) \
+__PMC_EV_ALIAS("ILD_STALL.LCP", IAP_EVENT_87H_01H) \
+__PMC_EV_ALIAS("ILD_STALL.IQ_FULL", IAP_EVENT_87H_04H) \
+__PMC_EV_ALIAS("BR_INST_EXEC.COND", IAP_EVENT_88H_01H) \
+__PMC_EV_ALIAS("BR_INST_EXEC.DIRECT_JMP", IAP_EVENT_88H_02H) \
+__PMC_EV_ALIAS("BR_INST_EXEC.INDIRECT_JMP_NON_CALL_RET", IAP_EVENT_88H_04H) \
+__PMC_EV_ALIAS("BR_INST_EXEC.RETURN_NEAR", IAP_EVENT_88H_08H) \
+__PMC_EV_ALIAS("BR_INST_EXEC.DIRECT_NEAR_CALL", IAP_EVENT_88H_10H) \
+__PMC_EV_ALIAS("BR_INST_EXEC.INDIRECT_NEAR_CALL", IAP_EVENT_88H_20H) \
+__PMC_EV_ALIAS("BR_INST_EXEC.NONTAKEN", IAP_EVENT_88H_40H) \
+__PMC_EV_ALIAS("BR_INST_EXEC.TAKEN", IAP_EVENT_88H_80H) \
+__PMC_EV_ALIAS("BR_INST_EXEC.ALL_BRANCHES", IAP_EVENT_88H_FFH) \
+__PMC_EV_ALIAS("BR_MISP_EXEC.COND", IAP_EVENT_89H_01H) \
+__PMC_EV_ALIAS("BR_MISP_EXEC.INDIRECT_JMP_NON_CALL_RET", IAP_EVENT_89H_04H) \
+__PMC_EV_ALIAS("BR_MISP_EXEC.RETURN_NEAR", IAP_EVENT_89H_08H) \
+__PMC_EV_ALIAS("BR_MISP_EXEC.DIRECT_NEAR_CALL", IAP_EVENT_89H_10H) \
+__PMC_EV_ALIAS("BR_MISP_EXEC.INDIRECT_NEAR_CALL", IAP_EVENT_89H_20H) \
+__PMC_EV_ALIAS("BR_MISP_EXEC.NONTAKEN", IAP_EVENT_89H_40H) \
+__PMC_EV_ALIAS("BR_MISP_EXEC.TAKEN", IAP_EVENT_89H_80H) \
+__PMC_EV_ALIAS("BR_MISP_EXEC.ALL_BRANCHES", IAP_EVENT_89H_FFH) \
+__PMC_EV_ALIAS("IDQ_UOPS_NOT_DELIVERED.CORE", IAP_EVENT_9CH_01H) \
+__PMC_EV_ALIAS("UOPS_DISPATCHED_PORT.PORT_0", IAP_EVENT_A1H_01H) \
+__PMC_EV_ALIAS("UOPS_DISPATCHED_PORT.PORT_1", IAP_EVENT_A1H_02H) \
+__PMC_EV_ALIAS("UOPS_DISPATCHED_PORT.PORT_2_LD", IAP_EVENT_A1H_04H) \
+__PMC_EV_ALIAS("UOPS_DISPATCHED_PORT.PORT_2_STA", IAP_EVENT_A1H_08H) \
+__PMC_EV_ALIAS("UOPS_DISPATCHED_PORT.PORT_2", IAP_EVENT_A1H_0CH) \
+__PMC_EV_ALIAS("UOPS_DISPATCHED_PORT.PORT_3_LD", IAP_EVENT_A1H_10H) \
+__PMC_EV_ALIAS("UOPS_DISPATCHED_PORT.PORT_3_STA", IAP_EVENT_A1H_20H) \
+__PMC_EV_ALIAS("UOPS_DISPATCHED_PORT.PORT_3", IAP_EVENT_A1H_30H) \
+__PMC_EV_ALIAS("UOPS_DISPATCHED_PORT.PORT_4", IAP_EVENT_A1H_40H) \
+__PMC_EV_ALIAS("UOPS_DISPATCHED_PORT.PORT_5", IAP_EVENT_A1H_80H) \
+__PMC_EV_ALIAS("RESOURCE_STALLS.ANY", IAP_EVENT_A2H_01H) \
+__PMC_EV_ALIAS("RESOURCE_STALLS.RS", IAP_EVENT_A2H_04H) \
+__PMC_EV_ALIAS("RESOURCE_STALLS.SB", IAP_EVENT_A2H_08H) \
+__PMC_EV_ALIAS("RESOURCE_STALLS.ROB", IAP_EVENT_A2H_10H) \
+__PMC_EV_ALIAS("CYCLE_ACTIVITY.CYCLES_L2_PENDING", IAP_EVENT_A3H_01H) \
+__PMC_EV_ALIAS("CYCLE_ACTIVITY.CYCLES_LDM_PENDING", IAP_EVENT_A3H_02H) \
+__PMC_EV_ALIAS("CYCLE_ACTIVITY.CYCLES_NO_EXECUTE", IAP_EVENT_A3H_04H) \
+__PMC_EV_ALIAS("CYCLE_ACTIVITY.CYCLES_L1D_PENDING", IAP_EVENT_A3H_08H) \
+__PMC_EV_ALIAS("DSB2MITE_SWITCHES.COUNT", IAP_EVENT_ABH_01H) \
+__PMC_EV_ALIAS("DSB2MITE_SWITCHES.PENALTY_CYCLES", IAP_EVENT_ABH_02H) \
+__PMC_EV_ALIAS("DSB_FILL.EXCEED_DSB_LINES", IAP_EVENT_ACH_08H) \
+__PMC_EV_ALIAS("ITLB.ITLB_FLUSH", IAP_EVENT_AEH_01H) \
+__PMC_EV_ALIAS("OFFCORE_REQUESTS.DEMAND_DATA_RD", IAP_EVENT_B0H_01H) \
+__PMC_EV_ALIAS("OFFCORE_REQUESTS.DEMAND_CODE_RD", IAP_EVENT_B0H_02H) \
+__PMC_EV_ALIAS("OFFCORE_REQUESTS.DEMAND_RFO", IAP_EVENT_B0H_04H) \
+__PMC_EV_ALIAS("OFFCORE_REQUESTS.ALL_DATA_RD", IAP_EVENT_B0H_08H) \
+__PMC_EV_ALIAS("UOPS_EXECUTED.THREAD", IAP_EVENT_B1H_01H) \
+__PMC_EV_ALIAS("UOPS_EXECUTED.CORE", IAP_EVENT_B1H_02H) \
+__PMC_EV_ALIAS("OFF_CORE_RESPONSE_0", IAP_EVENT_B7H_01H) \
+__PMC_EV_ALIAS("OFF_CORE_RESPONSE_1", IAP_EVENT_BBH_01H) \
+__PMC_EV_ALIAS("TLB_FLUSH.DTLB_THREAD", IAP_EVENT_BDH_01H) \
+__PMC_EV_ALIAS("TLB_FLUSH.STLB_ANY", IAP_EVENT_BDH_20H) \
+__PMC_EV_ALIAS("INST_RETIRED.ANY_P", IAP_EVENT_C0H_00H) \
+__PMC_EV_ALIAS("INST_RETIRED.ALL", IAP_EVENT_C0H_01H) \
+__PMC_EV_ALIAS("OTHER_ASSISTS.AVX_STORE", IAP_EVENT_C1H_08H) \
+__PMC_EV_ALIAS("OTHER_ASSISTS.AVX_TO_SSE", IAP_EVENT_C1H_10H) \
+__PMC_EV_ALIAS("OTHER_ASSISTS.SSE_TO_AVX", IAP_EVENT_C1H_20H) \
+__PMC_EV_ALIAS("UOPS_RETIRED.ALL", IAP_EVENT_C2H_01H) \
+__PMC_EV_ALIAS("UOPS_RETIRED.RETIRE_SLOTS", IAP_EVENT_C2H_02H) \
+__PMC_EV_ALIAS("MACHINE_CLEARS.MEMORY_ORDERING", IAP_EVENT_C3H_02H) \
+__PMC_EV_ALIAS("MACHINE_CLEARS.SMC", IAP_EVENT_C3H_04H) \
+__PMC_EV_ALIAS("MACHINE_CLEARS.MASKMOV", IAP_EVENT_C3H_20H) \
+__PMC_EV_ALIAS("BR_INST_RETIRED.ALL_BRANCHES", IAP_EVENT_C4H_00H) \
+__PMC_EV_ALIAS("BR_INST_RETIRED.CONDITIONAL", IAP_EVENT_C4H_01H) \
+__PMC_EV_ALIAS("BR_INST_RETIRED.NEAR_CALL", IAP_EVENT_C4H_02H) \
+__PMC_EV_ALIAS("BR_INST_RETIRED.ALL_BRANCHES", IAP_EVENT_C4H_04H) \
+__PMC_EV_ALIAS("BR_INST_RETIRED.NEAR_RETURN", IAP_EVENT_C4H_08H) \
+__PMC_EV_ALIAS("BR_INST_RETIRED.NOT_TAKEN", IAP_EVENT_C4H_10H) \
+__PMC_EV_ALIAS("BR_INST_RETIRED.NEAR_TAKEN", IAP_EVENT_C4H_20H) \
+__PMC_EV_ALIAS("BR_INST_RETIRED.FAR_BRANCH", IAP_EVENT_C4H_40H) \
+__PMC_EV_ALIAS("BR_MISP_RETIRED.ALL_BRANCHES", IAP_EVENT_C5H_00H) \
+__PMC_EV_ALIAS("BR_MISP_RETIRED.CONDITIONAL", IAP_EVENT_C5H_01H) \
+__PMC_EV_ALIAS("BR_MISP_RETIRED.NEAR_CALL", IAP_EVENT_C5H_02H) \
+__PMC_EV_ALIAS("BR_MISP_RETIRED.ALL_BRANCHES", IAP_EVENT_C5H_04H) \
+__PMC_EV_ALIAS("BR_MISP_RETIRED.NOT_TAKEN", IAP_EVENT_C5H_10H) \
+__PMC_EV_ALIAS("BR_MISP_RETIRED.TAKEN", IAP_EVENT_C5H_20H) \
+__PMC_EV_ALIAS("FP_ASSIST.X87_OUTPUT", IAP_EVENT_CAH_02H) \
+__PMC_EV_ALIAS("FP_ASSIST.X87_INPUT", IAP_EVENT_CAH_04H) \
+__PMC_EV_ALIAS("FP_ASSIST.SIMD_OUTPUT", IAP_EVENT_CAH_08H) \
+__PMC_EV_ALIAS("FP_ASSIST.SIMD_INPUT", IAP_EVENT_CAH_10H) \
+__PMC_EV_ALIAS("FP_ASSIST.ANY", IAP_EVENT_CAH_1EH) \
+__PMC_EV_ALIAS("ROB_MISC_EVENTS.LBR_INSERTS", IAP_EVENT_CCH_20H) \
+__PMC_EV_ALIAS("MEM_TRANS_RETIRED.LOAD_LATENCY", IAP_EVENT_CDH_01H) \
+__PMC_EV_ALIAS("MEM_TRANS_RETIRED.PRECISE_STORE", IAP_EVENT_CDH_02H) \
+__PMC_EV_ALIAS("MEM_UOP_RETIRED.LOADS", IAP_EVENT_D0H_01H) \
+__PMC_EV_ALIAS("MEM_UOP_RETIRED.STORES", IAP_EVENT_D0H_02H) \
+__PMC_EV_ALIAS("MEM_UOP_RETIRED.STLB_MISS", IAP_EVENT_D0H_10H) \
+__PMC_EV_ALIAS("MEM_UOP_RETIRED.LOCK", IAP_EVENT_D0H_20H) \
+__PMC_EV_ALIAS("MEM_UOP_RETIRED.SPLIT", IAP_EVENT_D0H_40H) \
+__PMC_EV_ALIAS("MEM_UOP_RETIRED.ALL", IAP_EVENT_D0H_80H) \
+__PMC_EV_ALIAS("MEM_LOAD_UOPS_RETIRED.L1_HIT", IAP_EVENT_D1H_01H) \
+__PMC_EV_ALIAS("MEM_LOAD_UOPS_RETIRED.L2_HIT", IAP_EVENT_D1H_02H) \
+__PMC_EV_ALIAS("MEM_LOAD_UOPS_RETIRED.LLC_HIT", IAP_EVENT_D1H_04H) \
+__PMC_EV_ALIAS("MEM_LOAD_UOPS_RETIRED.LLC_MISS", IAP_EVENT_D1H_20H) \
+__PMC_EV_ALIAS("MEM_LOAD_UOPS_RETIRED.HIT_LFB", IAP_EVENT_D1H_40H) \
+__PMC_EV_ALIAS("MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS", IAP_EVENT_D2H_01H) \
+__PMC_EV_ALIAS("MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT", IAP_EVENT_D2H_02H) \
+__PMC_EV_ALIAS("MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM", IAP_EVENT_D2H_04H) \
+__PMC_EV_ALIAS("MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_NONE", IAP_EVENT_D2H_08H) \
+__PMC_EV_ALIAS("MEM_LOAD_UOPS_LLC_MISS_RETIRED.LOCAL_DRAM", IAP_EVENT_D3H_01H) \
+__PMC_EV_ALIAS("MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_DRAM", IAP_EVENT_D3H_04H) \
+__PMC_EV_ALIAS("MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_HITM", IAP_EVENT_D3H_10H) \
+__PMC_EV_ALIAS("MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_FWD", IAP_EVENT_D3H_20H) \
+__PMC_EV_ALIAS("BACLEARS.ANY", IAP_EVENT_E6H_1FH) \
+__PMC_EV_ALIAS("L2_TRANS.DEMAND_DATA_RD", IAP_EVENT_F0H_01H) \
+__PMC_EV_ALIAS("L2_TRANS.RFO", IAP_EVENT_F0H_02H) \
+__PMC_EV_ALIAS("L2_TRANS.CODE_RD", IAP_EVENT_F0H_04H) \
+__PMC_EV_ALIAS("L2_TRANS.ALL_PF", IAP_EVENT_F0H_08H) \
+__PMC_EV_ALIAS("L2_TRANS.L1D_WB", IAP_EVENT_F0H_10H) \
+__PMC_EV_ALIAS("L2_TRANS.L2_FILL", IAP_EVENT_F0H_20H) \
+__PMC_EV_ALIAS("L2_TRANS.L2_WB", IAP_EVENT_F0H_40H) \
+__PMC_EV_ALIAS("L2_TRANS.ALL_REQUESTS", IAP_EVENT_F0H_80H) \
+__PMC_EV_ALIAS("L2_LINES_IN.I", IAP_EVENT_F1H_01H) \
+__PMC_EV_ALIAS("L2_LINES_IN.S", IAP_EVENT_F1H_02H) \
+__PMC_EV_ALIAS("L2_LINES_IN.E", IAP_EVENT_F1H_04H) \
+__PMC_EV_ALIAS("L2_LINES_IN.ALL", IAP_EVENT_F1H_07H) \
+__PMC_EV_ALIAS("L2_LINES_OUT.DEMAND_CLEAN", IAP_EVENT_F2H_01H) \
+__PMC_EV_ALIAS("L2_LINES_OUT.DEMAND_DIRTY", IAP_EVENT_F2H_02H) \
+__PMC_EV_ALIAS("L2_LINES_OUT.PF_CLEAN", IAP_EVENT_F2H_04H) \
+__PMC_EV_ALIAS("L2_LINES_OUT.PF_DIRTY", IAP_EVENT_F2H_08H) \
+__PMC_EV_ALIAS("L2_LINES_OUT.DIRTY_ALL", IAP_EVENT_F2H_0AH)
+
+/*
+ * Aliases for Sandy Bridge PMC events (253669-039US May 2011)
+ */
+
+#define __PMC_EV_ALIAS_SANDYBRIDGE() \
+__PMC_EV_ALIAS_INTEL_ARCHITECTURAL() \
+__PMC_EV_ALIAS("LD_BLOCKS.DATA_UNKNOWN", IAP_EVENT_03H_01H) \
+__PMC_EV_ALIAS("LD_BLOCKS.STORE_FORWARD", IAP_EVENT_03H_02H) \
+__PMC_EV_ALIAS("LD_BLOCKS.NO_SR", IAP_EVENT_03H_08H) \
+__PMC_EV_ALIAS("LD_BLOCKS.ALL_BLOCK", IAP_EVENT_03H_10H) \
+__PMC_EV_ALIAS("MISALIGN_MEM_REF.LOADS", IAP_EVENT_05H_01H) \
+__PMC_EV_ALIAS("MISALIGN_MEM_REF.STORES", IAP_EVENT_05H_02H) \
+__PMC_EV_ALIAS("LD_BLOCKS_PARTIAL.ADDRESS_ALIAS", IAP_EVENT_07H_01H) \
+__PMC_EV_ALIAS("LD_BLOCKS_PARTIAL.ALL_STA_BLOCK", IAP_EVENT_07H_08H) \
+__PMC_EV_ALIAS("DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK", IAP_EVENT_08H_01H) \
+__PMC_EV_ALIAS("DTLB_LOAD_MISSES.WALK_COMPLETED", IAP_EVENT_08H_02H) \
+__PMC_EV_ALIAS("DTLB_LOAD_MISSES.WALK_DURATION", IAP_EVENT_08H_04H) \
+__PMC_EV_ALIAS("DTLB_LOAD_MISSES.STLB_HIT", IAP_EVENT_08H_10H) \
+__PMC_EV_ALIAS("INT_MISC.RECOVERY_CYCLES", IAP_EVENT_0DH_03H) \
+__PMC_EV_ALIAS("INT_MISC.RAT_STALL_CYCLES", IAP_EVENT_0DH_40H) \
+__PMC_EV_ALIAS("UOPS_ISSUED.ANY", IAP_EVENT_0EH_01H) \
+__PMC_EV_ALIAS("FP_COMP_OPS_EXE.X87", IAP_EVENT_10H_01H) \
+__PMC_EV_ALIAS("FP_COMP_OPS_EXE.SSE_FP_PACKED_DOUBLE", IAP_EVENT_10H_10H) \
+__PMC_EV_ALIAS("FP_COMP_OPS_EXE.SSE_FP_SCALAR_SINGLE", IAP_EVENT_10H_20H) \
+__PMC_EV_ALIAS("FP_COMP_OPS_EXE.SSE_PACKED_SINGLE", IAP_EVENT_10H_40H) \
+__PMC_EV_ALIAS("FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE", IAP_EVENT_10H_80H) \
+__PMC_EV_ALIAS("SIMD_FP_256.PACKED_SINGLE", IAP_EVENT_11H_01H) \
+__PMC_EV_ALIAS("SIMD_FP_256.PACKED_DOUBLE", IAP_EVENT_11H_02H) \
+__PMC_EV_ALIAS("ARITH.FPU_DIV_ACTIVE", IAP_EVENT_14H_01H) \
+__PMC_EV_ALIAS("INSTS_WRITTEN_TO_IQ.INSTS", IAP_EVENT_17H_01H) \
+__PMC_EV_ALIAS("L2_RQSTS.DEMAND_DATA_RD_HIT", IAP_EVENT_24H_01H) \
+__PMC_EV_ALIAS("L2_RQSTS.ALL_DEMAND_DATA_RD", IAP_EVENT_24H_03H) \
+__PMC_EV_ALIAS("L2_RQSTS.RFO_HITS", IAP_EVENT_24H_04H) \
+__PMC_EV_ALIAS("L2_RQSTS.RFO_MISS", IAP_EVENT_24H_08H) \
+__PMC_EV_ALIAS("L2_RQSTS.ALL_RFO", IAP_EVENT_24H_0CH) \
+__PMC_EV_ALIAS("L2_RQSTS.CODE_RD_HIT", IAP_EVENT_24H_10H) \
+__PMC_EV_ALIAS("L2_RQSTS.CODE_RD_MISS", IAP_EVENT_24H_20H) \
+__PMC_EV_ALIAS("L2_RQSTS.ALL_CODE_RD", IAP_EVENT_24H_30H) \
+__PMC_EV_ALIAS("L2_RQSTS.PF_HIT", IAP_EVENT_24H_40H) \
+__PMC_EV_ALIAS("L2_RQSTS.PF_MISS", IAP_EVENT_24H_80H) \
+__PMC_EV_ALIAS("L2_RQSTS.ALL_PF", IAP_EVENT_24H_C0H) \
+__PMC_EV_ALIAS("L2_STORE_LOCK_RQSTS.MISS", IAP_EVENT_27H_01H) \
+__PMC_EV_ALIAS("L2_STORE_LOCK_RQSTS.HIT_E", IAP_EVENT_27H_04H) \
+__PMC_EV_ALIAS("L2_STORE_LOCK_RQSTS.HIT_M", IAP_EVENT_27H_08H) \
+__PMC_EV_ALIAS("L2_STORE_LOCK_RQSTS.ALL", IAP_EVENT_27H_0FH) \
+__PMC_EV_ALIAS("L2_L1D_WB_RQSTS.HIT_E", IAP_EVENT_28H_04H) \
+__PMC_EV_ALIAS("L2_L1D_WB_RQSTS.HIT_M", IAP_EVENT_28H_08H) \
+__PMC_EV_ALIAS("LONGEST_LAT_CACHE.REFERENCE", IAP_EVENT_2EH_4FH) \
+__PMC_EV_ALIAS("LONGEST_LAT_CACHE.MISS", IAP_EVENT_2EH_41H) \
+__PMC_EV_ALIAS("CPU_CLK_UNHALTED.THREAD_P", IAP_EVENT_3CH_00H) \
+__PMC_EV_ALIAS("CPU_CLK_THREAD_UNHALTED.REF_XCLK", IAP_EVENT_3CH_01H) \
+__PMC_EV_ALIAS("L1D_PEND_MISS.PENDING", IAP_EVENT_48H_01H) \
+__PMC_EV_ALIAS("DTLB_STORE_MISSES.MISS_CAUSES_A_WALK", IAP_EVENT_49H_01H) \
+__PMC_EV_ALIAS("DTLB_STORE_MISSES.WALK_COMPLETED", IAP_EVENT_49H_02H) \
+__PMC_EV_ALIAS("DTLB_STORE_MISSES.WALK_DURATION", IAP_EVENT_49H_04H) \
+__PMC_EV_ALIAS("DTLB_STORE_MISSES.STLB_HIT", IAP_EVENT_49H_10H) \
+__PMC_EV_ALIAS("LOAD_HIT_PRE.SW_PF", IAP_EVENT_4CH_01H) \
+__PMC_EV_ALIAS("LOAD_HIT_PRE.HW_PF", IAP_EVENT_4CH_02H) \
+__PMC_EV_ALIAS("HW_PRE_REQ.DL1_MISS", IAP_EVENT_4EH_02H) \
+__PMC_EV_ALIAS("L1D.REPLACEMENT", IAP_EVENT_51H_01H) \
+__PMC_EV_ALIAS("L1D.ALLOCATED_IN_M", IAP_EVENT_51H_02H) \
+__PMC_EV_ALIAS("L1D.EVICTION", IAP_EVENT_51H_04H) \
+__PMC_EV_ALIAS("L1D.ALL_M_REPLACEMENT", IAP_EVENT_51H_08H) \
+__PMC_EV_ALIAS("PARTIAL_RAT_STALLS.FLAGS_MERGE_UOP", IAP_EVENT_59H_20H) \
+__PMC_EV_ALIAS("PARTIAL_RAT_STALLS.SLOW_LEA_WINDOW", IAP_EVENT_59H_40H) \
+__PMC_EV_ALIAS("PARTIAL_RAT_STALLS.MUL_SINGLE_UOP", IAP_EVENT_59H_80H) \
+__PMC_EV_ALIAS("RESOURCE_STALLS2.ALL_FL_EMPTY", IAP_EVENT_5BH_0CH) \
+__PMC_EV_ALIAS("RESOURCE_STALLS2.ALL_PRF_CONTROL", IAP_EVENT_5BH_0FH) \
+__PMC_EV_ALIAS("RESOURCE_STALLS2.BOB_FULL", IAP_EVENT_5BH_40H) \
+__PMC_EV_ALIAS("RESOURCE_STALLS2.OOO_RSRC", IAP_EVENT_5BH_4FH) \
+__PMC_EV_ALIAS("CPL_CYCLES.RING0", IAP_EVENT_5CH_01H) \
+__PMC_EV_ALIAS("CPL_CYCLES.RING123", IAP_EVENT_5CH_02H) \
+__PMC_EV_ALIAS("RS_EVENTS.EMPTY_CYCLES", IAP_EVENT_5EH_01H) \
+__PMC_EV_ALIAS("OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD", IAP_EVENT_60H_01H)\
+__PMC_EV_ALIAS("OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO", IAP_EVENT_60H_04H) \
+__PMC_EV_ALIAS("OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD", IAP_EVENT_60H_08H) \
+__PMC_EV_ALIAS("LOCK_CYCLES.SPLIT_LOCK_UC_LOCK_DURATION", IAP_EVENT_63H_01H) \
+__PMC_EV_ALIAS("LOCK_CYCLES.CACHE_LOCK_DURATION", IAP_EVENT_63H_02H) \
+__PMC_EV_ALIAS("IDQ.EMPTY", IAP_EVENT_79H_02H) \
+__PMC_EV_ALIAS("IQD.MITE_UOPS", IAP_EVENT_79H_04H) \
+__PMC_EV_ALIAS("IDQ.DSB_UOPS", IAP_EVENT_79H_08H) \
+__PMC_EV_ALIAS("IDQ.MS_DSB_UOPS", IAP_EVENT_79H_10H) \
+__PMC_EV_ALIAS("IDQ.MS_MITE_UOPS", IAP_EVENT_79H_20H) \
+__PMC_EV_ALIAS("IDQ.MS_UOPS", IAP_EVENT_79H_30H) \
+__PMC_EV_ALIAS("ICACHE.MISSES", IAP_EVENT_80H_02H) \
+__PMC_EV_ALIAS("ITLB_MISSES.MISS_CAUSES_A_WALK", IAP_EVENT_85H_01H) \
+__PMC_EV_ALIAS("ITLB_MISSES.WALK_COMPLETED", IAP_EVENT_85H_02H) \
+__PMC_EV_ALIAS("ITLB_MISSES.WALK_DURATION", IAP_EVENT_85H_04H) \
+__PMC_EV_ALIAS("ITLB_MISSES.STLB_HIT", IAP_EVENT_85H_10H) \
+__PMC_EV_ALIAS("ILD_STALL.LCP", IAP_EVENT_87H_01H) \
+__PMC_EV_ALIAS("ILD_STALL.IQ_FULL", IAP_EVENT_87H_04H) \
+__PMC_EV_ALIAS("BR_INST_EXEC.COND", IAP_EVENT_88H_01H) \
+__PMC_EV_ALIAS("BR_INST_EXEC.DIRECT_JMP", IAP_EVENT_88H_02H) \
+__PMC_EV_ALIAS("BR_INST_EXEC.INDIRECT_JMP_NON_CALL_RET", IAP_EVENT_88H_04H) \
+__PMC_EV_ALIAS("BR_INST_EXEC.RETURN_NEAR", IAP_EVENT_88H_08H) \
+__PMC_EV_ALIAS("BR_INST_EXEC.DIRECT_NEAR_CALL", IAP_EVENT_88H_10H) \
+__PMC_EV_ALIAS("BR_INST_EXEC.INDIRECT_NEAR_CALL", IAP_EVENT_88H_20H) \
+__PMC_EV_ALIAS("BR_INST_EXEC.NONTAKEN", IAP_EVENT_88H_40H) \
+__PMC_EV_ALIAS("BR_INST_EXEC.TAKEN", IAP_EVENT_88H_80H) \
+__PMC_EV_ALIAS("BR_INST_EXE.ALL_BRANCHES", IAP_EVENT_88H_FFH) \
+__PMC_EV_ALIAS("BR_MISP_EXEC.COND", IAP_EVENT_89H_01H) \
+__PMC_EV_ALIAS("BR_MISP_EXEC.INDIRECT_JMP_NON_CALL_RET", IAP_EVENT_89H_04H) \
+__PMC_EV_ALIAS("BR_MISP_EXEC.RETURN_NEAR", IAP_EVENT_89H_08H) \
+__PMC_EV_ALIAS("BR_MISP_EXEC.DIRECT_NEAR_CALL", IAP_EVENT_89H_10H) \
+__PMC_EV_ALIAS("BR_MISP_EXEC.INDIRECT_NEAR_CALL", IAP_EVENT_89H_20H) \
+__PMC_EV_ALIAS("BR_MISP_EXEC.NONTAKEN", IAP_EVENT_89H_40H) \
+__PMC_EV_ALIAS("BR_MISP_EXEC.TAKEN", IAP_EVENT_89H_80H) \
+__PMC_EV_ALIAS("BR_MISP_EXEC.ALL_BRANCHES", IAP_EVENT_89H_FFH) \
+__PMC_EV_ALIAS("IDQ_UOPS_NOT_DELIVERED.CORE", IAP_EVENT_9CH_01H) \
+__PMC_EV_ALIAS("UOPS_DISPATCHED_PORT.PORT_0", IAP_EVENT_A1H_01H) \
+__PMC_EV_ALIAS("UOPS_DISPATCHED_PORT.PORT_1", IAP_EVENT_A1H_02H) \
+__PMC_EV_ALIAS("UOPS_DISPATCHED_PORT.PORT_2_LD", IAP_EVENT_A1H_04H) \
+__PMC_EV_ALIAS("UOPS_DISPATCHED_PORT.PORT_2_STA", IAP_EVENT_A1H_08H) \
+__PMC_EV_ALIAS("UOPS_DISPATCHED_PORT.PORT_2", IAP_EVENT_A1H_0CH) \
+__PMC_EV_ALIAS("UOPS_DISPATCHED_PORT.PORT_3_LD", IAP_EVENT_A1H_10H) \
+__PMC_EV_ALIAS("UOPS_DISPATCHED_PORT.PORT_3_STA", IAP_EVENT_A1H_20H) \
+__PMC_EV_ALIAS("UOPS_DISPATCHED_PORT.PORT_3", IAP_EVENT_A1H_30H) \
+__PMC_EV_ALIAS("UOPS_DISPATCHED_PORT.PORT_4", IAP_EVENT_A1H_40H) \
+__PMC_EV_ALIAS("UOPS_DISPATCHED_PORT.PORT_5", IAP_EVENT_A1H_80H) \
+__PMC_EV_ALIAS("RESOURCE_STALLS.ANY", IAP_EVENT_A2H_01H) \
+__PMC_EV_ALIAS("RESOURCE_STALLS.LB", IAP_EVENT_A2H_04H) \
+__PMC_EV_ALIAS("RESOURCE_STALLS.SB", IAP_EVENT_A2H_08H) \
+__PMC_EV_ALIAS("RESOURCE_STALLS.ROB", IAP_EVENT_A2H_10H) \
+__PMC_EV_ALIAS("RESOURCE_STALLS.FCSW", IAP_EVENT_A2H_20H) \
+__PMC_EV_ALIAS("RESOURCE_STALLS.MXCSR", IAP_EVENT_A2H_40H) \
+__PMC_EV_ALIAS("RESOURCE_STALLS.OTHER", IAP_EVENT_A2H_80H) \
+__PMC_EV_ALIAS("DSB2MITE_SWITCHES.COUNT", IAP_EVENT_ABH_01H) \
+__PMC_EV_ALIAS("DSB2MITE_SWITCHES.PENALTY_CYCLES", IAP_EVENT_ABH_02H) \
+__PMC_EV_ALIAS("DSB_FILL.OTHER_CANCEL", IAP_EVENT_ACH_02H) \
+__PMC_EV_ALIAS("DSB_FILL.EXCEED_DSB_LINES", IAP_EVENT_ACH_08H) \
+__PMC_EV_ALIAS("DSB_FILL.ALL_CANCEL", IAP_EVENT_ACH_0AH) \
+__PMC_EV_ALIAS("ITLB.ITLB_FLUSH", IAP_EVENT_AEH_01H) \
+__PMC_EV_ALIAS("OFFCORE_REQUESTS.DEMAND_DATA_RD", IAP_EVENT_B0H_01H) \
+__PMC_EV_ALIAS("OFFCORE_REQUESTS.DEMAND_RFO", IAP_EVENT_B0H_04H) \
+__PMC_EV_ALIAS("OFFCORE_REQUESTS.ALL_DATA_RD", IAP_EVENT_B0H_08H) \
+__PMC_EV_ALIAS("UOPS_DISPATCHED.THREAD", IAP_EVENT_B1H_01H) \
+__PMC_EV_ALIAS("UOPS_DISPATCHED.CORE", IAP_EVENT_B1H_02H) \
+__PMC_EV_ALIAS("OFFCORE_REQUESTS_BUFFER.SQ_FULL", IAP_EVENT_B2H_01H) \
+__PMC_EV_ALIAS("AGU_BYPASS_CANCEL.COUNT", IAP_EVENT_B6H_01H) \
+__PMC_EV_ALIAS("OFF_CORE_RESPONSE_0", IAP_EVENT_B7H_01H) \
+__PMC_EV_ALIAS("OFF_CORE_RESPONSE_1", IAP_EVENT_BBH_01H) \
+__PMC_EV_ALIAS("TLB_FLUSH.DTLB_THREAD", IAP_EVENT_BDH_01H) \
+__PMC_EV_ALIAS("TLB_FLUSH.STLB_ANY", IAP_EVENT_BDH_20H) \
+__PMC_EV_ALIAS("L1D_BLOCKS.BANK_CONFLICT_CYCLES", IAP_EVENT_BFH_05H) \
+__PMC_EV_ALIAS("INST_RETIRED.ANY_P", IAP_EVENT_C0H_00H) \
+__PMC_EV_ALIAS("INST_RETIRED.PREC_DIST", IAP_EVENT_C0H_01H) \
+__PMC_EV_ALIAS("INST_RETIRED.X87", IAP_EVENT_C0H_02H) \
+__PMC_EV_ALIAS("OTHER_ASSISTS.ITLB_MISS_RETIRED", IAP_EVENT_C1H_02H) \
+__PMC_EV_ALIAS("OTHER_ASSISTS.AVX_STORE", IAP_EVENT_C1H_08H) \
+__PMC_EV_ALIAS("OTHER_ASSISTS.AVX_TO_SSE", IAP_EVENT_C1H_10H) \
+__PMC_EV_ALIAS("OTHER_ASSISTS.SSE_TO_AVX", IAP_EVENT_C1H_20H) \
+__PMC_EV_ALIAS("UOPS_RETIRED.ALL", IAP_EVENT_C2H_01H) \
+__PMC_EV_ALIAS("UOPS_RETIRED.RETIRE_SLOTS", IAP_EVENT_C2H_02H) \
+__PMC_EV_ALIAS("MACHINE_CLEARS.MEMORY_ORDERING", IAP_EVENT_C3H_02H) \
+__PMC_EV_ALIAS("MACHINE_CLEARS.SMC", IAP_EVENT_C3H_04H) \
+__PMC_EV_ALIAS("MACHINE_CLEARS.MASKMOV", IAP_EVENT_C3H_20H) \
+__PMC_EV_ALIAS("BR_INST_RETIRED.ALL_BRANCH", IAP_EVENT_C4H_00H) \
+__PMC_EV_ALIAS("BR_INST_RETIRED.CONDITIONAL", IAP_EVENT_C4H_01H) \
+__PMC_EV_ALIAS("BR_INST_RETIRED.NEAR_CALL", IAP_EVENT_C4H_02H) \
+__PMC_EV_ALIAS("BR_INST_RETIRED.ALL_BRANCHES", IAP_EVENT_C4H_04H) \
+__PMC_EV_ALIAS("BR_INST_RETIRED.NEAR_RETURN", IAP_EVENT_C4H_08H) \
+__PMC_EV_ALIAS("BR_INST_RETIRED.NOT_TAKEN", IAP_EVENT_C4H_10H) \
+__PMC_EV_ALIAS("BR_INST_RETIRED.NEAR_TAKEN", IAP_EVENT_C4H_20H) \
+__PMC_EV_ALIAS("BR_INST_RETIRED.FAR_BRANCH", IAP_EVENT_C4H_40H) \
+__PMC_EV_ALIAS("BR_MISP_RETIRED.ALL_BRANCHES", IAP_EVENT_C5H_00H) \
+__PMC_EV_ALIAS("BR_MISP_RETIRED.CONDITIONAL", IAP_EVENT_C5H_01H) \
+__PMC_EV_ALIAS("BR_MISP_RETIRED.NEAR_CALL", IAP_EVENT_C5H_02H) \
+__PMC_EV_ALIAS("BR_MISP_RETIRED.ALL_BRANCHES", IAP_EVENT_C5H_04H) \
+__PMC_EV_ALIAS("BR_MISP_RETIRED.NOT_TAKEN", IAP_EVENT_C5H_10H) \
+__PMC_EV_ALIAS("BR_MISP_RETIRED.TAKEN", IAP_EVENT_C5H_20H) \
+__PMC_EV_ALIAS("FP_ASSIST.X87_OUTPUT", IAP_EVENT_CAH_02H) \
+__PMC_EV_ALIAS("FP_ASSIST.X87_INPUT", IAP_EVENT_CAH_04H) \
+__PMC_EV_ALIAS("FP_ASSIST.SIMD_OUTPUT", IAP_EVENT_CAH_08H) \
+__PMC_EV_ALIAS("FP_ASSIST.SIMD_INPUT", IAP_EVENT_CAH_10H) \
+__PMC_EV_ALIAS("FP_ASSIST.ANY", IAP_EVENT_CAH_1EH) \
+__PMC_EV_ALIAS("ROB_MISC_EVENTS.LBR_INSERTS", IAP_EVENT_CCH_20H) \
+__PMC_EV_ALIAS("MEM_TRANS_RETIRED.LOAD_LATENCY", IAP_EVENT_CDH_01H) \
+__PMC_EV_ALIAS("MEM_TRANS_RETIRED.PRECISE_STORE", IAP_EVENT_CDH_02H) \
+__PMC_EV_ALIAS("MEM_UOP_RETIRED.LOADS", IAP_EVENT_D0H_01H) \
+__PMC_EV_ALIAS("MEM_UOP_RETIRED.STORES", IAP_EVENT_D0H_02H) \
+__PMC_EV_ALIAS("MEM_UOP_RETIRED.STLB_MISS", IAP_EVENT_D0H_10H) \
+__PMC_EV_ALIAS("MEM_UOP_RETIRED.LOCK", IAP_EVENT_D0H_20H) \
+__PMC_EV_ALIAS("MEM_UOP_RETIRED.SPLIT", IAP_EVENT_D0H_40H) \
+__PMC_EV_ALIAS("MEM_UOP_RETIRED_ALL", IAP_EVENT_D0H_80H) \
+__PMC_EV_ALIAS("MEM_LOAD_UOPS_RETIRED.L1_HIT", IAP_EVENT_D1H_01H) \
+__PMC_EV_ALIAS("MEM_LOAD_UOPS_RETIRED.L2_HIT", IAP_EVENT_D1H_02H) \
+__PMC_EV_ALIAS("MEM_LOAD_UOPS_RETIRED.LLC_HIT", IAP_EVENT_D1H_04H) \
+__PMC_EV_ALIAS("MEM_LOAD_UOPS_RETIRED.HIT_LFB", IAP_EVENT_D1H_40H) \
+__PMC_EV_ALIAS("MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS", IAP_EVENT_D2H_01H) \
+__PMC_EV_ALIAS("MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT", IAP_EVENT_D2H_02H) \
+__PMC_EV_ALIAS("MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM", IAP_EVENT_D2H_04H) \
+__PMC_EV_ALIAS("MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_NONE", IAP_EVENT_D2H_08H) \
+__PMC_EV_ALIAS("MEM_LOAD_UOPS_LLC_HIT_RETIRED.LLC_MISS", IAP_EVENT_D4H_02H) \
+__PMC_EV_ALIAS("L2_TRANS.DEMAND_DATA_RD", IAP_EVENT_F0H_01H) \
+__PMC_EV_ALIAS("L2_TRANS.RFO", IAP_EVENT_F0H_02H) \
+__PMC_EV_ALIAS("L2_TRANS.CODE_RD", IAP_EVENT_F0H_04H) \
+__PMC_EV_ALIAS("L2_TRANS.ALL_PF", IAP_EVENT_F0H_08H) \
+__PMC_EV_ALIAS("L2_TRANS.L1D_WB", IAP_EVENT_F0H_10H) \
+__PMC_EV_ALIAS("L2_TRANS.L2_FILL", IAP_EVENT_F0H_20H) \
+__PMC_EV_ALIAS("L2_TRANS.L2_WB", IAP_EVENT_F0H_40H) \
+__PMC_EV_ALIAS("L2_TRANS.ALL_REQUESTS", IAP_EVENT_F0H_80H) \
+__PMC_EV_ALIAS("L2_LINES_IN.I", IAP_EVENT_F1H_01H) \
+__PMC_EV_ALIAS("L2_LINES_IN.S", IAP_EVENT_F1H_02H) \
+__PMC_EV_ALIAS("L2_LINES_IN.E", IAP_EVENT_F1H_04H) \
+__PMC_EV_ALIAS("L2_LINES_IN.ALL", IAP_EVENT_F1H_07H) \
+__PMC_EV_ALIAS("L2_LINES_OUT.DEMAND_CLEAN", IAP_EVENT_F2H_01H) \
+__PMC_EV_ALIAS("L2_LINES_OUT.DEMAND_DIRTY", IAP_EVENT_F2H_02H) \
+__PMC_EV_ALIAS("L2_LINES_OUT.PF_CLEAN", IAP_EVENT_F2H_04H) \
+__PMC_EV_ALIAS("L2_LINES_OUT.PF_DIRTY", IAP_EVENT_F2H_08H) \
+__PMC_EV_ALIAS("L2_LINES_OUT.DIRTY_ALL", IAP_EVENT_F2H_0AH) \
+__PMC_EV_ALIAS("SQ_MISC.SPLIT_LOCK", IAP_EVENT_F4H_10H)
+
+/*
+ * Aliases for Sandy Bridge Xeon PMC events (253669-044US August 2012)
+ */
+
+#define __PMC_EV_ALIAS_SANDYBRIDGE_XEON() \
+__PMC_EV_ALIAS_INTEL_ARCHITECTURAL() \
+__PMC_EV_ALIAS("LD_BLOCKS.DATA_UNKNOWN", IAP_EVENT_03H_01H) \
+__PMC_EV_ALIAS("LD_BLOCKS.STORE_FORWARD", IAP_EVENT_03H_02H) \
+__PMC_EV_ALIAS("LD_BLOCKS.NO_SR", IAP_EVENT_03H_08H) \
+__PMC_EV_ALIAS("LD_BLOCKS.ALL_BLOCK", IAP_EVENT_03H_10H) \
+__PMC_EV_ALIAS("MISALIGN_MEM_REF.LOADS", IAP_EVENT_05H_01H) \
+__PMC_EV_ALIAS("MISALIGN_MEM_REF.STORES", IAP_EVENT_05H_02H) \
+__PMC_EV_ALIAS("LD_BLOCKS_PARTIAL.ADDRESS_ALIAS", IAP_EVENT_07H_01H) \
+__PMC_EV_ALIAS("LD_BLOCKS_PARTIAL.ALL_STA_BLOCK", IAP_EVENT_07H_08H) \
+__PMC_EV_ALIAS("DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK", IAP_EVENT_08H_01H) \
+__PMC_EV_ALIAS("DTLB_LOAD_MISSES.WALK_COMPLETED", IAP_EVENT_08H_02H) \
+__PMC_EV_ALIAS("DTLB_LOAD_MISSES.WALK_DURATION", IAP_EVENT_08H_04H) \
+__PMC_EV_ALIAS("DTLB_LOAD_MISSES.STLB_HIT", IAP_EVENT_08H_10H) \
+__PMC_EV_ALIAS("INT_MISC.RECOVERY_CYCLES", IAP_EVENT_0DH_03H) \
+__PMC_EV_ALIAS("INT_MISC.RAT_STALL_CYCLES", IAP_EVENT_0DH_40H) \
+__PMC_EV_ALIAS("UOPS_ISSUED.ANY", IAP_EVENT_0EH_01H) \
+__PMC_EV_ALIAS("FP_COMP_OPS_EXE.X87", IAP_EVENT_10H_01H) \
+__PMC_EV_ALIAS("FP_COMP_OPS_EXE.SSE_FP_PACKED_DOUBLE", IAP_EVENT_10H_10H) \
+__PMC_EV_ALIAS("FP_COMP_OPS_EXE.SSE_FP_SCALAR_SINGLE", IAP_EVENT_10H_20H) \
+__PMC_EV_ALIAS("FP_COMP_OPS_EXE.SSE_PACKED_SINGLE", IAP_EVENT_10H_40H) \
+__PMC_EV_ALIAS("FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE", IAP_EVENT_10H_80H) \
+__PMC_EV_ALIAS("SIMD_FP_256.PACKED_SINGLE", IAP_EVENT_11H_01H) \
+__PMC_EV_ALIAS("SIMD_FP_256.PACKED_DOUBLE", IAP_EVENT_11H_02H) \
+__PMC_EV_ALIAS("ARITH.FPU_DIV_ACTIVE", IAP_EVENT_14H_01H) \
+__PMC_EV_ALIAS("INSTS_WRITTEN_TO_IQ.INSTS", IAP_EVENT_17H_01H) \
+__PMC_EV_ALIAS("L2_RQSTS.DEMAND_DATA_RD_HIT", IAP_EVENT_24H_01H) \
+__PMC_EV_ALIAS("L2_RQSTS.ALL_DEMAND_DATA_RD", IAP_EVENT_24H_03H) \
+__PMC_EV_ALIAS("L2_RQSTS.RFO_HITS", IAP_EVENT_24H_04H) \
+__PMC_EV_ALIAS("L2_RQSTS.RFO_MISS", IAP_EVENT_24H_08H) \
+__PMC_EV_ALIAS("L2_RQSTS.ALL_RFO", IAP_EVENT_24H_0CH) \
+__PMC_EV_ALIAS("L2_RQSTS.CODE_RD_HIT", IAP_EVENT_24H_10H) \
+__PMC_EV_ALIAS("L2_RQSTS.CODE_RD_MISS", IAP_EVENT_24H_20H) \
+__PMC_EV_ALIAS("L2_RQSTS.ALL_CODE_RD", IAP_EVENT_24H_30H) \
+__PMC_EV_ALIAS("L2_RQSTS.PF_HIT", IAP_EVENT_24H_40H) \
+__PMC_EV_ALIAS("L2_RQSTS.PF_MISS", IAP_EVENT_24H_80H) \
+__PMC_EV_ALIAS("L2_RQSTS.ALL_PF", IAP_EVENT_24H_C0H) \
+__PMC_EV_ALIAS("L2_STORE_LOCK_RQSTS.MISS", IAP_EVENT_27H_01H) \
+__PMC_EV_ALIAS("L2_STORE_LOCK_RQSTS.HIT_E", IAP_EVENT_27H_04H) \
+__PMC_EV_ALIAS("L2_STORE_LOCK_RQSTS.HIT_M", IAP_EVENT_27H_08H) \
+__PMC_EV_ALIAS("L2_STORE_LOCK_RQSTS.ALL", IAP_EVENT_27H_0FH) \
+__PMC_EV_ALIAS("L2_L1D_WB_RQSTS.MISS", IAP_EVENT_28H_01H) \
+__PMC_EV_ALIAS("L2_L1D_WB_RQSTS.HIT_S", IAP_EVENT_28H_02H) \
+__PMC_EV_ALIAS("L2_L1D_WB_RQSTS.HIT_E", IAP_EVENT_28H_04H) \
+__PMC_EV_ALIAS("L2_L1D_WB_RQSTS.HIT_M", IAP_EVENT_28H_08H) \
+__PMC_EV_ALIAS("L2_L1D_WB_RQSTS.ALL", IAP_EVENT_28H_0FH) \
+__PMC_EV_ALIAS("LONGEST_LAT_CACHE.REFERENCE", IAP_EVENT_2EH_4FH) \
+__PMC_EV_ALIAS("LONGEST_LAT_CACHE.MISS", IAP_EVENT_2EH_41H) \
+__PMC_EV_ALIAS("CPU_CLK_UNHALTED.THREAD_P", IAP_EVENT_3CH_00H) \
+__PMC_EV_ALIAS("CPU_CLK_THREAD_UNHALTED.REF_XCLK", IAP_EVENT_3CH_01H) \
+__PMC_EV_ALIAS("L1D_PEND_MISS.PENDING", IAP_EVENT_48H_01H) \
+__PMC_EV_ALIAS("DTLB_STORE_MISSES.MISS_CAUSES_A_WALK", IAP_EVENT_49H_01H) \
+__PMC_EV_ALIAS("DTLB_STORE_MISSES.WALK_COMPLETED", IAP_EVENT_49H_02H) \
+__PMC_EV_ALIAS("DTLB_STORE_MISSES.WALK_DURATION", IAP_EVENT_49H_04H) \
+__PMC_EV_ALIAS("DTLB_STORE_MISSES.STLB_HIT", IAP_EVENT_49H_10H) \
+__PMC_EV_ALIAS("LOAD_HIT_PRE.SW_PF", IAP_EVENT_4CH_01H) \
+__PMC_EV_ALIAS("LOAD_HIT_PRE.HW_PF", IAP_EVENT_4CH_02H) \
+__PMC_EV_ALIAS("HW_PRE_REQ.DL1_MISS", IAP_EVENT_4EH_02H) \
+__PMC_EV_ALIAS("L1D.REPLACEMENT", IAP_EVENT_51H_01H) \
+__PMC_EV_ALIAS("L1D.ALLOCATED_IN_M", IAP_EVENT_51H_02H) \
+__PMC_EV_ALIAS("L1D.EVICTION", IAP_EVENT_51H_04H) \
+__PMC_EV_ALIAS("L1D.ALL_M_REPLACEMENT", IAP_EVENT_51H_08H) \
+__PMC_EV_ALIAS("PARTIAL_RAT_STALLS.FLAGS_MERGE_UOP", IAP_EVENT_59H_20H) \
+__PMC_EV_ALIAS("PARTIAL_RAT_STALLS.SLOW_LEA_WINDOW", IAP_EVENT_59H_40H) \
+__PMC_EV_ALIAS("PARTIAL_RAT_STALLS.MUL_SINGLE_UOP", IAP_EVENT_59H_80H) \
+__PMC_EV_ALIAS("RESOURCE_STALLS2.ALL_FL_EMPTY", IAP_EVENT_5BH_0CH) \
+__PMC_EV_ALIAS("RESOURCE_STALLS2.ALL_PRF_CONTROL", IAP_EVENT_5BH_0FH) \
+__PMC_EV_ALIAS("RESOURCE_STALLS2.BOB_FULL", IAP_EVENT_5BH_40H) \
+__PMC_EV_ALIAS("RESOURCE_STALLS2.OOO_RSRC", IAP_EVENT_5BH_4FH) \
+__PMC_EV_ALIAS("CPL_CYCLES.RING0", IAP_EVENT_5CH_01H) \
+__PMC_EV_ALIAS("CPL_CYCLES.RING123", IAP_EVENT_5CH_02H) \
+__PMC_EV_ALIAS("RS_EVENTS.EMPTY_CYCLES", IAP_EVENT_5EH_01H) \
+__PMC_EV_ALIAS("OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD", IAP_EVENT_60H_01H)\
+__PMC_EV_ALIAS("OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO", IAP_EVENT_60H_04H) \
+__PMC_EV_ALIAS("OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD", IAP_EVENT_60H_08H) \
+__PMC_EV_ALIAS("LOCK_CYCLES.SPLIT_LOCK_UC_LOCK_DURATION", IAP_EVENT_63H_01H) \
+__PMC_EV_ALIAS("LOCK_CYCLES.CACHE_LOCK_DURATION", IAP_EVENT_63H_02H) \
+__PMC_EV_ALIAS("IDQ.EMPTY", IAP_EVENT_79H_02H) \
+__PMC_EV_ALIAS("IDQ.MITE_UOPS", IAP_EVENT_79H_04H) \
+__PMC_EV_ALIAS("IDQ.DSB_UOPS", IAP_EVENT_79H_08H) \
+__PMC_EV_ALIAS("IDQ.MS_DSB_UOPS", IAP_EVENT_79H_10H) \
+__PMC_EV_ALIAS("IDQ.MS_MITE_UOPS", IAP_EVENT_79H_20H) \
+__PMC_EV_ALIAS("IDQ.MS_UOPS", IAP_EVENT_79H_30H) \
+__PMC_EV_ALIAS("ICACHE.MISSES", IAP_EVENT_80H_02H) \
+__PMC_EV_ALIAS("ITLB_MISSES.MISS_CAUSES_A_WALK", IAP_EVENT_85H_01H) \
+__PMC_EV_ALIAS("ITLB_MISSES.WALK_COMPLETED", IAP_EVENT_85H_02H) \
+__PMC_EV_ALIAS("ITLB_MISSES.WALK_DURATION", IAP_EVENT_85H_04H) \
+__PMC_EV_ALIAS("ITLB_MISSES.STLB_HIT", IAP_EVENT_85H_10H) \
+__PMC_EV_ALIAS("ILD_STALL.LCP", IAP_EVENT_87H_01H) \
+__PMC_EV_ALIAS("ILD_STALL.IQ_FULL", IAP_EVENT_87H_04H) \
+__PMC_EV_ALIAS("BR_INST_EXEC.COND", IAP_EVENT_88H_01H) \
+__PMC_EV_ALIAS("BR_INST_EXEC.DIRECT_JMP", IAP_EVENT_88H_02H) \
+__PMC_EV_ALIAS("BR_INST_EXEC.INDIRECT_JMP_NON_CALL_RET", IAP_EVENT_88H_04H) \
+__PMC_EV_ALIAS("BR_INST_EXEC.RETURN_NEAR", IAP_EVENT_88H_08H) \
+__PMC_EV_ALIAS("BR_INST_EXEC.DIRECT_NEAR_CALL", IAP_EVENT_88H_10H) \
+__PMC_EV_ALIAS("BR_INST_EXEC.INDIRECT_NEAR_CALL", IAP_EVENT_88H_20H) \
+__PMC_EV_ALIAS("BR_INST_EXEC.NONTAKEN", IAP_EVENT_88H_40H) \
+__PMC_EV_ALIAS("BR_INST_EXEC.TAKEN", IAP_EVENT_88H_80H) \
+__PMC_EV_ALIAS("BR_INST_EXE.ALL_BRANCHES", IAP_EVENT_88H_FFH) \
+__PMC_EV_ALIAS("BR_MISP_EXEC.COND", IAP_EVENT_89H_01H) \
+__PMC_EV_ALIAS("BR_MISP_EXEC.INDIRECT_JMP_NON_CALL_RET", IAP_EVENT_89H_04H) \
+__PMC_EV_ALIAS("BR_MISP_EXEC.RETURN_NEAR", IAP_EVENT_89H_08H) \
+__PMC_EV_ALIAS("BR_MISP_EXEC.DIRECT_NEAR_CALL", IAP_EVENT_89H_10H) \
+__PMC_EV_ALIAS("BR_MISP_EXEC.INDIRECT_NEAR_CALL", IAP_EVENT_89H_20H) \
+__PMC_EV_ALIAS("BR_MISP_EXEC.NONTAKEN", IAP_EVENT_89H_40H) \
+__PMC_EV_ALIAS("BR_MISP_EXEC.TAKEN", IAP_EVENT_89H_80H) \
+__PMC_EV_ALIAS("BR_MISP_EXEC.ALL_BRANCHES", IAP_EVENT_89H_FFH) \
+__PMC_EV_ALIAS("IDQ_UOPS_NOT_DELIVERED.CORE", IAP_EVENT_9CH_01H) \
+__PMC_EV_ALIAS("UOPS_DISPATCHED_PORT.PORT_0", IAP_EVENT_A1H_01H) \
+__PMC_EV_ALIAS("UOPS_DISPATCHED_PORT.PORT_1", IAP_EVENT_A1H_02H) \
+__PMC_EV_ALIAS("UOPS_DISPATCHED_PORT.PORT_2_LD", IAP_EVENT_A1H_04H) \
+__PMC_EV_ALIAS("UOPS_DISPATCHED_PORT.PORT_2_STA", IAP_EVENT_A1H_08H) \
+__PMC_EV_ALIAS("UOPS_DISPATCHED_PORT.PORT_2", IAP_EVENT_A1H_0CH) \
+__PMC_EV_ALIAS("UOPS_DISPATCHED_PORT.PORT_3_LD", IAP_EVENT_A1H_10H) \
+__PMC_EV_ALIAS("UOPS_DISPATCHED_PORT.PORT_3_STA", IAP_EVENT_A1H_20H) \
+__PMC_EV_ALIAS("UOPS_DISPATCHED_PORT.PORT_3", IAP_EVENT_A1H_30H) \
+__PMC_EV_ALIAS("UOPS_DISPATCHED_PORT.PORT_4", IAP_EVENT_A1H_40H) \
+__PMC_EV_ALIAS("UOPS_DISPATCHED_PORT.PORT_5", IAP_EVENT_A1H_80H) \
+__PMC_EV_ALIAS("RESOURCE_STALLS.ANY", IAP_EVENT_A2H_01H) \
+__PMC_EV_ALIAS("RESOURCE_STALLS.LB", IAP_EVENT_A2H_02H) \
+__PMC_EV_ALIAS("RESOURCE_STALLS.RS", IAP_EVENT_A2H_04H) \
+__PMC_EV_ALIAS("RESOURCE_STALLS.SB", IAP_EVENT_A2H_08H) \
+__PMC_EV_ALIAS("RESOURCE_STALLS.ROB", IAP_EVENT_A2H_10H) \
+__PMC_EV_ALIAS("RESOURCE_STALLS.FCSW", IAP_EVENT_A2H_20H) \
+__PMC_EV_ALIAS("RESOURCE_STALLS.MXCSR", IAP_EVENT_A2H_40H) \
+__PMC_EV_ALIAS("RESOURCE_STALLS.OTHER", IAP_EVENT_A2H_80H) \
+__PMC_EV_ALIAS("CYCLE_ACTIVITY.CYCLES_L2_PENDING", IAP_EVENT_A3H_01H) \
+__PMC_EV_ALIAS("CYCLE_ACTIVITY.CYCLES_L1D_PENDING", IAP_EVENT_A3H_02H) \
+__PMC_EV_ALIAS("CYCLE_ACTIVITY.CYCLES_NO_DISPATCH", IAP_EVENT_A3H_04H) \
+__PMC_EV_ALIAS("DSB2MITE_SWITCHES.COUNT", IAP_EVENT_ABH_01H) \
+__PMC_EV_ALIAS("DSB2MITE_SWITCHES.PENALTY_CYCLES", IAP_EVENT_ABH_02H) \
+__PMC_EV_ALIAS("DSB_FILL.OTHER_CANCEL", IAP_EVENT_ACH_02H) \
+__PMC_EV_ALIAS("DSB_FILL.EXCEED_DSB_LINES", IAP_EVENT_ACH_08H) \
+__PMC_EV_ALIAS("DSB_FILL.ALL_CANCEL", IAP_EVENT_ACH_0AH) \
+__PMC_EV_ALIAS("ITLB.ITLB_FLUSH", IAP_EVENT_AEH_01H) \
+__PMC_EV_ALIAS("OFFCORE_REQUESTS.DEMAND_DATA_RD", IAP_EVENT_B0H_01H) \
+__PMC_EV_ALIAS("OFFCORE_REQUESTS.DEMAND_RFO", IAP_EVENT_B0H_04H) \
+__PMC_EV_ALIAS("OFFCORE_REQUESTS.ALL_DATA_RD", IAP_EVENT_B0H_08H) \
+__PMC_EV_ALIAS("UOPS_DISPATCHED.THREAD", IAP_EVENT_B1H_01H) \
+__PMC_EV_ALIAS("UOPS_DISPATCHED.CORE", IAP_EVENT_B1H_02H) \
+__PMC_EV_ALIAS("OFFCORE_REQUESTS_BUFFER.SQ_FULL", IAP_EVENT_B2H_01H) \
+__PMC_EV_ALIAS("AGU_BYPASS_CANCEL.COUNT", IAP_EVENT_B6H_01H) \
+__PMC_EV_ALIAS("OFF_CORE_RESPONSE_0", IAP_EVENT_B7H_01H) \
+__PMC_EV_ALIAS("OFF_CORE_RESPONSE_1", IAP_EVENT_BBH_01H) \
+__PMC_EV_ALIAS("TLB_FLUSH.DTLB_THREAD", IAP_EVENT_BDH_01H) \
+__PMC_EV_ALIAS("TLB_FLUSH.STLB_ANY", IAP_EVENT_BDH_20H) \
+__PMC_EV_ALIAS("L1D_BLOCKS.BANK_CONFLICT_CYCLES", IAP_EVENT_BFH_05H) \
+__PMC_EV_ALIAS("INST_RETIRED.ANY_P", IAP_EVENT_C0H_00H) \
+__PMC_EV_ALIAS("INST_RETIRED.ALL", IAP_EVENT_C0H_01H) \
+__PMC_EV_ALIAS("OTHER_ASSISTS.ITLB_MISS_RETIRED", IAP_EVENT_C1H_02H) \
+__PMC_EV_ALIAS("OTHER_ASSISTS.AVX_STORE", IAP_EVENT_C1H_08H) \
+__PMC_EV_ALIAS("OTHER_ASSISTS.AVX_TO_SSE", IAP_EVENT_C1H_10H) \
+__PMC_EV_ALIAS("OTHER_ASSISTS.SSE_TO_AVX", IAP_EVENT_C1H_20H) \
+__PMC_EV_ALIAS("UOPS_RETIRED.ALL", IAP_EVENT_C2H_01H) \
+__PMC_EV_ALIAS("UOPS_RETIRED.RETIRE_SLOTS", IAP_EVENT_C2H_02H) \
+__PMC_EV_ALIAS("MACHINE_CLEARS.MEMORY_ORDERING", IAP_EVENT_C3H_02H) \
+__PMC_EV_ALIAS("MACHINE_CLEARS.SMC", IAP_EVENT_C3H_04H) \
+__PMC_EV_ALIAS("MACHINE_CLEARS.MASKMOV", IAP_EVENT_C3H_20H) \
+__PMC_EV_ALIAS("BR_INST_RETIRED.ALL_BRANCH", IAP_EVENT_C4H_00H) \
+__PMC_EV_ALIAS("BR_INST_RETIRED.CONDITIONAL", IAP_EVENT_C4H_01H) \
+__PMC_EV_ALIAS("BR_INST_RETIRED.NEAR_CALL", IAP_EVENT_C4H_02H) \
+__PMC_EV_ALIAS("BR_INST_RETIRED.ALL_BRANCHES", IAP_EVENT_C4H_04H) \
+__PMC_EV_ALIAS("BR_INST_RETIRED.NEAR_RETURN", IAP_EVENT_C4H_08H) \
+__PMC_EV_ALIAS("BR_INST_RETIRED.NOT_TAKEN", IAP_EVENT_C4H_10H) \
+__PMC_EV_ALIAS("BR_INST_RETIRED.NEAR_TAKEN", IAP_EVENT_C4H_20H) \
+__PMC_EV_ALIAS("BR_INST_RETIRED.FAR_BRANCH", IAP_EVENT_C4H_40H) \
+__PMC_EV_ALIAS("BR_MISP_RETIRED.ALL_BRANCHES", IAP_EVENT_C5H_00H) \
+__PMC_EV_ALIAS("BR_MISP_RETIRED.CONDITIONAL", IAP_EVENT_C5H_01H) \
+__PMC_EV_ALIAS("BR_MISP_RETIRED.NEAR_CALL", IAP_EVENT_C5H_02H) \
+__PMC_EV_ALIAS("BR_MISP_RETIRED.ALL_BRANCHES", IAP_EVENT_C5H_04H) \
+__PMC_EV_ALIAS("BR_MISP_RETIRED.NOT_TAKEN", IAP_EVENT_C5H_10H) \
+__PMC_EV_ALIAS("BR_MISP_RETIRED.TAKEN", IAP_EVENT_C5H_20H) \
+__PMC_EV_ALIAS("FP_ASSIST.X87_OUTPUT", IAP_EVENT_CAH_02H) \
+__PMC_EV_ALIAS("FP_ASSIST.X87_INPUT", IAP_EVENT_CAH_04H) \
+__PMC_EV_ALIAS("FP_ASSIST.SIMD_OUTPUT", IAP_EVENT_CAH_08H) \
+__PMC_EV_ALIAS("FP_ASSIST.SIMD_INPUT", IAP_EVENT_CAH_10H) \
+__PMC_EV_ALIAS("FP_ASSIST.ANY", IAP_EVENT_CAH_1EH) \
+__PMC_EV_ALIAS("ROB_MISC_EVENTS.LBR_INSERTS", IAP_EVENT_CCH_20H) \
+__PMC_EV_ALIAS("MEM_TRANS_RETIRED.LOAD_LATENCY", IAP_EVENT_CDH_01H) \
+__PMC_EV_ALIAS("MEM_TRANS_RETIRED.PRECISE_STORE", IAP_EVENT_CDH_02H) \
+__PMC_EV_ALIAS("MEM_UOP_RETIRED.LOADS", IAP_EVENT_D0H_01H) \
+__PMC_EV_ALIAS("MEM_UOP_RETIRED.STORES", IAP_EVENT_D0H_02H) \
+__PMC_EV_ALIAS("MEM_UOP_RETIRED.STLB_MISS", IAP_EVENT_D0H_10H) \
+__PMC_EV_ALIAS("MEM_UOP_RETIRED.LOCK", IAP_EVENT_D0H_20H) \
+__PMC_EV_ALIAS("MEM_UOP_RETIRED.SPLIT", IAP_EVENT_D0H_40H) \
+__PMC_EV_ALIAS("MEM_UOP_RETIRED_ALL", IAP_EVENT_D0H_80H) \
+__PMC_EV_ALIAS("MEM_LOAD_UOPS_RETIRED.L1_HIT", IAP_EVENT_D1H_01H) \
+__PMC_EV_ALIAS("MEM_LOAD_UOPS_RETIRED.L2_HIT", IAP_EVENT_D1H_02H) \
+__PMC_EV_ALIAS("MEM_LOAD_UOPS_RETIRED.LLC_HIT", IAP_EVENT_D1H_04H) \
+__PMC_EV_ALIAS("MEM_LOAD_UOPS_RETIRED.LLC_MISS", IAP_EVENT_D1H_20H) \
+__PMC_EV_ALIAS("MEM_LOAD_UOPS_RETIRED.HIT_LFB", IAP_EVENT_D1H_40H) \
+__PMC_EV_ALIAS("MEM_LOAD_UOPS_LLC_MISS_RETIRED.LOCAL_DRAM", IAP_EVENT_D3H_01H) \
+__PMC_EV_ALIAS("MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_DRAM", IAP_EVENT_D3H_04H) \
+__PMC_EV_ALIAS("MEM_LOAD_UOPS_MISC_RETIRED.LLC_MISS", IAP_EVENT_D4H_02H) \
+__PMC_EV_ALIAS("BACLEARS.ANY", IAP_EVENT_E6H_01H) \
+__PMC_EV_ALIAS("L2_TRANS.DEMAND_DATA_RD", IAP_EVENT_F0H_01H) \
+__PMC_EV_ALIAS("L2_TRANS.RFO", IAP_EVENT_F0H_02H) \
+__PMC_EV_ALIAS("L2_TRANS.CODE_RD", IAP_EVENT_F0H_04H) \
+__PMC_EV_ALIAS("L2_TRANS.ALL_PF", IAP_EVENT_F0H_08H) \
+__PMC_EV_ALIAS("L2_TRANS.L1D_WB", IAP_EVENT_F0H_10H) \
+__PMC_EV_ALIAS("L2_TRANS.L2_FILL", IAP_EVENT_F0H_20H) \
+__PMC_EV_ALIAS("L2_TRANS.L2_WB", IAP_EVENT_F0H_40H) \
+__PMC_EV_ALIAS("L2_TRANS.ALL_REQUESTS", IAP_EVENT_F0H_80H) \
+__PMC_EV_ALIAS("L2_LINES_IN.I", IAP_EVENT_F1H_01H) \
+__PMC_EV_ALIAS("L2_LINES_IN.S", IAP_EVENT_F1H_02H) \
+__PMC_EV_ALIAS("L2_LINES_IN.E", IAP_EVENT_F1H_04H) \
+__PMC_EV_ALIAS("L2_LINES_IN.ALL", IAP_EVENT_F1H_07H) \
+__PMC_EV_ALIAS("L2_LINES_OUT.DEMAND_CLEAN", IAP_EVENT_F2H_01H) \
+__PMC_EV_ALIAS("L2_LINES_OUT.DEMAND_DIRTY", IAP_EVENT_F2H_02H) \
+__PMC_EV_ALIAS("L2_LINES_OUT.PF_CLEAN", IAP_EVENT_F2H_04H) \
+__PMC_EV_ALIAS("L2_LINES_OUT.PF_DIRTY", IAP_EVENT_F2H_08H) \
+__PMC_EV_ALIAS("L2_LINES_OUT.DIRTY_ALL", IAP_EVENT_F2H_0AH) \
+__PMC_EV_ALIAS("SQ_MISC.SPLIT_LOCK", IAP_EVENT_F4H_10H)
+
+/* Timestamp counters. */
+#define __PMC_EV_TSC() \
+ __PMC_EV(TSC, TSC)
+
+#define PMC_EV_TSC_FIRST PMC_EV_TSC_TSC
+#define PMC_EV_TSC_LAST PMC_EV_TSC_TSC
+
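+/*
+ * Each __PMC_EV(CLASS, EVENT) entry above contributes an event symbol
+ * of the form PMC_EV_CLASS_EVENT, which is how the FIRST/LAST range
+ * markers are derived; the TSC class has a single event, so both
+ * markers name PMC_EV_TSC_TSC.  A hedged sketch of counting the TSC
+ * through the libpmc(3) interface (the event specifier, CPU number
+ * and omitted error handling are illustrative only):
+ *
+ *	pmc_id_t pmcid;
+ *	pmc_value_t tsc;
+ *
+ *	pmc_init();
+ *	pmc_allocate("tsc", PMC_MODE_SC, 0, 0, &pmcid);
+ *	pmc_start(pmcid);
+ *	pmc_read(pmcid, &tsc);		(tsc now holds the count)
+ */
+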
+/*
+ * Software events are dynamically defined.
+ */
+
+#define PMC_EV_DYN_COUNT 0x1000
+
+#define PMC_EV_SOFT_FIRST 0x20000
+#define PMC_EV_SOFT_LAST (PMC_EV_SOFT_FIRST + PMC_EV_DYN_COUNT - 1)
+
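+/*
+ * An event code therefore belongs to the software class exactly when
+ * it lies in [PMC_EV_SOFT_FIRST, PMC_EV_SOFT_LAST], and the n-th
+ * dynamically registered event is encoded as PMC_EV_SOFT_FIRST + n,
+ * for 0 <= n < PMC_EV_DYN_COUNT.  A sketch of the range check (the
+ * helper name is illustrative, not part of this interface):
+ *
+ *	static __inline int
+ *	pmc_ev_is_soft(uint32_t ev)
+ *	{
+ *		return (ev >= PMC_EV_SOFT_FIRST && ev <= PMC_EV_SOFT_LAST);
+ *	}
+ */
+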
+#define __PMC_EV_UCF() \
+__PMC_EV(UCF, UCLOCK)
+
+#define PMC_EV_UCF_FIRST PMC_EV_UCF_UCLOCK
+#define PMC_EV_UCF_LAST PMC_EV_UCF_UCLOCK
+
+#define __PMC_EV_UCP() \
+__PMC_EV(UCP, EVENT_00H_01H) \
+__PMC_EV(UCP, EVENT_00H_02H) \
+__PMC_EV(UCP, EVENT_00H_04H) \
+__PMC_EV(UCP, EVENT_01H_01H) \
+__PMC_EV(UCP, EVENT_01H_02H) \
+__PMC_EV(UCP, EVENT_01H_04H) \
+__PMC_EV(UCP, EVENT_02H_01H) \
+__PMC_EV(UCP, EVENT_03H_01H) \
+__PMC_EV(UCP, EVENT_03H_02H) \
+__PMC_EV(UCP, EVENT_03H_04H) \
+__PMC_EV(UCP, EVENT_03H_08H) \
+__PMC_EV(UCP, EVENT_03H_10H) \
+__PMC_EV(UCP, EVENT_03H_20H) \
+__PMC_EV(UCP, EVENT_03H_40H) \
+__PMC_EV(UCP, EVENT_04H_01H) \
+__PMC_EV(UCP, EVENT_04H_02H) \
+__PMC_EV(UCP, EVENT_04H_04H) \
+__PMC_EV(UCP, EVENT_04H_08H) \
+__PMC_EV(UCP, EVENT_04H_10H) \
+__PMC_EV(UCP, EVENT_05H_01H) \
+__PMC_EV(UCP, EVENT_05H_02H) \
+__PMC_EV(UCP, EVENT_05H_04H) \
+__PMC_EV(UCP, EVENT_06H_01H) \
+__PMC_EV(UCP, EVENT_06H_02H) \
+__PMC_EV(UCP, EVENT_06H_04H) \
+__PMC_EV(UCP, EVENT_06H_08H) \
+__PMC_EV(UCP, EVENT_06H_10H) \
+__PMC_EV(UCP, EVENT_06H_20H) \
+__PMC_EV(UCP, EVENT_07H_01H) \
+__PMC_EV(UCP, EVENT_07H_02H) \
+__PMC_EV(UCP, EVENT_07H_04H) \
+__PMC_EV(UCP, EVENT_07H_08H) \
+__PMC_EV(UCP, EVENT_07H_10H) \
+__PMC_EV(UCP, EVENT_07H_20H) \
+__PMC_EV(UCP, EVENT_07H_24H) \
+__PMC_EV(UCP, EVENT_08H_01H) \
+__PMC_EV(UCP, EVENT_08H_02H) \
+__PMC_EV(UCP, EVENT_08H_04H) \
+__PMC_EV(UCP, EVENT_08H_03H) \
+__PMC_EV(UCP, EVENT_09H_01H) \
+__PMC_EV(UCP, EVENT_09H_02H) \
+__PMC_EV(UCP, EVENT_09H_04H) \
+__PMC_EV(UCP, EVENT_09H_03H) \
+__PMC_EV(UCP, EVENT_0AH_01H) \
+__PMC_EV(UCP, EVENT_0AH_02H) \
+__PMC_EV(UCP, EVENT_0AH_04H) \
+__PMC_EV(UCP, EVENT_0AH_08H) \
+__PMC_EV(UCP, EVENT_0AH_0FH) \
+__PMC_EV(UCP, EVENT_0BH_01H) \
+__PMC_EV(UCP, EVENT_0BH_02H) \
+__PMC_EV(UCP, EVENT_0BH_04H) \
+__PMC_EV(UCP, EVENT_0BH_08H) \
+__PMC_EV(UCP, EVENT_0BH_10H) \
+__PMC_EV(UCP, EVENT_0BH_1FH) \
+__PMC_EV(UCP, EVENT_0CH_01H) \
+__PMC_EV(UCP, EVENT_0CH_02H) \
+__PMC_EV(UCP, EVENT_0CH_04H_E) \
+__PMC_EV(UCP, EVENT_0CH_04H_F) \
+__PMC_EV(UCP, EVENT_0CH_04H_M) \
+__PMC_EV(UCP, EVENT_0CH_04H_S) \
+__PMC_EV(UCP, EVENT_0CH_08H_E) \
+__PMC_EV(UCP, EVENT_0CH_08H_F) \
+__PMC_EV(UCP, EVENT_0CH_08H_M) \
+__PMC_EV(UCP, EVENT_0CH_08H_S) \
+__PMC_EV(UCP, EVENT_20H_01H) \
+__PMC_EV(UCP, EVENT_20H_02H) \
+__PMC_EV(UCP, EVENT_20H_04H) \
+__PMC_EV(UCP, EVENT_20H_08H) \
+__PMC_EV(UCP, EVENT_20H_10H) \
+__PMC_EV(UCP, EVENT_20H_20H) \
+__PMC_EV(UCP, EVENT_21H_01H) \
+__PMC_EV(UCP, EVENT_21H_02H) \
+__PMC_EV(UCP, EVENT_21H_04H) \
+__PMC_EV(UCP, EVENT_22H_01H) \
+__PMC_EV(UCP, EVENT_22H_02H) \
+__PMC_EV(UCP, EVENT_22H_04H) \
+__PMC_EV(UCP, EVENT_22H_08H) \
+__PMC_EV(UCP, EVENT_22H_20H) \
+__PMC_EV(UCP, EVENT_22H_40H) \
+__PMC_EV(UCP, EVENT_22H_80H) \
+__PMC_EV(UCP, EVENT_23H_01H) \
+__PMC_EV(UCP, EVENT_23H_02H) \
+__PMC_EV(UCP, EVENT_23H_04H) \
+__PMC_EV(UCP, EVENT_24H_02H) \
+__PMC_EV(UCP, EVENT_24H_04H) \
+__PMC_EV(UCP, EVENT_25H_01H) \
+__PMC_EV(UCP, EVENT_25H_02H) \
+__PMC_EV(UCP, EVENT_25H_04H) \
+__PMC_EV(UCP, EVENT_26H_01H) \
+__PMC_EV(UCP, EVENT_27H_01H) \
+__PMC_EV(UCP, EVENT_27H_02H) \
+__PMC_EV(UCP, EVENT_27H_04H) \
+__PMC_EV(UCP, EVENT_27H_08H) \
+__PMC_EV(UCP, EVENT_27H_10H) \
+__PMC_EV(UCP, EVENT_27H_20H) \
+__PMC_EV(UCP, EVENT_28H_01H) \
+__PMC_EV(UCP, EVENT_28H_02H) \
+__PMC_EV(UCP, EVENT_28H_04H) \
+__PMC_EV(UCP, EVENT_28H_08H) \
+__PMC_EV(UCP, EVENT_28H_10H) \
+__PMC_EV(UCP, EVENT_28H_20H) \
+__PMC_EV(UCP, EVENT_29H_01H) \
+__PMC_EV(UCP, EVENT_29H_02H) \
+__PMC_EV(UCP, EVENT_29H_04H) \
+__PMC_EV(UCP, EVENT_29H_08H) \
+__PMC_EV(UCP, EVENT_29H_10H) \
+__PMC_EV(UCP, EVENT_29H_20H) \
+__PMC_EV(UCP, EVENT_2AH_01H) \
+__PMC_EV(UCP, EVENT_2AH_02H) \
+__PMC_EV(UCP, EVENT_2AH_04H) \
+__PMC_EV(UCP, EVENT_2AH_07H) \
+__PMC_EV(UCP, EVENT_2BH_01H) \
+__PMC_EV(UCP, EVENT_2BH_02H) \
+__PMC_EV(UCP, EVENT_2BH_04H) \
+__PMC_EV(UCP, EVENT_2BH_07H) \
+__PMC_EV(UCP, EVENT_2CH_01H) \
+__PMC_EV(UCP, EVENT_2CH_02H) \
+__PMC_EV(UCP, EVENT_2CH_04H) \
+__PMC_EV(UCP, EVENT_2CH_07H) \
+__PMC_EV(UCP, EVENT_2DH_01H) \
+__PMC_EV(UCP, EVENT_2DH_02H) \
+__PMC_EV(UCP, EVENT_2DH_04H) \
+__PMC_EV(UCP, EVENT_2DH_07H) \
+__PMC_EV(UCP, EVENT_2EH_01H) \
+__PMC_EV(UCP, EVENT_2EH_02H) \
+__PMC_EV(UCP, EVENT_2EH_04H) \
+__PMC_EV(UCP, EVENT_2EH_07H) \
+__PMC_EV(UCP, EVENT_2FH_01H) \
+__PMC_EV(UCP, EVENT_2FH_02H) \
+__PMC_EV(UCP, EVENT_2FH_04H) \
+__PMC_EV(UCP, EVENT_2FH_07H) \
+__PMC_EV(UCP, EVENT_2FH_08H) \
+__PMC_EV(UCP, EVENT_2FH_10H) \
+__PMC_EV(UCP, EVENT_2FH_20H) \
+__PMC_EV(UCP, EVENT_2FH_38H) \
+__PMC_EV(UCP, EVENT_30H_01H) \
+__PMC_EV(UCP, EVENT_30H_02H) \
+__PMC_EV(UCP, EVENT_30H_04H) \
+__PMC_EV(UCP, EVENT_30H_07H) \
+__PMC_EV(UCP, EVENT_31H_01H) \
+__PMC_EV(UCP, EVENT_31H_02H) \
+__PMC_EV(UCP, EVENT_31H_04H) \
+__PMC_EV(UCP, EVENT_31H_07H) \
+__PMC_EV(UCP, EVENT_32H_01H) \
+__PMC_EV(UCP, EVENT_32H_02H) \
+__PMC_EV(UCP, EVENT_32H_04H) \
+__PMC_EV(UCP, EVENT_32H_07H) \
+__PMC_EV(UCP, EVENT_33H_01H) \
+__PMC_EV(UCP, EVENT_33H_02H) \
+__PMC_EV(UCP, EVENT_33H_04H) \
+__PMC_EV(UCP, EVENT_33H_07H) \
+__PMC_EV(UCP, EVENT_34H_01H) \
+__PMC_EV(UCP, EVENT_34H_02H) \
+__PMC_EV(UCP, EVENT_34H_04H) \
+__PMC_EV(UCP, EVENT_34H_08H) \
+__PMC_EV(UCP, EVENT_34H_10H) \
+__PMC_EV(UCP, EVENT_34H_20H) \
+__PMC_EV(UCP, EVENT_34H_40H) \
+__PMC_EV(UCP, EVENT_34H_80H) \
+__PMC_EV(UCP, EVENT_35H_01H) \
+__PMC_EV(UCP, EVENT_35H_02H) \
+__PMC_EV(UCP, EVENT_35H_04H) \
+__PMC_EV(UCP, EVENT_40H_01H) \
+__PMC_EV(UCP, EVENT_40H_02H) \
+__PMC_EV(UCP, EVENT_40H_04H) \
+__PMC_EV(UCP, EVENT_40H_08H) \
+__PMC_EV(UCP, EVENT_40H_10H) \
+__PMC_EV(UCP, EVENT_40H_20H) \
+__PMC_EV(UCP, EVENT_40H_07H) \
+__PMC_EV(UCP, EVENT_40H_38H) \
+__PMC_EV(UCP, EVENT_41H_01H) \
+__PMC_EV(UCP, EVENT_41H_02H) \
+__PMC_EV(UCP, EVENT_41H_04H) \
+__PMC_EV(UCP, EVENT_41H_08H) \
+__PMC_EV(UCP, EVENT_41H_10H) \
+__PMC_EV(UCP, EVENT_41H_20H) \
+__PMC_EV(UCP, EVENT_41H_07H) \
+__PMC_EV(UCP, EVENT_41H_38H) \
+__PMC_EV(UCP, EVENT_42H_01H) \
+__PMC_EV(UCP, EVENT_42H_02H) \
+__PMC_EV(UCP, EVENT_42H_04H) \
+__PMC_EV(UCP, EVENT_42H_08H) \
+__PMC_EV(UCP, EVENT_43H_01H) \
+__PMC_EV(UCP, EVENT_43H_02H) \
+__PMC_EV(UCP, EVENT_60H_01H) \
+__PMC_EV(UCP, EVENT_60H_02H) \
+__PMC_EV(UCP, EVENT_60H_04H) \
+__PMC_EV(UCP, EVENT_61H_01H) \
+__PMC_EV(UCP, EVENT_61H_02H) \
+__PMC_EV(UCP, EVENT_61H_04H) \
+__PMC_EV(UCP, EVENT_62H_01H) \
+__PMC_EV(UCP, EVENT_62H_02H) \
+__PMC_EV(UCP, EVENT_62H_04H) \
+__PMC_EV(UCP, EVENT_63H_01H) \
+__PMC_EV(UCP, EVENT_63H_02H) \
+__PMC_EV(UCP, EVENT_63H_04H) \
+__PMC_EV(UCP, EVENT_63H_08H) \
+__PMC_EV(UCP, EVENT_63H_10H) \
+__PMC_EV(UCP, EVENT_63H_20H) \
+__PMC_EV(UCP, EVENT_64H_01H) \
+__PMC_EV(UCP, EVENT_64H_02H) \
+__PMC_EV(UCP, EVENT_64H_04H) \
+__PMC_EV(UCP, EVENT_64H_08H) \
+__PMC_EV(UCP, EVENT_64H_10H) \
+__PMC_EV(UCP, EVENT_64H_20H) \
+__PMC_EV(UCP, EVENT_65H_01H) \
+__PMC_EV(UCP, EVENT_65H_02H) \
+__PMC_EV(UCP, EVENT_65H_04H) \
+__PMC_EV(UCP, EVENT_66H_01H) \
+__PMC_EV(UCP, EVENT_66H_02H) \
+__PMC_EV(UCP, EVENT_66H_04H) \
+__PMC_EV(UCP, EVENT_67H_01H) \
+__PMC_EV(UCP, EVENT_80H_01H) \
+__PMC_EV(UCP, EVENT_80H_02H) \
+__PMC_EV(UCP, EVENT_80H_04H) \
+__PMC_EV(UCP, EVENT_80H_08H) \
+__PMC_EV(UCP, EVENT_81H_01H) \
+__PMC_EV(UCP, EVENT_81H_02H) \
+__PMC_EV(UCP, EVENT_81H_04H) \
+__PMC_EV(UCP, EVENT_81H_08H) \
+__PMC_EV(UCP, EVENT_81H_20H) \
+__PMC_EV(UCP, EVENT_81H_80H) \
+__PMC_EV(UCP, EVENT_82H_01H) \
+__PMC_EV(UCP, EVENT_83H_01H) \
+__PMC_EV(UCP, EVENT_83H_02H) \
+__PMC_EV(UCP, EVENT_83H_04H) \
+__PMC_EV(UCP, EVENT_83H_08H) \
+__PMC_EV(UCP, EVENT_84H_01H) \
+__PMC_EV(UCP, EVENT_84H_02H) \
+__PMC_EV(UCP, EVENT_84H_04H) \
+__PMC_EV(UCP, EVENT_84H_08H) \
+__PMC_EV(UCP, EVENT_85H_02H) \
+__PMC_EV(UCP, EVENT_86H_01H)
+
+#define PMC_EV_UCP_FIRST PMC_EV_UCP_EVENT_00H_01H
+#define PMC_EV_UCP_LAST PMC_EV_UCP_EVENT_86H_01H
+
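+/*
+ * In the IAP_EVENT_xxH_yyH and UCP_EVENT_xxH_yyH symbols, xxH is the
+ * event-select code and yyH the unit mask from Intel's documentation;
+ * IAP_EVENT_2EH_41H, for example, pairs event select 2EH with unit
+ * mask 41H, the architectural LONGEST_LAT_CACHE.MISS encoding.
+ */
+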
+#define __PMC_EV_ALIAS_COREI7UC() \
+__PMC_EV_ALIAS("GQ_CYCLES_FULL.READ_TRACKER", UCP_EVENT_00H_01H) \
+__PMC_EV_ALIAS("GQ_CYCLES_FULL.WRITE_TRACKER", UCP_EVENT_00H_02H) \
+__PMC_EV_ALIAS("GQ_CYCLES_FULL.PEER_PROBE_TRACKER", UCP_EVENT_00H_04H) \
+__PMC_EV_ALIAS("GQ_CYCLES_NOT_EMPTY.READ_TRACKER", UCP_EVENT_01H_01H) \
+__PMC_EV_ALIAS("GQ_CYCLES_NOT_EMPTY.WRITE_TRACKER", UCP_EVENT_01H_02H) \
+__PMC_EV_ALIAS("GQ_CYCLES_NOT_EMPTY.PEER_PROBE_TRACKER", UCP_EVENT_01H_04H) \
+__PMC_EV_ALIAS("GQ_ALLOC.READ_TRACKER", UCP_EVENT_03H_01H) \
+__PMC_EV_ALIAS("GQ_ALLOC.RT_L3_MISS", UCP_EVENT_03H_02H) \
+__PMC_EV_ALIAS("GQ_ALLOC.RT_TO_L3_RESP", UCP_EVENT_03H_04H) \
+__PMC_EV_ALIAS("GQ_ALLOC.RT_TO_RTID_ACQUIRED", UCP_EVENT_03H_08H) \
+__PMC_EV_ALIAS("GQ_ALLOC.WT_TO_RTID_ACQUIRED", UCP_EVENT_03H_10H) \
+__PMC_EV_ALIAS("GQ_ALLOC.WRITE_TRACKER", UCP_EVENT_03H_20H) \
+__PMC_EV_ALIAS("GQ_ALLOC.PEER_PROBE_TRACKER", UCP_EVENT_03H_40H) \
+__PMC_EV_ALIAS("GQ_DATA.FROM_QPI", UCP_EVENT_04H_01H) \
+__PMC_EV_ALIAS("GQ_DATA.FROM_QMC", UCP_EVENT_04H_02H) \
+__PMC_EV_ALIAS("GQ_DATA.FROM_L3", UCP_EVENT_04H_04H) \
+__PMC_EV_ALIAS("GQ_DATA.FROM_CORES_02", UCP_EVENT_04H_08H) \
+__PMC_EV_ALIAS("GQ_DATA.FROM_CORES_13", UCP_EVENT_04H_10H) \
+__PMC_EV_ALIAS("GQ_DATA.TO_QPI_QMC", UCP_EVENT_05H_01H) \
+__PMC_EV_ALIAS("GQ_DATA.TO_L3", UCP_EVENT_05H_02H) \
+__PMC_EV_ALIAS("GQ_DATA.TO_CORES", UCP_EVENT_05H_04H) \
+__PMC_EV_ALIAS("SNP_RESP_TO_LOCAL_HOME.I_STATE", UCP_EVENT_06H_01H) \
+__PMC_EV_ALIAS("SNP_RESP_TO_LOCAL_HOME.S_STATE", UCP_EVENT_06H_02H) \
+__PMC_EV_ALIAS("SNP_RESP_TO_LOCAL_HOME.FWD_S_STATE", UCP_EVENT_06H_04H) \
+__PMC_EV_ALIAS("SNP_RESP_TO_LOCAL_HOME.FWD_I_STATE", UCP_EVENT_06H_08H) \
+__PMC_EV_ALIAS("SNP_RESP_TO_LOCAL_HOME.CONFLICT", UCP_EVENT_06H_10H) \
+__PMC_EV_ALIAS("SNP_RESP_TO_LOCAL_HOME.WB", UCP_EVENT_06H_20H) \
+__PMC_EV_ALIAS("SNP_RESP_TO_REMOTE_HOME.I_STATE", UCP_EVENT_07H_01H) \
+__PMC_EV_ALIAS("SNP_RESP_TO_REMOTE_HOME.S_STATE", UCP_EVENT_07H_02H) \
+__PMC_EV_ALIAS("SNP_RESP_TO_REMOTE_HOME.FWD_S_STATE", UCP_EVENT_07H_04H) \
+__PMC_EV_ALIAS("SNP_RESP_TO_REMOTE_HOME.FWD_I_STATE", UCP_EVENT_07H_08H) \
+__PMC_EV_ALIAS("SNP_RESP_TO_REMOTE_HOME.CONFLICT", UCP_EVENT_07H_10H) \
+__PMC_EV_ALIAS("SNP_RESP_TO_REMOTE_HOME.WB", UCP_EVENT_07H_20H) \
+__PMC_EV_ALIAS("SNP_RESP_TO_REMOTE_HOME.HITM", UCP_EVENT_07H_24H) \
+__PMC_EV_ALIAS("L3_HITS.READ", UCP_EVENT_08H_01H) \
+__PMC_EV_ALIAS("L3_HITS.WRITE", UCP_EVENT_08H_02H) \
+__PMC_EV_ALIAS("L3_HITS.PROBE", UCP_EVENT_08H_04H) \
+__PMC_EV_ALIAS("L3_HITS.ANY", UCP_EVENT_08H_03H) \
+__PMC_EV_ALIAS("L3_MISS.READ", UCP_EVENT_09H_01H) \
+__PMC_EV_ALIAS("L3_MISS.WRITE", UCP_EVENT_09H_02H) \
+__PMC_EV_ALIAS("L3_MISS.PROBE", UCP_EVENT_09H_04H) \
+__PMC_EV_ALIAS("L3_MISS.ANY", UCP_EVENT_09H_03H) \
+__PMC_EV_ALIAS("L3_LINES_IN.M_STATE", UCP_EVENT_0AH_01H) \
+__PMC_EV_ALIAS("L3_LINES_IN.E_STATE", UCP_EVENT_0AH_02H) \
+__PMC_EV_ALIAS("L3_LINES_IN.S_STATE", UCP_EVENT_0AH_04H) \
+__PMC_EV_ALIAS("L3_LINES_IN.F_STATE", UCP_EVENT_0AH_08H) \
+__PMC_EV_ALIAS("L3_LINES_IN.ANY", UCP_EVENT_0AH_0FH) \
+__PMC_EV_ALIAS("L3_LINES_OUT.M_STATE", UCP_EVENT_0BH_01H) \
+__PMC_EV_ALIAS("L3_LINES_OUT.E_STATE", UCP_EVENT_0BH_02H) \
+__PMC_EV_ALIAS("L3_LINES_OUT.S_STATE", UCP_EVENT_0BH_04H) \
+__PMC_EV_ALIAS("L3_LINES_OUT.I_STATE", UCP_EVENT_0BH_08H) \
+__PMC_EV_ALIAS("L3_LINES_OUT.F_STATE", UCP_EVENT_0BH_10H) \
+__PMC_EV_ALIAS("L3_LINES_OUT.ANY", UCP_EVENT_0BH_1FH) \
+__PMC_EV_ALIAS("QHL_REQUESTS.IOH_READS", UCP_EVENT_20H_01H) \
+__PMC_EV_ALIAS("QHL_REQUESTS.IOH_WRITES", UCP_EVENT_20H_02H) \
+__PMC_EV_ALIAS("QHL_REQUESTS.REMOTE_READS", UCP_EVENT_20H_04H) \
+__PMC_EV_ALIAS("QHL_REQUESTS.REMOTE_WRITES", UCP_EVENT_20H_08H) \
+__PMC_EV_ALIAS("QHL_REQUESTS.LOCAL_READS", UCP_EVENT_20H_10H) \
+__PMC_EV_ALIAS("QHL_REQUESTS.LOCAL_WRITES", UCP_EVENT_20H_20H) \
+__PMC_EV_ALIAS("QHL_CYCLES_FULL.IOH", UCP_EVENT_21H_01H) \
+__PMC_EV_ALIAS("QHL_CYCLES_FULL.REMOTE", UCP_EVENT_21H_02H) \
+__PMC_EV_ALIAS("QHL_CYCLES_FULL.LOCAL", UCP_EVENT_21H_04H) \
+__PMC_EV_ALIAS("QHL_CYCLES_NOT_EMPTY.IOH", UCP_EVENT_22H_01H) \
+__PMC_EV_ALIAS("QHL_CYCLES_NOT_EMPTY.REMOTE", UCP_EVENT_22H_02H) \
+__PMC_EV_ALIAS("QHL_CYCLES_NOT_EMPTY.LOCAL", UCP_EVENT_22H_04H) \
+__PMC_EV_ALIAS("QHL_OCCUPANCY.IOH", UCP_EVENT_23H_01H) \
+__PMC_EV_ALIAS("QHL_OCCUPANCY.REMOTE", UCP_EVENT_23H_02H) \
+__PMC_EV_ALIAS("QHL_OCCUPANCY.LOCAL", UCP_EVENT_23H_04H) \
+__PMC_EV_ALIAS("QHL_ADDRESS_CONFLICTS.2WAY", UCP_EVENT_24H_02H) \
+__PMC_EV_ALIAS("QHL_ADDRESS_CONFLICTS.3WAY", UCP_EVENT_24H_04H) \
+__PMC_EV_ALIAS("QHL_CONFLICT_CYCLES.IOH", UCP_EVENT_25H_01H) \
+__PMC_EV_ALIAS("QHL_CONFLICT_CYCLES.REMOTE", UCP_EVENT_25H_02H) \
+__PMC_EV_ALIAS("QHL_CONFLICT_CYCLES.LOCAL", UCP_EVENT_25H_04H) \
+__PMC_EV_ALIAS("QHL_TO_QMC_BYPASS", UCP_EVENT_26H_01H) \
+__PMC_EV_ALIAS("QMC_NORMAL_FULL.READ.CH0", UCP_EVENT_27H_01H) \
+__PMC_EV_ALIAS("QMC_NORMAL_FULL.READ.CH1", UCP_EVENT_27H_02H) \
+__PMC_EV_ALIAS("QMC_NORMAL_FULL.READ.CH2", UCP_EVENT_27H_04H) \
+__PMC_EV_ALIAS("QMC_NORMAL_FULL.WRITE.CH0", UCP_EVENT_27H_08H) \
+__PMC_EV_ALIAS("QMC_NORMAL_FULL.WRITE.CH1", UCP_EVENT_27H_10H) \
+__PMC_EV_ALIAS("QMC_NORMAL_FULL.WRITE.CH2", UCP_EVENT_27H_20H) \
+__PMC_EV_ALIAS("QMC_ISOC_FULL.READ.CH0", UCP_EVENT_28H_01H) \
+__PMC_EV_ALIAS("QMC_ISOC_FULL.READ.CH1", UCP_EVENT_28H_02H) \
+__PMC_EV_ALIAS("QMC_ISOC_FULL.READ.CH2", UCP_EVENT_28H_04H) \
+__PMC_EV_ALIAS("QMC_ISOC_FULL.WRITE.CH0", UCP_EVENT_28H_08H) \
+__PMC_EV_ALIAS("QMC_ISOC_FULL.WRITE.CH1", UCP_EVENT_28H_10H) \
+__PMC_EV_ALIAS("QMC_ISOC_FULL.WRITE.CH2", UCP_EVENT_28H_20H) \
+__PMC_EV_ALIAS("QMC_BUSY.READ.CH0", UCP_EVENT_29H_01H) \
+__PMC_EV_ALIAS("QMC_BUSY.READ.CH1", UCP_EVENT_29H_02H) \
+__PMC_EV_ALIAS("QMC_BUSY.READ.CH2", UCP_EVENT_29H_04H) \
+__PMC_EV_ALIAS("QMC_BUSY.WRITE.CH0", UCP_EVENT_29H_08H) \
+__PMC_EV_ALIAS("QMC_BUSY.WRITE.CH1", UCP_EVENT_29H_10H) \
+__PMC_EV_ALIAS("QMC_BUSY.WRITE.CH2", UCP_EVENT_29H_20H) \
+__PMC_EV_ALIAS("QMC_OCCUPANCY.CH0", UCP_EVENT_2AH_01H) \
+__PMC_EV_ALIAS("QMC_OCCUPANCY.CH1", UCP_EVENT_2AH_02H) \
+__PMC_EV_ALIAS("QMC_OCCUPANCY.CH2", UCP_EVENT_2AH_04H) \
+__PMC_EV_ALIAS("QMC_ISSOC_OCCUPANCY.CH0", UCP_EVENT_2BH_01H) \
+__PMC_EV_ALIAS("QMC_ISSOC_OCCUPANCY.CH1", UCP_EVENT_2BH_02H) \
+__PMC_EV_ALIAS("QMC_ISSOC_OCCUPANCY.CH2", UCP_EVENT_2BH_04H) \
+__PMC_EV_ALIAS("QMC_ISSOC_READS.ANY", UCP_EVENT_2BH_07H) \
+__PMC_EV_ALIAS("QMC_NORMAL_READS.CH0", UCP_EVENT_2CH_01H) \
+__PMC_EV_ALIAS("QMC_NORMAL_READS.CH1", UCP_EVENT_2CH_02H) \
+__PMC_EV_ALIAS("QMC_NORMAL_READS.CH2", UCP_EVENT_2CH_04H) \
+__PMC_EV_ALIAS("QMC_NORMAL_READS.ANY", UCP_EVENT_2CH_07H) \
+__PMC_EV_ALIAS("QMC_HIGH_PRIORITY_READS.CH0", UCP_EVENT_2DH_01H) \
+__PMC_EV_ALIAS("QMC_HIGH_PRIORITY_READS.CH1", UCP_EVENT_2DH_02H) \
+__PMC_EV_ALIAS("QMC_HIGH_PRIORITY_READS.CH2", UCP_EVENT_2DH_04H) \
+__PMC_EV_ALIAS("QMC_HIGH_PRIORITY_READS.ANY", UCP_EVENT_2DH_07H) \
+__PMC_EV_ALIAS("QMC_CRITICAL_PRIORITY_READS.CH0", UCP_EVENT_2EH_01H) \
+__PMC_EV_ALIAS("QMC_CRITICAL_PRIORITY_READS.CH1", UCP_EVENT_2EH_02H) \
+__PMC_EV_ALIAS("QMC_CRITICAL_PRIORITY_READS.CH2", UCP_EVENT_2EH_04H) \
+__PMC_EV_ALIAS("QMC_CRITICAL_PRIORITY_READS.ANY", UCP_EVENT_2EH_07H) \
+__PMC_EV_ALIAS("QMC_WRITES.FULL.CH0", UCP_EVENT_2FH_01H) \
+__PMC_EV_ALIAS("QMC_WRITES.FULL.CH1", UCP_EVENT_2FH_02H) \
+__PMC_EV_ALIAS("QMC_WRITES.FULL.CH2", UCP_EVENT_2FH_04H) \
+__PMC_EV_ALIAS("QMC_WRITES.FULL.ANY", UCP_EVENT_2FH_07H) \
+__PMC_EV_ALIAS("QMC_WRITES.PARTIAL.CH0", UCP_EVENT_2FH_08H) \
+__PMC_EV_ALIAS("QMC_WRITES.PARTIAL.CH1", UCP_EVENT_2FH_10H) \
+__PMC_EV_ALIAS("QMC_WRITES.PARTIAL.CH2", UCP_EVENT_2FH_20H) \
+__PMC_EV_ALIAS("QMC_WRITES.PARTIAL.ANY", UCP_EVENT_2FH_38H) \
+__PMC_EV_ALIAS("QMC_CANCEL.CH0", UCP_EVENT_30H_01H) \
+__PMC_EV_ALIAS("QMC_CANCEL.CH1", UCP_EVENT_30H_02H) \
+__PMC_EV_ALIAS("QMC_CANCEL.CH2", UCP_EVENT_30H_04H) \
+__PMC_EV_ALIAS("QMC_CANCEL.ANY", UCP_EVENT_30H_07H) \
+__PMC_EV_ALIAS("QMC_PRIORITY_UPDATES.CH0", UCP_EVENT_31H_01H) \
+__PMC_EV_ALIAS("QMC_PRIORITY_UPDATES.CH1", UCP_EVENT_31H_02H) \
+__PMC_EV_ALIAS("QMC_PRIORITY_UPDATES.CH2", UCP_EVENT_31H_04H) \
+__PMC_EV_ALIAS("QMC_PRIORITY_UPDATES.ANY", UCP_EVENT_31H_07H) \
+__PMC_EV_ALIAS("QHL_FRC_ACK_CNFLTS.LOCAL", UCP_EVENT_33H_04H) \
+__PMC_EV_ALIAS("QPI_TX_STALLED_SINGLE_FLIT.HOME.LINK_0", UCP_EVENT_40H_01H) \
+__PMC_EV_ALIAS("QPI_TX_STALLED_SINGLE_FLIT.SNOOP.LINK_0", UCP_EVENT_40H_02H) \
+__PMC_EV_ALIAS("QPI_TX_STALLED_SINGLE_FLIT.NDR.LINK_0", UCP_EVENT_40H_04H) \
+__PMC_EV_ALIAS("QPI_TX_STALLED_SINGLE_FLIT.HOME.LINK_1", UCP_EVENT_40H_08H) \
+__PMC_EV_ALIAS("QPI_TX_STALLED_SINGLE_FLIT.SNOOP.LINK_1", UCP_EVENT_40H_10H) \
+__PMC_EV_ALIAS("QPI_TX_STALLED_SINGLE_FLIT.NDR.LINK_1", UCP_EVENT_40H_20H) \
+__PMC_EV_ALIAS("QPI_TX_STALLED_SINGLE_FLIT.LINK_0", UCP_EVENT_40H_07H) \
+__PMC_EV_ALIAS("QPI_TX_STALLED_SINGLE_FLIT.LINK_1", UCP_EVENT_40H_38H) \
+__PMC_EV_ALIAS("QPI_TX_STALLED_MULTI_FLIT.DRS.LINK_0", UCP_EVENT_41H_01H) \
+__PMC_EV_ALIAS("QPI_TX_STALLED_MULTI_FLIT.NCB.LINK_0", UCP_EVENT_41H_02H) \
+__PMC_EV_ALIAS("QPI_TX_STALLED_MULTI_FLIT.NCS.LINK_0", UCP_EVENT_41H_04H) \
+__PMC_EV_ALIAS("QPI_TX_STALLED_MULTI_FLIT.DRS.LINK_1", UCP_EVENT_41H_08H) \
+__PMC_EV_ALIAS("QPI_TX_STALLED_MULTI_FLIT.NCB.LINK_1", UCP_EVENT_41H_10H) \
+__PMC_EV_ALIAS("QPI_TX_STALLED_MULTI_FLIT.NCS.LINK_1", UCP_EVENT_41H_20H) \
+__PMC_EV_ALIAS("QPI_TX_STALLED_MULTI_FLIT.LINK_0", UCP_EVENT_41H_07H) \
+__PMC_EV_ALIAS("QPI_TX_STALLED_MULTI_FLIT.LINK_1", UCP_EVENT_41H_38H) \
+__PMC_EV_ALIAS("QPI_TX_HEADER.BUSY.LINK_0", UCP_EVENT_42H_02H) \
+__PMC_EV_ALIAS("QPI_TX_HEADER.BUSY.LINK_1", UCP_EVENT_42H_08H) \
+__PMC_EV_ALIAS("QPI_RX_NO_PPT_CREDIT.STALLS.LINK_0", UCP_EVENT_43H_01H) \
+__PMC_EV_ALIAS("QPI_RX_NO_PPT_CREDIT.STALLS.LINK_1", UCP_EVENT_43H_02H) \
+__PMC_EV_ALIAS("DRAM_OPEN.CH0", UCP_EVENT_60H_01H) \
+__PMC_EV_ALIAS("DRAM_OPEN.CH1", UCP_EVENT_60H_02H) \
+__PMC_EV_ALIAS("DRAM_OPEN.CH2", UCP_EVENT_60H_04H) \
+__PMC_EV_ALIAS("DRAM_PAGE_CLOSE.CH0", UCP_EVENT_61H_01H) \
+__PMC_EV_ALIAS("DRAM_PAGE_CLOSE.CH1", UCP_EVENT_61H_02H) \
+__PMC_EV_ALIAS("DRAM_PAGE_CLOSE.CH2", UCP_EVENT_61H_04H) \
+__PMC_EV_ALIAS("DRAM_PAGE_MISS.CH0", UCP_EVENT_62H_01H) \
+__PMC_EV_ALIAS("DRAM_PAGE_MISS.CH1", UCP_EVENT_62H_02H) \
+__PMC_EV_ALIAS("DRAM_PAGE_MISS.CH2", UCP_EVENT_62H_04H) \
+__PMC_EV_ALIAS("DRAM_READ_CAS.CH0", UCP_EVENT_63H_01H) \
+__PMC_EV_ALIAS("DRAM_READ_CAS.AUTOPRE_CH0", UCP_EVENT_63H_02H) \
+__PMC_EV_ALIAS("DRAM_READ_CAS.CH1", UCP_EVENT_63H_04H) \
+__PMC_EV_ALIAS("DRAM_READ_CAS.AUTOPRE_CH1", UCP_EVENT_63H_08H) \
+__PMC_EV_ALIAS("DRAM_READ_CAS.CH2", UCP_EVENT_63H_10H) \
+__PMC_EV_ALIAS("DRAM_READ_CAS.AUTOPRE_CH2", UCP_EVENT_63H_20H) \
+__PMC_EV_ALIAS("DRAM_WRITE_CAS.CH0", UCP_EVENT_64H_01H) \
+__PMC_EV_ALIAS("DRAM_WRITE_CAS.AUTOPRE_CH0", UCP_EVENT_64H_02H) \
+__PMC_EV_ALIAS("DRAM_WRITE_CAS.CH1", UCP_EVENT_64H_04H) \
+__PMC_EV_ALIAS("DRAM_WRITE_CAS.AUTOPRE_CH1", UCP_EVENT_64H_08H) \
+__PMC_EV_ALIAS("DRAM_WRITE_CAS.CH2", UCP_EVENT_64H_10H) \
+__PMC_EV_ALIAS("DRAM_WRITE_CAS.AUTOPRE_CH2", UCP_EVENT_64H_20H) \
+__PMC_EV_ALIAS("DRAM_REFRESH.CH0", UCP_EVENT_65H_01H) \
+__PMC_EV_ALIAS("DRAM_REFRESH.CH1", UCP_EVENT_65H_02H) \
+__PMC_EV_ALIAS("DRAM_REFRESH.CH2", UCP_EVENT_65H_04H) \
+__PMC_EV_ALIAS("DRAM_PRE_ALL.CH0", UCP_EVENT_66H_01H) \
+__PMC_EV_ALIAS("DRAM_PRE_ALL.CH1", UCP_EVENT_66H_02H) \
+__PMC_EV_ALIAS("DRAM_PRE_ALL.CH2", UCP_EVENT_66H_04H)
+
+#define __PMC_EV_ALIAS_WESTMEREUC() \
+__PMC_EV_ALIAS("GQ_CYCLES_FULL.READ_TRACKER", UCP_EVENT_00H_01H) \
+__PMC_EV_ALIAS("GQ_CYCLES_FULL.WRITE_TRACKER", UCP_EVENT_00H_02H) \
+__PMC_EV_ALIAS("GQ_CYCLES_FULL.PEER_PROBE_TRACKER", UCP_EVENT_00H_04H) \
+__PMC_EV_ALIAS("GQ_CYCLES_NOT_EMPTY.READ_TRACKER", UCP_EVENT_01H_01H) \
+__PMC_EV_ALIAS("GQ_CYCLES_NOT_EMPTY.WRITE_TRACKER", UCP_EVENT_01H_02H) \
+__PMC_EV_ALIAS("GQ_CYCLES_NOT_EMPTY.PEER_PROBE_TRACKER", UCP_EVENT_01H_04H) \
+__PMC_EV_ALIAS("GQ_OCCUPANCY.READ_TRACKER", UCP_EVENT_02H_01H) \
+__PMC_EV_ALIAS("GQ_ALLOC.READ_TRACKER", UCP_EVENT_03H_01H) \
+__PMC_EV_ALIAS("GQ_ALLOC.RT_L3_MISS", UCP_EVENT_03H_02H) \
+__PMC_EV_ALIAS("GQ_ALLOC.RT_TO_L3_RESP", UCP_EVENT_03H_04H) \
+__PMC_EV_ALIAS("GQ_ALLOC.RT_TO_RTID_ACQUIRED", UCP_EVENT_03H_08H) \
+__PMC_EV_ALIAS("GQ_ALLOC.WT_TO_RTID_ACQUIRED", UCP_EVENT_03H_10H) \
+__PMC_EV_ALIAS("GQ_ALLOC.WRITE_TRACKER", UCP_EVENT_03H_20H) \
+__PMC_EV_ALIAS("GQ_ALLOC.PEER_PROBE_TRACKER", UCP_EVENT_03H_40H) \
+__PMC_EV_ALIAS("GQ_DATA.FROM_QPI", UCP_EVENT_04H_01H) \
+__PMC_EV_ALIAS("GQ_DATA.FROM_QMC", UCP_EVENT_04H_02H) \
+__PMC_EV_ALIAS("GQ_DATA.FROM_L3", UCP_EVENT_04H_04H) \
+__PMC_EV_ALIAS("GQ_DATA.FROM_CORES_02", UCP_EVENT_04H_08H) \
+__PMC_EV_ALIAS("GQ_DATA.FROM_CORES_13", UCP_EVENT_04H_10H) \
+__PMC_EV_ALIAS("GQ_DATA.TO_QPI_QMC", UCP_EVENT_05H_01H) \
+__PMC_EV_ALIAS("GQ_DATA.TO_L3", UCP_EVENT_05H_02H) \
+__PMC_EV_ALIAS("GQ_DATA.TO_CORES", UCP_EVENT_05H_04H) \
+__PMC_EV_ALIAS("SNP_RESP_TO_LOCAL_HOME.I_STATE", UCP_EVENT_06H_01H) \
+__PMC_EV_ALIAS("SNP_RESP_TO_LOCAL_HOME.S_STATE", UCP_EVENT_06H_02H) \
+__PMC_EV_ALIAS("SNP_RESP_TO_LOCAL_HOME.FWD_S_STATE", UCP_EVENT_06H_04H) \
+__PMC_EV_ALIAS("SNP_RESP_TO_LOCAL_HOME.FWD_I_STATE", UCP_EVENT_06H_08H) \
+__PMC_EV_ALIAS("SNP_RESP_TO_LOCAL_HOME.CONFLICT", UCP_EVENT_06H_10H) \
+__PMC_EV_ALIAS("SNP_RESP_TO_LOCAL_HOME.WB", UCP_EVENT_06H_20H) \
+__PMC_EV_ALIAS("SNP_RESP_TO_REMOTE_HOME.I_STATE", UCP_EVENT_07H_01H) \
+__PMC_EV_ALIAS("SNP_RESP_TO_REMOTE_HOME.S_STATE", UCP_EVENT_07H_02H) \
+__PMC_EV_ALIAS("SNP_RESP_TO_REMOTE_HOME.FWD_S_STATE", UCP_EVENT_07H_04H) \
+__PMC_EV_ALIAS("SNP_RESP_TO_REMOTE_HOME.FWD_I_STATE", UCP_EVENT_07H_08H) \
+__PMC_EV_ALIAS("SNP_RESP_TO_REMOTE_HOME.CONFLICT", UCP_EVENT_07H_10H) \
+__PMC_EV_ALIAS("SNP_RESP_TO_REMOTE_HOME.WB", UCP_EVENT_07H_20H) \
+__PMC_EV_ALIAS("SNP_RESP_TO_REMOTE_HOME.HITM", UCP_EVENT_07H_24H) \
+__PMC_EV_ALIAS("L3_HITS.READ", UCP_EVENT_08H_01H) \
+__PMC_EV_ALIAS("L3_HITS.WRITE", UCP_EVENT_08H_02H) \
+__PMC_EV_ALIAS("L3_HITS.PROBE", UCP_EVENT_08H_04H) \
+__PMC_EV_ALIAS("L3_HITS.ANY", UCP_EVENT_08H_03H) \
+__PMC_EV_ALIAS("L3_MISS.READ", UCP_EVENT_09H_01H) \
+__PMC_EV_ALIAS("L3_MISS.WRITE", UCP_EVENT_09H_02H) \
+__PMC_EV_ALIAS("L3_MISS.PROBE", UCP_EVENT_09H_04H) \
+__PMC_EV_ALIAS("L3_MISS.ANY", UCP_EVENT_09H_03H) \
+__PMC_EV_ALIAS("L3_LINES_IN.M_STATE", UCP_EVENT_0AH_01H) \
+__PMC_EV_ALIAS("L3_LINES_IN.E_STATE", UCP_EVENT_0AH_02H) \
+__PMC_EV_ALIAS("L3_LINES_IN.S_STATE", UCP_EVENT_0AH_04H) \
+__PMC_EV_ALIAS("L3_LINES_IN.F_STATE", UCP_EVENT_0AH_08H) \
+__PMC_EV_ALIAS("L3_LINES_IN.ANY", UCP_EVENT_0AH_0FH) \
+__PMC_EV_ALIAS("L3_LINES_OUT.M_STATE", UCP_EVENT_0BH_01H) \
+__PMC_EV_ALIAS("L3_LINES_OUT.E_STATE", UCP_EVENT_0BH_02H) \
+__PMC_EV_ALIAS("L3_LINES_OUT.S_STATE", UCP_EVENT_0BH_04H) \
+__PMC_EV_ALIAS("L3_LINES_OUT.I_STATE", UCP_EVENT_0BH_08H) \
+__PMC_EV_ALIAS("L3_LINES_OUT.F_STATE", UCP_EVENT_0BH_10H) \
+__PMC_EV_ALIAS("L3_LINES_OUT.ANY", UCP_EVENT_0BH_1FH) \
+__PMC_EV_ALIAS("GQ_SNOOP.GOTO_S", UCP_EVENT_0CH_01H) \
+__PMC_EV_ALIAS("GQ_SNOOP.GOTO_I", UCP_EVENT_0CH_02H) \
+__PMC_EV_ALIAS("GQ_SNOOP.GOTO_S_HIT_E", UCP_EVENT_0CH_04H_E) \
+__PMC_EV_ALIAS("GQ_SNOOP.GOTO_S_HIT_F", UCP_EVENT_0CH_04H_F) \
+__PMC_EV_ALIAS("GQ_SNOOP.GOTO_S_HIT_M", UCP_EVENT_0CH_04H_M) \
+__PMC_EV_ALIAS("GQ_SNOOP.GOTO_S_HIT_S", UCP_EVENT_0CH_04H_S) \
+__PMC_EV_ALIAS("GQ_SNOOP.GOTO_I_HIT_E", UCP_EVENT_0CH_08H_E) \
+__PMC_EV_ALIAS("GQ_SNOOP.GOTO_I_HIT_F", UCP_EVENT_0CH_08H_F) \
+__PMC_EV_ALIAS("GQ_SNOOP.GOTO_I_HIT_M", UCP_EVENT_0CH_08H_M) \
+__PMC_EV_ALIAS("GQ_SNOOP.GOTO_I_HIT_S", UCP_EVENT_0CH_08H_S) \
+__PMC_EV_ALIAS("QHL_REQUESTS.IOH_READS", UCP_EVENT_20H_01H) \
+__PMC_EV_ALIAS("QHL_REQUESTS.IOH_WRITES", UCP_EVENT_20H_02H) \
+__PMC_EV_ALIAS("QHL_REQUESTS.REMOTE_READS", UCP_EVENT_20H_04H) \
+__PMC_EV_ALIAS("QHL_REQUESTS.REMOTE_WRITES", UCP_EVENT_20H_08H) \
+__PMC_EV_ALIAS("QHL_REQUESTS.LOCAL_READS", UCP_EVENT_20H_10H) \
+__PMC_EV_ALIAS("QHL_REQUESTS.LOCAL_WRITES", UCP_EVENT_20H_20H) \
+__PMC_EV_ALIAS("QHL_CYCLES_FULL.IOH", UCP_EVENT_21H_01H) \
+__PMC_EV_ALIAS("QHL_CYCLES_FULL.REMOTE", UCP_EVENT_21H_02H) \
+__PMC_EV_ALIAS("QHL_CYCLES_FULL.LOCAL", UCP_EVENT_21H_04H) \
+__PMC_EV_ALIAS("QHL_CYCLES_NOT_EMPTY.IOH", UCP_EVENT_22H_01H) \
+__PMC_EV_ALIAS("QHL_CYCLES_NOT_EMPTY.REMOTE", UCP_EVENT_22H_02H) \
+__PMC_EV_ALIAS("QHL_CYCLES_NOT_EMPTY.LOCAL", UCP_EVENT_22H_04H) \
+__PMC_EV_ALIAS("QHL_OCCUPANCY.IOH", UCP_EVENT_23H_01H) \
+__PMC_EV_ALIAS("QHL_OCCUPANCY.REMOTE", UCP_EVENT_23H_02H) \
+__PMC_EV_ALIAS("QHL_OCCUPANCY.LOCAL", UCP_EVENT_23H_04H) \
+__PMC_EV_ALIAS("QHL_ADDRESS_CONFLICTS.2WAY", UCP_EVENT_24H_02H) \
+__PMC_EV_ALIAS("QHL_ADDRESS_CONFLICTS.3WAY", UCP_EVENT_24H_04H) \
+__PMC_EV_ALIAS("QHL_CONFLICT_CYCLES.IOH", UCP_EVENT_25H_01H) \
+__PMC_EV_ALIAS("QHL_CONFLICT_CYCLES.REMOTE", UCP_EVENT_25H_02H) \
+__PMC_EV_ALIAS("QHL_CONFLICT_CYCLES.LOCAL", UCP_EVENT_25H_04H) \
+__PMC_EV_ALIAS("QHL_TO_QMC_BYPASS", UCP_EVENT_26H_01H) \
+__PMC_EV_ALIAS("QMC_ISOC_FULL.READ.CH0", UCP_EVENT_28H_01H) \
+__PMC_EV_ALIAS("QMC_ISOC_FULL.READ.CH1", UCP_EVENT_28H_02H) \
+__PMC_EV_ALIAS("QMC_ISOC_FULL.READ.CH2", UCP_EVENT_28H_04H) \
+__PMC_EV_ALIAS("QMC_ISOC_FULL.WRITE.CH0", UCP_EVENT_28H_08H) \
+__PMC_EV_ALIAS("QMC_ISOC_FULL.WRITE.CH1", UCP_EVENT_28H_10H) \
+__PMC_EV_ALIAS("QMC_ISOC_FULL.WRITE.CH2", UCP_EVENT_28H_20H) \
+__PMC_EV_ALIAS("QMC_BUSY.READ.CH0", UCP_EVENT_29H_01H) \
+__PMC_EV_ALIAS("QMC_BUSY.READ.CH1", UCP_EVENT_29H_02H) \
+__PMC_EV_ALIAS("QMC_BUSY.READ.CH2", UCP_EVENT_29H_04H) \
+__PMC_EV_ALIAS("QMC_BUSY.WRITE.CH0", UCP_EVENT_29H_08H) \
+__PMC_EV_ALIAS("QMC_BUSY.WRITE.CH1", UCP_EVENT_29H_10H) \
+__PMC_EV_ALIAS("QMC_BUSY.WRITE.CH2", UCP_EVENT_29H_20H) \
+__PMC_EV_ALIAS("QMC_OCCUPANCY.CH0", UCP_EVENT_2AH_01H) \
+__PMC_EV_ALIAS("QMC_OCCUPANCY.CH1", UCP_EVENT_2AH_02H) \
+__PMC_EV_ALIAS("QMC_OCCUPANCY.CH2", UCP_EVENT_2AH_04H) \
+__PMC_EV_ALIAS("QMC_OCCUPANCY.ANY", UCP_EVENT_2AH_07H) \
+__PMC_EV_ALIAS("QMC_ISSOC_OCCUPANCY.CH0", UCP_EVENT_2BH_01H) \
+__PMC_EV_ALIAS("QMC_ISSOC_OCCUPANCY.CH1", UCP_EVENT_2BH_02H) \
+__PMC_EV_ALIAS("QMC_ISSOC_OCCUPANCY.CH2", UCP_EVENT_2BH_04H) \
+__PMC_EV_ALIAS("QMC_ISSOC_READS.ANY", UCP_EVENT_2BH_07H) \
+__PMC_EV_ALIAS("QMC_NORMAL_READS.CH0", UCP_EVENT_2CH_01H) \
+__PMC_EV_ALIAS("QMC_NORMAL_READS.CH1", UCP_EVENT_2CH_02H) \
+__PMC_EV_ALIAS("QMC_NORMAL_READS.CH2", UCP_EVENT_2CH_04H) \
+__PMC_EV_ALIAS("QMC_NORMAL_READS.ANY", UCP_EVENT_2CH_07H) \
+__PMC_EV_ALIAS("QMC_HIGH_PRIORITY_READS.CH0", UCP_EVENT_2DH_01H) \
+__PMC_EV_ALIAS("QMC_HIGH_PRIORITY_READS.CH1", UCP_EVENT_2DH_02H) \
+__PMC_EV_ALIAS("QMC_HIGH_PRIORITY_READS.CH2", UCP_EVENT_2DH_04H) \
+__PMC_EV_ALIAS("QMC_HIGH_PRIORITY_READS.ANY", UCP_EVENT_2DH_07H) \
+__PMC_EV_ALIAS("QMC_CRITICAL_PRIORITY_READS.CH0", UCP_EVENT_2EH_01H) \
+__PMC_EV_ALIAS("QMC_CRITICAL_PRIORITY_READS.CH1", UCP_EVENT_2EH_02H) \
+__PMC_EV_ALIAS("QMC_CRITICAL_PRIORITY_READS.CH2", UCP_EVENT_2EH_04H) \
+__PMC_EV_ALIAS("QMC_CRITICAL_PRIORITY_READS.ANY", UCP_EVENT_2EH_07H) \
+__PMC_EV_ALIAS("QMC_WRITES.FULL.CH0", UCP_EVENT_2FH_01H) \
+__PMC_EV_ALIAS("QMC_WRITES.FULL.CH1", UCP_EVENT_2FH_02H) \
+__PMC_EV_ALIAS("QMC_WRITES.FULL.CH2", UCP_EVENT_2FH_04H) \
+__PMC_EV_ALIAS("QMC_WRITES.FULL.ANY", UCP_EVENT_2FH_07H) \
+__PMC_EV_ALIAS("QMC_WRITES.PARTIAL.CH0", UCP_EVENT_2FH_08H) \
+__PMC_EV_ALIAS("QMC_WRITES.PARTIAL.CH1", UCP_EVENT_2FH_10H) \
+__PMC_EV_ALIAS("QMC_WRITES.PARTIAL.CH2", UCP_EVENT_2FH_20H) \
+__PMC_EV_ALIAS("QMC_WRITES.PARTIAL.ANY", UCP_EVENT_2FH_38H) \
+__PMC_EV_ALIAS("QMC_CANCEL.CH0", UCP_EVENT_30H_01H) \
+__PMC_EV_ALIAS("QMC_CANCEL.CH1", UCP_EVENT_30H_02H) \
+__PMC_EV_ALIAS("QMC_CANCEL.CH2", UCP_EVENT_30H_04H) \
+__PMC_EV_ALIAS("QMC_CANCEL.ANY", UCP_EVENT_30H_07H) \
+__PMC_EV_ALIAS("QMC_PRIORITY_UPDATES.CH0", UCP_EVENT_31H_01H) \
+__PMC_EV_ALIAS("QMC_PRIORITY_UPDATES.CH1", UCP_EVENT_31H_02H) \
+__PMC_EV_ALIAS("QMC_PRIORITY_UPDATES.CH2", UCP_EVENT_31H_04H) \
+__PMC_EV_ALIAS("QMC_PRIORITY_UPDATES.ANY", UCP_EVENT_31H_07H) \
+__PMC_EV_ALIAS("IMC_RETRY.CH0", UCP_EVENT_32H_01H) \
+__PMC_EV_ALIAS("IMC_RETRY.CH1", UCP_EVENT_32H_02H) \
+__PMC_EV_ALIAS("IMC_RETRY.CH2", UCP_EVENT_32H_04H) \
+__PMC_EV_ALIAS("IMC_RETRY.ANY", UCP_EVENT_32H_07H) \
+__PMC_EV_ALIAS("QHL_FRC_ACK_CNFLTS.IOH", UCP_EVENT_33H_01H) \
+__PMC_EV_ALIAS("QHL_FRC_ACK_CNFLTS.REMOTE", UCP_EVENT_33H_02H) \
+__PMC_EV_ALIAS("QHL_FRC_ACK_CNFLTS.LOCAL", UCP_EVENT_33H_04H) \
+__PMC_EV_ALIAS("QHL_FRC_ACK_CNFLTS.ANY", UCP_EVENT_33H_07H) \
+__PMC_EV_ALIAS("QHL_SLEEPS.IOH_ORDER", UCP_EVENT_34H_01H) \
+__PMC_EV_ALIAS("QHL_SLEEPS.REMOTE_ORDER", UCP_EVENT_34H_02H) \
+__PMC_EV_ALIAS("QHL_SLEEPS.LOCAL_ORDER", UCP_EVENT_34H_04H) \
+__PMC_EV_ALIAS("QHL_SLEEPS.IOH_CONFLICT", UCP_EVENT_34H_08H) \
+__PMC_EV_ALIAS("QHL_SLEEPS.REMOTE_CONFLICT", UCP_EVENT_34H_10H) \
+__PMC_EV_ALIAS("QHL_SLEEPS.LOCAL_CONFLICT", UCP_EVENT_34H_20H) \
+__PMC_EV_ALIAS("ADDR_OPCODE_MATCH.IOH", UCP_EVENT_35H_01H) \
+__PMC_EV_ALIAS("ADDR_OPCODE_MATCH.REMOTE", UCP_EVENT_35H_02H) \
+__PMC_EV_ALIAS("ADDR_OPCODE_MATCH.LOCAL", UCP_EVENT_35H_04H) \
+__PMC_EV_ALIAS("QPI_TX_STALLED_SINGLE_FLIT.HOME.LINK_0", UCP_EVENT_40H_01H) \
+__PMC_EV_ALIAS("QPI_TX_STALLED_SINGLE_FLIT.SNOOP.LINK_0", UCP_EVENT_40H_02H) \
+__PMC_EV_ALIAS("QPI_TX_STALLED_SINGLE_FLIT.NDR.LINK_0", UCP_EVENT_40H_04H) \
+__PMC_EV_ALIAS("QPI_TX_STALLED_SINGLE_FLIT.HOME.LINK_1", UCP_EVENT_40H_08H) \
+__PMC_EV_ALIAS("QPI_TX_STALLED_SINGLE_FLIT.SNOOP.LINK_1", UCP_EVENT_40H_10H) \
+__PMC_EV_ALIAS("QPI_TX_STALLED_SINGLE_FLIT.NDR.LINK_1", UCP_EVENT_40H_20H) \
+__PMC_EV_ALIAS("QPI_TX_STALLED_SINGLE_FLIT.LINK_0", UCP_EVENT_40H_07H) \
+__PMC_EV_ALIAS("QPI_TX_STALLED_SINGLE_FLIT.LINK_1", UCP_EVENT_40H_38H) \
+__PMC_EV_ALIAS("QPI_TX_STALLED_MULTI_FLIT.DRS.LINK_0", UCP_EVENT_41H_01H) \
+__PMC_EV_ALIAS("QPI_TX_STALLED_MULTI_FLIT.NCB.LINK_0", UCP_EVENT_41H_02H) \
+__PMC_EV_ALIAS("QPI_TX_STALLED_MULTI_FLIT.NCS.LINK_0", UCP_EVENT_41H_04H) \
+__PMC_EV_ALIAS("QPI_TX_STALLED_MULTI_FLIT.DRS.LINK_1", UCP_EVENT_41H_08H) \
+__PMC_EV_ALIAS("QPI_TX_STALLED_MULTI_FLIT.NCB.LINK_1", UCP_EVENT_41H_10H) \
+__PMC_EV_ALIAS("QPI_TX_STALLED_MULTI_FLIT.NCS.LINK_1", UCP_EVENT_41H_20H) \
+__PMC_EV_ALIAS("QPI_TX_STALLED_MULTI_FLIT.LINK_0", UCP_EVENT_41H_07H) \
+__PMC_EV_ALIAS("QPI_TX_STALLED_MULTI_FLIT.LINK_1", UCP_EVENT_41H_38H) \
+__PMC_EV_ALIAS("QPI_TX_HEADER.FULL.LINK_0", UCP_EVENT_42H_01H) \
+__PMC_EV_ALIAS("QPI_TX_HEADER.BUSY.LINK_0", UCP_EVENT_42H_02H) \
+__PMC_EV_ALIAS("QPI_TX_HEADER.FULL.LINK_1", UCP_EVENT_42H_04H) \
+__PMC_EV_ALIAS("QPI_TX_HEADER.BUSY.LINK_1", UCP_EVENT_42H_08H) \
+__PMC_EV_ALIAS("QPI_RX_NO_PPT_CREDIT.STALLS.LINK_0", UCP_EVENT_43H_01H) \
+__PMC_EV_ALIAS("QPI_RX_NO_PPT_CREDIT.STALLS.LINK_1", UCP_EVENT_43H_02H) \
+__PMC_EV_ALIAS("DRAM_OPEN.CH0", UCP_EVENT_60H_01H) \
+__PMC_EV_ALIAS("DRAM_OPEN.CH1", UCP_EVENT_60H_02H) \
+__PMC_EV_ALIAS("DRAM_OPEN.CH2", UCP_EVENT_60H_04H) \
+__PMC_EV_ALIAS("DRAM_PAGE_CLOSE.CH0", UCP_EVENT_61H_01H) \
+__PMC_EV_ALIAS("DRAM_PAGE_CLOSE.CH1", UCP_EVENT_61H_02H) \
+__PMC_EV_ALIAS("DRAM_PAGE_CLOSE.CH2", UCP_EVENT_61H_04H) \
+__PMC_EV_ALIAS("DRAM_PAGE_MISS.CH0", UCP_EVENT_62H_01H) \
+__PMC_EV_ALIAS("DRAM_PAGE_MISS.CH1", UCP_EVENT_62H_02H) \
+__PMC_EV_ALIAS("DRAM_PAGE_MISS.CH2", UCP_EVENT_62H_04H) \
+__PMC_EV_ALIAS("DRAM_READ_CAS.CH0", UCP_EVENT_63H_01H) \
+__PMC_EV_ALIAS("DRAM_READ_CAS.AUTOPRE_CH0", UCP_EVENT_63H_02H) \
+__PMC_EV_ALIAS("DRAM_READ_CAS.CH1", UCP_EVENT_63H_04H) \
+__PMC_EV_ALIAS("DRAM_READ_CAS.AUTOPRE_CH1", UCP_EVENT_63H_08H) \
+__PMC_EV_ALIAS("DRAM_READ_CAS.CH2", UCP_EVENT_63H_10H) \
+__PMC_EV_ALIAS("DRAM_READ_CAS.AUTOPRE_CH2", UCP_EVENT_63H_20H) \
+__PMC_EV_ALIAS("DRAM_WRITE_CAS.CH0", UCP_EVENT_64H_01H) \
+__PMC_EV_ALIAS("DRAM_WRITE_CAS.AUTOPRE_CH0", UCP_EVENT_64H_02H) \
+__PMC_EV_ALIAS("DRAM_WRITE_CAS.CH1", UCP_EVENT_64H_04H) \
+__PMC_EV_ALIAS("DRAM_WRITE_CAS.AUTOPRE_CH1", UCP_EVENT_64H_08H) \
+__PMC_EV_ALIAS("DRAM_WRITE_CAS.CH2", UCP_EVENT_64H_10H) \
+__PMC_EV_ALIAS("DRAM_WRITE_CAS.AUTOPRE_CH2", UCP_EVENT_64H_20H) \
+__PMC_EV_ALIAS("DRAM_REFRESH.CH0", UCP_EVENT_65H_01H) \
+__PMC_EV_ALIAS("DRAM_REFRESH.CH1", UCP_EVENT_65H_02H) \
+__PMC_EV_ALIAS("DRAM_REFRESH.CH2", UCP_EVENT_65H_04H) \
+__PMC_EV_ALIAS("DRAM_PRE_ALL.CH0", UCP_EVENT_66H_01H) \
+__PMC_EV_ALIAS("DRAM_PRE_ALL.CH1", UCP_EVENT_66H_02H) \
+__PMC_EV_ALIAS("DRAM_PRE_ALL.CH2", UCP_EVENT_66H_04H) \
+__PMC_EV_ALIAS("DRAM_THERMAL_THROTTLED", UCP_EVENT_67H_01H) \
+__PMC_EV_ALIAS("THERMAL_THROTTLING_TEMP.CORE_0", UCP_EVENT_80H_01H) \
+__PMC_EV_ALIAS("THERMAL_THROTTLING_TEMP.CORE_1", UCP_EVENT_80H_02H) \
+__PMC_EV_ALIAS("THERMAL_THROTTLING_TEMP.CORE_2", UCP_EVENT_80H_04H) \
+__PMC_EV_ALIAS("THERMAL_THROTTLING_TEMP.CORE_3", UCP_EVENT_80H_08H) \
+__PMC_EV_ALIAS("THERMAL_THROTTLED_TEMP.CORE_0", UCP_EVENT_81H_01H) \
+__PMC_EV_ALIAS("THERMAL_THROTTLED_TEMP.CORE_1", UCP_EVENT_81H_02H) \
+__PMC_EV_ALIAS("THERMAL_THROTTLED_TEMP.CORE_2", UCP_EVENT_81H_04H) \
+__PMC_EV_ALIAS("THERMAL_THROTTLED_TEMP.CORE_3", UCP_EVENT_81H_08H) \
+__PMC_EV_ALIAS("PROCHOT_ASSERTION", UCP_EVENT_82H_01H) \
+__PMC_EV_ALIAS("THERMAL_THROTTLING_PROCHOT.CORE_0", UCP_EVENT_83H_01H) \
+__PMC_EV_ALIAS("THERMAL_THROTTLING_PROCHOT.CORE_1", UCP_EVENT_83H_02H) \
+__PMC_EV_ALIAS("THERMAL_THROTTLING_PROCHOT.CORE_2", UCP_EVENT_83H_04H) \
+__PMC_EV_ALIAS("THERMAL_THROTTLING_PROCHOT.CORE_3", UCP_EVENT_83H_08H) \
+__PMC_EV_ALIAS("TURBO_MODE.CORE_0", UCP_EVENT_84H_01H) \
+__PMC_EV_ALIAS("TURBO_MODE.CORE_1", UCP_EVENT_84H_02H) \
+__PMC_EV_ALIAS("TURBO_MODE.CORE_2", UCP_EVENT_84H_04H) \
+__PMC_EV_ALIAS("TURBO_MODE.CORE_3", UCP_EVENT_84H_08H) \
+__PMC_EV_ALIAS("CYCLES_UNHALTED_L3_FLL_ENABLE", UCP_EVENT_85H_02H) \
+__PMC_EV_ALIAS("CYCLES_UNHALTED_L3_FLL_DISABLE", UCP_EVENT_86H_01H)
+
+#define __PMC_EV_ALIAS_SANDYBRIDGEUC() \
+__PMC_EV_ALIAS("CB0_XSNP_RESPONSE.RSPIHITI", UCP_EVENT_22H_01H) \
+__PMC_EV_ALIAS("CB0_XSNP_RESPONSE.RSPIHITFSE", UCP_EVENT_22H_02H) \
+__PMC_EV_ALIAS("CB0_XSNP_RESPONSE.RSPSHITFSE", UCP_EVENT_22H_04H) \
+__PMC_EV_ALIAS("CB0_XSNP_RESPONSE.RSPSFWDM", UCP_EVENT_22H_08H) \
+__PMC_EV_ALIAS("CB0_XSNP_RESPONSE.AND_EXTERNAL", UCP_EVENT_22H_20H) \
+__PMC_EV_ALIAS("CB0_XSNP_RESPONSE.AND_XCORE", UCP_EVENT_22H_40H) \
+__PMC_EV_ALIAS("CB0_XSNP_RESPONSE_AND_XCORE2", UCP_EVENT_22H_80H) \
+__PMC_EV_ALIAS("CB0_CACHE_LOOKUP.M", UCP_EVENT_34H_01H) \
+__PMC_EV_ALIAS("CB0_CACHE_LOOKUP.E", UCP_EVENT_34H_02H) \
+__PMC_EV_ALIAS("CB0_CACHE_LOOKUP.S", UCP_EVENT_34H_04H) \
+__PMC_EV_ALIAS("CB0_CACHE_LOOKUP.I", UCP_EVENT_34H_08H) \
+__PMC_EV_ALIAS("CB0_CACHE_LOOKUP.AND_READ", UCP_EVENT_34H_10H) \
+__PMC_EV_ALIAS("CB0_CACHE_LOOKUP_AND_READ2", UCP_EVENT_34H_20H) \
+__PMC_EV_ALIAS("CB0_CACHE_LOOKUP.AND_EXTSNP", UCP_EVENT_34H_40H) \
+__PMC_EV_ALIAS("CB0_CACHE_LOOKUP.AND_ANY", UCP_EVENT_34H_80H) \
+__PMC_EV_ALIAS("IMPH_CB0_TRK_OCCUPANCY.ALL", UCP_EVENT_80H_01H) \
+__PMC_EV_ALIAS("IMPH_CB0_TRK_REQUEST.ALL", UCP_EVENT_81H_01H) \
+__PMC_EV_ALIAS("IMPH_CB0_TRK_REQUEST.WRITES", UCP_EVENT_81H_20H) \
+__PMC_EV_ALIAS("IMPH_CB0_TRK_REQUEST.EVICTIONS", UCP_EVENT_81H_80H) \
+__PMC_EV_ALIAS("IMPH_C0H_TRK_OCCUPANCY.ALL", UCP_EVENT_83H_01H) \
+__PMC_EV_ALIAS("IMPC_C0H_TRK_REQUEST.ALL", UCP_EVENT_84H_01H)
+
+/*
+ * Intel XScale events from:
+ *
+ * Intel XScale Core Developer's Manual
+ * January 2004, #27347302
+ *
+ * 3rd Generation Intel XScale Microarchitecture
+ * Developer's Manual
+ * May 2007, #31628302
+ *
+ * The first 14 events are for 1st and 2nd Generation Intel XScale
+ * cores; the remaining events are available only on 3rd Generation
+ * Intel XScale cores.
+ */
+#define __PMC_EV_XSCALE() \
+ __PMC_EV(XSCALE, IC_FETCH) \
+ __PMC_EV(XSCALE, IC_MISS) \
+ __PMC_EV(XSCALE, DATA_DEPENDENCY_STALLED) \
+ __PMC_EV(XSCALE, ITLB_MISS) \
+ __PMC_EV(XSCALE, DTLB_MISS) \
+ __PMC_EV(XSCALE, BRANCH_RETIRED) \
+ __PMC_EV(XSCALE, BRANCH_MISPRED) \
+ __PMC_EV(XSCALE, INSTR_RETIRED) \
+ __PMC_EV(XSCALE, DC_FULL_CYCLE) \
+ __PMC_EV(XSCALE, DC_FULL_CONTIG) \
+ __PMC_EV(XSCALE, DC_ACCESS) \
+ __PMC_EV(XSCALE, DC_MISS) \
+ __PMC_EV(XSCALE, DC_WRITEBACK) \
+ __PMC_EV(XSCALE, PC_CHANGE) \
+ __PMC_EV(XSCALE, BRANCH_RETIRED_ALL) \
+ __PMC_EV(XSCALE, INSTR_CYCLE) \
+ __PMC_EV(XSCALE, CP_STALL) \
+ __PMC_EV(XSCALE, PC_CHANGE_ALL) \
+ __PMC_EV(XSCALE, PIPELINE_FLUSH) \
+ __PMC_EV(XSCALE, BACKEND_STALL) \
+ __PMC_EV(XSCALE, MULTIPLIER_USE) \
+ __PMC_EV(XSCALE, MULTIPLIER_STALLED) \
+ __PMC_EV(XSCALE, DATA_CACHE_STALLED) \
+ __PMC_EV(XSCALE, L2_CACHE_REQ) \
+ __PMC_EV(XSCALE, L2_CACHE_MISS) \
+ __PMC_EV(XSCALE, ADDRESS_BUS_TRANS) \
+ __PMC_EV(XSCALE, SELF_ADDRESS_BUS_TRANS) \
+ __PMC_EV(XSCALE, DATA_BUS_TRANS)
+
+#define PMC_EV_XSCALE_FIRST PMC_EV_XSCALE_IC_FETCH
+#define PMC_EV_XSCALE_LAST PMC_EV_XSCALE_DATA_BUS_TRANS
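+
+/*
+ * Minimal sketch (hypothetical; not part of this interface) showing how
+ * the X-macro list above can be re-expanded into a name table, e.g. for
+ * event-name lookups.  The guard and array name are assumptions.
+ */
+#ifdef	PMC_EVENT_NAME_TABLE_EXAMPLE	/* hypothetical guard */
+#define	__PMC_EV(C,N)	#N ,
+static const char *xscale_event_names[] = {
+	__PMC_EV_XSCALE()
+};
+#undef	__PMC_EV
+/*
+ * With the _FIRST/_LAST bounds above, an in-range event code `ev' maps
+ * to its name as xscale_event_names[ev - PMC_EV_XSCALE_FIRST].
+ */
+#endif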
+
+/*
+ * MIPS events from "Programming the MIPS32 24K Core Family",
+ * Document Number: MD00355, Revision 04.63, December 19, 2008.
+ * The events are kept in the order found in Table 7.4.  Where an event
+ * differs between the left-hand column (counters 0/2) and the right-hand
+ * column (counters 1/3), the left-hand event is listed first, e.g.
+ * BRANCH_COMPLETED before BRANCH_MISPRED in the definition below.  An
+ * illustrative per-counter map follows the event list.
+ */
+
+#define __PMC_EV_MIPS24K() \
+ __PMC_EV(MIPS24K, CYCLE) \
+ __PMC_EV(MIPS24K, INSTR_EXECUTED) \
+ __PMC_EV(MIPS24K, BRANCH_COMPLETED) \
+ __PMC_EV(MIPS24K, BRANCH_MISPRED) \
+ __PMC_EV(MIPS24K, RETURN) \
+ __PMC_EV(MIPS24K, RETURN_MISPRED) \
+ __PMC_EV(MIPS24K, RETURN_NOT_31) \
+ __PMC_EV(MIPS24K, RETURN_NOTPRED) \
+ __PMC_EV(MIPS24K, ITLB_ACCESS) \
+ __PMC_EV(MIPS24K, ITLB_MISS) \
+ __PMC_EV(MIPS24K, DTLB_ACCESS) \
+ __PMC_EV(MIPS24K, DTLB_MISS) \
+ __PMC_EV(MIPS24K, JTLB_IACCESS) \
+ __PMC_EV(MIPS24K, JTLB_IMISS) \
+ __PMC_EV(MIPS24K, JTLB_DACCESS) \
+ __PMC_EV(MIPS24K, JTLB_DMISS) \
+ __PMC_EV(MIPS24K, IC_FETCH) \
+ __PMC_EV(MIPS24K, IC_MISS) \
+ __PMC_EV(MIPS24K, DC_LOADSTORE) \
+ __PMC_EV(MIPS24K, DC_WRITEBACK) \
+ __PMC_EV(MIPS24K, DC_MISS) \
+ __PMC_EV(MIPS24K, STORE_MISS) \
+ __PMC_EV(MIPS24K, LOAD_MISS) \
+ __PMC_EV(MIPS24K, INTEGER_COMPLETED) \
+ __PMC_EV(MIPS24K, FP_COMPLETED) \
+ __PMC_EV(MIPS24K, LOAD_COMPLETED) \
+ __PMC_EV(MIPS24K, STORE_COMPLETED) \
+ __PMC_EV(MIPS24K, BARRIER_COMPLETED) \
+ __PMC_EV(MIPS24K, MIPS16_COMPLETED) \
+ __PMC_EV(MIPS24K, NOP_COMPLETED) \
+ __PMC_EV(MIPS24K, INTEGER_MULDIV_COMPLETED)\
+ __PMC_EV(MIPS24K, RF_STALL) \
+ __PMC_EV(MIPS24K, INSTR_REFETCH) \
+ __PMC_EV(MIPS24K, STORE_COND_COMPLETED) \
+ __PMC_EV(MIPS24K, STORE_COND_FAILED) \
+ __PMC_EV(MIPS24K, ICACHE_REQUESTS) \
+ __PMC_EV(MIPS24K, ICACHE_HIT) \
+ __PMC_EV(MIPS24K, L2_WRITEBACK) \
+ __PMC_EV(MIPS24K, L2_ACCESS) \
+ __PMC_EV(MIPS24K, L2_MISS) \
+ __PMC_EV(MIPS24K, L2_ERR_CORRECTED) \
+ __PMC_EV(MIPS24K, EXCEPTIONS) \
+ __PMC_EV(MIPS24K, RF_CYCLES_STALLED) \
+ __PMC_EV(MIPS24K, IFU_CYCLES_STALLED) \
+ __PMC_EV(MIPS24K, ALU_CYCLES_STALLED) \
+ __PMC_EV(MIPS24K, UNCACHED_LOAD) \
+ __PMC_EV(MIPS24K, UNCACHED_STORE) \
+ __PMC_EV(MIPS24K, CP2_REG_TO_REG_COMPLETED)\
+ __PMC_EV(MIPS24K, MFTC_COMPLETED) \
+ __PMC_EV(MIPS24K, IC_BLOCKED_CYCLES) \
+ __PMC_EV(MIPS24K, DC_BLOCKED_CYCLES) \
+ __PMC_EV(MIPS24K, L2_IMISS_STALL_CYCLES) \
+ __PMC_EV(MIPS24K, L2_DMISS_STALL_CYCLES) \
+ __PMC_EV(MIPS24K, DMISS_CYCLES) \
+ __PMC_EV(MIPS24K, L2_MISS_CYCLES) \
+ __PMC_EV(MIPS24K, UNCACHED_BLOCK_CYCLES) \
+ __PMC_EV(MIPS24K, MDU_STALL_CYCLES) \
+ __PMC_EV(MIPS24K, FPU_STALL_CYCLES) \
+ __PMC_EV(MIPS24K, CP2_STALL_CYCLES) \
+ __PMC_EV(MIPS24K, COREXTEND_STALL_CYCLES) \
+ __PMC_EV(MIPS24K, ISPRAM_STALL_CYCLES) \
+ __PMC_EV(MIPS24K, DSPRAM_STALL_CYCLES) \
+ __PMC_EV(MIPS24K, CACHE_STALL_CYCLES) \
+ __PMC_EV(MIPS24K, LOAD_TO_USE_STALLS) \
+ __PMC_EV(MIPS24K, BASE_MISPRED_STALLS) \
+ __PMC_EV(MIPS24K, CPO_READ_STALLS) \
+ __PMC_EV(MIPS24K, BRANCH_MISPRED_CYCLES) \
+ __PMC_EV(MIPS24K, IFETCH_BUFFER_FULL) \
+ __PMC_EV(MIPS24K, FETCH_BUFFER_ALLOCATED) \
+ __PMC_EV(MIPS24K, EJTAG_ITRIGGER) \
+ __PMC_EV(MIPS24K, EJTAG_DTRIGGER) \
+ __PMC_EV(MIPS24K, FSB_LT_QUARTER) \
+ __PMC_EV(MIPS24K, FSB_QUARTER_TO_HALF) \
+ __PMC_EV(MIPS24K, FSB_GT_HALF) \
+ __PMC_EV(MIPS24K, FSB_FULL_PIPELINE_STALLS)\
+ __PMC_EV(MIPS24K, LDQ_LT_QUARTER) \
+ __PMC_EV(MIPS24K, LDQ_QUARTER_TO_HALF) \
+ __PMC_EV(MIPS24K, LDQ_GT_HALF) \
+ __PMC_EV(MIPS24K, LDQ_FULL_PIPELINE_STALLS)\
+ __PMC_EV(MIPS24K, WBB_LT_QUARTER) \
+ __PMC_EV(MIPS24K, WBB_QUARTER_TO_HALF) \
+ __PMC_EV(MIPS24K, WBB_GT_HALF) \
+ __PMC_EV(MIPS24K, WBB_FULL_PIPELINE_STALLS) \
+ __PMC_EV(MIPS24K, REQUEST_LATENCY) \
+ __PMC_EV(MIPS24K, REQUEST_COUNT)
+
+#define PMC_EV_MIPS24K_FIRST PMC_EV_MIPS24K_CYCLE
+#define	PMC_EV_MIPS24K_LAST	PMC_EV_MIPS24K_REQUEST_COUNT
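+
+/*
+ * Hypothetical sketch of the counter-pairing note above: a driver-side
+ * table could record which of counters 0-3 may carry a given event.
+ * The guard, struct, and mask values are illustrative assumptions; the
+ * authoritative event tables belong in hwpmc_mips24k.c.
+ */
+#ifdef	PMC_MIPS24K_COUNTER_MAP_EXAMPLE	/* hypothetical guard */
+struct mips24k_event_select {
+	enum pmc_event	es_event;	/* event from the list above */
+	uint8_t		es_counters;	/* bit i set => counter i usable */
+};
+static const struct mips24k_event_select mips24k_selects[] = {
+	{ PMC_EV_MIPS24K_CYCLE,			0x0F },	/* any counter */
+	{ PMC_EV_MIPS24K_BRANCH_COMPLETED,	0x05 },	/* counters 0/2 */
+	{ PMC_EV_MIPS24K_BRANCH_MISPRED,	0x0A },	/* counters 1/3 */
+};
+#endif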
+
+/*
+ * Cavium Octeon counters. Obtained from cvmx-core.h
+ */
+#define __PMC_EV_OCTEON() \
+ __PMC_EV(OCTEON, CLK) \
+ __PMC_EV(OCTEON, ISSUE) \
+ __PMC_EV(OCTEON, RET) \
+ __PMC_EV(OCTEON, NISSUE) \
+ __PMC_EV(OCTEON, SISSUE) \
+ __PMC_EV(OCTEON, DISSUE) \
+ __PMC_EV(OCTEON, IFI) \
+ __PMC_EV(OCTEON, BR) \
+ __PMC_EV(OCTEON, BRMIS) \
+ __PMC_EV(OCTEON, J) \
+ __PMC_EV(OCTEON, JMIS) \
+ __PMC_EV(OCTEON, REPLAY) \
+ __PMC_EV(OCTEON, IUNA) \
+ __PMC_EV(OCTEON, TRAP) \
+ __PMC_EV(OCTEON, UULOAD) \
+ __PMC_EV(OCTEON, UUSTORE) \
+ __PMC_EV(OCTEON, ULOAD) \
+ __PMC_EV(OCTEON, USTORE) \
+ __PMC_EV(OCTEON, EC) \
+ __PMC_EV(OCTEON, MC) \
+ __PMC_EV(OCTEON, CC) \
+ __PMC_EV(OCTEON, CSRC) \
+ __PMC_EV(OCTEON, CFETCH) \
+ __PMC_EV(OCTEON, CPREF) \
+ __PMC_EV(OCTEON, ICA) \
+ __PMC_EV(OCTEON, II) \
+ __PMC_EV(OCTEON, IP) \
+ __PMC_EV(OCTEON, CIMISS) \
+ __PMC_EV(OCTEON, WBUF) \
+ __PMC_EV(OCTEON, WDAT) \
+ __PMC_EV(OCTEON, WBUFLD) \
+ __PMC_EV(OCTEON, WBUFFL) \
+ __PMC_EV(OCTEON, WBUFTR) \
+ __PMC_EV(OCTEON, BADD) \
+ __PMC_EV(OCTEON, BADDL2) \
+ __PMC_EV(OCTEON, BFILL) \
+ __PMC_EV(OCTEON, DDIDS) \
+ __PMC_EV(OCTEON, IDIDS) \
+ __PMC_EV(OCTEON, DIDNA) \
+ __PMC_EV(OCTEON, LDS) \
+ __PMC_EV(OCTEON, LMLDS) \
+ __PMC_EV(OCTEON, IOLDS) \
+ __PMC_EV(OCTEON, DMLDS) \
+ __PMC_EV(OCTEON, STS) \
+ __PMC_EV(OCTEON, LMSTS) \
+ __PMC_EV(OCTEON, IOSTS) \
+ __PMC_EV(OCTEON, IOBDMA) \
+ __PMC_EV(OCTEON, DTLB) \
+ __PMC_EV(OCTEON, DTLBAD) \
+ __PMC_EV(OCTEON, ITLB) \
+ __PMC_EV(OCTEON, SYNC) \
+ __PMC_EV(OCTEON, SYNCIOB) \
+ __PMC_EV(OCTEON, SYNCW)
+
+#define PMC_EV_OCTEON_FIRST PMC_EV_OCTEON_CLK
+#define PMC_EV_OCTEON_LAST PMC_EV_OCTEON_SYNCW
+
+#define __PMC_EV_PPC7450() \
+ __PMC_EV(PPC7450, CYCLE) \
+ __PMC_EV(PPC7450, INSTR_COMPLETED) \
+ __PMC_EV(PPC7450, TLB_BIT_TRANSITIONS) \
+ __PMC_EV(PPC7450, INSTR_DISPATCHED) \
+ __PMC_EV(PPC7450, PMON_EXCEPT) \
+ __PMC_EV(PPC7450, PMON_SIG) \
+ __PMC_EV(PPC7450, VPU_INSTR_COMPLETED) \
+ __PMC_EV(PPC7450, VFPU_INSTR_COMPLETED) \
+ __PMC_EV(PPC7450, VIU1_INSTR_COMPLETED) \
+ __PMC_EV(PPC7450, VIU2_INSTR_COMPLETED) \
+ __PMC_EV(PPC7450, MTVSCR_INSTR_COMPLETED) \
+ __PMC_EV(PPC7450, MTVRSAVE_INSTR_COMPLETED) \
+ __PMC_EV(PPC7450, VPU_INSTR_WAIT_CYCLES) \
+ __PMC_EV(PPC7450, VFPU_INSTR_WAIT_CYCLES) \
+ __PMC_EV(PPC7450, VIU1_INSTR_WAIT_CYCLES) \
+ __PMC_EV(PPC7450, VIU2_INSTR_WAIT_CYCLES) \
+ __PMC_EV(PPC7450, MFVSCR_SYNC_CYCLES) \
+ __PMC_EV(PPC7450, VSCR_SAT_SET) \
+ __PMC_EV(PPC7450, STORE_INSTR_COMPLETED) \
+ __PMC_EV(PPC7450, L1_INSTR_CACHE_MISSES) \
+ __PMC_EV(PPC7450, L1_DATA_SNOOPS) \
+ __PMC_EV(PPC7450, UNRESOLVED_BRANCHES) \
+ __PMC_EV(PPC7450, SPEC_BUFFER_CYCLES) \
+ __PMC_EV(PPC7450, BRANCH_UNIT_STALL_CYCLES) \
+ __PMC_EV(PPC7450, TRUE_BRANCH_TARGET_HITS) \
+ __PMC_EV(PPC7450, BRANCH_LINK_STAC_PREDICTED) \
+ __PMC_EV(PPC7450, GPR_ISSUE_QUEUE_DISPATCHES) \
+ __PMC_EV(PPC7450, CYCLES_THREE_INSTR_DISPATCHED) \
+ __PMC_EV(PPC7450, THRESHOLD_INSTR_QUEUE_ENTRIES_CYCLES) \
+ __PMC_EV(PPC7450, THRESHOLD_VEC_INSTR_QUEUE_ENTRIES_CYCLES) \
+ __PMC_EV(PPC7450, CYCLES_NO_COMPLETED_INSTRS) \
+ __PMC_EV(PPC7450, IU2_INSTR_COMPLETED) \
+ __PMC_EV(PPC7450, BRANCHES_COMPLETED) \
+ __PMC_EV(PPC7450, EIEIO_INSTR_COMPLETED) \
+ __PMC_EV(PPC7450, MTSPR_INSTR_COMPLETED) \
+ __PMC_EV(PPC7450, SC_INSTR_COMPLETED) \
+ __PMC_EV(PPC7450, LS_LM_COMPLETED) \
+ __PMC_EV(PPC7450, ITLB_HW_TABLE_SEARCH_CYCLES) \
+ __PMC_EV(PPC7450, DTLB_HW_SEARCH_CYCLES_OVER_THRESHOLD) \
+ __PMC_EV(PPC7450, L1_INSTR_CACHE_ACCESSES) \
+ __PMC_EV(PPC7450, INSTR_BKPT_MATCHES) \
+ __PMC_EV(PPC7450, L1_DATA_CACHE_LOAD_MISS_CYCLES_OVER_THRESHOLD) \
+ __PMC_EV(PPC7450, L1_DATA_SNOOP_HIT_ON_MODIFIED) \
+ __PMC_EV(PPC7450, LOAD_MISS_ALIAS) \
+ __PMC_EV(PPC7450, LOAD_MISS_ALIAS_ON_TOUCH) \
+ __PMC_EV(PPC7450, TOUCH_ALIAS) \
+ __PMC_EV(PPC7450, L1_DATA_SNOOP_HIT_CASTOUT_QUEUE) \
+ __PMC_EV(PPC7450, L1_DATA_SNOOP_HIT_CASTOUT) \
+ __PMC_EV(PPC7450, L1_DATA_SNOOP_HITS) \
+ __PMC_EV(PPC7450, WRITE_THROUGH_STORES) \
+ __PMC_EV(PPC7450, CACHE_INHIBITED_STORES) \
+ __PMC_EV(PPC7450, L1_DATA_LOAD_HIT) \
+ __PMC_EV(PPC7450, L1_DATA_TOUCH_HIT) \
+ __PMC_EV(PPC7450, L1_DATA_STORE_HIT) \
+ __PMC_EV(PPC7450, L1_DATA_TOTAL_HITS) \
+ __PMC_EV(PPC7450, DST_INSTR_DISPATCHED) \
+ __PMC_EV(PPC7450, REFRESHED_DSTS) \
+ __PMC_EV(PPC7450, SUCCESSFUL_DST_TABLE_SEARCHES) \
+ __PMC_EV(PPC7450, DSS_INSTR_COMPLETED) \
+ __PMC_EV(PPC7450, DST_STREAM_0_CACHE_LINE_FETCHES) \
+ __PMC_EV(PPC7450, VTQ_SUSPENDS_DUE_TO_CTX_CHANGE) \
+ __PMC_EV(PPC7450, VTQ_LINE_FETCH_HIT) \
+ __PMC_EV(PPC7450, VEC_LOAD_INSTR_COMPLETED) \
+ __PMC_EV(PPC7450, FP_STORE_INSTR_COMPLETED_IN_LSU) \
+ __PMC_EV(PPC7450, FPU_RENORMALIZATION) \
+ __PMC_EV(PPC7450, FPU_DENORMALIZATION) \
+ __PMC_EV(PPC7450, FP_STORE_CAUSES_STALL_IN_LSU) \
+ __PMC_EV(PPC7450, LD_ST_TRUE_ALIAS_STALL) \
+ __PMC_EV(PPC7450, LSU_INDEXED_ALIAS_STALL) \
+ __PMC_EV(PPC7450, LSU_ALIAS_VS_FSQ_WB0_WB1) \
+ __PMC_EV(PPC7450, LSU_ALIAS_VS_CSQ) \
+ __PMC_EV(PPC7450, LSU_LOAD_HIT_LINE_ALIAS_VS_CSQ0) \
+ __PMC_EV(PPC7450, LSU_LOAD_MISS_LINE_ALIAS_VS_CSQ0) \
+ __PMC_EV(PPC7450, LSU_TOUCH_LINE_ALIAS_VS_FSQ_WB0_WB1) \
+ __PMC_EV(PPC7450, LSU_TOUCH_ALIAS_VS_CSQ) \
+ __PMC_EV(PPC7450, LSU_LMQ_FULL_STALL) \
+ __PMC_EV(PPC7450, FP_LOAD_INSTR_COMPLETED_IN_LSU) \
+ __PMC_EV(PPC7450, FP_LOAD_SINGLE_INSTR_COMPLETED_IN_LSU) \
+ __PMC_EV(PPC7450, FP_LOAD_DOUBLE_COMPLETED_IN_LSU) \
+ __PMC_EV(PPC7450, LSU_RA_LATCH_STALL) \
+ __PMC_EV(PPC7450, LSU_LOAD_VS_STORE_QUEUE_ALIAS_STALL) \
+ __PMC_EV(PPC7450, LSU_LMQ_INDEX_ALIAS) \
+ __PMC_EV(PPC7450, LSU_STORE_QUEUE_INDEX_ALIAS) \
+ __PMC_EV(PPC7450, LSU_CSQ_FORWARDING) \
+ __PMC_EV(PPC7450, LSU_MISALIGNED_LOAD_FINISH) \
+ __PMC_EV(PPC7450, LSU_MISALIGN_STORE_COMPLETED) \
+ __PMC_EV(PPC7450, LSU_MISALIGN_STALL) \
+ __PMC_EV(PPC7450, FP_ONE_QUARTER_FPSCR_RENAMES_BUSY) \
+ __PMC_EV(PPC7450, FP_ONE_HALF_FPSCR_RENAMES_BUSY) \
+ __PMC_EV(PPC7450, FP_THREE_QUARTERS_FPSCR_RENAMES_BUSY) \
+ __PMC_EV(PPC7450, FP_ALL_FPSCR_RENAMES_BUSY) \
+ __PMC_EV(PPC7450, FP_DENORMALIZED_RESULT) \
+ __PMC_EV(PPC7450, L1_DATA_TOTAL_MISSES) \
+ __PMC_EV(PPC7450, DISPATCHES_TO_FPR_ISSUE_QUEUE) \
+ __PMC_EV(PPC7450, LSU_INSTR_COMPLETED) \
+ __PMC_EV(PPC7450, LOAD_INSTR_COMPLETED) \
+ __PMC_EV(PPC7450, SS_SM_INSTR_COMPLETED) \
+ __PMC_EV(PPC7450, TLBIE_INSTR_COMPLETED) \
+ __PMC_EV(PPC7450, LWARX_INSTR_COMPLETED) \
+ __PMC_EV(PPC7450, MFSPR_INSTR_COMPLETED) \
+ __PMC_EV(PPC7450, REFETCH_SERIALIZATION) \
+ __PMC_EV(PPC7450, COMPLETION_QUEUE_ENTRIES_OVER_THRESHOLD) \
+ __PMC_EV(PPC7450, CYCLES_ONE_INSTR_DISPATCHED) \
+ __PMC_EV(PPC7450, CYCLES_TWO_INSTR_COMPLETED) \
+ __PMC_EV(PPC7450, ITLB_NON_SPECULATIVE_MISSES) \
+ __PMC_EV(PPC7450, CYCLES_WAITING_FROM_L1_INSTR_CACHE_MISS) \
+ __PMC_EV(PPC7450, L1_DATA_LOAD_ACCESS_MISS) \
+ __PMC_EV(PPC7450, L1_DATA_TOUCH_MISS) \
+ __PMC_EV(PPC7450, L1_DATA_STORE_MISS) \
+ __PMC_EV(PPC7450, L1_DATA_TOUCH_MISS_CYCLES) \
+ __PMC_EV(PPC7450, L1_DATA_CYCLES_USED) \
+ __PMC_EV(PPC7450, DST_STREAM_1_CACHE_LINE_FETCHES) \
+ __PMC_EV(PPC7450, VTQ_STREAM_CANCELED_PREMATURELY) \
+ __PMC_EV(PPC7450, VTQ_RESUMES_DUE_TO_CTX_CHANGE) \
+ __PMC_EV(PPC7450, VTQ_LINE_FETCH_MISS) \
+ __PMC_EV(PPC7450, VTQ_LINE_FETCH) \
+ __PMC_EV(PPC7450, TLBIE_SNOOPS) \
+ __PMC_EV(PPC7450, L1_INSTR_CACHE_RELOADS) \
+ __PMC_EV(PPC7450, L1_DATA_CACHE_RELOADS) \
+ __PMC_EV(PPC7450, L1_DATA_CACHE_CASTOUTS_TO_L2) \
+ __PMC_EV(PPC7450, STORE_MERGE_GATHER) \
+ __PMC_EV(PPC7450, CACHEABLE_STORE_MERGE_TO_32_BYTES) \
+ __PMC_EV(PPC7450, DATA_BKPT_MATCHES) \
+ __PMC_EV(PPC7450, FALL_THROUGH_BRANCHES_PROCESSED) \
+ __PMC_EV(PPC7450, FIRST_SPECULATIVE_BRANCH_BUFFER_RESOLVED_CORRECTLY) \
+ __PMC_EV(PPC7450, SECOND_SPECULATION_BUFFER_ACTIVE) \
+ __PMC_EV(PPC7450, BPU_STALL_ON_LR_DEPENDENCY) \
+ __PMC_EV(PPC7450, BTIC_MISS) \
+ __PMC_EV(PPC7450, BRANCH_LINK_STACK_CORRECTLY_RESOLVED) \
+ __PMC_EV(PPC7450, FPR_ISSUE_STALLED) \
+ __PMC_EV(PPC7450, SWITCHES_BETWEEN_PRIV_USER) \
+ __PMC_EV(PPC7450, LSU_COMPLETES_FP_STORE_SINGLE) \
+ __PMC_EV(PPC7450, VR_ISSUE_QUEUE_DISPATCHES) \
+ __PMC_EV(PPC7450, VR_STALLS) \
+ __PMC_EV(PPC7450, GPR_RENAME_BUFFER_ENTRIES_OVER_THRESHOLD) \
+ __PMC_EV(PPC7450, FPR_ISSUE_QUEUE_ENTRIES) \
+ __PMC_EV(PPC7450, FPU_INSTR_COMPLETED) \
+ __PMC_EV(PPC7450, STWCX_INSTR_COMPLETED) \
+ __PMC_EV(PPC7450, LS_LM_INSTR_PIECES) \
+ __PMC_EV(PPC7450, ITLB_HW_SEARCH_CYCLES_OVER_THRESHOLD) \
+ __PMC_EV(PPC7450, DTLB_MISSES) \
+ __PMC_EV(PPC7450, CANCELLED_L1_INSTR_CACHE_MISSES) \
+ __PMC_EV(PPC7450, L1_DATA_CACHE_OP_HIT) \
+ __PMC_EV(PPC7450, L1_DATA_LOAD_MISS_CYCLES) \
+ __PMC_EV(PPC7450, L1_DATA_PUSHES) \
+ __PMC_EV(PPC7450, L1_DATA_TOTAL_MISS) \
+ __PMC_EV(PPC7450, VT2_FETCHES) \
+ __PMC_EV(PPC7450, TAKEN_BRANCHES_PROCESSED) \
+ __PMC_EV(PPC7450, BRANCH_FLUSHES) \
+ __PMC_EV(PPC7450, SECOND_SPECULATIVE_BRANCH_BUFFER_RESOLVED_CORRECTLY) \
+ __PMC_EV(PPC7450, THIRD_SPECULATION_BUFFER_ACTIVE) \
+ __PMC_EV(PPC7450, BRANCH_UNIT_STALL_ON_CTR_DEPENDENCY) \
+ __PMC_EV(PPC7450, FAST_BTIC_HIT) \
+ __PMC_EV(PPC7450, BRANCH_LINK_STACK_MISPREDICTED) \
+ __PMC_EV(PPC7450, CYCLES_THREE_INSTR_COMPLETED) \
+ __PMC_EV(PPC7450, CYCLES_NO_INSTR_DISPATCHED) \
+ __PMC_EV(PPC7450, GPR_ISSUE_QUEUE_ENTRIES_OVER_THRESHOLD) \
+ __PMC_EV(PPC7450, GPR_ISSUE_QUEUE_STALLED) \
+ __PMC_EV(PPC7450, IU1_INSTR_COMPLETED) \
+ __PMC_EV(PPC7450, DSSALL_INSTR_COMPLETED) \
+ __PMC_EV(PPC7450, TLBSYNC_INSTR_COMPLETED) \
+ __PMC_EV(PPC7450, SYNC_INSTR_COMPLETED) \
+ __PMC_EV(PPC7450, SS_SM_INSTR_PIECES) \
+ __PMC_EV(PPC7450, DTLB_HW_SEARCH_CYCLES) \
+ __PMC_EV(PPC7450, SNOOP_RETRIES) \
+ __PMC_EV(PPC7450, SUCCESSFUL_STWCX) \
+ __PMC_EV(PPC7450, DST_STREAM_3_CACHE_LINE_FETCHES) \
+ __PMC_EV(PPC7450, THIRD_SPECULATIVE_BRANCH_BUFFER_RESOLVED_CORRECTLY) \
+ __PMC_EV(PPC7450, MISPREDICTED_BRANCHES) \
+ __PMC_EV(PPC7450, FOLDED_BRANCHES) \
+ __PMC_EV(PPC7450, FP_STORE_DOUBLE_COMPLETES_IN_LSU) \
+ __PMC_EV(PPC7450, L2_CACHE_HITS) \
+ __PMC_EV(PPC7450, L3_CACHE_HITS) \
+ __PMC_EV(PPC7450, L2_INSTR_CACHE_MISSES) \
+ __PMC_EV(PPC7450, L3_INSTR_CACHE_MISSES) \
+ __PMC_EV(PPC7450, L2_DATA_CACHE_MISSES) \
+ __PMC_EV(PPC7450, L3_DATA_CACHE_MISSES) \
+ __PMC_EV(PPC7450, L2_LOAD_HITS) \
+ __PMC_EV(PPC7450, L2_STORE_HITS) \
+ __PMC_EV(PPC7450, L3_LOAD_HITS) \
+ __PMC_EV(PPC7450, L3_STORE_HITS) \
+ __PMC_EV(PPC7450, L2_TOUCH_HITS) \
+ __PMC_EV(PPC7450, L3_TOUCH_HITS) \
+ __PMC_EV(PPC7450, SNOOP_MODIFIED) \
+ __PMC_EV(PPC7450, SNOOP_VALID) \
+ __PMC_EV(PPC7450, INTERVENTION) \
+ __PMC_EV(PPC7450, L2_CACHE_MISSES) \
+ __PMC_EV(PPC7450, L3_CACHE_MISSES) \
+ __PMC_EV(PPC7450, L2_CACHE_CASTOUTS) \
+ __PMC_EV(PPC7450, L3_CACHE_CASTOUTS) \
+ __PMC_EV(PPC7450, L2SQ_FULL_CYCLES) \
+ __PMC_EV(PPC7450, L3SQ_FULL_CYCLES) \
+ __PMC_EV(PPC7450, RAQ_FULL_CYCLES) \
+ __PMC_EV(PPC7450, WAQ_FULL_CYCLES) \
+ __PMC_EV(PPC7450, L1_EXTERNAL_INTERVENTIONS) \
+ __PMC_EV(PPC7450, L2_EXTERNAL_INTERVENTIONS) \
+ __PMC_EV(PPC7450, L3_EXTERNAL_INTERVENTIONS) \
+ __PMC_EV(PPC7450, EXTERNAL_INTERVENTIONS) \
+ __PMC_EV(PPC7450, EXTERNAL_PUSHES) \
+ __PMC_EV(PPC7450, EXTERNAL_SNOOP_RETRY) \
+ __PMC_EV(PPC7450, DTQ_FULL_CYCLES) \
+ __PMC_EV(PPC7450, BUS_RETRY) \
+ __PMC_EV(PPC7450, L2_VALID_REQUEST) \
+ __PMC_EV(PPC7450, BORDQ_FULL) \
+ __PMC_EV(PPC7450, BUS_TAS_FOR_READS) \
+ __PMC_EV(PPC7450, BUS_TAS_FOR_WRITES) \
+ __PMC_EV(PPC7450, BUS_READS_NOT_RETRIED) \
+ __PMC_EV(PPC7450, BUS_WRITES_NOT_RETRIED) \
+ __PMC_EV(PPC7450, BUS_READS_WRITES_NOT_RETRIED) \
+ __PMC_EV(PPC7450, BUS_RETRY_DUE_TO_L1_RETRY) \
+ __PMC_EV(PPC7450, BUS_RETRY_DUE_TO_PREVIOUS_ADJACENT) \
+ __PMC_EV(PPC7450, BUS_RETRY_DUE_TO_COLLISION) \
+ __PMC_EV(PPC7450, BUS_RETRY_DUE_TO_INTERVENTION_ORDERING) \
+ __PMC_EV(PPC7450, SNOOP_REQUESTS) \
+ __PMC_EV(PPC7450, PREFETCH_ENGINE_REQUEST) \
+ __PMC_EV(PPC7450, PREFETCH_ENGINE_COLLISION_VS_LOAD) \
+ __PMC_EV(PPC7450, PREFETCH_ENGINE_COLLISION_VS_STORE) \
+ __PMC_EV(PPC7450, PREFETCH_ENGINE_COLLISION_VS_INSTR_FETCH) \
+ __PMC_EV(PPC7450, PREFETCH_ENGINE_COLLISION_VS_LOAD_STORE_INSTR_FETCH) \
+ __PMC_EV(PPC7450, PREFETCH_ENGINE_FULL)
+
+#define PMC_EV_PPC7450_FIRST PMC_EV_PPC7450_CYCLE
+#define PMC_EV_PPC7450_LAST PMC_EV_PPC7450_PREFETCH_ENGINE_FULL
+
+/*
+ * All known PMC events.
+ *
+ * PMC event numbers are allocated sparsely to allow new PMC events to
+ * be added to a PMC class without breaking ABI compatibility. The
+ * current allocation scheme is:
+ *
+ * START #EVENTS DESCRIPTION
+ * 0 0x1000 Reserved
+ * 0x1000 0x0001 TSC
+ * 0x2000 0x0080 AMD K7 events
+ * 0x2080 0x0100 AMD K8 events
+ * 0x10000 0x0080 INTEL architectural fixed-function events
+ * 0x10080 0x0F80 INTEL architectural programmable events
+ * 0x11000 0x0080 INTEL Pentium 4 events
+ * 0x11080 0x0080 INTEL Pentium MMX events
+ * 0x11100 0x0100 INTEL Pentium Pro/P-II/P-III/Pentium-M events
+ * 0x11200 0x00FF INTEL XScale events
+ * 0x11300 0x00FF MIPS 24K events
+ * 0x11400	0x00FF		Cavium Octeon events
+ * 0x12000	0x0080		INTEL uncore fixed-function events
+ * 0x12080	0x0F80		INTEL uncore programmable events
+ * 0x13000	0x00FF		PowerPC 7450 events
+ * 0x20000	0x1000		Software events
+ */
+#define __PMC_EVENTS() \
+ __PMC_EV_BLOCK(TSC, 0x01000) \
+ __PMC_EV_TSC() \
+ __PMC_EV_BLOCK(K7, 0x2000) \
+ __PMC_EV_K7() \
+ __PMC_EV_BLOCK(K8, 0x2080) \
+ __PMC_EV_K8() \
+ __PMC_EV_BLOCK(IAF, 0x10000) \
+ __PMC_EV_IAF() \
+ __PMC_EV_BLOCK(IAP, 0x10080) \
+ __PMC_EV_IAP() \
+ __PMC_EV_BLOCK(P4, 0x11000) \
+ __PMC_EV_P4() \
+ __PMC_EV_BLOCK(P5, 0x11080) \
+ __PMC_EV_P5() \
+ __PMC_EV_BLOCK(P6, 0x11100) \
+ __PMC_EV_P6() \
+ __PMC_EV_BLOCK(XSCALE, 0x11200) \
+ __PMC_EV_XSCALE() \
+ __PMC_EV_BLOCK(MIPS24K, 0x11300) \
+ __PMC_EV_MIPS24K() \
+ __PMC_EV_BLOCK(OCTEON, 0x11400) \
+ __PMC_EV_OCTEON() \
+ __PMC_EV_BLOCK(UCF, 0x12000) \
+ __PMC_EV_UCF() \
+ __PMC_EV_BLOCK(UCP, 0x12080) \
+ __PMC_EV_UCP() \
+ __PMC_EV_BLOCK(PPC7450, 0x13000) \
+	__PMC_EV_PPC7450()						\
+	__PMC_EV_BLOCK(SOFT, 0x20000)					\
+	__PMC_EV_SOFT()
+
+#define PMC_EVENT_FIRST PMC_EV_TSC_TSC
+#define PMC_EVENT_LAST PMC_EV_SOFT_LAST
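+
+/*
+ * Minimal sketch (hypothetical; a consumer-side construct, not part of
+ * this header) of how __PMC_EVENTS() is expanded into the event
+ * enumeration, with each __PMC_EV_BLOCK() pinning a class's start value:
+ */
+#ifdef	PMC_EVENT_ENUM_EXAMPLE		/* hypothetical guard */
+enum pmc_event {
+#undef	__PMC_EV
+#undef	__PMC_EV_BLOCK
+#define	__PMC_EV_BLOCK(C,V)	PMC_EV_##C##__BLOCK_START = (V) - 1,
+#define	__PMC_EV(C,N)		PMC_EV_##C##_##N,
+	__PMC_EVENTS()
+};
+#endif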
+
+#endif /* _DEV_HWPMC_PMC_EVENTS_H_ */