-rw-r--r--  lib/libpmc/libpmc.c              62
-rw-r--r--  sys/dev/hwpmc/hwpmc_powerpc.c   812
-rw-r--r--  sys/dev/hwpmc/pmc_events.h      227
-rw-r--r--  sys/powerpc/aim/machdep.c         1
-rw-r--r--  sys/powerpc/aim/trap.c           15
-rw-r--r--  sys/powerpc/include/pmc_mdep.h    6
-rw-r--r--  sys/powerpc/include/spr.h        12
-rw-r--r--  sys/sys/pmc.h                    10
8 files changed, 1125 insertions, 20 deletions
diff --git a/lib/libpmc/libpmc.c b/lib/libpmc/libpmc.c
index efdabc8..ba63ace 100644
--- a/lib/libpmc/libpmc.c
+++ b/lib/libpmc/libpmc.c
@@ -83,6 +83,10 @@ static int mips24k_allocate_pmc(enum pmc_event _pe, char* ctrspec,
struct pmc_op_pmcallocate *_pmc_config);
#endif /* __mips__ */
+#if defined(__powerpc__)
+static int ppc7450_allocate_pmc(enum pmc_event _pe, char* ctrspec,
+ struct pmc_op_pmcallocate *_pmc_config);
+#endif /* __powerpc__ */
#define PMC_CALL(cmd, params) \
syscall(pmc_syscall, PMC_OP_##cmd, (params))
@@ -149,6 +153,7 @@ PMC_CLASSDEP_TABLE(p6, P6);
PMC_CLASSDEP_TABLE(xscale, XSCALE);
PMC_CLASSDEP_TABLE(mips24k, MIPS24K);
PMC_CLASSDEP_TABLE(ucf, UCF);
+PMC_CLASSDEP_TABLE(ppc7450, PPC7450);
#undef __PMC_EV_ALIAS
#define __PMC_EV_ALIAS(N,CODE) { N, PMC_EV_##CODE },
@@ -211,6 +216,7 @@ PMC_MDEP_TABLE(p5, P5, PMC_CLASS_TSC);
PMC_MDEP_TABLE(p6, P6, PMC_CLASS_TSC);
PMC_MDEP_TABLE(xscale, XSCALE, PMC_CLASS_XSCALE);
PMC_MDEP_TABLE(mips24k, MIPS24K, PMC_CLASS_MIPS24K);
+PMC_MDEP_TABLE(ppc7450, PPC7450, PMC_CLASS_PPC7450);
static const struct pmc_event_descr tsc_event_table[] =
{
@@ -263,6 +269,10 @@ PMC_CLASS_TABLE_DESC(xscale, XSCALE, xscale, xscale);
PMC_CLASS_TABLE_DESC(mips24k, MIPS24K, mips24k, mips24k);
#endif /* __mips__ */
+#if defined(__powerpc__)
+PMC_CLASS_TABLE_DESC(ppc7450, PPC7450, ppc7450, ppc7450);
+#endif
+
#undef PMC_CLASS_TABLE_DESC
static const struct pmc_class_descr **pmc_class_table;
@@ -2212,6 +2222,44 @@ mips24k_allocate_pmc(enum pmc_event pe, char *ctrspec __unused,
}
#endif /* __mips__ */
+#if defined(__powerpc__)
+
+static struct pmc_event_alias ppc7450_aliases[] = {
+ EV_ALIAS("instructions", "INSTR_COMPLETED"),
+ EV_ALIAS("branches", "BRANCHES_COMPLETED"),
+ EV_ALIAS("branch-mispredicts", "MISPREDICTED_BRANCHES"),
+ EV_ALIAS(NULL, NULL)
+};
+
+#define PPC7450_KW_OS "os"
+#define PPC7450_KW_USR "usr"
+#define PPC7450_KW_ANYTHREAD "anythread"
+
+static int
+ppc7450_allocate_pmc(enum pmc_event pe, char *ctrspec,
+ struct pmc_op_pmcallocate *pmc_config)
+{
+ char *p;
+
+ (void) pe;
+
+ pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
+
+ while ((p = strsep(&ctrspec, ",")) != NULL) {
+ if (KWMATCH(p, PPC7450_KW_OS))
+ pmc_config->pm_caps |= PMC_CAP_SYSTEM;
+ else if (KWMATCH(p, PPC7450_KW_USR))
+ pmc_config->pm_caps |= PMC_CAP_USER;
+ else if (KWMATCH(p, PPC7450_KW_ANYTHREAD))
+ pmc_config->pm_caps |= (PMC_CAP_USER | PMC_CAP_SYSTEM);
+ else
+ return (-1);
+ }
+
+ return (0);
+}
+#endif /* __powerpc__ */
+
/*
* Match an event name `name' with its canonical form.
@@ -2573,6 +2621,10 @@ pmc_event_names_of_class(enum pmc_class cl, const char ***eventnames,
ev = mips24k_event_table;
count = PMC_EVENT_TABLE_SIZE(mips24k);
break;
+ case PMC_CLASS_PPC7450:
+ ev = ppc7450_event_table;
+ count = PMC_EVENT_TABLE_SIZE(ppc7450);
+ break;
default:
errno = EINVAL;
return (-1);
@@ -2784,6 +2836,12 @@ pmc_init(void)
pmc_class_table[n] = &mips24k_class_table_descr;
break;
#endif /* __mips__ */
+#if defined(__powerpc__)
+ case PMC_CPU_PPC_7450:
+ PMC_MDEP_INIT(ppc7450);
+ pmc_class_table[n] = &ppc7450_class_table_descr;
+ break;
+#endif
default:
/*
* Some kind of CPU this version of the library knows nothing
@@ -2924,6 +2982,10 @@ _pmc_name_of_event(enum pmc_event pe, enum pmc_cputype cpu)
ev = mips24k_event_table;
evfence = mips24k_event_table + PMC_EVENT_TABLE_SIZE(mips24k);
+ } else if (pe >= PMC_EV_PPC7450_FIRST && pe <= PMC_EV_PPC7450_LAST) {
+ ev = ppc7450_event_table;
+ evfence = ppc7450_event_table + PMC_EVENT_TABLE_SIZE(ppc7450);
} else if (pe == PMC_EV_TSC_TSC) {
ev = tsc_event_table;
evfence = tsc_event_table + PMC_EVENT_TABLE_SIZE(tsc);
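
The keyword parsing in ppc7450_allocate_pmc() above lets a userland counter specification append "os", "usr" or "anythread" after the event name. Below is a minimal sketch of driving one of the new counters through libpmc; it is an illustration only, assuming the five-argument pmc_allocate() of this libpmc vintage, and keeps error handling to a minimum.

#include <sys/types.h>
#include <pmc.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	pmc_id_t pmcid;
	pmc_value_t v;

	if (pmc_init() < 0)
		return (1);
	/* "usr" is one of the keywords parsed by ppc7450_allocate_pmc(). */
	if (pmc_allocate("BRANCHES_COMPLETED,usr", PMC_MODE_SC, 0, 0,
	    &pmcid) < 0)
		return (1);
	pmc_start(pmcid);
	/* ... run the workload of interest ... */
	pmc_stop(pmcid);
	pmc_read(pmcid, &v);
	printf("branches completed: %ju\n", (uintmax_t)v);
	pmc_release(pmcid);
	return (0);
}
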
diff --git a/sys/dev/hwpmc/hwpmc_powerpc.c b/sys/dev/hwpmc/hwpmc_powerpc.c
index ce1caf6..32be384 100644
--- a/sys/dev/hwpmc/hwpmc_powerpc.c
+++ b/sys/dev/hwpmc/hwpmc_powerpc.c
@@ -1,4 +1,5 @@
/*-
+ * Copyright (c) 2011 Justin Hibbits
* Copyright (c) 2005, Joseph Koshy
* All rights reserved.
*
@@ -30,20 +31,297 @@ __FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/pmc.h>
+#include <sys/pmckern.h>
+#include <sys/systm.h>
#include <machine/pmc_mdep.h>
+#include <machine/spr.h>
+#include <machine/cpu.h>
-struct pmc_mdep *
-pmc_md_initialize()
-{
- return NULL;
-}
+#define POWERPC_PMC_CAPS (PMC_CAP_INTERRUPT | PMC_CAP_USER | \
+ PMC_CAP_SYSTEM | PMC_CAP_EDGE | \
+ PMC_CAP_THRESHOLD | PMC_CAP_READ | \
+ PMC_CAP_WRITE | PMC_CAP_INVERT | \
+ PMC_CAP_QUALIFIER)
-void
-pmc_md_finalize(struct pmc_mdep *md)
-{
- (void) md;
-}
+#define PPC_SET_PMC1SEL(r, x) ((r & ~(SPR_MMCR0_PMC1SEL(0x3f))) | SPR_MMCR0_PMC1SEL(x))
+#define PPC_SET_PMC2SEL(r, x) ((r & ~(SPR_MMCR0_PMC2SEL(0x3f))) | SPR_MMCR0_PMC2SEL(x))
+#define PPC_SET_PMC3SEL(r, x) ((r & ~(SPR_MMCR1_PMC3SEL(0x1f))) | SPR_MMCR1_PMC3SEL(x))
+#define PPC_SET_PMC4SEL(r, x) ((r & ~(SPR_MMCR1_PMC4SEL(0x1f))) | SPR_MMCR1_PMC4SEL(x))
+#define PPC_SET_PMC5SEL(r, x) ((r & ~(SPR_MMCR1_PMC5SEL(0x1f))) | SPR_MMCR1_PMC5SEL(x))
+#define PPC_SET_PMC6SEL(r, x) ((r & ~(SPR_MMCR1_PMC6SEL(0x3f))) | SPR_MMCR1_PMC6SEL(x))
+
+/* Change this when we support more than just the 7450. */
+#define PPC_MAX_PMCS 6
+
+#define POWERPC_PMC_KERNEL_ENABLE (0x1 << 30)
+#define POWERPC_PMC_USER_ENABLE (0x1 << 31)
+
+#define POWERPC_PMC_ENABLE (POWERPC_PMC_KERNEL_ENABLE | POWERPC_PMC_USER_ENABLE)
+#define POWERPC_RELOAD_COUNT_TO_PERFCTR_VALUE(V) (0x80000000-(V))
+#define POWERPC_PERFCTR_VALUE_TO_RELOAD_COUNT(P) ((P)-0x80000000)
+#define POWERPC_PMC_HAS_OVERFLOWED(x) (powerpc_pmcn_read(x) & (0x1 << 31))
+
+
+/*
+ * This should work for every 32-bit PowerPC implementation I know of (G3 and G4
+ * specifically). PowerPC 970 will take more work.
+ */
+
+/*
+ * Per-processor information.
+ */
+struct powerpc_cpu {
+ struct pmc_hw *pc_ppcpmcs;
+};
+
+static struct powerpc_cpu **powerpc_pcpu;
+
+struct powerpc_event_code_map {
+ enum pmc_event pe_ev; /* enum value */
+ uint8_t pe_counter_mask; /* Which counter this can be counted in. */
+ uint8_t pe_code; /* numeric code */
+};
+
+#define PPC_PMC_MASK1 0
+#define PPC_PMC_MASK2 1
+#define PPC_PMC_MASK3 2
+#define PPC_PMC_MASK4 3
+#define PPC_PMC_MASK5 4
+#define PPC_PMC_MASK6 5
+#define PPC_PMC_MASK_ALL 0x3f
+
+#define PMC_POWERPC_EVENT(id, mask, number) \
+ { .pe_ev = PMC_EV_PPC7450_##id, .pe_counter_mask = mask, .pe_code = number }
+
+static struct powerpc_event_code_map powerpc_event_codes[] = {
+ PMC_POWERPC_EVENT(CYCLE,PPC_PMC_MASK_ALL, 1),
+ PMC_POWERPC_EVENT(INSTR_COMPLETED, 0x0f, 2),
+ PMC_POWERPC_EVENT(TLB_BIT_TRANSITIONS, 0x0f, 3),
+ PMC_POWERPC_EVENT(INSTR_DISPATCHED, 0x0f, 4),
+ PMC_POWERPC_EVENT(PMON_EXCEPT, 0x0f, 5),
+ PMC_POWERPC_EVENT(PMON_SIG, 0x0f, 7),
+ PMC_POWERPC_EVENT(VPU_INSTR_COMPLETED, 0x03, 8),
+ PMC_POWERPC_EVENT(VFPU_INSTR_COMPLETED, 0x03, 9),
+ PMC_POWERPC_EVENT(VIU1_INSTR_COMPLETED, 0x03, 10),
+ PMC_POWERPC_EVENT(VIU2_INSTR_COMPLETED, 0x03, 11),
+ PMC_POWERPC_EVENT(MTVSCR_INSTR_COMPLETED, 0x03, 12),
+ PMC_POWERPC_EVENT(MTVRSAVE_INSTR_COMPLETED, 0x03, 13),
+ PMC_POWERPC_EVENT(VPU_INSTR_WAIT_CYCLES, 0x03, 14),
+ PMC_POWERPC_EVENT(VFPU_INSTR_WAIT_CYCLES, 0x03, 15),
+ PMC_POWERPC_EVENT(VIU1_INSTR_WAIT_CYCLES, 0x03, 16),
+ PMC_POWERPC_EVENT(VIU2_INSTR_WAIT_CYCLES, 0x03, 17),
+ PMC_POWERPC_EVENT(MFVSCR_SYNC_CYCLES, 0x03, 18),
+ PMC_POWERPC_EVENT(VSCR_SAT_SET, 0x03, 19),
+ PMC_POWERPC_EVENT(STORE_INSTR_COMPLETED, 0x03, 20),
+ PMC_POWERPC_EVENT(L1_INSTR_CACHE_MISSES, 0x03, 21),
+ PMC_POWERPC_EVENT(L1_DATA_SNOOPS, 0x03, 22),
+ PMC_POWERPC_EVENT(UNRESOLVED_BRANCHES, 0x01, 23),
+ PMC_POWERPC_EVENT(SPEC_BUFFER_CYCLES, 0x01, 24),
+ PMC_POWERPC_EVENT(BRANCH_UNIT_STALL_CYCLES, 0x01, 25),
+ PMC_POWERPC_EVENT(TRUE_BRANCH_TARGET_HITS, 0x01, 26),
+ PMC_POWERPC_EVENT(BRANCH_LINK_STAC_PREDICTED, 0x01, 27),
+ PMC_POWERPC_EVENT(GPR_ISSUE_QUEUE_DISPATCHES, 0x01, 28),
+ PMC_POWERPC_EVENT(CYCLES_THREE_INSTR_DISPATCHED, 0x01, 29),
+ PMC_POWERPC_EVENT(THRESHOLD_INSTR_QUEUE_ENTRIES_CYCLES, 0x01, 30),
+ PMC_POWERPC_EVENT(THRESHOLD_VEC_INSTR_QUEUE_ENTRIES_CYCLES, 0x01, 31),
+ PMC_POWERPC_EVENT(CYCLES_NO_COMPLETED_INSTRS, 0x01, 32),
+ PMC_POWERPC_EVENT(IU2_INSTR_COMPLETED, 0x01, 33),
+ PMC_POWERPC_EVENT(BRANCHES_COMPLETED, 0x01, 34),
+ PMC_POWERPC_EVENT(EIEIO_INSTR_COMPLETED, 0x01, 35),
+ PMC_POWERPC_EVENT(MTSPR_INSTR_COMPLETED, 0x01, 36),
+ PMC_POWERPC_EVENT(SC_INSTR_COMPLETED, 0x01, 37),
+ PMC_POWERPC_EVENT(LS_LM_COMPLETED, 0x01, 38),
+ PMC_POWERPC_EVENT(ITLB_HW_TABLE_SEARCH_CYCLES, 0x01, 39),
+ PMC_POWERPC_EVENT(DTLB_HW_SEARCH_CYCLES_OVER_THRESHOLD, 0x01, 40),
+ PMC_POWERPC_EVENT(L1_INSTR_CACHE_ACCESSES, 0x01, 41),
+ PMC_POWERPC_EVENT(INSTR_BKPT_MATCHES, 0x01, 42),
+ PMC_POWERPC_EVENT(L1_DATA_CACHE_LOAD_MISS_CYCLES_OVER_THRESHOLD, 0x01, 43),
+ PMC_POWERPC_EVENT(L1_DATA_SNOOP_HIT_ON_MODIFIED, 0x01, 44),
+ PMC_POWERPC_EVENT(LOAD_MISS_ALIAS, 0x01, 45),
+ PMC_POWERPC_EVENT(LOAD_MISS_ALIAS_ON_TOUCH, 0x01, 46),
+ PMC_POWERPC_EVENT(TOUCH_ALIAS, 0x01, 47),
+ PMC_POWERPC_EVENT(L1_DATA_SNOOP_HIT_CASTOUT_QUEUE, 0x01, 48),
+ PMC_POWERPC_EVENT(L1_DATA_SNOOP_HIT_CASTOUT, 0x01, 49),
+ PMC_POWERPC_EVENT(L1_DATA_SNOOP_HITS, 0x01, 50),
+ PMC_POWERPC_EVENT(WRITE_THROUGH_STORES, 0x01, 51),
+ PMC_POWERPC_EVENT(CACHE_INHIBITED_STORES, 0x01, 52),
+ PMC_POWERPC_EVENT(L1_DATA_LOAD_HIT, 0x01, 53),
+ PMC_POWERPC_EVENT(L1_DATA_TOUCH_HIT, 0x01, 54),
+ PMC_POWERPC_EVENT(L1_DATA_STORE_HIT, 0x01, 55),
+ PMC_POWERPC_EVENT(L1_DATA_TOTAL_HITS, 0x01, 56),
+ PMC_POWERPC_EVENT(DST_INSTR_DISPATCHED, 0x01, 57),
+ PMC_POWERPC_EVENT(REFRESHED_DSTS, 0x01, 58),
+ PMC_POWERPC_EVENT(SUCCESSFUL_DST_TABLE_SEARCHES, 0x01, 59),
+ PMC_POWERPC_EVENT(DSS_INSTR_COMPLETED, 0x01, 60),
+ PMC_POWERPC_EVENT(DST_STREAM_0_CACHE_LINE_FETCHES, 0x01, 61),
+ PMC_POWERPC_EVENT(VTQ_SUSPENDS_DUE_TO_CTX_CHANGE, 0x01, 62),
+ PMC_POWERPC_EVENT(VTQ_LINE_FETCH_HIT, 0x01, 63),
+ PMC_POWERPC_EVENT(VEC_LOAD_INSTR_COMPLETED, 0x01, 64),
+ PMC_POWERPC_EVENT(FP_STORE_INSTR_COMPLETED_IN_LSU, 0x01, 65),
+ PMC_POWERPC_EVENT(FPU_RENORMALIZATION, 0x01, 66),
+ PMC_POWERPC_EVENT(FPU_DENORMALIZATION, 0x01, 67),
+ PMC_POWERPC_EVENT(FP_STORE_CAUSES_STALL_IN_LSU, 0x01, 68),
+ PMC_POWERPC_EVENT(LD_ST_TRUE_ALIAS_STALL, 0x01, 70),
+ PMC_POWERPC_EVENT(LSU_INDEXED_ALIAS_STALL, 0x01, 71),
+ PMC_POWERPC_EVENT(LSU_ALIAS_VS_FSQ_WB0_WB1, 0x01, 72),
+ PMC_POWERPC_EVENT(LSU_ALIAS_VS_CSQ, 0x01, 73),
+ PMC_POWERPC_EVENT(LSU_LOAD_HIT_LINE_ALIAS_VS_CSQ0, 0x01, 74),
+ PMC_POWERPC_EVENT(LSU_LOAD_MISS_LINE_ALIAS_VS_CSQ0, 0x01, 75),
+ PMC_POWERPC_EVENT(LSU_TOUCH_LINE_ALIAS_VS_FSQ_WB0_WB1, 0x01, 76),
+ PMC_POWERPC_EVENT(LSU_TOUCH_ALIAS_VS_CSQ, 0x01, 77),
+ PMC_POWERPC_EVENT(LSU_LMQ_FULL_STALL, 0x01, 78),
+ PMC_POWERPC_EVENT(FP_LOAD_INSTR_COMPLETED_IN_LSU, 0x01, 79),
+ PMC_POWERPC_EVENT(FP_LOAD_SINGLE_INSTR_COMPLETED_IN_LSU, 0x01, 80),
+ PMC_POWERPC_EVENT(FP_LOAD_DOUBLE_COMPLETED_IN_LSU, 0x01, 81),
+ PMC_POWERPC_EVENT(LSU_RA_LATCH_STALL, 0x01, 82),
+ PMC_POWERPC_EVENT(LSU_LOAD_VS_STORE_QUEUE_ALIAS_STALL, 0x01, 83),
+ PMC_POWERPC_EVENT(LSU_LMQ_INDEX_ALIAS, 0x01, 84),
+ PMC_POWERPC_EVENT(LSU_STORE_QUEUE_INDEX_ALIAS, 0x01, 85),
+ PMC_POWERPC_EVENT(LSU_CSQ_FORWARDING, 0x01, 86),
+ PMC_POWERPC_EVENT(LSU_MISALIGNED_LOAD_FINISH, 0x01, 87),
+ PMC_POWERPC_EVENT(LSU_MISALIGN_STORE_COMPLETED, 0x01, 88),
+ PMC_POWERPC_EVENT(LSU_MISALIGN_STALL, 0x01, 89),
+ PMC_POWERPC_EVENT(FP_ONE_QUARTER_FPSCR_RENAMES_BUSY, 0x01, 90),
+ PMC_POWERPC_EVENT(FP_ONE_HALF_FPSCR_RENAMES_BUSY, 0x01, 91),
+ PMC_POWERPC_EVENT(FP_THREE_QUARTERS_FPSCR_RENAMES_BUSY, 0x01, 92),
+ PMC_POWERPC_EVENT(FP_ALL_FPSCR_RENAMES_BUSY, 0x01, 93),
+ PMC_POWERPC_EVENT(FP_DENORMALIZED_RESULT, 0x01, 94),
+ PMC_POWERPC_EVENT(L1_DATA_TOTAL_MISSES, 0x02, 23),
+ PMC_POWERPC_EVENT(DISPATCHES_TO_FPR_ISSUE_QUEUE, 0x02, 24),
+ PMC_POWERPC_EVENT(LSU_INSTR_COMPLETED, 0x02, 25),
+ PMC_POWERPC_EVENT(LOAD_INSTR_COMPLETED, 0x02, 26),
+ PMC_POWERPC_EVENT(SS_SM_INSTR_COMPLETED, 0x02, 27),
+ PMC_POWERPC_EVENT(TLBIE_INSTR_COMPLETED, 0x02, 28),
+ PMC_POWERPC_EVENT(LWARX_INSTR_COMPLETED, 0x02, 29),
+ PMC_POWERPC_EVENT(MFSPR_INSTR_COMPLETED, 0x02, 30),
+ PMC_POWERPC_EVENT(REFETCH_SERIALIZATION, 0x02, 31),
+ PMC_POWERPC_EVENT(COMPLETION_QUEUE_ENTRIES_OVER_THRESHOLD, 0x02, 32),
+ PMC_POWERPC_EVENT(CYCLES_ONE_INSTR_DISPATCHED, 0x02, 33),
+ PMC_POWERPC_EVENT(CYCLES_TWO_INSTR_COMPLETED, 0x02, 34),
+ PMC_POWERPC_EVENT(ITLB_NON_SPECULATIVE_MISSES, 0x02, 35),
+ PMC_POWERPC_EVENT(CYCLES_WAITING_FROM_L1_INSTR_CACHE_MISS, 0x02, 36),
+ PMC_POWERPC_EVENT(L1_DATA_LOAD_ACCESS_MISS, 0x02, 37),
+ PMC_POWERPC_EVENT(L1_DATA_TOUCH_MISS, 0x02, 38),
+ PMC_POWERPC_EVENT(L1_DATA_STORE_MISS, 0x02, 39),
+ PMC_POWERPC_EVENT(L1_DATA_TOUCH_MISS_CYCLES, 0x02, 40),
+ PMC_POWERPC_EVENT(L1_DATA_CYCLES_USED, 0x02, 41),
+ PMC_POWERPC_EVENT(DST_STREAM_1_CACHE_LINE_FETCHES, 0x02, 42),
+ PMC_POWERPC_EVENT(VTQ_STREAM_CANCELED_PREMATURELY, 0x02, 43),
+ PMC_POWERPC_EVENT(VTQ_RESUMES_DUE_TO_CTX_CHANGE, 0x02, 44),
+ PMC_POWERPC_EVENT(VTQ_LINE_FETCH_MISS, 0x02, 45),
+ PMC_POWERPC_EVENT(VTQ_LINE_FETCH, 0x02, 46),
+ PMC_POWERPC_EVENT(TLBIE_SNOOPS, 0x02, 47),
+ PMC_POWERPC_EVENT(L1_INSTR_CACHE_RELOADS, 0x02, 48),
+ PMC_POWERPC_EVENT(L1_DATA_CACHE_RELOADS, 0x02, 49),
+ PMC_POWERPC_EVENT(L1_DATA_CACHE_CASTOUTS_TO_L2, 0x02, 50),
+ PMC_POWERPC_EVENT(STORE_MERGE_GATHER, 0x02, 51),
+ PMC_POWERPC_EVENT(CACHEABLE_STORE_MERGE_TO_32_BYTES, 0x02, 52),
+ PMC_POWERPC_EVENT(DATA_BKPT_MATCHES, 0x02, 53),
+ PMC_POWERPC_EVENT(FALL_THROUGH_BRANCHES_PROCESSED, 0x02, 54),
+ PMC_POWERPC_EVENT(FIRST_SPECULATIVE_BRANCH_BUFFER_RESOLVED_CORRECTLY, 0x02, 55),
+ PMC_POWERPC_EVENT(SECOND_SPECULATION_BUFFER_ACTIVE, 0x02, 56),
+ PMC_POWERPC_EVENT(BPU_STALL_ON_LR_DEPENDENCY, 0x02, 57),
+ PMC_POWERPC_EVENT(BTIC_MISS, 0x02, 58),
+ PMC_POWERPC_EVENT(BRANCH_LINK_STACK_CORRECTLY_RESOLVED, 0x02, 59),
+ PMC_POWERPC_EVENT(FPR_ISSUE_STALLED, 0x02, 60),
+ PMC_POWERPC_EVENT(SWITCHES_BETWEEN_PRIV_USER, 0x02, 61),
+ PMC_POWERPC_EVENT(LSU_COMPLETES_FP_STORE_SINGLE, 0x02, 62),
+ PMC_POWERPC_EVENT(CYCLES_TWO_INSTR_COMPLETED, 0x04, 8),
+ PMC_POWERPC_EVENT(CYCLES_ONE_INSTR_DISPATCHED, 0x04, 9),
+ PMC_POWERPC_EVENT(VR_ISSUE_QUEUE_DISPATCHES, 0x04, 10),
+ PMC_POWERPC_EVENT(VR_STALLS, 0x04, 11),
+ PMC_POWERPC_EVENT(GPR_RENAME_BUFFER_ENTRIES_OVER_THRESHOLD, 0x04, 12),
+ PMC_POWERPC_EVENT(FPR_ISSUE_QUEUE_ENTRIES, 0x04, 13),
+ PMC_POWERPC_EVENT(FPU_INSTR_COMPLETED, 0x04, 14),
+ PMC_POWERPC_EVENT(STWCX_INSTR_COMPLETED, 0x04, 15),
+ PMC_POWERPC_EVENT(LS_LM_INSTR_PIECES, 0x04, 16),
+ PMC_POWERPC_EVENT(ITLB_HW_SEARCH_CYCLES_OVER_THRESHOLD, 0x04, 17),
+ PMC_POWERPC_EVENT(DTLB_MISSES, 0x04, 18),
+ PMC_POWERPC_EVENT(CANCELLED_L1_INSTR_CACHE_MISSES, 0x04, 19),
+ PMC_POWERPC_EVENT(L1_DATA_CACHE_OP_HIT, 0x04, 20),
+ PMC_POWERPC_EVENT(L1_DATA_LOAD_MISS_CYCLES, 0x04, 21),
+ PMC_POWERPC_EVENT(L1_DATA_PUSHES, 0x04, 22),
+ PMC_POWERPC_EVENT(L1_DATA_TOTAL_MISS, 0x04, 23),
+ PMC_POWERPC_EVENT(VT2_FETCHES, 0x04, 24),
+ PMC_POWERPC_EVENT(TAKEN_BRANCHES_PROCESSED, 0x04, 25),
+ PMC_POWERPC_EVENT(BRANCH_FLUSHES, 0x04, 26),
+ PMC_POWERPC_EVENT(SECOND_SPECULATIVE_BRANCH_BUFFER_RESOLVED_CORRECTLY, 0x04, 27),
+ PMC_POWERPC_EVENT(THIRD_SPECULATION_BUFFER_ACTIVE, 0x04, 28),
+ PMC_POWERPC_EVENT(BRANCH_UNIT_STALL_ON_CTR_DEPENDENCY, 0x04, 29),
+ PMC_POWERPC_EVENT(FAST_BTIC_HIT, 0x04, 30),
+ PMC_POWERPC_EVENT(BRANCH_LINK_STACK_MISPREDICTED, 0x04, 31),
+ PMC_POWERPC_EVENT(CYCLES_THREE_INSTR_COMPLETED, 0x08, 14),
+ PMC_POWERPC_EVENT(CYCLES_NO_INSTR_DISPATCHED, 0x08, 15),
+ PMC_POWERPC_EVENT(GPR_ISSUE_QUEUE_ENTRIES_OVER_THRESHOLD, 0x08, 16),
+ PMC_POWERPC_EVENT(GPR_ISSUE_QUEUE_STALLED, 0x08, 17),
+ PMC_POWERPC_EVENT(IU1_INSTR_COMPLETED, 0x08, 18),
+ PMC_POWERPC_EVENT(DSSALL_INSTR_COMPLETED, 0x08, 19),
+ PMC_POWERPC_EVENT(TLBSYNC_INSTR_COMPLETED, 0x08, 20),
+ PMC_POWERPC_EVENT(SYNC_INSTR_COMPLETED, 0x08, 21),
+ PMC_POWERPC_EVENT(SS_SM_INSTR_PIECES, 0x08, 22),
+ PMC_POWERPC_EVENT(DTLB_HW_SEARCH_CYCLES, 0x08, 23),
+ PMC_POWERPC_EVENT(SNOOP_RETRIES, 0x08, 24),
+ PMC_POWERPC_EVENT(SUCCESSFUL_STWCX, 0x08, 25),
+ PMC_POWERPC_EVENT(DST_STREAM_3_CACHE_LINE_FETCHES, 0x08, 26),
+ PMC_POWERPC_EVENT(THIRD_SPECULATIVE_BRANCH_BUFFER_RESOLVED_CORRECTLY, 0x08, 27),
+ PMC_POWERPC_EVENT(MISPREDICTED_BRANCHES, 0x08, 28),
+ PMC_POWERPC_EVENT(FOLDED_BRANCHES, 0x08, 29),
+ PMC_POWERPC_EVENT(FP_STORE_DOUBLE_COMPLETES_IN_LSU, 0x08, 30),
+ PMC_POWERPC_EVENT(L2_CACHE_HITS, 0x30, 2),
+ PMC_POWERPC_EVENT(L3_CACHE_HITS, 0x30, 3),
+ PMC_POWERPC_EVENT(L2_INSTR_CACHE_MISSES, 0x30, 4),
+ PMC_POWERPC_EVENT(L3_INSTR_CACHE_MISSES, 0x30, 5),
+ PMC_POWERPC_EVENT(L2_DATA_CACHE_MISSES, 0x30, 6),
+ PMC_POWERPC_EVENT(L3_DATA_CACHE_MISSES, 0x30, 7),
+ PMC_POWERPC_EVENT(L2_LOAD_HITS, 0x10, 8),
+ PMC_POWERPC_EVENT(L2_STORE_HITS, 0x10, 9),
+ PMC_POWERPC_EVENT(L3_LOAD_HITS, 0x10, 10),
+ PMC_POWERPC_EVENT(L3_STORE_HITS, 0x10, 11),
+ PMC_POWERPC_EVENT(L2_TOUCH_HITS, 0x30, 13),
+ PMC_POWERPC_EVENT(L3_TOUCH_HITS, 0x30, 14),
+ PMC_POWERPC_EVENT(SNOOP_RETRIES, 0x30, 15),
+ PMC_POWERPC_EVENT(SNOOP_MODIFIED, 0x10, 16),
+ PMC_POWERPC_EVENT(SNOOP_VALID, 0x10, 17),
+ PMC_POWERPC_EVENT(INTERVENTION, 0x30, 18),
+ PMC_POWERPC_EVENT(L2_CACHE_MISSES, 0x10, 19),
+ PMC_POWERPC_EVENT(L3_CACHE_MISSES, 0x10, 20),
+ PMC_POWERPC_EVENT(L2_CACHE_CASTOUTS, 0x20, 8),
+ PMC_POWERPC_EVENT(L3_CACHE_CASTOUTS, 0x20, 9),
+ PMC_POWERPC_EVENT(L2SQ_FULL_CYCLES, 0x20, 10),
+ PMC_POWERPC_EVENT(L3SQ_FULL_CYCLES, 0x20, 11),
+ PMC_POWERPC_EVENT(RAQ_FULL_CYCLES, 0x20, 16),
+ PMC_POWERPC_EVENT(WAQ_FULL_CYCLES, 0x20, 17),
+ PMC_POWERPC_EVENT(L1_EXTERNAL_INTERVENTIONS, 0x20, 19),
+ PMC_POWERPC_EVENT(L2_EXTERNAL_INTERVENTIONS, 0x20, 20),
+ PMC_POWERPC_EVENT(L3_EXTERNAL_INTERVENTIONS, 0x20, 21),
+ PMC_POWERPC_EVENT(EXTERNAL_INTERVENTIONS, 0x20, 22),
+ PMC_POWERPC_EVENT(EXTERNAL_PUSHES, 0x20, 23),
+ PMC_POWERPC_EVENT(EXTERNAL_SNOOP_RETRY, 0x20, 24),
+ PMC_POWERPC_EVENT(DTQ_FULL_CYCLES, 0x20, 25),
+ PMC_POWERPC_EVENT(BUS_RETRY, 0x20, 26),
+ PMC_POWERPC_EVENT(L2_VALID_REQUEST, 0x20, 27),
+ PMC_POWERPC_EVENT(BORDQ_FULL, 0x20, 28),
+ PMC_POWERPC_EVENT(BUS_TAS_FOR_READS, 0x20, 42),
+ PMC_POWERPC_EVENT(BUS_TAS_FOR_WRITES, 0x20, 43),
+ PMC_POWERPC_EVENT(BUS_READS_NOT_RETRIED, 0x20, 44),
+ PMC_POWERPC_EVENT(BUS_WRITES_NOT_RETRIED, 0x20, 45),
+ PMC_POWERPC_EVENT(BUS_READS_WRITES_NOT_RETRIED, 0x20, 46),
+ PMC_POWERPC_EVENT(BUS_RETRY_DUE_TO_L1_RETRY, 0x20, 47),
+ PMC_POWERPC_EVENT(BUS_RETRY_DUE_TO_PREVIOUS_ADJACENT, 0x20, 48),
+ PMC_POWERPC_EVENT(BUS_RETRY_DUE_TO_COLLISION, 0x20, 49),
+ PMC_POWERPC_EVENT(BUS_RETRY_DUE_TO_INTERVENTION_ORDERING, 0x20, 50),
+ PMC_POWERPC_EVENT(SNOOP_REQUESTS, 0x20, 51),
+ PMC_POWERPC_EVENT(PREFETCH_ENGINE_REQUEST, 0x20, 52),
+ PMC_POWERPC_EVENT(PREFETCH_ENGINE_COLLISION_VS_LOAD, 0x20, 53),
+ PMC_POWERPC_EVENT(PREFETCH_ENGINE_COLLISION_VS_STORE, 0x20, 54),
+ PMC_POWERPC_EVENT(PREFETCH_ENGINE_COLLISION_VS_INSTR_FETCH, 0x20, 55),
+ PMC_POWERPC_EVENT(PREFETCH_ENGINE_COLLISION_VS_LOAD_STORE_INSTR_FETCH, 0x20, 56),
+ PMC_POWERPC_EVENT(PREFETCH_ENGINE_FULL, 0x20, 57)
+};
+
+const size_t powerpc_event_codes_size =
+ sizeof(powerpc_event_codes) / sizeof(powerpc_event_codes[0]);
int
pmc_save_kernel_callchain(uintptr_t *cc, int maxsamples,
@@ -55,6 +333,520 @@ pmc_save_kernel_callchain(uintptr_t *cc, int maxsamples,
return (0);
}
+static pmc_value_t
+powerpc_pmcn_read(unsigned int pmc)
+{
+ switch (pmc) {
+ case 0:
+ return mfspr(SPR_PMC1);
+ break;
+ case 1:
+ return mfspr(SPR_PMC2);
+ break;
+ case 2:
+ return mfspr(SPR_PMC3);
+ break;
+ case 3:
+ return mfspr(SPR_PMC4);
+ break;
+ case 4:
+ return mfspr(SPR_PMC5);
+ break;
+ case 5:
+ return mfspr(SPR_PMC6);
+ default:
+ panic("Invalid PMC number: %d\n", pmc);
+ }
+}
+
+static void
+powerpc_pmcn_write(unsigned int pmc, uint32_t val)
+{
+ switch (pmc) {
+ case 0:
+ mtspr(SPR_PMC1, val);
+ break;
+ case 1:
+ mtspr(SPR_PMC2, val);
+ break;
+ case 2:
+ mtspr(SPR_PMC3, val);
+ break;
+ case 3:
+ mtspr(SPR_PMC4, val);
+ break;
+ case 4:
+ mtspr(SPR_PMC5, val);
+ break;
+ case 5:
+ mtspr(SPR_PMC6, val);
+ break;
+ default:
+ panic("Invalid PMC number: %d\n", pmc);
+ }
+}
+
+static int
+powerpc_allocate_pmc(int cpu, int ri, struct pmc *pm,
+ const struct pmc_op_pmcallocate *a)
+{
+ enum pmc_event pe;
+ uint32_t caps, config, counter;
+ int i;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[powerpc,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < PPC_MAX_PMCS,
+ ("[powerpc,%d] illegal row index %d", __LINE__, ri));
+
+ caps = a->pm_caps;
+
+ /*
+ * TODO: Check actual class for different generations.
+ */
+ if (a->pm_class != PMC_CLASS_PPC7450)
+ return (EINVAL);
+ pe = a->pm_ev;
+ for (i = 0; i < powerpc_event_codes_size; i++) {
+ if (powerpc_event_codes[i].pe_ev == pe) {
+ config = powerpc_event_codes[i].pe_code;
+ counter = powerpc_event_codes[i].pe_counter_mask;
+ break;
+ }
+ }
+ if (i == powerpc_event_codes_size)
+ return (EINVAL);
+
+ if ((counter & (1 << ri)) == 0)
+ return (EINVAL);
+
+ if (caps & PMC_CAP_SYSTEM)
+ config |= POWERPC_PMC_KERNEL_ENABLE;
+ if (caps & PMC_CAP_USER)
+ config |= POWERPC_PMC_USER_ENABLE;
+ if ((caps & (PMC_CAP_USER | PMC_CAP_SYSTEM)) == 0)
+ config |= POWERPC_PMC_ENABLE;
+
+ pm->pm_md.pm_powerpc.pm_powerpc_evsel = config;
+
+ PMCDBG(MDP,ALL,2,"powerpc-allocate ri=%d -> config=0x%x", ri, config);
+
+ return 0;
+}
+
+static int
+powerpc_read_pmc(int cpu, int ri, pmc_value_t *v)
+{
+ struct pmc *pm;
+ pmc_value_t tmp;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[powerpc,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < PPC_MAX_PMCS,
+ ("[powerpc,%d] illegal row index %d", __LINE__, ri));
+
+ pm = powerpc_pcpu[cpu]->pc_ppcpmcs[ri].phw_pmc;
+ tmp = powerpc_pmcn_read(ri);
+ PMCDBG(MDP,REA,2,"ppc-read id=%d -> %jd", ri, tmp);
+ if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
+ *v = POWERPC_PERFCTR_VALUE_TO_RELOAD_COUNT(tmp);
+ else
+ *v = tmp;
+
+ return 0;
+}
+
+static int
+powerpc_write_pmc(int cpu, int ri, pmc_value_t v)
+{
+ struct pmc *pm;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[powerpc,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < PPC_MAX_PMCS,
+ ("[powerpc,%d] illegal row-index %d", __LINE__, ri));
+
+ pm = powerpc_pcpu[cpu]->pc_ppcpmcs[ri].phw_pmc;
+
+ if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
+ v = POWERPC_RELOAD_COUNT_TO_PERFCTR_VALUE(v);
+
+ PMCDBG(MDP,WRI,1,"powerpc-write cpu=%d ri=%d v=%jx", cpu, ri, v);
+
+ powerpc_pmcn_write(ri, v);
+
+ return 0;
+}
+
+static int
+powerpc_config_pmc(int cpu, int ri, struct pmc *pm)
+{
+ struct pmc_hw *phw;
+
+ PMCDBG(MDP,CFG,1, "cpu=%d ri=%d pm=%p", cpu, ri, pm);
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[powerpc,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < PPC_MAX_PMCS,
+ ("[powerpc,%d] illegal row-index %d", __LINE__, ri));
+
+ phw = &powerpc_pcpu[cpu]->pc_ppcpmcs[ri];
+
+ KASSERT(pm == NULL || phw->phw_pmc == NULL,
+ ("[powerpc,%d] pm=%p phw->pm=%p hwpmc not unconfigured",
+ __LINE__, pm, phw->phw_pmc));
+
+ phw->phw_pmc = pm;
+
+ return 0;
+}
+
+static int
+powerpc_start_pmc(int cpu, int ri)
+{
+ uint32_t config;
+ struct pmc *pm;
+ struct pmc_hw *phw;
+ register_t pmc_mmcr;
+
+ phw = &powerpc_pcpu[cpu]->pc_ppcpmcs[ri];
+ pm = phw->phw_pmc;
+ config = pm->pm_md.pm_powerpc.pm_powerpc_evsel & ~POWERPC_PMC_ENABLE;
+
+ /* Enable the PMC. */
+ switch (ri) {
+ case 0:
+ pmc_mmcr = mfspr(SPR_MMCR0);
+ pmc_mmcr = PPC_SET_PMC1SEL(pmc_mmcr, config);
+ mtspr(SPR_MMCR0, pmc_mmcr);
+ break;
+ case 1:
+ pmc_mmcr = mfspr(SPR_MMCR0);
+ pmc_mmcr = PPC_SET_PMC2SEL(pmc_mmcr, config);
+ mtspr(SPR_MMCR0, pmc_mmcr);
+ break;
+ case 2:
+ pmc_mmcr = mfspr(SPR_MMCR1);
+ pmc_mmcr = PPC_SET_PMC3SEL(pmc_mmcr, config);
+ mtspr(SPR_MMCR1, pmc_mmcr);
+ break;
+ case 3:
+ pmc_mmcr = mfspr(SPR_MMCR0);
+ pmc_mmcr = PPC_SET_PMC4SEL(pmc_mmcr, config);
+ mtspr(SPR_MMCR0, pmc_mmcr);
+ break;
+ case 4:
+ pmc_mmcr = mfspr(SPR_MMCR1);
+ pmc_mmcr = PPC_SET_PMC5SEL(pmc_mmcr, config);
+ mtspr(SPR_MMCR1, pmc_mmcr);
+ break;
+ case 5:
+ pmc_mmcr = mfspr(SPR_MMCR1);
+ pmc_mmcr = PPC_SET_PMC6SEL(pmc_mmcr, config);
+ mtspr(SPR_MMCR1, pmc_mmcr);
+ break;
+ default:
+ break;
+ }
+
+ /* The mask is inverted (enable is 1) compared to the flags in MMCR0, which
+ * are Freeze flags.
+ */
+ config = ~pm->pm_md.pm_powerpc.pm_powerpc_evsel & POWERPC_PMC_ENABLE;
+
+ pmc_mmcr = mfspr(SPR_MMCR0);
+ pmc_mmcr &= ~SPR_MMCR0_FC;
+ pmc_mmcr |= config;
+ mtspr(SPR_MMCR0, pmc_mmcr);
+
+ return 0;
+}
+
+static int
+powerpc_stop_pmc(int cpu, int ri)
+{
+ struct pmc *pm;
+ struct pmc_hw *phw;
+ register_t pmc_mmcr;
+
+ phw = &powerpc_pcpu[cpu]->pc_ppcpmcs[ri];
+ pm = phw->phw_pmc;
+
+ /*
+ * Disable the PMCs.
+ */
+ switch (ri) {
+ case 0:
+ pmc_mmcr = mfspr(SPR_MMCR0);
+ pmc_mmcr = PPC_SET_PMC1SEL(pmc_mmcr, 0);
+ mtspr(SPR_MMCR0, pmc_mmcr);
+ break;
+ case 1:
+ pmc_mmcr = mfspr(SPR_MMCR0);
+ pmc_mmcr = PPC_SET_PMC2SEL(pmc_mmcr, 0);
+ mtspr(SPR_MMCR0, pmc_mmcr);
+ break;
+ case 2:
+ pmc_mmcr = mfspr(SPR_MMCR1);
+ pmc_mmcr = PPC_SET_PMC3SEL(pmc_mmcr, 0);
+ mtspr(SPR_MMCR1, pmc_mmcr);
+ break;
+ case 3:
+ pmc_mmcr = mfspr(SPR_MMCR0);
+ pmc_mmcr = PPC_SET_PMC4SEL(pmc_mmcr, 0);
+ mtspr(SPR_MMCR0, pmc_mmcr);
+ break;
+ case 4:
+ pmc_mmcr = mfspr(SPR_MMCR1);
+ pmc_mmcr = PPC_SET_PMC5SEL(pmc_mmcr, 0);
+ mtspr(SPR_MMCR1, pmc_mmcr);
+ break;
+ case 5:
+ pmc_mmcr = mfspr(SPR_MMCR1);
+ pmc_mmcr = PPC_SET_PMC6SEL(pmc_mmcr, 0);
+ mtspr(SPR_MMCR1, pmc_mmcr);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int
+powerpc_release_pmc(int cpu, int ri, struct pmc *pmc)
+{
+ struct pmc_hw *phw;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[powerpc,%d] illegal CPU value %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < PPC_MAX_PMCS,
+ ("[powerpc,%d] illegal row-index %d", __LINE__, ri));
+
+ phw = &powerpc_pcpu[cpu]->pc_ppcpmcs[ri];
+ KASSERT(phw->phw_pmc == NULL,
+ ("[powerpc,%d] PHW pmc %p non-NULL", __LINE__, phw->phw_pmc));
+
+ return 0;
+}
+
+static int
+powerpc_switch_in(struct pmc_cpu *pc, struct pmc_process *pp)
+{
+ return 0;
+}
+
+static int
+powerpc_switch_out(struct pmc_cpu *pc, struct pmc_process *pp)
+{
+ return 0;
+}
+
+static int
+powerpc_intr(int cpu, struct trapframe *tf)
+{
+ int i, error, retval;
+ uint32_t config;
+ struct pmc *pm;
+ struct powerpc_cpu *pac;
+ pmc_value_t v;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[powerpc,%d] out of range CPU %d", __LINE__, cpu));
+
+ PMCDBG(MDP,INT,1, "cpu=%d tf=%p um=%d", cpu, (void *) tf,
+ TRAPF_USERMODE(tf));
+
+ retval = 0;
+
+ pac = powerpc_pcpu[cpu];
+
+ /*
+ * look for all PMCs that have interrupted:
+ * - look for a running, sampling PMC which has overflowed
+ * and which has a valid 'struct pmc' association
+ *
+ * If found, we call a helper to process the interrupt.
+ */
+
+ for (i = 0; i < PPC_MAX_PMCS; i++) {
+ if ((pm = pac->pc_ppcpmcs[i].phw_pmc) == NULL ||
+ !PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) {
+ continue;
+ }
+
+ if (!POWERPC_PMC_HAS_OVERFLOWED(i))
+ continue;
+
+ retval = 1; /* Found an interrupting PMC. */
+
+ if (pm->pm_state != PMC_STATE_RUNNING)
+ continue;
+
+ /* Stop the PMC, reload count. */
+ v = pm->pm_sc.pm_reloadcount;
+ config = mfspr(SPR_MMCR0);
+
+ KASSERT((config & ~POWERPC_PMC_ENABLE) ==
+ (pm->pm_md.pm_powerpc.pm_powerpc_evsel & ~POWERPC_PMC_ENABLE),
+ ("[powerpc,%d] config mismatch reg=0x%x pm=0x%x", __LINE__,
+ config, pm->pm_md.pm_powerpc.pm_powerpc_evsel));
+
+ mtspr(SPR_MMCR0, config | SPR_MMCR0_FC);
+ powerpc_pmcn_write(i, v);
+
+ /* Restart the counter if logging succeeded. */
+ error = pmc_process_interrupt(cpu, pm, tf, TRAPF_USERMODE(tf));
+ mtspr(SPR_MMCR0, config);
+ if (error != 0)
+ powerpc_stop_pmc(cpu, i);
+ atomic_add_int(retval ? &pmc_stats.pm_intr_processed :
+ &pmc_stats.pm_intr_ignored, 1);
+
+ }
+
+ /* Re-enable PERF exceptions. */
+ mtspr(SPR_MMCR0, mfspr(SPR_MMCR0) | SPR_MMCR0_PMXE);
+
+ return (retval);
+}
+
+static int
+powerpc_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc)
+{
+ int error;
+ struct pmc_hw *phw;
+ char powerpc_name[PMC_NAME_MAX];
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[powerpc,%d], illegal CPU %d", __LINE__, cpu));
+ KASSERT(ri >= 0 && ri < PPC_MAX_PMCS,
+ ("[powerpc,%d] row-index %d out of range", __LINE__, ri));
+
+ phw = &powerpc_pcpu[cpu]->pc_ppcpmcs[ri];
+ snprintf(powerpc_name, sizeof(powerpc_name), "POWERPC-%d", ri);
+ if ((error = copystr(powerpc_name, pi->pm_name, PMC_NAME_MAX,
+ NULL)) != 0)
+ return error;
+ pi->pm_class = PMC_CLASS_PPC7450;
+ if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) {
+ pi->pm_enabled = TRUE;
+ *ppmc = phw->phw_pmc;
+ } else {
+ pi->pm_enabled = FALSE;
+ *ppmc = NULL;
+ }
+
+ return (0);
+}
+
+static int
+powerpc_get_config(int cpu, int ri, struct pmc **ppm)
+{
+ *ppm = powerpc_pcpu[cpu]->pc_ppcpmcs[ri].phw_pmc;
+
+ return 0;
+}
+
+static int
+powerpc_pcpu_init(struct pmc_mdep *md, int cpu)
+{
+ int first_ri, i;
+ struct pmc_cpu *pc;
+ struct powerpc_cpu *pac;
+ struct pmc_hw *phw;
+
+ KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
+ ("[powerpc,%d] wrong cpu number %d", __LINE__, cpu));
+ PMCDBG(MDP,INI,1,"powerpc-init cpu=%d", cpu);
+
+ powerpc_pcpu[cpu] = pac = malloc(sizeof(struct powerpc_cpu), M_PMC,
+ M_WAITOK|M_ZERO);
+ pac->pc_ppcpmcs = malloc(sizeof(struct pmc_hw) * PPC_MAX_PMCS,
+ M_PMC, M_WAITOK|M_ZERO);
+ pc = pmc_pcpu[cpu];
+ first_ri = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_PPC7450].pcd_ri;
+ KASSERT(pc != NULL, ("[powerpc,%d] NULL per-cpu pointer", __LINE__));
+
+ for (i = 0, phw = pac->pc_ppcpmcs; i < PPC_MAX_PMCS; i++, phw++) {
+ phw->phw_state = PMC_PHW_FLAG_IS_ENABLED |
+ PMC_PHW_CPU_TO_STATE(cpu) | PMC_PHW_INDEX_TO_STATE(i);
+ phw->phw_pmc = NULL;
+ pc->pc_hwpmcs[i + first_ri] = phw;
+ }
+
+ /* Clear the MMCRs, and set FC, to disable all PMCs. */
+ mtspr(SPR_MMCR0, SPR_MMCR0_FC | SPR_MMCR0_PMXE | SPR_MMCR0_PMC1CE | SPR_MMCR0_PMCNCE);
+ mtspr(SPR_MMCR1, 0);
+
+ return 0;
+}
+
+static int
+powerpc_pcpu_fini(struct pmc_mdep *md, int cpu)
+{
+ uint32_t mmcr0 = mfspr(SPR_MMCR0);
+
+ mmcr0 |= SPR_MMCR0_FC;
+ mtspr(SPR_MMCR0, mmcr0);
+ free(powerpc_pcpu[cpu]->pc_ppcpmcs, M_PMC);
+ free(powerpc_pcpu[cpu], M_PMC);
+ return 0;
+}
+
+struct pmc_mdep *
+pmc_md_initialize()
+{
+ struct pmc_mdep *pmc_mdep;
+ struct pmc_classdep *pcd;
+
+ /*
+ * Allocate space for pointers to PMC HW descriptors and for
+ * the MDEP structure used by MI code.
+ */
+ powerpc_pcpu = malloc(sizeof(struct powerpc_cpu *) * pmc_cpu_max(), M_PMC,
+ M_WAITOK|M_ZERO);
+
+ /* Just one class */
+ pmc_mdep = malloc(sizeof(struct pmc_mdep) + sizeof(struct pmc_classdep),
+ M_PMC, M_WAITOK|M_ZERO);
+
+ pmc_mdep->pmd_cputype = PMC_CPU_PPC_7450;
+ pmc_mdep->pmd_nclass = 1;
+
+ pcd = &pmc_mdep->pmd_classdep[PMC_MDEP_CLASS_INDEX_PPC7450];
+ pcd->pcd_caps = POWERPC_PMC_CAPS;
+ pcd->pcd_class = PMC_CLASS_PPC7450;
+ pcd->pcd_num = PPC_MAX_PMCS;
+ pcd->pcd_ri = pmc_mdep->pmd_npmc;
+ pcd->pcd_width = 32; /* All PMCs, even in ppc970, are 32-bit */
+
+ pcd->pcd_allocate_pmc = powerpc_allocate_pmc;
+ pcd->pcd_config_pmc = powerpc_config_pmc;
+ pcd->pcd_pcpu_fini = powerpc_pcpu_fini;
+ pcd->pcd_pcpu_init = powerpc_pcpu_init;
+ pcd->pcd_describe = powerpc_describe;
+ pcd->pcd_get_config = powerpc_get_config;
+ pcd->pcd_read_pmc = powerpc_read_pmc;
+ pcd->pcd_release_pmc = powerpc_release_pmc;
+ pcd->pcd_start_pmc = powerpc_start_pmc;
+ pcd->pcd_stop_pmc = powerpc_stop_pmc;
+ pcd->pcd_write_pmc = powerpc_write_pmc;
+
+ pmc_mdep->pmd_intr = powerpc_intr;
+ pmc_mdep->pmd_switch_in = powerpc_switch_in;
+ pmc_mdep->pmd_switch_out = powerpc_switch_out;
+
+ pmc_mdep->pmd_npmc += PPC_MAX_PMCS;
+
+ return (pmc_mdep);
+}
+
+void
+pmc_md_finalize(struct pmc_mdep *md)
+{
+ free(md, M_PMC);
+}
+
int
pmc_save_user_callchain(uintptr_t *cc, int maxsamples,
struct trapframe *tf)
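
Sampling support above hinges on the 7450 raising a performance-monitor exception once bit 31 of a PMC becomes set, so powerpc_write_pmc() preloads a sampling counter with 0x80000000 minus the reload count. The following is a small standalone sketch of that arithmetic, not part of the patch, assuming only the macro definition shown in the diff.

#include <stdint.h>
#include <stdio.h>

/* Same definition as in hwpmc_powerpc.c above. */
#define	POWERPC_RELOAD_COUNT_TO_PERFCTR_VALUE(V)	(0x80000000 - (V))

int
main(void)
{
	uint32_t reload = 10000;	/* take a sample every 10000 events */
	uint32_t preload = POWERPC_RELOAD_COUNT_TO_PERFCTR_VALUE(reload);

	/*
	 * Prints 0x7fffd8f0: after 10000 increments the counter reaches
	 * 0x80000000, bit 31 becomes set, POWERPC_PMC_HAS_OVERFLOWED() is
	 * true and, with SPR_MMCR0_PMXE enabled, the CPU raises the
	 * performance-monitor exception that powerpc_intr() handles.
	 */
	printf("counter preload = 0x%08x\n", preload);
	return (0);
}
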
diff --git a/sys/dev/hwpmc/pmc_events.h b/sys/dev/hwpmc/pmc_events.h
index 9fcf926..347e833 100644
--- a/sys/dev/hwpmc/pmc_events.h
+++ b/sys/dev/hwpmc/pmc_events.h
@@ -3093,6 +3093,231 @@ __PMC_EV_ALIAS("CYCLES_UNHALTED_L3_FLL_DISABLE", UCP_EVENT_86H_01H)
#define PMC_EV_MIPS24K_FIRST PMC_EV_MIPS24K_CYCLE
#define PMC_EV_MIPS24K_LAST PMC_EV_MIPS24K_WBB_FULL_PIPELINE_STALLS
+#define __PMC_EV_PPC7450() \
+ __PMC_EV(PPC7450, CYCLE) \
+ __PMC_EV(PPC7450, INSTR_COMPLETED) \
+ __PMC_EV(PPC7450, TLB_BIT_TRANSITIONS) \
+ __PMC_EV(PPC7450, INSTR_DISPATCHED) \
+ __PMC_EV(PPC7450, PMON_EXCEPT) \
+ __PMC_EV(PPC7450, PMON_SIG) \
+ __PMC_EV(PPC7450, VPU_INSTR_COMPLETED) \
+ __PMC_EV(PPC7450, VFPU_INSTR_COMPLETED) \
+ __PMC_EV(PPC7450, VIU1_INSTR_COMPLETED) \
+ __PMC_EV(PPC7450, VIU2_INSTR_COMPLETED) \
+ __PMC_EV(PPC7450, MTVSCR_INSTR_COMPLETED) \
+ __PMC_EV(PPC7450, MTVRSAVE_INSTR_COMPLETED) \
+ __PMC_EV(PPC7450, VPU_INSTR_WAIT_CYCLES) \
+ __PMC_EV(PPC7450, VFPU_INSTR_WAIT_CYCLES) \
+ __PMC_EV(PPC7450, VIU1_INSTR_WAIT_CYCLES) \
+ __PMC_EV(PPC7450, VIU2_INSTR_WAIT_CYCLES) \
+ __PMC_EV(PPC7450, MFVSCR_SYNC_CYCLES) \
+ __PMC_EV(PPC7450, VSCR_SAT_SET) \
+ __PMC_EV(PPC7450, STORE_INSTR_COMPLETED) \
+ __PMC_EV(PPC7450, L1_INSTR_CACHE_MISSES) \
+ __PMC_EV(PPC7450, L1_DATA_SNOOPS) \
+ __PMC_EV(PPC7450, UNRESOLVED_BRANCHES) \
+ __PMC_EV(PPC7450, SPEC_BUFFER_CYCLES) \
+ __PMC_EV(PPC7450, BRANCH_UNIT_STALL_CYCLES) \
+ __PMC_EV(PPC7450, TRUE_BRANCH_TARGET_HITS) \
+ __PMC_EV(PPC7450, BRANCH_LINK_STAC_PREDICTED) \
+ __PMC_EV(PPC7450, GPR_ISSUE_QUEUE_DISPATCHES) \
+ __PMC_EV(PPC7450, CYCLES_THREE_INSTR_DISPATCHED) \
+ __PMC_EV(PPC7450, THRESHOLD_INSTR_QUEUE_ENTRIES_CYCLES) \
+ __PMC_EV(PPC7450, THRESHOLD_VEC_INSTR_QUEUE_ENTRIES_CYCLES) \
+ __PMC_EV(PPC7450, CYCLES_NO_COMPLETED_INSTRS) \
+ __PMC_EV(PPC7450, IU2_INSTR_COMPLETED) \
+ __PMC_EV(PPC7450, BRANCHES_COMPLETED) \
+ __PMC_EV(PPC7450, EIEIO_INSTR_COMPLETED) \
+ __PMC_EV(PPC7450, MTSPR_INSTR_COMPLETED) \
+ __PMC_EV(PPC7450, SC_INSTR_COMPLETED) \
+ __PMC_EV(PPC7450, LS_LM_COMPLETED) \
+ __PMC_EV(PPC7450, ITLB_HW_TABLE_SEARCH_CYCLES) \
+ __PMC_EV(PPC7450, DTLB_HW_SEARCH_CYCLES_OVER_THRESHOLD) \
+ __PMC_EV(PPC7450, L1_INSTR_CACHE_ACCESSES) \
+ __PMC_EV(PPC7450, INSTR_BKPT_MATCHES) \
+ __PMC_EV(PPC7450, L1_DATA_CACHE_LOAD_MISS_CYCLES_OVER_THRESHOLD) \
+ __PMC_EV(PPC7450, L1_DATA_SNOOP_HIT_ON_MODIFIED) \
+ __PMC_EV(PPC7450, LOAD_MISS_ALIAS) \
+ __PMC_EV(PPC7450, LOAD_MISS_ALIAS_ON_TOUCH) \
+ __PMC_EV(PPC7450, TOUCH_ALIAS) \
+ __PMC_EV(PPC7450, L1_DATA_SNOOP_HIT_CASTOUT_QUEUE) \
+ __PMC_EV(PPC7450, L1_DATA_SNOOP_HIT_CASTOUT) \
+ __PMC_EV(PPC7450, L1_DATA_SNOOP_HITS) \
+ __PMC_EV(PPC7450, WRITE_THROUGH_STORES) \
+ __PMC_EV(PPC7450, CACHE_INHIBITED_STORES) \
+ __PMC_EV(PPC7450, L1_DATA_LOAD_HIT) \
+ __PMC_EV(PPC7450, L1_DATA_TOUCH_HIT) \
+ __PMC_EV(PPC7450, L1_DATA_STORE_HIT) \
+ __PMC_EV(PPC7450, L1_DATA_TOTAL_HITS) \
+ __PMC_EV(PPC7450, DST_INSTR_DISPATCHED) \
+ __PMC_EV(PPC7450, REFRESHED_DSTS) \
+ __PMC_EV(PPC7450, SUCCESSFUL_DST_TABLE_SEARCHES) \
+ __PMC_EV(PPC7450, DSS_INSTR_COMPLETED) \
+ __PMC_EV(PPC7450, DST_STREAM_0_CACHE_LINE_FETCHES) \
+ __PMC_EV(PPC7450, VTQ_SUSPENDS_DUE_TO_CTX_CHANGE) \
+ __PMC_EV(PPC7450, VTQ_LINE_FETCH_HIT) \
+ __PMC_EV(PPC7450, VEC_LOAD_INSTR_COMPLETED) \
+ __PMC_EV(PPC7450, FP_STORE_INSTR_COMPLETED_IN_LSU) \
+ __PMC_EV(PPC7450, FPU_RENORMALIZATION) \
+ __PMC_EV(PPC7450, FPU_DENORMALIZATION) \
+ __PMC_EV(PPC7450, FP_STORE_CAUSES_STALL_IN_LSU) \
+ __PMC_EV(PPC7450, LD_ST_TRUE_ALIAS_STALL) \
+ __PMC_EV(PPC7450, LSU_INDEXED_ALIAS_STALL) \
+ __PMC_EV(PPC7450, LSU_ALIAS_VS_FSQ_WB0_WB1) \
+ __PMC_EV(PPC7450, LSU_ALIAS_VS_CSQ) \
+ __PMC_EV(PPC7450, LSU_LOAD_HIT_LINE_ALIAS_VS_CSQ0) \
+ __PMC_EV(PPC7450, LSU_LOAD_MISS_LINE_ALIAS_VS_CSQ0) \
+ __PMC_EV(PPC7450, LSU_TOUCH_LINE_ALIAS_VS_FSQ_WB0_WB1) \
+ __PMC_EV(PPC7450, LSU_TOUCH_ALIAS_VS_CSQ) \
+ __PMC_EV(PPC7450, LSU_LMQ_FULL_STALL) \
+ __PMC_EV(PPC7450, FP_LOAD_INSTR_COMPLETED_IN_LSU) \
+ __PMC_EV(PPC7450, FP_LOAD_SINGLE_INSTR_COMPLETED_IN_LSU) \
+ __PMC_EV(PPC7450, FP_LOAD_DOUBLE_COMPLETED_IN_LSU) \
+ __PMC_EV(PPC7450, LSU_RA_LATCH_STALL) \
+ __PMC_EV(PPC7450, LSU_LOAD_VS_STORE_QUEUE_ALIAS_STALL) \
+ __PMC_EV(PPC7450, LSU_LMQ_INDEX_ALIAS) \
+ __PMC_EV(PPC7450, LSU_STORE_QUEUE_INDEX_ALIAS) \
+ __PMC_EV(PPC7450, LSU_CSQ_FORWARDING) \
+ __PMC_EV(PPC7450, LSU_MISALIGNED_LOAD_FINISH) \
+ __PMC_EV(PPC7450, LSU_MISALIGN_STORE_COMPLETED) \
+ __PMC_EV(PPC7450, LSU_MISALIGN_STALL) \
+ __PMC_EV(PPC7450, FP_ONE_QUARTER_FPSCR_RENAMES_BUSY) \
+ __PMC_EV(PPC7450, FP_ONE_HALF_FPSCR_RENAMES_BUSY) \
+ __PMC_EV(PPC7450, FP_THREE_QUARTERS_FPSCR_RENAMES_BUSY) \
+ __PMC_EV(PPC7450, FP_ALL_FPSCR_RENAMES_BUSY) \
+ __PMC_EV(PPC7450, FP_DENORMALIZED_RESULT) \
+ __PMC_EV(PPC7450, L1_DATA_TOTAL_MISSES) \
+ __PMC_EV(PPC7450, DISPATCHES_TO_FPR_ISSUE_QUEUE) \
+ __PMC_EV(PPC7450, LSU_INSTR_COMPLETED) \
+ __PMC_EV(PPC7450, LOAD_INSTR_COMPLETED) \
+ __PMC_EV(PPC7450, SS_SM_INSTR_COMPLETED) \
+ __PMC_EV(PPC7450, TLBIE_INSTR_COMPLETED) \
+ __PMC_EV(PPC7450, LWARX_INSTR_COMPLETED) \
+ __PMC_EV(PPC7450, MFSPR_INSTR_COMPLETED) \
+ __PMC_EV(PPC7450, REFETCH_SERIALIZATION) \
+ __PMC_EV(PPC7450, COMPLETION_QUEUE_ENTRIES_OVER_THRESHOLD) \
+ __PMC_EV(PPC7450, CYCLES_ONE_INSTR_DISPATCHED) \
+ __PMC_EV(PPC7450, CYCLES_TWO_INSTR_COMPLETED) \
+ __PMC_EV(PPC7450, ITLB_NON_SPECULATIVE_MISSES) \
+ __PMC_EV(PPC7450, CYCLES_WAITING_FROM_L1_INSTR_CACHE_MISS) \
+ __PMC_EV(PPC7450, L1_DATA_LOAD_ACCESS_MISS) \
+ __PMC_EV(PPC7450, L1_DATA_TOUCH_MISS) \
+ __PMC_EV(PPC7450, L1_DATA_STORE_MISS) \
+ __PMC_EV(PPC7450, L1_DATA_TOUCH_MISS_CYCLES) \
+ __PMC_EV(PPC7450, L1_DATA_CYCLES_USED) \
+ __PMC_EV(PPC7450, DST_STREAM_1_CACHE_LINE_FETCHES) \
+ __PMC_EV(PPC7450, VTQ_STREAM_CANCELED_PREMATURELY) \
+ __PMC_EV(PPC7450, VTQ_RESUMES_DUE_TO_CTX_CHANGE) \
+ __PMC_EV(PPC7450, VTQ_LINE_FETCH_MISS) \
+ __PMC_EV(PPC7450, VTQ_LINE_FETCH) \
+ __PMC_EV(PPC7450, TLBIE_SNOOPS) \
+ __PMC_EV(PPC7450, L1_INSTR_CACHE_RELOADS) \
+ __PMC_EV(PPC7450, L1_DATA_CACHE_RELOADS) \
+ __PMC_EV(PPC7450, L1_DATA_CACHE_CASTOUTS_TO_L2) \
+ __PMC_EV(PPC7450, STORE_MERGE_GATHER) \
+ __PMC_EV(PPC7450, CACHEABLE_STORE_MERGE_TO_32_BYTES) \
+ __PMC_EV(PPC7450, DATA_BKPT_MATCHES) \
+ __PMC_EV(PPC7450, FALL_THROUGH_BRANCHES_PROCESSED) \
+ __PMC_EV(PPC7450, FIRST_SPECULATIVE_BRANCH_BUFFER_RESOLVED_CORRECTLY) \
+ __PMC_EV(PPC7450, SECOND_SPECULATION_BUFFER_ACTIVE) \
+ __PMC_EV(PPC7450, BPU_STALL_ON_LR_DEPENDENCY) \
+ __PMC_EV(PPC7450, BTIC_MISS) \
+ __PMC_EV(PPC7450, BRANCH_LINK_STACK_CORRECTLY_RESOLVED) \
+ __PMC_EV(PPC7450, FPR_ISSUE_STALLED) \
+ __PMC_EV(PPC7450, SWITCHES_BETWEEN_PRIV_USER) \
+ __PMC_EV(PPC7450, LSU_COMPLETES_FP_STORE_SINGLE) \
+ __PMC_EV(PPC7450, VR_ISSUE_QUEUE_DISPATCHES) \
+ __PMC_EV(PPC7450, VR_STALLS) \
+ __PMC_EV(PPC7450, GPR_RENAME_BUFFER_ENTRIES_OVER_THRESHOLD) \
+ __PMC_EV(PPC7450, FPR_ISSUE_QUEUE_ENTRIES) \
+ __PMC_EV(PPC7450, FPU_INSTR_COMPLETED) \
+ __PMC_EV(PPC7450, STWCX_INSTR_COMPLETED) \
+ __PMC_EV(PPC7450, LS_LM_INSTR_PIECES) \
+ __PMC_EV(PPC7450, ITLB_HW_SEARCH_CYCLES_OVER_THRESHOLD) \
+ __PMC_EV(PPC7450, DTLB_MISSES) \
+ __PMC_EV(PPC7450, CANCELLED_L1_INSTR_CACHE_MISSES) \
+ __PMC_EV(PPC7450, L1_DATA_CACHE_OP_HIT) \
+ __PMC_EV(PPC7450, L1_DATA_LOAD_MISS_CYCLES) \
+ __PMC_EV(PPC7450, L1_DATA_PUSHES) \
+ __PMC_EV(PPC7450, L1_DATA_TOTAL_MISS) \
+ __PMC_EV(PPC7450, VT2_FETCHES) \
+ __PMC_EV(PPC7450, TAKEN_BRANCHES_PROCESSED) \
+ __PMC_EV(PPC7450, BRANCH_FLUSHES) \
+ __PMC_EV(PPC7450, SECOND_SPECULATIVE_BRANCH_BUFFER_RESOLVED_CORRECTLY) \
+ __PMC_EV(PPC7450, THIRD_SPECULATION_BUFFER_ACTIVE) \
+ __PMC_EV(PPC7450, BRANCH_UNIT_STALL_ON_CTR_DEPENDENCY) \
+ __PMC_EV(PPC7450, FAST_BTIC_HIT) \
+ __PMC_EV(PPC7450, BRANCH_LINK_STACK_MISPREDICTED) \
+ __PMC_EV(PPC7450, CYCLES_THREE_INSTR_COMPLETED) \
+ __PMC_EV(PPC7450, CYCLES_NO_INSTR_DISPATCHED) \
+ __PMC_EV(PPC7450, GPR_ISSUE_QUEUE_ENTRIES_OVER_THRESHOLD) \
+ __PMC_EV(PPC7450, GPR_ISSUE_QUEUE_STALLED) \
+ __PMC_EV(PPC7450, IU1_INSTR_COMPLETED) \
+ __PMC_EV(PPC7450, DSSALL_INSTR_COMPLETED) \
+ __PMC_EV(PPC7450, TLBSYNC_INSTR_COMPLETED) \
+ __PMC_EV(PPC7450, SYNC_INSTR_COMPLETED) \
+ __PMC_EV(PPC7450, SS_SM_INSTR_PIECES) \
+ __PMC_EV(PPC7450, DTLB_HW_SEARCH_CYCLES) \
+ __PMC_EV(PPC7450, SNOOP_RETRIES) \
+ __PMC_EV(PPC7450, SUCCESSFUL_STWCX) \
+ __PMC_EV(PPC7450, DST_STREAM_3_CACHE_LINE_FETCHES) \
+ __PMC_EV(PPC7450, THIRD_SPECULATIVE_BRANCH_BUFFER_RESOLVED_CORRECTLY) \
+ __PMC_EV(PPC7450, MISPREDICTED_BRANCHES) \
+ __PMC_EV(PPC7450, FOLDED_BRANCHES) \
+ __PMC_EV(PPC7450, FP_STORE_DOUBLE_COMPLETES_IN_LSU) \
+ __PMC_EV(PPC7450, L2_CACHE_HITS) \
+ __PMC_EV(PPC7450, L3_CACHE_HITS) \
+ __PMC_EV(PPC7450, L2_INSTR_CACHE_MISSES) \
+ __PMC_EV(PPC7450, L3_INSTR_CACHE_MISSES) \
+ __PMC_EV(PPC7450, L2_DATA_CACHE_MISSES) \
+ __PMC_EV(PPC7450, L3_DATA_CACHE_MISSES) \
+ __PMC_EV(PPC7450, L2_LOAD_HITS) \
+ __PMC_EV(PPC7450, L2_STORE_HITS) \
+ __PMC_EV(PPC7450, L3_LOAD_HITS) \
+ __PMC_EV(PPC7450, L3_STORE_HITS) \
+ __PMC_EV(PPC7450, L2_TOUCH_HITS) \
+ __PMC_EV(PPC7450, L3_TOUCH_HITS) \
+ __PMC_EV(PPC7450, SNOOP_MODIFIED) \
+ __PMC_EV(PPC7450, SNOOP_VALID) \
+ __PMC_EV(PPC7450, INTERVENTION) \
+ __PMC_EV(PPC7450, L2_CACHE_MISSES) \
+ __PMC_EV(PPC7450, L3_CACHE_MISSES) \
+ __PMC_EV(PPC7450, L2_CACHE_CASTOUTS) \
+ __PMC_EV(PPC7450, L3_CACHE_CASTOUTS) \
+ __PMC_EV(PPC7450, L2SQ_FULL_CYCLES) \
+ __PMC_EV(PPC7450, L3SQ_FULL_CYCLES) \
+ __PMC_EV(PPC7450, RAQ_FULL_CYCLES) \
+ __PMC_EV(PPC7450, WAQ_FULL_CYCLES) \
+ __PMC_EV(PPC7450, L1_EXTERNAL_INTERVENTIONS) \
+ __PMC_EV(PPC7450, L2_EXTERNAL_INTERVENTIONS) \
+ __PMC_EV(PPC7450, L3_EXTERNAL_INTERVENTIONS) \
+ __PMC_EV(PPC7450, EXTERNAL_INTERVENTIONS) \
+ __PMC_EV(PPC7450, EXTERNAL_PUSHES) \
+ __PMC_EV(PPC7450, EXTERNAL_SNOOP_RETRY) \
+ __PMC_EV(PPC7450, DTQ_FULL_CYCLES) \
+ __PMC_EV(PPC7450, BUS_RETRY) \
+ __PMC_EV(PPC7450, L2_VALID_REQUEST) \
+ __PMC_EV(PPC7450, BORDQ_FULL) \
+ __PMC_EV(PPC7450, BUS_TAS_FOR_READS) \
+ __PMC_EV(PPC7450, BUS_TAS_FOR_WRITES) \
+ __PMC_EV(PPC7450, BUS_READS_NOT_RETRIED) \
+ __PMC_EV(PPC7450, BUS_WRITES_NOT_RETRIED) \
+ __PMC_EV(PPC7450, BUS_READS_WRITES_NOT_RETRIED) \
+ __PMC_EV(PPC7450, BUS_RETRY_DUE_TO_L1_RETRY) \
+ __PMC_EV(PPC7450, BUS_RETRY_DUE_TO_PREVIOUS_ADJACENT) \
+ __PMC_EV(PPC7450, BUS_RETRY_DUE_TO_COLLISION) \
+ __PMC_EV(PPC7450, BUS_RETRY_DUE_TO_INTERVENTION_ORDERING) \
+ __PMC_EV(PPC7450, SNOOP_REQUESTS) \
+ __PMC_EV(PPC7450, PREFETCH_ENGINE_REQUEST) \
+ __PMC_EV(PPC7450, PREFETCH_ENGINE_COLLISION_VS_LOAD) \
+ __PMC_EV(PPC7450, PREFETCH_ENGINE_COLLISION_VS_STORE) \
+ __PMC_EV(PPC7450, PREFETCH_ENGINE_COLLISION_VS_INSTR_FETCH) \
+ __PMC_EV(PPC7450, PREFETCH_ENGINE_COLLISION_VS_LOAD_STORE_INSTR_FETCH) \
+ __PMC_EV(PPC7450, PREFETCH_ENGINE_FULL)
+
+#define PMC_EV_PPC7450_FIRST PMC_EV_PPC7450_CYCLE
+#define PMC_EV_PPC7450_LAST PMC_EV_PPC7450_PREFETCH_ENGINE_FULL
+
/*
* All known PMC events.
*
@@ -3138,6 +3363,8 @@ __PMC_EV_ALIAS("CYCLES_UNHALTED_L3_FLL_DISABLE", UCP_EVENT_86H_01H)
__PMC_EV_UCF() \
__PMC_EV_BLOCK(UCP, 0x12080) \
__PMC_EV_UCP() \
+ __PMC_EV_BLOCK(PPC7450, 0x13000) \
+ __PMC_EV_PPC7450() \
#define PMC_EVENT_FIRST PMC_EV_TSC_TSC
#define PMC_EVENT_LAST PMC_EV_UCP_LAST
diff --git a/sys/powerpc/aim/machdep.c b/sys/powerpc/aim/machdep.c
index 2049949..a3c36c5 100644
--- a/sys/powerpc/aim/machdep.c
+++ b/sys/powerpc/aim/machdep.c
@@ -499,6 +499,7 @@ powerpc_init(vm_offset_t startkernel, vm_offset_t endkernel,
bcopy(generictrap, (void *)EXC_SC, (size_t)&trapsize);
bcopy(generictrap, (void *)EXC_FPA, (size_t)&trapsize);
bcopy(generictrap, (void *)EXC_VEC, (size_t)&trapsize);
+ bcopy(generictrap, (void *)EXC_PERF, (size_t)&trapsize);
bcopy(generictrap, (void *)EXC_VECAST_G4, (size_t)&trapsize);
bcopy(generictrap, (void *)EXC_VECAST_G5, (size_t)&trapsize);
#ifndef __powerpc64__
diff --git a/sys/powerpc/aim/trap.c b/sys/powerpc/aim/trap.c
index 93feb51..91f478c 100644
--- a/sys/powerpc/aim/trap.c
+++ b/sys/powerpc/aim/trap.c
@@ -34,6 +34,8 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
+#include "opt_hwpmc_hooks.h"
+
#include <sys/param.h>
#include <sys/kdb.h>
#include <sys/proc.h>
@@ -49,6 +51,9 @@ __FBSDID("$FreeBSD$");
#include <sys/uio.h>
#include <sys/signalvar.h>
#include <sys/vmmeter.h>
+#ifdef HWPMC_HOOKS
+#include <sys/pmckern.h>
+#endif
#include <security/audit/audit.h>
@@ -159,6 +164,16 @@ trap(struct trapframe *frame)
CTR3(KTR_TRAP, "trap: %s type=%s (%s)", td->td_name,
trapname(type), user ? "user" : "kernel");
+#ifdef HWPMC_HOOKS
+ if (type == EXC_PERF && (pmc_intr != NULL)) {
+#ifdef notyet
+ (*pmc_intr)(PCPU_GET(cpuid), frame);
+ if (!user)
+ return;
+#endif
+ }
+ else
+#endif
if (user) {
td->td_pticks = 0;
td->td_frame = frame;
diff --git a/sys/powerpc/include/pmc_mdep.h b/sys/powerpc/include/pmc_mdep.h
index 81a41fd..37531f2 100644
--- a/sys/powerpc/include/pmc_mdep.h
+++ b/sys/powerpc/include/pmc_mdep.h
@@ -7,6 +7,7 @@
#ifndef _MACHINE_PMC_MDEP_H_
#define _MACHINE_PMC_MDEP_H_
+#define PMC_MDEP_CLASS_INDEX_PPC7450 0
union pmc_md_op_pmcallocate {
uint64_t __pad[4];
};
@@ -17,7 +18,12 @@ union pmc_md_op_pmcallocate {
#if _KERNEL
+struct pmc_md_powerpc_pmc {
+ uint32_t pm_powerpc_evsel;
+};
+
union pmc_md_pmc {
+ struct pmc_md_powerpc_pmc pm_powerpc;
};
#define PMC_TRAPFRAME_TO_PC(TF) (0) /* Stubs */
diff --git a/sys/powerpc/include/spr.h b/sys/powerpc/include/spr.h
index 4f675c3..e356987 100644
--- a/sys/powerpc/include/spr.h
+++ b/sys/powerpc/include/spr.h
@@ -348,8 +348,8 @@
#define SPR_MMCR0_PMC1CE 0x00008000 /* PMC1 condition enable */
#define SPR_MMCR0_PMCNCE 0x00004000 /* PMCn condition enable */
#define SPR_MMCR0_TRIGGER 0x00002000 /* Trigger */
-#define SPR_MMCR0_PMC1SEL(x) ((x) << 6) /* PMC1 selector */
-#define SPR_MMCR0_PMC2SEL(x) ((x) << 0) /* PMC2 selector */
+#define SPR_MMCR0_PMC1SEL(x) (((x) & 0x3f) << 6) /* PMC1 selector */
+#define SPR_MMCR0_PMC2SEL(x) (((x) & 0x3f) << 0) /* PMC2 selector */
#define SPR_970MMCR0_PMC1SEL(x) ((x) << 8) /* PMC1 selector (970) */
#define SPR_970MMCR0_PMC2SEL(x) ((x) << 1) /* PMC2 selector (970) */
#define SPR_SGR 0x3b9 /* 4.. Storage Guarded Register */
@@ -359,10 +359,10 @@
#define SPR_SLER 0x3bb /* 4.. Storage Little Endian Register */
#define SPR_SIA 0x3bb /* .6. Sampled Instruction Address */
#define SPR_MMCR1 0x3bc /* .6. Monitor Mode Control Register 2 */
-#define SPR_MMCR1_PMC3SEL(x) ((x) << 27) /* PMC 3 selector */
-#define SPR_MMCR1_PMC4SEL(x) ((x) << 22) /* PMC 4 selector */
-#define SPR_MMCR1_PMC5SEL(x) ((x) << 17) /* PMC 5 selector */
-#define SPR_MMCR1_PMC6SEL(x) ((x) << 11) /* PMC 6 selector */
+#define SPR_MMCR1_PMC3SEL(x) (((x) & 0x1f) << 27) /* PMC 3 selector */
+#define SPR_MMCR1_PMC4SEL(x) (((x) & 0x1f) << 22) /* PMC 4 selector */
+#define SPR_MMCR1_PMC5SEL(x) (((x) & 0x1f) << 17) /* PMC 5 selector */
+#define SPR_MMCR1_PMC6SEL(x) (((x) & 0x3f) << 11) /* PMC 6 selector */
#define SPR_SU0R 0x3bc /* 4.. Storage User-defined 0 Register */
#define SPR_PMC3 0x3bd /* .6. Performance Counter Register 3 */
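
The masks added to the SPR_MMCR0_PMCnSEL and SPR_MMCR1_PMCnSEL macros above keep an over-wide selector value from spilling into neighbouring MMCR bit fields, which matters now that hwpmc writes selectors through them at run time. A standalone sketch of the difference, using the PMC1 selector field as an example (an illustration, not code from the patch):

#include <stdint.h>
#include <stdio.h>

/* Pre- and post-patch forms of the PMC1 selector macro from spr.h. */
#define	OLD_MMCR0_PMC1SEL(x)	((x) << 6)
#define	NEW_MMCR0_PMC1SEL(x)	(((x) & 0x3f) << 6)

int
main(void)
{
	uint32_t sel = 0x7f;	/* wider than the 6-bit selector field */

	/* 0x00001fc0: a bit leaks out of the PMC1SEL field. */
	printf("old: 0x%08x\n", OLD_MMCR0_PMC1SEL(sel));
	/* 0x00000fc0: confined to the six PMC1SEL bits. */
	printf("new: 0x%08x\n", NEW_MMCR0_PMC1SEL(sel));
	return (0);
}
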
diff --git a/sys/sys/pmc.h b/sys/sys/pmc.h
index 8f5f769..53827b4 100644
--- a/sys/sys/pmc.h
+++ b/sys/sys/pmc.h
@@ -87,7 +87,8 @@
__PMC_CPU(INTEL_COREI7, 0x8B, "Intel Core i7") \
__PMC_CPU(INTEL_WESTMERE, 0x8C, "Intel Westmere") \
__PMC_CPU(INTEL_XSCALE, 0x100, "Intel XScale") \
- __PMC_CPU(MIPS_24K, 0x200, "MIPS 24K")
+ __PMC_CPU(MIPS_24K, 0x200, "MIPS 24K") \
+ __PMC_CPU(PPC_7450, 0x300, "PowerPC MPC7450")
enum pmc_cputype {
#undef __PMC_CPU
@@ -96,7 +97,7 @@ enum pmc_cputype {
};
#define PMC_CPU_FIRST PMC_CPU_AMD_K7
-#define PMC_CPU_LAST PMC_CPU_MIPS_24K
+#define PMC_CPU_LAST PMC_CPU_PPC_7450
/*
* Classes of PMCs
@@ -114,7 +115,8 @@ enum pmc_cputype {
__PMC_CLASS(UCF) /* Intel Uncore fixed function */ \
__PMC_CLASS(UCP) /* Intel Uncore programmable */ \
__PMC_CLASS(XSCALE) /* Intel XScale counters */ \
- __PMC_CLASS(MIPS24K) /* MIPS 24K */
+ __PMC_CLASS(MIPS24K) /* MIPS 24K */ \
+ __PMC_CLASS(PPC7450) /* Motorola MPC7450 class */
enum pmc_class {
#undef __PMC_CLASS
@@ -123,7 +125,7 @@ enum pmc_class {
};
#define PMC_CLASS_FIRST PMC_CLASS_TSC
-#define PMC_CLASS_LAST PMC_CLASS_MIPS24K
+#define PMC_CLASS_LAST PMC_CLASS_PPC7450
/*
* A PMC can be in the following states: