author     Zi Shen Lim <zlim@netlogicmicro.com>    2012-10-31 12:01:28 +0000
committer  John Crispin <blogic@openwrt.org>       2012-11-09 11:37:18 +0100
commit     4be3d2f3966b9f010bb997dcab25e7af489a841e (patch)
tree       b033e91f2cb786e0e9fd3d0fbdcca33ecb5cf308 /arch/mips
parent     c783390a0ecef08df5c804f8c5f647431a04f502 (diff)
MIPS: perf: Add XLP support for hardware perf.
Add support for the XLP performance counter registers in perf. Update
mips/Kconfig so that perf events can be selected for XLP.
Signed-off-by: Zi Shen Lim <zlim@netlogicmicro.com>
Signed-off-by: Jayachandran C <jchandra@broadcom.com>
Patchwork: http://patchwork.linux-mips.org/patch/4457
Signed-off-by: John Crispin <blogic@openwrt.org>
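
For context, the generic events wired up below in xlp_event_map are reached from userspace through the usual perf_event_open(2) path. The following is a minimal sketch, not part of the patch, assuming a kernel built with the CONFIG_HW_PERF_EVENTS option this change enables for CPU_XLP; the workload placeholder is hypothetical.

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/syscall.h>
    #include <linux/perf_event.h>

    /* perf_event_open(2) has no glibc wrapper; invoke it via syscall(2). */
    static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
                               int cpu, int group_fd, unsigned long flags)
    {
            return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
    }

    int main(void)
    {
            struct perf_event_attr attr;
            uint64_t count;
            int fd;

            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            attr.type = PERF_TYPE_HARDWARE;
            /* On XLP this generic event resolves to event 0x18 via xlp_event_map. */
            attr.config = PERF_COUNT_HW_INSTRUCTIONS;
            attr.disabled = 1;
            attr.exclude_kernel = 1;

            fd = perf_event_open(&attr, 0, -1, -1, 0); /* this task, any CPU */
            if (fd < 0) {
                    perror("perf_event_open");
                    return 1;
            }

            ioctl(fd, PERF_EVENT_IOC_RESET, 0);
            ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
            /* ... run the workload to be measured here ... */
            ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

            if (read(fd, &count, sizeof(count)) == sizeof(count))
                    printf("instructions: %llu\n", (unsigned long long)count);

            close(fd);
            return 0;
    }

Setting exclude_kernel is only a common default for unprivileged counting, not something the patch requires.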
Diffstat (limited to 'arch/mips')
-rw-r--r--   arch/mips/Kconfig                    |   2
-rw-r--r--   arch/mips/kernel/perf_event_mipsxx.c | 124
2 files changed, 125 insertions(+), 1 deletion(-)
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index dba9390..a4919b0 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -2186,7 +2186,7 @@ config NODES_SHIFT
 
 config HW_PERF_EVENTS
 	bool "Enable hardware performance counter support for perf events"
-	depends on PERF_EVENTS && !MIPS_MT_SMTC && OPROFILE=n && (CPU_MIPS32 || CPU_MIPS64 || CPU_R10000 || CPU_SB1 || CPU_CAVIUM_OCTEON)
+	depends on PERF_EVENTS && !MIPS_MT_SMTC && OPROFILE=n && (CPU_MIPS32 || CPU_MIPS64 || CPU_R10000 || CPU_SB1 || CPU_CAVIUM_OCTEON || CPU_XLP)
 	default y
 	help
 	  Enable hardware performance counter support for perf events. If
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index a9b995d..b14c14d 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -840,6 +840,16 @@ static const struct mips_perf_event bmips5000_event_map
 	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T },
 };
 
+static const struct mips_perf_event xlp_event_map[PERF_COUNT_HW_MAX] = {
+	[PERF_COUNT_HW_CPU_CYCLES] = { 0x01, CNTR_ALL },
+	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x18, CNTR_ALL }, /* PAPI_TOT_INS */
+	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x04, CNTR_ALL }, /* PAPI_L1_ICA */
+	[PERF_COUNT_HW_CACHE_MISSES] = { 0x07, CNTR_ALL }, /* PAPI_L1_ICM */
+	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x1b, CNTR_ALL }, /* PAPI_BR_CN */
+	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x1c, CNTR_ALL }, /* PAPI_BR_MSP */
+	[PERF_COUNT_HW_BUS_CYCLES] = { UNSUPPORTED_PERF_EVENT_ID },
+};
+
 /* 24K/34K/1004K cores can share the same cache event map. */
 static const struct mips_perf_event mipsxxcore_cache_map
 				[PERF_COUNT_HW_CACHE_MAX]
@@ -1092,6 +1102,100 @@ static const struct mips_perf_event octeon_cache_map
 	},
 };
 
+static const struct mips_perf_event xlp_cache_map
+				[PERF_COUNT_HW_CACHE_MAX]
+				[PERF_COUNT_HW_CACHE_OP_MAX]
+				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+[C(L1D)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)]	= { 0x31, CNTR_ALL }, /* PAPI_L1_DCR */
+		[C(RESULT_MISS)]	= { 0x30, CNTR_ALL }, /* PAPI_L1_LDM */
+	},
+	[C(OP_WRITE)] = {
+		[C(RESULT_ACCESS)]	= { 0x2f, CNTR_ALL }, /* PAPI_L1_DCW */
+		[C(RESULT_MISS)]	= { 0x2e, CNTR_ALL }, /* PAPI_L1_STM */
+	},
+	[C(OP_PREFETCH)] = {
+		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
+		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
+	},
+},
+[C(L1I)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)]	= { 0x04, CNTR_ALL }, /* PAPI_L1_ICA */
+		[C(RESULT_MISS)]	= { 0x07, CNTR_ALL }, /* PAPI_L1_ICM */
+	},
+	[C(OP_WRITE)] = {
+		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
+		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
+	},
+	[C(OP_PREFETCH)] = {
+		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
+		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
+	},
+},
+[C(LL)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)]	= { 0x35, CNTR_ALL }, /* PAPI_L2_DCR */
+		[C(RESULT_MISS)]	= { 0x37, CNTR_ALL }, /* PAPI_L2_LDM */
+	},
+	[C(OP_WRITE)] = {
+		[C(RESULT_ACCESS)]	= { 0x34, CNTR_ALL }, /* PAPI_L2_DCA */
+		[C(RESULT_MISS)]	= { 0x36, CNTR_ALL }, /* PAPI_L2_DCM */
+	},
+	[C(OP_PREFETCH)] = {
+		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
+		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
+	},
+},
+[C(DTLB)] = {
+	/*
+	 * Only general DTLB misses are counted use the same event for
+	 * read and write.
+	 */
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
+		[C(RESULT_MISS)]	= { 0x2d, CNTR_ALL }, /* PAPI_TLB_DM */
+	},
+	[C(OP_WRITE)] = {
+		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
+		[C(RESULT_MISS)]	= { 0x2d, CNTR_ALL }, /* PAPI_TLB_DM */
+	},
+	[C(OP_PREFETCH)] = {
+		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
+		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
+	},
+},
+[C(ITLB)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
+		[C(RESULT_MISS)]	= { 0x08, CNTR_ALL }, /* PAPI_TLB_IM */
+	},
+	[C(OP_WRITE)] = {
+		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
+		[C(RESULT_MISS)]	= { 0x08, CNTR_ALL }, /* PAPI_TLB_IM */
+	},
+	[C(OP_PREFETCH)] = {
+		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
+		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
+	},
+},
+[C(BPU)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
+		[C(RESULT_MISS)]	= { 0x25, CNTR_ALL },
+	},
+	[C(OP_WRITE)] = {
+		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
+		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
+	},
+	[C(OP_PREFETCH)] = {
+		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
+		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
+	},
+},
+};
+
 #ifdef CONFIG_MIPS_MT_SMP
 static void check_and_calc_range(struct perf_event *event,
 				 const struct mips_perf_event *pev)
@@ -1444,6 +1548,20 @@ static const struct mips_perf_event *octeon_pmu_map_raw_event(u64 config)
 	return &raw_event;
 }
 
+static const struct mips_perf_event *xlp_pmu_map_raw_event(u64 config)
+{
+	unsigned int raw_id = config & 0xff;
+
+	/* Only 1-63 are defined */
+	if ((raw_id < 0x01) || (raw_id > 0x3f))
+		return ERR_PTR(-EOPNOTSUPP);
+
+	raw_event.cntr_mask = CNTR_ALL;
+	raw_event.event_id = raw_id;
+
+	return &raw_event;
+}
+
 static int __init
 init_hw_perf_events(void)
 {
@@ -1522,6 +1640,12 @@ init_hw_perf_events(void)
 		mipspmu.general_event_map = &bmips5000_event_map;
 		mipspmu.cache_event_map = &bmips5000_cache_map;
 		break;
+	case CPU_XLP:
+		mipspmu.name = "xlp";
+		mipspmu.general_event_map = &xlp_event_map;
+		mipspmu.cache_event_map = &xlp_cache_map;
+		mipspmu.map_raw_event = xlp_pmu_map_raw_event;
+		break;
 	default:
 		pr_cont("Either hardware does not support performance "
 			"counters, or not yet implemented.\n");
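
The raw-event hook added above (xlp_pmu_map_raw_event) accepts XLP event numbers 0x01-0x3f and allows each of them on any counter (CNTR_ALL). Below is a minimal sketch of the matching userspace side, again not part of the patch: the helper name xlp_raw_event_attr exists only for this sketch, and event 0x18 is chosen purely as an illustration (it is the same id xlp_event_map assigns to PERF_COUNT_HW_INSTRUCTIONS).

    #include <string.h>
    #include <linux/perf_event.h>

    /*
     * Fill a perf_event_attr for a raw XLP event.  On the kernel side,
     * xlp_pmu_map_raw_event() masks config with 0xff and rejects ids
     * outside 0x01-0x3f; anything in range may run on any counter.
     */
    static void xlp_raw_event_attr(struct perf_event_attr *attr, unsigned int id)
    {
            memset(attr, 0, sizeof(*attr));
            attr->size = sizeof(*attr);
            attr->type = PERF_TYPE_RAW;
            attr->config = id;      /* e.g. 0x18: instructions retired */
            attr->disabled = 1;
    }

The filled-in attr is then passed to perf_event_open() exactly as in the earlier sketch; the perf tool's raw-event syntax (for example, perf stat -e r18) ends up in the same kernel path.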