author    Peter Zijlstra <a.p.zijlstra@chello.nl>    2009-06-11 14:06:28 +0200
committer Ingo Molnar <mingo@elte.hu>                2009-06-11 17:54:15 +0200
commit    f4dbfa8f3131a84257223393905f7efad0ca5996 (patch)
tree      67bb2666868c4449c2fa9ba6dc931721f60deb6c /arch
parent    1c432d899d32d36371ee4ee310fa3609cf0e5742 (diff)
perf_counter: Standardize event names
Pure renames only, to PERF_COUNT_HW_* and PERF_COUNT_SW_*.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
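For quick reference, here is a minimal sketch of the renamed generic event ids. The enum tag names and layout of include/linux/perf_counter.h are assumed; only the enumerators that actually appear in the hunks below are listed, so the numeric values are illustrative rather than the header's.

/*
 * Sketch of the renamed generic event ids (assumed enum names and
 * layout; only identifiers used in this patch are shown, so the
 * implicit numeric values here are illustrative).
 */
enum perf_hw_id {
	PERF_COUNT_HW_CPU_CYCLES,		/* was PERF_COUNT_CPU_CYCLES */
	PERF_COUNT_HW_INSTRUCTIONS,		/* was PERF_COUNT_INSTRUCTIONS */
	PERF_COUNT_HW_CACHE_REFERENCES,		/* was PERF_COUNT_CACHE_REFERENCES */
	PERF_COUNT_HW_CACHE_MISSES,		/* was PERF_COUNT_CACHE_MISSES */
	PERF_COUNT_HW_BRANCH_INSTRUCTIONS,	/* was PERF_COUNT_BRANCH_INSTRUCTIONS */
	PERF_COUNT_HW_BRANCH_MISSES,		/* was PERF_COUNT_BRANCH_MISSES */
	PERF_COUNT_HW_BUS_CYCLES,		/* was PERF_COUNT_BUS_CYCLES */
};

enum perf_sw_ids {
	PERF_COUNT_SW_PAGE_FAULTS,		/* was PERF_COUNT_PAGE_FAULTS */
	PERF_COUNT_SW_PAGE_FAULTS_MIN,		/* was PERF_COUNT_PAGE_FAULTS_MIN */
	PERF_COUNT_SW_PAGE_FAULTS_MAJ,		/* was PERF_COUNT_PAGE_FAULTS_MAJ */
};

Call sites keep their signatures; for example, the fault handlers below still report perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address), only with the SW-prefixed id.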
Diffstat (limited to 'arch')
-rw-r--r--  arch/powerpc/kernel/power4-pmu.c     12
-rw-r--r--  arch/powerpc/kernel/power5+-pmu.c    12
-rw-r--r--  arch/powerpc/kernel/power5-pmu.c     12
-rw-r--r--  arch/powerpc/kernel/power6-pmu.c     12
-rw-r--r--  arch/powerpc/kernel/ppc970-pmu.c     12
-rw-r--r--  arch/powerpc/mm/fault.c               6
-rw-r--r--  arch/x86/kernel/cpu/perf_counter.c   32
-rw-r--r--  arch/x86/mm/fault.c                   6
8 files changed, 52 insertions(+), 52 deletions(-)
diff --git a/arch/powerpc/kernel/power4-pmu.c b/arch/powerpc/kernel/power4-pmu.c
index 0e94b68..73956f0 100644
--- a/arch/powerpc/kernel/power4-pmu.c
+++ b/arch/powerpc/kernel/power4-pmu.c
@@ -535,12 +535,12 @@ static void p4_disable_pmc(unsigned int pmc, u64 mmcr[])
}
static int p4_generic_events[] = {
- [PERF_COUNT_CPU_CYCLES] = 7,
- [PERF_COUNT_INSTRUCTIONS] = 0x1001,
- [PERF_COUNT_CACHE_REFERENCES] = 0x8c10, /* PM_LD_REF_L1 */
- [PERF_COUNT_CACHE_MISSES] = 0x3c10, /* PM_LD_MISS_L1 */
- [PERF_COUNT_BRANCH_INSTRUCTIONS] = 0x330, /* PM_BR_ISSUED */
- [PERF_COUNT_BRANCH_MISSES] = 0x331, /* PM_BR_MPRED_CR */
+ [PERF_COUNT_HW_CPU_CYCLES] = 7,
+ [PERF_COUNT_HW_INSTRUCTIONS] = 0x1001,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = 0x8c10, /* PM_LD_REF_L1 */
+ [PERF_COUNT_HW_CACHE_MISSES] = 0x3c10, /* PM_LD_MISS_L1 */
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x330, /* PM_BR_ISSUED */
+ [PERF_COUNT_HW_BRANCH_MISSES] = 0x331, /* PM_BR_MPRED_CR */
};
#define C(x) PERF_COUNT_HW_CACHE_##x
diff --git a/arch/powerpc/kernel/power5+-pmu.c b/arch/powerpc/kernel/power5+-pmu.c
index bbf2cbb..5f8b774 100644
--- a/arch/powerpc/kernel/power5+-pmu.c
+++ b/arch/powerpc/kernel/power5+-pmu.c
@@ -606,12 +606,12 @@ static void power5p_disable_pmc(unsigned int pmc, u64 mmcr[])
}
static int power5p_generic_events[] = {
- [PERF_COUNT_CPU_CYCLES] = 0xf,
- [PERF_COUNT_INSTRUCTIONS] = 0x100009,
- [PERF_COUNT_CACHE_REFERENCES] = 0x1c10a8, /* LD_REF_L1 */
- [PERF_COUNT_CACHE_MISSES] = 0x3c1088, /* LD_MISS_L1 */
- [PERF_COUNT_BRANCH_INSTRUCTIONS] = 0x230e4, /* BR_ISSUED */
- [PERF_COUNT_BRANCH_MISSES] = 0x230e5, /* BR_MPRED_CR */
+ [PERF_COUNT_HW_CPU_CYCLES] = 0xf,
+ [PERF_COUNT_HW_INSTRUCTIONS] = 0x100009,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = 0x1c10a8, /* LD_REF_L1 */
+ [PERF_COUNT_HW_CACHE_MISSES] = 0x3c1088, /* LD_MISS_L1 */
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x230e4, /* BR_ISSUED */
+ [PERF_COUNT_HW_BRANCH_MISSES] = 0x230e5, /* BR_MPRED_CR */
};
#define C(x) PERF_COUNT_HW_CACHE_##x
diff --git a/arch/powerpc/kernel/power5-pmu.c b/arch/powerpc/kernel/power5-pmu.c
index 670cf10..d54723a 100644
--- a/arch/powerpc/kernel/power5-pmu.c
+++ b/arch/powerpc/kernel/power5-pmu.c
@@ -548,12 +548,12 @@ static void power5_disable_pmc(unsigned int pmc, u64 mmcr[])
}
static int power5_generic_events[] = {
- [PERF_COUNT_CPU_CYCLES] = 0xf,
- [PERF_COUNT_INSTRUCTIONS] = 0x100009,
- [PERF_COUNT_CACHE_REFERENCES] = 0x4c1090, /* LD_REF_L1 */
- [PERF_COUNT_CACHE_MISSES] = 0x3c1088, /* LD_MISS_L1 */
- [PERF_COUNT_BRANCH_INSTRUCTIONS] = 0x230e4, /* BR_ISSUED */
- [PERF_COUNT_BRANCH_MISSES] = 0x230e5, /* BR_MPRED_CR */
+ [PERF_COUNT_HW_CPU_CYCLES] = 0xf,
+ [PERF_COUNT_HW_INSTRUCTIONS] = 0x100009,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = 0x4c1090, /* LD_REF_L1 */
+ [PERF_COUNT_HW_CACHE_MISSES] = 0x3c1088, /* LD_MISS_L1 */
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x230e4, /* BR_ISSUED */
+ [PERF_COUNT_HW_BRANCH_MISSES] = 0x230e5, /* BR_MPRED_CR */
};
#define C(x) PERF_COUNT_HW_CACHE_##x
diff --git a/arch/powerpc/kernel/power6-pmu.c b/arch/powerpc/kernel/power6-pmu.c
index 4da7078..0cd406ee 100644
--- a/arch/powerpc/kernel/power6-pmu.c
+++ b/arch/powerpc/kernel/power6-pmu.c
@@ -466,12 +466,12 @@ static void p6_disable_pmc(unsigned int pmc, u64 mmcr[])
}
static int power6_generic_events[] = {
- [PERF_COUNT_CPU_CYCLES] = 0x1e,
- [PERF_COUNT_INSTRUCTIONS] = 2,
- [PERF_COUNT_CACHE_REFERENCES] = 0x280030, /* LD_REF_L1 */
- [PERF_COUNT_CACHE_MISSES] = 0x30000c, /* LD_MISS_L1 */
- [PERF_COUNT_BRANCH_INSTRUCTIONS] = 0x410a0, /* BR_PRED */
- [PERF_COUNT_BRANCH_MISSES] = 0x400052, /* BR_MPRED */
+ [PERF_COUNT_HW_CPU_CYCLES] = 0x1e,
+ [PERF_COUNT_HW_INSTRUCTIONS] = 2,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = 0x280030, /* LD_REF_L1 */
+ [PERF_COUNT_HW_CACHE_MISSES] = 0x30000c, /* LD_MISS_L1 */
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x410a0, /* BR_PRED */
+ [PERF_COUNT_HW_BRANCH_MISSES] = 0x400052, /* BR_MPRED */
};
#define C(x) PERF_COUNT_HW_CACHE_##x
diff --git a/arch/powerpc/kernel/ppc970-pmu.c b/arch/powerpc/kernel/ppc970-pmu.c
index 336adf1..46a2064 100644
--- a/arch/powerpc/kernel/ppc970-pmu.c
+++ b/arch/powerpc/kernel/ppc970-pmu.c
@@ -419,12 +419,12 @@ static void p970_disable_pmc(unsigned int pmc, u64 mmcr[])
}
static int ppc970_generic_events[] = {
- [PERF_COUNT_CPU_CYCLES] = 7,
- [PERF_COUNT_INSTRUCTIONS] = 1,
- [PERF_COUNT_CACHE_REFERENCES] = 0x8810, /* PM_LD_REF_L1 */
- [PERF_COUNT_CACHE_MISSES] = 0x3810, /* PM_LD_MISS_L1 */
- [PERF_COUNT_BRANCH_INSTRUCTIONS] = 0x431, /* PM_BR_ISSUED */
- [PERF_COUNT_BRANCH_MISSES] = 0x327, /* PM_GRP_BR_MPRED */
+ [PERF_COUNT_HW_CPU_CYCLES] = 7,
+ [PERF_COUNT_HW_INSTRUCTIONS] = 1,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = 0x8810, /* PM_LD_REF_L1 */
+ [PERF_COUNT_HW_CACHE_MISSES] = 0x3810, /* PM_LD_MISS_L1 */
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x431, /* PM_BR_ISSUED */
+ [PERF_COUNT_HW_BRANCH_MISSES] = 0x327, /* PM_GRP_BR_MPRED */
};
#define C(x) PERF_COUNT_HW_CACHE_##x
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index ac0e112..5beffc8 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -171,7 +171,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
die("Weird page fault", regs, SIGSEGV);
}
- perf_swcounter_event(PERF_COUNT_PAGE_FAULTS, 1, 0, regs, address);
+ perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
/* When running in the kernel we expect faults to occur only to
* addresses in user space. All other faults represent errors in the
@@ -312,7 +312,7 @@ good_area:
}
if (ret & VM_FAULT_MAJOR) {
current->maj_flt++;
- perf_swcounter_event(PERF_COUNT_PAGE_FAULTS_MAJ, 1, 0,
+ perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
regs, address);
#ifdef CONFIG_PPC_SMLPAR
if (firmware_has_feature(FW_FEATURE_CMO)) {
@@ -323,7 +323,7 @@ good_area:
#endif
} else {
current->min_flt++;
- perf_swcounter_event(PERF_COUNT_PAGE_FAULTS_MIN, 1, 0,
+ perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
regs, address);
}
up_read(&mm->mmap_sem);
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 57ae1be..572fb43 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -69,13 +69,13 @@ static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters) = {
*/
static const u64 intel_perfmon_event_map[] =
{
- [PERF_COUNT_CPU_CYCLES] = 0x003c,
- [PERF_COUNT_INSTRUCTIONS] = 0x00c0,
- [PERF_COUNT_CACHE_REFERENCES] = 0x4f2e,
- [PERF_COUNT_CACHE_MISSES] = 0x412e,
- [PERF_COUNT_BRANCH_INSTRUCTIONS] = 0x00c4,
- [PERF_COUNT_BRANCH_MISSES] = 0x00c5,
- [PERF_COUNT_BUS_CYCLES] = 0x013c,
+ [PERF_COUNT_HW_CPU_CYCLES] = 0x003c,
+ [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = 0x4f2e,
+ [PERF_COUNT_HW_CACHE_MISSES] = 0x412e,
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
+ [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
+ [PERF_COUNT_HW_BUS_CYCLES] = 0x013c,
};
static u64 intel_pmu_event_map(int event)
@@ -485,12 +485,12 @@ static const u64 amd_0f_hw_cache_event_ids
*/
static const u64 amd_perfmon_event_map[] =
{
- [PERF_COUNT_CPU_CYCLES] = 0x0076,
- [PERF_COUNT_INSTRUCTIONS] = 0x00c0,
- [PERF_COUNT_CACHE_REFERENCES] = 0x0080,
- [PERF_COUNT_CACHE_MISSES] = 0x0081,
- [PERF_COUNT_BRANCH_INSTRUCTIONS] = 0x00c4,
- [PERF_COUNT_BRANCH_MISSES] = 0x00c5,
+ [PERF_COUNT_HW_CPU_CYCLES] = 0x0076,
+ [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0080,
+ [PERF_COUNT_HW_CACHE_MISSES] = 0x0081,
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
+ [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
};
static u64 amd_pmu_event_map(int event)
@@ -970,11 +970,11 @@ fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc)
event = hwc->config & ARCH_PERFMON_EVENT_MASK;
- if (unlikely(event == x86_pmu.event_map(PERF_COUNT_INSTRUCTIONS)))
+ if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS)))
return X86_PMC_IDX_FIXED_INSTRUCTIONS;
- if (unlikely(event == x86_pmu.event_map(PERF_COUNT_CPU_CYCLES)))
+ if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_CPU_CYCLES)))
return X86_PMC_IDX_FIXED_CPU_CYCLES;
- if (unlikely(event == x86_pmu.event_map(PERF_COUNT_BUS_CYCLES)))
+ if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_BUS_CYCLES)))
return X86_PMC_IDX_FIXED_BUS_CYCLES;
return -1;
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 6f9df2b..5c6d816 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -1045,7 +1045,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
if (unlikely(error_code & PF_RSVD))
pgtable_bad(regs, error_code, address);
- perf_swcounter_event(PERF_COUNT_PAGE_FAULTS, 1, 0, regs, address);
+ perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
/*
* If we're in an interrupt, have no user context or are running
@@ -1142,11 +1142,11 @@ good_area:
if (fault & VM_FAULT_MAJOR) {
tsk->maj_flt++;
- perf_swcounter_event(PERF_COUNT_PAGE_FAULTS_MAJ, 1, 0,
+ perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
regs, address);
} else {
tsk->min_flt++;
- perf_swcounter_event(PERF_COUNT_PAGE_FAULTS_MIN, 1, 0,
+ perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
regs, address);
}