author | Ingo Molnar <mingo@elte.hu> | 2009-09-21 12:20:38 +0200 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2009-09-21 14:34:11 +0200 |
commit | 57c0c15b5244320065374ad2c54f4fbec77a6428 (patch) | |
tree | 35369d817f5925aca09b083bba47c437b91386d9 /arch/powerpc/kernel/perf_event.c | |
parent | cdd6c482c9ff9c55475ee7392ec8f672eddb7be6 (diff) | |
download | op-kernel-dev-57c0c15b5244320065374ad2c54f4fbec77a6428.zip op-kernel-dev-57c0c15b5244320065374ad2c54f4fbec77a6428.tar.gz |
perf: Tidy up after the big rename
- provide compatibility Kconfig entry for existing PERF_COUNTERS .configs
- provide courtesy copy of old perf_counter.h, for user-space projects
- small indentation fixups
- fix up MAINTAINERS
- fix small x86 printout fallout
- fix up small PowerPC comment fallout (use 'counter' as in register)
Reviewed-by: Arjan van de Ven <arjan@linux.intel.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/powerpc/kernel/perf_event.c')
-rw-r--r-- | arch/powerpc/kernel/perf_event.c | 12 |
1 file changed, 6 insertions, 6 deletions
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c
index c98321f..197b7d9 100644
--- a/arch/powerpc/kernel/perf_event.c
+++ b/arch/powerpc/kernel/perf_event.c
@@ -41,7 +41,7 @@ DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
 struct power_pmu *ppmu;
 
 /*
- * Normally, to ignore kernel events we set the FCS (freeze events
+ * Normally, to ignore kernel events we set the FCS (freeze counters
  * in supervisor mode) bit in MMCR0, but if the kernel runs with the
  * hypervisor bit set in the MSR, or if we are running on a processor
  * where the hypervisor bit is forced to 1 (as on Apple G5 processors),
@@ -159,7 +159,7 @@ void perf_event_print_debug(void)
 }
 
 /*
- * Read one performance monitor event (PMC).
+ * Read one performance monitor counter (PMC).
  */
 static unsigned long read_pmc(int idx)
 {
@@ -409,7 +409,7 @@ static void power_pmu_read(struct perf_event *event)
 		val = read_pmc(event->hw.idx);
 	} while (atomic64_cmpxchg(&event->hw.prev_count, prev, val) != prev);
 
-	/* The events are only 32 bits wide */
+	/* The counters are only 32 bits wide */
 	delta = (val - prev) & 0xfffffffful;
 	atomic64_add(delta, &event->count);
 	atomic64_sub(delta, &event->hw.period_left);
@@ -543,7 +543,7 @@ void hw_perf_disable(void)
 		}
 
 		/*
-		 * Set the 'freeze events' bit.
+		 * Set the 'freeze counters' bit.
 		 * The barrier is to make sure the mtspr has been
 		 * executed and the PMU has frozen the events
 		 * before we return.
@@ -1124,7 +1124,7 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
 }
 
 /*
- * A event has overflowed; update its count and record
+ * A counter has overflowed; update its count and record
 * things if requested. Note that interrupts are hard-disabled
 * here so there is no possibility of being interrupted.
 */
@@ -1271,7 +1271,7 @@ static void perf_event_interrupt(struct pt_regs *regs)
 
 	/*
 	 * Reset MMCR0 to its normal value. This will set PMXE and
-	 * clear FC (freeze events) and PMAO (perf mon alert occurred)
+	 * clear FC (freeze counters) and PMAO (perf mon alert occurred)
	 * and thus allow interrupts to occur again.
	 * XXX might want to use MSR.PM to keep the events frozen until
	 * we get back out of this interrupt.
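The hunk at line 409 only renames "events" back to "counters" in the comment; the surrounding logic already handles the fact that the hardware PMCs are 32 bits wide while the software count is 64 bits. As a minimal standalone sketch of that masking idiom (plain userspace C with hypothetical names, not the kernel code itself):

```c
/*
 * Sketch of the idiom used in power_pmu_read() above: the PMCs are only
 * 32 bits wide, so the delta is taken modulo 2^32 before being added to
 * the 64-bit software count, which stays correct across a counter wrap.
 * accumulate_pmc() is a hypothetical helper for illustration only.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t accumulate_pmc(uint64_t count, uint32_t prev, uint32_t val)
{
	/* The counters are only 32 bits wide */
	uint64_t delta = (uint64_t)(val - prev) & 0xffffffffull;

	return count + delta;
}

int main(void)
{
	/* Counter wrapped from 0xfffffff0 past zero to 0x10: delta is 0x20. */
	printf("delta across wrap: %llu\n",
	       (unsigned long long)accumulate_pmc(0, 0xfffffff0u, 0x10u));
	return 0;
}
```

In the kernel version shown in the hunk, this delta computation sits inside an atomic64_cmpxchg() loop on event->hw.prev_count, so a concurrent update of the saved previous value restarts the read rather than corrupting the accumulated count.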