| author | Peter Zijlstra <a.p.zijlstra@chello.nl> | 2009-05-13 16:21:38 +0200 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2009-05-15 09:47:02 +0200 |
| commit | 9e35ad388bea89f7d6f375af4c0ae98803688666 (patch) | |
| tree | 9abbce9f6c9a914b1ea8d8dae82e159366030e4a /include/linux/perf_counter.h | |
| parent | 962bf7a66edca4d36a730a38ff8410a67f560e40 (diff) | |
perf_counter: Rework the perf counter disable/enable
The current disable/enable mechanism is:
```c
	token = hw_perf_save_disable();
	...
	/* do bits */
	...
	hw_perf_restore(token);
```
This works well, provided the calls nest properly. Except they don't:
the x86 NMI/INT throttling code uses this in a non-nested fashion, breaking
things. Therefore, provide a reference-counted disable/enable interface,
where the first disable disables the hardware and the last enable re-enables it.
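To make the nesting behaviour concrete, here is a minimal sketch of the
reference-counting idea (plain userspace C, not the kernel implementation:
`disable_count`, `hw_disable()` and `hw_enable()` are hypothetical stand-ins
for the kernel's per-CPU count and the arch-level PMU hooks):

```c
#include <stdio.h>

/* Hypothetical stand-ins; the kernel uses a per-CPU count and arch hooks. */
static int disable_count;

static void hw_disable(void) { puts("PMU stopped");   }
static void hw_enable(void)  { puts("PMU restarted"); }

void perf_disable(void)
{
	/* Only the first disable touches the hardware. */
	if (disable_count++ == 0)
		hw_disable();
}

void perf_enable(void)
{
	/* Only the last enable touches the hardware; calls must pair. */
	if (--disable_count == 0)
		hw_enable();
}

int main(void)
{
	perf_disable();		/* outer: stops the PMU          */
	perf_disable();		/* nested (e.g. throttling path) */
	perf_enable();		/* nested: PMU stays stopped     */
	perf_enable();		/* outer: restarts the PMU       */
	return 0;
}
```

Unlike the token scheme, interleaved or repeated disables from an NMI path
simply bump the count, so no caller can accidentally re-enable the PMU
underneath another.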
[ Impact: refactor, simplify the PMU disable/enable logic ]
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include/linux/perf_counter.h')
-rw-r--r-- | include/linux/perf_counter.h | 10
1 file changed, 6 insertions(+), 4 deletions(-)
```diff
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index 614f921..e543ecc 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -544,8 +544,10 @@ extern void perf_counter_exit_task(struct task_struct *child);
 extern void perf_counter_do_pending(void);
 extern void perf_counter_print_debug(void);
 extern void perf_counter_unthrottle(void);
-extern u64 hw_perf_save_disable(void);
-extern void hw_perf_restore(u64 ctrl);
+extern void __perf_disable(void);
+extern bool __perf_enable(void);
+extern void perf_disable(void);
+extern void perf_enable(void);
 extern int perf_counter_task_disable(void);
 extern int perf_counter_task_enable(void);
 extern int hw_perf_group_sched_in(struct perf_counter *group_leader,
@@ -600,8 +602,8 @@ static inline void perf_counter_exit_task(struct task_struct *child) { }
 static inline void perf_counter_do_pending(void) { }
 static inline void perf_counter_print_debug(void) { }
 static inline void perf_counter_unthrottle(void) { }
-static inline void hw_perf_restore(u64 ctrl) { }
-static inline u64 hw_perf_save_disable(void) { return 0; }
+static inline void perf_disable(void) { }
+static inline void perf_enable(void) { }
 static inline int perf_counter_task_disable(void) { return -EINVAL; }
 static inline int perf_counter_task_enable(void) { return -EINVAL; }
```
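For call sites, the conversion implied by this patch is mechanical; a
before/after sketch, reusing the `/* do bits */` placeholder from the
commit message:

```c
/* Before: token-based, only correct when uses nest properly. */
u64 token = hw_perf_save_disable();
/* do bits */
hw_perf_restore(token);

/* After: reference-counted, safe even for the non-nested x86
 * NMI/INT throttling path. */
perf_disable();
/* do bits */
perf_enable();
```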