path: root/kernel/perf_event.c
author    Peter Zijlstra <a.p.zijlstra@chello.nl>    2010-06-14 08:49:00 +0200
committer Ingo Molnar <mingo@elte.hu>    2010-09-09 20:46:29 +0200
commit    33696fc0d141bbbcb12f75b69608ea83282e3117 (patch)
tree      72e08dba377d57eb7dd8c08a937a6de10e8af9c4 /kernel/perf_event.c
parent    24cd7f54a0d47e1d5b3de29e2456bfbd2d8447b7 (diff)
download  op-kernel-dev-33696fc0d141bbbcb12f75b69608ea83282e3117.zip
          op-kernel-dev-33696fc0d141bbbcb12f75b69608ea83282e3117.tar.gz
perf: Per PMU disable
Changes perf_disable() into perf_pmu_disable().

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: paulus <paulus@samba.org>
Cc: stephane eranian <eranian@googlemail.com>
Cc: Robert Richter <robert.richter@amd.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Cyrill Gorcunov <gorcunov@gmail.com>
Cc: Lin Ming <ming.m.lin@intel.com>
Cc: Yanmin <yanmin_zhang@linux.intel.com>
Cc: Deng-Cheng Zhu <dengcheng.zhu@gmail.com>
Cc: David Miller <davem@davemloft.net>
Cc: Michael Cree <mcree@orcon.net.nz>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/perf_event.c')
-rw-r--r--    kernel/perf_event.c | 31
1 file changed, 19 insertions(+), 12 deletions(-)
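Before the diff itself, a minimal caller-side sketch of the change described in the commit message, assuming only what this patch adds (perf_pmu_disable()/perf_pmu_enable() and the per-CPU pmu_disable_count). The helper name exmpl_reprogram() and its body are hypothetical, not code from the tree:

#include <linux/perf_event.h>

/*
 * Hypothetical driver-internal helper (not part of this patch):
 * the new calls nest via the per-CPU counter, so the hardware is
 * only stopped on the 0 -> 1 transition and restarted on 1 -> 0.
 */
static void exmpl_reprogram(struct pmu *pmu)
{
	perf_pmu_disable(pmu);		/* count 0 -> 1: pmu->pmu_disable() runs */
	perf_pmu_disable(pmu);		/* count 1 -> 2: nested, hardware untouched */

	/* ... rewrite counter configuration while the PMU is quiet ... */

	perf_pmu_enable(pmu);		/* count 2 -> 1: still disabled */
	perf_pmu_enable(pmu);		/* count 1 -> 0: pmu->pmu_enable() runs */
}

The point of the change is visible in the signature: the disable state is now tracked per struct pmu rather than through the single global hw_perf_disable()/hw_perf_enable() pair.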
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 9a98ce9..5ed0c06 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -71,23 +71,20 @@ static atomic64_t perf_event_id;
*/
static DEFINE_SPINLOCK(perf_resource_lock);
-void __weak hw_perf_disable(void) { barrier(); }
-void __weak hw_perf_enable(void) { barrier(); }
-
void __weak perf_event_print_debug(void) { }
-static DEFINE_PER_CPU(int, perf_disable_count);
-
-void perf_disable(void)
+void perf_pmu_disable(struct pmu *pmu)
{
- if (!__get_cpu_var(perf_disable_count)++)
- hw_perf_disable();
+ int *count = this_cpu_ptr(pmu->pmu_disable_count);
+ if (!(*count)++)
+ pmu->pmu_disable(pmu);
}
-void perf_enable(void)
+void perf_pmu_enable(struct pmu *pmu)
{
- if (!--__get_cpu_var(perf_disable_count))
- hw_perf_enable();
+ int *count = this_cpu_ptr(pmu->pmu_disable_count);
+ if (!--(*count))
+ pmu->pmu_enable(pmu);
}
static void get_ctx(struct perf_event_context *ctx)
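Between hunks, a sketch of the callee side under the new scheme. Everything named exmpl_* is made up for illustration; only the pmu_enable/pmu_disable callbacks, which this patch invokes through pmu->pmu_disable(pmu) and pmu->pmu_enable(pmu), come from the patch:

#include <linux/perf_event.h>

/* Hypothetical hardware accessors standing in for real register writes. */
static void exmpl_hw_stop(void)  { /* mask the global counter enable bit */ }
static void exmpl_hw_start(void) { /* unmask it again */ }

static void exmpl_pmu_disable(struct pmu *pmu)
{
	exmpl_hw_stop();	/* reached only when the per-CPU count goes 0 -> 1 */
}

static void exmpl_pmu_enable(struct pmu *pmu)
{
	exmpl_hw_start();	/* reached only when the per-CPU count drops to 0 */
}

static struct pmu exmpl_pmu = {
	.pmu_disable	= exmpl_pmu_disable,
	.pmu_enable	= exmpl_pmu_enable,
	/* per-event callbacks omitted for brevity */
};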
@@ -4970,11 +4967,19 @@ static struct srcu_struct pmus_srcu;
int perf_pmu_register(struct pmu *pmu)
{
+ int ret;
+
mutex_lock(&pmus_lock);
+ ret = -ENOMEM;
+ pmu->pmu_disable_count = alloc_percpu(int);
+ if (!pmu->pmu_disable_count)
+ goto unlock;
list_add_rcu(&pmu->entry, &pmus);
+ ret = 0;
+unlock:
mutex_unlock(&pmus_lock);
- return 0;
+ return ret;
}
void perf_pmu_unregister(struct pmu *pmu)
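Because perf_pmu_register() can now fail (alloc_percpu() may return NULL, yielding -ENOMEM), a driver's init path has to check the return value. A hedged sketch, reusing the hypothetical exmpl_pmu from the previous sketch:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/perf_event.h>

static int __init exmpl_pmu_module_init(void)
{
	int err;

	err = perf_pmu_register(&exmpl_pmu);	/* may now return -ENOMEM */
	if (err)
		pr_err("exmpl_pmu: failed to register: %d\n", err);

	return err;
}

The matching teardown in the next hunk frees pmu_disable_count only after synchronize_srcu(), so SRCU readers walking the pmus list are finished with the pmu before its per-CPU memory goes away.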
@@ -4984,6 +4989,8 @@ void perf_pmu_unregister(struct pmu *pmu)
mutex_unlock(&pmus_lock);
synchronize_srcu(&pmus_srcu);
+
+ free_percpu(pmu->pmu_disable_count);
}
struct pmu *perf_init_event(struct perf_event *event)