path: root/kernel/perf_event.c
author     Peter Zijlstra <a.p.zijlstra@chello.nl>  2010-06-11 17:32:03 +0200
committer  Ingo Molnar <mingo@elte.hu>              2010-09-09 20:46:29 +0200
commit     24cd7f54a0d47e1d5b3de29e2456bfbd2d8447b7 (patch)
tree       a37d3a4cb101e3f67635a1920f447c9e9e8d8ab2 /kernel/perf_event.c
parent     9ed6060d286b1eb55974d09080f442f809408c42 (diff)
download   op-kernel-dev-24cd7f54a0d47e1d5b3de29e2456bfbd2d8447b7.zip
           op-kernel-dev-24cd7f54a0d47e1d5b3de29e2456bfbd2d8447b7.tar.gz
perf: Reduce perf_disable() usage
Since the current perf_disable() usage is only an optimization, remove it
for now. This eases the removal of the __weak hw_perf_enable() interface.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: paulus <paulus@samba.org>
Cc: stephane eranian <eranian@googlemail.com>
Cc: Robert Richter <robert.richter@amd.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Cyrill Gorcunov <gorcunov@gmail.com>
Cc: Lin Ming <ming.m.lin@intel.com>
Cc: Yanmin <yanmin_zhang@linux.intel.com>
Cc: Deng-Cheng Zhu <dengcheng.zhu@gmail.com>
Cc: David Miller <davem@davemloft.net>
Cc: Michael Cree <mcree@orcon.net.nz>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
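For reference, the interface being phased out worked roughly as sketched below: perf_disable()/perf_enable() kept a per-CPU nesting count around the __weak hw_perf_disable()/hw_perf_enable() hooks that architecture code could override to stop and restart the whole PMU. This is a simplified reconstruction for illustration, not the exact tree contents; the per-CPU counter name and accessors are approximate.

	/* Simplified sketch of the pre-patch helpers (illustrative only). */
	void __weak hw_perf_disable(void)	{ barrier(); }
	void __weak hw_perf_enable(void)	{ barrier(); }

	static DEFINE_PER_CPU(int, perf_disable_count);

	void perf_disable(void)
	{
		/* First disable on this CPU switches the hardware PMU off. */
		if (!__get_cpu_var(perf_disable_count)++)
			hw_perf_disable();
	}

	void perf_enable(void)
	{
		/* Matching last enable switches the hardware PMU back on. */
		if (!--__get_cpu_var(perf_disable_count))
			hw_perf_enable();
	}

With most call sites dropped by the hunks below, removing the __weak hooks themselves becomes easier, which is the stated goal of the changelog.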
Diffstat (limited to 'kernel/perf_event.c')
-rw-r--r--  kernel/perf_event.c  37
1 file changed, 1 insertion(+), 36 deletions(-)
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 149ca18..9a98ce9 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -478,11 +478,6 @@ static void __perf_event_remove_from_context(void *info)
return;
raw_spin_lock(&ctx->lock);
- /*
- * Protect the list operation against NMI by disabling the
- * events on a global level.
- */
- perf_disable();
event_sched_out(event, cpuctx, ctx);
@@ -498,7 +493,6 @@ static void __perf_event_remove_from_context(void *info)
perf_max_events - perf_reserved_percpu);
}
- perf_enable();
raw_spin_unlock(&ctx->lock);
}
@@ -803,12 +797,6 @@ static void __perf_install_in_context(void *info)
ctx->is_active = 1;
update_context_time(ctx);
- /*
- * Protect the list operation against NMI by disabling the
- * events on a global level. NOP for non NMI based events.
- */
- perf_disable();
-
add_event_to_ctx(event, ctx);
if (event->cpu != -1 && event->cpu != smp_processor_id())
@@ -850,8 +838,6 @@ static void __perf_install_in_context(void *info)
cpuctx->max_pertask--;
unlock:
- perf_enable();
-
raw_spin_unlock(&ctx->lock);
}
@@ -972,12 +958,10 @@ static void __perf_event_enable(void *info)
if (!group_can_go_on(event, cpuctx, 1)) {
err = -EEXIST;
} else {
- perf_disable();
if (event == leader)
err = group_sched_in(event, cpuctx, ctx);
else
err = event_sched_in(event, cpuctx, ctx);
- perf_enable();
}
if (err) {
@@ -1090,9 +1074,8 @@ static void ctx_sched_out(struct perf_event_context *ctx,
goto out;
update_context_time(ctx);
- perf_disable();
if (!ctx->nr_active)
- goto out_enable;
+ goto out;
if (event_type & EVENT_PINNED) {
list_for_each_entry(event, &ctx->pinned_groups, group_entry)
@@ -1103,9 +1086,6 @@ static void ctx_sched_out(struct perf_event_context *ctx,
list_for_each_entry(event, &ctx->flexible_groups, group_entry)
group_sched_out(event, cpuctx, ctx);
}
-
- out_enable:
- perf_enable();
out:
raw_spin_unlock(&ctx->lock);
}
@@ -1364,8 +1344,6 @@ ctx_sched_in(struct perf_event_context *ctx,
ctx->timestamp = perf_clock();
- perf_disable();
-
/*
* First go through the list and put on any pinned groups
* in order to give them the best chance of going on.
@@ -1377,7 +1355,6 @@ ctx_sched_in(struct perf_event_context *ctx,
if (event_type & EVENT_FLEXIBLE)
ctx_flexible_sched_in(ctx, cpuctx);
- perf_enable();
out:
raw_spin_unlock(&ctx->lock);
}
@@ -1425,8 +1402,6 @@ void perf_event_task_sched_in(struct task_struct *task)
if (cpuctx->task_ctx == ctx)
return;
- perf_disable();
-
/*
* We want to keep the following priority order:
* cpu pinned (that don't need to move), task pinned,
@@ -1439,8 +1414,6 @@ void perf_event_task_sched_in(struct task_struct *task)
ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE);
cpuctx->task_ctx = ctx;
-
- perf_enable();
}
#define MAX_INTERRUPTS (~0ULL)
@@ -1555,11 +1528,9 @@ static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
hwc->sample_period = sample_period;
if (local64_read(&hwc->period_left) > 8*sample_period) {
- perf_disable();
perf_event_stop(event);
local64_set(&hwc->period_left, 0);
perf_event_start(event);
- perf_enable();
}
}
@@ -1588,15 +1559,12 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
*/
if (interrupts == MAX_INTERRUPTS) {
perf_log_throttle(event, 1);
- perf_disable();
event->pmu->unthrottle(event);
- perf_enable();
}
if (!event->attr.freq || !event->attr.sample_freq)
continue;
- perf_disable();
event->pmu->read(event);
now = local64_read(&event->count);
delta = now - hwc->freq_count_stamp;
@@ -1604,7 +1572,6 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
if (delta > 0)
perf_adjust_period(event, TICK_NSEC, delta);
- perf_enable();
}
raw_spin_unlock(&ctx->lock);
}
@@ -1647,7 +1614,6 @@ void perf_event_task_tick(struct task_struct *curr)
if (!rotate)
return;
- perf_disable();
cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
if (ctx)
task_ctx_sched_out(ctx, EVENT_FLEXIBLE);
@@ -1659,7 +1625,6 @@ void perf_event_task_tick(struct task_struct *curr)
cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
if (ctx)
task_ctx_sched_in(curr, EVENT_FLEXIBLE);
- perf_enable();
}
static int event_enable_on_exec(struct perf_event *event,