author		Peter Zijlstra <peterz@infradead.org>	2016-01-08 09:29:16 +0100
committer	Ingo Molnar <mingo@kernel.org>		2016-01-21 18:54:19 +0100
commit		70a0165752944e0be0b1de4a9020473079962c18 (patch)
tree		8ff05d0e0829991eebbdcf56a7856f539e91c8c2 /kernel
parent		7e41d17753e6e0da55d343997454dd4fbe8d28a8 (diff)
perf: Fix cgroup scheduling in perf_enable_on_exec()
There is a comment stating that perf_event_context_sched_in() will also
switch in the cgroup events, but I cannot find that it does so. Therefore
all the resulting logic goes out the window too. Clean that up.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
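For context, this is roughly what perf_event_context_sched_in() looked like
in kernels of this era (a condensed sketch from memory of the v4.4-era
source, not a verbatim quote; verify details against your tree). Note that
nothing in it touches cgroup state, which is exactly the point of the
commit message:

static void perf_event_context_sched_in(struct perf_event_context *ctx,
					struct task_struct *task)
{
	struct perf_cpu_context *cpuctx;

	cpuctx = __get_cpu_context(ctx);
	if (cpuctx->task_ctx == ctx)
		return;

	perf_ctx_lock(cpuctx, ctx);
	perf_pmu_disable(ctx->pmu);
	/*
	 * We want to keep the following priority order:
	 * cpu pinned (that don't need to move), task pinned,
	 * cpu flexible, task flexible.
	 */
	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
	perf_event_sched_in(cpuctx, ctx, task);
	perf_pmu_enable(ctx->pmu);
	perf_ctx_unlock(cpuctx, ctx);

	/* No perf_cgroup_switch() or other cgroup handling anywhere here. */
}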
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/events/core.c	31
1 file changed, 7 insertions(+), 24 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 9d1195a..e7bda0e 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -579,13 +579,7 @@ static inline void perf_cgroup_sched_out(struct task_struct *task,
* we are holding the rcu lock
*/
cgrp1 = perf_cgroup_from_task(task, NULL);
-
- /*
- * next is NULL when called from perf_event_enable_on_exec()
- * that will systematically cause a cgroup_switch()
- */
- if (next)
- cgrp2 = perf_cgroup_from_task(next, NULL);
+ cgrp2 = perf_cgroup_from_task(next, NULL);
/*
* only schedule out current cgroup events if we know
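After this hunk, perf_cgroup_sched_out() reads roughly as follows (a sketch
assembled from the context lines above plus the surrounding v4.4-era source;
the lines not shown in the hunk are assumptions). With the NULL special case
gone, every caller must now pass a valid next task:

static inline void perf_cgroup_sched_out(struct task_struct *task,
					 struct task_struct *next)
{
	struct perf_cgroup *cgrp1;
	struct perf_cgroup *cgrp2 = NULL;

	rcu_read_lock();
	/*
	 * we do not need to pass the ctx here because we know
	 * we are holding the rcu lock
	 */
	cgrp1 = perf_cgroup_from_task(task, NULL);
	cgrp2 = perf_cgroup_from_task(next, NULL);	/* next must not be NULL */

	/*
	 * only schedule out current cgroup events if we know
	 * that we are switching to a different cgroup
	 */
	if (cgrp1 != cgrp2)
		perf_cgroup_switch(task, PERF_CGROUP_SWOUT);

	rcu_read_unlock();
}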
@@ -611,8 +605,6 @@ static inline void perf_cgroup_sched_in(struct task_struct *prev,
* we are holding the rcu lock
*/
cgrp1 = perf_cgroup_from_task(task, NULL);
-
- /* prev can never be NULL */
cgrp2 = perf_cgroup_from_task(prev, NULL);
/*
@@ -1450,11 +1442,14 @@ list_del_event(struct perf_event *event, struct perf_event_context *ctx)
if (is_cgroup_event(event)) {
ctx->nr_cgroups--;
+ /*
+ * Because cgroup events are always per-cpu events, this will
+ * always be called from the right CPU.
+ */
cpuctx = __get_cpu_context(ctx);
/*
- * if there are no more cgroup events
- * then cler cgrp to avoid stale pointer
- * in update_cgrp_time_from_cpuctx()
+ * If there are no more cgroup events then clear cgrp to avoid
+ * stale pointer in update_cgrp_time_from_cpuctx().
*/
if (!ctx->nr_cgroups)
cpuctx->cgrp = NULL;
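The new comment in this hunk hinges on how the cpu context is looked up:
__get_cpu_context() resolves a per-CPU pointer relative to the CPU we are
currently running on, so its result is only meaningful when we are on the
CPU the cgroup event belongs to. A sketch of that helper, as it appears in
kernels of this era:

static struct perf_cpu_context *
__get_cpu_context(struct perf_event_context *ctx)
{
	/*
	 * this_cpu_ptr() resolves for the current CPU, hence the
	 * "always called from the right CPU" requirement above.
	 */
	return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
}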
@@ -3118,15 +3113,6 @@ static void perf_event_enable_on_exec(int ctxn)
if (!ctx || !ctx->nr_events)
goto out;
- /*
- * We must ctxsw out cgroup events to avoid conflict
- * when invoking perf_task_event_sched_in() later on
- * in this function. Otherwise we end up trying to
- * ctxswin cgroup events which are already scheduled
- * in.
- */
- perf_cgroup_sched_out(current, NULL);
-
raw_spin_lock(&ctx->lock);
task_ctx_sched_out(ctx);
@@ -3144,9 +3130,6 @@ static void perf_event_enable_on_exec(int ctxn)
raw_spin_unlock(&ctx->lock);
- /*
- * Also calls ctxswin for cgroup events, if any:
- */
perf_event_context_sched_in(ctx, ctx->task);
out:
local_irq_restore(flags);
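Putting the last two hunks together, perf_event_enable_on_exec() after the
patch reduces to the following shape (a sketch reconstructed from the hunks;
the event-enabling loop in the middle is condensed and the exact locals are
assumptions):

static void perf_event_enable_on_exec(int ctxn)
{
	struct perf_event_context *ctx;
	unsigned long flags;

	local_irq_save(flags);
	ctx = current->perf_event_ctxp[ctxn];
	if (!ctx || !ctx->nr_events)
		goto out;

	/* The perf_cgroup_sched_out(current, NULL) call is gone. */
	raw_spin_lock(&ctx->lock);
	task_ctx_sched_out(ctx);

	/* ... walk the context and enable the events marked enable-on-exec ... */

	raw_spin_unlock(&ctx->lock);

	/*
	 * This only (re)schedules the task context itself; cgroup events
	 * are untouched, which is why the sched_out above was dead weight.
	 */
	perf_event_context_sched_in(ctx, ctx->task);
out:
	local_irq_restore(flags);
}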