author    | Ingo Molnar <mingo@kernel.org> | 2016-01-06 11:07:04 +0100
committer | Ingo Molnar <mingo@kernel.org> | 2016-01-06 11:07:04 +0100
commit    | 9cc96b0a211fd54f4bf314e7ef6e207ac5bce0c7 (patch)
tree      | 90500cffe918433bc187b1e572fda04a0c13f6aa
parent    | d64fe8e6b34147c2590c7bb29efd7b24e91866ef (diff)
parent    | 12ca6ad2e3a896256f086497a7c7406a547ee373 (diff)
download  | op-kernel-dev-9cc96b0a211fd54f4bf314e7ef6e207ac5bce0c7.zip
          | op-kernel-dev-9cc96b0a211fd54f4bf314e7ef6e207ac5bce0c7.tar.gz
Merge branch 'perf/urgent' into perf/core, to pick up fixes before applying new changes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r-- | kernel/events/core.c | 35
1 file changed, 6 insertions(+), 29 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index a627f36..2c7bb20 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3136,15 +3136,16 @@ static int event_enable_on_exec(struct perf_event *event,
  * Enable all of a task's events that have been marked enable-on-exec.
  * This expects task == current.
  */
-static void perf_event_enable_on_exec(struct perf_event_context *ctx)
+static void perf_event_enable_on_exec(int ctxn)
 {
-	struct perf_event_context *clone_ctx = NULL;
+	struct perf_event_context *ctx, *clone_ctx = NULL;
 	struct perf_event *event;
 	unsigned long flags;
 	int enabled = 0;
 	int ret;
 
 	local_irq_save(flags);
+	ctx = current->perf_event_ctxp[ctxn];
 	if (!ctx || !ctx->nr_events)
 		goto out;
 
@@ -3187,17 +3188,11 @@ out:
 
 void perf_event_exec(void)
 {
-	struct perf_event_context *ctx;
 	int ctxn;
 
 	rcu_read_lock();
-	for_each_task_context_nr(ctxn) {
-		ctx = current->perf_event_ctxp[ctxn];
-		if (!ctx)
-			continue;
-
-		perf_event_enable_on_exec(ctx);
-	}
+	for_each_task_context_nr(ctxn)
+		perf_event_enable_on_exec(ctxn);
 	rcu_read_unlock();
 }
 
@@ -6465,9 +6460,6 @@ struct swevent_htable {
 
 	/* Recursion avoidance in each contexts */
 	int			recursion[PERF_NR_CONTEXTS];
-
-	/* Keeps track of cpu being initialized/exited */
-	bool			online;
 };
 
 static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
@@ -6725,14 +6717,8 @@ static int perf_swevent_add(struct perf_event *event, int flags)
 	hwc->state = !(flags & PERF_EF_START);
 
 	head = find_swevent_head(swhash, event);
-	if (!head) {
-		/*
-		 * We can race with cpu hotplug code. Do not
-		 * WARN if the cpu just got unplugged.
-		 */
-		WARN_ON_ONCE(swhash->online);
+	if (WARN_ON_ONCE(!head))
 		return -EINVAL;
-	}
 
 	hlist_add_head_rcu(&event->hlist_entry, head);
 	perf_event_update_userpage(event);
@@ -6800,7 +6786,6 @@ static int swevent_hlist_get_cpu(struct perf_event *event, int cpu)
 	int err = 0;
 
 	mutex_lock(&swhash->hlist_mutex);
-
 	if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) {
 		struct swevent_hlist *hlist;
 
@@ -9263,7 +9248,6 @@ static void perf_event_init_cpu(int cpu)
 	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
 
 	mutex_lock(&swhash->hlist_mutex);
-	swhash->online = true;
 	if (swhash->hlist_refcount > 0) {
 		struct swevent_hlist *hlist;
 
@@ -9305,14 +9289,7 @@ static void perf_event_exit_cpu_context(int cpu)
 
 static void perf_event_exit_cpu(int cpu)
 {
-	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
-
 	perf_event_exit_cpu_context(cpu);
-
-	mutex_lock(&swhash->hlist_mutex);
-	swhash->online = false;
-	swevent_hlist_release(swhash);
-	mutex_unlock(&swhash->hlist_mutex);
 }
 #else
 static inline void perf_event_exit_cpu(int cpu) { }
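A note on the perf_swevent_add() hunk above: the kernel's WARN_ON_ONCE() macro evaluates to the condition it tests, which is what lets the old seven-line "if (!head) { ... WARN_ON_ONCE(swhash->online); return -EINVAL; }" block collapse into the single "if (WARN_ON_ONCE(!head)) return -EINVAL;". The sketch below is a minimal userspace illustration of that idiom, not kernel code; the WARN_ON_ONCE definition here is a simplified stand-in built on the same GCC/Clang statement-expression extension the real macro uses, and add_event() is a hypothetical helper standing in for perf_swevent_add().

```c
/*
 * Minimal userspace sketch (not kernel code) of the WARN_ON_ONCE()
 * idiom.  Like the kernel macro, this stand-in evaluates to the
 * tested condition, so it can sit directly inside an if ().
 */
#include <stdio.h>
#include <errno.h>

#define WARN_ON_ONCE(cond) ({						\
	static int __warned;						\
	int __ret = !!(cond);						\
	if (__ret && !__warned) {					\
		__warned = 1;						\
		fprintf(stderr, "WARNING at %s:%d: %s\n",		\
			__FILE__, __LINE__, #cond);			\
	}								\
	__ret;								\
})

/* Hypothetical helper mirroring the new check in perf_swevent_add(). */
static int add_event(void *head)
{
	if (WARN_ON_ONCE(!head))	/* warn (once) and bail if no hlist head */
		return -EINVAL;
	return 0;
}

int main(void)
{
	int dummy;

	printf("with head:    %d\n", add_event(&dummy));	/* 0 */
	printf("without head: %d\n", add_event(NULL));		/* -EINVAL, warns */
	printf("without head: %d\n", add_event(NULL));		/* -EINVAL, silent */
	return 0;
}
```

In the kernel proper, WARN_ON_ONCE() comes from include/asm-generic/bug.h and likewise returns the (unlikely-annotated) condition, so using it inside the if () does not change the error path, only the warning bookkeeping.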