author     Peter Zijlstra <a.p.zijlstra@chello.nl>    2009-06-15 15:05:12 +0200
committer  Ingo Molnar <mingo@elte.hu>                2009-06-15 15:57:49 +0200
commit     75f937f24bd9c003dcb9d7d5509f23459f1f6000 (patch)
tree       859d5ebe9b47b952d13873d7a2a580012682aeb0   /kernel/perf_counter.c
parent     613d8602292165f86ba1969784fea01a06d55900 (diff)
perf_counter: Fix ctx->mutex vs counter->mutex inversion
Simon triggered a lockdep inversion report about us taking ctx->mutex vs
counter->mutex in inverse orders. Fix that up.

Reported-by: Simon Holm Thøgersen <odie@cs.aau.dk>
Tested-by: Simon Holm Thøgersen <odie@cs.aau.dk>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
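For readers coming from the lockdep report: an inversion is flagged whenever two code paths acquire the same pair of mutexes in opposite orders, because each path can then end up waiting on the lock the other already holds. The snippet below is only a minimal ABBA illustration with made-up lock and function names, not the perf code itself:

#include <linux/mutex.h>

static DEFINE_MUTEX(lock_a);            /* stands in for ctx->mutex */
static DEFINE_MUTEX(lock_b);            /* stands in for counter->mutex */

/* Path one nests b inside a ... */
static void path_one(void)
{
        mutex_lock(&lock_a);
        mutex_lock(&lock_b);
        /* ... work ... */
        mutex_unlock(&lock_b);
        mutex_unlock(&lock_a);
}

/*
 * ... path two nests a inside b. If both paths can run concurrently,
 * lockdep reports the inverted ordering even before a real deadlock hits.
 */
static void path_two(void)
{
        mutex_lock(&lock_b);
        mutex_lock(&lock_a);
        /* ... work ... */
        mutex_unlock(&lock_a);
        mutex_unlock(&lock_b);
}

The patch below removes the second ordering by making every counter-iteration path take ctx->mutex before any per-counter mutex.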
Diffstat (limited to 'kernel/perf_counter.c')
-rw-r--r--  kernel/perf_counter.c | 34
1 file changed, 11 insertions(+), 23 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index e914daf..109a957 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -1620,22 +1620,6 @@ static void perf_counter_reset(struct perf_counter *counter)
 	perf_counter_update_userpage(counter);
 }
 
-static void perf_counter_for_each_sibling(struct perf_counter *counter,
-					  void (*func)(struct perf_counter *))
-{
-	struct perf_counter_context *ctx = counter->ctx;
-	struct perf_counter *sibling;
-
-	WARN_ON_ONCE(ctx->parent_ctx);
-	mutex_lock(&ctx->mutex);
-	counter = counter->group_leader;
-
-	func(counter);
-	list_for_each_entry(sibling, &counter->sibling_list, list_entry)
-		func(sibling);
-	mutex_unlock(&ctx->mutex);
-}
-
 /*
  * Holding the top-level counter's child_mutex means that any
  * descendant process that has inherited this counter will block
@@ -1658,14 +1642,18 @@ static void perf_counter_for_each_child(struct perf_counter *counter,
 static void perf_counter_for_each(struct perf_counter *counter,
 				  void (*func)(struct perf_counter *))
 {
-	struct perf_counter *child;
+	struct perf_counter_context *ctx = counter->ctx;
+	struct perf_counter *sibling;
 
-	WARN_ON_ONCE(counter->ctx->parent_ctx);
-	mutex_lock(&counter->child_mutex);
-	perf_counter_for_each_sibling(counter, func);
-	list_for_each_entry(child, &counter->child_list, child_list)
-		perf_counter_for_each_sibling(child, func);
-	mutex_unlock(&counter->child_mutex);
+	WARN_ON_ONCE(ctx->parent_ctx);
+	mutex_lock(&ctx->mutex);
+	counter = counter->group_leader;
+
+	perf_counter_for_each_child(counter, func);
+	func(counter);
+	list_for_each_entry(sibling, &counter->sibling_list, list_entry)
+		perf_counter_for_each_child(counter, func);
+	mutex_unlock(&ctx->mutex);
 }
 
 static int perf_counter_period(struct perf_counter *counter, u64 __user *arg)
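As a reading aid, and not part of the patch: with this change ctx->mutex is always taken first, and counter->child_mutex (taken inside perf_counter_for_each_child()) always nests inside it, so no iteration path acquires the pair in the reverse order. A rough schematic of that nesting, using a hypothetical helper name and eliding the actual child iteration:

/*
 * Schematic only, not the kernel code itself: the lock nesting the
 * patch establishes for walking a counter group.
 */
static void iterate_counter_schematic(struct perf_counter *counter)
{
        mutex_lock(&counter->ctx->mutex);       /* outer lock: the context */

        mutex_lock(&counter->child_mutex);      /* inner lock, as taken by
                                                 * perf_counter_for_each_child() */
        /* ... visit this counter and its inherited children ... */
        mutex_unlock(&counter->child_mutex);

        mutex_unlock(&counter->ctx->mutex);
}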