commit 6c502584438bda63fc1a67606854fb0b300465cd
Author:     Jiri Olsa <jolsa@kernel.org>  2016-07-04 14:16:23 +0200
Committer:  Arnaldo Carvalho de Melo <acme@redhat.com>  2016-07-04 20:27:25 -0300
Tree:       670c04d2c278b314f4698922f6778aa73c6e963d
Parent:     a2873325ffb21cecca8032673eb698cb4d778dc6
perf unwind: Call unwind__prepare_access for forked thread
Currently we call unwind__prepare_access for the map event. In case we report a fork event, the thread inherits its parent's maps and unwind__prepare_access is never called for the thread. This causes unwind__get_entries to see uninitialized unwind_libunwind_ops and thus return no callchain.

Add unwind__prepare_access calls for fork event processing.

Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Cc: David Ahern <dsahern@gmail.com>
Cc: He Kuang <hekuang@huawei.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1467634583-29147-5-git-send-email-jolsa@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Diffstat (limited to 'tools/perf/util/thread.c')
 tools/perf/util/thread.c | 37 +++++++++++++++++++++++++++++++++++--
 1 file changed, 35 insertions(+), 2 deletions(-)
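The shape of the bug is easier to see outside perf's codebase. Below is a minimal, self-contained C sketch of the pattern (the names struct thread, prepare_access and clone_thread are hypothetical stand-ins, not perf's actual types): unwind state is normally initialized from the map event path, so a child that inherits its parent's maps never sees those events and stays uninitialized unless the clone path prepares it explicitly.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for perf's thread/map structures. */
struct map {
	struct map *next;
};

struct thread {
	struct map *maps;	/* shared with the parent on clone */
	bool unwind_ready;	/* normally set from the map event path */
};

/* Loosely corresponds to unwind__prepare_access() in perf. */
static void prepare_access(struct thread *t)
{
	t->unwind_ready = true;
}

static struct thread *clone_thread(struct thread *parent)
{
	struct thread *child = calloc(1, sizeof(*child));

	child->maps = parent->maps;	/* inherited: no new map events fire */
	prepare_access(child);		/* the fix: prepare at clone time;
					 * without this call, unwind_ready
					 * would stay false forever */
	return child;
}

int main(void)
{
	struct thread parent = { .maps = NULL };

	prepare_access(&parent);	/* parent was set up by a map event */

	struct thread *child = clone_thread(&parent);
	printf("child unwind ready: %s\n", child->unwind_ready ? "yes" : "no");
	free(child);
	return 0;
}

The commit below applies the same idea in thread__clone_map_groups(): when a forked thread shares or copies its parent's map groups, walk those maps once and call unwind__prepare_access(), stopping as soon as one map initializes the unwinder.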
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index 2439b12..8b10a55 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -212,6 +212,39 @@ int thread__insert_map(struct thread *thread, struct map *map)
 	return 0;
 }
 
+static int __thread__prepare_access(struct thread *thread)
+{
+	bool initialized = false;
+	int i, err = 0;
+
+	for (i = 0; i < MAP__NR_TYPES; ++i) {
+		struct maps *maps = &thread->mg->maps[i];
+		struct map *map;
+
+		pthread_rwlock_rdlock(&maps->lock);
+
+		for (map = maps__first(maps); map; map = map__next(map)) {
+			err = unwind__prepare_access(thread, map, &initialized);
+			if (err || initialized)
+				break;
+		}
+
+		pthread_rwlock_unlock(&maps->lock);
+	}
+
+	return err;
+}
+
+static int thread__prepare_access(struct thread *thread)
+{
+	int err = 0;
+
+	if (symbol_conf.use_callchain)
+		err = __thread__prepare_access(thread);
+
+	return err;
+}
+
 static int thread__clone_map_groups(struct thread *thread,
 				    struct thread *parent)
 {
@@ -219,7 +252,7 @@ static int thread__clone_map_groups(struct thread *thread,
 
 	/* This is new thread, we share map groups for process. */
 	if (thread->pid_ == parent->pid_)
-		return 0;
+		return thread__prepare_access(thread);
 
 	if (thread->mg == parent->mg) {
 		pr_debug("broken map groups on thread %d/%d parent %d/%d\n",
@@ -229,7 +262,7 @@ static int thread__clone_map_groups(struct thread *thread,
 
 	/* But this one is new process, copy maps. */
 	for (i = 0; i < MAP__NR_TYPES; ++i)
-		if (map_groups__clone(thread->mg, parent->mg, i) < 0)
+		if (map_groups__clone(thread, parent->mg, i) < 0)
 			return -ENOMEM;
 
 	return 0;
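A way to exercise the fixed path (a sketch, not from the commit; ./forking-workload stands for any program that forks child processes): record DWARF callchains, which enables callchain processing on the report side, and confirm that stacks now resolve for the forked children:

  $ perf record --call-graph dwarf -- ./forking-workload
  $ perf report

Before this commit, samples from the children would show no callchain because their threads never went through unwind__prepare_access().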