author     Arnaldo Carvalho de Melo <acme@redhat.com>   2011-01-12 22:39:13 -0200
committer  Arnaldo Carvalho de Melo <acme@redhat.com>   2011-01-22 19:56:29 -0200
commit     70db7533caef02350ec8d6852e589491bca3a951 (patch)
tree       463dadbec60d585cb8c1de84b230378010d5c8b5 /tools/perf/builtin-top.c
parent     115d2d8963a426670ac3ce983fc4c4e001703943 (diff)
perf evlist: Move the mmap array from perf_evsel
Adopt the new model used in 'perf record': instead of one mmap per thread per cpu, we have one mmap per cpu, established on the first fd for that cpu, and we ask the kernel, via the PERF_EVENT_IOC_SET_OUTPUT ioctl, to send events for the other fds on that cpu to the one with the mmap.

The methods moved from perf_evsel to perf_evlist but, to ease review, they were modified in place in evsel.c; the next patch will move the migrated methods to evlist.c.

With this, 'perf top' now uses the same mmap model as 'perf record', and the next patches will make 'perf record' use these new routines, establishing a common codebase for both tools.

Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Tom Zanussi <tzanussi@gmail.com>
LKML-Reference: <new-submission>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
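To make the model concrete, here is a minimal standalone sketch (illustrative only, not code from this patch; the choice of two hardware events, cpu 0 and a 128-page buffer is arbitrary): both events are opened on the same cpu, only the first fd gets an mmap'ed ring buffer, and PERF_EVENT_IOC_SET_OUTPUT redirects the second event's output into that same buffer.

/*
 * Sketch of the per-cpu mmap model: one ring buffer per cpu, created on the
 * first fd opened for that cpu; every other event fd on the same cpu is
 * redirected into it via PERF_EVENT_IOC_SET_OUTPUT.
 */
#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>

static int perf_event_open(struct perf_event_attr *attr, pid_t pid, int cpu,
			   int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attrs[2];
	const int nr_events = 2, cpu = 0, mmap_pages = 128;
	size_t page_size = sysconf(_SC_PAGE_SIZE);
	void *ring;
	int fd[2], i;

	memset(attrs, 0, sizeof(attrs));
	attrs[0].type = PERF_TYPE_HARDWARE;
	attrs[0].config = PERF_COUNT_HW_CPU_CYCLES;
	attrs[1].type = PERF_TYPE_HARDWARE;
	attrs[1].config = PERF_COUNT_HW_INSTRUCTIONS;

	for (i = 0; i < nr_events; i++) {
		attrs[i].size = sizeof(attrs[i]);
		attrs[i].freq = 1;
		attrs[i].sample_freq = 1000;
		/*
		 * With more than one event sharing a buffer, each sample
		 * carries an id so the reader can tell them apart.
		 */
		attrs[i].sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID |
				       PERF_SAMPLE_ID;
		attrs[i].read_format = PERF_FORMAT_ID;

		/* System-wide on one cpu; may need perf_event_paranoid <= 0. */
		fd[i] = perf_event_open(&attrs[i], -1, cpu, -1, 0);
		if (fd[i] < 0) {
			perror("perf_event_open");
			exit(1);
		}
	}

	/* Map a ring buffer only on the first fd for this cpu... */
	ring = mmap(NULL, (mmap_pages + 1) * page_size,
		    PROT_READ | PROT_WRITE, MAP_SHARED, fd[0], 0);
	if (ring == MAP_FAILED) {
		perror("mmap");
		exit(1);
	}

	/* ...and redirect the other events' output into that same buffer. */
	for (i = 1; i < nr_events; i++)
		if (ioctl(fd[i], PERF_EVENT_IOC_SET_OUTPUT, fd[0]) < 0) {
			perror("PERF_EVENT_IOC_SET_OUTPUT");
			exit(1);
		}

	/* Samples from all fds on this cpu now land in 'ring'. */
	printf("per-cpu ring buffer at %p shared by %d events\n",
	       ring, nr_events);

	munmap(ring, (mmap_pages + 1) * page_size);
	for (i = 0; i < nr_events; i++)
		close(fd[i]);
	return 0;
}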
Diffstat (limited to 'tools/perf/builtin-top.c')
-rw-r--r--  tools/perf/builtin-top.c |  56
1 file changed, 28 insertions(+), 28 deletions(-)
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 7d723ad..df85c1f 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -78,7 +78,7 @@ static struct cpu_map *cpus;
static int realtime_prio = 0;
static bool group = false;
static unsigned int page_size;
-static unsigned int mmap_pages = 16;
+static unsigned int mmap_pages = 128;
static int freq = 1000; /* 1 KHz */
static int delay_secs = 2;
@@ -991,8 +991,7 @@ static int symbol_filter(struct map *map, struct symbol *sym)
static void event__process_sample(const event_t *self,
struct sample_data *sample,
- struct perf_session *session,
- struct perf_evsel *evsel)
+ struct perf_session *session)
{
u64 ip = self->ip.ip;
struct sym_entry *syme;
@@ -1085,8 +1084,12 @@ static void event__process_sample(const event_t *self,
syme = symbol__priv(al.sym);
if (!syme->skip) {
- syme->count[evsel->idx]++;
+ struct perf_evsel *evsel;
+
syme->origin = origin;
+ evsel = perf_evlist__id2evsel(evsel_list, sample->id);
+ assert(evsel != NULL);
+ syme->count[evsel->idx]++;
record_precise_ip(syme, evsel->idx, ip);
pthread_mutex_lock(&active_symbols_lock);
if (list_empty(&syme->node) || !syme->node.next)
@@ -1095,11 +1098,9 @@ static void event__process_sample(const event_t *self,
}
}
-static void perf_session__mmap_read_counter(struct perf_session *self,
- struct perf_evsel *evsel,
- int cpu, int thread_idx)
+static void perf_session__mmap_read_cpu(struct perf_session *self, int cpu)
{
- struct perf_mmap *md = xyarray__entry(evsel->mmap, cpu, thread_idx);
+ struct perf_mmap *md = &evsel_list->mmap[cpu];
unsigned int head = perf_mmap__read_head(md);
unsigned int old = md->prev;
unsigned char *data = md->base + page_size;
@@ -1153,7 +1154,7 @@ static void perf_session__mmap_read_counter(struct perf_session *self,
event__parse_sample(event, self, &sample);
if (event->header.type == PERF_RECORD_SAMPLE)
- event__process_sample(event, &sample, self, evsel);
+ event__process_sample(event, &sample, self);
else
event__process(event, &sample, self);
old += size;
@@ -1164,19 +1165,10 @@ static void perf_session__mmap_read_counter(struct perf_session *self,
static void perf_session__mmap_read(struct perf_session *self)
{
- struct perf_evsel *counter;
- int i, thread_index;
-
- for (i = 0; i < cpus->nr; i++) {
- list_for_each_entry(counter, &evsel_list->entries, node) {
- for (thread_index = 0;
- thread_index < threads->nr;
- thread_index++) {
- perf_session__mmap_read_counter(self,
- counter, i, thread_index);
- }
- }
- }
+ int i;
+
+ for (i = 0; i < cpus->nr; i++)
+ perf_session__mmap_read_cpu(self, i);
}
static void start_counters(struct perf_evlist *evlist)
@@ -1194,6 +1186,11 @@ static void start_counters(struct perf_evlist *evlist)
attr->sample_freq = freq;
}
+ if (evlist->nr_entries > 1) {
+ attr->sample_type |= PERF_SAMPLE_ID;
+ attr->read_format |= PERF_FORMAT_ID;
+ }
+
attr->mmap = 1;
try_again:
if (perf_evsel__open(counter, cpus, threads, group, inherit) < 0) {
@@ -1225,15 +1222,16 @@ try_again:
die("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
exit(-1);
}
-
- if (perf_evsel__mmap(counter, cpus, threads, mmap_pages, evlist) < 0)
- die("failed to mmap with %d (%s)\n", errno, strerror(errno));
}
+
+ if (perf_evlist__mmap(evlist, cpus, threads, mmap_pages, true) < 0)
+ die("failed to mmap with %d (%s)\n", errno, strerror(errno));
}
static int __cmd_top(void)
{
pthread_t thread;
+ struct perf_evsel *first;
int ret;
/*
* FIXME: perf_session__new should allow passing a O_MMAP, so that all this
@@ -1249,6 +1247,8 @@ static int __cmd_top(void)
event__synthesize_threads(event__process, session);
start_counters(evsel_list);
+ first = list_entry(evsel_list->entries.next, struct perf_evsel, node);
+ perf_session__set_sample_type(session, first->attr.sample_type);
/* Wait for a minimal set of events before starting the snapshot */
poll(evsel_list->pollfd, evsel_list->nr_fds, 100);
@@ -1394,8 +1394,7 @@ int cmd_top(int argc, const char **argv, const char *prefix __used)
usage_with_options(top_usage, options);
list_for_each_entry(pos, &evsel_list->entries, node) {
- if (perf_evsel__alloc_mmap(pos, cpus->nr, threads->nr) < 0 ||
- perf_evsel__alloc_fd(pos, cpus->nr, threads->nr) < 0)
+ if (perf_evsel__alloc_fd(pos, cpus->nr, threads->nr) < 0)
goto out_free_fd;
/*
* Fill in the ones not specifically initialized via -c:
@@ -1406,7 +1405,8 @@ int cmd_top(int argc, const char **argv, const char *prefix __used)
pos->attr.sample_period = default_interval;
}
- if (perf_evlist__alloc_pollfd(evsel_list, cpus->nr, threads->nr) < 0)
+ if (perf_evlist__alloc_pollfd(evsel_list, cpus->nr, threads->nr) < 0 ||
+ perf_evlist__alloc_mmap(evsel_list, cpus->nr) < 0)
goto out_free_fd;
sym_evsel = list_entry(evsel_list->entries.next, struct perf_evsel, node);
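Because multiple events now share one ring buffer per cpu, each PERF_RECORD_SAMPLE carries an id (PERF_SAMPLE_ID) that event__process_sample() resolves back to an evsel with perf_evlist__id2evsel(), as seen in the hunk above. Below is a hypothetical sketch of that id-to-event mapping, not the actual perf implementation (which keeps its own id table inside the evlist): each event's kernel-assigned id is read once via read() with PERF_FORMAT_ID, stored, and later looked up when a sample is decoded.

#include <linux/perf_event.h>
#include <stdint.h>
#include <unistd.h>

struct id_map_entry {
	uint64_t id;	/* kernel-assigned sample id */
	int	 idx;	/* index of the event ("evsel") it belongs to */
};

/*
 * With PERF_FORMAT_ID and no other read_format bits set, read() on the
 * event fd returns { value, id }.
 */
struct read_format_id {
	uint64_t value;
	uint64_t id;
};

/* Record the id of event 'idx', whose file descriptor is 'fd'. */
static int id_map__add(struct id_map_entry *map, int idx, int fd)
{
	struct read_format_id rf;

	if (read(fd, &rf, sizeof(rf)) != sizeof(rf))
		return -1;
	map[idx].id  = rf.id;
	map[idx].idx = idx;
	return 0;
}

/*
 * Counterpart, in spirit, of perf_evlist__id2evsel(): map the id found in a
 * PERF_RECORD_SAMPLE back to the index of the event that produced it.
 */
static int id_map__lookup(const struct id_map_entry *map, int nr, uint64_t id)
{
	int i;

	for (i = 0; i < nr; i++)
		if (map[i].id == id)
			return map[i].idx;
	return -1;
}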