From f51304d3fe4fee475991ee424a4b7f85eec65a7b Mon Sep 17 00:00:00 2001 From: David Ahern Date: Fri, 20 Jul 2012 17:25:46 -0600 Subject: perf symbols: Add machine id to modules debug message Current debug message is: Problems creating module maps, continuing anyway... When running multiple VMs it would be nice to know which machine the message is referring to: $ perf kvm --guest --guestmount=/tmp/guest-mount record -av -- sleep 10 Problems creating module maps for guest 6613, continuing anyway... Signed-off-by: David Ahern Cc: Frederic Weisbecker Cc: Ingo Molnar Cc: Jiri Olsa Cc: Namhyung Kim Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1342826756-64663-2-git-send-email-dsahern@gmail.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/symbol.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 50958bb..66c132e 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -2564,8 +2564,15 @@ int machine__create_kernel_maps(struct machine *machine) __machine__create_kernel_maps(machine, kernel) < 0) return -1; - if (symbol_conf.use_modules && machine__create_modules(machine) < 0) - pr_debug("Problems creating module maps, continuing anyway...\n"); + if (symbol_conf.use_modules && machine__create_modules(machine) < 0) { + if (machine__is_host(machine)) + pr_debug("Problems creating module maps, " + "continuing anyway...\n"); + else + pr_debug("Problems creating module maps for guest %d, " + "continuing anyway...\n", machine->pid); + } + /* * Now that we have all the maps created, just set the ->end of them: */ -- cgit v1.1 From 5cd95c2db479aa7a66f6fa572dfa410c6314c78e Mon Sep 17 00:00:00 2001 From: David Ahern Date: Fri, 20 Jul 2012 17:25:47 -0600 Subject: perf kvm: Set name for VM process in guest machine COMM events are not generated in the context of a guest machine, so the thread name is never set for the VMM process. For example, the qemu-kvm name applies to the process in the host machine, not the guest machine. So, samples for guest machines are currently displayed as: 99.67% :5671 [unknown] [g] 0xffffffff81366b41 where 5671 is the pid of the VMM. With this patch the samples in the guest machine are shown as: 18.43% [guest/5671] [unknown] [g] 0xffffffff810d68b7 Tested-by: Jiri Olsa Signed-off-by: David Ahern Cc: Frederic Weisbecker Cc: Ingo Molnar Cc: Jiri Olsa Cc: Namhyung Kim Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1342826756-64663-3-git-send-email-dsahern@gmail.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/map.c | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c index a1f4e36..8668569 100644 --- a/tools/perf/util/map.c +++ b/tools/perf/util/map.c @@ -7,6 +7,7 @@ #include #include #include "map.h" +#include "thread.h" const char *map_type__name[MAP__NR_TYPES] = { [MAP__FUNCTION] = "Functions", @@ -585,7 +586,21 @@ int machine__init(struct machine *self, const char *root_dir, pid_t pid) self->kmaps.machine = self; self->pid = pid; self->root_dir = strdup(root_dir); - return self->root_dir == NULL ? 
-ENOMEM : 0; + if (self->root_dir == NULL) + return -ENOMEM; + + if (pid != HOST_KERNEL_ID) { + struct thread *thread = machine__findnew_thread(self, pid); + char comm[64]; + + if (thread == NULL) + return -ENOMEM; + + snprintf(comm, sizeof(comm), "[guest/%d]", pid); + thread__set_comm(thread, comm); + } + + return 0; } static void dsos__delete(struct list_head *self) -- cgit v1.1 From 7c0f4a4113ba5de7898c246eeaeee4c23d94b887 Mon Sep 17 00:00:00 2001 From: David Ahern Date: Fri, 20 Jul 2012 17:25:48 -0600 Subject: perf kvm: Guest userspace samples should not be lumped with host uspace e.g., perf kvm --host --guest report -i perf.data --stdio -D shows: 1 599127912065356 0x143b8 [0x48]: PERF_RECORD_SAMPLE(IP, 5): 5671/5676: 0x7fdf95a061c0 period: 1 addr: 0 ... chain: nr:2 ..... 0: ffffffffffffff80 ..... 1: fffffffffffffe00 ... thread: qemu-kvm:5671 ...... dso: (IP, 5) means sample in guest userspace. Those samples should not be lumped into the VMM's host thread. i.e, the report output: 56.86% qemu-kvm [unknown] [u] 0x00007fdf95a061c0 With this patch the output emphasizes it is a guest userspace hit: 56.86% [guest/5671] [unknown] [u] 0x00007fdf95a061c0 Looking at 3 VMs (2 64-bit, 1 32-bit) with each running a CPU bound process (openssl speed), perf report currently shows: 93.84% 117726 qemu-kvm [unknown] [u] 0x00007fd7dcaea8e5 which is wrong. With this patch you get: 31.50% 39258 [guest/18772] [unknown] [u] 0x00007fd7dcaea8e5 31.50% 39236 [guest/11230] [unknown] [u] 0x0000000000a57340 30.84% 39232 [guest/18395] [unknown] [u] 0x00007f66f641e107 Tested-by: Jiri Olsa Signed-off-by: David Ahern Cc: Frederic Weisbecker Cc: Ingo Molnar Cc: Jiri Olsa Cc: Namhyung Kim Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1342826756-64663-4-git-send-email-dsahern@gmail.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/session.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index 8e48559..90ee39d 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c @@ -918,7 +918,9 @@ static struct machine * { const u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; - if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) { + if (perf_guest && + ((cpumode == PERF_RECORD_MISC_GUEST_KERNEL) || + (cpumode == PERF_RECORD_MISC_GUEST_USER))) { u32 pid; if (event->header.type == PERF_RECORD_MMAP) -- cgit v1.1 From adb5d2a487c55e5ca2ecc0b73c8f592e95d292c7 Mon Sep 17 00:00:00 2001 From: David Ahern Date: Fri, 20 Jul 2012 17:25:49 -0600 Subject: perf kvm: Fix bug resolving guest kernel syms Guest kernel symbols are not resolved despite passing the information needed to resolve them. e.g., perf kvm --guest --guestmount=/tmp/guest-mount record -a -- sleep 1 perf kvm --guest --guestmount=/tmp/guest-mount report --stdio 36.55% [guest/11399] [unknown] [g] 0xffffffff81600bc8 33.19% [guest/10474] [unknown] [g] 0x00000000c0116e00 30.26% [guest/11094] [unknown] [g] 0xffffffff8100a288 43.69% [guest/10474] [unknown] [g] 0x00000000c0103d90 37.38% [guest/11399] [unknown] [g] 0xffffffff81600bc8 12.24% [guest/11094] [unknown] [g] 0xffffffff810aa91d 6.69% [guest/11094] [unknown] [u] 0x00007fa784d721c3 which is just pathetic. After a maddening 2 days sifting through perf minutia I found it -- id_hdr_size is not initialized for guest machines. This shows up on the report side as random garbage for the cpu and timestamp, e.g., 29816 7310572949125804849 0x1ac0 [0x50]: PERF_RECORD_MMAP ... 
That messes up the sample sorting such that synthesized guest maps are processed last. With this patch you get a much more helpful report: 12.11% [guest/11399] [guest.kernel.kallsyms.11399] [g] irqtime_account_process_tick 10.58% [guest/11399] [guest.kernel.kallsyms.11399] [g] run_timer_softirq 6.95% [guest/11094] [guest.kernel.kallsyms.11094] [g] printk_needs_cpu 6.50% [guest/11094] [guest.kernel.kallsyms.11094] [g] do_timer 6.45% [guest/11399] [guest.kernel.kallsyms.11399] [g] idle_balance 4.90% [guest/11094] [guest.kernel.kallsyms.11094] [g] native_read_tsc ... v2: - changed rbtree walk to use rb_first per Namhyung's suggestion Tested-by: Jiri Olsa Signed-off-by: David Ahern Cc: Frederic Weisbecker Cc: Ingo Molnar Cc: Jiri Olsa Cc: Namhyung Kim Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1342826756-64663-5-git-send-email-dsahern@gmail.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/map.c | 13 +++++++++++++ tools/perf/util/map.h | 1 + tools/perf/util/session.c | 1 + 3 files changed, 15 insertions(+) diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c index 8668569..16d783d 100644 --- a/tools/perf/util/map.c +++ b/tools/perf/util/map.c @@ -729,3 +729,16 @@ char *machine__mmap_name(struct machine *self, char *bf, size_t size) return bf; } + +void machines__set_id_hdr_size(struct rb_root *machines, u16 id_hdr_size) +{ + struct rb_node *node; + struct machine *machine; + + for (node = rb_first(machines); node; node = rb_next(node)) { + machine = rb_entry(node, struct machine, rb_node); + machine->id_hdr_size = id_hdr_size; + } + + return; +} diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h index c14c665..03a1e9b 100644 --- a/tools/perf/util/map.h +++ b/tools/perf/util/map.h @@ -151,6 +151,7 @@ struct machine *machines__add(struct rb_root *self, pid_t pid, struct machine *machines__find_host(struct rb_root *self); struct machine *machines__find(struct rb_root *self, pid_t pid); struct machine *machines__findnew(struct rb_root *self, pid_t pid); +void machines__set_id_hdr_size(struct rb_root *self, u16 id_hdr_size); char *machine__mmap_name(struct machine *self, char *bf, size_t size); int machine__init(struct machine *self, const char *root_dir, pid_t pid); void machine__exit(struct machine *self); diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index 90ee39d..8e4f075 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c @@ -87,6 +87,7 @@ void perf_session__update_sample_type(struct perf_session *self) self->sample_id_all = perf_evlist__sample_id_all(self->evlist); self->id_hdr_size = perf_evlist__id_hdr_size(self->evlist); self->host_machine.id_hdr_size = self->id_hdr_size; + machines__set_id_hdr_size(&self->machines, self->id_hdr_size); } int perf_session__create_kernel_maps(struct perf_session *self) -- cgit v1.1 From c80c3c269011c67b8dabef5238af44a6d94e4d0e Mon Sep 17 00:00:00 2001 From: David Ahern Date: Fri, 20 Jul 2012 17:25:51 -0600 Subject: perf kvm: Limit repetitive guestmount message to once per directory After 7ed97ad use of the guestmount option without a subdir for *each* VM generates an error message for each sample related to that VM. Once per VM is enough. 
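For reference, the guestmount layout that avoids this message altogether gives each VM its own subdirectory named after the VMM pid; a sketch of one way to populate it (the pid and the sshfs mount are illustrative, not part of this patch):
$ mkdir -p /tmp/guest-mount/5671
$ sshfs root@vm1:/ /tmp/guest-mount/5671
$ perf kvm --guest --guestmount=/tmp/guest-mount record -a -- sleep 10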
Signed-off-by: David Ahern Cc: Frederic Weisbecker Cc: Ingo Molnar Cc: Jiri Olsa Cc: Namhyung Kim Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1342826756-64663-7-git-send-email-dsahern@gmail.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/map.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c index 16d783d..cc33486 100644 --- a/tools/perf/util/map.c +++ b/tools/perf/util/map.c @@ -8,6 +8,7 @@ #include #include "map.h" #include "thread.h" +#include "strlist.h" const char *map_type__name[MAP__NR_TYPES] = { [MAP__FUNCTION] = "Functions", @@ -695,7 +696,15 @@ struct machine *machines__findnew(struct rb_root *self, pid_t pid) (symbol_conf.guestmount)) { sprintf(path, "%s/%d", symbol_conf.guestmount, pid); if (access(path, R_OK)) { - pr_err("Can't access file %s\n", path); + static struct strlist *seen; + + if (!seen) + seen = strlist__new(true, NULL); + + if (!strlist__has_entry(seen, path)) { + pr_err("Can't access file %s\n", path); + strlist__add(seen, path); + } machine = NULL; goto out; } -- cgit v1.1 From 78b961ff8e67207adb15959526cdea4cc50ae1ed Mon Sep 17 00:00:00 2001 From: David Ahern Date: Fri, 20 Jul 2012 17:25:52 -0600 Subject: perf tools: Dump exclude_{guest,host}, precise_ip header info too Adds the attributes to the event line in the header dump. From: event : name = cycles, type = 0, config = 0x0, config1 = 0x0, config2 = 0x0, excl_usr = 0, excl_kern = 0, ... to event : name = cycles, type = 0, config = 0x0, config1 = 0x0, config2 = 0x0, excl_usr = 0, excl_kern = 0, excl_host = 0, excl_guest = 0, precise_ip = 0, ... Signed-off-by: David Ahern Cc: Frederic Weisbecker Cc: Ingo Molnar Cc: Jiri Olsa Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/r/1342826756-64663-8-git-send-email-dsahern@gmail.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/header.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index 5a47aba..3a6d204 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -1212,6 +1212,12 @@ static void print_event_desc(struct perf_header *ph, int fd, FILE *fp) attr.exclude_user, attr.exclude_kernel); + fprintf(fp, ", excl_host = %d, excl_guest = %d", + attr.exclude_host, + attr.exclude_guest); + + fprintf(fp, ", precise_ip = %d", attr.precise_ip); + if (nr) fprintf(fp, ", id = {"); -- cgit v1.1 From 8760db726e2afcd1a78e2ff58965c6b35a5826cb Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Wed, 18 Jul 2012 19:10:55 +0200 Subject: perf hists: Return correct number of characters printed in callchain Include the omitted number of characters printed for the first entry. Not that it really matters because nobody seems to care about the number of printed characters for now. But just in case.
Signed-off-by: Frederic Weisbecker Cc: David Ahern Cc: Jiri Olsa Cc: Namhyung Kim Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1342631456-7233-2-git-send-email-fweisbec@gmail.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/hist.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c index 514e2a4..90dc35a 100644 --- a/tools/perf/util/hist.c +++ b/tools/perf/util/hist.c @@ -708,7 +708,7 @@ static size_t callchain__fprintf_graph(FILE *fp, struct rb_root *root, bool printed = false; struct rb_node *node; int i = 0; - int ret; + int ret = 0; /* * If have one single callchain root, don't bother printing @@ -747,8 +747,9 @@ static size_t callchain__fprintf_graph(FILE *fp, struct rb_root *root, root = &cnode->rb_root; } - return __callchain__fprintf_graph(fp, root, total_samples, + ret += __callchain__fprintf_graph(fp, root, total_samples, 1, 1, left_margin); + return ret; } static size_t __callchain__fprintf_flat(FILE *fp, -- cgit v1.1 From 0983cc0dbca45250cbb5984bec7c303ac265b8e5 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Wed, 18 Jul 2012 19:10:54 +0200 Subject: perf tools: Fix trace events storms due to weight demux Trace events have a period (weight) of 1 by default. This can be overridden in the event definition by using the __perf_count() macro. For example, the sched_stat_runtime() event is weighted with the runtime of the task that fired it. By default, perf handles such weighted events by dividing them into individual events carrying a weight of 1. For example if sched_stat_runtime is fired and the task has run 5000000 nsecs, perf divides it into 5000000 events in the buffer. This behaviour makes weighted events unusable because they quickly fill up the buffers and we lose most events. The commit 5d81e5cfb37a174e8ddc0413e2e70cdf05807ace ("events: Don't divide events if it has field period") solves this problem by sending only one event when the PERF_SAMPLE_PERIOD flag is set. The weight is carried in the sample itself such that we don't need to demultiplex it anymore. This patch provides the last missing piece to use this feature by setting PERF_SAMPLE_PERIOD from perf tools when we deal with trace events. Before: $ ./perf record -e sched:* -a sleep 1 [ perf record: Woken up 3 times to write data ] [ perf record: Captured and wrote 1.619 MB perf.data (~70749 samples) ] Warning: Processed 16909 events and lost 1 chunks! Check IO/CPU overload! $ ./perf script perf 1894 [003] 824.898327: sched_migrate_task: comm=perf pid=1898 prio=120 orig_cpu=2 dest_cpu=0 perf 1894 [003] 824.898335: sched_stat_sleep: comm=perf pid=1898 delay=113179500 [ns] perf 1894 [003] 824.898336: sched_stat_sleep: comm=perf pid=1898 delay=113179500 [ns] perf 1894 [003] 824.898337: sched_stat_sleep: comm=perf pid=1898 delay=113179500 [ns] perf 1894 [003] 824.898338: sched_stat_sleep: comm=perf pid=1898 delay=113179500 [ns] perf 1894 [003] 824.898339: sched_stat_sleep: comm=perf pid=1898 delay=113179500 [ns] perf 1894 [003] 824.898340: sched_stat_sleep: comm=perf pid=1898 delay=113179500 [ns] perf 1894 [003] 824.898341: sched_stat_sleep: comm=perf pid=1898 delay=113179500 [ns] [...]
After: $ ./perf record -e sched:* -a sleep 1 [ perf record: Woken up 1 times to write data ] [ perf record: Captured and wrote 0.074 MB perf.data (~3228 samples) ] $ ./perf script perf 1461 [000] 554.286957: sched_migrate_task: comm=perf pid=1465 prio=120 orig_cpu=3 dest_cpu=1 perf 1461 [000] 554.286964: sched_stat_sleep: comm=perf pid=1465 delay=133047190 [ns] perf 1461 [000] 554.286967: sched_wakeup: comm=perf pid=1465 prio=120 success=1 target_cpu=001 swapper 0 [001] 554.286976: sched_stat_wait: comm=perf pid=1465 delay=0 [ns] swapper 0 [001] 554.286983: sched_switch: prev_comm=swapper/1 prev_pid=0 prev_prio=120 prev_state=R ==> next_comm=perf [...] Signed-off-by: Frederic Weisbecker Cc: David Ahern Cc: Ingo Molnar Cc: Jiri Olsa Cc: Namhyung Kim Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1342631456-7233-1-git-send-email-fweisbec@gmail.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/evlist.c | 2 +- tools/perf/util/parse-events.c | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c index f74e956..3edfd34 100644 --- a/tools/perf/util/evlist.c +++ b/tools/perf/util/evlist.c @@ -214,7 +214,7 @@ int perf_evlist__add_tracepoints(struct perf_evlist *evlist, attrs[i].type = PERF_TYPE_TRACEPOINT; attrs[i].config = err; attrs[i].sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | - PERF_SAMPLE_CPU); + PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD); attrs[i].sample_period = 1; } diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index 1aa721d..a729945 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -377,6 +377,7 @@ static int add_tracepoint(struct list_head **list, int *idx, attr.sample_type |= PERF_SAMPLE_RAW; attr.sample_type |= PERF_SAMPLE_TIME; attr.sample_type |= PERF_SAMPLE_CPU; + attr.sample_type |= PERF_SAMPLE_PERIOD; attr.sample_period = 1; snprintf(name, MAX_NAME_LEN, "%s:%s", sys_name, evt_name); -- cgit v1.1 From 6654f5d8bdaa438b1e60dfeb90f9d46ca969c012 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Wed, 18 Jul 2012 19:10:56 +0200 Subject: perf hists: Print newline between hists callchains Tiny cosmetic fix. The lack of a newline between hists callchains was looking slightly messy. 
Before: 0.24% swapper [kernel.kallsyms] [k] _raw_spin_lock_irq | --- _raw_spin_lock_irq run_timer_softirq __do_softirq call_softirq do_softirq irq_exit smp_apic_timer_interrupt apic_timer_interrupt default_idle amd_e400_idle cpu_idle start_secondary 0.10% perf [kernel.kallsyms] [k] lock_is_held | --- lock_is_held __might_sleep mutex_lock_nested perf_event_for_each_child perf_ioctl do_vfs_ioctl sys_ioctl system_call_fastpath ioctl cmd_record run_builtin main __libc_start_main After: 0.24% swapper [kernel.kallsyms] [k] _raw_spin_lock_irq | --- _raw_spin_lock_irq run_timer_softirq __do_softirq call_softirq do_softirq irq_exit smp_apic_timer_interrupt apic_timer_interrupt default_idle amd_e400_idle cpu_idle start_secondary 0.10% perf [kernel.kallsyms] [k] lock_is_held | --- lock_is_held __might_sleep mutex_lock_nested perf_event_for_each_child perf_ioctl do_vfs_ioctl sys_ioctl system_call_fastpath ioctl cmd_record run_builtin main __libc_start_main Signed-off-by: Frederic Weisbecker Cc: David Ahern Cc: Ingo Molnar Cc: Jiri Olsa Cc: Namhyung Kim Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1342631456-7233-3-git-send-email-fweisbec@gmail.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/hist.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c index 90dc35a..f247ef2 100644 --- a/tools/perf/util/hist.c +++ b/tools/perf/util/hist.c @@ -749,6 +749,8 @@ static size_t callchain__fprintf_graph(FILE *fp, struct rb_root *root, ret += __callchain__fprintf_graph(fp, root, total_samples, 1, 1, left_margin); + ret += fprintf(fp, "\n"); + return ret; } -- cgit v1.1 From 44f24cb3156a1e7d2b6bb501b7f6153aed08994c Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Sun, 22 Jul 2012 14:14:32 +0200 Subject: perf symbols: Factor DSO symtab types to generic binary types Adding interface to access DSOs so it could be used from another place. New DSO binary type is added - making current SYMTAB__* types more general: DSO_BINARY_TYPE__* = SYMTAB__* Following function is added to return path based on the specified binary type: dso__binary_type_file Signed-off-by: Jiri Olsa Cc: Arun Sharma Cc: Benjamin Redelings Cc: Corey Ashford Cc: Cyrill Gorcunov Cc: Frank Ch. 
Eigler Cc: Frederic Weisbecker Cc: Ingo Molnar Cc: Masami Hiramatsu Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Robert Richter Cc: Stephane Eranian Cc: Tom Zanussi Cc: Ulrich Drepper Link: http://lkml.kernel.org/r/1342959280-5361-10-git-send-email-jolsa@redhat.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-top.c | 2 +- tools/perf/util/annotate.c | 2 +- tools/perf/util/symbol.c | 209 +++++++++++++++++++++++++++------------------ tools/perf/util/symbol.h | 34 ++++---- 4 files changed, 145 insertions(+), 102 deletions(-) diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index e3cab5f..35e86c6 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -125,7 +125,7 @@ static int perf_top__parse_source(struct perf_top *top, struct hist_entry *he) /* * We can't annotate with just /proc/kallsyms */ - if (map->dso->symtab_type == SYMTAB__KALLSYMS) { + if (map->dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS) { pr_err("Can't annotate %s: No vmlinux file was found in the " "path\n", sym->name); sleep(1); diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c index 8069dfb..7d3641f6 100644 --- a/tools/perf/util/annotate.c +++ b/tools/perf/util/annotate.c @@ -777,7 +777,7 @@ fallback: free_filename = false; } - if (dso->symtab_type == SYMTAB__KALLSYMS) { + if (dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS) { char bf[BUILD_ID_SIZE * 2 + 16] = " with build id "; char *build_id_msg = NULL; diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 66c132e..60677a6 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -48,6 +48,23 @@ struct symbol_conf symbol_conf = { .symfs = "", }; +static enum dso_binary_type binary_type_symtab[] = { + DSO_BINARY_TYPE__KALLSYMS, + DSO_BINARY_TYPE__GUEST_KALLSYMS, + DSO_BINARY_TYPE__JAVA_JIT, + DSO_BINARY_TYPE__DEBUGLINK, + DSO_BINARY_TYPE__BUILD_ID_CACHE, + DSO_BINARY_TYPE__FEDORA_DEBUGINFO, + DSO_BINARY_TYPE__UBUNTU_DEBUGINFO, + DSO_BINARY_TYPE__BUILDID_DEBUGINFO, + DSO_BINARY_TYPE__SYSTEM_PATH_DSO, + DSO_BINARY_TYPE__GUEST_KMODULE, + DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE, + DSO_BINARY_TYPE__NOT_FOUND, +}; + +#define DSO_BINARY_TYPE__SYMTAB_CNT sizeof(binary_type_symtab) + int dso__name_len(const struct dso *dso) { if (!dso) @@ -318,7 +335,7 @@ struct dso *dso__new(const char *name) dso__set_short_name(dso, dso->name); for (i = 0; i < MAP__NR_TYPES; ++i) dso->symbols[i] = dso->symbol_names[i] = RB_ROOT; - dso->symtab_type = SYMTAB__NOT_FOUND; + dso->symtab_type = DSO_BINARY_TYPE__NOT_FOUND; dso->loaded = 0; dso->sorted_by_name = 0; dso->has_build_id = 0; @@ -806,9 +823,9 @@ int dso__load_kallsyms(struct dso *dso, const char *filename, symbols__fixup_end(&dso->symbols[map->type]); if (dso->kernel == DSO_TYPE_GUEST_KERNEL) - dso->symtab_type = SYMTAB__GUEST_KALLSYMS; + dso->symtab_type = DSO_BINARY_TYPE__GUEST_KALLSYMS; else - dso->symtab_type = SYMTAB__KALLSYMS; + dso->symtab_type = DSO_BINARY_TYPE__KALLSYMS; return dso__split_kallsyms(dso, map, filter); } @@ -1660,32 +1677,110 @@ out: char dso__symtab_origin(const struct dso *dso) { static const char origin[] = { - [SYMTAB__KALLSYMS] = 'k', - [SYMTAB__JAVA_JIT] = 'j', - [SYMTAB__DEBUGLINK] = 'l', - [SYMTAB__BUILD_ID_CACHE] = 'B', - [SYMTAB__FEDORA_DEBUGINFO] = 'f', - [SYMTAB__UBUNTU_DEBUGINFO] = 'u', - [SYMTAB__BUILDID_DEBUGINFO] = 'b', - [SYMTAB__SYSTEM_PATH_DSO] = 'd', - [SYMTAB__SYSTEM_PATH_KMODULE] = 'K', - [SYMTAB__GUEST_KALLSYMS] = 'g', - [SYMTAB__GUEST_KMODULE] = 'G', + [DSO_BINARY_TYPE__KALLSYMS] = 'k', + 
[DSO_BINARY_TYPE__JAVA_JIT] = 'j', + [DSO_BINARY_TYPE__DEBUGLINK] = 'l', + [DSO_BINARY_TYPE__BUILD_ID_CACHE] = 'B', + [DSO_BINARY_TYPE__FEDORA_DEBUGINFO] = 'f', + [DSO_BINARY_TYPE__UBUNTU_DEBUGINFO] = 'u', + [DSO_BINARY_TYPE__BUILDID_DEBUGINFO] = 'b', + [DSO_BINARY_TYPE__SYSTEM_PATH_DSO] = 'd', + [DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE] = 'K', + [DSO_BINARY_TYPE__GUEST_KALLSYMS] = 'g', + [DSO_BINARY_TYPE__GUEST_KMODULE] = 'G', }; - if (dso == NULL || dso->symtab_type == SYMTAB__NOT_FOUND) + if (dso == NULL || dso->symtab_type == DSO_BINARY_TYPE__NOT_FOUND) return '!'; return origin[dso->symtab_type]; } +int dso__binary_type_file(struct dso *dso, enum dso_binary_type type, + char *root_dir, char *file, size_t size) +{ + char build_id_hex[BUILD_ID_SIZE * 2 + 1]; + int ret = 0; + + switch (type) { + case DSO_BINARY_TYPE__DEBUGLINK: { + char *debuglink; + + strncpy(file, dso->long_name, size); + debuglink = file + dso->long_name_len; + while (debuglink != file && *debuglink != '/') + debuglink--; + if (*debuglink == '/') + debuglink++; + filename__read_debuglink(dso->long_name, debuglink, + size - (debuglink - file)); + } + break; + case DSO_BINARY_TYPE__BUILD_ID_CACHE: + /* skip the locally configured cache if a symfs is given */ + if (symbol_conf.symfs[0] || + (dso__build_id_filename(dso, file, size) == NULL)) + ret = -1; + break; + + case DSO_BINARY_TYPE__FEDORA_DEBUGINFO: + snprintf(file, size, "%s/usr/lib/debug%s.debug", + symbol_conf.symfs, dso->long_name); + break; + + case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO: + snprintf(file, size, "%s/usr/lib/debug%s", + symbol_conf.symfs, dso->long_name); + break; + + case DSO_BINARY_TYPE__BUILDID_DEBUGINFO: + if (!dso->has_build_id) { + ret = -1; + break; + } + + build_id__sprintf(dso->build_id, + sizeof(dso->build_id), + build_id_hex); + snprintf(file, size, + "%s/usr/lib/debug/.build-id/%.2s/%s.debug", + symbol_conf.symfs, build_id_hex, build_id_hex + 2); + break; + + case DSO_BINARY_TYPE__SYSTEM_PATH_DSO: + snprintf(file, size, "%s%s", + symbol_conf.symfs, dso->long_name); + break; + + case DSO_BINARY_TYPE__GUEST_KMODULE: + snprintf(file, size, "%s%s%s", symbol_conf.symfs, + root_dir, dso->long_name); + break; + + case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE: + snprintf(file, size, "%s%s", symbol_conf.symfs, + dso->long_name); + break; + + default: + case DSO_BINARY_TYPE__KALLSYMS: + case DSO_BINARY_TYPE__GUEST_KALLSYMS: + case DSO_BINARY_TYPE__JAVA_JIT: + case DSO_BINARY_TYPE__NOT_FOUND: + ret = -1; + break; + } + + return ret; +} + int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter) { - int size = PATH_MAX; char *name; int ret = -1; int fd; + u_int i; struct machine *machine; - const char *root_dir; + char *root_dir = (char *) ""; int want_symtab; dso__set_loaded(dso, map->type); @@ -1700,7 +1795,7 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter) else machine = NULL; - name = malloc(size); + name = malloc(PATH_MAX); if (!name) return -1; @@ -1719,81 +1814,27 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter) } ret = dso__load_perf_map(dso, map, filter); - dso->symtab_type = ret > 0 ? SYMTAB__JAVA_JIT : - SYMTAB__NOT_FOUND; + dso->symtab_type = ret > 0 ? DSO_BINARY_TYPE__JAVA_JIT : + DSO_BINARY_TYPE__NOT_FOUND; return ret; } + if (machine) + root_dir = machine->root_dir; + /* Iterate over candidate debug images. * On the first pass, only load images if they have a full symtab. 
* Failing that, do a second pass where we accept .dynsym also */ want_symtab = 1; restart: - for (dso->symtab_type = SYMTAB__DEBUGLINK; - dso->symtab_type != SYMTAB__NOT_FOUND; - dso->symtab_type++) { - switch (dso->symtab_type) { - case SYMTAB__DEBUGLINK: { - char *debuglink; - strncpy(name, dso->long_name, size); - debuglink = name + dso->long_name_len; - while (debuglink != name && *debuglink != '/') - debuglink--; - if (*debuglink == '/') - debuglink++; - filename__read_debuglink(dso->long_name, debuglink, - size - (debuglink - name)); - } - break; - case SYMTAB__BUILD_ID_CACHE: - /* skip the locally configured cache if a symfs is given */ - if (symbol_conf.symfs[0] || - (dso__build_id_filename(dso, name, size) == NULL)) { - continue; - } - break; - case SYMTAB__FEDORA_DEBUGINFO: - snprintf(name, size, "%s/usr/lib/debug%s.debug", - symbol_conf.symfs, dso->long_name); - break; - case SYMTAB__UBUNTU_DEBUGINFO: - snprintf(name, size, "%s/usr/lib/debug%s", - symbol_conf.symfs, dso->long_name); - break; - case SYMTAB__BUILDID_DEBUGINFO: { - char build_id_hex[BUILD_ID_SIZE * 2 + 1]; - - if (!dso->has_build_id) - continue; + for (i = 0; i < DSO_BINARY_TYPE__SYMTAB_CNT; i++) { - build_id__sprintf(dso->build_id, - sizeof(dso->build_id), - build_id_hex); - snprintf(name, size, - "%s/usr/lib/debug/.build-id/%.2s/%s.debug", - symbol_conf.symfs, build_id_hex, build_id_hex + 2); - } - break; - case SYMTAB__SYSTEM_PATH_DSO: - snprintf(name, size, "%s%s", - symbol_conf.symfs, dso->long_name); - break; - case SYMTAB__GUEST_KMODULE: - if (map->groups && machine) - root_dir = machine->root_dir; - else - root_dir = ""; - snprintf(name, size, "%s%s%s", symbol_conf.symfs, - root_dir, dso->long_name); - break; + dso->symtab_type = binary_type_symtab[i]; - case SYMTAB__SYSTEM_PATH_KMODULE: - snprintf(name, size, "%s%s", symbol_conf.symfs, - dso->long_name); - break; - default:; - } + if (dso__binary_type_file(dso, dso->symtab_type, + root_dir, name, PATH_MAX)) + continue; /* Name is now the name of the next image to try */ fd = open(name, O_RDONLY); @@ -2010,9 +2051,9 @@ struct map *machine__new_module(struct machine *machine, u64 start, return NULL; if (machine__is_host(machine)) - dso->symtab_type = SYMTAB__SYSTEM_PATH_KMODULE; + dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE; else - dso->symtab_type = SYMTAB__GUEST_KMODULE; + dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE; map_groups__insert(&machine->kmaps, map); return map; } diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index a884b99..dc474f0 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h @@ -155,6 +155,21 @@ struct addr_location { s32 cpu; }; +enum dso_binary_type { + DSO_BINARY_TYPE__KALLSYMS = 0, + DSO_BINARY_TYPE__GUEST_KALLSYMS, + DSO_BINARY_TYPE__JAVA_JIT, + DSO_BINARY_TYPE__DEBUGLINK, + DSO_BINARY_TYPE__BUILD_ID_CACHE, + DSO_BINARY_TYPE__FEDORA_DEBUGINFO, + DSO_BINARY_TYPE__UBUNTU_DEBUGINFO, + DSO_BINARY_TYPE__BUILDID_DEBUGINFO, + DSO_BINARY_TYPE__SYSTEM_PATH_DSO, + DSO_BINARY_TYPE__GUEST_KMODULE, + DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE, + DSO_BINARY_TYPE__NOT_FOUND, +}; + enum dso_kernel_type { DSO_TYPE_USER = 0, DSO_TYPE_KERNEL, @@ -173,13 +188,13 @@ struct dso { struct rb_root symbol_names[MAP__NR_TYPES]; enum dso_kernel_type kernel; enum dso_swap_type needs_swap; + enum dso_binary_type symtab_type; u8 adjust_symbols:1; u8 has_build_id:1; u8 hit:1; u8 annotate_warned:1; u8 sname_alloc:1; u8 lname_alloc:1; - unsigned char symtab_type; u8 sorted_by_name; u8 loaded; u8 build_id[BUILD_ID_SIZE]; 
@@ -253,21 +268,6 @@ size_t dso__fprintf_symbols_by_name(struct dso *dso, enum map_type type, FILE *fp); size_t dso__fprintf(struct dso *dso, enum map_type type, FILE *fp); -enum symtab_type { - SYMTAB__KALLSYMS = 0, - SYMTAB__GUEST_KALLSYMS, - SYMTAB__JAVA_JIT, - SYMTAB__DEBUGLINK, - SYMTAB__BUILD_ID_CACHE, - SYMTAB__FEDORA_DEBUGINFO, - SYMTAB__UBUNTU_DEBUGINFO, - SYMTAB__BUILDID_DEBUGINFO, - SYMTAB__SYSTEM_PATH_DSO, - SYMTAB__GUEST_KMODULE, - SYMTAB__SYSTEM_PATH_KMODULE, - SYMTAB__NOT_FOUND, -}; - char dso__symtab_origin(const struct dso *dso); void dso__set_long_name(struct dso *dso, char *name); void dso__set_build_id(struct dso *dso, void *build_id); @@ -304,4 +304,6 @@ bool symbol_type__is_a(char symbol_type, enum map_type map_type); size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp); +int dso__binary_type_file(struct dso *dso, enum dso_binary_type type, + char *root_dir, char *file, size_t size); #endif /* __PERF_SYMBOL */ -- cgit v1.1 From 949d160b6962c13fc071afefa22997780fcc94a5 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Sun, 22 Jul 2012 14:14:33 +0200 Subject: perf symbols: Add interface to read DSO image data Adding the following interface to the DSO object to allow reading of DSO image data: dso__data_fd - opens DSO and returns file descriptor Binary types are used to locate/open DSO in the following order: DSO_BINARY_TYPE__BUILD_ID_CACHE DSO_BINARY_TYPE__SYSTEM_PATH_DSO In other words, we first try to open the DSO build-id path, and if that fails we try the DSO system path. dso__data_read_offset - reads DSO data from specified offset dso__data_read_addr - reads DSO data from specified address/map. Signed-off-by: Jiri Olsa Cc: Arun Sharma Cc: Benjamin Redelings Cc: Corey Ashford Cc: Cyrill Gorcunov Cc: Frank Ch. Eigler Cc: Frederic Weisbecker Cc: Ingo Molnar Cc: Masami Hiramatsu Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Robert Richter Cc: Stephane Eranian Cc: Tom Zanussi Cc: Ulrich Drepper Link: http://lkml.kernel.org/r/1342959280-5361-11-git-send-email-jolsa@redhat.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/symbol.c | 109 +++++++++++++++++++++++++++++++++++++++++++++++ tools/perf/util/symbol.h | 8 ++++ 2 files changed, 117 insertions(+) diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 60677a6..8131949 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -65,6 +65,14 @@ static enum dso_binary_type binary_type_symtab[] = { #define DSO_BINARY_TYPE__SYMTAB_CNT sizeof(binary_type_symtab) +static enum dso_binary_type binary_type_data[] = { + DSO_BINARY_TYPE__BUILD_ID_CACHE, + DSO_BINARY_TYPE__SYSTEM_PATH_DSO, + DSO_BINARY_TYPE__NOT_FOUND, +}; + +#define DSO_BINARY_TYPE__DATA_CNT sizeof(binary_type_data) + int dso__name_len(const struct dso *dso) { if (!dso) @@ -336,6 +344,7 @@ struct dso *dso__new(const char *name) for (i = 0; i < MAP__NR_TYPES; ++i) dso->symbols[i] = dso->symbol_names[i] = RB_ROOT; dso->symtab_type = DSO_BINARY_TYPE__NOT_FOUND; + dso->data_type = DSO_BINARY_TYPE__NOT_FOUND; dso->loaded = 0; dso->sorted_by_name = 0; dso->has_build_id = 0; @@ -2953,3 +2962,103 @@ struct map *dso__new_map(const char *name) return map; } + +static int open_dso(struct dso *dso, struct machine *machine) +{ + char *root_dir = (char *) ""; + char *name; + int fd; + + name = malloc(PATH_MAX); + if (!name) + return -ENOMEM; + + if (machine) + root_dir = machine->root_dir; + + if (dso__binary_type_file(dso, dso->data_type, + root_dir, name, PATH_MAX)) { + free(name); + return -EINVAL; + } + + fd = open(name, O_RDONLY);
+ free(name); + return fd; +} + +int dso__data_fd(struct dso *dso, struct machine *machine) +{ + int i = 0; + + if (dso->data_type != DSO_BINARY_TYPE__NOT_FOUND) + return open_dso(dso, machine); + + do { + int fd; + + dso->data_type = binary_type_data[i++]; + + fd = open_dso(dso, machine); + if (fd >= 0) + return fd; + + } while (dso->data_type != DSO_BINARY_TYPE__NOT_FOUND); + + return -EINVAL; +} + +static ssize_t dso_cache_read(struct dso *dso __used, u64 offset __used, + u8 *data __used, ssize_t size __used) +{ + return -EINVAL; +} + +static int dso_cache_add(struct dso *dso __used, u64 offset __used, + u8 *data __used, ssize_t size __used) +{ + return 0; +} + +static ssize_t read_dso_data(struct dso *dso, struct machine *machine, + u64 offset, u8 *data, ssize_t size) +{ + ssize_t rsize = -1; + int fd; + + fd = dso__data_fd(dso, machine); + if (fd < 0) + return -1; + + do { + if (-1 == lseek(fd, offset, SEEK_SET)) + break; + + rsize = read(fd, data, size); + if (-1 == rsize) + break; + + if (dso_cache_add(dso, offset, data, size)) + pr_err("Failed to add data int dso cache."); + + } while (0); + + close(fd); + return rsize; +} + +ssize_t dso__data_read_offset(struct dso *dso, struct machine *machine, + u64 offset, u8 *data, ssize_t size) { + if (dso_cache_read(dso, offset, data, size)) + return read_dso_data(dso, machine, offset, data, size); + return 0; +} + +ssize_t dso__data_read_addr(struct dso *dso, struct map *map, + struct machine *machine, u64 addr, + u8 *data, ssize_t size) +{ + u64 offset = map->map_ip(map, addr); + return dso__data_read_offset(dso, machine, offset, data, size); +} diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index dc474f0..9b9ea00 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h @@ -189,6 +189,7 @@ struct dso { enum dso_kernel_type kernel; enum dso_swap_type needs_swap; enum dso_binary_type symtab_type; + enum dso_binary_type data_type; u8 adjust_symbols:1; u8 has_build_id:1; u8 hit:1; @@ -306,4 +307,11 @@ size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp); int dso__binary_type_file(struct dso *dso, enum dso_binary_type type, char *root_dir, char *file, size_t size); + +int dso__data_fd(struct dso *dso, struct machine *machine); +ssize_t dso__data_read_offset(struct dso *dso, struct machine *machine, + u64 offset, u8 *data, ssize_t size); +ssize_t dso__data_read_addr(struct dso *dso, struct map *map, + struct machine *machine, u64 addr, + u8 *data, ssize_t size); #endif /* __PERF_SYMBOL */ -- cgit v1.1 From 4dff624ae05bf3fb89f7653b3a55e7a5f1f1dadf Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Sun, 22 Jul 2012 14:14:39 +0200 Subject: perf symbols: Add dso data caching Adding dso data caching so we don't need to open/read/close each time we want dso data. The DSO data caching affects the following functions: dso__data_read_offset dso__data_read_addr Each DSO read tries to find the data (based on offset) inside the cache. If it's not present it fills the cache from file, and returns the data. If it is present, the data is returned with no file read. Each data read is cached by reading a cache-page-sized, aligned amount of DSO data. The cache page size is hardcoded to 4096. The cache uses an RB tree with the file offset as the sort key. Signed-off-by: Jiri Olsa Cc: Arun Sharma Cc: Benjamin Redelings Cc: Corey Ashford Cc: Cyrill Gorcunov Cc: Frank Ch.
Eigler Cc: Frederic Weisbecker Cc: Ingo Molnar Cc: Masami Hiramatsu Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Robert Richter Cc: Stephane Eranian Cc: Tom Zanussi Cc: Ulrich Drepper Link: http://lkml.kernel.org/r/1342959280-5361-17-git-send-email-jolsa@redhat.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/symbol.c | 154 +++++++++++++++++++++++++++++++++++++++++------ tools/perf/util/symbol.h | 11 ++++ 2 files changed, 147 insertions(+), 18 deletions(-) diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 8131949..fdad4eee 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -29,6 +29,7 @@ #define NT_GNU_BUILD_ID 3 #endif +static void dso_cache__free(struct rb_root *root); static bool dso__build_id_equal(const struct dso *dso, u8 *build_id); static int elf_read_build_id(Elf *elf, void *bf, size_t size); static void dsos__add(struct list_head *head, struct dso *dso); @@ -343,6 +344,7 @@ struct dso *dso__new(const char *name) dso__set_short_name(dso, dso->name); for (i = 0; i < MAP__NR_TYPES; ++i) dso->symbols[i] = dso->symbol_names[i] = RB_ROOT; + dso->cache = RB_ROOT; dso->symtab_type = DSO_BINARY_TYPE__NOT_FOUND; dso->data_type = DSO_BINARY_TYPE__NOT_FOUND; dso->loaded = 0; @@ -378,6 +380,7 @@ void dso__delete(struct dso *dso) free((char *)dso->short_name); if (dso->lname_alloc) free(dso->long_name); + dso_cache__free(&dso->cache); free(dso); } @@ -3008,22 +3011,87 @@ int dso__data_fd(struct dso *dso, struct machine *machine) return -EINVAL; } -static ssize_t dso_cache_read(struct dso *dso __used, u64 offset __used, - u8 *data __used, ssize_t size __used) +static void +dso_cache__free(struct rb_root *root) { - return -EINVAL; + struct rb_node *next = rb_first(root); + + while (next) { + struct dso_cache *cache; + + cache = rb_entry(next, struct dso_cache, rb_node); + next = rb_next(&cache->rb_node); + rb_erase(&cache->rb_node, root); + free(cache); + } } -static int dso_cache_add(struct dso *dso __used, u64 offset __used, - u8 *data __used, ssize_t size __used) +static struct dso_cache* +dso_cache__find(struct rb_root *root, u64 offset) { - return 0; + struct rb_node **p = &root->rb_node; + struct rb_node *parent = NULL; + struct dso_cache *cache; + + while (*p != NULL) { + u64 end; + + parent = *p; + cache = rb_entry(parent, struct dso_cache, rb_node); + end = cache->offset + DSO__DATA_CACHE_SIZE; + + if (offset < cache->offset) + p = &(*p)->rb_left; + else if (offset >= end) + p = &(*p)->rb_right; + else + return cache; + } + return NULL; +} + +static void +dso_cache__insert(struct rb_root *root, struct dso_cache *new) +{ + struct rb_node **p = &root->rb_node; + struct rb_node *parent = NULL; + struct dso_cache *cache; + u64 offset = new->offset; + + while (*p != NULL) { + u64 end; + + parent = *p; + cache = rb_entry(parent, struct dso_cache, rb_node); + end = cache->offset + DSO__DATA_CACHE_SIZE; + + if (offset < cache->offset) + p = &(*p)->rb_left; + else if (offset >= end) + p = &(*p)->rb_right; + } + + rb_link_node(&new->rb_node, parent, p); + rb_insert_color(&new->rb_node, root); +} + +static ssize_t +dso_cache__memcpy(struct dso_cache *cache, u64 offset, + u8 *data, u64 size) +{ + u64 cache_offset = offset - cache->offset; + u64 cache_size = min(cache->size - cache_offset, size); + + memcpy(data, cache->data + cache_offset, cache_size); + return cache_size; } -static ssize_t read_dso_data(struct dso *dso, struct machine *machine, - u64 offset, u8 *data, ssize_t size) +static ssize_t +dso_cache__read(struct dso *dso, struct 
machine *machine, + u64 offset, u8 *data, ssize_t size) { - ssize_t rsize = -1; + struct dso_cache *cache; + ssize_t ret; int fd; fd = dso__data_fd(dso, machine); @@ -3031,28 +3099,78 @@ static ssize_t read_dso_data(struct dso *dso, struct machine *machine, return -1; do { - if (-1 == lseek(fd, offset, SEEK_SET)) + u64 cache_offset; + + ret = -ENOMEM; + + cache = zalloc(sizeof(*cache) + DSO__DATA_CACHE_SIZE); + if (!cache) break; - rsize = read(fd, data, size); - if (-1 == rsize) + cache_offset = offset & DSO__DATA_CACHE_MASK; + ret = -EINVAL; + + if (-1 == lseek(fd, cache_offset, SEEK_SET)) break; - if (dso_cache_add(dso, offset, data, size)) - pr_err("Failed to add data int dso cache."); + ret = read(fd, cache->data, DSO__DATA_CACHE_SIZE); + if (ret <= 0) + break; + + cache->offset = cache_offset; + cache->size = ret; + dso_cache__insert(&dso->cache, cache); + + ret = dso_cache__memcpy(cache, offset, data, size); } while (0); + if (ret <= 0) + free(cache); + close(fd); - return rsize; + return ret; +} + +static ssize_t dso_cache_read(struct dso *dso, struct machine *machine, + u64 offset, u8 *data, ssize_t size) +{ + struct dso_cache *cache; + + cache = dso_cache__find(&dso->cache, offset); + if (cache) + return dso_cache__memcpy(cache, offset, data, size); + else + return dso_cache__read(dso, machine, offset, data, size); } ssize_t dso__data_read_offset(struct dso *dso, struct machine *machine, u64 offset, u8 *data, ssize_t size) { - if (dso_cache_read(dso, offset, data, size)) - return read_dso_data(dso, machine, offset, data, size); - return 0; + ssize_t r = 0; + u8 *p = data; + + do { + ssize_t ret; + + ret = dso_cache_read(dso, machine, offset, p, size); + if (ret < 0) + return ret; + + /* Reached EOF, return what we have. */ + if (!ret) + break; + + BUG_ON(ret > size); + + r += ret; + p += ret; + offset += ret; + size -= ret; + + } while (size); + + return r; } ssize_t dso__data_read_addr(struct dso *dso, struct map *map, diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index 9b9ea00..980d5f5 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h @@ -182,10 +182,21 @@ enum dso_swap_type { DSO_SWAP__YES, }; +#define DSO__DATA_CACHE_SIZE 4096 +#define DSO__DATA_CACHE_MASK ~(DSO__DATA_CACHE_SIZE - 1) + +struct dso_cache { + struct rb_node rb_node; + u64 offset; + u64 size; + char data[0]; +}; + struct dso { struct list_head node; struct rb_root symbols[MAP__NR_TYPES]; struct rb_root symbol_names[MAP__NR_TYPES]; + struct rb_root cache; enum dso_kernel_type kernel; enum dso_swap_type needs_swap; enum dso_binary_type symtab_type; -- cgit v1.1 From f7add556534529ab18501ced98d7f3f2fc7f0621 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Sun, 22 Jul 2012 14:14:40 +0200 Subject: perf test: Add dso data caching tests Adding automated test for DSO data reading. Testing raw/cached reads from different file/cache locations. Signed-off-by: Jiri Olsa Cc: Arun Sharma Cc: Benjamin Redelings Cc: Corey Ashford Cc: Cyrill Gorcunov Cc: Frank Ch. 
Eigler Cc: Frederic Weisbecker Cc: Ingo Molnar Cc: Masami Hiramatsu Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Robert Richter Cc: Stephane Eranian Cc: Tom Zanussi Cc: Ulrich Drepper Link: http://lkml.kernel.org/r/1342959280-5361-18-git-send-email-jolsa@redhat.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Makefile | 1 + tools/perf/builtin-test.c | 4 ++ tools/perf/util/dso-test-data.c | 153 ++++++++++++++++++++++++++++++++++++++++ tools/perf/util/symbol.h | 1 + 4 files changed, 159 insertions(+) create mode 100644 tools/perf/util/dso-test-data.c diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 75d74e5..e8f0579 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -354,6 +354,7 @@ LIB_OBJS += $(OUTPUT)util/usage.o LIB_OBJS += $(OUTPUT)util/wrapper.o LIB_OBJS += $(OUTPUT)util/sigchain.o LIB_OBJS += $(OUTPUT)util/symbol.o +LIB_OBJS += $(OUTPUT)util/dso-test-data.o LIB_OBJS += $(OUTPUT)util/color.o LIB_OBJS += $(OUTPUT)util/pager.o LIB_OBJS += $(OUTPUT)util/header.o diff --git a/tools/perf/builtin-test.c b/tools/perf/builtin-test.c index 5ce3030..d909eb7 100644 --- a/tools/perf/builtin-test.c +++ b/tools/perf/builtin-test.c @@ -1142,6 +1142,10 @@ static struct test { .func = test__perf_pmu, }, { + .desc = "Test dso data interface", + .func = dso__test_data, + }, + { .func = NULL, }, }; diff --git a/tools/perf/util/dso-test-data.c b/tools/perf/util/dso-test-data.c new file mode 100644 index 0000000..541cdc7 --- /dev/null +++ b/tools/perf/util/dso-test-data.c @@ -0,0 +1,153 @@ +#include "util.h" + +#include +#include +#include +#include +#include + +#include "symbol.h" + +#define TEST_ASSERT_VAL(text, cond) \ +do { \ + if (!(cond)) { \ + pr_debug("FAILED %s:%d %s\n", __FILE__, __LINE__, text); \ + return -1; \ + } \ +} while (0) + +static char *test_file(int size) +{ + static char buf_templ[] = "/tmp/test-XXXXXX"; + char *templ = buf_templ; + int fd, i; + unsigned char *buf; + + fd = mkostemp(templ, O_CREAT|O_WRONLY|O_TRUNC); + + buf = malloc(size); + if (!buf) { + close(fd); + return NULL; + } + + for (i = 0; i < size; i++) + buf[i] = (unsigned char) ((int) i % 10); + + if (size != write(fd, buf, size)) + templ = NULL; + + close(fd); + return templ; +} + +#define TEST_FILE_SIZE (DSO__DATA_CACHE_SIZE * 20) + +struct test_data_offset { + off_t offset; + u8 data[10]; + int size; +}; + +struct test_data_offset offsets[] = { + /* Fill first cache page. */ + { + .offset = 10, + .data = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 }, + .size = 10, + }, + /* Read first cache page. */ + { + .offset = 10, + .data = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 }, + .size = 10, + }, + /* Fill cache boundary pages. */ + { + .offset = DSO__DATA_CACHE_SIZE - DSO__DATA_CACHE_SIZE % 10, + .data = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 }, + .size = 10, + }, + /* Read cache boundary pages. */ + { + .offset = DSO__DATA_CACHE_SIZE - DSO__DATA_CACHE_SIZE % 10, + .data = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 }, + .size = 10, + }, + /* Fill final cache page. */ + { + .offset = TEST_FILE_SIZE - 10, + .data = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 }, + .size = 10, + }, + /* Read final cache page. */ + { + .offset = TEST_FILE_SIZE - 10, + .data = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 }, + .size = 10, + }, + /* Read final cache page. 
*/ + { + .offset = TEST_FILE_SIZE - 3, + .data = { 7, 8, 9, 0, 0, 0, 0, 0, 0, 0 }, + .size = 3, + }, +}; + +int dso__test_data(void) +{ + struct machine machine; + struct dso *dso; + char *file = test_file(TEST_FILE_SIZE); + size_t i; + + TEST_ASSERT_VAL("No test file", file); + + memset(&machine, 0, sizeof(machine)); + + dso = dso__new((const char *)file); + + /* Basic 10 bytes tests. */ + for (i = 0; i < ARRAY_SIZE(offsets); i++) { + struct test_data_offset *data = &offsets[i]; + ssize_t size; + u8 buf[10]; + + memset(buf, 0, 10); + size = dso__data_read_offset(dso, &machine, data->offset, + buf, 10); + + TEST_ASSERT_VAL("Wrong size", size == data->size); + TEST_ASSERT_VAL("Wrong data", !memcmp(buf, data->data, 10)); + } + + /* Read cross multiple cache pages. */ + { + ssize_t size; + int c; + u8 *buf; + + buf = malloc(TEST_FILE_SIZE); + TEST_ASSERT_VAL("ENOMEM\n", buf); + + /* First iteration to fill caches, second one to read them. */ + for (c = 0; c < 2; c++) { + memset(buf, 0, TEST_FILE_SIZE); + size = dso__data_read_offset(dso, &machine, 10, + buf, TEST_FILE_SIZE); + + TEST_ASSERT_VAL("Wrong size", + size == (TEST_FILE_SIZE - 10)); + + for (i = 0; i < (size_t)size; i++) + TEST_ASSERT_VAL("Wrong data", + buf[i] == (i % 10)); + } + + free(buf); + } + + dso__delete(dso); + unlink(file); + return 0; +} diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index 980d5f5..1fe733a 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h @@ -325,4 +325,5 @@ ssize_t dso__data_read_offset(struct dso *dso, struct machine *machine, u64 offset, u8 *data, ssize_t size); ssize_t dso__data_read_addr(struct dso *dso, struct map *map, struct machine *machine, u64 addr, u8 *data, ssize_t size); +int dso__test_data(void); #endif /* __PERF_SYMBOL */ -- cgit v1.1 From 4a841d650ea435c69e60675537f158a620697290 Mon Sep 17 00:00:00 2001 From: Jovi Zhang Date: Sun, 15 Jul 2012 03:03:10 +0800 Subject: perf tools: Make the breakpoint events sample period default to 1 There is a problem with hw_breakpoint perf events: the events reported to userspace are not correct. Sometimes a single breakpoint trigger reports several events, and sometimes a breakpoint event does not reach userspace at all. The root cause is that attr->freq defaults to 1 for breakpoint events, which makes the kernel calculate the event period in an unexpected way; setting the sample period to 1 changes attr->freq to 0 and fixes the problem. This patch is similar to commit f92128 for tracepoint events: perf: Make the trace events sample period default to 1 Signed-off-by: Jovi Zhang Cc: Ingo Molnar Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/CACV3sbLF8taiCq_VYW-sgRJyupeMzg58C7ZXfMe3xZUiH_Mx6w@mail.gmail.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/parse-events.c | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index a729945..74a5af4 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -490,6 +490,7 @@ int parse_events_add_breakpoint(struct list_head **list, int *idx, attr.bp_len = HW_BREAKPOINT_LEN_4; attr.type = PERF_TYPE_BREAKPOINT; + attr.sample_period = 1; return add_event(list, idx, &attr, NULL); } -- cgit v1.1 From 4cc49d4dc82a39a542a31c1f51ead08a46fd33f1 Mon Sep 17 00:00:00 2001 From: "Kirill A.
Shutemov" Date: Tue, 24 Jul 2012 00:06:54 +0300 Subject: perf tools: use XSI-compliant version of strerror_r() instead of GNU-specific MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Perf uses the GNU-specific version of strerror_r(). The GNU-specific strerror_r() returns a pointer to a string containing the error message. This may be either a pointer to a string that the function stores in buf, or a pointer to some (immutable) static string (in which case buf is unused). In glibc-2.16 the GNU version was marked with the warn_unused_result attribute. This triggers a few warnings in perf: util/target.c: In function ‘perf_target__strerror’: util/target.c:114:13: error: ignoring return value of ‘strerror_r’, declared with attribute warn_unused_result [-Werror=unused-result] ui/browsers/hists.c: In function ‘hist_browser__dump’: ui/browsers/hists.c:981:13: error: ignoring return value of ‘strerror_r’, declared with attribute warn_unused_result [-Werror=unused-result] They are bugs. Let's fix the strerror_r() usage. Signed-off-by: Kirill A. Shutemov Acked-by: Ulrich Drepper Cc: Ingo Molnar Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Ulrich Drepper Link: http://lkml.kernel.org/r/20120723210654.GA25248@shutemov.name [ committer note: s/assert/BUG_ON/g ] Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/ui/browsers/hists.c | 4 ++-- tools/perf/util/target.c | 11 ++++++++++- 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c index 482f051..413bd62 100644 --- a/tools/perf/ui/browsers/hists.c +++ b/tools/perf/ui/browsers/hists.c @@ -978,8 +978,8 @@ static int hist_browser__dump(struct hist_browser *browser) fp = fopen(filename, "w"); if (fp == NULL) { char bf[64]; - strerror_r(errno, bf, sizeof(bf)); - ui_helpline__fpush("Couldn't write to %s: %s", filename, bf); + const char *err = strerror_r(errno, bf, sizeof(bf)); + ui_helpline__fpush("Couldn't write to %s: %s", filename, err); return -1; } diff --git a/tools/perf/util/target.c b/tools/perf/util/target.c index 1064d5b..3f59c49 100644 --- a/tools/perf/util/target.c +++ b/tools/perf/util/target.c @@ -110,8 +110,17 @@ int perf_target__strerror(struct perf_target *target, int errnum, int idx; const char *msg; + BUG_ON(buflen > 0); + if (errnum >= 0) { - strerror_r(errnum, buf, buflen); + const char *err = strerror_r(errnum, buf, buflen); + + if (err != buf) { + size_t len = strlen(err); + char *c = mempcpy(buf, err, min(buflen - 1, len)); + *c = '\0'; + } + return 0; } -- cgit v1.1 From 043d1a5c14e95212dbf48251051804ede1ed1862 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Tue, 24 Jul 2012 00:04:07 +0300 Subject: perf tools: Fix build error with bison 2.6 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bison 2.6 started to generate a parse_events_parse() declaration in the header. In this case we have a redundant redeclaration: util/parse-events.c:29:5: error: redundant redeclaration of ‘parse_events_parse’ [-Werror=redundant-decls] In file included from util/parse-events.c:14:0: util/parse-events-bison.h:99:5: note: previous declaration of ‘parse_events_parse’ was here cc1: all warnings being treated as errors Let's disable -Wredundant-decls for util/parse-events.c since it includes a header we can't control. Signed-off-by: Kirill A.
Shutemov Cc: Ingo Molnar Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/20120723210407.GA25186@shutemov.name Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Makefile | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tools/perf/Makefile b/tools/perf/Makefile index e8f0579..77f124f 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -804,6 +804,9 @@ $(OUTPUT)ui/browsers/map.o: ui/browsers/map.c $(OUTPUT)PERF-CFLAGS $(OUTPUT)util/rbtree.o: ../../lib/rbtree.c $(OUTPUT)PERF-CFLAGS $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $< +$(OUTPUT)util/parse-events.o: util/parse-events.c $(OUTPUT)PERF-CFLAGS + $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -Wno-redundant-decls $< + $(OUTPUT)util/scripting-engines/trace-event-perl.o: util/scripting-engines/trace-event-perl.c $(OUTPUT)PERF-CFLAGS $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow $< -- cgit v1.1 From 52b5c0d485385d3c767d979496983ca2b6987f5c Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Fri, 6 Jul 2012 16:21:32 +0900 Subject: tools lib traceevent: Detect build environment changes Cross compiling perf requires setting ARCH and CROSS_COMPILE variables, but libtraceevent couldn't detect the changes so it ends up believing no recompiling is required. Thus the linker failed like: LINK perf ../lib/traceevent//libtraceevent.a: member ../lib/traceevent//libtraceevent.a(event-parse.o) in archive is not an object collect2: ld returned 1 exit status make: *** [perf] Error 1 This patch fixes this by adding TRACEEVENT-CFLAGS file like PERF-CFLAGS to track those changes. Signed-off-by: Namhyung Kim Cc: David Ahern Cc: Ingo Molnar Cc: Jiri Olsa Cc: Paul Mackerras Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1341559297-25725-2-git-send-email-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/lib/traceevent/Makefile | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/tools/lib/traceevent/Makefile b/tools/lib/traceevent/Makefile index 46c2f6b..14131cb 100644 --- a/tools/lib/traceevent/Makefile +++ b/tools/lib/traceevent/Makefile @@ -207,7 +207,7 @@ libtraceevent.so: $(PEVENT_LIB_OBJS) libtraceevent.a: $(PEVENT_LIB_OBJS) $(Q)$(do_build_static_lib) -$(PEVENT_LIB_OBJS): %.o: $(src)/%.c +$(PEVENT_LIB_OBJS): %.o: $(src)/%.c TRACEEVENT-CFLAGS $(Q)$(do_fpic_compile) define make_version.h @@ -272,6 +272,16 @@ ifneq ($(dep_includes),) include $(dep_includes) endif +### Detect environment changes +TRACK_CFLAGS = $(subst ','\'',$(CFLAGS)):$(ARCH):$(CROSS_COMPILE) + +TRACEEVENT-CFLAGS: force + @FLAGS='$(TRACK_CFLAGS)'; \ + if test x"$$FLAGS" != x"`cat TRACEEVENT-CFLAGS 2>/dev/null`" ; then \ + echo 1>&2 " * new build flags or cross compiler"; \ + echo "$$FLAGS" >TRACEEVENT-CFLAGS; \ + fi + tags: force $(RM) tags find . -name '*.[ch]' | xargs ctags --extra=+f --c-kinds=+px \ @@ -297,7 +307,7 @@ install: install_lib clean: $(RM) *.o *~ $(TARGETS) *.a *.so $(VERSION_FILES) .*.d - $(RM) tags TAGS + $(RM) TRACEEVENT-CFLAGS tags TAGS endif # skip-makefile -- cgit v1.1 From 52f18a2ff9b012a7efdbd520ca0dc0e118a8a837 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Fri, 6 Jul 2012 16:21:33 +0900 Subject: tools lib traceevent: Ignore TRACEEVENT-CFLAGS file The TRACEEVENT-CFLAGS file is used to detect any change on compiler flags. Just ignore it. 
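To illustrate the scenario the detection fixes (the cross toolchain prefix is an example only), switching between a native build and a cross build in the same tree now rebuilds libtraceevent instead of reusing stale objects:
$ make -C tools/perf
$ make -C tools/perf ARCH=arm CROSS_COMPILE=arm-linux-gnueabi-
Without the TRACEEVENT-CFLAGS tracking, the second invocation would link the event-parse.o built for the host and fail as shown above.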
Signed-off-by: Namhyung Kim Cc: David Ahern Cc: Ingo Molnar Cc: Jiri Olsa Cc: Paul Mackerras Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1341559297-25725-3-git-send-email-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/lib/traceevent/.gitignore | 1 + 1 file changed, 1 insertion(+) create mode 100644 tools/lib/traceevent/.gitignore diff --git a/tools/lib/traceevent/.gitignore b/tools/lib/traceevent/.gitignore new file mode 100644 index 0000000..35f56be --- /dev/null +++ b/tools/lib/traceevent/.gitignore @@ -0,0 +1 @@ +TRACEEVENT-CFLAGS -- cgit v1.1 From 8696329b7bcf32e69ad12d5975ad1497936d43ec Mon Sep 17 00:00:00 2001 From: Cody Schafer Date: Thu, 19 Jul 2012 20:05:25 -0700 Subject: perf annotate: Prevent overflow in size calculation A large enough symbol size causes an overflow in the size parameter to the histogram allocation, leading to a segfault in symbol__inc_addr_samples later on when this histogram is accessed. With this check in place, a perf-report run gracefully ignores such a sample: the error propagates back, and the chained return value of perf_session_deliver_event is eventually ignored in flush_sample_queue. Signed-off-by: Cody Schafer Acked-by: Namhyung Kim Cc: Ingo Molnar Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Sukadev Bhattiprolu Link: http://lkml.kernel.org/r/1342753525-4521-1-git-send-email-cody@linux.vnet.ibm.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/annotate.c | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c index 7d3641f6..3a282c0 100644 --- a/tools/perf/util/annotate.c +++ b/tools/perf/util/annotate.c @@ -426,7 +426,18 @@ int symbol__alloc_hist(struct symbol *sym) { struct annotation *notes = symbol__annotation(sym); const size_t size = symbol__size(sym); - size_t sizeof_sym_hist = (sizeof(struct sym_hist) + size * sizeof(u64)); + size_t sizeof_sym_hist; + + /* Check for overflow when calculating sizeof_sym_hist */ + if (size > (SIZE_MAX - sizeof(struct sym_hist)) / sizeof(u64)) + return -1; + + sizeof_sym_hist = (sizeof(struct sym_hist) + size * sizeof(u64)); + + /* Check for overflow in zalloc argument */ + if (sizeof_sym_hist > (SIZE_MAX - sizeof(*notes->src)) + / symbol_conf.nr_events) + return -1; notes->src = zalloc(sizeof(*notes->src) + symbol_conf.nr_events * sizeof_sym_hist); if (notes->src == NULL) -- cgit v1.1 From 895dd92c032e1604aa0d7afaef7716e058343b67 Mon Sep 17 00:00:00 2001 From: Andrew Vagin Date: Thu, 12 Jul 2012 14:14:29 +0400 Subject: sched: Deliver sched_switch events to the current task Otherwise they can't be filtered for a defined task: perf record -e sched:sched_switch ./foo This command doesn't report any events without this patch. I think it isn't a security concern if someone knows who will be executed next - this can already be observed by polling /proc state. By default perf is disabled for non-root users in any case. I need these events for profiling sleep times. sched_switch is used for getting callchains and sched_stat_* is used for getting time periods. These events are combined in user space, then they can be analyzed by perf tools.
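[ editor's illustration: both overflow guards in the annotate patch above follow one pattern: refuse any element count that would make "header + n * element" wrap around SIZE_MAX before handing the sum to the allocator. A minimal sketch; alloc_hdr_array() is a hypothetical helper, not a perf API: ]

	#include <stdint.h>
	#include <stdlib.h>

	/* returns zeroed storage for a header plus n fixed-size elements,
	 * or NULL if the size computation would overflow (assumes elem != 0) */
	static void *alloc_hdr_array(size_t hdr, size_t n, size_t elem)
	{
		if (n > (SIZE_MAX - hdr) / elem)
			return NULL;			/* hdr + n * elem would wrap */

		return calloc(1, hdr + n * elem);	/* zeroed, like perf's zalloc() */
	}

[ the patch applies this guard twice because two sums are formed: once for the per-symbol histogram size, and once more for the zalloc argument that multiplies it by nr_events. ]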
Signed-off-by: Andrew Vagin Signed-off-by: Peter Zijlstra Cc: Steven Rostedt Cc: Arun Sharma Link: http://lkml.kernel.org/r/1342088069-1005148-1-git-send-email-avagin@openvz.org Signed-off-by: Ingo Molnar --- kernel/sched/core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 468bdd4..ad732b5 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1910,12 +1910,12 @@ static inline void prepare_task_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next) { + trace_sched_switch(prev, next); sched_info_switch(prev, next); perf_event_task_sched_out(prev, next); fire_sched_out_preempt_notifiers(prev, next); prepare_lock_switch(rq, next); prepare_arch_switch(next); - trace_sched_switch(prev, next); } /** -- cgit v1.1 From 4f3f713fc78d966d81ad87d2f3587369f9b34ae6 Mon Sep 17 00:00:00 2001 From: "Yan, Zheng" Date: Mon, 23 Jul 2012 14:23:30 +0800 Subject: perf/x86: Fix typo in format definition of uncore PCU filter The format definition of uncore PCU filter should be filter_band* instead of filter_brand*. Reported-by: Stephane Eranian Signed-off-by: Yan, Zheng Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/1343024611-4692-1-git-send-email-zheng.z.yan@intel.com Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event_intel_uncore.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c index 19faffc..a5de59f 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c @@ -33,10 +33,10 @@ DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4"); DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17"); DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22"); DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31"); -DEFINE_UNCORE_FORMAT_ATTR(filter_brand0, filter_brand0, "config1:0-7"); -DEFINE_UNCORE_FORMAT_ATTR(filter_brand1, filter_brand1, "config1:8-15"); -DEFINE_UNCORE_FORMAT_ATTR(filter_brand2, filter_brand2, "config1:16-23"); -DEFINE_UNCORE_FORMAT_ATTR(filter_brand3, filter_brand3, "config1:24-31"); +DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7"); +DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15"); +DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23"); +DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31"); /* Sandy Bridge-EP uncore support */ static struct intel_uncore_type snbep_uncore_cbox; @@ -272,10 +272,10 @@ static struct attribute *snbep_uncore_pcu_formats_attr[] = { &format_attr_thresh5.attr, &format_attr_occ_invert.attr, &format_attr_occ_edge.attr, - &format_attr_filter_brand0.attr, - &format_attr_filter_brand1.attr, - &format_attr_filter_brand2.attr, - &format_attr_filter_brand3.attr, + &format_attr_filter_band0.attr, + &format_attr_filter_band1.attr, + &format_attr_filter_band2.attr, + &format_attr_filter_band3.attr, NULL, }; -- cgit v1.1 From 254298c726b93bb8ed92774b4a209b479851fa6d Mon Sep 17 00:00:00 2001 From: "Yan, Zheng" Date: Thu, 5 Jul 2012 14:32:17 +0800 Subject: perf/x86: Add Intel Nehalem-EX uncore support The uncore subsystem in Nehalem-EX consists of 7 components (U-Box, C-Box, B-Box, S-Box, R-Box, M-Box and W-Box). This patch is large because the way to program these boxes is diverse. 
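[ editor's illustration: a recurring idiom in the diff below is claiming a shared match/mask register through a refcount: the claim succeeds only when the register is free or is already programmed with an identical config. A simplified userspace sketch, with a pthread mutex standing in for the kernel's raw spinlock and atomics: ]

	#include <pthread.h>
	#include <stdbool.h>

	struct shared_reg {
		pthread_mutex_t lock;
		int ref;			/* stands in for atomic_t er->ref */
		unsigned long long config;
	};

	static struct shared_reg er = { .lock = PTHREAD_MUTEX_INITIALIZER };

	static bool shared_reg_get(struct shared_reg *reg, unsigned long long config)
	{
		bool ok = false;

		pthread_mutex_lock(&reg->lock);
		if (!reg->ref || reg->config == config) {
			reg->ref++;
			reg->config = config;
			ok = true;
		}
		pthread_mutex_unlock(&reg->lock);
		return ok;
	}

	static void shared_reg_put(struct shared_reg *reg)
	{
		pthread_mutex_lock(&reg->lock);
		reg->ref--;
		pthread_mutex_unlock(&reg->lock);
	}

[ events asking for identical configs all take the register; a conflicting config gets false back, which the code below maps to the empty constraint. ]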
Signed-off-by: Yan, Zheng Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/4FF534F1.3030307@intel.com [ Improved the code. ] Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event_intel_uncore.c | 1484 +++++++++++++++++++++---- arch/x86/kernel/cpu/perf_event_intel_uncore.h | 203 +++- 2 files changed, 1455 insertions(+), 232 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c index a5de59f..d998170 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c @@ -38,6 +38,77 @@ DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15"); DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23"); DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31"); +static u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event) +{ + u64 count; + + rdmsrl(event->hw.event_base, count); + + return count; +} + +/* + * generic get constraint function for shared match/mask registers. + */ +static struct event_constraint * +uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event) +{ + struct intel_uncore_extra_reg *er; + struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; + struct hw_perf_event_extra *reg2 = &event->hw.branch_reg; + unsigned long flags; + bool ok = false; + + /* + * reg->alloc can be set due to existing state, so for fake box we + * need to ignore this, otherwise we might fail to allocate proper + * fake state for this extra reg constraint. + */ + if (reg1->idx == EXTRA_REG_NONE || + (!uncore_box_is_fake(box) && reg1->alloc)) + return NULL; + + er = &box->shared_regs[reg1->idx]; + raw_spin_lock_irqsave(&er->lock, flags); + if (!atomic_read(&er->ref) || + (er->config1 == reg1->config && er->config2 == reg2->config)) { + atomic_inc(&er->ref); + er->config1 = reg1->config; + er->config2 = reg2->config; + ok = true; + } + raw_spin_unlock_irqrestore(&er->lock, flags); + + if (ok) { + if (!uncore_box_is_fake(box)) + reg1->alloc = 1; + return NULL; + } + + return &constraint_empty; +} + +static void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event) +{ + struct intel_uncore_extra_reg *er; + struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; + + /* + * Only put constraint if extra reg was actually allocated. Also + * takes care of event which do not use an extra shared reg. + * + * Also, if this is a fake box we shouldn't touch any event state + * (reg->alloc) and we don't care about leaving inconsistent box + * state either since it will be thrown out. 
+ */ + if (uncore_box_is_fake(box) || !reg1->alloc) + return; + + er = &box->shared_regs[reg1->idx]; + atomic_dec(&er->ref); + reg1->alloc = 0; +} + /* Sandy Bridge-EP uncore support */ static struct intel_uncore_type snbep_uncore_cbox; static struct intel_uncore_type snbep_uncore_pcu; @@ -64,18 +135,15 @@ static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box) pci_write_config_dword(pdev, box_ctl, config); } -static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, - struct perf_event *event) +static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event) { struct pci_dev *pdev = box->pci_dev; struct hw_perf_event *hwc = &event->hw; - pci_write_config_dword(pdev, hwc->config_base, hwc->config | - SNBEP_PMON_CTL_EN); + pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN); } -static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, - struct perf_event *event) +static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event) { struct pci_dev *pdev = box->pci_dev; struct hw_perf_event *hwc = &event->hw; @@ -83,8 +151,7 @@ static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, pci_write_config_dword(pdev, hwc->config_base, hwc->config); } -static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, - struct perf_event *event) +static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event) { struct pci_dev *pdev = box->pci_dev; struct hw_perf_event *hwc = &event->hw; @@ -92,14 +159,15 @@ static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count); pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1); + return count; } static void snbep_uncore_pci_init_box(struct intel_uncore_box *box) { struct pci_dev *pdev = box->pci_dev; - pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, - SNBEP_PMON_BOX_CTL_INT); + + pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, SNBEP_PMON_BOX_CTL_INT); } static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box) @@ -112,7 +180,6 @@ static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box) rdmsrl(msr, config); config |= SNBEP_PMON_BOX_CTL_FRZ; wrmsrl(msr, config); - return; } } @@ -126,12 +193,10 @@ static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box) rdmsrl(msr, config); config &= ~SNBEP_PMON_BOX_CTL_FRZ; wrmsrl(msr, config); - return; } } -static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, - struct perf_event *event) +static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) { struct hw_perf_event *hwc = &event->hw; struct hw_perf_event_extra *reg1 = &hwc->extra_reg; @@ -150,68 +215,15 @@ static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box, wrmsrl(hwc->config_base, hwc->config); } -static u64 snbep_uncore_msr_read_counter(struct intel_uncore_box *box, - struct perf_event *event) -{ - struct hw_perf_event *hwc = &event->hw; - u64 count; - - rdmsrl(hwc->event_base, count); - return count; -} - static void snbep_uncore_msr_init_box(struct intel_uncore_box *box) { unsigned msr = uncore_msr_box_ctl(box); + if (msr) wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT); } -static struct event_constraint * -snbep_uncore_get_constraint(struct intel_uncore_box *box, - struct perf_event *event) -{ - struct intel_uncore_extra_reg *er; - struct hw_perf_event_extra *reg1 = 
&event->hw.extra_reg; - unsigned long flags; - bool ok = false; - - if (reg1->idx == EXTRA_REG_NONE || (box->phys_id >= 0 && reg1->alloc)) - return NULL; - - er = &box->shared_regs[reg1->idx]; - raw_spin_lock_irqsave(&er->lock, flags); - if (!atomic_read(&er->ref) || er->config1 == reg1->config) { - atomic_inc(&er->ref); - er->config1 = reg1->config; - ok = true; - } - raw_spin_unlock_irqrestore(&er->lock, flags); - - if (ok) { - if (box->phys_id >= 0) - reg1->alloc = 1; - return NULL; - } - return &constraint_empty; -} - -static void snbep_uncore_put_constraint(struct intel_uncore_box *box, - struct perf_event *event) -{ - struct intel_uncore_extra_reg *er; - struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; - - if (box->phys_id < 0 || !reg1->alloc) - return; - - er = &box->shared_regs[reg1->idx]; - atomic_dec(&er->ref); - reg1->alloc = 0; -} - -static int snbep_uncore_hw_config(struct intel_uncore_box *box, - struct perf_event *event) +static int snbep_uncore_hw_config(struct intel_uncore_box *box, struct perf_event *event) { struct hw_perf_event *hwc = &event->hw; struct hw_perf_event_extra *reg1 = &hwc->extra_reg; @@ -221,14 +233,16 @@ static int snbep_uncore_hw_config(struct intel_uncore_box *box, SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx; reg1->config = event->attr.config1 & SNBEP_CB0_MSR_PMON_BOX_FILTER_MASK; - } else if (box->pmu->type == &snbep_uncore_pcu) { - reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER; - reg1->config = event->attr.config1 & - SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK; } else { - return 0; + if (box->pmu->type == &snbep_uncore_pcu) { + reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER; + reg1->config = event->attr.config1 & SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK; + } else { + return 0; + } } reg1->idx = 0; + return 0; } @@ -320,9 +334,9 @@ static struct intel_uncore_ops snbep_uncore_msr_ops = { .enable_box = snbep_uncore_msr_enable_box, .disable_event = snbep_uncore_msr_disable_event, .enable_event = snbep_uncore_msr_enable_event, - .read_counter = snbep_uncore_msr_read_counter, - .get_constraint = snbep_uncore_get_constraint, - .put_constraint = snbep_uncore_put_constraint, + .read_counter = uncore_msr_read_counter, + .get_constraint = uncore_get_constraint, + .put_constraint = uncore_put_constraint, .hw_config = snbep_uncore_hw_config, }; @@ -589,188 +603,1208 @@ static void snbep_pci2phy_map_init(void) /* get the Node ID mapping */ pci_read_config_dword(ubox_dev, 0x54, &config); /* - * every three bits in the Node ID mapping register maps - * to a particular node. + * every three bits in the Node ID mapping register maps + * to a particular node. 
+ */ + for (i = 0; i < 8; i++) { + if (nodeid == ((config >> (3 * i)) & 0x7)) { + pcibus_to_physid[bus] = i; + break; + } + } + }; + return; +} +/* end of Sandy Bridge-EP uncore support */ + +/* Sandy Bridge uncore support */ +static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + + if (hwc->idx < UNCORE_PMC_IDX_FIXED) + wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN); + else + wrmsrl(hwc->config_base, SNB_UNC_CTL_EN); +} + +static void snb_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event) +{ + wrmsrl(event->hw.config_base, 0); +} + +static void snb_uncore_msr_init_box(struct intel_uncore_box *box) +{ + if (box->pmu->pmu_idx == 0) { + wrmsrl(SNB_UNC_PERF_GLOBAL_CTL, + SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL); + } +} + +static struct attribute *snb_uncore_formats_attr[] = { + &format_attr_event.attr, + &format_attr_umask.attr, + &format_attr_edge.attr, + &format_attr_inv.attr, + &format_attr_cmask5.attr, + NULL, +}; + +static struct attribute_group snb_uncore_format_group = { + .name = "format", + .attrs = snb_uncore_formats_attr, +}; + +static struct intel_uncore_ops snb_uncore_msr_ops = { + .init_box = snb_uncore_msr_init_box, + .disable_event = snb_uncore_msr_disable_event, + .enable_event = snb_uncore_msr_enable_event, + .read_counter = uncore_msr_read_counter, +}; + +static struct event_constraint snb_uncore_cbox_constraints[] = { + UNCORE_EVENT_CONSTRAINT(0x80, 0x1), + UNCORE_EVENT_CONSTRAINT(0x83, 0x1), + EVENT_CONSTRAINT_END +}; + +static struct intel_uncore_type snb_uncore_cbox = { + .name = "cbox", + .num_counters = 2, + .num_boxes = 4, + .perf_ctr_bits = 44, + .fixed_ctr_bits = 48, + .perf_ctr = SNB_UNC_CBO_0_PER_CTR0, + .event_ctl = SNB_UNC_CBO_0_PERFEVTSEL0, + .fixed_ctr = SNB_UNC_FIXED_CTR, + .fixed_ctl = SNB_UNC_FIXED_CTR_CTRL, + .single_fixed = 1, + .event_mask = SNB_UNC_RAW_EVENT_MASK, + .msr_offset = SNB_UNC_CBO_MSR_OFFSET, + .constraints = snb_uncore_cbox_constraints, + .ops = &snb_uncore_msr_ops, + .format_group = &snb_uncore_format_group, +}; + +static struct intel_uncore_type *snb_msr_uncores[] = { + &snb_uncore_cbox, + NULL, +}; +/* end of Sandy Bridge uncore support */ + +/* Nehalem uncore support */ +static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box) +{ + wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, 0); +} + +static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box) +{ + wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC); +} + +static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + + if (hwc->idx < UNCORE_PMC_IDX_FIXED) + wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN); + else + wrmsrl(hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN); +} + +static struct attribute *nhm_uncore_formats_attr[] = { + &format_attr_event.attr, + &format_attr_umask.attr, + &format_attr_edge.attr, + &format_attr_inv.attr, + &format_attr_cmask8.attr, + NULL, +}; + +static struct attribute_group nhm_uncore_format_group = { + .name = "format", + .attrs = nhm_uncore_formats_attr, +}; + +static struct uncore_event_desc nhm_uncore_events[] = { + INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"), + INTEL_UNCORE_EVENT_DESC(qmc_writes_full_any, "event=0x2f,umask=0x0f"), + INTEL_UNCORE_EVENT_DESC(qmc_normal_reads_any, "event=0x2c,umask=0x0f"), + INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_reads, 
"event=0x20,umask=0x01"), + INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_writes, "event=0x20,umask=0x02"), + INTEL_UNCORE_EVENT_DESC(qhl_request_remote_reads, "event=0x20,umask=0x04"), + INTEL_UNCORE_EVENT_DESC(qhl_request_remote_writes, "event=0x20,umask=0x08"), + INTEL_UNCORE_EVENT_DESC(qhl_request_local_reads, "event=0x20,umask=0x10"), + INTEL_UNCORE_EVENT_DESC(qhl_request_local_writes, "event=0x20,umask=0x20"), + { /* end: all zeroes */ }, +}; + +static struct intel_uncore_ops nhm_uncore_msr_ops = { + .disable_box = nhm_uncore_msr_disable_box, + .enable_box = nhm_uncore_msr_enable_box, + .disable_event = snb_uncore_msr_disable_event, + .enable_event = nhm_uncore_msr_enable_event, + .read_counter = uncore_msr_read_counter, +}; + +static struct intel_uncore_type nhm_uncore = { + .name = "", + .num_counters = 8, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .event_ctl = NHM_UNC_PERFEVTSEL0, + .perf_ctr = NHM_UNC_UNCORE_PMC0, + .fixed_ctr = NHM_UNC_FIXED_CTR, + .fixed_ctl = NHM_UNC_FIXED_CTR_CTRL, + .event_mask = NHM_UNC_RAW_EVENT_MASK, + .event_descs = nhm_uncore_events, + .ops = &nhm_uncore_msr_ops, + .format_group = &nhm_uncore_format_group, +}; + +static struct intel_uncore_type *nhm_msr_uncores[] = { + &nhm_uncore, + NULL, +}; +/* end of Nehalem uncore support */ + +/* Nehalem-EX uncore support */ +#define __BITS_VALUE(x, i, n) ((typeof(x))(((x) >> ((i) * (n))) & \ + ((1ULL << (n)) - 1))) + +DEFINE_UNCORE_FORMAT_ATTR(event5, event, "config:1-5"); +DEFINE_UNCORE_FORMAT_ATTR(counter, counter, "config:6-7"); +DEFINE_UNCORE_FORMAT_ATTR(mm_cfg, mm_cfg, "config:63"); +DEFINE_UNCORE_FORMAT_ATTR(match, match, "config1:0-63"); +DEFINE_UNCORE_FORMAT_ATTR(mask, mask, "config2:0-63"); + +static void nhmex_uncore_msr_init_box(struct intel_uncore_box *box) +{ + wrmsrl(NHMEX_U_MSR_PMON_GLOBAL_CTL, NHMEX_U_PMON_GLOBAL_EN_ALL); +} + +static void nhmex_uncore_msr_disable_box(struct intel_uncore_box *box) +{ + unsigned msr = uncore_msr_box_ctl(box); + u64 config; + + if (msr) { + rdmsrl(msr, config); + config &= ~((1ULL << uncore_num_counters(box)) - 1); + /* WBox has a fixed counter */ + if (uncore_msr_fixed_ctl(box)) + config &= ~NHMEX_W_PMON_GLOBAL_FIXED_EN; + wrmsrl(msr, config); + } +} + +static void nhmex_uncore_msr_enable_box(struct intel_uncore_box *box) +{ + unsigned msr = uncore_msr_box_ctl(box); + u64 config; + + if (msr) { + rdmsrl(msr, config); + config |= (1ULL << uncore_num_counters(box)) - 1; + /* WBox has a fixed counter */ + if (uncore_msr_fixed_ctl(box)) + config |= NHMEX_W_PMON_GLOBAL_FIXED_EN; + wrmsrl(msr, config); + } +} + +static void nhmex_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event) +{ + wrmsrl(event->hw.config_base, 0); +} + +static void nhmex_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + + if (hwc->idx >= UNCORE_PMC_IDX_FIXED) + wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0); + else if (box->pmu->type->event_mask & NHMEX_PMON_CTL_EN_BIT0) + wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22); + else + wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0); +} + +#define NHMEX_UNCORE_OPS_COMMON_INIT() \ + .init_box = nhmex_uncore_msr_init_box, \ + .disable_box = nhmex_uncore_msr_disable_box, \ + .enable_box = nhmex_uncore_msr_enable_box, \ + .disable_event = nhmex_uncore_msr_disable_event, \ + .read_counter = uncore_msr_read_counter + +static struct intel_uncore_ops nhmex_uncore_ops = { + NHMEX_UNCORE_OPS_COMMON_INIT(), + 
.enable_event = nhmex_uncore_msr_enable_event, +}; + +static struct attribute *nhmex_uncore_ubox_formats_attr[] = { + &format_attr_event.attr, + &format_attr_edge.attr, + NULL, +}; + +static struct attribute_group nhmex_uncore_ubox_format_group = { + .name = "format", + .attrs = nhmex_uncore_ubox_formats_attr, +}; + +static struct intel_uncore_type nhmex_uncore_ubox = { + .name = "ubox", + .num_counters = 1, + .num_boxes = 1, + .perf_ctr_bits = 48, + .event_ctl = NHMEX_U_MSR_PMON_EV_SEL, + .perf_ctr = NHMEX_U_MSR_PMON_CTR, + .event_mask = NHMEX_U_PMON_RAW_EVENT_MASK, + .box_ctl = NHMEX_U_MSR_PMON_GLOBAL_CTL, + .ops = &nhmex_uncore_ops, + .format_group = &nhmex_uncore_ubox_format_group +}; + +static struct attribute *nhmex_uncore_cbox_formats_attr[] = { + &format_attr_event.attr, + &format_attr_umask.attr, + &format_attr_edge.attr, + &format_attr_inv.attr, + &format_attr_thresh8.attr, + NULL, +}; + +static struct attribute_group nhmex_uncore_cbox_format_group = { + .name = "format", + .attrs = nhmex_uncore_cbox_formats_attr, +}; + +static struct intel_uncore_type nhmex_uncore_cbox = { + .name = "cbox", + .num_counters = 6, + .num_boxes = 8, + .perf_ctr_bits = 48, + .event_ctl = NHMEX_C0_MSR_PMON_EV_SEL0, + .perf_ctr = NHMEX_C0_MSR_PMON_CTR0, + .event_mask = NHMEX_PMON_RAW_EVENT_MASK, + .box_ctl = NHMEX_C0_MSR_PMON_GLOBAL_CTL, + .msr_offset = NHMEX_C_MSR_OFFSET, + .pair_ctr_ctl = 1, + .ops = &nhmex_uncore_ops, + .format_group = &nhmex_uncore_cbox_format_group +}; + +static struct uncore_event_desc nhmex_uncore_wbox_events[] = { + INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0"), + { /* end: all zeroes */ }, +}; + +static struct intel_uncore_type nhmex_uncore_wbox = { + .name = "wbox", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .event_ctl = NHMEX_W_MSR_PMON_CNT0, + .perf_ctr = NHMEX_W_MSR_PMON_EVT_SEL0, + .fixed_ctr = NHMEX_W_MSR_PMON_FIXED_CTR, + .fixed_ctl = NHMEX_W_MSR_PMON_FIXED_CTL, + .event_mask = NHMEX_PMON_RAW_EVENT_MASK, + .box_ctl = NHMEX_W_MSR_GLOBAL_CTL, + .pair_ctr_ctl = 1, + .event_descs = nhmex_uncore_wbox_events, + .ops = &nhmex_uncore_ops, + .format_group = &nhmex_uncore_cbox_format_group +}; + +static int nhmex_bbox_hw_config(struct intel_uncore_box *box, struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + struct hw_perf_event_extra *reg1 = &hwc->extra_reg; + struct hw_perf_event_extra *reg2 = &hwc->branch_reg; + int ctr, ev_sel; + + ctr = (hwc->config & NHMEX_B_PMON_CTR_MASK) >> + NHMEX_B_PMON_CTR_SHIFT; + ev_sel = (hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK) >> + NHMEX_B_PMON_CTL_EV_SEL_SHIFT; + + /* events that do not use the match/mask registers */ + if ((ctr == 0 && ev_sel > 0x3) || (ctr == 1 && ev_sel > 0x6) || + (ctr == 2 && ev_sel != 0x4) || ctr == 3) + return 0; + + if (box->pmu->pmu_idx == 0) + reg1->reg = NHMEX_B0_MSR_MATCH; + else + reg1->reg = NHMEX_B1_MSR_MATCH; + reg1->idx = 0; + reg1->config = event->attr.config1; + reg2->config = event->attr.config2; + return 0; +} + +static void nhmex_bbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + struct hw_perf_event_extra *reg1 = &hwc->extra_reg; + struct hw_perf_event_extra *reg2 = &hwc->branch_reg; + + if (reg1->idx != EXTRA_REG_NONE) { + wrmsrl(reg1->reg, reg1->config); + wrmsrl(reg1->reg + 1, reg2->config); + } + wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 | + (hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK)); +} + +/* + * The Bbox has 4 counters, but each counter monitors different events. 
+ * Use bits 6-7 in the event config to select counter. + */ +static struct event_constraint nhmex_uncore_bbox_constraints[] = { + EVENT_CONSTRAINT(0 , 1, 0xc0), + EVENT_CONSTRAINT(0x40, 2, 0xc0), + EVENT_CONSTRAINT(0x80, 4, 0xc0), + EVENT_CONSTRAINT(0xc0, 8, 0xc0), + EVENT_CONSTRAINT_END, +}; + +static struct attribute *nhmex_uncore_bbox_formats_attr[] = { + &format_attr_event5.attr, + &format_attr_counter.attr, + &format_attr_match.attr, + &format_attr_mask.attr, + NULL, +}; + +static struct attribute_group nhmex_uncore_bbox_format_group = { + .name = "format", + .attrs = nhmex_uncore_bbox_formats_attr, +}; + +static struct intel_uncore_ops nhmex_uncore_bbox_ops = { + NHMEX_UNCORE_OPS_COMMON_INIT(), + .enable_event = nhmex_bbox_msr_enable_event, + .hw_config = nhmex_bbox_hw_config, + .get_constraint = uncore_get_constraint, + .put_constraint = uncore_put_constraint, +}; + +static struct intel_uncore_type nhmex_uncore_bbox = { + .name = "bbox", + .num_counters = 4, + .num_boxes = 2, + .perf_ctr_bits = 48, + .event_ctl = NHMEX_B0_MSR_PMON_CTL0, + .perf_ctr = NHMEX_B0_MSR_PMON_CTR0, + .event_mask = NHMEX_B_PMON_RAW_EVENT_MASK, + .box_ctl = NHMEX_B0_MSR_PMON_GLOBAL_CTL, + .msr_offset = NHMEX_B_MSR_OFFSET, + .pair_ctr_ctl = 1, + .num_shared_regs = 1, + .constraints = nhmex_uncore_bbox_constraints, + .ops = &nhmex_uncore_bbox_ops, + .format_group = &nhmex_uncore_bbox_format_group +}; + +static int nhmex_sbox_hw_config(struct intel_uncore_box *box, struct perf_event *event) +{ + struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; + struct hw_perf_event_extra *reg2 = &event->hw.branch_reg; + + if (event->attr.config & NHMEX_S_PMON_MM_CFG_EN) { + reg1->config = event->attr.config1; + reg2->config = event->attr.config2; + } else { + reg1->config = ~0ULL; + reg2->config = ~0ULL; + } + + if (box->pmu->pmu_idx == 0) + reg1->reg = NHMEX_S0_MSR_MM_CFG; + else + reg1->reg = NHMEX_S1_MSR_MM_CFG; + + reg1->idx = 0; + + return 0; +} + +static void nhmex_sbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + struct hw_perf_event_extra *reg1 = &hwc->extra_reg; + struct hw_perf_event_extra *reg2 = &hwc->branch_reg; + + wrmsrl(reg1->reg, 0); + if (reg1->config != ~0ULL || reg2->config != ~0ULL) { + wrmsrl(reg1->reg + 1, reg1->config); + wrmsrl(reg1->reg + 2, reg2->config); + wrmsrl(reg1->reg, NHMEX_S_PMON_MM_CFG_EN); + } + wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22); +} + +static struct attribute *nhmex_uncore_sbox_formats_attr[] = { + &format_attr_event.attr, + &format_attr_umask.attr, + &format_attr_edge.attr, + &format_attr_inv.attr, + &format_attr_thresh8.attr, + &format_attr_mm_cfg.attr, + &format_attr_match.attr, + &format_attr_mask.attr, + NULL, +}; + +static struct attribute_group nhmex_uncore_sbox_format_group = { + .name = "format", + .attrs = nhmex_uncore_sbox_formats_attr, +}; + +static struct intel_uncore_ops nhmex_uncore_sbox_ops = { + NHMEX_UNCORE_OPS_COMMON_INIT(), + .enable_event = nhmex_sbox_msr_enable_event, + .hw_config = nhmex_sbox_hw_config, + .get_constraint = uncore_get_constraint, + .put_constraint = uncore_put_constraint, +}; + +static struct intel_uncore_type nhmex_uncore_sbox = { + .name = "sbox", + .num_counters = 4, + .num_boxes = 2, + .perf_ctr_bits = 48, + .event_ctl = NHMEX_S0_MSR_PMON_CTL0, + .perf_ctr = NHMEX_S0_MSR_PMON_CTR0, + .event_mask = NHMEX_PMON_RAW_EVENT_MASK, + .box_ctl = NHMEX_S0_MSR_PMON_GLOBAL_CTL, + .msr_offset = NHMEX_S_MSR_OFFSET, + .pair_ctr_ctl = 1, + 
.num_shared_regs = 1, + .ops = &nhmex_uncore_sbox_ops, + .format_group = &nhmex_uncore_sbox_format_group +}; + +enum { + EXTRA_REG_NHMEX_M_FILTER, + EXTRA_REG_NHMEX_M_DSP, + EXTRA_REG_NHMEX_M_ISS, + EXTRA_REG_NHMEX_M_MAP, + EXTRA_REG_NHMEX_M_MSC_THR, + EXTRA_REG_NHMEX_M_PGT, + EXTRA_REG_NHMEX_M_PLD, + EXTRA_REG_NHMEX_M_ZDP_CTL_FVC, +}; + +static struct extra_reg nhmex_uncore_mbox_extra_regs[] = { + MBOX_INC_SEL_EXTAR_REG(0x0, DSP), + MBOX_INC_SEL_EXTAR_REG(0x4, MSC_THR), + MBOX_INC_SEL_EXTAR_REG(0x5, MSC_THR), + MBOX_INC_SEL_EXTAR_REG(0x9, ISS), + /* event 0xa uses two extra registers */ + MBOX_INC_SEL_EXTAR_REG(0xa, ISS), + MBOX_INC_SEL_EXTAR_REG(0xa, PLD), + MBOX_INC_SEL_EXTAR_REG(0xb, PLD), + /* events 0xd ~ 0x10 use the same extra register */ + MBOX_INC_SEL_EXTAR_REG(0xd, ZDP_CTL_FVC), + MBOX_INC_SEL_EXTAR_REG(0xe, ZDP_CTL_FVC), + MBOX_INC_SEL_EXTAR_REG(0xf, ZDP_CTL_FVC), + MBOX_INC_SEL_EXTAR_REG(0x10, ZDP_CTL_FVC), + MBOX_INC_SEL_EXTAR_REG(0x16, PGT), + MBOX_SET_FLAG_SEL_EXTRA_REG(0x0, DSP), + MBOX_SET_FLAG_SEL_EXTRA_REG(0x1, ISS), + MBOX_SET_FLAG_SEL_EXTRA_REG(0x5, PGT), + MBOX_SET_FLAG_SEL_EXTRA_REG(0x6, MAP), + EVENT_EXTRA_END +}; + +static bool nhmex_mbox_get_shared_reg(struct intel_uncore_box *box, int idx, u64 config) +{ + struct intel_uncore_extra_reg *er; + unsigned long flags; + bool ret = false; + u64 mask; + + if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) { + er = &box->shared_regs[idx]; + raw_spin_lock_irqsave(&er->lock, flags); + if (!atomic_read(&er->ref) || er->config == config) { + atomic_inc(&er->ref); + er->config = config; + ret = true; + } + raw_spin_unlock_irqrestore(&er->lock, flags); + + return ret; + } + /* + * The ZDP_CTL_FVC MSR has 4 fields which are used to control + * events 0xd ~ 0x10. Besides these 4 fields, there are additional + * fields which are shared. 
+ */ + idx -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC; + if (WARN_ON_ONCE(idx >= 4)) + return false; + + /* mask of the shared fields */ + mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK; + er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC]; + + raw_spin_lock_irqsave(&er->lock, flags); + /* add mask of the non-shared field if it's in use */ + if (__BITS_VALUE(atomic_read(&er->ref), idx, 8)) + mask |= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx); + + if (!atomic_read(&er->ref) || !((er->config ^ config) & mask)) { + atomic_add(1 << (idx * 8), &er->ref); + mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK | + NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx); + er->config &= ~mask; + er->config |= (config & mask); + ret = true; + } + raw_spin_unlock_irqrestore(&er->lock, flags); + + return ret; +} + +static void nhmex_mbox_put_shared_reg(struct intel_uncore_box *box, int idx) +{ + struct intel_uncore_extra_reg *er; + + if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) { + er = &box->shared_regs[idx]; + atomic_dec(&er->ref); + return; + } + + idx -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC; + er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC]; + atomic_sub(1 << (idx * 8), &er->ref); +} + +u64 nhmex_mbox_alter_er(struct perf_event *event, int new_idx, bool modify) +{ + struct hw_perf_event *hwc = &event->hw; + struct hw_perf_event_extra *reg1 = &hwc->extra_reg; + int idx, orig_idx = __BITS_VALUE(reg1->idx, 0, 8); + u64 config = reg1->config; + + /* get the non-shared control bits and shift them */ + idx = orig_idx - EXTRA_REG_NHMEX_M_ZDP_CTL_FVC; + config &= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx); + if (new_idx > orig_idx) { + idx = new_idx - orig_idx; + config <<= 3 * idx; + } else { + idx = orig_idx - new_idx; + config >>= 3 * idx; + } + + /* add the shared control bits back */ + config |= NHMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config; + if (modify) { + /* adjust the main event selector */ + if (new_idx > orig_idx) + hwc->config += idx << NHMEX_M_PMON_CTL_INC_SEL_SHIFT; + else + hwc->config -= idx << NHMEX_M_PMON_CTL_INC_SEL_SHIFT; + reg1->config = config; + reg1->idx = ~0xff | new_idx; + } + return config; +} + +static struct event_constraint * +nhmex_mbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event) +{ + struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; + struct hw_perf_event_extra *reg2 = &event->hw.branch_reg; + int i, idx[2], alloc = 0; + u64 config1 = reg1->config; + + idx[0] = __BITS_VALUE(reg1->idx, 0, 8); + idx[1] = __BITS_VALUE(reg1->idx, 1, 8); +again: + for (i = 0; i < 2; i++) { + if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i))) + idx[i] = 0xff; + + if (idx[i] == 0xff) + continue; + + if (!nhmex_mbox_get_shared_reg(box, idx[i], + __BITS_VALUE(config1, i, 32))) + goto fail; + alloc |= (0x1 << i); + } + + /* for the match/mask registers */ + if ((uncore_box_is_fake(box) || !reg2->alloc) && + !nhmex_mbox_get_shared_reg(box, reg2->idx, reg2->config)) + goto fail; + + /* + * If it's a fake box -- as per validate_{group,event}() we + * shouldn't touch event state and we can avoid doing so + * since both will only call get_event_constraints() once + * on each event, this avoids the need for reg->alloc. 
+ */ + if (!uncore_box_is_fake(box)) { + if (idx[0] != 0xff && idx[0] != __BITS_VALUE(reg1->idx, 0, 8)) + nhmex_mbox_alter_er(event, idx[0], true); + reg1->alloc |= alloc; + reg2->alloc = 1; + } + return NULL; +fail: + if (idx[0] != 0xff && !(alloc & 0x1) && + idx[0] >= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) { + /* + * events 0xd ~ 0x10 are functional identical, but are + * controlled by different fields in the ZDP_CTL_FVC + * register. If we failed to take one field, try the + * rest 3 choices. */ - for (i = 0; i < 8; i++) { - if (nodeid == ((config >> (3 * i)) & 0x7)) { - pcibus_to_physid[bus] = i; - break; - } + BUG_ON(__BITS_VALUE(reg1->idx, 1, 8) != 0xff); + idx[0] -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC; + idx[0] = (idx[0] + 1) % 4; + idx[0] += EXTRA_REG_NHMEX_M_ZDP_CTL_FVC; + if (idx[0] != __BITS_VALUE(reg1->idx, 0, 8)) { + config1 = nhmex_mbox_alter_er(event, idx[0], false); + goto again; } - }; - return; -} -/* end of Sandy Bridge-EP uncore support */ + } + if (alloc & 0x1) + nhmex_mbox_put_shared_reg(box, idx[0]); + if (alloc & 0x2) + nhmex_mbox_put_shared_reg(box, idx[1]); + return &constraint_empty; +} -/* Sandy Bridge uncore support */ -static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, - struct perf_event *event) +static void nhmex_mbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event) { - struct hw_perf_event *hwc = &event->hw; + struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; + struct hw_perf_event_extra *reg2 = &event->hw.branch_reg; - if (hwc->idx < UNCORE_PMC_IDX_FIXED) - wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN); - else - wrmsrl(hwc->config_base, SNB_UNC_CTL_EN); + if (uncore_box_is_fake(box)) + return; + + if (reg1->alloc & 0x1) + nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 0, 8)); + if (reg1->alloc & 0x2) + nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 1, 8)); + reg1->alloc = 0; + + if (reg2->alloc) { + nhmex_mbox_put_shared_reg(box, reg2->idx); + reg2->alloc = 0; + } } -static void snb_uncore_msr_disable_event(struct intel_uncore_box *box, - struct perf_event *event) +static int nhmex_mbox_extra_reg_idx(struct extra_reg *er) { - wrmsrl(event->hw.config_base, 0); + if (er->idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) + return er->idx; + return er->idx + (er->event >> NHMEX_M_PMON_CTL_INC_SEL_SHIFT) - 0xd; } -static u64 snb_uncore_msr_read_counter(struct intel_uncore_box *box, - struct perf_event *event) +static int nhmex_mbox_hw_config(struct intel_uncore_box *box, struct perf_event *event) { - u64 count; - rdmsrl(event->hw.event_base, count); - return count; + struct intel_uncore_type *type = box->pmu->type; + struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; + struct hw_perf_event_extra *reg2 = &event->hw.branch_reg; + struct extra_reg *er; + unsigned msr; + int reg_idx = 0; + + if (WARN_ON_ONCE(reg1->idx != -1)) + return -EINVAL; + /* + * The mbox events may require 2 extra MSRs at the most. But only + * the lower 32 bits in these MSRs are significant, so we can use + * config1 to pass two MSRs' config. 
+ */ + for (er = nhmex_uncore_mbox_extra_regs; er->msr; er++) { + if (er->event != (event->hw.config & er->config_mask)) + continue; + if (event->attr.config1 & ~er->valid_mask) + return -EINVAL; + if (er->idx == __BITS_VALUE(reg1->idx, 0, 8) || + er->idx == __BITS_VALUE(reg1->idx, 1, 8)) + continue; + if (WARN_ON_ONCE(reg_idx >= 2)) + return -EINVAL; + + msr = er->msr + type->msr_offset * box->pmu->pmu_idx; + if (WARN_ON_ONCE(msr >= 0xffff || er->idx >= 0xff)) + return -EINVAL; + + /* always use the 32~63 bits to pass the PLD config */ + if (er->idx == EXTRA_REG_NHMEX_M_PLD) + reg_idx = 1; + + reg1->idx &= ~(0xff << (reg_idx * 8)); + reg1->reg &= ~(0xffff << (reg_idx * 16)); + reg1->idx |= nhmex_mbox_extra_reg_idx(er) << (reg_idx * 8); + reg1->reg |= msr << (reg_idx * 16); + reg1->config = event->attr.config1; + reg_idx++; + } + /* use config2 to pass the filter config */ + reg2->idx = EXTRA_REG_NHMEX_M_FILTER; + if (event->attr.config2 & NHMEX_M_PMON_MM_CFG_EN) + reg2->config = event->attr.config2; + else + reg2->config = ~0ULL; + if (box->pmu->pmu_idx == 0) + reg2->reg = NHMEX_M0_MSR_PMU_MM_CFG; + else + reg2->reg = NHMEX_M1_MSR_PMU_MM_CFG; + + return 0; } -static void snb_uncore_msr_init_box(struct intel_uncore_box *box) +static u64 nhmex_mbox_shared_reg_config(struct intel_uncore_box *box, int idx) { - if (box->pmu->pmu_idx == 0) { - wrmsrl(SNB_UNC_PERF_GLOBAL_CTL, - SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL); - } + struct intel_uncore_extra_reg *er; + unsigned long flags; + u64 config; + + if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) + return box->shared_regs[idx].config; + + er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC]; + raw_spin_lock_irqsave(&er->lock, flags); + config = er->config; + raw_spin_unlock_irqrestore(&er->lock, flags); + return config; } -static struct attribute *snb_uncore_formats_attr[] = { - &format_attr_event.attr, - &format_attr_umask.attr, - &format_attr_edge.attr, - &format_attr_inv.attr, - &format_attr_cmask5.attr, +static void nhmex_mbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + struct hw_perf_event_extra *reg1 = &hwc->extra_reg; + struct hw_perf_event_extra *reg2 = &hwc->branch_reg; + int idx; + + idx = __BITS_VALUE(reg1->idx, 0, 8); + if (idx != 0xff) + wrmsrl(__BITS_VALUE(reg1->reg, 0, 16), + nhmex_mbox_shared_reg_config(box, idx)); + idx = __BITS_VALUE(reg1->idx, 1, 8); + if (idx != 0xff) + wrmsrl(__BITS_VALUE(reg1->reg, 1, 16), + nhmex_mbox_shared_reg_config(box, idx)); + + wrmsrl(reg2->reg, 0); + if (reg2->config != ~0ULL) { + wrmsrl(reg2->reg + 1, + reg2->config & NHMEX_M_PMON_ADDR_MATCH_MASK); + wrmsrl(reg2->reg + 2, NHMEX_M_PMON_ADDR_MASK_MASK & + (reg2->config >> NHMEX_M_PMON_ADDR_MASK_SHIFT)); + wrmsrl(reg2->reg, NHMEX_M_PMON_MM_CFG_EN); + } + + wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0); +} + +DEFINE_UNCORE_FORMAT_ATTR(count_mode, count_mode, "config:2-3"); +DEFINE_UNCORE_FORMAT_ATTR(storage_mode, storage_mode, "config:4-5"); +DEFINE_UNCORE_FORMAT_ATTR(wrap_mode, wrap_mode, "config:6"); +DEFINE_UNCORE_FORMAT_ATTR(flag_mode, flag_mode, "config:7"); +DEFINE_UNCORE_FORMAT_ATTR(inc_sel, inc_sel, "config:9-13"); +DEFINE_UNCORE_FORMAT_ATTR(set_flag_sel, set_flag_sel, "config:19-21"); +DEFINE_UNCORE_FORMAT_ATTR(filter_cfg, filter_cfg, "config2:63"); +DEFINE_UNCORE_FORMAT_ATTR(filter_match, filter_match, "config2:0-33"); +DEFINE_UNCORE_FORMAT_ATTR(filter_mask, filter_mask, "config2:34-61"); +DEFINE_UNCORE_FORMAT_ATTR(dsp, dsp, "config1:0-31"); 
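/*
 * [ editor's aside, not part of the patch: each DEFINE_UNCORE_FORMAT_ATTR()
 *   here publishes a sysfs "format" entry mapping a named field onto bits
 *   of one of the config words, so that userspace can write, roughly,
 *
 *	perf stat -e 'uncore_mbox_0/inc_sel=0xd,fvc=0x2800/'
 *
 *   (the PMU instance name is illustrative) and have the tool pack the
 *   field values into attr.config and attr.config1. ]
 */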
+DEFINE_UNCORE_FORMAT_ATTR(thr, thr, "config1:0-31"); +DEFINE_UNCORE_FORMAT_ATTR(fvc, fvc, "config1:0-31"); +DEFINE_UNCORE_FORMAT_ATTR(pgt, pgt, "config1:0-31"); +DEFINE_UNCORE_FORMAT_ATTR(map, map, "config1:0-31"); +DEFINE_UNCORE_FORMAT_ATTR(iss, iss, "config1:0-31"); +DEFINE_UNCORE_FORMAT_ATTR(pld, pld, "config1:32-63"); + +static struct attribute *nhmex_uncore_mbox_formats_attr[] = { + &format_attr_count_mode.attr, + &format_attr_storage_mode.attr, + &format_attr_wrap_mode.attr, + &format_attr_flag_mode.attr, + &format_attr_inc_sel.attr, + &format_attr_set_flag_sel.attr, + &format_attr_filter_cfg.attr, + &format_attr_filter_match.attr, + &format_attr_filter_mask.attr, + &format_attr_dsp.attr, + &format_attr_thr.attr, + &format_attr_fvc.attr, + &format_attr_pgt.attr, + &format_attr_map.attr, + &format_attr_iss.attr, + &format_attr_pld.attr, NULL, }; -static struct attribute_group snb_uncore_format_group = { - .name = "format", - .attrs = snb_uncore_formats_attr, +static struct attribute_group nhmex_uncore_mbox_format_group = { + .name = "format", + .attrs = nhmex_uncore_mbox_formats_attr, }; -static struct intel_uncore_ops snb_uncore_msr_ops = { - .init_box = snb_uncore_msr_init_box, - .disable_event = snb_uncore_msr_disable_event, - .enable_event = snb_uncore_msr_enable_event, - .read_counter = snb_uncore_msr_read_counter, +static struct uncore_event_desc nhmex_uncore_mbox_events[] = { + INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x2800"), + INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, "inc_sel=0xd,fvc=0x2820"), + { /* end: all zeroes */ }, }; -static struct event_constraint snb_uncore_cbox_constraints[] = { - UNCORE_EVENT_CONSTRAINT(0x80, 0x1), - UNCORE_EVENT_CONSTRAINT(0x83, 0x1), - EVENT_CONSTRAINT_END +static struct intel_uncore_ops nhmex_uncore_mbox_ops = { + NHMEX_UNCORE_OPS_COMMON_INIT(), + .enable_event = nhmex_mbox_msr_enable_event, + .hw_config = nhmex_mbox_hw_config, + .get_constraint = nhmex_mbox_get_constraint, + .put_constraint = nhmex_mbox_put_constraint, }; -static struct intel_uncore_type snb_uncore_cbox = { - .name = "cbox", - .num_counters = 2, - .num_boxes = 4, - .perf_ctr_bits = 44, - .fixed_ctr_bits = 48, - .perf_ctr = SNB_UNC_CBO_0_PER_CTR0, - .event_ctl = SNB_UNC_CBO_0_PERFEVTSEL0, - .fixed_ctr = SNB_UNC_FIXED_CTR, - .fixed_ctl = SNB_UNC_FIXED_CTR_CTRL, - .single_fixed = 1, - .event_mask = SNB_UNC_RAW_EVENT_MASK, - .msr_offset = SNB_UNC_CBO_MSR_OFFSET, - .constraints = snb_uncore_cbox_constraints, - .ops = &snb_uncore_msr_ops, - .format_group = &snb_uncore_format_group, +static struct intel_uncore_type nhmex_uncore_mbox = { + .name = "mbox", + .num_counters = 6, + .num_boxes = 2, + .perf_ctr_bits = 48, + .event_ctl = NHMEX_M0_MSR_PMU_CTL0, + .perf_ctr = NHMEX_M0_MSR_PMU_CNT0, + .event_mask = NHMEX_M_PMON_RAW_EVENT_MASK, + .box_ctl = NHMEX_M0_MSR_GLOBAL_CTL, + .msr_offset = NHMEX_M_MSR_OFFSET, + .pair_ctr_ctl = 1, + .num_shared_regs = 8, + .event_descs = nhmex_uncore_mbox_events, + .ops = &nhmex_uncore_mbox_ops, + .format_group = &nhmex_uncore_mbox_format_group, }; -static struct intel_uncore_type *snb_msr_uncores[] = { - &snb_uncore_cbox, - NULL, -}; -/* end of Sandy Bridge uncore support */ +void nhmex_rbox_alter_er(struct intel_uncore_box *box, struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + struct hw_perf_event_extra *reg1 = &hwc->extra_reg; + int port; -/* Nehalem uncore support */ -static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box) + /* adjust the main event selector */ + if (reg1->idx % 2) { + 
reg1->idx--; + hwc->config -= 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT; + } else { + reg1->idx++; + hwc->config += 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT; + } + + /* adjust address or config of extra register */ + port = reg1->idx / 6 + box->pmu->pmu_idx * 4; + switch (reg1->idx % 6) { + case 0: + reg1->reg = NHMEX_R_MSR_PORTN_IPERF_CFG0(port); + break; + case 1: + reg1->reg = NHMEX_R_MSR_PORTN_IPERF_CFG1(port); + break; + case 2: + /* the 8~15 bits to the 0~7 bits */ + reg1->config >>= 8; + break; + case 3: + /* the 0~7 bits to the 8~15 bits */ + reg1->config <<= 8; + break; + case 4: + reg1->reg = NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port); + break; + case 5: + reg1->reg = NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(port); + break; + }; +} + +/* + * Each rbox has 4 event set which monitor PQI port 0~3 or 4~7. + * An event set consists of 6 events, the 3rd and 4th events in + * an event set use the same extra register. So an event set uses + * 5 extra registers. + */ +static struct event_constraint * +nhmex_rbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event) { - wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, 0); + struct hw_perf_event *hwc = &event->hw; + struct hw_perf_event_extra *reg1 = &hwc->extra_reg; + struct hw_perf_event_extra *reg2 = &hwc->branch_reg; + struct intel_uncore_extra_reg *er; + unsigned long flags; + int idx, er_idx; + u64 config1; + bool ok = false; + + if (!uncore_box_is_fake(box) && reg1->alloc) + return NULL; + + idx = reg1->idx % 6; + config1 = reg1->config; +again: + er_idx = idx; + /* the 3rd and 4th events use the same extra register */ + if (er_idx > 2) + er_idx--; + er_idx += (reg1->idx / 6) * 5; + + er = &box->shared_regs[er_idx]; + raw_spin_lock_irqsave(&er->lock, flags); + if (idx < 2) { + if (!atomic_read(&er->ref) || er->config == reg1->config) { + atomic_inc(&er->ref); + er->config = reg1->config; + ok = true; + } + } else if (idx == 2 || idx == 3) { + /* + * these two events use different fields in a extra register, + * the 0~7 bits and the 8~15 bits respectively. + */ + u64 mask = 0xff << ((idx - 2) * 8); + if (!__BITS_VALUE(atomic_read(&er->ref), idx - 2, 8) || + !((er->config ^ config1) & mask)) { + atomic_add(1 << ((idx - 2) * 8), &er->ref); + er->config &= ~mask; + er->config |= config1 & mask; + ok = true; + } + } else { + if (!atomic_read(&er->ref) || + (er->config == (hwc->config >> 32) && + er->config1 == reg1->config && + er->config2 == reg2->config)) { + atomic_inc(&er->ref); + er->config = (hwc->config >> 32); + er->config1 = reg1->config; + er->config2 = reg2->config; + ok = true; + } + } + raw_spin_unlock_irqrestore(&er->lock, flags); + + if (!ok) { + /* + * The Rbox events are always in pairs. The paired + * events are functional identical, but use different + * extra registers. If we failed to take an extra + * register, try the alternative. 
+ */ + if (idx % 2) + idx--; + else + idx++; + if (idx != reg1->idx % 6) { + if (idx == 2) + config1 >>= 8; + else if (idx == 3) + config1 <<= 8; + goto again; + } + } else { + if (!uncore_box_is_fake(box)) { + if (idx != reg1->idx % 6) + nhmex_rbox_alter_er(box, event); + reg1->alloc = 1; + } + return NULL; + } + return &constraint_empty; } -static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box) +static void nhmex_rbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event) { - wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, - NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC); + struct intel_uncore_extra_reg *er; + struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; + int idx, er_idx; + + if (uncore_box_is_fake(box) || !reg1->alloc) + return; + + idx = reg1->idx % 6; + er_idx = idx; + if (er_idx > 2) + er_idx--; + er_idx += (reg1->idx / 6) * 5; + + er = &box->shared_regs[er_idx]; + if (idx == 2 || idx == 3) + atomic_sub(1 << ((idx - 2) * 8), &er->ref); + else + atomic_dec(&er->ref); + + reg1->alloc = 0; } -static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box, - struct perf_event *event) +static int nhmex_rbox_hw_config(struct intel_uncore_box *box, struct perf_event *event) { struct hw_perf_event *hwc = &event->hw; + struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; + struct hw_perf_event_extra *reg2 = &event->hw.branch_reg; + int port, idx; - if (hwc->idx < UNCORE_PMC_IDX_FIXED) - wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN); - else - wrmsrl(hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN); + idx = (event->hw.config & NHMEX_R_PMON_CTL_EV_SEL_MASK) >> + NHMEX_R_PMON_CTL_EV_SEL_SHIFT; + if (idx >= 0x18) + return -EINVAL; + + reg1->idx = idx; + reg1->config = event->attr.config1; + + port = idx / 6 + box->pmu->pmu_idx * 4; + idx %= 6; + switch (idx) { + case 0: + reg1->reg = NHMEX_R_MSR_PORTN_IPERF_CFG0(port); + break; + case 1: + reg1->reg = NHMEX_R_MSR_PORTN_IPERF_CFG1(port); + break; + case 2: + case 3: + reg1->reg = NHMEX_R_MSR_PORTN_QLX_CFG(port); + break; + case 4: + case 5: + if (idx == 4) + reg1->reg = NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port); + else + reg1->reg = NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(port); + reg2->config = event->attr.config2; + hwc->config |= event->attr.config & (~0ULL << 32); + break; + }; + return 0; } -static struct attribute *nhm_uncore_formats_attr[] = { - &format_attr_event.attr, - &format_attr_umask.attr, - &format_attr_edge.attr, - &format_attr_inv.attr, - &format_attr_cmask8.attr, +static u64 nhmex_rbox_shared_reg_config(struct intel_uncore_box *box, int idx) +{ + struct intel_uncore_extra_reg *er; + unsigned long flags; + u64 config; + + er = &box->shared_regs[idx]; + + raw_spin_lock_irqsave(&er->lock, flags); + config = er->config; + raw_spin_unlock_irqrestore(&er->lock, flags); + + return config; +} + +static void nhmex_rbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + struct hw_perf_event_extra *reg1 = &hwc->extra_reg; + struct hw_perf_event_extra *reg2 = &hwc->branch_reg; + int idx, er_idx; + + idx = reg1->idx % 6; + er_idx = idx; + if (er_idx > 2) + er_idx--; + er_idx += (reg1->idx / 6) * 5; + + switch (idx) { + case 0: + case 1: + wrmsrl(reg1->reg, reg1->config); + break; + case 2: + case 3: + wrmsrl(reg1->reg, nhmex_rbox_shared_reg_config(box, er_idx)); + break; + case 4: + case 5: + wrmsrl(reg1->reg, reg1->config); + wrmsrl(reg1->reg + 1, hwc->config >> 32); + wrmsrl(reg1->reg + 2, reg2->config); + break; + }; + + 
wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 | + (hwc->config & NHMEX_R_PMON_CTL_EV_SEL_MASK)); +} + +DEFINE_UNCORE_FORMAT_ATTR(xbr_match, xbr_match, "config:32-63"); +DEFINE_UNCORE_FORMAT_ATTR(xbr_mm_cfg, xbr_mm_cfg, "config1:0-63"); +DEFINE_UNCORE_FORMAT_ATTR(xbr_mask, xbr_mask, "config2:0-63"); +DEFINE_UNCORE_FORMAT_ATTR(qlx_cfg, qlx_cfg, "config1:0-15"); +DEFINE_UNCORE_FORMAT_ATTR(iperf_cfg, iperf_cfg, "config1:0-31"); + +static struct attribute *nhmex_uncore_rbox_formats_attr[] = { + &format_attr_event5.attr, + &format_attr_xbr_mm_cfg.attr, + &format_attr_xbr_match.attr, + &format_attr_xbr_mask.attr, + &format_attr_qlx_cfg.attr, + &format_attr_iperf_cfg.attr, NULL, }; -static struct attribute_group nhm_uncore_format_group = { +static struct attribute_group nhmex_uncore_rbox_format_group = { .name = "format", - .attrs = nhm_uncore_formats_attr, + .attrs = nhmex_uncore_rbox_formats_attr, }; -static struct uncore_event_desc nhm_uncore_events[] = { - INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"), - INTEL_UNCORE_EVENT_DESC(qmc_writes_full_any, "event=0x2f,umask=0x0f"), - INTEL_UNCORE_EVENT_DESC(qmc_normal_reads_any, "event=0x2c,umask=0x0f"), - INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_reads, "event=0x20,umask=0x01"), - INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_writes, "event=0x20,umask=0x02"), - INTEL_UNCORE_EVENT_DESC(qhl_request_remote_reads, "event=0x20,umask=0x04"), - INTEL_UNCORE_EVENT_DESC(qhl_request_remote_writes, "event=0x20,umask=0x08"), - INTEL_UNCORE_EVENT_DESC(qhl_request_local_reads, "event=0x20,umask=0x10"), - INTEL_UNCORE_EVENT_DESC(qhl_request_local_writes, "event=0x20,umask=0x20"), +static struct uncore_event_desc nhmex_uncore_rbox_events[] = { + INTEL_UNCORE_EVENT_DESC(qpi0_flit_send, "event=0x0,iperf_cfg=0x80000000"), + INTEL_UNCORE_EVENT_DESC(qpi1_filt_send, "event=0x6,iperf_cfg=0x80000000"), + INTEL_UNCORE_EVENT_DESC(qpi0_idle_filt, "event=0x0,iperf_cfg=0x40000000"), + INTEL_UNCORE_EVENT_DESC(qpi1_idle_filt, "event=0x6,iperf_cfg=0x40000000"), + INTEL_UNCORE_EVENT_DESC(qpi0_date_response, "event=0x0,iperf_cfg=0xc4"), + INTEL_UNCORE_EVENT_DESC(qpi1_date_response, "event=0x6,iperf_cfg=0xc4"), { /* end: all zeroes */ }, }; -static struct intel_uncore_ops nhm_uncore_msr_ops = { - .disable_box = nhm_uncore_msr_disable_box, - .enable_box = nhm_uncore_msr_enable_box, - .disable_event = snb_uncore_msr_disable_event, - .enable_event = nhm_uncore_msr_enable_event, - .read_counter = snb_uncore_msr_read_counter, +static struct intel_uncore_ops nhmex_uncore_rbox_ops = { + NHMEX_UNCORE_OPS_COMMON_INIT(), + .enable_event = nhmex_rbox_msr_enable_event, + .hw_config = nhmex_rbox_hw_config, + .get_constraint = nhmex_rbox_get_constraint, + .put_constraint = nhmex_rbox_put_constraint, }; -static struct intel_uncore_type nhm_uncore = { - .name = "", - .num_counters = 8, - .num_boxes = 1, - .perf_ctr_bits = 48, - .fixed_ctr_bits = 48, - .event_ctl = NHM_UNC_PERFEVTSEL0, - .perf_ctr = NHM_UNC_UNCORE_PMC0, - .fixed_ctr = NHM_UNC_FIXED_CTR, - .fixed_ctl = NHM_UNC_FIXED_CTR_CTRL, - .event_mask = NHM_UNC_RAW_EVENT_MASK, - .event_descs = nhm_uncore_events, - .ops = &nhm_uncore_msr_ops, - .format_group = &nhm_uncore_format_group, +static struct intel_uncore_type nhmex_uncore_rbox = { + .name = "rbox", + .num_counters = 8, + .num_boxes = 2, + .perf_ctr_bits = 48, + .event_ctl = NHMEX_R_MSR_PMON_CTL0, + .perf_ctr = NHMEX_R_MSR_PMON_CNT0, + .event_mask = NHMEX_R_PMON_RAW_EVENT_MASK, + .box_ctl = NHMEX_R_MSR_GLOBAL_CTL, + .msr_offset = NHMEX_R_MSR_OFFSET, + .pair_ctr_ctl = 1, + 
.num_shared_regs = 20, + .event_descs = nhmex_uncore_rbox_events, + .ops = &nhmex_uncore_rbox_ops, + .format_group = &nhmex_uncore_rbox_format_group }; -static struct intel_uncore_type *nhm_msr_uncores[] = { - &nhm_uncore, +static struct intel_uncore_type *nhmex_msr_uncores[] = { + &nhmex_uncore_ubox, + &nhmex_uncore_cbox, + &nhmex_uncore_bbox, + &nhmex_uncore_sbox, + &nhmex_uncore_mbox, + &nhmex_uncore_rbox, + &nhmex_uncore_wbox, NULL, }; -/* end of Nehalem uncore support */ +/* end of Nehalem-EX uncore support */ -static void uncore_assign_hw_event(struct intel_uncore_box *box, - struct perf_event *event, int idx) +static void uncore_assign_hw_event(struct intel_uncore_box *box, struct perf_event *event, int idx) { struct hw_perf_event *hwc = &event->hw; @@ -787,8 +1821,7 @@ static void uncore_assign_hw_event(struct intel_uncore_box *box, hwc->event_base = uncore_perf_ctr(box, hwc->idx); } -static void uncore_perf_event_update(struct intel_uncore_box *box, - struct perf_event *event) +static void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event) { u64 prev_count, new_count, delta; int shift; @@ -858,14 +1891,12 @@ static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box) box->hrtimer.function = uncore_pmu_hrtimer; } -struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type, - int cpu) +struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type, int cpu) { struct intel_uncore_box *box; int i, size; - size = sizeof(*box) + type->num_shared_regs * - sizeof(struct intel_uncore_extra_reg); + size = sizeof(*box) + type->num_shared_regs * sizeof(struct intel_uncore_extra_reg); box = kmalloc_node(size, GFP_KERNEL | __GFP_ZERO, cpu_to_node(cpu)); if (!box) @@ -915,12 +1946,11 @@ static struct intel_uncore_box *uncore_event_to_box(struct perf_event *event) * perf core schedules event on the basis of cpu, uncore events are * collected by one of the cpus inside a physical package. 
*/ - return uncore_pmu_to_box(uncore_event_to_pmu(event), - smp_processor_id()); + return uncore_pmu_to_box(uncore_event_to_pmu(event), smp_processor_id()); } -static int uncore_collect_events(struct intel_uncore_box *box, - struct perf_event *leader, bool dogrp) +static int +uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader, bool dogrp) { struct perf_event *event; int n, max_count; @@ -952,8 +1982,7 @@ static int uncore_collect_events(struct intel_uncore_box *box, } static struct event_constraint * -uncore_get_event_constraint(struct intel_uncore_box *box, - struct perf_event *event) +uncore_get_event_constraint(struct intel_uncore_box *box, struct perf_event *event) { struct intel_uncore_type *type = box->pmu->type; struct event_constraint *c; @@ -977,15 +2006,13 @@ uncore_get_event_constraint(struct intel_uncore_box *box, return &type->unconstrainted; } -static void uncore_put_event_constraint(struct intel_uncore_box *box, - struct perf_event *event) +static void uncore_put_event_constraint(struct intel_uncore_box *box, struct perf_event *event) { if (box->pmu->type->ops->put_constraint) box->pmu->type->ops->put_constraint(box, event); } -static int uncore_assign_events(struct intel_uncore_box *box, - int assign[], int n) +static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int n) { unsigned long used_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)]; struct event_constraint *c, *constraints[UNCORE_PMC_IDX_MAX]; @@ -1407,8 +2434,7 @@ static bool pcidrv_registered; /* * add a pci uncore device */ -static int __devinit uncore_pci_add(struct intel_uncore_type *type, - struct pci_dev *pdev) +static int __devinit uncore_pci_add(struct intel_uncore_type *type, struct pci_dev *pdev) { struct intel_uncore_pmu *pmu; struct intel_uncore_box *box; @@ -1485,6 +2511,7 @@ static int __devinit uncore_pci_probe(struct pci_dev *pdev, struct intel_uncore_type *type; type = (struct intel_uncore_type *)id->driver_data; + return uncore_pci_add(type, pdev); } @@ -1612,8 +2639,8 @@ static int __cpuinit uncore_cpu_prepare(int cpu, int phys_id) return 0; } -static void __cpuinit uncore_change_context(struct intel_uncore_type **uncores, - int old_cpu, int new_cpu) +static void __cpuinit +uncore_change_context(struct intel_uncore_type **uncores, int old_cpu, int new_cpu) { struct intel_uncore_type *type; struct intel_uncore_pmu *pmu; @@ -1694,8 +2721,8 @@ static void __cpuinit uncore_event_init_cpu(int cpu) uncore_change_context(pci_uncores, -1, cpu); } -static int __cpuinit uncore_cpu_notifier(struct notifier_block *self, - unsigned long action, void *hcpu) +static int + __cpuinit uncore_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) { unsigned int cpu = (long)hcpu; @@ -1732,12 +2759,12 @@ static int __cpuinit uncore_cpu_notifier(struct notifier_block *self, } static struct notifier_block uncore_cpu_nb __cpuinitdata = { - .notifier_call = uncore_cpu_notifier, + .notifier_call = uncore_cpu_notifier, /* * to migrate uncore events, our notifier should be executed * before perf core's notifier. 
*/ - .priority = CPU_PRI_PERF + 1, + .priority = CPU_PRI_PERF + 1, }; static void __init uncore_cpu_setup(void *dummy) @@ -1767,6 +2794,9 @@ static int __init uncore_cpu_init(void) snbep_uncore_cbox.num_boxes = max_cores; msr_uncores = snbep_msr_uncores; break; + case 46: + msr_uncores = nhmex_msr_uncores; + break; default: return 0; } diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h index b13e9ea..47b1776 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h @@ -5,8 +5,6 @@ #include "perf_event.h" #define UNCORE_PMU_NAME_LEN 32 -#define UNCORE_BOX_HASH_SIZE 8 - #define UNCORE_PMU_HRTIMER_INTERVAL (60 * NSEC_PER_SEC) #define UNCORE_FIXED_EVENT 0xff @@ -158,6 +156,193 @@ #define SNBEP_PCU_MSR_CORE_C3_CTR 0x3fc #define SNBEP_PCU_MSR_CORE_C6_CTR 0x3fd +/* NHM-EX event control */ +#define NHMEX_PMON_CTL_EV_SEL_MASK 0x000000ff +#define NHMEX_PMON_CTL_UMASK_MASK 0x0000ff00 +#define NHMEX_PMON_CTL_EN_BIT0 (1 << 0) +#define NHMEX_PMON_CTL_EDGE_DET (1 << 18) +#define NHMEX_PMON_CTL_PMI_EN (1 << 20) +#define NHMEX_PMON_CTL_EN_BIT22 (1 << 22) +#define NHMEX_PMON_CTL_INVERT (1 << 23) +#define NHMEX_PMON_CTL_TRESH_MASK 0xff000000 +#define NHMEX_PMON_RAW_EVENT_MASK (NHMEX_PMON_CTL_EV_SEL_MASK | \ + NHMEX_PMON_CTL_UMASK_MASK | \ + NHMEX_PMON_CTL_EDGE_DET | \ + NHMEX_PMON_CTL_INVERT | \ + NHMEX_PMON_CTL_TRESH_MASK) + +/* NHM-EX Ubox */ +#define NHMEX_U_MSR_PMON_GLOBAL_CTL 0xc00 +#define NHMEX_U_MSR_PMON_CTR 0xc11 +#define NHMEX_U_MSR_PMON_EV_SEL 0xc10 + +#define NHMEX_U_PMON_GLOBAL_EN (1 << 0) +#define NHMEX_U_PMON_GLOBAL_PMI_CORE_SEL 0x0000001e +#define NHMEX_U_PMON_GLOBAL_EN_ALL (1 << 28) +#define NHMEX_U_PMON_GLOBAL_RST_ALL (1 << 29) +#define NHMEX_U_PMON_GLOBAL_FRZ_ALL (1 << 31) + +#define NHMEX_U_PMON_RAW_EVENT_MASK \ + (NHMEX_PMON_CTL_EV_SEL_MASK | \ + NHMEX_PMON_CTL_EDGE_DET) + +/* NHM-EX Cbox */ +#define NHMEX_C0_MSR_PMON_GLOBAL_CTL 0xd00 +#define NHMEX_C0_MSR_PMON_CTR0 0xd11 +#define NHMEX_C0_MSR_PMON_EV_SEL0 0xd10 +#define NHMEX_C_MSR_OFFSET 0x20 + +/* NHM-EX Bbox */ +#define NHMEX_B0_MSR_PMON_GLOBAL_CTL 0xc20 +#define NHMEX_B0_MSR_PMON_CTR0 0xc31 +#define NHMEX_B0_MSR_PMON_CTL0 0xc30 +#define NHMEX_B_MSR_OFFSET 0x40 +#define NHMEX_B0_MSR_MATCH 0xe45 +#define NHMEX_B0_MSR_MASK 0xe46 +#define NHMEX_B1_MSR_MATCH 0xe4d +#define NHMEX_B1_MSR_MASK 0xe4e + +#define NHMEX_B_PMON_CTL_EN (1 << 0) +#define NHMEX_B_PMON_CTL_EV_SEL_SHIFT 1 +#define NHMEX_B_PMON_CTL_EV_SEL_MASK \ + (0x1f << NHMEX_B_PMON_CTL_EV_SEL_SHIFT) +#define NHMEX_B_PMON_CTR_SHIFT 6 +#define NHMEX_B_PMON_CTR_MASK \ + (0x3 << NHMEX_B_PMON_CTR_SHIFT) +#define NHMEX_B_PMON_RAW_EVENT_MASK \ + (NHMEX_B_PMON_CTL_EV_SEL_MASK | \ + NHMEX_B_PMON_CTR_MASK) + +/* NHM-EX Sbox */ +#define NHMEX_S0_MSR_PMON_GLOBAL_CTL 0xc40 +#define NHMEX_S0_MSR_PMON_CTR0 0xc51 +#define NHMEX_S0_MSR_PMON_CTL0 0xc50 +#define NHMEX_S_MSR_OFFSET 0x80 +#define NHMEX_S0_MSR_MM_CFG 0xe48 +#define NHMEX_S0_MSR_MATCH 0xe49 +#define NHMEX_S0_MSR_MASK 0xe4a +#define NHMEX_S1_MSR_MM_CFG 0xe58 +#define NHMEX_S1_MSR_MATCH 0xe59 +#define NHMEX_S1_MSR_MASK 0xe5a + +#define NHMEX_S_PMON_MM_CFG_EN (0x1ULL << 63) + +/* NHM-EX Mbox */ +#define NHMEX_M0_MSR_GLOBAL_CTL 0xca0 +#define NHMEX_M0_MSR_PMU_DSP 0xca5 +#define NHMEX_M0_MSR_PMU_ISS 0xca6 +#define NHMEX_M0_MSR_PMU_MAP 0xca7 +#define NHMEX_M0_MSR_PMU_MSC_THR 0xca8 +#define NHMEX_M0_MSR_PMU_PGT 0xca9 +#define NHMEX_M0_MSR_PMU_PLD 0xcaa +#define NHMEX_M0_MSR_PMU_ZDP_CTL_FVC 0xcab +#define NHMEX_M0_MSR_PMU_CTL0 0xcb0 
+#define NHMEX_M0_MSR_PMU_CNT0 0xcb1 +#define NHMEX_M_MSR_OFFSET 0x40 +#define NHMEX_M0_MSR_PMU_MM_CFG 0xe54 +#define NHMEX_M1_MSR_PMU_MM_CFG 0xe5c + +#define NHMEX_M_PMON_MM_CFG_EN (1ULL << 63) +#define NHMEX_M_PMON_ADDR_MATCH_MASK 0x3ffffffffULL +#define NHMEX_M_PMON_ADDR_MASK_MASK 0x7ffffffULL +#define NHMEX_M_PMON_ADDR_MASK_SHIFT 34 + +#define NHMEX_M_PMON_CTL_EN (1 << 0) +#define NHMEX_M_PMON_CTL_PMI_EN (1 << 1) +#define NHMEX_M_PMON_CTL_COUNT_MODE_SHIFT 2 +#define NHMEX_M_PMON_CTL_COUNT_MODE_MASK \ + (0x3 << NHMEX_M_PMON_CTL_COUNT_MODE_SHIFT) +#define NHMEX_M_PMON_CTL_STORAGE_MODE_SHIFT 4 +#define NHMEX_M_PMON_CTL_STORAGE_MODE_MASK \ + (0x3 << NHMEX_M_PMON_CTL_STORAGE_MODE_SHIFT) +#define NHMEX_M_PMON_CTL_WRAP_MODE (1 << 6) +#define NHMEX_M_PMON_CTL_FLAG_MODE (1 << 7) +#define NHMEX_M_PMON_CTL_INC_SEL_SHIFT 9 +#define NHMEX_M_PMON_CTL_INC_SEL_MASK \ + (0x1f << NHMEX_M_PMON_CTL_INC_SEL_SHIFT) +#define NHMEX_M_PMON_CTL_SET_FLAG_SEL_SHIFT 19 +#define NHMEX_M_PMON_CTL_SET_FLAG_SEL_MASK \ + (0x7 << NHMEX_M_PMON_CTL_SET_FLAG_SEL_SHIFT) +#define NHMEX_M_PMON_RAW_EVENT_MASK \ + (NHMEX_M_PMON_CTL_COUNT_MODE_MASK | \ + NHMEX_M_PMON_CTL_STORAGE_MODE_MASK | \ + NHMEX_M_PMON_CTL_WRAP_MODE | \ + NHMEX_M_PMON_CTL_FLAG_MODE | \ + NHMEX_M_PMON_CTL_INC_SEL_MASK | \ + NHMEX_M_PMON_CTL_SET_FLAG_SEL_MASK) + + +#define NHMEX_M_PMON_ZDP_CTL_FVC_FVID_MASK 0x1f +#define NHMEX_M_PMON_ZDP_CTL_FVC_BCMD_MASK (0x7 << 5) +#define NHMEX_M_PMON_ZDP_CTL_FVC_RSP_MASK (0x7 << 8) +#define NHMEX_M_PMON_ZDP_CTL_FVC_PBOX_INIT_ERR (1 << 23) +#define NHMEX_M_PMON_ZDP_CTL_FVC_MASK \ + (NHMEX_M_PMON_ZDP_CTL_FVC_FVID_MASK | \ + NHMEX_M_PMON_ZDP_CTL_FVC_BCMD_MASK | \ + NHMEX_M_PMON_ZDP_CTL_FVC_RSP_MASK | \ + NHMEX_M_PMON_ZDP_CTL_FVC_PBOX_INIT_ERR) +#define NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(n) (0x7 << (11 + 3 * (n))) + +/* + * use the 9~13 bits to select event If the 7th bit is not set, + * otherwise use the 19~21 bits to select event. + */ +#define MBOX_INC_SEL(x) ((x) << NHMEX_M_PMON_CTL_INC_SEL_SHIFT) +#define MBOX_SET_FLAG_SEL(x) (((x) << NHMEX_M_PMON_CTL_SET_FLAG_SEL_SHIFT) | \ + NHMEX_M_PMON_CTL_FLAG_MODE) +#define MBOX_INC_SEL_MASK (NHMEX_M_PMON_CTL_INC_SEL_MASK | \ + NHMEX_M_PMON_CTL_FLAG_MODE) +#define MBOX_SET_FLAG_SEL_MASK (NHMEX_M_PMON_CTL_SET_FLAG_SEL_MASK | \ + NHMEX_M_PMON_CTL_FLAG_MODE) +#define MBOX_INC_SEL_EXTAR_REG(c, r) \ + EVENT_EXTRA_REG(MBOX_INC_SEL(c), NHMEX_M0_MSR_PMU_##r, \ + MBOX_INC_SEL_MASK, (u64)-1, NHMEX_M_##r) +#define MBOX_SET_FLAG_SEL_EXTRA_REG(c, r) \ + EVENT_EXTRA_REG(MBOX_SET_FLAG_SEL(c), NHMEX_M0_MSR_PMU_##r, \ + MBOX_SET_FLAG_SEL_MASK, \ + (u64)-1, NHMEX_M_##r) + +/* NHM-EX Rbox */ +#define NHMEX_R_MSR_GLOBAL_CTL 0xe00 +#define NHMEX_R_MSR_PMON_CTL0 0xe10 +#define NHMEX_R_MSR_PMON_CNT0 0xe11 +#define NHMEX_R_MSR_OFFSET 0x20 + +#define NHMEX_R_MSR_PORTN_QLX_CFG(n) \ + ((n) < 4 ? (0xe0c + (n)) : (0xe2c + (n) - 4)) +#define NHMEX_R_MSR_PORTN_IPERF_CFG0(n) (0xe04 + (n)) +#define NHMEX_R_MSR_PORTN_IPERF_CFG1(n) (0xe24 + (n)) +#define NHMEX_R_MSR_PORTN_XBR_OFFSET(n) \ + (((n) < 4 ? 
0 : 0x10) + (n) * 4) +#define NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(n) \ + (0xe60 + NHMEX_R_MSR_PORTN_XBR_OFFSET(n)) +#define NHMEX_R_MSR_PORTN_XBR_SET1_MATCH(n) \ + (NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(n) + 1) +#define NHMEX_R_MSR_PORTN_XBR_SET1_MASK(n) \ + (NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(n) + 2) +#define NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(n) \ + (0xe70 + NHMEX_R_MSR_PORTN_XBR_OFFSET(n)) +#define NHMEX_R_MSR_PORTN_XBR_SET2_MATCH(n) \ + (NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(n) + 1) +#define NHMEX_R_MSR_PORTN_XBR_SET2_MASK(n) \ + (NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(n) + 2) + +#define NHMEX_R_PMON_CTL_EN (1 << 0) +#define NHMEX_R_PMON_CTL_EV_SEL_SHIFT 1 +#define NHMEX_R_PMON_CTL_EV_SEL_MASK \ + (0x1f << NHMEX_R_PMON_CTL_EV_SEL_SHIFT) +#define NHMEX_R_PMON_CTL_PMI_EN (1 << 6) +#define NHMEX_R_PMON_RAW_EVENT_MASK NHMEX_R_PMON_CTL_EV_SEL_MASK + +/* NHM-EX Wbox */ +#define NHMEX_W_MSR_GLOBAL_CTL 0xc80 +#define NHMEX_W_MSR_PMON_CNT0 0xc90 +#define NHMEX_W_MSR_PMON_EVT_SEL0 0xc91 +#define NHMEX_W_MSR_PMON_FIXED_CTR 0x394 +#define NHMEX_W_MSR_PMON_FIXED_CTL 0x395 + +#define NHMEX_W_PMON_GLOBAL_FIXED_EN (1ULL << 31) + struct intel_uncore_ops; struct intel_uncore_pmu; struct intel_uncore_box; @@ -178,6 +363,7 @@ struct intel_uncore_type { unsigned msr_offset; unsigned num_shared_regs:8; unsigned single_fixed:1; + unsigned pair_ctr_ctl:1; struct event_constraint unconstrainted; struct event_constraint *constraints; struct intel_uncore_pmu *pmus; @@ -213,7 +399,7 @@ struct intel_uncore_pmu { struct intel_uncore_extra_reg { raw_spinlock_t lock; - u64 config1; + u64 config, config1, config2; atomic_t ref; }; @@ -323,14 +509,16 @@ unsigned uncore_msr_fixed_ctr(struct intel_uncore_box *box) static inline unsigned uncore_msr_event_ctl(struct intel_uncore_box *box, int idx) { - return idx + box->pmu->type->event_ctl + + return box->pmu->type->event_ctl + + (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) + box->pmu->type->msr_offset * box->pmu->pmu_idx; } static inline unsigned uncore_msr_perf_ctr(struct intel_uncore_box *box, int idx) { - return idx + box->pmu->type->perf_ctr + + return box->pmu->type->perf_ctr + + (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) + box->pmu->type->msr_offset * box->pmu->pmu_idx; } @@ -422,3 +610,8 @@ static inline void uncore_box_init(struct intel_uncore_box *box) box->pmu->type->ops->init_box(box); } } + +static inline bool uncore_box_is_fake(struct intel_uncore_box *box) +{ + return (box->phys_id < 0); +} -- cgit v1.1 From 74e6543fdc4e7553f572f7898ade649a09d85049 Mon Sep 17 00:00:00 2001 From: "Yan, Zheng" Date: Tue, 17 Jul 2012 17:27:55 +0800 Subject: perf/x86: Fix LLC-* and node-* events on Intel SandyBridge LLC-* and node-* events require using the OFFCORE_RESPONSE events on SandyBridge, but the hw_cache_extra_regs is left uninitialized. This patch adds the missing extra register configure table for SandyBridge. 
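To make the encoding concrete, here is a minimal stand-alone sketch (ordinary user-space C that mirrors the bit definitions added by this patch; it is not kernel code) which prints the OFFCORE_RESPONSE config value the new table selects for an LL read miss, i.e. SNB_DMND_READ|SNB_L3_MISS:

#include <stdio.h>

#define SNB_DMND_DATA_RD	(1ULL << 0)
#define SNB_LLC_DATA_RD		(1ULL << 7)
#define SNB_LOCAL		(1ULL << 22)
#define SNB_REMOTE		(0xffULL << 23)
#define SNB_SNP_NONE		(1ULL << 31)
#define SNB_SNP_NOT_NEEDED	(1ULL << 32)
#define SNB_SNP_MISS		(1ULL << 33)
#define SNB_NO_FWD		(1ULL << 34)
#define SNB_SNP_FWD		(1ULL << 35)
#define SNB_HITM		(1ULL << 36)
#define SNB_NON_DRAM		(1ULL << 37)

#define SNB_DMND_READ	(SNB_DMND_DATA_RD|SNB_LLC_DATA_RD)
#define SNB_SNP_ANY	(SNB_SNP_NONE|SNB_SNP_NOT_NEEDED|SNB_SNP_MISS| \
			 SNB_NO_FWD|SNB_SNP_FWD|SNB_HITM)
#define SNB_DRAM_ANY	(SNB_LOCAL|SNB_REMOTE|SNB_SNP_ANY)
#define SNB_L3_MISS	(SNB_DRAM_ANY|SNB_NON_DRAM)

int main(void)
{
	/* demand and LLC-prefetch data reads that missed everywhere:
	 * expected output is 0x3fffc00081 */
	printf("%#llx\n", (unsigned long long)(SNB_DMND_READ|SNB_L3_MISS));
	return 0;
}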
Signed-off-by: Yan, Zheng Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/1342517275-2875-1-git-send-email-zheng.z.yan@intel.com Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event_intel.c | 92 +++++++++++++++++++++++++++++++--- 1 file changed, 86 insertions(+), 6 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index 7a8b9d0..3823669 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -138,6 +138,84 @@ static u64 intel_pmu_event_map(int hw_event) return intel_perfmon_event_map[hw_event]; } +#define SNB_DMND_DATA_RD (1ULL << 0) +#define SNB_DMND_RFO (1ULL << 1) +#define SNB_DMND_IFETCH (1ULL << 2) +#define SNB_DMND_WB (1ULL << 3) +#define SNB_PF_DATA_RD (1ULL << 4) +#define SNB_PF_RFO (1ULL << 5) +#define SNB_PF_IFETCH (1ULL << 6) +#define SNB_LLC_DATA_RD (1ULL << 7) +#define SNB_LLC_RFO (1ULL << 8) +#define SNB_LLC_IFETCH (1ULL << 9) +#define SNB_BUS_LOCKS (1ULL << 10) +#define SNB_STRM_ST (1ULL << 11) +#define SNB_OTHER (1ULL << 15) +#define SNB_RESP_ANY (1ULL << 16) +#define SNB_NO_SUPP (1ULL << 17) +#define SNB_LLC_HITM (1ULL << 18) +#define SNB_LLC_HITE (1ULL << 19) +#define SNB_LLC_HITS (1ULL << 20) +#define SNB_LLC_HITF (1ULL << 21) +#define SNB_LOCAL (1ULL << 22) +#define SNB_REMOTE (0xffULL << 23) +#define SNB_SNP_NONE (1ULL << 31) +#define SNB_SNP_NOT_NEEDED (1ULL << 32) +#define SNB_SNP_MISS (1ULL << 33) +#define SNB_NO_FWD (1ULL << 34) +#define SNB_SNP_FWD (1ULL << 35) +#define SNB_HITM (1ULL << 36) +#define SNB_NON_DRAM (1ULL << 37) + +#define SNB_DMND_READ (SNB_DMND_DATA_RD|SNB_LLC_DATA_RD) +#define SNB_DMND_WRITE (SNB_DMND_RFO|SNB_LLC_RFO) +#define SNB_DMND_PREFETCH (SNB_PF_DATA_RD|SNB_PF_RFO) + +#define SNB_SNP_ANY (SNB_SNP_NONE|SNB_SNP_NOT_NEEDED| \ + SNB_SNP_MISS|SNB_NO_FWD|SNB_SNP_FWD| \ + SNB_HITM) + +#define SNB_DRAM_ANY (SNB_LOCAL|SNB_REMOTE|SNB_SNP_ANY) +#define SNB_DRAM_REMOTE (SNB_REMOTE|SNB_SNP_ANY) + +#define SNB_L3_ACCESS SNB_RESP_ANY +#define SNB_L3_MISS (SNB_DRAM_ANY|SNB_NON_DRAM) + +static __initconst const u64 snb_hw_cache_extra_regs + [PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX] = +{ + [ C(LL ) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_L3_ACCESS, + [ C(RESULT_MISS) ] = SNB_DMND_READ|SNB_L3_MISS, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_L3_ACCESS, + [ C(RESULT_MISS) ] = SNB_DMND_WRITE|SNB_L3_MISS, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_L3_ACCESS, + [ C(RESULT_MISS) ] = SNB_DMND_PREFETCH|SNB_L3_MISS, + }, + }, + [ C(NODE) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_DRAM_ANY, + [ C(RESULT_MISS) ] = SNB_DMND_READ|SNB_DRAM_REMOTE, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_DRAM_ANY, + [ C(RESULT_MISS) ] = SNB_DMND_WRITE|SNB_DRAM_REMOTE, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_DRAM_ANY, + [ C(RESULT_MISS) ] = SNB_DMND_PREFETCH|SNB_DRAM_REMOTE, + }, + }, +}; + static __initconst const u64 snb_hw_cache_event_ids [PERF_COUNT_HW_CACHE_MAX] [PERF_COUNT_HW_CACHE_OP_MAX] @@ -235,16 +313,16 @@ static __initconst const u64 snb_hw_cache_event_ids }, [ C(NODE) ] = { [ C(OP_READ) ] = { - [ C(RESULT_ACCESS) ] = -1, - [ C(RESULT_MISS) ] = -1, + [ C(RESULT_ACCESS) ] = 0x01b7, + [ C(RESULT_MISS) ] = 0x01b7, }, [ C(OP_WRITE) ] = { - [ C(RESULT_ACCESS) ] = -1, - [ C(RESULT_MISS) ] = -1, + [ C(RESULT_ACCESS) ] = 0x01b7, + [ 
C(RESULT_MISS) ] = 0x01b7, }, [ C(OP_PREFETCH) ] = { - [ C(RESULT_ACCESS) ] = -1, - [ C(RESULT_MISS) ] = -1, + [ C(RESULT_ACCESS) ] = 0x01b7, + [ C(RESULT_MISS) ] = 0x01b7, }, }, @@ -1964,6 +2042,8 @@ __init int intel_pmu_init(void) case 58: /* IvyBridge */ memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, sizeof(hw_cache_event_ids)); + memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, + sizeof(hw_cache_extra_regs)); intel_pmu_lbr_init_snb(); -- cgit v1.1 From 597ed953d7db28528b4687e46388c1aa905c14bd Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 9 Jul 2012 13:50:23 +0200 Subject: perf/x86: Make bitfield unsigned Fix: arch/x86/kernel/cpu/perf_event.h:377:43: sparse: dubious one-bit signed bitfield Cc: Borislav Petkov Reported-by: Fengguang Wu Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/n/tip-2jxkmktkppkclj1qe6qxd7ah@git.kernel.org Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h index a15df4b..821d53b 100644 --- a/arch/x86/kernel/cpu/perf_event.h +++ b/arch/x86/kernel/cpu/perf_event.h @@ -374,7 +374,7 @@ struct x86_pmu { /* * Intel DebugStore bits */ - int bts :1, + unsigned int bts :1, bts_active :1, pebs :1, pebs_active :1, -- cgit v1.1 From c1ece48cf7ec07c6b3e093a4036b54bc6078f782 Mon Sep 17 00:00:00 2001 From: "Yan, Zheng" Date: Tue, 24 Jul 2012 10:44:10 +0800 Subject: perf/x86: Fix format definition of SNB-EP uncore QPI box The event control register of SNB-EP uncore QPI box has a one bit extension at bit position 21. Reported-by: Stephane Eranian Signed-off-by: Yan, Zheng Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/1343097850-4348-1-git-send-email-zheng.z.yan@intel.com Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event_intel_uncore.c | 22 +++++++++++++++++++++- arch/x86/kernel/cpu/perf_event_intel_uncore.h | 4 ++++ 2 files changed, 25 insertions(+), 1 deletion(-) diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c index d998170..7563fda 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c @@ -18,6 +18,7 @@ static struct event_constraint constraint_empty = EVENT_CONSTRAINT(0, 0, 0); DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7"); +DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21"); DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15"); DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18"); DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19"); @@ -293,6 +294,15 @@ static struct attribute *snbep_uncore_pcu_formats_attr[] = { NULL, }; +static struct attribute *snbep_uncore_qpi_formats_attr[] = { + &format_attr_event_ext.attr, + &format_attr_umask.attr, + &format_attr_edge.attr, + &format_attr_inv.attr, + &format_attr_thresh8.attr, + NULL, +}; + static struct uncore_event_desc snbep_uncore_imc_events[] = { INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"), INTEL_UNCORE_EVENT_DESC(cas_count_read, "event=0x04,umask=0x03"), @@ -328,6 +338,11 @@ static struct attribute_group snbep_uncore_pcu_format_group = { .attrs = snbep_uncore_pcu_formats_attr, }; +static struct attribute_group snbep_uncore_qpi_format_group = { + .name = "format", + .attrs = snbep_uncore_qpi_formats_attr, +}; + static struct intel_uncore_ops snbep_uncore_msr_ops = { .init_box = snbep_uncore_msr_init_box, .disable_box = snbep_uncore_msr_disable_box, @@ -499,8 +514,13 @@ static struct 
intel_uncore_type snbep_uncore_qpi = { .num_counters = 4, .num_boxes = 2, .perf_ctr_bits = 48, + .perf_ctr = SNBEP_PCI_PMON_CTR0, + .event_ctl = SNBEP_PCI_PMON_CTL0, + .event_mask = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK, + .box_ctl = SNBEP_PCI_PMON_BOX_CTL, + .ops = &snbep_uncore_pci_ops, .event_descs = snbep_uncore_qpi_events, - SNBEP_UNCORE_PCI_COMMON_INIT(), + .format_group = &snbep_uncore_qpi_format_group, }; diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h index 47b1776..f385189 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h @@ -113,6 +113,10 @@ SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \ SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET) +#define SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK \ + (SNBEP_PMON_RAW_EVENT_MASK | \ + SNBEP_PMON_CTL_EV_SEL_EXT) + /* SNB-EP pci control register */ #define SNBEP_PCI_PMON_BOX_CTL 0xf4 #define SNBEP_PCI_PMON_CTL0 0xd8 -- cgit v1.1 From 35d56ca9d401d9d0ac8d91e4db1485af5f38f6fd Mon Sep 17 00:00:00 2001 From: Jovi Zhang Date: Tue, 17 Jul 2012 10:14:41 +0800 Subject: perf/x86: Fix missing struct before structure name When CONFIG_PERF_EVENTS is disabled, there is a compilation error because the struct keyword is missing before the structure name. Signed-off-by: Jovi Zhang Cc: Peter Zijlstra Cc: Jiri Kosina Link: http://lkml.kernel.org/r/CACV3sbKF%3DCX%2B2jWEWesfCA6rBoQ3wDM4-5ac9MuBtVbCtMRHdQ@mail.gmail.com Signed-off-by: Ingo Molnar --- arch/x86/include/asm/perf_event.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h index c78f14a..dab3935 100644 --- a/arch/x86/include/asm/perf_event.h +++ b/arch/x86/include/asm/perf_event.h @@ -234,7 +234,7 @@ extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr); extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap); extern void perf_check_microcode(void); #else -static inline perf_guest_switch_msr *perf_guest_get_msrs(int *nr) +static inline struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr) { *nr = 0; return NULL; -- cgit v1.1 From f403072c6108e15f319a4a5036b650c77760522c Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Sun, 29 Jul 2012 20:22:12 +0200 Subject: uprobes: Don't recheck vma/f_mapping in write_opcode() write_opcode() rechecks valid_vma() and ->f_mapping; this is pointless. The caller, register_for_each_vma() or uprobe_mmap(), has already done these checks under mmap_sem. To clarify, uprobe_mmap() checks valid_vma() only, but we can rely on build_probe_list(vm_file->f_mapping->host).
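For context, a condensed sketch of the caller's path (simplified from register_for_each_vma(); not a verbatim listing) shows that the same checks are already made under mmap_sem before write_opcode() can run:

	down_write(&mm->mmap_sem);
	vma = find_vma(mm, info->vaddr);
	if (vma && valid_vma(vma, is_register) &&
	    vma->vm_file->f_mapping->host == uprobe->inode) {
		/* only now can install_breakpoint() -> write_opcode()
		 * be reached, so rechecking inside write_opcode()
		 * cannot catch anything new */
		ret = install_breakpoint(uprobe, mm, vma, info->vaddr);
	}
	up_write(&mm->mmap_sem);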
Signed-off-by: Oleg Nesterov Acked-by: Srikar Dronamraju Cc: Anton Arapov Cc: Srikar Dronamraju Link: http://lkml.kernel.org/r/20120729182212.GA20304@redhat.com Signed-off-by: Ingo Molnar --- kernel/events/uprobes.c | 19 +------------------ 1 file changed, 1 insertion(+), 18 deletions(-) diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index f935327..a2b32a5 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -206,33 +206,16 @@ static int write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t opcode) { struct page *old_page, *new_page; - struct address_space *mapping; void *vaddr_old, *vaddr_new; struct vm_area_struct *vma; - struct uprobe *uprobe; int ret; + retry: /* Read the page with vaddr into memory */ ret = get_user_pages(NULL, mm, vaddr, 1, 0, 0, &old_page, &vma); if (ret <= 0) return ret; - ret = -EINVAL; - - /* - * We are interested in text pages only. Our pages of interest - * should be mapped for read and execute only. We desist from - * adding probes in write mapped pages since the breakpoints - * might end up in the file copy. - */ - if (!valid_vma(vma, is_swbp_insn(&opcode))) - goto put_out; - - uprobe = container_of(auprobe, struct uprobe, arch); - mapping = uprobe->inode->i_mapping; - if (mapping != vma->vm_file->f_mapping) - goto put_out; - ret = -ENOMEM; new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr); if (!new_page) -- cgit v1.1 From c517ee744b96e441d9c731e245f83c6d08dc0a19 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Sun, 29 Jul 2012 20:22:16 +0200 Subject: uprobes: __replace_page() should not use page_address_in_vma() page_address_in_vma(old_page) in __replace_page() is ugly and wrong. The caller already knows the correct virtual address, this page was found by get_user_pages(vaddr). However, page_address_in_vma() can actually fail if page->mapping was cleared by __delete_from_page_cache() after get_user_pages() returns. But this means the race with page reclaim, write_opcode() should not fail, it should retry and read this page again. Probably the race with remove_mapping() is not possible due to page_freeze_refs() logic, but afaics at least shmem_writepage()->shmem_delete_from_page_cache() can clear ->mapping. We could change __replace_page() to return -EAGAIN in this case, but it would be better to simply use the caller's vaddr and rely on page_check_address(). Signed-off-by: Oleg Nesterov Acked-by: Srikar Dronamraju Cc: Anton Arapov Cc: Srikar Dronamraju Link: http://lkml.kernel.org/r/20120729182216.GA20311@redhat.com Signed-off-by: Ingo Molnar --- kernel/events/uprobes.c | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index a2b32a5..6fda799 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -127,22 +127,19 @@ static loff_t vma_address(struct vm_area_struct *vma, loff_t offset) * based on replace_page in mm/ksm.c * * @vma: vma that holds the pte pointing to page + * @addr: address the old @page is mapped at * @page: the cowed page we are replacing by kpage * @kpage: the modified page we replace page by * * Returns 0 on success, -EFAULT on failure. 
*/ -static int __replace_page(struct vm_area_struct *vma, struct page *page, struct page *kpage) +static int __replace_page(struct vm_area_struct *vma, unsigned long addr, + struct page *page, struct page *kpage) { struct mm_struct *mm = vma->vm_mm; - unsigned long addr; spinlock_t *ptl; pte_t *ptep; - addr = page_address_in_vma(page, vma); - if (addr == -EFAULT) - return -EFAULT; - ptep = page_check_address(page, mm, addr, &ptl, 0); if (!ptep) return -EAGAIN; @@ -243,7 +240,7 @@ retry: goto unlock_out; lock_page(new_page); - ret = __replace_page(vma, old_page, new_page); + ret = __replace_page(vma, vaddr, old_page, new_page); unlock_page(new_page); unlock_out: -- cgit v1.1 From 089ba999dc881a7549d97c55ac9e0052d061867d Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Sun, 29 Jul 2012 20:22:18 +0200 Subject: uprobes: Kill write_opcode()->lock_page(new_page) write_opcode() does lock_page(new_page) for no reason. Nobody can see this page until __replace_page() exposes it under ptl lock, and we do nothing with this page after pte_unmap_unlock(). If nothing else, the similar code in do_wp_page() doesn't lock the new page for page_add_new_anon_rmap/set_pte_at_notify. Signed-off-by: Oleg Nesterov Acked-by: Srikar Dronamraju Cc: Anton Arapov Cc: Srikar Dronamraju Link: http://lkml.kernel.org/r/20120729182218.GA20315@redhat.com Signed-off-by: Ingo Molnar --- kernel/events/uprobes.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 6fda799..23c562b7 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -239,9 +239,7 @@ retry: if (ret) goto unlock_out; - lock_page(new_page); ret = __replace_page(vma, vaddr, old_page, new_page); - unlock_page(new_page); unlock_out: unlock_page(old_page); -- cgit v1.1 From 9f92448ceeea5326db7d114005a7e7ac03904edf Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Sun, 29 Jul 2012 20:22:20 +0200 Subject: uprobes: Clean up and document write_opcode()->lock_page(old_page) The comment above write_opcode()->lock_page(old_page) talks about the race with do_wp_page(). I don't really understand exactly which race it means, but afaics this lock_page() was not enough to close all races with do_wp_page(). Anyway, since: 77fc4af1b59d uprobes: Change register_for_each_vma() to take mm->mmap_sem for writing this code is always called with ->mmap_sem held for writing, so we can forget about do_wp_page(). However, we can't simply remove this lock_page(), and the only (afaics) reason is __replace_page()->try_to_free_swap(). Nothing in write_opcode() needs it, move it into __replace_page() and fix the comment.
Signed-off-by: Oleg Nesterov Acked-by: Srikar Dronamraju Cc: Anton Arapov Cc: Srikar Dronamraju Link: http://lkml.kernel.org/r/20120729182220.GA20322@redhat.com Signed-off-by: Ingo Molnar --- kernel/events/uprobes.c | 27 ++++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 23c562b7..5db150b 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -139,10 +139,15 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr, struct mm_struct *mm = vma->vm_mm; spinlock_t *ptl; pte_t *ptep; + int err; + /* freeze PageSwapCache() for try_to_free_swap() below */ + lock_page(page); + + err = -EAGAIN; ptep = page_check_address(page, mm, addr, &ptl, 0); if (!ptep) - return -EAGAIN; + goto unlock; get_page(kpage); page_add_new_anon_rmap(kpage, vma, addr); @@ -162,7 +167,10 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr, put_page(page); pte_unmap_unlock(ptep, ptl); - return 0; + err = 0; + unlock: + unlock_page(page); + return err; } /** @@ -216,15 +224,10 @@ retry: ret = -ENOMEM; new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr); if (!new_page) - goto put_out; + goto put_old; __SetPageUptodate(new_page); - /* - * lock page will serialize against do_wp_page()'s - * PageAnon() handling - */ - lock_page(old_page); /* copy the page now that we've got it stable */ vaddr_old = kmap_atomic(old_page); vaddr_new = kmap_atomic(new_page); @@ -237,15 +240,13 @@ retry: ret = anon_vma_prepare(vma); if (ret) - goto unlock_out; + goto put_new; ret = __replace_page(vma, vaddr, old_page, new_page); -unlock_out: - unlock_page(old_page); +put_new: page_cache_release(new_page); - -put_out: +put_old: put_page(old_page); if (unlikely(ret == -EAGAIN)) -- cgit v1.1 From 665605a2a207dbe1fa429b474f932d6ea138ba92 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Sun, 29 Jul 2012 20:22:29 +0200 Subject: uprobes: Uprobe_mmap/munmap needs list_for_each_entry_safe() The bug was introduced by me in 449d0d7c ("uprobes: Simplify the usage of uprobe->pending_list"). Yes, we do not care about uprobe->pending_list after return and nobody can remove the current list entry, but put_uprobe(uprobe) can actually free it and thus we need list_for_each_safe(). 
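For readers unfamiliar with the _safe list variants, a minimal stand-alone analogue (plain user-space C, not the kernel macros) of the bug class being fixed: the successor pointer must be saved before the loop body can free the current node, which is what list_for_each_entry_safe() does for uprobe->pending_list when put_uprobe() drops the last reference:

#include <stdlib.h>

struct node { struct node *next; };

static void destroy_all(struct node *head)
{
	struct node *n, *next;

	/* the unsafe form, "for (n = head; n; n = n->next) free(n);",
	 * would read n->next from already-freed memory */
	for (n = head; n; n = next) {
		next = n->next;	/* save the successor first... */
		free(n);	/* ...then freeing n is safe */
	}
}

int main(void)
{
	struct node *head = NULL;

	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));
		n->next = head;
		head = n;
	}
	destroy_all(head);
	return 0;
}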
Reported-by: Srikar Dronamraju Signed-off-by: Oleg Nesterov Acked-by: Srikar Dronamraju Cc: Anton Arapov Link: http://lkml.kernel.org/r/20120729182229.GA20329@redhat.com Signed-off-by: Ingo Molnar --- kernel/events/uprobes.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 5db150b..bed2161 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -1010,7 +1010,7 @@ static void build_probe_list(struct inode *inode, struct list_head *head) int uprobe_mmap(struct vm_area_struct *vma) { struct list_head tmp_list; - struct uprobe *uprobe; + struct uprobe *uprobe, *u; struct inode *inode; int ret, count; @@ -1028,7 +1028,7 @@ int uprobe_mmap(struct vm_area_struct *vma) ret = 0; count = 0; - list_for_each_entry(uprobe, &tmp_list, pending_list) { + list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) { if (!ret) { loff_t vaddr = vma_address(vma, uprobe->offset); @@ -1076,7 +1076,7 @@ int uprobe_mmap(struct vm_area_struct *vma) void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end) { struct list_head tmp_list; - struct uprobe *uprobe; + struct uprobe *uprobe, *u; struct inode *inode; if (!atomic_read(&uprobe_events) || !valid_vma(vma, false)) @@ -1093,7 +1093,7 @@ void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned lon mutex_lock(uprobes_mmap_hash(inode)); build_probe_list(inode, &tmp_list); - list_for_each_entry(uprobe, &tmp_list, pending_list) { + list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) { loff_t vaddr = vma_address(vma, uprobe->offset); if (vaddr >= start && vaddr < end) { -- cgit v1.1 From 2fd611a991391a6050cbd139201a2e12fc306540 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Sun, 29 Jul 2012 20:22:31 +0200 Subject: uprobes: Suppress uprobe_munmap() from mmput() uprobe_munmap() does get_user_pages() and it is also called from the final mmput()->exit_mmap() path. This slows down exit/mmput() for no reason, and I think it is simply dangerous/wrong to try to fault-in a page into the dying mm. If nothing else, this happens after the last sync_mm_rss(), afaics handle_mm_fault() can change the task->rss_stat and make the subsequent check_mm() unhappy. Change uprobe_munmap() to check mm->mm_users != 0. Signed-off-by: Oleg Nesterov Acked-by: Srikar Dronamraju Cc: Anton Arapov Cc: Srikar Dronamraju Link: http://lkml.kernel.org/r/20120729182231.GA20336@redhat.com Signed-off-by: Ingo Molnar --- kernel/events/uprobes.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index bed2161..9db9cdf 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -1082,6 +1082,9 @@ void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned lon if (!atomic_read(&uprobe_events) || !valid_vma(vma, false)) return; + if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput() ? */ + return; + if (!atomic_read(&vma->vm_mm->uprobes_state.count)) return; -- cgit v1.1 From aefd8933d445abf7ff0d4027c624737898827bcd Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Sun, 29 Jul 2012 20:22:33 +0200 Subject: uprobes: Fix overflow in vma_address()/find_active_uprobe() vma->vm_pgoff is "unsigned long", it should be promoted to loff_t before the multiplication to avoid the overflow. 
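A small stand-alone example (user-space C; assumes a 32-bit unsigned long as on i386 and the usual PAGE_SHIFT of 12) makes the overflow visible:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12

int main(void)
{
	/* a file offset of 4GiB expressed in pages, as vm_pgoff stores it */
	uint32_t vm_pgoff = 0x100000;

	/* the 32-bit shift wraps to 0 before the widening assignment... */
	int64_t bad  = vm_pgoff << PAGE_SHIFT;
	/* ...promoting to a 64-bit type first gives the intended 2^32 */
	int64_t good = (int64_t)vm_pgoff << PAGE_SHIFT;

	printf("bad=%#llx good=%#llx\n",
	       (unsigned long long)bad, (unsigned long long)good);
	return 0;
}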
Signed-off-by: Oleg Nesterov Acked-by: Srikar Dronamraju Cc: Anton Arapov Cc: Srikar Dronamraju Link: http://lkml.kernel.org/r/20120729182233.GA20339@redhat.com Signed-off-by: Ingo Molnar --- kernel/events/uprobes.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 9db9cdf..fb961d5 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -117,7 +117,7 @@ static loff_t vma_address(struct vm_area_struct *vma, loff_t offset) loff_t vaddr; vaddr = vma->vm_start + offset; - vaddr -= vma->vm_pgoff << PAGE_SHIFT; + vaddr -= (loff_t)vma->vm_pgoff << PAGE_SHIFT; return vaddr; } @@ -1450,7 +1450,7 @@ static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp) inode = vma->vm_file->f_mapping->host; offset = bp_vaddr - vma->vm_start; - offset += (vma->vm_pgoff << PAGE_SHIFT); + offset += (loff_t)vma->vm_pgoff << PAGE_SHIFT; uprobe = find_uprobe(inode, offset); } -- cgit v1.1 From 6dab3cc078e3da0d26534410bc9e018a17031d95 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Sun, 29 Jul 2012 20:22:36 +0200 Subject: uprobes: Remove copy_vma()->uprobe_mmap() Remove copy_vma()->uprobe_mmap(new_vma), it is absolutely wrong. This new_vma was just initialized to represent the new unmapped area, [vm_start, vm_end) was returned by get_unmapped_area() in the caller. This means that uprobe_mmap()->get_user_pages() will fail for sure, simply because find_vma() can never succeed. And I verified that sys_mremap()->mremap_to() indeed always fails with the wrong ENOMEM code if [addr, addr+old_len] is probed. And why was this uprobe_mmap() added? I believe the intent was wrong. Note that the caller is going to do move_page_tables(), all registered uprobes are already faulted in, we only change the virtual addresses. NOTE: However, somehow we need to close the race with uprobe_register() which relies on map_info->vaddr. This needs another fix I'll try to do later. Probably we need uprobe_mmap() in move_vma() but we can not do this right now, this can confuse uprobes_state.counter (which I still hope we are going to kill). Signed-off-by: Oleg Nesterov Acked-by: Srikar Dronamraju Cc: Anton Arapov Cc: Srikar Dronamraju Link: http://lkml.kernel.org/r/20120729182236.GA20342@redhat.com Signed-off-by: Ingo Molnar --- mm/mmap.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/mm/mmap.c b/mm/mmap.c index 3edfcdf..e5a4614 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -2418,9 +2418,6 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, if (new_vma->vm_file) { get_file(new_vma->vm_file); - if (uprobe_mmap(new_vma)) - goto out_free_mempol; - if (vma->vm_flags & VM_EXECUTABLE) added_exe_file_vma(mm); } -- cgit v1.1 From 89133786f9408d53361874a8c784fff150fc7f7c Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Sun, 29 Jul 2012 20:22:38 +0200 Subject: uprobes: Remove insert_vm_struct()->uprobe_mmap() Remove insert_vm_struct()->uprobe_mmap(). It is not needed, nobody except arch/ia64/kernel/perfmon.c uses insert_vm_struct(vma) with vma->vm_file != NULL. And it is wrong. Again, get_user_pages() can not succeed before vma_link(vma) makes it visible to find_vma(). And even if this worked, we must not insert the new bp before this mapping is visible to vma_prio_tree_foreach() for uprobe_unregister().
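For reference, a condensed sketch of the insert_vm_struct() tail before this patch (abridged from the diff below; not a complete listing) shows the broken ordering:

	/* vma is fully initialized here, but not yet linked into the
	 * mm's vma tree, so find_vma() -- and therefore the
	 * get_user_pages() done by uprobe_mmap() -- cannot see it */
	if (vma->vm_file && uprobe_mmap(vma))
		return -EINVAL;

	vma_link(mm, vma, prev, rb_link, rb_parent);	/* visible only now */
	return 0;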
Signed-off-by: Oleg Nesterov Acked-by: Srikar Dronamraju Cc: Anton Arapov Cc: Srikar Dronamraju Link: http://lkml.kernel.org/r/20120729182238.GA20349@redhat.com Signed-off-by: Ingo Molnar --- mm/mmap.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/mm/mmap.c b/mm/mmap.c index e5a4614..4fe2697 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -2345,9 +2345,6 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma) security_vm_enough_memory_mm(mm, vma_pages(vma))) return -ENOMEM; - if (vma->vm_file && uprobe_mmap(vma)) - return -EINVAL; - vma_link(mm, vma, prev, rb_link, rb_parent); return 0; } -- cgit v1.1 From 891c39708144bbe401b4aa151bffd0fe41b1dafd Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Sun, 29 Jul 2012 20:22:40 +0200 Subject: uprobes: Teach build_probe_list() to consider the range Currently build_probe_list() builds the list of all uprobes attached to the given inode, and the caller should filter out those who don't fall into the [start,end) range, this is sub-optimal. This patch turns find_least_offset_node() into find_node_in_range() which returns the first node inside the [min,max] range, and changes build_probe_list() to use this node as a starting point for rb_prev() and rb_next() to find all other nodes the caller needs. The resulting list is no longer sorted but we do not care. This can speed up both build_probe_list() and the callers, but there is another reason to introduce find_node_in_range(). It can be used to figure out whether the given vma has uprobes or not, this will be needed soon. While at it, shift INIT_LIST_HEAD(tmp_list) into build_probe_list(). Signed-off-by: Oleg Nesterov Acked-by: Srikar Dronamraju Cc: Anton Arapov Cc: Srikar Dronamraju Link: http://lkml.kernel.org/r/20120729182240.GA20352@redhat.com Signed-off-by: Ingo Molnar --- kernel/events/uprobes.c | 103 +++++++++++++++++++++++------------------------- 1 file changed, 50 insertions(+), 53 deletions(-) diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index fb961d5..2ef1233 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -939,59 +939,66 @@ void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consume put_uprobe(uprobe); } -/* - * Of all the nodes that correspond to the given inode, return the node - * with the least offset. - */ -static struct rb_node *find_least_offset_node(struct inode *inode) +static struct rb_node * +find_node_in_range(struct inode *inode, loff_t min, loff_t max) { - struct uprobe u = { .inode = inode, .offset = 0}; struct rb_node *n = uprobes_tree.rb_node; - struct rb_node *close_node = NULL; - struct uprobe *uprobe; - int match; while (n) { - uprobe = rb_entry(n, struct uprobe, rb_node); - match = match_uprobe(&u, uprobe); - - if (uprobe->inode == inode) - close_node = n; + struct uprobe *u = rb_entry(n, struct uprobe, rb_node); - if (!match) - return close_node; - - if (match < 0) + if (inode < u->inode) { n = n->rb_left; - else + } else if (inode > u->inode) { n = n->rb_right; + } else { + if (max < u->offset) + n = n->rb_left; + else if (min > u->offset) + n = n->rb_right; + else + break; + } } - return close_node; + return n; } /* - * For a given inode, build a list of probes that need to be inserted. + * For a given range in vma, build a list of probes that need to be inserted. 
*/ -static void build_probe_list(struct inode *inode, struct list_head *head) +static void build_probe_list(struct inode *inode, + struct vm_area_struct *vma, + unsigned long start, unsigned long end, + struct list_head *head) { - struct uprobe *uprobe; + loff_t min, max; unsigned long flags; - struct rb_node *n; - - spin_lock_irqsave(&uprobes_treelock, flags); - - n = find_least_offset_node(inode); + struct rb_node *n, *t; + struct uprobe *u; - for (; n; n = rb_next(n)) { - uprobe = rb_entry(n, struct uprobe, rb_node); - if (uprobe->inode != inode) - break; + INIT_LIST_HEAD(head); + min = ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + start - vma->vm_start; + max = min + (end - start) - 1; - list_add(&uprobe->pending_list, head); - atomic_inc(&uprobe->ref); + spin_lock_irqsave(&uprobes_treelock, flags); + n = find_node_in_range(inode, min, max); + if (n) { + for (t = n; t; t = rb_prev(t)) { + u = rb_entry(t, struct uprobe, rb_node); + if (u->inode != inode || u->offset < min) + break; + list_add(&u->pending_list, head); + atomic_inc(&u->ref); + } + for (t = n; (t = rb_next(t)); ) { + u = rb_entry(t, struct uprobe, rb_node); + if (u->inode != inode || u->offset > max) + break; + list_add(&u->pending_list, head); + atomic_inc(&u->ref); + } } - spin_unlock_irqrestore(&uprobes_treelock, flags); } @@ -1021,9 +1028,8 @@ int uprobe_mmap(struct vm_area_struct *vma) if (!inode) return 0; - INIT_LIST_HEAD(&tmp_list); mutex_lock(uprobes_mmap_hash(inode)); - build_probe_list(inode, &tmp_list); + build_probe_list(inode, vma, vma->vm_start, vma->vm_end, &tmp_list); ret = 0; count = 0; @@ -1032,11 +1038,6 @@ int uprobe_mmap(struct vm_area_struct *vma) if (!ret) { loff_t vaddr = vma_address(vma, uprobe->offset); - if (vaddr < vma->vm_start || vaddr >= vma->vm_end) { - put_uprobe(uprobe); - continue; - } - ret = install_breakpoint(uprobe, vma->vm_mm, vma, vaddr); /* * We can race against uprobe_register(), see the @@ -1092,21 +1093,17 @@ void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned lon if (!inode) return; - INIT_LIST_HEAD(&tmp_list); mutex_lock(uprobes_mmap_hash(inode)); - build_probe_list(inode, &tmp_list); + build_probe_list(inode, vma, start, end, &tmp_list); list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) { loff_t vaddr = vma_address(vma, uprobe->offset); - - if (vaddr >= start && vaddr < end) { - /* - * An unregister could have removed the probe before - * unmap. So check before we decrement the count. - */ - if (is_swbp_at_addr(vma->vm_mm, vaddr) == 1) - atomic_dec(&vma->vm_mm->uprobes_state.count); - } + /* + * An unregister could have removed the probe before + * unmap. So check before we decrement the count. + */ + if (is_swbp_at_addr(vma->vm_mm, vaddr) == 1) + atomic_dec(&vma->vm_mm->uprobes_state.count); put_uprobe(uprobe); } mutex_unlock(uprobes_mmap_hash(inode)); -- cgit v1.1 From cb113b47d098185f3f1f67e8300d05ddce842b66 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Sun, 29 Jul 2012 20:22:42 +0200 Subject: uprobes: Introduce vaddr_to_offset(vma, vaddr) Add the new helper, vaddr_to_offset(vma, vaddr) which returns the offset in vma->vm_file this vaddr is mapped at. Change build_probe_list() and find_active_uprobe() to use the new helper, the next patch adds another user. 
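A stand-alone sketch of the arithmetic (user-space C with illustrative vma fields and PAGE_SHIFT value; not kernel code), together with the inverse computed by vma_address(), renamed offset_to_vaddr() two patches later:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12

struct vma { uint64_t vm_start; uint64_t vm_pgoff; };

/* same formula the patch adds */
static int64_t vaddr_to_offset(const struct vma *vma, uint64_t vaddr)
{
	return ((int64_t)vma->vm_pgoff << PAGE_SHIFT) + (vaddr - vma->vm_start);
}

/* the inverse, as computed by vma_address()/offset_to_vaddr() */
static uint64_t offset_to_vaddr(const struct vma *vma, int64_t offset)
{
	return vma->vm_start + offset - ((int64_t)vma->vm_pgoff << PAGE_SHIFT);
}

int main(void)
{
	/* text mapped at 0x400000, starting three pages into the file */
	struct vma vma = { .vm_start = 0x400000, .vm_pgoff = 3 };
	uint64_t vaddr = 0x401234;
	int64_t offset = vaddr_to_offset(&vma, vaddr);	/* 0x3000 + 0x1234 */

	printf("offset=%#llx vaddr=%#llx\n",
	       (unsigned long long)offset,
	       (unsigned long long)offset_to_vaddr(&vma, offset));
	return 0;
}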
Signed-off-by: Oleg Nesterov Acked-by: Srikar Dronamraju Cc: Anton Arapov Cc: Srikar Dronamraju Link: http://lkml.kernel.org/r/20120729182242.GA20355@redhat.com Signed-off-by: Ingo Molnar --- kernel/events/uprobes.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 2ef1233..b03256c 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -122,6 +122,11 @@ static loff_t vma_address(struct vm_area_struct *vma, loff_t offset) return vaddr; } +static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr) +{ + return ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + (vaddr - vma->vm_start); +} + /** * __replace_page - replace page in vma by new page. * based on replace_page in mm/ksm.c @@ -978,7 +983,7 @@ static void build_probe_list(struct inode *inode, struct uprobe *u; INIT_LIST_HEAD(head); - min = ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + start - vma->vm_start; + min = vaddr_to_offset(vma, start); max = min + (end - start) - 1; spin_lock_irqsave(&uprobes_treelock, flags); @@ -1442,12 +1447,9 @@ static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp) vma = find_vma(mm, bp_vaddr); if (vma && vma->vm_start <= bp_vaddr) { if (valid_vma(vma, false)) { - struct inode *inode; - loff_t offset; + struct inode *inode = vma->vm_file->f_mapping->host; + loff_t offset = vaddr_to_offset(vma, bp_vaddr); - inode = vma->vm_file->f_mapping->host; - offset = bp_vaddr - vma->vm_start; - offset += (loff_t)vma->vm_pgoff << PAGE_SHIFT; uprobe = find_uprobe(inode, offset); } -- cgit v1.1 From f4d6dfe55115efe981b4b5f37183ddccaaa792f0 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Sun, 29 Jul 2012 20:22:44 +0200 Subject: uprobes: Fix register_for_each_vma()->vma_address() check 1. register_for_each_vma() checks that vma_address() == vaddr, but this is not enough. We should also ensure that vaddr >= vm_start; find_vma() guarantees "vaddr < vm_end" only. 2. After the previous changes, register_for_each_vma() is the only reason why vma_address() has to return loff_t, all other users know that we have the valid mapping at this offset and thus the overflow is not possible. Change the code to use vaddr_to_offset() instead, imho this looks more clean/understandable and now we can change vma_address(). 3. While at it, remove the unnecessary type-cast.
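Pictorially, the case that the new check in point 1 guards against looks like this (an illustrative sketch, not from the patch):

            vaddr
              v
     ...gap...[vm_start ................ vm_end)

find_vma(mm, info->vaddr) only guarantees info->vaddr < vma->vm_end, so without the explicit "vma->vm_start > info->vaddr" test a probe address in the gap would wrongly be attributed to the vma above it.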
Signed-off-by: Oleg Nesterov Acked-by: Srikar Dronamraju Cc: Anton Arapov Cc: Srikar Dronamraju Link: http://lkml.kernel.org/r/20120729182244.GA20362@redhat.com Signed-off-by: Ingo Molnar --- kernel/events/uprobes.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index b03256c..cdc3c95 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -823,12 +823,13 @@ static int register_for_each_vma(struct uprobe *uprobe, bool is_register) goto free; down_write(&mm->mmap_sem); - vma = find_vma(mm, (unsigned long)info->vaddr); - if (!vma || !valid_vma(vma, is_register)) + vma = find_vma(mm, info->vaddr); + if (!vma || !valid_vma(vma, is_register) || + vma->vm_file->f_mapping->host != uprobe->inode) goto unlock; - if (vma->vm_file->f_mapping->host != uprobe->inode || - vma_address(vma, uprobe->offset) != info->vaddr) + if (vma->vm_start > info->vaddr || + vaddr_to_offset(vma, info->vaddr) != uprobe->offset) goto unlock; if (is_register) { -- cgit v1.1 From 57683f72b8c01c53c85fe13e82fe1feb3a06bcd5 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Sun, 29 Jul 2012 20:22:47 +0200 Subject: uprobes: Rename vma_address() and make it return "unsigned long" 1. vma_address() returns loff_t, this looks confusing and this is unnecessary after the previous change. Make it return "ulong", all callers truncate the result anyway. 2. Its name conflicts with mm/rmap.c:vma_address(), rename it to offset_to_vaddr(), this matches vaddr_to_offset(). Signed-off-by: Oleg Nesterov Acked-by: Srikar Dronamraju Cc: Anton Arapov Cc: Srikar Dronamraju Link: http://lkml.kernel.org/r/20120729182247.GA20365@redhat.com Signed-off-by: Ingo Molnar --- kernel/events/uprobes.c | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index cdc3c95..bb30a4f 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -112,14 +112,9 @@ static bool valid_vma(struct vm_area_struct *vma, bool is_register) return false; } -static loff_t vma_address(struct vm_area_struct *vma, loff_t offset) +static unsigned long offset_to_vaddr(struct vm_area_struct *vma, loff_t offset) { - loff_t vaddr; - - vaddr = vma->vm_start + offset; - vaddr -= (loff_t)vma->vm_pgoff << PAGE_SHIFT; - - return vaddr; + return vma->vm_start + offset - ((loff_t)vma->vm_pgoff << PAGE_SHIFT); } static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr) @@ -775,7 +770,7 @@ build_map_info(struct address_space *mapping, loff_t offset, bool is_register) curr = info; info->mm = vma->vm_mm; - info->vaddr = vma_address(vma, offset); + info->vaddr = offset_to_vaddr(vma, offset); } mutex_unlock(&mapping->i_mmap_mutex); @@ -1042,7 +1037,7 @@ int uprobe_mmap(struct vm_area_struct *vma) list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) { if (!ret) { - loff_t vaddr = vma_address(vma, uprobe->offset); + unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset); ret = install_breakpoint(uprobe, vma->vm_mm, vma, vaddr); /* @@ -1103,7 +1098,7 @@ void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned lon build_probe_list(inode, vma, start, end, &tmp_list); list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) { - loff_t vaddr = vma_address(vma, uprobe->offset); + unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset); /* * An unregister could have removed the probe before * unmap. So check before we decrement the count. 
-- cgit v1.1 From 194f8dcbe9629d8e9346cf96345a9c0bbf0e67ae Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Sun, 29 Jul 2012 20:22:49 +0200 Subject: uprobes: __replace_page() needs munlock_vma_page() Like do_wp_page(), __replace_page() should do munlock_vma_page() for the case when the old page still has other !VM_LOCKED mappings. Unfortunately this needs mm/internal.h. Also, move put_page() outside of ptl lock. This doesn't really matter but looks a bit better. Signed-off-by: Oleg Nesterov Acked-by: Srikar Dronamraju Cc: Anton Arapov Cc: Srikar Dronamraju Link: http://lkml.kernel.org/r/20120729182249.GA20372@redhat.com Signed-off-by: Ingo Molnar --- kernel/events/uprobes.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index bb30a4f..c08a22d 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -32,6 +32,7 @@ #include /* try_to_free_swap */ #include /* user_enable_single_step */ #include /* notifier mechanism */ +#include "../../mm/internal.h" /* munlock_vma_page */ #include @@ -141,7 +142,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep; int err; - /* freeze PageSwapCache() for try_to_free_swap() below */ + /* For try_to_free_swap() and munlock_vma_page() below */ lock_page(page); err = -EAGAIN; @@ -164,9 +165,12 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr, page_remove_rmap(page); if (!page_mapped(page)) try_to_free_swap(page); - put_page(page); pte_unmap_unlock(ptep, ptl); + if (vma->vm_flags & VM_LOCKED) + munlock_vma_page(page); + put_page(page); + err = 0; unlock: unlock_page(page); -- cgit v1.1