117 files changed, 5582 insertions, 3003 deletions
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 81c287f..0293fc8 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -1885,6 +1885,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted. arch_perfmon: [X86] Force use of architectural perfmon on Intel CPUs instead of the CPU specific event set. + timer: [X86] Force use of architectural NMI + timer mode (see also oprofile.timer + for generic hr timer mode) + [s390] Force legacy basic mode sampling + (report cpu_type "timer") oops=panic Always panic on oopses. Default is to just kill the process, but there is a small probability of diff --git a/Documentation/trace/events.txt b/Documentation/trace/events.txt index b510564..bb24c2a0e 100644 --- a/Documentation/trace/events.txt +++ b/Documentation/trace/events.txt @@ -191,8 +191,6 @@ And for string fields they are: Currently, only exact string matches are supported. -Currently, the maximum number of predicates in a filter is 16. - 5.2 Setting filters ------------------- diff --git a/arch/Kconfig b/arch/Kconfig index 4b0669c..2505740 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -30,6 +30,10 @@ config OPROFILE_EVENT_MULTIPLEX config HAVE_OPROFILE bool +config OPROFILE_NMI_TIMER + def_bool y + depends on PERF_EVENTS && HAVE_PERF_EVENTS_NMI + config KPROBES bool "Kprobes" depends on MODULES diff --git a/arch/s390/oprofile/hwsampler.c b/arch/s390/oprofile/hwsampler.c index f43c0e4..9daee91 100644 --- a/arch/s390/oprofile/hwsampler.c +++ b/arch/s390/oprofile/hwsampler.c @@ -22,6 +22,7 @@ #include <asm/irq.h> #include "hwsampler.h" +#include "op_counter.h" #define MAX_NUM_SDB 511 #define MIN_NUM_SDB 1 @@ -896,6 +897,8 @@ static void add_samples_to_oprofile(unsigned int cpu, unsigned long *sdbt, if (sample_data_ptr->P == 1) { /* userspace sample */ unsigned int pid = sample_data_ptr->prim_asn; + if (!counter_config.user) + goto skip_sample; rcu_read_lock(); tsk = pid_task(find_vpid(pid), PIDTYPE_PID); if (tsk) @@ -903,6 +906,8 @@ static void add_samples_to_oprofile(unsigned int cpu, unsigned long *sdbt, rcu_read_unlock(); } else { /* kernelspace sample */ + if (!counter_config.kernel) + goto skip_sample; regs = task_pt_regs(current); } @@ -910,7 +915,7 @@ static void add_samples_to_oprofile(unsigned int cpu, unsigned long *sdbt, oprofile_add_ext_hw_sample(sample_data_ptr->ia, regs, 0, !sample_data_ptr->P, tsk); mutex_unlock(&hws_sem); - + skip_sample: sample_data_ptr++; } } diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c index bd58b72..2297be4 100644 --- a/arch/s390/oprofile/init.c +++ b/arch/s390/oprofile/init.c @@ -2,10 +2,11 @@ * arch/s390/oprofile/init.c * * S390 Version - * Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Copyright (C) 2002-2011 IBM Deutschland Entwicklung GmbH, IBM Corporation * Author(s): Thomas Spatzier (tspat@de.ibm.com) * Author(s): Mahesh Salgaonkar (mahesh@linux.vnet.ibm.com) * Author(s): Heinz Graalfs (graalfs@linux.vnet.ibm.com) + * Author(s): Andreas Krebbel (krebbel@linux.vnet.ibm.com) * * @remark Copyright 2002-2011 OProfile authors */ @@ -14,6 +15,8 @@ #include <linux/init.h> #include <linux/errno.h> #include <linux/fs.h> +#include <linux/module.h> +#include <asm/processor.h> #include "../../../drivers/oprofile/oprof.h" @@ -22,6 +25,7 @@ extern void s390_backtrace(struct pt_regs * const regs, unsigned int depth); #ifdef CONFIG_64BIT #include "hwsampler.h" +#include "op_counter.h" #define DEFAULT_INTERVAL 4127518 @@ 
-35,16 +39,41 @@ static unsigned long oprofile_max_interval; static unsigned long oprofile_sdbt_blocks = DEFAULT_SDBT_BLOCKS; static unsigned long oprofile_sdb_blocks = DEFAULT_SDB_BLOCKS; -static int hwsampler_file; +static int hwsampler_enabled; static int hwsampler_running; /* start_mutex must be held to change */ +static int hwsampler_available; static struct oprofile_operations timer_ops; +struct op_counter_config counter_config; + +enum __force_cpu_type { + reserved = 0, /* do not force */ + timer, +}; +static int force_cpu_type; + +static int set_cpu_type(const char *str, struct kernel_param *kp) +{ + if (!strcmp(str, "timer")) { + force_cpu_type = timer; + printk(KERN_INFO "oprofile: forcing timer to be returned " + "as cpu type\n"); + } else { + force_cpu_type = 0; + } + + return 0; +} +module_param_call(cpu_type, set_cpu_type, NULL, NULL, 0); +MODULE_PARM_DESC(cpu_type, "Force legacy basic mode sampling " + "(report cpu_type \"timer\")"); + static int oprofile_hwsampler_start(void) { int retval; - hwsampler_running = hwsampler_file; + hwsampler_running = hwsampler_enabled; if (!hwsampler_running) return timer_ops.start(); @@ -72,10 +101,16 @@ static void oprofile_hwsampler_stop(void) return; } +/* + * File ops used for: + * /dev/oprofile/0/enabled + * /dev/oprofile/hwsampling/hwsampler (cpu_type = timer) + */ + static ssize_t hwsampler_read(struct file *file, char __user *buf, size_t count, loff_t *offset) { - return oprofilefs_ulong_to_user(hwsampler_file, buf, count, offset); + return oprofilefs_ulong_to_user(hwsampler_enabled, buf, count, offset); } static ssize_t hwsampler_write(struct file *file, char const __user *buf, @@ -91,6 +126,9 @@ static ssize_t hwsampler_write(struct file *file, char const __user *buf, if (retval <= 0) return retval; + if (val != 0 && val != 1) + return -EINVAL; + if (oprofile_started) /* * safe to do without locking as we set * hwsampler_running in start() when start_mutex is * held */ return -EBUSY; - hwsampler_file = val; + hwsampler_enabled = val; return count; } @@ -109,38 +147,311 @@ static const struct file_operations hwsampler_fops = { .read = hwsampler_read, .write = hwsampler_write, }; +/* + * File ops used for: + * /dev/oprofile/0/count + * /dev/oprofile/hwsampling/hw_interval (cpu_type = timer) + * + * Make sure that the value is within the hardware range. + */ + +static ssize_t hw_interval_read(struct file *file, char __user *buf, + size_t count, loff_t *offset) { + return oprofilefs_ulong_to_user(oprofile_hw_interval, buf, + count, offset); } + +static ssize_t hw_interval_write(struct file *file, char const __user *buf, + size_t count, loff_t *offset) +{ + unsigned long val; + int retval; + + if (*offset) + return -EINVAL; + retval = oprofilefs_ulong_from_user(&val, buf, count); + if (retval) + return retval; + if (val < oprofile_min_interval) + oprofile_hw_interval = oprofile_min_interval; + else if (val > oprofile_max_interval) + oprofile_hw_interval = oprofile_max_interval; + else + oprofile_hw_interval = val; + + return count; +} + +static const struct file_operations hw_interval_fops = { + .read = hw_interval_read, + .write = hw_interval_write, +}; + +/* + * File ops used for: + * /dev/oprofile/0/event + * Only a single event with number 0 is supported with this counter. + * + * /dev/oprofile/0/unit_mask + * This is a dummy file needed by the user space tools. + * No value other than 0 is accepted or returned.
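hw_interval_write() above clamps out-of-range values to the hardware limits instead of rejecting them. Behaviorally this is the kernel's clamp() helper from <linux/kernel.h>; a minimal equivalent sketch of the same tail (all three operands are unsigned long, so no type conversion is involved):

	retval = oprofilefs_ulong_from_user(&val, buf, count);
	if (retval)
		return retval;
	/* min if below oprofile_min_interval, max if above, else val */
	oprofile_hw_interval = clamp(val, oprofile_min_interval,
				     oprofile_max_interval);
	return count;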
+ */ + +static ssize_t hwsampler_zero_read(struct file *file, char __user *buf, + size_t count, loff_t *offset) +{ + return oprofilefs_ulong_to_user(0, buf, count, offset); +} + +static ssize_t hwsampler_zero_write(struct file *file, char const __user *buf, + size_t count, loff_t *offset) +{ + unsigned long val; + int retval; + + if (*offset) + return -EINVAL; + + retval = oprofilefs_ulong_from_user(&val, buf, count); + if (retval) + return retval; + if (val != 0) + return -EINVAL; + return count; +} + +static const struct file_operations zero_fops = { + .read = hwsampler_zero_read, + .write = hwsampler_zero_write, +}; + +/* /dev/oprofile/0/kernel file ops. */ + +static ssize_t hwsampler_kernel_read(struct file *file, char __user *buf, + size_t count, loff_t *offset) +{ + return oprofilefs_ulong_to_user(counter_config.kernel, + buf, count, offset); +} + +static ssize_t hwsampler_kernel_write(struct file *file, char const __user *buf, + size_t count, loff_t *offset) +{ + unsigned long val; + int retval; + + if (*offset) + return -EINVAL; + + retval = oprofilefs_ulong_from_user(&val, buf, count); + if (retval) + return retval; + + if (val != 0 && val != 1) + return -EINVAL; + + counter_config.kernel = val; + + return count; +} + +static const struct file_operations kernel_fops = { + .read = hwsampler_kernel_read, + .write = hwsampler_kernel_write, +}; + +/* /dev/oprofile/0/user file ops. */ + +static ssize_t hwsampler_user_read(struct file *file, char __user *buf, + size_t count, loff_t *offset) +{ + return oprofilefs_ulong_to_user(counter_config.user, + buf, count, offset); +} + +static ssize_t hwsampler_user_write(struct file *file, char const __user *buf, + size_t count, loff_t *offset) +{ + unsigned long val; + int retval; + + if (*offset) + return -EINVAL; + + retval = oprofilefs_ulong_from_user(&val, buf, count); + if (retval) + return retval; + + if (val != 0 && val != 1) + return -EINVAL; + + counter_config.user = val; + + return count; +} + +static const struct file_operations user_fops = { + .read = hwsampler_user_read, + .write = hwsampler_user_write, +}; + + +/* + * File ops used for: /dev/oprofile/timer/enabled + * The value always has to be the inverted value of hwsampler_enabled. So + * no separate variable is created. That way we do not need locking. + */ + +static ssize_t timer_enabled_read(struct file *file, char __user *buf, + size_t count, loff_t *offset) +{ + return oprofilefs_ulong_to_user(!hwsampler_enabled, buf, count, offset); +} + +static ssize_t timer_enabled_write(struct file *file, char const __user *buf, + size_t count, loff_t *offset) +{ + unsigned long val; + int retval; + + if (*offset) + return -EINVAL; + + retval = oprofilefs_ulong_from_user(&val, buf, count); + if (retval) + return retval; + + if (val != 0 && val != 1) + return -EINVAL; + + /* Timer cannot be disabled without having hardware sampling. 
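Taken together, these file ops present legacy oprofile user space with a single virtual counter. A hedged user-space sketch of the expected write sequence, assuming oprofilefs is mounted at /dev/oprofile as in the paths quoted in the comments above (error handling trimmed):

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>

	/* write a decimal string to one oprofilefs attribute */
	static void op_write(const char *path, const char *val)
	{
		int fd = open(path, O_WRONLY);

		if (fd >= 0) {
			write(fd, val, strlen(val));
			close(fd);
		}
	}

	int main(void)
	{
		op_write("/dev/oprofile/0/event", "0");	/* only event 0 exists */
		op_write("/dev/oprofile/0/unit_mask", "0");	/* dummy, must be 0 */
		op_write("/dev/oprofile/0/count", "4127518");	/* clamped to hw range */
		op_write("/dev/oprofile/0/kernel", "1");	/* keep kernel samples */
		op_write("/dev/oprofile/0/user", "0");	/* drop user space samples */
		op_write("/dev/oprofile/0/enabled", "1");	/* arm hardware sampling */
		return 0;
	}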
*/ + if (val == 0 && !hwsampler_available) + return -EINVAL; + + if (oprofile_started) + /* + * safe to do without locking as we set + * hwsampler_running in start() when start_mutex is + * held + */ + return -EBUSY; + + hwsampler_enabled = !val; + + return count; +} + +static const struct file_operations timer_enabled_fops = { + .read = timer_enabled_read, + .write = timer_enabled_write, +}; + + static int oprofile_create_hwsampling_files(struct super_block *sb, - struct dentry *root) + struct dentry *root) { - struct dentry *hw_dir; + struct dentry *dir; + + dir = oprofilefs_mkdir(sb, root, "timer"); + if (!dir) + return -EINVAL; + + oprofilefs_create_file(sb, dir, "enabled", &timer_enabled_fops); + + if (!hwsampler_available) + return 0; /* reinitialize default values */ - hwsampler_file = 1; + hwsampler_enabled = 1; + counter_config.kernel = 1; + counter_config.user = 1; - hw_dir = oprofilefs_mkdir(sb, root, "hwsampling"); - if (!hw_dir) - return -EINVAL; + if (!force_cpu_type) { + /* + * Create the counter file system. A single virtual + * counter is created which can be used to + * enable/disable hardware sampling dynamically from + * user space. The user space will configure a single + * counter with a single event. The values of 'event' + * and 'unit_mask' are not evaluated by the kernel code + * and can only be set to 0. + */ + + dir = oprofilefs_mkdir(sb, root, "0"); + if (!dir) + return -EINVAL; - oprofilefs_create_file(sb, hw_dir, "hwsampler", &hwsampler_fops); - oprofilefs_create_ulong(sb, hw_dir, "hw_interval", - &oprofile_hw_interval); - oprofilefs_create_ro_ulong(sb, hw_dir, "hw_min_interval", - &oprofile_min_interval); - oprofilefs_create_ro_ulong(sb, hw_dir, "hw_max_interval", - &oprofile_max_interval); - oprofilefs_create_ulong(sb, hw_dir, "hw_sdbt_blocks", - &oprofile_sdbt_blocks); + oprofilefs_create_file(sb, dir, "enabled", &hwsampler_fops); + oprofilefs_create_file(sb, dir, "event", &zero_fops); + oprofilefs_create_file(sb, dir, "count", &hw_interval_fops); + oprofilefs_create_file(sb, dir, "unit_mask", &zero_fops); + oprofilefs_create_file(sb, dir, "kernel", &kernel_fops); + oprofilefs_create_file(sb, dir, "user", &user_fops); + oprofilefs_create_ulong(sb, dir, "hw_sdbt_blocks", + &oprofile_sdbt_blocks); + } else { + /* + * Hardware sampling can be used but the cpu_type is + * forced to timer in order to deal with legacy user + * space tools. The /dev/oprofile/hwsampling fs is + * provided in that case. + */ + dir = oprofilefs_mkdir(sb, root, "hwsampling"); + if (!dir) + return -EINVAL; + + oprofilefs_create_file(sb, dir, "hwsampler", + &hwsampler_fops); + oprofilefs_create_file(sb, dir, "hw_interval", + &hw_interval_fops); + oprofilefs_create_ro_ulong(sb, dir, "hw_min_interval", + &oprofile_min_interval); + oprofilefs_create_ro_ulong(sb, dir, "hw_max_interval", + &oprofile_max_interval); + oprofilefs_create_ulong(sb, dir, "hw_sdbt_blocks", + &oprofile_sdbt_blocks); + } return 0; } static int oprofile_hwsampler_init(struct oprofile_operations *ops) { + /* + * Initialize the timer mode infrastructure as well in order + * to be able to switch back dynamically. oprofile_timer_init + * is not supposed to fail. + */ + if (oprofile_timer_init(ops)) + BUG(); + + memcpy(&timer_ops, ops, sizeof(timer_ops)); + ops->create_files = oprofile_create_hwsampling_files; + + /* + * If the user space tools do not support newer cpu types, + * the force_cpu_type module parameter + * can be used to always return "timer" as cpu type.
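Summarizing the two branches above, the oprofilefs layout this function produces when hardware sampling is available (the timer/enabled file exists in both modes):

	/dev/oprofile/timer/enabled            always, inverse of hwsampler_enabled

	force_cpu_type unset:                  force_cpu_type = timer:
	  0/enabled                              hwsampling/hwsampler
	  0/event      (only 0 accepted)         hwsampling/hw_interval
	  0/count                                hwsampling/hw_min_interval
	  0/unit_mask  (only 0 accepted)         hwsampling/hw_max_interval
	  0/kernel                               hwsampling/hw_sdbt_blocks
	  0/user
	  0/hw_sdbt_blocks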
+ */ + if (force_cpu_type != timer) { + struct cpuid id; + + get_cpu_id (&id); + + switch (id.machine) { + case 0x2097: case 0x2098: ops->cpu_type = "s390/z10"; break; + case 0x2817: case 0x2818: ops->cpu_type = "s390/z196"; break; + default: return -ENODEV; + } + } + if (hwsampler_setup()) return -ENODEV; /* - * create hwsampler files only if hwsampler_setup() succeeds. + * Query the range for the sampling interval from the + * hardware. */ oprofile_min_interval = hwsampler_query_min_interval(); if (oprofile_min_interval == 0) @@ -155,23 +466,17 @@ static int oprofile_hwsampler_init(struct oprofile_operations *ops) if (oprofile_hw_interval > oprofile_max_interval) oprofile_hw_interval = oprofile_max_interval; - if (oprofile_timer_init(ops)) - return -ENODEV; - - printk(KERN_INFO "oprofile: using hardware sampling\n"); - - memcpy(&timer_ops, ops, sizeof(timer_ops)); + printk(KERN_INFO "oprofile: System z hardware sampling " + "facility found.\n"); ops->start = oprofile_hwsampler_start; ops->stop = oprofile_hwsampler_stop; - ops->create_files = oprofile_create_hwsampling_files; return 0; } static void oprofile_hwsampler_exit(void) { - oprofile_timer_exit(); hwsampler_shutdown(); } @@ -182,7 +487,15 @@ int __init oprofile_arch_init(struct oprofile_operations *ops) ops->backtrace = s390_backtrace; #ifdef CONFIG_64BIT - return oprofile_hwsampler_init(ops); + + /* + * -ENODEV is not reported to the caller. The module itself + * will use the timer mode sampling as fallback and this is + * always available. + */ + hwsampler_available = oprofile_hwsampler_init(ops) == 0; + + return 0; #else return -ENODEV; #endif diff --git a/arch/s390/oprofile/op_counter.h b/arch/s390/oprofile/op_counter.h new file mode 100644 index 0000000..1a8d3ca --- /dev/null +++ b/arch/s390/oprofile/op_counter.h @@ -0,0 +1,23 @@ +/** + * arch/s390/oprofile/op_counter.h + * + * Copyright (C) 2011 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Andreas Krebbel (krebbel@linux.vnet.ibm.com) + * + * @remark Copyright 2011 OProfile authors + */ + +#ifndef OP_COUNTER_H +#define OP_COUNTER_H + +struct op_counter_config { + /* `enabled' maps to the hwsampler_file variable. */ + /* `count' maps to the oprofile_hw_interval variable. */ + /* `event' and `unit_mask' are unused. 
*/ + unsigned long kernel; + unsigned long user; +}; + +extern struct op_counter_config counter_config; + +#endif /* OP_COUNTER_H */ diff --git a/arch/x86/include/asm/insn.h b/arch/x86/include/asm/insn.h index 88c765e..74df3f1 100644 --- a/arch/x86/include/asm/insn.h +++ b/arch/x86/include/asm/insn.h @@ -137,6 +137,13 @@ static inline int insn_is_avx(struct insn *insn) return (insn->vex_prefix.value != 0); } +/* Ensure this instruction is decoded completely */ +static inline int insn_complete(struct insn *insn) +{ + return insn->opcode.got && insn->modrm.got && insn->sib.got && + insn->displacement.got && insn->immediate.got; +} + static inline insn_byte_t insn_vex_m_bits(struct insn *insn) { if (insn->vex_prefix.nbytes == 2) /* 2 bytes VEX */ diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h index f61c62f..096c975 100644 --- a/arch/x86/include/asm/perf_event.h +++ b/arch/x86/include/asm/perf_event.h @@ -57,6 +57,7 @@ (1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX)) #define ARCH_PERFMON_BRANCH_MISSES_RETIRED 6 +#define ARCH_PERFMON_EVENTS_COUNT 7 /* * Intel "Architectural Performance Monitoring" CPUID @@ -72,6 +73,19 @@ union cpuid10_eax { unsigned int full; }; +union cpuid10_ebx { + struct { + unsigned int no_unhalted_core_cycles:1; + unsigned int no_instructions_retired:1; + unsigned int no_unhalted_reference_cycles:1; + unsigned int no_llc_reference:1; + unsigned int no_llc_misses:1; + unsigned int no_branch_instruction_retired:1; + unsigned int no_branch_misses_retired:1; + } split; + unsigned int full; +}; + union cpuid10_edx { struct { unsigned int num_counters_fixed:5; @@ -81,6 +95,15 @@ union cpuid10_edx { unsigned int full; }; +struct x86_pmu_capability { + int version; + int num_counters_gp; + int num_counters_fixed; + int bit_width_gp; + int bit_width_fixed; + unsigned int events_mask; + int events_mask_len; +}; /* * Fixed-purpose performance events: @@ -89,23 +112,24 @@ union cpuid10_edx { /* * All 3 fixed-mode PMCs are configured via this single MSR: */ -#define MSR_ARCH_PERFMON_FIXED_CTR_CTRL 0x38d +#define MSR_ARCH_PERFMON_FIXED_CTR_CTRL 0x38d /* * The counts are available in three separate MSRs: */ /* Instr_Retired.Any: */ -#define MSR_ARCH_PERFMON_FIXED_CTR0 0x309 -#define X86_PMC_IDX_FIXED_INSTRUCTIONS (X86_PMC_IDX_FIXED + 0) +#define MSR_ARCH_PERFMON_FIXED_CTR0 0x309 +#define X86_PMC_IDX_FIXED_INSTRUCTIONS (X86_PMC_IDX_FIXED + 0) /* CPU_CLK_Unhalted.Core: */ -#define MSR_ARCH_PERFMON_FIXED_CTR1 0x30a -#define X86_PMC_IDX_FIXED_CPU_CYCLES (X86_PMC_IDX_FIXED + 1) +#define MSR_ARCH_PERFMON_FIXED_CTR1 0x30a +#define X86_PMC_IDX_FIXED_CPU_CYCLES (X86_PMC_IDX_FIXED + 1) /* CPU_CLK_Unhalted.Ref: */ -#define MSR_ARCH_PERFMON_FIXED_CTR2 0x30b -#define X86_PMC_IDX_FIXED_BUS_CYCLES (X86_PMC_IDX_FIXED + 2) +#define MSR_ARCH_PERFMON_FIXED_CTR2 0x30b +#define X86_PMC_IDX_FIXED_REF_CYCLES (X86_PMC_IDX_FIXED + 2) +#define X86_PMC_MSK_FIXED_REF_CYCLES (1ULL << X86_PMC_IDX_FIXED_REF_CYCLES) /* * We model BTS tracing as another fixed-mode PMC. 
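perf_get_x86_pmu_capability() gives other kernel code (KVM's PMU virtualization is the natural consumer) a stable way to query the host PMU instead of issuing CPUID leaf 0xa itself. A hedged sketch of a caller, using the cpuid10_ebx layout above; note that events_mask carries not-available bits, and the !CONFIG_PERF_EVENTS stub zeroes the struct, so version == 0 doubles as a "no architectural PMU" check:

	#include <asm/perf_event.h>

	static bool branch_misses_event_works(void)
	{
		struct x86_pmu_capability cap;
		union cpuid10_ebx ebx;

		perf_get_x86_pmu_capability(&cap);
		if (!cap.version)
			return false;		/* no architectural perfmon */

		ebx.full = cap.events_mask;
		/* a set bit flags the event as NOT available */
		return !ebx.split.no_branch_misses_retired;
	}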
@@ -202,6 +226,7 @@ struct perf_guest_switch_msr { }; extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr); +extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap); #else static inline perf_guest_switch_msr *perf_guest_get_msrs(int *nr) { @@ -209,6 +234,11 @@ static inline perf_guest_switch_msr *perf_guest_get_msrs(int *nr) return NULL; } +static inline void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap) +{ + memset(cap, 0, sizeof(*cap)); +} + static inline void perf_events_lapic_init(void) { } #endif diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 2bda212..5adce10 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -484,18 +484,195 @@ static inline int is_x86_event(struct perf_event *event) return event->pmu == &pmu; } +/* + * Event scheduler state: + * + * Assign events by iterating over all events and counters, beginning + * with the events with the least weight. Keep the current iterator + * state in struct sched_state. + */ +struct sched_state { + int weight; + int event; /* event index */ + int counter; /* counter index */ + int unassigned; /* number of events to be assigned left */ + unsigned long used[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; +}; + +/* Total max is X86_PMC_IDX_MAX, but we are O(n!) limited */ +#define SCHED_STATES_MAX 2 + +struct perf_sched { + int max_weight; + int max_events; + struct event_constraint **constraints; + struct sched_state state; + int saved_states; + struct sched_state saved[SCHED_STATES_MAX]; +}; + +/* + * Initialize the iterator that runs through all events and counters. + */ +static void perf_sched_init(struct perf_sched *sched, struct event_constraint **c, + int num, int wmin, int wmax) +{ + int idx; + + memset(sched, 0, sizeof(*sched)); + sched->max_events = num; + sched->max_weight = wmax; + sched->constraints = c; + + for (idx = 0; idx < num; idx++) { + if (c[idx]->weight == wmin) + break; + } + + sched->state.event = idx; /* start with min weight */ + sched->state.weight = wmin; + sched->state.unassigned = num; +} + +static void perf_sched_save_state(struct perf_sched *sched) +{ + if (WARN_ON_ONCE(sched->saved_states >= SCHED_STATES_MAX)) + return; + + sched->saved[sched->saved_states] = sched->state; + sched->saved_states++; +} + +static bool perf_sched_restore_state(struct perf_sched *sched) +{ + if (!sched->saved_states) + return false; + + sched->saved_states--; + sched->state = sched->saved[sched->saved_states]; + + /* continue with next counter: */ + clear_bit(sched->state.counter++, sched->state.used); + + return true; +} + +/* + * Select a counter for the current event to schedule. Return true on + * success.
+ */ +static bool __perf_sched_find_counter(struct perf_sched *sched) +{ + struct event_constraint *c; + int idx; + + if (!sched->state.unassigned) + return false; + + if (sched->state.event >= sched->max_events) + return false; + + c = sched->constraints[sched->state.event]; + + /* Prefer fixed purpose counters */ + if (x86_pmu.num_counters_fixed) { + idx = X86_PMC_IDX_FIXED; + for_each_set_bit_cont(idx, c->idxmsk, X86_PMC_IDX_MAX) { + if (!__test_and_set_bit(idx, sched->state.used)) + goto done; + } + } + /* Grab the first unused counter starting with idx */ + idx = sched->state.counter; + for_each_set_bit_cont(idx, c->idxmsk, X86_PMC_IDX_FIXED) { + if (!__test_and_set_bit(idx, sched->state.used)) + goto done; + } + + return false; + +done: + sched->state.counter = idx; + + if (c->overlap) + perf_sched_save_state(sched); + + return true; +} + +static bool perf_sched_find_counter(struct perf_sched *sched) +{ + while (!__perf_sched_find_counter(sched)) { + if (!perf_sched_restore_state(sched)) + return false; + } + + return true; +} + +/* + * Go through all unassigned events and find the next one to schedule. + * Take events with the least weight first. Return true on success. + */ +static bool perf_sched_next_event(struct perf_sched *sched) +{ + struct event_constraint *c; + + if (!sched->state.unassigned || !--sched->state.unassigned) + return false; + + do { + /* next event */ + sched->state.event++; + if (sched->state.event >= sched->max_events) { + /* next weight */ + sched->state.event = 0; + sched->state.weight++; + if (sched->state.weight > sched->max_weight) + return false; + } + c = sched->constraints[sched->state.event]; + } while (c->weight != sched->state.weight); + + sched->state.counter = 0; /* start with first counter */ + + return true; +} + +/* + * Assign a counter for each event. + */ +static int perf_assign_events(struct event_constraint **constraints, int n, + int wmin, int wmax, int *assign) +{ + struct perf_sched sched; + + perf_sched_init(&sched, constraints, n, wmin, wmax); + + do { + if (!perf_sched_find_counter(&sched)) + break; /* failed */ + if (assign) + assign[sched.state.event] = sched.state.counter; + } while (perf_sched_next_event(&sched)); + + return sched.state.unassigned; +} + int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) { struct event_constraint *c, *constraints[X86_PMC_IDX_MAX]; unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; - int i, j, w, wmax, num = 0; + int i, wmin, wmax, num = 0; struct hw_perf_event *hwc; bitmap_zero(used_mask, X86_PMC_IDX_MAX); - for (i = 0; i < n; i++) { + for (i = 0, wmin = X86_PMC_IDX_MAX, wmax = 0; i < n; i++) { c = x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]); constraints[i] = c; + wmin = min(wmin, c->weight); + wmax = max(wmax, c->weight); } /* @@ -521,60 +698,12 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) if (assign) assign[i] = hwc->idx; } - if (i == n) - goto done; - - /* - * begin slow path - */ - - bitmap_zero(used_mask, X86_PMC_IDX_MAX); - /* - * weight = number of possible counters - * - * 1 = most constrained, only works on one counter - * wmax = least constrained, works on any counter - * - * assign events to counters starting with most - * constrained events. 
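The gain over the removed greedy pass is easiest to see on a concrete case, sketched here with the AMD family 15h counter masks this series converts below (bit positions are counter indices):

	/*
	 * Four events, six counters:
	 *
	 *   e0:       idxmsk 0x09, counters {0,3},   weight 2
	 *   e1,e2,e3: idxmsk 0x07, counters {0,1,2}, weight 3
	 *
	 * Weight order schedules e0 first, and the greedy choice is its
	 * lowest set bit, counter 0. That leaves only counters {1,2} for
	 * the three weight-3 events, so the old single-pass loop failed a
	 * set that clearly fits. When e0's constraint is declared with
	 * EVENT_CONSTRAINT_OVERLAP (see perf_event.h below),
	 * perf_sched_save_state() records the choice of counter 0; the
	 * failed pass then hits perf_sched_restore_state(), e0 is retried
	 * on counter 3, and e1-e3 take counters 0, 1 and 2.
	 */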
- */ - wmax = x86_pmu.num_counters; + /* slow path */ + if (i != n) + num = perf_assign_events(constraints, n, wmin, wmax, assign); /* - * when fixed event counters are present, - * wmax is incremented by 1 to account - * for one more choice - */ - if (x86_pmu.num_counters_fixed) - wmax++; - - for (w = 1, num = n; num && w <= wmax; w++) { - /* for each event */ - for (i = 0; num && i < n; i++) { - c = constraints[i]; - hwc = &cpuc->event_list[i]->hw; - - if (c->weight != w) - continue; - - for_each_set_bit(j, c->idxmsk, X86_PMC_IDX_MAX) { - if (!test_bit(j, used_mask)) - break; - } - - if (j == X86_PMC_IDX_MAX) - break; - - __set_bit(j, used_mask); - - if (assign) - assign[i] = j; - num--; - } - } -done: - /* * scheduling failed or is just a simulation, * free resources if necessary */ @@ -1119,6 +1248,7 @@ static void __init pmu_check_apic(void) static int __init init_hw_perf_events(void) { + struct x86_pmu_quirk *quirk; struct event_constraint *c; int err; @@ -1147,8 +1277,8 @@ static int __init init_hw_perf_events(void) pr_cont("%s PMU driver.\n", x86_pmu.name); - if (x86_pmu.quirks) - x86_pmu.quirks(); + for (quirk = x86_pmu.quirks; quirk; quirk = quirk->next) + quirk->func(); if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) { WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!", @@ -1171,12 +1301,18 @@ static int __init init_hw_perf_events(void) unconstrained = (struct event_constraint) __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1, - 0, x86_pmu.num_counters); + 0, x86_pmu.num_counters, 0); if (x86_pmu.event_constraints) { + /* + * event on fixed counter2 (REF_CYCLES) only works on this + * counter, so do not extend mask to generic counters + */ for_each_event_constraint(c, x86_pmu.event_constraints) { - if (c->cmask != X86_RAW_EVENT_MASK) + if (c->cmask != X86_RAW_EVENT_MASK + || c->idxmsk64 == X86_PMC_MSK_FIXED_REF_CYCLES) { continue; + } c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1; c->weight += x86_pmu.num_counters; @@ -1566,3 +1702,15 @@ unsigned long perf_misc_flags(struct pt_regs *regs) return misc; } + +void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap) +{ + cap->version = x86_pmu.version; + cap->num_counters_gp = x86_pmu.num_counters; + cap->num_counters_fixed = x86_pmu.num_counters_fixed; + cap->bit_width_gp = x86_pmu.cntval_bits; + cap->bit_width_fixed = x86_pmu.cntval_bits; + cap->events_mask = (unsigned int)x86_pmu.events_maskl; + cap->events_mask_len = x86_pmu.events_mask_len; +} +EXPORT_SYMBOL_GPL(perf_get_x86_pmu_capability); diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h index b9698d4..8944062 100644 --- a/arch/x86/kernel/cpu/perf_event.h +++ b/arch/x86/kernel/cpu/perf_event.h @@ -45,6 +45,7 @@ struct event_constraint { u64 code; u64 cmask; int weight; + int overlap; }; struct amd_nb { @@ -151,15 +152,40 @@ struct cpu_hw_events { void *kfree_on_online; }; -#define __EVENT_CONSTRAINT(c, n, m, w) {\ +#define __EVENT_CONSTRAINT(c, n, m, w, o) {\ { .idxmsk64 = (n) }, \ .code = (c), \ .cmask = (m), \ .weight = (w), \ + .overlap = (o), \ } #define EVENT_CONSTRAINT(c, n, m) \ - __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n)) + __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 0) + +/* + * The overlap flag marks event constraints with overlapping counter + * masks. 
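The REF_CYCLES special-casing above exists because the new generic ref-cycles event (the 0x0300 pseudo-encoding below) lives only on fixed counter 2, so at most one such event can be scheduled per CPU. A hedged user-space sketch counting it for the current task (assumes headers from this kernel exporting PERF_COUNT_HW_REF_CPU_CYCLES; error handling trimmed):

	#include <linux/perf_event.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	int main(void)
	{
		struct perf_event_attr attr;
		uint64_t count = 0;
		volatile unsigned long spin;
		int fd;

		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		attr.type = PERF_TYPE_HARDWARE;
		attr.config = PERF_COUNT_HW_REF_CPU_CYCLES;	/* 0x0300 */

		/* this task, any CPU, no group, no flags; counts at once */
		fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
		if (fd < 0)
			return 1;

		for (spin = 0; spin < 100000000UL; spin++)
			;				/* work to measure */

		read(fd, &count, sizeof(count));
		printf("ref-cycles: %llu\n", (unsigned long long)count);
		close(fd);
		return 0;
	}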
This is the case if the counter mask of such an event is not + * a subset of any other counter mask of a constraint with an equal or + * higher weight, e.g.: + * + * c_overlaps = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0); + * c_another1 = EVENT_CONSTRAINT(0, 0x07, 0); + * c_another2 = EVENT_CONSTRAINT(0, 0x38, 0); + * + * The event scheduler may not select the correct counter in the first + * cycle because it needs to know which subsequent events will be + * scheduled. It may fail to schedule the events then. So we set the + * overlap flag for such constraints to give the scheduler a hint which + * events to select for counter rescheduling. + * + * Care must be taken as the rescheduling algorithm is O(n!) which + * will increase scheduling cycles for an over-committed system + * dramatically. The number of such EVENT_CONSTRAINT_OVERLAP() macros + * and their counter masks must be kept at a minimum. + */ +#define EVENT_CONSTRAINT_OVERLAP(c, n, m) \ + __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 1) /* * Constraint on the Event code. @@ -235,6 +261,11 @@ union perf_capabilities { u64 capabilities; }; +struct x86_pmu_quirk { + struct x86_pmu_quirk *next; + void (*func)(void); +}; + /* * struct x86_pmu - generic x86 pmu */ @@ -259,6 +290,11 @@ struct x86_pmu { int num_counters_fixed; int cntval_bits; u64 cntval_mask; + union { + unsigned long events_maskl; + unsigned long events_mask[BITS_TO_LONGS(ARCH_PERFMON_EVENTS_COUNT)]; + }; + int events_mask_len; int apic; u64 max_period; struct event_constraint * @@ -268,7 +304,7 @@ struct x86_pmu { void (*put_event_constraints)(struct cpu_hw_events *cpuc, struct perf_event *event); struct event_constraint *event_constraints; - void (*quirks)(void); + struct x86_pmu_quirk *quirks; int perfctr_second_write; int (*cpu_prepare)(int cpu); @@ -309,6 +345,15 @@ struct x86_pmu { struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr); }; +#define x86_add_quirk(func_) \ +do { \ + static struct x86_pmu_quirk __quirk __initdata = { \ + .func = func_, \ + }; \ + __quirk.next = x86_pmu.quirks; \ + x86_pmu.quirks = &__quirk; \ +} while (0) + #define ERF_NO_HT_SHARING 1 #define ERF_HAS_RSP_1 2 diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c index aeefd45..0397b23 100644 --- a/arch/x86/kernel/cpu/perf_event_amd.c +++ b/arch/x86/kernel/cpu/perf_event_amd.c @@ -492,7 +492,7 @@ static __initconst const struct x86_pmu amd_pmu = { static struct event_constraint amd_f15_PMC0 = EVENT_CONSTRAINT(0, 0x01, 0); static struct event_constraint amd_f15_PMC20 = EVENT_CONSTRAINT(0, 0x07, 0); static struct event_constraint amd_f15_PMC3 = EVENT_CONSTRAINT(0, 0x08, 0); -static struct event_constraint amd_f15_PMC30 = EVENT_CONSTRAINT(0, 0x09, 0); +static struct event_constraint amd_f15_PMC30 = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0); static struct event_constraint amd_f15_PMC50 = EVENT_CONSTRAINT(0, 0x3F, 0); static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0); diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index 121f1be..3bd37bd 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -28,6 +28,7 @@ static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly = [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4, [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5, [PERF_COUNT_HW_BUS_CYCLES] = 0x013c, + [PERF_COUNT_HW_REF_CPU_CYCLES] = 0x0300, /* pseudo-encoding */ }; static struct event_constraint intel_core_event_constraints[] __read_mostly = @@ -45,12 +46,7 @@
static struct event_constraint intel_core2_event_constraints[] __read_mostly = { FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ - /* - * Core2 has Fixed Counter 2 listed as CPU_CLK_UNHALTED.REF and event - * 0x013c as CPU_CLK_UNHALTED.BUS and specifies there is a fixed - * ratio between these counters. - */ - /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */ + FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */ INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */ INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */ @@ -68,7 +64,7 @@ static struct event_constraint intel_nehalem_event_constraints[] __read_mostly = { FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ - /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */ + FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */ INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */ INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */ @@ -90,7 +86,7 @@ static struct event_constraint intel_westmere_event_constraints[] __read_mostly { FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ - /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */ + FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */ INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */ INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */ @@ -102,7 +98,7 @@ static struct event_constraint intel_snb_event_constraints[] __read_mostly = { FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ - /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */ + FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */ INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */ INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */ @@ -125,7 +121,7 @@ static struct event_constraint intel_gen_event_constraints[] __read_mostly = { FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ - /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */ + FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ EVENT_CONSTRAINT_END }; @@ -1519,7 +1515,7 @@ static __initconst const struct x86_pmu intel_pmu = { .guest_get_msrs = intel_guest_get_msrs, }; -static void intel_clovertown_quirks(void) +static __init void intel_clovertown_quirk(void) { /* * PEBS is unreliable due to: @@ -1545,19 +1541,60 @@ static void intel_clovertown_quirks(void) x86_pmu.pebs_constraints = NULL; } -static void intel_sandybridge_quirks(void) +static __init void intel_sandybridge_quirk(void) { printk(KERN_WARNING "PEBS disabled due to CPU errata.\n"); x86_pmu.pebs = 0; x86_pmu.pebs_constraints = NULL; } +static const struct { int id; char *name; } intel_arch_events_map[] __initconst = { + { PERF_COUNT_HW_CPU_CYCLES, "cpu cycles" }, + { PERF_COUNT_HW_INSTRUCTIONS, "instructions" }, + { PERF_COUNT_HW_BUS_CYCLES, "bus cycles" }, + { PERF_COUNT_HW_CACHE_REFERENCES, "cache references" }, + { PERF_COUNT_HW_CACHE_MISSES, "cache misses" }, + { PERF_COUNT_HW_BRANCH_INSTRUCTIONS, 
"branch instructions" }, + { PERF_COUNT_HW_BRANCH_MISSES, "branch misses" }, +}; + +static __init void intel_arch_events_quirk(void) +{ + int bit; + + /* disable event that reported as not presend by cpuid */ + for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(intel_arch_events_map)) { + intel_perfmon_event_map[intel_arch_events_map[bit].id] = 0; + printk(KERN_WARNING "CPUID marked event: \'%s\' unavailable\n", + intel_arch_events_map[bit].name); + } +} + +static __init void intel_nehalem_quirk(void) +{ + union cpuid10_ebx ebx; + + ebx.full = x86_pmu.events_maskl; + if (ebx.split.no_branch_misses_retired) { + /* + * Erratum AAJ80 detected, we work it around by using + * the BR_MISP_EXEC.ANY event. This will over-count + * branch-misses, but it's still much better than the + * architectural event which is often completely bogus: + */ + intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89; + ebx.split.no_branch_misses_retired = 0; + x86_pmu.events_maskl = ebx.full; + printk(KERN_INFO "CPU erratum AAJ80 worked around\n"); + } +} + __init int intel_pmu_init(void) { union cpuid10_edx edx; union cpuid10_eax eax; + union cpuid10_ebx ebx; unsigned int unused; - unsigned int ebx; int version; if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) { @@ -1574,8 +1611,8 @@ __init int intel_pmu_init(void) * Check whether the Architectural PerfMon supports * Branch Misses Retired hw_event or not. */ - cpuid(10, &eax.full, &ebx, &unused, &edx.full); - if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED) + cpuid(10, &eax.full, &ebx.full, &unused, &edx.full); + if (eax.split.mask_length < ARCH_PERFMON_EVENTS_COUNT) return -ENODEV; version = eax.split.version_id; @@ -1589,6 +1626,9 @@ __init int intel_pmu_init(void) x86_pmu.cntval_bits = eax.split.bit_width; x86_pmu.cntval_mask = (1ULL << eax.split.bit_width) - 1; + x86_pmu.events_maskl = ebx.full; + x86_pmu.events_mask_len = eax.split.mask_length; + /* * Quirk: v2 perfmon does not report fixed-purpose events, so * assume at least 3 events: @@ -1608,6 +1648,8 @@ __init int intel_pmu_init(void) intel_ds_init(); + x86_add_quirk(intel_arch_events_quirk); /* Install first, so it runs last */ + /* * Install the hw-cache-events table: */ @@ -1617,7 +1659,7 @@ __init int intel_pmu_init(void) break; case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */ - x86_pmu.quirks = intel_clovertown_quirks; + x86_add_quirk(intel_clovertown_quirk); case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */ case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */ case 29: /* six-core 45 nm xeon "Dunnington" */ @@ -1651,17 +1693,8 @@ __init int intel_pmu_init(void) /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */ intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x1803fb1; - if (ebx & 0x40) { - /* - * Erratum AAJ80 detected, we work it around by using - * the BR_MISP_EXEC.ANY event. 
This will over-count - branch-misses, but it's still much better than the - architectural event which is often completely bogus: - */ - intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89; + x86_add_quirk(intel_nehalem_quirk); - pr_cont("erratum AAJ80 worked around, "); } pr_cont("Nehalem events, "); break; @@ -1701,7 +1734,7 @@ __init int intel_pmu_init(void) break; case 42: /* SandyBridge */ - x86_pmu.quirks = intel_sandybridge_quirks; + x86_add_quirk(intel_sandybridge_quirk); case 45: /* SandyBridge, "Romely-EP" */ memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, sizeof(hw_cache_event_ids)); @@ -1738,5 +1771,6 @@ __init int intel_pmu_init(void) break; } } + return 0; } diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c index ea9d5f2f..2889b3d 100644 --- a/arch/x86/kernel/jump_label.c +++ b/arch/x86/kernel/jump_label.c @@ -50,7 +50,7 @@ void arch_jump_label_transform(struct jump_entry *entry, put_online_cpus(); } -void arch_jump_label_transform_static(struct jump_entry *entry, +__init_or_module void arch_jump_label_transform_static(struct jump_entry *entry, enum jump_label_type type) { __jump_label_transform(entry, type, text_poke_early); } diff --git a/arch/x86/lib/inat.c b/arch/x86/lib/inat.c index 46fc4ee..88ad5fb 100644 --- a/arch/x86/lib/inat.c +++ b/arch/x86/lib/inat.c @@ -82,9 +82,16 @@ insn_attr_t inat_get_avx_attribute(insn_byte_t opcode, insn_byte_t vex_m, const insn_attr_t *table; if (vex_m > X86_VEX_M_MAX || vex_p > INAT_LSTPFX_MAX) return 0; - table = inat_avx_tables[vex_m][vex_p]; + /* First, check the master table */ + table = inat_avx_tables[vex_m][0]; if (!table) return 0; + if (!inat_is_group(table[opcode]) && vex_p) { + /* If this is not a group, get the attribute directly */ + table = inat_avx_tables[vex_m][vex_p]; + if (!table) + return 0; + } return table[opcode]; } diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c index 374562e..5a1f9f3 100644 --- a/arch/x86/lib/insn.c +++ b/arch/x86/lib/insn.c @@ -202,7 +202,7 @@ void insn_get_opcode(struct insn *insn) m = insn_vex_m_bits(insn); p = insn_vex_p_bits(insn); insn->attr = inat_get_avx_attribute(op, m, p); - if (!inat_accept_vex(insn->attr)) + if (!inat_accept_vex(insn->attr) && !inat_is_group(insn->attr)) insn->attr = 0; /* This instruction is bad */ goto end; /* VEX has only 1 byte for opcode */ } @@ -249,6 +249,8 @@ void insn_get_modrm(struct insn *insn) pfx = insn_last_prefix(insn); insn->attr = inat_get_group_attribute(mod, pfx, insn->attr); + if (insn_is_avx(insn) && !inat_accept_vex(insn->attr)) + insn->attr = 0; /* This is bad */ } } diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt index a793da5..5b83c51 100644 --- a/arch/x86/lib/x86-opcode-map.txt +++ b/arch/x86/lib/x86-opcode-map.txt @@ -1,5 +1,11 @@ # x86 Opcode Maps # +# This is (mostly) based on the following documentation: +# - Intel(R) 64 and IA-32 Architectures Software Developer's Manual Vol.2 +# (#325383-040US, October 2011) +# - Intel(R) Advanced Vector Extensions Programming Reference +# (#319433-011, June 2011). +# #<Opcode maps> # Table: table-name # Referrer: escaped-name @@ -15,10 +21,13 @@ # EndTable # # AVX Superscripts -# (VEX): this opcode can accept VEX prefix. -# (oVEX): this opcode requires VEX prefix. -# (o128): this opcode only supports 128bit VEX. -# (o256): this opcode only supports 256bit VEX. +# (v): this opcode requires VEX prefix. +# (v1): this opcode only supports 128bit VEX.
+# +# Last Prefix Superscripts +# - (66): the last prefix is 0x66 +# - (F3): the last prefix is 0xF3 +# - (F2): the last prefix is 0xF2 # Table: one byte opcode @@ -199,8 +208,8 @@ a0: MOV AL,Ob a1: MOV rAX,Ov a2: MOV Ob,AL a3: MOV Ov,rAX -a4: MOVS/B Xb,Yb -a5: MOVS/W/D/Q Xv,Yv +a4: MOVS/B Yb,Xb +a5: MOVS/W/D/Q Yv,Xv a6: CMPS/B Xb,Yb a7: CMPS/W/D Xv,Yv a8: TEST AL,Ib @@ -233,8 +242,8 @@ c0: Grp2 Eb,Ib (1A) c1: Grp2 Ev,Ib (1A) c2: RETN Iw (f64) c3: RETN -c4: LES Gz,Mp (i64) | 3bytes-VEX (Prefix) -c5: LDS Gz,Mp (i64) | 2bytes-VEX (Prefix) +c4: LES Gz,Mp (i64) | VEX+2byte (Prefix) +c5: LDS Gz,Mp (i64) | VEX+1byte (Prefix) c6: Grp11 Eb,Ib (1A) c7: Grp11 Ev,Iz (1A) c8: ENTER Iw,Ib @@ -320,14 +329,19 @@ AVXcode: 1 # 3DNow! uses the last imm byte as opcode extension. 0f: 3DNow! Pq,Qq,Ib # 0x0f 0x10-0x1f -10: movups Vps,Wps (VEX) | movss Vss,Wss (F3),(VEX),(o128) | movupd Vpd,Wpd (66),(VEX) | movsd Vsd,Wsd (F2),(VEX),(o128) -11: movups Wps,Vps (VEX) | movss Wss,Vss (F3),(VEX),(o128) | movupd Wpd,Vpd (66),(VEX) | movsd Wsd,Vsd (F2),(VEX),(o128) -12: movlps Vq,Mq (VEX),(o128) | movlpd Vq,Mq (66),(VEX),(o128) | movhlps Vq,Uq (VEX),(o128) | movddup Vq,Wq (F2),(VEX) | movsldup Vq,Wq (F3),(VEX) -13: mpvlps Mq,Vq (VEX),(o128) | movlpd Mq,Vq (66),(VEX),(o128) -14: unpcklps Vps,Wq (VEX) | unpcklpd Vpd,Wq (66),(VEX) -15: unpckhps Vps,Wq (VEX) | unpckhpd Vpd,Wq (66),(VEX) -16: movhps Vq,Mq (VEX),(o128) | movhpd Vq,Mq (66),(VEX),(o128) | movlsps Vq,Uq (VEX),(o128) | movshdup Vq,Wq (F3),(VEX) -17: movhps Mq,Vq (VEX),(o128) | movhpd Mq,Vq (66),(VEX),(o128) +# NOTE: According to the Intel SDM opcode map, vmovups and vmovupd have no operands +# but they actually have operands. Also, vmovss and vmovsd only accept 128bit. +# MOVSS/MOVSD has too many forms (3) in the SDM. This map just shows a typical form.
+# Many AVX instructions lack v1 superscript, according to Intel AVX-Programming +# Reference A.1 +10: vmovups Vps,Wps | vmovupd Vpd,Wpd (66) | vmovss Vx,Hx,Wss (F3),(v1) | vmovsd Vx,Hx,Wsd (F2),(v1) +11: vmovups Wps,Vps | vmovupd Wpd,Vpd (66) | vmovss Wss,Hx,Vss (F3),(v1) | vmovsd Wsd,Hx,Vsd (F2),(v1) +12: vmovlps Vq,Hq,Mq (v1) | vmovhlps Vq,Hq,Uq (v1) | vmovlpd Vq,Hq,Mq (66),(v1) | vmovsldup Vx,Wx (F3) | vmovddup Vx,Wx (F2) +13: vmovlps Mq,Vq (v1) | vmovlpd Mq,Vq (66),(v1) +14: vunpcklps Vx,Hx,Wx | vunpcklpd Vx,Hx,Wx (66) +15: vunpckhps Vx,Hx,Wx | vunpckhpd Vx,Hx,Wx (66) +16: vmovhps Vdq,Hq,Mq (v1) | vmovlhps Vdq,Hq,Uq (v1) | vmovhpd Vdq,Hq,Mq (66),(v1) | vmovshdup Vx,Wx (F3) +17: vmovhps Mq,Vq (v1) | vmovhpd Mq,Vq (66),(v1) 18: Grp16 (1A) 19: 1a: @@ -345,14 +359,14 @@ AVXcode: 1 25: 26: 27: -28: movaps Vps,Wps (VEX) | movapd Vpd,Wpd (66),(VEX) -29: movaps Wps,Vps (VEX) | movapd Wpd,Vpd (66),(VEX) -2a: cvtpi2ps Vps,Qpi | cvtsi2ss Vss,Ed/q (F3),(VEX),(o128) | cvtpi2pd Vpd,Qpi (66) | cvtsi2sd Vsd,Ed/q (F2),(VEX),(o128) -2b: movntps Mps,Vps (VEX) | movntpd Mpd,Vpd (66),(VEX) -2c: cvttps2pi Ppi,Wps | cvttss2si Gd/q,Wss (F3),(VEX),(o128) | cvttpd2pi Ppi,Wpd (66) | cvttsd2si Gd/q,Wsd (F2),(VEX),(o128) -2d: cvtps2pi Ppi,Wps | cvtss2si Gd/q,Wss (F3),(VEX),(o128) | cvtpd2pi Qpi,Wpd (66) | cvtsd2si Gd/q,Wsd (F2),(VEX),(o128) -2e: ucomiss Vss,Wss (VEX),(o128) | ucomisd Vsd,Wsd (66),(VEX),(o128) -2f: comiss Vss,Wss (VEX),(o128) | comisd Vsd,Wsd (66),(VEX),(o128) +28: vmovaps Vps,Wps | vmovapd Vpd,Wpd (66) +29: vmovaps Wps,Vps | vmovapd Wpd,Vpd (66) +2a: cvtpi2ps Vps,Qpi | cvtpi2pd Vpd,Qpi (66) | vcvtsi2ss Vss,Hss,Ey (F3),(v1) | vcvtsi2sd Vsd,Hsd,Ey (F2),(v1) +2b: vmovntps Mps,Vps | vmovntpd Mpd,Vpd (66) +2c: cvttps2pi Ppi,Wps | cvttpd2pi Ppi,Wpd (66) | vcvttss2si Gy,Wss (F3),(v1) | vcvttsd2si Gy,Wsd (F2),(v1) +2d: cvtps2pi Ppi,Wps | cvtpd2pi Qpi,Wpd (66) | vcvtss2si Gy,Wss (F3),(v1) | vcvtsd2si Gy,Wsd (F2),(v1) +2e: vucomiss Vss,Wss (v1) | vucomisd Vsd,Wsd (66),(v1) +2f: vcomiss Vss,Wss (v1) | vcomisd Vsd,Wsd (66),(v1) # 0x0f 0x30-0x3f 30: WRMSR 31: RDTSC @@ -388,65 +402,66 @@ AVXcode: 1 4e: CMOVLE/NG Gv,Ev 4f: CMOVNLE/G Gv,Ev # 0x0f 0x50-0x5f -50: movmskps Gd/q,Ups (VEX) | movmskpd Gd/q,Upd (66),(VEX) -51: sqrtps Vps,Wps (VEX) | sqrtss Vss,Wss (F3),(VEX),(o128) | sqrtpd Vpd,Wpd (66),(VEX) | sqrtsd Vsd,Wsd (F2),(VEX),(o128) -52: rsqrtps Vps,Wps (VEX) | rsqrtss Vss,Wss (F3),(VEX),(o128) -53: rcpps Vps,Wps (VEX) | rcpss Vss,Wss (F3),(VEX),(o128) -54: andps Vps,Wps (VEX) | andpd Vpd,Wpd (66),(VEX) -55: andnps Vps,Wps (VEX) | andnpd Vpd,Wpd (66),(VEX) -56: orps Vps,Wps (VEX) | orpd Vpd,Wpd (66),(VEX) -57: xorps Vps,Wps (VEX) | xorpd Vpd,Wpd (66),(VEX) -58: addps Vps,Wps (VEX) | addss Vss,Wss (F3),(VEX),(o128) | addpd Vpd,Wpd (66),(VEX) | addsd Vsd,Wsd (F2),(VEX),(o128) -59: mulps Vps,Wps (VEX) | mulss Vss,Wss (F3),(VEX),(o128) | mulpd Vpd,Wpd (66),(VEX) | mulsd Vsd,Wsd (F2),(VEX),(o128) -5a: cvtps2pd Vpd,Wps (VEX) | cvtss2sd Vsd,Wss (F3),(VEX),(o128) | cvtpd2ps Vps,Wpd (66),(VEX) | cvtsd2ss Vsd,Wsd (F2),(VEX),(o128) -5b: cvtdq2ps Vps,Wdq (VEX) | cvtps2dq Vdq,Wps (66),(VEX) | cvttps2dq Vdq,Wps (F3),(VEX) -5c: subps Vps,Wps (VEX) | subss Vss,Wss (F3),(VEX),(o128) | subpd Vpd,Wpd (66),(VEX) | subsd Vsd,Wsd (F2),(VEX),(o128) -5d: minps Vps,Wps (VEX) | minss Vss,Wss (F3),(VEX),(o128) | minpd Vpd,Wpd (66),(VEX) | minsd Vsd,Wsd (F2),(VEX),(o128) -5e: divps Vps,Wps (VEX) | divss Vss,Wss (F3),(VEX),(o128) | divpd Vpd,Wpd (66),(VEX) | divsd Vsd,Wsd (F2),(VEX),(o128) -5f: maxps Vps,Wps (VEX) | maxss Vss,Wss
(F3),(VEX),(o128) | maxpd Vpd,Wpd (66),(VEX) | maxsd Vsd,Wsd (F2),(VEX),(o128) +50: vmovmskps Gy,Ups | vmovmskpd Gy,Upd (66) +51: vsqrtps Vps,Wps | vsqrtpd Vpd,Wpd (66) | vsqrtss Vss,Hss,Wss (F3),(v1) | vsqrtsd Vsd,Hsd,Wsd (F2),(v1) +52: vrsqrtps Vps,Wps | vrsqrtss Vss,Hss,Wss (F3),(v1) +53: vrcpps Vps,Wps | vrcpss Vss,Hss,Wss (F3),(v1) +54: vandps Vps,Hps,Wps | vandpd Vpd,Hpd,Wpd (66) +55: vandnps Vps,Hps,Wps | vandnpd Vpd,Hpd,Wpd (66) +56: vorps Vps,Hps,Wps | vorpd Vpd,Hpd,Wpd (66) +57: vxorps Vps,Hps,Wps | vxorpd Vpd,Hpd,Wpd (66) +58: vaddps Vps,Hps,Wps | vaddpd Vpd,Hpd,Wpd (66) | vaddss Vss,Hss,Wss (F3),(v1) | vaddsd Vsd,Hsd,Wsd (F2),(v1) +59: vmulps Vps,Hps,Wps | vmulpd Vpd,Hpd,Wpd (66) | vmulss Vss,Hss,Wss (F3),(v1) | vmulsd Vsd,Hsd,Wsd (F2),(v1) +5a: vcvtps2pd Vpd,Wps | vcvtpd2ps Vps,Wpd (66) | vcvtss2sd Vsd,Hx,Wss (F3),(v1) | vcvtsd2ss Vss,Hx,Wsd (F2),(v1) +5b: vcvtdq2ps Vps,Wdq | vcvtps2dq Vdq,Wps (66) | vcvttps2dq Vdq,Wps (F3) +5c: vsubps Vps,Hps,Wps | vsubpd Vpd,Hpd,Wpd (66) | vsubss Vss,Hss,Wss (F3),(v1) | vsubsd Vsd,Hsd,Wsd (F2),(v1) +5d: vminps Vps,Hps,Wps | vminpd Vpd,Hpd,Wpd (66) | vminss Vss,Hss,Wss (F3),(v1) | vminsd Vsd,Hsd,Wsd (F2),(v1) +5e: vdivps Vps,Hps,Wps | vdivpd Vpd,Hpd,Wpd (66) | vdivss Vss,Hss,Wss (F3),(v1) | vdivsd Vsd,Hsd,Wsd (F2),(v1) +5f: vmaxps Vps,Hps,Wps | vmaxpd Vpd,Hpd,Wpd (66) | vmaxss Vss,Hss,Wss (F3),(v1) | vmaxsd Vsd,Hsd,Wsd (F2),(v1) # 0x0f 0x60-0x6f -60: punpcklbw Pq,Qd | punpcklbw Vdq,Wdq (66),(VEX),(o128) -61: punpcklwd Pq,Qd | punpcklwd Vdq,Wdq (66),(VEX),(o128) -62: punpckldq Pq,Qd | punpckldq Vdq,Wdq (66),(VEX),(o128) -63: packsswb Pq,Qq | packsswb Vdq,Wdq (66),(VEX),(o128) -64: pcmpgtb Pq,Qq | pcmpgtb Vdq,Wdq (66),(VEX),(o128) -65: pcmpgtw Pq,Qq | pcmpgtw Vdq,Wdq (66),(VEX),(o128) -66: pcmpgtd Pq,Qq | pcmpgtd Vdq,Wdq (66),(VEX),(o128) -67: packuswb Pq,Qq | packuswb Vdq,Wdq (66),(VEX),(o128) -68: punpckhbw Pq,Qd | punpckhbw Vdq,Wdq (66),(VEX),(o128) -69: punpckhwd Pq,Qd | punpckhwd Vdq,Wdq (66),(VEX),(o128) -6a: punpckhdq Pq,Qd | punpckhdq Vdq,Wdq (66),(VEX),(o128) -6b: packssdw Pq,Qd | packssdw Vdq,Wdq (66),(VEX),(o128) -6c: punpcklqdq Vdq,Wdq (66),(VEX),(o128) -6d: punpckhqdq Vdq,Wdq (66),(VEX),(o128) -6e: movd/q/ Pd,Ed/q | movd/q Vdq,Ed/q (66),(VEX),(o128) -6f: movq Pq,Qq | movdqa Vdq,Wdq (66),(VEX) | movdqu Vdq,Wdq (F3),(VEX) +60: punpcklbw Pq,Qd | vpunpcklbw Vx,Hx,Wx (66),(v1) +61: punpcklwd Pq,Qd | vpunpcklwd Vx,Hx,Wx (66),(v1) +62: punpckldq Pq,Qd | vpunpckldq Vx,Hx,Wx (66),(v1) +63: packsswb Pq,Qq | vpacksswb Vx,Hx,Wx (66),(v1) +64: pcmpgtb Pq,Qq | vpcmpgtb Vx,Hx,Wx (66),(v1) +65: pcmpgtw Pq,Qq | vpcmpgtw Vx,Hx,Wx (66),(v1) +66: pcmpgtd Pq,Qq | vpcmpgtd Vx,Hx,Wx (66),(v1) +67: packuswb Pq,Qq | vpackuswb Vx,Hx,Wx (66),(v1) +68: punpckhbw Pq,Qd | vpunpckhbw Vx,Hx,Wx (66),(v1) +69: punpckhwd Pq,Qd | vpunpckhwd Vx,Hx,Wx (66),(v1) +6a: punpckhdq Pq,Qd | vpunpckhdq Vx,Hx,Wx (66),(v1) +6b: packssdw Pq,Qd | vpackssdw Vx,Hx,Wx (66),(v1) +6c: vpunpcklqdq Vx,Hx,Wx (66),(v1) +6d: vpunpckhqdq Vx,Hx,Wx (66),(v1) +6e: movd/q Pd,Ey | vmovd/q Vy,Ey (66),(v1) +6f: movq Pq,Qq | vmovdqa Vx,Wx (66) | vmovdqu Vx,Wx (F3) # 0x0f 0x70-0x7f -70: pshufw Pq,Qq,Ib | pshufd Vdq,Wdq,Ib (66),(VEX),(o128) | pshufhw Vdq,Wdq,Ib (F3),(VEX),(o128) | pshuflw VdqWdq,Ib (F2),(VEX),(o128) +70: pshufw Pq,Qq,Ib | vpshufd Vx,Wx,Ib (66),(v1) | vpshufhw Vx,Wx,Ib (F3),(v1) | vpshuflw Vx,Wx,Ib (F2),(v1) 71: Grp12 (1A) 72: Grp13 (1A) 73: Grp14 (1A) -74: pcmpeqb Pq,Qq | pcmpeqb Vdq,Wdq (66),(VEX),(o128) -75: pcmpeqw Pq,Qq | pcmpeqw Vdq,Wdq (66),(VEX),(o128) -76: pcmpeqd Pq,Qq | 
pcmpeqd Vdq,Wdq (66),(VEX),(o128) -77: emms/vzeroupper/vzeroall (VEX) -78: VMREAD Ed/q,Gd/q -79: VMWRITE Gd/q,Ed/q +74: pcmpeqb Pq,Qq | vpcmpeqb Vx,Hx,Wx (66),(v1) +75: pcmpeqw Pq,Qq | vpcmpeqw Vx,Hx,Wx (66),(v1) +76: pcmpeqd Pq,Qq | vpcmpeqd Vx,Hx,Wx (66),(v1) +# Note: Remove (v), because vzeroall and vzeroupper becomes emms without VEX. +77: emms | vzeroupper | vzeroall +78: VMREAD Ey,Gy +79: VMWRITE Gy,Ey 7a: 7b: -7c: haddps Vps,Wps (F2),(VEX) | haddpd Vpd,Wpd (66),(VEX) -7d: hsubps Vps,Wps (F2),(VEX) | hsubpd Vpd,Wpd (66),(VEX) -7e: movd/q Ed/q,Pd | movd/q Ed/q,Vdq (66),(VEX),(o128) | movq Vq,Wq (F3),(VEX),(o128) -7f: movq Qq,Pq | movdqa Wdq,Vdq (66),(VEX) | movdqu Wdq,Vdq (F3),(VEX) +7c: vhaddpd Vpd,Hpd,Wpd (66) | vhaddps Vps,Hps,Wps (F2) +7d: vhsubpd Vpd,Hpd,Wpd (66) | vhsubps Vps,Hps,Wps (F2) +7e: movd/q Ey,Pd | vmovd/q Ey,Vy (66),(v1) | vmovq Vq,Wq (F3),(v1) +7f: movq Qq,Pq | vmovdqa Wx,Vx (66) | vmovdqu Wx,Vx (F3) # 0x0f 0x80-0x8f 80: JO Jz (f64) 81: JNO Jz (f64) -82: JB/JNAE/JC Jz (f64) -83: JNB/JAE/JNC Jz (f64) -84: JZ/JE Jz (f64) -85: JNZ/JNE Jz (f64) +82: JB/JC/JNAE Jz (f64) +83: JAE/JNB/JNC Jz (f64) +84: JE/JZ Jz (f64) +85: JNE/JNZ Jz (f64) 86: JBE/JNA Jz (f64) -87: JNBE/JA Jz (f64) +87: JA/JNBE Jz (f64) 88: JS Jz (f64) 89: JNS Jz (f64) 8a: JP/JPE Jz (f64) @@ -502,18 +517,18 @@ b8: JMPE | POPCNT Gv,Ev (F3) b9: Grp10 (1A) ba: Grp8 Ev,Ib (1A) bb: BTC Ev,Gv -bc: BSF Gv,Ev -bd: BSR Gv,Ev +bc: BSF Gv,Ev | TZCNT Gv,Ev (F3) +bd: BSR Gv,Ev | LZCNT Gv,Ev (F3) be: MOVSX Gv,Eb bf: MOVSX Gv,Ew # 0x0f 0xc0-0xcf c0: XADD Eb,Gb c1: XADD Ev,Gv -c2: cmpps Vps,Wps,Ib (VEX) | cmpss Vss,Wss,Ib (F3),(VEX),(o128) | cmppd Vpd,Wpd,Ib (66),(VEX) | cmpsd Vsd,Wsd,Ib (F2),(VEX) -c3: movnti Md/q,Gd/q -c4: pinsrw Pq,Rd/q/Mw,Ib | pinsrw Vdq,Rd/q/Mw,Ib (66),(VEX),(o128) -c5: pextrw Gd,Nq,Ib | pextrw Gd,Udq,Ib (66),(VEX),(o128) -c6: shufps Vps,Wps,Ib (VEX) | shufpd Vpd,Wpd,Ib (66),(VEX) +c2: vcmpps Vps,Hps,Wps,Ib | vcmppd Vpd,Hpd,Wpd,Ib (66) | vcmpss Vss,Hss,Wss,Ib (F3),(v1) | vcmpsd Vsd,Hsd,Wsd,Ib (F2),(v1) +c3: movnti My,Gy +c4: pinsrw Pq,Ry/Mw,Ib | vpinsrw Vdq,Hdq,Ry/Mw,Ib (66),(v1) +c5: pextrw Gd,Nq,Ib | vpextrw Gd,Udq,Ib (66),(v1) +c6: vshufps Vps,Hps,Wps,Ib | vshufpd Vpd,Hpd,Wpd,Ib (66) c7: Grp9 (1A) c8: BSWAP RAX/EAX/R8/R8D c9: BSWAP RCX/ECX/R9/R9D @@ -524,55 +539,55 @@ cd: BSWAP RBP/EBP/R13/R13D ce: BSWAP RSI/ESI/R14/R14D cf: BSWAP RDI/EDI/R15/R15D # 0x0f 0xd0-0xdf -d0: addsubps Vps,Wps (F2),(VEX) | addsubpd Vpd,Wpd (66),(VEX) -d1: psrlw Pq,Qq | psrlw Vdq,Wdq (66),(VEX),(o128) -d2: psrld Pq,Qq | psrld Vdq,Wdq (66),(VEX),(o128) -d3: psrlq Pq,Qq | psrlq Vdq,Wdq (66),(VEX),(o128) -d4: paddq Pq,Qq | paddq Vdq,Wdq (66),(VEX),(o128) -d5: pmullw Pq,Qq | pmullw Vdq,Wdq (66),(VEX),(o128) -d6: movq Wq,Vq (66),(VEX),(o128) | movq2dq Vdq,Nq (F3) | movdq2q Pq,Uq (F2) -d7: pmovmskb Gd,Nq | pmovmskb Gd,Udq (66),(VEX),(o128) -d8: psubusb Pq,Qq | psubusb Vdq,Wdq (66),(VEX),(o128) -d9: psubusw Pq,Qq | psubusw Vdq,Wdq (66),(VEX),(o128) -da: pminub Pq,Qq | pminub Vdq,Wdq (66),(VEX),(o128) -db: pand Pq,Qq | pand Vdq,Wdq (66),(VEX),(o128) -dc: paddusb Pq,Qq | paddusb Vdq,Wdq (66),(VEX),(o128) -dd: paddusw Pq,Qq | paddusw Vdq,Wdq (66),(VEX),(o128) -de: pmaxub Pq,Qq | pmaxub Vdq,Wdq (66),(VEX),(o128) -df: pandn Pq,Qq | pandn Vdq,Wdq (66),(VEX),(o128) +d0: vaddsubpd Vpd,Hpd,Wpd (66) | vaddsubps Vps,Hps,Wps (F2) +d1: psrlw Pq,Qq | vpsrlw Vx,Hx,Wx (66),(v1) +d2: psrld Pq,Qq | vpsrld Vx,Hx,Wx (66),(v1) +d3: psrlq Pq,Qq | vpsrlq Vx,Hx,Wx (66),(v1) +d4: paddq Pq,Qq | vpaddq Vx,Hx,Wx (66),(v1) +d5: pmullw Pq,Qq | vpmullw Vx,Hx,Wx 
(66),(v1) +d6: vmovq Wq,Vq (66),(v1) | movq2dq Vdq,Nq (F3) | movdq2q Pq,Uq (F2) +d7: pmovmskb Gd,Nq | vpmovmskb Gd,Ux (66),(v1) +d8: psubusb Pq,Qq | vpsubusb Vx,Hx,Wx (66),(v1) +d9: psubusw Pq,Qq | vpsubusw Vx,Hx,Wx (66),(v1) +da: pminub Pq,Qq | vpminub Vx,Hx,Wx (66),(v1) +db: pand Pq,Qq | vpand Vx,Hx,Wx (66),(v1) +dc: paddusb Pq,Qq | vpaddusb Vx,Hx,Wx (66),(v1) +dd: paddusw Pq,Qq | vpaddusw Vx,Hx,Wx (66),(v1) +de: pmaxub Pq,Qq | vpmaxub Vx,Hx,Wx (66),(v1) +df: pandn Pq,Qq | vpandn Vx,Hx,Wx (66),(v1) # 0x0f 0xe0-0xef -e0: pavgb Pq,Qq | pavgb Vdq,Wdq (66),(VEX),(o128) -e1: psraw Pq,Qq | psraw Vdq,Wdq (66),(VEX),(o128) -e2: psrad Pq,Qq | psrad Vdq,Wdq (66),(VEX),(o128) -e3: pavgw Pq,Qq | pavgw Vdq,Wdq (66),(VEX),(o128) -e4: pmulhuw Pq,Qq | pmulhuw Vdq,Wdq (66),(VEX),(o128) -e5: pmulhw Pq,Qq | pmulhw Vdq,Wdq (66),(VEX),(o128) -e6: cvtpd2dq Vdq,Wpd (F2),(VEX) | cvttpd2dq Vdq,Wpd (66),(VEX) | cvtdq2pd Vpd,Wdq (F3),(VEX) -e7: movntq Mq,Pq | movntdq Mdq,Vdq (66),(VEX) -e8: psubsb Pq,Qq | psubsb Vdq,Wdq (66),(VEX),(o128) -e9: psubsw Pq,Qq | psubsw Vdq,Wdq (66),(VEX),(o128) -ea: pminsw Pq,Qq | pminsw Vdq,Wdq (66),(VEX),(o128) -eb: por Pq,Qq | por Vdq,Wdq (66),(VEX),(o128) -ec: paddsb Pq,Qq | paddsb Vdq,Wdq (66),(VEX),(o128) -ed: paddsw Pq,Qq | paddsw Vdq,Wdq (66),(VEX),(o128) -ee: pmaxsw Pq,Qq | pmaxsw Vdq,Wdq (66),(VEX),(o128) -ef: pxor Pq,Qq | pxor Vdq,Wdq (66),(VEX),(o128) +e0: pavgb Pq,Qq | vpavgb Vx,Hx,Wx (66),(v1) +e1: psraw Pq,Qq | vpsraw Vx,Hx,Wx (66),(v1) +e2: psrad Pq,Qq | vpsrad Vx,Hx,Wx (66),(v1) +e3: pavgw Pq,Qq | vpavgw Vx,Hx,Wx (66),(v1) +e4: pmulhuw Pq,Qq | vpmulhuw Vx,Hx,Wx (66),(v1) +e5: pmulhw Pq,Qq | vpmulhw Vx,Hx,Wx (66),(v1) +e6: vcvttpd2dq Vx,Wpd (66) | vcvtdq2pd Vx,Wdq (F3) | vcvtpd2dq Vx,Wpd (F2) +e7: movntq Mq,Pq | vmovntdq Mx,Vx (66) +e8: psubsb Pq,Qq | vpsubsb Vx,Hx,Wx (66),(v1) +e9: psubsw Pq,Qq | vpsubsw Vx,Hx,Wx (66),(v1) +ea: pminsw Pq,Qq | vpminsw Vx,Hx,Wx (66),(v1) +eb: por Pq,Qq | vpor Vx,Hx,Wx (66),(v1) +ec: paddsb Pq,Qq | vpaddsb Vx,Hx,Wx (66),(v1) +ed: paddsw Pq,Qq | vpaddsw Vx,Hx,Wx (66),(v1) +ee: pmaxsw Pq,Qq | vpmaxsw Vx,Hx,Wx (66),(v1) +ef: pxor Pq,Qq | vpxor Vx,Hx,Wx (66),(v1) # 0x0f 0xf0-0xff -f0: lddqu Vdq,Mdq (F2),(VEX) -f1: psllw Pq,Qq | psllw Vdq,Wdq (66),(VEX),(o128) -f2: pslld Pq,Qq | pslld Vdq,Wdq (66),(VEX),(o128) -f3: psllq Pq,Qq | psllq Vdq,Wdq (66),(VEX),(o128) -f4: pmuludq Pq,Qq | pmuludq Vdq,Wdq (66),(VEX),(o128) -f5: pmaddwd Pq,Qq | pmaddwd Vdq,Wdq (66),(VEX),(o128) -f6: psadbw Pq,Qq | psadbw Vdq,Wdq (66),(VEX),(o128) -f7: maskmovq Pq,Nq | maskmovdqu Vdq,Udq (66),(VEX),(o128) -f8: psubb Pq,Qq | psubb Vdq,Wdq (66),(VEX),(o128) -f9: psubw Pq,Qq | psubw Vdq,Wdq (66),(VEX),(o128) -fa: psubd Pq,Qq | psubd Vdq,Wdq (66),(VEX),(o128) -fb: psubq Pq,Qq | psubq Vdq,Wdq (66),(VEX),(o128) -fc: paddb Pq,Qq | paddb Vdq,Wdq (66),(VEX),(o128) -fd: paddw Pq,Qq | paddw Vdq,Wdq (66),(VEX),(o128) -fe: paddd Pq,Qq | paddd Vdq,Wdq (66),(VEX),(o128) +f0: vlddqu Vx,Mx (F2) +f1: psllw Pq,Qq | vpsllw Vx,Hx,Wx (66),(v1) +f2: pslld Pq,Qq | vpslld Vx,Hx,Wx (66),(v1) +f3: psllq Pq,Qq | vpsllq Vx,Hx,Wx (66),(v1) +f4: pmuludq Pq,Qq | vpmuludq Vx,Hx,Wx (66),(v1) +f5: pmaddwd Pq,Qq | vpmaddwd Vx,Hx,Wx (66),(v1) +f6: psadbw Pq,Qq | vpsadbw Vx,Hx,Wx (66),(v1) +f7: maskmovq Pq,Nq | vmaskmovdqu Vx,Ux (66),(v1) +f8: psubb Pq,Qq | vpsubb Vx,Hx,Wx (66),(v1) +f9: psubw Pq,Qq | vpsubw Vx,Hx,Wx (66),(v1) +fa: psubd Pq,Qq | vpsubd Vx,Hx,Wx (66),(v1) +fb: psubq Pq,Qq | vpsubq Vx,Hx,Wx (66),(v1) +fc: paddb Pq,Qq | vpaddb Vx,Hx,Wx (66),(v1) +fd: paddw Pq,Qq | vpaddw Vx,Hx,Wx (66),(v1) +fe: 
paddd Pq,Qq | vpaddd Vx,Hx,Wx (66),(v1) ff: EndTable @@ -580,155 +595,193 @@ Table: 3-byte opcode 1 (0x0f 0x38) Referrer: 3-byte escape 1 AVXcode: 2 # 0x0f 0x38 0x00-0x0f -00: pshufb Pq,Qq | pshufb Vdq,Wdq (66),(VEX),(o128) -01: phaddw Pq,Qq | phaddw Vdq,Wdq (66),(VEX),(o128) -02: phaddd Pq,Qq | phaddd Vdq,Wdq (66),(VEX),(o128) -03: phaddsw Pq,Qq | phaddsw Vdq,Wdq (66),(VEX),(o128) -04: pmaddubsw Pq,Qq | pmaddubsw Vdq,Wdq (66),(VEX),(o128) -05: phsubw Pq,Qq | phsubw Vdq,Wdq (66),(VEX),(o128) -06: phsubd Pq,Qq | phsubd Vdq,Wdq (66),(VEX),(o128) -07: phsubsw Pq,Qq | phsubsw Vdq,Wdq (66),(VEX),(o128) -08: psignb Pq,Qq | psignb Vdq,Wdq (66),(VEX),(o128) -09: psignw Pq,Qq | psignw Vdq,Wdq (66),(VEX),(o128) -0a: psignd Pq,Qq | psignd Vdq,Wdq (66),(VEX),(o128) -0b: pmulhrsw Pq,Qq | pmulhrsw Vdq,Wdq (66),(VEX),(o128) -0c: Vpermilps /r (66),(oVEX) -0d: Vpermilpd /r (66),(oVEX) -0e: vtestps /r (66),(oVEX) -0f: vtestpd /r (66),(oVEX) +00: pshufb Pq,Qq | vpshufb Vx,Hx,Wx (66),(v1) +01: phaddw Pq,Qq | vphaddw Vx,Hx,Wx (66),(v1) +02: phaddd Pq,Qq | vphaddd Vx,Hx,Wx (66),(v1) +03: phaddsw Pq,Qq | vphaddsw Vx,Hx,Wx (66),(v1) +04: pmaddubsw Pq,Qq | vpmaddubsw Vx,Hx,Wx (66),(v1) +05: phsubw Pq,Qq | vphsubw Vx,Hx,Wx (66),(v1) +06: phsubd Pq,Qq | vphsubd Vx,Hx,Wx (66),(v1) +07: phsubsw Pq,Qq | vphsubsw Vx,Hx,Wx (66),(v1) +08: psignb Pq,Qq | vpsignb Vx,Hx,Wx (66),(v1) +09: psignw Pq,Qq | vpsignw Vx,Hx,Wx (66),(v1) +0a: psignd Pq,Qq | vpsignd Vx,Hx,Wx (66),(v1) +0b: pmulhrsw Pq,Qq | vpmulhrsw Vx,Hx,Wx (66),(v1) +0c: vpermilps Vx,Hx,Wx (66),(v) +0d: vpermilpd Vx,Hx,Wx (66),(v) +0e: vtestps Vx,Wx (66),(v) +0f: vtestpd Vx,Wx (66),(v) # 0x0f 0x38 0x10-0x1f 10: pblendvb Vdq,Wdq (66) 11: 12: -13: +13: vcvtph2ps Vx,Wx,Ib (66),(v) 14: blendvps Vdq,Wdq (66) 15: blendvpd Vdq,Wdq (66) -16: -17: ptest Vdq,Wdq (66),(VEX) -18: vbroadcastss /r (66),(oVEX) -19: vbroadcastsd /r (66),(oVEX),(o256) -1a: vbroadcastf128 /r (66),(oVEX),(o256) +16: vpermps Vqq,Hqq,Wqq (66),(v) +17: vptest Vx,Wx (66) +18: vbroadcastss Vx,Wd (66),(v) +19: vbroadcastsd Vqq,Wq (66),(v) +1a: vbroadcastf128 Vqq,Mdq (66),(v) 1b: -1c: pabsb Pq,Qq | pabsb Vdq,Wdq (66),(VEX),(o128) -1d: pabsw Pq,Qq | pabsw Vdq,Wdq (66),(VEX),(o128) -1e: pabsd Pq,Qq | pabsd Vdq,Wdq (66),(VEX),(o128) +1c: pabsb Pq,Qq | vpabsb Vx,Wx (66),(v1) +1d: pabsw Pq,Qq | vpabsw Vx,Wx (66),(v1) +1e: pabsd Pq,Qq | vpabsd Vx,Wx (66),(v1) 1f: # 0x0f 0x38 0x20-0x2f -20: pmovsxbw Vdq,Udq/Mq (66),(VEX),(o128) -21: pmovsxbd Vdq,Udq/Md (66),(VEX),(o128) -22: pmovsxbq Vdq,Udq/Mw (66),(VEX),(o128) -23: pmovsxwd Vdq,Udq/Mq (66),(VEX),(o128) -24: pmovsxwq Vdq,Udq/Md (66),(VEX),(o128) -25: pmovsxdq Vdq,Udq/Mq (66),(VEX),(o128) +20: vpmovsxbw Vx,Ux/Mq (66),(v1) +21: vpmovsxbd Vx,Ux/Md (66),(v1) +22: vpmovsxbq Vx,Ux/Mw (66),(v1) +23: vpmovsxwd Vx,Ux/Mq (66),(v1) +24: vpmovsxwq Vx,Ux/Md (66),(v1) +25: vpmovsxdq Vx,Ux/Mq (66),(v1) 26: 27: -28: pmuldq Vdq,Wdq (66),(VEX),(o128) -29: pcmpeqq Vdq,Wdq (66),(VEX),(o128) -2a: movntdqa Vdq,Mdq (66),(VEX),(o128) -2b: packusdw Vdq,Wdq (66),(VEX),(o128) -2c: vmaskmovps(ld) /r (66),(oVEX) -2d: vmaskmovpd(ld) /r (66),(oVEX) -2e: vmaskmovps(st) /r (66),(oVEX) -2f: vmaskmovpd(st) /r (66),(oVEX) +28: vpmuldq Vx,Hx,Wx (66),(v1) +29: vpcmpeqq Vx,Hx,Wx (66),(v1) +2a: vmovntdqa Vx,Mx (66),(v1) +2b: vpackusdw Vx,Hx,Wx (66),(v1) +2c: vmaskmovps Vx,Hx,Mx (66),(v) +2d: vmaskmovpd Vx,Hx,Mx (66),(v) +2e: vmaskmovps Mx,Hx,Vx (66),(v) +2f: vmaskmovpd Mx,Hx,Vx (66),(v) # 0x0f 0x38 0x30-0x3f -30: pmovzxbw Vdq,Udq/Mq (66),(VEX),(o128) -31: pmovzxbd Vdq,Udq/Md (66),(VEX),(o128) -32: 
pmovzxbq Vdq,Udq/Mw (66),(VEX),(o128) -33: pmovzxwd Vdq,Udq/Mq (66),(VEX),(o128) -34: pmovzxwq Vdq,Udq/Md (66),(VEX),(o128) -35: pmovzxdq Vdq,Udq/Mq (66),(VEX),(o128) -36: -37: pcmpgtq Vdq,Wdq (66),(VEX),(o128) -38: pminsb Vdq,Wdq (66),(VEX),(o128) -39: pminsd Vdq,Wdq (66),(VEX),(o128) -3a: pminuw Vdq,Wdq (66),(VEX),(o128) -3b: pminud Vdq,Wdq (66),(VEX),(o128) -3c: pmaxsb Vdq,Wdq (66),(VEX),(o128) -3d: pmaxsd Vdq,Wdq (66),(VEX),(o128) -3e: pmaxuw Vdq,Wdq (66),(VEX),(o128) -3f: pmaxud Vdq,Wdq (66),(VEX),(o128) +30: vpmovzxbw Vx,Ux/Mq (66),(v1) +31: vpmovzxbd Vx,Ux/Md (66),(v1) +32: vpmovzxbq Vx,Ux/Mw (66),(v1) +33: vpmovzxwd Vx,Ux/Mq (66),(v1) +34: vpmovzxwq Vx,Ux/Md (66),(v1) +35: vpmovzxdq Vx,Ux/Mq (66),(v1) +36: vpermd Vqq,Hqq,Wqq (66),(v) +37: vpcmpgtq Vx,Hx,Wx (66),(v1) +38: vpminsb Vx,Hx,Wx (66),(v1) +39: vpminsd Vx,Hx,Wx (66),(v1) +3a: vpminuw Vx,Hx,Wx (66),(v1) +3b: vpminud Vx,Hx,Wx (66),(v1) +3c: vpmaxsb Vx,Hx,Wx (66),(v1) +3d: vpmaxsd Vx,Hx,Wx (66),(v1) +3e: vpmaxuw Vx,Hx,Wx (66),(v1) +3f: vpmaxud Vx,Hx,Wx (66),(v1) # 0x0f 0x38 0x40-0x8f -40: pmulld Vdq,Wdq (66),(VEX),(o128) -41: phminposuw Vdq,Wdq (66),(VEX),(o128) -80: INVEPT Gd/q,Mdq (66) -81: INVPID Gd/q,Mdq (66) +40: vpmulld Vx,Hx,Wx (66),(v1) +41: vphminposuw Vdq,Wdq (66),(v1) +42: +43: +44: +45: vpsrlvd/q Vx,Hx,Wx (66),(v) +46: vpsravd Vx,Hx,Wx (66),(v) +47: vpsllvd/q Vx,Hx,Wx (66),(v) +# Skip 0x48-0x57 +58: vpbroadcastd Vx,Wx (66),(v) +59: vpbroadcastq Vx,Wx (66),(v) +5a: vbroadcasti128 Vqq,Mdq (66),(v) +# Skip 0x5b-0x77 +78: vpbroadcastb Vx,Wx (66),(v) +79: vpbroadcastw Vx,Wx (66),(v) +# Skip 0x7a-0x7f +80: INVEPT Gy,Mdq (66) +81: INVPID Gy,Mdq (66) +82: INVPCID Gy,Mdq (66) +8c: vpmaskmovd/q Vx,Hx,Mx (66),(v) +8e: vpmaskmovd/q Mx,Vx,Hx (66),(v) # 0x0f 0x38 0x90-0xbf (FMA) -96: vfmaddsub132pd/ps /r (66),(VEX) -97: vfmsubadd132pd/ps /r (66),(VEX) -98: vfmadd132pd/ps /r (66),(VEX) -99: vfmadd132sd/ss /r (66),(VEX),(o128) -9a: vfmsub132pd/ps /r (66),(VEX) -9b: vfmsub132sd/ss /r (66),(VEX),(o128) -9c: vfnmadd132pd/ps /r (66),(VEX) -9d: vfnmadd132sd/ss /r (66),(VEX),(o128) -9e: vfnmsub132pd/ps /r (66),(VEX) -9f: vfnmsub132sd/ss /r (66),(VEX),(o128) -a6: vfmaddsub213pd/ps /r (66),(VEX) -a7: vfmsubadd213pd/ps /r (66),(VEX) -a8: vfmadd213pd/ps /r (66),(VEX) -a9: vfmadd213sd/ss /r (66),(VEX),(o128) -aa: vfmsub213pd/ps /r (66),(VEX) -ab: vfmsub213sd/ss /r (66),(VEX),(o128) -ac: vfnmadd213pd/ps /r (66),(VEX) -ad: vfnmadd213sd/ss /r (66),(VEX),(o128) -ae: vfnmsub213pd/ps /r (66),(VEX) -af: vfnmsub213sd/ss /r (66),(VEX),(o128) -b6: vfmaddsub231pd/ps /r (66),(VEX) -b7: vfmsubadd231pd/ps /r (66),(VEX) -b8: vfmadd231pd/ps /r (66),(VEX) -b9: vfmadd231sd/ss /r (66),(VEX),(o128) -ba: vfmsub231pd/ps /r (66),(VEX) -bb: vfmsub231sd/ss /r (66),(VEX),(o128) -bc: vfnmadd231pd/ps /r (66),(VEX) -bd: vfnmadd231sd/ss /r (66),(VEX),(o128) -be: vfnmsub231pd/ps /r (66),(VEX) -bf: vfnmsub231sd/ss /r (66),(VEX),(o128) +90: vgatherdd/q Vx,Hx,Wx (66),(v) +91: vgatherqd/q Vx,Hx,Wx (66),(v) +92: vgatherdps/d Vx,Hx,Wx (66),(v) +93: vgatherqps/d Vx,Hx,Wx (66),(v) +94: +95: +96: vfmaddsub132ps/d Vx,Hx,Wx (66),(v) +97: vfmsubadd132ps/d Vx,Hx,Wx (66),(v) +98: vfmadd132ps/d Vx,Hx,Wx (66),(v) +99: vfmadd132ss/d Vx,Hx,Wx (66),(v),(v1) +9a: vfmsub132ps/d Vx,Hx,Wx (66),(v) +9b: vfmsub132ss/d Vx,Hx,Wx (66),(v),(v1) +9c: vfnmadd132ps/d Vx,Hx,Wx (66),(v) +9d: vfnmadd132ss/d Vx,Hx,Wx (66),(v),(v1) +9e: vfnmsub132ps/d Vx,Hx,Wx (66),(v) +9f: vfnmsub132ss/d Vx,Hx,Wx (66),(v),(v1) +a6: vfmaddsub213ps/d Vx,Hx,Wx (66),(v) +a7: vfmsubadd213ps/d Vx,Hx,Wx (66),(v) +a8: vfmadd213ps/d 
Vx,Hx,Wx (66),(v) +a9: vfmadd213ss/d Vx,Hx,Wx (66),(v),(v1) +aa: vfmsub213ps/d Vx,Hx,Wx (66),(v) +ab: vfmsub213ss/d Vx,Hx,Wx (66),(v),(v1) +ac: vfnmadd213ps/d Vx,Hx,Wx (66),(v) +ad: vfnmadd213ss/d Vx,Hx,Wx (66),(v),(v1) +ae: vfnmsub213ps/d Vx,Hx,Wx (66),(v) +af: vfnmsub213ss/d Vx,Hx,Wx (66),(v),(v1) +b6: vfmaddsub231ps/d Vx,Hx,Wx (66),(v) +b7: vfmsubadd231ps/d Vx,Hx,Wx (66),(v) +b8: vfmadd231ps/d Vx,Hx,Wx (66),(v) +b9: vfmadd231ss/d Vx,Hx,Wx (66),(v),(v1) +ba: vfmsub231ps/d Vx,Hx,Wx (66),(v) +bb: vfmsub231ss/d Vx,Hx,Wx (66),(v),(v1) +bc: vfnmadd231ps/d Vx,Hx,Wx (66),(v) +bd: vfnmadd231ss/d Vx,Hx,Wx (66),(v),(v1) +be: vfnmsub231ps/d Vx,Hx,Wx (66),(v) +bf: vfnmsub231ss/d Vx,Hx,Wx (66),(v),(v1) # 0x0f 0x38 0xc0-0xff -db: aesimc Vdq,Wdq (66),(VEX),(o128) -dc: aesenc Vdq,Wdq (66),(VEX),(o128) -dd: aesenclast Vdq,Wdq (66),(VEX),(o128) -de: aesdec Vdq,Wdq (66),(VEX),(o128) -df: aesdeclast Vdq,Wdq (66),(VEX),(o128) -f0: MOVBE Gv,Mv | CRC32 Gd,Eb (F2) -f1: MOVBE Mv,Gv | CRC32 Gd,Ev (F2) +db: VAESIMC Vdq,Wdq (66),(v1) +dc: VAESENC Vdq,Hdq,Wdq (66),(v1) +dd: VAESENCLAST Vdq,Hdq,Wdq (66),(v1) +de: VAESDEC Vdq,Hdq,Wdq (66),(v1) +df: VAESDECLAST Vdq,Hdq,Wdq (66),(v1) +f0: MOVBE Gy,My | MOVBE Gw,Mw (66) | CRC32 Gd,Eb (F2) +f1: MOVBE My,Gy | MOVBE Mw,Gw (66) | CRC32 Gd,Ey (F2) +f3: ANDN Gy,By,Ey (v) +f4: Grp17 (1A) +f5: BZHI Gy,Ey,By (v) | PEXT Gy,By,Ey (F3),(v) | PDEP Gy,By,Ey (F2),(v) +f6: MULX By,Gy,rDX,Ey (F2),(v) +f7: BEXTR Gy,Ey,By (v) | SHLX Gy,Ey,By (66),(v) | SARX Gy,Ey,By (F3),(v) | SHRX Gy,Ey,By (F2),(v) EndTable Table: 3-byte opcode 2 (0x0f 0x3a) Referrer: 3-byte escape 2 AVXcode: 3 # 0x0f 0x3a 0x00-0xff -04: vpermilps /r,Ib (66),(oVEX) -05: vpermilpd /r,Ib (66),(oVEX) -06: vperm2f128 /r,Ib (66),(oVEX),(o256) -08: roundps Vdq,Wdq,Ib (66),(VEX) -09: roundpd Vdq,Wdq,Ib (66),(VEX) -0a: roundss Vss,Wss,Ib (66),(VEX),(o128) -0b: roundsd Vsd,Wsd,Ib (66),(VEX),(o128) -0c: blendps Vdq,Wdq,Ib (66),(VEX) -0d: blendpd Vdq,Wdq,Ib (66),(VEX) -0e: pblendw Vdq,Wdq,Ib (66),(VEX),(o128) -0f: palignr Pq,Qq,Ib | palignr Vdq,Wdq,Ib (66),(VEX),(o128) -14: pextrb Rd/Mb,Vdq,Ib (66),(VEX),(o128) -15: pextrw Rd/Mw,Vdq,Ib (66),(VEX),(o128) -16: pextrd/pextrq Ed/q,Vdq,Ib (66),(VEX),(o128) -17: extractps Ed,Vdq,Ib (66),(VEX),(o128) -18: vinsertf128 /r,Ib (66),(oVEX),(o256) -19: vextractf128 /r,Ib (66),(oVEX),(o256) -20: pinsrb Vdq,Rd/q/Mb,Ib (66),(VEX),(o128) -21: insertps Vdq,Udq/Md,Ib (66),(VEX),(o128) -22: pinsrd/pinsrq Vdq,Ed/q,Ib (66),(VEX),(o128) -40: dpps Vdq,Wdq,Ib (66),(VEX) -41: dppd Vdq,Wdq,Ib (66),(VEX),(o128) -42: mpsadbw Vdq,Wdq,Ib (66),(VEX),(o128) -44: pclmulq Vdq,Wdq,Ib (66),(VEX),(o128) -4a: vblendvps /r,Ib (66),(oVEX) -4b: vblendvpd /r,Ib (66),(oVEX) -4c: vpblendvb /r,Ib (66),(oVEX),(o128) -60: pcmpestrm Vdq,Wdq,Ib (66),(VEX),(o128) -61: pcmpestri Vdq,Wdq,Ib (66),(VEX),(o128) -62: pcmpistrm Vdq,Wdq,Ib (66),(VEX),(o128) -63: pcmpistri Vdq,Wdq,Ib (66),(VEX),(o128) -df: aeskeygenassist Vdq,Wdq,Ib (66),(VEX),(o128) +00: vpermq Vqq,Wqq,Ib (66),(v) +01: vpermpd Vqq,Wqq,Ib (66),(v) +02: vpblendd Vx,Hx,Wx,Ib (66),(v) +03: +04: vpermilps Vx,Wx,Ib (66),(v) +05: vpermilpd Vx,Wx,Ib (66),(v) +06: vperm2f128 Vqq,Hqq,Wqq,Ib (66),(v) +07: +08: vroundps Vx,Wx,Ib (66) +09: vroundpd Vx,Wx,Ib (66) +0a: vroundss Vss,Wss,Ib (66),(v1) +0b: vroundsd Vsd,Wsd,Ib (66),(v1) +0c: vblendps Vx,Hx,Wx,Ib (66) +0d: vblendpd Vx,Hx,Wx,Ib (66) +0e: vpblendw Vx,Hx,Wx,Ib (66),(v1) +0f: palignr Pq,Qq,Ib | vpalignr Vx,Hx,Wx,Ib (66),(v1) +14: vpextrb Rd/Mb,Vdq,Ib (66),(v1) +15: vpextrw Rd/Mw,Vdq,Ib (66),(v1) +16: vpextrd/q Ey,Vdq,Ib (66),(v1) 
+17: vextractps Ed,Vdq,Ib (66),(v1) +18: vinsertf128 Vqq,Hqq,Wqq,Ib (66),(v) +19: vextractf128 Wdq,Vqq,Ib (66),(v) +1d: vcvtps2ph Wx,Vx,Ib (66),(v) +20: vpinsrb Vdq,Hdq,Ry/Mb,Ib (66),(v1) +21: vinsertps Vdq,Hdq,Udq/Md,Ib (66),(v1) +22: vpinsrd/q Vdq,Hdq,Ey,Ib (66),(v1) +38: vinserti128 Vqq,Hqq,Wqq,Ib (66),(v) +39: vextracti128 Wdq,Vqq,Ib (66),(v) +40: vdpps Vx,Hx,Wx,Ib (66) +41: vdppd Vdq,Hdq,Wdq,Ib (66),(v1) +42: vmpsadbw Vx,Hx,Wx,Ib (66),(v1) +44: vpclmulqdq Vdq,Hdq,Wdq,Ib (66),(v1) +46: vperm2i128 Vqq,Hqq,Wqq,Ib (66),(v) +4a: vblendvps Vx,Hx,Wx,Lx (66),(v) +4b: vblendvpd Vx,Hx,Wx,Lx (66),(v) +4c: vpblendvb Vx,Hx,Wx,Lx (66),(v1) +60: vpcmpestrm Vdq,Wdq,Ib (66),(v1) +61: vpcmpestri Vdq,Wdq,Ib (66),(v1) +62: vpcmpistrm Vdq,Wdq,Ib (66),(v1) +63: vpcmpistri Vdq,Wdq,Ib (66),(v1) +df: VAESKEYGEN Vdq,Wdq,Ib (66),(v1) +f0: RORX Gy,Ey,Ib (F2),(v) EndTable GrpTable: Grp1 @@ -790,7 +843,7 @@ GrpTable: Grp5 2: CALLN Ev (f64) 3: CALLF Ep 4: JMPN Ev (f64) -5: JMPF Ep +5: JMPF Mp 6: PUSH Ev (d64) 7: EndTable @@ -807,7 +860,7 @@ EndTable GrpTable: Grp7 0: SGDT Ms | VMCALL (001),(11B) | VMLAUNCH (010),(11B) | VMRESUME (011),(11B) | VMXOFF (100),(11B) 1: SIDT Ms | MONITOR (000),(11B) | MWAIT (001) -2: LGDT Ms | XGETBV (000),(11B) | XSETBV (001),(11B) +2: LGDT Ms | XGETBV (000),(11B) | XSETBV (001),(11B) | VMFUNC (100),(11B) 3: LIDT Ms 4: SMSW Mw/Rv 5: @@ -824,44 +877,45 @@ EndTable GrpTable: Grp9 1: CMPXCHG8B/16B Mq/Mdq -6: VMPTRLD Mq | VMCLEAR Mq (66) | VMXON Mq (F3) -7: VMPTRST Mq +6: VMPTRLD Mq | VMCLEAR Mq (66) | VMXON Mq (F3) | RDRAND Rv (11B) +7: VMPTRST Mq | VMPTRST Mq (F3) EndTable GrpTable: Grp10 EndTable GrpTable: Grp11 +# Note: the operands are given by group opcode 0: MOV EndTable GrpTable: Grp12 -2: psrlw Nq,Ib (11B) | psrlw Udq,Ib (66),(11B),(VEX),(o128) -4: psraw Nq,Ib (11B) | psraw Udq,Ib (66),(11B),(VEX),(o128) -6: psllw Nq,Ib (11B) | psllw Udq,Ib (66),(11B),(VEX),(o128) +2: psrlw Nq,Ib (11B) | vpsrlw Hx,Ux,Ib (66),(11B),(v1) +4: psraw Nq,Ib (11B) | vpsraw Hx,Ux,Ib (66),(11B),(v1) +6: psllw Nq,Ib (11B) | vpsllw Hx,Ux,Ib (66),(11B),(v1) EndTable GrpTable: Grp13 -2: psrld Nq,Ib (11B) | psrld Udq,Ib (66),(11B),(VEX),(o128) -4: psrad Nq,Ib (11B) | psrad Udq,Ib (66),(11B),(VEX),(o128) -6: pslld Nq,Ib (11B) | pslld Udq,Ib (66),(11B),(VEX),(o128) +2: psrld Nq,Ib (11B) | vpsrld Hx,Ux,Ib (66),(11B),(v1) +4: psrad Nq,Ib (11B) | vpsrad Hx,Ux,Ib (66),(11B),(v1) +6: pslld Nq,Ib (11B) | vpslld Hx,Ux,Ib (66),(11B),(v1) EndTable GrpTable: Grp14 -2: psrlq Nq,Ib (11B) | psrlq Udq,Ib (66),(11B),(VEX),(o128) -3: psrldq Udq,Ib (66),(11B),(VEX),(o128) -6: psllq Nq,Ib (11B) | psllq Udq,Ib (66),(11B),(VEX),(o128) -7: pslldq Udq,Ib (66),(11B),(VEX),(o128) +2: psrlq Nq,Ib (11B) | vpsrlq Hx,Ux,Ib (66),(11B),(v1) +3: vpsrldq Hx,Ux,Ib (66),(11B),(v1) +6: psllq Nq,Ib (11B) | vpsllq Hx,Ux,Ib (66),(11B),(v1) +7: vpslldq Hx,Ux,Ib (66),(11B),(v1) EndTable GrpTable: Grp15 -0: fxsave -1: fxstor -2: ldmxcsr (VEX) -3: stmxcsr (VEX) +0: fxsave | RDFSBASE Ry (F3),(11B) +1: fxstor | RDGSBASE Ry (F3),(11B) +2: vldmxcsr Md (v1) | WRFSBASE Ry (F3),(11B) +3: vstmxcsr Md (v1) | WRGSBASE Ry (F3),(11B) 4: XSAVE 5: XRSTOR | lfence (11B) -6: mfence (11B) +6: XSAVEOPT | mfence (11B) 7: clflush | sfence (11B) EndTable @@ -872,6 +926,12 @@ GrpTable: Grp16 3: prefetch T2 EndTable +GrpTable: Grp17 +1: BLSR By,Ey (v) +2: BLSMSK By,Ey (v) +3: BLSI By,Ey (v) +EndTable + # AMD's Prefetch Group GrpTable: GrpP 0: PREFETCH diff --git a/arch/x86/oprofile/Makefile b/arch/x86/oprofile/Makefile index 446902b..1599f56 100644 --- 
a/arch/x86/oprofile/Makefile +++ b/arch/x86/oprofile/Makefile @@ -4,9 +4,8 @@ DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \ oprof.o cpu_buffer.o buffer_sync.o \ event_buffer.o oprofile_files.o \ oprofilefs.o oprofile_stats.o \ - timer_int.o ) + timer_int.o nmi_timer_int.o ) oprofile-y := $(DRIVER_OBJS) init.o backtrace.o oprofile-$(CONFIG_X86_LOCAL_APIC) += nmi_int.o op_model_amd.o \ op_model_ppro.o op_model_p4.o -oprofile-$(CONFIG_X86_IO_APIC) += nmi_timer_int.o diff --git a/arch/x86/oprofile/init.c b/arch/x86/oprofile/init.c index f148cf6..9e138d0 100644 --- a/arch/x86/oprofile/init.c +++ b/arch/x86/oprofile/init.c @@ -16,37 +16,23 @@ * with the NMI mode driver. */ +#ifdef CONFIG_X86_LOCAL_APIC extern int op_nmi_init(struct oprofile_operations *ops); -extern int op_nmi_timer_init(struct oprofile_operations *ops); extern void op_nmi_exit(void); -extern void x86_backtrace(struct pt_regs * const regs, unsigned int depth); +#else +static int op_nmi_init(struct oprofile_operations *ops) { return -ENODEV; } +static void op_nmi_exit(void) { } +#endif -static int nmi_timer; +extern void x86_backtrace(struct pt_regs * const regs, unsigned int depth); int __init oprofile_arch_init(struct oprofile_operations *ops) { - int ret; - - ret = -ENODEV; - -#ifdef CONFIG_X86_LOCAL_APIC - ret = op_nmi_init(ops); -#endif - nmi_timer = (ret != 0); -#ifdef CONFIG_X86_IO_APIC - if (nmi_timer) - ret = op_nmi_timer_init(ops); -#endif ops->backtrace = x86_backtrace; - - return ret; + return op_nmi_init(ops); } - void oprofile_arch_exit(void) { -#ifdef CONFIG_X86_LOCAL_APIC - if (!nmi_timer) - op_nmi_exit(); -#endif + op_nmi_exit(); } diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c index 75f9528..26b8a85 100644 --- a/arch/x86/oprofile/nmi_int.c +++ b/arch/x86/oprofile/nmi_int.c @@ -595,24 +595,36 @@ static int __init p4_init(char **cpu_type) return 0; } -static int force_arch_perfmon; -static int force_cpu_type(const char *str, struct kernel_param *kp) +enum __force_cpu_type { + reserved = 0, /* do not force */ + timer, + arch_perfmon, +}; + +static int force_cpu_type; + +static int set_cpu_type(const char *str, struct kernel_param *kp) { - if (!strcmp(str, "arch_perfmon")) { - force_arch_perfmon = 1; + if (!strcmp(str, "timer")) { + force_cpu_type = timer; + printk(KERN_INFO "oprofile: forcing NMI timer mode\n"); + } else if (!strcmp(str, "arch_perfmon")) { + force_cpu_type = arch_perfmon; printk(KERN_INFO "oprofile: forcing architectural perfmon\n"); + } else { + force_cpu_type = 0; } return 0; } -module_param_call(cpu_type, force_cpu_type, NULL, NULL, 0); +module_param_call(cpu_type, set_cpu_type, NULL, NULL, 0); static int __init ppro_init(char **cpu_type) { __u8 cpu_model = boot_cpu_data.x86_model; struct op_x86_model_spec *spec = &op_ppro_spec; /* default */ - if (force_arch_perfmon && cpu_has_arch_perfmon) + if (force_cpu_type == arch_perfmon && cpu_has_arch_perfmon) return 0; /* @@ -679,6 +691,9 @@ int __init op_nmi_init(struct oprofile_operations *ops) if (!cpu_has_apic) return -ENODEV; + if (force_cpu_type == timer) + return -ENODEV; + switch (vendor) { case X86_VENDOR_AMD: /* Needs to be at least an Athlon (or hammer in 32bit mode) */ diff --git a/arch/x86/oprofile/nmi_timer_int.c b/arch/x86/oprofile/nmi_timer_int.c deleted file mode 100644 index 7f8052c..0000000 --- a/arch/x86/oprofile/nmi_timer_int.c +++ /dev/null @@ -1,50 +0,0 @@ -/** - * @file nmi_timer_int.c - * - * @remark Copyright 2003 OProfile authors - * @remark Read the file COPYING - * - * @author Zwane 
Mwaikambo <zwane@linuxpower.ca> - */ - -#include <linux/init.h> -#include <linux/smp.h> -#include <linux/errno.h> -#include <linux/oprofile.h> -#include <linux/rcupdate.h> -#include <linux/kdebug.h> - -#include <asm/nmi.h> -#include <asm/apic.h> -#include <asm/ptrace.h> - -static int profile_timer_exceptions_notify(unsigned int val, struct pt_regs *regs) -{ - oprofile_add_sample(regs, 0); - return NMI_HANDLED; -} - -static int timer_start(void) -{ - if (register_nmi_handler(NMI_LOCAL, profile_timer_exceptions_notify, - 0, "oprofile-timer")) - return 1; - return 0; -} - - -static void timer_stop(void) -{ - unregister_nmi_handler(NMI_LOCAL, "oprofile-timer"); - synchronize_sched(); /* Allow already-started NMIs to complete. */ -} - - -int __init op_nmi_timer_init(struct oprofile_operations *ops) -{ - ops->start = timer_start; - ops->stop = timer_stop; - ops->cpu_type = "timer"; - printk(KERN_INFO "oprofile: using NMI timer interrupt.\n"); - return 0; -} diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile index f820826..d511aa9 100644 --- a/arch/x86/tools/Makefile +++ b/arch/x86/tools/Makefile @@ -18,14 +18,21 @@ chkobjdump = $(srctree)/arch/x86/tools/chkobjdump.awk quiet_cmd_posttest = TEST $@ cmd_posttest = ($(OBJDUMP) -v | $(AWK) -f $(chkobjdump)) || $(OBJDUMP) -d -j .text $(objtree)/vmlinux | $(AWK) -f $(distill_awk) | $(obj)/test_get_len $(posttest_64bit) $(posttest_verbose) -posttest: $(obj)/test_get_len vmlinux +quiet_cmd_sanitytest = TEST $@ + cmd_sanitytest = $(obj)/insn_sanity $(posttest_64bit) -m 1000000 + +posttest: $(obj)/test_get_len vmlinux $(obj)/insn_sanity $(call cmd,posttest) + $(call cmd,sanitytest) -hostprogs-y := test_get_len +hostprogs-y += test_get_len insn_sanity # -I needed for generated C source and C source which in the kernel tree. HOSTCFLAGS_test_get_len.o := -Wall -I$(objtree)/arch/x86/lib/ -I$(srctree)/arch/x86/include/ -I$(srctree)/arch/x86/lib/ -I$(srctree)/include/ +HOSTCFLAGS_insn_sanity.o := -Wall -I$(objtree)/arch/x86/lib/ -I$(srctree)/arch/x86/include/ -I$(srctree)/arch/x86/lib/ -I$(srctree)/include/ + # Dependencies are also needed. 
$(obj)/test_get_len.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c +$(obj)/insn_sanity.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c diff --git a/arch/x86/tools/gen-insn-attr-x86.awk b/arch/x86/tools/gen-insn-attr-x86.awk index eaf11f5..5f6a5b6 100644 --- a/arch/x86/tools/gen-insn-attr-x86.awk +++ b/arch/x86/tools/gen-insn-attr-x86.awk @@ -47,7 +47,7 @@ BEGIN { sep_expr = "^\\|$" group_expr = "^Grp[0-9A-Za-z]+" - imm_expr = "^[IJAO][a-z]" + imm_expr = "^[IJAOL][a-z]" imm_flag["Ib"] = "INAT_MAKE_IMM(INAT_IMM_BYTE)" imm_flag["Jb"] = "INAT_MAKE_IMM(INAT_IMM_BYTE)" imm_flag["Iw"] = "INAT_MAKE_IMM(INAT_IMM_WORD)" @@ -59,6 +59,7 @@ BEGIN { imm_flag["Iv"] = "INAT_MAKE_IMM(INAT_IMM_VWORD)" imm_flag["Ob"] = "INAT_MOFFSET" imm_flag["Ov"] = "INAT_MOFFSET" + imm_flag["Lx"] = "INAT_MAKE_IMM(INAT_IMM_BYTE)" modrm_expr = "^([CDEGMNPQRSUVW/][a-z]+|NTA|T[012])" force64_expr = "\\([df]64\\)" @@ -70,8 +71,12 @@ BEGIN { lprefix3_expr = "\\(F2\\)" max_lprefix = 4 - vexok_expr = "\\(VEX\\)" - vexonly_expr = "\\(oVEX\\)" + # All opcodes starting with lower-case 'v' or with (v1) superscript + # accepts VEX prefix + vexok_opcode_expr = "^v.*" + vexok_expr = "\\(v1\\)" + # All opcodes with (v) superscript supports *only* VEX prefix + vexonly_expr = "\\(v\\)" prefix_expr = "\\(Prefix\\)" prefix_num["Operand-Size"] = "INAT_PFX_OPNDSZ" @@ -85,8 +90,8 @@ BEGIN { prefix_num["SEG=GS"] = "INAT_PFX_GS" prefix_num["SEG=SS"] = "INAT_PFX_SS" prefix_num["Address-Size"] = "INAT_PFX_ADDRSZ" - prefix_num["2bytes-VEX"] = "INAT_PFX_VEX2" - prefix_num["3bytes-VEX"] = "INAT_PFX_VEX3" + prefix_num["VEX+1byte"] = "INAT_PFX_VEX2" + prefix_num["VEX+2byte"] = "INAT_PFX_VEX3" clear_vars() } @@ -310,12 +315,10 @@ function convert_operands(count,opnd, i,j,imm,mod) if (match(opcode, fpu_expr)) flags = add_flags(flags, "INAT_MODRM") - # check VEX only code + # check VEX codes if (match(ext, vexonly_expr)) flags = add_flags(flags, "INAT_VEXOK | INAT_VEXONLY") - - # check VEX only code - if (match(ext, vexok_expr)) + else if (match(ext, vexok_expr) || match(opcode, vexok_opcode_expr)) flags = add_flags(flags, "INAT_VEXOK") # check prefixes diff --git a/arch/x86/tools/insn_sanity.c b/arch/x86/tools/insn_sanity.c new file mode 100644 index 0000000..cc2f8c1 --- /dev/null +++ b/arch/x86/tools/insn_sanity.c @@ -0,0 +1,275 @@ +/* + * x86 decoder sanity test - based on test_get_insn.c + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ * + * Copyright (C) IBM Corporation, 2009 + * Copyright (C) Hitachi, Ltd., 2011 + */ + +#include <stdlib.h> +#include <stdio.h> +#include <string.h> +#include <assert.h> +#include <unistd.h> +#include <sys/types.h> +#include <sys/stat.h> +#include <fcntl.h> + +#define unlikely(cond) (cond) +#define ARRAY_SIZE(a) (sizeof(a)/sizeof(a[0])) + +#include <asm/insn.h> +#include <inat.c> +#include <insn.c> + +/* + * Test of instruction analysis against tampering. + * Feed random binary to instruction decoder and ensure not to + * access out-of-instruction-buffer. + */ + +#define DEFAULT_MAX_ITER 10000 +#define INSN_NOP 0x90 + +static const char *prog; /* Program name */ +static int verbose; /* Verbosity */ +static int x86_64; /* x86-64 bit mode flag */ +static unsigned int seed; /* Random seed */ +static unsigned long iter_start; /* Start of iteration number */ +static unsigned long iter_end = DEFAULT_MAX_ITER; /* End of iteration number */ +static FILE *input_file; /* Input file name */ + +static void usage(const char *err) +{ + if (err) + fprintf(stderr, "Error: %s\n\n", err); + fprintf(stderr, "Usage: %s [-y|-n|-v] [-s seed[,no]] [-m max] [-i input]\n", prog); + fprintf(stderr, "\t-y 64bit mode\n"); + fprintf(stderr, "\t-n 32bit mode\n"); + fprintf(stderr, "\t-v Verbosity(-vv dumps any decoded result)\n"); + fprintf(stderr, "\t-s Give a random seed (and iteration number)\n"); + fprintf(stderr, "\t-m Give a maximum iteration number\n"); + fprintf(stderr, "\t-i Give an input file with decoded binary\n"); + exit(1); +} + +static void dump_field(FILE *fp, const char *name, const char *indent, + struct insn_field *field) +{ + fprintf(fp, "%s.%s = {\n", indent, name); + fprintf(fp, "%s\t.value = %d, bytes[] = {%x, %x, %x, %x},\n", + indent, field->value, field->bytes[0], field->bytes[1], + field->bytes[2], field->bytes[3]); + fprintf(fp, "%s\t.got = %d, .nbytes = %d},\n", indent, + field->got, field->nbytes); +} + +static void dump_insn(FILE *fp, struct insn *insn) +{ + fprintf(fp, "Instruction = {\n"); + dump_field(fp, "prefixes", "\t", &insn->prefixes); + dump_field(fp, "rex_prefix", "\t", &insn->rex_prefix); + dump_field(fp, "vex_prefix", "\t", &insn->vex_prefix); + dump_field(fp, "opcode", "\t", &insn->opcode); + dump_field(fp, "modrm", "\t", &insn->modrm); + dump_field(fp, "sib", "\t", &insn->sib); + dump_field(fp, "displacement", "\t", &insn->displacement); + dump_field(fp, "immediate1", "\t", &insn->immediate1); + dump_field(fp, "immediate2", "\t", &insn->immediate2); + fprintf(fp, "\t.attr = %x, .opnd_bytes = %d, .addr_bytes = %d,\n", + insn->attr, insn->opnd_bytes, insn->addr_bytes); + fprintf(fp, "\t.length = %d, .x86_64 = %d, .kaddr = %p}\n", + insn->length, insn->x86_64, insn->kaddr); +} + +static void dump_stream(FILE *fp, const char *msg, unsigned long nr_iter, + unsigned char *insn_buf, struct insn *insn) +{ + int i; + + fprintf(fp, "%s:\n", msg); + + dump_insn(fp, insn); + + fprintf(fp, "You can reproduce this with below command(s);\n"); + + /* Input a decoded instruction sequence directly */ + fprintf(fp, " $ echo "); + for (i = 0; i < MAX_INSN_SIZE; i++) + fprintf(fp, " %02x", insn_buf[i]); + fprintf(fp, " | %s -i -\n", prog); + + if (!input_file) { + fprintf(fp, "Or \n"); + /* Give a seed and iteration number */ + fprintf(fp, " $ %s -s 0x%x,%lu\n", prog, seed, nr_iter); + } +} + +static void init_random_seed(void) +{ + int fd; + + fd = open("/dev/urandom", O_RDONLY); + if (fd < 0) + goto fail; + + if (read(fd, &seed, sizeof(seed)) != sizeof(seed)) + goto fail; + + close(fd); + 
return; +fail: + usage("Failed to open /dev/urandom"); +} + +/* Read given instruction sequence from the input file */ +static int read_next_insn(unsigned char *insn_buf) +{ + char buf[256] = "", *tmp; + int i; + + tmp = fgets(buf, ARRAY_SIZE(buf), input_file); + if (tmp == NULL || feof(input_file)) + return 0; + + for (i = 0; i < MAX_INSN_SIZE; i++) { + insn_buf[i] = (unsigned char)strtoul(tmp, &tmp, 16); + if (*tmp != ' ') + break; + } + + return i; +} + +static int generate_insn(unsigned char *insn_buf) +{ + int i; + + if (input_file) + return read_next_insn(insn_buf); + + /* Fills buffer with random binary up to MAX_INSN_SIZE */ + for (i = 0; i < MAX_INSN_SIZE - 1; i += 2) + *(unsigned short *)(&insn_buf[i]) = random() & 0xffff; + + while (i < MAX_INSN_SIZE) + insn_buf[i++] = random() & 0xff; + + return i; +} + +static void parse_args(int argc, char **argv) +{ + int c; + char *tmp = NULL; + int set_seed = 0; + + prog = argv[0]; + while ((c = getopt(argc, argv, "ynvs:m:i:")) != -1) { + switch (c) { + case 'y': + x86_64 = 1; + break; + case 'n': + x86_64 = 0; + break; + case 'v': + verbose++; + break; + case 'i': + if (strcmp("-", optarg) == 0) + input_file = stdin; + else + input_file = fopen(optarg, "r"); + if (!input_file) + usage("Failed to open input file"); + break; + case 's': + seed = (unsigned int)strtoul(optarg, &tmp, 0); + if (*tmp == ',') { + optarg = tmp + 1; + iter_start = strtoul(optarg, &tmp, 0); + } + if (*tmp != '\0' || tmp == optarg) + usage("Failed to parse seed"); + set_seed = 1; + break; + case 'm': + iter_end = strtoul(optarg, &tmp, 0); + if (*tmp != '\0' || tmp == optarg) + usage("Failed to parse max_iter"); + break; + default: + usage(NULL); + } + } + + /* Check errors */ + if (iter_end < iter_start) + usage("Max iteration number must be bigger than iter-num"); + + if (set_seed && input_file) + usage("Don't use input file (-i) with random seed (-s)"); + + /* Initialize random seed */ + if (!input_file) { + if (!set_seed) /* No seed is given */ + init_random_seed(); + srand(seed); + } +} + +int main(int argc, char **argv) +{ + struct insn insn; + int insns = 0; + int errors = 0; + unsigned long i; + unsigned char insn_buf[MAX_INSN_SIZE * 2]; + + parse_args(argc, argv); + + /* Prepare stop bytes with NOPs */ + memset(insn_buf + MAX_INSN_SIZE, INSN_NOP, MAX_INSN_SIZE); + + for (i = 0; i < iter_end; i++) { + if (generate_insn(insn_buf) <= 0) + break; + + if (i < iter_start) /* Skip to given iteration number */ + continue; + + /* Decode an instruction */ + insn_init(&insn, insn_buf, x86_64); + insn_get_length(&insn); + + if (insn.next_byte <= insn.kaddr || + insn.kaddr + MAX_INSN_SIZE < insn.next_byte) { + /* Access out-of-range memory */ + dump_stream(stderr, "Error: Found an access violation", i, insn_buf, &insn); + errors++; + } else if (verbose && !insn_complete(&insn)) + dump_stream(stdout, "Info: Found an undecodable input", i, insn_buf, &insn); + else if (verbose >= 2) + dump_insn(stdout, &insn); + insns++; + } + + fprintf(stdout, "%s: decoded and checked %d %s instructions with %d errors (seed:0x%x)\n", (errors) ? "Failure" : "Success", insns, (input_file) ? "given" : "random", errors, seed); + + return errors ? 1 : 0; +} diff --git a/drivers/oprofile/nmi_timer_int.c b/drivers/oprofile/nmi_timer_int.c new file mode 100644 index 0000000..76f1c93 --- /dev/null +++ b/drivers/oprofile/nmi_timer_int.c @@ -0,0 +1,173 @@ +/** + * @file nmi_timer_int.c + * + * @remark Copyright 2011 Advanced Micro Devices, Inc. 
+ * + * @author Robert Richter <robert.richter@amd.com> + */ + +#include <linux/init.h> +#include <linux/smp.h> +#include <linux/errno.h> +#include <linux/oprofile.h> +#include <linux/perf_event.h> + +#ifdef CONFIG_OPROFILE_NMI_TIMER + +static DEFINE_PER_CPU(struct perf_event *, nmi_timer_events); +static int ctr_running; + +static struct perf_event_attr nmi_timer_attr = { + .type = PERF_TYPE_HARDWARE, + .config = PERF_COUNT_HW_CPU_CYCLES, + .size = sizeof(struct perf_event_attr), + .pinned = 1, + .disabled = 1, +}; + +static void nmi_timer_callback(struct perf_event *event, + struct perf_sample_data *data, + struct pt_regs *regs) +{ + event->hw.interrupts = 0; /* don't throttle interrupts */ + oprofile_add_sample(regs, 0); +} + +static int nmi_timer_start_cpu(int cpu) +{ + struct perf_event *event = per_cpu(nmi_timer_events, cpu); + + if (!event) { + event = perf_event_create_kernel_counter(&nmi_timer_attr, cpu, NULL, + nmi_timer_callback, NULL); + if (IS_ERR(event)) + return PTR_ERR(event); + per_cpu(nmi_timer_events, cpu) = event; + } + + if (event && ctr_running) + perf_event_enable(event); + + return 0; +} + +static void nmi_timer_stop_cpu(int cpu) +{ + struct perf_event *event = per_cpu(nmi_timer_events, cpu); + + if (event && ctr_running) + perf_event_disable(event); +} + +static int nmi_timer_cpu_notifier(struct notifier_block *b, unsigned long action, + void *data) +{ + int cpu = (unsigned long)data; + switch (action) { + case CPU_DOWN_FAILED: + case CPU_ONLINE: + nmi_timer_start_cpu(cpu); + break; + case CPU_DOWN_PREPARE: + nmi_timer_stop_cpu(cpu); + break; + } + return NOTIFY_DONE; +} + +static struct notifier_block nmi_timer_cpu_nb = { + .notifier_call = nmi_timer_cpu_notifier +}; + +static int nmi_timer_start(void) +{ + int cpu; + + get_online_cpus(); + ctr_running = 1; + for_each_online_cpu(cpu) + nmi_timer_start_cpu(cpu); + put_online_cpus(); + + return 0; +} + +static void nmi_timer_stop(void) +{ + int cpu; + + get_online_cpus(); + for_each_online_cpu(cpu) + nmi_timer_stop_cpu(cpu); + ctr_running = 0; + put_online_cpus(); +} + +static void nmi_timer_shutdown(void) +{ + struct perf_event *event; + int cpu; + + get_online_cpus(); + unregister_cpu_notifier(&nmi_timer_cpu_nb); + for_each_possible_cpu(cpu) { + event = per_cpu(nmi_timer_events, cpu); + if (!event) + continue; + perf_event_disable(event); + per_cpu(nmi_timer_events, cpu) = NULL; + perf_event_release_kernel(event); + } + + put_online_cpus(); +} + +static int nmi_timer_setup(void) +{ + int cpu, err; + u64 period; + + /* clock cycles per tick: */ + period = (u64)cpu_khz * 1000; + do_div(period, HZ); + nmi_timer_attr.sample_period = period; + + get_online_cpus(); + err = register_cpu_notifier(&nmi_timer_cpu_nb); + if (err) + goto out; + /* can't attach events to offline cpus: */ + for_each_online_cpu(cpu) { + err = nmi_timer_start_cpu(cpu); + if (err) + break; + } + if (err) + nmi_timer_shutdown(); +out: + put_online_cpus(); + return err; +} + +int __init op_nmi_timer_init(struct oprofile_operations *ops) +{ + int err = 0; + + err = nmi_timer_setup(); + if (err) + return err; + nmi_timer_shutdown(); /* only check, don't alloc */ + + ops->create_files = NULL; + ops->setup = nmi_timer_setup; + ops->shutdown = nmi_timer_shutdown; + ops->start = nmi_timer_start; + ops->stop = nmi_timer_stop; + ops->cpu_type = "timer"; + + printk(KERN_INFO "oprofile: using NMI timer interrupt.\n"); + + return 0; +} + +#endif diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c index f8c752e..ed2c3ec 100644 --- 
a/drivers/oprofile/oprof.c +++ b/drivers/oprofile/oprof.c @@ -246,37 +246,31 @@ static int __init oprofile_init(void) int err; /* always init architecture to setup backtrace support */ + timer_mode = 0; err = oprofile_arch_init(&oprofile_ops); + if (!err) { + if (!timer && !oprofilefs_register()) + return 0; + oprofile_arch_exit(); + } - timer_mode = err || timer; /* fall back to timer mode on errors */ - if (timer_mode) { - if (!err) - oprofile_arch_exit(); + /* setup timer mode: */ + timer_mode = 1; + /* no nmi timer mode if oprofile.timer is set */ + if (timer || op_nmi_timer_init(&oprofile_ops)) { err = oprofile_timer_init(&oprofile_ops); if (err) return err; } - err = oprofilefs_register(); - if (!err) - return 0; - - /* failed */ - if (timer_mode) - oprofile_timer_exit(); - else - oprofile_arch_exit(); - - return err; + return oprofilefs_register(); } static void __exit oprofile_exit(void) { oprofilefs_unregister(); - if (timer_mode) - oprofile_timer_exit(); - else + if (!timer_mode) oprofile_arch_exit(); } diff --git a/drivers/oprofile/oprof.h b/drivers/oprofile/oprof.h index 177b73d..d32ef81 100644 --- a/drivers/oprofile/oprof.h +++ b/drivers/oprofile/oprof.h @@ -35,7 +35,15 @@ struct dentry; void oprofile_create_files(struct super_block *sb, struct dentry *root); int oprofile_timer_init(struct oprofile_operations *ops); -void oprofile_timer_exit(void); +#ifdef CONFIG_OPROFILE_NMI_TIMER +int op_nmi_timer_init(struct oprofile_operations *ops); +#else +static inline int op_nmi_timer_init(struct oprofile_operations *ops) +{ + return -ENODEV; +} +#endif + int oprofile_set_ulong(unsigned long *addr, unsigned long val); int oprofile_set_timeout(unsigned long time); diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c index 878fba1..93404f7 100644 --- a/drivers/oprofile/timer_int.c +++ b/drivers/oprofile/timer_int.c @@ -97,24 +97,24 @@ static struct notifier_block __refdata oprofile_cpu_notifier = { .notifier_call = oprofile_cpu_notify, }; -int oprofile_timer_init(struct oprofile_operations *ops) +static int oprofile_hrtimer_setup(void) { - int rc; - - rc = register_hotcpu_notifier(&oprofile_cpu_notifier); - if (rc) - return rc; - ops->create_files = NULL; - ops->setup = NULL; - ops->shutdown = NULL; - ops->start = oprofile_hrtimer_start; - ops->stop = oprofile_hrtimer_stop; - ops->cpu_type = "timer"; - printk(KERN_INFO "oprofile: using timer interrupt.\n"); - return 0; + return register_hotcpu_notifier(&oprofile_cpu_notifier); } -void oprofile_timer_exit(void) +static void oprofile_hrtimer_shutdown(void) { unregister_hotcpu_notifier(&oprofile_cpu_notifier); } + +int oprofile_timer_init(struct oprofile_operations *ops) +{ + ops->create_files = NULL; + ops->setup = oprofile_hrtimer_setup; + ops->shutdown = oprofile_hrtimer_shutdown; + ops->start = oprofile_hrtimer_start; + ops->stop = oprofile_hrtimer_stop; + ops->cpu_type = "timer"; + printk(KERN_INFO "oprofile: using timer interrupt.\n"); + return 0; +} diff --git a/include/linux/bitops.h b/include/linux/bitops.h index a3ef66a..3c1063a 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h @@ -22,8 +22,14 @@ extern unsigned long __sw_hweight64(__u64 w); #include <asm/bitops.h> #define for_each_set_bit(bit, addr, size) \ - for ((bit) = find_first_bit((addr), (size)); \ - (bit) < (size); \ + for ((bit) = find_first_bit((addr), (size)); \ + (bit) < (size); \ + (bit) = find_next_bit((addr), (size), (bit) + 1)) + +/* same as for_each_set_bit() but use bit as value to start with */ +#define 
for_each_set_bit_cont(bit, addr, size) \ + for ((bit) = find_next_bit((addr), (size), (bit)); \ + (bit) < (size); \ (bit) = find_next_bit((addr), (size), (bit) + 1)) static __inline__ int get_bitmask_order(unsigned int count) diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h index 388b0d4..5ce8b14 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h @@ -3,6 +3,7 @@ #include <linux/types.h> #include <linux/compiler.h> +#include <linux/workqueue.h> #if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL) @@ -14,6 +15,12 @@ struct jump_label_key { #endif }; +struct jump_label_key_deferred { + struct jump_label_key key; + unsigned long timeout; + struct delayed_work work; +}; + # include <asm/jump_label.h> # define HAVE_JUMP_LABEL #endif /* CC_HAVE_ASM_GOTO && CONFIG_JUMP_LABEL */ @@ -51,8 +58,11 @@ extern void arch_jump_label_transform_static(struct jump_entry *entry, extern int jump_label_text_reserved(void *start, void *end); extern void jump_label_inc(struct jump_label_key *key); extern void jump_label_dec(struct jump_label_key *key); +extern void jump_label_dec_deferred(struct jump_label_key_deferred *key); extern bool jump_label_enabled(struct jump_label_key *key); extern void jump_label_apply_nops(struct module *mod); +extern void jump_label_rate_limit(struct jump_label_key_deferred *key, + unsigned long rl); #else /* !HAVE_JUMP_LABEL */ @@ -68,6 +78,10 @@ static __always_inline void jump_label_init(void) { } +struct jump_label_key_deferred { + struct jump_label_key key; +}; + static __always_inline bool static_branch(struct jump_label_key *key) { if (unlikely(atomic_read(&key->enabled))) @@ -85,6 +99,11 @@ static inline void jump_label_dec(struct jump_label_key *key) atomic_dec(&key->enabled); } +static inline void jump_label_dec_deferred(struct jump_label_key_deferred *key) +{ + jump_label_dec(&key->key); +} + static inline int jump_label_text_reserved(void *start, void *end) { return 0; @@ -102,6 +121,14 @@ static inline int jump_label_apply_nops(struct module *mod) { return 0; } + +static inline void jump_label_rate_limit(struct jump_label_key_deferred *key, + unsigned long rl) +{ +} #endif /* HAVE_JUMP_LABEL */ +#define jump_label_key_enabled ((struct jump_label_key){ .enabled = ATOMIC_INIT(1), }) +#define jump_label_key_disabled ((struct jump_label_key){ .enabled = ATOMIC_INIT(0), }) + #endif /* _LINUX_JUMP_LABEL_H */ diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index b1f8912..0885561 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -54,6 +54,7 @@ enum perf_hw_id { PERF_COUNT_HW_BUS_CYCLES = 6, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 7, PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 8, + PERF_COUNT_HW_REF_CPU_CYCLES = 9, PERF_COUNT_HW_MAX, /* non-ABI */ }; @@ -890,6 +891,7 @@ struct perf_event_context { int nr_active; int is_active; int nr_stat; + int nr_freq; int rotate_disable; atomic_t refcount; struct task_struct *task; @@ -1063,12 +1065,12 @@ perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) } } -extern struct jump_label_key perf_sched_events; +extern struct jump_label_key_deferred perf_sched_events; static inline void perf_event_task_sched_in(struct task_struct *prev, struct task_struct *task) { - if (static_branch(&perf_sched_events)) + if (static_branch(&perf_sched_events.key)) __perf_event_task_sched_in(prev, task); } @@ -1077,7 +1079,7 @@ static inline void perf_event_task_sched_out(struct task_struct *prev, { perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0); - 
if (static_branch(&perf_sched_events)) + if (static_branch(&perf_sched_events.key)) __perf_event_task_sched_out(prev, next); } diff --git a/kernel/events/Makefile b/kernel/events/Makefile index 89e5e8a..22d901f 100644 --- a/kernel/events/Makefile +++ b/kernel/events/Makefile @@ -2,5 +2,5 @@ ifdef CONFIG_FUNCTION_TRACER CFLAGS_REMOVE_core.o = -pg endif -obj-y := core.o ring_buffer.o +obj-y := core.o ring_buffer.o callchain.o obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c new file mode 100644 index 0000000..057e24b --- /dev/null +++ b/kernel/events/callchain.c @@ -0,0 +1,191 @@ +/* + * Performance events callchain code, extracted from core.c: + * + * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de> + * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar + * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> + * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com> + * + * For licensing details see kernel-base/COPYING + */ + +#include <linux/perf_event.h> +#include <linux/slab.h> +#include "internal.h" + +struct callchain_cpus_entries { + struct rcu_head rcu_head; + struct perf_callchain_entry *cpu_entries[0]; +}; + +static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]); +static atomic_t nr_callchain_events; +static DEFINE_MUTEX(callchain_mutex); +static struct callchain_cpus_entries *callchain_cpus_entries; + + +__weak void perf_callchain_kernel(struct perf_callchain_entry *entry, + struct pt_regs *regs) +{ +} + +__weak void perf_callchain_user(struct perf_callchain_entry *entry, + struct pt_regs *regs) +{ +} + +static void release_callchain_buffers_rcu(struct rcu_head *head) +{ + struct callchain_cpus_entries *entries; + int cpu; + + entries = container_of(head, struct callchain_cpus_entries, rcu_head); + + for_each_possible_cpu(cpu) + kfree(entries->cpu_entries[cpu]); + + kfree(entries); +} + +static void release_callchain_buffers(void) +{ + struct callchain_cpus_entries *entries; + + entries = callchain_cpus_entries; + rcu_assign_pointer(callchain_cpus_entries, NULL); + call_rcu(&entries->rcu_head, release_callchain_buffers_rcu); +} + +static int alloc_callchain_buffers(void) +{ + int cpu; + int size; + struct callchain_cpus_entries *entries; + + /* + * We can't use the percpu allocation API for data that can be + * accessed from NMI. Use a temporary manual per cpu allocation + * until that gets sorted out. 
+ */ + size = offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]); + + entries = kzalloc(size, GFP_KERNEL); + if (!entries) + return -ENOMEM; + + size = sizeof(struct perf_callchain_entry) * PERF_NR_CONTEXTS; + + for_each_possible_cpu(cpu) { + entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL, + cpu_to_node(cpu)); + if (!entries->cpu_entries[cpu]) + goto fail; + } + + rcu_assign_pointer(callchain_cpus_entries, entries); + + return 0; + +fail: + for_each_possible_cpu(cpu) + kfree(entries->cpu_entries[cpu]); + kfree(entries); + + return -ENOMEM; +} + +int get_callchain_buffers(void) +{ + int err = 0; + int count; + + mutex_lock(&callchain_mutex); + + count = atomic_inc_return(&nr_callchain_events); + if (WARN_ON_ONCE(count < 1)) { + err = -EINVAL; + goto exit; + } + + if (count > 1) { + /* If the allocation failed, give up */ + if (!callchain_cpus_entries) + err = -ENOMEM; + goto exit; + } + + err = alloc_callchain_buffers(); + if (err) + release_callchain_buffers(); +exit: + mutex_unlock(&callchain_mutex); + + return err; +} + +void put_callchain_buffers(void) +{ + if (atomic_dec_and_mutex_lock(&nr_callchain_events, &callchain_mutex)) { + release_callchain_buffers(); + mutex_unlock(&callchain_mutex); + } +} + +static struct perf_callchain_entry *get_callchain_entry(int *rctx) +{ + int cpu; + struct callchain_cpus_entries *entries; + + *rctx = get_recursion_context(__get_cpu_var(callchain_recursion)); + if (*rctx == -1) + return NULL; + + entries = rcu_dereference(callchain_cpus_entries); + if (!entries) + return NULL; + + cpu = smp_processor_id(); + + return &entries->cpu_entries[cpu][*rctx]; +} + +static void +put_callchain_entry(int rctx) +{ + put_recursion_context(__get_cpu_var(callchain_recursion), rctx); +} + +struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) +{ + int rctx; + struct perf_callchain_entry *entry; + + + entry = get_callchain_entry(&rctx); + if (rctx == -1) + return NULL; + + if (!entry) + goto exit_put; + + entry->nr = 0; + + if (!user_mode(regs)) { + perf_callchain_store(entry, PERF_CONTEXT_KERNEL); + perf_callchain_kernel(entry, regs); + if (current->mm) + regs = task_pt_regs(current); + else + regs = NULL; + } + + if (regs) { + perf_callchain_store(entry, PERF_CONTEXT_USER); + perf_callchain_user(entry, regs); + } + +exit_put: + put_callchain_entry(rctx); + + return entry; +} diff --git a/kernel/events/core.c b/kernel/events/core.c index fc0e7ff..890eb02 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -128,7 +128,7 @@ enum event_type_t { * perf_sched_events : >0 events exist * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu */ -struct jump_label_key perf_sched_events __read_mostly; +struct jump_label_key_deferred perf_sched_events __read_mostly; static DEFINE_PER_CPU(atomic_t, perf_cgroup_events); static atomic_t nr_mmap_events __read_mostly; @@ -1130,6 +1130,8 @@ event_sched_out(struct perf_event *event, if (!is_software_event(event)) cpuctx->active_oncpu--; ctx->nr_active--; + if (event->attr.freq && event->attr.sample_freq) + ctx->nr_freq--; if (event->attr.exclusive || !cpuctx->active_oncpu) cpuctx->exclusive = 0; } @@ -1325,6 +1327,7 @@ retry: } raw_spin_unlock_irq(&ctx->lock); } +EXPORT_SYMBOL_GPL(perf_event_disable); static void perf_set_shadow_time(struct perf_event *event, struct perf_event_context *ctx, @@ -1406,6 +1409,8 @@ event_sched_in(struct perf_event *event, if (!is_software_event(event)) cpuctx->active_oncpu++; ctx->nr_active++; + if (event->attr.freq && event->attr.sample_freq) + 
ctx->nr_freq++; if (event->attr.exclusive) cpuctx->exclusive = 1; @@ -1662,8 +1667,7 @@ retry: * Note: this works for group members as well as group leaders * since the non-leader members' sibling_lists will be empty. */ -static void __perf_event_mark_enabled(struct perf_event *event, - struct perf_event_context *ctx) +static void __perf_event_mark_enabled(struct perf_event *event) { struct perf_event *sub; u64 tstamp = perf_event_time(event); @@ -1701,7 +1705,7 @@ static int __perf_event_enable(void *info) */ perf_cgroup_set_timestamp(current, ctx); - __perf_event_mark_enabled(event, ctx); + __perf_event_mark_enabled(event); if (!event_filter_match(event)) { if (is_cgroup_event(event)) @@ -1782,7 +1786,7 @@ void perf_event_enable(struct perf_event *event) retry: if (!ctx->is_active) { - __perf_event_mark_enabled(event, ctx); + __perf_event_mark_enabled(event); goto out; } @@ -1809,6 +1813,7 @@ retry: out: raw_spin_unlock_irq(&ctx->lock); } +EXPORT_SYMBOL_GPL(perf_event_enable); int perf_event_refresh(struct perf_event *event, int refresh) { @@ -2327,6 +2332,9 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx, u64 period) u64 interrupts, now; s64 delta; + if (!ctx->nr_freq) + return; + list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { if (event->state != PERF_EVENT_STATE_ACTIVE) continue; @@ -2382,12 +2390,14 @@ static void perf_rotate_context(struct perf_cpu_context *cpuctx) { u64 interval = (u64)cpuctx->jiffies_interval * TICK_NSEC; struct perf_event_context *ctx = NULL; - int rotate = 0, remove = 1; + int rotate = 0, remove = 1, freq = 0; if (cpuctx->ctx.nr_events) { remove = 0; if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active) rotate = 1; + if (cpuctx->ctx.nr_freq) + freq = 1; } ctx = cpuctx->task_ctx; @@ -2395,33 +2405,40 @@ static void perf_rotate_context(struct perf_cpu_context *cpuctx) remove = 0; if (ctx->nr_events != ctx->nr_active) rotate = 1; + if (ctx->nr_freq) + freq = 1; } + if (!rotate && !freq) + goto done; + perf_ctx_lock(cpuctx, cpuctx->task_ctx); perf_pmu_disable(cpuctx->ctx.pmu); - perf_ctx_adjust_freq(&cpuctx->ctx, interval); - if (ctx) - perf_ctx_adjust_freq(ctx, interval); - if (!rotate) - goto done; + if (freq) { + perf_ctx_adjust_freq(&cpuctx->ctx, interval); + if (ctx) + perf_ctx_adjust_freq(ctx, interval); + } - cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); - if (ctx) - ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE); + if (rotate) { + cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); + if (ctx) + ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE); - rotate_ctx(&cpuctx->ctx); - if (ctx) - rotate_ctx(ctx); + rotate_ctx(&cpuctx->ctx); + if (ctx) + rotate_ctx(ctx); - perf_event_sched_in(cpuctx, ctx, current); + perf_event_sched_in(cpuctx, ctx, current); + } + + perf_pmu_enable(cpuctx->ctx.pmu); + perf_ctx_unlock(cpuctx, cpuctx->task_ctx); done: if (remove) list_del_init(&cpuctx->rotation_list); - - perf_pmu_enable(cpuctx->ctx.pmu); - perf_ctx_unlock(cpuctx, cpuctx->task_ctx); } void perf_event_task_tick(void) @@ -2448,7 +2465,7 @@ static int event_enable_on_exec(struct perf_event *event, if (event->state >= PERF_EVENT_STATE_INACTIVE) return 0; - __perf_event_mark_enabled(event, ctx); + __perf_event_mark_enabled(event); return 1; } @@ -2480,13 +2497,7 @@ static void perf_event_enable_on_exec(struct perf_event_context *ctx) raw_spin_lock(&ctx->lock); task_ctx_sched_out(ctx); - list_for_each_entry(event, &ctx->pinned_groups, group_entry) { - ret = event_enable_on_exec(event, ctx); - if (ret) - enabled = 1; - } - - list_for_each_entry(event, 
&ctx->flexible_groups, group_entry) { + list_for_each_entry(event, &ctx->event_list, event_entry) { ret = event_enable_on_exec(event, ctx); if (ret) enabled = 1; @@ -2574,215 +2585,6 @@ static u64 perf_event_read(struct perf_event *event) } /* - * Callchain support - */ - -struct callchain_cpus_entries { - struct rcu_head rcu_head; - struct perf_callchain_entry *cpu_entries[0]; -}; - -static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]); -static atomic_t nr_callchain_events; -static DEFINE_MUTEX(callchain_mutex); -struct callchain_cpus_entries *callchain_cpus_entries; - - -__weak void perf_callchain_kernel(struct perf_callchain_entry *entry, - struct pt_regs *regs) -{ -} - -__weak void perf_callchain_user(struct perf_callchain_entry *entry, - struct pt_regs *regs) -{ -} - -static void release_callchain_buffers_rcu(struct rcu_head *head) -{ - struct callchain_cpus_entries *entries; - int cpu; - - entries = container_of(head, struct callchain_cpus_entries, rcu_head); - - for_each_possible_cpu(cpu) - kfree(entries->cpu_entries[cpu]); - - kfree(entries); -} - -static void release_callchain_buffers(void) -{ - struct callchain_cpus_entries *entries; - - entries = callchain_cpus_entries; - rcu_assign_pointer(callchain_cpus_entries, NULL); - call_rcu(&entries->rcu_head, release_callchain_buffers_rcu); -} - -static int alloc_callchain_buffers(void) -{ - int cpu; - int size; - struct callchain_cpus_entries *entries; - - /* - * We can't use the percpu allocation API for data that can be - * accessed from NMI. Use a temporary manual per cpu allocation - * until that gets sorted out. - */ - size = offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]); - - entries = kzalloc(size, GFP_KERNEL); - if (!entries) - return -ENOMEM; - - size = sizeof(struct perf_callchain_entry) * PERF_NR_CONTEXTS; - - for_each_possible_cpu(cpu) { - entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL, - cpu_to_node(cpu)); - if (!entries->cpu_entries[cpu]) - goto fail; - } - - rcu_assign_pointer(callchain_cpus_entries, entries); - - return 0; - -fail: - for_each_possible_cpu(cpu) - kfree(entries->cpu_entries[cpu]); - kfree(entries); - - return -ENOMEM; -} - -static int get_callchain_buffers(void) -{ - int err = 0; - int count; - - mutex_lock(&callchain_mutex); - - count = atomic_inc_return(&nr_callchain_events); - if (WARN_ON_ONCE(count < 1)) { - err = -EINVAL; - goto exit; - } - - if (count > 1) { - /* If the allocation failed, give up */ - if (!callchain_cpus_entries) - err = -ENOMEM; - goto exit; - } - - err = alloc_callchain_buffers(); - if (err) - release_callchain_buffers(); -exit: - mutex_unlock(&callchain_mutex); - - return err; -} - -static void put_callchain_buffers(void) -{ - if (atomic_dec_and_mutex_lock(&nr_callchain_events, &callchain_mutex)) { - release_callchain_buffers(); - mutex_unlock(&callchain_mutex); - } -} - -static int get_recursion_context(int *recursion) -{ - int rctx; - - if (in_nmi()) - rctx = 3; - else if (in_irq()) - rctx = 2; - else if (in_softirq()) - rctx = 1; - else - rctx = 0; - - if (recursion[rctx]) - return -1; - - recursion[rctx]++; - barrier(); - - return rctx; -} - -static inline void put_recursion_context(int *recursion, int rctx) -{ - barrier(); - recursion[rctx]--; -} - -static struct perf_callchain_entry *get_callchain_entry(int *rctx) -{ - int cpu; - struct callchain_cpus_entries *entries; - - *rctx = get_recursion_context(__get_cpu_var(callchain_recursion)); - if (*rctx == -1) - return NULL; - - entries = rcu_dereference(callchain_cpus_entries); - if 
(!entries) - return NULL; - - cpu = smp_processor_id(); - - return &entries->cpu_entries[cpu][*rctx]; -} - -static void -put_callchain_entry(int rctx) -{ - put_recursion_context(__get_cpu_var(callchain_recursion), rctx); -} - -static struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) -{ - int rctx; - struct perf_callchain_entry *entry; - - - entry = get_callchain_entry(&rctx); - if (rctx == -1) - return NULL; - - if (!entry) - goto exit_put; - - entry->nr = 0; - - if (!user_mode(regs)) { - perf_callchain_store(entry, PERF_CONTEXT_KERNEL); - perf_callchain_kernel(entry, regs); - if (current->mm) - regs = task_pt_regs(current); - else - regs = NULL; - } - - if (regs) { - perf_callchain_store(entry, PERF_CONTEXT_USER); - perf_callchain_user(entry, regs); - } - -exit_put: - put_callchain_entry(rctx); - - return entry; -} - -/* * Initialize the perf_event context in a task_struct: */ static void __perf_event_init_context(struct perf_event_context *ctx) @@ -2946,7 +2748,7 @@ static void free_event(struct perf_event *event) if (!event->parent) { if (event->attach_state & PERF_ATTACH_TASK) - jump_label_dec(&perf_sched_events); + jump_label_dec_deferred(&perf_sched_events); if (event->attr.mmap || event->attr.mmap_data) atomic_dec(&nr_mmap_events); if (event->attr.comm) @@ -2957,7 +2759,7 @@ static void free_event(struct perf_event *event) put_callchain_buffers(); if (is_cgroup_event(event)) { atomic_dec(&per_cpu(perf_cgroup_events, event->cpu)); - jump_label_dec(&perf_sched_events); + jump_label_dec_deferred(&perf_sched_events); } } @@ -4820,7 +4622,6 @@ static void perf_swevent_overflow(struct perf_event *event, u64 overflow, struct hw_perf_event *hwc = &event->hw; int throttle = 0; - data->period = event->hw.last_period; if (!overflow) overflow = perf_swevent_set_period(event); @@ -4854,6 +4655,12 @@ static void perf_swevent_event(struct perf_event *event, u64 nr, if (!is_sampling_event(event)) return; + if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) { + data->period = nr; + return perf_swevent_overflow(event, 1, data, regs); + } else + data->period = event->hw.last_period; + if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq) return perf_swevent_overflow(event, 1, data, regs); @@ -5981,7 +5788,7 @@ done: if (!event->parent) { if (event->attach_state & PERF_ATTACH_TASK) - jump_label_inc(&perf_sched_events); + jump_label_inc(&perf_sched_events.key); if (event->attr.mmap || event->attr.mmap_data) atomic_inc(&nr_mmap_events); if (event->attr.comm) @@ -6219,7 +6026,7 @@ SYSCALL_DEFINE5(perf_event_open, * - that may need work on context switch */ atomic_inc(&per_cpu(perf_cgroup_events, event->cpu)); - jump_label_inc(&perf_sched_events); + jump_label_inc(&perf_sched_events.key); } /* @@ -7065,6 +6872,9 @@ void __init perf_event_init(void) ret = init_hw_breakpoint(); WARN(ret, "hw_breakpoint initialization failed with: %d", ret); + + /* do not patch jump label more than once per second */ + jump_label_rate_limit(&perf_sched_events, HZ); } static int __init perf_event_sysfs_init(void) diff --git a/kernel/events/internal.h b/kernel/events/internal.h index 64568a6..b0b107f 100644 --- a/kernel/events/internal.h +++ b/kernel/events/internal.h @@ -1,6 +1,10 @@ #ifndef _KERNEL_EVENTS_INTERNAL_H #define _KERNEL_EVENTS_INTERNAL_H +#include <linux/hardirq.h> + +/* Buffer handling */ + #define RING_BUFFER_WRITABLE 0x01 struct ring_buffer { @@ -67,7 +71,7 @@ static inline int page_order(struct ring_buffer *rb) } #endif -static unsigned long perf_data_size(struct 
ring_buffer *rb) +static inline unsigned long perf_data_size(struct ring_buffer *rb) { return rb->nr_pages << (PAGE_SHIFT + page_order(rb)); } @@ -96,4 +100,37 @@ __output_copy(struct perf_output_handle *handle, } while (len); } +/* Callchain handling */ +extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs); +extern int get_callchain_buffers(void); +extern void put_callchain_buffers(void); + +static inline int get_recursion_context(int *recursion) +{ + int rctx; + + if (in_nmi()) + rctx = 3; + else if (in_irq()) + rctx = 2; + else if (in_softirq()) + rctx = 1; + else + rctx = 0; + + if (recursion[rctx]) + return -1; + + recursion[rctx]++; + barrier(); + + return rctx; +} + +static inline void put_recursion_context(int *recursion, int rctx) +{ + barrier(); + recursion[rctx]--; +} + #endif /* _KERNEL_EVENTS_INTERNAL_H */ diff --git a/kernel/jump_label.c b/kernel/jump_label.c index 66ff710..30c3c77 100644 --- a/kernel/jump_label.c +++ b/kernel/jump_label.c @@ -72,15 +72,46 @@ void jump_label_inc(struct jump_label_key *key) jump_label_unlock(); } -void jump_label_dec(struct jump_label_key *key) +static void __jump_label_dec(struct jump_label_key *key, + unsigned long rate_limit, struct delayed_work *work) { if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) return; - jump_label_update(key, JUMP_LABEL_DISABLE); + if (rate_limit) { + atomic_inc(&key->enabled); + schedule_delayed_work(work, rate_limit); + } else + jump_label_update(key, JUMP_LABEL_DISABLE); + jump_label_unlock(); } +static void jump_label_update_timeout(struct work_struct *work) +{ + struct jump_label_key_deferred *key = + container_of(work, struct jump_label_key_deferred, work.work); + __jump_label_dec(&key->key, 0, NULL); +} + +void jump_label_dec(struct jump_label_key *key) +{ + __jump_label_dec(key, 0, NULL); +} + +void jump_label_dec_deferred(struct jump_label_key_deferred *key) +{ + __jump_label_dec(&key->key, key->timeout, &key->work); +} + + +void jump_label_rate_limit(struct jump_label_key_deferred *key, + unsigned long rl) +{ + key->timeout = rl; + INIT_DELAYED_WORK(&key->work, jump_label_update_timeout); +} + static int addr_conflict(struct jump_entry *entry, void *start, void *end) { if (entry->code <= (unsigned long)end && @@ -111,7 +142,7 @@ static int __jump_label_text_reserved(struct jump_entry *iter_start, * running code can override this to make the non-live update case * cheaper. */ -void __weak arch_jump_label_transform_static(struct jump_entry *entry, +void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry *entry, enum jump_label_type type) { arch_jump_label_transform(entry, type); @@ -217,8 +248,13 @@ void jump_label_apply_nops(struct module *mod) if (iter_start == iter_stop) return; - for (iter = iter_start; iter < iter_stop; iter++) - arch_jump_label_transform_static(iter, JUMP_LABEL_DISABLE); + for (iter = iter_start; iter < iter_stop; iter++) { + struct jump_label_key *iterk; + + iterk = (struct jump_label_key *)(unsigned long)iter->key; + arch_jump_label_transform_static(iter, jump_label_enabled(iterk) ? 
+ JUMP_LABEL_ENABLE : JUMP_LABEL_DISABLE); + } } static int jump_label_add_module(struct module *mod) @@ -258,8 +294,7 @@ static int jump_label_add_module(struct module *mod) key->next = jlm; if (jump_label_enabled(key)) - __jump_label_update(key, iter, iter_stop, - JUMP_LABEL_ENABLE); + __jump_label_update(key, iter, iter_stop, JUMP_LABEL_ENABLE); } return 0; diff --git a/kernel/lockdep.c b/kernel/lockdep.c index 8fb7551..8889f7d 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c @@ -500,36 +500,32 @@ void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS]) usage[i] = '\0'; } -static int __print_lock_name(struct lock_class *class) +static void __print_lock_name(struct lock_class *class) { char str[KSYM_NAME_LEN]; const char *name; name = class->name; - if (!name) - name = __get_key_name(class->key, str); - - return printk("%s", name); -} - -static void print_lock_name(struct lock_class *class) -{ - char str[KSYM_NAME_LEN], usage[LOCK_USAGE_CHARS]; - const char *name; - - get_usage_chars(class, usage); - - name = class->name; if (!name) { name = __get_key_name(class->key, str); - printk(" (%s", name); + printk("%s", name); } else { - printk(" (%s", name); + printk("%s", name); if (class->name_version > 1) printk("#%d", class->name_version); if (class->subclass) printk("/%d", class->subclass); } +} + +static void print_lock_name(struct lock_class *class) +{ + char usage[LOCK_USAGE_CHARS]; + + get_usage_chars(class, usage); + + printk(" ("); + __print_lock_name(class); printk("){%s}", usage); } diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index a043d22..91dc4bc 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -338,7 +338,8 @@ static DECLARE_WAIT_QUEUE_HEAD(trace_wait); /* trace_flags holds trace_options default values */ unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME | - TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE; + TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | + TRACE_ITER_IRQ_INFO; static int trace_stop_count; static DEFINE_RAW_SPINLOCK(tracing_start_lock); @@ -426,6 +427,7 @@ static const char *trace_options[] = { "record-cmd", "overwrite", "disable_on_free", + "irq-info", NULL }; @@ -1843,6 +1845,33 @@ static void s_stop(struct seq_file *m, void *p) trace_event_read_unlock(); } +static void +get_total_entries(struct trace_array *tr, unsigned long *total, unsigned long *entries) +{ + unsigned long count; + int cpu; + + *total = 0; + *entries = 0; + + for_each_tracing_cpu(cpu) { + count = ring_buffer_entries_cpu(tr->buffer, cpu); + /* + * If this buffer has skipped entries, then we hold all + * entries for the trace and we need to ignore the + * ones before the time stamp. 
+ */ + if (tr->data[cpu]->skipped_entries) { + count -= tr->data[cpu]->skipped_entries; + /* total is the same as the entries */ + *total += count; + } else + *total += count + + ring_buffer_overrun_cpu(tr->buffer, cpu); + *entries += count; + } +} + static void print_lat_help_header(struct seq_file *m) { seq_puts(m, "# _------=> CPU# \n"); @@ -1855,12 +1884,35 @@ static void print_lat_help_header(struct seq_file *m) seq_puts(m, "# \\ / ||||| \\ | / \n"); } -static void print_func_help_header(struct seq_file *m) +static void print_event_info(struct trace_array *tr, struct seq_file *m) +{ + unsigned long total; + unsigned long entries; + + get_total_entries(tr, &total, &entries); + seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n", + entries, total, num_online_cpus()); + seq_puts(m, "#\n"); +} + +static void print_func_help_header(struct trace_array *tr, struct seq_file *m) { - seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n"); + print_event_info(tr, m); + seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n"); seq_puts(m, "# | | | | |\n"); } +static void print_func_help_header_irq(struct trace_array *tr, struct seq_file *m) +{ + print_event_info(tr, m); + seq_puts(m, "# _-----=> irqs-off\n"); + seq_puts(m, "# / _----=> need-resched\n"); + seq_puts(m, "# | / _---=> hardirq/softirq\n"); + seq_puts(m, "# || / _--=> preempt-depth\n"); + seq_puts(m, "# ||| / delay\n"); + seq_puts(m, "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n"); + seq_puts(m, "# | | | |||| | |\n"); +} void print_trace_header(struct seq_file *m, struct trace_iterator *iter) @@ -1869,32 +1921,14 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter) struct trace_array *tr = iter->tr; struct trace_array_cpu *data = tr->data[tr->cpu]; struct tracer *type = current_trace; - unsigned long entries = 0; - unsigned long total = 0; - unsigned long count; + unsigned long entries; + unsigned long total; const char *name = "preemption"; - int cpu; if (type) name = type->name; - - for_each_tracing_cpu(cpu) { - count = ring_buffer_entries_cpu(tr->buffer, cpu); - /* - * If this buffer has skipped entries, then we hold all - * entries for the trace and we need to ignore the - * ones before the time stamp. 
- */ - if (tr->data[cpu]->skipped_entries) { - count -= tr->data[cpu]->skipped_entries; - /* total is the same as the entries */ - total += count; - } else - total += count + - ring_buffer_overrun_cpu(tr->buffer, cpu); - entries += count; - } + get_total_entries(tr, &total, &entries); seq_printf(m, "# %s latency trace v1.1.5 on %s\n", name, UTS_RELEASE); @@ -2140,6 +2174,21 @@ enum print_line_t print_trace_line(struct trace_iterator *iter) return print_trace_fmt(iter); } +void trace_latency_header(struct seq_file *m) +{ + struct trace_iterator *iter = m->private; + + /* print nothing if the buffers are empty */ + if (trace_empty(iter)) + return; + + if (iter->iter_flags & TRACE_FILE_LAT_FMT) + print_trace_header(m, iter); + + if (!(trace_flags & TRACE_ITER_VERBOSE)) + print_lat_help_header(m); +} + void trace_default_header(struct seq_file *m) { struct trace_iterator *iter = m->private; @@ -2155,8 +2204,12 @@ void trace_default_header(struct seq_file *m) if (!(trace_flags & TRACE_ITER_VERBOSE)) print_lat_help_header(m); } else { - if (!(trace_flags & TRACE_ITER_VERBOSE)) - print_func_help_header(m); + if (!(trace_flags & TRACE_ITER_VERBOSE)) { + if (trace_flags & TRACE_ITER_IRQ_INFO) + print_func_help_header_irq(iter->tr, m); + else + print_func_help_header(iter->tr, m); + } } } diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 092e1f8..2c26574 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -370,6 +370,7 @@ void trace_graph_function(struct trace_array *tr, unsigned long ip, unsigned long parent_ip, unsigned long flags, int pc); +void trace_latency_header(struct seq_file *m); void trace_default_header(struct seq_file *m); void print_trace_header(struct seq_file *m, struct trace_iterator *iter); int trace_empty(struct trace_iterator *iter); @@ -654,6 +655,7 @@ enum trace_iterator_flags { TRACE_ITER_RECORD_CMD = 0x100000, TRACE_ITER_OVERWRITE = 0x200000, TRACE_ITER_STOP_ON_FREE = 0x400000, + TRACE_ITER_IRQ_INFO = 0x800000, }; /* diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index 95dc31e..f04cc31 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c @@ -27,6 +27,12 @@ #include "trace.h" #include "trace_output.h" +#define DEFAULT_SYS_FILTER_MESSAGE \ + "### global filter ###\n" \ + "# Use this to set filters for multiple events.\n" \ + "# Only events with the given fields will be affected.\n" \ + "# If no events are modified, an error message will be displayed here" + enum filter_op_ids { OP_OR, @@ -646,7 +652,7 @@ void print_subsystem_event_filter(struct event_subsystem *system, if (filter && filter->filter_string) trace_seq_printf(s, "%s\n", filter->filter_string); else - trace_seq_printf(s, "none\n"); + trace_seq_printf(s, DEFAULT_SYS_FILTER_MESSAGE "\n"); mutex_unlock(&event_mutex); } @@ -1838,7 +1844,10 @@ int apply_subsystem_event_filter(struct event_subsystem *system, if (!filter) goto out; - replace_filter_string(filter, filter_string); + /* System filters just show a default message */ + kfree(filter->filter_string); + filter->filter_string = NULL; + /* * No event actually uses the system filter * we can free it without synchronize_sched(). 
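The TRACE_ITER_IRQ_INFO changes in this patch (the new trace option, the extra header lines, and the trace_print_lat_fmt() call added to trace_print_context()) all describe the same four per-entry columns: irqs-off, need-resched, hardirq/softirq and preempt-depth. Below is a simplified, self-contained sketch of how such columns can be derived from entry flags; the TRACE_FLAG_* values are redefined locally and assumed to match the kernel's enum trace_flag_type of this era, and the real trace_print_lat_fmt() handles additional cases (e.g. IRQS_NOSUPPORT).

    #include <stdio.h>

    /* Local redefinitions so the example stands alone (assumed to
     * mirror the kernel's enum trace_flag_type). */
    #define TRACE_FLAG_IRQS_OFF     0x01
    #define TRACE_FLAG_NEED_RESCHED 0x04
    #define TRACE_FLAG_HARDIRQ      0x08
    #define TRACE_FLAG_SOFTIRQ      0x10

    static void print_irq_info(unsigned char flags, unsigned char preempt_count)
    {
        char irqs_off     = (flags & TRACE_FLAG_IRQS_OFF) ? 'd' : '.';
        char need_resched = (flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.';
        char hardsoft_irq = (flags & TRACE_FLAG_HARDIRQ) ? 'h' :
                            (flags & TRACE_FLAG_SOFTIRQ) ? 's' : '.';

        if (preempt_count)
            printf("%c%c%c%x", irqs_off, need_resched, hardsoft_irq,
                   preempt_count & 0xf);
        else
            printf("%c%c%c.", irqs_off, need_resched, hardsoft_irq);
    }

    int main(void)
    {
        print_irq_info(TRACE_FLAG_IRQS_OFF | TRACE_FLAG_HARDIRQ, 1);
        printf("\n");   /* prints "d.h1" */
        return 0;
    }

With the option set, each trace line gains such a column block between the CPU and timestamp fields, matching the "||||" legend in the new print_func_help_header_irq() output.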
@@ -1848,14 +1857,12 @@ int apply_subsystem_event_filter(struct event_subsystem *system, parse_init(ps, filter_ops, filter_string); err = filter_parse(ps); - if (err) { - append_filter_err(ps, system->filter); - goto out; - } + if (err) + goto err_filter; err = replace_system_preds(system, ps, filter_string); if (err) - append_filter_err(ps, system->filter); + goto err_filter; out: filter_opstack_clear(ps); @@ -1865,6 +1872,11 @@ out_unlock: mutex_unlock(&event_mutex); return err; + +err_filter: + replace_filter_string(filter, filter_string); + append_filter_err(ps, system->filter); + goto out; } #ifdef CONFIG_PERF_EVENTS diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c index 20dad0d..99d20e9 100644 --- a/kernel/trace/trace_irqsoff.c +++ b/kernel/trace/trace_irqsoff.c @@ -280,9 +280,20 @@ static enum print_line_t irqsoff_print_line(struct trace_iterator *iter) } static void irqsoff_graph_return(struct ftrace_graph_ret *trace) { } -static void irqsoff_print_header(struct seq_file *s) { } static void irqsoff_trace_open(struct trace_iterator *iter) { } static void irqsoff_trace_close(struct trace_iterator *iter) { } + +#ifdef CONFIG_FUNCTION_TRACER +static void irqsoff_print_header(struct seq_file *s) +{ + trace_default_header(s); +} +#else +static void irqsoff_print_header(struct seq_file *s) +{ + trace_latency_header(s); +} +#endif /* CONFIG_FUNCTION_TRACER */ #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ /* diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c index 5199930..0d6ff35 100644 --- a/kernel/trace/trace_output.c +++ b/kernel/trace/trace_output.c @@ -627,11 +627,23 @@ int trace_print_context(struct trace_iterator *iter) unsigned long usec_rem = do_div(t, USEC_PER_SEC); unsigned long secs = (unsigned long)t; char comm[TASK_COMM_LEN]; + int ret; trace_find_cmdline(entry->pid, comm); - return trace_seq_printf(s, "%16s-%-5d [%03d] %5lu.%06lu: ", - comm, entry->pid, iter->cpu, secs, usec_rem); + ret = trace_seq_printf(s, "%16s-%-5d [%03d] ", + comm, entry->pid, iter->cpu); + if (!ret) + return 0; + + if (trace_flags & TRACE_ITER_IRQ_INFO) { + ret = trace_print_lat_fmt(s, entry); + if (!ret) + return 0; + } + + return trace_seq_printf(s, " %5lu.%06lu: ", + secs, usec_rem); } int trace_print_lat_context(struct trace_iterator *iter) diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index e4a70c0..ff791ea 100644 --- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c @@ -280,9 +280,20 @@ static enum print_line_t wakeup_print_line(struct trace_iterator *iter) } static void wakeup_graph_return(struct ftrace_graph_ret *trace) { } -static void wakeup_print_header(struct seq_file *s) { } static void wakeup_trace_open(struct trace_iterator *iter) { } static void wakeup_trace_close(struct trace_iterator *iter) { } + +#ifdef CONFIG_FUNCTION_TRACER +static void wakeup_print_header(struct seq_file *s) +{ + trace_default_header(s); +} +#else +static void wakeup_print_header(struct seq_file *s) +{ + trace_latency_header(s); +} +#endif /* CONFIG_FUNCTION_TRACER */ #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ /* diff --git a/tools/perf/Documentation/perf-annotate.txt b/tools/perf/Documentation/perf-annotate.txt index fe6762e..c89f9e1 100644 --- a/tools/perf/Documentation/perf-annotate.txt +++ b/tools/perf/Documentation/perf-annotate.txt @@ -22,7 +22,7 @@ OPTIONS ------- -i:: --input=:: - Input file name. (default: perf.data) + Input file name. 
(default: perf.data unless stdin is a fifo) -d:: --dsos=<dso[,dso...]>:: @@ -66,7 +66,7 @@ OPTIONS used. This interfaces starts by centering on the line with more samples, TAB/UNTAB cycles through the lines with more samples. --c:: +-C:: --cpu:: Only report samples for the list of CPUs provided. Multiple CPUs can be provided as a comma-separated list with no space: 0,1. Ranges of CPUs are specified with -: 0-2. Default is to report samples on all diff --git a/tools/perf/Documentation/perf-buildid-list.txt b/tools/perf/Documentation/perf-buildid-list.txt index cc22325..25c52ef 100644 --- a/tools/perf/Documentation/perf-buildid-list.txt +++ b/tools/perf/Documentation/perf-buildid-list.txt @@ -26,7 +26,7 @@ OPTIONS Show only DSOs with hits. -i:: --input=:: - Input file name. (default: perf.data) + Input file name. (default: perf.data unless stdin is a fifo) -f:: --force:: Don't do ownership validation. diff --git a/tools/perf/Documentation/perf-evlist.txt b/tools/perf/Documentation/perf-evlist.txt index 0cada9e..0507ec7 100644 --- a/tools/perf/Documentation/perf-evlist.txt +++ b/tools/perf/Documentation/perf-evlist.txt @@ -18,7 +18,7 @@ OPTIONS ------- -i:: --input=:: - Input file name. (default: perf.data) + Input file name. (default: perf.data unless stdin is a fifo) SEE ALSO -------- diff --git a/tools/perf/Documentation/perf-kmem.txt b/tools/perf/Documentation/perf-kmem.txt index a52fcde..7c8fbbf 100644 --- a/tools/perf/Documentation/perf-kmem.txt +++ b/tools/perf/Documentation/perf-kmem.txt @@ -23,7 +23,7 @@ OPTIONS ------- -i <file>:: --input=<file>:: - Select the input file (default: perf.data) + Select the input file (default: perf.data unless stdin is a fifo) --caller:: Show per-callsite statistics diff --git a/tools/perf/Documentation/perf-lock.txt b/tools/perf/Documentation/perf-lock.txt index 4a26a2f..d6b2a4f 100644 --- a/tools/perf/Documentation/perf-lock.txt +++ b/tools/perf/Documentation/perf-lock.txt @@ -29,7 +29,7 @@ COMMON OPTIONS -i:: --input=<file>:: - Input file name. + Input file name. (default: perf.data unless stdin is a fifo) -v:: --verbose:: diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt index 5a520f8..2937f7e 100644 --- a/tools/perf/Documentation/perf-record.txt +++ b/tools/perf/Documentation/perf-record.txt @@ -89,7 +89,7 @@ OPTIONS -m:: --mmap-pages=:: - Number of mmap data pages. + Number of mmap data pages. Must be a power of two. -g:: --call-graph:: diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt index 212f24d..9b430e9 100644 --- a/tools/perf/Documentation/perf-report.txt +++ b/tools/perf/Documentation/perf-report.txt @@ -19,7 +19,7 @@ OPTIONS ------- -i:: --input=:: - Input file name. (default: perf.data) + Input file name. (default: perf.data unless stdin is a fifo) -v:: --verbose:: @@ -39,7 +39,7 @@ OPTIONS -T:: --threads:: Show per-thread event counters --C:: +-c:: --comms=:: Only consider symbols in these comms. CSV that understands file://filename entries. @@ -80,9 +80,10 @@ OPTIONS --dump-raw-trace:: Dump raw trace in ASCII. --g [type,min,order]:: +-g [type,min[,limit],order]:: --call-graph:: - Display call chains using type, min percent threshold and order. + Display call chains using type, min percent threshold, optional print + limit and order. type can be either: - flat: single column, linear exposure of call chains. - graph: use a graph tree, displaying absolute overhead rates. 
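The perf-record documentation above now states that --mmap-pages must be a power of two; later in this patch, builtin-record.c enforces this with is_power_of_2() before dying. A minimal stand-alone version of that check, with the helper redeclared so the snippet compiles on its own:

    #include <stdbool.h>
    #include <stdio.h>

    /* Same test the kernel's is_power_of_2() helper performs. */
    static bool is_power_of_2(unsigned long n)
    {
        return n != 0 && (n & (n - 1)) == 0;
    }

    int main(void)
    {
        /* prints "1 0": 128 is a valid --mmap-pages value, 96 is not */
        printf("%d %d\n", is_power_of_2(128), is_power_of_2(96));
        return 0;
    }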
@@ -128,7 +129,7 @@ OPTIONS --symfs=<directory>:: Look for files with symbols relative to this directory. --c:: +-C:: --cpu:: Only report samples for the list of CPUs provided. Multiple CPUs can be provided as a comma-separated list with no space: 0,1. Ranges of CPUs are specified with -: 0-2. Default is to report samples on all diff --git a/tools/perf/Documentation/perf-sched.txt b/tools/perf/Documentation/perf-sched.txt index 5b212b5..8ff4df9 100644 --- a/tools/perf/Documentation/perf-sched.txt +++ b/tools/perf/Documentation/perf-sched.txt @@ -40,7 +40,7 @@ OPTIONS ------- -i:: --input=<file>:: - Input file name. (default: perf.data) + Input file name. (default: perf.data unless stdin is a fifo) -v:: --verbose:: diff --git a/tools/perf/Documentation/perf-script.txt b/tools/perf/Documentation/perf-script.txt index dec87ec..2f6cef4 100644 --- a/tools/perf/Documentation/perf-script.txt +++ b/tools/perf/Documentation/perf-script.txt @@ -106,7 +106,7 @@ OPTIONS -i:: --input=:: - Input file name. + Input file name. (default: perf.data unless stdin is a fifo) -d:: --debug-mode:: @@ -182,12 +182,17 @@ OPTIONS --hide-call-graph:: When printing symbols do not display call chain. --c:: +-C:: --cpu:: Only report samples for the list of CPUs provided. Multiple CPUs can be provided as a comma-separated list with no space: 0,1. Ranges of CPUs are specified with -: 0-2. Default is to report samples on all CPUs. +-c:: +--comms=:: + Only display events for these comms. CSV that understands + file://filename entries. + -I:: --show-info:: Display extended information about the perf.data file. This adds diff --git a/tools/perf/Documentation/perf-test.txt b/tools/perf/Documentation/perf-test.txt index 2c3b462..b24ac40 100644 --- a/tools/perf/Documentation/perf-test.txt +++ b/tools/perf/Documentation/perf-test.txt @@ -8,13 +8,19 @@ perf-test - Runs sanity tests. SYNOPSIS -------- [verse] -'perf test <options>' +'perf test [<options>] [{list <test-name-fragment>|[<test-name-fragments>|<test-numbers>]}]' DESCRIPTION ----------- This command does assorted sanity tests, initially through linked routines but also will look for a directory with more tests in the form of scripts. +To get a list of available tests use 'perf test list', specifying a test name +fragment will show all tests that have it. + +To run just specific tests, inform test name fragments or the numbers obtained +from 'perf test list'. 
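A hypothetical sketch of the selection rule the perf-test text above describes: a numeric argument picks a test by the number 'perf test list' prints, while anything else is treated as a name fragment. This is illustrative only, not the tool's actual matcher.

    #include <ctype.h>
    #include <stdlib.h>
    #include <string.h>

    static int test_matches(const char *name, int num, const char *arg)
    {
        if (isdigit((unsigned char)arg[0]))
            return atoi(arg) == num;      /* numeric: match by test number */
        return strstr(name, arg) != NULL; /* otherwise: name fragment */
    }

For example, test_matches("vmlinux symtab matches kallsyms", 1, "vmlinux") would select that test by fragment, and the argument "1" would select it by number.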
+ OPTIONS ------- -v:: diff --git a/tools/perf/Documentation/perf-timechart.txt b/tools/perf/Documentation/perf-timechart.txt index d7b79e2..1632b0e 100644 --- a/tools/perf/Documentation/perf-timechart.txt +++ b/tools/perf/Documentation/perf-timechart.txt @@ -27,7 +27,7 @@ OPTIONS Select the output file (default: output.svg) -i:: --input=:: - Select the input file (default: perf.data) + Select the input file (default: perf.data unless stdin is a fifo) -w:: --width=:: Select the width of the SVG file (default: 1000) diff --git a/tools/perf/Makefile b/tools/perf/Makefile index b98e307..ac86d67 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -278,6 +278,7 @@ LIB_H += util/strbuf.h LIB_H += util/strlist.h LIB_H += util/strfilter.h LIB_H += util/svghelper.h +LIB_H += util/tool.h LIB_H += util/run-command.h LIB_H += util/sigchain.h LIB_H += util/symbol.h diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index 46b4c24..214ba7f 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -27,32 +27,32 @@ #include "util/sort.h" #include "util/hist.h" #include "util/session.h" +#include "util/tool.h" #include <linux/bitmap.h> -static char const *input_name = "perf.data"; - -static bool force, use_tui, use_stdio; - -static bool full_paths; - -static bool print_line; - -static const char *sym_hist_filter; - -static const char *cpu_list; -static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS); +struct perf_annotate { + struct perf_tool tool; + char const *input_name; + bool force, use_tui, use_stdio; + bool full_paths; + bool print_line; + const char *sym_hist_filter; + const char *cpu_list; + DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS); +}; -static int perf_evlist__add_sample(struct perf_evlist *evlist, - struct perf_sample *sample, - struct perf_evsel *evsel, - struct addr_location *al) +static int perf_evsel__add_sample(struct perf_evsel *evsel, + struct perf_sample *sample, + struct addr_location *al, + struct perf_annotate *ann) { struct hist_entry *he; int ret; - if (sym_hist_filter != NULL && - (al->sym == NULL || strcmp(sym_hist_filter, al->sym->name) != 0)) { + if (ann->sym_hist_filter != NULL && + (al->sym == NULL || + strcmp(ann->sym_hist_filter, al->sym->name) != 0)) { /* We're only interested in a symbol named sym_hist_filter */ if (al->sym != NULL) { rb_erase(&al->sym->rb_node, @@ -69,8 +69,7 @@ static int perf_evlist__add_sample(struct perf_evlist *evlist, ret = 0; if (he->ms.sym != NULL) { struct annotation *notes = symbol__annotation(he->ms.sym); - if (notes->src == NULL && - symbol__alloc_hist(he->ms.sym, evlist->nr_entries) < 0) + if (notes->src == NULL && symbol__alloc_hist(he->ms.sym) < 0) return -ENOMEM; ret = hist_entry__inc_addr_samples(he, evsel->idx, al->addr); @@ -81,25 +80,26 @@ static int perf_evlist__add_sample(struct perf_evlist *evlist, return ret; } -static int process_sample_event(union perf_event *event, +static int process_sample_event(struct perf_tool *tool, + union perf_event *event, struct perf_sample *sample, struct perf_evsel *evsel, - struct perf_session *session) + struct machine *machine) { + struct perf_annotate *ann = container_of(tool, struct perf_annotate, tool); struct addr_location al; - if (perf_event__preprocess_sample(event, session, &al, sample, + if (perf_event__preprocess_sample(event, machine, &al, sample, symbol__annotate_init) < 0) { pr_warning("problem processing %d event, skipping it.\n", event->header.type); return -1; } - if (cpu_list && !test_bit(sample->cpu, cpu_bitmap)) + if (ann->cpu_list 
&& !test_bit(sample->cpu, ann->cpu_bitmap)) return 0; - if (!al.filtered && - perf_evlist__add_sample(session->evlist, sample, evsel, &al)) { + if (!al.filtered && perf_evsel__add_sample(evsel, sample, &al, ann)) { pr_warning("problem incrementing symbol count, " "skipping event\n"); return -1; @@ -108,14 +108,15 @@ static int process_sample_event(union perf_event *event, return 0; } -static int hist_entry__tty_annotate(struct hist_entry *he, int evidx) +static int hist_entry__tty_annotate(struct hist_entry *he, int evidx, + struct perf_annotate *ann) { return symbol__tty_annotate(he->ms.sym, he->ms.map, evidx, - print_line, full_paths, 0, 0); + ann->print_line, ann->full_paths, 0, 0); } static void hists__find_annotations(struct hists *self, int evidx, - int nr_events) + struct perf_annotate *ann) { struct rb_node *nd = rb_first(&self->entries), *next; int key = K_RIGHT; @@ -138,8 +139,7 @@ find_next: } if (use_browser > 0) { - key = hist_entry__tui_annotate(he, evidx, nr_events, - NULL, NULL, 0); + key = hist_entry__tui_annotate(he, evidx, NULL, NULL, 0); switch (key) { case K_RIGHT: next = rb_next(nd); @@ -154,7 +154,7 @@ find_next: if (next != NULL) nd = next; } else { - hist_entry__tty_annotate(he, evidx); + hist_entry__tty_annotate(he, evidx, ann); nd = rb_next(nd); /* * Since we have a hist_entry per IP for the same @@ -167,33 +167,26 @@ find_next: } } -static struct perf_event_ops event_ops = { - .sample = process_sample_event, - .mmap = perf_event__process_mmap, - .comm = perf_event__process_comm, - .fork = perf_event__process_task, - .ordered_samples = true, - .ordering_requires_timestamps = true, -}; - -static int __cmd_annotate(void) +static int __cmd_annotate(struct perf_annotate *ann) { int ret; struct perf_session *session; struct perf_evsel *pos; u64 total_nr_samples; - session = perf_session__new(input_name, O_RDONLY, force, false, &event_ops); + session = perf_session__new(ann->input_name, O_RDONLY, + ann->force, false, &ann->tool); if (session == NULL) return -ENOMEM; - if (cpu_list) { - ret = perf_session__cpu_bitmap(session, cpu_list, cpu_bitmap); + if (ann->cpu_list) { + ret = perf_session__cpu_bitmap(session, ann->cpu_list, + ann->cpu_bitmap); if (ret) goto out_delete; } - ret = perf_session__process_events(session, &event_ops); + ret = perf_session__process_events(session, &ann->tool); if (ret) goto out_delete; @@ -217,13 +210,12 @@ static int __cmd_annotate(void) total_nr_samples += nr_samples; hists__collapse_resort(hists); hists__output_resort(hists); - hists__find_annotations(hists, pos->idx, - session->evlist->nr_entries); + hists__find_annotations(hists, pos->idx, ann); } } if (total_nr_samples == 0) { - ui__warning("The %s file has no samples!\n", input_name); + ui__warning("The %s file has no samples!\n", session->filename); goto out_delete; } out_delete: @@ -247,29 +239,41 @@ static const char * const annotate_usage[] = { NULL }; -static const struct option options[] = { - OPT_STRING('i', "input", &input_name, "file", +int cmd_annotate(int argc, const char **argv, const char *prefix __used) +{ + struct perf_annotate annotate = { + .tool = { + .sample = process_sample_event, + .mmap = perf_event__process_mmap, + .comm = perf_event__process_comm, + .fork = perf_event__process_task, + .ordered_samples = true, + .ordering_requires_timestamps = true, + }, + }; + const struct option options[] = { + OPT_STRING('i', "input", &annotate.input_name, "file", "input file name"), OPT_STRING('d', "dsos", &symbol_conf.dso_list_str, "dso[,dso...]", "only consider symbols in 
these dsos"), - OPT_STRING('s', "symbol", &sym_hist_filter, "symbol", + OPT_STRING('s', "symbol", &annotate.sym_hist_filter, "symbol", "symbol to annotate"), - OPT_BOOLEAN('f', "force", &force, "don't complain, do it"), + OPT_BOOLEAN('f', "force", &annotate.force, "don't complain, do it"), OPT_INCR('v', "verbose", &verbose, "be more verbose (show symbol address, etc)"), OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, "dump raw trace in ASCII"), - OPT_BOOLEAN(0, "tui", &use_tui, "Use the TUI interface"), - OPT_BOOLEAN(0, "stdio", &use_stdio, "Use the stdio interface"), + OPT_BOOLEAN(0, "tui", &annotate.use_tui, "Use the TUI interface"), + OPT_BOOLEAN(0, "stdio", &annotate.use_stdio, "Use the stdio interface"), OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name, "file", "vmlinux pathname"), OPT_BOOLEAN('m', "modules", &symbol_conf.use_modules, "load module symbols - WARNING: use only with -k and LIVE kernel"), - OPT_BOOLEAN('l', "print-line", &print_line, + OPT_BOOLEAN('l', "print-line", &annotate.print_line, "print matching source lines (may be slow)"), - OPT_BOOLEAN('P', "full-paths", &full_paths, + OPT_BOOLEAN('P', "full-paths", &annotate.full_paths, "Don't shorten the displayed pathnames"), - OPT_STRING('c', "cpu", &cpu_list, "cpu", "list of cpus to profile"), + OPT_STRING('C', "cpu", &annotate.cpu_list, "cpu", "list of cpus to profile"), OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory", "Look for files with symbols relative to this directory"), OPT_BOOLEAN(0, "source", &symbol_conf.annotate_src, @@ -279,15 +283,13 @@ static const struct option options[] = { OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style", "Specify disassembler style (e.g. -M intel for intel syntax)"), OPT_END() -}; + }; -int cmd_annotate(int argc, const char **argv, const char *prefix __used) -{ argc = parse_options(argc, argv, options, annotate_usage, 0); - if (use_stdio) + if (annotate.use_stdio) use_browser = 0; - else if (use_tui) + else if (annotate.use_tui) use_browser = 1; setup_browser(true); @@ -308,7 +310,7 @@ int cmd_annotate(int argc, const char **argv, const char *prefix __used) if (argc > 1) usage_with_options(annotate_usage, options); - sym_hist_filter = argv[0]; + annotate.sym_hist_filter = argv[0]; } if (field_sep && *field_sep == '.') { @@ -316,5 +318,5 @@ int cmd_annotate(int argc, const char **argv, const char *prefix __used) return -1; } - return __cmd_annotate(); + return __cmd_annotate(&annotate); } diff --git a/tools/perf/builtin-buildid-list.c b/tools/perf/builtin-buildid-list.c index cb690a6..5248046 100644 --- a/tools/perf/builtin-buildid-list.c +++ b/tools/perf/builtin-buildid-list.c @@ -18,7 +18,7 @@ #include <libelf.h> -static char const *input_name = "perf.data"; +static const char *input_name; static bool force; static bool show_kernel; static bool with_hits; @@ -39,24 +39,6 @@ static const struct option options[] = { OPT_END() }; -static int perf_session__list_build_ids(void) -{ - struct perf_session *session; - - session = perf_session__new(input_name, O_RDONLY, force, false, - &build_id__mark_dso_hit_ops); - if (session == NULL) - return -1; - - if (with_hits) - perf_session__process_events(session, &build_id__mark_dso_hit_ops); - - perf_session__fprintf_dsos_buildid(session, stdout, with_hits); - - perf_session__delete(session); - return 0; -} - static int sysfs__fprintf_build_id(FILE *fp) { u8 kallsyms_build_id[BUILD_ID_SIZE]; @@ -85,17 +67,36 @@ static int filename__fprintf_build_id(const char *name, FILE *fp) return fprintf(fp, "%s\n", 
sbuild_id); } -static int __cmd_buildid_list(void) +static int perf_session__list_build_ids(void) { - if (show_kernel) - return sysfs__fprintf_build_id(stdout); + struct perf_session *session; elf_version(EV_CURRENT); + + session = perf_session__new(input_name, O_RDONLY, force, false, + &build_id__mark_dso_hit_ops); + if (session == NULL) + return -1; + /* - * See if this is an ELF file first: - */ - if (filename__fprintf_build_id(input_name, stdout)) - return 0; + * See if this is an ELF file first: + */ + if (filename__fprintf_build_id(session->filename, stdout)) + goto out; + + if (with_hits) + perf_session__process_events(session, &build_id__mark_dso_hit_ops); + + perf_session__fprintf_dsos_buildid(session, stdout, with_hits); +out: + perf_session__delete(session); + return 0; +} + +static int __cmd_buildid_list(void) +{ + if (show_kernel) + return sysfs__fprintf_build_id(stdout); return perf_session__list_build_ids(); } diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c index b39f3a1..4f19513 100644 --- a/tools/perf/builtin-diff.c +++ b/tools/perf/builtin-diff.c @@ -9,7 +9,9 @@ #include "util/debug.h" #include "util/event.h" #include "util/hist.h" +#include "util/evsel.h" #include "util/session.h" +#include "util/tool.h" #include "util/sort.h" #include "util/symbol.h" #include "util/util.h" @@ -30,14 +32,15 @@ static int hists__add_entry(struct hists *self, return -ENOMEM; } -static int diff__process_sample_event(union perf_event *event, +static int diff__process_sample_event(struct perf_tool *tool __used, + union perf_event *event, struct perf_sample *sample, struct perf_evsel *evsel __used, - struct perf_session *session) + struct machine *machine) { struct addr_location al; - if (perf_event__preprocess_sample(event, session, &al, sample, NULL) < 0) { + if (perf_event__preprocess_sample(event, machine, &al, sample, NULL) < 0) { pr_warning("problem processing %d event, skipping it.\n", event->header.type); return -1; @@ -46,16 +49,16 @@ static int diff__process_sample_event(union perf_event *event, if (al.filtered || al.sym == NULL) return 0; - if (hists__add_entry(&session->hists, &al, sample->period)) { + if (hists__add_entry(&evsel->hists, &al, sample->period)) { pr_warning("problem incrementing symbol period, skipping event\n"); return -1; } - session->hists.stats.total_period += sample->period; + evsel->hists.stats.total_period += sample->period; return 0; } -static struct perf_event_ops event_ops = { +static struct perf_tool perf_diff = { .sample = diff__process_sample_event, .mmap = perf_event__process_mmap, .comm = perf_event__process_comm, @@ -145,13 +148,13 @@ static int __cmd_diff(void) int ret, i; struct perf_session *session[2]; - session[0] = perf_session__new(input_old, O_RDONLY, force, false, &event_ops); - session[1] = perf_session__new(input_new, O_RDONLY, force, false, &event_ops); + session[0] = perf_session__new(input_old, O_RDONLY, force, false, &perf_diff); + session[1] = perf_session__new(input_new, O_RDONLY, force, false, &perf_diff); if (session[0] == NULL || session[1] == NULL) return -ENOMEM; for (i = 0; i < 2; ++i) { - ret = perf_session__process_events(session[i], &event_ops); + ret = perf_session__process_events(session[i], &perf_diff); if (ret) goto out_delete; } diff --git a/tools/perf/builtin-evlist.c b/tools/perf/builtin-evlist.c index 4c5e9e0..2676032 100644 --- a/tools/perf/builtin-evlist.c +++ b/tools/perf/builtin-evlist.c @@ -15,7 +15,7 @@ #include "util/parse-options.h" #include "util/session.h" -static char const *input_name = 
"perf.data"; +static const char *input_name; static int __cmd_evlist(void) { diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c index 8dfc12b..09c1061 100644 --- a/tools/perf/builtin-inject.c +++ b/tools/perf/builtin-inject.c @@ -9,6 +9,7 @@ #include "perf.h" #include "util/session.h" +#include "util/tool.h" #include "util/debug.h" #include "util/parse-options.h" @@ -16,8 +17,9 @@ static char const *input_name = "-"; static bool inject_build_ids; -static int perf_event__repipe_synth(union perf_event *event, - struct perf_session *session __used) +static int perf_event__repipe_synth(struct perf_tool *tool __used, + union perf_event *event, + struct machine *machine __used) { uint32_t size; void *buf = event; @@ -36,41 +38,70 @@ static int perf_event__repipe_synth(union perf_event *event, return 0; } -static int perf_event__repipe(union perf_event *event, +static int perf_event__repipe_op2_synth(struct perf_tool *tool, + union perf_event *event, + struct perf_session *session __used) +{ + return perf_event__repipe_synth(tool, event, NULL); +} + +static int perf_event__repipe_event_type_synth(struct perf_tool *tool, + union perf_event *event) +{ + return perf_event__repipe_synth(tool, event, NULL); +} + +static int perf_event__repipe_tracing_data_synth(union perf_event *event, + struct perf_session *session __used) +{ + return perf_event__repipe_synth(NULL, event, NULL); +} + +static int perf_event__repipe_attr(union perf_event *event, + struct perf_evlist **pevlist __used) +{ + return perf_event__repipe_synth(NULL, event, NULL); +} + +static int perf_event__repipe(struct perf_tool *tool, + union perf_event *event, struct perf_sample *sample __used, - struct perf_session *session) + struct machine *machine) { - return perf_event__repipe_synth(event, session); + return perf_event__repipe_synth(tool, event, machine); } -static int perf_event__repipe_sample(union perf_event *event, +static int perf_event__repipe_sample(struct perf_tool *tool, + union perf_event *event, struct perf_sample *sample __used, struct perf_evsel *evsel __used, - struct perf_session *session) + struct machine *machine) { - return perf_event__repipe_synth(event, session); + return perf_event__repipe_synth(tool, event, machine); } -static int perf_event__repipe_mmap(union perf_event *event, +static int perf_event__repipe_mmap(struct perf_tool *tool, + union perf_event *event, struct perf_sample *sample, - struct perf_session *session) + struct machine *machine) { int err; - err = perf_event__process_mmap(event, sample, session); - perf_event__repipe(event, sample, session); + err = perf_event__process_mmap(tool, event, sample, machine); + perf_event__repipe(tool, event, sample, machine); return err; } -static int perf_event__repipe_task(union perf_event *event, +static int perf_event__repipe_task(struct perf_tool *tool, + union perf_event *event, struct perf_sample *sample, - struct perf_session *session) + struct machine *machine) { int err; - err = perf_event__process_task(event, sample, session); - perf_event__repipe(event, sample, session); + err = perf_event__process_task(tool, event, sample, machine); + perf_event__repipe(tool, event, sample, machine); return err; } @@ -80,7 +111,7 @@ static int perf_event__repipe_tracing_data(union perf_event *event, { int err; - perf_event__repipe_synth(event, session); + perf_event__repipe_synth(NULL, event, NULL); err = perf_event__process_tracing_data(event, session); return err; @@ -100,10 +131,10 @@ static int dso__read_build_id(struct dso *self) return -1; 
} -static int dso__inject_build_id(struct dso *self, struct perf_session *session) +static int dso__inject_build_id(struct dso *self, struct perf_tool *tool, + struct machine *machine) { u16 misc = PERF_RECORD_MISC_USER; - struct machine *machine; int err; if (dso__read_build_id(self) < 0) { @@ -111,17 +142,11 @@ static int dso__inject_build_id(struct dso *self, struct perf_session *session) return -1; } - machine = perf_session__find_host_machine(session); - if (machine == NULL) { - pr_err("Can't find machine for session\n"); - return -1; - } - if (self->kernel) misc = PERF_RECORD_MISC_KERNEL; - err = perf_event__synthesize_build_id(self, misc, perf_event__repipe, - machine, session); + err = perf_event__synthesize_build_id(tool, self, misc, perf_event__repipe, + machine); if (err) { pr_err("Can't synthesize build_id event for %s\n", self->long_name); return -1; @@ -130,10 +155,11 @@ static int dso__inject_build_id(struct dso *self, struct perf_session *session) return 0; } -static int perf_event__inject_buildid(union perf_event *event, +static int perf_event__inject_buildid(struct perf_tool *tool, + union perf_event *event, struct perf_sample *sample, struct perf_evsel *evsel __used, - struct perf_session *session) + struct machine *machine) { struct addr_location al; struct thread *thread; @@ -141,21 +167,21 @@ static int perf_event__inject_buildid(union perf_event *event, cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; - thread = perf_session__findnew(session, event->ip.pid); + thread = machine__findnew_thread(machine, event->ip.pid); if (thread == NULL) { pr_err("problem processing %d event, skipping it.\n", event->header.type); goto repipe; } - thread__find_addr_map(thread, session, cpumode, MAP__FUNCTION, - event->ip.pid, event->ip.ip, &al); + thread__find_addr_map(thread, machine, cpumode, MAP__FUNCTION, + event->ip.ip, &al); if (al.map != NULL) { if (!al.map->dso->hit) { al.map->dso->hit = 1; if (map__load(al.map, NULL) >= 0) { - dso__inject_build_id(al.map->dso, session); + dso__inject_build_id(al.map->dso, tool, machine); /* * If this fails, too bad, let the other side * account this as unresolved. 
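The inject handlers above now receive a struct perf_tool * as their first argument, the same conversion applied to annotate, kmem, lock and record elsewhere in this patch. Commands that need private state embed the tool in a per-command struct and recover the outer object with container_of(), as struct perf_annotate and struct perf_record do. A self-contained sketch of that pattern follows; struct my_cmd and my_sample_cb are illustrative, and struct perf_tool is reduced to a stand-in so the snippet compiles alone.

    #include <stddef.h>

    /* Same construction as the kernel's container_of() macro. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct perf_tool { int unused; };    /* stand-in for the real struct */

    struct my_cmd {
        struct perf_tool tool;           /* embedded, as in struct perf_annotate */
        long samples_seen;
    };

    static long my_sample_cb(struct perf_tool *tool)
    {
        /* recover the per-command state without any file-scope globals */
        struct my_cmd *cmd = container_of(tool, struct my_cmd, tool);
        return ++cmd->samples_seen;
    }

This is what lets the conversion delete the blocks of static variables the old builtins relied on.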
@@ -168,24 +194,24 @@ static int perf_event__inject_buildid(union perf_event *event, } repipe: - perf_event__repipe(event, sample, session); + perf_event__repipe(tool, event, sample, machine); return 0; } -struct perf_event_ops inject_ops = { +struct perf_tool perf_inject = { .sample = perf_event__repipe_sample, .mmap = perf_event__repipe, .comm = perf_event__repipe, .fork = perf_event__repipe, .exit = perf_event__repipe, .lost = perf_event__repipe, - .read = perf_event__repipe, + .read = perf_event__repipe_sample, .throttle = perf_event__repipe, .unthrottle = perf_event__repipe, - .attr = perf_event__repipe_synth, - .event_type = perf_event__repipe_synth, - .tracing_data = perf_event__repipe_synth, - .build_id = perf_event__repipe_synth, + .attr = perf_event__repipe_attr, + .event_type = perf_event__repipe_event_type_synth, + .tracing_data = perf_event__repipe_tracing_data_synth, + .build_id = perf_event__repipe_op2_synth, }; extern volatile int session_done; @@ -203,17 +229,17 @@ static int __cmd_inject(void) signal(SIGINT, sig_handler); if (inject_build_ids) { - inject_ops.sample = perf_event__inject_buildid; - inject_ops.mmap = perf_event__repipe_mmap; - inject_ops.fork = perf_event__repipe_task; - inject_ops.tracing_data = perf_event__repipe_tracing_data; + perf_inject.sample = perf_event__inject_buildid; + perf_inject.mmap = perf_event__repipe_mmap; + perf_inject.fork = perf_event__repipe_task; + perf_inject.tracing_data = perf_event__repipe_tracing_data; } - session = perf_session__new(input_name, O_RDONLY, false, true, &inject_ops); + session = perf_session__new(input_name, O_RDONLY, false, true, &perf_inject); if (session == NULL) return -ENOMEM; - ret = perf_session__process_events(session, &inject_ops); + ret = perf_session__process_events(session, &perf_inject); perf_session__delete(session); diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c index 225e963..fe1ad8f 100644 --- a/tools/perf/builtin-kmem.c +++ b/tools/perf/builtin-kmem.c @@ -7,6 +7,7 @@ #include "util/thread.h" #include "util/header.h" #include "util/session.h" +#include "util/tool.h" #include "util/parse-options.h" #include "util/trace-event.h" @@ -18,7 +19,7 @@ struct alloc_stat; typedef int (*sort_fn_t)(struct alloc_stat *, struct alloc_stat *); -static char const *input_name = "perf.data"; +static const char *input_name; static int alloc_flag; static int caller_flag; @@ -303,12 +304,13 @@ static void process_raw_event(union perf_event *raw_event __used, void *data, } } -static int process_sample_event(union perf_event *event, +static int process_sample_event(struct perf_tool *tool __used, + union perf_event *event, struct perf_sample *sample, struct perf_evsel *evsel __used, - struct perf_session *session) + struct machine *machine) { - struct thread *thread = perf_session__findnew(session, event->ip.pid); + struct thread *thread = machine__findnew_thread(machine, event->ip.pid); if (thread == NULL) { pr_debug("problem processing %d event, skipping it.\n", @@ -324,7 +326,7 @@ static int process_sample_event(union perf_event *event, return 0; } -static struct perf_event_ops event_ops = { +static struct perf_tool perf_kmem = { .sample = process_sample_event, .comm = perf_event__process_comm, .ordered_samples = true, @@ -483,7 +485,7 @@ static int __cmd_kmem(void) { int err = -EINVAL; struct perf_session *session = perf_session__new(input_name, O_RDONLY, - 0, false, &event_ops); + 0, false, &perf_kmem); if (session == NULL) return -ENOMEM; @@ -494,7 +496,7 @@ static int __cmd_kmem(void) goto 
out_delete; setup_pager(); - err = perf_session__process_events(session, &event_ops); + err = perf_session__process_events(session, &perf_kmem); if (err != 0) goto out_delete; sort_result(); diff --git a/tools/perf/builtin-kvm.c b/tools/perf/builtin-kvm.c index 34d1e85..032324a 100644 --- a/tools/perf/builtin-kvm.c +++ b/tools/perf/builtin-kvm.c @@ -38,7 +38,7 @@ static const struct option kvm_options[] = { OPT_BOOLEAN(0, "guest", &perf_guest, "Collect guest os data"), OPT_BOOLEAN(0, "host", &perf_host, - "Collect guest os data"), + "Collect host os data"), OPT_STRING(0, "guestmount", &symbol_conf.guestmount, "directory", "guest mount directory under which every guest os" " instance has a subdir"), diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c index 899080a..2296c39 100644 --- a/tools/perf/builtin-lock.c +++ b/tools/perf/builtin-lock.c @@ -12,6 +12,7 @@ #include "util/debug.h" #include "util/session.h" +#include "util/tool.h" #include <sys/types.h> #include <sys/prctl.h> @@ -325,7 +326,7 @@ alloc_failed: die("memory allocation failed\n"); } -static char const *input_name = "perf.data"; +static const char *input_name; struct raw_event_sample { u32 size; @@ -845,12 +846,13 @@ static void dump_info(void) die("Unknown type of information\n"); } -static int process_sample_event(union perf_event *event, +static int process_sample_event(struct perf_tool *tool __used, + union perf_event *event, struct perf_sample *sample, struct perf_evsel *evsel __used, - struct perf_session *s) + struct machine *machine) { - struct thread *thread = perf_session__findnew(s, sample->tid); + struct thread *thread = machine__findnew_thread(machine, sample->tid); if (thread == NULL) { pr_debug("problem processing %d event, skipping it.\n", @@ -863,7 +865,7 @@ static int process_sample_event(union perf_event *event, return 0; } -static struct perf_event_ops eops = { +static struct perf_tool eops = { .sample = process_sample_event, .comm = perf_event__process_comm, .ordered_samples = true, diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c index 710ae3d..59d43ab 100644 --- a/tools/perf/builtin-probe.c +++ b/tools/perf/builtin-probe.c @@ -46,7 +46,6 @@ #define DEFAULT_VAR_FILTER "!__k???tab_* & !__crc_*" #define DEFAULT_FUNC_FILTER "!_*" -#define MAX_PATH_LEN 256 /* Session management structure */ static struct { diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 6ab58cc..0abfb18 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -22,6 +22,7 @@ #include "util/evsel.h" #include "util/debug.h" #include "util/session.h" +#include "util/tool.h" #include "util/symbol.h" #include "util/cpumap.h" #include "util/thread_map.h" @@ -35,55 +36,36 @@ enum write_mode_t { WRITE_APPEND }; -static u64 user_interval = ULLONG_MAX; -static u64 default_interval = 0; - -static unsigned int page_size; -static unsigned int mmap_pages = UINT_MAX; -static unsigned int user_freq = UINT_MAX; -static int freq = 1000; -static int output; -static int pipe_output = 0; -static const char *output_name = NULL; -static bool group = false; -static int realtime_prio = 0; -static bool nodelay = false; -static bool raw_samples = false; -static bool sample_id_all_avail = true; -static bool system_wide = false; -static pid_t target_pid = -1; -static pid_t target_tid = -1; -static pid_t child_pid = -1; -static bool no_inherit = false; -static enum write_mode_t write_mode = WRITE_FORCE; -static bool call_graph = false; -static bool inherit_stat = false; -static bool 
no_samples = false; -static bool sample_address = false; -static bool sample_time = false; -static bool no_buildid = false; -static bool no_buildid_cache = false; -static struct perf_evlist *evsel_list; - -static long samples = 0; -static u64 bytes_written = 0; - -static int file_new = 1; -static off_t post_processing_offset; - -static struct perf_session *session; -static const char *cpu_list; -static const char *progname; - -static void advance_output(size_t size) +struct perf_record { + struct perf_tool tool; + struct perf_record_opts opts; + u64 bytes_written; + const char *output_name; + struct perf_evlist *evlist; + struct perf_session *session; + const char *progname; + int output; + unsigned int page_size; + int realtime_prio; + enum write_mode_t write_mode; + bool no_buildid; + bool no_buildid_cache; + bool force; + bool file_new; + bool append_file; + long samples; + off_t post_processing_offset; +}; + +static void advance_output(struct perf_record *rec, size_t size) { - bytes_written += size; + rec->bytes_written += size; } -static void write_output(void *buf, size_t size) +static void write_output(struct perf_record *rec, void *buf, size_t size) { while (size) { - int ret = write(output, buf, size); + int ret = write(rec->output, buf, size); if (ret < 0) die("failed to write"); @@ -91,30 +73,33 @@ static void write_output(void *buf, size_t size) size -= ret; buf += ret; - bytes_written += ret; + rec->bytes_written += ret; } } -static int process_synthesized_event(union perf_event *event, +static int process_synthesized_event(struct perf_tool *tool, + union perf_event *event, struct perf_sample *sample __used, - struct perf_session *self __used) + struct machine *machine __used) { - write_output(event, event->header.size); + struct perf_record *rec = container_of(tool, struct perf_record, tool); + write_output(rec, event, event->header.size); return 0; } -static void mmap_read(struct perf_mmap *md) +static void perf_record__mmap_read(struct perf_record *rec, + struct perf_mmap *md) { unsigned int head = perf_mmap__read_head(md); unsigned int old = md->prev; - unsigned char *data = md->base + page_size; + unsigned char *data = md->base + rec->page_size; unsigned long size; void *buf; if (old == head) return; - samples++; + rec->samples++; size = head - old; @@ -123,14 +108,14 @@ static void mmap_read(struct perf_mmap *md) size = md->mask + 1 - (old & md->mask); old += size; - write_output(buf, size); + write_output(rec, buf, size); } buf = &data[old & md->mask]; size = head - old; old += size; - write_output(buf, size); + write_output(rec, buf, size); md->prev = old; perf_mmap__write_tail(md, old); @@ -149,17 +134,18 @@ static void sig_handler(int sig) signr = sig; } -static void sig_atexit(void) +static void perf_record__sig_exit(int exit_status __used, void *arg) { + struct perf_record *rec = arg; int status; - if (child_pid > 0) { + if (rec->evlist->workload.pid > 0) { if (!child_finished) - kill(child_pid, SIGTERM); + kill(rec->evlist->workload.pid, SIGTERM); wait(&status); if (WIFSIGNALED(status)) - psignal(WTERMSIG(status), progname); + psignal(WTERMSIG(status), rec->progname); } if (signr == -1 || signr == SIGUSR1) @@ -169,78 +155,6 @@ static void sig_atexit(void) kill(getpid(), signr); } -static void config_attr(struct perf_evsel *evsel, struct perf_evlist *evlist) -{ - struct perf_event_attr *attr = &evsel->attr; - int track = !evsel->idx; /* only the first counter needs these */ - - attr->disabled = 1; - attr->inherit = !no_inherit; - attr->read_format = 
PERF_FORMAT_TOTAL_TIME_ENABLED | - PERF_FORMAT_TOTAL_TIME_RUNNING | - PERF_FORMAT_ID; - - attr->sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID; - - if (evlist->nr_entries > 1) - attr->sample_type |= PERF_SAMPLE_ID; - - /* - * We default some events to a 1 default interval. But keep - * it a weak assumption overridable by the user. - */ - if (!attr->sample_period || (user_freq != UINT_MAX && - user_interval != ULLONG_MAX)) { - if (freq) { - attr->sample_type |= PERF_SAMPLE_PERIOD; - attr->freq = 1; - attr->sample_freq = freq; - } else { - attr->sample_period = default_interval; - } - } - - if (no_samples) - attr->sample_freq = 0; - - if (inherit_stat) - attr->inherit_stat = 1; - - if (sample_address) { - attr->sample_type |= PERF_SAMPLE_ADDR; - attr->mmap_data = track; - } - - if (call_graph) - attr->sample_type |= PERF_SAMPLE_CALLCHAIN; - - if (system_wide) - attr->sample_type |= PERF_SAMPLE_CPU; - - if (sample_id_all_avail && - (sample_time || system_wide || !no_inherit || cpu_list)) - attr->sample_type |= PERF_SAMPLE_TIME; - - if (raw_samples) { - attr->sample_type |= PERF_SAMPLE_TIME; - attr->sample_type |= PERF_SAMPLE_RAW; - attr->sample_type |= PERF_SAMPLE_CPU; - } - - if (nodelay) { - attr->watermark = 0; - attr->wakeup_events = 1; - } - - attr->mmap = track; - attr->comm = track; - - if (target_pid == -1 && target_tid == -1 && !system_wide) { - attr->disabled = 1; - attr->enable_on_exec = 1; - } -} - static bool perf_evlist__equal(struct perf_evlist *evlist, struct perf_evlist *other) { @@ -260,15 +174,17 @@ static bool perf_evlist__equal(struct perf_evlist *evlist, return true; } -static void open_counters(struct perf_evlist *evlist) +static void perf_record__open(struct perf_record *rec) { struct perf_evsel *pos, *first; - - if (evlist->cpus->map[0] < 0) - no_inherit = true; + struct perf_evlist *evlist = rec->evlist; + struct perf_session *session = rec->session; + struct perf_record_opts *opts = &rec->opts; first = list_entry(evlist->entries.next, struct perf_evsel, node); + perf_evlist__config_attrs(evlist, opts); + list_for_each_entry(pos, &evlist->entries, node) { struct perf_event_attr *attr = &pos->attr; struct xyarray *group_fd = NULL; @@ -286,29 +202,27 @@ static void open_counters(struct perf_evlist *evlist) */ bool time_needed = attr->sample_type & PERF_SAMPLE_TIME; - if (group && pos != first) + if (opts->group && pos != first) group_fd = first->fd; - - config_attr(pos, evlist); retry_sample_id: - attr->sample_id_all = sample_id_all_avail ? 1 : 0; + attr->sample_id_all = opts->sample_id_all_avail ? 
1 : 0; try_again: - if (perf_evsel__open(pos, evlist->cpus, evlist->threads, group, - group_fd) < 0) { + if (perf_evsel__open(pos, evlist->cpus, evlist->threads, + opts->group, group_fd) < 0) { int err = errno; if (err == EPERM || err == EACCES) { ui__error_paranoid(); exit(EXIT_FAILURE); - } else if (err == ENODEV && cpu_list) { + } else if (err == ENODEV && opts->cpu_list) { die("No such device - did you specify" " an out-of-range profile CPU?\n"); - } else if (err == EINVAL && sample_id_all_avail) { + } else if (err == EINVAL && opts->sample_id_all_avail) { /* * Old kernel, no attr->sample_id_type_all field */ - sample_id_all_avail = false; - if (!sample_time && !raw_samples && !time_needed) + opts->sample_id_all_avail = false; + if (!opts->sample_time && !opts->raw_samples && !time_needed) attr->sample_type &= ~PERF_SAMPLE_TIME; goto retry_sample_id; @@ -358,10 +272,20 @@ try_again: exit(-1); } - if (perf_evlist__mmap(evlist, mmap_pages, false) < 0) + if (perf_evlist__mmap(evlist, opts->mmap_pages, false) < 0) { + if (errno == EPERM) + die("Permission error mapping pages.\n" + "Consider increasing " + "/proc/sys/kernel/perf_event_mlock_kb,\n" + "or try again with a smaller value of -m/--mmap_pages.\n" + "(current value: %d)\n", opts->mmap_pages); + else if (!is_power_of_2(opts->mmap_pages)) + die("--mmap_pages/-m value must be a power of two."); + die("failed to mmap with %d (%s)\n", errno, strerror(errno)); + } - if (file_new) + if (rec->file_new) session->evlist = evlist; else { if (!perf_evlist__equal(session->evlist, evlist)) { @@ -373,29 +297,32 @@ try_again: perf_session__update_sample_type(session); } -static int process_buildids(void) +static int process_buildids(struct perf_record *rec) { - u64 size = lseek(output, 0, SEEK_CUR); + u64 size = lseek(rec->output, 0, SEEK_CUR); if (size == 0) return 0; - session->fd = output; - return __perf_session__process_events(session, post_processing_offset, - size - post_processing_offset, + rec->session->fd = rec->output; + return __perf_session__process_events(rec->session, rec->post_processing_offset, + size - rec->post_processing_offset, size, &build_id__mark_dso_hit_ops); } -static void atexit_header(void) +static void perf_record__exit(int status __used, void *arg) { - if (!pipe_output) { - session->header.data_size += bytes_written; - - if (!no_buildid) - process_buildids(); - perf_session__write_header(session, evsel_list, output, true); - perf_session__delete(session); - perf_evlist__delete(evsel_list); + struct perf_record *rec = arg; + + if (!rec->opts.pipe_output) { + rec->session->header.data_size += rec->bytes_written; + + if (!rec->no_buildid) + process_buildids(rec); + perf_session__write_header(rec->session, rec->evlist, + rec->output, true); + perf_session__delete(rec->session); + perf_evlist__delete(rec->evlist); symbol__exit(); } } @@ -403,7 +330,7 @@ static void atexit_header(void) static void perf_event__synthesize_guest_os(struct machine *machine, void *data) { int err; - struct perf_session *psession = data; + struct perf_tool *tool = data; if (machine__is_host(machine)) return; @@ -416,8 +343,8 @@ static void perf_event__synthesize_guest_os(struct machine *machine, void *data) *method is used to avoid symbol missing when the first addr is *in module instead of in guest kernel. 
*/ - err = perf_event__synthesize_modules(process_synthesized_event, - psession, machine); + err = perf_event__synthesize_modules(tool, process_synthesized_event, + machine); if (err < 0) pr_err("Couldn't record guest kernel [%d]'s reference" " relocation symbol.\n", machine->pid); @@ -426,12 +353,11 @@ static void perf_event__synthesize_guest_os(struct machine *machine, void *data) * We use _stext for guest kernel because guest kernel's /proc/kallsyms * have no _text sometimes. */ - err = perf_event__synthesize_kernel_mmap(process_synthesized_event, - psession, machine, "_text"); + err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event, + machine, "_text"); if (err < 0) - err = perf_event__synthesize_kernel_mmap(process_synthesized_event, - psession, machine, - "_stext"); + err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event, + machine, "_stext"); if (err < 0) pr_err("Couldn't record guest kernel [%d]'s reference" " relocation symbol.\n", machine->pid); @@ -442,73 +368,71 @@ static struct perf_event_header finished_round_event = { .type = PERF_RECORD_FINISHED_ROUND, }; -static void mmap_read_all(void) +static void perf_record__mmap_read_all(struct perf_record *rec) { int i; - for (i = 0; i < evsel_list->nr_mmaps; i++) { - if (evsel_list->mmap[i].base) - mmap_read(&evsel_list->mmap[i]); + for (i = 0; i < rec->evlist->nr_mmaps; i++) { + if (rec->evlist->mmap[i].base) + perf_record__mmap_read(rec, &rec->evlist->mmap[i]); } - if (perf_header__has_feat(&session->header, HEADER_TRACE_INFO)) - write_output(&finished_round_event, sizeof(finished_round_event)); + if (perf_header__has_feat(&rec->session->header, HEADER_TRACE_INFO)) + write_output(rec, &finished_round_event, sizeof(finished_round_event)); } -static int __cmd_record(int argc, const char **argv) +static int __cmd_record(struct perf_record *rec, int argc, const char **argv) { struct stat st; int flags; - int err; + int err, output; unsigned long waking = 0; - int child_ready_pipe[2], go_pipe[2]; const bool forks = argc > 0; - char buf; struct machine *machine; + struct perf_tool *tool = &rec->tool; + struct perf_record_opts *opts = &rec->opts; + struct perf_evlist *evsel_list = rec->evlist; + const char *output_name = rec->output_name; + struct perf_session *session; - progname = argv[0]; + rec->progname = argv[0]; - page_size = sysconf(_SC_PAGE_SIZE); + rec->page_size = sysconf(_SC_PAGE_SIZE); - atexit(sig_atexit); + on_exit(perf_record__sig_exit, rec); signal(SIGCHLD, sig_handler); signal(SIGINT, sig_handler); signal(SIGUSR1, sig_handler); - if (forks && (pipe(child_ready_pipe) < 0 || pipe(go_pipe) < 0)) { - perror("failed to create pipes"); - exit(-1); - } - if (!output_name) { if (!fstat(STDOUT_FILENO, &st) && S_ISFIFO(st.st_mode)) - pipe_output = 1; + opts->pipe_output = true; else - output_name = "perf.data"; + rec->output_name = output_name = "perf.data"; } if (output_name) { if (!strcmp(output_name, "-")) - pipe_output = 1; + opts->pipe_output = true; else if (!stat(output_name, &st) && st.st_size) { - if (write_mode == WRITE_FORCE) { + if (rec->write_mode == WRITE_FORCE) { char oldname[PATH_MAX]; snprintf(oldname, sizeof(oldname), "%s.old", output_name); unlink(oldname); rename(output_name, oldname); } - } else if (write_mode == WRITE_APPEND) { - write_mode = WRITE_FORCE; + } else if (rec->write_mode == WRITE_APPEND) { + rec->write_mode = WRITE_FORCE; } } flags = O_CREAT|O_RDWR; - if (write_mode == WRITE_APPEND) - file_new = 0; + if (rec->write_mode == WRITE_APPEND) + rec->file_new = 0; 
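Note the switch from atexit(sig_atexit) to on_exit(perf_record__sig_exit, rec): unlike atexit(), glibc's on_exit() hands the handler the exit status and a caller-chosen pointer, which is what lets these exit paths reach the perf_record state without globals. A minimal demo of the difference:

```c
#include <stdio.h>
#include <stdlib.h>

struct state {
	long bytes_written;
};

static void exit_handler(int status, void *arg)
{
	struct state *s = arg;

	fprintf(stderr, "exiting with %d after %ld bytes\n",
		status, s->bytes_written);
}

int main(void)
{
	static struct state s = { .bytes_written = 42 };

	on_exit(exit_handler, &s);	/* atexit() could not pass &s */
	exit(7);			/* handler sees status == 7 */
}
```

on_exit() is a SunOS-derived glibc extension rather than standard C, which is presumably acceptable here because perf only builds on Linux.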
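Further down, perf_evlist__prepare_workload() and perf_evlist__start_workload() absorb the fork-and-pipe handshake that this patch deletes from __cmd_record(): the child blocks on a "go" pipe until the parent has opened the counters (with enable_on_exec set), then execs the workload. A stripped-down sketch of that handshake, with most error handling trimmed:

```c
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/wait.h>

int main(int argc, char **argv)
{
	int go_pipe[2];
	char buf;
	pid_t child;

	if (argc < 2 || pipe(go_pipe) < 0)
		return 1;

	child = fork();
	if (child == 0) {
		close(go_pipe[1]);
		/* block until the parent has set the counters up */
		if (read(go_pipe[0], &buf, 1) <= 0)
			_exit(1);
		execvp(argv[1], &argv[1]);
		perror(argv[1]);
		_exit(127);
	}
	close(go_pipe[0]);

	/* ... open counters on 'child' with attr.enable_on_exec = 1 ... */

	write(go_pipe[1], "", 1);	/* let the child rip */
	close(go_pipe[1]);
	waitpid(child, NULL, 0);
	return 0;
}
```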
else flags |= O_TRUNC; - if (pipe_output) + if (opts->pipe_output) output = STDOUT_FILENO; else output = open(output_name, flags, S_IRUSR | S_IWUSR); @@ -517,17 +441,21 @@ static int __cmd_record(int argc, const char **argv) exit(-1); } + rec->output = output; + session = perf_session__new(output_name, O_WRONLY, - write_mode == WRITE_FORCE, false, NULL); + rec->write_mode == WRITE_FORCE, false, NULL); if (session == NULL) { pr_err("Not enough memory for reading perf file header\n"); return -1; } - if (!no_buildid) + rec->session = session; + + if (!rec->no_buildid) perf_header__set_feat(&session->header, HEADER_BUILD_ID); - if (!file_new) { + if (!rec->file_new) { err = perf_session__read_header(session, output); if (err < 0) goto out_delete_session; @@ -549,94 +477,57 @@ static int __cmd_record(int argc, const char **argv) perf_header__set_feat(&session->header, HEADER_NUMA_TOPOLOGY); perf_header__set_feat(&session->header, HEADER_CPUID); - /* 512 kiB: default amount of unprivileged mlocked memory */ - if (mmap_pages == UINT_MAX) - mmap_pages = (512 * 1024) / page_size; - if (forks) { - child_pid = fork(); - if (child_pid < 0) { - perror("failed to fork"); - exit(-1); - } - - if (!child_pid) { - if (pipe_output) - dup2(2, 1); - close(child_ready_pipe[0]); - close(go_pipe[1]); - fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC); - - /* - * Do a dummy execvp to get the PLT entry resolved, - * so we avoid the resolver overhead on the real - * execvp call. - */ - execvp("", (char **)argv); - - /* - * Tell the parent we're ready to go - */ - close(child_ready_pipe[1]); - - /* - * Wait until the parent tells us to go. - */ - if (read(go_pipe[0], &buf, 1) == -1) - perror("unable to read pipe"); - - execvp(argv[0], (char **)argv); - - perror(argv[0]); - kill(getppid(), SIGUSR1); - exit(-1); - } - - if (!system_wide && target_tid == -1 && target_pid == -1) - evsel_list->threads->map[0] = child_pid; - - close(child_ready_pipe[1]); - close(go_pipe[0]); - /* - * wait for child to settle - */ - if (read(child_ready_pipe[0], &buf, 1) == -1) { - perror("unable to read pipe"); - exit(-1); + err = perf_evlist__prepare_workload(evsel_list, opts, argv); + if (err < 0) { + pr_err("Couldn't run the workload!\n"); + goto out_delete_session; } - close(child_ready_pipe[0]); } - open_counters(evsel_list); + perf_record__open(rec); /* - * perf_session__delete(session) will be called at perf_record__exit() + * perf_session__delete(session) will be called at perf_record__exit() */ - atexit(atexit_header); + on_exit(perf_record__exit, rec); - if (pipe_output) { + if (opts->pipe_output) { err = perf_header__write_pipe(output); if (err < 0) return err; - } else if (file_new) { + } else if (rec->file_new) { err = perf_session__write_header(session, evsel_list, output, false); if (err < 0) return err; } - post_processing_offset = lseek(output, 0, SEEK_CUR); + if (!rec->no_buildid + && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) { + pr_err("Couldn't generate buildids. 
" + "Use --no-buildid to profile anyway.\n"); + return -1; + } - if (pipe_output) { - err = perf_session__synthesize_attrs(session, - process_synthesized_event); + rec->post_processing_offset = lseek(output, 0, SEEK_CUR); + + machine = perf_session__find_host_machine(session); + if (!machine) { + pr_err("Couldn't find native kernel information.\n"); + return -1; + } + + if (opts->pipe_output) { + err = perf_event__synthesize_attrs(tool, session, + process_synthesized_event); if (err < 0) { pr_err("Couldn't synthesize attrs.\n"); return err; } - err = perf_event__synthesize_event_types(process_synthesized_event, - session); + err = perf_event__synthesize_event_types(tool, process_synthesized_event, + machine); if (err < 0) { pr_err("Couldn't synthesize event_types.\n"); return err; @@ -651,56 +542,49 @@ static int __cmd_record(int argc, const char **argv) * return this more properly and also * propagate errors that now are calling die() */ - err = perf_event__synthesize_tracing_data(output, evsel_list, - process_synthesized_event, - session); + err = perf_event__synthesize_tracing_data(tool, output, evsel_list, + process_synthesized_event); if (err <= 0) { pr_err("Couldn't record tracing data.\n"); return err; } - advance_output(err); + advance_output(rec, err); } } - machine = perf_session__find_host_machine(session); - if (!machine) { - pr_err("Couldn't find native kernel information.\n"); - return -1; - } - - err = perf_event__synthesize_kernel_mmap(process_synthesized_event, - session, machine, "_text"); + err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event, + machine, "_text"); if (err < 0) - err = perf_event__synthesize_kernel_mmap(process_synthesized_event, - session, machine, "_stext"); + err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event, + machine, "_stext"); if (err < 0) pr_err("Couldn't record kernel reference relocation symbol\n" "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n" "Check /proc/kallsyms permission or run as root.\n"); - err = perf_event__synthesize_modules(process_synthesized_event, - session, machine); + err = perf_event__synthesize_modules(tool, process_synthesized_event, + machine); if (err < 0) pr_err("Couldn't record kernel module information.\n" "Symbol resolution may be skewed if relocation was used (e.g. 
kexec).\n" "Check /proc/modules permission or run as root.\n"); if (perf_guest) - perf_session__process_machines(session, + perf_session__process_machines(session, tool, perf_event__synthesize_guest_os); - if (!system_wide) - perf_event__synthesize_thread_map(evsel_list->threads, + if (!opts->system_wide) + perf_event__synthesize_thread_map(tool, evsel_list->threads, process_synthesized_event, - session); + machine); else - perf_event__synthesize_threads(process_synthesized_event, - session); + perf_event__synthesize_threads(tool, process_synthesized_event, + machine); - if (realtime_prio) { + if (rec->realtime_prio) { struct sched_param param; - param.sched_priority = realtime_prio; + param.sched_priority = rec->realtime_prio; if (sched_setscheduler(0, SCHED_FIFO, ¶m)) { pr_err("Could not set realtime priority.\n"); exit(-1); @@ -713,14 +597,14 @@ static int __cmd_record(int argc, const char **argv) * Let the child rip */ if (forks) - close(go_pipe[1]); + perf_evlist__start_workload(evsel_list); for (;;) { - int hits = samples; + int hits = rec->samples; - mmap_read_all(); + perf_record__mmap_read_all(rec); - if (hits == samples) { + if (hits == rec->samples) { if (done) break; err = poll(evsel_list->pollfd, evsel_list->nr_fds, -1); @@ -741,9 +625,9 @@ static int __cmd_record(int argc, const char **argv) */ fprintf(stderr, "[ perf record: Captured and wrote %.3f MB %s (~%" PRIu64 " samples) ]\n", - (double)bytes_written / 1024.0 / 1024.0, + (double)rec->bytes_written / 1024.0 / 1024.0, output_name, - bytes_written / 24); + rec->bytes_written / 24); return 0; @@ -758,58 +642,89 @@ static const char * const record_usage[] = { NULL }; -static bool force, append_file; +/* + * XXX Ideally would be local to cmd_record() and passed to a perf_record__new + * because we need to have access to it in perf_record__exit, that is called + * after cmd_record() exits, but since record_options need to be accessible to + * builtin-script, leave it here. + * + * At least we don't ouch it in all the other functions here directly. + * + * Just say no to tons of global variables, sigh. + */ +static struct perf_record record = { + .opts = { + .target_pid = -1, + .target_tid = -1, + .mmap_pages = UINT_MAX, + .user_freq = UINT_MAX, + .user_interval = ULLONG_MAX, + .freq = 1000, + .sample_id_all_avail = true, + }, + .write_mode = WRITE_FORCE, + .file_new = true, +}; +/* + * XXX Will stay a global variable till we fix builtin-script.c to stop messing + * with it and switch to use the library functions in perf_evlist that came + * from builtin-record.c, i.e. use perf_record_opts, + * perf_evlist__prepare_workload, etc instead of fork+exec'in 'perf record', + * using pipes, etc. + */ const struct option record_options[] = { - OPT_CALLBACK('e', "event", &evsel_list, "event", + OPT_CALLBACK('e', "event", &record.evlist, "event", "event selector. 
use 'perf list' to list available events", parse_events_option), - OPT_CALLBACK(0, "filter", &evsel_list, "filter", + OPT_CALLBACK(0, "filter", &record.evlist, "filter", "event filter", parse_filter), - OPT_INTEGER('p', "pid", &target_pid, + OPT_INTEGER('p', "pid", &record.opts.target_pid, "record events on existing process id"), - OPT_INTEGER('t', "tid", &target_tid, + OPT_INTEGER('t', "tid", &record.opts.target_tid, "record events on existing thread id"), - OPT_INTEGER('r', "realtime", &realtime_prio, + OPT_INTEGER('r', "realtime", &record.realtime_prio, "collect data with this RT SCHED_FIFO priority"), - OPT_BOOLEAN('D', "no-delay", &nodelay, + OPT_BOOLEAN('D', "no-delay", &record.opts.no_delay, "collect data without buffering"), - OPT_BOOLEAN('R', "raw-samples", &raw_samples, + OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples, "collect raw sample records from all opened counters"), - OPT_BOOLEAN('a', "all-cpus", &system_wide, + OPT_BOOLEAN('a', "all-cpus", &record.opts.system_wide, "system-wide collection from all CPUs"), - OPT_BOOLEAN('A', "append", &append_file, + OPT_BOOLEAN('A', "append", &record.append_file, "append to the output file to do incremental profiling"), - OPT_STRING('C', "cpu", &cpu_list, "cpu", + OPT_STRING('C', "cpu", &record.opts.cpu_list, "cpu", "list of cpus to monitor"), - OPT_BOOLEAN('f', "force", &force, + OPT_BOOLEAN('f', "force", &record.force, "overwrite existing data file (deprecated)"), - OPT_U64('c', "count", &user_interval, "event period to sample"), - OPT_STRING('o', "output", &output_name, "file", + OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"), + OPT_STRING('o', "output", &record.output_name, "file", "output file name"), - OPT_BOOLEAN('i', "no-inherit", &no_inherit, + OPT_BOOLEAN('i', "no-inherit", &record.opts.no_inherit, "child tasks do not inherit counters"), - OPT_UINTEGER('F', "freq", &user_freq, "profile at this frequency"), - OPT_UINTEGER('m', "mmap-pages", &mmap_pages, "number of mmap data pages"), - OPT_BOOLEAN(0, "group", &group, + OPT_UINTEGER('F', "freq", &record.opts.user_freq, "profile at this frequency"), + OPT_UINTEGER('m', "mmap-pages", &record.opts.mmap_pages, + "number of mmap data pages"), + OPT_BOOLEAN(0, "group", &record.opts.group, "put the counters into a counter group"), - OPT_BOOLEAN('g', "call-graph", &call_graph, + OPT_BOOLEAN('g', "call-graph", &record.opts.call_graph, "do call-graph (stack chain/backtrace) recording"), OPT_INCR('v', "verbose", &verbose, "be more verbose (show counter open errors, etc)"), OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"), - OPT_BOOLEAN('s', "stat", &inherit_stat, + OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat, "per thread counts"), - OPT_BOOLEAN('d', "data", &sample_address, + OPT_BOOLEAN('d', "data", &record.opts.sample_address, "Sample addresses"), - OPT_BOOLEAN('T', "timestamp", &sample_time, "Sample timestamps"), - OPT_BOOLEAN('n', "no-samples", &no_samples, + OPT_BOOLEAN('T', "timestamp", &record.opts.sample_time, "Sample timestamps"), + OPT_BOOLEAN('P', "period", &record.opts.period, "Sample period"), + OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples, "don't sample"), - OPT_BOOLEAN('N', "no-buildid-cache", &no_buildid_cache, + OPT_BOOLEAN('N', "no-buildid-cache", &record.no_buildid_cache, "do not update the buildid cache"), - OPT_BOOLEAN('B', "no-buildid", &no_buildid, + OPT_BOOLEAN('B', "no-buildid", &record.no_buildid, "do not collect buildids in perf.data"), - OPT_CALLBACK('G', "cgroup", &evsel_list, "name", + 
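The -r/--realtime handling seen earlier still boils down to a single sched_setscheduler() call, now reading the priority out of the perf_record struct instead of a global. A self-contained equivalent (needs root or CAP_SYS_NICE):

```c
#include <sched.h>
#include <stdio.h>

int main(void)
{
	struct sched_param param = { .sched_priority = 50 };

	if (sched_setscheduler(0 /* self */, SCHED_FIFO, &param)) {
		perror("sched_setscheduler");
		return 1;
	}
	/* from here on, this task preempts all normal-priority work */
	return 0;
}
```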
OPT_CALLBACK('G', "cgroup", &record.evlist, "name", "monitor event in cgroup name only", parse_cgroups), OPT_END() @@ -819,6 +734,8 @@ int cmd_record(int argc, const char **argv, const char *prefix __used) { int err = -ENOMEM; struct perf_evsel *pos; + struct perf_evlist *evsel_list; + struct perf_record *rec = &record; perf_header__set_cmdline(argc, argv); @@ -826,23 +743,25 @@ int cmd_record(int argc, const char **argv, const char *prefix __used) if (evsel_list == NULL) return -ENOMEM; + rec->evlist = evsel_list; + argc = parse_options(argc, argv, record_options, record_usage, PARSE_OPT_STOP_AT_NON_OPTION); - if (!argc && target_pid == -1 && target_tid == -1 && - !system_wide && !cpu_list) + if (!argc && rec->opts.target_pid == -1 && rec->opts.target_tid == -1 && + !rec->opts.system_wide && !rec->opts.cpu_list) usage_with_options(record_usage, record_options); - if (force && append_file) { + if (rec->force && rec->append_file) { fprintf(stderr, "Can't overwrite and append at the same time." " You need to choose between -f and -A"); usage_with_options(record_usage, record_options); - } else if (append_file) { - write_mode = WRITE_APPEND; + } else if (rec->append_file) { + rec->write_mode = WRITE_APPEND; } else { - write_mode = WRITE_FORCE; + rec->write_mode = WRITE_FORCE; } - if (nr_cgroups && !system_wide) { + if (nr_cgroups && !rec->opts.system_wide) { fprintf(stderr, "cgroup monitoring only available in" " system-wide mode\n"); usage_with_options(record_usage, record_options); @@ -860,7 +779,7 @@ int cmd_record(int argc, const char **argv, const char *prefix __used) "If some relocation was applied (e.g. kexec) symbols may be misresolved\n" "even with a suitable vmlinux or kallsyms file.\n\n"); - if (no_buildid_cache || no_buildid) + if (rec->no_buildid_cache || rec->no_buildid) disable_buildid_cache(); if (evsel_list->nr_entries == 0 && @@ -869,43 +788,37 @@ int cmd_record(int argc, const char **argv, const char *prefix __used) goto out_symbol_exit; } - if (target_pid != -1) - target_tid = target_pid; + if (rec->opts.target_pid != -1) + rec->opts.target_tid = rec->opts.target_pid; - if (perf_evlist__create_maps(evsel_list, target_pid, - target_tid, cpu_list) < 0) + if (perf_evlist__create_maps(evsel_list, rec->opts.target_pid, + rec->opts.target_tid, rec->opts.cpu_list) < 0) usage_with_options(record_usage, record_options); list_for_each_entry(pos, &evsel_list->entries, node) { - if (perf_evsel__alloc_fd(pos, evsel_list->cpus->nr, - evsel_list->threads->nr) < 0) - goto out_free_fd; if (perf_header__push_event(pos->attr.config, event_name(pos))) goto out_free_fd; } - if (perf_evlist__alloc_pollfd(evsel_list) < 0) - goto out_free_fd; - - if (user_interval != ULLONG_MAX) - default_interval = user_interval; - if (user_freq != UINT_MAX) - freq = user_freq; + if (rec->opts.user_interval != ULLONG_MAX) + rec->opts.default_interval = rec->opts.user_interval; + if (rec->opts.user_freq != UINT_MAX) + rec->opts.freq = rec->opts.user_freq; /* * User specified count overrides default frequency. 
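The -c and -F options above select between the two sampling modes perf_event_attr offers: a fixed period (sample every N events) or a target frequency (attr.freq = 1, and the kernel auto-tunes the period toward N samples per second). The precedence rule stated in this comment reduces to picking one of the two; a hypothetical helper to make the pairing explicit:

```c
#include <linux/perf_event.h>
#include <stdio.h>
#include <string.h>

/* illustrative helper, not perf's own code */
static void set_sampling(struct perf_event_attr *attr,
			 unsigned long long period, unsigned int freq)
{
	if (period) {		/* -c N: one sample every N events */
		attr->freq = 0;
		attr->sample_period = period;
	} else {		/* -F N: aim for N samples per second */
		attr->freq = 1;
		attr->sample_freq = freq;   /* union with sample_period */
	}
}

int main(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	set_sampling(&attr, 0, 1000);	/* mirrors the .freq = 1000 default */
	printf("freq=%d sample_freq=%llu\n",
	       (int)attr.freq, (unsigned long long)attr.sample_freq);
	return 0;
}
```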
*/ - if (default_interval) - freq = 0; - else if (freq) { - default_interval = freq; + if (rec->opts.default_interval) + rec->opts.freq = 0; + else if (rec->opts.freq) { + rec->opts.default_interval = rec->opts.freq; } else { fprintf(stderr, "frequency and count are zero, aborting\n"); err = -EINVAL; goto out_free_fd; } - err = __cmd_record(argc, argv); + err = __cmd_record(&record, argc, argv); out_free_fd: perf_evlist__delete_maps(evsel_list); out_symbol_exit: diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 4d7c834..25d34d4 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -25,6 +25,7 @@ #include "util/evsel.h" #include "util/header.h" #include "util/session.h" +#include "util/tool.h" #include "util/parse-options.h" #include "util/parse-events.h" @@ -35,38 +36,35 @@ #include <linux/bitmap.h> -static char const *input_name = "perf.data"; - -static bool force, use_tui, use_stdio; -static bool hide_unresolved; -static bool dont_use_callchains; -static bool show_full_info; - -static bool show_threads; -static struct perf_read_values show_threads_values; - -static const char default_pretty_printing_style[] = "normal"; -static const char *pretty_printing_style = default_pretty_printing_style; - -static char callchain_default_opt[] = "fractal,0.5,callee"; -static bool inverted_callchain; -static symbol_filter_t annotate_init; - -static const char *cpu_list; -static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS); +struct perf_report { + struct perf_tool tool; + struct perf_session *session; + char const *input_name; + bool force, use_tui, use_stdio; + bool hide_unresolved; + bool dont_use_callchains; + bool show_full_info; + bool show_threads; + bool inverted_callchain; + struct perf_read_values show_threads_values; + const char *pretty_printing_style; + symbol_filter_t annotate_init; + const char *cpu_list; + DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS); +}; -static int perf_session__add_hist_entry(struct perf_session *session, - struct addr_location *al, - struct perf_sample *sample, - struct perf_evsel *evsel) +static int perf_evsel__add_hist_entry(struct perf_evsel *evsel, + struct addr_location *al, + struct perf_sample *sample, + struct machine *machine) { struct symbol *parent = NULL; int err = 0; struct hist_entry *he; if ((sort__has_parent || symbol_conf.use_callchain) && sample->callchain) { - err = perf_session__resolve_callchain(session, al->thread, - sample->callchain, &parent); + err = machine__resolve_callchain(machine, evsel, al->thread, + sample->callchain, &parent); if (err) return err; } @@ -76,7 +74,8 @@ static int perf_session__add_hist_entry(struct perf_session *session, return -ENOMEM; if (symbol_conf.use_callchain) { - err = callchain_append(he->callchain, &session->callchain_cursor, + err = callchain_append(he->callchain, + &evsel->hists.callchain_cursor, sample->period); if (err) return err; @@ -92,8 +91,7 @@ static int perf_session__add_hist_entry(struct perf_session *session, assert(evsel != NULL); err = -ENOMEM; - if (notes->src == NULL && - symbol__alloc_hist(he->ms.sym, session->evlist->nr_entries) < 0) + if (notes->src == NULL && symbol__alloc_hist(he->ms.sym) < 0) goto out; err = hist_entry__inc_addr_samples(he, evsel->idx, al->addr); @@ -106,30 +104,32 @@ out: } -static int process_sample_event(union perf_event *event, +static int process_sample_event(struct perf_tool *tool, + union perf_event *event, struct perf_sample *sample, struct perf_evsel *evsel, - struct perf_session *session) + struct machine *machine) { + 
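The hunk that follows shows the conversion pattern used throughout this series: struct perf_report embeds a struct perf_tool, each callback receives the perf_tool pointer, and container_of() recovers the outer object, so state that used to live in file-scope globals now rides along with the tool. The idiom in isolation:

```c
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct tool {
	int (*sample)(struct tool *tool);
};

struct report {
	struct tool tool;	/* embedded; any offset works */
	int hide_unresolved;
};

static int process_sample(struct tool *tool)
{
	struct report *rep = container_of(tool, struct report, tool);

	printf("hide_unresolved=%d\n", rep->hide_unresolved);
	return 0;
}

int main(void)
{
	struct report rep = {
		.tool = { .sample = process_sample },
		.hide_unresolved = 1,
	};

	rep.tool.sample(&rep.tool);
	return 0;
}
```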
struct perf_report *rep = container_of(tool, struct perf_report, tool); struct addr_location al; - if (perf_event__preprocess_sample(event, session, &al, sample, - annotate_init) < 0) { + if (perf_event__preprocess_sample(event, machine, &al, sample, + rep->annotate_init) < 0) { fprintf(stderr, "problem processing %d event, skipping it.\n", event->header.type); return -1; } - if (al.filtered || (hide_unresolved && al.sym == NULL)) + if (al.filtered || (rep->hide_unresolved && al.sym == NULL)) return 0; - if (cpu_list && !test_bit(sample->cpu, cpu_bitmap)) + if (rep->cpu_list && !test_bit(sample->cpu, rep->cpu_bitmap)) return 0; if (al.map != NULL) al.map->dso->hit = 1; - if (perf_session__add_hist_entry(session, &al, sample, evsel)) { + if (perf_evsel__add_hist_entry(evsel, &al, sample, machine)) { pr_debug("problem incrementing symbol period, skipping event\n"); return -1; } @@ -137,15 +137,17 @@ static int process_sample_event(union perf_event *event, return 0; } -static int process_read_event(union perf_event *event, +static int process_read_event(struct perf_tool *tool, + union perf_event *event, struct perf_sample *sample __used, - struct perf_session *session) + struct perf_evsel *evsel, + struct machine *machine __used) { - struct perf_evsel *evsel = perf_evlist__id2evsel(session->evlist, - event->read.id); - if (show_threads) { + struct perf_report *rep = container_of(tool, struct perf_report, tool); + + if (rep->show_threads) { const char *name = evsel ? event_name(evsel) : "unknown"; - perf_read_values_add_value(&show_threads_values, + perf_read_values_add_value(&rep->show_threads_values, event->read.pid, event->read.tid, event->read.id, name, @@ -159,8 +161,10 @@ static int process_read_event(union perf_event *event, return 0; } -static int perf_session__setup_sample_type(struct perf_session *self) +static int perf_report__setup_sample_type(struct perf_report *rep) { + struct perf_session *self = rep->session; + if (!(self->sample_type & PERF_SAMPLE_CALLCHAIN)) { if (sort__has_parent) { ui__warning("Selected --sort parent, but no " @@ -173,7 +177,8 @@ static int perf_session__setup_sample_type(struct perf_session *self) "you call 'perf record' without -g?\n"); return -1; } - } else if (!dont_use_callchains && callchain_param.mode != CHAIN_NONE && + } else if (!rep->dont_use_callchains && + callchain_param.mode != CHAIN_NONE && !symbol_conf.use_callchain) { symbol_conf.use_callchain = true; if (callchain_register_param(&callchain_param) < 0) { @@ -186,22 +191,6 @@ static int perf_session__setup_sample_type(struct perf_session *self) return 0; } -static struct perf_event_ops event_ops = { - .sample = process_sample_event, - .mmap = perf_event__process_mmap, - .comm = perf_event__process_comm, - .exit = perf_event__process_task, - .fork = perf_event__process_task, - .lost = perf_event__process_lost, - .read = process_read_event, - .attr = perf_event__process_attr, - .event_type = perf_event__process_event_type, - .tracing_data = perf_event__process_tracing_data, - .build_id = perf_event__process_build_id, - .ordered_samples = true, - .ordering_requires_timestamps = true, -}; - extern volatile int session_done; static void sig_handler(int sig __used) @@ -224,6 +213,7 @@ static size_t hists__fprintf_nr_sample_events(struct hists *self, } static int perf_evlist__tty_browse_hists(struct perf_evlist *evlist, + struct perf_report *rep, const char *help) { struct perf_evsel *pos; @@ -241,18 +231,18 @@ static int perf_evlist__tty_browse_hists(struct perf_evlist *evlist, parent_pattern == 
default_parent_pattern) { fprintf(stdout, "#\n# (%s)\n#\n", help); - if (show_threads) { - bool style = !strcmp(pretty_printing_style, "raw"); - perf_read_values_display(stdout, &show_threads_values, + if (rep->show_threads) { + bool style = !strcmp(rep->pretty_printing_style, "raw"); + perf_read_values_display(stdout, &rep->show_threads_values, style); - perf_read_values_destroy(&show_threads_values); + perf_read_values_destroy(&rep->show_threads_values); } } return 0; } -static int __cmd_report(void) +static int __cmd_report(struct perf_report *rep) { int ret = -EINVAL; u64 nr_samples; @@ -264,27 +254,31 @@ static int __cmd_report(void) signal(SIGINT, sig_handler); - session = perf_session__new(input_name, O_RDONLY, force, false, &event_ops); + session = perf_session__new(rep->input_name, O_RDONLY, + rep->force, false, &rep->tool); if (session == NULL) return -ENOMEM; - if (cpu_list) { - ret = perf_session__cpu_bitmap(session, cpu_list, cpu_bitmap); + rep->session = session; + + if (rep->cpu_list) { + ret = perf_session__cpu_bitmap(session, rep->cpu_list, + rep->cpu_bitmap); if (ret) goto out_delete; } if (use_browser <= 0) - perf_session__fprintf_info(session, stdout, show_full_info); + perf_session__fprintf_info(session, stdout, rep->show_full_info); - if (show_threads) - perf_read_values_init(&show_threads_values); + if (rep->show_threads) + perf_read_values_init(&rep->show_threads_values); - ret = perf_session__setup_sample_type(session); + ret = perf_report__setup_sample_type(rep); if (ret) goto out_delete; - ret = perf_session__process_events(session, &event_ops); + ret = perf_session__process_events(session, &rep->tool); if (ret) goto out_delete; @@ -327,7 +321,7 @@ static int __cmd_report(void) } if (nr_samples == 0) { - ui__warning("The %s file has no samples!\n", input_name); + ui__warning("The %s file has no samples!\n", session->filename); goto out_delete; } @@ -335,7 +329,7 @@ static int __cmd_report(void) perf_evlist__tui_browse_hists(session->evlist, help, NULL, NULL, 0); } else - perf_evlist__tty_browse_hists(session->evlist, help); + perf_evlist__tty_browse_hists(session->evlist, rep, help); out_delete: /* @@ -354,9 +348,9 @@ out_delete: } static int -parse_callchain_opt(const struct option *opt __used, const char *arg, - int unset) +parse_callchain_opt(const struct option *opt, const char *arg, int unset) { + struct perf_report *rep = (struct perf_report *)opt->value; char *tok, *tok2; char *endptr; @@ -364,7 +358,7 @@ parse_callchain_opt(const struct option *opt __used, const char *arg, * --no-call-graph */ if (unset) { - dont_use_callchains = true; + rep->dont_use_callchains = true; return 0; } @@ -412,7 +406,7 @@ parse_callchain_opt(const struct option *opt __used, const char *arg, goto setup; if (tok2[0] != 'c') { - callchain_param.print_limit = strtod(tok2, &endptr); + callchain_param.print_limit = strtoul(tok2, &endptr, 0); tok2 = strtok(NULL, ","); if (!tok2) goto setup; @@ -433,13 +427,34 @@ setup: return 0; } -static const char * const report_usage[] = { - "perf report [<options>] <command>", - NULL -}; - -static const struct option options[] = { - OPT_STRING('i', "input", &input_name, "file", +int cmd_report(int argc, const char **argv, const char *prefix __used) +{ + struct stat st; + char callchain_default_opt[] = "fractal,0.5,callee"; + const char * const report_usage[] = { + "perf report [<options>]", + NULL + }; + struct perf_report report = { + .tool = { + .sample = process_sample_event, + .mmap = perf_event__process_mmap, + .comm = 
perf_event__process_comm, + .exit = perf_event__process_task, + .fork = perf_event__process_task, + .lost = perf_event__process_lost, + .read = process_read_event, + .attr = perf_event__process_attr, + .event_type = perf_event__process_event_type, + .tracing_data = perf_event__process_tracing_data, + .build_id = perf_event__process_build_id, + .ordered_samples = true, + .ordering_requires_timestamps = true, + }, + .pretty_printing_style = "normal", + }; + const struct option options[] = { + OPT_STRING('i', "input", &report.input_name, "file", "input file name"), OPT_INCR('v', "verbose", &verbose, "be more verbose (show symbol address, etc)"), @@ -449,17 +464,18 @@ static const struct option options[] = { "file", "vmlinux pathname"), OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name, "file", "kallsyms pathname"), - OPT_BOOLEAN('f', "force", &force, "don't complain, do it"), + OPT_BOOLEAN('f', "force", &report.force, "don't complain, do it"), OPT_BOOLEAN('m', "modules", &symbol_conf.use_modules, "load module symbols - WARNING: use only with -k and LIVE kernel"), OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples, "Show a column with the number of samples"), - OPT_BOOLEAN('T', "threads", &show_threads, + OPT_BOOLEAN('T', "threads", &report.show_threads, "Show per-thread event counters"), - OPT_STRING(0, "pretty", &pretty_printing_style, "key", + OPT_STRING(0, "pretty", &report.pretty_printing_style, "key", "pretty printing style key: normal raw"), - OPT_BOOLEAN(0, "tui", &use_tui, "Use the TUI interface"), - OPT_BOOLEAN(0, "stdio", &use_stdio, "Use the stdio interface"), + OPT_BOOLEAN(0, "tui", &report.use_tui, "Use the TUI interface"), + OPT_BOOLEAN(0, "stdio", &report.use_stdio, + "Use the stdio interface"), OPT_STRING('s', "sort", &sort_order, "key[,key2...]", "sort by key(s): pid, comm, dso, symbol, parent"), OPT_BOOLEAN(0, "showcpuutilization", &symbol_conf.show_cpu_utilization, @@ -468,13 +484,14 @@ static const struct option options[] = { "regex filter to identify parent, see: '--sort parent'"), OPT_BOOLEAN('x', "exclude-other", &symbol_conf.exclude_other, "Only display entries with parent-match"), - OPT_CALLBACK_DEFAULT('g', "call-graph", NULL, "output_type,min_percent, call_order", - "Display callchains using output_type (graph, flat, fractal, or none) , min percent threshold and callchain order. " + OPT_CALLBACK_DEFAULT('g', "call-graph", &report, "output_type,min_percent[,print_limit],call_order", + "Display callchains using output_type (graph, flat, fractal, or none) , min percent threshold, optional print limit and callchain order. " "Default: fractal,0.5,callee", &parse_callchain_opt, callchain_default_opt), - OPT_BOOLEAN('G', "inverted", &inverted_callchain, "alias for inverted call graph"), + OPT_BOOLEAN('G', "inverted", &report.inverted_callchain, + "alias for inverted call graph"), OPT_STRING('d', "dsos", &symbol_conf.dso_list_str, "dso[,dso...]", "only consider symbols in these dsos"), - OPT_STRING('C', "comms", &symbol_conf.comm_list_str, "comm[,comm...]", + OPT_STRING('c', "comms", &symbol_conf.comm_list_str, "comm[,comm...]", "only consider symbols in these comms"), OPT_STRING('S', "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]", "only consider these symbols"), @@ -484,12 +501,13 @@ static const struct option options[] = { OPT_STRING('t', "field-separator", &symbol_conf.field_sep, "separator", "separator for columns, no spaces will be added between " "columns '.' 
is reserved."), - OPT_BOOLEAN('U', "hide-unresolved", &hide_unresolved, + OPT_BOOLEAN('U', "hide-unresolved", &report.hide_unresolved, "Only display entries resolved to a symbol"), OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory", "Look for files with symbols relative to this directory"), - OPT_STRING('c', "cpu", &cpu_list, "cpu", "list of cpus to profile"), - OPT_BOOLEAN('I', "show-info", &show_full_info, + OPT_STRING('C', "cpu", &report.cpu_list, "cpu", + "list of cpus to profile"), + OPT_BOOLEAN('I', "show-info", &report.show_full_info, "Display extended information about perf.data file"), OPT_BOOLEAN(0, "source", &symbol_conf.annotate_src, "Interleave source code with assembly code (default)"), @@ -500,24 +518,30 @@ static const struct option options[] = { OPT_BOOLEAN(0, "show-total-period", &symbol_conf.show_total_period, "Show a column with the sum of periods"), OPT_END() -}; + }; -int cmd_report(int argc, const char **argv, const char *prefix __used) -{ argc = parse_options(argc, argv, options, report_usage, 0); - if (use_stdio) + if (report.use_stdio) use_browser = 0; - else if (use_tui) + else if (report.use_tui) use_browser = 1; - if (inverted_callchain) + if (report.inverted_callchain) callchain_param.order = ORDER_CALLER; - if (strcmp(input_name, "-") != 0) + if (!report.input_name || !strlen(report.input_name)) { + if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode)) + report.input_name = "-"; + else + report.input_name = "perf.data"; + } + + if (strcmp(report.input_name, "-") != 0) setup_browser(true); else use_browser = 0; + /* * Only in the newt browser we are doing integrated annotation, * so don't allocate extra space that won't be used in the stdio @@ -525,7 +549,7 @@ int cmd_report(int argc, const char **argv, const char *prefix __used) */ if (use_browser > 0) { symbol_conf.priv_size = sizeof(struct annotation); - annotate_init = symbol__annotate_init; + report.annotate_init = symbol__annotate_init; /* * For searching by name on the "Browse map details". 
* providing it only in verbose mode not to bloat too @@ -572,5 +596,5 @@ int cmd_report(int argc, const char **argv, const char *prefix __used) sort_entry__setup_elide(&sort_comm, symbol_conf.comm_list, "comm", stdout); sort_entry__setup_elide(&sort_sym, symbol_conf.sym_list, "symbol", stdout); - return __cmd_report(); + return __cmd_report(&report); } diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c index 5177964..fb8b5f8 100644 --- a/tools/perf/builtin-sched.c +++ b/tools/perf/builtin-sched.c @@ -2,11 +2,14 @@ #include "perf.h" #include "util/util.h" +#include "util/evlist.h" #include "util/cache.h" +#include "util/evsel.h" #include "util/symbol.h" #include "util/thread.h" #include "util/header.h" #include "util/session.h" +#include "util/tool.h" #include "util/parse-options.h" #include "util/trace-event.h" @@ -19,7 +22,7 @@ #include <pthread.h> #include <math.h> -static char const *input_name = "perf.data"; +static const char *input_name; static char default_sort_order[] = "avg, max, switch, runtime"; static const char *sort_order = default_sort_order; @@ -723,21 +726,21 @@ struct trace_migrate_task_event { struct trace_sched_handler { void (*switch_event)(struct trace_switch_event *, - struct perf_session *, + struct machine *, struct event *, int cpu, u64 timestamp, struct thread *thread); void (*runtime_event)(struct trace_runtime_event *, - struct perf_session *, + struct machine *, struct event *, int cpu, u64 timestamp, struct thread *thread); void (*wakeup_event)(struct trace_wakeup_event *, - struct perf_session *, + struct machine *, struct event *, int cpu, u64 timestamp, @@ -750,7 +753,7 @@ struct trace_sched_handler { struct thread *thread); void (*migrate_task_event)(struct trace_migrate_task_event *, - struct perf_session *session, + struct machine *machine, struct event *, int cpu, u64 timestamp, @@ -760,7 +763,7 @@ struct trace_sched_handler { static void replay_wakeup_event(struct trace_wakeup_event *wakeup_event, - struct perf_session *session __used, + struct machine *machine __used, struct event *event, int cpu __used, u64 timestamp __used, @@ -787,7 +790,7 @@ static u64 cpu_last_switched[MAX_CPUS]; static void replay_switch_event(struct trace_switch_event *switch_event, - struct perf_session *session __used, + struct machine *machine __used, struct event *event, int cpu, u64 timestamp, @@ -1021,7 +1024,7 @@ add_sched_in_event(struct work_atoms *atoms, u64 timestamp) static void latency_switch_event(struct trace_switch_event *switch_event, - struct perf_session *session, + struct machine *machine, struct event *event __used, int cpu, u64 timestamp, @@ -1045,8 +1048,8 @@ latency_switch_event(struct trace_switch_event *switch_event, die("hm, delta: %" PRIu64 " < 0 ?\n", delta); - sched_out = perf_session__findnew(session, switch_event->prev_pid); - sched_in = perf_session__findnew(session, switch_event->next_pid); + sched_out = machine__findnew_thread(machine, switch_event->prev_pid); + sched_in = machine__findnew_thread(machine, switch_event->next_pid); out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid); if (!out_events) { @@ -1074,13 +1077,13 @@ latency_switch_event(struct trace_switch_event *switch_event, static void latency_runtime_event(struct trace_runtime_event *runtime_event, - struct perf_session *session, + struct machine *machine, struct event *event __used, int cpu, u64 timestamp, struct thread *this_thread __used) { - struct thread *thread = perf_session__findnew(session, runtime_event->pid); + struct thread *thread = 
machine__findnew_thread(machine, runtime_event->pid); struct work_atoms *atoms = thread_atoms_search(&atom_root, thread, &cmp_pid); BUG_ON(cpu >= MAX_CPUS || cpu < 0); @@ -1097,7 +1100,7 @@ latency_runtime_event(struct trace_runtime_event *runtime_event, static void latency_wakeup_event(struct trace_wakeup_event *wakeup_event, - struct perf_session *session, + struct machine *machine, struct event *__event __used, int cpu __used, u64 timestamp, @@ -1111,7 +1114,7 @@ latency_wakeup_event(struct trace_wakeup_event *wakeup_event, if (!wakeup_event->success) return; - wakee = perf_session__findnew(session, wakeup_event->pid); + wakee = machine__findnew_thread(machine, wakeup_event->pid); atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid); if (!atoms) { thread_atoms_insert(wakee); @@ -1145,7 +1148,7 @@ latency_wakeup_event(struct trace_wakeup_event *wakeup_event, static void latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event, - struct perf_session *session, + struct machine *machine, struct event *__event __used, int cpu __used, u64 timestamp, @@ -1161,7 +1164,7 @@ latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event, if (profile_cpu == -1) return; - migrant = perf_session__findnew(session, migrate_task_event->pid); + migrant = machine__findnew_thread(machine, migrate_task_event->pid); atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid); if (!atoms) { thread_atoms_insert(migrant); @@ -1356,12 +1359,13 @@ static void sort_lat(void) static struct trace_sched_handler *trace_handler; static void -process_sched_wakeup_event(void *data, struct perf_session *session, +process_sched_wakeup_event(struct perf_tool *tool __used, struct event *event, - int cpu __used, - u64 timestamp __used, - struct thread *thread __used) + struct perf_sample *sample, + struct machine *machine, + struct thread *thread) { + void *data = sample->raw_data; struct trace_wakeup_event wakeup_event; FILL_COMMON_FIELDS(wakeup_event, event, data); @@ -1373,8 +1377,8 @@ process_sched_wakeup_event(void *data, struct perf_session *session, FILL_FIELD(wakeup_event, cpu, event, data); if (trace_handler->wakeup_event) - trace_handler->wakeup_event(&wakeup_event, session, event, - cpu, timestamp, thread); + trace_handler->wakeup_event(&wakeup_event, machine, event, + sample->cpu, sample->time, thread); } /* @@ -1392,7 +1396,7 @@ static char next_shortname2 = '0'; static void map_switch_event(struct trace_switch_event *switch_event, - struct perf_session *session, + struct machine *machine, struct event *event __used, int this_cpu, u64 timestamp, @@ -1420,8 +1424,8 @@ map_switch_event(struct trace_switch_event *switch_event, die("hm, delta: %" PRIu64 " < 0 ?\n", delta); - sched_out = perf_session__findnew(session, switch_event->prev_pid); - sched_in = perf_session__findnew(session, switch_event->next_pid); + sched_out = machine__findnew_thread(machine, switch_event->prev_pid); + sched_in = machine__findnew_thread(machine, switch_event->next_pid); curr_thread[this_cpu] = sched_in; @@ -1469,14 +1473,15 @@ map_switch_event(struct trace_switch_event *switch_event, } } - static void -process_sched_switch_event(void *data, struct perf_session *session, +process_sched_switch_event(struct perf_tool *tool __used, struct event *event, - int this_cpu, - u64 timestamp __used, - struct thread *thread __used) + struct perf_sample *sample, + struct machine *machine, + struct thread *thread) { + int this_cpu = sample->cpu; + void *data = sample->raw_data; struct trace_switch_event 
switch_event; FILL_COMMON_FIELDS(switch_event, event, data); @@ -1498,19 +1503,20 @@ process_sched_switch_event(void *data, struct perf_session *session, nr_context_switch_bugs++; } if (trace_handler->switch_event) - trace_handler->switch_event(&switch_event, session, event, - this_cpu, timestamp, thread); + trace_handler->switch_event(&switch_event, machine, event, + this_cpu, sample->time, thread); curr_pid[this_cpu] = switch_event.next_pid; } static void -process_sched_runtime_event(void *data, struct perf_session *session, - struct event *event, - int cpu __used, - u64 timestamp __used, - struct thread *thread __used) +process_sched_runtime_event(struct perf_tool *tool __used, + struct event *event, + struct perf_sample *sample, + struct machine *machine, + struct thread *thread) { + void *data = sample->raw_data; struct trace_runtime_event runtime_event; FILL_ARRAY(runtime_event, comm, event, data); @@ -1519,16 +1525,18 @@ process_sched_runtime_event(void *data, struct perf_session *session, FILL_FIELD(runtime_event, vruntime, event, data); if (trace_handler->runtime_event) - trace_handler->runtime_event(&runtime_event, session, event, cpu, timestamp, thread); + trace_handler->runtime_event(&runtime_event, machine, event, + sample->cpu, sample->time, thread); } static void -process_sched_fork_event(void *data, +process_sched_fork_event(struct perf_tool *tool __used, struct event *event, - int cpu __used, - u64 timestamp __used, - struct thread *thread __used) + struct perf_sample *sample, + struct machine *machine __used, + struct thread *thread) { + void *data = sample->raw_data; struct trace_fork_event fork_event; FILL_COMMON_FIELDS(fork_event, event, data); @@ -1540,13 +1548,14 @@ process_sched_fork_event(void *data, if (trace_handler->fork_event) trace_handler->fork_event(&fork_event, event, - cpu, timestamp, thread); + sample->cpu, sample->time, thread); } static void -process_sched_exit_event(struct event *event, - int cpu __used, - u64 timestamp __used, +process_sched_exit_event(struct perf_tool *tool __used, + struct event *event, + struct perf_sample *sample __used, + struct machine *machine __used, struct thread *thread __used) { if (verbose) @@ -1554,12 +1563,13 @@ process_sched_exit_event(struct event *event, } static void -process_sched_migrate_task_event(void *data, struct perf_session *session, - struct event *event, - int cpu __used, - u64 timestamp __used, - struct thread *thread __used) +process_sched_migrate_task_event(struct perf_tool *tool __used, + struct event *event, + struct perf_sample *sample, + struct machine *machine, + struct thread *thread) { + void *data = sample->raw_data; struct trace_migrate_task_event migrate_task_event; FILL_COMMON_FIELDS(migrate_task_event, event, data); @@ -1570,67 +1580,47 @@ process_sched_migrate_task_event(void *data, struct perf_session *session, FILL_FIELD(migrate_task_event, cpu, event, data); if (trace_handler->migrate_task_event) - trace_handler->migrate_task_event(&migrate_task_event, session, - event, cpu, timestamp, thread); + trace_handler->migrate_task_event(&migrate_task_event, machine, + event, sample->cpu, + sample->time, thread); } -static void process_raw_event(union perf_event *raw_event __used, - struct perf_session *session, void *data, int cpu, - u64 timestamp, struct thread *thread) -{ - struct event *event; - int type; - - - type = trace_parse_common_type(data); - event = trace_find_event(type); - - if (!strcmp(event->name, "sched_switch")) - process_sched_switch_event(data, session, event, cpu, timestamp, 
thread); - if (!strcmp(event->name, "sched_stat_runtime")) - process_sched_runtime_event(data, session, event, cpu, timestamp, thread); - if (!strcmp(event->name, "sched_wakeup")) - process_sched_wakeup_event(data, session, event, cpu, timestamp, thread); - if (!strcmp(event->name, "sched_wakeup_new")) - process_sched_wakeup_event(data, session, event, cpu, timestamp, thread); - if (!strcmp(event->name, "sched_process_fork")) - process_sched_fork_event(data, event, cpu, timestamp, thread); - if (!strcmp(event->name, "sched_process_exit")) - process_sched_exit_event(event, cpu, timestamp, thread); - if (!strcmp(event->name, "sched_migrate_task")) - process_sched_migrate_task_event(data, session, event, cpu, timestamp, thread); -} +typedef void (*tracepoint_handler)(struct perf_tool *tool, struct event *event, + struct perf_sample *sample, + struct machine *machine, + struct thread *thread); -static int process_sample_event(union perf_event *event, - struct perf_sample *sample, - struct perf_evsel *evsel __used, - struct perf_session *session) +static int perf_sched__process_tracepoint_sample(struct perf_tool *tool, + union perf_event *event __used, + struct perf_sample *sample, + struct perf_evsel *evsel, + struct machine *machine) { - struct thread *thread; - - if (!(session->sample_type & PERF_SAMPLE_RAW)) - return 0; + struct thread *thread = machine__findnew_thread(machine, sample->pid); - thread = perf_session__findnew(session, sample->pid); if (thread == NULL) { - pr_debug("problem processing %d event, skipping it.\n", - event->header.type); + pr_debug("problem processing %s event, skipping it.\n", + evsel->name); return -1; } - dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid); + evsel->hists.stats.total_period += sample->period; + hists__inc_nr_events(&evsel->hists, PERF_RECORD_SAMPLE); - if (profile_cpu != -1 && profile_cpu != (int)sample->cpu) - return 0; + if (evsel->handler.func != NULL) { + tracepoint_handler f = evsel->handler.func; - process_raw_event(event, session, sample->raw_data, sample->cpu, - sample->time, thread); + if (evsel->handler.data == NULL) + evsel->handler.data = trace_find_event(evsel->attr.config); + + f(tool, evsel->handler.data, sample, machine, thread); + } return 0; } -static struct perf_event_ops event_ops = { - .sample = process_sample_event, +static struct perf_tool perf_sched = { + .sample = perf_sched__process_tracepoint_sample, .comm = perf_event__process_comm, .lost = perf_event__process_lost, .fork = perf_event__process_task, @@ -1640,13 +1630,25 @@ static struct perf_event_ops event_ops = { static void read_events(bool destroy, struct perf_session **psession) { int err = -EINVAL; + const struct perf_evsel_str_handler handlers[] = { + { "sched:sched_switch", process_sched_switch_event, }, + { "sched:sched_stat_runtime", process_sched_runtime_event, }, + { "sched:sched_wakeup", process_sched_wakeup_event, }, + { "sched:sched_wakeup_new", process_sched_wakeup_event, }, + { "sched:sched_process_fork", process_sched_fork_event, }, + { "sched:sched_process_exit", process_sched_exit_event, }, + { "sched:sched_migrate_task", process_sched_migrate_task_event, }, + }; struct perf_session *session = perf_session__new(input_name, O_RDONLY, - 0, false, &event_ops); + 0, false, &perf_sched); if (session == NULL) die("No Memory"); + err = perf_evlist__set_tracepoints_handlers_array(session->evlist, handlers); + assert(err == 0); + if (perf_session__has_traces(session, "record -R")) { - err = perf_session__process_events(session, &event_ops); + 
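read_events() now binds each tracepoint to its handler once, up front, through this handlers[] table, instead of strcmp()ing the event name on every sample the way the deleted process_raw_event() did. The shape of that name-to-callback dispatch, with illustrative types in place of perf's:

```c
#include <stdio.h>
#include <string.h>

typedef void (*tracepoint_handler)(const void *sample);

struct str_handler {
	const char *name;
	tracepoint_handler fn;
};

static void handle_switch(const void *sample) { puts("sched_switch"); }
static void handle_wakeup(const void *sample) { puts("sched_wakeup"); }

static const struct str_handler handlers[] = {
	{ "sched:sched_switch", handle_switch },
	{ "sched:sched_wakeup", handle_wakeup },
};

static tracepoint_handler lookup(const char *name)
{
	size_t i;

	for (i = 0; i < sizeof(handlers) / sizeof(handlers[0]); i++)
		if (!strcmp(handlers[i].name, name))
			return handlers[i].fn;
	return NULL;
}

int main(void)
{
	tracepoint_handler f = lookup("sched:sched_switch");

	if (f)
		f(NULL);	/* here: the decoded sample */
	return 0;
}
```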
err = perf_session__process_events(session, &perf_sched); if (err) die("Failed to process events, error %d", err); diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c index 2f62a29..fd1909a 100644 --- a/tools/perf/builtin-script.c +++ b/tools/perf/builtin-script.c @@ -7,6 +7,7 @@ #include "util/header.h" #include "util/parse-options.h" #include "util/session.h" +#include "util/tool.h" #include "util/symbol.h" #include "util/thread.h" #include "util/trace-event.h" @@ -23,6 +24,7 @@ static u64 nr_unordered; extern const struct option record_options[]; static bool no_callchain; static bool show_full_info; +static bool system_wide; static const char *cpu_list; static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS); @@ -315,7 +317,7 @@ static bool sample_addr_correlates_sym(struct perf_event_attr *attr) static void print_sample_addr(union perf_event *event, struct perf_sample *sample, - struct perf_session *session, + struct machine *machine, struct thread *thread, struct perf_event_attr *attr) { @@ -328,11 +330,11 @@ static void print_sample_addr(union perf_event *event, if (!sample_addr_correlates_sym(attr)) return; - thread__find_addr_map(thread, session, cpumode, MAP__FUNCTION, - event->ip.pid, sample->addr, &al); + thread__find_addr_map(thread, machine, cpumode, MAP__FUNCTION, + sample->addr, &al); if (!al.map) - thread__find_addr_map(thread, session, cpumode, MAP__VARIABLE, - event->ip.pid, sample->addr, &al); + thread__find_addr_map(thread, machine, cpumode, MAP__VARIABLE, + sample->addr, &al); al.cpu = sample->cpu; al.sym = NULL; @@ -362,7 +364,7 @@ static void print_sample_addr(union perf_event *event, static void process_event(union perf_event *event __unused, struct perf_sample *sample, struct perf_evsel *evsel, - struct perf_session *session, + struct machine *machine, struct thread *thread) { struct perf_event_attr *attr = &evsel->attr; @@ -377,15 +379,15 @@ static void process_event(union perf_event *event __unused, sample->raw_size); if (PRINT_FIELD(ADDR)) - print_sample_addr(event, sample, session, thread, attr); + print_sample_addr(event, sample, machine, thread, attr); if (PRINT_FIELD(IP)) { if (!symbol_conf.use_callchain) printf(" "); else printf("\n"); - perf_session__print_ip(event, sample, session, - PRINT_FIELD(SYM), PRINT_FIELD(DSO)); + perf_event__print_ip(event, sample, machine, evsel, + PRINT_FIELD(SYM), PRINT_FIELD(DSO)); } printf("\n"); @@ -432,14 +434,16 @@ static int cleanup_scripting(void) return scripting_ops->stop_script(); } -static char const *input_name = "perf.data"; +static const char *input_name; -static int process_sample_event(union perf_event *event, +static int process_sample_event(struct perf_tool *tool __used, + union perf_event *event, struct perf_sample *sample, struct perf_evsel *evsel, - struct perf_session *session) + struct machine *machine) { - struct thread *thread = perf_session__findnew(session, event->ip.pid); + struct addr_location al; + struct thread *thread = machine__findnew_thread(machine, event->ip.tid); if (thread == NULL) { pr_debug("problem processing %d event, skipping it.\n", @@ -458,16 +462,25 @@ static int process_sample_event(union perf_event *event, return 0; } + if (perf_event__preprocess_sample(event, machine, &al, sample, 0) < 0) { + pr_err("problem processing %d event, skipping it.\n", + event->header.type); + return -1; + } + + if (al.filtered) + return 0; + if (cpu_list && !test_bit(sample->cpu, cpu_bitmap)) return 0; - scripting_ops->process_event(event, sample, evsel, session, thread); + 
scripting_ops->process_event(event, sample, evsel, machine, thread); - session->hists.stats.total_period += sample->period; + evsel->hists.stats.total_period += sample->period; return 0; } -static struct perf_event_ops event_ops = { +static struct perf_tool perf_script = { .sample = process_sample_event, .mmap = perf_event__process_mmap, .comm = perf_event__process_comm, @@ -494,7 +507,7 @@ static int __cmd_script(struct perf_session *session) signal(SIGINT, sig_handler); - ret = perf_session__process_events(session, &event_ops); + ret = perf_session__process_events(session, &perf_script); if (debug_mode) pr_err("Misordered timestamps: %" PRIu64 "\n", nr_unordered); @@ -523,12 +536,6 @@ static struct script_spec *script_spec__new(const char *spec, return s; } -static void script_spec__delete(struct script_spec *s) -{ - free(s->spec); - free(s); -} - static void script_spec__add(struct script_spec *s) { list_add_tail(&s->node, &script_specs); @@ -554,16 +561,11 @@ static struct script_spec *script_spec__findnew(const char *spec, s = script_spec__new(spec, ops); if (!s) - goto out_delete_spec; + return NULL; script_spec__add(s); return s; - -out_delete_spec: - script_spec__delete(s); - - return NULL; } int script_spec_register(const char *spec, struct scripting_ops *ops) @@ -681,7 +683,8 @@ static int parse_output_fields(const struct option *opt __used, type = PERF_TYPE_RAW; else { fprintf(stderr, "Invalid event type in field string.\n"); - return -EINVAL; + rc = -EINVAL; + goto out; } if (output[type].user_set) @@ -923,6 +926,24 @@ static int read_script_info(struct script_desc *desc, const char *filename) return 0; } +static char *get_script_root(struct dirent *script_dirent, const char *suffix) +{ + char *script_root, *str; + + script_root = strdup(script_dirent->d_name); + if (!script_root) + return NULL; + + str = (char *)ends_with(script_root, suffix); + if (!str) { + free(script_root); + return NULL; + } + + *str = '\0'; + return script_root; +} + static int list_available_scripts(const struct option *opt __used, const char *s __used, int unset __used) { @@ -934,7 +955,6 @@ static int list_available_scripts(const struct option *opt __used, struct script_desc *desc; char first_half[BUFSIZ]; char *script_root; - char *str; snprintf(scripts_path, MAXPATHLEN, "%s/scripts", perf_exec_path()); @@ -950,16 +970,14 @@ static int list_available_scripts(const struct option *opt __used, continue; for_each_script(lang_path, lang_dir, script_dirent, script_next) { - script_root = strdup(script_dirent.d_name); - str = (char *)ends_with(script_root, REPORT_SUFFIX); - if (str) { - *str = '\0'; + script_root = get_script_root(&script_dirent, REPORT_SUFFIX); + if (script_root) { desc = script_desc__findnew(script_root); snprintf(script_path, MAXPATHLEN, "%s/%s", lang_path, script_dirent.d_name); read_script_info(desc, script_path); + free(script_root); } - free(script_root); } } @@ -981,8 +999,7 @@ static char *get_script_path(const char *script_root, const char *suffix) char script_path[MAXPATHLEN]; DIR *scripts_dir, *lang_dir; char lang_path[MAXPATHLEN]; - char *str, *__script_root; - char *path = NULL; + char *__script_root; snprintf(scripts_path, MAXPATHLEN, "%s/scripts", perf_exec_path()); @@ -998,23 +1015,18 @@ static char *get_script_path(const char *script_root, const char *suffix) continue; for_each_script(lang_path, lang_dir, script_dirent, script_next) { - __script_root = strdup(script_dirent.d_name); - str = (char *)ends_with(__script_root, suffix); - if (str) { - *str = '\0'; - if 
(strcmp(__script_root, script_root)) - continue; + __script_root = get_script_root(&script_dirent, suffix); + if (__script_root && !strcmp(script_root, __script_root)) { + free(__script_root); snprintf(script_path, MAXPATHLEN, "%s/%s", lang_path, script_dirent.d_name); - path = strdup(script_path); - free(__script_root); - break; + return strdup(script_path); } free(__script_root); } } - return path; + return NULL; } static bool is_top_script(const char *script_path) @@ -1083,7 +1095,11 @@ static const struct option options[] = { OPT_CALLBACK('f', "fields", NULL, "str", "comma separated output fields prepend with 'type:'. Valid types: hw,sw,trace,raw. Fields: comm,tid,pid,time,cpu,event,trace,ip,sym,dso,addr", parse_output_fields), - OPT_STRING('c', "cpu", &cpu_list, "cpu", "list of cpus to profile"), + OPT_BOOLEAN('a', "all-cpus", &system_wide, + "system-wide collection from all CPUs"), + OPT_STRING('C', "cpu", &cpu_list, "cpu", "list of cpus to profile"), + OPT_STRING('c', "comms", &symbol_conf.comm_list_str, "comm[,comm...]", + "only display events for these comms"), OPT_BOOLEAN('I', "show-info", &show_full_info, "display extended information from perf.data file"), OPT_END() @@ -1110,7 +1126,6 @@ int cmd_script(int argc, const char **argv, const char *prefix __used) struct perf_session *session; char *script_path = NULL; const char **__argv; - bool system_wide; int i, j, err; setup_scripting(); @@ -1178,15 +1193,17 @@ int cmd_script(int argc, const char **argv, const char *prefix __used) } if (!pid) { - system_wide = true; j = 0; dup2(live_pipe[1], 1); close(live_pipe[0]); - if (!is_top_script(argv[0])) + if (is_top_script(argv[0])) { + system_wide = true; + } else if (!system_wide) { system_wide = !have_cmd(argc - rep_args, &argv[rep_args]); + } __argv = malloc((argc + 6) * sizeof(const char *)); if (!__argv) @@ -1234,10 +1251,11 @@ int cmd_script(int argc, const char **argv, const char *prefix __used) script_path = rep_script_path; if (script_path) { - system_wide = false; j = 0; - if (rec_script_path) + if (!rec_script_path) + system_wide = false; + else if (!system_wide) system_wide = !have_cmd(argc - 1, &argv[1]); __argv = malloc((argc + 2) * sizeof(const char *)); @@ -1261,7 +1279,7 @@ int cmd_script(int argc, const char **argv, const char *prefix __used) if (!script_name) setup_pager(); - session = perf_session__new(input_name, O_RDONLY, 0, false, &event_ops); + session = perf_session__new(input_name, O_RDONLY, 0, false, &perf_script); if (session == NULL) return -ENOMEM; @@ -1287,7 +1305,7 @@ int cmd_script(int argc, const char **argv, const char *prefix __used) return -1; } - input = open(input_name, O_RDONLY); + input = open(session->filename, O_RDONLY); /* input_name */ if (input < 0) { perror("failed to open file"); exit(-1); diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 955930e..f5d2a63 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -578,6 +578,33 @@ static void nsec_printout(int cpu, struct perf_evsel *evsel, double avg) avg / avg_stats(&walltime_nsecs_stats)); } +/* used for get_ratio_color() */ +enum grc_type { + GRC_STALLED_CYCLES_FE, + GRC_STALLED_CYCLES_BE, + GRC_CACHE_MISSES, + GRC_MAX_NR +}; + +static const char *get_ratio_color(enum grc_type type, double ratio) +{ + static const double grc_table[GRC_MAX_NR][3] = { + [GRC_STALLED_CYCLES_FE] = { 50.0, 30.0, 10.0 }, + [GRC_STALLED_CYCLES_BE] = { 75.0, 50.0, 20.0 }, + [GRC_CACHE_MISSES] = { 20.0, 10.0, 5.0 }, + }; + const char *color = PERF_COLOR_NORMAL; + + if 
(ratio > grc_table[type][0]) + color = PERF_COLOR_RED; + else if (ratio > grc_table[type][1]) + color = PERF_COLOR_MAGENTA; + else if (ratio > grc_table[type][2]) + color = PERF_COLOR_YELLOW; + + return color; +} + static void print_stalled_cycles_frontend(int cpu, struct perf_evsel *evsel __used, double avg) { double total, ratio = 0.0; @@ -588,13 +615,7 @@ static void print_stalled_cycles_frontend(int cpu, struct perf_evsel *evsel __us if (total) ratio = avg / total * 100.0; - color = PERF_COLOR_NORMAL; - if (ratio > 50.0) - color = PERF_COLOR_RED; - else if (ratio > 30.0) - color = PERF_COLOR_MAGENTA; - else if (ratio > 10.0) - color = PERF_COLOR_YELLOW; + color = get_ratio_color(GRC_STALLED_CYCLES_FE, ratio); fprintf(output, " # "); color_fprintf(output, color, "%6.2f%%", ratio); @@ -611,13 +632,7 @@ static void print_stalled_cycles_backend(int cpu, struct perf_evsel *evsel __use if (total) ratio = avg / total * 100.0; - color = PERF_COLOR_NORMAL; - if (ratio > 75.0) - color = PERF_COLOR_RED; - else if (ratio > 50.0) - color = PERF_COLOR_MAGENTA; - else if (ratio > 20.0) - color = PERF_COLOR_YELLOW; + color = get_ratio_color(GRC_STALLED_CYCLES_BE, ratio); fprintf(output, " # "); color_fprintf(output, color, "%6.2f%%", ratio); @@ -634,13 +649,7 @@ static void print_branch_misses(int cpu, struct perf_evsel *evsel __used, double if (total) ratio = avg / total * 100.0; - color = PERF_COLOR_NORMAL; - if (ratio > 20.0) - color = PERF_COLOR_RED; - else if (ratio > 10.0) - color = PERF_COLOR_MAGENTA; - else if (ratio > 5.0) - color = PERF_COLOR_YELLOW; + color = get_ratio_color(GRC_CACHE_MISSES, ratio); fprintf(output, " # "); color_fprintf(output, color, "%6.2f%%", ratio); @@ -657,13 +666,7 @@ static void print_l1_dcache_misses(int cpu, struct perf_evsel *evsel __used, dou if (total) ratio = avg / total * 100.0; - color = PERF_COLOR_NORMAL; - if (ratio > 20.0) - color = PERF_COLOR_RED; - else if (ratio > 10.0) - color = PERF_COLOR_MAGENTA; - else if (ratio > 5.0) - color = PERF_COLOR_YELLOW; + color = get_ratio_color(GRC_CACHE_MISSES, ratio); fprintf(output, " # "); color_fprintf(output, color, "%6.2f%%", ratio); @@ -680,13 +683,7 @@ static void print_l1_icache_misses(int cpu, struct perf_evsel *evsel __used, dou if (total) ratio = avg / total * 100.0; - color = PERF_COLOR_NORMAL; - if (ratio > 20.0) - color = PERF_COLOR_RED; - else if (ratio > 10.0) - color = PERF_COLOR_MAGENTA; - else if (ratio > 5.0) - color = PERF_COLOR_YELLOW; + color = get_ratio_color(GRC_CACHE_MISSES, ratio); fprintf(output, " # "); color_fprintf(output, color, "%6.2f%%", ratio); @@ -703,13 +700,7 @@ static void print_dtlb_cache_misses(int cpu, struct perf_evsel *evsel __used, do if (total) ratio = avg / total * 100.0; - color = PERF_COLOR_NORMAL; - if (ratio > 20.0) - color = PERF_COLOR_RED; - else if (ratio > 10.0) - color = PERF_COLOR_MAGENTA; - else if (ratio > 5.0) - color = PERF_COLOR_YELLOW; + color = get_ratio_color(GRC_CACHE_MISSES, ratio); fprintf(output, " # "); color_fprintf(output, color, "%6.2f%%", ratio); @@ -726,13 +717,7 @@ static void print_itlb_cache_misses(int cpu, struct perf_evsel *evsel __used, do if (total) ratio = avg / total * 100.0; - color = PERF_COLOR_NORMAL; - if (ratio > 20.0) - color = PERF_COLOR_RED; - else if (ratio > 10.0) - color = PERF_COLOR_MAGENTA; - else if (ratio > 5.0) - color = PERF_COLOR_YELLOW; + color = get_ratio_color(GRC_CACHE_MISSES, ratio); fprintf(output, " # "); color_fprintf(output, color, "%6.2f%%", ratio); @@ -749,13 +734,7 @@ static void print_ll_cache_misses(int 
cpu, struct perf_evsel *evsel __used, doub if (total) ratio = avg / total * 100.0; - color = PERF_COLOR_NORMAL; - if (ratio > 20.0) - color = PERF_COLOR_RED; - else if (ratio > 10.0) - color = PERF_COLOR_MAGENTA; - else if (ratio > 5.0) - color = PERF_COLOR_YELLOW; + color = get_ratio_color(GRC_CACHE_MISSES, ratio); fprintf(output, " # "); color_fprintf(output, color, "%6.2f%%", ratio); @@ -1108,22 +1087,13 @@ static const struct option options[] = { */ static int add_default_attributes(void) { - struct perf_evsel *pos; - size_t attr_nr = 0; - size_t c; - /* Set attrs if no event is selected and !null_run: */ if (null_run) return 0; if (!evsel_list->nr_entries) { - for (c = 0; c < ARRAY_SIZE(default_attrs); c++) { - pos = perf_evsel__new(default_attrs + c, c + attr_nr); - if (pos == NULL) - return -1; - perf_evlist__add(evsel_list, pos); - } - attr_nr += c; + if (perf_evlist__add_attrs_array(evsel_list, default_attrs) < 0) + return -1; } /* Detailed events get appended to the event list: */ @@ -1132,38 +1102,21 @@ static int add_default_attributes(void) return 0; /* Append detailed run extra attributes: */ - for (c = 0; c < ARRAY_SIZE(detailed_attrs); c++) { - pos = perf_evsel__new(detailed_attrs + c, c + attr_nr); - if (pos == NULL) - return -1; - perf_evlist__add(evsel_list, pos); - } - attr_nr += c; + if (perf_evlist__add_attrs_array(evsel_list, detailed_attrs) < 0) + return -1; if (detailed_run < 2) return 0; /* Append very detailed run extra attributes: */ - for (c = 0; c < ARRAY_SIZE(very_detailed_attrs); c++) { - pos = perf_evsel__new(very_detailed_attrs + c, c + attr_nr); - if (pos == NULL) - return -1; - perf_evlist__add(evsel_list, pos); - } + if (perf_evlist__add_attrs_array(evsel_list, very_detailed_attrs) < 0) + return -1; if (detailed_run < 3) return 0; /* Append very, very detailed run extra attributes: */ - for (c = 0; c < ARRAY_SIZE(very_very_detailed_attrs); c++) { - pos = perf_evsel__new(very_very_detailed_attrs + c, c + attr_nr); - if (pos == NULL) - return -1; - perf_evlist__add(evsel_list, pos); - } - - - return 0; + return perf_evlist__add_attrs_array(evsel_list, very_very_detailed_attrs); } int cmd_stat(int argc, const char **argv, const char *prefix __used) @@ -1267,8 +1220,7 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used) list_for_each_entry(pos, &evsel_list->entries, node) { if (perf_evsel__alloc_stat_priv(pos) < 0 || - perf_evsel__alloc_counts(pos, evsel_list->cpus->nr) < 0 || - perf_evsel__alloc_fd(pos, evsel_list->cpus->nr, evsel_list->threads->nr) < 0) + perf_evsel__alloc_counts(pos, evsel_list->cpus->nr) < 0) goto out_free_fd; } diff --git a/tools/perf/builtin-test.c b/tools/perf/builtin-test.c index 831d1ba..2b9a7f4 100644 --- a/tools/perf/builtin-test.c +++ b/tools/perf/builtin-test.c @@ -7,6 +7,7 @@ #include "util/cache.h" #include "util/debug.h" +#include "util/debugfs.h" #include "util/evlist.h" #include "util/parse-options.h" #include "util/parse-events.h" @@ -14,8 +15,6 @@ #include "util/thread_map.h" #include "../../include/linux/hw_breakpoint.h" -static long page_size; - static int vmlinux_matches_kallsyms_filter(struct map *map __used, struct symbol *sym) { bool *visited = symbol__priv(sym); @@ -31,6 +30,7 @@ static int test__vmlinux_matches_kallsyms(void) struct map *kallsyms_map, *vmlinux_map; struct machine kallsyms, vmlinux; enum map_type type = MAP__FUNCTION; + long page_size = sysconf(_SC_PAGE_SIZE); struct ref_reloc_sym ref_reloc_sym = { .name = "_stext", }; /* @@ -247,7 +247,7 @@ static int trace_event__id(const char 
*evname) if (asprintf(&filename, "%s/syscalls/%s/id", - debugfs_path, evname) < 0) + tracing_events_path, evname) < 0) return -1; fd = open(filename, O_RDONLY); @@ -603,7 +603,7 @@ out_free_threads: #define TEST_ASSERT_VAL(text, cond) \ do { \ - if (!cond) { \ + if (!(cond)) { \ pr_debug("FAILED %s:%d %s\n", __FILE__, __LINE__, text); \ return -1; \ } \ @@ -759,6 +759,103 @@ static int test__checkevent_breakpoint_w(struct perf_evlist *evlist) return 0; } +static int test__checkevent_tracepoint_modifier(struct perf_evlist *evlist) +{ + struct perf_evsel *evsel = list_entry(evlist->entries.next, + struct perf_evsel, node); + + TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user); + TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); + TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); + TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); + + return test__checkevent_tracepoint(evlist); +} + +static int +test__checkevent_tracepoint_multi_modifier(struct perf_evlist *evlist) +{ + struct perf_evsel *evsel; + + TEST_ASSERT_VAL("wrong number of entries", evlist->nr_entries > 1); + + list_for_each_entry(evsel, &evlist->entries, node) { + TEST_ASSERT_VAL("wrong exclude_user", + !evsel->attr.exclude_user); + TEST_ASSERT_VAL("wrong exclude_kernel", + evsel->attr.exclude_kernel); + TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); + TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); + } + + return test__checkevent_tracepoint_multi(evlist); +} + +static int test__checkevent_raw_modifier(struct perf_evlist *evlist) +{ + struct perf_evsel *evsel = list_entry(evlist->entries.next, + struct perf_evsel, node); + + TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user); + TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); + TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); + TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip); + + return test__checkevent_raw(evlist); +} + +static int test__checkevent_numeric_modifier(struct perf_evlist *evlist) +{ + struct perf_evsel *evsel = list_entry(evlist->entries.next, + struct perf_evsel, node); + + TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user); + TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel); + TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv); + TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip); + + return test__checkevent_numeric(evlist); +} + +static int test__checkevent_symbolic_name_modifier(struct perf_evlist *evlist) +{ + struct perf_evsel *evsel = list_entry(evlist->entries.next, + struct perf_evsel, node); + + TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user); + TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel); + TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv); + TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); + + return test__checkevent_symbolic_name(evlist); +} + +static int test__checkevent_symbolic_alias_modifier(struct perf_evlist *evlist) +{ + struct perf_evsel *evsel = list_entry(evlist->entries.next, + struct perf_evsel, node); + + TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); + TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel); + TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); + TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); + + return test__checkevent_symbolic_alias(evlist); +} + +static int test__checkevent_genhw_modifier(struct perf_evlist *evlist) 
+{ + struct perf_evsel *evsel = list_entry(evlist->entries.next, + struct perf_evsel, node); + + TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user); + TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); + TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); + TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip); + + return test__checkevent_genhw(evlist); +} + static struct test__event_st { const char *name; __u32 type; @@ -808,6 +905,34 @@ static struct test__event_st { .name = "mem:0:w", .check = test__checkevent_breakpoint_w, }, + { + .name = "syscalls:sys_enter_open:k", + .check = test__checkevent_tracepoint_modifier, + }, + { + .name = "syscalls:*:u", + .check = test__checkevent_tracepoint_multi_modifier, + }, + { + .name = "r1:kp", + .check = test__checkevent_raw_modifier, + }, + { + .name = "1:1:hp", + .check = test__checkevent_numeric_modifier, + }, + { + .name = "instructions:h", + .check = test__checkevent_symbolic_name_modifier, + }, + { + .name = "faults:u", + .check = test__checkevent_symbolic_alias_modifier, + }, + { + .name = "L1-dcache-load-miss:kp", + .check = test__checkevent_genhw_modifier, + }, }; #define TEST__EVENTS_CNT (sizeof(test__events) / sizeof(struct test__event_st)) @@ -841,6 +966,336 @@ static int test__parse_events(void) return ret; } + +static int sched__get_first_possible_cpu(pid_t pid, cpu_set_t **maskp, + size_t *sizep) +{ + cpu_set_t *mask; + size_t size; + int i, cpu = -1, nrcpus = 1024; +realloc: + mask = CPU_ALLOC(nrcpus); + size = CPU_ALLOC_SIZE(nrcpus); + CPU_ZERO_S(size, mask); + + if (sched_getaffinity(pid, size, mask) == -1) { + CPU_FREE(mask); + if (errno == EINVAL && nrcpus < (1024 << 8)) { + nrcpus = nrcpus << 2; + goto realloc; + } + perror("sched_getaffinity"); + return -1; + } + + for (i = 0; i < nrcpus; i++) { + if (CPU_ISSET_S(i, size, mask)) { + if (cpu == -1) { + cpu = i; + *maskp = mask; + *sizep = size; + } else + CPU_CLR_S(i, size, mask); + } + } + + if (cpu == -1) + CPU_FREE(mask); + + return cpu; +} + +static int test__PERF_RECORD(void) +{ + struct perf_record_opts opts = { + .target_pid = -1, + .target_tid = -1, + .no_delay = true, + .freq = 10, + .mmap_pages = 256, + .sample_id_all_avail = true, + }; + cpu_set_t *cpu_mask = NULL; + size_t cpu_mask_size = 0; + struct perf_evlist *evlist = perf_evlist__new(NULL, NULL); + struct perf_evsel *evsel; + struct perf_sample sample; + const char *cmd = "sleep"; + const char *argv[] = { cmd, "1", NULL, }; + char *bname; + u64 sample_type, prev_time = 0; + bool found_cmd_mmap = false, + found_libc_mmap = false, + found_vdso_mmap = false, + found_ld_mmap = false; + int err = -1, errs = 0, i, wakeups = 0, sample_size; + u32 cpu; + int total_events = 0, nr_events[PERF_RECORD_MAX] = { 0, }; + + if (evlist == NULL || argv == NULL) { + pr_debug("Not enough memory to create evlist\n"); + goto out; + } + + /* + * We need at least one evsel in the evlist, use the default + * one: "cycles". + */ + err = perf_evlist__add_default(evlist); + if (err < 0) { + pr_debug("Not enough memory to create evsel\n"); + goto out_delete_evlist; + } + + /* + * Create maps of threads and cpus to monitor. In this case + * we start with all threads and cpus (-1, -1) but then in + * perf_evlist__prepare_workload we'll fill in the only thread + * we're monitoring, the one forked there. 
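For reference, sched__get_first_possible_cpu() above leans on glibc's dynamically sized CPU-set API, growing the mask until the kernel accepts it. A minimal standalone sketch of that allocate/retry idiom (not part of the patch; error handling trimmed):

#define _GNU_SOURCE
#include <sched.h>
#include <errno.h>
#include <sys/types.h>

/* Return the first CPU in a task's affinity mask, or -1.  Grows the
 * dynamically allocated cpu_set_t whenever sched_getaffinity() fails
 * with EINVAL, i.e. when the kernel's mask is wider than ours. */
static int first_affine_cpu(pid_t pid)
{
	int nrcpus = 1024;

	for (;;) {
		cpu_set_t *mask = CPU_ALLOC(nrcpus);
		size_t size = CPU_ALLOC_SIZE(nrcpus);
		int cpu;

		if (mask == NULL)
			return -1;
		CPU_ZERO_S(size, mask);

		if (sched_getaffinity(pid, size, mask) == 0) {
			for (cpu = 0; cpu < nrcpus; cpu++)
				if (CPU_ISSET_S(cpu, size, mask))
					break;
			CPU_FREE(mask);
			return cpu < nrcpus ? cpu : -1;
		}

		CPU_FREE(mask);
		if (errno != EINVAL || nrcpus >= (1024 << 8))
			return -1;
		nrcpus <<= 2;	/* mask too small for this kernel: retry bigger */
	}
}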
+ */ + err = perf_evlist__create_maps(evlist, opts.target_pid, + opts.target_tid, opts.cpu_list); + if (err < 0) { + pr_debug("Not enough memory to create thread/cpu maps\n"); + goto out_delete_evlist; + } + + /* + * Prepare the workload in argv[] to run, it'll fork it, and then wait + * for perf_evlist__start_workload() to exec it. This is done this way + * so that we have time to open the evlist (calling sys_perf_event_open + * on all the fds) and then mmap them. + */ + err = perf_evlist__prepare_workload(evlist, &opts, argv); + if (err < 0) { + pr_debug("Couldn't run the workload!\n"); + goto out_delete_evlist; + } + + /* + * Config the evsels, setting attr->comm on the first one, etc. + */ + evsel = list_entry(evlist->entries.next, struct perf_evsel, node); + evsel->attr.sample_type |= PERF_SAMPLE_CPU; + evsel->attr.sample_type |= PERF_SAMPLE_TID; + evsel->attr.sample_type |= PERF_SAMPLE_TIME; + perf_evlist__config_attrs(evlist, &opts); + + err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask, + &cpu_mask_size); + if (err < 0) { + pr_debug("sched__get_first_possible_cpu: %s\n", strerror(errno)); + goto out_delete_evlist; + } + + cpu = err; + + /* + * So that we can check perf_sample.cpu on all the samples. + */ + if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, cpu_mask) < 0) { + pr_debug("sched_setaffinity: %s\n", strerror(errno)); + goto out_free_cpu_mask; + } + + /* + * Call sys_perf_event_open on all the fds on all the evsels, + * grouping them if asked to. + */ + err = perf_evlist__open(evlist, opts.group); + if (err < 0) { + pr_debug("perf_evlist__open: %s\n", strerror(errno)); + goto out_delete_evlist; + } + + /* + * mmap the first fd on a given CPU and ask for events for the other + * fds in the same CPU to be injected in the same mmap ring buffer + * (using ioctl(PERF_EVENT_IOC_SET_OUTPUT)). + */ + err = perf_evlist__mmap(evlist, opts.mmap_pages, false); + if (err < 0) { + pr_debug("perf_evlist__mmap: %s\n", strerror(errno)); + goto out_delete_evlist; + } + + /* + * We'll need these two to parse the PERF_SAMPLE_* fields in each + * event. + */ + sample_type = perf_evlist__sample_type(evlist); + sample_size = __perf_evsel__sample_size(sample_type); + + /* + * Now that all is properly set up, enable the events, they will + * count just on workload.pid, which will start... + */ + perf_evlist__enable(evlist); + + /* + * Now! 
+ */ + perf_evlist__start_workload(evlist); + + while (1) { + int before = total_events; + + for (i = 0; i < evlist->nr_mmaps; i++) { + union perf_event *event; + + while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) { + const u32 type = event->header.type; + const char *name = perf_event__name(type); + + ++total_events; + if (type < PERF_RECORD_MAX) + nr_events[type]++; + + err = perf_event__parse_sample(event, sample_type, + sample_size, true, + &sample, false); + if (err < 0) { + if (verbose) + perf_event__fprintf(event, stderr); + pr_debug("Couldn't parse sample\n"); + goto out_err; + } + + if (verbose) { + pr_info("%" PRIu64" %d ", sample.time, sample.cpu); + perf_event__fprintf(event, stderr); + } + + if (prev_time > sample.time) { + pr_debug("%s going backwards in time, prev=%" PRIu64 ", curr=%" PRIu64 "\n", + name, prev_time, sample.time); + ++errs; + } + + prev_time = sample.time; + + if (sample.cpu != cpu) { + pr_debug("%s with unexpected cpu, expected %d, got %d\n", + name, cpu, sample.cpu); + ++errs; + } + + if ((pid_t)sample.pid != evlist->workload.pid) { + pr_debug("%s with unexpected pid, expected %d, got %d\n", + name, evlist->workload.pid, sample.pid); + ++errs; + } + + if ((pid_t)sample.tid != evlist->workload.pid) { + pr_debug("%s with unexpected tid, expected %d, got %d\n", + name, evlist->workload.pid, sample.tid); + ++errs; + } + + if ((type == PERF_RECORD_COMM || + type == PERF_RECORD_MMAP || + type == PERF_RECORD_FORK || + type == PERF_RECORD_EXIT) && + (pid_t)event->comm.pid != evlist->workload.pid) { + pr_debug("%s with unexpected pid/tid\n", name); + ++errs; + } + + if ((type == PERF_RECORD_COMM || + type == PERF_RECORD_MMAP) && + event->comm.pid != event->comm.tid) { + pr_debug("%s with different pid/tid!\n", name); + ++errs; + } + + switch (type) { + case PERF_RECORD_COMM: + if (strcmp(event->comm.comm, cmd)) { + pr_debug("%s with unexpected comm!\n", name); + ++errs; + } + break; + case PERF_RECORD_EXIT: + goto found_exit; + case PERF_RECORD_MMAP: + bname = strrchr(event->mmap.filename, '/'); + if (bname != NULL) { + if (!found_cmd_mmap) + found_cmd_mmap = !strcmp(bname + 1, cmd); + if (!found_libc_mmap) + found_libc_mmap = !strncmp(bname + 1, "libc", 4); + if (!found_ld_mmap) + found_ld_mmap = !strncmp(bname + 1, "ld", 2); + } else if (!found_vdso_mmap) + found_vdso_mmap = !strcmp(event->mmap.filename, "[vdso]"); + break; + + case PERF_RECORD_SAMPLE: + /* Just ignore samples for now */ + break; + default: + pr_debug("Unexpected perf_event->header.type %d!\n", + type); + ++errs; + } + } + } + + /* + * We don't use poll here because at least at 3.1 times the + * PERF_RECORD_{!SAMPLE} events don't honour + * perf_event_attr.wakeup_events, just PERF_EVENT_SAMPLE does. 
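Because only PERF_RECORD_SAMPLE honours wakeup_events here, the loop below substitutes a sleep for poll() and caps the number of quiet wakeups. Reduced to its skeleton (drain_events() is a hypothetical stand-in for the perf_evlist__mmap_read() loop above, not a perf API):

#include <unistd.h>

/* Sketch of the bounded drain: keep consuming events until the child's
 * PERF_RECORD_EXIT shows up, giving up after five empty wakeups.
 * drain_events() returns the number of events it consumed and sets
 * *saw_exit when it sees PERF_RECORD_EXIT. */
static int wait_for_workload_exit(int (*drain_events)(int *saw_exit))
{
	int wakeups = 0, saw_exit = 0;

	while (!saw_exit) {
		if (drain_events(&saw_exit) == 0 && ++wakeups > 5)
			return -1;	/* no PERF_RECORD_EXIT event */
		sleep(1);
	}
	return 0;
}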
+ */ + if (total_events == before && false) + poll(evlist->pollfd, evlist->nr_fds, -1); + + sleep(1); + if (++wakeups > 5) { + pr_debug("No PERF_RECORD_EXIT event!\n"); + break; + } + } + +found_exit: + if (nr_events[PERF_RECORD_COMM] > 1) { + pr_debug("Excessive number of PERF_RECORD_COMM events!\n"); + ++errs; + } + + if (nr_events[PERF_RECORD_COMM] == 0) { + pr_debug("Missing PERF_RECORD_COMM for %s!\n", cmd); + ++errs; + } + + if (!found_cmd_mmap) { + pr_debug("PERF_RECORD_MMAP for %s missing!\n", cmd); + ++errs; + } + + if (!found_libc_mmap) { + pr_debug("PERF_RECORD_MMAP for %s missing!\n", "libc"); + ++errs; + } + + if (!found_ld_mmap) { + pr_debug("PERF_RECORD_MMAP for %s missing!\n", "ld"); + ++errs; + } + + if (!found_vdso_mmap) { + pr_debug("PERF_RECORD_MMAP for %s missing!\n", "[vdso]"); + ++errs; + } +out_err: + perf_evlist__munmap(evlist); +out_free_cpu_mask: + CPU_FREE(cpu_mask); +out_delete_evlist: + perf_evlist__delete(evlist); +out: + return (err < 0 || errs > 0) ? -1 : 0; +} + static struct test { const char *desc; int (*func)(void); @@ -866,45 +1321,89 @@ static struct test { .func = test__parse_events, }, { + .desc = "Validate PERF_RECORD_* events & perf_sample fields", + .func = test__PERF_RECORD, + }, + { .func = NULL, }, }; -static int __cmd_test(void) +static bool perf_test__matches(int curr, int argc, const char *argv[]) { - int i = 0; + int i; + + if (argc == 0) + return true; - page_size = sysconf(_SC_PAGE_SIZE); + for (i = 0; i < argc; ++i) { + char *end; + long nr = strtoul(argv[i], &end, 10); + + if (*end == '\0') { + if (nr == curr + 1) + return true; + continue; + } + + if (strstr(tests[curr].desc, argv[i])) + return true; + } + + return false; +} + +static int __cmd_test(int argc, const char *argv[]) +{ + int i = 0; while (tests[i].func) { - int err; - pr_info("%2d: %s:", i + 1, tests[i].desc); + int curr = i++, err; + + if (!perf_test__matches(curr, argc, argv)) + continue; + + pr_info("%2d: %s:", i, tests[curr].desc); pr_debug("\n--- start ---\n"); - err = tests[i].func(); - pr_debug("---- end ----\n%s:", tests[i].desc); + err = tests[curr].func(); + pr_debug("---- end ----\n%s:", tests[curr].desc); pr_info(" %s\n", err ? 
"FAILED!\n" : "Ok"); - ++i; } return 0; } -static const char * const test_usage[] = { - "perf test [<options>]", - NULL, -}; +static int perf_test__list(int argc, const char **argv) +{ + int i = 0; + + while (tests[i].func) { + int curr = i++; -static const struct option test_options[] = { + if (argc > 1 && !strstr(tests[curr].desc, argv[1])) + continue; + + pr_info("%2d: %s\n", i, tests[curr].desc); + } + + return 0; +} + +int cmd_test(int argc, const char **argv, const char *prefix __used) +{ + const char * const test_usage[] = { + "perf test [<options>] [{list <test-name-fragment>|[<test-name-fragments>|<test-numbers>]}]", + NULL, + }; + const struct option test_options[] = { OPT_INTEGER('v', "verbose", &verbose, "be more verbose (show symbol address, etc)"), OPT_END() -}; + }; -int cmd_test(int argc, const char **argv, const char *prefix __used) -{ argc = parse_options(argc, argv, test_options, test_usage, 0); - if (argc) - usage_with_options(test_usage, test_options); + if (argc >= 1 && !strcmp(argv[0], "list")) + return perf_test__list(argc, argv); symbol_conf.priv_size = sizeof(int); symbol_conf.sort_by_name = true; @@ -915,5 +1414,5 @@ int cmd_test(int argc, const char **argv, const char *prefix __used) setup_pager(); - return __cmd_test(); + return __cmd_test(argc, argv); } diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c index aa26f4d..3b75b2e 100644 --- a/tools/perf/builtin-timechart.c +++ b/tools/perf/builtin-timechart.c @@ -19,6 +19,7 @@ #include "util/color.h" #include <linux/list.h> #include "util/cache.h" +#include "util/evsel.h" #include <linux/rbtree.h> #include "util/symbol.h" #include "util/callchain.h" @@ -31,13 +32,14 @@ #include "util/event.h" #include "util/session.h" #include "util/svghelper.h" +#include "util/tool.h" #define SUPPORT_OLD_POWER_EVENTS 1 #define PWR_EVENT_EXIT -1 -static char const *input_name = "perf.data"; -static char const *output_name = "output.svg"; +static const char *input_name; +static const char *output_name = "output.svg"; static unsigned int numcpus; static u64 min_freq; /* Lowest CPU frequency seen */ @@ -273,25 +275,28 @@ static int cpus_cstate_state[MAX_CPUS]; static u64 cpus_pstate_start_times[MAX_CPUS]; static u64 cpus_pstate_state[MAX_CPUS]; -static int process_comm_event(union perf_event *event, +static int process_comm_event(struct perf_tool *tool __used, + union perf_event *event, struct perf_sample *sample __used, - struct perf_session *session __used) + struct machine *machine __used) { pid_set_comm(event->comm.tid, event->comm.comm); return 0; } -static int process_fork_event(union perf_event *event, +static int process_fork_event(struct perf_tool *tool __used, + union perf_event *event, struct perf_sample *sample __used, - struct perf_session *session __used) + struct machine *machine __used) { pid_fork(event->fork.pid, event->fork.ppid, event->fork.time); return 0; } -static int process_exit_event(union perf_event *event, +static int process_exit_event(struct perf_tool *tool __used, + union perf_event *event, struct perf_sample *sample __used, - struct perf_session *session __used) + struct machine *machine __used) { pid_exit(event->fork.pid, event->fork.time); return 0; @@ -486,14 +491,15 @@ static void sched_switch(int cpu, u64 timestamp, struct trace_entry *te) } -static int process_sample_event(union perf_event *event __used, +static int process_sample_event(struct perf_tool *tool __used, + union perf_event *event __used, struct perf_sample *sample, - struct perf_evsel *evsel __used, - struct 
perf_session *session) + struct perf_evsel *evsel, + struct machine *machine __used) { struct trace_entry *te; - if (session->sample_type & PERF_SAMPLE_TIME) { + if (evsel->attr.sample_type & PERF_SAMPLE_TIME) { if (!first_time || first_time > sample->time) first_time = sample->time; if (last_time < sample->time) @@ -501,7 +507,7 @@ static int process_sample_event(union perf_event *event __used, } te = (void *)sample->raw_data; - if (session->sample_type & PERF_SAMPLE_RAW && sample->raw_size > 0) { + if ((evsel->attr.sample_type & PERF_SAMPLE_RAW) && sample->raw_size > 0) { char *event_str; #ifdef SUPPORT_OLD_POWER_EVENTS struct power_entry_old *peo; @@ -974,7 +980,7 @@ static void write_svg_file(const char *filename) svg_close(); } -static struct perf_event_ops event_ops = { +static struct perf_tool perf_timechart = { .comm = process_comm_event, .fork = process_fork_event, .exit = process_exit_event, @@ -985,7 +991,7 @@ static struct perf_event_ops event_ops = { static int __cmd_timechart(void) { struct perf_session *session = perf_session__new(input_name, O_RDONLY, - 0, false, &event_ops); + 0, false, &perf_timechart); int ret = -EINVAL; if (session == NULL) @@ -994,7 +1000,7 @@ static int __cmd_timechart(void) if (!perf_session__has_traces(session, "timechart record")) goto out_delete; - ret = perf_session__process_events(session, &event_ops); + ret = perf_session__process_events(session, &perf_timechart); if (ret) goto out_delete; diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index c9cdedb..4f81eeb 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -64,44 +64,6 @@ #include <linux/unistd.h> #include <linux/types.h> -static struct perf_top top = { - .count_filter = 5, - .delay_secs = 2, - .target_pid = -1, - .target_tid = -1, - .freq = 1000, /* 1 KHz */ -}; - -static bool system_wide = false; - -static bool use_tui, use_stdio; - -static bool sort_has_symbols; - -static bool dont_use_callchains; -static char callchain_default_opt[] = "fractal,0.5,callee"; - - -static int default_interval = 0; - -static bool kptr_restrict_warned; -static bool vmlinux_warned; -static bool inherit = false; -static int realtime_prio = 0; -static bool group = false; -static bool sample_id_all_avail = true; -static unsigned int mmap_pages = 128; - -static bool dump_symtab = false; - -static struct winsize winsize; - -static const char *sym_filter = NULL; -static int sym_pcnt_filter = 5; - -/* - * Source functions - */ void get_term_dimensions(struct winsize *ws) { @@ -125,21 +87,23 @@ void get_term_dimensions(struct winsize *ws) ws->ws_col = 80; } -static void update_print_entries(struct winsize *ws) +static void perf_top__update_print_entries(struct perf_top *top) { - top.print_entries = ws->ws_row; + top->print_entries = top->winsize.ws_row; - if (top.print_entries > 9) - top.print_entries -= 9; + if (top->print_entries > 9) + top->print_entries -= 9; } -static void sig_winch_handler(int sig __used) +static void perf_top__sig_winch(int sig __used, siginfo_t *info __used, void *arg) { - get_term_dimensions(&winsize); - update_print_entries(&winsize); + struct perf_top *top = arg; + + get_term_dimensions(&top->winsize); + perf_top__update_print_entries(top); } -static int parse_source(struct hist_entry *he) +static int perf_top__parse_source(struct perf_top *top, struct hist_entry *he) { struct symbol *sym; struct annotation *notes; @@ -170,7 +134,7 @@ static int parse_source(struct hist_entry *he) pthread_mutex_lock(¬es->lock); - if (symbol__alloc_hist(sym, 
top.evlist->nr_entries) < 0) {
+ if (symbol__alloc_hist(sym) < 0) {
 pthread_mutex_unlock(&notes->lock);
 pr_err("Not enough memory for annotating '%s' symbol!\n",
 sym->name);
@@ -181,7 +145,7 @@
 err = symbol__annotate(sym, map, 0);
 if (err == 0) {
 out_assign:
- top.sym_filter_entry = he;
+ top->sym_filter_entry = he;
 pthread_mutex_unlock(&notes->lock);
@@ -194,14 +158,16 @@
 static void __zero_source_counters(struct hist_entry *he)
 symbol__annotate_zero_histograms(sym);
 }
-static void record_precise_ip(struct hist_entry *he, int counter, u64 ip)
+static void perf_top__record_precise_ip(struct perf_top *top,
+ struct hist_entry *he,
+ int counter, u64 ip)
 {
 struct annotation *notes;
 struct symbol *sym;
 if (he == NULL || he->ms.sym == NULL ||
- ((top.sym_filter_entry == NULL ||
- top.sym_filter_entry->ms.sym != he->ms.sym) && use_browser != 1))
+ ((top->sym_filter_entry == NULL ||
+ top->sym_filter_entry->ms.sym != he->ms.sym) && use_browser != 1))
 return;
 sym = he->ms.sym;
@@ -210,8 +176,7 @@ static void record_precise_ip(struct hist_entry *he, int counter, u64 ip)
 if (pthread_mutex_trylock(&notes->lock))
 return;
- if (notes->src == NULL &&
- symbol__alloc_hist(sym, top.evlist->nr_entries) < 0) {
+ if (notes->src == NULL && symbol__alloc_hist(sym) < 0) {
 pthread_mutex_unlock(&notes->lock);
 pr_err("Not enough memory for annotating '%s' symbol!\n",
 sym->name);
@@ -225,8 +190,9 @@ static void record_precise_ip(struct hist_entry *he, int counter, u64 ip)
 pthread_mutex_unlock(&notes->lock);
 }
-static void show_details(struct hist_entry *he)
+static void perf_top__show_details(struct perf_top *top)
 {
+ struct hist_entry *he = top->sym_filter_entry;
 struct annotation *notes;
 struct symbol *symbol;
 int more;
@@ -242,15 +208,15 @@ static void show_details(struct hist_entry *he)
 if (notes->src == NULL)
 goto out_unlock;
- printf("Showing %s for %s\n", event_name(top.sym_evsel), symbol->name);
- printf(" Events Pcnt (>=%d%%)\n", sym_pcnt_filter);
+ printf("Showing %s for %s\n", event_name(top->sym_evsel), symbol->name);
+ printf(" Events Pcnt (>=%d%%)\n", top->sym_pcnt_filter);
- more = symbol__annotate_printf(symbol, he->ms.map, top.sym_evsel->idx,
- 0, sym_pcnt_filter, top.print_entries, 4);
- if (top.zero)
- symbol__annotate_zero_histogram(symbol, top.sym_evsel->idx);
+ more = symbol__annotate_printf(symbol, he->ms.map, top->sym_evsel->idx,
+ 0, top->sym_pcnt_filter, top->print_entries, 4);
+ if (top->zero)
+ symbol__annotate_zero_histogram(symbol, top->sym_evsel->idx);
 else
- symbol__annotate_decay_histogram(symbol, top.sym_evsel->idx);
+ symbol__annotate_decay_histogram(symbol, top->sym_evsel->idx);
 if (more != 0)
 printf("%d lines not displayed, maybe increase display entries [e]\n", more);
 out_unlock:
@@ -259,11 +225,9 @@ out_unlock:
 static const char CONSOLE_CLEAR[] = "\033[H\033[2J";
-static struct hist_entry *
- perf_session__add_hist_entry(struct perf_session *session,
- struct addr_location *al,
- struct perf_sample *sample,
- struct perf_evsel *evsel)
+static struct hist_entry *perf_evsel__add_hist_entry(struct perf_evsel *evsel,
+ struct addr_location *al,
+ struct perf_sample *sample)
 {
 struct hist_entry *he;
@@ -271,50 +235,51 @@ static struct hist_entry *
 if (he == NULL)
 return NULL;
- session->hists.stats.total_period += sample->period;
+ evsel->hists.stats.total_period += sample->period;
 hists__inc_nr_events(&evsel->hists, PERF_RECORD_SAMPLE);
 return he;
}
-static void print_sym_table(void)
+static void perf_top__print_sym_table(struct perf_top *top)
{
 char
bf[160]; int printed = 0; - const int win_width = winsize.ws_col - 1; + const int win_width = top->winsize.ws_col - 1; puts(CONSOLE_CLEAR); - perf_top__header_snprintf(&top, bf, sizeof(bf)); + perf_top__header_snprintf(top, bf, sizeof(bf)); printf("%s\n", bf); - perf_top__reset_sample_counters(&top); + perf_top__reset_sample_counters(top); printf("%-*.*s\n", win_width, win_width, graph_dotted_line); - if (top.sym_evsel->hists.stats.nr_lost_warned != - top.sym_evsel->hists.stats.nr_events[PERF_RECORD_LOST]) { - top.sym_evsel->hists.stats.nr_lost_warned = - top.sym_evsel->hists.stats.nr_events[PERF_RECORD_LOST]; + if (top->sym_evsel->hists.stats.nr_lost_warned != + top->sym_evsel->hists.stats.nr_events[PERF_RECORD_LOST]) { + top->sym_evsel->hists.stats.nr_lost_warned = + top->sym_evsel->hists.stats.nr_events[PERF_RECORD_LOST]; color_fprintf(stdout, PERF_COLOR_RED, "WARNING: LOST %d chunks, Check IO/CPU overload", - top.sym_evsel->hists.stats.nr_lost_warned); + top->sym_evsel->hists.stats.nr_lost_warned); ++printed; } - if (top.sym_filter_entry) { - show_details(top.sym_filter_entry); + if (top->sym_filter_entry) { + perf_top__show_details(top); return; } - hists__collapse_resort_threaded(&top.sym_evsel->hists); - hists__output_resort_threaded(&top.sym_evsel->hists); - hists__decay_entries_threaded(&top.sym_evsel->hists, - top.hide_user_symbols, - top.hide_kernel_symbols); - hists__output_recalc_col_len(&top.sym_evsel->hists, winsize.ws_row - 3); + hists__collapse_resort_threaded(&top->sym_evsel->hists); + hists__output_resort_threaded(&top->sym_evsel->hists); + hists__decay_entries_threaded(&top->sym_evsel->hists, + top->hide_user_symbols, + top->hide_kernel_symbols); + hists__output_recalc_col_len(&top->sym_evsel->hists, + top->winsize.ws_row - 3); putchar('\n'); - hists__fprintf(&top.sym_evsel->hists, NULL, false, false, - winsize.ws_row - 4 - printed, win_width, stdout); + hists__fprintf(&top->sym_evsel->hists, NULL, false, false, + top->winsize.ws_row - 4 - printed, win_width, stdout); } static void prompt_integer(int *target, const char *msg) @@ -352,17 +317,17 @@ static void prompt_percent(int *target, const char *msg) *target = tmp; } -static void prompt_symbol(struct hist_entry **target, const char *msg) +static void perf_top__prompt_symbol(struct perf_top *top, const char *msg) { char *buf = malloc(0), *p; - struct hist_entry *syme = *target, *n, *found = NULL; + struct hist_entry *syme = top->sym_filter_entry, *n, *found = NULL; struct rb_node *next; size_t dummy = 0; /* zero counters of active symbol */ if (syme) { __zero_source_counters(syme); - *target = NULL; + top->sym_filter_entry = NULL; } fprintf(stdout, "\n%s: ", msg); @@ -373,7 +338,7 @@ static void prompt_symbol(struct hist_entry **target, const char *msg) if (p) *p = 0; - next = rb_first(&top.sym_evsel->hists.entries); + next = rb_first(&top->sym_evsel->hists.entries); while (next) { n = rb_entry(next, struct hist_entry, rb_node); if (n->ms.sym && !strcmp(buf, n->ms.sym->name)) { @@ -386,47 +351,46 @@ static void prompt_symbol(struct hist_entry **target, const char *msg) if (!found) { fprintf(stderr, "Sorry, %s is not active.\n", buf); sleep(1); - return; } else - parse_source(found); + perf_top__parse_source(top, found); out_free: free(buf); } -static void print_mapped_keys(void) +static void perf_top__print_mapped_keys(struct perf_top *top) { char *name = NULL; - if (top.sym_filter_entry) { - struct symbol *sym = top.sym_filter_entry->ms.sym; + if (top->sym_filter_entry) { + struct symbol *sym = 
top->sym_filter_entry->ms.sym; name = sym->name; } fprintf(stdout, "\nMapped keys:\n"); - fprintf(stdout, "\t[d] display refresh delay. \t(%d)\n", top.delay_secs); - fprintf(stdout, "\t[e] display entries (lines). \t(%d)\n", top.print_entries); + fprintf(stdout, "\t[d] display refresh delay. \t(%d)\n", top->delay_secs); + fprintf(stdout, "\t[e] display entries (lines). \t(%d)\n", top->print_entries); - if (top.evlist->nr_entries > 1) - fprintf(stdout, "\t[E] active event counter. \t(%s)\n", event_name(top.sym_evsel)); + if (top->evlist->nr_entries > 1) + fprintf(stdout, "\t[E] active event counter. \t(%s)\n", event_name(top->sym_evsel)); - fprintf(stdout, "\t[f] profile display filter (count). \t(%d)\n", top.count_filter); + fprintf(stdout, "\t[f] profile display filter (count). \t(%d)\n", top->count_filter); - fprintf(stdout, "\t[F] annotate display filter (percent). \t(%d%%)\n", sym_pcnt_filter); + fprintf(stdout, "\t[F] annotate display filter (percent). \t(%d%%)\n", top->sym_pcnt_filter); fprintf(stdout, "\t[s] annotate symbol. \t(%s)\n", name?: "NULL"); fprintf(stdout, "\t[S] stop annotation.\n"); fprintf(stdout, "\t[K] hide kernel_symbols symbols. \t(%s)\n", - top.hide_kernel_symbols ? "yes" : "no"); + top->hide_kernel_symbols ? "yes" : "no"); fprintf(stdout, "\t[U] hide user symbols. \t(%s)\n", - top.hide_user_symbols ? "yes" : "no"); - fprintf(stdout, "\t[z] toggle sample zeroing. \t(%d)\n", top.zero ? 1 : 0); + top->hide_user_symbols ? "yes" : "no"); + fprintf(stdout, "\t[z] toggle sample zeroing. \t(%d)\n", top->zero ? 1 : 0); fprintf(stdout, "\t[qQ] quit.\n"); } -static int key_mapped(int c) +static int perf_top__key_mapped(struct perf_top *top, int c) { switch (c) { case 'd': @@ -442,7 +406,7 @@ static int key_mapped(int c) case 'S': return 1; case 'E': - return top.evlist->nr_entries > 1 ? 1 : 0; + return top->evlist->nr_entries > 1 ? 
1 : 0; default: break; } @@ -450,13 +414,13 @@ static int key_mapped(int c) return 0; } -static void handle_keypress(int c) +static void perf_top__handle_keypress(struct perf_top *top, int c) { - if (!key_mapped(c)) { + if (!perf_top__key_mapped(top, c)) { struct pollfd stdin_poll = { .fd = 0, .events = POLLIN }; struct termios tc, save; - print_mapped_keys(); + perf_top__print_mapped_keys(top); fprintf(stdout, "\nEnter selection, or unmapped key to continue: "); fflush(stdout); @@ -471,81 +435,86 @@ static void handle_keypress(int c) c = getc(stdin); tcsetattr(0, TCSAFLUSH, &save); - if (!key_mapped(c)) + if (!perf_top__key_mapped(top, c)) return; } switch (c) { case 'd': - prompt_integer(&top.delay_secs, "Enter display delay"); - if (top.delay_secs < 1) - top.delay_secs = 1; + prompt_integer(&top->delay_secs, "Enter display delay"); + if (top->delay_secs < 1) + top->delay_secs = 1; break; case 'e': - prompt_integer(&top.print_entries, "Enter display entries (lines)"); - if (top.print_entries == 0) { - sig_winch_handler(SIGWINCH); - signal(SIGWINCH, sig_winch_handler); + prompt_integer(&top->print_entries, "Enter display entries (lines)"); + if (top->print_entries == 0) { + struct sigaction act = { + .sa_sigaction = perf_top__sig_winch, + .sa_flags = SA_SIGINFO, + }; + perf_top__sig_winch(SIGWINCH, NULL, top); + sigaction(SIGWINCH, &act, NULL); } else signal(SIGWINCH, SIG_DFL); break; case 'E': - if (top.evlist->nr_entries > 1) { + if (top->evlist->nr_entries > 1) { /* Select 0 as the default event: */ int counter = 0; fprintf(stderr, "\nAvailable events:"); - list_for_each_entry(top.sym_evsel, &top.evlist->entries, node) - fprintf(stderr, "\n\t%d %s", top.sym_evsel->idx, event_name(top.sym_evsel)); + list_for_each_entry(top->sym_evsel, &top->evlist->entries, node) + fprintf(stderr, "\n\t%d %s", top->sym_evsel->idx, event_name(top->sym_evsel)); prompt_integer(&counter, "Enter details event counter"); - if (counter >= top.evlist->nr_entries) { - top.sym_evsel = list_entry(top.evlist->entries.next, struct perf_evsel, node); - fprintf(stderr, "Sorry, no such event, using %s.\n", event_name(top.sym_evsel)); + if (counter >= top->evlist->nr_entries) { + top->sym_evsel = list_entry(top->evlist->entries.next, struct perf_evsel, node); + fprintf(stderr, "Sorry, no such event, using %s.\n", event_name(top->sym_evsel)); sleep(1); break; } - list_for_each_entry(top.sym_evsel, &top.evlist->entries, node) - if (top.sym_evsel->idx == counter) + list_for_each_entry(top->sym_evsel, &top->evlist->entries, node) + if (top->sym_evsel->idx == counter) break; } else - top.sym_evsel = list_entry(top.evlist->entries.next, struct perf_evsel, node); + top->sym_evsel = list_entry(top->evlist->entries.next, struct perf_evsel, node); break; case 'f': - prompt_integer(&top.count_filter, "Enter display event count filter"); + prompt_integer(&top->count_filter, "Enter display event count filter"); break; case 'F': - prompt_percent(&sym_pcnt_filter, "Enter details display event filter (percent)"); + prompt_percent(&top->sym_pcnt_filter, + "Enter details display event filter (percent)"); break; case 'K': - top.hide_kernel_symbols = !top.hide_kernel_symbols; + top->hide_kernel_symbols = !top->hide_kernel_symbols; break; case 'q': case 'Q': printf("exiting.\n"); - if (dump_symtab) - perf_session__fprintf_dsos(top.session, stderr); + if (top->dump_symtab) + perf_session__fprintf_dsos(top->session, stderr); exit(0); case 's': - prompt_symbol(&top.sym_filter_entry, "Enter details symbol"); + perf_top__prompt_symbol(top, 
"Enter details symbol"); break; case 'S': - if (!top.sym_filter_entry) + if (!top->sym_filter_entry) break; else { - struct hist_entry *syme = top.sym_filter_entry; + struct hist_entry *syme = top->sym_filter_entry; - top.sym_filter_entry = NULL; + top->sym_filter_entry = NULL; __zero_source_counters(syme); } break; case 'U': - top.hide_user_symbols = !top.hide_user_symbols; + top->hide_user_symbols = !top->hide_user_symbols; break; case 'z': - top.zero = !top.zero; + top->zero = !top->zero; break; default: break; @@ -563,28 +532,30 @@ static void perf_top__sort_new_samples(void *arg) hists__collapse_resort_threaded(&t->sym_evsel->hists); hists__output_resort_threaded(&t->sym_evsel->hists); hists__decay_entries_threaded(&t->sym_evsel->hists, - top.hide_user_symbols, - top.hide_kernel_symbols); + t->hide_user_symbols, + t->hide_kernel_symbols); } -static void *display_thread_tui(void *arg __used) +static void *display_thread_tui(void *arg) { + struct perf_top *top = arg; const char *help = "For a higher level overview, try: perf top --sort comm,dso"; - perf_top__sort_new_samples(&top); - perf_evlist__tui_browse_hists(top.evlist, help, + perf_top__sort_new_samples(top); + perf_evlist__tui_browse_hists(top->evlist, help, perf_top__sort_new_samples, - &top, top.delay_secs); + top, top->delay_secs); exit_browser(0); exit(0); return NULL; } -static void *display_thread(void *arg __used) +static void *display_thread(void *arg) { struct pollfd stdin_poll = { .fd = 0, .events = POLLIN }; struct termios tc, save; + struct perf_top *top = arg; int delay_msecs, c; tcgetattr(0, &save); @@ -595,13 +566,13 @@ static void *display_thread(void *arg __used) pthread__unblock_sigwinch(); repeat: - delay_msecs = top.delay_secs * 1000; + delay_msecs = top->delay_secs * 1000; tcsetattr(0, TCSANOW, &tc); /* trash return*/ getc(stdin); while (1) { - print_sym_table(); + perf_top__print_sym_table(top); /* * Either timeout expired or we got an EINTR due to SIGWINCH, * refresh screen in both cases. @@ -621,7 +592,7 @@ process_hotkey: c = getc(stdin); tcsetattr(0, TCSAFLUSH, &save); - handle_keypress(c); + perf_top__handle_keypress(top, c); goto repeat; return NULL; @@ -673,47 +644,17 @@ static int symbol_filter(struct map *map __used, struct symbol *sym) return 0; } -static void perf_event__process_sample(const union perf_event *event, +static void perf_event__process_sample(struct perf_tool *tool, + const union perf_event *event, struct perf_evsel *evsel, struct perf_sample *sample, - struct perf_session *session) + struct machine *machine) { + struct perf_top *top = container_of(tool, struct perf_top, tool); struct symbol *parent = NULL; u64 ip = event->ip.ip; struct addr_location al; - struct machine *machine; int err; - u8 origin = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; - - ++top.samples; - - switch (origin) { - case PERF_RECORD_MISC_USER: - ++top.us_samples; - if (top.hide_user_symbols) - return; - machine = perf_session__find_host_machine(session); - break; - case PERF_RECORD_MISC_KERNEL: - ++top.kernel_samples; - if (top.hide_kernel_symbols) - return; - machine = perf_session__find_host_machine(session); - break; - case PERF_RECORD_MISC_GUEST_KERNEL: - ++top.guest_kernel_samples; - machine = perf_session__find_machine(session, event->ip.pid); - break; - case PERF_RECORD_MISC_GUEST_USER: - ++top.guest_us_samples; - /* - * TODO: we don't process guest user from host side - * except simple counting. 
- */ - return; - default: - return; - } if (!machine && perf_guest) { pr_err("Can't find guest [%d]'s kernel information\n", @@ -722,14 +663,14 @@ static void perf_event__process_sample(const union perf_event *event, } if (event->header.misc & PERF_RECORD_MISC_EXACT_IP) - top.exact_samples++; + top->exact_samples++; - if (perf_event__preprocess_sample(event, session, &al, sample, + if (perf_event__preprocess_sample(event, machine, &al, sample, symbol_filter) < 0 || al.filtered) return; - if (!kptr_restrict_warned && + if (!top->kptr_restrict_warned && symbol_conf.kptr_restrict && al.cpumode == PERF_RECORD_MISC_KERNEL) { ui__warning( @@ -740,7 +681,7 @@ static void perf_event__process_sample(const union perf_event *event, " modules" : ""); if (use_browser <= 0) sleep(5); - kptr_restrict_warned = true; + top->kptr_restrict_warned = true; } if (al.sym == NULL) { @@ -756,7 +697,7 @@ static void perf_event__process_sample(const union perf_event *event, * --hide-kernel-symbols, even if the user specifies an * invalid --vmlinux ;-) */ - if (!kptr_restrict_warned && !vmlinux_warned && + if (!top->kptr_restrict_warned && !top->vmlinux_warned && al.map == machine->vmlinux_maps[MAP__FUNCTION] && RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION])) { if (symbol_conf.vmlinux_name) { @@ -769,7 +710,7 @@ static void perf_event__process_sample(const union perf_event *event, if (use_browser <= 0) sleep(5); - vmlinux_warned = true; + top->vmlinux_warned = true; } } @@ -778,70 +719,109 @@ static void perf_event__process_sample(const union perf_event *event, if ((sort__has_parent || symbol_conf.use_callchain) && sample->callchain) { - err = perf_session__resolve_callchain(session, al.thread, - sample->callchain, &parent); + err = machine__resolve_callchain(machine, evsel, al.thread, + sample->callchain, &parent); if (err) return; } - he = perf_session__add_hist_entry(session, &al, sample, evsel); + he = perf_evsel__add_hist_entry(evsel, &al, sample); if (he == NULL) { pr_err("Problem incrementing symbol period, skipping event\n"); return; } if (symbol_conf.use_callchain) { - err = callchain_append(he->callchain, &session->callchain_cursor, + err = callchain_append(he->callchain, &evsel->hists.callchain_cursor, sample->period); if (err) return; } - if (sort_has_symbols) - record_precise_ip(he, evsel->idx, ip); + if (top->sort_has_symbols) + perf_top__record_precise_ip(top, he, evsel->idx, ip); } return; } -static void perf_session__mmap_read_idx(struct perf_session *self, int idx) +static void perf_top__mmap_read_idx(struct perf_top *top, int idx) { struct perf_sample sample; struct perf_evsel *evsel; + struct perf_session *session = top->session; union perf_event *event; + struct machine *machine; + u8 origin; int ret; - while ((event = perf_evlist__mmap_read(top.evlist, idx)) != NULL) { - ret = perf_session__parse_sample(self, event, &sample); + while ((event = perf_evlist__mmap_read(top->evlist, idx)) != NULL) { + ret = perf_session__parse_sample(session, event, &sample); if (ret) { pr_err("Can't parse sample, err = %d\n", ret); continue; } - evsel = perf_evlist__id2evsel(self->evlist, sample.id); + evsel = perf_evlist__id2evsel(session->evlist, sample.id); assert(evsel != NULL); + origin = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; + if (event->header.type == PERF_RECORD_SAMPLE) - perf_event__process_sample(event, evsel, &sample, self); - else if (event->header.type < PERF_RECORD_MAX) { + ++top->samples; + + switch (origin) { + case PERF_RECORD_MISC_USER: + ++top->us_samples; + if 
(top->hide_user_symbols) + continue; + machine = perf_session__find_host_machine(session); + break; + case PERF_RECORD_MISC_KERNEL: + ++top->kernel_samples; + if (top->hide_kernel_symbols) + continue; + machine = perf_session__find_host_machine(session); + break; + case PERF_RECORD_MISC_GUEST_KERNEL: + ++top->guest_kernel_samples; + machine = perf_session__find_machine(session, event->ip.pid); + break; + case PERF_RECORD_MISC_GUEST_USER: + ++top->guest_us_samples; + /* + * TODO: we don't process guest user from host side + * except simple counting. + */ + /* Fall thru */ + default: + continue; + } + + + if (event->header.type == PERF_RECORD_SAMPLE) { + perf_event__process_sample(&top->tool, event, evsel, + &sample, machine); + } else if (event->header.type < PERF_RECORD_MAX) { hists__inc_nr_events(&evsel->hists, event->header.type); - perf_event__process(event, &sample, self); + perf_event__process(&top->tool, event, &sample, machine); } else - ++self->hists.stats.nr_unknown_events; + ++session->hists.stats.nr_unknown_events; } } -static void perf_session__mmap_read(struct perf_session *self) +static void perf_top__mmap_read(struct perf_top *top) { int i; - for (i = 0; i < top.evlist->nr_mmaps; i++) - perf_session__mmap_read_idx(self, i); + for (i = 0; i < top->evlist->nr_mmaps; i++) + perf_top__mmap_read_idx(top, i); } -static void start_counters(struct perf_evlist *evlist) +static void perf_top__start_counters(struct perf_top *top) { struct perf_evsel *counter, *first; + struct perf_evlist *evlist = top->evlist; first = list_entry(evlist->entries.next, struct perf_evsel, node); @@ -849,15 +829,15 @@ static void start_counters(struct perf_evlist *evlist) struct perf_event_attr *attr = &counter->attr; struct xyarray *group_fd = NULL; - if (group && counter != first) + if (top->group && counter != first) group_fd = first->fd; attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID; - if (top.freq) { + if (top->freq) { attr->sample_type |= PERF_SAMPLE_PERIOD; attr->freq = 1; - attr->sample_freq = top.freq; + attr->sample_freq = top->freq; } if (evlist->nr_entries > 1) { @@ -870,23 +850,23 @@ static void start_counters(struct perf_evlist *evlist) attr->mmap = 1; attr->comm = 1; - attr->inherit = inherit; + attr->inherit = top->inherit; retry_sample_id: - attr->sample_id_all = sample_id_all_avail ? 1 : 0; + attr->sample_id_all = top->sample_id_all_avail ? 
1 : 0; try_again: - if (perf_evsel__open(counter, top.evlist->cpus, - top.evlist->threads, group, + if (perf_evsel__open(counter, top->evlist->cpus, + top->evlist->threads, top->group, group_fd) < 0) { int err = errno; if (err == EPERM || err == EACCES) { ui__error_paranoid(); goto out_err; - } else if (err == EINVAL && sample_id_all_avail) { + } else if (err == EINVAL && top->sample_id_all_avail) { /* * Old kernel, no attr->sample_id_type_all field */ - sample_id_all_avail = false; + top->sample_id_all_avail = false; goto retry_sample_id; } /* @@ -920,7 +900,7 @@ try_again: } } - if (perf_evlist__mmap(evlist, mmap_pages, false) < 0) { + if (perf_evlist__mmap(evlist, top->mmap_pages, false) < 0) { ui__warning("Failed to mmap with %d (%s)\n", errno, strerror(errno)); goto out_err; @@ -933,14 +913,14 @@ out_err: exit(0); } -static int setup_sample_type(void) +static int perf_top__setup_sample_type(struct perf_top *top) { - if (!sort_has_symbols) { + if (!top->sort_has_symbols) { if (symbol_conf.use_callchain) { ui__warning("Selected -g but \"sym\" not present in --sort/-s."); return -EINVAL; } - } else if (!dont_use_callchains && callchain_param.mode != CHAIN_NONE) { + } else if (!top->dont_use_callchains && callchain_param.mode != CHAIN_NONE) { if (callchain_register_param(&callchain_param) < 0) { ui__warning("Can't register callchain params.\n"); return -EINVAL; @@ -950,7 +930,7 @@ static int setup_sample_type(void) return 0; } -static int __cmd_top(void) +static int __cmd_top(struct perf_top *top) { pthread_t thread; int ret; @@ -958,39 +938,40 @@ static int __cmd_top(void) * FIXME: perf_session__new should allow passing a O_MMAP, so that all this * mmap reading, etc is encapsulated in it. Use O_WRONLY for now. */ - top.session = perf_session__new(NULL, O_WRONLY, false, false, NULL); - if (top.session == NULL) + top->session = perf_session__new(NULL, O_WRONLY, false, false, NULL); + if (top->session == NULL) return -ENOMEM; - ret = setup_sample_type(); + ret = perf_top__setup_sample_type(top); if (ret) goto out_delete; - if (top.target_tid != -1) - perf_event__synthesize_thread_map(top.evlist->threads, - perf_event__process, top.session); + if (top->target_tid != -1) + perf_event__synthesize_thread_map(&top->tool, top->evlist->threads, + perf_event__process, + &top->session->host_machine); else - perf_event__synthesize_threads(perf_event__process, top.session); - - start_counters(top.evlist); - top.session->evlist = top.evlist; - perf_session__update_sample_type(top.session); + perf_event__synthesize_threads(&top->tool, perf_event__process, + &top->session->host_machine); + perf_top__start_counters(top); + top->session->evlist = top->evlist; + perf_session__update_sample_type(top->session); /* Wait for a minimal set of events before starting the snapshot */ - poll(top.evlist->pollfd, top.evlist->nr_fds, 100); + poll(top->evlist->pollfd, top->evlist->nr_fds, 100); - perf_session__mmap_read(top.session); + perf_top__mmap_read(top); if (pthread_create(&thread, NULL, (use_browser > 0 ? 
display_thread_tui :
- display_thread), NULL)) {
+ display_thread), top)) {
 printf("Could not create display thread.\n");
 exit(-1);
 }
- if (realtime_prio) {
+ if (top->realtime_prio) {
 struct sched_param param;
- param.sched_priority = realtime_prio;
+ param.sched_priority = top->realtime_prio;
 if (sched_setscheduler(0, SCHED_FIFO, &param)) {
 printf("Could not set realtime priority.\n");
 exit(-1);
@@ -998,25 +979,25 @@
 }
 while (1) {
- u64 hits = top.samples;
+ u64 hits = top->samples;
- perf_session__mmap_read(top.session);
+ perf_top__mmap_read(top);
- if (hits == top.samples)
- ret = poll(top.evlist->pollfd, top.evlist->nr_fds, 100);
+ if (hits == top->samples)
+ ret = poll(top->evlist->pollfd, top->evlist->nr_fds, 100);
 }
out_delete:
- perf_session__delete(top.session);
- top.session = NULL;
+ perf_session__delete(top->session);
+ top->session = NULL;
 return 0;
}
static int
-parse_callchain_opt(const struct option *opt __used, const char *arg,
- int unset)
+parse_callchain_opt(const struct option *opt, const char *arg, int unset)
{
+ struct perf_top *top = (struct perf_top *)opt->value;
 char *tok, *tok2;
 char *endptr;
@@ -1024,7 +1005,7 @@ parse_callchain_opt(const struct option *opt __used, const char *arg,
 * --no-call-graph
 */
 if (unset) {
- dont_use_callchains = true;
+ top->dont_use_callchains = true;
 return 0;
 }
@@ -1052,9 +1033,7 @@ parse_callchain_opt(const struct option *opt __used, const char *arg,
 symbol_conf.use_callchain = false;
 return 0;
- }
-
- else
+ } else
 return -1;
 /* get the min percentage */
@@ -1098,17 +1077,32 @@ static const char * const top_usage[] = {
 NULL
};
-static const struct option options[] = {
+int cmd_top(int argc, const char **argv, const char *prefix __used)
+{
+ struct perf_evsel *pos;
+ int status = -ENOMEM;
+ struct perf_top top = {
+ .count_filter = 5,
+ .delay_secs = 2,
+ .target_pid = -1,
+ .target_tid = -1,
+ .freq = 1000, /* 1 KHz */
+ .sample_id_all_avail = true,
+ .mmap_pages = 128,
+ .sym_pcnt_filter = 5,
+ };
+ char callchain_default_opt[] = "fractal,0.5,callee";
+ const struct option options[] = {
 OPT_CALLBACK('e', "event", &top.evlist, "event",
 "event selector.
use 'perf list' to list available events", parse_events_option), - OPT_INTEGER('c', "count", &default_interval, + OPT_INTEGER('c', "count", &top.default_interval, "event period to sample"), OPT_INTEGER('p', "pid", &top.target_pid, "profile events on existing process id"), OPT_INTEGER('t', "tid", &top.target_tid, "profile events on existing thread id"), - OPT_BOOLEAN('a', "all-cpus", &system_wide, + OPT_BOOLEAN('a', "all-cpus", &top.system_wide, "system-wide collection from all CPUs"), OPT_STRING('C', "cpu", &top.cpu_list, "cpu", "list of cpus to monitor"), @@ -1116,20 +1110,20 @@ static const struct option options[] = { "file", "vmlinux pathname"), OPT_BOOLEAN('K', "hide_kernel_symbols", &top.hide_kernel_symbols, "hide kernel symbols"), - OPT_UINTEGER('m', "mmap-pages", &mmap_pages, "number of mmap data pages"), - OPT_INTEGER('r', "realtime", &realtime_prio, + OPT_UINTEGER('m', "mmap-pages", &top.mmap_pages, "number of mmap data pages"), + OPT_INTEGER('r', "realtime", &top.realtime_prio, "collect data with this RT SCHED_FIFO priority"), OPT_INTEGER('d', "delay", &top.delay_secs, "number of seconds to delay between refreshes"), - OPT_BOOLEAN('D', "dump-symtab", &dump_symtab, + OPT_BOOLEAN('D', "dump-symtab", &top.dump_symtab, "dump the symbol table used for profiling"), OPT_INTEGER('f', "count-filter", &top.count_filter, "only display functions with more events than this"), - OPT_BOOLEAN('g', "group", &group, + OPT_BOOLEAN('g', "group", &top.group, "put the counters into a counter group"), - OPT_BOOLEAN('i', "inherit", &inherit, + OPT_BOOLEAN('i', "inherit", &top.inherit, "child tasks inherit counters"), - OPT_STRING(0, "sym-annotate", &sym_filter, "symbol name", + OPT_STRING(0, "sym-annotate", &top.sym_filter, "symbol name", "symbol to annotate"), OPT_BOOLEAN('z', "zero", &top.zero, "zero history across updates"), @@ -1139,15 +1133,15 @@ static const struct option options[] = { "display this many functions"), OPT_BOOLEAN('U', "hide_user_symbols", &top.hide_user_symbols, "hide user symbols"), - OPT_BOOLEAN(0, "tui", &use_tui, "Use the TUI interface"), - OPT_BOOLEAN(0, "stdio", &use_stdio, "Use the stdio interface"), + OPT_BOOLEAN(0, "tui", &top.use_tui, "Use the TUI interface"), + OPT_BOOLEAN(0, "stdio", &top.use_stdio, "Use the stdio interface"), OPT_INCR('v', "verbose", &verbose, "be more verbose (show counter open errors, etc)"), OPT_STRING('s', "sort", &sort_order, "key[,key2...]", "sort by key(s): pid, comm, dso, symbol, parent"), OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples, "Show a column with the number of samples"), - OPT_CALLBACK_DEFAULT('G', "call-graph", NULL, "output_type,min_percent, call_order", + OPT_CALLBACK_DEFAULT('G', "call-graph", &top, "output_type,min_percent, call_order", "Display callchains using output_type (graph, flat, fractal, or none), min percent threshold and callchain order. " "Default: fractal,0.5,callee", &parse_callchain_opt, callchain_default_opt), @@ -1166,12 +1160,7 @@ static const struct option options[] = { OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style", "Specify disassembler style (e.g. 
-M intel for intel syntax)"), OPT_END() -}; - -int cmd_top(int argc, const char **argv, const char *prefix __used) -{ - struct perf_evsel *pos; - int status = -ENOMEM; + }; top.evlist = perf_evlist__new(NULL, NULL); if (top.evlist == NULL) @@ -1188,9 +1177,9 @@ int cmd_top(int argc, const char **argv, const char *prefix __used) setup_sorting(top_usage, options); - if (use_stdio) + if (top.use_stdio) use_browser = 0; - else if (use_tui) + else if (top.use_tui) use_browser = 1; setup_browser(false); @@ -1215,38 +1204,31 @@ int cmd_top(int argc, const char **argv, const char *prefix __used) return -ENOMEM; } + symbol_conf.nr_events = top.evlist->nr_entries; + if (top.delay_secs < 1) top.delay_secs = 1; /* * User specified count overrides default frequency. */ - if (default_interval) + if (top.default_interval) top.freq = 0; else if (top.freq) { - default_interval = top.freq; + top.default_interval = top.freq; } else { fprintf(stderr, "frequency and count are zero, aborting\n"); exit(EXIT_FAILURE); } list_for_each_entry(pos, &top.evlist->entries, node) { - if (perf_evsel__alloc_fd(pos, top.evlist->cpus->nr, - top.evlist->threads->nr) < 0) - goto out_free_fd; /* * Fill in the ones not specifically initialized via -c: */ - if (pos->attr.sample_period) - continue; - - pos->attr.sample_period = default_interval; + if (!pos->attr.sample_period) + pos->attr.sample_period = top.default_interval; } - if (perf_evlist__alloc_pollfd(top.evlist) < 0 || - perf_evlist__alloc_mmap(top.evlist) < 0) - goto out_free_fd; - top.sym_evsel = list_entry(top.evlist->entries.next, struct perf_evsel, node); symbol_conf.priv_size = sizeof(struct annotation); @@ -1263,16 +1245,20 @@ int cmd_top(int argc, const char **argv, const char *prefix __used) * Avoid annotation data structures overhead when symbols aren't on the * sort list. 
*/ - sort_has_symbols = sort_sym.list.next != NULL; + top.sort_has_symbols = sort_sym.list.next != NULL; - get_term_dimensions(&winsize); + get_term_dimensions(&top.winsize); if (top.print_entries == 0) { - update_print_entries(&winsize); - signal(SIGWINCH, sig_winch_handler); + struct sigaction act = { + .sa_sigaction = perf_top__sig_winch, + .sa_flags = SA_SIGINFO, + }; + perf_top__update_print_entries(&top); + sigaction(SIGWINCH, &act, NULL); } - status = __cmd_top(); -out_free_fd: + status = __cmd_top(&top); + perf_evlist__delete(top.evlist); return status; diff --git a/tools/perf/perf.c b/tools/perf/perf.c index 73d0cac..2b2e225 100644 --- a/tools/perf/perf.c +++ b/tools/perf/perf.c @@ -29,8 +29,6 @@ struct pager_config { int val; }; -static char debugfs_mntpt[MAXPATHLEN]; - static int pager_command_config(const char *var, const char *value, void *data) { struct pager_config *c = data; @@ -81,15 +79,6 @@ static void commit_pager_choice(void) } } -static void set_debugfs_path(void) -{ - char *path; - - path = getenv(PERF_DEBUGFS_ENVIRONMENT); - snprintf(debugfs_path, MAXPATHLEN, "%s/%s", path ?: debugfs_mntpt, - "tracing/events"); -} - static int handle_options(const char ***argv, int *argc, int *envchanged) { int handled = 0; @@ -161,15 +150,14 @@ static int handle_options(const char ***argv, int *argc, int *envchanged) fprintf(stderr, "No directory given for --debugfs-dir.\n"); usage(perf_usage_string); } - strncpy(debugfs_mntpt, (*argv)[1], MAXPATHLEN); - debugfs_mntpt[MAXPATHLEN - 1] = '\0'; + debugfs_set_path((*argv)[1]); if (envchanged) *envchanged = 1; (*argv)++; (*argc)--; } else if (!prefixcmp(cmd, CMD_DEBUGFS_DIR)) { - strncpy(debugfs_mntpt, cmd + strlen(CMD_DEBUGFS_DIR), MAXPATHLEN); - debugfs_mntpt[MAXPATHLEN - 1] = '\0'; + debugfs_set_path(cmd + strlen(CMD_DEBUGFS_DIR)); + fprintf(stderr, "dir: %s\n", debugfs_mountpoint); if (envchanged) *envchanged = 1; } else { @@ -281,7 +269,6 @@ static int run_builtin(struct cmd_struct *p, int argc, const char **argv) if (use_pager == -1 && p->option & USE_PAGER) use_pager = 1; commit_pager_choice(); - set_debugfs_path(); status = p->fn(argc, argv, prefix); exit_browser(status); @@ -416,17 +403,6 @@ static int run_argv(int *argcp, const char ***argv) return done_alias; } -/* mini /proc/mounts parser: searching for "^blah /mount/point debugfs" */ -static void get_debugfs_mntpt(void) -{ - const char *path = debugfs_mount(NULL); - - if (path) - strncpy(debugfs_mntpt, path, sizeof(debugfs_mntpt)); - else - debugfs_mntpt[0] = '\0'; -} - static void pthread__block_sigwinch(void) { sigset_t set; @@ -453,7 +429,7 @@ int main(int argc, const char **argv) if (!cmd) cmd = "perf-help"; /* get debugfs mount point from /proc/mounts */ - get_debugfs_mntpt(); + debugfs_mount(NULL); /* * "perf-xxxx" is the same as "perf xxxx", but we obviously: * @@ -476,7 +452,6 @@ int main(int argc, const char **argv) argc--; handle_options(&argv, &argc, NULL); commit_pager_choice(); - set_debugfs_path(); set_buildid_dir(); if (argc > 0) { diff --git a/tools/perf/perf.h b/tools/perf/perf.h index 914c895..64f8bee 100644 --- a/tools/perf/perf.h +++ b/tools/perf/perf.h @@ -185,4 +185,28 @@ extern const char perf_version_string[]; void pthread__unblock_sigwinch(void); +struct perf_record_opts { + pid_t target_pid; + pid_t target_tid; + bool call_graph; + bool group; + bool inherit_stat; + bool no_delay; + bool no_inherit; + bool no_samples; + bool pipe_output; + bool raw_samples; + bool sample_address; + bool sample_time; + bool sample_id_all_avail; + bool system_wide; + 
bool period; + unsigned int freq; + unsigned int mmap_pages; + unsigned int user_freq; + u64 default_interval; + u64 user_interval; + const char *cpu_list; +}; + #endif diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c index 119e996..011ed26 100644 --- a/tools/perf/util/annotate.c +++ b/tools/perf/util/annotate.c @@ -25,17 +25,17 @@ int symbol__annotate_init(struct map *map __used, struct symbol *sym) return 0; } -int symbol__alloc_hist(struct symbol *sym, int nevents) +int symbol__alloc_hist(struct symbol *sym) { struct annotation *notes = symbol__annotation(sym); size_t sizeof_sym_hist = (sizeof(struct sym_hist) + (sym->end - sym->start) * sizeof(u64)); - notes->src = zalloc(sizeof(*notes->src) + nevents * sizeof_sym_hist); + notes->src = zalloc(sizeof(*notes->src) + symbol_conf.nr_events * sizeof_sym_hist); if (notes->src == NULL) return -1; notes->src->sizeof_sym_hist = sizeof_sym_hist; - notes->src->nr_histograms = nevents; + notes->src->nr_histograms = symbol_conf.nr_events; INIT_LIST_HEAD(&notes->src->source); return 0; } @@ -334,7 +334,7 @@ fallback: disassembler_style ? "-M " : "", disassembler_style ? disassembler_style : "", map__rip_2objdump(map, sym->start), - map__rip_2objdump(map, sym->end), + map__rip_2objdump(map, sym->end+1), symbol_conf.annotate_asm_raw ? "" : "--no-show-raw", symbol_conf.annotate_src ? "-S" : "", symfs_filename, filename); diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h index d907252..efa5dc8 100644 --- a/tools/perf/util/annotate.h +++ b/tools/perf/util/annotate.h @@ -72,7 +72,7 @@ static inline struct annotation *symbol__annotation(struct symbol *sym) int symbol__inc_addr_samples(struct symbol *sym, struct map *map, int evidx, u64 addr); -int symbol__alloc_hist(struct symbol *sym, int nevents); +int symbol__alloc_hist(struct symbol *sym); void symbol__annotate_zero_histograms(struct symbol *sym); int symbol__annotate(struct symbol *sym, struct map *map, size_t privsize); @@ -99,8 +99,7 @@ static inline int symbol__tui_annotate(struct symbol *sym __used, } #else int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx, - int nr_events, void(*timer)(void *arg), void *arg, - int delay_secs); + void(*timer)(void *arg), void *arg, int delay_secs); #endif extern const char *disassembler_style; diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c index a91cd99..dff9c7a 100644 --- a/tools/perf/util/build-id.c +++ b/tools/perf/util/build-id.c @@ -13,15 +13,18 @@ #include "symbol.h" #include <linux/kernel.h> #include "debug.h" +#include "session.h" +#include "tool.h" -static int build_id__mark_dso_hit(union perf_event *event, +static int build_id__mark_dso_hit(struct perf_tool *tool __used, + union perf_event *event, struct perf_sample *sample __used, struct perf_evsel *evsel __used, - struct perf_session *session) + struct machine *machine) { struct addr_location al; u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; - struct thread *thread = perf_session__findnew(session, event->ip.pid); + struct thread *thread = machine__findnew_thread(machine, event->ip.pid); if (thread == NULL) { pr_err("problem processing %d event, skipping it.\n", @@ -29,8 +32,8 @@ static int build_id__mark_dso_hit(union perf_event *event, return -1; } - thread__find_addr_map(thread, session, cpumode, MAP__FUNCTION, - event->ip.pid, event->ip.ip, &al); + thread__find_addr_map(thread, machine, cpumode, MAP__FUNCTION, + event->ip.ip, &al); if (al.map != NULL) al.map->dso->hit = 1; @@ -38,25 +41,26 @@ static 
int build_id__mark_dso_hit(union perf_event *event, return 0; } -static int perf_event__exit_del_thread(union perf_event *event, +static int perf_event__exit_del_thread(struct perf_tool *tool __used, + union perf_event *event, struct perf_sample *sample __used, - struct perf_session *session) + struct machine *machine) { - struct thread *thread = perf_session__findnew(session, event->fork.tid); + struct thread *thread = machine__findnew_thread(machine, event->fork.tid); dump_printf("(%d:%d):(%d:%d)\n", event->fork.pid, event->fork.tid, event->fork.ppid, event->fork.ptid); if (thread) { - rb_erase(&thread->rb_node, &session->threads); - session->last_match = NULL; + rb_erase(&thread->rb_node, &machine->threads); + machine->last_match = NULL; thread__delete(thread); } return 0; } -struct perf_event_ops build_id__mark_dso_hit_ops = { +struct perf_tool build_id__mark_dso_hit_ops = { .sample = build_id__mark_dso_hit, .mmap = perf_event__process_mmap, .fork = perf_event__process_task, diff --git a/tools/perf/util/build-id.h b/tools/perf/util/build-id.h index 5dafb00..a993ba8 100644 --- a/tools/perf/util/build-id.h +++ b/tools/perf/util/build-id.h @@ -3,7 +3,7 @@ #include "session.h" -extern struct perf_event_ops build_id__mark_dso_hit_ops; +extern struct perf_tool build_id__mark_dso_hit_ops; char *dso__build_id_filename(struct dso *self, char *bf, size_t size); diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h index 9b4ff16c..7f9c0f1 100644 --- a/tools/perf/util/callchain.h +++ b/tools/perf/util/callchain.h @@ -101,6 +101,9 @@ int callchain_append(struct callchain_root *root, int callchain_merge(struct callchain_cursor *cursor, struct callchain_root *dst, struct callchain_root *src); +struct ip_callchain; +union perf_event; + bool ip_callchain__valid(struct ip_callchain *chain, const union perf_event *event); /* diff --git a/tools/perf/util/cgroup.c b/tools/perf/util/cgroup.c index 96bee5c..dbe2f16 100644 --- a/tools/perf/util/cgroup.c +++ b/tools/perf/util/cgroup.c @@ -3,7 +3,6 @@ #include "parse-options.h" #include "evsel.h" #include "cgroup.h" -#include "debugfs.h" /* MAX_PATH, STR() */ #include "evlist.h" int nr_cgroups; @@ -12,7 +11,7 @@ static int cgroupfs_find_mountpoint(char *buf, size_t maxlen) { FILE *fp; - char mountpoint[MAX_PATH+1], tokens[MAX_PATH+1], type[MAX_PATH+1]; + char mountpoint[PATH_MAX + 1], tokens[PATH_MAX + 1], type[PATH_MAX + 1]; char *token, *saved_ptr = NULL; int found = 0; @@ -25,8 +24,8 @@ cgroupfs_find_mountpoint(char *buf, size_t maxlen) * and inspect every cgroupfs mount point to find one that has * perf_event subsystem */ - while (fscanf(fp, "%*s %"STR(MAX_PATH)"s %"STR(MAX_PATH)"s %" - STR(MAX_PATH)"s %*d %*d\n", + while (fscanf(fp, "%*s %"STR(PATH_MAX)"s %"STR(PATH_MAX)"s %" + STR(PATH_MAX)"s %*d %*d\n", mountpoint, type, tokens) == 3) { if (!strcmp(type, "cgroup")) { @@ -57,15 +56,15 @@ cgroupfs_find_mountpoint(char *buf, size_t maxlen) static int open_cgroup(char *name) { - char path[MAX_PATH+1]; - char mnt[MAX_PATH+1]; + char path[PATH_MAX + 1]; + char mnt[PATH_MAX + 1]; int fd; - if (cgroupfs_find_mountpoint(mnt, MAX_PATH+1)) + if (cgroupfs_find_mountpoint(mnt, PATH_MAX + 1)) return -1; - snprintf(path, MAX_PATH, "%s/%s", mnt, name); + snprintf(path, PATH_MAX, "%s/%s", mnt, name); fd = open(path, O_RDONLY); if (fd == -1) diff --git a/tools/perf/util/config.c b/tools/perf/util/config.c index 80d9598..0deac6a 100644 --- a/tools/perf/util/config.c +++ b/tools/perf/util/config.c @@ -1,5 +1,8 @@ /* - * GIT - The information manager from 
hell + * config.c + * + * Helper functions for parsing config items. + * Originally copied from GIT source. * * Copyright (C) Linus Torvalds, 2005 * Copyright (C) Johannes Schindelin, 2005 diff --git a/tools/perf/util/debugfs.c b/tools/perf/util/debugfs.c index a88fefc..ffc35e7 100644 --- a/tools/perf/util/debugfs.c +++ b/tools/perf/util/debugfs.c @@ -2,8 +2,12 @@ #include "debugfs.h" #include "cache.h" +#include <linux/kernel.h> +#include <sys/mount.h> + static int debugfs_premounted; -static char debugfs_mountpoint[MAX_PATH+1]; +char debugfs_mountpoint[PATH_MAX + 1] = "/sys/kernel/debug"; +char tracing_events_path[PATH_MAX + 1] = "/sys/kernel/debug/tracing/events"; static const char *debugfs_known_mountpoints[] = { "/sys/kernel/debug/", @@ -62,11 +66,9 @@ const char *debugfs_find_mountpoint(void) /* give up and parse /proc/mounts */ fp = fopen("/proc/mounts", "r"); if (fp == NULL) - die("Can't open /proc/mounts for read"); + return NULL; - while (fscanf(fp, "%*s %" - STR(MAX_PATH) - "s %99s %*s %*d %*d\n", + while (fscanf(fp, "%*s %" STR(PATH_MAX) "s %99s %*s %*d %*d\n", debugfs_mountpoint, type) == 2) { if (strcmp(type, "debugfs") == 0) break; @@ -106,6 +108,12 @@ int debugfs_valid_entry(const char *path) return 0; } +static void debugfs_set_tracing_events_path(const char *mountpoint) +{ + snprintf(tracing_events_path, sizeof(tracing_events_path), "%s/%s", + mountpoint, "tracing/events"); +} + /* mount the debugfs somewhere if it's not mounted */ char *debugfs_mount(const char *mountpoint) @@ -113,7 +121,7 @@ char *debugfs_mount(const char *mountpoint) /* see if it's already mounted */ if (debugfs_find_mountpoint()) { debugfs_premounted = 1; - return debugfs_mountpoint; + goto out; } /* if not mounted and no argument */ @@ -129,12 +137,19 @@ char *debugfs_mount(const char *mountpoint) return NULL; /* save the mountpoint */ - strncpy(debugfs_mountpoint, mountpoint, sizeof(debugfs_mountpoint)); debugfs_found = 1; - + strncpy(debugfs_mountpoint, mountpoint, sizeof(debugfs_mountpoint)); +out: + debugfs_set_tracing_events_path(debugfs_mountpoint); return debugfs_mountpoint; } +void debugfs_set_path(const char *mountpoint) +{ + snprintf(debugfs_mountpoint, sizeof(debugfs_mountpoint), "%s", mountpoint); + debugfs_set_tracing_events_path(mountpoint); +} + /* umount the debugfs */ int debugfs_umount(void) @@ -158,7 +173,7 @@ int debugfs_umount(void) int debugfs_write(const char *entry, const char *value) { - char path[MAX_PATH+1]; + char path[PATH_MAX + 1]; int ret, count; int fd; @@ -203,7 +218,7 @@ int debugfs_write(const char *entry, const char *value) */ int debugfs_read(const char *entry, char *buffer, size_t size) { - char path[MAX_PATH+1]; + char path[PATH_MAX + 1]; int ret; int fd; diff --git a/tools/perf/util/debugfs.h b/tools/perf/util/debugfs.h index 83a0287..4a878f7 100644 --- a/tools/perf/util/debugfs.h +++ b/tools/perf/util/debugfs.h @@ -1,25 +1,18 @@ #ifndef __DEBUGFS_H__ #define __DEBUGFS_H__ -#include <sys/mount.h> +const char *debugfs_find_mountpoint(void); +int debugfs_valid_mountpoint(const char *debugfs); +int debugfs_valid_entry(const char *path); +char *debugfs_mount(const char *mountpoint); +int debugfs_umount(void); +void debugfs_set_path(const char *mountpoint); +int debugfs_write(const char *entry, const char *value); +int debugfs_read(const char *entry, char *buffer, size_t size); +void debugfs_force_cleanup(void); +int debugfs_make_path(const char *element, char *buffer, int size); -#ifndef MAX_PATH -# define MAX_PATH 256 -#endif - -#ifndef STR -# define _STR(x) #x 
-# define STR(x) _STR(x) -#endif - -extern const char *debugfs_find_mountpoint(void); -extern int debugfs_valid_mountpoint(const char *debugfs); -extern int debugfs_valid_entry(const char *path); -extern char *debugfs_mount(const char *mountpoint); -extern int debugfs_umount(void); -extern int debugfs_write(const char *entry, const char *value); -extern int debugfs_read(const char *entry, char *buffer, size_t size); -extern void debugfs_force_cleanup(void); -extern int debugfs_make_path(const char *element, char *buffer, int size); +extern char debugfs_mountpoint[]; +extern char tracing_events_path[]; #endif /* __DEBUGFS_H__ */ diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c index 437f8ca..73ddaf0 100644 --- a/tools/perf/util/event.c +++ b/tools/perf/util/event.c @@ -1,7 +1,6 @@ #include <linux/types.h> #include "event.h" #include "debug.h" -#include "session.h" #include "sort.h" #include "string.h" #include "strlist.h" @@ -44,36 +43,27 @@ static struct perf_sample synth_sample = { .period = 1, }; -static pid_t perf_event__synthesize_comm(union perf_event *event, pid_t pid, - int full, perf_event__handler_t process, - struct perf_session *session) +static pid_t perf_event__get_comm_tgid(pid_t pid, char *comm, size_t len) { char filename[PATH_MAX]; char bf[BUFSIZ]; FILE *fp; size_t size = 0; - DIR *tasks; - struct dirent dirent, *next; - pid_t tgid = 0; + pid_t tgid = -1; snprintf(filename, sizeof(filename), "/proc/%d/status", pid); fp = fopen(filename, "r"); if (fp == NULL) { -out_race: - /* - * We raced with a task exiting - just return: - */ pr_debug("couldn't open %s\n", filename); return 0; } - memset(&event->comm, 0, sizeof(event->comm)); - - while (!event->comm.comm[0] || !event->comm.pid) { + while (!comm[0] || (tgid < 0)) { if (fgets(bf, sizeof(bf), fp) == NULL) { - pr_warning("couldn't get COMM and pgid, malformed %s\n", filename); - goto out; + pr_warning("couldn't get COMM and pgid, malformed %s\n", + filename); + break; } if (memcmp(bf, "Name:", 5) == 0) { @@ -81,33 +71,65 @@ out_race: while (*name && isspace(*name)) ++name; size = strlen(name) - 1; - memcpy(event->comm.comm, name, size++); + if (size >= len) + size = len - 1; + memcpy(comm, name, size); + } else if (memcmp(bf, "Tgid:", 5) == 0) { char *tgids = bf + 5; while (*tgids && isspace(*tgids)) ++tgids; - tgid = event->comm.pid = atoi(tgids); + tgid = atoi(tgids); } } + fclose(fp); + + return tgid; +} + +static pid_t perf_event__synthesize_comm(struct perf_tool *tool, + union perf_event *event, pid_t pid, + int full, + perf_event__handler_t process, + struct machine *machine) +{ + char filename[PATH_MAX]; + size_t size; + DIR *tasks; + struct dirent dirent, *next; + pid_t tgid; + + memset(&event->comm, 0, sizeof(event->comm)); + + tgid = perf_event__get_comm_tgid(pid, event->comm.comm, + sizeof(event->comm.comm)); + if (tgid < 0) + goto out; + + event->comm.pid = tgid; event->comm.header.type = PERF_RECORD_COMM; + + size = strlen(event->comm.comm) + 1; size = ALIGN(size, sizeof(u64)); - memset(event->comm.comm + size, 0, session->id_hdr_size); + memset(event->comm.comm + size, 0, machine->id_hdr_size); event->comm.header.size = (sizeof(event->comm) - (sizeof(event->comm.comm) - size) + - session->id_hdr_size); + machine->id_hdr_size); if (!full) { event->comm.tid = pid; - process(event, &synth_sample, session); + process(tool, event, &synth_sample, machine); goto out; } snprintf(filename, sizeof(filename), "/proc/%d/task", pid); tasks = opendir(filename); - if (tasks == NULL) - goto out_race; + if (tasks 
== NULL) { + pr_debug("couldn't open %s\n", filename); + return 0; + } while (!readdir_r(tasks, &dirent, &next) && next) { char *end; @@ -115,22 +137,32 @@ out_race: if (*end) continue; + /* already have tgid; just want to update the comm */ + (void) perf_event__get_comm_tgid(pid, event->comm.comm, + sizeof(event->comm.comm)); + + size = strlen(event->comm.comm) + 1; + size = ALIGN(size, sizeof(u64)); + memset(event->comm.comm + size, 0, machine->id_hdr_size); + event->comm.header.size = (sizeof(event->comm) - + (sizeof(event->comm.comm) - size) + + machine->id_hdr_size); + event->comm.tid = pid; - process(event, &synth_sample, session); + process(tool, event, &synth_sample, machine); } closedir(tasks); out: - fclose(fp); - return tgid; } -static int perf_event__synthesize_mmap_events(union perf_event *event, +static int perf_event__synthesize_mmap_events(struct perf_tool *tool, + union perf_event *event, pid_t pid, pid_t tgid, perf_event__handler_t process, - struct perf_session *session) + struct machine *machine) { char filename[PATH_MAX]; FILE *fp; @@ -193,12 +225,12 @@ static int perf_event__synthesize_mmap_events(union perf_event *event, event->mmap.len -= event->mmap.start; event->mmap.header.size = (sizeof(event->mmap) - (sizeof(event->mmap.filename) - size)); - memset(event->mmap.filename + size, 0, session->id_hdr_size); - event->mmap.header.size += session->id_hdr_size; + memset(event->mmap.filename + size, 0, machine->id_hdr_size); + event->mmap.header.size += machine->id_hdr_size; event->mmap.pid = tgid; event->mmap.tid = pid; - process(event, &synth_sample, session); + process(tool, event, &synth_sample, machine); } } @@ -206,14 +238,14 @@ static int perf_event__synthesize_mmap_events(union perf_event *event, return 0; } -int perf_event__synthesize_modules(perf_event__handler_t process, - struct perf_session *session, +int perf_event__synthesize_modules(struct perf_tool *tool, + perf_event__handler_t process, struct machine *machine) { struct rb_node *nd; struct map_groups *kmaps = &machine->kmaps; union perf_event *event = zalloc((sizeof(event->mmap) + - session->id_hdr_size)); + machine->id_hdr_size)); if (event == NULL) { pr_debug("Not enough memory synthesizing mmap event " "for kernel modules\n"); @@ -243,15 +275,15 @@ int perf_event__synthesize_modules(perf_event__handler_t process, event->mmap.header.type = PERF_RECORD_MMAP; event->mmap.header.size = (sizeof(event->mmap) - (sizeof(event->mmap.filename) - size)); - memset(event->mmap.filename + size, 0, session->id_hdr_size); - event->mmap.header.size += session->id_hdr_size; + memset(event->mmap.filename + size, 0, machine->id_hdr_size); + event->mmap.header.size += machine->id_hdr_size; event->mmap.start = pos->start; event->mmap.len = pos->end - pos->start; event->mmap.pid = machine->pid; memcpy(event->mmap.filename, pos->dso->long_name, pos->dso->long_name_len + 1); - process(event, &synth_sample, session); + process(tool, event, &synth_sample, machine); } free(event); @@ -260,40 +292,69 @@ int perf_event__synthesize_modules(perf_event__handler_t process, static int __event__synthesize_thread(union perf_event *comm_event, union perf_event *mmap_event, - pid_t pid, perf_event__handler_t process, - struct perf_session *session) + pid_t pid, int full, + perf_event__handler_t process, + struct perf_tool *tool, + struct machine *machine) { - pid_t tgid = perf_event__synthesize_comm(comm_event, pid, 1, process, - session); + pid_t tgid = perf_event__synthesize_comm(tool, comm_event, pid, full, + process, machine); if (tgid 
== -1) return -1; - return perf_event__synthesize_mmap_events(mmap_event, pid, tgid, - process, session); + return perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid, + process, machine); } -int perf_event__synthesize_thread_map(struct thread_map *threads, +int perf_event__synthesize_thread_map(struct perf_tool *tool, + struct thread_map *threads, perf_event__handler_t process, - struct perf_session *session) + struct machine *machine) { union perf_event *comm_event, *mmap_event; - int err = -1, thread; + int err = -1, thread, j; - comm_event = malloc(sizeof(comm_event->comm) + session->id_hdr_size); + comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size); if (comm_event == NULL) goto out; - mmap_event = malloc(sizeof(mmap_event->mmap) + session->id_hdr_size); + mmap_event = malloc(sizeof(mmap_event->mmap) + machine->id_hdr_size); if (mmap_event == NULL) goto out_free_comm; err = 0; for (thread = 0; thread < threads->nr; ++thread) { if (__event__synthesize_thread(comm_event, mmap_event, - threads->map[thread], - process, session)) { + threads->map[thread], 0, + process, tool, machine)) { err = -1; break; } + + /* + * comm.pid is set to thread group id by + * perf_event__synthesize_comm + */ + if ((int) comm_event->comm.pid != threads->map[thread]) { + bool need_leader = true; + + /* is thread group leader in thread_map? */ + for (j = 0; j < threads->nr; ++j) { + if ((int) comm_event->comm.pid == threads->map[j]) { + need_leader = false; + break; + } + } + + /* if not, generate events for it */ + if (need_leader && + __event__synthesize_thread(comm_event, + mmap_event, + comm_event->comm.pid, 0, + process, tool, machine)) { + err = -1; + break; + } + } } free(mmap_event); out_free_comm: @@ -302,19 +363,20 @@ out: return err; } -int perf_event__synthesize_threads(perf_event__handler_t process, - struct perf_session *session) +int perf_event__synthesize_threads(struct perf_tool *tool, + perf_event__handler_t process, + struct machine *machine) { DIR *proc; struct dirent dirent, *next; union perf_event *comm_event, *mmap_event; int err = -1; - comm_event = malloc(sizeof(comm_event->comm) + session->id_hdr_size); + comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size); if (comm_event == NULL) goto out; - mmap_event = malloc(sizeof(mmap_event->mmap) + session->id_hdr_size); + mmap_event = malloc(sizeof(mmap_event->mmap) + machine->id_hdr_size); if (mmap_event == NULL) goto out_free_comm; @@ -329,8 +391,8 @@ int perf_event__synthesize_threads(perf_event__handler_t process, if (*end) /* only interested in proper numerical dirents */ continue; - __event__synthesize_thread(comm_event, mmap_event, pid, - process, session); + __event__synthesize_thread(comm_event, mmap_event, pid, 1, + process, tool, machine); } closedir(proc); @@ -365,8 +427,8 @@ static int find_symbol_cb(void *arg, const char *name, char type, return 1; } -int perf_event__synthesize_kernel_mmap(perf_event__handler_t process, - struct perf_session *session, +int perf_event__synthesize_kernel_mmap(struct perf_tool *tool, + perf_event__handler_t process, struct machine *machine, const char *symbol_name) { @@ -383,7 +445,7 @@ int perf_event__synthesize_kernel_mmap(perf_event__handler_t process, */ struct process_symbol_args args = { .name = symbol_name, }; union perf_event *event = zalloc((sizeof(event->mmap) + - session->id_hdr_size)); + machine->id_hdr_size)); if (event == NULL) { pr_debug("Not enough memory synthesizing mmap event " "for kernel modules\n"); @@ -417,25 +479,32 @@ int 
perf_event__synthesize_kernel_mmap(perf_event__handler_t process, size = ALIGN(size, sizeof(u64)); event->mmap.header.type = PERF_RECORD_MMAP; event->mmap.header.size = (sizeof(event->mmap) - - (sizeof(event->mmap.filename) - size) + session->id_hdr_size); + (sizeof(event->mmap.filename) - size) + machine->id_hdr_size); event->mmap.pgoff = args.start; event->mmap.start = map->start; event->mmap.len = map->end - event->mmap.start; event->mmap.pid = machine->pid; - err = process(event, &synth_sample, session); + err = process(tool, event, &synth_sample, machine); free(event); return err; } -int perf_event__process_comm(union perf_event *event, +size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp) +{ + return fprintf(fp, ": %s:%d\n", event->comm.comm, event->comm.tid); +} + +int perf_event__process_comm(struct perf_tool *tool __used, + union perf_event *event, struct perf_sample *sample __used, - struct perf_session *session) + struct machine *machine) { - struct thread *thread = perf_session__findnew(session, event->comm.tid); + struct thread *thread = machine__findnew_thread(machine, event->comm.tid); - dump_printf(": %s:%d\n", event->comm.comm, event->comm.tid); + if (dump_trace) + perf_event__fprintf_comm(event, stdout); if (thread == NULL || thread__set_comm(thread, event->comm.comm)) { dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n"); @@ -445,13 +514,13 @@ int perf_event__process_comm(union perf_event *event, return 0; } -int perf_event__process_lost(union perf_event *event, +int perf_event__process_lost(struct perf_tool *tool __used, + union perf_event *event, struct perf_sample *sample __used, - struct perf_session *session) + struct machine *machine __used) { dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n", event->lost.id, event->lost.lost); - session->hists.stats.total_lost += event->lost.lost; return 0; } @@ -468,21 +537,15 @@ static void perf_event__set_kernel_mmap_len(union perf_event *event, maps[MAP__FUNCTION]->end = ~0ULL; } -static int perf_event__process_kernel_mmap(union perf_event *event, - struct perf_session *session) +static int perf_event__process_kernel_mmap(struct perf_tool *tool __used, + union perf_event *event, + struct machine *machine) { struct map *map; char kmmap_prefix[PATH_MAX]; - struct machine *machine; enum dso_kernel_type kernel_type; bool is_kernel_mmap; - machine = perf_session__findnew_machine(session, event->mmap.pid); - if (!machine) { - pr_err("Can't find id %d's machine\n", event->mmap.pid); - goto out_problem; - } - machine__mmap_name(machine, kmmap_prefix, sizeof(kmmap_prefix)); if (machine__is_host(machine)) kernel_type = DSO_TYPE_KERNEL; @@ -549,9 +612,9 @@ static int perf_event__process_kernel_mmap(union perf_event *event, * time /proc/sys/kernel/kptr_restrict was non zero. 
*/ if (event->mmap.pgoff != 0) { - perf_session__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, - symbol_name, - event->mmap.pgoff); + maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, + symbol_name, + event->mmap.pgoff); } if (machine__is_default_guest(machine)) { @@ -567,32 +630,35 @@ out_problem: return -1; } -int perf_event__process_mmap(union perf_event *event, +size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp) +{ + return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %s\n", + event->mmap.pid, event->mmap.tid, event->mmap.start, + event->mmap.len, event->mmap.pgoff, event->mmap.filename); +} + +int perf_event__process_mmap(struct perf_tool *tool, + union perf_event *event, struct perf_sample *sample __used, - struct perf_session *session) + struct machine *machine) { - struct machine *machine; struct thread *thread; struct map *map; u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; int ret = 0; - dump_printf(" %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %s\n", - event->mmap.pid, event->mmap.tid, event->mmap.start, - event->mmap.len, event->mmap.pgoff, event->mmap.filename); + if (dump_trace) + perf_event__fprintf_mmap(event, stdout); if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL || cpumode == PERF_RECORD_MISC_KERNEL) { - ret = perf_event__process_kernel_mmap(event, session); + ret = perf_event__process_kernel_mmap(tool, event, machine); if (ret < 0) goto out_problem; return 0; } - machine = perf_session__find_host_machine(session); - if (machine == NULL) - goto out_problem; - thread = perf_session__findnew(session, event->mmap.pid); + thread = machine__findnew_thread(machine, event->mmap.pid); if (thread == NULL) goto out_problem; map = map__new(&machine->user_dsos, event->mmap.start, @@ -610,18 +676,26 @@ out_problem: return 0; } -int perf_event__process_task(union perf_event *event, +size_t perf_event__fprintf_task(union perf_event *event, FILE *fp) +{ + return fprintf(fp, "(%d:%d):(%d:%d)\n", + event->fork.pid, event->fork.tid, + event->fork.ppid, event->fork.ptid); +} + +int perf_event__process_task(struct perf_tool *tool __used, + union perf_event *event, struct perf_sample *sample __used, - struct perf_session *session) + struct machine *machine) { - struct thread *thread = perf_session__findnew(session, event->fork.tid); - struct thread *parent = perf_session__findnew(session, event->fork.ptid); + struct thread *thread = machine__findnew_thread(machine, event->fork.tid); + struct thread *parent = machine__findnew_thread(machine, event->fork.ptid); - dump_printf("(%d:%d):(%d:%d)\n", event->fork.pid, event->fork.tid, - event->fork.ppid, event->fork.ptid); + if (dump_trace) + perf_event__fprintf_task(event, stdout); if (event->header.type == PERF_RECORD_EXIT) { - perf_session__remove_thread(session, thread); + machine__remove_thread(machine, thread); return 0; } @@ -634,22 +708,45 @@ int perf_event__process_task(union perf_event *event, return 0; } -int perf_event__process(union perf_event *event, struct perf_sample *sample, - struct perf_session *session) +size_t perf_event__fprintf(union perf_event *event, FILE *fp) +{ + size_t ret = fprintf(fp, "PERF_RECORD_%s", + perf_event__name(event->header.type)); + + switch (event->header.type) { + case PERF_RECORD_COMM: + ret += perf_event__fprintf_comm(event, fp); + break; + case PERF_RECORD_FORK: + case PERF_RECORD_EXIT: + ret += perf_event__fprintf_task(event, fp); + break; + case PERF_RECORD_MMAP: + ret += perf_event__fprintf_mmap(event, fp); + break; + default: + 
ret += fprintf(fp, "\n"); + } + + return ret; +} + +int perf_event__process(struct perf_tool *tool, union perf_event *event, + struct perf_sample *sample, struct machine *machine) { switch (event->header.type) { case PERF_RECORD_COMM: - perf_event__process_comm(event, sample, session); + perf_event__process_comm(tool, event, sample, machine); break; case PERF_RECORD_MMAP: - perf_event__process_mmap(event, sample, session); + perf_event__process_mmap(tool, event, sample, machine); break; case PERF_RECORD_FORK: case PERF_RECORD_EXIT: - perf_event__process_task(event, sample, session); + perf_event__process_task(tool, event, sample, machine); break; case PERF_RECORD_LOST: - perf_event__process_lost(event, sample, session); + perf_event__process_lost(tool, event, sample, machine); default: break; } @@ -658,36 +755,29 @@ int perf_event__process(union perf_event *event, struct perf_sample *sample, } void thread__find_addr_map(struct thread *self, - struct perf_session *session, u8 cpumode, - enum map_type type, pid_t pid, u64 addr, + struct machine *machine, u8 cpumode, + enum map_type type, u64 addr, struct addr_location *al) { struct map_groups *mg = &self->mg; - struct machine *machine = NULL; al->thread = self; al->addr = addr; al->cpumode = cpumode; al->filtered = false; + if (machine == NULL) { + al->map = NULL; + return; + } + if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) { al->level = 'k'; - machine = perf_session__find_host_machine(session); - if (machine == NULL) { - al->map = NULL; - return; - } mg = &machine->kmaps; } else if (cpumode == PERF_RECORD_MISC_USER && perf_host) { al->level = '.'; - machine = perf_session__find_host_machine(session); } else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) { al->level = 'g'; - machine = perf_session__find_machine(session, pid); - if (machine == NULL) { - al->map = NULL; - return; - } mg = &machine->kmaps; } else { /* @@ -733,13 +823,12 @@ try_again: al->addr = al->map->map_ip(al->map, al->addr); } -void thread__find_addr_location(struct thread *self, - struct perf_session *session, u8 cpumode, - enum map_type type, pid_t pid, u64 addr, +void thread__find_addr_location(struct thread *thread, struct machine *machine, + u8 cpumode, enum map_type type, u64 addr, struct addr_location *al, symbol_filter_t filter) { - thread__find_addr_map(self, session, cpumode, type, pid, addr, al); + thread__find_addr_map(thread, machine, cpumode, type, addr, al); if (al->map != NULL) al->sym = map__find_symbol(al->map, al->addr, filter); else @@ -747,13 +836,13 @@ void thread__find_addr_location(struct thread *self, } int perf_event__preprocess_sample(const union perf_event *event, - struct perf_session *session, + struct machine *machine, struct addr_location *al, struct perf_sample *sample, symbol_filter_t filter) { u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; - struct thread *thread = perf_session__findnew(session, event->ip.pid); + struct thread *thread = machine__findnew_thread(machine, event->ip.pid); if (thread == NULL) return -1; @@ -764,18 +853,18 @@ int perf_event__preprocess_sample(const union perf_event *event, dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid); /* - * Have we already created the kernel maps for the host machine? + * Have we already created the kernel maps for this machine? * * This should have happened earlier, when we processed the kernel MMAP * events, but for older perf.data files there was no such thing, so do * it now. 
*/ if (cpumode == PERF_RECORD_MISC_KERNEL && - session->host_machine.vmlinux_maps[MAP__FUNCTION] == NULL) - machine__create_kernel_maps(&session->host_machine); + machine->vmlinux_maps[MAP__FUNCTION] == NULL) + machine__create_kernel_maps(machine); - thread__find_addr_map(thread, session, cpumode, MAP__FUNCTION, - event->ip.pid, event->ip.ip, al); + thread__find_addr_map(thread, machine, cpumode, MAP__FUNCTION, + event->ip.ip, al); dump_printf(" ...... dso: %s\n", al->map ? al->map->dso->long_name : al->level == 'H' ? "[hypervisor]" : "<not found>"); @@ -783,13 +872,14 @@ int perf_event__preprocess_sample(const union perf_event *event, al->cpu = sample->cpu; if (al->map) { + struct dso *dso = al->map->dso; + if (symbol_conf.dso_list && - (!al->map || !al->map->dso || - !(strlist__has_entry(symbol_conf.dso_list, - al->map->dso->short_name) || - (al->map->dso->short_name != al->map->dso->long_name && - strlist__has_entry(symbol_conf.dso_list, - al->map->dso->long_name))))) + (!dso || !(strlist__has_entry(symbol_conf.dso_list, + dso->short_name) || + (dso->short_name != dso->long_name && + strlist__has_entry(symbol_conf.dso_list, + dso->long_name))))) goto out_filtered; al->sym = map__find_symbol(al->map, al->addr, filter); diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h index 357a85b..cbdeaad 100644 --- a/tools/perf/util/event.h +++ b/tools/perf/util/event.h @@ -2,6 +2,7 @@ #define __PERF_RECORD_H #include <limits.h> +#include <stdio.h> #include "../perf.h" #include "map.h" @@ -141,43 +142,54 @@ union perf_event { void perf_event__print_totals(void); -struct perf_session; +struct perf_tool; struct thread_map; -typedef int (*perf_event__handler_synth_t)(union perf_event *event, - struct perf_session *session); -typedef int (*perf_event__handler_t)(union perf_event *event, +typedef int (*perf_event__handler_t)(struct perf_tool *tool, + union perf_event *event, struct perf_sample *sample, - struct perf_session *session); + struct machine *machine); -int perf_event__synthesize_thread_map(struct thread_map *threads, +int perf_event__synthesize_thread_map(struct perf_tool *tool, + struct thread_map *threads, perf_event__handler_t process, - struct perf_session *session); -int perf_event__synthesize_threads(perf_event__handler_t process, - struct perf_session *session); -int perf_event__synthesize_kernel_mmap(perf_event__handler_t process, - struct perf_session *session, + struct machine *machine); +int perf_event__synthesize_threads(struct perf_tool *tool, + perf_event__handler_t process, + struct machine *machine); +int perf_event__synthesize_kernel_mmap(struct perf_tool *tool, + perf_event__handler_t process, struct machine *machine, const char *symbol_name); -int perf_event__synthesize_modules(perf_event__handler_t process, - struct perf_session *session, +int perf_event__synthesize_modules(struct perf_tool *tool, + perf_event__handler_t process, struct machine *machine); -int perf_event__process_comm(union perf_event *event, struct perf_sample *sample, - struct perf_session *session); -int perf_event__process_lost(union perf_event *event, struct perf_sample *sample, - struct perf_session *session); -int perf_event__process_mmap(union perf_event *event, struct perf_sample *sample, - struct perf_session *session); -int perf_event__process_task(union perf_event *event, struct perf_sample *sample, - struct perf_session *session); -int perf_event__process(union perf_event *event, struct perf_sample *sample, - struct perf_session *session); +int perf_event__process_comm(struct 
perf_tool *tool, + union perf_event *event, + struct perf_sample *sample, + struct machine *machine); +int perf_event__process_lost(struct perf_tool *tool, + union perf_event *event, + struct perf_sample *sample, + struct machine *machine); +int perf_event__process_mmap(struct perf_tool *tool, + union perf_event *event, + struct perf_sample *sample, + struct machine *machine); +int perf_event__process_task(struct perf_tool *tool, + union perf_event *event, + struct perf_sample *sample, + struct machine *machine); +int perf_event__process(struct perf_tool *tool, + union perf_event *event, + struct perf_sample *sample, + struct machine *machine); struct addr_location; int perf_event__preprocess_sample(const union perf_event *self, - struct perf_session *session, + struct machine *machine, struct addr_location *al, struct perf_sample *sample, symbol_filter_t filter); @@ -187,5 +199,13 @@ const char *perf_event__name(unsigned int id); int perf_event__parse_sample(const union perf_event *event, u64 type, int sample_size, bool sample_id_all, struct perf_sample *sample, bool swapped); +int perf_event__synthesize_sample(union perf_event *event, u64 type, + const struct perf_sample *sample, + bool swapped); + +size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp); +size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp); +size_t perf_event__fprintf_task(union perf_event *event, FILE *fp); +size_t perf_event__fprintf(union perf_event *event, FILE *fp); #endif /* __PERF_RECORD_H */ diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c index fbb4b4a..fa18370 100644 --- a/tools/perf/util/evlist.c +++ b/tools/perf/util/evlist.c @@ -6,12 +6,16 @@ * * Released under the GPL v2. (and only v2, not any later version) */ +#include "util.h" +#include "debugfs.h" #include <poll.h> #include "cpumap.h" #include "thread_map.h" #include "evlist.h" #include "evsel.h" -#include "util.h" +#include <unistd.h> + +#include "parse-events.h" #include <sys/mman.h> @@ -30,6 +34,7 @@ void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus, INIT_HLIST_HEAD(&evlist->heads[i]); INIT_LIST_HEAD(&evlist->entries); perf_evlist__set_maps(evlist, cpus, threads); + evlist->workload.pid = -1; } struct perf_evlist *perf_evlist__new(struct cpu_map *cpus, @@ -43,6 +48,22 @@ struct perf_evlist *perf_evlist__new(struct cpu_map *cpus, return evlist; } +void perf_evlist__config_attrs(struct perf_evlist *evlist, + struct perf_record_opts *opts) +{ + struct perf_evsel *evsel; + + if (evlist->cpus->map[0] < 0) + opts->no_inherit = true; + + list_for_each_entry(evsel, &evlist->entries, node) { + perf_evsel__config(evsel, opts); + + if (evlist->nr_entries > 1) + evsel->attr.sample_type |= PERF_SAMPLE_ID; + } +} + static void perf_evlist__purge(struct perf_evlist *evlist) { struct perf_evsel *pos, *n; @@ -76,6 +97,14 @@ void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry) ++evlist->nr_entries; } +static void perf_evlist__splice_list_tail(struct perf_evlist *evlist, + struct list_head *list, + int nr_entries) +{ + list_splice_tail(list, &evlist->entries); + evlist->nr_entries += nr_entries; +} + int perf_evlist__add_default(struct perf_evlist *evlist) { struct perf_event_attr attr = { @@ -100,6 +129,126 @@ error: return -ENOMEM; } +int perf_evlist__add_attrs(struct perf_evlist *evlist, + struct perf_event_attr *attrs, size_t nr_attrs) +{ + struct perf_evsel *evsel, *n; + LIST_HEAD(head); + size_t i; + + for (i = 0; i < nr_attrs; i++) { + evsel = perf_evsel__new(attrs + i, 
evlist->nr_entries + i); + if (evsel == NULL) + goto out_delete_partial_list; + list_add_tail(&evsel->node, &head); + } + + perf_evlist__splice_list_tail(evlist, &head, nr_attrs); + + return 0; + +out_delete_partial_list: + list_for_each_entry_safe(evsel, n, &head, node) + perf_evsel__delete(evsel); + return -1; +} + +static int trace_event__id(const char *evname) +{ + char *filename, *colon; + int err = -1, fd; + + if (asprintf(&filename, "%s/%s/id", tracing_events_path, evname) < 0) + return -1; + + colon = strrchr(filename, ':'); + if (colon != NULL) + *colon = '/'; + + fd = open(filename, O_RDONLY); + if (fd >= 0) { + char id[16]; + if (read(fd, id, sizeof(id)) > 0) + err = atoi(id); + close(fd); + } + + free(filename); + return err; +} + +int perf_evlist__add_tracepoints(struct perf_evlist *evlist, + const char *tracepoints[], + size_t nr_tracepoints) +{ + int err; + size_t i; + struct perf_event_attr *attrs = zalloc(nr_tracepoints * sizeof(*attrs)); + + if (attrs == NULL) + return -1; + + for (i = 0; i < nr_tracepoints; i++) { + err = trace_event__id(tracepoints[i]); + + if (err < 0) + goto out_free_attrs; + + attrs[i].type = PERF_TYPE_TRACEPOINT; + attrs[i].config = err; + attrs[i].sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | + PERF_SAMPLE_CPU); + attrs[i].sample_period = 1; + } + + err = perf_evlist__add_attrs(evlist, attrs, nr_tracepoints); +out_free_attrs: + free(attrs); + return err; +} + +static struct perf_evsel * + perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id) +{ + struct perf_evsel *evsel; + + list_for_each_entry(evsel, &evlist->entries, node) { + if (evsel->attr.type == PERF_TYPE_TRACEPOINT && + (int)evsel->attr.config == id) + return evsel; + } + + return NULL; +} + +int perf_evlist__set_tracepoints_handlers(struct perf_evlist *evlist, + const struct perf_evsel_str_handler *assocs, + size_t nr_assocs) +{ + struct perf_evsel *evsel; + int err; + size_t i; + + for (i = 0; i < nr_assocs; i++) { + err = trace_event__id(assocs[i].name); + if (err < 0) + goto out; + + evsel = perf_evlist__find_tracepoint_by_id(evlist, err); + if (evsel == NULL) + continue; + + err = -EEXIST; + if (evsel->handler.func != NULL) + goto out; + evsel->handler.func = assocs[i].handler; + } + + err = 0; +out: + return err; +} + void perf_evlist__disable(struct perf_evlist *evlist) { int cpu, thread; @@ -126,7 +275,7 @@ void perf_evlist__enable(struct perf_evlist *evlist) } } -int perf_evlist__alloc_pollfd(struct perf_evlist *evlist) +static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist) { int nfds = evlist->cpus->nr * evlist->threads->nr * evlist->nr_entries; evlist->pollfd = malloc(sizeof(struct pollfd) * nfds); @@ -282,7 +431,7 @@ void perf_evlist__munmap(struct perf_evlist *evlist) evlist->mmap = NULL; } -int perf_evlist__alloc_mmap(struct perf_evlist *evlist) +static int perf_evlist__alloc_mmap(struct perf_evlist *evlist) { evlist->nr_mmaps = evlist->cpus->nr; if (evlist->cpus->map[0] == -1) @@ -298,8 +447,10 @@ static int __perf_evlist__mmap(struct perf_evlist *evlist, evlist->mmap[idx].mask = mask; evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot, MAP_SHARED, fd, 0); - if (evlist->mmap[idx].base == MAP_FAILED) + if (evlist->mmap[idx].base == MAP_FAILED) { + evlist->mmap[idx].base = NULL; return -1; + } perf_evlist__add_pollfd(evlist, fd); return 0; @@ -400,14 +551,22 @@ out_unmap: * * Using perf_evlist__read_on_cpu does this automatically. 
*/ -int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite) +int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages, + bool overwrite) { unsigned int page_size = sysconf(_SC_PAGE_SIZE); - int mask = pages * page_size - 1; struct perf_evsel *evsel; const struct cpu_map *cpus = evlist->cpus; const struct thread_map *threads = evlist->threads; - int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE); + int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE), mask; + + /* 512 kiB: default amount of unprivileged mlocked memory */ + if (pages == UINT_MAX) + pages = (512 * 1024) / page_size; + else if (!is_power_of_2(pages)) + return -EINVAL; + + mask = pages * page_size - 1; if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0) return -ENOMEM; @@ -512,6 +671,38 @@ u64 perf_evlist__sample_type(const struct perf_evlist *evlist) return first->attr.sample_type; } +u16 perf_evlist__id_hdr_size(const struct perf_evlist *evlist) +{ + struct perf_evsel *first; + struct perf_sample *data; + u64 sample_type; + u16 size = 0; + + first = list_entry(evlist->entries.next, struct perf_evsel, node); + + if (!first->attr.sample_id_all) + goto out; + + sample_type = first->attr.sample_type; + + if (sample_type & PERF_SAMPLE_TID) + size += sizeof(data->tid) * 2; + + if (sample_type & PERF_SAMPLE_TIME) + size += sizeof(data->time); + + if (sample_type & PERF_SAMPLE_ID) + size += sizeof(data->id); + + if (sample_type & PERF_SAMPLE_STREAM_ID) + size += sizeof(data->stream_id); + + if (sample_type & PERF_SAMPLE_CPU) + size += sizeof(data->cpu) * 2; +out: + return size; +} + bool perf_evlist__valid_sample_id_all(const struct perf_evlist *evlist) { struct perf_evsel *pos, *first; @@ -569,3 +760,97 @@ out_err: return err; } + +int perf_evlist__prepare_workload(struct perf_evlist *evlist, + struct perf_record_opts *opts, + const char *argv[]) +{ + int child_ready_pipe[2], go_pipe[2]; + char bf; + + if (pipe(child_ready_pipe) < 0) { + perror("failed to create 'ready' pipe"); + return -1; + } + + if (pipe(go_pipe) < 0) { + perror("failed to create 'go' pipe"); + goto out_close_ready_pipe; + } + + evlist->workload.pid = fork(); + if (evlist->workload.pid < 0) { + perror("failed to fork"); + goto out_close_pipes; + } + + if (!evlist->workload.pid) { + if (opts->pipe_output) + dup2(2, 1); + + close(child_ready_pipe[0]); + close(go_pipe[1]); + fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC); + + /* + * Do a dummy execvp to get the PLT entry resolved, + * so we avoid the resolver overhead on the real + * execvp call. + */ + execvp("", (char **)argv); + + /* + * Tell the parent we're ready to go + */ + close(child_ready_pipe[1]); + + /* + * Wait until the parent tells us to go. 
+ */ + if (read(go_pipe[0], &bf, 1) == -1) + perror("unable to read pipe"); + + execvp(argv[0], (char **)argv); + + perror(argv[0]); + kill(getppid(), SIGUSR1); + exit(-1); + } + + if (!opts->system_wide && opts->target_tid == -1 && opts->target_pid == -1) + evlist->threads->map[0] = evlist->workload.pid; + + close(child_ready_pipe[1]); + close(go_pipe[0]); + /* + * wait for child to settle + */ + if (read(child_ready_pipe[0], &bf, 1) == -1) { + perror("unable to read pipe"); + goto out_close_pipes; + } + + evlist->workload.cork_fd = go_pipe[1]; + close(child_ready_pipe[0]); + return 0; + +out_close_pipes: + close(go_pipe[0]); + close(go_pipe[1]); +out_close_ready_pipe: + close(child_ready_pipe[0]); + close(child_ready_pipe[1]); + return -1; +} + +int perf_evlist__start_workload(struct perf_evlist *evlist) +{ + if (evlist->workload.cork_fd > 0) { + /* + * Remove the cork, let it rip! + */ + return close(evlist->workload.cork_fd); + } + + return 0; +} diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h index 1779ffe..8922aee 100644 --- a/tools/perf/util/evlist.h +++ b/tools/perf/util/evlist.h @@ -2,12 +2,16 @@ #define __PERF_EVLIST_H 1 #include <linux/list.h> +#include <stdio.h> #include "../perf.h" #include "event.h" +#include "util.h" +#include <unistd.h> struct pollfd; struct thread_map; struct cpu_map; +struct perf_record_opts; #define PERF_EVLIST__HLIST_BITS 8 #define PERF_EVLIST__HLIST_SIZE (1 << PERF_EVLIST__HLIST_BITS) @@ -19,6 +23,10 @@ struct perf_evlist { int nr_fds; int nr_mmaps; int mmap_len; + struct { + int cork_fd; + pid_t pid; + } workload; bool overwrite; union perf_event event_copy; struct perf_mmap *mmap; @@ -28,6 +36,11 @@ struct perf_evlist { struct perf_evsel *selected; }; +struct perf_evsel_str_handler { + const char *name; + void *handler; +}; + struct perf_evsel; struct perf_evlist *perf_evlist__new(struct cpu_map *cpus, @@ -39,11 +52,26 @@ void perf_evlist__delete(struct perf_evlist *evlist); void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry); int perf_evlist__add_default(struct perf_evlist *evlist); +int perf_evlist__add_attrs(struct perf_evlist *evlist, + struct perf_event_attr *attrs, size_t nr_attrs); +int perf_evlist__add_tracepoints(struct perf_evlist *evlist, + const char *tracepoints[], size_t nr_tracepoints); +int perf_evlist__set_tracepoints_handlers(struct perf_evlist *evlist, + const struct perf_evsel_str_handler *assocs, + size_t nr_assocs); + +#define perf_evlist__add_attrs_array(evlist, array) \ + perf_evlist__add_attrs(evlist, array, ARRAY_SIZE(array)) + +#define perf_evlist__add_tracepoints_array(evlist, array) \ + perf_evlist__add_tracepoints(evlist, array, ARRAY_SIZE(array)) + +#define perf_evlist__set_tracepoints_handlers_array(evlist, array) \ + perf_evlist__set_tracepoints_handlers(evlist, array, ARRAY_SIZE(array)) void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel, int cpu, int thread, u64 id); -int perf_evlist__alloc_pollfd(struct perf_evlist *evlist); void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd); struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id); @@ -52,8 +80,16 @@ union perf_event *perf_evlist__mmap_read(struct perf_evlist *self, int idx); int perf_evlist__open(struct perf_evlist *evlist, bool group); -int perf_evlist__alloc_mmap(struct perf_evlist *evlist); -int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite); +void perf_evlist__config_attrs(struct perf_evlist *evlist, + struct perf_record_opts *opts); 
+ +int perf_evlist__prepare_workload(struct perf_evlist *evlist, + struct perf_record_opts *opts, + const char *argv[]); +int perf_evlist__start_workload(struct perf_evlist *evlist); + +int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages, + bool overwrite); void perf_evlist__munmap(struct perf_evlist *evlist); void perf_evlist__disable(struct perf_evlist *evlist); @@ -77,6 +113,7 @@ int perf_evlist__set_filters(struct perf_evlist *evlist); u64 perf_evlist__sample_type(const struct perf_evlist *evlist); bool perf_evlist__sample_id_all(const struct perf_evlist *evlist); +u16 perf_evlist__id_hdr_size(const struct perf_evlist *evlist); bool perf_evlist__valid_sample_type(const struct perf_evlist *evlist); bool perf_evlist__valid_sample_id_all(const struct perf_evlist *evlist); diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index d7915d4..667f3b7 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -63,6 +63,79 @@ struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx) return evsel; } +void perf_evsel__config(struct perf_evsel *evsel, struct perf_record_opts *opts) +{ + struct perf_event_attr *attr = &evsel->attr; + int track = !evsel->idx; /* only the first counter needs these */ + + attr->sample_id_all = opts->sample_id_all_avail ? 1 : 0; + attr->inherit = !opts->no_inherit; + attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | + PERF_FORMAT_TOTAL_TIME_RUNNING | + PERF_FORMAT_ID; + + attr->sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID; + + /* + * We default some events to a 1 default interval. But keep + * it a weak assumption overridable by the user. + */ + if (!attr->sample_period || (opts->user_freq != UINT_MAX && + opts->user_interval != ULLONG_MAX)) { + if (opts->freq) { + attr->sample_type |= PERF_SAMPLE_PERIOD; + attr->freq = 1; + attr->sample_freq = opts->freq; + } else { + attr->sample_period = opts->default_interval; + } + } + + if (opts->no_samples) + attr->sample_freq = 0; + + if (opts->inherit_stat) + attr->inherit_stat = 1; + + if (opts->sample_address) { + attr->sample_type |= PERF_SAMPLE_ADDR; + attr->mmap_data = track; + } + + if (opts->call_graph) + attr->sample_type |= PERF_SAMPLE_CALLCHAIN; + + if (opts->system_wide) + attr->sample_type |= PERF_SAMPLE_CPU; + + if (opts->period) + attr->sample_type |= PERF_SAMPLE_PERIOD; + + if (opts->sample_id_all_avail && + (opts->sample_time || opts->system_wide || + !opts->no_inherit || opts->cpu_list)) + attr->sample_type |= PERF_SAMPLE_TIME; + + if (opts->raw_samples) { + attr->sample_type |= PERF_SAMPLE_TIME; + attr->sample_type |= PERF_SAMPLE_RAW; + attr->sample_type |= PERF_SAMPLE_CPU; + } + + if (opts->no_delay) { + attr->watermark = 0; + attr->wakeup_events = 1; + } + + attr->mmap = track; + attr->comm = track; + + if (opts->target_pid == -1 && opts->target_tid == -1 && !opts->system_wide) { + attr->disabled = 1; + attr->enable_on_exec = 1; + } +} + int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads) { int cpu, thread; @@ -387,7 +460,7 @@ int perf_event__parse_sample(const union perf_event *event, u64 type, u32 val32[2]; } u; - + memset(data, 0, sizeof(*data)); data->cpu = data->pid = data->tid = -1; data->stream_id = data->id = data->time = -1ULL; @@ -504,3 +577,82 @@ int perf_event__parse_sample(const union perf_event *event, u64 type, return 0; } + +int perf_event__synthesize_sample(union perf_event *event, u64 type, + const struct perf_sample *sample, + bool swapped) +{ + u64 *array; + + /* + * used for cross-endian analysis. See git commit 65014ab3 + 
See git commit 65014ab3 + * for why this goofiness is needed. + */ + union { + u64 val64; + u32 val32[2]; + } u; + + array = event->sample.array; + + if (type & PERF_SAMPLE_IP) { + event->ip.ip = sample->ip; + array++; + } + + if (type & PERF_SAMPLE_TID) { + u.val32[0] = sample->pid; + u.val32[1] = sample->tid; + if (swapped) { + /* + * Inverse of what is done in perf_event__parse_sample + */ + u.val32[0] = bswap_32(u.val32[0]); + u.val32[1] = bswap_32(u.val32[1]); + u.val64 = bswap_64(u.val64); + } + + *array = u.val64; + array++; + } + + if (type & PERF_SAMPLE_TIME) { + *array = sample->time; + array++; + } + + if (type & PERF_SAMPLE_ADDR) { + *array = sample->addr; + array++; + } + + if (type & PERF_SAMPLE_ID) { + *array = sample->id; + array++; + } + + if (type & PERF_SAMPLE_STREAM_ID) { + *array = sample->stream_id; + array++; + } + + if (type & PERF_SAMPLE_CPU) { + u.val32[0] = sample->cpu; + if (swapped) { + /* + * Inverse of what is done in perf_event__parse_sample + */ + u.val32[0] = bswap_32(u.val32[0]); + u.val64 = bswap_64(u.val64); + } + *array = u.val64; + array++; + } + + if (type & PERF_SAMPLE_PERIOD) { + *array = sample->period; + array++; + } + + return 0; +} diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h index b1d15e6..326b8e4 100644 --- a/tools/perf/util/evsel.h +++ b/tools/perf/util/evsel.h @@ -61,12 +61,17 @@ struct perf_evsel { off_t id_offset; }; struct cgroup_sel *cgrp; + struct { + void *func; + void *data; + } handler; bool supported; }; struct cpu_map; struct thread_map; struct perf_evlist; +struct perf_record_opts; struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx); void perf_evsel__init(struct perf_evsel *evsel, @@ -74,6 +79,9 @@ void perf_evsel__init(struct perf_evsel *evsel, void perf_evsel__exit(struct perf_evsel *evsel); void perf_evsel__delete(struct perf_evsel *evsel); +void perf_evsel__config(struct perf_evsel *evsel, + struct perf_record_opts *opts); + int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads); int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads); int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus); diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index 33c17a2..3e7e0b0 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -8,6 +8,7 @@ #include <stdlib.h> #include <linux/list.h> #include <linux/kernel.h> +#include <linux/bitops.h> #include <sys/utsname.h> #include "evlist.h" @@ -28,9 +29,6 @@ static struct perf_trace_event_type *events; static u32 header_argc; static const char **header_argv; -static int dsos__write_buildid_table(struct perf_header *header, int fd); -static int perf_session__cache_build_ids(struct perf_session *session); - int perf_header__push_event(u64 id, const char *name) { if (strlen(name) > MAX_EVENT_NAME) @@ -187,6 +185,252 @@ perf_header__set_cmdline(int argc, const char **argv) return 0; } +#define dsos__for_each_with_build_id(pos, head) \ + list_for_each_entry(pos, head, node) \ + if (!pos->has_build_id) \ + continue; \ + else + +static int __dsos__write_buildid_table(struct list_head *head, pid_t pid, + u16 misc, int fd) +{ + struct dso *pos; + + dsos__for_each_with_build_id(pos, head) { + int err; + struct build_id_event b; + size_t len; + + if (!pos->hit) + continue; + len = pos->long_name_len + 1; + len = ALIGN(len, NAME_ALIGN); + memset(&b, 0, sizeof(b)); + memcpy(&b.build_id, pos->build_id, sizeof(pos->build_id)); + b.pid = pid; + b.header.misc = misc; + b.header.size = 
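/*
 * (Aside: each record in the build-id table is the fixed-size
 * build_id_event immediately followed by the dso's long name,
 * NUL-padded up to NAME_ALIGN -- the "len" computed above -- so the
 * size stored in the record header covers both parts.)
 */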
sizeof(b) + len; + err = do_write(fd, &b, sizeof(b)); + if (err < 0) + return err; + err = write_padded(fd, pos->long_name, + pos->long_name_len + 1, len); + if (err < 0) + return err; + } + + return 0; +} + +static int machine__write_buildid_table(struct machine *machine, int fd) +{ + int err; + u16 kmisc = PERF_RECORD_MISC_KERNEL, + umisc = PERF_RECORD_MISC_USER; + + if (!machine__is_host(machine)) { + kmisc = PERF_RECORD_MISC_GUEST_KERNEL; + umisc = PERF_RECORD_MISC_GUEST_USER; + } + + err = __dsos__write_buildid_table(&machine->kernel_dsos, machine->pid, + kmisc, fd); + if (err == 0) + err = __dsos__write_buildid_table(&machine->user_dsos, + machine->pid, umisc, fd); + return err; +} + +static int dsos__write_buildid_table(struct perf_header *header, int fd) +{ + struct perf_session *session = container_of(header, + struct perf_session, header); + struct rb_node *nd; + int err = machine__write_buildid_table(&session->host_machine, fd); + + if (err) + return err; + + for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) { + struct machine *pos = rb_entry(nd, struct machine, rb_node); + err = machine__write_buildid_table(pos, fd); + if (err) + break; + } + return err; +} + +int build_id_cache__add_s(const char *sbuild_id, const char *debugdir, + const char *name, bool is_kallsyms) +{ + const size_t size = PATH_MAX; + char *realname, *filename = zalloc(size), + *linkname = zalloc(size), *targetname; + int len, err = -1; + + if (is_kallsyms) { + if (symbol_conf.kptr_restrict) { + pr_debug("Not caching a kptr_restrict'ed /proc/kallsyms\n"); + return 0; + } + realname = (char *)name; + } else + realname = realpath(name, NULL); + + if (realname == NULL || filename == NULL || linkname == NULL) + goto out_free; + + len = snprintf(filename, size, "%s%s%s", + debugdir, is_kallsyms ? 
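/*
 * (Aside: on success the cache under buildid_dir -- typically
 * ~/.debug -- holds a copy (or hard link) of the object at
 *	<debugdir>/<realname>/<sbuild_id>
 * plus a relative symlink
 *	<debugdir>/.build-id/<id[0:2]>/<id[2:]> -> ../../<realname>/<sbuild_id>
 * whose "../.." prefix the memcpy() below splices into "filename".)
 */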
"/" : "", realname); + if (mkdir_p(filename, 0755)) + goto out_free; + + snprintf(filename + len, sizeof(filename) - len, "/%s", sbuild_id); + + if (access(filename, F_OK)) { + if (is_kallsyms) { + if (copyfile("/proc/kallsyms", filename)) + goto out_free; + } else if (link(realname, filename) && copyfile(name, filename)) + goto out_free; + } + + len = snprintf(linkname, size, "%s/.build-id/%.2s", + debugdir, sbuild_id); + + if (access(linkname, X_OK) && mkdir_p(linkname, 0755)) + goto out_free; + + snprintf(linkname + len, size - len, "/%s", sbuild_id + 2); + targetname = filename + strlen(debugdir) - 5; + memcpy(targetname, "../..", 5); + + if (symlink(targetname, linkname) == 0) + err = 0; +out_free: + if (!is_kallsyms) + free(realname); + free(filename); + free(linkname); + return err; +} + +static int build_id_cache__add_b(const u8 *build_id, size_t build_id_size, + const char *name, const char *debugdir, + bool is_kallsyms) +{ + char sbuild_id[BUILD_ID_SIZE * 2 + 1]; + + build_id__sprintf(build_id, build_id_size, sbuild_id); + + return build_id_cache__add_s(sbuild_id, debugdir, name, is_kallsyms); +} + +int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir) +{ + const size_t size = PATH_MAX; + char *filename = zalloc(size), + *linkname = zalloc(size); + int err = -1; + + if (filename == NULL || linkname == NULL) + goto out_free; + + snprintf(linkname, size, "%s/.build-id/%.2s/%s", + debugdir, sbuild_id, sbuild_id + 2); + + if (access(linkname, F_OK)) + goto out_free; + + if (readlink(linkname, filename, size - 1) < 0) + goto out_free; + + if (unlink(linkname)) + goto out_free; + + /* + * Since the link is relative, we must make it absolute: + */ + snprintf(linkname, size, "%s/.build-id/%.2s/%s", + debugdir, sbuild_id, filename); + + if (unlink(linkname)) + goto out_free; + + err = 0; +out_free: + free(filename); + free(linkname); + return err; +} + +static int dso__cache_build_id(struct dso *dso, const char *debugdir) +{ + bool is_kallsyms = dso->kernel && dso->long_name[0] != '/'; + + return build_id_cache__add_b(dso->build_id, sizeof(dso->build_id), + dso->long_name, debugdir, is_kallsyms); +} + +static int __dsos__cache_build_ids(struct list_head *head, const char *debugdir) +{ + struct dso *pos; + int err = 0; + + dsos__for_each_with_build_id(pos, head) + if (dso__cache_build_id(pos, debugdir)) + err = -1; + + return err; +} + +static int machine__cache_build_ids(struct machine *machine, const char *debugdir) +{ + int ret = __dsos__cache_build_ids(&machine->kernel_dsos, debugdir); + ret |= __dsos__cache_build_ids(&machine->user_dsos, debugdir); + return ret; +} + +static int perf_session__cache_build_ids(struct perf_session *session) +{ + struct rb_node *nd; + int ret; + char debugdir[PATH_MAX]; + + snprintf(debugdir, sizeof(debugdir), "%s", buildid_dir); + + if (mkdir(debugdir, 0755) != 0 && errno != EEXIST) + return -1; + + ret = machine__cache_build_ids(&session->host_machine, debugdir); + + for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) { + struct machine *pos = rb_entry(nd, struct machine, rb_node); + ret |= machine__cache_build_ids(pos, debugdir); + } + return ret ? 
-1 : 0; +} + +static bool machine__read_build_ids(struct machine *machine, bool with_hits) +{ + bool ret = __dsos__read_build_ids(&machine->kernel_dsos, with_hits); + ret |= __dsos__read_build_ids(&machine->user_dsos, with_hits); + return ret; +} + +static bool perf_session__read_build_ids(struct perf_session *session, bool with_hits) +{ + struct rb_node *nd; + bool ret = machine__read_build_ids(&session->host_machine, with_hits); + + for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) { + struct machine *pos = rb_entry(nd, struct machine, rb_node); + ret |= machine__read_build_ids(pos, with_hits); + } + + return ret; +} + static int write_trace_info(int fd, struct perf_header *h __used, struct perf_evlist *evlist) { @@ -202,6 +446,9 @@ static int write_build_id(int fd, struct perf_header *h, session = container_of(h, struct perf_session, header); + if (!perf_session__read_build_ids(session, true)) + return -1; + err = dsos__write_buildid_table(h, fd); if (err < 0) { pr_debug("failed to write buildid table\n"); @@ -1065,26 +1312,30 @@ struct feature_ops { bool full_only; }; -#define FEAT_OPA(n, w, p) \ - [n] = { .name = #n, .write = w, .print = p } -#define FEAT_OPF(n, w, p) \ - [n] = { .name = #n, .write = w, .print = p, .full_only = true } +#define FEAT_OPA(n, func) \ + [n] = { .name = #n, .write = write_##func, .print = print_##func } +#define FEAT_OPF(n, func) \ + [n] = { .name = #n, .write = write_##func, .print = print_##func, .full_only = true } + +/* feature_ops not implemented: */ +#define print_trace_info NULL +#define print_build_id NULL static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = { - FEAT_OPA(HEADER_TRACE_INFO, write_trace_info, NULL), - FEAT_OPA(HEADER_BUILD_ID, write_build_id, NULL), - FEAT_OPA(HEADER_HOSTNAME, write_hostname, print_hostname), - FEAT_OPA(HEADER_OSRELEASE, write_osrelease, print_osrelease), - FEAT_OPA(HEADER_VERSION, write_version, print_version), - FEAT_OPA(HEADER_ARCH, write_arch, print_arch), - FEAT_OPA(HEADER_NRCPUS, write_nrcpus, print_nrcpus), - FEAT_OPA(HEADER_CPUDESC, write_cpudesc, print_cpudesc), - FEAT_OPA(HEADER_CPUID, write_cpuid, print_cpuid), - FEAT_OPA(HEADER_TOTAL_MEM, write_total_mem, print_total_mem), - FEAT_OPA(HEADER_EVENT_DESC, write_event_desc, print_event_desc), - FEAT_OPA(HEADER_CMDLINE, write_cmdline, print_cmdline), - FEAT_OPF(HEADER_CPU_TOPOLOGY, write_cpu_topology, print_cpu_topology), - FEAT_OPF(HEADER_NUMA_TOPOLOGY, write_numa_topology, print_numa_topology), + FEAT_OPA(HEADER_TRACE_INFO, trace_info), + FEAT_OPA(HEADER_BUILD_ID, build_id), + FEAT_OPA(HEADER_HOSTNAME, hostname), + FEAT_OPA(HEADER_OSRELEASE, osrelease), + FEAT_OPA(HEADER_VERSION, version), + FEAT_OPA(HEADER_ARCH, arch), + FEAT_OPA(HEADER_NRCPUS, nrcpus), + FEAT_OPA(HEADER_CPUDESC, cpudesc), + FEAT_OPA(HEADER_CPUID, cpuid), + FEAT_OPA(HEADER_TOTAL_MEM, total_mem), + FEAT_OPA(HEADER_EVENT_DESC, event_desc), + FEAT_OPA(HEADER_CMDLINE, cmdline), + FEAT_OPF(HEADER_CPU_TOPOLOGY, cpu_topology), + FEAT_OPF(HEADER_NUMA_TOPOLOGY, numa_topology), }; struct header_print_data { @@ -1103,9 +1354,9 @@ static int perf_file_section__fprintf_info(struct perf_file_section *section, "%d, continuing...\n", section->offset, feat); return 0; } - if (feat < HEADER_TRACE_INFO || feat >= HEADER_LAST_FEATURE) { + if (feat >= HEADER_LAST_FEATURE) { pr_warning("unknown feature %d\n", feat); - return -1; + return 0; } if (!feat_ops[feat].print) return 0; @@ -1132,252 +1383,6 @@ int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full) return 
0; } -#define dsos__for_each_with_build_id(pos, head) \ - list_for_each_entry(pos, head, node) \ - if (!pos->has_build_id) \ - continue; \ - else - -static int __dsos__write_buildid_table(struct list_head *head, pid_t pid, - u16 misc, int fd) -{ - struct dso *pos; - - dsos__for_each_with_build_id(pos, head) { - int err; - struct build_id_event b; - size_t len; - - if (!pos->hit) - continue; - len = pos->long_name_len + 1; - len = ALIGN(len, NAME_ALIGN); - memset(&b, 0, sizeof(b)); - memcpy(&b.build_id, pos->build_id, sizeof(pos->build_id)); - b.pid = pid; - b.header.misc = misc; - b.header.size = sizeof(b) + len; - err = do_write(fd, &b, sizeof(b)); - if (err < 0) - return err; - err = write_padded(fd, pos->long_name, - pos->long_name_len + 1, len); - if (err < 0) - return err; - } - - return 0; -} - -static int machine__write_buildid_table(struct machine *machine, int fd) -{ - int err; - u16 kmisc = PERF_RECORD_MISC_KERNEL, - umisc = PERF_RECORD_MISC_USER; - - if (!machine__is_host(machine)) { - kmisc = PERF_RECORD_MISC_GUEST_KERNEL; - umisc = PERF_RECORD_MISC_GUEST_USER; - } - - err = __dsos__write_buildid_table(&machine->kernel_dsos, machine->pid, - kmisc, fd); - if (err == 0) - err = __dsos__write_buildid_table(&machine->user_dsos, - machine->pid, umisc, fd); - return err; -} - -static int dsos__write_buildid_table(struct perf_header *header, int fd) -{ - struct perf_session *session = container_of(header, - struct perf_session, header); - struct rb_node *nd; - int err = machine__write_buildid_table(&session->host_machine, fd); - - if (err) - return err; - - for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) { - struct machine *pos = rb_entry(nd, struct machine, rb_node); - err = machine__write_buildid_table(pos, fd); - if (err) - break; - } - return err; -} - -int build_id_cache__add_s(const char *sbuild_id, const char *debugdir, - const char *name, bool is_kallsyms) -{ - const size_t size = PATH_MAX; - char *realname, *filename = zalloc(size), - *linkname = zalloc(size), *targetname; - int len, err = -1; - - if (is_kallsyms) { - if (symbol_conf.kptr_restrict) { - pr_debug("Not caching a kptr_restrict'ed /proc/kallsyms\n"); - return 0; - } - realname = (char *)name; - } else - realname = realpath(name, NULL); - - if (realname == NULL || filename == NULL || linkname == NULL) - goto out_free; - - len = snprintf(filename, size, "%s%s%s", - debugdir, is_kallsyms ? 
"/" : "", realname); - if (mkdir_p(filename, 0755)) - goto out_free; - - snprintf(filename + len, sizeof(filename) - len, "/%s", sbuild_id); - - if (access(filename, F_OK)) { - if (is_kallsyms) { - if (copyfile("/proc/kallsyms", filename)) - goto out_free; - } else if (link(realname, filename) && copyfile(name, filename)) - goto out_free; - } - - len = snprintf(linkname, size, "%s/.build-id/%.2s", - debugdir, sbuild_id); - - if (access(linkname, X_OK) && mkdir_p(linkname, 0755)) - goto out_free; - - snprintf(linkname + len, size - len, "/%s", sbuild_id + 2); - targetname = filename + strlen(debugdir) - 5; - memcpy(targetname, "../..", 5); - - if (symlink(targetname, linkname) == 0) - err = 0; -out_free: - if (!is_kallsyms) - free(realname); - free(filename); - free(linkname); - return err; -} - -static int build_id_cache__add_b(const u8 *build_id, size_t build_id_size, - const char *name, const char *debugdir, - bool is_kallsyms) -{ - char sbuild_id[BUILD_ID_SIZE * 2 + 1]; - - build_id__sprintf(build_id, build_id_size, sbuild_id); - - return build_id_cache__add_s(sbuild_id, debugdir, name, is_kallsyms); -} - -int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir) -{ - const size_t size = PATH_MAX; - char *filename = zalloc(size), - *linkname = zalloc(size); - int err = -1; - - if (filename == NULL || linkname == NULL) - goto out_free; - - snprintf(linkname, size, "%s/.build-id/%.2s/%s", - debugdir, sbuild_id, sbuild_id + 2); - - if (access(linkname, F_OK)) - goto out_free; - - if (readlink(linkname, filename, size - 1) < 0) - goto out_free; - - if (unlink(linkname)) - goto out_free; - - /* - * Since the link is relative, we must make it absolute: - */ - snprintf(linkname, size, "%s/.build-id/%.2s/%s", - debugdir, sbuild_id, filename); - - if (unlink(linkname)) - goto out_free; - - err = 0; -out_free: - free(filename); - free(linkname); - return err; -} - -static int dso__cache_build_id(struct dso *dso, const char *debugdir) -{ - bool is_kallsyms = dso->kernel && dso->long_name[0] != '/'; - - return build_id_cache__add_b(dso->build_id, sizeof(dso->build_id), - dso->long_name, debugdir, is_kallsyms); -} - -static int __dsos__cache_build_ids(struct list_head *head, const char *debugdir) -{ - struct dso *pos; - int err = 0; - - dsos__for_each_with_build_id(pos, head) - if (dso__cache_build_id(pos, debugdir)) - err = -1; - - return err; -} - -static int machine__cache_build_ids(struct machine *machine, const char *debugdir) -{ - int ret = __dsos__cache_build_ids(&machine->kernel_dsos, debugdir); - ret |= __dsos__cache_build_ids(&machine->user_dsos, debugdir); - return ret; -} - -static int perf_session__cache_build_ids(struct perf_session *session) -{ - struct rb_node *nd; - int ret; - char debugdir[PATH_MAX]; - - snprintf(debugdir, sizeof(debugdir), "%s", buildid_dir); - - if (mkdir(debugdir, 0755) != 0 && errno != EEXIST) - return -1; - - ret = machine__cache_build_ids(&session->host_machine, debugdir); - - for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) { - struct machine *pos = rb_entry(nd, struct machine, rb_node); - ret |= machine__cache_build_ids(pos, debugdir); - } - return ret ? 
-1 : 0; -} - -static bool machine__read_build_ids(struct machine *machine, bool with_hits) -{ - bool ret = __dsos__read_build_ids(&machine->kernel_dsos, with_hits); - ret |= __dsos__read_build_ids(&machine->user_dsos, with_hits); - return ret; -} - -static bool perf_session__read_build_ids(struct perf_session *session, bool with_hits) -{ - struct rb_node *nd; - bool ret = machine__read_build_ids(&session->host_machine, with_hits); - - for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) { - struct machine *pos = rb_entry(nd, struct machine, rb_node); - ret |= machine__read_build_ids(pos, with_hits); - } - - return ret; -} - static int do_write_feat(int fd, struct perf_header *h, int type, struct perf_file_section **p, struct perf_evlist *evlist) @@ -1386,6 +1391,8 @@ static int do_write_feat(int fd, struct perf_header *h, int type, int ret = 0; if (perf_header__has_feat(h, type)) { + if (!feat_ops[type].write) + return -1; (*p)->offset = lseek(fd, 0, SEEK_CUR); @@ -1408,18 +1415,12 @@ static int perf_header__adds_write(struct perf_header *header, struct perf_evlist *evlist, int fd) { int nr_sections; - struct perf_session *session; struct perf_file_section *feat_sec, *p; int sec_size; u64 sec_start; + int feat; int err; - session = container_of(header, struct perf_session, header); - - if (perf_header__has_feat(header, HEADER_BUILD_ID && - !perf_session__read_build_ids(session, true))) - perf_header__clear_feat(header, HEADER_BUILD_ID); - nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS); if (!nr_sections) return 0; @@ -1433,64 +1434,11 @@ static int perf_header__adds_write(struct perf_header *header, sec_start = header->data_offset + header->data_size; lseek(fd, sec_start + sec_size, SEEK_SET); - err = do_write_feat(fd, header, HEADER_TRACE_INFO, &p, evlist); - if (err) - goto out_free; - - err = do_write_feat(fd, header, HEADER_BUILD_ID, &p, evlist); - if (err) { - perf_header__clear_feat(header, HEADER_BUILD_ID); - goto out_free; + for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) { + if (do_write_feat(fd, header, feat, &p, evlist)) + perf_header__clear_feat(header, feat); } - err = do_write_feat(fd, header, HEADER_HOSTNAME, &p, evlist); - if (err) - perf_header__clear_feat(header, HEADER_HOSTNAME); - - err = do_write_feat(fd, header, HEADER_OSRELEASE, &p, evlist); - if (err) - perf_header__clear_feat(header, HEADER_OSRELEASE); - - err = do_write_feat(fd, header, HEADER_VERSION, &p, evlist); - if (err) - perf_header__clear_feat(header, HEADER_VERSION); - - err = do_write_feat(fd, header, HEADER_ARCH, &p, evlist); - if (err) - perf_header__clear_feat(header, HEADER_ARCH); - - err = do_write_feat(fd, header, HEADER_NRCPUS, &p, evlist); - if (err) - perf_header__clear_feat(header, HEADER_NRCPUS); - - err = do_write_feat(fd, header, HEADER_CPUDESC, &p, evlist); - if (err) - perf_header__clear_feat(header, HEADER_CPUDESC); - - err = do_write_feat(fd, header, HEADER_CPUID, &p, evlist); - if (err) - perf_header__clear_feat(header, HEADER_CPUID); - - err = do_write_feat(fd, header, HEADER_TOTAL_MEM, &p, evlist); - if (err) - perf_header__clear_feat(header, HEADER_TOTAL_MEM); - - err = do_write_feat(fd, header, HEADER_CMDLINE, &p, evlist); - if (err) - perf_header__clear_feat(header, HEADER_CMDLINE); - - err = do_write_feat(fd, header, HEADER_EVENT_DESC, &p, evlist); - if (err) - perf_header__clear_feat(header, HEADER_EVENT_DESC); - - err = do_write_feat(fd, header, HEADER_CPU_TOPOLOGY, &p, evlist); - if (err) - perf_header__clear_feat(header, 
HEADER_CPU_TOPOLOGY); - - err = do_write_feat(fd, header, HEADER_NUMA_TOPOLOGY, &p, evlist); - if (err) - perf_header__clear_feat(header, HEADER_NUMA_TOPOLOGY); - lseek(fd, sec_start, SEEK_SET); /* * may write more than needed due to dropped feature, but @@ -1499,7 +1447,6 @@ static int perf_header__adds_write(struct perf_header *header, err = do_write(fd, feat_sec, sec_size); if (err < 0) pr_debug("failed to write feature section\n"); -out_free: free(feat_sec); return err; } @@ -1637,20 +1584,20 @@ static int perf_header__getbuffer64(struct perf_header *header, int perf_header__process_sections(struct perf_header *header, int fd, void *data, int (*process)(struct perf_file_section *section, - struct perf_header *ph, - int feat, int fd, void *data)) + struct perf_header *ph, + int feat, int fd, void *data)) { - struct perf_file_section *feat_sec; + struct perf_file_section *feat_sec, *sec; int nr_sections; int sec_size; - int idx = 0; - int err = -1, feat = 1; + int feat; + int err; nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS); if (!nr_sections) return 0; - feat_sec = calloc(sizeof(*feat_sec), nr_sections); + feat_sec = sec = calloc(sizeof(*feat_sec), nr_sections); if (!feat_sec) return -1; @@ -1658,20 +1605,16 @@ int perf_header__process_sections(struct perf_header *header, int fd, lseek(fd, header->data_offset + header->data_size, SEEK_SET); - if (perf_header__getbuffer64(header, fd, feat_sec, sec_size)) + err = perf_header__getbuffer64(header, fd, feat_sec, sec_size); + if (err < 0) goto out_free; - err = 0; - while (idx < nr_sections && feat < HEADER_LAST_FEATURE) { - if (perf_header__has_feat(header, feat)) { - struct perf_file_section *sec = &feat_sec[idx++]; - - err = process(sec, header, feat, fd, data); - if (err < 0) - break; - } - ++feat; + for_each_set_bit(feat, header->adds_features, HEADER_LAST_FEATURE) { + err = process(sec++, header, feat, fd, data); + if (err < 0) + goto out_free; } + err = 0; out_free: free(feat_sec); return err; @@ -1906,32 +1849,21 @@ static int perf_file_section__process(struct perf_file_section *section, return 0; } + if (feat >= HEADER_LAST_FEATURE) { + pr_debug("unknown feature %d, continuing...\n", feat); + return 0; + } + switch (feat) { case HEADER_TRACE_INFO: trace_report(fd, false); break; - case HEADER_BUILD_ID: if (perf_header__read_build_ids(ph, fd, section->offset, section->size)) pr_debug("Failed to read buildids, continuing...\n"); break; - - case HEADER_HOSTNAME: - case HEADER_OSRELEASE: - case HEADER_VERSION: - case HEADER_ARCH: - case HEADER_NRCPUS: - case HEADER_CPUDESC: - case HEADER_CPUID: - case HEADER_TOTAL_MEM: - case HEADER_CMDLINE: - case HEADER_EVENT_DESC: - case HEADER_CPU_TOPOLOGY: - case HEADER_NUMA_TOPOLOGY: - break; - default: - pr_debug("unknown feature %d, continuing...\n", feat); + break; } return 0; @@ -2041,6 +1973,8 @@ int perf_session__read_header(struct perf_session *session, int fd) lseek(fd, tmp, SEEK_SET); } + symbol_conf.nr_events = nr_attrs; + if (f_header.event_types.size) { lseek(fd, f_header.event_types.offset, SEEK_SET); events = malloc(f_header.event_types.size); @@ -2068,9 +2002,9 @@ out_delete_evlist: return -ENOMEM; } -int perf_event__synthesize_attr(struct perf_event_attr *attr, u16 ids, u64 *id, - perf_event__handler_t process, - struct perf_session *session) +int perf_event__synthesize_attr(struct perf_tool *tool, + struct perf_event_attr *attr, u16 ids, u64 *id, + perf_event__handler_t process) { union perf_event *ev; size_t size; @@ -2092,22 +2026,23 @@ int 
perf_event__synthesize_attr(struct perf_event_attr *attr, u16 ids, u64 *id, ev->attr.header.type = PERF_RECORD_HEADER_ATTR; ev->attr.header.size = size; - err = process(ev, NULL, session); + err = process(tool, ev, NULL, NULL); free(ev); return err; } -int perf_session__synthesize_attrs(struct perf_session *session, +int perf_event__synthesize_attrs(struct perf_tool *tool, + struct perf_session *session, perf_event__handler_t process) { struct perf_evsel *attr; int err = 0; list_for_each_entry(attr, &session->evlist->entries, node) { - err = perf_event__synthesize_attr(&attr->attr, attr->ids, - attr->id, process, session); + err = perf_event__synthesize_attr(tool, &attr->attr, attr->ids, + attr->id, process); if (err) { pr_debug("failed to create perf header attribute\n"); return err; @@ -2118,23 +2053,23 @@ int perf_session__synthesize_attrs(struct perf_session *session, } int perf_event__process_attr(union perf_event *event, - struct perf_session *session) + struct perf_evlist **pevlist) { unsigned int i, ids, n_ids; struct perf_evsel *evsel; + struct perf_evlist *evlist = *pevlist; - if (session->evlist == NULL) { - session->evlist = perf_evlist__new(NULL, NULL); - if (session->evlist == NULL) + if (evlist == NULL) { + *pevlist = evlist = perf_evlist__new(NULL, NULL); + if (evlist == NULL) return -ENOMEM; } - evsel = perf_evsel__new(&event->attr.attr, - session->evlist->nr_entries); + evsel = perf_evsel__new(&event->attr.attr, evlist->nr_entries); if (evsel == NULL) return -ENOMEM; - perf_evlist__add(session->evlist, evsel); + perf_evlist__add(evlist, evsel); ids = event->header.size; ids -= (void *)&event->attr.id - (void *)event; @@ -2148,18 +2083,16 @@ int perf_event__process_attr(union perf_event *event, return -ENOMEM; for (i = 0; i < n_ids; i++) { - perf_evlist__id_add(session->evlist, evsel, 0, i, - event->attr.id[i]); + perf_evlist__id_add(evlist, evsel, 0, i, event->attr.id[i]); } - perf_session__update_sample_type(session); - return 0; } -int perf_event__synthesize_event_type(u64 event_id, char *name, +int perf_event__synthesize_event_type(struct perf_tool *tool, + u64 event_id, char *name, perf_event__handler_t process, - struct perf_session *session) + struct machine *machine) { union perf_event ev; size_t size = 0; @@ -2177,13 +2110,14 @@ int perf_event__synthesize_event_type(u64 event_id, char *name, ev.event_type.header.size = sizeof(ev.event_type) - (sizeof(ev.event_type.event_type.name) - size); - err = process(&ev, NULL, session); + err = process(tool, &ev, NULL, machine); return err; } -int perf_event__synthesize_event_types(perf_event__handler_t process, - struct perf_session *session) +int perf_event__synthesize_event_types(struct perf_tool *tool, + perf_event__handler_t process, + struct machine *machine) { struct perf_trace_event_type *type; int i, err = 0; @@ -2191,9 +2125,9 @@ int perf_event__synthesize_event_types(perf_event__handler_t process, for (i = 0; i < event_count; i++) { type = &events[i]; - err = perf_event__synthesize_event_type(type->event_id, + err = perf_event__synthesize_event_type(tool, type->event_id, type->name, process, - session); + machine); if (err) { pr_debug("failed to create perf header event type\n"); return err; @@ -2203,8 +2137,8 @@ int perf_event__synthesize_event_types(perf_event__handler_t process, return err; } -int perf_event__process_event_type(union perf_event *event, - struct perf_session *session __unused) +int perf_event__process_event_type(struct perf_tool *tool __unused, + union perf_event *event) { if 
(perf_header__push_event(event->event_type.event_type.event_id, event->event_type.event_type.name) < 0) @@ -2213,9 +2147,9 @@ int perf_event__process_event_type(union perf_event *event, return 0; } -int perf_event__synthesize_tracing_data(int fd, struct perf_evlist *evlist, - perf_event__handler_t process, - struct perf_session *session __unused) +int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd, + struct perf_evlist *evlist, + perf_event__handler_t process) { union perf_event ev; struct tracing_data *tdata; @@ -2246,7 +2180,7 @@ int perf_event__synthesize_tracing_data(int fd, struct perf_evlist *evlist, ev.tracing_data.header.size = sizeof(ev.tracing_data); ev.tracing_data.size = aligned_size; - process(&ev, NULL, session); + process(tool, &ev, NULL, NULL); /* * The put function will copy all the tracing data @@ -2288,10 +2222,10 @@ int perf_event__process_tracing_data(union perf_event *event, return size_read + padding; } -int perf_event__synthesize_build_id(struct dso *pos, u16 misc, +int perf_event__synthesize_build_id(struct perf_tool *tool, + struct dso *pos, u16 misc, perf_event__handler_t process, - struct machine *machine, - struct perf_session *session) + struct machine *machine) { union perf_event ev; size_t len; @@ -2311,12 +2245,13 @@ int perf_event__synthesize_build_id(struct dso *pos, u16 misc, ev.build_id.header.size = sizeof(ev.build_id) + len; memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len); - err = process(&ev, NULL, session); + err = process(tool, &ev, NULL, machine); return err; } -int perf_event__process_build_id(union perf_event *event, +int perf_event__process_build_id(struct perf_tool *tool __used, + union perf_event *event, struct perf_session *session) { __event_process_build_id(&event->build_id, diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h index 3d5a742..ac4ec95 100644 --- a/tools/perf/util/header.h +++ b/tools/perf/util/header.h @@ -10,7 +10,8 @@ #include <linux/bitmap.h> enum { - HEADER_TRACE_INFO = 1, + HEADER_RESERVED = 0, /* always cleared */ + HEADER_TRACE_INFO = 1, HEADER_BUILD_ID, HEADER_HOSTNAME, @@ -27,10 +28,9 @@ enum { HEADER_NUMA_TOPOLOGY, HEADER_LAST_FEATURE, + HEADER_FEAT_BITS = 256, }; -#define HEADER_FEAT_BITS 256 - struct perf_file_section { u64 offset; u64 size; @@ -68,6 +68,7 @@ struct perf_header { }; struct perf_evlist; +struct perf_session; int perf_session__read_header(struct perf_session *session, int fd); int perf_session__write_header(struct perf_session *session, @@ -96,32 +97,36 @@ int build_id_cache__add_s(const char *sbuild_id, const char *debugdir, const char *name, bool is_kallsyms); int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir); -int perf_event__synthesize_attr(struct perf_event_attr *attr, u16 ids, u64 *id, - perf_event__handler_t process, - struct perf_session *session); -int perf_session__synthesize_attrs(struct perf_session *session, - perf_event__handler_t process); -int perf_event__process_attr(union perf_event *event, struct perf_session *session); +int perf_event__synthesize_attr(struct perf_tool *tool, + struct perf_event_attr *attr, u16 ids, u64 *id, + perf_event__handler_t process); +int perf_event__synthesize_attrs(struct perf_tool *tool, + struct perf_session *session, + perf_event__handler_t process); +int perf_event__process_attr(union perf_event *event, struct perf_evlist **pevlist); -int perf_event__synthesize_event_type(u64 event_id, char *name, +int perf_event__synthesize_event_type(struct perf_tool *tool, + u64 event_id, 
char *name, perf_event__handler_t process, - struct perf_session *session); -int perf_event__synthesize_event_types(perf_event__handler_t process, - struct perf_session *session); -int perf_event__process_event_type(union perf_event *event, - struct perf_session *session); - -int perf_event__synthesize_tracing_data(int fd, struct perf_evlist *evlist, - perf_event__handler_t process, - struct perf_session *session); + struct machine *machine); +int perf_event__synthesize_event_types(struct perf_tool *tool, + perf_event__handler_t process, + struct machine *machine); +int perf_event__process_event_type(struct perf_tool *tool, + union perf_event *event); + +int perf_event__synthesize_tracing_data(struct perf_tool *tool, + int fd, struct perf_evlist *evlist, + perf_event__handler_t process); int perf_event__process_tracing_data(union perf_event *event, struct perf_session *session); -int perf_event__synthesize_build_id(struct dso *pos, u16 misc, +int perf_event__synthesize_build_id(struct perf_tool *tool, + struct dso *pos, u16 misc, perf_event__handler_t process, - struct machine *machine, - struct perf_session *session); -int perf_event__process_build_id(union perf_event *event, + struct machine *machine); +int perf_event__process_build_id(struct perf_tool *tool, + union perf_event *event, struct perf_session *session); /* diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h index 89289c8..ff6f9d5 100644 --- a/tools/perf/util/hist.h +++ b/tools/perf/util/hist.h @@ -117,7 +117,6 @@ int perf_evlist__tui_browse_hists(struct perf_evlist *evlist __used, static inline int hist_entry__tui_annotate(struct hist_entry *self __used, int evidx __used, - int nr_events __used, void(*timer)(void *arg) __used, void *arg __used, int delay_secs __used) @@ -128,7 +127,7 @@ static inline int hist_entry__tui_annotate(struct hist_entry *self __used, #define K_RIGHT -2 #else #include "ui/keysyms.h" -int hist_entry__tui_annotate(struct hist_entry *he, int evidx, int nr_events, +int hist_entry__tui_annotate(struct hist_entry *he, int evidx, void(*timer)(void *arg), void *arg, int delay_secs); int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help, diff --git a/tools/perf/util/include/linux/bitops.h b/tools/perf/util/include/linux/bitops.h index 305c848..62cdee7 100644 --- a/tools/perf/util/include/linux/bitops.h +++ b/tools/perf/util/include/linux/bitops.h @@ -9,6 +9,17 @@ #define BITS_PER_BYTE 8 #define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long)) +#define for_each_set_bit(bit, addr, size) \ + for ((bit) = find_first_bit((addr), (size)); \ + (bit) < (size); \ + (bit) = find_next_bit((addr), (size), (bit) + 1)) + +/* same as for_each_set_bit() but use bit as value to start with */ +#define for_each_set_bit_cont(bit, addr, size) \ + for ((bit) = find_next_bit((addr), (size), (bit)); \ + (bit) < (size); \ + (bit) = find_next_bit((addr), (size), (bit) + 1)) + static inline void set_bit(int nr, unsigned long *addr) { addr[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG); @@ -30,4 +41,111 @@ static inline unsigned long hweight_long(unsigned long w) return sizeof(w) == 4 ? hweight32(w) : hweight64(w); } +#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) + +/** + * __ffs - find first bit in word. + * @word: The word to search + * + * Undefined if no bit exists, so code should check against 0 first. 
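 *
 * A worked example: __ffs(0x18) == 3, since 0x18 is binary 11000 and
 * bit 3 is the lowest bit set.  The body below is a binary search,
 * testing successively smaller low-order windows (32, 16, 8, 4, 2,
 * then 1 bit) and accumulating the offset.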
+ */ +static __always_inline unsigned long __ffs(unsigned long word) +{ + int num = 0; + +#if BITS_PER_LONG == 64 + if ((word & 0xffffffff) == 0) { + num += 32; + word >>= 32; + } +#endif + if ((word & 0xffff) == 0) { + num += 16; + word >>= 16; + } + if ((word & 0xff) == 0) { + num += 8; + word >>= 8; + } + if ((word & 0xf) == 0) { + num += 4; + word >>= 4; + } + if ((word & 0x3) == 0) { + num += 2; + word >>= 2; + } + if ((word & 0x1) == 0) + num += 1; + return num; +} + +/* + * Find the first set bit in a memory region. + */ +static inline unsigned long +find_first_bit(const unsigned long *addr, unsigned long size) +{ + const unsigned long *p = addr; + unsigned long result = 0; + unsigned long tmp; + + while (size & ~(BITS_PER_LONG-1)) { + if ((tmp = *(p++))) + goto found; + result += BITS_PER_LONG; + size -= BITS_PER_LONG; + } + if (!size) + return result; + + tmp = (*p) & (~0UL >> (BITS_PER_LONG - size)); + if (tmp == 0UL) /* Are any bits set? */ + return result + size; /* Nope. */ +found: + return result + __ffs(tmp); +} + +/* + * Find the next set bit in a memory region. + */ +static inline unsigned long +find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset) +{ + const unsigned long *p = addr + BITOP_WORD(offset); + unsigned long result = offset & ~(BITS_PER_LONG-1); + unsigned long tmp; + + if (offset >= size) + return size; + size -= result; + offset %= BITS_PER_LONG; + if (offset) { + tmp = *(p++); + tmp &= (~0UL << offset); + if (size < BITS_PER_LONG) + goto found_first; + if (tmp) + goto found_middle; + size -= BITS_PER_LONG; + result += BITS_PER_LONG; + } + while (size & ~(BITS_PER_LONG-1)) { + if ((tmp = *(p++))) + goto found_middle; + result += BITS_PER_LONG; + size -= BITS_PER_LONG; + } + if (!size) + return result; + tmp = *p; + +found_first: + tmp &= (~0UL >> (BITS_PER_LONG - size)); + if (tmp == 0UL) /* Are any bits set? */ + return result + size; /* Nope. 
*/ +found_middle: + return result + __ffs(tmp); +} + #endif diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c index 78284b1..316aa0a 100644 --- a/tools/perf/util/map.c +++ b/tools/perf/util/map.c @@ -562,6 +562,10 @@ int machine__init(struct machine *self, const char *root_dir, pid_t pid) INIT_LIST_HEAD(&self->user_dsos); INIT_LIST_HEAD(&self->kernel_dsos); + self->threads = RB_ROOT; + INIT_LIST_HEAD(&self->dead_threads); + self->last_match = NULL; + self->kmaps.machine = self; self->pid = pid; self->root_dir = strdup(root_dir); diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h index 890d855..2b8017f 100644 --- a/tools/perf/util/map.h +++ b/tools/perf/util/map.h @@ -18,9 +18,11 @@ enum map_type { extern const char *map_type__name[MAP__NR_TYPES]; struct dso; +struct ip_callchain; struct ref_reloc_sym; struct map_groups; struct machine; +struct perf_evsel; struct map { union { @@ -61,7 +63,11 @@ struct map_groups { struct machine { struct rb_node rb_node; pid_t pid; + u16 id_hdr_size; char *root_dir; + struct rb_root threads; + struct list_head dead_threads; + struct thread *last_match; struct list_head user_dsos; struct list_head kernel_dsos; struct map_groups kmaps; @@ -148,6 +154,13 @@ int machine__init(struct machine *self, const char *root_dir, pid_t pid); void machine__exit(struct machine *self); void machine__delete(struct machine *self); +int machine__resolve_callchain(struct machine *machine, + struct perf_evsel *evsel, struct thread *thread, + struct ip_callchain *chain, + struct symbol **parent); +int maps__set_kallsyms_ref_reloc_sym(struct map **maps, const char *symbol_name, + u64 addr); + /* * Default guest kernel is defined by parameter --guestkallsyms * and --guestmodules @@ -190,6 +203,12 @@ struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg, struct map **mapp, symbol_filter_t filter); + +struct thread *machine__findnew_thread(struct machine *machine, pid_t pid); +void machine__remove_thread(struct machine *machine, struct thread *th); + +size_t machine__fprintf(struct machine *machine, FILE *fp); + static inline struct symbol *machine__find_kernel_symbol(struct machine *self, enum map_type type, u64 addr, diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index 928918b..531c283 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -25,8 +25,6 @@ enum event_result { EVT_HANDLED_ALL }; -char debugfs_path[MAXPATHLEN]; - #define CHW(x) .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_##x #define CSW(x) .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_##x @@ -40,6 +38,7 @@ static struct event_symbol event_symbols[] = { { CHW(BRANCH_INSTRUCTIONS), "branch-instructions", "branches" }, { CHW(BRANCH_MISSES), "branch-misses", "" }, { CHW(BUS_CYCLES), "bus-cycles", "" }, + { CHW(REF_CPU_CYCLES), "ref-cycles", "" }, { CSW(CPU_CLOCK), "cpu-clock", "" }, { CSW(TASK_CLOCK), "task-clock", "" }, @@ -70,6 +69,7 @@ static const char *hw_event_names[PERF_COUNT_HW_MAX] = { "bus-cycles", "stalled-cycles-frontend", "stalled-cycles-backend", + "ref-cycles", }; static const char *sw_event_names[PERF_COUNT_SW_MAX] = { @@ -140,7 +140,7 @@ static int tp_event_has_id(struct dirent *sys_dir, struct dirent *evt_dir) char evt_path[MAXPATHLEN]; int fd; - snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", debugfs_path, + snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", tracing_events_path, sys_dir->d_name, evt_dir->d_name); fd = open(evt_path, O_RDONLY); if (fd < 0) @@ -171,16 +171,16 @@ struct tracepoint_path 
*tracepoint_id_to_path(u64 config) char evt_path[MAXPATHLEN]; char dir_path[MAXPATHLEN]; - if (debugfs_valid_mountpoint(debugfs_path)) + if (debugfs_valid_mountpoint(tracing_events_path)) return NULL; - sys_dir = opendir(debugfs_path); + sys_dir = opendir(tracing_events_path); if (!sys_dir) return NULL; for_each_subsystem(sys_dir, sys_dirent, sys_next) { - snprintf(dir_path, MAXPATHLEN, "%s/%s", debugfs_path, + snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path, sys_dirent.d_name); evt_dir = opendir(dir_path); if (!evt_dir) @@ -447,7 +447,7 @@ parse_single_tracepoint_event(char *sys_name, u64 id; int fd; - snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", debugfs_path, + snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", tracing_events_path, sys_name, evt_name); fd = open(evt_path, O_RDONLY); @@ -485,7 +485,7 @@ parse_multiple_tracepoint_event(struct perf_evlist *evlist, char *sys_name, struct dirent *evt_ent; DIR *evt_dir; - snprintf(evt_path, MAXPATHLEN, "%s/%s", debugfs_path, sys_name); + snprintf(evt_path, MAXPATHLEN, "%s/%s", tracing_events_path, sys_name); evt_dir = opendir(evt_path); if (!evt_dir) { @@ -528,7 +528,7 @@ parse_tracepoint_event(struct perf_evlist *evlist, const char **strp, char sys_name[MAX_EVENT_LENGTH]; unsigned int sys_length, evt_length; - if (debugfs_valid_mountpoint(debugfs_path)) + if (debugfs_valid_mountpoint(tracing_events_path)) return 0; evt_name = strchr(*strp, ':'); @@ -920,10 +920,10 @@ void print_tracepoint_events(const char *subsys_glob, const char *event_glob) char evt_path[MAXPATHLEN]; char dir_path[MAXPATHLEN]; - if (debugfs_valid_mountpoint(debugfs_path)) + if (debugfs_valid_mountpoint(tracing_events_path)) return; - sys_dir = opendir(debugfs_path); + sys_dir = opendir(tracing_events_path); if (!sys_dir) return; @@ -932,7 +932,7 @@ void print_tracepoint_events(const char *subsys_glob, const char *event_glob) !strglobmatch(sys_dirent.d_name, subsys_glob)) continue; - snprintf(dir_path, MAXPATHLEN, "%s/%s", debugfs_path, + snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path, sys_dirent.d_name); evt_dir = opendir(dir_path); if (!evt_dir) @@ -964,16 +964,16 @@ int is_valid_tracepoint(const char *event_string) char evt_path[MAXPATHLEN]; char dir_path[MAXPATHLEN]; - if (debugfs_valid_mountpoint(debugfs_path)) + if (debugfs_valid_mountpoint(tracing_events_path)) return 0; - sys_dir = opendir(debugfs_path); + sys_dir = opendir(tracing_events_path); if (!sys_dir) return 0; for_each_subsystem(sys_dir, sys_dirent, sys_next) { - snprintf(dir_path, MAXPATHLEN, "%s/%s", debugfs_path, + snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path, sys_dirent.d_name); evt_dir = opendir(dir_path); if (!evt_dir) diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h index 2f8e375..7e0cbe7 100644 --- a/tools/perf/util/parse-events.h +++ b/tools/perf/util/parse-events.h @@ -39,7 +39,6 @@ void print_tracepoint_events(const char *subsys_glob, const char *event_glob); int print_hwcache_events(const char *event_glob); extern int is_valid_tracepoint(const char *event_string); -extern char debugfs_path[]; extern int valid_debugfs_mount(const char *debugfs); #endif /* __PERF_PARSE_EVENTS_H */ diff --git a/tools/perf/util/probe-finder.h b/tools/perf/util/probe-finder.h index 1132c8f..17e94d0 100644 --- a/tools/perf/util/probe-finder.h +++ b/tools/perf/util/probe-finder.h @@ -5,7 +5,6 @@ #include "util.h" #include "probe-event.h" -#define MAX_PATH_LEN 256 #define MAX_PROBE_BUFFER 1024 #define MAX_PROBES 128 diff --git 
a/tools/perf/util/scripting-engines/trace-event-perl.c b/tools/perf/util/scripting-engines/trace-event-perl.c index 74350ff..e30749e 100644 --- a/tools/perf/util/scripting-engines/trace-event-perl.c +++ b/tools/perf/util/scripting-engines/trace-event-perl.c @@ -27,7 +27,10 @@ #include "../../perf.h" #include "../util.h" +#include "../thread.h" +#include "../event.h" #include "../trace-event.h" +#include "../evsel.h" #include <EXTERN.h> #include <perl.h> @@ -245,11 +248,11 @@ static inline struct event *find_cache_event(int type) return event; } -static void perl_process_event(union perf_event *pevent __unused, - struct perf_sample *sample, - struct perf_evsel *evsel, - struct perf_session *session __unused, - struct thread *thread) +static void perl_process_tracepoint(union perf_event *pevent __unused, + struct perf_sample *sample, + struct perf_evsel *evsel, + struct machine *machine __unused, + struct thread *thread) { struct format_field *field; static char handler[256]; @@ -265,6 +268,9 @@ static void perl_process_event(union perf_event *pevent __unused, dSP; + if (evsel->attr.type != PERF_TYPE_TRACEPOINT) + return; + type = trace_parse_common_type(data); event = find_cache_event(type); @@ -332,6 +338,42 @@ static void perl_process_event(union perf_event *pevent __unused, LEAVE; } +static void perl_process_event_generic(union perf_event *pevent __unused, + struct perf_sample *sample, + struct perf_evsel *evsel __unused, + struct machine *machine __unused, + struct thread *thread __unused) +{ + dSP; + + if (!get_cv("process_event", 0)) + return; + + ENTER; + SAVETMPS; + PUSHMARK(SP); + XPUSHs(sv_2mortal(newSVpvn((const char *)pevent, pevent->header.size))); + XPUSHs(sv_2mortal(newSVpvn((const char *)&evsel->attr, sizeof(evsel->attr)))); + XPUSHs(sv_2mortal(newSVpvn((const char *)sample, sizeof(*sample)))); + XPUSHs(sv_2mortal(newSVpvn((const char *)sample->raw_data, sample->raw_size))); + PUTBACK; + call_pv("process_event", G_SCALAR); + SPAGAIN; + PUTBACK; + FREETMPS; + LEAVE; +} + +static void perl_process_event(union perf_event *pevent, + struct perf_sample *sample, + struct perf_evsel *evsel, + struct machine *machine, + struct thread *thread) +{ + perl_process_tracepoint(pevent, sample, evsel, machine, thread); + perl_process_event_generic(pevent, sample, evsel, machine, thread); +} + static void run_start_sub(void) { dSP; /* access to Perl stack */ @@ -553,7 +595,28 @@ static int perl_generate_script(const char *outfile) fprintf(ofp, "sub print_header\n{\n" "\tmy ($event_name, $cpu, $secs, $nsecs, $pid, $comm) = @_;\n\n" "\tprintf(\"%%-20s %%5u %%05u.%%09u %%8u %%-20s \",\n\t " - "$event_name, $cpu, $secs, $nsecs, $pid, $comm);\n}"); + "$event_name, $cpu, $secs, $nsecs, $pid, $comm);\n}\n"); + + fprintf(ofp, + "\n# Packed byte string args of process_event():\n" + "#\n" + "# $event:\tunion perf_event\tutil/event.h\n" + "# $attr:\tstruct perf_event_attr\tlinux/perf_event.h\n" + "# $sample:\tstruct perf_sample\tutil/event.h\n" + "# $raw_data:\tperf_sample->raw_data\tutil/event.h\n" + "\n" + "sub process_event\n" + "{\n" + "\tmy ($event, $attr, $sample, $raw_data) = @_;\n" + "\n" + "\tmy @event\t= unpack(\"LSS\", $event);\n" + "\tmy @attr\t= unpack(\"LLQQQQQLLQQ\", $attr);\n" + "\tmy @sample\t= unpack(\"QLLQQQQQLL\", $sample);\n" + "\tmy @raw_data\t= unpack(\"C*\", $raw_data);\n" + "\n" + "\tuse Data::Dumper;\n" + "\tprint Dumper \\@event, \\@attr, \\@sample, \\@raw_data;\n" + "}\n"); fclose(ofp); diff --git a/tools/perf/util/scripting-engines/trace-event-python.c 
b/tools/perf/util/scripting-engines/trace-event-python.c index 6ccf70e..0b2a487 100644 --- a/tools/perf/util/scripting-engines/trace-event-python.c +++ b/tools/perf/util/scripting-engines/trace-event-python.c @@ -29,6 +29,8 @@ #include "../../perf.h" #include "../util.h" +#include "../event.h" +#include "../thread.h" #include "../trace-event.h" PyMODINIT_FUNC initperf_trace_context(void); @@ -207,7 +209,7 @@ static inline struct event *find_cache_event(int type) static void python_process_event(union perf_event *pevent __unused, struct perf_sample *sample, struct perf_evsel *evsel __unused, - struct perf_session *session __unused, + struct machine *machine __unused, struct thread *thread) { PyObject *handler, *retval, *context, *t, *obj, *dict = NULL; diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index 0f4555c..b5ca255 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c @@ -10,6 +10,7 @@ #include "evlist.h" #include "evsel.h" #include "session.h" +#include "tool.h" #include "sort.h" #include "util.h" #include "cpumap.h" @@ -78,39 +79,13 @@ out_close: return -1; } -static void perf_session__id_header_size(struct perf_session *session) -{ - struct perf_sample *data; - u64 sample_type = session->sample_type; - u16 size = 0; - - if (!session->sample_id_all) - goto out; - - if (sample_type & PERF_SAMPLE_TID) - size += sizeof(data->tid) * 2; - - if (sample_type & PERF_SAMPLE_TIME) - size += sizeof(data->time); - - if (sample_type & PERF_SAMPLE_ID) - size += sizeof(data->id); - - if (sample_type & PERF_SAMPLE_STREAM_ID) - size += sizeof(data->stream_id); - - if (sample_type & PERF_SAMPLE_CPU) - size += sizeof(data->cpu) * 2; -out: - session->id_hdr_size = size; -} - void perf_session__update_sample_type(struct perf_session *self) { self->sample_type = perf_evlist__sample_type(self->evlist); self->sample_size = __perf_evsel__sample_size(self->sample_type); self->sample_id_all = perf_evlist__sample_id_all(self->evlist); - perf_session__id_header_size(self); + self->id_hdr_size = perf_evlist__id_hdr_size(self->evlist); + self->host_machine.id_hdr_size = self->id_hdr_size; } int perf_session__create_kernel_maps(struct perf_session *self) @@ -130,18 +105,26 @@ static void perf_session__destroy_kernel_maps(struct perf_session *self) struct perf_session *perf_session__new(const char *filename, int mode, bool force, bool repipe, - struct perf_event_ops *ops) + struct perf_tool *tool) { - size_t len = filename ? strlen(filename) + 1 : 0; - struct perf_session *self = zalloc(sizeof(*self) + len); + struct perf_session *self; + struct stat st; + size_t len; + + if (!filename || !strlen(filename)) { + if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode)) + filename = "-"; + else + filename = "perf.data"; + } + + len = strlen(filename); + self = zalloc(sizeof(*self) + len); if (self == NULL) goto out; memcpy(self->filename, filename, len); - self->threads = RB_ROOT; - INIT_LIST_HEAD(&self->dead_threads); - self->last_match = NULL; /* * On 64bit we can mmap the data file in one go. No need for tiny mmap * slices. On 32bit we use 32MB. 
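An aside on the sample-type plumbing just above: perf_session__update_sample_type() now takes the id-header size from the evlist rather than computing it in session.c, and mirrors it into host_machine.id_hdr_size. The removed helper documents the layout: with attr.sample_id_all set, every non-sample event carries a trailer holding whichever of TID/TIME/ID/STREAM_ID/CPU the sample_type selects. A minimal sketch of that computation, assuming only the PERF_SAMPLE_* flags from linux/perf_event.h (the function name here is hypothetical; the real perf_evlist__id_hdr_size() lives in evlist.c, outside this excerpt):

	#include <stdbool.h>
	#include <stdint.h>
	#include <linux/perf_event.h>

	/* Sketch: size of the sample-id trailer appended to each event. */
	static uint16_t id_hdr_size(uint64_t sample_type, bool sample_id_all)
	{
		uint16_t size = 0;

		if (!sample_id_all)
			return 0;

		if (sample_type & PERF_SAMPLE_TID)
			size += sizeof(uint32_t) * 2;	/* pid, tid */
		if (sample_type & PERF_SAMPLE_TIME)
			size += sizeof(uint64_t);
		if (sample_type & PERF_SAMPLE_ID)
			size += sizeof(uint64_t);
		if (sample_type & PERF_SAMPLE_STREAM_ID)
			size += sizeof(uint64_t);
		if (sample_type & PERF_SAMPLE_CPU)
			size += sizeof(uint32_t) * 2;	/* cpu, reserved */
		return size;
	}

For sample_type = PERF_SAMPLE_TID | PERF_SAMPLE_TIME | PERF_SAMPLE_ID this gives 24, i.e. a 24-byte trailer on every MMAP, COMM, FORK and similar event in the file -- the value the new machine->id_hdr_size field carries per machine.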
@@ -171,10 +154,10 @@ struct perf_session *perf_session__new(const char *filename, int mode, goto out_delete; } - if (ops && ops->ordering_requires_timestamps && - ops->ordered_samples && !self->sample_id_all) { + if (tool && tool->ordering_requires_timestamps && + tool->ordered_samples && !self->sample_id_all) { dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n"); - ops->ordered_samples = false; + tool->ordered_samples = false; } out: @@ -184,17 +167,22 @@ out_delete: return NULL; } -static void perf_session__delete_dead_threads(struct perf_session *self) +static void machine__delete_dead_threads(struct machine *machine) { struct thread *n, *t; - list_for_each_entry_safe(t, n, &self->dead_threads, node) { + list_for_each_entry_safe(t, n, &machine->dead_threads, node) { list_del(&t->node); thread__delete(t); } } -static void perf_session__delete_threads(struct perf_session *self) +static void perf_session__delete_dead_threads(struct perf_session *session) +{ + machine__delete_dead_threads(&session->host_machine); +} + +static void machine__delete_threads(struct machine *self) { struct rb_node *nd = rb_first(&self->threads); @@ -207,6 +195,11 @@ static void perf_session__delete_threads(struct perf_session *self) } } +static void perf_session__delete_threads(struct perf_session *session) +{ + machine__delete_threads(&session->host_machine); +} + void perf_session__delete(struct perf_session *self) { perf_session__destroy_kernel_maps(self); @@ -217,7 +210,7 @@ void perf_session__delete(struct perf_session *self) free(self); } -void perf_session__remove_thread(struct perf_session *self, struct thread *th) +void machine__remove_thread(struct machine *self, struct thread *th) { self->last_match = NULL; rb_erase(&th->rb_node, &self->threads); @@ -236,16 +229,16 @@ static bool symbol__match_parent_regex(struct symbol *sym) return 0; } -int perf_session__resolve_callchain(struct perf_session *self, - struct thread *thread, - struct ip_callchain *chain, - struct symbol **parent) +int machine__resolve_callchain(struct machine *self, struct perf_evsel *evsel, + struct thread *thread, + struct ip_callchain *chain, + struct symbol **parent) { u8 cpumode = PERF_RECORD_MISC_USER; unsigned int i; int err; - callchain_cursor_reset(&self->callchain_cursor); + callchain_cursor_reset(&evsel->hists.callchain_cursor); for (i = 0; i < chain->nr; i++) { u64 ip; @@ -272,7 +265,7 @@ int perf_session__resolve_callchain(struct perf_session *self, al.filtered = false; thread__find_addr_location(thread, self, cpumode, - MAP__FUNCTION, thread->pid, ip, &al, NULL); + MAP__FUNCTION, ip, &al, NULL); if (al.sym != NULL) { if (sort__has_parent && !*parent && symbol__match_parent_regex(al.sym)) @@ -281,7 +274,7 @@ int perf_session__resolve_callchain(struct perf_session *self, break; } - err = callchain_cursor_append(&self->callchain_cursor, + err = callchain_cursor_append(&evsel->hists.callchain_cursor, ip, al.map, al.sym); if (err) return err; @@ -290,75 +283,91 @@ int perf_session__resolve_callchain(struct perf_session *self, return 0; } -static int process_event_synth_stub(union perf_event *event __used, - struct perf_session *session __used) +static int process_event_synth_tracing_data_stub(union perf_event *event __used, + struct perf_session *session __used) +{ + dump_printf(": unhandled!\n"); + return 0; +} + +static int process_event_synth_attr_stub(union perf_event *event __used, + struct perf_evlist **pevlist __used) { dump_printf(": unhandled!\n"); return 0; } -static int 
process_event_sample_stub(union perf_event *event __used, +static int process_event_sample_stub(struct perf_tool *tool __used, + union perf_event *event __used, struct perf_sample *sample __used, struct perf_evsel *evsel __used, - struct perf_session *session __used) + struct machine *machine __used) { dump_printf(": unhandled!\n"); return 0; } -static int process_event_stub(union perf_event *event __used, +static int process_event_stub(struct perf_tool *tool __used, + union perf_event *event __used, struct perf_sample *sample __used, - struct perf_session *session __used) + struct machine *machine __used) { dump_printf(": unhandled!\n"); return 0; } -static int process_finished_round_stub(union perf_event *event __used, - struct perf_session *session __used, - struct perf_event_ops *ops __used) +static int process_finished_round_stub(struct perf_tool *tool __used, + union perf_event *event __used, + struct perf_session *perf_session __used) { dump_printf(": unhandled!\n"); return 0; } -static int process_finished_round(union perf_event *event, - struct perf_session *session, - struct perf_event_ops *ops); +static int process_event_type_stub(struct perf_tool *tool __used, + union perf_event *event __used) +{ + dump_printf(": unhandled!\n"); + return 0; +} -static void perf_event_ops__fill_defaults(struct perf_event_ops *handler) +static int process_finished_round(struct perf_tool *tool, + union perf_event *event, + struct perf_session *session); + +static void perf_tool__fill_defaults(struct perf_tool *tool) { - if (handler->sample == NULL) - handler->sample = process_event_sample_stub; - if (handler->mmap == NULL) - handler->mmap = process_event_stub; - if (handler->comm == NULL) - handler->comm = process_event_stub; - if (handler->fork == NULL) - handler->fork = process_event_stub; - if (handler->exit == NULL) - handler->exit = process_event_stub; - if (handler->lost == NULL) - handler->lost = perf_event__process_lost; - if (handler->read == NULL) - handler->read = process_event_stub; - if (handler->throttle == NULL) - handler->throttle = process_event_stub; - if (handler->unthrottle == NULL) - handler->unthrottle = process_event_stub; - if (handler->attr == NULL) - handler->attr = process_event_synth_stub; - if (handler->event_type == NULL) - handler->event_type = process_event_synth_stub; - if (handler->tracing_data == NULL) - handler->tracing_data = process_event_synth_stub; - if (handler->build_id == NULL) - handler->build_id = process_event_synth_stub; - if (handler->finished_round == NULL) { - if (handler->ordered_samples) - handler->finished_round = process_finished_round; + if (tool->sample == NULL) + tool->sample = process_event_sample_stub; + if (tool->mmap == NULL) + tool->mmap = process_event_stub; + if (tool->comm == NULL) + tool->comm = process_event_stub; + if (tool->fork == NULL) + tool->fork = process_event_stub; + if (tool->exit == NULL) + tool->exit = process_event_stub; + if (tool->lost == NULL) + tool->lost = perf_event__process_lost; + if (tool->read == NULL) + tool->read = process_event_sample_stub; + if (tool->throttle == NULL) + tool->throttle = process_event_stub; + if (tool->unthrottle == NULL) + tool->unthrottle = process_event_stub; + if (tool->attr == NULL) + tool->attr = process_event_synth_attr_stub; + if (tool->event_type == NULL) + tool->event_type = process_event_type_stub; + if (tool->tracing_data == NULL) + tool->tracing_data = process_event_synth_tracing_data_stub; + if (tool->build_id == NULL) + tool->build_id = process_finished_round_stub; + if 
(tool->finished_round == NULL) { + if (tool->ordered_samples) + tool->finished_round = process_finished_round; else - handler->finished_round = process_finished_round_stub; + tool->finished_round = process_finished_round_stub; } } @@ -490,11 +499,11 @@ static void perf_session_free_sample_buffers(struct perf_session *session) static int perf_session_deliver_event(struct perf_session *session, union perf_event *event, struct perf_sample *sample, - struct perf_event_ops *ops, + struct perf_tool *tool, u64 file_offset); static void flush_sample_queue(struct perf_session *s, - struct perf_event_ops *ops) + struct perf_tool *tool) { struct ordered_samples *os = &s->ordered_samples; struct list_head *head = &os->samples; @@ -505,7 +514,7 @@ static void flush_sample_queue(struct perf_session *s, unsigned idx = 0, progress_next = os->nr_samples / 16; int ret; - if (!ops->ordered_samples || !limit) + if (!tool->ordered_samples || !limit) return; list_for_each_entry_safe(iter, tmp, head, list) { @@ -516,7 +525,7 @@ static void flush_sample_queue(struct perf_session *s, if (ret) pr_err("Can't parse sample, err = %d\n", ret); else - perf_session_deliver_event(s, iter->event, &sample, ops, + perf_session_deliver_event(s, iter->event, &sample, tool, iter->file_offset); os->last_flush = iter->timestamp; @@ -578,11 +587,11 @@ static void flush_sample_queue(struct perf_session *s, * Flush every events below timestamp 7 * etc... */ -static int process_finished_round(union perf_event *event __used, - struct perf_session *session, - struct perf_event_ops *ops) +static int process_finished_round(struct perf_tool *tool, + union perf_event *event __used, + struct perf_session *session) { - flush_sample_queue(session, ops); + flush_sample_queue(session, tool); session->ordered_samples.next_flush = session->ordered_samples.max_timestamp; return 0; @@ -737,13 +746,26 @@ static void dump_sample(struct perf_session *session, union perf_event *event, callchain__printf(sample); } +static struct machine * + perf_session__find_machine_for_cpumode(struct perf_session *session, + union perf_event *event) +{ + const u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; + + if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) + return perf_session__find_machine(session, event->ip.pid); + + return perf_session__find_host_machine(session); +} + static int perf_session_deliver_event(struct perf_session *session, union perf_event *event, struct perf_sample *sample, - struct perf_event_ops *ops, + struct perf_tool *tool, u64 file_offset) { struct perf_evsel *evsel; + struct machine *machine; dump_event(session, event, file_offset, sample); @@ -765,6 +787,8 @@ static int perf_session_deliver_event(struct perf_session *session, hists__inc_nr_events(&evsel->hists, event->header.type); } + machine = perf_session__find_machine_for_cpumode(session, event); + switch (event->header.type) { case PERF_RECORD_SAMPLE: dump_sample(session, event, sample); @@ -772,23 +796,25 @@ static int perf_session_deliver_event(struct perf_session *session, ++session->hists.stats.nr_unknown_id; return -1; } - return ops->sample(event, sample, evsel, session); + return tool->sample(tool, event, sample, evsel, machine); case PERF_RECORD_MMAP: - return ops->mmap(event, sample, session); + return tool->mmap(tool, event, sample, machine); case PERF_RECORD_COMM: - return ops->comm(event, sample, session); + return tool->comm(tool, event, sample, machine); case PERF_RECORD_FORK: - return ops->fork(event, sample, session); + return 
tool->fork(tool, event, sample, machine); case PERF_RECORD_EXIT: - return ops->exit(event, sample, session); + return tool->exit(tool, event, sample, machine); case PERF_RECORD_LOST: - return ops->lost(event, sample, session); + if (tool->lost == perf_event__process_lost) + session->hists.stats.total_lost += event->lost.lost; + return tool->lost(tool, event, sample, machine); case PERF_RECORD_READ: - return ops->read(event, sample, session); + return tool->read(tool, event, sample, evsel, machine); case PERF_RECORD_THROTTLE: - return ops->throttle(event, sample, session); + return tool->throttle(tool, event, sample, machine); case PERF_RECORD_UNTHROTTLE: - return ops->unthrottle(event, sample, session); + return tool->unthrottle(tool, event, sample, machine); default: ++session->hists.stats.nr_unknown_events; return -1; @@ -812,24 +838,29 @@ static int perf_session__preprocess_sample(struct perf_session *session, } static int perf_session__process_user_event(struct perf_session *session, union perf_event *event, - struct perf_event_ops *ops, u64 file_offset) + struct perf_tool *tool, u64 file_offset) { + int err; + dump_event(session, event, file_offset, NULL); /* These events are processed right away */ switch (event->header.type) { case PERF_RECORD_HEADER_ATTR: - return ops->attr(event, session); + err = tool->attr(event, &session->evlist); + if (err == 0) + perf_session__update_sample_type(session); + return err; case PERF_RECORD_HEADER_EVENT_TYPE: - return ops->event_type(event, session); + return tool->event_type(tool, event); case PERF_RECORD_HEADER_TRACING_DATA: /* setup for reading amidst mmap */ lseek(session->fd, file_offset, SEEK_SET); - return ops->tracing_data(event, session); + return tool->tracing_data(event, session); case PERF_RECORD_HEADER_BUILD_ID: - return ops->build_id(event, session); + return tool->build_id(tool, event, session); case PERF_RECORD_FINISHED_ROUND: - return ops->finished_round(event, session, ops); + return tool->finished_round(tool, event, session); default: return -EINVAL; } @@ -837,7 +868,7 @@ static int perf_session__process_user_event(struct perf_session *session, union static int perf_session__process_event(struct perf_session *session, union perf_event *event, - struct perf_event_ops *ops, + struct perf_tool *tool, u64 file_offset) { struct perf_sample sample; @@ -853,7 +884,7 @@ static int perf_session__process_event(struct perf_session *session, hists__inc_nr_events(&session->hists, event->header.type); if (event->header.type >= PERF_RECORD_USER_TYPE_START) - return perf_session__process_user_event(session, event, ops, file_offset); + return perf_session__process_user_event(session, event, tool, file_offset); /* * For all kernel events we get the sample data @@ -866,14 +897,14 @@ static int perf_session__process_event(struct perf_session *session, if (perf_session__preprocess_sample(session, event, &sample)) return 0; - if (ops->ordered_samples) { + if (tool->ordered_samples) { ret = perf_session_queue_event(session, event, &sample, file_offset); if (ret != -ETIME) return ret; } - return perf_session_deliver_event(session, event, &sample, ops, + return perf_session_deliver_event(session, event, &sample, tool, file_offset); } @@ -884,6 +915,11 @@ void perf_event_header__bswap(struct perf_event_header *self) self->size = bswap_16(self->size); } +struct thread *perf_session__findnew(struct perf_session *session, pid_t pid) +{ + return machine__findnew_thread(&session->host_machine, pid); +} + static struct thread 
*perf_session__register_idle_thread(struct perf_session *self) { struct thread *thread = perf_session__findnew(self, 0); @@ -897,9 +933,9 @@ static struct thread *perf_session__register_idle_thread(struct perf_session *se } static void perf_session__warn_about_errors(const struct perf_session *session, - const struct perf_event_ops *ops) + const struct perf_tool *tool) { - if (ops->lost == perf_event__process_lost && + if (tool->lost == perf_event__process_lost && session->hists.stats.nr_events[PERF_RECORD_LOST] != 0) { ui__warning("Processed %d events and lost %d chunks!\n\n" "Check IO/CPU overload!\n\n", @@ -934,7 +970,7 @@ static void perf_session__warn_about_errors(const struct perf_session *session, volatile int session_done; static int __perf_session__process_pipe_events(struct perf_session *self, - struct perf_event_ops *ops) + struct perf_tool *tool) { union perf_event event; uint32_t size; @@ -943,7 +979,7 @@ static int __perf_session__process_pipe_events(struct perf_session *self, int err; void *p; - perf_event_ops__fill_defaults(ops); + perf_tool__fill_defaults(tool); head = 0; more: @@ -979,8 +1015,7 @@ more: } } - if (size == 0 || - (skip = perf_session__process_event(self, &event, ops, head)) < 0) { + if ((skip = perf_session__process_event(self, &event, tool, head)) < 0) { dump_printf("%#" PRIx64 " [%#x]: skipping unknown header type: %d\n", head, event.header.size, event.header.type); /* @@ -1003,7 +1038,7 @@ more: done: err = 0; out_err: - perf_session__warn_about_errors(self, ops); + perf_session__warn_about_errors(self, tool); perf_session_free_sample_buffers(self); return err; } @@ -1034,7 +1069,7 @@ fetch_mmaped_event(struct perf_session *session, int __perf_session__process_events(struct perf_session *session, u64 data_offset, u64 data_size, - u64 file_size, struct perf_event_ops *ops) + u64 file_size, struct perf_tool *tool) { u64 head, page_offset, file_offset, file_pos, progress_next; int err, mmap_prot, mmap_flags, map_idx = 0; @@ -1043,7 +1078,7 @@ int __perf_session__process_events(struct perf_session *session, union perf_event *event; uint32_t size; - perf_event_ops__fill_defaults(ops); + perf_tool__fill_defaults(tool); page_size = sysconf(_SC_PAGESIZE); @@ -1098,7 +1133,7 @@ more: size = event->header.size; if (size == 0 || - perf_session__process_event(session, event, ops, file_pos) < 0) { + perf_session__process_event(session, event, tool, file_pos) < 0) { dump_printf("%#" PRIx64 " [%#x]: skipping unknown header type: %d\n", file_offset + head, event->header.size, event->header.type); @@ -1127,15 +1162,15 @@ more: err = 0; /* do the final flush for ordered samples */ session->ordered_samples.next_flush = ULLONG_MAX; - flush_sample_queue(session, ops); + flush_sample_queue(session, tool); out_err: - perf_session__warn_about_errors(session, ops); + perf_session__warn_about_errors(session, tool); perf_session_free_sample_buffers(session); return err; } int perf_session__process_events(struct perf_session *self, - struct perf_event_ops *ops) + struct perf_tool *tool) { int err; @@ -1146,9 +1181,9 @@ int perf_session__process_events(struct perf_session *self, err = __perf_session__process_events(self, self->header.data_offset, self->header.data_size, - self->size, ops); + self->size, tool); else - err = __perf_session__process_pipe_events(self, ops); + err = __perf_session__process_pipe_events(self, tool); return err; } @@ -1163,9 +1198,8 @@ bool perf_session__has_traces(struct perf_session *self, const char *msg) return true; } -int 
perf_session__set_kallsyms_ref_reloc_sym(struct map **maps, - const char *symbol_name, - u64 addr) +int maps__set_kallsyms_ref_reloc_sym(struct map **maps, + const char *symbol_name, u64 addr) { char *bracket; enum map_type i; @@ -1224,6 +1258,27 @@ size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp) return ret; } +size_t perf_session__fprintf(struct perf_session *session, FILE *fp) +{ + /* + * FIXME: Here we have to actually print all the machines in this + * session, not just the host... + */ + return machine__fprintf(&session->host_machine, fp); +} + +void perf_session__remove_thread(struct perf_session *session, + struct thread *th) +{ + /* + * FIXME: This one makes no sense, we need to remove the thread from + * the machine it belongs to, perf_session can have many machines, so + * doing it always on ->host_machine is wrong. Fix when auditing all + * the 'perf kvm' code. + */ + machine__remove_thread(&session->host_machine, th); +} + struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session, unsigned int type) { @@ -1236,17 +1291,16 @@ struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session, return NULL; } -void perf_session__print_ip(union perf_event *event, - struct perf_sample *sample, - struct perf_session *session, - int print_sym, int print_dso) +void perf_event__print_ip(union perf_event *event, struct perf_sample *sample, + struct machine *machine, struct perf_evsel *evsel, + int print_sym, int print_dso) { struct addr_location al; const char *symname, *dsoname; - struct callchain_cursor *cursor = &session->callchain_cursor; + struct callchain_cursor *cursor = &evsel->hists.callchain_cursor; struct callchain_cursor_node *node; - if (perf_event__preprocess_sample(event, session, &al, sample, + if (perf_event__preprocess_sample(event, machine, &al, sample, NULL) < 0) { error("problem processing %d event, skipping it.\n", event->header.type); @@ -1255,7 +1309,7 @@ void perf_session__print_ip(union perf_event *event, if (symbol_conf.use_callchain && sample->callchain) { - if (perf_session__resolve_callchain(session, al.thread, + if (machine__resolve_callchain(machine, evsel, al.thread, sample->callchain, NULL) != 0) { if (verbose) error("Failed to resolve callchain. 
Skipping\n"); diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h index 6e393c9..37bc383 100644 --- a/tools/perf/util/session.h +++ b/tools/perf/util/session.h @@ -30,9 +30,6 @@ struct perf_session { struct perf_header header; unsigned long size; unsigned long mmap_window; - struct rb_root threads; - struct list_head dead_threads; - struct thread *last_match; struct machine host_machine; struct rb_root machines; struct perf_evlist *evlist; @@ -53,65 +50,31 @@ struct perf_session { int cwdlen; char *cwd; struct ordered_samples ordered_samples; - struct callchain_cursor callchain_cursor; - char filename[0]; + char filename[1]; }; -struct perf_evsel; -struct perf_event_ops; - -typedef int (*event_sample)(union perf_event *event, struct perf_sample *sample, - struct perf_evsel *evsel, struct perf_session *session); -typedef int (*event_op)(union perf_event *self, struct perf_sample *sample, - struct perf_session *session); -typedef int (*event_synth_op)(union perf_event *self, - struct perf_session *session); -typedef int (*event_op2)(union perf_event *self, struct perf_session *session, - struct perf_event_ops *ops); - -struct perf_event_ops { - event_sample sample; - event_op mmap, - comm, - fork, - exit, - lost, - read, - throttle, - unthrottle; - event_synth_op attr, - event_type, - tracing_data, - build_id; - event_op2 finished_round; - bool ordered_samples; - bool ordering_requires_timestamps; -}; +struct perf_tool; struct perf_session *perf_session__new(const char *filename, int mode, bool force, bool repipe, - struct perf_event_ops *ops); + struct perf_tool *tool); void perf_session__delete(struct perf_session *self); void perf_event_header__bswap(struct perf_event_header *self); int __perf_session__process_events(struct perf_session *self, u64 data_offset, u64 data_size, u64 size, - struct perf_event_ops *ops); + struct perf_tool *tool); int perf_session__process_events(struct perf_session *self, - struct perf_event_ops *event_ops); + struct perf_tool *tool); -int perf_session__resolve_callchain(struct perf_session *self, +int perf_session__resolve_callchain(struct perf_session *self, struct perf_evsel *evsel, struct thread *thread, struct ip_callchain *chain, struct symbol **parent); bool perf_session__has_traces(struct perf_session *self, const char *msg); -int perf_session__set_kallsyms_ref_reloc_sym(struct map **maps, - const char *symbol_name, - u64 addr); - void mem_bswap_64(void *src, int byte_size); void perf_event__attr_swap(struct perf_event_attr *attr); @@ -144,12 +107,16 @@ struct machine *perf_session__findnew_machine(struct perf_session *self, pid_t p static inline void perf_session__process_machines(struct perf_session *self, + struct perf_tool *tool, machine__process_t process) { - process(&self->host_machine, self); - return machines__process(&self->machines, process, self); + process(&self->host_machine, tool); + return machines__process(&self->machines, process, tool); } +struct thread *perf_session__findnew(struct perf_session *self, pid_t pid); +size_t perf_session__fprintf(struct perf_session *self, FILE *fp); + size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp); size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, @@ -167,13 +134,20 @@ static inline int perf_session__parse_sample(struct perf_session *session, session->header.needs_swap); } +static inline int perf_session__synthesize_sample(struct perf_session *session, + union perf_event *event, + const struct perf_sample *sample) +{ + return 
perf_event__synthesize_sample(event, session->sample_type, + sample, session->header.needs_swap); +} + struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session, unsigned int type); -void perf_session__print_ip(union perf_event *event, - struct perf_sample *sample, - struct perf_session *session, - int print_sym, int print_dso); +void perf_event__print_ip(union perf_event *event, struct perf_sample *sample, + struct machine *machine, struct perf_evsel *evsel, + int print_sym, int print_dso); int perf_session__cpu_bitmap(struct perf_session *session, const char *cpu_list, unsigned long *cpu_bitmap); diff --git a/tools/perf/util/setup.py b/tools/perf/util/setup.py index 95d3700..36d4c56 100644 --- a/tools/perf/util/setup.py +++ b/tools/perf/util/setup.py @@ -27,7 +27,8 @@ build_tmp = getenv('PYTHON_EXTBUILD_TMP') perf = Extension('perf', sources = ['util/python.c', 'util/ctype.c', 'util/evlist.c', 'util/evsel.c', 'util/cpumap.c', 'util/thread_map.c', - 'util/util.c', 'util/xyarray.c', 'util/cgroup.c'], + 'util/util.c', 'util/xyarray.c', 'util/cgroup.c', + 'util/debugfs.c'], include_dirs = ['util/include'], extra_compile_args = cflags, ) diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 632b50c..215d50f 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -1757,7 +1757,7 @@ static int map_groups__set_modules_path_dir(struct map_groups *mg, struct stat st; /*sshfs might return bad dent->d_type, so we have to stat*/ - sprintf(path, "%s/%s", dir_name, dent->d_name); + snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name); if (stat(path, &st)) continue; @@ -1766,8 +1766,6 @@ static int map_groups__set_modules_path_dir(struct map_groups *mg, !strcmp(dent->d_name, "..")) continue; - snprintf(path, sizeof(path), "%s/%s", - dir_name, dent->d_name); ret = map_groups__set_modules_path_dir(mg, path); if (ret < 0) goto out; @@ -1788,9 +1786,6 @@ static int map_groups__set_modules_path_dir(struct map_groups *mg, if (map == NULL) continue; - snprintf(path, sizeof(path), "%s/%s", - dir_name, dent->d_name); - long_name = strdup(path); if (long_name == NULL) { ret = -1; @@ -2609,10 +2604,10 @@ int symbol__init(void) symbol_conf.initialized = true; return 0; -out_free_dso_list: - strlist__delete(symbol_conf.dso_list); out_free_comm_list: strlist__delete(symbol_conf.comm_list); +out_free_dso_list: + strlist__delete(symbol_conf.dso_list); return -1; } diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index 29f8d74..123c2e1 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h @@ -68,6 +68,7 @@ struct strlist; struct symbol_conf { unsigned short priv_size; + unsigned short nr_events; bool try_vmlinux_path, use_modules, sort_by_name, diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c index d5d3b22..fb4b7ea 100644 --- a/tools/perf/util/thread.c +++ b/tools/perf/util/thread.c @@ -61,7 +61,7 @@ static size_t thread__fprintf(struct thread *self, FILE *fp) map_groups__fprintf(&self->mg, verbose, fp); } -struct thread *perf_session__findnew(struct perf_session *self, pid_t pid) +struct thread *machine__findnew_thread(struct machine *self, pid_t pid) { struct rb_node **p = &self->threads.rb_node; struct rb_node *parent = NULL; @@ -125,12 +125,12 @@ int thread__fork(struct thread *self, struct thread *parent) return 0; } -size_t perf_session__fprintf(struct perf_session *self, FILE *fp) +size_t machine__fprintf(struct machine *machine, FILE *fp) { size_t ret = 0; struct rb_node *nd; - for (nd = 
rb_first(&self->threads); nd; nd = rb_next(nd)) { + for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) { struct thread *pos = rb_entry(nd, struct thread, rb_node); ret += thread__fprintf(pos, fp); diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h index e5f2401..70c2c13 100644 --- a/tools/perf/util/thread.h +++ b/tools/perf/util/thread.h @@ -18,16 +18,14 @@ struct thread { int comm_len; }; -struct perf_session; +struct machine; void thread__delete(struct thread *self); int thread__set_comm(struct thread *self, const char *comm); int thread__comm_len(struct thread *self); -struct thread *perf_session__findnew(struct perf_session *self, pid_t pid); void thread__insert_map(struct thread *self, struct map *map); int thread__fork(struct thread *self, struct thread *parent); -size_t perf_session__fprintf(struct perf_session *self, FILE *fp); static inline struct map *thread__find_map(struct thread *self, enum map_type type, u64 addr) @@ -35,14 +33,12 @@ static inline struct map *thread__find_map(struct thread *self, return self ? map_groups__find(&self->mg, type, addr) : NULL; } -void thread__find_addr_map(struct thread *self, - struct perf_session *session, u8 cpumode, - enum map_type type, pid_t pid, u64 addr, +void thread__find_addr_map(struct thread *thread, struct machine *machine, + u8 cpumode, enum map_type type, u64 addr, struct addr_location *al); -void thread__find_addr_location(struct thread *self, - struct perf_session *session, u8 cpumode, - enum map_type type, pid_t pid, u64 addr, +void thread__find_addr_location(struct thread *thread, struct machine *machine, + u8 cpumode, enum map_type type, u64 addr, struct addr_location *al, symbol_filter_t filter); #endif /* __PERF_THREAD_H */ diff --git a/tools/perf/util/tool.h b/tools/perf/util/tool.h new file mode 100644 index 0000000..b0e1aad --- /dev/null +++ b/tools/perf/util/tool.h @@ -0,0 +1,50 @@ +#ifndef __PERF_TOOL_H +#define __PERF_TOOL_H + +#include <stdbool.h> + +struct perf_session; +union perf_event; +struct perf_evlist; +struct perf_evsel; +struct perf_sample; +struct perf_tool; +struct machine; + +typedef int (*event_sample)(struct perf_tool *tool, union perf_event *event, + struct perf_sample *sample, + struct perf_evsel *evsel, struct machine *machine); + +typedef int (*event_op)(struct perf_tool *tool, union perf_event *event, + struct perf_sample *sample, struct machine *machine); + +typedef int (*event_attr_op)(union perf_event *event, + struct perf_evlist **pevlist); +typedef int (*event_simple_op)(struct perf_tool *tool, union perf_event *event); + +typedef int (*event_synth_op)(union perf_event *event, + struct perf_session *session); + +typedef int (*event_op2)(struct perf_tool *tool, union perf_event *event, + struct perf_session *session); + +struct perf_tool { + event_sample sample, + read; + event_op mmap, + comm, + fork, + exit, + lost, + throttle, + unthrottle; + event_attr_op attr; + event_synth_op tracing_data; + event_simple_op event_type; + event_op2 finished_round, + build_id; + bool ordered_samples; + bool ordering_requires_timestamps; +}; + +#endif /* __PERF_TOOL_H */ diff --git a/tools/perf/util/top.h b/tools/perf/util/top.h index 3996509..a248f3c 100644 --- a/tools/perf/util/top.h +++ b/tools/perf/util/top.h @@ -1,15 +1,17 @@ #ifndef __PERF_TOP_H #define __PERF_TOP_H 1 +#include "tool.h" #include "types.h" -#include "../perf.h" #include <stddef.h> +#include <stdbool.h> struct perf_evlist; struct perf_evsel; struct perf_session; struct perf_top { + struct perf_tool tool; struct 
perf_evlist *evlist; /* * Symbols will be added here in perf_event__process_sample and will @@ -23,10 +25,26 @@ struct perf_top { int freq; pid_t target_pid, target_tid; bool hide_kernel_symbols, hide_user_symbols, zero; + bool system_wide; + bool use_tui, use_stdio; + bool sort_has_symbols; + bool dont_use_callchains; + bool kptr_restrict_warned; + bool vmlinux_warned; + bool inherit; + bool group; + bool sample_id_all_avail; + bool dump_symtab; const char *cpu_list; struct hist_entry *sym_filter_entry; struct perf_evsel *sym_evsel; struct perf_session *session; + struct winsize winsize; + unsigned int mmap_pages; + int default_interval; + int realtime_prio; + int sym_pcnt_filter; + const char *sym_filter; }; size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size); diff --git a/tools/perf/util/trace-event-info.c b/tools/perf/util/trace-event-info.c index d2655f0..ac6830d 100644 --- a/tools/perf/util/trace-event-info.c +++ b/tools/perf/util/trace-event-info.c @@ -18,7 +18,8 @@ * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ -#define _GNU_SOURCE +#include <ctype.h> +#include "util.h" #include <dirent.h> #include <mntent.h> #include <stdio.h> @@ -31,7 +32,6 @@ #include <pthread.h> #include <fcntl.h> #include <unistd.h> -#include <ctype.h> #include <errno.h> #include <stdbool.h> #include <linux/list.h> @@ -44,10 +44,6 @@ #define VERSION "0.5" -#define _STR(x) #x -#define STR(x) _STR(x) -#define MAX_PATH 256 - #define TRACE_CTRL "tracing_on" #define TRACE "trace" #define AVAILABLE "available_tracers" @@ -73,26 +69,6 @@ struct events { }; - -static void die(const char *fmt, ...) -{ - va_list ap; - int ret = errno; - - if (errno) - perror("perf"); - else - ret = -1; - - va_start(ap, fmt); - fprintf(stderr, " "); - vfprintf(stderr, fmt, ap); - va_end(ap); - - fprintf(stderr, "\n"); - exit(ret); -} - void *malloc_or_die(unsigned int size) { void *data; diff --git a/tools/perf/util/trace-event-scripting.c b/tools/perf/util/trace-event-scripting.c index c9dcbec..a3fdf55 100644 --- a/tools/perf/util/trace-event-scripting.c +++ b/tools/perf/util/trace-event-scripting.c @@ -39,7 +39,7 @@ static int stop_script_unsupported(void) static void process_event_unsupported(union perf_event *event __unused, struct perf_sample *sample __unused, struct perf_evsel *evsel __unused, - struct perf_session *session __unused, + struct machine *machine __unused, struct thread *thread __unused) { } diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h index a841008..58ae14c 100644 --- a/tools/perf/util/trace-event.h +++ b/tools/perf/util/trace-event.h @@ -3,7 +3,11 @@ #include <stdbool.h> #include "parse-events.h" -#include "session.h" + +struct machine; +struct perf_sample; +union perf_event; +struct thread; #define __unused __attribute__((unused)) @@ -292,7 +296,7 @@ struct scripting_ops { void (*process_event) (union perf_event *event, struct perf_sample *sample, struct perf_evsel *evsel, - struct perf_session *session, + struct machine *machine, struct thread *thread); int (*generate_script) (const char *outfile); }; diff --git a/tools/perf/util/ui/browsers/annotate.c b/tools/perf/util/ui/browsers/annotate.c index 0575905..295a9c9 100644 --- a/tools/perf/util/ui/browsers/annotate.c +++ b/tools/perf/util/ui/browsers/annotate.c @@ -224,7 +224,7 @@ static bool annotate_browser__toggle_source(struct annotate_browser *browser) } static int annotate_browser__run(struct annotate_browser *self, int evidx, - int nr_events, void(*timer)(void *arg), + 
void(*timer)(void *arg), void *arg, int delay_secs) { struct rb_node *nd = NULL; @@ -328,8 +328,7 @@ static int annotate_browser__run(struct annotate_browser *self, int evidx, notes = symbol__annotation(target); pthread_mutex_lock(&notes->lock); - if (notes->src == NULL && - symbol__alloc_hist(target, nr_events) < 0) { + if (notes->src == NULL && symbol__alloc_hist(target) < 0) { pthread_mutex_unlock(&notes->lock); ui__warning("Not enough memory for annotating '%s' symbol!\n", target->name); @@ -337,7 +336,7 @@ static int annotate_browser__run(struct annotate_browser *self, int evidx, } pthread_mutex_unlock(&notes->lock); - symbol__tui_annotate(target, ms->map, evidx, nr_events, + symbol__tui_annotate(target, ms->map, evidx, timer, arg, delay_secs); } continue; @@ -358,15 +357,15 @@ out: return key; } -int hist_entry__tui_annotate(struct hist_entry *he, int evidx, int nr_events, +int hist_entry__tui_annotate(struct hist_entry *he, int evidx, void(*timer)(void *arg), void *arg, int delay_secs) { - return symbol__tui_annotate(he->ms.sym, he->ms.map, evidx, nr_events, + return symbol__tui_annotate(he->ms.sym, he->ms.map, evidx, timer, arg, delay_secs); } int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx, - int nr_events, void(*timer)(void *arg), void *arg, + void(*timer)(void *arg), void *arg, int delay_secs) { struct objdump_line *pos, *n; @@ -419,8 +418,7 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx, browser.b.nr_entries = browser.nr_entries; browser.b.entries = &notes->src->source, browser.b.width += 18; /* Percentage */ - ret = annotate_browser__run(&browser, evidx, nr_events, - timer, arg, delay_secs); + ret = annotate_browser__run(&browser, evidx, timer, arg, delay_secs); list_for_each_entry_safe(pos, n, &notes->src->source, node) { list_del(&pos->node); objdump_line__free(pos); diff --git a/tools/perf/util/ui/browsers/hists.c b/tools/perf/util/ui/browsers/hists.c index d0c94b4..1212a38 100644 --- a/tools/perf/util/ui/browsers/hists.c +++ b/tools/perf/util/ui/browsers/hists.c @@ -1020,7 +1020,7 @@ do_annotate: * Don't let this be freed, say, by hists__decay_entry. */ he->used = true; - err = hist_entry__tui_annotate(he, evsel->idx, nr_events, + err = hist_entry__tui_annotate(he, evsel->idx, timer, arg, delay_secs); he->used = false; ui_browser__update_nr_entries(&browser->b, browser->hists->nr_entries); diff --git a/tools/perf/util/ui/progress.c b/tools/perf/util/ui/progress.c index 295e366..13aa64e 100644 --- a/tools/perf/util/ui/progress.c +++ b/tools/perf/util/ui/progress.c @@ -14,6 +14,9 @@ void ui_progress__update(u64 curr, u64 total, const char *title) if (use_browser <= 0) return; + if (total == 0) + return; + ui__refresh_dimensions(true); pthread_mutex_lock(&ui__lock); y = SLtt_Screen_Rows / 2 - 2; diff --git a/tools/perf/util/usage.c b/tools/perf/util/usage.c index e16bf9a7..d76d1c0 100644 --- a/tools/perf/util/usage.c +++ b/tools/perf/util/usage.c @@ -1,5 +1,8 @@ /* - * GIT - The information manager from hell + * usage.c + * + * Various reporting routines. + * Originally copied from GIT source. * * Copyright (C) Linus Torvalds, 2005 */ diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h index 0128906..37be34d 100644 --- a/tools/perf/util/util.h +++ b/tools/perf/util/util.h @@ -245,4 +245,15 @@ int readn(int fd, void *buf, size_t size); #define _STR(x) #x #define STR(x) _STR(x) +/* + * Determine whether some value is a power of two, where zero is + * *not* considered a power of two.
+ */ + +static inline __attribute__((const)) +bool is_power_of_2(unsigned long n) +{ + return (n != 0 && ((n & (n - 1)) == 0)); +} + #endif diff --git a/tools/perf/util/values.c b/tools/perf/util/values.c index bdd3347..697c8b4 100644 --- a/tools/perf/util/values.c +++ b/tools/perf/util/values.c @@ -32,6 +32,7 @@ void perf_read_values_destroy(struct perf_read_values *values) for (i = 0; i < values->threads; i++) free(values->value[i]); + free(values->value); free(values->pid); free(values->tid); free(values->counterrawid);
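The thread running through the session.c, tool.h and top.h hunks above is the new struct perf_tool callback table: perf_tool__fill_defaults() points every callback the caller left NULL at a matching stub before any event is processed, so perf_session_deliver_event() can invoke tool->sample(), tool->mmap() and the rest unconditionally, and each callback now receives the tool pointer itself, which lets a consumer such as perf_top embed the table as the first member of its own state struct and recover that state inside the callback. The sketch below illustrates those two idioms in isolation; every type and function name in it (tool, top, event_stub, deliver, ...) is an invented stand-in for this example, not the perf code itself.

/* Minimal sketch of the fill-defaults + embedded-callback-table idioms.
 * Hypothetical types, not the perf definitions. */
#include <stdio.h>
#include <stddef.h>

struct event { int type; };
struct tool;

typedef int (*event_op)(struct tool *tool, struct event *event);

/* Callback table: a consumer sets only the callbacks it cares about. */
struct tool {
	event_op sample;
	event_op comm;
};

/* Default stub: note the event and ignore it. */
static int event_stub(struct tool *tool, struct event *event)
{
	(void)tool;
	(void)event;
	fprintf(stderr, ": unhandled!\n");
	return 0;
}

/* Point every unset callback at a stub so delivery never NULL-checks. */
static void tool_fill_defaults(struct tool *tool)
{
	if (tool->sample == NULL)
		tool->sample = event_stub;
	if (tool->comm == NULL)
		tool->comm = event_stub;
}

/* Dispatch unconditionally through the table. */
static int deliver(struct tool *tool, struct event *event)
{
	switch (event->type) {
	case 0: return tool->sample(tool, event);
	case 1: return tool->comm(tool, event);
	default: return -1;
	}
}

/* A consumer embeds the table as the first member of its state struct
 * (as struct perf_top does above), so a callback can get back to that
 * state from the tool pointer it is handed. */
struct top {
	struct tool tool;	/* must stay first for the cast below */
	int nr_samples;
};

static int top_sample(struct tool *tool, struct event *event)
{
	struct top *top = (struct top *)tool;	/* first-member cast */
	(void)event;
	top->nr_samples++;
	return 0;
}

int main(void)
{
	struct top top = { .tool = { .sample = top_sample } };
	struct event sample = { .type = 0 }, comm = { .type = 1 };

	tool_fill_defaults(&top.tool);	/* .comm now points at event_stub */
	deliver(&top.tool, &sample);	/* increments top.nr_samples */
	deliver(&top.tool, &comm);	/* stub prints ": unhandled!" */
	printf("%d sample(s)\n", top.nr_samples);
	return 0;
}

Compared with the removed perf_event_ops, whose callbacks were handed a perf_session pointer and nothing else, passing the tool pointer back to each callback is what allows per-invocation state like the enlarged struct perf_top to stay off the global scope, and the first-member placement keeps the cast from the table pointer to the enclosing struct well defined.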