Diffstat (limited to 'arch/arm/kernel/perf_event.c')
-rw-r--r-- | arch/arm/kernel/perf_event.c | 52 |
1 file changed, 41 insertions, 11 deletions
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 5bb91bf..186c8cb 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -180,7 +180,7 @@ armpmu_event_set_period(struct perf_event *event,
 u64
 armpmu_event_update(struct perf_event *event,
 		    struct hw_perf_event *hwc,
-		    int idx, int overflow)
+		    int idx)
 {
 	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 	u64 delta, prev_raw_count, new_raw_count;
@@ -193,13 +193,7 @@ again:
 			     new_raw_count) != prev_raw_count)
 		goto again;
 
-	new_raw_count &= armpmu->max_period;
-	prev_raw_count &= armpmu->max_period;
-
-	if (overflow)
-		delta = armpmu->max_period - prev_raw_count + new_raw_count + 1;
-	else
-		delta = new_raw_count - prev_raw_count;
+	delta = (new_raw_count - prev_raw_count) & armpmu->max_period;
 
 	local64_add(delta, &event->count);
 	local64_sub(delta, &hwc->period_left);
@@ -216,7 +210,7 @@ armpmu_read(struct perf_event *event)
 	if (hwc->idx < 0)
 		return;
 
-	armpmu_event_update(event, hwc, hwc->idx, 0);
+	armpmu_event_update(event, hwc, hwc->idx);
 }
 
 static void
@@ -232,7 +226,7 @@ armpmu_stop(struct perf_event *event, int flags)
 	if (!(hwc->state & PERF_HES_STOPPED)) {
 		armpmu->disable(hwc, hwc->idx);
 		barrier(); /* why? */
-		armpmu_event_update(event, hwc, hwc->idx, 0);
+		armpmu_event_update(event, hwc, hwc->idx);
 		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
 	}
 }
@@ -518,7 +512,13 @@ __hw_perf_event_init(struct perf_event *event)
 	hwc->config_base	|= (unsigned long)mapping;
 
 	if (!hwc->sample_period) {
-		hwc->sample_period  = armpmu->max_period;
+		/*
+		 * For non-sampling runs, limit the sample_period to half
+		 * of the counter width. That way, the new counter value
+		 * is far less likely to overtake the previous one unless
+		 * you have some serious IRQ latency issues.
+		 */
+		hwc->sample_period  = armpmu->max_period >> 1;
 		hwc->last_period    = hwc->sample_period;
 		local64_set(&hwc->period_left, hwc->sample_period);
 	}
@@ -539,6 +539,10 @@ static int armpmu_event_init(struct perf_event *event)
 	int err = 0;
 	atomic_t *active_events = &armpmu->active_events;
 
+	/* does not support taken branch sampling */
+	if (has_branch_stack(event))
+		return -EOPNOTSUPP;
+
 	if (armpmu->map_event(event) == -ENOENT)
 		return -ENOENT;
 
@@ -680,6 +684,28 @@ static void __init cpu_pmu_init(struct arm_pmu *armpmu)
 }
 
 /*
+ * PMU hardware loses all context when a CPU goes offline.
+ * When a CPU is hotplugged back in, since some hardware registers are
+ * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
+ * junk values out of them.
+ */
+static int __cpuinit pmu_cpu_notify(struct notifier_block *b,
+				    unsigned long action, void *hcpu)
+{
+	if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
+		return NOTIFY_DONE;
+
+	if (cpu_pmu && cpu_pmu->reset)
+		cpu_pmu->reset(NULL);
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata pmu_cpu_notifier = {
+	.notifier_call = pmu_cpu_notify,
+};
+
+/*
  * CPU PMU identification and registration.
  */
 static int __init
@@ -712,6 +738,9 @@ init_hw_perf_events(void)
 		case 0xC0F0:	/* Cortex-A15 */
 			cpu_pmu = armv7_a15_pmu_init();
 			break;
+		case 0xC070:	/* Cortex-A7 */
+			cpu_pmu = armv7_a7_pmu_init();
+			break;
 		}
 	/* Intel CPUs [xscale]. */
 	} else if (0x69 == implementor) {
@@ -730,6 +759,7 @@ init_hw_perf_events(void)
 		pr_info("enabled with %s PMU driver, %d counters available\n",
 			cpu_pmu->name, cpu_pmu->num_events);
 		cpu_pmu_init(cpu_pmu);
+		register_cpu_notifier(&pmu_cpu_notifier);
 		armpmu_register(cpu_pmu, "cpu", PERF_TYPE_RAW);
 	} else {
 		pr_info("no hardware support available\n");
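
Note on the new delta computation: the patch drops the explicit overflow flag and
replaces it with a single masked subtraction,
delta = (new_raw_count - prev_raw_count) & armpmu->max_period. The masking yields
the correct event count whether or not the hardware counter wrapped between the two
reads, as long as it advanced by no more than max_period events; halving the default
sample_period in __hw_perf_event_init is what keeps that assumption safe in practice.
The stand-alone snippet below is only a sketch of the arithmetic, not kernel code;
the counter values and the 32-bit max_period are made-up illustration data.

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* Assume a 32-bit counter: max_period = 0xffffffff and the
		 * counter wraps from 0xffffffff back to 0. */
		const uint64_t max_period = 0xffffffffULL;

		uint64_t prev = 0xfffffff0ULL;	/* last saved value, near the wrap */
		uint64_t new  = 0x00000010ULL;	/* value read after the wrap */

		/* Masked subtraction: 16 events before the wrap plus 16 after
		 * gives 32, recovered without consulting any overflow flag. */
		uint64_t delta = (new - prev) & max_period;

		printf("delta = %llu\n", (unsigned long long)delta); /* prints 32 */
		return 0;
	}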