Diffstat (limited to 'arch/mips/kernel')
41 files changed, 2049 insertions, 146 deletions
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile index 06f8482..22b2e0e 100644 --- a/arch/mips/kernel/Makefile +++ b/arch/mips/kernel/Makefile @@ -96,10 +96,14 @@ obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o obj-$(CONFIG_EARLY_PRINTK) += early_printk.o obj-$(CONFIG_SPINLOCK_TEST) += spinlock_test.o +obj-$(CONFIG_OF) += prom.o + CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(KBUILD_CFLAGS) -Wa,-mdaddi -c -o /dev/null -xc /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi) obj-$(CONFIG_HAVE_STD_PC_SERIAL_PORT) += 8250-platform.o obj-$(CONFIG_MIPS_CPUFREQ) += cpufreq/ +obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o + CPPFLAGS_vmlinux.lds := $(KBUILD_CFLAGS) diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c index 0176ed0..32103cc 100644 --- a/arch/mips/kernel/branch.c +++ b/arch/mips/kernel/branch.c @@ -40,7 +40,6 @@ int __compute_return_epc(struct pt_regs *regs) return -EFAULT; } - regs->regs[0] = 0; switch (insn.i_format.opcode) { /* * jr and jalr are in r_format format. diff --git a/arch/mips/kernel/cevt-bcm1480.c b/arch/mips/kernel/cevt-bcm1480.c index bfea327..36c3898 100644 --- a/arch/mips/kernel/cevt-bcm1480.c +++ b/arch/mips/kernel/cevt-bcm1480.c @@ -19,6 +19,7 @@ #include <linux/interrupt.h> #include <linux/percpu.h> #include <linux/smp.h> +#include <linux/irq.h> #include <asm/addrspace.h> #include <asm/io.h> diff --git a/arch/mips/kernel/cevt-ds1287.c b/arch/mips/kernel/cevt-ds1287.c index 00a4da2..939157e 100644 --- a/arch/mips/kernel/cevt-ds1287.c +++ b/arch/mips/kernel/cevt-ds1287.c @@ -21,6 +21,7 @@ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/mc146818rtc.h> +#include <linux/irq.h> #include <asm/time.h> diff --git a/arch/mips/kernel/cevt-gt641xx.c b/arch/mips/kernel/cevt-gt641xx.c index 392ef37..339f363 100644 --- a/arch/mips/kernel/cevt-gt641xx.c +++ b/arch/mips/kernel/cevt-gt641xx.c @@ -21,6 +21,7 @@ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/spinlock.h> +#include <linux/irq.h> #include <asm/gt64120.h> #include <asm/time.h> diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c index 2a4d50f..98c5a97 100644 --- a/arch/mips/kernel/cevt-r4k.c +++ b/arch/mips/kernel/cevt-r4k.c @@ -10,6 +10,7 @@ #include <linux/interrupt.h> #include <linux/percpu.h> #include <linux/smp.h> +#include <linux/irq.h> #include <asm/smtc_ipi.h> #include <asm/time.h> @@ -31,7 +32,7 @@ static int mips_next_event(unsigned long delta, cnt = read_c0_count(); cnt += delta; write_c0_compare(cnt); - res = ((int)(read_c0_count() - cnt) > 0) ? -ETIME : 0; + res = ((int)(read_c0_count() - cnt) >= 0) ? 
-ETIME : 0; return res; } diff --git a/arch/mips/kernel/cevt-sb1250.c b/arch/mips/kernel/cevt-sb1250.c index da78eea..590c54f 100644 --- a/arch/mips/kernel/cevt-sb1250.c +++ b/arch/mips/kernel/cevt-sb1250.c @@ -17,6 +17,7 @@ */ #include <linux/clockchips.h> #include <linux/interrupt.h> +#include <linux/irq.h> #include <linux/percpu.h> #include <linux/smp.h> diff --git a/arch/mips/kernel/cevt-smtc.c b/arch/mips/kernel/cevt-smtc.c index b102e4f..2e72d30 100644 --- a/arch/mips/kernel/cevt-smtc.c +++ b/arch/mips/kernel/cevt-smtc.c @@ -11,6 +11,7 @@ #include <linux/interrupt.h> #include <linux/percpu.h> #include <linux/smp.h> +#include <linux/irq.h> #include <asm/smtc_ipi.h> #include <asm/time.h> diff --git a/arch/mips/kernel/cevt-txx9.c b/arch/mips/kernel/cevt-txx9.c index 218ee6b..0b73773 100644 --- a/arch/mips/kernel/cevt-txx9.c +++ b/arch/mips/kernel/cevt-txx9.c @@ -13,6 +13,7 @@ */ #include <linux/init.h> #include <linux/interrupt.h> +#include <linux/irq.h> #include <asm/time.h> #include <asm/txx9tmr.h> diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index b1b304e..68dae7b 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c @@ -25,6 +25,8 @@ #include <asm/system.h> #include <asm/watch.h> #include <asm/spram.h> +#include <asm/uaccess.h> + /* * Not all of the MIPS CPUs have the "wait" instruction available. Moreover, * the implementation of the "wait" feature differs between CPU families. This @@ -181,12 +183,13 @@ void __init check_wait(void) case CPU_5KC: case CPU_25KF: case CPU_PR4450: - case CPU_BCM3302: - case CPU_BCM6338: - case CPU_BCM6348: - case CPU_BCM6358: + case CPU_BMIPS3300: + case CPU_BMIPS4350: + case CPU_BMIPS4380: + case CPU_BMIPS5000: case CPU_CAVIUM_OCTEON: case CPU_CAVIUM_OCTEON_PLUS: + case CPU_CAVIUM_OCTEON2: case CPU_JZRISC: cpu_wait = r4k_wait; break; @@ -902,35 +905,36 @@ static inline void cpu_probe_broadcom(struct cpuinfo_mips *c, unsigned int cpu) { decode_configs(c); switch (c->processor_id & 0xff00) { - case PRID_IMP_BCM3302: - /* same as PRID_IMP_BCM6338 */ - c->cputype = CPU_BCM3302; - __cpu_name[cpu] = "Broadcom BCM3302"; - break; - case PRID_IMP_BCM4710: - c->cputype = CPU_BCM4710; - __cpu_name[cpu] = "Broadcom BCM4710"; - break; - case PRID_IMP_BCM6345: - c->cputype = CPU_BCM6345; - __cpu_name[cpu] = "Broadcom BCM6345"; - break; - case PRID_IMP_BCM6348: - c->cputype = CPU_BCM6348; - __cpu_name[cpu] = "Broadcom BCM6348"; - break; - case PRID_IMP_BCM4350: - switch (c->processor_id & 0xf0) { - case PRID_REV_BCM6358: - c->cputype = CPU_BCM6358; - __cpu_name[cpu] = "Broadcom BCM6358"; - break; - default: - c->cputype = CPU_UNKNOWN; - break; + case PRID_IMP_BMIPS32_REV4: + case PRID_IMP_BMIPS32_REV8: + c->cputype = CPU_BMIPS32; + __cpu_name[cpu] = "Broadcom BMIPS32"; + break; + case PRID_IMP_BMIPS3300: + case PRID_IMP_BMIPS3300_ALT: + case PRID_IMP_BMIPS3300_BUG: + c->cputype = CPU_BMIPS3300; + __cpu_name[cpu] = "Broadcom BMIPS3300"; + break; + case PRID_IMP_BMIPS43XX: { + int rev = c->processor_id & 0xff; + + if (rev >= PRID_REV_BMIPS4380_LO && + rev <= PRID_REV_BMIPS4380_HI) { + c->cputype = CPU_BMIPS4380; + __cpu_name[cpu] = "Broadcom BMIPS4380"; + } else { + c->cputype = CPU_BMIPS4350; + __cpu_name[cpu] = "Broadcom BMIPS4350"; } break; } + case PRID_IMP_BMIPS5000: + c->cputype = CPU_BMIPS5000; + __cpu_name[cpu] = "Broadcom BMIPS5000"; + c->options |= MIPS_CPU_ULRI; + break; + } } static inline void cpu_probe_cavium(struct cpuinfo_mips *c, unsigned int cpu) @@ -953,6 +957,12 @@ platform: if (cpu == 0) 
__elf_platform = "octeon"; break; + case PRID_IMP_CAVIUM_CN63XX: + c->cputype = CPU_CAVIUM_OCTEON2; + __cpu_name[cpu] = "Cavium Octeon II"; + if (cpu == 0) + __elf_platform = "octeon2"; + break; default: printk(KERN_INFO "Unknown Octeon chip!\n"); c->cputype = CPU_UNKNOWN; @@ -976,6 +986,12 @@ static inline void cpu_probe_ingenic(struct cpuinfo_mips *c, unsigned int cpu) } } +#ifdef CONFIG_64BIT +/* For use by uaccess.h */ +u64 __ua_limit; +EXPORT_SYMBOL(__ua_limit); +#endif + const char *__cpu_name[NR_CPUS]; const char *__elf_platform; @@ -1053,6 +1069,11 @@ __cpuinit void cpu_probe(void) c->srsets = 1; cpu_probe_vmbits(c); + +#ifdef CONFIG_64BIT + if (cpu == 0) + __ua_limit = ~((1ull << cpu_vmbits) - 1); +#endif } __cpuinit void cpu_report(void) diff --git a/arch/mips/kernel/i8253.c b/arch/mips/kernel/i8253.c index 9479406..2392a7a2 100644 --- a/arch/mips/kernel/i8253.c +++ b/arch/mips/kernel/i8253.c @@ -9,6 +9,7 @@ #include <linux/module.h> #include <linux/smp.h> #include <linux/spinlock.h> +#include <linux/irq.h> #include <asm/delay.h> #include <asm/i8253.h> diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c index 2779911..c58176c 100644 --- a/arch/mips/kernel/i8259.c +++ b/arch/mips/kernel/i8259.c @@ -15,6 +15,7 @@ #include <linux/kernel.h> #include <linux/spinlock.h> #include <linux/sysdev.h> +#include <linux/irq.h> #include <asm/i8259.h> #include <asm/io.h> diff --git a/arch/mips/kernel/irq-gic.c b/arch/mips/kernel/irq-gic.c index b181f2f..1774271 100644 --- a/arch/mips/kernel/irq-gic.c +++ b/arch/mips/kernel/irq-gic.c @@ -3,12 +3,11 @@ #include <linux/bitmap.h> #include <linux/init.h> #include <linux/smp.h> +#include <linux/irq.h> #include <asm/io.h> #include <asm/gic.h> #include <asm/gcmpregs.h> -#include <asm/mips-boards/maltaint.h> -#include <asm/irq.h> #include <linux/hardirq.h> #include <asm-generic/bitops/find.h> @@ -131,7 +130,7 @@ static int gic_set_affinity(unsigned int irq, const struct cpumask *cpumask) int i; irq -= _irqbase; - pr_debug(KERN_DEBUG "%s(%d) called\n", __func__, irq); + pr_debug("%s(%d) called\n", __func__, irq); cpumask_and(&tmp, cpumask, cpu_online_mask); if (cpus_empty(tmp)) return -1; @@ -222,7 +221,7 @@ static void __init gic_basic_init(int numintrs, int numvpes, /* Setup specifics */ for (i = 0; i < mapsize; i++) { cpu = intrmap[i].cpunum; - if (cpu == X) + if (cpu == GIC_UNUSED) continue; if (cpu == 0 && i != 0 && intrmap[i].flags == 0) continue; diff --git a/arch/mips/kernel/irq-rm7000.c b/arch/mips/kernel/irq-rm7000.c index fb50cc7..9731e8b4 100644 --- a/arch/mips/kernel/irq-rm7000.c +++ b/arch/mips/kernel/irq-rm7000.c @@ -11,6 +11,7 @@ */ #include <linux/init.h> #include <linux/interrupt.h> +#include <linux/irq.h> #include <linux/kernel.h> #include <asm/irq_cpu.h> diff --git a/arch/mips/kernel/irq-rm9000.c b/arch/mips/kernel/irq-rm9000.c index b47e461..b7e4025 100644 --- a/arch/mips/kernel/irq-rm9000.c +++ b/arch/mips/kernel/irq-rm9000.c @@ -11,6 +11,7 @@ */ #include <linux/init.h> #include <linux/interrupt.h> +#include <linux/irq.h> #include <linux/kernel.h> #include <linux/module.h> diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c index c6345f5..4f93db5 100644 --- a/arch/mips/kernel/irq.c +++ b/arch/mips/kernel/irq.c @@ -151,6 +151,29 @@ void __init init_IRQ(void) #endif } +#ifdef DEBUG_STACKOVERFLOW +static inline void check_stack_overflow(void) +{ + unsigned long sp; + + __asm__ __volatile__("move %0, $sp" : "=r" (sp)); + sp &= THREAD_MASK; + + /* + * Check for stack overflow: is there less than STACK_WARN free? 
+ * STACK_WARN is defined as 1/8 of THREAD_SIZE by default. + */ + if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) { + printk("do_IRQ: stack overflow: %ld\n", + sp - sizeof(struct thread_info)); + dump_stack(); + } +} +#else +static inline void check_stack_overflow(void) {} +#endif + + /* * do_IRQ handles all normal device IRQ's (the special * SMP cross-CPU interrupts have their own specific @@ -159,6 +182,7 @@ void __init init_IRQ(void) void __irq_entry do_IRQ(unsigned int irq) { irq_enter(); + check_stack_overflow(); __DO_IRQ_SMTC_HOOK(irq); generic_handle_irq(irq); irq_exit(); diff --git a/arch/mips/kernel/irq_cpu.c b/arch/mips/kernel/irq_cpu.c index 55c8a3c..0262abe 100644 --- a/arch/mips/kernel/irq_cpu.c +++ b/arch/mips/kernel/irq_cpu.c @@ -30,6 +30,7 @@ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/kernel.h> +#include <linux/irq.h> #include <asm/irq_cpu.h> #include <asm/mipsregs.h> diff --git a/arch/mips/kernel/irq_txx9.c b/arch/mips/kernel/irq_txx9.c index 9b78029..95a96f6 100644 --- a/arch/mips/kernel/irq_txx9.c +++ b/arch/mips/kernel/irq_txx9.c @@ -16,6 +16,7 @@ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/types.h> +#include <linux/irq.h> #include <asm/txx9irq.h> struct txx9_irc_reg { diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c index 1f4e2fa..f4546e9 100644 --- a/arch/mips/kernel/kgdb.c +++ b/arch/mips/kernel/kgdb.c @@ -283,7 +283,7 @@ static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd, struct pt_regs *regs = args->regs; int trap = (regs->cp0_cause & 0x7c) >> 2; - /* Userpace events, ignore. */ + /* Userspace events, ignore. */ if (user_mode(regs)) return NOTIFY_DONE; diff --git a/arch/mips/kernel/kspd.c b/arch/mips/kernel/kspd.c index 80e2ba6..29811f0 100644 --- a/arch/mips/kernel/kspd.c +++ b/arch/mips/kernel/kspd.c @@ -251,7 +251,7 @@ void sp_work_handle_request(void) memset(&tz, 0, sizeof(tz)); if ((ret.retval = sp_syscall(__NR_gettimeofday, (int)&tv, (int)&tz, 0, 0)) == 0) - ret.retval = tv.tv_sec; + ret.retval = tv.tv_sec; break; case MTSP_SYSCALL_EXIT: diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c index c2dab14..876a75c 100644 --- a/arch/mips/kernel/linux32.c +++ b/arch/mips/kernel/linux32.c @@ -251,14 +251,15 @@ SYSCALL_DEFINE5(n32_msgrcv, int, msqid, u32, msgp, size_t, msgsz, SYSCALL_DEFINE1(32_personality, unsigned long, personality) { + unsigned int p = personality & 0xffffffff; int ret; - personality &= 0xffffffff; + if (personality(current->personality) == PER_LINUX32 && - personality == PER_LINUX) - personality = PER_LINUX32; - ret = sys_personality(personality); - if (ret == PER_LINUX32) - ret = PER_LINUX; + personality(p) == PER_LINUX) + p = (p & ~PER_MASK) | PER_LINUX32; + ret = sys_personality(p); + if (ret != -1 && personality(ret) == PER_LINUX32) + ret = (ret & ~PER_MASK) | PER_LINUX; return ret; } @@ -341,3 +342,10 @@ asmlinkage long sys32_lookup_dcookie(u32 a0, u32 a1, char __user *buf, { return sys_lookup_dcookie(merge_64(a0, a1), buf, len); } + +SYSCALL_DEFINE6(32_fanotify_mark, int, fanotify_fd, unsigned int, flags, + u64, a3, u64, a4, int, dfd, const char __user *, pathname) +{ + return sys_fanotify_mark(fanotify_fd, flags, merge_64(a3, a4), + dfd, pathname); +} diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c index 2340f11..802e616 100644 --- a/arch/mips/kernel/mips-mt-fpaff.c +++ b/arch/mips/kernel/mips-mt-fpaff.c @@ -103,7 +103,7 @@ asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned 
int len, if (!check_same_owner(p) && !capable(CAP_SYS_NICE)) goto out_unlock; - retval = security_task_setscheduler(p, 0, NULL); + retval = security_task_setscheduler(p); if (retval) goto out_unlock; diff --git a/arch/mips/kernel/perf_event.c b/arch/mips/kernel/perf_event.c new file mode 100644 index 0000000..2b7f3f7 --- /dev/null +++ b/arch/mips/kernel/perf_event.c @@ -0,0 +1,601 @@ +/* + * Linux performance counter support for MIPS. + * + * Copyright (C) 2010 MIPS Technologies, Inc. + * Author: Deng-Cheng Zhu + * + * This code is based on the implementation for ARM, which is in turn + * based on the sparc64 perf event code and the x86 code. Performance + * counter access is based on the MIPS Oprofile code. And the callchain + * support references the code of MIPS stacktrace.c. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/cpumask.h> +#include <linux/interrupt.h> +#include <linux/smp.h> +#include <linux/kernel.h> +#include <linux/perf_event.h> +#include <linux/uaccess.h> + +#include <asm/irq.h> +#include <asm/irq_regs.h> +#include <asm/stacktrace.h> +#include <asm/time.h> /* For perf_irq */ + +/* These are for 32bit counters. For 64bit ones, define them accordingly. */ +#define MAX_PERIOD ((1ULL << 32) - 1) +#define VALID_COUNT 0x7fffffff +#define TOTAL_BITS 32 +#define HIGHEST_BIT 31 + +#define MIPS_MAX_HWEVENTS 4 + +struct cpu_hw_events { + /* Array of events on this cpu. */ + struct perf_event *events[MIPS_MAX_HWEVENTS]; + + /* + * Set the bit (indexed by the counter number) when the counter + * is used for an event. + */ + unsigned long used_mask[BITS_TO_LONGS(MIPS_MAX_HWEVENTS)]; + + /* + * The borrowed MSB for the performance counter. A MIPS performance + * counter uses its bit 31 (for 32bit counters) or bit 63 (for 64bit + * counters) as a factor of determining whether a counter overflow + * should be signaled. So here we use a separate MSB for each + * counter to make things easy. + */ + unsigned long msbs[BITS_TO_LONGS(MIPS_MAX_HWEVENTS)]; + + /* + * Software copy of the control register for each performance counter. + * MIPS CPUs vary in performance counters. They use this differently, + * and even may not use it. + */ + unsigned int saved_ctrl[MIPS_MAX_HWEVENTS]; +}; +DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { + .saved_ctrl = {0}, +}; + +/* The description of MIPS performance events. */ +struct mips_perf_event { + unsigned int event_id; + /* + * MIPS performance counters are indexed starting from 0. + * CNTR_EVEN indicates the indexes of the counters to be used are + * even numbers. 
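+ * The mask carries one bit per counter index: an event restricted to
+ * counters 0 and 2 would use 0x5, while CNTR_EVEN (0x55555555) sets
+ * every even bit and CNTR_ODD (0xaaaaaaaa) every odd bit.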
+ */ + unsigned int cntr_mask; + #define CNTR_EVEN 0x55555555 + #define CNTR_ODD 0xaaaaaaaa +#ifdef CONFIG_MIPS_MT_SMP + enum { + T = 0, + V = 1, + P = 2, + } range; +#else + #define T + #define V + #define P +#endif +}; + +static struct mips_perf_event raw_event; +static DEFINE_MUTEX(raw_event_mutex); + +#define UNSUPPORTED_PERF_EVENT_ID 0xffffffff +#define C(x) PERF_COUNT_HW_CACHE_##x + +struct mips_pmu { + const char *name; + int irq; + irqreturn_t (*handle_irq)(int irq, void *dev); + int (*handle_shared_irq)(void); + void (*start)(void); + void (*stop)(void); + int (*alloc_counter)(struct cpu_hw_events *cpuc, + struct hw_perf_event *hwc); + u64 (*read_counter)(unsigned int idx); + void (*write_counter)(unsigned int idx, u64 val); + void (*enable_event)(struct hw_perf_event *evt, int idx); + void (*disable_event)(int idx); + const struct mips_perf_event *(*map_raw_event)(u64 config); + const struct mips_perf_event (*general_event_map)[PERF_COUNT_HW_MAX]; + const struct mips_perf_event (*cache_event_map) + [PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX]; + unsigned int num_counters; +}; + +static const struct mips_pmu *mipspmu; + +static int +mipspmu_event_set_period(struct perf_event *event, + struct hw_perf_event *hwc, + int idx) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + s64 left = local64_read(&hwc->period_left); + s64 period = hwc->sample_period; + int ret = 0; + u64 uleft; + unsigned long flags; + + if (unlikely(left <= -period)) { + left = period; + local64_set(&hwc->period_left, left); + hwc->last_period = period; + ret = 1; + } + + if (unlikely(left <= 0)) { + left += period; + local64_set(&hwc->period_left, left); + hwc->last_period = period; + ret = 1; + } + + if (left > (s64)MAX_PERIOD) + left = MAX_PERIOD; + + local64_set(&hwc->prev_count, (u64)-left); + + local_irq_save(flags); + uleft = (u64)(-left) & MAX_PERIOD; + uleft > VALID_COUNT ? + set_bit(idx, cpuc->msbs) : clear_bit(idx, cpuc->msbs); + mipspmu->write_counter(idx, (u64)(-left) & VALID_COUNT); + local_irq_restore(flags); + + perf_event_update_userpage(event); + + return ret; +} + +static int mipspmu_enable(struct perf_event *event) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct hw_perf_event *hwc = &event->hw; + int idx; + int err = 0; + + /* To look for a free counter for this event. */ + idx = mipspmu->alloc_counter(cpuc, hwc); + if (idx < 0) { + err = idx; + goto out; + } + + /* + * If there is an event in the counter we are going to use then + * make sure it is disabled. + */ + event->hw.idx = idx; + mipspmu->disable_event(idx); + cpuc->events[idx] = event; + + /* Set the period for the event. */ + mipspmu_event_set_period(event, hwc, idx); + + /* Enable the event. */ + mipspmu->enable_event(hwc, idx); + + /* Propagate our changes to the userspace mapping. */ + perf_event_update_userpage(event); + +out: + return err; +} + +static void mipspmu_event_update(struct perf_event *event, + struct hw_perf_event *hwc, + int idx) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + unsigned long flags; + int shift = 64 - TOTAL_BITS; + s64 prev_raw_count, new_raw_count; + s64 delta; + +again: + prev_raw_count = local64_read(&hwc->prev_count); + local_irq_save(flags); + /* Make the counter value be a "real" one. 
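+ * The hardware register holds only VALID_COUNT (31) bits of the
+ * logical value; the borrowed MSB lives in cpuc->msbs. A minimal
+ * sketch of the reconstruction done below:
+ *
+ *   raw = read_counter(idx);
+ *   if soft MSB set and hardware bit 31 set:
+ *       strip bit 31 and clear the soft MSB (overflow consumed)
+ *   else:
+ *       OR the soft MSB back into bit 31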
*/ + new_raw_count = mipspmu->read_counter(idx); + if (new_raw_count & (test_bit(idx, cpuc->msbs) << HIGHEST_BIT)) { + new_raw_count &= VALID_COUNT; + clear_bit(idx, cpuc->msbs); + } else + new_raw_count |= (test_bit(idx, cpuc->msbs) << HIGHEST_BIT); + local_irq_restore(flags); + + if (local64_cmpxchg(&hwc->prev_count, prev_raw_count, + new_raw_count) != prev_raw_count) + goto again; + + delta = (new_raw_count << shift) - (prev_raw_count << shift); + delta >>= shift; + + local64_add(delta, &event->count); + local64_sub(delta, &hwc->period_left); + + return; +} + +static void mipspmu_disable(struct perf_event *event) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + + + WARN_ON(idx < 0 || idx >= mipspmu->num_counters); + + /* We are working on a local event. */ + mipspmu->disable_event(idx); + + barrier(); + + mipspmu_event_update(event, hwc, idx); + cpuc->events[idx] = NULL; + clear_bit(idx, cpuc->used_mask); + + perf_event_update_userpage(event); +} + +static void mipspmu_unthrottle(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + + mipspmu->enable_event(hwc, hwc->idx); +} + +static void mipspmu_read(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + + /* Don't read disabled counters! */ + if (hwc->idx < 0) + return; + + mipspmu_event_update(event, hwc, hwc->idx); +} + +static struct pmu pmu = { + .enable = mipspmu_enable, + .disable = mipspmu_disable, + .unthrottle = mipspmu_unthrottle, + .read = mipspmu_read, +}; + +static atomic_t active_events = ATOMIC_INIT(0); +static DEFINE_MUTEX(pmu_reserve_mutex); +static int (*save_perf_irq)(void); + +static int mipspmu_get_irq(void) +{ + int err; + + if (mipspmu->irq >= 0) { + /* Request my own irq handler. */ + err = request_irq(mipspmu->irq, mipspmu->handle_irq, + IRQF_DISABLED | IRQF_NOBALANCING, + "mips_perf_pmu", NULL); + if (err) { + pr_warning("Unable to request IRQ%d for MIPS " + "performance counters!\n", mipspmu->irq); + } + } else if (cp0_perfcount_irq < 0) { + /* + * We are sharing the irq number with the timer interrupt. + */ + save_perf_irq = perf_irq; + perf_irq = mipspmu->handle_shared_irq; + err = 0; + } else { + pr_warning("The platform hasn't properly defined its " + "interrupt controller.\n"); + err = -ENOENT; + } + + return err; +} + +static void mipspmu_free_irq(void) +{ + if (mipspmu->irq >= 0) + free_irq(mipspmu->irq, NULL); + else if (cp0_perfcount_irq < 0) + perf_irq = save_perf_irq; +} + +static inline unsigned int +mipspmu_perf_event_encode(const struct mips_perf_event *pev) +{ +/* + * Top 8 bits for range, next 16 bits for cntr_mask, lowest 8 bits for + * event_id. + */ +#ifdef CONFIG_MIPS_MT_SMP + return ((unsigned int)pev->range << 24) | + (pev->cntr_mask & 0xffff00) | + (pev->event_id & 0xff); +#else + return (pev->cntr_mask & 0xffff00) | + (pev->event_id & 0xff); +#endif +} + +static const struct mips_perf_event * +mipspmu_map_general_event(int idx) +{ + const struct mips_perf_event *pev; + + pev = ((*mipspmu->general_event_map)[idx].event_id == + UNSUPPORTED_PERF_EVENT_ID ? 
ERR_PTR(-EOPNOTSUPP) : + &(*mipspmu->general_event_map)[idx]); + + return pev; +} + +static const struct mips_perf_event * +mipspmu_map_cache_event(u64 config) +{ + unsigned int cache_type, cache_op, cache_result; + const struct mips_perf_event *pev; + + cache_type = (config >> 0) & 0xff; + if (cache_type >= PERF_COUNT_HW_CACHE_MAX) + return ERR_PTR(-EINVAL); + + cache_op = (config >> 8) & 0xff; + if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX) + return ERR_PTR(-EINVAL); + + cache_result = (config >> 16) & 0xff; + if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) + return ERR_PTR(-EINVAL); + + pev = &((*mipspmu->cache_event_map) + [cache_type] + [cache_op] + [cache_result]); + + if (pev->event_id == UNSUPPORTED_PERF_EVENT_ID) + return ERR_PTR(-EOPNOTSUPP); + + return pev; + +} + +static int validate_event(struct cpu_hw_events *cpuc, + struct perf_event *event) +{ + struct hw_perf_event fake_hwc = event->hw; + + if (event->pmu && event->pmu != &pmu) + return 0; + + return mipspmu->alloc_counter(cpuc, &fake_hwc) >= 0; +} + +static int validate_group(struct perf_event *event) +{ + struct perf_event *sibling, *leader = event->group_leader; + struct cpu_hw_events fake_cpuc; + + memset(&fake_cpuc, 0, sizeof(fake_cpuc)); + + if (!validate_event(&fake_cpuc, leader)) + return -ENOSPC; + + list_for_each_entry(sibling, &leader->sibling_list, group_entry) { + if (!validate_event(&fake_cpuc, sibling)) + return -ENOSPC; + } + + if (!validate_event(&fake_cpuc, event)) + return -ENOSPC; + + return 0; +} + +/* + * mipsxx/rm9000/loongson2 have different performance counters, they have + * specific low-level init routines. + */ +static void reset_counters(void *arg); +static int __hw_perf_event_init(struct perf_event *event); + +static void hw_perf_event_destroy(struct perf_event *event) +{ + if (atomic_dec_and_mutex_lock(&active_events, + &pmu_reserve_mutex)) { + /* + * We must not call the destroy function with interrupts + * disabled. + */ + on_each_cpu(reset_counters, + (void *)(long)mipspmu->num_counters, 1); + mipspmu_free_irq(); + mutex_unlock(&pmu_reserve_mutex); + } +} + +const struct pmu *hw_perf_event_init(struct perf_event *event) +{ + int err = 0; + + if (!mipspmu || event->cpu >= nr_cpumask_bits || + (event->cpu >= 0 && !cpu_online(event->cpu))) + return ERR_PTR(-ENODEV); + + if (!atomic_inc_not_zero(&active_events)) { + if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) { + atomic_dec(&active_events); + return ERR_PTR(-ENOSPC); + } + + mutex_lock(&pmu_reserve_mutex); + if (atomic_read(&active_events) == 0) + err = mipspmu_get_irq(); + + if (!err) + atomic_inc(&active_events); + mutex_unlock(&pmu_reserve_mutex); + } + + if (err) + return ERR_PTR(err); + + err = __hw_perf_event_init(event); + if (err) + hw_perf_event_destroy(event); + + return err ? ERR_PTR(err) : &pmu; +} + +void hw_perf_enable(void) +{ + if (mipspmu) + mipspmu->start(); +} + +void hw_perf_disable(void) +{ + if (mipspmu) + mipspmu->stop(); +} + +/* This is needed by specific irq handlers in perf_event_*.c */ +static void +handle_associated_event(struct cpu_hw_events *cpuc, + int idx, struct perf_sample_data *data, struct pt_regs *regs) +{ + struct perf_event *event = cpuc->events[idx]; + struct hw_perf_event *hwc = &event->hw; + + mipspmu_event_update(event, hwc, idx); + data->period = event->hw.last_period; + if (!mipspmu_event_set_period(event, hwc, idx)) + return; + + if (perf_event_overflow(event, 0, data, regs)) + mipspmu->disable_event(idx); +} + +#include "perf_event_mipsxx.c" + +/* Callchain handling code. 
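+ *
+ * Entries are capped at PERF_MAX_STACK_DEPTH. The kernel side either
+ * unwinds frame by frame via unwind_stack() (CONFIG_KALLSYMS) or
+ * falls back to scanning raw stack words for kernel text addresses;
+ * the user side is left empty for now.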
*/ +static inline void +callchain_store(struct perf_callchain_entry *entry, + u64 ip) +{ + if (entry->nr < PERF_MAX_STACK_DEPTH) + entry->ip[entry->nr++] = ip; +} + +/* + * Leave userspace callchain empty for now. When we find a way to trace + * the user stack callchains, we add here. + */ +static void +perf_callchain_user(struct pt_regs *regs, + struct perf_callchain_entry *entry) +{ +} + +static void save_raw_perf_callchain(struct perf_callchain_entry *entry, + unsigned long reg29) +{ + unsigned long *sp = (unsigned long *)reg29; + unsigned long addr; + + while (!kstack_end(sp)) { + addr = *sp++; + if (__kernel_text_address(addr)) { + callchain_store(entry, addr); + if (entry->nr >= PERF_MAX_STACK_DEPTH) + break; + } + } +} + +static void +perf_callchain_kernel(struct pt_regs *regs, + struct perf_callchain_entry *entry) +{ + unsigned long sp = regs->regs[29]; +#ifdef CONFIG_KALLSYMS + unsigned long ra = regs->regs[31]; + unsigned long pc = regs->cp0_epc; + + callchain_store(entry, PERF_CONTEXT_KERNEL); + if (raw_show_trace || !__kernel_text_address(pc)) { + unsigned long stack_page = + (unsigned long)task_stack_page(current); + if (stack_page && sp >= stack_page && + sp <= stack_page + THREAD_SIZE - 32) + save_raw_perf_callchain(entry, sp); + return; + } + do { + callchain_store(entry, pc); + if (entry->nr >= PERF_MAX_STACK_DEPTH) + break; + pc = unwind_stack(current, &sp, pc, &ra); + } while (pc); +#else + callchain_store(entry, PERF_CONTEXT_KERNEL); + save_raw_perf_callchain(entry, sp); +#endif +} + +static void +perf_do_callchain(struct pt_regs *regs, + struct perf_callchain_entry *entry) +{ + int is_user; + + if (!regs) + return; + + is_user = user_mode(regs); + + if (!current || !current->pid) + return; + + if (is_user && current->state != TASK_RUNNING) + return; + + if (!is_user) { + perf_callchain_kernel(regs, entry); + if (current->mm) + regs = task_pt_regs(current); + else + regs = NULL; + } + if (regs) + perf_callchain_user(regs, entry); +} + +static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry); + +struct perf_callchain_entry * +perf_callchain(struct pt_regs *regs) +{ + struct perf_callchain_entry *entry = &__get_cpu_var(pmc_irq_entry); + + entry->nr = 0; + perf_do_callchain(regs, entry); + return entry; +} diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c new file mode 100644 index 0000000..5c7c6fc --- /dev/null +++ b/arch/mips/kernel/perf_event_mipsxx.c @@ -0,0 +1,1052 @@ +#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64) || \ + defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_SB1) + +#define M_CONFIG1_PC (1 << 4) + +#define M_PERFCTL_EXL (1UL << 0) +#define M_PERFCTL_KERNEL (1UL << 1) +#define M_PERFCTL_SUPERVISOR (1UL << 2) +#define M_PERFCTL_USER (1UL << 3) +#define M_PERFCTL_INTERRUPT_ENABLE (1UL << 4) +#define M_PERFCTL_EVENT(event) (((event) & 0x3ff) << 5) +#define M_PERFCTL_VPEID(vpe) ((vpe) << 16) +#define M_PERFCTL_MT_EN(filter) ((filter) << 20) +#define M_TC_EN_ALL M_PERFCTL_MT_EN(0) +#define M_TC_EN_VPE M_PERFCTL_MT_EN(1) +#define M_TC_EN_TC M_PERFCTL_MT_EN(2) +#define M_PERFCTL_TCID(tcid) ((tcid) << 22) +#define M_PERFCTL_WIDE (1UL << 30) +#define M_PERFCTL_MORE (1UL << 31) + +#define M_PERFCTL_COUNT_EVENT_WHENEVER (M_PERFCTL_EXL | \ + M_PERFCTL_KERNEL | \ + M_PERFCTL_USER | \ + M_PERFCTL_SUPERVISOR | \ + M_PERFCTL_INTERRUPT_ENABLE) + +#ifdef CONFIG_MIPS_MT_SMP +#define M_PERFCTL_CONFIG_MASK 0x3fff801f +#else +#define M_PERFCTL_CONFIG_MASK 0x1f +#endif +#define M_PERFCTL_EVENT_MASK 0xfe0 + +#define 
M_COUNTER_OVERFLOW (1UL << 31) + +#ifdef CONFIG_MIPS_MT_SMP +static int cpu_has_mipsmt_pertccounters; + +/* + * FIXME: For VSMP, vpe_id() is redefined for Perf-events, because + * cpu_data[cpuid].vpe_id reports 0 for _both_ CPUs. + */ +#if defined(CONFIG_HW_PERF_EVENTS) +#define vpe_id() (cpu_has_mipsmt_pertccounters ? \ + 0 : smp_processor_id()) +#else +#define vpe_id() (cpu_has_mipsmt_pertccounters ? \ + 0 : cpu_data[smp_processor_id()].vpe_id) +#endif + +/* Copied from op_model_mipsxx.c */ +static inline unsigned int vpe_shift(void) +{ + if (num_possible_cpus() > 1) + return 1; + + return 0; +} +#else /* !CONFIG_MIPS_MT_SMP */ +#define vpe_id() 0 + +static inline unsigned int vpe_shift(void) +{ + return 0; +} +#endif /* CONFIG_MIPS_MT_SMP */ + +static inline unsigned int +counters_total_to_per_cpu(unsigned int counters) +{ + return counters >> vpe_shift(); +} + +static inline unsigned int +counters_per_cpu_to_total(unsigned int counters) +{ + return counters << vpe_shift(); +} + +#define __define_perf_accessors(r, n, np) \ + \ +static inline unsigned int r_c0_ ## r ## n(void) \ +{ \ + unsigned int cpu = vpe_id(); \ + \ + switch (cpu) { \ + case 0: \ + return read_c0_ ## r ## n(); \ + case 1: \ + return read_c0_ ## r ## np(); \ + default: \ + BUG(); \ + } \ + return 0; \ +} \ + \ +static inline void w_c0_ ## r ## n(unsigned int value) \ +{ \ + unsigned int cpu = vpe_id(); \ + \ + switch (cpu) { \ + case 0: \ + write_c0_ ## r ## n(value); \ + return; \ + case 1: \ + write_c0_ ## r ## np(value); \ + return; \ + default: \ + BUG(); \ + } \ + return; \ +} \ + +__define_perf_accessors(perfcntr, 0, 2) +__define_perf_accessors(perfcntr, 1, 3) +__define_perf_accessors(perfcntr, 2, 0) +__define_perf_accessors(perfcntr, 3, 1) + +__define_perf_accessors(perfctrl, 0, 2) +__define_perf_accessors(perfctrl, 1, 3) +__define_perf_accessors(perfctrl, 2, 0) +__define_perf_accessors(perfctrl, 3, 1) + +static inline int __n_counters(void) +{ + if (!(read_c0_config1() & M_CONFIG1_PC)) + return 0; + if (!(read_c0_perfctrl0() & M_PERFCTL_MORE)) + return 1; + if (!(read_c0_perfctrl1() & M_PERFCTL_MORE)) + return 2; + if (!(read_c0_perfctrl2() & M_PERFCTL_MORE)) + return 3; + + return 4; +} + +static inline int n_counters(void) +{ + int counters; + + switch (current_cpu_type()) { + case CPU_R10000: + counters = 2; + break; + + case CPU_R12000: + case CPU_R14000: + counters = 4; + break; + + default: + counters = __n_counters(); + } + + return counters; +} + +static void reset_counters(void *arg) +{ + int counters = (int)(long)arg; + switch (counters) { + case 4: + w_c0_perfctrl3(0); + w_c0_perfcntr3(0); + case 3: + w_c0_perfctrl2(0); + w_c0_perfcntr2(0); + case 2: + w_c0_perfctrl1(0); + w_c0_perfcntr1(0); + case 1: + w_c0_perfctrl0(0); + w_c0_perfcntr0(0); + } +} + +static inline u64 +mipsxx_pmu_read_counter(unsigned int idx) +{ + switch (idx) { + case 0: + return r_c0_perfcntr0(); + case 1: + return r_c0_perfcntr1(); + case 2: + return r_c0_perfcntr2(); + case 3: + return r_c0_perfcntr3(); + default: + WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx); + return 0; + } +} + +static inline void +mipsxx_pmu_write_counter(unsigned int idx, u64 val) +{ + switch (idx) { + case 0: + w_c0_perfcntr0(val); + return; + case 1: + w_c0_perfcntr1(val); + return; + case 2: + w_c0_perfcntr2(val); + return; + case 3: + w_c0_perfcntr3(val); + return; + } +} + +static inline unsigned int +mipsxx_pmu_read_control(unsigned int idx) +{ + switch (idx) { + case 0: + return r_c0_perfctrl0(); + case 1: + return 
r_c0_perfctrl1(); + case 2: + return r_c0_perfctrl2(); + case 3: + return r_c0_perfctrl3(); + default: + WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx); + return 0; + } +} + +static inline void +mipsxx_pmu_write_control(unsigned int idx, unsigned int val) +{ + switch (idx) { + case 0: + w_c0_perfctrl0(val); + return; + case 1: + w_c0_perfctrl1(val); + return; + case 2: + w_c0_perfctrl2(val); + return; + case 3: + w_c0_perfctrl3(val); + return; + } +} + +#ifdef CONFIG_MIPS_MT_SMP +static DEFINE_RWLOCK(pmuint_rwlock); +#endif + +/* 24K/34K/1004K cores can share the same event map. */ +static const struct mips_perf_event mipsxxcore_event_map + [PERF_COUNT_HW_MAX] = { + [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P }, + [PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T }, + [PERF_COUNT_HW_CACHE_REFERENCES] = { UNSUPPORTED_PERF_EVENT_ID }, + [PERF_COUNT_HW_CACHE_MISSES] = { UNSUPPORTED_PERF_EVENT_ID }, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x02, CNTR_EVEN, T }, + [PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T }, + [PERF_COUNT_HW_BUS_CYCLES] = { UNSUPPORTED_PERF_EVENT_ID }, +}; + +/* 74K core has different branch event code. */ +static const struct mips_perf_event mipsxx74Kcore_event_map + [PERF_COUNT_HW_MAX] = { + [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P }, + [PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T }, + [PERF_COUNT_HW_CACHE_REFERENCES] = { UNSUPPORTED_PERF_EVENT_ID }, + [PERF_COUNT_HW_CACHE_MISSES] = { UNSUPPORTED_PERF_EVENT_ID }, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x27, CNTR_EVEN, T }, + [PERF_COUNT_HW_BRANCH_MISSES] = { 0x27, CNTR_ODD, T }, + [PERF_COUNT_HW_BUS_CYCLES] = { UNSUPPORTED_PERF_EVENT_ID }, +}; + +/* 24K/34K/1004K cores can share the same cache event map. */ +static const struct mips_perf_event mipsxxcore_cache_map + [PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX] = { +[C(L1D)] = { + /* + * Like some other architectures (e.g. ARM), the performance + * counters don't differentiate between read and write + * accesses/misses, so this isn't strictly correct, but it's the + * best we can do. Writes and reads get combined. + */ + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { 0x0a, CNTR_EVEN, T }, + [C(RESULT_MISS)] = { 0x0b, CNTR_EVEN | CNTR_ODD, T }, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = { 0x0a, CNTR_EVEN, T }, + [C(RESULT_MISS)] = { 0x0b, CNTR_EVEN | CNTR_ODD, T }, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, + }, +}, +[C(L1I)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { 0x09, CNTR_EVEN, T }, + [C(RESULT_MISS)] = { 0x09, CNTR_ODD, T }, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = { 0x09, CNTR_EVEN, T }, + [C(RESULT_MISS)] = { 0x09, CNTR_ODD, T }, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = { 0x14, CNTR_EVEN, T }, + /* + * Note that MIPS has only "hit" events countable for + * the prefetch operation. 
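+ * That is why RESULT_MISS is left as UNSUPPORTED_PERF_EVENT_ID just
+ * below.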
+ */ + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, + }, +}, +[C(LL)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { 0x15, CNTR_ODD, P }, + [C(RESULT_MISS)] = { 0x16, CNTR_EVEN, P }, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = { 0x15, CNTR_ODD, P }, + [C(RESULT_MISS)] = { 0x16, CNTR_EVEN, P }, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, + }, +}, +[C(DTLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T }, + [C(RESULT_MISS)] = { 0x06, CNTR_ODD, T }, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T }, + [C(RESULT_MISS)] = { 0x06, CNTR_ODD, T }, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, + }, +}, +[C(ITLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { 0x05, CNTR_EVEN, T }, + [C(RESULT_MISS)] = { 0x05, CNTR_ODD, T }, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = { 0x05, CNTR_EVEN, T }, + [C(RESULT_MISS)] = { 0x05, CNTR_ODD, T }, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, + }, +}, +[C(BPU)] = { + /* Using the same code for *HW_BRANCH* */ + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { 0x02, CNTR_EVEN, T }, + [C(RESULT_MISS)] = { 0x02, CNTR_ODD, T }, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = { 0x02, CNTR_EVEN, T }, + [C(RESULT_MISS)] = { 0x02, CNTR_ODD, T }, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, + }, +}, +}; + +/* 74K core has completely different cache event map. */ +static const struct mips_perf_event mipsxx74Kcore_cache_map + [PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX] = { +[C(L1D)] = { + /* + * Like some other architectures (e.g. ARM), the performance + * counters don't differentiate between read and write + * accesses/misses, so this isn't strictly correct, but it's the + * best we can do. Writes and reads get combined. + */ + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { 0x17, CNTR_ODD, T }, + [C(RESULT_MISS)] = { 0x18, CNTR_ODD, T }, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = { 0x17, CNTR_ODD, T }, + [C(RESULT_MISS)] = { 0x18, CNTR_ODD, T }, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, + }, +}, +[C(L1I)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T }, + [C(RESULT_MISS)] = { 0x06, CNTR_ODD, T }, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T }, + [C(RESULT_MISS)] = { 0x06, CNTR_ODD, T }, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = { 0x34, CNTR_EVEN, T }, + /* + * Note that MIPS has only "hit" events countable for + * the prefetch operation. + */ + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, + }, +}, +[C(LL)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { 0x1c, CNTR_ODD, P }, + [C(RESULT_MISS)] = { 0x1d, CNTR_EVEN | CNTR_ODD, P }, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = { 0x1c, CNTR_ODD, P }, + [C(RESULT_MISS)] = { 0x1d, CNTR_EVEN | CNTR_ODD, P }, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, + }, +}, +[C(DTLB)] = { + /* 74K core does not have specific DTLB events. 
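+ * Every DTLB op/result pair below is therefore marked as
+ * UNSUPPORTED_PERF_EVENT_ID.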
*/ + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, + }, +}, +[C(ITLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { 0x04, CNTR_EVEN, T }, + [C(RESULT_MISS)] = { 0x04, CNTR_ODD, T }, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = { 0x04, CNTR_EVEN, T }, + [C(RESULT_MISS)] = { 0x04, CNTR_ODD, T }, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, + }, +}, +[C(BPU)] = { + /* Using the same code for *HW_BRANCH* */ + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { 0x27, CNTR_EVEN, T }, + [C(RESULT_MISS)] = { 0x27, CNTR_ODD, T }, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = { 0x27, CNTR_EVEN, T }, + [C(RESULT_MISS)] = { 0x27, CNTR_ODD, T }, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, + }, +}, +}; + +#ifdef CONFIG_MIPS_MT_SMP +static void +check_and_calc_range(struct perf_event *event, + const struct mips_perf_event *pev) +{ + struct hw_perf_event *hwc = &event->hw; + + if (event->cpu >= 0) { + if (pev->range > V) { + /* + * The user selected an event that is processor + * wide, while expecting it to be VPE wide. + */ + hwc->config_base |= M_TC_EN_ALL; + } else { + /* + * FIXME: cpu_data[event->cpu].vpe_id reports 0 + * for both CPUs. + */ + hwc->config_base |= M_PERFCTL_VPEID(event->cpu); + hwc->config_base |= M_TC_EN_VPE; + } + } else + hwc->config_base |= M_TC_EN_ALL; +} +#else +static void +check_and_calc_range(struct perf_event *event, + const struct mips_perf_event *pev) +{ +} +#endif + +static int __hw_perf_event_init(struct perf_event *event) +{ + struct perf_event_attr *attr = &event->attr; + struct hw_perf_event *hwc = &event->hw; + const struct mips_perf_event *pev; + int err; + + /* Returning MIPS event descriptor for generic perf event. */ + if (PERF_TYPE_HARDWARE == event->attr.type) { + if (event->attr.config >= PERF_COUNT_HW_MAX) + return -EINVAL; + pev = mipspmu_map_general_event(event->attr.config); + } else if (PERF_TYPE_HW_CACHE == event->attr.type) { + pev = mipspmu_map_cache_event(event->attr.config); + } else if (PERF_TYPE_RAW == event->attr.type) { + /* We are working on the global raw event. */ + mutex_lock(&raw_event_mutex); + pev = mipspmu->map_raw_event(event->attr.config); + } else { + /* The event type is not (yet) supported. */ + return -EOPNOTSUPP; + } + + if (IS_ERR(pev)) { + if (PERF_TYPE_RAW == event->attr.type) + mutex_unlock(&raw_event_mutex); + return PTR_ERR(pev); + } + + /* + * We allow max flexibility on how each individual counter shared + * by the single CPU operates (the mode exclusion and the range). + */ + hwc->config_base = M_PERFCTL_INTERRUPT_ENABLE; + + /* Calculate range bits and validate it. 
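+ * P is processor-wide and V is VPE-wide (see check_and_calc_range()
+ * above); T presumably restricts counting to a single TC. The range
+ * ends up in config_base via the M_TC_EN_* and M_PERFCTL_VPEID bits.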
*/ + if (num_possible_cpus() > 1) + check_and_calc_range(event, pev); + + hwc->event_base = mipspmu_perf_event_encode(pev); + if (PERF_TYPE_RAW == event->attr.type) + mutex_unlock(&raw_event_mutex); + + if (!attr->exclude_user) + hwc->config_base |= M_PERFCTL_USER; + if (!attr->exclude_kernel) { + hwc->config_base |= M_PERFCTL_KERNEL; + /* MIPS kernel mode: KSU == 00b || EXL == 1 || ERL == 1 */ + hwc->config_base |= M_PERFCTL_EXL; + } + if (!attr->exclude_hv) + hwc->config_base |= M_PERFCTL_SUPERVISOR; + + hwc->config_base &= M_PERFCTL_CONFIG_MASK; + /* + * The event can belong to another cpu. We do not assign a local + * counter for it for now. + */ + hwc->idx = -1; + hwc->config = 0; + + if (!hwc->sample_period) { + hwc->sample_period = MAX_PERIOD; + hwc->last_period = hwc->sample_period; + local64_set(&hwc->period_left, hwc->sample_period); + } + + err = 0; + if (event->group_leader != event) { + err = validate_group(event); + if (err) + return -EINVAL; + } + + event->destroy = hw_perf_event_destroy; + + return err; +} + +static void pause_local_counters(void) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + int counters = mipspmu->num_counters; + unsigned long flags; + + local_irq_save(flags); + switch (counters) { + case 4: + cpuc->saved_ctrl[3] = r_c0_perfctrl3(); + w_c0_perfctrl3(cpuc->saved_ctrl[3] & + ~M_PERFCTL_COUNT_EVENT_WHENEVER); + case 3: + cpuc->saved_ctrl[2] = r_c0_perfctrl2(); + w_c0_perfctrl2(cpuc->saved_ctrl[2] & + ~M_PERFCTL_COUNT_EVENT_WHENEVER); + case 2: + cpuc->saved_ctrl[1] = r_c0_perfctrl1(); + w_c0_perfctrl1(cpuc->saved_ctrl[1] & + ~M_PERFCTL_COUNT_EVENT_WHENEVER); + case 1: + cpuc->saved_ctrl[0] = r_c0_perfctrl0(); + w_c0_perfctrl0(cpuc->saved_ctrl[0] & + ~M_PERFCTL_COUNT_EVENT_WHENEVER); + } + local_irq_restore(flags); +} + +static void resume_local_counters(void) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + int counters = mipspmu->num_counters; + unsigned long flags; + + local_irq_save(flags); + switch (counters) { + case 4: + w_c0_perfctrl3(cpuc->saved_ctrl[3]); + case 3: + w_c0_perfctrl2(cpuc->saved_ctrl[2]); + case 2: + w_c0_perfctrl1(cpuc->saved_ctrl[1]); + case 1: + w_c0_perfctrl0(cpuc->saved_ctrl[0]); + } + local_irq_restore(flags); +} + +static int mipsxx_pmu_handle_shared_irq(void) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct perf_sample_data data; + unsigned int counters = mipspmu->num_counters; + unsigned int counter; + int handled = IRQ_NONE; + struct pt_regs *regs; + + if (cpu_has_mips_r2 && !(read_c0_cause() & (1 << 26))) + return handled; + + /* + * First we pause the local counters, so that when we are locked + * here, the counters are all paused. When it gets locked due to + * perf_disable(), the timer interrupt handler will be delayed. + * + * See also mipsxx_pmu_start(). + */ + pause_local_counters(); +#ifdef CONFIG_MIPS_MT_SMP + read_lock(&pmuint_rwlock); +#endif + + regs = get_irq_regs(); + + perf_sample_data_init(&data, 0); + + switch (counters) { +#define HANDLE_COUNTER(n) \ + case n + 1: \ + if (test_bit(n, cpuc->used_mask)) { \ + counter = r_c0_perfcntr ## n(); \ + if (counter & M_COUNTER_OVERFLOW) { \ + w_c0_perfcntr ## n(counter & \ + VALID_COUNT); \ + if (test_and_change_bit(n, cpuc->msbs)) \ + handle_associated_event(cpuc, \ + n, &data, regs); \ + handled = IRQ_HANDLED; \ + } \ + } + HANDLE_COUNTER(3) + HANDLE_COUNTER(2) + HANDLE_COUNTER(1) + HANDLE_COUNTER(0) + } + + /* + * Do all the work for the pending perf events. 
We can do this + * in here because the performance counter interrupt is a regular + * interrupt, not NMI. + */ + if (handled == IRQ_HANDLED) + perf_event_do_pending(); + +#ifdef CONFIG_MIPS_MT_SMP + read_unlock(&pmuint_rwlock); +#endif + resume_local_counters(); + return handled; +} + +static irqreturn_t +mipsxx_pmu_handle_irq(int irq, void *dev) +{ + return mipsxx_pmu_handle_shared_irq(); +} + +static void mipsxx_pmu_start(void) +{ +#ifdef CONFIG_MIPS_MT_SMP + write_unlock(&pmuint_rwlock); +#endif + resume_local_counters(); +} + +/* + * MIPS performance counters can be per-TC. The control registers can + * not be directly accessed accross CPUs. Hence if we want to do global + * control, we need cross CPU calls. on_each_cpu() can help us, but we + * can not make sure this function is called with interrupts enabled. So + * here we pause local counters and then grab a rwlock and leave the + * counters on other CPUs alone. If any counter interrupt raises while + * we own the write lock, simply pause local counters on that CPU and + * spin in the handler. Also we know we won't be switched to another + * CPU after pausing local counters and before grabbing the lock. + */ +static void mipsxx_pmu_stop(void) +{ + pause_local_counters(); +#ifdef CONFIG_MIPS_MT_SMP + write_lock(&pmuint_rwlock); +#endif +} + +static int +mipsxx_pmu_alloc_counter(struct cpu_hw_events *cpuc, + struct hw_perf_event *hwc) +{ + int i; + + /* + * We only need to care the counter mask. The range has been + * checked definitely. + */ + unsigned long cntr_mask = (hwc->event_base >> 8) & 0xffff; + + for (i = mipspmu->num_counters - 1; i >= 0; i--) { + /* + * Note that some MIPS perf events can be counted by both + * even and odd counters, wheresas many other are only by + * even _or_ odd counters. This introduces an issue that + * when the former kind of event takes the counter the + * latter kind of event wants to use, then the "counter + * allocation" for the latter event will fail. In fact if + * they can be dynamically swapped, they both feel happy. + * But here we leave this issue alone for now. + */ + if (test_bit(i, &cntr_mask) && + !test_and_set_bit(i, cpuc->used_mask)) + return i; + } + + return -EAGAIN; +} + +static void +mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + unsigned long flags; + + WARN_ON(idx < 0 || idx >= mipspmu->num_counters); + + local_irq_save(flags); + cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base & 0xff) | + (evt->config_base & M_PERFCTL_CONFIG_MASK) | + /* Make sure interrupt enabled. */ + M_PERFCTL_INTERRUPT_ENABLE; + /* + * We do not actually let the counter run. Leave it until start(). 
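+ * Only the software copy in cpuc->saved_ctrl[] is updated here; the
+ * value reaches the hardware control register once start() calls
+ * resume_local_counters().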
+ */ + local_irq_restore(flags); +} + +static void +mipsxx_pmu_disable_event(int idx) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + unsigned long flags; + + WARN_ON(idx < 0 || idx >= mipspmu->num_counters); + + local_irq_save(flags); + cpuc->saved_ctrl[idx] = mipsxx_pmu_read_control(idx) & + ~M_PERFCTL_COUNT_EVENT_WHENEVER; + mipsxx_pmu_write_control(idx, cpuc->saved_ctrl[idx]); + local_irq_restore(flags); +} + +/* 24K */ +#define IS_UNSUPPORTED_24K_EVENT(r, b) \ + ((b) == 12 || (r) == 151 || (r) == 152 || (b) == 26 || \ + (b) == 27 || (r) == 28 || (r) == 158 || (b) == 31 || \ + (b) == 32 || (b) == 34 || (b) == 36 || (r) == 168 || \ + (r) == 172 || (b) == 47 || ((b) >= 56 && (b) <= 63) || \ + ((b) >= 68 && (b) <= 127)) +#define IS_BOTH_COUNTERS_24K_EVENT(b) \ + ((b) == 0 || (b) == 1 || (b) == 11) + +/* 34K */ +#define IS_UNSUPPORTED_34K_EVENT(r, b) \ + ((b) == 12 || (r) == 27 || (r) == 158 || (b) == 36 || \ + (b) == 38 || (r) == 175 || ((b) >= 56 && (b) <= 63) || \ + ((b) >= 68 && (b) <= 127)) +#define IS_BOTH_COUNTERS_34K_EVENT(b) \ + ((b) == 0 || (b) == 1 || (b) == 11) +#ifdef CONFIG_MIPS_MT_SMP +#define IS_RANGE_P_34K_EVENT(r, b) \ + ((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 || \ + (b) == 25 || (b) == 39 || (r) == 44 || (r) == 174 || \ + (r) == 176 || ((b) >= 50 && (b) <= 55) || \ + ((b) >= 64 && (b) <= 67)) +#define IS_RANGE_V_34K_EVENT(r) ((r) == 47) +#endif + +/* 74K */ +#define IS_UNSUPPORTED_74K_EVENT(r, b) \ + ((r) == 5 || ((r) >= 135 && (r) <= 137) || \ + ((b) >= 10 && (b) <= 12) || (b) == 22 || (b) == 27 || \ + (b) == 33 || (b) == 34 || ((b) >= 47 && (b) <= 49) || \ + (r) == 178 || (b) == 55 || (b) == 57 || (b) == 60 || \ + (b) == 61 || (r) == 62 || (r) == 191 || \ + ((b) >= 64 && (b) <= 127)) +#define IS_BOTH_COUNTERS_74K_EVENT(b) \ + ((b) == 0 || (b) == 1) + +/* 1004K */ +#define IS_UNSUPPORTED_1004K_EVENT(r, b) \ + ((b) == 12 || (r) == 27 || (r) == 158 || (b) == 38 || \ + (r) == 175 || (b) == 63 || ((b) >= 68 && (b) <= 127)) +#define IS_BOTH_COUNTERS_1004K_EVENT(b) \ + ((b) == 0 || (b) == 1 || (b) == 11) +#ifdef CONFIG_MIPS_MT_SMP +#define IS_RANGE_P_1004K_EVENT(r, b) \ + ((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 || \ + (b) == 25 || (b) == 36 || (b) == 39 || (r) == 44 || \ + (r) == 174 || (r) == 176 || ((b) >= 50 && (b) <= 59) || \ + (r) == 188 || (b) == 61 || (b) == 62 || \ + ((b) >= 64 && (b) <= 67)) +#define IS_RANGE_V_1004K_EVENT(r) ((r) == 47) +#endif + +/* + * User can use 0-255 raw events, where 0-127 for the events of even + * counters, and 128-255 for odd counters. Note that bit 7 is used to + * indicate the parity. So, for example, when user wants to take the + * Event Num of 15 for odd counters (by referring to the user manual), + * then 128 needs to be added to 15 as the input for the event config, + * i.e., 143 (0x8F) to be used. + */ +static const struct mips_perf_event * +mipsxx_pmu_map_raw_event(u64 config) +{ + unsigned int raw_id = config & 0xff; + unsigned int base_id = raw_id & 0x7f; + + switch (current_cpu_type()) { + case CPU_24K: + if (IS_UNSUPPORTED_24K_EVENT(raw_id, base_id)) + return ERR_PTR(-EOPNOTSUPP); + raw_event.event_id = base_id; + if (IS_BOTH_COUNTERS_24K_EVENT(base_id)) + raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD; + else + raw_event.cntr_mask = + raw_id > 127 ? CNTR_ODD : CNTR_EVEN; +#ifdef CONFIG_MIPS_MT_SMP + /* + * This is actually doing nothing. Non-multithreading + * CPUs will not check and calculate the range. 
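+ * (check_and_calc_range() is only invoked when num_possible_cpus()
+ * is greater than 1, so the value assigned below is never consulted
+ * on uniprocessor systems.)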
+ */ + raw_event.range = P; +#endif + break; + case CPU_34K: + if (IS_UNSUPPORTED_34K_EVENT(raw_id, base_id)) + return ERR_PTR(-EOPNOTSUPP); + raw_event.event_id = base_id; + if (IS_BOTH_COUNTERS_34K_EVENT(base_id)) + raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD; + else + raw_event.cntr_mask = + raw_id > 127 ? CNTR_ODD : CNTR_EVEN; +#ifdef CONFIG_MIPS_MT_SMP + if (IS_RANGE_P_34K_EVENT(raw_id, base_id)) + raw_event.range = P; + else if (unlikely(IS_RANGE_V_34K_EVENT(raw_id))) + raw_event.range = V; + else + raw_event.range = T; +#endif + break; + case CPU_74K: + if (IS_UNSUPPORTED_74K_EVENT(raw_id, base_id)) + return ERR_PTR(-EOPNOTSUPP); + raw_event.event_id = base_id; + if (IS_BOTH_COUNTERS_74K_EVENT(base_id)) + raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD; + else + raw_event.cntr_mask = + raw_id > 127 ? CNTR_ODD : CNTR_EVEN; +#ifdef CONFIG_MIPS_MT_SMP + raw_event.range = P; +#endif + break; + case CPU_1004K: + if (IS_UNSUPPORTED_1004K_EVENT(raw_id, base_id)) + return ERR_PTR(-EOPNOTSUPP); + raw_event.event_id = base_id; + if (IS_BOTH_COUNTERS_1004K_EVENT(base_id)) + raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD; + else + raw_event.cntr_mask = + raw_id > 127 ? CNTR_ODD : CNTR_EVEN; +#ifdef CONFIG_MIPS_MT_SMP + if (IS_RANGE_P_1004K_EVENT(raw_id, base_id)) + raw_event.range = P; + else if (unlikely(IS_RANGE_V_1004K_EVENT(raw_id))) + raw_event.range = V; + else + raw_event.range = T; +#endif + break; + } + + return &raw_event; +} + +static struct mips_pmu mipsxxcore_pmu = { + .handle_irq = mipsxx_pmu_handle_irq, + .handle_shared_irq = mipsxx_pmu_handle_shared_irq, + .start = mipsxx_pmu_start, + .stop = mipsxx_pmu_stop, + .alloc_counter = mipsxx_pmu_alloc_counter, + .read_counter = mipsxx_pmu_read_counter, + .write_counter = mipsxx_pmu_write_counter, + .enable_event = mipsxx_pmu_enable_event, + .disable_event = mipsxx_pmu_disable_event, + .map_raw_event = mipsxx_pmu_map_raw_event, + .general_event_map = &mipsxxcore_event_map, + .cache_event_map = &mipsxxcore_cache_map, +}; + +static struct mips_pmu mipsxx74Kcore_pmu = { + .handle_irq = mipsxx_pmu_handle_irq, + .handle_shared_irq = mipsxx_pmu_handle_shared_irq, + .start = mipsxx_pmu_start, + .stop = mipsxx_pmu_stop, + .alloc_counter = mipsxx_pmu_alloc_counter, + .read_counter = mipsxx_pmu_read_counter, + .write_counter = mipsxx_pmu_write_counter, + .enable_event = mipsxx_pmu_enable_event, + .disable_event = mipsxx_pmu_disable_event, + .map_raw_event = mipsxx_pmu_map_raw_event, + .general_event_map = &mipsxx74Kcore_event_map, + .cache_event_map = &mipsxx74Kcore_cache_map, +}; + +static int __init +init_hw_perf_events(void) +{ + int counters, irq; + + pr_info("Performance counters: "); + + counters = n_counters(); + if (counters == 0) { + pr_cont("No available PMU.\n"); + return -ENODEV; + } + +#ifdef CONFIG_MIPS_MT_SMP + cpu_has_mipsmt_pertccounters = read_c0_config7() & (1<<19); + if (!cpu_has_mipsmt_pertccounters) + counters = counters_total_to_per_cpu(counters); +#endif + +#ifdef MSC01E_INT_BASE + if (cpu_has_veic) { + /* + * Using platform specific interrupt controller defines. 
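+ * With an external interrupt controller (cpu_has_veic) the counter
+ * interrupt is the fixed MSC01E vector below; otherwise it is
+ * derived from cp0_perfcount_irq, or shared with the timer interrupt
+ * when cp0_perfcount_irq is negative.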
+ */ + irq = MSC01E_INT_BASE + MSC01E_INT_PERFCTR; + } else { +#endif + if (cp0_perfcount_irq >= 0) + irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq; + else + irq = -1; +#ifdef MSC01E_INT_BASE + } +#endif + + on_each_cpu(reset_counters, (void *)(long)counters, 1); + + switch (current_cpu_type()) { + case CPU_24K: + mipsxxcore_pmu.name = "mips/24K"; + mipsxxcore_pmu.num_counters = counters; + mipsxxcore_pmu.irq = irq; + mipspmu = &mipsxxcore_pmu; + break; + case CPU_34K: + mipsxxcore_pmu.name = "mips/34K"; + mipsxxcore_pmu.num_counters = counters; + mipsxxcore_pmu.irq = irq; + mipspmu = &mipsxxcore_pmu; + break; + case CPU_74K: + mipsxx74Kcore_pmu.name = "mips/74K"; + mipsxx74Kcore_pmu.num_counters = counters; + mipsxx74Kcore_pmu.irq = irq; + mipspmu = &mipsxx74Kcore_pmu; + break; + case CPU_1004K: + mipsxxcore_pmu.name = "mips/1004K"; + mipsxxcore_pmu.num_counters = counters; + mipsxxcore_pmu.irq = irq; + mipspmu = &mipsxxcore_pmu; + break; + default: + pr_cont("Either hardware does not support performance " + "counters, or not yet implemented.\n"); + return -ENODEV; + } + + if (mipspmu) + pr_cont("%s PMU enabled, %d counters available to each " + "CPU, irq %d%s\n", mipspmu->name, counters, irq, + irq < 0 ? " (share with timer interrupt)" : ""); + + return 0; +} +arch_initcall(init_hw_perf_events); + +#endif /* defined(CONFIG_CPU_MIPS32)... */ diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index 9996094..ae167df 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -142,7 +142,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, childregs->regs[7] = 0; /* Clear error flag */ childregs->regs[2] = 0; /* Child gets zero as return value */ - regs->regs[2] = p->pid; if (childregs->cp0_status & ST0_CU0) { childregs->regs[28] = (unsigned long) ti; diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c new file mode 100644 index 0000000..9dbe583 --- /dev/null +++ b/arch/mips/kernel/prom.c @@ -0,0 +1,112 @@ +/* + * MIPS support for CONFIG_OF device tree support + * + * Copyright (C) 2010 Cisco Systems Inc. <dediao@cisco.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 9996094..ae167df 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -142,7 +142,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 	childregs->regs[7] = 0;	/* Clear error flag */
 	childregs->regs[2] = 0;	/* Child gets zero as return value */
-	regs->regs[2] = p->pid;
 	if (childregs->cp0_status & ST0_CU0) {
 		childregs->regs[28] = (unsigned long) ti;
diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c
new file mode 100644
index 0000000..9dbe583
--- /dev/null
+++ b/arch/mips/kernel/prom.c
@@ -0,0 +1,112 @@
+/*
+ * MIPS support for CONFIG_OF device tree support
+ *
+ * Copyright (C) 2010 Cisco Systems Inc. <dediao@cisco.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/bootmem.h>
+#include <linux/initrd.h>
+#include <linux/debugfs.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+
+#include <asm/page.h>
+#include <asm/prom.h>
+
+int __init early_init_dt_scan_memory_arch(unsigned long node,
+					  const char *uname, int depth,
+					  void *data)
+{
+	return early_init_dt_scan_memory(node, uname, depth, data);
+}
+
+void __init early_init_dt_add_memory_arch(u64 base, u64 size)
+{
+	return add_memory_region(base, size, BOOT_MEM_RAM);
+}
+
+int __init reserve_mem_mach(unsigned long addr, unsigned long size)
+{
+	return reserve_bootmem(addr, size, BOOTMEM_DEFAULT);
+}
+
+void __init free_mem_mach(unsigned long addr, unsigned long size)
+{
+	return free_bootmem(addr, size);
+}
+
+u64 __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
+{
+	return virt_to_phys(
+		__alloc_bootmem(size, align, __pa(MAX_DMA_ADDRESS))
+	);
+}
+
+#ifdef CONFIG_BLK_DEV_INITRD
+void __init early_init_dt_setup_initrd_arch(unsigned long start,
+					    unsigned long end)
+{
+	initrd_start = (unsigned long)__va(start);
+	initrd_end = (unsigned long)__va(end);
+	initrd_below_start_ok = 1;
+}
+#endif
+
+/*
+ * irq_create_of_mapping - Hook to resolve OF irq specifier into a Linux irq#
+ *
+ * Currently the mapping mechanism is trivial; simple flat hwirq numbers are
+ * mapped 1:1 onto Linux irq numbers. Cascaded irq controllers are not
+ * supported.
+ */
+unsigned int irq_create_of_mapping(struct device_node *controller,
+				   const u32 *intspec, unsigned int intsize)
+{
+	return intspec[0];
+}
+EXPORT_SYMBOL_GPL(irq_create_of_mapping);
+
+void __init early_init_devtree(void *params)
+{
+	/* Setup flat device-tree pointer */
+	initial_boot_params = params;
+
+	/* Retrieve various informations from the /chosen node of the
+	 * device-tree, including the platform type, initrd location and
+	 * size, and more ...
+	 */
+	of_scan_flat_dt(early_init_dt_scan_chosen, NULL);
+
+	/* Scan memory nodes */
+	of_scan_flat_dt(early_init_dt_scan_root, NULL);
+	of_scan_flat_dt(early_init_dt_scan_memory_arch, NULL);
+}
+
+void __init device_tree_init(void)
+{
+	unsigned long base, size;
+
+	if (!initial_boot_params)
+		return;
+
+	base = virt_to_phys((void *)initial_boot_params);
+	size = be32_to_cpu(initial_boot_params->totalsize);
+
+	/* Before we do anything, lets reserve the dt blob */
+	reserve_mem_mach(base, size);
+
+	unflatten_device_tree();
+
+	/* free the space reserved for the dt blob */
+	free_mem_mach(base, size);
+}
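For platform code wiring this up, the expected order is: hand the flattened blob to early_init_devtree() early in boot, then let device_tree_init() (called from arch_mem_init(), see the setup.c hunk further down) reserve, unflatten, and release the blob once bootmem is live. A minimal sketch under those assumptions; the __dtb_start symbol is a common convention for a DT blob linked into the kernel, not something this patch provides:

#include <asm/prom.h>

extern char __dtb_start[];	/* assumed: FDT blob embedded at link time */

void __init plat_mem_setup(void)
{
	/* /chosen and the memory nodes are scanned immediately;
	 * unflattening happens later in device_tree_init(). */
	early_init_devtree(__dtb_start);
}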
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index c51b95f..d21c388 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -255,9 +255,13 @@ int ptrace_set_watch_regs(struct task_struct *child,
 	return 0;
 }

-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+		 unsigned long addr, unsigned long data)
 {
 	int ret;
+	void __user *addrp = (void __user *) addr;
+	void __user *datavp = (void __user *) data;
+	unsigned long __user *datalp = (void __user *) data;

 	switch (request) {
 	/* when I and D space are separate, these will need to be fixed. */
@@ -386,7 +390,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 			ret = -EIO;
 			goto out;
 		}
-		ret = put_user(tmp, (unsigned long __user *) data);
+		ret = put_user(tmp, datalp);
 		break;
 	}
@@ -478,34 +482,31 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 	}

 	case PTRACE_GETREGS:
-		ret = ptrace_getregs(child, (__s64 __user *) data);
+		ret = ptrace_getregs(child, datavp);
 		break;

 	case PTRACE_SETREGS:
-		ret = ptrace_setregs(child, (__s64 __user *) data);
+		ret = ptrace_setregs(child, datavp);
 		break;

 	case PTRACE_GETFPREGS:
-		ret = ptrace_getfpregs(child, (__u32 __user *) data);
+		ret = ptrace_getfpregs(child, datavp);
 		break;

 	case PTRACE_SETFPREGS:
-		ret = ptrace_setfpregs(child, (__u32 __user *) data);
+		ret = ptrace_setfpregs(child, datavp);
 		break;

 	case PTRACE_GET_THREAD_AREA:
-		ret = put_user(task_thread_info(child)->tp_value,
-				(unsigned long __user *) data);
+		ret = put_user(task_thread_info(child)->tp_value, datalp);
 		break;

 	case PTRACE_GET_WATCH_REGS:
-		ret = ptrace_get_watch_regs(child,
-					(struct pt_watch_regs __user *) addr);
+		ret = ptrace_get_watch_regs(child, addrp);
 		break;

 	case PTRACE_SET_WATCH_REGS:
-		ret = ptrace_set_watch_regs(child,
-					(struct pt_watch_regs __user *) addr);
+		ret = ptrace_set_watch_regs(child, addrp);
 		break;

 	default:
@@ -536,7 +537,7 @@ asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit)
 {
 	/* do the secure computing check first */
 	if (!entryexit)
-		secure_computing(regs->regs[0]);
+		secure_computing(regs->regs[2]);

 	if (unlikely(current->audit_context) && entryexit)
 		audit_syscall_exit(AUDITSC_RESULT(regs->regs[2]),
@@ -565,7 +566,7 @@ asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit)
 out:
 	if (unlikely(current->audit_context) && !entryexit)
-		audit_syscall_entry(audit_arch(), regs->regs[0],
+		audit_syscall_entry(audit_arch(), regs->regs[2],
 				    regs->regs[4], regs->regs[5],
 				    regs->regs[6], regs->regs[7]);
 }
diff --git a/arch/mips/kernel/rtlx.c b/arch/mips/kernel/rtlx.c
index 26f9b9a..557ef72 100644
--- a/arch/mips/kernel/rtlx.c
+++ b/arch/mips/kernel/rtlx.c
@@ -468,7 +468,8 @@ static const struct file_operations rtlx_fops = {
 	.release = file_release,
 	.write = file_write,
 	.read = file_read,
-	.poll = file_poll
+	.poll = file_poll,
+	.llseek = noop_llseek,
 };

 static struct irqaction rtlx_irq = {
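Two independent fixes share the ptrace.c hunk: arch_ptrace() adopts the generic unsigned long addr/data signature (avoiding accidental sign extension of 32-bit user addresses on 64-bit kernels), casting each raw argument to a __user pointer exactly once; and do_syscall_trace() now reads the syscall number from regs[2] rather than regs[0], matching the new restart convention introduced by the scall*.S changes below. The cast-once idiom, reduced to a sketch with a hypothetical name:

static long sketch_get_thread_area(struct task_struct *child, unsigned long data)
{
	/* one well-typed alias instead of a cast at every call site */
	unsigned long __user *datalp = (unsigned long __user *) data;

	return put_user(task_thread_info(child)->tp_value, datalp);
}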
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index 17202bb..fbaabad 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -63,9 +63,9 @@ stack_done:
 	sw	t0, PT_R7(sp)		# set error flag
 	beqz	t0, 1f

+	lw	t1, PT_R2(sp)		# syscall number
 	negu	v0			# error
-	sw	v0, PT_R0(sp)		# set flag for syscall
-					# restarting
+	sw	t1, PT_R0(sp)		# save it for syscall restarting
1:	sw	v0, PT_R2(sp)		# result

o32_syscall_exit:
@@ -104,9 +104,9 @@ syscall_trace_entry:
 	sw	t0, PT_R7(sp)		# set error flag
 	beqz	t0, 1f

+	lw	t1, PT_R2(sp)		# syscall number
 	negu	v0			# error
-	sw	v0, PT_R0(sp)		# set flag for syscall
-					# restarting
+	sw	t1, PT_R0(sp)		# save it for syscall restarting
1:	sw	v0, PT_R2(sp)		# result

 	j	syscall_exit
@@ -169,8 +169,7 @@ stackargs:
 * We probably should handle this case a bit more drastic.
 */
bad_stack:
-	negu	v0			# error
-	sw	v0, PT_R0(sp)
+	li	v0, EFAULT
 	sw	v0, PT_R2(sp)
 	li	t0, 1			# set error flag
 	sw	t0, PT_R7(sp)
@@ -583,7 +582,10 @@ einval:	li	v0, -ENOSYS
 	sys	sys_rt_tgsigqueueinfo	4
 	sys	sys_perf_event_open	5
 	sys	sys_accept4		4
-	sys	sys_recvmmsg		5
+	sys	sys_recvmmsg		5	/* 4335 */
+	sys	sys_fanotify_init	2
+	sys	sys_fanotify_mark	6
+	sys	sys_prlimit64		4
 	.endm

 /* We pre-compute the number of _instruction_ bytes needed to
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index a8a6c59..3f41792 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -66,9 +66,9 @@ NESTED(handle_sys64, PT_SIZE, sp)
 	sd	t0, PT_R7(sp)		# set error flag
 	beqz	t0, 1f

+	ld	t1, PT_R2(sp)		# syscall number
 	dnegu	v0			# error
-	sd	v0, PT_R0(sp)		# set flag for syscall
-					# restarting
+	sd	t1, PT_R0(sp)		# save it for syscall restarting
1:	sd	v0, PT_R2(sp)		# result

n64_syscall_exit:
@@ -109,8 +109,9 @@ syscall_trace_entry:
 	sd	t0, PT_R7(sp)		# set error flag
 	beqz	t0, 1f

+	ld	t1, PT_R2(sp)		# syscall number
 	dnegu	v0			# error
-	sd	v0, PT_R0(sp)		# set flag for syscall restarting
+	sd	t1, PT_R0(sp)		# save it for syscall restarting
1:	sd	v0, PT_R2(sp)		# result

 	j	syscall_exit
@@ -416,9 +417,12 @@ sys_call_table:
 	PTR	sys_pipe2
 	PTR	sys_inotify_init1
 	PTR	sys_preadv
-	PTR	sys_pwritev		/* 5390 */
+	PTR	sys_pwritev		/* 5290 */
 	PTR	sys_rt_tgsigqueueinfo
 	PTR	sys_perf_event_open
 	PTR	sys_accept4
-	PTR	sys_recvmmsg
+	PTR	sys_recvmmsg
+	PTR	sys_fanotify_init	/* 5295 */
+	PTR	sys_fanotify_mark
+	PTR	sys_prlimit64
 	.size	sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index a3d6613..f08ece6 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -65,8 +65,9 @@ NESTED(handle_sysn32, PT_SIZE, sp)
 	sd	t0, PT_R7(sp)		# set error flag
 	beqz	t0, 1f

+	ld	t1, PT_R2(sp)		# syscall number
 	dnegu	v0			# error
-	sd	v0, PT_R0(sp)		# set flag for syscall restarting
+	sd	t1, PT_R0(sp)		# save it for syscall restarting
1:	sd	v0, PT_R2(sp)		# result

 	local_irq_disable		# make sure need_resched and
@@ -106,8 +107,9 @@ n32_syscall_trace_entry:
 	sd	t0, PT_R7(sp)		# set error flag
 	beqz	t0, 1f

+	ld	t1, PT_R2(sp)		# syscall number
 	dnegu	v0			# error
-	sd	v0, PT_R0(sp)		# set flag for syscall restarting
+	sd	t1, PT_R0(sp)		# save it for syscall restarting
1:	sd	v0, PT_R2(sp)		# result

 	j	syscall_exit
@@ -320,10 +322,10 @@ EXPORT(sysn32_call_table)
 	PTR	sys_cacheflush
 	PTR	sys_cachectl
 	PTR	sys_sysmips
-	PTR	sys_io_setup			/* 6200 */
+	PTR	compat_sys_io_setup		/* 6200 */
 	PTR	sys_io_destroy
-	PTR	sys_io_getevents
-	PTR	sys_io_submit
+	PTR	compat_sys_io_getevents
+	PTR	compat_sys_io_submit
 	PTR	sys_io_cancel
 	PTR	sys_exit_group			/* 6205 */
 	PTR	sys_lookup_dcookie
@@ -419,5 +421,8 @@ EXPORT(sysn32_call_table)
 	PTR	sys_perf_event_open
 	PTR	sys_accept4
 	PTR	compat_sys_recvmmsg
-	PTR	sys_getdents
+	PTR	sys_getdents64
+	PTR	sys_fanotify_init		/* 6300 */
+	PTR	sys_fanotify_mark
+	PTR	sys_prlimit64
 	.size	sysn32_call_table,.-sysn32_call_table
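Both 32-bit ABI tables (N32 above, O32 below) switch the AIO entries to their compat_sys_* wrappers: 32-bit userland passes 32-bit pointers and 32-bit struct layouts, which the native sys_io_* entry points would misparse on a 64-bit kernel. The general shape of such a wrapper, as an illustration only (names are hypothetical; the real wrappers live in the generic compat code):

#include <linux/compat.h>

/* Hypothetical native entry point taking a user pointer. */
asmlinkage long sketch_native_call(void __user *buf, size_t len);

/* A compat wrapper widens the 32-bit user pointer with compat_ptr()
 * (and translates any 32-bit struct layouts) before handing off. */
asmlinkage long sketch_compat_call(compat_uptr_t buf32, compat_size_t len)
{
	return sketch_native_call(compat_ptr(buf32), len);
}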
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index 813689e..78d768a 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -93,8 +93,9 @@ NESTED(handle_sys, PT_SIZE, sp)
 	sd	t0, PT_R7(sp)		# set error flag
 	beqz	t0, 1f

+	ld	t1, PT_R2(sp)		# syscall number
 	dnegu	v0			# error
-	sd	v0, PT_R0(sp)		# flag for syscall restarting
+	sd	t1, PT_R0(sp)		# save it for syscall restarting
1:	sd	v0, PT_R2(sp)		# result

o32_syscall_exit:
@@ -142,8 +143,9 @@ trace_a_syscall:
 	sd	t0, PT_R7(sp)		# set error flag
 	beqz	t0, 1f

+	ld	t1, PT_R2(sp)		# syscall number
 	dnegu	v0			# error
-	sd	v0, PT_R0(sp)		# set flag for syscall restarting
+	sd	t1, PT_R0(sp)		# save it for syscall restarting
1:	sd	v0, PT_R2(sp)		# result

 	j	syscall_exit
@@ -154,8 +156,7 @@ trace_a_syscall:
 * The stackpointer for a call with more than 4 arguments is bad.
 */
bad_stack:
-	dnegu	v0			# error
-	sd	v0, PT_R0(sp)
+	li	v0, EFAULT
 	sd	v0, PT_R2(sp)
 	li	t0, 1			# set error flag
 	sd	t0, PT_R7(sp)
@@ -444,10 +445,10 @@ sys_call_table:
 	PTR	compat_sys_futex
 	PTR	compat_sys_sched_setaffinity
 	PTR	compat_sys_sched_getaffinity	/* 4240 */
-	PTR	sys_io_setup
+	PTR	compat_sys_io_setup
 	PTR	sys_io_destroy
-	PTR	sys_io_getevents
-	PTR	sys_io_submit
+	PTR	compat_sys_io_getevents
+	PTR	compat_sys_io_submit
 	PTR	sys_io_cancel			/* 4245 */
 	PTR	sys_exit_group
 	PTR	sys32_lookup_dcookie
@@ -538,5 +539,8 @@ sys_call_table:
 	PTR	compat_sys_rt_tgsigqueueinfo
 	PTR	sys_perf_event_open
 	PTR	sys_accept4
-	PTR	compat_sys_recvmmsg
+	PTR	compat_sys_recvmmsg		/* 4335 */
+	PTR	sys_fanotify_init
+	PTR	sys_32_fanotify_mark
+	PTR	sys_prlimit64
 	.size	sys_call_table,.-sys_call_table
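Taken together, the four handler changes replace the old restart convention. Previously PT_R0 held a negated errno as a "syscall in progress" flag and the signal code backed EPC up by 8 bytes, over a userland li/syscall instruction pair; now PT_R0 holds the syscall number itself, so the restart path can reload $v0 from it and re-execute only the syscall instruction. In C, the restart step, roughly as the signal.c hunk further down applies it:

/* Sketch of the new restart step (mirrors the signal.c change below). */
if (regs->regs[0]) {			/* non-zero: we are inside a syscall */
	regs->regs[2] = regs->regs[0];	/* reload $v0 with the syscall number */
	regs->regs[7] = regs->regs[26];	/* restore the saved $a3 error flag */
	regs->cp0_epc -= 4;		/* back up over the syscall insn only */
}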
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 85aef3f..acd3f2c 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -31,6 +31,7 @@
 #include <asm/setup.h>
 #include <asm/smp-ops.h>
 #include <asm/system.h>
+#include <asm/prom.h>

 struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly;
@@ -487,7 +488,9 @@ static void __init arch_mem_init(char **cmdline_p)
 	}

 	bootmem_init();
+	device_tree_init();
 	sparse_init();
+	plat_swiotlb_setup();
 	paging_init();
 }
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index 2099d5a..5922342 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -390,7 +390,6 @@ asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
 {
 	struct rt_sigframe __user *frame;
 	sigset_t set;
-	stack_t st;
 	int sig;

 	frame = (struct rt_sigframe __user *) regs.regs[29];
@@ -411,11 +410,9 @@ asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
 	else if (sig)
 		force_sig(sig, current);

-	if (__copy_from_user(&st, &frame->rs_uc.uc_stack, sizeof(st)))
-		goto badframe;
 	/* It is more difficult to avoid calling this function than to
 	   call it and ignore errors. */
-	do_sigaltstack((stack_t __user *)&st, NULL, regs.regs[29]);
+	do_sigaltstack(&frame->rs_uc.uc_stack, NULL, regs.regs[29]);

 	/*
 	 * Don't let your children do this ...
@@ -550,23 +547,26 @@ static int handle_signal(unsigned long sig, siginfo_t *info,
 	struct mips_abi *abi = current->thread.abi;
 	void *vdso = current->mm->context.vdso;

-	switch(regs->regs[0]) {
-	case ERESTART_RESTARTBLOCK:
-	case ERESTARTNOHAND:
-		regs->regs[2] = EINTR;
-		break;
-	case ERESTARTSYS:
-		if (!(ka->sa.sa_flags & SA_RESTART)) {
+	if (regs->regs[0]) {
+		switch(regs->regs[2]) {
+		case ERESTART_RESTARTBLOCK:
+		case ERESTARTNOHAND:
 			regs->regs[2] = EINTR;
 			break;
+		case ERESTARTSYS:
+			if (!(ka->sa.sa_flags & SA_RESTART)) {
+				regs->regs[2] = EINTR;
+				break;
+			}
+		/* fallthrough */
+		case ERESTARTNOINTR:
+			regs->regs[7] = regs->regs[26];
+			regs->regs[2] = regs->regs[0];
+			regs->cp0_epc -= 4;
 		}
-	/* fallthrough */
-	case ERESTARTNOINTR:	/* Userland will reload $v0. */
-		regs->regs[7] = regs->regs[26];
-		regs->cp0_epc -= 8;
-	}
-	regs->regs[0] = 0;	/* Don't deal with this again. */
+		regs->regs[0] = 0;	/* Don't deal with this again. */
+	}

 	if (sig_uses_siginfo(ka))
 		ret = abi->setup_rt_frame(vdso + abi->rt_signal_return_offset,
@@ -575,6 +575,9 @@ static int handle_signal(unsigned long sig, siginfo_t *info,
 		ret = abi->setup_frame(vdso + abi->signal_return_offset,
 				       ka, regs, sig, oldset);

+	if (ret)
+		return ret;
+
 	spin_lock_irq(&current->sighand->siglock);
 	sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
 	if (!(ka->sa.sa_flags & SA_NODEFER))
@@ -622,17 +625,13 @@ static void do_signal(struct pt_regs *regs)
 		return;
 	}

-	/*
-	 * Who's code doesn't conform to the restartable syscall convention
-	 * dies here!!!  The li instruction, a single machine instruction,
-	 * must directly be followed by the syscall instruction.
-	 */
 	if (regs->regs[0]) {
 		if (regs->regs[2] == ERESTARTNOHAND ||
 		    regs->regs[2] == ERESTARTSYS ||
 		    regs->regs[2] == ERESTARTNOINTR) {
+			regs->regs[2] = regs->regs[0];
 			regs->regs[7] = regs->regs[26];
-			regs->cp0_epc -= 8;
+			regs->cp0_epc -= 4;
 		}
 		if (regs->regs[2] == ERESTART_RESTARTBLOCK) {
 			regs->regs[2] = current->thread.abi->restart;
diff --git a/arch/mips/kernel/signal_n32.c b/arch/mips/kernel/signal_n32.c
index 2c5df81..ee24d81 100644
--- a/arch/mips/kernel/signal_n32.c
+++ b/arch/mips/kernel/signal_n32.c
@@ -109,6 +109,7 @@ asmlinkage int sysn32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs)
 asmlinkage void sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
 {
 	struct rt_sigframe_n32 __user *frame;
+	mm_segment_t old_fs;
 	sigset_t set;
 	stack_t st;
 	s32 sp;
@@ -143,7 +144,11 @@ asmlinkage void sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)

 	/* It is more difficult to avoid calling this function than to
 	   call it and ignore errors. */
+	old_fs = get_fs();
+	set_fs(KERNEL_DS);
 	do_sigaltstack((stack_t __user *)&st, NULL, regs.regs[29]);
+	set_fs(old_fs);
+
 	/*
 	 * Don't let your children do this ...
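The sigreturn fix above is the classic pattern for calling a __user-expecting helper on a kernel buffer: temporarily widen the address limit, make the call, restore it. A minimal sketch of the idiom as kernels of this vintage used it (set_fs() has since been removed from mainline):

#include <linux/uaccess.h>

/* Call a function that expects a user pointer with a kernel-space
 * argument by temporarily lifting the user address limit. */
static void sketch_call_with_kernel_buf(stack_t *kst, unsigned long usp)
{
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);	/* uaccess checks now accept kernel addresses */
	do_sigaltstack((stack_t __user *)kst, NULL, usp);
	set_fs(old_fs);		/* always restore the previous limit */
}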
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index 43e7cdc5..c0e8141 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -153,7 +153,7 @@ static void __cpuinit vsmp_init_secondary(void)
 {
 	extern int gic_present;

-	/* This is Malta specific: IPI,performance and timer inetrrupts */
+	/* This is Malta specific: IPI,performance and timer interrupts */
 	if (gic_present)
 		change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 |
 				 STATUSF_IP6 | STATUSF_IP7);
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index cfeb2c1..39c0825 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -1038,7 +1038,7 @@ void deferred_smtc_ipi(void)
 		 * but it's more efficient, given that we're already
 		 * running down the IPI queue.
 		 */
-		__raw_local_irq_restore(flags);
+		__arch_local_irq_restore(flags);
 	}
 }
@@ -1190,7 +1190,7 @@ void smtc_ipi_replay(void)
 		/*
 		 ** But use a raw restore here to avoid recursion.
 		 */
-		__raw_local_irq_restore(flags);
+		__arch_local_irq_restore(flags);

 		if (pipi) {
 			self_ipi(pipi);
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index 58bab2e..1dc6edf 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -254,12 +254,14 @@ asmlinkage int sys_execve(nabi_no_regargs struct pt_regs regs)
 	int error;
 	char * filename;

-	filename = getname((char __user *) (long)regs.regs[4]);
+	filename = getname((const char __user *) (long)regs.regs[4]);
 	error = PTR_ERR(filename);
 	if (IS_ERR(filename))
 		goto out;
-	error = do_execve(filename, (char __user *__user *) (long)regs.regs[5],
-			  (char __user *__user *) (long)regs.regs[6], &regs);
+	error = do_execve(filename,
+			  (const char __user *const __user *) (long)regs.regs[5],
+			  (const char __user *const __user *) (long)regs.regs[6],
+			  &regs);
 	putname(filename);

out:
@@ -436,7 +438,9 @@ asmlinkage void bad_stack(void)
 * Do a system call from kernel instead of calling sys_execve so we
 * end up with proper pt_regs.
 */
-int kernel_execve(const char *filename, char *const argv[], char *const envp[])
+int kernel_execve(const char *filename,
+		  const char *const argv[],
+		  const char *const envp[])
 {
 	register unsigned long __a0 asm("$4") = (unsigned long) filename;
 	register unsigned long __a1 asm("$5") = (unsigned long) argv;
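kernel_execve(), cut off above, issues the syscall directly so the kernel ends up with real pt_regs. A sketch of the register-asm pattern it relies on, simplified from the real function (the genuine clobber list also names the caller-saved temporaries; treat this as illustrative, not the exact kernel code):

#include <asm/unistd.h>

static int sketch_kernel_execve(const char *filename,
				const char *const argv[],
				const char *const envp[])
{
	register unsigned long __a0 asm("$4") = (unsigned long) filename;
	register unsigned long __a1 asm("$5") = (unsigned long) argv;
	register unsigned long __a2 asm("$6") = (unsigned long) envp;
	register unsigned long __a3 asm("$7");	/* error flag out of the trap */
	unsigned long __v0;

	__asm__ volatile (
	"	li	$2, %5		# __NR_execve	\n"
	"	syscall					\n"
	"	move	%0, $2				\n"
	: "=&r" (__v0), "=r" (__a3)
	: "r" (__a0), "r" (__a1), "r" (__a2), "i" (__NR_execve)
	: "$2", "memory");

	return __a3 ? -__v0 : __v0;	/* $a3 != 0 means v0 holds an errno */
}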
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 03ec001..e971043 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -28,6 +28,8 @@
 #include <linux/kprobes.h>
 #include <linux/notifier.h>
 #include <linux/kdb.h>
+#include <linux/irq.h>
+#include <linux/perf_event.h>

 #include <asm/bootinfo.h>
 #include <asm/branch.h>
@@ -51,7 +53,6 @@
 #include <asm/mmu_context.h>
 #include <asm/types.h>
 #include <asm/stacktrace.h>
-#include <asm/irq.h>
 #include <asm/uasm.h>

 extern void check_wait(void);
@@ -82,7 +83,8 @@ extern asmlinkage void handle_mcheck(void);
 extern asmlinkage void handle_reserved(void);

 extern int fpu_emulator_cop1Handler(struct pt_regs *xcp,
-	struct mips_fpu_struct *ctx, int has_fpu);
+	struct mips_fpu_struct *ctx, int has_fpu,
+	void *__user *fault_addr);

 void (*board_be_init)(void);
 int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
@@ -576,10 +578,16 @@ static inline int simulate_sc(struct pt_regs *regs, unsigned int opcode)
 */
static int simulate_llsc(struct pt_regs *regs, unsigned int opcode)
{
-	if ((opcode & OPCODE) == LL)
+	if ((opcode & OPCODE) == LL) {
+		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
+				1, 0, regs, 0);
 		return simulate_ll(regs, opcode);
-	if ((opcode & OPCODE) == SC)
+	}
+	if ((opcode & OPCODE) == SC) {
+		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
+				1, 0, regs, 0);
 		return simulate_sc(regs, opcode);
+	}

 	return -1;			/* Must be something else ... */
}
@@ -595,6 +603,8 @@ static int simulate_rdhwr(struct pt_regs *regs, unsigned int opcode)
 	if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
 		int rd = (opcode & RD) >> 11;
 		int rt = (opcode & RT) >> 16;
+		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
+				1, 0, regs, 0);
 		switch (rd) {
 		case 0:		/* CPU number */
 			regs->regs[rt] = smp_processor_id();
@@ -630,8 +640,11 @@ static int simulate_rdhwr(struct pt_regs *regs, unsigned int opcode)

static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
{
-	if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC)
+	if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC) {
+		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
+				1, 0, regs, 0);
 		return 0;
+	}

 	return -1;			/* Must be something else ... */
}
@@ -649,12 +662,36 @@ asmlinkage void do_ov(struct pt_regs *regs)
 	force_sig_info(SIGFPE, &info, current);
}

+static int process_fpemu_return(int sig, void __user *fault_addr)
+{
+	if (sig == SIGSEGV || sig == SIGBUS) {
+		struct siginfo si = {0};
+		si.si_addr = fault_addr;
+		si.si_signo = sig;
+		if (sig == SIGSEGV) {
+			if (find_vma(current->mm, (unsigned long)fault_addr))
+				si.si_code = SEGV_ACCERR;
+			else
+				si.si_code = SEGV_MAPERR;
+		} else {
+			si.si_code = BUS_ADRERR;
+		}
+		force_sig_info(sig, &si, current);
+		return 1;
+	} else if (sig) {
+		force_sig(sig, current);
+		return 1;
+	} else {
+		return 0;
+	}
+}
+
 /*
 * XXX Delayed fp exceptions when doing a lazy ctx switch XXX
 */
asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
{
-	siginfo_t info;
+	siginfo_t info = {0};

 	if (notify_die(DIE_FP, "FP exception", regs, 0, regs_to_trapnr(regs),
 		       SIGFPE) == NOTIFY_STOP)
@@ -663,6 +700,7 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)

 	if (fcr31 & FPU_CSR_UNI_X) {
 		int sig;
+		void __user *fault_addr = NULL;

 		/*
 		 * Unimplemented operation exception.  If we've got the full
@@ -678,7 +716,8 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
 		lose_fpu(1);

 		/* Run the emulator */
-		sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1);
+		sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
+					       &fault_addr);

 		/*
 		 * We can't allow the emulated instruction to leave any of
@@ -690,8 +729,7 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
 		own_fpu(1);	/* Using the FPU again.  */

 		/* If something went wrong, signal */
-		if (sig)
-			force_sig(sig, current);
+		process_fpemu_return(sig, fault_addr);

 		return;
 	} else if (fcr31 & FPU_CSR_INV_X)
@@ -984,11 +1022,11 @@ asmlinkage void do_cpu(struct pt_regs *regs)

 		if (!raw_cpu_has_fpu) {
 			int sig;
+			void __user *fault_addr = NULL;
 			sig = fpu_emulator_cop1Handler(regs,
-						&current->thread.fpu, 0);
-			if (sig)
-				force_sig(sig, current);
-			else
+						&current->thread.fpu,
+						0, &fault_addr);
+			if (!process_fpemu_return(sig, fault_addr))
 				mt_ase_fp_affinity();
 		}

@@ -1469,6 +1507,7 @@ void __cpuinit per_cpu_trap_init(void)
{
 	unsigned int cpu = smp_processor_id();
 	unsigned int status_set = ST0_CU0;
+	unsigned int hwrena = cpu_hwrena_impl_bits;
#ifdef CONFIG_MIPS_MT_SMTC
 	int secondaryTC = 0;
 	int bootTC = (cpu == 0);
@@ -1501,14 +1540,14 @@ void __cpuinit per_cpu_trap_init(void)
 	change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
 			 status_set);

-	if (cpu_has_mips_r2) {
-		unsigned int enable = 0x0000000f | cpu_hwrena_impl_bits;
+	if (cpu_has_mips_r2)
+		hwrena |= 0x0000000f;

-		if (!noulri && cpu_has_userlocal)
-			enable |= (1 << 29);
+	if (!noulri && cpu_has_userlocal)
+		hwrena |= (1 << 29);

-		write_c0_hwrena(enable);
-	}
+	if (hwrena)
+		write_c0_hwrena(hwrena);

#ifdef CONFIG_MIPS_MT_SMTC
 	if (!secondaryTC) {
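The perf_sw_event() calls added throughout traps.c (and in unaligned.c below) feed the generic software-event counters, so emulated instructions become visible to the perf tool with no PMU involvement. The counting idiom, isolated into a sketch with the argument meanings spelled out (five-argument signature as of this kernel generation; the NMI flag was dropped in later kernels):

#include <linux/perf_event.h>

/* Hypothetical fixup path: count one EMULATION_FAULTS event, not in
 * NMI context, attributed to the faulting regs; the address argument
 * is unused here, hence 0. */
static void sketch_count_emulation(struct pt_regs *regs)
{
	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
}

Userspace can then observe the counts with something like: perf stat -e emulation-faults,alignment-faults <cmd>.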
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index 69b039c..cfea1ad 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -78,6 +78,8 @@
 #include <linux/smp.h>
 #include <linux/sched.h>
 #include <linux/debugfs.h>
+#include <linux/perf_event.h>
+
 #include <asm/asm.h>
 #include <asm/branch.h>
 #include <asm/byteorder.h>
@@ -109,7 +111,8 @@ static void emulate_load_store_insn(struct pt_regs *regs,
 	unsigned long value;
 	unsigned int res;

-	regs->regs[0] = 0;
+	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
+			1, 0, regs, 0);

 	/*
 	 * This load never faults.
@@ -513,6 +516,8 @@ asmlinkage void do_ade(struct pt_regs *regs)
 	unsigned int __user *pc;
 	mm_segment_t seg;

+	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS,
+			1, 0, regs, regs->cp0_badvaddr);
 	/*
 	 * Did we catch a fault trying to load an instruction?
 	 * Or are we running in MIPS16 mode?
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
index 2bd2151..6a1fdfe 100644
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -1092,6 +1092,10 @@ static int vpe_open(struct inode *inode, struct file *filp)

 	/* this of-course trashes what was there before... */
 	v->pbuffer = vmalloc(P_SIZE);
+	if (!v->pbuffer) {
+		pr_warning("VPE loader: unable to allocate memory\n");
+		return -ENOMEM;
+	}
 	v->plen = P_SIZE;
 	v->load_addr = NULL;
 	v->len = 0;
@@ -1149,10 +1153,9 @@ static int vpe_release(struct inode *inode, struct file *filp)
 	if (ret < 0)
 		v->shared_ptr = NULL;

-	// cleanup any temp buffers
-	if (v->pbuffer)
-		vfree(v->pbuffer);
+	vfree(v->pbuffer);
 	v->plen = 0;
+
 	return ret;
 }
@@ -1169,11 +1172,6 @@ static ssize_t vpe_write(struct file *file, const char __user * buffer,
 	if (v == NULL)
 		return -ENODEV;

-	if (v->pbuffer == NULL) {
-		printk(KERN_ERR "VPE loader: no buffer for program\n");
-		return -ENOMEM;
-	}
-
 	if ((count + v->len) > v->plen) {
 		printk(KERN_WARNING
 		       "VPE loader: elf size too big. Perhaps strip uneeded symbols\n");
@@ -1192,7 +1190,8 @@ static const struct file_operations vpe_fops = {
 	.owner = THIS_MODULE,
 	.open = vpe_open,
 	.release = vpe_release,
-	.write = vpe_write
+	.write = vpe_write,
+	.llseek = noop_llseek,
 };

 /* module wrapper entry points */
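The .llseek additions here and in rtlx.c come from the tree-wide llseek pushdown of this era: file_operations without an explicit .llseek previously fell back to default_llseek, which took the big kernel lock, so drivers that never seek now declare noop_llseek explicitly. Together with the vmalloc() NULL check in vpe_open(), the two idioms look roughly like this in a hypothetical driver (names and buffer size invented for the sketch):

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/vmalloc.h>

#define SKETCH_BUF_SIZE	(256 * 1024)	/* stand-in for P_SIZE */

static int sketch_open(struct inode *inode, struct file *filp)
{
	void *buf = vmalloc(SKETCH_BUF_SIZE);

	if (!buf)
		return -ENOMEM;	/* fail open() instead of writing through NULL */
	filp->private_data = buf;
	return 0;
}

static const struct file_operations sketch_fops = {
	.owner  = THIS_MODULE,
	.open   = sketch_open,
	.llseek = noop_llseek,	/* seeking "succeeds" but moves nothing */
};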