Diffstat (limited to 'arch/arm/kernel')
64 files changed, 9200 insertions, 1989 deletions
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile index ff89d0b..185ee82 100644 --- a/arch/arm/kernel/Makefile +++ b/arch/arm/kernel/Makefile @@ -2,18 +2,26 @@ # Makefile for the linux kernel. # -AFLAGS_head.o := -DTEXT_OFFSET=$(TEXT_OFFSET) +CPPFLAGS_vmlinux.lds := -DTEXT_OFFSET=$(TEXT_OFFSET) +AFLAGS_head.o := -DTEXT_OFFSET=$(TEXT_OFFSET) -ifdef CONFIG_DYNAMIC_FTRACE +ifdef CONFIG_FUNCTION_TRACER CFLAGS_REMOVE_ftrace.o = -pg endif +CFLAGS_REMOVE_return_address.o = -pg + # Object file lists. -obj-y := compat.o elf.o entry-armv.o entry-common.o irq.o \ - process.o ptrace.o setup.o signal.o \ +obj-y := elf.o entry-armv.o entry-common.o irq.o \ + process.o ptrace.o return_address.o setup.o signal.o \ sys_arm.o stacktrace.o time.o traps.o +obj-$(CONFIG_DEPRECATED_PARAM_STRUCT) += compat.o + +obj-$(CONFIG_LEDS) += leds.o +obj-$(CONFIG_OC_ETM) += etm.o + obj-$(CONFIG_ISA_DMA_API) += dma.o obj-$(CONFIG_ARCH_ACORN) += ecard.o obj-$(CONFIG_FIQ) += fiq.o @@ -21,10 +29,12 @@ obj-$(CONFIG_MODULES) += armksyms.o module.o obj-$(CONFIG_ARTHUR) += arthur.o obj-$(CONFIG_ISA_DMA) += dma-isa.o obj-$(CONFIG_PCI) += bios32.o isa.o -obj-$(CONFIG_SMP) += smp.o +obj-$(CONFIG_HAVE_SCHED_CLOCK) += sched_clock.o +obj-$(CONFIG_SMP) += smp.o smp_tlb.o obj-$(CONFIG_HAVE_ARM_SCU) += smp_scu.o obj-$(CONFIG_HAVE_ARM_TWD) += smp_twd.o obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o +obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o obj-$(CONFIG_KPROBES) += kprobes.o kprobes-decode.o obj-$(CONFIG_ATAGS_PROC) += atags.o @@ -32,6 +42,11 @@ obj-$(CONFIG_OABI_COMPAT) += sys_oabi-compat.o obj-$(CONFIG_ARM_THUMBEE) += thumbee.o obj-$(CONFIG_KGDB) += kgdb.o obj-$(CONFIG_ARM_UNWIND) += unwind.o +obj-$(CONFIG_HAVE_TCM) += tcm.o +obj-$(CONFIG_CRASH_DUMP) += crash_dump.o +obj-$(CONFIG_SWP_EMULATE) += swp_emulate.o +CFLAGS_swp_emulate.o := -Wa,-march=armv7-a +obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o obj-$(CONFIG_CRUNCH) += crunch.o crunch-bits.o AFLAGS_crunch-bits.o := -Wa,-mcpu=ep9312 @@ -39,7 +54,10 @@ AFLAGS_crunch-bits.o := -Wa,-mcpu=ep9312 obj-$(CONFIG_CPU_XSCALE) += xscale-cp0.o obj-$(CONFIG_CPU_XSC3) += xscale-cp0.o obj-$(CONFIG_CPU_MOHAWK) += xscale-cp0.o +obj-$(CONFIG_CPU_PJ4) += pj4-cp0.o obj-$(CONFIG_IWMMXT) += iwmmxt.o +obj-$(CONFIG_CPU_HAS_PMU) += pmu.o +obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o AFLAGS_iwmmxt.o := -Wa,-mcpu=iwmmxt ifneq ($(CONFIG_ARCH_EBSA110),y) @@ -48,5 +66,6 @@ endif head-y := head$(MMUEXT).o obj-$(CONFIG_DEBUG_LL) += debug.o +obj-$(CONFIG_EARLY_PRINTK) += early_printk.o extra-y := $(head-y) init_task.o vmlinux.lds diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c index 531e186..e5e1e53 100644 --- a/arch/arm/kernel/armksyms.c +++ b/arch/arm/kernel/armksyms.c @@ -48,27 +48,7 @@ extern void __aeabi_uidivmod(void); extern void __aeabi_ulcmp(void); extern void fpundefinstr(void); -extern void fp_enter(void); -/* - * This has a special calling convention; it doesn't - * modify any of the usual registers, except for LR. - */ -#define EXPORT_CRC_ALIAS(sym) __CRC_SYMBOL(sym, "") - -#define EXPORT_SYMBOL_ALIAS(sym,orig) \ - EXPORT_CRC_ALIAS(sym) \ - static const struct kernel_symbol __ksymtab_##sym \ - __used __attribute__((section("__ksymtab"))) = \ - { (unsigned long)&orig, #sym }; - -/* - * floating point math emulator support. - * These symbols will never change their calling convention... 
- */ -EXPORT_SYMBOL_ALIAS(kern_fp_enter,fp_enter); -EXPORT_SYMBOL_ALIAS(fp_printk,printk); -EXPORT_SYMBOL_ALIAS(fp_send_sig,send_sig); EXPORT_SYMBOL(__backtrace); @@ -185,5 +165,8 @@ EXPORT_SYMBOL(_find_next_bit_be); #endif #ifdef CONFIG_FUNCTION_TRACER +#ifdef CONFIG_OLD_MCOUNT EXPORT_SYMBOL(mcount); #endif +EXPORT_SYMBOL(__gnu_mcount_nc); +#endif diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c index 4a88125..82da661 100644 --- a/arch/arm/kernel/asm-offsets.c +++ b/arch/arm/kernel/asm-offsets.c @@ -12,6 +12,7 @@ */ #include <linux/sched.h> #include <linux/mm.h> +#include <linux/dma-mapping.h> #include <asm/mach/arch.h> #include <asm/thread_info.h> #include <asm/memory.h> @@ -39,6 +40,9 @@ int main(void) { DEFINE(TSK_ACTIVE_MM, offsetof(struct task_struct, active_mm)); +#ifdef CONFIG_CC_STACKPROTECTOR + DEFINE(TSK_STACK_CANARY, offsetof(struct task_struct, stack_canary)); +#endif BLANK(); DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count)); @@ -98,8 +102,6 @@ int main(void) DEFINE(SIZEOF_MACHINE_DESC, sizeof(struct machine_desc)); DEFINE(MACHINFO_TYPE, offsetof(struct machine_desc, nr)); DEFINE(MACHINFO_NAME, offsetof(struct machine_desc, name)); - DEFINE(MACHINFO_PHYSIO, offsetof(struct machine_desc, phys_io)); - DEFINE(MACHINFO_PGOFFIO, offsetof(struct machine_desc, io_pg_offst)); BLANK(); DEFINE(PROC_INFO_SZ, sizeof(struct proc_info_list)); DEFINE(PROCINFO_INITFUNC, offsetof(struct proc_info_list, __cpu_flush)); @@ -112,5 +114,9 @@ int main(void) #ifdef MULTI_PABORT DEFINE(PROCESSOR_PABT_FUNC, offsetof(struct processor, _prefetch_abort)); #endif + BLANK(); + DEFINE(DMA_BIDIRECTIONAL, DMA_BIDIRECTIONAL); + DEFINE(DMA_TO_DEVICE, DMA_TO_DEVICE); + DEFINE(DMA_FROM_DEVICE, DMA_FROM_DEVICE); return 0; } diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c index 8096819..c6273a3 100644 --- a/arch/arm/kernel/bios32.c +++ b/arch/arm/kernel/bios32.c @@ -527,6 +527,9 @@ static void __init pcibios_init_hw(struct hw_pci *hw) if (!sys) panic("PCI: unable to allocate sys data!"); +#ifdef CONFIG_PCI_DOMAINS + sys->domain = hw->domain; +#endif sys->hw = hw; sys->busnr = busnr; sys->swizzle = hw->swizzle; @@ -616,15 +619,17 @@ char * __init pcibios_setup(char *str) * but we want to try to avoid allocating at 0x2900-0x2bff * which might be mirrored at 0x0100-0x03ff.. 
*/ -void pcibios_align_resource(void *data, struct resource *res, - resource_size_t size, resource_size_t align) +resource_size_t pcibios_align_resource(void *data, const struct resource *res, + resource_size_t size, resource_size_t align) { resource_size_t start = res->start; if (res->flags & IORESOURCE_IO && start & 0x300) start = (start + 0x3ff) & ~0x3ff; - res->start = (start + align - 1) & ~(align - 1); + start = (start + align - 1) & ~(align - 1); + + return start; } /** diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S index f776e72..5c26ecc 100644 --- a/arch/arm/kernel/calls.S +++ b/arch/arm/kernel/calls.S @@ -81,7 +81,7 @@ CALL(sys_ni_syscall) /* was sys_ssetmask */ /* 70 */ CALL(sys_setreuid16) CALL(sys_setregid16) - CALL(sys_sigsuspend_wrapper) + CALL(sys_sigsuspend) CALL(sys_sigpending) CALL(sys_sethostname) /* 75 */ CALL(sys_setrlimit) @@ -91,7 +91,7 @@ CALL(sys_settimeofday) /* 80 */ CALL(sys_getgroups16) CALL(sys_setgroups16) - CALL(OBSOLETE(old_select)) /* used by libc4 */ + CALL(OBSOLETE(sys_old_select)) /* used by libc4 */ CALL(sys_symlink) CALL(sys_ni_syscall) /* was sys_lstat */ /* 85 */ CALL(sys_readlink) @@ -99,7 +99,7 @@ CALL(sys_swapon) CALL(sys_reboot) CALL(OBSOLETE(sys_old_readdir)) /* used by libc4 */ -/* 90 */ CALL(OBSOLETE(old_mmap)) /* used by libc4 */ +/* 90 */ CALL(OBSOLETE(sys_old_mmap)) /* used by libc4 */ CALL(sys_munmap) CALL(sys_truncate) CALL(sys_ftruncate) @@ -172,7 +172,7 @@ /* 160 */ CALL(sys_sched_get_priority_min) CALL(sys_sched_rr_get_interval) CALL(sys_nanosleep) - CALL(sys_arm_mremap) + CALL(sys_mremap) CALL(sys_setresuid16) /* 165 */ CALL(sys_getresuid16) CALL(sys_ni_syscall) /* vm86 */ @@ -188,7 +188,7 @@ CALL(sys_rt_sigpending) CALL(sys_rt_sigtimedwait) CALL(sys_rt_sigqueueinfo) - CALL(sys_rt_sigsuspend_wrapper) + CALL(sys_rt_sigsuspend) /* 180 */ CALL(ABI(sys_pread64, sys_oabi_pread64)) CALL(ABI(sys_pwrite64, sys_oabi_pwrite64)) CALL(sys_chown16) @@ -344,8 +344,8 @@ CALL(sys_readlinkat) CALL(sys_fchmodat) CALL(sys_faccessat) -/* 335 */ CALL(sys_ni_syscall) /* eventually pselect6 */ - CALL(sys_ni_syscall) /* eventually ppoll */ +/* 335 */ CALL(sys_pselect6) + CALL(sys_ppoll) CALL(sys_unshare) CALL(sys_set_robust_list) CALL(sys_get_robust_list) @@ -355,7 +355,7 @@ CALL(sys_vmsplice) CALL(sys_move_pages) /* 345 */ CALL(sys_getcpu) - CALL(sys_ni_syscall) /* eventually epoll_pwait */ + CALL(sys_epoll_pwait) CALL(sys_kexec_load) CALL(sys_utimensat) CALL(sys_signalfd) @@ -373,7 +373,12 @@ CALL(sys_preadv) CALL(sys_pwritev) CALL(sys_rt_tgsigqueueinfo) - CALL(sys_perf_counter_open) + CALL(sys_perf_event_open) +/* 365 */ CALL(sys_recvmmsg) + CALL(sys_accept4) + CALL(sys_fanotify_init) + CALL(sys_fanotify_mark) + CALL(sys_prlimit64) #ifndef syscalls_counted .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls #define syscalls_counted diff --git a/arch/arm/kernel/compat.c b/arch/arm/kernel/compat.c index 0a13854..9256523 100644 --- a/arch/arm/kernel/compat.c +++ b/arch/arm/kernel/compat.c @@ -217,10 +217,3 @@ void __init convert_to_tag_list(struct tag *tags) struct param_struct *params = (struct param_struct *)tags; build_tag_list(params, ¶ms->u2); } - -void __init squash_mem_tags(struct tag *tag) -{ - for (; tag->hdr.size; tag = tag_next(tag)) - if (tag->hdr.tag == ATAG_MEM) - tag->hdr.tag = ATAG_NONE; -} diff --git a/arch/arm/kernel/compat.h b/arch/arm/kernel/compat.h index 27e61a6..39264ab 100644 --- a/arch/arm/kernel/compat.h +++ b/arch/arm/kernel/compat.h @@ -9,5 +9,3 @@ */ extern void convert_to_tag_list(struct tag *tags); - 
-extern void squash_mem_tags(struct tag *tag); diff --git a/arch/arm/kernel/crash_dump.c b/arch/arm/kernel/crash_dump.c new file mode 100644 index 0000000..cd3b853 --- /dev/null +++ b/arch/arm/kernel/crash_dump.c @@ -0,0 +1,60 @@ +/* + * arch/arm/kernel/crash_dump.c + * + * Copyright (C) 2010 Nokia Corporation. + * Author: Mika Westerberg + * + * This code is taken from arch/x86/kernel/crash_dump_64.c + * Created by: Hariprasad Nellitheertha (hari@in.ibm.com) + * Copyright (C) IBM Corporation, 2004. All rights reserved + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/errno.h> +#include <linux/crash_dump.h> +#include <linux/uaccess.h> +#include <linux/io.h> + +/* stores the physical address of elf header of crash image */ +unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX; + +/** + * copy_oldmem_page() - copy one page from old kernel memory + * @pfn: page frame number to be copied + * @buf: buffer where the copied page is placed + * @csize: number of bytes to copy + * @offset: offset in bytes into the page + * @userbuf: if set, @buf is in the user address space + * + * This function copies one page from old kernel memory into buffer pointed by + * @buf. If @buf is in userspace, set @userbuf to %1. Returns number of bytes + * copied or negative error in case of failure. + */ +ssize_t copy_oldmem_page(unsigned long pfn, char *buf, + size_t csize, unsigned long offset, + int userbuf) +{ + void *vaddr; + + if (!csize) + return 0; + + vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE); + if (!vaddr) + return -ENOMEM; + + if (userbuf) { + if (copy_to_user(buf, vaddr + offset, csize)) { + iounmap(vaddr); + return -EFAULT; + } + } else { + memcpy(buf, vaddr + offset, csize); + } + + iounmap(vaddr); + return csize; +} diff --git a/arch/arm/kernel/crunch.c b/arch/arm/kernel/crunch.c index 99995c2..25ef223 100644 --- a/arch/arm/kernel/crunch.c +++ b/arch/arm/kernel/crunch.c @@ -31,7 +31,7 @@ void crunch_task_release(struct thread_info *thread) static int crunch_enabled(u32 devcfg) { - return !!(devcfg & EP93XX_SYSCON_DEVICE_CONFIG_CRUNCH_ENABLE); + return !!(devcfg & EP93XX_SYSCON_DEVCFG_CPENA); } static int crunch_do(struct notifier_block *self, unsigned long cmd, void *t) @@ -51,16 +51,21 @@ static int crunch_do(struct notifier_block *self, unsigned long cmd, void *t) * initialised state information on the first fault. */ - case THREAD_NOTIFY_RELEASE: + case THREAD_NOTIFY_EXIT: crunch_task_release(thread); break; case THREAD_NOTIFY_SWITCH: - devcfg = __raw_readl(EP93XX_SYSCON_DEVICE_CONFIG); + devcfg = __raw_readl(EP93XX_SYSCON_DEVCFG); if (crunch_enabled(devcfg) || crunch_owner == crunch_state) { - devcfg ^= EP93XX_SYSCON_DEVICE_CONFIG_CRUNCH_ENABLE; + /* + * We don't use ep93xx_syscon_swlocked_write() here + * because we are on the context switch path and + * preemption is already disabled. 
+ */ + devcfg ^= EP93XX_SYSCON_DEVCFG_CPENA; __raw_writel(0xaa, EP93XX_SYSCON_SWLOCK); - __raw_writel(devcfg, EP93XX_SYSCON_DEVICE_CONFIG); + __raw_writel(devcfg, EP93XX_SYSCON_DEVCFG); } break; } diff --git a/arch/arm/kernel/debug.S b/arch/arm/kernel/debug.S index b121b60..a0f0752 100644 --- a/arch/arm/kernel/debug.S +++ b/arch/arm/kernel/debug.S @@ -22,11 +22,11 @@ #if defined(CONFIG_DEBUG_ICEDCC) @@ debug using ARM EmbeddedICE DCC channel -#if defined(CONFIG_CPU_V6) - - .macro addruart, rx + .macro addruart, rp, rv .endm +#if defined(CONFIG_CPU_V6) + .macro senduart, rd, rx mcr p14, 0, \rd, c0, c5, 0 .endm @@ -49,11 +49,25 @@ 1002: .endm -#elif defined(CONFIG_CPU_XSCALE) +#elif defined(CONFIG_CPU_V7) + + .macro senduart, rd, rx + mcr p14, 0, \rd, c0, c5, 0 + .endm - .macro addruart, rx + .macro busyuart, rd, rx +busy: mrc p14, 0, pc, c0, c1, 0 + bcs busy .endm + .macro waituart, rd, rx +wait: mrc p14, 0, pc, c0, c1, 0 + bcs wait + + .endm + +#elif defined(CONFIG_CPU_XSCALE) + .macro senduart, rd, rx mcr p14, 0, \rd, c8, c0, 0 .endm @@ -78,9 +92,6 @@ #else - .macro addruart, rx - .endm - .macro senduart, rd, rx mcr p14, 0, \rd, c1, c0, 0 .endm @@ -110,6 +121,22 @@ #include <mach/debug-macro.S> #endif /* CONFIG_DEBUG_ICEDCC */ +#ifdef CONFIG_MMU + .macro addruart_current, rx, tmp1, tmp2 + addruart \tmp1, \tmp2 + mrc p15, 0, \rx, c1, c0 + tst \rx, #1 + moveq \rx, \tmp1 + movne \rx, \tmp2 + .endm + +#else /* !CONFIG_MMU */ + .macro addruart_current, rx, tmp1, tmp2 + addruart \rx, \tmp1 + .endm + +#endif /* CONFIG_MMU */ + /* * Useful debugging routines */ @@ -144,7 +171,7 @@ ENDPROC(printhex2) .ltorg ENTRY(printascii) - addruart r3 + addruart_current r3, r1, r2 b 2f 1: waituart r2, r3 senduart r1, r3 @@ -160,7 +187,7 @@ ENTRY(printascii) ENDPROC(printascii) ENTRY(printch) - addruart r3 + addruart_current r3, r1, r2 mov r1, r0 mov r0, #0 b 1b diff --git a/arch/arm/kernel/dma-isa.c b/arch/arm/kernel/dma-isa.c index 0e88e46..360bb6d 100644 --- a/arch/arm/kernel/dma-isa.c +++ b/arch/arm/kernel/dma-isa.c @@ -207,8 +207,6 @@ void __init isa_init_dma(void) outb(0x32, 0x4d6); outb(0x33, 0x4d6); - request_dma(DMA_ISA_CASCADE, "cascade"); - for (i = 0; i < ARRAY_SIZE(dma_resources); i++) request_resource(&ioport_resource, dma_resources + i); @@ -218,5 +216,7 @@ void __init isa_init_dma(void) printk(KERN_ERR "ISADMA%u: unable to register: %d\n", chan, ret); } + + request_dma(DMA_ISA_CASCADE, "cascade"); } } diff --git a/arch/arm/kernel/dma.c b/arch/arm/kernel/dma.c index 7d5b9fb..2c4a185 100644 --- a/arch/arm/kernel/dma.c +++ b/arch/arm/kernel/dma.c @@ -16,6 +16,8 @@ #include <linux/spinlock.h> #include <linux/errno.h> #include <linux/scatterlist.h> +#include <linux/seq_file.h> +#include <linux/proc_fs.h> #include <asm/dma.h> @@ -264,3 +266,37 @@ int get_dma_residue(unsigned int chan) return ret; } EXPORT_SYMBOL(get_dma_residue); + +#ifdef CONFIG_PROC_FS +static int proc_dma_show(struct seq_file *m, void *v) +{ + int i; + + for (i = 0 ; i < MAX_DMA_CHANNELS ; i++) { + dma_t *dma = dma_channel(i); + if (dma && dma->lock) + seq_printf(m, "%2d: %s\n", i, dma->device_id); + } + return 0; +} + +static int proc_dma_open(struct inode *inode, struct file *file) +{ + return single_open(file, proc_dma_show, NULL); +} + +static const struct file_operations proc_dma_operations = { + .open = proc_dma_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int __init proc_dma_init(void) +{ + proc_create("dma", 0, NULL, &proc_dma_operations); + return 0; +} + 
+__initcall(proc_dma_init); +#endif diff --git a/arch/arm/kernel/early_printk.c b/arch/arm/kernel/early_printk.c new file mode 100644 index 0000000..85aa2b2 --- /dev/null +++ b/arch/arm/kernel/early_printk.c @@ -0,0 +1,57 @@ +/* + * linux/arch/arm/kernel/early_printk.c + * + * Copyright (C) 2009 Sascha Hauer <s.hauer@pengutronix.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/kernel.h> +#include <linux/console.h> +#include <linux/init.h> + +extern void printch(int); + +static void early_write(const char *s, unsigned n) +{ + while (n-- > 0) { + if (*s == '\n') + printch('\r'); + printch(*s); + s++; + } +} + +static void early_console_write(struct console *con, const char *s, unsigned n) +{ + early_write(s, n); +} + +static struct console early_console = { + .name = "earlycon", + .write = early_console_write, + .flags = CON_PRINTBUFFER | CON_BOOT, + .index = -1, +}; + +asmlinkage void early_printk(const char *fmt, ...) +{ + char buf[512]; + int n; + va_list ap; + + va_start(ap, fmt); + n = vscnprintf(buf, sizeof(buf), fmt, ap); + early_write(buf, n); + va_end(ap); +} + +static int __init setup_early_printk(char *buf) +{ + register_console(&early_console); + return 0; +} + +early_param("earlyprintk", setup_early_printk); diff --git a/arch/arm/kernel/elf.c b/arch/arm/kernel/elf.c index 950391f..d4a0da1 100644 --- a/arch/arm/kernel/elf.c +++ b/arch/arm/kernel/elf.c @@ -78,15 +78,6 @@ int arm_elf_read_implies_exec(const struct elf32_hdr *x, int executable_stack) return 1; if (cpu_architecture() < CPU_ARCH_ARMv6) return 1; -#if !defined(CONFIG_AEABI) || defined(CONFIG_OABI_COMPAT) - /* - * If we have support for OABI programs, we can never allow NX - * support - our signal syscall restart mechanism relies upon - * being able to execute code placed on the user stack. - */ - return 1; -#else return 0; -#endif } EXPORT_SYMBOL(arm_elf_read_implies_exec); diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index fc8af43..2b46fea 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -21,42 +21,26 @@ #include <mach/entry-macro.S> #include <asm/thread_notify.h> #include <asm/unwind.h> +#include <asm/unistd.h> +#include <asm/tls.h> #include "entry-header.S" +#include <asm/entry-macro-multi.S> /* * Interrupt handling. 
Preserves r7, r8, r9 */ .macro irq_handler - get_irqnr_preamble r5, lr -1: get_irqnr_and_base r0, r6, r5, lr - movne r1, sp - @ - @ routine called with r0 = irq number, r1 = struct pt_regs * - @ - adrne lr, 1b - bne asm_do_IRQ - -#ifdef CONFIG_SMP - /* - * XXX - * - * this macro assumes that irqstat (r6) and base (r5) are - * preserved from get_irqnr_and_base above - */ - test_for_ipi r0, r6, r5, lr - movne r0, sp - adrne lr, 1b - bne do_IPI - -#ifdef CONFIG_LOCAL_TIMERS - test_for_ltirq r0, r6, r5, lr - movne r0, sp - adrne lr, 1b - bne do_local_timer -#endif +#ifdef CONFIG_MULTI_IRQ_HANDLER + ldr r5, =handle_arch_irq + mov r0, sp + ldr r5, [r5] + adr lr, BSYM(9997f) + teq r5, #0 + movne pc, r5 #endif - + arch_irq_handler_default +9997: .endm #ifdef CONFIG_KPROBES @@ -70,7 +54,10 @@ */ .macro inv_entry, reason sub sp, sp, #S_FRAME_SIZE - stmib sp, {r1 - lr} + ARM( stmib sp, {r1 - lr} ) + THUMB( stmia sp, {r0 - r12} ) + THUMB( str sp, [sp, #S_SP] ) + THUMB( str lr, [sp, #S_LR] ) mov r1, #\reason .endm @@ -126,17 +113,24 @@ ENDPROC(__und_invalid) .macro svc_entry, stack_hole=0 UNWIND(.fnstart ) UNWIND(.save {r0 - pc} ) - sub sp, sp, #(S_FRAME_SIZE + \stack_hole) + sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4) +#ifdef CONFIG_THUMB2_KERNEL + SPFIX( str r0, [sp] ) @ temporarily saved + SPFIX( mov r0, sp ) + SPFIX( tst r0, #4 ) @ test original stack alignment + SPFIX( ldr r0, [sp] ) @ restored +#else SPFIX( tst sp, #4 ) - SPFIX( bicne sp, sp, #4 ) - stmib sp, {r1 - r12} +#endif + SPFIX( subeq sp, sp, #4 ) + stmia sp, {r1 - r12} ldmia r0, {r1 - r3} - add r5, sp, #S_SP @ here for interlock avoidance + add r5, sp, #S_SP - 4 @ here for interlock avoidance mov r4, #-1 @ "" "" "" "" - add r0, sp, #(S_FRAME_SIZE + \stack_hole) - SPFIX( addne r0, r0, #4 ) - str r1, [sp] @ save the "real" r0 copied + add r0, sp, #(S_FRAME_SIZE + \stack_hole - 4) + SPFIX( addeq r0, r0, #4 ) + str r1, [sp, #-4]! 
@ save the "real" r0 copied @ from the exception stack mov r1, lr @@ -184,6 +178,7 @@ __dabt_svc: @ @ set desired IRQ state, then call main handler @ + debug_entry r1 msr cpsr_c, r9 mov r2, sp bl do_DataAbort @@ -191,14 +186,13 @@ __dabt_svc: @ @ IRQs off again before pulling preserved data off the stack @ - disable_irq + disable_irq_notrace @ @ restore SPSR and restart the instruction @ - ldr r0, [sp, #S_PSR] - msr spsr_cxsf, r0 - ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr + ldr r2, [sp, #S_PSR] + svc_exit r2 @ return from exception UNWIND(.fnend ) ENDPROC(__dabt_svc) @@ -225,13 +219,12 @@ __irq_svc: tst r0, #_TIF_NEED_RESCHED blne svc_preempt #endif - ldr r0, [sp, #S_PSR] @ irqs are already disabled - msr spsr_cxsf, r0 + ldr r4, [sp, #S_PSR] @ irqs are already disabled #ifdef CONFIG_TRACE_IRQFLAGS - tst r0, #PSR_I_BIT + tst r4, #PSR_I_BIT bleq trace_hardirqs_on #endif - ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr + svc_exit r4 @ return from exception UNWIND(.fnend ) ENDPROC(__irq_svc) @@ -265,8 +258,16 @@ __und_svc: @ @ r0 - instruction @ +#ifndef CONFIG_THUMB2_KERNEL ldr r0, [r2, #-4] - adr r9, 1f +#else + ldrh r0, [r2, #-2] @ Thumb instruction at LR - 2 + and r9, r0, #0xf800 + cmp r9, #0xe800 @ 32-bit instruction if xx >= 0 + ldrhhs r9, [r2] @ bottom 16 bits + orrhs r0, r9, r0, lsl #16 +#endif + adr r9, BSYM(1f) bl call_fpe mov r0, sp @ struct pt_regs *regs @@ -275,14 +276,13 @@ __und_svc: @ @ IRQs off again before pulling preserved data off the stack @ -1: disable_irq +1: disable_irq_notrace @ @ restore SPSR and restart the instruction @ - ldr lr, [sp, #S_PSR] @ Get SVC cpsr - msr spsr_cxsf, lr - ldmia sp, {r0 - pc}^ @ Restore SVC registers + ldr r2, [sp, #S_PSR] @ Get SVC cpsr + svc_exit r2 @ return from exception UNWIND(.fnend ) ENDPROC(__und_svc) @@ -297,35 +297,29 @@ __pabt_svc: tst r3, #PSR_I_BIT biceq r9, r9, #PSR_I_BIT - @ - @ set args, then call main handler - @ - @ r0 - address of faulting instruction - @ r1 - pointer to registers on stack - @ -#ifdef MULTI_PABORT mov r0, r2 @ pass address of aborted instruction. 
+#ifdef MULTI_PABORT ldr r4, .LCprocfns mov lr, pc ldr pc, [r4, #PROCESSOR_PABT_FUNC] #else - CPU_PABORT_HANDLER(r0, r2) + bl CPU_PABORT_HANDLER #endif + debug_entry r1 msr cpsr_c, r9 @ Maybe enable interrupts - mov r1, sp @ regs + mov r2, sp @ regs bl do_PrefetchAbort @ call abort handler @ @ IRQs off again before pulling preserved data off the stack @ - disable_irq + disable_irq_notrace @ @ restore SPSR and restart the instruction @ - ldr r0, [sp, #S_PSR] - msr spsr_cxsf, r0 - ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr + ldr r2, [sp, #S_PSR] + svc_exit r2 @ return from exception UNWIND(.fnend ) ENDPROC(__pabt_svc) @@ -353,7 +347,8 @@ ENDPROC(__pabt_svc) UNWIND(.fnstart ) UNWIND(.cantunwind ) @ don't unwind the user space sub sp, sp, #S_FRAME_SIZE - stmib sp, {r1 - r12} + ARM( stmib sp, {r1 - r12} ) + THUMB( stmia sp, {r0 - r12} ) ldmia r0, {r1 - r3} add r0, sp, #S_PC @ here for interlock avoidance @@ -372,7 +367,8 @@ ENDPROC(__pabt_svc) @ Also, separately save sp_usr and lr_usr @ stmia r0, {r2 - r4} - stmdb r0, {sp, lr}^ + ARM( stmdb r0, {sp, lr}^ ) + THUMB( store_user_sp_lr r0, r1, S_SP - S_PC ) @ @ Enable the alignment trap while in kernel mode @@ -425,9 +421,10 @@ __dabt_usr: @ @ IRQs on, then call the main handler @ + debug_entry r1 enable_irq mov r2, sp - adr lr, ret_from_exception + adr lr, BSYM(ret_from_exception) b do_DataAbort UNWIND(.fnend ) ENDPROC(__dabt_usr) @@ -437,9 +434,6 @@ __irq_usr: usr_entry kuser_cmpxchg_check -#ifdef CONFIG_TRACE_IRQFLAGS - bl trace_hardirqs_off -#endif get_thread_info tsk #ifdef CONFIG_PREEMPT ldr r8, [tsk, #TI_PREEMPT] @ get preempt count @@ -452,10 +446,9 @@ __irq_usr: ldr r0, [tsk, #TI_PREEMPT] str r8, [tsk, #TI_PREEMPT] teq r0, r7 - strne r0, [r0, -r0] -#endif -#ifdef CONFIG_TRACE_IRQFLAGS - bl trace_hardirqs_on + ARM( strne r0, [r0, -r0] ) + THUMB( movne r0, #0 ) + THUMB( strne r0, [r0] ) #endif mov why, #0 @@ -476,9 +469,10 @@ __und_usr: @ @ r0 - instruction @ - adr r9, ret_from_exception - adr lr, __und_usr_unknown + adr r9, BSYM(ret_from_exception) + adr lr, BSYM(__und_usr_unknown) tst r3, #PSR_T_BIT @ Thumb mode? + itet eq @ explicit IT needed for the 1f label subeq r4, r2, #4 @ ARM instr at LR - 4 subne r4, r2, #2 @ Thumb instr at LR - 2 1: ldreqt r0, [r4] @@ -488,7 +482,10 @@ __und_usr: beq call_fpe @ Thumb instruction #if __LINUX_ARM_ARCH__ >= 7 -2: ldrht r5, [r4], #2 +2: + ARM( ldrht r5, [r4], #2 ) + THUMB( ldrht r5, [r4] ) + THUMB( add r4, r4, #2 ) and r0, r5, #0xf800 @ mask bits 111x x... .... .... cmp r0, #0xe800 @ 32bit instruction if xx != 0 blo __und_usr_unknown @@ -508,16 +505,16 @@ ENDPROC(__und_usr) /* * The out of line fixup for the ldrt above. */ - .section .fixup, "ax" + .pushsection .fixup, "ax" 4: mov pc, r9 - .previous - .section __ex_table,"a" + .popsection + .pushsection __ex_table,"a" .long 1b, 4b #if __LINUX_ARM_ARCH__ >= 7 .long 2b, 4b .long 3b, 4b #endif - .previous + .popsection /* * Check whether the instruction is a co-processor instruction. 
@@ -577,9 +574,11 @@ call_fpe: moveq pc, lr get_thread_info r10 @ get current thread and r8, r0, #0x00000f00 @ mask out CP number + THUMB( lsr r8, r8, #8 ) mov r7, #1 add r6, r10, #TI_USED_CP - strb r7, [r6, r8, lsr #8] @ set appropriate used_cp[] + ARM( strb r7, [r6, r8, lsr #8] ) @ set appropriate used_cp[] + THUMB( strb r7, [r6, r8] ) @ set appropriate used_cp[] #ifdef CONFIG_IWMMXT @ Test if we need to give access to iWMMXt coprocessors ldr r5, [r10, #TI_FLAGS] @@ -587,36 +586,38 @@ call_fpe: movcss r7, r5, lsr #(TIF_USING_IWMMXT + 1) bcs iwmmxt_task_enable #endif - add pc, pc, r8, lsr #6 - mov r0, r0 - - mov pc, lr @ CP#0 - b do_fpe @ CP#1 (FPE) - b do_fpe @ CP#2 (FPE) - mov pc, lr @ CP#3 + ARM( add pc, pc, r8, lsr #6 ) + THUMB( lsl r8, r8, #2 ) + THUMB( add pc, r8 ) + nop + + movw_pc lr @ CP#0 + W(b) do_fpe @ CP#1 (FPE) + W(b) do_fpe @ CP#2 (FPE) + movw_pc lr @ CP#3 #ifdef CONFIG_CRUNCH b crunch_task_enable @ CP#4 (MaverickCrunch) b crunch_task_enable @ CP#5 (MaverickCrunch) b crunch_task_enable @ CP#6 (MaverickCrunch) #else - mov pc, lr @ CP#4 - mov pc, lr @ CP#5 - mov pc, lr @ CP#6 + movw_pc lr @ CP#4 + movw_pc lr @ CP#5 + movw_pc lr @ CP#6 #endif - mov pc, lr @ CP#7 - mov pc, lr @ CP#8 - mov pc, lr @ CP#9 + movw_pc lr @ CP#7 + movw_pc lr @ CP#8 + movw_pc lr @ CP#9 #ifdef CONFIG_VFP - b do_vfp @ CP#10 (VFP) - b do_vfp @ CP#11 (VFP) + W(b) do_vfp @ CP#10 (VFP) + W(b) do_vfp @ CP#11 (VFP) #else - mov pc, lr @ CP#10 (VFP) - mov pc, lr @ CP#11 (VFP) + movw_pc lr @ CP#10 (VFP) + movw_pc lr @ CP#11 (VFP) #endif - mov pc, lr @ CP#12 - mov pc, lr @ CP#13 - mov pc, lr @ CP#14 (Debug) - mov pc, lr @ CP#15 (Control) + movw_pc lr @ CP#12 + movw_pc lr @ CP#13 + movw_pc lr @ CP#14 (Debug) + movw_pc lr @ CP#15 (Control) #ifdef CONFIG_NEON .align 6 @@ -657,17 +658,19 @@ do_fpe: * lr = unrecognised FP instruction return address */ - .data + .pushsection .data ENTRY(fp_enter) .word no_fp - .previous + .popsection -no_fp: mov pc, lr +ENTRY(no_fp) + mov pc, lr +ENDPROC(no_fp) __und_usr_unknown: enable_irq mov r0, sp - adr lr, ret_from_exception + adr lr, BSYM(ret_from_exception) b do_undefinstr ENDPROC(__und_usr_unknown) @@ -675,16 +678,17 @@ ENDPROC(__und_usr_unknown) __pabt_usr: usr_entry -#ifdef MULTI_PABORT mov r0, r2 @ pass address of aborted instruction. 
+#ifdef MULTI_PABORT ldr r4, .LCprocfns mov lr, pc ldr pc, [r4, #PROCESSOR_PABT_FUNC] #else - CPU_PABORT_HANDLER(r0, r2) + bl CPU_PABORT_HANDLER #endif + debug_entry r1 enable_irq @ Enable interrupts - mov r1, sp @ regs + mov r2, sp @ regs bl do_PrefetchAbort @ call abort handler UNWIND(.fnend ) /* fall through */ @@ -711,24 +715,20 @@ ENTRY(__switch_to) UNWIND(.cantunwind ) add ip, r1, #TI_CPU_SAVE ldr r3, [r2, #TI_TP_VALUE] - stmia ip!, {r4 - sl, fp, sp, lr} @ Store most regs on stack -#ifdef CONFIG_MMU + ARM( stmia ip!, {r4 - sl, fp, sp, lr} ) @ Store most regs on stack + THUMB( stmia ip!, {r4 - sl, fp} ) @ Store most regs on stack + THUMB( str sp, [ip], #4 ) + THUMB( str lr, [ip], #4 ) +#ifdef CONFIG_CPU_USE_DOMAINS ldr r6, [r2, #TI_CPU_DOMAIN] #endif -#if __LINUX_ARM_ARCH__ >= 6 -#ifdef CONFIG_CPU_32v6K - clrex -#else - strex r5, r4, [ip] @ Clear exclusive monitor -#endif + set_tls r3, r4, r5 +#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP) + ldr r7, [r2, #TI_TASK] + ldr r8, =__stack_chk_guard + ldr r7, [r7, #TSK_STACK_CANARY] #endif -#if defined(CONFIG_HAS_TLS_REG) - mcr p15, 0, r3, c13, c0, 3 @ set TLS register -#elif !defined(CONFIG_TLS_REG_EMUL) - mov r4, #0xffff0fff - str r3, [r4, #-15] @ TLS val at 0xffff0ff0 -#endif -#ifdef CONFIG_MMU +#ifdef CONFIG_CPU_USE_DOMAINS mcr p15, 0, r6, c3, c0, 0 @ Set domain register #endif mov r5, r0 @@ -736,8 +736,15 @@ ENTRY(__switch_to) ldr r0, =thread_notify_head mov r1, #THREAD_NOTIFY_SWITCH bl atomic_notifier_call_chain +#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP) + str r7, [r8] +#endif + THUMB( mov ip, r4 ) mov r0, r5 - ldmia r4, {r4 - sl, fp, sp, pc} @ Load all regs saved previously + ARM( ldmia r4, {r4 - sl, fp, sp, pc} ) @ Load all regs saved previously + THUMB( ldmia ip!, {r4 - sl, fp} ) @ Load all regs saved previously + THUMB( ldr sp, [ip], #4 ) + THUMB( ldr pc, [ip] ) UNWIND(.fnend ) ENDPROC(__switch_to) @@ -772,6 +779,7 @@ ENDPROC(__switch_to) * if your compiled code is not going to use the new instructions for other * purpose. */ + THUMB( .arm ) .macro usr_ret, reg #ifdef CONFIG_ARM_THUMB @@ -818,7 +826,7 @@ __kuser_helper_start: */ __kuser_memory_barrier: @ 0xffff0fa0 - smp_dmb + smp_dmb arm usr_ret lr .align 5 @@ -887,10 +895,10 @@ __kuser_cmpxchg: @ 0xffff0fc0 * A special ghost syscall is used for that (see traps.c). 
*/ stmfd sp!, {r7, lr} - mov r7, #0xff00 @ 0xfff0 into r7 for EABI - orr r7, r7, #0xf0 - swi #0x9ffff0 + ldr r7, 1f @ it's 20 bits + swi __ARM_NR_cmpxchg ldmfd sp!, {r7, pc} +1: .word __ARM_NR_cmpxchg #elif __LINUX_ARM_ARCH__ < 6 @@ -935,9 +943,7 @@ kuser_cmpxchg_fixup: #else -#ifdef CONFIG_SMP - mcr p15, 0, r0, c7, c10, 5 @ dmb -#endif + smp_dmb arm 1: ldrex r3, [r2] subs r3, r3, r0 strexeq r3, r1, [r2] @@ -945,11 +951,8 @@ kuser_cmpxchg_fixup: beq 1b rsbs r0, r3, #0 /* beware -- each __kuser slot must be 8 instructions max */ -#ifdef CONFIG_SMP - b __kuser_memory_barrier -#else - usr_ret lr -#endif + ALT_SMP(b __kuser_memory_barrier) + ALT_UP(usr_ret lr) #endif @@ -989,17 +992,12 @@ kuser_cmpxchg_fixup: */ __kuser_get_tls: @ 0xffff0fe0 - -#if !defined(CONFIG_HAS_TLS_REG) && !defined(CONFIG_TLS_REG_EMUL) - ldr r0, [pc, #(16 - 8)] @ TLS stored at 0xffff0ff0 -#else - mrc p15, 0, r0, c13, c0, 3 @ read TLS register -#endif + ldr r0, [pc, #(16 - 8)] @ read TLS, set in kuser_get_tls_init usr_ret lr - - .rep 5 - .word 0 @ pad up to __kuser_helper_version - .endr + mrc p15, 0, r0, c13, c0, 3 @ 0xffff0fe8 hardware TLS code + .rep 4 + .word 0 @ 0xffff0ff0 software TLS value, then + .endr @ pad up to __kuser_helper_version /* * Reference declaration: @@ -1020,6 +1018,7 @@ __kuser_helper_version: @ 0xffff0ffc .globl __kuser_helper_end __kuser_helper_end: + THUMB( .thumb ) /* * Vector stubs. @@ -1054,17 +1053,23 @@ vector_\name: @ Prepare for SVC32 mode. IRQs remain disabled. @ mrs r0, cpsr - eor r0, r0, #(\mode ^ SVC_MODE) + eor r0, r0, #(\mode ^ SVC_MODE | PSR_ISETSTATE) msr spsr_cxsf, r0 @ @ the branch table must immediately follow this code @ and lr, lr, #0x0f + THUMB( adr r0, 1f ) + THUMB( ldr lr, [r0, lr, lsl #2] ) mov r0, sp - ldr lr, [pc, lr, lsl #2] + ARM( ldr lr, [pc, lr, lsl #2] ) movs pc, lr @ branch to handler in SVC mode ENDPROC(vector_\name) + + .align 2 + @ handler addresses follow this label +1: .endm .globl __stubs_start @@ -1202,14 +1207,16 @@ __stubs_end: .globl __vectors_start __vectors_start: - swi SYS_ERROR0 - b vector_und + stubs_offset - ldr pc, .LCvswi + stubs_offset - b vector_pabt + stubs_offset - b vector_dabt + stubs_offset - b vector_addrexcptn + stubs_offset - b vector_irq + stubs_offset - b vector_fiq + stubs_offset + ARM( swi SYS_ERROR0 ) + THUMB( svc #0 ) + THUMB( nop ) + W(b) vector_und + stubs_offset + W(ldr) pc, .LCvswi + stubs_offset + W(b) vector_pabt + stubs_offset + W(b) vector_dabt + stubs_offset + W(b) vector_addrexcptn + stubs_offset + W(b) vector_irq + stubs_offset + W(b) vector_fiq + stubs_offset .globl __vectors_end __vectors_end: @@ -1222,3 +1229,9 @@ cr_alignment: .space 4 cr_no_alignment: .space 4 + +#ifdef CONFIG_MULTI_IRQ_HANDLER + .globl handle_arch_irq +handle_arch_irq: + .space 4 +#endif diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index 366e509..1e7b04a 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S @@ -29,18 +29,14 @@ ret_fast_syscall: ldr r1, [tsk, #TI_FLAGS] tst r1, #_TIF_WORK_MASK bne fast_work_pending +#if defined(CONFIG_IRQSOFF_TRACER) + asm_trace_hardirqs_on +#endif /* perform architecture specific actions before user return */ arch_ret_to_user r1, lr - @ fast_restore_user_regs - ldr r1, [sp, #S_OFF + S_PSR] @ get calling cpsr - ldr lr, [sp, #S_OFF + S_PC]! 
@ get pc - msr spsr_cxsf, r1 @ save in spsr_svc - ldmdb sp, {r1 - lr}^ @ get calling r1 - lr - mov r0, r0 - add sp, sp, #S_FRAME_SIZE - S_PC - movs pc, lr @ return & move spsr_svc into cpsr + restore_user_regs fast = 1, offset = S_OFF UNWIND(.fnend ) /* @@ -51,10 +47,12 @@ fast_work_pending: work_pending: tst r1, #_TIF_NEED_RESCHED bne work_resched - tst r1, #_TIF_SIGPENDING + tst r1, #_TIF_SIGPENDING|_TIF_NOTIFY_RESUME beq no_work_pending mov r0, sp @ 'regs' mov r2, why @ 'syscall' + tst r1, #_TIF_SIGPENDING @ delivering a signal? + movne why, #0 @ prevent further restarts bl do_notify_resume b ret_slow_syscall @ Check work again @@ -70,17 +68,13 @@ ret_slow_syscall: tst r1, #_TIF_WORK_MASK bne work_pending no_work_pending: +#if defined(CONFIG_IRQSOFF_TRACER) + asm_trace_hardirqs_on +#endif /* perform architecture specific actions before user return */ arch_ret_to_user r1, lr - @ slow_restore_user_regs - ldr r1, [sp, #S_PSR] @ get calling cpsr - ldr lr, [sp, #S_PC]! @ get pc - msr spsr_cxsf, r1 @ save in spsr_svc - ldmdb sp, {r0 - lr}^ @ get calling r0 - lr - mov r0, r0 - add sp, sp, #S_FRAME_SIZE - S_PC - movs pc, lr @ return & move spsr_svc into cpsr + restore_user_regs fast = 0, offset = 0 ENDPROC(ret_to_user) /* @@ -106,56 +100,222 @@ ENDPROC(ret_from_fork) #define CALL(x) .long x #ifdef CONFIG_FUNCTION_TRACER +/* + * When compiling with -pg, gcc inserts a call to the mcount routine at the + * start of every function. In mcount, apart from the function's address (in + * lr), we need to get hold of the function's caller's address. + * + * Older GCCs (pre-4.4) inserted a call to a routine called mcount like this: + * + * bl mcount + * + * These versions have the limitation that in order for the mcount routine to + * be able to determine the function's caller's address, an APCS-style frame + * pointer (which is set up with something like the code below) is required. + * + * mov ip, sp + * push {fp, ip, lr, pc} + * sub fp, ip, #4 + * + * With EABI, these frame pointers are not available unless -mapcs-frame is + * specified, and if building as Thumb-2, not even then. + * + * Newer GCCs (4.4+) solve this problem by introducing a new version of mcount, + * with call sites like: + * + * push {lr} + * bl __gnu_mcount_nc + * + * With these compilers, frame pointers are not necessary. + * + * mcount can be thought of as a function called in the middle of a subroutine + * call. As such, it needs to be transparent for both the caller and the + * callee: the original lr needs to be restored when leaving mcount, and no + * registers should be clobbered. (In the __gnu_mcount_nc implementation, we + * clobber the ip register. This is OK because the ARM calling convention + * allows it to be clobbered in subroutines and doesn't use it to hold + * parameters.) + * + * When using dynamic ftrace, we patch out the mcount call by a "mov r0, r0" + * for the mcount case, and a "pop {lr}" for the __gnu_mcount_nc case (see + * arch/arm/kernel/ftrace.c). + */ + +#ifndef CONFIG_OLD_MCOUNT +#if (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 4)) +#error Ftrace requires CONFIG_FRAME_POINTER=y with GCC older than 4.4.0. 
+#endif +#endif + +.macro __mcount suffix + mcount_enter + ldr r0, =ftrace_trace_function + ldr r2, [r0] + adr r0, .Lftrace_stub + cmp r0, r2 + bne 1f + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + ldr r1, =ftrace_graph_return + ldr r2, [r1] + cmp r0, r2 + bne ftrace_graph_caller\suffix + + ldr r1, =ftrace_graph_entry + ldr r2, [r1] + ldr r0, =ftrace_graph_entry_stub + cmp r0, r2 + bne ftrace_graph_caller\suffix +#endif + + mcount_exit + +1: mcount_get_lr r1 @ lr of instrumented func + mov r0, lr @ instrumented function + sub r0, r0, #MCOUNT_INSN_SIZE + adr lr, BSYM(2f) + mov pc, r2 +2: mcount_exit +.endm + +.macro __ftrace_caller suffix + mcount_enter + + mcount_get_lr r1 @ lr of instrumented func + mov r0, lr @ instrumented function + sub r0, r0, #MCOUNT_INSN_SIZE + + .globl ftrace_call\suffix +ftrace_call\suffix: + bl ftrace_stub + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + .globl ftrace_graph_call\suffix +ftrace_graph_call\suffix: + mov r0, r0 +#endif + + mcount_exit +.endm + +.macro __ftrace_graph_caller + sub r0, fp, #4 @ &lr of instrumented routine (&parent) #ifdef CONFIG_DYNAMIC_FTRACE + @ called from __ftrace_caller, saved in mcount_enter + ldr r1, [sp, #16] @ instrumented routine (func) +#else + @ called from __mcount, untouched in lr + mov r1, lr @ instrumented routine (func) +#endif + sub r1, r1, #MCOUNT_INSN_SIZE + mov r2, fp @ frame pointer + bl prepare_ftrace_return + mcount_exit +.endm + +#ifdef CONFIG_OLD_MCOUNT +/* + * mcount + */ + +.macro mcount_enter + stmdb sp!, {r0-r3, lr} +.endm + +.macro mcount_get_lr reg + ldr \reg, [fp, #-4] +.endm + +.macro mcount_exit + ldr lr, [fp, #-4] + ldmia sp!, {r0-r3, pc} +.endm + ENTRY(mcount) - stmdb sp!, {r0-r3, lr} - mov r0, lr - sub r0, r0, #MCOUNT_INSN_SIZE +#ifdef CONFIG_DYNAMIC_FTRACE + stmdb sp!, {lr} + ldr lr, [fp, #-4] + ldmia sp!, {pc} +#else + __mcount _old +#endif +ENDPROC(mcount) - .globl mcount_call -mcount_call: - bl ftrace_stub - ldr lr, [fp, #-4] @ restore lr - ldmia sp!, {r0-r3, pc} +#ifdef CONFIG_DYNAMIC_FTRACE +ENTRY(ftrace_caller_old) + __ftrace_caller _old +ENDPROC(ftrace_caller_old) +#endif -ENTRY(ftrace_caller) - stmdb sp!, {r0-r3, lr} - ldr r1, [fp, #-4] - mov r0, lr - sub r0, r0, #MCOUNT_INSN_SIZE +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +ENTRY(ftrace_graph_caller_old) + __ftrace_graph_caller +ENDPROC(ftrace_graph_caller_old) +#endif + +.purgem mcount_enter +.purgem mcount_get_lr +.purgem mcount_exit +#endif - .globl ftrace_call -ftrace_call: - bl ftrace_stub - ldr lr, [fp, #-4] @ restore lr - ldmia sp!, {r0-r3, pc} +/* + * __gnu_mcount_nc + */ +.macro mcount_enter + stmdb sp!, {r0-r3, lr} +.endm + +.macro mcount_get_lr reg + ldr \reg, [sp, #20] +.endm + +.macro mcount_exit + ldmia sp!, {r0-r3, ip, lr} + mov pc, ip +.endm + +ENTRY(__gnu_mcount_nc) +#ifdef CONFIG_DYNAMIC_FTRACE + mov ip, lr + ldmia sp!, {lr} + mov pc, ip #else + __mcount +#endif +ENDPROC(__gnu_mcount_nc) -ENTRY(mcount) - stmdb sp!, {r0-r3, lr} - ldr r0, =ftrace_trace_function - ldr r2, [r0] - adr r0, ftrace_stub - cmp r0, r2 - bne trace - ldr lr, [fp, #-4] @ restore lr - ldmia sp!, {r0-r3, pc} - -trace: - ldr r1, [fp, #-4] @ lr of instrumented routine - mov r0, lr - sub r0, r0, #MCOUNT_INSN_SIZE - mov lr, pc - mov pc, r2 - mov lr, r1 @ restore lr - ldmia sp!, {r0-r3, pc} - -#endif /* CONFIG_DYNAMIC_FTRACE */ - - .globl ftrace_stub -ftrace_stub: - mov pc, lr +#ifdef CONFIG_DYNAMIC_FTRACE +ENTRY(ftrace_caller) + __ftrace_caller +ENDPROC(ftrace_caller) +#endif + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +ENTRY(ftrace_graph_caller) + __ftrace_graph_caller 
+ENDPROC(ftrace_graph_caller) +#endif + +.purgem mcount_enter +.purgem mcount_get_lr +.purgem mcount_exit + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + .globl return_to_handler +return_to_handler: + stmdb sp!, {r0-r3} + mov r0, fp @ frame pointer + bl ftrace_return_to_handler + mov lr, r0 @ r0 has real ret addr + ldmia sp!, {r0-r3} + mov pc, lr +#endif + +ENTRY(ftrace_stub) +.Lftrace_stub: + mov pc, lr +ENDPROC(ftrace_stub) #endif /* CONFIG_FUNCTION_TRACER */ @@ -182,8 +342,10 @@ ftrace_stub: ENTRY(vector_swi) sub sp, sp, #S_FRAME_SIZE stmia sp, {r0 - r12} @ Calling r0 - r12 - add r8, sp, #S_PC - stmdb r8, {sp, lr}^ @ Calling sp, lr + ARM( add r8, sp, #S_PC ) + ARM( stmdb r8, {sp, lr}^ ) @ Calling sp, lr + THUMB( mov r8, sp ) + THUMB( store_user_sp_lr r8, r10, S_SP ) @ calling sp, lr mrs r8, spsr @ called from non-FIQ mode, so ok. str lr, [sp, #S_PC] @ Save calling PC str r8, [sp, #S_PSR] @ Save CPSR @@ -250,7 +412,6 @@ ENTRY(vector_swi) get_thread_info tsk adr tbl, sys_call_table @ load syscall table pointer - ldr ip, [tsk, #TI_FLAGS] @ check for syscall tracing #if defined(CONFIG_OABI_COMPAT) /* @@ -267,12 +428,24 @@ ENTRY(vector_swi) eor scno, scno, #__NR_SYSCALL_BASE @ check OS number #endif + ldr r10, [tsk, #TI_FLAGS] @ check for syscall tracing stmdb sp!, {r4, r5} @ push fifth and sixth args - tst ip, #_TIF_SYSCALL_TRACE @ are we tracing syscalls? + +#ifdef CONFIG_SECCOMP + tst r10, #_TIF_SECCOMP + beq 1f + mov r0, scno + bl __secure_computing + add r0, sp, #S_R0 + S_OFF @ pointer to regs + ldmia r0, {r0 - r3} @ have to reload r0 - r3 +1: +#endif + + tst r10, #_TIF_SYSCALL_TRACE @ are we tracing syscalls? bne __sys_trace cmp scno, #NR_syscalls @ check upper syscall limit - adr lr, ret_fast_syscall @ return address + adr lr, BSYM(ret_fast_syscall) @ return address ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine add r1, sp, #S_OFF @@ -293,7 +466,7 @@ __sys_trace: mov r0, #0 @ trace entry [IP = 0] bl syscall_trace - adr lr, __sys_trace_return @ return address + adr lr, BSYM(__sys_trace_return) @ return address mov scno, r0 @ syscall number (possibly new) add r1, sp, #S_R0 + S_OFF @ pointer to regs cmp scno, #NR_syscalls @ check upper syscall limit @@ -373,23 +546,15 @@ sys_clone_wrapper: b sys_clone ENDPROC(sys_clone_wrapper) -sys_sigsuspend_wrapper: - add r3, sp, #S_OFF - b sys_sigsuspend -ENDPROC(sys_sigsuspend_wrapper) - -sys_rt_sigsuspend_wrapper: - add r2, sp, #S_OFF - b sys_rt_sigsuspend -ENDPROC(sys_rt_sigsuspend_wrapper) - sys_sigreturn_wrapper: add r0, sp, #S_OFF + mov why, #0 @ prevent syscall restart handling b sys_sigreturn ENDPROC(sys_sigreturn_wrapper) sys_rt_sigreturn_wrapper: add r0, sp, #S_OFF + mov why, #0 @ prevent syscall restart handling b sys_rt_sigreturn ENDPROC(sys_rt_sigreturn_wrapper) @@ -419,22 +584,15 @@ sys_mmap2: tst r5, #PGOFF_MASK moveq r5, r5, lsr #PAGE_SHIFT - 12 streq r5, [sp, #4] - beq do_mmap2 + beq sys_mmap_pgoff mov r0, #-EINVAL mov pc, lr #else str r5, [sp, #4] - b do_mmap2 + b sys_mmap_pgoff #endif ENDPROC(sys_mmap2) -ENTRY(pabort_ifar) - mrc p15, 0, r0, cr6, cr0, 2 -ENTRY(pabort_noifar) - mov pc, lr -ENDPROC(pabort_ifar) -ENDPROC(pabort_noifar) - #ifdef CONFIG_OABI_COMPAT /* diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S index 87ab4e1..ae94649 100644 --- a/arch/arm/kernel/entry-header.S +++ b/arch/arm/kernel/entry-header.S @@ -36,11 +36,6 @@ #endif .endm - .macro get_thread_info, rd - mov \rd, sp, lsr #13 - mov \rd, \rd, lsl #13 - .endm - .macro alignment_trap, rtemp #ifdef CONFIG_ALIGNMENT_TRAP ldr \rtemp, .LCcralign 
@@ -49,6 +44,145 @@ #endif .endm + @ + @ Store/load the USER SP and LR registers by switching to the SYS + @ mode. Useful in Thumb-2 mode where "stm/ldm rd, {sp, lr}^" is not + @ available. Should only be called from SVC mode + @ + .macro store_user_sp_lr, rd, rtemp, offset = 0 + mrs \rtemp, cpsr + eor \rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE) + msr cpsr_c, \rtemp @ switch to the SYS mode + + str sp, [\rd, #\offset] @ save sp_usr + str lr, [\rd, #\offset + 4] @ save lr_usr + + eor \rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE) + msr cpsr_c, \rtemp @ switch back to the SVC mode + .endm + + .macro load_user_sp_lr, rd, rtemp, offset = 0 + mrs \rtemp, cpsr + eor \rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE) + msr cpsr_c, \rtemp @ switch to the SYS mode + + ldr sp, [\rd, #\offset] @ load sp_usr + ldr lr, [\rd, #\offset + 4] @ load lr_usr + + eor \rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE) + msr cpsr_c, \rtemp @ switch back to the SVC mode + .endm + +#ifndef CONFIG_THUMB2_KERNEL + .macro svc_exit, rpsr + msr spsr_cxsf, \rpsr +#if defined(CONFIG_CPU_32v6K) + clrex @ clear the exclusive monitor + ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr +#elif defined (CONFIG_CPU_V6) + ldr r0, [sp] + strex r1, r2, [sp] @ clear the exclusive monitor + ldmib sp, {r1 - pc}^ @ load r1 - pc, cpsr +#else + ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr +#endif + .endm + + .macro restore_user_regs, fast = 0, offset = 0 + ldr r1, [sp, #\offset + S_PSR] @ get calling cpsr + ldr lr, [sp, #\offset + S_PC]! @ get pc + msr spsr_cxsf, r1 @ save in spsr_svc +#if defined(CONFIG_CPU_32v6K) + clrex @ clear the exclusive monitor +#elif defined (CONFIG_CPU_V6) + strex r1, r2, [sp] @ clear the exclusive monitor +#endif + .if \fast + ldmdb sp, {r1 - lr}^ @ get calling r1 - lr + .else + ldmdb sp, {r0 - lr}^ @ get calling r0 - lr + .endif + mov r0, r0 @ ARMv5T and earlier require a nop + @ after ldm {}^ + add sp, sp, #S_FRAME_SIZE - S_PC + movs pc, lr @ return & move spsr_svc into cpsr + .endm + + .macro get_thread_info, rd + mov \rd, sp, lsr #13 + mov \rd, \rd, lsl #13 + .endm + + @ + @ 32-bit wide "mov pc, reg" + @ + .macro movw_pc, reg + mov pc, \reg + .endm +#else /* CONFIG_THUMB2_KERNEL */ + .macro svc_exit, rpsr + clrex @ clear the exclusive monitor + ldr r0, [sp, #S_SP] @ top of the stack + ldr r1, [sp, #S_PC] @ return address + tst r0, #4 @ orig stack 8-byte aligned? + stmdb r0, {r1, \rpsr} @ rfe context + ldmia sp, {r0 - r12} + ldr lr, [sp, #S_LR] + addeq sp, sp, #S_FRAME_SIZE - 8 @ aligned + addne sp, sp, #S_FRAME_SIZE - 4 @ not aligned + rfeia sp! + .endm + + .macro restore_user_regs, fast = 0, offset = 0 + clrex @ clear the exclusive monitor + mov r2, sp + load_user_sp_lr r2, r3, \offset + S_SP @ calling sp, lr + ldr r1, [sp, #\offset + S_PSR] @ get calling cpsr + ldr lr, [sp, #\offset + S_PC] @ get pc + add sp, sp, #\offset + S_SP + msr spsr_cxsf, r1 @ save in spsr_svc + .if \fast + ldmdb sp, {r1 - r12} @ get calling r1 - r12 + .else + ldmdb sp, {r0 - r12} @ get calling r0 - r12 + .endif + add sp, sp, #S_FRAME_SIZE - S_SP + movs pc, lr @ return & move spsr_svc into cpsr + .endm + + .macro get_thread_info, rd + mov \rd, sp + lsr \rd, \rd, #13 + mov \rd, \rd, lsl #13 + .endm + + @ + @ 32-bit wide "mov pc, reg" + @ + .macro movw_pc, reg + mov pc, \reg + nop + .endm +#endif /* !CONFIG_THUMB2_KERNEL */ + + @ + @ Debug exceptions are taken as prefetch or data aborts. + @ We must disable preemption during the handler so that + @ we can access the debug registers safely. 
+ @ + .macro debug_entry, fsr +#if defined(CONFIG_HAVE_HW_BREAKPOINT) && defined(CONFIG_PREEMPT) + ldr r4, =0x40f @ mask out fsr.fs + and r5, r4, \fsr + cmp r5, #2 @ debug exception + bne 1f + get_thread_info r10 + ldr r6, [r10, #TI_PREEMPT] @ get preempt count + add r11, r6, #1 @ increment it + str r11, [r10, #TI_PREEMPT] +1: +#endif + .endm /* * These are the registers used in the syscall handler, and allow us to diff --git a/arch/arm/kernel/etm.c b/arch/arm/kernel/etm.c new file mode 100644 index 0000000..11db628 --- /dev/null +++ b/arch/arm/kernel/etm.c @@ -0,0 +1,659 @@ +/* + * linux/arch/arm/kernel/etm.c + * + * Driver for ARM's Embedded Trace Macrocell and Embedded Trace Buffer. + * + * Copyright (C) 2009 Nokia Corporation. + * Alexander Shishkin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/types.h> +#include <linux/io.h> +#include <linux/sysrq.h> +#include <linux/device.h> +#include <linux/clk.h> +#include <linux/amba/bus.h> +#include <linux/fs.h> +#include <linux/uaccess.h> +#include <linux/miscdevice.h> +#include <linux/vmalloc.h> +#include <linux/mutex.h> +#include <asm/hardware/coresight.h> +#include <asm/sections.h> + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Alexander Shishkin"); + +/* + * ETM tracer state + */ +struct tracectx { + unsigned int etb_bufsz; + void __iomem *etb_regs; + void __iomem *etm_regs; + unsigned long flags; + int ncmppairs; + int etm_portsz; + struct device *dev; + struct clk *emu_clk; + struct mutex mutex; +}; + +static struct tracectx tracer; + +static inline bool trace_isrunning(struct tracectx *t) +{ + return !!(t->flags & TRACER_RUNNING); +} + +static int etm_setup_address_range(struct tracectx *t, int n, + unsigned long start, unsigned long end, int exclude, int data) +{ + u32 flags = ETMAAT_ARM | ETMAAT_IGNCONTEXTID | ETMAAT_NSONLY | \ + ETMAAT_NOVALCMP; + + if (n < 1 || n > t->ncmppairs) + return -EINVAL; + + /* comparators and ranges are numbered starting with 1 as opposed + * to bits in a word */ + n--; + + if (data) + flags |= ETMAAT_DLOADSTORE; + else + flags |= ETMAAT_IEXEC; + + /* first comparator for the range */ + etm_writel(t, flags, ETMR_COMP_ACC_TYPE(n * 2)); + etm_writel(t, start, ETMR_COMP_VAL(n * 2)); + + /* second comparator is right next to it */ + etm_writel(t, flags, ETMR_COMP_ACC_TYPE(n * 2 + 1)); + etm_writel(t, end, ETMR_COMP_VAL(n * 2 + 1)); + + flags = exclude ? 
ETMTE_INCLEXCL : 0; + etm_writel(t, flags | (1 << n), ETMR_TRACEENCTRL); + + return 0; +} + +static int trace_start(struct tracectx *t) +{ + u32 v; + unsigned long timeout = TRACER_TIMEOUT; + + etb_unlock(t); + + etb_writel(t, 0, ETBR_FORMATTERCTRL); + etb_writel(t, 1, ETBR_CTRL); + + etb_lock(t); + + /* configure etm */ + v = ETMCTRL_OPTS | ETMCTRL_PROGRAM | ETMCTRL_PORTSIZE(t->etm_portsz); + + if (t->flags & TRACER_CYCLE_ACC) + v |= ETMCTRL_CYCLEACCURATE; + + etm_unlock(t); + + etm_writel(t, v, ETMR_CTRL); + + while (!(etm_readl(t, ETMR_CTRL) & ETMCTRL_PROGRAM) && --timeout) + ; + if (!timeout) { + dev_dbg(t->dev, "Waiting for progbit to assert timed out\n"); + etm_lock(t); + return -EFAULT; + } + + etm_setup_address_range(t, 1, (unsigned long)_stext, + (unsigned long)_etext, 0, 0); + etm_writel(t, 0, ETMR_TRACEENCTRL2); + etm_writel(t, 0, ETMR_TRACESSCTRL); + etm_writel(t, 0x6f, ETMR_TRACEENEVT); + + v &= ~ETMCTRL_PROGRAM; + v |= ETMCTRL_PORTSEL; + + etm_writel(t, v, ETMR_CTRL); + + timeout = TRACER_TIMEOUT; + while (etm_readl(t, ETMR_CTRL) & ETMCTRL_PROGRAM && --timeout) + ; + if (!timeout) { + dev_dbg(t->dev, "Waiting for progbit to deassert timed out\n"); + etm_lock(t); + return -EFAULT; + } + + etm_lock(t); + + t->flags |= TRACER_RUNNING; + + return 0; +} + +static int trace_stop(struct tracectx *t) +{ + unsigned long timeout = TRACER_TIMEOUT; + + etm_unlock(t); + + etm_writel(t, 0x440, ETMR_CTRL); + while (!(etm_readl(t, ETMR_CTRL) & ETMCTRL_PROGRAM) && --timeout) + ; + if (!timeout) { + dev_dbg(t->dev, "Waiting for progbit to assert timed out\n"); + etm_lock(t); + return -EFAULT; + } + + etm_lock(t); + + etb_unlock(t); + etb_writel(t, ETBFF_MANUAL_FLUSH, ETBR_FORMATTERCTRL); + + timeout = TRACER_TIMEOUT; + while (etb_readl(t, ETBR_FORMATTERCTRL) & + ETBFF_MANUAL_FLUSH && --timeout) + ; + if (!timeout) { + dev_dbg(t->dev, "Waiting for formatter flush to commence " + "timed out\n"); + etb_lock(t); + return -EFAULT; + } + + etb_writel(t, 0, ETBR_CTRL); + + etb_lock(t); + + t->flags &= ~TRACER_RUNNING; + + return 0; +} + +static int etb_getdatalen(struct tracectx *t) +{ + u32 v; + int rp, wp; + + v = etb_readl(t, ETBR_STATUS); + + if (v & 1) + return t->etb_bufsz; + + rp = etb_readl(t, ETBR_READADDR); + wp = etb_readl(t, ETBR_WRITEADDR); + + if (rp > wp) { + etb_writel(t, 0, ETBR_READADDR); + etb_writel(t, 0, ETBR_WRITEADDR); + + return 0; + } + + return wp - rp; +} + +/* sysrq+v will always stop the running trace and leave it at that */ +static void etm_dump(void) +{ + struct tracectx *t = &tracer; + u32 first = 0; + int length; + + if (!t->etb_regs) { + printk(KERN_INFO "No tracing hardware found\n"); + return; + } + + if (trace_isrunning(t)) + trace_stop(t); + + etb_unlock(t); + + length = etb_getdatalen(t); + + if (length == t->etb_bufsz) + first = etb_readl(t, ETBR_WRITEADDR); + + etb_writel(t, first, ETBR_READADDR); + + printk(KERN_INFO "Trace buffer contents length: %d\n", length); + printk(KERN_INFO "--- ETB buffer begin ---\n"); + for (; length; length--) + printk("%08x", cpu_to_be32(etb_readl(t, ETBR_READMEM))); + printk(KERN_INFO "\n--- ETB buffer end ---\n"); + + /* deassert the overflow bit */ + etb_writel(t, 1, ETBR_CTRL); + etb_writel(t, 0, ETBR_CTRL); + + etb_writel(t, 0, ETBR_TRIGGERCOUNT); + etb_writel(t, 0, ETBR_READADDR); + etb_writel(t, 0, ETBR_WRITEADDR); + + etb_lock(t); +} + +static void sysrq_etm_dump(int key) +{ + dev_dbg(tracer.dev, "Dumping ETB buffer\n"); + etm_dump(); +} + +static struct sysrq_key_op sysrq_etm_op = { + .handler = sysrq_etm_dump, + 
.help_msg = "ETM buffer dump", + .action_msg = "etm", +}; + +static int etb_open(struct inode *inode, struct file *file) +{ + if (!tracer.etb_regs) + return -ENODEV; + + file->private_data = &tracer; + + return nonseekable_open(inode, file); +} + +static ssize_t etb_read(struct file *file, char __user *data, + size_t len, loff_t *ppos) +{ + int total, i; + long length; + struct tracectx *t = file->private_data; + u32 first = 0; + u32 *buf; + + mutex_lock(&t->mutex); + + if (trace_isrunning(t)) { + length = 0; + goto out; + } + + etb_unlock(t); + + total = etb_getdatalen(t); + if (total == t->etb_bufsz) + first = etb_readl(t, ETBR_WRITEADDR); + + etb_writel(t, first, ETBR_READADDR); + + length = min(total * 4, (int)len); + buf = vmalloc(length); + + dev_dbg(t->dev, "ETB buffer length: %d\n", total); + dev_dbg(t->dev, "ETB status reg: %x\n", etb_readl(t, ETBR_STATUS)); + for (i = 0; i < length / 4; i++) + buf[i] = etb_readl(t, ETBR_READMEM); + + /* the only way to deassert overflow bit in ETB status is this */ + etb_writel(t, 1, ETBR_CTRL); + etb_writel(t, 0, ETBR_CTRL); + + etb_writel(t, 0, ETBR_WRITEADDR); + etb_writel(t, 0, ETBR_READADDR); + etb_writel(t, 0, ETBR_TRIGGERCOUNT); + + etb_lock(t); + + length -= copy_to_user(data, buf, length); + vfree(buf); + +out: + mutex_unlock(&t->mutex); + + return length; +} + +static int etb_release(struct inode *inode, struct file *file) +{ + /* there's nothing to do here, actually */ + return 0; +} + +static const struct file_operations etb_fops = { + .owner = THIS_MODULE, + .read = etb_read, + .open = etb_open, + .release = etb_release, + .llseek = no_llseek, +}; + +static struct miscdevice etb_miscdev = { + .name = "tracebuf", + .minor = 0, + .fops = &etb_fops, +}; + +static int __init etb_probe(struct amba_device *dev, struct amba_id *id) +{ + struct tracectx *t = &tracer; + int ret = 0; + + ret = amba_request_regions(dev, NULL); + if (ret) + goto out; + + t->etb_regs = ioremap_nocache(dev->res.start, resource_size(&dev->res)); + if (!t->etb_regs) { + ret = -ENOMEM; + goto out_release; + } + + amba_set_drvdata(dev, t); + + etb_miscdev.parent = &dev->dev; + + ret = misc_register(&etb_miscdev); + if (ret) + goto out_unmap; + + t->emu_clk = clk_get(&dev->dev, "emu_src_ck"); + if (IS_ERR(t->emu_clk)) { + dev_dbg(&dev->dev, "Failed to obtain emu_src_ck.\n"); + return -EFAULT; + } + + clk_enable(t->emu_clk); + + etb_unlock(t); + t->etb_bufsz = etb_readl(t, ETBR_DEPTH); + dev_dbg(&dev->dev, "Size: %x\n", t->etb_bufsz); + + /* make sure trace capture is disabled */ + etb_writel(t, 0, ETBR_CTRL); + etb_writel(t, 0x1000, ETBR_FORMATTERCTRL); + etb_lock(t); + + dev_dbg(&dev->dev, "ETB AMBA driver initialized.\n"); + +out: + return ret; + +out_unmap: + amba_set_drvdata(dev, NULL); + iounmap(t->etb_regs); + +out_release: + amba_release_regions(dev); + + return ret; +} + +static int etb_remove(struct amba_device *dev) +{ + struct tracectx *t = amba_get_drvdata(dev); + + amba_set_drvdata(dev, NULL); + + iounmap(t->etb_regs); + t->etb_regs = NULL; + + clk_disable(t->emu_clk); + clk_put(t->emu_clk); + + amba_release_regions(dev); + + return 0; +} + +static struct amba_id etb_ids[] = { + { + .id = 0x0003b907, + .mask = 0x0007ffff, + }, + { 0, 0 }, +}; + +static struct amba_driver etb_driver = { + .drv = { + .name = "etb", + .owner = THIS_MODULE, + }, + .probe = etb_probe, + .remove = etb_remove, + .id_table = etb_ids, +}; + +/* use a sysfs file "trace_running" to start/stop tracing */ +static ssize_t trace_running_show(struct kobject *kobj, + struct kobj_attribute 
*attr, + char *buf) +{ + return sprintf(buf, "%x\n", trace_isrunning(&tracer)); +} + +static ssize_t trace_running_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t n) +{ + unsigned int value; + int ret; + + if (sscanf(buf, "%u", &value) != 1) + return -EINVAL; + + mutex_lock(&tracer.mutex); + ret = value ? trace_start(&tracer) : trace_stop(&tracer); + mutex_unlock(&tracer.mutex); + + return ret ? : n; +} + +static struct kobj_attribute trace_running_attr = + __ATTR(trace_running, 0644, trace_running_show, trace_running_store); + +static ssize_t trace_info_show(struct kobject *kobj, + struct kobj_attribute *attr, + char *buf) +{ + u32 etb_wa, etb_ra, etb_st, etb_fc, etm_ctrl, etm_st; + int datalen; + + etb_unlock(&tracer); + datalen = etb_getdatalen(&tracer); + etb_wa = etb_readl(&tracer, ETBR_WRITEADDR); + etb_ra = etb_readl(&tracer, ETBR_READADDR); + etb_st = etb_readl(&tracer, ETBR_STATUS); + etb_fc = etb_readl(&tracer, ETBR_FORMATTERCTRL); + etb_lock(&tracer); + + etm_unlock(&tracer); + etm_ctrl = etm_readl(&tracer, ETMR_CTRL); + etm_st = etm_readl(&tracer, ETMR_STATUS); + etm_lock(&tracer); + + return sprintf(buf, "Trace buffer len: %d\nComparator pairs: %d\n" + "ETBR_WRITEADDR:\t%08x\n" + "ETBR_READADDR:\t%08x\n" + "ETBR_STATUS:\t%08x\n" + "ETBR_FORMATTERCTRL:\t%08x\n" + "ETMR_CTRL:\t%08x\n" + "ETMR_STATUS:\t%08x\n", + datalen, + tracer.ncmppairs, + etb_wa, + etb_ra, + etb_st, + etb_fc, + etm_ctrl, + etm_st + ); +} + +static struct kobj_attribute trace_info_attr = + __ATTR(trace_info, 0444, trace_info_show, NULL); + +static ssize_t trace_mode_show(struct kobject *kobj, + struct kobj_attribute *attr, + char *buf) +{ + return sprintf(buf, "%d %d\n", + !!(tracer.flags & TRACER_CYCLE_ACC), + tracer.etm_portsz); +} + +static ssize_t trace_mode_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t n) +{ + unsigned int cycacc, portsz; + + if (sscanf(buf, "%u %u", &cycacc, &portsz) != 2) + return -EINVAL; + + mutex_lock(&tracer.mutex); + if (cycacc) + tracer.flags |= TRACER_CYCLE_ACC; + else + tracer.flags &= ~TRACER_CYCLE_ACC; + + tracer.etm_portsz = portsz & 0x0f; + mutex_unlock(&tracer.mutex); + + return n; +} + +static struct kobj_attribute trace_mode_attr = + __ATTR(trace_mode, 0644, trace_mode_show, trace_mode_store); + +static int __init etm_probe(struct amba_device *dev, struct amba_id *id) +{ + struct tracectx *t = &tracer; + int ret = 0; + + if (t->etm_regs) { + dev_dbg(&dev->dev, "ETM already initialized\n"); + ret = -EBUSY; + goto out; + } + + ret = amba_request_regions(dev, NULL); + if (ret) + goto out; + + t->etm_regs = ioremap_nocache(dev->res.start, resource_size(&dev->res)); + if (!t->etm_regs) { + ret = -ENOMEM; + goto out_release; + } + + amba_set_drvdata(dev, t); + + mutex_init(&t->mutex); + t->dev = &dev->dev; + t->flags = TRACER_CYCLE_ACC; + t->etm_portsz = 1; + + etm_unlock(t); + (void)etm_readl(t, ETMMR_PDSR); + /* dummy first read */ + (void)etm_readl(&tracer, ETMMR_OSSRR); + + t->ncmppairs = etm_readl(t, ETMR_CONFCODE) & 0xf; + etm_writel(t, 0x440, ETMR_CTRL); + etm_lock(t); + + ret = sysfs_create_file(&dev->dev.kobj, + &trace_running_attr.attr); + if (ret) + goto out_unmap; + + /* failing to create any of these two is not fatal */ + ret = sysfs_create_file(&dev->dev.kobj, &trace_info_attr.attr); + if (ret) + dev_dbg(&dev->dev, "Failed to create trace_info in sysfs\n"); + + ret = sysfs_create_file(&dev->dev.kobj, &trace_mode_attr.attr); + if (ret) + dev_dbg(&dev->dev, "Failed to create trace_mode in 
sysfs\n"); + + dev_dbg(t->dev, "ETM AMBA driver initialized.\n"); + +out: + return ret; + +out_unmap: + amba_set_drvdata(dev, NULL); + iounmap(t->etm_regs); + +out_release: + amba_release_regions(dev); + + return ret; +} + +static int etm_remove(struct amba_device *dev) +{ + struct tracectx *t = amba_get_drvdata(dev); + + amba_set_drvdata(dev, NULL); + + iounmap(t->etm_regs); + t->etm_regs = NULL; + + amba_release_regions(dev); + + sysfs_remove_file(&dev->dev.kobj, &trace_running_attr.attr); + sysfs_remove_file(&dev->dev.kobj, &trace_info_attr.attr); + sysfs_remove_file(&dev->dev.kobj, &trace_mode_attr.attr); + + return 0; +} + +static struct amba_id etm_ids[] = { + { + .id = 0x0003b921, + .mask = 0x0007ffff, + }, + { 0, 0 }, +}; + +static struct amba_driver etm_driver = { + .drv = { + .name = "etm", + .owner = THIS_MODULE, + }, + .probe = etm_probe, + .remove = etm_remove, + .id_table = etm_ids, +}; + +static int __init etm_init(void) +{ + int retval; + + retval = amba_driver_register(&etb_driver); + if (retval) { + printk(KERN_ERR "Failed to register etb\n"); + return retval; + } + + retval = amba_driver_register(&etm_driver); + if (retval) { + amba_driver_unregister(&etb_driver); + printk(KERN_ERR "Failed to probe etm\n"); + return retval; + } + + /* not being able to install this handler is not fatal */ + (void)register_sysrq_key('v', &sysrq_etm_op); + + return 0; +} + +device_initcall(etm_init); + diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c index 6ff7919..e72dc34 100644 --- a/arch/arm/kernel/fiq.c +++ b/arch/arm/kernel/fiq.c @@ -45,6 +45,7 @@ #include <asm/fiq.h> #include <asm/irq.h> #include <asm/system.h> +#include <asm/traps.h> static unsigned long no_fiq_insn; @@ -67,17 +68,22 @@ static struct fiq_handler default_owner = { static struct fiq_handler *current_fiq = &default_owner; -int show_fiq_list(struct seq_file *p, void *v) +int show_fiq_list(struct seq_file *p, int prec) { if (current_fiq != &default_owner) - seq_printf(p, "FIQ: %s\n", current_fiq->name); + seq_printf(p, "%*s: %s\n", prec, "FIQ", + current_fiq->name); return 0; } void set_fiq_handler(void *start, unsigned int length) { +#if defined(CONFIG_CPU_USE_DOMAINS) memcpy((void *)0xffff001c, start, length); +#else + memcpy(vectors_page + 0x1c, start, length); +#endif flush_icache_range(0xffff001c, 0xffff001c + length); if (!vectors_high()) flush_icache_range(0x1c, 0x1c + length); diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c index c638427..c0062ad 100644 --- a/arch/arm/kernel/ftrace.c +++ b/arch/arm/kernel/ftrace.c @@ -2,102 +2,287 @@ * Dynamic function tracing support. * * Copyright (C) 2008 Abhishek Sagar <sagar.abhishek@gmail.com> + * Copyright (C) 2010 Rabin Vincent <rabin@rab.in> * * For licencing details, see COPYING. * * Defines low-level handling of mcount calls when the kernel * is compiled with the -pg flag. When using dynamic ftrace, the - * mcount call-sites get patched lazily with NOP till they are - * enabled. All code mutation routines here take effect atomically. + * mcount call-sites get patched with NOP till they are enabled. + * All code mutation routines here are called under stop_machine(). 
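+ * + * Note that the "NOP" used for a disabled call site is not a plain + * no-op: the -pg call sequence pushes lr before branching to the + * profiling stub, so the patched-in replacement must pop it again + * (hence the pop {lr} encodings below; only the old mcount ABI + * uses a true mov r0, r0).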
*/ #include <linux/ftrace.h> +#include <linux/uaccess.h> #include <asm/cacheflush.h> #include <asm/ftrace.h> -#define PC_OFFSET 8 -#define BL_OPCODE 0xeb000000 -#define BL_OFFSET_MASK 0x00ffffff +#ifdef CONFIG_THUMB2_KERNEL +#define NOP 0xeb04f85d /* pop.w {lr} */ +#else +#define NOP 0xe8bd4000 /* pop {lr} */ +#endif -static unsigned long bl_insn; -static const unsigned long NOP = 0xe1a00000; /* mov r0, r0 */ +#ifdef CONFIG_DYNAMIC_FTRACE +#ifdef CONFIG_OLD_MCOUNT +#define OLD_MCOUNT_ADDR ((unsigned long) mcount) +#define OLD_FTRACE_ADDR ((unsigned long) ftrace_caller_old) -unsigned char *ftrace_nop_replace(void) +#define OLD_NOP 0xe1a00000 /* mov r0, r0 */ + +static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec) +{ + return rec->arch.old_mcount ? OLD_NOP : NOP; +} + +static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr) +{ + if (!rec->arch.old_mcount) + return addr; + + if (addr == MCOUNT_ADDR) + addr = OLD_MCOUNT_ADDR; + else if (addr == FTRACE_ADDR) + addr = OLD_FTRACE_ADDR; + + return addr; +} +#else +static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec) +{ + return NOP; +} + +static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr) { - return (char *)&NOP; + return addr; } +#endif + +#ifdef CONFIG_THUMB2_KERNEL +static unsigned long ftrace_gen_branch(unsigned long pc, unsigned long addr, + bool link) +{ + unsigned long s, j1, j2, i1, i2, imm10, imm11; + unsigned long first, second; + long offset; + + offset = (long)addr - (long)(pc + 4); + if (offset < -16777216 || offset > 16777214) { + WARN_ON_ONCE(1); + return 0; + } + + s = (offset >> 24) & 0x1; + i1 = (offset >> 23) & 0x1; + i2 = (offset >> 22) & 0x1; + imm10 = (offset >> 12) & 0x3ff; + imm11 = (offset >> 1) & 0x7ff; + + j1 = (!i1) ^ s; + j2 = (!i2) ^ s; + + first = 0xf000 | (s << 10) | imm10; + second = 0x9000 | (j1 << 13) | (j2 << 11) | imm11; + if (link) + second |= 1 << 14; -/* construct a branch (BL) instruction to addr */ -unsigned char *ftrace_call_replace(unsigned long pc, unsigned long addr) + return (second << 16) | first; +} +#else +static unsigned long ftrace_gen_branch(unsigned long pc, unsigned long addr, + bool link) { + unsigned long opcode = 0xea000000; long offset; - offset = (long)addr - (long)(pc + PC_OFFSET); + if (link) + opcode |= 1 << 24; + + offset = (long)addr - (long)(pc + 8); if (unlikely(offset < -33554432 || offset > 33554428)) { /* Can't generate branches that far (from ARM ARM). Ftrace * doesn't generate branches outside of kernel text. 
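 * (The branch encoding holds a 24-bit signed immediate scaled by * four, which gives exactly this +/-32MB window around pc + 8.)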
*/ WARN_ON_ONCE(1); - return NULL; + return 0; } - offset = (offset >> 2) & BL_OFFSET_MASK; - bl_insn = BL_OPCODE | offset; - return (unsigned char *)&bl_insn; + + offset = (offset >> 2) & 0x00ffffff; + + return opcode | offset; } +#endif -int ftrace_modify_code(unsigned long pc, unsigned char *old_code, - unsigned char *new_code) +static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr) { - unsigned long err = 0, replaced = 0, old, new; - - old = *(unsigned long *)old_code; - new = *(unsigned long *)new_code; + return ftrace_gen_branch(pc, addr, true); +} - __asm__ __volatile__ ( - "1: ldr %1, [%2] \n" - " cmp %1, %4 \n" - "2: streq %3, [%2] \n" - " cmpne %1, %3 \n" - " movne %0, #2 \n" - "3:\n" +static int ftrace_modify_code(unsigned long pc, unsigned long old, + unsigned long new) +{ + unsigned long replaced; - ".section .fixup, \"ax\"\n" - "4: mov %0, #1 \n" - " b 3b \n" - ".previous\n" + if (probe_kernel_read(&replaced, (void *)pc, MCOUNT_INSN_SIZE)) + return -EFAULT; - ".section __ex_table, \"a\"\n" - " .long 1b, 4b \n" - " .long 2b, 4b \n" - ".previous\n" + if (replaced != old) + return -EINVAL; - : "=r"(err), "=r"(replaced) - : "r"(pc), "r"(new), "r"(old), "0"(err), "1"(replaced) - : "memory"); + if (probe_kernel_write((void *)pc, &new, MCOUNT_INSN_SIZE)) + return -EPERM; - if (!err && (replaced == old)) - flush_icache_range(pc, pc + MCOUNT_INSN_SIZE); + flush_icache_range(pc, pc + MCOUNT_INSN_SIZE); - return err; + return 0; } int ftrace_update_ftrace_func(ftrace_func_t func) { - int ret; unsigned long pc, old; - unsigned char *new; + unsigned long new; + int ret; pc = (unsigned long)&ftrace_call; memcpy(&old, &ftrace_call, MCOUNT_INSN_SIZE); new = ftrace_call_replace(pc, (unsigned long)func); - ret = ftrace_modify_code(pc, (unsigned char *)&old, new); + + ret = ftrace_modify_code(pc, old, new); + +#ifdef CONFIG_OLD_MCOUNT + if (!ret) { + pc = (unsigned long)&ftrace_call_old; + memcpy(&old, &ftrace_call_old, MCOUNT_INSN_SIZE); + new = ftrace_call_replace(pc, (unsigned long)func); + + ret = ftrace_modify_code(pc, old, new); + } +#endif + + return ret; +} + +int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) +{ + unsigned long new, old; + unsigned long ip = rec->ip; + + old = ftrace_nop_replace(rec); + new = ftrace_call_replace(ip, adjust_address(rec, addr)); + + return ftrace_modify_code(rec->ip, old, new); +} + +int ftrace_make_nop(struct module *mod, + struct dyn_ftrace *rec, unsigned long addr) +{ + unsigned long ip = rec->ip; + unsigned long old; + unsigned long new; + int ret; + + old = ftrace_call_replace(ip, adjust_address(rec, addr)); + new = ftrace_nop_replace(rec); + ret = ftrace_modify_code(ip, old, new); + +#ifdef CONFIG_OLD_MCOUNT + if (ret == -EINVAL && addr == MCOUNT_ADDR) { + rec->arch.old_mcount = true; + + old = ftrace_call_replace(ip, adjust_address(rec, addr)); + new = ftrace_nop_replace(rec); + ret = ftrace_modify_code(ip, old, new); + } +#endif + + return ret; } -/* run from ftrace_init with irqs disabled */ int __init ftrace_dyn_arch_init(void *data) { - ftrace_mcount_set(data); + *(unsigned long *)data = 0; + return 0; } +#endif /* CONFIG_DYNAMIC_FTRACE */ + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, + unsigned long frame_pointer) +{ + unsigned long return_hooker = (unsigned long) &return_to_handler; + struct ftrace_graph_ent trace; + unsigned long old; + int err; + + if (unlikely(atomic_read(&current->tracing_graph_pause))) + return; + + old = *parent; +
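/* Hijack the return address so the traced function returns + * through return_to_handler; the real caller is recovered from + * the return stack pushed below. */ +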
*parent = return_hooker; + + err = ftrace_push_return_trace(old, self_addr, &trace.depth, + frame_pointer); + if (err == -EBUSY) { + *parent = old; + return; + } + + trace.func = self_addr; + + /* Only trace if the calling function expects to */ + if (!ftrace_graph_entry(&trace)) { + current->curr_ret_stack--; + *parent = old; + } +} + +#ifdef CONFIG_DYNAMIC_FTRACE +extern unsigned long ftrace_graph_call; +extern unsigned long ftrace_graph_call_old; +extern void ftrace_graph_caller_old(void); + +static int __ftrace_modify_caller(unsigned long *callsite, + void (*func) (void), bool enable) +{ + unsigned long caller_fn = (unsigned long) func; + unsigned long pc = (unsigned long) callsite; + unsigned long branch = ftrace_gen_branch(pc, caller_fn, false); + unsigned long nop = 0xe1a00000; /* mov r0, r0 */ + unsigned long old = enable ? nop : branch; + unsigned long new = enable ? branch : nop; + + return ftrace_modify_code(pc, old, new); +} + +static int ftrace_modify_graph_caller(bool enable) +{ + int ret; + + ret = __ftrace_modify_caller(&ftrace_graph_call, + ftrace_graph_caller, + enable); + +#ifdef CONFIG_OLD_MCOUNT + if (!ret) + ret = __ftrace_modify_caller(&ftrace_graph_call_old, + ftrace_graph_caller_old, + enable); +#endif + + return ret; +} + +int ftrace_enable_ftrace_graph_caller(void) +{ + return ftrace_modify_graph_caller(true); +} + +int ftrace_disable_ftrace_graph_caller(void) +{ + return ftrace_modify_graph_caller(false); +} +#endif /* CONFIG_DYNAMIC_FTRACE */ +#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S index 991952c..bbecaac 100644 --- a/arch/arm/kernel/head-common.S +++ b/arch/arm/kernel/head-common.S @@ -13,52 +13,7 @@ #define ATAG_CORE 0x54410001 #define ATAG_CORE_SIZE ((2*4 + 3*4) >> 2) - - .type __switch_data, %object -__switch_data: - .long __mmap_switched - .long __data_loc @ r4 - .long _data @ r5 - .long __bss_start @ r6 - .long _end @ r7 - .long processor_id @ r4 - .long __machine_arch_type @ r5 - .long __atags_pointer @ r6 - .long cr_alignment @ r7 - .long init_thread_union + THREAD_START_SP @ sp - -/* - * The following fragment of code is executed with the MMU on in MMU mode, - * and uses absolute addresses; this is not position independent. - * - * r0 = cp#15 control register - * r1 = machine ID - * r2 = atags pointer - * r9 = processor ID - */ -__mmap_switched: - adr r3, __switch_data + 4 - - ldmia r3!, {r4, r5, r6, r7} - cmp r4, r5 @ Copy data segment if needed -1: cmpne r5, r6 - ldrne fp, [r4], #4 - strne fp, [r5], #4 - bne 1b - - mov fp, #0 @ Clear BSS (and zero fp) -1: cmp r6, r7 - strcc fp, [r6],#4 - bcc 1b - - ldmia r3, {r4, r5, r6, r7, sp} - str r9, [r4] @ Save processor ID - str r1, [r5] @ Save machine type - str r2, [r6] @ Save atags pointer - bic r4, r0, #CR_A @ Clear 'A' bit - stmia r7, {r0, r4} @ Save control register values - b start_kernel -ENDPROC(__mmap_switched) +#define ATAG_CORE_SIZE_EMPTY ((2*4) >> 2) /* * Exception handling. Something went wrong and we can't proceed. We @@ -69,21 +24,7 @@ ENDPROC(__mmap_switched) * and hope for the best (useful if bootloader fails to pass a proper * machine ID for example). 
*/ -__error_p: -#ifdef CONFIG_DEBUG_LL - adr r0, str_p1 - bl printascii - mov r0, r9 - bl printhex8 - adr r0, str_p2 - bl printascii - b __error -str_p1: .asciz "\nError: unrecognized/unsupported processor variant (0x" -str_p2: .asciz ").\n" - .align -#endif -ENDPROC(__error_p) - + __HEAD __error_a: #ifdef CONFIG_DEBUG_LL mov r4, r1 @ preserve machine ID @@ -93,7 +34,7 @@ __error_a: bl printhex8 adr r0, str_a2 bl printascii - adr r3, 3f + adr r3, __lookup_machine_type_data ldmia r3, {r4, r5, r6} @ get machine desc list sub r4, r3, r4 @ get offset between virt&phys add r5, r5, r4 @ convert virt addresses to @@ -121,76 +62,6 @@ str_a3: .asciz "\nPlease check your kernel config and/or bootloader.\n" .align #endif -__error: -#ifdef CONFIG_ARCH_RPC -/* - * Turn the screen red on a error - RiscPC only. - */ - mov r0, #0x02000000 - mov r3, #0x11 - orr r3, r3, r3, lsl #8 - orr r3, r3, r3, lsl #16 - str r3, [r0], #4 - str r3, [r0], #4 - str r3, [r0], #4 - str r3, [r0], #4 -#endif -1: mov r0, r0 - b 1b -ENDPROC(__error) - - -/* - * Read processor ID register (CP#15, CR0), and look up in the linker-built - * supported processor list. Note that we can't use the absolute addresses - * for the __proc_info lists since we aren't running with the MMU on - * (and therefore, we are not in the correct address space). We have to - * calculate the offset. - * - * r9 = cpuid - * Returns: - * r3, r4, r6 corrupted - * r5 = proc_info pointer in physical address space - * r9 = cpuid (preserved) - */ -__lookup_processor_type: - adr r3, 3f - ldmda r3, {r5 - r7} - sub r3, r3, r7 @ get offset between virt&phys - add r5, r5, r3 @ convert virt addresses to - add r6, r6, r3 @ physical address space -1: ldmia r5, {r3, r4} @ value, mask - and r4, r4, r9 @ mask wanted bits - teq r3, r4 - beq 2f - add r5, r5, #PROC_INFO_SZ @ sizeof(proc_info_list) - cmp r5, r6 - blo 1b - mov r5, #0 @ unknown processor -2: mov pc, lr -ENDPROC(__lookup_processor_type) - -/* - * This provides a C-API version of the above function. - */ -ENTRY(lookup_processor_type) - stmfd sp!, {r4 - r7, r9, lr} - mov r9, r0 - bl __lookup_processor_type - mov r0, r5 - ldmfd sp!, {r4 - r7, r9, pc} -ENDPROC(lookup_processor_type) - -/* - * Look in <asm/procinfo.h> and arch/arm/kernel/arch.[ch] for - * more information about the __proc_info and __arch_info structures. - */ - .long __proc_info_begin - .long __proc_info_end -3: .long . - .long __arch_info_begin - .long __arch_info_end - /* * Lookup machine architecture in the linker-build list of architectures. * Note that we can't use the absolute addresses for the __arch_info @@ -203,7 +74,7 @@ ENDPROC(lookup_processor_type) * r5 = mach_info pointer in physical address space */ __lookup_machine_type: - adr r3, 3b + adr r3, __lookup_machine_type_data ldmia r3, {r4, r5, r6} sub r3, r3, r4 @ get offset between virt&phys add r5, r5, r3 @ convert virt addresses to @@ -219,15 +90,16 @@ __lookup_machine_type: ENDPROC(__lookup_machine_type) /* - * This provides a C-API version of the above function. + * Look in arch/arm/kernel/arch.[ch] for information about the + * __arch_info structures. */ -ENTRY(lookup_machine_type) - stmfd sp!, {r4 - r6, lr} - mov r1, r0 - bl __lookup_machine_type - mov r0, r5 - ldmfd sp!, {r4 - r6, pc} -ENDPROC(lookup_machine_type) + .align 2 + .type __lookup_machine_type_data, %object +__lookup_machine_type_data: + .long . + .long __arch_info_begin + .long __arch_info_end + .size __lookup_machine_type_data, . - __lookup_machine_type_data /* Determine validity of the r2 atags pointer. 
The heuristic requires * that the pointer be aligned, in the first 16k of physical RAM and @@ -246,7 +118,8 @@ __vet_atags: bne 1f ldr r5, [r2, #0] @ is first tag ATAG_CORE? - subs r5, r5, #ATAG_CORE_SIZE + cmp r5, #ATAG_CORE_SIZE + cmpne r5, #ATAG_CORE_SIZE_EMPTY bne 1f ldr r5, [r2, #4] ldr r6, =ATAG_CORE @@ -258,3 +131,150 @@ __vet_atags: 1: mov r2, #0 mov pc, lr ENDPROC(__vet_atags) + +/* + * The following fragment of code is executed with the MMU on in MMU mode, + * and uses absolute addresses; this is not position independent. + * + * r0 = cp#15 control register + * r1 = machine ID + * r2 = atags pointer + * r9 = processor ID + */ + __INIT +__mmap_switched: + adr r3, __mmap_switched_data + + ldmia r3!, {r4, r5, r6, r7} + cmp r4, r5 @ Copy data segment if needed +1: cmpne r5, r6 + ldrne fp, [r4], #4 + strne fp, [r5], #4 + bne 1b + + mov fp, #0 @ Clear BSS (and zero fp) +1: cmp r6, r7 + strcc fp, [r6],#4 + bcc 1b + + ARM( ldmia r3, {r4, r5, r6, r7, sp}) + THUMB( ldmia r3, {r4, r5, r6, r7} ) + THUMB( ldr sp, [r3, #16] ) + str r9, [r4] @ Save processor ID + str r1, [r5] @ Save machine type + str r2, [r6] @ Save atags pointer + bic r4, r0, #CR_A @ Clear 'A' bit + stmia r7, {r0, r4} @ Save control register values + b start_kernel +ENDPROC(__mmap_switched) + + .align 2 + .type __mmap_switched_data, %object +__mmap_switched_data: + .long __data_loc @ r4 + .long _sdata @ r5 + .long __bss_start @ r6 + .long _end @ r7 + .long processor_id @ r4 + .long __machine_arch_type @ r5 + .long __atags_pointer @ r6 + .long cr_alignment @ r7 + .long init_thread_union + THREAD_START_SP @ sp + .size __mmap_switched_data, . - __mmap_switched_data + +/* + * This provides a C-API version of __lookup_machine_type + */ +ENTRY(lookup_machine_type) + stmfd sp!, {r4 - r6, lr} + mov r1, r0 + bl __lookup_machine_type + mov r0, r5 + ldmfd sp!, {r4 - r6, pc} +ENDPROC(lookup_machine_type) + +/* + * This provides a C-API version of __lookup_processor_type + */ +ENTRY(lookup_processor_type) + stmfd sp!, {r4 - r6, r9, lr} + mov r9, r0 + bl __lookup_processor_type + mov r0, r5 + ldmfd sp!, {r4 - r6, r9, pc} +ENDPROC(lookup_processor_type) + +/* + * Read processor ID register (CP#15, CR0), and look up in the linker-built + * supported processor list. Note that we can't use the absolute addresses + * for the __proc_info lists since we aren't running with the MMU on + * (and therefore, we are not in the correct address space). We have to + * calculate the offset. + * + * r9 = cpuid + * Returns: + * r3, r4, r6 corrupted + * r5 = proc_info pointer in physical address space + * r9 = cpuid (preserved) + */ + __CPUINIT +__lookup_processor_type: + adr r3, __lookup_processor_type_data + ldmia r3, {r4 - r6} + sub r3, r3, r4 @ get offset between virt&phys + add r5, r5, r3 @ convert virt addresses to + add r6, r6, r3 @ physical address space +1: ldmia r5, {r3, r4} @ value, mask + and r4, r4, r9 @ mask wanted bits + teq r3, r4 + beq 2f + add r5, r5, #PROC_INFO_SZ @ sizeof(proc_info_list) + cmp r5, r6 + blo 1b + mov r5, #0 @ unknown processor +2: mov pc, lr +ENDPROC(__lookup_processor_type) + +/* + * Look in <asm/procinfo.h> for information about the __proc_info structure. + */ + .align 2 + .type __lookup_processor_type_data, %object +__lookup_processor_type_data: + .long . + .long __proc_info_begin + .long __proc_info_end + .size __lookup_processor_type_data, . 
- __lookup_processor_type_data + +__error_p: +#ifdef CONFIG_DEBUG_LL + adr r0, str_p1 + bl printascii + mov r0, r9 + bl printhex8 + adr r0, str_p2 + bl printascii + b __error +str_p1: .asciz "\nError: unrecognized/unsupported processor variant (0x" +str_p2: .asciz ").\n" + .align +#endif +ENDPROC(__error_p) + +__error: +#ifdef CONFIG_ARCH_RPC +/* + * Turn the screen red on a error - RiscPC only. + */ + mov r0, #0x02000000 + mov r3, #0x11 + orr r3, r3, r3, lsl #8 + orr r3, r3, r3, lsl #16 + str r3, [r0], #4 + str r3, [r0], #4 + str r3, [r0], #4 + str r3, [r0], #4 +#endif +1: mov r0, r0 + b 1b +ENDPROC(__error) diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S index cc87e17..814ce1a 100644 --- a/arch/arm/kernel/head-nommu.S +++ b/arch/arm/kernel/head-nommu.S @@ -32,9 +32,9 @@ * numbers for r1. * */ - .section ".text.head", "ax" + __HEAD ENTRY(stext) - msr cpsr_c, #PSR_F_BIT | PSR_I_BIT | SVC_MODE @ ensure svc mode + setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode @ and irqs disabled #ifndef CONFIG_CPU_CP15 ldr r9, =CONFIG_PROCESSOR_ID @@ -48,10 +48,10 @@ ENTRY(stext) movs r8, r5 @ invalid machine (r5=0)? beq __error_a @ yes, error 'a' - ldr r13, __switch_data @ address to jump to after - @ the initialization is done - adr lr, __after_proc_init @ return (PIC) address - add pc, r10, #PROCINFO_INITFUNC + adr lr, BSYM(__after_proc_init) @ return (PIC) address + ARM( add pc, r10, #PROCINFO_INITFUNC ) + THUMB( add r12, r10, #PROCINFO_INITFUNC ) + THUMB( mov pc, r12 ) ENDPROC(stext) /* @@ -59,7 +59,10 @@ ENDPROC(stext) */ __after_proc_init: #ifdef CONFIG_CPU_CP15 - mrc p15, 0, r0, c1, c0, 0 @ read control reg + /* + * CP15 system control register value returned in r0 from + * the CPU init function. + */ #ifdef CONFIG_ALIGNMENT_TRAP orr r0, r0, #CR_A #else @@ -82,7 +85,7 @@ __after_proc_init: mcr p15, 0, r0, c1, c0, 0 @ write control reg #endif /* CONFIG_CPU_CP15 */ - mov pc, r13 @ clear the BSS and jump + b __mmap_switched @ clear the BSS and jump @ to start_kernel ENDPROC(__after_proc_init) .ltorg diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S index 21e17dc..f17d9a0 100644 --- a/arch/arm/kernel/head.S +++ b/arch/arm/kernel/head.S @@ -22,6 +22,10 @@ #include <asm/thread_info.h> #include <asm/system.h> +#ifdef CONFIG_DEBUG_LL +#include <mach/debug-macro.S> +#endif + #if (PHYS_OFFSET & 0x001fffff) #error "PHYS_OFFSET must be at an even 2MiB boundary!" #endif @@ -74,18 +78,28 @@ * crap here - that's what the boot loader (or in extreme, well justified * circumstances, zImage) is for. */ - .section ".text.head", "ax" + __HEAD ENTRY(stext) - msr cpsr_c, #PSR_F_BIT | PSR_I_BIT | SVC_MODE @ ensure svc mode + setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode @ and irqs disabled mrc p15, 0, r9, c0, c0 @ get processor id bl __lookup_processor_type @ r5=procinfo r9=cpuid movs r10, r5 @ invalid processor (r5=0)? + THUMB( it eq ) @ force fixup-able long branch encoding beq __error_p @ yes, error 'p' bl __lookup_machine_type @ r5=machinfo movs r8, r5 @ invalid machine (r5=0)? + THUMB( it eq ) @ force fixup-able long branch encoding beq __error_a @ yes, error 'a' + + /* + * r1 = machine no, r2 = atags, + * r8 = machinfo, r9 = cpuid, r10 = procinfo + */ bl __vet_atags +#ifdef CONFIG_SMP_ON_UP + bl __fixup_smp +#endif bl __create_page_tables /* @@ -95,108 +109,15 @@ ENTRY(stext) * above. On return, the CPU will be ready for the MMU to be * turned on, and r0 will hold the CPU control register value. 
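 * (r0 is only a proposed value at this point: __enable_mmu still * adjusts the alignment, cache and branch-prediction bits to match * the kernel configuration before writing it to CP15.)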
*/ - ldr r13, __switch_data @ address to jump to after + ldr r13, =__mmap_switched @ address to jump to after @ mmu has been enabled - adr lr, __enable_mmu @ return (PIC) address - add pc, r10, #PROCINFO_INITFUNC + adr lr, BSYM(1f) @ return (PIC) address + ARM( add pc, r10, #PROCINFO_INITFUNC ) + THUMB( add r12, r10, #PROCINFO_INITFUNC ) + THUMB( mov pc, r12 ) +1: b __enable_mmu ENDPROC(stext) - -#if defined(CONFIG_SMP) -ENTRY(secondary_startup) - /* - * Common entry point for secondary CPUs. - * - * Ensure that we're in SVC mode, and IRQs are disabled. Lookup - * the processor type - there is no need to check the machine type - * as it has already been validated by the primary processor. - */ - msr cpsr_c, #PSR_F_BIT | PSR_I_BIT | SVC_MODE - mrc p15, 0, r9, c0, c0 @ get processor id - bl __lookup_processor_type - movs r10, r5 @ invalid processor? - moveq r0, #'p' @ yes, error 'p' - beq __error - - /* - * Use the page tables supplied from __cpu_up. - */ - adr r4, __secondary_data - ldmia r4, {r5, r7, r13} @ address to jump to after - sub r4, r4, r5 @ mmu has been enabled - ldr r4, [r7, r4] @ get secondary_data.pgdir - adr lr, __enable_mmu @ return address - add pc, r10, #PROCINFO_INITFUNC @ initialise processor - @ (return control reg) -ENDPROC(secondary_startup) - - /* - * r6 = &secondary_data - */ -ENTRY(__secondary_switched) - ldr sp, [r7, #4] @ get secondary_data.stack - mov fp, #0 - b secondary_start_kernel -ENDPROC(__secondary_switched) - - .type __secondary_data, %object -__secondary_data: - .long . - .long secondary_data - .long __secondary_switched -#endif /* defined(CONFIG_SMP) */ - - - -/* - * Setup common bits before finally enabling the MMU. Essentially - * this is just loading the page table pointer and domain access - * registers. - */ -__enable_mmu: -#ifdef CONFIG_ALIGNMENT_TRAP - orr r0, r0, #CR_A -#else - bic r0, r0, #CR_A -#endif -#ifdef CONFIG_CPU_DCACHE_DISABLE - bic r0, r0, #CR_C -#endif -#ifdef CONFIG_CPU_BPREDICT_DISABLE - bic r0, r0, #CR_Z -#endif -#ifdef CONFIG_CPU_ICACHE_DISABLE - bic r0, r0, #CR_I -#endif - mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \ - domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \ - domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \ - domain_val(DOMAIN_IO, DOMAIN_CLIENT)) - mcr p15, 0, r5, c3, c0, 0 @ load domain access register - mcr p15, 0, r4, c2, c0, 0 @ load page table pointer - b __turn_mmu_on -ENDPROC(__enable_mmu) - -/* - * Enable the MMU. This completely changes the structure of the visible - * memory space. You will not be able to trace execution through this. - * If you have an enquiry about this, *please* check the linux-arm-kernel - * mailing list archives BEFORE sending another post to the list. - * - * r0 = cp#15 control register - * r13 = *virtual* address to jump to upon completion - * - * other registers depend on the function called upon completion - */ - .align 5 -__turn_mmu_on: - mov r0, r0 - mcr p15, 0, r0, c1, c0, 0 @ write control reg - mrc p15, 0, r3, c0, c0, 0 @ read id reg - mov r3, r3 - mov r3, r3 - mov pc, r13 -ENDPROC(__turn_mmu_on) - + .ltorg /* * Setup the initial page tables. We only setup the barest @@ -208,7 +129,7 @@ ENDPROC(__turn_mmu_on) * r10 = procinfo * * Returns: - * r0, r3, r6, r7 corrupted + * r0, r3, r5-r7 corrupted * r4 = physical page table address */ __create_page_tables: @@ -230,19 +151,30 @@ __create_page_tables: ldr r7, [r10, #PROCINFO_MM_MMUFLAGS] @ mm_mmuflags /* - * Create identity mapping for first MB of kernel to - * cater for the MMU enable. 
This identity mapping - * will be removed by paging_init(). We use our current program - * counter to determine corresponding section base address. + * Create identity mapping to cater for __enable_mmu. + * This identity mapping will be removed by paging_init(). */ - mov r6, pc, lsr #20 @ start of kernel section - orr r3, r7, r6, lsl #20 @ flags + kernel base - str r3, [r4, r6, lsl #2] @ identity mapping + adr r0, __enable_mmu_loc + ldmia r0, {r3, r5, r6} + sub r0, r0, r3 @ virt->phys offset + add r5, r5, r0 @ phys __enable_mmu + add r6, r6, r0 @ phys __enable_mmu_end + mov r5, r5, lsr #20 + mov r6, r6, lsr #20 + +1: orr r3, r7, r5, lsl #20 @ flags + kernel base + str r3, [r4, r5, lsl #2] @ identity mapping + teq r5, r6 + addne r5, r5, #1 @ next section + bne 1b /* * Now setup the pagetables for our kernel direct * mapped region. */ + mov r3, pc + mov r3, r3, lsr #20 + orr r3, r7, r3, lsl #20 add r0, r4, #(KERNEL_START & 0xff000000) >> 18 str r3, [r0, #(KERNEL_START & 0x00f00000) >> 18]! ldr r6, =(KERNEL_END - 1) @@ -283,24 +215,35 @@ __create_page_tables: str r6, [r0] #ifdef CONFIG_DEBUG_LL - ldr r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags +#ifndef CONFIG_DEBUG_ICEDCC /* * Map in IO space for serial debugging. * This allows debug messages to be output * via a serial console before paging_init. */ - ldr r3, [r8, #MACHINFO_PGOFFIO] + addruart r7, r3 + + mov r3, r3, lsr #20 + mov r3, r3, lsl #2 + add r0, r4, r3 rsb r3, r3, #0x4000 @ PTRS_PER_PGD*sizeof(long) cmp r3, #0x0800 @ limit to 512MB movhi r3, #0x0800 add r6, r0, r3 - ldr r3, [r8, #MACHINFO_PHYSIO] - orr r3, r3, r7 + mov r3, r7, lsr #20 + ldr r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags + orr r3, r7, r3, lsl #20 1: str r3, [r0], #4 add r3, r3, #1 << 20 teq r0, r6 bne 1b + +#else /* CONFIG_DEBUG_ICEDCC */ + /* we don't need any serial debugging mappings for ICEDCC */ + ldr r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags +#endif /* !CONFIG_DEBUG_ICEDCC */ + #if defined(CONFIG_ARCH_NETWINDER) || defined(CONFIG_ARCH_CATS) /* * If we're using the NetWinder or CATS, we also need to map @@ -326,5 +269,180 @@ __create_page_tables: mov pc, lr ENDPROC(__create_page_tables) .ltorg + .align +__enable_mmu_loc: + .long . + .long __enable_mmu + .long __enable_mmu_end + +#if defined(CONFIG_SMP) + __CPUINIT +ENTRY(secondary_startup) + /* + * Common entry point for secondary CPUs. + * + * Ensure that we're in SVC mode, and IRQs are disabled. Lookup + * the processor type - there is no need to check the machine type + * as it has already been validated by the primary processor. + */ + setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 + mrc p15, 0, r9, c0, c0 @ get processor id + bl __lookup_processor_type + movs r10, r5 @ invalid processor? + moveq r0, #'p' @ yes, error 'p' + THUMB( it eq ) @ force fixup-able long branch encoding + beq __error_p + + /* + * Use the page tables supplied from __cpu_up. 
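+ * (read PIC-style: the virt-to-phys offset is computed from the + * __secondary_data anchor so that secondary_data.pgdir can be + * loaded before the MMU is enabled)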
+ */ + adr r4, __secondary_data + ldmia r4, {r5, r7, r12} @ address to jump to after + sub r4, r4, r5 @ mmu has been enabled + ldr r4, [r7, r4] @ get secondary_data.pgdir + adr lr, BSYM(__enable_mmu) @ return address + mov r13, r12 @ __secondary_switched address + ARM( add pc, r10, #PROCINFO_INITFUNC ) @ initialise processor + @ (return control reg) + THUMB( add r12, r10, #PROCINFO_INITFUNC ) + THUMB( mov pc, r12 ) +ENDPROC(secondary_startup) + + /* + * r6 = &secondary_data + */ +ENTRY(__secondary_switched) + ldr sp, [r7, #4] @ get secondary_data.stack + mov fp, #0 + b secondary_start_kernel +ENDPROC(__secondary_switched) + + .align + + .type __secondary_data, %object +__secondary_data: + .long . + .long secondary_data + .long __secondary_switched +#endif /* defined(CONFIG_SMP) */ + + + +/* + * Setup common bits before finally enabling the MMU. Essentially + * this is just loading the page table pointer and domain access + * registers. + * + * r0 = cp#15 control register + * r1 = machine ID + * r2 = atags pointer + * r4 = page table pointer + * r9 = processor ID + * r13 = *virtual* address to jump to upon completion + */ +__enable_mmu: +#ifdef CONFIG_ALIGNMENT_TRAP + orr r0, r0, #CR_A +#else + bic r0, r0, #CR_A +#endif +#ifdef CONFIG_CPU_DCACHE_DISABLE + bic r0, r0, #CR_C +#endif +#ifdef CONFIG_CPU_BPREDICT_DISABLE + bic r0, r0, #CR_Z +#endif +#ifdef CONFIG_CPU_ICACHE_DISABLE + bic r0, r0, #CR_I +#endif + mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \ + domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \ + domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \ + domain_val(DOMAIN_IO, DOMAIN_CLIENT)) + mcr p15, 0, r5, c3, c0, 0 @ load domain access register + mcr p15, 0, r4, c2, c0, 0 @ load page table pointer + b __turn_mmu_on +ENDPROC(__enable_mmu) + +/* + * Enable the MMU. This completely changes the structure of the visible + * memory space. You will not be able to trace execution through this. + * If you have an enquiry about this, *please* check the linux-arm-kernel + * mailing list archives BEFORE sending another post to the list. + * + * r0 = cp#15 control register + * r1 = machine ID + * r2 = atags pointer + * r9 = processor ID + * r13 = *virtual* address to jump to upon completion + * + * other registers depend on the function called upon completion + */ + .align 5 +__turn_mmu_on: + mov r0, r0 + mcr p15, 0, r0, c1, c0, 0 @ write control reg + mrc p15, 0, r3, c0, c0, 0 @ read id reg + mov r3, r3 + mov r3, r13 + mov pc, r3 +__enable_mmu_end: +ENDPROC(__turn_mmu_on) + + +#ifdef CONFIG_SMP_ON_UP +__fixup_smp: + mov r4, #0x00070000 + orr r3, r4, #0xff000000 @ mask 0xff070000 + orr r4, r4, #0x41000000 @ val 0x41070000 + and r0, r9, r3 + teq r0, r4 @ ARM CPU and ARMv6/v7? + bne __fixup_smp_on_up @ no, assume UP + + orr r3, r3, #0x0000ff00 + orr r3, r3, #0x000000f0 @ mask 0xff07fff0 + orr r4, r4, #0x0000b000 + orr r4, r4, #0x00000020 @ val 0x4107b020 + and r0, r9, r3 + teq r0, r4 @ ARM 11MPCore? + moveq pc, lr @ yes, assume SMP + + mrc p15, 0, r0, c0, c0, 5 @ read MPIDR + tst r0, #1 << 31 + movne pc, lr @ bit 31 => SMP + +__fixup_smp_on_up: + adr r0, 1f + ldmia r0, {r3 - r5} + sub r3, r0, r3 + add r4, r4, r3 + add r5, r5, r3 +2: cmp r4, r5 + movhs pc, lr + ldmia r4!, {r0, r6} + ARM( str r6, [r0, r3] ) + THUMB( add r0, r0, r3 ) +#ifdef __ARMEB__ + THUMB( mov r6, r6, ror #16 ) @ Convert word order for big-endian. +#endif + THUMB( strh r6, [r0], #2 ) @ For Thumb-2, store as two halfwords + THUMB( mov r6, r6, lsr #16 ) @ to be robust against misaligned r3. 
+ THUMB( strh r6, [r0] ) + b 2b +ENDPROC(__fixup_smp) + + .align +1: .word . + .word __smpalt_begin + .word __smpalt_end + + .pushsection .data + .globl smp_on_up +smp_on_up: + ALT_SMP(.long 1) + ALT_UP(.long 0) + .popsection + +#endif #include "head-common.S" diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c new file mode 100644 index 0000000..c9f3f04 --- /dev/null +++ b/arch/arm/kernel/hw_breakpoint.c @@ -0,0 +1,943 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * Copyright (C) 2009, 2010 ARM Limited + * + * Author: Will Deacon <will.deacon@arm.com> + */ + +/* + * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility, + * using the CPU's debug registers. + */ +#define pr_fmt(fmt) "hw-breakpoint: " fmt + +#include <linux/errno.h> +#include <linux/hardirq.h> +#include <linux/perf_event.h> +#include <linux/hw_breakpoint.h> +#include <linux/smp.h> + +#include <asm/cacheflush.h> +#include <asm/cputype.h> +#include <asm/current.h> +#include <asm/hw_breakpoint.h> +#include <asm/kdebug.h> +#include <asm/system.h> +#include <asm/traps.h> + +/* Breakpoint currently in use for each BRP. */ +static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]); + +/* Watchpoint currently in use for each WRP. */ +static DEFINE_PER_CPU(struct perf_event *, wp_on_reg[ARM_MAX_WRP]); + +/* Number of BRP/WRP registers on this CPU. */ +static int core_num_brps; +static int core_num_reserved_brps; +static int core_num_wrps; + +/* Debug architecture version. */ +static u8 debug_arch; + +/* Maximum supported watchpoint length. 
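+ * (8 bytes when the hardware implements 8-bit byte-address select, + * otherwise 4; probed at init time by get_max_wp_len() below)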
*/ +static u8 max_watchpoint_len; + +#define READ_WB_REG_CASE(OP2, M, VAL) \ + case ((OP2 << 4) + M): \ + ARM_DBG_READ(c ## M, OP2, VAL); \ + break + +#define WRITE_WB_REG_CASE(OP2, M, VAL) \ + case ((OP2 << 4) + M): \ + ARM_DBG_WRITE(c ## M, OP2, VAL);\ + break + +#define GEN_READ_WB_REG_CASES(OP2, VAL) \ + READ_WB_REG_CASE(OP2, 0, VAL); \ + READ_WB_REG_CASE(OP2, 1, VAL); \ + READ_WB_REG_CASE(OP2, 2, VAL); \ + READ_WB_REG_CASE(OP2, 3, VAL); \ + READ_WB_REG_CASE(OP2, 4, VAL); \ + READ_WB_REG_CASE(OP2, 5, VAL); \ + READ_WB_REG_CASE(OP2, 6, VAL); \ + READ_WB_REG_CASE(OP2, 7, VAL); \ + READ_WB_REG_CASE(OP2, 8, VAL); \ + READ_WB_REG_CASE(OP2, 9, VAL); \ + READ_WB_REG_CASE(OP2, 10, VAL); \ + READ_WB_REG_CASE(OP2, 11, VAL); \ + READ_WB_REG_CASE(OP2, 12, VAL); \ + READ_WB_REG_CASE(OP2, 13, VAL); \ + READ_WB_REG_CASE(OP2, 14, VAL); \ + READ_WB_REG_CASE(OP2, 15, VAL) + +#define GEN_WRITE_WB_REG_CASES(OP2, VAL) \ + WRITE_WB_REG_CASE(OP2, 0, VAL); \ + WRITE_WB_REG_CASE(OP2, 1, VAL); \ + WRITE_WB_REG_CASE(OP2, 2, VAL); \ + WRITE_WB_REG_CASE(OP2, 3, VAL); \ + WRITE_WB_REG_CASE(OP2, 4, VAL); \ + WRITE_WB_REG_CASE(OP2, 5, VAL); \ + WRITE_WB_REG_CASE(OP2, 6, VAL); \ + WRITE_WB_REG_CASE(OP2, 7, VAL); \ + WRITE_WB_REG_CASE(OP2, 8, VAL); \ + WRITE_WB_REG_CASE(OP2, 9, VAL); \ + WRITE_WB_REG_CASE(OP2, 10, VAL); \ + WRITE_WB_REG_CASE(OP2, 11, VAL); \ + WRITE_WB_REG_CASE(OP2, 12, VAL); \ + WRITE_WB_REG_CASE(OP2, 13, VAL); \ + WRITE_WB_REG_CASE(OP2, 14, VAL); \ + WRITE_WB_REG_CASE(OP2, 15, VAL) + +static u32 read_wb_reg(int n) +{ + u32 val = 0; + + switch (n) { + GEN_READ_WB_REG_CASES(ARM_OP2_BVR, val); + GEN_READ_WB_REG_CASES(ARM_OP2_BCR, val); + GEN_READ_WB_REG_CASES(ARM_OP2_WVR, val); + GEN_READ_WB_REG_CASES(ARM_OP2_WCR, val); + default: + pr_warning("attempt to read from unknown breakpoint " + "register %d\n", n); + } + + return val; +} + +static void write_wb_reg(int n, u32 val) +{ + switch (n) { + GEN_WRITE_WB_REG_CASES(ARM_OP2_BVR, val); + GEN_WRITE_WB_REG_CASES(ARM_OP2_BCR, val); + GEN_WRITE_WB_REG_CASES(ARM_OP2_WVR, val); + GEN_WRITE_WB_REG_CASES(ARM_OP2_WCR, val); + default: + pr_warning("attempt to write to unknown breakpoint " + "register %d\n", n); + } + isb(); +} + +/* Determine debug architecture. */ +static u8 get_debug_arch(void) +{ + u32 didr; + + /* Do we implement the extended CPUID interface? */ + if (((read_cpuid_id() >> 16) & 0xf) != 0xf) { + pr_warning("CPUID feature registers not supported. " + "Assuming v6 debug is present.\n"); + return ARM_DEBUG_ARCH_V6; + } + + ARM_DBG_READ(c0, 0, didr); + return (didr >> 16) & 0xf; +} + +u8 arch_get_debug_arch(void) +{ + return debug_arch; +} + +/* Determine number of BRP register available. */ +static int get_num_brp_resources(void) +{ + u32 didr; + ARM_DBG_READ(c0, 0, didr); + return ((didr >> 24) & 0xf) + 1; +} + +/* Does this core support mismatch breakpoints? */ +static int core_has_mismatch_brps(void) +{ + return (get_debug_arch() >= ARM_DEBUG_ARCH_V7_ECP14 && + get_num_brp_resources() > 1); +} + +/* Determine number of usable WRPs available. */ +static int get_num_wrps(void) +{ + /* + * FIXME: When a watchpoint fires, the only way to work out which + * watchpoint it was is by disassembling the faulting instruction + * and working out the address of the memory access. + * + * Furthermore, we can only do this if the watchpoint was precise + * since imprecise watchpoints prevent us from calculating register + * based addresses. 
+ * + * Providing we have more than 1 breakpoint register, we only report + * a single watchpoint register for the time being. This way, we always + * know which watchpoint fired. In the future we can either add a + * disassembler and address generation emulator, or we can insert a + * check to see if the DFAR is set on watchpoint exception entry + * [the ARM ARM states that the DFAR is UNKNOWN, but experience shows + * that it is set on some implementations]. + */ + +#if 0 + int wrps; + u32 didr; + ARM_DBG_READ(c0, 0, didr); + wrps = ((didr >> 28) & 0xf) + 1; +#endif + int wrps = 1; + + if (core_has_mismatch_brps() && wrps >= get_num_brp_resources()) + wrps = get_num_brp_resources() - 1; + + return wrps; +} + +/* We reserve one breakpoint for each watchpoint. */ +static int get_num_reserved_brps(void) +{ + if (core_has_mismatch_brps()) + return get_num_wrps(); + return 0; +} + +/* Determine number of usable BRPs available. */ +static int get_num_brps(void) +{ + int brps = get_num_brp_resources(); + if (core_has_mismatch_brps()) + brps -= get_num_reserved_brps(); + return brps; +} + +/* + * In order to access the breakpoint/watchpoint control registers, + * we must be running in debug monitor mode. Unfortunately, we can + * be put into halting debug mode at any time by an external debugger + * but there is nothing we can do to prevent that. + */ +static int enable_monitor_mode(void) +{ + u32 dscr; + int ret = 0; + + ARM_DBG_READ(c1, 0, dscr); + + /* Ensure that halting mode is disabled. */ + if (WARN_ONCE(dscr & ARM_DSCR_HDBGEN, "halting debug mode enabled." + "Unable to access hardware resources.")) { + ret = -EPERM; + goto out; + } + + /* If monitor mode is already enabled, just return. */ + if (dscr & ARM_DSCR_MDBGEN) + goto out; + + /* Write to the corresponding DSCR. */ + switch (get_debug_arch()) { + case ARM_DEBUG_ARCH_V6: + case ARM_DEBUG_ARCH_V6_1: + ARM_DBG_WRITE(c1, 0, (dscr | ARM_DSCR_MDBGEN)); + break; + case ARM_DEBUG_ARCH_V7_ECP14: + ARM_DBG_WRITE(c2, 2, (dscr | ARM_DSCR_MDBGEN)); + break; + default: + ret = -ENODEV; + goto out; + } + + /* Check that the write made it through. */ + ARM_DBG_READ(c1, 0, dscr); + if (!(dscr & ARM_DSCR_MDBGEN)) + ret = -EPERM; + +out: + return ret; +} + +int hw_breakpoint_slots(int type) +{ + /* + * We can be called early, so don't rely on + * our static variables being initialised. + */ + switch (type) { + case TYPE_INST: + return get_num_brps(); + case TYPE_DATA: + return get_num_wrps(); + default: + pr_warning("unknown slot type: %d\n", type); + return 0; + } +} + +/* + * Check if 8-bit byte-address select is available. + * This clobbers WRP 0. + */ +static u8 get_max_wp_len(void) +{ + u32 ctrl_reg; + struct arch_hw_breakpoint_ctrl ctrl; + u8 size = 4; + + if (debug_arch < ARM_DEBUG_ARCH_V7_ECP14) + goto out; + + memset(&ctrl, 0, sizeof(ctrl)); + ctrl.len = ARM_BREAKPOINT_LEN_8; + ctrl_reg = encode_ctrl_reg(ctrl); + + write_wb_reg(ARM_BASE_WVR, 0); + write_wb_reg(ARM_BASE_WCR, ctrl_reg); + if ((read_wb_reg(ARM_BASE_WCR) & ctrl_reg) == ctrl_reg) + size = 8; + +out: + return size; +} + +u8 arch_get_max_wp_len(void) +{ + return max_watchpoint_len; +} + +/* + * Install a perf counter breakpoint. + */ +int arch_install_hw_breakpoint(struct perf_event *bp) +{ + struct arch_hw_breakpoint *info = counter_arch_bp(bp); + struct perf_event **slot, **slots; + int i, max_slots, ctrl_base, val_base, ret = 0; + u32 addr, ctrl; + + /* Ensure that we are in monitor mode and halting mode is disabled. 
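+ * (the breakpoint and watchpoint registers are only writable from + * monitor mode, and an external debugger may have switched the + * core to halting mode behind our back)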
*/ + ret = enable_monitor_mode(); + if (ret) + goto out; + + addr = info->address; + ctrl = encode_ctrl_reg(info->ctrl) | 0x1; + + if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) { + /* Breakpoint */ + ctrl_base = ARM_BASE_BCR; + val_base = ARM_BASE_BVR; + slots = (struct perf_event **)__get_cpu_var(bp_on_reg); + max_slots = core_num_brps; + if (info->step_ctrl.enabled) { + /* Override the breakpoint data with the step data. */ + addr = info->trigger & ~0x3; + ctrl = encode_ctrl_reg(info->step_ctrl); + } + } else { + /* Watchpoint */ + if (info->step_ctrl.enabled) { + /* Install into the reserved breakpoint region. */ + ctrl_base = ARM_BASE_BCR + core_num_brps; + val_base = ARM_BASE_BVR + core_num_brps; + /* Override the watchpoint data with the step data. */ + addr = info->trigger & ~0x3; + ctrl = encode_ctrl_reg(info->step_ctrl); + } else { + ctrl_base = ARM_BASE_WCR; + val_base = ARM_BASE_WVR; + } + slots = (struct perf_event **)__get_cpu_var(wp_on_reg); + max_slots = core_num_wrps; + } + + for (i = 0; i < max_slots; ++i) { + slot = &slots[i]; + + if (!*slot) { + *slot = bp; + break; + } + } + + if (WARN_ONCE(i == max_slots, "Can't find any breakpoint slot")) { + ret = -EBUSY; + goto out; + } + + /* Setup the address register. */ + write_wb_reg(val_base + i, addr); + + /* Setup the control register. */ + write_wb_reg(ctrl_base + i, ctrl); + +out: + return ret; +} + +void arch_uninstall_hw_breakpoint(struct perf_event *bp) +{ + struct arch_hw_breakpoint *info = counter_arch_bp(bp); + struct perf_event **slot, **slots; + int i, max_slots, base; + + if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) { + /* Breakpoint */ + base = ARM_BASE_BCR; + slots = (struct perf_event **)__get_cpu_var(bp_on_reg); + max_slots = core_num_brps; + } else { + /* Watchpoint */ + if (info->step_ctrl.enabled) + base = ARM_BASE_BCR + core_num_brps; + else + base = ARM_BASE_WCR; + slots = (struct perf_event **)__get_cpu_var(wp_on_reg); + max_slots = core_num_wrps; + } + + /* Remove the breakpoint. */ + for (i = 0; i < max_slots; ++i) { + slot = &slots[i]; + + if (*slot == bp) { + *slot = NULL; + break; + } + } + + if (WARN_ONCE(i == max_slots, "Can't find any breakpoint slot")) + return; + + /* Reset the control register. */ + write_wb_reg(base + i, 0); +} + +static int get_hbp_len(u8 hbp_len) +{ + unsigned int len_in_bytes = 0; + + switch (hbp_len) { + case ARM_BREAKPOINT_LEN_1: + len_in_bytes = 1; + break; + case ARM_BREAKPOINT_LEN_2: + len_in_bytes = 2; + break; + case ARM_BREAKPOINT_LEN_4: + len_in_bytes = 4; + break; + case ARM_BREAKPOINT_LEN_8: + len_in_bytes = 8; + break; + } + + return len_in_bytes; +} + +/* + * Check whether bp virtual address is in kernel space. + */ +int arch_check_bp_in_kernelspace(struct perf_event *bp) +{ + unsigned int len; + unsigned long va; + struct arch_hw_breakpoint *info = counter_arch_bp(bp); + + va = info->address; + len = get_hbp_len(info->ctrl.len); + + return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE); +} + +/* + * Extract generic type and length encodings from an arch_hw_breakpoint_ctrl. + * Hopefully this will disappear when ptrace can bypass the conversion + * to generic breakpoint descriptions. 
+ */ +int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl, + int *gen_len, int *gen_type) +{ + /* Type */ + switch (ctrl.type) { + case ARM_BREAKPOINT_EXECUTE: + *gen_type = HW_BREAKPOINT_X; + break; + case ARM_BREAKPOINT_LOAD: + *gen_type = HW_BREAKPOINT_R; + break; + case ARM_BREAKPOINT_STORE: + *gen_type = HW_BREAKPOINT_W; + break; + case ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE: + *gen_type = HW_BREAKPOINT_RW; + break; + default: + return -EINVAL; + } + + /* Len */ + switch (ctrl.len) { + case ARM_BREAKPOINT_LEN_1: + *gen_len = HW_BREAKPOINT_LEN_1; + break; + case ARM_BREAKPOINT_LEN_2: + *gen_len = HW_BREAKPOINT_LEN_2; + break; + case ARM_BREAKPOINT_LEN_4: + *gen_len = HW_BREAKPOINT_LEN_4; + break; + case ARM_BREAKPOINT_LEN_8: + *gen_len = HW_BREAKPOINT_LEN_8; + break; + default: + return -EINVAL; + } + + return 0; +} + +/* + * Construct an arch_hw_breakpoint from a perf_event. + */ +static int arch_build_bp_info(struct perf_event *bp) +{ + struct arch_hw_breakpoint *info = counter_arch_bp(bp); + + /* Type */ + switch (bp->attr.bp_type) { + case HW_BREAKPOINT_X: + info->ctrl.type = ARM_BREAKPOINT_EXECUTE; + break; + case HW_BREAKPOINT_R: + info->ctrl.type = ARM_BREAKPOINT_LOAD; + break; + case HW_BREAKPOINT_W: + info->ctrl.type = ARM_BREAKPOINT_STORE; + break; + case HW_BREAKPOINT_RW: + info->ctrl.type = ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE; + break; + default: + return -EINVAL; + } + + /* Len */ + switch (bp->attr.bp_len) { + case HW_BREAKPOINT_LEN_1: + info->ctrl.len = ARM_BREAKPOINT_LEN_1; + break; + case HW_BREAKPOINT_LEN_2: + info->ctrl.len = ARM_BREAKPOINT_LEN_2; + break; + case HW_BREAKPOINT_LEN_4: + info->ctrl.len = ARM_BREAKPOINT_LEN_4; + break; + case HW_BREAKPOINT_LEN_8: + info->ctrl.len = ARM_BREAKPOINT_LEN_8; + if ((info->ctrl.type != ARM_BREAKPOINT_EXECUTE) + && max_watchpoint_len >= 8) + break; + default: + return -EINVAL; + } + + /* + * Breakpoints must be of length 2 (thumb) or 4 (ARM) bytes. + * Watchpoints can be of length 1, 2, 4 or 8 bytes if supported + * by the hardware and must be aligned to the appropriate number of + * bytes. + */ + if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE && + info->ctrl.len != ARM_BREAKPOINT_LEN_2 && + info->ctrl.len != ARM_BREAKPOINT_LEN_4) + return -EINVAL; + + /* Address */ + info->address = bp->attr.bp_addr; + + /* Privilege */ + info->ctrl.privilege = ARM_BREAKPOINT_USER; + if (arch_check_bp_in_kernelspace(bp)) + info->ctrl.privilege |= ARM_BREAKPOINT_PRIV; + + /* Enabled? */ + info->ctrl.enabled = !bp->attr.disabled; + + /* Mismatch */ + info->ctrl.mismatch = 0; + + return 0; +} + +/* + * Validate the arch-specific HW Breakpoint register settings. + */ +int arch_validate_hwbkpt_settings(struct perf_event *bp) +{ + struct arch_hw_breakpoint *info = counter_arch_bp(bp); + int ret = 0; + u32 offset, alignment_mask = 0x3; + + /* Build the arch_hw_breakpoint. */ + ret = arch_build_bp_info(bp); + if (ret) + goto out; + + /* Check address alignment. */ + if (info->ctrl.len == ARM_BREAKPOINT_LEN_8) + alignment_mask = 0x7; + offset = info->address & alignment_mask; + switch (offset) { + case 0: + /* Aligned */ + break; + case 1: + /* Allow single byte watchpoint. */ + if (info->ctrl.len == ARM_BREAKPOINT_LEN_1) + break; + case 2: + /* Allow halfword watchpoints and breakpoints. 
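+ * (cases 1 and 2 fall through deliberately: an offset that is not + * excused by a matching length lands in the default case and is + * rejected with -EINVAL)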
*/ + if (info->ctrl.len == ARM_BREAKPOINT_LEN_2) + break; + default: + ret = -EINVAL; + goto out; + } + + info->address &= ~alignment_mask; + info->ctrl.len <<= offset; + + /* + * Currently we rely on an overflow handler to take + * care of single-stepping the breakpoint when it fires. + * In the case of userspace breakpoints on a core with V7 debug, + * we can use the mismatch feature as a poor-man's hardware + * single-step, but this only works for per-task breakpoints. + */ + if (WARN_ONCE(!bp->overflow_handler && + (arch_check_bp_in_kernelspace(bp) || !core_has_mismatch_brps() + || !bp->hw.bp_target), + "overflow handler required but none found")) { + ret = -EINVAL; + } +out: + return ret; +} + +/* + * Enable/disable single-stepping over the breakpoint bp at address addr. + */ +static void enable_single_step(struct perf_event *bp, u32 addr) +{ + struct arch_hw_breakpoint *info = counter_arch_bp(bp); + + arch_uninstall_hw_breakpoint(bp); + info->step_ctrl.mismatch = 1; + info->step_ctrl.len = ARM_BREAKPOINT_LEN_4; + info->step_ctrl.type = ARM_BREAKPOINT_EXECUTE; + info->step_ctrl.privilege = info->ctrl.privilege; + info->step_ctrl.enabled = 1; + info->trigger = addr; + arch_install_hw_breakpoint(bp); +} + +static void disable_single_step(struct perf_event *bp) +{ + arch_uninstall_hw_breakpoint(bp); + counter_arch_bp(bp)->step_ctrl.enabled = 0; + arch_install_hw_breakpoint(bp); +} + +static void watchpoint_handler(unsigned long unknown, struct pt_regs *regs) +{ + int i; + struct perf_event *wp, **slots; + struct arch_hw_breakpoint *info; + + slots = (struct perf_event **)__get_cpu_var(wp_on_reg); + + /* Without a disassembler, we can only handle 1 watchpoint. */ + BUG_ON(core_num_wrps > 1); + + for (i = 0; i < core_num_wrps; ++i) { + rcu_read_lock(); + + wp = slots[i]; + + if (wp == NULL) { + rcu_read_unlock(); + continue; + } + + /* + * The DFAR is an unknown value. Since we only allow a + * single watchpoint, we can set the trigger to the lowest + * possible faulting address. + */ + info = counter_arch_bp(wp); + info->trigger = wp->attr.bp_addr; + pr_debug("watchpoint fired: address = 0x%x\n", info->trigger); + perf_bp_event(wp, regs); + + /* + * If no overflow handler is present, insert a temporary + * mismatch breakpoint so we can single-step over the + * watchpoint trigger. + */ + if (!wp->overflow_handler) + enable_single_step(wp, instruction_pointer(regs)); + + rcu_read_unlock(); + } +} + +static void watchpoint_single_step_handler(unsigned long pc) +{ + int i; + struct perf_event *wp, **slots; + struct arch_hw_breakpoint *info; + + slots = (struct perf_event **)__get_cpu_var(wp_on_reg); + + for (i = 0; i < core_num_reserved_brps; ++i) { + rcu_read_lock(); + + wp = slots[i]; + + if (wp == NULL) + goto unlock; + + info = counter_arch_bp(wp); + if (!info->step_ctrl.enabled) + goto unlock; + + /* + * Restore the original watchpoint if we've completed the + * single-step. + */ + if (info->trigger != pc) + disable_single_step(wp); + +unlock: + rcu_read_unlock(); + } +} + +static void breakpoint_handler(unsigned long unknown, struct pt_regs *regs) +{ + int i; + u32 ctrl_reg, val, addr; + struct perf_event *bp, **slots; + struct arch_hw_breakpoint *info; + struct arch_hw_breakpoint_ctrl ctrl; + + slots = (struct perf_event **)__get_cpu_var(bp_on_reg); + + /* The exception entry code places the amended lr in the PC. */ + addr = regs->ARM_pc; + + /* Check the currently installed breakpoints first. 
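+ * (a hit requires both a BVR address match and a BCR + * byte-address-select bit covering the exact faulting address)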
*/ + for (i = 0; i < core_num_brps; ++i) { + rcu_read_lock(); + + bp = slots[i]; + + if (bp == NULL) + goto unlock; + + info = counter_arch_bp(bp); + + /* Check if the breakpoint value matches. */ + val = read_wb_reg(ARM_BASE_BVR + i); + if (val != (addr & ~0x3)) + goto mismatch; + + /* Possible match, check the byte address select to confirm. */ + ctrl_reg = read_wb_reg(ARM_BASE_BCR + i); + decode_ctrl_reg(ctrl_reg, &ctrl); + if ((1 << (addr & 0x3)) & ctrl.len) { + info->trigger = addr; + pr_debug("breakpoint fired: address = 0x%x\n", addr); + perf_bp_event(bp, regs); + if (!bp->overflow_handler) + enable_single_step(bp, addr); + goto unlock; + } + +mismatch: + /* If we're stepping a breakpoint, it can now be restored. */ + if (info->step_ctrl.enabled) + disable_single_step(bp); +unlock: + rcu_read_unlock(); + } + + /* Handle any pending watchpoint single-step breakpoints. */ + watchpoint_single_step_handler(addr); +} + +/* + * Called from either the Data Abort Handler [watchpoint] or the + * Prefetch Abort Handler [breakpoint] with preemption disabled. + */ +static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr, + struct pt_regs *regs) +{ + int ret = 0; + u32 dscr; + + /* We must be called with preemption disabled. */ + WARN_ON(preemptible()); + + /* We only handle watchpoints and hardware breakpoints. */ + ARM_DBG_READ(c1, 0, dscr); + + /* Perform perf callbacks. */ + switch (ARM_DSCR_MOE(dscr)) { + case ARM_ENTRY_BREAKPOINT: + breakpoint_handler(addr, regs); + break; + case ARM_ENTRY_ASYNC_WATCHPOINT: + WARN(1, "Asynchronous watchpoint exception taken. Debugging results may be unreliable\n"); + case ARM_ENTRY_SYNC_WATCHPOINT: + watchpoint_handler(addr, regs); + break; + default: + ret = 1; /* Unhandled fault. */ + } + + /* + * Re-enable preemption after it was disabled in the + * low-level exception handling code. + */ + preempt_enable(); + + return ret; +} + +/* + * One-time initialisation. + */ +static void reset_ctrl_regs(void *unused) +{ + int i; + + /* + * v7 debug contains save and restore registers so that debug state + * can be maintained across low-power modes without leaving + * the debug logic powered up. It is IMPLEMENTATION DEFINED whether + * we can write to the debug registers out of reset, so we must + * unlock the OS Lock Access Register to avoid taking undefined + * instruction exceptions later on. + */ + if (debug_arch >= ARM_DEBUG_ARCH_V7_ECP14) { + /* + * Unconditionally clear the lock by writing a value + * other than 0xC5ACCE55 to the access register. + */ + asm volatile("mcr p14, 0, %0, c1, c0, 4" : : "r" (0)); + isb(); + } + + if (enable_monitor_mode()) + return; + + /* We must also reset any reserved registers. 
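+ * (i.e. the mismatch breakpoints reserved for watchpoint + * single-step, which sit directly above the ordinary BRPs)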
*/ + for (i = 0; i < core_num_brps + core_num_reserved_brps; ++i) { + write_wb_reg(ARM_BASE_BCR + i, 0UL); + write_wb_reg(ARM_BASE_BVR + i, 0UL); + } + + for (i = 0; i < core_num_wrps; ++i) { + write_wb_reg(ARM_BASE_WCR + i, 0UL); + write_wb_reg(ARM_BASE_WVR + i, 0UL); + } +} + +static int __cpuinit dbg_reset_notify(struct notifier_block *self, + unsigned long action, void *cpu) +{ + if (action == CPU_ONLINE) + smp_call_function_single((int)cpu, reset_ctrl_regs, NULL, 1); + return NOTIFY_OK; +} + +static struct notifier_block __cpuinitdata dbg_reset_nb = { + .notifier_call = dbg_reset_notify, +}; + +static int __init arch_hw_breakpoint_init(void) +{ + u32 dscr; + + debug_arch = get_debug_arch(); + + if (debug_arch > ARM_DEBUG_ARCH_V7_ECP14) { + pr_info("debug architecture 0x%x unsupported.\n", debug_arch); + return 0; + } + + /* Determine how many BRPs/WRPs are available. */ + core_num_brps = get_num_brps(); + core_num_reserved_brps = get_num_reserved_brps(); + core_num_wrps = get_num_wrps(); + + pr_info("found %d breakpoint and %d watchpoint registers.\n", + core_num_brps + core_num_reserved_brps, core_num_wrps); + + if (core_num_reserved_brps) + pr_info("%d breakpoint(s) reserved for watchpoint " + "single-step.\n", core_num_reserved_brps); + + ARM_DBG_READ(c1, 0, dscr); + if (dscr & ARM_DSCR_HDBGEN) { + pr_warning("halting debug mode enabled. Assuming maximum " + "watchpoint size of 4 bytes."); + } else { + /* + * Reset the breakpoint resources. We assume that a halting + * debugger will leave the world in a nice state for us. + */ + smp_call_function(reset_ctrl_regs, NULL, 1); + reset_ctrl_regs(NULL); + + /* Work out the maximum supported watchpoint length. */ + max_watchpoint_len = get_max_wp_len(); + pr_info("maximum watchpoint size is %u bytes.\n", + max_watchpoint_len); + } + + /* Register debug fault handler. */ + hook_fault_code(2, hw_breakpoint_pending, SIGTRAP, TRAP_HWBKPT, + "watchpoint debug exception"); + hook_ifault_code(2, hw_breakpoint_pending, SIGTRAP, TRAP_HWBKPT, + "breakpoint debug exception"); + + /* Register hotplug notifier. */ + register_cpu_notifier(&dbg_reset_nb); + return 0; +} +arch_initcall(arch_hw_breakpoint_init); + +void hw_breakpoint_pmu_read(struct perf_event *bp) +{ +} + +/* + * Dummy function to register with die_notifier. + */ +int hw_breakpoint_exceptions_notify(struct notifier_block *unused, + unsigned long val, void *data) +{ + return NOTIFY_DONE; +} diff --git a/arch/arm/kernel/init_task.c b/arch/arm/kernel/init_task.c index 3f47086..e7cbb50 100644 --- a/arch/arm/kernel/init_task.c +++ b/arch/arm/kernel/init_task.c @@ -24,9 +24,8 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); * * The things we do for performance.. */ -union thread_union init_thread_union - __attribute__((__section__(".data.init_task"))) = - { INIT_THREAD_INFO(init_task) }; +union thread_union init_thread_union __init_task_data = + { INIT_THREAD_INFO(init_task) }; /* * Initial task structure. 
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c index 096f600..8135438 100644 --- a/arch/arm/kernel/irq.c +++ b/arch/arm/kernel/irq.c @@ -27,7 +27,6 @@ #include <linux/ioport.h> #include <linux/interrupt.h> #include <linux/irq.h> -#include <linux/slab.h> #include <linux/random.h> #include <linux/smp.h> #include <linux/init.h> @@ -36,8 +35,10 @@ #include <linux/list.h> #include <linux/kallsyms.h> #include <linux/proc_fs.h> +#include <linux/ftrace.h> #include <asm/system.h> +#include <asm/mach/arch.h> #include <asm/mach/irq.h> #include <asm/mach/time.h> @@ -48,19 +49,28 @@ #define irq_finish(irq) do { } while (0) #endif -void (*init_arch_irq)(void) __initdata = NULL; unsigned long irq_err_count; int show_interrupts(struct seq_file *p, void *v) { int i = *(loff_t *) v, cpu; + struct irq_desc *desc; struct irqaction * action; unsigned long flags; + int prec, n; + + for (prec = 3, n = 1000; prec < 10 && n <= nr_irqs; prec++) + n *= 10; + +#ifdef CONFIG_SMP + if (prec < 4) + prec = 4; +#endif if (i == 0) { char cpuname[12]; - seq_printf(p, " "); + seq_printf(p, "%*s ", prec, ""); for_each_present_cpu(cpu) { sprintf(cpuname, "CPU%d", cpu); seq_printf(p, " %10s", cpuname); @@ -68,53 +78,46 @@ int show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); } - if (i < NR_IRQS) { - spin_lock_irqsave(&irq_desc[i].lock, flags); - action = irq_desc[i].action; + if (i < nr_irqs) { + desc = irq_to_desc(i); + raw_spin_lock_irqsave(&desc->lock, flags); + action = desc->action; if (!action) goto unlock; - seq_printf(p, "%3d: ", i); + seq_printf(p, "%*d: ", prec, i); for_each_present_cpu(cpu) seq_printf(p, "%10u ", kstat_irqs_cpu(i, cpu)); - seq_printf(p, " %10s", irq_desc[i].chip->name ? : "-"); + seq_printf(p, " %10s", desc->chip->name ? : "-"); seq_printf(p, " %s", action->name); for (action = action->next; action; action = action->next) seq_printf(p, ", %s", action->name); seq_putc(p, '\n'); unlock: - spin_unlock_irqrestore(&irq_desc[i].lock, flags); - } else if (i == NR_IRQS) { -#ifdef CONFIG_ARCH_ACORN - show_fiq_list(p, v); + raw_spin_unlock_irqrestore(&desc->lock, flags); + } else if (i == nr_irqs) { +#ifdef CONFIG_FIQ + show_fiq_list(p, prec); #endif #ifdef CONFIG_SMP - show_ipi_list(p); - show_local_irqs(p); + show_ipi_list(p, prec); #endif - seq_printf(p, "Err: %10lu\n", irq_err_count); +#ifdef CONFIG_LOCAL_TIMERS + show_local_irqs(p, prec); +#endif + seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count); } return 0; } -/* Handle bad interrupts */ -static struct irq_desc bad_irq_desc = { - .handle_irq = handle_bad_irq, - .lock = __SPIN_LOCK_UNLOCKED(bad_irq_desc.lock), -}; - -#ifdef CONFIG_CPUMASK_OFFSTACK -/* We are not allocating bad_irq_desc.affinity or .pending_mask */ -#error "ARM architecture does not support CONFIG_CPUMASK_OFFSTACK." -#endif - /* * do_IRQ handles all hardware IRQ's. Decoded IRQs should not * come via this function. Instead, they should provide their * own 'handler' */ -asmlinkage void __exception asm_do_IRQ(unsigned int irq, struct pt_regs *regs) +asmlinkage void __exception_irq_entry +asm_do_IRQ(unsigned int irq, struct pt_regs *regs) { struct pt_regs *old_regs = set_irq_regs(regs); @@ -124,10 +127,13 @@ asmlinkage void __exception asm_do_IRQ(unsigned int irq, struct pt_regs *regs) * Some hardware gives randomly wrong interrupts. Rather * than crashing, do something sensible. 
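One detail of the reworked show_interrupts() above is easy to miss: with sparse IRQs, nr_irqs is no longer a small compile-time constant, so the prec loop computes how many digits the widest IRQ number needs before anything is printed. A standalone rendering of the same arithmetic:

	#include <stdio.h>

	/* Width of the IRQ-number column, as in show_interrupts(). */
	static int irq_field_width(int nr_irqs)
	{
		int prec, n;

		for (prec = 3, n = 1000; prec < 10 && n <= nr_irqs; prec++)
			n *= 10;
		return prec;
	}

	int main(void)
	{
		printf("%d\n", irq_field_width(96));	/* 3: 0..999 fit */
		printf("%d\n", irq_field_width(1024));	/* 4: needs 4 digits */
		return 0;
	}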
*/ - if (irq >= NR_IRQS) - handle_bad_irq(irq, &bad_irq_desc); - else + if (unlikely(irq >= nr_irqs)) { + if (printk_ratelimit()) + printk(KERN_WARNING "Bad IRQ%u\n", irq); + ack_bad_irq(irq); + } else { generic_handle_irq(irq); + } /* AT91 specific workaround */ irq_finish(irq); @@ -141,13 +147,13 @@ void set_irq_flags(unsigned int irq, unsigned int iflags) struct irq_desc *desc; unsigned long flags; - if (irq >= NR_IRQS) { + if (irq >= nr_irqs) { printk(KERN_ERR "Trying to set irq flags for IRQ%d\n", irq); return; } - desc = irq_desc + irq; - spin_lock_irqsave(&desc->lock, flags); + desc = irq_to_desc(irq); + raw_spin_lock_irqsave(&desc->lock, flags); desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN; if (iflags & IRQF_VALID) desc->status &= ~IRQ_NOREQUEST; @@ -155,22 +161,21 @@ void set_irq_flags(unsigned int irq, unsigned int iflags) desc->status &= ~IRQ_NOPROBE; if (!(iflags & IRQF_NOAUTOEN)) desc->status &= ~IRQ_NOAUTOEN; - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } void __init init_IRQ(void) { - int irq; - - for (irq = 0; irq < NR_IRQS; irq++) - irq_desc[irq].status |= IRQ_NOREQUEST | IRQ_NOPROBE; + machine_desc->init_irq(); +} -#ifdef CONFIG_SMP - cpumask_setall(bad_irq_desc.affinity); - bad_irq_desc.node = smp_processor_id(); -#endif - init_arch_irq(); +#ifdef CONFIG_SPARSE_IRQ +int __init arch_probe_nr_irqs(void) +{ + nr_irqs = machine_desc->nr_irqs ? machine_desc->nr_irqs : NR_IRQS; + return nr_irqs; } +#endif #ifdef CONFIG_HOTPLUG_CPU @@ -178,9 +183,9 @@ static void route_irq(struct irq_desc *desc, unsigned int irq, unsigned int cpu) { pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->node, cpu); - spin_lock_irq(&desc->lock); + raw_spin_lock_irq(&desc->lock); desc->chip->set_affinity(irq, cpumask_of(cpu)); - spin_unlock_irq(&desc->lock); + raw_spin_unlock_irq(&desc->lock); } /* @@ -191,10 +196,9 @@ static void route_irq(struct irq_desc *desc, unsigned int irq, unsigned int cpu) void migrate_irqs(void) { unsigned int i, cpu = smp_processor_id(); + struct irq_desc *desc; - for (i = 0; i < NR_IRQS; i++) { - struct irq_desc *desc = irq_desc + i; - + for_each_irq_desc(i, desc) { if (desc->node == cpu) { unsigned int newcpu = cpumask_any_and(desc->affinity, cpu_online_mask); diff --git a/arch/arm/kernel/isa.c b/arch/arm/kernel/isa.c index 8ac9b84..3464859 100644 --- a/arch/arm/kernel/isa.c +++ b/arch/arm/kernel/isa.c @@ -22,47 +22,42 @@ static unsigned int isa_membase, isa_portbase, isa_portshift; static ctl_table ctl_isa_vars[4] = { { - .ctl_name = BUS_ISA_MEM_BASE, .procname = "membase", .data = &isa_membase, .maxlen = sizeof(isa_membase), .mode = 0444, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { - .ctl_name = BUS_ISA_PORT_BASE, .procname = "portbase", .data = &isa_portbase, .maxlen = sizeof(isa_portbase), .mode = 0444, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { - .ctl_name = BUS_ISA_PORT_SHIFT, .procname = "portshift", .data = &isa_portshift, .maxlen = sizeof(isa_portshift), .mode = 0444, - .proc_handler = &proc_dointvec, - }, {0} + .proc_handler = proc_dointvec, + }, {} }; static struct ctl_table_header *isa_sysctl_header; static ctl_table ctl_isa[2] = { { - .ctl_name = CTL_BUS_ISA, .procname = "isa", .mode = 0555, .child = ctl_isa_vars, - }, {0} + }, {} }; static ctl_table ctl_bus[2] = { { - .ctl_name = CTL_BUS, .procname = "bus", .mode = 0555, .child = ctl_isa, - }, {0} + }, {} }; void __init diff --git a/arch/arm/kernel/iwmmxt.S 
b/arch/arm/kernel/iwmmxt.S index b63b528..7fa3bb0 100644 --- a/arch/arm/kernel/iwmmxt.S +++ b/arch/arm/kernel/iwmmxt.S @@ -19,6 +19,14 @@ #include <asm/thread_info.h> #include <asm/asm-offsets.h> +#if defined(CONFIG_CPU_PJ4) +#define PJ4(code...) code +#define XSC(code...) +#else +#define PJ4(code...) +#define XSC(code...) code +#endif + #define MMX_WR0 (0x00) #define MMX_WR1 (0x08) #define MMX_WR2 (0x10) @@ -58,11 +66,17 @@ ENTRY(iwmmxt_task_enable) - mrc p15, 0, r2, c15, c1, 0 - tst r2, #0x3 @ CP0 and CP1 accessible? + XSC(mrc p15, 0, r2, c15, c1, 0) + PJ4(mrc p15, 0, r2, c1, c0, 2) + @ CP0 and CP1 accessible? + XSC(tst r2, #0x3) + PJ4(tst r2, #0xf) movne pc, lr @ if so no business here - orr r2, r2, #0x3 @ enable access to CP0 and CP1 - mcr p15, 0, r2, c15, c1, 0 + @ enable access to CP0 and CP1 + XSC(orr r2, r2, #0x3) + XSC(mcr p15, 0, r2, c15, c1, 0) + PJ4(orr r2, r2, #0xf) + PJ4(mcr p15, 0, r2, c1, c0, 2) ldr r3, =concan_owner add r0, r10, #TI_IWMMXT_STATE @ get task Concan save area @@ -179,17 +193,26 @@ ENTRY(iwmmxt_task_disable) teqne r1, r2 @ or specified one? bne 1f @ no: quit - mrc p15, 0, r4, c15, c1, 0 - orr r4, r4, #0x3 @ enable access to CP0 and CP1 - mcr p15, 0, r4, c15, c1, 0 + @ enable access to CP0 and CP1 + XSC(mrc p15, 0, r4, c15, c1, 0) + XSC(orr r4, r4, #0x3) + XSC(mcr p15, 0, r4, c15, c1, 0) + PJ4(mrc p15, 0, r4, c1, c0, 2) + PJ4(orr r4, r4, #0xf) + PJ4(mcr p15, 0, r4, c1, c0, 2) + mov r0, #0 @ nothing to load str r0, [r3] @ no more current owner mrc p15, 0, r2, c2, c0, 0 mov r2, r2 @ cpwait bl concan_save - bic r4, r4, #0x3 @ disable access to CP0 and CP1 - mcr p15, 0, r4, c15, c1, 0 + @ disable access to CP0 and CP1 + XSC(bic r4, r4, #0x3) + XSC(mcr p15, 0, r4, c15, c1, 0) + PJ4(bic r4, r4, #0xf) + PJ4(mcr p15, 0, r4, c1, c0, 2) + mrc p15, 0, r2, c2, c0, 0 mov r2, r2 @ cpwait @@ -277,8 +300,11 @@ ENTRY(iwmmxt_task_restore) */ ENTRY(iwmmxt_task_switch) - mrc p15, 0, r1, c15, c1, 0 - tst r1, #0x3 @ CP0 and CP1 accessible? + XSC(mrc p15, 0, r1, c15, c1, 0) + PJ4(mrc p15, 0, r1, c1, c0, 2) + @ CP0 and CP1 accessible? + XSC(tst r1, #0x3) + PJ4(tst r1, #0xf) bne 1f @ yes: block them for next task ldr r2, =concan_owner @@ -287,8 +313,11 @@ ENTRY(iwmmxt_task_switch) teq r2, r3 @ next task owns it? movne pc, lr @ no: leave Concan disabled -1: eor r1, r1, #3 @ flip Concan access - mcr p15, 0, r1, c15, c1, 0 +1: @ flip Concan access + XSC(eor r1, r1, #0x3) + XSC(mcr p15, 0, r1, c15, c1, 0) + PJ4(eor r1, r1, #0xf) + PJ4(mcr p15, 0, r1, c1, c0, 2) mrc p15, 0, r1, c2, c0, 0 sub pc, lr, r1, lsr #32 @ cpwait and return diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c index ba8ccfe..778c2f7 100644 --- a/arch/arm/kernel/kgdb.c +++ b/arch/arm/kernel/kgdb.c @@ -9,57 +9,63 @@ * Authors: George Davis <davis_g@mvista.com> * Deepak Saxena <dsaxena@plexity.net> */ +#include <linux/irq.h> +#include <linux/kdebug.h> #include <linux/kgdb.h> #include <asm/traps.h> -/* Make a local copy of the registers passed into the handler (bletch) */ -void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *kernel_regs) +struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = { - int regno; - - /* Initialize all to zero. 
*/ - for (regno = 0; regno < GDB_MAX_REGS; regno++) - gdb_regs[regno] = 0; + { "r0", 4, offsetof(struct pt_regs, ARM_r0)}, + { "r1", 4, offsetof(struct pt_regs, ARM_r1)}, + { "r2", 4, offsetof(struct pt_regs, ARM_r2)}, + { "r3", 4, offsetof(struct pt_regs, ARM_r3)}, + { "r4", 4, offsetof(struct pt_regs, ARM_r4)}, + { "r5", 4, offsetof(struct pt_regs, ARM_r5)}, + { "r6", 4, offsetof(struct pt_regs, ARM_r6)}, + { "r7", 4, offsetof(struct pt_regs, ARM_r7)}, + { "r8", 4, offsetof(struct pt_regs, ARM_r8)}, + { "r9", 4, offsetof(struct pt_regs, ARM_r9)}, + { "r10", 4, offsetof(struct pt_regs, ARM_r10)}, + { "fp", 4, offsetof(struct pt_regs, ARM_fp)}, + { "ip", 4, offsetof(struct pt_regs, ARM_ip)}, + { "sp", 4, offsetof(struct pt_regs, ARM_sp)}, + { "lr", 4, offsetof(struct pt_regs, ARM_lr)}, + { "pc", 4, offsetof(struct pt_regs, ARM_pc)}, + { "f0", 12, -1 }, + { "f1", 12, -1 }, + { "f2", 12, -1 }, + { "f3", 12, -1 }, + { "f4", 12, -1 }, + { "f5", 12, -1 }, + { "f6", 12, -1 }, + { "f7", 12, -1 }, + { "fps", 4, -1 }, + { "cpsr", 4, offsetof(struct pt_regs, ARM_cpsr)}, +}; - gdb_regs[_R0] = kernel_regs->ARM_r0; - gdb_regs[_R1] = kernel_regs->ARM_r1; - gdb_regs[_R2] = kernel_regs->ARM_r2; - gdb_regs[_R3] = kernel_regs->ARM_r3; - gdb_regs[_R4] = kernel_regs->ARM_r4; - gdb_regs[_R5] = kernel_regs->ARM_r5; - gdb_regs[_R6] = kernel_regs->ARM_r6; - gdb_regs[_R7] = kernel_regs->ARM_r7; - gdb_regs[_R8] = kernel_regs->ARM_r8; - gdb_regs[_R9] = kernel_regs->ARM_r9; - gdb_regs[_R10] = kernel_regs->ARM_r10; - gdb_regs[_FP] = kernel_regs->ARM_fp; - gdb_regs[_IP] = kernel_regs->ARM_ip; - gdb_regs[_SPT] = kernel_regs->ARM_sp; - gdb_regs[_LR] = kernel_regs->ARM_lr; - gdb_regs[_PC] = kernel_regs->ARM_pc; - gdb_regs[_CPSR] = kernel_regs->ARM_cpsr; +char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs) +{ + if (regno >= DBG_MAX_REG_NUM || regno < 0) + return NULL; + + if (dbg_reg_def[regno].offset != -1) + memcpy(mem, (void *)regs + dbg_reg_def[regno].offset, + dbg_reg_def[regno].size); + else + memset(mem, 0, dbg_reg_def[regno].size); + return dbg_reg_def[regno].name; } -/* Copy local gdb registers back to kgdb regs, for later copy to kernel */ -void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *kernel_regs) +int dbg_set_reg(int regno, void *mem, struct pt_regs *regs) { - kernel_regs->ARM_r0 = gdb_regs[_R0]; - kernel_regs->ARM_r1 = gdb_regs[_R1]; - kernel_regs->ARM_r2 = gdb_regs[_R2]; - kernel_regs->ARM_r3 = gdb_regs[_R3]; - kernel_regs->ARM_r4 = gdb_regs[_R4]; - kernel_regs->ARM_r5 = gdb_regs[_R5]; - kernel_regs->ARM_r6 = gdb_regs[_R6]; - kernel_regs->ARM_r7 = gdb_regs[_R7]; - kernel_regs->ARM_r8 = gdb_regs[_R8]; - kernel_regs->ARM_r9 = gdb_regs[_R9]; - kernel_regs->ARM_r10 = gdb_regs[_R10]; - kernel_regs->ARM_fp = gdb_regs[_FP]; - kernel_regs->ARM_ip = gdb_regs[_IP]; - kernel_regs->ARM_sp = gdb_regs[_SPT]; - kernel_regs->ARM_lr = gdb_regs[_LR]; - kernel_regs->ARM_pc = gdb_regs[_PC]; - kernel_regs->ARM_cpsr = gdb_regs[_CPSR]; + if (regno >= DBG_MAX_REG_NUM || regno < 0) + return -EINVAL; + + if (dbg_reg_def[regno].offset != -1) + memcpy((void *)regs + dbg_reg_def[regno].offset, mem, + dbg_reg_def[regno].size); + return 0; } void @@ -97,6 +103,11 @@ sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task) gdb_regs[_CPSR] = thread_regs->ARM_cpsr; } +void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc) +{ + regs->ARM_pc = pc; +} + static int compiled_break; int kgdb_arch_handle_exception(int exception_vector, int signo, @@ -158,6 +169,45 @@ static struct undef_hook 
kgdb_compiled_brkpt_hook = { .fn = kgdb_compiled_brk_fn }; +static void kgdb_call_nmi_hook(void *ignored) +{ + kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs()); +} + +void kgdb_roundup_cpus(unsigned long flags) +{ + local_irq_enable(); + smp_call_function(kgdb_call_nmi_hook, NULL, 0); + local_irq_disable(); +} + +static int __kgdb_notify(struct die_args *args, unsigned long cmd) +{ + struct pt_regs *regs = args->regs; + + if (kgdb_handle_exception(1, args->signr, cmd, regs)) + return NOTIFY_DONE; + return NOTIFY_STOP; +} +static int +kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr) +{ + unsigned long flags; + int ret; + + local_irq_save(flags); + ret = __kgdb_notify(ptr, cmd); + local_irq_restore(flags); + + return ret; +} + +static struct notifier_block kgdb_notifier = { + .notifier_call = kgdb_notify, + .priority = -INT_MAX, +}; + + /** * kgdb_arch_init - Perform any architecture specific initialization. * @@ -166,6 +216,11 @@ static struct undef_hook kgdb_compiled_brkpt_hook = { */ int kgdb_arch_init(void) { + int ret = register_die_notifier(&kgdb_notifier); + + if (ret != 0) + return ret; + register_undef_hook(&kgdb_brkpt_hook); register_undef_hook(&kgdb_compiled_brkpt_hook); @@ -182,6 +237,7 @@ void kgdb_arch_exit(void) { unregister_undef_hook(&kgdb_brkpt_hook); unregister_undef_hook(&kgdb_compiled_brkpt_hook); + unregister_die_notifier(&kgdb_notifier); } /* diff --git a/arch/arm/kernel/kprobes-decode.c b/arch/arm/kernel/kprobes-decode.c index da1f949..2c1f005 100644 --- a/arch/arm/kernel/kprobes-decode.c +++ b/arch/arm/kernel/kprobes-decode.c @@ -583,13 +583,14 @@ static void __kprobes emulate_ldr(struct kprobe *p, struct pt_regs *regs) { insn_llret_3arg_fn_t *i_fn = (insn_llret_3arg_fn_t *)&p->ainsn.insn[0]; kprobe_opcode_t insn = p->opcode; + long ppc = (long)p->addr + 8; union reg_pair fnr; int rd = (insn >> 12) & 0xf; int rn = (insn >> 16) & 0xf; int rm = insn & 0xf; long rdv; - long rnv = regs->uregs[rn]; - long rmv = regs->uregs[rm]; /* rm/rmv may be invalid, don't care. */ + long rnv = (rn == 15) ? ppc : regs->uregs[rn]; + long rmv = (rm == 15) ? ppc : regs->uregs[rm]; long cpsr = regs->ARM_cpsr; fnr.dr = insnslot_llret_3arg_rflags(rnv, 0, rmv, cpsr, i_fn); @@ -1161,11 +1162,12 @@ space_cccc_001x(kprobe_opcode_t insn, struct arch_specific_insn *asi) { /* * MSR : cccc 0011 0x10 xxxx xxxx xxxx xxxx xxxx - * Undef : cccc 0011 0x00 xxxx xxxx xxxx xxxx xxxx + * Undef : cccc 0011 0100 xxxx xxxx xxxx xxxx xxxx * ALU op with S bit and Rd == 15 : * cccc 001x xxx1 xxxx 1111 xxxx xxxx xxxx */ - if ((insn & 0x0f900000) == 0x03200000 || /* MSR & Undef */ + if ((insn & 0x0fb00000) == 0x03200000 || /* MSR */ + (insn & 0x0ff00000) == 0x03400000 || /* Undef */ (insn & 0x0e10f000) == 0x0210f000) /* ALU s-bit, R15 */ return INSN_REJECTED; @@ -1176,7 +1178,7 @@ space_cccc_001x(kprobe_opcode_t insn, struct arch_specific_insn *asi) * *S (bit 20) updates condition codes * ADC/SBC/RSC reads the C flag */ - insn &= 0xfff00fff; /* Rn = r0, Rd = r0 */ + insn &= 0xffff0fff; /* Rd = r0 */ asi->insn[0] = insn; asi->insn_handler = (insn & (1 << 20)) ? 
/* S-bit */ emulate_alu_imm_rwflags : emulate_alu_imm_rflags; diff --git a/arch/arm/kernel/kprobes.c b/arch/arm/kernel/kprobes.c index f692efd..2ba7deb 100644 --- a/arch/arm/kernel/kprobes.c +++ b/arch/arm/kernel/kprobes.c @@ -22,6 +22,8 @@ #include <linux/kernel.h> #include <linux/kprobes.h> #include <linux/module.h> +#include <linux/slab.h> +#include <linux/stop_machine.h> #include <linux/stringify.h> #include <asm/traps.h> #include <asm/cacheflush.h> @@ -83,10 +85,24 @@ void __kprobes arch_arm_kprobe(struct kprobe *p) flush_insns(p->addr, 1); } +/* + * The actual disarming is done here on each CPU and synchronized using + * stop_machine. This synchronization is necessary on SMP to avoid removing + * a probe between the moment the 'Undefined Instruction' exception is raised + * and the moment the exception handler reads the faulting instruction from + * memory. + */ +int __kprobes __arch_disarm_kprobe(void *p) +{ + struct kprobe *kp = p; + *kp->addr = kp->opcode; + flush_insns(kp->addr, 1); + return 0; +} + void __kprobes arch_disarm_kprobe(struct kprobe *p) { - *p->addr = p->opcode; - flush_insns(p->addr, 1); + stop_machine(__arch_disarm_kprobe, p, &cpu_online_map); } void __kprobes arch_remove_kprobe(struct kprobe *p) @@ -378,6 +394,14 @@ void __kprobes jprobe_return(void) /* * Setup an empty pt_regs. Fill SP and PC fields as * they're needed by longjmp_break_handler. + * + * We allocate some slack between the original SP and start of + * our fabricated regs. To be precise we want to have worst case + * covered which is STMFD with all 16 regs so we allocate 2 * + * sizeof(struct pt_regs). + * + * This is to prevent any simulated instruction from writing + * over the regs when they are accessing the stack. */ "sub sp, %0, %1 \n\t" "ldr r0, ="__stringify(JPROBE_MAGIC_ADDR)"\n\t" @@ -395,7 +419,7 @@ void __kprobes jprobe_return(void) "ldmia sp, {r0 - pc} \n\t" : : "r" (kcb->jprobe_saved_regs.ARM_sp), - "I" (sizeof(struct pt_regs)), + "I" (sizeof(struct pt_regs) * 2), "J" (offsetof(struct pt_regs, ARM_sp)), "J" (offsetof(struct pt_regs, ARM_pc)), "J" (offsetof(struct pt_regs, ARM_cpsr)) diff --git a/arch/arm/kernel/leds.c b/arch/arm/kernel/leds.c new file mode 100644 index 0000000..31a316c --- /dev/null +++ b/arch/arm/kernel/leds.c @@ -0,0 +1,115 @@ +/* + * LED support code, ripped out of arch/arm/kernel/time.c + * + * Copyright (C) 1994-2001 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
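Returning to the kprobes change above: restoring the original instruction while another CPU may be between raising the 'Undefined Instruction' exception and re-reading the faulting word is exactly the race stop_machine() closes, because the patch function runs while every other online CPU spins in a known-safe loop. A sketch of the general text-patching pattern (helper names hypothetical):

	#include <linux/stop_machine.h>
	#include <linux/types.h>

	struct patch_args {
		u32 *addr;
		u32 insn;
	};

	/* Runs with all other online CPUs parked in a safe spin loop. */
	static int do_patch_text(void *data)
	{
		struct patch_args *args = data;

		*args->addr = args->insn;
		/* flush the I/D caches for the patched word here */
		return 0;
	}

	static void patch_text(u32 *addr, u32 insn)
	{
		struct patch_args args = { .addr = addr, .insn = insn };

		stop_machine(do_patch_text, &args, NULL); /* NULL: any one CPU */
	}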
+ */ +#include <linux/module.h> +#include <linux/init.h> +#include <linux/sysdev.h> + +#include <asm/leds.h> + +static void dummy_leds_event(led_event_t evt) +{ +} + +void (*leds_event)(led_event_t) = dummy_leds_event; + +struct leds_evt_name { + const char name[8]; + int on; + int off; +}; + +static const struct leds_evt_name evt_names[] = { + { "amber", led_amber_on, led_amber_off }, + { "blue", led_blue_on, led_blue_off }, + { "green", led_green_on, led_green_off }, + { "red", led_red_on, led_red_off }, +}; + +static ssize_t leds_store(struct sys_device *dev, + struct sysdev_attribute *attr, + const char *buf, size_t size) +{ + int ret = -EINVAL, len = strcspn(buf, " "); + + if (len > 0 && buf[len] == '\0') + len--; + + if (strncmp(buf, "claim", len) == 0) { + leds_event(led_claim); + ret = size; + } else if (strncmp(buf, "release", len) == 0) { + leds_event(led_release); + ret = size; + } else { + int i; + + for (i = 0; i < ARRAY_SIZE(evt_names); i++) { + if (strlen(evt_names[i].name) != len || + strncmp(buf, evt_names[i].name, len) != 0) + continue; + if (strncmp(buf+len, " on", 3) == 0) { + leds_event(evt_names[i].on); + ret = size; + } else if (strncmp(buf+len, " off", 4) == 0) { + leds_event(evt_names[i].off); + ret = size; + } + break; + } + } + return ret; +} + +static SYSDEV_ATTR(event, 0200, NULL, leds_store); + +static int leds_suspend(struct sys_device *dev, pm_message_t state) +{ + leds_event(led_stop); + return 0; +} + +static int leds_resume(struct sys_device *dev) +{ + leds_event(led_start); + return 0; +} + +static int leds_shutdown(struct sys_device *dev) +{ + leds_event(led_halted); + return 0; +} + +static struct sysdev_class leds_sysclass = { + .name = "leds", + .shutdown = leds_shutdown, + .suspend = leds_suspend, + .resume = leds_resume, +}; + +static struct sys_device leds_device = { + .id = 0, + .cls = &leds_sysclass, +}; + +static int __init leds_init(void) +{ + int ret; + ret = sysdev_class_register(&leds_sysclass); + if (ret == 0) + ret = sysdev_register(&leds_device); + if (ret == 0) + ret = sysdev_create_file(&leds_device, &attr_event); + return ret; +} + +device_initcall(leds_init); + +EXPORT_SYMBOL(leds_event); diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c index 598ca61..30ead13 100644 --- a/arch/arm/kernel/machine_kexec.c +++ b/arch/arm/kernel/machine_kexec.c @@ -23,6 +23,8 @@ extern unsigned long kexec_indirection_page; extern unsigned long kexec_mach_type; extern unsigned long kexec_boot_atags; +static atomic_t waiting_for_crash_ipi; + /* * Provide a dummy crash_notes definition while crash dump arrives to arm. * This prevents breakage of crash_notes attribute in kernel/ksysfs.c. 
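For the LED code above, the sysdev class/device pair exposes a single write-only control file; assuming the conventional sysdev layout, it appears as /sys/devices/system/leds/leds0/event. Accepted strings are "claim" and "release" plus any name from evt_names[] followed by " on" or " off", so for example writing claim and then "green on" into the file drives the green LED. Note how leds_store() drops one trailing character when the first token runs straight into the terminating NUL, which is what makes the newline appended by echo harmless.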
@@ -37,12 +39,40 @@ void machine_kexec_cleanup(struct kimage *image) { } -void machine_shutdown(void) +void machine_crash_nonpanic_core(void *unused) { + struct pt_regs regs; + + crash_setup_regs(&regs, NULL); + printk(KERN_DEBUG "CPU %u will stop doing anything useful since another CPU has crashed\n", + smp_processor_id()); + crash_save_cpu(&regs, smp_processor_id()); + flush_cache_all(); + + atomic_dec(&waiting_for_crash_ipi); + while (1) + cpu_relax(); } void machine_crash_shutdown(struct pt_regs *regs) { + unsigned long msecs; + + local_irq_disable(); + + atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1); + smp_call_function(machine_crash_nonpanic_core, NULL, false); + msecs = 1000; /* Wait at most a second for the other cpus to stop */ + while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) { + mdelay(1); + msecs--; + } + if (atomic_read(&waiting_for_crash_ipi) > 0) + printk(KERN_WARNING "Non-crashing CPUs did not react to IPI\n"); + + crash_save_cpu(regs, smp_processor_id()); + + printk(KERN_INFO "Loading crashdump kernel...\n"); } void machine_kexec(struct kimage *image) @@ -74,7 +104,14 @@ void machine_kexec(struct kimage *image) (unsigned long) reboot_code_buffer + KEXEC_CONTROL_PAGE_SIZE); printk(KERN_INFO "Bye!\n"); - cpu_proc_fin(); + local_irq_disable(); + local_fiq_disable(); setup_mm_for_reboot(0); /* mode is not used, so just pass 0*/ + flush_cache_all(); + outer_flush_all(); + outer_disable(); + cpu_proc_fin(); + outer_inv_all(); + flush_cache_all(); cpu_reset(reboot_code_buffer_phys); } diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c index bac03c8..0c1bb68 100644 --- a/arch/arm/kernel/module.c +++ b/arch/arm/kernel/module.c @@ -16,9 +16,9 @@ #include <linux/mm.h> #include <linux/elf.h> #include <linux/vmalloc.h> -#include <linux/slab.h> #include <linux/fs.h> #include <linux/string.h> +#include <linux/gfp.h> #include <asm/pgtable.h> #include <asm/sections.h> @@ -67,24 +67,6 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, char *secstrings, struct module *mod) { -#ifdef CONFIG_ARM_UNWIND - Elf_Shdr *s, *sechdrs_end = sechdrs + hdr->e_shnum; - - for (s = sechdrs; s < sechdrs_end; s++) { - if (strcmp(".ARM.exidx.init.text", secstrings + s->sh_name) == 0) - mod->arch.unw_sec_init = s; - else if (strcmp(".ARM.exidx.devinit.text", secstrings + s->sh_name) == 0) - mod->arch.unw_sec_devinit = s; - else if (strcmp(".ARM.exidx", secstrings + s->sh_name) == 0) - mod->arch.unw_sec_core = s; - else if (strcmp(".init.text", secstrings + s->sh_name) == 0) - mod->arch.sec_init_text = s; - else if (strcmp(".devinit.text", secstrings + s->sh_name) == 0) - mod->arch.sec_devinit_text = s; - else if (strcmp(".text", secstrings + s->sh_name) == 0) - mod->arch.sec_core_text = s; - } -#endif return 0; } @@ -102,6 +84,9 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex, unsigned long loc; Elf32_Sym *sym; s32 offset; +#ifdef CONFIG_THUMB2_KERNEL + u32 upper, lower, sign, j1, j2; +#endif offset = ELF32_R_SYM(rel->r_info); if (offset < 0 || offset > (symsec->sh_size / sizeof(Elf32_Sym))) { @@ -184,6 +169,90 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex, (offset & 0x0fff); break; +#ifdef CONFIG_THUMB2_KERNEL + case R_ARM_THM_CALL: + case R_ARM_THM_JUMP24: + upper = *(u16 *)loc; + lower = *(u16 *)(loc + 2); + + /* + * 25 bit signed address range (Thumb-2 BL and B.W + * instructions): + * S:I1:I2:imm10:imm11:0 + * where: + * S = upper[10] = offset[24] + * I1 = ~(J1 ^ S) = offset[23] + * I2 = ~(J2 ^ S) = offset[22] + 
* imm10 = upper[9:0] = offset[21:12] + * imm11 = lower[10:0] = offset[11:1] + * J1 = lower[13] + * J2 = lower[11] + */ + sign = (upper >> 10) & 1; + j1 = (lower >> 13) & 1; + j2 = (lower >> 11) & 1; + offset = (sign << 24) | ((~(j1 ^ sign) & 1) << 23) | + ((~(j2 ^ sign) & 1) << 22) | + ((upper & 0x03ff) << 12) | + ((lower & 0x07ff) << 1); + if (offset & 0x01000000) + offset -= 0x02000000; + offset += sym->st_value - loc; + + /* only Thumb addresses allowed (no interworking) */ + if (!(offset & 1) || + offset <= (s32)0xff000000 || + offset >= (s32)0x01000000) { + printk(KERN_ERR + "%s: relocation out of range, section " + "%d reloc %d sym '%s'\n", module->name, + relindex, i, strtab + sym->st_name); + return -ENOEXEC; + } + + sign = (offset >> 24) & 1; + j1 = sign ^ (~(offset >> 23) & 1); + j2 = sign ^ (~(offset >> 22) & 1); + *(u16 *)loc = (u16)((upper & 0xf800) | (sign << 10) | + ((offset >> 12) & 0x03ff)); + *(u16 *)(loc + 2) = (u16)((lower & 0xd000) | + (j1 << 13) | (j2 << 11) | + ((offset >> 1) & 0x07ff)); + break; + + case R_ARM_THM_MOVW_ABS_NC: + case R_ARM_THM_MOVT_ABS: + upper = *(u16 *)loc; + lower = *(u16 *)(loc + 2); + + /* + * MOVT/MOVW instructions encoding in Thumb-2: + * + * i = upper[10] + * imm4 = upper[3:0] + * imm3 = lower[14:12] + * imm8 = lower[7:0] + * + * imm16 = imm4:i:imm3:imm8 + */ + offset = ((upper & 0x000f) << 12) | + ((upper & 0x0400) << 1) | + ((lower & 0x7000) >> 4) | (lower & 0x00ff); + offset = (offset ^ 0x8000) - 0x8000; + offset += sym->st_value; + + if (ELF32_R_TYPE(rel->r_info) == R_ARM_THM_MOVT_ABS) + offset >>= 16; + + *(u16 *)loc = (u16)((upper & 0xfbf0) | + ((offset & 0xf000) >> 12) | + ((offset & 0x0800) >> 1)); + *(u16 *)(loc + 2) = (u16)((lower & 0x8f00) | + ((offset & 0x0700) << 4) | + (offset & 0x00ff)); + break; +#endif + default: printk(KERN_ERR "%s: unknown relocation: %u\n", module->name, ELF32_R_TYPE(rel->r_info)); @@ -202,50 +271,69 @@ apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab, return -ENOEXEC; } -#ifdef CONFIG_ARM_UNWIND -static void register_unwind_tables(struct module *mod) -{ - if (mod->arch.unw_sec_init && mod->arch.sec_init_text) - mod->arch.unwind_init = - unwind_table_add(mod->arch.unw_sec_init->sh_addr, - mod->arch.unw_sec_init->sh_size, - mod->arch.sec_init_text->sh_addr, - mod->arch.sec_init_text->sh_size); - if (mod->arch.unw_sec_devinit && mod->arch.sec_devinit_text) - mod->arch.unwind_devinit = - unwind_table_add(mod->arch.unw_sec_devinit->sh_addr, - mod->arch.unw_sec_devinit->sh_size, - mod->arch.sec_devinit_text->sh_addr, - mod->arch.sec_devinit_text->sh_size); - if (mod->arch.unw_sec_core && mod->arch.sec_core_text) - mod->arch.unwind_core = - unwind_table_add(mod->arch.unw_sec_core->sh_addr, - mod->arch.unw_sec_core->sh_size, - mod->arch.sec_core_text->sh_addr, - mod->arch.sec_core_text->sh_size); -} +struct mod_unwind_map { + const Elf_Shdr *unw_sec; + const Elf_Shdr *txt_sec; +}; -static void unregister_unwind_tables(struct module *mod) +int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs, + struct module *mod) { - unwind_table_del(mod->arch.unwind_init); - unwind_table_del(mod->arch.unwind_devinit); - unwind_table_del(mod->arch.unwind_core); -} -#else -static inline void register_unwind_tables(struct module *mod) { } -static inline void unregister_unwind_tables(struct module *mod) { } -#endif +#ifdef CONFIG_ARM_UNWIND + const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; + const Elf_Shdr *s, *sechdrs_end = sechdrs + hdr->e_shnum; + struct mod_unwind_map 
maps[ARM_SEC_MAX]; + int i; -int -module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs, - struct module *module) -{ - register_unwind_tables(module); + memset(maps, 0, sizeof(maps)); + + for (s = sechdrs; s < sechdrs_end; s++) { + const char *secname = secstrs + s->sh_name; + + if (!(s->sh_flags & SHF_ALLOC)) + continue; + + if (strcmp(".ARM.exidx.init.text", secname) == 0) + maps[ARM_SEC_INIT].unw_sec = s; + else if (strcmp(".ARM.exidx.devinit.text", secname) == 0) + maps[ARM_SEC_DEVINIT].unw_sec = s; + else if (strcmp(".ARM.exidx", secname) == 0) + maps[ARM_SEC_CORE].unw_sec = s; + else if (strcmp(".ARM.exidx.exit.text", secname) == 0) + maps[ARM_SEC_EXIT].unw_sec = s; + else if (strcmp(".ARM.exidx.devexit.text", secname) == 0) + maps[ARM_SEC_DEVEXIT].unw_sec = s; + else if (strcmp(".init.text", secname) == 0) + maps[ARM_SEC_INIT].txt_sec = s; + else if (strcmp(".devinit.text", secname) == 0) + maps[ARM_SEC_DEVINIT].txt_sec = s; + else if (strcmp(".text", secname) == 0) + maps[ARM_SEC_CORE].txt_sec = s; + else if (strcmp(".exit.text", secname) == 0) + maps[ARM_SEC_EXIT].txt_sec = s; + else if (strcmp(".devexit.text", secname) == 0) + maps[ARM_SEC_DEVEXIT].txt_sec = s; + } + + for (i = 0; i < ARM_SEC_MAX; i++) + if (maps[i].unw_sec && maps[i].txt_sec) + mod->arch.unwind[i] = + unwind_table_add(maps[i].unw_sec->sh_addr, + maps[i].unw_sec->sh_size, + maps[i].txt_sec->sh_addr, + maps[i].txt_sec->sh_size); +#endif return 0; } void module_arch_cleanup(struct module *mod) { - unregister_unwind_tables(mod); +#ifdef CONFIG_ARM_UNWIND + int i; + + for (i = 0; i < ARM_SEC_MAX; i++) + if (mod->arch.unwind[i]) + unwind_table_del(mod->arch.unwind[i]); +#endif } diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c new file mode 100644 index 0000000..5efa264 --- /dev/null +++ b/arch/arm/kernel/perf_event.c @@ -0,0 +1,745 @@ +#undef DEBUG + +/* + * ARM performance counter support. + * + * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles + * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com> + * + * This code is based on the sparc64 perf event code, which is in turn based + * on the x86 code. Callchain code is based on the ARM OProfile backtrace + * code. + */ +#define pr_fmt(fmt) "hw perfevents: " fmt + +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/perf_event.h> +#include <linux/platform_device.h> +#include <linux/spinlock.h> +#include <linux/uaccess.h> + +#include <asm/cputype.h> +#include <asm/irq.h> +#include <asm/irq_regs.h> +#include <asm/pmu.h> +#include <asm/stacktrace.h> + +static struct platform_device *pmu_device; + +/* + * Hardware lock to serialize accesses to PMU registers. Needed for the + * read/modify/write sequences. + */ +static DEFINE_RAW_SPINLOCK(pmu_lock); + +/* + * ARMv6 supports a maximum of 3 events, starting from index 1. If we add + * another platform that supports more, we need to increase this to be the + * largest of all platforms. + * + * ARMv7 supports up to 32 events: + * cycle counter CCNT + 31 events counters CNT0..30. + * Cortex-A8 has 1+4 counters, Cortex-A9 has 1+6 counters. + */ +#define ARMPMU_MAX_HWEVENTS 33 + +/* The events for a given CPU. */ +struct cpu_hw_events { + /* + * The events that are active on the CPU for the given index. Index 0 + * is reserved. + */ + struct perf_event *events[ARMPMU_MAX_HWEVENTS]; + + /* + * A 1 bit for an index indicates that the counter is being used for + * an event. A 0 means that the counter can be used. 
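The used_mask bitmap above is what each architecture's get_event_idx() implementation allocates counter indices from; claiming a slot with an atomic test-and-set keeps two events off the same counter without extra locking. An illustrative sketch of the allocation idiom (not the exact ARMv6/v7 code):

	/* Claim a free counter; index 0 is reserved. */
	static int alloc_counter_idx(struct cpu_hw_events *cpuc, int num_events)
	{
		int idx;

		for (idx = 1; idx <= num_events; idx++)
			if (!test_and_set_bit(idx, cpuc->used_mask))
				return idx;	/* claimed atomically */

		return -EAGAIN;			/* all counters in use */
	}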
+ */ + unsigned long used_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)]; + + /* + * A 1 bit for an index indicates that the counter is actively being + * used. + */ + unsigned long active_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)]; +}; +static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events); + +struct arm_pmu { + enum arm_perf_pmu_ids id; + const char *name; + irqreturn_t (*handle_irq)(int irq_num, void *dev); + void (*enable)(struct hw_perf_event *evt, int idx); + void (*disable)(struct hw_perf_event *evt, int idx); + int (*get_event_idx)(struct cpu_hw_events *cpuc, + struct hw_perf_event *hwc); + u32 (*read_counter)(int idx); + void (*write_counter)(int idx, u32 val); + void (*start)(void); + void (*stop)(void); + const unsigned (*cache_map)[PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX]; + const unsigned (*event_map)[PERF_COUNT_HW_MAX]; + u32 raw_event_mask; + int num_events; + u64 max_period; +}; + +/* Set at runtime when we know what CPU type we are. */ +static const struct arm_pmu *armpmu; + +enum arm_perf_pmu_ids +armpmu_get_pmu_id(void) +{ + int id = -ENODEV; + + if (armpmu != NULL) + id = armpmu->id; + + return id; +} +EXPORT_SYMBOL_GPL(armpmu_get_pmu_id); + +int +armpmu_get_max_events(void) +{ + int max_events = 0; + + if (armpmu != NULL) + max_events = armpmu->num_events; + + return max_events; +} +EXPORT_SYMBOL_GPL(armpmu_get_max_events); + +int perf_num_counters(void) +{ + return armpmu_get_max_events(); +} +EXPORT_SYMBOL_GPL(perf_num_counters); + +#define HW_OP_UNSUPPORTED 0xFFFF + +#define C(_x) \ + PERF_COUNT_HW_CACHE_##_x + +#define CACHE_OP_UNSUPPORTED 0xFFFF + +static int +armpmu_map_cache_event(u64 config) +{ + unsigned int cache_type, cache_op, cache_result, ret; + + cache_type = (config >> 0) & 0xff; + if (cache_type >= PERF_COUNT_HW_CACHE_MAX) + return -EINVAL; + + cache_op = (config >> 8) & 0xff; + if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX) + return -EINVAL; + + cache_result = (config >> 16) & 0xff; + if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) + return -EINVAL; + + ret = (int)(*armpmu->cache_map)[cache_type][cache_op][cache_result]; + + if (ret == CACHE_OP_UNSUPPORTED) + return -ENOENT; + + return ret; +} + +static int +armpmu_map_event(u64 config) +{ + int mapping = (*armpmu->event_map)[config]; + return mapping == HW_OP_UNSUPPORTED ? 
-EOPNOTSUPP : mapping; +} + +static int +armpmu_map_raw_event(u64 config) +{ + return (int)(config & armpmu->raw_event_mask); +} + +static int +armpmu_event_set_period(struct perf_event *event, + struct hw_perf_event *hwc, + int idx) +{ + s64 left = local64_read(&hwc->period_left); + s64 period = hwc->sample_period; + int ret = 0; + + if (unlikely(left <= -period)) { + left = period; + local64_set(&hwc->period_left, left); + hwc->last_period = period; + ret = 1; + } + + if (unlikely(left <= 0)) { + left += period; + local64_set(&hwc->period_left, left); + hwc->last_period = period; + ret = 1; + } + + if (left > (s64)armpmu->max_period) + left = armpmu->max_period; + + local64_set(&hwc->prev_count, (u64)-left); + + armpmu->write_counter(idx, (u64)(-left) & 0xffffffff); + + perf_event_update_userpage(event); + + return ret; +} + +static u64 +armpmu_event_update(struct perf_event *event, + struct hw_perf_event *hwc, + int idx) +{ + int shift = 64 - 32; + s64 prev_raw_count, new_raw_count; + u64 delta; + +again: + prev_raw_count = local64_read(&hwc->prev_count); + new_raw_count = armpmu->read_counter(idx); + + if (local64_cmpxchg(&hwc->prev_count, prev_raw_count, + new_raw_count) != prev_raw_count) + goto again; + + delta = (new_raw_count << shift) - (prev_raw_count << shift); + delta >>= shift; + + local64_add(delta, &event->count); + local64_sub(delta, &hwc->period_left); + + return new_raw_count; +} + +static void +armpmu_read(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + + /* Don't read disabled counters! */ + if (hwc->idx < 0) + return; + + armpmu_event_update(event, hwc, hwc->idx); +} + +static void +armpmu_stop(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + + if (!armpmu) + return; + + /* + * ARM pmu always has to update the counter, so ignore + * PERF_EF_UPDATE, see comments in armpmu_start(). + */ + if (!(hwc->state & PERF_HES_STOPPED)) { + armpmu->disable(hwc, hwc->idx); + barrier(); /* why? */ + armpmu_event_update(event, hwc, hwc->idx); + hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; + } +} + +static void +armpmu_start(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + + if (!armpmu) + return; + + /* + * ARM pmu always has to reprogram the period, so ignore + * PERF_EF_RELOAD, see the comment below. + */ + if (flags & PERF_EF_RELOAD) + WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); + + hwc->state = 0; + /* + * Set the period again. Some counters can't be stopped, so when we + * were stopped we simply disabled the IRQ source and the counter + * may have been left counting. If we don't do this step then we may + * get an interrupt too soon or *way* too late if the overflow has + * happened since disabling. + */ + armpmu_event_set_period(event, hwc, hwc->idx); + armpmu->enable(hwc, hwc->idx); +} + +static void +armpmu_del(struct perf_event *event, int flags) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + + WARN_ON(idx < 0); + + clear_bit(idx, cpuc->active_mask); + armpmu_stop(event, PERF_EF_UPDATE); + cpuc->events[idx] = NULL; + clear_bit(idx, cpuc->used_mask); + + perf_event_update_userpage(event); +} + +static int +armpmu_add(struct perf_event *event, int flags) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct hw_perf_event *hwc = &event->hw; + int idx; + int err = 0; + + perf_pmu_disable(event->pmu); + + /* If we don't have a space for the counter then finish early. 
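The period programming in armpmu_event_set_period() above is the usual overflow trick: the counter is started at the two's complement of the events remaining, so the overflow interrupt fires after exactly that many increments. A worked example for a 32-bit counter:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		int64_t left = 1000;	/* events left in the sample period */
		uint32_t start = (uint64_t)(-left) & 0xffffffff;

		printf("%#x\n", start);			  /* 0xfffffc18 */
		printf("%llu\n", 0x100000000ULL - start); /* 1000 ticks */
		return 0;
	}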
*/ + idx = armpmu->get_event_idx(cpuc, hwc); + if (idx < 0) { + err = idx; + goto out; + } + + /* + * If there is an event in the counter we are going to use then make + * sure it is disabled. + */ + event->hw.idx = idx; + armpmu->disable(hwc, idx); + cpuc->events[idx] = event; + set_bit(idx, cpuc->active_mask); + + hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; + if (flags & PERF_EF_START) + armpmu_start(event, PERF_EF_RELOAD); + + /* Propagate our changes to the userspace mapping. */ + perf_event_update_userpage(event); + +out: + perf_pmu_enable(event->pmu); + return err; +} + +static struct pmu pmu; + +static int +validate_event(struct cpu_hw_events *cpuc, + struct perf_event *event) +{ + struct hw_perf_event fake_event = event->hw; + + if (event->pmu != &pmu || event->state <= PERF_EVENT_STATE_OFF) + return 1; + + return armpmu->get_event_idx(cpuc, &fake_event) >= 0; +} + +static int +validate_group(struct perf_event *event) +{ + struct perf_event *sibling, *leader = event->group_leader; + struct cpu_hw_events fake_pmu; + + memset(&fake_pmu, 0, sizeof(fake_pmu)); + + if (!validate_event(&fake_pmu, leader)) + return -ENOSPC; + + list_for_each_entry(sibling, &leader->sibling_list, group_entry) { + if (!validate_event(&fake_pmu, sibling)) + return -ENOSPC; + } + + if (!validate_event(&fake_pmu, event)) + return -ENOSPC; + + return 0; +} + +static int +armpmu_reserve_hardware(void) +{ + int i, err = -ENODEV, irq; + + pmu_device = reserve_pmu(ARM_PMU_DEVICE_CPU); + if (IS_ERR(pmu_device)) { + pr_warning("unable to reserve pmu\n"); + return PTR_ERR(pmu_device); + } + + init_pmu(ARM_PMU_DEVICE_CPU); + + if (pmu_device->num_resources < 1) { + pr_err("no irqs for PMUs defined\n"); + return -ENODEV; + } + + for (i = 0; i < pmu_device->num_resources; ++i) { + irq = platform_get_irq(pmu_device, i); + if (irq < 0) + continue; + + err = request_irq(irq, armpmu->handle_irq, + IRQF_DISABLED | IRQF_NOBALANCING, + "armpmu", NULL); + if (err) { + pr_warning("unable to request IRQ%d for ARM perf " + "counters\n", irq); + break; + } + } + + if (err) { + for (i = i - 1; i >= 0; --i) { + irq = platform_get_irq(pmu_device, i); + if (irq >= 0) + free_irq(irq, NULL); + } + release_pmu(pmu_device); + pmu_device = NULL; + } + + return err; +} + +static void +armpmu_release_hardware(void) +{ + int i, irq; + + for (i = pmu_device->num_resources - 1; i >= 0; --i) { + irq = platform_get_irq(pmu_device, i); + if (irq >= 0) + free_irq(irq, NULL); + } + armpmu->stop(); + + release_pmu(pmu_device); + pmu_device = NULL; +} + +static atomic_t active_events = ATOMIC_INIT(0); +static DEFINE_MUTEX(pmu_reserve_mutex); + +static void +hw_perf_event_destroy(struct perf_event *event) +{ + if (atomic_dec_and_mutex_lock(&active_events, &pmu_reserve_mutex)) { + armpmu_release_hardware(); + mutex_unlock(&pmu_reserve_mutex); + } +} + +static int +__hw_perf_event_init(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + int mapping, err; + + /* Decode the generic type into an ARM event identifier. 
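The armpmu_map_cache_event() decode seen earlier unpacks the standard perf ABI encoding for PERF_TYPE_HW_CACHE events, where config carries the cache level, operation, and result in its three low bytes. For example, an L1-data read-miss request is built as:

	#include <linux/perf_event.h>

	/* config layout: cache_type | (cache_op << 8) | (cache_result << 16) */
	__u64 l1d_read_miss = (PERF_COUNT_HW_CACHE_L1D)              |
			      (PERF_COUNT_HW_CACHE_OP_READ     << 8) |
			      (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);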
*/ + if (PERF_TYPE_HARDWARE == event->attr.type) { + mapping = armpmu_map_event(event->attr.config); + } else if (PERF_TYPE_HW_CACHE == event->attr.type) { + mapping = armpmu_map_cache_event(event->attr.config); + } else if (PERF_TYPE_RAW == event->attr.type) { + mapping = armpmu_map_raw_event(event->attr.config); + } else { + pr_debug("event type %x not supported\n", event->attr.type); + return -EOPNOTSUPP; + } + + if (mapping < 0) { + pr_debug("event %x:%llx not supported\n", event->attr.type, + event->attr.config); + return mapping; + } + + /* + * Check whether we need to exclude the counter from certain modes. + * The ARM performance counters are on all of the time so if someone + * has asked us for some excludes then we have to fail. + */ + if (event->attr.exclude_kernel || event->attr.exclude_user || + event->attr.exclude_hv || event->attr.exclude_idle) { + pr_debug("ARM performance counters do not support " + "mode exclusion\n"); + return -EPERM; + } + + /* + * We don't assign an index until we actually place the event onto + * hardware. Use -1 to signify that we haven't decided where to put it + * yet. For SMP systems, each core has its own PMU so we can't do any + * clever allocation or constraints checking at this point. + */ + hwc->idx = -1; + + /* + * Store the event encoding into the config_base field. config and + * event_base are unused as the only 2 things we need to know are + * the event mapping and the counter to use. The counter to use is + * also the index and the config_base is the event type. + */ + hwc->config_base = (unsigned long)mapping; + hwc->config = 0; + hwc->event_base = 0; + + if (!hwc->sample_period) { + hwc->sample_period = armpmu->max_period; + hwc->last_period = hwc->sample_period; + local64_set(&hwc->period_left, hwc->sample_period); + } + + err = 0; + if (event->group_leader != event) { + err = validate_group(event); + if (err) + return -EINVAL; + } + + return err; +} + +static int armpmu_event_init(struct perf_event *event) +{ + int err = 0; + + switch (event->attr.type) { + case PERF_TYPE_RAW: + case PERF_TYPE_HARDWARE: + case PERF_TYPE_HW_CACHE: + break; + + default: + return -ENOENT; + } + + if (!armpmu) + return -ENODEV; + + event->destroy = hw_perf_event_destroy; + + if (!atomic_inc_not_zero(&active_events)) { + if (atomic_read(&active_events) > armpmu->num_events) { + atomic_dec(&active_events); + return -ENOSPC; + } + + mutex_lock(&pmu_reserve_mutex); + if (atomic_read(&active_events) == 0) { + err = armpmu_reserve_hardware(); + } + + if (!err) + atomic_inc(&active_events); + mutex_unlock(&pmu_reserve_mutex); + } + + if (err) + return err; + + err = __hw_perf_event_init(event); + if (err) + hw_perf_event_destroy(event); + + return err; +} + +static void armpmu_enable(struct pmu *pmu) +{ + /* Enable all of the perf events on hardware. */ + int idx; + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + + if (!armpmu) + return; + + for (idx = 0; idx <= armpmu->num_events; ++idx) { + struct perf_event *event = cpuc->events[idx]; + + if (!event) + continue; + + armpmu->enable(&event->hw, idx); + } + + armpmu->start(); +} + +static void armpmu_disable(struct pmu *pmu) +{ + if (armpmu) + armpmu->stop(); +} + +static struct pmu pmu = { + .pmu_enable = armpmu_enable, + .pmu_disable = armpmu_disable, + .event_init = armpmu_event_init, + .add = armpmu_add, + .del = armpmu_del, + .start = armpmu_start, + .stop = armpmu_stop, + .read = armpmu_read, +}; + +/* Include the PMU-specific implementations. 
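Including the variant .c files directly keeps each PMU backend's tables and init routine static within this one translation unit; init_hw_perf_events() below then selects exactly one backend at boot from the main ID register. As a worked example of the masks used there (the MIDR value is illustrative, for a Cortex-A8):

	unsigned long cpuid       = 0x410fc083;	/* hypothetical MIDR */
	unsigned long implementor = (cpuid & 0xff000000) >> 24;	/* 0x41: ARM Ltd */
	unsigned long part_number = (cpuid & 0xfff0);	/* 0xc080: Cortex-A8 */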
*/ +#include "perf_event_xscale.c" +#include "perf_event_v6.c" +#include "perf_event_v7.c" + +static int __init +init_hw_perf_events(void) +{ + unsigned long cpuid = read_cpuid_id(); + unsigned long implementor = (cpuid & 0xFF000000) >> 24; + unsigned long part_number = (cpuid & 0xFFF0); + + /* ARM Ltd CPUs. */ + if (0x41 == implementor) { + switch (part_number) { + case 0xB360: /* ARM1136 */ + case 0xB560: /* ARM1156 */ + case 0xB760: /* ARM1176 */ + armpmu = armv6pmu_init(); + break; + case 0xB020: /* ARM11mpcore */ + armpmu = armv6mpcore_pmu_init(); + break; + case 0xC080: /* Cortex-A8 */ + armpmu = armv7_a8_pmu_init(); + break; + case 0xC090: /* Cortex-A9 */ + armpmu = armv7_a9_pmu_init(); + break; + } + /* Intel CPUs [xscale]. */ + } else if (0x69 == implementor) { + part_number = (cpuid >> 13) & 0x7; + switch (part_number) { + case 1: + armpmu = xscale1pmu_init(); + break; + case 2: + armpmu = xscale2pmu_init(); + break; + } + } + + if (armpmu) { + pr_info("enabled with %s PMU driver, %d counters available\n", + armpmu->name, armpmu->num_events); + } else { + pr_info("no hardware support available\n"); + } + + perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW); + + return 0; +} +early_initcall(init_hw_perf_events); + +/* + * Callchain handling code. + */ + +/* + * The registers we're interested in are at the end of the variable + * length saved register structure. The fp points at the end of this + * structure so the address of this struct is: + * (struct frame_tail *)(xxx->fp)-1 + * + * This code has been adapted from the ARM OProfile support. + */ +struct frame_tail { + struct frame_tail __user *fp; + unsigned long sp; + unsigned long lr; +} __attribute__((packed)); + +/* + * Get the return address for a single stackframe and return a pointer to the + * next frame tail. + */ +static struct frame_tail __user * +user_backtrace(struct frame_tail __user *tail, + struct perf_callchain_entry *entry) +{ + struct frame_tail buftail; + + /* Also check accessibility of one struct frame_tail beyond */ + if (!access_ok(VERIFY_READ, tail, sizeof(buftail))) + return NULL; + if (__copy_from_user_inatomic(&buftail, tail, sizeof(buftail))) + return NULL; + + perf_callchain_store(entry, buftail.lr); + + /* + * Frame pointers should strictly progress back up the stack + * (towards higher addresses). + */ + if (tail >= buftail.fp) + return NULL; + + return buftail.fp - 1; +} + +void +perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) +{ + struct frame_tail __user *tail; + + + tail = (struct frame_tail __user *)regs->ARM_fp - 1; + + while (tail && !((unsigned long)tail & 0x3)) + tail = user_backtrace(tail, entry); +} + +/* + * Gets called by walk_stackframe() for every stackframe. This will be called + * whist unwinding the stackframe and is like a subroutine return so we use + * the PC. + */ +static int +callchain_trace(struct stackframe *fr, + void *data) +{ + struct perf_callchain_entry *entry = data; + perf_callchain_store(entry, fr->pc); + return 0; +} + +void +perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs) +{ + struct stackframe fr; + + fr.fp = regs->ARM_fp; + fr.sp = regs->ARM_sp; + fr.lr = regs->ARM_lr; + fr.pc = regs->ARM_pc; + walk_stackframe(&fr, callchain_trace, entry); +} diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c new file mode 100644 index 0000000..c058bfc --- /dev/null +++ b/arch/arm/kernel/perf_event_v6.c @@ -0,0 +1,672 @@ +/* + * ARMv6 Performance counter handling code. 
+ * + * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles + * + * ARMv6 has 2 configurable performance counters and a single cycle counter. + * They all share a single reset bit but can be written to zero so we can use + * that for a reset. + * + * The counters can't be individually enabled or disabled so when we remove + * one event and replace it with another we could get spurious counts from the + * wrong event. However, we can take advantage of the fact that the + * performance counters can export events to the event bus, and the event bus + * itself can be monitored. This requires that we *don't* export the events to + * the event bus. The procedure for disabling a configurable counter is: + * - change the counter to count the ETMEXTOUT[0] signal (0x20). This + * effectively stops the counter from counting. + * - disable the counter's interrupt generation (each counter has its + * own interrupt enable bit). + * Once stopped, the counter value can be written as 0 to reset. + * + * To enable a counter: + * - enable the counter's interrupt generation. + * - set the new event type. + * + * Note: the dedicated cycle counter only counts cycles and can't be + * enabled/disabled independently of the others. When we want to disable the + * cycle counter, we have to just disable the interrupt reporting and start + * ignoring that counter. When re-enabling, we have to reset the value and + * enable the interrupt. + */ + +#ifdef CONFIG_CPU_V6 +enum armv6_perf_types { + ARMV6_PERFCTR_ICACHE_MISS = 0x0, + ARMV6_PERFCTR_IBUF_STALL = 0x1, + ARMV6_PERFCTR_DDEP_STALL = 0x2, + ARMV6_PERFCTR_ITLB_MISS = 0x3, + ARMV6_PERFCTR_DTLB_MISS = 0x4, + ARMV6_PERFCTR_BR_EXEC = 0x5, + ARMV6_PERFCTR_BR_MISPREDICT = 0x6, + ARMV6_PERFCTR_INSTR_EXEC = 0x7, + ARMV6_PERFCTR_DCACHE_HIT = 0x9, + ARMV6_PERFCTR_DCACHE_ACCESS = 0xA, + ARMV6_PERFCTR_DCACHE_MISS = 0xB, + ARMV6_PERFCTR_DCACHE_WBACK = 0xC, + ARMV6_PERFCTR_SW_PC_CHANGE = 0xD, + ARMV6_PERFCTR_MAIN_TLB_MISS = 0xF, + ARMV6_PERFCTR_EXPL_D_ACCESS = 0x10, + ARMV6_PERFCTR_LSU_FULL_STALL = 0x11, + ARMV6_PERFCTR_WBUF_DRAINED = 0x12, + ARMV6_PERFCTR_CPU_CYCLES = 0xFF, + ARMV6_PERFCTR_NOP = 0x20, +}; + +enum armv6_counters { + ARMV6_CYCLE_COUNTER = 1, + ARMV6_COUNTER0, + ARMV6_COUNTER1, +}; + +/* + * The hardware events that we support. We do support cache operations but + * we have harvard caches and no way to combine instruction and data + * accesses/misses in hardware. + */ +static const unsigned armv6_perf_map[PERF_COUNT_HW_MAX] = { + [PERF_COUNT_HW_CPU_CYCLES] = ARMV6_PERFCTR_CPU_CYCLES, + [PERF_COUNT_HW_INSTRUCTIONS] = ARMV6_PERFCTR_INSTR_EXEC, + [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED, + [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6_PERFCTR_BR_EXEC, + [PERF_COUNT_HW_BRANCH_MISSES] = ARMV6_PERFCTR_BR_MISPREDICT, + [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED, +}; + +static const unsigned armv6_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX] = { + [C(L1D)] = { + /* + * The performance counters don't differentiate between read + * and write accesses/misses so this isn't strictly correct, + * but it's the best we can do. Writes and reads get + * combined. 
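The stop procedure spelled out in the header comment above (retarget the counter at the never-counting ETMEXTOUT[0] event, mask its interrupt, then write zero) can be made concrete against the ARMV6_PMCR_* definitions that appear later in this file. A simplified, lock-free sketch for counter 0 only; the real enable/disable paths take pmu_lock and handle both counters:

	static void armv6_stop_counter0(void)
	{
		unsigned long val = armv6_pmcr_read();

		/* Point counter 0 at the NOP event and mask its interrupt. */
		val &= ~(ARMV6_PMCR_EVT_COUNT0_MASK | ARMV6_PMCR_COUNT0_IEN);
		val |= ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT0_SHIFT;
		armv6_pmcr_write(val);

		/* Counting nothing now, so zeroing acts as a reset. */
		armv6pmu_write_counter(ARMV6_COUNTER0, 0);
	}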
+ */ + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = ARMV6_PERFCTR_DCACHE_ACCESS, + [C(RESULT_MISS)] = ARMV6_PERFCTR_DCACHE_MISS, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = ARMV6_PERFCTR_DCACHE_ACCESS, + [C(RESULT_MISS)] = ARMV6_PERFCTR_DCACHE_MISS, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(L1I)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = ARMV6_PERFCTR_ICACHE_MISS, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = ARMV6_PERFCTR_ICACHE_MISS, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(LL)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(DTLB)] = { + /* + * The ARM performance counters can count micro DTLB misses, + * micro ITLB misses and main TLB misses. There isn't an event + * for TLB misses, so use the micro misses here and if users + * want the main TLB misses they can use a raw counter. + */ + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = ARMV6_PERFCTR_DTLB_MISS, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = ARMV6_PERFCTR_DTLB_MISS, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(ITLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = ARMV6_PERFCTR_ITLB_MISS, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = ARMV6_PERFCTR_ITLB_MISS, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(BPU)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, +}; + +enum armv6mpcore_perf_types { + ARMV6MPCORE_PERFCTR_ICACHE_MISS = 0x0, + ARMV6MPCORE_PERFCTR_IBUF_STALL = 0x1, + ARMV6MPCORE_PERFCTR_DDEP_STALL = 0x2, + ARMV6MPCORE_PERFCTR_ITLB_MISS = 0x3, + ARMV6MPCORE_PERFCTR_DTLB_MISS = 0x4, + ARMV6MPCORE_PERFCTR_BR_EXEC = 0x5, + ARMV6MPCORE_PERFCTR_BR_NOTPREDICT = 0x6, + ARMV6MPCORE_PERFCTR_BR_MISPREDICT = 0x7, + ARMV6MPCORE_PERFCTR_INSTR_EXEC = 0x8, + ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS = 0xA, + ARMV6MPCORE_PERFCTR_DCACHE_RDMISS = 0xB, + ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS = 0xC, + ARMV6MPCORE_PERFCTR_DCACHE_WRMISS = 0xD, + ARMV6MPCORE_PERFCTR_DCACHE_EVICTION = 0xE, + ARMV6MPCORE_PERFCTR_SW_PC_CHANGE = 0xF, + ARMV6MPCORE_PERFCTR_MAIN_TLB_MISS = 0x10, + ARMV6MPCORE_PERFCTR_EXPL_MEM_ACCESS = 0x11, + ARMV6MPCORE_PERFCTR_LSU_FULL_STALL = 0x12, + ARMV6MPCORE_PERFCTR_WBUF_DRAINED = 0x13, + ARMV6MPCORE_PERFCTR_CPU_CYCLES = 0xFF, +}; + +/* + * The hardware events that we support. We do support cache operations but + * we have harvard caches and no way to combine instruction and data + * accesses/misses in hardware. 
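Given the three-level table layout used throughout these maps, translating a generic cache event into a hardware event number is a plain triple index, e.g. for an ARMv6 L1-data read miss:

	unsigned int ev =
		armv6_perf_cache_map[C(L1D)][C(OP_READ)][C(RESULT_MISS)];
	/* ev == ARMV6_PERFCTR_DCACHE_MISS (0xB); a CACHE_OP_UNSUPPORTED
	 * entry makes armpmu_map_cache_event() return -ENOENT instead. */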
+ */ +static const unsigned armv6mpcore_perf_map[PERF_COUNT_HW_MAX] = { + [PERF_COUNT_HW_CPU_CYCLES] = ARMV6MPCORE_PERFCTR_CPU_CYCLES, + [PERF_COUNT_HW_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_INSTR_EXEC, + [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED, + [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_BR_EXEC, + [PERF_COUNT_HW_BRANCH_MISSES] = ARMV6MPCORE_PERFCTR_BR_MISPREDICT, + [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED, +}; + +static const unsigned armv6mpcore_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX] = { + [C(L1D)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = + ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS, + [C(RESULT_MISS)] = + ARMV6MPCORE_PERFCTR_DCACHE_RDMISS, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = + ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS, + [C(RESULT_MISS)] = + ARMV6MPCORE_PERFCTR_DCACHE_WRMISS, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(L1I)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ICACHE_MISS, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ICACHE_MISS, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(LL)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(DTLB)] = { + /* + * The ARM performance counters can count micro DTLB misses, + * micro ITLB misses and main TLB misses. There isn't an event + * for TLB misses, so use the micro misses here and if users + * want the main TLB misses they can use a raw counter. 
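As the DTLB comment notes, the main TLB miss event is reachable only as a raw event; on 11MPCore that is event number 0x10 (ARMV6MPCORE_PERFCTR_MAIN_TLB_MISS, from the enum above). A userspace request for it would look along these lines:

	#include <linux/perf_event.h>

	/* Raw attribute selecting the main-TLB-miss event. */
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_RAW,
		.config	= 0x10,	/* ARMV6MPCORE_PERFCTR_MAIN_TLB_MISS */
		.size	= sizeof(struct perf_event_attr),
	};
	/* or, with the perf tool: perf stat -e r10 <cmd> */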
+ */ + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_DTLB_MISS, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_DTLB_MISS, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(ITLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ITLB_MISS, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ITLB_MISS, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(BPU)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, +}; + +static inline unsigned long +armv6_pmcr_read(void) +{ + u32 val; + asm volatile("mrc p15, 0, %0, c15, c12, 0" : "=r"(val)); + return val; +} + +static inline void +armv6_pmcr_write(unsigned long val) +{ + asm volatile("mcr p15, 0, %0, c15, c12, 0" : : "r"(val)); +} + +#define ARMV6_PMCR_ENABLE (1 << 0) +#define ARMV6_PMCR_CTR01_RESET (1 << 1) +#define ARMV6_PMCR_CCOUNT_RESET (1 << 2) +#define ARMV6_PMCR_CCOUNT_DIV (1 << 3) +#define ARMV6_PMCR_COUNT0_IEN (1 << 4) +#define ARMV6_PMCR_COUNT1_IEN (1 << 5) +#define ARMV6_PMCR_CCOUNT_IEN (1 << 6) +#define ARMV6_PMCR_COUNT0_OVERFLOW (1 << 8) +#define ARMV6_PMCR_COUNT1_OVERFLOW (1 << 9) +#define ARMV6_PMCR_CCOUNT_OVERFLOW (1 << 10) +#define ARMV6_PMCR_EVT_COUNT0_SHIFT 20 +#define ARMV6_PMCR_EVT_COUNT0_MASK (0xFF << ARMV6_PMCR_EVT_COUNT0_SHIFT) +#define ARMV6_PMCR_EVT_COUNT1_SHIFT 12 +#define ARMV6_PMCR_EVT_COUNT1_MASK (0xFF << ARMV6_PMCR_EVT_COUNT1_SHIFT) + +#define ARMV6_PMCR_OVERFLOWED_MASK \ + (ARMV6_PMCR_COUNT0_OVERFLOW | ARMV6_PMCR_COUNT1_OVERFLOW | \ + ARMV6_PMCR_CCOUNT_OVERFLOW) + +static inline int +armv6_pmcr_has_overflowed(unsigned long pmcr) +{ + return pmcr & ARMV6_PMCR_OVERFLOWED_MASK; +} + +static inline int +armv6_pmcr_counter_has_overflowed(unsigned long pmcr, + enum armv6_counters counter) +{ + int ret = 0; + + if (ARMV6_CYCLE_COUNTER == counter) + ret = pmcr & ARMV6_PMCR_CCOUNT_OVERFLOW; + else if (ARMV6_COUNTER0 == counter) + ret = pmcr & ARMV6_PMCR_COUNT0_OVERFLOW; + else if (ARMV6_COUNTER1 == counter) + ret = pmcr & ARMV6_PMCR_COUNT1_OVERFLOW; + else + WARN_ONCE(1, "invalid counter number (%d)\n", counter); + + return ret; +} + +static inline u32 +armv6pmu_read_counter(int counter) +{ + unsigned long value = 0; + + if (ARMV6_CYCLE_COUNTER == counter) + asm volatile("mrc p15, 0, %0, c15, c12, 1" : "=r"(value)); + else if (ARMV6_COUNTER0 == counter) + asm volatile("mrc p15, 0, %0, c15, c12, 2" : "=r"(value)); + else if (ARMV6_COUNTER1 == counter) + asm volatile("mrc p15, 0, %0, c15, c12, 3" : "=r"(value)); + else + WARN_ONCE(1, "invalid counter number (%d)\n", counter); + + return value; +} + +static inline void +armv6pmu_write_counter(int counter, + u32 value) +{ + if (ARMV6_CYCLE_COUNTER == counter) + asm volatile("mcr p15, 0, %0, c15, c12, 1" : : "r"(value)); + else if (ARMV6_COUNTER0 == counter) + asm volatile("mcr p15, 0, %0, c15, c12, 2" : : "r"(value)); + else if (ARMV6_COUNTER1 == counter) + asm volatile("mcr p15, 0, %0, c15, 
c12, 3" : : "r"(value)); + else + WARN_ONCE(1, "invalid counter number (%d)\n", counter); +} + +static void +armv6pmu_enable_event(struct hw_perf_event *hwc, + int idx) +{ + unsigned long val, mask, evt, flags; + + if (ARMV6_CYCLE_COUNTER == idx) { + mask = 0; + evt = ARMV6_PMCR_CCOUNT_IEN; + } else if (ARMV6_COUNTER0 == idx) { + mask = ARMV6_PMCR_EVT_COUNT0_MASK; + evt = (hwc->config_base << ARMV6_PMCR_EVT_COUNT0_SHIFT) | + ARMV6_PMCR_COUNT0_IEN; + } else if (ARMV6_COUNTER1 == idx) { + mask = ARMV6_PMCR_EVT_COUNT1_MASK; + evt = (hwc->config_base << ARMV6_PMCR_EVT_COUNT1_SHIFT) | + ARMV6_PMCR_COUNT1_IEN; + } else { + WARN_ONCE(1, "invalid counter number (%d)\n", idx); + return; + } + + /* + * Mask out the current event and set the counter to count the event + * that we're interested in. + */ + raw_spin_lock_irqsave(&pmu_lock, flags); + val = armv6_pmcr_read(); + val &= ~mask; + val |= evt; + armv6_pmcr_write(val); + raw_spin_unlock_irqrestore(&pmu_lock, flags); +} + +static irqreturn_t +armv6pmu_handle_irq(int irq_num, + void *dev) +{ + unsigned long pmcr = armv6_pmcr_read(); + struct perf_sample_data data; + struct cpu_hw_events *cpuc; + struct pt_regs *regs; + int idx; + + if (!armv6_pmcr_has_overflowed(pmcr)) + return IRQ_NONE; + + regs = get_irq_regs(); + + /* + * The interrupts are cleared by writing the overflow flags back to + * the control register. All of the other bits don't have any effect + * if they are rewritten, so write the whole value back. + */ + armv6_pmcr_write(pmcr); + + perf_sample_data_init(&data, 0); + + cpuc = &__get_cpu_var(cpu_hw_events); + for (idx = 0; idx <= armpmu->num_events; ++idx) { + struct perf_event *event = cpuc->events[idx]; + struct hw_perf_event *hwc; + + if (!test_bit(idx, cpuc->active_mask)) + continue; + + /* + * We have a single interrupt for all counters. Check that + * each counter has overflowed before we process it. + */ + if (!armv6_pmcr_counter_has_overflowed(pmcr, idx)) + continue; + + hwc = &event->hw; + armpmu_event_update(event, hwc, idx); + data.period = event->hw.last_period; + if (!armpmu_event_set_period(event, hwc, idx)) + continue; + + if (perf_event_overflow(event, 0, &data, regs)) + armpmu->disable(hwc, idx); + } + + /* + * Handle the pending perf events. + * + * Note: this call *must* be run with interrupts disabled. For + * platforms that can have the PMU interrupts raised as an NMI, this + * will not work. + */ + irq_work_run(); + + return IRQ_HANDLED; +} + +static void +armv6pmu_start(void) +{ + unsigned long flags, val; + + raw_spin_lock_irqsave(&pmu_lock, flags); + val = armv6_pmcr_read(); + val |= ARMV6_PMCR_ENABLE; + armv6_pmcr_write(val); + raw_spin_unlock_irqrestore(&pmu_lock, flags); +} + +static void +armv6pmu_stop(void) +{ + unsigned long flags, val; + + raw_spin_lock_irqsave(&pmu_lock, flags); + val = armv6_pmcr_read(); + val &= ~ARMV6_PMCR_ENABLE; + armv6_pmcr_write(val); + raw_spin_unlock_irqrestore(&pmu_lock, flags); +} + +static int +armv6pmu_get_event_idx(struct cpu_hw_events *cpuc, + struct hw_perf_event *event) +{ + /* Always place a cycle counter into the cycle counter. */ + if (ARMV6_PERFCTR_CPU_CYCLES == event->config_base) { + if (test_and_set_bit(ARMV6_CYCLE_COUNTER, cpuc->used_mask)) + return -EAGAIN; + + return ARMV6_CYCLE_COUNTER; + } else { + /* + * For anything other than a cycle counter, try and use + * counter0 and counter1. 
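+		 *
+		 * test_and_set_bit() both checks and claims a counter
+		 * atomically: a zero return means the bit was clear and
+		 * the counter is now ours.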
+ */ + if (!test_and_set_bit(ARMV6_COUNTER1, cpuc->used_mask)) + return ARMV6_COUNTER1; + + if (!test_and_set_bit(ARMV6_COUNTER0, cpuc->used_mask)) + return ARMV6_COUNTER0; + + /* The counters are all in use. */ + return -EAGAIN; + } +} + +static void +armv6pmu_disable_event(struct hw_perf_event *hwc, + int idx) +{ + unsigned long val, mask, evt, flags; + + if (ARMV6_CYCLE_COUNTER == idx) { + mask = ARMV6_PMCR_CCOUNT_IEN; + evt = 0; + } else if (ARMV6_COUNTER0 == idx) { + mask = ARMV6_PMCR_COUNT0_IEN | ARMV6_PMCR_EVT_COUNT0_MASK; + evt = ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT0_SHIFT; + } else if (ARMV6_COUNTER1 == idx) { + mask = ARMV6_PMCR_COUNT1_IEN | ARMV6_PMCR_EVT_COUNT1_MASK; + evt = ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT1_SHIFT; + } else { + WARN_ONCE(1, "invalid counter number (%d)\n", idx); + return; + } + + /* + * Mask out the current event and set the counter to count the number + * of ETM bus signal assertion cycles. The external reporting should + * be disabled and so this should never increment. + */ + raw_spin_lock_irqsave(&pmu_lock, flags); + val = armv6_pmcr_read(); + val &= ~mask; + val |= evt; + armv6_pmcr_write(val); + raw_spin_unlock_irqrestore(&pmu_lock, flags); +} + +static void +armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc, + int idx) +{ + unsigned long val, mask, flags, evt = 0; + + if (ARMV6_CYCLE_COUNTER == idx) { + mask = ARMV6_PMCR_CCOUNT_IEN; + } else if (ARMV6_COUNTER0 == idx) { + mask = ARMV6_PMCR_COUNT0_IEN; + } else if (ARMV6_COUNTER1 == idx) { + mask = ARMV6_PMCR_COUNT1_IEN; + } else { + WARN_ONCE(1, "invalid counter number (%d)\n", idx); + return; + } + + /* + * Unlike UP ARMv6, we don't have a way of stopping the counters. We + * simply disable the interrupt reporting. + */ + raw_spin_lock_irqsave(&pmu_lock, flags); + val = armv6_pmcr_read(); + val &= ~mask; + val |= evt; + armv6_pmcr_write(val); + raw_spin_unlock_irqrestore(&pmu_lock, flags); +} + +static const struct arm_pmu armv6pmu = { + .id = ARM_PERF_PMU_ID_V6, + .name = "v6", + .handle_irq = armv6pmu_handle_irq, + .enable = armv6pmu_enable_event, + .disable = armv6pmu_disable_event, + .read_counter = armv6pmu_read_counter, + .write_counter = armv6pmu_write_counter, + .get_event_idx = armv6pmu_get_event_idx, + .start = armv6pmu_start, + .stop = armv6pmu_stop, + .cache_map = &armv6_perf_cache_map, + .event_map = &armv6_perf_map, + .raw_event_mask = 0xFF, + .num_events = 3, + .max_period = (1LLU << 32) - 1, +}; + +static const struct arm_pmu *__init armv6pmu_init(void) +{ + return &armv6pmu; +} + +/* + * ARMv6mpcore is almost identical to single core ARMv6 with the exception + * that some of the events have different enumerations and that there is no + * *hack* to stop the programmable counters. To stop the counters we simply + * disable the interrupt reporting and update the event. When unthrottling we + * reset the period and enable the interrupt reporting. 
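+ *
+ * (Compare armv6pmu_disable_event(), which also reprograms the counter
+ * to the inert ARMV6_PERFCTR_NOP event, with
+ * armv6mpcore_pmu_disable_event(), which only clears the interrupt
+ * enable bit.)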
+ */ +static const struct arm_pmu armv6mpcore_pmu = { + .id = ARM_PERF_PMU_ID_V6MP, + .name = "v6mpcore", + .handle_irq = armv6pmu_handle_irq, + .enable = armv6pmu_enable_event, + .disable = armv6mpcore_pmu_disable_event, + .read_counter = armv6pmu_read_counter, + .write_counter = armv6pmu_write_counter, + .get_event_idx = armv6pmu_get_event_idx, + .start = armv6pmu_start, + .stop = armv6pmu_stop, + .cache_map = &armv6mpcore_perf_cache_map, + .event_map = &armv6mpcore_perf_map, + .raw_event_mask = 0xFF, + .num_events = 3, + .max_period = (1LLU << 32) - 1, +}; + +static const struct arm_pmu *__init armv6mpcore_pmu_init(void) +{ + return &armv6mpcore_pmu; +} +#else +static const struct arm_pmu *__init armv6pmu_init(void) +{ + return NULL; +} + +static const struct arm_pmu *__init armv6mpcore_pmu_init(void) +{ + return NULL; +} +#endif /* CONFIG_CPU_V6 */ diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c new file mode 100644 index 0000000..2e14025 --- /dev/null +++ b/arch/arm/kernel/perf_event_v7.c @@ -0,0 +1,906 @@ +/* + * ARMv7 Cortex-A8 and Cortex-A9 Performance Events handling code. + * + * ARMv7 support: Jean Pihet <jpihet@mvista.com> + * 2010 (c) MontaVista Software, LLC. + * + * Copied from ARMv6 code, with the low level code inspired + * by the ARMv7 Oprofile code. + * + * Cortex-A8 has up to 4 configurable performance counters and + * a single cycle counter. + * Cortex-A9 has up to 31 configurable performance counters and + * a single cycle counter. + * + * All counters can be enabled/disabled and IRQ masked separately. The cycle + * counter and all 4 performance counters together can be reset separately. + */ + +#ifdef CONFIG_CPU_V7 +/* Common ARMv7 event types */ +enum armv7_perf_types { + ARMV7_PERFCTR_PMNC_SW_INCR = 0x00, + ARMV7_PERFCTR_IFETCH_MISS = 0x01, + ARMV7_PERFCTR_ITLB_MISS = 0x02, + ARMV7_PERFCTR_DCACHE_REFILL = 0x03, + ARMV7_PERFCTR_DCACHE_ACCESS = 0x04, + ARMV7_PERFCTR_DTLB_REFILL = 0x05, + ARMV7_PERFCTR_DREAD = 0x06, + ARMV7_PERFCTR_DWRITE = 0x07, + + ARMV7_PERFCTR_EXC_TAKEN = 0x09, + ARMV7_PERFCTR_EXC_EXECUTED = 0x0A, + ARMV7_PERFCTR_CID_WRITE = 0x0B, + /* ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS. + * It counts: + * - all branch instructions, + * - instructions that explicitly write the PC, + * - exception generating instructions. 
+ */ + ARMV7_PERFCTR_PC_WRITE = 0x0C, + ARMV7_PERFCTR_PC_IMM_BRANCH = 0x0D, + ARMV7_PERFCTR_UNALIGNED_ACCESS = 0x0F, + ARMV7_PERFCTR_PC_BRANCH_MIS_PRED = 0x10, + ARMV7_PERFCTR_CLOCK_CYCLES = 0x11, + + ARMV7_PERFCTR_PC_BRANCH_MIS_USED = 0x12, + + ARMV7_PERFCTR_CPU_CYCLES = 0xFF +}; + +/* ARMv7 Cortex-A8 specific event types */ +enum armv7_a8_perf_types { + ARMV7_PERFCTR_INSTR_EXECUTED = 0x08, + + ARMV7_PERFCTR_PC_PROC_RETURN = 0x0E, + + ARMV7_PERFCTR_WRITE_BUFFER_FULL = 0x40, + ARMV7_PERFCTR_L2_STORE_MERGED = 0x41, + ARMV7_PERFCTR_L2_STORE_BUFF = 0x42, + ARMV7_PERFCTR_L2_ACCESS = 0x43, + ARMV7_PERFCTR_L2_CACH_MISS = 0x44, + ARMV7_PERFCTR_AXI_READ_CYCLES = 0x45, + ARMV7_PERFCTR_AXI_WRITE_CYCLES = 0x46, + ARMV7_PERFCTR_MEMORY_REPLAY = 0x47, + ARMV7_PERFCTR_UNALIGNED_ACCESS_REPLAY = 0x48, + ARMV7_PERFCTR_L1_DATA_MISS = 0x49, + ARMV7_PERFCTR_L1_INST_MISS = 0x4A, + ARMV7_PERFCTR_L1_DATA_COLORING = 0x4B, + ARMV7_PERFCTR_L1_NEON_DATA = 0x4C, + ARMV7_PERFCTR_L1_NEON_CACH_DATA = 0x4D, + ARMV7_PERFCTR_L2_NEON = 0x4E, + ARMV7_PERFCTR_L2_NEON_HIT = 0x4F, + ARMV7_PERFCTR_L1_INST = 0x50, + ARMV7_PERFCTR_PC_RETURN_MIS_PRED = 0x51, + ARMV7_PERFCTR_PC_BRANCH_FAILED = 0x52, + ARMV7_PERFCTR_PC_BRANCH_TAKEN = 0x53, + ARMV7_PERFCTR_PC_BRANCH_EXECUTED = 0x54, + ARMV7_PERFCTR_OP_EXECUTED = 0x55, + ARMV7_PERFCTR_CYCLES_INST_STALL = 0x56, + ARMV7_PERFCTR_CYCLES_INST = 0x57, + ARMV7_PERFCTR_CYCLES_NEON_DATA_STALL = 0x58, + ARMV7_PERFCTR_CYCLES_NEON_INST_STALL = 0x59, + ARMV7_PERFCTR_NEON_CYCLES = 0x5A, + + ARMV7_PERFCTR_PMU0_EVENTS = 0x70, + ARMV7_PERFCTR_PMU1_EVENTS = 0x71, + ARMV7_PERFCTR_PMU_EVENTS = 0x72, +}; + +/* ARMv7 Cortex-A9 specific event types */ +enum armv7_a9_perf_types { + ARMV7_PERFCTR_JAVA_HW_BYTECODE_EXEC = 0x40, + ARMV7_PERFCTR_JAVA_SW_BYTECODE_EXEC = 0x41, + ARMV7_PERFCTR_JAZELLE_BRANCH_EXEC = 0x42, + + ARMV7_PERFCTR_COHERENT_LINE_MISS = 0x50, + ARMV7_PERFCTR_COHERENT_LINE_HIT = 0x51, + + ARMV7_PERFCTR_ICACHE_DEP_STALL_CYCLES = 0x60, + ARMV7_PERFCTR_DCACHE_DEP_STALL_CYCLES = 0x61, + ARMV7_PERFCTR_TLB_MISS_DEP_STALL_CYCLES = 0x62, + ARMV7_PERFCTR_STREX_EXECUTED_PASSED = 0x63, + ARMV7_PERFCTR_STREX_EXECUTED_FAILED = 0x64, + ARMV7_PERFCTR_DATA_EVICTION = 0x65, + ARMV7_PERFCTR_ISSUE_STAGE_NO_INST = 0x66, + ARMV7_PERFCTR_ISSUE_STAGE_EMPTY = 0x67, + ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE = 0x68, + + ARMV7_PERFCTR_PREDICTABLE_FUNCT_RETURNS = 0x6E, + + ARMV7_PERFCTR_MAIN_UNIT_EXECUTED_INST = 0x70, + ARMV7_PERFCTR_SECOND_UNIT_EXECUTED_INST = 0x71, + ARMV7_PERFCTR_LD_ST_UNIT_EXECUTED_INST = 0x72, + ARMV7_PERFCTR_FP_EXECUTED_INST = 0x73, + ARMV7_PERFCTR_NEON_EXECUTED_INST = 0x74, + + ARMV7_PERFCTR_PLD_FULL_DEP_STALL_CYCLES = 0x80, + ARMV7_PERFCTR_DATA_WR_DEP_STALL_CYCLES = 0x81, + ARMV7_PERFCTR_ITLB_MISS_DEP_STALL_CYCLES = 0x82, + ARMV7_PERFCTR_DTLB_MISS_DEP_STALL_CYCLES = 0x83, + ARMV7_PERFCTR_MICRO_ITLB_MISS_DEP_STALL_CYCLES = 0x84, + ARMV7_PERFCTR_MICRO_DTLB_MISS_DEP_STALL_CYCLES = 0x85, + ARMV7_PERFCTR_DMB_DEP_STALL_CYCLES = 0x86, + + ARMV7_PERFCTR_INTGR_CLK_ENABLED_CYCLES = 0x8A, + ARMV7_PERFCTR_DATA_ENGINE_CLK_EN_CYCLES = 0x8B, + + ARMV7_PERFCTR_ISB_INST = 0x90, + ARMV7_PERFCTR_DSB_INST = 0x91, + ARMV7_PERFCTR_DMB_INST = 0x92, + ARMV7_PERFCTR_EXT_INTERRUPTS = 0x93, + + ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_COMPLETED = 0xA0, + ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_SKIPPED = 0xA1, + ARMV7_PERFCTR_PLE_FIFO_FLUSH = 0xA2, + ARMV7_PERFCTR_PLE_RQST_COMPLETED = 0xA3, + ARMV7_PERFCTR_PLE_FIFO_OVERFLOW = 0xA4, + ARMV7_PERFCTR_PLE_RQST_PROG = 0xA5 +}; + +/* + * Cortex-A8 HW events mapping + * + * The hardware events 
that we support. We do support cache operations but
+ * we have harvard caches and no way to combine instruction and data
+ * accesses/misses in hardware.
+ */
+static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = {
+	[PERF_COUNT_HW_CPU_CYCLES]	    = ARMV7_PERFCTR_CPU_CYCLES,
+	[PERF_COUNT_HW_INSTRUCTIONS]	    = ARMV7_PERFCTR_INSTR_EXECUTED,
+	[PERF_COUNT_HW_CACHE_REFERENCES]    = HW_OP_UNSUPPORTED,
+	[PERF_COUNT_HW_CACHE_MISSES]	    = HW_OP_UNSUPPORTED,
+	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
+	[PERF_COUNT_HW_BRANCH_MISSES]	    = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+	[PERF_COUNT_HW_BUS_CYCLES]	    = ARMV7_PERFCTR_CLOCK_CYCLES,
+};
+
+static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+					  [PERF_COUNT_HW_CACHE_OP_MAX]
+					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+	[C(L1D)] = {
+		/*
+		 * The performance counters don't differentiate between read
+		 * and write accesses/misses so this isn't strictly correct,
+		 * but it's the best we can do. Writes and reads get
+		 * combined.
+		 */
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_DCACHE_ACCESS,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DCACHE_REFILL,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_DCACHE_ACCESS,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DCACHE_REFILL,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(L1I)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_INST,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_INST_MISS,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_INST,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_INST_MISS,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(LL)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L2_ACCESS,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACH_MISS,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L2_ACCESS,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACH_MISS,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(DTLB)] = {
+		/*
+		 * Only ITLB misses and DTLB refills are supported. If
+		 * users want any other TLB statistic, a raw counter
+		 * must be used.
+		 */
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(ITLB)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_MISS,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_MISS,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(BPU)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_WRITE,
+			[C(RESULT_MISS)]
+				= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_WRITE,
+			[C(RESULT_MISS)]
+				= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+};
+
+/*
+ * Cortex-A9 HW events mapping
+ */
+static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
+	[PERF_COUNT_HW_CPU_CYCLES]	    = ARMV7_PERFCTR_CPU_CYCLES,
+	[PERF_COUNT_HW_INSTRUCTIONS]	    =
+					ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE,
+	[PERF_COUNT_HW_CACHE_REFERENCES]    = ARMV7_PERFCTR_COHERENT_LINE_HIT,
+	[PERF_COUNT_HW_CACHE_MISSES]	    = ARMV7_PERFCTR_COHERENT_LINE_MISS,
+	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
+	[PERF_COUNT_HW_BRANCH_MISSES]	    = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+	[PERF_COUNT_HW_BUS_CYCLES]	    = ARMV7_PERFCTR_CLOCK_CYCLES,
+};
+
+static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+					  [PERF_COUNT_HW_CACHE_OP_MAX]
+					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+	[C(L1D)] = {
+		/*
+		 * The performance counters don't differentiate between read
+		 * and write accesses/misses so this isn't strictly correct,
+		 * but it's the best we can do. Writes and reads get
+		 * combined.
+		 */
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_DCACHE_ACCESS,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DCACHE_REFILL,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_DCACHE_ACCESS,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DCACHE_REFILL,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(L1I)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_IFETCH_MISS,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_IFETCH_MISS,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(LL)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(DTLB)] = {
+		/*
+		 * Only ITLB misses and DTLB refills are supported. If
+		 * users want any other TLB statistic, a raw counter
+		 * must be used.
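+		 *
+		 * Other TLB statistics, e.g. the Cortex-A9 DTLB miss
+		 * stall cycles event (0x83 in the enum above), are only
+		 * reachable through the raw interface (illustrative):
+		 *
+		 *	perf stat -e r83 -- <command>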
+ */ + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(ITLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(BPU)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE, + [C(RESULT_MISS)] + = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE, + [C(RESULT_MISS)] + = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, +}; + +/* + * Perf Events counters + */ +enum armv7_counters { + ARMV7_CYCLE_COUNTER = 1, /* Cycle counter */ + ARMV7_COUNTER0 = 2, /* First event counter */ +}; + +/* + * The cycle counter is ARMV7_CYCLE_COUNTER. + * The first event counter is ARMV7_COUNTER0. + * The last event counter is (ARMV7_COUNTER0 + armpmu->num_events - 1). + */ +#define ARMV7_COUNTER_LAST (ARMV7_COUNTER0 + armpmu->num_events - 1) + +/* + * ARMv7 low level PMNC access + */ + +/* + * Per-CPU PMNC: config reg + */ +#define ARMV7_PMNC_E (1 << 0) /* Enable all counters */ +#define ARMV7_PMNC_P (1 << 1) /* Reset all counters */ +#define ARMV7_PMNC_C (1 << 2) /* Cycle counter reset */ +#define ARMV7_PMNC_D (1 << 3) /* CCNT counts every 64th cpu cycle */ +#define ARMV7_PMNC_X (1 << 4) /* Export to ETM */ +#define ARMV7_PMNC_DP (1 << 5) /* Disable CCNT if non-invasive debug*/ +#define ARMV7_PMNC_N_SHIFT 11 /* Number of counters supported */ +#define ARMV7_PMNC_N_MASK 0x1f +#define ARMV7_PMNC_MASK 0x3f /* Mask for writable bits */ + +/* + * Available counters + */ +#define ARMV7_CNT0 0 /* First event counter */ +#define ARMV7_CCNT 31 /* Cycle counter */ + +/* Perf Event to low level counters mapping */ +#define ARMV7_EVENT_CNT_TO_CNTx (ARMV7_COUNTER0 - ARMV7_CNT0) + +/* + * CNTENS: counters enable reg + */ +#define ARMV7_CNTENS_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx)) +#define ARMV7_CNTENS_C (1 << ARMV7_CCNT) + +/* + * CNTENC: counters disable reg + */ +#define ARMV7_CNTENC_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx)) +#define ARMV7_CNTENC_C (1 << ARMV7_CCNT) + +/* + * INTENS: counters overflow interrupt enable reg + */ +#define ARMV7_INTENS_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx)) +#define ARMV7_INTENS_C (1 << ARMV7_CCNT) + +/* + * INTENC: counters overflow interrupt disable reg + */ +#define ARMV7_INTENC_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx)) +#define ARMV7_INTENC_C (1 << ARMV7_CCNT) + +/* + * EVTSEL: Event selection reg + */ +#define ARMV7_EVTSEL_MASK 0xff /* Mask for writable bits */ + +/* + * SELECT: Counter selection reg + */ +#define ARMV7_SELECT_MASK 0x1f /* Mask for writable bits */ + +/* + * FLAG: counters overflow flag status reg + */ +#define ARMV7_FLAG_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx)) +#define ARMV7_FLAG_C (1 << ARMV7_CCNT) +#define ARMV7_FLAG_MASK 0xffffffff /* Mask for writable bits */ +#define ARMV7_OVERFLOWED_MASK ARMV7_FLAG_MASK + +static inline unsigned long 
armv7_pmnc_read(void) +{ + u32 val; + asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val)); + return val; +} + +static inline void armv7_pmnc_write(unsigned long val) +{ + val &= ARMV7_PMNC_MASK; + asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val)); +} + +static inline int armv7_pmnc_has_overflowed(unsigned long pmnc) +{ + return pmnc & ARMV7_OVERFLOWED_MASK; +} + +static inline int armv7_pmnc_counter_has_overflowed(unsigned long pmnc, + enum armv7_counters counter) +{ + int ret = 0; + + if (counter == ARMV7_CYCLE_COUNTER) + ret = pmnc & ARMV7_FLAG_C; + else if ((counter >= ARMV7_COUNTER0) && (counter <= ARMV7_COUNTER_LAST)) + ret = pmnc & ARMV7_FLAG_P(counter); + else + pr_err("CPU%u checking wrong counter %d overflow status\n", + smp_processor_id(), counter); + + return ret; +} + +static inline int armv7_pmnc_select_counter(unsigned int idx) +{ + u32 val; + + if ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST)) { + pr_err("CPU%u selecting wrong PMNC counter" + " %d\n", smp_processor_id(), idx); + return -1; + } + + val = (idx - ARMV7_EVENT_CNT_TO_CNTx) & ARMV7_SELECT_MASK; + asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (val)); + + return idx; +} + +static inline u32 armv7pmu_read_counter(int idx) +{ + unsigned long value = 0; + + if (idx == ARMV7_CYCLE_COUNTER) + asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value)); + else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) { + if (armv7_pmnc_select_counter(idx) == idx) + asm volatile("mrc p15, 0, %0, c9, c13, 2" + : "=r" (value)); + } else + pr_err("CPU%u reading wrong counter %d\n", + smp_processor_id(), idx); + + return value; +} + +static inline void armv7pmu_write_counter(int idx, u32 value) +{ + if (idx == ARMV7_CYCLE_COUNTER) + asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value)); + else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) { + if (armv7_pmnc_select_counter(idx) == idx) + asm volatile("mcr p15, 0, %0, c9, c13, 2" + : : "r" (value)); + } else + pr_err("CPU%u writing wrong counter %d\n", + smp_processor_id(), idx); +} + +static inline void armv7_pmnc_write_evtsel(unsigned int idx, u32 val) +{ + if (armv7_pmnc_select_counter(idx) == idx) { + val &= ARMV7_EVTSEL_MASK; + asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val)); + } +} + +static inline u32 armv7_pmnc_enable_counter(unsigned int idx) +{ + u32 val; + + if ((idx != ARMV7_CYCLE_COUNTER) && + ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) { + pr_err("CPU%u enabling wrong PMNC counter" + " %d\n", smp_processor_id(), idx); + return -1; + } + + if (idx == ARMV7_CYCLE_COUNTER) + val = ARMV7_CNTENS_C; + else + val = ARMV7_CNTENS_P(idx); + + asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (val)); + + return idx; +} + +static inline u32 armv7_pmnc_disable_counter(unsigned int idx) +{ + u32 val; + + + if ((idx != ARMV7_CYCLE_COUNTER) && + ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) { + pr_err("CPU%u disabling wrong PMNC counter" + " %d\n", smp_processor_id(), idx); + return -1; + } + + if (idx == ARMV7_CYCLE_COUNTER) + val = ARMV7_CNTENC_C; + else + val = ARMV7_CNTENC_P(idx); + + asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (val)); + + return idx; +} + +static inline u32 armv7_pmnc_enable_intens(unsigned int idx) +{ + u32 val; + + if ((idx != ARMV7_CYCLE_COUNTER) && + ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) { + pr_err("CPU%u enabling wrong PMNC counter" + " interrupt enable %d\n", smp_processor_id(), idx); + return -1; + } + + if (idx == ARMV7_CYCLE_COUNTER) + val = ARMV7_INTENS_C; 
+ else + val = ARMV7_INTENS_P(idx); + + asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (val)); + + return idx; +} + +static inline u32 armv7_pmnc_disable_intens(unsigned int idx) +{ + u32 val; + + if ((idx != ARMV7_CYCLE_COUNTER) && + ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) { + pr_err("CPU%u disabling wrong PMNC counter" + " interrupt enable %d\n", smp_processor_id(), idx); + return -1; + } + + if (idx == ARMV7_CYCLE_COUNTER) + val = ARMV7_INTENC_C; + else + val = ARMV7_INTENC_P(idx); + + asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (val)); + + return idx; +} + +static inline u32 armv7_pmnc_getreset_flags(void) +{ + u32 val; + + /* Read */ + asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val)); + + /* Write to clear flags */ + val &= ARMV7_FLAG_MASK; + asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val)); + + return val; +} + +#ifdef DEBUG +static void armv7_pmnc_dump_regs(void) +{ + u32 val; + unsigned int cnt; + + printk(KERN_INFO "PMNC registers dump:\n"); + + asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val)); + printk(KERN_INFO "PMNC =0x%08x\n", val); + + asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val)); + printk(KERN_INFO "CNTENS=0x%08x\n", val); + + asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val)); + printk(KERN_INFO "INTENS=0x%08x\n", val); + + asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val)); + printk(KERN_INFO "FLAGS =0x%08x\n", val); + + asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val)); + printk(KERN_INFO "SELECT=0x%08x\n", val); + + asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val)); + printk(KERN_INFO "CCNT =0x%08x\n", val); + + for (cnt = ARMV7_COUNTER0; cnt < ARMV7_COUNTER_LAST; cnt++) { + armv7_pmnc_select_counter(cnt); + asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val)); + printk(KERN_INFO "CNT[%d] count =0x%08x\n", + cnt-ARMV7_EVENT_CNT_TO_CNTx, val); + asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val)); + printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n", + cnt-ARMV7_EVENT_CNT_TO_CNTx, val); + } +} +#endif + +static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx) +{ + unsigned long flags; + + /* + * Enable counter and interrupt, and set the counter to count + * the event that we're interested in. + */ + raw_spin_lock_irqsave(&pmu_lock, flags); + + /* + * Disable counter + */ + armv7_pmnc_disable_counter(idx); + + /* + * Set event (if destined for PMNx counters) + * We don't need to set the event if it's a cycle count + */ + if (idx != ARMV7_CYCLE_COUNTER) + armv7_pmnc_write_evtsel(idx, hwc->config_base); + + /* + * Enable interrupt for this counter + */ + armv7_pmnc_enable_intens(idx); + + /* + * Enable counter + */ + armv7_pmnc_enable_counter(idx); + + raw_spin_unlock_irqrestore(&pmu_lock, flags); +} + +static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx) +{ + unsigned long flags; + + /* + * Disable counter and interrupt + */ + raw_spin_lock_irqsave(&pmu_lock, flags); + + /* + * Disable counter + */ + armv7_pmnc_disable_counter(idx); + + /* + * Disable interrupt for this counter + */ + armv7_pmnc_disable_intens(idx); + + raw_spin_unlock_irqrestore(&pmu_lock, flags); +} + +static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev) +{ + unsigned long pmnc; + struct perf_sample_data data; + struct cpu_hw_events *cpuc; + struct pt_regs *regs; + int idx; + + /* + * Get and reset the IRQ flags + */ + pmnc = armv7_pmnc_getreset_flags(); + + /* + * Did an overflow occur? 
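+	 * If not, this interrupt did not come from the PMU: return
+	 * IRQ_NONE so the core can treat it as spurious (or hand it to
+	 * another handler if the line is shared).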
+ */ + if (!armv7_pmnc_has_overflowed(pmnc)) + return IRQ_NONE; + + /* + * Handle the counter(s) overflow(s) + */ + regs = get_irq_regs(); + + perf_sample_data_init(&data, 0); + + cpuc = &__get_cpu_var(cpu_hw_events); + for (idx = 0; idx <= armpmu->num_events; ++idx) { + struct perf_event *event = cpuc->events[idx]; + struct hw_perf_event *hwc; + + if (!test_bit(idx, cpuc->active_mask)) + continue; + + /* + * We have a single interrupt for all counters. Check that + * each counter has overflowed before we process it. + */ + if (!armv7_pmnc_counter_has_overflowed(pmnc, idx)) + continue; + + hwc = &event->hw; + armpmu_event_update(event, hwc, idx); + data.period = event->hw.last_period; + if (!armpmu_event_set_period(event, hwc, idx)) + continue; + + if (perf_event_overflow(event, 0, &data, regs)) + armpmu->disable(hwc, idx); + } + + /* + * Handle the pending perf events. + * + * Note: this call *must* be run with interrupts disabled. For + * platforms that can have the PMU interrupts raised as an NMI, this + * will not work. + */ + irq_work_run(); + + return IRQ_HANDLED; +} + +static void armv7pmu_start(void) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&pmu_lock, flags); + /* Enable all counters */ + armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E); + raw_spin_unlock_irqrestore(&pmu_lock, flags); +} + +static void armv7pmu_stop(void) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&pmu_lock, flags); + /* Disable all counters */ + armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E); + raw_spin_unlock_irqrestore(&pmu_lock, flags); +} + +static int armv7pmu_get_event_idx(struct cpu_hw_events *cpuc, + struct hw_perf_event *event) +{ + int idx; + + /* Always place a cycle counter into the cycle counter. */ + if (event->config_base == ARMV7_PERFCTR_CPU_CYCLES) { + if (test_and_set_bit(ARMV7_CYCLE_COUNTER, cpuc->used_mask)) + return -EAGAIN; + + return ARMV7_CYCLE_COUNTER; + } else { + /* + * For anything other than a cycle counter, try and use + * the events counters + */ + for (idx = ARMV7_COUNTER0; idx <= armpmu->num_events; ++idx) { + if (!test_and_set_bit(idx, cpuc->used_mask)) + return idx; + } + + /* The counters are all in use. 
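+		 * Returning -EAGAIN tells the core that no counter is
+		 * currently free for this event.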
*/ + return -EAGAIN; + } +} + +static struct arm_pmu armv7pmu = { + .handle_irq = armv7pmu_handle_irq, + .enable = armv7pmu_enable_event, + .disable = armv7pmu_disable_event, + .read_counter = armv7pmu_read_counter, + .write_counter = armv7pmu_write_counter, + .get_event_idx = armv7pmu_get_event_idx, + .start = armv7pmu_start, + .stop = armv7pmu_stop, + .raw_event_mask = 0xFF, + .max_period = (1LLU << 32) - 1, +}; + +static u32 __init armv7_reset_read_pmnc(void) +{ + u32 nb_cnt; + + /* Initialize & Reset PMNC: C and P bits */ + armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C); + + /* Read the nb of CNTx counters supported from PMNC */ + nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK; + + /* Add the CPU cycles counter and return */ + return nb_cnt + 1; +} + +static const struct arm_pmu *__init armv7_a8_pmu_init(void) +{ + armv7pmu.id = ARM_PERF_PMU_ID_CA8; + armv7pmu.name = "ARMv7 Cortex-A8"; + armv7pmu.cache_map = &armv7_a8_perf_cache_map; + armv7pmu.event_map = &armv7_a8_perf_map; + armv7pmu.num_events = armv7_reset_read_pmnc(); + return &armv7pmu; +} + +static const struct arm_pmu *__init armv7_a9_pmu_init(void) +{ + armv7pmu.id = ARM_PERF_PMU_ID_CA9; + armv7pmu.name = "ARMv7 Cortex-A9"; + armv7pmu.cache_map = &armv7_a9_perf_cache_map; + armv7pmu.event_map = &armv7_a9_perf_map; + armv7pmu.num_events = armv7_reset_read_pmnc(); + return &armv7pmu; +} +#else +static const struct arm_pmu *__init armv7_a8_pmu_init(void) +{ + return NULL; +} + +static const struct arm_pmu *__init armv7_a9_pmu_init(void) +{ + return NULL; +} +#endif /* CONFIG_CPU_V7 */ diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c new file mode 100644 index 0000000..28cd3b0 --- /dev/null +++ b/arch/arm/kernel/perf_event_xscale.c @@ -0,0 +1,807 @@ +/* + * ARMv5 [xscale] Performance counter handling code. + * + * Copyright (C) 2010, ARM Ltd., Will Deacon <will.deacon@arm.com> + * + * Based on the previous xscale OProfile code. + * + * There are two variants of the xscale PMU that we support: + * - xscale1pmu: 2 event counters and a cycle counter + * - xscale2pmu: 4 event counters and a cycle counter + * The two variants share event definitions, but have different + * PMU structures. 
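+ *
+ * Both variants are programmed through coprocessor 14 (CP14)
+ * registers, as the mrc/mcr p14 accessors below show.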
+ */ + +#ifdef CONFIG_CPU_XSCALE +enum xscale_perf_types { + XSCALE_PERFCTR_ICACHE_MISS = 0x00, + XSCALE_PERFCTR_ICACHE_NO_DELIVER = 0x01, + XSCALE_PERFCTR_DATA_STALL = 0x02, + XSCALE_PERFCTR_ITLB_MISS = 0x03, + XSCALE_PERFCTR_DTLB_MISS = 0x04, + XSCALE_PERFCTR_BRANCH = 0x05, + XSCALE_PERFCTR_BRANCH_MISS = 0x06, + XSCALE_PERFCTR_INSTRUCTION = 0x07, + XSCALE_PERFCTR_DCACHE_FULL_STALL = 0x08, + XSCALE_PERFCTR_DCACHE_FULL_STALL_CONTIG = 0x09, + XSCALE_PERFCTR_DCACHE_ACCESS = 0x0A, + XSCALE_PERFCTR_DCACHE_MISS = 0x0B, + XSCALE_PERFCTR_DCACHE_WRITE_BACK = 0x0C, + XSCALE_PERFCTR_PC_CHANGED = 0x0D, + XSCALE_PERFCTR_BCU_REQUEST = 0x10, + XSCALE_PERFCTR_BCU_FULL = 0x11, + XSCALE_PERFCTR_BCU_DRAIN = 0x12, + XSCALE_PERFCTR_BCU_ECC_NO_ELOG = 0x14, + XSCALE_PERFCTR_BCU_1_BIT_ERR = 0x15, + XSCALE_PERFCTR_RMW = 0x16, + /* XSCALE_PERFCTR_CCNT is not hardware defined */ + XSCALE_PERFCTR_CCNT = 0xFE, + XSCALE_PERFCTR_UNUSED = 0xFF, +}; + +enum xscale_counters { + XSCALE_CYCLE_COUNTER = 1, + XSCALE_COUNTER0, + XSCALE_COUNTER1, + XSCALE_COUNTER2, + XSCALE_COUNTER3, +}; + +static const unsigned xscale_perf_map[PERF_COUNT_HW_MAX] = { + [PERF_COUNT_HW_CPU_CYCLES] = XSCALE_PERFCTR_CCNT, + [PERF_COUNT_HW_INSTRUCTIONS] = XSCALE_PERFCTR_INSTRUCTION, + [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED, + [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = XSCALE_PERFCTR_BRANCH, + [PERF_COUNT_HW_BRANCH_MISSES] = XSCALE_PERFCTR_BRANCH_MISS, + [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED, +}; + +static const unsigned xscale_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX] = { + [C(L1D)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = XSCALE_PERFCTR_DCACHE_ACCESS, + [C(RESULT_MISS)] = XSCALE_PERFCTR_DCACHE_MISS, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = XSCALE_PERFCTR_DCACHE_ACCESS, + [C(RESULT_MISS)] = XSCALE_PERFCTR_DCACHE_MISS, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(L1I)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = XSCALE_PERFCTR_ICACHE_MISS, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = XSCALE_PERFCTR_ICACHE_MISS, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(LL)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(DTLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = XSCALE_PERFCTR_DTLB_MISS, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = XSCALE_PERFCTR_DTLB_MISS, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(ITLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = XSCALE_PERFCTR_ITLB_MISS, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = XSCALE_PERFCTR_ITLB_MISS, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(BPU)] = { + 
[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+};
+
+#define	XSCALE_PMU_ENABLE	0x001
+#define XSCALE_PMN_RESET	0x002
+#define	XSCALE_CCNT_RESET	0x004
+#define	XSCALE_PMU_RESET	(XSCALE_CCNT_RESET | XSCALE_PMN_RESET)
+#define XSCALE_PMU_CNT64	0x008
+
+#define XSCALE1_OVERFLOWED_MASK	0x700
+#define XSCALE1_CCOUNT_OVERFLOW	0x400
+#define XSCALE1_COUNT0_OVERFLOW	0x100
+#define XSCALE1_COUNT1_OVERFLOW	0x200
+#define XSCALE1_CCOUNT_INT_EN	0x040
+#define XSCALE1_COUNT0_INT_EN	0x010
+#define XSCALE1_COUNT1_INT_EN	0x020
+#define XSCALE1_COUNT0_EVT_SHFT	12
+#define XSCALE1_COUNT0_EVT_MASK	(0xff << XSCALE1_COUNT0_EVT_SHFT)
+#define XSCALE1_COUNT1_EVT_SHFT	20
+#define XSCALE1_COUNT1_EVT_MASK	(0xff << XSCALE1_COUNT1_EVT_SHFT)
+
+static inline u32
+xscale1pmu_read_pmnc(void)
+{
+	u32 val;
+	asm volatile("mrc p14, 0, %0, c0, c0, 0" : "=r" (val));
+	return val;
+}
+
+static inline void
+xscale1pmu_write_pmnc(u32 val)
+{
+	/* the upper 4 bits and bits 7 and 11 are write-as-0 */
+	val &= 0xffff77f;
+	asm volatile("mcr p14, 0, %0, c0, c0, 0" : : "r" (val));
+}
+
+static inline int
+xscale1_pmnc_counter_has_overflowed(unsigned long pmnc,
+					enum xscale_counters counter)
+{
+	int ret = 0;
+
+	switch (counter) {
+	case XSCALE_CYCLE_COUNTER:
+		ret = pmnc & XSCALE1_CCOUNT_OVERFLOW;
+		break;
+	case XSCALE_COUNTER0:
+		ret = pmnc & XSCALE1_COUNT0_OVERFLOW;
+		break;
+	case XSCALE_COUNTER1:
+		ret = pmnc & XSCALE1_COUNT1_OVERFLOW;
+		break;
+	default:
+		WARN_ONCE(1, "invalid counter number (%d)\n", counter);
+	}
+
+	return ret;
+}
+
+static irqreturn_t
+xscale1pmu_handle_irq(int irq_num, void *dev)
+{
+	unsigned long pmnc;
+	struct perf_sample_data data;
+	struct cpu_hw_events *cpuc;
+	struct pt_regs *regs;
+	int idx;
+
+	/*
+	 * NOTE: there's an A stepping erratum whereby if an overflow
+	 *	 bit is already set and another overflow occurs, the
+	 *	 previous overflow bit gets cleared. There's no
+	 *	 workaround; this is fixed in B stepping or later.
+	 */
+	pmnc = xscale1pmu_read_pmnc();
+
+	/*
+	 * Write the value back to clear the overflow flags. Overflow
+	 * flags remain in pmnc for use below. We also disable the PMU
+	 * while we process the interrupt.
+	 */
+	xscale1pmu_write_pmnc(pmnc & ~XSCALE_PMU_ENABLE);
+
+	if (!(pmnc & XSCALE1_OVERFLOWED_MASK))
+		return IRQ_NONE;
+
+	regs = get_irq_regs();
+
+	perf_sample_data_init(&data, 0);
+
+	cpuc = &__get_cpu_var(cpu_hw_events);
+	for (idx = 0; idx <= armpmu->num_events; ++idx) {
+		struct perf_event *event = cpuc->events[idx];
+		struct hw_perf_event *hwc;
+
+		if (!test_bit(idx, cpuc->active_mask))
+			continue;
+
+		if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx))
+			continue;
+
+		hwc = &event->hw;
+		armpmu_event_update(event, hwc, idx);
+		data.period = event->hw.last_period;
+		if (!armpmu_event_set_period(event, hwc, idx))
+			continue;
+
+		if (perf_event_overflow(event, 0, &data, regs))
+			armpmu->disable(hwc, idx);
+	}
+
+	irq_work_run();
+
+	/*
+	 * Re-enable the PMU.
+ */ + pmnc = xscale1pmu_read_pmnc() | XSCALE_PMU_ENABLE; + xscale1pmu_write_pmnc(pmnc); + + return IRQ_HANDLED; +} + +static void +xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx) +{ + unsigned long val, mask, evt, flags; + + switch (idx) { + case XSCALE_CYCLE_COUNTER: + mask = 0; + evt = XSCALE1_CCOUNT_INT_EN; + break; + case XSCALE_COUNTER0: + mask = XSCALE1_COUNT0_EVT_MASK; + evt = (hwc->config_base << XSCALE1_COUNT0_EVT_SHFT) | + XSCALE1_COUNT0_INT_EN; + break; + case XSCALE_COUNTER1: + mask = XSCALE1_COUNT1_EVT_MASK; + evt = (hwc->config_base << XSCALE1_COUNT1_EVT_SHFT) | + XSCALE1_COUNT1_INT_EN; + break; + default: + WARN_ONCE(1, "invalid counter number (%d)\n", idx); + return; + } + + raw_spin_lock_irqsave(&pmu_lock, flags); + val = xscale1pmu_read_pmnc(); + val &= ~mask; + val |= evt; + xscale1pmu_write_pmnc(val); + raw_spin_unlock_irqrestore(&pmu_lock, flags); +} + +static void +xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx) +{ + unsigned long val, mask, evt, flags; + + switch (idx) { + case XSCALE_CYCLE_COUNTER: + mask = XSCALE1_CCOUNT_INT_EN; + evt = 0; + break; + case XSCALE_COUNTER0: + mask = XSCALE1_COUNT0_INT_EN | XSCALE1_COUNT0_EVT_MASK; + evt = XSCALE_PERFCTR_UNUSED << XSCALE1_COUNT0_EVT_SHFT; + break; + case XSCALE_COUNTER1: + mask = XSCALE1_COUNT1_INT_EN | XSCALE1_COUNT1_EVT_MASK; + evt = XSCALE_PERFCTR_UNUSED << XSCALE1_COUNT1_EVT_SHFT; + break; + default: + WARN_ONCE(1, "invalid counter number (%d)\n", idx); + return; + } + + raw_spin_lock_irqsave(&pmu_lock, flags); + val = xscale1pmu_read_pmnc(); + val &= ~mask; + val |= evt; + xscale1pmu_write_pmnc(val); + raw_spin_unlock_irqrestore(&pmu_lock, flags); +} + +static int +xscale1pmu_get_event_idx(struct cpu_hw_events *cpuc, + struct hw_perf_event *event) +{ + if (XSCALE_PERFCTR_CCNT == event->config_base) { + if (test_and_set_bit(XSCALE_CYCLE_COUNTER, cpuc->used_mask)) + return -EAGAIN; + + return XSCALE_CYCLE_COUNTER; + } else { + if (!test_and_set_bit(XSCALE_COUNTER1, cpuc->used_mask)) + return XSCALE_COUNTER1; + + if (!test_and_set_bit(XSCALE_COUNTER0, cpuc->used_mask)) + return XSCALE_COUNTER0; + + return -EAGAIN; + } +} + +static void +xscale1pmu_start(void) +{ + unsigned long flags, val; + + raw_spin_lock_irqsave(&pmu_lock, flags); + val = xscale1pmu_read_pmnc(); + val |= XSCALE_PMU_ENABLE; + xscale1pmu_write_pmnc(val); + raw_spin_unlock_irqrestore(&pmu_lock, flags); +} + +static void +xscale1pmu_stop(void) +{ + unsigned long flags, val; + + raw_spin_lock_irqsave(&pmu_lock, flags); + val = xscale1pmu_read_pmnc(); + val &= ~XSCALE_PMU_ENABLE; + xscale1pmu_write_pmnc(val); + raw_spin_unlock_irqrestore(&pmu_lock, flags); +} + +static inline u32 +xscale1pmu_read_counter(int counter) +{ + u32 val = 0; + + switch (counter) { + case XSCALE_CYCLE_COUNTER: + asm volatile("mrc p14, 0, %0, c1, c0, 0" : "=r" (val)); + break; + case XSCALE_COUNTER0: + asm volatile("mrc p14, 0, %0, c2, c0, 0" : "=r" (val)); + break; + case XSCALE_COUNTER1: + asm volatile("mrc p14, 0, %0, c3, c0, 0" : "=r" (val)); + break; + } + + return val; +} + +static inline void +xscale1pmu_write_counter(int counter, u32 val) +{ + switch (counter) { + case XSCALE_CYCLE_COUNTER: + asm volatile("mcr p14, 0, %0, c1, c0, 0" : : "r" (val)); + break; + case XSCALE_COUNTER0: + asm volatile("mcr p14, 0, %0, c2, c0, 0" : : "r" (val)); + break; + case XSCALE_COUNTER1: + asm volatile("mcr p14, 0, %0, c3, c0, 0" : : "r" (val)); + break; + } +} + +static const struct arm_pmu xscale1pmu = { + .id = ARM_PERF_PMU_ID_XSCALE1, + .name = "xscale1", + 
.handle_irq = xscale1pmu_handle_irq, + .enable = xscale1pmu_enable_event, + .disable = xscale1pmu_disable_event, + .read_counter = xscale1pmu_read_counter, + .write_counter = xscale1pmu_write_counter, + .get_event_idx = xscale1pmu_get_event_idx, + .start = xscale1pmu_start, + .stop = xscale1pmu_stop, + .cache_map = &xscale_perf_cache_map, + .event_map = &xscale_perf_map, + .raw_event_mask = 0xFF, + .num_events = 3, + .max_period = (1LLU << 32) - 1, +}; + +static const struct arm_pmu *__init xscale1pmu_init(void) +{ + return &xscale1pmu; +} + +#define XSCALE2_OVERFLOWED_MASK 0x01f +#define XSCALE2_CCOUNT_OVERFLOW 0x001 +#define XSCALE2_COUNT0_OVERFLOW 0x002 +#define XSCALE2_COUNT1_OVERFLOW 0x004 +#define XSCALE2_COUNT2_OVERFLOW 0x008 +#define XSCALE2_COUNT3_OVERFLOW 0x010 +#define XSCALE2_CCOUNT_INT_EN 0x001 +#define XSCALE2_COUNT0_INT_EN 0x002 +#define XSCALE2_COUNT1_INT_EN 0x004 +#define XSCALE2_COUNT2_INT_EN 0x008 +#define XSCALE2_COUNT3_INT_EN 0x010 +#define XSCALE2_COUNT0_EVT_SHFT 0 +#define XSCALE2_COUNT0_EVT_MASK (0xff << XSCALE2_COUNT0_EVT_SHFT) +#define XSCALE2_COUNT1_EVT_SHFT 8 +#define XSCALE2_COUNT1_EVT_MASK (0xff << XSCALE2_COUNT1_EVT_SHFT) +#define XSCALE2_COUNT2_EVT_SHFT 16 +#define XSCALE2_COUNT2_EVT_MASK (0xff << XSCALE2_COUNT2_EVT_SHFT) +#define XSCALE2_COUNT3_EVT_SHFT 24 +#define XSCALE2_COUNT3_EVT_MASK (0xff << XSCALE2_COUNT3_EVT_SHFT) + +static inline u32 +xscale2pmu_read_pmnc(void) +{ + u32 val; + asm volatile("mrc p14, 0, %0, c0, c1, 0" : "=r" (val)); + /* bits 1-2 and 4-23 are read-unpredictable */ + return val & 0xff000009; +} + +static inline void +xscale2pmu_write_pmnc(u32 val) +{ + /* bits 4-23 are write-as-0, 24-31 are write ignored */ + val &= 0xf; + asm volatile("mcr p14, 0, %0, c0, c1, 0" : : "r" (val)); +} + +static inline u32 +xscale2pmu_read_overflow_flags(void) +{ + u32 val; + asm volatile("mrc p14, 0, %0, c5, c1, 0" : "=r" (val)); + return val; +} + +static inline void +xscale2pmu_write_overflow_flags(u32 val) +{ + asm volatile("mcr p14, 0, %0, c5, c1, 0" : : "r" (val)); +} + +static inline u32 +xscale2pmu_read_event_select(void) +{ + u32 val; + asm volatile("mrc p14, 0, %0, c8, c1, 0" : "=r" (val)); + return val; +} + +static inline void +xscale2pmu_write_event_select(u32 val) +{ + asm volatile("mcr p14, 0, %0, c8, c1, 0" : : "r"(val)); +} + +static inline u32 +xscale2pmu_read_int_enable(void) +{ + u32 val; + asm volatile("mrc p14, 0, %0, c4, c1, 0" : "=r" (val)); + return val; +} + +static void +xscale2pmu_write_int_enable(u32 val) +{ + asm volatile("mcr p14, 0, %0, c4, c1, 0" : : "r" (val)); +} + +static inline int +xscale2_pmnc_counter_has_overflowed(unsigned long of_flags, + enum xscale_counters counter) +{ + int ret = 0; + + switch (counter) { + case XSCALE_CYCLE_COUNTER: + ret = of_flags & XSCALE2_CCOUNT_OVERFLOW; + break; + case XSCALE_COUNTER0: + ret = of_flags & XSCALE2_COUNT0_OVERFLOW; + break; + case XSCALE_COUNTER1: + ret = of_flags & XSCALE2_COUNT1_OVERFLOW; + break; + case XSCALE_COUNTER2: + ret = of_flags & XSCALE2_COUNT2_OVERFLOW; + break; + case XSCALE_COUNTER3: + ret = of_flags & XSCALE2_COUNT3_OVERFLOW; + break; + default: + WARN_ONCE(1, "invalid counter number (%d)\n", counter); + } + + return ret; +} + +static irqreturn_t +xscale2pmu_handle_irq(int irq_num, void *dev) +{ + unsigned long pmnc, of_flags; + struct perf_sample_data data; + struct cpu_hw_events *cpuc; + struct pt_regs *regs; + int idx; + + /* Disable the PMU. 
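+	 * It stays disabled while the overflow flags are read and
+	 * cleared, and is re-enabled at the end of the handler.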
	 */
+	pmnc = xscale2pmu_read_pmnc();
+	xscale2pmu_write_pmnc(pmnc & ~XSCALE_PMU_ENABLE);
+
+	/* Check the overflow flag register. */
+	of_flags = xscale2pmu_read_overflow_flags();
+	if (!(of_flags & XSCALE2_OVERFLOWED_MASK))
+		return IRQ_NONE;
+
+	/* Clear the overflow bits. */
+	xscale2pmu_write_overflow_flags(of_flags);
+
+	regs = get_irq_regs();
+
+	perf_sample_data_init(&data, 0);
+
+	cpuc = &__get_cpu_var(cpu_hw_events);
+	for (idx = 0; idx <= armpmu->num_events; ++idx) {
+		struct perf_event *event = cpuc->events[idx];
+		struct hw_perf_event *hwc;
+
+		if (!test_bit(idx, cpuc->active_mask))
+			continue;
+
+		if (!xscale2_pmnc_counter_has_overflowed(of_flags, idx))
+			continue;
+
+		hwc = &event->hw;
+		armpmu_event_update(event, hwc, idx);
+		data.period = event->hw.last_period;
+		if (!armpmu_event_set_period(event, hwc, idx))
+			continue;
+
+		if (perf_event_overflow(event, 0, &data, regs))
+			armpmu->disable(hwc, idx);
+	}
+
+	irq_work_run();
+
+	/*
+	 * Re-enable the PMU.
+	 */
+	pmnc = xscale2pmu_read_pmnc() | XSCALE_PMU_ENABLE;
+	xscale2pmu_write_pmnc(pmnc);
+
+	return IRQ_HANDLED;
+}
+
+static void
+xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
+{
+	unsigned long flags, ien, evtsel;
+
+	ien = xscale2pmu_read_int_enable();
+	evtsel = xscale2pmu_read_event_select();
+
+	switch (idx) {
+	case XSCALE_CYCLE_COUNTER:
+		ien |= XSCALE2_CCOUNT_INT_EN;
+		break;
+	case XSCALE_COUNTER0:
+		ien |= XSCALE2_COUNT0_INT_EN;
+		evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
+		evtsel |= hwc->config_base << XSCALE2_COUNT0_EVT_SHFT;
+		break;
+	case XSCALE_COUNTER1:
+		ien |= XSCALE2_COUNT1_INT_EN;
+		evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
+		evtsel |= hwc->config_base << XSCALE2_COUNT1_EVT_SHFT;
+		break;
+	case XSCALE_COUNTER2:
+		ien |= XSCALE2_COUNT2_INT_EN;
+		evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
+		evtsel |= hwc->config_base << XSCALE2_COUNT2_EVT_SHFT;
+		break;
+	case XSCALE_COUNTER3:
+		ien |= XSCALE2_COUNT3_INT_EN;
+		evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
+		evtsel |= hwc->config_base << XSCALE2_COUNT3_EVT_SHFT;
+		break;
+	default:
+		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
+		return;
+	}
+
+	raw_spin_lock_irqsave(&pmu_lock, flags);
+	xscale2pmu_write_event_select(evtsel);
+	xscale2pmu_write_int_enable(ien);
+	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+}
+
+static void
+xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
+{
+	unsigned long flags, ien, evtsel;
+
+	ien = xscale2pmu_read_int_enable();
+	evtsel = xscale2pmu_read_event_select();
+
+	switch (idx) {
+	case XSCALE_CYCLE_COUNTER:
+		ien &= ~XSCALE2_CCOUNT_INT_EN;
+		break;
+	case XSCALE_COUNTER0:
+		ien &= ~XSCALE2_COUNT0_INT_EN;
+		evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
+		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT0_EVT_SHFT;
+		break;
+	case XSCALE_COUNTER1:
+		ien &= ~XSCALE2_COUNT1_INT_EN;
+		evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
+		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT1_EVT_SHFT;
+		break;
+	case XSCALE_COUNTER2:
+		ien &= ~XSCALE2_COUNT2_INT_EN;
+		evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
+		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT2_EVT_SHFT;
+		break;
+	case XSCALE_COUNTER3:
+		ien &= ~XSCALE2_COUNT3_INT_EN;
+		evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
+		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT3_EVT_SHFT;
+		break;
+	default:
+		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
+		return;
+	}
+
+	raw_spin_lock_irqsave(&pmu_lock, flags);
+	xscale2pmu_write_event_select(evtsel);
+	xscale2pmu_write_int_enable(ien);
+	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+}
+
+static int
+xscale2pmu_get_event_idx(struct cpu_hw_events *cpuc,
struct hw_perf_event *event) +{ + int idx = xscale1pmu_get_event_idx(cpuc, event); + if (idx >= 0) + goto out; + + if (!test_and_set_bit(XSCALE_COUNTER3, cpuc->used_mask)) + idx = XSCALE_COUNTER3; + else if (!test_and_set_bit(XSCALE_COUNTER2, cpuc->used_mask)) + idx = XSCALE_COUNTER2; +out: + return idx; +} + +static void +xscale2pmu_start(void) +{ + unsigned long flags, val; + + raw_spin_lock_irqsave(&pmu_lock, flags); + val = xscale2pmu_read_pmnc() & ~XSCALE_PMU_CNT64; + val |= XSCALE_PMU_ENABLE; + xscale2pmu_write_pmnc(val); + raw_spin_unlock_irqrestore(&pmu_lock, flags); +} + +static void +xscale2pmu_stop(void) +{ + unsigned long flags, val; + + raw_spin_lock_irqsave(&pmu_lock, flags); + val = xscale2pmu_read_pmnc(); + val &= ~XSCALE_PMU_ENABLE; + xscale2pmu_write_pmnc(val); + raw_spin_unlock_irqrestore(&pmu_lock, flags); +} + +static inline u32 +xscale2pmu_read_counter(int counter) +{ + u32 val = 0; + + switch (counter) { + case XSCALE_CYCLE_COUNTER: + asm volatile("mrc p14, 0, %0, c1, c1, 0" : "=r" (val)); + break; + case XSCALE_COUNTER0: + asm volatile("mrc p14, 0, %0, c0, c2, 0" : "=r" (val)); + break; + case XSCALE_COUNTER1: + asm volatile("mrc p14, 0, %0, c1, c2, 0" : "=r" (val)); + break; + case XSCALE_COUNTER2: + asm volatile("mrc p14, 0, %0, c2, c2, 0" : "=r" (val)); + break; + case XSCALE_COUNTER3: + asm volatile("mrc p14, 0, %0, c3, c2, 0" : "=r" (val)); + break; + } + + return val; +} + +static inline void +xscale2pmu_write_counter(int counter, u32 val) +{ + switch (counter) { + case XSCALE_CYCLE_COUNTER: + asm volatile("mcr p14, 0, %0, c1, c1, 0" : : "r" (val)); + break; + case XSCALE_COUNTER0: + asm volatile("mcr p14, 0, %0, c0, c2, 0" : : "r" (val)); + break; + case XSCALE_COUNTER1: + asm volatile("mcr p14, 0, %0, c1, c2, 0" : : "r" (val)); + break; + case XSCALE_COUNTER2: + asm volatile("mcr p14, 0, %0, c2, c2, 0" : : "r" (val)); + break; + case XSCALE_COUNTER3: + asm volatile("mcr p14, 0, %0, c3, c2, 0" : : "r" (val)); + break; + } +} + +static const struct arm_pmu xscale2pmu = { + .id = ARM_PERF_PMU_ID_XSCALE2, + .name = "xscale2", + .handle_irq = xscale2pmu_handle_irq, + .enable = xscale2pmu_enable_event, + .disable = xscale2pmu_disable_event, + .read_counter = xscale2pmu_read_counter, + .write_counter = xscale2pmu_write_counter, + .get_event_idx = xscale2pmu_get_event_idx, + .start = xscale2pmu_start, + .stop = xscale2pmu_stop, + .cache_map = &xscale_perf_cache_map, + .event_map = &xscale_perf_map, + .raw_event_mask = 0xFF, + .num_events = 5, + .max_period = (1LLU << 32) - 1, +}; + +static const struct arm_pmu *__init xscale2pmu_init(void) +{ + return &xscale2pmu; +} +#else +static const struct arm_pmu *__init xscale1pmu_init(void) +{ + return NULL; +} + +static const struct arm_pmu *__init xscale2pmu_init(void) +{ + return NULL; +} +#endif /* CONFIG_CPU_XSCALE */ diff --git a/arch/arm/kernel/pj4-cp0.c b/arch/arm/kernel/pj4-cp0.c new file mode 100644 index 0000000..a4b1b07 --- /dev/null +++ b/arch/arm/kernel/pj4-cp0.c @@ -0,0 +1,94 @@ +/* + * linux/arch/arm/kernel/pj4-cp0.c + * + * PJ4 iWMMXt coprocessor context switching and handling + * + * Copyright (c) 2010 Marvell International Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/module.h> +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/signal.h> +#include <linux/sched.h> +#include <linux/init.h> +#include <linux/io.h> +#include <asm/thread_notify.h> + +static int iwmmxt_do(struct notifier_block *self, unsigned long cmd, void *t) +{ + struct thread_info *thread = t; + + switch (cmd) { + case THREAD_NOTIFY_FLUSH: + /* + * flush_thread() zeroes thread->fpstate, so no need + * to do anything here. + * + * FALLTHROUGH: Ensure we don't try to overwrite our newly + * initialised state information on the first fault. + */ + + case THREAD_NOTIFY_EXIT: + iwmmxt_task_release(thread); + break; + + case THREAD_NOTIFY_SWITCH: + iwmmxt_task_switch(thread); + break; + } + + return NOTIFY_DONE; +} + +static struct notifier_block iwmmxt_notifier_block = { + .notifier_call = iwmmxt_do, +}; + + +static u32 __init pj4_cp_access_read(void) +{ + u32 value; + + __asm__ __volatile__ ( + "mrc p15, 0, %0, c1, c0, 2\n\t" + : "=r" (value)); + return value; +} + +static void __init pj4_cp_access_write(u32 value) +{ + u32 temp; + + __asm__ __volatile__ ( + "mcr p15, 0, %1, c1, c0, 2\n\t" + "mrc p15, 0, %0, c1, c0, 2\n\t" + "mov %0, %0\n\t" + "sub pc, pc, #4\n\t" + : "=r" (temp) : "r" (value)); +} + + +/* + * Disable CP0/CP1 on boot, and let call_fpe() and the iWMMXt lazy + * switch code handle iWMMXt context switching. + */ +static int __init pj4_cp0_init(void) +{ + u32 cp_access; + + cp_access = pj4_cp_access_read() & ~0xf; + pj4_cp_access_write(cp_access); + + printk(KERN_INFO "PJ4 iWMMXt coprocessor enabled.\n"); + elf_hwcap |= HWCAP_IWMMXT; + thread_register_notifier(&iwmmxt_notifier_block); + + return 0; +} + +late_initcall(pj4_cp0_init); diff --git a/arch/arm/kernel/pmu.c b/arch/arm/kernel/pmu.c new file mode 100644 index 0000000..b8af96e --- /dev/null +++ b/arch/arm/kernel/pmu.c @@ -0,0 +1,142 @@ +/* + * linux/arch/arm/kernel/pmu.c + * + * Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles + * Copyright (C) 2010 ARM Ltd, Will Deacon + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
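
pj4_cp0_init() above deliberately clears the low four bits of the coprocessor access register, so the first iWMMXt instruction traps and the lazy-switch code takes over. For contrast, a hypothetical helper that grants CP0/CP1 access with the same read-modify-write pattern (a sketch, not in the patch) would be:

    static void __init pj4_cp_access_enable(void)
    {
            u32 cp_access;

            /* Set, rather than clear, the CP0/CP1 access bits. */
            cp_access = pj4_cp_access_read() | 0xf;
            pj4_cp_access_write(cp_access);
    }

The mov/sub-pc dance in pj4_cp_access_write() acts as a pipeline flush, so the new access rights are in force before the next coprocessor instruction issues.
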
+ * + */ + +#define pr_fmt(fmt) "PMU: " fmt + +#include <linux/cpumask.h> +#include <linux/err.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/platform_device.h> + +#include <asm/pmu.h> + +static volatile long pmu_lock; + +static struct platform_device *pmu_devices[ARM_NUM_PMU_DEVICES]; + +static int __devinit pmu_device_probe(struct platform_device *pdev) +{ + + if (pdev->id < 0 || pdev->id >= ARM_NUM_PMU_DEVICES) { + pr_warning("received registration request for unknown " + "device %d\n", pdev->id); + return -EINVAL; + } + + if (pmu_devices[pdev->id]) + pr_warning("registering new PMU device type %d overwrites " + "previous registration!\n", pdev->id); + else + pr_info("registered new PMU device of type %d\n", + pdev->id); + + pmu_devices[pdev->id] = pdev; + return 0; +} + +static struct platform_driver pmu_driver = { + .driver = { + .name = "arm-pmu", + }, + .probe = pmu_device_probe, +}; + +static int __init register_pmu_driver(void) +{ + return platform_driver_register(&pmu_driver); +} +device_initcall(register_pmu_driver); + +struct platform_device * +reserve_pmu(enum arm_pmu_type device) +{ + struct platform_device *pdev; + + if (test_and_set_bit_lock(device, &pmu_lock)) { + pdev = ERR_PTR(-EBUSY); + } else if (pmu_devices[device] == NULL) { + clear_bit_unlock(device, &pmu_lock); + pdev = ERR_PTR(-ENODEV); + } else { + pdev = pmu_devices[device]; + } + + return pdev; +} +EXPORT_SYMBOL_GPL(reserve_pmu); + +int +release_pmu(struct platform_device *pdev) +{ + if (WARN_ON(pdev != pmu_devices[pdev->id])) + return -EINVAL; + clear_bit_unlock(pdev->id, &pmu_lock); + return 0; +} +EXPORT_SYMBOL_GPL(release_pmu); + +static int +set_irq_affinity(int irq, + unsigned int cpu) +{ +#ifdef CONFIG_SMP + int err = irq_set_affinity(irq, cpumask_of(cpu)); + if (err) + pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n", + irq, cpu); + return err; +#else + return 0; +#endif +} + +static int +init_cpu_pmu(void) +{ + int i, err = 0; + struct platform_device *pdev = pmu_devices[ARM_PMU_DEVICE_CPU]; + + if (!pdev) { + err = -ENODEV; + goto out; + } + + for (i = 0; i < pdev->num_resources; ++i) { + err = set_irq_affinity(platform_get_irq(pdev, i), i); + if (err) + break; + } + +out: + return err; +} + +int +init_pmu(enum arm_pmu_type device) +{ + int err = 0; + + switch (device) { + case ARM_PMU_DEVICE_CPU: + err = init_cpu_pmu(); + break; + default: + pr_warning("attempt to initialise unknown device %d\n", + device); + err = -EINVAL; + } + + return err; +} +EXPORT_SYMBOL_GPL(init_pmu); diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index 39196df..e76fcaa 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -16,7 +16,6 @@ #include <linux/mm.h> #include <linux/stddef.h> #include <linux/unistd.h> -#include <linux/slab.h> #include <linux/user.h> #include <linux/delay.h> #include <linux/reboot.h> @@ -29,7 +28,10 @@ #include <linux/tick.h> #include <linux/utsname.h> #include <linux/uaccess.h> +#include <linux/random.h> +#include <linux/hw_breakpoint.h> +#include <asm/cacheflush.h> #include <asm/leds.h> #include <asm/processor.h> #include <asm/system.h> @@ -37,6 +39,12 @@ #include <asm/stacktrace.h> #include <asm/mach/time.h> +#ifdef CONFIG_CC_STACKPROTECTOR +#include <linux/stackprotector.h> +unsigned long __stack_chk_guard __read_mostly; +EXPORT_SYMBOL(__stack_chk_guard); +#endif + static const char *processor_modes[] = { "USER_26", "FIQ_26" , "IRQ_26" , "SVC_26" , "UK4_26" , "UK5_26" , "UK6_26" , "UK7_26" , 
"UK8_26" , "UK9_26" , "UK10_26", "UK11_26", "UK12_26", "UK13_26", "UK14_26", "UK15_26", @@ -85,10 +93,9 @@ __setup("hlt", hlt_setup); void arm_machine_restart(char mode, const char *cmd) { - /* - * Clean and disable cache, and turn off interrupts - */ - cpu_proc_fin(); + /* Disable interrupts first */ + local_irq_disable(); + local_fiq_disable(); /* * Tell the mm system that we are going to reboot - @@ -97,6 +104,15 @@ void arm_machine_restart(char mode, const char *cmd) */ setup_mm_for_reboot(mode); + /* Clean and invalidate caches */ + flush_cache_all(); + + /* Turn off caching */ + cpu_proc_fin(); + + /* Push out any further dirty data, and ensure cache is empty */ + flush_cache_all(); + /* * Now call the architecture specific reboot code. */ @@ -120,6 +136,25 @@ EXPORT_SYMBOL(pm_power_off); void (*arm_pm_restart)(char str, const char *cmd) = arm_machine_restart; EXPORT_SYMBOL_GPL(arm_pm_restart); +static void do_nothing(void *unused) +{ +} + +/* + * cpu_idle_wait - Used to ensure that all the CPUs discard old value of + * pm_idle and update to new pm_idle value. Required while changing pm_idle + * handler on SMP systems. + * + * Caller must have changed pm_idle to the new value before the call. Old + * pm_idle value will not be used by any CPU after the return of this function. + */ +void cpu_idle_wait(void) +{ + smp_mb(); + /* kick all the CPUs so that they exit out of pm_idle */ + smp_call_function(do_nothing, NULL, 1); +} +EXPORT_SYMBOL_GPL(cpu_idle_wait); /* * This is our default idle handler. We need to disable @@ -190,19 +225,29 @@ int __init reboot_setup(char *str) __setup("reboot=", reboot_setup); -void machine_halt(void) +void machine_shutdown(void) { +#ifdef CONFIG_SMP + smp_send_stop(); +#endif } +void machine_halt(void) +{ + machine_shutdown(); + while (1); +} void machine_power_off(void) { + machine_shutdown(); if (pm_power_off) pm_power_off(); } void machine_restart(char *cmd) { + machine_shutdown(); arm_pm_restart(reboot_mode, cmd); } @@ -212,7 +257,8 @@ void __show_regs(struct pt_regs *regs) char buf[64]; printk("CPU: %d %s (%s %.*s)\n", - smp_processor_id(), print_tainted(), init_utsname()->release, + raw_smp_processor_id(), print_tainted(), + init_utsname()->release, (int)strcspn(init_utsname()->version, " "), init_utsname()->version); print_symbol("PC is at %s\n", instruction_pointer(regs)); @@ -274,22 +320,25 @@ void show_regs(struct pt_regs * regs) __backtrace(); } +ATOMIC_NOTIFIER_HEAD(thread_notify_head); + +EXPORT_SYMBOL_GPL(thread_notify_head); + /* * Free current thread data structures etc.. 
*/ void exit_thread(void) { + thread_notify(THREAD_NOTIFY_EXIT, current_thread_info()); } -ATOMIC_NOTIFIER_HEAD(thread_notify_head); - -EXPORT_SYMBOL_GPL(thread_notify_head); - void flush_thread(void) { struct thread_info *thread = current_thread_info(); struct task_struct *tsk = current; + flush_ptrace_hw_breakpoint(tsk); + memset(thread->used_cp, 0, sizeof(thread->used_cp)); memset(&tsk->thread.debug, 0, sizeof(struct debug_info)); memset(&thread->fpstate, 0, sizeof(union fp_state)); @@ -299,9 +348,6 @@ void flush_thread(void) void release_thread(struct task_struct *dead_task) { - struct thread_info *thread = task_thread_info(dead_task); - - thread_notify(THREAD_NOTIFY_RELEASE, thread); } asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); @@ -321,6 +367,8 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start, thread->cpu_context.sp = (unsigned long)childregs; thread->cpu_context.pc = (unsigned long)ret_from_fork; + clear_ptrace_hw_breakpoint(p); + if (clone_flags & CLONE_SETTLS) thread->tp_value = regs->ARM_r3; @@ -328,6 +376,15 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start, } /* + * Fill in the task's elfregs structure for a core dump. + */ +int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs) +{ + elf_core_copy_regs(elfregs, task_pt_regs(t)); + return 1; +} + +/* * fill in the fpe structure for a core dump... */ int dump_fpu (struct pt_regs *regs, struct user_fp *fp) @@ -344,23 +401,27 @@ EXPORT_SYMBOL(dump_fpu); /* * Shuffle the argument into the correct register before calling the - * thread function. r1 is the thread argument, r2 is the pointer to - * the thread function, and r3 points to the exit function. + * thread function. r4 is the thread argument, r5 is the pointer to + * the thread function, and r6 points to the exit function. */ extern void kernel_thread_helper(void); -asm( ".section .text\n" +asm( ".pushsection .text\n" " .align\n" " .type kernel_thread_helper, #function\n" "kernel_thread_helper:\n" -" mov r0, r1\n" -" mov lr, r3\n" -" mov pc, r2\n" +#ifdef CONFIG_TRACE_IRQFLAGS +" bl trace_hardirqs_on\n" +#endif +" msr cpsr_c, r7\n" +" mov r0, r4\n" +" mov lr, r6\n" +" mov pc, r5\n" " .size kernel_thread_helper, . - kernel_thread_helper\n" -" .previous"); +" .popsection"); #ifdef CONFIG_ARM_UNWIND extern void kernel_thread_exit(long code); -asm( ".section .text\n" +asm( ".pushsection .text\n" " .align\n" " .type kernel_thread_exit, #function\n" "kernel_thread_exit:\n" @@ -370,7 +431,7 @@ asm( ".section .text\n" " nop\n" " .fnend\n" " .size kernel_thread_exit, . 
- kernel_thread_exit\n" -" .previous"); +" .popsection"); #else #define kernel_thread_exit do_exit #endif @@ -384,11 +445,12 @@ pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags) memset(®s, 0, sizeof(regs)); - regs.ARM_r1 = (unsigned long)arg; - regs.ARM_r2 = (unsigned long)fn; - regs.ARM_r3 = (unsigned long)kernel_thread_exit; + regs.ARM_r4 = (unsigned long)arg; + regs.ARM_r5 = (unsigned long)fn; + regs.ARM_r6 = (unsigned long)kernel_thread_exit; + regs.ARM_r7 = SVC_MODE | PSR_ENDSTATE | PSR_ISETSTATE; regs.ARM_pc = (unsigned long)kernel_thread_helper; - regs.ARM_cpsr = SVC_MODE | PSR_ENDSTATE; + regs.ARM_cpsr = regs.ARM_r7 | PSR_I_BIT; return do_fork(flags|CLONE_VM|CLONE_UNTRACED, 0, ®s, 0, NULL, NULL); } @@ -414,3 +476,30 @@ unsigned long get_wchan(struct task_struct *p) } while (count ++ < 16); return 0; } + +unsigned long arch_randomize_brk(struct mm_struct *mm) +{ + unsigned long range_end = mm->brk + 0x02000000; + return randomize_range(mm->brk, range_end, 0) ? : mm->brk; +} + +/* + * The vectors page is always readable from user space for the + * atomic helpers and the signal restart code. Let's declare a mapping + * for it so it is visible through ptrace and /proc/<pid>/mem. + */ + +int vectors_user_mapping(void) +{ + struct mm_struct *mm = current->mm; + return install_special_mapping(mm, 0xffff0000, PAGE_SIZE, + VM_READ | VM_EXEC | + VM_MAYREAD | VM_MAYEXEC | + VM_ALWAYSDUMP | VM_RESERVED, + NULL); +} + +const char *arch_vma_name(struct vm_area_struct *vma) +{ + return (vma->vm_start == 0xffff0000) ? "[vectors]" : NULL; +} diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c index 89882a1..19c6816 100644 --- a/arch/arm/kernel/ptrace.c +++ b/arch/arm/kernel/ptrace.c @@ -19,6 +19,8 @@ #include <linux/init.h> #include <linux/signal.h> #include <linux/uaccess.h> +#include <linux/perf_event.h> +#include <linux/hw_breakpoint.h> #include <asm/pgtable.h> #include <asm/system.h> @@ -52,6 +54,102 @@ #define BREAKINST_THUMB 0xde01 #endif +struct pt_regs_offset { + const char *name; + int offset; +}; + +#define REG_OFFSET_NAME(r) \ + {.name = #r, .offset = offsetof(struct pt_regs, ARM_##r)} +#define REG_OFFSET_END {.name = NULL, .offset = 0} + +static const struct pt_regs_offset regoffset_table[] = { + REG_OFFSET_NAME(r0), + REG_OFFSET_NAME(r1), + REG_OFFSET_NAME(r2), + REG_OFFSET_NAME(r3), + REG_OFFSET_NAME(r4), + REG_OFFSET_NAME(r5), + REG_OFFSET_NAME(r6), + REG_OFFSET_NAME(r7), + REG_OFFSET_NAME(r8), + REG_OFFSET_NAME(r9), + REG_OFFSET_NAME(r10), + REG_OFFSET_NAME(fp), + REG_OFFSET_NAME(ip), + REG_OFFSET_NAME(sp), + REG_OFFSET_NAME(lr), + REG_OFFSET_NAME(pc), + REG_OFFSET_NAME(cpsr), + REG_OFFSET_NAME(ORIG_r0), + REG_OFFSET_END, +}; + +/** + * regs_query_register_offset() - query register offset from its name + * @name: the name of a register + * + * regs_query_register_offset() returns the offset of a register in struct + * pt_regs from its name. If the name is invalid, this returns -EINVAL; + */ +int regs_query_register_offset(const char *name) +{ + const struct pt_regs_offset *roff; + for (roff = regoffset_table; roff->name != NULL; roff++) + if (!strcmp(roff->name, name)) + return roff->offset; + return -EINVAL; +} + +/** + * regs_query_register_name() - query register name from its offset + * @offset: the offset of a register in struct pt_regs. + * + * regs_query_register_name() returns the name of a register from its + * offset in struct pt_regs. 
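
The regoffset_table above gives kprobes-style event parsers a way to turn user-supplied register names into pt_regs offsets. A sketch of such a consumer (hypothetical; the real fetch helpers live in the tracing core):

    static unsigned long fetch_named_reg(struct pt_regs *regs,
                                         const char *name)
    {
            int off = regs_query_register_offset(name); /* e.g. "r0" */

            if (off < 0)
                    return 0;
            return *(unsigned long *)((char *)regs + off);
    }
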
If the @offset is invalid, this returns NULL; + */ +const char *regs_query_register_name(unsigned int offset) +{ + const struct pt_regs_offset *roff; + for (roff = regoffset_table; roff->name != NULL; roff++) + if (roff->offset == offset) + return roff->name; + return NULL; +} + +/** + * regs_within_kernel_stack() - check the address in the stack + * @regs: pt_regs which contains kernel stack pointer. + * @addr: address which is checked. + * + * regs_within_kernel_stack() checks @addr is within the kernel stack page(s). + * If @addr is within the kernel stack, it returns true. If not, returns false. + */ +bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr) +{ + return ((addr & ~(THREAD_SIZE - 1)) == + (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))); +} + +/** + * regs_get_kernel_stack_nth() - get Nth entry of the stack + * @regs: pt_regs which contains kernel stack pointer. + * @n: stack entry number. + * + * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which + * is specified by @regs. If the @n th entry is NOT in the kernel stack, + * this returns 0. + */ +unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n) +{ + unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs); + addr += n; + if (regs_within_kernel_stack(regs, (unsigned long)addr)) + return *addr; + else + return 0; +} + /* * this routine will get a word off of the processes privileged stack. * the offset is how far from the base addr as stored in the THREAD. @@ -452,12 +550,23 @@ void ptrace_cancel_bpt(struct task_struct *child) clear_breakpoint(child, &child->thread.debug.bp[i]); } +void user_disable_single_step(struct task_struct *task) +{ + task->ptrace &= ~PT_SINGLESTEP; + ptrace_cancel_bpt(task); +} + +void user_enable_single_step(struct task_struct *task) +{ + task->ptrace |= PT_SINGLESTEP; +} + /* * Called by kernel/ptrace.c when detaching.. */ void ptrace_disable(struct task_struct *child) { - single_step_disable(child); + user_disable_single_step(child); } /* @@ -499,10 +608,41 @@ static struct undef_hook thumb_break_hook = { .fn = break_trap, }; +static int thumb2_break_trap(struct pt_regs *regs, unsigned int instr) +{ + unsigned int instr2; + void __user *pc; + + /* Check the second half of the instruction. 
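
The masking in regs_within_kernel_stack() works because kernel stacks are THREAD_SIZE bytes long and THREAD_SIZE-aligned, so two addresses lie on the same stack exactly when they agree after masking. A worked example, assuming 8 KiB stacks (THREAD_SIZE == 0x2000):

    /*
     * sp   = 0xc0005f80  ->  sp   & ~0x1fff == 0xc0004000
     * addr = 0xc0004010  ->  addr & ~0x1fff == 0xc0004000
     * Equal masked values: addr is within the current kernel stack,
     * so regs_get_kernel_stack_nth() may safely dereference it.
     */
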
*/ + pc = (void __user *)(instruction_pointer(regs) + 2); + + if (processor_mode(regs) == SVC_MODE) { + instr2 = *(u16 *) pc; + } else { + get_user(instr2, (u16 __user *)pc); + } + + if (instr2 == 0xa000) { + ptrace_break(current, regs); + return 0; + } else { + return 1; + } +} + +static struct undef_hook thumb2_break_hook = { + .instr_mask = 0xffff, + .instr_val = 0xf7f0, + .cpsr_mask = PSR_T_BIT, + .cpsr_val = PSR_T_BIT, + .fn = thumb2_break_trap, +}; + static int __init ptrace_break_init(void) { register_undef_hook(&arm_break_hook); register_undef_hook(&thumb_break_hook); + register_undef_hook(&thumb2_break_hook); return 0; } @@ -521,7 +661,13 @@ static int ptrace_read_user(struct task_struct *tsk, unsigned long off, return -EIO; tmp = 0; - if (off < sizeof(struct pt_regs)) + if (off == PT_TEXT_ADDR) + tmp = tsk->mm->start_code; + else if (off == PT_DATA_ADDR) + tmp = tsk->mm->start_data; + else if (off == PT_TEXT_END_ADDR) + tmp = tsk->mm->end_code; + else if (off < sizeof(struct pt_regs)) tmp = get_user_reg(tsk, off >> 2); return put_user(tmp, ret); @@ -663,7 +809,7 @@ static int ptrace_getvfpregs(struct task_struct *tsk, void __user *data) union vfp_state *vfp = &thread->vfpstate; struct user_vfp __user *ufp = data; - vfp_sync_state(thread); + vfp_sync_hwstate(thread); /* copy the floating point registers */ if (copy_to_user(&ufp->fpregs, &vfp->hard.fpregs, @@ -686,7 +832,7 @@ static int ptrace_setvfpregs(struct task_struct *tsk, void __user *data) union vfp_state *vfp = &thread->vfpstate; struct user_vfp __user *ufp = data; - vfp_sync_state(thread); + vfp_sync_hwstate(thread); /* copy the floating point registers */ if (copy_from_user(&vfp->hard.fpregs, &ufp->fpregs, @@ -697,115 +843,282 @@ static int ptrace_setvfpregs(struct task_struct *tsk, void __user *data) if (get_user(vfp->hard.fpscr, &ufp->fpscr)) return -EFAULT; + vfp_flush_hwstate(thread); + return 0; } #endif -long arch_ptrace(struct task_struct *child, long request, long addr, long data) +#ifdef CONFIG_HAVE_HW_BREAKPOINT +/* + * Convert a virtual register number into an index for a thread_info + * breakpoint array. Breakpoints are identified using positive numbers + * whilst watchpoints are negative. The registers are laid out as pairs + * of (address, control), each pair mapping to a unique hw_breakpoint struct. + * Register 0 is reserved for describing resource information. + */ +static int ptrace_hbp_num_to_idx(long num) { - int ret; + if (num < 0) + num = (ARM_MAX_BRP << 1) - num; + return (num - 1) >> 1; +} - switch (request) { - /* - * read word at location "addr" in the child process. - */ - case PTRACE_PEEKTEXT: - case PTRACE_PEEKDATA: - ret = generic_ptrace_peekdata(child, addr, data); - break; +/* + * Returns the virtual register number for the address of the + * breakpoint at index idx. + */ +static long ptrace_hbp_idx_to_num(int idx) +{ + long mid = ARM_MAX_BRP << 1; + long num = (idx << 1) + 1; + return num > mid ? mid - num : num; +} - case PTRACE_PEEKUSR: - ret = ptrace_read_user(child, addr, (unsigned long __user *)data); - break; +/* + * Handle hitting a HW-breakpoint. + */ +static void ptrace_hbptriggered(struct perf_event *bp, int unused, + struct perf_sample_data *data, + struct pt_regs *regs) +{ + struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp); + long num; + int i; + siginfo_t info; - /* - * write the word at location addr. 
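
The new PT_TEXT_ADDR/PT_DATA_ADDR/PT_TEXT_END_ADDR offsets above let a debugger recover the tracee's code and data layout through the PEEKUSR interface. A hypothetical user-space sketch (assuming PT_TEXT_ADDR is visible through the exported ptrace headers):

    /* user space: tracee must already be ptrace-stopped */
    long text_start = ptrace(PTRACE_PEEKUSER, pid,
                             (void *)PT_TEXT_ADDR, NULL);
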
- */ - case PTRACE_POKETEXT: - case PTRACE_POKEDATA: - ret = generic_ptrace_pokedata(child, addr, data); + for (i = 0; i < ARM_MAX_HBP_SLOTS; ++i) + if (current->thread.debug.hbp[i] == bp) break; - case PTRACE_POKEUSR: - ret = ptrace_write_user(child, addr, data); - break; + num = (i == ARM_MAX_HBP_SLOTS) ? 0 : ptrace_hbp_idx_to_num(i); - /* - * continue/restart and stop at next (return from) syscall - */ - case PTRACE_SYSCALL: - case PTRACE_CONT: - ret = -EIO; - if (!valid_signal(data)) - break; - if (request == PTRACE_SYSCALL) - set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); - else - clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); - child->exit_code = data; - single_step_disable(child); - wake_up_process(child); - ret = 0; - break; + info.si_signo = SIGTRAP; + info.si_errno = (int)num; + info.si_code = TRAP_HWBKPT; + info.si_addr = (void __user *)(bkpt->trigger); + + force_sig_info(SIGTRAP, &info, current); +} + +/* + * Set ptrace breakpoint pointers to zero for this task. + * This is required in order to prevent child processes from unregistering + * breakpoints held by their parent. + */ +void clear_ptrace_hw_breakpoint(struct task_struct *tsk) +{ + memset(tsk->thread.debug.hbp, 0, sizeof(tsk->thread.debug.hbp)); +} + +/* + * Unregister breakpoints from this task and reset the pointers in + * the thread_struct. + */ +void flush_ptrace_hw_breakpoint(struct task_struct *tsk) +{ + int i; + struct thread_struct *t = &tsk->thread; + + for (i = 0; i < ARM_MAX_HBP_SLOTS; i++) { + if (t->debug.hbp[i]) { + unregister_hw_breakpoint(t->debug.hbp[i]); + t->debug.hbp[i] = NULL; + } + } +} + +static u32 ptrace_get_hbp_resource_info(void) +{ + u8 num_brps, num_wrps, debug_arch, wp_len; + u32 reg = 0; + + num_brps = hw_breakpoint_slots(TYPE_INST); + num_wrps = hw_breakpoint_slots(TYPE_DATA); + debug_arch = arch_get_debug_arch(); + wp_len = arch_get_max_wp_len(); + + reg |= debug_arch; + reg <<= 8; + reg |= wp_len; + reg <<= 8; + reg |= num_wrps; + reg <<= 8; + reg |= num_brps; + + return reg; +} + +static struct perf_event *ptrace_hbp_create(struct task_struct *tsk, int type) +{ + struct perf_event_attr attr; + + ptrace_breakpoint_init(&attr); + + /* Initialise fields to sane defaults. */ + attr.bp_addr = 0; + attr.bp_len = HW_BREAKPOINT_LEN_4; + attr.bp_type = type; + attr.disabled = 1; + + return register_user_hw_breakpoint(&attr, ptrace_hbptriggered, tsk); +} + +static int ptrace_gethbpregs(struct task_struct *tsk, long num, + unsigned long __user *data) +{ + u32 reg; + int idx, ret = 0; + struct perf_event *bp; + struct arch_hw_breakpoint_ctrl arch_ctrl; + + if (num == 0) { + reg = ptrace_get_hbp_resource_info(); + } else { + idx = ptrace_hbp_num_to_idx(num); + if (idx < 0 || idx >= ARM_MAX_HBP_SLOTS) { + ret = -EINVAL; + goto out; + } + + bp = tsk->thread.debug.hbp[idx]; + if (!bp) { + reg = 0; + goto put; + } + + arch_ctrl = counter_arch_bp(bp)->ctrl; /* - * make the child exit. Best I can do is send it a sigkill. - * perhaps it should be put in the status that it wants to - * exit. + * Fix up the len because we may have adjusted it + * to compensate for an unaligned address. 
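
The virtual numbering used by ptrace_hbp_num_to_idx() interleaves (address, control) pairs, positive for breakpoints and negative for watchpoints, with register 0 reserved for the resource-information word. Concrete mappings, assuming ARM_MAX_BRP == 16:

    /*
     * num  meaning                 index computed above
     *  1   breakpoint 0 address    (1 - 1) >> 1        == 0
     *  2   breakpoint 0 control    (2 - 1) >> 1        == 0
     * -1   watchpoint 0 address    ((32 + 1) - 1) >> 1 == 16
     * -2   watchpoint 0 control    ((32 + 2) - 1) >> 1 == 16
     *  0   reserved for the resource info register
     */
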
*/ - case PTRACE_KILL: - single_step_disable(child); - if (child->exit_state != EXIT_ZOMBIE) { - child->exit_code = SIGKILL; - wake_up_process(child); - } - ret = 0; + while (!(arch_ctrl.len & 0x1)) + arch_ctrl.len >>= 1; + + if (idx & 0x1) + reg = encode_ctrl_reg(arch_ctrl); + else + reg = bp->attr.bp_addr; + } + +put: + if (put_user(reg, data)) + ret = -EFAULT; + +out: + return ret; +} + +static int ptrace_sethbpregs(struct task_struct *tsk, long num, + unsigned long __user *data) +{ + int idx, gen_len, gen_type, implied_type, ret = 0; + u32 user_val; + struct perf_event *bp; + struct arch_hw_breakpoint_ctrl ctrl; + struct perf_event_attr attr; + + if (num == 0) + goto out; + else if (num < 0) + implied_type = HW_BREAKPOINT_RW; + else + implied_type = HW_BREAKPOINT_X; + + idx = ptrace_hbp_num_to_idx(num); + if (idx < 0 || idx >= ARM_MAX_HBP_SLOTS) { + ret = -EINVAL; + goto out; + } + + if (get_user(user_val, data)) { + ret = -EFAULT; + goto out; + } + + bp = tsk->thread.debug.hbp[idx]; + if (!bp) { + bp = ptrace_hbp_create(tsk, implied_type); + if (IS_ERR(bp)) { + ret = PTR_ERR(bp); + goto out; + } + tsk->thread.debug.hbp[idx] = bp; + } + + attr = bp->attr; + + if (num & 0x1) { + /* Address */ + attr.bp_addr = user_val; + } else { + /* Control */ + decode_ctrl_reg(user_val, &ctrl); + ret = arch_bp_generic_fields(ctrl, &gen_len, &gen_type); + if (ret) + goto out; + + if ((gen_type & implied_type) != gen_type) { + ret = -EINVAL; + goto out; + } + + attr.bp_len = gen_len; + attr.bp_type = gen_type; + attr.disabled = !ctrl.enabled; + } + + ret = modify_user_hw_breakpoint(bp, &attr); +out: + return ret; +} +#endif + +long arch_ptrace(struct task_struct *child, long request, + unsigned long addr, unsigned long data) +{ + int ret; + unsigned long __user *datap = (unsigned long __user *) data; + + switch (request) { + case PTRACE_PEEKUSR: + ret = ptrace_read_user(child, addr, datap); break; - /* - * execute single instruction. - */ - case PTRACE_SINGLESTEP: - ret = -EIO; - if (!valid_signal(data)) - break; - single_step_enable(child); - clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); - child->exit_code = data; - /* give it a chance to run. 
*/ - wake_up_process(child); - ret = 0; + case PTRACE_POKEUSR: + ret = ptrace_write_user(child, addr, data); break; case PTRACE_GETREGS: - ret = ptrace_getregs(child, (void __user *)data); + ret = ptrace_getregs(child, datap); break; case PTRACE_SETREGS: - ret = ptrace_setregs(child, (void __user *)data); + ret = ptrace_setregs(child, datap); break; case PTRACE_GETFPREGS: - ret = ptrace_getfpregs(child, (void __user *)data); + ret = ptrace_getfpregs(child, datap); break; case PTRACE_SETFPREGS: - ret = ptrace_setfpregs(child, (void __user *)data); + ret = ptrace_setfpregs(child, datap); break; #ifdef CONFIG_IWMMXT case PTRACE_GETWMMXREGS: - ret = ptrace_getwmmxregs(child, (void __user *)data); + ret = ptrace_getwmmxregs(child, datap); break; case PTRACE_SETWMMXREGS: - ret = ptrace_setwmmxregs(child, (void __user *)data); + ret = ptrace_setwmmxregs(child, datap); break; #endif case PTRACE_GET_THREAD_AREA: ret = put_user(task_thread_info(child)->tp_value, - (unsigned long __user *) data); + datap); break; case PTRACE_SET_SYSCALL: @@ -815,21 +1128,32 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) #ifdef CONFIG_CRUNCH case PTRACE_GETCRUNCHREGS: - ret = ptrace_getcrunchregs(child, (void __user *)data); + ret = ptrace_getcrunchregs(child, datap); break; case PTRACE_SETCRUNCHREGS: - ret = ptrace_setcrunchregs(child, (void __user *)data); + ret = ptrace_setcrunchregs(child, datap); break; #endif #ifdef CONFIG_VFP case PTRACE_GETVFPREGS: - ret = ptrace_getvfpregs(child, (void __user *)data); + ret = ptrace_getvfpregs(child, datap); break; case PTRACE_SETVFPREGS: - ret = ptrace_setvfpregs(child, (void __user *)data); + ret = ptrace_setvfpregs(child, datap); + break; +#endif + +#ifdef CONFIG_HAVE_HW_BREAKPOINT + case PTRACE_GETHBPREGS: + ret = ptrace_gethbpregs(child, addr, + (unsigned long __user *)data); + break; + case PTRACE_SETHBPREGS: + ret = ptrace_sethbpregs(child, addr, + (unsigned long __user *)data); break; #endif diff --git a/arch/arm/kernel/ptrace.h b/arch/arm/kernel/ptrace.h index def3b61..3926605 100644 --- a/arch/arm/kernel/ptrace.h +++ b/arch/arm/kernel/ptrace.h @@ -14,20 +14,6 @@ extern void ptrace_set_bpt(struct task_struct *); extern void ptrace_break(struct task_struct *, struct pt_regs *); /* - * make sure single-step breakpoint is gone. - */ -static inline void single_step_disable(struct task_struct *task) -{ - task->ptrace &= ~PT_SINGLESTEP; - ptrace_cancel_bpt(task); -} - -static inline void single_step_enable(struct task_struct *task) -{ - task->ptrace |= PT_SINGLESTEP; -} - -/* * Send SIGTRAP if we're single-stepping */ static inline void single_step_trap(struct task_struct *task) diff --git a/arch/arm/kernel/relocate_kernel.S b/arch/arm/kernel/relocate_kernel.S index 61930eb0..9cf4cbf 100644 --- a/arch/arm/kernel/relocate_kernel.S +++ b/arch/arm/kernel/relocate_kernel.S @@ -10,6 +10,12 @@ relocate_new_kernel: ldr r0,kexec_indirection_page ldr r1,kexec_start_address + /* + * If there is no indirection page (we are doing crashdumps) + * skip any relocation. 
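
return_address() above asks the frame walker for level + 1 frames because the call to return_address() itself adds a frame to peel. A sketch of a hypothetical caller, in the style of the CALLER_ADDRn users this is meant to serve (the exact level-to-caller correspondence follows the walk above):

    static void show_my_caller(void)
    {
            void *ret = return_address(1);

            pr_info("reached via %pS\n", ret);
    }

Note this only works with frame pointers; the unwinder-based fallback returns NULL rather than guessing.
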
+ */ + cmp r0, #0 + beq 2f 0: /* top, read another word for the indirection page */ ldr r3, [r0],#4 @@ -53,6 +59,8 @@ relocate_new_kernel: ldr r2,kexec_boot_atags mov pc,lr + .align + .globl kexec_start_address kexec_start_address: .long 0x0 diff --git a/arch/arm/kernel/return_address.c b/arch/arm/kernel/return_address.c new file mode 100644 index 0000000..df246da --- /dev/null +++ b/arch/arm/kernel/return_address.c @@ -0,0 +1,71 @@ +/* + * arch/arm/kernel/return_address.c + * + * Copyright (C) 2009 Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de> + * for Pengutronix + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + */ +#include <linux/module.h> + +#if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND) +#include <linux/sched.h> + +#include <asm/stacktrace.h> + +struct return_address_data { + unsigned int level; + void *addr; +}; + +static int save_return_addr(struct stackframe *frame, void *d) +{ + struct return_address_data *data = d; + + if (!data->level) { + data->addr = (void *)frame->lr; + + return 1; + } else { + --data->level; + return 0; + } +} + +void *return_address(unsigned int level) +{ + struct return_address_data data; + struct stackframe frame; + register unsigned long current_sp asm ("sp"); + + data.level = level + 1; + + frame.fp = (unsigned long)__builtin_frame_address(0); + frame.sp = current_sp; + frame.lr = (unsigned long)__builtin_return_address(0); + frame.pc = (unsigned long)return_address; + + walk_stackframe(&frame, save_return_addr, &data); + + if (!data.level) + return data.addr; + else + return NULL; +} + +#else /* if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND) */ + +#if defined(CONFIG_ARM_UNWIND) +#warning "TODO: return_address should use unwind tables" +#endif + +void *return_address(unsigned int level) +{ + return NULL; +} + +#endif /* if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND) / else */ + +EXPORT_SYMBOL_GPL(return_address); diff --git a/arch/arm/kernel/sched_clock.c b/arch/arm/kernel/sched_clock.c new file mode 100644 index 0000000..2cdcc92 --- /dev/null +++ b/arch/arm/kernel/sched_clock.c @@ -0,0 +1,69 @@ +/* + * sched_clock.c: support for extending counters to full 64-bit ns counter + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include <linux/clocksource.h> +#include <linux/init.h> +#include <linux/jiffies.h> +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/timer.h> + +#include <asm/sched_clock.h> + +static void sched_clock_poll(unsigned long wrap_ticks); +static DEFINE_TIMER(sched_clock_timer, sched_clock_poll, 0, 0); +static void (*sched_clock_update_fn)(void); + +static void sched_clock_poll(unsigned long wrap_ticks) +{ + mod_timer(&sched_clock_timer, round_jiffies(jiffies + wrap_ticks)); + sched_clock_update_fn(); +} + +void __init init_sched_clock(struct clock_data *cd, void (*update)(void), + unsigned int clock_bits, unsigned long rate) +{ + unsigned long r, w; + u64 res, wrap; + char r_unit; + + sched_clock_update_fn = update; + + /* calculate the mult/shift to convert counter ticks to ns. 
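
clocks_calc_mult_shift() below picks mult/shift such that cyc_to_ns(cyc, mult, shift) is essentially (cyc * mult) >> shift. Worked figures for a hypothetical 32-bit counter running at 24 MHz:

    /*
     * resolution = cyc_to_ns(1)        ~= 41 ns   (1e9 / 24e6)
     * wrap       = cyc_to_ns(2^32 - 1) ~= 178,956,970,000 ns
     *           => w ~= 178,956 ms
     * The poll timer is armed at w - w/10 (~161 s), comfortably inside
     * the ~179 s wrap interval, so the epoch is refreshed in time.
     */
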
*/ + clocks_calc_mult_shift(&cd->mult, &cd->shift, rate, NSEC_PER_SEC, 60); + + r = rate; + if (r >= 4000000) { + r /= 1000000; + r_unit = 'M'; + } else { + r /= 1000; + r_unit = 'k'; + } + + /* calculate how many ns until we wrap */ + wrap = cyc_to_ns((1ULL << clock_bits) - 1, cd->mult, cd->shift); + do_div(wrap, NSEC_PER_MSEC); + w = wrap; + + /* calculate the ns resolution of this counter */ + res = cyc_to_ns(1ULL, cd->mult, cd->shift); + pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lums\n", + clock_bits, r, r_unit, res, w); + + /* + * Start the timer to keep sched_clock() properly updated and + * sets the initial epoch. + */ + sched_clock_timer.data = msecs_to_jiffies(w - (w / 10)); + sched_clock_poll(sched_clock_timer.data); + + /* + * Ensure that sched_clock() starts off at 0ns + */ + cd->epoch_ns = 0; +} diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index bc5e412..3455ad3 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -19,18 +19,24 @@ #include <linux/seq_file.h> #include <linux/screen_info.h> #include <linux/init.h> +#include <linux/kexec.h> +#include <linux/crash_dump.h> #include <linux/root_dev.h> #include <linux/cpu.h> #include <linux/interrupt.h> #include <linux/smp.h> #include <linux/fs.h> +#include <linux/proc_fs.h> +#include <linux/memblock.h> +#include <asm/unified.h> #include <asm/cpu.h> #include <asm/cputype.h> #include <asm/elf.h> #include <asm/procinfo.h> #include <asm/sections.h> #include <asm/setup.h> +#include <asm/smp_plat.h> #include <asm/mach-types.h> #include <asm/cacheflush.h> #include <asm/cachetype.h> @@ -42,8 +48,11 @@ #include <asm/traps.h> #include <asm/unwind.h> +#if defined(CONFIG_DEPRECATED_PARAM_STRUCT) #include "compat.h" +#endif #include "atags.h" +#include "tcm.h" #ifndef MEM_SIZE #define MEM_SIZE (16*1024*1024) @@ -66,9 +75,9 @@ extern void reboot_setup(char *str); unsigned int processor_id; EXPORT_SYMBOL(processor_id); -unsigned int __machine_arch_type; +unsigned int __machine_arch_type __read_mostly; EXPORT_SYMBOL(__machine_arch_type); -unsigned int cacheid; +unsigned int cacheid __read_mostly; EXPORT_SYMBOL(cacheid); unsigned int __atags_pointer __initdata; @@ -82,24 +91,25 @@ EXPORT_SYMBOL(system_serial_low); unsigned int system_serial_high; EXPORT_SYMBOL(system_serial_high); -unsigned int elf_hwcap; +unsigned int elf_hwcap __read_mostly; EXPORT_SYMBOL(elf_hwcap); #ifdef MULTI_CPU -struct processor processor; +struct processor processor __read_mostly; #endif #ifdef MULTI_TLB -struct cpu_tlb_fns cpu_tlb; +struct cpu_tlb_fns cpu_tlb __read_mostly; #endif #ifdef MULTI_USER -struct cpu_user_fns cpu_user; +struct cpu_user_fns cpu_user __read_mostly; #endif #ifdef MULTI_CACHE -struct cpu_cache_fns cpu_cache; +struct cpu_cache_fns cpu_cache __read_mostly; #endif #ifdef CONFIG_OUTER_CACHE -struct outer_cache_fns outer_cache; +struct outer_cache_fns outer_cache __read_mostly; +EXPORT_SYMBOL(outer_cache); #endif struct stack { @@ -115,7 +125,8 @@ EXPORT_SYMBOL(elf_platform); static const char *cpu_name; static const char *machine_name; -static char __initdata command_line[COMMAND_LINE_SIZE]; +static char __initdata cmd_line[COMMAND_LINE_SIZE]; +struct machine_desc *machine_desc __initdata; static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE; static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } }; @@ -229,6 +240,35 @@ int cpu_architecture(void) return cpu_arch; } +static int cpu_has_aliasing_icache(unsigned int arch) +{ 
+ int aliasing_icache; + unsigned int id_reg, num_sets, line_size; + + /* arch specifies the register format */ + switch (arch) { + case CPU_ARCH_ARMv7: + asm("mcr p15, 2, %0, c0, c0, 0 @ set CSSELR" + : /* No output operands */ + : "r" (1)); + isb(); + asm("mrc p15, 1, %0, c0, c0, 0 @ read CCSIDR" + : "=r" (id_reg)); + line_size = 4 << ((id_reg & 0x7) + 2); + num_sets = ((id_reg >> 13) & 0x7fff) + 1; + aliasing_icache = (line_size * num_sets) > PAGE_SIZE; + break; + case CPU_ARCH_ARMv6: + aliasing_icache = read_cpuid_cachetype() & (1 << 11); + break; + default: + /* I-cache aliases will be handled by D-cache aliasing code */ + aliasing_icache = 0; + } + + return aliasing_icache; +} + static void __init cacheid_init(void) { unsigned int cachetype = read_cpuid_cachetype(); @@ -240,10 +280,15 @@ static void __init cacheid_init(void) cacheid = CACHEID_VIPT_NONALIASING; if ((cachetype & (3 << 14)) == 1 << 14) cacheid |= CACHEID_ASID_TAGGED; - } else if (cachetype & (1 << 23)) + else if (cpu_has_aliasing_icache(CPU_ARCH_ARMv7)) + cacheid |= CACHEID_VIPT_I_ALIASING; + } else if (cachetype & (1 << 23)) { cacheid = CACHEID_VIPT_ALIASING; - else + } else { cacheid = CACHEID_VIPT_NONALIASING; + if (cpu_has_aliasing_icache(CPU_ARCH_ARMv6)) + cacheid |= CACHEID_VIPT_I_ALIASING; + } } else { cacheid = CACHEID_VIVT; } @@ -254,7 +299,7 @@ static void __init cacheid_init(void) cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown", cache_is_vivt() ? "VIVT" : icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" : - cache_is_vipt_aliasing() ? "VIPT aliasing" : + icache_is_vipt_aliasing() ? "VIPT aliasing" : cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown"); } @@ -265,6 +310,21 @@ static void __init cacheid_init(void) extern struct proc_info_list *lookup_processor_type(unsigned int); extern struct machine_desc *lookup_machine_type(unsigned int); +static void __init feat_v6_fixup(void) +{ + int id = read_cpuid_id(); + + if ((id & 0xff0f0000) != 0x41070000) + return; + + /* + * HWCAP_TLS is available only on 1136 r1p0 and later, + * see also kuser_get_tls_init. + */ + if ((((id >> 4) & 0xfff) == 0xb36) && (((id >> 20) & 3) == 0)) + elf_hwcap &= ~HWCAP_TLS; +} + static void __init setup_processor(void) { struct proc_info_list *list; @@ -307,6 +367,8 @@ static void __init setup_processor(void) elf_hwcap &= ~HWCAP_THUMB; #endif + feat_v6_fixup(); + cacheid_init(); cpu_proc_init(); } @@ -327,25 +389,38 @@ void cpu_init(void) } /* + * Define the placement constraint for the inline asm directive below. + * In Thumb-2, msr with an immediate value is not allowed. 
+ */ +#ifdef CONFIG_THUMB2_KERNEL +#define PLC "r" +#else +#define PLC "I" +#endif + + /* * setup stacks for re-entrant exception handlers */ __asm__ ( "msr cpsr_c, %1\n\t" - "add sp, %0, %2\n\t" + "add r14, %0, %2\n\t" + "mov sp, r14\n\t" "msr cpsr_c, %3\n\t" - "add sp, %0, %4\n\t" + "add r14, %0, %4\n\t" + "mov sp, r14\n\t" "msr cpsr_c, %5\n\t" - "add sp, %0, %6\n\t" + "add r14, %0, %6\n\t" + "mov sp, r14\n\t" "msr cpsr_c, %7" : : "r" (stk), - "I" (PSR_F_BIT | PSR_I_BIT | IRQ_MODE), + PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE), "I" (offsetof(struct stack, irq[0])), - "I" (PSR_F_BIT | PSR_I_BIT | ABT_MODE), + PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE), "I" (offsetof(struct stack, abt[0])), - "I" (PSR_F_BIT | PSR_I_BIT | UND_MODE), + PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE), "I" (offsetof(struct stack, und[0])), - "I" (PSR_F_BIT | PSR_I_BIT | SVC_MODE) + PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE) : "r14"); } @@ -385,13 +460,12 @@ static int __init arm_add_memory(unsigned long start, unsigned long size) size -= start & ~PAGE_MASK; bank->start = PAGE_ALIGN(start); bank->size = size & PAGE_MASK; - bank->node = PHYS_TO_NID(start); /* * Check whether this memory region has non-zero size or * invalid node number. */ - if (bank->size == 0 || bank->node >= MAX_NUMNODES) + if (bank->size == 0) return -EINVAL; meminfo.nr_banks++; @@ -402,10 +476,11 @@ static int __init arm_add_memory(unsigned long start, unsigned long size) * Pick out the memory size. We look for mem=size@start, * where start and size are "size[KkMm]" */ -static void __init early_mem(char **p) +static int __init early_mem(char *p) { static int usermem __initdata = 0; unsigned long size, start; + char *endp; /* * If the user specifies memory size, we @@ -418,52 +493,15 @@ static void __init early_mem(char **p) } start = PHYS_OFFSET; - size = memparse(*p, p); - if (**p == '@') - start = memparse(*p + 1, p); + size = memparse(p, &endp); + if (*endp == '@') + start = memparse(endp + 1, NULL); arm_add_memory(start, size); -} -__early_param("mem=", early_mem); -/* - * Initial parsing of the command line. 
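
The early_mem() rewrite above leans on memparse(), which consumes a size token with an optional K/M/G suffix and reports where it stopped. Tracing "mem=64M@0x20000000" through it (assuming standard memparse() semantics):

    /*
     * p = "64M@0x20000000"
     * size  = memparse(p, &endp)   -> 0x04000000, endp -> "@0x20000000"
     * *endp == '@'                 -> start = memparse(endp + 1, NULL)
     *                              -> 0x20000000
     * arm_add_memory(0x20000000, 0x04000000) then registers the bank.
     */
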
- */ -static void __init parse_cmdline(char **cmdline_p, char *from) -{ - char c = ' ', *to = command_line; - int len = 0; - - for (;;) { - if (c == ' ') { - extern struct early_params __early_begin, __early_end; - struct early_params *p; - - for (p = &__early_begin; p < &__early_end; p++) { - int arglen = strlen(p->arg); - - if (memcmp(from, p->arg, arglen) == 0) { - if (to != command_line) - to -= 1; - from += arglen; - p->fn(&from); - - while (*from != ' ' && *from != '\0') - from++; - break; - } - } - } - c = *from++; - if (!c) - break; - if (COMMAND_LINE_SIZE <= ++len) - break; - *to++ = c; - } - *to = '\0'; - *cmdline_p = command_line; + return 0; } +early_param("mem", early_mem); static void __init setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz) @@ -488,7 +526,7 @@ request_standard_resources(struct meminfo *mi, struct machine_desc *mdesc) kernel_code.start = virt_to_phys(_text); kernel_code.end = virt_to_phys(_etext - 1); - kernel_data.start = virt_to_phys(_data); + kernel_data.start = virt_to_phys(_sdata); kernel_data.end = virt_to_phys(_end - 1); for (i = 0; i < mi->nr_banks; i++) { @@ -612,6 +650,7 @@ static int __init parse_tag_revision(const struct tag *tag) __tagtable(ATAG_REVISION, parse_tag_revision); +#ifndef CONFIG_CMDLINE_FORCE static int __init parse_tag_cmdline(const struct tag *tag) { strlcpy(default_command_line, tag->u.cmdline.cmdline, COMMAND_LINE_SIZE); @@ -619,6 +658,7 @@ static int __init parse_tag_cmdline(const struct tag *tag) } __tagtable(ATAG_CMDLINE, parse_tag_cmdline); +#endif /* CONFIG_CMDLINE_FORCE */ /* * Scan the tag table for this tag, and call its parse function. @@ -669,17 +709,95 @@ static struct init_tags { { 0, ATAG_NONE } }; -static void (*init_machine)(void) __initdata; - static int __init customize_machine(void) { /* customizes platform devices, or adds new ones */ - if (init_machine) - init_machine(); + if (machine_desc->init_machine) + machine_desc->init_machine(); return 0; } arch_initcall(customize_machine); +#ifdef CONFIG_KEXEC +static inline unsigned long long get_total_mem(void) +{ + unsigned long total; + + total = max_low_pfn - min_low_pfn; + return total << PAGE_SHIFT; +} + +/** + * reserve_crashkernel() - reserves memory are for crash kernel + * + * This function reserves memory area given in "crashkernel=" kernel command + * line parameter. The memory reserved is used by a dump capture kernel when + * primary kernel is crashing. + */ +static void __init reserve_crashkernel(void) +{ + unsigned long long crash_size, crash_base; + unsigned long long total_mem; + int ret; + + total_mem = get_total_mem(); + ret = parse_crashkernel(boot_command_line, total_mem, + &crash_size, &crash_base); + if (ret) + return; + + ret = reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE); + if (ret < 0) { + printk(KERN_WARNING "crashkernel reservation failed - " + "memory is in use (0x%lx)\n", (unsigned long)crash_base); + return; + } + + printk(KERN_INFO "Reserving %ldMB of memory at %ldMB " + "for crashkernel (System RAM: %ldMB)\n", + (unsigned long)(crash_size >> 20), + (unsigned long)(crash_base >> 20), + (unsigned long)(total_mem >> 20)); + + crashk_res.start = crash_base; + crashk_res.end = crash_base + crash_size - 1; + insert_resource(&iomem_resource, &crashk_res); +} +#else +static inline void reserve_crashkernel(void) {} +#endif /* CONFIG_KEXEC */ + +/* + * Note: elfcorehdr_addr is not just limited to vmcore. It is also used by + * is_kdump_kernel() to determine if we are booting after a panic. 
Hence + * ifdef it under CONFIG_CRASH_DUMP and not CONFIG_PROC_VMCORE. + */ + +#ifdef CONFIG_CRASH_DUMP +/* + * elfcorehdr= specifies the location of elf core header stored by the crashed + * kernel. This option will be passed by kexec loader to the capture kernel. + */ +static int __init setup_elfcorehdr(char *arg) +{ + char *end; + + if (!arg) + return -EINVAL; + + elfcorehdr_addr = memparse(arg, &end); + return end > arg ? 0 : -EINVAL; +} +early_param("elfcorehdr", setup_elfcorehdr); +#endif /* CONFIG_CRASH_DUMP */ + +static void __init squash_mem_tags(struct tag *tag) +{ + for (; tag->hdr.size; tag = tag_next(tag)) + if (tag->hdr.tag == ATAG_MEM) + tag->hdr.tag = ATAG_NONE; +} + void __init setup_arch(char **cmdline_p) { struct tag *tags = (struct tag *)&init_tags; @@ -690,6 +808,7 @@ void __init setup_arch(char **cmdline_p) setup_processor(); mdesc = setup_machine(machine_arch_type); + machine_desc = mdesc; machine_name = mdesc->name; if (mdesc->soft_reboot) @@ -700,12 +819,14 @@ void __init setup_arch(char **cmdline_p) else if (mdesc->boot_params) tags = phys_to_virt(mdesc->boot_params); +#if defined(CONFIG_DEPRECATED_PARAM_STRUCT) /* * If we have the old style parameters, convert them to * a tag list. */ if (tags->hdr.tag != ATAG_CORE) convert_to_tag_list(tags); +#endif if (tags->hdr.tag != ATAG_CORE) tags = (struct tag *)&init_tags; @@ -724,24 +845,32 @@ void __init setup_arch(char **cmdline_p) init_mm.end_data = (unsigned long) _edata; init_mm.brk = (unsigned long) _end; - memcpy(boot_command_line, from, COMMAND_LINE_SIZE); - boot_command_line[COMMAND_LINE_SIZE-1] = '\0'; - parse_cmdline(cmdline_p, from); + /* parse_early_param needs a boot_command_line */ + strlcpy(boot_command_line, from, COMMAND_LINE_SIZE); + + /* populate cmd_line too for later use, preserving boot_command_line */ + strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE); + *cmdline_p = cmd_line; + + parse_early_param(); + + arm_memblock_init(&meminfo, mdesc); + paging_init(mdesc); request_standard_resources(&meminfo, mdesc); #ifdef CONFIG_SMP - smp_init_cpus(); + if (is_smp()) + smp_init_cpus(); #endif + reserve_crashkernel(); cpu_init(); + tcm_init(); - /* - * Set up various architecture-specific pointers - */ - init_arch_irq = mdesc->init_irq; - system_timer = mdesc->timer; - init_machine = mdesc->init_machine; +#ifdef CONFIG_MULTI_IRQ_HANDLER + handle_arch_irq = mdesc->handle_irq; +#endif #ifdef CONFIG_VT #if defined(CONFIG_VGA_CONSOLE) @@ -751,6 +880,9 @@ void __init setup_arch(char **cmdline_p) #endif #endif early_trap_init(); + + if (mdesc->init_early) + mdesc->init_early(); } @@ -766,9 +898,21 @@ static int __init topology_init(void) return 0; } - subsys_initcall(topology_init); +#ifdef CONFIG_HAVE_PROC_CPU +static int __init proc_cpu_init(void) +{ + struct proc_dir_entry *res; + + res = proc_mkdir("cpu", NULL); + if (!res) + return -ENOMEM; + return 0; +} +fs_initcall(proc_cpu_init); +#endif + static const char *hwcap_str[] = { "swp", "half", diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c index 93bb424..907d5a6 100644 --- a/arch/arm/kernel/signal.c +++ b/arch/arm/kernel/signal.c @@ -1,7 +1,7 @@ /* * linux/arch/arm/kernel/signal.c * - * Copyright (C) 1995-2002 Russell King + * Copyright (C) 1995-2009 Russell King * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -12,11 +12,13 @@ #include <linux/personality.h> #include <linux/freezer.h> #include <linux/uaccess.h> +#include <linux/tracehook.h> 
#include <asm/elf.h> #include <asm/cacheflush.h> #include <asm/ucontext.h> #include <asm/unistd.h> +#include <asm/vfp.h> #include "ptrace.h" #include "signal.h" @@ -28,6 +30,7 @@ */ #define SWI_SYS_SIGRETURN (0xef000000|(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE)) #define SWI_SYS_RT_SIGRETURN (0xef000000|(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE)) +#define SWI_SYS_RESTART (0xef000000|__NR_restart_syscall|__NR_OABI_SYSCALL_BASE) /* * With EABI, the syscall number has to be loaded into r7. @@ -47,57 +50,34 @@ const unsigned long sigreturn_codes[7] = { MOV_R7_NR_RT_SIGRETURN, SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN, }; -static int do_signal(sigset_t *oldset, struct pt_regs * regs, int syscall); +/* + * Either we support OABI only, or we have EABI with the OABI + * compat layer enabled. In the later case we don't know if + * user space is EABI or not, and if not we must not clobber r7. + * Always using the OABI syscall solves that issue and works for + * all those cases. + */ +const unsigned long syscall_restart_code[2] = { + SWI_SYS_RESTART, /* swi __NR_restart_syscall */ + 0xe49df004, /* ldr pc, [sp], #4 */ +}; /* * atomically swap in the new signal mask, and wait for a signal. */ -asmlinkage int sys_sigsuspend(int restart, unsigned long oldmask, old_sigset_t mask, struct pt_regs *regs) +asmlinkage int sys_sigsuspend(int restart, unsigned long oldmask, old_sigset_t mask) { - sigset_t saveset; - mask &= _BLOCKABLE; spin_lock_irq(¤t->sighand->siglock); - saveset = current->blocked; + current->saved_sigmask = current->blocked; siginitset(¤t->blocked, mask); recalc_sigpending(); spin_unlock_irq(¤t->sighand->siglock); - regs->ARM_r0 = -EINTR; - while (1) { - current->state = TASK_INTERRUPTIBLE; - schedule(); - if (do_signal(&saveset, regs, 0)) - return regs->ARM_r0; - } -} - -asmlinkage int -sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize, struct pt_regs *regs) -{ - sigset_t saveset, newset; - - /* XXX: Don't preclude handling different sized sigset_t's. 
*/ - if (sigsetsize != sizeof(sigset_t)) - return -EINVAL; - - if (copy_from_user(&newset, unewset, sizeof(newset))) - return -EFAULT; - sigdelsetmask(&newset, ~_BLOCKABLE); - - spin_lock_irq(¤t->sighand->siglock); - saveset = current->blocked; - current->blocked = newset; - recalc_sigpending(); - spin_unlock_irq(¤t->sighand->siglock); - regs->ARM_r0 = -EINTR; - - while (1) { - current->state = TASK_INTERRUPTIBLE; - schedule(); - if (do_signal(&saveset, regs, 0)) - return regs->ARM_r0; - } + current->state = TASK_INTERRUPTIBLE; + schedule(); + set_restore_sigmask(); + return -ERESTARTNOHAND; } asmlinkage int @@ -133,7 +113,7 @@ sys_sigaction(int sig, const struct old_sigaction __user *act, } #ifdef CONFIG_CRUNCH -static int preserve_crunch_context(struct crunch_sigframe *frame) +static int preserve_crunch_context(struct crunch_sigframe __user *frame) { char kbuf[sizeof(*frame) + 8]; struct crunch_sigframe *kframe; @@ -146,7 +126,7 @@ static int preserve_crunch_context(struct crunch_sigframe *frame) return __copy_to_user(frame, kframe, sizeof(*frame)); } -static int restore_crunch_context(struct crunch_sigframe *frame) +static int restore_crunch_context(struct crunch_sigframe __user *frame) { char kbuf[sizeof(*frame) + 8]; struct crunch_sigframe *kframe; @@ -196,6 +176,90 @@ static int restore_iwmmxt_context(struct iwmmxt_sigframe *frame) #endif +#ifdef CONFIG_VFP + +static int preserve_vfp_context(struct vfp_sigframe __user *frame) +{ + struct thread_info *thread = current_thread_info(); + struct vfp_hard_struct *h = &thread->vfpstate.hard; + const unsigned long magic = VFP_MAGIC; + const unsigned long size = VFP_STORAGE_SIZE; + int err = 0; + + vfp_sync_hwstate(thread); + __put_user_error(magic, &frame->magic, err); + __put_user_error(size, &frame->size, err); + + /* + * Copy the floating point registers. There can be unused + * registers see asm/hwcap.h for details. + */ + err |= __copy_to_user(&frame->ufp.fpregs, &h->fpregs, + sizeof(h->fpregs)); + /* + * Copy the status and control register. + */ + __put_user_error(h->fpscr, &frame->ufp.fpscr, err); + + /* + * Copy the exception registers. + */ + __put_user_error(h->fpexc, &frame->ufp_exc.fpexc, err); + __put_user_error(h->fpinst, &frame->ufp_exc.fpinst, err); + __put_user_error(h->fpinst2, &frame->ufp_exc.fpinst2, err); + + return err ? -EFAULT : 0; +} + +static int restore_vfp_context(struct vfp_sigframe __user *frame) +{ + struct thread_info *thread = current_thread_info(); + struct vfp_hard_struct *h = &thread->vfpstate.hard; + unsigned long magic; + unsigned long size; + unsigned long fpexc; + int err = 0; + + __get_user_error(magic, &frame->magic, err); + __get_user_error(size, &frame->size, err); + + if (err) + return -EFAULT; + if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE) + return -EINVAL; + + /* + * Copy the floating point registers. There can be unused + * registers see asm/hwcap.h for details. + */ + err |= __copy_from_user(&h->fpregs, &frame->ufp.fpregs, + sizeof(h->fpregs)); + /* + * Copy the status and control register. + */ + __get_user_error(h->fpscr, &frame->ufp.fpscr, err); + + /* + * Sanitise and restore the exception registers. + */ + __get_user_error(fpexc, &frame->ufp_exc.fpexc, err); + /* Ensure the VFP is enabled. */ + fpexc |= FPEXC_EN; + /* Ensure FPINST2 is invalid and the exception flag is cleared. 
*/ + fpexc &= ~(FPEXC_EX | FPEXC_FP2V); + h->fpexc = fpexc; + + __get_user_error(h->fpinst, &frame->ufp_exc.fpinst, err); + __get_user_error(h->fpinst2, &frame->ufp_exc.fpinst2, err); + + if (!err) + vfp_flush_hwstate(thread); + + return err ? -EFAULT : 0; +} + +#endif + /* * Do a signal return; undo the signal stack. These are aligned to 64-bit. */ @@ -254,8 +318,8 @@ static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf) err |= restore_iwmmxt_context(&aux->iwmmxt); #endif #ifdef CONFIG_VFP -// if (err == 0) -// err |= vfp_restore_state(&sf->aux.vfp); + if (err == 0) + err |= restore_vfp_context(&aux->vfp); #endif return err; @@ -369,8 +433,8 @@ setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set) err |= preserve_iwmmxt_context(&aux->iwmmxt); #endif #ifdef CONFIG_VFP -// if (err == 0) -// err |= vfp_save_state(&sf->aux.vfp); + if (err == 0) + err |= preserve_vfp_context(&aux->vfp); #endif __put_user_error(0, &aux->end_magic, err); @@ -545,7 +609,7 @@ static inline void setup_syscall_restart(struct pt_regs *regs) /* * OK, we're invoking a handler */ -static void +static int handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *oldset, struct pt_regs * regs, int syscall) @@ -596,7 +660,7 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, if (ret != 0) { force_sigsegv(sig, tsk); - return; + return ret; } /* @@ -610,6 +674,7 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, recalc_sigpending(); spin_unlock_irq(&tsk->sighand->siglock); + return 0; } /* @@ -621,7 +686,7 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, * the kernel can handle, and then we build all the user-level signal handling * stack-frames in one go after that. */ -static int do_signal(sigset_t *oldset, struct pt_regs *regs, int syscall) +static void do_signal(struct pt_regs *regs, int syscall) { struct k_sigaction ka; siginfo_t info; @@ -634,7 +699,7 @@ static int do_signal(sigset_t *oldset, struct pt_regs *regs, int syscall) * if so. */ if (!user_mode(regs)) - return 0; + return; if (try_to_freeze()) goto no_signal; @@ -643,9 +708,24 @@ static int do_signal(sigset_t *oldset, struct pt_regs *regs, int syscall) signr = get_signal_to_deliver(&info, &ka, regs, NULL); if (signr > 0) { - handle_signal(signr, &ka, &info, oldset, regs, syscall); + sigset_t *oldset; + + if (test_thread_flag(TIF_RESTORE_SIGMASK)) + oldset = ¤t->saved_sigmask; + else + oldset = ¤t->blocked; + if (handle_signal(signr, &ka, &info, oldset, regs, syscall) == 0) { + /* + * A signal was successfully delivered; the saved + * sigmask will have been stored in the signal frame, + * and will be restored by sigreturn, so we can simply + * clear the TIF_RESTORE_SIGMASK flag. + */ + if (test_thread_flag(TIF_RESTORE_SIGMASK)) + clear_thread_flag(TIF_RESTORE_SIGMASK); + } single_step_set(current); - return 1; + return; } no_signal: @@ -663,32 +743,16 @@ static int do_signal(sigset_t *oldset, struct pt_regs *regs, int syscall) regs->ARM_pc -= 4; #else u32 __user *usp; - u32 swival = __NR_restart_syscall; - regs->ARM_sp -= 12; + regs->ARM_sp -= 4; usp = (u32 __user *)regs->ARM_sp; - /* - * Either we supports OABI only, or we have - * EABI with the OABI compat layer enabled. - * In the later case we don't know if user - * space is EABI or not, and if not we must - * not clobber r7. Always using the OABI - * syscall solves that issue and works for - * all those cases. 
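
The sigsuspend()/do_signal() restructuring above (completed just below) replaces the old retry loop with the TIF_RESTORE_SIGMASK protocol. In outline (a sketch of the control flow, not new code):

    /*
     * sys_sigsuspend():  saved_sigmask = blocked; install new mask;
     *                    set_restore_sigmask(); return -ERESTARTNOHAND;
     * do_signal():
     *   signal pending -> frame records saved_sigmask (via oldset),
     *                     then clear TIF_RESTORE_SIGMASK
     *   none pending   -> sigprocmask(SIG_SETMASK, &saved_sigmask, NULL)
     */
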
- */ - swival = swival - __NR_SYSCALL_BASE + __NR_OABI_SYSCALL_BASE; - - put_user(regs->ARM_pc, &usp[0]); - /* swi __NR_restart_syscall */ - put_user(0xef000000 | swival, &usp[1]); - /* ldr pc, [sp], #12 */ - put_user(0xe49df00c, &usp[2]); - - flush_icache_range((unsigned long)usp, - (unsigned long)(usp + 3)); - - regs->ARM_pc = regs->ARM_sp + 4; + if (put_user(regs->ARM_pc, usp) == 0) { + regs->ARM_pc = KERN_RESTART_CODE; + } else { + regs->ARM_sp += 4; + force_sigsegv(0, current); + } #endif } } @@ -697,14 +761,28 @@ static int do_signal(sigset_t *oldset, struct pt_regs *regs, int syscall) regs->ARM_r0 == -ERESTARTNOINTR) { setup_syscall_restart(regs); } + + /* If there's no signal to deliver, we just put the saved sigmask + * back. + */ + if (test_thread_flag(TIF_RESTORE_SIGMASK)) { + clear_thread_flag(TIF_RESTORE_SIGMASK); + sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL); + } } single_step_set(current); - return 0; } asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned int thread_flags, int syscall) { if (thread_flags & _TIF_SIGPENDING) - do_signal(¤t->blocked, regs, syscall); + do_signal(regs, syscall); + + if (thread_flags & _TIF_NOTIFY_RESUME) { + clear_thread_flag(TIF_NOTIFY_RESUME); + tracehook_notify_resume(regs); + if (current->replacement_session_keyring) + key_replace_session_keyring(); + } } diff --git a/arch/arm/kernel/signal.h b/arch/arm/kernel/signal.h index 27beece..6fcfe83 100644 --- a/arch/arm/kernel/signal.h +++ b/arch/arm/kernel/signal.h @@ -1,12 +1,14 @@ /* * linux/arch/arm/kernel/signal.h * - * Copyright (C) 2005 Russell King. + * Copyright (C) 2005-2009 Russell King. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #define KERN_SIGRETURN_CODE (CONFIG_VECTORS_BASE + 0x00000500) +#define KERN_RESTART_CODE (KERN_SIGRETURN_CODE + sizeof(sigreturn_codes)) extern const unsigned long sigreturn_codes[7]; +extern const unsigned long syscall_restart_code[2]; diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index de885fd..4539ebc 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -16,6 +16,7 @@ #include <linux/cache.h> #include <linux/profile.h> #include <linux/errno.h> +#include <linux/ftrace.h> #include <linux/mm.h> #include <linux/err.h> #include <linux/cpu.h> @@ -24,6 +25,7 @@ #include <linux/irq.h> #include <linux/percpu.h> #include <linux/clockchips.h> +#include <linux/completion.h> #include <asm/atomic.h> #include <asm/cacheflush.h> @@ -33,6 +35,7 @@ #include <asm/pgtable.h> #include <asm/pgalloc.h> #include <asm/processor.h> +#include <asm/sections.h> #include <asm/tlbflush.h> #include <asm/ptrace.h> #include <asm/localtimer.h> @@ -44,22 +47,8 @@ */ struct secondary_data secondary_data; -/* - * structures for inter-processor calls - * - A collection of single bit ipi messages. 
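
Net effect of the new restart sequence just above: instead of writing a three-word trampoline to the user stack and flushing the icache, do_signal() pushes a single word and points the task at syscall_restart_code[], which sits in the always-mapped vectors page at KERN_RESTART_CODE:

    /*
     * user sp -> [ saved ARM_pc ]     single word pushed by do_signal()
     * ARM_pc   = KERN_RESTART_CODE, i.e. syscall_restart_code[]:
     *     swi  __NR_restart_syscall   @ OABI encoding; re-enter kernel
     *     ldr  pc, [sp], #4           @ pop and resume at the saved pc
     */
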
- */ -struct ipi_data { - spinlock_t lock; - unsigned long ipi_count; - unsigned long bits; -}; - -static DEFINE_PER_CPU(struct ipi_data, ipi_data) = { - .lock = SPIN_LOCK_UNLOCKED, -}; - enum ipi_msg_type { - IPI_TIMER, + IPI_TIMER = 2, IPI_RESCHEDULE, IPI_CALL_FUNC, IPI_CALL_FUNC_SINGLE, @@ -71,7 +60,6 @@ int __cpuinit __cpu_up(unsigned int cpu) struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu); struct task_struct *idle = ci->idle; pgd_t *pgd; - pmd_t *pmd; int ret; /* @@ -85,6 +73,12 @@ int __cpuinit __cpu_up(unsigned int cpu) return PTR_ERR(idle); } ci->idle = idle; + } else { + /* + * Since this idle thread is being re-used, call + * init_idle() to reinitialize the thread structure. + */ + init_idle(idle, cpu); } /* @@ -94,10 +88,16 @@ int __cpuinit __cpu_up(unsigned int cpu) * a 1:1 mapping for the physical address of the kernel. */ pgd = pgd_alloc(&init_mm); - pmd = pmd_offset(pgd + pgd_index(PHYS_OFFSET), PHYS_OFFSET); - *pmd = __pmd((PHYS_OFFSET & PGDIR_MASK) | - PMD_TYPE_SECT | PMD_SECT_AP_WRITE); - flush_pmd_entry(pmd); + if (!pgd) + return -ENOMEM; + + if (PHYS_OFFSET != PAGE_OFFSET) { +#ifndef CONFIG_HOTPLUG_CPU + identity_mapping_add(pgd, __pa(__init_begin), __pa(__init_end)); +#endif + identity_mapping_add(pgd, __pa(_stext), __pa(_etext)); + identity_mapping_add(pgd, __pa(_sdata), __pa(_edata)); + } /* * We need to tell the secondary core where to find @@ -105,7 +105,8 @@ int __cpuinit __cpu_up(unsigned int cpu) */ secondary_data.stack = task_stack_page(idle) + THREAD_START_SP; secondary_data.pgdir = virt_to_phys(pgd); - wmb(); + __cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data)); + outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1)); /* * Now bring the CPU into our world. @@ -127,39 +128,43 @@ int __cpuinit __cpu_up(unsigned int cpu) barrier(); } - if (!cpu_online(cpu)) + if (!cpu_online(cpu)) { + pr_crit("CPU%u: failed to come online\n", cpu); ret = -EIO; + } + } else { + pr_err("CPU%u: failed to boot: %d\n", cpu, ret); } secondary_data.stack = NULL; secondary_data.pgdir = 0; - *pmd = __pmd(0); - clean_pmd_entry(pmd); - pgd_free(&init_mm, pgd); - - if (ret) { - printk(KERN_CRIT "CPU%u: processor failed to boot\n", cpu); - - /* - * FIXME: We need to clean up the new idle thread. --rmk - */ + if (PHYS_OFFSET != PAGE_OFFSET) { +#ifndef CONFIG_HOTPLUG_CPU + identity_mapping_del(pgd, __pa(__init_begin), __pa(__init_end)); +#endif + identity_mapping_del(pgd, __pa(_stext), __pa(_etext)); + identity_mapping_del(pgd, __pa(_sdata), __pa(_edata)); } + pgd_free(&init_mm, pgd); + return ret; } #ifdef CONFIG_HOTPLUG_CPU +static void percpu_timer_stop(void); + /* * __cpu_disable runs on the processor to be shutdown. */ -int __cpuexit __cpu_disable(void) +int __cpu_disable(void) { unsigned int cpu = smp_processor_id(); struct task_struct *p; int ret; - ret = mach_cpu_disable(cpu); + ret = platform_cpu_disable(cpu); if (ret) return ret; @@ -177,7 +182,7 @@ int __cpuexit __cpu_disable(void) /* * Stop the local timer for this CPU. */ - local_timer_stop(); + percpu_timer_stop(); /* * Flush user cache and TLB mappings, and then remove this CPU @@ -189,19 +194,27 @@ int __cpuexit __cpu_disable(void) read_lock(&tasklist_lock); for_each_process(p) { if (p->mm) - cpu_clear(cpu, p->mm->cpu_vm_mask); + cpumask_clear_cpu(cpu, mm_cpumask(p->mm)); } read_unlock(&tasklist_lock); return 0; } +static DECLARE_COMPLETION(cpu_died); + /* * called on the thread which is asking for a CPU to be shutdown - * waits until shutdown has completed, or it is timed out. 
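
DECLARE_COMPLETION(cpu_died) above replaces the old FIXME'd teardown: in the hunks that follow, the dying CPU calls complete() once it is safe to dispose of, and the requesting CPU blocks in wait_for_completion_timeout() for up to five seconds. A userspace analogue, using a POSIX semaphore in place of the completion (build with -pthread; not kernel code):

    #include <pthread.h>
    #include <semaphore.h>
    #include <stdio.h>
    #include <time.h>

    static sem_t cpu_died;              /* stands in for the completion */

    static void *dying_cpu(void *arg)
    {
        (void)arg;
        /* ... idle_task_exit(), cache maintenance, etc. happen here ... */
        sem_post(&cpu_died);            /* complete(&cpu_died) */
        return NULL;
    }

    int main(void)
    {
        pthread_t t;
        struct timespec deadline;

        sem_init(&cpu_died, 0, 0);
        pthread_create(&t, NULL, dying_cpu, NULL);

        clock_gettime(CLOCK_REALTIME, &deadline);
        deadline.tv_sec += 5;           /* msecs_to_jiffies(5000) */

        if (sem_timedwait(&cpu_died, &deadline) != 0)
            fprintf(stderr, "CPU1: cpu didn't die\n");
        else
            printf("CPU1: shutdown\n");

        pthread_join(t, NULL);
        return 0;
    }
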
*/ -void __cpuexit __cpu_die(unsigned int cpu) +void __cpu_die(unsigned int cpu) { + if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) { + pr_err("CPU%u: cpu didn't die\n", cpu); + return; + } + printk(KERN_NOTICE "CPU%u: shutdown\n", cpu); + if (!platform_cpu_kill(cpu)) printk("CPU%u: unable to kill\n", cpu); } @@ -214,16 +227,21 @@ void __cpuexit __cpu_die(unsigned int cpu) * of the other hotplug-cpu capable cores, so presumably coming * out of idle fixes this. */ -void __cpuexit cpu_die(void) +void __ref cpu_die(void) { unsigned int cpu = smp_processor_id(); - local_irq_disable(); idle_task_exit(); + local_irq_disable(); + mb(); + + /* Tell __cpu_die() that this CPU is now safe to dispose of */ + complete(&cpu_died); + /* * actual CPU shutdown procedure is at least platform (if not - * CPU) specific + * CPU) specific. */ platform_cpu_die(cpu); @@ -233,6 +251,7 @@ void __cpuexit cpu_die(void) * to be repeated to undo the effects of taking the CPU offline. */ __asm__("mov sp, %0\n" + " mov fp, #0\n" " b secondary_start_kernel" : : "r" (task_stack_page(current) + THREAD_SIZE - 8)); @@ -240,6 +259,17 @@ void __cpuexit cpu_die(void) #endif /* CONFIG_HOTPLUG_CPU */ /* + * Called by both boot and secondaries to move global data into + * per-processor storage. + */ +static void __cpuinit smp_store_cpu_info(unsigned int cpuid) +{ + struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid); + + cpu_info->loops_per_jiffy = loops_per_jiffy; +} + +/* * This is the secondary CPU boot entry. We're using this CPUs * idle thread stack, but a set of temporary page tables. */ @@ -254,16 +284,16 @@ asmlinkage void __cpuinit secondary_start_kernel(void) * All kernel threads share the same mm context; grab a * reference and switch to it. */ - atomic_inc(&mm->mm_users); atomic_inc(&mm->mm_count); current->active_mm = mm; - cpu_set(cpu, mm->cpu_vm_mask); + cpumask_set_cpu(cpu, mm_cpumask(mm)); cpu_switch_mm(mm->pgd, mm); enter_lazy_tlb(mm, current); local_flush_tlb_all(); cpu_init(); preempt_disable(); + trace_hardirqs_off(); /* * Give the platform a chance to do its own initialisation. @@ -297,17 +327,6 @@ asmlinkage void __cpuinit secondary_start_kernel(void) cpu_idle(); } -/* - * Called by both boot and secondaries to move global data into - * per-processor storage. - */ -void __cpuinit smp_store_cpu_info(unsigned int cpuid) -{ - struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid); - - cpu_info->loops_per_jiffy = loops_per_jiffy; -} - void __init smp_cpus_done(unsigned int max_cpus) { int cpu; @@ -330,61 +349,80 @@ void __init smp_prepare_boot_cpu(void) per_cpu(cpu_data, cpu).idle = current; } -static void send_ipi_message(const struct cpumask *mask, enum ipi_msg_type msg) +void __init smp_prepare_cpus(unsigned int max_cpus) { - unsigned long flags; - unsigned int cpu; - - local_irq_save(flags); + unsigned int ncores = num_possible_cpus(); - for_each_cpu(cpu, mask) { - struct ipi_data *ipi = &per_cpu(ipi_data, cpu); - - spin_lock(&ipi->lock); - ipi->bits |= 1 << msg; - spin_unlock(&ipi->lock); - } + smp_store_cpu_info(smp_processor_id()); /* - * Call the platform specific cross-CPU call function. + * are we trying to boot more cores than exist? */ - smp_cross_call(mask); + if (max_cpus > ncores) + max_cpus = ncores; - local_irq_restore(flags); + if (max_cpus > 1) { + /* + * Enable the local timer or broadcast device for the + * boot CPU, but only if we have more than one CPU. 
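
smp_prepare_cpus() above first clamps the requested count to num_possible_cpus() and only performs SMP preparation when more than one core will actually boot. The policy, modelled standalone:

    #include <stdio.h>

    /* policy only; ncores stands in for num_possible_cpus() */
    static unsigned int prepare_cpus(unsigned int max_cpus, unsigned int ncores)
    {
        if (max_cpus > ncores)
            max_cpus = ncores;      /* never try to boot cores that don't exist */
        if (max_cpus > 1)
            printf("SMP prep: timer + platform setup for %u cores\n", max_cpus);
        else
            printf("UP boot: skipping SMP preparation\n");
        return max_cpus;
    }

    int main(void)
    {
        prepare_cpus(8, 4);         /* clamped to 4 */
        prepare_cpus(1, 4);         /* stays uniprocessor */
        return 0;
    }
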
+ */ + percpu_timer_setup(); + + /* + * Initialise the SCU if there are more than one CPU + * and let them know where to start. + */ + platform_smp_prepare_cpus(max_cpus); + } } void arch_send_call_function_ipi_mask(const struct cpumask *mask) { - send_ipi_message(mask, IPI_CALL_FUNC); + smp_cross_call(mask, IPI_CALL_FUNC); } void arch_send_call_function_single_ipi(int cpu) { - send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE); + smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE); } -void show_ipi_list(struct seq_file *p) +static const char *ipi_types[NR_IPI] = { +#define S(x,s) [x - IPI_TIMER] = s + S(IPI_TIMER, "Timer broadcast interrupts"), + S(IPI_RESCHEDULE, "Rescheduling interrupts"), + S(IPI_CALL_FUNC, "Function call interrupts"), + S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"), + S(IPI_CPU_STOP, "CPU stop interrupts"), +}; + +void show_ipi_list(struct seq_file *p, int prec) { - unsigned int cpu; + unsigned int cpu, i; - seq_puts(p, "IPI:"); + for (i = 0; i < NR_IPI; i++) { + seq_printf(p, "%*s%u: ", prec - 1, "IPI", i); - for_each_present_cpu(cpu) - seq_printf(p, " %10lu", per_cpu(ipi_data, cpu).ipi_count); + for_each_present_cpu(cpu) + seq_printf(p, "%10u ", + __get_irq_stat(cpu, ipi_irqs[i])); - seq_putc(p, '\n'); + seq_printf(p, " %s\n", ipi_types[i]); + } } -void show_local_irqs(struct seq_file *p) +u64 smp_irq_stat_cpu(unsigned int cpu) { - unsigned int cpu; + u64 sum = 0; + int i; - seq_printf(p, "LOC: "); + for (i = 0; i < NR_IPI; i++) + sum += __get_irq_stat(cpu, ipi_irqs[i]); - for_each_present_cpu(cpu) - seq_printf(p, "%10u ", irq_stat[cpu].local_timer_irqs); +#ifdef CONFIG_LOCAL_TIMERS + sum += __get_irq_stat(cpu, local_timer_irqs); +#endif - seq_putc(p, '\n'); + return sum; } /* @@ -401,26 +439,42 @@ static void ipi_timer(void) } #ifdef CONFIG_LOCAL_TIMERS -asmlinkage void __exception do_local_timer(struct pt_regs *regs) +asmlinkage void __exception_irq_entry do_local_timer(struct pt_regs *regs) { struct pt_regs *old_regs = set_irq_regs(regs); int cpu = smp_processor_id(); if (local_timer_ack()) { - irq_stat[cpu].local_timer_irqs++; + __inc_irq_stat(cpu, local_timer_irqs); ipi_timer(); } set_irq_regs(old_regs); } + +void show_local_irqs(struct seq_file *p, int prec) +{ + unsigned int cpu; + + seq_printf(p, "%*s: ", prec, "LOC"); + + for_each_present_cpu(cpu) + seq_printf(p, "%10u ", __get_irq_stat(cpu, local_timer_irqs)); + + seq_printf(p, " Local timer interrupts\n"); +} #endif #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST static void smp_timer_broadcast(const struct cpumask *mask) { - send_ipi_message(mask, IPI_TIMER); + smp_cross_call(mask, IPI_TIMER); } +#else +#define smp_timer_broadcast NULL +#endif +#ifndef CONFIG_LOCAL_TIMERS static void broadcast_timer_set_mode(enum clock_event_mode mode, struct clock_event_device *evt) { @@ -435,7 +489,6 @@ static void local_timer_setup(struct clock_event_device *evt) evt->rating = 400; evt->mult = 1; evt->set_mode = broadcast_timer_set_mode; - evt->broadcast = smp_timer_broadcast; clockevents_register_device(evt); } @@ -447,10 +500,26 @@ void __cpuinit percpu_timer_setup(void) struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu); evt->cpumask = cpumask_of(cpu); + evt->broadcast = smp_timer_broadcast; local_timer_setup(evt); } +#ifdef CONFIG_HOTPLUG_CPU +/* + * The generic clock events code purposely does not stop the local timer + * on CPU_DEAD/CPU_DEAD_FROZEN hotplug events, so we have to do it + * manually here. 
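
The S() macro above offsets every designated initializer by IPI_TIMER because IPI numbers now start at 2 (0 and 1 are left reserved), which keeps ipi_types[] and the per-CPU ipi_irqs[] counters compact. The same trick, standalone:

    #include <stdio.h>

    enum ipi_msg_type { IPI_TIMER = 2, IPI_RESCHEDULE, IPI_CALL_FUNC };
    #define NR_IPI 3

    static const char *ipi_types[NR_IPI] = {
    #define S(x, s) [x - IPI_TIMER] = s
        S(IPI_TIMER,      "Timer broadcast interrupts"),
        S(IPI_RESCHEDULE, "Rescheduling interrupts"),
        S(IPI_CALL_FUNC,  "Function call interrupts"),
    #undef S
    };

    int main(void)
    {
        int i;

        for (i = 0; i < NR_IPI; i++)
            printf("IPI%d: %s\n", i + IPI_TIMER, ipi_types[i]);
        return 0;
    }
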
+ */ +static void percpu_timer_stop(void) +{ + unsigned int cpu = smp_processor_id(); + struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu); + + evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt); +} +#endif + static DEFINE_SPINLOCK(stop_lock); /* @@ -458,10 +527,13 @@ static DEFINE_SPINLOCK(stop_lock); */ static void ipi_cpu_stop(unsigned int cpu) { - spin_lock(&stop_lock); - printk(KERN_CRIT "CPU%u: stopping\n", cpu); - dump_stack(); - spin_unlock(&stop_lock); + if (system_state == SYSTEM_BOOTING || + system_state == SYSTEM_RUNNING) { + spin_lock(&stop_lock); + printk(KERN_CRIT "CPU%u: stopping\n", cpu); + dump_stack(); + spin_unlock(&stop_lock); + } set_cpu_online(cpu, false); @@ -474,221 +546,76 @@ static void ipi_cpu_stop(unsigned int cpu) /* * Main handler for inter-processor interrupts - * - * For ARM, the ipimask now only identifies a single - * category of IPI (Bit 1 IPIs have been replaced by a - * different mechanism): - * - * Bit 0 - Inter-processor function call */ -asmlinkage void __exception do_IPI(struct pt_regs *regs) +asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs) { unsigned int cpu = smp_processor_id(); - struct ipi_data *ipi = &per_cpu(ipi_data, cpu); struct pt_regs *old_regs = set_irq_regs(regs); - ipi->ipi_count++; - - for (;;) { - unsigned long msgs; - - spin_lock(&ipi->lock); - msgs = ipi->bits; - ipi->bits = 0; - spin_unlock(&ipi->lock); - - if (!msgs) - break; - - do { - unsigned nextmsg; + if (ipinr >= IPI_TIMER && ipinr < IPI_TIMER + NR_IPI) + __inc_irq_stat(cpu, ipi_irqs[ipinr - IPI_TIMER]); - nextmsg = msgs & -msgs; - msgs &= ~nextmsg; - nextmsg = ffz(~nextmsg); - - switch (nextmsg) { - case IPI_TIMER: - ipi_timer(); - break; + switch (ipinr) { + case IPI_TIMER: + ipi_timer(); + break; - case IPI_RESCHEDULE: - /* - * nothing more to do - eveything is - * done on the interrupt return path - */ - break; + case IPI_RESCHEDULE: + /* + * nothing more to do - eveything is + * done on the interrupt return path + */ + break; - case IPI_CALL_FUNC: - generic_smp_call_function_interrupt(); - break; + case IPI_CALL_FUNC: + generic_smp_call_function_interrupt(); + break; - case IPI_CALL_FUNC_SINGLE: - generic_smp_call_function_single_interrupt(); - break; + case IPI_CALL_FUNC_SINGLE: + generic_smp_call_function_single_interrupt(); + break; - case IPI_CPU_STOP: - ipi_cpu_stop(cpu); - break; + case IPI_CPU_STOP: + ipi_cpu_stop(cpu); + break; - default: - printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n", - cpu, nextmsg); - break; - } - } while (msgs); + default: + printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n", + cpu, ipinr); + break; } - set_irq_regs(old_regs); } void smp_send_reschedule(int cpu) { - send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE); + smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE); } void smp_send_stop(void) { - cpumask_t mask = cpu_online_map; - cpu_clear(smp_processor_id(), mask); - send_ipi_message(&mask, IPI_CPU_STOP); -} + unsigned long timeout; -/* - * not supported here - */ -int setup_profiling_timer(unsigned int multiplier) -{ - return -EINVAL; -} + if (num_online_cpus() > 1) { + cpumask_t mask = cpu_online_map; + cpu_clear(smp_processor_id(), mask); -static void -on_each_cpu_mask(void (*func)(void *), void *info, int wait, - const struct cpumask *mask) -{ - preempt_disable(); + smp_cross_call(&mask, IPI_CPU_STOP); + } - smp_call_function_many(mask, func, info, wait); - if (cpumask_test_cpu(smp_processor_id(), mask)) - func(info); + /* Wait up to one second for other CPUs to stop */ + timeout = 
USEC_PER_SEC; + while (num_online_cpus() > 1 && timeout--) + udelay(1); - preempt_enable(); + if (num_online_cpus() > 1) + pr_warning("SMP: failed to stop secondary CPUs\n"); } -/**********************************************************************/ - /* - * TLB operations + * not supported here */ -struct tlb_args { - struct vm_area_struct *ta_vma; - unsigned long ta_start; - unsigned long ta_end; -}; - -/* all SMP configurations have the extended CPUID registers */ -static inline int tlb_ops_need_broadcast(void) -{ - return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 2; -} - -static inline void ipi_flush_tlb_all(void *ignored) -{ - local_flush_tlb_all(); -} - -static inline void ipi_flush_tlb_mm(void *arg) -{ - struct mm_struct *mm = (struct mm_struct *)arg; - - local_flush_tlb_mm(mm); -} - -static inline void ipi_flush_tlb_page(void *arg) -{ - struct tlb_args *ta = (struct tlb_args *)arg; - - local_flush_tlb_page(ta->ta_vma, ta->ta_start); -} - -static inline void ipi_flush_tlb_kernel_page(void *arg) -{ - struct tlb_args *ta = (struct tlb_args *)arg; - - local_flush_tlb_kernel_page(ta->ta_start); -} - -static inline void ipi_flush_tlb_range(void *arg) -{ - struct tlb_args *ta = (struct tlb_args *)arg; - - local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end); -} - -static inline void ipi_flush_tlb_kernel_range(void *arg) -{ - struct tlb_args *ta = (struct tlb_args *)arg; - - local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end); -} - -void flush_tlb_all(void) -{ - if (tlb_ops_need_broadcast()) - on_each_cpu(ipi_flush_tlb_all, NULL, 1); - else - local_flush_tlb_all(); -} - -void flush_tlb_mm(struct mm_struct *mm) -{ - if (tlb_ops_need_broadcast()) - on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, &mm->cpu_vm_mask); - else - local_flush_tlb_mm(mm); -} - -void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) -{ - if (tlb_ops_need_broadcast()) { - struct tlb_args ta; - ta.ta_vma = vma; - ta.ta_start = uaddr; - on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, &vma->vm_mm->cpu_vm_mask); - } else - local_flush_tlb_page(vma, uaddr); -} - -void flush_tlb_kernel_page(unsigned long kaddr) -{ - if (tlb_ops_need_broadcast()) { - struct tlb_args ta; - ta.ta_start = kaddr; - on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1); - } else - local_flush_tlb_kernel_page(kaddr); -} - -void flush_tlb_range(struct vm_area_struct *vma, - unsigned long start, unsigned long end) -{ - if (tlb_ops_need_broadcast()) { - struct tlb_args ta; - ta.ta_vma = vma; - ta.ta_start = start; - ta.ta_end = end; - on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, &vma->vm_mm->cpu_vm_mask); - } else - local_flush_tlb_range(vma, start, end); -} - -void flush_tlb_kernel_range(unsigned long start, unsigned long end) +int setup_profiling_timer(unsigned int multiplier) { - if (tlb_ops_need_broadcast()) { - struct tlb_args ta; - ta.ta_start = start; - ta.ta_end = end; - on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1); - } else - local_flush_tlb_kernel_range(start, end); + return -EINVAL; } diff --git a/arch/arm/kernel/smp_scu.c b/arch/arm/kernel/smp_scu.c index d3831f6..9ab4149 100644 --- a/arch/arm/kernel/smp_scu.c +++ b/arch/arm/kernel/smp_scu.c @@ -37,6 +37,10 @@ void __init scu_enable(void __iomem *scu_base) u32 scu_ctrl; scu_ctrl = __raw_readl(scu_base + SCU_CTRL); + /* already enabled? 
*/ + if (scu_ctrl & 1) + return; + scu_ctrl |= 1; __raw_writel(scu_ctrl, scu_base + SCU_CTRL); diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c new file mode 100644 index 0000000..7dcb352 --- /dev/null +++ b/arch/arm/kernel/smp_tlb.c @@ -0,0 +1,139 @@ +/* + * linux/arch/arm/kernel/smp_tlb.c + * + * Copyright (C) 2002 ARM Limited, All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include <linux/preempt.h> +#include <linux/smp.h> + +#include <asm/smp_plat.h> +#include <asm/tlbflush.h> + +static void on_each_cpu_mask(void (*func)(void *), void *info, int wait, + const struct cpumask *mask) +{ + preempt_disable(); + + smp_call_function_many(mask, func, info, wait); + if (cpumask_test_cpu(smp_processor_id(), mask)) + func(info); + + preempt_enable(); +} + +/**********************************************************************/ + +/* + * TLB operations + */ +struct tlb_args { + struct vm_area_struct *ta_vma; + unsigned long ta_start; + unsigned long ta_end; +}; + +static inline void ipi_flush_tlb_all(void *ignored) +{ + local_flush_tlb_all(); +} + +static inline void ipi_flush_tlb_mm(void *arg) +{ + struct mm_struct *mm = (struct mm_struct *)arg; + + local_flush_tlb_mm(mm); +} + +static inline void ipi_flush_tlb_page(void *arg) +{ + struct tlb_args *ta = (struct tlb_args *)arg; + + local_flush_tlb_page(ta->ta_vma, ta->ta_start); +} + +static inline void ipi_flush_tlb_kernel_page(void *arg) +{ + struct tlb_args *ta = (struct tlb_args *)arg; + + local_flush_tlb_kernel_page(ta->ta_start); +} + +static inline void ipi_flush_tlb_range(void *arg) +{ + struct tlb_args *ta = (struct tlb_args *)arg; + + local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end); +} + +static inline void ipi_flush_tlb_kernel_range(void *arg) +{ + struct tlb_args *ta = (struct tlb_args *)arg; + + local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end); +} + +void flush_tlb_all(void) +{ + if (tlb_ops_need_broadcast()) + on_each_cpu(ipi_flush_tlb_all, NULL, 1); + else + local_flush_tlb_all(); +} + +void flush_tlb_mm(struct mm_struct *mm) +{ + if (tlb_ops_need_broadcast()) + on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, mm_cpumask(mm)); + else + local_flush_tlb_mm(mm); +} + +void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) +{ + if (tlb_ops_need_broadcast()) { + struct tlb_args ta; + ta.ta_vma = vma; + ta.ta_start = uaddr; + on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, mm_cpumask(vma->vm_mm)); + } else + local_flush_tlb_page(vma, uaddr); +} + +void flush_tlb_kernel_page(unsigned long kaddr) +{ + if (tlb_ops_need_broadcast()) { + struct tlb_args ta; + ta.ta_start = kaddr; + on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1); + } else + local_flush_tlb_kernel_page(kaddr); +} + +void flush_tlb_range(struct vm_area_struct *vma, + unsigned long start, unsigned long end) +{ + if (tlb_ops_need_broadcast()) { + struct tlb_args ta; + ta.ta_vma = vma; + ta.ta_start = start; + ta.ta_end = end; + on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, mm_cpumask(vma->vm_mm)); + } else + local_flush_tlb_range(vma, start, end); +} + +void flush_tlb_kernel_range(unsigned long start, unsigned long end) +{ + if (tlb_ops_need_broadcast()) { + struct tlb_args ta; + ta.ta_start = start; + ta.ta_end = end; + on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1); + } else + local_flush_tlb_kernel_range(start, end); +} + diff --git a/arch/arm/kernel/smp_twd.c 
b/arch/arm/kernel/smp_twd.c index d8c88c6..dd79074 100644 --- a/arch/arm/kernel/smp_twd.c +++ b/arch/arm/kernel/smp_twd.c @@ -21,23 +21,6 @@ #include <asm/smp_twd.h> #include <asm/hardware/gic.h> -#define TWD_TIMER_LOAD 0x00 -#define TWD_TIMER_COUNTER 0x04 -#define TWD_TIMER_CONTROL 0x08 -#define TWD_TIMER_INTSTAT 0x0C - -#define TWD_WDOG_LOAD 0x20 -#define TWD_WDOG_COUNTER 0x24 -#define TWD_WDOG_CONTROL 0x28 -#define TWD_WDOG_INTSTAT 0x2C -#define TWD_WDOG_RESETSTAT 0x30 -#define TWD_WDOG_DISABLE 0x34 - -#define TWD_TIMER_CONTROL_ENABLE (1 << 0) -#define TWD_TIMER_CONTROL_ONESHOT (0 << 1) -#define TWD_TIMER_CONTROL_PERIODIC (1 << 1) -#define TWD_TIMER_CONTROL_IT_ENABLE (1 << 2) - /* set up by the platform code */ void __iomem *twd_base; @@ -144,12 +127,11 @@ static void __cpuinit twd_calibrate_rate(void) */ void __cpuinit twd_timer_setup(struct clock_event_device *clk) { - unsigned long flags; - twd_calibrate_rate(); clk->name = "local_timer"; - clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT; + clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT | + CLOCK_EVT_FEAT_C3STOP; clk->rating = 350; clk->set_mode = twd_set_mode; clk->set_next_event = twd_set_next_event; @@ -159,17 +141,7 @@ void __cpuinit twd_timer_setup(struct clock_event_device *clk) clk->min_delta_ns = clockevent_delta2ns(0xf, clk); /* Make sure our local interrupt controller has this enabled */ - local_irq_save(flags); - get_irq_chip(clk->irq)->unmask(clk->irq); - local_irq_restore(flags); + gic_enable_ppi(clk->irq); clockevents_register_device(clk); } - -/* - * take a local timer down - */ -void __cpuexit twd_timer_stop(void) -{ - __raw_writel(0, twd_base + TWD_TIMER_CONTROL); -} diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c index 9f444e5..c2e112e 100644 --- a/arch/arm/kernel/stacktrace.c +++ b/arch/arm/kernel/stacktrace.c @@ -21,14 +21,14 @@ * Note that with framepointer enabled, even the leaf functions have the same * prologue and epilogue, therefore we can ignore the LR value in this case. */ -int unwind_frame(struct stackframe *frame) +int notrace unwind_frame(struct stackframe *frame) { unsigned long high, low; unsigned long fp = frame->fp; /* only go to a higher address on the stack */ low = frame->sp; - high = ALIGN(low, THREAD_SIZE) + THREAD_SIZE; + high = ALIGN(low, THREAD_SIZE); /* check current frame pointer is within bounds */ if (fp < (low + 12) || fp + 4 >= high) @@ -43,7 +43,7 @@ int unwind_frame(struct stackframe *frame) } #endif -void walk_stackframe(struct stackframe *frame, +void notrace walk_stackframe(struct stackframe *frame, int (*fn)(struct stackframe *, void *), void *data) { while (1) { diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c new file mode 100644 index 0000000..7a576092 --- /dev/null +++ b/arch/arm/kernel/swp_emulate.c @@ -0,0 +1,267 @@ +/* + * linux/arch/arm/kernel/swp_emulate.c + * + * Copyright (C) 2009 ARM Limited + * __user_* functions adapted from include/asm/uaccess.h + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Implements emulation of the SWP/SWPB instructions using load-exclusive and + * store-exclusive for processors that have them disabled (or future ones that + * might not implement them). 
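
The stacktrace.c hunk above also tightens the unwinder's upper bound: ALIGN(sp, THREAD_SIZE) already names the top of the current stack, so the old "+ THREAD_SIZE" let unwind_frame() wander a full stack beyond it. A standalone demonstration (THREAD_SIZE and the sp value are illustrative):

    #include <stdio.h>

    #define THREAD_SIZE 8192UL
    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
        unsigned long sp = 0xc0a01f20UL;    /* hypothetical kernel sp */

        printf("old high = %#lx\n", ALIGN(sp, THREAD_SIZE) + THREAD_SIZE);
        printf("new high = %#lx\n", ALIGN(sp, THREAD_SIZE));   /* 0xc0a02000 */
        return 0;
    }
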
+ * + * Syntax of SWP{B} instruction: SWP{B}<c> <Rt>, <Rt2>, [<Rn>] + * Where: Rt = destination + * Rt2 = source + * Rn = address + */ + +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/proc_fs.h> +#include <linux/sched.h> +#include <linux/syscalls.h> +#include <linux/perf_event.h> + +#include <asm/traps.h> +#include <asm/uaccess.h> + +/* + * Error-checking SWP macros implemented using ldrex{b}/strex{b} + */ +#define __user_swpX_asm(data, addr, res, temp, B) \ + __asm__ __volatile__( \ + " mov %2, %1\n" \ + "0: ldrex"B" %1, [%3]\n" \ + "1: strex"B" %0, %2, [%3]\n" \ + " cmp %0, #0\n" \ + " movne %0, %4\n" \ + "2:\n" \ + " .section .fixup,\"ax\"\n" \ + " .align 2\n" \ + "3: mov %0, %5\n" \ + " b 2b\n" \ + " .previous\n" \ + " .section __ex_table,\"a\"\n" \ + " .align 3\n" \ + " .long 0b, 3b\n" \ + " .long 1b, 3b\n" \ + " .previous" \ + : "=&r" (res), "+r" (data), "=&r" (temp) \ + : "r" (addr), "i" (-EAGAIN), "i" (-EFAULT) \ + : "cc", "memory") + +#define __user_swp_asm(data, addr, res, temp) \ + __user_swpX_asm(data, addr, res, temp, "") +#define __user_swpb_asm(data, addr, res, temp) \ + __user_swpX_asm(data, addr, res, temp, "b") + +/* + * Macros/defines for extracting register numbers from instruction. + */ +#define EXTRACT_REG_NUM(instruction, offset) \ + (((instruction) & (0xf << (offset))) >> (offset)) +#define RN_OFFSET 16 +#define RT_OFFSET 12 +#define RT2_OFFSET 0 +/* + * Bit 22 of the instruction encoding distinguishes between + * the SWP and SWPB variants (bit set means SWPB). + */ +#define TYPE_SWPB (1 << 22) + +static unsigned long swpcounter; +static unsigned long swpbcounter; +static unsigned long abtcounter; +static pid_t previous_pid; + +#ifdef CONFIG_PROC_FS +static int proc_read_status(char *page, char **start, off_t off, int count, + int *eof, void *data) +{ + char *p = page; + int len; + + p += sprintf(p, "Emulated SWP:\t\t%lu\n", swpcounter); + p += sprintf(p, "Emulated SWPB:\t\t%lu\n", swpbcounter); + p += sprintf(p, "Aborted SWP{B}:\t\t%lu\n", abtcounter); + if (previous_pid != 0) + p += sprintf(p, "Last process:\t\t%d\n", previous_pid); + + len = (p - page) - off; + if (len < 0) + len = 0; + + *eof = (len <= count) ? 1 : 0; + *start = page + off; + + return len; +} +#endif + +/* + * Set up process info to signal segmentation fault - called on access error. + */ +static void set_segfault(struct pt_regs *regs, unsigned long addr) +{ + siginfo_t info; + + if (find_vma(current->mm, addr) == NULL) + info.si_code = SEGV_MAPERR; + else + info.si_code = SEGV_ACCERR; + + info.si_signo = SIGSEGV; + info.si_errno = 0; + info.si_addr = (void *) instruction_pointer(regs); + + pr_debug("SWP{B} emulation: access caused memory abort!\n"); + arm_notify_die("Illegal memory access", regs, &info, 0, 0); + + abtcounter++; +} + +static int emulate_swpX(unsigned int address, unsigned int *data, + unsigned int type) +{ + unsigned int res = 0; + + if ((type != TYPE_SWPB) && (address & 0x3)) { + /* SWP to unaligned address not permitted */ + pr_debug("SWP instruction on unaligned pointer!\n"); + return -EFAULT; + } + + while (1) { + unsigned long temp; + + /* + * Barrier required between accessing protected resource and + * releasing a lock for it. Legacy code might not have done + * this, and we cannot determine that this is not the case + * being emulated, so insert always. 
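
The operand-extraction macros above can be checked against a concrete encoding: "swp r0, r1, [r2]" assembles to 0xe1020091, with Rn in bits 19:16, Rt in 15:12, Rt2 in 3:0, and SWPB flagged by bit 22.

    #include <stdio.h>

    #define EXTRACT_REG_NUM(instruction, offset) \
        (((instruction) & (0xf << (offset))) >> (offset))
    #define RN_OFFSET  16
    #define RT_OFFSET  12
    #define RT2_OFFSET 0
    #define TYPE_SWPB  (1 << 22)

    int main(void)
    {
        unsigned int instr = 0xe1020091;    /* swp r0, r1, [r2] */

        printf("Rn  = r%u\n", EXTRACT_REG_NUM(instr, RN_OFFSET));   /* r2 */
        printf("Rt  = r%u\n", EXTRACT_REG_NUM(instr, RT_OFFSET));   /* r0 */
        printf("Rt2 = r%u\n", EXTRACT_REG_NUM(instr, RT2_OFFSET));  /* r1 */
        printf("%s\n", (instr & TYPE_SWPB) ? "SWPB" : "SWP");
        return 0;
    }
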
+ */ + smp_mb(); + + if (type == TYPE_SWPB) + __user_swpb_asm(*data, address, res, temp); + else + __user_swp_asm(*data, address, res, temp); + + if (likely(res != -EAGAIN) || signal_pending(current)) + break; + + cond_resched(); + } + + if (res == 0) { + /* + * Barrier also required between aquiring a lock for a + * protected resource and accessing the resource. Inserted for + * same reason as above. + */ + smp_mb(); + + if (type == TYPE_SWPB) + swpbcounter++; + else + swpcounter++; + } + + return res; +} + +/* + * swp_handler logs the id of calling process, dissects the instruction, sanity + * checks the memory location, calls emulate_swpX for the actual operation and + * deals with fixup/error handling before returning + */ +static int swp_handler(struct pt_regs *regs, unsigned int instr) +{ + unsigned int address, destreg, data, type; + unsigned int res = 0; + + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, regs->ARM_pc); + + if (current->pid != previous_pid) { + pr_debug("\"%s\" (%ld) uses deprecated SWP{B} instruction\n", + current->comm, (unsigned long)current->pid); + previous_pid = current->pid; + } + + address = regs->uregs[EXTRACT_REG_NUM(instr, RN_OFFSET)]; + data = regs->uregs[EXTRACT_REG_NUM(instr, RT2_OFFSET)]; + destreg = EXTRACT_REG_NUM(instr, RT_OFFSET); + + type = instr & TYPE_SWPB; + + pr_debug("addr in r%d->0x%08x, dest is r%d, source in r%d->0x%08x)\n", + EXTRACT_REG_NUM(instr, RN_OFFSET), address, + destreg, EXTRACT_REG_NUM(instr, RT2_OFFSET), data); + + /* Check access in reasonable access range for both SWP and SWPB */ + if (!access_ok(VERIFY_WRITE, (address & ~3), 4)) { + pr_debug("SWP{B} emulation: access to %p not allowed!\n", + (void *)address); + res = -EFAULT; + } else { + res = emulate_swpX(address, &data, type); + } + + if (res == 0) { + /* + * On successful emulation, revert the adjustment to the PC + * made in kernel/traps.c in order to resume execution at the + * instruction following the SWP{B}. + */ + regs->ARM_pc += 4; + regs->uregs[destreg] = data; + } else if (res == -EFAULT) { + /* + * Memory errors do not mean emulation failed. + * Set up signal info to return SEGV, then return OK + */ + set_segfault(regs, address); + } + + return 0; +} + +/* + * Only emulate SWP/SWPB executed in ARM state/User mode. + * The kernel must be SWP free and SWP{B} does not exist in Thumb/ThumbEE. + */ +static struct undef_hook swp_hook = { + .instr_mask = 0x0fb00ff0, + .instr_val = 0x01000090, + .cpsr_mask = MODE_MASK | PSR_T_BIT | PSR_J_BIT, + .cpsr_val = USR_MODE, + .fn = swp_handler +}; + +/* + * Register handler and create status file in /proc/cpu + * Invoked as late_initcall, since not needed before init spawned. 
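
The undef_hook registered above matches instructions by mask and value. A quick standalone check that the chosen pair accepts both SWP and SWPB while rejecting unrelated ARM instructions (encodings shown in the comments):

    #include <stdio.h>

    #define INSTR_MASK 0x0fb00ff0   /* .instr_mask from the hook above */
    #define INSTR_VAL  0x01000090   /* .instr_val */

    static int hook_matches(unsigned int instr)
    {
        return (instr & INSTR_MASK) == INSTR_VAL;
    }

    int main(void)
    {
        printf("%d\n", hook_matches(0xe1020091));   /* swp  r0, r1, [r2] -> 1 */
        printf("%d\n", hook_matches(0xe1420091));   /* swpb r0, r1, [r2] -> 1 */
        printf("%d\n", hook_matches(0xe0810002));   /* add  r0, r1, r2   -> 0 */
        return 0;
    }
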
+ */ +static int __init swp_emulation_init(void) +{ +#ifdef CONFIG_PROC_FS + struct proc_dir_entry *res; + + res = create_proc_entry("cpu/swp_emulation", S_IRUGO, NULL); + + if (!res) + return -ENOMEM; + + res->read_proc = proc_read_status; +#endif /* CONFIG_PROC_FS */ + + printk(KERN_NOTICE "Registering SWP/SWPB emulation handler\n"); + register_undef_hook(&swp_hook); + + return 0; +} + +late_initcall(swp_emulation_init); diff --git a/arch/arm/kernel/sys_arm.c b/arch/arm/kernel/sys_arm.c index b3ec641..62e7c61 100644 --- a/arch/arm/kernel/sys_arm.c +++ b/arch/arm/kernel/sys_arm.c @@ -15,7 +15,6 @@ #include <linux/module.h> #include <linux/errno.h> #include <linux/sched.h> -#include <linux/slab.h> #include <linux/mm.h> #include <linux/sem.h> #include <linux/msg.h> @@ -25,191 +24,9 @@ #include <linux/mman.h> #include <linux/fs.h> #include <linux/file.h> -#include <linux/utsname.h> #include <linux/ipc.h> #include <linux/uaccess.h> - -extern unsigned long do_mremap(unsigned long addr, unsigned long old_len, - unsigned long new_len, unsigned long flags, - unsigned long new_addr); - -/* common code for old and new mmaps */ -inline long do_mmap2( - unsigned long addr, unsigned long len, - unsigned long prot, unsigned long flags, - unsigned long fd, unsigned long pgoff) -{ - int error = -EINVAL; - struct file * file = NULL; - - flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); - - if (flags & MAP_FIXED && addr < FIRST_USER_ADDRESS) - goto out; - - error = -EBADF; - if (!(flags & MAP_ANONYMOUS)) { - file = fget(fd); - if (!file) - goto out; - } - - down_write(¤t->mm->mmap_sem); - error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); - up_write(¤t->mm->mmap_sem); - - if (file) - fput(file); -out: - return error; -} - -struct mmap_arg_struct { - unsigned long addr; - unsigned long len; - unsigned long prot; - unsigned long flags; - unsigned long fd; - unsigned long offset; -}; - -asmlinkage int old_mmap(struct mmap_arg_struct __user *arg) -{ - int error = -EFAULT; - struct mmap_arg_struct a; - - if (copy_from_user(&a, arg, sizeof(a))) - goto out; - - error = -EINVAL; - if (a.offset & ~PAGE_MASK) - goto out; - - error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT); -out: - return error; -} - -asmlinkage unsigned long -sys_arm_mremap(unsigned long addr, unsigned long old_len, - unsigned long new_len, unsigned long flags, - unsigned long new_addr) -{ - unsigned long ret = -EINVAL; - - if (flags & MREMAP_FIXED && new_addr < FIRST_USER_ADDRESS) - goto out; - - down_write(¤t->mm->mmap_sem); - ret = do_mremap(addr, old_len, new_len, flags, new_addr); - up_write(¤t->mm->mmap_sem); - -out: - return ret; -} - -/* - * Perform the select(nd, in, out, ex, tv) and mmap() system - * calls. - */ - -struct sel_arg_struct { - unsigned long n; - fd_set __user *inp, *outp, *exp; - struct timeval __user *tvp; -}; - -asmlinkage int old_select(struct sel_arg_struct __user *arg) -{ - struct sel_arg_struct a; - - if (copy_from_user(&a, arg, sizeof(a))) - return -EFAULT; - /* sys_select() does the appropriate kernel locking */ - return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp); -} - -#if !defined(CONFIG_AEABI) || defined(CONFIG_OABI_COMPAT) -/* - * sys_ipc() is the de-multiplexer for the SysV IPC calls.. - * - * This is really horribly ugly. 
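
The "horribly ugly" sys_ipc() demultiplexer removed above packs an ABI version into the top 16 bits of call and dispatches on the low 16. A standalone sketch of the split; MSGRCV's value follows the classic SysV numbering, but the example is purely illustrative:

    #include <stdio.h>

    #define MSGRCV 12   /* classic SysV IPC call number */

    static void demux(unsigned int call)
    {
        unsigned int version = call >> 16;  /* hack for backward compatibility */
        call &= 0xffff;
        printf("call %u, version %u\n", call, version);
    }

    int main(void)
    {
        demux(MSGRCV);                /* old-style msgrcv: version 0 */
        demux(MSGRCV | (1u << 16));   /* new-style msgrcv: version 1 */
        return 0;
    }
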
- */ -asmlinkage int sys_ipc(uint call, int first, int second, int third, - void __user *ptr, long fifth) -{ - int version, ret; - - version = call >> 16; /* hack for backward compatibility */ - call &= 0xffff; - - switch (call) { - case SEMOP: - return sys_semtimedop (first, (struct sembuf __user *)ptr, second, NULL); - case SEMTIMEDOP: - return sys_semtimedop(first, (struct sembuf __user *)ptr, second, - (const struct timespec __user *)fifth); - - case SEMGET: - return sys_semget (first, second, third); - case SEMCTL: { - union semun fourth; - if (!ptr) - return -EINVAL; - if (get_user(fourth.__pad, (void __user * __user *) ptr)) - return -EFAULT; - return sys_semctl (first, second, third, fourth); - } - - case MSGSND: - return sys_msgsnd(first, (struct msgbuf __user *) ptr, - second, third); - case MSGRCV: - switch (version) { - case 0: { - struct ipc_kludge tmp; - if (!ptr) - return -EINVAL; - if (copy_from_user(&tmp,(struct ipc_kludge __user *)ptr, - sizeof (tmp))) - return -EFAULT; - return sys_msgrcv (first, tmp.msgp, second, - tmp.msgtyp, third); - } - default: - return sys_msgrcv (first, - (struct msgbuf __user *) ptr, - second, fifth, third); - } - case MSGGET: - return sys_msgget ((key_t) first, second); - case MSGCTL: - return sys_msgctl(first, second, (struct msqid_ds __user *)ptr); - - case SHMAT: - switch (version) { - default: { - ulong raddr; - ret = do_shmat(first, (char __user *)ptr, second, &raddr); - if (ret) - return ret; - return put_user(raddr, (ulong __user *)third); - } - case 1: /* Of course, we don't support iBCS2! */ - return -EINVAL; - } - case SHMDT: - return sys_shmdt ((char __user *)ptr); - case SHMGET: - return sys_shmget (first, second, third); - case SHMCTL: - return sys_shmctl (first, second, - (struct shmid_ds __user *) ptr); - default: - return -ENOSYS; - } -} -#endif +#include <linux/slab.h> /* Fork a new task - this creates a new program thread. * This is called indirectly via a small wrapper @@ -245,8 +62,9 @@ asmlinkage int sys_vfork(struct pt_regs *regs) /* sys_execve() executes a new program. * This is called indirectly via a small wrapper */ -asmlinkage int sys_execve(char __user *filenamei, char __user * __user *argv, - char __user * __user *envp, struct pt_regs *regs) +asmlinkage int sys_execve(const char __user *filenamei, + const char __user *const __user *argv, + const char __user *const __user *envp, struct pt_regs *regs) { int error; char * filename; @@ -261,14 +79,17 @@ out: return error; } -int kernel_execve(const char *filename, char *const argv[], char *const envp[]) +int kernel_execve(const char *filename, + const char *const argv[], + const char *const envp[]) { struct pt_regs regs; int ret; memset(®s, 0, sizeof(struct pt_regs)); - ret = do_execve((char *)filename, (char __user * __user *)argv, - (char __user * __user *)envp, ®s); + ret = do_execve(filename, + (const char __user *const __user *)argv, + (const char __user *const __user *)envp, ®s); if (ret < 0) goto out; diff --git a/arch/arm/kernel/sys_oabi-compat.c b/arch/arm/kernel/sys_oabi-compat.c index d59a0cd..4ad8da1 100644 --- a/arch/arm/kernel/sys_oabi-compat.c +++ b/arch/arm/kernel/sys_oabi-compat.c @@ -141,7 +141,7 @@ static long cp_oldabi_stat64(struct kstat *stat, return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? 
-EFAULT : 0; } -asmlinkage long sys_oabi_stat64(char __user * filename, +asmlinkage long sys_oabi_stat64(const char __user * filename, struct oldabi_stat64 __user * statbuf) { struct kstat stat; @@ -151,7 +151,7 @@ asmlinkage long sys_oabi_stat64(char __user * filename, return error; } -asmlinkage long sys_oabi_lstat64(char __user * filename, +asmlinkage long sys_oabi_lstat64(const char __user * filename, struct oldabi_stat64 __user * statbuf) { struct kstat stat; @@ -172,7 +172,7 @@ asmlinkage long sys_oabi_fstat64(unsigned long fd, } asmlinkage long sys_oabi_fstatat64(int dfd, - char __user *filename, + const char __user *filename, struct oldabi_stat64 __user *statbuf, int flag) { @@ -346,9 +346,6 @@ asmlinkage long sys_oabi_semop(int semid, struct oabi_sembuf __user *tsops, return sys_oabi_semtimedop(semid, tsops, nsops, NULL); } -extern asmlinkage int sys_ipc(uint call, int first, int second, int third, - void __user *ptr, long fifth); - asmlinkage int sys_oabi_ipc(uint call, int first, int second, int third, void __user *ptr, long fifth) { diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c new file mode 100644 index 0000000..26685c2 --- /dev/null +++ b/arch/arm/kernel/tcm.c @@ -0,0 +1,274 @@ +/* + * Copyright (C) 2008-2009 ST-Ericsson AB + * License terms: GNU General Public License (GPL) version 2 + * TCM memory handling for ARM systems + * + * Author: Linus Walleij <linus.walleij@stericsson.com> + * Author: Rickard Andersson <rickard.andersson@stericsson.com> + */ +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/stddef.h> +#include <linux/ioport.h> +#include <linux/genalloc.h> +#include <linux/string.h> /* memcpy */ +#include <asm/cputype.h> +#include <asm/mach/map.h> +#include <mach/memory.h> +#include "tcm.h" + +static struct gen_pool *tcm_pool; + +/* TCM section definitions from the linker */ +extern char __itcm_start, __sitcm_text, __eitcm_text; +extern char __dtcm_start, __sdtcm_data, __edtcm_data; + +/* These will be increased as we run */ +u32 dtcm_end = DTCM_OFFSET; +u32 itcm_end = ITCM_OFFSET; + +/* + * TCM memory resources + */ +static struct resource dtcm_res = { + .name = "DTCM RAM", + .start = DTCM_OFFSET, + .end = DTCM_OFFSET, + .flags = IORESOURCE_MEM +}; + +static struct resource itcm_res = { + .name = "ITCM RAM", + .start = ITCM_OFFSET, + .end = ITCM_OFFSET, + .flags = IORESOURCE_MEM +}; + +static struct map_desc dtcm_iomap[] __initdata = { + { + .virtual = DTCM_OFFSET, + .pfn = __phys_to_pfn(DTCM_OFFSET), + .length = 0, + .type = MT_MEMORY_DTCM + } +}; + +static struct map_desc itcm_iomap[] __initdata = { + { + .virtual = ITCM_OFFSET, + .pfn = __phys_to_pfn(ITCM_OFFSET), + .length = 0, + .type = MT_MEMORY_ITCM + } +}; + +/* + * Allocate a chunk of TCM memory + */ +void *tcm_alloc(size_t len) +{ + unsigned long vaddr; + + if (!tcm_pool) + return NULL; + + vaddr = gen_pool_alloc(tcm_pool, len); + if (!vaddr) + return NULL; + + return (void *) vaddr; +} +EXPORT_SYMBOL(tcm_alloc); + +/* + * Free a chunk of TCM memory + */ +void tcm_free(void *addr, size_t len) +{ + gen_pool_free(tcm_pool, (unsigned long) addr, len); +} +EXPORT_SYMBOL(tcm_free); + +static int __init setup_tcm_bank(u8 type, u8 bank, u8 banks, + u32 *offset) +{ + const int tcm_sizes[16] = { 0, -1, -1, 4, 8, 16, 32, 64, 128, + 256, 512, 1024, -1, -1, -1, -1 }; + u32 tcm_region; + int tcm_size; + + /* + * If there are more than one TCM bank of this type, + * select the TCM bank to operate on in the TCM selection + * register. 
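
setup_tcm_bank() in the new tcm.c above decodes a TCM region register: bits [5:2] index the size table (in KiB), bit 0 is the enable bit, and bits [31:12] hold the base address. A worked decode of a hypothetical register value:

    #include <stdio.h>

    static const int tcm_sizes[16] = { 0, -1, -1, 4, 8, 16, 32, 64, 128,
                                       256, 512, 1024, -1, -1, -1, -1 };

    int main(void)
    {
        unsigned int tcm_region = 0x0000000d;   /* hypothetical: 4 KiB @ 0, enabled */

        printf("size   : %d KiB\n", tcm_sizes[(tcm_region >> 2) & 0x0f]);
        printf("base   : %#x\n", tcm_region & 0xfffff000U);
        printf("enabled: %s\n", (tcm_region & 1) ? "yes" : "no");
        return 0;
    }
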
+ */ + if (banks > 1) + asm("mcr p15, 0, %0, c9, c2, 0" + : /* No output operands */ + : "r" (bank)); + + /* Read the special TCM region register c9, 0 */ + if (!type) + asm("mrc p15, 0, %0, c9, c1, 0" + : "=r" (tcm_region)); + else + asm("mrc p15, 0, %0, c9, c1, 1" + : "=r" (tcm_region)); + + tcm_size = tcm_sizes[(tcm_region >> 2) & 0x0f]; + if (tcm_size < 0) { + pr_err("CPU: %sTCM%d of unknown size\n", + type ? "I" : "D", bank); + return -EINVAL; + } else if (tcm_size > 32) { + pr_err("CPU: %sTCM%d larger than 32k found\n", + type ? "I" : "D", bank); + return -EINVAL; + } else { + pr_info("CPU: found %sTCM%d %dk @ %08x, %senabled\n", + type ? "I" : "D", + bank, + tcm_size, + (tcm_region & 0xfffff000U), + (tcm_region & 1) ? "" : "not "); + } + + /* Force move the TCM bank to where we want it, enable */ + tcm_region = *offset | (tcm_region & 0x00000ffeU) | 1; + + if (!type) + asm("mcr p15, 0, %0, c9, c1, 0" + : /* No output operands */ + : "r" (tcm_region)); + else + asm("mcr p15, 0, %0, c9, c1, 1" + : /* No output operands */ + : "r" (tcm_region)); + + /* Increase offset */ + *offset += (tcm_size << 10); + + pr_info("CPU: moved %sTCM%d %dk to %08x, enabled\n", + type ? "I" : "D", + bank, + tcm_size, + (tcm_region & 0xfffff000U)); + return 0; +} + +/* + * This initializes the TCM memory + */ +void __init tcm_init(void) +{ + u32 tcm_status = read_cpuid_tcmstatus(); + u8 dtcm_banks = (tcm_status >> 16) & 0x03; + u8 itcm_banks = (tcm_status & 0x03); + char *start; + char *end; + char *ram; + int ret; + int i; + + /* Setup DTCM if present */ + if (dtcm_banks > 0) { + for (i = 0; i < dtcm_banks; i++) { + ret = setup_tcm_bank(0, i, dtcm_banks, &dtcm_end); + if (ret) + return; + } + dtcm_res.end = dtcm_end - 1; + request_resource(&iomem_resource, &dtcm_res); + dtcm_iomap[0].length = dtcm_end - DTCM_OFFSET; + iotable_init(dtcm_iomap, 1); + /* Copy data from RAM to DTCM */ + start = &__sdtcm_data; + end = &__edtcm_data; + ram = &__dtcm_start; + /* This means you compiled more code than fits into DTCM */ + BUG_ON((end - start) > (dtcm_end - DTCM_OFFSET)); + memcpy(start, ram, (end-start)); + pr_debug("CPU DTCM: copied data from %p - %p\n", start, end); + } + + /* Setup ITCM if present */ + if (itcm_banks > 0) { + for (i = 0; i < itcm_banks; i++) { + ret = setup_tcm_bank(1, i, itcm_banks, &itcm_end); + if (ret) + return; + } + itcm_res.end = itcm_end - 1; + request_resource(&iomem_resource, &itcm_res); + itcm_iomap[0].length = itcm_end - ITCM_OFFSET; + iotable_init(itcm_iomap, 1); + /* Copy code from RAM to ITCM */ + start = &__sitcm_text; + end = &__eitcm_text; + ram = &__itcm_start; + /* This means you compiled more code than fits into ITCM */ + BUG_ON((end - start) > (itcm_end - ITCM_OFFSET)); + memcpy(start, ram, (end-start)); + pr_debug("CPU ITCM: copied code from %p - %p\n", start, end); + } +} + +/* + * This creates the TCM memory pool and has to be done later, + * during the core_initicalls, since the allocator is not yet + * up and running when the first initialization runs. + */ +static int __init setup_tcm_pool(void) +{ + u32 tcm_status = read_cpuid_tcmstatus(); + u32 dtcm_pool_start = (u32) &__edtcm_data; + u32 itcm_pool_start = (u32) &__eitcm_text; + int ret; + + /* + * Set up malloc pool, 2^2 = 4 bytes granularity since + * the TCM is sometimes just 4 KiB. NB: pages and cache + * line alignments does not matter in TCM! 
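
gen_pool_create(2, -1), called right after this comment, asks for a minimum allocation order of 2, i.e. 4-byte granularity, since a TCM bank can be as small as 4 KiB. What that rounding means for allocation requests, modelled standalone:

    #include <stdio.h>

    static unsigned long round_to_order(unsigned long len, int min_alloc_order)
    {
        unsigned long gran = 1UL << min_alloc_order;

        return (len + gran - 1) & ~(gran - 1);
    }

    int main(void)
    {
        printf("%lu\n", round_to_order(1, 2));      /* -> 4 */
        printf("%lu\n", round_to_order(13, 2));     /* -> 16 */
        return 0;
    }
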
+ */ + tcm_pool = gen_pool_create(2, -1); + + pr_debug("Setting up TCM memory pool\n"); + + /* Add the rest of DTCM to the TCM pool */ + if (tcm_status & (0x03 << 16)) { + if (dtcm_pool_start < dtcm_end) { + ret = gen_pool_add(tcm_pool, dtcm_pool_start, + dtcm_end - dtcm_pool_start, -1); + if (ret) { + pr_err("CPU DTCM: could not add DTCM " \ + "remainder to pool!\n"); + return ret; + } + pr_debug("CPU DTCM: Added %08x bytes @ %08x to " \ + "the TCM memory pool\n", + dtcm_end - dtcm_pool_start, + dtcm_pool_start); + } + } + + /* Add the rest of ITCM to the TCM pool */ + if (tcm_status & 0x03) { + if (itcm_pool_start < itcm_end) { + ret = gen_pool_add(tcm_pool, itcm_pool_start, + itcm_end - itcm_pool_start, -1); + if (ret) { + pr_err("CPU ITCM: could not add ITCM " \ + "remainder to pool!\n"); + return ret; + } + pr_debug("CPU ITCM: Added %08x bytes @ %08x to " \ + "the TCM memory pool\n", + itcm_end - itcm_pool_start, + itcm_pool_start); + } + } + return 0; +} + +core_initcall(setup_tcm_pool); diff --git a/arch/arm/kernel/tcm.h b/arch/arm/kernel/tcm.h new file mode 100644 index 0000000..8015ad4 --- /dev/null +++ b/arch/arm/kernel/tcm.h @@ -0,0 +1,17 @@ +/* + * Copyright (C) 2008-2009 ST-Ericsson AB + * License terms: GNU General Public License (GPL) version 2 + * TCM memory handling for ARM systems + * + * Author: Linus Walleij <linus.walleij@stericsson.com> + * Author: Rickard Andersson <rickard.andersson@stericsson.com> + */ + +#ifdef CONFIG_HAVE_TCM +void __init tcm_init(void); +#else +/* No TCM support, just blank inlines to be optimized out */ +inline void tcm_init(void) +{ +} +#endif diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c index 4cdc4a0..f1e2eb1 100644 --- a/arch/arm/kernel/time.c +++ b/arch/arm/kernel/time.c @@ -10,17 +10,13 @@ * * This file contains the ARM-specific time handling details: * reading the RTC at bootup, etc... - * - * 1994-07-02 Alan Modra - * fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime - * 1998-12-20 Updated NTP code according to technical memorandum Jan '96 - * "A Kernel Model for Precision Timekeeping" by Dave Mills */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/interrupt.h> #include <linux/time.h> #include <linux/init.h> +#include <linux/sched.h> #include <linux/smp.h> #include <linux/timex.h> #include <linux/errno.h> @@ -34,12 +30,13 @@ #include <asm/leds.h> #include <asm/thread_info.h> #include <asm/stacktrace.h> +#include <asm/mach/arch.h> #include <asm/mach/time.h> /* * Our system timer. */ -struct sys_timer *system_timer; +static struct sys_timer *system_timer; #if defined(CONFIG_RTC_DRV_CMOS) || defined(CONFIG_RTC_DRV_CMOS_MODULE) /* this needs a better home */ @@ -76,151 +73,15 @@ unsigned long profile_pc(struct pt_regs *regs) EXPORT_SYMBOL(profile_pc); #endif -/* - * hook for setting the RTC's idea of the current time. - */ -int (*set_rtc)(void); - -#ifndef CONFIG_GENERIC_TIME -static unsigned long dummy_gettimeoffset(void) -{ - return 0; -} -#endif - -static unsigned long next_rtc_update; - -/* - * If we have an externally synchronized linux clock, then update - * CMOS clock accordingly every ~11 minutes. set_rtc() has to be - * called as close as possible to 500 ms before the new second - * starts. 
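
The new tcm.h uses the common config-stub pattern: when CONFIG_HAVE_TCM is off, tcm_init() is an empty inline, so callers need no #ifdef. A standalone illustration (HAVE_TCM here is a stand-in macro, and the stub is spelled static inline, the safer variant):

    #include <stdio.h>

    /* #define HAVE_TCM 1 */    /* stand-in for CONFIG_HAVE_TCM; flip to test */

    #ifdef HAVE_TCM
    void tcm_init(void) { printf("TCM initialized\n"); }
    #else
    static inline void tcm_init(void) { }   /* compiles away entirely */
    #endif

    int main(void)
    {
        tcm_init();     /* always legal to call, no #ifdef at the call site */
        return 0;
    }
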
- */ -static inline void do_set_rtc(void) -{ - if (!ntp_synced() || set_rtc == NULL) - return; - - if (next_rtc_update && - time_before((unsigned long)xtime.tv_sec, next_rtc_update)) - return; - - if (xtime.tv_nsec < 500000000 - ((unsigned) tick_nsec >> 1) && - xtime.tv_nsec >= 500000000 + ((unsigned) tick_nsec >> 1)) - return; - - if (set_rtc()) - /* - * rtc update failed. Try again in 60s - */ - next_rtc_update = xtime.tv_sec + 60; - else - next_rtc_update = xtime.tv_sec + 660; -} - -#ifdef CONFIG_LEDS - -static void dummy_leds_event(led_event_t evt) -{ -} - -void (*leds_event)(led_event_t) = dummy_leds_event; - -struct leds_evt_name { - const char name[8]; - int on; - int off; -}; - -static const struct leds_evt_name evt_names[] = { - { "amber", led_amber_on, led_amber_off }, - { "blue", led_blue_on, led_blue_off }, - { "green", led_green_on, led_green_off }, - { "red", led_red_on, led_red_off }, -}; - -static ssize_t leds_store(struct sys_device *dev, - struct sysdev_attribute *attr, - const char *buf, size_t size) +#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET +u32 arch_gettimeoffset(void) { - int ret = -EINVAL, len = strcspn(buf, " "); - - if (len > 0 && buf[len] == '\0') - len--; - - if (strncmp(buf, "claim", len) == 0) { - leds_event(led_claim); - ret = size; - } else if (strncmp(buf, "release", len) == 0) { - leds_event(led_release); - ret = size; - } else { - int i; - - for (i = 0; i < ARRAY_SIZE(evt_names); i++) { - if (strlen(evt_names[i].name) != len || - strncmp(buf, evt_names[i].name, len) != 0) - continue; - if (strncmp(buf+len, " on", 3) == 0) { - leds_event(evt_names[i].on); - ret = size; - } else if (strncmp(buf+len, " off", 4) == 0) { - leds_event(evt_names[i].off); - ret = size; - } - break; - } - } - return ret; -} - -static SYSDEV_ATTR(event, 0200, NULL, leds_store); - -static int leds_suspend(struct sys_device *dev, pm_message_t state) -{ - leds_event(led_stop); - return 0; -} - -static int leds_resume(struct sys_device *dev) -{ - leds_event(led_start); - return 0; -} + if (system_timer->offset != NULL) + return system_timer->offset() * 1000; -static int leds_shutdown(struct sys_device *dev) -{ - leds_event(led_halted); return 0; } - -static struct sysdev_class leds_sysclass = { - .name = "leds", - .shutdown = leds_shutdown, - .suspend = leds_suspend, - .resume = leds_resume, -}; - -static struct sys_device leds_device = { - .id = 0, - .cls = &leds_sysclass, -}; - -static int __init leds_init(void) -{ - int ret; - ret = sysdev_class_register(&leds_sysclass); - if (ret == 0) - ret = sysdev_register(&leds_device); - if (ret == 0) - ret = sysdev_create_file(&leds_device, &attr_event); - return ret; -} - -device_initcall(leds_init); - -EXPORT_SYMBOL(leds_event); -#endif +#endif /* CONFIG_ARCH_USES_GETTIMEOFFSET */ #ifdef CONFIG_LEDS_TIMER static inline void do_leds(void) @@ -236,96 +97,6 @@ static inline void do_leds(void) #define do_leds() #endif -#ifndef CONFIG_GENERIC_TIME -void do_gettimeofday(struct timeval *tv) -{ - unsigned long flags; - unsigned long seq; - unsigned long usec, sec; - - do { - seq = read_seqbegin_irqsave(&xtime_lock, flags); - usec = system_timer->offset(); - sec = xtime.tv_sec; - usec += xtime.tv_nsec / 1000; - } while (read_seqretry_irqrestore(&xtime_lock, seq, flags)); - - /* usec may have gone up a lot: be safe */ - while (usec >= 1000000) { - usec -= 1000000; - sec++; - } - - tv->tv_sec = sec; - tv->tv_usec = usec; -} - -EXPORT_SYMBOL(do_gettimeofday); - -int do_settimeofday(struct timespec *tv) -{ - time_t wtm_sec, sec = tv->tv_sec; - long 
wtm_nsec, nsec = tv->tv_nsec; - - if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC) - return -EINVAL; - - write_seqlock_irq(&xtime_lock); - /* - * This is revolting. We need to set "xtime" correctly. However, the - * value in this location is the value at the most recent update of - * wall time. Discover what correction gettimeofday() would have - * done, and then undo it! - */ - nsec -= system_timer->offset() * NSEC_PER_USEC; - - wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec); - wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec); - - set_normalized_timespec(&xtime, sec, nsec); - set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec); - - ntp_clear(); - write_sequnlock_irq(&xtime_lock); - clock_was_set(); - return 0; -} - -EXPORT_SYMBOL(do_settimeofday); -#endif /* !CONFIG_GENERIC_TIME */ - -/** - * save_time_delta - Save the offset between system time and RTC time - * @delta: pointer to timespec to store delta - * @rtc: pointer to timespec for current RTC time - * - * Return a delta between the system time and the RTC time, such - * that system time can be restored later with restore_time_delta() - */ -void save_time_delta(struct timespec *delta, struct timespec *rtc) -{ - set_normalized_timespec(delta, - xtime.tv_sec - rtc->tv_sec, - xtime.tv_nsec - rtc->tv_nsec); -} -EXPORT_SYMBOL(save_time_delta); - -/** - * restore_time_delta - Restore the current system time - * @delta: delta returned by save_time_delta() - * @rtc: pointer to timespec for current RTC time - */ -void restore_time_delta(struct timespec *delta, struct timespec *rtc) -{ - struct timespec ts; - - set_normalized_timespec(&ts, - delta->tv_sec + rtc->tv_sec, - delta->tv_nsec + rtc->tv_nsec); - - do_settimeofday(&ts); -} -EXPORT_SYMBOL(restore_time_delta); #ifndef CONFIG_GENERIC_CLOCKEVENTS /* @@ -335,7 +106,6 @@ void timer_tick(void) { profile_tick(CPU_PROFILING); do_leds(); - do_set_rtc(); write_seqlock(&xtime_lock); do_timer(1); write_sequnlock(&xtime_lock); @@ -391,10 +161,7 @@ device_initcall(timer_init_sysfs); void __init time_init(void) { -#ifndef CONFIG_GENERIC_TIME - if (system_timer->offset == NULL) - system_timer->offset = dummy_gettimeoffset; -#endif + system_timer = machine_desc->timer; system_timer->init(); } diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index 57eb0f6..ee57640 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -1,7 +1,7 @@ /* * linux/arch/arm/kernel/traps.c * - * Copyright (C) 1995-2002 Russell King + * Copyright (C) 1995-2009 Russell King * Fragments that appear the same as linux/arch/i386/kernel/traps.c (C) Linus Torvalds * * This program is free software; you can redistribute it and/or modify @@ -12,15 +12,17 @@ * 'linux/arch/arm/lib/traps.S'. Mostly a debugging aid, but will probably * kill the offending process. 
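
The removed save_time_delta()/restore_time_delta() pair recorded xtime minus RTC time as a normalized timespec and added it back after resume. The arithmetic, modelled with plain struct timespec and illustrative values:

    #include <stdio.h>
    #include <time.h>

    #define NSEC_PER_SEC 1000000000L

    static struct timespec ts_norm(long sec, long nsec)
    {
        while (nsec < 0)             { nsec += NSEC_PER_SEC; sec--; }
        while (nsec >= NSEC_PER_SEC) { nsec -= NSEC_PER_SEC; sec++; }
        return (struct timespec){ .tv_sec = sec, .tv_nsec = nsec };
    }

    int main(void)
    {
        struct timespec sys = { 1000, 100 }, rtc = { 990, 500 };
        struct timespec delta = ts_norm(sys.tv_sec - rtc.tv_sec,
                                        sys.tv_nsec - rtc.tv_nsec);
        struct timespec back  = ts_norm(delta.tv_sec + rtc.tv_sec,
                                        delta.tv_nsec + rtc.tv_nsec);

        printf("delta    = %ld.%09ld\n", (long)delta.tv_sec, delta.tv_nsec);
        printf("restored = %ld.%09ld\n", (long)back.tv_sec, back.tv_nsec);
        return 0;
    }
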
*/ -#include <linux/module.h> #include <linux/signal.h> -#include <linux/spinlock.h> #include <linux/personality.h> #include <linux/kallsyms.h> -#include <linux/delay.h> +#include <linux/spinlock.h> +#include <linux/uaccess.h> #include <linux/hardirq.h> +#include <linux/kdebug.h> +#include <linux/module.h> +#include <linux/kexec.h> +#include <linux/delay.h> #include <linux/init.h> -#include <linux/uaccess.h> #include <asm/atomic.h> #include <asm/cacheflush.h> @@ -28,12 +30,15 @@ #include <asm/unistd.h> #include <asm/traps.h> #include <asm/unwind.h> +#include <asm/tls.h> #include "ptrace.h" #include "signal.h" static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" }; +void *vectors_page; + #ifdef CONFIG_DEBUG_USER unsigned int user_debug; @@ -45,21 +50,18 @@ static int __init user_debug_setup(char *str) __setup("user_debug=", user_debug_setup); #endif -static void dump_mem(const char *str, unsigned long bottom, unsigned long top); +static void dump_mem(const char *, const char *, unsigned long, unsigned long); void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame) { #ifdef CONFIG_KALLSYMS - printk("[<%08lx>] ", where); - print_symbol("(%s) ", where); - printk("from [<%08lx>] ", from); - print_symbol("(%s)\n", from); + printk("[<%08lx>] (%pS) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from); #else printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from); #endif if (in_exception_text(where)) - dump_mem("Exception stack", frame + 4, frame + 4 + sizeof(struct pt_regs)); + dump_mem("", "Exception stack", frame + 4, frame + 4 + sizeof(struct pt_regs)); } #ifndef CONFIG_ARM_UNWIND @@ -81,9 +83,10 @@ static int verify_stack(unsigned long sp) /* * Dump out the contents of some memory nicely... */ -static void dump_mem(const char *str, unsigned long bottom, unsigned long top) +static void dump_mem(const char *lvl, const char *str, unsigned long bottom, + unsigned long top) { - unsigned long p = bottom & ~31; + unsigned long first; mm_segment_t fs; int i; @@ -95,33 +98,37 @@ static void dump_mem(const char *str, unsigned long bottom, unsigned long top) fs = get_fs(); set_fs(KERNEL_DS); - printk("%s(0x%08lx to 0x%08lx)\n", str, bottom, top); + printk("%s%s(0x%08lx to 0x%08lx)\n", lvl, str, bottom, top); - for (p = bottom & ~31; p < top;) { - printk("%04lx: ", p & 0xffff); + for (first = bottom & ~31; first < top; first += 32) { + unsigned long p; + char str[sizeof(" 12345678") * 8 + 1]; - for (i = 0; i < 8; i++, p += 4) { - unsigned int val; + memset(str, ' ', sizeof(str)); + str[sizeof(str) - 1] = '\0'; - if (p < bottom || p >= top) - printk(" "); - else { - __get_user(val, (unsigned long *)p); - printk("%08x ", val); + for (p = first, i = 0; i < 8 && p < top; i++, p += 4) { + if (p >= bottom && p < top) { + unsigned long val; + if (__get_user(val, (unsigned long *)p) == 0) + sprintf(str + i * 9, " %08lx", val); + else + sprintf(str + i * 9, " ????????"); } } - printk ("\n"); + printk("%s%04lx:%s\n", lvl, first & 0xffff, str); } set_fs(fs); } -static void dump_instr(struct pt_regs *regs) +static void dump_instr(const char *lvl, struct pt_regs *regs) { unsigned long addr = instruction_pointer(regs); const int thumb = thumb_mode(regs); const int width = thumb ? 
4 : 8; mm_segment_t fs; + char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str; int i; /* @@ -132,7 +139,6 @@ static void dump_instr(struct pt_regs *regs) fs = get_fs(); set_fs(KERNEL_DS); - printk("Code: "); for (i = -4; i < 1; i++) { unsigned int val, bad; @@ -142,13 +148,14 @@ static void dump_instr(struct pt_regs *regs) bad = __get_user(val, &((u32 *)addr)[i]); if (!bad) - printk(i == 0 ? "(%0*x) " : "%0*x ", width, val); + p += sprintf(p, i == 0 ? "(%0*x) " : "%0*x ", + width, val); else { - printk("bad PC value."); + p += sprintf(p, "bad PC value"); break; } } - printk("\n"); + printk("%sCode: %s\n", lvl, str); set_fs(fs); } @@ -219,24 +226,34 @@ void show_stack(struct task_struct *tsk, unsigned long *sp) #define S_SMP "" #endif -static void __die(const char *str, int err, struct thread_info *thread, struct pt_regs *regs) +static int __die(const char *str, int err, struct thread_info *thread, struct pt_regs *regs) { struct task_struct *tsk = thread->task; static int die_counter; + int ret; - printk("Internal error: %s: %x [#%d]" S_PREEMPT S_SMP "\n", + printk(KERN_EMERG "Internal error: %s: %x [#%d]" S_PREEMPT S_SMP "\n", str, err, ++die_counter); + sysfs_printk_last_file(); + + /* trap and error numbers are mostly meaningless on ARM */ + ret = notify_die(DIE_OOPS, str, regs, err, tsk->thread.trap_no, SIGSEGV); + if (ret == NOTIFY_STOP) + return ret; + print_modules(); __show_regs(regs); - printk("Process %s (pid: %d, stack limit = 0x%p)\n", - tsk->comm, task_pid_nr(tsk), thread + 1); + printk(KERN_EMERG "Process %.*s (pid: %d, stack limit = 0x%p)\n", + TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), thread + 1); if (!user_mode(regs) || in_interrupt()) { - dump_mem("Stack: ", regs->ARM_sp, + dump_mem(KERN_EMERG, "Stack: ", regs->ARM_sp, THREAD_SIZE + (unsigned long)task_stack_page(tsk)); dump_backtrace(regs, tsk); - dump_instr(regs); + dump_instr(KERN_EMERG, regs); } + + return ret; } DEFINE_SPINLOCK(die_lock); @@ -244,28 +261,32 @@ DEFINE_SPINLOCK(die_lock); /* * This function is protected against re-entrancy. 
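
The reworked dump_mem() above formats each 32-byte row into a stack buffer with sprintf() and emits it as a single printk(), so concurrent oopses on SMP can no longer interleave fragments of a line. A userspace version of the row formatting (values illustrative):

    #include <stdio.h>
    #include <string.h>

    static void dump_row(unsigned long first, const unsigned int *words)
    {
        char str[sizeof(" 12345678") * 8 + 1];
        int i;

        memset(str, ' ', sizeof(str));
        str[sizeof(str) - 1] = '\0';
        for (i = 0; i < 8; i++)
            sprintf(str + i * 9, " %08x", words[i]);
        printf("%04lx:%s\n", first & 0xffff, str);
    }

    int main(void)
    {
        unsigned int words[8] = { 0xe1a00000, 0xe92d4010, 0xdeadbeef, 0,
                                  1, 2, 3, 0xffffffff };

        dump_row(0xc0008000, words);
        return 0;
    }
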
*/ -NORET_TYPE void die(const char *str, struct pt_regs *regs, int err) +void die(const char *str, struct pt_regs *regs, int err) { struct thread_info *thread = current_thread_info(); + int ret; oops_enter(); - console_verbose(); spin_lock_irq(&die_lock); + console_verbose(); bust_spinlocks(1); - __die(str, err, thread, regs); + ret = __die(str, err, thread, regs); + + if (regs && kexec_should_crash(thread->task)) + crash_kexec(regs); + bust_spinlocks(0); add_taint(TAINT_DIE); spin_unlock_irq(&die_lock); + oops_exit(); if (in_interrupt()) panic("Fatal exception in interrupt"); - if (panic_on_oops) panic("Fatal exception"); - - oops_exit(); - do_exit(SIGSEGV); + if (ret != NOTIFY_STOP) + do_exit(SIGSEGV); } void arm_notify_die(const char *str, struct pt_regs *regs, @@ -349,7 +370,7 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs) if (user_debug & UDBG_UNDEFINED) { printk(KERN_INFO "%s (%d): undefined instruction: pc=%p\n", current->comm, task_pid_nr(current), pc); - dump_instr(regs); + dump_instr(KERN_INFO, regs); } #endif @@ -400,7 +421,7 @@ static int bad_syscall(int n, struct pt_regs *regs) if (user_debug & UDBG_SYSCALL) { printk(KERN_ERR "[%d] %s: obsolete system call %08x.\n", task_pid_nr(current), current->comm, n); - dump_instr(regs); + dump_instr(KERN_ERR, regs); } #endif @@ -418,12 +439,14 @@ static int bad_syscall(int n, struct pt_regs *regs) static inline void do_cache_op(unsigned long start, unsigned long end, int flags) { + struct mm_struct *mm = current->active_mm; struct vm_area_struct *vma; if (end < start || flags) return; - vma = find_vma(current->active_mm, start); + down_read(&mm->mmap_sem); + vma = find_vma(mm, start); if (vma && vma->vm_start < end) { if (start < vma->vm_start) start = vma->vm_start; @@ -432,6 +455,7 @@ do_cache_op(unsigned long start, unsigned long end, int flags) flush_cache_user_range(vma, start, end); } + up_read(&mm->mmap_sem); } /* @@ -494,17 +518,20 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs) case NR(set_tls): thread->tp_value = regs->ARM_r0; -#if defined(CONFIG_HAS_TLS_REG) - asm ("mcr p15, 0, %0, c13, c0, 3" : : "r" (regs->ARM_r0) ); -#elif !defined(CONFIG_TLS_REG_EMUL) - /* - * User space must never try to access this directly. - * Expect your app to break eventually if you do so. - * The user helper at 0xffff0fe0 must be used instead. - * (see entry-armv.S for details) - */ - *((unsigned int *)0xffff0ff0) = regs->ARM_r0; -#endif + if (tls_emu) + return 0; + if (has_tls_reg) { + asm ("mcr p15, 0, %0, c13, c0, 3" + : : "r" (regs->ARM_r0)); + } else { + /* + * User space must never try to access this directly. + * Expect your app to break eventually if you do so. + * The user helper at 0xffff0fe0 must be used instead. + * (see entry-armv.S for details) + */ + *((unsigned int *)0xffff0ff0) = regs->ARM_r0; + } return 0; #ifdef CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG @@ -519,7 +546,7 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs) * __kuser_cmpxchg code in entry-armv.S should be aware of its * existence. Don't ever use this from user code. */ - case 0xfff0: + case NR(cmpxchg): for (;;) { extern void do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs); @@ -564,7 +591,7 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs) if not implemented, rather than raising SIGILL. This way the calling program can gracefully determine whether a feature is supported. 
*/ - if (no <= 0x7ff) + if ((no & 0xffff) <= 0x7ff) return -ENOSYS; break; } @@ -576,7 +603,7 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs) if (user_debug & UDBG_SYSCALL) { printk("[%d] %s: arm syscall %d\n", task_pid_nr(current), current->comm, no); - dump_instr(regs); + dump_instr("", regs); if (user_mode(regs)) { __show_regs(regs); c_backtrace(regs->ARM_fp, processor_mode(regs)); @@ -653,7 +680,7 @@ baddataabort(int code, unsigned long instr, struct pt_regs *regs) if (user_debug & UDBG_BADABORT) { printk(KERN_ERR "[%d] %s: bad data abort: code %d instr 0x%08lx\n", task_pid_nr(current), current->comm, code, instr); - dump_instr(regs); + dump_instr(KERN_ERR, regs); show_pte(current->mm, addr); } #endif @@ -683,19 +710,19 @@ void __readwrite_bug(const char *fn) } EXPORT_SYMBOL(__readwrite_bug); -void __pte_error(const char *file, int line, unsigned long val) +void __pte_error(const char *file, int line, pte_t pte) { - printk("%s:%d: bad pte %08lx.\n", file, line, val); + printk("%s:%d: bad pte %08lx.\n", file, line, pte_val(pte)); } -void __pmd_error(const char *file, int line, unsigned long val) +void __pmd_error(const char *file, int line, pmd_t pmd) { - printk("%s:%d: bad pmd %08lx.\n", file, line, val); + printk("%s:%d: bad pmd %08lx.\n", file, line, pmd_val(pmd)); } -void __pgd_error(const char *file, int line, unsigned long val) +void __pgd_error(const char *file, int line, pgd_t pgd) { - printk("%s:%d: bad pgd %08lx.\n", file, line, val); + printk("%s:%d: bad pgd %08lx.\n", file, line, pgd_val(pgd)); } asmlinkage void __div0(void) @@ -719,9 +746,23 @@ void __init trap_init(void) return; } +static void __init kuser_get_tls_init(unsigned long vectors) +{ + /* + * vectors + 0xfe0 = __kuser_get_tls + * vectors + 0xfe8 = hardware TLS instruction at 0xffff0fe8 + */ + if (tls_emu || has_tls_reg) + memcpy((void *)vectors + 0xfe0, (void *)vectors + 0xfe8, 4); +} + void __init early_trap_init(void) { +#if defined(CONFIG_CPU_USE_DOMAINS) unsigned long vectors = CONFIG_VECTORS_BASE; +#else + unsigned long vectors = (unsigned long)vectors_page; +#endif extern char __stubs_start[], __stubs_end[]; extern char __vectors_start[], __vectors_end[]; extern char __kuser_helper_start[], __kuser_helper_end[]; @@ -737,11 +778,18 @@ void __init early_trap_init(void) memcpy((void *)vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz); /* + * Do processor specific fixups for the kuser helpers + */ + kuser_get_tls_init(vectors); + + /* * Copy signal return handlers into the vector page, and * set sigreturn to be a pointer to these. */ - memcpy((void *)KERN_SIGRETURN_CODE, sigreturn_codes, - sizeof(sigreturn_codes)); + memcpy((void *)(vectors + KERN_SIGRETURN_CODE - CONFIG_VECTORS_BASE), + sigreturn_codes, sizeof(sigreturn_codes)); + memcpy((void *)(vectors + KERN_RESTART_CODE - CONFIG_VECTORS_BASE), + syscall_restart_code, sizeof(syscall_restart_code)); flush_icache_range(vectors, vectors + PAGE_SIZE); modify_domain(DOMAIN_USER, DOMAIN_CLIENT); diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c index dd56e11..d2cb0b3 100644 --- a/arch/arm/kernel/unwind.c +++ b/arch/arm/kernel/unwind.c @@ -26,6 +26,17 @@ * http://infocenter.arm.com/help/topic/com.arm.doc.subset.swdev.abi/index.html */ +#ifndef __CHECKER__ +#if !defined (__ARM_EABI__) +#warning Your compiler does not have EABI support. +#warning ARM unwind is known to compile only with EABI compilers. +#warning Change compiler or disable ARM_UNWIND option. 
+#elif (__GNUC__ == 4 && __GNUC_MINOR__ <= 2) +#warning Your compiler is too buggy; it is known to not compile ARM unwind support. +#warning Change compiler or disable ARM_UNWIND option. +#endif +#endif /* __CHECKER__ */ + #include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> @@ -62,7 +73,11 @@ struct unwind_ctrl_block { }; enum regs { +#ifdef CONFIG_THUMB2_KERNEL + FP = 7, +#else FP = 11, +#endif SP = 13, LR = 14, PC = 15 @@ -131,6 +146,8 @@ static struct unwind_idx *unwind_find_idx(unsigned long addr) addr < table->end_addr) { idx = search_index(addr, table->start, table->stop - 1); + /* Move-to-front to exploit common traces */ + list_move(&table->list, &unwind_tables); break; } } @@ -262,7 +279,7 @@ int unwind_frame(struct stackframe *frame) /* only go to a higher address on the stack */ low = frame->sp; - high = ALIGN(low, THREAD_SIZE) + THREAD_SIZE; + high = ALIGN(low, THREAD_SIZE); pr_debug("%s(pc = %08lx lr = %08lx sp = %08lx)\n", __func__, frame->pc, frame->lr, frame->sp); @@ -346,7 +363,9 @@ void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk) frame.fp = regs->ARM_fp; frame.sp = regs->ARM_sp; frame.lr = regs->ARM_lr; - frame.pc = regs->ARM_pc; + /* PC might be corrupted, use LR in that case. */ + frame.pc = kernel_text_address(regs->ARM_pc) + ? regs->ARM_pc : regs->ARM_lr; } else if (tsk == current) { frame.fp = (unsigned long)__builtin_frame_address(0); frame.sp = current_sp; diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S index 4340bf3..86b66f3 100644 --- a/arch/arm/kernel/vmlinux.lds.S +++ b/arch/arm/kernel/vmlinux.lds.S @@ -6,7 +6,21 @@ #include <asm-generic/vmlinux.lds.h> #include <asm/thread_info.h> #include <asm/memory.h> +#include <asm/page.h> +#define PROC_INFO \ + VMLINUX_SYMBOL(__proc_info_begin) = .; \ + *(.proc.info.init) \ + VMLINUX_SYMBOL(__proc_info_end) = .; + +#ifdef CONFIG_HOTPLUG_CPU +#define ARM_CPU_DISCARD(x) +#define ARM_CPU_KEEP(x) x +#else +#define ARM_CPU_DISCARD(x) x +#define ARM_CPU_KEEP(x) +#endif + OUTPUT_ARCH(arm) ENTRY(stext) @@ -23,71 +37,55 @@ SECTIONS #else . = PAGE_OFFSET + TEXT_OFFSET; #endif - .text.head : { - _stext = .; - _sinittext = .; - *(.text.head) - } .init : { /* Init code and data */ + _stext = .; + _sinittext = .; + HEAD_TEXT INIT_TEXT _einittext = .; - __proc_info_begin = .; - *(.proc.info.init) - __proc_info_end = .; + ARM_CPU_DISCARD(PROC_INFO) __arch_info_begin = .; *(.arch.info.init) __arch_info_end = .; __tagtable_begin = .; *(.taglist.init) __tagtable_end = .; - . = ALIGN(16); - __setup_start = .; - *(.init.setup) - __setup_end = .; - __early_begin = .; - *(.early_param.init) - __early_end = .; - __initcall_start = .; - INITCALLS - __initcall_end = .; - __con_initcall_start = .; - *(.con_initcall.init) - __con_initcall_end = .; - __security_initcall_start = .; - *(.security_initcall.init) - __security_initcall_end = .; -#ifdef CONFIG_BLK_DEV_INITRD - . = ALIGN(32); - __initramfs_start = .; - usr/built-in.o(.init.ramfs) - __initramfs_end = .; +#ifdef CONFIG_SMP_ON_UP + __smpalt_begin = .; + *(.alt.smp.init) + __smpalt_end = .; #endif - . = ALIGN(4096); - __per_cpu_load = .; - __per_cpu_start = .; - *(.data.percpu.page_aligned) - *(.data.percpu) - *(.data.percpu.shared_aligned) - __per_cpu_end = .; + + INIT_SETUP(16) + + INIT_CALLS + CON_INITCALL + SECURITY_INITCALL + INIT_RAM_FS + #ifndef CONFIG_XIP_KERNEL __init_begin = _stext; INIT_DATA - . 
= ALIGN(4096); - __init_end = .; #endif } - /DISCARD/ : { /* Exit code and data */ - EXIT_TEXT - EXIT_DATA - *(.exitcall.exit) + PERCPU(PAGE_SIZE) + +#ifndef CONFIG_XIP_KERNEL + . = ALIGN(PAGE_SIZE); + __init_end = .; +#endif + + /* + * unwind exit sections must be discarded before the rest of the + * unwind sections get included. + */ + /DISCARD/ : { *(.ARM.exidx.exit.text) *(.ARM.extab.exit.text) -#ifndef CONFIG_HOTPLUG_CPU - *(.ARM.exidx.cpuexit.text) - *(.ARM.extab.cpuexit.text) -#endif + ARM_CPU_DISCARD(*(.ARM.exidx.cpuexit.text)) + ARM_CPU_DISCARD(*(.ARM.extab.cpuexit.text)) #ifndef CONFIG_HOTPLUG *(.ARM.exidx.devexit.text) *(.ARM.extab.devexit.text) @@ -103,6 +101,7 @@ SECTIONS __exception_text_start = .; *(.exception.text) __exception_text_end = .; + IRQENTRY_TEXT TEXT_TEXT SCHED_TEXT LOCK_TEXT @@ -115,12 +114,12 @@ SECTIONS *(.rodata.*) *(.glue_7) *(.glue_7t) + . = ALIGN(4); *(.got) /* Global offset table */ + ARM_CPU_KEEP(PROC_INFO) } - RODATA - - _etext = .; /* End of text and rodata section */ + RO_DATA(PAGE_SIZE) #ifdef CONFIG_ARM_UNWIND /* @@ -139,6 +138,8 @@ SECTIONS } #endif + _etext = .; /* End of text and rodata section */ + #ifdef CONFIG_XIP_KERNEL __data_loc = ALIGN(4); /* location in binary */ . = PAGE_OFFSET + TEXT_OFFSET; @@ -155,27 +156,19 @@ SECTIONS * first, the init task union, aligned * to an 8192 byte boundary. */ - *(.data.init_task) + INIT_TASK_DATA(THREAD_SIZE) #ifdef CONFIG_XIP_KERNEL - . = ALIGN(4096); + . = ALIGN(PAGE_SIZE); __init_begin = .; INIT_DATA - . = ALIGN(4096); + . = ALIGN(PAGE_SIZE); __init_end = .; #endif - . = ALIGN(4096); - __nosave_begin = .; - *(.data.nosave) - . = ALIGN(4096); - __nosave_end = .; - - /* - * then the cacheline aligned data - */ - . = ALIGN(32); - *(.data.cacheline_aligned) + NOSAVE_DATA + CACHELINE_ALIGNED_DATA(32) + READ_MOSTLY_DATA(32) /* * The exception fixup table (might need resorting at runtime) @@ -197,21 +190,77 @@ SECTIONS } _edata_loc = __data_loc + SIZEOF(.data); - .bss : { - __bss_start = .; /* BSS */ - *(.bss) - *(COMMON) - __bss_stop = .; - _end = .; +#ifdef CONFIG_HAVE_TCM + /* + * We align everything to a page boundary so we can + * free it after init has commenced and TCM contents have + * been copied to its destination. + */ + .tcm_start : { + . = ALIGN(PAGE_SIZE); + __tcm_start = .; + __itcm_start = .; + } + + /* + * Link these to the ITCM RAM + * Put VMA to the TCM address and LMA to the common RAM + * and we'll upload the contents from RAM to TCM and free + * the used RAM after that. + */ + .text_itcm ITCM_OFFSET : AT(__itcm_start) + { + __sitcm_text = .; + *(.tcm.text) + *(.tcm.rodata) + . = ALIGN(4); + __eitcm_text = .; + } + + /* + * Reset the dot pointer, this is needed to create the + * relative __dtcm_start below (to be used as extern in code). + */ + . = ADDR(.tcm_start) + SIZEOF(.tcm_start) + SIZEOF(.text_itcm); + + .dtcm_start : { + __dtcm_start = .; } - /* Stabs debugging sections. */ - .stab 0 : { *(.stab) } - .stabstr 0 : { *(.stabstr) } - .stab.excl 0 : { *(.stab.excl) } - .stab.exclstr 0 : { *(.stab.exclstr) } - .stab.index 0 : { *(.stab.index) } - .stab.indexstr 0 : { *(.stab.indexstr) } + + /* TODO: add remainder of ITCM as well, that can be used for data! */ + .data_dtcm DTCM_OFFSET : AT(__dtcm_start) + { + . = ALIGN(4); + __sdtcm_data = .; + *(.tcm.data) + . = ALIGN(4); + __edtcm_data = .; + } + + /* Reset the dot pointer or the linker gets confused */ + . 
= ADDR(.dtcm_start) + SIZEOF(.data_dtcm);
+
+	/* End marker for freeing TCM copy in linked object */
+	.tcm_end : AT(ADDR(.dtcm_start) + SIZEOF(.data_dtcm)){
+		. = ALIGN(PAGE_SIZE);
+		__tcm_end = .;
+	}
+#endif
+
+	BSS_SECTION(0, 0, 0)
+	_end = .;
+
+	STABS_DEBUG
 	.comment 0 : { *(.comment) }
+
+	/* Default discards */
+	DISCARDS
+
+#ifndef CONFIG_SMP_ON_UP
+	/DISCARD/ : {
+		*(.alt.smp.init)
+	}
+#endif
 }

 /*
diff --git a/arch/arm/kernel/xscale-cp0.c b/arch/arm/kernel/xscale-cp0.c
index 17127db..1796157 100644
--- a/arch/arm/kernel/xscale-cp0.c
+++ b/arch/arm/kernel/xscale-cp0.c
@@ -70,7 +70,7 @@ static int iwmmxt_do(struct notifier_block *self, unsigned long cmd, void *t)
 	 * initialised state information on the first fault.
 	 */

-	case THREAD_NOTIFY_RELEASE:
+	case THREAD_NOTIFY_EXIT:
 		iwmmxt_task_release(thread);
 		break;
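The dump_mem()/dump_instr() rework in the traps.c hunks above follows one pattern: render a whole output row into a stack buffer with sprintf(), then emit it through a single printk() that carries an explicit log level, so concurrent output on SMP can no longer interleave inside a row. A self-contained user-space sketch of that row-buffering idea (dump_row() and the addresses are illustrative, not kernel API):

#include <stdio.h>
#include <string.h>

/* Illustrative only: format one 32-byte row into a buffer, print it once. */
static void dump_row(const unsigned int *vals, unsigned long bottom,
		     unsigned long top, unsigned long first)
{
	char str[sizeof(" 12345678") * 8 + 1];
	unsigned long p;
	int i;

	memset(str, ' ', sizeof(str));
	str[sizeof(str) - 1] = '\0';

	for (p = first, i = 0; i < 8 && p < top; i++, p += 4) {
		if (p >= bottom)
			sprintf(str + i * 9, " %08x", vals[(p - first) / 4]);
	}

	/* One call per row: nothing can interleave mid-row. */
	printf("%04lx:%s\n", first & 0xffff, str);
}

int main(void)
{
	unsigned int words[16];
	int i;

	for (i = 0; i < 16; i++)
		words[i] = 0x12340000 + i;

	/* Two 32-byte rows starting at a 32-byte-aligned "address". */
	dump_row(words, 0x8000, 0x8000 + sizeof(words), 0x8000);
	dump_row(words + 8, 0x8000, 0x8000 + sizeof(words), 0x8020);
	return 0;
}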
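__die() now routes every oops through notify_die(DIE_OOPS, ...) before dumping state, and a NOTIFY_STOP return suppresses the do_exit(SIGSEGV) tail; this is the hook that lets in-kernel debuggers claim an oops. A minimal sketch of a consumer module, assuming a GPL module context (the handler name is made up; register_die_notifier() and struct die_args are the real interfaces from linux/kdebug.h):

#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/notifier.h>

/* Hypothetical consumer: log each oops as it passes through notify_die(). */
static int my_die_handler(struct notifier_block *nb, unsigned long val,
			  void *data)
{
	struct die_args *args = data;

	if (val == DIE_OOPS)
		pr_info("oops: %s (err %ld)\n", args->str, args->err);

	return NOTIFY_DONE;	/* let the normal oops path continue */
}

static struct notifier_block my_die_nb = {
	.notifier_call = my_die_handler,
};

static int __init my_init(void)
{
	return register_die_notifier(&my_die_nb);
}

static void __exit my_exit(void)
{
	unregister_die_notifier(&my_die_nb);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");

Returning NOTIFY_STOP here instead would make die() skip the fatal exit, which is exactly how a debugger takes over the faulting context.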
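On the set_tls path, the patch replaces compile-time #if selection with runtime tls_emu/has_tls_reg checks, but the user-visible contract is unchanged: user space reads TLS through the helper at 0xffff0fe0 rather than peeking at the 0xffff0ff0 word. A user-space sketch of that documented ARM Linux vector-page calling convention (only valid on kernels that provide the kuser helpers):

#include <stdio.h>

/* ARM Linux kuser helper: __kuser_get_tls lives at a fixed address. */
typedef unsigned int (*kuser_get_tls_t)(void);
#define __kuser_get_tls ((kuser_get_tls_t)0xffff0fe0)

int main(void)
{
	/* Returns the value previously installed via the set_tls syscall. */
	printf("TLS value: %#x\n", __kuser_get_tls());
	return 0;
}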
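early_trap_init() copies the vector stubs, kuser helpers and signal-return code into the vectors page and only then calls flush_icache_range(), so the instruction cache cannot serve stale bytes from before the copy. The same two-step pattern applies whenever code is copied somewhere and then executed; a condensed kernel-style sketch (install_code() and its arguments are illustrative):

#include <linux/types.h>
#include <linux/string.h>
#include <asm/cacheflush.h>

/*
 * Illustrative: install a code snippet at 'dst' and make it safe to
 * execute. Without the I-cache flush, the CPU may still execute
 * whatever was at 'dst' before the copy.
 */
static void install_code(void *dst, const void *src, size_t len)
{
	memcpy(dst, src, len);
	flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
}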
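In unwind.c, unwind_find_idx() now does list_move() on a hit, so the table that matched most recently is checked first on the next lookup; backtraces tend to hit the same module's table repeatedly, which makes this cheap move-to-front heuristic shorten the list walk. A generic sketch of the same idea over a kernel list (the types are illustrative, not the unwinder's own):

#include <linux/list.h>

struct range_table {
	struct list_head list;
	unsigned long begin_addr, end_addr;
};

static LIST_HEAD(tables);

/* Illustrative MRU lookup: promote the matching entry to the list head. */
static struct range_table *find_table(unsigned long addr)
{
	struct range_table *t;

	list_for_each_entry(t, &tables, list) {
		if (addr >= t->begin_addr && addr < t->end_addr) {
			/* The next lookup for a nearby address hits at once. */
			list_move(&t->list, &tables);
			return t;
		}
	}
	return NULL;
}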
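The new TCM output sections in vmlinux.lds.S link code at its ITCM run address (VMA at ITCM_OFFSET) while storing it in ordinary RAM (LMA via AT(__itcm_start)), so early boot code must copy the payload across before anything at the TCM address runs. A simplified sketch of that copy using the linker-provided symbols from the script above (the real copy lives in arch/arm/kernel/tcm.c; this is a condensed rendering):

#include <linux/init.h>
#include <linux/string.h>

/* Symbols defined by vmlinux.lds.S above. */
extern char __itcm_start[];			/* load address (LMA) in RAM */
extern char __sitcm_text[], __eitcm_text[];	/* run addresses (VMA) in ITCM */

static void __init copy_itcm(void)
{
	size_t sz = __eitcm_text - __sitcm_text;

	/* Move the payload from its RAM load address to the ITCM VMA. */
	memcpy(__sitcm_text, __itcm_start, sz);
}

Resetting the location counter after each AT() section, as the script does, is what keeps the following sections' LMAs contiguous in RAM instead of jumping to the TCM address range.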
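The xscale-cp0.c hunk tracks a rename of the thread-notifier event: per-thread coprocessor state is now torn down on THREAD_NOTIFY_EXIT. A minimal sketch of such a notifier, mirroring the iwmmxt_do() shape above (my_state_release() and the per-thread state are hypothetical; thread_register_notifier() is the real ARM interface from asm/thread_notify.h):

#include <linux/init.h>
#include <linux/notifier.h>
#include <asm/thread_notify.h>

static void my_state_release(struct thread_info *thread)
{
	/* Hypothetical: free whatever this CPU feature keeps per thread. */
}

static int my_thread_notify(struct notifier_block *self, unsigned long cmd,
			    void *t)
{
	struct thread_info *thread = t;

	if (cmd == THREAD_NOTIFY_EXIT)
		my_state_release(thread);

	return NOTIFY_DONE;
}

static struct notifier_block my_thread_notifier = {
	.notifier_call = my_thread_notify,
};

static int __init my_notifier_init(void)
{
	thread_register_notifier(&my_thread_notifier);
	return 0;
}
late_initcall(my_notifier_init);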