From bc8b57f08c53344d13e3b5e644c56c0355899b47 Mon Sep 17 00:00:00 2001
From: Dave Martin
Date: Mon, 29 Nov 2010 19:43:21 +0100
Subject: ARM: 6497/1: kexec: Correct data alignment for CONFIG_THUMB2_KERNEL

Directives such as .long and .word do not magically cause the
assembler location counter to become aligned in gas. As a result,
using these directives in code sections can result in misaligned
data words when building a Thumb-2 kernel (CONFIG_THUMB2_KERNEL).

This is a Bad Thing, since the ABI permits the compiler to assume
that fundamental types of word size or above are word-aligned when
accessing them from C. If the data is not really word-aligned, this
can cause impaired performance and stray alignment faults in some
circumstances.

In general, the following rules should be applied when using data
word declaration directives inside code sections:

	* .quad and .double:
		.align 3

	* .long, .word, .single, .float:
		.align (or .align 2)

	* .short:
		No explicit alignment required, since Thumb-2
		instructions are always 2 or 4 bytes in size.

Reviewed-by: Will Deacon
Signed-off-by: Dave Martin
Acked-by: Catalin Marinas
Signed-off-by: Russell King
---
 arch/arm/kernel/relocate_kernel.S | 2 ++
 1 file changed, 2 insertions(+)
(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/relocate_kernel.S b/arch/arm/kernel/relocate_kernel.S
index fd26f8d..9cf4cbf 100644
--- a/arch/arm/kernel/relocate_kernel.S
+++ b/arch/arm/kernel/relocate_kernel.S
@@ -59,6 +59,8 @@ relocate_new_kernel:
 	ldr	r2,kexec_boot_atags
 	mov	pc,lr
 
+	.align
+
 	.globl kexec_start_address
 kexec_start_address:
 	.long	0x0
--
cgit v1.1
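To make the alignment rules above concrete, here is a minimal standalone
sketch (not part of the patch; the labels and constants are invented for
illustration) of how a .long placed after an odd number of 16-bit Thumb
instructions lands on a non-word boundary, and how .align repairs it:

	.syntax unified
	.text
	.thumb

func:	movs	r0, #0		@ each narrow (16-bit) instruction advances
	adds	r0, #1		@ the location counter by 2 bytes, so after
	bx	lr		@ three of them the counter is 4n+2

bad:	.long	0xdeadbeef	@ .long does not realign: this word starts
				@ on a 2-byte boundary

	.align			@ same as .align 2: pad to a 4-byte boundary
good:	.long	0xdeadbeef	@ safely word-aligned

An ARM (non-Thumb) build never exposes the problem, since every ARM
instruction is 4 bytes and keeps the location counter word-aligned inside
code sections.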
From 4f79a5dd7c3e316e2230dc0ee665c40a39023a81 Mon Sep 17 00:00:00 2001
From: Dave Martin
Date: Mon, 29 Nov 2010 19:43:24 +0100
Subject: ARM: 6500/1: Thumb-2: Correct data alignment for CONFIG_THUMB2_KERNEL in kernel/head.S

Directives such as .long and .word do not magically cause the
assembler location counter to become aligned in gas. As a result,
using these directives in code sections can result in misaligned
data words when building a Thumb-2 kernel (CONFIG_THUMB2_KERNEL).

This is a Bad Thing, since the ABI permits the compiler to assume
that fundamental types of word size or above are word-aligned when
accessing them from C. If the data is not really word-aligned, this
can cause impaired performance and stray alignment faults in some
circumstances.

In general, the following rules should be applied when using data
word declaration directives inside code sections:

	* .quad and .double:
		.align 3

	* .long, .word, .single, .float:
		.align (or .align 2)

	* .short:
		No explicit alignment required, since Thumb-2
		instructions are always 2 or 4 bytes in size.

Reviewed-by: Will Deacon
Signed-off-by: Dave Martin
Acked-by: Catalin Marinas
Signed-off-by: Russell King
---
 arch/arm/kernel/head.S | 4 ++++
 1 file changed, 4 insertions(+)
(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index dd6b369..591c097 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -262,6 +262,7 @@ __create_page_tables:
 	mov	pc, lr
 ENDPROC(__create_page_tables)
 	.ltorg
+	.align
 __enable_mmu_loc:
 	.long	.
 	.long	__enable_mmu
@@ -308,6 +309,8 @@ ENTRY(__secondary_switched)
 	b	secondary_start_kernel
 ENDPROC(__secondary_switched)
 
+	.align
+
 	.type	__secondary_data, %object
 __secondary_data:
 	.long	.
@@ -413,6 +416,7 @@ __fixup_smp_on_up:
 	mov	pc, lr
 ENDPROC(__fixup_smp)
 
+	.align
 1:	.word	.
 	.word	__smpalt_begin
 	.word	__smpalt_end
--
cgit v1.1

From a75e5248c51af1eaeed936be6bd3497b93f09685 Mon Sep 17 00:00:00 2001
From: Dave Martin
Date: Mon, 29 Nov 2010 19:43:28 +0100
Subject: ARM: 6504/1: Thumb-2: Fix long-distance conditional branches in head.S for Thumb-2.

The 32-bit conditional branches in Thumb-2 have a shorter range
(+/-512K) than their ARM counterparts (+/-32MB). The linker does not
currently generate trampolines to extend the range of these Thumb-2
conditional branches, resulting in link errors when vmlinux is
sufficiently large, e.g.:

	head.o:(.text+0x464): relocation truncated to fit: R_ARM_THM_JUMP19

This patch forces the longer-range, unconditional branch encoding
by use of an explicit IT instruction. The resulting branches are
triggered on the same conditions as before.

Signed-off-by: Dave Martin
Signed-off-by: Russell King
---
 arch/arm/kernel/head.S | 3 +++
 1 file changed, 3 insertions(+)
(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 591c097..6bd82d25 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -85,9 +85,11 @@ ENTRY(stext)
 	mrc	p15, 0, r9, c0, c0		@ get processor id
 	bl	__lookup_processor_type		@ r5=procinfo r9=cpuid
 	movs	r10, r5				@ invalid processor (r5=0)?
+ THUMB( it	eq )		@ force fixup-able long branch encoding
 	beq	__error_p			@ yes, error 'p'
 	bl	__lookup_machine_type		@ r5=machinfo
 	movs	r8, r5				@ invalid machine (r5=0)?
+ THUMB( it	eq )		@ force fixup-able long branch encoding
 	beq	__error_a			@ yes, error 'a'
 	bl	__vet_atags
 #ifdef CONFIG_SMP_ON_UP
@@ -283,6 +285,7 @@ ENTRY(secondary_startup)
 	bl	__lookup_processor_type
 	movs	r10, r5				@ invalid processor?
 	moveq	r0, #'p'			@ yes, error 'p'
+ THUMB( it	eq )		@ force fixup-able long branch encoding
 	beq	__error_p
 
/*
--
cgit v1.1

From 55afd264cdd5d5848753e90884ed596e11bce0ff Mon Sep 17 00:00:00 2001
From: Dave Martin
Date: Wed, 1 Dec 2010 18:12:43 +0100
Subject: ARM: 6519/1: kuser: Fix incorrect cmpxchg syscall in kuser helpers

The existing code invokes the syscall with rubbish in r7, due to
what looks like an incorrect literal load idiom.

Reviewed-by: Will Deacon
Signed-off-by: Dave Martin
Acked-by: Catalin Marinas
Signed-off-by: Russell King
---
 arch/arm/kernel/entry-armv.S | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index c09e357..bb96a7d 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -911,7 +911,7 @@ __kuser_cmpxchg:				@ 0xffff0fc0
 	 * A special ghost syscall is used for that (see traps.c).
 	 */
 	stmfd	sp!, {r7, lr}
-	ldr	r7, =1f			@ it's 20 bits
+	ldr	r7, 1f			@ it's 20 bits
 	swi	__ARM_NR_cmpxchg
 	ldmfd	sp!, {r7, pc}
 1:	.word	__ARM_NR_cmpxchg
--
cgit v1.1
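The one-character fix above is easy to misread, so a standalone sketch of
the two gas idioms it distinguishes may help (illustrative only, not from
the kernel source):

	ldr	r0, =1f		@ '=' form: plant the *address* of 1f in a
				@ literal pool and load that address
	ldr	r1, 1f		@ plain form: PC-relative load of the *word
				@ stored at* 1f
1:	.word	__ARM_NR_cmpxchg

With the '=' form, r7 received the address of the literal rather than the
syscall number __ARM_NR_cmpxchg itself, which is the "rubbish in r7" the
commit message describes.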
From ef6c84454f8567d4968c210d7d194fb711ed3739 Mon Sep 17 00:00:00 2001
From: Haojian Zhuang
Date: Wed, 24 Nov 2010 11:54:25 +0800
Subject: ARM: pxa: add iwmmxt support for PJ4

iWMMXt is used in the XScale, XScale3, Mohawk and PJ4 cores, but the
instructions for accessing CP0 and CP1 are different on PJ4. Add the
files needed to support iWMMXt on the PJ4 core.

Signed-off-by: Zhou Zhu
Signed-off-by: Haojian Zhuang
Acked-by: Nicolas Pitre
Signed-off-by: Eric Miao
---
 arch/arm/kernel/Makefile  |  1 +
 arch/arm/kernel/iwmmxt.S  | 55 ++++++++++++++++++++-------
 arch/arm/kernel/pj4-cp0.c | 94 +++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 137 insertions(+), 13 deletions(-)
 create mode 100644 arch/arm/kernel/pj4-cp0.c
(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 5b9b268..b0f11fa 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -50,6 +50,7 @@ AFLAGS_crunch-bits.o := -Wa,-mcpu=ep9312
 obj-$(CONFIG_CPU_XSCALE)	+= xscale-cp0.o
 obj-$(CONFIG_CPU_XSC3)		+= xscale-cp0.o
 obj-$(CONFIG_CPU_MOHAWK)	+= xscale-cp0.o
+obj-$(CONFIG_CPU_PJ4)		+= pj4-cp0.o
 obj-$(CONFIG_IWMMXT)		+= iwmmxt.o
 obj-$(CONFIG_CPU_HAS_PMU)	+= pmu.o
 obj-$(CONFIG_HW_PERF_EVENTS)	+= perf_event.o

diff --git a/arch/arm/kernel/iwmmxt.S b/arch/arm/kernel/iwmmxt.S
index b63b528..7fa3bb0 100644
--- a/arch/arm/kernel/iwmmxt.S
+++ b/arch/arm/kernel/iwmmxt.S
@@ -19,6 +19,14 @@
 #include <asm/thread_info.h>
 #include <asm/asm-offsets.h>
 
+#if defined(CONFIG_CPU_PJ4)
+#define PJ4(code...)	code
+#define XSC(code...)
+#else
+#define PJ4(code...)
+#define XSC(code...)	code
+#endif
+
 #define MMX_WR0		(0x00)
 #define MMX_WR1		(0x08)
 #define MMX_WR2		(0x10)
@@ -58,11 +66,17 @@
 
 ENTRY(iwmmxt_task_enable)
 
-	mrc	p15, 0, r2, c15, c1, 0
-	tst	r2, #0x3			@ CP0 and CP1 accessible?
+	XSC(mrc	p15, 0, r2, c15, c1, 0)
+	PJ4(mrc	p15, 0, r2, c1, c0, 2)
+	@ CP0 and CP1 accessible?
+	XSC(tst	r2, #0x3)
+	PJ4(tst	r2, #0xf)
 	movne	pc, lr				@ if so no business here
-	orr	r2, r2, #0x3			@ enable access to CP0 and CP1
-	mcr	p15, 0, r2, c15, c1, 0
+	@ enable access to CP0 and CP1
+	XSC(orr	r2, r2, #0x3)
+	XSC(mcr	p15, 0, r2, c15, c1, 0)
+	PJ4(orr	r2, r2, #0xf)
+	PJ4(mcr	p15, 0, r2, c1, c0, 2)
 
 	ldr	r3, =concan_owner
 	add	r0, r10, #TI_IWMMXT_STATE	@ get task Concan save area
@@ -179,17 +193,26 @@ ENTRY(iwmmxt_task_disable)
 	teqne	r1, r2				@ or specified one?
 	bne	1f				@ no: quit
 
-	mrc	p15, 0, r4, c15, c1, 0
-	orr	r4, r4, #0x3			@ enable access to CP0 and CP1
-	mcr	p15, 0, r4, c15, c1, 0
+	@ enable access to CP0 and CP1
+	XSC(mrc	p15, 0, r4, c15, c1, 0)
+	XSC(orr	r4, r4, #0x3)
+	XSC(mcr	p15, 0, r4, c15, c1, 0)
+	PJ4(mrc	p15, 0, r4, c1, c0, 2)
+	PJ4(orr	r4, r4, #0xf)
+	PJ4(mcr	p15, 0, r4, c1, c0, 2)
+
 	mov	r0, #0				@ nothing to load
 	str	r0, [r3]			@ no more current owner
 	mrc	p15, 0, r2, c2, c0, 0
 	mov	r2, r2				@ cpwait
 	bl	concan_save
 
-	bic	r4, r4, #0x3			@ disable access to CP0 and CP1
-	mcr	p15, 0, r4, c15, c1, 0
+	@ disable access to CP0 and CP1
+	XSC(bic	r4, r4, #0x3)
+	XSC(mcr	p15, 0, r4, c15, c1, 0)
+	PJ4(bic	r4, r4, #0xf)
+	PJ4(mcr	p15, 0, r4, c1, c0, 2)
+
 	mrc	p15, 0, r2, c2, c0, 0
 	mov	r2, r2				@ cpwait
@@ -277,8 +300,11 @@ ENTRY(iwmmxt_task_restore)
  */
 
 ENTRY(iwmmxt_task_switch)
-	mrc	p15, 0, r1, c15, c1, 0
-	tst	r1, #0x3			@ CP0 and CP1 accessible?
+	XSC(mrc	p15, 0, r1, c15, c1, 0)
+	PJ4(mrc	p15, 0, r1, c1, c0, 2)
+	@ CP0 and CP1 accessible?
+	XSC(tst	r1, #0x3)
+	PJ4(tst	r1, #0xf)
 	bne	1f				@ yes: block them for next task
 
 	ldr	r2, =concan_owner
@@ -287,8 +313,11 @@ ENTRY(iwmmxt_task_switch)
 	teq	r2, r3				@ next task owns it?
 	movne	pc, lr				@ no: leave Concan disabled
 
-1:	eor	r1, r1, #3			@ flip Concan access
-	mcr	p15, 0, r1, c15, c1, 0
+1:	@ flip Concan access
+	XSC(eor	r1, r1, #0x3)
+	XSC(mcr	p15, 0, r1, c15, c1, 0)
+	PJ4(eor	r1, r1, #0xf)
+	PJ4(mcr	p15, 0, r1, c1, c0, 2)
 
 	mrc	p15, 0, r1, c2, c0, 0
 	sub	pc, lr, r1, lsr #32		@ cpwait and return

diff --git a/arch/arm/kernel/pj4-cp0.c b/arch/arm/kernel/pj4-cp0.c
new file mode 100644
index 0000000..a4b1b07
--- /dev/null
+++ b/arch/arm/kernel/pj4-cp0.c
@@ -0,0 +1,94 @@
+/*
+ * linux/arch/arm/kernel/pj4-cp0.c
+ *
+ * PJ4 iWMMXt coprocessor context switching and handling
+ *
+ * Copyright (c) 2010 Marvell International Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <asm/thread_notify.h>
+
+static int iwmmxt_do(struct notifier_block *self, unsigned long cmd, void *t)
+{
+	struct thread_info *thread = t;
+
+	switch (cmd) {
+	case THREAD_NOTIFY_FLUSH:
+		/*
+		 * flush_thread() zeroes thread->fpstate, so no need
+		 * to do anything here.
+		 *
+		 * FALLTHROUGH: Ensure we don't try to overwrite our newly
+		 * initialised state information on the first fault.
+		 */
+
+	case THREAD_NOTIFY_EXIT:
+		iwmmxt_task_release(thread);
+		break;
+
+	case THREAD_NOTIFY_SWITCH:
+		iwmmxt_task_switch(thread);
+		break;
+	}
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block iwmmxt_notifier_block = {
+	.notifier_call	= iwmmxt_do,
+};
+
+
+static u32 __init pj4_cp_access_read(void)
+{
+	u32 value;
+
+	__asm__ __volatile__ (
+		"mrc	p15, 0, %0, c1, c0, 2\n\t"
+		: "=r" (value));
+	return value;
+}
+
+static void __init pj4_cp_access_write(u32 value)
+{
+	u32 temp;
+
+	__asm__ __volatile__ (
+		"mcr	p15, 0, %1, c1, c0, 2\n\t"
+		"mrc	p15, 0, %0, c1, c0, 2\n\t"
+		"mov	%0, %0\n\t"
+		"sub	pc, pc, #4\n\t"
+		: "=r" (temp) : "r" (value));
+}
+
+
+/*
+ * Disable CP0/CP1 on boot, and let call_fpe() and the iWMMXt lazy
+ * switch code handle iWMMXt context switching.
+ */
+static int __init pj4_cp0_init(void)
+{
+	u32 cp_access;
+
+	cp_access = pj4_cp_access_read() & ~0xf;
+	pj4_cp_access_write(cp_access);
+
+	printk(KERN_INFO "PJ4 iWMMXt coprocessor enabled.\n");
+	elf_hwcap |= HWCAP_IWMMXT;
+	thread_register_notifier(&iwmmxt_notifier_block);
+
+	return 0;
+}
+
+late_initcall(pj4_cp0_init);
--
cgit v1.1
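The XSC()/PJ4() pairs throughout the patch above encode a single hardware
difference: XScale-family cores gate Concan access through CP15 c15, c1, 0
with one enable bit per coprocessor (CP0 plus CP1 = 0x3), whereas PJ4 uses
the ARMv7-style coprocessor access register CP15 c1, c0, 2 with two bits
per coprocessor (full access for CP0 plus CP1 = 0xf). A condensed sketch
of the resulting enable sequence, assuming the XSC/PJ4 macros defined in
iwmmxt.S above (the scratch register is arbitrary):

	XSC(mrc	p15, 0, r2, c15, c1, 0)	@ XScale: read coprocessor access reg
	XSC(orr	r2, r2, #0x3)		@ one bit each for CP0 and CP1
	XSC(mcr	p15, 0, r2, c15, c1, 0)
	PJ4(mrc	p15, 0, r2, c1, c0, 2)	@ PJ4: read v7-style access register
	PJ4(orr	r2, r2, #0xf)		@ two bits each for CP0 and CP1
	PJ4(mcr	p15, 0, r2, c1, c0, 2)
	mrc	p15, 0, r2, c2, c0, 0	@ read back any CP15 register and
	mov	r2, r2			@ depend on the result: the cpwait idiom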