author		Paul Mundt <lethal@linux-sh.org>	2011-01-13 15:06:28 +0900
committer	Paul Mundt <lethal@linux-sh.org>	2011-01-13 15:06:28 +0900
commit		f43dc23d5ea91fca257be02138a255f02d98e806 (patch)
tree		b29722f6e965316e90ac97abf79923ced250dc21 /arch/arm/include
parent		f8e53553f452dcbf67cb89c8cba63a1cd6eb4cc0 (diff)
parent		4162cf64973df51fc885825bc9ca4d055891c49f (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/torvalds/linux-2.6 into common/serial-rework
Conflicts:
arch/sh/kernel/cpu/sh2/setup-sh7619.c
arch/sh/kernel/cpu/sh2a/setup-mxg.c
arch/sh/kernel/cpu/sh2a/setup-sh7201.c
arch/sh/kernel/cpu/sh2a/setup-sh7203.c
arch/sh/kernel/cpu/sh2a/setup-sh7206.c
arch/sh/kernel/cpu/sh3/setup-sh7705.c
arch/sh/kernel/cpu/sh3/setup-sh770x.c
arch/sh/kernel/cpu/sh3/setup-sh7710.c
arch/sh/kernel/cpu/sh3/setup-sh7720.c
arch/sh/kernel/cpu/sh4/setup-sh4-202.c
arch/sh/kernel/cpu/sh4/setup-sh7750.c
arch/sh/kernel/cpu/sh4/setup-sh7760.c
arch/sh/kernel/cpu/sh4a/setup-sh7343.c
arch/sh/kernel/cpu/sh4a/setup-sh7366.c
arch/sh/kernel/cpu/sh4a/setup-sh7722.c
arch/sh/kernel/cpu/sh4a/setup-sh7723.c
arch/sh/kernel/cpu/sh4a/setup-sh7724.c
arch/sh/kernel/cpu/sh4a/setup-sh7763.c
arch/sh/kernel/cpu/sh4a/setup-sh7770.c
arch/sh/kernel/cpu/sh4a/setup-sh7780.c
arch/sh/kernel/cpu/sh4a/setup-sh7785.c
arch/sh/kernel/cpu/sh4a/setup-sh7786.c
arch/sh/kernel/cpu/sh4a/setup-shx3.c
arch/sh/kernel/cpu/sh5/setup-sh5.c
drivers/serial/sh-sci.c
drivers/serial/sh-sci.h
include/linux/serial_sci.h
Diffstat (limited to 'arch/arm/include')
110 files changed, 3717 insertions, 1282 deletions
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index 73237bd..6550db3 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -1,3 +1,3 @@
 include include/asm-generic/Kbuild.asm
-unifdef-y += hwcap.h
+header-y += hwcap.h
diff --git a/arch/arm/include/asm/asm-offsets.h b/arch/arm/include/asm/asm-offsets.h
new file mode 100644
index 0000000..d370ee3
--- /dev/null
+++ b/arch/arm/include/asm/asm-offsets.h
@@ -0,0 +1 @@
+#include <generated/asm-offsets.h>
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 15f8a09..bc2d2d7 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -18,6 +18,7 @@
 #endif
 
 #include <asm/ptrace.h>
+#include <asm/domain.h>
 
 /*
  * Endian independent macros for shifting bytes within registers.
@@ -74,23 +75,56 @@
  * Enable and disable interrupts
  */
 #if __LINUX_ARM_ARCH__ >= 6
-	.macro	disable_irq
+	.macro	disable_irq_notrace
 	cpsid	i
 	.endm
 
-	.macro	enable_irq
+	.macro	enable_irq_notrace
 	cpsie	i
 	.endm
 #else
-	.macro	disable_irq
+	.macro	disable_irq_notrace
 	msr	cpsr_c, #PSR_I_BIT | SVC_MODE
 	.endm
 
-	.macro	enable_irq
+	.macro	enable_irq_notrace
 	msr	cpsr_c, #SVC_MODE
 	.endm
 #endif
 
+	.macro asm_trace_hardirqs_off
+#if defined(CONFIG_TRACE_IRQFLAGS)
+	stmdb   sp!, {r0-r3, ip, lr}
+	bl	trace_hardirqs_off
+	ldmia	sp!, {r0-r3, ip, lr}
+#endif
+	.endm
+
+	.macro asm_trace_hardirqs_on_cond, cond
+#if defined(CONFIG_TRACE_IRQFLAGS)
+	/*
+	 * actually the registers should be pushed and pop'd conditionally, but
+	 * after bl the flags are certainly clobbered
+	 */
+	stmdb   sp!, {r0-r3, ip, lr}
+	bl\cond	trace_hardirqs_on
+	ldmia	sp!, {r0-r3, ip, lr}
+#endif
+	.endm
+
+	.macro asm_trace_hardirqs_on
+	asm_trace_hardirqs_on_cond al
+	.endm
+
+	.macro disable_irq
+	disable_irq_notrace
+	asm_trace_hardirqs_off
+	.endm
+
+	.macro enable_irq
+	asm_trace_hardirqs_on
+	enable_irq_notrace
+	.endm
 
 /*
  * Save the current IRQ state and disable IRQs.  Note that this macro
  * assumes FIQs are enabled, and that the processor is in SVC mode.
@@ -104,26 +138,155 @@
  * Restore interrupt state previously stored in a register.  We don't
  * guarantee that this will preserve the flags.
  */
-	.macro	restore_irqs, oldcpsr
+	.macro	restore_irqs_notrace, oldcpsr
 	msr	cpsr_c, \oldcpsr
 	.endm
 
+	.macro restore_irqs, oldcpsr
+	tst	\oldcpsr, #PSR_I_BIT
+	asm_trace_hardirqs_on_cond eq
+	restore_irqs_notrace \oldcpsr
+	.endm
+
 #define USER(x...)				\
 9999:	x;					\
-	.section __ex_table,"a";		\
+	.pushsection __ex_table,"a";		\
 	.align	3;				\
 	.long	9999b,9001f;			\
-	.previous
+	.popsection
+
+#ifdef CONFIG_SMP
+#define ALT_SMP(instr...)					\
+9998:	instr
+/*
+ * Note: if you get assembler errors from ALT_UP() when building with
+ * CONFIG_THUMB2_KERNEL, you almost certainly need to use
+ * ALT_SMP( W(instr) ... )
+ */
+#define ALT_UP(instr...)					\
+	.pushsection ".alt.smp.init", "a"			;\
+	.long	9998b						;\
+9997:	instr							;\
+	.if . - 9997b != 4					;\
+		.error "ALT_UP() content must assemble to exactly 4 bytes";\
+	.endif							;\
+	.popsection
+#define ALT_UP_B(label)					\
+	.equ	up_b_offset, label - 9998b			;\
+	.pushsection ".alt.smp.init", "a"			;\
+	.long	9998b						;\
+	W(b)	. + up_b_offset					;\
+	.popsection
+#else
+#define ALT_SMP(instr...)
+#define ALT_UP(instr...) instr
+#define ALT_UP_B(label) b label
+#endif
 
 /*
  * SMP data memory barrier
  */
-	.macro	smp_dmb
+	.macro	smp_dmb mode
 #ifdef CONFIG_SMP
 #if __LINUX_ARM_ARCH__ >= 7
-	dmb
+	.ifeqs "\mode","arm"
+	ALT_SMP(dmb)
+	.else
+	ALT_SMP(W(dmb))
+	.endif
 #elif __LINUX_ARM_ARCH__ == 6
-	mcr	p15, 0, r0, c7, c10, 5	@ dmb
+	ALT_SMP(mcr	p15, 0, r0, c7, c10, 5)	@ dmb
+#else
+#error Incompatible SMP platform
 #endif
+	.ifeqs "\mode","arm"
+	ALT_UP(nop)
+	.else
+	ALT_UP(W(nop))
+	.endif
+#endif
 	.endm
 
+#ifdef CONFIG_THUMB2_KERNEL
+	.macro	setmode, mode, reg
+	mov	\reg, #\mode
+	msr	cpsr_c, \reg
+	.endm
+#else
+	.macro	setmode, mode, reg
+	msr	cpsr_c, #\mode
+	.endm
+#endif
+
+/*
+ * STRT/LDRT access macros with ARM and Thumb-2 variants
+ */
+#ifdef CONFIG_THUMB2_KERNEL
+
+	.macro	usraccoff, instr, reg, ptr, inc, off, cond, abort, t=T()
+9999:
+	.if	\inc == 1
+	\instr\cond\()b\()\t\().w \reg, [\ptr, #\off]
+	.elseif	\inc == 4
+	\instr\cond\()\t\().w \reg, [\ptr, #\off]
+	.else
+	.error	"Unsupported inc macro argument"
+	.endif
+
+	.pushsection __ex_table,"a"
+	.align	3
+	.long	9999b, \abort
+	.popsection
+	.endm
+
+	.macro	usracc, instr, reg, ptr, inc, cond, rept, abort
+	@ explicit IT instruction needed because of the label
+	@ introduced by the USER macro
+	.ifnc	\cond,al
+	.if	\rept == 1
+	itt	\cond
+	.elseif	\rept == 2
+	ittt	\cond
+	.else
+	.error	"Unsupported rept macro argument"
+	.endif
+	.endif
+
+	@ Slightly optimised to avoid incrementing the pointer twice
+	usraccoff \instr, \reg, \ptr, \inc, 0, \cond, \abort
+	.if	\rept == 2
+	usraccoff \instr, \reg, \ptr, \inc, \inc, \cond, \abort
+	.endif
+
+	add\cond \ptr, #\rept * \inc
+	.endm
+
+#else	/* !CONFIG_THUMB2_KERNEL */
+
+	.macro	usracc, instr, reg, ptr, inc, cond, rept, abort, t=T()
+	.rept	\rept
+9999:
+	.if	\inc == 1
+	\instr\cond\()b\()\t \reg, [\ptr], #\inc
+	.elseif	\inc == 4
+	\instr\cond\()\t \reg, [\ptr], #\inc
+	.else
+	.error	"Unsupported inc macro argument"
+	.endif
+
+	.pushsection __ex_table,"a"
+	.align	3
+	.long	9999b, \abort
+	.popsection
+	.endr
+	.endm
+
+#endif	/* CONFIG_THUMB2_KERNEL */
+
+	.macro	strusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
+	usracc	str, \reg, \ptr, \inc, \cond, \rept, \abort
+	.endm
+
+	.macro	ldrusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
+	usracc	ldr, \reg, \ptr, \inc, \cond, \rept, \abort
+	.endm
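
The USER() macro and the usracc helpers above both register a fixup address in __ex_table so that a faulting user-mode access can be recovered instead of oopsing. As a rough illustration of the same pattern consumed from C inline assembly (a hypothetical helper modeled on the futex.h hunk further down, not code from this commit):

#include <linux/errno.h>

static inline int read_user_word(unsigned int __user *p, unsigned int *val)
{
	unsigned int tmp;
	int err = 0;

	__asm__ __volatile__(
	"1:	ldrt	%1, [%2]\n"		/* user-mode load */
	"2:\n"
	"	.pushsection .fixup,\"ax\"\n"
	"3:	mov	%0, %3\n"		/* on fault: err = -EFAULT */
	"	b	2b\n"
	"	.popsection\n"
	"	.pushsection __ex_table,\"a\"\n"
	"	.align	3\n"
	"	.long	1b, 3b\n"		/* fault at 1b -> fixup at 3b */
	"	.popsection"
	: "+r" (err), "=&r" (tmp)
	: "r" (p), "Ir" (-EFAULT)
	: "memory");

	*val = tmp;
	return err;
}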
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index 9e07fe5..7e79503 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -19,43 +19,33 @@
 
 #ifdef __KERNEL__
 
-#define atomic_read(v)	((v)->counter)
+/*
+ * On ARM, ordinary assignment (str instruction) doesn't clear the local
+ * strex/ldrex monitor on some implementations. The reason we can use it for
+ * atomic_set() is the clrex or dummy strex done on every exception return.
+ */
+#define atomic_read(v)	(*(volatile int *)&(v)->counter)
+#define atomic_set(v,i)	(((v)->counter) = (i))
 
 #if __LINUX_ARM_ARCH__ >= 6
 
 /*
  * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
  * store exclusive to ensure that these are atomic.  We may loop
- * to ensure that the update happens.  Writing to 'v->counter'
- * without using the following operations WILL break the atomic
- * nature of these ops.
+ * to ensure that the update happens.
  */
-static inline void atomic_set(atomic_t *v, int i)
-{
-	unsigned long tmp;
-
-	__asm__ __volatile__("@ atomic_set\n"
-"1:	ldrex	%0, [%1]\n"
-"	strex	%0, %2, [%1]\n"
-"	teq	%0, #0\n"
-"	bne	1b"
-	: "=&r" (tmp)
-	: "r" (&v->counter), "r" (i)
-	: "cc");
-}
-
 static inline void atomic_add(int i, atomic_t *v)
 {
 	unsigned long tmp;
 	int result;
 
 	__asm__ __volatile__("@ atomic_add\n"
-"1:	ldrex	%0, [%2]\n"
-"	add	%0, %0, %3\n"
-"	strex	%1, %0, [%2]\n"
+"1:	ldrex	%0, [%3]\n"
+"	add	%0, %0, %4\n"
+"	strex	%1, %0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
-	: "=&r" (result), "=&r" (tmp)
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");
 }
 
@@ -68,12 +58,12 @@ static inline int atomic_add_return(int i, atomic_t *v)
 	smp_mb();
 
 	__asm__ __volatile__("@ atomic_add_return\n"
-"1:	ldrex	%0, [%2]\n"
-"	add	%0, %0, %3\n"
-"	strex	%1, %0, [%2]\n"
+"1:	ldrex	%0, [%3]\n"
+"	add	%0, %0, %4\n"
+"	strex	%1, %0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
-	: "=&r" (result), "=&r" (tmp)
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");
 
@@ -88,12 +78,12 @@ static inline void atomic_sub(int i, atomic_t *v)
 	int result;
 
 	__asm__ __volatile__("@ atomic_sub\n"
-"1:	ldrex	%0, [%2]\n"
-"	sub	%0, %0, %3\n"
-"	strex	%1, %0, [%2]\n"
+"1:	ldrex	%0, [%3]\n"
+"	sub	%0, %0, %4\n"
+"	strex	%1, %0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
-	: "=&r" (result), "=&r" (tmp)
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");
 }
 
@@ -106,12 +96,12 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 	smp_mb();
 
 	__asm__ __volatile__("@ atomic_sub_return\n"
-"1:	ldrex	%0, [%2]\n"
-"	sub	%0, %0, %3\n"
-"	strex	%1, %0, [%2]\n"
+"1:	ldrex	%0, [%3]\n"
+"	sub	%0, %0, %4\n"
+"	strex	%1, %0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
-	: "=&r" (result), "=&r" (tmp)
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");
 
@@ -128,11 +118,11 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 	do {
 		__asm__ __volatile__("@ atomic_cmpxchg\n"
-		"ldrex	%1, [%2]\n"
+		"ldrex	%1, [%3]\n"
 		"mov	%0, #0\n"
-		"teq	%1, %3\n"
-		"strexeq %0, %4, [%2]\n"
-		: "=&r" (res), "=&r" (oldval)
+		"teq	%1, %4\n"
+		"strexeq %0, %5, [%3]\n"
+		: "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		: "r" (&ptr->counter), "Ir" (old), "r" (new)
		: "cc");
	} while (res);
 
@@ -147,26 +137,22 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
 	unsigned long tmp, tmp2;
 
 	__asm__ __volatile__("@ atomic_clear_mask\n"
-"1:	ldrex	%0, [%2]\n"
-"	bic	%0, %0, %3\n"
-"	strex	%1, %0, [%2]\n"
+"1:	ldrex	%0, [%3]\n"
+"	bic	%0, %0, %4\n"
+"	strex	%1, %0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
-	: "=&r" (tmp), "=&r" (tmp2)
+	: "=&r" (tmp), "=&r" (tmp2), "+Qo" (*addr)
	: "r" (addr), "Ir" (mask)
	: "cc");
 }
 
 #else /* ARM_ARCH_6 */
 
-#include <asm/system.h>
-
 #ifdef CONFIG_SMP
 #error SMP not supported on pre-ARMv6 CPUs
 #endif
 
-#define atomic_set(v,i)	(((v)->counter) = (i))
-
 static inline int atomic_add_return(int i, atomic_t *v)
 {
 	unsigned long flags;
@@ -249,6 +235,234 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
 #define smp_mb__before_atomic_inc()	smp_mb()
 #define smp_mb__after_atomic_inc()	smp_mb()
 
+#ifndef CONFIG_GENERIC_ATOMIC64
+typedef struct {
+	u64 __aligned(8) counter;
+} atomic64_t;
+
+#define ATOMIC64_INIT(i) { (i) }
+
+static inline u64 atomic64_read(atomic64_t *v)
+{
+	u64 result;
+
+	__asm__ __volatile__("@ atomic64_read\n"
+"	ldrexd	%0, %H0, [%1]"
+	: "=&r" (result)
+	: "r" (&v->counter), "Qo" (v->counter)
+	);
+
+	return result;
+}
+
+static inline void atomic64_set(atomic64_t *v, u64 i)
+{
+	u64 tmp;
+
+	__asm__ __volatile__("@ atomic64_set\n"
+"1:	ldrexd	%0, %H0, [%2]\n"
+"	strexd	%0, %3, %H3, [%2]\n"
+"	teq	%0, #0\n"
+"	bne	1b"
+	: "=&r" (tmp), "=Qo" (v->counter)
+	: "r" (&v->counter), "r" (i)
+	: "cc");
+}
+
+static inline void atomic64_add(u64 i, atomic64_t *v)
+{
+	u64 result;
+	unsigned long tmp;
+
+	__asm__ __volatile__("@ atomic64_add\n"
+"1:	ldrexd	%0, %H0, [%3]\n"
+"	adds	%0, %0, %4\n"
+"	adc	%H0, %H0, %H4\n"
+"	strexd	%1, %0, %H0, [%3]\n"
+"	teq	%1, #0\n"
+"	bne	1b"
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+	: "r" (&v->counter), "r" (i)
+	: "cc");
+}
+
+static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
+{
+	u64 result;
+	unsigned long tmp;
+
+	smp_mb();
+
+	__asm__ __volatile__("@ atomic64_add_return\n"
+"1:	ldrexd	%0, %H0, [%3]\n"
+"	adds	%0, %0, %4\n"
+"	adc	%H0, %H0, %H4\n"
+"	strexd	%1, %0, %H0, [%3]\n"
+"	teq	%1, #0\n"
+"	bne	1b"
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+	: "r" (&v->counter), "r" (i)
+	: "cc");
+
+	smp_mb();
+
+	return result;
+}
+
+static inline void atomic64_sub(u64 i, atomic64_t *v)
+{
+	u64 result;
+	unsigned long tmp;
+
+	__asm__ __volatile__("@ atomic64_sub\n"
+"1:	ldrexd	%0, %H0, [%3]\n"
+"	subs	%0, %0, %4\n"
+"	sbc	%H0, %H0, %H4\n"
+"	strexd	%1, %0, %H0, [%3]\n"
+"	teq	%1, #0\n"
+"	bne	1b"
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+	: "r" (&v->counter), "r" (i)
+	: "cc");
+}
+
+static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
+{
+	u64 result;
+	unsigned long tmp;
+
+	smp_mb();
+
+	__asm__ __volatile__("@ atomic64_sub_return\n"
+"1:	ldrexd	%0, %H0, [%3]\n"
+"	subs	%0, %0, %4\n"
+"	sbc	%H0, %H0, %H4\n"
+"	strexd	%1, %0, %H0, [%3]\n"
+"	teq	%1, #0\n"
+"	bne	1b"
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+	: "r" (&v->counter), "r" (i)
+	: "cc");
+
+	smp_mb();
+
+	return result;
+}
+
+static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
+{
+	u64 oldval;
+	unsigned long res;
+
+	smp_mb();
+
+	do {
+		__asm__ __volatile__("@ atomic64_cmpxchg\n"
+		"ldrexd		%1, %H1, [%3]\n"
+		"mov		%0, #0\n"
+		"teq		%1, %4\n"
+		"teqeq		%H1, %H4\n"
+		"strexdeq	%0, %5, %H5, [%3]"
+		: "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
+		: "r" (&ptr->counter), "r" (old), "r" (new)
+		: "cc");
+	} while (res);
+
+	smp_mb();
+
+	return oldval;
+}
+
+static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
+{
+	u64 result;
+	unsigned long tmp;
+
+	smp_mb();
+
+	__asm__ __volatile__("@ atomic64_xchg\n"
+"1:	ldrexd	%0, %H0, [%3]\n"
+"	strexd	%1, %4, %H4, [%3]\n"
+"	teq	%1, #0\n"
+"	bne	1b"
+	: "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
+	: "r" (&ptr->counter), "r" (new)
+	: "cc");
+
+	smp_mb();
+
+	return result;
+}
+
+static inline u64 atomic64_dec_if_positive(atomic64_t *v)
+{
+	u64 result;
+	unsigned long tmp;
+
+	smp_mb();
+
+	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
+"1:	ldrexd	%0, %H0, [%3]\n"
+"	subs	%0, %0, #1\n"
+"	sbc	%H0, %H0, #0\n"
+"	teq	%H0, #0\n"
+"	bmi	2f\n"
+"	strexd	%1, %0, %H0, [%3]\n"
+"	teq	%1, #0\n"
+"	bne	1b\n"
+"2:"
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+	: "r" (&v->counter)
+	: "cc");
+
+	smp_mb();
+
+	return result;
+}
+
+static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
+{
+	u64 val;
+	unsigned long tmp;
+	int ret = 1;
+
+	smp_mb();
+
+	__asm__ __volatile__("@ atomic64_add_unless\n"
+"1:	ldrexd	%0, %H0, [%4]\n"
+"	teq	%0, %5\n"
+"	teqeq	%H0, %H5\n"
+"	moveq	%1, #0\n"
+"	beq	2f\n"
+"	adds	%0, %0, %6\n"
+"	adc	%H0, %H0, %H6\n"
+"	strexd	%2, %0, %H0, [%4]\n"
+"	teq	%2, #0\n"
+"	bne	1b\n"
+"2:"
+	: "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
+	: "r" (&v->counter), "r" (u), "r" (a)
+	: "cc");
+
+	if (ret)
+		smp_mb();
+
+	return ret;
+}
+
+#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
+#define atomic64_inc(v)			atomic64_add(1LL, (v))
+#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
+#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
+#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
+#define atomic64_dec(v)			atomic64_sub(1LL, (v))
+#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
+#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
+#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)
+
+#else /* !CONFIG_GENERIC_ATOMIC64 */
+#include <asm-generic/atomic64.h>
+#endif
 #include <asm-generic/atomic-long.h>
 #endif
 #endif
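
For context, a minimal usage sketch of the new atomic64 API (hypothetical object and function names, not from this commit): atomic64_inc_not_zero() implements the usual "take a reference only if the object is still live" idiom, refusing to resurrect a count that has already reached zero.

#include <asm/atomic.h>

struct obj {
	atomic64_t refcount;	/* initialised to ATOMIC64_INIT(1) */
};

static int obj_get(struct obj *o)
{
	/* returns 0 once the count has dropped to zero */
	return atomic64_inc_not_zero(&o->refcount);
}

static void obj_put(struct obj *o)
{
	if (atomic64_dec_and_test(&o->refcount)) {
		/* last reference dropped: free the object here */
	}
}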
diff --git a/arch/arm/include/asm/bitops.h b/arch/arm/include/asm/bitops.h
index 63a481f..338ff19 100644
--- a/arch/arm/include/asm/bitops.h
+++ b/arch/arm/include/asm/bitops.h
@@ -84,7 +84,7 @@ ____atomic_test_and_set_bit(unsigned int bit, volatile unsigned long *p)
 	*p = res | mask;
 	raw_local_irq_restore(flags);
 
-	return res & mask;
+	return (res & mask) != 0;
 }
 
 static inline int
@@ -101,7 +101,7 @@ ____atomic_test_and_clear_bit(unsigned int bit, volatile unsigned long *p)
 	*p = res & ~mask;
 	raw_local_irq_restore(flags);
 
-	return res & mask;
+	return (res & mask) != 0;
 }
 
 static inline int
@@ -118,7 +118,7 @@ ____atomic_test_and_change_bit(unsigned int bit, volatile unsigned long *p)
 	*p = res ^ mask;
 	raw_local_irq_restore(flags);
 
-	return res & mask;
+	return (res & mask) != 0;
 }
 
 #include <asm-generic/bitops/non-atomic.h>
diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
index feaa75f..75fe66b 100644
--- a/arch/arm/include/asm/cache.h
+++ b/arch/arm/include/asm/cache.h
@@ -4,7 +4,7 @@
 #ifndef __ASMARM_CACHE_H
 #define __ASMARM_CACHE_H
 
-#define L1_CACHE_SHIFT		5
+#define L1_CACHE_SHIFT		CONFIG_ARM_L1_CACHE_SHIFT
 #define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
 
 /*
@@ -14,7 +14,7 @@
  * cache before the transfer is done, causing old data to be seen by
  * the CPU.
  */
-#define ARCH_KMALLOC_MINALIGN	L1_CACHE_BYTES
+#define ARCH_DMA_MINALIGN	L1_CACHE_BYTES
 
 /*
  * With EABI on ARMv5 and above we must have 64-bit aligned slab pointers.
@@ -23,4 +23,6 @@
 #define ARCH_SLAB_MINALIGN 8
 #endif
 
+#define __read_mostly __attribute__((__section__(".data..read_mostly")))
+
 #endif
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 1a711ea..3acd8fa 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -15,6 +15,7 @@
 #include <asm/glue.h>
 #include <asm/shmparam.h>
 #include <asm/cachetype.h>
+#include <asm/outercache.h>
 
 #define CACHE_COLOUR(vaddr)	((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
 
@@ -42,7 +43,8 @@
 #endif
 
 #if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \
-    defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020)
+    defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020) || \
+    defined(CONFIG_CPU_ARM1026)
 # define MULTI_CACHE 1
 #endif
 
@@ -135,10 +137,10 @@
 #endif
 
 /*
- * This flag is used to indicate that the page pointed to by a pte
- * is dirty and requires cleaning before returning it to the user.
+ * This flag is used to indicate that the page pointed to by a pte is clean
+ * and does not require cleaning before returning it to the user.
 */
-#define PG_dcache_dirty PG_arch_1
+#define PG_dcache_clean PG_arch_1
 
 /*
 *	MM Cache Management
@@ -154,16 +156,22 @@
 *	Please note that the implementation of these, and the required
 *	effects are cache-type (VIVT/VIPT/PIPT) specific.
 *
- *	flush_cache_kern_all()
+ *	flush_icache_all()
+ *
+ *		Unconditionally clean and invalidate the entire icache.
+ *		Currently only needed for cache-v6.S and cache-v7.S, see
+ *		__flush_icache_all for the generic implementation.
+ *
+ *	flush_kern_all()
 *
 *		Unconditionally clean and invalidate the entire cache.
 *
- *	flush_cache_user_mm(mm)
+ *	flush_user_all()
 *
 *		Clean and invalidate all user space cache entries
 *		before a change of page tables.
 *
- *	flush_cache_user_range(start, end, flags)
+ *	flush_user_range(start, end, flags)
 *
 *		Clean and invalidate a range of cache entries in the
 *		specified address space before a change of page tables.
@@ -179,23 +187,22 @@
 *	- start		- virtual start address
 *	- end		- virtual end address
 *
- *	DMA Cache Coherency
- *	===================
- *
- *	dma_inv_range(start, end)
+ *	coherent_user_range(start, end)
 *
- *		Invalidate (discard) the specified virtual address range.
- *		May not write back any entries.  If 'start' or 'end'
- *		are not cache line aligned, those lines must be written
- *		back.
+ *		Ensure coherency between the Icache and the Dcache in the
+ *		region described by start, end.  If you have non-snooping
+ *		Harvard caches, you need to implement this function.
 *		- start		- virtual start address
 *		- end		- virtual end address
 *
- *	dma_clean_range(start, end)
+ *	flush_kern_dcache_area(kaddr, size)
 *
- *		Clean (write back) the specified virtual address range.
- *		- start		- virtual start address
- *		- end		- virtual end address
+ *		Ensure that the data held in page is written back.
+ *		- kaddr		- page address
+ *		- size		- region size
+ *
+ *	DMA Cache Coherency
+ *	===================
 *
 *	dma_flush_range(start, end)
 *
@@ -205,23 +212,19 @@
 */
 
 struct cpu_cache_fns {
+	void (*flush_icache_all)(void);
 	void (*flush_kern_all)(void);
 	void (*flush_user_all)(void);
 	void (*flush_user_range)(unsigned long, unsigned long, unsigned int);
 
 	void (*coherent_kern_range)(unsigned long, unsigned long);
 	void (*coherent_user_range)(unsigned long, unsigned long);
-	void (*flush_kern_dcache_page)(void *);
+	void (*flush_kern_dcache_area)(void *, size_t);
 
-	void (*dma_inv_range)(const void *, const void *);
-	void (*dma_clean_range)(const void *, const void *);
-	void (*dma_flush_range)(const void *, const void *);
-};
+	void (*dma_map_area)(const void *, size_t, int);
+	void (*dma_unmap_area)(const void *, size_t, int);
 
-struct outer_cache_fns {
-	void (*inv_range)(unsigned long, unsigned long);
-	void (*clean_range)(unsigned long, unsigned long);
-	void (*flush_range)(unsigned long, unsigned long);
+	void (*dma_flush_range)(const void *, const void *);
 };
 
 /*
@@ -231,12 +234,13 @@ struct outer_cache_fns {
 
 extern struct cpu_cache_fns cpu_cache;
 
+#define __cpuc_flush_icache_all		cpu_cache.flush_icache_all
 #define __cpuc_flush_kern_all		cpu_cache.flush_kern_all
 #define __cpuc_flush_user_all		cpu_cache.flush_user_all
 #define __cpuc_flush_user_range		cpu_cache.flush_user_range
 #define __cpuc_coherent_kern_range	cpu_cache.coherent_kern_range
 #define __cpuc_coherent_user_range	cpu_cache.coherent_user_range
-#define __cpuc_flush_dcache_page	cpu_cache.flush_kern_dcache_page
+#define __cpuc_flush_dcache_area	cpu_cache.flush_kern_dcache_area
 
 /*
 * These are private to the dma-mapping API.  Do not use directly.
@@ -244,25 +248,27 @@ extern struct cpu_cache_fns cpu_cache;
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
-#define dmac_inv_range			cpu_cache.dma_inv_range
-#define dmac_clean_range		cpu_cache.dma_clean_range
+#define dmac_map_area			cpu_cache.dma_map_area
+#define dmac_unmap_area			cpu_cache.dma_unmap_area
 #define dmac_flush_range		cpu_cache.dma_flush_range
 
 #else
 
+#define __cpuc_flush_icache_all		__glue(_CACHE,_flush_icache_all)
 #define __cpuc_flush_kern_all		__glue(_CACHE,_flush_kern_cache_all)
 #define __cpuc_flush_user_all		__glue(_CACHE,_flush_user_cache_all)
 #define __cpuc_flush_user_range		__glue(_CACHE,_flush_user_cache_range)
 #define __cpuc_coherent_kern_range	__glue(_CACHE,_coherent_kern_range)
 #define __cpuc_coherent_user_range	__glue(_CACHE,_coherent_user_range)
-#define __cpuc_flush_dcache_page	__glue(_CACHE,_flush_kern_dcache_page)
+#define __cpuc_flush_dcache_area	__glue(_CACHE,_flush_kern_dcache_area)
 
+extern void __cpuc_flush_icache_all(void);
 extern void __cpuc_flush_kern_all(void);
 extern void __cpuc_flush_user_all(void);
 extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
 extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
 extern void __cpuc_coherent_user_range(unsigned long, unsigned long);
-extern void __cpuc_flush_dcache_page(void *);
+extern void __cpuc_flush_dcache_area(void *, size_t);
 
 /*
 * These are private to the dma-mapping API.  Do not use directly.
@@ -270,58 +276,23 @@ extern void __cpuc_flush_dcache_page(void *);
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
-#define dmac_inv_range			__glue(_CACHE,_dma_inv_range)
-#define dmac_clean_range		__glue(_CACHE,_dma_clean_range)
+#define dmac_map_area			__glue(_CACHE,_dma_map_area)
+#define dmac_unmap_area			__glue(_CACHE,_dma_unmap_area)
 #define dmac_flush_range		__glue(_CACHE,_dma_flush_range)
 
-extern void dmac_inv_range(const void *, const void *);
-extern void dmac_clean_range(const void *, const void *);
+extern void dmac_map_area(const void *, size_t, int);
+extern void dmac_unmap_area(const void *, size_t, int);
 extern void dmac_flush_range(const void *, const void *);
 
 #endif
 
-#ifdef CONFIG_OUTER_CACHE
-
-extern struct outer_cache_fns outer_cache;
-
-static inline void outer_inv_range(unsigned long start, unsigned long end)
-{
-	if (outer_cache.inv_range)
-		outer_cache.inv_range(start, end);
-}
-static inline void outer_clean_range(unsigned long start, unsigned long end)
-{
-	if (outer_cache.clean_range)
-		outer_cache.clean_range(start, end);
-}
-static inline void outer_flush_range(unsigned long start, unsigned long end)
-{
-	if (outer_cache.flush_range)
-		outer_cache.flush_range(start, end);
-}
-
-#else
-
-static inline void outer_inv_range(unsigned long start, unsigned long end)
-{ }
-static inline void outer_clean_range(unsigned long start, unsigned long end)
-{ }
-static inline void outer_flush_range(unsigned long start, unsigned long end)
-{ }
-
-#endif
-
 /*
 * Copy user data from/to a page which is mapped into a different
 * processes address space.  Really, we want to allow our "user
 * space" model to handle this.
 */
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
-	do {							\
-		memcpy(dst, src, len);				\
-		flush_ptrace_access(vma, page, vaddr, dst, len, 1);\
-	} while (0)
-
+extern void copy_to_user_page(struct vm_area_struct *, struct page *,
	unsigned long, void *, const void *, unsigned long);
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
 	do {							\
 		memcpy(dst, src, len);				\
@@ -330,48 +301,73 @@ static inline void outer_flush_range(unsigned long start, unsigned long end)
 /*
 * Convert calls to our calling convention.
 */
+
+/* Invalidate I-cache */
+#define __flush_icache_all_generic()				\
+	asm("mcr	p15, 0, %0, c7, c5, 0"			\
+	    : : "r" (0));
+
+/* Invalidate I-cache inner shareable */
+#define __flush_icache_all_v7_smp()				\
+	asm("mcr	p15, 0, %0, c7, c1, 0"			\
+	    : : "r" (0));
+
+/*
+ * Optimized __flush_icache_all for the common cases. Note that UP ARMv7
+ * will fall through to use __flush_icache_all_generic.
+ */
+#if (defined(CONFIG_CPU_V7) && defined(CONFIG_CPU_V6)) || \
+	defined(CONFIG_SMP_ON_UP)
+#define __flush_icache_preferred	__cpuc_flush_icache_all
+#elif __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
+#define __flush_icache_preferred	__flush_icache_all_v7_smp
+#elif __LINUX_ARM_ARCH__ == 6 && defined(CONFIG_ARM_ERRATA_411920)
+#define __flush_icache_preferred	__cpuc_flush_icache_all
+#else
+#define __flush_icache_preferred	__flush_icache_all_generic
+#endif
+
+static inline void __flush_icache_all(void)
+{
+	__flush_icache_preferred();
+}
+
 #define flush_cache_all()		__cpuc_flush_kern_all()
-#ifndef CONFIG_CPU_CACHE_VIPT
-static inline void flush_cache_mm(struct mm_struct *mm)
+
+static inline void vivt_flush_cache_mm(struct mm_struct *mm)
 {
-	if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
+	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
 		__cpuc_flush_user_all();
 }
 
 static inline void
-flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
+vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 {
-	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask))
+	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
 		__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
					vma->vm_flags);
 }
 
 static inline void
-flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
+vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
 {
-	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
+	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
 		unsigned long addr = user_addr & PAGE_MASK;
 		__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
 	}
 }
 
-static inline void
-flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
-			 unsigned long uaddr, void *kaddr,
-			 unsigned long len, int write)
-{
-	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
-		unsigned long addr = (unsigned long)kaddr;
-		__cpuc_coherent_kern_range(addr, addr + len);
-	}
-}
+#ifndef CONFIG_CPU_CACHE_VIPT
+#define flush_cache_mm(mm) \
+		vivt_flush_cache_mm(mm)
+#define flush_cache_range(vma,start,end) \
+		vivt_flush_cache_range(vma,start,end)
+#define flush_cache_page(vma,addr,pfn) \
+		vivt_flush_cache_page(vma,addr,pfn)
 #else
 extern void flush_cache_mm(struct mm_struct *mm);
 extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
 extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
-extern void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
-				unsigned long uaddr, void *kaddr,
-				unsigned long len, int write);
 #endif
 
 #define flush_cache_dup_mm(mm) flush_cache_mm(mm)
@@ -408,15 +404,18 @@ extern void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
 * about to change to user space.  This is the same method as used on SPARC64.
 * See update_mmu_cache for the user space part.
 */
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 extern void flush_dcache_page(struct page *);
 
-extern void __flush_dcache_page(struct address_space *mapping, struct page *page);
-
-static inline void __flush_icache_all(void)
+static inline void flush_kernel_vmap_range(void *addr, int size)
+{
+	if ((cache_is_vivt() || cache_is_vipt_aliasing()))
+		__cpuc_flush_dcache_area(addr, (size_t)size);
+}
+static inline void invalidate_kernel_vmap_range(void *addr, int size)
 {
-	asm("mcr	p15, 0, %0, c7, c5, 0	@ invalidate I-cache\n"
-	    :
-	    : "r" (0));
+	if ((cache_is_vivt() || cache_is_vipt_aliasing()))
+		__cpuc_flush_dcache_area(addr, (size_t)size);
 }
 
 #define ARCH_HAS_FLUSH_ANON_PAGE
@@ -432,9 +431,6 @@ static inline void flush_anon_page(struct vm_area_struct *vma,
 #define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
 static inline void flush_kernel_dcache_page(struct page *page)
 {
-	/* highmem pages are always flushed upon kunmap already */
-	if ((cache_is_vivt() || cache_is_vipt_aliasing()) && !PageHighMem(page))
-		__cpuc_flush_dcache_page(page_address(page));
 }
 
 #define flush_dcache_mmap_lock(mapping) \
@@ -451,13 +447,6 @@ static inline void flush_kernel_dcache_page(struct page *page)
 */
 #define flush_icache_page(vma,page)	do { } while (0)
 
-static inline void flush_ioremap_region(unsigned long phys, void __iomem *virt,
-	unsigned offset, size_t size)
-{
-	const void *start = (void __force *)virt + offset;
-	dmac_inv_range(start, start + size);
-}
-
 /*
 * flush_cache_vmap() is used when creating mappings (eg, via vmap,
 * vmalloc, ioremap etc) in kernel space for pages.  On non-VIPT
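
A usage sketch for the new flush_kernel_vmap_range()/invalidate_kernel_vmap_range() pair (hypothetical driver code, assuming a buffer that the kernel also maps through vmap() on an aliasing VIVT/VIPT cache; not code from this commit):

#include <asm/cacheflush.h>

static void dma_fill_vmap_buffer(void *vaddr, int size)
{
	/* write back any dirty lines the kernel left via the vmap alias */
	flush_kernel_vmap_range(vaddr, size);

	/* ... start DMA from the device into the underlying pages ... */

	/* after completion, discard stale lines before reading the alias */
	invalidate_kernel_vmap_range(vaddr, size);
}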
diff --git a/arch/arm/include/asm/cachetype.h b/arch/arm/include/asm/cachetype.h
index d3a4c2c..c023db0 100644
--- a/arch/arm/include/asm/cachetype.h
+++ b/arch/arm/include/asm/cachetype.h
@@ -6,6 +6,7 @@
 #define CACHEID_VIPT_ALIASING	(1 << 2)
 #define CACHEID_VIPT		(CACHEID_VIPT_ALIASING|CACHEID_VIPT_NONALIASING)
 #define CACHEID_ASID_TAGGED	(1 << 3)
+#define CACHEID_VIPT_I_ALIASING	(1 << 4)
 
 extern unsigned int cacheid;
 
@@ -14,15 +15,18 @@ extern unsigned int cacheid;
 #define cache_is_vipt_nonaliasing()	cacheid_is(CACHEID_VIPT_NONALIASING)
 #define cache_is_vipt_aliasing()	cacheid_is(CACHEID_VIPT_ALIASING)
 #define icache_is_vivt_asid_tagged()	cacheid_is(CACHEID_ASID_TAGGED)
+#define icache_is_vipt_aliasing()	cacheid_is(CACHEID_VIPT_I_ALIASING)
 
 /*
 * __LINUX_ARM_ARCH__ is the minimum supported CPU architecture
 * Mask out support which will never be present on newer CPUs.
 * - v6+ is never VIVT
- * - v7+ VIPT never aliases
+ * - v7+ VIPT never aliases on D-side
 */
 #if __LINUX_ARM_ARCH__ >= 7
-#define __CACHEID_ARCH_MIN	(CACHEID_VIPT_NONALIASING | CACHEID_ASID_TAGGED)
+#define __CACHEID_ARCH_MIN	(CACHEID_VIPT_NONALIASING |\
+				 CACHEID_ASID_TAGGED |\
+				 CACHEID_VIPT_I_ALIASING)
 #elif __LINUX_ARM_ARCH__ >= 6
 #define	__CACHEID_ARCH_MIN	(~CACHEID_VIVT)
 #else
diff --git a/arch/arm/include/asm/clkdev.h b/arch/arm/include/asm/clkdev.h
index b6ec7c6..765d332 100644
--- a/arch/arm/include/asm/clkdev.h
+++ b/arch/arm/include/asm/clkdev.h
@@ -12,19 +12,13 @@
 #ifndef __ASM_CLKDEV_H
 #define __ASM_CLKDEV_H
 
-struct clk;
+#include <linux/slab.h>
 
-struct clk_lookup {
-	struct list_head	node;
-	const char		*dev_id;
-	const char		*con_id;
-	struct clk		*clk;
-};
+#include <mach/clkdev.h>
 
-struct clk_lookup *clkdev_alloc(struct clk *clk, const char *con_id,
-	const char *dev_fmt, ...);
-
-void clkdev_add(struct clk_lookup *cl);
-void clkdev_drop(struct clk_lookup *cl);
+static inline struct clk_lookup_alloc *__clkdev_alloc(size_t size)
+{
+	return kzalloc(size, GFP_KERNEL);
+}
 
 #endif
diff --git a/arch/arm/include/asm/cpu.h b/arch/arm/include/asm/cpu.h
index 634b2d7..7939681 100644
--- a/arch/arm/include/asm/cpu.h
+++ b/arch/arm/include/asm/cpu.h
@@ -11,6 +11,7 @@
 #define __ASM_ARM_CPU_H
 
 #include <linux/percpu.h>
+#include <linux/cpu.h>
 
 struct cpuinfo_arm {
 	struct cpu	cpu;
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index b3e656c..20ae96c 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -63,6 +63,11 @@ static inline unsigned int __attribute_const__ read_cpuid_cachetype(void)
 	return read_cpuid(CPUID_CACHETYPE);
 }
 
+static inline unsigned int __attribute_const__ read_cpuid_tcmstatus(void)
+{
+	return read_cpuid(CPUID_TCM);
+}
+
 /*
 * Intel's XScale3 core supports some v6 features (supersections, L2)
 * but advertises itself as v5 as it does not support the v6 ISA.  For
@@ -73,7 +78,10 @@ static inline unsigned int __attribute_const__ read_cpuid_cachetype(void)
 #else
 static inline int cpu_is_xsc3(void)
 {
-	if ((read_cpuid_id() & 0xffffe000) == 0x69056000)
+	unsigned int id;
+	id = read_cpuid_id() & 0xffffe000;
+	/* It covers both Intel ID and Marvell ID */
+	if ((id == 0x69056000) || (id == 0x56056000))
 		return 1;
 
 	return 0;
diff --git a/arch/arm/include/asm/device.h b/arch/arm/include/asm/device.h
index c61642b..9f390ce 100644
--- a/arch/arm/include/asm/device.h
+++ b/arch/arm/include/asm/device.h
@@ -12,4 +12,7 @@ struct dev_archdata {
 #endif
 };
 
+struct pdev_archdata {
+};
+
 #endif
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index ff46dfa..4fff837 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -5,30 +5,30 @@
 
 #include <linux/mm_types.h>
 #include <linux/scatterlist.h>
+#include <linux/dma-debug.h>
 
 #include <asm-generic/dma-coherent.h>
 #include <asm/memory.h>
 
+#ifdef __arch_page_to_dma
+#error Please update to __arch_pfn_to_dma
+#endif
+
 /*
- * page_to_dma/dma_to_virt/virt_to_dma are architecture private functions
- * used internally by the DMA-mapping API to provide DMA addresses. They
- * must not be used by drivers.
+ * dma_to_pfn/pfn_to_dma/dma_to_virt/virt_to_dma are architecture private
+ * functions used internally by the DMA-mapping API to provide DMA
+ * addresses. They must not be used by drivers.
 */
-#ifndef __arch_page_to_dma
-
-#if !defined(CONFIG_HIGHMEM)
-static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
+#ifndef __arch_pfn_to_dma
+static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
 {
-	return (dma_addr_t)__virt_to_bus((unsigned long)page_address(page));
+	return (dma_addr_t)__pfn_to_bus(pfn);
 }
-#elif defined(__pfn_to_bus)
-static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
+
+static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
 {
-	return (dma_addr_t)__pfn_to_bus(page_to_pfn(page));
+	return __bus_to_pfn(addr);
 }
-#else
-#error "this machine class needs to define __arch_page_to_dma to use HIGHMEM"
-#endif
 
 static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
 {
@@ -40,9 +40,14 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
	return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
 }
 #else
-static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
+static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
+{
+	return __arch_pfn_to_dma(dev, pfn);
+}
+
+static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
 {
-	return __arch_page_to_dma(dev, page);
+	return __arch_dma_to_pfn(dev, addr);
 }
 
 static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
@@ -57,18 +62,58 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
 #endif
 
 /*
- * DMA-consistent mapping functions.  These allocate/free a region of
- * uncached, unwrite-buffered mapped memory space for use with DMA
- * devices.  This is the "generic" version.  The PCI specific version
- * is in pci.h
+ * The DMA API is built upon the notion of "buffer ownership".  A buffer
+ * is either exclusively owned by the CPU (and therefore may be accessed
+ * by it) or exclusively owned by the DMA device.  These helper functions
+ * represent the transitions between these two ownership states.
 *
- * Note: Drivers should NOT use this function directly, as it will break
- * platforms with CONFIG_DMABOUNCE.
- * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
+ * Note, however, that on later ARMs, this notion does not work due to
+ * speculative prefetches.  We model our approach on the assumption that
+ * the CPU does do speculative prefetches, which means we clean caches
+ * before transfers and delay cache invalidation until transfer completion.
+ *
+ * Private support functions: these are not part of the API and are
+ * liable to change.  Drivers must not use these.
 */
-extern void dma_cache_maint(const void *kaddr, size_t size, int rw);
-extern void dma_cache_maint_page(struct page *page, unsigned long offset,
-				 size_t size, int rw);
+static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size,
	enum dma_data_direction dir)
+{
+	extern void ___dma_single_cpu_to_dev(const void *, size_t,
		enum dma_data_direction);
+
+	if (!arch_is_coherent())
+		___dma_single_cpu_to_dev(kaddr, size, dir);
+}
+
+static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size,
	enum dma_data_direction dir)
+{
+	extern void ___dma_single_dev_to_cpu(const void *, size_t,
		enum dma_data_direction);
+
+	if (!arch_is_coherent())
+		___dma_single_dev_to_cpu(kaddr, size, dir);
+}
+
+static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
+{
+	extern void ___dma_page_cpu_to_dev(struct page *, unsigned long,
		size_t, enum dma_data_direction);
+
+	if (!arch_is_coherent())
+		___dma_page_cpu_to_dev(page, off, size, dir);
+}
+
+static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
+{
+	extern void ___dma_page_dev_to_cpu(struct page *, unsigned long,
		size_t, enum dma_data_direction);
+
+	if (!arch_is_coherent())
+		___dma_page_dev_to_cpu(page, off, size, dir);
+}
 
 /*
 * Return whether the given device DMA address mask can be supported
@@ -88,6 +133,14 @@ static inline int dma_supported(struct device *dev, u64 mask)
 
 static inline int dma_set_mask(struct device *dev, u64 dma_mask)
 {
+#ifdef CONFIG_DMABOUNCE
+	if (dev->archdata.dmabounce) {
+		if (dma_mask >= ISA_DMA_THRESHOLD)
+			return 0;
+		else
+			return -EIO;
+	}
+#endif
 	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
 		return -EIO;
 
@@ -96,16 +149,6 @@ static inline int dma_set_mask(struct device *dev, u64 dma_mask)
	return 0;
 }
 
-static inline int dma_get_cache_alignment(void)
-{
-	return 32;
-}
-
-static inline int dma_is_consistent(struct device *dev, dma_addr_t handle)
-{
-	return !!arch_is_coherent();
-}
-
 /*
 * DMA errors are defined by all-bits-set in the DMA address.
 */
@@ -255,11 +298,13 @@ extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
 /*
 * The DMA API, implemented by dmabounce.c.  See below for descriptions.
 */
-extern dma_addr_t dma_map_single(struct device *, void *, size_t,
+extern dma_addr_t __dma_map_single(struct device *, void *, size_t,
		enum dma_data_direction);
-extern dma_addr_t dma_map_page(struct device *, struct page *,
+extern void __dma_unmap_single(struct device *, dma_addr_t, size_t,
+		enum dma_data_direction);
+extern dma_addr_t __dma_map_page(struct device *, struct page *,
		unsigned long, size_t, enum dma_data_direction);
-extern void dma_unmap_single(struct device *, dma_addr_t, size_t,
+extern void __dma_unmap_page(struct device *, dma_addr_t, size_t,
		enum dma_data_direction);
 
 /*
@@ -283,6 +328,34 @@ static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
 }
 
+static inline dma_addr_t __dma_map_single(struct device *dev, void *cpu_addr,
		size_t size, enum dma_data_direction dir)
+{
+	__dma_single_cpu_to_dev(cpu_addr, size, dir);
+	return virt_to_dma(dev, cpu_addr);
+}
+
+static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir)
+{
+	__dma_page_cpu_to_dev(page, offset, size, dir);
+	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
+}
+
+static inline void __dma_unmap_single(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
+{
+	__dma_single_dev_to_cpu(dma_to_virt(dev, handle), size, dir);
+}
+
+static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
+{
+	__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
		handle & ~PAGE_MASK, size, dir);
+}
+#endif /* CONFIG_DMABOUNCE */
+
 /**
 * dma_map_single - map a single buffer for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
@@ -300,12 +373,16 @@ static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
 static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
		size_t size, enum dma_data_direction dir)
 {
+	dma_addr_t addr;
+
	BUG_ON(!valid_dma_direction(dir));
 
-	if (!arch_is_coherent())
-		dma_cache_maint(cpu_addr, size, dir);
+	addr = __dma_map_single(dev, cpu_addr, size, dir);
+	debug_dma_map_page(dev, virt_to_page(cpu_addr),
			(unsigned long)cpu_addr & ~PAGE_MASK, size,
			dir, addr, true);
 
-	return virt_to_dma(dev, cpu_addr);
+	return addr;
 }
 
 /**
@@ -325,12 +402,14 @@ static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
 static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir)
 {
+	dma_addr_t addr;
+
	BUG_ON(!valid_dma_direction(dir));
 
-	if (!arch_is_coherent())
-		dma_cache_maint_page(page, offset, size, dir);
+	addr = __dma_map_page(dev, page, offset, size, dir);
+	debug_dma_map_page(dev, page, offset, size, dir, addr, false);
 
-	return page_to_dma(dev, page) + offset;
+	return addr;
 }
 
 /**
@@ -350,9 +429,9 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
 static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
 {
-	/* nothing to do */
+	debug_dma_unmap_page(dev, handle, size, dir, true);
+	__dma_unmap_single(dev, handle, size, dir);
 }
-#endif /* CONFIG_DMABOUNCE */
 
 /**
 * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
@@ -371,7 +450,8 @@ static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
 static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
 {
-	dma_unmap_single(dev, handle, size, dir);
+	debug_dma_unmap_page(dev, handle, size, dir, false);
+	__dma_unmap_page(dev, handle, size, dir);
 }
 
 /**
@@ -398,7 +478,12 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
 {
	BUG_ON(!valid_dma_direction(dir));
 
-	dmabounce_sync_for_cpu(dev, handle, offset, size, dir);
+	debug_dma_sync_single_for_cpu(dev, handle + offset, size, dir);
+
+	if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir))
+		return;
+
+	__dma_single_dev_to_cpu(dma_to_virt(dev, handle) + offset, size, dir);
 }
 
 static inline void dma_sync_single_range_for_device(struct device *dev,
@@ -407,11 +492,12 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
 {
	BUG_ON(!valid_dma_direction(dir));
 
+	debug_dma_sync_single_for_device(dev, handle + offset, size, dir);
+
	if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
		return;
 
-	if (!arch_is_coherent())
-		dma_cache_maint(dma_to_virt(dev, handle) + offset, size, dir);
+	__dma_single_cpu_to_dev(dma_to_virt(dev, handle) + offset, size, dir);
 }
 
 static inline void dma_sync_single_for_cpu(struct device *dev,
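
The "buffer ownership" comment above is the heart of this rework. A minimal streaming-DMA sketch showing the two ownership transitions from a driver's point of view (hypothetical driver code, not from this commit):

#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int send_buffer(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* CPU -> device: caches are cleaned before the transfer */
	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* the device owns the buffer now; program it with 'handle' ... */

	/* device -> CPU: invalidation is delayed until unmap */
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}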
diff --git a/arch/arm/include/asm/dma.h b/arch/arm/include/asm/dma.h
index 7edf353..ca51143 100644
--- a/arch/arm/include/asm/dma.h
+++ b/arch/arm/include/asm/dma.h
@@ -138,12 +138,12 @@ extern int  get_dma_residue(unsigned int chan);
 #define NO_DMA	255
 #endif
 
+#endif /* CONFIG_ISA_DMA_API */
+
 #ifdef CONFIG_PCI
 extern int isa_dma_bridge_buggy;
 #else
 #define isa_dma_bridge_buggy	(0)
 #endif
 
-#endif /* CONFIG_ISA_DMA_API */
-
 #endif /* __ASM_ARM_DMA_H */
diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
index cc7ef40..af18cea 100644
--- a/arch/arm/include/asm/domain.h
+++ b/arch/arm/include/asm/domain.h
@@ -45,13 +45,17 @@
 */
 #define DOMAIN_NOACCESS	0
 #define DOMAIN_CLIENT	1
+#ifdef CONFIG_CPU_USE_DOMAINS
 #define DOMAIN_MANAGER	3
+#else
+#define DOMAIN_MANAGER	1
+#endif
 
 #define domain_val(dom,type)	((type) << (2*(dom)))
 
 #ifndef __ASSEMBLY__
 
-#ifdef CONFIG_MMU
+#ifdef CONFIG_CPU_USE_DOMAINS
 #define set_domain(x)					\
	do {						\
	__asm__ __volatile__(				\
@@ -74,5 +78,28 @@
 #define modify_domain(dom,type)	do { } while (0)
 #endif
 
+/*
+ * Generate the T (user) versions of the LDR/STR and related
+ * instructions (inline assembly)
+ */
+#ifdef CONFIG_CPU_USE_DOMAINS
+#define T(instr)	#instr "t"
+#else
+#define T(instr)	#instr
 #endif
-#endif /* !__ASSEMBLY__ */
+
+#else /* __ASSEMBLY__ */
+
+/*
+ * Generate the T (user) versions of the LDR/STR and related
+ * instructions
+ */
+#ifdef CONFIG_CPU_USE_DOMAINS
+#define T(instr)	instr ## t
+#else
+#define T(instr)	instr
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* !__ASM_PROC_DOMAIN_H */
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index c207504..c3cd875 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -9,6 +9,8 @@
 #include <asm/ptrace.h>
 #include <asm/user.h>
 
+struct task_struct;
+
 typedef unsigned long elf_greg_t;
 typedef unsigned long elf_freg_t[3];
 
@@ -55,6 +57,11 @@ typedef struct user_fp elf_fpregset_t;
 #define R_ARM_MOVW_ABS_NC	43
 #define R_ARM_MOVT_ABS		44
 
+#define R_ARM_THM_CALL		10
+#define R_ARM_THM_JUMP24	30
+#define R_ARM_THM_MOVW_ABS_NC	47
+#define R_ARM_THM_MOVT_ABS	48
+
 /*
 * These are used to set parameters in the core dumps.
 */
@@ -92,10 +99,15 @@ struct elf32_hdr;
 extern int elf_check_arch(const struct elf32_hdr *);
 #define elf_check_arch elf_check_arch
 
+#define vmcore_elf64_check_arch(x) (0)
+
 extern int arm_elf_read_implies_exec(const struct elf32_hdr *, int);
 #define elf_read_implies_exec(ex,stk) arm_elf_read_implies_exec(&(ex), stk)
 
-#define USE_ELF_CORE_DUMP
+struct task_struct;
+int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
+#define ELF_CORE_COPY_TASK_REGS dump_task_regs
+
 #define ELF_EXEC_PAGESIZE	4096
 
 /* This is the location that an ET_DYN program is loaded if exec'ed.  Typical
@@ -113,4 +125,12 @@ extern int arm_elf_read_implies_exec(const struct elf32_hdr *, int);
 extern void elf_set_personality(const struct elf32_hdr *);
 #define SET_PERSONALITY(ex)	elf_set_personality(&(ex))
 
+struct mm_struct;
+extern unsigned long arch_randomize_brk(struct mm_struct *mm);
+#define arch_randomize_brk arch_randomize_brk
+
+extern int vectors_user_mapping(void);
+#define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping()
+#define ARCH_HAS_SETUP_ADDITIONAL_PAGES
+
 #endif
diff --git a/arch/arm/include/asm/entry-macro-multi.S b/arch/arm/include/asm/entry-macro-multi.S
new file mode 100644
index 0000000..ec0bbf7
--- /dev/null
+++ b/arch/arm/include/asm/entry-macro-multi.S
@@ -0,0 +1,44 @@
+/*
+ * Interrupt handling.  Preserves r7, r8, r9
+ */
+	.macro	arch_irq_handler_default
+	get_irqnr_preamble r5, lr
+1:	get_irqnr_and_base r0, r6, r5, lr
+	movne	r1, sp
+	@
+	@ routine called with r0 = irq number, r1 = struct pt_regs *
+	@
+	adrne	lr, BSYM(1b)
+	bne	asm_do_IRQ
+
+#ifdef CONFIG_SMP
+	/*
+	 * XXX
+	 *
+	 * this macro assumes that irqstat (r6) and base (r5) are
+	 * preserved from get_irqnr_and_base above
+	 */
+	ALT_SMP(test_for_ipi r0, r6, r5, lr)
+	ALT_UP_B(9997f)
+	movne	r1, sp
+	adrne	lr, BSYM(1b)
+	bne	do_IPI
+
+#ifdef CONFIG_LOCAL_TIMERS
+	test_for_ltirq r0, r6, r5, lr
+	movne	r0, sp
+	adrne	lr, BSYM(1b)
+	bne	do_local_timer
+#endif
+#endif
+9997:
+	.endm
+
+	.macro	arch_irq_handler, symbol_name
+	.align	5
+	.global \symbol_name
+\symbol_name:
+	mov	r4, lr
+	arch_irq_handler_default
+	mov	pc, r4
+	.endm
diff --git a/arch/arm/include/asm/entry-macro-vic2.S b/arch/arm/include/asm/entry-macro-vic2.S
new file mode 100644
index 0000000..3ceb85e
--- /dev/null
+++ b/arch/arm/include/asm/entry-macro-vic2.S
@@ -0,0 +1,57 @@
+/* arch/arm/include/asm/entry-macro-vic2.S
+ *
+ * Originally arch/arm/mach-s3c6400/include/mach/entry-macro.S
+ *
+ * Copyright 2008 Openmoko, Inc.
+ * Copyright 2008 Simtec Electronics
+ *	http://armlinux.simtec.co.uk/
+ *	Ben Dooks <ben@simtec.co.uk>
+ *
+ * Low-level IRQ helper macros for a device with two VICs
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+*/
+
+/* This should be included from <mach/entry-macro.S> with the necessary
+ * defines for virtual addresses and IRQ bases for the two vics.
+ *
+ * The code needs the following defined:
+ *	IRQ_VIC0_BASE	IRQ number of VIC0's first IRQ
+ *	IRQ_VIC1_BASE	IRQ number of VIC1's first IRQ
+ *	VA_VIC0		Virtual address of VIC0
+ *	VA_VIC1		Virtual address of VIC1
+ *
+ * Note, code assumes VIC0's virtual address is an ARM immediate constant
+ * away from VIC1.
+*/
+
+#include <asm/hardware/vic.h>
+
+	.macro	disable_fiq
+	.endm
+
+	.macro	get_irqnr_preamble, base, tmp
+	ldr	\base, =VA_VIC0
+	.endm
+
+	.macro	arch_ret_to_user, tmp1, tmp2
+	.endm
+
+	.macro	get_irqnr_and_base, irqnr, irqstat, base, tmp
+
+	@ check the vic0
+	mov	\irqnr, #IRQ_VIC0_BASE + 31
+	ldr	\irqstat, [ \base, # VIC_IRQ_STATUS ]
+	teq	\irqstat, #0
+
+	@ otherwise try vic1
+	addeq	\tmp, \base, #(VA_VIC1 - VA_VIC0)
+	addeq	\irqnr, \irqnr, #(IRQ_VIC1_BASE - IRQ_VIC0_BASE)
+	ldreq	\irqstat, [ \tmp, # VIC_IRQ_STATUS ]
+	teqeq	\irqstat, #0
+
+	clzne	\irqstat, \irqstat
+	subne	\irqnr, \irqnr, \irqstat
+	.endm
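
For reference, the decode in get_irqnr_and_base works because the highest pending status bit maps to the highest IRQ number in that VIC's range, which is why the macro starts at BASE + 31 and subtracts the count-leading-zeros of the status word. A C analogue of the same arithmetic (illustration only, using the IRQ_VIC0_BASE/IRQ_VIC1_BASE defines this file assumes the platform provides):

static int vic2_decode_irq(unsigned int stat0, unsigned int stat1)
{
	/* highest set bit n gives clz == 31 - n, so BASE + 31 - clz == BASE + n */
	if (stat0)
		return IRQ_VIC0_BASE + 31 - __builtin_clz(stat0);
	if (stat1)
		return IRQ_VIC1_BASE + 31 - __builtin_clz(stat1);
	return -1;	/* spurious */
}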
diff --git a/arch/arm/include/asm/ftrace.h b/arch/arm/include/asm/ftrace.h
index 39c8bc1..f89515a 100644
--- a/arch/arm/include/asm/ftrace.h
+++ b/arch/arm/include/asm/ftrace.h
@@ -2,13 +2,66 @@
 #define _ASM_ARM_FTRACE
 
 #ifdef CONFIG_FUNCTION_TRACER
-#define MCOUNT_ADDR		((long)(mcount))
+#define MCOUNT_ADDR		((unsigned long)(__gnu_mcount_nc))
 #define MCOUNT_INSN_SIZE	4 /* sizeof mcount call */
 
 #ifndef __ASSEMBLY__
 extern void mcount(void);
+extern void __gnu_mcount_nc(void);
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+struct dyn_arch_ftrace {
+#ifdef CONFIG_OLD_MCOUNT
+	bool	old_mcount;
 #endif
+};
+
+static inline unsigned long ftrace_call_adjust(unsigned long addr)
+{
+	/* With Thumb-2, the recorded addresses have the lsb set */
+	return addr & ~1;
+}
+
+extern void ftrace_caller_old(void);
+extern void ftrace_call_old(void);
 #endif
+#endif
+
+#endif
+
+#ifndef __ASSEMBLY__
+
+#if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND)
+/*
+ * return_address uses walk_stackframe to do it's work.  If both
+ * CONFIG_FRAME_POINTER=y and CONFIG_ARM_UNWIND=y walk_stackframe uses unwind
+ * information.  For this to work in the function tracer many functions would
+ * have to be marked with __notrace.  So for now just depend on
+ * !CONFIG_ARM_UNWIND.
+ */
+
+void *return_address(unsigned int);
+
+#else
+
+extern inline void *return_address(unsigned int level)
+{
+	return NULL;
+}
+
+#endif
+
+#define HAVE_ARCH_CALLER_ADDR
+
+#define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
+#define CALLER_ADDR1 ((unsigned long)return_address(1))
+#define CALLER_ADDR2 ((unsigned long)return_address(2))
+#define CALLER_ADDR3 ((unsigned long)return_address(3))
+#define CALLER_ADDR4 ((unsigned long)return_address(4))
+#define CALLER_ADDR5 ((unsigned long)return_address(5))
+#define CALLER_ADDR6 ((unsigned long)return_address(6))
+
+#endif /* ifndef __ASSEMBLY__ */
+
 #endif /* _ASM_ARM_FTRACE */
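
A hypothetical debugging snippet (not from this commit) showing what the HAVE_ARCH_CALLER_ADDR plumbing enables: the generic CALLER_ADDRn macros now resolve through return_address(), so a function can report deeper callers than __builtin_return_address(0) alone allows.

#include <linux/ftrace.h>
#include <linux/kernel.h>

static void report_callers(void)
{
	/* CALLER_ADDR1/2 walk the stack via return_address() */
	printk(KERN_DEBUG "called from %pS, which was called from %pS\n",
	       (void *)CALLER_ADDR1, (void *)CALLER_ADDR2);
}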
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
index 9ee743b..b33fe70 100644
--- a/arch/arm/include/asm/futex.h
+++ b/arch/arm/include/asm/futex.h
@@ -13,22 +13,23 @@
 #include <linux/preempt.h>
 #include <linux/uaccess.h>
 #include <asm/errno.h>
+#include <asm/domain.h>
 
 #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)	\
	__asm__ __volatile__(					\
-	"1:	ldrt	%1, [%2]\n"				\
+	"1:	" T(ldr) "	%1, [%2]\n"			\
	"	" insn "\n"					\
-	"2:	strt	%0, [%2]\n"				\
+	"2:	" T(str) "	%0, [%2]\n"			\
	"	mov	%0, #0\n"				\
	"3:\n"							\
-	"	.section __ex_table,\"a\"\n"			\
+	"	.pushsection __ex_table,\"a\"\n"		\
	"	.align	3\n"					\
	"	.long	1b, 4f, 2b, 4f\n"			\
-	"	.previous\n"					\
-	"	.section .fixup,\"ax\"\n"			\
+	"	.popsection\n"					\
+	"	.pushsection .fixup,\"ax\"\n"			\
	"4:	mov	%0, %4\n"				\
	"	b	3b\n"					\
-	"	.previous"					\
+	"	.popsection"					\
	: "=&r" (ret), "=&r" (oldval)				\
	: "r" (uaddr), "r" (oparg), "Ir" (-EFAULT)		\
	: "cc", "memory")
@@ -97,18 +98,19 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
	pagefault_disable();	/* implies preempt_disable() */
 
	__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
-	"1:	ldrt	%0, [%3]\n"
+	"1:	" T(ldr) "	%0, [%3]\n"
	"	teq	%0, %1\n"
-	"2:	streqt	%2, [%3]\n"
+	"	it	eq	@ explicit IT needed for the 2b label\n"
+	"2:	" T(streq) "	%2, [%3]\n"
	"3:\n"
-	"	.section __ex_table,\"a\"\n"
+	"	.pushsection __ex_table,\"a\"\n"
	"	.align	3\n"
	"	.long	1b, 4f, 2b, 4f\n"
-	"	.previous\n"
-	"	.section .fixup,\"ax\"\n"
+	"	.popsection\n"
+	"	.pushsection .fixup,\"ax\"\n"
	"4:	mov	%0, %4\n"
	"	b	3b\n"
-	"	.previous"
+	"	.popsection"
	: "=&r" (val)
	: "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
	: "cc", "memory");
diff --git a/arch/arm/include/asm/glue.h b/arch/arm/include/asm/glue.h
index a0e39d5d00..234a3fc 100644
--- a/arch/arm/include/asm/glue.h
+++ b/arch/arm/include/asm/glue.h
@@ -120,25 +120,39 @@
 #endif
 
 /*
- * Prefetch abort handler.  If the CPU has an IFAR use that, otherwise
- * use the address of the aborted instruction
+ * Prefetch Abort Model
+ * ================
+ *
+ * We have the following to choose from:
+ *	legacy	      - no IFSR, no IFAR
+ *	v6	      - ARMv6: IFSR, no IFAR
+ *	v7	      - ARMv7: IFSR and IFAR
 */
+
 #undef CPU_PABORT_HANDLER
 #undef MULTI_PABORT
 
-#ifdef CONFIG_CPU_PABRT_IFAR
+#ifdef CONFIG_CPU_PABRT_LEGACY
+# ifdef CPU_PABORT_HANDLER
+#  define MULTI_PABORT 1
+# else
+#  define CPU_PABORT_HANDLER legacy_pabort
+# endif
+#endif
+
+#ifdef CONFIG_CPU_PABRT_V6
 # ifdef CPU_PABORT_HANDLER
 #  define MULTI_PABORT 1
 # else
-#  define CPU_PABORT_HANDLER(reg, insn)	mrc p15, 0, reg, cr6, cr0, 2
+#  define CPU_PABORT_HANDLER v6_pabort
 # endif
 #endif
 
-#ifdef CONFIG_CPU_PABRT_NOIFAR
+#ifdef CONFIG_CPU_PABRT_V7
 # ifdef CPU_PABORT_HANDLER
 #  define MULTI_PABORT 1
 # else
-#  define CPU_PABORT_HANDLER(reg, insn)	mov reg, insn
+#  define CPU_PABORT_HANDLER v7_pabort
 # endif
 #endif
diff --git a/arch/arm/include/asm/hardirq.h b/arch/arm/include/asm/hardirq.h
index 182310b..89ad180 100644
--- a/arch/arm/include/asm/hardirq.h
+++ b/arch/arm/include/asm/hardirq.h
@@ -5,14 +5,34 @@
 #include <linux/threads.h>
 #include <asm/irq.h>
 
+#define NR_IPI	5
+
 typedef struct {
	unsigned int __softirq_pending;
+#ifdef CONFIG_LOCAL_TIMERS
	unsigned int local_timer_irqs;
+#endif
+#ifdef CONFIG_SMP
+	unsigned int ipi_irqs[NR_IPI];
+#endif
 } ____cacheline_aligned irq_cpustat_t;
 
 #include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */
 
-#if NR_IRQS > 256
+#define __inc_irq_stat(cpu, member)	__IRQ_STAT(cpu, member)++
+#define __get_irq_stat(cpu, member)	__IRQ_STAT(cpu, member)
+
+#ifdef CONFIG_SMP
+u64 smp_irq_stat_cpu(unsigned int cpu);
+#else
+#define smp_irq_stat_cpu(cpu)	0
+#endif
+
+#define arch_irq_stat_cpu	smp_irq_stat_cpu
+
+#if NR_IRQS > 512
+#define HARDIRQ_BITS	10
+#elif NR_IRQS > 256
 #define HARDIRQ_BITS	9
 #else
 #define HARDIRQ_BITS	8
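
A sketch of how the new per-IPI counters are intended to be consumed (modeled loosely on the arch/arm/kernel SMP code of this era; simplified here, so treat it as an assumption rather than the actual implementation): __inc_irq_stat() bumps a counter when an IPI arrives, and smp_irq_stat_cpu() sums them for /proc/stat via arch_irq_stat_cpu.

u64 smp_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = 0;
	int i;

	/* total up every IPI kind delivered to this cpu */
	for (i = 0; i < NR_IPI; i++)
		sum += __get_irq_stat(cpu, ipi_irqs[i]);

	return sum;
}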
+ */ +#define TIMER_LOAD 0x00 /* ACVR rw */ +#define TIMER_VALUE 0x04 /* ACVR ro */ +#define TIMER_CTRL 0x08 /* ACVR rw */ +#define TIMER_CTRL_ONESHOT (1 << 0) /* CVR */ +#define TIMER_CTRL_32BIT (1 << 1) /* CVR */ +#define TIMER_CTRL_DIV1 (0 << 2) /* ACVR */ +#define TIMER_CTRL_DIV16 (1 << 2) /* ACVR */ +#define TIMER_CTRL_DIV256 (2 << 2) /* ACVR */ +#define TIMER_CTRL_IE (1 << 5) /* VR */ +#define TIMER_CTRL_PERIODIC (1 << 6) /* ACVR */ +#define TIMER_CTRL_ENABLE (1 << 7) /* ACVR */ -#define TIMER_INTCLR 0x0c -#define TIMER_RIS 0x10 -#define TIMER_MIS 0x14 -#define TIMER_BGLOAD 0x18 +#define TIMER_INTCLR 0x0c /* ACVR wo */ +#define TIMER_RIS 0x10 /* CVR ro */ +#define TIMER_MIS 0x14 /* CVR ro */ +#define TIMER_BGLOAD 0x18 /* CVR rw */ #endif diff --git a/arch/arm/include/asm/hardware/cache-l2x0.h b/arch/arm/include/asm/hardware/cache-l2x0.h index cdb9022..5aeec1e 100644 --- a/arch/arm/include/asm/hardware/cache-l2x0.h +++ b/arch/arm/include/asm/hardware/cache-l2x0.h @@ -50,6 +50,26 @@ #define L2X0_LINE_DATA 0xF10 #define L2X0_LINE_TAG 0xF30 #define L2X0_DEBUG_CTRL 0xF40 +#define L2X0_PREFETCH_CTRL 0xF60 +#define L2X0_POWER_CTRL 0xF80 +#define L2X0_DYNAMIC_CLK_GATING_EN (1 << 1) +#define L2X0_STNDBY_MODE_EN (1 << 0) + +/* Registers shifts and masks */ +#define L2X0_CACHE_ID_PART_MASK (0xf << 6) +#define L2X0_CACHE_ID_PART_L210 (1 << 6) +#define L2X0_CACHE_ID_PART_L310 (3 << 6) + +#define L2X0_AUX_CTRL_MASK 0xc0000fff +#define L2X0_AUX_CTRL_ASSOCIATIVITY_SHIFT 16 +#define L2X0_AUX_CTRL_WAY_SIZE_SHIFT 17 +#define L2X0_AUX_CTRL_WAY_SIZE_MASK (0x3 << 17) +#define L2X0_AUX_CTRL_SHARE_OVERRIDE_SHIFT 22 +#define L2X0_AUX_CTRL_NS_LOCKDOWN_SHIFT 26 +#define L2X0_AUX_CTRL_NS_INT_CTRL_SHIFT 27 +#define L2X0_AUX_CTRL_DATA_PREFETCH_SHIFT 28 +#define L2X0_AUX_CTRL_INSTR_PREFETCH_SHIFT 29 +#define L2X0_AUX_CTRL_EARLY_BRESP_SHIFT 30 #ifndef __ASSEMBLY__ extern void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask); diff --git a/arch/arm/include/asm/hardware/cache-tauros2.h b/arch/arm/include/asm/hardware/cache-tauros2.h new file mode 100644 index 0000000..538f17c --- /dev/null +++ b/arch/arm/include/asm/hardware/cache-tauros2.h @@ -0,0 +1,11 @@ +/* + * arch/arm/include/asm/hardware/cache-tauros2.h + * + * Copyright (C) 2008 Marvell Semiconductor + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +extern void __init tauros2_init(void); diff --git a/arch/arm/include/asm/hardware/coresight.h b/arch/arm/include/asm/hardware/coresight.h new file mode 100644 index 0000000..7ecd793 --- /dev/null +++ b/arch/arm/include/asm/hardware/coresight.h @@ -0,0 +1,157 @@ +/* + * linux/arch/arm/include/asm/hardware/coresight.h + * + * CoreSight components' registers + * + * Copyright (C) 2009 Nokia Corporation. + * Alexander Shishkin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef __ASM_HARDWARE_CORESIGHT_H +#define __ASM_HARDWARE_CORESIGHT_H + +#define TRACER_ACCESSED_BIT 0 +#define TRACER_RUNNING_BIT 1 +#define TRACER_CYCLE_ACC_BIT 2 +#define TRACER_ACCESSED BIT(TRACER_ACCESSED_BIT) +#define TRACER_RUNNING BIT(TRACER_RUNNING_BIT) +#define TRACER_CYCLE_ACC BIT(TRACER_CYCLE_ACC_BIT) + +#define TRACER_TIMEOUT 10000 + +#define etm_writel(t, v, x) \ + (__raw_writel((v), (t)->etm_regs + (x))) +#define etm_readl(t, x) (__raw_readl((t)->etm_regs + (x))) + +/* CoreSight Management Registers */ +#define CSMR_LOCKACCESS 0xfb0 +#define CSMR_LOCKSTATUS 0xfb4 +#define CSMR_AUTHSTATUS 0xfb8 +#define CSMR_DEVID 0xfc8 +#define CSMR_DEVTYPE 0xfcc +/* CoreSight Component Registers */ +#define CSCR_CLASS 0xff4 + +#define UNLOCK_MAGIC 0xc5acce55 + +/* ETM control register, "ETM Architecture", 3.3.1 */ +#define ETMR_CTRL 0 +#define ETMCTRL_POWERDOWN 1 +#define ETMCTRL_PROGRAM (1 << 10) +#define ETMCTRL_PORTSEL (1 << 11) +#define ETMCTRL_DO_CONTEXTID (3 << 14) +#define ETMCTRL_PORTMASK1 (7 << 4) +#define ETMCTRL_PORTMASK2 (1 << 21) +#define ETMCTRL_PORTMASK (ETMCTRL_PORTMASK1 | ETMCTRL_PORTMASK2) +#define ETMCTRL_PORTSIZE(x) ((((x) & 7) << 4) | (!!((x) & 8)) << 21) +#define ETMCTRL_DO_CPRT (1 << 1) +#define ETMCTRL_DATAMASK (3 << 2) +#define ETMCTRL_DATA_DO_DATA (1 << 2) +#define ETMCTRL_DATA_DO_ADDR (1 << 3) +#define ETMCTRL_DATA_DO_BOTH (ETMCTRL_DATA_DO_DATA | ETMCTRL_DATA_DO_ADDR) +#define ETMCTRL_BRANCH_OUTPUT (1 << 8) +#define ETMCTRL_CYCLEACCURATE (1 << 12) + +/* ETM configuration code register */ +#define ETMR_CONFCODE (0x04) + +/* ETM trace start/stop resource control register */ +#define ETMR_TRACESSCTRL (0x18) + +/* ETM trigger event register */ +#define ETMR_TRIGEVT (0x08) + +/* address access type register bits, "ETM architecture", + * table 3-27 */ +/* - access type */ +#define ETMAAT_IFETCH 0 +#define ETMAAT_IEXEC 1 +#define ETMAAT_IEXECPASS 2 +#define ETMAAT_IEXECFAIL 3 +#define ETMAAT_DLOADSTORE 4 +#define ETMAAT_DLOAD 5 +#define ETMAAT_DSTORE 6 +/* - comparison access size */ +#define ETMAAT_JAVA (0 << 3) +#define ETMAAT_THUMB (1 << 3) +#define ETMAAT_ARM (3 << 3) +/* - data value comparison control */ +#define ETMAAT_NOVALCMP (0 << 5) +#define ETMAAT_VALMATCH (1 << 5) +#define ETMAAT_VALNOMATCH (3 << 5) +/* - exact match */ +#define ETMAAT_EXACTMATCH (1 << 7) +/* - context id comparator control */ +#define ETMAAT_IGNCONTEXTID (0 << 8) +#define ETMAAT_VALUE1 (1 << 8) +#define ETMAAT_VALUE2 (2 << 8) +#define ETMAAT_VALUE3 (3 << 8) +/* - security level control */ +#define ETMAAT_IGNSECURITY (0 << 10) +#define ETMAAT_NSONLY (1 << 10) +#define ETMAAT_SONLY (2 << 10) + +#define ETMR_COMP_VAL(x) (0x40 + (x) * 4) +#define ETMR_COMP_ACC_TYPE(x) (0x80 + (x) * 4) + +/* ETM status register, "ETM Architecture", 3.3.2 */ +#define ETMR_STATUS (0x10) +#define ETMST_OVERFLOW BIT(0) +#define ETMST_PROGBIT BIT(1) +#define ETMST_STARTSTOP BIT(2) +#define ETMST_TRIGGER BIT(3) + +#define etm_progbit(t) (etm_readl((t), ETMR_STATUS) & ETMST_PROGBIT) +#define etm_started(t) (etm_readl((t), ETMR_STATUS) & ETMST_STARTSTOP) +#define etm_triggered(t) (etm_readl((t), ETMR_STATUS) & ETMST_TRIGGER) + +#define ETMR_TRACEENCTRL2 0x1c +#define ETMR_TRACEENCTRL 0x24 +#define ETMTE_INCLEXCL BIT(24) +#define ETMR_TRACEENEVT 0x20 +#define ETMCTRL_OPTS (ETMCTRL_DO_CPRT | \ + ETMCTRL_DATA_DO_ADDR | \ + ETMCTRL_BRANCH_OUTPUT | \ + ETMCTRL_DO_CONTEXTID) + +/* ETM management registers, "ETM Architecture", 3.5.24 */ +#define ETMMR_OSLAR 0x300 +#define ETMMR_OSLSR 0x304 +#define ETMMR_OSSRR 0x308 
+#define ETMMR_PDSR 0x314 + +/* ETB registers, "CoreSight Components TRM", 9.3 */ +#define ETBR_DEPTH 0x04 +#define ETBR_STATUS 0x0c +#define ETBR_READMEM 0x10 +#define ETBR_READADDR 0x14 +#define ETBR_WRITEADDR 0x18 +#define ETBR_TRIGGERCOUNT 0x1c +#define ETBR_CTRL 0x20 +#define ETBR_FORMATTERCTRL 0x304 +#define ETBFF_ENFTC 1 +#define ETBFF_ENFCONT BIT(1) +#define ETBFF_FONFLIN BIT(4) +#define ETBFF_MANUAL_FLUSH BIT(6) +#define ETBFF_TRIGIN BIT(8) +#define ETBFF_TRIGEVT BIT(9) +#define ETBFF_TRIGFL BIT(10) + +#define etb_writel(t, v, x) \ + (__raw_writel((v), (t)->etb_regs + (x))) +#define etb_readl(t, x) (__raw_readl((t)->etb_regs + (x))) + +#define etm_lock(t) do { etm_writel((t), 0, CSMR_LOCKACCESS); } while (0) +#define etm_unlock(t) \ + do { etm_writel((t), UNLOCK_MAGIC, CSMR_LOCKACCESS); } while (0) + +#define etb_lock(t) do { etb_writel((t), 0, CSMR_LOCKACCESS); } while (0) +#define etb_unlock(t) \ + do { etb_writel((t), UNLOCK_MAGIC, CSMR_LOCKACCESS); } while (0) + +#endif /* __ASM_HARDWARE_CORESIGHT_H */ + diff --git a/arch/arm/include/asm/hardware/entry-macro-gic.S b/arch/arm/include/asm/hardware/entry-macro-gic.S new file mode 100644 index 0000000..c115b82 --- /dev/null +++ b/arch/arm/include/asm/hardware/entry-macro-gic.S @@ -0,0 +1,75 @@ +/* + * arch/arm/include/asm/hardware/entry-macro-gic.S + * + * Low-level IRQ helper macros for GIC + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#include <asm/hardware/gic.h> + +#ifndef HAVE_GET_IRQNR_PREAMBLE + .macro get_irqnr_preamble, base, tmp + ldr \base, =gic_cpu_base_addr + ldr \base, [\base] + .endm +#endif + +/* + * The interrupt numbering scheme is defined in the + * interrupt controller spec. To wit: + * + * Interrupts 0-15 are IPI + * 16-28 are reserved + * 29-31 are local. We allow 30 to be used for the watchdog. + * 32-1020 are global + * 1021-1022 are reserved + * 1023 is "spurious" (no interrupt) + * + * For now, we ignore all local interrupts so only return an interrupt if it's + * between 30 and 1020. The test_for_ipi routine below will pick up on IPIs. + * + * A simple read from the controller will tell us the number of the highest + * priority enabled interrupt. We then just need to check whether it is in the + * valid range for an IRQ (30-1020 inclusive). + */ + + .macro get_irqnr_and_base, irqnr, irqstat, base, tmp + + ldr \irqstat, [\base, #GIC_CPU_INTACK] + /* bits 12-10 = src CPU, 9-0 = int # */ + + ldr \tmp, =1021 + bic \irqnr, \irqstat, #0x1c00 + cmp \irqnr, #29 + cmpcc \irqnr, \irqnr + cmpne \irqnr, \tmp + cmpcs \irqnr, \irqnr + .endm + +/* We assume that irqstat (the raw value of the IRQ acknowledge + * register) is preserved from the macro above. + * If there is an IPI, we immediately signal end of interrupt on the + * controller, since this requires the original irqstat value which + * we won't easily be able to recreate later. + */ + + .macro test_for_ipi, irqnr, irqstat, base, tmp + bic \irqnr, \irqstat, #0x1c00 + cmp \irqnr, #16 + strcc \irqstat, [\base, #GIC_CPU_EOI] + cmpcs \irqnr, \irqnr + .endm + +/* As above, this assumes that irqstat and base are preserved.. 
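+ * The local timer is PPI 29; as with IPIs it is acked (EOI) on the
+ * controller immediately, and \tmp is used to tell the caller
+ * whether one was pending.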
*/ + + .macro test_for_ltirq, irqnr, irqstat, base, tmp + bic \irqnr, \irqstat, #0x1c00 + mov \tmp, #0 + cmp \irqnr, #29 + moveq \tmp, #1 + streq \irqstat, [\base, #GIC_CPU_EOI] + cmp \tmp, #0 + .endm diff --git a/arch/arm/include/asm/hardware/gic.h b/arch/arm/include/asm/hardware/gic.h index 7f34333b..84557d3 100644 --- a/arch/arm/include/asm/hardware/gic.h +++ b/arch/arm/include/asm/hardware/gic.h @@ -33,10 +33,13 @@ #define GIC_DIST_SOFTINT 0xf00 #ifndef __ASSEMBLY__ -void gic_dist_init(unsigned int gic_nr, void __iomem *base, unsigned int irq_start); -void gic_cpu_init(unsigned int gic_nr, void __iomem *base); +extern void __iomem *gic_cpu_base_addr; + +void gic_init(unsigned int, unsigned int, void __iomem *, void __iomem *); +void gic_secondary_init(unsigned int); void gic_cascade_irq(unsigned int gic_nr, unsigned int irq); void gic_raise_softirq(const struct cpumask *mask, unsigned int irq); +void gic_enable_ppi(unsigned int); #endif #endif diff --git a/arch/arm/include/asm/hardware/icst.h b/arch/arm/include/asm/hardware/icst.h new file mode 100644 index 0000000..794220b --- /dev/null +++ b/arch/arm/include/asm/hardware/icst.h @@ -0,0 +1,59 @@ +/* + * arch/arm/include/asm/hardware/icst.h + * + * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Support functions for calculating clocks/divisors for the ICST + * clock generators. See http://www.idt.com/ for more information + * on these devices. + */ +#ifndef ASMARM_HARDWARE_ICST_H +#define ASMARM_HARDWARE_ICST_H + +struct icst_params { + unsigned long ref; + unsigned long vco_max; /* inclusive */ + unsigned long vco_min; /* exclusive */ + unsigned short vd_min; /* inclusive */ + unsigned short vd_max; /* inclusive */ + unsigned char rd_min; /* inclusive */ + unsigned char rd_max; /* inclusive */ + const unsigned char *s2div; /* chip specific s2div array */ + const unsigned char *idx2s; /* chip specific idx2s array */ +}; + +struct icst_vco { + unsigned short v; + unsigned char r; + unsigned char s; +}; + +unsigned long icst_hz(const struct icst_params *p, struct icst_vco vco); +struct icst_vco icst_hz_to_vco(const struct icst_params *p, unsigned long freq); + +/* + * ICST307 VCO frequency must be between 6MHz and 200MHz (3.3 or 5V). + * This frequency is pre-output divider. + */ +#define ICST307_VCO_MIN 6000000 +#define ICST307_VCO_MAX 200000000 + +extern const unsigned char icst307_s2div[]; +extern const unsigned char icst307_idx2s[]; + +/* + * ICST525 VCO frequency must be between 10MHz and 200MHz (3V) or 320MHz (5V). + * This frequency is pre-output divider. + */ +#define ICST525_VCO_MIN 10000000 +#define ICST525_VCO_MAX_3V 200000000 +#define ICST525_VCO_MAX_5V 320000000 + +extern const unsigned char icst525_s2div[]; +extern const unsigned char icst525_idx2s[]; + +#endif diff --git a/arch/arm/include/asm/hardware/icst307.h b/arch/arm/include/asm/hardware/icst307.h deleted file mode 100644 index 554f128..0000000 --- a/arch/arm/include/asm/hardware/icst307.h +++ /dev/null @@ -1,38 +0,0 @@ -/* - * arch/arm/include/asm/hardware/icst307.h - * - * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- * - * Support functions for calculating clocks/divisors for the ICS307 - * clock generators. See http://www.icst.com/ for more information - * on these devices. - * - * This file is similar to the icst525.h file - */ -#ifndef ASMARM_HARDWARE_ICST307_H -#define ASMARM_HARDWARE_ICST307_H - -struct icst307_params { - unsigned long ref; - unsigned long vco_max; /* inclusive */ - unsigned short vd_min; /* inclusive */ - unsigned short vd_max; /* inclusive */ - unsigned char rd_min; /* inclusive */ - unsigned char rd_max; /* inclusive */ -}; - -struct icst307_vco { - unsigned short v; - unsigned char r; - unsigned char s; -}; - -unsigned long icst307_khz(const struct icst307_params *p, struct icst307_vco vco); -struct icst307_vco icst307_khz_to_vco(const struct icst307_params *p, unsigned long freq); -struct icst307_vco icst307_ps_to_vco(const struct icst307_params *p, unsigned long period); - -#endif diff --git a/arch/arm/include/asm/hardware/icst525.h b/arch/arm/include/asm/hardware/icst525.h deleted file mode 100644 index 58f0dc4..0000000 --- a/arch/arm/include/asm/hardware/icst525.h +++ /dev/null @@ -1,36 +0,0 @@ -/* - * arch/arm/include/asm/hardware/icst525.h - * - * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * Support functions for calculating clocks/divisors for the ICST525 - * clock generators. See http://www.icst.com/ for more information - * on these devices. - */ -#ifndef ASMARM_HARDWARE_ICST525_H -#define ASMARM_HARDWARE_ICST525_H - -struct icst525_params { - unsigned long ref; - unsigned long vco_max; /* inclusive */ - unsigned short vd_min; /* inclusive */ - unsigned short vd_max; /* inclusive */ - unsigned char rd_min; /* inclusive */ - unsigned char rd_max; /* inclusive */ -}; - -struct icst525_vco { - unsigned short v; - unsigned char r; - unsigned char s; -}; - -unsigned long icst525_khz(const struct icst525_params *p, struct icst525_vco vco); -struct icst525_vco icst525_khz_to_vco(const struct icst525_params *p, unsigned long freq); -struct icst525_vco icst525_ps_to_vco(const struct icst525_params *p, unsigned long period); - -#endif diff --git a/arch/arm/include/asm/hardware/iop3xx-adma.h b/arch/arm/include/asm/hardware/iop3xx-adma.h index 83e6ba3..9b28f12 100644 --- a/arch/arm/include/asm/hardware/iop3xx-adma.h +++ b/arch/arm/include/asm/hardware/iop3xx-adma.h @@ -187,11 +187,74 @@ union iop3xx_desc { void *ptr; }; +/* No support for p+q operations */ +static inline int +iop_chan_pq_slot_count(size_t len, int src_cnt, int *slots_per_op) +{ + BUG(); + return 0; +} + +static inline void +iop_desc_init_pq(struct iop_adma_desc_slot *desc, int src_cnt, + unsigned long flags) +{ + BUG(); +} + +static inline void +iop_desc_set_pq_addr(struct iop_adma_desc_slot *desc, dma_addr_t *addr) +{ + BUG(); +} + +static inline void +iop_desc_set_pq_src_addr(struct iop_adma_desc_slot *desc, int src_idx, + dma_addr_t addr, unsigned char coef) +{ + BUG(); +} + +static inline int +iop_chan_pq_zero_sum_slot_count(size_t len, int src_cnt, int *slots_per_op) +{ + BUG(); + return 0; +} + +static inline void +iop_desc_init_pq_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt, + unsigned long flags) +{ + BUG(); +} + +static inline void +iop_desc_set_pq_zero_sum_byte_count(struct iop_adma_desc_slot *desc, u32 len) +{ + BUG(); +} + +#define iop_desc_set_pq_zero_sum_src_addr 
iop_desc_set_pq_src_addr + +static inline void +iop_desc_set_pq_zero_sum_addr(struct iop_adma_desc_slot *desc, int pq_idx, + dma_addr_t *src) +{ + BUG(); +} + static inline int iop_adma_get_max_xor(void) { return 32; } +static inline int iop_adma_get_max_pq(void) +{ + BUG(); + return 0; +} + static inline u32 iop_chan_get_current_descriptor(struct iop_adma_chan *chan) { int id = chan->device->id; @@ -303,8 +366,7 @@ static inline int iop_chan_xor_slot_count(size_t len, int src_cnt, slot_cnt += *slots_per_op; } - if (len) - slot_cnt += *slots_per_op; + slot_cnt += *slots_per_op; return slot_cnt; } @@ -326,12 +388,16 @@ static inline int iop_chan_zero_sum_slot_count(size_t len, int src_cnt, slot_cnt += *slots_per_op; } - if (len) - slot_cnt += *slots_per_op; + slot_cnt += *slots_per_op; return slot_cnt; } +static inline int iop_desc_is_pq(struct iop_adma_desc_slot *desc) +{ + return 0; +} + static inline u32 iop_desc_get_dest_addr(struct iop_adma_desc_slot *desc, struct iop_adma_chan *chan) { @@ -349,6 +415,14 @@ static inline u32 iop_desc_get_dest_addr(struct iop_adma_desc_slot *desc, return 0; } + +static inline u32 iop_desc_get_qdest_addr(struct iop_adma_desc_slot *desc, + struct iop_adma_chan *chan) +{ + BUG(); + return 0; +} + static inline u32 iop_desc_get_byte_count(struct iop_adma_desc_slot *desc, struct iop_adma_chan *chan) { @@ -661,10 +735,8 @@ iop_desc_set_zero_sum_byte_count(struct iop_adma_desc_slot *desc, u32 len) i += slots_per_op; } while (len > IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT); - if (len) { - iter = iop_hw_desc_slot_idx(hw_desc, i); - iter->byte_count = len; - } + iter = iop_hw_desc_slot_idx(hw_desc, i); + iter->byte_count = len; } } @@ -756,13 +828,14 @@ static inline void iop_desc_set_block_fill_val(struct iop_adma_desc_slot *desc, hw_desc->src[0] = val; } -static inline int iop_desc_get_zero_result(struct iop_adma_desc_slot *desc) +static inline enum sum_check_flags +iop_desc_get_zero_result(struct iop_adma_desc_slot *desc) { struct iop3xx_desc_aau *hw_desc = desc->hw_desc; struct iop3xx_aau_desc_ctrl desc_ctrl = hw_desc->desc_ctrl_field; iop_paranoia(!(desc_ctrl.tx_complete && desc_ctrl.zero_result_en)); - return desc_ctrl.zero_result_err; + return desc_ctrl.zero_result_err << SUM_CHECK_P; } static inline void iop_chan_append(struct iop_adma_chan *chan) diff --git a/arch/arm/include/asm/hardware/iop3xx.h b/arch/arm/include/asm/hardware/iop3xx.h index 4b8e7f5..5daea29 100644 --- a/arch/arm/include/asm/hardware/iop3xx.h +++ b/arch/arm/include/asm/hardware/iop3xx.h @@ -215,6 +215,7 @@ extern int iop3xx_get_init_atu(void); * IOP3XX I/O and Mem space regions for PCI autoconfiguration */ #define IOP3XX_PCI_LOWER_MEM_PA 0x80000000 +#define IOP3XX_PCI_MEM_WINDOW_SIZE 0x08000000 #define IOP3XX_PCI_IO_WINDOW_SIZE 0x00010000 #define IOP3XX_PCI_LOWER_IO_PA 0x90000000 @@ -233,7 +234,13 @@ extern int iop3xx_get_init_atu(void); void iop3xx_map_io(void); void iop_init_cp6_handler(void); void iop_init_time(unsigned long tickrate); -unsigned long iop_gettimeoffset(void); + +static inline u32 read_tmr0(void) +{ + u32 val; + asm volatile("mrc p6, 0, %0, c0, c1, 0" : "=r" (val)); + return val; +} static inline void write_tmr0(u32 val) { @@ -252,6 +259,11 @@ static inline u32 read_tcr0(void) return val; } +static inline void write_tcr0(u32 val) +{ + asm volatile("mcr p6, 0, %0, c2, c1, 0" : : "r" (val)); +} + static inline u32 read_tcr1(void) { u32 val; @@ -259,6 +271,11 @@ static inline u32 read_tcr1(void) return val; } +static inline void write_tcr1(u32 val) +{ + asm volatile("mcr p6, 0, 
%0, c3, c1, 0" : : "r" (val)); +} + static inline void write_trr0(u32 val) { asm volatile("mcr p6, 0, %0, c4, c1, 0" : : "r" (val)); diff --git a/arch/arm/include/asm/hardware/iop_adma.h b/arch/arm/include/asm/hardware/iop_adma.h index 385c6e8..59b8c38 100644 --- a/arch/arm/include/asm/hardware/iop_adma.h +++ b/arch/arm/include/asm/hardware/iop_adma.h @@ -86,6 +86,7 @@ struct iop_adma_chan { * @idx: pool index * @unmap_src_cnt: number of xor sources * @unmap_len: transaction bytecount + * @tx_list: list of descriptors that are associated with one operation * @async_tx: support for the async_tx api * @group_list: list of slots that make up a multi-descriptor transaction * for example transfer lengths larger than the supported hw max @@ -102,10 +103,12 @@ struct iop_adma_desc_slot { u16 idx; u16 unmap_src_cnt; size_t unmap_len; + struct list_head tx_list; struct dma_async_tx_descriptor async_tx; union { u32 *xor_check_result; u32 *crc32_result; + u32 *pq_check_result; }; }; diff --git a/arch/arm/include/asm/hardware/it8152.h b/arch/arm/include/asm/hardware/it8152.h index 74b5fff..b2f95c7 100644 --- a/arch/arm/include/asm/hardware/it8152.h +++ b/arch/arm/include/asm/hardware/it8152.h @@ -75,6 +75,19 @@ extern unsigned long it8152_base_address; IT8152_PD_IRQ(1) USB (USBR) IT8152_PD_IRQ(0) Audio controller (ACR) */ +#define IT8152_IRQ(x) (IRQ_BOARD_START + (x)) +#define IT8152_LAST_IRQ (IRQ_BOARD_START + 40) + +/* IRQ-sources in 3 groups - local devices, LPC (serial), and external PCI */ +#define IT8152_LD_IRQ_COUNT 9 +#define IT8152_LP_IRQ_COUNT 16 +#define IT8152_PD_IRQ_COUNT 15 + +/* Priorities: */ +#define IT8152_PD_IRQ(i) IT8152_IRQ(i) +#define IT8152_LP_IRQ(i) (IT8152_IRQ(i) + IT8152_PD_IRQ_COUNT) +#define IT8152_LD_IRQ(i) (IT8152_IRQ(i) + IT8152_PD_IRQ_COUNT + IT8152_LP_IRQ_COUNT) + /* frequently used interrupts */ #define IT8152_PCISERR IT8152_PD_IRQ(14) #define IT8152_H2PTADR IT8152_PD_IRQ(13) diff --git a/arch/arm/include/asm/hardware/locomo.h b/arch/arm/include/asm/hardware/locomo.h index 954b1be..74e51d6b 100644 --- a/arch/arm/include/asm/hardware/locomo.h +++ b/arch/arm/include/asm/hardware/locomo.h @@ -214,4 +214,8 @@ void locomo_m62332_senddata(struct locomo_dev *ldev, unsigned int dac_data, int /* Frontlight control */ void locomo_frontlight_set(struct locomo_dev *dev, int duty, int vr, int bpwf); +struct locomo_platform_data { + int irq_base; /* IRQ base for cascaded on-chip IRQs */ +}; + #endif diff --git a/arch/arm/include/asm/hardware/pl080.h b/arch/arm/include/asm/hardware/pl080.h index 6a6c66b..f35b86e 100644 --- a/arch/arm/include/asm/hardware/pl080.h +++ b/arch/arm/include/asm/hardware/pl080.h @@ -43,7 +43,7 @@ /* Per channel configuration registers */ -#define PL008_Cx_STRIDE (0x20) +#define PL080_Cx_STRIDE (0x20) #define PL080_Cx_BASE(x) ((0x100 + (x * 0x20))) #define PL080_Cx_SRC_ADDR(x) ((0x100 + (x * 0x20))) #define PL080_Cx_DST_ADDR(x) ((0x104 + (x * 0x20))) @@ -68,6 +68,8 @@ #define PL080_CONTROL_TC_IRQ_EN (1 << 31) #define PL080_CONTROL_PROT_MASK (0x7 << 28) #define PL080_CONTROL_PROT_SHIFT (28) +#define PL080_CONTROL_PROT_CACHE (1 << 30) +#define PL080_CONTROL_PROT_BUFF (1 << 29) #define PL080_CONTROL_PROT_SYS (1 << 28) #define PL080_CONTROL_DST_INCR (1 << 27) #define PL080_CONTROL_SRC_INCR (1 << 26) diff --git a/arch/arm/include/asm/hardware/pl330.h b/arch/arm/include/asm/hardware/pl330.h new file mode 100644 index 0000000..575fa81 --- /dev/null +++ b/arch/arm/include/asm/hardware/pl330.h @@ -0,0 +1,217 @@ +/* linux/include/asm/hardware/pl330.h + * + * 
Copyright (C) 2010 Samsung Electronics Co. Ltd. + * Jaswinder Singh <jassi.brar@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef __PL330_CORE_H +#define __PL330_CORE_H + +#define PL330_MAX_CHAN 8 +#define PL330_MAX_IRQS 32 +#define PL330_MAX_PERI 32 + +enum pl330_srccachectrl { + SCCTRL0 = 0, /* Noncacheable and nonbufferable */ + SCCTRL1, /* Bufferable only */ + SCCTRL2, /* Cacheable, but do not allocate */ + SCCTRL3, /* Cacheable and bufferable, but do not allocate */ + SINVALID1, + SINVALID2, + SCCTRL6, /* Cacheable write-through, allocate on reads only */ + SCCTRL7, /* Cacheable write-back, allocate on reads only */ +}; + +enum pl330_dstcachectrl { + DCCTRL0 = 0, /* Noncacheable and nonbufferable */ + DCCTRL1, /* Bufferable only */ + DCCTRL2, /* Cacheable, but do not allocate */ + DCCTRL3, /* Cacheable and bufferable, but do not allocate */ + DINVALID1 = 8, + DINVALID2, + DCCTRL6, /* Cacheable write-through, allocate on writes only */ + DCCTRL7, /* Cacheable write-back, allocate on writes only */ +}; + +/* Populated by the PL330 core driver for DMA API driver's info */ +struct pl330_config { + u32 periph_id; + u32 pcell_id; +#define DMAC_MODE_NS (1 << 0) + unsigned int mode; + unsigned int data_bus_width:10; /* In number of bits */ + unsigned int data_buf_dep:10; + unsigned int num_chan:4; + unsigned int num_peri:6; + u32 peri_ns; + unsigned int num_events:6; + u32 irq_ns; +}; + +/* Handle to the DMAC provided to the PL330 core */ +struct pl330_info { + /* Owning device */ + struct device *dev; + /* Size of MicroCode buffers for each channel. */ + unsigned mcbufsz; + /* ioremap'ed address of PL330 registers. */ + void __iomem *base; + /* Client can freely use it. */ + void *client_data; + /* PL330 core data, Client must not touch it. */ + void *pl330_data; + /* Populated by the PL330 core driver during pl330_add */ + struct pl330_config pcfg; + /* + * If the DMAC has some reset mechanism, then the + * client may want to provide pointer to the method. + */ + void (*dmac_reset)(struct pl330_info *pi); +}; + +enum pl330_byteswap { + SWAP_NO = 0, + SWAP_2, + SWAP_4, + SWAP_8, + SWAP_16, +}; + +/** + * Request Configuration. + * The PL330 core does not modify this and uses the last + * working configuration if the request doesn't provide any. + * + * The Client may want to provide this info only for the + * first request and a request with new settings. + */ +struct pl330_reqcfg { + /* Address Incrementing */ + unsigned dst_inc:1; + unsigned src_inc:1; + + /* + * For now, the SRC & DST protection levels + * and burst size/length are assumed same. + */ + bool nonsecure; + bool privileged; + bool insnaccess; + unsigned brst_len:5; + unsigned brst_size:3; /* in power of 2 */ + + enum pl330_dstcachectrl dcctl; + enum pl330_srccachectrl scctl; + enum pl330_byteswap swap; +}; + +/* + * One cycle of DMAC operation. 
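+ * A single xfer describes one contiguous move of 'bytes' bytes from
+ * src_addr to dst_addr.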
+ * There may be more than one xfer in a request.
+ */
+struct pl330_xfer {
+	u32 src_addr;
+	u32 dst_addr;
+	/* Size to xfer */
+	u32 bytes;
+	/*
+	 * Pointer to next xfer in the list.
+	 * The last xfer in the req must point to NULL.
+	 */
+	struct pl330_xfer *next;
+};
+
+/* The xfer callbacks are made with one of these arguments. */
+enum pl330_op_err {
+	/* All xfers in the request completed successfully. */
+	PL330_ERR_NONE,
+	/* The req was aborted due to a global error. */
+	PL330_ERR_ABORT,
+	/* The req failed due to a problem with the channel. */
+	PL330_ERR_FAIL,
+};
+
+enum pl330_reqtype {
+	MEMTOMEM,
+	MEMTODEV,
+	DEVTOMEM,
+	DEVTODEV,
+};
+
+/* A request defining a scatter-gather list ending with a NULL xfer. */
+struct pl330_req {
+	enum pl330_reqtype rqtype;
+	/* Index of peripheral for the xfer. */
+	unsigned peri:5;
+	/* Unique token for this xfer, set by the client. */
+	void *token;
+	/* Callback to be called after xfer. */
+	void (*xfer_cb)(void *token, enum pl330_op_err err);
+	/* If NULL, the req is processed with the last-set parameters. */
+	struct pl330_reqcfg *cfg;
+	/* Pointer to first xfer in the request. */
+	struct pl330_xfer *x;
+};
+
+/*
+ * To know the status of the channel and DMAC, the client
+ * provides a pointer to this structure. The PL330 core
+ * fills it with current information.
+ */
+struct pl330_chanstatus {
+	/*
+	 * If the DMAC engine halted due to some error,
+	 * the client should remove and re-add the DMAC.
+	 */
+	bool dmac_halted;
+	/*
+	 * If the channel is halted due to some error,
+	 * the client should ABORT/FLUSH and START the channel.
+	 */
+	bool faulting;
+	/* Location of last load */
+	u32 src_addr;
+	/* Location of last store */
+	u32 dst_addr;
+	/*
+	 * Pointer to the currently active req, NULL if the channel is
+	 * inactive, even though requests may be present.
+	 */
+	struct pl330_req *top_req;
+	/* Pointer to the req waiting second in the queue, if any.
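+	 * It becomes top_req once the currently active request
+	 * completes.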
*/ + struct pl330_req *wait_req; +}; + +enum pl330_chan_op { + /* Start the channel */ + PL330_OP_START, + /* Abort the active xfer */ + PL330_OP_ABORT, + /* Stop xfer and flush queue */ + PL330_OP_FLUSH, +}; + +extern int pl330_add(struct pl330_info *); +extern void pl330_del(struct pl330_info *pi); +extern int pl330_update(const struct pl330_info *pi); +extern void pl330_release_channel(void *ch_id); +extern void *pl330_request_channel(const struct pl330_info *pi); +extern int pl330_chan_status(void *ch_id, struct pl330_chanstatus *pstatus); +extern int pl330_chan_ctrl(void *ch_id, enum pl330_chan_op op); +extern int pl330_submit_req(void *ch_id, struct pl330_req *r); + +#endif /* __PL330_CORE_H */ diff --git a/arch/arm/include/asm/hardware/sa1111.h b/arch/arm/include/asm/hardware/sa1111.h index 5da2595..92ed254 100644 --- a/arch/arm/include/asm/hardware/sa1111.h +++ b/arch/arm/include/asm/hardware/sa1111.h @@ -578,4 +578,8 @@ void sa1111_set_io_dir(struct sa1111_dev *sadev, unsigned int bits, unsigned int void sa1111_set_io(struct sa1111_dev *sadev, unsigned int bits, unsigned int v); void sa1111_set_sleep_io(struct sa1111_dev *sadev, unsigned int bits, unsigned int v); +struct sa1111_platform_data { + int irq_base; /* base for cascaded on-chip IRQs */ +}; + #endif /* _ASM_ARCH_SA1111 */ diff --git a/arch/arm/include/asm/hardware/scoop.h b/arch/arm/include/asm/hardware/scoop.h index 46492a63..ebb3cea 100644 --- a/arch/arm/include/asm/hardware/scoop.h +++ b/arch/arm/include/asm/hardware/scoop.h @@ -22,18 +22,23 @@ #define SCOOP_GPWR 0x24 #define SCOOP_GPRR 0x28 -#define SCOOP_GPCR_PA22 ( 1 << 12 ) -#define SCOOP_GPCR_PA21 ( 1 << 11 ) -#define SCOOP_GPCR_PA20 ( 1 << 10 ) -#define SCOOP_GPCR_PA19 ( 1 << 9 ) -#define SCOOP_GPCR_PA18 ( 1 << 8 ) -#define SCOOP_GPCR_PA17 ( 1 << 7 ) -#define SCOOP_GPCR_PA16 ( 1 << 6 ) -#define SCOOP_GPCR_PA15 ( 1 << 5 ) -#define SCOOP_GPCR_PA14 ( 1 << 4 ) -#define SCOOP_GPCR_PA13 ( 1 << 3 ) -#define SCOOP_GPCR_PA12 ( 1 << 2 ) -#define SCOOP_GPCR_PA11 ( 1 << 1 ) +#define SCOOP_CPR_OUT (1 << 7) +#define SCOOP_CPR_SD_3V (1 << 2) +#define SCOOP_CPR_CF_XV (1 << 1) +#define SCOOP_CPR_CF_3V (1 << 0) + +#define SCOOP_GPCR_PA22 (1 << 12) +#define SCOOP_GPCR_PA21 (1 << 11) +#define SCOOP_GPCR_PA20 (1 << 10) +#define SCOOP_GPCR_PA19 (1 << 9) +#define SCOOP_GPCR_PA18 (1 << 8) +#define SCOOP_GPCR_PA17 (1 << 7) +#define SCOOP_GPCR_PA16 (1 << 6) +#define SCOOP_GPCR_PA15 (1 << 5) +#define SCOOP_GPCR_PA14 (1 << 4) +#define SCOOP_GPCR_PA13 (1 << 3) +#define SCOOP_GPCR_PA12 (1 << 2) +#define SCOOP_GPCR_PA11 (1 << 1) struct scoop_config { unsigned short io_out; diff --git a/arch/arm/include/asm/hardware/sp810.h b/arch/arm/include/asm/hardware/sp810.h new file mode 100644 index 0000000..a101f10 --- /dev/null +++ b/arch/arm/include/asm/hardware/sp810.h @@ -0,0 +1,59 @@ +/* + * arch/arm/include/asm/hardware/sp810.h + * + * ARM PrimeXsys System Controller SP810 header file + * + * Copyright (C) 2009 ST Microelectronics + * Viresh Kumar<viresh.kumar@st.com> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ */ + +#ifndef __ASM_ARM_SP810_H +#define __ASM_ARM_SP810_H + +#include <linux/io.h> + +/* sysctl registers offset */ +#define SCCTRL 0x000 +#define SCSYSSTAT 0x004 +#define SCIMCTRL 0x008 +#define SCIMSTAT 0x00C +#define SCXTALCTRL 0x010 +#define SCPLLCTRL 0x014 +#define SCPLLFCTRL 0x018 +#define SCPERCTRL0 0x01C +#define SCPERCTRL1 0x020 +#define SCPEREN 0x024 +#define SCPERDIS 0x028 +#define SCPERCLKEN 0x02C +#define SCPERSTAT 0x030 +#define SCSYSID0 0xEE0 +#define SCSYSID1 0xEE4 +#define SCSYSID2 0xEE8 +#define SCSYSID3 0xEEC +#define SCITCR 0xF00 +#define SCITIR0 0xF04 +#define SCITIR1 0xF08 +#define SCITOR 0xF0C +#define SCCNTCTRL 0xF10 +#define SCCNTDATA 0xF14 +#define SCCNTSTEP 0xF18 +#define SCPERIPHID0 0xFE0 +#define SCPERIPHID1 0xFE4 +#define SCPERIPHID2 0xFE8 +#define SCPERIPHID3 0xFEC +#define SCPCELLID0 0xFF0 +#define SCPCELLID1 0xFF4 +#define SCPCELLID2 0xFF8 +#define SCPCELLID3 0xFFC + +static inline void sysctl_soft_reset(void __iomem *base) +{ + /* writing any value to SCSYSSTAT reg will reset system */ + writel(0, base + SCSYSSTAT); +} + +#endif /* __ASM_ARM_SP810_H */ diff --git a/arch/arm/include/asm/hardware/timer-sp.h b/arch/arm/include/asm/hardware/timer-sp.h new file mode 100644 index 0000000..21e75e3 --- /dev/null +++ b/arch/arm/include/asm/hardware/timer-sp.h @@ -0,0 +1,2 @@ +void sp804_clocksource_init(void __iomem *); +void sp804_clockevents_init(void __iomem *, unsigned int); diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h index 7f36d00..7080e2c 100644 --- a/arch/arm/include/asm/highmem.h +++ b/arch/arm/include/asm/highmem.h @@ -11,7 +11,11 @@ #define kmap_prot PAGE_KERNEL -#define flush_cache_kmaps() flush_cache_all() +#define flush_cache_kmaps() \ + do { \ + if (cache_is_vivt()) \ + flush_cache_all(); \ + } while (0) extern pte_t *pkmap_page_table; @@ -21,11 +25,17 @@ extern void *kmap_high(struct page *page); extern void *kmap_high_get(struct page *page); extern void kunmap_high(struct page *page); +/* + * The following functions are already defined by <linux/highmem.h> + * when CONFIG_HIGHMEM is not set. 
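+ * In that case they reduce to inline stubs operating directly on
+ * lowmem addresses.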
+ */ +#ifdef CONFIG_HIGHMEM extern void *kmap(struct page *page); extern void kunmap(struct page *page); -extern void *kmap_atomic(struct page *page, enum km_type type); -extern void kunmap_atomic(void *kvaddr, enum km_type type); -extern void *kmap_atomic_pfn(unsigned long pfn, enum km_type type); +extern void *__kmap_atomic(struct page *page); +extern void __kunmap_atomic(void *kvaddr); +extern void *kmap_atomic_pfn(unsigned long pfn); extern struct page *kmap_atomic_to_page(const void *ptr); +#endif #endif diff --git a/arch/arm/include/asm/hw_breakpoint.h b/arch/arm/include/asm/hw_breakpoint.h new file mode 100644 index 0000000..f389b27 --- /dev/null +++ b/arch/arm/include/asm/hw_breakpoint.h @@ -0,0 +1,133 @@ +#ifndef _ARM_HW_BREAKPOINT_H +#define _ARM_HW_BREAKPOINT_H + +#ifdef __KERNEL__ + +struct task_struct; + +#ifdef CONFIG_HAVE_HW_BREAKPOINT + +struct arch_hw_breakpoint_ctrl { + u32 __reserved : 9, + mismatch : 1, + : 9, + len : 8, + type : 2, + privilege : 2, + enabled : 1; +}; + +struct arch_hw_breakpoint { + u32 address; + u32 trigger; + struct arch_hw_breakpoint_ctrl step_ctrl; + struct arch_hw_breakpoint_ctrl ctrl; +}; + +static inline u32 encode_ctrl_reg(struct arch_hw_breakpoint_ctrl ctrl) +{ + return (ctrl.mismatch << 22) | (ctrl.len << 5) | (ctrl.type << 3) | + (ctrl.privilege << 1) | ctrl.enabled; +} + +static inline void decode_ctrl_reg(u32 reg, + struct arch_hw_breakpoint_ctrl *ctrl) +{ + ctrl->enabled = reg & 0x1; + reg >>= 1; + ctrl->privilege = reg & 0x3; + reg >>= 2; + ctrl->type = reg & 0x3; + reg >>= 2; + ctrl->len = reg & 0xff; + reg >>= 17; + ctrl->mismatch = reg & 0x1; +} + +/* Debug architecture numbers. */ +#define ARM_DEBUG_ARCH_RESERVED 0 /* In case of ptrace ABI updates. */ +#define ARM_DEBUG_ARCH_V6 1 +#define ARM_DEBUG_ARCH_V6_1 2 +#define ARM_DEBUG_ARCH_V7_ECP14 3 +#define ARM_DEBUG_ARCH_V7_MM 4 + +/* Breakpoint */ +#define ARM_BREAKPOINT_EXECUTE 0 + +/* Watchpoints */ +#define ARM_BREAKPOINT_LOAD 1 +#define ARM_BREAKPOINT_STORE 2 + +/* Privilege Levels */ +#define ARM_BREAKPOINT_PRIV 1 +#define ARM_BREAKPOINT_USER 2 + +/* Lengths */ +#define ARM_BREAKPOINT_LEN_1 0x1 +#define ARM_BREAKPOINT_LEN_2 0x3 +#define ARM_BREAKPOINT_LEN_4 0xf +#define ARM_BREAKPOINT_LEN_8 0xff + +/* Limits */ +#define ARM_MAX_BRP 16 +#define ARM_MAX_WRP 16 +#define ARM_MAX_HBP_SLOTS (ARM_MAX_BRP + ARM_MAX_WRP) + +/* DSCR method of entry bits. */ +#define ARM_DSCR_MOE(x) ((x >> 2) & 0xf) +#define ARM_ENTRY_BREAKPOINT 0x1 +#define ARM_ENTRY_ASYNC_WATCHPOINT 0x2 +#define ARM_ENTRY_SYNC_WATCHPOINT 0xa + +/* DSCR monitor/halting bits. */ +#define ARM_DSCR_HDBGEN (1 << 14) +#define ARM_DSCR_MDBGEN (1 << 15) + +/* opcode2 numbers for the co-processor instructions. */ +#define ARM_OP2_BVR 4 +#define ARM_OP2_BCR 5 +#define ARM_OP2_WVR 6 +#define ARM_OP2_WCR 7 + +/* Base register numbers for the debug registers. */ +#define ARM_BASE_BVR 64 +#define ARM_BASE_BCR 80 +#define ARM_BASE_WVR 96 +#define ARM_BASE_WCR 112 + +/* Accessor macros for the debug registers. 
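+ * M selects the CRm field and OP2 the opcode2 field of the cp14
+ * transfer; e.g. ARM_DBG_READ(c0, 0, val) reads DBGDIDR into val.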
*/ +#define ARM_DBG_READ(M, OP2, VAL) do {\ + asm volatile("mrc p14, 0, %0, c0," #M ", " #OP2 : "=r" (VAL));\ +} while (0) + +#define ARM_DBG_WRITE(M, OP2, VAL) do {\ + asm volatile("mcr p14, 0, %0, c0," #M ", " #OP2 : : "r" (VAL));\ +} while (0) + +struct notifier_block; +struct perf_event; +struct pmu; + +extern struct pmu perf_ops_bp; +extern int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl, + int *gen_len, int *gen_type); +extern int arch_check_bp_in_kernelspace(struct perf_event *bp); +extern int arch_validate_hwbkpt_settings(struct perf_event *bp); +extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused, + unsigned long val, void *data); + +extern u8 arch_get_debug_arch(void); +extern u8 arch_get_max_wp_len(void); +extern void clear_ptrace_hw_breakpoint(struct task_struct *tsk); + +int arch_install_hw_breakpoint(struct perf_event *bp); +void arch_uninstall_hw_breakpoint(struct perf_event *bp); +void hw_breakpoint_pmu_read(struct perf_event *bp); +int hw_breakpoint_slots(int type); + +#else +static inline void clear_ptrace_hw_breakpoint(struct task_struct *tsk) {} + +#endif /* CONFIG_HAVE_HW_BREAKPOINT */ +#endif /* __KERNEL__ */ +#endif /* _ARM_HW_BREAKPOINT_H */ diff --git a/arch/arm/include/asm/hw_irq.h b/arch/arm/include/asm/hw_irq.h index 90831f6..5586b7c 100644 --- a/arch/arm/include/asm/hw_irq.h +++ b/arch/arm/include/asm/hw_irq.h @@ -24,4 +24,6 @@ void set_irq_flags(unsigned int irq, unsigned int flags); #define IRQF_PROBE (1 << 1) #define IRQF_NOAUTOEN (1 << 2) +#define ARCH_IRQ_INIT_FLAGS (IRQ_NOREQUEST | IRQ_NOPROBE) + #endif diff --git a/arch/arm/include/asm/hwcap.h b/arch/arm/include/asm/hwcap.h index f7bd52b..c1062c3 100644 --- a/arch/arm/include/asm/hwcap.h +++ b/arch/arm/include/asm/hwcap.h @@ -19,6 +19,7 @@ #define HWCAP_NEON 4096 #define HWCAP_VFPv3 8192 #define HWCAP_VFPv3D16 16384 +#define HWCAP_TLS 32768 #if defined(__KERNEL__) && !defined(__ASSEMBLY__) /* diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h index d2a59cf..20e0f7c 100644 --- a/arch/arm/include/asm/io.h +++ b/arch/arm/include/asm/io.h @@ -26,6 +26,7 @@ #include <linux/types.h> #include <asm/byteorder.h> #include <asm/memory.h> +#include <asm/system.h> /* * ISA I/O bus memory addresses are 1:1 with the physical address. @@ -69,9 +70,16 @@ extern void __raw_readsl(const void __iomem *addr, void *data, int longlen); /* * __arm_ioremap takes CPU physical address. * __arm_ioremap_pfn takes a Page Frame Number and an offset into that page + * The _caller variety takes a __builtin_return_address(0) value for + * /proc/vmalloc to use - and should only be used in non-inline functions. */ -extern void __iomem * __arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int); -extern void __iomem * __arm_ioremap(unsigned long, size_t, unsigned int); +extern void __iomem *__arm_ioremap_pfn_caller(unsigned long, unsigned long, + size_t, unsigned int, void *); +extern void __iomem *__arm_ioremap_caller(unsigned long, size_t, unsigned int, + void *); + +extern void __iomem *__arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int); +extern void __iomem *__arm_ioremap(unsigned long, size_t, unsigned int); extern void __iounmap(volatile void __iomem *addr); /* @@ -172,25 +180,38 @@ extern void _memset_io(volatile void __iomem *, int, size_t); * IO port primitives for more information. 
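 *
 * The *_relaxed variants defined below perform the same accesses but
 * omit the ordering barriers that the plain accessors imply.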
*/ #ifdef __mem_pci -#define readb(c) ({ __u8 __v = __raw_readb(__mem_pci(c)); __v; }) -#define readw(c) ({ __u16 __v = le16_to_cpu((__force __le16) \ +#define readb_relaxed(c) ({ u8 __v = __raw_readb(__mem_pci(c)); __v; }) +#define readw_relaxed(c) ({ u16 __v = le16_to_cpu((__force __le16) \ __raw_readw(__mem_pci(c))); __v; }) -#define readl(c) ({ __u32 __v = le32_to_cpu((__force __le32) \ +#define readl_relaxed(c) ({ u32 __v = le32_to_cpu((__force __le32) \ __raw_readl(__mem_pci(c))); __v; }) -#define readb_relaxed(addr) readb(addr) -#define readw_relaxed(addr) readw(addr) -#define readl_relaxed(addr) readl(addr) + +#define writeb_relaxed(v,c) ((void)__raw_writeb(v,__mem_pci(c))) +#define writew_relaxed(v,c) ((void)__raw_writew((__force u16) \ + cpu_to_le16(v),__mem_pci(c))) +#define writel_relaxed(v,c) ((void)__raw_writel((__force u32) \ + cpu_to_le32(v),__mem_pci(c))) + +#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE +#define __iormb() rmb() +#define __iowmb() wmb() +#else +#define __iormb() do { } while (0) +#define __iowmb() do { } while (0) +#endif + +#define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(); __v; }) +#define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(); __v; }) +#define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(); __v; }) + +#define writeb(v,c) ({ __iowmb(); writeb_relaxed(v,c); }) +#define writew(v,c) ({ __iowmb(); writew_relaxed(v,c); }) +#define writel(v,c) ({ __iowmb(); writel_relaxed(v,c); }) #define readsb(p,d,l) __raw_readsb(__mem_pci(p),d,l) #define readsw(p,d,l) __raw_readsw(__mem_pci(p),d,l) #define readsl(p,d,l) __raw_readsl(__mem_pci(p),d,l) -#define writeb(v,c) __raw_writeb(v,__mem_pci(c)) -#define writew(v,c) __raw_writew((__force __u16) \ - cpu_to_le16(v),__mem_pci(c)) -#define writel(v,c) __raw_writel((__force __u32) \ - cpu_to_le32(v),__mem_pci(c)) - #define writesb(p,d,l) __raw_writesb(__mem_pci(p),d,l) #define writesw(p,d,l) __raw_writesw(__mem_pci(p),d,l) #define writesl(p,d,l) __raw_writesl(__mem_pci(p),d,l) @@ -220,30 +241,27 @@ extern void _memset_io(volatile void __iomem *, int, size_t); * */ #ifndef __arch_ioremap -#define ioremap(cookie,size) __arm_ioremap(cookie, size, MT_DEVICE) -#define ioremap_nocache(cookie,size) __arm_ioremap(cookie, size, MT_DEVICE) -#define ioremap_cached(cookie,size) __arm_ioremap(cookie, size, MT_DEVICE_CACHED) -#define ioremap_wc(cookie,size) __arm_ioremap(cookie, size, MT_DEVICE_WC) -#define iounmap(cookie) __iounmap(cookie) -#else +#define __arch_ioremap __arm_ioremap +#define __arch_iounmap __iounmap +#endif + #define ioremap(cookie,size) __arch_ioremap((cookie), (size), MT_DEVICE) #define ioremap_nocache(cookie,size) __arch_ioremap((cookie), (size), MT_DEVICE) #define ioremap_cached(cookie,size) __arch_ioremap((cookie), (size), MT_DEVICE_CACHED) #define ioremap_wc(cookie,size) __arch_ioremap((cookie), (size), MT_DEVICE_WC) -#define iounmap(cookie) __arch_iounmap(cookie) -#endif +#define iounmap __arch_iounmap /* * io{read,write}{8,16,32} macros */ #ifndef ioread8 -#define ioread8(p) ({ unsigned int __v = __raw_readb(p); __v; }) -#define ioread16(p) ({ unsigned int __v = le16_to_cpu((__force __le16)__raw_readw(p)); __v; }) -#define ioread32(p) ({ unsigned int __v = le32_to_cpu((__force __le32)__raw_readl(p)); __v; }) +#define ioread8(p) ({ unsigned int __v = __raw_readb(p); __iormb(); __v; }) +#define ioread16(p) ({ unsigned int __v = le16_to_cpu((__force __le16)__raw_readw(p)); __iormb(); __v; }) +#define ioread32(p) ({ unsigned int __v = le32_to_cpu((__force __le32)__raw_readl(p)); __iormb(); __v; }) 
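+
+/*
+ * As with read{b,w,l} and write{b,w,l} above: a read barrier follows
+ * each read so the device access completes before any subsequent
+ * memory access, and a write barrier precedes each write so prior
+ * memory accesses (e.g. filling a DMA buffer) complete before the
+ * device register is written.
+ */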
-#define iowrite8(v,p) __raw_writeb(v, p) -#define iowrite16(v,p) __raw_writew((__force __u16)cpu_to_le16(v), p) -#define iowrite32(v,p) __raw_writel((__force __u32)cpu_to_le32(v), p) +#define iowrite8(v,p) ({ __iowmb(); (void)__raw_writeb(v, p); }) +#define iowrite16(v,p) ({ __iowmb(); (void)__raw_writew((__force __u16)cpu_to_le16(v), p); }) +#define iowrite32(v,p) ({ __iowmb(); (void)__raw_writel((__force __u32)cpu_to_le32(v), p); }) #define ioread8_rep(p,d,c) __raw_readsb(p,d,c) #define ioread16_rep(p,d,c) __raw_readsw(p,d,c) @@ -273,6 +291,7 @@ extern void pci_iounmap(struct pci_dev *dev, void __iomem *addr); #define ARCH_HAS_VALID_PHYS_ADDR_RANGE extern int valid_phys_addr_range(unsigned long addr, size_t size); extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size); +extern int devmem_is_allowed(unsigned long pfn); #endif /* diff --git a/arch/arm/include/asm/ioctls.h b/arch/arm/include/asm/ioctls.h index a91d8a1..9c96298 100644 --- a/arch/arm/include/asm/ioctls.h +++ b/arch/arm/include/asm/ioctls.h @@ -1,84 +1,8 @@ #ifndef __ASM_ARM_IOCTLS_H #define __ASM_ARM_IOCTLS_H -#include <asm/ioctl.h> - -/* 0x54 is just a magic number to make these relatively unique ('T') */ - -#define TCGETS 0x5401 -#define TCSETS 0x5402 -#define TCSETSW 0x5403 -#define TCSETSF 0x5404 -#define TCGETA 0x5405 -#define TCSETA 0x5406 -#define TCSETAW 0x5407 -#define TCSETAF 0x5408 -#define TCSBRK 0x5409 -#define TCXONC 0x540A -#define TCFLSH 0x540B -#define TIOCEXCL 0x540C -#define TIOCNXCL 0x540D -#define TIOCSCTTY 0x540E -#define TIOCGPGRP 0x540F -#define TIOCSPGRP 0x5410 -#define TIOCOUTQ 0x5411 -#define TIOCSTI 0x5412 -#define TIOCGWINSZ 0x5413 -#define TIOCSWINSZ 0x5414 -#define TIOCMGET 0x5415 -#define TIOCMBIS 0x5416 -#define TIOCMBIC 0x5417 -#define TIOCMSET 0x5418 -#define TIOCGSOFTCAR 0x5419 -#define TIOCSSOFTCAR 0x541A -#define FIONREAD 0x541B -#define TIOCINQ FIONREAD -#define TIOCLINUX 0x541C -#define TIOCCONS 0x541D -#define TIOCGSERIAL 0x541E -#define TIOCSSERIAL 0x541F -#define TIOCPKT 0x5420 -#define FIONBIO 0x5421 -#define TIOCNOTTY 0x5422 -#define TIOCSETD 0x5423 -#define TIOCGETD 0x5424 -#define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */ -#define TIOCSBRK 0x5427 /* BSD compatibility */ -#define TIOCCBRK 0x5428 /* BSD compatibility */ -#define TIOCGSID 0x5429 /* Return the session ID of FD */ -#define TCGETS2 _IOR('T',0x2A, struct termios2) -#define TCSETS2 _IOW('T',0x2B, struct termios2) -#define TCSETSW2 _IOW('T',0x2C, struct termios2) -#define TCSETSF2 _IOW('T',0x2D, struct termios2) -#define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ -#define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ - -#define FIONCLEX 0x5450 /* these numbers need to be adjusted. 
*/ -#define FIOCLEX 0x5451 -#define FIOASYNC 0x5452 -#define TIOCSERCONFIG 0x5453 -#define TIOCSERGWILD 0x5454 -#define TIOCSERSWILD 0x5455 -#define TIOCGLCKTRMIOS 0x5456 -#define TIOCSLCKTRMIOS 0x5457 -#define TIOCSERGSTRUCT 0x5458 /* For debugging only */ -#define TIOCSERGETLSR 0x5459 /* Get line status register */ -#define TIOCSERGETMULTI 0x545A /* Get multiport config */ -#define TIOCSERSETMULTI 0x545B /* Set multiport config */ - -#define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */ -#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */ #define FIOQSIZE 0x545E -/* Used for packet mode */ -#define TIOCPKT_DATA 0 -#define TIOCPKT_FLUSHREAD 1 -#define TIOCPKT_FLUSHWRITE 2 -#define TIOCPKT_STOP 4 -#define TIOCPKT_START 8 -#define TIOCPKT_NOSTOP 16 -#define TIOCPKT_DOSTOP 32 - -#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */ +#include <asm-generic/ioctls.h> #endif diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h index 328f14a..2721a58 100644 --- a/arch/arm/include/asm/irq.h +++ b/arch/arm/include/asm/irq.h @@ -7,6 +7,8 @@ #define irq_canonicalize(i) (i) #endif +#define NR_IRQS_LEGACY 16 + /* * Use this value to indicate lack of interrupt * capability @@ -17,6 +19,7 @@ #ifndef __ASSEMBLY__ struct irqaction; +struct pt_regs; extern void migrate_irqs(void); extern void asm_do_IRQ(unsigned int, struct pt_regs *); diff --git a/arch/arm/include/asm/irqflags.h b/arch/arm/include/asm/irqflags.h index 6d09974..1e6cca5 100644 --- a/arch/arm/include/asm/irqflags.h +++ b/arch/arm/include/asm/irqflags.h @@ -10,66 +10,85 @@ */ #if __LINUX_ARM_ARCH__ >= 6 -#define raw_local_irq_save(x) \ - ({ \ - __asm__ __volatile__( \ - "mrs %0, cpsr @ local_irq_save\n" \ - "cpsid i" \ - : "=r" (x) : : "memory", "cc"); \ - }) +static inline unsigned long arch_local_irq_save(void) +{ + unsigned long flags; + + asm volatile( + " mrs %0, cpsr @ arch_local_irq_save\n" + " cpsid i" + : "=r" (flags) : : "memory", "cc"); + return flags; +} + +static inline void arch_local_irq_enable(void) +{ + asm volatile( + " cpsie i @ arch_local_irq_enable" + : + : + : "memory", "cc"); +} + +static inline void arch_local_irq_disable(void) +{ + asm volatile( + " cpsid i @ arch_local_irq_disable" + : + : + : "memory", "cc"); +} -#define raw_local_irq_enable() __asm__("cpsie i @ __sti" : : : "memory", "cc") -#define raw_local_irq_disable() __asm__("cpsid i @ __cli" : : : "memory", "cc") #define local_fiq_enable() __asm__("cpsie f @ __stf" : : : "memory", "cc") #define local_fiq_disable() __asm__("cpsid f @ __clf" : : : "memory", "cc") - #else /* * Save the current interrupt enable state & disable IRQs */ -#define raw_local_irq_save(x) \ - ({ \ - unsigned long temp; \ - (void) (&temp == &x); \ - __asm__ __volatile__( \ - "mrs %0, cpsr @ local_irq_save\n" \ -" orr %1, %0, #128\n" \ -" msr cpsr_c, %1" \ - : "=r" (x), "=r" (temp) \ - : \ - : "memory", "cc"); \ - }) - +static inline unsigned long arch_local_irq_save(void) +{ + unsigned long flags, temp; + + asm volatile( + " mrs %0, cpsr @ arch_local_irq_save\n" + " orr %1, %0, #128\n" + " msr cpsr_c, %1" + : "=r" (flags), "=r" (temp) + : + : "memory", "cc"); + return flags; +} + /* * Enable IRQs */ -#define raw_local_irq_enable() \ - ({ \ - unsigned long temp; \ - __asm__ __volatile__( \ - "mrs %0, cpsr @ local_irq_enable\n" \ -" bic %0, %0, #128\n" \ -" msr cpsr_c, %0" \ - : "=r" (temp) \ - : \ - : "memory", "cc"); \ - }) +static inline void arch_local_irq_enable(void) +{ + unsigned long temp; + asm volatile( + " mrs %0, 
cpsr @ arch_local_irq_enable\n" + " bic %0, %0, #128\n" + " msr cpsr_c, %0" + : "=r" (temp) + : + : "memory", "cc"); +} /* * Disable IRQs */ -#define raw_local_irq_disable() \ - ({ \ - unsigned long temp; \ - __asm__ __volatile__( \ - "mrs %0, cpsr @ local_irq_disable\n" \ -" orr %0, %0, #128\n" \ -" msr cpsr_c, %0" \ - : "=r" (temp) \ - : \ - : "memory", "cc"); \ - }) +static inline void arch_local_irq_disable(void) +{ + unsigned long temp; + asm volatile( + " mrs %0, cpsr @ arch_local_irq_disable\n" + " orr %0, %0, #128\n" + " msr cpsr_c, %0" + : "=r" (temp) + : + : "memory", "cc"); +} /* * Enable FIQs @@ -106,27 +125,31 @@ /* * Save the current interrupt enable state. */ -#define raw_local_save_flags(x) \ - ({ \ - __asm__ __volatile__( \ - "mrs %0, cpsr @ local_save_flags" \ - : "=r" (x) : : "memory", "cc"); \ - }) +static inline unsigned long arch_local_save_flags(void) +{ + unsigned long flags; + asm volatile( + " mrs %0, cpsr @ local_save_flags" + : "=r" (flags) : : "memory", "cc"); + return flags; +} /* * restore saved IRQ & FIQ state */ -#define raw_local_irq_restore(x) \ - __asm__ __volatile__( \ - "msr cpsr_c, %0 @ local_irq_restore\n" \ - : \ - : "r" (x) \ - : "memory", "cc") +static inline void arch_local_irq_restore(unsigned long flags) +{ + asm volatile( + " msr cpsr_c, %0 @ local_irq_restore" + : + : "r" (flags) + : "memory", "cc"); +} -#define raw_irqs_disabled_flags(flags) \ -({ \ - (int)((flags) & PSR_I_BIT); \ -}) +static inline int arch_irqs_disabled_flags(unsigned long flags) +{ + return flags & PSR_I_BIT; +} #endif #endif diff --git a/arch/arm/include/asm/kexec.h b/arch/arm/include/asm/kexec.h index df15a0d..c0094d8 100644 --- a/arch/arm/include/asm/kexec.h +++ b/arch/arm/include/asm/kexec.h @@ -19,10 +19,36 @@ #ifndef __ASSEMBLY__ -struct kimage; -/* Provide a dummy definition to avoid build failures. */ +/** + * crash_setup_regs() - save registers for the panic kernel + * @newregs: registers are saved here + * @oldregs: registers to be saved (may be %NULL) + * + * Function copies machine registers from @oldregs to @newregs. If @oldregs is + * %NULL then current registers are stored there. 
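+ *
+ * The inline asm below stores r0-r12 directly into @newregs, then
+ * captures sp, lr, a pc value inside this function and the current
+ * cpsr.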
+ */ static inline void crash_setup_regs(struct pt_regs *newregs, - struct pt_regs *oldregs) { } + struct pt_regs *oldregs) +{ + if (oldregs) { + memcpy(newregs, oldregs, sizeof(*newregs)); + } else { + __asm__ __volatile__ ( + "stmia %[regs_base], {r0-r12}\n\t" + "mov %[_ARM_sp], sp\n\t" + "str lr, %[_ARM_lr]\n\t" + "adr %[_ARM_pc], 1f\n\t" + "mrs %[_ARM_cpsr], cpsr\n\t" + "1:" + : [_ARM_pc] "=r" (newregs->ARM_pc), + [_ARM_cpsr] "=r" (newregs->ARM_cpsr), + [_ARM_sp] "=r" (newregs->ARM_sp), + [_ARM_lr] "=o" (newregs->ARM_lr) + : [regs_base] "r" (&newregs->ARM_r0) + : "memory" + ); + } +} #endif /* __ASSEMBLY__ */ diff --git a/arch/arm/include/asm/kgdb.h b/arch/arm/include/asm/kgdb.h index 67af4b8..48066ce 100644 --- a/arch/arm/include/asm/kgdb.h +++ b/arch/arm/include/asm/kgdb.h @@ -71,10 +71,11 @@ extern int kgdb_fault_expected; #define _FP_REGS 8 #define _EXTRA_REGS 2 #define GDB_MAX_REGS (_GP_REGS + (_FP_REGS * 3) + _EXTRA_REGS) +#define DBG_MAX_REG_NUM (_GP_REGS + _FP_REGS + _EXTRA_REGS) #define KGDB_MAX_NO_CPUS 1 #define BUFMAX 400 -#define NUMREGBYTES (GDB_MAX_REGS << 2) +#define NUMREGBYTES (DBG_MAX_REG_NUM << 2) #define NUMCRITREGBYTES (32 << 2) #define _R0 0 diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h index d16ec97..e51b1e8 100644 --- a/arch/arm/include/asm/kmap_types.h +++ b/arch/arm/include/asm/kmap_types.h @@ -18,8 +18,16 @@ enum km_type { KM_IRQ1, KM_SOFTIRQ0, KM_SOFTIRQ1, + KM_L1_CACHE, KM_L2_CACHE, + KM_KDB, KM_TYPE_NR }; +#ifdef CONFIG_DEBUG_HIGHMEM +#define KM_NMI (-1) +#define KM_NMI_PTE (-1) +#define KM_IRQ_PTE (-1) +#endif + #endif diff --git a/arch/arm/include/asm/local64.h b/arch/arm/include/asm/local64.h new file mode 100644 index 0000000..36c93b5 --- /dev/null +++ b/arch/arm/include/asm/local64.h @@ -0,0 +1 @@ +#include <asm-generic/local64.h> diff --git a/arch/arm/include/asm/localtimer.h b/arch/arm/include/asm/localtimer.h index 50c7e7c..6bc63ab 100644 --- a/arch/arm/include/asm/localtimer.h +++ b/arch/arm/include/asm/localtimer.h @@ -30,7 +30,6 @@ asmlinkage void do_local_timer(struct pt_regs *); #include "smp_twd.h" #define local_timer_ack() twd_timer_ack() -#define local_timer_stop() twd_timer_stop() #else @@ -40,11 +39,6 @@ asmlinkage void do_local_timer(struct pt_regs *); */ int local_timer_ack(void); -/* - * Stop a local timer interrupt. - */ -void local_timer_stop(void); - #endif /* @@ -52,12 +46,6 @@ void local_timer_stop(void); */ void local_timer_setup(struct clock_event_device *); -#else - -static inline void local_timer_stop(void) -{ -} - #endif #endif diff --git a/arch/arm/include/asm/mach-types.h b/arch/arm/include/asm/mach-types.h new file mode 100644 index 0000000..948178c --- /dev/null +++ b/arch/arm/include/asm/mach-types.h @@ -0,0 +1 @@ +#include <generated/mach-types.h> diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h index c59842d..3a0893a 100644 --- a/arch/arm/include/asm/mach/arch.h +++ b/arch/arm/include/asm/mach/arch.h @@ -16,17 +16,15 @@ struct sys_timer; struct machine_desc { /* - * Note! The first four elements are used + * Note! 
The first two elements are used * by assembler code in head.S, head-common.S */ unsigned int nr; /* architecture number */ - unsigned int phys_io; /* start of physical io */ - unsigned int io_pg_offst; /* byte offset for io - * page tabe entry */ - const char *name; /* architecture name */ unsigned long boot_params; /* tagged list */ + unsigned int nr_irqs; /* number of IRQs */ + unsigned int video_start; /* start of video RAM */ unsigned int video_end; /* end of video RAM */ @@ -37,13 +35,23 @@ struct machine_desc { void (*fixup)(struct machine_desc *, struct tag *, char **, struct meminfo *); + void (*reserve)(void);/* reserve mem blocks */ void (*map_io)(void);/* IO mapping function */ + void (*init_early)(void); void (*init_irq)(void); struct sys_timer *timer; /* system tick timer */ void (*init_machine)(void); +#ifdef CONFIG_MULTI_IRQ_HANDLER + void (*handle_irq)(struct pt_regs *); +#endif }; /* + * Current machine - only accessible during boot. + */ +extern struct machine_desc *machine_desc; + +/* * Set of macros to define architecture features. This is built into * a table by the linker. */ diff --git a/arch/arm/include/asm/mach/irq.h b/arch/arm/include/asm/mach/irq.h index acac530..22ac140 100644 --- a/arch/arm/include/asm/mach/irq.h +++ b/arch/arm/include/asm/mach/irq.h @@ -17,18 +17,21 @@ struct seq_file; /* * This is internal. Do not use it. */ -extern void (*init_arch_irq)(void); extern void init_FIQ(void); -extern int show_fiq_list(struct seq_file *, void *); +extern int show_fiq_list(struct seq_file *, int); + +#ifdef CONFIG_MULTI_IRQ_HANDLER +extern void (*handle_arch_irq)(struct pt_regs *); +#endif /* * This is for easy migration, but should be changed in the source */ #define do_bad_IRQ(irq,desc) \ do { \ - spin_lock(&desc->lock); \ + raw_spin_lock(&desc->lock); \ handle_bad_irq(irq, desc); \ - spin_unlock(&desc->lock); \ + raw_spin_unlock(&desc->lock); \ } while(0) #endif diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h index 742c2aaeb..d2fedb5 100644 --- a/arch/arm/include/asm/mach/map.h +++ b/arch/arm/include/asm/mach/map.h @@ -27,6 +27,8 @@ struct map_desc { #define MT_MEMORY 9 #define MT_ROM 10 #define MT_MEMORY_NONCACHED 11 +#define MT_MEMORY_DTCM 12 +#define MT_MEMORY_ITCM 13 #ifdef CONFIG_MMU extern void iotable_init(struct map_desc *, int); diff --git a/arch/arm/include/asm/mach/mmc.h b/arch/arm/include/asm/mach/mmc.h deleted file mode 100644 index 4da332b..0000000 --- a/arch/arm/include/asm/mach/mmc.h +++ /dev/null @@ -1,15 +0,0 @@ -/* - * arch/arm/include/asm/mach/mmc.h - */ -#ifndef ASMARM_MACH_MMC_H -#define ASMARM_MACH_MMC_H - -#include <linux/mmc/host.h> - -struct mmc_platform_data { - unsigned int ocr_mask; /* available voltages */ - u32 (*translate_vdd)(struct device *, unsigned int); - unsigned int (*status)(struct device *); -}; - -#endif diff --git a/arch/arm/include/asm/mach/pci.h b/arch/arm/include/asm/mach/pci.h index a38bdc7..16330bd 100644 --- a/arch/arm/include/asm/mach/pci.h +++ b/arch/arm/include/asm/mach/pci.h @@ -8,10 +8,16 @@ * published by the Free Software Foundation. 
*/ +#ifndef __ASM_MACH_PCI_H +#define __ASM_MACH_PCI_H + struct pci_sys_data; struct pci_bus; struct hw_pci { +#ifdef CONFIG_PCI_DOMAINS + int domain; +#endif struct list_head buses; int nr_controllers; int (*setup)(int nr, struct pci_sys_data *); @@ -26,6 +32,9 @@ struct hw_pci { * Per-controller structure */ struct pci_sys_data { +#ifdef CONFIG_PCI_DOMAINS + int domain; +#endif struct list_head node; int busnr; /* primary bus number */ u64 mem_offset; /* bus->cpu memory mapping offset */ @@ -37,6 +46,7 @@ struct pci_sys_data { /* IRQ mapping */ int (*map_irq)(struct pci_dev *, u8, u8); struct hw_pci *hw; + void *private_data; /* platform controller private data */ }; /* @@ -70,3 +80,5 @@ extern int pci_v3_setup(int nr, struct pci_sys_data *); extern struct pci_bus *pci_v3_scan_bus(int nr, struct pci_sys_data *); extern void pci_v3_preinit(void); extern void pci_v3_postinit(void); + +#endif /* __ASM_MACH_PCI_H */ diff --git a/arch/arm/include/asm/mach/time.h b/arch/arm/include/asm/mach/time.h index b2cc1fc..883f6be 100644 --- a/arch/arm/include/asm/mach/time.h +++ b/arch/arm/include/asm/mach/time.h @@ -38,20 +38,11 @@ struct sys_timer { void (*init)(void); void (*suspend)(void); void (*resume)(void); -#ifndef CONFIG_GENERIC_TIME +#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET unsigned long (*offset)(void); #endif }; -extern struct sys_timer *system_timer; extern void timer_tick(void); -/* - * Kernel time keeping support. - */ -struct timespec; -extern int (*set_rtc)(void); -extern void save_time_delta(struct timespec *delta, struct timespec *rtc); -extern void restore_time_delta(struct timespec *delta, struct timespec *rtc); - #endif diff --git a/arch/arm/include/asm/mach/udc_pxa2xx.h b/arch/arm/include/asm/mach/udc_pxa2xx.h index f3eabf1..833306e 100644 --- a/arch/arm/include/asm/mach/udc_pxa2xx.h +++ b/arch/arm/include/asm/mach/udc_pxa2xx.h @@ -21,8 +21,8 @@ struct pxa2xx_udc_mach_info { * here. Note that sometimes the signals go through inverters... */ bool gpio_vbus_inverted; - u16 gpio_vbus; /* high == vbus present */ + int gpio_vbus; /* high == vbus present */ bool gpio_pullup_inverted; - u16 gpio_pullup; /* high == pullup activated */ + int gpio_pullup; /* high == pullup activated */ }; diff --git a/arch/arm/include/asm/memblock.h b/arch/arm/include/asm/memblock.h new file mode 100644 index 0000000..b8da2e4 --- /dev/null +++ b/arch/arm/include/asm/memblock.h @@ -0,0 +1,9 @@ +#ifndef _ASM_ARM_MEMBLOCK_H +#define _ASM_ARM_MEMBLOCK_H + +struct meminfo; +struct machine_desc; + +extern void arm_memblock_init(struct meminfo *, struct machine_desc *); + +#endif diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index 85763db..23c2e8e 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -44,7 +44,13 @@ * The module space lives between the addresses given by TASK_SIZE * and PAGE_OFFSET - it must be within 32MB of the kernel text. */ +#ifndef CONFIG_THUMB2_KERNEL #define MODULES_VADDR (PAGE_OFFSET - 16*1024*1024) +#else +/* smaller range for Thumb-2 symbols relocation (2^24)*/ +#define MODULES_VADDR (PAGE_OFFSET - 8*1024*1024) +#endif + #if TASK_SIZE > MODULES_VADDR #error Top of user space clashes with start of module space #endif @@ -70,6 +76,17 @@ */ #define IOREMAP_MAX_ORDER 24 +/* + * Size of DMA-consistent memory region. Must be multiple of 2M, + * between 2MB and 14MB inclusive. 
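The window is pinned at CONSISTENT_END (0xffe00000) and grows downward: CONSISTENT_BASE = CONSISTENT_END - CONSISTENT_DMA_SIZE, so the default SZ_2M puts the base at 0xffc00000. A platform needing more DMA-coherent space overrides the size from its mach/memory.h before this header is included, for example (illustrative values only):

    /* mach/memory.h on a hypothetical media board: 8 MiB of coherent
     * space; must stay a multiple of 2 MiB and at most 14 MiB. */
    #define CONSISTENT_DMA_SIZE SZ_8M      /* base becomes 0xff600000 */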
+ */ +#ifndef CONSISTENT_DMA_SIZE +#define CONSISTENT_DMA_SIZE SZ_2M +#endif + +#define CONSISTENT_END (0xffe00000UL) +#define CONSISTENT_BASE (CONSISTENT_END - CONSISTENT_DMA_SIZE) + #else /* CONFIG_MMU */ /* @@ -87,11 +104,11 @@ #endif #ifndef PHYS_OFFSET -#define PHYS_OFFSET (CONFIG_DRAM_BASE) +#define PHYS_OFFSET UL(CONFIG_DRAM_BASE) #endif #ifndef END_MEM -#define END_MEM (CONFIG_DRAM_BASE + CONFIG_DRAM_SIZE) +#define END_MEM (UL(CONFIG_DRAM_BASE) + CONFIG_DRAM_SIZE) #endif #ifndef PAGE_OFFSET @@ -107,11 +124,12 @@ #endif /* !CONFIG_MMU */ /* - * Size of DMA-consistent memory region. Must be multiple of 2M, - * between 2MB and 14MB inclusive. + * We fix the TCM memories max 32 KiB ITCM resp DTCM at these + * locations */ -#ifndef CONSISTENT_DMA_SIZE -#define CONSISTENT_DMA_SIZE SZ_2M +#ifdef CONFIG_HAVE_TCM +#define ITCM_OFFSET UL(0xfffe0000) +#define DTCM_OFFSET UL(0xfffe8000) #endif /* @@ -119,8 +137,10 @@ * private definitions which should NOT be used outside memory.h * files. Use virt_to_phys/phys_to_virt/__pa/__va instead. */ +#ifndef __virt_to_phys #define __virt_to_phys(x) ((x) - PAGE_OFFSET + PHYS_OFFSET) #define __phys_to_virt(x) ((x) - PHYS_OFFSET + PAGE_OFFSET) +#endif /* * Convert a physical address to a Page Frame Number and back @@ -128,6 +148,12 @@ #define __phys_to_pfn(paddr) ((paddr) >> PAGE_SHIFT) #define __pfn_to_phys(pfn) ((pfn) << PAGE_SHIFT) +/* + * Convert a page to/from a physical address + */ +#define page_to_phys(page) (__pfn_to_phys(page_to_pfn(page))) +#define phys_to_page(phys) (pfn_to_page(__phys_to_pfn(phys))) + #ifndef __ASSEMBLY__ /* @@ -141,7 +167,7 @@ #endif #ifndef arch_adjust_zones -#define arch_adjust_zones(node,size,holes) do { } while (0) +#define arch_adjust_zones(size,holes) do { } while (0) #elif !defined(CONFIG_ZONE_DMA) #error "custom arch_adjust_zones() requires CONFIG_ZONE_DMA" #endif @@ -188,7 +214,8 @@ static inline void *phys_to_virt(unsigned long x) #ifndef __virt_to_bus #define __virt_to_bus __virt_to_phys #define __bus_to_virt __phys_to_virt -#define __pfn_to_bus(x) ((x) << PAGE_SHIFT) +#define __pfn_to_bus(x) __pfn_to_phys(x) +#define __bus_to_pfn(x) __phys_to_pfn(x) #endif static inline __deprecated unsigned long virt_to_bus(void *x) @@ -212,102 +239,15 @@ static inline __deprecated void *bus_to_virt(unsigned long x) * * page_to_pfn(page) convert a struct page * to a PFN number * pfn_to_page(pfn) convert a _valid_ PFN number to struct page * - * pfn_valid(pfn) indicates whether a PFN number is valid * * virt_to_page(k) convert a _valid_ virtual address to struct page * * virt_addr_valid(k) indicates whether a virtual address is valid */ -#ifndef CONFIG_DISCONTIGMEM - #define ARCH_PFN_OFFSET PHYS_PFN_OFFSET -#ifndef CONFIG_SPARSEMEM -#define pfn_valid(pfn) ((pfn) >= PHYS_PFN_OFFSET && (pfn) < (PHYS_PFN_OFFSET + max_mapnr)) -#endif - #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) #define virt_addr_valid(kaddr) ((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory) -#define PHYS_TO_NID(addr) (0) - -#else /* CONFIG_DISCONTIGMEM */ - -/* - * This is more complex. We have a set of mem_map arrays spread - * around in memory. 
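The #ifndef guard added around __virt_to_phys()/__phys_to_virt() earlier in this hunk lets a platform supply its own translation while everyone else keeps the plain linear offset. A host-side sanity check of that linear arithmetic, using a common 3G/1G split as illustrative constants (not values taken from this patch):

    #include <stdio.h>

    #define PAGE_OFFSET 0xc0000000UL       /* kernel direct-map base (example) */
    #define PHYS_OFFSET 0x80000000UL       /* RAM base (example) */

    #define __virt_to_phys(x) ((x) - PAGE_OFFSET + PHYS_OFFSET)
    #define __phys_to_virt(x) ((x) - PHYS_OFFSET + PAGE_OFFSET)

    int main(void)
    {
            unsigned long va = 0xc0100000UL;
            unsigned long pa = __virt_to_phys(va);

            /* the round trip must be the identity: va -> pa -> va */
            printf("va %#lx -> pa %#lx -> va %#lx\n", va, pa, __phys_to_virt(pa));
            return 0;
    }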
- */ -#include <linux/numa.h> - -#define arch_pfn_to_nid(pfn) PFN_TO_NID(pfn) -#define arch_local_page_offset(pfn, nid) LOCAL_MAP_NR((pfn) << PAGE_SHIFT) - -#define pfn_valid(pfn) \ - ({ \ - unsigned int nid = PFN_TO_NID(pfn); \ - int valid = nid < MAX_NUMNODES; \ - if (valid) { \ - pg_data_t *node = NODE_DATA(nid); \ - valid = (pfn - node->node_start_pfn) < \ - node->node_spanned_pages; \ - } \ - valid; \ - }) - -#define virt_to_page(kaddr) \ - (ADDR_TO_MAPBASE(kaddr) + LOCAL_MAP_NR(kaddr)) - -#define virt_addr_valid(kaddr) (KVADDR_TO_NID(kaddr) < MAX_NUMNODES) - -/* - * Common discontigmem stuff. - * PHYS_TO_NID is used by the ARM kernel/setup.c - */ -#define PHYS_TO_NID(addr) PFN_TO_NID((addr) >> PAGE_SHIFT) - -/* - * Given a kaddr, ADDR_TO_MAPBASE finds the owning node of the memory - * and returns the mem_map of that node. - */ -#define ADDR_TO_MAPBASE(kaddr) NODE_MEM_MAP(KVADDR_TO_NID(kaddr)) - -/* - * Given a page frame number, find the owning node of the memory - * and returns the mem_map of that node. - */ -#define PFN_TO_MAPBASE(pfn) NODE_MEM_MAP(PFN_TO_NID(pfn)) - -#ifdef NODE_MEM_SIZE_BITS -#define NODE_MEM_SIZE_MASK ((1 << NODE_MEM_SIZE_BITS) - 1) - -/* - * Given a kernel address, find the home node of the underlying memory. - */ -#define KVADDR_TO_NID(addr) \ - (((unsigned long)(addr) - PAGE_OFFSET) >> NODE_MEM_SIZE_BITS) - -/* - * Given a page frame number, convert it to a node id. - */ -#define PFN_TO_NID(pfn) \ - (((pfn) - PHYS_PFN_OFFSET) >> (NODE_MEM_SIZE_BITS - PAGE_SHIFT)) - -/* - * Given a kaddr, LOCAL_MEM_MAP finds the owning node of the memory - * and returns the index corresponding to the appropriate page in the - * node's mem_map. - */ -#define LOCAL_MAP_NR(addr) \ - (((unsigned long)(addr) & NODE_MEM_SIZE_MASK) >> PAGE_SHIFT) - -#endif /* NODE_MEM_SIZE_BITS */ - -#endif /* !CONFIG_DISCONTIGMEM */ - -/* - * For BIO. "will die". Kill me when bio_to_phys() and bvec_to_phys() die. - */ -#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) - /* * Optional coherency support. Currently used only by selected * Intel XSC3-based systems. diff --git a/arch/arm/include/asm/mman.h b/arch/arm/include/asm/mman.h index fc26976..41f99c5 100644 --- a/arch/arm/include/asm/mman.h +++ b/arch/arm/include/asm/mman.h @@ -1,17 +1,4 @@ -#ifndef __ARM_MMAN_H__ -#define __ARM_MMAN_H__ +#include <asm-generic/mman.h> -#include <asm-generic/mman-common.h> - -#define MAP_GROWSDOWN 0x0100 /* stack-like segment */ -#define MAP_DENYWRITE 0x0800 /* ETXTBSY */ -#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */ -#define MAP_LOCKED 0x2000 /* pages are locked */ -#define MAP_NORESERVE 0x4000 /* don't check for reservations */ -#define MAP_POPULATE 0x8000 /* populate (prefault) page tables */ -#define MAP_NONBLOCK 0x10000 /* do not block on IO */ - -#define MCL_CURRENT 1 /* lock all current mappings */ -#define MCL_FUTURE 2 /* lock all future mappings */ - -#endif /* __ARM_MMAN_H__ */ +#define arch_mmap_check(addr, len, flags) \ + (((flags) & MAP_FIXED && (addr) < FIRST_USER_ADDRESS) ? -EINVAL : 0) diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h index b561584..b4ffe9d 100644 --- a/arch/arm/include/asm/mmu.h +++ b/arch/arm/include/asm/mmu.h @@ -6,12 +6,17 @@ typedef struct { #ifdef CONFIG_CPU_HAS_ASID unsigned int id; + spinlock_t id_lock; #endif unsigned int kvm_seq; } mm_context_t; #ifdef CONFIG_CPU_HAS_ASID #define ASID(mm) ((mm)->context.id & 255) + +/* init_mm.context.id_lock should be initialized. 
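A few hunks up, in mman.h, ARM switches to the asm-generic flag definitions and layers a single policy on top: arch_mmap_check() refuses a MAP_FIXED mapping below FIRST_USER_ADDRESS (PAGE_SIZE in this series), keeping the first page out of reach of user mappings. A host-side illustration of the predicate, with the constant values spelled out:

    #include <stdio.h>
    #include <errno.h>

    #define FIRST_USER_ADDRESS 4096UL      /* PAGE_SIZE; illustrative */
    #define MAP_FIXED 0x10

    static int arch_mmap_check(unsigned long addr, unsigned long len,
                               unsigned long flags)
    {
            return ((flags & MAP_FIXED) && addr < FIRST_USER_ADDRESS) ? -EINVAL : 0;
    }

    int main(void)
    {
            printf("%d\n", arch_mmap_check(0x0, 4096, MAP_FIXED));    /* -22 */
            printf("%d\n", arch_mmap_check(0x8000, 4096, MAP_FIXED)); /* 0 */
            return 0;
    }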
*/ +#define INIT_MM_CONTEXT(name) \ + .context.id_lock = __SPIN_LOCK_UNLOCKED(name.context.id_lock), #else #define ASID(mm) (0) #endif diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h index 263fed0..71605d9 100644 --- a/arch/arm/include/asm/mmu_context.h +++ b/arch/arm/include/asm/mmu_context.h @@ -18,7 +18,6 @@ #include <asm/cacheflush.h> #include <asm/cachetype.h> #include <asm/proc-fns.h> -#include <asm-generic/mm_hooks.h> void __check_kvm_seq(struct mm_struct *mm); @@ -43,12 +42,23 @@ void __check_kvm_seq(struct mm_struct *mm); #define ASID_FIRST_VERSION (1 << ASID_BITS) extern unsigned int cpu_last_asid; +#ifdef CONFIG_SMP +DECLARE_PER_CPU(struct mm_struct *, current_mm); +#endif void __init_new_context(struct task_struct *tsk, struct mm_struct *mm); void __new_context(struct mm_struct *mm); static inline void check_context(struct mm_struct *mm) { + /* + * This code is executed with interrupts enabled. Therefore, + * mm->context.id cannot be updated to the latest ASID version + * on a different CPU (and condition below not triggered) + * without first getting an IPI to reset the context. The + * alternative is to take a read_lock on mm->context.id_lock + * (after changing its type to rwlock_t). + */ if (unlikely((mm->context.id ^ cpu_last_asid) >> ASID_BITS)) __new_context(mm); @@ -62,8 +72,10 @@ static inline void check_context(struct mm_struct *mm) static inline void check_context(struct mm_struct *mm) { +#ifdef CONFIG_MMU if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq)) __check_kvm_seq(mm); +#endif } #define init_new_context(tsk,mm) 0 @@ -101,14 +113,19 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next, #ifdef CONFIG_SMP /* check for possible thread migration */ - if (!cpus_empty(next->cpu_vm_mask) && !cpu_isset(cpu, next->cpu_vm_mask)) + if (!cpumask_empty(mm_cpumask(next)) && + !cpumask_test_cpu(cpu, mm_cpumask(next))) __flush_icache_all(); #endif - if (!cpu_test_and_set(cpu, next->cpu_vm_mask) || prev != next) { + if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) { +#ifdef CONFIG_SMP + struct mm_struct **crt_mm = &per_cpu(current_mm, cpu); + *crt_mm = next; +#endif check_context(next); cpu_switch_mm(next->pgd, next); if (cache_is_vivt()) - cpu_clear(cpu, prev->cpu_vm_mask); + cpumask_clear_cpu(cpu, mm_cpumask(prev)); } #endif } @@ -116,4 +133,32 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next, #define deactivate_mm(tsk,mm) do { } while (0) #define activate_mm(prev,next) switch_mm(prev, next, NULL) +/* + * We are inserting a "fake" vma for the user-accessible vector page so + * gdb and friends can get to it through ptrace and /proc/<pid>/mem. + * But we also want to remove it before the generic code gets to see it + * during process exit or the unmapping of it would cause total havoc. 
+ * (the macro is used as remove_vma() is static to mm/mmap.c) + */ +#define arch_exit_mmap(mm) \ +do { \ + struct vm_area_struct *high_vma = find_vma(mm, 0xffff0000); \ + if (high_vma) { \ + BUG_ON(high_vma->vm_next); /* it should be last */ \ + if (high_vma->vm_prev) \ + high_vma->vm_prev->vm_next = NULL; \ + else \ + mm->mmap = NULL; \ + rb_erase(&high_vma->vm_rb, &mm->mm_rb); \ + mm->mmap_cache = NULL; \ + mm->map_count--; \ + remove_vma(high_vma); \ + } \ +} while (0) + +static inline void arch_dup_mmap(struct mm_struct *oldmm, + struct mm_struct *mm) +{ +} + #endif diff --git a/arch/arm/include/asm/mmzone.h b/arch/arm/include/asm/mmzone.h deleted file mode 100644 index ae63a4f..0000000 --- a/arch/arm/include/asm/mmzone.h +++ /dev/null @@ -1,30 +0,0 @@ -/* - * arch/arm/include/asm/mmzone.h - * - * 1999-12-29 Nicolas Pitre Created - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ -#ifndef __ASM_MMZONE_H -#define __ASM_MMZONE_H - -/* - * Currently defined in arch/arm/mm/discontig.c - */ -extern pg_data_t discontig_node_data[]; - -/* - * Return a pointer to the node data for node n. - */ -#define NODE_DATA(nid) (&discontig_node_data[nid]) - -/* - * NODE_MEM_MAP gives the kaddr for the mem_map of the node. - */ -#define NODE_MEM_MAP(nid) (NODE_DATA(nid)->node_mem_map) - -#include <mach/memory.h> - -#endif diff --git a/arch/arm/include/asm/module.h b/arch/arm/include/asm/module.h index e4dfa69..12c8e68 100644 --- a/arch/arm/include/asm/module.h +++ b/arch/arm/include/asm/module.h @@ -7,18 +7,20 @@ struct unwind_table; -struct mod_arch_specific -{ #ifdef CONFIG_ARM_UNWIND - Elf_Shdr *unw_sec_init; - Elf_Shdr *unw_sec_devinit; - Elf_Shdr *unw_sec_core; - Elf_Shdr *sec_init_text; - Elf_Shdr *sec_devinit_text; - Elf_Shdr *sec_core_text; - struct unwind_table *unwind_init; - struct unwind_table *unwind_devinit; - struct unwind_table *unwind_core; +enum { + ARM_SEC_INIT, + ARM_SEC_DEVINIT, + ARM_SEC_CORE, + ARM_SEC_EXIT, + ARM_SEC_DEVEXIT, + ARM_SEC_MAX, +}; +#endif + +struct mod_arch_specific { +#ifdef CONFIG_ARM_UNWIND + struct unwind_table *unwind[ARM_SEC_MAX]; #endif }; diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h new file mode 100644 index 0000000..fc19009 --- /dev/null +++ b/arch/arm/include/asm/outercache.h @@ -0,0 +1,99 @@ +/* + * arch/arm/include/asm/outercache.h + * + * Copyright (C) 2010 ARM Ltd. + * Written by Catalin Marinas <catalin.marinas@arm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __ASM_OUTERCACHE_H +#define __ASM_OUTERCACHE_H + +struct outer_cache_fns { + void (*inv_range)(unsigned long, unsigned long); + void (*clean_range)(unsigned long, unsigned long); + void (*flush_range)(unsigned long, unsigned long); + void (*flush_all)(void); + void (*inv_all)(void); + void (*disable)(void); +#ifdef CONFIG_OUTER_CACHE_SYNC + void (*sync)(void); +#endif +}; + +#ifdef CONFIG_OUTER_CACHE + +extern struct outer_cache_fns outer_cache; + +static inline void outer_inv_range(unsigned long start, unsigned long end) +{ + if (outer_cache.inv_range) + outer_cache.inv_range(start, end); +} +static inline void outer_clean_range(unsigned long start, unsigned long end) +{ + if (outer_cache.clean_range) + outer_cache.clean_range(start, end); +} +static inline void outer_flush_range(unsigned long start, unsigned long end) +{ + if (outer_cache.flush_range) + outer_cache.flush_range(start, end); +} + +static inline void outer_flush_all(void) +{ + if (outer_cache.flush_all) + outer_cache.flush_all(); +} + +static inline void outer_inv_all(void) +{ + if (outer_cache.inv_all) + outer_cache.inv_all(); +} + +static inline void outer_disable(void) +{ + if (outer_cache.disable) + outer_cache.disable(); +} + +#else + +static inline void outer_inv_range(unsigned long start, unsigned long end) +{ } +static inline void outer_clean_range(unsigned long start, unsigned long end) +{ } +static inline void outer_flush_range(unsigned long start, unsigned long end) +{ } +static inline void outer_flush_all(void) { } +static inline void outer_inv_all(void) { } +static inline void outer_disable(void) { } + +#endif + +#ifdef CONFIG_OUTER_CACHE_SYNC +static inline void outer_sync(void) +{ + if (outer_cache.sync) + outer_cache.sync(); +} +#else +static inline void outer_sync(void) +{ } +#endif + +#endif /* __ASM_OUTERCACHE_H */ diff --git a/arch/arm/include/asm/page-nommu.h b/arch/arm/include/asm/page-nommu.h index 3574c0d..d1b162a 100644 --- a/arch/arm/include/asm/page-nommu.h +++ b/arch/arm/include/asm/page-nommu.h @@ -43,7 +43,4 @@ typedef unsigned long pgprot_t; #define __pmd(x) (x) #define __pgprot(x) (x) -extern unsigned long memory_start; -extern unsigned long memory_end; - #endif diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h index be962c1..f51a695 100644 --- a/arch/arm/include/asm/page.h +++ b/arch/arm/include/asm/page.h @@ -12,7 +12,7 @@ /* PAGE_SHIFT determines the page size */ #define PAGE_SHIFT 12 -#define PAGE_SIZE (1UL << PAGE_SHIFT) +#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT) #define PAGE_MASK (~(PAGE_SIZE-1)) #ifndef __ASSEMBLY__ @@ -117,11 +117,12 @@ #endif struct page; +struct vm_area_struct; struct cpu_user_fns { void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr); void (*cpu_copy_user_highpage)(struct page *to, struct page *from, - unsigned long vaddr); + unsigned long vaddr, struct vm_area_struct *vma); }; #ifdef MULTI_USER @@ -137,7 +138,7 @@ extern struct cpu_user_fns cpu_user; extern void __cpu_clear_user_highpage(struct page *page, unsigned long vaddr); extern void __cpu_copy_user_highpage(struct page *to, struct page *from, - unsigned long vaddr); + unsigned long vaddr, struct vm_area_struct *vma); #endif #define clear_user_highpage(page,vaddr) \ @@ -145,18 +146,20 @@ extern void __cpu_copy_user_highpage(struct page *to, 
struct page *from, #define __HAVE_ARCH_COPY_USER_HIGHPAGE #define copy_user_highpage(to,from,vaddr,vma) \ - __cpu_copy_user_highpage(to, from, vaddr) + __cpu_copy_user_highpage(to, from, vaddr, vma) #define clear_page(page) memset((void *)(page), 0, PAGE_SIZE) extern void copy_page(void *to, const void *from); +typedef unsigned long pteval_t; + #undef STRICT_MM_TYPECHECKS #ifdef STRICT_MM_TYPECHECKS /* * These are used to make use of C type-checking.. */ -typedef struct { unsigned long pte; } pte_t; +typedef struct { pteval_t pte; } pte_t; typedef struct { unsigned long pmd; } pmd_t; typedef struct { unsigned long pgd[2]; } pgd_t; typedef struct { unsigned long pgprot; } pgprot_t; @@ -174,7 +177,7 @@ typedef struct { unsigned long pgprot; } pgprot_t; /* * .. while these make it easier on the compiler */ -typedef unsigned long pte_t; +typedef pteval_t pte_t; typedef unsigned long pmd_t; typedef unsigned long pgd_t[2]; typedef unsigned long pgprot_t; @@ -194,6 +197,10 @@ typedef unsigned long pgprot_t; typedef struct page *pgtable_t; +#ifndef CONFIG_SPARSEMEM +extern int pfn_valid(unsigned long); +#endif + #include <asm/memory.h> #endif /* !__ASSEMBLY__ */ diff --git a/arch/arm/include/asm/pci.h b/arch/arm/include/asm/pci.h index 0abf386..92e2a83 100644 --- a/arch/arm/include/asm/pci.h +++ b/arch/arm/include/asm/pci.h @@ -4,9 +4,22 @@ #ifdef __KERNEL__ #include <asm-generic/pci-dma-compat.h> +#include <asm/mach/pci.h> /* for pci_sys_data */ #include <mach/hardware.h> /* for PCIBIOS_MIN_* */ -#define pcibios_scan_all_fns(a, b) 0 +#ifdef CONFIG_PCI_DOMAINS +static inline int pci_domain_nr(struct pci_bus *bus) +{ + struct pci_sys_data *root = bus->sysdata; + + return root->domain; +} + +static inline int pci_proc_domain(struct pci_bus *bus) +{ + return pci_domain_nr(bus); +} +#endif /* CONFIG_PCI_DOMAINS */ #ifdef CONFIG_PCI_HOST_ITE8152 /* ITE bridge requires setting latency timer to avoid early bus access @@ -32,17 +45,6 @@ static inline void pcibios_penalize_isa_irq(int irq, int active) */ #define PCI_DMA_BUS_IS_PHYS (1) -/* - * Whether pci_unmap_{single,page} is a nop depends upon the - * configuration. - */ -#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME; -#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME; -#define pci_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME) -#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL)) -#define pci_unmap_len(PTR, LEN_NAME) ((PTR)->LEN_NAME) -#define pci_unmap_len_set(PTR, LEN_NAME, VAL) (((PTR)->LEN_NAME) = (VAL)) - #ifdef CONFIG_PCI static inline void pci_dma_burst_advice(struct pci_dev *pdev, enum pci_dma_burst_strategy *strat, diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h new file mode 100644 index 0000000..c4aa4e8 --- /dev/null +++ b/arch/arm/include/asm/perf_event.h @@ -0,0 +1,36 @@ +/* + * linux/arch/arm/include/asm/perf_event.h + * + * Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#ifndef __ARM_PERF_EVENT_H__ +#define __ARM_PERF_EVENT_H__ + +/* ARM performance counters start from 1 (in the cp15 accesses) so use the + * same indexes here for consistency. */ +#define PERF_EVENT_INDEX_OFFSET 1 + +/* ARM perf PMU IDs for use by internal perf clients. 
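The enum that follows gives internal clients a stable way to ask which PMU flavour is present instead of sniffing CPU IDs themselves. A consumer might dispatch on it as sketched here; the *_event_map tables are hypothetical, only armpmu_get_pmu_id() and the ID constants come from this header:

    #include <asm/perf_event.h>

    extern const unsigned int armv6_event_map[], armv7_event_map[]; /* hypothetical */

    static const unsigned int *select_event_map(void)
    {
            switch (armpmu_get_pmu_id()) {
            case ARM_PERF_PMU_ID_CA8:
            case ARM_PERF_PMU_ID_CA9:
                    return armv7_event_map;
            case ARM_PERF_PMU_ID_V6:
            case ARM_PERF_PMU_ID_V6MP:
                    return armv6_event_map;
            default:
                    return NULL;    /* XScale etc. would get their own tables */
            }
    }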
*/ +enum arm_perf_pmu_ids { + ARM_PERF_PMU_ID_XSCALE1 = 0, + ARM_PERF_PMU_ID_XSCALE2, + ARM_PERF_PMU_ID_V6, + ARM_PERF_PMU_ID_V6MP, + ARM_PERF_PMU_ID_CA8, + ARM_PERF_PMU_ID_CA9, + ARM_NUM_PMU_IDS, +}; + +extern enum arm_perf_pmu_ids +armpmu_get_pmu_id(void); + +extern int +armpmu_get_max_events(void); + +#endif /* __ARM_PERF_EVENT_H__ */ diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h index 3dcd64b..9763be0 100644 --- a/arch/arm/include/asm/pgalloc.h +++ b/arch/arm/include/asm/pgalloc.h @@ -30,11 +30,15 @@ #define pmd_free(mm, pmd) do { } while (0) #define pgd_populate(mm,pmd,pte) BUG() -extern pgd_t *get_pgd_slow(struct mm_struct *mm); -extern void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd); +extern pgd_t *pgd_alloc(struct mm_struct *mm); +extern void pgd_free(struct mm_struct *mm, pgd_t *pgd); -#define pgd_alloc(mm) get_pgd_slow(mm) -#define pgd_free(mm, pgd) free_pgd_slow(mm, pgd) +#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO) + +static inline void clean_pte_table(pte_t *pte) +{ + clean_dcache_area(pte + PTE_HWTABLE_PTRS, PTE_HWTABLE_SIZE); +} /* * Allocate one PTE table. @@ -43,25 +47,23 @@ extern void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd); * into one table thus: * * +------------+ - * | h/w pt 0 | - * +------------+ - * | h/w pt 1 | - * +------------+ * | Linux pt 0 | * +------------+ * | Linux pt 1 | * +------------+ + * | h/w pt 0 | + * +------------+ + * | h/w pt 1 | + * +------------+ */ static inline pte_t * pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr) { pte_t *pte; - pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO); - if (pte) { - clean_dcache_area(pte, sizeof(pte_t) * PTRS_PER_PTE); - pte += PTRS_PER_PTE; - } + pte = (pte_t *)__get_free_page(PGALLOC_GFP); + if (pte) + clean_pte_table(pte); return pte; } @@ -71,10 +73,14 @@ pte_alloc_one(struct mm_struct *mm, unsigned long addr) { struct page *pte; - pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0); +#ifdef CONFIG_HIGHPTE + pte = alloc_pages(PGALLOC_GFP | __GFP_HIGHMEM, 0); +#else + pte = alloc_pages(PGALLOC_GFP, 0); +#endif if (pte) { - void *page = page_address(pte); - clean_dcache_area(page, sizeof(pte_t) * PTRS_PER_PTE); + if (!PageHighMem(pte)) + clean_pte_table(page_address(pte)); pgtable_page_ctor(pte); } @@ -86,10 +92,8 @@ pte_alloc_one(struct mm_struct *mm, unsigned long addr) */ static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) { - if (pte) { - pte -= PTRS_PER_PTE; + if (pte) free_page((unsigned long)pte); - } } static inline void pte_free(struct mm_struct *mm, pgtable_t pte) @@ -98,8 +102,10 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte) __free_page(pte); } -static inline void __pmd_populate(pmd_t *pmdp, unsigned long pmdval) +static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte, + unsigned long prot) { + unsigned long pmdval = (pte + PTE_HWTABLE_OFF) | prot; pmdp[0] = __pmd(pmdval); pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t)); flush_pmd_entry(pmdp); @@ -114,20 +120,16 @@ static inline void __pmd_populate(pmd_t *pmdp, unsigned long pmdval) static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep) { - unsigned long pte_ptr = (unsigned long)ptep; - /* - * The pmd must be loaded with the physical - * address of the PTE table + * The pmd must be loaded with the physical address of the PTE table */ - pte_ptr -= PTRS_PER_PTE * sizeof(void *); - __pmd_populate(pmdp, __pa(pte_ptr) | _PAGE_KERNEL_TABLE); + __pmd_populate(pmdp, 
__pa(ptep), _PAGE_KERNEL_TABLE); } static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep) { - __pmd_populate(pmdp, page_to_pfn(ptep) << PAGE_SHIFT | _PAGE_USER_TABLE); + __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE); } #define pmd_pgtable(pmd) pmd_page(pmd) diff --git a/arch/arm/include/asm/pgtable-nommu.h b/arch/arm/include/asm/pgtable-nommu.h index b011f2e..ffc0e857 100644 --- a/arch/arm/include/asm/pgtable-nommu.h +++ b/arch/arm/include/asm/pgtable-nommu.h @@ -67,6 +67,7 @@ static inline int pte_file(pte_t pte) { return 0; } */ #define pgprot_noncached(prot) __pgprot(0) #define pgprot_writecombine(prot) __pgprot(0) +#define pgprot_dmacoherent(prot) __pgprot(0) /* @@ -86,8 +87,8 @@ extern unsigned int kobjsize(const void *objp); * All 32bit addresses are effectively valid for vmalloc... * Sort of meaningless for non-VM targets. */ -#define VMALLOC_START 0 -#define VMALLOC_END 0xffffffff +#define VMALLOC_START 0UL +#define VMALLOC_END 0xffffffffUL #define FIRST_USER_ADDRESS (0) diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index 1cd2d64..ebcb643 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -10,6 +10,7 @@ #ifndef _ASMARM_PGTABLE_H #define _ASMARM_PGTABLE_H +#include <linux/const.h> #include <asm-generic/4level-fixup.h> #include <asm/proc-fns.h> @@ -54,7 +55,7 @@ * Therefore, we tweak the implementation slightly - we tell Linux that we * have 2048 entries in the first level, each of which is 8 bytes (iow, two * hardware pointers to the second level.) The second level contains two - * hardware PTE tables arranged contiguously, followed by Linux versions + * hardware PTE tables arranged contiguously, preceded by Linux versions * which contain the state information Linux needs. We, therefore, end up * with 512 entries in the "PTE" level. 
* @@ -62,15 +63,15 @@ * * pgd pte * | | - * +--------+ +0 - * | |-----> +------------+ +0 + * +--------+ + * | | +------------+ +0 + * +- - - - + | Linux pt 0 | + * | | +------------+ +1024 + * +--------+ +0 | Linux pt 1 | + * | |-----> +------------+ +2048 * +- - - - + +4 | h/w pt 0 | - * | |-----> +------------+ +1024 + * | |-----> +------------+ +3072 * +--------+ +8 | h/w pt 1 | - * | | +------------+ +2048 - * +- - - - + | Linux pt 0 | - * | | +------------+ +3072 - * +--------+ | Linux pt 1 | * | | +------------+ +4096 * * See L_PTE_xxx below for definitions of bits in the "Linux pt", and @@ -102,6 +103,10 @@ #define PTRS_PER_PMD 1 #define PTRS_PER_PGD 2048 +#define PTE_HWTABLE_PTRS (PTRS_PER_PTE) +#define PTE_HWTABLE_OFF (PTE_HWTABLE_PTRS * sizeof(pte_t)) +#define PTE_HWTABLE_SIZE (PTRS_PER_PTE * sizeof(u32)) + /* * PMD_SHIFT determines the size of the area a second-level page table can map * PGDIR_SHIFT determines what a third-level page table entry can map @@ -112,13 +117,13 @@ #define LIBRARY_TEXT_START 0x0c000000 #ifndef __ASSEMBLY__ -extern void __pte_error(const char *file, int line, unsigned long val); -extern void __pmd_error(const char *file, int line, unsigned long val); -extern void __pgd_error(const char *file, int line, unsigned long val); +extern void __pte_error(const char *file, int line, pte_t); +extern void __pmd_error(const char *file, int line, pmd_t); +extern void __pgd_error(const char *file, int line, pgd_t); -#define pte_ERROR(pte) __pte_error(__FILE__, __LINE__, pte_val(pte)) -#define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd_val(pmd)) -#define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd_val(pgd)) +#define pte_ERROR(pte) __pte_error(__FILE__, __LINE__, pte) +#define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd) +#define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd) #endif /* !__ASSEMBLY__ */ #define PMD_SIZE (1UL << PMD_SHIFT) @@ -133,8 +138,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val); */ #define FIRST_USER_ADDRESS PAGE_SIZE -#define FIRST_USER_PGD_NR 1 -#define USER_PTRS_PER_PGD ((TASK_SIZE/PGDIR_SIZE) - FIRST_USER_PGD_NR) +#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE) /* * section address mask and size definitions. @@ -161,32 +165,30 @@ extern void __pgd_error(const char *file, int line, unsigned long val); * The PTE table pointer refers to the hardware entries; the "Linux" * entries are stored 1024 bytes below. 
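With PTRS_PER_PTE == 512 and 4-byte entries, the new PTE_HWTABLE_* constants place the two Linux tables in the first half of the page and the two hardware tables in the second, matching the reordered diagram above. A quick host-side check of the arithmetic (uint32_t stands in for the 32-bit pte_t):

    #include <stdio.h>
    #include <stdint.h>

    #define PTRS_PER_PTE     512
    #define PTE_HWTABLE_PTRS (PTRS_PER_PTE)
    #define PTE_HWTABLE_OFF  (PTE_HWTABLE_PTRS * sizeof(uint32_t))
    #define PTE_HWTABLE_SIZE (PTRS_PER_PTE * sizeof(uint32_t))

    int main(void)
    {
            printf("Linux PTEs: [0, %zu)\n", PTE_HWTABLE_OFF);  /* [0, 2048) */
            printf("h/w PTEs:   [%zu, %zu)\n", PTE_HWTABLE_OFF,
                   PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);         /* [2048, 4096) */
            return 0;
    }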
*/ -#define L_PTE_PRESENT (1 << 0) -#define L_PTE_FILE (1 << 1) /* only when !PRESENT */ -#define L_PTE_YOUNG (1 << 1) -#define L_PTE_BUFFERABLE (1 << 2) /* obsolete, matches PTE */ -#define L_PTE_CACHEABLE (1 << 3) /* obsolete, matches PTE */ -#define L_PTE_DIRTY (1 << 6) -#define L_PTE_WRITE (1 << 7) -#define L_PTE_USER (1 << 8) -#define L_PTE_EXEC (1 << 9) -#define L_PTE_SHARED (1 << 10) /* shared(v6), coherent(xsc3) */ +#define L_PTE_PRESENT (_AT(pteval_t, 1) << 0) +#define L_PTE_YOUNG (_AT(pteval_t, 1) << 1) +#define L_PTE_FILE (_AT(pteval_t, 1) << 2) /* only when !PRESENT */ +#define L_PTE_DIRTY (_AT(pteval_t, 1) << 6) +#define L_PTE_RDONLY (_AT(pteval_t, 1) << 7) +#define L_PTE_USER (_AT(pteval_t, 1) << 8) +#define L_PTE_XN (_AT(pteval_t, 1) << 9) +#define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */ /* * These are the memory types, defined to be compatible with * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB */ -#define L_PTE_MT_UNCACHED (0x00 << 2) /* 0000 */ -#define L_PTE_MT_BUFFERABLE (0x01 << 2) /* 0001 */ -#define L_PTE_MT_WRITETHROUGH (0x02 << 2) /* 0010 */ -#define L_PTE_MT_WRITEBACK (0x03 << 2) /* 0011 */ -#define L_PTE_MT_MINICACHE (0x06 << 2) /* 0110 (sa1100, xscale) */ -#define L_PTE_MT_WRITEALLOC (0x07 << 2) /* 0111 */ -#define L_PTE_MT_DEV_SHARED (0x04 << 2) /* 0100 */ -#define L_PTE_MT_DEV_NONSHARED (0x0c << 2) /* 1100 */ -#define L_PTE_MT_DEV_WC (0x09 << 2) /* 1001 */ -#define L_PTE_MT_DEV_CACHED (0x0b << 2) /* 1011 */ -#define L_PTE_MT_MASK (0x0f << 2) +#define L_PTE_MT_UNCACHED (_AT(pteval_t, 0x00) << 2) /* 0000 */ +#define L_PTE_MT_BUFFERABLE (_AT(pteval_t, 0x01) << 2) /* 0001 */ +#define L_PTE_MT_WRITETHROUGH (_AT(pteval_t, 0x02) << 2) /* 0010 */ +#define L_PTE_MT_WRITEBACK (_AT(pteval_t, 0x03) << 2) /* 0011 */ +#define L_PTE_MT_MINICACHE (_AT(pteval_t, 0x06) << 2) /* 0110 (sa1100, xscale) */ +#define L_PTE_MT_WRITEALLOC (_AT(pteval_t, 0x07) << 2) /* 0111 */ +#define L_PTE_MT_DEV_SHARED (_AT(pteval_t, 0x04) << 2) /* 0100 */ +#define L_PTE_MT_DEV_NONSHARED (_AT(pteval_t, 0x0c) << 2) /* 1100 */ +#define L_PTE_MT_DEV_WC (_AT(pteval_t, 0x09) << 2) /* 1001 */ +#define L_PTE_MT_DEV_CACHED (_AT(pteval_t, 0x0b) << 2) /* 1011 */ +#define L_PTE_MT_MASK (_AT(pteval_t, 0x0f) << 2) #ifndef __ASSEMBLY__ @@ -203,23 +205,44 @@ extern pgprot_t pgprot_kernel; #define _MOD_PROT(p, b) __pgprot(pgprot_val(p) | (b)) -#define PAGE_NONE pgprot_user -#define PAGE_SHARED _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_WRITE) -#define PAGE_SHARED_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_WRITE | L_PTE_EXEC) -#define PAGE_COPY _MOD_PROT(pgprot_user, L_PTE_USER) -#define PAGE_COPY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_EXEC) -#define PAGE_READONLY _MOD_PROT(pgprot_user, L_PTE_USER) -#define PAGE_READONLY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_EXEC) -#define PAGE_KERNEL pgprot_kernel -#define PAGE_KERNEL_EXEC _MOD_PROT(pgprot_kernel, L_PTE_EXEC) - -#define __PAGE_NONE __pgprot(_L_PTE_DEFAULT) -#define __PAGE_SHARED __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_WRITE) -#define __PAGE_SHARED_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_WRITE | L_PTE_EXEC) -#define __PAGE_COPY __pgprot(_L_PTE_DEFAULT | L_PTE_USER) -#define __PAGE_COPY_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_EXEC) -#define __PAGE_READONLY __pgprot(_L_PTE_DEFAULT | L_PTE_USER) -#define __PAGE_READONLY_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_EXEC) +#define PAGE_NONE _MOD_PROT(pgprot_user, L_PTE_XN | L_PTE_RDONLY) +#define PAGE_SHARED _MOD_PROT(pgprot_user, 
L_PTE_USER | L_PTE_XN) +#define PAGE_SHARED_EXEC _MOD_PROT(pgprot_user, L_PTE_USER) +#define PAGE_COPY _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN) +#define PAGE_COPY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY) +#define PAGE_READONLY _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN) +#define PAGE_READONLY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY) +#define PAGE_KERNEL _MOD_PROT(pgprot_kernel, L_PTE_XN) +#define PAGE_KERNEL_EXEC pgprot_kernel + +#define __PAGE_NONE __pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN) +#define __PAGE_SHARED __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN) +#define __PAGE_SHARED_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER) +#define __PAGE_COPY __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN) +#define __PAGE_COPY_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY) +#define __PAGE_READONLY __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN) +#define __PAGE_READONLY_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY) + +#define __pgprot_modify(prot,mask,bits) \ + __pgprot((pgprot_val(prot) & ~(mask)) | (bits)) + +#define pgprot_noncached(prot) \ + __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED) + +#define pgprot_writecombine(prot) \ + __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE) + +#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE +#define pgprot_dmacoherent(prot) \ + __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE | L_PTE_XN) +#define __HAVE_PHYS_MEM_ACCESS_PROT +struct file; +extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, + unsigned long size, pgprot_t vma_prot); +#else +#define pgprot_dmacoherent(prot) \ + __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED | L_PTE_XN) +#endif #endif /* __ASSEMBLY__ */ @@ -257,62 +280,31 @@ extern pgprot_t pgprot_kernel; extern struct page *empty_zero_page; #define ZERO_PAGE(vaddr) (empty_zero_page) -#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT) -#define pfn_pte(pfn,prot) (__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))) -#define pte_none(pte) (!pte_val(pte)) -#define pte_clear(mm,addr,ptep) set_pte_ext(ptep, __pte(0), 0) -#define pte_page(pte) (pfn_to_page(pte_pfn(pte))) -#define pte_offset_kernel(dir,addr) (pmd_page_vaddr(*(dir)) + __pte_index(addr)) -#define pte_offset_map(dir,addr) (pmd_page_vaddr(*(dir)) + __pte_index(addr)) -#define pte_offset_map_nested(dir,addr) (pmd_page_vaddr(*(dir)) + __pte_index(addr)) -#define pte_unmap(pte) do { } while (0) -#define pte_unmap_nested(pte) do { } while (0) +extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; -#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext) +/* to find an entry in a page-table-directory */ +#define pgd_index(addr) ((addr) >> PGDIR_SHIFT) -#define set_pte_at(mm,addr,ptep,pteval) do { \ - set_pte_ext(ptep, pteval, (addr) >= TASK_SIZE ? 0 : PTE_EXT_NG); \ - } while (0) +#define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr)) -/* - * The following only work if pte_present() is true. - * Undefined behaviour if not.. - */ -#define pte_present(pte) (pte_val(pte) & L_PTE_PRESENT) -#define pte_write(pte) (pte_val(pte) & L_PTE_WRITE) -#define pte_dirty(pte) (pte_val(pte) & L_PTE_DIRTY) -#define pte_young(pte) (pte_val(pte) & L_PTE_YOUNG) -#define pte_special(pte) (0) +/* to find an entry in a kernel page-table-directory */ +#define pgd_offset_k(addr) pgd_offset(&init_mm, addr) /* - * The following only works if pte_present() is not true. 
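Note the sign flip in the protection bits earlier in this hunk: L_PTE_WRITE and L_PTE_EXEC are gone, replaced by L_PTE_RDONLY and L_PTE_XN, so write and execute permission are now the absence of a bit. The pte_write()/pte_exec() tests further down reflect that. A host-side illustration using the bit positions from this patch (bit 7 and bit 9):

    #include <stdio.h>
    #include <stdint.h>

    #define L_PTE_RDONLY (1u << 7)
    #define L_PTE_XN     (1u << 9)

    #define pte_write(v) (!((v) & L_PTE_RDONLY))
    #define pte_exec(v)  (!((v) & L_PTE_XN))

    int main(void)
    {
            uint32_t shared_exec = 0;                    /* neither bit set */
            uint32_t readonly = L_PTE_RDONLY | L_PTE_XN;

            printf("shared_exec: write=%d exec=%d\n",
                   pte_write(shared_exec), pte_exec(shared_exec)); /* 1 1 */
            printf("readonly:    write=%d exec=%d\n",
                   pte_write(readonly), pte_exec(readonly));       /* 0 0 */
            return 0;
    }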
+ * The "pgd_xxx()" functions here are trivial for a folded two-level + * setup: the pgd is never bad, and a pmd always exists (as it's folded + * into the pgd entry) */ -#define pte_file(pte) (pte_val(pte) & L_PTE_FILE) -#define pte_to_pgoff(x) (pte_val(x) >> 2) -#define pgoff_to_pte(x) __pte(((x) << 2) | L_PTE_FILE) - -#define PTE_FILE_MAX_BITS 30 - -#define PTE_BIT_FUNC(fn,op) \ -static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; } - -PTE_BIT_FUNC(wrprotect, &= ~L_PTE_WRITE); -PTE_BIT_FUNC(mkwrite, |= L_PTE_WRITE); -PTE_BIT_FUNC(mkclean, &= ~L_PTE_DIRTY); -PTE_BIT_FUNC(mkdirty, |= L_PTE_DIRTY); -PTE_BIT_FUNC(mkold, &= ~L_PTE_YOUNG); -PTE_BIT_FUNC(mkyoung, |= L_PTE_YOUNG); +#define pgd_none(pgd) (0) +#define pgd_bad(pgd) (0) +#define pgd_present(pgd) (1) +#define pgd_clear(pgdp) do { } while (0) +#define set_pgd(pgd,pgdp) do { } while (0) -static inline pte_t pte_mkspecial(pte_t pte) { return pte; } -/* - * Mark the prot value as uncacheable and unbufferable. - */ -#define pgprot_noncached(prot) \ - __pgprot((pgprot_val(prot) & ~L_PTE_MT_MASK) | L_PTE_MT_UNCACHED) -#define pgprot_writecombine(prot) \ - __pgprot((pgprot_val(prot) & ~L_PTE_MT_MASK) | L_PTE_MT_BUFFERABLE) +/* Find an entry in the second-level page table.. */ +#define pmd_offset(dir, addr) ((pmd_t *)(dir)) #define pmd_none(pmd) (!pmd_val(pmd)) #define pmd_present(pmd) (pmd_val(pmd)) @@ -334,66 +326,133 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; } static inline pte_t *pmd_page_vaddr(pmd_t pmd) { - unsigned long ptr; - - ptr = pmd_val(pmd) & ~(PTRS_PER_PTE * sizeof(void *) - 1); - ptr += PTRS_PER_PTE * sizeof(void *); - - return __va(ptr); + return __va(pmd_val(pmd) & PAGE_MASK); } #define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd))) -/* - * Conversion functions: convert a page and protection to a page entry, - * and a page entry and page directory to the page they refer to. - */ -#define mk_pte(page,prot) pfn_pte(page_to_pfn(page),prot) +/* we don't need complex calculations here as the pmd is folded into the pgd */ +#define pmd_addr_end(addr,end) (end) -/* - * The "pgd_xxx()" functions here are trivial for a folded two-level - * setup: the pgd is never bad, and a pmd always exists (as it's folded - * into the pgd entry) - */ -#define pgd_none(pgd) (0) -#define pgd_bad(pgd) (0) -#define pgd_present(pgd) (1) -#define pgd_clear(pgdp) do { } while (0) -#define set_pgd(pgd,pgdp) do { } while (0) -/* to find an entry in a page-table-directory */ -#define pgd_index(addr) ((addr) >> PGDIR_SHIFT) +#ifndef CONFIG_HIGHPTE +#define __pte_map(pmd) pmd_page_vaddr(*(pmd)) +#define __pte_unmap(pte) do { } while (0) +#else +#define __pte_map(pmd) (pte_t *)kmap_atomic(pmd_page(*(pmd))) +#define __pte_unmap(pte) kunmap_atomic(pte) +#endif -#define pgd_offset(mm, addr) ((mm)->pgd+pgd_index(addr)) +#define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) -/* to find an entry in a kernel page-table-directory */ -#define pgd_offset_k(addr) pgd_offset(&init_mm, addr) +#define pte_offset_kernel(pmd,addr) (pmd_page_vaddr(*(pmd)) + pte_index(addr)) -/* Find an entry in the second-level page table.. 
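Because ARM folds the pmd into the pgd, the walk helpers here are near no-ops: pgd entries are never bad and pmd_offset() merely re-types the pgd pointer. A full kernel-address walk therefore reduces to the following sketch (kernel context assumed; all helpers are the ones defined in this header):

    static pte_t *lookup_kernel_pte(unsigned long addr)
    {
            pgd_t *pgd = pgd_offset_k(addr);
            pmd_t *pmd = pmd_offset(pgd, addr);     /* trivial: pmd folded into pgd */

            if (pmd_none(*pmd))
                    return NULL;                    /* nothing mapped here */
            return pte_offset_kernel(pmd, addr);    /* Linux copy of the PTE */
    }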
*/ -#define pmd_offset(dir, addr) ((pmd_t *)(dir)) +#define pte_offset_map(pmd,addr) (__pte_map(pmd) + pte_index(addr)) +#define pte_unmap(pte) __pte_unmap(pte) + +#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT) +#define pfn_pte(pfn,prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)) + +#define pte_page(pte) pfn_to_page(pte_pfn(pte)) +#define mk_pte(page,prot) pfn_pte(page_to_pfn(page), prot) + +#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext) +#define pte_clear(mm,addr,ptep) set_pte_ext(ptep, __pte(0), 0) + +#if __LINUX_ARM_ARCH__ < 6 +static inline void __sync_icache_dcache(pte_t pteval) +{ +} +#else +extern void __sync_icache_dcache(pte_t pteval); +#endif + +static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pteval) +{ + if (addr >= TASK_SIZE) + set_pte_ext(ptep, pteval, 0); + else { + __sync_icache_dcache(pteval); + set_pte_ext(ptep, pteval, PTE_EXT_NG); + } +} + +#define pte_none(pte) (!pte_val(pte)) +#define pte_present(pte) (pte_val(pte) & L_PTE_PRESENT) +#define pte_write(pte) (!(pte_val(pte) & L_PTE_RDONLY)) +#define pte_dirty(pte) (pte_val(pte) & L_PTE_DIRTY) +#define pte_young(pte) (pte_val(pte) & L_PTE_YOUNG) +#define pte_exec(pte) (!(pte_val(pte) & L_PTE_XN)) +#define pte_special(pte) (0) + +#define pte_present_user(pte) \ + ((pte_val(pte) & (L_PTE_PRESENT | L_PTE_USER)) == \ + (L_PTE_PRESENT | L_PTE_USER)) -/* Find an entry in the third-level page table.. */ -#define __pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) +#define PTE_BIT_FUNC(fn,op) \ +static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; } + +PTE_BIT_FUNC(wrprotect, |= L_PTE_RDONLY); +PTE_BIT_FUNC(mkwrite, &= ~L_PTE_RDONLY); +PTE_BIT_FUNC(mkclean, &= ~L_PTE_DIRTY); +PTE_BIT_FUNC(mkdirty, |= L_PTE_DIRTY); +PTE_BIT_FUNC(mkold, &= ~L_PTE_YOUNG); +PTE_BIT_FUNC(mkyoung, |= L_PTE_YOUNG); + +static inline pte_t pte_mkspecial(pte_t pte) { return pte; } static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) { - const unsigned long mask = L_PTE_EXEC | L_PTE_WRITE | L_PTE_USER; + const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER; pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask); return pte; } -extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; - -/* Encode and decode a swap entry. +/* + * Encode and decode a swap entry. Swap entries are stored in the Linux + * page tables as follows: + * + * 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 + * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 + * <--------------- offset --------------------> <- type --> 0 0 0 * - * We support up to 32GB of swap on 4k machines + * This gives us up to 63 swap files and 32GB per swap file. Note that + * the offset field is always non-zero. 
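The new comment pins down the geometry instead of the old "up to 32GB" hand-wave: three zero low bits keep a swap entry distinct from present and file PTEs, six type bits allow 63 swap files, and the remaining 23 offset bits give 2^23 * 4 KiB = 32 GiB per file. The arithmetic, checked host-side:

    #include <stdio.h>
    #include <stdint.h>

    #define __SWP_TYPE_SHIFT   3
    #define __SWP_TYPE_BITS    6
    #define __SWP_TYPE_MASK    ((1 << __SWP_TYPE_BITS) - 1)
    #define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)

    int main(void)
    {
            uint32_t e = (5u << __SWP_TYPE_SHIFT) | (0x1234u << __SWP_OFFSET_SHIFT);

            printf("type=%u offset=%#x\n",
                   (e >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK,
                   e >> __SWP_OFFSET_SHIFT);                 /* type=5 offset=0x1234 */
            printf("per-file max: %llu GiB\n",
                   (1ull << (32 - __SWP_OFFSET_SHIFT)) * 4096ull >> 30); /* 32 */
            return 0;
    }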
*/ -#define __swp_type(x) (((x).val >> 2) & 0x7f) -#define __swp_offset(x) ((x).val >> 9) -#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << 2) | ((offset) << 9) }) +#define __SWP_TYPE_SHIFT 3 +#define __SWP_TYPE_BITS 6 +#define __SWP_TYPE_MASK ((1 << __SWP_TYPE_BITS) - 1) +#define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT) + +#define __swp_type(x) (((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK) +#define __swp_offset(x) ((x).val >> __SWP_OFFSET_SHIFT) +#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) }) + #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) #define __swp_entry_to_pte(swp) ((pte_t) { (swp).val }) +/* + * It is an error for the kernel to have more swap files than we can + * encode in the PTEs. This ensures that we know when MAX_SWAPFILES + * is increased beyond what we presently support. + */ +#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS) + +/* + * Encode and decode a file entry. File entries are stored in the Linux + * page tables as follows: + * + * 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 + * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 + * <----------------------- offset ------------------------> 1 0 0 + */ +#define pte_file(pte) (pte_val(pte) & L_PTE_FILE) +#define pte_to_pgoff(x) (pte_val(x) >> 3) +#define pgoff_to_pte(x) __pte(((x) << 3) | L_PTE_FILE) + +#define PTE_FILE_MAX_BITS 29 + /* Needs to be defined here and not in linux/mm.h, as it is arch dependent */ /* FIXME: this is not correct */ #define kern_addr_valid(addr) (1) @@ -414,6 +473,9 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; #define pgtable_cache_init() do { } while (0) +void identity_mapping_add(pgd_t *, unsigned long, unsigned long); +void identity_mapping_del(pgd_t *, unsigned long, unsigned long); + #endif /* !__ASSEMBLY__ */ #endif /* CONFIG_MMU */ diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h new file mode 100644 index 0000000..8ccea01 --- /dev/null +++ b/arch/arm/include/asm/pmu.h @@ -0,0 +1,77 @@ +/* + * linux/arch/arm/include/asm/pmu.h + * + * Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#ifndef __ARM_PMU_H__ +#define __ARM_PMU_H__ + +enum arm_pmu_type { + ARM_PMU_DEVICE_CPU = 0, + ARM_NUM_PMU_DEVICES, +}; + +#ifdef CONFIG_CPU_HAS_PMU + +/** + * reserve_pmu() - reserve the hardware performance counters + * + * Reserve the hardware performance counters in the system for exclusive use. + * The platform_device for the system is returned on success, ERR_PTR() + * encoded error on failure. + */ +extern struct platform_device * +reserve_pmu(enum arm_pmu_type device); + +/** + * release_pmu() - Relinquish control of the performance counters + * + * Release the performance counters and allow someone else to use them. + * Callers must have disabled the counters and released IRQs before calling + * this. The platform_device returned from reserve_pmu() must be passed as + * a cookie. + */ +extern int +release_pmu(struct platform_device *pdev); + +/** + * init_pmu() - Initialise the PMU. + * + * Initialise the system ready for PMU enabling. This should typically set the + * IRQ affinity and nothing else. The users (oprofile/perf events etc) will do + * the actual hardware initialisation. 
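Taken together, the kerneldoc above implies a strict ordering for clients: reserve the counters, route the IRQs via init_pmu(), use the hardware, then disable the counters and free the IRQs before releasing. Roughly, as a sketch with error handling trimmed:

    #include <linux/err.h>
    #include <linux/platform_device.h>
    #include <asm/pmu.h>

    static struct platform_device *pmu_dev;

    static int claim_counters(void)
    {
            pmu_dev = reserve_pmu(ARM_PMU_DEVICE_CPU);
            if (IS_ERR(pmu_dev))
                    return PTR_ERR(pmu_dev);        /* e.g. oprofile already owns it */

            return init_pmu(ARM_PMU_DEVICE_CPU);    /* sets IRQ affinity, nothing more */
    }

    static void drop_counters(void)
    {
            /* counters must be disabled and IRQs freed before this point */
            release_pmu(pmu_dev);
    }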
+ */ +extern int +init_pmu(enum arm_pmu_type device); + +#else /* CONFIG_CPU_HAS_PMU */ + +#include <linux/err.h> + +static inline struct platform_device * +reserve_pmu(enum arm_pmu_type device) +{ + return ERR_PTR(-ENODEV); +} + +static inline int +release_pmu(struct platform_device *pdev) +{ + return -ENODEV; +} + +static inline int +init_pmu(enum arm_pmu_type device) +{ + return -ENODEV; +} + +#endif /* CONFIG_CPU_HAS_PMU */ + +#endif /* __ARM_PMU_H__ */ diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h index 3976412..8fdae9b 100644 --- a/arch/arm/include/asm/proc-fns.h +++ b/arch/arm/include/asm/proc-fns.h @@ -24,206 +24,228 @@ * CPU_NAME - the prefix for CPU related functions */ -#ifdef CONFIG_CPU_32 -# ifdef CONFIG_CPU_ARM610 -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm6 -# endif +#ifdef CONFIG_CPU_ARM610 +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm6 # endif -# ifdef CONFIG_CPU_ARM7TDMI -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm7tdmi -# endif +#endif + +#ifdef CONFIG_CPU_ARM7TDMI +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm7tdmi # endif -# ifdef CONFIG_CPU_ARM710 -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm7 -# endif +#endif + +#ifdef CONFIG_CPU_ARM710 +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm7 # endif -# ifdef CONFIG_CPU_ARM720T -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm720 -# endif +#endif + +#ifdef CONFIG_CPU_ARM720T +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm720 # endif -# ifdef CONFIG_CPU_ARM740T -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm740 -# endif +#endif + +#ifdef CONFIG_CPU_ARM740T +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm740 # endif -# ifdef CONFIG_CPU_ARM9TDMI -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm9tdmi -# endif +#endif + +#ifdef CONFIG_CPU_ARM9TDMI +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm9tdmi # endif -# ifdef CONFIG_CPU_ARM920T -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm920 -# endif +#endif + +#ifdef CONFIG_CPU_ARM920T +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm920 # endif -# ifdef CONFIG_CPU_ARM922T -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm922 -# endif +#endif + +#ifdef CONFIG_CPU_ARM922T +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm922 # endif -# ifdef CONFIG_CPU_FA526 -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_fa526 -# endif +#endif + +#ifdef CONFIG_CPU_FA526 +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_fa526 # endif -# ifdef CONFIG_CPU_ARM925T -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm925 -# endif +#endif + +#ifdef CONFIG_CPU_ARM925T +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm925 # endif -# ifdef CONFIG_CPU_ARM926T -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define 
CPU_NAME cpu_arm926 -# endif +#endif + +#ifdef CONFIG_CPU_ARM926T +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm926 # endif -# ifdef CONFIG_CPU_ARM940T -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm940 -# endif +#endif + +#ifdef CONFIG_CPU_ARM940T +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm940 # endif -# ifdef CONFIG_CPU_ARM946E -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm946 -# endif +#endif + +#ifdef CONFIG_CPU_ARM946E +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm946 # endif -# ifdef CONFIG_CPU_SA110 -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_sa110 -# endif +#endif + +#ifdef CONFIG_CPU_SA110 +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_sa110 # endif -# ifdef CONFIG_CPU_SA1100 -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_sa1100 -# endif +#endif + +#ifdef CONFIG_CPU_SA1100 +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_sa1100 # endif -# ifdef CONFIG_CPU_ARM1020 -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm1020 -# endif +#endif + +#ifdef CONFIG_CPU_ARM1020 +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm1020 # endif -# ifdef CONFIG_CPU_ARM1020E -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm1020e -# endif +#endif + +#ifdef CONFIG_CPU_ARM1020E +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm1020e # endif -# ifdef CONFIG_CPU_ARM1022 -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm1022 -# endif +#endif + +#ifdef CONFIG_CPU_ARM1022 +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm1022 # endif -# ifdef CONFIG_CPU_ARM1026 -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_arm1026 -# endif +#endif + +#ifdef CONFIG_CPU_ARM1026 +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_arm1026 # endif -# ifdef CONFIG_CPU_XSCALE -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_xscale -# endif +#endif + +#ifdef CONFIG_CPU_XSCALE +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_xscale # endif -# ifdef CONFIG_CPU_XSC3 -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_xsc3 -# endif +#endif + +#ifdef CONFIG_CPU_XSC3 +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_xsc3 # endif -# ifdef CONFIG_CPU_MOHAWK -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_mohawk -# endif +#endif + +#ifdef CONFIG_CPU_MOHAWK +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_mohawk # endif -# ifdef CONFIG_CPU_FEROCEON -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_feroceon -# endif +#endif + +#ifdef CONFIG_CPU_FEROCEON +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_feroceon # endif -# ifdef CONFIG_CPU_V6 -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_v6 -# endif 
+#endif + +#ifdef CONFIG_CPU_V6 +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_v6 # endif -# ifdef CONFIG_CPU_V7 -# ifdef CPU_NAME -# undef MULTI_CPU -# define MULTI_CPU -# else -# define CPU_NAME cpu_v7 -# endif +#endif + +#ifdef CONFIG_CPU_V7 +# ifdef CPU_NAME +# undef MULTI_CPU +# define MULTI_CPU +# else +# define CPU_NAME cpu_v7 # endif #endif diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h index 6a89567..67357ba 100644 --- a/arch/arm/include/asm/processor.h +++ b/arch/arm/include/asm/processor.h @@ -19,6 +19,7 @@ #ifdef __KERNEL__ +#include <asm/hw_breakpoint.h> #include <asm/ptrace.h> #include <asm/types.h> @@ -41,6 +42,9 @@ struct debug_entry { struct debug_info { int nsaved; struct debug_entry bp[2]; +#ifdef CONFIG_HAVE_HW_BREAKPOINT + struct perf_event *hbp[ARM_MAX_HBP_SLOTS]; +#endif }; struct thread_struct { @@ -91,7 +95,11 @@ extern void release_thread(struct task_struct *); unsigned long get_wchan(struct task_struct *p); +#if __LINUX_ARM_ARCH__ == 6 +#define cpu_relax() smp_mb() +#else #define cpu_relax() barrier() +#endif /* * Create a new kernel thread diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h index 67b833c..783d50f 100644 --- a/arch/arm/include/asm/ptrace.h +++ b/arch/arm/include/asm/ptrace.h @@ -29,6 +29,8 @@ #define PTRACE_SETCRUNCHREGS 26 #define PTRACE_GETVFPREGS 27 #define PTRACE_SETVFPREGS 28 +#define PTRACE_GETHBPREGS 29 +#define PTRACE_SETHBPREGS 30 /* * PSR bits @@ -82,6 +84,14 @@ #define PSR_ENDSTATE 0 #endif +/* + * These are 'magic' values for PTRACE_PEEKUSR that return info about where a + * process is located in memory. + */ +#define PT_TEXT_ADDR 0x10000 +#define PT_DATA_ADDR 0x10004 +#define PT_TEXT_END_ADDR 0x10008 + #ifndef __ASSEMBLY__ /* @@ -89,9 +99,15 @@ * stack during a system call. Note that sizeof(struct pt_regs) * has to be a multiple of 8. */ +#ifndef __KERNEL__ struct pt_regs { long uregs[18]; }; +#else /* __KERNEL__ */ +struct pt_regs { + unsigned long uregs[18]; +}; +#endif /* __KERNEL__ */ #define ARM_cpsr uregs[16] #define ARM_pc uregs[15] @@ -114,6 +130,8 @@ struct pt_regs { #ifdef __KERNEL__ +#define arch_has_single_step() (1) + #define user_mode(regs) \ (((regs)->ARM_cpsr & 0xf) == 0) @@ -142,15 +160,24 @@ struct pt_regs { */ static inline int valid_user_regs(struct pt_regs *regs) { - if (user_mode(regs) && (regs->ARM_cpsr & PSR_I_BIT) == 0) { - regs->ARM_cpsr &= ~(PSR_F_BIT | PSR_A_BIT); - return 1; + unsigned long mode = regs->ARM_cpsr & MODE_MASK; + + /* + * Always clear the F (FIQ) and A (delayed abort) bits + */ + regs->ARM_cpsr &= ~(PSR_F_BIT | PSR_A_BIT); + + if ((regs->ARM_cpsr & PSR_I_BIT) == 0) { + if (mode == USR_MODE) + return 1; + if (elf_hwcap & HWCAP_26BIT && mode == USR26_MODE) + return 1; } /* * Force CPSR to something logical... 
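Earlier in this ptrace.h diff, the PT_TEXT_ADDR/PT_DATA_ADDR/PT_TEXT_END_ADDR magic offsets are aimed chiefly at debuggers on no-MMU targets, which cannot learn a process's load addresses from /proc/pid/maps in the usual way. Userspace would consume them through PTRACE_PEEKUSER, as in this sketch:

    #include <stdio.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>

    #define PT_TEXT_ADDR     0x10000
    #define PT_DATA_ADDR     0x10004
    #define PT_TEXT_END_ADDR 0x10008

    static void dump_layout(pid_t pid)
    {
            long text = ptrace(PTRACE_PEEKUSER, pid, (void *)PT_TEXT_ADDR, 0);
            long data = ptrace(PTRACE_PEEKUSER, pid, (void *)PT_DATA_ADDR, 0);
            long end  = ptrace(PTRACE_PEEKUSER, pid, (void *)PT_TEXT_END_ADDR, 0);

            printf("text [%#lx, %#lx), data @ %#lx\n", text, end, data);
    }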
*/ - regs->ARM_cpsr &= PSR_f | PSR_s | (PSR_x & ~PSR_A_BIT) | PSR_T_BIT | MODE32_BIT; + regs->ARM_cpsr &= PSR_f | PSR_s | PSR_x | PSR_T_BIT | MODE32_BIT; if (!(elf_hwcap & HWCAP_26BIT)) regs->ARM_cpsr |= USR_MODE; @@ -168,6 +195,42 @@ extern unsigned long profile_pc(struct pt_regs *regs); #define predicate(x) ((x) & 0xf0000000) #define PREDICATE_ALWAYS 0xe0000000 +/* + * kprobe-based event tracer support + */ +#include <linux/stddef.h> +#include <linux/types.h> +#define MAX_REG_OFFSET (offsetof(struct pt_regs, ARM_ORIG_r0)) + +extern int regs_query_register_offset(const char *name); +extern const char *regs_query_register_name(unsigned int offset); +extern bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr); +extern unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, + unsigned int n); + +/** + * regs_get_register() - get register value from its offset + * @regs: pt_regs from which register value is gotten + * @offset: offset number of the register. + * + * regs_get_register returns the value of a register whose offset from @regs. + * The @offset is the offset of the register in struct pt_regs. + * If @offset is bigger than MAX_REG_OFFSET, this returns 0. + */ +static inline unsigned long regs_get_register(struct pt_regs *regs, + unsigned int offset) +{ + if (unlikely(offset > MAX_REG_OFFSET)) + return 0; + return *(unsigned long *)((unsigned long)regs + offset); +} + +/* Valid only for Kernel mode traps. */ +static inline unsigned long kernel_stack_pointer(struct pt_regs *regs) +{ + return regs->ARM_sp; +} + #endif /* __KERNEL__ */ #endif /* __ASSEMBLY__ */ diff --git a/arch/arm/include/asm/scatterlist.h b/arch/arm/include/asm/scatterlist.h index ca0a37d..2f87870 100644 --- a/arch/arm/include/asm/scatterlist.h +++ b/arch/arm/include/asm/scatterlist.h @@ -3,25 +3,6 @@ #include <asm/memory.h> #include <asm/types.h> - -struct scatterlist { -#ifdef CONFIG_DEBUG_SG - unsigned long sg_magic; -#endif - unsigned long page_link; - unsigned int offset; /* buffer offset */ - dma_addr_t dma_address; /* dma address */ - unsigned int length; /* length */ -}; - -/* - * These macros should be used after a pci_map_sg call has been done - * to get bus addresses of each of the SG entries and their lengths. - * You should only work with the number of sg entries pci_map_sg - * returns, or alternatively stop on the first sg_dma_len(sg) which - * is 0. - */ -#define sg_dma_address(sg) ((sg)->dma_address) -#define sg_dma_len(sg) ((sg)->length) +#include <asm-generic/scatterlist.h> #endif /* _ASMARM_SCATTERLIST_H */ diff --git a/arch/arm/include/asm/sched_clock.h b/arch/arm/include/asm/sched_clock.h new file mode 100644 index 0000000..a84628b --- /dev/null +++ b/arch/arm/include/asm/sched_clock.h @@ -0,0 +1,118 @@ +/* + * sched_clock.h: support for extending counters to full 64-bit ns counter + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef ASM_SCHED_CLOCK +#define ASM_SCHED_CLOCK + +#include <linux/kernel.h> +#include <linux/types.h> + +struct clock_data { + u64 epoch_ns; + u32 epoch_cyc; + u32 epoch_cyc_copy; + u32 mult; + u32 shift; +}; + +#define DEFINE_CLOCK_DATA(name) struct clock_data name + +static inline u64 cyc_to_ns(u64 cyc, u32 mult, u32 shift) +{ + return (cyc * mult) >> shift; +} + +/* + * Atomically update the sched_clock epoch. 
Your update callback will + * be called from a timer before the counter wraps - read the current + * counter value, and call this function to safely move the epochs + * forward. Only use this from the update callback. + */ +static inline void update_sched_clock(struct clock_data *cd, u32 cyc, u32 mask) +{ + unsigned long flags; + u64 ns = cd->epoch_ns + + cyc_to_ns((cyc - cd->epoch_cyc) & mask, cd->mult, cd->shift); + + /* + * Write epoch_cyc and epoch_ns in a way that the update is + * detectable in cyc_to_fixed_sched_clock(). + */ + raw_local_irq_save(flags); + cd->epoch_cyc = cyc; + smp_wmb(); + cd->epoch_ns = ns; + smp_wmb(); + cd->epoch_cyc_copy = cyc; + raw_local_irq_restore(flags); +} + +/* + * If your clock rate is known at compile time, using this will allow + * you to optimize the mult/shift loads away. This is paired with + * init_fixed_sched_clock() to ensure that your mult/shift are correct. + */ +static inline unsigned long long cyc_to_fixed_sched_clock(struct clock_data *cd, + u32 cyc, u32 mask, u32 mult, u32 shift) +{ + u64 epoch_ns; + u32 epoch_cyc; + + /* + * Load the epoch_cyc and epoch_ns atomically. We do this by + * ensuring that we always write epoch_cyc, epoch_ns and + * epoch_cyc_copy in strict order, and read them in strict order. + * If epoch_cyc and epoch_cyc_copy are not equal, then we're in + * the middle of an update, and we should repeat the load. + */ + do { + epoch_cyc = cd->epoch_cyc; + smp_rmb(); + epoch_ns = cd->epoch_ns; + smp_rmb(); + } while (epoch_cyc != cd->epoch_cyc_copy); + + return epoch_ns + cyc_to_ns((cyc - epoch_cyc) & mask, mult, shift); +} + +/* + * Otherwise, you need to use this, which will obtain the mult/shift + * from the clock_data structure. Use init_sched_clock() with this. + */ +static inline unsigned long long cyc_to_sched_clock(struct clock_data *cd, + u32 cyc, u32 mask) +{ + return cyc_to_fixed_sched_clock(cd, cyc, mask, cd->mult, cd->shift); +} + +/* + * Initialize the clock data - calculate the appropriate multiplier + * and shift. Also setup a timer to ensure that the epoch is refreshed + * at the appropriate time interval, which will call your update + * handler. + */ +void init_sched_clock(struct clock_data *, void (*)(void), + unsigned int, unsigned long); + +/* + * Use this initialization function rather than init_sched_clock() if + * you're using cyc_to_fixed_sched_clock, which will warn if your + * constants are incorrect. 
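For context on how these sched_clock helpers fit together: a platform timer driver defines the clock_data, reads its raw counter both in the sched_clock() fast path and in a periodic update callback, and lets init_sched_clock() derive mult/shift from the rate. A minimal sketch, assuming a hypothetical 32-bit free-running counter register at timer_base + 0x04 and an assumed 24 MHz input clock (the base pointer, register offset and rate are illustrative, not part of this patch):

#include <linux/init.h>
#include <linux/io.h>
#include <asm/sched_clock.h>

static void __iomem *timer_base;		/* assumed mapped during timer init */
static DEFINE_CLOCK_DATA(cd);

static inline u32 timer_read_count(void)
{
	return readl(timer_base + 0x04);	/* hypothetical counter register */
}

/* update callback: runs from a timer before the 32-bit counter wraps */
static void notrace example_update_sched_clock(void)
{
	update_sched_clock(&cd, timer_read_count(), 0xffffffffu);
}

unsigned long long notrace sched_clock(void)
{
	return cyc_to_sched_clock(&cd, timer_read_count(), 0xffffffffu);
}

static void __init example_timer_init(void)
{
	/* 32 significant counter bits, 24 MHz rate (assumed); fills cd.mult/cd.shift */
	init_sched_clock(&cd, example_update_sched_clock, 32, 24000000);
}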
+ */ +static inline void init_fixed_sched_clock(struct clock_data *cd, + void (*update)(void), unsigned int bits, unsigned long rate, + u32 mult, u32 shift) +{ + init_sched_clock(cd, update, bits, rate); + if (cd->mult != mult || cd->shift != shift) { + pr_crit("sched_clock: wrong multiply/shift: %u>>%u vs calculated %u>>%u\n" + "sched_clock: fix multiply/shift to avoid scheduler hiccups\n", + mult, shift, cd->mult, cd->shift); + } +} + +#endif diff --git a/arch/arm/include/asm/seccomp.h b/arch/arm/include/asm/seccomp.h new file mode 100644 index 0000000..52b156b --- /dev/null +++ b/arch/arm/include/asm/seccomp.h @@ -0,0 +1,11 @@ +#ifndef _ASM_ARM_SECCOMP_H +#define _ASM_ARM_SECCOMP_H + +#include <linux/unistd.h> + +#define __NR_seccomp_read __NR_read +#define __NR_seccomp_write __NR_write +#define __NR_seccomp_exit __NR_exit +#define __NR_seccomp_sigreturn __NR_rt_sigreturn + +#endif /* _ASM_ARM_SECCOMP_H */ diff --git a/arch/arm/include/asm/setup.h b/arch/arm/include/asm/setup.h index ee1304f..f1e5a9b 100644 --- a/arch/arm/include/asm/setup.h +++ b/arch/arm/include/asm/setup.h @@ -201,7 +201,7 @@ static struct tagtable __tagtable_##fn __tag = { tag, fn } struct membank { unsigned long start; unsigned long size; - int node; + unsigned int highmem; }; struct meminfo { @@ -211,9 +211,8 @@ struct meminfo { extern struct meminfo meminfo; -#define for_each_nodebank(iter,mi,no) \ - for (iter = 0; iter < (mi)->nr_banks; iter++) \ - if ((mi)->bank[iter].node == no) +#define for_each_bank(iter,mi) \ + for (iter = 0; iter < (mi)->nr_banks; iter++) #define bank_pfn_start(bank) __phys_to_pfn((bank)->start) #define bank_pfn_end(bank) __phys_to_pfn((bank)->start + (bank)->size) @@ -222,18 +221,6 @@ extern struct meminfo meminfo; #define bank_phys_end(bank) ((bank)->start + (bank)->size) #define bank_phys_size(bank) (bank)->size -/* - * Early command line parameters. - */ -struct early_params { - const char *arg; - void (*fn)(char **p); -}; - -#define __early_param(name,fn) \ -static struct early_params __early_##fn __used \ -__attribute__((__section__(".early_param.init"))) = { name, fn } - #endif /* __KERNEL__ */ #endif diff --git a/arch/arm/include/asm/sizes.h b/arch/arm/include/asm/sizes.h index 4fc1565..316bb2b 100644 --- a/arch/arm/include/asm/sizes.h +++ b/arch/arm/include/asm/sizes.h @@ -13,9 +13,6 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ -/* DO NOT EDIT!! - this file automatically generated - * from .s file by awk -f s2h.awk - */ /* Size definitions * Copyright (C) ARM Limited 1998. All rights reserved. */ @@ -25,6 +22,9 @@ /* handy sizes */ #define SZ_16 0x00000010 +#define SZ_32 0x00000020 +#define SZ_64 0x00000040 +#define SZ_128 0x00000080 #define SZ_256 0x00000100 #define SZ_512 0x00000200 diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h index a06e735..96ed521 100644 --- a/arch/arm/include/asm/smp.h +++ b/arch/arm/include/asm/smp.h @@ -33,27 +33,23 @@ struct seq_file; /* * generate IPI list text */ -extern void show_ipi_list(struct seq_file *p); +extern void show_ipi_list(struct seq_file *, int); /* * Called from assembly code, this handles an IPI. */ -asmlinkage void do_IPI(struct pt_regs *regs); +asmlinkage void do_IPI(int ipinr, struct pt_regs *regs); /* * Setup the set of possible CPUs (via set_cpu_possible) */ extern void smp_init_cpus(void); -/* - * Move global data into per-processor storage. 
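The asm/seccomp.h header added above only maps the four syscalls permitted by seccomp's strict mode onto their ARM numbers, which is what lets the generic kernel/seccomp.c code work on this architecture. From user space the feature is reached through prctl(); a rough sketch using the standard interface (not part of this patch):

#include <sys/prctl.h>
#include <linux/seccomp.h>
#include <unistd.h>

int main(void)
{
	/* enter strict mode: only read, write, _exit and sigreturn remain legal */
	if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT, 0, 0, 0) != 0)
		return 1;

	write(1, "sandboxed\n", 10);	/* still allowed */
	_exit(0);			/* any other syscall now raises SIGKILL */
}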
- */ -extern void smp_store_cpu_info(unsigned int cpuid); /* * Raise an IPI cross call on CPUs in callmap. */ -extern void smp_cross_call(const struct cpumask *mask); +extern void smp_cross_call(const struct cpumask *mask, int ipi); /* * Boot a secondary CPU, and assign it the specified idle task. @@ -73,6 +69,11 @@ asmlinkage void secondary_start_kernel(void); extern void platform_secondary_init(unsigned int cpu); /* + * Initialize cpu_possible map, and enable coherency + */ +extern void platform_smp_prepare_cpus(unsigned int); + +/* * Initial data for bringing up a secondary CPU. */ struct secondary_data { @@ -82,7 +83,7 @@ struct secondary_data { extern struct secondary_data secondary_data; extern int __cpu_disable(void); -extern int mach_cpu_disable(unsigned int cpu); +extern int platform_cpu_disable(unsigned int cpu); extern void __cpu_die(unsigned int cpu); extern void cpu_die(void); @@ -93,11 +94,10 @@ extern void platform_cpu_enable(unsigned int cpu); extern void arch_send_call_function_single_ipi(int cpu); extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); -#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask /* * show local interrupt info */ -extern void show_local_irqs(struct seq_file *); +extern void show_local_irqs(struct seq_file *, int); #endif /* ifndef __ASM_ARM_SMP_H */ diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h new file mode 100644 index 0000000..f24c1b9 --- /dev/null +++ b/arch/arm/include/asm/smp_plat.h @@ -0,0 +1,46 @@ +/* + * ARM specific SMP header, this contains our implementation + * details. + */ +#ifndef __ASMARM_SMP_PLAT_H +#define __ASMARM_SMP_PLAT_H + +#include <asm/cputype.h> + +/* + * Return true if we are running on a SMP platform + */ +static inline bool is_smp(void) +{ +#ifndef CONFIG_SMP + return false; +#elif defined(CONFIG_SMP_ON_UP) + extern unsigned int smp_on_up; + return !!smp_on_up; +#else + return true; +#endif +} + +/* all SMP configurations have the extended CPUID registers */ +static inline int tlb_ops_need_broadcast(void) +{ + if (!is_smp()) + return 0; + + return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 2; +} + +#if !defined(CONFIG_SMP) || __LINUX_ARM_ARCH__ >= 7 +#define cache_ops_need_broadcast() 0 +#else +static inline int cache_ops_need_broadcast(void) +{ + if (!is_smp()) + return 0; + + return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 1; +} +#endif + +#endif diff --git a/arch/arm/include/asm/smp_twd.h b/arch/arm/include/asm/smp_twd.h index 7be0978..fed9981 100644 --- a/arch/arm/include/asm/smp_twd.h +++ b/arch/arm/include/asm/smp_twd.h @@ -1,11 +1,27 @@ #ifndef __ASMARM_SMP_TWD_H #define __ASMARM_SMP_TWD_H +#define TWD_TIMER_LOAD 0x00 +#define TWD_TIMER_COUNTER 0x04 +#define TWD_TIMER_CONTROL 0x08 +#define TWD_TIMER_INTSTAT 0x0C + +#define TWD_WDOG_LOAD 0x20 +#define TWD_WDOG_COUNTER 0x24 +#define TWD_WDOG_CONTROL 0x28 +#define TWD_WDOG_INTSTAT 0x2C +#define TWD_WDOG_RESETSTAT 0x30 +#define TWD_WDOG_DISABLE 0x34 + +#define TWD_TIMER_CONTROL_ENABLE (1 << 0) +#define TWD_TIMER_CONTROL_ONESHOT (0 << 1) +#define TWD_TIMER_CONTROL_PERIODIC (1 << 1) +#define TWD_TIMER_CONTROL_IT_ENABLE (1 << 2) + struct clock_event_device; extern void __iomem *twd_base; -void twd_timer_stop(void); int twd_timer_ack(void); void twd_timer_setup(struct clock_event_device *); diff --git a/arch/arm/include/asm/socket.h b/arch/arm/include/asm/socket.h index 537de4e..90ffd04 100644 --- a/arch/arm/include/asm/socket.h +++ b/arch/arm/include/asm/socket.h @@ -57,4 +57,9 @@ 
#define SO_TIMESTAMPING 37 #define SCM_TIMESTAMPING SO_TIMESTAMPING +#define SO_PROTOCOL 38 +#define SO_DOMAIN 39 + +#define SO_RXQ_OVFL 40 + #endif /* _ASM_SOCKET_H */ diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h index c13681a..17eb355 100644 --- a/arch/arm/include/asm/spinlock.h +++ b/arch/arm/include/asm/spinlock.h @@ -5,6 +5,22 @@ #error SMP not supported on pre-ARMv6 CPUs #endif +static inline void dsb_sev(void) +{ +#if __LINUX_ARM_ARCH__ >= 7 + __asm__ __volatile__ ( + "dsb\n" + "sev" + ); +#elif defined(CONFIG_CPU_32v6K) + __asm__ __volatile__ ( + "mcr p15, 0, %0, c7, c10, 4\n" + "sev" + : : "r" (0) + ); +#endif +} + /* * ARMv6 Spin-locking. * @@ -17,13 +33,13 @@ * Locked value: 1 */ -#define __raw_spin_is_locked(x) ((x)->lock != 0) -#define __raw_spin_unlock_wait(lock) \ - do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) +#define arch_spin_is_locked(x) ((x)->lock != 0) +#define arch_spin_unlock_wait(lock) \ + do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0) -#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) +#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) -static inline void __raw_spin_lock(raw_spinlock_t *lock) +static inline void arch_spin_lock(arch_spinlock_t *lock) { unsigned long tmp; @@ -43,7 +59,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) smp_mb(); } -static inline int __raw_spin_trylock(raw_spinlock_t *lock) +static inline int arch_spin_trylock(arch_spinlock_t *lock) { unsigned long tmp; @@ -63,19 +79,17 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock) } } -static inline void __raw_spin_unlock(raw_spinlock_t *lock) +static inline void arch_spin_unlock(arch_spinlock_t *lock) { smp_mb(); __asm__ __volatile__( " str %1, [%0]\n" -#ifdef CONFIG_CPU_32v6K -" mcr p15, 0, %1, c7, c10, 4\n" /* DSB */ -" sev" -#endif : : "r" (&lock->lock), "r" (0) : "cc"); + + dsb_sev(); } /* @@ -86,7 +100,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock) * just write zero since the lock is exclusively held. */ -static inline void __raw_write_lock(raw_rwlock_t *rw) +static inline void arch_write_lock(arch_rwlock_t *rw) { unsigned long tmp; @@ -106,7 +120,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw) smp_mb(); } -static inline int __raw_write_trylock(raw_rwlock_t *rw) +static inline int arch_write_trylock(arch_rwlock_t *rw) { unsigned long tmp; @@ -126,23 +140,21 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw) } } -static inline void __raw_write_unlock(raw_rwlock_t *rw) +static inline void arch_write_unlock(arch_rwlock_t *rw) { smp_mb(); __asm__ __volatile__( "str %1, [%0]\n" -#ifdef CONFIG_CPU_32v6K -" mcr p15, 0, %1, c7, c10, 4\n" /* DSB */ -" sev\n" -#endif : : "r" (&rw->lock), "r" (0) : "cc"); + + dsb_sev(); } /* write_can_lock - would write_trylock() succeed? */ -#define __raw_write_can_lock(x) ((x)->lock == 0) +#define arch_write_can_lock(x) ((x)->lock == 0) /* * Read locks are a bit more hairy: @@ -156,7 +168,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw) * currently active. However, we know we won't have any write * locks. 
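The new dsb_sev() helper above is the wake-up half of a WFE/SEV handshake: waiters park in WFE inside the locked ldrex/strex loop, and the unlock paths now issue DSB+SEV unconditionally through dsb_sev() instead of the old inline #ifdef CONFIG_CPU_32v6K sequences. The acquire side is elided by the diff context; a sketch of what such a loop typically looks like (illustrative only, assuming a v6K-class core where wfene is available):

static inline void example_spin_lock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"		/* load the lock word exclusively */
"	teq	%0, #0\n"		/* non-zero means already held */
"	wfene\n"			/* held: sleep until SEV from the unlocker */
"	strexeq	%0, %2, [%1]\n"		/* free: try to claim it with a 1 */
"	teqeq	%0, #0\n"		/* strex returns 0 on success */
"	bne	1b"			/* lost the race or still held: retry */
	: "=&r" (tmp)
	: "r" (&lock->lock), "r" (1)
	: "cc");

	smp_mb();			/* acquire ordering, as in the real lock */
}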
*/ -static inline void __raw_read_lock(raw_rwlock_t *rw) +static inline void arch_read_lock(arch_rwlock_t *rw) { unsigned long tmp, tmp2; @@ -176,7 +188,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw) smp_mb(); } -static inline void __raw_read_unlock(raw_rwlock_t *rw) +static inline void arch_read_unlock(arch_rwlock_t *rw) { unsigned long tmp, tmp2; @@ -188,17 +200,15 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw) " strex %1, %0, [%2]\n" " teq %1, #0\n" " bne 1b" -#ifdef CONFIG_CPU_32v6K -"\n cmp %0, #0\n" -" mcreq p15, 0, %0, c7, c10, 4\n" -" seveq" -#endif : "=&r" (tmp), "=&r" (tmp2) : "r" (&rw->lock) : "cc"); + + if (tmp == 0) + dsb_sev(); } -static inline int __raw_read_trylock(raw_rwlock_t *rw) +static inline int arch_read_trylock(arch_rwlock_t *rw) { unsigned long tmp, tmp2 = 1; @@ -215,13 +225,13 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw) } /* read_can_lock - would read_trylock() succeed? */ -#define __raw_read_can_lock(x) ((x)->lock < 0x80000000) +#define arch_read_can_lock(x) ((x)->lock < 0x80000000) -#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) -#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) +#define arch_read_lock_flags(lock, flags) arch_read_lock(lock) +#define arch_write_lock_flags(lock, flags) arch_write_lock(lock) -#define _raw_spin_relax(lock) cpu_relax() -#define _raw_read_relax(lock) cpu_relax() -#define _raw_write_relax(lock) cpu_relax() +#define arch_spin_relax(lock) cpu_relax() +#define arch_read_relax(lock) cpu_relax() +#define arch_write_relax(lock) cpu_relax() #endif /* __ASM_SPINLOCK_H */ diff --git a/arch/arm/include/asm/spinlock_types.h b/arch/arm/include/asm/spinlock_types.h index 43e83f6..d14d197 100644 --- a/arch/arm/include/asm/spinlock_types.h +++ b/arch/arm/include/asm/spinlock_types.h @@ -7,14 +7,14 @@ typedef struct { volatile unsigned int lock; -} raw_spinlock_t; +} arch_spinlock_t; -#define __RAW_SPIN_LOCK_UNLOCKED { 0 } +#define __ARCH_SPIN_LOCK_UNLOCKED { 0 } typedef struct { volatile unsigned int lock; -} raw_rwlock_t; +} arch_rwlock_t; -#define __RAW_RW_LOCK_UNLOCKED { 0 } +#define __ARCH_RW_LOCK_UNLOCKED { 0 } #endif diff --git a/arch/arm/include/asm/stackprotector.h b/arch/arm/include/asm/stackprotector.h new file mode 100644 index 0000000..de00332 --- /dev/null +++ b/arch/arm/include/asm/stackprotector.h @@ -0,0 +1,38 @@ +/* + * GCC stack protector support. + * + * Stack protector works by putting predefined pattern at the start of + * the stack frame and verifying that it hasn't been overwritten when + * returning from the function. The pattern is called stack canary + * and gcc expects it to be defined by a global variable called + * "__stack_chk_guard" on ARM. This unfortunately means that on SMP + * we cannot have a different canary value per task. + */ + +#ifndef _ASM_STACKPROTECTOR_H +#define _ASM_STACKPROTECTOR_H 1 + +#include <linux/random.h> +#include <linux/version.h> + +extern unsigned long __stack_chk_guard; + +/* + * Initialize the stackprotector canary value. + * + * NOTE: this must only be called from functions that never return, + * and it must always be inlined. + */ +static __always_inline void boot_init_stack_canary(void) +{ + unsigned long canary; + + /* Try to get a semi random initial value. 
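To make the canary scheme concrete: with -fstack-protector, gcc itself inserts the guard handling into each protected function's prologue and epilogue. Expressed as hand-written C it is roughly the following (purely conceptual; victim() and the explicit check stand in for compiler-generated code):

#include <string.h>

extern unsigned long __stack_chk_guard;
extern void __stack_chk_fail(void);	/* called on mismatch, never returns */

void victim(const char *src)
{
	unsigned long canary = __stack_chk_guard;	/* prologue: stash the guard on the stack */
	char buf[64];

	strcpy(buf, src);	/* an overflow of buf runs over the canary first */

	if (canary != __stack_chk_guard)	/* epilogue: verify before returning */
		__stack_chk_fail();
}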
*/ + get_random_bytes(&canary, sizeof(canary)); + canary ^= LINUX_VERSION_CODE; + + current->stack_canary = canary; + __stack_chk_guard = current->stack_canary; +} + +#endif /* _ASM_STACKPROTECTOR_H */ diff --git a/arch/arm/include/asm/swab.h b/arch/arm/include/asm/swab.h index ca2bf2f..9997ad2 100644 --- a/arch/arm/include/asm/swab.h +++ b/arch/arm/include/asm/swab.h @@ -22,6 +22,24 @@ # define __SWAB_64_THRU_32__ #endif +#if defined(__KERNEL__) && __LINUX_ARM_ARCH__ >= 6 + +static inline __attribute_const__ __u16 __arch_swab16(__u16 x) +{ + __asm__ ("rev16 %0, %1" : "=r" (x) : "r" (x)); + return x; +} +#define __arch_swab16 __arch_swab16 + +static inline __attribute_const__ __u32 __arch_swab32(__u32 x) +{ + __asm__ ("rev %0, %1" : "=r" (x) : "r" (x)); + return x; +} +#define __arch_swab32 __arch_swab32 + +#else + static inline __attribute_const__ __u32 __arch_swab32(__u32 x) { __u32 t; @@ -48,3 +66,4 @@ static inline __attribute_const__ __u32 __arch_swab32(__u32 x) #endif +#endif diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h index d65b2f5..97f6d60 100644 --- a/arch/arm/include/asm/system.h +++ b/arch/arm/include/asm/system.h @@ -60,7 +60,14 @@ #include <linux/linkage.h> #include <linux/irqflags.h> +#include <asm/outercache.h> + #define __exception __attribute__((section(".exception.text"))) +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +#define __exception_irq_entry __irq_entry +#else +#define __exception_irq_entry __exception +#endif struct thread_info; struct task_struct; @@ -73,8 +80,7 @@ extern unsigned int mem_fclk_21285; struct pt_regs; -void die(const char *msg, struct pt_regs *regs, int err) - __attribute__((noreturn)); +void die(const char *msg, struct pt_regs *regs, int err); struct siginfo; void arm_notify_die(const char *str, struct pt_regs *regs, struct siginfo *info, @@ -82,7 +88,11 @@ void arm_notify_die(const char *str, struct pt_regs *regs, struct siginfo *info, void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *), - int sig, const char *name); + int sig, int code, const char *name); + +void hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, + struct pt_regs *), + int sig, int code, const char *name); #define xchg(ptr,x) \ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) @@ -114,6 +124,13 @@ extern unsigned int user_debug; #define vectors_high() (0) #endif +#if __LINUX_ARM_ARCH__ >= 7 || \ + (__LINUX_ARM_ARCH__ == 6 && defined(CONFIG_CPU_32v6K)) +#define sev() __asm__ __volatile__ ("sev" : : : "memory") +#define wfe() __asm__ __volatile__ ("wfe" : : : "memory") +#define wfi() __asm__ __volatile__ ("wfi" : : : "memory") +#endif + #if __LINUX_ARM_ARCH__ >= 7 #define isb() __asm__ __volatile__ ("isb" : : : "memory") #define dsb() __asm__ __volatile__ ("dsb" : : : "memory") @@ -138,21 +155,29 @@ extern unsigned int user_debug; #define dmb() __asm__ __volatile__ ("" : : : "memory") #endif -#ifndef CONFIG_SMP +#ifdef CONFIG_ARCH_HAS_BARRIERS +#include <mach/barriers.h> +#elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP) +#define mb() do { dsb(); outer_sync(); } while (0) +#define rmb() dmb() +#define wmb() mb() +#else +#include <asm/memory.h> #define mb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0) #define rmb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0) #define wmb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0) +#endif + +#ifndef CONFIG_SMP #define smp_mb() barrier() #define smp_rmb() barrier() #define smp_wmb() 
barrier() #else -#define mb() dmb() -#define rmb() dmb() -#define wmb() dmb() #define smp_mb() dmb() #define smp_rmb() dmb() #define smp_wmb() dmb() #endif + #define read_barrier_depends() do { } while(0) #define smp_read_barrier_depends() do { } while(0) @@ -317,6 +342,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size extern void disable_hlt(void); extern void enable_hlt(void); +void cpu_idle_wait(void); + #include <asm-generic/cmpxchg-local.h> #if __LINUX_ARM_ARCH__ < 6 diff --git a/arch/arm/include/asm/tcm.h b/arch/arm/include/asm/tcm.h new file mode 100644 index 0000000..5929ef5 --- /dev/null +++ b/arch/arm/include/asm/tcm.h @@ -0,0 +1,31 @@ +/* + * + * Copyright (C) 2008-2009 ST-Ericsson AB + * License terms: GNU General Public License (GPL) version 2 + * + * Author: Rickard Andersson <rickard.andersson@stericsson.com> + * Author: Linus Walleij <linus.walleij@stericsson.com> + * + */ +#ifndef __ASMARM_TCM_H +#define __ASMARM_TCM_H + +#ifndef CONFIG_HAVE_TCM +#error "You should not be including tcm.h unless you have a TCM!" +#endif + +#include <linux/compiler.h> + +/* Tag variables with this */ +#define __tcmdata __section(.tcm.data) +/* Tag constants with this */ +#define __tcmconst __section(.tcm.rodata) +/* Tag functions inside TCM called from outside TCM with this */ +#define __tcmfunc __attribute__((long_call)) __section(.tcm.text) noinline +/* Tag function inside TCM called from inside TCM with this */ +#define __tcmlocalfunc __section(.tcm.text) + +void *tcm_alloc(size_t len); +void tcm_free(void *addr, size_t len); + +#endif diff --git a/arch/arm/include/asm/termbits.h b/arch/arm/include/asm/termbits.h index f784d11..704135d 100644 --- a/arch/arm/include/asm/termbits.h +++ b/arch/arm/include/asm/termbits.h @@ -177,6 +177,7 @@ struct ktermios { #define FLUSHO 0010000 #define PENDIN 0040000 #define IEXTEN 0100000 +#define EXTPROC 0200000 /* tcflow() and TCXONC use these */ #define TCOOFF 0 diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h index 4f88482..7b5cc8d 100644 --- a/arch/arm/include/asm/thread_info.h +++ b/arch/arm/include/asm/thread_info.h @@ -73,7 +73,7 @@ struct thread_info { .task = &tsk, \ .exec_domain = &default_exec_domain, \ .flags = 0, \ - .preempt_count = 1, \ + .preempt_count = INIT_PREEMPT_COUNT, \ .addr_limit = KERNEL_DS, \ .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \ domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \ @@ -115,7 +115,8 @@ extern void iwmmxt_task_restore(struct thread_info *, void *); extern void iwmmxt_task_release(struct thread_info *); extern void iwmmxt_task_switch(struct thread_info *); -extern void vfp_sync_state(struct thread_info *thread); +extern void vfp_sync_hwstate(struct thread_info *); +extern void vfp_flush_hwstate(struct thread_info *); #endif @@ -130,23 +131,30 @@ extern void vfp_sync_state(struct thread_info *thread); * TIF_SYSCALL_TRACE - syscall trace active * TIF_SIGPENDING - signal pending * TIF_NEED_RESCHED - rescheduling necessary + * TIF_NOTIFY_RESUME - callback before returning to user * TIF_USEDFPU - FPU was used by this task this quantum (SMP) * TIF_POLLING_NRFLAG - true if poll_idle() is polling TIF_NEED_RESCHED */ #define TIF_SIGPENDING 0 #define TIF_NEED_RESCHED 1 +#define TIF_NOTIFY_RESUME 2 /* callback before returning to user */ #define TIF_SYSCALL_TRACE 8 #define TIF_POLLING_NRFLAG 16 #define TIF_USING_IWMMXT 17 -#define TIF_MEMDIE 18 +#define TIF_MEMDIE 18 /* is terminating due to OOM killer */ #define TIF_FREEZE 19 +#define 
TIF_RESTORE_SIGMASK 20 +#define TIF_SECCOMP 21 #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) +#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT) #define _TIF_FREEZE (1 << TIF_FREEZE) +#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK) +#define _TIF_SECCOMP (1 << TIF_SECCOMP) /* * Change these and you break ASM code in entry-common.S diff --git a/arch/arm/include/asm/thread_notify.h b/arch/arm/include/asm/thread_notify.h index f27379d..c4391ba 100644 --- a/arch/arm/include/asm/thread_notify.h +++ b/arch/arm/include/asm/thread_notify.h @@ -41,7 +41,7 @@ static inline void thread_notify(unsigned long rc, struct thread_info *thread) * These are the reason codes for the thread notifier. */ #define THREAD_NOTIFY_FLUSH 0 -#define THREAD_NOTIFY_RELEASE 1 +#define THREAD_NOTIFY_EXIT 1 #define THREAD_NOTIFY_SWITCH 2 #endif diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h index 321c83e..f41a6f5 100644 --- a/arch/arm/include/asm/tlb.h +++ b/arch/arm/include/asm/tlb.h @@ -102,8 +102,8 @@ tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) } #define tlb_remove_page(tlb,page) free_page_and_swap_cache(page) -#define pte_free_tlb(tlb, ptep) pte_free((tlb)->mm, ptep) -#define pmd_free_tlb(tlb, pmdp) pmd_free((tlb)->mm, pmdp) +#define pte_free_tlb(tlb, ptep, addr) pte_free((tlb)->mm, ptep) +#define pmd_free_tlb(tlb, pmdp, addr) pmd_free((tlb)->mm, pmdp) #define tlb_migrate_finish(mm) do { } while (0) diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h index c964f3f..ce7378ea 100644 --- a/arch/arm/include/asm/tlbflush.h +++ b/arch/arm/include/asm/tlbflush.h @@ -46,6 +46,9 @@ #define TLB_V7_UIS_FULL (1 << 20) #define TLB_V7_UIS_ASID (1 << 21) +/* Inner Shareable BTB operation (ARMv7 MP extensions) */ +#define TLB_V7_IS_BTB (1 << 22) + #define TLB_L2CLEAN_FR (1 << 29) /* Feroceon */ #define TLB_DCLEAN (1 << 30) #define TLB_WB (1 << 31) @@ -67,6 +70,10 @@ #undef _TLB #undef MULTI_TLB +#ifdef CONFIG_SMP_ON_UP +#define MULTI_TLB 1 +#endif + #define v3_tlb_flags (TLB_V3_FULL | TLB_V3_PAGE) #ifdef CONFIG_CPU_TLB_V3 @@ -182,17 +189,23 @@ # define v6wbi_always_flags (-1UL) #endif -#ifdef CONFIG_SMP -#define v7wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BTB | \ +#define v7wbi_tlb_flags_smp (TLB_WB | TLB_DCLEAN | TLB_V7_IS_BTB | \ TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | TLB_V7_UIS_ASID) -#else -#define v7wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BTB | \ +#define v7wbi_tlb_flags_up (TLB_WB | TLB_DCLEAN | TLB_BTB | \ TLB_V6_U_FULL | TLB_V6_U_PAGE | TLB_V6_U_ASID) -#endif #ifdef CONFIG_CPU_TLB_V7 -# define v7wbi_possible_flags v7wbi_tlb_flags -# define v7wbi_always_flags v7wbi_tlb_flags + +# ifdef CONFIG_SMP_ON_UP +# define v7wbi_possible_flags (v7wbi_tlb_flags_smp | v7wbi_tlb_flags_up) +# define v7wbi_always_flags (v7wbi_tlb_flags_smp & v7wbi_tlb_flags_up) +# elif defined(CONFIG_SMP) +# define v7wbi_possible_flags v7wbi_tlb_flags_smp +# define v7wbi_always_flags v7wbi_tlb_flags_smp +# else +# define v7wbi_possible_flags v7wbi_tlb_flags_up +# define v7wbi_always_flags v7wbi_tlb_flags_up +# endif # ifdef _TLB # define MULTI_TLB 1 # else @@ -339,6 +352,12 @@ static inline void local_flush_tlb_all(void) dsb(); isb(); } + if (tlb_flag(TLB_V7_IS_BTB)) { + /* flush the branch target cache */ + asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero) : "cc"); + dsb(); + isb(); + } 
} static inline void local_flush_tlb_mm(struct mm_struct *mm) @@ -350,7 +369,7 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm) if (tlb_flag(TLB_WB)) dsb(); - if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask)) { + if (cpumask_test_cpu(get_cpu(), mm_cpumask(mm))) { if (tlb_flag(TLB_V3_FULL)) asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (zero) : "cc"); if (tlb_flag(TLB_V4_U_FULL)) @@ -360,6 +379,7 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm) if (tlb_flag(TLB_V4_I_FULL)) asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc"); } + put_cpu(); if (tlb_flag(TLB_V6_U_ASID)) asm("mcr p15, 0, %0, c8, c7, 2" : : "r" (asid) : "cc"); @@ -368,13 +388,23 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm) if (tlb_flag(TLB_V6_I_ASID)) asm("mcr p15, 0, %0, c8, c5, 2" : : "r" (asid) : "cc"); if (tlb_flag(TLB_V7_UIS_ASID)) +#ifdef CONFIG_ARM_ERRATA_720789 + asm("mcr p15, 0, %0, c8, c3, 0" : : "r" (zero) : "cc"); +#else asm("mcr p15, 0, %0, c8, c3, 2" : : "r" (asid) : "cc"); +#endif if (tlb_flag(TLB_BTB)) { /* flush the branch target cache */ asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc"); dsb(); } + if (tlb_flag(TLB_V7_IS_BTB)) { + /* flush the branch target cache */ + asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero) : "cc"); + dsb(); + isb(); + } } static inline void @@ -388,7 +418,7 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) if (tlb_flag(TLB_WB)) dsb(); - if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) { + if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) { if (tlb_flag(TLB_V3_PAGE)) asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (uaddr) : "cc"); if (tlb_flag(TLB_V4_U_PAGE)) @@ -408,13 +438,23 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) if (tlb_flag(TLB_V6_I_PAGE)) asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (uaddr) : "cc"); if (tlb_flag(TLB_V7_UIS_PAGE)) +#ifdef CONFIG_ARM_ERRATA_720789 + asm("mcr p15, 0, %0, c8, c3, 3" : : "r" (uaddr & PAGE_MASK) : "cc"); +#else asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (uaddr) : "cc"); +#endif if (tlb_flag(TLB_BTB)) { /* flush the branch target cache */ asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc"); dsb(); } + if (tlb_flag(TLB_V7_IS_BTB)) { + /* flush the branch target cache */ + asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero) : "cc"); + dsb(); + isb(); + } } static inline void local_flush_tlb_kernel_page(unsigned long kaddr) @@ -453,6 +493,12 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr) dsb(); isb(); } + if (tlb_flag(TLB_V7_IS_BTB)) { + /* flush the branch target cache */ + asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero) : "cc"); + dsb(); + isb(); + } } /* @@ -524,11 +570,20 @@ extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); #endif /* - * if PG_dcache_dirty is set for the page, we need to ensure that any + * If PG_dcache_clean is not set for the page, we need to ensure that any * cache entries for the kernels virtual memory range are written - * back to the page. + * back to the page. On ARMv6 and later, the cache coherency is handled via + * the set_pte_at() function. 
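For orientation on the update_mmu_cache() change in the hunk that follows: generic mm code invokes this hook right after installing a PTE, so on ARMv6 and later, where set_pte_at() already does the necessary maintenance, the hook collapses to a no-op. A simplified sketch of the call pattern in the generic fault path (not part of this patch):

#include <asm/pgtable.h>
#include <asm/tlbflush.h>

static void example_install_pte(struct vm_area_struct *vma, struct mm_struct *mm,
				unsigned long address, pte_t *ptep, pte_t entry)
{
	set_pte_at(mm, address, ptep, entry);	/* makes the PTE visible, syncs caches on v6+ */
	update_mmu_cache(vma, address, ptep);	/* no-op on v6+, deferred cache sync on older cores */
}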
*/ -extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte); +#if __LINUX_ARM_ARCH__ < 6 +extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, + pte_t *ptep); +#else +static inline void update_mmu_cache(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep) +{ +} +#endif #endif diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h new file mode 100644 index 0000000..e71d6ff --- /dev/null +++ b/arch/arm/include/asm/tls.h @@ -0,0 +1,46 @@ +#ifndef __ASMARM_TLS_H +#define __ASMARM_TLS_H + +#ifdef __ASSEMBLY__ + .macro set_tls_none, tp, tmp1, tmp2 + .endm + + .macro set_tls_v6k, tp, tmp1, tmp2 + mcr p15, 0, \tp, c13, c0, 3 @ set TLS register + .endm + + .macro set_tls_v6, tp, tmp1, tmp2 + ldr \tmp1, =elf_hwcap + ldr \tmp1, [\tmp1, #0] + mov \tmp2, #0xffff0fff + tst \tmp1, #HWCAP_TLS @ hardware TLS available? + mcrne p15, 0, \tp, c13, c0, 3 @ yes, set TLS register + streq \tp, [\tmp2, #-15] @ set TLS value at 0xffff0ff0 + .endm + + .macro set_tls_software, tp, tmp1, tmp2 + mov \tmp1, #0xffff0fff + str \tp, [\tmp1, #-15] @ set TLS value at 0xffff0ff0 + .endm +#endif + +#ifdef CONFIG_TLS_REG_EMUL +#define tls_emu 1 +#define has_tls_reg 1 +#define set_tls set_tls_none +#elif __LINUX_ARM_ARCH__ >= 7 || \ + (__LINUX_ARM_ARCH__ == 6 && defined(CONFIG_CPU_32v6K)) +#define tls_emu 0 +#define has_tls_reg 1 +#define set_tls set_tls_v6k +#elif __LINUX_ARM_ARCH__ == 6 +#define tls_emu 0 +#define has_tls_reg (elf_hwcap & HWCAP_TLS) +#define set_tls set_tls_v6 +#else +#define tls_emu 0 +#define has_tls_reg 0 +#define set_tls set_tls_software +#endif + +#endif /* __ASMARM_TLS_H */ diff --git a/arch/arm/include/asm/traps.h b/arch/arm/include/asm/traps.h index 491960b..1b960d5 100644 --- a/arch/arm/include/asm/traps.h +++ b/arch/arm/include/asm/traps.h @@ -15,16 +15,37 @@ struct undef_hook { void register_undef_hook(struct undef_hook *hook); void unregister_undef_hook(struct undef_hook *hook); +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +static inline int __in_irqentry_text(unsigned long ptr) +{ + extern char __irqentry_text_start[]; + extern char __irqentry_text_end[]; + + return ptr >= (unsigned long)&__irqentry_text_start && + ptr < (unsigned long)&__irqentry_text_end; +} +#else +static inline int __in_irqentry_text(unsigned long ptr) +{ + return 0; +} +#endif + static inline int in_exception_text(unsigned long ptr) { extern char __exception_text_start[]; extern char __exception_text_end[]; + int in; - return ptr >= (unsigned long)&__exception_text_start && - ptr < (unsigned long)&__exception_text_end; + in = ptr >= (unsigned long)&__exception_text_start && + ptr < (unsigned long)&__exception_text_end; + + return in ? 
: __in_irqentry_text(ptr); } extern void __init early_trap_init(void); extern void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame); +extern void *vectors_page; + #endif diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h index 0da9bc9..b293616 100644 --- a/arch/arm/include/asm/uaccess.h +++ b/arch/arm/include/asm/uaccess.h @@ -17,6 +17,7 @@ #include <asm/memory.h> #include <asm/domain.h> #include <asm/system.h> +#include <asm/unified.h> #define VERIFY_READ 0 #define VERIFY_WRITE 1 @@ -226,18 +227,18 @@ do { \ #define __get_user_asm_byte(x,addr,err) \ __asm__ __volatile__( \ - "1: ldrbt %1,[%2]\n" \ + "1: " T(ldrb) " %1,[%2],#0\n" \ "2:\n" \ - " .section .fixup,\"ax\"\n" \ + " .pushsection .fixup,\"ax\"\n" \ " .align 2\n" \ "3: mov %0, %3\n" \ " mov %1, #0\n" \ " b 2b\n" \ - " .previous\n" \ - " .section __ex_table,\"a\"\n" \ + " .popsection\n" \ + " .pushsection __ex_table,\"a\"\n" \ " .align 3\n" \ " .long 1b, 3b\n" \ - " .previous" \ + " .popsection" \ : "+r" (err), "=&r" (x) \ : "r" (addr), "i" (-EFAULT) \ : "cc") @@ -262,18 +263,18 @@ do { \ #define __get_user_asm_word(x,addr,err) \ __asm__ __volatile__( \ - "1: ldrt %1,[%2]\n" \ + "1: " T(ldr) " %1,[%2],#0\n" \ "2:\n" \ - " .section .fixup,\"ax\"\n" \ + " .pushsection .fixup,\"ax\"\n" \ " .align 2\n" \ "3: mov %0, %3\n" \ " mov %1, #0\n" \ " b 2b\n" \ - " .previous\n" \ - " .section __ex_table,\"a\"\n" \ + " .popsection\n" \ + " .pushsection __ex_table,\"a\"\n" \ " .align 3\n" \ " .long 1b, 3b\n" \ - " .previous" \ + " .popsection" \ : "+r" (err), "=&r" (x) \ : "r" (addr), "i" (-EFAULT) \ : "cc") @@ -307,17 +308,17 @@ do { \ #define __put_user_asm_byte(x,__pu_addr,err) \ __asm__ __volatile__( \ - "1: strbt %1,[%2]\n" \ + "1: " T(strb) " %1,[%2],#0\n" \ "2:\n" \ - " .section .fixup,\"ax\"\n" \ + " .pushsection .fixup,\"ax\"\n" \ " .align 2\n" \ "3: mov %0, %3\n" \ " b 2b\n" \ - " .previous\n" \ - " .section __ex_table,\"a\"\n" \ + " .popsection\n" \ + " .pushsection __ex_table,\"a\"\n" \ " .align 3\n" \ " .long 1b, 3b\n" \ - " .previous" \ + " .popsection" \ : "+r" (err) \ : "r" (x), "r" (__pu_addr), "i" (-EFAULT) \ : "cc") @@ -340,17 +341,17 @@ do { \ #define __put_user_asm_word(x,__pu_addr,err) \ __asm__ __volatile__( \ - "1: strt %1,[%2]\n" \ + "1: " T(str) " %1,[%2],#0\n" \ "2:\n" \ - " .section .fixup,\"ax\"\n" \ + " .pushsection .fixup,\"ax\"\n" \ " .align 2\n" \ "3: mov %0, %3\n" \ " b 2b\n" \ - " .previous\n" \ - " .section __ex_table,\"a\"\n" \ + " .popsection\n" \ + " .pushsection __ex_table,\"a\"\n" \ " .align 3\n" \ " .long 1b, 3b\n" \ - " .previous" \ + " .popsection" \ : "+r" (err) \ : "r" (x), "r" (__pu_addr), "i" (-EFAULT) \ : "cc") @@ -365,19 +366,21 @@ do { \ #define __put_user_asm_dword(x,__pu_addr,err) \ __asm__ __volatile__( \ - "1: strt " __reg_oper1 ", [%1], #4\n" \ - "2: strt " __reg_oper0 ", [%1]\n" \ + ARM( "1: " T(str) " " __reg_oper1 ", [%1], #4\n" ) \ + ARM( "2: " T(str) " " __reg_oper0 ", [%1]\n" ) \ + THUMB( "1: " T(str) " " __reg_oper1 ", [%1]\n" ) \ + THUMB( "2: " T(str) " " __reg_oper0 ", [%1, #4]\n" ) \ "3:\n" \ - " .section .fixup,\"ax\"\n" \ + " .pushsection .fixup,\"ax\"\n" \ " .align 2\n" \ "4: mov %0, %3\n" \ " b 3b\n" \ - " .previous\n" \ - " .section __ex_table,\"a\"\n" \ + " .popsection\n" \ + " .pushsection __ex_table,\"a\"\n" \ " .align 3\n" \ " .long 1b, 4b\n" \ " .long 2b, 4b\n" \ - " .previous" \ + " .popsection" \ : "+r" (err), "+r" (__pu_addr) \ : "r" (x), "i" (-EFAULT) \ : "cc") diff --git a/arch/arm/include/asm/ucontext.h 
b/arch/arm/include/asm/ucontext.h index bf65e9f..47f023a 100644 --- a/arch/arm/include/asm/ucontext.h +++ b/arch/arm/include/asm/ucontext.h @@ -59,23 +59,22 @@ struct iwmmxt_sigframe { #endif /* CONFIG_IWMMXT */ #ifdef CONFIG_VFP -#if __LINUX_ARM_ARCH__ < 6 -/* For ARM pre-v6, we use fstmiax and fldmiax. This adds one extra - * word after the registers, and a word of padding at the end for - * alignment. */ #define VFP_MAGIC 0x56465001 -#define VFP_STORAGE_SIZE 152 -#else -#define VFP_MAGIC 0x56465002 -#define VFP_STORAGE_SIZE 144 -#endif struct vfp_sigframe { unsigned long magic; unsigned long size; - union vfp_state storage; -}; + struct user_vfp ufp; + struct user_vfp_exc ufp_exc; +} __attribute__((__aligned__(8))); + +/* + * 8 byte for magic and size, 264 byte for ufp, 12 bytes for ufp_exc, + * 4 bytes padding. + */ +#define VFP_STORAGE_SIZE sizeof(struct vfp_sigframe) + #endif /* CONFIG_VFP */ /* @@ -91,7 +90,7 @@ struct aux_sigframe { #ifdef CONFIG_IWMMXT struct iwmmxt_sigframe iwmmxt; #endif -#if 0 && defined CONFIG_VFP /* Not yet saved. */ +#ifdef CONFIG_VFP struct vfp_sigframe vfp; #endif /* Something that isn't a valid magic number for any coprocessor. */ diff --git a/arch/arm/include/asm/unified.h b/arch/arm/include/asm/unified.h new file mode 100644 index 0000000..bc63116 --- /dev/null +++ b/arch/arm/include/asm/unified.h @@ -0,0 +1,130 @@ +/* + * include/asm-arm/unified.h - Unified Assembler Syntax helper macros + * + * Copyright (C) 2008 ARM Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __ASM_UNIFIED_H +#define __ASM_UNIFIED_H + +#if defined(__ASSEMBLY__) && defined(CONFIG_ARM_ASM_UNIFIED) + .syntax unified +#endif + +#ifdef CONFIG_THUMB2_KERNEL + +#if __GNUC__ < 4 +#error Thumb-2 kernel requires gcc >= 4 +#endif + +/* The CPSR bit describing the instruction set (Thumb) */ +#define PSR_ISETSTATE PSR_T_BIT + +#define ARM(x...) +#define THUMB(x...) x +#ifdef __ASSEMBLY__ +#define W(instr) instr.w +#endif +#define BSYM(sym) sym + 1 + +#else /* !CONFIG_THUMB2_KERNEL */ + +/* The CPSR bit describing the instruction set (ARM) */ +#define PSR_ISETSTATE 0 + +#define ARM(x...) x +#define THUMB(x...) 
+#ifdef __ASSEMBLY__ +#define W(instr) instr +#endif +#define BSYM(sym) sym + +#endif /* CONFIG_THUMB2_KERNEL */ + +#ifndef CONFIG_ARM_ASM_UNIFIED + +/* + * If the unified assembly syntax isn't used (in ARM mode), these + * macros expand to an empty string + */ +#ifdef __ASSEMBLY__ + .macro it, cond + .endm + .macro itt, cond + .endm + .macro ite, cond + .endm + .macro ittt, cond + .endm + .macro itte, cond + .endm + .macro itet, cond + .endm + .macro itee, cond + .endm + .macro itttt, cond + .endm + .macro ittte, cond + .endm + .macro ittet, cond + .endm + .macro ittee, cond + .endm + .macro itett, cond + .endm + .macro itete, cond + .endm + .macro iteet, cond + .endm + .macro iteee, cond + .endm +#else /* !__ASSEMBLY__ */ +__asm__( +" .macro it, cond\n" +" .endm\n" +" .macro itt, cond\n" +" .endm\n" +" .macro ite, cond\n" +" .endm\n" +" .macro ittt, cond\n" +" .endm\n" +" .macro itte, cond\n" +" .endm\n" +" .macro itet, cond\n" +" .endm\n" +" .macro itee, cond\n" +" .endm\n" +" .macro itttt, cond\n" +" .endm\n" +" .macro ittte, cond\n" +" .endm\n" +" .macro ittet, cond\n" +" .endm\n" +" .macro ittee, cond\n" +" .endm\n" +" .macro itett, cond\n" +" .endm\n" +" .macro itete, cond\n" +" .endm\n" +" .macro iteet, cond\n" +" .endm\n" +" .macro iteee, cond\n" +" .endm\n"); +#endif /* __ASSEMBLY__ */ + +#endif /* CONFIG_ARM_ASM_UNIFIED */ + +#endif /* !__ASM_UNIFIED_H */ diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h index 0e97b8c..c891eb7 100644 --- a/arch/arm/include/asm/unistd.h +++ b/arch/arm/include/asm/unistd.h @@ -360,8 +360,8 @@ #define __NR_readlinkat (__NR_SYSCALL_BASE+332) #define __NR_fchmodat (__NR_SYSCALL_BASE+333) #define __NR_faccessat (__NR_SYSCALL_BASE+334) - /* 335 for pselect6 */ - /* 336 for ppoll */ +#define __NR_pselect6 (__NR_SYSCALL_BASE+335) +#define __NR_ppoll (__NR_SYSCALL_BASE+336) #define __NR_unshare (__NR_SYSCALL_BASE+337) #define __NR_set_robust_list (__NR_SYSCALL_BASE+338) #define __NR_get_robust_list (__NR_SYSCALL_BASE+339) @@ -372,7 +372,7 @@ #define __NR_vmsplice (__NR_SYSCALL_BASE+343) #define __NR_move_pages (__NR_SYSCALL_BASE+344) #define __NR_getcpu (__NR_SYSCALL_BASE+345) - /* 346 for epoll_pwait */ +#define __NR_epoll_pwait (__NR_SYSCALL_BASE+346) #define __NR_kexec_load (__NR_SYSCALL_BASE+347) #define __NR_utimensat (__NR_SYSCALL_BASE+348) #define __NR_signalfd (__NR_SYSCALL_BASE+349) @@ -390,7 +390,12 @@ #define __NR_preadv (__NR_SYSCALL_BASE+361) #define __NR_pwritev (__NR_SYSCALL_BASE+362) #define __NR_rt_tgsigqueueinfo (__NR_SYSCALL_BASE+363) -#define __NR_perf_counter_open (__NR_SYSCALL_BASE+364) +#define __NR_perf_event_open (__NR_SYSCALL_BASE+364) +#define __NR_recvmmsg (__NR_SYSCALL_BASE+365) +#define __NR_accept4 (__NR_SYSCALL_BASE+366) +#define __NR_fanotify_init (__NR_SYSCALL_BASE+367) +#define __NR_fanotify_mark (__NR_SYSCALL_BASE+368) +#define __NR_prlimit64 (__NR_SYSCALL_BASE+369) /* * The following SWIs are ARM private. @@ -403,6 +408,15 @@ #define __ARM_NR_set_tls (__ARM_NR_BASE+5) /* + * *NOTE*: This is a ghost syscall private to the kernel. Only the + * __kuser_cmpxchg code in entry-armv.S should be aware of its + * existence. Don't ever use this from user code. + */ +#ifdef __KERNEL__ +#define __ARM_NR_cmpxchg (__ARM_NR_BASE+0x00fff0) +#endif + +/* * The following syscalls are obsolete and no longer available for EABI. 
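With pselect6, ppoll and epoll_pwait given real __NR_* values above, a program built against an older C library can still reach them by number through syscall(2). A hedged userspace sketch (the raw_ppoll wrapper name is invented; note that the kernel takes an explicit sigsetsize as the fifth argument, which is 8 bytes for the 64-bit kernel sigset on ARM):

#include <poll.h>
#include <signal.h>
#include <time.h>
#include <unistd.h>
#include <sys/syscall.h>

static int raw_ppoll(struct pollfd *fds, nfds_t nfds,
		     const struct timespec *timeout, const sigset_t *sigmask)
{
	return syscall(__NR_ppoll, fds, nfds, timeout, sigmask,
		       8 /* kernel sigset_t size, not sizeof(sigset_t) */);
}

Once the C library grows proper wrappers for these calls, the raw form becomes unnecessary.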
*/ #if defined(__ARM_EABI__) && !defined(__KERNEL__) @@ -432,9 +446,13 @@ #define __ARCH_WANT_SYS_SIGPENDING #define __ARCH_WANT_SYS_SIGPROCMASK #define __ARCH_WANT_SYS_RT_SIGACTION +#define __ARCH_WANT_SYS_RT_SIGSUSPEND +#define __ARCH_WANT_SYS_OLD_MMAP +#define __ARCH_WANT_SYS_OLD_SELECT #if !defined(CONFIG_AEABI) || defined(CONFIG_OABI_COMPAT) #define __ARCH_WANT_SYS_TIME +#define __ARCH_WANT_SYS_IPC #define __ARCH_WANT_SYS_OLDUMOUNT #define __ARCH_WANT_SYS_ALARM #define __ARCH_WANT_SYS_UTIME @@ -455,6 +473,7 @@ * Unimplemented (or alternatively implemented) syscalls */ #define __IGNORE_fadvise64_64 1 +#define __IGNORE_migrate_pages 1 #endif /* __KERNEL__ */ #endif /* __ASM_ARM_UNISTD_H */ diff --git a/arch/arm/include/asm/user.h b/arch/arm/include/asm/user.h index df95e05..05ac4b0 100644 --- a/arch/arm/include/asm/user.h +++ b/arch/arm/include/asm/user.h @@ -83,11 +83,21 @@ struct user{ /* * User specific VFP registers. If only VFPv2 is present, registers 16 to 31 - * are ignored by the ptrace system call. + * are ignored by the ptrace system call and the signal handler. */ struct user_vfp { unsigned long long fpregs[32]; unsigned long fpscr; }; +/* + * VFP exception registers exposed to user space during signal delivery. + * Fields not relevant to the current VFP architecture are ignored. + */ +struct user_vfp_exc { + unsigned long fpexc; + unsigned long fpinst; + unsigned long fpinst2; +}; + #endif /* _ARM_USER_H */ diff --git a/arch/arm/include/asm/vfpmacros.h b/arch/arm/include/asm/vfpmacros.h index 422f3cc..3d5fc41 100644 --- a/arch/arm/include/asm/vfpmacros.h +++ b/arch/arm/include/asm/vfpmacros.h @@ -3,6 +3,8 @@ * * Assembler-only file containing VFP macros and register definitions. */ +#include <asm/hwcap.h> + #include "vfp.h" @ Macros to allow building with old toolkits (with no VFP support) @@ -22,12 +24,20 @@ LDC p11, cr0, [\base],#32*4 @ FLDMIAD \base!, {d0-d15} #endif #ifdef CONFIG_VFPv3 +#if __LINUX_ARM_ARCH__ <= 6 + ldr \tmp, =elf_hwcap @ may not have MVFR regs + ldr \tmp, [\tmp, #0] + tst \tmp, #HWCAP_VFPv3D16 + ldceq p11, cr0, [\base],#32*4 @ FLDMIAD \base!, {d16-d31} + addne \base, \base, #32*4 @ step over unused register space +#else VFPFMRX \tmp, MVFR0 @ Media and VFP Feature Register 0 and \tmp, \tmp, #MVFR0_A_SIMD_MASK @ A_SIMD field cmp \tmp, #2 @ 32 x 64bit registers? ldceql p11, cr0, [\base],#32*4 @ FLDMIAD \base!, {d16-d31} addne \base, \base, #32*4 @ step over unused register space #endif +#endif .endm @ write all the working registers out of the VFP @@ -38,10 +48,18 @@ STC p11, cr0, [\base],#32*4 @ FSTMIAD \base!, {d0-d15} #endif #ifdef CONFIG_VFPv3 +#if __LINUX_ARM_ARCH__ <= 6 + ldr \tmp, =elf_hwcap @ may not have MVFR regs + ldr \tmp, [\tmp, #0] + tst \tmp, #HWCAP_VFPv3D16 + stceq p11, cr0, [\base],#32*4 @ FSTMIAD \base!, {d16-d31} + addne \base, \base, #32*4 @ step over unused register space +#else VFPFMRX \tmp, MVFR0 @ Media and VFP Feature Register 0 and \tmp, \tmp, #MVFR0_A_SIMD_MASK @ A_SIMD field cmp \tmp, #2 @ 32 x 64bit registers? stceql p11, cr0, [\base],#32*4 @ FSTMIAD \base!, {d16-d31} addne \base, \base, #32*4 @ step over unused register space #endif +#endif .endm
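The vfpmacros.h hunks above avoid reading MVFR0 on pre-v7 cores, which may not implement the MVFR registers, and consult elf_hwcap instead. The same test written in C, as a sketch (elf_hwcap and HWCAP_VFPv3D16 are real kernel symbols; the helper name is invented):

#include <asm/hwcap.h>

extern unsigned long elf_hwcap;	/* hardware capability bits set at boot */

/* true when the full d0-d31 VFP register bank must be saved/restored */
static inline int vfp_has_32_dregs(void)
{
	/* HWCAP_VFPv3D16 means only d0-d15 exist, so skip the upper bank */
	return !(elf_hwcap & HWCAP_VFPv3D16);
}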