From fad57feba77d2e5b183e068cb6b90693e4567b40 Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Wed, 12 Nov 2008 20:11:47 +0900 Subject: sh: dynamic ftrace support. First cut at dynamic ftrace support. Signed-off-by: Matt Fleming Signed-off-by: Paul Mundt --- arch/sh/include/asm/ftrace.h | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) (limited to 'arch/sh/include/asm') diff --git a/arch/sh/include/asm/ftrace.h b/arch/sh/include/asm/ftrace.h index 3aed362..4cb5dbf 100644 --- a/arch/sh/include/asm/ftrace.h +++ b/arch/sh/include/asm/ftrace.h @@ -1,8 +1,29 @@ #ifndef __ASM_SH_FTRACE_H #define __ASM_SH_FTRACE_H +#ifdef CONFIG_FUNCTION_TRACER + +#define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */ + #ifndef __ASSEMBLY__ extern void mcount(void); + +#define MCOUNT_ADDR ((long)(mcount)) + +#ifdef CONFIG_DYNAMIC_FTRACE +#define CALLER_ADDR ((long)(ftrace_caller)) +#define STUB_ADDR ((long)(ftrace_stub)) + +#define MCOUNT_INSN_OFFSET ((STUB_ADDR - CALLER_ADDR) >> 1) +#endif + +static inline unsigned long ftrace_call_adjust(unsigned long addr) +{ + /* 'addr' is the memory table address. */ + return addr; +} #endif +#endif /* CONFIG_FUNCTION_TRACER */ + #endif /* __ASM_SH_FTRACE_H */ -- cgit v1.1 From 16b529d1d78060254d5bc735390915ca5ccf13a1 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Thu, 20 Nov 2008 15:25:22 +0900 Subject: sh: Convert to generic bitops for IRQ-toggling implementation. Signed-off-by: Paul Mundt --- arch/sh/include/asm/bitops-grb.h | 3 ++ arch/sh/include/asm/bitops-irq.h | 91 --------------------------------------- arch/sh/include/asm/bitops-llsc.h | 2 + arch/sh/include/asm/bitops.h | 6 +-- 4 files changed, 7 insertions(+), 95 deletions(-) delete mode 100644 arch/sh/include/asm/bitops-irq.h (limited to 'arch/sh/include/asm') diff --git a/arch/sh/include/asm/bitops-grb.h b/arch/sh/include/asm/bitops-grb.h index a5907b9..e73af33 100644 --- a/arch/sh/include/asm/bitops-grb.h +++ b/arch/sh/include/asm/bitops-grb.h @@ -166,4 +166,7 @@ static inline int test_and_change_bit(int nr, volatile void * addr) return retval; } + +#include + #endif /* __ASM_SH_BITOPS_GRB_H */ diff --git a/arch/sh/include/asm/bitops-irq.h b/arch/sh/include/asm/bitops-irq.h deleted file mode 100644 index 653a127..0000000 --- a/arch/sh/include/asm/bitops-irq.h +++ /dev/null @@ -1,91 +0,0 @@ -#ifndef __ASM_SH_BITOPS_IRQ_H -#define __ASM_SH_BITOPS_IRQ_H - -static inline void set_bit(int nr, volatile void *addr) -{ - int mask; - volatile unsigned int *a = addr; - unsigned long flags; - - a += nr >> 5; - mask = 1 << (nr & 0x1f); - local_irq_save(flags); - *a |= mask; - local_irq_restore(flags); -} - -static inline void clear_bit(int nr, volatile void *addr) -{ - int mask; - volatile unsigned int *a = addr; - unsigned long flags; - - a += nr >> 5; - mask = 1 << (nr & 0x1f); - local_irq_save(flags); - *a &= ~mask; - local_irq_restore(flags); -} - -static inline void change_bit(int nr, volatile void *addr) -{ - int mask; - volatile unsigned int *a = addr; - unsigned long flags; - - a += nr >> 5; - mask = 1 << (nr & 0x1f); - local_irq_save(flags); - *a ^= mask; - local_irq_restore(flags); -} - -static inline int test_and_set_bit(int nr, volatile void *addr) -{ - int mask, retval; - volatile unsigned int *a = addr; - unsigned long flags; - - a += nr >> 5; - mask = 1 << (nr & 0x1f); - local_irq_save(flags); - retval = (mask & *a) != 0; - *a |= mask; - local_irq_restore(flags); - - return retval; -} - -static inline int test_and_clear_bit(int nr, volatile void *addr) -{ - int mask, retval; - volatile unsigned int 
*a = addr; - unsigned long flags; - - a += nr >> 5; - mask = 1 << (nr & 0x1f); - local_irq_save(flags); - retval = (mask & *a) != 0; - *a &= ~mask; - local_irq_restore(flags); - - return retval; -} - -static inline int test_and_change_bit(int nr, volatile void *addr) -{ - int mask, retval; - volatile unsigned int *a = addr; - unsigned long flags; - - a += nr >> 5; - mask = 1 << (nr & 0x1f); - local_irq_save(flags); - retval = (mask & *a) != 0; - *a ^= mask; - local_irq_restore(flags); - - return retval; -} - -#endif /* __ASM_SH_BITOPS_IRQ_H */ diff --git a/arch/sh/include/asm/bitops-llsc.h b/arch/sh/include/asm/bitops-llsc.h index 43b8e1a..1d2fc0b 100644 --- a/arch/sh/include/asm/bitops-llsc.h +++ b/arch/sh/include/asm/bitops-llsc.h @@ -141,4 +141,6 @@ static inline int test_and_change_bit(int nr, volatile void * addr) return retval != 0; } +#include + #endif /* __ASM_SH_BITOPS_LLSC_H */ diff --git a/arch/sh/include/asm/bitops.h b/arch/sh/include/asm/bitops.h index 367930d..9b141e0 100644 --- a/arch/sh/include/asm/bitops.h +++ b/arch/sh/include/asm/bitops.h @@ -16,18 +16,16 @@ #elif defined(CONFIG_CPU_SH4A) #include #else -#include +#include +#include #endif - /* * clear_bit() doesn't provide any barrier for the compiler. */ #define smp_mb__before_clear_bit() barrier() #define smp_mb__after_clear_bit() barrier() -#include - #ifdef CONFIG_SUPERH32 static inline unsigned long ffz(unsigned long word) { -- cgit v1.1 From 0d5bbe0bc2583c4dc06ea00adccf07c3acd1481d Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Tue, 25 Nov 2008 21:22:02 +0900 Subject: sh: Provide optimized non-atomic bitops for SH-2A. This ties in the new SH-2A 32-bit non-atomic bitops. Signed-off-by: Paul Mundt --- arch/sh/include/asm/bitops-op32.h | 142 ++++++++++++++++++++++++++++++++++++++ arch/sh/include/asm/bitops.h | 3 + 2 files changed, 145 insertions(+) create mode 100644 arch/sh/include/asm/bitops-op32.h (limited to 'arch/sh/include/asm') diff --git a/arch/sh/include/asm/bitops-op32.h b/arch/sh/include/asm/bitops-op32.h new file mode 100644 index 0000000..f0ae7e9 --- /dev/null +++ b/arch/sh/include/asm/bitops-op32.h @@ -0,0 +1,142 @@ +#ifndef __ASM_SH_BITOPS_OP32_H +#define __ASM_SH_BITOPS_OP32_H + +/* + * The bit modifying instructions on SH-2A are only capable of working + * with a 3-bit immediate, which signifies the shift position for the bit + * being worked on. + */ +#if defined(__BIG_ENDIAN) +#define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7) +#define BYTE_NUMBER(nr) ((nr ^ BITOP_LE_SWIZZLE) / BITS_PER_BYTE) +#define BYTE_OFFSET(nr) ((nr ^ BITOP_LE_SWIZZLE) % BITS_PER_BYTE) +#else +#define BYTE_NUMBER(nr) ((nr) / BITS_PER_BYTE) +#define BYTE_OFFSET(nr) ((nr) % BITS_PER_BYTE) +#endif + +#define IS_IMMEDIATE(nr) (__builtin_constant_p(nr)) + +static inline void __set_bit(int nr, volatile unsigned long *addr) +{ + if (IS_IMMEDIATE(nr)) { + __asm__ __volatile__ ( + "bset.b %1, @(%O2,%0) ! __set_bit\n\t" + : "+r" (addr) + : "i" (BYTE_OFFSET(nr)), "i" (BYTE_NUMBER(nr)) + : "t", "memory" + ); + } else { + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + + *p |= mask; + } +} + +static inline void __clear_bit(int nr, volatile unsigned long *addr) +{ + if (IS_IMMEDIATE(nr)) { + __asm__ __volatile__ ( + "bclr.b %1, @(%O2,%0) ! 
__clear_bit\n\t" + : "+r" (addr) + : "i" (BYTE_OFFSET(nr)), + "i" (BYTE_NUMBER(nr)) + : "t", "memory" + ); + } else { + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + + *p &= ~mask; + } +} + +/** + * __change_bit - Toggle a bit in memory + * @nr: the bit to change + * @addr: the address to start counting from + * + * Unlike change_bit(), this function is non-atomic and may be reordered. + * If it's called on the same region of memory simultaneously, the effect + * may be that only one operation succeeds. + */ +static inline void __change_bit(int nr, volatile unsigned long *addr) +{ + if (IS_IMMEDIATE(nr)) { + __asm__ __volatile__ ( + "bxor.b %1, @(%O2,%0) ! __change_bit\n\t" + : "+r" (addr) + : "i" (BYTE_OFFSET(nr)), + "i" (BYTE_NUMBER(nr)) + : "t", "memory" + ); + } else { + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + + *p ^= mask; + } +} + +/** + * __test_and_set_bit - Set a bit and return its old value + * @nr: Bit to set + * @addr: Address to count from + * + * This operation is non-atomic and can be reordered. + * If two examples of this operation race, one can appear to succeed + * but actually fail. You must protect multiple accesses with a lock. + */ +static inline int __test_and_set_bit(int nr, volatile unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + unsigned long old = *p; + + *p = old | mask; + return (old & mask) != 0; +} + +/** + * __test_and_clear_bit - Clear a bit and return its old value + * @nr: Bit to clear + * @addr: Address to count from + * + * This operation is non-atomic and can be reordered. + * If two examples of this operation race, one can appear to succeed + * but actually fail. You must protect multiple accesses with a lock. + */ +static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + unsigned long old = *p; + + *p = old & ~mask; + return (old & mask) != 0; +} + +/* WARNING: non atomic and it can be reordered! */ +static inline int __test_and_change_bit(int nr, + volatile unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + unsigned long old = *p; + + *p = old ^ mask; + return (old & mask) != 0; +} + +/** + * test_bit - Determine whether a bit is set + * @nr: bit number to test + * @addr: Address to start counting from + */ +static inline int test_bit(int nr, const volatile unsigned long *addr) +{ + return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1))); +} + +#endif /* __ASM_SH_BITOPS_OP32_H */ diff --git a/arch/sh/include/asm/bitops.h b/arch/sh/include/asm/bitops.h index 9b141e0..ebe595b 100644 --- a/arch/sh/include/asm/bitops.h +++ b/arch/sh/include/asm/bitops.h @@ -13,6 +13,9 @@ #ifdef CONFIG_GUSA_RB #include +#elif defined(CONFIG_CPU_SH2A) +#include +#include #elif defined(CONFIG_CPU_SH4A) #include #else -- cgit v1.1 From 0c9122323acb0c3410dfbd219cb47f4c2e9305e3 Mon Sep 17 00:00:00 2001 From: Michael Trimarchi Date: Tue, 25 Nov 2008 21:37:14 +0900 Subject: sh: Add SH-4A optimized fastpath mutex implementation. 
Add fast mutex path implementation for the SH4A architecture. Signed-off-by: Michael Trimarchi Signed-off-by: Paul Mundt --- arch/sh/include/asm/mutex-llsc.h | 107 +++++++++++++++++++++++++++++++++++++++ arch/sh/include/asm/mutex.h | 5 +- 2 files changed, 111 insertions(+), 1 deletion(-) create mode 100644 arch/sh/include/asm/mutex-llsc.h (limited to 'arch/sh/include/asm') diff --git a/arch/sh/include/asm/mutex-llsc.h b/arch/sh/include/asm/mutex-llsc.h new file mode 100644 index 0000000..7c75af5 --- /dev/null +++ b/arch/sh/include/asm/mutex-llsc.h @@ -0,0 +1,107 @@ +/* + * arch/sh/include/asm/mutex-llsc.h + * + * SH-4A optimized mutex locking primitives + * + * Please look into asm-generic/mutex-xchg.h for a formal definition. + */ +#ifndef __ASM_SH_MUTEX_LLSC_H +#define __ASM_SH_MUTEX_LLSC_H + +/* + * Attempting to lock a mutex on SH4A is done as in the ARMv6+ architectures, + * with a bastardized atomic decrement (it is not a reliable atomic decrement + * but it satisfies the defined semantics for our purpose, while being + * smaller and faster than a real atomic decrement or atomic swap). + * The idea is to attempt decrementing the lock value only once. If once + * decremented it isn't zero, or if its store-back fails due to a dispute + * on the exclusive store, we simply bail out immediately through the slow + * path where the lock will be reattempted until it succeeds. + */ +static inline void +__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *)) +{ + int __res; + + __asm__ __volatile__ ( + "movli.l @%1, %0 \n" + "dt %0 \n" + "movco.l %0, @%1 \n" + : "=&z" (__res) + : "r" (&(count)->counter) + : "t"); + + if (unlikely(__res != 0)) + fail_fn(count); +} + +static inline int +__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *)) +{ + int __res; + + __asm__ __volatile__ ( + "movli.l @%1, %0 \n" + "dt %0 \n" + "movco.l %0, @%1 \n" + : "=&z" (__res) + : "r" (&(count)->counter) + : "t"); + + if (unlikely(__res != 0)) + __res = fail_fn(count); + + return __res; +} + +static inline void +__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *)) +{ + int __res; + + __asm__ __volatile__ ( + "1: movli.l @%1, %0 \n\t" + "add #1, %0 \n\t" + "movco.l %0, @%1 \n\t" + "bf 1b\n\t" + : "=&z" (__res) + : "r" (&(count)->counter) + : "t"); + + if (unlikely(__res <= 0)) + fail_fn(count); +} + +/* + * If the unlock was done on a contended lock, or if the unlock simply fails, + * then the mutex remains locked. + */ +#define __mutex_slowpath_needs_to_unlock() 1 + +/* + * For __mutex_fastpath_trylock we do an atomic decrement and check the + * result and put it in the __res variable. + */ +static inline int +__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *)) +{ + int __res, __orig; + + __asm__ __volatile__ ( + "1: movli.l @%2, %0 \n\t" + "dt %0 \n\t" + "movco.l %0,@%2 \n\t" + "bf 1b \n\t" + "cmp/eq #0,%0 \n\t" + "bt 2f \n\t" + "mov #0, %1 \n\t" + "bf 3f \n\t" + "2: mov #1, %1 \n\t" + "3: " + : "=&z" (__orig), "=&r" (__res) + : "r" (&count->counter) + : "t"); + + return __res; +} +#endif /* __ASM_SH_MUTEX_LLSC_H */ diff --git a/arch/sh/include/asm/mutex.h b/arch/sh/include/asm/mutex.h index 458c1f7..d8e3771 100644 --- a/arch/sh/include/asm/mutex.h +++ b/arch/sh/include/asm/mutex.h @@ -5,5 +5,8 @@ * implementation in place, or pick the atomic_xchg() based generic * implementation. 
(see asm-generic/mutex-xchg.h for details) */ - +#if defined(CONFIG_CPU_SH4A) +#include +#else #include +#endif -- cgit v1.1 From 716777db7270255f1f7210fd87a7188b08c9a267 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Tue, 25 Nov 2008 21:57:29 +0900 Subject: sh: P4 ioremap pass-through This patch adds a pass-through case when ioremapping P4 addresses. Addresses passed to ioremap() should be physical addresses, so the best option is usually to convert the virtual address to a physical address before calling ioremap. This will give you a virtual address in P2 which matches the physical address, and this works well for most internal hardware blocks on the SuperH architecture. However, some hardware blocks must be accessed through P4. Converting the P4 address to a physical address and then back to a P2 address does not work. One example of this is the sh7722 TMU block, which must be accessed through P4. Without this patch, P4 addresses will be mapped using PTEs, which requires the page allocator to be up and running. Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/include/asm/addrspace.h | 11 +++++++++++ arch/sh/include/asm/io.h | 4 ++++ 2 files changed, 15 insertions(+) (limited to 'arch/sh/include/asm') diff --git a/arch/sh/include/asm/addrspace.h b/arch/sh/include/asm/addrspace.h index 2702d81..36736c7 100644 --- a/arch/sh/include/asm/addrspace.h +++ b/arch/sh/include/asm/addrspace.h @@ -49,5 +49,16 @@ /* Check if an address can be reached in 29 bits */ #define IS_29BIT(a) (((unsigned long)(a)) < 0x20000000) +#ifdef CONFIG_SH_STORE_QUEUES +/* + * This is a special case for the SH-4 store queues, as pages for this + * space still need to be faulted in before it's possible to flush the + * store queue cache for writeout to the remapped region. + */ +#define P3_ADDR_MAX (P4SEG_STORE_QUE + 0x04000000) +#else +#define P3_ADDR_MAX P4SEG +#endif + #endif /* __KERNEL__ */ #endif /* __ASM_SH_ADDRSPACE_H */ diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h index 65eaae3..61f6dae 100644 --- a/arch/sh/include/asm/io.h +++ b/arch/sh/include/asm/io.h @@ -260,6 +260,10 @@ __ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags) return (void __iomem *)P2SEGADDR(offset); } + + /* P4 above the store queues are always mapped. */ + if (unlikely(offset >= P3_ADDR_MAX)) + return (void __iomem *)P4SEGADDR(offset); #endif return __ioremap(offset, size, flags); -- cgit v1.1 From 95b781c239f53b4c7ecaf2989404ec6379b2409b Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 26 Nov 2008 00:29:58 +0900 Subject: sh: Provide optimized unaligned loads on SH-4A. This adds support for unaligned loads on SH-4A, using the SH-4A's neutered movua.l instruction. As movua.l is r0-inspired, stores are still handled through the packed struct. Based on asm-generic/unaligned.h by Harvey Harrison. Signed-off-by: Paul Mundt --- arch/sh/include/asm/unaligned-sh4a.h | 258 +++++++++++++++++++++++++++++++++++ arch/sh/include/asm/unaligned.h | 7 +- 2 files changed, 264 insertions(+), 1 deletion(-) create mode 100644 arch/sh/include/asm/unaligned-sh4a.h (limited to 'arch/sh/include/asm') diff --git a/arch/sh/include/asm/unaligned-sh4a.h b/arch/sh/include/asm/unaligned-sh4a.h new file mode 100644 index 0000000..d8f8977 --- /dev/null +++ b/arch/sh/include/asm/unaligned-sh4a.h @@ -0,0 +1,258 @@ +#ifndef __ASM_SH_UNALIGNED_SH4A_H +#define __ASM_SH_UNALIGNED_SH4A_H + +/* + * SH-4A has support for unaligned 32-bit loads, and 32-bit loads only. 
+ Support for 16 and 64-bit accesses is done through shifting and + masking relative to the endianness. Unaligned stores are not supported + by the instruction encoding, so these continue to use the packed + struct. + + The same note as with the movli.l/movco.l pair applies here: as long + as the load is guaranteed to be inlined, nothing else will hook in to + r0 and we get the return value for free. + + NOTE: Due to the fact we require r0 encoding, care should be taken to + avoid mixing these heavily with other r0 consumers, such as the atomic + ops. Failure to adhere to this can result in the compiler running out + of spill registers and blowing up when building at low optimization + levels. See http://gcc.gnu.org/bugzilla/show_bug.cgi?id=34777. + */ +#include +#include + +static __always_inline u32 __get_unaligned_cpu32(const u8 *p) +{ + unsigned long unaligned; + + __asm__ __volatile__ ( + "movua.l @%1, %0\n\t" + : "=z" (unaligned) + : "r" (p) + ); + + return unaligned; +} + +struct __una_u16 { u16 x __attribute__((packed)); }; +struct __una_u32 { u32 x __attribute__((packed)); }; +struct __una_u64 { u64 x __attribute__((packed)); }; + +static inline u16 __get_unaligned_cpu16(const u8 *p) +{ +#ifdef __LITTLE_ENDIAN + return __get_unaligned_cpu32(p) & 0xffff; +#else + return __get_unaligned_cpu32(p) >> 16; +#endif +} + +/* + * Even though movua.l supports auto-increment on the read side, it can + * only store to r0 due to instruction encoding constraints, so just let + * the compiler sort it out on its own. + */ +static inline u64 __get_unaligned_cpu64(const u8 *p) +{ +#ifdef __LITTLE_ENDIAN + return (u64)__get_unaligned_cpu32(p + 4) << 32 | + __get_unaligned_cpu32(p); +#else + return (u64)__get_unaligned_cpu32(p) << 32 | + __get_unaligned_cpu32(p + 4); +#endif +} + +static inline u16 get_unaligned_le16(const void *p) +{ + return le16_to_cpu(__get_unaligned_cpu16(p)); +} + +static inline u32 get_unaligned_le32(const void *p) +{ + return le32_to_cpu(__get_unaligned_cpu32(p)); +} + +static inline u64 get_unaligned_le64(const void *p) +{ + return le64_to_cpu(__get_unaligned_cpu64(p)); +} + +static inline u16 get_unaligned_be16(const void *p) +{ + return be16_to_cpu(__get_unaligned_cpu16(p)); +} + +static inline u32 get_unaligned_be32(const void *p) +{ + return be32_to_cpu(__get_unaligned_cpu32(p)); +} + +static inline u64 get_unaligned_be64(const void *p) +{ + return be64_to_cpu(__get_unaligned_cpu64(p)); +} + +static inline void __put_le16_noalign(u8 *p, u16 val) +{ + *p++ = val; + *p++ = val >> 8; +} + +static inline void __put_le32_noalign(u8 *p, u32 val) +{ + __put_le16_noalign(p, val); + __put_le16_noalign(p + 2, val >> 16); +} + +static inline void __put_le64_noalign(u8 *p, u64 val) +{ + __put_le32_noalign(p, val); + __put_le32_noalign(p + 4, val >> 32); +} + +static inline void __put_be16_noalign(u8 *p, u16 val) +{ + *p++ = val >> 8; + *p++ = val; +} + +static inline void __put_be32_noalign(u8 *p, u32 val) +{ + __put_be16_noalign(p, val >> 16); + __put_be16_noalign(p + 2, val); +} + +static inline void __put_be64_noalign(u8 *p, u64 val) +{ + __put_be32_noalign(p, val >> 32); + __put_be32_noalign(p + 4, val); +} + +static inline void put_unaligned_le16(u16 val, void *p) +{ +#ifdef __LITTLE_ENDIAN + ((struct __una_u16 *)p)->x = val; +#else + __put_le16_noalign(p, val); +#endif +} + +static inline void put_unaligned_le32(u32 val, void *p) +{ +#ifdef __LITTLE_ENDIAN + ((struct __una_u32 *)p)->x = val; +#else + __put_le32_noalign(p, val); +#endif +} + +static 
inline void put_unaligned_le64(u64 val, void *p) +{ +#ifdef __LITTLE_ENDIAN + ((struct __una_u64 *)p)->x = val; +#else + __put_le64_noalign(p, val); +#endif +} + +static inline void put_unaligned_be16(u16 val, void *p) +{ +#ifdef __BIG_ENDIAN + ((struct __una_u16 *)p)->x = val; +#else + __put_be16_noalign(p, val); +#endif +} + +static inline void put_unaligned_be32(u32 val, void *p) +{ +#ifdef __BIG_ENDIAN + ((struct __una_u32 *)p)->x = val; +#else + __put_be32_noalign(p, val); +#endif +} + +static inline void put_unaligned_be64(u64 val, void *p) +{ +#ifdef __BIG_ENDIAN + ((struct __una_u64 *)p)->x = val; +#else + __put_be64_noalign(p, val); +#endif +} + +/* + * Cause a link-time error if we try an unaligned access other than + * 1,2,4 or 8 bytes long + */ +extern void __bad_unaligned_access_size(void); + +#define __get_unaligned_le(ptr) ((__force typeof(*(ptr)))({ \ + __builtin_choose_expr(sizeof(*(ptr)) == 1, *(ptr), \ + __builtin_choose_expr(sizeof(*(ptr)) == 2, get_unaligned_le16((ptr)), \ + __builtin_choose_expr(sizeof(*(ptr)) == 4, get_unaligned_le32((ptr)), \ + __builtin_choose_expr(sizeof(*(ptr)) == 8, get_unaligned_le64((ptr)), \ + __bad_unaligned_access_size())))); \ + })) + +#define __get_unaligned_be(ptr) ((__force typeof(*(ptr)))({ \ + __builtin_choose_expr(sizeof(*(ptr)) == 1, *(ptr), \ + __builtin_choose_expr(sizeof(*(ptr)) == 2, get_unaligned_be16((ptr)), \ + __builtin_choose_expr(sizeof(*(ptr)) == 4, get_unaligned_be32((ptr)), \ + __builtin_choose_expr(sizeof(*(ptr)) == 8, get_unaligned_be64((ptr)), \ + __bad_unaligned_access_size())))); \ + })) + +#define __put_unaligned_le(val, ptr) ({ \ + void *__gu_p = (ptr); \ + switch (sizeof(*(ptr))) { \ + case 1: \ + *(u8 *)__gu_p = (__force u8)(val); \ + break; \ + case 2: \ + put_unaligned_le16((__force u16)(val), __gu_p); \ + break; \ + case 4: \ + put_unaligned_le32((__force u32)(val), __gu_p); \ + break; \ + case 8: \ + put_unaligned_le64((__force u64)(val), __gu_p); \ + break; \ + default: \ + __bad_unaligned_access_size(); \ + break; \ + } \ + (void)0; }) + +#define __put_unaligned_be(val, ptr) ({ \ + void *__gu_p = (ptr); \ + switch (sizeof(*(ptr))) { \ + case 1: \ + *(u8 *)__gu_p = (__force u8)(val); \ + break; \ + case 2: \ + put_unaligned_be16((__force u16)(val), __gu_p); \ + break; \ + case 4: \ + put_unaligned_be32((__force u32)(val), __gu_p); \ + break; \ + case 8: \ + put_unaligned_be64((__force u64)(val), __gu_p); \ + break; \ + default: \ + __bad_unaligned_access_size(); \ + break; \ + } \ + (void)0; }) + +#ifdef __LITTLE_ENDIAN +# define get_unaligned __get_unaligned_le +# define put_unaligned __put_unaligned_le +#else +# define get_unaligned __get_unaligned_be +# define put_unaligned __put_unaligned_be +#endif + +#endif /* __ASM_SH_UNALIGNED_SH4A_H */ diff --git a/arch/sh/include/asm/unaligned.h b/arch/sh/include/asm/unaligned.h index c1641a0..8c0ad5e 100644 --- a/arch/sh/include/asm/unaligned.h +++ b/arch/sh/include/asm/unaligned.h @@ -1,7 +1,11 @@ #ifndef _ASM_SH_UNALIGNED_H #define _ASM_SH_UNALIGNED_H -/* SH can't handle unaligned accesses. */ +#ifdef CONFIG_CPU_SH4A +/* SH-4A can handle unaligned loads in a relatively neutered fashion. */ +#include +#else +/* Otherwise, SH can't handle unaligned accesses. 
*/ #ifdef __LITTLE_ENDIAN__ # include # include @@ -15,5 +19,6 @@ # define get_unaligned __get_unaligned_be # define put_unaligned __put_unaligned_be #endif +#endif #endif /* _ASM_SH_UNALIGNED_H */ -- cgit v1.1 From 9cfc9a9b6fff9ea7a19814b4472b3cb18b7bbdcc Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 26 Nov 2008 14:31:03 +0900 Subject: sh: Add a simple code dumper for SUPERH32 show_regs(). This implements a simple show_code() that is in turn plugged in to show_regs() to provide minimal code dumping at the end of the trace. Built on top of a simple instruction disassembler derived from the binutils opcode table. Signed-off-by: Paul Mundt --- arch/sh/include/asm/processor_32.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/sh/include/asm') diff --git a/arch/sh/include/asm/processor_32.h b/arch/sh/include/asm/processor_32.h index a46a020..7b14f0c 100644 --- a/arch/sh/include/asm/processor_32.h +++ b/arch/sh/include/asm/processor_32.h @@ -175,6 +175,7 @@ static __inline__ void enable_fpu(void) void show_trace(struct task_struct *tsk, unsigned long *sp, struct pt_regs *regs); +void show_code(struct pt_regs *regs); extern unsigned long get_wchan(struct task_struct *p); #define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc) -- cgit v1.1 From eb67cf14ae5c21609c200859d6f3eba71c591569 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 26 Nov 2008 15:47:44 +0900 Subject: sh: Consolidate cpu_relax()/cpu_sleep() definitions across _32/_64. Signed-off-by: Paul Mundt --- arch/sh/include/asm/processor.h | 3 +++ arch/sh/include/asm/processor_32.h | 3 --- arch/sh/include/asm/processor_64.h | 2 -- 3 files changed, 3 insertions(+), 5 deletions(-) (limited to 'arch/sh/include/asm') diff --git a/arch/sh/include/asm/processor.h b/arch/sh/include/asm/processor.h index 693364a..f186fc6 100644 --- a/arch/sh/include/asm/processor.h +++ b/arch/sh/include/asm/processor.h @@ -82,6 +82,9 @@ extern struct sh_cpuinfo cpu_data[]; #define current_cpu_data cpu_data[smp_processor_id()] #define raw_current_cpu_data cpu_data[raw_smp_processor_id()] +#define cpu_sleep() __asm__ __volatile__ ("sleep" : : : "memory") +#define cpu_relax() barrier() + /* Forward decl */ struct seq_operations; diff --git a/arch/sh/include/asm/processor_32.h b/arch/sh/include/asm/processor_32.h index 7b14f0c..2bfb735 100644 --- a/arch/sh/include/asm/processor_32.h +++ b/arch/sh/include/asm/processor_32.h @@ -183,9 +183,6 @@ extern unsigned long get_wchan(struct task_struct *p); #define user_stack_pointer(regs) ((regs)->regs[15]) -#define cpu_sleep() __asm__ __volatile__ ("sleep" : : : "memory") -#define cpu_relax() barrier() - #if defined(CONFIG_CPU_SH2A) || defined(CONFIG_CPU_SH3) || \ defined(CONFIG_CPU_SH4) #define PREFETCH_STRIDE L1_CACHE_BYTES diff --git a/arch/sh/include/asm/processor_64.h b/arch/sh/include/asm/processor_64.h index b0b4824..96067e9 100644 --- a/arch/sh/include/asm/processor_64.h +++ b/arch/sh/include/asm/processor_64.h @@ -228,7 +228,5 @@ extern unsigned long get_wchan(struct task_struct *p); #define user_stack_pointer(regs) ((regs)->sp) -#define cpu_relax() barrier() - #endif /* __ASSEMBLY__ */ #endif /* __ASM_SH_PROCESSOR_64_H */ -- cgit v1.1 From 22f131aa8de7a534339bf7051680234462f2e877 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Thu, 27 Nov 2008 11:04:43 +0900 Subject: sh: Provide a dyn_arch_ftrace struct definition. Needed for dynamic ftrace API changes. 
Signed-off-by: Paul Mundt --- arch/sh/include/asm/ftrace.h | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) (limited to 'arch/sh/include/asm') diff --git a/arch/sh/include/asm/ftrace.h b/arch/sh/include/asm/ftrace.h index 4cb5dbf..8fea7d8 100644 --- a/arch/sh/include/asm/ftrace.h +++ b/arch/sh/include/asm/ftrace.h @@ -15,15 +15,20 @@ extern void mcount(void); #define STUB_ADDR ((long)(ftrace_stub)) #define MCOUNT_INSN_OFFSET ((STUB_ADDR - CALLER_ADDR) >> 1) -#endif + +struct dyn_arch_ftrace { + /* No extra data needed on sh */ +}; + +#endif /* CONFIG_DYNAMIC_FTRACE */ static inline unsigned long ftrace_call_adjust(unsigned long addr) { /* 'addr' is the memory table address. */ return addr; } -#endif +#endif /* __ASSEMBLY__ */ #endif /* CONFIG_FUNCTION_TRACER */ #endif /* __ASM_SH_FTRACE_H */ -- cgit v1.1 From 2825999e8a9bd7ab7e25a7e7475c7cdd10371a13 Mon Sep 17 00:00:00 2001 From: Peter Griffin Date: Fri, 28 Nov 2008 22:48:20 +0900 Subject: sh: Add support for SH7201 CPU subtype. This patch adds support for the SH-2A FPU based SH7201 processor subtype. Signed-off-by: Peter Griffin Signed-off-by: Paul Mundt --- arch/sh/include/asm/bugs.h | 2 +- arch/sh/include/asm/processor.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/sh/include/asm') diff --git a/arch/sh/include/asm/bugs.h b/arch/sh/include/asm/bugs.h index 121b2ec..4924ff6 100644 --- a/arch/sh/include/asm/bugs.h +++ b/arch/sh/include/asm/bugs.h @@ -25,7 +25,7 @@ static void __init check_bugs(void) case CPU_SH7619: *p++ = '2'; break; - case CPU_SH7203 ... CPU_MXG: + case CPU_SH7201 ... CPU_MXG: *p++ = '2'; *p++ = 'a'; break; diff --git a/arch/sh/include/asm/processor.h b/arch/sh/include/asm/processor.h index f186fc6..1ef4b24 100644 --- a/arch/sh/include/asm/processor.h +++ b/arch/sh/include/asm/processor.h @@ -18,7 +18,7 @@ enum cpu_type { CPU_SH7619, /* SH-2A types */ - CPU_SH7203, CPU_SH7206, CPU_SH7263, CPU_MXG, + CPU_SH7201, CPU_SH7203, CPU_SH7206, CPU_SH7263, CPU_MXG, /* SH-3 types */ CPU_SH7705, CPU_SH7706, CPU_SH7707, -- cgit v1.1 From 3e51762759db9e26c6c3e4e1010d80a50c62ca03 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Thu, 4 Dec 2008 22:45:03 +0900 Subject: sh: move the hp6xx pm code Move the not-so-generic pm code from arch/sh/kernel/pm.c to the platform directory together with the rest of the hp6xx pm code. This is done to let non-hp6xx platforms enable CONFIG_PM. Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/include/asm/pm.h | 17 ----------------- 1 file changed, 17 deletions(-) delete mode 100644 arch/sh/include/asm/pm.h (limited to 'arch/sh/include/asm') diff --git a/arch/sh/include/asm/pm.h b/arch/sh/include/asm/pm.h deleted file mode 100644 index 56fdbd6..0000000 --- a/arch/sh/include/asm/pm.h +++ /dev/null @@ -1,17 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright 2006 (c) Andriy Skulysh - * - */ -#ifndef __ASM_SH_PM_H -#define __ASM_SH_PM_H - -extern u8 wakeup_start; -extern u8 wakeup_end; - -void pm_enter(void); - -#endif -- cgit v1.1 From 77ba93a7ac5fb0d9338bffbf97c787b8efe00806 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Mon, 8 Dec 2008 11:25:50 +0900 Subject: sh: Fix up the SH-4A mutex fastpath semantics. This fixes up the __mutex_fastpath_xxx() routines to match the semantics noted in the comment. 
Previously these were looping rather than doing a single pass, which is counter-intuitive, as the slow path takes care of the looping for us in the event of contention. Signed-off-by: Paul Mundt --- arch/sh/include/asm/mutex-llsc.h | 37 +++++++++++++++++++++---------------- 1 file changed, 21 insertions(+), 16 deletions(-) (limited to 'arch/sh/include/asm') diff --git a/arch/sh/include/asm/mutex-llsc.h b/arch/sh/include/asm/mutex-llsc.h index 7c75af5..a91990c 100644 --- a/arch/sh/include/asm/mutex-llsc.h +++ b/arch/sh/include/asm/mutex-llsc.h @@ -21,16 +21,18 @@ static inline void __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *)) { - int __res; + int __ex_flag, __res; __asm__ __volatile__ ( - "movli.l @%1, %0 \n" - "dt %0 \n" - "movco.l %0, @%1 \n" - : "=&z" (__res) + "movli.l @%2, %0 \n" + "add #-1, %0 \n" + "movco.l %0, @%2 \n" + "movt %1 \n" + : "=&z" (__res), "=&r" (__ex_flag) : "r" (&(count)->counter) : "t"); + __res |= !__ex_flag; if (unlikely(__res != 0)) fail_fn(count); } @@ -38,16 +40,18 @@ static inline int __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *)) { - int __res; + int __ex_flag, __res; __asm__ __volatile__ ( - "movli.l @%1, %0 \n" - "dt %0 \n" - "movco.l %0, @%1 \n" - : "=&z" (__res) + "movli.l @%2, %0 \n" + "add #-1, %0 \n" + "movco.l %0, @%2 \n" + "movt %1 \n" + : "=&z" (__res), "=&r" (__ex_flag) : "r" (&(count)->counter) : "t"); + __res |= !__ex_flag; if (unlikely(__res != 0)) __res = fail_fn(count); @@ -57,18 +61,19 @@ __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *)) static inline void __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *)) { - int __res; + int __ex_flag, __res; __asm__ __volatile__ ( - "1: movli.l @%1, %0 \n\t" + "movli.l @%2, %0 \n\t" "add #1, %0 \n\t" - "movco.l %0, @%1 \n\t" - "bf 1b\n\t" - : "=&z" (__res) + "movco.l %0, @%2 \n\t" + "movt %1 \n\t" + : "=&z" (__res), "=&r" (__ex_flag) : "r" (&(count)->counter) : "t"); - if (unlikely(__res <= 0)) + __res |= !__ex_flag; + if (unlikely(__res != 0)) fail_fn(count); } -- cgit v1.1 From 06be3724548a443a99d703ff79f43d6f1e2975f0 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Mon, 8 Dec 2008 17:01:40 +0900 Subject: sh: Fix an off-by-1 check in __mutex_fastpath_unlock(). Signed-off-by: Paul Mundt --- arch/sh/include/asm/mutex-llsc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/sh/include/asm') diff --git a/arch/sh/include/asm/mutex-llsc.h b/arch/sh/include/asm/mutex-llsc.h index a91990c..ee839ee 100644 --- a/arch/sh/include/asm/mutex-llsc.h +++ b/arch/sh/include/asm/mutex-llsc.h @@ -73,7 +73,7 @@ __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *)) : "t"); __res |= !__ex_flag; - if (unlikely(__res != 0)) + if (unlikely(__res <= 0)) fail_fn(count); } -- cgit v1.1 From 35724a0aed6e62bdad640e8a1b8498329708226f Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 10 Dec 2008 18:17:19 +0900 Subject: sh: Fix up the cpu_asid() return value on nommu. This ought to be unsigned long, rather than defaulting to int. 
Signed-off-by: Paul Mundt --- arch/sh/include/asm/mmu_context.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/sh/include/asm') diff --git a/arch/sh/include/asm/mmu_context.h b/arch/sh/include/asm/mmu_context.h index 04c0c97..5d9157b 100644 --- a/arch/sh/include/asm/mmu_context.h +++ b/arch/sh/include/asm/mmu_context.h @@ -22,7 +22,7 @@ #define MMU_CONTEXT_ASID_MASK 0x000000ff #define MMU_CONTEXT_VERSION_MASK 0xffffff00 #define MMU_CONTEXT_FIRST_VERSION 0x00000100 -#define NO_CONTEXT 0 +#define NO_CONTEXT 0UL /* ASID is 8-bit value, so it can't be 0x100 */ #define MMU_NO_ASID 0x100 @@ -130,7 +130,7 @@ static inline void switch_mm(struct mm_struct *prev, #define destroy_context(mm) do { } while (0) #define set_asid(asid) do { } while (0) #define get_asid() (0) -#define cpu_asid(cpu, mm) ({ (void)cpu; 0; }) +#define cpu_asid(cpu, mm) ({ (void)cpu; NO_CONTEXT; }) #define switch_and_save_asid(asid) (0) #define set_TTB(pgd) do { } while (0) #define get_TTB() (0) -- cgit v1.1 From f15b2dc02fef0c53aa5ffa3c4617e184f057d402 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 10 Dec 2008 19:18:46 +0900 Subject: sh: Fix up syscall_get_nr() comment in syscall_32.h. Residual copy-and-paste damage, fix it up. Signed-off-by: Paul Mundt --- arch/sh/include/asm/syscall_32.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/sh/include/asm') diff --git a/arch/sh/include/asm/syscall_32.h b/arch/sh/include/asm/syscall_32.h index 54773f2..05a868a 100644 --- a/arch/sh/include/asm/syscall_32.h +++ b/arch/sh/include/asm/syscall_32.h @@ -5,7 +5,7 @@ #include #include -/* The system call number is given by the user in %g1 */ +/* The system call number is given by the user in R3 */ static inline long syscall_get_nr(struct task_struct *task, struct pt_regs *regs) { -- cgit v1.1 From 94e2fb3d3e1f4cb6bad2b13c572c4c99ad734a37 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 10 Dec 2008 19:46:18 +0900 Subject: sh: Provide asm/syscall.h for SH-5. This provides the asm/syscall.h implementation for sh64 parts. Signed-off-by: Paul Mundt --- arch/sh/include/asm/syscall_64.h | 76 +++++++++++++++++++++++++++++++++++++++- 1 file changed, 75 insertions(+), 1 deletion(-) (limited to 'arch/sh/include/asm') diff --git a/arch/sh/include/asm/syscall_64.h b/arch/sh/include/asm/syscall_64.h index bcaaa8c..e95f3ae 100644 --- a/arch/sh/include/asm/syscall_64.h +++ b/arch/sh/include/asm/syscall_64.h @@ -1,6 +1,80 @@ #ifndef __ASM_SH_SYSCALL_64_H #define __ASM_SH_SYSCALL_64_H -#include +#include +#include +#include + +/* The system call number is given by the user in R9 */ +static inline long syscall_get_nr(struct task_struct *task, + struct pt_regs *regs) +{ + return (regs->syscall_nr >= 0) ? regs->regs[9] : -1L; +} + +static inline void syscall_rollback(struct task_struct *task, + struct pt_regs *regs) +{ + /* + * XXX: This needs some thought. On SH we don't + * save away the original R9 value anywhere. + */ +} + +static inline bool syscall_has_error(struct pt_regs *regs) +{ + return (regs->sr & 0x1) ? true : false; +} +static inline void syscall_set_error(struct pt_regs *regs) +{ + regs->sr |= 0x1; +} +static inline void syscall_clear_error(struct pt_regs *regs) +{ + regs->sr &= ~0x1; +} + +static inline long syscall_get_error(struct task_struct *task, + struct pt_regs *regs) +{ + return syscall_has_error(regs) ? 
regs->regs[9] : 0; +} + +static inline long syscall_get_return_value(struct task_struct *task, + struct pt_regs *regs) +{ + return regs->regs[9]; +} + +static inline void syscall_set_return_value(struct task_struct *task, + struct pt_regs *regs, + int error, long val) +{ + if (error) { + syscall_set_error(regs); + regs->regs[9] = -error; + } else { + syscall_clear_error(regs); + regs->regs[9] = val; + } +} + +static inline void syscall_get_arguments(struct task_struct *task, + struct pt_regs *regs, + unsigned int i, unsigned int n, + unsigned long *args) +{ + BUG_ON(i + n > 6); + memcpy(args, ®s->reg[2 + i], n * sizeof(args[0])); +} + +static inline void syscall_set_arguments(struct task_struct *task, + struct pt_regs *regs, + unsigned int i, unsigned int n, + const unsigned long *args) +{ + BUG_ON(i + n > 6); + memcpy(®s->reg[2 + i], args, n * sizeof(args[0])); +} #endif /* __ASM_SH_SYSCALL_64_H */ -- cgit v1.1 From dd76279b47dce2c0bd7c54997938ec4cb9f16884 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 10 Dec 2008 20:14:15 +0900 Subject: sh: Provide linux/regset.h interface for SH-5. Plugs in general and FPU regsets. Signed-off-by: Paul Mundt --- arch/sh/include/asm/elf.h | 2 -- arch/sh/include/asm/ptrace.h | 2 +- 2 files changed, 1 insertion(+), 3 deletions(-) (limited to 'arch/sh/include/asm') diff --git a/arch/sh/include/asm/elf.h b/arch/sh/include/asm/elf.h index 9eb9036..b809f22 100644 --- a/arch/sh/include/asm/elf.h +++ b/arch/sh/include/asm/elf.h @@ -108,13 +108,11 @@ typedef struct user_fpu_struct elf_fpregset_t; #define elf_check_fdpic(x) ((x)->e_flags & EF_SH_FDPIC) #define elf_check_const_displacement(x) ((x)->e_flags & EF_SH_PIC) -#ifdef CONFIG_SUPERH32 /* * Enable dump using regset. * This covers all of general/DSP/FPU regs. */ #define CORE_DUMP_USE_REGSET -#endif #define USE_ELF_CORE_DUMP #define ELF_FDPIC_CORE_EFLAGS EF_SH_FDPIC diff --git a/arch/sh/include/asm/ptrace.h b/arch/sh/include/asm/ptrace.h index 3ad18e9..12912ab 100644 --- a/arch/sh/include/asm/ptrace.h +++ b/arch/sh/include/asm/ptrace.h @@ -86,6 +86,7 @@ struct pt_dspregs { unsigned long re; unsigned long mod; }; +#endif #define PTRACE_GETREGS 12 /* General registers */ #define PTRACE_SETREGS 13 @@ -100,7 +101,6 @@ struct pt_dspregs { #define PTRACE_GETDSPREGS 55 /* DSP registers */ #define PTRACE_SETDSPREGS 56 -#endif #ifdef __KERNEL__ #include -- cgit v1.1 From d7b01f78a3ae6a3cc21a16a1a3d377adc2227537 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 10 Dec 2008 20:17:15 +0900 Subject: sh: Enable HAVE_ARCH_TRACEHOOK for all SH, now that SH-5 supports it too. 
Signed-off-by: Paul Mundt --- arch/sh/include/asm/processor_64.h | 2 +- arch/sh/include/asm/syscall_64.h | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'arch/sh/include/asm') diff --git a/arch/sh/include/asm/processor_64.h b/arch/sh/include/asm/processor_64.h index 96067e9..803177f 100644 --- a/arch/sh/include/asm/processor_64.h +++ b/arch/sh/include/asm/processor_64.h @@ -226,7 +226,7 @@ extern unsigned long get_wchan(struct task_struct *p); #define KSTK_EIP(tsk) ((tsk)->thread.pc) #define KSTK_ESP(tsk) ((tsk)->thread.sp) -#define user_stack_pointer(regs) ((regs)->sp) +#define user_stack_pointer(regs) ((regs)->regs[15]) #endif /* __ASSEMBLY__ */ #endif /* __ASM_SH_PROCESSOR_64_H */ diff --git a/arch/sh/include/asm/syscall_64.h b/arch/sh/include/asm/syscall_64.h index e95f3ae..e1143b9 100644 --- a/arch/sh/include/asm/syscall_64.h +++ b/arch/sh/include/asm/syscall_64.h @@ -65,7 +65,7 @@ static inline void syscall_get_arguments(struct task_struct *task, unsigned long *args) { BUG_ON(i + n > 6); - memcpy(args, ®s->reg[2 + i], n * sizeof(args[0])); + memcpy(args, ®s->regs[2 + i], n * sizeof(args[0])); } static inline void syscall_set_arguments(struct task_struct *task, @@ -74,7 +74,7 @@ static inline void syscall_set_arguments(struct task_struct *task, const unsigned long *args) { BUG_ON(i + n > 6); - memcpy(®s->reg[2 + i], args, n * sizeof(args[0])); + memcpy(®s->regs[2 + i], args, n * sizeof(args[0])); } #endif /* __ASM_SH_SYSCALL_64_H */ -- cgit v1.1 From ab6e570ba33dbee18c2520d386e0f367a9b573c3 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Thu, 11 Dec 2008 18:46:46 +0900 Subject: sh: Generic kgdb stub support. This migrates from the old bitrotted kgdb stub implementation and moves to the generic stub. In the process support for SH-2/SH-2A is also added, which the old stub never provided. Signed-off-by: Paul Mundt --- arch/sh/include/asm/kgdb.h | 66 +++++++++++++------------------------------- arch/sh/include/asm/system.h | 2 ++ 2 files changed, 21 insertions(+), 47 deletions(-) (limited to 'arch/sh/include/asm') diff --git a/arch/sh/include/asm/kgdb.h b/arch/sh/include/asm/kgdb.h index 24e4207..72704ed 100644 --- a/arch/sh/include/asm/kgdb.h +++ b/arch/sh/include/asm/kgdb.h @@ -1,21 +1,7 @@ -/* - * May be copied or modified under the terms of the GNU General Public - * License. See linux/COPYING for more information. - * - * Based on original code by Glenn Engel, Jim Kingdon, - * David Grothe , Tigran Aivazian, and - * Amit S. Kale - * - * Super-H port based on sh-stub.c (Ben Lee and Steve Chamberlain) by - * Henry Bell - * - * Header file for low-level support for remote debug using GDB. 
- * - */ - -#ifndef __KGDB_H -#define __KGDB_H +#ifndef __ASM_SH_KGDB_H +#define __ASM_SH_KGDB_H +#include #include /* Same as pt_regs but has vbr in place of syscall_nr */ @@ -30,40 +16,26 @@ struct kgdb_regs { unsigned long vbr; }; -/* State info */ -extern char kgdb_in_gdb_mode; -extern int kgdb_nofault; /* Ignore bus errors (in gdb mem access) */ -extern char in_nmi; /* Debounce flag to prevent NMI reentry*/ +enum regnames { + GDB_R0, GDB_R1, GDB_R2, GDB_R3, GDB_R4, GDB_R5, GDB_R6, GDB_R7, + GDB_R8, GDB_R9, GDB_R10, GDB_R11, GDB_R12, GDB_R13, GDB_R14, GDB_R15, -/* SCI */ -extern int kgdb_portnum; -extern int kgdb_baud; -extern char kgdb_parity; -extern char kgdb_bits; + GDB_PC, GDB_PR, GDB_SR, GDB_GBR, GDB_MACH, GDB_MACL, GDB_VBR, +}; -/* Init and interface stuff */ -extern int kgdb_init(void); -extern int (*kgdb_getchar)(void); -extern void (*kgdb_putchar)(int); +#define NUMREGBYTES ((GDB_VBR + 1) * 4) -/* Trap functions */ -typedef void (kgdb_debug_hook_t)(struct pt_regs *regs); -typedef void (kgdb_bus_error_hook_t)(void); -extern kgdb_debug_hook_t *kgdb_debug_hook; -extern kgdb_bus_error_hook_t *kgdb_bus_err_hook; +static inline void arch_kgdb_breakpoint(void) +{ + __asm__ __volatile__ ("trapa #0x3c\n"); +} -/* Console */ -struct console; -void kgdb_console_write(struct console *co, const char *s, unsigned count); -extern int kgdb_console_setup(struct console *, char *); +/* State info */ +extern char in_nmi; /* Debounce flag to prevent NMI reentry*/ -/* Prototypes for jmp fns */ -#define _JBLEN 9 -typedef int jmp_buf[_JBLEN]; -extern void longjmp(jmp_buf __jmpb, int __retval); -extern int setjmp(jmp_buf __jmpb); +#define BUFMAX 2048 -/* Forced breakpoint */ -#define breakpoint() __asm__ __volatile__("trapa #0x3c") +#define CACHE_FLUSH_IS_SAFE 1 +#define BREAK_INSTR_SIZE 2 -#endif +#endif /* __ASM_SH_KGDB_H */ diff --git a/arch/sh/include/asm/system.h b/arch/sh/include/asm/system.h index 6160fe4..c9ec6af 100644 --- a/arch/sh/include/asm/system.h +++ b/arch/sh/include/asm/system.h @@ -175,6 +175,8 @@ asmlinkage void name##_trap_handler(unsigned int vec, struct pt_regs *regs) BUILD_TRAP_HANDLER(address_error); BUILD_TRAP_HANDLER(debug); BUILD_TRAP_HANDLER(bug); +BUILD_TRAP_HANDLER(breakpoint); +BUILD_TRAP_HANDLER(singlestep); BUILD_TRAP_HANDLER(fpu_error); BUILD_TRAP_HANDLER(fpu_state_restore); -- cgit v1.1 From 4466b20cfcfa718ff515b9e3886749cc025e2005 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Fri, 12 Dec 2008 16:34:44 +0900 Subject: sh: Add SH-5 optimized memcpy()/memset()/strcpy()/strlen(). Adopted from the uClibc optimized string versions. Signed-off-by: Paul Mundt --- arch/sh/include/asm/string_64.h | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) (limited to 'arch/sh/include/asm') diff --git a/arch/sh/include/asm/string_64.h b/arch/sh/include/asm/string_64.h index aa1fef2..7420071 100644 --- a/arch/sh/include/asm/string_64.h +++ b/arch/sh/include/asm/string_64.h @@ -1,17 +1,20 @@ #ifndef __ASM_SH_STRING_64_H #define __ASM_SH_STRING_64_H -/* - * include/asm-sh/string_64.h - * - * Copyright (C) 2000, 2001 Paolo Alberelli - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. 
- */ +#ifdef __KERNEL__ + +#define __HAVE_ARCH_MEMSET +extern void *memset(void *__s, int __c, size_t __count); #define __HAVE_ARCH_MEMCPY extern void *memcpy(void *dest, const void *src, size_t count); +#define __HAVE_ARCH_STRLEN +extern size_t strlen(const char *); + +#define __HAVE_ARCH_STRCPY +extern char *strcpy(char *__dest, const char *__src); + +#endif /* __KERNEL__ */ + #endif /* __ASM_SH_STRING_64_H */ -- cgit v1.1 From ca0c14e447a399eb90a1c9a4357560c2a29ef499 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 17 Dec 2008 11:47:59 +0900 Subject: sh: Kill off sh_bios_in_gdb_mode(). With the reworked kgdb support, we always detach and reinitialize the stub. This was mostly a feature for handoffs between sh-ipl+g and the kgdb stub, but virtually no sh-ipl+g versions ever had this working right in the first place. Given that the sh-ipl+g stubs in general use today don't even support the GDB stub, and we have already killed off the special casing in the sh-sci serial driver, kill off this now unused symbol too. Signed-off-by: Paul Mundt --- arch/sh/include/asm/sh_bios.h | 1 - 1 file changed, 1 deletion(-) (limited to 'arch/sh/include/asm') diff --git a/arch/sh/include/asm/sh_bios.h b/arch/sh/include/asm/sh_bios.h index 0ca2619..d9c96d7 100644 --- a/arch/sh/include/asm/sh_bios.h +++ b/arch/sh/include/asm/sh_bios.h @@ -10,7 +10,6 @@ extern void sh_bios_console_write(const char *buf, unsigned int len); extern void sh_bios_char_out(char ch); -extern int sh_bios_in_gdb_mode(void); extern void sh_bios_gdb_detach(void); extern void sh_bios_get_node_addr(unsigned char *node_addr); -- cgit v1.1 From 7b80fb32b39a51ce3e1afa051f5a616eb8ecbed3 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 17 Dec 2008 12:19:30 +0900 Subject: sh: Kill off mv_heartbeat() from the machvec. Nothing is using this any more, so get rid of it before anyone gets the bright idea to start using it again. Signed-off-by: Paul Mundt --- arch/sh/include/asm/machvec.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'arch/sh/include/asm') diff --git a/arch/sh/include/asm/machvec.h b/arch/sh/include/asm/machvec.h index f1bae02..e14e09b 100644 --- a/arch/sh/include/asm/machvec.h +++ b/arch/sh/include/asm/machvec.h @@ -47,8 +47,6 @@ struct sh_machine_vector { void (*mv_init_irq)(void); void (*mv_init_pci)(void); - void (*mv_heartbeat)(void); - void __iomem *(*mv_ioport_map)(unsigned long port, unsigned int size); void (*mv_ioport_unmap)(void __iomem *); }; -- cgit v1.1 From 2125a46083dc5a9aa321c243e322638a9338cd11 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 17 Dec 2008 12:31:32 +0900 Subject: sh: Kill off dead mv_init_pci() from machvec. Signed-off-by: Paul Mundt --- arch/sh/include/asm/machvec.h | 1 - 1 file changed, 1 deletion(-) (limited to 'arch/sh/include/asm') diff --git a/arch/sh/include/asm/machvec.h b/arch/sh/include/asm/machvec.h index e14e09b..eec0d22 100644 --- a/arch/sh/include/asm/machvec.h +++ b/arch/sh/include/asm/machvec.h @@ -45,7 +45,6 @@ struct sh_machine_vector { int (*mv_irq_demux)(int irq); void (*mv_init_irq)(void); - void (*mv_init_pci)(void); void __iomem *(*mv_ioport_map)(unsigned long port, unsigned int size); void (*mv_ioport_unmap)(void __iomem *); -- cgit v1.1 From 866ef8f48f2272ce8d84156c91964d730666ab33 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 17 Dec 2008 13:57:15 +0900 Subject: sh: mach-edosk7705: Fix up edosk7705 so it all builds again. 
Signed-off-by: Paul Mundt --- arch/sh/include/asm/machvec.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'arch/sh/include/asm') diff --git a/arch/sh/include/asm/machvec.h b/arch/sh/include/asm/machvec.h index eec0d22..64b1c16 100644 --- a/arch/sh/include/asm/machvec.h +++ b/arch/sh/include/asm/machvec.h @@ -14,8 +14,6 @@ #include #include -struct device; - struct sh_machine_vector { void (*mv_setup)(char **cmdline_p); const char *mv_name; -- cgit v1.1 From 5d2685d0b3edc51ecc92604d5b7f5ca9b29b90bb Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 17 Dec 2008 15:56:06 +0900 Subject: sh: Conditionalize the code dumper on CONFIG_DUMP_CODE. We don't really want this enabled by default, but it is still quite useful for debugging. So, make it conditional and leave it off by default. Signed-off-by: Paul Mundt --- arch/sh/include/asm/processor_32.h | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'arch/sh/include/asm') diff --git a/arch/sh/include/asm/processor_32.h b/arch/sh/include/asm/processor_32.h index 2bfb735..d79063c 100644 --- a/arch/sh/include/asm/processor_32.h +++ b/arch/sh/include/asm/processor_32.h @@ -175,7 +175,15 @@ static __inline__ void enable_fpu(void) void show_trace(struct task_struct *tsk, unsigned long *sp, struct pt_regs *regs); + +#ifdef CONFIG_DUMP_CODE void show_code(struct pt_regs *regs); +#else +static inline void show_code(struct pt_regs *regs) +{ +} +#endif + extern unsigned long get_wchan(struct task_struct *p); #define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc) -- cgit v1.1 From fc5243d98ac2575ad14a974b3c097e9ba874c03d Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Thu, 25 Dec 2008 13:38:35 +0100 Subject: [S390] arch_setup_additional_pages arguments arch_setup_additional_pages currently gets two arguments, the binary format description and an indication of whether the process uses an executable stack. The second argument is not used by anybody; it could be removed without replacement. What actually does make sense is to pass an indication of whether the process uses the ELF interpreter. The glibc code will not use anything from the vdso if the process does not use the dynamic linker, so for statically linked binaries the architecture backend can choose not to map the vdso. Acked-by: Ingo Molnar Signed-off-by: Martin Schwidefsky --- arch/sh/include/asm/elf.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/sh/include/asm') diff --git a/arch/sh/include/asm/elf.h b/arch/sh/include/asm/elf.h index 9eb9036..9381397 100644 --- a/arch/sh/include/asm/elf.h +++ b/arch/sh/include/asm/elf.h @@ -204,7 +204,7 @@ do { \ #define ARCH_HAS_SETUP_ADDITIONAL_PAGES struct linux_binprm; extern int arch_setup_additional_pages(struct linux_binprm *bprm, - int executable_stack); + int uses_interp); extern unsigned int vdso_enabled; extern void __kernel_vsyscall; -- cgit v1.1
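A note for readers following the mutex-llsc commits in this series: the movli.l/movco.l pair used there is a load-locked/store-conditional sequence, so the single-pass fastpath semantics restored in the "Fix up the SH-4A mutex fastpath semantics" patch map naturally onto a weak compare-and-swap, which is likewise allowed to fail spuriously. The standalone C11 sketch below models that single-attempt structure for readers without SH hardware; it approximates the atomic decrement with a CAS from the uncontended state, and the function names and test harness are illustrative assumptions, not kernel code.

/* Illustrative C11 model of the SH-4A single-pass mutex fastpath:
 * one LL/SC-style attempt, falling back to a slow path on failure.
 * Standalone sketch under stated assumptions -- not kernel code.
 * Counter convention as in the old kernel mutex: 1 = unlocked,
 * 0 = locked, negative = locked with waiters. */
#include <stdatomic.h>
#include <stdio.h>

static void slowpath_lock(atomic_int *count)
{
	/* Placeholder: the real slow path queues and sleeps; this demo
	 * simply retries until the CAS from the unlocked state succeeds. */
	int v;
	do {
		v = atomic_load(count);
	} while (v != 1 || !atomic_compare_exchange_weak(count, &v, 0));
}

static void fastpath_lock(atomic_int *count)
{
	/* Single pass: one weak CAS, like one movli.l/movco.l attempt.
	 * A spurious failure (a lost movco.l reservation) simply takes
	 * the slow path instead of looping here. */
	int v = 1;
	if (!atomic_compare_exchange_weak(count, &v, 0))
		slowpath_lock(count);
}

int main(void)
{
	atomic_int count = 1;

	fastpath_lock(&count);
	printf("count after lock: %d\n", atomic_load(&count)); /* prints 0 */
	return 0;
}

The point of the single-pass structure, as the commit message notes, is that looping in the fastpath duplicates work: the slow path already retries until it succeeds, so a failed first attempt can bail out immediately.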