author     Linus Torvalds <torvalds@linux-foundation.org>   2011-11-01 07:48:13 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2011-11-01 07:48:13 -0700
commit     c87d5d594736dd8b56df67e31846c7d7b8c41a8f
tree       a4496b74b932e55b544d040af2668e68abcb1e56   /include/asm-generic
parent     094803e0aab3fe75bbf8202a8f4b5280eaade375
parent     4e29198e1cd7728c30c96a8483a6068c71b34e4e
Merge Qualcomm Hexagon architecture
This is the fifth version of the patchset (with one tiny whitespace
fix) adding support for the Qualcomm Hexagon architecture to the Linux
kernel.

Between now and the next pull request, Richard Kuo should have his key
signed, etc., and should be back on kernel.org. In the meantime, this
got merged as an emailed patch series.
* Hexagon: (36 commits)
Add extra arch overrides to asm-generic/checksum.h
Hexagon: Add self to MAINTAINERS
Hexagon: Add basic stacktrace functionality for Hexagon architecture.
Hexagon: Add configuration and makefiles for the Hexagon architecture.
Hexagon: Comet platform support
Hexagon: kgdb support files
Hexagon: Add page-fault support.
Hexagon: Add page table header files & etc.
Hexagon: Add ioremap support
Hexagon: Provide DMA implementation
Hexagon: Implement basic TLB management routines for Hexagon.
Hexagon: Implement basic cache-flush support
Hexagon: Provide basic implementation and/or stubs for I/O routines.
Hexagon: Add user access functions
Hexagon: Add locking types and functions
Hexagon: Add SMP support
Hexagon: Provide basic debugging and system trap support.
Hexagon: Add ptrace support
Hexagon: Add time and timer functions
Hexagon: Add interrupts
...
Diffstat (limited to 'include/asm-generic')
-rw-r--r--   include/asm-generic/checksum.h |   4
-rw-r--r--   include/asm-generic/rwsem.h    | 132
2 files changed, 136 insertions, 0 deletions
diff --git a/include/asm-generic/checksum.h b/include/asm-generic/checksum.h
index 4647c76..c084767 100644
--- a/include/asm-generic/checksum.h
+++ b/include/asm-generic/checksum.h
@@ -33,8 +33,10 @@ extern __wsum csum_partial_copy(const void *src, void *dst, int len, __wsum sum)
 extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
 					int len, __wsum sum, int *csum_err);
 
+#ifndef csum_partial_copy_nocheck
 #define csum_partial_copy_nocheck(src, dst, len, sum)	\
 	csum_partial_copy((src), (dst), (len), (sum))
+#endif
 
 /*
  * This is a version of ip_compute_csum() optimized for IP headers,
@@ -63,12 +65,14 @@ csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
 		unsigned short proto, __wsum sum);
 #endif
 
+#ifndef csum_tcpudp_magic
 static inline __sum16
 csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len,
 		  unsigned short proto, __wsum sum)
 {
 	return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
 }
+#endif
 
 /*
  * this routine is used for miscellaneous IP-like checksums, mainly
diff --git a/include/asm-generic/rwsem.h b/include/asm-generic/rwsem.h
new file mode 100644
index 0000000..bb1e2cd
--- /dev/null
+++ b/include/asm-generic/rwsem.h
@@ -0,0 +1,132 @@
+#ifndef _ASM_POWERPC_RWSEM_H
+#define _ASM_POWERPC_RWSEM_H
+
+#ifndef _LINUX_RWSEM_H
+#error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead."
+#endif
+
+#ifdef __KERNEL__
+
+/*
+ * R/W semaphores for PPC using the stuff in lib/rwsem.c.
+ * Adapted largely from include/asm-i386/rwsem.h
+ * by Paul Mackerras <paulus@samba.org>.
+ */
+
+/*
+ * the semaphore definition
+ */
+#ifdef CONFIG_PPC64
+# define RWSEM_ACTIVE_MASK		0xffffffffL
+#else
+# define RWSEM_ACTIVE_MASK		0x0000ffffL
+#endif
+
+#define RWSEM_UNLOCKED_VALUE		0x00000000L
+#define RWSEM_ACTIVE_BIAS		0x00000001L
+#define RWSEM_WAITING_BIAS		(-RWSEM_ACTIVE_MASK-1)
+#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
+#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
+
+/*
+ * lock for reading
+ */
+static inline void __down_read(struct rw_semaphore *sem)
+{
+	if (unlikely(atomic_long_inc_return((atomic_long_t *)&sem->count) <= 0))
+		rwsem_down_read_failed(sem);
+}
+
+static inline int __down_read_trylock(struct rw_semaphore *sem)
+{
+	long tmp;
+
+	while ((tmp = sem->count) >= 0) {
+		if (tmp == cmpxchg(&sem->count, tmp,
+				   tmp + RWSEM_ACTIVE_READ_BIAS)) {
+			return 1;
+		}
+	}
+	return 0;
+}
+
+/*
+ * lock for writing
+ */
+static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
+{
+	long tmp;
+
+	tmp = atomic_long_add_return(RWSEM_ACTIVE_WRITE_BIAS,
+				     (atomic_long_t *)&sem->count);
+	if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
+		rwsem_down_write_failed(sem);
+}
+
+static inline void __down_write(struct rw_semaphore *sem)
+{
+	__down_write_nested(sem, 0);
+}
+
+static inline int __down_write_trylock(struct rw_semaphore *sem)
+{
+	long tmp;
+
+	tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
+		      RWSEM_ACTIVE_WRITE_BIAS);
+	return tmp == RWSEM_UNLOCKED_VALUE;
+}
+
+/*
+ * unlock after reading
+ */
+static inline void __up_read(struct rw_semaphore *sem)
+{
+	long tmp;
+
+	tmp = atomic_long_dec_return((atomic_long_t *)&sem->count);
+	if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0))
+		rwsem_wake(sem);
+}
+
+/*
+ * unlock after writing
+ */
+static inline void __up_write(struct rw_semaphore *sem)
+{
+	if (unlikely(atomic_long_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
+					    (atomic_long_t *)&sem->count) < 0))
+		rwsem_wake(sem);
+}
+
+/*
+ * implement atomic add functionality
+ */
+static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
+{
+	atomic_long_add(delta, (atomic_long_t *)&sem->count);
+}
+
+/*
+ * downgrade write lock to read lock
+ */
+static inline void __downgrade_write(struct rw_semaphore *sem)
+{
+	long tmp;
+
+	tmp = atomic_long_add_return(-RWSEM_WAITING_BIAS,
+				     (atomic_long_t *)&sem->count);
+	if (tmp < 0)
+		rwsem_downgrade_wake(sem);
+}
+
+/*
+ * implement exchange and add functionality
+ */
+static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
+{
+	return atomic_long_add_return(delta, (atomic_long_t *)&sem->count);
+}
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_RWSEM_H */
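The checksum.h hunks above do not change any generic behaviour; they only wrap the generic csum_partial_copy_nocheck and csum_tcpudp_magic definitions in #ifndef guards, so an architecture header (such as Hexagon's) can supply its own versions before including asm-generic/checksum.h. Below is a minimal userspace sketch of that override pattern; arch_checksum, generic_checksum and do_checksum are hypothetical stand-ins, not kernel symbols.

```c
/*
 * Illustrative sketch (not kernel code) of the #ifndef override pattern
 * introduced by the checksum.h hunks above.
 */
#include <stdio.h>

/*
 * "Architecture" side: provide an implementation and claim the name,
 * the way an arch header can before the generic header is processed.
 */
static inline unsigned int arch_checksum(unsigned int x)
{
	return x ^ 0xffffu;
}
#define do_checksum arch_checksum

/*
 * "Generic" side: only define the fallback when nobody overrode it,
 * mirroring the new #ifndef csum_partial_copy_nocheck and
 * #ifndef csum_tcpudp_magic guards.
 */
#ifndef do_checksum
static inline unsigned int generic_checksum(unsigned int x)
{
	return ~x & 0xffffu;
}
#define do_checksum generic_checksum
#endif

int main(void)
{
	/* With the override in place, this resolves to arch_checksum(). */
	printf("do_checksum(0x1234) = 0x%x\n", do_checksum(0x1234));
	return 0;
}
```

Removing the #define do_checksum arch_checksum line makes the guarded fallback compile instead, which is what architectures that do not override these helpers get.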
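The new rwsem.h encodes the whole fast-path lock state in one signed count built from the bias constants above. The sketch below is a self-contained, single-threaded model of that arithmetic only, assuming the 32-bit RWSEM_ACTIVE_MASK; the kernel's real atomics, slow paths and waiter queues are deliberately left out.

```c
/*
 * Single-threaded model of the rwsem fast-path arithmetic in the new
 * asm-generic/rwsem.h above.  Illustration only: no atomics, no slow
 * paths, no waiter handling; 32-bit RWSEM_ACTIVE_MASK assumed.
 */
#include <stdio.h>

#define RWSEM_ACTIVE_MASK	0x0000ffffL
#define RWSEM_UNLOCKED_VALUE	0x00000000L
#define RWSEM_ACTIVE_BIAS	0x00000001L
#define RWSEM_WAITING_BIAS	(-RWSEM_ACTIVE_MASK-1)
#define RWSEM_ACTIVE_READ_BIAS	RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

static long count = RWSEM_UNLOCKED_VALUE;

static void show(const char *op, int fast_path_ok)
{
	printf("%-12s count=%8ld (0x%08lx)  fast path %s\n", op, count,
	       (unsigned long)count & 0xffffffffUL,
	       fast_path_ok ? "succeeds" : "fails -> slow path");
}

int main(void)
{
	/* Two readers: each adds +1 and succeeds while the result is > 0. */
	count += RWSEM_ACTIVE_READ_BIAS; show("down_read", count > 0);
	count += RWSEM_ACTIVE_READ_BIAS; show("down_read", count > 0);

	/*
	 * A writer adds RWSEM_ACTIVE_WRITE_BIAS; the fast path only wins
	 * when the result is exactly that bias, i.e. the lock was free.
	 */
	count += RWSEM_ACTIVE_WRITE_BIAS;
	show("down_write", count == RWSEM_ACTIVE_WRITE_BIAS);
	/* Model only: undo the attempt; the kernel's slow path blocks. */
	count -= RWSEM_ACTIVE_WRITE_BIAS;

	/* Readers drop the lock again. */
	count -= RWSEM_ACTIVE_BIAS; show("up_read", 1);
	count -= RWSEM_ACTIVE_BIAS; show("up_read", 1);

	/* Now the writer's fast path works from an unlocked count of 0. */
	count += RWSEM_ACTIVE_WRITE_BIAS;
	show("down_write", count == RWSEM_ACTIVE_WRITE_BIAS);

	/* Writer unlock: a negative result would mean waiters to wake. */
	count -= RWSEM_ACTIVE_WRITE_BIAS;
	show("up_write", count >= 0);
	return 0;
}
```

The sign of the count and its low RWSEM_ACTIVE_MASK bits are what the __up_read()/__up_write() checks in the header inspect to decide whether rwsem_wake() is needed.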