| author | Becky Bruce <bgill@freescale.com> | 2005-09-22 14:20:04 -0500 |
|---|---|---|
| committer | Paul Mackerras <paulus@samba.org> | 2005-09-25 22:38:46 +1000 |
| commit | feaf7cf153335fe7100b65ed6f4585c3574fe69a (patch) | |
| tree | c57198f01b5f12ffe8ce90f4e1399505c1f84a02 /include/asm-powerpc | |
| parent | 2bfadee32f1501faa3184d574f6a769f17236c87 (diff) | |
| download | op-kernel-dev-feaf7cf153335fe7100b65ed6f4585c3574fe69a.zip, op-kernel-dev-feaf7cf153335fe7100b65ed6f4585c3574fe69a.tar.gz | |
[PATCH] powerpc: merge atomic.h, memory.h
powerpc: Merge atomic.h and memory.h into powerpc
Merged atomic.h into include/asm-powerpc. Moved asm-style HMT_ defines from
memory.h into ppc_asm.h, where there were already HMT_ defines; moved c-style
HMT_ defines to processor.h. Renamed memory.h to synch.h to better reflect
its contents.
Signed-off-by: Kumar Gala <kumar.gala@freescale.com>
Signed-off-by: Becky Bruce <becky.bruce@freescale.com>
Signed-off-by: Jon Loeliger <linuxppc@jdl.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'include/asm-powerpc')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | include/asm-powerpc/atomic.h | 209 |
| -rw-r--r-- | include/asm-powerpc/ppc_asm.h | 3 |
| -rw-r--r-- | include/asm-powerpc/synch.h | 51 |
3 files changed, 263 insertions, 0 deletions
diff --git a/include/asm-powerpc/atomic.h b/include/asm-powerpc/atomic.h
new file mode 100644
index 0000000..ed4b345
--- /dev/null
+++ b/include/asm-powerpc/atomic.h
@@ -0,0 +1,209 @@
+#ifndef _ASM_POWERPC_ATOMIC_H_
+#define _ASM_POWERPC_ATOMIC_H_
+
+/*
+ * PowerPC atomic operations
+ */
+
+typedef struct { volatile int counter; } atomic_t;
+
+#ifdef __KERNEL__
+#include <asm/synch.h>
+
+#define ATOMIC_INIT(i)	{ (i) }
+
+#define atomic_read(v)		((v)->counter)
+#define atomic_set(v,i)		(((v)->counter) = (i))
+
+/* Erratum #77 on the 405 means we need a sync or dcbt before every stwcx.
+ * The old ATOMIC_SYNC_FIX covered some but not all of this.
+ */
+#ifdef CONFIG_IBM405_ERR77
+#define PPC405_ERR77(ra,rb)	"dcbt " #ra "," #rb ";"
+#else
+#define PPC405_ERR77(ra,rb)
+#endif
+
+static __inline__ void atomic_add(int a, atomic_t *v)
+{
+	int t;
+
+	__asm__ __volatile__(
+"1:	lwarx	%0,0,%3		# atomic_add\n\
+	add	%0,%2,%0\n"
+	PPC405_ERR77(0,%3)
+"	stwcx.	%0,0,%3 \n\
+	bne-	1b"
+	: "=&r" (t), "=m" (v->counter)
+	: "r" (a), "r" (&v->counter), "m" (v->counter)
+	: "cc");
+}
+
+static __inline__ int atomic_add_return(int a, atomic_t *v)
+{
+	int t;
+
+	__asm__ __volatile__(
+	EIEIO_ON_SMP
+"1:	lwarx	%0,0,%2		# atomic_add_return\n\
+	add	%0,%1,%0\n"
+	PPC405_ERR77(0,%2)
+"	stwcx.	%0,0,%2 \n\
+	bne-	1b"
+	ISYNC_ON_SMP
+	: "=&r" (t)
+	: "r" (a), "r" (&v->counter)
+	: "cc", "memory");
+
+	return t;
+}
+
+#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
+
+static __inline__ void atomic_sub(int a, atomic_t *v)
+{
+	int t;
+
+	__asm__ __volatile__(
+"1:	lwarx	%0,0,%3		# atomic_sub\n\
+	subf	%0,%2,%0\n"
+	PPC405_ERR77(0,%3)
+"	stwcx.	%0,0,%3 \n\
+	bne-	1b"
+	: "=&r" (t), "=m" (v->counter)
+	: "r" (a), "r" (&v->counter), "m" (v->counter)
+	: "cc");
+}
+
+static __inline__ int atomic_sub_return(int a, atomic_t *v)
+{
+	int t;
+
+	__asm__ __volatile__(
+	EIEIO_ON_SMP
+"1:	lwarx	%0,0,%2		# atomic_sub_return\n\
+	subf	%0,%1,%0\n"
+	PPC405_ERR77(0,%2)
+"	stwcx.	%0,0,%2 \n\
+	bne-	1b"
+	ISYNC_ON_SMP
+	: "=&r" (t)
+	: "r" (a), "r" (&v->counter)
+	: "cc", "memory");
+
+	return t;
+}
+
+static __inline__ void atomic_inc(atomic_t *v)
+{
+	int t;
+
+	__asm__ __volatile__(
+"1:	lwarx	%0,0,%2		# atomic_inc\n\
+	addic	%0,%0,1\n"
+	PPC405_ERR77(0,%2)
+"	stwcx.	%0,0,%2 \n\
+	bne-	1b"
+	: "=&r" (t), "=m" (v->counter)
+	: "r" (&v->counter), "m" (v->counter)
+	: "cc");
+}
+
+static __inline__ int atomic_inc_return(atomic_t *v)
+{
+	int t;
+
+	__asm__ __volatile__(
+	EIEIO_ON_SMP
+"1:	lwarx	%0,0,%1		# atomic_inc_return\n\
+	addic	%0,%0,1\n"
+	PPC405_ERR77(0,%1)
+"	stwcx.	%0,0,%1 \n\
+	bne-	1b"
+	ISYNC_ON_SMP
+	: "=&r" (t)
+	: "r" (&v->counter)
+	: "cc", "memory");
+
+	return t;
+}
+
+/*
+ * atomic_inc_and_test - increment and test
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
+
+static __inline__ void atomic_dec(atomic_t *v)
+{
+	int t;
+
+	__asm__ __volatile__(
+"1:	lwarx	%0,0,%2		# atomic_dec\n\
+	addic	%0,%0,-1\n"
+	PPC405_ERR77(0,%2)\
+"	stwcx.	%0,0,%2\n\
+	bne-	1b"
+	: "=&r" (t), "=m" (v->counter)
+	: "r" (&v->counter), "m" (v->counter)
+	: "cc");
+}
+
+static __inline__ int atomic_dec_return(atomic_t *v)
+{
+	int t;
+
+	__asm__ __volatile__(
+	EIEIO_ON_SMP
+"1:	lwarx	%0,0,%1		# atomic_dec_return\n\
+	addic	%0,%0,-1\n"
+	PPC405_ERR77(0,%1)
+"	stwcx.	%0,0,%1\n\
+	bne-	1b"
+	ISYNC_ON_SMP
+	: "=&r" (t)
+	: "r" (&v->counter)
+	: "cc", "memory");
+
+	return t;
+}
+
+#define atomic_sub_and_test(a, v)	(atomic_sub_return((a), (v)) == 0)
+#define atomic_dec_and_test(v)		(atomic_dec_return((v)) == 0)
+
+/*
+ * Atomically test *v and decrement if it is greater than 0.
+ * The function returns the old value of *v minus 1.
+ */
+static __inline__ int atomic_dec_if_positive(atomic_t *v)
+{
+	int t;
+
+	__asm__ __volatile__(
+	EIEIO_ON_SMP
+"1:	lwarx	%0,0,%1		# atomic_dec_if_positive\n\
+	addic.	%0,%0,-1\n\
+	blt-	2f\n"
+	PPC405_ERR77(0,%1)
+"	stwcx.	%0,0,%1\n\
+	bne-	1b"
+	ISYNC_ON_SMP
+	"\n\
+2:"	: "=&r" (t)
+	: "r" (&v->counter)
+	: "cc", "memory");
+
+	return t;
+}
+
+#define smp_mb__before_atomic_dec()	smp_mb()
+#define smp_mb__after_atomic_dec()	smp_mb()
+#define smp_mb__before_atomic_inc()	smp_mb()
+#define smp_mb__after_atomic_inc()	smp_mb()
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_ATOMIC_H_ */
diff --git a/include/asm-powerpc/ppc_asm.h b/include/asm-powerpc/ppc_asm.h
index 553035c..4efa718 100644
--- a/include/asm-powerpc/ppc_asm.h
+++ b/include/asm-powerpc/ppc_asm.h
@@ -75,8 +75,11 @@
 #define REST_32EVRS(n,s,base)	REST_16EVRS(n,s,base); REST_16EVRS(n+16,s,base)
 
 /* Macros to adjust thread priority for Iseries hardware multithreading */
+#define HMT_VERY_LOW	or	31,31,31	# very low priority\n"
 #define HMT_LOW		or	1,1,1
+#define HMT_MEDIUM_LOW	or	6,6,6		# medium low priority\n"
 #define HMT_MEDIUM	or	2,2,2
+#define HMT_MEDIUM_HIGH	or	5,5,5		# medium high priority\n"
 #define HMT_HIGH	or	3,3,3
 
 /* handle instructions that older assemblers may not know */
diff --git a/include/asm-powerpc/synch.h b/include/asm-powerpc/synch.h
new file mode 100644
index 0000000..4660c03
--- /dev/null
+++ b/include/asm-powerpc/synch.h
@@ -0,0 +1,51 @@
+#ifndef _ASM_POWERPC_SYNCH_H
+#define _ASM_POWERPC_SYNCH_H
+
+#include <linux/config.h>
+
+#ifdef __powerpc64__
+#define __SUBARCH_HAS_LWSYNC
+#endif
+
+#ifdef __SUBARCH_HAS_LWSYNC
+#    define LWSYNC	lwsync
+#else
+#    define LWSYNC	sync
+#endif
+
+
+/*
+ * Arguably the bitops and *xchg operations don't imply any memory barrier
+ * or SMP ordering, but in fact a lot of drivers expect them to imply
+ * both, since they do on x86 cpus.
+ */
+#ifdef CONFIG_SMP
+#define EIEIO_ON_SMP	"eieio\n"
+#define ISYNC_ON_SMP	"\n\tisync"
+#define SYNC_ON_SMP	__stringify(LWSYNC) "\n"
+#else
+#define EIEIO_ON_SMP
+#define ISYNC_ON_SMP
+#define SYNC_ON_SMP
+#endif
+
+static inline void eieio(void)
+{
+	__asm__ __volatile__ ("eieio" : : : "memory");
+}
+
+static inline void isync(void)
+{
+	__asm__ __volatile__ ("isync" : : : "memory");
+}
+
+#ifdef CONFIG_SMP
+#define eieio_on_smp()	eieio()
+#define isync_on_smp()	isync()
+#else
+#define eieio_on_smp()	__asm__ __volatile__("": : :"memory")
+#define isync_on_smp()	__asm__ __volatile__("": : :"memory")
+#endif
+
+#endif /* _ASM_POWERPC_SYNCH_H */
+
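A minimal usage sketch, not part of the commit: it only illustrates the calling pattern of the atomic_t API that this merge brings into include/asm-powerpc/atomic.h. The names example_refcount, example_get, and example_put are hypothetical; the barrier notes in the comments follow from the EIEIO_ON_SMP/ISYNC_ON_SMP sequences visible in the diff above.

#include <asm/atomic.h>

/* Hypothetical reference count protected by the merged atomic ops. */
static atomic_t example_refcount = ATOMIC_INIT(1);

static void example_get(void)
{
	/* atomic_inc() is a bare lwarx/stwcx. loop with no memory barrier. */
	atomic_inc(&example_refcount);
}

static int example_put(void)
{
	/*
	 * atomic_dec_and_test() is built on atomic_dec_return(), which
	 * brackets the update with EIEIO_ON_SMP/ISYNC_ON_SMP, so it also
	 * orders memory accesses on SMP. It returns true when the count
	 * reaches zero and the caller may release the object.
	 */
	return atomic_dec_and_test(&example_refcount);
}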