author | Peter Zijlstra <peterz@infradead.org> | 2014-04-23 20:07:47 +0200
committer | Thomas Gleixner <tglx@linutronix.de> | 2015-07-27 14:06:21 +0200
commit | 212d3be102d73dce70cc12f39dce4e0aed2c025b (patch)
tree | 312fee245c34c7f4310c029a268e3d0d7641c2ed /arch/alpha
parent | 56d1defe0bbddaa97d6e74b51490904130fd4f1d (diff)
alpha: Provide atomic_{or,xor,and}
Implement atomic logic ops -- atomic_{or,xor,and}.
These will replace the atomic_{set,clear}_mask functions that are
available on some archs.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
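As a hedged illustration of the migration the commit message describes (the flag names and function below are hypothetical, not part of this patch): callers that used the arch-specific atomic_set_mask()/atomic_clear_mask() helpers switch to atomic_or() and atomic_and() with a complemented mask.

```c
#include <linux/atomic.h>

/* Hypothetical flag bits, for illustration only. */
#define FLAG_BUSY	0x01
#define FLAG_DIRTY	0x02

static atomic_t flags = ATOMIC_INIT(0);

static void update_flags(void)
{
	/* Before: atomic_set_mask(FLAG_BUSY, &flags); */
	atomic_or(FLAG_BUSY, &flags);		/* set bits atomically */

	/* Before: atomic_clear_mask(FLAG_DIRTY, &flags); */
	atomic_and(~FLAG_DIRTY, &flags);	/* clear bits atomically */

	atomic_xor(FLAG_BUSY, &flags);		/* toggle bits atomically */
}
```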
Diffstat (limited to 'arch/alpha')
-rw-r--r-- | arch/alpha/include/asm/atomic.h | 43
1 file changed, 28 insertions(+), 15 deletions(-)
diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index 8f8eafb..0eff853 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -29,13 +29,13 @@
  * branch back to restart the operation.
  */

-#define ATOMIC_OP(op)						\
+#define ATOMIC_OP(op, asm_op)					\
 static __inline__ void atomic_##op(int i, atomic_t * v)	\
 {								\
 	unsigned long temp;					\
 	__asm__ __volatile__(					\
 	"1:	ldl_l %0,%1\n"					\
-	"	" #op "l %0,%2,%0\n"				\
+	"	" #asm_op " %0,%2,%0\n"				\
 	"	stl_c %0,%1\n"					\
 	"	beq %0,2f\n"					\
 	".subsection 2\n"					\
@@ -45,15 +45,15 @@ static __inline__ void atomic_##op(int i, atomic_t * v)	\
 	:"Ir" (i), "m" (v->counter));				\
 }								\

-#define ATOMIC_OP_RETURN(op)					\
+#define ATOMIC_OP_RETURN(op, asm_op)				\
 static inline int atomic_##op##_return(int i, atomic_t *v)	\
 {								\
 	long temp, result;					\
 	smp_mb();						\
 	__asm__ __volatile__(					\
 	"1:	ldl_l %0,%1\n"					\
-	"	" #op "l %0,%3,%2\n"				\
-	"	" #op "l %0,%3,%0\n"				\
+	"	" #asm_op " %0,%3,%2\n"				\
+	"	" #asm_op " %0,%3,%0\n"				\
 	"	stl_c %0,%1\n"					\
 	"	beq %0,2f\n"					\
 	".subsection 2\n"					\
@@ -65,13 +65,13 @@ static inline int atomic_##op##_return(int i, atomic_t *v)	\
 	return result;						\
 }

-#define ATOMIC64_OP(op)						\
+#define ATOMIC64_OP(op, asm_op)					\
 static __inline__ void atomic64_##op(long i, atomic64_t * v)	\
 {								\
 	unsigned long temp;					\
 	__asm__ __volatile__(					\
 	"1:	ldq_l %0,%1\n"					\
-	"	" #op "q %0,%2,%0\n"				\
+	"	" #asm_op " %0,%2,%0\n"				\
 	"	stq_c %0,%1\n"					\
 	"	beq %0,2f\n"					\
 	".subsection 2\n"					\
@@ -81,15 +81,15 @@ static __inline__ void atomic64_##op(long i, atomic64_t * v)	\
 	:"Ir" (i), "m" (v->counter));				\
 }								\

-#define ATOMIC64_OP_RETURN(op)					\
+#define ATOMIC64_OP_RETURN(op, asm_op)				\
 static __inline__ long atomic64_##op##_return(long i, atomic64_t * v)	\
 {								\
 	long temp, result;					\
 	smp_mb();						\
 	__asm__ __volatile__(					\
 	"1:	ldq_l %0,%1\n"					\
-	"	" #op "q %0,%3,%2\n"				\
-	"	" #op "q %0,%3,%0\n"				\
+	"	" #asm_op " %0,%3,%2\n"				\
+	"	" #asm_op " %0,%3,%0\n"				\
 	"	stq_c %0,%1\n"					\
 	"	beq %0,2f\n"					\
 	".subsection 2\n"					\
@@ -101,15 +101,28 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v)	\
 	return result;						\
 }

-#define ATOMIC_OPS(opg)						\
-	ATOMIC_OP(opg)						\
-	ATOMIC_OP_RETURN(opg)					\
-	ATOMIC64_OP(opg)					\
-	ATOMIC64_OP_RETURN(opg)
+#define ATOMIC_OPS(op)						\
+	ATOMIC_OP(op, op##l)					\
+	ATOMIC_OP_RETURN(op, op##l)				\
+	ATOMIC64_OP(op, op##q)					\
+	ATOMIC64_OP_RETURN(op, op##q)

 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)

+#define CONFIG_ARCH_HAS_ATOMIC_OR
+#define atomic_andnot atomic_andnot
+#define atomic64_andnot atomic64_andnot
+
+ATOMIC_OP(and, and)
+ATOMIC_OP(andnot, bic)
+ATOMIC_OP(or, bis)
+ATOMIC_OP(xor, xor)
+ATOMIC64_OP(and, and)
+ATOMIC64_OP(andnot, bic)
+ATOMIC64_OP(or, bis)
+ATOMIC64_OP(xor, xor)
+
 #undef ATOMIC_OPS
 #undef ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP
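For readers unfamiliar with Alpha's LL/SC sequences, here is roughly what the preprocessor generates from ATOMIC_OP(or, bis) after this patch: a load-locked/store-conditional retry loop around the bis (bitwise OR) instruction. The constraint lines come from unchanged context not visible in the hunks above, so treat this expansion as a sketch rather than the literal generated code.

```c
static __inline__ void atomic_or(int i, atomic_t *v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"		/* load-locked v->counter into temp */
	"	bis %0,%2,%0\n"		/* temp |= i */
	"	stl_c %0,%1\n"		/* store-conditional; temp becomes 0 on failure */
	"	beq %0,2f\n"		/* on failure, branch to out-of-line retry */
	".subsection 2\n"
	"2:	br 1b\n"		/* retry the whole LL/SC sequence */
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}
```

The same pattern yields atomic_and (via and), atomic_andnot (via bic), and atomic_xor (via xor), with ldq_l/stq_c substituted for the 64-bit atomic64_* variants.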