author		Linus Torvalds <torvalds@linux-foundation.org>	2014-10-13 15:48:00 +0200
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-10-13 15:48:00 +0200
commit		dbb885fecc1b1b35e93416bedd24d21bd20f60ed (patch)
tree		9aa92bcc4e3d3594eba0ba85d72b878d85f35a59 /arch/ia64
parent		d6dd50e07c5bec00db2005969b1a01f8ca3d25ef (diff)
parent		2291059c852706c6f5ffb400366042b7625066cd (diff)
Merge branch 'locking-arch-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull arch atomic cleanups from Ingo Molnar:
"This is a series kept separate from the main locking tree, which
cleans up and improves various details in the atomics type handling:
- Remove the unused atomic_or_long() method
- Consolidate and compress atomic ops implementations between
architectures, to reduce linecount and to make it easier to add new
ops.
- Rewrite generic atomic support to only require cmpxchg() from an
architecture - generate all other methods from that"
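
The third bullet is the structural change: once an architecture supplies cmpxchg(), every other atomic method can be generated from a retry loop around it. A minimal user-space sketch of that pattern (not the kernel's asm-generic code itself), modeling cmpxchg() with GCC's __sync_val_compare_and_swap() builtin; the macro name merely mirrors the kernel's:

	#include <stdio.h>

	typedef struct { int counter; } atomic_t;

	/* stand-in for the architecture-provided cmpxchg() primitive */
	#define cmpxchg(ptr, o, n)	__sync_val_compare_and_swap(ptr, o, n)

	/* generate atomic_<op>_return() from cmpxchg() alone */
	#define ATOMIC_OP_RETURN(op, c_op)				\
	static inline int atomic_##op##_return(int i, atomic_t *v)	\
	{								\
		int c, old;						\
									\
		c = v->counter;						\
		/* retry until nobody changed ->counter under us */	\
		while ((old = cmpxchg(&v->counter, c, c c_op i)) != c)	\
			c = old;					\
		return c c_op i;					\
	}

	ATOMIC_OP_RETURN(add, +)
	ATOMIC_OP_RETURN(sub, -)

	int main(void)
	{
		atomic_t v = { 5 };
		printf("%d\n", atomic_add_return(3, &v));	/* 8 */
		printf("%d\n", atomic_sub_return(2, &v));	/* 6 */
		return 0;
	}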
* 'locking-arch-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (23 commits)
locking,arch: Use ACCESS_ONCE() instead of cast to volatile in atomic_read()
locking, mips: Fix atomics
locking, sparc64: Fix atomics
locking,arch: Rewrite generic atomic support
locking,arch,xtensa: Fold atomic_ops
locking,arch,sparc: Fold atomic_ops
locking,arch,sh: Fold atomic_ops
locking,arch,powerpc: Fold atomic_ops
locking,arch,parisc: Fold atomic_ops
locking,arch,mn10300: Fold atomic_ops
locking,arch,mips: Fold atomic_ops
locking,arch,metag: Fold atomic_ops
locking,arch,m68k: Fold atomic_ops
locking,arch,m32r: Fold atomic_ops
locking,arch,ia64: Fold atomic_ops
locking,arch,hexagon: Fold atomic_ops
locking,arch,cris: Fold atomic_ops
locking,arch,avr32: Fold atomic_ops
locking,arch,arm64: Fold atomic_ops
locking,arch,arm: Fold atomic_ops
...
Diffstat (limited to 'arch/ia64')
-rw-r--r--	arch/ia64/include/asm/atomic.h	192
1 file changed, 88 insertions(+), 104 deletions(-)
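
One commit in the series above, "locking,arch: Use ACCESS_ONCE() instead of cast to volatile in atomic_read()", produces the first hunk of the diff below. In kernels of this era ACCESS_ONCE() was a type-generic form of the same volatile cast, so on ia64 the change is behaviorally neutral but self-documenting. A side-by-side sketch (the _old/_new macro names are hypothetical, for illustration only):

	/* include/linux/compiler.h, as of v3.18-era kernels: */
	#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

	/* both forms force a single load of ->counter, but the new one
	 * no longer hard-codes the counter's C type: */
	#define atomic_read_old(v)	(*(volatile int *)&(v)->counter)
	#define atomic_read_new(v)	ACCESS_ONCE((v)->counter)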
diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
index 0f8bf48..0bf0350 100644
--- a/arch/ia64/include/asm/atomic.h
+++ b/arch/ia64/include/asm/atomic.h
@@ -21,68 +21,100 @@
 #define ATOMIC_INIT(i)		{ (i) }
 #define ATOMIC64_INIT(i)	{ (i) }
 
-#define atomic_read(v)		(*(volatile int *)&(v)->counter)
-#define atomic64_read(v)	(*(volatile long *)&(v)->counter)
+#define atomic_read(v)		ACCESS_ONCE((v)->counter)
+#define atomic64_read(v)	ACCESS_ONCE((v)->counter)
 
 #define atomic_set(v,i)		(((v)->counter) = (i))
 #define atomic64_set(v,i)	(((v)->counter) = (i))
 
-static __inline__ int
-ia64_atomic_add (int i, atomic_t *v)
-{
-	__s32 old, new;
-	CMPXCHG_BUGCHECK_DECL
-
-	do {
-		CMPXCHG_BUGCHECK(v);
-		old = atomic_read(v);
-		new = old + i;
-	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
-	return new;
+#define ATOMIC_OP(op, c_op)						\
+static __inline__ int							\
+ia64_atomic_##op (int i, atomic_t *v)					\
+{									\
+	__s32 old, new;							\
+	CMPXCHG_BUGCHECK_DECL						\
+									\
+	do {								\
+		CMPXCHG_BUGCHECK(v);					\
+		old = atomic_read(v);					\
+		new = old c_op i;					\
+	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); \
+	return new;							\
 }
 
-static __inline__ long
-ia64_atomic64_add (__s64 i, atomic64_t *v)
-{
-	__s64 old, new;
-	CMPXCHG_BUGCHECK_DECL
-
-	do {
-		CMPXCHG_BUGCHECK(v);
-		old = atomic64_read(v);
-		new = old + i;
-	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
-	return new;
-}
+ATOMIC_OP(add, +)
+ATOMIC_OP(sub, -)
 
-static __inline__ int
-ia64_atomic_sub (int i, atomic_t *v)
-{
-	__s32 old, new;
-	CMPXCHG_BUGCHECK_DECL
-
-	do {
-		CMPXCHG_BUGCHECK(v);
-		old = atomic_read(v);
-		new = old - i;
-	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
-	return new;
-}
+#undef ATOMIC_OP
 
-static __inline__ long
-ia64_atomic64_sub (__s64 i, atomic64_t *v)
-{
-	__s64 old, new;
-	CMPXCHG_BUGCHECK_DECL
-
-	do {
-		CMPXCHG_BUGCHECK(v);
-		old = atomic64_read(v);
-		new = old - i;
-	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
-	return new;
+#define atomic_add_return(i,v)						\
+({									\
+	int __ia64_aar_i = (i);						\
+	(__builtin_constant_p(i)					\
+	 && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)		\
+	     || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)		\
+	     || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)		\
+	     || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))		\
+		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
+		: ia64_atomic_add(__ia64_aar_i, v);			\
+})
+
+#define atomic_sub_return(i,v)						\
+({									\
+	int __ia64_asr_i = (i);						\
+	(__builtin_constant_p(i)					\
+	 && (   (__ia64_asr_i ==   1) || (__ia64_asr_i ==   4)		\
+	     || (__ia64_asr_i ==   8) || (__ia64_asr_i ==  16)		\
+	     || (__ia64_asr_i ==  -1) || (__ia64_asr_i ==  -4)		\
+	     || (__ia64_asr_i ==  -8) || (__ia64_asr_i == -16)))	\
+		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
+		: ia64_atomic_sub(__ia64_asr_i, v);			\
+})
+
+#define ATOMIC64_OP(op, c_op)						\
+static __inline__ long							\
+ia64_atomic64_##op (__s64 i, atomic64_t *v)				\
+{									\
+	__s64 old, new;							\
+	CMPXCHG_BUGCHECK_DECL						\
+									\
+	do {								\
+		CMPXCHG_BUGCHECK(v);					\
+		old = atomic64_read(v);					\
+		new = old c_op i;					\
+	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); \
+	return new;							\
 }
 
+ATOMIC64_OP(add, +)
+ATOMIC64_OP(sub, -)
+
+#undef ATOMIC64_OP
+
+#define atomic64_add_return(i,v)					\
+({									\
+	long __ia64_aar_i = (i);					\
+	(__builtin_constant_p(i)					\
+	 && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)		\
+	     || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)		\
+	     || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)		\
+	     || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))		\
+		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
+		: ia64_atomic64_add(__ia64_aar_i, v);			\
+})
+
+#define atomic64_sub_return(i,v)					\
+({									\
+	long __ia64_asr_i = (i);					\
+	(__builtin_constant_p(i)					\
+	 && (   (__ia64_asr_i ==   1) || (__ia64_asr_i ==   4)		\
+	     || (__ia64_asr_i ==   8) || (__ia64_asr_i ==  16)		\
+	     || (__ia64_asr_i ==  -1) || (__ia64_asr_i ==  -4)		\
+	     || (__ia64_asr_i ==  -8) || (__ia64_asr_i == -16)))	\
+		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
+		: ia64_atomic64_sub(__ia64_asr_i, v);			\
+})
+
 #define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
@@ -123,30 +155,6 @@ static __inline__ long atomic64_add_unless(atomic64_t *v, long a, long u)
 
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
 
-#define atomic_add_return(i,v)						\
-({									\
-	int __ia64_aar_i = (i);						\
-	(__builtin_constant_p(i)					\
-	 && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)		\
-	     || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)		\
-	     || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)		\
-	     || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))		\
-		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
-		: ia64_atomic_add(__ia64_aar_i, v);			\
-})
-
-#define atomic64_add_return(i,v)					\
-({									\
-	long __ia64_aar_i = (i);					\
-	(__builtin_constant_p(i)					\
-	 && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)		\
-	     || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)		\
-	     || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)		\
-	     || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))		\
-		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
-		: ia64_atomic64_add(__ia64_aar_i, v);			\
-})
-
 /*
  * Atomically add I to V and return TRUE if the resulting value is
  * negative.
@@ -163,30 +171,6 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
 	return atomic64_add_return(i, v) < 0;
 }
 
-#define atomic_sub_return(i,v)						\
-({									\
-	int __ia64_asr_i = (i);						\
-	(__builtin_constant_p(i)					\
-	 && (   (__ia64_asr_i ==   1) || (__ia64_asr_i ==   4)		\
-	     || (__ia64_asr_i ==   8) || (__ia64_asr_i ==  16)		\
-	     || (__ia64_asr_i ==  -1) || (__ia64_asr_i ==  -4)		\
-	     || (__ia64_asr_i ==  -8) || (__ia64_asr_i == -16)))	\
-		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
-		: ia64_atomic_sub(__ia64_asr_i, v);			\
-})
-
-#define atomic64_sub_return(i,v)					\
-({									\
-	long __ia64_asr_i = (i);					\
-	(__builtin_constant_p(i)					\
-	 && (   (__ia64_asr_i ==   1) || (__ia64_asr_i ==   4)		\
-	     || (__ia64_asr_i ==   8) || (__ia64_asr_i ==  16)		\
-	     || (__ia64_asr_i ==  -1) || (__ia64_asr_i ==  -4)		\
-	     || (__ia64_asr_i ==  -8) || (__ia64_asr_i == -16)))	\
-		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
-		: ia64_atomic64_sub(__ia64_asr_i, v);			\
-})
-
 #define atomic_dec_return(v)		atomic_sub_return(1, (v))
 #define atomic_inc_return(v)		atomic_add_return(1, (v))
 #define atomic64_dec_return(v)		atomic64_sub_return(1, (v))
@@ -199,13 +183,13 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
 #define atomic64_dec_and_test(v)	(atomic64_sub_return(1, (v)) == 0)
 #define atomic64_inc_and_test(v)	(atomic64_add_return(1, (v)) == 0)
 
-#define atomic_add(i,v)			atomic_add_return((i), (v))
-#define atomic_sub(i,v)			atomic_sub_return((i), (v))
+#define atomic_add(i,v)			(void)atomic_add_return((i), (v))
+#define atomic_sub(i,v)			(void)atomic_sub_return((i), (v))
 #define atomic_inc(v)			atomic_add(1, (v))
 #define atomic_dec(v)			atomic_sub(1, (v))
 
-#define atomic64_add(i,v)		atomic64_add_return((i), (v))
-#define atomic64_sub(i,v)		atomic64_sub_return((i), (v))
+#define atomic64_add(i,v)		(void)atomic64_add_return((i), (v))
+#define atomic64_sub(i,v)		(void)atomic64_sub_return((i), (v))
 #define atomic64_inc(v)			atomic64_add(1, (v))
 #define atomic64_dec(v)			atomic64_sub(1, (v))
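
The atomic_add_return()/atomic_sub_return() wrappers that the fold keeps (now hoisted above the helpers they dispatch to) encode an Itanium-specific fast path: fetchadd only accepts the immediates ±1, ±4, ±8 and ±16, so a compile-time constant in that set compiles to a single fetchadd, while anything else falls back to the cmpxchg() retry loop. A user-space sketch of the dispatch shape, where fast_fetch_and_add() and slow_cmpxchg_add() are hypothetical stand-ins for ia64_fetch_and_add() and ia64_atomic_add():

	#include <stdio.h>

	typedef struct { int counter; } atomic_t;

	/* stand-ins only; the real ones are a fetchadd instruction and
	 * a cmpxchg() loop, both returning the post-add value */
	static int fast_fetch_and_add(int i, int *p)    { return *p += i; }
	static int slow_cmpxchg_add(int i, atomic_t *v) { return v->counter += i; }

	#define atomic_add_return(i, v)					\
	({								\
		int __i = (i);						\
		(__builtin_constant_p(i)				\
		 && (__i == 1 || __i == 4 || __i == 8 || __i == 16	\
		     || __i == -1 || __i == -4 || __i == -8 || __i == -16)) \
			? fast_fetch_and_add(__i, &(v)->counter)	\
			: slow_cmpxchg_add(__i, v);			\
	})

	int main(void)
	{
		atomic_t v = { 0 };
		atomic_add_return(4, &v);  /* constant in fetchadd range: fast path */
		atomic_add_return(3, &v);  /* 3 is not encodable: loop path */
		printf("%d\n", v.counter); /* 7 */
		return 0;
	}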