From f8ca9d2d2087d8ebf83aae200f9269eafd5e5945 Mon Sep 17 00:00:00 2001
From: jhb
Date: Tue, 17 Apr 2001 02:50:05 +0000
Subject: - Fix memory barriers in atomic operations so that the barriers are
 always "inside" of locked regions.  That is, an acquire atomic operation
 will always enforce a memory barrier after the atomic operation and a
 release operation will always enforce a memory barrier before the atomic
 operation.
 - Explicitly use 'mb' instead of 'wmb' in release atomic operations.  The
 'wmb' memory barrier is not strong enough to guarantee coherence with
 other processors.  This is effectively a nop since alpha_wmb() actually
 performs a 'mb' and not a 'wmb', but I wanted the code to be more correct
 since at some point in the future alpha_wmb()'s implementation may switch
 to being a real 'wmb'.
---
 sys/alpha/include/atomic.h | 46 ++++++++++++++++++++++++++--------------------
 1 file changed, 26 insertions(+), 20 deletions(-)

diff --git a/sys/alpha/include/atomic.h b/sys/alpha/include/atomic.h
index 1c45993..773ae7e 100644
--- a/sys/alpha/include/atomic.h
+++ b/sys/alpha/include/atomic.h
@@ -256,29 +256,29 @@ static __inline u_int64_t atomic_readandclear_64(volatile u_int64_t *addr)
 static __inline void							\
 atomic_##NAME##_acq_##WIDTH(volatile u_int##WIDTH##_t *p, u_int##WIDTH##_t v)\
 {									\
-	alpha_mb();							\
 	atomic_##NAME##_##WIDTH(p, v);					\
+	alpha_mb();							\
 }									\
 									\
 static __inline void							\
 atomic_##NAME##_rel_##WIDTH(volatile u_int##WIDTH##_t *p, u_int##WIDTH##_t v)\
 {									\
+	alpha_mb();							\
 	atomic_##NAME##_##WIDTH(p, v);					\
-	alpha_wmb();							\
 }									\
 									\
 static __inline void							\
 atomic_##NAME##_acq_##TYPE(volatile u_int##WIDTH##_t *p, u_int##WIDTH##_t v)\
 {									\
-	alpha_mb();							\
 	atomic_##NAME##_##WIDTH(p, v);					\
+	alpha_mb();							\
 }									\
 									\
 static __inline void							\
 atomic_##NAME##_rel_##TYPE(volatile u_int##WIDTH##_t *p, u_int##WIDTH##_t v)\
 {									\
+	alpha_mb();							\
 	atomic_##NAME##_##WIDTH(p, v);					\
-	alpha_wmb();							\
 }
 
 ATOMIC_ACQ_REL(set, 8, char)
@@ -307,28 +307,34 @@ ATOMIC_ACQ_REL(subtract, 64, long)
 static __inline u_##TYPE						\
 atomic_load_acq_##WIDTH(volatile u_##TYPE *p)				\
 {									\
+	u_##TYPE v;							\
+									\
+	v = *p;								\
 	alpha_mb();							\
-	return (*p);							\
+	return (v);							\
 }									\
 									\
 static __inline void							\
 atomic_store_rel_##WIDTH(volatile u_##TYPE *p, u_##TYPE v)\
 {									\
+	alpha_mb();							\
 	*p = v;								\
-	alpha_wmb();							\
 }									\
 static __inline u_##TYPE						\
 atomic_load_acq_##TYPE(volatile u_##TYPE *p)				\
 {									\
+	u_##TYPE v;							\
+									\
+	v = *p;								\
 	alpha_mb();							\
-	return (*p);							\
+	return (v);							\
 }									\
 									\
 static __inline void							\
 atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
 {									\
+	alpha_mb();							\
 	*p = v;								\
-	alpha_wmb();							\
 }
 
 ATOMIC_STORE_LOAD(char, 8)
@@ -408,35 +414,35 @@ atomic_cmpset_ptr(volatile void *dst, void *exp, void *src)
 static __inline u_int32_t
 atomic_cmpset_acq_32(volatile u_int32_t *p, u_int32_t cmpval, u_int32_t newval)
 {
+	int retval;
+
+	retval = atomic_cmpset_32(p, cmpval, newval);
 	alpha_mb();
-	return (atomic_cmpset_32(p, cmpval, newval));
+	return (retval);
 }
 
 static __inline u_int32_t
 atomic_cmpset_rel_32(volatile u_int32_t *p, u_int32_t cmpval, u_int32_t newval)
 {
-	int retval;
-
-	retval = atomic_cmpset_32(p, cmpval, newval);
-	alpha_wmb();
-	return (retval);
+	alpha_mb();
+	return (atomic_cmpset_32(p, cmpval, newval));
 }
 
 static __inline u_int64_t
 atomic_cmpset_acq_64(volatile u_int64_t *p, u_int64_t cmpval, u_int64_t newval)
 {
+	int retval;
+
+	retval = atomic_cmpset_64(p, cmpval, newval);
 	alpha_mb();
-	return (atomic_cmpset_64(p, cmpval, newval));
+	return (retval);
 }
 
 static __inline u_int64_t
 atomic_cmpset_rel_64(volatile u_int64_t *p, u_int64_t cmpval, u_int64_t newval)
 {
-	int retval;
-
-	retval = atomic_cmpset_64(p, cmpval, newval);
-	alpha_wmb();
-	return (retval);
+	alpha_mb();
+	return (atomic_cmpset_64(p, cmpval, newval));
 }
 
 #define	atomic_cmpset_acq_int	atomic_cmpset_acq_32
--
cgit v1.1
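
For readers without the Alpha headers at hand, the sketch below mirrors the
barrier placement this patch establishes.  It is illustrative only, not
FreeBSD code: the sketch_* names are made up, GCC's __sync_synchronize()
builtin stands in for the 'mb' instruction that alpha_mb() emits, and a
plain |= stands in for the real LL/SC atomic sequence.

	/*
	 * Minimal sketch of acquire/release barrier placement.
	 * Assumptions: __sync_synchronize() approximates alpha_mb(),
	 * and the stores below approximate atomic_set_32().
	 */
	#include <stdint.h>

	/*
	 * Acquire: operation first, barrier after, so memory accesses
	 * inside the locked region cannot be reordered to before the
	 * lock is taken.
	 */
	static inline void
	sketch_atomic_set_acq_32(volatile uint32_t *p, uint32_t v)
	{
		*p |= v;		/* stand-in for the atomic op */
		__sync_synchronize();	/* barrier AFTER the operation */
	}

	/*
	 * Release: barrier first, operation after, so memory accesses
	 * inside the locked region cannot be reordered to after the
	 * lock is dropped.  A full barrier is used, not a write-only
	 * one: a 'wmb' would not order the region's reads against the
	 * releasing store.
	 */
	static inline void
	sketch_atomic_set_rel_32(volatile uint32_t *p, uint32_t v)
	{
		__sync_synchronize();	/* barrier BEFORE the operation */
		*p |= v;		/* stand-in for the atomic op */
	}

Either way the barrier sits on the inside of the critical region, which is
the invariant the commit message describes.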