diff options
author | marius <marius@FreeBSD.org> | 2011-12-03 13:51:57 +0000 |
---|---|---|
committer | marius <marius@FreeBSD.org> | 2011-12-03 13:51:57 +0000 |
commit | 6779d3e54cebfb0cf17bb0b04e66e1892ea7d1b7 (patch) | |
tree | 80d123e06e64de15f15c440acc4c5736192f50c5 | |
parent | cbbd4e13dbc7802e27089b4dd004816b530ef41d (diff) | |
download | FreeBSD-src-6779d3e54cebfb0cf17bb0b04e66e1892ea7d1b7.zip FreeBSD-src-6779d3e54cebfb0cf17bb0b04e66e1892ea7d1b7.tar.gz |
Revert r225889 a bit. While it's correct that in total store order there's
no need to additionally add CPU memory barriers to the acquire variants of
atomic(9), these are documented to also include compiler memory barriers.
So add back the latter, which were previously provided by the use of membar().
-rw-r--r-- | sys/sparc64/include/atomic.h | 5 |
1 files changed, 4 insertions, 1 deletions
diff --git a/sys/sparc64/include/atomic.h b/sys/sparc64/include/atomic.h index a47ce32..06a1984 100644 --- a/sys/sparc64/include/atomic.h +++ b/sys/sparc64/include/atomic.h @@ -78,7 +78,7 @@ * order which we use for running the kernel and all of the userland atomic * loads and stores behave as if the were followed by a membar with a mask * of #LoadLoad | #LoadStore | #StoreStore. In order to be also sufficient - * for use of relaxed memory ordering, the atomic_cas() in the acq variants + * for use of relaxed memory ordering, the atomic_cas() in the acq variants * additionally would have to be followed by a membar #LoadLoad | #LoadStore. * Due to the suggested assembly syntax of the membar operands containing a * # character, they cannot be used in macros. The cmask and mmask bits thus @@ -97,6 +97,7 @@ #define atomic_cas_acq(p, e, s, sz) ({ \ itype(sz) v; \ v = atomic_cas((p), (e), (s), sz); \ + __asm __volatile("" : : : "memory"); \ v; \ }) @@ -121,6 +122,7 @@ #define atomic_op_acq(p, op, v, sz) ({ \ itype(sz) t; \ t = atomic_op((p), op, (v), sz); \ + __asm __volatile("" : : : "memory"); \ t; \ }) @@ -137,6 +139,7 @@ #define atomic_load_acq(p, sz) ({ \ itype(sz) v; \ v = atomic_load((p), sz); \ + __asm __volatile("" : : : "memory"); \ v; \ }) |