author	attilio <attilio@FreeBSD.org>	2009-09-30 13:26:31 +0000
committer	attilio <attilio@FreeBSD.org>	2009-09-30 13:26:31 +0000
commit	e9f2530ebfd93a31f9261bfd21bfb5c95dab3720 (patch)
tree	14bb844cbbe423beb58afda22e6444bab089a43e /sys
parent	49524d648a66d9f3860133c520a179e25a5f35aa (diff)
When releasing a read/shared lock we need to use a write memory barrier
in order to avoid CPU instruction reordering on architectures that do
not have strongly ordered writes.

Diagnosed by:	fabio
Reviewed by:	jhb
Tested by:	Giovanni Trematerra <giovanni dot trematerra at gmail dot com>
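The change itself is mechanical: each plain atomic_cmpset_ptr() on the
read-drop path becomes atomic_cmpset_rel_ptr(), so the compare-and-set
that publishes the new lock word carries release semantics. A minimal
userspace sketch of the same idea, written with C11 atomics rather than
the kernel's atomic(9) API; the lock-word encoding, the ONE_READER
constant, and the function name are illustrative, not the kernel's:

	#include <stdatomic.h>
	#include <stdint.h>

	#define	ONE_READER	2	/* illustrative stand-in for RW_ONE_READER */

	static _Atomic uintptr_t lk;	/* stands in for rw->rw_lock */

	void
	runlock_fast(void)
	{
		uintptr_t x = atomic_load_explicit(&lk, memory_order_relaxed);

		/*
		 * Release semantics order every load and store performed
		 * inside the critical section before the store that
		 * publishes the new lock word.  A relaxed CAS gives no
		 * such guarantee on weakly ordered architectures.
		 */
		if (!atomic_compare_exchange_strong_explicit(&lk, &x,
		    x - ONE_READER, memory_order_release, memory_order_relaxed))
			;	/* contended: the real code retries or takes the slow path */
	}

On x86 the release ordering is free (stores are already ordered), which
is why the missing barrier went unnoticed there; on weakly ordered CPUs
it changes the generated code.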
Diffstat (limited to 'sys')
-rw-r--r--	sys/kern/kern_rwlock.c	7
-rw-r--r--	sys/kern/kern_sx.c	8
-rw-r--r--	sys/sys/rwlock.h	7
-rw-r--r--	sys/sys/sx.h	9
4 files changed, 9 insertions, 22 deletions
diff --git a/sys/kern/kern_rwlock.c b/sys/kern/kern_rwlock.c
index be05b39..c1f13e0 100644
--- a/sys/kern/kern_rwlock.c
+++ b/sys/kern/kern_rwlock.c
@@ -541,7 +541,7 @@ _rw_runlock(struct rwlock *rw, const char *file, int line)
*/
x = rw->rw_lock;
if (RW_READERS(x) > 1) {
- if (atomic_cmpset_ptr(&rw->rw_lock, x,
+ if (atomic_cmpset_rel_ptr(&rw->rw_lock, x,
x - RW_ONE_READER)) {
if (LOCK_LOG_TEST(&rw->lock_object, 0))
CTR4(KTR_LOCK,
@@ -559,7 +559,8 @@ _rw_runlock(struct rwlock *rw, const char *file, int line)
if (!(x & RW_LOCK_WAITERS)) {
MPASS((x & ~RW_LOCK_WRITE_SPINNER) ==
RW_READERS_LOCK(1));
- if (atomic_cmpset_ptr(&rw->rw_lock, x, RW_UNLOCKED)) {
+ if (atomic_cmpset_rel_ptr(&rw->rw_lock, x,
+ RW_UNLOCKED)) {
if (LOCK_LOG_TEST(&rw->lock_object, 0))
CTR2(KTR_LOCK, "%s: %p last succeeded",
__func__, rw);
@@ -597,7 +598,7 @@ _rw_runlock(struct rwlock *rw, const char *file, int line)
x |= (v & RW_LOCK_READ_WAITERS);
} else
queue = TS_SHARED_QUEUE;
- if (!atomic_cmpset_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v,
+ if (!atomic_cmpset_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v,
x)) {
turnstile_chain_unlock(&rw->lock_object);
continue;
diff --git a/sys/kern/kern_sx.c b/sys/kern/kern_sx.c
index c00b267..2d777a2 100644
--- a/sys/kern/kern_sx.c
+++ b/sys/kern/kern_sx.c
@@ -931,7 +931,7 @@ _sx_sunlock_hard(struct sx *sx, const char *file, int line)
* so, just drop one and return.
*/
if (SX_SHARERS(x) > 1) {
- if (atomic_cmpset_ptr(&sx->sx_lock, x,
+ if (atomic_cmpset_rel_ptr(&sx->sx_lock, x,
x - SX_ONE_SHARER)) {
if (LOCK_LOG_TEST(&sx->lock_object, 0))
CTR4(KTR_LOCK,
@@ -949,8 +949,8 @@ _sx_sunlock_hard(struct sx *sx, const char *file, int line)
*/
if (!(x & SX_LOCK_EXCLUSIVE_WAITERS)) {
MPASS(x == SX_SHARERS_LOCK(1));
- if (atomic_cmpset_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1),
- SX_LOCK_UNLOCKED)) {
+ if (atomic_cmpset_rel_ptr(&sx->sx_lock,
+ SX_SHARERS_LOCK(1), SX_LOCK_UNLOCKED)) {
if (LOCK_LOG_TEST(&sx->lock_object, 0))
CTR2(KTR_LOCK, "%s: %p last succeeded",
__func__, sx);
@@ -973,7 +973,7 @@ _sx_sunlock_hard(struct sx *sx, const char *file, int line)
* Note that the state of the lock could have changed,
* so if it fails loop back and retry.
*/
- if (!atomic_cmpset_ptr(&sx->sx_lock,
+ if (!atomic_cmpset_rel_ptr(&sx->sx_lock,
SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS,
SX_LOCK_UNLOCKED)) {
sleepq_release(&sx->lock_object);
diff --git a/sys/sys/rwlock.h b/sys/sys/rwlock.h
index 50410ff..ad17826 100644
--- a/sys/sys/rwlock.h
+++ b/sys/sys/rwlock.h
@@ -55,13 +55,6 @@
*
* When the lock is not locked by any thread, it is encoded as a read lock
* with zero waiters.
- *
- * A note about memory barriers. Write locks need to use the same memory
- * barriers as mutexes: _acq when acquiring a write lock and _rel when
- * releasing a write lock. Read locks also need to use an _acq barrier when
- * acquiring a read lock. However, since read locks do not update any
- * locked data (modulo bugs of course), no memory barrier is needed when
- * releasing a read lock.
*/
#define RW_LOCK_READ 0x01
diff --git a/sys/sys/sx.h b/sys/sys/sx.h
index 4ff693b..67f7d97 100644
--- a/sys/sys/sx.h
+++ b/sys/sys/sx.h
@@ -63,13 +63,6 @@
*
* When the lock is not locked by any thread, it is encoded as a
* shared lock with zero waiters.
- *
- * A note about memory barriers. Exclusive locks need to use the same
- * memory barriers as mutexes: _acq when acquiring an exclusive lock
- * and _rel when releasing an exclusive lock. On the other side,
- * shared lock needs to use an _acq barrier when acquiring the lock
- * but, since they don't update any locked data, no memory barrier is
- * needed when releasing a shared lock.
*/
#define SX_LOCK_SHARED 0x01
@@ -200,7 +193,7 @@ __sx_sunlock(struct sx *sx, const char *file, int line)
uintptr_t x = sx->sx_lock;
if (x == (SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS) ||
- !atomic_cmpset_ptr(&sx->sx_lock, x, x - SX_ONE_SHARER))
+ !atomic_cmpset_rel_ptr(&sx->sx_lock, x, x - SX_ONE_SHARER))
_sx_sunlock_hard(sx, file, line);
}
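The header comments deleted above from sys/sys/rwlock.h and sys/sys/sx.h
argued that because readers never modify the protected data, no barrier
is needed when a read/shared lock is released. That reasoning overlooks
load reordering: the reader's loads from the protected data must also be
ordered before the store that drops the lock. A hypothetical two-thread
litmus sketch, again in C11 with toy names and a toy lock encoding, of
the failure mode this commit guards against:

	#include <stdatomic.h>

	static _Atomic int lk;	/* 0 = free, 1 = read-locked, 2 = write-locked */
	static int data;	/* protected by lk */

	int
	reader(void)
	{
		int v, exp = 0;

		while (!atomic_compare_exchange_weak_explicit(&lk, &exp, 1,
		    memory_order_acquire, memory_order_relaxed))
			exp = 0;		/* spin until read-locked */
		v = data;			/* load inside the section */
		atomic_store_explicit(&lk, 0,	/* if this were relaxed, the */
		    memory_order_release);	/* load above could drift past it */
		return (v);
	}

	void
	writer(void)
	{
		int exp = 0;

		while (!atomic_compare_exchange_weak_explicit(&lk, &exp, 2,
		    memory_order_acquire, memory_order_relaxed))
			exp = 0;		/* wait for the reader's drop */
		data++;				/* modify under the write lock */
		atomic_store_explicit(&lk, 0, memory_order_release);
	}

With a relaxed unlock, a weakly ordered CPU may make the reader's store
to lk visible before its load of data has been satisfied, so the writer
can be mid-update while the reader's load still executes: exactly the
window the new _rel variants close.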