author     attilio <attilio@FreeBSD.org>    2009-08-17 16:17:21 +0000
committer  attilio <attilio@FreeBSD.org>    2009-08-17 16:17:21 +0000
commit     e75d30c87f88cc8122139e051639281f766a20ed (patch)
tree       c287450b66e6c82a706049a48c5fcff40a6bc3b5
parent     61d6cdbea1c290b707b3e593b66d13469c3be675 (diff)
* Change the scope of ASSERT_ATOMIC_LOAD() from a generic check to a
  check specific to pointer-fetching operations. Consequently, rename
  the operation to ASSERT_ATOMIC_LOAD_PTR().
* Fix the implementation of ASSERT_ATOMIC_LOAD_PTR() by checking
  alignment directly against the word boundary, for all the given
  architectures (see the sketch below). That is a bit too strict for
  some common cases, but it assures safety.
* Add a comment explaining the scope of the macro.
* Add a new stub in the lockmgr-specific implementation.
Tested by: marcel (initial version), marius
Reviewed by: rwatson, jhb (comment specific review)
Approved by: re (kib)
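
The new check reduces to two conditions: the lock word must be exactly pointer-sized, and its address must fall on a word boundary. A minimal userland sketch of that test follows; is_ptr_load_atomic() and the buffer trick are illustrative only and are not part of the commit.

#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical userland restatement of the test performed by the new
 * ASSERT_ATOMIC_LOAD_PTR() macro: the object is pointer-sized and its
 * address is aligned on a word boundary.
 */
static int
is_ptr_load_atomic(uintptr_t addr, size_t size)
{

	return (size == sizeof(void *) &&
	    (addr & (sizeof(void *) - 1)) == 0);
}

int
main(void)
{
	static uintptr_t buf[2];	/* naturally aligned storage */

	/* The slot at offset 1 is deliberately misaligned. */
	printf("aligned:    %d\n",
	    is_ptr_load_atomic((uintptr_t)&buf[0], sizeof(uintptr_t)));
	printf("misaligned: %d\n",
	    is_ptr_load_atomic((uintptr_t)buf + 1, sizeof(uintptr_t)));
	return (0);
}

On a 64-bit host this prints 1 and then 0: the misaligned slot would fail the kernel assertion even though, as the systm.h comment added below notes, some architectures could still load it atomically if the access did not cross a cache line.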
-rw-r--r--  sys/kern/kern_lock.c   |  3
-rw-r--r--  sys/kern/kern_mutex.c  |  5
-rw-r--r--  sys/kern/kern_rwlock.c |  5
-rw-r--r--  sys/kern/kern_sx.c     |  5
-rw-r--r--  sys/sys/systm.h        | 13
5 files changed, 22 insertions, 9 deletions
diff --git a/sys/kern/kern_lock.c b/sys/kern/kern_lock.c
index 0affad5..29ae4ac 100644
--- a/sys/kern/kern_lock.c
+++ b/sys/kern/kern_lock.c
@@ -334,6 +334,9 @@ lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
 	int iflags;
 
 	MPASS((flags & ~LK_INIT_MASK) == 0);
+	ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock,
+	    ("%s: lockmgr not aligned for %s: %p", __func__, wmesg,
+	    &lk->lk_lock));
 
 	iflags = LO_SLEEPABLE | LO_UPGRADABLE;
 	if (flags & LK_CANRECURSE)
diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c
index f625098..85ca646 100644
--- a/sys/kern/kern_mutex.c
+++ b/sys/kern/kern_mutex.c
@@ -783,8 +783,9 @@ mtx_init(struct mtx *m, const char *name, const char *type, int opts)
 
 	MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
 	    MTX_NOWITNESS | MTX_DUPOK | MTX_NOPROFILE)) == 0);
-	ASSERT_ATOMIC_LOAD(m->mtx_lock, ("%s: mtx_lock not aligned for %s: %p",
-	    __func__, name, &m->mtx_lock));
+	ASSERT_ATOMIC_LOAD_PTR(m->mtx_lock,
+	    ("%s: mtx_lock not aligned for %s: %p", __func__, name,
+	    &m->mtx_lock));
 
 #ifdef MUTEX_DEBUG
 	/* Diagnostic and error correction */
diff --git a/sys/kern/kern_rwlock.c b/sys/kern/kern_rwlock.c
index e234250..be05b39 100644
--- a/sys/kern/kern_rwlock.c
+++ b/sys/kern/kern_rwlock.c
@@ -174,8 +174,9 @@ rw_init_flags(struct rwlock *rw, const char *name, int opts)
 
 	MPASS((opts & ~(RW_DUPOK | RW_NOPROFILE | RW_NOWITNESS | RW_QUIET |
 	    RW_RECURSE)) == 0);
-	ASSERT_ATOMIC_LOAD(rw->rw_lock, ("%s: rw_lock not aligned for %s: %p",
-	    __func__, name, &rw->rw_lock));
+	ASSERT_ATOMIC_LOAD_PTR(rw->rw_lock,
+	    ("%s: rw_lock not aligned for %s: %p", __func__, name,
+	    &rw->rw_lock));
 
 	flags = LO_UPGRADABLE;
 	if (opts & RW_DUPOK)
diff --git a/sys/kern/kern_sx.c b/sys/kern/kern_sx.c
index 15c1c9b..4a78444 100644
--- a/sys/kern/kern_sx.c
+++ b/sys/kern/kern_sx.c
@@ -205,8 +205,9 @@ sx_init_flags(struct sx *sx, const char *description, int opts)
 
 	MPASS((opts & ~(SX_QUIET | SX_RECURSE | SX_NOWITNESS | SX_DUPOK |
 	    SX_NOPROFILE | SX_NOADAPTIVE)) == 0);
-	ASSERT_ATOMIC_LOAD(sx->sx_lock, ("%s: sx_lock not aligned for %s: %p",
-	    __func__, description, &sx->sx_lock));
+	ASSERT_ATOMIC_LOAD_PTR(sx->sx_lock,
+	    ("%s: sx_lock not aligned for %s: %p", __func__, description,
+	    &sx->sx_lock));
 
 	flags = LO_SLEEPABLE | LO_UPGRADABLE;
 	if (opts & SX_DUPOK)
diff --git a/sys/sys/systm.h b/sys/sys/systm.h
index 2e8b9ad..96222da 100644
--- a/sys/sys/systm.h
+++ b/sys/sys/systm.h
@@ -89,9 +89,16 @@ extern int maxusers;		/* system tune hint */
 #define	__CTASSERT(x, y)	typedef char __assert ## y[(x) ? 1 : -1]
 #endif
 
-#define	ASSERT_ATOMIC_LOAD(var,msg)				\
-	KASSERT(sizeof(var) <= sizeof(uintptr_t) &&		\
-	    ALIGN(&(var)) == (uintptr_t)&(var), msg)
+/*
+ * Assert that a pointer can be loaded from memory atomically.
+ *
+ * This assertion enforces stronger alignment than necessary. For example,
+ * on some architectures, atomicity for unaligned loads will depend on
+ * whether or not the load spans multiple cache lines.
+ */
+#define	ASSERT_ATOMIC_LOAD_PTR(var, msg)			\
+	KASSERT(sizeof(var) == sizeof(void *) &&		\
+	    ((uintptr_t)&(var) & (sizeof(void *) - 1)) == 0, msg)
 
 /*
  * XXX the hints declarations are even more misplaced than most declarations
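
The four kernel consumers above all follow the same pattern: assert at initialization time, before the lock can be touched by a lockless fast path. Below is a sketch of how a hypothetical additional consumer would look, mirroring mtx_init() and rw_init_flags(); struct foo and foo_init() do not exist in the tree, and the snippet assumes a kernel compilation environment.

#include <sys/param.h>
#include <sys/systm.h>

/* Hypothetical object embedding a pointer-sized lock word. */
struct foo {
	volatile uintptr_t	foo_lock;
};

void
foo_init(struct foo *f, const char *name)
{

	/*
	 * Panic at initialization time if the lock word is not
	 * pointer-sized or not word-aligned, before any lockless
	 * fast path could observe a torn load.
	 */
	ASSERT_ATOMIC_LOAD_PTR(f->foo_lock,
	    ("%s: foo_lock not aligned for %s: %p", __func__, name,
	    &f->foo_lock));
}

Embedding the lock word in a packed structure, or carving it out of a misaligned buffer, is the kind of layout this assertion is meant to catch once at init time rather than as a rare torn read at run time.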