author		attilio <attilio@FreeBSD.org>	2009-08-17 16:17:21 +0000
committer	attilio <attilio@FreeBSD.org>	2009-08-17 16:17:21 +0000
commit		e75d30c87f88cc8122139e051639281f766a20ed (patch)
tree		c287450b66e6c82a706049a48c5fcff40a6bc3b5 /sys/kern
parent		61d6cdbea1c290b707b3e593b66d13469c3be675 (diff)
* Change the scope of the ASSERT_ATOMIC_LOAD() from a generic check to
  a pointer-fetching specific operation check.  Consequently, rename the
  operation ASSERT_ATOMIC_LOAD_PTR().
* Fix the implementation of ASSERT_ATOMIC_LOAD_PTR() by checking
  alignment directly on the word boundary, for all the given specific
  architectures.  That is a bit too strict for some common cases, but it
  assures safety.
* Add a comment explaining the scope of the macro.
* Add a new stub in the lockmgr specific implementation.

Tested by:	marcel (initial version), marius
Reviewed by:	rwatson, jhb (comment specific review)
Approved by:	re (kib)
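The renamed macro itself is defined outside sys/kern, so its body is not part
of this diff.  As a rough user-space sketch of the word-boundary check the
message describes (the trimmed-down struct mtx, the assert()-based failure
path, and the main() driver here are illustrative assumptions, not the
kernel's KASSERT-based code):

/*
 * Stand-alone analogue of a pointer-width alignment assertion.  The
 * kernel macro would panic on failure; this sketch substitutes
 * assert() so it can compile and run in user space.  The msg argument
 * is accepted for call-site compatibility but ignored here.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define	ASSERT_ATOMIC_LOAD_PTR(var, msg)				\
	assert(sizeof(var) == sizeof(uintptr_t) &&			\
	    ((uintptr_t)&(var) & (sizeof(uintptr_t) - 1)) == 0)

struct mtx {
	volatile uintptr_t mtx_lock;	/* hypothetical, trimmed-down lock word */
};

int
main(void)
{
	struct mtx m;

	/*
	 * Passes when mtx_lock sits on a word (pointer-size) boundary,
	 * which naturally aligned stack or heap storage provides.
	 */
	ASSERT_ATOMIC_LOAD_PTR(m.mtx_lock,
	    ("mtx_lock not aligned: %p", &m.mtx_lock));
	printf("mtx_lock aligned at %p\n", (void *)&m.mtx_lock);
	return (0);
}

As the commit message notes, requiring word alignment outright is stricter
than some architectures need, but it is a conservative, portable guarantee
that a pointer-sized load of the lock word cannot be torn.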
Diffstat (limited to 'sys/kern')
-rw-r--r--	sys/kern/kern_lock.c	3
-rw-r--r--	sys/kern/kern_mutex.c	5
-rw-r--r--	sys/kern/kern_rwlock.c	5
-rw-r--r--	sys/kern/kern_sx.c	5
4 files changed, 12 insertions(+), 6 deletions(-)
diff --git a/sys/kern/kern_lock.c b/sys/kern/kern_lock.c
index 0affad5..29ae4ac 100644
--- a/sys/kern/kern_lock.c
+++ b/sys/kern/kern_lock.c
@@ -334,6 +334,9 @@ lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
 	int iflags;
 
 	MPASS((flags & ~LK_INIT_MASK) == 0);
+	ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock,
+	    ("%s: lockmgr not aligned for %s: %p", __func__, wmesg,
+	    &lk->lk_lock));
 
 	iflags = LO_SLEEPABLE | LO_UPGRADABLE;
 	if (flags & LK_CANRECURSE)
diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c
index f625098..85ca646 100644
--- a/sys/kern/kern_mutex.c
+++ b/sys/kern/kern_mutex.c
@@ -783,8 +783,9 @@ mtx_init(struct mtx *m, const char *name, const char *type, int opts)
 
 	MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
 	    MTX_NOWITNESS | MTX_DUPOK | MTX_NOPROFILE)) == 0);
-	ASSERT_ATOMIC_LOAD(m->mtx_lock, ("%s: mtx_lock not aligned for %s: %p",
-	    __func__, name, &m->mtx_lock));
+	ASSERT_ATOMIC_LOAD_PTR(m->mtx_lock,
+	    ("%s: mtx_lock not aligned for %s: %p", __func__, name,
+	    &m->mtx_lock));
 
 #ifdef MUTEX_DEBUG
 	/* Diagnostic and error correction */
diff --git a/sys/kern/kern_rwlock.c b/sys/kern/kern_rwlock.c
index e234250..be05b39 100644
--- a/sys/kern/kern_rwlock.c
+++ b/sys/kern/kern_rwlock.c
@@ -174,8 +174,9 @@ rw_init_flags(struct rwlock *rw, const char *name, int opts)
 
 	MPASS((opts & ~(RW_DUPOK | RW_NOPROFILE | RW_NOWITNESS | RW_QUIET |
 	    RW_RECURSE)) == 0);
-	ASSERT_ATOMIC_LOAD(rw->rw_lock, ("%s: rw_lock not aligned for %s: %p",
-	    __func__, name, &rw->rw_lock));
+	ASSERT_ATOMIC_LOAD_PTR(rw->rw_lock,
+	    ("%s: rw_lock not aligned for %s: %p", __func__, name,
+	    &rw->rw_lock));
 
 	flags = LO_UPGRADABLE;
 	if (opts & RW_DUPOK)
diff --git a/sys/kern/kern_sx.c b/sys/kern/kern_sx.c
index 15c1c9b..4a78444 100644
--- a/sys/kern/kern_sx.c
+++ b/sys/kern/kern_sx.c
@@ -205,8 +205,9 @@ sx_init_flags(struct sx *sx, const char *description, int opts)
 
 	MPASS((opts & ~(SX_QUIET | SX_RECURSE | SX_NOWITNESS | SX_DUPOK |
 	    SX_NOPROFILE | SX_NOADAPTIVE)) == 0);
-	ASSERT_ATOMIC_LOAD(sx->sx_lock, ("%s: sx_lock not aligned for %s: %p",
-	    __func__, description, &sx->sx_lock));
+	ASSERT_ATOMIC_LOAD_PTR(sx->sx_lock,
+	    ("%s: sx_lock not aligned for %s: %p", __func__, description,
+	    &sx->sx_lock));
 
 	flags = LO_SLEEPABLE | LO_UPGRADABLE;
 	if (opts & SX_DUPOK)