author:    bz <bz@FreeBSD.org>  2009-08-14 21:46:54 +0000
committer: bz <bz@FreeBSD.org>  2009-08-14 21:46:54 +0000
commit:    bf6acf798532962c108b5e0944b855cf7cfb5ff8 (patch)
tree:      ea52f93659c64af89eda73d8980d2b5ab4e028a9 /sys/kern/kern_mutex.c
parent:    d51166f15e1c08fdaa3df0992553ad5127da220c (diff)
Add a new macro to test that a variable could be loaded atomically.
Check that the given variable is at most uintptr_t in size and that it
is aligned.

Note: ASSERT_ATOMIC_LOAD() uses ALIGN() to check for adequate alignment --
however, the purpose of ALIGN() is to guarantee alignment, so it may
enforce stronger alignment than is strictly necessary for types smaller
than sizeof(uintptr_t).

Add checks to the mtx, rw and sx lock init functions to detect possible
breakage. This was used while debugging the problem fixed in r196118,
where a pointer sat at an unaligned address in the dpcpu area.

In collaboration with:	rwatson
Reviewed by:		rwatson
Approved by:		re (kib)
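For reference, here is a minimal sketch of what a macro performing the check
described above could look like, assuming the kernel's KASSERT() and ALIGN()
macros; it illustrates the size-and-alignment test, not necessarily the exact
definition added by this commit:

/*
 * Assert that 'var' can be loaded in a single atomic operation:
 * it must be no larger than a uintptr_t and its address must be
 * aligned, i.e. ALIGN() of the address leaves it unchanged.
 */
#define	ASSERT_ATOMIC_LOAD(var, msg)				\
	KASSERT(sizeof(var) <= sizeof(uintptr_t) &&		\
	    ALIGN(&(var)) == (uintptr_t)&(var), msg)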
Diffstat (limited to 'sys/kern/kern_mutex.c')
 sys/kern/kern_mutex.c | 2 ++
 1 file changed, 2 insertions(+), 0 deletions(-)
diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c
index fc342c5..f625098 100644
--- a/sys/kern/kern_mutex.c
+++ b/sys/kern/kern_mutex.c
@@ -783,6 +783,8 @@ mtx_init(struct mtx *m, const char *name, const char *type, int opts)
 	MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
 	    MTX_NOWITNESS | MTX_DUPOK | MTX_NOPROFILE)) == 0);
+	ASSERT_ATOMIC_LOAD(m->mtx_lock, ("%s: mtx_lock not aligned for %s: %p",
+	    __func__, name, &m->mtx_lock));
 #ifdef MUTEX_DEBUG
 	/* Diagnostic and error correction */
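
To make the effect of the new assertion concrete, here is a small hypothetical
userland demonstration of the same size-and-alignment check, with assert()
standing in for KASSERT() and a round-up expression for the kernel's ALIGN()
macro; the packed struct mimics the kind of misaligned lock word behind the
r196118 dpcpu bug:

#include <assert.h>
#include <stdint.h>

/* Round an address up to the next uintptr_t boundary (stand-in for ALIGN()). */
#define	ALIGN_UP(p)	(((uintptr_t)(p) + sizeof(uintptr_t) - 1) &	\
			    ~(uintptr_t)(sizeof(uintptr_t) - 1))

/* Userland stand-in for ASSERT_ATOMIC_LOAD(): size and alignment check. */
#define	ASSERT_ATOMIC_LOAD(var, msg)					\
	assert(sizeof(var) <= sizeof(uintptr_t) &&			\
	    ALIGN_UP(&(var)) == (uintptr_t)&(var) && (msg))

/* The leading char pushes the lock word to an unaligned offset. */
struct badmtx {
	char		pad;
	uintptr_t	mtx_lock;
} __attribute__((packed, aligned(sizeof(uintptr_t))));

int
main(void)
{
	uintptr_t ok = 0;
	struct badmtx bad;

	ASSERT_ATOMIC_LOAD(ok, "aligned variable passes");	  /* passes */
	ASSERT_ATOMIC_LOAD(bad.mtx_lock, "mtx_lock not aligned"); /* aborts */
	return (0);
}

The second assertion trips because the packed layout places mtx_lock one byte
past an aligned boundary; this is the class of breakage the kernel check now
reports at lock-initialization time instead of letting it surface later as
subtle misbehavior.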