author      jhb <jhb@FreeBSD.org>    2001-03-06 23:13:15 +0000
committer   jhb <jhb@FreeBSD.org>    2001-03-06 23:13:15 +0000
commit      84abec1e650a65db1216a54f1e07c83669defe0e (patch)
tree        db3a3db9781dd22d4da6bafa95961a53dd381bca
parent      8e10dd4dec2d9d4df647ee86cc9fd77f62452b6e (diff)
In order to avoid recursing on the backing mutex for sx locks in the
INVARIANTS case, define the actual KASSERT() in _SX_ASSERT_[SX]LOCKED
macros that are used in the sx code itself and convert the
SX_ASSERT_[SX]LOCKED macros to simple wrappers that grab the mutex for the
duration of the check.
-rw-r--r--   sys/kern/kern_sx.c |  4
-rw-r--r--   sys/sys/sx.h       | 12
2 files changed, 12 insertions, 4 deletions
diff --git a/sys/kern/kern_sx.c b/sys/kern/kern_sx.c
index c089147..a1c576f 100644
--- a/sys/kern/kern_sx.c
+++ b/sys/kern/kern_sx.c
@@ -136,7 +136,7 @@
 sx_sunlock(struct sx *sx)
 {
 	mtx_lock(&sx->sx_lock);
-	SX_ASSERT_SLOCKED(sx);
+	_SX_ASSERT_SLOCKED(sx);
 
 	/* Release. */
 	sx->sx_cnt--;
@@ -161,7 +161,7 @@
 sx_xunlock(struct sx *sx)
 {
 	mtx_lock(&sx->sx_lock);
-	SX_ASSERT_XLOCKED(sx);
+	_SX_ASSERT_XLOCKED(sx);
 	MPASS(sx->sx_cnt == -1);
 
 	/* Release. */
diff --git a/sys/sys/sx.h b/sys/sys/sx.h
index 57ed6c7..2c5c981 100644
--- a/sys/sys/sx.h
+++ b/sys/sys/sx.h
@@ -60,9 +60,12 @@ void	sx_xunlock(struct sx *sx);
  */
 #define	SX_ASSERT_SLOCKED(sx) do {					\
 	mtx_lock(&(sx)->sx_lock);					\
+	_SX_ASSERT_SLOCKED((sx));					\
+	mtx_unlock(&(sx)->sx_lock);					\
+} while (0)
+#define	_SX_ASSERT_SLOCKED(sx) do {					\
 	KASSERT(((sx)->sx_cnt > 0), ("%s: lacking slock %s\n",		\
 	    __FUNCTION__, (sx)->sx_descr));				\
-	mtx_unlock(&(sx)->sx_lock);					\
 } while (0)
 
 /*
@@ -70,15 +73,20 @@ void	sx_xunlock(struct sx *sx);
  */
 #define	SX_ASSERT_XLOCKED(sx) do {					\
 	mtx_lock(&(sx)->sx_lock);					\
+	_SX_ASSERT_XLOCKED((sx));					\
+	mtx_unlock(&(sx)->sx_lock);					\
+} while (0)
+#define	_SX_ASSERT_XLOCKED(sx) do {					\
 	KASSERT(((sx)->sx_xholder == curproc),				\
 	    ("%s: thread %p lacking xlock %s\n", __FUNCTION__,		\
 	    curproc, (sx)->sx_descr));					\
-	mtx_unlock(&(sx)->sx_lock);					\
 } while (0)
 #else	/* INVARIANTS */
 #define	SX_ASSERT_SLOCKED(sx)
 #define	SX_ASSERT_XLOCKED(sx)
+#define	_SX_ASSERT_SLOCKED(sx)
+#define	_SX_ASSERT_XLOCKED(sx)
 #endif	/* INVARIANTS */
 
 #endif	/* _KERNEL */
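For illustration only, the same wrapper-plus-bare-macro split can be sketched as a user-space analogue with POSIX threads. This is not FreeBSD code: the slock structure and every name below are invented, and assert() stands in for KASSERT(). The point mirrors the commit: the underscore-prefixed macro carries only the check, so code that already holds the backing mutex can assert without recursing on a non-recursive lock, while outside callers keep using the locking wrapper.

#include <assert.h>
#include <pthread.h>

struct slock {
	pthread_mutex_t	backing;	/* protects cnt */
	int		cnt;		/* number of shared holders */
};

/* Public form: for callers that do not hold 'backing'. */
#define	SLOCK_ASSERT_SHARED(s) do {					\
	pthread_mutex_lock(&(s)->backing);				\
	_SLOCK_ASSERT_SHARED(s);					\
	pthread_mutex_unlock(&(s)->backing);				\
} while (0)

/* Bare form: the caller must already hold 'backing'. */
#define	_SLOCK_ASSERT_SHARED(s)	assert((s)->cnt > 0)

static void
slock_sunlock(struct slock *s)
{
	pthread_mutex_lock(&s->backing);
	/* The bare assert avoids re-locking the non-recursive mutex. */
	_SLOCK_ASSERT_SHARED(s);
	s->cnt--;
	pthread_mutex_unlock(&s->backing);
}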