 share/man/man9/sx.9 | 26
 sys/kern/kern_sx.c  | 38
 sys/sys/sx.h        | 11
 3 files changed, 40 insertions(+), 35 deletions(-)
diff --git a/share/man/man9/sx.9 b/share/man/man9/sx.9
index 68267b6..c6410e4 100644
--- a/share/man/man9/sx.9
+++ b/share/man/man9/sx.9
@@ -200,22 +200,22 @@ for the assertions specified in
.Fa what ,
and panics if they are not met.
One of the following assertions must be specified:
-.Bl -tag -width ".Dv SX_UNLOCKED"
-.It Dv SX_LOCKED
+.Bl -tag -width ".Dv SA_UNLOCKED"
+.It Dv SA_LOCKED
Assert that the current thread has either a shared or an exclusive lock on the
.Vt sx
lock pointed to by the first argument.
-.It Dv SX_SLOCKED
+.It Dv SA_SLOCKED
Assert that the current thread has a shared lock on the
.Vt sx
lock pointed to by
the first argument.
-.It Dv SX_XLOCKED
+.It Dv SA_XLOCKED
Assert that the current thread has an exclusive lock on the
.Vt sx
lock pointed to
by the first argument.
-.It Dv SX_UNLOCKED
+.It Dv SA_UNLOCKED
Assert that the current thread has no lock on the
.Vt sx
lock pointed to
@@ -224,16 +224,16 @@ by the first argument.
.Pp
In addition, one of the following optional assertions may be included with
either an
-.Dv SX_LOCKED ,
-.Dv SX_SLOCKED ,
+.Dv SA_LOCKED ,
+.Dv SA_SLOCKED ,
or
-.Dv SX_XLOCKED
+.Dv SA_XLOCKED
assertion:
-.Bl -tag -width ".Dv SX_NOTRECURSED"
-.It Dv SX_RECURSED
+.Bl -tag -width ".Dv SA_NOTRECURSED"
+.It Dv SA_RECURSED
Assert that the current thread has a recursed lock on
.Fa sx .
-.It Dv SX_NOTRECURSED
+.It Dv SA_NOTRECURSED
Assert that the current thread does not have a recursed lock on
.Fa sx .
.El
@@ -306,8 +306,8 @@ does not hold a shared lock.
In the
.No non- Ns Dv WITNESS
case, the
-.Dv SX_LOCKED
+.Dv SA_LOCKED
and
-.Dv SX_SLOCKED
+.Dv SA_SLOCKED
assertions merely check that some thread holds a shared lock.
They do not ensure that the current thread holds a shared lock.
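A minimal sketch of how a caller would use the renamed assertion flags described in the manual page above; the lock name, function name, and initialization shown here are hypothetical and are not part of this commit.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/sx.h>

/* Hypothetical lock; initialized elsewhere with sx_init(&example_lock, "example"). */
static struct sx example_lock;

static void
example_write_path(void)
{
	sx_xlock(&example_lock);
	/* With this change the assertion flags are spelled SA_*, not SX_*. */
	sx_assert(&example_lock, SA_XLOCKED);
	/* ... modify data protected by example_lock ... */
	sx_xunlock(&example_lock);
}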
diff --git a/sys/kern/kern_sx.c b/sys/kern/kern_sx.c
index f60e1e1..ea7d036 100644
--- a/sys/kern/kern_sx.c
+++ b/sys/kern/kern_sx.c
@@ -139,7 +139,7 @@ unlock_sx(struct lock_object *lock)
struct sx *sx;
sx = (struct sx *)lock;
- sx_assert(sx, SX_LOCKED | SX_NOTRECURSED);
+ sx_assert(sx, SA_LOCKED | SA_NOTRECURSED);
if (sx_xlocked(sx)) {
sx_xunlock(sx);
return (1);
@@ -273,7 +273,7 @@ _sx_sunlock(struct sx *sx, const char *file, int line)
MPASS(curthread != NULL);
KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
("sx_sunlock() of destroyed sx @ %s:%d", file, line));
- _sx_assert(sx, SX_SLOCKED, file, line);
+ _sx_assert(sx, SA_SLOCKED, file, line);
curthread->td_locks--;
WITNESS_UNLOCK(&sx->lock_object, 0, file, line);
LOCK_LOG_LOCK("SUNLOCK", &sx->lock_object, 0, 0, file, line);
@@ -289,7 +289,7 @@ _sx_xunlock(struct sx *sx, const char *file, int line)
MPASS(curthread != NULL);
KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
("sx_xunlock() of destroyed sx @ %s:%d", file, line));
- _sx_assert(sx, SX_XLOCKED, file, line);
+ _sx_assert(sx, SA_XLOCKED, file, line);
curthread->td_locks--;
WITNESS_UNLOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
LOCK_LOG_LOCK("XUNLOCK", &sx->lock_object, 0, sx->sx_recurse, file,
@@ -312,7 +312,7 @@ _sx_try_upgrade(struct sx *sx, const char *file, int line)
KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
("sx_try_upgrade() of destroyed sx @ %s:%d", file, line));
- _sx_assert(sx, SX_SLOCKED, file, line);
+ _sx_assert(sx, SA_SLOCKED, file, line);
/*
* Try to switch from one shared lock to an exclusive lock. We need
@@ -339,7 +339,7 @@ _sx_downgrade(struct sx *sx, const char *file, int line)
KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
("sx_downgrade() of destroyed sx @ %s:%d", file, line));
- _sx_assert(sx, SX_XLOCKED | SX_NOTRECURSED, file, line);
+ _sx_assert(sx, SA_XLOCKED | SA_NOTRECURSED, file, line);
#ifndef INVARIANTS
if (sx_recursed(sx))
panic("downgrade of a recursed lock");
@@ -845,16 +845,16 @@ _sx_assert(struct sx *sx, int what, const char *file, int line)
if (panicstr != NULL)
return;
switch (what) {
- case SX_SLOCKED:
- case SX_SLOCKED | SX_NOTRECURSED:
- case SX_SLOCKED | SX_RECURSED:
+ case SA_SLOCKED:
+ case SA_SLOCKED | SA_NOTRECURSED:
+ case SA_SLOCKED | SA_RECURSED:
#ifndef WITNESS
slocked = 1;
/* FALLTHROUGH */
#endif
- case SX_LOCKED:
- case SX_LOCKED | SX_NOTRECURSED:
- case SX_LOCKED | SX_RECURSED:
+ case SA_LOCKED:
+ case SA_LOCKED | SA_NOTRECURSED:
+ case SA_LOCKED | SA_RECURSED:
#ifdef WITNESS
witness_assert(&sx->lock_object, what, file, line);
#else
@@ -872,31 +872,31 @@ _sx_assert(struct sx *sx, int what, const char *file, int line)
if (!(sx->sx_lock & SX_LOCK_SHARED)) {
if (sx_recursed(sx)) {
- if (what & SX_NOTRECURSED)
+ if (what & SA_NOTRECURSED)
panic("Lock %s recursed @ %s:%d\n",
sx->lock_object.lo_name, file,
line);
- } else if (what & SX_RECURSED)
+ } else if (what & SA_RECURSED)
panic("Lock %s not recursed @ %s:%d\n",
sx->lock_object.lo_name, file, line);
}
#endif
break;
- case SX_XLOCKED:
- case SX_XLOCKED | SX_NOTRECURSED:
- case SX_XLOCKED | SX_RECURSED:
+ case SA_XLOCKED:
+ case SA_XLOCKED | SA_NOTRECURSED:
+ case SA_XLOCKED | SA_RECURSED:
if (sx_xholder(sx) != curthread)
panic("Lock %s not exclusively locked @ %s:%d\n",
sx->lock_object.lo_name, file, line);
if (sx_recursed(sx)) {
- if (what & SX_NOTRECURSED)
+ if (what & SA_NOTRECURSED)
panic("Lock %s recursed @ %s:%d\n",
sx->lock_object.lo_name, file, line);
- } else if (what & SX_RECURSED)
+ } else if (what & SA_RECURSED)
panic("Lock %s not recursed @ %s:%d\n",
sx->lock_object.lo_name, file, line);
break;
- case SX_UNLOCKED:
+ case SA_UNLOCKED:
#ifdef WITNESS
witness_assert(&sx->lock_object, what, file, line);
#else
diff --git a/sys/sys/sx.h b/sys/sys/sx.h
index fa64e67..a65030e 100644
--- a/sys/sys/sx.h
+++ b/sys/sys/sx.h
@@ -245,10 +245,15 @@ struct sx_args {
#define SX_ADAPTIVESPIN 0x10
#define SX_RECURSE 0x20
-/*
- * XXX: These options should be renamed as SA_*
- */
#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
+#define SA_LOCKED LA_LOCKED
+#define SA_SLOCKED LA_SLOCKED
+#define SA_XLOCKED LA_XLOCKED
+#define SA_UNLOCKED LA_UNLOCKED
+#define SA_RECURSED LA_RECURSED
+#define SA_NOTRECURSED LA_NOTRECURSED
+
+/* Backwards compatibility. */
#define SX_LOCKED LA_LOCKED
#define SX_SLOCKED LA_SLOCKED
#define SX_XLOCKED LA_XLOCKED
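Because the old SX_* assertion names are kept as aliases for the same LA_* values, callers written before this change still compile unchanged. A hypothetical sketch of such a pre-rename caller (the function name is illustrative only):

/*
 * Hypothetical legacy caller: SX_SLOCKED now expands to LA_SLOCKED,
 * the same value as the new SA_SLOCKED, so this continues to work.
 */
static void
legacy_read_path(struct sx *sx)
{
	sx_slock(sx);
	sx_assert(sx, SX_SLOCKED);	/* equivalent to SA_SLOCKED */
	/* ... read data protected by sx ... */
	sx_sunlock(sx);
}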