Diffstat (limited to 'sys/kern/kern_sx.c')
-rw-r--r--	sys/kern/kern_sx.c	32
1 file changed, 32 insertions, 0 deletions
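
The change below applies one pattern across the sx(9) lock primitives: each entry point returns early once SCHEDULER_STOPPED() reports that a panic has stopped the scheduler, so acquisitions appear to succeed immediately and releases become no-ops. This keeps post-panic code (the kernel debugger, the crash-dump path) from ever sleeping on a lock; a hedged sketch of the pattern follows the diff.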
diff --git a/sys/kern/kern_sx.c b/sys/kern/kern_sx.c
index 8121d00..34c1531 100644
--- a/sys/kern/kern_sx.c
+++ b/sys/kern/kern_sx.c
@@ -241,6 +241,8 @@ _sx_slock(struct sx *sx, int opts, const char *file, int line)
 {
 	int error = 0;
 
+	if (SCHEDULER_STOPPED())
+		return (0);
 	MPASS(curthread != NULL);
 	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
 	    ("sx_slock() of destroyed sx @ %s:%d", file, line));
@@ -260,6 +262,9 @@ sx_try_slock_(struct sx *sx, const char *file, int line)
 {
 	uintptr_t x;
 
+	if (SCHEDULER_STOPPED())
+		return (1);
+
 	for (;;) {
 		x = sx->sx_lock;
 		KASSERT(x != SX_LOCK_DESTROYED,
@@ -283,6 +288,8 @@ _sx_xlock(struct sx *sx, int opts, const char *file, int line)
 {
 	int error = 0;
 
+	if (SCHEDULER_STOPPED())
+		return (0);
 	MPASS(curthread != NULL);
 	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
 	    ("sx_xlock() of destroyed sx @ %s:%d", file, line));
@@ -304,6 +311,9 @@ sx_try_xlock_(struct sx *sx, const char *file, int line)
 {
 	int rval;
 
+	if (SCHEDULER_STOPPED())
+		return (1);
+
 	MPASS(curthread != NULL);
 	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
 	    ("sx_try_xlock() of destroyed sx @ %s:%d", file, line));
@@ -330,6 +340,8 @@ void
 _sx_sunlock(struct sx *sx, const char *file, int line)
 {
 
+	if (SCHEDULER_STOPPED())
+		return;
 	MPASS(curthread != NULL);
 	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
 	    ("sx_sunlock() of destroyed sx @ %s:%d", file, line));
@@ -345,6 +357,8 @@ void
 _sx_xunlock(struct sx *sx, const char *file, int line)
 {
 
+	if (SCHEDULER_STOPPED())
+		return;
 	MPASS(curthread != NULL);
 	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
 	    ("sx_xunlock() of destroyed sx @ %s:%d", file, line));
@@ -369,6 +383,9 @@ sx_try_upgrade_(struct sx *sx, const char *file, int line)
 	uintptr_t x;
 	int success;
 
+	if (SCHEDULER_STOPPED())
+		return (1);
+
 	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
 	    ("sx_try_upgrade() of destroyed sx @ %s:%d", file, line));
 	_sx_assert(sx, SA_SLOCKED, file, line);
@@ -399,6 +416,9 @@ sx_downgrade_(struct sx *sx, const char *file, int line)
 	uintptr_t x;
 	int wakeup_swapper;
 
+	if (SCHEDULER_STOPPED())
+		return;
+
 	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
 	    ("sx_downgrade() of destroyed sx @ %s:%d", file, line));
 	_sx_assert(sx, SA_XLOCKED | SA_NOTRECURSED, file, line);
@@ -481,6 +501,9 @@ _sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
 	int64_t sleep_time = 0;
 #endif
 
+	if (SCHEDULER_STOPPED())
+		return (0);
+
 	/* If we already hold an exclusive lock, then recurse. */
 	if (sx_xlocked(sx)) {
 		KASSERT((sx->lock_object.lo_flags & LO_RECURSABLE) != 0,
@@ -681,6 +704,9 @@ _sx_xunlock_hard(struct sx *sx, uintptr_t tid, const char *file, int line)
 	uintptr_t x;
 	int queue, wakeup_swapper;
 
+	if (SCHEDULER_STOPPED())
+		return;
+
 	MPASS(!(sx->sx_lock & SX_LOCK_SHARED));
 
 	/* If the lock is recursed, then unrecurse one level. */
@@ -753,6 +779,9 @@ _sx_slock_hard(struct sx *sx, int opts, const char *file, int line)
 	int64_t sleep_time = 0;
 #endif
 
+	if (SCHEDULER_STOPPED())
+		return (0);
+
 	/*
 	 * As with rwlocks, we don't make any attempt to try to block
 	 * shared locks once there is an exclusive waiter.
@@ -919,6 +948,9 @@ _sx_sunlock_hard(struct sx *sx, const char *file, int line)
 	uintptr_t x;
 	int wakeup_swapper;
 
+	if (SCHEDULER_STOPPED())
+		return;
+
 	for (;;) {
 		x = sx->sx_lock;
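
For illustration, here is a minimal userspace sketch of the guard pattern the diff adds. Everything in it is a stand-in: the scheduler_stopped flag, struct sx_stub, and the *_stub functions are hypothetical simplifications for this sketch, not the kernel's SCHEDULER_STOPPED() machinery or the real struct sx.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool scheduler_stopped;	/* set once a "panic" stops scheduling */

#define	SCHEDULER_STOPPED()	atomic_load(&scheduler_stopped)

struct sx_stub {
	atomic_flag	taken;		/* toy lock state */
};

static int
sx_slock_stub(struct sx_stub *sx)
{

	/* The guard this diff adds: after a panic, pretend success. */
	if (SCHEDULER_STOPPED())
		return (0);
	while (atomic_flag_test_and_set(&sx->taken))
		;			/* spin; the real lock can sleep */
	return (0);
}

static void
sx_sunlock_stub(struct sx_stub *sx)
{

	/* Matching guard: never release state the lock path never took. */
	if (SCHEDULER_STOPPED())
		return;
	atomic_flag_clear(&sx->taken);
}

int
main(void)
{
	struct sx_stub sx = { .taken = ATOMIC_FLAG_INIT };

	sx_slock_stub(&sx);			/* normal path: really locks */
	sx_sunlock_stub(&sx);

	atomic_store(&scheduler_stopped, true);	/* simulate a panic */
	sx_slock_stub(&sx);			/* now returns immediately */
	sx_sunlock_stub(&sx);			/* and this is a no-op */
	printf("lock ops became no-ops after the stop flag\n");
	return (0);
}

Note the asymmetry the diff preserves: the blocking paths (_sx_slock, _sx_xlock, and the _hard variants) return 0, their error code for success, while the try and upgrade variants return 1, their boolean for success. Either way every caller observes an apparently successful acquisition, because blocking or failing while the scheduler is stopped would hang the panic path.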