-rw-r--r--  sys/kern/kern_lockstat.c  |   2
-rw-r--r--  sys/kern/kern_mutex.c     | 121
-rw-r--r--  sys/kern/kern_rwlock.c    |  41
-rw-r--r--  sys/kern/kern_sx.c        |  42
-rw-r--r--  sys/sys/lock.h            |   8
-rw-r--r--  sys/sys/lockstat.h        |   2
-rw-r--r--  sys/sys/mutex.h           |  13
-rw-r--r--  sys/sys/sx.h              |   2
8 files changed, 177 insertions(+), 54 deletions(-)
diff --git a/sys/kern/kern_lockstat.c b/sys/kern/kern_lockstat.c
index 10da98b..679a01c 100644
--- a/sys/kern/kern_lockstat.c
+++ b/sys/kern/kern_lockstat.c
@@ -62,7 +62,7 @@ SDT_PROBE_DEFINE1(lockstat, , , sx__downgrade, "struct sx *");
SDT_PROBE_DEFINE2(lockstat, , , thread__spin, "struct mtx *", "uint64_t");
-int __read_mostly lockstat_enabled;
+volatile int __read_mostly lockstat_enabled;
uint64_t
lockstat_nsecs(struct lock_object *lo)
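
The kern_lockstat.c hunk only changes the qualifier on lockstat_enabled, but it is the
piece the later hunks build on: each contested-acquire path now samples the flag once
into a local (doing_lockprof) and skips the lockstat_nsecs() timestamping entirely when
no probe is attached.  A minimal sketch of that pattern, in kernel context; the function
name and the elided acquire loop are illustrative, while lockstat_enabled,
lockstat_nsecs() and struct lock_object are the real kernel symbols.

    static void
    contested_acquire_sketch(struct lock_object *lo)
    {
            int64_t all_time = 0;
            int doing_lockprof;

            /* Single read of the volatile flag; used via the local afterwards. */
            doing_lockprof = lockstat_enabled;
            if (__predict_false(doing_lockprof))
                    all_time -= lockstat_nsecs(lo);

            /* ... spin/sleep until the lock is acquired ... */

            if (__predict_true(!doing_lockprof))
                    return;         /* probes disabled: no accounting on exit */
            all_time += lockstat_nsecs(lo);
            /* ... LOCKSTAT_RECORD*() / LOCKSTAT_PROFILE_*() using all_time ... */
    }

Under LOCK_PROFILING the hunks instead hard-wire doing_lockprof to 1, so profiling
kernels keep the old behaviour.
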
diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c
index df8d562..7e581ff 100644
--- a/sys/kern/kern_mutex.c
+++ b/sys/kern/kern_mutex.c
@@ -272,7 +272,11 @@ __mtx_unlock_flags(volatile uintptr_t *c, int opts, const char *file, int line)
line);
mtx_assert(m, MA_OWNED);
+#ifdef LOCK_PROFILING
__mtx_unlock_sleep(c, opts, file, line);
+#else
+ __mtx_unlock(m, curthread, opts, file, line);
+#endif
TD_LOCKS_DEC(curthread);
}
@@ -367,13 +371,18 @@ int
_mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file, int line)
{
struct mtx *m;
+ struct thread *td;
+ uintptr_t tid, v;
#ifdef LOCK_PROFILING
uint64_t waittime = 0;
int contested = 0;
#endif
int rval;
+ bool recursed;
- if (SCHEDULER_STOPPED())
+ td = curthread;
+ tid = (uintptr_t)td;
+ if (SCHEDULER_STOPPED_TD(td))
return (1);
m = mtxlock2mtx(c);
@@ -387,13 +396,26 @@ _mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file, int line)
("mtx_trylock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
file, line));
- if (mtx_owned(m) && ((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
- (opts & MTX_RECURSE) != 0)) {
- m->mtx_recurse++;
- atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
- rval = 1;
- } else
- rval = _mtx_obtain_lock(m, (uintptr_t)curthread);
+ rval = 1;
+ recursed = false;
+ v = MTX_UNOWNED;
+ for (;;) {
+ if (_mtx_obtain_lock_fetch(m, &v, tid))
+ break;
+ if (v == MTX_UNOWNED)
+ continue;
+ if (v == tid &&
+ ((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
+ (opts & MTX_RECURSE) != 0)) {
+ m->mtx_recurse++;
+ atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
+ recursed = true;
+ break;
+ }
+ rval = 0;
+ break;
+ }
+
opts &= ~MTX_RECURSE;
LOCK_LOG_TRY("LOCK", &m->lock_object, opts, rval, file, line);
@@ -401,10 +423,9 @@ _mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file, int line)
WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
file, line);
TD_LOCKS_INC(curthread);
- if (m->mtx_recurse == 0)
+ if (!recursed)
LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire,
m, contested, waittime, file, line);
-
}
return (rval);
@@ -416,9 +437,14 @@ _mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file, int line)
* We call this if the lock is either contested (i.e. we need to go to
* sleep waiting for it), or if we need to recurse on it.
*/
+#if LOCK_DEBUG > 0
void
__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v, uintptr_t tid, int opts,
const char *file, int line)
+#else
+void
+__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v, uintptr_t tid)
+#endif
{
struct mtx *m;
struct turnstile *ts;
@@ -440,6 +466,9 @@ __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v, uintptr_t tid, int opts,
int64_t sleep_time = 0;
int64_t all_time = 0;
#endif
+#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
+ int doing_lockprof;
+#endif
if (SCHEDULER_STOPPED())
return;
@@ -458,14 +487,18 @@ __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v, uintptr_t tid, int opts,
(opts & MTX_RECURSE) != 0,
("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n",
m->lock_object.lo_name, file, line));
+#if LOCK_DEBUG > 0
opts &= ~MTX_RECURSE;
+#endif
m->mtx_recurse++;
atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
if (LOCK_LOG_TEST(&m->lock_object, opts))
CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
return;
}
+#if LOCK_DEBUG > 0
opts &= ~MTX_RECURSE;
+#endif
#ifdef HWPMC_HOOKS
PMC_SOFT_CALL( , , lock, failed);
@@ -476,8 +509,12 @@ __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v, uintptr_t tid, int opts,
CTR4(KTR_LOCK,
"_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
m->lock_object.lo_name, (void *)m->mtx_lock, file, line);
-#ifdef KDTRACE_HOOKS
- all_time -= lockstat_nsecs(&m->lock_object);
+#ifdef LOCK_PROFILING
+ doing_lockprof = 1;
+#elif defined(KDTRACE_HOOKS)
+ doing_lockprof = lockstat_enabled;
+ if (__predict_false(doing_lockprof))
+ all_time -= lockstat_nsecs(&m->lock_object);
#endif
for (;;) {
@@ -584,9 +621,6 @@ __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v, uintptr_t tid, int opts,
#endif
v = MTX_READ_VALUE(m);
}
-#ifdef KDTRACE_HOOKS
- all_time += lockstat_nsecs(&m->lock_object);
-#endif
#ifdef KTR
if (cont_logged) {
CTR4(KTR_CONTENTION,
@@ -594,6 +628,13 @@ __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v, uintptr_t tid, int opts,
m->lock_object.lo_name, (void *)tid, file, line);
}
#endif
+#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
+ if (__predict_true(!doing_lockprof))
+ return;
+#endif
+#ifdef KDTRACE_HOOKS
+ all_time += lockstat_nsecs(&m->lock_object);
+#endif
LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire, m, contested,
waittime, file, line);
#ifdef KDTRACE_HOOKS
@@ -601,7 +642,7 @@ __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v, uintptr_t tid, int opts,
LOCKSTAT_RECORD1(adaptive__block, m, sleep_time);
/*
- * Only record the loops spinning and not sleeping.
+ * Only record the loops spinning and not sleeping.
*/
if (lda.spin_cnt > sleep_cnt)
LOCKSTAT_RECORD1(adaptive__spin, m, all_time - sleep_time);
@@ -647,6 +688,9 @@ _mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v, uintptr_t tid,
#ifdef KDTRACE_HOOKS
int64_t spin_time = 0;
#endif
+#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
+ int doing_lockprof;
+#endif
if (SCHEDULER_STOPPED())
return;
@@ -671,8 +715,12 @@ _mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v, uintptr_t tid,
PMC_SOFT_CALL( , , lock, failed);
#endif
lock_profile_obtain_lock_failed(&m->lock_object, &contested, &waittime);
-#ifdef KDTRACE_HOOKS
- spin_time -= lockstat_nsecs(&m->lock_object);
+#ifdef LOCK_PROFILING
+ doing_lockprof = 1;
+#elif defined(KDTRACE_HOOKS)
+ doing_lockprof = lockstat_enabled;
+ if (__predict_false(doing_lockprof))
+ spin_time -= lockstat_nsecs(&m->lock_object);
#endif
for (;;) {
if (v == MTX_UNOWNED) {
@@ -698,18 +746,22 @@ _mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v, uintptr_t tid,
} while (v != MTX_UNOWNED);
spinlock_enter();
}
-#ifdef KDTRACE_HOOKS
- spin_time += lockstat_nsecs(&m->lock_object);
-#endif
if (LOCK_LOG_TEST(&m->lock_object, opts))
CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);
KTR_STATE0(KTR_SCHED, "thread", sched_tdname((struct thread *)tid),
"running");
+#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
+ if (__predict_true(!doing_lockprof))
+ return;
+#endif
#ifdef KDTRACE_HOOKS
+ spin_time += lockstat_nsecs(&m->lock_object);
+#endif
LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(spin__acquire, m,
contested, waittime, file, line);
+#ifdef KDTRACE_HOOKS
if (spin_time != 0)
LOCKSTAT_RECORD1(spin__spin, m, spin_time);
#endif
@@ -729,6 +781,9 @@ thread_lock_flags_(struct thread *td, int opts, const char *file, int line)
#ifdef KDTRACE_HOOKS
int64_t spin_time = 0;
#endif
+#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
+ int doing_lockprof = 1;
+#endif
tid = (uintptr_t)curthread;
@@ -744,8 +799,12 @@ thread_lock_flags_(struct thread *td, int opts, const char *file, int line)
lock_delay_arg_init(&lda, &mtx_spin_delay);
-#ifdef KDTRACE_HOOKS
- spin_time -= lockstat_nsecs(&td->td_lock->lock_object);
+#ifdef LOCK_PROFILING
+ doing_lockprof = 1;
+#elif defined(KDTRACE_HOOKS)
+ doing_lockprof = lockstat_enabled;
+ if (__predict_false(doing_lockprof))
+ spin_time -= lockstat_nsecs(&td->td_lock->lock_object);
#endif
for (;;) {
retry:
@@ -801,15 +860,20 @@ retry:
break;
__mtx_unlock_spin(m); /* does spinlock_exit() */
}
+ LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
+ line);
+ WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
+
+#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
+ if (__predict_true(!doing_lockprof))
+ return;
+#endif
#ifdef KDTRACE_HOOKS
spin_time += lockstat_nsecs(&m->lock_object);
#endif
if (m->mtx_recurse == 0)
LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(spin__acquire, m,
contested, waittime, file, line);
- LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
- line);
- WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
#ifdef KDTRACE_HOOKS
if (spin_time != 0)
LOCKSTAT_RECORD1(thread__spin, m, spin_time);
@@ -855,8 +919,13 @@ thread_lock_set(struct thread *td, struct mtx *new)
* We are only called here if the lock is recursed, contested (i.e. we
* need to wake up a blocked thread) or lockstat probe is active.
*/
+#if LOCK_DEBUG > 0
void
__mtx_unlock_sleep(volatile uintptr_t *c, int opts, const char *file, int line)
+#else
+void
+__mtx_unlock_sleep(volatile uintptr_t *c)
+#endif
{
struct mtx *m;
struct turnstile *ts;
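
Both the _mtx_trylock_flags_ rewrite above and the rwlock/sx try-lock changes below lean
on the fcmpset family (here via _mtx_obtain_lock_fetch()).  Unlike atomic_cmpset_*(),
which only reports success or failure, atomic_fcmpset_*() stores the value it actually
observed back into its "expected" operand on failure, so the retry loop never re-reads
the lock word and can test v == tid for recursion right away.  A rough equivalence in
terms of the older primitives; the real operation hands back the value seen by the
compare-and-swap itself rather than doing a second load, and the helper name is made up.

    static __inline int
    fcmpset_acq_ptr_sketch(volatile uintptr_t *p, uintptr_t *expp, uintptr_t new)
    {
            uintptr_t expected;

            expected = *expp;
            if (atomic_cmpset_acq_ptr(p, expected, new))
                    return (1);
            *expp = *p;             /* report what the lock word held instead */
            return (0);
    }

Because the loop records recursion in the local 'recursed', the later lockstat check can
test !recursed instead of dereferencing the lock again, which is what the change from
"if (m->mtx_recurse == 0)" to "if (!recursed)" is about.
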
diff --git a/sys/kern/kern_rwlock.c b/sys/kern/kern_rwlock.c
index 6266dd9..988a44a 100644
--- a/sys/kern/kern_rwlock.c
+++ b/sys/kern/kern_rwlock.c
@@ -293,9 +293,14 @@ int
__rw_try_wlock(volatile uintptr_t *c, const char *file, int line)
{
struct rwlock *rw;
+ struct thread *td;
+ uintptr_t tid, v;
int rval;
+ bool recursed;
- if (SCHEDULER_STOPPED())
+ td = curthread;
+ tid = (uintptr_t)td;
+ if (SCHEDULER_STOPPED_TD(td))
return (1);
rw = rwlock2rw(c);
@@ -306,20 +311,28 @@ __rw_try_wlock(volatile uintptr_t *c, const char *file, int line)
KASSERT(rw->rw_lock != RW_DESTROYED,
("rw_try_wlock() of destroyed rwlock @ %s:%d", file, line));
- if (rw_wlocked(rw) &&
- (rw->lock_object.lo_flags & LO_RECURSABLE) != 0) {
- rw->rw_recurse++;
- atomic_set_ptr(&rw->rw_lock, RW_LOCK_WRITER_RECURSED);
- rval = 1;
- } else
- rval = atomic_cmpset_acq_ptr(&rw->rw_lock, RW_UNLOCKED,
- (uintptr_t)curthread);
+ rval = 1;
+ recursed = false;
+ v = RW_UNLOCKED;
+ for (;;) {
+ if (atomic_fcmpset_acq_ptr(&rw->rw_lock, &v, tid))
+ break;
+ if (v == RW_UNLOCKED)
+ continue;
+ if (v == tid && (rw->lock_object.lo_flags & LO_RECURSABLE)) {
+ rw->rw_recurse++;
+ atomic_set_ptr(&rw->rw_lock, RW_LOCK_WRITER_RECURSED);
+ break;
+ }
+ rval = 0;
+ break;
+ }
LOCK_LOG_TRY("WLOCK", &rw->lock_object, 0, rval, file, line);
if (rval) {
WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
file, line);
- if (!rw_recursed(rw))
+ if (!recursed)
LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire,
rw, 0, 0, file, line, LOCKSTAT_WRITER);
TD_LOCKS_INC(curthread);
@@ -341,7 +354,11 @@ _rw_wunlock_cookie(volatile uintptr_t *c, const char *file, int line)
LOCK_LOG_LOCK("WUNLOCK", &rw->lock_object, 0, rw->rw_recurse, file,
line);
+#ifdef LOCK_PROFILING
_rw_wunlock_hard(rw, (uintptr_t)curthread, file, line);
+#else
+ __rw_wunlock(rw, curthread, file, line);
+#endif
TD_LOCKS_DEC(curthread);
}
@@ -633,13 +650,13 @@ __rw_try_rlock(volatile uintptr_t *c, const char *file, int line)
("rw_try_rlock() by idle thread %p on rwlock %s @ %s:%d",
curthread, rw->lock_object.lo_name, file, line));
+ x = rw->rw_lock;
for (;;) {
- x = rw->rw_lock;
KASSERT(rw->rw_lock != RW_DESTROYED,
("rw_try_rlock() of destroyed rwlock @ %s:%d", file, line));
if (!(x & RW_LOCK_READ))
break;
- if (atomic_cmpset_acq_ptr(&rw->rw_lock, x, x + RW_ONE_READER)) {
+ if (atomic_fcmpset_acq_ptr(&rw->rw_lock, &x, x + RW_ONE_READER)) {
LOCK_LOG_TRY("RLOCK", &rw->lock_object, 0, 1, file,
line);
WITNESS_LOCK(&rw->lock_object, LOP_TRYLOCK, file, line);
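
__rw_try_rlock hoists the read of rw_lock out of the loop for the same reason: after a
failed atomic_fcmpset_acq_ptr() the local x already holds the value some other CPU
installed, so re-reading at the top of every iteration would only add traffic on the
lock cacheline.  The resulting shape of the loop, as a trimmed fragment (KASSERT,
logging and WITNESS calls omitted):

    x = rw->rw_lock;                        /* primed once, before the loop */
    for (;;) {
            if (!(x & RW_LOCK_READ))
                    break;                  /* write-locked: the trylock fails */
            if (atomic_fcmpset_acq_ptr(&rw->rw_lock, &x, x + RW_ONE_READER))
                    return (1);             /* reader count bumped */
            /* fcmpset refreshed x; just retry against the new value */
    }
    return (0);
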
diff --git a/sys/kern/kern_sx.c b/sys/kern/kern_sx.c
index f93292b..879e8a9 100644
--- a/sys/kern/kern_sx.c
+++ b/sys/kern/kern_sx.c
@@ -267,13 +267,13 @@ sx_try_slock_(struct sx *sx, const char *file, int line)
("sx_try_slock() by idle thread %p on sx %s @ %s:%d",
curthread, sx->lock_object.lo_name, file, line));
+ x = sx->sx_lock;
for (;;) {
- x = sx->sx_lock;
KASSERT(x != SX_LOCK_DESTROYED,
("sx_try_slock() of destroyed sx @ %s:%d", file, line));
if (!(x & SX_LOCK_SHARED))
break;
- if (atomic_cmpset_acq_ptr(&sx->sx_lock, x, x + SX_ONE_SHARER)) {
+ if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, x + SX_ONE_SHARER)) {
LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 1, file, line);
WITNESS_LOCK(&sx->lock_object, LOP_TRYLOCK, file, line);
LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire,
@@ -320,9 +320,14 @@ _sx_xlock(struct sx *sx, int opts, const char *file, int line)
int
sx_try_xlock_(struct sx *sx, const char *file, int line)
{
+ struct thread *td;
+ uintptr_t tid, x;
int rval;
+ bool recursed;
- if (SCHEDULER_STOPPED())
+ td = curthread;
+ tid = (uintptr_t)td;
+ if (SCHEDULER_STOPPED_TD(td))
return (1);
KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
@@ -331,19 +336,28 @@ sx_try_xlock_(struct sx *sx, const char *file, int line)
KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
("sx_try_xlock() of destroyed sx @ %s:%d", file, line));
- if (sx_xlocked(sx) &&
- (sx->lock_object.lo_flags & LO_RECURSABLE) != 0) {
- sx->sx_recurse++;
- atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
- rval = 1;
- } else
- rval = atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED,
- (uintptr_t)curthread);
+ rval = 1;
+ recursed = false;
+ x = SX_LOCK_UNLOCKED;
+ for (;;) {
+ if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
+ break;
+ if (x == SX_LOCK_UNLOCKED)
+ continue;
+ if (x == tid && (sx->lock_object.lo_flags & LO_RECURSABLE)) {
+ sx->sx_recurse++;
+ atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
+ break;
+ }
+ rval = 0;
+ break;
+ }
+
LOCK_LOG_TRY("XLOCK", &sx->lock_object, 0, rval, file, line);
if (rval) {
WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
file, line);
- if (!sx_recursed(sx))
+ if (!recursed)
LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire,
sx, 0, 0, file, line, LOCKSTAT_WRITER);
TD_LOCKS_INC(curthread);
@@ -362,7 +376,11 @@ _sx_xunlock(struct sx *sx, const char *file, int line)
WITNESS_UNLOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
LOCK_LOG_LOCK("XUNLOCK", &sx->lock_object, 0, sx->sx_recurse, file,
line);
+#if LOCK_DEBUG > 0
_sx_xunlock_hard(sx, (uintptr_t)curthread, file, line);
+#else
+ __sx_xunlock(sx, curthread, file, line);
+#endif
TD_LOCKS_DEC(curthread);
}
diff --git a/sys/sys/lock.h b/sys/sys/lock.h
index 89b61ab..cc34cb4 100644
--- a/sys/sys/lock.h
+++ b/sys/sys/lock.h
@@ -125,7 +125,8 @@ struct lock_class {
* calling conventions for this debugging code in modules so that modules can
* work with both debug and non-debug kernels.
*/
-#if defined(KLD_MODULE) || defined(WITNESS) || defined(INVARIANTS) || defined(INVARIANT_SUPPORT) || defined(LOCK_PROFILING) || (defined(KTR) && (KTR_COMPILE & KTR_LOCK))
+#if defined(KLD_MODULE) || defined(WITNESS) || defined(INVARIANTS) || \
+ defined(INVARIANT_SUPPORT) || defined(LOCK_PROFILING) || defined(KTR)
#define LOCK_DEBUG 1
#else
#define LOCK_DEBUG 0
@@ -154,8 +155,13 @@ struct lock_class {
* file - file name
* line - line number
*/
+#if LOCK_DEBUG > 0
#define LOCK_LOG_TEST(lo, flags) \
(((flags) & LOP_QUIET) == 0 && ((lo)->lo_flags & LO_QUIET) == 0)
+#else
+#define LOCK_LOG_TEST(lo, flags) 0
+#endif
+
#define LOCK_LOG_LOCK(opname, lo, flags, recurse, file, line) do { \
if (LOCK_LOG_TEST((lo), (flags))) \
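
With LOCK_DEBUG == 0 the test macro is now the constant 0, so the KTR lock-logging sites
in the slow paths compile down to dead code, and because the macro no longer evaluates
its arguments those sites still compile inside the slimmed-down functions whose opts,
file and line parameters are gone.  Roughly (effect shown, not literal preprocessor
output):

    /* LOCK_DEBUG == 0: LOCK_LOG_TEST(lo, flags) expands to plain 0 */
    if (LOCK_LOG_TEST(&m->lock_object, opts))       /* -> if (0); 'opts' never emitted */
            CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
    /* the branch body is unreachable and the compiler drops it */
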
diff --git a/sys/sys/lockstat.h b/sys/sys/lockstat.h
index 77294fb..6474505 100644
--- a/sys/sys/lockstat.h
+++ b/sys/sys/lockstat.h
@@ -68,7 +68,7 @@ SDT_PROBE_DECLARE(lockstat, , , thread__spin);
#define LOCKSTAT_WRITER 0
#define LOCKSTAT_READER 1
-extern int lockstat_enabled;
+extern volatile int lockstat_enabled;
#ifdef KDTRACE_HOOKS
diff --git a/sys/sys/mutex.h b/sys/sys/mutex.h
index 42f26fd..0097d08 100644
--- a/sys/sys/mutex.h
+++ b/sys/sys/mutex.h
@@ -98,10 +98,16 @@ void mtx_sysinit(void *arg);
int _mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file,
int line);
void mutex_init(void);
+#if LOCK_DEBUG > 0
void __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v, uintptr_t tid,
int opts, const char *file, int line);
void __mtx_unlock_sleep(volatile uintptr_t *c, int opts, const char *file,
int line);
+#else
+void __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v, uintptr_t tid);
+void __mtx_unlock_sleep(volatile uintptr_t *c);
+#endif
+
#ifdef SMP
void _mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v, uintptr_t tid,
int opts, const char *file, int line);
@@ -140,10 +146,17 @@ void thread_lock_flags_(struct thread *, int, const char *, int);
_mtx_destroy(&(m)->mtx_lock)
#define mtx_trylock_flags_(m, o, f, l) \
_mtx_trylock_flags_(&(m)->mtx_lock, o, f, l)
+#if LOCK_DEBUG > 0
#define _mtx_lock_sleep(m, v, t, o, f, l) \
__mtx_lock_sleep(&(m)->mtx_lock, v, t, o, f, l)
#define _mtx_unlock_sleep(m, o, f, l) \
__mtx_unlock_sleep(&(m)->mtx_lock, o, f, l)
+#else
+#define _mtx_lock_sleep(m, v, t, o, f, l) \
+ __mtx_lock_sleep(&(m)->mtx_lock, v, t)
+#define _mtx_unlock_sleep(m, o, f, l) \
+ __mtx_unlock_sleep(&(m)->mtx_lock)
+#endif
#ifdef SMP
#define _mtx_lock_spin(m, v, t, o, f, l) \
_mtx_lock_spin_cookie(&(m)->mtx_lock, v, t, o, f, l)
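
The header now carries both prototypes and picks the matching wrapper at compile time:
in a non-debug kernel the in-tree call sites stop passing opts/file/line to the sleep
slow paths, while modules always get the full signature because KLD_MODULE forces
LOCK_DEBUG to 1 in sys/sys/lock.h, keeping the module calling convention independent of
how the kernel itself was built.  Illustrative expansions of the wrapper macro, not
literal preprocessor output:

    /* LOCK_DEBUG > 0 (debug kernels, and always for modules): */
    _mtx_unlock_sleep(m, opts, file, line);
            /* -> __mtx_unlock_sleep(&(m)->mtx_lock, opts, file, line); */

    /* LOCK_DEBUG == 0 (non-debug kernel, in-tree callers only): */
    _mtx_unlock_sleep(m, opts, file, line);
            /* -> __mtx_unlock_sleep(&(m)->mtx_lock);  opts/file/line are dropped */
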
diff --git a/sys/sys/sx.h b/sys/sys/sx.h
index 50b0a24..a31c328 100644
--- a/sys/sys/sx.h
+++ b/sys/sys/sx.h
@@ -145,7 +145,7 @@ struct sx_args {
* deferred to 'tougher' functions.
*/
-#if (LOCK_DEBUG == 0) && !defined(SX_NOINLINE)
+#if (LOCK_DEBUG == 0)
/* Acquire an exclusive lock. */
static __inline int
__sx_xlock(struct sx *sx, struct thread *td, int opts, const char *file,