author | mjg <mjg@FreeBSD.org> | 2017-03-16 08:29:09 +0000
---|---|---
committer | mjg <mjg@FreeBSD.org> | 2017-03-16 08:29:09 +0000
commit | 0fe3bdd8cca70f33224cbb501452f1e91e226db2 (patch) |
tree | 12e773749732bf5e9b46ed8d6ef6dbd6e53d30f4 /sys |
parent | 0f65f9b39d04f2b331957f9ef1c7797129e83df2 (diff) |
download | FreeBSD-src-0fe3bdd8cca70f33224cbb501452f1e91e226db2.zip FreeBSD-src-0fe3bdd8cca70f33224cbb501452f1e91e226db2.tar.gz |
MFC r313855,r313865,r313875,r313877,r313878,r313901,r313908,r313928,r313944,r314185,r314476,r314187
locks: let primitives for modules unlock without always going to the slow path
Going to the slow path is only needed when LOCK_PROFILING is enabled: profiling always has to
check whether the lock is about to be released, which requires a read of the lock that is
avoidable when the option is not compiled in.
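A minimal sketch of the resulting dispatch, condensed from the __mtx_unlock_flags() hunk in the kern_mutex.c portion of this diff (not the complete function):

```c
	/* Condensed from the __mtx_unlock_flags() hunk below. */
	mtx_assert(m, MA_OWNED);
#ifdef LOCK_PROFILING
	/* Profiling has to observe every release, so always take the slow path. */
	__mtx_unlock_sleep(c, opts, file, line);
#else
	/*
	 * Inline fast path: one atomic release; it only falls back to the
	 * slow path when the lock is contested or recursed.
	 */
	__mtx_unlock(m, curthread, opts, file, line);
#endif
	TD_LOCKS_DEC(curthread);
```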
==
sx: fix compilation on UP kernels after r313855
sx primitives use inlines as opposed to macros. Change the tested condition
to LOCK_DEBUG, which covers the case but is slightly overzealous.
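For reference, the LOCK_DEBUG definition being tested, condensed from the sys/sys/lock.h hunk further down this diff; it is set whenever any of several debugging-related options (or KLD_MODULE) is configured, which is why relying on it is broader than strictly necessary here:

```c
/* sys/sys/lock.h after this series (condensed from the hunk below). */
#if defined(KLD_MODULE) || defined(WITNESS) || defined(INVARIANTS) || \
    defined(INVARIANT_SUPPORT) || defined(LOCK_PROFILING) || defined(KTR)
#define	LOCK_DEBUG	1	/* KTR alone is enough now; KTR_LOCK is no longer checked */
#else
#define	LOCK_DEBUG	0
#endif
```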
commit a39b839d16cd72b1df284ccfe6706fcdf362706e
Author: mjg <mjg@ccf9f872-aa2e-dd11-9fc8-001c23d0bc1f>
Date: Sat Feb 18 22:06:03 2017 +0000
locks: clean up trylock primitives
In particular this reduces accesses of the lock itself.
git-svn-id: svn+ssh://svn.freebsd.org/base/head@313928 ccf9f872-aa2e-dd11-9fc8-001c23d0bc1f
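The cleanup replaces the old owned-check followed by a separate cmpset with a single fetching compare-and-set loop; a condensed sketch of the new _mtx_trylock_flags_() body, taken from the kern_mutex.c hunk below with comments added:

```c
	rval = 1;
	recursed = false;
	v = MTX_UNOWNED;
	for (;;) {
		/* One atomic op both tries the acquire and fetches the owner. */
		if (_mtx_obtain_lock_fetch(m, &v, tid))
			break;
		if (v == MTX_UNOWNED)
			continue;	/* owner released it meanwhile; retry without re-reading */
		if (v == tid &&
		    ((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
		    (opts & MTX_RECURSE) != 0)) {
			/* Already owned by us and recursion is permitted. */
			m->mtx_recurse++;
			atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
			recursed = true;
			break;
		}
		rval = 0;	/* owned by another thread: trylock fails */
		break;
	}
```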
commit 013560e742a5a276b0deef039bc18078d51d6eb0
Author: mjg <mjg@ccf9f872-aa2e-dd11-9fc8-001c23d0bc1f>
Date: Sat Feb 18 01:52:10 2017 +0000
mtx: plug the 'opts' argument when not used
git-svn-id: svn+ssh://svn.freebsd.org/base/head@313908 ccf9f872-aa2e-dd11-9fc8-001c23d0bc1f
commit 9a507901162fb476b9809da2919905735cd605af
Author: mjg <mjg@ccf9f872-aa2e-dd11-9fc8-001c23d0bc1f>
Date: Fri Feb 17 22:09:55 2017 +0000
sx: fix mips build after r313855
The namespace in this file really needs cleaning up. In the meantime
let inline primitives be defined as long as LOCK_DEBUG is not enabled.
Reported by: kib
git-svn-id: svn+ssh://svn.freebsd.org/base/head@313901 ccf9f872-aa2e-dd11-9fc8-001c23d0bc1f
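The guard referred to here shows up at the very end of the sys/sys/sx.h hunk in this diff (the combined diff does not mark which MFC'd revision each hunk came from); roughly:

```c
/*
 * sys/sys/sx.h: the inline sx fast paths are now compiled in whenever
 * LOCK_DEBUG is 0; the old guard additionally required !defined(SX_NOINLINE).
 */
#if (LOCK_DEBUG == 0)
/* ... inline __sx_xlock()/__sx_xunlock() fast paths ... */
#endif
```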
commit aa6243a5124b9ceb3b1683ea4dbb0a133ce70095
Author: mjg <mjg@ccf9f872-aa2e-dd11-9fc8-001c23d0bc1f>
Date: Fri Feb 17 15:40:24 2017 +0000
mtx: get rid of file/line args from slow paths if they are unused
This denotes changes which went in by accident in r313877.
On most production kernels both of these parameters are zeroed and nothing reads them
in either __mtx_lock_sleep or __mtx_unlock_sleep. Thus this change stops passing them
for internal consumers where that is the case.
Kernel modules use the _flags variants, which are not affected KBI-wise.
git-svn-id: svn+ssh://svn.freebsd.org/base/head@313878 ccf9f872-aa2e-dd11-9fc8-001c23d0bc1f
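The KBI-relevant piece is the prototype split; condensed from the sys/sys/mutex.h hunk further down, the slow-path declarations now depend on LOCK_DEBUG:

```c
#if LOCK_DEBUG > 0
void	__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v, uintptr_t tid,
	    int opts, const char *file, int line);
void	__mtx_unlock_sleep(volatile uintptr_t *c, int opts, const char *file,
	    int line);
#else
/* Stripped variants for production kernels: nothing reads opts/file/line. */
void	__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v, uintptr_t tid);
void	__mtx_unlock_sleep(volatile uintptr_t *c);
#endif
```

Modules keep calling the _flags entry points, so only the in-kernel fast-path macros pick up the shorter signatures.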
commit 688545a6af7ed0972653d6e2c6ca406ac511f39d
Author: mjg <mjg@ccf9f872-aa2e-dd11-9fc8-001c23d0bc1f>
Date: Fri Feb 17 15:34:40 2017 +0000
mtx: restrict r313875 to kernels without LOCK_PROFILING
git-svn-id: svn+ssh://svn.freebsd.org/base/head@313877 ccf9f872-aa2e-dd11-9fc8-001c23d0bc1f
commit bbe6477138713da2d503f93cb5dd602e14152a08
Author: mjg <mjg@ccf9f872-aa2e-dd11-9fc8-001c23d0bc1f>
Date: Fri Feb 17 14:55:59 2017 +0000
mtx: microoptimize lockstat handling in __mtx_lock_sleep
This saves a function call and multiple branches after the lock is acquired.
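In the combined diff below, __mtx_lock_sleep() ends up with the following shape (which individual MFC'd revision contributed which piece is not marked here); a condensed sketch with the contended-acquire loop elided:

```c
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	int doing_lockprof;
#endif
	/* ... */
#ifdef LOCK_PROFILING
	doing_lockprof = 1;			/* profiling is unconditional */
#elif defined(KDTRACE_HOOKS)
	doing_lockprof = lockstat_enabled;	/* only pay when a probe is armed */
	if (__predict_false(doing_lockprof))
		all_time -= lockstat_nsecs(&m->lock_object);
#endif
	/* ... contended acquire loop ... */
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	if (__predict_true(!doing_lockprof))
		return;		/* skip the lockstat/profiling tail entirely */
#endif
#ifdef KDTRACE_HOOKS
	all_time += lockstat_nsecs(&m->lock_object);
#endif
	LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire, m, contested,
	    waittime, file, line);
```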
Diffstat (limited to 'sys')
-rw-r--r-- | sys/kern/kern_lockstat.c | 2
-rw-r--r-- | sys/kern/kern_mutex.c | 121
-rw-r--r-- | sys/kern/kern_rwlock.c | 41
-rw-r--r-- | sys/kern/kern_sx.c | 42
-rw-r--r-- | sys/sys/lock.h | 8
-rw-r--r-- | sys/sys/lockstat.h | 2
-rw-r--r-- | sys/sys/mutex.h | 13
-rw-r--r-- | sys/sys/sx.h | 2
8 files changed, 177 insertions, 54 deletions
diff --git a/sys/kern/kern_lockstat.c b/sys/kern/kern_lockstat.c index 10da98b..679a01c 100644 --- a/sys/kern/kern_lockstat.c +++ b/sys/kern/kern_lockstat.c @@ -62,7 +62,7 @@ SDT_PROBE_DEFINE1(lockstat, , , sx__downgrade, "struct sx *"); SDT_PROBE_DEFINE2(lockstat, , , thread__spin, "struct mtx *", "uint64_t"); -int __read_mostly lockstat_enabled; +volatile int __read_mostly lockstat_enabled; uint64_t lockstat_nsecs(struct lock_object *lo) diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c index df8d562..7e581ff 100644 --- a/sys/kern/kern_mutex.c +++ b/sys/kern/kern_mutex.c @@ -272,7 +272,11 @@ __mtx_unlock_flags(volatile uintptr_t *c, int opts, const char *file, int line) line); mtx_assert(m, MA_OWNED); +#ifdef LOCK_PROFILING __mtx_unlock_sleep(c, opts, file, line); +#else + __mtx_unlock(m, curthread, opts, file, line); +#endif TD_LOCKS_DEC(curthread); } @@ -367,13 +371,18 @@ int _mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file, int line) { struct mtx *m; + struct thread *td; + uintptr_t tid, v; #ifdef LOCK_PROFILING uint64_t waittime = 0; int contested = 0; #endif int rval; + bool recursed; - if (SCHEDULER_STOPPED()) + td = curthread; + tid = (uintptr_t)td; + if (SCHEDULER_STOPPED_TD(td)) return (1); m = mtxlock2mtx(c); @@ -387,13 +396,26 @@ _mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file, int line) ("mtx_trylock() of spin mutex %s @ %s:%d", m->lock_object.lo_name, file, line)); - if (mtx_owned(m) && ((m->lock_object.lo_flags & LO_RECURSABLE) != 0 || - (opts & MTX_RECURSE) != 0)) { - m->mtx_recurse++; - atomic_set_ptr(&m->mtx_lock, MTX_RECURSED); - rval = 1; - } else - rval = _mtx_obtain_lock(m, (uintptr_t)curthread); + rval = 1; + recursed = false; + v = MTX_UNOWNED; + for (;;) { + if (_mtx_obtain_lock_fetch(m, &v, tid)) + break; + if (v == MTX_UNOWNED) + continue; + if (v == tid && + ((m->lock_object.lo_flags & LO_RECURSABLE) != 0 || + (opts & MTX_RECURSE) != 0)) { + m->mtx_recurse++; + atomic_set_ptr(&m->mtx_lock, MTX_RECURSED); + recursed = true; + break; + } + rval = 0; + break; + } + opts &= ~MTX_RECURSE; LOCK_LOG_TRY("LOCK", &m->lock_object, opts, rval, file, line); @@ -401,10 +423,9 @@ _mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file, int line) WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK, file, line); TD_LOCKS_INC(curthread); - if (m->mtx_recurse == 0) + if (!recursed) LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire, m, contested, waittime, file, line); - } return (rval); @@ -416,9 +437,14 @@ _mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file, int line) * We call this if the lock is either contested (i.e. we need to go to * sleep waiting for it), or if we need to recurse on it. 
*/ +#if LOCK_DEBUG > 0 void __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v, uintptr_t tid, int opts, const char *file, int line) +#else +void +__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v, uintptr_t tid) +#endif { struct mtx *m; struct turnstile *ts; @@ -440,6 +466,9 @@ __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v, uintptr_t tid, int opts, int64_t sleep_time = 0; int64_t all_time = 0; #endif +#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING) + int doing_lockprof; +#endif if (SCHEDULER_STOPPED()) return; @@ -458,14 +487,18 @@ __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v, uintptr_t tid, int opts, (opts & MTX_RECURSE) != 0, ("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n", m->lock_object.lo_name, file, line)); +#if LOCK_DEBUG > 0 opts &= ~MTX_RECURSE; +#endif m->mtx_recurse++; atomic_set_ptr(&m->mtx_lock, MTX_RECURSED); if (LOCK_LOG_TEST(&m->lock_object, opts)) CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m); return; } +#if LOCK_DEBUG > 0 opts &= ~MTX_RECURSE; +#endif #ifdef HWPMC_HOOKS PMC_SOFT_CALL( , , lock, failed); @@ -476,8 +509,12 @@ __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v, uintptr_t tid, int opts, CTR4(KTR_LOCK, "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d", m->lock_object.lo_name, (void *)m->mtx_lock, file, line); -#ifdef KDTRACE_HOOKS - all_time -= lockstat_nsecs(&m->lock_object); +#ifdef LOCK_PROFILING + doing_lockprof = 1; +#elif defined(KDTRACE_HOOKS) + doing_lockprof = lockstat_enabled; + if (__predict_false(doing_lockprof)) + all_time -= lockstat_nsecs(&m->lock_object); #endif for (;;) { @@ -584,9 +621,6 @@ __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v, uintptr_t tid, int opts, #endif v = MTX_READ_VALUE(m); } -#ifdef KDTRACE_HOOKS - all_time += lockstat_nsecs(&m->lock_object); -#endif #ifdef KTR if (cont_logged) { CTR4(KTR_CONTENTION, @@ -594,6 +628,13 @@ __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v, uintptr_t tid, int opts, m->lock_object.lo_name, (void *)tid, file, line); } #endif +#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING) + if (__predict_true(!doing_lockprof)) + return; +#endif +#ifdef KDTRACE_HOOKS + all_time += lockstat_nsecs(&m->lock_object); +#endif LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire, m, contested, waittime, file, line); #ifdef KDTRACE_HOOKS @@ -601,7 +642,7 @@ __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v, uintptr_t tid, int opts, LOCKSTAT_RECORD1(adaptive__block, m, sleep_time); /* - * Only record the loops spinning and not sleeping. + * Only record the loops spinning and not sleeping. 
*/ if (lda.spin_cnt > sleep_cnt) LOCKSTAT_RECORD1(adaptive__spin, m, all_time - sleep_time); @@ -647,6 +688,9 @@ _mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v, uintptr_t tid, #ifdef KDTRACE_HOOKS int64_t spin_time = 0; #endif +#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING) + int doing_lockprof; +#endif if (SCHEDULER_STOPPED()) return; @@ -671,8 +715,12 @@ _mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v, uintptr_t tid, PMC_SOFT_CALL( , , lock, failed); #endif lock_profile_obtain_lock_failed(&m->lock_object, &contested, &waittime); -#ifdef KDTRACE_HOOKS - spin_time -= lockstat_nsecs(&m->lock_object); +#ifdef LOCK_PROFILING + doing_lockprof = 1; +#elif defined(KDTRACE_HOOKS) + doing_lockprof = lockstat_enabled; + if (__predict_false(doing_lockprof)) + spin_time -= lockstat_nsecs(&m->lock_object); #endif for (;;) { if (v == MTX_UNOWNED) { @@ -698,18 +746,22 @@ _mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v, uintptr_t tid, } while (v != MTX_UNOWNED); spinlock_enter(); } -#ifdef KDTRACE_HOOKS - spin_time += lockstat_nsecs(&m->lock_object); -#endif if (LOCK_LOG_TEST(&m->lock_object, opts)) CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m); KTR_STATE0(KTR_SCHED, "thread", sched_tdname((struct thread *)tid), "running"); +#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING) + if (__predict_true(!doing_lockprof)) + return; +#endif #ifdef KDTRACE_HOOKS + spin_time += lockstat_nsecs(&m->lock_object); +#endif LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(spin__acquire, m, contested, waittime, file, line); +#ifdef KDTRACE_HOOKS if (spin_time != 0) LOCKSTAT_RECORD1(spin__spin, m, spin_time); #endif @@ -729,6 +781,9 @@ thread_lock_flags_(struct thread *td, int opts, const char *file, int line) #ifdef KDTRACE_HOOKS int64_t spin_time = 0; #endif +#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING) + int doing_lockprof = 1; +#endif tid = (uintptr_t)curthread; @@ -744,8 +799,12 @@ thread_lock_flags_(struct thread *td, int opts, const char *file, int line) lock_delay_arg_init(&lda, &mtx_spin_delay); -#ifdef KDTRACE_HOOKS - spin_time -= lockstat_nsecs(&td->td_lock->lock_object); +#ifdef LOCK_PROFILING + doing_lockprof = 1; +#elif defined(KDTRACE_HOOKS) + doing_lockprof = lockstat_enabled; + if (__predict_false(doing_lockprof)) + spin_time -= lockstat_nsecs(&td->td_lock->lock_object); #endif for (;;) { retry: @@ -801,15 +860,20 @@ retry: break; __mtx_unlock_spin(m); /* does spinlock_exit() */ } + LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file, + line); + WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line); + +#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING) + if (__predict_true(!doing_lockprof)) + return; +#endif #ifdef KDTRACE_HOOKS spin_time += lockstat_nsecs(&m->lock_object); #endif if (m->mtx_recurse == 0) LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(spin__acquire, m, contested, waittime, file, line); - LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file, - line); - WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line); #ifdef KDTRACE_HOOKS if (spin_time != 0) LOCKSTAT_RECORD1(thread__spin, m, spin_time); @@ -855,8 +919,13 @@ thread_lock_set(struct thread *td, struct mtx *new) * We are only called here if the lock is recursed, contested (i.e. we * need to wake up a blocked thread) or lockstat probe is active. 
*/ +#if LOCK_DEBUG > 0 void __mtx_unlock_sleep(volatile uintptr_t *c, int opts, const char *file, int line) +#else +void +__mtx_unlock_sleep(volatile uintptr_t *c) +#endif { struct mtx *m; struct turnstile *ts; diff --git a/sys/kern/kern_rwlock.c b/sys/kern/kern_rwlock.c index 6266dd9..988a44a 100644 --- a/sys/kern/kern_rwlock.c +++ b/sys/kern/kern_rwlock.c @@ -293,9 +293,14 @@ int __rw_try_wlock(volatile uintptr_t *c, const char *file, int line) { struct rwlock *rw; + struct thread *td; + uintptr_t tid, v; int rval; + bool recursed; - if (SCHEDULER_STOPPED()) + td = curthread; + tid = (uintptr_t)td; + if (SCHEDULER_STOPPED_TD(td)) return (1); rw = rwlock2rw(c); @@ -306,20 +311,28 @@ __rw_try_wlock(volatile uintptr_t *c, const char *file, int line) KASSERT(rw->rw_lock != RW_DESTROYED, ("rw_try_wlock() of destroyed rwlock @ %s:%d", file, line)); - if (rw_wlocked(rw) && - (rw->lock_object.lo_flags & LO_RECURSABLE) != 0) { - rw->rw_recurse++; - atomic_set_ptr(&rw->rw_lock, RW_LOCK_WRITER_RECURSED); - rval = 1; - } else - rval = atomic_cmpset_acq_ptr(&rw->rw_lock, RW_UNLOCKED, - (uintptr_t)curthread); + rval = 1; + recursed = false; + v = RW_UNLOCKED; + for (;;) { + if (atomic_fcmpset_acq_ptr(&rw->rw_lock, &v, tid)) + break; + if (v == RW_UNLOCKED) + continue; + if (v == tid && (rw->lock_object.lo_flags & LO_RECURSABLE)) { + rw->rw_recurse++; + atomic_set_ptr(&rw->rw_lock, RW_LOCK_WRITER_RECURSED); + break; + } + rval = 0; + break; + } LOCK_LOG_TRY("WLOCK", &rw->lock_object, 0, rval, file, line); if (rval) { WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK, file, line); - if (!rw_recursed(rw)) + if (!recursed) LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire, rw, 0, 0, file, line, LOCKSTAT_WRITER); TD_LOCKS_INC(curthread); @@ -341,7 +354,11 @@ _rw_wunlock_cookie(volatile uintptr_t *c, const char *file, int line) LOCK_LOG_LOCK("WUNLOCK", &rw->lock_object, 0, rw->rw_recurse, file, line); +#ifdef LOCK_PROFILING _rw_wunlock_hard(rw, (uintptr_t)curthread, file, line); +#else + __rw_wunlock(rw, curthread, file, line); +#endif TD_LOCKS_DEC(curthread); } @@ -633,13 +650,13 @@ __rw_try_rlock(volatile uintptr_t *c, const char *file, int line) ("rw_try_rlock() by idle thread %p on rwlock %s @ %s:%d", curthread, rw->lock_object.lo_name, file, line)); + x = rw->rw_lock; for (;;) { - x = rw->rw_lock; KASSERT(rw->rw_lock != RW_DESTROYED, ("rw_try_rlock() of destroyed rwlock @ %s:%d", file, line)); if (!(x & RW_LOCK_READ)) break; - if (atomic_cmpset_acq_ptr(&rw->rw_lock, x, x + RW_ONE_READER)) { + if (atomic_fcmpset_acq_ptr(&rw->rw_lock, &x, x + RW_ONE_READER)) { LOCK_LOG_TRY("RLOCK", &rw->lock_object, 0, 1, file, line); WITNESS_LOCK(&rw->lock_object, LOP_TRYLOCK, file, line); diff --git a/sys/kern/kern_sx.c b/sys/kern/kern_sx.c index f93292b..879e8a9 100644 --- a/sys/kern/kern_sx.c +++ b/sys/kern/kern_sx.c @@ -267,13 +267,13 @@ sx_try_slock_(struct sx *sx, const char *file, int line) ("sx_try_slock() by idle thread %p on sx %s @ %s:%d", curthread, sx->lock_object.lo_name, file, line)); + x = sx->sx_lock; for (;;) { - x = sx->sx_lock; KASSERT(x != SX_LOCK_DESTROYED, ("sx_try_slock() of destroyed sx @ %s:%d", file, line)); if (!(x & SX_LOCK_SHARED)) break; - if (atomic_cmpset_acq_ptr(&sx->sx_lock, x, x + SX_ONE_SHARER)) { + if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, x + SX_ONE_SHARER)) { LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 1, file, line); WITNESS_LOCK(&sx->lock_object, LOP_TRYLOCK, file, line); LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, @@ -320,9 +320,14 @@ _sx_xlock(struct sx 
*sx, int opts, const char *file, int line) int sx_try_xlock_(struct sx *sx, const char *file, int line) { + struct thread *td; + uintptr_t tid, x; int rval; + bool recursed; - if (SCHEDULER_STOPPED()) + td = curthread; + tid = (uintptr_t)td; + if (SCHEDULER_STOPPED_TD(td)) return (1); KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread), @@ -331,19 +336,28 @@ sx_try_xlock_(struct sx *sx, const char *file, int line) KASSERT(sx->sx_lock != SX_LOCK_DESTROYED, ("sx_try_xlock() of destroyed sx @ %s:%d", file, line)); - if (sx_xlocked(sx) && - (sx->lock_object.lo_flags & LO_RECURSABLE) != 0) { - sx->sx_recurse++; - atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED); - rval = 1; - } else - rval = atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED, - (uintptr_t)curthread); + rval = 1; + recursed = false; + x = SX_LOCK_UNLOCKED; + for (;;) { + if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid)) + break; + if (x == SX_LOCK_UNLOCKED) + continue; + if (x == tid && (sx->lock_object.lo_flags & LO_RECURSABLE)) { + sx->sx_recurse++; + atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED); + break; + } + rval = 0; + break; + } + LOCK_LOG_TRY("XLOCK", &sx->lock_object, 0, rval, file, line); if (rval) { WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK, file, line); - if (!sx_recursed(sx)) + if (!recursed) LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx, 0, 0, file, line, LOCKSTAT_WRITER); TD_LOCKS_INC(curthread); @@ -362,7 +376,11 @@ _sx_xunlock(struct sx *sx, const char *file, int line) WITNESS_UNLOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line); LOCK_LOG_LOCK("XUNLOCK", &sx->lock_object, 0, sx->sx_recurse, file, line); +#if LOCK_DEBUG > 0 _sx_xunlock_hard(sx, (uintptr_t)curthread, file, line); +#else + __sx_xunlock(sx, curthread, file, line); +#endif TD_LOCKS_DEC(curthread); } diff --git a/sys/sys/lock.h b/sys/sys/lock.h index 89b61ab..cc34cb4 100644 --- a/sys/sys/lock.h +++ b/sys/sys/lock.h @@ -125,7 +125,8 @@ struct lock_class { * calling conventions for this debugging code in modules so that modules can * work with both debug and non-debug kernels. 
*/ -#if defined(KLD_MODULE) || defined(WITNESS) || defined(INVARIANTS) || defined(INVARIANT_SUPPORT) || defined(LOCK_PROFILING) || (defined(KTR) && (KTR_COMPILE & KTR_LOCK)) +#if defined(KLD_MODULE) || defined(WITNESS) || defined(INVARIANTS) || \ + defined(INVARIANT_SUPPORT) || defined(LOCK_PROFILING) || defined(KTR) #define LOCK_DEBUG 1 #else #define LOCK_DEBUG 0 @@ -154,8 +155,13 @@ struct lock_class { * file - file name * line - line number */ +#if LOCK_DEBUG > 0 #define LOCK_LOG_TEST(lo, flags) \ (((flags) & LOP_QUIET) == 0 && ((lo)->lo_flags & LO_QUIET) == 0) +#else +#define LOCK_LOG_TEST(lo, flags) 0 +#endif + #define LOCK_LOG_LOCK(opname, lo, flags, recurse, file, line) do { \ if (LOCK_LOG_TEST((lo), (flags))) \ diff --git a/sys/sys/lockstat.h b/sys/sys/lockstat.h index 77294fb..6474505 100644 --- a/sys/sys/lockstat.h +++ b/sys/sys/lockstat.h @@ -68,7 +68,7 @@ SDT_PROBE_DECLARE(lockstat, , , thread__spin); #define LOCKSTAT_WRITER 0 #define LOCKSTAT_READER 1 -extern int lockstat_enabled; +extern volatile int lockstat_enabled; #ifdef KDTRACE_HOOKS diff --git a/sys/sys/mutex.h b/sys/sys/mutex.h index 42f26fd..0097d08 100644 --- a/sys/sys/mutex.h +++ b/sys/sys/mutex.h @@ -98,10 +98,16 @@ void mtx_sysinit(void *arg); int _mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file, int line); void mutex_init(void); +#if LOCK_DEBUG > 0 void __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v, uintptr_t tid, int opts, const char *file, int line); void __mtx_unlock_sleep(volatile uintptr_t *c, int opts, const char *file, int line); +#else +void __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v, uintptr_t tid); +void __mtx_unlock_sleep(volatile uintptr_t *c); +#endif + #ifdef SMP void _mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v, uintptr_t tid, int opts, const char *file, int line); @@ -140,10 +146,17 @@ void thread_lock_flags_(struct thread *, int, const char *, int); _mtx_destroy(&(m)->mtx_lock) #define mtx_trylock_flags_(m, o, f, l) \ _mtx_trylock_flags_(&(m)->mtx_lock, o, f, l) +#if LOCK_DEBUG > 0 #define _mtx_lock_sleep(m, v, t, o, f, l) \ __mtx_lock_sleep(&(m)->mtx_lock, v, t, o, f, l) #define _mtx_unlock_sleep(m, o, f, l) \ __mtx_unlock_sleep(&(m)->mtx_lock, o, f, l) +#else +#define _mtx_lock_sleep(m, v, t, o, f, l) \ + __mtx_lock_sleep(&(m)->mtx_lock, v, t) +#define _mtx_unlock_sleep(m, o, f, l) \ + __mtx_unlock_sleep(&(m)->mtx_lock) +#endif #ifdef SMP #define _mtx_lock_spin(m, v, t, o, f, l) \ _mtx_lock_spin_cookie(&(m)->mtx_lock, v, t, o, f, l) diff --git a/sys/sys/sx.h b/sys/sys/sx.h index 50b0a24..a31c328 100644 --- a/sys/sys/sx.h +++ b/sys/sys/sx.h @@ -145,7 +145,7 @@ struct sx_args { * deferred to 'tougher' functions. */ -#if (LOCK_DEBUG == 0) && !defined(SX_NOINLINE) +#if (LOCK_DEBUG == 0) /* Acquire an exclusive lock. */ static __inline int __sx_xlock(struct sx *sx, struct thread *td, int opts, const char *file, |