| author | mjg <mjg@FreeBSD.org> | 2016-08-11 09:28:49 +0000 |
|---|---|---|
| committer | mjg <mjg@FreeBSD.org> | 2016-08-11 09:28:49 +0000 |
| commit | f6eab3bb444ed8b47aa811577447abfcd80c5a8b (patch) | |
| tree | 3519cb6bf291ff67ec8623d701ffdd517d297284 /sys/kern | |
| parent | 91c31a960acb18732dcecb025733e05cc1080cb0 (diff) | |
MFC r303562, r303563, r303584, r303643, r303652, r303655, r303707:
rwlock: s/READER/WRITER/ in wlock lockstat annotation
==
sx: increment spin_cnt before cpu_spinwait in xlock
The change is a no-op, done only for consistency with the rest of the file.
==
locks: change sleep_cnt and spin_cnt types to u_int
Both variables are uint64_t, but they only count spins or sleeps.
All reasonable values we can get here comfortably fit in a 32-bit range.
==
Implement trivial backoff for locking primitives.
All current spinning loops retry an atomic op at the first opportunity,
which leads to performance degradation under load.
One classic solution to the problem is to back off before retrying. This
implementation uses a trivial linear increment plus a random factor for
each attempt.
For simplicity, this first-touch implementation only modifies spinning
loops where the lock owner is running. Spin mutexes and the thread lock
were not modified.
Current parameters are autotuned on boot based on mp_ncpus.
Autotune factors are very conservative and are subject to change later.
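To make the scheme concrete, here is a minimal userland model of the backoff (illustrative code, not part of the commit; `struct delay_config` and `backoff_once` are hypothetical stand-ins, and the constants assume mp_ncpus = 8, per the autotune formulas in the diff below). The shape mirrors the lock_delay() function added to subr_lock.c: the delay window grows linearly by `step` per attempt up to `max`, and the number of busy-wait iterations actually performed is a randomized value within that window, clamped below by `min`.

```c
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical userland model of the kernel's new lock_delay() backoff. */
struct delay_config {
        unsigned initial, step, min, max;
};

static unsigned
backoff_once(const struct delay_config *c, unsigned *delay)
{
        unsigned spins;

        if (*delay == 0)                /* first contended attempt */
                *delay = c->initial;
        else {
                *delay += c->step;      /* trivial linear increment... */
                if (*delay > c->max)
                        *delay = c->max;        /* ...capped at max */
        }
        /* Random factor; the kernel uses cpu_ticks() % delay instead. */
        spins = rand() % *delay;
        if (spins < c->min)
                spins = c->min;
        /* The kernel would now call cpu_spinwait() 'spins' times. */
        return (spins);
}

int
main(void)
{
        /* Values the autotuner would install for mp_ncpus = 8. */
        struct delay_config c = { 200, 100, 40, 2000 };
        unsigned delay = 0;

        for (int i = 1; i <= 6; i++) {
                unsigned spins = backoff_once(&c, &delay);
                printf("attempt %d: window [%u, %u), spun %u times\n",
                    i, c.min, delay, spins);
        }
        return (0);
}
```

Each loser thus waits a different, growing amount of time before re-testing the lock word, reducing cache-line ping-pong compared to retrying the atomic op immediately.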
==
locks: fix up ifdef guards introduced in r303643
Both sx and rwlocks had a copy-pasted ADAPTIVE_MUTEXES guard instead of the
correct define.
==
locks: fix compilation for KDTRACE_HOOKS && !ADAPTIVE_* case
==
locks: fix sx compilation on mips after r303643
The kernel.h header is required for the SYSINIT macro; it was apparently
only being pulled in on amd64 by accident.
Approved by: re (gjb)
Diffstat (limited to 'sys/kern')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | sys/kern/kern_mutex.c | 55 |
| -rw-r--r-- | sys/kern/kern_rwlock.c | 98 |
| -rw-r--r-- | sys/kern/kern_sx.c | 78 |
| -rw-r--r-- | sys/kern/subr_lock.c | 28 |
4 files changed, 196 insertions, 63 deletions
```diff
diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c
index 453add4..97916be 100644
--- a/sys/kern/kern_mutex.c
+++ b/sys/kern/kern_mutex.c
@@ -55,6 +55,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/resourcevar.h>
 #include <sys/sched.h>
 #include <sys/sbuf.h>
+#include <sys/smp.h>
 #include <sys/sysctl.h>
 #include <sys/turnstile.h>
 #include <sys/vmmeter.h>
@@ -138,6 +139,37 @@ struct lock_class lock_class_mtx_spin = {
 #endif
 };
 
+#ifdef ADAPTIVE_MUTEXES
+static SYSCTL_NODE(_debug, OID_AUTO, mtx, CTLFLAG_RD, NULL, "mtx debugging");
+
+static struct lock_delay_config mtx_delay = {
+        .initial = 1000,
+        .step = 500,
+        .min = 100,
+        .max = 5000,
+};
+
+SYSCTL_INT(_debug_mtx, OID_AUTO, delay_initial, CTLFLAG_RW, &mtx_delay.initial,
+    0, "");
+SYSCTL_INT(_debug_mtx, OID_AUTO, delay_step, CTLFLAG_RW, &mtx_delay.step,
+    0, "");
+SYSCTL_INT(_debug_mtx, OID_AUTO, delay_min, CTLFLAG_RW, &mtx_delay.min,
+    0, "");
+SYSCTL_INT(_debug_mtx, OID_AUTO, delay_max, CTLFLAG_RW, &mtx_delay.max,
+    0, "");
+
+static void
+mtx_delay_sysinit(void *dummy)
+{
+
+        mtx_delay.initial = mp_ncpus * 25;
+        mtx_delay.step = (mp_ncpus * 25) / 2;
+        mtx_delay.min = mp_ncpus * 5;
+        mtx_delay.max = mp_ncpus * 25 * 10;
+}
+LOCK_DELAY_SYSINIT(mtx_delay_sysinit);
+#endif
+
 /*
  * System-wide mutexes
  */
@@ -408,9 +440,11 @@ __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t tid, int opts,
         int contested = 0;
         uint64_t waittime = 0;
 #endif
+#if defined(ADAPTIVE_MUTEXES) || defined(KDTRACE_HOOKS)
+        struct lock_delay_arg lda;
+#endif
 #ifdef KDTRACE_HOOKS
-        uint64_t spin_cnt = 0;
-        uint64_t sleep_cnt = 0;
+        u_int sleep_cnt = 0;
         int64_t sleep_time = 0;
         int64_t all_time = 0;
 #endif
@@ -418,6 +452,11 @@ __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t tid, int opts,
         if (SCHEDULER_STOPPED())
                 return;
 
+#if defined(ADAPTIVE_MUTEXES)
+        lock_delay_arg_init(&lda, &mtx_delay);
+#elif defined(KDTRACE_HOOKS)
+        lock_delay_arg_init(&lda, NULL);
+#endif
         m = mtxlock2mtx(c);
 
         if (mtx_owned(m)) {
@@ -451,7 +490,7 @@ __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t tid, int opts,
                 if (m->mtx_lock == MTX_UNOWNED && _mtx_obtain_lock(m, tid))
                         break;
 #ifdef KDTRACE_HOOKS
-                spin_cnt++;
+                lda.spin_cnt++;
 #endif
 #ifdef ADAPTIVE_MUTEXES
                 /*
@@ -471,12 +510,8 @@ __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t tid, int opts,
                             "spinning", "lockname:\"%s\"",
                             m->lock_object.lo_name);
                         while (mtx_owner(m) == owner &&
-                            TD_IS_RUNNING(owner)) {
-                                cpu_spinwait();
-#ifdef KDTRACE_HOOKS
-                                spin_cnt++;
-#endif
-                        }
+                            TD_IS_RUNNING(owner))
+                                lock_delay(&lda);
                         KTR_STATE0(KTR_SCHED, "thread",
                             sched_tdname((struct thread *)tid),
                             "running");
@@ -570,7 +605,7 @@ __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t tid, int opts,
         /*
          * Only record the loops spinning and not sleeping.
          */
-        if (spin_cnt > sleep_cnt)
+        if (lda.spin_cnt > sleep_cnt)
                 LOCKSTAT_RECORD1(adaptive__spin, m, all_time - sleep_time);
 #endif
 }
diff --git a/sys/kern/kern_rwlock.c b/sys/kern/kern_rwlock.c
index 6a904d2..ea2f969 100644
--- a/sys/kern/kern_rwlock.c
+++ b/sys/kern/kern_rwlock.c
@@ -44,6 +44,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/proc.h>
 #include <sys/rwlock.h>
 #include <sys/sched.h>
+#include <sys/smp.h>
 #include <sys/sysctl.h>
 #include <sys/systm.h>
 #include <sys/turnstile.h>
@@ -65,15 +66,6 @@ PMC_SOFT_DECLARE( , , lock, failed);
  */
 #define rwlock2rw(c)    (__containerof(c, struct rwlock, rw_lock))
 
-#ifdef ADAPTIVE_RWLOCKS
-static int rowner_retries = 10;
-static int rowner_loops = 10000;
-static SYSCTL_NODE(_debug, OID_AUTO, rwlock, CTLFLAG_RD, NULL,
-    "rwlock debugging");
-SYSCTL_INT(_debug_rwlock, OID_AUTO, retry, CTLFLAG_RW, &rowner_retries, 0, "");
-SYSCTL_INT(_debug_rwlock, OID_AUTO, loops, CTLFLAG_RW, &rowner_loops, 0, "");
-#endif
-
 #ifdef DDB
 #include <ddb/ddb.h>
 
@@ -100,6 +92,42 @@ struct lock_class lock_class_rw = {
 #endif
 };
 
+#ifdef ADAPTIVE_RWLOCKS
+static int rowner_retries = 10;
+static int rowner_loops = 10000;
+static SYSCTL_NODE(_debug, OID_AUTO, rwlock, CTLFLAG_RD, NULL,
+    "rwlock debugging");
+SYSCTL_INT(_debug_rwlock, OID_AUTO, retry, CTLFLAG_RW, &rowner_retries, 0, "");
+SYSCTL_INT(_debug_rwlock, OID_AUTO, loops, CTLFLAG_RW, &rowner_loops, 0, "");
+
+static struct lock_delay_config rw_delay = {
+        .initial = 1000,
+        .step = 500,
+        .min = 100,
+        .max = 5000,
+};
+
+SYSCTL_INT(_debug_rwlock, OID_AUTO, delay_initial, CTLFLAG_RW, &rw_delay.initial,
+    0, "");
+SYSCTL_INT(_debug_rwlock, OID_AUTO, delay_step, CTLFLAG_RW, &rw_delay.step,
+    0, "");
+SYSCTL_INT(_debug_rwlock, OID_AUTO, delay_min, CTLFLAG_RW, &rw_delay.min,
+    0, "");
+SYSCTL_INT(_debug_rwlock, OID_AUTO, delay_max, CTLFLAG_RW, &rw_delay.max,
+    0, "");
+
+static void
+rw_delay_sysinit(void *dummy)
+{
+
+        rw_delay.initial = mp_ncpus * 25;
+        rw_delay.step = (mp_ncpus * 25) / 2;
+        rw_delay.min = mp_ncpus * 5;
+        rw_delay.max = mp_ncpus * 25 * 10;
+}
+LOCK_DELAY_SYSINIT(rw_delay_sysinit);
+#endif
+
 /*
  * Return a pointer to the owning thread if the lock is write-locked or
  * NULL if the lock is unlocked or read-locked.
@@ -355,10 +383,12 @@ __rw_rlock(volatile uintptr_t *c, const char *file, int line)
         int contested = 0;
 #endif
         uintptr_t v;
+#if defined(ADAPTIVE_RWLOCKS) || defined(KDTRACE_HOOKS)
+        struct lock_delay_arg lda;
+#endif
 #ifdef KDTRACE_HOOKS
         uintptr_t state;
-        uint64_t spin_cnt = 0;
-        uint64_t sleep_cnt = 0;
+        u_int sleep_cnt = 0;
         int64_t sleep_time = 0;
         int64_t all_time = 0;
 #endif
@@ -366,6 +396,11 @@ __rw_rlock(volatile uintptr_t *c, const char *file, int line)
         if (SCHEDULER_STOPPED())
                 return;
 
+#if defined(ADAPTIVE_RWLOCKS)
+        lock_delay_arg_init(&lda, &rw_delay);
+#elif defined(KDTRACE_HOOKS)
+        lock_delay_arg_init(&lda, NULL);
+#endif
         rw = rwlock2rw(c);
 
         KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
@@ -412,7 +447,7 @@ __rw_rlock(volatile uintptr_t *c, const char *file, int line)
                         continue;
                 }
 #ifdef KDTRACE_HOOKS
-                spin_cnt++;
+                lda.spin_cnt++;
 #endif
 #ifdef HWPMC_HOOKS
                 PMC_SOFT_CALL( , , lock, failed);
@@ -437,12 +472,8 @@ __rw_rlock(volatile uintptr_t *c, const char *file, int line)
                             sched_tdname(curthread), "spinning",
                             "lockname:\"%s\"", rw->lock_object.lo_name);
                         while ((struct thread*)RW_OWNER(rw->rw_lock) ==
-                            owner && TD_IS_RUNNING(owner)) {
-                                cpu_spinwait();
-#ifdef KDTRACE_HOOKS
-                                spin_cnt++;
-#endif
-                        }
+                            owner && TD_IS_RUNNING(owner))
+                                lock_delay(&lda);
                         KTR_STATE0(KTR_SCHED, "thread",
                             sched_tdname(curthread), "running");
                         continue;
@@ -459,7 +490,7 @@ __rw_rlock(volatile uintptr_t *c, const char *file, int line)
                                 cpu_spinwait();
                         }
 #ifdef KDTRACE_HOOKS
-                        spin_cnt += rowner_loops - i;
+                        lda.spin_cnt += rowner_loops - i;
 #endif
                         KTR_STATE0(KTR_SCHED, "thread",
                             sched_tdname(curthread), "running");
@@ -552,7 +583,7 @@ __rw_rlock(volatile uintptr_t *c, const char *file, int line)
             (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));
 
         /* Record only the loops spinning and not sleeping. */
-        if (spin_cnt > sleep_cnt)
+        if (lda.spin_cnt > sleep_cnt)
                 LOCKSTAT_RECORD4(rw__spin, rw, all_time - sleep_time,
                     LOCKSTAT_READER, (state & RW_LOCK_READ) == 0,
                     (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));
@@ -740,10 +771,12 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t tid, const char *file,
         uint64_t waittime = 0;
         int contested = 0;
 #endif
+#if defined(ADAPTIVE_RWLOCKS) || defined(KDTRACE_HOOKS)
+        struct lock_delay_arg lda;
+#endif
 #ifdef KDTRACE_HOOKS
         uintptr_t state;
-        uint64_t spin_cnt = 0;
-        uint64_t sleep_cnt = 0;
+        u_int sleep_cnt = 0;
         int64_t sleep_time = 0;
         int64_t all_time = 0;
 #endif
@@ -751,6 +784,11 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t tid, const char *file,
         if (SCHEDULER_STOPPED())
                 return;
 
+#if defined(ADAPTIVE_RWLOCKS)
+        lock_delay_arg_init(&lda, &rw_delay);
+#elif defined(KDTRACE_HOOKS)
+        lock_delay_arg_init(&lda, NULL);
+#endif
         rw = rwlock2rw(c);
 
         if (rw_wlocked(rw)) {
@@ -775,7 +813,7 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t tid, const char *file,
                 if (rw->rw_lock == RW_UNLOCKED && _rw_write_lock(rw, tid))
                         break;
 #ifdef KDTRACE_HOOKS
-                spin_cnt++;
+                lda.spin_cnt++;
 #endif
 #ifdef HWPMC_HOOKS
                 PMC_SOFT_CALL( , , lock, failed);
@@ -798,12 +836,8 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t tid, const char *file,
                             "spinning", "lockname:\"%s\"",
                             rw->lock_object.lo_name);
                         while ((struct thread*)RW_OWNER(rw->rw_lock) == owner &&
-                            TD_IS_RUNNING(owner)) {
-                                cpu_spinwait();
-#ifdef KDTRACE_HOOKS
-                                spin_cnt++;
-#endif
-                        }
+                            TD_IS_RUNNING(owner))
+                                lock_delay(&lda);
                         KTR_STATE0(KTR_SCHED, "thread",
                             sched_tdname(curthread), "running");
                         continue;
@@ -828,7 +862,7 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t tid, const char *file,
                         KTR_STATE0(KTR_SCHED, "thread",
                             sched_tdname(curthread), "running");
 #ifdef KDTRACE_HOOKS
-                        spin_cnt += rowner_loops - i;
+                        lda.spin_cnt += rowner_loops - i;
 #endif
                         if (i != rowner_loops)
                                 continue;
@@ -918,9 +952,9 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t tid, const char *file,
             (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));
 
         /* Record only the loops spinning and not sleeping. */
-        if (spin_cnt > sleep_cnt)
+        if (lda.spin_cnt > sleep_cnt)
                 LOCKSTAT_RECORD4(rw__spin, rw, all_time - sleep_time,
-                    LOCKSTAT_READER, (state & RW_LOCK_READ) == 0,
+                    LOCKSTAT_WRITER, (state & RW_LOCK_READ) == 0,
                     (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));
 #endif
         LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire, rw, contested,
diff --git a/sys/kern/kern_sx.c b/sys/kern/kern_sx.c
index 2a81c04..9115d5a 100644
--- a/sys/kern/kern_sx.c
+++ b/sys/kern/kern_sx.c
@@ -46,6 +46,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/kdb.h>
+#include <sys/kernel.h>
 #include <sys/ktr.h>
 #include <sys/lock.h>
 #include <sys/mutex.h>
@@ -53,6 +54,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/sched.h>
 #include <sys/sleepqueue.h>
 #include <sys/sx.h>
+#include <sys/smp.h>
 #include <sys/sysctl.h>
 
 #if defined(SMP) && !defined(NO_ADAPTIVE_SX)
@@ -145,6 +147,33 @@ static u_int asx_loops = 10000;
 static SYSCTL_NODE(_debug, OID_AUTO, sx, CTLFLAG_RD, NULL, "sxlock debugging");
 SYSCTL_UINT(_debug_sx, OID_AUTO, retries, CTLFLAG_RW, &asx_retries, 0, "");
 SYSCTL_UINT(_debug_sx, OID_AUTO, loops, CTLFLAG_RW, &asx_loops, 0, "");
+
+static struct lock_delay_config sx_delay = {
+        .initial = 1000,
+        .step = 500,
+        .min = 100,
+        .max = 5000,
+};
+
+SYSCTL_INT(_debug_sx, OID_AUTO, delay_initial, CTLFLAG_RW, &sx_delay.initial,
+    0, "");
+SYSCTL_INT(_debug_sx, OID_AUTO, delay_step, CTLFLAG_RW, &sx_delay.step,
+    0, "");
+SYSCTL_INT(_debug_sx, OID_AUTO, delay_min, CTLFLAG_RW, &sx_delay.min,
+    0, "");
+SYSCTL_INT(_debug_sx, OID_AUTO, delay_max, CTLFLAG_RW, &sx_delay.max,
+    0, "");
+
+static void
+sx_delay_sysinit(void *dummy)
+{
+
+        sx_delay.initial = mp_ncpus * 25;
+        sx_delay.step = (mp_ncpus * 25) / 2;
+        sx_delay.min = mp_ncpus * 5;
+        sx_delay.max = mp_ncpus * 25 * 10;
+}
+LOCK_DELAY_SYSINIT(sx_delay_sysinit);
 #endif
 
 void
@@ -513,10 +542,12 @@ _sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
         int contested = 0;
 #endif
         int error = 0;
+#if defined(ADAPTIVE_SX) || defined(KDTRACE_HOOKS)
+        struct lock_delay_arg lda;
+#endif
 #ifdef KDTRACE_HOOKS
         uintptr_t state;
-        uint64_t spin_cnt = 0;
-        uint64_t sleep_cnt = 0;
+        u_int sleep_cnt = 0;
         int64_t sleep_time = 0;
         int64_t all_time = 0;
 #endif
@@ -524,6 +555,12 @@ _sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
         if (SCHEDULER_STOPPED())
                 return (0);
 
+#if defined(ADAPTIVE_SX)
+        lock_delay_arg_init(&lda, &sx_delay);
+#elif defined(KDTRACE_HOOKS)
+        lock_delay_arg_init(&lda, NULL);
+#endif
+
         /* If we already hold an exclusive lock, then recurse. */
         if (sx_xlocked(sx)) {
                 KASSERT((sx->lock_object.lo_flags & LO_RECURSABLE) != 0,
@@ -549,7 +586,7 @@ _sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
                     atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED, tid))
                         break;
 #ifdef KDTRACE_HOOKS
-                spin_cnt++;
+                lda.spin_cnt++;
 #endif
 #ifdef HWPMC_HOOKS
                 PMC_SOFT_CALL( , , lock, failed);
@@ -578,12 +615,8 @@ _sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
                                     sx->lock_object.lo_name);
                                 GIANT_SAVE();
                                 while (SX_OWNER(sx->sx_lock) == x &&
-                                    TD_IS_RUNNING(owner)) {
-                                        cpu_spinwait();
-#ifdef KDTRACE_HOOKS
-                                        spin_cnt++;
-#endif
-                                }
+                                    TD_IS_RUNNING(owner))
+                                        lock_delay(&lda);
                                 KTR_STATE0(KTR_SCHED, "thread",
                                     sched_tdname(curthread), "running");
                                 continue;
@@ -605,7 +638,7 @@ _sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
                                                 break;
                                         cpu_spinwait();
 #ifdef KDTRACE_HOOKS
-                                        spin_cnt++;
+                                        lda.spin_cnt++;
 #endif
                                 }
                                 KTR_STATE0(KTR_SCHED, "thread",
@@ -725,7 +758,7 @@ _sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
                 LOCKSTAT_RECORD4(sx__block, sx, sleep_time, LOCKSTAT_WRITER,
                     (state & SX_LOCK_SHARED) == 0,
                     (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
-        if (spin_cnt > sleep_cnt)
+        if (lda.spin_cnt > sleep_cnt)
                 LOCKSTAT_RECORD4(sx__spin, sx, all_time - sleep_time,
                     LOCKSTAT_WRITER, (state & SX_LOCK_SHARED) == 0,
                     (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
@@ -818,10 +851,12 @@ _sx_slock_hard(struct sx *sx, int opts, const char *file, int line)
 #endif
         uintptr_t x;
         int error = 0;
+#if defined(ADAPTIVE_SX) || defined(KDTRACE_HOOKS)
+        struct lock_delay_arg lda;
+#endif
 #ifdef KDTRACE_HOOKS
         uintptr_t state;
-        uint64_t spin_cnt = 0;
-        uint64_t sleep_cnt = 0;
+        u_int sleep_cnt = 0;
         int64_t sleep_time = 0;
         int64_t all_time = 0;
 #endif
@@ -829,6 +864,11 @@ _sx_slock_hard(struct sx *sx, int opts, const char *file, int line)
         if (SCHEDULER_STOPPED())
                 return (0);
 
+#if defined(ADAPTIVE_SX)
+        lock_delay_arg_init(&lda, &sx_delay);
+#elif defined(KDTRACE_HOOKS)
+        lock_delay_arg_init(&lda, NULL);
+#endif
 #ifdef KDTRACE_HOOKS
         state = sx->sx_lock;
         all_time -= lockstat_nsecs(&sx->lock_object);
@@ -840,7 +880,7 @@ _sx_slock_hard(struct sx *sx, int opts, const char *file, int line)
          */
         for (;;) {
 #ifdef KDTRACE_HOOKS
-                spin_cnt++;
+                lda.spin_cnt++;
 #endif
                 x = sx->sx_lock;
 
@@ -888,12 +928,8 @@ _sx_slock_hard(struct sx *sx, int opts, const char *file, int line)
                                     "lockname:\"%s\"", sx->lock_object.lo_name);
                                 GIANT_SAVE();
                                 while (SX_OWNER(sx->sx_lock) == x &&
-                                    TD_IS_RUNNING(owner)) {
-#ifdef KDTRACE_HOOKS
-                                        spin_cnt++;
-#endif
-                                        cpu_spinwait();
-                                }
+                                    TD_IS_RUNNING(owner))
+                                        lock_delay(&lda);
                                 KTR_STATE0(KTR_SCHED, "thread",
                                     sched_tdname(curthread), "running");
                                 continue;
@@ -989,7 +1025,7 @@ _sx_slock_hard(struct sx *sx, int opts, const char *file, int line)
                 LOCKSTAT_RECORD4(sx__block, sx, sleep_time, LOCKSTAT_READER,
                     (state & SX_LOCK_SHARED) == 0,
                     (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
-        if (spin_cnt > sleep_cnt)
+        if (lda.spin_cnt > sleep_cnt)
                 LOCKSTAT_RECORD4(sx__spin, sx, all_time - sleep_time,
                     LOCKSTAT_READER, (state & SX_LOCK_SHARED) == 0,
                     (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
diff --git a/sys/kern/subr_lock.c b/sys/kern/subr_lock.c
index e78d5a9..bfe189d 100644
--- a/sys/kern/subr_lock.c
+++ b/sys/kern/subr_lock.c
@@ -103,6 +103,34 @@ lock_destroy(struct lock_object *lock)
         lock->lo_flags &= ~LO_INITIALIZED;
 }
 
+void
+lock_delay(struct lock_delay_arg *la)
+{
+        u_int i, delay, backoff, min, max;
+        struct lock_delay_config *lc = la->config;
+
+        delay = la->delay;
+
+        if (delay == 0)
+                delay = lc->initial;
+        else {
+                delay += lc->step;
+                max = lc->max;
+                if (delay > max)
+                        delay = max;
+        }
+
+        backoff = cpu_ticks() % delay;
+        min = lc->min;
+        if (backoff < min)
+                backoff = min;
+        for (i = 0; i < backoff; i++)
+                cpu_spinwait();
+
+        la->delay = delay;
+        la->spin_cnt += backoff;
+}
+
 #ifdef DDB
 DB_SHOW_COMMAND(lock, db_show_lock)
 {
```
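For a sense of scale, the following hypothetical userland calculation evaluates the LOCK_DELAY_SYSINIT tuning above for a few machine sizes; the static 1000/500/100/5000 initializers only matter before the SYSINIT has run.

```c
#include <stdio.h>

/* Evaluate the autotune formulas from the *_delay_sysinit() functions. */
int
main(void)
{
        const int counts[] = { 1, 4, 8, 32 };

        for (int i = 0; i < 4; i++) {
                int ncpus = counts[i];
                printf("mp_ncpus=%2d: initial=%4d step=%4d min=%3d max=%5d\n",
                    ncpus, ncpus * 25, (ncpus * 25) / 2,
                    ncpus * 5, ncpus * 25 * 10);
        }
        return (0);
}
```

Note also that lock_delay() adds the number of busy-wait iterations it performed to lda.spin_cnt, so the existing `spin_cnt > sleep_cnt` heuristic guarding the lockstat spin records continues to work unchanged.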