| author | mjg <mjg@FreeBSD.org> | 2016-08-11 09:28:49 +0000 |
| --- | --- | --- |
| committer | mjg <mjg@FreeBSD.org> | 2016-08-11 09:28:49 +0000 |
| commit | f6eab3bb444ed8b47aa811577447abfcd80c5a8b (patch) | |
| tree | 3519cb6bf291ff67ec8623d701ffdd517d297284 /sys/kern/kern_sx.c | |
| parent | 91c31a960acb18732dcecb025733e05cc1080cb0 (diff) | |
| download | FreeBSD-src-f6eab3bb444ed8b47aa811577447abfcd80c5a8b.zip, FreeBSD-src-f6eab3bb444ed8b47aa811577447abfcd80c5a8b.tar.gz | |
MFC r303562,r303563,r303584,r303643,r303652,r303655,r303707:
rwlock: s/READER/WRITER/ in wlock lockstat annotation
==
sx: increment spin_cnt before cpu_spinwait in xlock
The change is a no-op, done only for consistency with the rest of the file.
==
locks: change sleep_cnt and spin_cnt types to u_int
Both variables were uint64_t, but they only count spins or sleeps.
All reasonable values we can get here comfortably fit in the 32-bit range.
==
Implement trivial backoff for locking primitives.
All current spinning loops retry an atomic op the first chance they get,
which leads to performance degradation under load.
One classic solution to the problem is to back off before retrying the test.
This implementation uses a trivial linear increment plus a random
factor for each attempt.
For simplicity, this first-touch implementation only modifies spinning
loops where the lock owner is running. Spin mutexes and thread lock were
not modified.
Current parameters are autotuned on boot based on mp_ncpus.
Autotune factors are very conservative and are subject to change later.
(An illustrative userspace sketch of this backoff scheme appears below, after the commit message.)
==
locks: fix up ifdef guards introduced in r303643
Both sx and rwlocks had copy-pasted ADAPTIVE_MUTEXES instead of the correct
define.
==
locks: fix compilation for KDTRACE_HOOKS && !ADAPTIVE_* case
==
locks: fix sx compilation on mips after r303643
The kernel.h header is required for the SYSINIT macro; on amd64 it was
apparently only being pulled in indirectly, by accident.
Approved by: re (gjb)
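
As a rough illustration of the backoff scheme described in the r303643 entry above: instead of hammering the atomic op, the spinner waits an amount that grows linearly with each failed attempt, perturbed by a random factor. The sketch below is a hypothetical userspace analogue, not the kernel's lock_delay()/lock_delay_arg code from kern/subr_lock.c; the names backoff_config, spin_acquire, and cpu_relax, and all constants, are made up for illustration.

/*
 * Hypothetical userspace sketch of a spin loop with linear backoff and a
 * random factor, in the spirit of the r303643 change.  Names and constants
 * are illustrative only; this is not the kernel's lock_delay() code.
 */
#include <stdatomic.h>
#include <stdlib.h>

struct backoff_config {
	unsigned int initial;	/* starting delay, in pause iterations */
	unsigned int step;	/* linear increment after each failed try */
	unsigned int min;	/* lower bound on any single wait */
	unsigned int max;	/* upper bound on the delay */
};

static void
cpu_relax(void)
{
	/* Stand-in for cpu_spinwait(); PAUSE hint on x86, nothing elsewhere. */
#if defined(__i386__) || defined(__x86_64__)
	__asm__ __volatile__("pause");
#endif
}

/*
 * Spin until the lock word can be changed from 0 to 1, backing off between
 * attempts.  Assumes cfg->initial >= cfg->min.
 */
static void
spin_acquire(atomic_int *lock, const struct backoff_config *cfg)
{
	unsigned int delay, pause, i;
	int expected;

	delay = cfg->initial;
	for (;;) {
		expected = 0;
		if (atomic_compare_exchange_weak(lock, &expected, 1))
			return;		/* got the lock */
		/* Wait a random amount in [min, delay] before retrying. */
		pause = cfg->min + (unsigned int)random() % (delay - cfg->min + 1);
		for (i = 0; i < pause; i++)
			cpu_relax();
		/* Grow the delay linearly, capped at max. */
		if (delay + cfg->step <= cfg->max)
			delay += cfg->step;
	}
}

int
main(void)
{
	atomic_int lock = 0;
	/* Loosely mirrors the sx_delay_sysinit() autotuning for 8 CPUs. */
	struct backoff_config cfg = {
		.initial = 8 * 25,
		.step = (8 * 25) / 2,
		.min = 8 * 5,
		.max = 8 * 25 * 10,
	};

	spin_acquire(&lock, &cfg);
	atomic_store(&lock, 0);	/* release */
	return (0);
}

Deriving the config from the CPU count, as sx_delay_sysinit() does in the diff below, keeps the delay roughly proportional to the amount of contention the machine can generate.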
Diffstat (limited to 'sys/kern/kern_sx.c')
| -rw-r--r-- | sys/kern/kern_sx.c | 78 |
1 file changed, 57 insertions, 21 deletions
diff --git a/sys/kern/kern_sx.c b/sys/kern/kern_sx.c
index 2a81c04..9115d5a 100644
--- a/sys/kern/kern_sx.c
+++ b/sys/kern/kern_sx.c
@@ -46,6 +46,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/kdb.h>
+#include <sys/kernel.h>
 #include <sys/ktr.h>
 #include <sys/lock.h>
 #include <sys/mutex.h>
@@ -53,6 +54,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/sched.h>
 #include <sys/sleepqueue.h>
 #include <sys/sx.h>
+#include <sys/smp.h>
 #include <sys/sysctl.h>
 
 #if defined(SMP) && !defined(NO_ADAPTIVE_SX)
@@ -145,6 +147,33 @@ static u_int asx_loops = 10000;
 static SYSCTL_NODE(_debug, OID_AUTO, sx, CTLFLAG_RD, NULL, "sxlock debugging");
 SYSCTL_UINT(_debug_sx, OID_AUTO, retries, CTLFLAG_RW, &asx_retries, 0, "");
 SYSCTL_UINT(_debug_sx, OID_AUTO, loops, CTLFLAG_RW, &asx_loops, 0, "");
+
+static struct lock_delay_config sx_delay = {
+	.initial = 1000,
+	.step = 500,
+	.min = 100,
+	.max = 5000,
+};
+
+SYSCTL_INT(_debug_sx, OID_AUTO, delay_initial, CTLFLAG_RW, &sx_delay.initial,
+    0, "");
+SYSCTL_INT(_debug_sx, OID_AUTO, delay_step, CTLFLAG_RW, &sx_delay.step,
+    0, "");
+SYSCTL_INT(_debug_sx, OID_AUTO, delay_min, CTLFLAG_RW, &sx_delay.min,
+    0, "");
+SYSCTL_INT(_debug_sx, OID_AUTO, delay_max, CTLFLAG_RW, &sx_delay.max,
+    0, "");
+
+static void
+sx_delay_sysinit(void *dummy)
+{
+
+	sx_delay.initial = mp_ncpus * 25;
+	sx_delay.step = (mp_ncpus * 25) / 2;
+	sx_delay.min = mp_ncpus * 5;
+	sx_delay.max = mp_ncpus * 25 * 10;
+}
+LOCK_DELAY_SYSINIT(sx_delay_sysinit);
 #endif
 
 void
@@ -513,10 +542,12 @@ _sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
 	int contested = 0;
 #endif
 	int error = 0;
+#if defined(ADAPTIVE_SX) || defined(KDTRACE_HOOKS)
+	struct lock_delay_arg lda;
+#endif
 #ifdef KDTRACE_HOOKS
 	uintptr_t state;
-	uint64_t spin_cnt = 0;
-	uint64_t sleep_cnt = 0;
+	u_int sleep_cnt = 0;
 	int64_t sleep_time = 0;
 	int64_t all_time = 0;
 #endif
@@ -524,6 +555,12 @@ _sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
 	if (SCHEDULER_STOPPED())
 		return (0);
 
+#if defined(ADAPTIVE_SX)
+	lock_delay_arg_init(&lda, &sx_delay);
+#elif defined(KDTRACE_HOOKS)
+	lock_delay_arg_init(&lda, NULL);
+#endif
+
 	/* If we already hold an exclusive lock, then recurse. */
 	if (sx_xlocked(sx)) {
 		KASSERT((sx->lock_object.lo_flags & LO_RECURSABLE) != 0,
@@ -549,7 +586,7 @@ _sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
 		    atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED, tid))
 			break;
 #ifdef KDTRACE_HOOKS
-		spin_cnt++;
+		lda.spin_cnt++;
 #endif
 #ifdef HWPMC_HOOKS
 		PMC_SOFT_CALL( , , lock, failed);
@@ -578,12 +615,8 @@ _sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
 				    sx->lock_object.lo_name);
 				GIANT_SAVE();
 				while (SX_OWNER(sx->sx_lock) == x &&
-				    TD_IS_RUNNING(owner)) {
-					cpu_spinwait();
-#ifdef KDTRACE_HOOKS
-					spin_cnt++;
-#endif
-				}
+				    TD_IS_RUNNING(owner))
+					lock_delay(&lda);
 				KTR_STATE0(KTR_SCHED, "thread",
 				    sched_tdname(curthread), "running");
 				continue;
@@ -605,7 +638,7 @@ _sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
 						break;
 					cpu_spinwait();
 #ifdef KDTRACE_HOOKS
-					spin_cnt++;
+					lda.spin_cnt++;
 #endif
 				}
 				KTR_STATE0(KTR_SCHED, "thread",
@@ -725,7 +758,7 @@ _sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
 		LOCKSTAT_RECORD4(sx__block, sx, sleep_time, LOCKSTAT_WRITER,
 		    (state & SX_LOCK_SHARED) == 0, (state & SX_LOCK_SHARED) == 0 ?
 		    0 : SX_SHARERS(state));
-	if (spin_cnt > sleep_cnt)
+	if (lda.spin_cnt > sleep_cnt)
 		LOCKSTAT_RECORD4(sx__spin, sx, all_time - sleep_time,
 		    LOCKSTAT_WRITER, (state & SX_LOCK_SHARED) == 0,
 		    (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
@@ -818,10 +851,12 @@ _sx_slock_hard(struct sx *sx, int opts, const char *file, int line)
 #endif
 	uintptr_t x;
 	int error = 0;
+#if defined(ADAPTIVE_SX) || defined(KDTRACE_HOOKS)
+	struct lock_delay_arg lda;
+#endif
 #ifdef KDTRACE_HOOKS
 	uintptr_t state;
-	uint64_t spin_cnt = 0;
-	uint64_t sleep_cnt = 0;
+	u_int sleep_cnt = 0;
 	int64_t sleep_time = 0;
 	int64_t all_time = 0;
 #endif
@@ -829,6 +864,11 @@ _sx_slock_hard(struct sx *sx, int opts, const char *file, int line)
 	if (SCHEDULER_STOPPED())
 		return (0);
 
+#if defined(ADAPTIVE_SX)
+	lock_delay_arg_init(&lda, &sx_delay);
+#elif defined(KDTRACE_HOOKS)
+	lock_delay_arg_init(&lda, NULL);
+#endif
 #ifdef KDTRACE_HOOKS
 	state = sx->sx_lock;
 	all_time -= lockstat_nsecs(&sx->lock_object);
@@ -840,7 +880,7 @@ _sx_slock_hard(struct sx *sx, int opts, const char *file, int line)
 	 */
 	for (;;) {
 #ifdef KDTRACE_HOOKS
-		spin_cnt++;
+		lda.spin_cnt++;
 #endif
 		x = sx->sx_lock;
 
@@ -888,12 +928,8 @@ _sx_slock_hard(struct sx *sx, int opts, const char *file, int line)
 				    "lockname:\"%s\"", sx->lock_object.lo_name);
 				GIANT_SAVE();
 				while (SX_OWNER(sx->sx_lock) == x &&
-				    TD_IS_RUNNING(owner)) {
-#ifdef KDTRACE_HOOKS
-					spin_cnt++;
-#endif
-					cpu_spinwait();
-				}
+				    TD_IS_RUNNING(owner))
+					lock_delay(&lda);
 				KTR_STATE0(KTR_SCHED, "thread",
 				    sched_tdname(curthread), "running");
 				continue;
@@ -989,7 +1025,7 @@ _sx_slock_hard(struct sx *sx, int opts, const char *file, int line)
 		LOCKSTAT_RECORD4(sx__block, sx, sleep_time, LOCKSTAT_READER,
 		    (state & SX_LOCK_SHARED) == 0, (state & SX_LOCK_SHARED) == 0 ?
 		    0 : SX_SHARERS(state));
-	if (spin_cnt > sleep_cnt)
+	if (lda.spin_cnt > sleep_cnt)
 		LOCKSTAT_RECORD4(sx__spin, sx, all_time - sleep_time,
 		    LOCKSTAT_READER, (state & SX_LOCK_SHARED) == 0,
 		    (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
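
The four delay parameters added to sx_delay in the diff are exported as read/write sysctls under debug.sx (via the SYSCTL_INT(_debug_sx, ...) lines). As a minimal sketch, the autotuned values could be read from userland with sysctlbyname(3); this assumes a FreeBSD SMP kernel where adaptive sx spinning is enabled, since the OIDs are compiled out otherwise, and the program structure is illustrative.

/*
 * Hypothetical userland helper: print the debug.sx delay knobs added by
 * the change above.  The OIDs only exist when ADAPTIVE_SX is in effect.
 */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	const char *oids[] = {
		"debug.sx.delay_initial",
		"debug.sx.delay_step",
		"debug.sx.delay_min",
		"debug.sx.delay_max",
	};
	size_t i, len;
	int val;

	for (i = 0; i < sizeof(oids) / sizeof(oids[0]); i++) {
		len = sizeof(val);
		if (sysctlbyname(oids[i], &val, &len, NULL, 0) == -1) {
			perror(oids[i]);
			continue;
		}
		printf("%s: %d\n", oids[i], val);
	}
	return (0);
}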