path: root/sys/kern/subr_lock.c
author	mjg <mjg@FreeBSD.org>	2016-12-31 16:57:05 +0000
committer	mjg <mjg@FreeBSD.org>	2016-12-31 16:57:05 +0000
commit	ecc0d1137dbff8457208eda1d5387ededb4137f8 (patch)
tree	1fe4e32965515d6ddeca6698922f3a2018d08a4c /sys/kern/subr_lock.c
parent	4d5f81f319b8c07d6c5caff90308903ca20a6668 (diff)
download	FreeBSD-src-ecc0d1137dbff8457208eda1d5387ededb4137f8.zip
	FreeBSD-src-ecc0d1137dbff8457208eda1d5387ededb4137f8.tar.gz
MFC r285706,r303562,r303563,r303584,r303643,r303652,r303655,r303707:

(by markj) Don't increment the spin count until after the first attempt to
acquire a rwlock read lock. Otherwise the lockstat:::rw-spin probe will
fire spuriously.
==
rwlock: s/READER/WRITER/ in wlock lockstat annotation
==
sx: increment spin_cnt before cpu_spinwait in xlock

The change is a no-op, done only for consistency with the rest of the file.
==
locks: change sleep_cnt and spin_cnt types to u_int

Both variables are uint64_t, but they only count spins or sleeps. All
reasonable values which we can get here comfortably fit in the 32-bit
range.
==
Implement trivial backoff for locking primitives.

All current spinning loops retry an atomic op the first chance they get,
which leads to performance degradation under load. One classic solution to
the problem consists of delaying the test to an extent. This implementation
has a trivial linear increment and a random factor for each attempt. For
simplicity, this first-touch implementation only modifies spinning loops
where the lock owner is running. Spin mutexes and thread lock were not
modified.

Current parameters are autotuned on boot based on mp_ncpus. Autotune
factors are very conservative and are subject to change later.
==
locks: fix up ifdef guards introduced in r303643

Both sx and rwlocks had copy-pasted ADAPTIVE_MUTEXES instead of the
correct define.
==
locks: fix compilation for KDTRACE_HOOKS && !ADAPTIVE_* case
==
locks: fix sx compilation on mips after r303643

The kernel.h header is required for the SYSINIT macro, which apparently
was present on amd64 by accident.
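To make the backoff scheme described above concrete, here is a small,
illustrative-only userspace C program (not from the commit) that models how
the delay window grows linearly per failed attempt, is capped at a maximum,
and is randomized per spin. The config values are invented for the demo; in
the kernel they are autotuned from mp_ncpus at boot.

/* Illustrative sketch only; config values are made up. */
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	unsigned initial = 1, step = 2, min = 1, max = 16;
	unsigned delay = 0, backoff;

	for (int attempt = 1; attempt <= 10; attempt++) {
		/* Linear increment, clamped to the configured maximum. */
		if (delay == 0)
			delay = initial;
		else if ((delay += step) > max)
			delay = max;
		/* Random factor: spin between min and delay-1 iterations. */
		backoff = rand() % delay;
		if (backoff < min)
			backoff = min;
		printf("attempt %d: window %u, spins %u\n",
		    attempt, delay, backoff);
	}
	return (0);
}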
Diffstat (limited to 'sys/kern/subr_lock.c')
-rw-r--r--	sys/kern/subr_lock.c	29
1 file changed, 29 insertions(+), 0 deletions(-)
diff --git a/sys/kern/subr_lock.c b/sys/kern/subr_lock.c
index 8aec803..cacaf56 100644
--- a/sys/kern/subr_lock.c
+++ b/sys/kern/subr_lock.c
@@ -55,6 +55,7 @@ __FBSDID("$FreeBSD$");
 #endif
 
 #include <machine/cpufunc.h>
+#include <machine/cpu.h>
 
 CTASSERT(LOCK_CLASS_MAX == 15);
@@ -103,6 +104,34 @@ lock_destroy(struct lock_object *lock)
 	lock->lo_flags &= ~LO_INITIALIZED;
 }
 
+void
+lock_delay(struct lock_delay_arg *la)
+{
+	u_int i, delay, backoff, min, max;
+	struct lock_delay_config *lc = la->config;
+
+	delay = la->delay;
+
+	if (delay == 0)
+		delay = lc->initial;
+	else {
+		delay += lc->step;
+		max = lc->max;
+		if (delay > max)
+			delay = max;
+	}
+
+	backoff = cpu_ticks() % delay;
+	min = lc->min;
+	if (backoff < min)
+		backoff = min;
+	for (i = 0; i < backoff; i++)
+		cpu_spinwait();
+
+	la->delay = delay;
+	la->spin_cnt += backoff;
+}
+
 #ifdef DDB
 DB_SHOW_COMMAND(lock, db_show_lock)
 {
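For context, here is a hypothetical caller-side sketch (not part of this
commit) of how a spinning acquire loop could drive lock_delay(). The field
names of struct lock_delay_config and struct lock_delay_arg are taken from
the diff above; my_delay_config, its values, spin_until_free(), and lockword
are made up for illustration. The real callers in the tree are the adaptive
spin loops in kern_mutex.c, kern_rwlock.c, and kern_sx.c.

/* Hypothetical usage sketch; not the actual in-tree caller code. */
#include <sys/types.h>
#include <sys/lock.h>
#include <machine/atomic.h>

static struct lock_delay_config my_delay_config = {
	.initial = 1,	/* first backoff window */
	.step = 1,	/* linear growth per failed attempt */
	.min = 1,	/* never spin fewer than this many iterations */
	.max = 128,	/* cap on the backoff window */
};

static void
spin_until_free(volatile u_int *lockword)
{
	struct lock_delay_arg la;

	la.config = &my_delay_config;
	la.delay = 0;		/* lock_delay() seeds this from lc->initial */
	la.spin_cnt = 0;	/* accumulated spins, for lockstat accounting */

	/* Retry the atomic op only after backing off a random amount. */
	while (atomic_cmpset_acq_int(lockword, 0, 1) == 0)
		lock_delay(&la);
}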