Diffstat (limited to 'sys/kern/kern_rwlock.c')
 sys/kern/kern_rwlock.c | 103
 1 file changed, 70 insertions(+), 33 deletions(-)
diff --git a/sys/kern/kern_rwlock.c b/sys/kern/kern_rwlock.c
index 3a42d7a..8559840 100644
--- a/sys/kern/kern_rwlock.c
+++ b/sys/kern/kern_rwlock.c
@@ -45,6 +45,7 @@ __FBSDID("$FreeBSD$");
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
+#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/turnstile.h>
@@ -66,15 +67,6 @@ PMC_SOFT_DECLARE( , , lock, failed);
*/
#define rwlock2rw(c) (__containerof(c, struct rwlock, rw_lock))
-#ifdef ADAPTIVE_RWLOCKS
-static int rowner_retries = 10;
-static int rowner_loops = 10000;
-static SYSCTL_NODE(_debug, OID_AUTO, rwlock, CTLFLAG_RD, NULL,
- "rwlock debugging");
-SYSCTL_INT(_debug_rwlock, OID_AUTO, retry, CTLFLAG_RW, &rowner_retries, 0, "");
-SYSCTL_INT(_debug_rwlock, OID_AUTO, loops, CTLFLAG_RW, &rowner_loops, 0, "");
-#endif
-
#ifdef DDB
#include <ddb/ddb.h>
@@ -101,6 +93,42 @@ struct lock_class lock_class_rw = {
#endif
};
+#ifdef ADAPTIVE_RWLOCKS
+static int rowner_retries = 10;
+static int rowner_loops = 10000;
+static SYSCTL_NODE(_debug, OID_AUTO, rwlock, CTLFLAG_RD, NULL,
+ "rwlock debugging");
+SYSCTL_INT(_debug_rwlock, OID_AUTO, retry, CTLFLAG_RW, &rowner_retries, 0, "");
+SYSCTL_INT(_debug_rwlock, OID_AUTO, loops, CTLFLAG_RW, &rowner_loops, 0, "");
+
+static struct lock_delay_config rw_delay = {
+ .initial = 1000,
+ .step = 500,
+ .min = 100,
+ .max = 5000,
+};
+
+SYSCTL_INT(_debug_rwlock, OID_AUTO, delay_initial, CTLFLAG_RW, &rw_delay.initial,
+ 0, "");
+SYSCTL_INT(_debug_rwlock, OID_AUTO, delay_step, CTLFLAG_RW, &rw_delay.step,
+ 0, "");
+SYSCTL_INT(_debug_rwlock, OID_AUTO, delay_min, CTLFLAG_RW, &rw_delay.min,
+ 0, "");
+SYSCTL_INT(_debug_rwlock, OID_AUTO, delay_max, CTLFLAG_RW, &rw_delay.max,
+ 0, "");
+
+static void
+rw_delay_sysinit(void *dummy)
+{
+
+ rw_delay.initial = mp_ncpus * 25;
+ rw_delay.step = (mp_ncpus * 25) / 2;
+ rw_delay.min = mp_ncpus * 5;
+ rw_delay.max = mp_ncpus * 25 * 10;
+}
+LOCK_DELAY_SYSINIT(rw_delay_sysinit);
+#endif
+
/*
* Return a pointer to the owning thread if the lock is write-locked or
* NULL if the lock is unlocked or read-locked.
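Note: the initial/step/min/max values assigned above are placeholders; LOCK_DELAY_SYSINIT arranges for rw_delay_sysinit to run once mp_ncpus is known (hence the new <sys/smp.h> include), rescaling every field with the CPU count so the backoff window grows on larger machines. The sketch below shows the two structures this code consumes, reconstructed from how the diff uses them; the authoritative definitions live elsewhere in the tree, and the exact field types are an assumption:

struct lock_delay_config {
	u_int initial;	/* starting spin window, in cpu_spinwait() rounds */
	u_int step;	/* added to the window after each failed pass */
	u_int min;	/* lower bound on the window */
	u_int max;	/* upper bound on the window */
};

struct lock_delay_arg {
	struct lock_delay_config *config;	/* NULL when only counting */
	u_int delay;				/* current window size */
	u_int spin_cnt;				/* rounds spent spinning, for KDTRACE_HOOKS */
};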
@@ -355,10 +383,12 @@ __rw_rlock(volatile uintptr_t *c, const char *file, int line)
int contested = 0;
#endif
uintptr_t v;
+#if defined(ADAPTIVE_RWLOCKS) || defined(KDTRACE_HOOKS)
+ struct lock_delay_arg lda;
+#endif
#ifdef KDTRACE_HOOKS
uintptr_t state;
- uint64_t spin_cnt = 0;
- uint64_t sleep_cnt = 0;
+ u_int sleep_cnt = 0;
int64_t sleep_time = 0;
int64_t all_time = 0;
#endif
@@ -366,6 +396,11 @@ __rw_rlock(volatile uintptr_t *c, const char *file, int line)
if (SCHEDULER_STOPPED())
return;
+#if defined(ADAPTIVE_RWLOCKS)
+ lock_delay_arg_init(&lda, &rw_delay);
+#elif defined(KDTRACE_HOOKS)
+ lock_delay_arg_init(&lda, NULL);
+#endif
rw = rwlock2rw(c);
KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
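Note: the config is chosen at compile time. With ADAPTIVE_RWLOCKS the arg is bound to rw_delay; a kernel built with only KDTRACE_HOOKS passes NULL and uses the struct purely as a spin counter. An assumed shape for the initializer, consistent with both call sites (the real macro is not part of this diff):

#define	lock_delay_arg_init(la, lc) do {	\
	(la)->config = (lc);			\
	(la)->delay = 0;			\
	(la)->spin_cnt = 0;			\
} while (0)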
@@ -383,9 +418,6 @@ __rw_rlock(volatile uintptr_t *c, const char *file, int line)
state = rw->rw_lock;
#endif
for (;;) {
-#ifdef KDTRACE_HOOKS
- spin_cnt++;
-#endif
/*
* Handle the easy case. If no other thread has a write
* lock, then try to bump up the count of read locks. Note
@@ -414,6 +446,9 @@ __rw_rlock(volatile uintptr_t *c, const char *file, int line)
}
continue;
}
+#ifdef KDTRACE_HOOKS
+ lda.spin_cnt++;
+#endif
#ifdef HWPMC_HOOKS
PMC_SOFT_CALL( , , lock, failed);
#endif
@@ -437,12 +472,8 @@ __rw_rlock(volatile uintptr_t *c, const char *file, int line)
sched_tdname(curthread), "spinning",
"lockname:\"%s\"", rw->lock_object.lo_name);
while ((struct thread*)RW_OWNER(rw->rw_lock) ==
- owner && TD_IS_RUNNING(owner)) {
- cpu_spinwait();
-#ifdef KDTRACE_HOOKS
- spin_cnt++;
-#endif
- }
+ owner && TD_IS_RUNNING(owner))
+ lock_delay(&lda);
KTR_STATE0(KTR_SCHED, "thread",
sched_tdname(curthread), "running");
continue;
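Note: this hunk carries the core of the change. While the write owner stays running, the waiter now calls lock_delay() instead of one bare cpu_spinwait() per iteration, so repeated failures widen the wait window and contending CPUs stop hammering the lock cacheline in lockstep. The real routine lives in kern/subr_lock.c; the sketch below only illustrates the backoff pattern implied by the config fields and is not the committed implementation:

/*
 * Sketch: grow the window by step up to max, clamp it to min, then
 * spin that many rounds.  Charging spin_cnt here keeps the KDTRACE
 * accounting exact without touching the caller's loop.
 */
void
lock_delay(struct lock_delay_arg *la)
{
	struct lock_delay_config *lc = la->config;
	u_int i, delay;

	delay = la->delay;
	if (delay == 0)
		delay = lc->initial;
	else if (delay < lc->max)
		delay += lc->step;
	if (delay < lc->min)
		delay = lc->min;

	for (i = 0; i < delay; i++)
		cpu_spinwait();

	la->delay = delay;
	la->spin_cnt += delay;
}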
@@ -458,6 +489,9 @@ __rw_rlock(volatile uintptr_t *c, const char *file, int line)
break;
cpu_spinwait();
}
+#ifdef KDTRACE_HOOKS
+ lda.spin_cnt += rowner_loops - i;
+#endif
KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
"running");
if (i != rowner_loops)
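Note: the bounded wait-for-readers loop above keeps its plain cpu_spinwait(); its iterations are now folded into lda.spin_cnt in a single step once the loop exits, keeping the bookkeeping off the hot path while leaving the lockstat spin total complete.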
@@ -549,7 +583,7 @@ __rw_rlock(volatile uintptr_t *c, const char *file, int line)
(state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));
/* Record only the loops spinning and not sleeping. */
- if (spin_cnt > sleep_cnt)
+ if (lda.spin_cnt > sleep_cnt)
LOCKSTAT_RECORD4(LS_RW_RLOCK_SPIN, rw, all_time - sleep_time,
LOCKSTAT_READER, (state & RW_LOCK_READ) == 0,
(state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));
@@ -737,10 +771,12 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t tid, const char *file,
uint64_t waittime = 0;
int contested = 0;
#endif
+#if defined(ADAPTIVE_RWLOCKS) || defined(KDTRACE_HOOKS)
+ struct lock_delay_arg lda;
+#endif
#ifdef KDTRACE_HOOKS
uintptr_t state;
- uint64_t spin_cnt = 0;
- uint64_t sleep_cnt = 0;
+ u_int sleep_cnt = 0;
int64_t sleep_time = 0;
int64_t all_time = 0;
#endif
@@ -748,6 +784,11 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t tid, const char *file,
if (SCHEDULER_STOPPED())
return;
+#if defined(ADAPTIVE_RWLOCKS)
+ lock_delay_arg_init(&lda, &rw_delay);
+#elif defined(KDTRACE_HOOKS)
+ lock_delay_arg_init(&lda, NULL);
+#endif
rw = rwlock2rw(c);
if (rw_wlocked(rw)) {
@@ -772,7 +813,7 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t tid, const char *file,
if (rw->rw_lock == RW_UNLOCKED && _rw_write_lock(rw, tid))
break;
#ifdef KDTRACE_HOOKS
- spin_cnt++;
+ lda.spin_cnt++;
#endif
#ifdef HWPMC_HOOKS
PMC_SOFT_CALL( , , lock, failed);
@@ -795,12 +836,8 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t tid, const char *file,
"spinning", "lockname:\"%s\"",
rw->lock_object.lo_name);
while ((struct thread*)RW_OWNER(rw->rw_lock) == owner &&
- TD_IS_RUNNING(owner)) {
- cpu_spinwait();
-#ifdef KDTRACE_HOOKS
- spin_cnt++;
-#endif
- }
+ TD_IS_RUNNING(owner))
+ lock_delay(&lda);
KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
"running");
continue;
@@ -825,7 +862,7 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t tid, const char *file,
KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
"running");
#ifdef KDTRACE_HOOKS
- spin_cnt += rowner_loops - i;
+ lda.spin_cnt += rowner_loops - i;
#endif
if (i != rowner_loops)
continue;
@@ -915,9 +952,9 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t tid, const char *file,
(state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));
/* Record only the loops spinning and not sleeping. */
- if (spin_cnt > sleep_cnt)
+ if (lda.spin_cnt > sleep_cnt)
LOCKSTAT_RECORD4(LS_RW_WLOCK_SPIN, rw, all_time - sleep_time,
- LOCKSTAT_READER, (state & RW_LOCK_READ) == 0,
+ LOCKSTAT_WRITER, (state & RW_LOCK_READ) == 0,
(state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));
#endif
LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_RW_WLOCK_ACQUIRE, rw, contested,
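Note: besides the mechanical spin_cnt conversion, the final hunk also corrects a pre-existing bug visible in its context: the write-lock spin event had been recorded with LOCKSTAT_READER and is now tagged LOCKSTAT_WRITER.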