Diffstat (limited to 'sys/kern/kern_sx.c')
-rw-r--r--	sys/kern/kern_sx.c	71
1 file changed, 50 insertions(+), 21 deletions(-)
diff --git a/sys/kern/kern_sx.c b/sys/kern/kern_sx.c
index 8c099ac..9e2a1fb 100644
--- a/sys/kern/kern_sx.c
+++ b/sys/kern/kern_sx.c
@@ -60,12 +60,12 @@ __FBSDID("$FreeBSD$");
#include <ddb/ddb.h>
#endif

-#if !defined(SMP) && defined(ADAPTIVE_SX)
-#error "You must have SMP to enable the ADAPTIVE_SX option"
+#if defined(SMP) && !defined(NO_ADAPTIVE_SX)
+#define	ADAPTIVE_SX
#endif

-CTASSERT(((SX_ADAPTIVESPIN | SX_RECURSE) & LO_CLASSFLAGS) ==
-    (SX_ADAPTIVESPIN | SX_RECURSE));
+CTASSERT(((SX_NOADAPTIVE | SX_RECURSE) & LO_CLASSFLAGS) ==
+    (SX_NOADAPTIVE | SX_RECURSE));

/* Handy macros for sleep queues. */
#define	SQ_EXCLUSIVE_QUEUE	0
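
This hunk inverts the option's polarity: instead of having to request "options ADAPTIVE_SX" explicitly (with the build failing on non-SMP kernels), adaptive spinning is now defined automatically on every SMP kernel, and the new "options NO_ADAPTIVE_SX" kernel option becomes the opt-out. The CTASSERT changes accordingly, since SX_NOADAPTIVE replaces SX_ADAPTIVESPIN as the per-lock flag carried in the LO_CLASSFLAGS bits.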
@@ -133,6 +133,14 @@ struct lock_class lock_class_sx = {
#define _sx_assert(sx, what, file, line)
#endif
+#ifdef ADAPTIVE_SX
+static u_int asx_retries = 10;
+static u_int asx_loops = 10000;
+SYSCTL_NODE(_debug, OID_AUTO, sx, CTLFLAG_RD, NULL, "sxlock debugging");
+SYSCTL_INT(_debug_sx, OID_AUTO, retries, CTLFLAG_RW, &asx_retries, 0, "");
+SYSCTL_INT(_debug_sx, OID_AUTO, loops, CTLFLAG_RW, &asx_loops, 0, "");
+#endif
+
void
assert_sx(struct lock_object *lock, int what)
{
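
The tunables are exported read-write under a new debug.sx sysctl node, so the spin bounds can be adjusted on a running system without a rebuild, e.g. "sysctl debug.sx.retries=5" or "sysctl debug.sx.loops=20000"; the OID names follow directly from the declarations above. The defaults allow up to 10 rounds of 10000 spin iterations while readers hold the lock.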
@@ -195,7 +203,7 @@ sx_init_flags(struct sx *sx, const char *description, int opts)
	int flags;

	MPASS((opts & ~(SX_QUIET | SX_RECURSE | SX_NOWITNESS | SX_DUPOK |
-	    SX_NOPROFILE | SX_ADAPTIVESPIN)) == 0);
+	    SX_NOPROFILE | SX_NOADAPTIVE)) == 0);

	flags = LO_RECURSABLE | LO_SLEEPABLE | LO_UPGRADABLE;
	if (opts & SX_DUPOK)
@@ -207,7 +215,7 @@ sx_init_flags(struct sx *sx, const char *description, int opts)
	if (opts & SX_QUIET)
		flags |= LO_QUIET;

-	flags |= opts & (SX_ADAPTIVESPIN | SX_RECURSE);
+	flags |= opts & (SX_NOADAPTIVE | SX_RECURSE);
	sx->sx_lock = SX_LOCK_UNLOCKED;
	sx->sx_recurse = 0;
	lock_init(&sx->lock_object, &lock_class_sx, description, NULL, flags);
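
Because the flag is copied into lo_flags here, adaptive spinning can now be disabled per lock even on kernels where ADAPTIVE_SX is in effect. A minimal sketch of such a caller, with hypothetical names (example_lock, example_init):

	/* Hypothetical: a lock that opts out of adaptive spinning. */
	static struct sx example_lock;

	static void
	example_init(void)
	{

		sx_init_flags(&example_lock, "example lock", SX_NOADAPTIVE);
	}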
@@ -453,6 +461,7 @@ _sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
	GIANT_DECLARE;
#ifdef ADAPTIVE_SX
	volatile struct thread *owner;
+	u_int i, spintries = 0;
#endif
	uintptr_t x;
#ifdef LOCK_PROFILING
@@ -495,24 +504,44 @@ _sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
		 * running or the state of the lock changes.
		 */
		x = sx->sx_lock;
-		if (!(x & SX_LOCK_SHARED) &&
-		    (sx->lock_object.lo_flags & SX_ADAPTIVESPIN)) {
-			x = SX_OWNER(x);
-			owner = (struct thread *)x;
-			if (TD_IS_RUNNING(owner)) {
-				if (LOCK_LOG_TEST(&sx->lock_object, 0))
-					CTR3(KTR_LOCK,
+		if ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
+			if ((x & SX_LOCK_SHARED) == 0) {
+				x = SX_OWNER(x);
+				owner = (struct thread *)x;
+				if (TD_IS_RUNNING(owner)) {
+					if (LOCK_LOG_TEST(&sx->lock_object, 0))
+						CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
-					    __func__, sx, owner);
-				GIANT_SAVE();
-				while (SX_OWNER(sx->sx_lock) == x &&
-				    TD_IS_RUNNING(owner)) {
+						    __func__, sx, owner);
+					GIANT_SAVE();
+					while (SX_OWNER(sx->sx_lock) == x &&
+					    TD_IS_RUNNING(owner)) {
+						cpu_spinwait();
+#ifdef KDTRACE_HOOKS
+						spin_cnt++;
+#endif
+					}
+					continue;
+				}
+			} else if (SX_SHARERS(x) && spintries < asx_retries) {
+				spintries++;
+				for (i = 0; i < asx_loops; i++) {
+					if (LOCK_LOG_TEST(&sx->lock_object, 0))
+						CTR4(KTR_LOCK,
+				    "%s: shared spinning on %p with %u and %u",
+						    __func__, sx, spintries, i);
+					GIANT_SAVE();
+					x = sx->sx_lock;
+					if ((x & SX_LOCK_SHARED) == 0 ||
+					    SX_SHARERS(x) == 0)
+						break;
					cpu_spinwait();
#ifdef KDTRACE_HOOKS
					spin_cnt++;
#endif
				}
-				continue;
+				if (i != asx_loops)
+					continue;
			}
		}
#endif
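
The rewritten block implements a two-tier policy. When a writer owns the lock, the waiter spins for as long as that owner is observed running on another CPU, since a running owner can be expected to release soon. When readers hold the lock there is no single owner whose progress can be watched, so the waiter spins at most asx_retries rounds of asx_loops iterations before falling back to the sleepqueue. A distilled sketch of that policy, with hypothetical helpers (try_acquire(), owner_is_running(), held_shared(), block_on_sleepqueue()) standing in for the real lock-word and sleepqueue operations:

	/*
	 * Illustrative only: owner_is_running() plays the role of the
	 * SX_OWNER()/TD_IS_RUNNING() checks above, held_shared() the role
	 * of the SX_LOCK_SHARED/SX_SHARERS() checks; spintries, asx_retries
	 * and asx_loops are the counters from the real code.
	 */
	while (!try_acquire(lock)) {
		if (owner_is_running(lock)) {
			cpu_spinwait();		/* a running writer frees soon */
		} else if (held_shared(lock) && spintries++ < asx_retries) {
			for (i = 0; i < asx_loops && held_shared(lock); i++)
				cpu_spinwait();	/* bounded: readers are anonymous */
		} else {
			block_on_sleepqueue(lock);	/* wait to be woken */
		}
	}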
@@ -538,7 +567,7 @@ _sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
		 * again.
		 */
		if (!(x & SX_LOCK_SHARED) &&
-		    (sx->lock_object.lo_flags & SX_ADAPTIVESPIN)) {
+		    (sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
			owner = (struct thread *)SX_OWNER(x);
			if (TD_IS_RUNNING(owner)) {
				sleepq_release(&sx->lock_object);
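
The sleepqueue lock is held at this point, so this is a last re-check before committing to sleep: if the exclusive owner has started running in the meantime, the waiter releases the sleepqueue and goes back to the adaptive-spin path above rather than blocking behind a writer that is actively making progress.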
@@ -752,7 +781,7 @@ _sx_slock_hard(struct sx *sx, int opts, const char *file, int line)
		 * the owner stops running or the state of the lock
		 * changes.
		 */
-		if (sx->lock_object.lo_flags & SX_ADAPTIVESPIN) {
+		if ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
			x = SX_OWNER(x);
			owner = (struct thread *)x;
			if (TD_IS_RUNNING(owner)) {
@@ -796,7 +825,7 @@ _sx_slock_hard(struct sx *sx, int opts, const char *file, int line)
		 * changes.
		 */
		if (!(x & SX_LOCK_SHARED) &&
-		    (sx->lock_object.lo_flags & SX_ADAPTIVESPIN)) {
+		    (sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
			owner = (struct thread *)SX_OWNER(x);
			if (TD_IS_RUNNING(owner)) {
				sleepq_release(&sx->lock_object);