summaryrefslogtreecommitdiffstats
path: root/sys
diff options
context:
space:
mode:
authorattilio <attilio@FreeBSD.org>2009-05-29 01:49:27 +0000
committerattilio <attilio@FreeBSD.org>2009-05-29 01:49:27 +0000
commite05714ba705cb30431ac886d3e60ebd247b1d87b (patch)
tree4743eb3cdc33a7b72ea4fc1d92c144595e06994e /sys
parent7515df23bd1cac655ac2e77705c42f9127c815d2 (diff)
downloadFreeBSD-src-e05714ba705cb30431ac886d3e60ebd247b1d87b.zip
FreeBSD-src-e05714ba705cb30431ac886d3e60ebd247b1d87b.tar.gz
Reverse the logic for ADAPTIVE_SX option and enable it by default.
Introduce for this operation the reverse NO_ADAPTIVE_SX option. The flag SX_ADAPTIVESPIN to be passed to sx_init_flags(9) is suppressed and the new flag, offering the reversed logic, SX_NOADAPTIVE is added. Additionally, implement adaptive spinning for sx locks held in shared mode. The spinning limit can be handled through sysctls in order to be tuned while the code doesn't reach the release, after which time they should probably be dropped. This change has been made necessary by recent benchmarks where it improves concurrency of workloads in the presence of high contention (i.e. ZFS). KPI breakage is documented by __FreeBSD_version bumping, manpage and UPDATING updates. Requested by: jeff, kmacy Reviewed by: jeff Tested by: pho
Diffstat (limited to 'sys')
-rw-r--r--sys/cddl/compat/opensolaris/sys/mutex.h4
-rw-r--r--sys/cddl/compat/opensolaris/sys/rwlock.h4
-rw-r--r--sys/conf/NOTES10
-rw-r--r--sys/conf/options2
-rw-r--r--sys/kern/kern_sx.c71
-rw-r--r--sys/sys/param.h2
-rw-r--r--sys/sys/sx.h2
7 files changed, 62 insertions, 33 deletions
diff --git a/sys/cddl/compat/opensolaris/sys/mutex.h b/sys/cddl/compat/opensolaris/sys/mutex.h
index 56c41a4..8756cd0 100644
--- a/sys/cddl/compat/opensolaris/sys/mutex.h
+++ b/sys/cddl/compat/opensolaris/sys/mutex.h
@@ -47,9 +47,9 @@ typedef enum {
typedef struct sx kmutex_t;
#ifndef DEBUG
-#define MUTEX_FLAGS (SX_DUPOK | SX_NOWITNESS | SX_ADAPTIVESPIN)
+#define MUTEX_FLAGS (SX_DUPOK | SX_NOWITNESS)
#else
-#define MUTEX_FLAGS (SX_DUPOK | SX_ADAPTIVESPIN)
+#define MUTEX_FLAGS (SX_DUPOK)
#endif
#define mutex_init(lock, desc, type, arg) do { \
diff --git a/sys/cddl/compat/opensolaris/sys/rwlock.h b/sys/cddl/compat/opensolaris/sys/rwlock.h
index 0d4ac2e..a3e5515 100644
--- a/sys/cddl/compat/opensolaris/sys/rwlock.h
+++ b/sys/cddl/compat/opensolaris/sys/rwlock.h
@@ -49,9 +49,9 @@ typedef enum {
typedef struct sx krwlock_t;
#ifndef DEBUG
-#define RW_FLAGS (SX_DUPOK | SX_NOWITNESS | SX_ADAPTIVESPIN)
+#define RW_FLAGS (SX_DUPOK | SX_NOWITNESS)
#else
-#define RW_FLAGS (SX_DUPOK | SX_ADAPTIVESPIN)
+#define RW_FLAGS (SX_DUPOK)
#endif
#define RW_READ_HELD(x) (rw_read_held((x)))
diff --git a/sys/conf/NOTES b/sys/conf/NOTES
index f25c98ca..4770064 100644
--- a/sys/conf/NOTES
+++ b/sys/conf/NOTES
@@ -215,11 +215,11 @@ options NO_ADAPTIVE_MUTEXES
# to disable it.
options NO_ADAPTIVE_RWLOCKS
-# ADAPTIVE_SX changes the behavior of sx locks to spin if the thread
-# that currently owns the lock is executing on another CPU. Note that
-# in addition to enabling this option, individual sx locks must be
-# initialized with the SX_ADAPTIVESPIN flag.
-options ADAPTIVE_SX
+# ADAPTIVE_SX changes the behavior of sx locks to spin if the thread that
+# currently owns the sx lock is executing on another CPU.
+# This behaviour is enabled by default, so this option can be used to
+# disable it.
+options NO_ADAPTIVE_SX
# MUTEX_NOINLINE forces mutex operations to call functions to perform each
# operation rather than inlining the simple cases. This can be used to
diff --git a/sys/conf/options b/sys/conf/options
index 298c170..a668ea5 100644
--- a/sys/conf/options
+++ b/sys/conf/options
@@ -60,7 +60,6 @@ KDB_UNATTENDED opt_kdb.h
SYSCTL_DEBUG opt_sysctl.h
# Miscellaneous options.
-ADAPTIVE_SX
ALQ
AUDIT opt_global.h
CODA_COMPAT_5 opt_coda.h
@@ -134,6 +133,7 @@ MPROF_BUFFERS opt_mprof.h
MPROF_HASH_SIZE opt_mprof.h
NO_ADAPTIVE_MUTEXES opt_adaptive_mutexes.h
NO_ADAPTIVE_RWLOCKS
+NO_ADAPTIVE_SX
NO_SYSCTL_DESCR opt_global.h
NSWBUF_MIN opt_swap.h
MBUF_PACKET_ZONE_DISABLE opt_global.h
diff --git a/sys/kern/kern_sx.c b/sys/kern/kern_sx.c
index 8c099ac..9e2a1fb 100644
--- a/sys/kern/kern_sx.c
+++ b/sys/kern/kern_sx.c
@@ -60,12 +60,12 @@ __FBSDID("$FreeBSD$");
#include <ddb/ddb.h>
#endif
-#if !defined(SMP) && defined(ADAPTIVE_SX)
-#error "You must have SMP to enable the ADAPTIVE_SX option"
+#if defined(SMP) && !defined(NO_ADAPTIVE_SX)
+#define ADAPTIVE_SX
#endif
-CTASSERT(((SX_ADAPTIVESPIN | SX_RECURSE) & LO_CLASSFLAGS) ==
- (SX_ADAPTIVESPIN | SX_RECURSE));
+CTASSERT(((SX_NOADAPTIVE | SX_RECURSE) & LO_CLASSFLAGS) ==
+ (SX_NOADAPTIVE | SX_RECURSE));
/* Handy macros for sleep queues. */
#define SQ_EXCLUSIVE_QUEUE 0
@@ -133,6 +133,14 @@ struct lock_class lock_class_sx = {
#define _sx_assert(sx, what, file, line)
#endif
+#ifdef ADAPTIVE_SX
+static u_int asx_retries = 10;
+static u_int asx_loops = 10000;
+SYSCTL_NODE(_debug, OID_AUTO, sx, CTLFLAG_RD, NULL, "sxlock debugging");
+SYSCTL_INT(_debug_sx, OID_AUTO, retries, CTLFLAG_RW, &asx_retries, 0, "");
+SYSCTL_INT(_debug_sx, OID_AUTO, loops, CTLFLAG_RW, &asx_loops, 0, "");
+#endif
+
void
assert_sx(struct lock_object *lock, int what)
{
@@ -195,7 +203,7 @@ sx_init_flags(struct sx *sx, const char *description, int opts)
int flags;
MPASS((opts & ~(SX_QUIET | SX_RECURSE | SX_NOWITNESS | SX_DUPOK |
- SX_NOPROFILE | SX_ADAPTIVESPIN)) == 0);
+ SX_NOPROFILE | SX_NOADAPTIVE)) == 0);
flags = LO_RECURSABLE | LO_SLEEPABLE | LO_UPGRADABLE;
if (opts & SX_DUPOK)
@@ -207,7 +215,7 @@ sx_init_flags(struct sx *sx, const char *description, int opts)
if (opts & SX_QUIET)
flags |= LO_QUIET;
- flags |= opts & (SX_ADAPTIVESPIN | SX_RECURSE);
+ flags |= opts & (SX_NOADAPTIVE | SX_RECURSE);
sx->sx_lock = SX_LOCK_UNLOCKED;
sx->sx_recurse = 0;
lock_init(&sx->lock_object, &lock_class_sx, description, NULL, flags);
@@ -453,6 +461,7 @@ _sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
GIANT_DECLARE;
#ifdef ADAPTIVE_SX
volatile struct thread *owner;
+ u_int i, spintries = 0;
#endif
uintptr_t x;
#ifdef LOCK_PROFILING
@@ -495,24 +504,44 @@ _sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
* running or the state of the lock changes.
*/
x = sx->sx_lock;
- if (!(x & SX_LOCK_SHARED) &&
- (sx->lock_object.lo_flags & SX_ADAPTIVESPIN)) {
- x = SX_OWNER(x);
- owner = (struct thread *)x;
- if (TD_IS_RUNNING(owner)) {
- if (LOCK_LOG_TEST(&sx->lock_object, 0))
- CTR3(KTR_LOCK,
+ if ((sx->lock_object.lo_flags & SX_NOADAPTIVE) != 0) {
+ if ((x & SX_LOCK_SHARED) == 0) {
+ x = SX_OWNER(x);
+ owner = (struct thread *)x;
+ if (TD_IS_RUNNING(owner)) {
+ if (LOCK_LOG_TEST(&sx->lock_object, 0))
+ CTR3(KTR_LOCK,
"%s: spinning on %p held by %p",
- __func__, sx, owner);
- GIANT_SAVE();
- while (SX_OWNER(sx->sx_lock) == x &&
- TD_IS_RUNNING(owner)) {
+ __func__, sx, owner);
+ GIANT_SAVE();
+ while (SX_OWNER(sx->sx_lock) == x &&
+ TD_IS_RUNNING(owner)) {
+ cpu_spinwait();
+#ifdef KDTRACE_HOOKS
+ spin_cnt++;
+#endif
+ }
+ continue;
+ }
+ } else if (SX_SHARERS(x) && spintries < asx_retries) {
+ spintries++;
+ for (i = 0; i < asx_loops; i++) {
+ if (LOCK_LOG_TEST(&sx->lock_object, 0))
+ CTR4(KTR_LOCK,
+ "%s: shared spinning on %p with %u and %u",
+ __func__, sx, spintries, i);
+ GIANT_SAVE();
+ x = sx->sx_lock;
+ if ((x & SX_LOCK_SHARED) == 0 ||
+ SX_SHARERS(x) == 0)
+ break;
cpu_spinwait();
#ifdef KDTRACE_HOOKS
spin_cnt++;
#endif
}
- continue;
+ if (i != asx_loops)
+ continue;
}
}
#endif
@@ -538,7 +567,7 @@ _sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
* again.
*/
if (!(x & SX_LOCK_SHARED) &&
- (sx->lock_object.lo_flags & SX_ADAPTIVESPIN)) {
+ (sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
owner = (struct thread *)SX_OWNER(x);
if (TD_IS_RUNNING(owner)) {
sleepq_release(&sx->lock_object);
@@ -752,7 +781,7 @@ _sx_slock_hard(struct sx *sx, int opts, const char *file, int line)
* the owner stops running or the state of the lock
* changes.
*/
- if (sx->lock_object.lo_flags & SX_ADAPTIVESPIN) {
+ if ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
x = SX_OWNER(x);
owner = (struct thread *)x;
if (TD_IS_RUNNING(owner)) {
@@ -796,7 +825,7 @@ _sx_slock_hard(struct sx *sx, int opts, const char *file, int line)
* changes.
*/
if (!(x & SX_LOCK_SHARED) &&
- (sx->lock_object.lo_flags & SX_ADAPTIVESPIN)) {
+ (sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
owner = (struct thread *)SX_OWNER(x);
if (TD_IS_RUNNING(owner)) {
sleepq_release(&sx->lock_object);
diff --git a/sys/sys/param.h b/sys/sys/param.h
index 834dde3..34d3c13 100644
--- a/sys/sys/param.h
+++ b/sys/sys/param.h
@@ -57,7 +57,7 @@
* is created, otherwise 1.
*/
#undef __FreeBSD_version
-#define __FreeBSD_version 800091 /* Master, propagated to newvers */
+#define __FreeBSD_version 800092 /* Master, propagated to newvers */
#ifndef LOCORE
#include <sys/types.h>
diff --git a/sys/sys/sx.h b/sys/sys/sx.h
index e050bab..11e61f6 100644
--- a/sys/sys/sx.h
+++ b/sys/sys/sx.h
@@ -265,7 +265,7 @@ __sx_sunlock(struct sx *sx, const char *file, int line)
#define SX_NOPROFILE 0x02
#define SX_NOWITNESS 0x04
#define SX_QUIET 0x08
-#define SX_ADAPTIVESPIN 0x10
+#define SX_NOADAPTIVE 0x10
#define SX_RECURSE 0x20
/*
OpenPOWER on IntegriCloud