author	jhb <jhb@FreeBSD.org>	2002-05-21 20:47:11 +0000
committer	jhb <jhb@FreeBSD.org>	2002-05-21 20:47:11 +0000
commit	d3398f2f58c001872e59fc5b51c2a69a0ab7f11d (patch)
tree	5af883d743f198bf32fb18ba42652e18b049c23d	/sys/kern/kern_mutex.c
parent	715c35adadcc4804b9b8abab2e4f86a59606d599 (diff)
download	FreeBSD-src-d3398f2f58c001872e59fc5b51c2a69a0ab7f11d.zip
	FreeBSD-src-d3398f2f58c001872e59fc5b51c2a69a0ab7f11d.tar.gz
Add code to make default mutexes adaptive if the ADAPTIVE_MUTEXES kernel
option is used (not on by default).
- In the case of trying to lock a mutex, if the MTX_CONTESTED flag is set,
  then we can safely read the thread pointer from the mtx_lock member
  while holding sched_lock.  We then examine the thread to see if it is
  currently executing on another CPU.  If it is, we keep looping instead
  of blocking.
- In the case of trying to unlock a mutex, it is now possible for a mutex
  to have MTX_CONTESTED set in mtx_lock without any threads actually
  blocked on it, so we need to handle that case.  In that case, we just
  release the lock as if MTX_CONTESTED were not set and return.
- We do not adaptively spin on Giant, as Giant is held for long stretches
  and adaptive spinning on it slows SMP systems to a crawl (it was taking
  5-10 minutes for my test alpha and sparc64 SMP boxes to boot when they
  adaptively spun on Giant).
- We only compile in this code for SMP kernels; it makes no sense for UP
  kernels.

Tested on:	i386, alpha, sparc64
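For readers outside the FreeBSD tree, here is a minimal standalone sketch of
the two ideas above in portable C11 atomics.  This is not the committed code:
all of the helper names (owner_on_cpu, block_on, first_sleeper, wake) are
hypothetical stand-ins for the sched_lock/td_kse checks, the turnstile sleep,
and the mtx_blocked queue in the real patch.

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stddef.h>
	#include <stdint.h>

	#define MTX_CONTESTED	((uintptr_t)0x1)	/* low flag bit, as in mtx_lock */
	#define MTX_FLAGMASK	(~MTX_CONTESTED)	/* mask flags off the owner bits */

	struct thread;					/* opaque owner record */
	struct mtx { _Atomic uintptr_t mtx_lock; };	/* 0 means unowned */

	/* Hypothetical stand-in: the real code checks owner->td_kse->ke_oncpu. */
	static bool owner_on_cpu(struct thread *owner) { (void)owner; return false; }
	/* Hypothetical stand-in: the real code queues the thread and sleeps. */
	static void block_on(struct mtx *m) { (void)m; }
	/* Hypothetical stand-in: the real code reads TAILQ_FIRST(&m->mtx_blocked). */
	static struct thread *first_sleeper(struct mtx *m) { (void)m; return NULL; }
	static void wake(struct thread *td) { (void)td; }

	static void adaptive_lock(struct mtx *m, struct thread *self)
	{
		for (;;) {
			uintptr_t v = 0;		/* expect unowned */
			if (atomic_compare_exchange_weak(&m->mtx_lock, &v,
			    (uintptr_t)self))
				return;			/* acquired the lock */
			/* v now holds the owner pointer plus flag bits. */
			struct thread *owner = (struct thread *)(v & MTX_FLAGMASK);
			if (owner != NULL && owner_on_cpu(owner))
				continue;		/* owner is running: spin */
			block_on(m);			/* owner not running: sleep */
		}
	}

	static void adaptive_unlock(struct mtx *m, struct thread *self)
	{
		uintptr_t v = (uintptr_t)self;
		/* Fast path: uncontested release. */
		if (atomic_compare_exchange_strong(&m->mtx_lock, &v,
		    (uintptr_t)0))
			return;
		/*
		 * Contested path.  With adaptive spinning, a waiter may have
		 * set MTX_CONTESTED and then kept spinning instead of going
		 * to sleep, so the sleep queue can be empty: just release
		 * the lock quickly and return.
		 */
		struct thread *td1 = first_sleeper(m);
		if (td1 == NULL) {
			atomic_store(&m->mtx_lock, (uintptr_t)0);
			return;
		}
		wake(td1);				/* hand off as before */
	}

The Giant exception in the message is visible in this shape: if owner_on_cpu()
stays true for a long-held lock, every waiter burns its CPU for the entire
hold time, which is why the patch spins only on locks with short hold times.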
Diffstat (limited to 'sys/kern/kern_mutex.c')
-rw-r--r--	sys/kern/kern_mutex.c	26
1 file changed, 26 insertions, 0 deletions
diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c
index 2a98204..a46235f 100644
--- a/sys/kern/kern_mutex.c
+++ b/sys/kern/kern_mutex.c
@@ -34,6 +34,7 @@
  * Machine independent bits of mutex implementation.
  */
 
+#include "opt_adaptive_mutexes.h"
 #include "opt_ddb.h"
 
 #include <sys/param.h>
@@ -459,6 +460,9 @@ void
 _mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line)
 {
 	struct thread *td = curthread;
+#if defined(SMP) && defined(ADAPTIVE_MUTEXES)
+	struct thread *owner;
+#endif
 
 	if ((m->mtx_lock & MTX_FLAGMASK) == (uintptr_t)td) {
 		m->mtx_recurse++;
@@ -514,6 +518,19 @@ _mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line)
 			continue;
 		}
 
+#if defined(SMP) && defined(ADAPTIVE_MUTEXES)
+		/*
+		 * If the current owner of the lock is executing on another
+		 * CPU, spin instead of blocking.
+		 */
+		owner = (struct thread *)(v & MTX_FLAGMASK);
+		if (m != &Giant && owner->td_kse != NULL &&
+		    owner->td_kse->ke_oncpu != NOCPU) {
+			mtx_unlock_spin(&sched_lock);
+			continue;
+		}
+#endif	/* SMP && ADAPTIVE_MUTEXES */
+
 		/*
 		 * We definitely must sleep for this lock.
 		 */
@@ -651,6 +668,15 @@ _mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
 		CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);
 
 	td1 = TAILQ_FIRST(&m->mtx_blocked);
+#if defined(SMP) && defined(ADAPTIVE_MUTEXES)
+	if (td1 == NULL) {
+		_release_lock_quick(m);
+		if (LOCK_LOG_TEST(&m->mtx_object, opts))
+			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p no sleepers", m);
+		mtx_unlock_spin(&sched_lock);
+		return;
+	}
+#endif
 
 	MPASS(td->td_proc->p_magic == P_MAGIC);
 	MPASS(td1->td_proc->p_magic == P_MAGIC);