diff options
author | jhb <jhb@FreeBSD.org> | 2002-05-21 20:47:11 +0000 |
---|---|---|
committer | jhb <jhb@FreeBSD.org> | 2002-05-21 20:47:11 +0000 |
commit | d3398f2f58c001872e59fc5b51c2a69a0ab7f11d (patch) | |
tree | 5af883d743f198bf32fb18ba42652e18b049c23d | |
parent | 715c35adadcc4804b9b8abab2e4f86a59606d599 (diff) | |
download | FreeBSD-src-d3398f2f58c001872e59fc5b51c2a69a0ab7f11d.zip FreeBSD-src-d3398f2f58c001872e59fc5b51c2a69a0ab7f11d.tar.gz |
Add code to make default mutexes adaptive if the ADAPTIVE_MUTEXES kernel
option is used (not on by default).
- In the case of trying to lock a mutex, if the MTX_CONTESTED flag is set,
then we can safely read the thread pointer from the mtx_lock member while
holding sched_lock. We then examine the thread to see if it is currently
executing on another CPU. If it is, then we keep looping instead of
blocking.
- In the case of trying to unlock a mutex, it is now possible for a mutex
to have MTX_CONTESTED set in mtx_lock but to not have any threads
actually blocked on it, so we need to handle that case. In that case,
we just release the lock as if MTX_CONTESTED was not set and return.
- We do not adaptively spin on Giant as Giant is held for long times and
it slows SMP systems down to a crawl (it was taking several minutes,
like 5-10 or so for my test alpha and sparc64 SMP boxes to boot up when
they adaptively spun on Giant).
- We only compile in the code to do this for SMP kernels, it doesn't make
sense for UP kernels.
Tested on: i386, alpha, sparc64
-rw-r--r-- | sys/conf/NOTES | 5 | ||||
-rw-r--r-- | sys/conf/options | 1 | ||||
-rw-r--r-- | sys/kern/kern_mutex.c | 26 | ||||
-rw-r--r-- | sys/kern/subr_turnstile.c | 26 |
4 files changed, 58 insertions, 0 deletions
diff --git a/sys/conf/NOTES b/sys/conf/NOTES index 1d431e3..688de2f 100644 --- a/sys/conf/NOTES +++ b/sys/conf/NOTES @@ -120,6 +120,11 @@ options ROOTDEVNAME=\"ufs:da0s2e\" # Mandatory: options SMP # Symmetric MultiProcessor Kernel +# ADAPTIVE_MUTEXES changes the behavior of blocking mutexes to spin +# if the thread that currently owns the mutex is executing on another +# CPU. +options ADAPTIVE_MUTEXES + # SMP Debugging Options: # # MUTEX_DEBUG enables various extra assertions in the mutex code. diff --git a/sys/conf/options b/sys/conf/options index 14133e2..5caf9b5 100644 --- a/sys/conf/options +++ b/sys/conf/options @@ -57,6 +57,7 @@ ADW_ALLOW_MEMIO opt_adw.h # Allow PCI devices to use memory # Miscellaneous options. GEOM opt_geom.h +ADAPTIVE_MUTEXES COMPAT_43 opt_compat.h COMPAT_SUNOS opt_compat.h COMPILING_LINT opt_global.h diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c index 2a98204..a46235f 100644 --- a/sys/kern/kern_mutex.c +++ b/sys/kern/kern_mutex.c @@ -34,6 +34,7 @@ * Machine independent bits of mutex implementation. */ +#include "opt_adaptive_mutexes.h" #include "opt_ddb.h" #include <sys/param.h> @@ -459,6 +460,9 @@ void _mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line) { struct thread *td = curthread; +#if defined(SMP) && defined(ADAPTIVE_MUTEXES) + struct thread *owner; +#endif if ((m->mtx_lock & MTX_FLAGMASK) == (uintptr_t)td) { m->mtx_recurse++; @@ -514,6 +518,19 @@ _mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line) continue; } +#if defined(SMP) && defined(ADAPTIVE_MUTEXES) + /* + * If the current owner of the lock is executing on another + * CPU, spin instead of blocking. + */ + owner = (struct thread *)(v & MTX_FLAGMASK); + if (m != &Giant && owner->td_kse != NULL && + owner->td_kse->ke_oncpu != NOCPU) { + mtx_unlock_spin(&sched_lock); + continue; + } +#endif /* SMP && ADAPTIVE_MUTEXES */ + /* * We definitely must sleep for this lock. 
*/ @@ -651,6 +668,15 @@ _mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line) CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m); td1 = TAILQ_FIRST(&m->mtx_blocked); +#if defined(SMP) && defined(ADAPTIVE_MUTEXES) + if (td1 == NULL) { + _release_lock_quick(m); + if (LOCK_LOG_TEST(&m->mtx_object, opts)) + CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p no sleepers", m); + mtx_unlock_spin(&sched_lock); + return; + } +#endif MPASS(td->td_proc->p_magic == P_MAGIC); MPASS(td1->td_proc->p_magic == P_MAGIC); diff --git a/sys/kern/subr_turnstile.c b/sys/kern/subr_turnstile.c index 2a98204..a46235f 100644 --- a/sys/kern/subr_turnstile.c +++ b/sys/kern/subr_turnstile.c @@ -34,6 +34,7 @@ * Machine independent bits of mutex implementation. */ +#include "opt_adaptive_mutexes.h" #include "opt_ddb.h" #include <sys/param.h> @@ -459,6 +460,9 @@ void _mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line) { struct thread *td = curthread; +#if defined(SMP) && defined(ADAPTIVE_MUTEXES) + struct thread *owner; +#endif if ((m->mtx_lock & MTX_FLAGMASK) == (uintptr_t)td) { m->mtx_recurse++; @@ -514,6 +518,19 @@ _mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line) continue; } +#if defined(SMP) && defined(ADAPTIVE_MUTEXES) + /* + * If the current owner of the lock is executing on another + * CPU, spin instead of blocking. + */ + owner = (struct thread *)(v & MTX_FLAGMASK); + if (m != &Giant && owner->td_kse != NULL && + owner->td_kse->ke_oncpu != NOCPU) { + mtx_unlock_spin(&sched_lock); + continue; + } +#endif /* SMP && ADAPTIVE_MUTEXES */ + /* * We definitely must sleep for this lock. 
*/ @@ -651,6 +668,15 @@ _mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line) CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m); td1 = TAILQ_FIRST(&m->mtx_blocked); +#if defined(SMP) && defined(ADAPTIVE_MUTEXES) + if (td1 == NULL) { + _release_lock_quick(m); + if (LOCK_LOG_TEST(&m->mtx_object, opts)) + CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p no sleepers", m); + mtx_unlock_spin(&sched_lock); + return; + } +#endif MPASS(td->td_proc->p_magic == P_MAGIC); MPASS(td1->td_proc->p_magic == P_MAGIC); |