path: root/lib/libthr/thread/thr_mutex.c
author	davidxu <davidxu@FreeBSD.org>	2007-10-30 05:57:37 +0000
committer	davidxu <davidxu@FreeBSD.org>	2007-10-30 05:57:37 +0000
commit	97a20b1db74998b4f5211f1961a0c9d7e51b697a (patch)
tree	85fcbcaf75e4cc350edc0ece2d8413fd982fed32	/lib/libthr/thread/thr_mutex.c
parent	9a82deac9e7798e8b266554e32ecacef71ffdee1 (diff)
download	FreeBSD-src-97a20b1db74998b4f5211f1961a0c9d7e51b697a.zip
	FreeBSD-src-97a20b1db74998b4f5211f1961a0c9d7e51b697a.tar.gz
Add my recent work on adaptive spin mutex code. Two environment variables can be used to tune pthread mutex performance:

1. LIBPTHREAD_SPINLOOPS
If a pthread mutex is held by another thread, this variable sets the total number of spin loops the current thread performs before it sleeps in the kernel. This avoids the overhead of a syscall when the mutex will be unlocked very soon (well-written application code).

2. LIBPTHREAD_YIELDLOOPS
If a pthread mutex is held by another thread, this variable sets the total number of sched_yield() loops the current thread performs before it sleeps in the kernel. While yielding, the current thread gives up the CPU but does not sleep in the kernel, so it does not set the contention bit in the mutex; instead it lets the lock owner run again if the owner is on the kernel's run queue. When the owner then unlocks the mutex, it does not have to enter the kernel and do a lot of work to resume mutex waiters, which in some cases saves many syscalls for the mutex owner. In my experience, LIBPTHREAD_YIELDLOOPS can sometimes improve performance far more than LIBPTHREAD_SPINLOOPS; it depends on the application.

Both variables are global to all pthread mutexes; there is no interface to set them per mutex. The default values are zero, so spinning is turned off by default.
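As a rough illustration of the acquisition order this commit introduces (spin, then yield, then sleep in the kernel), the sketch below restates it using only the portable pthread API. It is not the libthr code itself: demo_lock and its spinloops/yieldloops parameters are hypothetical names chosen for the example, and the real mutex_lock_common() in the diff below spins on the lock's owner word and retries with _thr_umutex_trylock2() before falling back to the blocking __thr_umutex_lock() path.

#include <pthread.h>
#include <sched.h>

/*
 * Simplified sketch of the spin -> yield -> sleep order.
 * pthread_mutex_trylock() stands in for _thr_umutex_trylock2() and
 * pthread_mutex_lock() for the blocking __thr_umutex_lock() path.
 */
int
demo_lock(pthread_mutex_t *m, int spinloops, int yieldloops)
{
	int count;

	/* 1. Spin: cheap if the owner releases the mutex very soon. */
	for (count = spinloops; count > 0; count--) {
		if (pthread_mutex_trylock(m) == 0)
			return (0);
		/* The real code issues CPU_SPINWAIT (a pause hint) here. */
	}

	/* 2. Yield: give up the CPU without sleeping in the kernel, so
	 *    the owner can run and unlock without waking any waiters. */
	for (count = yieldloops; count > 0; count--) {
		sched_yield();
		if (pthread_mutex_trylock(m) == 0)
			return (0);
	}

	/* 3. Block: enter the kernel and sleep until the mutex is free. */
	return (pthread_mutex_lock(m));
}

In libthr itself the two loop counts are not parameters; they come from the LIBPTHREAD_SPINLOOPS and LIBPTHREAD_YIELDLOOPS environment variables, which libthr reads into the globals _thr_spinloops and _thr_yieldloops used by the diff below.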
Diffstat (limited to 'lib/libthr/thread/thr_mutex.c')
-rw-r--r--	lib/libthr/thread/thr_mutex.c	82
1 file changed, 37 insertions(+), 45 deletions(-)
diff --git a/lib/libthr/thread/thr_mutex.c b/lib/libthr/thread/thr_mutex.c
index b1e6d0c..2cc3ce0 100644
--- a/lib/libthr/thread/thr_mutex.c
+++ b/lib/libthr/thread/thr_mutex.c
@@ -67,12 +67,6 @@
#endif
/*
- * For adaptive mutexes, how many times to spin doing trylock2
- * before entering the kernel to block
- */
-#define MUTEX_ADAPTIVE_SPINS 200
-
-/*
* Prototypes
*/
int __pthread_mutex_init(pthread_mutex_t *mutex,
@@ -279,6 +273,16 @@ _pthread_mutex_destroy(pthread_mutex_t *mutex)
return (ret);
}
+
+#define ENQUEUE_MUTEX(curthread, m) \
+ m->m_owner = curthread; \
+ /* Add to the list of owned mutexes: */ \
+ MUTEX_ASSERT_NOT_OWNED(m); \
+ if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0) \
+ TAILQ_INSERT_TAIL(&curthread->mutexq, m, m_qe); \
+ else \
+ TAILQ_INSERT_TAIL(&curthread->pp_mutexq, m, m_qe)
+
static int
mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex)
{
@@ -290,13 +294,7 @@ mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex)
m = *mutex;
ret = _thr_umutex_trylock(&m->m_lock, id);
if (ret == 0) {
- m->m_owner = curthread;
- /* Add to the list of owned mutexes. */
- MUTEX_ASSERT_NOT_OWNED(m);
- if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
- TAILQ_INSERT_TAIL(&curthread->mutexq, m, m_qe);
- else
- TAILQ_INSERT_TAIL(&curthread->pp_mutexq, m, m_qe);
+ ENQUEUE_MUTEX(curthread, m);
} else if (m->m_owner == curthread) {
ret = mutex_self_trylock(m);
} /* else {} */
@@ -348,39 +346,43 @@ mutex_lock_common(struct pthread *curthread, pthread_mutex_t *mutex,
struct pthread_mutex *m;
uint32_t id;
int ret;
+ int count;
id = TID(curthread);
m = *mutex;
ret = _thr_umutex_trylock2(&m->m_lock, id);
if (ret == 0) {
- m->m_owner = curthread;
- /* Add to the list of owned mutexes: */
- MUTEX_ASSERT_NOT_OWNED(m);
- if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
- TAILQ_INSERT_TAIL(&curthread->mutexq, m, m_qe);
- else
- TAILQ_INSERT_TAIL(&curthread->pp_mutexq, m, m_qe);
+ ENQUEUE_MUTEX(curthread, m);
} else if (m->m_owner == curthread) {
ret = mutex_self_lock(m, abstime);
} else {
- /*
- * For adaptive mutexes, spin for a bit in the expectation
- * that if the application requests this mutex type then
- * the lock is likely to be released quickly and it is
- * faster than entering the kernel
- */
- if (m->m_type == PTHREAD_MUTEX_ADAPTIVE_NP) {
- int count = MUTEX_ADAPTIVE_SPINS;
+ if (_thr_spinloops != 0 && _thr_is_smp &&
+ !(m->m_lock.m_flags & UMUTEX_PRIO_PROTECT)) {
+ count = _thr_spinloops;
+ while (count && m->m_lock.m_owner != UMUTEX_UNOWNED) {
+ count--;
+ CPU_SPINWAIT;
+ }
+ if (count) {
+ ret = _thr_umutex_trylock2(&m->m_lock, id);
+ if (ret == 0) {
+ ENQUEUE_MUTEX(curthread, m);
+ return (ret);
+ }
+ }
+ }
+ if (_thr_yieldloops != 0) {
+ count = _thr_yieldloops;
while (count--) {
+ _sched_yield();
ret = _thr_umutex_trylock2(&m->m_lock, id);
- if (ret == 0)
- break;
- cpu_spinwait();
+ if (ret == 0) {
+ ENQUEUE_MUTEX(curthread, m);
+ return (ret);
+ }
}
}
- if (ret == 0)
- goto done;
if (abstime == NULL) {
ret = __thr_umutex_lock(&m->m_lock);
@@ -399,17 +401,8 @@ mutex_lock_common(struct pthread *curthread, pthread_mutex_t *mutex,
if (ret == EINTR)
ret = ETIMEDOUT;
}
-done:
- if (ret == 0) {
- m->m_owner = curthread;
- /* Add to the list of owned mutexes: */
- MUTEX_ASSERT_NOT_OWNED(m);
- if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
- TAILQ_INSERT_TAIL(&curthread->mutexq, m, m_qe);
- else
- TAILQ_INSERT_TAIL(&curthread->pp_mutexq, m,
- m_qe);
- }
+ if (ret == 0)
+ ENQUEUE_MUTEX(curthread, m);
}
return (ret);
}
@@ -529,7 +522,6 @@ mutex_self_trylock(pthread_mutex_t m)
switch (m->m_type) {
case PTHREAD_MUTEX_ERRORCHECK:
case PTHREAD_MUTEX_NORMAL:
- case PTHREAD_MUTEX_ADAPTIVE_NP:
ret = EBUSY;
break;