summary | refs | log | tree | commit | diff | stats
path: root/lib/libpthread/thread/thr_mutex.c
diff options
context:
space:
mode:
authordeischen <deischen@FreeBSD.org>2003-07-23 02:11:07 +0000
committerdeischen <deischen@FreeBSD.org>2003-07-23 02:11:07 +0000
commit9f8651cad61cf131bd07594ac25f3cd102fe0159 (patch)
tree2ba938226cb9f3c751737df74c5cdf8e63cf8eba /lib/libpthread/thread/thr_mutex.c
parenta8317490154f61e26d775ebd4480ec6333f94a75 (diff)
downloadFreeBSD-src-9f8651cad61cf131bd07594ac25f3cd102fe0159.zip
FreeBSD-src-9f8651cad61cf131bd07594ac25f3cd102fe0159.tar.gz
Move idle kse wakeup to outside of regions where locks are held.
This eliminates ping-ponging of locks, where the idle KSE wakes up only to find the lock it needs is being held. This gives little or no gain to M:N mode but greatly speeds up 1:1 mode. Reviewed & Tested by: davidxu
Diffstat (limited to 'lib/libpthread/thread/thr_mutex.c')
-rw-r--r--  lib/libpthread/thread/thr_mutex.c | 18
1 file changed, 12 insertions(+), 6 deletions(-)
diff --git a/lib/libpthread/thread/thr_mutex.c b/lib/libpthread/thread/thr_mutex.c
index c2c28e0..7e8de44 100644
--- a/lib/libpthread/thread/thr_mutex.c
+++ b/lib/libpthread/thread/thr_mutex.c
@@ -67,7 +67,8 @@
/*
* Prototypes
*/
-static void mutex_handoff(struct pthread *, struct pthread_mutex *);
+static struct kse_mailbox *mutex_handoff(struct pthread *,
+ struct pthread_mutex *);
static inline int mutex_self_trylock(struct pthread *, pthread_mutex_t);
static inline int mutex_self_lock(struct pthread *, pthread_mutex_t);
static int mutex_unlock_common(pthread_mutex_t *, int);
@@ -860,6 +861,7 @@ static int
mutex_unlock_common(pthread_mutex_t *m, int add_reference)
{
struct pthread *curthread = _get_curthread();
+ struct kse_mailbox *kmbx = NULL;
int ret = 0;
if (m == NULL || *m == NULL)
@@ -904,7 +906,7 @@ mutex_unlock_common(pthread_mutex_t *m, int add_reference)
* Hand off the mutex to the next waiting
* thread:
*/
- mutex_handoff(curthread, *m);
+ kmbx = mutex_handoff(curthread, *m);
}
break;
@@ -961,7 +963,7 @@ mutex_unlock_common(pthread_mutex_t *m, int add_reference)
* Hand off the mutex to the next waiting
* thread:
*/
- mutex_handoff(curthread, *m);
+ kmbx = mutex_handoff(curthread, *m);
}
break;
@@ -1017,7 +1019,7 @@ mutex_unlock_common(pthread_mutex_t *m, int add_reference)
* Hand off the mutex to the next waiting
* thread:
*/
- mutex_handoff(curthread, *m);
+ kmbx = mutex_handoff(curthread, *m);
}
break;
@@ -1034,6 +1036,8 @@ mutex_unlock_common(pthread_mutex_t *m, int add_reference)
/* Unlock the mutex structure: */
THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
+ if (kmbx != NULL)
+ kse_wakeup(kmbx);
}
/* Return the completion status: */
@@ -1460,9 +1464,10 @@ _mutex_lock_backout(struct pthread *curthread)
* is necessary to lock the thread's scheduling queue while also
* holding the mutex lock.
*/
-static void
+static struct kse_mailbox *
mutex_handoff(struct pthread *curthread, struct pthread_mutex *mutex)
{
+ struct kse_mailbox *kmbx = NULL;
struct pthread *pthread;
/* Keep dequeueing until we find a valid thread: */
@@ -1564,7 +1569,7 @@ mutex_handoff(struct pthread *curthread, struct pthread_mutex *mutex)
}
/* Make the thread runnable and unlock the scheduling queue: */
- _thr_setrunnable_unlocked(pthread);
+ kmbx = _thr_setrunnable_unlocked(pthread);
/* Add a preemption point. */
if ((curthread->kseg == pthread->kseg) &&
@@ -1583,6 +1588,7 @@ mutex_handoff(struct pthread *curthread, struct pthread_mutex *mutex)
if ((pthread == NULL) && (mutex->m_protocol == PTHREAD_PRIO_INHERIT))
/* This mutex has no priority: */
mutex->m_prio = 0;
+ return (kmbx);
}
/*
OpenPOWER on IntegriCloud