From 9f8651cad61cf131bd07594ac25f3cd102fe0159 Mon Sep 17 00:00:00 2001
From: deischen
Date: Wed, 23 Jul 2003 02:11:07 +0000
Subject: Move idle kse wakeup to outside of regions where locks are held.

This eliminates ping-ponging of locks, where the idle KSE wakes up only
to find the lock it needs is being held.  This gives little or no gain
to M:N mode but greatly speeds up 1:1 mode.

Reviewed & Tested by: davidxu
---
 lib/libpthread/thread/thr_cond.c | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

(limited to 'lib/libpthread/thread/thr_cond.c')

diff --git a/lib/libpthread/thread/thr_cond.c b/lib/libpthread/thread/thr_cond.c
index 9703da6..0d6d914 100644
--- a/lib/libpthread/thread/thr_cond.c
+++ b/lib/libpthread/thread/thr_cond.c
@@ -584,6 +584,7 @@ _pthread_cond_signal(pthread_cond_t * cond)
 {
 	struct pthread	*curthread = _get_curthread();
 	struct pthread	*pthread;
+	struct kse_mailbox *kmbx;
 	int		rval = 0;
 
 	THR_ASSERT(curthread->locklevel == 0,
@@ -619,8 +620,10 @@ _pthread_cond_signal(pthread_cond_t * cond)
 			    (pthread->active_priority >
 			    curthread->active_priority))
 				curthread->critical_yield = 1;
-			_thr_setrunnable_unlocked(pthread);
+			kmbx = _thr_setrunnable_unlocked(pthread);
 			THR_SCHED_UNLOCK(curthread, pthread);
+			if (kmbx != NULL)
+				kse_wakeup(kmbx);
 		}
 		/* Check for no more waiters: */
 		if (TAILQ_FIRST(&(*cond)->c_queue) == NULL)
@@ -649,6 +652,7 @@ _pthread_cond_broadcast(pthread_cond_t * cond)
 {
 	struct pthread	*curthread = _get_curthread();
 	struct pthread	*pthread;
+	struct kse_mailbox *kmbx;
 	int		rval = 0;
 
 	THR_ASSERT(curthread->locklevel == 0,
@@ -682,8 +686,10 @@ _pthread_cond_broadcast(pthread_cond_t * cond)
 			    (pthread->active_priority >
 			    curthread->active_priority))
 				curthread->critical_yield = 1;
-			_thr_setrunnable_unlocked(pthread);
+			kmbx = _thr_setrunnable_unlocked(pthread);
 			THR_SCHED_UNLOCK(curthread, pthread);
+			if (kmbx != NULL)
+				kse_wakeup(kmbx);
 		}
 
 		/* There are no more waiting threads: */
--
cgit v1.1
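
Note (not part of the original patch): the change above follows a general "wake after unlock"
pattern: record which KSE mailbox needs waking while the scheduler lock is held, drop the lock,
and only then issue the wakeup, so the woken KSE does not immediately block on the lock still
held by the signaller. A minimal, self-contained sketch of that pattern in plain pthreads is
shown below; the names (scheduler_lock, struct waiter, make_runnable_locked, signal_waiter) are
hypothetical and stand in for libpthread's internal KSE primitives.

/*
 * Sketch of the "record under lock, wake after unlock" pattern.
 * This is an illustration only, not libpthread code.
 */
#include <pthread.h>
#include <stddef.h>

static pthread_mutex_t scheduler_lock = PTHREAD_MUTEX_INITIALIZER;

struct waiter {
	pthread_cond_t	cv;		/* what the idle thread sleeps on */
	int		runnable;	/* set while scheduler_lock is held */
};

/* Mark the waiter runnable; caller must hold scheduler_lock. */
static struct waiter *
make_runnable_locked(struct waiter *w)
{
	w->runnable = 1;
	return (w);	/* tell the caller whom to wake once the lock is dropped */
}

void
signal_waiter(struct waiter *w)
{
	struct waiter *wakeup_target;

	pthread_mutex_lock(&scheduler_lock);
	wakeup_target = make_runnable_locked(w);
	pthread_mutex_unlock(&scheduler_lock);

	/* Wake only after the lock is released, avoiding lock ping-pong. */
	if (wakeup_target != NULL)
		pthread_cond_signal(&wakeup_target->cv);
}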