summary | refs | log | tree | commit | diff | stats
path: root/lib/libpthread/thread/thr_cond.c
diff options
context:
space:
mode:
author: deischen <deischen@FreeBSD.org> 2003-07-23 02:11:07 +0000
committer: deischen <deischen@FreeBSD.org> 2003-07-23 02:11:07 +0000
commit9f8651cad61cf131bd07594ac25f3cd102fe0159 (patch)
tree2ba938226cb9f3c751737df74c5cdf8e63cf8eba /lib/libpthread/thread/thr_cond.c
parenta8317490154f61e26d775ebd4480ec6333f94a75 (diff)
downloadFreeBSD-src-9f8651cad61cf131bd07594ac25f3cd102fe0159.zip
FreeBSD-src-9f8651cad61cf131bd07594ac25f3cd102fe0159.tar.gz
Move idle kse wakeup to outside of regions where locks are held.
This eliminates ping-ponging of locks, where the idle KSE wakes up only to find the lock it needs is being held. This gives little or no gain to M:N mode but greatly speeds up 1:1 mode. Reviewed & Tested by: davidxu
Diffstat (limited to 'lib/libpthread/thread/thr_cond.c')
-rw-r--r-- lib/libpthread/thread/thr_cond.c | 10
1 file changed, 8 insertions, 2 deletions
diff --git a/lib/libpthread/thread/thr_cond.c b/lib/libpthread/thread/thr_cond.c
index 9703da6..0d6d914 100644
--- a/lib/libpthread/thread/thr_cond.c
+++ b/lib/libpthread/thread/thr_cond.c
@@ -584,6 +584,7 @@ _pthread_cond_signal(pthread_cond_t * cond)
{
struct pthread *curthread = _get_curthread();
struct pthread *pthread;
+ struct kse_mailbox *kmbx;
int rval = 0;
THR_ASSERT(curthread->locklevel == 0,
@@ -619,8 +620,10 @@ _pthread_cond_signal(pthread_cond_t * cond)
(pthread->active_priority >
curthread->active_priority))
curthread->critical_yield = 1;
- _thr_setrunnable_unlocked(pthread);
+ kmbx = _thr_setrunnable_unlocked(pthread);
THR_SCHED_UNLOCK(curthread, pthread);
+ if (kmbx != NULL)
+ kse_wakeup(kmbx);
}
/* Check for no more waiters: */
if (TAILQ_FIRST(&(*cond)->c_queue) == NULL)
@@ -649,6 +652,7 @@ _pthread_cond_broadcast(pthread_cond_t * cond)
{
struct pthread *curthread = _get_curthread();
struct pthread *pthread;
+ struct kse_mailbox *kmbx;
int rval = 0;
THR_ASSERT(curthread->locklevel == 0,
@@ -682,8 +686,10 @@ _pthread_cond_broadcast(pthread_cond_t * cond)
(pthread->active_priority >
curthread->active_priority))
curthread->critical_yield = 1;
- _thr_setrunnable_unlocked(pthread);
+ kmbx = _thr_setrunnable_unlocked(pthread);
THR_SCHED_UNLOCK(curthread, pthread);
+ if (kmbx != NULL)
+ kse_wakeup(kmbx);
}
/* There are no more waiting threads: */
OpenPOWER on IntegriCloud