author     davidxu <davidxu@FreeBSD.org>	2006-04-08 13:24:44 +0000
committer  davidxu <davidxu@FreeBSD.org>	2006-04-08 13:24:44 +0000
commit     7d0c23506ce44c37be9f669bdc1ec0ff37e66d50 (patch)
tree       1dbe72ab555a424679d073fcee976c4589f5cf3a /lib/libthr/thread/thr_cond.c
parent     04498a993a54742b6f15cf278d3443b6cf425f7f (diff)
Do not check the validity of the timeout if the mutex can be acquired immediately.
Completely drop a recursive mutex in pthread_cond_wait and restore its recursion count after resumption. Reorganize the code so that gcc generates better code.
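The second point is visible in the diff below: cond_wait_common() now has _mutex_cv_unlock() report how deeply a recursive mutex was held, stashes that count in the cond_cancel_info structure, and hands it back to _mutex_cv_lock() on both the normal return path and the cancellation path, so the recursion depth is restored after the wait. A minimal, self-contained sketch of that save/drop/restore pattern follows; the names in it (recursive_mutex, rm_release_all, rm_acquire_restore) are invented for illustration and are not libthr interfaces.

/*
 * Sketch only: a hypothetical recursive mutex whose full release and
 * restore mirror what _mutex_cv_unlock()/_mutex_cv_lock() do in this diff.
 */
struct recursive_mutex {
	long	owner;	/* id of the owning thread, 0 when free */
	int	count;	/* recursion depth held by the owner */
};

/* Drop the mutex completely, remembering the recursion depth. */
static void
rm_release_all(struct recursive_mutex *m, int *saved_count)
{
	*saved_count = m->count;
	m->count = 0;
	m->owner = 0;		/* real code would also wake any waiters */
}

/* Re-acquire the mutex and resume at the saved recursion depth. */
static void
rm_acquire_restore(struct recursive_mutex *m, long self, int saved_count)
{
	m->owner = self;	/* real code would block until acquired */
	m->count = saved_count;
}

Keeping the count inside cond_cancel_info matters because the wait can also end through thread cancellation: the cancellation handler must restore the same recursion depth that the normal return path would.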
Diffstat (limited to 'lib/libthr/thread/thr_cond.c')
-rw-r--r--	lib/libthr/thread/thr_cond.c	23
1 file changed, 12 insertions, 11 deletions
diff --git a/lib/libthr/thread/thr_cond.c b/lib/libthr/thread/thr_cond.c
index 108047d..f786062 100644
--- a/lib/libthr/thread/thr_cond.c
+++ b/lib/libthr/thread/thr_cond.c
@@ -160,18 +160,19 @@ struct cond_cancel_info
pthread_mutex_t *mutex;
pthread_cond_t *cond;
long seqno;
+ int count;
};
static void
cond_cancel_handler(void *arg)
{
struct pthread *curthread = _get_curthread();
- struct cond_cancel_info *cci = (struct cond_cancel_info *)arg;
+ struct cond_cancel_info *info = (struct cond_cancel_info *)arg;
pthread_cond_t cv;
- cv = *(cci->cond);
+ cv = *(info->cond);
THR_LOCK_ACQUIRE(curthread, &cv->c_lock);
- if (cv->c_seqno != cci->seqno && cv->c_wakeups != 0) {
+ if (cv->c_seqno != info->seqno && cv->c_wakeups != 0) {
if (cv->c_waiters > 0) {
cv->c_seqno++;
_thr_umtx_wake(&cv->c_seqno, 1);
@@ -182,7 +183,7 @@ cond_cancel_handler(void *arg)
}
THR_LOCK_RELEASE(curthread, &cv->c_lock);
- _mutex_cv_lock(cci->mutex);
+ _mutex_cv_lock(info->mutex, info->count);
}
static int
@@ -191,7 +192,7 @@ cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex,
{
struct pthread *curthread = _get_curthread();
struct timespec ts, ts2, *tsp;
- struct cond_cancel_info cci;
+ struct cond_cancel_info info;
pthread_cond_t cv;
long seq, oldseq;
int oldcancel;
@@ -207,15 +208,15 @@ cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex,
cv = *cond;
THR_LOCK_ACQUIRE(curthread, &cv->c_lock);
- ret = _mutex_cv_unlock(mutex);
+ ret = _mutex_cv_unlock(mutex, &info.count);
if (ret) {
THR_LOCK_RELEASE(curthread, &cv->c_lock);
return (ret);
}
oldseq = seq = cv->c_seqno;
- cci.mutex = mutex;
- cci.cond = cond;
- cci.seqno = oldseq;
+ info.mutex = mutex;
+ info.cond = cond;
+ info.seqno = oldseq;
cv->c_waiters++;
do {
@@ -229,7 +230,7 @@ cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex,
tsp = NULL;
if (cancel) {
- THR_CLEANUP_PUSH(curthread, cond_cancel_handler, &cci);
+ THR_CLEANUP_PUSH(curthread, cond_cancel_handler, &info);
oldcancel = _thr_cancel_enter(curthread);
ret = _thr_umtx_wait(&cv->c_seqno, seq, tsp);
_thr_cancel_leave(curthread, oldcancel);
@@ -256,7 +257,7 @@ cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex,
cv->c_waiters--;
}
THR_LOCK_RELEASE(curthread, &cv->c_lock);
- _mutex_cv_lock(mutex);
+ _mutex_cv_lock(mutex, info.count);
return (ret);
}