author     davidxu <davidxu@FreeBSD.org>   2006-04-08 13:24:44 +0000
committer  davidxu <davidxu@FreeBSD.org>   2006-04-08 13:24:44 +0000
commit     7d0c23506ce44c37be9f669bdc1ec0ff37e66d50 (patch)
tree       1dbe72ab555a424679d073fcee976c4589f5cf3a
parent     04498a993a54742b6f15cf278d3443b6cf425f7f (diff)
Do not check validity of the timeout if a mutex can be acquired immediately.
Completely drop a recursive mutex's lock count in pthread_cond_wait and restore it after resumption. Reorganize code so that gcc generates better code.
-rw-r--r--  lib/libthr/thread/thr_cond.c    |  23
-rw-r--r--  lib/libthr/thread/thr_mutex.c   | 156
-rw-r--r--  lib/libthr/thread/thr_private.h |   4
3 files changed, 103 insertions(+), 80 deletions(-)
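The problem this addresses shows up when a recursive mutex is locked more than once before pthread_cond_wait(): as the old mutex_unlock_common() in the diff below shows, the condvar path previously dropped only one recursion level, so the mutex stayed owned while the thread slept and any signaller deadlocked. A minimal demonstration (illustrative only, not part of the commit; error checking omitted):

#include <pthread.h>

static pthread_mutex_t m;
static pthread_cond_t  cv = PTHREAD_COND_INITIALIZER;
static int ready;

static void *
waiter(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&m);
        pthread_mutex_lock(&m);         /* recursion count is now 2 */
        while (!ready)
                pthread_cond_wait(&cv, &m);     /* must drop both levels */
        /* On return the full count is restored; unlock twice. */
        pthread_mutex_unlock(&m);
        pthread_mutex_unlock(&m);
        return (NULL);
}

int
main(void)
{
        pthread_mutexattr_t attr;
        pthread_t td;

        pthread_mutexattr_init(&attr);
        pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
        pthread_mutex_init(&m, &attr);
        pthread_create(&td, NULL, waiter, NULL);

        pthread_mutex_lock(&m);         /* deadlocked here on the old code
                                           if the waiter was already asleep */
        ready = 1;
        pthread_cond_signal(&cv);
        pthread_mutex_unlock(&m);
        pthread_join(td, NULL);
        return (0);
}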
diff --git a/lib/libthr/thread/thr_cond.c b/lib/libthr/thread/thr_cond.c
index 108047d..f786062 100644
--- a/lib/libthr/thread/thr_cond.c
+++ b/lib/libthr/thread/thr_cond.c
@@ -160,18 +160,19 @@ struct cond_cancel_info
pthread_mutex_t *mutex;
pthread_cond_t *cond;
long seqno;
+ int count;
};
static void
cond_cancel_handler(void *arg)
{
struct pthread *curthread = _get_curthread();
- struct cond_cancel_info *cci = (struct cond_cancel_info *)arg;
+ struct cond_cancel_info *info = (struct cond_cancel_info *)arg;
pthread_cond_t cv;
- cv = *(cci->cond);
+ cv = *(info->cond);
THR_LOCK_ACQUIRE(curthread, &cv->c_lock);
- if (cv->c_seqno != cci->seqno && cv->c_wakeups != 0) {
+ if (cv->c_seqno != info->seqno && cv->c_wakeups != 0) {
if (cv->c_waiters > 0) {
cv->c_seqno++;
_thr_umtx_wake(&cv->c_seqno, 1);
@@ -182,7 +183,7 @@ cond_cancel_handler(void *arg)
}
THR_LOCK_RELEASE(curthread, &cv->c_lock);
- _mutex_cv_lock(cci->mutex);
+ _mutex_cv_lock(info->mutex, info->count);
}
static int
@@ -191,7 +192,7 @@ cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex,
{
struct pthread *curthread = _get_curthread();
struct timespec ts, ts2, *tsp;
- struct cond_cancel_info cci;
+ struct cond_cancel_info info;
pthread_cond_t cv;
long seq, oldseq;
int oldcancel;
@@ -207,15 +208,15 @@ cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex,
cv = *cond;
THR_LOCK_ACQUIRE(curthread, &cv->c_lock);
- ret = _mutex_cv_unlock(mutex);
+ ret = _mutex_cv_unlock(mutex, &info.count);
if (ret) {
THR_LOCK_RELEASE(curthread, &cv->c_lock);
return (ret);
}
oldseq = seq = cv->c_seqno;
- cci.mutex = mutex;
- cci.cond = cond;
- cci.seqno = oldseq;
+ info.mutex = mutex;
+ info.cond = cond;
+ info.seqno = oldseq;
cv->c_waiters++;
do {
@@ -229,7 +230,7 @@ cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex,
tsp = NULL;
if (cancel) {
- THR_CLEANUP_PUSH(curthread, cond_cancel_handler, &cci);
+ THR_CLEANUP_PUSH(curthread, cond_cancel_handler, &info);
oldcancel = _thr_cancel_enter(curthread);
ret = _thr_umtx_wait(&cv->c_seqno, seq, tsp);
_thr_cancel_leave(curthread, oldcancel);
@@ -256,7 +257,7 @@ cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex,
cv->c_waiters--;
}
THR_LOCK_RELEASE(curthread, &cv->c_lock);
- _mutex_cv_lock(mutex);
+ _mutex_cv_lock(mutex, info.count);
return (ret);
}
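The recursion count now travels through struct cond_cancel_info, so both the normal return path and the cancellation handler restore exactly the depth that was given up. A toy model of the bookkeeping (an assumption-laden sketch, not libthr code; toy_mutex, toy_cv_unlock and toy_cv_lock are hypothetical stand-ins for the internals changed in thr_mutex.c below):

struct toy_mutex {
        int     m_count;        /* recursive locks on top of the first */
        int     m_refcount;     /* waiters that expect to relock */
};

static int
toy_cv_unlock(struct toy_mutex *m, int *count)
{
        *count = m->m_count;    /* save the full recursion depth */
        m->m_refcount++;
        m->m_count = 0;         /* drop the mutex completely */
        return (0);
}

static int
toy_cv_lock(struct toy_mutex *m, int count)
{
        m->m_refcount--;
        m->m_count += count;    /* restore the saved depth */
        return (0);
}

int
main(void)
{
        struct toy_mutex m = { 2, 0 };  /* locked twice recursively */
        int saved;

        toy_cv_unlock(&m, &saved);      /* m.m_count == 0 while waiting */
        toy_cv_lock(&m, saved);         /* m.m_count == 2 again */
        return (m.m_count == 2 ? 0 : 1);
}

Keeping the count in the on-stack info structure means even a cancelled waiter relocks at the depth it originally held.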
diff --git a/lib/libthr/thread/thr_mutex.c b/lib/libthr/thread/thr_mutex.c
index fdceadf..e79e014 100644
--- a/lib/libthr/thread/thr_mutex.c
+++ b/lib/libthr/thread/thr_mutex.c
@@ -77,7 +77,7 @@ int __pthread_mutex_timedlock(pthread_mutex_t *mutex,
static int mutex_self_trylock(pthread_mutex_t);
static int mutex_self_lock(pthread_mutex_t,
const struct timespec *abstime);
-static int mutex_unlock_common(pthread_mutex_t *, int);
+static int mutex_unlock_common(pthread_mutex_t *);
__weak_reference(__pthread_mutex_init, pthread_mutex_init);
__weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
@@ -285,34 +285,36 @@ int
__pthread_mutex_trylock(pthread_mutex_t *mutex)
{
struct pthread *curthread = _get_curthread();
- int ret = 0;
+ int ret;
/*
* If the mutex is statically initialized, perform the dynamic
* initialization:
*/
- if ((*mutex != NULL) ||
- ((ret = init_static(curthread, mutex)) == 0))
- ret = mutex_trylock_common(curthread, mutex);
-
- return (ret);
+ if (__predict_false(*mutex == NULL)) {
+ ret = init_static(curthread, mutex);
+ if (__predict_false(ret))
+ return (ret);
+ }
+ return (mutex_trylock_common(curthread, mutex));
}
int
_pthread_mutex_trylock(pthread_mutex_t *mutex)
{
struct pthread *curthread = _get_curthread();
- int ret = 0;
+ int ret;
/*
* If the mutex is statically initialized, perform the dynamic
* initialization marking the mutex private (delete safe):
*/
- if ((*mutex != NULL) ||
- ((ret = init_static_private(curthread, mutex)) == 0))
- ret = mutex_trylock_common(curthread, mutex);
-
- return (ret);
+ if (__predict_false(*mutex == NULL)) {
+ ret = init_static_private(curthread, mutex);
+ if (__predict_false(ret))
+ return (ret);
+ }
+ return (mutex_trylock_common(curthread, mutex));
}
static int
@@ -321,7 +323,7 @@ mutex_lock_common(struct pthread *curthread, pthread_mutex_t *mutex,
{
struct timespec ts, ts2;
struct pthread_mutex *m;
- int ret = 0;
+ int ret;
m = *mutex;
ret = THR_UMTX_TRYLOCK(curthread, &m->m_lock);
@@ -336,6 +338,10 @@ mutex_lock_common(struct pthread *curthread, pthread_mutex_t *mutex,
if (abstime == NULL) {
THR_UMTX_LOCK(curthread, &m->m_lock);
ret = 0;
+ } else if (__predict_false(
+ abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
+ abstime->tv_nsec >= 1000000000)) {
+ ret = EINVAL;
} else {
clock_gettime(CLOCK_REALTIME, &ts);
TIMESPEC_SUB(&ts2, abstime, &ts);
@@ -361,7 +367,7 @@ int
__pthread_mutex_lock(pthread_mutex_t *m)
{
struct pthread *curthread;
- int ret = 0;
+ int ret;
_thr_check_init();
@@ -371,17 +377,19 @@ __pthread_mutex_lock(pthread_mutex_t *m)
* If the mutex is statically initialized, perform the dynamic
* initialization:
*/
- if ((*m != NULL) || ((ret = init_static(curthread, m)) == 0))
- ret = mutex_lock_common(curthread, m, NULL);
-
- return (ret);
+ if (__predict_false(*m == NULL)) {
+ ret = init_static(curthread, m);
+ if (__predict_false(ret))
+ return (ret);
+ }
+ return (mutex_lock_common(curthread, m, NULL));
}
int
_pthread_mutex_lock(pthread_mutex_t *m)
{
struct pthread *curthread;
- int ret = 0;
+ int ret;
_thr_check_init();
@@ -391,82 +399,74 @@ _pthread_mutex_lock(pthread_mutex_t *m)
* If the mutex is statically initialized, perform the dynamic
* initialization marking it private (delete safe):
*/
- if ((*m != NULL) ||
- ((ret = init_static_private(curthread, m)) == 0))
- ret = mutex_lock_common(curthread, m, NULL);
-
- return (ret);
+ if (__predict_false(*m == NULL)) {
+ ret = init_static_private(curthread, m);
+ if (__predict_false(ret))
+ return (ret);
+ }
+ return (mutex_lock_common(curthread, m, NULL));
}
int
__pthread_mutex_timedlock(pthread_mutex_t *m, const struct timespec *abstime)
{
struct pthread *curthread;
- int ret = 0;
+ int ret;
_thr_check_init();
- if (abstime != NULL && (abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
- abstime->tv_nsec >= 1000000000))
- return (EINVAL);
-
curthread = _get_curthread();
/*
* If the mutex is statically initialized, perform the dynamic
* initialization:
*/
- if ((*m != NULL) || ((ret = init_static(curthread, m)) == 0))
- ret = mutex_lock_common(curthread, m, abstime);
-
- return (ret);
+ if (__predict_false(*m == NULL)) {
+ ret = init_static(curthread, m);
+ if (__predict_false(ret))
+ return (ret);
+ }
+ return (mutex_lock_common(curthread, m, abstime));
}
int
_pthread_mutex_timedlock(pthread_mutex_t *m, const struct timespec *abstime)
{
struct pthread *curthread;
- int ret = 0;
+ int ret;
_thr_check_init();
- if (abstime != NULL && (abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
- abstime->tv_nsec >= 1000000000))
- return (EINVAL);
-
curthread = _get_curthread();
/*
* If the mutex is statically initialized, perform the dynamic
* initialization marking it private (delete safe):
*/
- if ((*m != NULL) ||
- ((ret = init_static_private(curthread, m)) == 0))
- ret = mutex_lock_common(curthread, m, abstime);
-
- return (ret);
+ if (__predict_false(*m == NULL)) {
+ ret = init_static_private(curthread, m);
+ if (__predict_false(ret))
+ return (ret);
+ }
+ return (mutex_lock_common(curthread, m, abstime));
}
int
_pthread_mutex_unlock(pthread_mutex_t *m)
{
- return (mutex_unlock_common(m, /* add reference */ 0));
-}
-
-int
-_mutex_cv_unlock(pthread_mutex_t *m)
-{
- return (mutex_unlock_common(m, /* add reference */ 1));
+ return (mutex_unlock_common(m));
}
int
-_mutex_cv_lock(pthread_mutex_t *m)
+_mutex_cv_lock(pthread_mutex_t *m, int count)
{
int ret;
ret = mutex_lock_common(_get_curthread(), m, NULL);
- if (ret == 0)
+ if (ret == 0) {
(*m)->m_refcount--;
+ (*m)->m_count += count;
+ }
return (ret);
}
@@ -557,11 +557,10 @@ mutex_self_lock(pthread_mutex_t m, const struct timespec *abstime)
}
static int
-mutex_unlock_common(pthread_mutex_t *mutex, int add_reference)
+mutex_unlock_common(pthread_mutex_t *mutex)
{
struct pthread *curthread = _get_curthread();
struct pthread_mutex *m;
- int ret = 0;
if (__predict_false((m = *mutex) == NULL))
return (EINVAL);
@@ -569,29 +568,52 @@ mutex_unlock_common(pthread_mutex_t *mutex, int add_reference)
/*
* Check if the running thread is not the owner of the mutex.
*/
- if (__predict_false(m->m_owner != curthread)) {
- ret = EPERM;
- } else if (__predict_false(
+ if (__predict_false(m->m_owner != curthread))
+ return (EPERM);
+
+ if (__predict_false(
m->m_type == PTHREAD_MUTEX_RECURSIVE &&
m->m_count > 0)) {
m->m_count--;
- if (add_reference)
- m->m_refcount++;
} else {
- /*
- * Clear the count in case this is a recursive mutex.
- */
- m->m_count = 0;
m->m_owner = NULL;
/* Remove the mutex from the threads queue. */
MUTEX_ASSERT_IS_OWNED(m);
TAILQ_REMOVE(&curthread->mutexq, m, m_qe);
MUTEX_INIT_LINK(m);
- if (add_reference)
- m->m_refcount++;
THR_UMTX_UNLOCK(curthread, &m->m_lock);
}
- return (ret);
+ return (0);
+}
+
+int
+_mutex_cv_unlock(pthread_mutex_t *mutex, int *count)
+{
+ struct pthread *curthread = _get_curthread();
+ struct pthread_mutex *m;
+
+ if (__predict_false((m = *mutex) == NULL))
+ return (EINVAL);
+
+ /*
+ * Check if the running thread is not the owner of the mutex.
+ */
+ if (__predict_false(m->m_owner != curthread))
+ return (EPERM);
+
+ /*
+ * Clear the count in case this is a recursive mutex.
+ */
+ *count = m->m_count;
+ m->m_refcount++;
+ m->m_count = 0;
+ m->m_owner = NULL;
+ /* Remove the mutex from the threads queue. */
+ MUTEX_ASSERT_IS_OWNED(m);
+ TAILQ_REMOVE(&curthread->mutexq, m, m_qe);
+ MUTEX_INIT_LINK(m);
+ THR_UMTX_UNLOCK(curthread, &m->m_lock);
+ return (0);
}
void
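Two reorganizations in this file work together: __predict_false() pushes the static-initializer and error paths out of the hot sequence, and the timespec validation moves from the pthread_mutex_timedlock() wrappers into mutex_lock_common(), after the initial trylock. A condensed sketch of the resulting shape (hypothetical try_lock() and wait_lock() stand in for the THR_UMTX_* primitives; timedlock_sketch is not a real function):

#include <sys/cdefs.h>          /* __predict_false() on FreeBSD */
#include <errno.h>
#include <time.h>

static int
try_lock(void)
{
        return (EBUSY);         /* pretend the mutex is contended */
}

static int
wait_lock(const struct timespec *abstime)
{
        (void)abstime;
        return (0);             /* pretend the blocking path succeeds */
}

static int
timedlock_sketch(const struct timespec *abstime)
{
        if (try_lock() == 0)
                return (0);     /* acquired: abstime never inspected */
        if (abstime == NULL)
                return (wait_lock(NULL));
        if (__predict_false(abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
            abstime->tv_nsec >= 1000000000))
                return (EINVAL); /* validated only on the slow path */
        return (wait_lock(abstime));
}

int
main(void)
{
        struct timespec bad = { -1, 0 };

        return (timedlock_sketch(&bad) == EINVAL ? 0 : 1);
}

An uncontended timedlock thus costs no more than a plain lock, which is what the first line of the commit message is about.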
diff --git a/lib/libthr/thread/thr_private.h b/lib/libthr/thread/thr_private.h
index f7fa9ab..bc91b94 100644
--- a/lib/libthr/thread/thr_private.h
+++ b/lib/libthr/thread/thr_private.h
@@ -642,8 +642,8 @@ extern umtx_t _thr_event_lock __hidden;
*/
__BEGIN_DECLS
int _thr_setthreaded(int) __hidden;
-int _mutex_cv_lock(pthread_mutex_t *) __hidden;
-int _mutex_cv_unlock(pthread_mutex_t *) __hidden;
+int _mutex_cv_lock(pthread_mutex_t *, int count) __hidden;
+int _mutex_cv_unlock(pthread_mutex_t *, int *count) __hidden;
int _mutex_reinit(pthread_mutex_t *) __hidden;
void _mutex_fork(struct pthread *curthread) __hidden;
void _mutex_unlock_private(struct pthread *) __hidden;