author     deischen <deischen@FreeBSD.org>    2004-12-18 18:07:37 +0000
committer  deischen <deischen@FreeBSD.org>    2004-12-18 18:07:37 +0000
commit     a5b13ff571d8e5b6e117756821e8aa5beb7b5d2c (patch)
tree       3341fdd3fcdb8d611509d7622be5b25f271de80a  /lib/libkse/thread/thr_mutex.c
parent     b231943e0e593788032a55590d7960ae015bddd2 (diff)
Use a generic way to back threads out of wait queues when handling
signals instead of having more intricate knowledge of thread state
within signal handling.

Simplify signal code because of above (by David Xu).

Use macros for libpthread usage of pthread_cleanup_push() and
pthread_cleanup_pop(). This removes some instances of malloc()
and free() from the semaphore and pthread_once() implementations.

When single threaded and forking(), make sure that the current
thread's signal mask is inherited by the forked thread.

Use private mutexes for libc and libpthread. Signals are deferred
while threads hold private mutexes.

This fix also breaks www/linuxpluginwrapper; a patch that fixes it
is at http://people.freebsd.org/~deischen/kse/linuxpluginwrapper.diff

Fix race condition in condition variables where handling a signal
(pthread_kill() or kill()) may not see a wakeup (pthread_cond_signal()
or pthread_cond_broadcast()).

In collaboration with: davidxu
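
The private-mutex change runs through most of the diff below: acquiring
a mutex flagged MUTEX_FLAGS_PRIVATE now does THR_CRITICAL_ENTER() and
unlocking it does THR_CRITICAL_LEAVE(), so signal delivery is deferred
for as long as the lock is held. As a rough stand-alone sketch of the
same idea (not the libpthread implementation: it masks signals with
pthread_sigmask() instead of the internal critical-region counter, and
priv_lock()/priv_unlock() are hypothetical names, not part of this
commit):

	/*
	 * Sketch only: defer signals while an internal lock is held.
	 * The commit does this with THR_CRITICAL_ENTER()/LEAVE(); this
	 * approximation uses pthread_sigmask() instead.
	 */
	#include <pthread.h>
	#include <signal.h>

	static pthread_mutex_t priv_mtx = PTHREAD_MUTEX_INITIALIZER;

	/* Take the private lock with all blockable signals masked. */
	static void
	priv_lock(sigset_t *oset)
	{
		sigset_t all;

		sigfillset(&all);
		/* Mask first, so no handler runs while the lock is held. */
		pthread_sigmask(SIG_BLOCK, &all, oset);
		pthread_mutex_lock(&priv_mtx);
	}

	/* Drop the lock, then restore the caller's previous mask. */
	static void
	priv_unlock(const sigset_t *oset)
	{
		pthread_mutex_unlock(&priv_mtx);
		pthread_sigmask(SIG_SETMASK, oset, NULL);
	}

This is also why mutex_self_lock() below can simply PANIC() on
recursion for a private mutex: libc and libpthread control every
acquisition of these locks, so recursing on one is a bug, never a
supported pattern.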
Diffstat (limited to 'lib/libkse/thread/thr_mutex.c')
-rw-r--r--  lib/libkse/thread/thr_mutex.c  |  92
1 file changed, 79 insertions(+), 13 deletions(-)
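
The "generic way to back threads out of wait queues" from the commit
message is visible in the hunks below: the exported
_mutex_lock_backout() becomes a static mutex_lock_backout(void *), each
blocking path registers it through curthread->sigbackout before
sleeping, and curthread->data.mutex and the hook are cleared only once
the thread is known to be dequeued. A minimal illustration of that
callback shape (struct thread and signal_backout() are hypothetical
stand-ins, not the libkse definitions):

	#include <stddef.h>

	/* Hypothetical stand-in for the relevant part of struct pthread. */
	struct thread {
		void	(*sigbackout)(void *);	/* set before sleeping on a queue */
	};

	/*
	 * Hypothetical signal-delivery hook: rather than encoding
	 * knowledge of every wait queue, it invokes whatever backout
	 * callback the thread registered; the callback dequeues the
	 * thread and clears sigbackout itself, as mutex_lock_backout()
	 * does in the diff.
	 */
	static void
	signal_backout(struct thread *td)
	{
		if (td->sigbackout != NULL)
			td->sigbackout(td);
	}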
diff --git a/lib/libkse/thread/thr_mutex.c b/lib/libkse/thread/thr_mutex.c
index 047dca5..b502c15 100644
--- a/lib/libkse/thread/thr_mutex.c
+++ b/lib/libkse/thread/thr_mutex.c
@@ -85,26 +85,26 @@ static void mutex_rescan_owned (struct pthread *, struct pthread *,
static inline pthread_t mutex_queue_deq(pthread_mutex_t);
static inline void mutex_queue_remove(pthread_mutex_t, pthread_t);
static inline void mutex_queue_enq(pthread_mutex_t, pthread_t);
-
+static void mutex_lock_backout(void *arg);
static struct pthread_mutex_attr static_mutex_attr =
PTHREAD_MUTEXATTR_STATIC_INITIALIZER;
static pthread_mutexattr_t static_mattr = &static_mutex_attr;
/* Single underscore versions provided for libc internal usage: */
+__weak_reference(__pthread_mutex_init, pthread_mutex_init);
__weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
__weak_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock);
__weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);
/* No difference between libc and application usage of these: */
-__weak_reference(_pthread_mutex_init, pthread_mutex_init);
__weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
__weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);
int
-_pthread_mutex_init(pthread_mutex_t *mutex,
+__pthread_mutex_init(pthread_mutex_t *mutex,
const pthread_mutexattr_t *mutex_attr)
{
struct pthread_mutex *pmutex;
@@ -206,6 +206,22 @@ _pthread_mutex_init(pthread_mutex_t *mutex,
return (ret);
}
+int
+_pthread_mutex_init(pthread_mutex_t *mutex,
+ const pthread_mutexattr_t *mutex_attr)
+{
+ struct pthread_mutex_attr mattr, *mattrp;
+
+ if ((mutex_attr == NULL) || (*mutex_attr == NULL))
+ return (__pthread_mutex_init(mutex, &static_mattr));
+ else {
+ mattr = **mutex_attr;
+ mattr.m_flags |= MUTEX_FLAGS_PRIVATE;
+ mattrp = &mattr;
+ return (__pthread_mutex_init(mutex, &mattrp));
+ }
+}
+
void
_thr_mutex_reinit(pthread_mutex_t *mutex)
{
@@ -303,6 +319,7 @@ init_static_private(struct pthread *thread, pthread_mutex_t *mutex)
static int
mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex)
{
+ int private;
int ret = 0;
THR_ASSERT((mutex != NULL) && (*mutex != NULL),
@@ -310,6 +327,7 @@ mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex)
/* Lock the mutex structure: */
THR_LOCK_ACQUIRE(curthread, &(*mutex)->m_lock);
+ private = (*mutex)->m_flags & MUTEX_FLAGS_PRIVATE;
/*
* If the mutex was statically allocated, properly
@@ -417,6 +435,9 @@ mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex)
break;
}
+ if (ret == 0 && private)
+ THR_CRITICAL_ENTER(curthread);
+
/* Unlock the mutex structure: */
THR_LOCK_RELEASE(curthread, &(*mutex)->m_lock);
@@ -468,6 +489,7 @@ static int
mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m,
const struct timespec * abstime)
{
+ int private;
int ret = 0;
THR_ASSERT((m != NULL) && (*m != NULL),
@@ -482,6 +504,8 @@ mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m,
curthread->timeout = 0;
curthread->wakeup_time.tv_sec = -1;
+ private = (*m)->m_flags & MUTEX_FLAGS_PRIVATE;
+
/*
* Enter a loop waiting to become the mutex owner. We need a
* loop in case the waiting thread is interrupted by a signal
@@ -516,6 +540,8 @@ mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m,
MUTEX_ASSERT_NOT_OWNED(*m);
TAILQ_INSERT_TAIL(&curthread->mutexq,
(*m), m_qe);
+ if (private)
+ THR_CRITICAL_ENTER(curthread);
/* Unlock the mutex structure: */
THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
@@ -539,6 +565,7 @@ mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m,
*/
mutex_queue_enq(*m, curthread);
curthread->data.mutex = *m;
+ curthread->sigbackout = mutex_lock_backout;
/*
* This thread is active and is in a critical
* region (holding the mutex lock); we should
@@ -554,12 +581,17 @@ mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m,
/* Schedule the next thread: */
_thr_sched_switch(curthread);
- curthread->data.mutex = NULL;
if (THR_IN_MUTEXQ(curthread)) {
THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
mutex_queue_remove(*m, curthread);
THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
}
+ /*
+ * Only clear these after assuring the
+ * thread is dequeued.
+ */
+ curthread->data.mutex = NULL;
+ curthread->sigbackout = NULL;
}
break;
@@ -590,6 +622,8 @@ mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m,
MUTEX_ASSERT_NOT_OWNED(*m);
TAILQ_INSERT_TAIL(&curthread->mutexq,
(*m), m_qe);
+ if (private)
+ THR_CRITICAL_ENTER(curthread);
/* Unlock the mutex structure: */
THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
@@ -613,6 +647,7 @@ mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m,
*/
mutex_queue_enq(*m, curthread);
curthread->data.mutex = *m;
+ curthread->sigbackout = mutex_lock_backout;
/*
* This thread is active and is in a critical
@@ -633,12 +668,17 @@ mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m,
/* Schedule the next thread: */
_thr_sched_switch(curthread);
- curthread->data.mutex = NULL;
if (THR_IN_MUTEXQ(curthread)) {
THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
mutex_queue_remove(*m, curthread);
THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
}
+ /*
+ * Only clear these after assuring the
+ * thread is dequeued.
+ */
+ curthread->data.mutex = NULL;
+ curthread->sigbackout = NULL;
}
break;
@@ -679,6 +719,8 @@ mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m,
MUTEX_ASSERT_NOT_OWNED(*m);
TAILQ_INSERT_TAIL(&curthread->mutexq,
(*m), m_qe);
+ if (private)
+ THR_CRITICAL_ENTER(curthread);
/* Unlock the mutex structure: */
THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
@@ -702,6 +744,7 @@ mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m,
*/
mutex_queue_enq(*m, curthread);
curthread->data.mutex = *m;
+ curthread->sigbackout = mutex_lock_backout;
/* Clear any previous error: */
curthread->error = 0;
@@ -722,12 +765,17 @@ mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m,
/* Schedule the next thread: */
_thr_sched_switch(curthread);
- curthread->data.mutex = NULL;
if (THR_IN_MUTEXQ(curthread)) {
THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
mutex_queue_remove(*m, curthread);
THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
}
+ /*
+ * Only clear these after assuring the
+ * thread is dequeued.
+ */
+ curthread->data.mutex = NULL;
+ curthread->sigbackout = NULL;
/*
* The threads priority may have changed while
@@ -932,6 +980,13 @@ mutex_self_lock(struct pthread *curthread, pthread_mutex_t m)
{
int ret = 0;
+ /*
+ * Don't allow evil recursive mutexes for private use
+ * in libc and libpthread.
+ */
+ if (m->m_flags & MUTEX_FLAGS_PRIVATE)
+ PANIC("Recurse on a private mutex.");
+
switch (m->m_type) {
/* case PTHREAD_MUTEX_DEFAULT: */
case PTHREAD_MUTEX_ERRORCHECK:
@@ -1135,8 +1190,13 @@ mutex_unlock_common(pthread_mutex_t *m, int add_reference)
/* Increment the reference count: */
(*m)->m_refcount++;
+ /* Leave the critical region if this is a private mutex. */
+ if ((ret == 0) && ((*m)->m_flags & MUTEX_FLAGS_PRIVATE))
+ THR_CRITICAL_LEAVE(curthread);
+
/* Unlock the mutex structure: */
THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
+
if (kmbx != NULL)
kse_wakeup(kmbx);
}
@@ -1511,9 +1571,10 @@ _mutex_unlock_private(pthread_t pthread)
* This is called by the current thread when it wants to back out of a
* mutex_lock in order to run a signal handler.
*/
-void
-_mutex_lock_backout(struct pthread *curthread)
+static void
+mutex_lock_backout(void *arg)
{
+ struct pthread *curthread = (struct pthread *)arg;
struct pthread_mutex *m;
if ((curthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) {
@@ -1554,6 +1615,8 @@ _mutex_lock_backout(struct pthread *curthread)
THR_LOCK_RELEASE(curthread, &m->m_lock);
}
}
+ /* No need to call this again. */
+ curthread->sigbackout = NULL;
}
/*
@@ -1674,13 +1737,16 @@ mutex_handoff(struct pthread *curthread, struct pthread_mutex *mutex)
(pthread->active_priority > curthread->active_priority))
curthread->critical_yield = 1;
- THR_SCHED_UNLOCK(curthread, pthread);
- if (mutex->m_owner == pthread)
+ if (mutex->m_owner == pthread) {
/* We're done; a valid owner was found. */
+ if (mutex->m_flags & MUTEX_FLAGS_PRIVATE)
+ THR_CRITICAL_ENTER(pthread);
+ THR_SCHED_UNLOCK(curthread, pthread);
break;
- else
- /* Get the next thread from the waiting queue: */
- pthread = TAILQ_NEXT(pthread, sqe);
+ }
+ THR_SCHED_UNLOCK(curthread, pthread);
+ /* Get the next thread from the waiting queue: */
+ pthread = TAILQ_NEXT(pthread, sqe);
}
if ((pthread == NULL) && (mutex->m_protocol == PTHREAD_PRIO_INHERIT))
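
One detail worth noting in the final mutex_handoff() hunk above:
THR_SCHED_UNLOCK() used to run before the owner check, but now, once a
valid owner is found, THR_CRITICAL_ENTER(pthread) runs for the new
owner while its scheduling lock is still held, and only then is the
lock released. That closes the window in which a thread could own a
private mutex without yet being in a critical region. The shape of the
fix, with hypothetical names standing in for the libkse macros:

	#include <pthread.h>

	/*
	 * Hypothetical stand-ins for the woken thread's scheduler lock
	 * and its critical-region count.
	 */
	struct waiter {
		pthread_mutex_t	sched_lock;	/* role of THR_SCHED_LOCK */
		int		critical;	/* role of the critical count */
	};

	static void
	handoff_to(struct waiter *w)
	{
		pthread_mutex_lock(&w->sched_lock);
		w->critical++;				/* THR_CRITICAL_ENTER(pthread) */
		pthread_mutex_unlock(&w->sched_lock);	/* THR_SCHED_UNLOCK last */
	}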