Diffstat (limited to 'lib/libpthread/thread/thr_mutex.c')
 -rw-r--r--  lib/libpthread/thread/thr_mutex.c | 1210
 1 file changed, 641 insertions(+), 569 deletions(-)
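Two changes dominate this diff: the mutex structure is now protected by a per-mutex lock (m_lock, taken with THR_LOCK_ACQUIRE/THR_LOCK_RELEASE) rather than a spinlock bracketed by signal deferral, and an unlock now hands the mutex directly to a waiter (see mutex_handoff() below) instead of waking it to re-contend. A minimal sketch of the guard-lock pattern, with a POSIX spinlock standing in for the library's adaptive lock; toy_mutex, toy_init, and toy_trylock are hypothetical names, not part of this code:

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>

/* Hypothetical stand-ins for struct pthread_mutex and its m_lock guard. */
struct toy_mutex {
	pthread_spinlock_t guard;	/* plays the role of m_lock */
	pthread_t owner;		/* plays the role of m_owner */
	bool locked;
};

static void
toy_init(struct toy_mutex *m)
{
	pthread_spin_init(&m->guard, PTHREAD_PROCESS_PRIVATE);
	m->locked = false;
}

static int
toy_trylock(struct toy_mutex *m)
{
	int ret = 0;

	pthread_spin_lock(&m->guard);		/* THR_LOCK_ACQUIRE */
	if (!m->locked) {
		m->locked = true;		/* take ownership */
		m->owner = pthread_self();
	} else
		ret = EBUSY;			/* someone else owns it */
	pthread_spin_unlock(&m->guard);		/* THR_LOCK_RELEASE */
	return (ret);
}

Note that every path in the diff releases m_lock before calling _thr_sched_switch(); the guard is held only for bookkeeping, never across a sleep.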
diff --git a/lib/libpthread/thread/thr_mutex.c b/lib/libpthread/thread/thr_mutex.c
index 7b22fb5..1ae12ea9 100644
--- a/lib/libpthread/thread/thr_mutex.c
+++ b/lib/libpthread/thread/thr_mutex.c
@@ -40,47 +40,52 @@
#include "thr_private.h"
#if defined(_PTHREADS_INVARIANTS)
-#define _MUTEX_INIT_LINK(m) do { \
+#define MUTEX_INIT_LINK(m) do { \
(m)->m_qe.tqe_prev = NULL; \
(m)->m_qe.tqe_next = NULL; \
} while (0)
-#define _MUTEX_ASSERT_IS_OWNED(m) do { \
+#define MUTEX_ASSERT_IS_OWNED(m) do { \
if ((m)->m_qe.tqe_prev == NULL) \
PANIC("mutex is not on list"); \
} while (0)
-#define _MUTEX_ASSERT_NOT_OWNED(m) do { \
+#define MUTEX_ASSERT_NOT_OWNED(m) do { \
if (((m)->m_qe.tqe_prev != NULL) || \
((m)->m_qe.tqe_next != NULL)) \
PANIC("mutex is on list"); \
} while (0)
+#define THR_ASSERT_NOT_IN_SYNCQ(thr) do { \
+ THR_ASSERT(((thr)->sflags & THR_FLAGS_IN_SYNCQ) == 0, \
+ "thread in syncq when it shouldn't be."); \
+} while (0)
#else
-#define _MUTEX_INIT_LINK(m)
-#define _MUTEX_ASSERT_IS_OWNED(m)
-#define _MUTEX_ASSERT_NOT_OWNED(m)
+#define MUTEX_INIT_LINK(m)
+#define MUTEX_ASSERT_IS_OWNED(m)
+#define MUTEX_ASSERT_NOT_OWNED(m)
+#define THR_ASSERT_NOT_IN_SYNCQ(thr)
#endif
/*
* Prototypes
*/
-static inline int mutex_self_trylock(pthread_mutex_t);
-static inline int mutex_self_lock(pthread_mutex_t);
-static inline int mutex_unlock_common(pthread_mutex_t *, int);
-static void mutex_priority_adjust(pthread_mutex_t);
-static void mutex_rescan_owned (pthread_t, pthread_mutex_t);
+static void mutex_handoff(struct pthread *, struct pthread_mutex *);
+static inline int mutex_self_trylock(struct pthread *, pthread_mutex_t);
+static inline int mutex_self_lock(struct pthread *, pthread_mutex_t);
+static int mutex_unlock_common(pthread_mutex_t *, int);
+static void mutex_priority_adjust(struct pthread *, pthread_mutex_t);
+static void mutex_rescan_owned(struct pthread *, struct pthread *,
+ struct pthread_mutex *);
static inline pthread_t mutex_queue_deq(pthread_mutex_t);
static inline void mutex_queue_remove(pthread_mutex_t, pthread_t);
static inline void mutex_queue_enq(pthread_mutex_t, pthread_t);
-static spinlock_t static_init_lock = _SPINLOCK_INITIALIZER;
-
static struct pthread_mutex_attr static_mutex_attr =
PTHREAD_MUTEXATTR_STATIC_INITIALIZER;
static pthread_mutexattr_t static_mattr = &static_mutex_attr;
/* Single underscore versions provided for libc internal usage: */
-__weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);
__weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
+__weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);
/* No difference between libc and application usage of these: */
__weak_reference(_pthread_mutex_init, pthread_mutex_init);
@@ -88,45 +93,16 @@ __weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
__weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);
-/* Reinitialize a mutex to defaults. */
-int
-_mutex_reinit(pthread_mutex_t * mutex)
-{
- int ret = 0;
-
- if (mutex == NULL)
- ret = EINVAL;
- else if (*mutex == NULL)
- ret = pthread_mutex_init(mutex, NULL);
- else {
- /*
- * Initialize the mutex structure:
- */
- (*mutex)->m_type = PTHREAD_MUTEX_DEFAULT;
- (*mutex)->m_protocol = PTHREAD_PRIO_NONE;
- TAILQ_INIT(&(*mutex)->m_queue);
- (*mutex)->m_owner = NULL;
- (*mutex)->m_data.m_count = 0;
- (*mutex)->m_flags &= MUTEX_FLAGS_PRIVATE;
- (*mutex)->m_flags |= MUTEX_FLAGS_INITED;
- (*mutex)->m_refcount = 0;
- (*mutex)->m_prio = 0;
- (*mutex)->m_saved_prio = 0;
- _MUTEX_INIT_LINK(*mutex);
- memset(&(*mutex)->lock, 0, sizeof((*mutex)->lock));
- }
- return (ret);
-}
int
-_pthread_mutex_init(pthread_mutex_t * mutex,
- const pthread_mutexattr_t * mutex_attr)
+_pthread_mutex_init(pthread_mutex_t *mutex,
+ const pthread_mutexattr_t *mutex_attr)
{
- enum pthread_mutextype type;
+ struct pthread_mutex *pmutex;
+ enum pthread_mutextype type;
int protocol;
int ceiling;
int flags;
- pthread_mutex_t pmutex;
int ret = 0;
if (mutex == NULL)
@@ -137,7 +113,7 @@ _pthread_mutex_init(pthread_mutex_t * mutex,
/* Default to a (error checking) POSIX mutex: */
type = PTHREAD_MUTEX_ERRORCHECK;
protocol = PTHREAD_PRIO_NONE;
- ceiling = PTHREAD_MAX_PRIORITY;
+ ceiling = THR_MAX_PRIORITY;
flags = 0;
}
@@ -166,7 +142,12 @@ _pthread_mutex_init(pthread_mutex_t * mutex,
if ((pmutex = (pthread_mutex_t)
malloc(sizeof(struct pthread_mutex))) == NULL)
ret = ENOMEM;
- else {
+ else if (_lock_init(&pmutex->m_lock, LCK_ADAPTIVE,
+ _thr_lock_wait, _thr_lock_wakeup) != 0) {
+ free(pmutex);
+ *mutex = NULL;
+ ret = ENOMEM;
+ } else {
/* Set the mutex flags: */
pmutex->m_flags = flags;
@@ -181,7 +162,7 @@ _pthread_mutex_init(pthread_mutex_t * mutex,
/* Single UNIX Spec 2 recursive mutex: */
case PTHREAD_MUTEX_RECURSIVE:
/* Reset the mutex count: */
- pmutex->m_data.m_count = 0;
+ pmutex->m_count = 0;
break;
/* Trap invalid mutex types: */
@@ -201,10 +182,9 @@ _pthread_mutex_init(pthread_mutex_t * mutex,
if (protocol == PTHREAD_PRIO_PROTECT)
pmutex->m_prio = ceiling;
else
- pmutex->m_prio = 0;
+ pmutex->m_prio = -1;
pmutex->m_saved_prio = 0;
- _MUTEX_INIT_LINK(pmutex);
- memset(&pmutex->lock, 0, sizeof(pmutex->lock));
+ MUTEX_INIT_LINK(pmutex);
*mutex = pmutex;
} else {
free(pmutex);
@@ -213,19 +193,21 @@ _pthread_mutex_init(pthread_mutex_t * mutex,
}
}
/* Return the completion status: */
- return(ret);
+ return (ret);
}
int
-_pthread_mutex_destroy(pthread_mutex_t * mutex)
+_pthread_mutex_destroy(pthread_mutex_t *mutex)
{
- int ret = 0;
+ struct pthread *curthread = _get_curthread();
+ pthread_mutex_t m;
+ int ret = 0;
if (mutex == NULL || *mutex == NULL)
ret = EINVAL;
else {
/* Lock the mutex structure: */
- _SPINLOCK(&(*mutex)->lock);
+ THR_LOCK_ACQUIRE(curthread, &(*mutex)->m_lock);
/*
* Check to see if this mutex is in use:
@@ -236,21 +218,24 @@ _pthread_mutex_destroy(pthread_mutex_t * mutex)
ret = EBUSY;
/* Unlock the mutex structure: */
- _SPINUNLOCK(&(*mutex)->lock);
- }
- else {
+ THR_LOCK_RELEASE(curthread, &(*mutex)->m_lock);
+ } else {
/*
- * Free the memory allocated for the mutex
- * structure:
+ * Save a pointer to the mutex so it can be freed
+ * and set the caller's pointer to NULL:
*/
- _MUTEX_ASSERT_NOT_OWNED(*mutex);
- free(*mutex);
+ m = *mutex;
+ *mutex = NULL;
+
+ /* Unlock the mutex structure: */
+ THR_LOCK_RELEASE(curthread, &m->m_lock);
/*
- * Leave the caller's pointer NULL now that
- * the mutex has been destroyed:
+ * Free the memory allocated for the mutex
+ * structure:
*/
- *mutex = NULL;
+ MUTEX_ASSERT_NOT_OWNED(m);
+ free(m);
}
}
@@ -259,56 +244,49 @@ _pthread_mutex_destroy(pthread_mutex_t * mutex)
}
static int
-init_static(pthread_mutex_t *mutex)
+init_static(struct pthread *thread, pthread_mutex_t *mutex)
{
- int ret;
+ int ret;
- _SPINLOCK(&static_init_lock);
+ THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);
if (*mutex == NULL)
ret = pthread_mutex_init(mutex, NULL);
else
ret = 0;
- _SPINUNLOCK(&static_init_lock);
+ THR_LOCK_RELEASE(thread, &_mutex_static_lock);
- return(ret);
+ return (ret);
}
static int
-init_static_private(pthread_mutex_t *mutex)
+init_static_private(struct pthread *thread, pthread_mutex_t *mutex)
{
- int ret;
+ int ret;
- _SPINLOCK(&static_init_lock);
+ THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);
if (*mutex == NULL)
ret = pthread_mutex_init(mutex, &static_mattr);
else
ret = 0;
- _SPINUNLOCK(&static_init_lock);
+ THR_LOCK_RELEASE(thread, &_mutex_static_lock);
- return(ret);
+ return (ret);
}
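For reference, init_static() and init_static_private() are the usual lock-protected lazy initializer: callers test *mutex without the global lock on the fast path, and the slow path re-tests under _mutex_static_lock so that only one thread performs the allocation. A minimal sketch of the same shape; init_guard and lazy_init are hypothetical:

#include <errno.h>
#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t init_guard = PTHREAD_MUTEX_INITIALIZER;

/* Initialize *slot exactly once, even with concurrent callers. */
static int
lazy_init(void **slot)
{
	int ret = 0;

	pthread_mutex_lock(&init_guard);
	if (*slot == NULL && (*slot = malloc(64)) == NULL)	/* 64: arbitrary */
		ret = ENOMEM;
	pthread_mutex_unlock(&init_guard);
	return (ret);
}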
static int
-mutex_trylock_common(pthread_mutex_t *mutex)
+mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex)
{
- struct pthread *curthread = _get_curthread();
- int ret = 0;
+ int ret = 0;
- PTHREAD_ASSERT((mutex != NULL) && (*mutex != NULL),
+ THR_ASSERT((mutex != NULL) && (*mutex != NULL),
"Uninitialized mutex in pthread_mutex_trylock_basic");
- /*
- * Defer signals to protect the scheduling queues from
- * access by the signal handler:
- */
- _thread_kern_sig_defer();
-
/* Lock the mutex structure: */
- _SPINLOCK(&(*mutex)->lock);
+ THR_LOCK_ACQUIRE(curthread, &(*mutex)->m_lock);
/*
* If the mutex was statically allocated, properly
@@ -316,7 +294,7 @@ mutex_trylock_common(pthread_mutex_t *mutex)
*/
if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
TAILQ_INIT(&(*mutex)->m_queue);
- _MUTEX_INIT_LINK(*mutex);
+ MUTEX_INIT_LINK(*mutex);
(*mutex)->m_flags |= MUTEX_FLAGS_INITED;
}
@@ -330,11 +308,11 @@ mutex_trylock_common(pthread_mutex_t *mutex)
(*mutex)->m_owner = curthread;
/* Add to the list of owned mutexes: */
- _MUTEX_ASSERT_NOT_OWNED(*mutex);
+ MUTEX_ASSERT_NOT_OWNED(*mutex);
TAILQ_INSERT_TAIL(&curthread->mutexq,
(*mutex), m_qe);
} else if ((*mutex)->m_owner == curthread)
- ret = mutex_self_trylock(*mutex);
+ ret = mutex_self_trylock(curthread, *mutex);
else
/* Return a busy error: */
ret = EBUSY;
@@ -359,11 +337,11 @@ mutex_trylock_common(pthread_mutex_t *mutex)
curthread->inherited_priority;
/* Add to the list of owned mutexes: */
- _MUTEX_ASSERT_NOT_OWNED(*mutex);
+ MUTEX_ASSERT_NOT_OWNED(*mutex);
TAILQ_INSERT_TAIL(&curthread->mutexq,
(*mutex), m_qe);
} else if ((*mutex)->m_owner == curthread)
- ret = mutex_self_trylock(*mutex);
+ ret = mutex_self_trylock(curthread, *mutex);
else
/* Return a busy error: */
ret = EBUSY;
@@ -395,11 +373,11 @@ mutex_trylock_common(pthread_mutex_t *mutex)
(*mutex)->m_prio;
/* Add to the list of owned mutexes: */
- _MUTEX_ASSERT_NOT_OWNED(*mutex);
+ MUTEX_ASSERT_NOT_OWNED(*mutex);
TAILQ_INSERT_TAIL(&curthread->mutexq,
(*mutex), m_qe);
} else if ((*mutex)->m_owner == curthread)
- ret = mutex_self_trylock(*mutex);
+ ret = mutex_self_trylock(curthread, *mutex);
else
/* Return a busy error: */
ret = EBUSY;
@@ -413,13 +391,7 @@ mutex_trylock_common(pthread_mutex_t *mutex)
}
/* Unlock the mutex structure: */
- _SPINUNLOCK(&(*mutex)->lock);
-
- /*
- * Undefer and handle pending signals, yielding if
- * necessary:
- */
- _thread_kern_sig_undefer();
+ THR_LOCK_RELEASE(curthread, &(*mutex)->m_lock);
/* Return the completion status: */
return (ret);
@@ -428,7 +400,8 @@ mutex_trylock_common(pthread_mutex_t *mutex)
int
__pthread_mutex_trylock(pthread_mutex_t *mutex)
{
- int ret = 0;
+ struct pthread *curthread = _get_curthread();
+ int ret = 0;
if (mutex == NULL)
ret = EINVAL;
@@ -437,8 +410,9 @@ __pthread_mutex_trylock(pthread_mutex_t *mutex)
* If the mutex is statically initialized, perform the dynamic
* initialization:
*/
- else if ((*mutex != NULL) || (ret = init_static(mutex)) == 0)
- ret = mutex_trylock_common(mutex);
+ else if ((*mutex != NULL) ||
+ ((ret = init_static(curthread, mutex)) == 0))
+ ret = mutex_trylock_common(curthread, mutex);
return (ret);
}
@@ -446,6 +420,7 @@ __pthread_mutex_trylock(pthread_mutex_t *mutex)
int
_pthread_mutex_trylock(pthread_mutex_t *mutex)
{
+ struct pthread *curthread = _get_curthread();
int ret = 0;
if (mutex == NULL)
@@ -455,19 +430,19 @@ _pthread_mutex_trylock(pthread_mutex_t *mutex)
* If the mutex is statically initialized, perform the dynamic
* initialization marking the mutex private (delete safe):
*/
- else if ((*mutex != NULL) || (ret = init_static_private(mutex)) == 0)
- ret = mutex_trylock_common(mutex);
+ else if ((*mutex != NULL) ||
+ ((ret = init_static_private(curthread, mutex)) == 0))
+ ret = mutex_trylock_common(curthread, mutex);
return (ret);
}
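A short usage sketch of what the two wrappers give an application: a statically initialized mutex is filled in on first use, and a contended trylock returns EBUSY instead of blocking (try_work is a hypothetical caller):

#include <errno.h>
#include <pthread.h>

static pthread_mutex_t lk = PTHREAD_MUTEX_INITIALIZER;

static int
try_work(void)
{
	/* The first call takes the dynamic-initialization path above. */
	if (pthread_mutex_trylock(&lk) != 0)
		return (0);		/* typically EBUSY: owned elsewhere */
	/* ... critical section ... */
	pthread_mutex_unlock(&lk);
	return (1);
}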
static int
-mutex_lock_common(pthread_mutex_t * mutex)
+mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m)
{
- struct pthread *curthread = _get_curthread();
int ret = 0;
- PTHREAD_ASSERT((mutex != NULL) && (*mutex != NULL),
+ THR_ASSERT((m != NULL) && (*m != NULL),
"Uninitialized mutex in pthread_mutex_trylock_basic");
/* Reset the interrupted flag: */
@@ -482,71 +457,68 @@ mutex_lock_common(pthread_mutex_t * mutex)
* waiting queue prior to executing the signal handler.
*/
do {
- /*
- * Defer signals to protect the scheduling queues from
- * access by the signal handler:
- */
- _thread_kern_sig_defer();
-
/* Lock the mutex structure: */
- _SPINLOCK(&(*mutex)->lock);
+ THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
/*
* If the mutex was statically allocated, properly
* initialize the tail queue.
*/
- if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
- TAILQ_INIT(&(*mutex)->m_queue);
- (*mutex)->m_flags |= MUTEX_FLAGS_INITED;
- _MUTEX_INIT_LINK(*mutex);
+ if (((*m)->m_flags & MUTEX_FLAGS_INITED) == 0) {
+ TAILQ_INIT(&(*m)->m_queue);
+ (*m)->m_flags |= MUTEX_FLAGS_INITED;
+ MUTEX_INIT_LINK(*m);
}
/* Process according to mutex type: */
- switch ((*mutex)->m_protocol) {
+ switch ((*m)->m_protocol) {
/* Default POSIX mutex: */
case PTHREAD_PRIO_NONE:
- if ((*mutex)->m_owner == NULL) {
+ if ((*m)->m_owner == NULL) {
/* Lock the mutex for this thread: */
- (*mutex)->m_owner = curthread;
+ (*m)->m_owner = curthread;
/* Add to the list of owned mutexes: */
- _MUTEX_ASSERT_NOT_OWNED(*mutex);
+ MUTEX_ASSERT_NOT_OWNED(*m);
TAILQ_INSERT_TAIL(&curthread->mutexq,
- (*mutex), m_qe);
+ (*m), m_qe);
- } else if ((*mutex)->m_owner == curthread)
- ret = mutex_self_lock(*mutex);
- else {
+ /* Unlock the mutex structure: */
+ THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
+ } else if ((*m)->m_owner == curthread) {
+ ret = mutex_self_lock(curthread, *m);
+
+ /* Unlock the mutex structure: */
+ THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
+ } else {
/*
* Join the queue of threads waiting to lock
- * the mutex:
+ * the mutex and save a pointer to the mutex.
*/
- mutex_queue_enq(*mutex, curthread);
+ mutex_queue_enq(*m, curthread);
+ curthread->data.mutex = *m;
/*
- * Keep a pointer to the mutex this thread
- * is waiting on:
+ * This thread is active and is in a critical
+ * region (holding the mutex lock); we should
+ * be able to safely set the state.
*/
- curthread->data.mutex = *mutex;
+ THR_SET_STATE(curthread, PS_MUTEX_WAIT);
- /*
- * Unlock the mutex structure and schedule the
- * next thread:
- */
- _thread_kern_sched_state_unlock(PS_MUTEX_WAIT,
- &(*mutex)->lock, __FILE__, __LINE__);
+ /* Unlock the mutex structure: */
+ THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
- /* Lock the mutex structure again: */
- _SPINLOCK(&(*mutex)->lock);
+ /* Schedule the next thread: */
+ _thr_sched_switch(curthread);
}
break;
/* POSIX priority inheritence mutex: */
case PTHREAD_PRIO_INHERIT:
/* Check if this mutex is not locked: */
- if ((*mutex)->m_owner == NULL) {
+ if ((*m)->m_owner == NULL) {
/* Lock the mutex for this thread: */
- (*mutex)->m_owner = curthread;
+ (*m)->m_owner = curthread;
/* Track number of priority mutexes owned: */
curthread->priority_mutex_count++;
@@ -554,63 +526,70 @@ mutex_lock_common(pthread_mutex_t * mutex)
/*
* The mutex takes on attributes of the
* running thread when there are no waiters.
+ * Make sure the thread's scheduling lock is
+ * held while priorities are adjusted.
*/
- (*mutex)->m_prio = curthread->active_priority;
- (*mutex)->m_saved_prio =
+ THR_SCHED_LOCK(curthread, curthread);
+ (*m)->m_prio = curthread->active_priority;
+ (*m)->m_saved_prio =
curthread->inherited_priority;
- curthread->inherited_priority =
- (*mutex)->m_prio;
+ curthread->inherited_priority = (*m)->m_prio;
+ THR_SCHED_UNLOCK(curthread, curthread);
/* Add to the list of owned mutexes: */
- _MUTEX_ASSERT_NOT_OWNED(*mutex);
+ MUTEX_ASSERT_NOT_OWNED(*m);
TAILQ_INSERT_TAIL(&curthread->mutexq,
- (*mutex), m_qe);
+ (*m), m_qe);
- } else if ((*mutex)->m_owner == curthread)
- ret = mutex_self_lock(*mutex);
- else {
+ /* Unlock the mutex structure: */
+ THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
+ } else if ((*m)->m_owner == curthread) {
+ ret = mutex_self_lock(curthread, *m);
+
+ /* Unlock the mutex structure: */
+ THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
+ } else {
/*
* Join the queue of threads waiting to lock
- * the mutex:
+ * the mutex and save a pointer to the mutex.
*/
- mutex_queue_enq(*mutex, curthread);
+ mutex_queue_enq(*m, curthread);
+ curthread->data.mutex = *m;
/*
- * Keep a pointer to the mutex this thread
- * is waiting on:
+ * This thread is active and is in a critical
+ * region (holding the mutex lock); we should
+ * be able to safely set the state.
*/
- curthread->data.mutex = *mutex;
+ THR_SET_STATE(curthread, PS_MUTEX_WAIT);
- if (curthread->active_priority >
- (*mutex)->m_prio)
+ if (curthread->active_priority > (*m)->m_prio)
/* Adjust priorities: */
- mutex_priority_adjust(*mutex);
+ mutex_priority_adjust(curthread, *m);
- /*
- * Unlock the mutex structure and schedule the
- * next thread:
- */
- _thread_kern_sched_state_unlock(PS_MUTEX_WAIT,
- &(*mutex)->lock, __FILE__, __LINE__);
+ /* Unlock the mutex structure: */
+ THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
- /* Lock the mutex structure again: */
- _SPINLOCK(&(*mutex)->lock);
+ /* Schedule the next thread: */
+ _thr_sched_switch(curthread);
}
break;
/* POSIX priority protection mutex: */
case PTHREAD_PRIO_PROTECT:
/* Check for a priority ceiling violation: */
- if (curthread->active_priority > (*mutex)->m_prio)
+ if (curthread->active_priority > (*m)->m_prio) {
+ /* Unlock the mutex structure: */
+ THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
ret = EINVAL;
-
+ }
/* Check if this mutex is not locked: */
- else if ((*mutex)->m_owner == NULL) {
+ else if ((*m)->m_owner == NULL) {
/*
* Lock the mutex for the running
* thread:
*/
- (*mutex)->m_owner = curthread;
+ (*m)->m_owner = curthread;
/* Track number of priority mutexes owned: */
curthread->priority_mutex_count++;
@@ -618,45 +597,52 @@ mutex_lock_common(pthread_mutex_t * mutex)
/*
* The running thread inherits the ceiling
* priority of the mutex and executes at that
- * priority:
+ * priority. Make sure the thread's
+ * scheduling lock is held while priorities
+ * are adjusted.
*/
- curthread->active_priority = (*mutex)->m_prio;
- (*mutex)->m_saved_prio =
+ THR_SCHED_LOCK(curthread, curthread);
+ curthread->active_priority = (*m)->m_prio;
+ (*m)->m_saved_prio =
curthread->inherited_priority;
- curthread->inherited_priority =
- (*mutex)->m_prio;
+ curthread->inherited_priority = (*m)->m_prio;
+ THR_SCHED_UNLOCK(curthread, curthread);
/* Add to the list of owned mutexes: */
- _MUTEX_ASSERT_NOT_OWNED(*mutex);
+ MUTEX_ASSERT_NOT_OWNED(*m);
TAILQ_INSERT_TAIL(&curthread->mutexq,
- (*mutex), m_qe);
- } else if ((*mutex)->m_owner == curthread)
- ret = mutex_self_lock(*mutex);
- else {
- /*
- * Join the queue of threads waiting to lock
- * the mutex:
- */
- mutex_queue_enq(*mutex, curthread);
+ (*m), m_qe);
+
+ /* Unlock the mutex structure: */
+ THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
+ } else if ((*m)->m_owner == curthread) {
+ ret = mutex_self_lock(curthread, *m);
+ /* Unlock the mutex structure: */
+ THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
+ } else {
/*
- * Keep a pointer to the mutex this thread
- * is waiting on:
+ * Join the queue of threads waiting to lock
+ * the mutex and save a pointer to the mutex.
*/
- curthread->data.mutex = *mutex;
+ mutex_queue_enq(*m, curthread);
+ curthread->data.mutex = *m;
/* Clear any previous error: */
curthread->error = 0;
/*
- * Unlock the mutex structure and schedule the
- * next thread:
+ * This thread is active and is in a critical
+ * region (holding the mutex lock); we should
+ * be able to safely set the state.
*/
- _thread_kern_sched_state_unlock(PS_MUTEX_WAIT,
- &(*mutex)->lock, __FILE__, __LINE__);
+ THR_SET_STATE(curthread, PS_MUTEX_WAIT);
+
+ /* Unlock the mutex structure: */
+ THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
- /* Lock the mutex structure again: */
- _SPINLOCK(&(*mutex)->lock);
+ /* Schedule the next thread: */
+ _thr_sched_switch(curthread);
/*
* The threads priority may have changed while
@@ -670,107 +656,111 @@ mutex_lock_common(pthread_mutex_t * mutex)
/* Trap invalid mutex types: */
default:
+ /* Unlock the mutex structure: */
+ THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
+
/* Return an invalid argument error: */
ret = EINVAL;
break;
}
- /*
- * Check to see if this thread was interrupted and
- * is still in the mutex queue of waiting threads:
- */
- if (curthread->interrupted != 0)
- mutex_queue_remove(*mutex, curthread);
-
- /* Unlock the mutex structure: */
- _SPINUNLOCK(&(*mutex)->lock);
-
- /*
- * Undefer and handle pending signals, yielding if
- * necessary:
- */
- _thread_kern_sig_undefer();
- } while (((*mutex)->m_owner != curthread) && (ret == 0) &&
+ } while (((*m)->m_owner != curthread) && (ret == 0) &&
(curthread->interrupted == 0));
- if (curthread->interrupted != 0 &&
- curthread->continuation != NULL)
- curthread->continuation((void *) curthread);
+ /*
+ * Check to see if this thread was interrupted and
+ * is still in the mutex queue of waiting threads:
+ */
+ if (curthread->interrupted != 0) {
+ /* Remove this thread from the mutex queue. */
+ THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
+ if (THR_IN_SYNCQ(curthread))
+ mutex_queue_remove(*m, curthread);
+ THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
+
+ /* Check for asynchronous cancellation. */
+ if (curthread->continuation != NULL)
+ curthread->continuation((void *) curthread);
+ }
/* Return the completion status: */
return (ret);
}
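Stripped of the three protocol cases, mutex_lock_common() has a simple shape: take the guard, either acquire or enqueue, set PS_MUTEX_WAIT, drop the guard, switch away, and loop until ownership is observed. Below is a conventional condition-variable sketch of that shape; note the real code does not re-acquire on wakeup, because mutex_handoff() assigns ownership before the waiter runs. All names here are hypothetical:

#include <pthread.h>

struct toy_blocking_mutex {
	pthread_mutex_t guard;		/* plays the role of m_lock */
	pthread_cond_t handoff;		/* stands in for the scheduler */
	pthread_t owner;
	int locked;
};

/* Example instance; remaining members are zero-initialized. */
static struct toy_blocking_mutex tm = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER
};

static void
toy_lock(struct toy_blocking_mutex *m)
{
	pthread_mutex_lock(&m->guard);
	while (m->locked)			/* "PS_MUTEX_WAIT" */
		pthread_cond_wait(&m->handoff, &m->guard);
	m->locked = 1;
	m->owner = pthread_self();
	pthread_mutex_unlock(&m->guard);
}

static void
toy_unlock(struct toy_blocking_mutex *m)
{
	pthread_mutex_lock(&m->guard);
	m->locked = 0;
	pthread_cond_signal(&m->handoff);	/* wake one waiter */
	pthread_mutex_unlock(&m->guard);
}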
int
-__pthread_mutex_lock(pthread_mutex_t *mutex)
+__pthread_mutex_lock(pthread_mutex_t *m)
{
+ struct pthread *curthread;
int ret = 0;
- if (_thread_initial == NULL)
- _thread_init();
+ if (_thr_initial == NULL)
+ _libpthread_init(NULL);
- if (mutex == NULL)
+ curthread = _get_curthread();
+ if (m == NULL)
ret = EINVAL;
/*
* If the mutex is statically initialized, perform the dynamic
* initialization:
*/
- else if ((*mutex != NULL) || ((ret = init_static(mutex)) == 0))
- ret = mutex_lock_common(mutex);
+ else if ((*m != NULL) || ((ret = init_static(curthread, m)) == 0))
+ ret = mutex_lock_common(curthread, m);
return (ret);
}
int
-_pthread_mutex_lock(pthread_mutex_t *mutex)
+_pthread_mutex_lock(pthread_mutex_t *m)
{
+ struct pthread *curthread;
int ret = 0;
- if (_thread_initial == NULL)
- _thread_init();
+ if (_thr_initial == NULL)
+ _libpthread_init(NULL);
+ curthread = _get_curthread();
- if (mutex == NULL)
+ if (m == NULL)
ret = EINVAL;
/*
* If the mutex is statically initialized, perform the dynamic
* initialization marking it private (delete safe):
*/
- else if ((*mutex != NULL) || ((ret = init_static_private(mutex)) == 0))
- ret = mutex_lock_common(mutex);
+ else if ((*m != NULL) ||
+ ((ret = init_static_private(curthread, m)) == 0))
+ ret = mutex_lock_common(curthread, m);
return (ret);
}
int
-_pthread_mutex_unlock(pthread_mutex_t * mutex)
+_pthread_mutex_unlock(pthread_mutex_t *m)
{
- return (mutex_unlock_common(mutex, /* add reference */ 0));
+ return (mutex_unlock_common(m, /* add reference */ 0));
}
int
-_mutex_cv_unlock(pthread_mutex_t * mutex)
+_mutex_cv_unlock(pthread_mutex_t *m)
{
- return (mutex_unlock_common(mutex, /* add reference */ 1));
+ return (mutex_unlock_common(m, /* add reference */ 1));
}
int
-_mutex_cv_lock(pthread_mutex_t * mutex)
+_mutex_cv_lock(pthread_mutex_t *m)
{
int ret;
- if ((ret = pthread_mutex_lock(mutex)) == 0)
- (*mutex)->m_refcount--;
+ if ((ret = _pthread_mutex_lock(m)) == 0)
+ (*m)->m_refcount--;
return (ret);
}
static inline int
-mutex_self_trylock(pthread_mutex_t mutex)
+mutex_self_trylock(struct pthread *curthread, pthread_mutex_t m)
{
int ret = 0;
- switch (mutex->m_type) {
-
+ switch (m->m_type) {
/* case PTHREAD_MUTEX_DEFAULT: */
case PTHREAD_MUTEX_ERRORCHECK:
case PTHREAD_MUTEX_NORMAL:
@@ -778,12 +768,15 @@ mutex_self_trylock(pthread_mutex_t mutex)
* POSIX specifies that mutexes should return EDEADLK if a
* recursive lock is detected.
*/
- ret = EBUSY;
+ if (m->m_owner == curthread)
+ ret = EDEADLK;
+ else
+ ret = EBUSY;
break;
case PTHREAD_MUTEX_RECURSIVE:
/* Increment the lock count: */
- mutex->m_data.m_count++;
+ m->m_count++;
break;
default:
@@ -791,15 +784,15 @@ mutex_self_trylock(pthread_mutex_t mutex)
ret = EINVAL;
}
- return(ret);
+ return (ret);
}
static inline int
-mutex_self_lock(pthread_mutex_t mutex)
+mutex_self_lock(struct pthread *curthread, pthread_mutex_t m)
{
int ret = 0;
- switch (mutex->m_type) {
+ switch (m->m_type) {
/* case PTHREAD_MUTEX_DEFAULT: */
case PTHREAD_MUTEX_ERRORCHECK:
/*
@@ -814,13 +807,18 @@ mutex_self_lock(pthread_mutex_t mutex)
* What SS2 define as a 'normal' mutex. Intentionally
* deadlock on attempts to get a lock you already own.
*/
- _thread_kern_sched_state_unlock(PS_DEADLOCK,
- &mutex->lock, __FILE__, __LINE__);
+ THR_SET_STATE(curthread, PS_DEADLOCK);
+
+ /* Unlock the mutex structure: */
+ THR_LOCK_RELEASE(curthread, &m->m_lock);
+
+ /* Schedule the next thread: */
+ _thr_sched_switch(curthread);
break;
case PTHREAD_MUTEX_RECURSIVE:
/* Increment the lock count: */
- mutex->m_data.m_count++;
+ m->m_count++;
break;
default:
@@ -828,82 +826,58 @@ mutex_self_lock(pthread_mutex_t mutex)
ret = EINVAL;
}
- return(ret);
+ return (ret);
}
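The three self-lock behaviors above are exactly the POSIX menu: error-check mutexes report the problem, recursive mutexes count, and "normal" mutexes deadlock by design. A compact hypothetical restatement:

#include <errno.h>

enum toy_type { TOY_ERRORCHECK, TOY_RECURSIVE, TOY_NORMAL };

struct toy_rec_mutex {
	enum toy_type type;
	int count;			/* plays the role of m_count */
};

/* Called when the locking thread already owns the mutex. */
static int
toy_self_lock(struct toy_rec_mutex *m)
{
	switch (m->type) {
	case TOY_ERRORCHECK:
		return (EDEADLK);	/* report the deadlock */
	case TOY_RECURSIVE:
		m->count++;		/* just bump the depth */
		return (0);
	case TOY_NORMAL:
	default:
		for (;;)		/* stand-in for the PS_DEADLOCK sleep */
			;
	}
	/* NOTREACHED */
	return (0);
}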
-static inline int
-mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
+static int
+mutex_unlock_common(pthread_mutex_t *m, int add_reference)
{
struct pthread *curthread = _get_curthread();
int ret = 0;
- if (mutex == NULL || *mutex == NULL) {
+ if (m == NULL || *m == NULL)
ret = EINVAL;
- } else {
- /*
- * Defer signals to protect the scheduling queues from
- * access by the signal handler:
- */
- _thread_kern_sig_defer();
-
+ else {
/* Lock the mutex structure: */
- _SPINLOCK(&(*mutex)->lock);
+ THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
/* Process according to mutex type: */
- switch ((*mutex)->m_protocol) {
+ switch ((*m)->m_protocol) {
/* Default POSIX mutex: */
case PTHREAD_PRIO_NONE:
/*
* Check if the running thread is not the owner of the
* mutex:
*/
- if ((*mutex)->m_owner != curthread) {
+ if ((*m)->m_owner != curthread)
/*
* Return an invalid argument error for no
* owner and a permission error otherwise:
*/
- ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
- }
- else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
- ((*mutex)->m_data.m_count > 0)) {
+ ret = (*m)->m_owner == NULL ? EINVAL : EPERM;
+
+ else if (((*m)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
+ ((*m)->m_count > 0))
/* Decrement the count: */
- (*mutex)->m_data.m_count--;
- } else {
+ (*m)->m_count--;
+ else {
/*
- * Clear the count in case this is recursive
+ * Clear the count in case this is a recursive
* mutex.
*/
- (*mutex)->m_data.m_count = 0;
+ (*m)->m_count = 0;
/* Remove the mutex from the threads queue. */
- _MUTEX_ASSERT_IS_OWNED(*mutex);
- TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
- (*mutex), m_qe);
- _MUTEX_INIT_LINK(*mutex);
+ MUTEX_ASSERT_IS_OWNED(*m);
+ TAILQ_REMOVE(&(*m)->m_owner->mutexq,
+ (*m), m_qe);
+ MUTEX_INIT_LINK(*m);
/*
- * Get the next thread from the queue of
- * threads waiting on the mutex:
+ * Hand off the mutex to the next waiting
+ * thread:
*/
- if (((*mutex)->m_owner =
- mutex_queue_deq(*mutex)) != NULL) {
- /* Make the new owner runnable: */
- PTHREAD_NEW_STATE((*mutex)->m_owner,
- PS_RUNNING);
-
- /*
- * Add the mutex to the threads list of
- * owned mutexes:
- */
- TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
- (*mutex), m_qe);
-
- /*
- * The owner is no longer waiting for
- * this mutex:
- */
- (*mutex)->m_owner->data.mutex = NULL;
- }
+ mutex_handoff(curthread, *m);
}
break;
@@ -913,23 +887,23 @@ mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
* Check if the running thread is not the owner of the
* mutex:
*/
- if ((*mutex)->m_owner != curthread) {
+ if ((*m)->m_owner != curthread)
/*
* Return an invalid argument error for no
* owner and a permission error otherwise:
*/
- ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
- }
- else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
- ((*mutex)->m_data.m_count > 0)) {
+ ret = (*m)->m_owner == NULL ? EINVAL : EPERM;
+
+ else if (((*m)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
+ ((*m)->m_count > 0))
/* Decrement the count: */
- (*mutex)->m_data.m_count--;
- } else {
+ (*m)->m_count--;
+ else {
/*
* Clear the count in case this is recursive
* mutex.
*/
- (*mutex)->m_data.m_count = 0;
+ (*m)->m_count = 0;
/*
* Restore the threads inherited priority and
@@ -937,11 +911,13 @@ mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
* not to override changes in the threads base
* priority subsequent to locking the mutex).
*/
+ THR_SCHED_LOCK(curthread, curthread);
curthread->inherited_priority =
- (*mutex)->m_saved_prio;
+ (*m)->m_saved_prio;
curthread->active_priority =
MAX(curthread->inherited_priority,
curthread->base_priority);
+ THR_SCHED_UNLOCK(curthread, curthread);
/*
* This thread now owns one less priority mutex.
@@ -949,69 +925,16 @@ mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
curthread->priority_mutex_count--;
/* Remove the mutex from the threads queue. */
- _MUTEX_ASSERT_IS_OWNED(*mutex);
- TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
- (*mutex), m_qe);
- _MUTEX_INIT_LINK(*mutex);
+ MUTEX_ASSERT_IS_OWNED(*m);
+ TAILQ_REMOVE(&(*m)->m_owner->mutexq,
+ (*m), m_qe);
+ MUTEX_INIT_LINK(*m);
/*
- * Get the next thread from the queue of threads
- * waiting on the mutex:
+ * Hand off the mutex to the next waiting
+ * thread:
*/
- if (((*mutex)->m_owner =
- mutex_queue_deq(*mutex)) == NULL)
- /* This mutex has no priority. */
- (*mutex)->m_prio = 0;
- else {
- /*
- * Track number of priority mutexes owned:
- */
- (*mutex)->m_owner->priority_mutex_count++;
-
- /*
- * Add the mutex to the threads list
- * of owned mutexes:
- */
- TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
- (*mutex), m_qe);
-
- /*
- * The owner is no longer waiting for
- * this mutex:
- */
- (*mutex)->m_owner->data.mutex = NULL;
-
- /*
- * Set the priority of the mutex. Since
- * our waiting threads are in descending
- * priority order, the priority of the
- * mutex becomes the active priority of
- * the thread we just dequeued.
- */
- (*mutex)->m_prio =
- (*mutex)->m_owner->active_priority;
-
- /*
- * Save the owning threads inherited
- * priority:
- */
- (*mutex)->m_saved_prio =
- (*mutex)->m_owner->inherited_priority;
-
- /*
- * The owning threads inherited priority
- * now becomes his active priority (the
- * priority of the mutex).
- */
- (*mutex)->m_owner->inherited_priority =
- (*mutex)->m_prio;
-
- /*
- * Make the new owner runnable:
- */
- PTHREAD_NEW_STATE((*mutex)->m_owner,
- PS_RUNNING);
- }
+ mutex_handoff(curthread, *m);
}
break;
@@ -1021,23 +944,23 @@ mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
* Check if the running thread is not the owner of the
* mutex:
*/
- if ((*mutex)->m_owner != curthread) {
+ if ((*m)->m_owner != curthread)
/*
* Return an invalid argument error for no
* owner and a permission error otherwise:
*/
- ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
- }
- else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
- ((*mutex)->m_data.m_count > 0)) {
+ ret = (*m)->m_owner == NULL ? EINVAL : EPERM;
+
+ else if (((*m)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
+ ((*m)->m_count > 0))
/* Decrement the count: */
- (*mutex)->m_data.m_count--;
- } else {
+ (*m)->m_count--;
+ else {
/*
- * Clear the count in case this is recursive
+ * Clear the count in case this is a recursive
* mutex.
*/
- (*mutex)->m_data.m_count = 0;
+ (*m)->m_count = 0;
/*
* Restore the threads inherited priority and
@@ -1045,11 +968,13 @@ mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
* not to override changes in the threads base
* priority subsequent to locking the mutex).
*/
+ THR_SCHED_LOCK(curthread, curthread);
curthread->inherited_priority =
- (*mutex)->m_saved_prio;
+ (*m)->m_saved_prio;
curthread->active_priority =
MAX(curthread->inherited_priority,
curthread->base_priority);
+ THR_SCHED_UNLOCK(curthread, curthread);
/*
* This thread now owns one less priority mutex.
@@ -1057,80 +982,16 @@ mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
curthread->priority_mutex_count--;
/* Remove the mutex from the threads queue. */
- _MUTEX_ASSERT_IS_OWNED(*mutex);
- TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
- (*mutex), m_qe);
- _MUTEX_INIT_LINK(*mutex);
+ MUTEX_ASSERT_IS_OWNED(*m);
+ TAILQ_REMOVE(&(*m)->m_owner->mutexq,
+ (*m), m_qe);
+ MUTEX_INIT_LINK(*m);
/*
- * Enter a loop to find a waiting thread whose
- * active priority will not cause a ceiling
- * violation:
+ * Hand off the mutex to the next waiting
+ * thread:
*/
- while ((((*mutex)->m_owner =
- mutex_queue_deq(*mutex)) != NULL) &&
- ((*mutex)->m_owner->active_priority >
- (*mutex)->m_prio)) {
- /*
- * Either the mutex ceiling priority
- * been lowered and/or this threads
- * priority has been raised subsequent
- * to this thread being queued on the
- * waiting list.
- */
- (*mutex)->m_owner->error = EINVAL;
- PTHREAD_NEW_STATE((*mutex)->m_owner,
- PS_RUNNING);
- /*
- * The thread is no longer waiting for
- * this mutex:
- */
- (*mutex)->m_owner->data.mutex = NULL;
- }
-
- /* Check for a new owner: */
- if ((*mutex)->m_owner != NULL) {
- /*
- * Track number of priority mutexes owned:
- */
- (*mutex)->m_owner->priority_mutex_count++;
-
- /*
- * Add the mutex to the threads list
- * of owned mutexes:
- */
- TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
- (*mutex), m_qe);
-
- /*
- * The owner is no longer waiting for
- * this mutex:
- */
- (*mutex)->m_owner->data.mutex = NULL;
-
- /*
- * Save the owning threads inherited
- * priority:
- */
- (*mutex)->m_saved_prio =
- (*mutex)->m_owner->inherited_priority;
-
- /*
- * The owning thread inherits the
- * ceiling priority of the mutex and
- * executes at that priority:
- */
- (*mutex)->m_owner->inherited_priority =
- (*mutex)->m_prio;
- (*mutex)->m_owner->active_priority =
- (*mutex)->m_prio;
-
- /*
- * Make the new owner runnable:
- */
- PTHREAD_NEW_STATE((*mutex)->m_owner,
- PS_RUNNING);
- }
+ mutex_handoff(curthread, *m);
}
break;
@@ -1141,19 +1002,12 @@ mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
break;
}
- if ((ret == 0) && (add_reference != 0)) {
+ if ((ret == 0) && (add_reference != 0))
/* Increment the reference count: */
- (*mutex)->m_refcount++;
- }
+ (*m)->m_refcount++;
/* Unlock the mutex structure: */
- _SPINUNLOCK(&(*mutex)->lock);
-
- /*
- * Undefer and handle pending signals, yielding if
- * necessary:
- */
- _thread_kern_sig_undefer();
+ THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
}
/* Return the completion status: */
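The restore sequence that now sits under THR_SCHED_LOCK in both priority protocols has one invariant: unlocking pops the saved inherited priority and recomputes the active priority from it and the base priority. A minimal sketch; struct toy_thr and restore_prio are hypothetical:

struct toy_thr {
	int base_priority;
	int inherited_priority;
	int active_priority;
};

/* Undo the priority boost taken when the mutex was acquired. */
static void
restore_prio(struct toy_thr *t, int m_saved_prio)
{
	t->inherited_priority = m_saved_prio;
	t->active_priority = (t->inherited_priority > t->base_priority)
	    ? t->inherited_priority : t->base_priority;
}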
@@ -1168,11 +1022,14 @@ mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
* changes to active priorities of other threads and to the ordering
* of mutex locking by waiting threads.
*
- * This must be called while thread scheduling is deferred.
+ * This must be called without the target thread's scheduling lock held.
*/
void
-_mutex_notify_priochange(pthread_t pthread)
+_mutex_notify_priochange(struct pthread *curthread, struct pthread *pthread,
+ int propagate_prio)
{
+ struct pthread_mutex *m;
+
/* Adjust the priorites of any owned priority mutexes: */
if (pthread->priority_mutex_count > 0) {
/*
@@ -1180,14 +1037,29 @@ _mutex_notify_priochange(pthread_t pthread)
* their priorities to account for this threads change
* in priority. This has the side effect of changing
* the threads active priority.
+ *
+ * Be sure to lock the first mutex in the list of owned
+ * mutexes. This acts as a barrier against another
+ * simultaneous call to change the thread's priority
+ * and against the owning thread releasing the mutex.
*/
- mutex_rescan_owned(pthread, /* rescan all owned */ NULL);
+ m = TAILQ_FIRST(&pthread->mutexq);
+ if (m != NULL) {
+ THR_LOCK_ACQUIRE(curthread, &m->m_lock);
+ /*
+ * Make sure the thread still owns the lock.
+ */
+ if (m == TAILQ_FIRST(&pthread->mutexq))
+ mutex_rescan_owned(curthread, pthread,
+ /* rescan all owned */ NULL);
+ THR_LOCK_RELEASE(curthread, &m->m_lock);
+ }
}
/*
* If this thread is waiting on a priority inheritence mutex,
* check for priority adjustments. A change in priority can
- * also effect a ceiling violation(*) for a thread waiting on
+ * also cause a ceiling violation(*) for a thread waiting on
* a priority protection mutex; we don't perform the check here
* as it is done in pthread_mutex_unlock.
*
@@ -1196,32 +1068,53 @@ _mutex_notify_priochange(pthread_t pthread)
* does not affect ownership of that mutex; the ceiling
* priority is only checked before mutex ownership occurs.
*/
- if (pthread->state == PS_MUTEX_WAIT) {
- /* Lock the mutex structure: */
- _SPINLOCK(&pthread->data.mutex->lock);
-
+ if (propagate_prio != 0) {
/*
- * Check to make sure this thread is still in the same state
- * (the spinlock above can yield the CPU to another thread):
+ * Lock the thread's scheduling queue. This is a bit
+ * convoluted; the "in synchronization queue flag" can
+ * only be cleared with both the thread's scheduling and
+ * mutex locks held. The thread's pointer to the wanted
+ * mutex is guaranteed to be valid during this time.
*/
- if (pthread->state == PS_MUTEX_WAIT) {
+ THR_SCHED_LOCK(curthread, pthread);
+
+ if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) == 0) ||
+ ((m = pthread->data.mutex) == NULL))
+ THR_SCHED_UNLOCK(curthread, pthread);
+ else {
+ /*
+ * This thread is currently waiting on a mutex; unlock
+ * the scheduling queue lock and lock the mutex. We
+ * can't hold both at the same time because the locking
+ * order could cause a deadlock.
+ */
+ THR_SCHED_UNLOCK(curthread, pthread);
+ THR_LOCK_ACQUIRE(curthread, &m->m_lock);
+
/*
- * Remove and reinsert this thread into the list of
- * waiting threads to preserve decreasing priority
- * order.
+ * Check to make sure this thread is still in the
+ * same state (the lock above can yield the CPU to
+ * another thread or the thread may be running on
+ * another CPU).
*/
- mutex_queue_remove(pthread->data.mutex, pthread);
- mutex_queue_enq(pthread->data.mutex, pthread);
+ if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
+ (pthread->data.mutex == m)) {
+ /*
+ * Remove and reinsert this thread into
+ * the list of waiting threads to preserve
+ * decreasing priority order.
+ */
+ mutex_queue_remove(m, pthread);
+ mutex_queue_enq(m, pthread);
- if (pthread->data.mutex->m_protocol ==
- PTHREAD_PRIO_INHERIT) {
- /* Adjust priorities: */
- mutex_priority_adjust(pthread->data.mutex);
+ if (m->m_protocol == PTHREAD_PRIO_INHERIT)
+ /* Adjust priorities: */
+ mutex_priority_adjust(curthread, m);
}
- }
- /* Unlock the mutex structure: */
- _SPINUNLOCK(&pthread->data.mutex->lock);
+ /* Unlock the mutex structure: */
+ THR_LOCK_RELEASE(curthread, &m->m_lock);
+ }
}
}
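The locking dance in _mutex_notify_priochange() is an instance of a general pattern: when two locks cannot be held together, drop the one you hold, take the other, and then re-validate whatever you observed, because the world may have changed in between. A generic sketch with hypothetical names:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t sched_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t mutex_guard = PTHREAD_MUTEX_INITIALIZER;
static bool in_syncq;			/* is the thread queued on a mutex? */

static void
requeue_if_still_waiting(void)
{
	bool waiting;

	pthread_mutex_lock(&sched_lock);
	waiting = in_syncq;
	pthread_mutex_unlock(&sched_lock);	/* may not hold both locks */

	if (!waiting)
		return;
	pthread_mutex_lock(&mutex_guard);
	if (in_syncq) {
		/* Re-validated: safe to remove/reinsert in the queue. */
	}
	pthread_mutex_unlock(&mutex_guard);
}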
@@ -1229,13 +1122,15 @@ _mutex_notify_priochange(pthread_t pthread)
* Called when a new thread is added to the mutex waiting queue or
* when a threads priority changes that is already in the mutex
* waiting queue.
+ *
+ * This must be called with the mutex locked by the current thread.
*/
static void
-mutex_priority_adjust(pthread_mutex_t mutex)
+mutex_priority_adjust(struct pthread *curthread, pthread_mutex_t mutex)
{
- pthread_t pthread_next, pthread = mutex->m_owner;
- int temp_prio;
pthread_mutex_t m = mutex;
+ struct pthread *pthread_next, *pthread = mutex->m_owner;
+ int done, temp_prio;
/*
* Calculate the mutex priority as the maximum of the highest
@@ -1260,7 +1155,12 @@ mutex_priority_adjust(pthread_mutex_t mutex)
/* Set new priority of the mutex: */
m->m_prio = temp_prio;
- while (m != NULL) {
+ /*
+ * Don't unlock the mutex passed in as an argument. It is
+ * expected to be locked and unlocked by the caller.
+ */
+ done = 1;
+ do {
/*
* Save the threads priority before rescanning the
* owned mutexes:
@@ -1268,11 +1168,26 @@ mutex_priority_adjust(pthread_mutex_t mutex)
temp_prio = pthread->active_priority;
/*
- * Fix the priorities for all the mutexes this thread has
- * locked since taking this mutex. This also has a
+ * Fix the priorities for all mutexes held by the owning
+ * thread since taking this mutex. This also has a
+ * potential side-effect of changing the thread's priority.
+ *
+ * At this point the mutex is locked by the current thread.
+ * The owning thread can't release the mutex until it is
+ * unlocked, so we should be able to safely walk its list
+ * of owned mutexes.
+ */
+ mutex_rescan_owned(curthread, pthread, m);
+
+ /*
+ * If this isn't the first time through the loop,
+ * the current mutex needs to be unlocked.
*/
- mutex_rescan_owned(pthread, m);
+ if (done == 0)
+ THR_LOCK_RELEASE(curthread, &m->m_lock);
+
+ /* Assume we're done unless told otherwise: */
+ done = 1;
/*
* If the thread is currently waiting on a mutex, check
@@ -1280,56 +1195,70 @@ mutex_priority_adjust(pthread_mutex_t mutex)
* priority of the mutex.
*/
if ((temp_prio != pthread->active_priority) &&
- (pthread->state == PS_MUTEX_WAIT) &&
- (pthread->data.mutex->m_protocol == PTHREAD_PRIO_INHERIT)) {
- /* Grab the mutex this thread is waiting on: */
- m = pthread->data.mutex;
+ ((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
+ ((m = pthread->data.mutex) != NULL) &&
+ (m->m_protocol == PTHREAD_PRIO_INHERIT)) {
+ /* Lock the mutex structure: */
+ THR_LOCK_ACQUIRE(curthread, &m->m_lock);
/*
- * The priority for this thread has changed. Remove
- * and reinsert this thread into the list of waiting
- * threads to preserve decreasing priority order.
+ * Make sure the thread is still waiting on the
+ * mutex:
*/
- mutex_queue_remove(m, pthread);
- mutex_queue_enq(m, pthread);
-
- /* Grab the waiting thread with highest priority: */
- pthread_next = TAILQ_FIRST(&m->m_queue);
+ if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
+ (m == pthread->data.mutex)) {
+ /*
+ * The priority for this thread has changed.
+ * Remove and reinsert this thread into the
+ * list of waiting threads to preserve
+ * decreasing priority order.
+ */
+ mutex_queue_remove(m, pthread);
+ mutex_queue_enq(m, pthread);
- /*
- * Calculate the mutex priority as the maximum of the
- * highest active priority of any waiting threads and
- * the owning threads active priority.
- */
- temp_prio = MAX(pthread_next->active_priority,
- MAX(m->m_saved_prio, m->m_owner->base_priority));
+ /*
+ * Grab the waiting thread with highest
+ * priority:
+ */
+ pthread_next = TAILQ_FIRST(&m->m_queue);
- if (temp_prio != m->m_prio) {
/*
- * The priority needs to be propagated to the
- * mutex this thread is waiting on and up to
- * the owner of that mutex.
+ * Calculate the mutex priority as the maximum
+ * of the highest active priority of any
+ * waiting threads and the owning thread's
+ * active priority.
*/
- m->m_prio = temp_prio;
- pthread = m->m_owner;
- }
- else
- /* We're done: */
- m = NULL;
+ temp_prio = MAX(pthread_next->active_priority,
+ MAX(m->m_saved_prio,
+ m->m_owner->base_priority));
+ if (temp_prio != m->m_prio) {
+ /*
+ * The priority needs to be propagated
+ * to the mutex this thread is waiting
+ * on and up to the owner of that mutex.
+ */
+ m->m_prio = temp_prio;
+ pthread = m->m_owner;
+
+ /* We're not done yet: */
+ done = 0;
+ }
+ }
+ /* Only release the mutex if we're done: */
+ if (done != 0)
+ THR_LOCK_RELEASE(curthread, &m->m_lock);
}
- else
- /* We're done: */
- m = NULL;
- }
+ } while (done == 0);
}
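Without the locking and requeueing, the loop above is a walk up the chain of owners: each mutex on the chain takes the maximum of its top waiter's active priority, its saved priority, and its owner's base priority, and the walk stops at the first mutex whose priority does not change. A hypothetical flattening:

#include <stddef.h>

struct pi_link {
	int prio;			/* m_prio */
	int top_waiter_prio;		/* highest active priority in m_queue */
	int saved_prio;			/* m_saved_prio */
	int owner_base_prio;		/* m_owner->base_priority */
	struct pi_link *up;		/* mutex the owner itself waits on */
};

static int
max3(int a, int b, int c)
{
	int m = (a > b) ? a : b;

	return ((m > c) ? m : c);
}

static void
propagate(struct pi_link *m)
{
	int p;

	for (; m != NULL; m = m->up) {
		p = max3(m->top_waiter_prio, m->saved_prio,
		    m->owner_base_prio);
		if (p == m->prio)
			break;		/* nothing changed; done */
		m->prio = p;		/* raise it and keep walking up */
	}
}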
static void
-mutex_rescan_owned(pthread_t pthread, pthread_mutex_t mutex)
+mutex_rescan_owned(struct pthread *curthread, struct pthread *pthread,
+ struct pthread_mutex *mutex)
{
- int active_prio, inherited_prio;
- pthread_mutex_t m;
- pthread_t pthread_next;
+ struct pthread_mutex *m;
+ struct pthread *pthread_next;
+ int active_prio, inherited_prio;
/*
* Start walking the mutexes the thread has taken since
@@ -1344,8 +1273,7 @@ mutex_rescan_owned(pthread_t pthread, pthread_mutex_t mutex)
/* There is no inherited priority yet. */
inherited_prio = 0;
- }
- else {
+ } else {
/*
* The caller wants to start after a specific mutex. It
* is assumed that this mutex is a priority inheritence
@@ -1359,7 +1287,7 @@ mutex_rescan_owned(pthread_t pthread, pthread_mutex_t mutex)
}
active_prio = MAX(inherited_prio, pthread->base_priority);
- while (m != NULL) {
+ for (; m != NULL; m = TAILQ_NEXT(m, m_qe)) {
/*
* We only want to deal with priority inheritence
* mutexes. This might be optimized by only placing
@@ -1386,9 +1314,6 @@ mutex_rescan_owned(pthread_t pthread, pthread_mutex_t mutex)
inherited_prio = m->m_prio;
active_prio = MAX(m->m_prio, pthread->base_priority);
}
-
- /* Advance to the next mutex owned by this thread: */
- m = TAILQ_NEXT(m, m_qe);
}
/*
@@ -1399,16 +1324,22 @@ mutex_rescan_owned(pthread_t pthread, pthread_mutex_t mutex)
active_prio = MAX(inherited_prio, pthread->base_priority);
if (active_prio != pthread->active_priority) {
- /*
- * If this thread is in the priority queue, it must be
- * removed and reinserted for its new priority.
- */
- if (pthread->flags & PTHREAD_FLAGS_IN_PRIOQ) {
+ /* Lock the thread's scheduling queue: */
+ THR_SCHED_LOCK(curthread, pthread);
+
+ if ((pthread->flags & THR_FLAGS_IN_RUNQ) == 0) {
/*
- * Remove the thread from the priority queue
- * before changing its priority:
+ * This thread is not in a run queue. Just set
+ * its active priority.
*/
- PTHREAD_PRIOQ_REMOVE(pthread);
+ pthread->active_priority = active_prio;
+ } else {
+ /*
+ * This thread is in a run queue. Remove it from
+ * the queue before changing its priority:
+ */
+ THR_RUNQ_REMOVE(pthread);
/*
* POSIX states that if the priority is being
@@ -1421,19 +1352,15 @@ mutex_rescan_owned(pthread_t pthread, pthread_mutex_t mutex)
/* Set the new active priority. */
pthread->active_priority = active_prio;
- PTHREAD_PRIOQ_INSERT_HEAD(pthread);
- }
- else {
+ THR_RUNQ_INSERT_HEAD(pthread);
+ } else {
/* Set the new active priority. */
pthread->active_priority = active_prio;
- PTHREAD_PRIOQ_INSERT_TAIL(pthread);
+ THR_RUNQ_INSERT_TAIL(pthread);
}
}
- else {
- /* Set the new active priority. */
- pthread->active_priority = active_prio;
- }
+ THR_SCHED_UNLOCK(curthread, pthread);
}
}
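The invariant mutex_rescan_owned() restores fits in one line: a thread's active priority is the maximum of its base priority and the priorities of the priority mutexes it holds. A hypothetical one-function version over a flattened array:

/* owned_prio[] holds m_prio for each owned priority mutex. */
static int
recompute_active(int base_prio, const int *owned_prio, int nowned)
{
	int active = base_prio;
	int i;

	for (i = 0; i < nowned; i++)
		if (owned_prio[i] > active)
			active = owned_prio[i];
	return (active);
}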
@@ -1449,36 +1376,182 @@ _mutex_unlock_private(pthread_t pthread)
}
}
+/*
+ * This is called by the current thread when it wants to back out of a
+ * mutex_lock in order to run a signal handler.
+ */
void
-_mutex_lock_backout(pthread_t pthread)
+_mutex_lock_backout(struct pthread *curthread)
{
- struct pthread_mutex *mutex;
+ struct pthread_mutex *m;
- /*
- * Defer signals to protect the scheduling queues from
- * access by the signal handler:
- */
- _thread_kern_sig_defer();
- if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) {
- mutex = pthread->data.mutex;
+ if ((curthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) {
+ /*
+ * Any other thread may clear the "in sync queue flag",
+ * but only the current thread can clear the pointer
+ * to the mutex. So if the flag is set, we can
+ * guarantee that the pointer to the mutex is valid.
+ * The only problem may be if the mutex is destroyed
+ * out from under us, but that should be considered
+ * an application bug.
+ */
+ m = curthread->data.mutex;
/* Lock the mutex structure: */
- _SPINLOCK(&mutex->lock);
+ THR_LOCK_ACQUIRE(curthread, &m->m_lock);
+
+ /*
+ * Check to make sure this thread doesn't already own
+ * the mutex. Since mutexes are unlocked with direct
+ * handoffs, it is possible the previous owner gave it
+ * to us after we checked the sync queue flag and before
+ * we locked the mutex structure.
+ */
+ if (m->m_owner == curthread) {
+ THR_LOCK_RELEASE(curthread, &m->m_lock);
+ mutex_unlock_common(&m, /* add_reference */ 0);
+ } else {
+ /*
+ * Remove ourselves from the mutex queue and
+ * clear the pointer to the mutex. We may no
+ * longer be in the mutex queue, but the removal
+ * function will DTRT.
+ */
+ mutex_queue_remove(m, curthread);
+ curthread->data.mutex = NULL;
+ THR_LOCK_RELEASE(curthread, &m->m_lock);
+ }
+ }
+}
+
+/*
+ * Dequeue a waiting thread from the head of a mutex queue in descending
+ * priority order.
+ *
+ * In order to properly dequeue a thread from the mutex queue and
+ * make it runnable without the possibility of errant wakeups, it
+ * is necessary to lock the thread's scheduling queue while also
+ * holding the mutex lock.
+ */
+static void
+mutex_handoff(struct pthread *curthread, struct pthread_mutex *mutex)
+{
+ struct pthread *pthread;
+
+ /* Keep dequeueing until we find a valid thread: */
+ mutex->m_owner = NULL;
+ pthread = TAILQ_FIRST(&mutex->m_queue);
+ while (pthread != NULL) {
+ /* Take the thread's scheduling lock: */
+ THR_SCHED_LOCK(curthread, pthread);
- mutex_queue_remove(mutex, pthread);
+ /* Remove the thread from the mutex queue: */
+ TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
+ pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;
- /* This thread is no longer waiting for the mutex: */
+ /* This thread is no longer waiting for this mutex. */
pthread->data.mutex = NULL;
- /* Unlock the mutex structure: */
- _SPINUNLOCK(&mutex->lock);
+ /*
+ * Give the mutex to this waiter according to the
+ * mutex protocol; a ceiling violation leaves the
+ * mutex unowned so the next waiter is tried.
+ */
+ switch (mutex->m_protocol) {
+ case PTHREAD_PRIO_NONE:
+ /*
+ * Assign the new owner and add the mutex to the
+ * thread's list of owned mutexes.
+ */
+ mutex->m_owner = pthread;
+ TAILQ_INSERT_TAIL(&pthread->mutexq, mutex, m_qe);
+ break;
+ case PTHREAD_PRIO_INHERIT:
+ /*
+ * Assign the new owner and add the mutex to the
+ * thread's list of owned mutexes.
+ */
+ mutex->m_owner = pthread;
+ TAILQ_INSERT_TAIL(&pthread->mutexq, mutex, m_qe);
+
+ /* Track number of priority mutexes owned: */
+ pthread->priority_mutex_count++;
+
+ /*
+ * Set the priority of the mutex. Since our waiting
+ * threads are in descending priority order, the
+ * priority of the mutex becomes the active priority
+ * of the thread we just dequeued.
+ */
+ mutex->m_prio = pthread->active_priority;
+
+ /* Save the owning thread's inherited priority: */
+ mutex->m_saved_prio = pthread->inherited_priority;
+
+ /*
+ * The owning thread's inherited priority now becomes
+ * its active priority (the priority of the mutex).
+ */
+ pthread->inherited_priority = mutex->m_prio;
+ break;
+
+ case PTHREAD_PRIO_PROTECT:
+ if (pthread->active_priority > mutex->m_prio) {
+ /*
+ * Either the mutex ceiling priority has
+ * been lowered and/or this thread's priority
+ * has been raised subsequent to the thread
+ * being queued on the waiting list.
+ */
+ pthread->error = EINVAL;
+ } else {
+ /*
+ * Assign the new owner and add the mutex
+ * to the thread's list of owned mutexes.
+ */
+ mutex->m_owner = pthread;
+ TAILQ_INSERT_TAIL(&pthread->mutexq,
+ mutex, m_qe);
+
+ /* Track number of priority mutexes owned: */
+ pthread->priority_mutex_count++;
+
+ /*
+ * Save the owning thread's inherited
+ * priority:
+ */
+ mutex->m_saved_prio =
+ pthread->inherited_priority;
+
+ /*
+ * The owning thread inherits the ceiling
+ * priority of the mutex and executes at
+ * that priority:
+ */
+ pthread->inherited_priority = mutex->m_prio;
+ pthread->active_priority = mutex->m_prio;
+ }
+ break;
+ }
+
+ /* Make the thread runnable and unlock the scheduling queue: */
+ _thr_setrunnable_unlocked(pthread);
+ THR_SCHED_UNLOCK(curthread, pthread);
+
+ if (mutex->m_owner == pthread)
+ /* We're done; a valid owner was found. */
+ break;
+ else
+ /* Get the next thread from the waiting queue: */
+ pthread = TAILQ_NEXT(pthread, sqe);
}
- /*
- * Undefer and handle pending signals, yielding if
- * necessary:
- */
- _thread_kern_sig_undefer();
+
+ if ((pthread == NULL) && (mutex->m_protocol == PTHREAD_PRIO_INHERIT))
+ /* This mutex has no priority: */
+ mutex->m_prio = 0;
}
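mutex_handoff() in miniature: pop waiters in priority order until one can legally take the mutex, assign ownership while the waiter is still asleep, then make it runnable. A hypothetical singly-linked version without the protocol bookkeeping:

#include <stdbool.h>
#include <stddef.h>

struct toy_waiter {
	struct toy_waiter *next;
	bool rejected;		/* e.g. would violate a ceiling */
};

struct toy_handoff_mutex {
	struct toy_waiter *owner;
	struct toy_waiter *head;	/* waiters, highest priority first */
};

/* Called with the mutex guard held. */
static void
toy_handoff(struct toy_handoff_mutex *m)
{
	struct toy_waiter *w;

	m->owner = NULL;
	while ((w = m->head) != NULL) {
		m->head = w->next;	/* dequeue */
		/* make_runnable(w) would go here in either case. */
		if (!w->rejected) {
			m->owner = w;	/* hand over while it sleeps */
			break;
		}
	}
}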
/*
@@ -1486,13 +1559,13 @@ _mutex_lock_backout(pthread_t pthread)
* priority order.
*/
static inline pthread_t
-mutex_queue_deq(pthread_mutex_t mutex)
+mutex_queue_deq(struct pthread_mutex *mutex)
{
pthread_t pthread;
while ((pthread = TAILQ_FIRST(&mutex->m_queue)) != NULL) {
TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
- pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ;
+ pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;
/*
* Only exit the loop if the thread hasn't been
@@ -1502,7 +1575,7 @@ mutex_queue_deq(pthread_mutex_t mutex)
break;
}
- return(pthread);
+ return (pthread);
}
/*
@@ -1511,9 +1584,9 @@ mutex_queue_deq(pthread_mutex_t mutex)
static inline void
mutex_queue_remove(pthread_mutex_t mutex, pthread_t pthread)
{
- if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) {
+ if ((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) {
TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
- pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ;
+ pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;
}
}
@@ -1525,7 +1598,7 @@ mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread)
{
pthread_t tid = TAILQ_LAST(&mutex->m_queue, mutex_head);
- PTHREAD_ASSERT_NOT_IN_SYNCQ(pthread);
+ THR_ASSERT_NOT_IN_SYNCQ(pthread);
/*
* For the common case of all threads having equal priority,
* we perform a quick check against the priority of the thread
@@ -1539,6 +1612,5 @@ mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread)
tid = TAILQ_NEXT(tid, sqe);
TAILQ_INSERT_BEFORE(tid, pthread, sqe);
}
- pthread->flags |= PTHREAD_FLAGS_IN_MUTEXQ;
+ pthread->sflags |= THR_FLAGS_IN_SYNCQ;
}
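mutex_queue_enq() keeps the queue in descending priority order with a fast path: if the new waiter does not beat the tail, append; otherwise scan from the head for the first strictly lower-priority entry and insert before it. A self-contained sketch with the same <sys/queue.h> macros; struct waiter and enq_by_prio are hypothetical:

#include <stddef.h>
#include <sys/queue.h>

struct waiter {
	int active_priority;
	TAILQ_ENTRY(waiter) sqe;
};
TAILQ_HEAD(waiter_head, waiter);

static void
enq_by_prio(struct waiter_head *q, struct waiter *w)
{
	struct waiter *tid = TAILQ_LAST(q, waiter_head);

	if (tid == NULL || tid->active_priority >= w->active_priority)
		TAILQ_INSERT_TAIL(q, w, sqe);	/* common equal-priority case */
	else {
		tid = TAILQ_FIRST(q);
		while (tid->active_priority >= w->active_priority)
			tid = TAILQ_NEXT(tid, sqe);
		TAILQ_INSERT_BEFORE(tid, w, sqe);
	}
}

The scan always terminates: the else branch implies the tail has lower priority than the new waiter, so an insertion point exists before the end of the queue.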
-