-rw-r--r--  lib/libthr/thread/Makefile.inc          1
-rw-r--r--  lib/libthr/thread/thr_barrier.c        20
-rw-r--r--  lib/libthr/thread/thr_cancel.c        222
-rw-r--r--  lib/libthr/thread/thr_cond.c          235
-rw-r--r--  lib/libthr/thread/thr_create.c          1
-rw-r--r--  lib/libthr/thread/thr_detach.c         21
-rw-r--r--  lib/libthr/thread/thr_exit.c           41
-rw-r--r--  lib/libthr/thread/thr_init.c           21
-rw-r--r--  lib/libthr/thread/thr_join.c           74
-rw-r--r--  lib/libthr/thread/thr_mutex.c          75
-rw-r--r--  lib/libthr/thread/thr_private.h        78
-rw-r--r--  lib/libthr/thread/thr_resume_np.c      13
-rw-r--r--  lib/libthr/thread/thr_setschedparam.c   8
-rw-r--r--  lib/libthr/thread/thr_sig.c            77
-rw-r--r--  lib/libthr/thread/thr_syscalls.c       55
15 files changed, 320 insertions, 622 deletions
diff --git a/lib/libthr/thread/Makefile.inc b/lib/libthr/thread/Makefile.inc
index 52d428b..36b6b60 100644
--- a/lib/libthr/thread/Makefile.inc
+++ b/lib/libthr/thread/Makefile.inc
@@ -20,7 +20,6 @@ SRCS+= \
thr_exit.c \
thr_find_thread.c \
thr_getprio.c \
- thr_info.c \
thr_init.c \
thr_join.c \
thr_kern.c \
diff --git a/lib/libthr/thread/thr_barrier.c b/lib/libthr/thread/thr_barrier.c
index c635820..547a721 100644
--- a/lib/libthr/thread/thr_barrier.c
+++ b/lib/libthr/thread/thr_barrier.c
@@ -85,11 +85,12 @@ _pthread_barrier_wait(pthread_barrier_t *barrier)
UMTX_LOCK(&b->b_lock);
if (b->b_subtotal == (b->b_total - 1)) {
TAILQ_FOREACH(ptd, &b->b_barrq, sqe) {
- _thread_critical_enter(ptd);
- PTHREAD_NEW_STATE(ptd, PS_RUNNING);
+ PTHREAD_LOCK(ptd);
TAILQ_REMOVE(&b->b_barrq, ptd, sqe);
+ ptd->flags &= ~PTHREAD_FLAGS_IN_BARRQ;
ptd->flags |= PTHREAD_FLAGS_BARR_REL;
- _thread_critical_exit(ptd);
+ PTHREAD_WAKE(ptd);
+ PTHREAD_UNLOCK(ptd);
}
b->b_subtotal = 0;
UMTX_UNLOCK(&b->b_lock);
@@ -99,10 +100,10 @@ _pthread_barrier_wait(pthread_barrier_t *barrier)
/*
* More threads need to reach the barrier. Suspend this thread.
*/
- _thread_critical_enter(curthread);
+ PTHREAD_LOCK(curthread);
TAILQ_INSERT_HEAD(&b->b_barrq, curthread, sqe);
- PTHREAD_NEW_STATE(curthread, PS_BARRIER_WAIT);
- _thread_critical_exit(curthread);
+ curthread->flags |= PTHREAD_FLAGS_IN_BARRQ;
+ PTHREAD_UNLOCK(curthread);
b->b_subtotal++;
PTHREAD_ASSERT(b->b_subtotal < b->b_total,
"the number of threads waiting at a barrier is too large");
@@ -114,15 +115,14 @@ _pthread_barrier_wait(pthread_barrier_t *barrier)
* Make sure this thread wasn't released from
* the barrier while it was handling the signal.
*/
- _thread_critical_enter(curthread);
+ PTHREAD_LOCK(curthread);
if ((curthread->flags & PTHREAD_FLAGS_BARR_REL) != 0) {
curthread->flags &= ~PTHREAD_FLAGS_BARR_REL;
- _thread_critical_exit(curthread);
+ PTHREAD_UNLOCK(curthread);
error = 0;
break;
}
- PTHREAD_NEW_STATE(curthread, PS_BARRIER_WAIT);
- _thread_critical_exit(curthread);
+ PTHREAD_UNLOCK(curthread);
}
} while (error == EINTR);
return (error);
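
For context, here is a minimal usage sketch of the barrier interface whose wait path is reworked above. It is an illustrative standalone program, not part of this change, and uses only the standard pthread_barrier_* calls:

#include <pthread.h>
#include <stdio.h>

static pthread_barrier_t barrier;

static void *
worker(void *arg)
{
        printf("thread %ld reached the barrier\n", (long)arg);
        (void)pthread_barrier_wait(&barrier);   /* blocks until all 3 arrive */
        printf("thread %ld released\n", (long)arg);
        return (NULL);
}

int
main(void)
{
        pthread_t td[3];
        long i;

        pthread_barrier_init(&barrier, NULL, 3);
        for (i = 0; i < 3; i++)
                pthread_create(&td[i], NULL, worker, (void *)i);
        for (i = 0; i < 3; i++)
                pthread_join(td[i], NULL);
        pthread_barrier_destroy(&barrier);
        return (0);
}
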
diff --git a/lib/libthr/thread/thr_cancel.c b/lib/libthr/thread/thr_cancel.c
index a539de7..5880e1f 100644
--- a/lib/libthr/thread/thr_cancel.c
+++ b/lib/libthr/thread/thr_cancel.c
@@ -17,219 +17,127 @@ __weak_reference(_pthread_setcancelstate, pthread_setcancelstate);
__weak_reference(_pthread_setcanceltype, pthread_setcanceltype);
__weak_reference(_pthread_testcancel, pthread_testcancel);
+/*
+ * Posix requires this function to be async-cancel-safe, so it
+ * may not acquire any type of resource or call any functions
+ * that might do so.
+ */
int
_pthread_cancel(pthread_t pthread)
{
- int ret;
- pthread_t joined;
+ /* Don't continue if cancellation has already been set. */
+ if (atomic_cmpset_int(&pthread->cancellation, (int)CS_NULL,
+ (int)CS_PENDING) != 1)
+ return (0);
/*
- * When canceling a thread that has joined another thread, this
- * routine breaks the normal lock order of locking first the
- * joined and then the joiner. Therefore, it is necessary that
- * if it can't obtain the second lock, that it release the first
- * one and restart from the top.
+ * Only wake up threads that are in cancellation points or
+ * have set async cancel.
+ * XXX - access to pthread->flags is not safe. We should just
+ * unconditionally wake the thread and make sure that
+ * the library correctly handles spurious wakeups.
*/
-retry:
- if ((ret = _find_thread(pthread)) != 0)
- /* The thread is not on the list of active threads */
- goto out;
-
- _thread_critical_enter(pthread);
-
- if (pthread->state == PS_DEAD || pthread->state == PS_DEADLOCK
- || (pthread->flags & PTHREAD_EXITING) != 0) {
- /*
- * The thread is in the process of (or has already) exited
- * or is deadlocked.
- */
- _thread_critical_exit(pthread);
- ret = 0;
- goto out;
- }
-
- /*
- * The thread is on the active thread list and is not in the process
- * of exiting.
- */
-
- if (((pthread->cancelflags & PTHREAD_CANCEL_DISABLE) != 0) ||
- (((pthread->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS) == 0) &&
- ((pthread->cancelflags & PTHREAD_AT_CANCEL_POINT) == 0)))
- /* Just mark it for cancellation: */
- pthread->cancelflags |= PTHREAD_CANCELLING;
- else {
- /*
- * Check if we need to kick it back into the
- * run queue:
- */
- switch (pthread->state) {
- case PS_RUNNING:
- /* No need to resume: */
- pthread->cancelflags |= PTHREAD_CANCELLING;
- break;
-
- case PS_SLEEP_WAIT:
- case PS_WAIT_WAIT:
- pthread->cancelflags |= PTHREAD_CANCELLING;
- PTHREAD_NEW_STATE(pthread, PS_RUNNING);
- break;
-
- case PS_JOIN:
- /*
- * Disconnect the thread from the joinee:
- */
- if ((joined = pthread->join_status.thread) != NULL) {
- UMTX_TRYLOCK(&joined->lock, ret);
- if (ret == EBUSY) {
- _thread_critical_exit(pthread);
- goto retry;
- }
- pthread->join_status.thread->joiner = NULL;
- UMTX_UNLOCK(&joined->lock);
- joined = pthread->join_status.thread = NULL;
- }
- pthread->cancelflags |= PTHREAD_CANCELLING;
- PTHREAD_NEW_STATE(pthread, PS_RUNNING);
- break;
-
- case PS_BARRIER_WAIT:
- case PS_MUTEX_WAIT:
- case PS_COND_WAIT:
- /*
- * Threads in these states may be in queues.
- * In order to preserve queue integrity, the
- * cancelled thread must remove itself from the
- * queue. When the thread resumes, it will
- * remove itself from the queue and call the
- * cancellation routine.
- */
- pthread->cancelflags |= PTHREAD_CANCELLING;
- PTHREAD_NEW_STATE(pthread, PS_RUNNING);
- break;
-
- case PS_DEAD:
- case PS_DEADLOCK:
- case PS_STATE_MAX:
- /* Ignore - only here to silence -Wall: */
- break;
- }
- }
-
- /* Unprotect the scheduling queues: */
- _thread_critical_exit(pthread);
-
- ret = 0;
-out:
- return (ret);
+ if ((pthread->cancellationpoint || pthread->cancelmode == M_ASYNC) &&
+ (pthread->flags & PTHREAD_FLAGS_NOT_RUNNING) != 0)
+ PTHREAD_WAKE(pthread);
+ return (0);
}
+/*
+ * Posix requires this function to be async-cancel-safe, so it
+ * may not acquire any type of resource or call any functions
+ * that might do so.
+ */
int
_pthread_setcancelstate(int state, int *oldstate)
{
- int ostate, ret;
-
- ret = 0;
-
- _thread_critical_enter(curthread);
-
- ostate = curthread->cancelflags & PTHREAD_CANCEL_DISABLE;
+ int ostate;
+ ostate = (curthread->cancelmode == M_OFF) ? PTHREAD_CANCEL_DISABLE :
+ PTHREAD_CANCEL_ENABLE;
switch (state) {
case PTHREAD_CANCEL_ENABLE:
- if (oldstate != NULL)
- *oldstate = ostate;
- curthread->cancelflags &= ~PTHREAD_CANCEL_DISABLE;
- if ((curthread->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS) == 0)
- break;
- testcancel();
+ curthread->cancelmode = curthread->cancelstate;
break;
case PTHREAD_CANCEL_DISABLE:
- if (oldstate != NULL)
- *oldstate = ostate;
- curthread->cancelflags |= PTHREAD_CANCEL_DISABLE;
+ if (curthread->cancelmode != M_OFF) {
+ curthread->cancelstate = curthread->cancelmode;
+ curthread->cancelmode = M_OFF;
+ }
break;
default:
- ret = EINVAL;
+ return (EINVAL);
}
-
- _thread_critical_exit(curthread);
- return (ret);
+ if (oldstate != NULL)
+ *oldstate = ostate;
+ return (0);
}
+/*
+ * Posix requires this function to be async-cancel-safe, so it
+ * may not acquire any type of resource or call any functions that
+ * might do so.
+ */
int
_pthread_setcanceltype(int type, int *oldtype)
{
- int otype;
+ enum cancel_mode omode;
- _thread_critical_enter(curthread);
- otype = curthread->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS;
+ omode = curthread->cancelstate;
switch (type) {
case PTHREAD_CANCEL_ASYNCHRONOUS:
- if (oldtype != NULL)
- *oldtype = otype;
- curthread->cancelflags |= PTHREAD_CANCEL_ASYNCHRONOUS;
- testcancel();
+ if (curthread->cancelmode != M_OFF)
+ curthread->cancelmode = M_ASYNC;
+ curthread->cancelstate = M_ASYNC;
break;
case PTHREAD_CANCEL_DEFERRED:
- if (oldtype != NULL)
- *oldtype = otype;
- curthread->cancelflags &= ~PTHREAD_CANCEL_ASYNCHRONOUS;
+ if (curthread->cancelmode != M_OFF)
+ curthread->cancelmode = M_DEFERRED;
+ curthread->cancelstate = M_DEFERRED;
break;
default:
return (EINVAL);
}
-
- _thread_critical_exit(curthread);
+ if (oldtype != NULL) {
+ if (omode == M_DEFERRED)
+ *oldtype = PTHREAD_CANCEL_DEFERRED;
+ else if (omode == M_ASYNC)
+ *oldtype = PTHREAD_CANCEL_ASYNCHRONOUS;
+ }
return (0);
}
void
_pthread_testcancel(void)
{
- _thread_critical_enter(curthread);
testcancel();
- _thread_critical_exit(curthread);
}
static void
testcancel()
{
- /*
- * This pthread should already be locked by the caller.
- */
-
- if (((curthread->cancelflags & PTHREAD_CANCEL_DISABLE) == 0) &&
- ((curthread->cancelflags & PTHREAD_CANCELLING) != 0) &&
- ((curthread->flags & PTHREAD_EXITING) == 0)) {
- /*
- * It is possible for this thread to be swapped out
- * while performing cancellation; do not allow it
- * to be cancelled again.
- */
- curthread->cancelflags &= ~PTHREAD_CANCELLING;
- _thread_critical_exit(curthread);
- _thread_exit_cleanup();
- pthread_exit(PTHREAD_CANCELED);
- PANIC("cancel");
+ if (curthread->cancelmode != M_OFF) {
+
+ /* Cleanup a canceled thread only once. */
+ if (atomic_cmpset_int(&curthread->cancellation,
+ (int)CS_PENDING, (int)CS_SET) == 1) {
+ _thread_exit_cleanup();
+ pthread_exit(PTHREAD_CANCELED);
+ PANIC("cancel");
+ }
}
}
void
_thread_enter_cancellation_point(void)
{
- _thread_critical_enter(curthread);
testcancel();
- curthread->cancelflags |= PTHREAD_AT_CANCEL_POINT;
- _thread_critical_exit(curthread);
+ curthread->cancellationpoint = 1;
}
void
_thread_leave_cancellation_point(void)
{
- _thread_critical_enter(curthread);
- curthread->cancelflags &= ~PTHREAD_AT_CANCEL_POINT;
+ curthread->cancellationpoint = 0;
testcancel();
- _thread_critical_exit(curthread);
-
}
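
The hunks above replace the old cancelflags bookkeeping with a small state machine: _pthread_cancel() moves cancellation from CS_NULL to CS_PENDING, and testcancel() moves it from CS_PENDING to CS_SET exactly once before exiting the thread. The sketch below shows just that hand-off using the same FreeBSD atomic_cmpset_int(9) primitive; request_cancel() and test_cancel() are illustrative stand-ins, not libthr functions, and the program is FreeBSD-specific because of <machine/atomic.h>:

#include <sys/types.h>
#include <machine/atomic.h>     /* atomic_cmpset_int(9) */
#include <stdio.h>

enum cancellation_state { CS_NULL, CS_PENDING, CS_SET };

static volatile u_int cancellation = CS_NULL;

/* Async-cancel-safe: only the first caller flips CS_NULL -> CS_PENDING. */
static int
request_cancel(void)
{
        return (atomic_cmpset_int(&cancellation, CS_NULL, CS_PENDING));
}

/* Acted on exactly once: a single caller wins the CS_PENDING -> CS_SET race. */
static void
test_cancel(void)
{
        if (atomic_cmpset_int(&cancellation, CS_PENDING, CS_SET))
                printf("cancelling\n");         /* libthr would pthread_exit() here */
}

int
main(void)
{
        request_cancel();       /* mark as cancelled (CS_PENDING) */
        test_cancel();          /* first test acts on it */
        test_cancel();          /* later tests are no-ops */
        return (0);
}
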
diff --git a/lib/libthr/thread/thr_cond.c b/lib/libthr/thread/thr_cond.c
index 9597816..dedd3c2 100644
--- a/lib/libthr/thread/thr_cond.c
+++ b/lib/libthr/thread/thr_cond.c
@@ -200,13 +200,9 @@ cond_wait_common(pthread_cond_t * cond, pthread_mutex_t * mutex,
const struct timespec * abstime)
{
int rval = 0;
- int done = 0;
- int seqno;
int mtxrval;
- _thread_enter_cancellation_point();
-
if (cond == NULL)
return (EINVAL);
/*
@@ -216,6 +212,8 @@ cond_wait_common(pthread_cond_t * cond, pthread_mutex_t * mutex,
if (*cond == PTHREAD_COND_INITIALIZER && (rval = cond_init(cond)) != 0)
return (rval);
+ if ((*cond)->c_type != COND_TYPE_FAST)
+ return (EINVAL);
COND_LOCK(*cond);
@@ -228,124 +226,86 @@ cond_wait_common(pthread_cond_t * cond, pthread_mutex_t * mutex,
(*cond)->c_flags |= COND_FLAGS_INITED;
}
- /* Process according to condition variable type. */
+ if ((mutex == NULL) || (((*cond)->c_mutex != NULL) &&
+ ((*cond)->c_mutex != *mutex))) {
+ COND_UNLOCK(*cond);
+ return (EINVAL);
+ }
+ /* Remember the mutex */
+ (*cond)->c_mutex = *mutex;
- switch ((*cond)->c_type) {
- /* Fast condition variable: */
- case COND_TYPE_FAST:
- if ((mutex == NULL) || (((*cond)->c_mutex != NULL) &&
- ((*cond)->c_mutex != *mutex))) {
- COND_UNLOCK(*cond);
- rval = EINVAL;
- break;
- }
- /* Remember the mutex */
- (*cond)->c_mutex = *mutex;
-
- if ((rval = _mutex_cv_unlock(mutex)) != 0) {
- if (rval == -1){
- printf("foo");
- fflush(stdout);
- abort();
- }
-
- COND_UNLOCK(*cond);
- break;
+ _thread_enter_cancellation_point();
+ if ((rval = _mutex_cv_unlock(mutex)) != 0) {
+ if (rval == -1){
+ printf("mutex unlock by condvar failed!");
+ fflush(stdout);
+ abort();
}
+ _thread_leave_cancellation_point();
+ COND_UNLOCK(*cond);
+ return (rval);
+ }
- /*
- * We need to protect the queue operations. It also
- * protects c_seqno and the pthread flag fields. This is
- * dropped before calling _thread_suspend() and reaquired
- * when we return.
- */
-
- _thread_critical_enter(curthread);
- /*
- * c_seqno is protected.
- */
- seqno = (*cond)->c_seqno;
-
- do {
- /*
- * Queue the running thread on the condition
- * variable.
- */
- cond_queue_enq(*cond, curthread);
-
- if (curthread->cancelflags & PTHREAD_CANCELLING) {
- /*
- * POSIX Says that we must relock the mutex
- * even if we're being canceled.
- */
- _thread_critical_exit(curthread);
- COND_UNLOCK(*cond);
- _mutex_cv_lock(mutex);
- pthread_testcancel();
- PANIC("Shouldn't have come back.");
- }
-
- PTHREAD_SET_STATE(curthread, PS_COND_WAIT);
- _thread_critical_exit(curthread);
- COND_UNLOCK(*cond);
- rval = _thread_suspend(curthread, (struct timespec *)abstime);
- if (rval != 0 && rval != ETIMEDOUT && rval != EINTR) {
- printf("foo");
- fflush(stdout);
- abort();
- }
- COND_LOCK(*cond);
- _thread_critical_enter(curthread);
-
- done = (seqno != (*cond)->c_seqno);
+ /*
+ * We need to protect the queue operations. It also
+ * protects the pthread flag field. This is
+ * dropped before calling _thread_suspend() and reacquired
+ * when we return.
+ */
+ PTHREAD_LOCK(curthread);
+ /*
+ * Queue the running thread on the condition
+ * variable and wait to be signaled.
+ */
+ cond_queue_enq(*cond, curthread);
+ do {
+ PTHREAD_UNLOCK(curthread);
+ COND_UNLOCK(*cond);
+ if (curthread->cancellation == CS_PENDING) {
/*
- * If we timed out, this will remove us from the
- * queue. Otherwise, if we were signaled it does
- * nothing because this thread won't be on the queue.
+ * Posix says we must lock the mutex
+ * even if we're being canceled.
*/
- cond_queue_remove(*cond, curthread);
-
- } while ((done == 0) && (rval == 0));
- /*
- * If we timed out someone still may have signaled us
- * before we got a chance to run again. We check for
- * this by looking to see if our state is RUNNING.
- */
- if (rval == ETIMEDOUT) {
- if (curthread->state != PS_RUNNING) {
- PTHREAD_SET_STATE(curthread, PS_RUNNING);
- } else
- rval = 0;
+ _mutex_cv_lock(mutex);
+ _thread_leave_cancellation_point();
+ PANIC("Shouldn't have come back.");
}
- _thread_critical_exit(curthread);
- COND_UNLOCK(*cond);
-
- mtxrval = _mutex_cv_lock(mutex);
-
- /*
- * If the mutex failed return that error, otherwise we're
- * returning ETIMEDOUT.
- */
- if (mtxrval == -1) {
- printf("foo");
+ rval = _thread_suspend(curthread, (struct timespec *)abstime);
+ if (rval != 0 && rval != ETIMEDOUT && rval != EINTR) {
+ printf("thread suspend returned an invalid value");
fflush(stdout);
abort();
}
- if (mtxrval != 0)
- rval = mtxrval;
+ COND_LOCK(*cond);
+ PTHREAD_LOCK(curthread);
+ if (rval == ETIMEDOUT) {
+ /*
+ * Condition may have been signaled between the
+ * time the thread timed out and locked the condvar.
+ * If it wasn't, manually remove it from the queue.
+ */
+ if ((curthread->flags & PTHREAD_FLAGS_IN_CONDQ) == 0)
+ rval = 0;
+ else
+ cond_queue_remove(*cond, curthread);
+ }
+ } while ((curthread->flags & PTHREAD_FLAGS_IN_CONDQ) != 0);
- break;
+ PTHREAD_UNLOCK(curthread);
+ COND_UNLOCK(*cond);
+ mtxrval = _mutex_cv_lock(mutex);
- /* Trap invalid condition variable types: */
- default:
- COND_UNLOCK(*cond);
- rval = EINVAL;
- break;
+ /* If the mutex failed return that error. */
+ if (mtxrval == -1) {
+ printf("mutex lock from condvar failed!");
+ fflush(stdout);
+ abort();
}
+ if (mtxrval != 0)
+ rval = mtxrval;
_thread_leave_cancellation_point();
-
return (rval);
}
@@ -376,40 +336,26 @@ cond_signal(pthread_cond_t * cond, int broadcast)
if (*cond == PTHREAD_COND_INITIALIZER && (rval = cond_init(cond)) != 0)
return (rval);
+ if ((*cond)->c_type != COND_TYPE_FAST)
+ return (EINVAL);
COND_LOCK(*cond);
- /* Process according to condition variable type: */
- switch ((*cond)->c_type) {
- /* Fast condition variable: */
- case COND_TYPE_FAST:
- (*cond)->c_seqno++;
-
+ /*
+ * Enter a loop to bring all (or only one) threads off the
+ * condition queue:
+ */
+ do {
/*
- * Enter a loop to bring all (or only one) threads off the
- * condition queue:
+ * Wake up the signaled thread. It will be returned
+ * to us locked.
*/
- do {
- /*
- * Wake up the signaled thread. It will be returned
- * to us locked, and with signals disabled.
- */
- if ((pthread = cond_queue_deq(*cond)) != NULL) {
- PTHREAD_NEW_STATE(pthread, PS_RUNNING);
- _thread_critical_exit(pthread);
- }
- } while (broadcast && pthread != NULL);
-
- break;
-
- /* Trap invalid condition variable types: */
- default:
- rval = EINVAL;
- break;
- }
+ if ((pthread = cond_queue_deq(*cond)) != NULL) {
+ PTHREAD_WAKE(pthread);
+ PTHREAD_UNLOCK(pthread);
+ }
+ } while (broadcast && pthread != NULL);
COND_UNLOCK(*cond);
-
-
return (rval);
}
@@ -443,20 +389,17 @@ cond_queue_deq(pthread_cond_t cond)
pthread_t pthread;
while ((pthread = TAILQ_FIRST(&cond->c_queue)) != NULL) {
- _thread_critical_enter(pthread);
- TAILQ_REMOVE(&cond->c_queue, pthread, sqe);
+ PTHREAD_LOCK(pthread);
cond_queue_remove(cond, pthread);
- if ((pthread->cancelflags & PTHREAD_CANCELLING) == 0 &&
- pthread->state == PS_COND_WAIT)
- /*
- * Only exit the loop when we find a thread
- * that hasn't timed out or been canceled;
- * those threads are already running and don't
- * need their run state changed.
- */
+
+ /*
+ * Only exit the loop when we find a thread
+ * that hasn't been canceled.
+ */
+ if (pthread->cancellation == CS_NULL)
break;
else
- _thread_critical_exit(pthread);
+ PTHREAD_UNLOCK(pthread);
}
return(pthread);
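
The rewritten wait path above tolerates spurious and racing wakeups: a thread that times out but has already been taken off the condition queue reports success, and waiters re-queue until they are genuinely signaled. That is exactly the behaviour the standard caller-side pattern assumes, sketched below; the ready flag and producer() are illustrative names, not libthr code:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static int ready = 0;

static void *
producer(void *arg)
{
        pthread_mutex_lock(&lock);
        ready = 1;                      /* publish the predicate ... */
        pthread_cond_signal(&cond);     /* ... then wake one waiter */
        pthread_mutex_unlock(&lock);
        return (NULL);
}

int
main(void)
{
        pthread_t td;

        pthread_create(&td, NULL, producer, NULL);
        pthread_mutex_lock(&lock);
        while (!ready)                  /* loop: wakeups may be spurious */
                pthread_cond_wait(&cond, &lock);
        pthread_mutex_unlock(&lock);
        pthread_join(td, NULL);
        printf("consumer saw ready\n");
        return (0);
}
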
diff --git a/lib/libthr/thread/thr_create.c b/lib/libthr/thread/thr_create.c
index 5f1887d..27b19c6 100644
--- a/lib/libthr/thread/thr_create.c
+++ b/lib/libthr/thread/thr_create.c
@@ -48,7 +48,6 @@ static u_int64_t next_uniqueid = 1;
#define OFF(f) offsetof(struct pthread, f)
int _thread_next_offset = OFF(tle.tqe_next);
int _thread_uniqueid_offset = OFF(uniqueid);
-int _thread_state_offset = OFF(state);
int _thread_name_offset = OFF(name);
int _thread_ctx_offset = OFF(ctx);
#undef OFF
diff --git a/lib/libthr/thread/thr_detach.c b/lib/libthr/thread/thr_detach.c
index 7d87d05..1d7d334 100644
--- a/lib/libthr/thread/thr_detach.c
+++ b/lib/libthr/thread/thr_detach.c
@@ -41,14 +41,21 @@ __weak_reference(_pthread_detach, pthread_detach);
int
_pthread_detach(pthread_t pthread)
{
+ int error;
+
if (pthread->magic != PTHREAD_MAGIC)
return (EINVAL);
- UMTX_LOCK(&pthread->lock);
+ PTHREAD_LOCK(pthread);
if ((pthread->attr.flags & PTHREAD_DETACHED) != 0) {
- UMTX_UNLOCK(&pthread->lock);
- return ((pthread->state == PS_DEAD) ? ESRCH : EINVAL);
+ _thread_sigblock();
+ DEAD_LIST_LOCK;
+ error = pthread->isdead ? ESRCH : EINVAL;
+ DEAD_LIST_UNLOCK;
+ _thread_sigunblock();
+ PTHREAD_UNLOCK(pthread);
+ return (error);
}
pthread->attr.flags |= PTHREAD_DETACHED;
@@ -56,10 +63,6 @@ _pthread_detach(pthread_t pthread)
/* Check if there is a joiner: */
if (pthread->joiner != NULL) {
struct pthread *joiner = pthread->joiner;
- _thread_critical_enter(joiner);
-
- /* Make the thread runnable: */
- PTHREAD_NEW_STATE(joiner, PS_RUNNING);
/* Set the return value for the woken thread: */
joiner->join_status.error = ESRCH;
@@ -70,10 +73,10 @@ _pthread_detach(pthread_t pthread)
* Disconnect the joiner from the thread being detached:
*/
pthread->joiner = NULL;
- _thread_critical_exit(joiner);
+ PTHREAD_WAKE(joiner);
}
- UMTX_UNLOCK(&pthread->lock);
+ PTHREAD_UNLOCK(pthread);
return (0);
}
diff --git a/lib/libthr/thread/thr_exit.c b/lib/libthr/thread/thr_exit.c
index 0455a97..f71c59a 100644
--- a/lib/libthr/thread/thr_exit.c
+++ b/lib/libthr/thread/thr_exit.c
@@ -96,18 +96,23 @@ _thread_exit_cleanup(void)
void
_pthread_exit(void *status)
{
- pthread_t pthread, joiner;
+ struct pthread *pthread;
int exitNow = 0;
+ /*
+ * This thread will no longer handle any signals.
+ */
+ _thread_sigblock();
+
/* Check if this thread is already in the process of exiting: */
- if ((curthread->flags & PTHREAD_EXITING) != 0) {
+ if (curthread->exiting) {
char msg[128];
snprintf(msg, sizeof(msg), "Thread %p has called pthread_exit() from a destructor. POSIX 1003.1 1996 s16.2.5.2 does not allow this!",curthread);
PANIC(msg);
}
/* Flag this thread as exiting: */
- curthread->flags |= PTHREAD_EXITING;
+ curthread->exiting = 1;
/* Save the return value: */
curthread->ret = status;
@@ -130,45 +135,24 @@ _pthread_exit(void *status)
*/
if (curthread->rwlockList != NULL)
free(curthread->rwlockList);
-retry:
- /*
- * Proper lock order, to minimize deadlocks, between joining
- * and exiting threads is: DEAD_LIST, THREAD_LIST, exiting, joiner.
- * In order to do this *and* protect from races, we must resort
- * this test-and-retry loop.
- */
- joiner = curthread->joiner;
/* Lock the dead list first to maintain correct lock order */
DEAD_LIST_LOCK;
THREAD_LIST_LOCK;
- _thread_critical_enter(curthread);
-
- if (joiner != curthread->joiner) {
- _thread_critical_exit(curthread);
- THREAD_LIST_UNLOCK;
- DEAD_LIST_UNLOCK;
- goto retry;
- }
/* Check if there is a thread joining this one: */
if (curthread->joiner != NULL) {
pthread = curthread->joiner;
- UMTX_LOCK(&pthread->lock);
curthread->joiner = NULL;
- /* Make the joining thread runnable: */
- PTHREAD_NEW_STATE(pthread, PS_RUNNING);
-
/* Set the return value for the joining thread: */
pthread->join_status.ret = curthread->ret;
pthread->join_status.error = 0;
pthread->join_status.thread = NULL;
- UMTX_UNLOCK(&pthread->lock);
- /* Make this thread collectable by the garbage collector. */
- PTHREAD_ASSERT(((curthread->attr.flags & PTHREAD_DETACHED) ==
- 0), "Cannot join a detached thread");
+ /* Make the joining thread runnable: */
+ PTHREAD_WAKE(pthread);
+
curthread->attr.flags |= PTHREAD_DETACHED;
}
@@ -180,8 +164,7 @@ retry:
deadlist_free_threads();
TAILQ_INSERT_HEAD(&_dead_list, curthread, dle);
TAILQ_REMOVE(&_thread_list, curthread, tle);
- PTHREAD_SET_STATE(curthread, PS_DEAD);
- _thread_critical_exit(curthread);
+ curthread->isdead = 1;
/* If we're the last thread, call it quits */
if (TAILQ_EMPTY(&_thread_list))
diff --git a/lib/libthr/thread/thr_init.c b/lib/libthr/thread/thr_init.c
index cb2cd2a..98c355a 100644
--- a/lib/libthr/thread/thr_init.c
+++ b/lib/libthr/thread/thr_init.c
@@ -167,8 +167,9 @@ init_td_common(struct pthread *td, struct pthread_attr *attrp, int reinit)
*/
if (!reinit) {
memset(td, 0, sizeof(struct pthread));
- td->cancelflags = PTHREAD_CANCEL_ENABLE |
- PTHREAD_CANCEL_DEFERRED;
+ td->cancelmode = M_DEFERRED;
+ td->cancelstate = M_DEFERRED;
+ td->cancellation = CS_NULL;
memcpy(&td->attr, attrp, sizeof(struct pthread_attr));
td->magic = PTHREAD_MAGIC;
TAILQ_INIT(&td->mutexq);
@@ -241,7 +242,6 @@ _thread_init(void)
{
struct pthread *pthread;
int fd;
- int i;
size_t len;
int mib[2];
int error;
@@ -334,21 +334,6 @@ _thread_init(void)
getcontext(&pthread->ctx);
pthread->ctx.uc_stack.ss_sp = pthread->stack;
pthread->ctx.uc_stack.ss_size = PTHREAD_STACK_INITIAL;
-
- /* Initialise the state of the initial thread: */
- pthread->state = PS_RUNNING;
-
- /* Enter a loop to get the existing signal status: */
- for (i = 1; i < NSIG; i++) {
- /* Check for signals which cannot be trapped. */
- if (i == SIGKILL || i == SIGSTOP)
- continue;
-
- /* Get the signal handler details. */
- if (__sys_sigaction(i, NULL,
- &_thread_sigact[i - 1]) != 0)
- PANIC("Cannot read signal handler info");
- }
}
/*
diff --git a/lib/libthr/thread/thr_join.c b/lib/libthr/thread/thr_join.c
index fd7cff5..8ff4c1a 100644
--- a/lib/libthr/thread/thr_join.c
+++ b/lib/libthr/thread/thr_join.c
@@ -44,21 +44,15 @@ _pthread_join(pthread_t pthread, void **thread_return)
int ret = 0;
pthread_t thread;
- _thread_enter_cancellation_point();
-
/* Check if the caller has specified an invalid thread: */
- if (pthread->magic != PTHREAD_MAGIC) {
+ if (pthread->magic != PTHREAD_MAGIC)
/* Invalid thread: */
- _thread_leave_cancellation_point();
return(EINVAL);
- }
/* Check if the caller has specified itself: */
- if (pthread == curthread) {
+ if (pthread == curthread)
/* Avoid a deadlock condition: */
- _thread_leave_cancellation_point();
return(EDEADLK);
- }
/*
* Search for the specified thread in the list of active threads. This
@@ -66,35 +60,38 @@ _pthread_join(pthread_t pthread, void **thread_return)
* the searches in _thread_list and _dead_list (as well as setting up
* join/detach state) have to be done atomically.
*/
+ _thread_sigblock();
DEAD_LIST_LOCK;
THREAD_LIST_LOCK;
- TAILQ_FOREACH(thread, &_thread_list, tle)
- if (thread == pthread) {
- UMTX_LOCK(&pthread->lock);
- break;
+ if (!pthread->isdead) {
+ TAILQ_FOREACH(thread, &_thread_list, tle) {
+ if (thread == pthread) {
+ PTHREAD_LOCK(pthread);
+ break;
+ }
}
-
- if (thread == NULL)
- /*
- * Search for the specified thread in the list of dead threads:
- */
- TAILQ_FOREACH(thread, &_dead_list, dle)
+ } else {
+ TAILQ_FOREACH(thread, &_dead_list, dle) {
if (thread == pthread) {
- UMTX_LOCK(&pthread->lock);
+ PTHREAD_LOCK(pthread);
break;
}
+ }
+ }
/* Check if the thread was not found or has been detached: */
if (thread == NULL) {
THREAD_LIST_UNLOCK;
DEAD_LIST_UNLOCK;
+ _thread_sigunblock();
ret = ESRCH;
goto out;
}
if ((pthread->attr.flags & PTHREAD_DETACHED) != 0) {
- UMTX_UNLOCK(&pthread->lock);
+ PTHREAD_UNLOCK(pthread);
THREAD_LIST_UNLOCK;
DEAD_LIST_UNLOCK;
+ _thread_sigunblock();
ret = EINVAL;
goto out;
}
@@ -102,44 +99,60 @@ _pthread_join(pthread_t pthread, void **thread_return)
if (pthread->joiner != NULL) {
/* Multiple joiners are not supported. */
/* XXXTHR - support multiple joiners. */
- UMTX_UNLOCK(&pthread->lock);
+ PTHREAD_UNLOCK(pthread);
THREAD_LIST_UNLOCK;
DEAD_LIST_UNLOCK;
+ _thread_sigunblock();
ret = ENOTSUP;
goto out;
}
/* Check if the thread is not dead: */
- if (pthread->state != PS_DEAD) {
+ if (!pthread->isdead) {
/* Set the running thread to be the joiner: */
pthread->joiner = curthread;
- UMTX_UNLOCK(&pthread->lock);
- _thread_critical_enter(curthread);
+ PTHREAD_UNLOCK(pthread);
/* Keep track of which thread we're joining to: */
curthread->join_status.thread = pthread;
while (curthread->join_status.thread == pthread) {
- PTHREAD_SET_STATE(curthread, PS_JOIN);
/* Wait for our signal to wake up. */
- _thread_critical_exit(curthread);
THREAD_LIST_UNLOCK;
DEAD_LIST_UNLOCK;
+ _thread_sigunblock();
+ if (curthread->cancellation != CS_NULL)
+ pthread->joiner = NULL;
+ _thread_enter_cancellation_point();
+
+ /*
+ * XXX - Workaround to make a join a cancellation
+ * point. Must find a better solution.
+ */
+ PTHREAD_LOCK(curthread);
+ curthread->flags |= PTHREAD_FLAGS_SUSPENDED;
+ PTHREAD_UNLOCK(curthread);
ret = _thread_suspend(curthread, NULL);
if (ret != 0 && ret != EAGAIN && ret != EINTR)
PANIC("Unable to suspend in join.");
+ PTHREAD_LOCK(curthread);
+ curthread->flags &= ~PTHREAD_FLAGS_SUSPENDED;
+ PTHREAD_UNLOCK(curthread);
+ if (curthread->cancellation != CS_NULL)
+ pthread->joiner = NULL;
+ _thread_leave_cancellation_point();
/*
* XXX - For correctness reasons.
* We must acquire these in the same order and also
- * importantly, release in the same order, order because
+ * importantly, release in the same order because
* otherwise we might deadlock with the joined thread
* when we attempt to release one of these locks.
*/
+ _thread_sigblock();
DEAD_LIST_LOCK;
THREAD_LIST_LOCK;
- _thread_critical_enter(curthread);
}
/*
@@ -149,9 +162,9 @@ _pthread_join(pthread_t pthread, void **thread_return)
ret = curthread->join_status.error;
if ((ret == 0) && (thread_return != NULL))
*thread_return = curthread->join_status.ret;
- _thread_critical_exit(curthread);
THREAD_LIST_UNLOCK;
DEAD_LIST_UNLOCK;
+ _thread_sigunblock();
} else {
/*
* The thread exited (is dead) without being detached, and no
@@ -166,11 +179,12 @@ _pthread_join(pthread_t pthread, void **thread_return)
/* Free all remaining memory allocated to the thread. */
pthread->attr.flags |= PTHREAD_DETACHED;
- UMTX_UNLOCK(&pthread->lock);
+ PTHREAD_UNLOCK(pthread);
TAILQ_REMOVE(&_dead_list, pthread, dle);
deadlist_free_onethread(pthread);
THREAD_LIST_UNLOCK;
DEAD_LIST_UNLOCK;
+ _thread_sigunblock();
}
out:
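
For reference, the join_status fields updated above are what carry a thread's return value back to its joiner. A minimal usage sketch (illustrative program only):

#include <pthread.h>
#include <stdio.h>

static void *
worker(void *arg)
{
        return ((void *)(long)42);      /* value handed to the joiner */
}

int
main(void)
{
        pthread_t td;
        void *ret;

        pthread_create(&td, NULL, worker, NULL);
        pthread_join(td, &ret);         /* blocks until worker() exits */
        printf("worker returned %ld\n", (long)ret);
        return (0);
}
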
diff --git a/lib/libthr/thread/thr_mutex.c b/lib/libthr/thread/thr_mutex.c
index 8bf68c1..46a0625 100644
--- a/lib/libthr/thread/thr_mutex.c
+++ b/lib/libthr/thread/thr_mutex.c
@@ -234,9 +234,9 @@ acquire_mutex(struct pthread_mutex *mtx, struct pthread *ptd)
{
mtx->m_owner = ptd;
_MUTEX_ASSERT_NOT_OWNED(mtx);
- _thread_critical_enter(ptd);
+ PTHREAD_LOCK(ptd);
TAILQ_INSERT_TAIL(&ptd->mutexq, mtx, m_qe);
- _thread_critical_exit(ptd);
+ PTHREAD_UNLOCK(ptd);
}
/*
@@ -261,7 +261,7 @@ mutex_attach_to_next_pthread(struct pthread_mutex *mtx)
if ((ptd = mutex_queue_deq(mtx)) != NULL) {
TAILQ_INSERT_TAIL(&ptd->mutexq, mtx, m_qe);
ptd->data.mutex = NULL;
- PTHREAD_NEW_STATE(ptd, PS_RUNNING);
+ PTHREAD_WAKE(ptd);
}
mtx->m_owner = ptd;
}
@@ -385,7 +385,7 @@ retry:
/*
* The mutex is now owned by curthread.
*/
- _thread_critical_enter(curthread);
+ PTHREAD_LOCK(curthread);
/*
* The mutex's priority may have changed while waiting for it.
@@ -394,8 +394,8 @@ retry:
curthread->active_priority > (*mutex)->m_prio) {
mutex_attach_to_next_pthread(*mutex);
if ((*mutex)->m_owner != NULL)
- _thread_critical_exit((*mutex)->m_owner);
- _thread_critical_exit(curthread);
+ PTHREAD_UNLOCK((*mutex)->m_owner);
+ PTHREAD_UNLOCK(curthread);
_SPINUNLOCK(&(*mutex)->lock);
return (EINVAL);
}
@@ -417,7 +417,7 @@ retry:
/* Nothing */
break;
}
- _thread_critical_exit(curthread);
+ PTHREAD_UNLOCK(curthread);
out:
_SPINUNLOCK(&(*mutex)->lock);
return (error);
@@ -452,14 +452,14 @@ adjust_prio_inheritance(struct pthread *ptd)
tempTd = TAILQ_FIRST(&tempMtx->m_queue);
if (tempTd != NULL) {
- UMTX_LOCK(&tempTd->lock);
+ PTHREAD_LOCK(tempTd);
if (tempTd->active_priority > ptd->active_priority) {
ptd->inherited_priority =
tempTd->active_priority;
ptd->active_priority =
tempTd->active_priority;
}
- UMTX_UNLOCK(&tempTd->lock);
+ PTHREAD_UNLOCK(tempTd);
}
_SPINUNLOCK(&tempMtx->lock);
}
@@ -641,7 +641,7 @@ mutex_self_lock(pthread_mutex_t mutex, int noblock)
*/
if (noblock)
return (EBUSY);
- PTHREAD_SET_STATE(curthread, PS_DEADLOCK);
+ curthread->isdeadlocked = 1;
_SPINUNLOCK(&(mutex)->lock);
_thread_suspend(curthread, NULL);
PANIC("Shouldn't resume here?\n");
@@ -683,10 +683,10 @@ mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
* Release the mutex from this thread and attach it to
* the next thread in the queue, if there is one waiting.
*/
- _thread_critical_enter(curthread);
+ PTHREAD_LOCK(curthread);
mutex_attach_to_next_pthread(*mutex);
if ((*mutex)->m_owner != NULL)
- _thread_critical_exit((*mutex)->m_owner);
+ PTHREAD_UNLOCK((*mutex)->m_owner);
if (add_reference != 0) {
/* Increment the reference count: */
(*mutex)->m_refcount++;
@@ -717,7 +717,7 @@ mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
/* Nothing */
break;
}
- _thread_critical_exit(curthread);
+ PTHREAD_UNLOCK(curthread);
return (0);
}
@@ -759,24 +759,20 @@ mutex_queue_deq(pthread_mutex_t mutex)
pthread_t pthread;
while ((pthread = TAILQ_FIRST(&mutex->m_queue)) != NULL) {
- _thread_critical_enter(pthread);
+ PTHREAD_LOCK(pthread);
TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ;
/*
* Only exit the loop if the thread hasn't been
- * cancelled.
+ * asynchronously cancelled.
*/
- if (((pthread->cancelflags & PTHREAD_CANCELLING) == 0 ||
- (pthread->cancelflags & PTHREAD_CANCEL_DISABLE) != 0 ||
- ((pthread->cancelflags & PTHREAD_CANCELLING) != 0 &&
- (pthread->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS) == 0)) &&
- pthread->state == PS_MUTEX_WAIT)
- break;
+ if (pthread->cancelmode == M_ASYNC &&
+ pthread->cancellation != CS_NULL)
+ continue;
else
- _thread_critical_exit(pthread);
+ break;
}
-
return (pthread);
}
@@ -824,7 +820,7 @@ mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread)
}
if (mutex->m_protocol == PTHREAD_PRIO_INHERIT &&
pthread == TAILQ_FIRST(&mutex->m_queue)) {
- UMTX_LOCK(&mutex->m_owner->lock);
+ PTHREAD_LOCK(mutex->m_owner);
if (pthread->active_priority >
mutex->m_owner->active_priority) {
mutex->m_owner->inherited_priority =
@@ -832,7 +828,7 @@ mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread)
mutex->m_owner->active_priority =
pthread->active_priority;
}
- UMTX_UNLOCK(&mutex->m_owner->lock);
+ PTHREAD_UNLOCK(mutex->m_owner);
}
pthread->flags |= PTHREAD_FLAGS_IN_MUTEXQ;
}
@@ -843,14 +839,14 @@ mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread)
void
readjust_priorities(struct pthread *pthread, struct pthread_mutex *mtx)
{
- if (pthread->state == PS_MUTEX_WAIT) {
+ if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) {
mutex_queue_remove(mtx, pthread);
mutex_queue_enq(mtx, pthread);
- UMTX_LOCK(&mtx->m_owner->lock);
+ PTHREAD_LOCK(mtx->m_owner);
adjust_prio_inheritance(mtx->m_owner);
if (mtx->m_owner->prio_protect_count > 0)
adjust_prio_protection(mtx->m_owner);
- UMTX_UNLOCK(&mtx->m_owner->lock);
+ PTHREAD_UNLOCK(mtx->m_owner);
}
if (pthread->prio_inherit_count > 0)
adjust_prio_inheritance(pthread);
@@ -883,41 +879,36 @@ get_mcontested(pthread_mutex_t mutexp, const struct timespec *abstime)
* threads are concerned) setting of the thread state with
* its status on the mutex queue.
*/
- _thread_critical_enter(curthread);
+ PTHREAD_LOCK(curthread);
mutex_queue_enq(mutexp, curthread);
do {
- if ((curthread->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS) != 0 &&
- (curthread->cancelflags & PTHREAD_CANCEL_DISABLE) == 0 &&
- (curthread->cancelflags & PTHREAD_CANCELLING) != 0) {
+ if (curthread->cancelmode == M_ASYNC &&
+ curthread->cancellation != CS_NULL) {
mutex_queue_remove(mutexp, curthread);
- _thread_critical_exit(curthread);
+ PTHREAD_UNLOCK(curthread);
_SPINUNLOCK(&mutexp->lock);
pthread_testcancel();
}
- PTHREAD_SET_STATE(curthread, PS_MUTEX_WAIT);
curthread->data.mutex = mutexp;
- _thread_critical_exit(curthread);
+ PTHREAD_UNLOCK(curthread);
_SPINUNLOCK(&mutexp->lock);
error = _thread_suspend(curthread, abstime);
if (error != 0 && error != ETIMEDOUT && error != EINTR)
PANIC("Cannot suspend on mutex.");
_SPINLOCK(&mutexp->lock);
- _thread_critical_enter(curthread);
+ PTHREAD_LOCK(curthread);
if (error == ETIMEDOUT) {
/*
* Between the timeout and when the mutex was
* locked the previous owner may have released
* the mutex to this thread. Or not.
*/
- if (mutexp->m_owner == curthread) {
+ if (mutexp->m_owner == curthread)
error = 0;
- } else {
+ else
_mutex_lock_backout(curthread);
- curthread->state = PS_RUNNING;
- error = ETIMEDOUT;
- }
}
} while ((curthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0);
- _thread_critical_exit(curthread);
+ PTHREAD_UNLOCK(curthread);
return (error);
}
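
The ETIMEDOUT handling in get_mcontested() above covers the race where the previous owner hands the mutex over just as the timeout fires. From an application's point of view this is simply pthread_mutex_timedlock() either acquiring the mutex or returning ETIMEDOUT; a sketch of that usage follows (the holder thread, the one-second head start and the two-second deadline are all illustrative, and pthread_mutex_timedlock() is the POSIX interface rather than anything added by this diff):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;

static void *
holder(void *arg)
{
        pthread_mutex_lock(&m);
        sleep(5);                       /* hold the mutex past the deadline */
        pthread_mutex_unlock(&m);
        return (NULL);
}

int
main(void)
{
        pthread_t td;
        struct timespec abstime;

        pthread_create(&td, NULL, holder, NULL);
        sleep(1);                       /* let holder() grab the mutex first */

        clock_gettime(CLOCK_REALTIME, &abstime);
        abstime.tv_sec += 2;            /* give up after roughly two seconds */
        if (pthread_mutex_timedlock(&m, &abstime) == ETIMEDOUT)
                printf("timed out waiting for the mutex\n");

        pthread_join(td, NULL);
        return (0);
}
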
diff --git a/lib/libthr/thread/thr_private.h b/lib/libthr/thread/thr_private.h
index 22ea185..9263a8b 100644
--- a/lib/libthr/thread/thr_private.h
+++ b/lib/libthr/thread/thr_private.h
@@ -128,21 +128,10 @@
abort(); \
} while (0)
+#define PTHREAD_LOCK(p) UMTX_LOCK(&(p)->lock)
+#define PTHREAD_UNLOCK(p) UMTX_UNLOCK(&(p)->lock)
-/*
- * State change macro:
- */
-#define PTHREAD_SET_STATE(thrd, newstate) do { \
- (thrd)->state = newstate; \
- (thrd)->fname = __FILE__; \
- (thrd)->lineno = __LINE__; \
-} while (0)
-
-#define PTHREAD_NEW_STATE(thrd, newstate) do { \
- if (newstate == PS_RUNNING) \
- thr_wake(thrd->thr_id); \
- PTHREAD_SET_STATE(thrd, newstate); \
-} while (0)
+#define PTHREAD_WAKE(ptd) thr_wake((ptd)->thr_id)
/*
* TailQ initialization values.
@@ -452,6 +441,26 @@ struct rwlock_held {
LIST_HEAD(rwlock_listhead, rwlock_held);
/*
+ * The cancel mode a thread is in is determined by the
+ * cancel type and state it is set in. The two values
+ * are combined into one mode:
+ * Mode State Type
+ * ---- ----- ----
+ * off disabled deferred
+ * off disabled async
+ * deferred enabled deferred
+ * async enabled async
+ */
+enum cancel_mode { M_OFF, M_DEFERRED, M_ASYNC };
+
+/*
+ * A thread's cancellation is pending until the cancel
+ * mode has been tested to determine if the thread can be
+ * cancelled immediately.
+ */
+enum cancellation_state { CS_NULL, CS_PENDING, CS_SET };
+
+/*
* Thread structure.
*/
struct pthread {
@@ -466,6 +475,12 @@ struct pthread {
thr_id_t thr_id;
sigset_t savedsig;
int signest; /* blocked signal nesting level */
+ int ptdflags; /* used by other threads
+ to signal this thread */
+ int isdead;
+ int isdeadlocked;
+ int exiting;
+ int cancellationpoint;
/*
* Lock for accesses to this thread structure.
@@ -493,19 +508,16 @@ struct pthread {
ucontext_t ctx;
/*
- * Cancelability flags - the lower 2 bits are used by cancel
- * definitions in pthread.h
+ * The primary method of obtaining a thread's cancel state
+ * and type is through cancelmode. The cancelstate field is
+ * only so we don't lose the cancel state when the mode is
+ * turned off.
*/
-#define PTHREAD_AT_CANCEL_POINT 0x0004
-#define PTHREAD_CANCELLING 0x0008
+ enum cancel_mode cancelmode;
+ enum cancel_mode cancelstate;
- /*
- * Protected by Giant.
- */
- int cancelflags;
-
- /* Thread state: */
- enum pthread_state state;
+ /* Specifies if cancellation is pending, acted upon, or neither. */
+ enum cancellation_state cancellation;
/*
* Error variable used instead of errno. The function __error()
@@ -528,15 +540,6 @@ struct pthread {
*
* A thread can also be joining a thread (the joiner field above).
*
- * It must not be possible for a thread to belong to any of the
- * above queues while it is handling a signal. Signal handlers
- * may longjmp back to previous stack frames circumventing normal
- * control flow. This could corrupt queue integrity if the thread
- * retains membership in the queue. Therefore, if a thread is a
- * member of one of these queues when a signal handler is invoked,
- * it must remove itself from the queue before calling the signal
- * handler and reinsert itself after normal return of the handler.
- *
* Use sqe for synchronization (mutex and condition variable) queue
* links.
*/
@@ -548,14 +551,16 @@ struct pthread {
/* Miscellaneous flags; only set with signals deferred. */
int flags;
#define PTHREAD_FLAGS_PRIVATE 0x0001
-#define PTHREAD_EXITING 0x0002
#define PTHREAD_FLAGS_BARR_REL 0x0004 /* has been released from barrier */
+#define PTHREAD_FLAGS_IN_BARRQ 0x0008 /* in barrier queue using sqe link */
#define PTHREAD_FLAGS_IN_CONDQ 0x0080 /* in condition queue using sqe link*/
#define PTHREAD_FLAGS_IN_MUTEXQ 0x0100 /* in mutex queue using sqe link */
#define PTHREAD_FLAGS_SUSPENDED 0x0200 /* thread is suspended */
#define PTHREAD_FLAGS_TRACE 0x0400 /* for debugging purposes */
#define PTHREAD_FLAGS_IN_SYNCQ \
- (PTHREAD_FLAGS_IN_CONDQ | PTHREAD_FLAGS_IN_MUTEXQ)
+ (PTHREAD_FLAGS_IN_CONDQ | PTHREAD_FLAGS_IN_MUTEXQ | PTHREAD_FLAGS_IN_BARRQ)
+#define PTHREAD_FLAGS_NOT_RUNNING \
+ (PTHREAD_FLAGS_IN_SYNCQ | PTHREAD_FLAGS_SUSPENDED)
/*
* Base priority is the user settable and retrievable priority
@@ -773,7 +778,6 @@ void *_thread_cleanup(pthread_t);
void _thread_cleanupspecific(void);
void _thread_dump_info(void);
void _thread_init(void);
-void _thread_sig_wrapper(int sig, siginfo_t *info, void *context);
void _thread_printf(int fd, const char *, ...);
void _thread_start(void);
void _thread_seterrno(pthread_t, int);
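
The cancelmode/cancelstate pair and the cancellation field declared above are driven entirely by the standard pthread_setcancelstate()/pthread_setcanceltype() calls, following the mode table in the comment. A usage sketch of how an application moves through those modes (illustrative program, not libthr code):

#include <pthread.h>
#include <unistd.h>

static void *
worker(void *arg)
{
        int old;

        /* M_OFF: a cancellation request stays pending while disabled. */
        pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &old);
        /* ... work that must not be interrupted ... */
        pthread_setcancelstate(old, NULL);

        /* M_DEFERRED: only cancellation points act on a pending request. */
        pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, NULL);
        for (;;)
                sleep(1);               /* sleep(3) is a cancellation point */
        /* NOTREACHED */
        return (NULL);
}

int
main(void)
{
        pthread_t td;

        pthread_create(&td, NULL, worker, NULL);
        pthread_cancel(td);             /* sets CS_PENDING in the new scheme */
        pthread_join(td, NULL);         /* reaps the cancelled thread */
        return (0);
}
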
diff --git a/lib/libthr/thread/thr_resume_np.c b/lib/libthr/thread/thr_resume_np.c
index 3f56b46..a18d57d 100644
--- a/lib/libthr/thread/thr_resume_np.c
+++ b/lib/libthr/thread/thr_resume_np.c
@@ -49,12 +49,12 @@ _pthread_resume_np(pthread_t thread)
/* Find the thread in the list of active threads: */
if ((ret = _find_thread(thread)) == 0) {
- _thread_critical_enter(thread);
+ PTHREAD_LOCK(thread);
if ((thread->flags & PTHREAD_FLAGS_SUSPENDED) != 0)
resume_common(thread);
- _thread_critical_exit(thread);
+ PTHREAD_UNLOCK(thread);
}
return (ret);
}
@@ -64,16 +64,17 @@ _pthread_resume_all_np(void)
{
struct pthread *thread;
+ _thread_sigblock();
THREAD_LIST_LOCK;
TAILQ_FOREACH(thread, &_thread_list, tle) {
+ PTHREAD_LOCK(thread);
if ((thread != curthread) &&
- ((thread->flags & PTHREAD_FLAGS_SUSPENDED) != 0)) {
- _thread_critical_enter(thread);
+ ((thread->flags & PTHREAD_FLAGS_SUSPENDED) != 0))
resume_common(thread);
- _thread_critical_exit(thread);
- }
+ PTHREAD_UNLOCK(thread);
}
THREAD_LIST_UNLOCK;
+ _thread_sigunblock();
}
/*
diff --git a/lib/libthr/thread/thr_setschedparam.c b/lib/libthr/thread/thr_setschedparam.c
index e90def4..de4eeea 100644
--- a/lib/libthr/thread/thr_setschedparam.c
+++ b/lib/libthr/thread/thr_setschedparam.c
@@ -76,11 +76,11 @@ _pthread_setschedparam(pthread_t pthread, int policy,
* LOR avoidance code.
*/
do {
- _thread_critical_enter(pthread);
- if (pthread->state == PS_MUTEX_WAIT) {
+ PTHREAD_LOCK(pthread);
+ if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) {
mtx = pthread->data.mutex;
if (_spintrylock(&mtx->lock) == EBUSY)
- _thread_critical_exit(pthread);
+ PTHREAD_UNLOCK(pthread);
else
break;
} else {
@@ -115,7 +115,7 @@ _pthread_setschedparam(pthread_t pthread, int policy,
}
pthread->attr.sched_policy = policy;
- _thread_critical_exit(pthread);
+ PTHREAD_UNLOCK(pthread);
if (mtx != NULL)
_SPINUNLOCK(&mtx->lock);
return(0);
diff --git a/lib/libthr/thread/thr_sig.c b/lib/libthr/thread/thr_sig.c
index efc6d5a..8a805af 100644
--- a/lib/libthr/thread/thr_sig.c
+++ b/lib/libthr/thread/thr_sig.c
@@ -93,80 +93,3 @@ _pthread_kill(pthread_t pthread, int sig)
return (thr_kill(pthread->thr_id, sig));
}
-
-/*
- * User thread signal handler wrapper.
- */
-void
-_thread_sig_wrapper(int sig, siginfo_t *info, void *context)
-{
- struct pthread_state_data psd;
- struct sigaction *actp;
- __siginfohandler_t *handler;
- struct umtx *up;
- spinlock_t *sp;
-
- /*
- * Do a little cleanup handling for those threads in
- * queues before calling the signal handler. Signals
- * for these threads are temporarily blocked until
- * after cleanup handling.
- */
- switch (curthread->state) {
- case PS_BARRIER_WAIT:
- /*
- * XXX - The thread has reached the barrier. We can't
- * "back it away" from the barrier.
- */
- _thread_critical_enter(curthread);
- break;
- case PS_COND_WAIT:
- /*
- * Cache the address, since it will not be available
- * after it has been backed out.
- */
- up = &curthread->data.cond->c_lock;
-
- UMTX_LOCK(up);
- _thread_critical_enter(curthread);
- _cond_wait_backout(curthread);
- UMTX_UNLOCK(up);
- break;
- case PS_MUTEX_WAIT:
- /*
- * Cache the address, since it will not be available
- * after it has been backed out.
- */
- sp = &curthread->data.mutex->lock;
-
- _SPINLOCK(sp);
- _thread_critical_enter(curthread);
- _mutex_lock_backout(curthread);
- _SPINUNLOCK(sp);
- break;
- default:
- /*
- * We need to lock the thread to read it's flags.
- */
- _thread_critical_enter(curthread);
- break;
- }
-
- /*
- * We save the flags now so that any modifications done as part
- * of the backout are reflected when the flags are restored.
- */
- psd.psd_flags = curthread->flags;
-
- PTHREAD_SET_STATE(curthread, PS_RUNNING);
- _thread_critical_exit(curthread);
- actp = proc_sigact_sigaction(sig);
- handler = (__siginfohandler_t *)actp->sa_handler;
- handler(sig, info, (ucontext_t *)context);
-
- /* Restore the thread's flags, and make it runnable */
- _thread_critical_enter(curthread);
- curthread->flags = psd.psd_flags;
- PTHREAD_SET_STATE(curthread, PS_RUNNING);
- _thread_critical_exit(curthread);
-}
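
With the in-library signal wrapper removed above, pthread_kill() is a thin shim over thr_kill(2) and delivery follows the normal kernel path. A minimal sketch of directing a signal at one specific thread, using sigwait() so no handler is needed (illustrative program only):

#include <pthread.h>
#include <signal.h>
#include <stdio.h>

static void *
waiter(void *arg)
{
        sigset_t set;
        int sig;

        sigemptyset(&set);
        sigaddset(&set, SIGUSR1);
        sigwait(&set, &sig);            /* block until SIGUSR1 arrives */
        printf("got signal %d\n", sig);
        return (NULL);
}

int
main(void)
{
        pthread_t td;
        sigset_t set;

        /* Block SIGUSR1 so the new thread inherits it blocked. */
        sigemptyset(&set);
        sigaddset(&set, SIGUSR1);
        pthread_sigmask(SIG_BLOCK, &set, NULL);

        pthread_create(&td, NULL, waiter, NULL);
        pthread_kill(td, SIGUSR1);      /* target exactly this thread */
        pthread_join(td, NULL);
        return (0);
}
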
diff --git a/lib/libthr/thread/thr_syscalls.c b/lib/libthr/thread/thr_syscalls.c
index 2d83ab5..f0867d1 100644
--- a/lib/libthr/thread/thr_syscalls.c
+++ b/lib/libthr/thread/thr_syscalls.c
@@ -330,61 +330,6 @@ _select(int numfds, fd_set *readfds, fd_set *writefds, fd_set *exceptfds,
return ret;
}
-__weak_reference(_sigaction, sigaction);
-
-int
-_sigaction(int sig, const struct sigaction *act, struct sigaction *oact)
-{
- struct sigaction *tmpact;
- struct sigaction oldact, wrapperact;
- int error;
-
- /* Detect invalid signals. */
- if (sig < 1 || sig > NSIG) {
- errno = EINVAL;
- return (-1);
- }
-
- /*
- * If act is not NULL the library's signal wrapper is passed into the
- * kernel only if the action is not SIG_DFL or SIG_IGN.
- * On the other hand if act is NULL the caller only wants
- * the old value so there is no need to call into the kernel.
- */
- error = 0;
- tmpact = NULL;
- proc_sigact_copyout(sig, &oldact);
- if (act != NULL) {
- proc_sigact_copyin(sig, act);
- tmpact = proc_sigact_sigaction(sig);
- if (tmpact->sa_handler != SIG_DFL &&
- tmpact->sa_handler != SIG_IGN) {
- bcopy((const void *)tmpact, (void *)&wrapperact,
- sizeof(struct sigaction));
- wrapperact.sa_flags |= SA_SIGINFO;
- wrapperact.sa_sigaction = &_thread_sig_wrapper;
- tmpact = &wrapperact;
- }
- error = __sys_sigaction(sig, tmpact, NULL);
- }
- if (error == 0) {
-
- /* If successful, return the old sigaction to the user */
- if (oact != NULL )
- bcopy((const void *)&oldact, (void *)oact,
- sizeof(struct sigaction));
- } else {
-
- /*
- * The only time error is non-zero is if the syscall failed,
- * which means the sigaction in the process global list
- * was altered before the syscall. Return it to it's old value.
- */
- proc_sigact_copyin(sig, &oldact);
- }
- return (error);
-}
-
__weak_reference(_sleep, sleep);
unsigned int