author     deischen <deischen@FreeBSD.org>    2003-04-18 05:04:16 +0000
committer  deischen <deischen@FreeBSD.org>    2003-04-18 05:04:16 +0000
commit     5d56aa9cb2bdbe0a18bafbdbb6eb8cf6a46beb79 (patch)
tree       46bc1e113ddc7c1ed88e4fa724039df8664c963a
parent     e68f624d876da04bfb6860b450593c77d80368bd (diff)
Revamp libpthread so that it has a chance of working in an SMP
environment.  This includes support for multiple KSEs and KSEGs.
The ability to create more than one KSE via pthread_setconcurrency()
is in the works, as is support for PTHREAD_SCOPE_SYSTEM threads;
those should come shortly.

There are still some known issues that davidxu and I are working on,
but committing what we have will make things easier for us.

This library now passes all of the ACE tests that libc_r passes, with
the exception of one.  It also seems to work OK with KDE, including
konqueror, kwrite, etc.  I haven't been able to get mozilla to run due
to the lack of a java plugin, so I'd be interested to see how it works
with that.

Reviewed by:    davidxu
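At the application level, the features this message describes map onto
standard pthread interfaces.  The following is a minimal sketch
(illustrative only, not part of this change set) of how a program would
ask for them once the KSE/KSEG support described above is complete;
pthread_setconcurrency() and pthread_attr_setscope() are the standard
POSIX calls, and the PTHREAD_SCOPE_SYSTEM request is expected to fail
until that support lands:

#include <pthread.h>
#include <stdio.h>

static void *
worker(void *arg)
{
    printf("worker running: %s\n", (char *)arg);
    return (NULL);
}

int
main(void)
{
    pthread_attr_t attr;
    pthread_t td;

    /* Hint that more than one KSE may be used to run threads. */
    (void)pthread_setconcurrency(2);

    pthread_attr_init(&attr);
    /* Ask for a system-scope thread; fall back until it is supported. */
    if (pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM) != 0)
        (void)pthread_attr_setscope(&attr, PTHREAD_SCOPE_PROCESS);

    pthread_create(&td, &attr, worker, "hello");
    pthread_join(td, NULL);
    pthread_attr_destroy(&attr);
    return (0);
}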
Diffstat
-rw-r--r--  lib/libkse/Makefile | 8
-rw-r--r--  lib/libkse/thread/Makefile.inc | 5
-rw-r--r--  lib/libkse/thread/thr_aio_suspend.c | 9
-rw-r--r--  lib/libkse/thread/thr_attr_get_np.c | 12
-rw-r--r--  lib/libkse/thread/thr_attr_init.c | 3
-rw-r--r--  lib/libkse/thread/thr_attr_setcreatesuspend_np.c | 2
-rw-r--r--  lib/libkse/thread/thr_attr_setguardsize.c | 8
-rw-r--r--  lib/libkse/thread/thr_attr_setschedparam.c | 4
-rw-r--r--  lib/libkse/thread/thr_attr_setscope.c | 9
-rw-r--r--  lib/libkse/thread/thr_autoinit.c | 5
-rw-r--r--  lib/libkse/thread/thr_cancel.c | 152
-rw-r--r--  lib/libkse/thread/thr_clean.c | 4
-rw-r--r--  lib/libkse/thread/thr_close.c | 7
-rw-r--r--  lib/libkse/thread/thr_cond.c | 435
-rw-r--r--  lib/libkse/thread/thr_condattr_init.c | 8
-rw-r--r--  lib/libkse/thread/thr_creat.c | 7
-rw-r--r--  lib/libkse/thread/thr_create.c | 258
-rw-r--r--  lib/libkse/thread/thr_detach.c | 74
-rw-r--r--  lib/libkse/thread/thr_exit.c | 89
-rw-r--r--  lib/libkse/thread/thr_fcntl.c | 32
-rw-r--r--  lib/libkse/thread/thr_find_thread.c | 72
-rw-r--r--  lib/libkse/thread/thr_fork.c | 133
-rw-r--r--  lib/libkse/thread/thr_fsync.c | 7
-rw-r--r--  lib/libkse/thread/thr_getschedparam.c | 28
-rw-r--r--  lib/libkse/thread/thr_info.c | 108
-rw-r--r--  lib/libkse/thread/thr_init.c | 515
-rw-r--r--  lib/libkse/thread/thr_join.c | 160
-rw-r--r--  lib/libkse/thread/thr_kern.c | 2007
-rw-r--r--  lib/libkse/thread/thr_kill.c | 22
-rw-r--r--  lib/libkse/thread/thr_main_np.c | 4
-rw-r--r--  lib/libkse/thread/thr_mattr_init.c | 8
-rw-r--r--  lib/libkse/thread/thr_msync.c | 5
-rw-r--r--  lib/libkse/thread/thr_mutex.c | 1210
-rw-r--r--  lib/libkse/thread/thr_mutex_prioceiling.c | 16
-rw-r--r--  lib/libkse/thread/thr_mutex_protocol.c | 2
-rw-r--r--  lib/libkse/thread/thr_nanosleep.c | 67
-rw-r--r--  lib/libkse/thread/thr_once.c | 12
-rw-r--r--  lib/libkse/thread/thr_open.c | 5
-rw-r--r--  lib/libkse/thread/thr_pause.c | 5
-rw-r--r--  lib/libkse/thread/thr_poll.c | 5
-rw-r--r--  lib/libkse/thread/thr_printf.c | 10
-rw-r--r--  lib/libkse/thread/thr_priority_queue.c | 229
-rw-r--r--  lib/libkse/thread/thr_private.h | 1123
-rw-r--r--  lib/libkse/thread/thr_pselect.c | 5
-rw-r--r--  lib/libkse/thread/thr_read.c | 5
-rw-r--r--  lib/libkse/thread/thr_readv.c | 5
-rw-r--r--  lib/libkse/thread/thr_resume_np.c | 66
-rw-r--r--  lib/libkse/thread/thr_rwlock.c | 124
-rw-r--r--  lib/libkse/thread/thr_select.c | 5
-rw-r--r--  lib/libkse/thread/thr_self.c | 3
-rw-r--r--  lib/libkse/thread/thr_sem.c | 72
-rw-r--r--  lib/libkse/thread/thr_seterrno.c | 2
-rw-r--r--  lib/libkse/thread/thr_setschedparam.c | 60
-rw-r--r--  lib/libkse/thread/thr_sig.c | 980
-rw-r--r--  lib/libkse/thread/thr_sigaction.c | 19
-rw-r--r--  lib/libkse/thread/thr_sigmask.c | 76
-rw-r--r--  lib/libkse/thread/thr_sigpending.c | 7
-rw-r--r--  lib/libkse/thread/thr_sigsuspend.c | 50
-rw-r--r--  lib/libkse/thread/thr_sigwait.c | 130
-rw-r--r--  lib/libkse/thread/thr_sleep.c | 7
-rw-r--r--  lib/libkse/thread/thr_spec.c | 85
-rw-r--r--  lib/libkse/thread/thr_spinlock.c | 39
-rw-r--r--  lib/libkse/thread/thr_stack.c | 209
-rw-r--r--  lib/libkse/thread/thr_suspend_np.c | 58
-rw-r--r--  lib/libkse/thread/thr_switch_np.c | 22
-rw-r--r--  lib/libkse/thread/thr_system.c | 5
-rw-r--r--  lib/libkse/thread/thr_tcdrain.c | 7
-rw-r--r--  lib/libkse/thread/thr_wait.c | 5
-rw-r--r--  lib/libkse/thread/thr_wait4.c | 5
-rw-r--r--  lib/libkse/thread/thr_waitpid.c | 5
-rw-r--r--  lib/libkse/thread/thr_write.c | 5
-rw-r--r--  lib/libkse/thread/thr_writev.c | 5
-rw-r--r--  lib/libkse/thread/thr_yield.c | 4
-rw-r--r--  lib/libpthread/Makefile | 8
-rw-r--r--  lib/libpthread/thread/Makefile.inc | 5
-rw-r--r--  lib/libpthread/thread/thr_acl_aclcheck_fd.c | 47
-rw-r--r--  lib/libpthread/thread/thr_aio_suspend.c | 9
-rw-r--r--  lib/libpthread/thread/thr_attr_get_np.c | 12
-rw-r--r--  lib/libpthread/thread/thr_attr_init.c | 3
-rw-r--r--  lib/libpthread/thread/thr_attr_setcreatesuspend_np.c | 2
-rw-r--r--  lib/libpthread/thread/thr_attr_setguardsize.c | 8
-rw-r--r--  lib/libpthread/thread/thr_attr_setschedparam.c | 4
-rw-r--r--  lib/libpthread/thread/thr_attr_setscope.c | 9
-rw-r--r--  lib/libpthread/thread/thr_autoinit.c | 5
-rw-r--r--  lib/libpthread/thread/thr_cancel.c | 152
-rw-r--r--  lib/libpthread/thread/thr_clean.c | 4
-rw-r--r--  lib/libpthread/thread/thr_close.c | 7
-rw-r--r--  lib/libpthread/thread/thr_cond.c | 435
-rw-r--r--  lib/libpthread/thread/thr_condattr_init.c | 8
-rw-r--r--  lib/libpthread/thread/thr_creat.c | 7
-rw-r--r--  lib/libpthread/thread/thr_create.c | 258
-rw-r--r--  lib/libpthread/thread/thr_detach.c | 74
-rw-r--r--  lib/libpthread/thread/thr_exit.c | 89
-rw-r--r--  lib/libpthread/thread/thr_fcntl.c | 32
-rw-r--r--  lib/libpthread/thread/thr_find_thread.c | 72
-rw-r--r--  lib/libpthread/thread/thr_fork.c | 133
-rw-r--r--  lib/libpthread/thread/thr_fsync.c | 7
-rw-r--r--  lib/libpthread/thread/thr_gc.c | 219
-rw-r--r--  lib/libpthread/thread/thr_getschedparam.c | 28
-rw-r--r--  lib/libpthread/thread/thr_info.c | 108
-rw-r--r--  lib/libpthread/thread/thr_init.c | 515
-rw-r--r--  lib/libpthread/thread/thr_join.c | 160
-rw-r--r--  lib/libpthread/thread/thr_kern.c | 2007
-rw-r--r--  lib/libpthread/thread/thr_kill.c | 22
-rw-r--r--  lib/libpthread/thread/thr_main_np.c | 4
-rw-r--r--  lib/libpthread/thread/thr_mattr_init.c | 8
-rw-r--r--  lib/libpthread/thread/thr_msync.c | 5
-rw-r--r--  lib/libpthread/thread/thr_mutex.c | 1210
-rw-r--r--  lib/libpthread/thread/thr_mutex_prioceiling.c | 16
-rw-r--r--  lib/libpthread/thread/thr_mutex_protocol.c | 2
-rw-r--r--  lib/libpthread/thread/thr_nanosleep.c | 67
-rw-r--r--  lib/libpthread/thread/thr_once.c | 12
-rw-r--r--  lib/libpthread/thread/thr_open.c | 5
-rw-r--r--  lib/libpthread/thread/thr_pause.c | 5
-rw-r--r--  lib/libpthread/thread/thr_poll.c | 5
-rw-r--r--  lib/libpthread/thread/thr_printf.c | 10
-rw-r--r--  lib/libpthread/thread/thr_priority_queue.c | 229
-rw-r--r--  lib/libpthread/thread/thr_private.h | 1123
-rw-r--r--  lib/libpthread/thread/thr_pselect.c | 5
-rw-r--r--  lib/libpthread/thread/thr_read.c | 5
-rw-r--r--  lib/libpthread/thread/thr_readv.c | 5
-rw-r--r--  lib/libpthread/thread/thr_resume_np.c | 66
-rw-r--r--  lib/libpthread/thread/thr_rwlock.c | 124
-rw-r--r--  lib/libpthread/thread/thr_select.c | 5
-rw-r--r--  lib/libpthread/thread/thr_self.c | 3
-rw-r--r--  lib/libpthread/thread/thr_sem.c | 72
-rw-r--r--  lib/libpthread/thread/thr_seterrno.c | 2
-rw-r--r--  lib/libpthread/thread/thr_setschedparam.c | 60
-rw-r--r--  lib/libpthread/thread/thr_sig.c | 980
-rw-r--r--  lib/libpthread/thread/thr_sigaction.c | 19
-rw-r--r--  lib/libpthread/thread/thr_sigmask.c | 76
-rw-r--r--  lib/libpthread/thread/thr_sigpending.c | 7
-rw-r--r--  lib/libpthread/thread/thr_sigsuspend.c | 50
-rw-r--r--  lib/libpthread/thread/thr_sigwait.c | 130
-rw-r--r--  lib/libpthread/thread/thr_sleep.c | 7
-rw-r--r--  lib/libpthread/thread/thr_spec.c | 85
-rw-r--r--  lib/libpthread/thread/thr_spinlock.c | 39
-rw-r--r--  lib/libpthread/thread/thr_stack.c | 209
-rw-r--r--  lib/libpthread/thread/thr_suspend_np.c | 58
-rw-r--r--  lib/libpthread/thread/thr_switch_np.c | 22
-rw-r--r--  lib/libpthread/thread/thr_system.c | 5
-rw-r--r--  lib/libpthread/thread/thr_tcdrain.c | 7
-rw-r--r--  lib/libpthread/thread/thr_wait.c | 5
-rw-r--r--  lib/libpthread/thread/thr_wait4.c | 5
-rw-r--r--  lib/libpthread/thread/thr_waitpid.c | 5
-rw-r--r--  lib/libpthread/thread/thr_write.c | 5
-rw-r--r--  lib/libpthread/thread/thr_writev.c | 5
-rw-r--r--  lib/libpthread/thread/thr_yield.c | 4
148 files changed, 10658 insertions, 7516 deletions
diff --git a/lib/libkse/Makefile b/lib/libkse/Makefile
index bebda3f..28a4763 100644
--- a/lib/libkse/Makefile
+++ b/lib/libkse/Makefile
@@ -9,16 +9,18 @@
# system call stubs.
LIB=kse
SHLIB_MAJOR= 1
-CFLAGS+=-DPTHREAD_KERNEL -D_THREAD_SAFE
+CFLAGS+=-DPTHREAD_KERNEL
CFLAGS+=-I${.CURDIR}/../libc/include -I${.CURDIR}/thread \
-I${.CURDIR}/../../include
+CFLAGS+=-I${.CURDIR}/arch/${MACHINE_ARCH}/include
+CFLAGS+=-I${.CURDIR}/sys
# Uncomment this if you want libpthread to contain debug information for
# thread locking.
-CFLAGS+=-D_LOCK_DEBUG
+CFLAGS+=-D_LOCK_DEBUG -g
# enable extra internal consistancy checks
-CFLAGS+=-D_PTHREADS_INVARIANTS
+CFLAGS+=-D_PTHREADS_INVARIANTS -Wall
AINC= -I${.CURDIR}/../libc/${MACHINE_ARCH} -I${.CURDIR}/thread
PRECIOUSLIB= yes
diff --git a/lib/libkse/thread/Makefile.inc b/lib/libkse/thread/Makefile.inc
index 2b7800a..6b00117 100644
--- a/lib/libkse/thread/Makefile.inc
+++ b/lib/libkse/thread/Makefile.inc
@@ -5,6 +5,7 @@
SRCS+= \
thr_aio_suspend.c \
+ thr_autoinit.c \
thr_attr_destroy.c \
thr_attr_init.c \
thr_attr_get_np.c \
@@ -27,7 +28,6 @@ SRCS+= \
thr_attr_setstack.c \
thr_attr_setstackaddr.c \
thr_attr_setstacksize.c \
- thr_autoinit.c \
thr_cancel.c \
thr_clean.c \
thr_close.c \
@@ -43,7 +43,6 @@ SRCS+= \
thr_find_thread.c \
thr_fork.c \
thr_fsync.c \
- thr_gc.c \
thr_getprio.c \
thr_getschedparam.c \
thr_info.c \
@@ -82,6 +81,8 @@ SRCS+= \
thr_sig.c \
thr_sigaction.c \
thr_sigmask.c \
+ thr_sigpending.c \
+ thr_sigprocmask.c \
thr_sigsuspend.c \
thr_sigwait.c \
thr_single_np.c \
diff --git a/lib/libkse/thread/thr_aio_suspend.c b/lib/libkse/thread/thr_aio_suspend.c
index 05a6c82..94eed27 100644
--- a/lib/libkse/thread/thr_aio_suspend.c
+++ b/lib/libkse/thread/thr_aio_suspend.c
@@ -39,12 +39,13 @@ int
_aio_suspend(const struct aiocb * const iocbs[], int niocb, const struct
timespec *timeout)
{
- int ret;
+ struct pthread *curthread = _get_curthread();
+ int ret;
- _thread_enter_cancellation_point();
+ _thr_enter_cancellation_point(curthread);
ret = __sys_aio_suspend(iocbs, niocb, timeout);
- _thread_leave_cancellation_point();
+ _thr_leave_cancellation_point(curthread);
- return ret;
+ return (ret);
}
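A note on the pattern above: the old _thread_enter_cancellation_point()
looked up the current thread internally, whereas the new API has the
wrapper fetch curthread once with _get_curthread() and pass it to
_thr_enter_cancellation_point()/_thr_leave_cancellation_point().  Written
out in full, the shape applied to this stub (and repeated for close(),
creat(), and the other cancellable wrappers later in this patch) looks
like the following sketch; __mysyscall and __sys_mysyscall are hypothetical
stand-ins, not names from this commit:

#include <pthread.h>
#include "thr_private.h"

int
__mysyscall(int fd)
{
    struct pthread *curthread = _get_curthread();
    int ret;

    /* Flag the thread as sitting at a cancellation point ... */
    _thr_enter_cancellation_point(curthread);
    ret = __sys_mysyscall(fd);
    /* ... then clear the flag and re-check for a pending cancel. */
    _thr_leave_cancellation_point(curthread);

    return (ret);
}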
diff --git a/lib/libkse/thread/thr_attr_get_np.c b/lib/libkse/thread/thr_attr_get_np.c
index 2a37f4d..4431824 100644
--- a/lib/libkse/thread/thr_attr_get_np.c
+++ b/lib/libkse/thread/thr_attr_get_np.c
@@ -36,22 +36,18 @@ __weak_reference(_pthread_attr_get_np, pthread_attr_get_np);
int
_pthread_attr_get_np(pthread_t pid, pthread_attr_t *dst)
{
+ struct pthread *curthread;
int ret;
if (pid == NULL || dst == NULL || *dst == NULL)
return (EINVAL);
- if ((ret = _find_thread(pid)) != 0)
+ curthread = _get_curthread();
+ if ((ret = _thr_ref_add(curthread, pid, /*include dead*/0)) != 0)
return (ret);
memcpy(*dst, &pid->attr, sizeof(struct pthread_attr));
-
- /*
- * Special case, if stack address was not provided by caller
- * of pthread_create(), then return address allocated internally
- */
- if ((*dst)->stackaddr_attr == NULL)
- (*dst)->stackaddr_attr = pid->stack;
+ _thr_ref_delete(curthread, pid);
return (0);
}
diff --git a/lib/libkse/thread/thr_attr_init.c b/lib/libkse/thread/thr_attr_init.c
index a3befed..d8b701e 100644
--- a/lib/libkse/thread/thr_attr_init.c
+++ b/lib/libkse/thread/thr_attr_init.c
@@ -51,7 +51,8 @@ _pthread_attr_init(pthread_attr_t *attr)
ret = ENOMEM;
else {
/* Initialise the attribute object with the defaults: */
- memcpy(pattr, &pthread_attr_default, sizeof(struct pthread_attr));
+ memcpy(pattr, &_pthread_attr_default,
+ sizeof(struct pthread_attr));
/* Return a pointer to the attribute object: */
*attr = pattr;
diff --git a/lib/libkse/thread/thr_attr_setcreatesuspend_np.c b/lib/libkse/thread/thr_attr_setcreatesuspend_np.c
index eddfc46..28f0546 100644
--- a/lib/libkse/thread/thr_attr_setcreatesuspend_np.c
+++ b/lib/libkse/thread/thr_attr_setcreatesuspend_np.c
@@ -45,7 +45,7 @@ _pthread_attr_setcreatesuspend_np(pthread_attr_t *attr)
errno = EINVAL;
ret = -1;
} else {
- (*attr)->suspend = PTHREAD_CREATE_SUSPENDED;
+ (*attr)->suspend = THR_CREATE_SUSPENDED;
ret = 0;
}
return(ret);
diff --git a/lib/libkse/thread/thr_attr_setguardsize.c b/lib/libkse/thread/thr_attr_setguardsize.c
index 94da871..59ec908 100644
--- a/lib/libkse/thread/thr_attr_setguardsize.c
+++ b/lib/libkse/thread/thr_attr_setguardsize.c
@@ -47,11 +47,11 @@ _pthread_attr_setguardsize(pthread_attr_t *attr, size_t guardsize)
else {
/*
* Round guardsize up to the nearest multiple of
- * _pthread_page_size.
+ * _thr_page_size.
*/
- if (guardsize % _pthread_page_size != 0)
- guardsize = ((guardsize / _pthread_page_size) + 1) *
- _pthread_page_size;
+ if (guardsize % _thr_page_size != 0)
+ guardsize = ((guardsize / _thr_page_size) + 1) *
+ _thr_page_size;
/* Save the stack size. */
(*attr)->guardsize_attr = guardsize;
diff --git a/lib/libkse/thread/thr_attr_setschedparam.c b/lib/libkse/thread/thr_attr_setschedparam.c
index 2f34c77..bbb4b1e 100644
--- a/lib/libkse/thread/thr_attr_setschedparam.c
+++ b/lib/libkse/thread/thr_attr_setschedparam.c
@@ -46,8 +46,8 @@ _pthread_attr_setschedparam(pthread_attr_t *attr, const struct sched_param *para
ret = EINVAL;
else if (param == NULL) {
ret = ENOTSUP;
- } else if ((param->sched_priority < PTHREAD_MIN_PRIORITY) ||
- (param->sched_priority > PTHREAD_MAX_PRIORITY)) {
+ } else if ((param->sched_priority < THR_MIN_PRIORITY) ||
+ (param->sched_priority > THR_MAX_PRIORITY)) {
/* Return an unsupported value error. */
ret = ENOTSUP;
} else
diff --git a/lib/libkse/thread/thr_attr_setscope.c b/lib/libkse/thread/thr_attr_setscope.c
index dc9e2dd..70dd69e 100644
--- a/lib/libkse/thread/thr_attr_setscope.c
+++ b/lib/libkse/thread/thr_attr_setscope.c
@@ -45,12 +45,11 @@ _pthread_attr_setscope(pthread_attr_t *attr, int contentionscope)
if ((attr == NULL) || (*attr == NULL)) {
/* Return an invalid argument: */
ret = EINVAL;
- } else if ((contentionscope != PTHREAD_SCOPE_PROCESS) ||
- (contentionscope == PTHREAD_SCOPE_SYSTEM)) {
- /* We don't support PTHREAD_SCOPE_SYSTEM. */
- ret = ENOTSUP;
+ } else if ((contentionscope != PTHREAD_SCOPE_PROCESS) &&
+ (contentionscope != PTHREAD_SCOPE_SYSTEM)) {
+ ret = EINVAL;
} else
(*attr)->flags |= contentionscope;
- return(ret);
+ return (ret);
}
diff --git a/lib/libkse/thread/thr_autoinit.c b/lib/libkse/thread/thr_autoinit.c
index 31e2d48..95b2a85 100644
--- a/lib/libkse/thread/thr_autoinit.c
+++ b/lib/libkse/thread/thr_autoinit.c
@@ -38,13 +38,16 @@
* threads package at program start-up time.
*/
+#include <pthread.h>
+#include "thr_private.h"
+
void _thread_init_hack(void) __attribute__ ((constructor));
void
_thread_init_hack(void)
{
- _thread_init();
+ _libpthread_init(NULL);
}
/*
diff --git a/lib/libkse/thread/thr_cancel.c b/lib/libkse/thread/thr_cancel.c
index 8b0b4c0..23e0dfa 100644
--- a/lib/libkse/thread/thr_cancel.c
+++ b/lib/libkse/thread/thr_cancel.c
@@ -6,32 +6,32 @@
#include <pthread.h>
#include "thr_private.h"
-static void finish_cancellation(void *arg);
-
__weak_reference(_pthread_cancel, pthread_cancel);
__weak_reference(_pthread_setcancelstate, pthread_setcancelstate);
__weak_reference(_pthread_setcanceltype, pthread_setcanceltype);
__weak_reference(_pthread_testcancel, pthread_testcancel);
+static int checkcancel(struct pthread *curthread);
+static void testcancel(struct pthread *curthread);
+static void finish_cancellation(void *arg);
+
int
_pthread_cancel(pthread_t pthread)
{
+ struct pthread *curthread = _get_curthread();
int ret;
- if ((ret = _find_thread(pthread)) != 0) {
- /* NOTHING */
- } else if (pthread->state == PS_DEAD || pthread->state == PS_DEADLOCK
- || (pthread->flags & PTHREAD_EXITING) != 0) {
- ret = 0;
- } else {
- /* Protect the scheduling queues: */
- _thread_kern_sig_defer();
+ if ((ret = _thr_ref_add(curthread, pthread, /*include dead*/0)) == 0) {
+ /*
+ * Take the scheduling lock while we change the cancel flags.
+ */
+ THR_SCHED_LOCK(curthread, pthread);
if (((pthread->cancelflags & PTHREAD_CANCEL_DISABLE) != 0) ||
- (((pthread->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS) == 0) &&
- ((pthread->cancelflags & PTHREAD_AT_CANCEL_POINT) == 0)))
+ (((pthread->cancelflags & THR_AT_CANCEL_POINT) == 0) &&
+ ((pthread->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS) == 0)))
/* Just mark it for cancellation: */
- pthread->cancelflags |= PTHREAD_CANCELLING;
+ pthread->cancelflags |= THR_CANCELLING;
else {
/*
* Check if we need to kick it back into the
@@ -40,23 +40,27 @@ _pthread_cancel(pthread_t pthread)
switch (pthread->state) {
case PS_RUNNING:
/* No need to resume: */
- pthread->cancelflags |= PTHREAD_CANCELLING;
+ pthread->cancelflags |= THR_CANCELLING;
+ break;
+
+ case PS_LOCKWAIT:
+ /*
+ * These can't be removed from the queue.
+ * Just mark it as cancelling and tell it
+ * to yield once it leaves the critical
+ * region.
+ */
+ pthread->cancelflags |= THR_CANCELLING;
+ pthread->critical_yield = 1;
break;
- case PS_SPINBLOCK:
- /* Remove these threads from the work queue: */
- if ((pthread->flags & PTHREAD_FLAGS_IN_WORKQ)
- != 0)
- PTHREAD_WORKQ_REMOVE(pthread);
- /* Fall through: */
case PS_SLEEP_WAIT:
- case PS_WAIT_WAIT:
case PS_SIGSUSPEND:
case PS_SIGWAIT:
/* Interrupt and resume: */
pthread->interrupted = 1;
- pthread->cancelflags |= PTHREAD_CANCELLING;
- PTHREAD_NEW_STATE(pthread,PS_RUNNING);
+ pthread->cancelflags |= THR_CANCELLING;
+ _thr_setrunnable_unlocked(pthread);
break;
case PS_JOIN:
@@ -68,8 +72,8 @@ _pthread_cancel(pthread_t pthread)
= NULL;
pthread->join_status.thread = NULL;
}
- pthread->cancelflags |= PTHREAD_CANCELLING;
- PTHREAD_NEW_STATE(pthread, PS_RUNNING);
+ pthread->cancelflags |= THR_CANCELLING;
+ _thr_setrunnable_unlocked(pthread);
break;
case PS_SUSPENDED:
@@ -86,8 +90,8 @@ _pthread_cancel(pthread_t pthread)
* cancellation completion routine.
*/
pthread->interrupted = 1;
- pthread->cancelflags |= PTHREAD_CANCEL_NEEDED;
- PTHREAD_NEW_STATE(pthread, PS_RUNNING);
+ pthread->cancelflags |= THR_CANCEL_NEEDED;
+ _thr_setrunnable_unlocked(pthread);
pthread->continuation = finish_cancellation;
break;
@@ -97,12 +101,17 @@ _pthread_cancel(pthread_t pthread)
/* Ignore - only here to silence -Wall: */
break;
}
+ if ((pthread->blocked != 0) &&
+ ((pthread->cancelflags & THR_AT_CANCEL_POINT) != 0))
+ kse_thr_interrupt(&pthread->tmbx);
}
- /* Unprotect the scheduling queues: */
- _thread_kern_sig_undefer();
-
- ret = 0;
+ /*
+ * Release the thread's scheduling lock and remove the
+ * reference:
+ */
+ THR_SCHED_UNLOCK(curthread, pthread);
+ _thr_ref_delete(curthread, pthread);
}
return (ret);
}
@@ -113,6 +122,10 @@ _pthread_setcancelstate(int state, int *oldstate)
struct pthread *curthread = _get_curthread();
int ostate;
int ret;
+ int need_exit = 0;
+
+ /* Take the scheduling lock while fiddling with the thread's state: */
+ THR_SCHED_LOCK(curthread, curthread);
ostate = curthread->cancelflags & PTHREAD_CANCEL_DISABLE;
@@ -122,7 +135,7 @@ _pthread_setcancelstate(int state, int *oldstate)
*oldstate = ostate;
curthread->cancelflags &= ~PTHREAD_CANCEL_DISABLE;
if ((curthread->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS) != 0)
- pthread_testcancel();
+ need_exit = checkcancel(curthread);
ret = 0;
break;
case PTHREAD_CANCEL_DISABLE:
@@ -135,6 +148,12 @@ _pthread_setcancelstate(int state, int *oldstate)
ret = EINVAL;
}
+ THR_SCHED_UNLOCK(curthread, curthread);
+ if (need_exit != 0) {
+ _thr_exit_cleanup();
+ pthread_exit(PTHREAD_CANCELED);
+ PANIC("cancel");
+ }
return (ret);
}
@@ -144,6 +163,10 @@ _pthread_setcanceltype(int type, int *oldtype)
struct pthread *curthread = _get_curthread();
int otype;
int ret;
+ int need_exit = 0;
+
+ /* Take the scheduling lock while fiddling with the state: */
+ THR_SCHED_LOCK(curthread, curthread);
otype = curthread->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS;
switch (type) {
@@ -151,7 +174,7 @@ _pthread_setcanceltype(int type, int *oldtype)
if (oldtype != NULL)
*oldtype = otype;
curthread->cancelflags |= PTHREAD_CANCEL_ASYNCHRONOUS;
- pthread_testcancel();
+ need_exit = checkcancel(curthread);
ret = 0;
break;
case PTHREAD_CANCEL_DEFERRED:
@@ -164,47 +187,72 @@ _pthread_setcanceltype(int type, int *oldtype)
ret = EINVAL;
}
+ THR_SCHED_UNLOCK(curthread, curthread);
+ if (need_exit != 0) {
+ _thr_exit_cleanup();
+ pthread_exit(PTHREAD_CANCELED);
+ PANIC("cancel");
+ }
return (ret);
}
-void
-_pthread_testcancel(void)
+static int
+checkcancel(struct pthread *curthread)
{
- struct pthread *curthread = _get_curthread();
-
if (((curthread->cancelflags & PTHREAD_CANCEL_DISABLE) == 0) &&
- ((curthread->cancelflags & PTHREAD_CANCELLING) != 0) &&
- ((curthread->flags & PTHREAD_EXITING) == 0)) {
+ ((curthread->cancelflags & THR_CANCELLING) != 0)) {
/*
* It is possible for this thread to be swapped out
* while performing cancellation; do not allow it
* to be cancelled again.
*/
- curthread->cancelflags &= ~PTHREAD_CANCELLING;
- _thread_exit_cleanup();
+ curthread->cancelflags &= ~THR_CANCELLING;
+ return (1);
+ }
+ else
+ return (0);
+}
+
+static void
+testcancel(struct pthread *curthread)
+{
+ /* Take the scheduling lock while fiddling with the state: */
+ THR_SCHED_LOCK(curthread, curthread);
+
+ if (checkcancel(curthread) != 0) {
+ /* Unlock before exiting: */
+ THR_SCHED_UNLOCK(curthread, curthread);
+
+ _thr_exit_cleanup();
pthread_exit(PTHREAD_CANCELED);
PANIC("cancel");
}
+
+ THR_SCHED_UNLOCK(curthread, curthread);
}
void
-_thread_enter_cancellation_point(void)
+_pthread_testcancel(void)
{
struct pthread *curthread = _get_curthread();
- /* Look for a cancellation before we block: */
- pthread_testcancel();
- curthread->cancelflags |= PTHREAD_AT_CANCEL_POINT;
+ testcancel(curthread);
}
void
-_thread_leave_cancellation_point(void)
+_thr_enter_cancellation_point(struct pthread *thread)
{
- struct pthread *curthread = _get_curthread();
+ /* Look for a cancellation before we block: */
+ testcancel(thread);
+ thread->cancelflags |= THR_AT_CANCEL_POINT;
+}
- curthread->cancelflags &= ~PTHREAD_AT_CANCEL_POINT;
+void
+_thr_leave_cancellation_point(struct pthread *thread)
+{
+ thread->cancelflags &= ~THR_AT_CANCEL_POINT;
/* Look for a cancellation after we unblock: */
- pthread_testcancel();
+ testcancel(thread);
}
static void
@@ -215,9 +263,9 @@ finish_cancellation(void *arg)
curthread->continuation = NULL;
curthread->interrupted = 0;
- if ((curthread->cancelflags & PTHREAD_CANCEL_NEEDED) != 0) {
- curthread->cancelflags &= ~PTHREAD_CANCEL_NEEDED;
- _thread_exit_cleanup();
+ if ((curthread->cancelflags & THR_CANCEL_NEEDED) != 0) {
+ curthread->cancelflags &= ~THR_CANCEL_NEEDED;
+ _thr_exit_cleanup();
pthread_exit(PTHREAD_CANCELED);
}
}
diff --git a/lib/libkse/thread/thr_clean.c b/lib/libkse/thread/thr_clean.c
index 8ae6b42..a8cedb4 100644
--- a/lib/libkse/thread/thr_clean.c
+++ b/lib/libkse/thread/thr_clean.c
@@ -46,7 +46,8 @@ _pthread_cleanup_push(void (*routine) (void *), void *routine_arg)
struct pthread *curthread = _get_curthread();
struct pthread_cleanup *new;
- if ((new = (struct pthread_cleanup *) malloc(sizeof(struct pthread_cleanup))) != NULL) {
+ if ((new = (struct pthread_cleanup *)
+ malloc(sizeof(struct pthread_cleanup))) != NULL) {
new->routine = routine;
new->routine_arg = routine_arg;
new->next = curthread->cleanup;
@@ -69,4 +70,3 @@ _pthread_cleanup_pop(int execute)
free(old);
}
}
-
diff --git a/lib/libkse/thread/thr_close.c b/lib/libkse/thread/thr_close.c
index d03961b..269140b 100644
--- a/lib/libkse/thread/thr_close.c
+++ b/lib/libkse/thread/thr_close.c
@@ -44,11 +44,12 @@ __weak_reference(__close, close);
int
__close(int fd)
{
+ struct pthread *curthread = _get_curthread();
int ret;
- _thread_enter_cancellation_point();
+ _thr_enter_cancellation_point(curthread);
ret = __sys_close(fd);
- _thread_leave_cancellation_point();
+ _thr_leave_cancellation_point(curthread);
- return ret;
+ return (ret);
}
diff --git a/lib/libkse/thread/thr_cond.c b/lib/libkse/thread/thr_cond.c
index a22d983..1221fd8 100644
--- a/lib/libkse/thread/thr_cond.c
+++ b/lib/libkse/thread/thr_cond.c
@@ -37,12 +37,17 @@
#include <pthread.h>
#include "thr_private.h"
+#define THR_IN_CONDQ(thr) (((thr)->sflags & THR_FLAGS_IN_SYNCQ) != 0)
+#define THR_IN_CONDQ(thr) (((thr)->sflags & THR_FLAGS_IN_SYNCQ) != 0)
+#define THR_CONDQ_SET(thr) (thr)->sflags |= THR_FLAGS_IN_SYNCQ
+#define THR_CONDQ_CLEAR(thr) (thr)->sflags &= ~THR_FLAGS_IN_SYNCQ
+
/*
* Prototypes
*/
-static inline pthread_t cond_queue_deq(pthread_cond_t);
-static inline void cond_queue_remove(pthread_cond_t, pthread_t);
-static inline void cond_queue_enq(pthread_cond_t, pthread_t);
+static inline struct pthread *cond_queue_deq(pthread_cond_t);
+static inline void cond_queue_remove(pthread_cond_t, pthread_t);
+static inline void cond_queue_enq(pthread_cond_t, pthread_t);
__weak_reference(_pthread_cond_init, pthread_cond_init);
__weak_reference(_pthread_cond_destroy, pthread_cond_destroy);
@@ -52,35 +57,12 @@ __weak_reference(_pthread_cond_signal, pthread_cond_signal);
__weak_reference(_pthread_cond_broadcast, pthread_cond_broadcast);
-/* Reinitialize a condition variable to defaults. */
-int
-_cond_reinit(pthread_cond_t *cond)
-{
- int ret = 0;
-
- if (cond == NULL)
- ret = EINVAL;
- else if (*cond == NULL)
- ret = pthread_cond_init(cond, NULL);
- else {
- /*
- * Initialize the condition variable structure:
- */
- TAILQ_INIT(&(*cond)->c_queue);
- (*cond)->c_flags = COND_FLAGS_INITED;
- (*cond)->c_type = COND_TYPE_FAST;
- (*cond)->c_mutex = NULL;
- (*cond)->c_seqno = 0;
- memset(&(*cond)->lock, 0, sizeof((*cond)->lock));
- }
- return (ret);
-}
-
int
_pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
{
enum pthread_cond_type type;
pthread_cond_t pcond;
+ int flags;
int rval = 0;
if (cond == NULL)
@@ -93,9 +75,11 @@ _pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
if (cond_attr != NULL && *cond_attr != NULL) {
/* Default to a fast condition variable: */
type = (*cond_attr)->c_type;
+ flags = (*cond_attr)->c_flags;
} else {
/* Default to a fast condition variable: */
type = COND_TYPE_FAST;
+ flags = 0;
}
/* Process according to condition variable type: */
@@ -117,6 +101,10 @@ _pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
if ((pcond = (pthread_cond_t)
malloc(sizeof(struct pthread_cond))) == NULL) {
rval = ENOMEM;
+ } else if (_lock_init(&pcond->c_lock, LCK_ADAPTIVE,
+ _kse_lock_wait, _kse_lock_wakeup) != 0) {
+ free(pcond);
+ rval = ENOMEM;
} else {
/*
* Initialise the condition variable
@@ -127,7 +115,6 @@ _pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
pcond->c_type = type;
pcond->c_mutex = NULL;
pcond->c_seqno = 0;
- memset(&pcond->lock,0,sizeof(pcond->lock));
*cond = pcond;
}
}
@@ -139,25 +126,32 @@ _pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
int
_pthread_cond_destroy(pthread_cond_t *cond)
{
- int rval = 0;
+ struct pthread_cond *cv;
+ struct pthread *curthread = _get_curthread();
+ int rval = 0;
if (cond == NULL || *cond == NULL)
rval = EINVAL;
else {
/* Lock the condition variable structure: */
- _SPINLOCK(&(*cond)->lock);
-
- /*
- * Free the memory allocated for the condition
- * variable structure:
- */
- free(*cond);
+ THR_LOCK_ACQUIRE(curthread, &(*cond)->c_lock);
/*
* NULL the caller's pointer now that the condition
* variable has been destroyed:
*/
+ cv = *cond;
*cond = NULL;
+
+ /* Unlock the condition variable structure: */
+ THR_LOCK_RELEASE(curthread, &cv->c_lock);
+
+ /*
+ * Free the memory allocated for the condition
+ * variable structure:
+ */
+ free(cv);
+
}
/* Return the completion status: */
return (rval);
@@ -170,20 +164,25 @@ _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
int rval = 0;
int done = 0;
int interrupted = 0;
+ int unlock_mutex = 1;
int seqno;
- _thread_enter_cancellation_point();
-
- if (cond == NULL)
+ _thr_enter_cancellation_point(curthread);
+
+ if (cond == NULL) {
+ _thr_leave_cancellation_point(curthread);
return (EINVAL);
+ }
/*
* If the condition variable is statically initialized,
* perform the dynamic initialization:
*/
if (*cond == NULL &&
- (rval = pthread_cond_init(cond, NULL)) != 0)
+ (rval = pthread_cond_init(cond, NULL)) != 0) {
+ _thr_leave_cancellation_point(curthread);
return (rval);
+ }
/*
* Enter a loop waiting for a condition signal or broadcast
@@ -196,7 +195,7 @@ _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
*/
do {
/* Lock the condition variable structure: */
- _SPINLOCK(&(*cond)->lock);
+ THR_LOCK_ACQUIRE(curthread, &(*cond)->c_lock);
/*
* If the condvar was statically allocated, properly
@@ -214,7 +213,7 @@ _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
if ((mutex == NULL) || (((*cond)->c_mutex != NULL) &&
((*cond)->c_mutex != *mutex))) {
/* Unlock the condition variable structure: */
- _SPINUNLOCK(&(*cond)->lock);
+ THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);
/* Return invalid argument error: */
rval = EINVAL;
@@ -237,7 +236,8 @@ _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
curthread->wakeup_time.tv_sec = -1;
/* Unlock the mutex: */
- if ((rval = _mutex_cv_unlock(mutex)) != 0) {
+ if ((unlock_mutex != 0) &&
+ ((rval = _mutex_cv_unlock(mutex)) != 0)) {
/*
* Cannot unlock the mutex, so remove
* the running thread from the condition
@@ -246,45 +246,60 @@ _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
cond_queue_remove(*cond, curthread);
/* Check for no more waiters: */
- if (TAILQ_FIRST(&(*cond)->c_queue) ==
- NULL)
+ if (TAILQ_FIRST(&(*cond)->c_queue) == NULL)
(*cond)->c_mutex = NULL;
/* Unlock the condition variable structure: */
- _SPINUNLOCK(&(*cond)->lock);
- } else {
+ THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);
+ }
+ else {
/*
- * Schedule the next thread and unlock
- * the condition variable structure:
+ * Don't unlock the mutex the next
+ * time through the loop (if the
+ * thread has to be requeued after
+ * handling a signal).
*/
- _thread_kern_sched_state_unlock(PS_COND_WAIT,
- &(*cond)->lock, __FILE__, __LINE__);
+ unlock_mutex = 0;
- done = (seqno != (*cond)->c_seqno);
+ /*
+ * This thread is active and is in a
+ * critical region (holding the cv
+ * lock); we should be able to safely
+ * set the state.
+ */
+ THR_SET_STATE(curthread, PS_COND_WAIT);
- interrupted = curthread->interrupted;
+ /* Remember the CV: */
+ curthread->data.cond = *cond;
+
+ /* Unlock the CV structure: */
+ THR_LOCK_RELEASE(curthread,
+ &(*cond)->c_lock);
+
+ /* Schedule the next thread: */
+ _thr_sched_switch(curthread);
+
+ curthread->data.cond = NULL;
/*
- * Check if the wait was interrupted
- * (canceled) or needs to be resumed
- * after handling a signal.
+ * XXX - This really isn't a good check
+ * since there can be more than one
+ * thread waiting on the CV. Signals
+ * sent to threads waiting on mutexes
+ * or CVs should really be deferred
+ * until the threads are no longer
+ * waiting, but POSIX says that signals
+ * should be sent "as soon as possible".
*/
- if (interrupted != 0) {
- /*
- * Lock the mutex and ignore any
- * errors. Note that even
- * though this thread may have
- * been canceled, POSIX requires
- * that the mutex be reaquired
- * prior to cancellation.
- */
- (void)_mutex_cv_lock(mutex);
- } else {
+ done = (seqno != (*cond)->c_seqno);
+
+ if (THR_IN_SYNCQ(curthread)) {
/*
* Lock the condition variable
* while removing the thread.
*/
- _SPINLOCK(&(*cond)->lock);
+ THR_LOCK_ACQUIRE(curthread,
+ &(*cond)->c_lock);
cond_queue_remove(*cond,
curthread);
@@ -293,11 +308,24 @@ _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
if (TAILQ_FIRST(&(*cond)->c_queue) == NULL)
(*cond)->c_mutex = NULL;
- _SPINUNLOCK(&(*cond)->lock);
+ THR_LOCK_RELEASE(curthread,
+ &(*cond)->c_lock);
+ }
- /* Lock the mutex: */
+ /*
+ * Save the interrupted flag; locking
+ * the mutex may destroy it.
+ */
+ interrupted = curthread->interrupted;
+
+ /*
+ * Note that even though this thread may
+ * have been canceled, POSIX requires
+ * that the mutex be reaquired prior to
+ * cancellation.
+ */
+ if (done != 0)
rval = _mutex_cv_lock(mutex);
- }
}
}
break;
@@ -305,7 +333,7 @@ _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
/* Trap invalid condition variable types: */
default:
/* Unlock the condition variable structure: */
- _SPINUNLOCK(&(*cond)->lock);
+ THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);
/* Return an invalid argument error: */
rval = EINVAL;
@@ -316,13 +344,25 @@ _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
curthread->continuation((void *) curthread);
} while ((done == 0) && (rval == 0));
- _thread_leave_cancellation_point();
+ _thr_leave_cancellation_point(curthread);
/* Return the completion status: */
return (rval);
}
int
+__pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
+{
+ struct pthread *curthread = _get_curthread();
+ int ret;
+
+ _thr_enter_cancellation_point(curthread);
+ ret = _pthread_cond_wait(cond, mutex);
+ _thr_leave_cancellation_point(curthread);
+ return (ret);
+}
+
+int
_pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
const struct timespec * abstime)
{
@@ -330,19 +370,24 @@ _pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
int rval = 0;
int done = 0;
int interrupted = 0;
+ int unlock_mutex = 1;
int seqno;
- _thread_enter_cancellation_point();
-
+ _thr_enter_cancellation_point(curthread);
+
if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
- abstime->tv_nsec >= 1000000000)
+ abstime->tv_nsec >= 1000000000) {
+ _thr_leave_cancellation_point(curthread);
return (EINVAL);
+ }
/*
* If the condition variable is statically initialized, perform dynamic
* initialization.
*/
- if (*cond == NULL && (rval = pthread_cond_init(cond, NULL)) != 0)
+ if (*cond == NULL && (rval = pthread_cond_init(cond, NULL)) != 0) {
+ _thr_leave_cancellation_point(curthread);
return (rval);
+ }
/*
* Enter a loop waiting for a condition signal or broadcast
@@ -355,7 +400,7 @@ _pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
*/
do {
/* Lock the condition variable structure: */
- _SPINLOCK(&(*cond)->lock);
+ THR_LOCK_ACQUIRE(curthread, &(*cond)->c_lock);
/*
* If the condvar was statically allocated, properly
@@ -376,11 +421,10 @@ _pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
rval = EINVAL;
/* Unlock the condition variable structure: */
- _SPINUNLOCK(&(*cond)->lock);
+ THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);
} else {
/* Set the wakeup time: */
- curthread->wakeup_time.tv_sec =
- abstime->tv_sec;
+ curthread->wakeup_time.tv_sec = abstime->tv_sec;
curthread->wakeup_time.tv_nsec =
abstime->tv_nsec;
@@ -399,10 +443,11 @@ _pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
seqno = (*cond)->c_seqno;
/* Unlock the mutex: */
- if ((rval = _mutex_cv_unlock(mutex)) != 0) {
+ if ((unlock_mutex != 0) &&
+ ((rval = _mutex_cv_unlock(mutex)) != 0)) {
/*
- * Cannot unlock the mutex, so remove
- * the running thread from the condition
+ * Cannot unlock the mutex; remove the
+ * running thread from the condition
* variable queue:
*/
cond_queue_remove(*cond, curthread);
@@ -412,40 +457,55 @@ _pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
(*cond)->c_mutex = NULL;
/* Unlock the condition variable structure: */
- _SPINUNLOCK(&(*cond)->lock);
+ THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);
} else {
/*
- * Schedule the next thread and unlock
- * the condition variable structure:
+ * Don't unlock the mutex the next
+ * time through the loop (if the
+ * thread has to be requeued after
+ * handling a signal).
*/
- _thread_kern_sched_state_unlock(PS_COND_WAIT,
- &(*cond)->lock, __FILE__, __LINE__);
+ unlock_mutex = 0;
- done = (seqno != (*cond)->c_seqno);
+ /*
+ * This thread is active and is in a
+ * critical region (holding the cv
+ * lock); we should be able to safely
+ * set the state.
+ */
+ THR_SET_STATE(curthread, PS_COND_WAIT);
- interrupted = curthread->interrupted;
+ /* Remember the CV: */
+ curthread->data.cond = *cond;
+
+ /* Unlock the CV structure: */
+ THR_LOCK_RELEASE(curthread,
+ &(*cond)->c_lock);
+
+ /* Schedule the next thread: */
+ _thr_sched_switch(curthread);
+
+ curthread->data.cond = NULL;
/*
- * Check if the wait was interrupted
- * (canceled) or needs to be resumed
- * after handling a signal.
+ * XXX - This really isn't a good check
+ * since there can be more than one
+ * thread waiting on the CV. Signals
+ * sent to threads waiting on mutexes
+ * or CVs should really be deferred
+ * until the threads are no longer
+ * waiting, but POSIX says that signals
+ * should be sent "as soon as possible".
*/
- if (interrupted != 0) {
- /*
- * Lock the mutex and ignore any
- * errors. Note that even
- * though this thread may have
- * been canceled, POSIX requires
- * that the mutex be reaquired
- * prior to cancellation.
- */
- (void)_mutex_cv_lock(mutex);
- } else {
+ done = (seqno != (*cond)->c_seqno);
+
+ if (THR_IN_CONDQ(curthread)) {
/*
* Lock the condition variable
* while removing the thread.
*/
- _SPINLOCK(&(*cond)->lock);
+ THR_LOCK_ACQUIRE(curthread,
+ &(*cond)->c_lock);
cond_queue_remove(*cond,
curthread);
@@ -454,21 +514,22 @@ _pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
if (TAILQ_FIRST(&(*cond)->c_queue) == NULL)
(*cond)->c_mutex = NULL;
- _SPINUNLOCK(&(*cond)->lock);
+ THR_LOCK_RELEASE(curthread,
+ &(*cond)->c_lock);
+ }
- /* Lock the mutex: */
+ /*
+ * Save the interrupted flag; locking
+ * the mutex may destroy it.
+ */
+ interrupted = curthread->interrupted;
+ if (curthread->timeout != 0) {
+ /* The wait timedout. */
+ rval = ETIMEDOUT;
+ (void)_mutex_cv_lock(mutex);
+ } else if ((interrupted == 0) ||
+ (done != 0))
rval = _mutex_cv_lock(mutex);
-
- /*
- * Return ETIMEDOUT if the wait
- * timed out and there wasn't an
- * error locking the mutex:
- */
- if ((curthread->timeout != 0)
- && rval == 0)
- rval = ETIMEDOUT;
-
- }
}
}
break;
@@ -476,7 +537,7 @@ _pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
/* Trap invalid condition variable types: */
default:
/* Unlock the condition variable structure: */
- _SPINUNLOCK(&(*cond)->lock);
+ THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);
/* Return an invalid argument error: */
rval = EINVAL;
@@ -484,20 +545,35 @@ _pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
}
if ((interrupted != 0) && (curthread->continuation != NULL))
- curthread->continuation((void *) curthread);
+ curthread->continuation((void *)curthread);
} while ((done == 0) && (rval == 0));
- _thread_leave_cancellation_point();
+ _thr_leave_cancellation_point(curthread);
/* Return the completion status: */
return (rval);
}
int
+__pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
+ const struct timespec *abstime)
+{
+ struct pthread *curthread = _get_curthread();
+ int ret;
+
+ _thr_enter_cancellation_point(curthread);
+ ret = _pthread_cond_timedwait(cond, mutex, abstime);
+ _thr_leave_cancellation_point(curthread);
+ return (ret);
+}
+
+
+int
_pthread_cond_signal(pthread_cond_t * cond)
{
- int rval = 0;
- pthread_t pthread;
+ struct pthread *curthread = _get_curthread();
+ struct pthread *pthread;
+ int rval = 0;
if (cond == NULL)
rval = EINVAL;
@@ -506,14 +582,8 @@ _pthread_cond_signal(pthread_cond_t * cond)
* initialization.
*/
else if (*cond != NULL || (rval = pthread_cond_init(cond, NULL)) == 0) {
- /*
- * Defer signals to protect the scheduling queues
- * from access by the signal handler:
- */
- _thread_kern_sig_defer();
-
/* Lock the condition variable structure: */
- _SPINLOCK(&(*cond)->lock);
+ THR_LOCK_ACQUIRE(curthread, &(*cond)->c_lock);
/* Process according to condition variable type: */
switch ((*cond)->c_type) {
@@ -522,13 +592,19 @@ _pthread_cond_signal(pthread_cond_t * cond)
/* Increment the sequence number: */
(*cond)->c_seqno++;
- if ((pthread = cond_queue_deq(*cond)) != NULL) {
- /*
- * Wake up the signaled thread:
- */
- PTHREAD_NEW_STATE(pthread, PS_RUNNING);
+ /*
+ * Wakeups have to be done with the CV lock held;
+ * otherwise there is a race condition where the
+ * thread can timeout, run on another KSE, and enter
+ * another blocking state (including blocking on a CV).
+ */
+ if ((pthread = TAILQ_FIRST(&(*cond)->c_queue))
+ != NULL) {
+ THR_SCHED_LOCK(curthread, pthread);
+ cond_queue_remove(*cond, pthread);
+ _thr_setrunnable_unlocked(pthread);
+ THR_SCHED_UNLOCK(curthread, pthread);
}
-
/* Check for no more waiters: */
if (TAILQ_FIRST(&(*cond)->c_queue) == NULL)
(*cond)->c_mutex = NULL;
@@ -542,13 +618,7 @@ _pthread_cond_signal(pthread_cond_t * cond)
}
/* Unlock the condition variable structure: */
- _SPINUNLOCK(&(*cond)->lock);
-
- /*
- * Undefer and handle pending signals, yielding if
- * necessary:
- */
- _thread_kern_sig_undefer();
+ THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);
}
/* Return the completion status: */
@@ -558,8 +628,9 @@ _pthread_cond_signal(pthread_cond_t * cond)
int
_pthread_cond_broadcast(pthread_cond_t * cond)
{
- int rval = 0;
- pthread_t pthread;
+ struct pthread *curthread = _get_curthread();
+ struct pthread *pthread;
+ int rval = 0;
if (cond == NULL)
rval = EINVAL;
@@ -568,14 +639,8 @@ _pthread_cond_broadcast(pthread_cond_t * cond)
* initialization.
*/
else if (*cond != NULL || (rval = pthread_cond_init(cond, NULL)) == 0) {
- /*
- * Defer signals to protect the scheduling queues
- * from access by the signal handler:
- */
- _thread_kern_sig_defer();
-
/* Lock the condition variable structure: */
- _SPINLOCK(&(*cond)->lock);
+ THR_LOCK_ACQUIRE(curthread, &(*cond)->c_lock);
/* Process according to condition variable type: */
switch ((*cond)->c_type) {
@@ -588,11 +653,12 @@ _pthread_cond_broadcast(pthread_cond_t * cond)
* Enter a loop to bring all threads off the
* condition queue:
*/
- while ((pthread = cond_queue_deq(*cond)) != NULL) {
- /*
- * Wake up the signaled thread:
- */
- PTHREAD_NEW_STATE(pthread, PS_RUNNING);
+ while ((pthread = TAILQ_FIRST(&(*cond)->c_queue))
+ != NULL) {
+ THR_SCHED_LOCK(curthread, pthread);
+ cond_queue_remove(*cond, pthread);
+ _thr_setrunnable_unlocked(pthread);
+ THR_SCHED_UNLOCK(curthread, pthread);
}
/* There are no more waiting threads: */
@@ -607,13 +673,7 @@ _pthread_cond_broadcast(pthread_cond_t * cond)
}
/* Unlock the condition variable structure: */
- _SPINUNLOCK(&(*cond)->lock);
-
- /*
- * Undefer and handle pending signals, yielding if
- * necessary:
- */
- _thread_kern_sig_undefer();
+ THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);
}
/* Return the completion status: */
@@ -621,26 +681,20 @@ _pthread_cond_broadcast(pthread_cond_t * cond)
}
void
-_cond_wait_backout(pthread_t pthread)
+_cond_wait_backout(struct pthread *curthread)
{
pthread_cond_t cond;
- cond = pthread->data.cond;
+ cond = curthread->data.cond;
if (cond != NULL) {
- /*
- * Defer signals to protect the scheduling queues
- * from access by the signal handler:
- */
- _thread_kern_sig_defer();
-
/* Lock the condition variable structure: */
- _SPINLOCK(&cond->lock);
+ THR_LOCK_ACQUIRE(curthread, &cond->c_lock);
/* Process according to condition variable type: */
switch (cond->c_type) {
/* Fast condition variable: */
case COND_TYPE_FAST:
- cond_queue_remove(cond, pthread);
+ cond_queue_remove(cond, curthread);
/* Check for no more waiters: */
if (TAILQ_FIRST(&cond->c_queue) == NULL)
@@ -652,13 +706,7 @@ _cond_wait_backout(pthread_t pthread)
}
/* Unlock the condition variable structure: */
- _SPINUNLOCK(&cond->lock);
-
- /*
- * Undefer and handle pending signals, yielding if
- * necessary:
- */
- _thread_kern_sig_undefer();
+ THR_LOCK_RELEASE(curthread, &cond->c_lock);
}
}
@@ -666,14 +714,14 @@ _cond_wait_backout(pthread_t pthread)
* Dequeue a waiting thread from the head of a condition queue in
* descending priority order.
*/
-static inline pthread_t
+static inline struct pthread *
cond_queue_deq(pthread_cond_t cond)
{
- pthread_t pthread;
+ struct pthread *pthread;
while ((pthread = TAILQ_FIRST(&cond->c_queue)) != NULL) {
TAILQ_REMOVE(&cond->c_queue, pthread, sqe);
- pthread->flags &= ~PTHREAD_FLAGS_IN_CONDQ;
+ THR_CONDQ_SET(pthread);
if ((pthread->timeout == 0) && (pthread->interrupted == 0))
/*
* Only exit the loop when we find a thread
@@ -684,7 +732,7 @@ cond_queue_deq(pthread_cond_t cond)
break;
}
- return(pthread);
+ return (pthread);
}
/*
@@ -692,7 +740,7 @@ cond_queue_deq(pthread_cond_t cond)
* order.
*/
static inline void
-cond_queue_remove(pthread_cond_t cond, pthread_t pthread)
+cond_queue_remove(pthread_cond_t cond, struct pthread *pthread)
{
/*
* Because pthread_cond_timedwait() can timeout as well
@@ -700,9 +748,9 @@ cond_queue_remove(pthread_cond_t cond, pthread_t pthread)
* guard against removing the thread from the queue if
* it isn't in the queue.
*/
- if (pthread->flags & PTHREAD_FLAGS_IN_CONDQ) {
+ if (THR_IN_CONDQ(pthread)) {
TAILQ_REMOVE(&cond->c_queue, pthread, sqe);
- pthread->flags &= ~PTHREAD_FLAGS_IN_CONDQ;
+ THR_CONDQ_CLEAR(pthread);
}
}
@@ -711,11 +759,12 @@ cond_queue_remove(pthread_cond_t cond, pthread_t pthread)
* order.
*/
static inline void
-cond_queue_enq(pthread_cond_t cond, pthread_t pthread)
+cond_queue_enq(pthread_cond_t cond, struct pthread *pthread)
{
- pthread_t tid = TAILQ_LAST(&cond->c_queue, cond_head);
+ struct pthread *tid = TAILQ_LAST(&cond->c_queue, cond_head);
- PTHREAD_ASSERT_NOT_IN_SYNCQ(pthread);
+ THR_ASSERT(!THR_IN_SYNCQ(pthread),
+ "cond_queue_enq: thread already queued!");
/*
* For the common case of all threads having equal priority,
@@ -730,6 +779,6 @@ cond_queue_enq(pthread_cond_t cond, pthread_t pthread)
tid = TAILQ_NEXT(tid, sqe);
TAILQ_INSERT_BEFORE(tid, pthread, sqe);
}
- pthread->flags |= PTHREAD_FLAGS_IN_CONDQ;
+ THR_CONDQ_SET(pthread);
pthread->data.cond = cond;
}
diff --git a/lib/libkse/thread/thr_condattr_init.c b/lib/libkse/thread/thr_condattr_init.c
index 1af12e1..7cf4c9e 100644
--- a/lib/libkse/thread/thr_condattr_init.c
+++ b/lib/libkse/thread/thr_condattr_init.c
@@ -46,13 +46,13 @@ _pthread_condattr_init(pthread_condattr_t *attr)
pthread_condattr_t pattr;
if ((pattr = (pthread_condattr_t)
- malloc(sizeof(struct pthread_cond_attr))) == NULL) {
+ malloc(sizeof(struct pthread_cond_attr))) == NULL) {
ret = ENOMEM;
} else {
- memcpy(pattr, &pthread_condattr_default,
- sizeof(struct pthread_cond_attr));
+ memcpy(pattr, &_pthread_condattr_default,
+ sizeof(struct pthread_cond_attr));
*attr = pattr;
ret = 0;
}
- return(ret);
+ return (ret);
}
diff --git a/lib/libkse/thread/thr_creat.c b/lib/libkse/thread/thr_creat.c
index 3d5be08..bba8ec3 100644
--- a/lib/libkse/thread/thr_creat.c
+++ b/lib/libkse/thread/thr_creat.c
@@ -38,11 +38,12 @@ __weak_reference(___creat, creat);
int
___creat(const char *path, mode_t mode)
{
- int ret;
+ struct pthread *curthread = _get_curthread();
+ int ret;
- _thread_enter_cancellation_point();
+ _thr_enter_cancellation_point(curthread);
ret = __creat(path, mode);
- _thread_leave_cancellation_point();
+ _thr_leave_cancellation_point(curthread);
return ret;
}
diff --git a/lib/libkse/thread/thr_create.c b/lib/libkse/thread/thr_create.c
index c9f5a42..5a435ca 100644
--- a/lib/libkse/thread/thr_create.c
+++ b/lib/libkse/thread/thr_create.c
@@ -50,102 +50,150 @@ int _thread_next_offset = OFF(tle.tqe_next);
int _thread_uniqueid_offset = OFF(uniqueid);
int _thread_state_offset = OFF(state);
int _thread_name_offset = OFF(name);
-int _thread_ctx_offset = OFF(mailbox.tm_context);
+int _thread_ctx_offset = OFF(tmbx.tm_context);
#undef OFF
int _thread_PS_RUNNING_value = PS_RUNNING;
int _thread_PS_DEAD_value = PS_DEAD;
+static int create_stack(struct pthread_attr *pattr);
+static void thread_start(struct pthread *curthread,
+ void *(*start_routine) (void *), void *arg);
+
__weak_reference(_pthread_create, pthread_create);
+/*
+ * Some notes on new thread creation and first time initializion
+ * to enable multi-threading.
+ *
+ * There are basically two things that need to be done.
+ *
+ * 1) The internal library variables must be initialized.
+ * 2) Upcalls need to be enabled to allow multiple threads
+ * to be run.
+ *
+ * The first may be done as a result of other pthread functions
+ * being called. When _thr_initial is null, _libpthread_init is
+ * called to initialize the internal variables; this also creates
+ * or sets the initial thread. It'd be nice to automatically
+ * have _libpthread_init called on program execution so we don't
+ * have to have checks throughout the library.
+ *
+ * The second part is only triggered by the creation of the first
+ * thread (other than the initial/main thread). If the thread
+ * being created is a scope system thread, then a new KSE/KSEG
+ * pair needs to be allocated. Also, if upcalls haven't been
+ * enabled on the initial thread's KSE, they must be now that
+ * there is more than one thread; this could be delayed until
+ * the initial KSEG has more than one thread.
+ */
int
_pthread_create(pthread_t * thread, const pthread_attr_t * attr,
void *(*start_routine) (void *), void *arg)
{
- struct pthread *curthread = _get_curthread();
- struct itimerval itimer;
- int f_gc = 0;
- int ret = 0;
- pthread_t gc_thread;
- pthread_t new_thread;
- pthread_attr_t pattr;
- void *stack;
-
- /*
- * Locking functions in libc are required when there are
- * threads other than the initial thread.
- */
- __isthreaded = 1;
+ struct kse *curkse;
+ struct pthread *curthread, *new_thread;
+ struct kse *kse = NULL;
+ struct kse_group *kseg = NULL;
+ kse_critical_t crit;
+ int i;
+ int ret = 0;
+
+ if (_thr_initial == NULL)
+ _libpthread_init(NULL);
+
+ crit = _kse_critical_enter();
+ curthread = _get_curthread();
+ curkse = curthread->kse;
/* Allocate memory for the thread structure: */
- if ((new_thread = (pthread_t) malloc(sizeof(struct pthread))) == NULL) {
+ if ((new_thread = _thr_alloc(curkse)) == NULL) {
/* Insufficient memory to create a thread: */
ret = EAGAIN;
} else {
+ /* Initialize the thread structure: */
+ memset(new_thread, 0, sizeof(struct pthread));
+
/* Check if default thread attributes are required: */
- if (attr == NULL || *attr == NULL) {
+ if (attr == NULL || *attr == NULL)
/* Use the default thread attributes: */
- pattr = &pthread_attr_default;
- } else {
- pattr = *attr;
+ new_thread->attr = _pthread_attr_default;
+ else
+ new_thread->attr = *(*attr);
+
+ if (create_stack(&new_thread->attr) != 0) {
+ /* Insufficient memory to create a stack: */
+ ret = EAGAIN;
+ _thr_free(curkse, new_thread);
}
- /* Check if a stack was specified in the thread attributes: */
- if ((stack = pattr->stackaddr_attr) != NULL) {
+ else if (((new_thread->attr.flags & PTHREAD_SCOPE_SYSTEM) != 0) &&
+ (((kse = _kse_alloc(curkse)) == NULL)
+ || ((kseg = _kseg_alloc(curkse)) == NULL))) {
+ /* Insufficient memory to create a new KSE/KSEG: */
+ ret = EAGAIN;
+ if (kse != NULL)
+ _kse_free(curkse, kse);
+ if ((new_thread->attr.flags & THR_STACK_USER) == 0) {
+ KSE_LOCK_ACQUIRE(curkse, &_thread_list_lock);
+ _thr_stack_free(&new_thread->attr);
+ KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
+ }
+ _thr_free(curkse, new_thread);
}
- /* Allocate a stack: */
else {
- stack = _thread_stack_alloc(pattr->stacksize_attr,
- pattr->guardsize_attr);
- if (stack == NULL) {
- ret = EAGAIN;
- free(new_thread);
+ if (kseg != NULL) {
+ /* Add the KSE to the KSEG's list of KSEs. */
+ TAILQ_INSERT_HEAD(&kseg->kg_kseq, kse, k_qe);
+ kse->k_kseg = kseg;
+ kse->k_schedq = &kseg->kg_schedq;
}
- }
+ /*
+ * Write a magic value to the thread structure
+ * to help identify valid ones:
+ */
+ new_thread->magic = THR_MAGIC;
- /* Check for errors: */
- if (ret != 0) {
- } else {
- /* Initialise the thread structure: */
- memset(new_thread, 0, sizeof(struct pthread));
new_thread->slice_usec = -1;
- new_thread->stack = stack;
new_thread->start_routine = start_routine;
new_thread->arg = arg;
-
new_thread->cancelflags = PTHREAD_CANCEL_ENABLE |
PTHREAD_CANCEL_DEFERRED;
- /*
- * Write a magic value to the thread structure
- * to help identify valid ones:
- */
- new_thread->magic = PTHREAD_MAGIC;
+ /* Initialize the thread for signals: */
+ new_thread->sigmask = curthread->sigmask;
+
+ /* No thread is wanting to join to this one: */
+ new_thread->joiner = NULL;
- /* Initialise the machine context: */
- getcontext(&new_thread->mailbox.tm_context);
- new_thread->mailbox.tm_context.uc_stack.ss_sp =
- new_thread->stack;
- new_thread->mailbox.tm_context.uc_stack.ss_size =
- pattr->stacksize_attr;
- makecontext(&new_thread->mailbox.tm_context,
- _thread_start, 1);
- new_thread->mailbox.tm_udata = (void *)new_thread;
+ /* Initialize the signal frame: */
+ new_thread->curframe = NULL;
- /* Copy the thread attributes: */
- memcpy(&new_thread->attr, pattr, sizeof(struct pthread_attr));
+ /* Initialize the machine context: */
+ THR_GETCONTEXT(&new_thread->tmbx.tm_context);
+ new_thread->tmbx.tm_udata = new_thread;
+ new_thread->tmbx.tm_context.uc_sigmask =
+ new_thread->sigmask;
+ new_thread->tmbx.tm_context.uc_stack.ss_size =
+ new_thread->attr.stacksize_attr;
+ new_thread->tmbx.tm_context.uc_stack.ss_sp =
+ new_thread->attr.stackaddr_attr;
+
+ makecontext(&new_thread->tmbx.tm_context,
+ (void (*)(void))thread_start, 4, new_thread,
+ start_routine, arg);
/*
* Check if this thread is to inherit the scheduling
* attributes from its parent:
*/
- if (new_thread->attr.flags & PTHREAD_INHERIT_SCHED) {
+ if ((new_thread->attr.flags & PTHREAD_INHERIT_SCHED) != 0) {
/* Copy the scheduling attributes: */
new_thread->base_priority =
curthread->base_priority &
- ~PTHREAD_SIGNAL_PRIORITY;
+ ~THR_SIGNAL_PRIORITY;
new_thread->attr.prio =
curthread->base_priority &
- ~PTHREAD_SIGNAL_PRIORITY;
+ ~THR_SIGNAL_PRIORITY;
new_thread->attr.sched_policy =
curthread->attr.sched_policy;
} else {
@@ -160,23 +208,49 @@ _pthread_create(pthread_t * thread, const pthread_attr_t * attr,
new_thread->active_priority = new_thread->base_priority;
new_thread->inherited_priority = 0;
- /* Initialize joiner to NULL (no joiner): */
- new_thread->joiner = NULL;
-
/* Initialize the mutex queue: */
TAILQ_INIT(&new_thread->mutexq);
+ /* Initialize thread locking. */
+ if (_lock_init(&new_thread->lock, LCK_ADAPTIVE,
+ _thr_lock_wait, _thr_lock_wakeup) != 0)
+ PANIC("Cannot initialize thread lock");
+ for (i = 0; i < MAX_THR_LOCKLEVEL; i++) {
+ _lockuser_init(&new_thread->lockusers[i],
+ (void *)new_thread);
+ _LCK_SET_PRIVATE2(&new_thread->lockusers[i],
+ (void *)new_thread);
+ }
+
/* Initialise hooks in the thread structure: */
new_thread->specific = NULL;
new_thread->cleanup = NULL;
new_thread->flags = 0;
new_thread->continuation = NULL;
+ if (new_thread->attr.suspend == THR_CREATE_SUSPENDED)
+ new_thread->state = PS_SUSPENDED;
+ else
+ new_thread->state = PS_RUNNING;
+
/*
- * Defer signals to protect the scheduling queues
- * from access by the signal handler:
+ * System scope threads have their own kse and
+ * kseg. Process scope threads are all hung
+ * off the main process kseg.
*/
- _thread_kern_sig_defer();
+ if ((new_thread->attr.flags & PTHREAD_SCOPE_SYSTEM) == 0) {
+ new_thread->kseg = _kse_initial->k_kseg;
+ new_thread->kse = _kse_initial;
+ }
+ else {
+ kse->k_curthread = NULL;
+ kse->k_kseg->kg_flags |= KGF_SINGLE_THREAD;
+ new_thread->kse = kse;
+ new_thread->kseg = kse->k_kseg;
+ kse->k_mbx.km_udata = kse;
+ kse->k_mbx.km_curthread = NULL;
+ }
+ KSE_LOCK_ACQUIRE(curthread->kse, &_thread_list_lock);
/*
* Initialise the unique id which GDB uses to
@@ -184,57 +258,53 @@ _pthread_create(pthread_t * thread, const pthread_attr_t * attr,
*/
new_thread->uniqueid = next_uniqueid++;
- /*
- * Check if the garbage collector thread
- * needs to be started.
- */
- f_gc = (TAILQ_FIRST(&_thread_list) == _thread_initial);
-
/* Add the thread to the linked list of all threads: */
- TAILQ_INSERT_HEAD(&_thread_list, new_thread, tle);
+ THR_LIST_ADD(new_thread);
- if (pattr->suspend == PTHREAD_CREATE_SUSPENDED) {
- new_thread->flags |= PTHREAD_FLAGS_SUSPENDED;
- new_thread->state = PS_SUSPENDED;
- } else {
- new_thread->state = PS_RUNNING;
- PTHREAD_PRIOQ_INSERT_TAIL(new_thread);
- }
+ KSE_LOCK_RELEASE(curthread->kse, &_thread_list_lock);
/*
- * Undefer and handle pending signals, yielding
- * if necessary.
+ * Schedule the new thread starting a new KSEG/KSE
+ * pair if necessary.
*/
- _thread_kern_sig_undefer();
+ _thr_schedule_add(curthread, new_thread);
/* Return a pointer to the thread structure: */
(*thread) = new_thread;
-
- /* Schedule the new user thread: */
- _thread_kern_sched();
-
- /*
- * Start a garbage collector thread
- * if necessary.
- */
- if (f_gc && pthread_create(&gc_thread,NULL,
- _thread_gc,NULL) != 0)
- PANIC("Can't create gc thread");
-
}
}
+ _kse_critical_leave(crit);
+
+ if ((ret == 0) && (_kse_isthreaded() == 0))
+ _kse_setthreaded(1);
/* Return the status: */
return (ret);
}
-void
-_thread_start(void)
+static int
+create_stack(struct pthread_attr *pattr)
{
- struct pthread *curthread = _get_curthread();
+ int ret;
+ /* Check if a stack was specified in the thread attributes: */
+ if ((pattr->stackaddr_attr) != NULL) {
+ pattr->guardsize_attr = 0;
+ pattr->flags = THR_STACK_USER;
+ ret = 0;
+ }
+ else
+ ret = _thr_stack_alloc(pattr);
+ return (ret);
+}
+
+
+static void
+thread_start(struct pthread *curthread, void *(*start_routine) (void *),
+ void *arg)
+{
/* Run the current thread's start routine with argument: */
- pthread_exit(curthread->start_routine(curthread->arg));
+ pthread_exit(start_routine(arg));
/* This point should never be reached. */
PANIC("Thread has resumed after exit");
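
For context, the THR_STACK_USER path in create_stack() above is taken when the caller supplies its own stack through the thread attributes. A minimal caller-side sketch (standard pthread attribute calls, not part of this diff; the helper name is illustrative):

#include <pthread.h>
#include <stdlib.h>

static void *worker(void *arg) { return (arg); }

int
create_on_user_stack(pthread_t *tid)
{
	pthread_attr_t attr;
	size_t size = 1024 * 1024;
	void *stk;

	if ((stk = malloc(size)) == NULL)
		return (-1);
	pthread_attr_init(&attr);
	pthread_attr_setstacksize(&attr, size);
	/*
	 * A caller-supplied stack makes create_stack() skip allocation
	 * and zero the guard size; the library will not free it.
	 */
	pthread_attr_setstackaddr(&attr, stk);
	return (pthread_create(tid, &attr, worker, NULL));
}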
diff --git a/lib/libkse/thread/thr_detach.c b/lib/libkse/thread/thr_detach.c
index 646dcbf..59d363e 100644
--- a/lib/libkse/thread/thr_detach.c
+++ b/lib/libkse/thread/thr_detach.c
@@ -31,6 +31,8 @@
*
* $FreeBSD$
*/
+#include <sys/types.h>
+#include <machine/atomic.h>
#include <errno.h>
#include <pthread.h>
#include "thr_private.h"
@@ -40,50 +42,60 @@ __weak_reference(_pthread_detach, pthread_detach);
int
_pthread_detach(pthread_t pthread)
{
- int rval = 0;
+ struct pthread *curthread, *joiner;
+ int rval = 0;
/* Check for invalid calling parameters: */
- if (pthread == NULL || pthread->magic != PTHREAD_MAGIC)
+ if (pthread == NULL || pthread->magic != THR_MAGIC)
/* Return an invalid argument error: */
rval = EINVAL;
- /* Check if the thread has not been detached: */
- else if ((pthread->attr.flags & PTHREAD_DETACHED) == 0) {
+ /* Check if the thread is already detached: */
+ else if ((pthread->attr.flags & PTHREAD_DETACHED) != 0)
+ /* Return an error: */
+ rval = EINVAL;
+ else {
+ /* Lock the detached thread: */
+ curthread = _get_curthread();
+ THR_SCHED_LOCK(curthread, pthread);
+
/* Flag the thread as detached: */
pthread->attr.flags |= PTHREAD_DETACHED;
- /*
- * Defer signals to protect the scheduling queues from
- * access by the signal handler:
- */
- _thread_kern_sig_defer();
+ /* Retrieve any joining thread and remove it: */
+ joiner = pthread->joiner;
+ pthread->joiner = NULL;
- /* Check if there is a joiner: */
- if (pthread->joiner != NULL) {
- struct pthread *joiner = pthread->joiner;
+ /* We are already in a critical region. */
+ KSE_LOCK_ACQUIRE(curthread->kse, &_thread_list_lock);
+ if ((pthread->flags & THR_FLAGS_GC_SAFE) != 0) {
+ THR_LIST_REMOVE(pthread);
+ THR_GCLIST_ADD(pthread);
+ atomic_store_rel_int(&_gc_check, 1);
+ if (KSE_WAITING(_kse_initial))
+ KSE_WAKEUP(_kse_initial);
+ }
+ KSE_LOCK_RELEASE(curthread->kse, &_thread_list_lock);
- /* Make the thread runnable: */
- PTHREAD_NEW_STATE(joiner, PS_RUNNING);
+ THR_SCHED_UNLOCK(curthread, pthread);
- /* Set the return value for the woken thread: */
- joiner->join_status.error = ESRCH;
- joiner->join_status.ret = NULL;
- joiner->join_status.thread = NULL;
+ /* See if there is a thread waiting in pthread_join(): */
+ if (joiner != NULL) {
+ /* Lock the joiner before fiddling with it. */
+ THR_SCHED_LOCK(curthread, joiner);
+ if (joiner->join_status.thread == pthread) {
+ /*
+ * Set the return value for the woken thread:
+ */
+ joiner->join_status.error = ESRCH;
+ joiner->join_status.ret = NULL;
+ joiner->join_status.thread = NULL;
- /*
- * Disconnect the joiner from the thread being detached:
- */
- pthread->joiner = NULL;
+ _thr_setrunnable_unlocked(joiner);
+ }
+ THR_SCHED_UNLOCK(curthread, joiner);
}
-
- /*
- * Undefer and handle pending signals, yielding if a
- * scheduling signal occurred while in the critical region.
- */
- _thread_kern_sig_undefer();
- } else
- /* Return an error: */
- rval = EINVAL;
+ }
/* Return the completion status: */
return (rval);
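
Caller-visible effect of the joiner handling above (a sketch assuming this implementation's behavior, not text from the diff): a thread blocked in pthread_join() is woken with ESRCH when a third party detaches the target.

#include <assert.h>
#include <errno.h>
#include <pthread.h>
#include <unistd.h>

static pthread_t target;

static void *
idle_forever(void *arg)
{
	for (;;)
		sleep(1);
	return (NULL);
}

static void *
joiner(void *arg)
{
	void *res;

	/* Wakes with ESRCH once the main thread detaches the target. */
	assert(pthread_join(target, &res) == ESRCH);
	return (NULL);
}

int
detach_wakes_joiner(void)
{
	pthread_t j;

	pthread_create(&target, NULL, idle_forever, NULL);
	pthread_create(&j, NULL, joiner, NULL);
	sleep(1);	/* crude way to let the joiner block; illustrative only */
	return (pthread_detach(target));
}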
diff --git a/lib/libkse/thread/thr_exit.c b/lib/libkse/thread/thr_exit.c
index 96d288e..4a82b12 100644
--- a/lib/libkse/thread/thr_exit.c
+++ b/lib/libkse/thread/thr_exit.c
@@ -40,31 +40,24 @@
#include <pthread.h>
#include "thr_private.h"
-#define FLAGS_IN_SCHEDQ \
- (PTHREAD_FLAGS_IN_PRIOQ|PTHREAD_FLAGS_IN_WAITQ|PTHREAD_FLAGS_IN_WORKQ)
+void _pthread_exit(void *status);
__weak_reference(_pthread_exit, pthread_exit);
void
-_thread_exit(char *fname, int lineno, char *string)
+_thr_exit(char *fname, int lineno, char *msg)
{
- char s[256];
+ char s[256];
/* Prepare an error message string: */
snprintf(s, sizeof(s),
"Fatal error '%s' at line %d in file %s (errno = %d)\n",
- string, lineno, fname, errno);
+ msg, lineno, fname, errno);
/* Write the string to the standard error file descriptor: */
__sys_write(2, s, strlen(s));
- /* Force this process to exit: */
- /* XXX - Do we want abort to be conditional on _PTHREADS_INVARIANTS? */
-#if defined(_PTHREADS_INVARIANTS)
abort();
-#else
- __sys_exit(1);
-#endif
}
/*
@@ -73,7 +66,7 @@ _thread_exit(char *fname, int lineno, char *string)
* abnormal thread termination can be found.
*/
void
-_thread_exit_cleanup(void)
+_thr_exit_cleanup(void)
{
struct pthread *curthread = _get_curthread();
@@ -96,22 +89,25 @@ _thread_exit_cleanup(void)
void
_pthread_exit(void *status)
{
- struct pthread *curthread = _get_curthread();
- pthread_t pthread;
+ struct pthread *curthread = _get_curthread();
/* Check if this thread is already in the process of exiting: */
- if ((curthread->flags & PTHREAD_EXITING) != 0) {
+ if ((curthread->flags & THR_FLAGS_EXITING) != 0) {
char msg[128];
- snprintf(msg, sizeof(msg), "Thread %p has called pthread_exit() from a destructor. POSIX 1003.1 1996 s16.2.5.2 does not allow this!",curthread);
+ snprintf(msg, sizeof(msg), "Thread %p has called "
+ "pthread_exit() from a destructor. POSIX 1003.1 "
+ "1996 s16.2.5.2 does not allow this!", curthread);
PANIC(msg);
}
- /* Flag this thread as exiting: */
- curthread->flags |= PTHREAD_EXITING;
+ /*
+ * Flag this thread as exiting. Threads should now be prevented
+ * from joining to this thread.
+ */
+ curthread->flags |= THR_FLAGS_EXITING;
/* Save the return value: */
curthread->ret = status;
-
while (curthread->cleanup != NULL) {
pthread_cleanup_pop(1);
}
@@ -124,58 +120,11 @@ _pthread_exit(void *status)
_thread_cleanupspecific();
}
- /*
- * Lock the garbage collector mutex to ensure that the garbage
- * collector is not using the dead thread list.
- */
- if (pthread_mutex_lock(&_gc_mutex) != 0)
- PANIC("Cannot lock gc mutex");
-
- /* Add this thread to the list of dead threads. */
- TAILQ_INSERT_HEAD(&_dead_list, curthread, dle);
-
- /*
- * Signal the garbage collector thread that there is something
- * to clean up.
- */
- if (pthread_cond_signal(&_gc_cond) != 0)
- PANIC("Cannot signal gc cond");
-
- /*
- * Avoid a race condition where a scheduling signal can occur
- * causing the garbage collector thread to run. If this happens,
- * the current thread can be cleaned out from under us.
- */
- _thread_kern_sig_defer();
-
- /* Unlock the garbage collector mutex: */
- if (pthread_mutex_unlock(&_gc_mutex) != 0)
- PANIC("Cannot unlock gc mutex");
-
- /* Check if there is a thread joining this one: */
- if (curthread->joiner != NULL) {
- pthread = curthread->joiner;
- curthread->joiner = NULL;
-
- /* Make the joining thread runnable: */
- PTHREAD_NEW_STATE(pthread, PS_RUNNING);
-
- /* Set the return value for the joining thread: */
- pthread->join_status.ret = curthread->ret;
- pthread->join_status.error = 0;
- pthread->join_status.thread = NULL;
-
- /* Make this thread collectable by the garbage collector. */
- PTHREAD_ASSERT(((curthread->attr.flags & PTHREAD_DETACHED) ==
- 0), "Cannot join a detached thread");
- curthread->attr.flags |= PTHREAD_DETACHED;
- }
-
- /* Remove this thread from the thread list: */
- TAILQ_REMOVE(&_thread_list, curthread, tle);
-
/* This thread will never be re-scheduled. */
- _thread_kern_sched_state(PS_DEAD, __FILE__, __LINE__);
+ THR_SCHED_LOCK(curthread, curthread);
+ THR_SET_STATE(curthread, PS_DEAD);
+ THR_SCHED_UNLOCK(curthread, curthread);
+ _thr_sched_switch(curthread);
/* This point should not be reached. */
PANIC("Dead thread has resumed");
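
The cleanup loop above (pthread_cleanup_pop(1) until the list is empty) is what runs handlers installed with pthread_cleanup_push(); a small sketch of the pairing it unwinds, not taken from this commit:

#include <pthread.h>
#include <stdlib.h>

static void
release(void *p)
{
	free(p);	/* invoked by the pop loop in _pthread_exit() */
}

static void *
worker(void *arg)
{
	void *buf = malloc(128);

	pthread_cleanup_push(release, buf);
	pthread_exit(NULL);		/* handlers still pushed are popped here */
	pthread_cleanup_pop(0);		/* not reached; balances the push */
	return (NULL);
}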
diff --git a/lib/libkse/thread/thr_fcntl.c b/lib/libkse/thread/thr_fcntl.c
index 33a1c2f..0b4a990 100644
--- a/lib/libkse/thread/thr_fcntl.c
+++ b/lib/libkse/thread/thr_fcntl.c
@@ -32,8 +32,9 @@
* $FreeBSD$
*/
#include <stdarg.h>
-#include <unistd.h>
+#include "namespace.h"
#include <fcntl.h>
+#include "un-namespace.h"
#include <pthread.h>
#include "thr_private.h"
@@ -42,28 +43,29 @@ __weak_reference(__fcntl, fcntl);
int
__fcntl(int fd, int cmd,...)
{
+ struct pthread *curthread = _get_curthread();
int ret;
va_list ap;
- _thread_enter_cancellation_point();
+ _thr_enter_cancellation_point(curthread);
va_start(ap, cmd);
switch (cmd) {
- case F_DUPFD:
- case F_SETFD:
- case F_SETFL:
- ret = __sys_fcntl(fd, cmd, va_arg(ap, int));
- break;
- case F_GETFD:
- case F_GETFL:
- ret = __sys_fcntl(fd, cmd);
- break;
- default:
- ret = __sys_fcntl(fd, cmd, va_arg(ap, void *));
+ case F_DUPFD:
+ case F_SETFD:
+ case F_SETFL:
+ ret = __sys_fcntl(fd, cmd, va_arg(ap, int));
+ break;
+ case F_GETFD:
+ case F_GETFL:
+ ret = __sys_fcntl(fd, cmd);
+ break;
+ default:
+ ret = __sys_fcntl(fd, cmd, va_arg(ap, void *));
}
va_end(ap);
- _thread_leave_cancellation_point();
+ _thr_leave_cancellation_point(curthread);
- return ret;
+ return (ret);
}
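
The switch above is needed because the optional third argument has to be read from the va_list with the type each command expects. Caller-side, the three shapes handled are (standard fcntl usage, not from the diff):

#include <fcntl.h>
#include <unistd.h>

int
fcntl_arg_shapes(int fd)
{
	struct flock fl = { .l_type = F_RDLCK, .l_whence = SEEK_SET };
	int flags;

	flags = fcntl(fd, F_GETFL);			/* no argument */
	fcntl(fd, F_SETFL, flags | O_NONBLOCK);		/* int argument */
	return (fcntl(fd, F_GETLK, &fl));		/* pointer argument */
}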
diff --git a/lib/libkse/thread/thr_find_thread.c b/lib/libkse/thread/thr_find_thread.c
index 032fcfb..9b291b1 100644
--- a/lib/libkse/thread/thr_find_thread.c
+++ b/lib/libkse/thread/thr_find_thread.c
@@ -1,4 +1,5 @@
/*
+ * Copyright (c) 2003 Daniel Eischen <deischen@freebsd.org>
* Copyright (c) 1998 John Birrell <jb@cimlogic.com.au>.
* All rights reserved.
*
@@ -35,32 +36,65 @@
#include <pthread.h>
#include "thr_private.h"
-/* Find a thread in the linked list of active threads: */
+/*
+ * Find a thread in the linked list of active threads and add a reference
+ * to it. Threads with positive reference counts will not be deallocated
+ * until all references are released.
+ */
int
-_find_thread(pthread_t pthread)
+_thr_ref_add(struct pthread *curthread, struct pthread *thread,
+ int include_dead)
{
- pthread_t pthread1;
+ kse_critical_t crit;
+ struct pthread *pthread;
- /* Check if the caller has specified an invalid thread: */
- if (pthread == NULL || pthread->magic != PTHREAD_MAGIC)
+ if (thread == NULL)
/* Invalid thread: */
- return(EINVAL);
-
- /*
- * Defer signals to protect the thread list from access
- * by the signal handler:
- */
- _thread_kern_sig_defer();
+ return (EINVAL);
- /* Search for the specified thread: */
- TAILQ_FOREACH(pthread1, &_thread_list, tle) {
- if (pthread == pthread1)
+ crit = _kse_critical_enter();
+ KSE_LOCK_ACQUIRE(curthread->kse, &_thread_list_lock);
+ TAILQ_FOREACH(pthread, &_thread_list, tle) {
+ if (pthread == thread) {
+ if ((include_dead == 0) &&
+ ((pthread->state == PS_DEAD) ||
+ ((pthread->state == PS_DEADLOCK) ||
+ ((pthread->flags & THR_FLAGS_EXITING) != 0))))
+ pthread = NULL;
+ else {
+ thread->refcount++;
+ curthread->critical_count++;
+ }
break;
+ }
}
-
- /* Undefer and handle pending signals, yielding if necessary: */
- _thread_kern_sig_undefer();
+ KSE_LOCK_RELEASE(curthread->kse, &_thread_list_lock);
+ _kse_critical_leave(crit);
/* Return zero if the thread exists: */
- return ((pthread1 != NULL) ? 0:ESRCH);
+ return ((pthread != NULL) ? 0 : ESRCH);
+}
+
+void
+_thr_ref_delete(struct pthread *curthread, struct pthread *thread)
+{
+ kse_critical_t crit;
+
+ if (thread != NULL) {
+ crit = _kse_critical_enter();
+ KSE_LOCK_ACQUIRE(curthread->kse, &_thread_list_lock);
+ thread->refcount--;
+ curthread->critical_count--;
+ if (((thread->flags & THR_FLAGS_GC_SAFE) != 0) &&
+ (thread->refcount == 0) &&
+ ((thread->attr.flags & PTHREAD_DETACHED) != 0)) {
+ THR_LIST_REMOVE(thread);
+ THR_GCLIST_ADD(thread);
+ _gc_check = 1;
+ if (KSE_WAITING(_kse_initial))
+ KSE_WAKEUP(_kse_initial);
+ }
+ KSE_LOCK_RELEASE(curthread->kse, &_thread_list_lock);
+ _kse_critical_leave(crit);
+ }
}
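
The pair above is intended to bracket any access to another thread's structure so it cannot be reclaimed mid-use (thr_getschedparam.c below uses it exactly this way). A sketch of the calling convention, assuming the thr_private.h definitions; the accessor itself is hypothetical:

static int
read_other_thread_prio(struct pthread *curthread, struct pthread *target,
    int *prio)
{
	int ret;

	/* Pin the target; fails with ESRCH if it is dead or exiting. */
	if ((ret = _thr_ref_add(curthread, target, /*include_dead*/ 0)) != 0)
		return (ret);
	THR_SCHED_LOCK(curthread, target);
	*prio = target->base_priority;
	THR_SCHED_UNLOCK(curthread, target);
	/* Drop the reference; a dead, detached target may now be GC'd. */
	_thr_ref_delete(curthread, target);
	return (0);
}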
diff --git a/lib/libkse/thread/thr_fork.c b/lib/libkse/thread/thr_fork.c
index 5bb64f7..a279621 100644
--- a/lib/libkse/thread/thr_fork.c
+++ b/lib/libkse/thread/thr_fork.c
@@ -31,7 +31,6 @@
*
* $FreeBSD$
*/
-#include <sys/param.h>
#include <errno.h>
#include <string.h>
#include <stdlib.h>
@@ -40,141 +39,21 @@
#include <pthread.h>
#include "thr_private.h"
-static void free_thread_resources(struct pthread *thread);
-
__weak_reference(_fork, fork);
pid_t
_fork(void)
{
- struct pthread *curthread = _get_curthread();
- int i, flags, use_deadlist = 0;
- pid_t ret;
- pthread_t pthread;
- pthread_t pthread_save;
+ struct pthread *curthread;
+ pid_t ret;
- /*
- * Defer signals to protect the scheduling queues from access
- * by the signal handler:
- */
- _thread_kern_sig_defer();
+ curthread = _get_curthread();
/* Fork a new process: */
- if ((ret = __sys_fork()) != 0) {
- /* Parent process or error. Nothing to do here. */
- } else {
- /* Reinitialize the GC mutex: */
- if (_mutex_reinit(&_gc_mutex) != 0) {
- /* Abort this application: */
- PANIC("Cannot initialize GC mutex for forked process");
- }
- /* Reinitialize the GC condition variable: */
- else if (_cond_reinit(&_gc_cond) != 0) {
- /* Abort this application: */
- PANIC("Cannot initialize GC condvar for forked process");
- }
- /* Initialize the ready queue: */
- else if (_pq_init(&_readyq) != 0) {
- /* Abort this application: */
- PANIC("Cannot initialize priority ready queue.");
- } else {
- /*
- * Enter a loop to remove all threads other than
- * the running thread from the thread list:
- */
- if ((pthread = TAILQ_FIRST(&_thread_list)) == NULL) {
- pthread = TAILQ_FIRST(&_dead_list);
- use_deadlist = 1;
- }
- while (pthread != NULL) {
- /* Save the thread to be freed: */
- pthread_save = pthread;
-
- /*
- * Advance to the next thread before
- * destroying the current thread:
- */
- if (use_deadlist != 0)
- pthread = TAILQ_NEXT(pthread, dle);
- else
- pthread = TAILQ_NEXT(pthread, tle);
-
- /* Make sure this isn't the running thread: */
- if (pthread_save != curthread) {
- /*
- * Remove this thread from the
- * appropriate list:
- */
- if (use_deadlist != 0)
- TAILQ_REMOVE(&_thread_list,
- pthread_save, dle);
- else
- TAILQ_REMOVE(&_thread_list,
- pthread_save, tle);
-
- free_thread_resources(pthread_save);
- }
-
- /*
- * Switch to the deadlist when the active
- * thread list has been consumed. This can't
- * be at the top of the loop because it is
- * used to determine to which list the thread
- * belongs (when it is removed from the list).
- */
- if (pthread == NULL) {
- pthread = TAILQ_FIRST(&_dead_list);
- use_deadlist = 1;
- }
- }
-
- /* Treat the current thread as the initial thread: */
- _thread_initial = curthread;
-
- /* Re-init the dead thread list: */
- TAILQ_INIT(&_dead_list);
-
- /* Re-init the waiting and work queues. */
- TAILQ_INIT(&_waitingq);
- TAILQ_INIT(&_workq);
-
- /* Re-init the threads mutex queue: */
- TAILQ_INIT(&curthread->mutexq);
-
- /* No spinlocks yet: */
- _spinblock_count = 0;
-
- /* Initialize the scheduling switch hook routine: */
- _sched_switch_hook = NULL;
- }
- }
-
- /*
- * Undefer and handle pending signals, yielding if necessary:
- */
- _thread_kern_sig_undefer();
+ if ((ret = __sys_fork()) == 0)
+ /* Child process */
+ _kse_single_thread(curthread);
/* Return the process ID: */
return (ret);
}
-
-static void
-free_thread_resources(struct pthread *thread)
-{
-
- /* Check to see if the threads library allocated the stack. */
- if ((thread->attr.stackaddr_attr == NULL) && (thread->stack != NULL)) {
- /*
- * Since this is being called from fork, we are currently single
- * threaded so there is no need to protect the call to
- * _thread_stack_free() with _gc_mutex.
- */
- _thread_stack_free(thread->stack, thread->attr.stacksize_attr,
- thread->attr.guardsize_attr);
- }
-
- if (thread->specific != NULL)
- free(thread->specific);
-
- free(thread);
-}
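
A caller-visible consequence of the new _fork() (sketch, not part of the commit): the child comes back single threaded, and because _kse_single_thread() ends by re-running _libpthread_init(), it can create threads again.

#include <sys/types.h>
#include <sys/wait.h>
#include <pthread.h>
#include <unistd.h>

static void *child_worker(void *arg) { return (arg); }

int
fork_then_thread(void)
{
	pthread_t tid;
	pid_t pid;

	if ((pid = fork()) == 0) {
		/* Child: only the forking thread survived; threading works. */
		if (pthread_create(&tid, NULL, child_worker, NULL) != 0)
			_exit(1);
		pthread_join(tid, NULL);
		_exit(0);
	}
	return (pid > 0 ? 0 : -1);
}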
diff --git a/lib/libkse/thread/thr_fsync.c b/lib/libkse/thread/thr_fsync.c
index 0ede529..d5d3398 100644
--- a/lib/libkse/thread/thr_fsync.c
+++ b/lib/libkse/thread/thr_fsync.c
@@ -40,11 +40,12 @@ __weak_reference(__fsync, fsync);
int
__fsync(int fd)
{
+ struct pthread *curthread = _get_curthread();
int ret;
- _thread_enter_cancellation_point();
+ _thr_enter_cancellation_point(curthread);
ret = __sys_fsync(fd);
- _thread_leave_cancellation_point();
+ _thr_leave_cancellation_point(curthread);
- return ret;
+ return (ret);
}
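
Each syscall wrapper in this commit uses the same _thr_enter/_thr_leave_cancellation_point bracket, which is what makes the call a deferred cancellation point. A caller-side sketch using only standard API (not from the diff):

#include <pthread.h>
#include <unistd.h>

static void *
blocked_reader(void *arg)
{
	char buf[64];
	int fd = *(int *)arg;

	/* read() is wrapped the same way; a blocked read honors cancellation. */
	for (;;)
		(void)read(fd, buf, sizeof(buf));
	return (NULL);
}

int
cancel_blocked_reader(int fd)
{
	pthread_t tid;

	if (pthread_create(&tid, NULL, blocked_reader, &fd) != 0)
		return (-1);
	pthread_cancel(tid);	/* acted on at the wrapper's cancellation point */
	return (pthread_join(tid, NULL));
}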
diff --git a/lib/libkse/thread/thr_getschedparam.c b/lib/libkse/thread/thr_getschedparam.c
index 7f1503c..d00e498 100644
--- a/lib/libkse/thread/thr_getschedparam.c
+++ b/lib/libkse/thread/thr_getschedparam.c
@@ -41,19 +41,33 @@ int
_pthread_getschedparam(pthread_t pthread, int *policy,
struct sched_param *param)
{
+ struct pthread *curthread = _get_curthread();
int ret;
if ((param == NULL) || (policy == NULL))
/* Return an invalid argument error: */
ret = EINVAL;
-
- /* Find the thread in the list of active threads: */
- else if ((ret = _find_thread(pthread)) == 0) {
- /* Return the threads base priority and scheduling policy: */
+ else if (pthread == curthread) {
+ /*
+ * Avoid searching the thread list when it is the current
+ * thread.
+ */
+ THR_SCHED_LOCK(curthread, curthread);
param->sched_priority =
- PTHREAD_BASE_PRIORITY(pthread->base_priority);
+ THR_BASE_PRIORITY(pthread->base_priority);
*policy = pthread->attr.sched_policy;
+ THR_SCHED_UNLOCK(curthread, curthread);
+ ret = 0;
}
-
- return(ret);
+ /* Find the thread in the list of active threads. */
+ else if ((ret = _thr_ref_add(curthread, pthread, /*include dead*/0))
+ == 0) {
+ THR_SCHED_LOCK(curthread, pthread);
+ param->sched_priority =
+ THR_BASE_PRIORITY(pthread->base_priority);
+ *policy = pthread->attr.sched_policy;
+ THR_SCHED_UNLOCK(curthread, pthread);
+ _thr_ref_delete(curthread, pthread);
+ }
+ return (ret);
}
diff --git a/lib/libkse/thread/thr_info.c b/lib/libkse/thread/thr_info.c
index 9ade157..3218b5b 100644
--- a/lib/libkse/thread/thr_info.c
+++ b/lib/libkse/thread/thr_info.c
@@ -56,11 +56,12 @@ struct s_thread_info {
/* Static variables: */
static const struct s_thread_info thread_info[] = {
{PS_RUNNING , "Running"},
+ {PS_LOCKWAIT , "Waiting on an internal lock"},
{PS_MUTEX_WAIT , "Waiting on a mutex"},
{PS_COND_WAIT , "Waiting on a condition variable"},
{PS_SLEEP_WAIT , "Sleeping"},
- {PS_WAIT_WAIT , "Waiting process"},
- {PS_SPINBLOCK , "Waiting for a spinlock"},
+ {PS_SIGSUSPEND , "Suspended, waiting for a signal"},
+ {PS_SIGWAIT , "Waiting for a signal"},
{PS_JOIN , "Waiting to join"},
{PS_SUSPENDED , "Suspended"},
{PS_DEAD , "Dead"},
@@ -71,12 +72,9 @@ static const struct s_thread_info thread_info[] = {
void
_thread_dump_info(void)
{
- char s[512];
- int fd;
- int i;
- pthread_t pthread;
- char tmpfile[128];
- pq_list_t *pq_list;
+ char s[512], tmpfile[128];
+ pthread_t pthread;
+ int fd, i;
for (i = 0; i < 100000; i++) {
snprintf(tmpfile, sizeof(tmpfile), "/tmp/uthread.dump.%u.%i",
@@ -102,64 +100,34 @@ _thread_dump_info(void)
/* all 100000 possibilities are in use :( */
return;
} else {
- /* Output a header for active threads: */
- strcpy(s, "\n\n=============\nACTIVE THREADS\n\n");
+ /* Dump the active threads. */
+ strcpy(s, "\n\n========\nACTIVE THREADS\n\n");
__sys_write(fd, s, strlen(s));
/* Enter a loop to report each thread in the global list: */
TAILQ_FOREACH(pthread, &_thread_list, tle) {
- dump_thread(fd, pthread, /*long_verson*/ 1);
+ if (pthread->state != PS_DEAD)
+				dump_thread(fd, pthread, /*long_version*/ 1);
}
- /* Output a header for ready threads: */
- strcpy(s, "\n\n=============\nREADY THREADS\n\n");
- __sys_write(fd, s, strlen(s));
-
- /* Enter a loop to report each thread in the ready queue: */
- TAILQ_FOREACH (pq_list, &_readyq.pq_queue, pl_link) {
- TAILQ_FOREACH(pthread, &pq_list->pl_head, pqe) {
- dump_thread(fd, pthread, /*long_version*/ 0);
- }
- }
-
- /* Output a header for waiting threads: */
- strcpy(s, "\n\n=============\nWAITING THREADS\n\n");
+ /*
+ * Dump the ready threads.
+ * XXX - We can't easily do this because the run queues
+ * are per-KSEG.
+ */
+ strcpy(s, "\n\n========\nREADY THREADS - unimplemented\n\n");
__sys_write(fd, s, strlen(s));
- /* Enter a loop to report each thread in the waiting queue: */
- TAILQ_FOREACH (pthread, &_waitingq, pqe) {
- dump_thread(fd, pthread, /*long_version*/ 0);
- }
- /* Output a header for threads in the work queue: */
- strcpy(s, "\n\n=============\nTHREADS IN WORKQ\n\n");
+ /*
+ * Dump the waiting threads.
+ * XXX - We can't easily do this because the wait queues
+ * are per-KSEG.
+ */
+ strcpy(s, "\n\n========\nWAITING THREADS - unimplemented\n\n");
__sys_write(fd, s, strlen(s));
- /* Enter a loop to report each thread in the waiting queue: */
- TAILQ_FOREACH (pthread, &_workq, qe) {
- dump_thread(fd, pthread, /*long_version*/ 0);
- }
-
- /* Check if there are no dead threads: */
- if (TAILQ_FIRST(&_dead_list) == NULL) {
- /* Output a record: */
- strcpy(s, "\n\nTHERE ARE NO DEAD THREADS\n");
- __sys_write(fd, s, strlen(s));
- } else {
- /* Output a header for dead threads: */
- strcpy(s, "\n\nDEAD THREADS\n\n");
- __sys_write(fd, s, strlen(s));
-
- /*
- * Enter a loop to report each thread in the global
- * dead thread list:
- */
- TAILQ_FOREACH(pthread, &_dead_list, dle) {
- dump_thread(fd, pthread, /*long_version*/ 0);
- }
- }
-
- /* Close the dump file: */
+ /* Close the dump file. */
__sys_close(fd);
}
}
@@ -167,9 +135,9 @@ _thread_dump_info(void)
static void
dump_thread(int fd, pthread_t pthread, int long_version)
{
- struct pthread *curthread = _get_curthread();
- char s[512];
- int i;
+ struct pthread *curthread = _get_curthread();
+ char s[512];
+ int i;
/* Find the state: */
for (i = 0; i < NELEMENTS(thread_info) - 1; i++)
@@ -178,10 +146,11 @@ dump_thread(int fd, pthread_t pthread, int long_version)
/* Output a record for the thread: */
snprintf(s, sizeof(s),
- "--------------------\nThread %p (%s) prio %3d state %s [%s:%d]\n",
+ "--------------------\n"
+ "Thread %p (%s) prio %3d, blocked %s, state %s [%s:%d]\n",
pthread, (pthread->name == NULL) ? "" : pthread->name,
- pthread->active_priority, thread_info[i].name, pthread->fname,
- pthread->lineno);
+ pthread->active_priority, (pthread->blocked != 0) ? "yes" : "no",
+ thread_info[i].name, pthread->fname, pthread->lineno);
__sys_write(fd, s, strlen(s));
if (long_version != 0) {
@@ -192,13 +161,24 @@ dump_thread(int fd, pthread_t pthread, int long_version)
__sys_write(fd, s, strlen(s));
}
/* Check if this is the initial thread: */
- if (pthread == _thread_initial) {
+ if (pthread == _thr_initial) {
/* Output a record for the initial thread: */
strcpy(s, "This is the initial thread\n");
__sys_write(fd, s, strlen(s));
}
/* Process according to thread state: */
switch (pthread->state) {
+ case PS_SIGWAIT:
+ snprintf(s, sizeof(s), "sigmask (hi)");
+ __sys_write(fd, s, strlen(s));
+ for (i = _SIG_WORDS - 1; i >= 0; i--) {
+ snprintf(s, sizeof(s), "%08x\n",
+ pthread->sigmask.__bits[i]);
+ __sys_write(fd, s, strlen(s));
+ }
+ snprintf(s, sizeof(s), "(lo)\n");
+ __sys_write(fd, s, strlen(s));
+ break;
/*
* Trap other states that are not explicitly
* coded to dump information:
@@ -212,10 +192,10 @@ dump_thread(int fd, pthread_t pthread, int long_version)
/* Set the thread name for debug: */
void
-_pthread_set_name_np(pthread_t thread, const char *name)
+_pthread_set_name_np(pthread_t thread, char *name)
{
/* Check if the caller has specified a valid thread: */
- if (thread != NULL && thread->magic == PTHREAD_MAGIC) {
+ if (thread != NULL && thread->magic == THR_MAGIC) {
if (thread->name != NULL) {
/* Free space for previous name. */
free(thread->name);
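
The name handled by _pthread_set_name_np() above is what appears in the per-thread dump records; a trivial usage sketch (standard FreeBSD API, not part of the diff):

#include <pthread.h>
#include <pthread_np.h>

static void *
named_worker(void *arg)
{
	/* The library strdup()s the name; it shows up in the dump output. */
	pthread_set_name_np(pthread_self(), "worker-0");
	return (arg);
}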
diff --git a/lib/libkse/thread/thr_init.c b/lib/libkse/thread/thr_init.c
index 8e19924..6ff88c8 100644
--- a/lib/libkse/thread/thr_init.c
+++ b/lib/libkse/thread/thr_init.c
@@ -1,4 +1,5 @@
/*
+ * Copyright (c) 2003 Daniel M. Eischen <deischen@FreeBSD.org>
* Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au>
* All rights reserved.
*
@@ -49,7 +50,6 @@
#include <sys/sysctl.h>
#include <sys/time.h>
#include <sys/ttycom.h>
-#include <sys/user.h>
#include <sys/wait.h>
#include <sys/mman.h>
#include <dirent.h>
@@ -57,6 +57,7 @@
#include <fcntl.h>
#include <paths.h>
#include <pthread.h>
+#include <pthread_np.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
@@ -64,11 +65,20 @@
#include <unistd.h>
#include "un-namespace.h"
+#include "libc_private.h"
#include "thr_private.h"
+#include "ksd.h"
+
+int __pthread_cond_wait(pthread_cond_t *, pthread_mutex_t *);
+int __pthread_mutex_lock(pthread_mutex_t *);
+int __pthread_mutex_trylock(pthread_mutex_t *);
+
+static void init_private(void);
+static void init_main_thread(struct pthread *thread);
/*
* All weak references used within libc should be in this table.
- * This will is so that static libraries will work.
+ * This is so that static libraries will work.
*/
static void *references[] = {
&_accept,
@@ -145,40 +155,64 @@ static void *libgcc_references[] = {
&_pthread_mutex_unlock
};
-int _pthread_guard_default;
-int _pthread_page_size;
+#define DUAL_ENTRY(entry) \
+ (pthread_func_t)entry, (pthread_func_t)entry
+
+static pthread_func_t jmp_table[][2] = {
+ {DUAL_ENTRY(_pthread_cond_broadcast)}, /* PJT_COND_BROADCAST */
+ {DUAL_ENTRY(_pthread_cond_destroy)}, /* PJT_COND_DESTROY */
+ {DUAL_ENTRY(_pthread_cond_init)}, /* PJT_COND_INIT */
+ {DUAL_ENTRY(_pthread_cond_signal)}, /* PJT_COND_SIGNAL */
+ {(pthread_func_t)__pthread_cond_wait,
+ (pthread_func_t)_pthread_cond_wait}, /* PJT_COND_WAIT */
+ {DUAL_ENTRY(_pthread_getspecific)}, /* PJT_GETSPECIFIC */
+ {DUAL_ENTRY(_pthread_key_create)}, /* PJT_KEY_CREATE */
+ {DUAL_ENTRY(_pthread_key_delete)}, /* PJT_KEY_DELETE*/
+ {DUAL_ENTRY(_pthread_main_np)}, /* PJT_MAIN_NP */
+ {DUAL_ENTRY(_pthread_mutex_destroy)}, /* PJT_MUTEX_DESTROY */
+ {DUAL_ENTRY(_pthread_mutex_init)}, /* PJT_MUTEX_INIT */
+ {(pthread_func_t)__pthread_mutex_lock,
+ (pthread_func_t)_pthread_mutex_lock}, /* PJT_MUTEX_LOCK */
+ {(pthread_func_t)__pthread_mutex_trylock,
+ (pthread_func_t)_pthread_mutex_trylock},/* PJT_MUTEX_TRYLOCK */
+ {DUAL_ENTRY(_pthread_mutex_unlock)}, /* PJT_MUTEX_UNLOCK */
+ {DUAL_ENTRY(_pthread_mutexattr_destroy)}, /* PJT_MUTEXATTR_DESTROY */
+ {DUAL_ENTRY(_pthread_mutexattr_init)}, /* PJT_MUTEXATTR_INIT */
+ {DUAL_ENTRY(_pthread_mutexattr_settype)}, /* PJT_MUTEXATTR_SETTYPE */
+ {DUAL_ENTRY(_pthread_once)}, /* PJT_ONCE */
+ {DUAL_ENTRY(_pthread_rwlock_destroy)}, /* PJT_RWLOCK_DESTROY */
+ {DUAL_ENTRY(_pthread_rwlock_init)}, /* PJT_RWLOCK_INIT */
+ {DUAL_ENTRY(_pthread_rwlock_rdlock)}, /* PJT_RWLOCK_RDLOCK */
+ {DUAL_ENTRY(_pthread_rwlock_tryrdlock)},/* PJT_RWLOCK_TRYRDLOCK */
+ {DUAL_ENTRY(_pthread_rwlock_trywrlock)},/* PJT_RWLOCK_TRYWRLOCK */
+ {DUAL_ENTRY(_pthread_rwlock_unlock)}, /* PJT_RWLOCK_UNLOCK */
+ {DUAL_ENTRY(_pthread_rwlock_wrlock)}, /* PJT_RWLOCK_WRLOCK */
+ {DUAL_ENTRY(_pthread_self)}, /* PJT_SELF */
+ {DUAL_ENTRY(_pthread_setspecific)}, /* PJT_SETSPECIFIC */
+ {DUAL_ENTRY(_pthread_sigmask)} /* PJT_SIGMASK */
+};
+
+static int init_once = 0;
/*
- * Threaded process initialization
+ * Threaded process initialization.
+ *
+ * This is only called under two conditions:
+ *
+ * 1) Some thread routines have detected that the library hasn't yet
+ * been initialized (_thr_initial == NULL && curthread == NULL), or
+ *
+ * 2) An explicit call to reinitialize after a fork (indicated
+ * by curthread != NULL)
*/
void
-_thread_init(void)
+_libpthread_init(struct pthread *curthread)
{
- int fd;
- int flags;
- int i;
- size_t len;
- int mib[2];
- int sched_stack_size; /* Size of scheduler stack. */
-
- struct clockinfo clockinfo;
- struct sigaction act;
+ int fd;
/* Check if this function has already been called: */
- if (_thread_initial)
- /* Only initialise the threaded application once. */
- return;
-
- _pthread_page_size = getpagesize();
- _pthread_guard_default = getpagesize();
- sched_stack_size = getpagesize();
-
- pthread_attr_default.guardsize_attr = _pthread_guard_default;
-
-
- /* Check if this function has already been called: */
- if (_thread_initial)
- /* Only initialise the threaded application once. */
+ if ((_thr_initial != NULL) && (curthread == NULL))
+ /* Only initialize the threaded application once. */
return;
/*
@@ -189,10 +223,18 @@ _thread_init(void)
PANIC("Failed loading mandatory references in _thread_init");
/*
+ * Check the size of the jump table to make sure it is preset
+ * with the correct number of entries.
+ */
+ if (sizeof(jmp_table) != (sizeof(pthread_func_t) * PJT_MAX * 2))
+ PANIC("Thread jump table not properly initialized");
+ memcpy(__thr_jtable, jmp_table, sizeof(jmp_table));
+
+ /*
* Check for the special case of this process running as
* or in place of init as pid = 1:
*/
- if (getpid() == 1) {
+ if ((_thr_pid = getpid()) == 1) {
/*
* Setup a new session for this process which is
* assumed to be running as root.
@@ -207,200 +249,271 @@ _thread_init(void)
PANIC("Can't set login to root");
if (__sys_ioctl(fd, TIOCSCTTY, (char *) NULL) == -1)
PANIC("Can't set controlling terminal");
- if (__sys_dup2(fd, 0) == -1 ||
- __sys_dup2(fd, 1) == -1 ||
- __sys_dup2(fd, 2) == -1)
- PANIC("Can't dup2");
}
- /* Allocate and initialize the ready queue: */
- if (_pq_alloc(&_readyq, PTHREAD_MIN_PRIORITY, PTHREAD_LAST_PRIORITY) !=
- 0) {
- /* Abort this application: */
- PANIC("Cannot allocate priority ready queue.");
- }
- /* Allocate memory for the thread structure of the initial thread: */
- else if ((_thread_initial = (pthread_t) malloc(sizeof(struct pthread))) == NULL) {
+ /* Initialize pthread private data. */
+ init_private();
+ _kse_init();
+
+ /* Initialize the initial kse and kseg. */
+ _kse_initial = _kse_alloc(NULL);
+ if (_kse_initial == NULL)
+ PANIC("Can't allocate initial kse.");
+ _kse_initial->k_kseg = _kseg_alloc(NULL);
+ if (_kse_initial->k_kseg == NULL)
+ PANIC("Can't allocate initial kseg.");
+ _kse_initial->k_schedq = &_kse_initial->k_kseg->kg_schedq;
+
+ /* Set the initial thread. */
+ if (curthread == NULL) {
+ /* Create and initialize the initial thread. */
+ curthread = _thr_alloc(NULL);
+ if (curthread == NULL)
+ PANIC("Can't allocate initial thread");
+ _thr_initial = curthread;
+ init_main_thread(curthread);
+ } else {
/*
- * Insufficient memory to initialise this application, so
- * abort:
+ * The initial thread is the current thread. It is
+ * assumed that the current thread is already initialized
+ * because it is left over from a fork().
*/
- PANIC("Cannot allocate memory for initial thread");
+ _thr_initial = curthread;
+ }
+ _kse_initial->k_kseg->kg_threadcount = 1;
+ _thr_initial->kse = _kse_initial;
+ _thr_initial->kseg = _kse_initial->k_kseg;
+ _thr_initial->active = 1;
+
+ /*
+ * Add the thread to the thread list and to the KSEG's thread
+ * queue.
+ */
+ THR_LIST_ADD(_thr_initial);
+ TAILQ_INSERT_TAIL(&_kse_initial->k_kseg->kg_threadq, _thr_initial, kle);
+
+ /* Setup the KSE/thread specific data for the current KSE/thread. */
+ if (_ksd_setprivate(&_thr_initial->kse->k_ksd) != 0)
+ PANIC("Can't set initial KSE specific data");
+ _set_curkse(_thr_initial->kse);
+ _thr_initial->kse->k_curthread = _thr_initial;
+ _thr_initial->kse->k_flags |= KF_INITIALIZED;
+ _kse_initial->k_curthread = _thr_initial;
+}
+
+/*
+ * This function and pthread_create() do a lot of the same things.
+ * It'd be nice to consolidate the common stuff in one place.
+ */
+static void
+init_main_thread(struct pthread *thread)
+{
+ int i;
+
+ /* Zero the initial thread structure. */
+ memset(thread, 0, sizeof(struct pthread));
+
+ /* Setup the thread attributes. */
+ thread->attr = _pthread_attr_default;
+
+ /*
+ * Set up the thread stack.
+ *
+ * Create a red zone below the main stack. All other stacks
+ * are constrained to a maximum size by the parameters
+ * passed to mmap(), but this stack is only limited by
+ * resource limits, so this stack needs an explicitly mapped
+ * red zone to protect the thread stack that is just beyond.
+ */
+ if (mmap((void *)_usrstack - THR_STACK_INITIAL -
+ _thr_guard_default, _thr_guard_default, 0, MAP_ANON,
+ -1, 0) == MAP_FAILED)
+ PANIC("Cannot allocate red zone for initial thread");
+
+ /*
+ * Mark the stack as an application supplied stack so that it
+ * isn't deallocated.
+ *
+ * XXX - I'm not sure it would hurt anything to deallocate
+ * the main thread stack because deallocation doesn't
+ * actually free() it; it just puts it in the free
+ * stack queue for later reuse.
+ */
+ thread->attr.stackaddr_attr = (void *)_usrstack - THR_STACK_INITIAL;
+ thread->attr.stacksize_attr = THR_STACK_INITIAL;
+ thread->attr.guardsize_attr = _thr_guard_default;
+ thread->attr.flags |= THR_STACK_USER;
+
+ /*
+ * Write a magic value to the thread structure
+ * to help identify valid ones:
+ */
+ thread->magic = THR_MAGIC;
+
+ thread->slice_usec = -1;
+ thread->cancelflags = PTHREAD_CANCEL_ENABLE | PTHREAD_CANCEL_DEFERRED;
+ thread->name = strdup("initial thread");
+
+ /* Initialize the thread for signals: */
+ sigemptyset(&thread->sigmask);
+
+ /*
+	 * Set up the thread mailbox. The thread's saved context
+ * is also in the mailbox.
+ */
+ thread->tmbx.tm_udata = thread;
+ thread->tmbx.tm_context.uc_sigmask = thread->sigmask;
+ thread->tmbx.tm_context.uc_stack.ss_size = thread->attr.stacksize_attr;
+ thread->tmbx.tm_context.uc_stack.ss_sp = thread->attr.stackaddr_attr;
+
+ /* Default the priority of the initial thread: */
+ thread->base_priority = THR_DEFAULT_PRIORITY;
+ thread->active_priority = THR_DEFAULT_PRIORITY;
+ thread->inherited_priority = 0;
+
+ /* Initialize the mutex queue: */
+ TAILQ_INIT(&thread->mutexq);
+
+ /* Initialize thread locking. */
+ if (_lock_init(&thread->lock, LCK_ADAPTIVE,
+ _thr_lock_wait, _thr_lock_wakeup) != 0)
+ PANIC("Cannot initialize initial thread lock");
+ for (i = 0; i < MAX_THR_LOCKLEVEL; i++) {
+ _lockuser_init(&thread->lockusers[i], (void *)thread);
+ _LCK_SET_PRIVATE2(&thread->lockusers[i], (void *)thread);
}
- /* Allocate memory for the scheduler stack: */
- else if ((_thread_kern_sched_stack = malloc(sched_stack_size)) == NULL)
- PANIC("Failed to allocate stack for scheduler");
- /* Allocate memory for the idle stack: */
- else if ((_idle_thr_stack = malloc(sched_stack_size)) == NULL)
- PANIC("Failed to allocate stack for scheduler");
- else {
- /* Zero the global kernel thread structure: */
- memset(&_thread_kern_thread, 0, sizeof(struct pthread));
- _thread_kern_thread.flags = PTHREAD_FLAGS_PRIVATE;
- memset(_thread_initial, 0, sizeof(struct pthread));
-
- /* Initialize the waiting and work queues: */
- TAILQ_INIT(&_waitingq);
- TAILQ_INIT(&_workq);
-
- /* Initialize the scheduling switch hook routine: */
- _sched_switch_hook = NULL;
-
- /* Give this thread default attributes: */
- memcpy((void *) &_thread_initial->attr, &pthread_attr_default,
- sizeof(struct pthread_attr));
+ /* Initialize hooks in the thread structure: */
+ thread->specific = NULL;
+ thread->cleanup = NULL;
+ thread->flags = 0;
+ thread->continuation = NULL;
+
+ thread->state = PS_RUNNING;
+ thread->uniqueid = 0;
+}
+
+static void
+init_private(void)
+{
+ struct clockinfo clockinfo;
+ struct sigaction act;
+ size_t len;
+ int mib[2];
+ int i;
+
+ /*
+ * Avoid reinitializing some things if they don't need to be,
+ * e.g. after a fork().
+ */
+ if (init_once == 0) {
/* Find the stack top */
mib[0] = CTL_KERN;
mib[1] = KERN_USRSTACK;
len = sizeof (_usrstack);
if (sysctl(mib, 2, &_usrstack, &len, NULL, 0) == -1)
- _usrstack = (void *)USRSTACK;
+ PANIC("Cannot get kern.usrstack from sysctl");
+
/*
- * Create a red zone below the main stack. All other stacks are
- * constrained to a maximum size by the paramters passed to
- * mmap(), but this stack is only limited by resource limits, so
- * this stack needs an explicitly mapped red zone to protect the
- * thread stack that is just beyond.
+ * Create a red zone below the main stack. All other
+ * stacks are constrained to a maximum size by the
+ * parameters passed to mmap(), but this stack is only
+ * limited by resource limits, so this stack needs an
+ * explicitly mapped red zone to protect the thread stack
+ * that is just beyond.
*/
- if (mmap(_usrstack - PTHREAD_STACK_INITIAL -
- _pthread_guard_default, _pthread_guard_default, 0,
- MAP_ANON, -1, 0) == MAP_FAILED)
+ if (mmap((void *)_usrstack - THR_STACK_INITIAL -
+ _thr_guard_default, _thr_guard_default,
+ 0, MAP_ANON, -1, 0) == MAP_FAILED)
PANIC("Cannot allocate red zone for initial thread");
- /* Set the main thread stack pointer. */
- _thread_initial->stack = _usrstack - PTHREAD_STACK_INITIAL;
-
- /* Set the stack attributes. */
- _thread_initial->attr.stackaddr_attr = _thread_initial->stack;
- _thread_initial->attr.stacksize_attr = PTHREAD_STACK_INITIAL;
-
- /* Setup the context for the scheduler. */
- _thread_kern_kse_mailbox.km_stack.ss_sp =
- _thread_kern_sched_stack;
- _thread_kern_kse_mailbox.km_stack.ss_size = sched_stack_size;
- _thread_kern_kse_mailbox.km_func =
- (void *)_thread_kern_scheduler;
-
- /* Initialize the idle context. */
- bzero(&_idle_thr_mailbox, sizeof(struct kse_thr_mailbox));
- getcontext(&_idle_thr_mailbox.tm_context);
- _idle_thr_mailbox.tm_context.uc_stack.ss_sp = _idle_thr_stack;
- _idle_thr_mailbox.tm_context.uc_stack.ss_size =
- sched_stack_size;
- makecontext(&_idle_thr_mailbox.tm_context, _thread_kern_idle,
- 1);
+ /* Get the kernel clockrate: */
+ mib[0] = CTL_KERN;
+ mib[1] = KERN_CLOCKRATE;
+ len = sizeof (struct clockinfo);
+ if (sysctl(mib, 2, &clockinfo, &len, NULL, 0) == 0)
+ _clock_res_usec = clockinfo.tick;
+ else
+ _clock_res_usec = CLOCK_RES_USEC;
+
+ _thr_page_size = getpagesize();
+ _thr_guard_default = _thr_page_size;
+ init_once = 1; /* Don't do this again. */
+ } else {
/*
- * Write a magic value to the thread structure
- * to help identify valid ones:
+ * Destroy the locks before creating them. We don't
+ * know what state they are in so it is better to just
+ * recreate them.
*/
- _thread_initial->magic = PTHREAD_MAGIC;
-
- /* Set the initial cancel state */
- _thread_initial->cancelflags = PTHREAD_CANCEL_ENABLE |
- PTHREAD_CANCEL_DEFERRED;
-
- /* Setup the context for initial thread. */
- getcontext(&_thread_initial->mailbox.tm_context);
- _thread_initial->mailbox.tm_context.uc_stack.ss_sp =
- _thread_initial->stack;
- _thread_initial->mailbox.tm_context.uc_stack.ss_size =
- PTHREAD_STACK_INITIAL;
- _thread_initial->mailbox.tm_udata = (void *)_thread_initial;
-
- /* Default the priority of the initial thread: */
- _thread_initial->base_priority = PTHREAD_DEFAULT_PRIORITY;
- _thread_initial->active_priority = PTHREAD_DEFAULT_PRIORITY;
- _thread_initial->inherited_priority = 0;
-
- /* Initialise the state of the initial thread: */
- _thread_initial->state = PS_RUNNING;
-
- /* Set the name of the thread: */
- _thread_initial->name = strdup("_thread_initial");
-
- /* Initialize joiner to NULL (no joiner): */
- _thread_initial->joiner = NULL;
-
- /* Initialize the owned mutex queue and count: */
- TAILQ_INIT(&(_thread_initial->mutexq));
- _thread_initial->priority_mutex_count = 0;
-
- /* Initialize the global scheduling time: */
- _sched_ticks = 0;
- gettimeofday((struct timeval *) &_sched_tod, NULL);
-
- /* Initialize last active: */
- _thread_initial->last_active = (long) _sched_ticks;
-
- /* Initialise the rest of the fields: */
- _thread_initial->sig_defer_count = 0;
- _thread_initial->specific = NULL;
- _thread_initial->cleanup = NULL;
- _thread_initial->flags = 0;
- _thread_initial->error = 0;
- TAILQ_INIT(&_thread_list);
- TAILQ_INSERT_HEAD(&_thread_list, _thread_initial, tle);
- _set_curthread(_thread_initial);
-
- /* Clear the pending signals for the process. */
- sigemptyset(&_thread_sigpending);
-
- /* Enter a loop to get the existing signal status: */
- for (i = 1; i < NSIG; i++) {
- /* Check for signals which cannot be trapped. */
- if (i == SIGKILL || i == SIGSTOP)
- continue;
-
- /* Get the signal handler details. */
- if (__sys_sigaction(i, NULL,
- &_thread_sigact[i - 1]) != 0)
- PANIC("Cannot read signal handler info");
- }
+ _lock_destroy(&_thread_signal_lock);
+ _lock_destroy(&_mutex_static_lock);
+ _lock_destroy(&_rwlock_static_lock);
+ _lock_destroy(&_keytable_lock);
+ }
- /* Register SIGCHLD (needed for wait(2)). */
- sigfillset(&act.sa_mask);
- act.sa_handler = (void (*) ()) _thread_sig_handler;
- act.sa_flags = SA_SIGINFO | SA_RESTART;
- if (__sys_sigaction(SIGCHLD, &act, NULL) != 0)
- PANIC("Can't initialize signal handler");
- /* Get the process signal mask. */
- __sys_sigprocmask(SIG_SETMASK, NULL, &_thread_sigmask);
+ /* Initialize everything else. */
+ TAILQ_INIT(&_thread_list);
+ TAILQ_INIT(&_thread_gc_list);
- /* Get the kernel clockrate: */
- mib[0] = CTL_KERN;
- mib[1] = KERN_CLOCKRATE;
- len = sizeof (struct clockinfo);
- if (sysctl(mib, 2, &clockinfo, &len, NULL, 0) == 0)
- _clock_res_usec = clockinfo.tick > CLOCK_RES_USEC_MIN ?
- clockinfo.tick : CLOCK_RES_USEC_MIN;
-
- /* Start KSE. */
- _thread_kern_kse_mailbox.km_curthread =
- &_thread_initial->mailbox;
- if (kse_create(&_thread_kern_kse_mailbox, 0) != 0)
- PANIC("kse_new failed");
+ /* Enter a loop to get the existing signal status: */
+ for (i = 1; i < NSIG; i++) {
+ /* Check for signals which cannot be trapped: */
+ if (i == SIGKILL || i == SIGSTOP) {
+ }
+
+ /* Get the signal handler details: */
+ else if (__sys_sigaction(i, NULL,
+ &_thread_sigact[i - 1]) != 0) {
+ /*
+ * Abort this process if signal
+ * initialisation fails:
+ */
+ PANIC("Cannot read signal handler info");
+ }
+
+ /* Initialize the SIG_DFL dummy handler count. */
+ _thread_dfl_count[i] = 0;
}
- /* Initialise the garbage collector mutex and condition variable. */
- if (_pthread_mutex_init(&_gc_mutex,NULL) != 0 ||
- pthread_cond_init(&_gc_cond,NULL) != 0)
- PANIC("Failed to initialise garbage collector mutex or condvar");
-}
+ /*
+ * Install the signal handler for SIGINFO. It isn't
+ * really needed, but it is nice to have for debugging
+ * purposes.
+ */
+ if (__sys_sigaction(SIGINFO, &act, NULL) != 0) {
+ /*
+ * Abort this process if signal initialisation fails:
+ */
+ PANIC("Cannot initialize signal handler");
+ }
+ _thread_sigact[SIGINFO - 1].sa_flags = SA_SIGINFO | SA_RESTART;
-/*
- * Special start up code for NetBSD/Alpha
- */
-#if defined(__NetBSD__) && defined(__alpha__)
-int
-main(int argc, char *argv[], char *env);
+ /*
+ * Initialize the lock for temporary installation of signal
+ * handlers (to support sigwait() semantics) and for the
+ * process signal mask and pending signal sets.
+ */
+ if (_lock_init(&_thread_signal_lock, LCK_ADAPTIVE,
+ _thr_lock_wait, _thr_lock_wakeup) != 0)
+ PANIC("Cannot initialize _thread_signal_lock");
+ if (_lock_init(&_mutex_static_lock, LCK_ADAPTIVE,
+ _thr_lock_wait, _thr_lock_wakeup) != 0)
+ PANIC("Cannot initialize mutex static init lock");
+ if (_lock_init(&_rwlock_static_lock, LCK_ADAPTIVE,
+ _thr_lock_wait, _thr_lock_wakeup) != 0)
+ PANIC("Cannot initialize rwlock static init lock");
+ if (_lock_init(&_keytable_lock, LCK_ADAPTIVE,
+ _thr_lock_wait, _thr_lock_wakeup) != 0)
+ PANIC("Cannot initialize thread specific keytable lock");
+
+ /* Clear pending signals and get the process signal mask. */
+ sigemptyset(&_thr_proc_sigpending);
+ __sys_sigprocmask(SIG_SETMASK, NULL, &_thr_proc_sigmask);
-int
-_thread_main(int argc, char *argv[], char *env)
-{
- _thread_init();
- return (main(argc, argv, env));
+ /*
+ * _thread_list_lock and _kse_count are initialized
+ * by _kse_init()
+ */
}
-#endif
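
The runtime sizeof check on jmp_table above could also be done at compile time; a sketch using the classic negative-array-size idiom and only names already present in this file:

/* Fails to compile if a PJT_* slot is added without updating jmp_table. */
typedef char jmp_table_is_complete[
    (sizeof(jmp_table) == sizeof(pthread_func_t) * PJT_MAX * 2) ? 1 : -1];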
diff --git a/lib/libkse/thread/thr_join.c b/lib/libkse/thread/thr_join.c
index c2e7cec..38cc8b3 100644
--- a/lib/libkse/thread/thr_join.c
+++ b/lib/libkse/thread/thr_join.c
@@ -41,121 +41,91 @@ int
_pthread_join(pthread_t pthread, void **thread_return)
{
struct pthread *curthread = _get_curthread();
- int ret = 0;
- pthread_t thread;
+ int ret = 0;
- _thread_enter_cancellation_point();
+ _thr_enter_cancellation_point(curthread);
/* Check if the caller has specified an invalid thread: */
- if (pthread == NULL || pthread->magic != PTHREAD_MAGIC) {
+ if (pthread == NULL || pthread->magic != THR_MAGIC) {
/* Invalid thread: */
- _thread_leave_cancellation_point();
- return(EINVAL);
+ _thr_leave_cancellation_point(curthread);
+ return (EINVAL);
}
/* Check if the caller has specified itself: */
if (pthread == curthread) {
/* Avoid a deadlock condition: */
- _thread_leave_cancellation_point();
- return(EDEADLK);
+ _thr_leave_cancellation_point(curthread);
+ return (EDEADLK);
}
/*
- * Lock the garbage collector mutex to ensure that the garbage
- * collector is not using the dead thread list.
+ * Find the thread in the list of active threads or in the
+ * list of dead threads:
*/
- if (pthread_mutex_lock(&_gc_mutex) != 0)
- PANIC("Cannot lock gc mutex");
-
- /*
- * Defer signals to protect the thread list from access
- * by the signal handler:
- */
- _thread_kern_sig_defer();
-
- /*
- * Unlock the garbage collector mutex, now that the garbage collector
- * can't be run:
- */
- if (pthread_mutex_unlock(&_gc_mutex) != 0)
- PANIC("Cannot lock gc mutex");
-
- /*
- * Search for the specified thread in the list of active threads. This
- * is done manually here rather than calling _find_thread() because
- * the searches in _thread_list and _dead_list (as well as setting up
- * join/detach state) have to be done atomically.
- */
- TAILQ_FOREACH(thread, &_thread_list, tle) {
- if (thread == pthread)
- break;
- }
- if (thread == NULL) {
- /*
- * Search for the specified thread in the list of dead threads:
- */
- TAILQ_FOREACH(thread, &_dead_list, dle) {
- if (thread == pthread)
- break;
- }
+ if ((ret = _thr_ref_add(curthread, pthread, /*include dead*/1)) != 0) {
+ /* Return an error: */
+ _thr_leave_cancellation_point(curthread);
+ return (ESRCH);
}
- /* Check if the thread was not found or has been detached: */
- if (thread == NULL ||
- ((pthread->attr.flags & PTHREAD_DETACHED) != 0)) {
- /* Undefer and handle pending signals, yielding if necessary: */
- _thread_kern_sig_undefer();
-
- /* Return an error: */
+ /* Check if this thread has been detached: */
+ if ((pthread->attr.flags & PTHREAD_DETACHED) != 0) {
+ /* Remove the reference and return an error: */
+ _thr_ref_delete(curthread, pthread);
ret = ESRCH;
-
- } else if (pthread->joiner != NULL) {
- /* Undefer and handle pending signals, yielding if necessary: */
- _thread_kern_sig_undefer();
-
- /* Multiple joiners are not supported. */
- ret = ENOTSUP;
-
- /* Check if the thread is not dead: */
- } else if (pthread->state != PS_DEAD) {
- /* Set the running thread to be the joiner: */
- pthread->joiner = curthread;
-
- /* Keep track of which thread we're joining to: */
- curthread->join_status.thread = pthread;
-
- while (curthread->join_status.thread == pthread) {
- /* Schedule the next thread: */
- _thread_kern_sched_state(PS_JOIN, __FILE__, __LINE__);
- }
-
- /*
- * The thread return value and error are set by the thread we're
- * joining to when it exits or detaches:
- */
- ret = curthread->join_status.error;
- if ((ret == 0) && (thread_return != NULL))
- *thread_return = curthread->join_status.ret;
} else {
- /*
- * The thread exited (is dead) without being detached, and no
- * thread has joined it.
- */
-
- /* Check if the return value is required: */
- if (thread_return != NULL) {
- /* Return the thread's return value: */
- *thread_return = pthread->ret;
+ /* Lock the target thread while checking its state. */
+ THR_SCHED_LOCK(curthread, pthread);
+ if ((pthread->state == PS_DEAD) ||
+ ((pthread->flags & THR_FLAGS_EXITING) != 0)) {
+ if (thread_return != NULL)
+ /* Return the thread's return value: */
+ *thread_return = pthread->ret;
+
+ /* Unlock the thread and remove the reference. */
+ THR_SCHED_UNLOCK(curthread, pthread);
+ _thr_ref_delete(curthread, pthread);
}
+ else if (pthread->joiner != NULL) {
+ /* Unlock the thread and remove the reference. */
+ THR_SCHED_UNLOCK(curthread, pthread);
+ _thr_ref_delete(curthread, pthread);
- /* Make the thread collectable by the garbage collector. */
- pthread->attr.flags |= PTHREAD_DETACHED;
-
- /* Undefer and handle pending signals, yielding if necessary: */
- _thread_kern_sig_undefer();
+ /* Multiple joiners are not supported. */
+ ret = ENOTSUP;
+ }
+ else {
+ /* Set the running thread to be the joiner: */
+ pthread->joiner = curthread;
+
+ /* Keep track of which thread we're joining to: */
+ curthread->join_status.thread = pthread;
+
+ /* Unlock the thread and remove the reference. */
+ THR_SCHED_UNLOCK(curthread, pthread);
+ _thr_ref_delete(curthread, pthread);
+
+ THR_SCHED_LOCK(curthread, curthread);
+ if (curthread->join_status.thread == pthread)
+ THR_SET_STATE(curthread, PS_JOIN);
+ THR_SCHED_UNLOCK(curthread, curthread);
+
+ while (curthread->join_status.thread == pthread) {
+ /* Schedule the next thread: */
+ _thr_sched_switch(curthread);
+ }
+
+ /*
+ * The thread return value and error are set by the
+ * thread we're joining to when it exits or detaches:
+ */
+ ret = curthread->join_status.error;
+ if ((ret == 0) && (thread_return != NULL))
+ *thread_return = curthread->join_status.ret;
+ }
}
-
- _thread_leave_cancellation_point();
+ _thr_leave_cancellation_point(curthread);
/* Return the completion status: */
return (ret);
diff --git a/lib/libkse/thread/thr_kern.c b/lib/libkse/thread/thr_kern.c
index 6e59a43..b87ae3d 100644
--- a/lib/libkse/thread/thr_kern.c
+++ b/lib/libkse/thread/thr_kern.c
@@ -1,4 +1,6 @@
/*
+ * Copyright (C) 2003 Daniel M. Eischen <deischen@freebsd.org>
+ * Copyright (C) 2002 Jonathon Mini <mini@freebsd.org>
* Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au>
* All rights reserved.
*
@@ -32,474 +34,1452 @@
* $FreeBSD$
*
*/
-#include <errno.h>
-#include <poll.h>
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/types.h>
+#include <sys/kse.h>
+#include <sys/signalvar.h>
+#include <sys/queue.h>
+#include <machine/atomic.h>
+
+#include <assert.h>
+#include <signal.h>
#include <stdlib.h>
-#include <stdarg.h>
#include <string.h>
+#include <time.h>
+#include <ucontext.h>
#include <unistd.h>
-#include <sys/param.h>
-#include <sys/types.h>
-#include <sys/signalvar.h>
-#include <sys/stat.h>
-#include <sys/time.h>
-#include <sys/socket.h>
-#include <sys/uio.h>
-#include <sys/syscall.h>
-#include <fcntl.h>
-#include <pthread.h>
+
+#include "atomic_ops.h"
#include "thr_private.h"
+#include "pthread_md.h"
+#include "libc_private.h"
-/* #define DEBUG_THREAD_KERN */
+/*#define DEBUG_THREAD_KERN */
#ifdef DEBUG_THREAD_KERN
#define DBG_MSG stdout_debug
#else
#define DBG_MSG(x...)
#endif
-static int _kern_idle_running = 0;
-static struct timeval _kern_idle_timeout;
+/*
+ * Define a high water mark for the maximum number of threads that
+ * will be cached. Once this level is reached, any extra threads
+ * will be free()'d.
+ *
+ * XXX - It doesn't make sense to worry about the maximum number of
+ * KSEs that we can cache because the system will limit us to
+ * something *much* less than the maximum number of threads
+ * that we can have. Disregarding KSEs in their own group,
+ * the maximum number of KSEs is the number of processors in
+ * the system.
+ */
+#define MAX_CACHED_THREADS 100
+#define KSE_STACKSIZE 16384
-/* Static function prototype definitions: */
-static void
-thread_kern_idle(void);
+#define KSE_SET_MBOX(kse, thrd) \
+ (kse)->k_mbx.km_curthread = &(thrd)->tmbx
-static void
-dequeue_signals(void);
+#define KSE_SET_EXITED(kse) (kse)->k_flags |= KF_EXITED
-static inline void
-thread_run_switch_hook(pthread_t thread_out, pthread_t thread_in);
+/*
+ * Add/remove threads from a KSE's scheduling queue.
+ * For now the scheduling queue is hung off the KSEG.
+ */
+#define KSEG_THRQ_ADD(kseg, thr) \
+ TAILQ_INSERT_TAIL(&(kseg)->kg_threadq, thr, kle)
+#define KSEG_THRQ_REMOVE(kseg, thr) \
+ TAILQ_REMOVE(&(kseg)->kg_threadq, thr, kle)
-/* Static variables: */
-static int last_tick = 0;
+/*
+ * Macros for manipulating the run queues. The priority queue
+ * routines use the thread's pqe link and also handle the setting
+ * and clearing of the thread's THR_FLAGS_IN_RUNQ flag.
+ */
+#define KSE_RUNQ_INSERT_HEAD(kse, thrd) \
+ _pq_insert_head(&(kse)->k_schedq->sq_runq, thrd)
+#define KSE_RUNQ_INSERT_TAIL(kse, thrd) \
+ _pq_insert_tail(&(kse)->k_schedq->sq_runq, thrd)
+#define KSE_RUNQ_REMOVE(kse, thrd) \
+ _pq_remove(&(kse)->k_schedq->sq_runq, thrd)
+#define KSE_RUNQ_FIRST(kse) _pq_first(&(kse)->k_schedq->sq_runq)
+
+
+/*
+ * We've got to keep track of everything that is allocated, not only
+ * to have a speedy free list, but also so they can be deallocated
+ * after a fork().
+ */
+static TAILQ_HEAD(, kse) active_kseq;
+static TAILQ_HEAD(, kse) free_kseq;
+static TAILQ_HEAD(, kse_group) free_kse_groupq;
+static TAILQ_HEAD(, kse_group) active_kse_groupq;
+static struct lock kse_lock; /* also used for kseg queue */
+static int free_kse_count = 0;
+static int free_kseg_count = 0;
+static TAILQ_HEAD(, pthread) free_threadq;
+static struct lock thread_lock;
+static int free_thread_count = 0;
+static int inited = 0;
+static int active_kse_count = 0;
+static int active_kseg_count = 0;
+
+static void kse_check_completed(struct kse *kse);
+static void kse_check_waitq(struct kse *kse);
+static void kse_check_signals(struct kse *kse);
+static void kse_entry(struct kse_mailbox *mbx);
+static void kse_fini(struct kse *curkse);
+static void kse_sched_multi(struct kse *curkse);
+static void kse_sched_single(struct kse *curkse);
+static void kse_switchout_thread(struct kse *kse, struct pthread *thread);
+static void kse_wait(struct kse *kse);
+static void kseg_free(struct kse_group *kseg);
+static void kseg_init(struct kse_group *kseg);
+static void kse_waitq_insert(struct pthread *thread);
+static void thr_cleanup(struct kse *kse, struct pthread *curthread);
+static void thr_gc(struct kse *curkse);
+static void thr_resume_wrapper(int unused_1, siginfo_t *unused_2,
+ ucontext_t *ucp);
+static void thr_resume_check(struct pthread *curthread, ucontext_t *ucp,
+ struct pthread_sigframe *psf);
+static int thr_timedout(struct pthread *thread, struct timespec *curtime);
+
+/*
+ * This is called after a fork().
+ * No locks need to be taken here since we are guaranteed to be
+ * single threaded.
+ */
void
-_thread_kern_sched(void)
+_kse_single_thread(struct pthread *curthread)
{
- struct timespec ts;
- struct timeval tv;
- struct pthread *curthread = _get_curthread();
- unsigned int current_tick;
-
- /* Get the current time of day. */
- GET_CURRENT_TOD(tv);
- TIMEVAL_TO_TIMESPEC(&tv, &ts);
- current_tick = _sched_ticks;
+ struct kse *kse, *kse_next;
+ struct kse_group *kseg, *kseg_next;
+ struct pthread *thread, *thread_next;
+ kse_critical_t crit;
+ int i;
/*
- * Enter a critical section.
+ * Disable upcalls and clear the threaded flag.
+	 * XXX - I don't think we need to disable upcalls after a fork(),
+	 *       but it doesn't hurt.
*/
- _thread_kern_kse_mailbox.km_curthread = NULL;
+ crit = _kse_critical_enter();
+ __isthreaded = 0;
/*
- * If this thread is becoming inactive, make note of the
- * time.
+ * Enter a loop to remove and free all threads other than
+ * the running thread from the active thread list:
*/
- if (curthread->state != PS_RUNNING) {
+ for (thread = TAILQ_FIRST(&_thread_list); thread != NULL;
+ thread = thread_next) {
/*
- * Save the current time as the time that the
- * thread became inactive:
+		 * Advance to the next thread before destroying
+ * the current thread.
+ */
+ thread_next = TAILQ_NEXT(thread, tle);
+
+ /*
+ * Remove this thread from the list (the current
+ * thread will be removed but re-added by libpthread
+		 * initialization).
*/
- curthread->last_inactive = (long)current_tick;
- if (curthread->last_inactive <
- curthread->last_active) {
- /* Account for a rollover: */
- curthread->last_inactive =+
- UINT_MAX + 1;
+ TAILQ_REMOVE(&_thread_list, thread, tle);
+ /* Make sure this isn't the running thread: */
+ if (thread != curthread) {
+ _thr_stack_free(&thread->attr);
+ if (thread->specific != NULL)
+ free(thread->specific);
+ for (i = 0; i < MAX_THR_LOCKLEVEL; i++) {
+ _lockuser_destroy(&thread->lockusers[i]);
+ }
+ _lock_destroy(&thread->lock);
+ free(thread);
+ }
+ }
+
+ TAILQ_INIT(&curthread->mutexq); /* initialize mutex queue */
+ curthread->joiner = NULL; /* no joining threads yet */
+ sigemptyset(&curthread->sigpend); /* clear pending signals */
+ if (curthread->specific != NULL) {
+ free(curthread->specific);
+ curthread->specific = NULL;
+ curthread->specific_data_count = 0;
+ }
+
+ /* Free the free KSEs: */
+ while ((kse = TAILQ_FIRST(&free_kseq)) != NULL) {
+ TAILQ_REMOVE(&free_kseq, kse, k_qe);
+ _ksd_destroy(&kse->k_ksd);
+ free(kse);
+ }
+ free_kse_count = 0;
+
+ /* Free the active KSEs: */
+ for (kse = TAILQ_FIRST(&active_kseq); kse != NULL; kse = kse_next) {
+ kse_next = TAILQ_NEXT(kse, k_qe);
+ TAILQ_REMOVE(&active_kseq, kse, k_qe);
+ for (i = 0; i < MAX_KSE_LOCKLEVEL; i++) {
+ _lockuser_destroy(&kse->k_lockusers[i]);
}
+ _lock_destroy(&kse->k_lock);
+ free(kse);
+ }
+ active_kse_count = 0;
+
+ /* Free the free KSEGs: */
+ while ((kseg = TAILQ_FIRST(&free_kse_groupq)) != NULL) {
+ TAILQ_REMOVE(&free_kse_groupq, kseg, kg_qe);
+ _lock_destroy(&kseg->kg_lock);
+ free(kseg);
+ }
+ free_kseg_count = 0;
+
+ /* Free the active KSEGs: */
+ for (kseg = TAILQ_FIRST(&active_kse_groupq);
+ kseg != NULL; kseg = kseg_next) {
+ kseg_next = TAILQ_NEXT(kseg, kg_qe);
+ TAILQ_REMOVE(&active_kse_groupq, kseg, kg_qe);
+ _lock_destroy(&kseg->kg_lock);
+ free(kseg);
+ }
+ active_kseg_count = 0;
+
+ /* Free the free threads. */
+ while ((thread = TAILQ_FIRST(&free_threadq)) != NULL) {
+ TAILQ_REMOVE(&free_threadq, thread, tle);
+ if (thread->specific != NULL)
+ free(thread->specific);
+ for (i = 0; i < MAX_THR_LOCKLEVEL; i++) {
+ _lockuser_destroy(&thread->lockusers[i]);
+ }
+ _lock_destroy(&thread->lock);
+ free(thread);
+ }
+ free_thread_count = 0;
+
+ /* Free the to-be-gc'd threads. */
+ while ((thread = TAILQ_FIRST(&_thread_gc_list)) != NULL) {
+ TAILQ_REMOVE(&_thread_gc_list, thread, tle);
+ free(thread);
+ }
+
+ if (inited != 0) {
+ /*
+		 * Destroy these locks; they'll be recreated to ensure they
+ * are in the unlocked state.
+ */
+ _lock_destroy(&kse_lock);
+ _lock_destroy(&thread_lock);
+ _lock_destroy(&_thread_list_lock);
+ inited = 0;
+ }
+
+ /*
+ * After a fork(), the leftover thread goes back to being
+	 * a process-scope thread.
+ */
+ curthread->attr.flags &= ~PTHREAD_SCOPE_SYSTEM;
+ curthread->attr.flags |= PTHREAD_SCOPE_PROCESS;
+
+ /*
+ * After a fork, we are still operating on the thread's original
+ * stack. Don't clear the THR_FLAGS_USER from the thread's
+ * attribute flags.
+ */
+
+ /* Initialize the threads library. */
+ curthread->kse = NULL;
+ curthread->kseg = NULL;
+ _kse_initial = NULL;
+ _libpthread_init(curthread);
+}
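(Illustrative sketch, not part of this change: the comment above says this runs after a fork(), so the expected call site is the fork() wrapper in thr_fork.c, which this commit also touches but is not shown here.  __sys_fork() is assumed to follow the __sys_ naming used elsewhere in this file.)

	pid_t pid;

	if ((pid = __sys_fork()) == 0) {
		/*
		 * Child: tear down every KSE, KSEG and thread except the
		 * running one; _kse_single_thread() ends by calling
		 * _libpthread_init(curthread), so the child starts over
		 * with a freshly initialized, single-threaded library.
		 */
		_kse_single_thread(curthread);
	}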
+
+/*
+ * This is used to initialize housekeeping and to initialize the
+ * KSD for the KSE.
+ */
+void
+_kse_init(void)
+{
+ if (inited == 0) {
+ TAILQ_INIT(&active_kseq);
+ TAILQ_INIT(&active_kse_groupq);
+ TAILQ_INIT(&free_kseq);
+ TAILQ_INIT(&free_kse_groupq);
+ TAILQ_INIT(&free_threadq);
+ if (_lock_init(&kse_lock, LCK_ADAPTIVE,
+ _kse_lock_wait, _kse_lock_wakeup) != 0)
+ PANIC("Unable to initialize free KSE queue lock");
+ if (_lock_init(&thread_lock, LCK_ADAPTIVE,
+ _kse_lock_wait, _kse_lock_wakeup) != 0)
+ PANIC("Unable to initialize free thread queue lock");
+ if (_lock_init(&_thread_list_lock, LCK_ADAPTIVE,
+ _kse_lock_wait, _kse_lock_wakeup) != 0)
+ PANIC("Unable to initialize thread list lock");
+ active_kse_count = 0;
+ active_kseg_count = 0;
+ inited = 1;
}
+}
+
+int
+_kse_isthreaded(void)
+{
+ return (__isthreaded != 0);
+}
+
+/*
+ * This is called when the first thread (other than the initial
+ * thread) is created.
+ */
+void
+_kse_setthreaded(int threaded)
+{
+ if ((threaded != 0) && (__isthreaded == 0)) {
+ /*
+ * Locking functions in libc are required when there are
+ * threads other than the initial thread.
+ */
+ __isthreaded = 1;
+
+ /*
+ * Tell the kernel to create a KSE for the initial thread
+ * and enable upcalls in it.
+ */
+ kse_create(&_kse_initial->k_mbx, 0);
+ KSE_SET_MBOX(_kse_initial, _thr_initial);
+ }
+}
+
+/*
+ * Lock wait and wakeup handlers for KSE locks. These are only used by
+ * KSEs, and should never be used by threads. KSE locks include the
+ * KSE group lock (used for locking the scheduling queue) and the
+ * kse_lock defined above.
+ *
+ * When a KSE lock attempt blocks, the entire KSE blocks, allowing another
+ * KSE to run.  For the most part, it doesn't make much sense to try to
+ * schedule another thread, because you would need to lock the scheduling
+ * queue to do that.  And since the KSE lock is used to lock the scheduling
+ * queue, you would just end up blocking again.
+ */
+void
+_kse_lock_wait(struct lock *lock, struct lockuser *lu)
+{
+ struct kse *curkse = (struct kse *)_LCK_GET_PRIVATE(lu);
+ struct timespec ts;
+ kse_critical_t crit;
/*
- * Place this thread into the appropriate queue(s).
+ * Enter a loop to wait until we get the lock.
*/
- switch (curthread->state) {
- case PS_DEAD:
- case PS_STATE_MAX: /* XXX: silences -Wall */
- case PS_SUSPENDED:
- /* Dead or suspended threads are not placed in any queue. */
- break;
- case PS_RUNNING:
+ ts.tv_sec = 0;
+	ts.tv_nsec = 1000000;	/* 1 ms */
+ KSE_SET_WAIT(curkse);
+ while (_LCK_BUSY(lu)) {
+ /*
+ * Yield the kse and wait to be notified when the lock
+ * is granted.
+ */
+ crit = _kse_critical_enter();
+ __sys_nanosleep(&ts, NULL);
+ _kse_critical_leave(crit);
+
+ /*
+ * Make sure that the wait flag is set again in case
+		 * we woke up without the lock being granted.
+ */
+ KSE_SET_WAIT(curkse);
+ }
+ KSE_CLEAR_WAIT(curkse);
+}
+
+void
+_kse_lock_wakeup(struct lock *lock, struct lockuser *lu)
+{
+ struct kse *curkse;
+ struct kse *kse;
+
+ curkse = _get_curkse();
+ kse = (struct kse *)_LCK_GET_PRIVATE(lu);
+
+ if (kse == curkse)
+ PANIC("KSE trying to wake itself up in lock");
+ else if (KSE_WAITING(kse)) {
/*
- * Save the current time as the time that the
- * thread became inactive:
+ * Notify the owning kse that it has the lock.
*/
- current_tick = _sched_ticks;
- curthread->last_inactive = (long)current_tick;
- if (curthread->last_inactive <
- curthread->last_active) {
- /* Account for a rollover: */
- curthread->last_inactive =+ UINT_MAX + 1;
+ KSE_WAKEUP(kse);
+ }
+}
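(Illustrative sketch, not part of this change: these handlers are the wait/wakeup hooks that contended adaptive locks call.  KSE-level locks are wired to them at creation time, mirroring the _lock_init() calls made by _kse_init() and kseg_init() in this file; my_kse_lock is a hypothetical variable used only for the sketch.)

	struct lock my_kse_lock;

	if (_lock_init(&my_kse_lock, LCK_ADAPTIVE,
	    _kse_lock_wait, _kse_lock_wakeup) != 0)
		PANIC("Unable to initialize example KSE lock");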
+
+/*
+ * Thread wait and wakeup handlers for thread locks. These are only used
+ * by threads, never by KSEs. Thread locks include the per-thread lock
+ * (defined in its structure), and condition variable and mutex locks.
+ */
+void
+_thr_lock_wait(struct lock *lock, struct lockuser *lu)
+{
+ struct pthread *curthread = (struct pthread *)lu->lu_private;
+ int count;
+
+ /*
+ * Spin for a bit.
+ *
+ * XXX - We probably want to make this a bit smarter. It
+ * doesn't make sense to spin unless there is more
+ * than 1 CPU. A thread that is holding one of these
+ * locks is prevented from being swapped out for another
+ * thread within the same scheduling entity.
+ */
+ count = 0;
+ while (_LCK_BUSY(lu) && count < 300)
+ count++;
+ while (_LCK_BUSY(lu)) {
+ THR_SCHED_LOCK(curthread, curthread);
+ if (_LCK_BUSY(lu)) {
+ /* Wait for the lock: */
+ atomic_store_rel_int(&curthread->need_wakeup, 1);
+ THR_SET_STATE(curthread, PS_LOCKWAIT);
+ THR_SCHED_UNLOCK(curthread, curthread);
+ _thr_sched_switch(curthread);
}
+ else
+ THR_SCHED_UNLOCK(curthread, curthread);
+ }
+}
- if ((curthread->slice_usec != -1) &&
- (curthread->attr.sched_policy != SCHED_FIFO)) {
- /*
- * Accumulate the number of microseconds for
- * which the current thread has run:
- */
- curthread->slice_usec +=
- (curthread->last_inactive -
- curthread->last_active) *
- (long)_clock_res_usec;
- /* Check for time quantum exceeded: */
- if (curthread->slice_usec > TIMESLICE_USEC)
- curthread->slice_usec = -1;
+void
+_thr_lock_wakeup(struct lock *lock, struct lockuser *lu)
+{
+ struct pthread *thread;
+ struct pthread *curthread;
+
+ curthread = _get_curthread();
+ thread = (struct pthread *)_LCK_GET_PRIVATE(lu);
+
+ THR_SCHED_LOCK(curthread, thread);
+ _thr_setrunnable_unlocked(thread);
+ atomic_store_rel_int(&thread->need_wakeup, 0);
+ THR_SCHED_UNLOCK(curthread, thread);
+}
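(Illustrative sketch, not part of this change: the thread-level handlers are installed on per-object locks in the same way; this mirrors the _lock_init() call that _pthread_mutex_init() makes in the thr_mutex.c portion of this patch.)

	if (_lock_init(&pmutex->m_lock, LCK_ADAPTIVE,
	    _thr_lock_wait, _thr_lock_wakeup) != 0) {
		free(pmutex);
		ret = ENOMEM;		/* mutex creation fails */
	}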
+
+kse_critical_t
+_kse_critical_enter(void)
+{
+ kse_critical_t crit;
+
+ crit = _ksd_readandclear_tmbx;
+ return (crit);
+}
+
+void
+_kse_critical_leave(kse_critical_t crit)
+{
+ struct pthread *curthread;
+
+ _ksd_set_tmbx(crit);
+ if ((crit != NULL) && ((curthread = _get_curthread()) != NULL))
+ THR_YIELD_CHECK(curthread);
+}
+
+void
+_thr_critical_enter(struct pthread *thread)
+{
+ thread->critical_count++;
+}
+
+void
+_thr_critical_leave(struct pthread *thread)
+{
+ thread->critical_count--;
+ THR_YIELD_CHECK(thread);
+}
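(Illustrative sketch, not part of this change: the usual pairing of the critical-region primitives with a low-level lock, as _thr_setrunnable() and _thr_free() do later in this file; curkse and thread_lock are the objects those functions use.)

	kse_critical_t crit;

	crit = _kse_critical_enter();			/* block upcalls */
	KSE_LOCK_ACQUIRE(curkse, &thread_lock);
	/* ... manipulate shared KSE/thread state ... */
	KSE_LOCK_RELEASE(curkse, &thread_lock);
	_kse_critical_leave(crit);			/* re-enable upcalls, may yield */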
+
+/*
+ * XXX - We may need to take the scheduling lock before calling
+ * this, or perhaps take the lock within here before
+ * doing anything else.
+ */
+void
+_thr_sched_switch(struct pthread *curthread)
+{
+ struct pthread_sigframe psf;
+ kse_critical_t crit;
+ struct kse *curkse;
+ volatile int once = 0;
+
+ /* We're in the scheduler, 5 by 5: */
+ crit = _kse_critical_enter();
+ curkse = _get_curkse();
+
+ curthread->need_switchout = 1; /* The thread yielded on its own. */
+ curthread->critical_yield = 0; /* No need to yield anymore. */
+ curthread->slice_usec = -1; /* Restart the time slice. */
+
+ /*
+ * The signal frame is allocated off the stack because
+ * a thread can be interrupted by other signals while
+ * it is running down pending signals.
+ */
+ sigemptyset(&psf.psf_sigset);
+ curthread->curframe = &psf;
+
+ _thread_enter_uts(&curthread->tmbx, &curkse->k_mbx);
+
+ /*
+ * This thread is being resumed; check for cancellations.
+ */
+ if ((once == 0) && (!THR_IN_CRITICAL(curthread))) {
+ once = 1;
+ thr_resume_check(curthread, &curthread->tmbx.tm_context, &psf);
+ }
+}
+
+/*
+ * This is the entry point of the KSE upcall.
+ */
+static void
+kse_entry(struct kse_mailbox *mbx)
+{
+ struct kse *curkse;
+
+ /* The kernel should always clear this before making the upcall. */
+ assert(mbx->km_curthread == NULL);
+ curkse = (struct kse *)mbx->km_udata;
+
+ /* Check for first time initialization: */
+ if ((curkse->k_flags & KF_INITIALIZED) == 0) {
+		/* Set up this KSE's specific data. */
+ _ksd_setprivate(&curkse->k_ksd);
+ _set_curkse(curkse);
+
+ /* Set this before grabbing the context. */
+ curkse->k_flags |= KF_INITIALIZED;
+ }
+
+ /* Avoid checking the type of KSE more than once. */
+ if ((curkse->k_kseg->kg_flags & KGF_SINGLE_THREAD) != 0) {
+ curkse->k_mbx.km_func = (void *)kse_sched_single;
+ kse_sched_single(curkse);
+ } else {
+ curkse->k_mbx.km_func = (void *)kse_sched_multi;
+ kse_sched_multi(curkse);
+ }
+}
+
+/*
+ * This is the scheduler for a KSE which runs a scope system thread.
+ * The multi-thread KSE scheduler should also work for a single threaded
+ * KSE, but we use a separate scheduler so that it can be fine-tuned
+ * to be more efficient (and perhaps not need a separate stack for
+ * the KSE, allowing it to use the thread's stack).
+ *
+ * XXX - This probably needs some work.
+ */
+static void
+kse_sched_single(struct kse *curkse)
+{
+ struct pthread *curthread;
+ struct timespec ts;
+ int level;
+
+ /* This may have returned from a kse_release(). */
+ if (KSE_WAITING(curkse))
+ KSE_CLEAR_WAIT(curkse);
+
+ curthread = curkse->k_curthread;
+
+ if (curthread->active == 0) {
+ if (curthread->state != PS_RUNNING) {
+ /* Check to see if the thread has timed out. */
+ KSE_GET_TOD(curkse, &ts);
+ if (thr_timedout(curthread, &ts) != 0) {
+ curthread->timeout = 1;
+ curthread->state = PS_RUNNING;
+ }
}
+ } else if (curthread->need_switchout != 0) {
+ /*
+ * This has to do the job of kse_switchout_thread(), only
+ * for a single threaded KSE/KSEG.
+ */
- if (curthread->slice_usec == -1) {
- /*
- * The thread exceeded its time
- * quantum or it yielded the CPU;
- * place it at the tail of the
- * queue for its priority.
- */
- PTHREAD_PRIOQ_INSERT_TAIL(curthread);
- } else {
+ /* This thread no longer needs to yield the CPU: */
+ curthread->critical_yield = 0;
+ curthread->need_switchout = 0;
+
+ /*
+ * Lock the scheduling queue.
+ *
+ * There is no scheduling queue for single threaded KSEs,
+ * but we need a lock for protection regardless.
+ */
+ KSE_SCHED_LOCK(curkse, curkse->k_kseg);
+
+ switch (curthread->state) {
+ case PS_DEAD:
+ /* Unlock the scheduling queue and exit the KSE. */
+ KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
+ kse_fini(curkse); /* does not return */
+ break;
+
+ case PS_COND_WAIT:
+ case PS_SLEEP_WAIT:
+ /* Only insert threads that can timeout: */
+ if (curthread->wakeup_time.tv_sec != -1) {
+ /* Insert into the waiting queue: */
+ KSE_WAITQ_INSERT(curkse, curthread);
+ }
+ break;
+
+ case PS_LOCKWAIT:
+ level = curthread->locklevel - 1;
+ if (_LCK_BUSY(&curthread->lockusers[level]))
+ KSE_WAITQ_INSERT(curkse, curthread);
+ else
+ THR_SET_STATE(curthread, PS_RUNNING);
+ break;
+
+ case PS_JOIN:
+ case PS_MUTEX_WAIT:
+ case PS_RUNNING:
+ case PS_SIGSUSPEND:
+ case PS_SIGWAIT:
+ case PS_SUSPENDED:
+ case PS_DEADLOCK:
+ default:
/*
- * The thread hasn't exceeded its
- * interval. Place it at the head
- * of the queue for its priority.
+ * These states don't timeout and don't need
+ * to be in the waiting queue.
*/
- PTHREAD_PRIOQ_INSERT_HEAD(curthread);
+ break;
}
- break;
- case PS_SPINBLOCK:
- /* Increment spinblock count. */
- _spinblock_count++;
+ if (curthread->state != PS_RUNNING)
+ curthread->active = 0;
+ }
- /* No timeouts for these states. */
- curthread->wakeup_time.tv_sec = -1;
- curthread->wakeup_time.tv_nsec = -1;
+ while (curthread->state != PS_RUNNING) {
+ kse_wait(curkse);
+ }
- /* Restart the time slice. */
- curthread->slice_usec = -1;
+ /* Remove the frame reference. */
+ curthread->curframe = NULL;
- /* Insert into the work queue. */
- PTHREAD_WORKQ_INSERT(curthread);
- break;
+ /* Unlock the scheduling queue. */
+ KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
- case PS_DEADLOCK:
- case PS_JOIN:
- case PS_MUTEX_WAIT:
- case PS_WAIT_WAIT:
- /* No timeouts for these states. */
- curthread->wakeup_time.tv_sec = -1;
- curthread->wakeup_time.tv_nsec = -1;
+ /*
+ * Continue the thread at its current frame:
+ */
+ _thread_switch(&curthread->tmbx, &curkse->k_mbx.km_curthread);
+}
+
+void
+dump_queues(struct kse *curkse)
+{
+ struct pthread *thread;
+
+ DBG_MSG("Threads in waiting queue:\n");
+ TAILQ_FOREACH(thread, &curkse->k_kseg->kg_schedq.sq_waitq, pqe) {
+ DBG_MSG(" thread %p, state %d, blocked %d\n",
+ thread, thread->state, thread->blocked);
+ }
+}
+
+
+/*
+ * This is the scheduler for a KSE which runs multiple threads.
+ */
+static void
+kse_sched_multi(struct kse *curkse)
+{
+ struct pthread *curthread;
+ struct pthread_sigframe *curframe;
+ int ret;
+
+ /* This may have returned from a kse_release(). */
+ if (KSE_WAITING(curkse))
+ KSE_CLEAR_WAIT(curkse);
- /* Restart the time slice. */
- curthread->slice_usec = -1;
+ /* Lock the scheduling lock. */
+ KSE_SCHED_LOCK(curkse, curkse->k_kseg);
- /* Insert into the waiting queue. */
- PTHREAD_WAITQ_INSERT(curthread);
- break;
+ /*
+ * If the current thread was completed in another KSE, then
+ * it will be in the run queue. Don't mark it as being blocked.
+ */
+ if (((curthread = curkse->k_curthread) != NULL) &&
+ ((curthread->flags & THR_FLAGS_IN_RUNQ) == 0) &&
+ (curthread->need_switchout == 0)) {
+ /*
+		 * Assume the current thread is blocked; if it turns
+		 * up in the completed list checked below, the blocked
+		 * flag will be cleared.
+ */
+ curthread->blocked = 1;
+ }
+
+ /* Check for any unblocked threads in the kernel. */
+ kse_check_completed(curkse);
- case PS_COND_WAIT:
- case PS_SLEEP_WAIT:
- /* These states can timeout. */
- /* Restart the time slice. */
- curthread->slice_usec = -1;
+ /*
+	 * Check for threads that have timed out.
+ */
+ kse_check_waitq(curkse);
- /* Insert into the waiting queue. */
- PTHREAD_WAITQ_INSERT(curthread);
- break;
+ /*
+	 * Switch out the current thread, if necessary, as the last step
+ * so that it is inserted into the run queue (if it's runnable)
+ * _after_ any other threads that were added to it above.
+ */
+ if (curthread == NULL)
+ ; /* Nothing to do here. */
+ else if ((curthread->need_switchout == 0) &&
+ (curthread->blocked == 0) && (THR_IN_CRITICAL(curthread))) {
+ /*
+ * Resume the thread and tell it to yield when
+ * it leaves the critical region.
+ */
+ curthread->critical_yield = 0;
+ curthread->active = 1;
+ if ((curthread->flags & THR_FLAGS_IN_RUNQ) != 0)
+ KSE_RUNQ_REMOVE(curkse, curthread);
+ curkse->k_curthread = curthread;
+ curthread->kse = curkse;
+ KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
+ DBG_MSG("Continuing thread %p in critical region\n",
+ curthread);
+ ret = _thread_switch(&curthread->tmbx,
+ &curkse->k_mbx.km_curthread);
+ if (ret != 0)
+ PANIC("Can't resume thread in critical region\n");
}
+ else if ((curthread->flags & THR_FLAGS_IN_RUNQ) == 0)
+ kse_switchout_thread(curkse, curthread);
+ curkse->k_curthread = NULL;
- /* Switch into the scheduler's context. */
- DBG_MSG("Calling _thread_enter_uts()\n");
- _thread_enter_uts(&curthread->mailbox, &_thread_kern_kse_mailbox);
- DBG_MSG("Returned from _thread_enter_uts, thread %p\n", curthread);
+ /* This has to be done without the scheduling lock held. */
+ KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
+ kse_check_signals(curkse);
+
+ /* Check for GC: */
+ if (_gc_check != 0)
+ thr_gc(curkse);
+ KSE_SCHED_LOCK(curkse, curkse->k_kseg);
+
+ dump_queues(curkse);
+
+ /* Check if there are no threads ready to run: */
+ while (((curthread = KSE_RUNQ_FIRST(curkse)) == NULL) &&
+ (curkse->k_kseg->kg_threadcount != 0)) {
+ /*
+ * Wait for a thread to become active or until there are
+ * no more threads.
+ */
+ kse_wait(curkse);
+ kse_check_waitq(curkse);
+ KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
+ kse_check_signals(curkse);
+ if (_gc_check != 0)
+ thr_gc(curkse);
+ KSE_SCHED_LOCK(curkse, curkse->k_kseg);
+ }
+
+ /* Check for no more threads: */
+ if (curkse->k_kseg->kg_threadcount == 0) {
+ /*
+ * Normally this shouldn't return, but it will if there
+ * are other KSEs running that create new threads that
+ * are assigned to this KSE[G]. For instance, if a scope
+ * system thread were to create a scope process thread
+ * and this kse[g] is the initial kse[g], then that newly
+ * created thread would be assigned to us (the initial
+ * kse[g]).
+ */
+ KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
+ kse_fini(curkse);
+ KSE_SCHED_LOCK(curkse, curkse->k_kseg);
+ curthread = KSE_RUNQ_FIRST(curkse);
+ }
+
+ THR_ASSERT(curthread != NULL,
+ "Return from kse_wait/fini without thread.");
+ THR_ASSERT(curthread->state != PS_DEAD,
+ "Trying to resume dead thread!");
+ KSE_RUNQ_REMOVE(curkse, curthread);
/*
- * This point is reached when _thread_switch() is called
- * to restore the state of a thread.
- *
- * This is the normal way out of the scheduler (for synchronous
- * switches).
+ * Make the selected thread the current thread.
*/
+ curkse->k_curthread = curthread;
- /* XXXKSE: Do this inside _thread_kern_scheduler() */
- if (curthread->sig_defer_count == 0) {
- if (((curthread->cancelflags &
- PTHREAD_AT_CANCEL_POINT) == 0) &&
- ((curthread->cancelflags &
- PTHREAD_CANCEL_ASYNCHRONOUS) != 0))
- /*
- * Stick a cancellation point at the
- * start of each async-cancellable
- * thread's resumption.
- *
- * We allow threads woken at cancel
- * points to do their own checks.
- */
- pthread_testcancel();
+ /*
+ * Make sure the current thread's kse points to this kse.
+ */
+ curthread->kse = curkse;
+
+ /*
+ * Reset accounting.
+ */
+ curthread->tmbx.tm_uticks = 0;
+ curthread->tmbx.tm_sticks = 0;
+
+ /*
+ * Reset the time slice if this thread is running for the first
+ * time or running again after using its full time slice allocation.
+ */
+ if (curthread->slice_usec == -1)
+ curthread->slice_usec = 0;
+
+ /* Mark the thread active. */
+ curthread->active = 1;
+
+ /* Remove the frame reference. */
+ curframe = curthread->curframe;
+ curthread->curframe = NULL;
+
+ /* Unlock the scheduling queue: */
+ KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
+
+ /*
+ * The thread's current signal frame will only be NULL if it
+ * is being resumed after being blocked in the kernel. In
+ * this case, and if the thread needs to run down pending
+ * signals or needs a cancellation check, we need to add a
+ * signal frame to the thread's context.
+ */
+#if 0
+ if ((curframe == NULL) && ((curthread->check_pending != 0) ||
+ (((curthread->cancelflags & THR_AT_CANCEL_POINT) == 0) &&
+ ((curthread->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS) != 0)))) {
+ signalcontext(&curthread->tmbx.tm_context, 0,
+ (__sighandler_t *)thr_resume_wrapper);
}
+#endif
+ /*
+ * Continue the thread at its current frame:
+ */
+ DBG_MSG("Continuing thread %p\n", curthread);
+ ret = _thread_switch(&curthread->tmbx, &curkse->k_mbx.km_curthread);
+ if (ret != 0)
+ PANIC("Thread has returned from _thread_switch");
- if (_sched_switch_hook != NULL) {
- /* Run the installed switch hook: */
- thread_run_switch_hook(_last_user_thread, curthread);
+ /* This point should not be reached. */
+ PANIC("Thread has returned from _thread_switch");
+}
+
+static void
+kse_check_signals(struct kse *curkse)
+{
+ sigset_t sigset;
+ int i;
+
+ /* Deliver posted signals. */
+ for (i = 0; i < _SIG_WORDS; i++) {
+ atomic_swap_int(&curkse->k_mbx.km_sigscaught.__bits[i],
+ 0, &sigset.__bits[i]);
+ }
+ if (SIGNOTEMPTY(sigset)) {
+ /*
+ * Dispatch each signal.
+ *
+ * XXX - There is no siginfo for any of these.
+ * I think there should be, especially for
+ * signals from other processes (si_pid, si_uid).
+ */
+ for (i = 1; i < NSIG; i++) {
+ if (sigismember(&sigset, i) != 0) {
+ DBG_MSG("Dispatching signal %d\n", i);
+ _thr_sig_dispatch(curkse, i,
+ NULL /* no siginfo */);
+ }
+ }
+ sigemptyset(&sigset);
+ __sys_sigprocmask(SIG_SETMASK, &sigset, NULL);
}
}
-void
-_thread_kern_scheduler(struct kse_mailbox *km)
+static void
+thr_resume_wrapper(int unused_1, siginfo_t *unused_2, ucontext_t *ucp)
{
- struct timespec ts;
- struct timeval tv;
- pthread_t td, pthread, pthread_h;
- unsigned int current_tick;
- struct kse_thr_mailbox *tm, *p;
- sigset_t sigset;
- int i;
+ struct pthread *curthread = _get_curthread();
- DBG_MSG("entering\n");
- while (!TAILQ_EMPTY(&_thread_list)) {
+ thr_resume_check(curthread, ucp, NULL);
+}
- /* Get the current time of day. */
- ts = km->km_timeofday;
- TIMESPEC_TO_TIMEVAL(&_sched_tod, &ts);
- current_tick = _sched_ticks;
+static void
+thr_resume_check(struct pthread *curthread, ucontext_t *ucp,
+ struct pthread_sigframe *psf)
+{
+ /* Check signals before cancellations. */
+ while (curthread->check_pending != 0) {
+ /* Clear the pending flag. */
+ curthread->check_pending = 0;
/*
- * Pick up threads that had blocked in the kernel and
- * have now completed their trap (syscall, vm fault, etc).
- * These threads were PS_RUNNING (and still are), but they
- * need to be added to the run queue so that they can be
- * scheduled again.
+ * It's perfectly valid, though not portable, for
+ * signal handlers to munge their interrupted context
+ * and expect to return to it. Ensure we use the
+ * correct context when running down signals.
*/
- DBG_MSG("Picking up km_completed\n");
- p = km->km_completed;
- km->km_completed = NULL; /* XXX: Atomic xchg here. */
- while ((tm = p) != NULL) {
- p = tm->tm_next;
- tm->tm_next = NULL;
- if (tm->tm_udata == NULL) {
- DBG_MSG("\tidle context\n");
- _kern_idle_running = 0;
- continue;
+ _thr_sig_rundown(curthread, ucp, psf);
+ }
+
+ if (((curthread->cancelflags & THR_AT_CANCEL_POINT) == 0) &&
+ ((curthread->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS) != 0))
+ pthread_testcancel();
+}
+
+/*
+ * Clean up a thread. This must be called with the thread's KSE
+ * scheduling lock held. The thread must be a thread from the
+ * KSE's group.
+ */
+static void
+thr_cleanup(struct kse *curkse, struct pthread *thread)
+{
+ struct pthread *joiner;
+ int free_thread = 0;
+
+ if ((joiner = thread->joiner) != NULL) {
+ thread->joiner = NULL;
+ if ((joiner->state == PS_JOIN) &&
+ (joiner->join_status.thread == thread)) {
+ joiner->join_status.thread = NULL;
+
+ /* Set the return status for the joining thread: */
+ joiner->join_status.ret = thread->ret;
+
+ /* Make the thread runnable. */
+ if (joiner->kseg == curkse->k_kseg)
+ _thr_setrunnable_unlocked(joiner);
+ else {
+ KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
+ KSE_SCHED_LOCK(curkse, joiner->kseg);
+ _thr_setrunnable_unlocked(joiner);
+ KSE_SCHED_UNLOCK(curkse, joiner->kseg);
+ KSE_SCHED_LOCK(curkse, curkse->k_kseg);
}
- DBG_MSG("\tmailbox=%p pthread=%p\n", tm, tm->tm_udata);
- PTHREAD_PRIOQ_INSERT_TAIL((pthread_t)tm->tm_udata);
}
+ thread->attr.flags |= PTHREAD_DETACHED;
+ }
- /* Deliver posted signals. */
- DBG_MSG("Picking up signals\n");
- bcopy(&km->km_sigscaught, &sigset, sizeof(sigset_t));
- sigemptyset(&km->km_sigscaught); /* XXX */
- if (SIGNOTEMPTY(sigset))
- for (i = 1; i < NSIG; i++)
- if (sigismember(&sigset, i) != 0)
- _thread_sig_dispatch(i);
+ thread->flags |= THR_FLAGS_GC_SAFE;
+ thread->kseg->kg_threadcount--;
+ KSE_LOCK_ACQUIRE(curkse, &_thread_list_lock);
+ _thr_stack_free(&thread->attr);
+ if ((thread->attr.flags & PTHREAD_DETACHED) != 0) {
+ /* Remove this thread from the list of all threads: */
+ THR_LIST_REMOVE(thread);
+ if (thread->refcount == 0) {
+ THR_GCLIST_REMOVE(thread);
+ TAILQ_REMOVE(&thread->kseg->kg_threadq, thread, kle);
+ free_thread = 1;
+ }
+ }
+ KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
+ if (free_thread != 0)
+ _thr_free(curkse, thread);
+}
- if (_spinblock_count != 0) {
- /*
- * Enter a loop to look for threads waiting on
- * a spinlock that is now available.
- */
- PTHREAD_WAITQ_SETACTIVE();
- TAILQ_FOREACH(pthread, &_workq, qe) {
- if (pthread->state == PS_SPINBLOCK) {
- /*
- * If the lock is available, let the
- * thread run.
- */
- if (pthread->data.spinlock->
- access_lock == 0) {
- PTHREAD_WAITQ_CLEARACTIVE();
- PTHREAD_WORKQ_REMOVE(pthread);
- PTHREAD_PRIOQ_INSERT_TAIL(
- pthread);
- PTHREAD_SET_STATE(pthread,
- PS_RUNNING);
- PTHREAD_WAITQ_SETACTIVE();
-
- /*
- * One less thread in a
- * spinblock state:
- */
- _spinblock_count--;
- }
- }
+void
+thr_gc(struct kse *curkse)
+{
+	struct pthread *td, *joiner;
+	struct kse_group *free_kseg;
+	int clean;
+
+ _gc_check = 0;
+ KSE_LOCK_ACQUIRE(curkse, &_thread_list_lock);
+ while ((td = TAILQ_FIRST(&_thread_gc_list)) != NULL) {
+ THR_GCLIST_REMOVE(td);
+ clean = (td->attr.flags & PTHREAD_DETACHED) != 0;
+ KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
+
+ KSE_SCHED_LOCK(curkse, td->kseg);
+ TAILQ_REMOVE(&td->kseg->kg_threadq, td, kle);
+ if (TAILQ_EMPTY(&td->kseg->kg_threadq))
+ free_kseg = td->kseg;
+ else
+ free_kseg = NULL;
+ joiner = NULL;
+ if ((td->joiner != NULL) && (td->joiner->state == PS_JOIN) &&
+ (td->joiner->join_status.thread == td)) {
+ joiner = td->joiner;
+ joiner->join_status.thread = NULL;
+
+ /* Set the return status for the joining thread: */
+ joiner->join_status.ret = td->ret;
+
+ /* Make the thread runnable. */
+ if (td->kseg == joiner->kseg) {
+ _thr_setrunnable_unlocked(joiner);
+ joiner = NULL;
}
- PTHREAD_WAITQ_CLEARACTIVE();
}
+ td->joiner = NULL;
+ KSE_SCHED_UNLOCK(curkse, td->kseg);
+ if (free_kseg != NULL)
+ kseg_free(free_kseg);
+ if (joiner != NULL) {
+ KSE_SCHED_LOCK(curkse, joiner->kseg);
+ _thr_setrunnable_unlocked(joiner);
+			KSE_SCHED_UNLOCK(curkse, joiner->kseg);
+ }
+ _thr_free(curkse, td);
+ KSE_LOCK_ACQUIRE(curkse, &_thread_list_lock);
+ }
+ KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
+}
+
+
+/*
+ * Only new threads that are running or suspended may be scheduled.
+ */
+void
+_thr_schedule_add(struct pthread *curthread, struct pthread *newthread)
+{
+ struct kse *curkse;
+ kse_critical_t crit;
+ int need_start;
- /* Wake up threads that have timed out. */
- DBG_MSG("setactive\n");
- PTHREAD_WAITQ_SETACTIVE();
- DBG_MSG("Picking up timeouts (%x)\n", TAILQ_FIRST(&_waitingq));
- while (((pthread = TAILQ_FIRST(&_waitingq)) != NULL) &&
- (pthread->wakeup_time.tv_sec != -1) &&
- (((pthread->wakeup_time.tv_sec == 0) &&
- (pthread->wakeup_time.tv_nsec == 0)) ||
- (pthread->wakeup_time.tv_sec < ts.tv_sec) ||
- ((pthread->wakeup_time.tv_sec == ts.tv_sec) &&
- (pthread->wakeup_time.tv_nsec <= ts.tv_nsec)))) {
- DBG_MSG("\t...\n");
+ /*
+	 * System scope threads get their own KSE and KSE group, which
+	 * have not been started yet.
+ */
+ if ((newthread->attr.flags & PTHREAD_SCOPE_SYSTEM) != 0) {
+ /*
+ * No need to lock the scheduling queue since the
+ * KSE/KSEG pair have not yet been started.
+ */
+ KSEG_THRQ_ADD(newthread->kseg, newthread);
+ if (newthread->state == PS_RUNNING)
+ THR_RUNQ_INSERT_TAIL(newthread);
+ newthread->kseg->kg_threadcount++;
+ /*
+ * This thread needs a new KSE and KSEG.
+ */
+ crit = _kse_critical_enter();
+ curkse = _get_curkse();
+ _ksd_setprivate(&newthread->kse->k_ksd);
+ kse_create(&newthread->kse->k_mbx, 1);
+ _ksd_setprivate(&curkse->k_ksd);
+ _kse_critical_leave(crit);
+ }
+ else {
+ /*
+ * Lock the KSE and add the new thread to its list of
+ * assigned threads. If the new thread is runnable, also
+ * add it to the KSE's run queue.
+ */
+ need_start = 0;
+ KSE_SCHED_LOCK(curthread->kse, newthread->kseg);
+ KSEG_THRQ_ADD(newthread->kseg, newthread);
+ if (newthread->state == PS_RUNNING)
+ THR_RUNQ_INSERT_TAIL(newthread);
+ newthread->kseg->kg_threadcount++;
+ if ((newthread->kse->k_flags & KF_STARTED) == 0) {
/*
- * Remove this thread from the waiting queue
- * (and work queue if necessary) and place it
- * in the ready queue.
+ * This KSE hasn't been started yet. Start it
+ * outside of holding the lock.
*/
- PTHREAD_WAITQ_CLEARACTIVE();
- if (pthread->flags & PTHREAD_FLAGS_IN_WORKQ)
- PTHREAD_WORKQ_REMOVE(pthread);
- DBG_MSG("\twaking thread\n");
- PTHREAD_NEW_STATE(pthread, PS_RUNNING);
- PTHREAD_WAITQ_SETACTIVE();
+ newthread->kse->k_flags |= KF_STARTED;
+ need_start = 1;
+ }
+ KSE_SCHED_UNLOCK(curthread->kse, newthread->kseg);
+
+ if (need_start != 0)
+ kse_create(&newthread->kse->k_mbx, 0);
+ else if ((newthread->state == PS_RUNNING) &&
+ KSE_WAITING(newthread->kse)) {
/*
- * Flag the timeout in the thread structure:
+ * The thread is being scheduled on another KSEG.
*/
- pthread->timeout = 1;
+ KSE_WAKEUP(newthread->kse);
}
- DBG_MSG("clearactive\n");
- PTHREAD_WAITQ_CLEARACTIVE();
+ }
+}
- /*
- * Get the highest priority thread in the ready queue.
- */
- DBG_MSG("Selecting thread\n");
- pthread_h = PTHREAD_PRIOQ_FIRST();
+void
+kse_waitq_insert(struct pthread *thread)
+{
+ struct pthread *td;
+
+ if (thread->wakeup_time.tv_sec == -1)
+ TAILQ_INSERT_TAIL(&thread->kse->k_schedq->sq_waitq, thread,
+ pqe);
+ else {
+ td = TAILQ_FIRST(&thread->kse->k_schedq->sq_waitq);
+ while ((td != NULL) && (td->wakeup_time.tv_sec != -1) &&
+ ((td->wakeup_time.tv_sec < thread->wakeup_time.tv_sec) ||
+ ((td->wakeup_time.tv_sec == thread->wakeup_time.tv_sec) &&
+ (td->wakeup_time.tv_nsec <= thread->wakeup_time.tv_nsec))))
+ td = TAILQ_NEXT(td, pqe);
+ if (td == NULL)
+ TAILQ_INSERT_TAIL(&thread->kse->k_schedq->sq_waitq,
+ thread, pqe);
+ else
+ TAILQ_INSERT_BEFORE(td, thread, pqe);
+ }
+ thread->flags |= THR_FLAGS_IN_WAITQ;
+}
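(Worked example, illustrative values only: kse_waitq_insert() keeps timed threads sorted soonest-first and parks threads with no timeout, tv_sec == -1, at the tail.)

	insert wakeup times {10, 0}, {12, 0}, {-1, -1}, {11, 500000000} in that order
	resulting sq_waitq:  {10, 0} -> {11, 500000000} -> {12, 0} -> {-1, -1}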
- /* Check if there are no threads ready to run: */
- if (pthread_h) {
- DBG_MSG("Scheduling thread\n");
- /* Remove the thread from the ready queue: */
- PTHREAD_PRIOQ_REMOVE(pthread_h);
+/*
+ * This must be called with the scheduling lock held.
+ */
+static void
+kse_check_completed(struct kse *kse)
+{
+ struct pthread *thread;
+ struct kse_thr_mailbox *completed;
+
+ if ((completed = kse->k_mbx.km_completed) != NULL) {
+ kse->k_mbx.km_completed = NULL;
+ while (completed != NULL) {
+ thread = completed->tm_udata;
+ DBG_MSG("Found completed thread %p, name %s\n",
+ thread,
+ (thread->name == NULL) ? "none" : thread->name);
+ thread->blocked = 0;
+ if (thread != kse->k_curthread)
+ KSE_RUNQ_INSERT_TAIL(kse, thread);
+ completed = completed->tm_next;
+ }
+ }
+}
- /* Make the selected thread the current thread: */
- _set_curthread(pthread_h);
+/*
+ * This must be called with the scheduling lock held.
+ */
+static void
+kse_check_waitq(struct kse *kse)
+{
+ struct pthread *pthread;
+ struct timespec ts;
- /*
- * Save the current time as the time that the thread
- * became active:
- */
- current_tick = _sched_ticks;
- pthread_h->last_active = (long) current_tick;
+ KSE_GET_TOD(kse, &ts);
+ /*
+	 * Wake up threads that have timed out.  This has to be
+	 * done before adding the current thread to the run queue
+	 * so that a CPU-intensive thread doesn't get preference
+ * over waiting threads.
+ */
+ while (((pthread = KSE_WAITQ_FIRST(kse)) != NULL) &&
+ thr_timedout(pthread, &ts)) {
+ /* Remove the thread from the wait queue: */
+ KSE_WAITQ_REMOVE(kse, pthread);
+ DBG_MSG("Found timedout thread %p in waitq\n", pthread);
+
+		/* Indicate that the thread timed out: */
+ pthread->timeout = 1;
+
+ /* Add the thread to the priority queue: */
+ THR_SET_STATE(pthread, PS_RUNNING);
+ KSE_RUNQ_INSERT_TAIL(kse, pthread);
+ }
+}
+
+static int
+thr_timedout(struct pthread *thread, struct timespec *curtime)
+{
+ if (thread->wakeup_time.tv_sec < 0)
+ return (0);
+ else if (thread->wakeup_time.tv_sec > curtime->tv_sec)
+ return (0);
+ else if ((thread->wakeup_time.tv_sec == curtime->tv_sec) &&
+ (thread->wakeup_time.tv_nsec > curtime->tv_nsec))
+ return (0);
+ else
+ return (1);
+}
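(Worked example, illustrative values only, with thread->wakeup_time = {5, 250000000}:)

	curtime {5, 200000000}   -> thr_timedout() == 0  (same second, nanoseconds not reached)
	curtime {5, 250000000}   -> thr_timedout() == 1  (nanoseconds reached)
	curtime {6, 0}           -> thr_timedout() == 1  (a later second)
	wakeup_time.tv_sec == -1 -> thr_timedout() == 0  (no timeout set)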
+
+/*
+ * This must be called with the scheduling lock held.
+ *
+ * Each thread has a time slice, a wakeup time (used when it wants
+ * to wait for a specified amount of time), a run state, and an
+ * active flag.
+ *
+ * When a thread gets run by the scheduler, the active flag is
+ * set to non-zero (1). When a thread performs an explicit yield
+ * or schedules a state change, it enters the scheduler and the
+ * active flag is cleared. When the active flag is still seen
+ * set in the scheduler, that means that the thread is blocked in
+ * the kernel (because it is cleared before entering the scheduler
+ * in all other instances).
+ *
+ * The wakeup time is only set for those states that can timeout.
+ * It is set to (-1, -1) for all other instances.
+ *
+ * The thread's run state, aside from being useful when debugging,
+ * is used to place the thread in an appropriate queue. There
+ * are 2 basic queues:
+ *
+ * o run queue - queue ordered by priority for all threads
+ * that are runnable
+ * o waiting queue - queue sorted by wakeup time for all threads
+ * that are not otherwise runnable (not blocked
+ * in kernel, not waiting for locks)
+ *
+ * The thread's time slice is used for round-robin scheduling
+ * (the default scheduling policy).  While a SCHED_RR thread
+ * is runnable, its time slice accumulates.  When it reaches
+ * the time slice interval, the slice is reset and the thread
+ * is added to the end of the queue of threads at its priority.
+ * When a thread is no longer runnable (blocks in the kernel,
+ * waits, etc.), its time slice is reset.
+ *
+ * The job of kse_switchout_thread() is to handle all of the above.
+ */
+static void
+kse_switchout_thread(struct kse *kse, struct pthread *thread)
+{
+ int level;
+
+ /*
+ * Place the currently running thread into the
+ * appropriate queue(s).
+ */
+ DBG_MSG("Switching out thread %p, state %d\n", thread, thread->state);
+ if (thread->blocked != 0) {
+ /* This thread must have blocked in the kernel. */
+ /* thread->slice_usec = -1;*/ /* restart timeslice */
+ /*
+ * XXX - Check for pending signals for this thread to
+ * see if we need to interrupt it in the kernel.
+ */
+ /* if (thread->check_pending != 0) */
+ if ((thread->slice_usec != -1) &&
+ (thread->attr.sched_policy != SCHED_FIFO))
+ thread->slice_usec += (thread->tmbx.tm_uticks
+ + thread->tmbx.tm_sticks) * _clock_res_usec;
+ }
+ else {
+ switch (thread->state) {
+ case PS_DEAD:
/*
- * Check if this thread is running for the first time
- * or running again after using its full time slice
- * allocation:
+ * The scheduler is operating on a different
+ * stack. It is safe to do garbage collecting
+ * here.
*/
- if (pthread_h->slice_usec == -1) {
- /* Reset the accumulated time slice period: */
- pthread_h->slice_usec = 0;
- }
+ thr_cleanup(kse, thread);
+ return;
+ break;
+ case PS_RUNNING:
+ /* Nothing to do here. */
+ break;
+
+ case PS_COND_WAIT:
+ case PS_SLEEP_WAIT:
+ /* Insert into the waiting queue: */
+ KSE_WAITQ_INSERT(kse, thread);
+ break;
+
+ case PS_LOCKWAIT:
/*
- * If we had a context switch, run any
- * installed switch hooks.
+ * This state doesn't timeout.
*/
- if ((_sched_switch_hook != NULL) &&
- (_last_user_thread != pthread_h)) {
- thread_run_switch_hook(_last_user_thread,
- pthread_h);
- }
+ thread->wakeup_time.tv_sec = -1;
+ thread->wakeup_time.tv_nsec = -1;
+ level = thread->locklevel - 1;
+ if (_LCK_BUSY(&thread->lockusers[level]))
+ KSE_WAITQ_INSERT(kse, thread);
+ else
+ THR_SET_STATE(thread, PS_RUNNING);
+ break;
+
+ case PS_JOIN:
+ case PS_MUTEX_WAIT:
+ case PS_SIGSUSPEND:
+ case PS_SIGWAIT:
+ case PS_SUSPENDED:
+ case PS_DEADLOCK:
+ default:
/*
- * Continue the thread at its current frame:
+ * These states don't timeout.
*/
- _last_user_thread = td;
- DBG_MSG("switch in\n");
- _thread_switch(&pthread_h->mailbox,
- &_thread_kern_kse_mailbox.km_curthread);
- DBG_MSG("switch out\n");
+ thread->wakeup_time.tv_sec = -1;
+ thread->wakeup_time.tv_nsec = -1;
+
+ /* Insert into the waiting queue: */
+ KSE_WAITQ_INSERT(kse, thread);
+ break;
+ }
+ if (thread->state != PS_RUNNING) {
+ /* Restart the time slice: */
+ thread->slice_usec = -1;
} else {
- /*
- * There is nothing for us to do. Either
- * yield, or idle until something wakes up.
- */
- DBG_MSG("No runnable threads, idling.\n");
- if (_kern_idle_running) {
- DBG_MSG("kse_release");
- kse_release(NULL);
+ if (thread->need_switchout != 0)
+ /*
+ * The thread yielded on its own;
+ * restart the timeslice.
+ */
+ thread->slice_usec = -1;
+ else if ((thread->slice_usec != -1) &&
+ (thread->attr.sched_policy != SCHED_FIFO)) {
+ thread->slice_usec += (thread->tmbx.tm_uticks
+ + thread->tmbx.tm_sticks) * _clock_res_usec;
+ /* Check for time quantum exceeded: */
+ if (thread->slice_usec > TIMESLICE_USEC)
+ thread->slice_usec = -1;
}
- _kern_idle_running = 1;
- if ((pthread == NULL) ||
- (pthread->wakeup_time.tv_sec == -1))
+ if (thread->slice_usec == -1) {
+ /*
+ * The thread exceeded its time quantum or
+ * it yielded the CPU; place it at the tail
+ * of the queue for its priority.
+ */
+ KSE_RUNQ_INSERT_TAIL(kse, thread);
+ } else {
/*
- * Nothing is waiting on a timeout, so
- * idling gains us nothing; spin.
+			 * The thread hasn't exceeded its interval;
+			 * place it at the head of the queue for its
+			 * priority.
*/
- continue;
- TIMESPEC_TO_TIMEVAL(&_kern_idle_timeout,
- &pthread->wakeup_time);
- _thread_switch(&_idle_thr_mailbox,
- &_thread_kern_kse_mailbox.km_curthread);
+ KSE_RUNQ_INSERT_HEAD(kse, thread);
+ }
}
- DBG_MSG("looping\n");
}
- /* There are no threads; exit. */
- DBG_MSG("No threads, exiting.\n");
- exit(0);
+ thread->active = 0;
+ thread->need_switchout = 0;
}
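(Sketch with illustrative numbers, not part of this change; TIMESLICE_USEC and _clock_res_usec are the library's existing constants.)

	/*
	 * Each pass through the scheduler charges a SCHED_RR thread for the
	 * user and system ticks it consumed:
	 *
	 *	slice_usec += (tm_uticks + tm_sticks) * _clock_res_usec;
	 *
	 * With, say, _clock_res_usec == 10000 (10 ms ticks), 8 accumulated
	 * ticks charge 80000 us.  Once slice_usec exceeds TIMESLICE_USEC it
	 * is reset to -1 and the thread goes to the tail of the run queue
	 * for its priority; otherwise it is reinserted at the head.
	 */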
-void
-_thread_kern_sched_state(enum pthread_state state, char *fname, int lineno)
+/*
+ * This function waits for the smallest timeout value of any waiting
+ * thread, or until it receives a message from another KSE.
+ *
+ * This must be called with the scheduling lock held.
+ */
+static void
+kse_wait(struct kse *kse)
{
- struct pthread *curthread = _get_curthread();
+ struct timespec *ts, ts_sleep;
+ struct pthread *td_wait, *td_run;
- /*
- * Flag the pthread kernel as executing scheduler code
- * to avoid an upcall from interrupting this execution
- * and calling the scheduler again.
- */
- _thread_kern_kse_mailbox.km_curthread = NULL;
+ ts = &kse->k_mbx.km_timeofday;
+ KSE_SET_WAIT(kse);
- /* Change the state of the current thread: */
- curthread->state = state;
- curthread->fname = fname;
- curthread->lineno = lineno;
+ td_wait = KSE_WAITQ_FIRST(kse);
+ td_run = KSE_RUNQ_FIRST(kse);
+ KSE_SCHED_UNLOCK(kse, kse->k_kseg);
- /* Schedule the next thread that is ready: */
- _thread_kern_sched();
+ if (td_run == NULL) {
+ if ((td_wait == NULL) || (td_wait->wakeup_time.tv_sec < 0)) {
+ /* Limit sleep to no more than 2 minutes. */
+ ts_sleep.tv_sec = 120;
+ ts_sleep.tv_nsec = 0;
+ } else {
+ TIMESPEC_SUB(&ts_sleep, &td_wait->wakeup_time, ts);
+ if (ts_sleep.tv_sec > 120) {
+ ts_sleep.tv_sec = 120;
+ ts_sleep.tv_nsec = 0;
+ }
+ }
+ if ((ts_sleep.tv_sec >= 0) && (ts_sleep.tv_nsec >= 0)) {
+ /* Don't sleep for negative times. */
+ kse_release(&ts_sleep);
+ /*
+ * The above never returns.
+ * XXX - Actually, it would be nice if it did
+ * for KSE's with only one thread.
+ */
+ }
+ }
+ KSE_CLEAR_WAIT(kse);
}
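(Worked example of the sleep interval chosen above, illustrative values, km_timeofday = {100, 0}:)

	nearest waiter wakes at {100, 500000000}  ->  ts_sleep = {0, 500000000}
	nearest waiter wakes at {400, 0}          ->  ts_sleep clamped to {120, 0}
	no thread with a timeout                  ->  ts_sleep = {120, 0}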
-void
-_thread_kern_sched_state_unlock(enum pthread_state state,
- spinlock_t *lock, char *fname, int lineno)
+/*
+ * This isn't named kse_exit() so as not to confuse it with the
+ * system call of the same name.
+ */
+static void
+kse_fini(struct kse *kse)
{
- struct pthread *curthread = _get_curthread();
+ struct timespec ts;
/*
- * Flag the pthread kernel as executing scheduler code
- * to avoid an upcall from interrupting this execution
- * and calling the scheduler again.
+ * Check to see if this is the main kse.
*/
- _thread_kern_kse_mailbox.km_curthread = NULL;
-
- /* Change the state of the current thread: */
- curthread->state = state;
- curthread->fname = fname;
- curthread->lineno = lineno;
-
- _SPINUNLOCK(lock);
+ if (kse == _kse_initial) {
+ /*
+ * Wait for the last KSE/thread to exit, or for more
+ * threads to be created (it is possible for additional
+ * scope process threads to be created after the main
+ * thread exits).
+ */
+ ts.tv_sec = 120;
+ ts.tv_nsec = 0;
+ KSE_SET_WAIT(kse);
+ KSE_SCHED_LOCK(kse, kse->k_kseg);
+ if ((active_kse_count > 1) &&
+ (kse->k_kseg->kg_threadcount == 0)) {
+ KSE_SCHED_UNLOCK(kse, kse->k_kseg);
+ /*
+ * XXX - We need a way for the KSE to do a timed
+ * wait.
+ */
+ kse_release(&ts);
+ /* The above never returns. */
+ }
+ KSE_SCHED_UNLOCK(kse, kse->k_kseg);
- /* Schedule the next thread that is ready: */
- _thread_kern_sched();
+ /* There are no more threads; exit this process: */
+ if (kse->k_kseg->kg_threadcount == 0) {
+ /* kse_exit(); */
+ __isthreaded = 0;
+ exit(0);
+ }
+ } else {
+ /* Mark this KSE for GC: */
+ KSE_LOCK_ACQUIRE(kse, &_thread_list_lock);
+ TAILQ_INSERT_TAIL(&free_kseq, kse, k_qe);
+ KSE_LOCK_RELEASE(kse, &_thread_list_lock);
+ kse_exit();
+ }
}
-/*
- * Block until the next timeout.
- */
void
-_thread_kern_idle(void)
+_thr_sig_add(struct pthread *thread, int sig, siginfo_t *info, ucontext_t *ucp)
{
- struct timespec ts;
- struct timeval timeout;
+ struct kse *curkse;
- for (;;) {
- timersub(&_kern_idle_timeout, &_sched_tod, &timeout);
- TIMEVAL_TO_TIMESPEC(&timeout, &ts);
- __sys_nanosleep(&ts, NULL);
+ curkse = _get_curkse();
+
+ KSE_SCHED_LOCK(curkse, thread->kseg);
+ /*
+	 * A thread's assigned KSE can't change out from under us
+ * when we hold the scheduler lock.
+ */
+ if (THR_IS_ACTIVE(thread)) {
+ /* Thread is active. Can't install the signal for it. */
+ /* Make a note in the thread that it has a signal. */
+ sigaddset(&thread->sigpend, sig);
+ thread->check_pending = 1;
}
+ else {
+ /* Make a note in the thread that it has a signal. */
+ sigaddset(&thread->sigpend, sig);
+ thread->check_pending = 1;
+
+ if (thread->blocked != 0) {
+ /* Tell the kernel to interrupt the thread. */
+ kse_thr_interrupt(&thread->tmbx);
+ }
+ }
+ KSE_SCHED_UNLOCK(curkse, thread->kseg);
}
void
-_thread_kern_set_timeout(const struct timespec * timeout)
+_thr_set_timeout(const struct timespec *timeout)
{
struct pthread *curthread = _get_curthread();
- struct timespec current_time;
- struct timeval tv;
+ struct timespec ts;
/* Reset the timeout flag for the running thread: */
curthread->timeout = 0;
@@ -514,94 +1494,311 @@ _thread_kern_set_timeout(const struct timespec * timeout)
curthread->wakeup_time.tv_nsec = -1;
}
/* Check if no waiting is required: */
- else if (timeout->tv_sec == 0 && timeout->tv_nsec == 0) {
+ else if ((timeout->tv_sec == 0) && (timeout->tv_nsec == 0)) {
/* Set the wake up time to 'immediately': */
curthread->wakeup_time.tv_sec = 0;
curthread->wakeup_time.tv_nsec = 0;
} else {
- /* Get the current time: */
- GET_CURRENT_TOD(tv);
- TIMEVAL_TO_TIMESPEC(&tv, &current_time);
-
- /* Calculate the time for the current thread to wake up: */
- curthread->wakeup_time.tv_sec = current_time.tv_sec + timeout->tv_sec;
- curthread->wakeup_time.tv_nsec = current_time.tv_nsec + timeout->tv_nsec;
-
- /* Check if the nanosecond field needs to wrap: */
- if (curthread->wakeup_time.tv_nsec >= 1000000000) {
- /* Wrap the nanosecond field: */
- curthread->wakeup_time.tv_sec += 1;
- curthread->wakeup_time.tv_nsec -= 1000000000;
- }
+		/* Calculate the time for the current thread to wake up: */
+ KSE_GET_TOD(curthread->kse, &ts);
+ TIMESPEC_ADD(&curthread->wakeup_time, &ts, timeout);
}
}
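(Worked example, illustrative values; "now" is what KSE_GET_TOD() returns:)

	timeout == NULL                                     ->  wakeup_time = {-1, -1}  (wait forever)
	timeout == {0, 0}                                   ->  wakeup_time = {0, 0}    (no waiting)
	timeout == {1, 500000000}, now == {100, 700000000}  ->  wakeup_time = {102, 200000000}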
void
-_thread_kern_sig_defer(void)
+_thr_panic_exit(char *file, int line, char *msg)
{
- struct pthread *curthread = _get_curthread();
+ char buf[256];
- /* Allow signal deferral to be recursive. */
- curthread->sig_defer_count++;
+ snprintf(buf, sizeof(buf), "(%s:%d) %s\n", file, line, msg);
+ __sys_write(2, buf, strlen(buf));
+ abort();
}
void
-_thread_kern_sig_undefer(void)
+_thr_setrunnable(struct pthread *curthread, struct pthread *thread)
{
- struct pthread *curthread = _get_curthread();
+ kse_critical_t crit;
+
+ crit = _kse_critical_enter();
+ KSE_SCHED_LOCK(curthread->kse, thread->kseg);
+ _thr_setrunnable_unlocked(thread);
+ KSE_SCHED_UNLOCK(curthread->kse, thread->kseg);
+ _kse_critical_leave(crit);
+}
+
+void
+_thr_setrunnable_unlocked(struct pthread *thread)
+{
+ if ((thread->kseg->kg_flags & KGF_SINGLE_THREAD) != 0)
+ /* No silly queues for these threads. */
+ THR_SET_STATE(thread, PS_RUNNING);
+ else {
+ if ((thread->flags & THR_FLAGS_IN_WAITQ) != 0)
+ KSE_WAITQ_REMOVE(thread->kse, thread);
+ THR_SET_STATE(thread, PS_RUNNING);
+ if ((thread->blocked == 0) &&
+ (thread->flags & THR_FLAGS_IN_RUNQ) == 0)
+ THR_RUNQ_INSERT_TAIL(thread);
+ }
+ /*
+ * XXX - Threads are not yet assigned to specific KSEs; they are
+ * assigned to the KSEG. So the fact that a thread's KSE is
+ * waiting doesn't necessarily mean that it will be the KSE
+ * that runs the thread after the lock is granted. But we
+ * don't know if the other KSEs within the same KSEG are
+	 *       also in a waiting state or not, so we err on the side of
+	 *       caution and wake up the thread's last known KSE.  We
+	 *       ensure that the thread's KSE doesn't change while its
+	 *       scheduling lock is held, so it is safe to reference it
+	 *       (the KSE).  If the KSE wakes up and doesn't find any more
+	 *       work, it will go back to waiting again, so no harm is done.
+ */
+ if (KSE_WAITING(thread->kse))
+ KSE_WAKEUP(thread->kse);
+}
+
+struct pthread *
+_get_curthread(void)
+{
+ return (_ksd_curthread);
+}
+
+/* This assumes the caller has disabled upcalls. */
+struct kse *
+_get_curkse(void)
+{
+ return (_ksd_curkse);
+}
+
+void
+_set_curkse(struct kse *kse)
+{
+ _ksd_setprivate(&kse->k_ksd);
+}
+
+/*
+ * Allocate a new KSEG.
+ *
+ * We allow the current KSE (curkse) to be NULL in the case that this
+ * is the first time a KSEG is being created (library initialization).
+ * In this case, we don't need to (and can't) take any locks.
+ */
+struct kse_group *
+_kseg_alloc(struct kse *curkse)
+{
+ struct kse_group *kseg = NULL;
+
+ if ((curkse != NULL) && (free_kseg_count > 0)) {
+ /* Use the kse lock for the kseg queue. */
+ KSE_LOCK_ACQUIRE(curkse, &kse_lock);
+ if ((kseg = TAILQ_FIRST(&free_kse_groupq)) != NULL) {
+ TAILQ_REMOVE(&free_kse_groupq, kseg, kg_qe);
+ free_kseg_count--;
+ active_kseg_count++;
+ TAILQ_INSERT_TAIL(&active_kse_groupq, kseg, kg_qe);
+ }
+ KSE_LOCK_RELEASE(curkse, &kse_lock);
+ }
/*
- * Perform checks to yield only if we are about to undefer
- * signals.
+	 * If a KSE group wasn't found in the free list, attempt to
+	 * allocate a new one.
*/
- if (curthread->sig_defer_count > 1) {
- /* Decrement the signal deferral count. */
- curthread->sig_defer_count--;
+ if ((kseg == NULL) &&
+ ((kseg = (struct kse_group *)malloc(sizeof(*kseg))) != NULL)) {
+ THR_ASSERT(_pq_alloc(&kseg->kg_schedq.sq_runq,
+ THR_MIN_PRIORITY, THR_LAST_PRIORITY) == 0,
+ "Unable to allocate priority queue.");
+ kseg_init(kseg);
+ if (curkse != NULL)
+ KSE_LOCK_ACQUIRE(curkse, &kse_lock);
+		/* Add the new group to the list of active KSEGs: */
+		active_kseg_count++;
+		TAILQ_INSERT_TAIL(&active_kse_groupq, kseg, kg_qe);
+ if (curkse != NULL)
+ KSE_LOCK_RELEASE(curkse, &kse_lock);
+ }
+ return (kseg);
+}
+
+/*
+ * This must be called with the kse lock held and when there are
+ * no more threads that reference it.
+ */
+static void
+kseg_free(struct kse_group *kseg)
+{
+ TAILQ_INSERT_HEAD(&free_kse_groupq, kseg, kg_qe);
+ kseg_init(kseg);
+ free_kseg_count++;
+ active_kseg_count--;
+}
+
+/*
+ * Allocate a new KSE.
+ *
+ * We allow the current KSE (curkse) to be NULL in the case that this
+ * is the first time a KSE is being created (library initialization).
+ * In this case, we don't need to (and can't) take any locks.
+ */
+struct kse *
+_kse_alloc(struct kse *curkse)
+{
+ struct kse *kse = NULL;
+ int need_ksd = 0;
+ int i;
+
+ if ((curkse != NULL) && (free_kse_count > 0)) {
+ KSE_LOCK_ACQUIRE(curkse, &kse_lock);
+ /* Search for a finished KSE. */
+ kse = TAILQ_FIRST(&free_kseq);
+#define KEMBX_DONE 0x01
+ while ((kse != NULL) &&
+ ((kse->k_mbx.km_flags & KEMBX_DONE) == 0)) {
+ kse = TAILQ_NEXT(kse, k_qe);
+ }
+#undef KEMBX_DONE
+ if (kse != NULL) {
+ TAILQ_REMOVE(&free_kseq, kse, k_qe);
+ free_kse_count--;
+ active_kse_count++;
+ TAILQ_INSERT_TAIL(&active_kseq, kse, k_qe);
+ }
+ KSE_LOCK_RELEASE(curkse, &kse_lock);
}
- else if (curthread->sig_defer_count == 1) {
- /* Reenable signals: */
- curthread->sig_defer_count = 0;
+ if ((kse == NULL) &&
+ ((kse = (struct kse *)malloc(sizeof(*kse))) != NULL)) {
+ bzero(kse, sizeof(*kse));
+
+ /* Initialize the lockusers. */
+ for (i = 0; i < MAX_KSE_LOCKLEVEL; i++) {
+ _lockuser_init(&kse->k_lockusers[i], (void *)kse);
+ _LCK_SET_PRIVATE2(&kse->k_lockusers[i], NULL);
+ }
+
+ /* We had to malloc a kse; mark it as needing a new ID.*/
+ need_ksd = 1;
/*
- * Check for asynchronous cancellation before delivering any
- * pending signals:
+ * Create the KSE context.
+ *
+ * XXX - For now this is done here in the allocation.
+ * In the future, we may want to have it done
+ * outside the allocation so that scope system
+ * threads (one thread per KSE) are not required
+ * to have a stack for an unneeded kse upcall.
*/
- if (((curthread->cancelflags & PTHREAD_AT_CANCEL_POINT) == 0) &&
- ((curthread->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS) != 0))
- pthread_testcancel();
+ kse->k_mbx.km_func = kse_entry;
+ kse->k_mbx.km_stack.ss_sp = (char *)malloc(KSE_STACKSIZE);
+ kse->k_mbx.km_stack.ss_size = KSE_STACKSIZE;
+ kse->k_mbx.km_udata = (void *)kse;
+ kse->k_mbx.km_quantum = 20000;
+		if (kse->k_mbx.km_stack.ss_sp == NULL) {
+ free(kse);
+ kse = NULL;
+ }
}
+ if ((kse != NULL) && (need_ksd != 0)) {
+ /* This KSE needs initialization. */
+ if (curkse != NULL)
+ KSE_LOCK_ACQUIRE(curkse, &kse_lock);
+ /* Initialize KSD inside of the lock. */
+ if (_ksd_create(&kse->k_ksd, (void *)kse, sizeof(*kse)) != 0) {
+ if (curkse != NULL)
+ KSE_LOCK_RELEASE(curkse, &kse_lock);
+ free(kse->k_mbx.km_stack.ss_sp);
+ for (i = 0; i < MAX_KSE_LOCKLEVEL; i++) {
+ _lockuser_destroy(&kse->k_lockusers[i]);
+ }
+ free(kse);
+ return (NULL);
+ }
+ kse->k_flags = 0;
+ active_kse_count++;
+ TAILQ_INSERT_TAIL(&active_kseq, kse, k_qe);
+ if (curkse != NULL)
+ KSE_LOCK_RELEASE(curkse, &kse_lock);
+
+ }
+ return (kse);
}
-static inline void
-thread_run_switch_hook(pthread_t thread_out, pthread_t thread_in)
+void
+_kse_free(struct kse *curkse, struct kse *kse)
{
- pthread_t tid_out = thread_out;
- pthread_t tid_in = thread_in;
-
- if ((tid_out != NULL) &&
- (tid_out->flags & PTHREAD_FLAGS_PRIVATE) != 0)
- tid_out = NULL;
- if ((tid_in != NULL) &&
- (tid_in->flags & PTHREAD_FLAGS_PRIVATE) != 0)
- tid_in = NULL;
-
- if ((_sched_switch_hook != NULL) && (tid_out != tid_in)) {
- /* Run the scheduler switch hook: */
- _sched_switch_hook(tid_out, tid_in);
+ struct kse_group *kseg = NULL;
+
+ if (curkse == kse)
+ PANIC("KSE trying to free itself");
+ KSE_LOCK_ACQUIRE(curkse, &kse_lock);
+ active_kse_count--;
+ if ((kseg = kse->k_kseg) != NULL) {
+ TAILQ_REMOVE(&kseg->kg_kseq, kse, k_qe);
+ /*
+ * Free the KSEG if there are no more threads associated
+ * with it.
+ */
+ if (TAILQ_EMPTY(&kseg->kg_threadq))
+ kseg_free(kseg);
}
+ kse->k_kseg = NULL;
+ kse->k_flags &= ~KF_INITIALIZED;
+ TAILQ_INSERT_HEAD(&free_kseq, kse, k_qe);
+ free_kse_count++;
+ KSE_LOCK_RELEASE(curkse, &kse_lock);
}
-struct pthread *
-_get_curthread(void)
+static void
+kseg_init(struct kse_group *kseg)
{
- if (_thread_initial == NULL)
- _thread_init();
+ TAILQ_INIT(&kseg->kg_kseq);
+ TAILQ_INIT(&kseg->kg_threadq);
+ TAILQ_INIT(&kseg->kg_schedq.sq_waitq);
+ TAILQ_INIT(&kseg->kg_schedq.sq_blockedq);
+ _lock_init(&kseg->kg_lock, LCK_ADAPTIVE, _kse_lock_wait,
+ _kse_lock_wakeup);
+ kseg->kg_threadcount = 0;
+ kseg->kg_idle_kses = 0;
+ kseg->kg_flags = 0;
+}
- return (_thread_run);
+struct pthread *
+_thr_alloc(struct pthread *curthread)
+{
+	kse_critical_t crit;
+	struct kse *curkse;
+ struct pthread *thread = NULL;
+
+ if (curthread != NULL) {
+ if (_gc_check != 0)
+ thread_gc(curthread);
+ if (free_thread_count > 0) {
+			crit = _kse_critical_enter();
+			curkse = _get_curkse();
+			KSE_LOCK_ACQUIRE(curkse, &thread_lock);
+ if ((thread = TAILQ_FIRST(&free_threadq)) != NULL) {
+ TAILQ_REMOVE(&free_threadq, thread, tle);
+ free_thread_count--;
+ }
+			KSE_LOCK_RELEASE(curkse, &thread_lock);
+			_kse_critical_leave(crit);
+		}
+ }
+ if (thread == NULL)
+ thread = (struct pthread *)malloc(sizeof(struct pthread));
+ return (thread);
}
void
-_set_curthread(struct pthread *newthread)
+_thr_free(struct pthread *curthread, struct pthread *thread)
{
- _thread_run = newthread;
+	kse_critical_t crit;
+	struct kse *curkse;
+
+ if ((curthread == NULL) || (free_thread_count >= MAX_CACHED_THREADS))
+ free(thread);
+ else {
+		crit = _kse_critical_enter();
+		curkse = _get_curkse();
+		KSE_LOCK_ACQUIRE(curkse, &thread_lock);
+ TAILQ_INSERT_HEAD(&free_threadq, thread, tle);
+ free_thread_count++;
+ KSE_LOCK_RELEASE(curkse, &thread_lock);
+ _kse_critical_leave(crit);
+ }
}
diff --git a/lib/libkse/thread/thr_kill.c b/lib/libkse/thread/thr_kill.c
index 24f9150..19f34bb 100644
--- a/lib/libkse/thread/thr_kill.c
+++ b/lib/libkse/thread/thr_kill.c
@@ -41,8 +41,26 @@ __weak_reference(_pthread_kill, pthread_kill);
int
_pthread_kill(pthread_t pthread, int sig)
{
+ struct pthread *curthread = _get_curthread();
+ int ret;
+
+ /* Check for invalid signal numbers: */
+ if (sig < 0 || sig >= NSIG)
+ /* Invalid signal: */
+ ret = EINVAL;
/*
- * All signals are unsupported.
+ * Ensure the thread is in the list of active threads, and the
+ * signal is valid (signal 0 specifies error checking only) and
+ * not being ignored:
*/
- return (EINVAL);
+ else if ((ret = _thr_ref_add(curthread, pthread, /*include dead*/0))
+ == 0) {
+ if ((sig > 0) &&
+ (_thread_sigact[sig - 1].sa_handler != SIG_IGN))
+ _thr_sig_send(pthread, sig);
+ _thr_ref_delete(curthread, pthread);
+ }
+
+ /* Return the completion status: */
+ return (ret);
}
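(Illustrative application-level sketch, not part of this change; poke_worker and worker are hypothetical names.)

	#include <pthread.h>
	#include <signal.h>

	int
	poke_worker(pthread_t worker)
	{
		int error;

		/* Signal 0 only checks that the target thread still exists. */
		if ((error = pthread_kill(worker, 0)) != 0)
			return (error);

		/* Deliver SIGUSR1 to that specific thread. */
		return (pthread_kill(worker, SIGUSR1));
	}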
diff --git a/lib/libkse/thread/thr_main_np.c b/lib/libkse/thread/thr_main_np.c
index 1d5849d..0dacd48 100644
--- a/lib/libkse/thread/thr_main_np.c
+++ b/lib/libkse/thread/thr_main_np.c
@@ -40,8 +40,8 @@ int
_pthread_main_np()
{
- if (!_thread_initial)
+ if (!_thr_initial)
return (-1);
else
- return (pthread_equal(pthread_self(), _thread_initial) ? 1 : 0);
+ return (pthread_equal(pthread_self(), _thr_initial) ? 1 : 0);
}
diff --git a/lib/libkse/thread/thr_mattr_init.c b/lib/libkse/thread/thr_mattr_init.c
index 6b403d6..d5a7a18 100644
--- a/lib/libkse/thread/thr_mattr_init.c
+++ b/lib/libkse/thread/thr_mattr_init.c
@@ -46,13 +46,13 @@ _pthread_mutexattr_init(pthread_mutexattr_t *attr)
pthread_mutexattr_t pattr;
if ((pattr = (pthread_mutexattr_t)
- malloc(sizeof(struct pthread_mutex_attr))) == NULL) {
+ malloc(sizeof(struct pthread_mutex_attr))) == NULL) {
ret = ENOMEM;
} else {
- memcpy(pattr, &pthread_mutexattr_default,
- sizeof(struct pthread_mutex_attr));
+ memcpy(pattr, &_pthread_mutexattr_default,
+ sizeof(struct pthread_mutex_attr));
*attr = pattr;
ret = 0;
}
- return(ret);
+ return (ret);
}
diff --git a/lib/libkse/thread/thr_msync.c b/lib/libkse/thread/thr_msync.c
index 302f314f..24b78ec 100644
--- a/lib/libkse/thread/thr_msync.c
+++ b/lib/libkse/thread/thr_msync.c
@@ -16,6 +16,7 @@ __weak_reference(__msync, msync);
int
__msync(void *addr, size_t len, int flags)
{
+ struct pthread *curthread = _get_curthread();
int ret;
/*
@@ -24,9 +25,9 @@ __msync(void *addr, size_t len, int flags)
* write. The only real use of this wrapper is to guarantee
* a cancellation point, as per the standard. sigh.
*/
- _thread_enter_cancellation_point();
+ _thr_enter_cancellation_point(curthread);
ret = __sys_msync(addr, len, flags);
- _thread_leave_cancellation_point();
+ _thr_leave_cancellation_point(curthread);
return ret;
}
diff --git a/lib/libkse/thread/thr_mutex.c b/lib/libkse/thread/thr_mutex.c
index 7b22fb5..1ae12ea9 100644
--- a/lib/libkse/thread/thr_mutex.c
+++ b/lib/libkse/thread/thr_mutex.c
@@ -40,47 +40,52 @@
#include "thr_private.h"
#if defined(_PTHREADS_INVARIANTS)
-#define _MUTEX_INIT_LINK(m) do { \
+#define MUTEX_INIT_LINK(m) do { \
(m)->m_qe.tqe_prev = NULL; \
(m)->m_qe.tqe_next = NULL; \
} while (0)
-#define _MUTEX_ASSERT_IS_OWNED(m) do { \
+#define MUTEX_ASSERT_IS_OWNED(m) do { \
if ((m)->m_qe.tqe_prev == NULL) \
PANIC("mutex is not on list"); \
} while (0)
-#define _MUTEX_ASSERT_NOT_OWNED(m) do { \
+#define MUTEX_ASSERT_NOT_OWNED(m) do { \
if (((m)->m_qe.tqe_prev != NULL) || \
((m)->m_qe.tqe_next != NULL)) \
PANIC("mutex is on list"); \
} while (0)
+#define THR_ASSERT_NOT_IN_SYNCQ(thr) do { \
+ THR_ASSERT(((thr)->sflags & THR_FLAGS_IN_SYNCQ) == 0, \
+ "thread in syncq when it shouldn't be."); \
+} while (0)
#else
-#define _MUTEX_INIT_LINK(m)
-#define _MUTEX_ASSERT_IS_OWNED(m)
-#define _MUTEX_ASSERT_NOT_OWNED(m)
+#define MUTEX_INIT_LINK(m)
+#define MUTEX_ASSERT_IS_OWNED(m)
+#define MUTEX_ASSERT_NOT_OWNED(m)
+#define THR_ASSERT_NOT_IN_SYNCQ(thr)
#endif
/*
* Prototypes
*/
-static inline int mutex_self_trylock(pthread_mutex_t);
-static inline int mutex_self_lock(pthread_mutex_t);
-static inline int mutex_unlock_common(pthread_mutex_t *, int);
-static void mutex_priority_adjust(pthread_mutex_t);
-static void mutex_rescan_owned (pthread_t, pthread_mutex_t);
+static void mutex_handoff(struct pthread *, struct pthread_mutex *);
+static inline int mutex_self_trylock(struct pthread *, pthread_mutex_t);
+static inline int mutex_self_lock(struct pthread *, pthread_mutex_t);
+static int mutex_unlock_common(pthread_mutex_t *, int);
+static void mutex_priority_adjust(struct pthread *, pthread_mutex_t);
+static void mutex_rescan_owned (struct pthread *, struct pthread *,
+ struct pthread_mutex *);
static inline pthread_t mutex_queue_deq(pthread_mutex_t);
static inline void mutex_queue_remove(pthread_mutex_t, pthread_t);
static inline void mutex_queue_enq(pthread_mutex_t, pthread_t);
-static spinlock_t static_init_lock = _SPINLOCK_INITIALIZER;
-
static struct pthread_mutex_attr static_mutex_attr =
PTHREAD_MUTEXATTR_STATIC_INITIALIZER;
static pthread_mutexattr_t static_mattr = &static_mutex_attr;
/* Single underscore versions provided for libc internal usage: */
-__weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);
__weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
+__weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);
/* No difference between libc and application usage of these: */
__weak_reference(_pthread_mutex_init, pthread_mutex_init);
@@ -88,45 +93,16 @@ __weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
__weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);
-/* Reinitialize a mutex to defaults. */
-int
-_mutex_reinit(pthread_mutex_t * mutex)
-{
- int ret = 0;
-
- if (mutex == NULL)
- ret = EINVAL;
- else if (*mutex == NULL)
- ret = pthread_mutex_init(mutex, NULL);
- else {
- /*
- * Initialize the mutex structure:
- */
- (*mutex)->m_type = PTHREAD_MUTEX_DEFAULT;
- (*mutex)->m_protocol = PTHREAD_PRIO_NONE;
- TAILQ_INIT(&(*mutex)->m_queue);
- (*mutex)->m_owner = NULL;
- (*mutex)->m_data.m_count = 0;
- (*mutex)->m_flags &= MUTEX_FLAGS_PRIVATE;
- (*mutex)->m_flags |= MUTEX_FLAGS_INITED;
- (*mutex)->m_refcount = 0;
- (*mutex)->m_prio = 0;
- (*mutex)->m_saved_prio = 0;
- _MUTEX_INIT_LINK(*mutex);
- memset(&(*mutex)->lock, 0, sizeof((*mutex)->lock));
- }
- return (ret);
-}
int
-_pthread_mutex_init(pthread_mutex_t * mutex,
- const pthread_mutexattr_t * mutex_attr)
+_pthread_mutex_init(pthread_mutex_t *mutex,
+ const pthread_mutexattr_t *mutex_attr)
{
- enum pthread_mutextype type;
+ struct pthread_mutex *pmutex;
+ enum pthread_mutextype type;
int protocol;
int ceiling;
int flags;
- pthread_mutex_t pmutex;
int ret = 0;
if (mutex == NULL)
@@ -137,7 +113,7 @@ _pthread_mutex_init(pthread_mutex_t * mutex,
/* Default to a (error checking) POSIX mutex: */
type = PTHREAD_MUTEX_ERRORCHECK;
protocol = PTHREAD_PRIO_NONE;
- ceiling = PTHREAD_MAX_PRIORITY;
+ ceiling = THR_MAX_PRIORITY;
flags = 0;
}
@@ -166,7 +142,12 @@ _pthread_mutex_init(pthread_mutex_t * mutex,
if ((pmutex = (pthread_mutex_t)
malloc(sizeof(struct pthread_mutex))) == NULL)
ret = ENOMEM;
- else {
+ else if (_lock_init(&pmutex->m_lock, LCK_ADAPTIVE,
+ _thr_lock_wait, _thr_lock_wakeup) != 0) {
+ free(pmutex);
+ *mutex = NULL;
+ ret = ENOMEM;
+ } else {
/* Set the mutex flags: */
pmutex->m_flags = flags;
@@ -181,7 +162,7 @@ _pthread_mutex_init(pthread_mutex_t * mutex,
/* Single UNIX Spec 2 recursive mutex: */
case PTHREAD_MUTEX_RECURSIVE:
/* Reset the mutex count: */
- pmutex->m_data.m_count = 0;
+ pmutex->m_count = 0;
break;
/* Trap invalid mutex types: */
@@ -201,10 +182,9 @@ _pthread_mutex_init(pthread_mutex_t * mutex,
if (protocol == PTHREAD_PRIO_PROTECT)
pmutex->m_prio = ceiling;
else
- pmutex->m_prio = 0;
+ pmutex->m_prio = -1;
pmutex->m_saved_prio = 0;
- _MUTEX_INIT_LINK(pmutex);
- memset(&pmutex->lock, 0, sizeof(pmutex->lock));
+ MUTEX_INIT_LINK(pmutex);
*mutex = pmutex;
} else {
free(pmutex);
@@ -213,19 +193,21 @@ _pthread_mutex_init(pthread_mutex_t * mutex,
}
}
/* Return the completion status: */
- return(ret);
+ return (ret);
}
int
-_pthread_mutex_destroy(pthread_mutex_t * mutex)
+_pthread_mutex_destroy(pthread_mutex_t *mutex)
{
- int ret = 0;
+ struct pthread *curthread = _get_curthread();
+ pthread_mutex_t m;
+ int ret = 0;
if (mutex == NULL || *mutex == NULL)
ret = EINVAL;
else {
/* Lock the mutex structure: */
- _SPINLOCK(&(*mutex)->lock);
+ THR_LOCK_ACQUIRE(curthread, &(*mutex)->m_lock);
/*
* Check to see if this mutex is in use:
@@ -236,21 +218,24 @@ _pthread_mutex_destroy(pthread_mutex_t * mutex)
ret = EBUSY;
/* Unlock the mutex structure: */
- _SPINUNLOCK(&(*mutex)->lock);
- }
- else {
+ THR_LOCK_RELEASE(curthread, &(*mutex)->m_lock);
+ } else {
/*
- * Free the memory allocated for the mutex
- * structure:
+ * Save a pointer to the mutex so it can be free'd
+ * and set the caller's pointer to NULL:
*/
- _MUTEX_ASSERT_NOT_OWNED(*mutex);
- free(*mutex);
+ m = *mutex;
+ *mutex = NULL;
+
+ /* Unlock the mutex structure: */
+ THR_LOCK_RELEASE(curthread, &m->m_lock);
/*
- * Leave the caller's pointer NULL now that
- * the mutex has been destroyed:
+ * Free the memory allocated for the mutex
+ * structure:
*/
- *mutex = NULL;
+ MUTEX_ASSERT_NOT_OWNED(m);
+ free(m);
}
}
@@ -259,56 +244,49 @@ _pthread_mutex_destroy(pthread_mutex_t * mutex)
}
static int
-init_static(pthread_mutex_t *mutex)
+init_static(struct pthread *thread, pthread_mutex_t *mutex)
{
- int ret;
+ int ret;
- _SPINLOCK(&static_init_lock);
+ THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);
if (*mutex == NULL)
ret = pthread_mutex_init(mutex, NULL);
else
ret = 0;
- _SPINUNLOCK(&static_init_lock);
+ THR_LOCK_RELEASE(thread, &_mutex_static_lock);
- return(ret);
+ return (ret);
}
static int
-init_static_private(pthread_mutex_t *mutex)
+init_static_private(struct pthread *thread, pthread_mutex_t *mutex)
{
- int ret;
+ int ret;
- _SPINLOCK(&static_init_lock);
+ THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);
if (*mutex == NULL)
ret = pthread_mutex_init(mutex, &static_mattr);
else
ret = 0;
- _SPINUNLOCK(&static_init_lock);
+ THR_LOCK_RELEASE(thread, &_mutex_static_lock);
- return(ret);
+ return (ret);
}
static int
-mutex_trylock_common(pthread_mutex_t *mutex)
+mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex)
{
- struct pthread *curthread = _get_curthread();
- int ret = 0;
+ int ret = 0;
- PTHREAD_ASSERT((mutex != NULL) && (*mutex != NULL),
+ THR_ASSERT((mutex != NULL) && (*mutex != NULL),
"Uninitialized mutex in pthread_mutex_trylock_basic");
- /*
- * Defer signals to protect the scheduling queues from
- * access by the signal handler:
- */
- _thread_kern_sig_defer();
-
/* Lock the mutex structure: */
- _SPINLOCK(&(*mutex)->lock);
+ THR_LOCK_ACQUIRE(curthread, &(*mutex)->m_lock);
/*
* If the mutex was statically allocated, properly
@@ -316,7 +294,7 @@ mutex_trylock_common(pthread_mutex_t *mutex)
*/
if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
TAILQ_INIT(&(*mutex)->m_queue);
- _MUTEX_INIT_LINK(*mutex);
+ MUTEX_INIT_LINK(*mutex);
(*mutex)->m_flags |= MUTEX_FLAGS_INITED;
}
@@ -330,11 +308,11 @@ mutex_trylock_common(pthread_mutex_t *mutex)
(*mutex)->m_owner = curthread;
/* Add to the list of owned mutexes: */
- _MUTEX_ASSERT_NOT_OWNED(*mutex);
+ MUTEX_ASSERT_NOT_OWNED(*mutex);
TAILQ_INSERT_TAIL(&curthread->mutexq,
(*mutex), m_qe);
} else if ((*mutex)->m_owner == curthread)
- ret = mutex_self_trylock(*mutex);
+ ret = mutex_self_trylock(curthread, *mutex);
else
/* Return a busy error: */
ret = EBUSY;
@@ -359,11 +337,11 @@ mutex_trylock_common(pthread_mutex_t *mutex)
curthread->inherited_priority;
/* Add to the list of owned mutexes: */
- _MUTEX_ASSERT_NOT_OWNED(*mutex);
+ MUTEX_ASSERT_NOT_OWNED(*mutex);
TAILQ_INSERT_TAIL(&curthread->mutexq,
(*mutex), m_qe);
} else if ((*mutex)->m_owner == curthread)
- ret = mutex_self_trylock(*mutex);
+ ret = mutex_self_trylock(curthread, *mutex);
else
/* Return a busy error: */
ret = EBUSY;
@@ -395,11 +373,11 @@ mutex_trylock_common(pthread_mutex_t *mutex)
(*mutex)->m_prio;
/* Add to the list of owned mutexes: */
- _MUTEX_ASSERT_NOT_OWNED(*mutex);
+ MUTEX_ASSERT_NOT_OWNED(*mutex);
TAILQ_INSERT_TAIL(&curthread->mutexq,
(*mutex), m_qe);
} else if ((*mutex)->m_owner == curthread)
- ret = mutex_self_trylock(*mutex);
+ ret = mutex_self_trylock(curthread, *mutex);
else
/* Return a busy error: */
ret = EBUSY;
@@ -413,13 +391,7 @@ mutex_trylock_common(pthread_mutex_t *mutex)
}
/* Unlock the mutex structure: */
- _SPINUNLOCK(&(*mutex)->lock);
-
- /*
- * Undefer and handle pending signals, yielding if
- * necessary:
- */
- _thread_kern_sig_undefer();
+ THR_LOCK_RELEASE(curthread, &(*mutex)->m_lock);
/* Return the completion status: */
return (ret);
@@ -428,7 +400,8 @@ mutex_trylock_common(pthread_mutex_t *mutex)
int
__pthread_mutex_trylock(pthread_mutex_t *mutex)
{
- int ret = 0;
+ struct pthread *curthread = _get_curthread();
+ int ret = 0;
if (mutex == NULL)
ret = EINVAL;
@@ -437,8 +410,9 @@ __pthread_mutex_trylock(pthread_mutex_t *mutex)
* If the mutex is statically initialized, perform the dynamic
* initialization:
*/
- else if ((*mutex != NULL) || (ret = init_static(mutex)) == 0)
- ret = mutex_trylock_common(mutex);
+ else if ((*mutex != NULL) ||
+ ((ret = init_static(curthread, mutex)) == 0))
+ ret = mutex_trylock_common(curthread, mutex);
return (ret);
}
@@ -446,6 +420,7 @@ __pthread_mutex_trylock(pthread_mutex_t *mutex)
int
_pthread_mutex_trylock(pthread_mutex_t *mutex)
{
+ struct pthread *curthread = _get_curthread();
int ret = 0;
if (mutex == NULL)
@@ -455,19 +430,19 @@ _pthread_mutex_trylock(pthread_mutex_t *mutex)
* If the mutex is statically initialized, perform the dynamic
* initialization marking the mutex private (delete safe):
*/
- else if ((*mutex != NULL) || (ret = init_static_private(mutex)) == 0)
- ret = mutex_trylock_common(mutex);
+ else if ((*mutex != NULL) ||
+ ((ret = init_static_private(curthread, mutex)) == 0))
+ ret = mutex_trylock_common(curthread, mutex);
return (ret);
}
static int
-mutex_lock_common(pthread_mutex_t * mutex)
+mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m)
{
- struct pthread *curthread = _get_curthread();
int ret = 0;
- PTHREAD_ASSERT((mutex != NULL) && (*mutex != NULL),
+ THR_ASSERT((m != NULL) && (*m != NULL),
"Uninitialized mutex in pthread_mutex_trylock_basic");
/* Reset the interrupted flag: */
@@ -482,71 +457,68 @@ mutex_lock_common(pthread_mutex_t * mutex)
* waiting queue prior to executing the signal handler.
*/
do {
- /*
- * Defer signals to protect the scheduling queues from
- * access by the signal handler:
- */
- _thread_kern_sig_defer();
-
/* Lock the mutex structure: */
- _SPINLOCK(&(*mutex)->lock);
+ THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
/*
* If the mutex was statically allocated, properly
* initialize the tail queue.
*/
- if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
- TAILQ_INIT(&(*mutex)->m_queue);
- (*mutex)->m_flags |= MUTEX_FLAGS_INITED;
- _MUTEX_INIT_LINK(*mutex);
+ if (((*m)->m_flags & MUTEX_FLAGS_INITED) == 0) {
+ TAILQ_INIT(&(*m)->m_queue);
+ (*m)->m_flags |= MUTEX_FLAGS_INITED;
+ MUTEX_INIT_LINK(*m);
}
/* Process according to mutex type: */
- switch ((*mutex)->m_protocol) {
+ switch ((*m)->m_protocol) {
/* Default POSIX mutex: */
case PTHREAD_PRIO_NONE:
- if ((*mutex)->m_owner == NULL) {
+ if ((*m)->m_owner == NULL) {
/* Lock the mutex for this thread: */
- (*mutex)->m_owner = curthread;
+ (*m)->m_owner = curthread;
/* Add to the list of owned mutexes: */
- _MUTEX_ASSERT_NOT_OWNED(*mutex);
+ MUTEX_ASSERT_NOT_OWNED(*m);
TAILQ_INSERT_TAIL(&curthread->mutexq,
- (*mutex), m_qe);
+ (*m), m_qe);
- } else if ((*mutex)->m_owner == curthread)
- ret = mutex_self_lock(*mutex);
- else {
+ /* Unlock the mutex structure: */
+ THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
+ } else if ((*m)->m_owner == curthread) {
+ ret = mutex_self_lock(curthread, *m);
+
+ /* Unlock the mutex structure: */
+ THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
+ } else {
/*
* Join the queue of threads waiting to lock
- * the mutex:
+ * the mutex and save a pointer to the mutex.
*/
- mutex_queue_enq(*mutex, curthread);
+ mutex_queue_enq(*m, curthread);
+ curthread->data.mutex = *m;
/*
- * Keep a pointer to the mutex this thread
- * is waiting on:
+ * This thread is active and is in a critical
+ * region (holding the mutex lock); we should
+ * be able to safely set the state.
*/
- curthread->data.mutex = *mutex;
+ THR_SET_STATE(curthread, PS_MUTEX_WAIT);
- /*
- * Unlock the mutex structure and schedule the
- * next thread:
- */
- _thread_kern_sched_state_unlock(PS_MUTEX_WAIT,
- &(*mutex)->lock, __FILE__, __LINE__);
+ /* Unlock the mutex structure: */
+ THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
- /* Lock the mutex structure again: */
- _SPINLOCK(&(*mutex)->lock);
+ /* Schedule the next thread: */
+ _thr_sched_switch(curthread);
}
break;
/* POSIX priority inheritence mutex: */
case PTHREAD_PRIO_INHERIT:
/* Check if this mutex is not locked: */
- if ((*mutex)->m_owner == NULL) {
+ if ((*m)->m_owner == NULL) {
/* Lock the mutex for this thread: */
- (*mutex)->m_owner = curthread;
+ (*m)->m_owner = curthread;
/* Track number of priority mutexes owned: */
curthread->priority_mutex_count++;
@@ -554,63 +526,70 @@ mutex_lock_common(pthread_mutex_t * mutex)
/*
* The mutex takes on attributes of the
* running thread when there are no waiters.
+ * Make sure the thread's scheduling lock is
+ * held while priorities are adjusted.
*/
- (*mutex)->m_prio = curthread->active_priority;
- (*mutex)->m_saved_prio =
+ THR_SCHED_LOCK(curthread, curthread);
+ (*m)->m_prio = curthread->active_priority;
+ (*m)->m_saved_prio =
curthread->inherited_priority;
- curthread->inherited_priority =
- (*mutex)->m_prio;
+ curthread->inherited_priority = (*m)->m_prio;
+ THR_SCHED_UNLOCK(curthread, curthread);
/* Add to the list of owned mutexes: */
- _MUTEX_ASSERT_NOT_OWNED(*mutex);
+ MUTEX_ASSERT_NOT_OWNED(*m);
TAILQ_INSERT_TAIL(&curthread->mutexq,
- (*mutex), m_qe);
+ (*m), m_qe);
- } else if ((*mutex)->m_owner == curthread)
- ret = mutex_self_lock(*mutex);
- else {
+ /* Unlock the mutex structure: */
+ THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
+ } else if ((*m)->m_owner == curthread) {
+ ret = mutex_self_lock(curthread, *m);
+
+ /* Unlock the mutex structure: */
+ THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
+ } else {
/*
* Join the queue of threads waiting to lock
- * the mutex:
+ * the mutex and save a pointer to the mutex.
*/
- mutex_queue_enq(*mutex, curthread);
+ mutex_queue_enq(*m, curthread);
+ curthread->data.mutex = *m;
/*
- * Keep a pointer to the mutex this thread
- * is waiting on:
+ * This thread is active and is in a critical
+ * region (holding the mutex lock); we should
+ * be able to safely set the state.
*/
- curthread->data.mutex = *mutex;
+ THR_SET_STATE(curthread, PS_MUTEX_WAIT);
- if (curthread->active_priority >
- (*mutex)->m_prio)
+ if (curthread->active_priority > (*m)->m_prio)
/* Adjust priorities: */
- mutex_priority_adjust(*mutex);
+ mutex_priority_adjust(curthread, *m);
- /*
- * Unlock the mutex structure and schedule the
- * next thread:
- */
- _thread_kern_sched_state_unlock(PS_MUTEX_WAIT,
- &(*mutex)->lock, __FILE__, __LINE__);
+ /* Unlock the mutex structure: */
+ THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
- /* Lock the mutex structure again: */
- _SPINLOCK(&(*mutex)->lock);
+ /* Schedule the next thread: */
+ _thr_sched_switch(curthread);
}
break;
/* POSIX priority protection mutex: */
case PTHREAD_PRIO_PROTECT:
/* Check for a priority ceiling violation: */
- if (curthread->active_priority > (*mutex)->m_prio)
+ if (curthread->active_priority > (*m)->m_prio) {
+ /* Unlock the mutex structure: */
+ THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
ret = EINVAL;
-
+ }
/* Check if this mutex is not locked: */
- else if ((*mutex)->m_owner == NULL) {
+ else if ((*m)->m_owner == NULL) {
/*
* Lock the mutex for the running
* thread:
*/
- (*mutex)->m_owner = curthread;
+ (*m)->m_owner = curthread;
/* Track number of priority mutexes owned: */
curthread->priority_mutex_count++;
@@ -618,45 +597,52 @@ mutex_lock_common(pthread_mutex_t * mutex)
/*
* The running thread inherits the ceiling
* priority of the mutex and executes at that
- * priority:
+ * priority. Make sure the thread's
+ * scheduling lock is held while priorities
+ * are adjusted.
*/
- curthread->active_priority = (*mutex)->m_prio;
- (*mutex)->m_saved_prio =
+ THR_SCHED_LOCK(curthread, curthread);
+ curthread->active_priority = (*m)->m_prio;
+ (*m)->m_saved_prio =
curthread->inherited_priority;
- curthread->inherited_priority =
- (*mutex)->m_prio;
+ curthread->inherited_priority = (*m)->m_prio;
+ THR_SCHED_UNLOCK(curthread, curthread);
/* Add to the list of owned mutexes: */
- _MUTEX_ASSERT_NOT_OWNED(*mutex);
+ MUTEX_ASSERT_NOT_OWNED(*m);
TAILQ_INSERT_TAIL(&curthread->mutexq,
- (*mutex), m_qe);
- } else if ((*mutex)->m_owner == curthread)
- ret = mutex_self_lock(*mutex);
- else {
- /*
- * Join the queue of threads waiting to lock
- * the mutex:
- */
- mutex_queue_enq(*mutex, curthread);
+ (*m), m_qe);
+
+ /* Unlock the mutex structure: */
+ THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
+ } else if ((*m)->m_owner == curthread) {
+ ret = mutex_self_lock(curthread, *m);
+ /* Unlock the mutex structure: */
+ THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
+ } else {
/*
- * Keep a pointer to the mutex this thread
- * is waiting on:
+ * Join the queue of threads waiting to lock
+ * the mutex and save a pointer to the mutex.
*/
- curthread->data.mutex = *mutex;
+ mutex_queue_enq(*m, curthread);
+ curthread->data.mutex = *m;
/* Clear any previous error: */
curthread->error = 0;
/*
- * Unlock the mutex structure and schedule the
- * next thread:
+ * This thread is active and is in a critical
+ * region (holding the mutex lock); we should
+ * be able to safely set the state.
*/
- _thread_kern_sched_state_unlock(PS_MUTEX_WAIT,
- &(*mutex)->lock, __FILE__, __LINE__);
+ THR_SET_STATE(curthread, PS_MUTEX_WAIT);
+
+ /* Unlock the mutex structure: */
+ THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
- /* Lock the mutex structure again: */
- _SPINLOCK(&(*mutex)->lock);
+ /* Schedule the next thread: */
+ _thr_sched_switch(curthread);
/*
* The threads priority may have changed while
@@ -670,107 +656,111 @@ mutex_lock_common(pthread_mutex_t * mutex)
/* Trap invalid mutex types: */
default:
+ /* Unlock the mutex structure: */
+ THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
+
/* Return an invalid argument error: */
ret = EINVAL;
break;
}
- /*
- * Check to see if this thread was interrupted and
- * is still in the mutex queue of waiting threads:
- */
- if (curthread->interrupted != 0)
- mutex_queue_remove(*mutex, curthread);
-
- /* Unlock the mutex structure: */
- _SPINUNLOCK(&(*mutex)->lock);
-
- /*
- * Undefer and handle pending signals, yielding if
- * necessary:
- */
- _thread_kern_sig_undefer();
- } while (((*mutex)->m_owner != curthread) && (ret == 0) &&
+ } while (((*m)->m_owner != curthread) && (ret == 0) &&
(curthread->interrupted == 0));
- if (curthread->interrupted != 0 &&
- curthread->continuation != NULL)
- curthread->continuation((void *) curthread);
+ /*
+ * Check to see if this thread was interrupted and
+ * is still in the mutex queue of waiting threads:
+ */
+ if (curthread->interrupted != 0) {
+ /* Remove this thread from the mutex queue. */
+ THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
+ if (THR_IN_SYNCQ(curthread))
+ mutex_queue_remove(*m, curthread);
+ THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
+
+ /* Check for asynchronous cancellation. */
+ if (curthread->continuation != NULL)
+ curthread->continuation((void *) curthread);
+ }
/* Return the completion status: */
return (ret);
}
int
-__pthread_mutex_lock(pthread_mutex_t *mutex)
+__pthread_mutex_lock(pthread_mutex_t *m)
{
+ struct pthread *curthread;
int ret = 0;
- if (_thread_initial == NULL)
- _thread_init();
+ if (_thr_initial == NULL)
+ _libpthread_init(NULL);
- if (mutex == NULL)
+ curthread = _get_curthread();
+ if (m == NULL)
ret = EINVAL;
/*
* If the mutex is statically initialized, perform the dynamic
* initialization:
*/
- else if ((*mutex != NULL) || ((ret = init_static(mutex)) == 0))
- ret = mutex_lock_common(mutex);
+ else if ((*m != NULL) || ((ret = init_static(curthread, m)) == 0))
+ ret = mutex_lock_common(curthread, m);
return (ret);
}
int
-_pthread_mutex_lock(pthread_mutex_t *mutex)
+_pthread_mutex_lock(pthread_mutex_t *m)
{
+ struct pthread *curthread;
int ret = 0;
- if (_thread_initial == NULL)
- _thread_init();
+ if (_thr_initial == NULL)
+ _libpthread_init(NULL);
+ curthread = _get_curthread();
- if (mutex == NULL)
+ if (m == NULL)
ret = EINVAL;
/*
* If the mutex is statically initialized, perform the dynamic
* initialization marking it private (delete safe):
*/
- else if ((*mutex != NULL) || ((ret = init_static_private(mutex)) == 0))
- ret = mutex_lock_common(mutex);
+ else if ((*m != NULL) ||
+ ((ret = init_static_private(curthread, m)) == 0))
+ ret = mutex_lock_common(curthread, m);
return (ret);
}
int
-_pthread_mutex_unlock(pthread_mutex_t * mutex)
+_pthread_mutex_unlock(pthread_mutex_t *m)
{
- return (mutex_unlock_common(mutex, /* add reference */ 0));
+ return (mutex_unlock_common(m, /* add reference */ 0));
}
int
-_mutex_cv_unlock(pthread_mutex_t * mutex)
+_mutex_cv_unlock(pthread_mutex_t *m)
{
- return (mutex_unlock_common(mutex, /* add reference */ 1));
+ return (mutex_unlock_common(m, /* add reference */ 1));
}
int
-_mutex_cv_lock(pthread_mutex_t * mutex)
+_mutex_cv_lock(pthread_mutex_t *m)
{
int ret;
- if ((ret = pthread_mutex_lock(mutex)) == 0)
- (*mutex)->m_refcount--;
+ if ((ret = _pthread_mutex_lock(m)) == 0)
+ (*m)->m_refcount--;
return (ret);
}
static inline int
-mutex_self_trylock(pthread_mutex_t mutex)
+mutex_self_trylock(struct pthread *curthread, pthread_mutex_t m)
{
int ret = 0;
- switch (mutex->m_type) {
-
+ switch (m->m_type) {
/* case PTHREAD_MUTEX_DEFAULT: */
case PTHREAD_MUTEX_ERRORCHECK:
case PTHREAD_MUTEX_NORMAL:
@@ -778,12 +768,15 @@ mutex_self_trylock(pthread_mutex_t mutex)
* POSIX specifies that mutexes should return EDEADLK if a
* recursive lock is detected.
*/
- ret = EBUSY;
+ if (m->m_owner == curthread)
+ ret = EDEADLK;
+ else
+ ret = EBUSY;
break;
case PTHREAD_MUTEX_RECURSIVE:
/* Increment the lock count: */
- mutex->m_data.m_count++;
+ m->m_count++;
break;
default:
@@ -791,15 +784,15 @@ mutex_self_trylock(pthread_mutex_t mutex)
ret = EINVAL;
}
- return(ret);
+ return (ret);
}
static inline int
-mutex_self_lock(pthread_mutex_t mutex)
+mutex_self_lock(struct pthread *curthread, pthread_mutex_t m)
{
int ret = 0;
- switch (mutex->m_type) {
+ switch (m->m_type) {
/* case PTHREAD_MUTEX_DEFAULT: */
case PTHREAD_MUTEX_ERRORCHECK:
/*
@@ -814,13 +807,18 @@ mutex_self_lock(pthread_mutex_t mutex)
* What SS2 define as a 'normal' mutex. Intentionally
* deadlock on attempts to get a lock you already own.
*/
- _thread_kern_sched_state_unlock(PS_DEADLOCK,
- &mutex->lock, __FILE__, __LINE__);
+ THR_SET_STATE(curthread, PS_DEADLOCK);
+
+ /* Unlock the mutex structure: */
+ THR_LOCK_RELEASE(curthread, &m->m_lock);
+
+ /* Schedule the next thread: */
+ _thr_sched_switch(curthread);
break;
case PTHREAD_MUTEX_RECURSIVE:
/* Increment the lock count: */
- mutex->m_data.m_count++;
+ m->m_count++;
break;
default:
@@ -828,82 +826,58 @@ mutex_self_lock(pthread_mutex_t mutex)
ret = EINVAL;
}
- return(ret);
+ return (ret);
}
-static inline int
-mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
+static int
+mutex_unlock_common(pthread_mutex_t *m, int add_reference)
{
struct pthread *curthread = _get_curthread();
int ret = 0;
- if (mutex == NULL || *mutex == NULL) {
+ if (m == NULL || *m == NULL)
ret = EINVAL;
- } else {
- /*
- * Defer signals to protect the scheduling queues from
- * access by the signal handler:
- */
- _thread_kern_sig_defer();
-
+ else {
/* Lock the mutex structure: */
- _SPINLOCK(&(*mutex)->lock);
+ THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
/* Process according to mutex type: */
- switch ((*mutex)->m_protocol) {
+ switch ((*m)->m_protocol) {
/* Default POSIX mutex: */
case PTHREAD_PRIO_NONE:
/*
* Check if the running thread is not the owner of the
* mutex:
*/
- if ((*mutex)->m_owner != curthread) {
+ if ((*m)->m_owner != curthread)
/*
* Return an invalid argument error for no
* owner and a permission error otherwise:
*/
- ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
- }
- else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
- ((*mutex)->m_data.m_count > 0)) {
+ ret = (*m)->m_owner == NULL ? EINVAL : EPERM;
+
+ else if (((*m)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
+ ((*m)->m_count > 0))
/* Decrement the count: */
- (*mutex)->m_data.m_count--;
- } else {
+ (*m)->m_count--;
+ else {
/*
- * Clear the count in case this is recursive
+ * Clear the count in case this is a recursive
* mutex.
*/
- (*mutex)->m_data.m_count = 0;
+ (*m)->m_count = 0;
/* Remove the mutex from the threads queue. */
- _MUTEX_ASSERT_IS_OWNED(*mutex);
- TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
- (*mutex), m_qe);
- _MUTEX_INIT_LINK(*mutex);
+ MUTEX_ASSERT_IS_OWNED(*m);
+ TAILQ_REMOVE(&(*m)->m_owner->mutexq,
+ (*m), m_qe);
+ MUTEX_INIT_LINK(*m);
/*
- * Get the next thread from the queue of
- * threads waiting on the mutex:
+ * Hand off the mutex to the next waiting
+ * thread:
*/
- if (((*mutex)->m_owner =
- mutex_queue_deq(*mutex)) != NULL) {
- /* Make the new owner runnable: */
- PTHREAD_NEW_STATE((*mutex)->m_owner,
- PS_RUNNING);
-
- /*
- * Add the mutex to the threads list of
- * owned mutexes:
- */
- TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
- (*mutex), m_qe);
-
- /*
- * The owner is no longer waiting for
- * this mutex:
- */
- (*mutex)->m_owner->data.mutex = NULL;
- }
+ mutex_handoff(curthread, *m);
}
break;
@@ -913,23 +887,23 @@ mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
* Check if the running thread is not the owner of the
* mutex:
*/
- if ((*mutex)->m_owner != curthread) {
+ if ((*m)->m_owner != curthread)
/*
* Return an invalid argument error for no
* owner and a permission error otherwise:
*/
- ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
- }
- else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
- ((*mutex)->m_data.m_count > 0)) {
+ ret = (*m)->m_owner == NULL ? EINVAL : EPERM;
+
+ else if (((*m)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
+ ((*m)->m_count > 0))
/* Decrement the count: */
- (*mutex)->m_data.m_count--;
- } else {
+ (*m)->m_count--;
+ else {
/*
* Clear the count in case this is recursive
* mutex.
*/
- (*mutex)->m_data.m_count = 0;
+ (*m)->m_count = 0;
/*
* Restore the threads inherited priority and
@@ -937,11 +911,13 @@ mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
* not to override changes in the threads base
* priority subsequent to locking the mutex).
*/
+ THR_SCHED_LOCK(curthread, curthread);
curthread->inherited_priority =
- (*mutex)->m_saved_prio;
+ (*m)->m_saved_prio;
curthread->active_priority =
MAX(curthread->inherited_priority,
curthread->base_priority);
+ THR_SCHED_UNLOCK(curthread, curthread);
/*
* This thread now owns one less priority mutex.
@@ -949,69 +925,16 @@ mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
curthread->priority_mutex_count--;
/* Remove the mutex from the threads queue. */
- _MUTEX_ASSERT_IS_OWNED(*mutex);
- TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
- (*mutex), m_qe);
- _MUTEX_INIT_LINK(*mutex);
+ MUTEX_ASSERT_IS_OWNED(*m);
+ TAILQ_REMOVE(&(*m)->m_owner->mutexq,
+ (*m), m_qe);
+ MUTEX_INIT_LINK(*m);
/*
- * Get the next thread from the queue of threads
- * waiting on the mutex:
+ * Hand off the mutex to the next waiting
+ * thread:
*/
- if (((*mutex)->m_owner =
- mutex_queue_deq(*mutex)) == NULL)
- /* This mutex has no priority. */
- (*mutex)->m_prio = 0;
- else {
- /*
- * Track number of priority mutexes owned:
- */
- (*mutex)->m_owner->priority_mutex_count++;
-
- /*
- * Add the mutex to the threads list
- * of owned mutexes:
- */
- TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
- (*mutex), m_qe);
-
- /*
- * The owner is no longer waiting for
- * this mutex:
- */
- (*mutex)->m_owner->data.mutex = NULL;
-
- /*
- * Set the priority of the mutex. Since
- * our waiting threads are in descending
- * priority order, the priority of the
- * mutex becomes the active priority of
- * the thread we just dequeued.
- */
- (*mutex)->m_prio =
- (*mutex)->m_owner->active_priority;
-
- /*
- * Save the owning threads inherited
- * priority:
- */
- (*mutex)->m_saved_prio =
- (*mutex)->m_owner->inherited_priority;
-
- /*
- * The owning threads inherited priority
- * now becomes his active priority (the
- * priority of the mutex).
- */
- (*mutex)->m_owner->inherited_priority =
- (*mutex)->m_prio;
-
- /*
- * Make the new owner runnable:
- */
- PTHREAD_NEW_STATE((*mutex)->m_owner,
- PS_RUNNING);
- }
+ mutex_handoff(curthread, *m);
}
break;
@@ -1021,23 +944,23 @@ mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
* Check if the running thread is not the owner of the
* mutex:
*/
- if ((*mutex)->m_owner != curthread) {
+ if ((*m)->m_owner != curthread)
/*
* Return an invalid argument error for no
* owner and a permission error otherwise:
*/
- ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
- }
- else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
- ((*mutex)->m_data.m_count > 0)) {
+ ret = (*m)->m_owner == NULL ? EINVAL : EPERM;
+
+ else if (((*m)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
+ ((*m)->m_count > 0))
/* Decrement the count: */
- (*mutex)->m_data.m_count--;
- } else {
+ (*m)->m_count--;
+ else {
/*
- * Clear the count in case this is recursive
+ * Clear the count in case this is a recursive
* mutex.
*/
- (*mutex)->m_data.m_count = 0;
+ (*m)->m_count = 0;
/*
* Restore the threads inherited priority and
@@ -1045,11 +968,13 @@ mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
* not to override changes in the threads base
* priority subsequent to locking the mutex).
*/
+ THR_SCHED_LOCK(curthread, curthread);
curthread->inherited_priority =
- (*mutex)->m_saved_prio;
+ (*m)->m_saved_prio;
curthread->active_priority =
MAX(curthread->inherited_priority,
curthread->base_priority);
+ THR_SCHED_UNLOCK(curthread, curthread);
/*
* This thread now owns one less priority mutex.
@@ -1057,80 +982,16 @@ mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
curthread->priority_mutex_count--;
/* Remove the mutex from the threads queue. */
- _MUTEX_ASSERT_IS_OWNED(*mutex);
- TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
- (*mutex), m_qe);
- _MUTEX_INIT_LINK(*mutex);
+ MUTEX_ASSERT_IS_OWNED(*m);
+ TAILQ_REMOVE(&(*m)->m_owner->mutexq,
+ (*m), m_qe);
+ MUTEX_INIT_LINK(*m);
/*
- * Enter a loop to find a waiting thread whose
- * active priority will not cause a ceiling
- * violation:
+ * Hand off the mutex to the next waiting
+ * thread:
*/
- while ((((*mutex)->m_owner =
- mutex_queue_deq(*mutex)) != NULL) &&
- ((*mutex)->m_owner->active_priority >
- (*mutex)->m_prio)) {
- /*
- * Either the mutex ceiling priority
- * been lowered and/or this threads
- * priority has been raised subsequent
- * to this thread being queued on the
- * waiting list.
- */
- (*mutex)->m_owner->error = EINVAL;
- PTHREAD_NEW_STATE((*mutex)->m_owner,
- PS_RUNNING);
- /*
- * The thread is no longer waiting for
- * this mutex:
- */
- (*mutex)->m_owner->data.mutex = NULL;
- }
-
- /* Check for a new owner: */
- if ((*mutex)->m_owner != NULL) {
- /*
- * Track number of priority mutexes owned:
- */
- (*mutex)->m_owner->priority_mutex_count++;
-
- /*
- * Add the mutex to the threads list
- * of owned mutexes:
- */
- TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
- (*mutex), m_qe);
-
- /*
- * The owner is no longer waiting for
- * this mutex:
- */
- (*mutex)->m_owner->data.mutex = NULL;
-
- /*
- * Save the owning threads inherited
- * priority:
- */
- (*mutex)->m_saved_prio =
- (*mutex)->m_owner->inherited_priority;
-
- /*
- * The owning thread inherits the
- * ceiling priority of the mutex and
- * executes at that priority:
- */
- (*mutex)->m_owner->inherited_priority =
- (*mutex)->m_prio;
- (*mutex)->m_owner->active_priority =
- (*mutex)->m_prio;
-
- /*
- * Make the new owner runnable:
- */
- PTHREAD_NEW_STATE((*mutex)->m_owner,
- PS_RUNNING);
- }
+ mutex_handoff(curthread, *m);
}
break;
@@ -1141,19 +1002,12 @@ mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
break;
}
- if ((ret == 0) && (add_reference != 0)) {
+ if ((ret == 0) && (add_reference != 0))
/* Increment the reference count: */
- (*mutex)->m_refcount++;
- }
+ (*m)->m_refcount++;
/* Unlock the mutex structure: */
- _SPINUNLOCK(&(*mutex)->lock);
-
- /*
- * Undefer and handle pending signals, yielding if
- * necessary:
- */
- _thread_kern_sig_undefer();
+ THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
}
/* Return the completion status: */
@@ -1168,11 +1022,14 @@ mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
* changes to active priorities of other threads and to the ordering
* of mutex locking by waiting threads.
*
- * This must be called while thread scheduling is deferred.
+ * This must be called without the target thread's scheduling lock held.
*/
void
-_mutex_notify_priochange(pthread_t pthread)
+_mutex_notify_priochange(struct pthread *curthread, struct pthread *pthread,
+ int propagate_prio)
{
+ struct pthread_mutex *m;
+
/* Adjust the priorites of any owned priority mutexes: */
if (pthread->priority_mutex_count > 0) {
/*
@@ -1180,14 +1037,29 @@ _mutex_notify_priochange(pthread_t pthread)
* their priorities to account for this threads change
* in priority. This has the side effect of changing
* the threads active priority.
+ *
+ * Be sure to lock the first mutex in the list of owned
+ * mutexes. This acts as a barrier against another
+ * simultaneous call to change the threads priority
+ * and from the owning thread releasing the mutex.
*/
- mutex_rescan_owned(pthread, /* rescan all owned */ NULL);
+ m = TAILQ_FIRST(&pthread->mutexq);
+ if (m != NULL) {
+ THR_LOCK_ACQUIRE(curthread, &m->m_lock);
+ /*
+ * Make sure the thread still owns the lock.
+ */
+ if (m == TAILQ_FIRST(&pthread->mutexq))
+ mutex_rescan_owned(curthread, pthread,
+ /* rescan all owned */ NULL);
+ THR_LOCK_RELEASE(curthread, &m->m_lock);
+ }
}
/*
* If this thread is waiting on a priority inheritence mutex,
* check for priority adjustments. A change in priority can
- * also effect a ceiling violation(*) for a thread waiting on
+ * also cause a ceiling violation(*) for a thread waiting on
* a priority protection mutex; we don't perform the check here
* as it is done in pthread_mutex_unlock.
*
@@ -1196,32 +1068,53 @@ _mutex_notify_priochange(pthread_t pthread)
* does not affect ownership of that mutex; the ceiling
* priority is only checked before mutex ownership occurs.
*/
- if (pthread->state == PS_MUTEX_WAIT) {
- /* Lock the mutex structure: */
- _SPINLOCK(&pthread->data.mutex->lock);
-
+ if (propagate_prio != 0) {
/*
- * Check to make sure this thread is still in the same state
- * (the spinlock above can yield the CPU to another thread):
+ * Lock the thread's scheduling queue. This is a bit
+ * convoluted; the "in synchronization queue flag" can
+ * only be cleared with both the thread's scheduling and
+ * mutex locks held. The thread's pointer to the wanted
+ * mutex is guaranteed to be valid during this time.
*/
- if (pthread->state == PS_MUTEX_WAIT) {
+ THR_SCHED_LOCK(curthread, pthread);
+
+ if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) == 0) ||
+ ((m = pthread->data.mutex) == NULL))
+ THR_SCHED_UNLOCK(curthread, pthread);
+ else {
+ /*
+ * This thread is currently waiting on a mutex; unlock
+ * the scheduling queue lock and lock the mutex. We
+ * can't hold both at the same time because the locking
+ * order could cause a deadlock.
+ */
+ THR_SCHED_UNLOCK(curthread, pthread);
+ THR_LOCK_ACQUIRE(curthread, &m->m_lock);
+
/*
- * Remove and reinsert this thread into the list of
- * waiting threads to preserve decreasing priority
- * order.
+ * Check to make sure this thread is still in the
+ * same state (the lock above can yield the CPU to
+ * another thread or the thread may be running on
+ * another CPU).
*/
- mutex_queue_remove(pthread->data.mutex, pthread);
- mutex_queue_enq(pthread->data.mutex, pthread);
+ if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
+ (pthread->data.mutex == m)) {
+ /*
+ * Remove and reinsert this thread into
+ * the list of waiting threads to preserve
+ * decreasing priority order.
+ */
+ mutex_queue_remove(m, pthread);
+ mutex_queue_enq(m, pthread);
- if (pthread->data.mutex->m_protocol ==
- PTHREAD_PRIO_INHERIT) {
- /* Adjust priorities: */
- mutex_priority_adjust(pthread->data.mutex);
+ if (m->m_protocol == PTHREAD_PRIO_INHERIT)
+ /* Adjust priorities: */
+ mutex_priority_adjust(curthread, m);
}
- }
- /* Unlock the mutex structure: */
- _SPINUNLOCK(&pthread->data.mutex->lock);
+ /* Unlock the mutex structure: */
+ THR_LOCK_RELEASE(curthread, &m->m_lock);
+ }
}
}
@@ -1229,13 +1122,15 @@ _mutex_notify_priochange(pthread_t pthread)
* Called when a new thread is added to the mutex waiting queue or
* when a threads priority changes that is already in the mutex
* waiting queue.
+ *
+ * This must be called with the mutex locked by the current thread.
*/
static void
-mutex_priority_adjust(pthread_mutex_t mutex)
+mutex_priority_adjust(struct pthread *curthread, pthread_mutex_t mutex)
{
- pthread_t pthread_next, pthread = mutex->m_owner;
- int temp_prio;
pthread_mutex_t m = mutex;
+ struct pthread *pthread_next, *pthread = mutex->m_owner;
+ int done, temp_prio;
/*
* Calculate the mutex priority as the maximum of the highest
@@ -1260,7 +1155,12 @@ mutex_priority_adjust(pthread_mutex_t mutex)
/* Set new priority of the mutex: */
m->m_prio = temp_prio;
- while (m != NULL) {
+ /*
+ * Don't unlock the mutex passed in as an argument. It is
+ * expected to be locked and unlocked by the caller.
+ */
+ done = 1;
+ do {
/*
* Save the threads priority before rescanning the
* owned mutexes:
@@ -1268,11 +1168,26 @@ mutex_priority_adjust(pthread_mutex_t mutex)
temp_prio = pthread->active_priority;
/*
- * Fix the priorities for all the mutexes this thread has
- * locked since taking this mutex. This also has a
+ * Fix the priorities for all mutexes held by the owning
+ * thread since taking this mutex. This also has a
* potential side-effect of changing the threads priority.
+ *
+ * At this point the mutex is locked by the current thread.
+ * The owning thread can't release the mutex until it is
+ * unlocked, so we should be able to safely walk its list
+ * of owned mutexes.
+ */
+ mutex_rescan_owned(curthread, pthread, m);
+
+ /*
+ * If this isn't the first time through the loop,
+ * the current mutex needs to be unlocked.
*/
- mutex_rescan_owned(pthread, m);
+ if (done == 0)
+ THR_LOCK_RELEASE(curthread, &m->m_lock);
+
+ /* Assume we're done unless told otherwise: */
+ done = 1;
/*
* If the thread is currently waiting on a mutex, check
@@ -1280,56 +1195,70 @@ mutex_priority_adjust(pthread_mutex_t mutex)
* priority of the mutex.
*/
if ((temp_prio != pthread->active_priority) &&
- (pthread->state == PS_MUTEX_WAIT) &&
- (pthread->data.mutex->m_protocol == PTHREAD_PRIO_INHERIT)) {
- /* Grab the mutex this thread is waiting on: */
- m = pthread->data.mutex;
+ ((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
+ ((m = pthread->data.mutex) != NULL) &&
+ (m->m_protocol == PTHREAD_PRIO_INHERIT)) {
+ /* Lock the mutex structure: */
+ THR_LOCK_ACQUIRE(curthread, &m->m_lock);
/*
- * The priority for this thread has changed. Remove
- * and reinsert this thread into the list of waiting
- * threads to preserve decreasing priority order.
+ * Make sure the thread is still waiting on the
+ * mutex:
*/
- mutex_queue_remove(m, pthread);
- mutex_queue_enq(m, pthread);
-
- /* Grab the waiting thread with highest priority: */
- pthread_next = TAILQ_FIRST(&m->m_queue);
+ if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
+ (m == pthread->data.mutex)) {
+ /*
+ * The priority for this thread has changed.
+ * Remove and reinsert this thread into the
+ * list of waiting threads to preserve
+ * decreasing priority order.
+ */
+ mutex_queue_remove(m, pthread);
+ mutex_queue_enq(m, pthread);
- /*
- * Calculate the mutex priority as the maximum of the
- * highest active priority of any waiting threads and
- * the owning threads active priority.
- */
- temp_prio = MAX(pthread_next->active_priority,
- MAX(m->m_saved_prio, m->m_owner->base_priority));
+ /*
+ * Grab the waiting thread with highest
+ * priority:
+ */
+ pthread_next = TAILQ_FIRST(&m->m_queue);
- if (temp_prio != m->m_prio) {
/*
- * The priority needs to be propagated to the
- * mutex this thread is waiting on and up to
- * the owner of that mutex.
+ * Calculate the mutex priority as the maximum
+ * of the highest active priority of any
+ * waiting threads and the owning threads
+ * active priority.
*/
- m->m_prio = temp_prio;
- pthread = m->m_owner;
- }
- else
- /* We're done: */
- m = NULL;
+ temp_prio = MAX(pthread_next->active_priority,
+ MAX(m->m_saved_prio,
+ m->m_owner->base_priority));
+ if (temp_prio != m->m_prio) {
+ /*
+ * The priority needs to be propagated
+ * to the mutex this thread is waiting
+ * on and up to the owner of that mutex.
+ */
+ m->m_prio = temp_prio;
+ pthread = m->m_owner;
+
+ /* We're not done yet: */
+ done = 0;
+ }
+ }
+ /* Only release the mutex if we're done: */
+ if (done != 0)
+ THR_LOCK_RELEASE(curthread, &m->m_lock);
}
- else
- /* We're done: */
- m = NULL;
- }
+ } while (done == 0);
}
static void
-mutex_rescan_owned(pthread_t pthread, pthread_mutex_t mutex)
+mutex_rescan_owned(struct pthread *curthread, struct pthread *pthread,
+ struct pthread_mutex *mutex)
{
- int active_prio, inherited_prio;
- pthread_mutex_t m;
- pthread_t pthread_next;
+ struct pthread_mutex *m;
+ struct pthread *pthread_next;
+ int active_prio, inherited_prio;
/*
* Start walking the mutexes the thread has taken since
@@ -1344,8 +1273,7 @@ mutex_rescan_owned(pthread_t pthread, pthread_mutex_t mutex)
/* There is no inherited priority yet. */
inherited_prio = 0;
- }
- else {
+ } else {
/*
* The caller wants to start after a specific mutex. It
* is assumed that this mutex is a priority inheritence
@@ -1359,7 +1287,7 @@ mutex_rescan_owned(pthread_t pthread, pthread_mutex_t mutex)
}
active_prio = MAX(inherited_prio, pthread->base_priority);
- while (m != NULL) {
+ for (; m != NULL; m = TAILQ_NEXT(m, m_qe)) {
/*
* We only want to deal with priority inheritence
* mutexes. This might be optimized by only placing
@@ -1386,9 +1314,6 @@ mutex_rescan_owned(pthread_t pthread, pthread_mutex_t mutex)
inherited_prio = m->m_prio;
active_prio = MAX(m->m_prio, pthread->base_priority);
}
-
- /* Advance to the next mutex owned by this thread: */
- m = TAILQ_NEXT(m, m_qe);
}
/*
@@ -1399,16 +1324,22 @@ mutex_rescan_owned(pthread_t pthread, pthread_mutex_t mutex)
active_prio = MAX(inherited_prio, pthread->base_priority);
if (active_prio != pthread->active_priority) {
- /*
- * If this thread is in the priority queue, it must be
- * removed and reinserted for its new priority.
- */
- if (pthread->flags & PTHREAD_FLAGS_IN_PRIOQ) {
+ /* Lock the thread's scheduling queue: */
+ THR_SCHED_LOCK(curthread, pthread);
+
+ if ((pthread->flags & THR_FLAGS_IN_RUNQ) == 0) {
/*
- * Remove the thread from the priority queue
- * before changing its priority:
+ * This thread is not in a run queue. Just set
+ * its active priority.
*/
- PTHREAD_PRIOQ_REMOVE(pthread);
+ pthread->active_priority = active_prio;
+ }
+ else {
+ /*
+ * This thread is in a run queue. Remove it from
+ * the queue before changing its priority:
+ */
+ THR_RUNQ_REMOVE(pthread);
/*
* POSIX states that if the priority is being
@@ -1421,19 +1352,15 @@ mutex_rescan_owned(pthread_t pthread, pthread_mutex_t mutex)
/* Set the new active priority. */
pthread->active_priority = active_prio;
- PTHREAD_PRIOQ_INSERT_HEAD(pthread);
- }
- else {
+ THR_RUNQ_INSERT_HEAD(pthread);
+ } else {
/* Set the new active priority. */
pthread->active_priority = active_prio;
- PTHREAD_PRIOQ_INSERT_TAIL(pthread);
+ THR_RUNQ_INSERT_TAIL(pthread);
}
}
- else {
- /* Set the new active priority. */
- pthread->active_priority = active_prio;
- }
+ THR_SCHED_UNLOCK(curthread, pthread);
}
}
@@ -1449,36 +1376,182 @@ _mutex_unlock_private(pthread_t pthread)
}
}
+/*
+ * This is called by the current thread when it wants to back out of a
+ * mutex_lock in order to run a signal handler.
+ */
void
-_mutex_lock_backout(pthread_t pthread)
+_mutex_lock_backout(struct pthread *curthread)
{
- struct pthread_mutex *mutex;
+ struct pthread_mutex *m;
- /*
- * Defer signals to protect the scheduling queues from
- * access by the signal handler:
- */
- _thread_kern_sig_defer();
- if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) {
- mutex = pthread->data.mutex;
+ if ((curthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) {
+ /*
+ * Any other thread may clear the "in sync queue flag",
+ * but only the current thread can clear the pointer
+ * to the mutex. So if the flag is set, we can
+ * guarantee that the pointer to the mutex is valid.
+ * The only problem may be if the mutex is destroyed
+ * out from under us, but that should be considered
+ * an application bug.
+ */
+ m = curthread->data.mutex;
/* Lock the mutex structure: */
- _SPINLOCK(&mutex->lock);
+ THR_LOCK_ACQUIRE(curthread, &m->m_lock);
+
+
+ /*
+ * Check to make sure this thread doesn't already own
+ * the mutex. Since mutexes are unlocked with direct
+ * handoffs, it is possible the previous owner gave it
+ * to us after we checked the sync queue flag and before
+ * we locked the mutex structure.
+ */
+ if (m->m_owner == curthread) {
+ THR_LOCK_RELEASE(curthread, &m->m_lock);
+ mutex_unlock_common(&m, /* add_reference */ 0);
+ } else {
+ /*
+ * Remove ourselves from the mutex queue and
+ * clear the pointer to the mutex. We may no
+ * longer be in the mutex queue, but the removal
+ * function will DTRT.
+ */
+ mutex_queue_remove(m, curthread);
+ curthread->data.mutex = NULL;
+ THR_LOCK_RELEASE(curthread, &m->m_lock);
+ }
+ }
+}
+
+/*
+ * Dequeue a waiting thread from the head of a mutex queue in descending
+ * priority order.
+ *
+ * In order to properly dequeue a thread from the mutex queue and
+ * make it runnable without the possibility of errant wakeups, it
+ * is necessary to lock the thread's scheduling queue while also
+ * holding the mutex lock.
+ */
+static void
+mutex_handoff(struct pthread *curthread, struct pthread_mutex *mutex)
+{
+ struct pthread *pthread;
+
+ /* Keep dequeueing until we find a valid thread: */
+ mutex->m_owner = NULL;
+ pthread = TAILQ_FIRST(&mutex->m_queue);
+ while (pthread != NULL) {
+ /* Take the thread's scheduling lock: */
+ THR_SCHED_LOCK(curthread, pthread);
- mutex_queue_remove(mutex, pthread);
+ /* Remove the thread from the mutex queue: */
+ TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
+ pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;
- /* This thread is no longer waiting for the mutex: */
+ /* This thread is no longer waiting for this mutex. */
pthread->data.mutex = NULL;
- /* Unlock the mutex structure: */
- _SPINUNLOCK(&mutex->lock);
+ /*
+ * Only exit the loop if the thread hasn't been
+ * cancelled.
+ */
+ switch (mutex->m_protocol) {
+ case PTHREAD_PRIO_NONE:
+ /*
+ * Assign the new owner and add the mutex to the
+ * thread's list of owned mutexes.
+ */
+ mutex->m_owner = pthread;
+ TAILQ_INSERT_TAIL(&pthread->mutexq, mutex, m_qe);
+ break;
+ case PTHREAD_PRIO_INHERIT:
+ /*
+ * Assign the new owner and add the mutex to the
+ * thread's list of owned mutexes.
+ */
+ mutex->m_owner = pthread;
+ TAILQ_INSERT_TAIL(&pthread->mutexq, mutex, m_qe);
+
+ /* Track number of priority mutexes owned: */
+ pthread->priority_mutex_count++;
+
+ /*
+ * Set the priority of the mutex. Since our waiting
+ * threads are in descending priority order, the
+ * priority of the mutex becomes the active priority
+ * of the thread we just dequeued.
+ */
+ mutex->m_prio = pthread->active_priority;
+
+ /* Save the owning threads inherited priority: */
+ mutex->m_saved_prio = pthread->inherited_priority;
+
+ /*
+ * The owning threads inherited priority now becomes
+ * his active priority (the priority of the mutex).
+ */
+ pthread->inherited_priority = mutex->m_prio;
+ break;
+
+ case PTHREAD_PRIO_PROTECT:
+ if (pthread->active_priority > mutex->m_prio) {
+ /*
+ * Either the mutex ceiling priority has
+ * been lowered and/or this threads priority
+ * has been raised subsequent to the thread
+ * being queued on the waiting list.
+ */
+ pthread->error = EINVAL;
+ }
+ else {
+ /*
+ * Assign the new owner and add the mutex
+ * to the thread's list of owned mutexes.
+ */
+ mutex->m_owner = pthread;
+ TAILQ_INSERT_TAIL(&pthread->mutexq,
+ mutex, m_qe);
+
+ /* Track number of priority mutexes owned: */
+ pthread->priority_mutex_count++;
+
+ /*
+ * Save the owning threads inherited
+ * priority:
+ */
+ mutex->m_saved_prio =
+ pthread->inherited_priority;
+
+ /*
+ * The owning thread inherits the ceiling
+ * priority of the mutex and executes at
+ * that priority:
+ */
+ pthread->inherited_priority = mutex->m_prio;
+ pthread->active_priority = mutex->m_prio;
+
+ }
+ break;
+ }
+
+ /* Make the thread runnable and unlock the scheduling queue: */
+ _thr_setrunnable_unlocked(pthread);
+ THR_SCHED_UNLOCK(curthread, pthread);
+
+ if (mutex->m_owner == pthread)
+ /* We're done; a valid owner was found. */
+ break;
+ else
+ /* Get the next thread from the waiting queue: */
+ pthread = TAILQ_NEXT(pthread, sqe);
}
- /*
- * Undefer and handle pending signals, yielding if
- * necessary:
- */
- _thread_kern_sig_undefer();
+
+ if ((pthread == NULL) && (mutex->m_protocol == PTHREAD_PRIO_INHERIT))
+ /* This mutex has no priority: */
+ mutex->m_prio = 0;
}
/*
@@ -1486,13 +1559,13 @@ _mutex_lock_backout(pthread_t pthread)
* priority order.
*/
static inline pthread_t
-mutex_queue_deq(pthread_mutex_t mutex)
+mutex_queue_deq(struct pthread_mutex *mutex)
{
pthread_t pthread;
while ((pthread = TAILQ_FIRST(&mutex->m_queue)) != NULL) {
TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
- pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ;
+ pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;
/*
* Only exit the loop if the thread hasn't been
@@ -1502,7 +1575,7 @@ mutex_queue_deq(pthread_mutex_t mutex)
break;
}
- return(pthread);
+ return (pthread);
}
/*
@@ -1511,9 +1584,9 @@ mutex_queue_deq(pthread_mutex_t mutex)
static inline void
mutex_queue_remove(pthread_mutex_t mutex, pthread_t pthread)
{
- if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) {
+ if ((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) {
TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
- pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ;
+ pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;
}
}
@@ -1525,7 +1598,7 @@ mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread)
{
pthread_t tid = TAILQ_LAST(&mutex->m_queue, mutex_head);
- PTHREAD_ASSERT_NOT_IN_SYNCQ(pthread);
+ THR_ASSERT_NOT_IN_SYNCQ(pthread);
/*
* For the common case of all threads having equal priority,
* we perform a quick check against the priority of the thread
@@ -1539,6 +1612,5 @@ mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread)
tid = TAILQ_NEXT(tid, sqe);
TAILQ_INSERT_BEFORE(tid, pthread, sqe);
}
- pthread->flags |= PTHREAD_FLAGS_IN_MUTEXQ;
+ pthread->sflags |= THR_FLAGS_IN_SYNCQ;
}
-
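The reworked mutex_self_lock()/mutex_self_trylock() paths keep the POSIX-mandated relock behaviour: an error-checking mutex reports the self-deadlock, while a recursive mutex just bumps m_count. A short application-side sketch (illustrative only, not part of the commit):

#include <pthread.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	pthread_mutexattr_t attr;
	pthread_mutex_t m;
	int err;

	pthread_mutexattr_init(&attr);
	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
	pthread_mutex_init(&m, &attr);
	pthread_mutexattr_destroy(&attr);

	pthread_mutex_lock(&m);
	err = pthread_mutex_lock(&m);	/* relock by the owner */
	printf("errorcheck relock: %s\n", strerror(err));	/* EDEADLK */
	pthread_mutex_unlock(&m);
	pthread_mutex_destroy(&m);
	return (0);
}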
diff --git a/lib/libkse/thread/thr_mutex_prioceiling.c b/lib/libkse/thread/thr_mutex_prioceiling.c
index 7d2e92e..a78b5d1 100644
--- a/lib/libkse/thread/thr_mutex_prioceiling.c
+++ b/lib/libkse/thread/thr_mutex_prioceiling.c
@@ -98,16 +98,14 @@ _pthread_mutex_setprioceiling(pthread_mutex_t *mutex,
ret = EINVAL;
else if ((*mutex)->m_protocol != PTHREAD_PRIO_PROTECT)
ret = EINVAL;
- else {
- /* Lock the mutex: */
- if ((ret = pthread_mutex_lock(mutex)) == 0) {
- /* Return the old ceiling and set the new ceiling: */
- *old_ceiling = (*mutex)->m_prio;
- (*mutex)->m_prio = prioceiling;
+ /* Lock the mutex: */
+ else if ((ret = pthread_mutex_lock(mutex)) == 0) {
+ /* Return the old ceiling and set the new ceiling: */
+ *old_ceiling = (*mutex)->m_prio;
+ (*mutex)->m_prio = prioceiling;
- /* Unlock the mutex: */
- ret = pthread_mutex_unlock(mutex);
- }
+ /* Unlock the mutex: */
+ ret = pthread_mutex_unlock(mutex);
}
return(ret);
}
diff --git a/lib/libkse/thread/thr_mutex_protocol.c b/lib/libkse/thread/thr_mutex_protocol.c
index f7be5a6..9f0f262 100644
--- a/lib/libkse/thread/thr_mutex_protocol.c
+++ b/lib/libkse/thread/thr_mutex_protocol.c
@@ -63,7 +63,7 @@ _pthread_mutexattr_setprotocol(pthread_mutexattr_t *mattr, int protocol)
ret = EINVAL;
else {
(*mattr)->m_protocol = protocol;
- (*mattr)->m_ceiling = PTHREAD_MAX_PRIORITY;
+ (*mattr)->m_ceiling = THR_MAX_PRIORITY;
}
return(ret);
}
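Where the priority-protect protocol is supported, an application selects it through the attribute and can read back the ceiling the implementation assigned. A minimal sketch (not part of the commit; the valid ceiling range is implementation-defined):

#include <pthread.h>
#include <stdio.h>

int
main(void)
{
	pthread_mutexattr_t attr;
	pthread_mutex_t m;
	int ceiling = 0;

	pthread_mutexattr_init(&attr);
	pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_PROTECT);
	pthread_mutex_init(&m, &attr);
	pthread_mutexattr_destroy(&attr);

	/* Read back the ceiling the implementation assigned by default. */
	pthread_mutex_getprioceiling(&m, &ceiling);
	printf("default ceiling: %d\n", ceiling);

	pthread_mutex_destroy(&m);
	return (0);
}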
diff --git a/lib/libkse/thread/thr_nanosleep.c b/lib/libkse/thread/thr_nanosleep.c
index 6cccb87..bec3b66 100644
--- a/lib/libkse/thread/thr_nanosleep.c
+++ b/lib/libkse/thread/thr_nanosleep.c
@@ -39,57 +39,42 @@
__weak_reference(__nanosleep, nanosleep);
int
-_nanosleep(const struct timespec * time_to_sleep,
- struct timespec * time_remaining)
+_nanosleep(const struct timespec *time_to_sleep,
+ struct timespec *time_remaining)
{
struct pthread *curthread = _get_curthread();
int ret = 0;
- struct timespec current_time;
- struct timespec current_time1;
+ struct timespec ts, ts1;
struct timespec remaining_time;
- struct timeval tv;
/* Check if the time to sleep is legal: */
- if (time_to_sleep == NULL || time_to_sleep->tv_sec < 0 ||
- time_to_sleep->tv_nsec < 0 || time_to_sleep->tv_nsec >= 1000000000) {
+ if ((time_to_sleep == NULL) || (time_to_sleep->tv_sec < 0) ||
+ (time_to_sleep->tv_nsec < 0) ||
+ (time_to_sleep->tv_nsec >= 1000000000)) {
/* Return an EINVAL error : */
errno = EINVAL;
ret = -1;
} else {
- /*
- * As long as we're going to get the time of day, we
- * might as well store it in the global time of day:
- */
- gettimeofday((struct timeval *) &_sched_tod, NULL);
- GET_CURRENT_TOD(tv);
- TIMEVAL_TO_TIMESPEC(&tv, &current_time);
+ KSE_GET_TOD(curthread->kse, &ts);
/* Calculate the time for the current thread to wake up: */
- curthread->wakeup_time.tv_sec = current_time.tv_sec + time_to_sleep->tv_sec;
- curthread->wakeup_time.tv_nsec = current_time.tv_nsec + time_to_sleep->tv_nsec;
+ TIMESPEC_ADD(&curthread->wakeup_time, &ts, time_to_sleep);
- /* Check if the nanosecond field has overflowed: */
- if (curthread->wakeup_time.tv_nsec >= 1000000000) {
- /* Wrap the nanosecond field: */
- curthread->wakeup_time.tv_sec += 1;
- curthread->wakeup_time.tv_nsec -= 1000000000;
- }
+ THR_SCHED_LOCK(curthread, curthread);
curthread->interrupted = 0;
- /* Reschedule the current thread to sleep: */
- _thread_kern_sched_state(PS_SLEEP_WAIT, __FILE__, __LINE__);
+ THR_SET_STATE(curthread, PS_SLEEP_WAIT);
+ THR_SCHED_UNLOCK(curthread, curthread);
- /*
- * As long as we're going to get the time of day, we
- * might as well store it in the global time of day:
- */
- gettimeofday((struct timeval *) &_sched_tod, NULL);
- GET_CURRENT_TOD(tv);
- TIMEVAL_TO_TIMESPEC(&tv, &current_time1);
+ /* Reschedule the current thread to sleep: */
+ _thr_sched_switch(curthread);
/* Calculate the remaining time to sleep: */
- remaining_time.tv_sec = time_to_sleep->tv_sec + current_time.tv_sec - current_time1.tv_sec;
- remaining_time.tv_nsec = time_to_sleep->tv_nsec + current_time.tv_nsec - current_time1.tv_nsec;
+ KSE_GET_TOD(curthread->kse, &ts1);
+ remaining_time.tv_sec = time_to_sleep->tv_sec
+ + ts.tv_sec - ts1.tv_sec;
+ remaining_time.tv_nsec = time_to_sleep->tv_nsec
+ + ts.tv_nsec - ts1.tv_nsec;
/* Check if the nanosecond field has underflowed: */
if (remaining_time.tv_nsec < 0) {
@@ -97,9 +82,8 @@ _nanosleep(const struct timespec * time_to_sleep,
remaining_time.tv_sec -= 1;
remaining_time.tv_nsec += 1000000000;
}
-
/* Check if the nanosecond field has overflowed: */
- if (remaining_time.tv_nsec >= 1000000000) {
+ else if (remaining_time.tv_nsec >= 1000000000) {
/* Handle the overflow: */
remaining_time.tv_sec += 1;
remaining_time.tv_nsec -= 1000000000;
@@ -130,14 +114,15 @@ _nanosleep(const struct timespec * time_to_sleep,
}
int
-__nanosleep(const struct timespec * time_to_sleep, struct timespec *
- time_remaining)
+__nanosleep(const struct timespec *time_to_sleep,
+ struct timespec *time_remaining)
{
- int ret;
+ struct pthread *curthread = _get_curthread();
+ int ret;
- _thread_enter_cancellation_point();
+ _thr_enter_cancellation_point(curthread);
ret = _nanosleep(time_to_sleep, time_remaining);
- _thread_leave_cancellation_point();
+ _thr_leave_cancellation_point(curthread);
- return ret;
+ return (ret);
}
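The remaining-time arithmetic above is what an interrupted sleeper sees through the second argument of nanosleep(). A runnable sketch (not from the commit), installing the handler without SA_RESTART so the sleep really is interrupted:

#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <unistd.h>

static void
on_alarm(int sig)
{
	(void)sig;		/* just interrupt the sleep */
}

int
main(void)
{
	struct sigaction sa;
	struct timespec req = { 5, 0 }, rem = { 0, 0 };

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = on_alarm;	/* sa_flags == 0: no SA_RESTART */
	sigaction(SIGALRM, &sa, NULL);

	alarm(1);
	if (nanosleep(&req, &rem) == -1)
		/* Roughly four seconds should be reported as remaining. */
		printf("remaining: %ld.%09ld s\n",
		    (long)rem.tv_sec, (long)rem.tv_nsec);
	return (0);
}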
diff --git a/lib/libkse/thread/thr_once.c b/lib/libkse/thread/thr_once.c
index cef478d..152fdec 100644
--- a/lib/libkse/thread/thr_once.c
+++ b/lib/libkse/thread/thr_once.c
@@ -31,23 +31,25 @@
*
* $FreeBSD$
*/
+#include "namespace.h"
#include <pthread.h>
+#include "un-namespace.h"
#include "thr_private.h"
__weak_reference(_pthread_once, pthread_once);
int
-_pthread_once(pthread_once_t * once_control, void (*init_routine) (void))
+_pthread_once(pthread_once_t *once_control, void (*init_routine) (void))
{
if (once_control->state == PTHREAD_NEEDS_INIT) {
- if (_thread_initial == NULL)
- _thread_init();
- pthread_mutex_lock(&(once_control->mutex));
+ if (_thr_initial == NULL)
+ _libpthread_init(NULL);
+ _pthread_mutex_lock(&(once_control->mutex));
if (once_control->state == PTHREAD_NEEDS_INIT) {
init_routine();
once_control->state = PTHREAD_DONE_INIT;
}
- pthread_mutex_unlock(&(once_control->mutex));
+ _pthread_mutex_unlock(&(once_control->mutex));
}
return (0);
}
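
For reference, a small caller-side example of the interface implemented above; this is plain POSIX pthread_once() usage (compile with -lpthread), not libkse internals:

#include <pthread.h>
#include <stdio.h>

static pthread_once_t once = PTHREAD_ONCE_INIT;

static void
init_routine(void)
{
	printf("initialized exactly once\n");
}

static void *
worker(void *arg)
{
	pthread_once(&once, init_routine);
	return (NULL);
}

int
main(void)
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, worker, NULL);
	pthread_create(&t2, NULL, worker, NULL);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	return (0);
}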
diff --git a/lib/libkse/thread/thr_open.c b/lib/libkse/thread/thr_open.c
index b8a6635..ec60ba4 100644
--- a/lib/libkse/thread/thr_open.c
+++ b/lib/libkse/thread/thr_open.c
@@ -45,11 +45,12 @@ __weak_reference(__open, open);
int
__open(const char *path, int flags,...)
{
+ struct pthread *curthread = _get_curthread();
int ret;
int mode = 0;
va_list ap;
- _thread_enter_cancellation_point();
+ _thr_enter_cancellation_point(curthread);
/* Check if the file is being created: */
if (flags & O_CREAT) {
@@ -60,7 +61,7 @@ __open(const char *path, int flags,...)
}
ret = __sys_open(path, flags, mode);
- _thread_leave_cancellation_point();
+ _thr_leave_cancellation_point(curthread);
return ret;
}
diff --git a/lib/libkse/thread/thr_pause.c b/lib/libkse/thread/thr_pause.c
index 4a46be0..b6bcc51 100644
--- a/lib/libkse/thread/thr_pause.c
+++ b/lib/libkse/thread/thr_pause.c
@@ -38,11 +38,12 @@ __weak_reference(_pause, pause);
int
_pause(void)
{
+ struct pthread *curthread = _get_curthread();
int ret;
- _thread_enter_cancellation_point();
+ _thr_enter_cancellation_point(curthread);
ret = __pause();
- _thread_leave_cancellation_point();
+ _thr_leave_cancellation_point(curthread);
return ret;
}
diff --git a/lib/libkse/thread/thr_poll.c b/lib/libkse/thread/thr_poll.c
index 353b3cc..0b78047 100644
--- a/lib/libkse/thread/thr_poll.c
+++ b/lib/libkse/thread/thr_poll.c
@@ -46,11 +46,12 @@ __weak_reference(__poll, poll);
int
__poll(struct pollfd *fds, unsigned int nfds, int timeout)
{
+ struct pthread *curthread = _get_curthread();
int ret;
- _thread_enter_cancellation_point();
+ _thr_enter_cancellation_point(curthread);
ret = __sys_poll(fds, nfds, timeout);
- _thread_leave_cancellation_point();
+ _thr_leave_cancellation_point(curthread);
return ret;
}
diff --git a/lib/libkse/thread/thr_printf.c b/lib/libkse/thread/thr_printf.c
index 0da9ae5..f0791df 100644
--- a/lib/libkse/thread/thr_printf.c
+++ b/lib/libkse/thread/thr_printf.c
@@ -29,13 +29,9 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
-#include <sys/types.h>
-#include <sys/fcntl.h>
-#include <sys/uio.h>
-#include <errno.h>
#include <stdarg.h>
+#include <string.h>
#include <unistd.h>
-#include <pthread.h>
#include "thr_private.h"
@@ -109,7 +105,7 @@ static void
pchar(int fd, char c)
{
- write(fd, &c, 1);
+ __sys_write(fd, &c, 1);
}
/*
@@ -119,6 +115,6 @@ static void
pstr(int fd, const char *s)
{
- write(fd, s, strlen(s));
+ __sys_write(fd, s, strlen(s));
}
diff --git a/lib/libkse/thread/thr_priority_queue.c b/lib/libkse/thread/thr_priority_queue.c
index 7dcd752..b8bb2ca 100644
--- a/lib/libkse/thread/thr_priority_queue.c
+++ b/lib/libkse/thread/thr_priority_queue.c
@@ -42,47 +42,40 @@ static void pq_insert_prio_list(pq_queue_t *pq, int prio);
#if defined(_PTHREADS_INVARIANTS)
-static int _pq_active = 0;
+#define PQ_IN_SCHEDQ (THR_FLAGS_IN_RUNQ | THR_FLAGS_IN_WAITQ)
-#define _PQ_IN_SCHEDQ (PTHREAD_FLAGS_IN_PRIOQ | PTHREAD_FLAGS_IN_WAITQ | PTHREAD_FLAGS_IN_WORKQ)
-
-#define _PQ_SET_ACTIVE() _pq_active = 1
-#define _PQ_CLEAR_ACTIVE() _pq_active = 0
-#define _PQ_ASSERT_ACTIVE(msg) do { \
- if (_pq_active == 0) \
+#define PQ_SET_ACTIVE(pq) (pq)->pq_flags |= PQF_ACTIVE
+#define PQ_CLEAR_ACTIVE(pq) (pq)->pq_flags &= ~PQF_ACTIVE
+#define PQ_ASSERT_ACTIVE(pq, msg) do { \
+ if (((pq)->pq_flags & PQF_ACTIVE) == 0) \
PANIC(msg); \
} while (0)
-#define _PQ_ASSERT_INACTIVE(msg) do { \
- if (_pq_active != 0) \
+#define PQ_ASSERT_INACTIVE(pq, msg) do { \
+ if (((pq)->pq_flags & PQF_ACTIVE) != 0) \
PANIC(msg); \
} while (0)
-#define _PQ_ASSERT_IN_WAITQ(thrd, msg) do { \
- if (((thrd)->flags & PTHREAD_FLAGS_IN_WAITQ) == 0) \
+#define PQ_ASSERT_IN_WAITQ(thrd, msg) do { \
+ if (((thrd)->flags & THR_FLAGS_IN_WAITQ) == 0) \
PANIC(msg); \
} while (0)
-#define _PQ_ASSERT_IN_PRIOQ(thrd, msg) do { \
- if (((thrd)->flags & PTHREAD_FLAGS_IN_PRIOQ) == 0) \
+#define PQ_ASSERT_IN_RUNQ(thrd, msg) do { \
+ if (((thrd)->flags & THR_FLAGS_IN_RUNQ) == 0) \
PANIC(msg); \
} while (0)
-#define _PQ_ASSERT_NOT_QUEUED(thrd, msg) do { \
- if (((thrd)->flags & _PQ_IN_SCHEDQ) != 0) \
+#define PQ_ASSERT_NOT_QUEUED(thrd, msg) do { \
+ if (((thrd)->flags & PQ_IN_SCHEDQ) != 0) \
PANIC(msg); \
} while (0)
-#define _PQ_ASSERT_PROTECTED(msg) \
- PTHREAD_ASSERT((_thread_kern_kse_mailbox.km_curthread == NULL) || \
- ((_get_curthread())->sig_defer_count > 0), \
- msg);
#else
-#define _PQ_SET_ACTIVE()
-#define _PQ_CLEAR_ACTIVE()
-#define _PQ_ASSERT_ACTIVE(msg)
-#define _PQ_ASSERT_INACTIVE(msg)
-#define _PQ_ASSERT_IN_WAITQ(thrd, msg)
-#define _PQ_ASSERT_IN_PRIOQ(thrd, msg)
-#define _PQ_ASSERT_NOT_QUEUED(thrd, msg)
-#define _PQ_ASSERT_PROTECTED(msg)
+#define PQ_SET_ACTIVE(pq)
+#define PQ_CLEAR_ACTIVE(pq)
+#define PQ_ASSERT_ACTIVE(pq, msg)
+#define PQ_ASSERT_INACTIVE(pq, msg)
+#define PQ_ASSERT_IN_WAITQ(thrd, msg)
+#define PQ_ASSERT_IN_RUNQ(thrd, msg)
+#define PQ_ASSERT_NOT_QUEUED(thrd, msg)
#endif
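
The new PQ_ASSERT_* macros replace the old global _pq_active flag with a per-queue PQF_ACTIVE bit, so reentering queue code on the same queue panics when _PTHREADS_INVARIANTS is defined. A stripped-down stand-alone sketch of the same idea (simplified types, abort() standing in for PANIC()):

#include <stdio.h>
#include <stdlib.h>

#define PQF_ACTIVE	0x0001

struct toy_pq { int pq_flags; };

#define TOY_PQ_ASSERT_INACTIVE(pq, msg) do {			\
	if (((pq)->pq_flags & PQF_ACTIVE) != 0) {		\
		fprintf(stderr, "panic: %s\n", msg);		\
		abort();					\
	}							\
} while (0)

static void
toy_pq_remove(struct toy_pq *pq)
{
	TOY_PQ_ASSERT_INACTIVE(pq, "toy_pq_remove: reentered");
	pq->pq_flags |= PQF_ACTIVE;
	/* ... manipulate the queue ... */
	pq->pq_flags &= ~PQF_ACTIVE;
}

int
main(void)
{
	struct toy_pq pq = { 0 };

	toy_pq_remove(&pq);		/* fine: queue was inactive */
	pq.pq_flags |= PQF_ACTIVE;
	toy_pq_remove(&pq);		/* aborts: queue already active */
	return (0);
}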
@@ -123,10 +116,9 @@ _pq_init(pq_queue_t *pq)
pq->pq_lists[i].pl_prio = i;
pq->pq_lists[i].pl_queued = 0;
}
-
/* Initialize the priority queue: */
TAILQ_INIT(&pq->pq_queue);
- _PQ_CLEAR_ACTIVE();
+ pq->pq_flags = 0;
}
return (ret);
}
@@ -139,10 +131,9 @@ _pq_remove(pq_queue_t *pq, pthread_t pthread)
/*
* Make some assertions when debugging is enabled:
*/
- _PQ_ASSERT_INACTIVE("_pq_remove: pq_active");
- _PQ_SET_ACTIVE();
- _PQ_ASSERT_IN_PRIOQ(pthread, "_pq_remove: Not in priority queue");
- _PQ_ASSERT_PROTECTED("_pq_remove: prioq not protected!");
+ PQ_ASSERT_INACTIVE(pq, "_pq_remove: pq_active");
+ PQ_SET_ACTIVE(pq);
+ PQ_ASSERT_IN_RUNQ(pthread, "_pq_remove: Not in priority queue");
/*
* Remove this thread from priority list. Note that if
@@ -155,9 +146,9 @@ _pq_remove(pq_queue_t *pq, pthread_t pthread)
TAILQ_REMOVE(&pq->pq_lists[prio].pl_head, pthread, pqe);
	/* This thread is no longer in the priority queue. */
- pthread->flags &= ~PTHREAD_FLAGS_IN_PRIOQ;
+ pthread->flags &= ~THR_FLAGS_IN_RUNQ;
- _PQ_CLEAR_ACTIVE();
+ PQ_CLEAR_ACTIVE(pq);
}
@@ -167,34 +158,23 @@ _pq_insert_head(pq_queue_t *pq, pthread_t pthread)
int prio;
/*
- * Don't insert suspended threads into the priority queue.
- * The caller is responsible for setting the threads state.
+ * Make some assertions when debugging is enabled:
*/
- if ((pthread->flags & PTHREAD_FLAGS_SUSPENDED) != 0) {
- /* Make sure the threads state is suspended. */
- if (pthread->state != PS_SUSPENDED)
- PTHREAD_SET_STATE(pthread, PS_SUSPENDED);
- } else {
- /*
- * Make some assertions when debugging is enabled:
- */
- _PQ_ASSERT_INACTIVE("_pq_insert_head: pq_active");
- _PQ_SET_ACTIVE();
- _PQ_ASSERT_NOT_QUEUED(pthread,
- "_pq_insert_head: Already in priority queue");
- _PQ_ASSERT_PROTECTED("_pq_insert_head: prioq not protected!");
-
- prio = pthread->active_priority;
- TAILQ_INSERT_HEAD(&pq->pq_lists[prio].pl_head, pthread, pqe);
- if (pq->pq_lists[prio].pl_queued == 0)
- /* Insert the list into the priority queue: */
- pq_insert_prio_list(pq, prio);
-
- /* Mark this thread as being in the priority queue. */
- pthread->flags |= PTHREAD_FLAGS_IN_PRIOQ;
-
- _PQ_CLEAR_ACTIVE();
- }
+ PQ_ASSERT_INACTIVE(pq, "_pq_insert_head: pq_active");
+ PQ_SET_ACTIVE(pq);
+ PQ_ASSERT_NOT_QUEUED(pthread,
+ "_pq_insert_head: Already in priority queue");
+
+ prio = pthread->active_priority;
+ TAILQ_INSERT_HEAD(&pq->pq_lists[prio].pl_head, pthread, pqe);
+ if (pq->pq_lists[prio].pl_queued == 0)
+ /* Insert the list into the priority queue: */
+ pq_insert_prio_list(pq, prio);
+
+ /* Mark this thread as being in the priority queue. */
+ pthread->flags |= THR_FLAGS_IN_RUNQ;
+
+ PQ_CLEAR_ACTIVE(pq);
}
@@ -204,34 +184,23 @@ _pq_insert_tail(pq_queue_t *pq, pthread_t pthread)
int prio;
/*
- * Don't insert suspended threads into the priority queue.
- * The caller is responsible for setting the threads state.
+ * Make some assertions when debugging is enabled:
*/
- if ((pthread->flags & PTHREAD_FLAGS_SUSPENDED) != 0) {
- /* Make sure the threads state is suspended. */
- if (pthread->state != PS_SUSPENDED)
- PTHREAD_SET_STATE(pthread, PS_SUSPENDED);
- } else {
- /*
- * Make some assertions when debugging is enabled:
- */
- _PQ_ASSERT_INACTIVE("_pq_insert_tail: pq_active");
- _PQ_SET_ACTIVE();
- _PQ_ASSERT_NOT_QUEUED(pthread,
- "_pq_insert_tail: Already in priority queue");
- _PQ_ASSERT_PROTECTED("_pq_insert_tail: prioq not protected!");
-
- prio = pthread->active_priority;
- TAILQ_INSERT_TAIL(&pq->pq_lists[prio].pl_head, pthread, pqe);
- if (pq->pq_lists[prio].pl_queued == 0)
- /* Insert the list into the priority queue: */
- pq_insert_prio_list(pq, prio);
-
- /* Mark this thread as being in the priority queue. */
- pthread->flags |= PTHREAD_FLAGS_IN_PRIOQ;
-
- _PQ_CLEAR_ACTIVE();
- }
+ PQ_ASSERT_INACTIVE(pq, "_pq_insert_tail: pq_active");
+ PQ_SET_ACTIVE(pq);
+ PQ_ASSERT_NOT_QUEUED(pthread,
+ "_pq_insert_tail: Already in priority queue");
+
+ prio = pthread->active_priority;
+ TAILQ_INSERT_TAIL(&pq->pq_lists[prio].pl_head, pthread, pqe);
+ if (pq->pq_lists[prio].pl_queued == 0)
+ /* Insert the list into the priority queue: */
+ pq_insert_prio_list(pq, prio);
+
+ /* Mark this thread as being in the priority queue. */
+ pthread->flags |= THR_FLAGS_IN_RUNQ;
+
+ PQ_CLEAR_ACTIVE(pq);
}
@@ -244,9 +213,8 @@ _pq_first(pq_queue_t *pq)
/*
* Make some assertions when debugging is enabled:
*/
- _PQ_ASSERT_INACTIVE("_pq_first: pq_active");
- _PQ_SET_ACTIVE();
- _PQ_ASSERT_PROTECTED("_pq_first: prioq not protected!");
+ PQ_ASSERT_INACTIVE(pq, "_pq_first: pq_active");
+ PQ_SET_ACTIVE(pq);
while (((pql = TAILQ_FIRST(&pq->pq_queue)) != NULL) &&
(pthread == NULL)) {
@@ -259,21 +227,10 @@ _pq_first(pq_queue_t *pq)
/* Mark the list as not being in the queue: */
pql->pl_queued = 0;
- } else if ((pthread->flags & PTHREAD_FLAGS_SUSPENDED) != 0) {
- /*
- * This thread is suspended; remove it from the
- * list and ensure its state is suspended.
- */
- TAILQ_REMOVE(&pql->pl_head, pthread, pqe);
- PTHREAD_SET_STATE(pthread, PS_SUSPENDED);
-
- /* This thread is now longer in the priority queue. */
- pthread->flags &= ~PTHREAD_FLAGS_IN_PRIOQ;
- pthread = NULL;
}
}
- _PQ_CLEAR_ACTIVE();
+ PQ_CLEAR_ACTIVE(pq);
return (pthread);
}
@@ -286,8 +243,7 @@ pq_insert_prio_list(pq_queue_t *pq, int prio)
/*
* Make some assertions when debugging is enabled:
*/
- _PQ_ASSERT_ACTIVE("pq_insert_prio_list: pq_active");
- _PQ_ASSERT_PROTECTED("_pq_insert_prio_list: prioq not protected!");
+ PQ_ASSERT_ACTIVE(pq, "pq_insert_prio_list: pq_active");
/*
* The priority queue is in descending priority order. Start at
@@ -307,64 +263,3 @@ pq_insert_prio_list(pq_queue_t *pq, int prio)
/* Mark this list as being in the queue: */
pq->pq_lists[prio].pl_queued = 1;
}
-
-void
-_waitq_insert(pthread_t pthread)
-{
- pthread_t tid;
-
- /*
- * Make some assertions when debugging is enabled:
- */
- _PQ_ASSERT_INACTIVE("_waitq_insert: pq_active");
- _PQ_SET_ACTIVE();
- _PQ_ASSERT_NOT_QUEUED(pthread, "_waitq_insert: Already in queue");
-
- if (pthread->wakeup_time.tv_sec == -1)
- TAILQ_INSERT_TAIL(&_waitingq, pthread, pqe);
- else {
- tid = TAILQ_FIRST(&_waitingq);
- while ((tid != NULL) && (tid->wakeup_time.tv_sec != -1) &&
- ((tid->wakeup_time.tv_sec < pthread->wakeup_time.tv_sec) ||
- ((tid->wakeup_time.tv_sec == pthread->wakeup_time.tv_sec) &&
- (tid->wakeup_time.tv_nsec <= pthread->wakeup_time.tv_nsec))))
- tid = TAILQ_NEXT(tid, pqe);
- if (tid == NULL)
- TAILQ_INSERT_TAIL(&_waitingq, pthread, pqe);
- else
- TAILQ_INSERT_BEFORE(tid, pthread, pqe);
- }
- pthread->flags |= PTHREAD_FLAGS_IN_WAITQ;
-
- _PQ_CLEAR_ACTIVE();
-}
-
-void
-_waitq_remove(pthread_t pthread)
-{
- /*
- * Make some assertions when debugging is enabled:
- */
- _PQ_ASSERT_INACTIVE("_waitq_remove: pq_active");
- _PQ_SET_ACTIVE();
- _PQ_ASSERT_IN_WAITQ(pthread, "_waitq_remove: Not in queue");
-
- TAILQ_REMOVE(&_waitingq, pthread, pqe);
- pthread->flags &= ~PTHREAD_FLAGS_IN_WAITQ;
-
- _PQ_CLEAR_ACTIVE();
-}
-
-void
-_waitq_setactive(void)
-{
- _PQ_ASSERT_INACTIVE("_waitq_setactive: pq_active");
- _PQ_SET_ACTIVE();
-}
-
-void
-_waitq_clearactive(void)
-{
- _PQ_ASSERT_ACTIVE("_waitq_clearactive: ! pq_active");
- _PQ_CLEAR_ACTIVE();
-}
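
The removed _waitq_insert() kept the wait queue sorted by wakeup time, with tv_sec == -1 meaning "no timeout" and sorting to the tail; that ordering now lives behind KSE_WAITQ_INSERT()/kse_waitq_insert(). A stand-alone sketch of the same sorted insertion, using a simplified singly linked list in place of the TAILQ:

#include <stdio.h>
#include <time.h>

struct toy_thr {
	struct timespec	wakeup_time;
	struct toy_thr	*next;
};

static struct toy_thr *waitq;

static int
wakes_before(const struct timespec *a, const struct timespec *b)
{
	if (b->tv_sec == -1)		/* b never times out */
		return (1);
	if (a->tv_sec == -1)		/* a never times out */
		return (0);
	return (a->tv_sec < b->tv_sec ||
	    (a->tv_sec == b->tv_sec && a->tv_nsec <= b->tv_nsec));
}

static void
waitq_insert(struct toy_thr *t)
{
	struct toy_thr **pp = &waitq;

	while (*pp != NULL && wakes_before(&(*pp)->wakeup_time,
	    &t->wakeup_time))
		pp = &(*pp)->next;
	t->next = *pp;
	*pp = t;
}

int
main(void)
{
	struct toy_thr a = { { 5, 0 }, NULL };
	struct toy_thr b = { { 2, 0 }, NULL };
	struct toy_thr c = { { -1, 0 }, NULL };	/* no timeout */

	waitq_insert(&a);
	waitq_insert(&b);
	waitq_insert(&c);
	for (struct toy_thr *t = waitq; t != NULL; t = t->next)
		printf("%ld\n", (long)t->wakeup_time.tv_sec);	/* 2 5 -1 */
	return (0);
}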
diff --git a/lib/libkse/thread/thr_private.h b/lib/libkse/thread/thr_private.h
index 7c5cc87..41a6693 100644
--- a/lib/libkse/thread/thr_private.h
+++ b/lib/libkse/thread/thr_private.h
@@ -38,17 +38,9 @@
#define _THR_PRIVATE_H
/*
- * Evaluate the storage class specifier.
- */
-#ifdef GLOBAL_PTHREAD_PRIVATE
-#define SCLASS
-#else
-#define SCLASS extern
-#endif
-
-/*
* Include files.
*/
+#include <setjmp.h>
#include <signal.h>
#include <stdio.h>
#include <sys/queue.h>
@@ -57,110 +49,81 @@
#include <sys/cdefs.h>
#include <sys/kse.h>
#include <sched.h>
-#include <spinlock.h>
#include <ucontext.h>
+#include <unistd.h>
+#include <pthread.h>
#include <pthread_np.h>
+#include "ksd.h"
+#include "lock.h"
+#include "pthread_md.h"
+
+/*
+ * Evaluate the storage class specifier.
+ */
+#ifdef GLOBAL_PTHREAD_PRIVATE
+#define SCLASS
+#define SCLASS_PRESET(x...) = x
+#else
+#define SCLASS extern
+#define SCLASS_PRESET(x...)
+#endif
+
/*
* Kernel fatal error handler macro.
*/
-#define PANIC(string) _thread_exit(__FILE__,__LINE__,string)
+#define PANIC(string) _thr_exit(__FILE__,__LINE__,string)
/* Output debug messages like this: */
-#define stdout_debug(args...) _thread_printf(STDOUT_FILENO, args)
-#define stderr_debug(args...) _thread_printf(STDOUT_FILENO, args)
+#define stdout_debug(args...) _thread_printf(STDOUT_FILENO, ##args)
+#define stderr_debug(args...) _thread_printf(STDOUT_FILENO, ##args)
-/*
- * Priority queue manipulation macros (using pqe link):
- */
-#define PTHREAD_PRIOQ_INSERT_HEAD(thrd) _pq_insert_head(&_readyq,thrd)
-#define PTHREAD_PRIOQ_INSERT_TAIL(thrd) _pq_insert_tail(&_readyq,thrd)
-#define PTHREAD_PRIOQ_REMOVE(thrd) _pq_remove(&_readyq,thrd)
-#define PTHREAD_PRIOQ_FIRST() _pq_first(&_readyq)
+#define DBG_MUTEX 0x0001
+#define DBG_SIG 0x0002
-/*
- * Waiting queue manipulation macros (using pqe link):
- */
-#define PTHREAD_WAITQ_REMOVE(thrd) _waitq_remove(thrd)
-#define PTHREAD_WAITQ_INSERT(thrd) _waitq_insert(thrd)
-#if defined(_PTHREADS_INVARIANTS)
-#define PTHREAD_WAITQ_CLEARACTIVE() _waitq_clearactive()
-#define PTHREAD_WAITQ_SETACTIVE() _waitq_setactive()
-#else
-#define PTHREAD_WAITQ_CLEARACTIVE()
-#define PTHREAD_WAITQ_SETACTIVE()
-#endif
-
-/*
- * Work queue manipulation macros (using qe link):
- */
-#define PTHREAD_WORKQ_INSERT(thrd) do { \
- TAILQ_INSERT_TAIL(&_workq,thrd,qe); \
- (thrd)->flags |= PTHREAD_FLAGS_IN_WORKQ; \
-} while (0)
-#define PTHREAD_WORKQ_REMOVE(thrd) do { \
- TAILQ_REMOVE(&_workq,thrd,qe); \
- (thrd)->flags &= ~PTHREAD_FLAGS_IN_WORKQ; \
+#define THR_ASSERT(cond, msg) do { \
+ if (!(cond)) \
+ PANIC(msg); \
} while (0)
/*
* State change macro without scheduling queue change:
*/
-#define PTHREAD_SET_STATE(thrd, newstate) do { \
+#define THR_SET_STATE(thrd, newstate) do { \
(thrd)->state = newstate; \
(thrd)->fname = __FILE__; \
(thrd)->lineno = __LINE__; \
} while (0)
+
/*
- * State change macro with scheduling queue change - This must be
- * called with preemption deferred (see thread_kern_sched_[un]defer).
+ * Define the signals to be used for scheduling.
*/
-#if defined(_PTHREADS_INVARIANTS)
-#include <assert.h>
-#define PTHREAD_ASSERT(cond, msg) do { \
- if (!(cond)) \
- PANIC(msg); \
-} while (0)
-#define PTHREAD_ASSERT_NOT_IN_SYNCQ(thrd) \
- PTHREAD_ASSERT((((thrd)->flags & PTHREAD_FLAGS_IN_SYNCQ) == 0), \
- "Illegal call from signal handler");
-#define PTHREAD_NEW_STATE(thrd, newstate) do { \
- if (_thread_kern_new_state != 0) \
- PANIC("Recursive PTHREAD_NEW_STATE"); \
- _thread_kern_new_state = 1; \
- if ((thrd)->state != newstate) { \
- if ((thrd)->state == PS_RUNNING) { \
- PTHREAD_PRIOQ_REMOVE(thrd); \
- PTHREAD_SET_STATE(thrd, newstate); \
- PTHREAD_WAITQ_INSERT(thrd); \
- } else if (newstate == PS_RUNNING) { \
- PTHREAD_WAITQ_REMOVE(thrd); \
- PTHREAD_SET_STATE(thrd, newstate); \
- PTHREAD_PRIOQ_INSERT_TAIL(thrd); \
- } \
- } \
- _thread_kern_new_state = 0; \
-} while (0)
-#else
-#define PTHREAD_ASSERT(cond, msg)
-#define PTHREAD_ASSERT_NOT_IN_SYNCQ(thrd)
-#define PTHREAD_NEW_STATE(thrd, newstate) do { \
- if ((thrd)->state != newstate) { \
- if ((thrd)->state == PS_RUNNING) { \
- PTHREAD_PRIOQ_REMOVE(thrd); \
- PTHREAD_WAITQ_INSERT(thrd); \
- } else if (newstate == PS_RUNNING) { \
- PTHREAD_WAITQ_REMOVE(thrd); \
- PTHREAD_PRIOQ_INSERT_TAIL(thrd); \
- } \
- } \
- PTHREAD_SET_STATE(thrd, newstate); \
-} while (0)
-#endif
+#define _ITIMER_SCHED_TIMER ITIMER_PROF
+#define _SCHED_SIGNAL SIGPROF
+
+#define TIMESPEC_ADD(dst, src, val) \
+ do { \
+ (dst)->tv_sec = (src)->tv_sec + (val)->tv_sec; \
+ (dst)->tv_nsec = (src)->tv_nsec + (val)->tv_nsec; \
+ if ((dst)->tv_nsec > 1000000000) { \
+ (dst)->tv_sec++; \
+ (dst)->tv_nsec -= 1000000000; \
+ } \
+ } while (0)
+
+#define TIMESPEC_SUB(dst, src, val) \
+ do { \
+ (dst)->tv_sec = (src)->tv_sec - (val)->tv_sec; \
+ (dst)->tv_nsec = (src)->tv_nsec - (val)->tv_nsec; \
+ if ((dst)->tv_nsec < 0) { \
+ (dst)->tv_sec--; \
+ (dst)->tv_nsec += 1000000000; \
+ } \
+ } while (0)
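
A small self-contained demonstration of the TIMESPEC_ADD arithmetic defined above, doing the same wakeup-time calculation that _nanosleep() now performs (the macro body is copied from the header; the sample values are made up):

#include <stdio.h>
#include <time.h>

#define TIMESPEC_ADD(dst, src, val)					\
	do {								\
		(dst)->tv_sec = (src)->tv_sec + (val)->tv_sec;		\
		(dst)->tv_nsec = (src)->tv_nsec + (val)->tv_nsec;	\
		if ((dst)->tv_nsec > 1000000000) {			\
			(dst)->tv_sec++;				\
			(dst)->tv_nsec -= 1000000000;			\
		}							\
	} while (0)

int
main(void)
{
	struct timespec now = { 100, 900000000 };	/* 100.9 s */
	struct timespec delay = { 0, 250000000 };	/* 0.25 s */
	struct timespec wakeup;

	TIMESPEC_ADD(&wakeup, &now, &delay);
	printf("wake at %ld.%09ld\n", (long)wakeup.tv_sec, wakeup.tv_nsec);
	/* prints: wake at 101.150000000 */
	return (0);
}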
/*
* Priority queues.
@@ -178,29 +141,167 @@ typedef struct pq_queue {
TAILQ_HEAD(, pq_list) pq_queue; /* queue of priority lists */
pq_list_t *pq_lists; /* array of all priority lists */
int pq_size; /* number of priority lists */
+#define PQF_ACTIVE 0x0001
+ int pq_flags;
} pq_queue_t;
+/*
+ * Each KSEG has a scheduling queue. For now, threads that exist in their
+ * own KSEG (system scope) will get a full priority queue. In the future
+ * this can be optimized for the single thread per KSEG case.
+ */
+struct sched_queue {
+ pq_queue_t sq_runq;
+ TAILQ_HEAD(, pthread) sq_waitq; /* waiting in userland */
+ TAILQ_HEAD(, pthread) sq_blockedq; /* waiting in kernel */
+};
+
+/* Used to maintain pending and active signals: */
+struct sigstatus {
+ siginfo_t *info; /* arg 2 to signal handler */
+ int pending; /* Is this a pending signal? */
+ int blocked; /*
+ * This signal has occured and hasn't
+				 * This signal has occurred and hasn't
+ * signals until the handler is done.
+ */
+ int signo;
+};
+
+typedef struct kse_thr_mailbox *kse_critical_t;
+
+struct kse_group;
+
+#define MAX_KSE_LOCKLEVEL 3
+struct kse {
+ struct kse_mailbox k_mbx; /* kernel kse mailbox */
+ /* -- location and order specific items for gdb -- */
+ struct pthread *k_curthread; /* current thread */
+ struct kse_group *k_kseg; /* parent KSEG */
+ struct sched_queue *k_schedq; /* scheduling queue */
+ /* -- end of location and order specific items -- */
+ TAILQ_ENTRY(kse) k_qe; /* link entry */
+ struct ksd k_ksd; /* KSE specific data */
+ /*
+ * Items that are only modified by the kse, or that otherwise
+ * don't need to be locked when accessed
+ */
+ struct lock k_lock;
+ struct lockuser k_lockusers[MAX_KSE_LOCKLEVEL];
+ int k_locklevel;
+ sigset_t k_sigmask;
+ struct sigstatus k_sigq[NSIG];
+ int k_check_sigq;
+ long k_resched; /* scheduling signal arrived */
+ int k_flags;
+#define KF_STARTED 0x0001 /* kernel kse created */
+#define KF_INITIALIZED 0x0002 /* initialized on 1st upcall */
+ int k_cpu; /* CPU ID when bound */
+ int k_done; /* this KSE is done */
+};
+
+/*
+ * Each KSE group contains one or more KSEs in which threads can run.
+ * At least for now, there is one scheduling queue per KSE group; KSEs
+ * within the same KSE group compete for threads from the same scheduling
+ * queue. A scope system thread has one KSE in one KSE group; the group
+ * does not use its scheduling queue.
+ */
+struct kse_group {
+ TAILQ_HEAD(, kse) kg_kseq; /* list of KSEs in group */
+ TAILQ_HEAD(, pthread) kg_threadq; /* list of threads in group */
+ TAILQ_ENTRY(kse_group) kg_qe; /* link entry */
+ struct sched_queue kg_schedq; /* scheduling queue */
+ struct lock kg_lock;
+ int kg_threadcount; /* # of assigned threads */
+ int kg_idle_kses;
+ int kg_flags;
+#define KGF_SINGLE_THREAD 0x0001 /* scope system kse group */
+#define KGF_SCHEDQ_INITED 0x0002 /* has an initialized schedq */
+};
+
+/*
+ * Lock acquire and release for KSEs.
+ */
+#define KSE_LOCK_ACQUIRE(kse, lck) \
+do { \
+ if ((kse)->k_locklevel >= MAX_KSE_LOCKLEVEL) \
+ PANIC("Exceeded maximum lock level"); \
+ else { \
+ (kse)->k_locklevel++; \
+ _lock_acquire((lck), \
+ &(kse)->k_lockusers[(kse)->k_locklevel - 1], 0); \
+ } \
+} while (0)
+
+#define KSE_LOCK_RELEASE(kse, lck) \
+do { \
+ if ((kse)->k_locklevel > 0) { \
+ _lock_release((lck), \
+ &(kse)->k_lockusers[(kse)->k_locklevel - 1]); \
+ (kse)->k_locklevel--; \
+ } \
+} while (0)
+
+/*
+ * Lock our own KSEG.
+ */
+#define KSE_LOCK(curkse) \
+ KSE_LOCK_ACQUIRE(curkse, &(curkse)->k_kseg->kg_lock)
+#define KSE_UNLOCK(curkse) \
+ KSE_LOCK_RELEASE(curkse, &(curkse)->k_kseg->kg_lock)
+
+/*
+ * Lock a potentially different KSEG.
+ */
+#define KSE_SCHED_LOCK(curkse, kseg) \
+ KSE_LOCK_ACQUIRE(curkse, &(kseg)->kg_lock)
+#define KSE_SCHED_UNLOCK(curkse, kseg) \
+ KSE_LOCK_RELEASE(curkse, &(kseg)->kg_lock)
+
+/*
+ * Waiting queue manipulation macros (using pqe link):
+ */
+#define KSE_WAITQ_REMOVE(kse, thrd) \
+do { \
+ if (((thrd)->flags & THR_FLAGS_IN_WAITQ) != 0) { \
+ TAILQ_REMOVE(&(kse)->k_schedq->sq_waitq, thrd, pqe); \
+ (thrd)->flags &= ~THR_FLAGS_IN_WAITQ; \
+ } \
+} while (0)
+#define KSE_WAITQ_INSERT(kse, thrd) kse_waitq_insert(thrd)
+#define KSE_WAITQ_FIRST(kse) TAILQ_FIRST(&(kse)->k_schedq->sq_waitq)
+
+#define KSE_SET_WAIT(kse) \
+ atomic_store_rel_int(&(kse)->k_mbx.km_flags, 1)
+
+#define KSE_CLEAR_WAIT(kse) \
+ atomic_set_acq_int(&(kse)->k_mbx.km_flags, 0)
+
+#define KSE_WAITING(kse) (kse)->k_mbx.km_flags != 0
+#define KSE_WAKEUP(kse) kse_wakeup(&(kse)->k_mbx)
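
KSE_SET_WAIT/KSE_CLEAR_WAIT/KSE_WAITING implement a simple "about to idle" flag in the KSE mailbox, which other KSEs test before deciding whether a kse_wakeup() is needed. A sketch of that protocol, using C11 atomics in place of the FreeBSD atomic_*() routines; the types and names are simplified stand-ins:

#include <stdatomic.h>
#include <stdio.h>

struct toy_kse {
	atomic_int	waiting;	/* stands in for k_mbx.km_flags */
};

static void
kse_about_to_idle(struct toy_kse *k)
{
	atomic_store_explicit(&k->waiting, 1, memory_order_release);
	/* ...re-check the run queue, then block in the kernel... */
}

static void
kse_wakeup_if_waiting(struct toy_kse *k)
{
	if (atomic_load_explicit(&k->waiting, memory_order_acquire) != 0) {
		atomic_store_explicit(&k->waiting, 0, memory_order_relaxed);
		printf("would call kse_wakeup() here\n");
	}
}

int
main(void)
{
	struct toy_kse k = { 0 };

	kse_about_to_idle(&k);
	kse_wakeup_if_waiting(&k);
	return (0);
}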
/*
* TailQ initialization values.
*/
#define TAILQ_INITIALIZER { NULL, NULL }
-/*
- * Mutex definitions.
+/*
+ * lock initialization values.
*/
-union pthread_mutex_data {
- void *m_ptr;
- int m_count;
-};
+#define LCK_INITIALIZER { NULL, NULL, LCK_DEFAULT }
struct pthread_mutex {
+ /*
+ * Lock for accesses to this structure.
+ */
+ struct lock m_lock;
enum pthread_mutextype m_type;
int m_protocol;
TAILQ_HEAD(mutex_head, pthread) m_queue;
struct pthread *m_owner;
- union pthread_mutex_data m_data;
long m_flags;
+ int m_count;
int m_refcount;
/*
@@ -221,11 +322,6 @@ struct pthread_mutex {
* Link for list of all mutexes a thread currently owns.
*/
TAILQ_ENTRY(pthread_mutex) m_qe;
-
- /*
- * Lock for accesses to this structure.
- */
- spinlock_t lock;
};
/*
@@ -238,10 +334,10 @@ struct pthread_mutex {
/*
* Static mutex initialization values.
*/
-#define PTHREAD_MUTEX_STATIC_INITIALIZER \
- { PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, TAILQ_INITIALIZER, \
- NULL, { NULL }, MUTEX_FLAGS_PRIVATE, 0, 0, 0, TAILQ_INITIALIZER, \
- _SPINLOCK_INITIALIZER }
+#define PTHREAD_MUTEX_STATIC_INITIALIZER \
+ { LCK_INITIALIZER, PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, \
+ TAILQ_INITIALIZER, NULL, MUTEX_FLAGS_PRIVATE, 0, 0, 0, 0, \
+ TAILQ_INITIALIZER }
struct pthread_mutex_attr {
enum pthread_mutextype m_type;
@@ -262,17 +358,15 @@ enum pthread_cond_type {
};
struct pthread_cond {
- enum pthread_cond_type c_type;
- TAILQ_HEAD(cond_head, pthread) c_queue;
- pthread_mutex_t c_mutex;
- void *c_data;
- long c_flags;
- int c_seqno;
-
/*
* Lock for accesses to this structure.
*/
- spinlock_t lock;
+ struct lock c_lock;
+ enum pthread_cond_type c_type;
+ TAILQ_HEAD(cond_head, pthread) c_queue;
+ struct pthread_mutex *c_mutex;
+ long c_flags;
+ long c_seqno;
};
struct pthread_cond_attr {
@@ -290,9 +384,9 @@ struct pthread_cond_attr {
/*
* Static cond initialization values.
*/
-#define PTHREAD_COND_STATIC_INITIALIZER \
- { COND_TYPE_FAST, TAILQ_INITIALIZER, NULL, NULL, \
- 0, 0, _SPINLOCK_INITIALIZER }
+#define PTHREAD_COND_STATIC_INITIALIZER \
+ { LCK_INITIALIZER, COND_TYPE_FAST, TAILQ_INITIALIZER, \
+ NULL, NULL, 0, 0 }
/*
* Semaphore definitions.
@@ -321,6 +415,7 @@ struct pthread_attr {
int sched_interval;
int prio;
int suspend;
+#define THR_STACK_USER 0x100 /* 0xFF reserved for <pthread.h> */
int flags;
void *arg_attr;
void (*cleanup_attr) ();
@@ -332,31 +427,20 @@ struct pthread_attr {
/*
* Thread creation state attributes.
*/
-#define PTHREAD_CREATE_RUNNING 0
-#define PTHREAD_CREATE_SUSPENDED 1
+#define THR_CREATE_RUNNING 0
+#define THR_CREATE_SUSPENDED 1
/*
* Miscellaneous definitions.
*/
-#define PTHREAD_STACK_DEFAULT 65536
-/*
- * Size of default red zone at the end of each stack. In actuality, this "red
- * zone" is merely an unmapped region, except in the case of the initial stack.
- * Since mmap() makes it possible to specify the maximum growth of a MAP_STACK
- * region, an unmapped gap between thread stacks achieves the same effect as
- * explicitly mapped red zones.
- * This is declared and initialized in uthread_init.c.
- */
-extern int _pthread_guard_default;
-
-extern int _pthread_page_size;
+#define THR_STACK_DEFAULT 65536
/*
* Maximum size of initial thread's stack. This perhaps deserves to be larger
* than the stacks of other threads, since many applications are likely to run
* almost entirely on this stack.
*/
-#define PTHREAD_STACK_INITIAL 0x100000
+#define THR_STACK_INITIAL 0x100000
/*
* Define the different priority ranges. All applications have thread
@@ -372,21 +456,20 @@ extern int _pthread_page_size;
* The approach taken is that, within each class, signal delivery
* always has priority over thread execution.
*/
-#define PTHREAD_DEFAULT_PRIORITY 15
-#define PTHREAD_MIN_PRIORITY 0
-#define PTHREAD_MAX_PRIORITY 31 /* 0x1F */
-#define PTHREAD_SIGNAL_PRIORITY 32 /* 0x20 */
-#define PTHREAD_RT_PRIORITY 64 /* 0x40 */
-#define PTHREAD_FIRST_PRIORITY PTHREAD_MIN_PRIORITY
-#define PTHREAD_LAST_PRIORITY \
- (PTHREAD_MAX_PRIORITY + PTHREAD_SIGNAL_PRIORITY + PTHREAD_RT_PRIORITY)
-#define PTHREAD_BASE_PRIORITY(prio) ((prio) & PTHREAD_MAX_PRIORITY)
+#define THR_DEFAULT_PRIORITY 15
+#define THR_MIN_PRIORITY 0
+#define THR_MAX_PRIORITY 31 /* 0x1F */
+#define THR_SIGNAL_PRIORITY 32 /* 0x20 */
+#define THR_RT_PRIORITY 64 /* 0x40 */
+#define THR_FIRST_PRIORITY THR_MIN_PRIORITY
+#define THR_LAST_PRIORITY \
+ (THR_MAX_PRIORITY + THR_SIGNAL_PRIORITY + THR_RT_PRIORITY)
+#define THR_BASE_PRIORITY(prio) ((prio) & THR_MAX_PRIORITY)
/*
* Clock resolution in microseconds.
*/
#define CLOCK_RES_USEC 10000
-#define CLOCK_RES_USEC_MIN 1000
/*
* Time slice period in microseconds.
@@ -394,16 +477,17 @@ extern int _pthread_page_size;
#define TIMESLICE_USEC 20000
/*
- * Define a thread-safe macro to get the current time of day
- * which is updated at regular intervals by the scheduling signal
- * handler.
+ * XXX - Define a thread-safe macro to get the current time of day
+ * which is updated at regular intervals by something.
+ *
+ * For now, we just make the system call to get the time.
*/
-#define GET_CURRENT_TOD(tv) \
- do { \
- tv.tv_sec = _sched_tod.tv_sec; \
- tv.tv_usec = _sched_tod.tv_usec; \
- } while (tv.tv_sec != _sched_tod.tv_sec)
-
+#define KSE_GET_TOD(curkse, tsp) \
+do { \
+ *tsp = (curkse)->k_mbx.km_timeofday; \
+ if ((tsp)->tv_sec == 0) \
+ clock_gettime(CLOCK_REALTIME, tsp); \
+} while (0)
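
A stand-alone sketch of the KSE_GET_TOD fallback above: the time of day comes from the KSE mailbox when the kernel has filled it in, and from clock_gettime() otherwise. The mailbox is reduced to a bare struct timespec here:

#include <stdio.h>
#include <time.h>

struct toy_mbx { struct timespec km_timeofday; };

static void
toy_get_tod(const struct toy_mbx *mbx, struct timespec *tsp)
{
	*tsp = mbx->km_timeofday;
	if (tsp->tv_sec == 0)		/* kernel has not filled it in yet */
		clock_gettime(CLOCK_REALTIME, tsp);
}

int
main(void)
{
	struct toy_mbx mbx = { { 0, 0 } };
	struct timespec ts;

	toy_get_tod(&mbx, &ts);
	printf("%ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
	return (0);
}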
struct pthread_rwlockattr {
int pshared;
@@ -422,13 +506,12 @@ struct pthread_rwlock {
*/
enum pthread_state {
PS_RUNNING,
+ PS_LOCKWAIT,
PS_MUTEX_WAIT,
PS_COND_WAIT,
PS_SLEEP_WAIT,
- PS_WAIT_WAIT,
PS_SIGSUSPEND,
PS_SIGWAIT,
- PS_SPINBLOCK,
PS_JOIN,
PS_SUSPENDED,
PS_DEAD,
@@ -437,19 +520,11 @@ enum pthread_state {
};
-/*
- * File descriptor locking definitions.
- */
-#define FD_READ 0x1
-#define FD_WRITE 0x2
-#define FD_RDWR (FD_READ | FD_WRITE)
-
union pthread_wait_data {
pthread_mutex_t mutex;
pthread_cond_t cond;
const sigset_t *sigwait; /* Waiting on a signal in sigwait */
- spinlock_t *spinlock;
- struct pthread *thread;
+ struct lock *lock;
};
/*
@@ -458,26 +533,37 @@ union pthread_wait_data {
*/
typedef void (*thread_continuation_t) (void *);
+/*
+ * This stores a thread's state prior to running a signal handler.
+ * It is used when a signal is delivered to a thread blocked in
+ * userland. If the signal handler returns normally, the thread's
+ * state is restored from here.
+ */
+struct pthread_sigframe {
+ int psf_flags;
+ int psf_interrupted;
+ int psf_signo;
+ enum pthread_state psf_state;
+ union pthread_wait_data psf_wait_data;
+ struct timespec psf_wakeup_time;
+ sigset_t psf_sigset;
+ sigset_t psf_sigmask;
+ int psf_seqno;
+};
+
struct join_status {
struct pthread *thread;
void *ret;
int error;
};
-struct pthread_state_data {
- struct timespec psd_wakeup_time;
- union pthread_wait_data psd_wait_data;
- enum pthread_state psd_state;
- int psd_flags;
- int psd_interrupted;
- int psd_sig_defer_count;
-};
-
struct pthread_specific_elem {
const void *data;
int seqno;
};
+
+#define MAX_THR_LOCKLEVEL 3
/*
* Thread structure.
*/
@@ -486,21 +572,27 @@ struct pthread {
* Magic value to help recognize a valid thread structure
* from an invalid one:
*/
-#define PTHREAD_MAGIC ((u_int32_t) 0xd09ba115)
+#define THR_MAGIC ((u_int32_t) 0xd09ba115)
u_int32_t magic;
char *name;
u_int64_t uniqueid; /* for gdb */
+ /* Queue entry for list of all threads: */
+ TAILQ_ENTRY(pthread) tle; /* link for all threads in process */
+ TAILQ_ENTRY(pthread) kle; /* link for all threads in KSE/KSEG */
+
+ /* Queue entry for GC lists: */
+ TAILQ_ENTRY(pthread) gcle;
+
/*
* Lock for accesses to this thread structure.
*/
- spinlock_t lock;
-
- /* Queue entry for list of all threads: */
- TAILQ_ENTRY(pthread) tle;
-
- /* Queue entry for list of dead threads: */
- TAILQ_ENTRY(pthread) dle;
+ struct lock lock;
+ struct lockuser lockusers[MAX_THR_LOCKLEVEL];
+ int locklevel;
+ kse_critical_t critical[MAX_KSE_LOCKLEVEL];
+ struct kse *kse;
+ struct kse_group *kseg;
/*
* Thread start routine, argument, stack pointer and thread
@@ -508,57 +600,67 @@ struct pthread {
*/
void *(*start_routine)(void *);
void *arg;
- void *stack;
struct pthread_attr attr;
/*
- * Machine context, including signal state.
+ * Thread mailbox.
*/
- struct kse_thr_mailbox mailbox;
+ struct kse_thr_mailbox tmbx;
+ int active; /* thread running */
+ int blocked; /* thread blocked in kernel */
+ int need_switchout;
+ int need_wakeup;
/*
+ * Used for tracking delivery of signal handlers.
+ */
+ struct pthread_sigframe *curframe;
+ siginfo_t siginfo[NSIG];
+
+ /*
* Cancelability flags - the lower 2 bits are used by cancel
* definitions in pthread.h
*/
-#define PTHREAD_AT_CANCEL_POINT 0x0004
-#define PTHREAD_CANCELLING 0x0008
-#define PTHREAD_CANCEL_NEEDED 0x0010
- int cancelflags;
+#define THR_AT_CANCEL_POINT 0x0004
+#define THR_CANCELLING 0x0008
+#define THR_CANCEL_NEEDED 0x0010
+ int cancelflags;
thread_continuation_t continuation;
- /* Currently pending signals. */
- sigset_t sigpend;
+ /*
+ * The thread's base and pending signal masks. The active
+ * signal mask is stored in the thread's context (in mailbox).
+ */
+ sigset_t sigmask;
+ sigset_t sigpend;
+ int sigmask_seqno;
+ int check_pending;
+ int refcount;
/* Thread state: */
enum pthread_state state;
- /* Scheduling clock when this thread was last made active. */
- long last_active;
-
- /* Scheduling clock when this thread was last made inactive. */
- long last_inactive;
-
/*
* Number of microseconds accumulated by this thread when
* time slicing is active.
*/
- long slice_usec;
+ long slice_usec;
/*
* Time to wake up thread. This is used for sleeping threads and
- * for any operation which may time out.
+ * for any operation which may time out (such as select).
*/
- struct timespec wakeup_time;
+ struct timespec wakeup_time;
/* TRUE if operation has timed out. */
- int timeout;
+ int timeout;
/*
* Error variable used instead of errno. The function __error()
* returns a pointer to this.
*/
- int error;
+ int error;
/*
* The joiner is the thread that is joining to this thread. The
@@ -573,28 +675,21 @@ struct pthread {
*
* o A queue of threads waiting for a mutex
* o A queue of threads waiting for a condition variable
- * o A queue of threads waiting for a file descriptor lock
- * o A queue of threads needing work done by the kernel thread
- * (waiting for a spinlock or file I/O)
- *
- * A thread can also be joining a thread (the joiner field above).
*
- * It must not be possible for a thread to belong to any of the
- * above queues while it is handling a signal. Signal handlers
- * may longjmp back to previous stack frames circumventing normal
- * control flow. This could corrupt queue integrity if the thread
- * retains membership in the queue. Therefore, if a thread is a
- * member of one of these queues when a signal handler is invoked,
- * it must remove itself from the queue before calling the signal
- * handler and reinsert itself after normal return of the handler.
+ * It is possible for a thread to belong to more than one of the
+ * above queues if it is handling a signal. A thread may only
+ * enter a mutex or condition variable queue when it is not
+ * being called from a signal handler. If a thread is a member
+ * of one of these queues when a signal handler is invoked, it
+ * must be removed from the queue before invoking the handler
+ * and then added back to the queue after return from the handler.
*
* Use pqe for the scheduling queue link (both ready and waiting),
- * sqe for synchronization (mutex and condition variable) queue
- * links, and qe for all other links.
+ * sqe for synchronization (mutex, condition variable, and join)
+ * queue links, and qe for all other links.
*/
- TAILQ_ENTRY(pthread) pqe; /* priority queue link */
+ TAILQ_ENTRY(pthread) pqe; /* priority, wait queues link */
TAILQ_ENTRY(pthread) sqe; /* synchronization queue link */
- TAILQ_ENTRY(pthread) qe; /* all other queues link */
/* Wait data. */
union pthread_wait_data data;
@@ -603,40 +698,43 @@ struct pthread {
* Set to TRUE if a blocking operation was
* interrupted by a signal:
*/
- int interrupted;
+ int interrupted;
/* Signal number when in state PS_SIGWAIT: */
- int signo;
+ int signo;
/*
- * Set to non-zero when this thread has deferred signals.
- * We allow for recursive deferral.
+ * Set to non-zero when this thread has entered a critical
+ * region. We allow for recursive entries into critical regions.
*/
- int sig_defer_count;
-
- /* Miscellaneous flags; only set with signals deferred. */
- int flags;
-#define PTHREAD_FLAGS_PRIVATE 0x0001
-#define PTHREAD_EXITING 0x0002
-#define PTHREAD_FLAGS_IN_WAITQ 0x0004 /* in waiting queue using pqe link */
-#define PTHREAD_FLAGS_IN_PRIOQ 0x0008 /* in priority queue using pqe link */
-#define PTHREAD_FLAGS_IN_WORKQ 0x0010 /* in work queue using qe link */
-#define PTHREAD_FLAGS_IN_FILEQ 0x0020 /* in file lock queue using qe link */
- /* 0x0040 Unused. */
-#define PTHREAD_FLAGS_IN_CONDQ 0x0080 /* in condition queue using sqe link*/
-#define PTHREAD_FLAGS_IN_MUTEXQ 0x0100 /* in mutex queue using sqe link */
-#define PTHREAD_FLAGS_SUSPENDED 0x0200 /* thread is suspended */
-#define PTHREAD_FLAGS_TRACE 0x0400 /* for debugging purposes */
-#define PTHREAD_FLAGS_IN_SYNCQ \
- (PTHREAD_FLAGS_IN_CONDQ | PTHREAD_FLAGS_IN_MUTEXQ)
+ int critical_count;
/*
+ * Set to TRUE if this thread should yield after leaving a
+ * critical region to check for signals, messages, etc.
+ */
+ int critical_yield;
+
+ int sflags;
+#define THR_FLAGS_IN_SYNCQ 0x0001
+
+ /* Miscellaneous flags; only set with scheduling lock held. */
+ int flags;
+#define THR_FLAGS_PRIVATE 0x0001
+#define THR_FLAGS_IN_WAITQ 0x0002 /* in waiting queue using pqe link */
+#define THR_FLAGS_IN_RUNQ 0x0004 /* in run queue using pqe link */
+#define THR_FLAGS_EXITING 0x0008 /* thread is exiting */
+#define THR_FLAGS_SUSPENDED 0x0010 /* thread is suspended */
+#define THR_FLAGS_GC_SAFE 0x0020 /* thread safe for cleaning */
+#define THR_FLAGS_IN_TDLIST 0x0040 /* thread in all thread list */
+#define THR_FLAGS_IN_GCLIST 0x0080 /* thread in gc list */
+ /*
	 * Base priority is the user settable and retrievable priority
* of the thread. It is only affected by explicit calls to
* set thread priority and upon thread creation via a thread
* attribute or default priority.
*/
- char base_priority;
+ char base_priority;
/*
* Inherited priority is the priority a thread inherits by
@@ -646,7 +744,7 @@ struct pthread {
* that is being waited on by any other thread whose priority
* is non-zero.
*/
- char inherited_priority;
+ char inherited_priority;
/*
* Active priority is always the maximum of the threads base
@@ -654,10 +752,10 @@ struct pthread {
* in either the base or inherited priority, the active
* priority must be recalculated.
*/
- char active_priority;
+ char active_priority;
/* Number of priority ceiling or protection mutexes owned. */
- int priority_mutex_count;
+ int priority_mutex_count;
/*
* Queue of currently owned mutexes.
@@ -675,212 +773,243 @@ struct pthread {
};
/*
- * Global variables for the uthread kernel.
+ * Critical regions can also be detected by looking at the threads
+ * current lock level. Ensure these macros increment and decrement
+ * the lock levels such that locks can not be held with a lock level
+ * of 0.
*/
+#define THR_IN_CRITICAL(thrd) \
+ (((thrd)->locklevel > 0) || \
+ ((thrd)->critical_count > 0))
+
+#define THR_YIELD_CHECK(thrd) \
+do { \
+ if (((thrd)->critical_yield != 0) && \
+ !(THR_IN_CRITICAL(thrd))) \
+ _thr_sched_switch(thrd); \
+ else if (((thrd)->check_pending != 0) && \
+ !(THR_IN_CRITICAL(thrd))) \
+ _thr_sig_check_pending(thrd); \
+} while (0)
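
THR_IN_CRITICAL and THR_YIELD_CHECK above, together with THR_CRITICAL_ENTER/THR_CRITICAL_LEAVE further down, defer a requested context switch until the thread's critical count drops back to zero. A simplified stand-alone sketch of that accounting:

#include <stdio.h>

struct toy_thr {
	int	critical_count;
	int	critical_yield;
};

static void
toy_sched_switch(struct toy_thr *t)
{
	printf("context switch happens now\n");
	t->critical_yield = 0;
}

static void
toy_critical_leave(struct toy_thr *t)
{
	t->critical_count--;
	if (t->critical_yield != 0 && t->critical_count == 0)
		toy_sched_switch(t);
}

int
main(void)
{
	struct toy_thr t = { 0, 0 };

	t.critical_count++;		/* THR_CRITICAL_ENTER */
	t.critical_yield = 1;		/* a yield was requested meanwhile */
	toy_critical_leave(&t);		/* count hits 0: switch runs here */
	return (0);
}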
-SCLASS void *_usrstack
-#ifdef GLOBAL_PTHREAD_PRIVATE
-= (void *) USRSTACK;
-#else
-;
-#endif
-
-/* Kernel thread structure used when there are no running threads: */
-SCLASS struct pthread _thread_kern_thread;
+#define THR_LOCK_ACQUIRE(thrd, lck) \
+do { \
+ if ((thrd)->locklevel >= MAX_THR_LOCKLEVEL) \
+ PANIC("Exceeded maximum lock level"); \
+ else { \
+ (thrd)->locklevel++; \
+ _lock_acquire((lck), \
+ &(thrd)->lockusers[(thrd)->locklevel - 1], \
+ (thrd)->active_priority); \
+ } \
+} while (0)
-/* Ptr to the thread structure for the running thread: */
-SCLASS struct pthread * volatile _thread_run
-#ifdef GLOBAL_PTHREAD_PRIVATE
-= &_thread_kern_thread;
-#else
-;
-#endif
+#define THR_LOCK_RELEASE(thrd, lck) \
+do { \
+ if ((thrd)->locklevel > 0) { \
+ _lock_release((lck), \
+ &(thrd)->lockusers[(thrd)->locklevel - 1]); \
+ (thrd)->locklevel--; \
+ if ((thrd)->locklevel != 0) \
+ ; \
+ else if ((thrd)->critical_yield != 0) \
+ _thr_sched_switch(thrd); \
+ else if ((thrd)->check_pending != 0) \
+ _thr_sig_check_pending(thrd); \
+ } \
+} while (0)
-/* Ptr to the thread structure for the last user thread to run: */
-SCLASS struct pthread * volatile _last_user_thread
-#ifdef GLOBAL_PTHREAD_PRIVATE
-= &_thread_kern_thread;
-#else
-;
-#endif
+/*
+ * For now, threads will have their own lock separate from their
+ * KSE scheduling lock.
+ */
+#define THR_LOCK(thr) THR_LOCK_ACQUIRE(thr, &(thr)->lock)
+#define THR_UNLOCK(thr) THR_LOCK_RELEASE(thr, &(thr)->lock)
+#define THR_THREAD_LOCK(curthrd, thr) THR_LOCK_ACQUIRE(curthrd, &(thr)->lock)
+#define THR_THREAD_UNLOCK(curthrd, thr) THR_LOCK_RELEASE(curthrd, &(thr)->lock)
-/* List of all threads: */
-SCLASS TAILQ_HEAD(, pthread) _thread_list
-#ifdef GLOBAL_PTHREAD_PRIVATE
-= TAILQ_HEAD_INITIALIZER(_thread_list);
-#else
-;
-#endif
+/*
+ * Priority queue manipulation macros (using pqe link). We use
+ * the thread's kseg link instead of the kse link because a thread
+ * does not (currently) have a statically assigned kse.
+ */
+#define THR_RUNQ_INSERT_HEAD(thrd) \
+ _pq_insert_head(&(thrd)->kseg->kg_schedq.sq_runq, thrd)
+#define THR_RUNQ_INSERT_TAIL(thrd) \
+ _pq_insert_tail(&(thrd)->kseg->kg_schedq.sq_runq, thrd)
+#define THR_RUNQ_REMOVE(thrd) \
+ _pq_remove(&(thrd)->kseg->kg_schedq.sq_runq, thrd)
+#define THR_RUNQ_FIRST() \
+ _pq_first(&(thrd)->kseg->kg_schedq.sq_runq)
-/* Time of day at last scheduling timer signal: */
-SCLASS struct timeval volatile _sched_tod
-#ifdef GLOBAL_PTHREAD_PRIVATE
-= { 0, 0 };
-#else
-;
-#endif
+/*
+ * Macros to insert/remove threads to the all thread list and
+ * the gc list.
+ */
+#define THR_LIST_ADD(thrd) do { \
+ if (((thrd)->flags & THR_FLAGS_IN_TDLIST) == 0) { \
+ TAILQ_INSERT_HEAD(&_thread_list, thrd, tle); \
+ (thrd)->flags |= THR_FLAGS_IN_TDLIST; \
+ } \
+} while (0)
+#define THR_LIST_REMOVE(thrd) do { \
+ if (((thrd)->flags & THR_FLAGS_IN_TDLIST) != 0) { \
+ TAILQ_REMOVE(&_thread_list, thrd, tle); \
+ (thrd)->flags &= ~THR_FLAGS_IN_TDLIST; \
+ } \
+} while (0)
+#define THR_GCLIST_ADD(thrd) do { \
+ if (((thrd)->flags & THR_FLAGS_IN_GCLIST) == 0) { \
+ TAILQ_INSERT_HEAD(&_thread_gc_list, thrd, tle); \
+ (thrd)->flags |= THR_FLAGS_IN_GCLIST; \
+ } \
+} while (0)
+#define THR_GCLIST_REMOVE(thrd) do { \
+ if (((thrd)->flags & THR_FLAGS_IN_GCLIST) != 0) { \
+ TAILQ_REMOVE(&_thread_gc_list, thrd, tle); \
+ (thrd)->flags &= ~THR_FLAGS_IN_GCLIST; \
+ } \
+} while (0)
/*
- * Current scheduling timer ticks; used as resource usage.
+ * Locking the scheduling queue for another thread uses that thread's
+ * KSEG lock.
*/
-SCLASS unsigned int volatile _sched_ticks
-#ifdef GLOBAL_PTHREAD_PRIVATE
-= 0;
-#else
-;
-#endif
+#define THR_SCHED_LOCK(curthr, thr) do { \
+ (curthr)->critical[(curthr)->locklevel] = _kse_critical_enter(); \
+ (curthr)->locklevel++; \
+ KSE_SCHED_LOCK((curthr)->kse, (thr)->kseg); \
+} while (0)
-/* Dead threads: */
-SCLASS TAILQ_HEAD(, pthread) _dead_list
-#ifdef GLOBAL_PTHREAD_PRIVATE
-= TAILQ_HEAD_INITIALIZER(_dead_list);
-#else
-;
-#endif
+#define THR_SCHED_UNLOCK(curthr, thr) do { \
+ KSE_SCHED_UNLOCK((curthr)->kse, (thr)->kseg); \
+ (curthr)->locklevel--; \
+ _kse_critical_leave((curthr)->critical[(curthr)->locklevel]); \
+ if ((curthr)->locklevel == 0) \
+ THR_YIELD_CHECK(curthr); \
+} while (0)
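
THR_SCHED_LOCK/THR_SCHED_UNLOCK bracket any change to another thread's scheduling state, as the reworked _nanosleep() and _pthread_resume_np() do. A sketch of the lock/modify/unlock pattern with simplified stand-in types; a pthread mutex plays the role of the KSEG lock (compile with -lpthread):

#include <pthread.h>
#include <stdio.h>

struct toy_kseg { pthread_mutex_t kg_lock; };
struct toy_thr  { struct toy_kseg *kseg; int state; };

#define TOY_PS_RUNNING	0
#define TOY_PS_SLEEP	1

static void
toy_set_sleeping(struct toy_thr *thr)
{
	pthread_mutex_lock(&thr->kseg->kg_lock);	/* THR_SCHED_LOCK   */
	thr->state = TOY_PS_SLEEP;			/* THR_SET_STATE    */
	pthread_mutex_unlock(&thr->kseg->kg_lock);	/* THR_SCHED_UNLOCK */
}

int
main(void)
{
	struct toy_kseg kseg = { PTHREAD_MUTEX_INITIALIZER };
	struct toy_thr thr = { &kseg, TOY_PS_RUNNING };

	toy_set_sleeping(&thr);
	printf("state = %d\n", thr.state);
	return (0);
}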
-/* Initial thread: */
-SCLASS struct pthread *_thread_initial
-#ifdef GLOBAL_PTHREAD_PRIVATE
-= NULL;
-#else
-;
-#endif
+#define THR_CRITICAL_ENTER(thr) (thr)->critical_count++
+#define THR_CRITICAL_LEAVE(thr) do { \
+ (thr)->critical_count--; \
+ if (((thr)->critical_yield != 0) && \
+ ((thr)->critical_count == 0)) { \
+ (thr)->critical_yield = 0; \
+ _thr_sched_switch(thr); \
+ } \
+} while (0)
-/* Default thread attributes: */
-SCLASS struct pthread_attr pthread_attr_default
-#ifdef GLOBAL_PTHREAD_PRIVATE
-= { SCHED_RR, 0, TIMESLICE_USEC, PTHREAD_DEFAULT_PRIORITY,
- PTHREAD_CREATE_RUNNING, PTHREAD_CREATE_JOINABLE, NULL, NULL, NULL,
- PTHREAD_STACK_DEFAULT, -1 };
-#else
-;
-#endif
+#define THR_IS_ACTIVE(thrd) \
+ ((thrd)->kse != NULL) && ((thrd)->kse->k_curthread == (thrd))
-/* Default mutex attributes: */
-SCLASS struct pthread_mutex_attr pthread_mutexattr_default
-#ifdef GLOBAL_PTHREAD_PRIVATE
-= { PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, 0 };
-#else
-;
-#endif
+#define THR_IN_SYNCQ(thrd) (((thrd)->sflags & THR_FLAGS_IN_SYNCQ) != 0)
+
+/*
+ * Global variables for the pthread kernel.
+ */
-/* Default condition variable attributes: */
-SCLASS struct pthread_cond_attr pthread_condattr_default
-#ifdef GLOBAL_PTHREAD_PRIVATE
-= { COND_TYPE_FAST, 0 };
-#else
-;
-#endif
+SCLASS void *_usrstack SCLASS_PRESET(NULL);
+SCLASS struct kse *_kse_initial SCLASS_PRESET(NULL);
+SCLASS struct pthread *_thr_initial SCLASS_PRESET(NULL);
-SCLASS int _clock_res_usec /* Clock resolution in usec. */
-#ifdef GLOBAL_PTHREAD_PRIVATE
-= CLOCK_RES_USEC;
-#else
-;
-#endif
+/* List of all threads: */
+SCLASS TAILQ_HEAD(, pthread) _thread_list
+ SCLASS_PRESET(TAILQ_HEAD_INITIALIZER(_thread_list));
-/* Garbage collector mutex and condition variable. */
-SCLASS pthread_mutex_t _gc_mutex
-#ifdef GLOBAL_PTHREAD_PRIVATE
-= NULL
-#endif
-;
-SCLASS pthread_cond_t _gc_cond
-#ifdef GLOBAL_PTHREAD_PRIVATE
-= NULL
-#endif
-;
+/* List of threads needing GC: */
+SCLASS TAILQ_HEAD(, pthread) _thread_gc_list
+ SCLASS_PRESET(TAILQ_HEAD_INITIALIZER(_thread_gc_list));
-/*
- * Array of signal actions for this process.
- */
-SCLASS struct sigaction _thread_sigact[NSIG];
+/* Default thread attributes: */
+SCLASS struct pthread_attr _pthread_attr_default
+ SCLASS_PRESET({
+ SCHED_RR, 0, TIMESLICE_USEC, THR_DEFAULT_PRIORITY,
+ THR_CREATE_RUNNING, PTHREAD_CREATE_JOINABLE, NULL,
+ NULL, NULL, THR_STACK_DEFAULT
+ });
-/*
- * Scheduling queues:
- */
-SCLASS pq_queue_t _readyq;
-SCLASS TAILQ_HEAD(, pthread) _waitingq;
+/* Default mutex attributes: */
+SCLASS struct pthread_mutex_attr _pthread_mutexattr_default
+ SCLASS_PRESET({PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, 0 });
-/*
- * Work queue:
- */
-SCLASS TAILQ_HEAD(, pthread) _workq;
+/* Default condition variable attributes: */
+SCLASS struct pthread_cond_attr _pthread_condattr_default
+ SCLASS_PRESET({COND_TYPE_FAST, 0});
-/* Tracks the number of threads blocked while waiting for a spinlock. */
-SCLASS volatile int _spinblock_count
-#ifdef GLOBAL_PTHREAD_PRIVATE
-= 0
-#endif
-;
+/* Clock resolution in usec. */
+SCLASS int _clock_res_usec SCLASS_PRESET(CLOCK_RES_USEC);
-/* Thread switch hook. */
-SCLASS pthread_switch_routine_t _sched_switch_hook
-#ifdef GLOBAL_PTHREAD_PRIVATE
-= NULL
-#endif
-;
+/* Array of signal actions for this process: */
+SCLASS struct sigaction _thread_sigact[NSIG];
/*
- * Signals pending and masked.
+ * Array of counts of dummy handlers for SIG_DFL signals. This is used to
+ * assure that there is always a dummy signal handler installed while there
+ * is a thread sigwait()ing on the corresponding signal.
*/
-SCLASS sigset_t _thread_sigpending;
-SCLASS sigset_t _thread_sigmask;
+SCLASS int _thread_dfl_count[NSIG];
/*
- * Declare the kernel scheduler jump buffer and stack:
+ * Lock for above count of dummy handlers and for the process signal
+ * mask and pending signal sets.
*/
-SCLASS struct kse_mailbox _thread_kern_kse_mailbox;
+SCLASS struct lock _thread_signal_lock;
-SCLASS void * _thread_kern_sched_stack
-#ifdef GLOBAL_PTHREAD_PRIVATE
-= NULL
-#endif
-;
+/* Pending signals and mask for this process: */
+SCLASS sigset_t _thr_proc_sigpending;
+SCLASS sigset_t _thr_proc_sigmask SCLASS_PRESET({{0, 0, 0, 0}});
+SCLASS siginfo_t _thr_proc_siginfo[NSIG];
-/*
- * Delcare the idle context.
- */
-SCLASS struct kse_thr_mailbox _idle_thr_mailbox;
+SCLASS pid_t _thr_pid SCLASS_PRESET(0);
-SCLASS void * _idle_thr_stack
-#ifdef GLOBAL_PTHREAD_PRIVATE
-= NULL
-#endif
-;
+/* Garbage collector lock. */
+SCLASS struct lock _gc_lock;
+SCLASS int _gc_check SCLASS_PRESET(0);
+SCLASS pthread_t _gc_thread;
+SCLASS struct lock _mutex_static_lock;
+SCLASS struct lock _rwlock_static_lock;
+SCLASS struct lock _keytable_lock;
+SCLASS struct lock _thread_list_lock;
+SCLASS int _thr_guard_default;
+SCLASS int _thr_page_size;
-/* Used for _PTHREADS_INVARIANTS checking. */
-SCLASS int _thread_kern_new_state
-#ifdef GLOBAL_PTHREAD_PRIVATE
-= 0
-#endif
-;
+SCLASS int _thr_debug_flags SCLASS_PRESET(0);
-/* Undefine the storage class specifier: */
+/* Undefine the storage class and preset specifiers: */
#undef SCLASS
+#undef SCLASS_PRESET
+
/*
* Function prototype definitions.
*/
__BEGIN_DECLS
-char *__ttyname_basic(int);
-char *__ttyname_r_basic(int, char *, size_t);
-char *ttyname_r(int, char *, size_t);
-void _cond_wait_backout(pthread_t);
-int _find_thread(pthread_t);
+int _cond_reinit(pthread_cond_t *);
+void _cond_wait_backout(struct pthread *);
struct pthread *_get_curthread(void);
-void _set_curthread(struct pthread *);
-void *_thread_stack_alloc(size_t, size_t);
-void _thread_stack_free(void *, size_t, size_t);
-int _thread_create(pthread_t *,const pthread_attr_t *,void *(*start_routine)(void *),void *,pthread_t);
+struct kse *_get_curkse(void);
+void _set_curkse(struct kse *);
+struct kse *_kse_alloc(struct kse *);
+kse_critical_t _kse_critical_enter(void);
+void _kse_critical_leave(kse_critical_t);
+void _kse_free(struct kse *, struct kse *);
+void _kse_init();
+struct kse_group *_kseg_alloc(struct kse *);
+void _kse_lock_wait(struct lock *, struct lockuser *lu);
+void _kse_lock_wakeup(struct lock *, struct lockuser *lu);
+void _kse_sig_check_pending(struct kse *);
+void _kse_single_thread(struct pthread *);
+void _kse_start(struct kse *);
+void _kse_setthreaded(int);
+int _kse_isthreaded(void);
int _mutex_cv_lock(pthread_mutex_t *);
int _mutex_cv_unlock(pthread_mutex_t *);
-void _mutex_lock_backout(pthread_t);
-void _mutex_notify_priochange(pthread_t);
-int _mutex_reinit(pthread_mutex_t *);
-void _mutex_unlock_private(pthread_t);
-int _cond_reinit(pthread_cond_t *);
+void _mutex_lock_backout(struct pthread *);
+void _mutex_notify_priochange(struct pthread *, struct pthread *, int);
+int _mutex_reinit(struct pthread_mutex *);
+void _mutex_unlock_private(struct pthread *);
+void _libpthread_init(struct pthread *);
int _pq_alloc(struct pq_queue *, int, int);
int _pq_init(struct pq_queue *);
void _pq_remove(struct pq_queue *pq, struct pthread *);
@@ -899,50 +1028,61 @@ int _pthread_mutexattr_init(pthread_mutexattr_t *);
int _pthread_mutexattr_destroy(pthread_mutexattr_t *);
int _pthread_mutexattr_settype(pthread_mutexattr_t *, int);
int _pthread_once(pthread_once_t *, void (*) (void));
-pthread_t _pthread_self(void);
+struct pthread *_pthread_self(void);
int _pthread_setspecific(pthread_key_t, const void *);
-void _waitq_insert(pthread_t pthread);
-void _waitq_remove(pthread_t pthread);
-#if defined(_PTHREADS_INVARIANTS)
-void _waitq_setactive(void);
-void _waitq_clearactive(void);
-#endif
-void _thread_exit(char *, int, char *);
-void _thread_exit_cleanup(void);
-void *_thread_cleanup(pthread_t);
+struct pthread *_thr_alloc(struct kse *);
+void _thr_exit(char *, int, char *);
+void _thr_exit_cleanup(void);
+void _thr_lock_wait(struct lock *lock, struct lockuser *lu);
+void _thr_lock_wakeup(struct lock *lock, struct lockuser *lu);
+int _thr_ref_add(struct pthread *, struct pthread *, int);
+void _thr_ref_delete(struct pthread *, struct pthread *);
+void _thr_schedule_add(struct pthread *, struct pthread *);
+void _thr_schedule_remove(struct pthread *, struct pthread *);
+void _thr_setrunnable(struct pthread *curthread, struct pthread *thread);
+void _thr_setrunnable_unlocked(struct pthread *thread);
+void _thr_sig_add(struct pthread *, int, siginfo_t *, ucontext_t *);
+void _thr_sig_dispatch(struct kse *, int, siginfo_t *);
+int _thr_stack_alloc(struct pthread_attr *);
+void _thr_stack_free(struct pthread_attr *);
+void _thr_exit_cleanup(void);
+void _thr_free(struct kse *, struct pthread *);
+void _thr_panic_exit(char *, int, char *);
void _thread_cleanupspecific(void);
void _thread_dump_info(void);
-void _thread_init(void);
-void _thread_kern_idle(void);
-void _thread_kern_sched(void);
-void _thread_kern_scheduler(struct kse_mailbox *);
-void _thread_kern_sched_state(enum pthread_state, char *fname, int lineno);
-void _thread_kern_sched_state_unlock(enum pthread_state state,
- spinlock_t *lock, char *fname, int lineno);
-void _thread_kern_set_timeout(const struct timespec *);
-void _thread_kern_sig_defer(void);
-void _thread_kern_sig_undefer(void);
-void _thread_sig_handler(int, siginfo_t *, ucontext_t *);
-void _thread_printf(int fd, const char *, ...);
-void _thread_start(void);
-void _thread_seterrno(pthread_t, int);
-int _thread_enter_uts(struct kse_thr_mailbox *tm, struct kse_mailbox *km);
-int _thread_switch(struct kse_thr_mailbox *, struct kse_thr_mailbox **);
-pthread_addr_t _thread_gc(pthread_addr_t);
-void _thread_enter_cancellation_point(void);
-void _thread_leave_cancellation_point(void);
-void _thread_cancellation_point(void);
-
+void _thread_printf(int, const char *, ...);
+void _thr_sched_frame(struct pthread_sigframe *);
+void _thr_sched_switch(struct pthread *);
+void _thr_set_timeout(const struct timespec *);
+void _thr_sig_handler(int, siginfo_t *, ucontext_t *);
+void _thr_sig_check_pending(struct pthread *);
+void _thr_sig_rundown(struct pthread *, ucontext_t *,
+ struct pthread_sigframe *);
+void _thr_sig_send(struct pthread *pthread, int sig);
+void _thr_sig_wrapper(void);
+void _thr_sigframe_restore(struct pthread *thread, struct pthread_sigframe *psf);
+void _thr_seterrno(struct pthread *, int);
+void _thr_enter_cancellation_point(struct pthread *);
+void _thr_leave_cancellation_point(struct pthread *);
+
+/* XXX - Stuff that goes away when my sources get more up to date. */
+/* #include <sys/kse.h> */
+#ifdef SYS_KSE_H
+int __sys_kse_create(struct kse_mailbox *, int);
+int __sys_kse_thr_wakeup(struct kse_mailbox *);
+int __sys_kse_exit(struct kse_mailbox *);
+int __sys_kse_release(struct kse_mailbox *);
+#endif
/* #include <sys/aio.h> */
#ifdef _SYS_AIO_H_
int __sys_aio_suspend(const struct aiocb * const[], int, const struct timespec *);
#endif
-/* #include <sys/event.h> */
-#ifdef _SYS_EVENT_H_
-int __sys_kevent(int, const struct kevent *, int, struct kevent *,
- int, const struct timespec *);
+/* #include <fcntl.h> */
+#ifdef _SYS_FCNTL_H_
+int __sys_fcntl(int, int, ...);
+int __sys_open(const char *, int, ...);
#endif
/* #include <sys/ioctl.h> */
@@ -950,95 +1090,60 @@ int __sys_kevent(int, const struct kevent *, int, struct kevent *,
int __sys_ioctl(int, unsigned long, ...);
#endif
-/* #include <sys/mman.h> */
-#ifdef _SYS_MMAN_H_
-int __sys_msync(void *, size_t, int);
+/* #include <sched.h> */
+#ifdef _SCHED_H_
+int __sys_sched_yield(void);
#endif
-/* #include <sys/mount.h> */
-#ifdef _SYS_MOUNT_H_
-int __sys_fstatfs(int, struct statfs *);
+/* #include <signal.h> */
+#ifdef _SIGNAL_H_
+int __sys_kill(pid_t, int);
+int __sys_sigaction(int, const struct sigaction *, struct sigaction *);
+int __sys_sigpending(sigset_t *);
+int __sys_sigprocmask(int, const sigset_t *, sigset_t *);
+int __sys_sigsuspend(const sigset_t *);
+int __sys_sigreturn(ucontext_t *);
+int __sys_sigaltstack(const struct sigaltstack *, struct sigaltstack *);
#endif
/* #include <sys/socket.h> */
#ifdef _SYS_SOCKET_H_
-int __sys_accept(int, struct sockaddr *, socklen_t *);
-int __sys_bind(int, const struct sockaddr *, socklen_t);
-int __sys_connect(int, const struct sockaddr *, socklen_t);
-int __sys_getpeername(int, struct sockaddr *, socklen_t *);
-int __sys_getsockname(int, struct sockaddr *, socklen_t *);
-int __sys_getsockopt(int, int, int, void *, socklen_t *);
-int __sys_listen(int, int);
-ssize_t __sys_recvfrom(int, void *, size_t, int, struct sockaddr *, socklen_t *);
-ssize_t __sys_recvmsg(int, struct msghdr *, int);
-int __sys_sendfile(int, int, off_t, size_t, struct sf_hdtr *, off_t *, int);
-ssize_t __sys_sendmsg(int, const struct msghdr *, int);
-ssize_t __sys_sendto(int, const void *,size_t, int, const struct sockaddr *, socklen_t);
-int __sys_setsockopt(int, int, int, const void *, socklen_t);
-int __sys_shutdown(int, int);
-int __sys_socket(int, int, int);
-int __sys_socketpair(int, int, int, int *);
-#endif
-
-/* #include <sys/stat.h> */
-#ifdef _SYS_STAT_H_
-int __sys_fchflags(int, u_long);
-int __sys_fchmod(int, mode_t);
-int __sys_fstat(int, struct stat *);
+int __sys_sendfile(int, int, off_t, size_t, struct sf_hdtr *,
+ off_t *, int);
#endif
/* #include <sys/uio.h> */
-#ifdef _SYS_UIO_H_
-ssize_t __sys_readv(int, const struct iovec *, int);
-ssize_t __sys_writev(int, const struct iovec *, int);
-#endif
-
-/* #include <sys/wait.h> */
-#ifdef WNOHANG
-pid_t __sys_wait4(pid_t, int *, int, struct rusage *);
+#ifdef _SYS_UIO_H_
+ssize_t __sys_readv(int, const struct iovec *, int);
+ssize_t __sys_writev(int, const struct iovec *, int);
#endif
-/* #include <dirent.h> */
-#ifdef _DIRENT_H_
-int __sys_getdirentries(int, char *, int, long *);
+/* #include <time.h> */
+#ifdef _TIME_H_
+int __sys_nanosleep(const struct timespec *, struct timespec *);
#endif
-/* #include <fcntl.h> */
-#ifdef _SYS_FCNTL_H_
-int __sys_fcntl(int, int, ...);
-int __sys_flock(int, int);
-int __sys_open(const char *, int, ...);
+/* #include <unistd.h> */
+#ifdef _UNISTD_H_
+int __sys_close(int);
+int __sys_execve(const char *, char * const *, char * const *);
+int __sys_fork(void);
+int __sys_fsync(int);
+pid_t __sys_getpid(void);
+int __sys_select(int, fd_set *, fd_set *, fd_set *, struct timeval *);
+ssize_t __sys_read(int, void *, size_t);
+ssize_t __sys_write(int, const void *, size_t);
+void __sys_exit(int);
#endif
/* #include <poll.h> */
#ifdef _SYS_POLL_H_
-int __sys_poll(struct pollfd *, unsigned, int);
-#endif
-
-/* #include <signal.h> */
-#ifdef _SIGNAL_H_
-int __sys_sigaction(int, const struct sigaction *, struct sigaction *);
-int __sys_sigaltstack(const struct sigaltstack *, struct sigaltstack *);
-int __sys_sigprocmask(int, const sigset_t *, sigset_t *);
-int __sys_sigreturn(ucontext_t *);
+int __sys_poll(struct pollfd *, unsigned, int);
#endif
-/* #include <unistd.h> */
-#ifdef _UNISTD_H_
-int __sys_close(int);
-int __sys_dup(int);
-int __sys_dup2(int, int);
-int __sys_execve(const char *, char * const *, char * const *);
-void __sys_exit(int);
-int __sys_fchown(int, uid_t, gid_t);
-pid_t __sys_fork(void);
-long __sys_fpathconf(int, int);
-int __sys_fsync(int);
-int __sys_pipe(int *);
-ssize_t __sys_read(int, void *, size_t);
-ssize_t __sys_write(int, const void *, size_t);
+/* #include <sys/mman.h> */
+#ifdef _SYS_MMAN_H_
+int __sys_msync(void *, size_t, int);
#endif
-__END_DECLS
-
-#endif /* !_PTHREAD_PRIVATE_H */
+#endif /* !_THR_PRIVATE_H */
diff --git a/lib/libkse/thread/thr_pselect.c b/lib/libkse/thread/thr_pselect.c
index af22337..c80a1cf 100644
--- a/lib/libkse/thread/thr_pselect.c
+++ b/lib/libkse/thread/thr_pselect.c
@@ -44,11 +44,12 @@ int
pselect(int count, fd_set *rfds, fd_set *wfds, fd_set *efds,
const struct timespec *timo, const sigset_t *mask)
{
+ struct pthread *curthread = _get_curthread();
int ret;
- _thread_enter_cancellation_point();
+ _thr_enter_cancellation_point(curthread);
ret = __pselect(count, rfds, wfds, efds, timo, mask);
- _thread_leave_cancellation_point();
+ _thr_leave_cancellation_point(curthread);
return (ret);
}
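
The three small files that follow (thr_read.c, thr_readv.c, thr_select.c) apply the same change shown here for pselect(): fetch curthread once and bracket the raw __sys_*() call with the new per-thread cancellation-point calls. A minimal sketch of that wrapper shape, with placeholder helper names (enter_cancel, leave_cancel, wrapped_read) rather than the library's real symbols:

#include <sys/types.h>
#include <unistd.h>

static void
enter_cancel(void)
{
        /* placeholder for _thr_enter_cancellation_point(curthread) */
}

static void
leave_cancel(void)
{
        /* placeholder for _thr_leave_cancellation_point(curthread) */
}

ssize_t
wrapped_read(int fd, void *buf, size_t nbytes)
{
        ssize_t ret;

        enter_cancel();                 /* a pending cancel may be acted on here */
        ret = read(fd, buf, nbytes);    /* the real shim calls __sys_read() */
        leave_cancel();                 /* and here, before returning */
        return (ret);
}
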
diff --git a/lib/libkse/thread/thr_read.c b/lib/libkse/thread/thr_read.c
index 8d095c4..34dabd3 100644
--- a/lib/libkse/thread/thr_read.c
+++ b/lib/libkse/thread/thr_read.c
@@ -45,11 +45,12 @@ __weak_reference(__read, read);
ssize_t
__read(int fd, void *buf, size_t nbytes)
{
+ struct pthread *curthread = _get_curthread();
ssize_t ret;
- _thread_enter_cancellation_point();
+ _thr_enter_cancellation_point(curthread);
ret = __sys_read(fd, buf, nbytes);
- _thread_leave_cancellation_point();
+ _thr_leave_cancellation_point(curthread);
return ret;
}
diff --git a/lib/libkse/thread/thr_readv.c b/lib/libkse/thread/thr_readv.c
index c3b03eb..3a8823f 100644
--- a/lib/libkse/thread/thr_readv.c
+++ b/lib/libkse/thread/thr_readv.c
@@ -45,11 +45,12 @@ __weak_reference(__readv, readv);
ssize_t
__readv(int fd, const struct iovec *iov, int iovcnt)
{
+ struct pthread *curthread = _get_curthread();
ssize_t ret;
- _thread_enter_cancellation_point();
+ _thr_enter_cancellation_point(curthread);
ret = __sys_readv(fd, iov, iovcnt);
- _thread_leave_cancellation_point();
+ _thr_leave_cancellation_point(curthread);
return ret;
}
diff --git a/lib/libkse/thread/thr_resume_np.c b/lib/libkse/thread/thr_resume_np.c
index cc00f09..d0b45b3 100644
--- a/lib/libkse/thread/thr_resume_np.c
+++ b/lib/libkse/thread/thr_resume_np.c
@@ -35,33 +35,32 @@
#include <pthread.h>
#include "thr_private.h"
-static void resume_common(struct pthread *);
+static void resume_common(struct pthread *);
__weak_reference(_pthread_resume_np, pthread_resume_np);
__weak_reference(_pthread_resume_all_np, pthread_resume_all_np);
+
/* Resume a thread: */
int
_pthread_resume_np(pthread_t thread)
{
+ struct pthread *curthread = _get_curthread();
int ret;
- /* Find the thread in the list of active threads: */
- if ((ret = _find_thread(thread)) == 0) {
- /*
- * Defer signals to protect the scheduling queues
- * from access by the signal handler:
- */
- _thread_kern_sig_defer();
+ /* Add a reference to the thread: */
+ if ((ret = _thr_ref_add(curthread, thread, /*include dead*/0)) == 0) {
+ /* Is it currently suspended? */
+ if ((thread->flags & THR_FLAGS_SUSPENDED) != 0) {
+ /* Lock the thread's scheduling queue: */
+ THR_SCHED_LOCK(curthread, thread);
- if ((thread->flags & PTHREAD_FLAGS_SUSPENDED) != 0)
resume_common(thread);
- /*
- * Undefer and handle pending signals, yielding if
- * necessary:
- */
- _thread_kern_sig_undefer();
+ /* Unlock the thread's scheduling queue: */
+ THR_SCHED_UNLOCK(curthread, thread);
+ }
+ _thr_ref_delete(curthread, thread);
}
return (ret);
}
@@ -69,43 +68,42 @@ _pthread_resume_np(pthread_t thread)
void
_pthread_resume_all_np(void)
{
- struct pthread *curthread = _get_curthread();
- struct pthread *thread;
+ struct pthread *curthread = _get_curthread();
+ struct pthread *thread;
+ kse_critical_t crit;
- /*
- * Defer signals to protect the scheduling queues from access
- * by the signal handler:
- */
- _thread_kern_sig_defer();
+ /* Take the thread list lock: */
+ crit = _kse_critical_enter();
+ KSE_LOCK_ACQUIRE(curthread->kse, &_thread_list_lock);
TAILQ_FOREACH(thread, &_thread_list, tle) {
if ((thread != curthread) &&
- ((thread->flags & PTHREAD_FLAGS_SUSPENDED) != 0))
+ ((thread->flags & THR_FLAGS_SUSPENDED) != 0) &&
+ (thread->state != PS_DEAD) &&
+ (thread->state != PS_DEADLOCK) &&
+ ((thread->flags & THR_FLAGS_EXITING) == 0)) {
+ THR_SCHED_LOCK(curthread, thread);
resume_common(thread);
+ THR_SCHED_UNLOCK(curthread, thread);
+ }
}
- /*
- * Undefer and handle pending signals, yielding if necessary:
- */
- _thread_kern_sig_undefer();
+ /* Release the thread list lock: */
+ KSE_LOCK_RELEASE(curthread->kse, &_thread_list_lock);
+ _kse_critical_leave(crit);
}
static void
resume_common(struct pthread *thread)
{
/* Clear the suspend flag: */
- thread->flags &= ~PTHREAD_FLAGS_SUSPENDED;
+ thread->flags &= ~THR_FLAGS_SUSPENDED;
/*
* If the thread's state is suspended, that means it is
* now runnable but not in any scheduling queue. Set the
* state to running and insert it into the run queue.
*/
- if (thread->state == PS_SUSPENDED) {
- PTHREAD_SET_STATE(thread, PS_RUNNING);
- if (thread->priority_mutex_count > 0)
- PTHREAD_PRIOQ_INSERT_HEAD(thread);
- else
- PTHREAD_PRIOQ_INSERT_TAIL(thread);
- }
+ if (thread->state == PS_SUSPENDED)
+ _thr_setrunnable_unlocked(thread);
}
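
For context, a small application-level sketch of how the non-portable suspend/resume pair reworked above is meant to be used on FreeBSD (error handling omitted; the comments about THR_FLAGS_SUSPENDED are an assumption based on the hunk above, not a statement of the implementation):

#include <pthread.h>
#include <pthread_np.h>
#include <stdio.h>
#include <unistd.h>

static void *
worker(void *arg)
{
        (void)arg;
        for (;;) {
                printf("tick\n");
                sleep(1);
        }
        return (NULL);
}

int
main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, worker, NULL);
        sleep(2);
        pthread_suspend_np(t);  /* presumably sets THR_FLAGS_SUSPENDED and parks the thread */
        sleep(2);               /* no "tick" output while suspended */
        pthread_resume_np(t);   /* clears the flag and makes the thread runnable again */
        sleep(2);
        return (0);
}
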
diff --git a/lib/libkse/thread/thr_rwlock.c b/lib/libkse/thread/thr_rwlock.c
index f41e8a2..5f1d8e7 100644
--- a/lib/libkse/thread/thr_rwlock.c
+++ b/lib/libkse/thread/thr_rwlock.c
@@ -30,7 +30,9 @@
#include <limits.h>
#include <stdlib.h>
+#include "namespace.h"
#include <pthread.h>
+#include "un-namespace.h"
#include "thr_private.h"
/* maximum number of times a read lock may be obtained */
@@ -44,25 +46,28 @@ __weak_reference(_pthread_rwlock_trywrlock, pthread_rwlock_trywrlock);
__weak_reference(_pthread_rwlock_unlock, pthread_rwlock_unlock);
__weak_reference(_pthread_rwlock_wrlock, pthread_rwlock_wrlock);
-static int init_static (pthread_rwlock_t *rwlock);
-static spinlock_t static_init_lock = _SPINLOCK_INITIALIZER;
+/*
+ * Prototypes
+ */
+static int init_static(pthread_rwlock_t *rwlock);
+
static int
-init_static (pthread_rwlock_t *rwlock)
+init_static(pthread_rwlock_t *rwlock)
{
+ struct pthread *thread = _get_curthread();
int ret;
- _SPINLOCK(&static_init_lock);
+ THR_LOCK_ACQUIRE(thread, &_rwlock_static_lock);
if (*rwlock == NULL)
- ret = pthread_rwlock_init(rwlock, NULL);
+ ret = _pthread_rwlock_init(rwlock, NULL);
else
ret = 0;
- _SPINUNLOCK(&static_init_lock);
-
- return(ret);
+ THR_LOCK_RELEASE(thread, &_rwlock_static_lock);
+ return (ret);
}
int
@@ -77,9 +82,9 @@ _pthread_rwlock_destroy (pthread_rwlock_t *rwlock)
prwlock = *rwlock;
- pthread_mutex_destroy(&prwlock->lock);
- pthread_cond_destroy(&prwlock->read_signal);
- pthread_cond_destroy(&prwlock->write_signal);
+ _pthread_mutex_destroy(&prwlock->lock);
+ _pthread_cond_destroy(&prwlock->read_signal);
+ _pthread_cond_destroy(&prwlock->write_signal);
free(prwlock);
*rwlock = NULL;
@@ -87,7 +92,7 @@ _pthread_rwlock_destroy (pthread_rwlock_t *rwlock)
ret = 0;
}
- return(ret);
+ return (ret);
}
int
@@ -100,25 +105,25 @@ _pthread_rwlock_init (pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr
prwlock = (pthread_rwlock_t)malloc(sizeof(struct pthread_rwlock));
if (prwlock == NULL)
- return(ENOMEM);
+ return (ENOMEM);
/* initialize the lock */
- if ((ret = pthread_mutex_init(&prwlock->lock, NULL)) != 0)
+ if ((ret = _pthread_mutex_init(&prwlock->lock, NULL)) != 0)
free(prwlock);
else {
/* initialize the read condition signal */
- ret = pthread_cond_init(&prwlock->read_signal, NULL);
+ ret = _pthread_cond_init(&prwlock->read_signal, NULL);
if (ret != 0) {
- pthread_mutex_destroy(&prwlock->lock);
+ _pthread_mutex_destroy(&prwlock->lock);
free(prwlock);
} else {
/* initialize the write condition signal */
- ret = pthread_cond_init(&prwlock->write_signal, NULL);
+ ret = _pthread_cond_init(&prwlock->write_signal, NULL);
if (ret != 0) {
- pthread_cond_destroy(&prwlock->read_signal);
- pthread_mutex_destroy(&prwlock->lock);
+ _pthread_cond_destroy(&prwlock->read_signal);
+ _pthread_mutex_destroy(&prwlock->lock);
free(prwlock);
} else {
/* success */
@@ -130,7 +135,7 @@ _pthread_rwlock_init (pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr
}
}
- return(ret);
+ return (ret);
}
int
@@ -140,30 +145,30 @@ _pthread_rwlock_rdlock (pthread_rwlock_t *rwlock)
int ret;
if (rwlock == NULL)
- return(EINVAL);
+ return (EINVAL);
prwlock = *rwlock;
/* check for static initialization */
if (prwlock == NULL) {
if ((ret = init_static(rwlock)) != 0)
- return(ret);
+ return (ret);
prwlock = *rwlock;
}
/* grab the monitor lock */
- if ((ret = pthread_mutex_lock(&prwlock->lock)) != 0)
- return(ret);
+ if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
+ return (ret);
/* give writers priority over readers */
while (prwlock->blocked_writers || prwlock->state < 0) {
- ret = pthread_cond_wait(&prwlock->read_signal, &prwlock->lock);
+ ret = _pthread_cond_wait(&prwlock->read_signal, &prwlock->lock);
if (ret != 0) {
/* can't do a whole lot if this fails */
- pthread_mutex_unlock(&prwlock->lock);
- return(ret);
+ _pthread_mutex_unlock(&prwlock->lock);
+ return (ret);
}
}
@@ -179,9 +184,9 @@ _pthread_rwlock_rdlock (pthread_rwlock_t *rwlock)
* lock. Decrementing 'state' is no good because we probably
* don't have the monitor lock.
*/
- pthread_mutex_unlock(&prwlock->lock);
+ _pthread_mutex_unlock(&prwlock->lock);
- return(ret);
+ return (ret);
}
int
@@ -191,21 +196,21 @@ _pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock)
int ret;
if (rwlock == NULL)
- return(EINVAL);
+ return (EINVAL);
prwlock = *rwlock;
/* check for static initialization */
if (prwlock == NULL) {
if ((ret = init_static(rwlock)) != 0)
- return(ret);
+ return (ret);
prwlock = *rwlock;
}
/* grab the monitor lock */
- if ((ret = pthread_mutex_lock(&prwlock->lock)) != 0)
- return(ret);
+ if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
+ return (ret);
/* give writers priority over readers */
if (prwlock->blocked_writers || prwlock->state < 0)
@@ -216,9 +221,9 @@ _pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock)
++prwlock->state; /* indicate we are locked for reading */
/* see the comment on this in pthread_rwlock_rdlock */
- pthread_mutex_unlock(&prwlock->lock);
+ _pthread_mutex_unlock(&prwlock->lock);
- return(ret);
+ return (ret);
}
int
@@ -228,21 +233,21 @@ _pthread_rwlock_trywrlock (pthread_rwlock_t *rwlock)
int ret;
if (rwlock == NULL)
- return(EINVAL);
+ return (EINVAL);
prwlock = *rwlock;
/* check for static initialization */
if (prwlock == NULL) {
if ((ret = init_static(rwlock)) != 0)
- return(ret);
+ return (ret);
prwlock = *rwlock;
}
/* grab the monitor lock */
- if ((ret = pthread_mutex_lock(&prwlock->lock)) != 0)
- return(ret);
+ if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
+ return (ret);
if (prwlock->state != 0)
ret = EBUSY;
@@ -251,9 +256,9 @@ _pthread_rwlock_trywrlock (pthread_rwlock_t *rwlock)
prwlock->state = -1;
/* see the comment on this in pthread_rwlock_rdlock */
- pthread_mutex_unlock(&prwlock->lock);
+ _pthread_mutex_unlock(&prwlock->lock);
- return(ret);
+ return (ret);
}
int
@@ -263,34 +268,34 @@ _pthread_rwlock_unlock (pthread_rwlock_t *rwlock)
int ret;
if (rwlock == NULL)
- return(EINVAL);
+ return (EINVAL);
prwlock = *rwlock;
if (prwlock == NULL)
- return(EINVAL);
+ return (EINVAL);
/* grab the monitor lock */
- if ((ret = pthread_mutex_lock(&prwlock->lock)) != 0)
- return(ret);
+ if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
+ return (ret);
if (prwlock->state > 0) {
if (--prwlock->state == 0 && prwlock->blocked_writers)
- ret = pthread_cond_signal(&prwlock->write_signal);
+ ret = _pthread_cond_signal(&prwlock->write_signal);
} else if (prwlock->state < 0) {
prwlock->state = 0;
if (prwlock->blocked_writers)
- ret = pthread_cond_signal(&prwlock->write_signal);
+ ret = _pthread_cond_signal(&prwlock->write_signal);
else
- ret = pthread_cond_broadcast(&prwlock->read_signal);
+ ret = _pthread_cond_broadcast(&prwlock->read_signal);
} else
ret = EINVAL;
/* see the comment on this in pthread_rwlock_rdlock */
- pthread_mutex_unlock(&prwlock->lock);
+ _pthread_mutex_unlock(&prwlock->lock);
- return(ret);
+ return (ret);
}
int
@@ -300,31 +305,31 @@ _pthread_rwlock_wrlock (pthread_rwlock_t *rwlock)
int ret;
if (rwlock == NULL)
- return(EINVAL);
+ return (EINVAL);
prwlock = *rwlock;
/* check for static initialization */
if (prwlock == NULL) {
if ((ret = init_static(rwlock)) != 0)
- return(ret);
+ return (ret);
prwlock = *rwlock;
}
/* grab the monitor lock */
- if ((ret = pthread_mutex_lock(&prwlock->lock)) != 0)
- return(ret);
+ if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
+ return (ret);
while (prwlock->state != 0) {
++prwlock->blocked_writers;
- ret = pthread_cond_wait(&prwlock->write_signal, &prwlock->lock);
+ ret = _pthread_cond_wait(&prwlock->write_signal, &prwlock->lock);
if (ret != 0) {
--prwlock->blocked_writers;
- pthread_mutex_unlock(&prwlock->lock);
- return(ret);
+ _pthread_mutex_unlock(&prwlock->lock);
+ return (ret);
}
--prwlock->blocked_writers;
@@ -334,8 +339,7 @@ _pthread_rwlock_wrlock (pthread_rwlock_t *rwlock)
prwlock->state = -1;
/* see the comment on this in pthread_rwlock_rdlock */
- pthread_mutex_unlock(&prwlock->lock);
+ _pthread_mutex_unlock(&prwlock->lock);
- return(ret);
+ return (ret);
}
-
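
The rwlock itself remains the familiar monitor built from one mutex and two condition variables, with writers given priority over readers; only the internal calls were switched to the namespace-protected _pthread_* entry points. A hedged usage sketch from the application side (error handling omitted; the comments about internal state are inferred from the hunks above):

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t lock;
static int counter;

static void *
reader(void *arg)
{
        (void)arg;
        pthread_rwlock_rdlock(&lock);   /* waits while a writer holds or is queued */
        printf("read %d\n", counter);
        pthread_rwlock_unlock(&lock);
        return (NULL);
}

static void *
writer(void *arg)
{
        (void)arg;
        pthread_rwlock_wrlock(&lock);   /* exclusive: internal state goes to -1 */
        counter++;
        pthread_rwlock_unlock(&lock);   /* wakes a blocked writer first, else all readers */
        return (NULL);
}

int
main(void)
{
        pthread_t r, w;

        pthread_rwlock_init(&lock, NULL);
        pthread_create(&w, NULL, writer, NULL);
        pthread_create(&r, NULL, reader, NULL);
        pthread_join(w, NULL);
        pthread_join(r, NULL);
        pthread_rwlock_destroy(&lock);
        return (0);
}
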
diff --git a/lib/libkse/thread/thr_select.c b/lib/libkse/thread/thr_select.c
index a4f4a15..6714af0 100644
--- a/lib/libkse/thread/thr_select.c
+++ b/lib/libkse/thread/thr_select.c
@@ -49,11 +49,12 @@ int
__select(int numfds, fd_set *readfds, fd_set *writefds, fd_set *exceptfds,
struct timeval *timeout)
{
+ struct pthread *curthread = _get_curthread();
int ret;
- _thread_enter_cancellation_point();
+ _thr_enter_cancellation_point(curthread);
ret = __sys_select(numfds, readfds, writefds, exceptfds, timeout);
- _thread_leave_cancellation_point();
+ _thr_leave_cancellation_point(curthread);
return ret;
}
diff --git a/lib/libkse/thread/thr_self.c b/lib/libkse/thread/thr_self.c
index d213e5e..0c702a6 100644
--- a/lib/libkse/thread/thr_self.c
+++ b/lib/libkse/thread/thr_self.c
@@ -39,6 +39,9 @@ __weak_reference(_pthread_self, pthread_self);
pthread_t
_pthread_self(void)
{
+ if (_thr_initial == NULL)
+ _libpthread_init(NULL);
+
/* Return the running thread pointer: */
return (_get_curthread());
}
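
pthread_self() now bootstraps the library on demand, so a program that never calls pthread_create() still gets a usable initial thread. A minimal sketch of that lazy-initialization guard, using placeholder names (my_pthread_self, lib_init, initial) rather than the real symbols:

#include <stddef.h>

struct pthread { int id; };             /* placeholder for the real structure */

static struct pthread main_thread;
static struct pthread *initial;         /* stands in for _thr_initial */

static void
lib_init(void *arg)
{
        (void)arg;
        initial = &main_thread;         /* real code builds the initial thread, KSE and KSEG */
}

struct pthread *
my_pthread_self(void)
{
        if (initial == NULL)            /* first pthread call in this process */
                lib_init(NULL);
        return (initial);               /* real code returns _get_curthread() */
}
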
diff --git a/lib/libkse/thread/thr_sem.c b/lib/libkse/thread/thr_sem.c
index 70a9721..d6021a8 100644
--- a/lib/libkse/thread/thr_sem.c
+++ b/lib/libkse/thread/thr_sem.c
@@ -32,7 +32,9 @@
#include <stdlib.h>
#include <errno.h>
#include <semaphore.h>
+#include "namespace.h"
#include <pthread.h>
+#include "un-namespace.h"
#include "thr_private.h"
#define _SEM_CHECK_VALIDITY(sem) \
@@ -88,15 +90,15 @@ _sem_init(sem_t *sem, int pshared, unsigned int value)
/*
* Initialize the semaphore.
*/
- if (pthread_mutex_init(&(*sem)->lock, NULL) != 0) {
+ if (_pthread_mutex_init(&(*sem)->lock, NULL) != 0) {
free(*sem);
errno = ENOSPC;
retval = -1;
goto RETURN;
}
- if (pthread_cond_init(&(*sem)->gtzero, NULL) != 0) {
- pthread_mutex_destroy(&(*sem)->lock);
+ if (_pthread_cond_init(&(*sem)->gtzero, NULL) != 0) {
+ _pthread_mutex_destroy(&(*sem)->lock);
free(*sem);
errno = ENOSPC;
retval = -1;
@@ -109,7 +111,7 @@ _sem_init(sem_t *sem, int pshared, unsigned int value)
retval = 0;
RETURN:
- return retval;
+ return (retval);
}
int
@@ -120,71 +122,72 @@ _sem_destroy(sem_t *sem)
_SEM_CHECK_VALIDITY(sem);
/* Make sure there are no waiters. */
- pthread_mutex_lock(&(*sem)->lock);
+ _pthread_mutex_lock(&(*sem)->lock);
if ((*sem)->nwaiters > 0) {
- pthread_mutex_unlock(&(*sem)->lock);
+ _pthread_mutex_unlock(&(*sem)->lock);
errno = EBUSY;
retval = -1;
goto RETURN;
}
- pthread_mutex_unlock(&(*sem)->lock);
+ _pthread_mutex_unlock(&(*sem)->lock);
- pthread_mutex_destroy(&(*sem)->lock);
- pthread_cond_destroy(&(*sem)->gtzero);
+ _pthread_mutex_destroy(&(*sem)->lock);
+ _pthread_cond_destroy(&(*sem)->gtzero);
(*sem)->magic = 0;
free(*sem);
retval = 0;
RETURN:
- return retval;
+ return (retval);
}
sem_t *
_sem_open(const char *name, int oflag, ...)
{
errno = ENOSYS;
- return SEM_FAILED;
+ return (SEM_FAILED);
}
int
_sem_close(sem_t *sem)
{
errno = ENOSYS;
- return -1;
+ return (-1);
}
int
_sem_unlink(const char *name)
{
errno = ENOSYS;
- return -1;
+ return (-1);
}
int
_sem_wait(sem_t *sem)
{
- int retval;
+ struct pthread *curthread = _get_curthread();
+ int retval;
- _thread_enter_cancellation_point();
+ _thr_enter_cancellation_point(curthread);
_SEM_CHECK_VALIDITY(sem);
- pthread_mutex_lock(&(*sem)->lock);
+ _pthread_mutex_lock(&(*sem)->lock);
while ((*sem)->count == 0) {
(*sem)->nwaiters++;
- pthread_cond_wait(&(*sem)->gtzero, &(*sem)->lock);
+ _pthread_cond_wait(&(*sem)->gtzero, &(*sem)->lock);
(*sem)->nwaiters--;
}
(*sem)->count--;
- pthread_mutex_unlock(&(*sem)->lock);
+ _pthread_mutex_unlock(&(*sem)->lock);
retval = 0;
RETURN:
- _thread_leave_cancellation_point();
- return retval;
+ _thr_leave_cancellation_point(curthread);
+ return (retval);
}
int
@@ -194,7 +197,7 @@ _sem_trywait(sem_t *sem)
_SEM_CHECK_VALIDITY(sem);
- pthread_mutex_lock(&(*sem)->lock);
+ _pthread_mutex_lock(&(*sem)->lock);
if ((*sem)->count > 0) {
(*sem)->count--;
@@ -204,37 +207,38 @@ _sem_trywait(sem_t *sem)
retval = -1;
}
- pthread_mutex_unlock(&(*sem)->lock);
+ _pthread_mutex_unlock(&(*sem)->lock);
RETURN:
- return retval;
+ return (retval);
}
int
_sem_post(sem_t *sem)
{
- int retval;
+ kse_critical_t crit;
+ int retval;
_SEM_CHECK_VALIDITY(sem);
/*
* sem_post() is required to be safe to call from within signal
- * handlers. Thus, we must defer signals.
+ * handlers. Thus, we must enter a critical region.
*/
- _thread_kern_sig_defer();
+ crit = _kse_critical_enter();
- pthread_mutex_lock(&(*sem)->lock);
+ _pthread_mutex_lock(&(*sem)->lock);
(*sem)->count++;
if ((*sem)->nwaiters > 0)
- pthread_cond_signal(&(*sem)->gtzero);
+ _pthread_cond_signal(&(*sem)->gtzero);
- pthread_mutex_unlock(&(*sem)->lock);
+ _pthread_mutex_unlock(&(*sem)->lock);
- _thread_kern_sig_undefer();
+ _kse_critical_leave(crit);
retval = 0;
RETURN:
- return retval;
+ return (retval);
}
int
@@ -244,11 +248,11 @@ _sem_getvalue(sem_t *sem, int *sval)
_SEM_CHECK_VALIDITY(sem);
- pthread_mutex_lock(&(*sem)->lock);
+ _pthread_mutex_lock(&(*sem)->lock);
*sval = (int)(*sem)->count;
- pthread_mutex_unlock(&(*sem)->lock);
+ _pthread_mutex_unlock(&(*sem)->lock);
retval = 0;
RETURN:
- return retval;
+ return (retval);
}
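
The semaphore is layered on a mutex plus a single "gtzero" condition variable; sem_wait() is a cancellation point, and sem_post() now runs inside a KSE critical region so it stays safe to call from signal handlers. A small usage sketch (error handling omitted; named semaphores still return ENOSYS in this commit):

#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

static sem_t items;

static void *
producer(void *arg)
{
        int i;

        (void)arg;
        for (i = 0; i < 3; i++)
                sem_post(&items);       /* count++; signals gtzero if anyone waits */
        return (NULL);
}

int
main(void)
{
        pthread_t t;
        int i, value;

        sem_init(&items, 0, 0);         /* unnamed, process-private semaphore */
        pthread_create(&t, NULL, producer, NULL);
        for (i = 0; i < 3; i++)
                sem_wait(&items);       /* blocks on the condvar while count == 0 */
        pthread_join(t, NULL);
        sem_getvalue(&items, &value);
        printf("value = %d\n", value);  /* expect 0 */
        sem_destroy(&items);
        return (0);
}
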
diff --git a/lib/libkse/thread/thr_seterrno.c b/lib/libkse/thread/thr_seterrno.c
index 4f585ac..245d43f 100644
--- a/lib/libkse/thread/thr_seterrno.c
+++ b/lib/libkse/thread/thr_seterrno.c
@@ -47,7 +47,7 @@ void
_thread_seterrno(pthread_t thread, int error)
{
/* Check for the initial thread: */
- if (thread == _thread_initial)
+ if (thread == _thr_initial)
/* The initial thread always uses the global error variable: */
errno = error;
else
diff --git a/lib/libkse/thread/thr_setschedparam.c b/lib/libkse/thread/thr_setschedparam.c
index 5117a26..1be9f91 100644
--- a/lib/libkse/thread/thr_setschedparam.c
+++ b/lib/libkse/thread/thr_setschedparam.c
@@ -42,40 +42,55 @@ int
_pthread_setschedparam(pthread_t pthread, int policy,
const struct sched_param *param)
{
- int old_prio, in_readyq = 0, ret = 0;
+ struct pthread *curthread = _get_curthread();
+ int in_syncq;
+ int in_readyq = 0;
+ int old_prio;
+ int ret = 0;
if ((param == NULL) || (policy < SCHED_FIFO) || (policy > SCHED_RR)) {
/* Return an invalid argument error: */
ret = EINVAL;
- } else if ((param->sched_priority < PTHREAD_MIN_PRIORITY) ||
- (param->sched_priority > PTHREAD_MAX_PRIORITY)) {
+ } else if ((param->sched_priority < THR_MIN_PRIORITY) ||
+ (param->sched_priority > THR_MAX_PRIORITY)) {
/* Return an unsupported value error. */
ret = ENOTSUP;
/* Find the thread in the list of active threads: */
- } else if ((ret = _find_thread(pthread)) == 0) {
+ } else if ((ret = _thr_ref_add(curthread, pthread, /*include dead*/0))
+ == 0) {
/*
- * Defer signals to protect the scheduling queues from
- * access by the signal handler:
+ * Lock the thread's scheduling queue while we change
+ * its priority:
*/
- _thread_kern_sig_defer();
+ THR_SCHED_LOCK(curthread, pthread);
+ in_syncq = pthread->flags & THR_FLAGS_IN_SYNCQ;
- if (param->sched_priority !=
- PTHREAD_BASE_PRIORITY(pthread->base_priority)) {
+ /* Set the scheduling policy: */
+ pthread->attr.sched_policy = policy;
+
+ if (param->sched_priority ==
+ THR_BASE_PRIORITY(pthread->base_priority))
+ /*
+ * There is nothing to do; unlock the thread's
+ * scheduling queue.
+ */
+ THR_SCHED_UNLOCK(curthread, pthread);
+ else {
/*
* Remove the thread from its current priority
* queue before any adjustments are made to its
* active priority:
*/
old_prio = pthread->active_priority;
- if ((pthread->flags & PTHREAD_FLAGS_IN_PRIOQ) != 0) {
+ if ((pthread->flags & THR_FLAGS_IN_RUNQ) != 0) {
in_readyq = 1;
- PTHREAD_PRIOQ_REMOVE(pthread);
+ THR_RUNQ_REMOVE(pthread);
}
/* Set the thread base priority: */
pthread->base_priority &=
- (PTHREAD_SIGNAL_PRIORITY | PTHREAD_RT_PRIORITY);
+ (THR_SIGNAL_PRIORITY | THR_RT_PRIORITY);
pthread->base_priority = param->sched_priority;
/* Recalculate the active priority: */
@@ -92,28 +107,23 @@ _pthread_setschedparam(pthread_t pthread, int policy,
* its priority if it owns any priority
* protection or inheritance mutexes.
*/
- PTHREAD_PRIOQ_INSERT_HEAD(pthread);
+ THR_RUNQ_INSERT_HEAD(pthread);
}
else
- PTHREAD_PRIOQ_INSERT_TAIL(pthread);
+ THR_RUNQ_INSERT_TAIL(pthread);
}
+ /* Unlock the thread's scheduling queue: */
+ THR_SCHED_UNLOCK(curthread, pthread);
+
/*
* Check for any mutex priority adjustments. This
* includes checking for a priority mutex on which
* this thread is waiting.
*/
- _mutex_notify_priochange(pthread);
+ _mutex_notify_priochange(curthread, pthread, in_syncq);
}
-
- /* Set the scheduling policy: */
- pthread->attr.sched_policy = policy;
-
- /*
- * Undefer and handle pending signals, yielding if
- * necessary:
- */
- _thread_kern_sig_undefer();
+ _thr_ref_delete(curthread, pthread);
}
- return(ret);
+ return (ret);
}
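
From the caller's side nothing changes: the function still rejects policies outside the SCHED_FIFO..SCHED_RR range with EINVAL and priorities outside the library's range with ENOTSUP. A hedged usage sketch (the priority value 15 is an arbitrary example assumed to lie inside THR_MIN/MAX_PRIORITY):

#include <pthread.h>
#include <sched.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
        struct sched_param param;
        int policy, ret;

        pthread_getschedparam(pthread_self(), &policy, &param);
        printf("policy %d, priority %d\n", policy, param.sched_priority);

        param.sched_priority = 15;
        ret = pthread_setschedparam(pthread_self(), SCHED_RR, &param);
        if (ret != 0)
                printf("pthread_setschedparam: %s\n", strerror(ret));
        return (0);
}
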
diff --git a/lib/libkse/thread/thr_sig.c b/lib/libkse/thread/thr_sig.c
index fa63b5f..de177d9 100644
--- a/lib/libkse/thread/thr_sig.c
+++ b/lib/libkse/thread/thr_sig.c
@@ -35,25 +35,29 @@
#include <sys/types.h>
#include <sys/signalvar.h>
#include <signal.h>
+#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
-#include <setjmp.h>
-#include <errno.h>
+#include <string.h>
#include <pthread.h>
#include "thr_private.h"
+#include "pthread_md.h"
/* Prototypes: */
-static void thread_sig_handle_special(int sig);
-
-static void thread_sig_add(struct pthread *pthread, int sig, int has_args);
-static void thread_sig_check_state(struct pthread *pthread, int sig);
-static struct pthread *thread_sig_find(int sig);
-static void thread_sigframe_add(struct pthread *thread, int sig,
- int has_args);
-static void thread_sigframe_save(struct pthread *thread,
- struct pthread_state_data *psd);
-static void thread_sigframe_restore(struct pthread *thread,
- struct pthread_state_data *psd);
+static void build_siginfo(siginfo_t *info, int signo);
+static void thr_sig_add(struct pthread *pthread, int sig, siginfo_t *info);
+static void thr_sig_check_state(struct pthread *pthread, int sig);
+static struct pthread *thr_sig_find(struct kse *curkse, int sig,
+ siginfo_t *info);
+static void handle_special_signals(struct kse *curkse, int sig);
+static void thr_sigframe_add(struct pthread *thread, int sig,
+ siginfo_t *info);
+static void thr_sigframe_restore(struct pthread *thread,
+ struct pthread_sigframe *psf);
+static void thr_sigframe_save(struct pthread *thread,
+ struct pthread_sigframe *psf);
+static void thr_sig_invoke_handler(struct pthread *, int sig,
+ siginfo_t *info, ucontext_t *ucp);
/* #define DEBUG_SIGNAL */
#ifdef DEBUG_SIGNAL
@@ -63,55 +67,210 @@ static void thread_sigframe_restore(struct pthread *thread,
#endif
/*
- * Dispatch a signal to a thread, if appropriate.
+ * Signal setup and delivery.
+ *
+ * 1) Delivering signals to threads in the same KSE.
+ * These signals are sent by upcall events and are set in the
+ * km_sigscaught field of the KSE mailbox. Since these signals
+ * are received while operating on the KSE stack, they can be
+ * delivered either by using signalcontext() to add a stack frame
+ * to the target thread's stack, or by adding them in the thread's
+ * pending set and having the thread run them down after it resumes.
+ * 2) Delivering signals to threads in other KSEs/KSEGs.
+ * 3) Delivering signals to threads in critical regions.
+ * 4) Delivering signals to threads after they change their signal masks.
+ *
+ * Methods of delivering signals.
+ *
+ * 1) Add a signal frame to the thread's saved context.
+ * 2) Add the signal to the thread structure, mark the thread as
+ * having signals to handle, and let the thread run them down
+ * after it resumes from the KSE scheduler.
+ *
+ * Problem with 1). You can't do this to a running thread or a
+ * thread in a critical region.
+ *
+ * Problem with 2). You can't do this to a thread that doesn't
+ * yield in some way (explicitly enters the scheduler). A thread
+ * blocked in the kernel or a CPU hungry thread will not see the
+ * signal without entering the scheduler.
+ *
+ * The solution is to use both 1) and 2) to deliver signals:
+ *
+ * o Thread in critical region - use 2). When the thread
+ * leaves the critical region it will check to see if it
+ * has pending signals and run them down.
+ *
+ * o Thread enters scheduler explicitly - use 2). The thread
+ * can check for pending signals after it returns from the
+ * scheduler.
+ *
+ * o Thread is running and not current thread - use 2). When the
+ * thread hits a condition specified by one of the other bullets,
+ * the signal will be delivered.
+ *
+ * o Thread is running and is current thread (e.g., the thread
+ * has just changed its signal mask and now sees that it has
+ * pending signals) - just run down the pending signals.
+ *
+ * o Thread is swapped out due to quantum expiration - use 1)
+ *
+ * o Thread is blocked in kernel - kse_thr_wakeup() and then
+ * use 1)
+ */
+
+/*
+ * Rules for selecting threads for signals received:
+ *
+ * 1) If the signal is a synchronous signal, it is delivered to
+ * the generating (current) thread. If the thread has the
+ * signal masked, it is added to the thread's pending signal
+ * set until the thread unmasks it.
+ *
+ * 2) A thread in sigwait() where the signal is in the thread's
+ * waitset.
+ *
+ * 3) A thread in sigsuspend() where the signal is not in the
+ * thread's suspended signal mask.
+ *
+ * 4) Any thread (first found/easiest to deliver) that has the
+ * signal unmasked.
+ */
+
+/*
+ * This signal handler only delivers asynchronous signals.
+ * This must be called with upcalls disabled and without
+ * holding any locks.
*/
void
-_thread_sig_dispatch(int sig)
+_thr_sig_dispatch(struct kse *curkse, int sig, siginfo_t *info)
+{
+ struct pthread *thread;
+
+ DBG_MSG(">>> _thr_sig_dispatch(%d)\n", sig);
+
+ /* Some signals need special handling: */
+ handle_special_signals(curkse, sig);
+
+ if ((thread = thr_sig_find(curkse, sig, info)) != NULL) {
+ /*
+ * Setup the target thread to receive the signal:
+ */
+ DBG_MSG("Got signal %d, selecting thread %p\n", sig, thread);
+ KSE_SCHED_LOCK(curkse, thread->kseg);
+ thr_sig_add(thread, sig, info);
+ KSE_SCHED_UNLOCK(curkse, thread->kseg);
+ }
+}
+
+void
+_thr_sig_handler(int sig, siginfo_t *info, ucontext_t *ucp)
+{
+ void (*sigfunc)(int, siginfo_t *, void *);
+ struct kse *curkse;
+
+ curkse = _get_curkse();
+ if ((curkse == NULL) || ((curkse->k_flags & KF_STARTED) == 0)) {
+ /* Upcalls are not yet started; just call the handler. */
+ sigfunc = _thread_sigact[sig - 1].sa_sigaction;
+ ucp->uc_sigmask = _thr_proc_sigmask;
+ if (((__sighandler_t *)sigfunc != SIG_DFL) &&
+ ((__sighandler_t *)sigfunc != SIG_IGN)) {
+ if (((_thread_sigact[sig - 1].sa_flags & SA_SIGINFO)
+ != 0) || (info == NULL))
+ (*(sigfunc))(sig, info, ucp);
+ else
+ (*(sigfunc))(sig, (siginfo_t *)info->si_code,
+ ucp);
+ }
+ }
+ else {
+ /* Nothing. */
+ DBG_MSG("Got signal %d\n", sig);
+ sigaddset(&curkse->k_mbx.km_sigscaught, sig);
+ ucp->uc_sigmask = _thr_proc_sigmask;
+ }
+}
+
+static void
+thr_sig_invoke_handler(struct pthread *curthread, int sig, siginfo_t *info,
+ ucontext_t *ucp)
{
- struct pthread *pthread;
+ void (*sigfunc)(int, siginfo_t *, void *);
+ sigset_t saved_mask;
+ int saved_seqno;
- DBG_MSG(">>> _thread_sig_dispatch(%d)\n", sig);
+ /* Invoke the signal handler without going through the scheduler:
+ */
+ DBG_MSG("Got signal %d, calling handler for current thread %p\n",
+ sig, curthread);
- thread_sig_handle_special(sig);
- if (sigismember(&_thread_sigmask, sig))
- /* Don't deliver the signal if it's masked. */
- return;
- /* Mask the signal until it's handled. */
- sigaddset(&_thread_sigmask, sig);
- /* This signal will be handled; clear the pending flag. */
- sigdelset(&_thread_sigpending, sig);
+ /*
+ * Set up the thread's signal mask.
+ *
+ * The mask is changed in the thread's active signal mask
+ * (in the context) and not in the base signal mask because
+ * a thread is allowed to change its signal mask within a
+ * signal handler. If it does, the signal mask restored
+ * after the handler should be the same as that set by the
+ * thread during the handler, not the original mask from
+ * before calling the handler. The thread could also
+ * modify the signal mask in the context and expect this
+ * mask to be used.
+ */
+ THR_SCHED_LOCK(curthread, curthread);
+ saved_mask = curthread->tmbx.tm_context.uc_sigmask;
+ saved_seqno = curthread->sigmask_seqno;
+ SIGSETOR(curthread->tmbx.tm_context.uc_sigmask,
+ _thread_sigact[sig - 1].sa_mask);
+ sigaddset(&curthread->tmbx.tm_context.uc_sigmask, sig);
+ THR_SCHED_UNLOCK(curthread, curthread);
/*
- * Deliver the signal to a thread.
+ * Check that a custom handler is installed and that
+ * the signal is not blocked:
*/
- if ((pthread = thread_sig_find(sig)) == NULL) {
- DBG_MSG("No thread to handle signal %d\n", sig);
- return;
+ sigfunc = _thread_sigact[sig - 1].sa_sigaction;
+ ucp->uc_sigmask = _thr_proc_sigmask;
+ if (((__sighandler_t *)sigfunc != SIG_DFL) &&
+ ((__sighandler_t *)sigfunc != SIG_IGN)) {
+ if (((_thread_sigact[sig - 1].sa_flags & SA_SIGINFO) != 0) ||
+ (info == NULL))
+ (*(sigfunc))(sig, info, ucp);
+ else
+ (*(sigfunc))(sig, (siginfo_t *)info->si_code, ucp);
}
- DBG_MSG("Got signal %d, selecting thread %p\n", sig, pthread);
- thread_sig_add(pthread, sig, /*has_args*/ 1);
+
+ /*
+ * Restore the thread's signal mask.
+ */
+ if (saved_seqno == curthread->sigmask_seqno)
+ curthread->tmbx.tm_context.uc_sigmask = saved_mask;
+ else
+ curthread->tmbx.tm_context.uc_sigmask = curthread->sigmask;
}
/*
- * Find a thread that can handle the signal.
+ * Find a thread that can handle the signal. This must be called
+ * with upcalls disabled.
*/
-static struct pthread *
-thread_sig_find(int sig)
+struct pthread *
+thr_sig_find(struct kse *curkse, int sig, siginfo_t *info)
{
- struct pthread *curthread = _get_curthread();
+ int handler_installed;
struct pthread *pthread, *pthread_next;
struct pthread *suspended_thread, *signaled_thread;
DBG_MSG("Looking for thread to handle signal %d\n", sig);
+
+ handler_installed = (_thread_sigact[sig - 1].sa_handler != SIG_IGN) &&
+ (_thread_sigact[sig - 1].sa_handler != SIG_DFL);
+
/* Check if the signal requires a dump of thread information: */
if (sig == SIGINFO) {
/* Dump thread information to file: */
_thread_dump_info();
-
- /* Unblock this signal to allow further dumps: */
- sigdelset(&_thread_sigmask, sig);
}
-
/*
* Enter a loop to look for threads that have the signal
* unmasked. POSIX specifies that a thread in a sigwait
@@ -123,13 +282,10 @@ thread_sig_find(int sig)
* installed, the signal only affects threads in sigwait.
*/
suspended_thread = NULL;
- if ((curthread != &_thread_kern_thread) &&
- !sigismember(&curthread->mailbox.tm_context.uc_sigmask, sig))
- signaled_thread = curthread;
- else
- signaled_thread = NULL;
+ signaled_thread = NULL;
- for (pthread = TAILQ_FIRST(&_waitingq);
+ KSE_LOCK_ACQUIRE(curkse, &_thread_list_lock);
+ for (pthread = TAILQ_FIRST(&_thread_list);
pthread != NULL; pthread = pthread_next) {
/*
* Grab the next thread before possibly destroying
@@ -139,19 +295,17 @@ thread_sig_find(int sig)
if ((pthread->state == PS_SIGWAIT) &&
sigismember(pthread->data.sigwait, sig)) {
- /* Change the state of the thread to run: */
- PTHREAD_NEW_STATE(pthread, PS_RUNNING);
+ /* Take the scheduling lock. */
+ KSE_SCHED_LOCK(curkse, pthread->kseg);
/*
- * A signal handler is not invoked for threads
- * in sigwait. Clear the blocked and pending
- * flags.
+ * Return the signal number and make the
+ * thread runnable.
*/
- sigdelset(&_thread_sigmask, sig);
- sigdelset(&_thread_sigpending, sig);
-
- /* Return the signal number: */
pthread->signo = sig;
+ _thr_setrunnable_unlocked(pthread);
+
+ KSE_SCHED_UNLOCK(curkse, pthread->kseg);
/*
* POSIX doesn't specify which thread
@@ -163,11 +317,18 @@ thread_sig_find(int sig)
* to other threads and do not add the signal
* to the process pending set.
*/
+ KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
+ DBG_MSG("Waking thread %p in sigwait with signal %d\n",
+ pthread, sig);
return (NULL);
}
- if (!sigismember(
- &pthread->mailbox.tm_context.uc_sigmask, sig) &&
- ((pthread->flags & PTHREAD_FLAGS_SUSPENDED) == 0)) {
+ else if ((pthread->state == PS_DEAD) ||
+ (pthread->state == PS_DEADLOCK) ||
+ ((pthread->flags & THR_FLAGS_EXITING) != 0))
+ ; /* Skip this thread. */
+ else if ((handler_installed != 0) &&
+ !sigismember(&pthread->tmbx.tm_context.uc_sigmask, sig) &&
+ ((pthread->flags & THR_FLAGS_SUSPENDED) == 0)) {
if (pthread->state == PS_SIGSUSPEND) {
if (suspended_thread == NULL)
suspended_thread = pthread;
@@ -175,34 +336,33 @@ thread_sig_find(int sig)
signaled_thread = pthread;
}
}
+ KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
/*
- * If we didn't find a thread in the waiting queue,
- * check the all threads queue:
+ * Only perform wakeups and signal delivery if there is a
+ * custom handler installed:
*/
- if (suspended_thread == NULL &&
- signaled_thread == NULL) {
+ if (handler_installed == 0) {
/*
- * Enter a loop to look for other threads
- * capable of receiving the signal:
+ * There is no handler installed; nothing to do here.
*/
- TAILQ_FOREACH(pthread, &_thread_list, tle) {
- if (!sigismember(
- &pthread->mailbox.tm_context.uc_sigmask, sig)) {
- signaled_thread = pthread;
- break;
- }
- }
- }
-
- if (suspended_thread == NULL &&
- signaled_thread == NULL)
+ } else if (suspended_thread == NULL &&
+ signaled_thread == NULL) {
/*
* Add it to the set of signals pending
* on the process:
*/
- sigaddset(&_thread_sigpending, sig);
- else {
+ KSE_LOCK_ACQUIRE(curkse, &_thread_signal_lock);
+ if (!sigismember(&_thr_proc_sigpending, sig)) {
+ sigaddset(&_thr_proc_sigpending, sig);
+ if (info == NULL)
+ build_siginfo(&_thr_proc_siginfo[sig], sig);
+ else
+ memcpy(&_thr_proc_siginfo[sig], info,
+ sizeof(*info));
+ }
+ KSE_LOCK_RELEASE(curkse, &_thread_signal_lock);
+ } else {
/*
* We only deliver the signal to one thread;
* give preference to the suspended thread:
@@ -213,119 +373,187 @@ thread_sig_find(int sig)
pthread = signaled_thread;
return (pthread);
}
-
- /* Returns nothing. */
return (NULL);
}
-#if __XXX_NOT_YET__
+static void
+build_siginfo(siginfo_t *info, int signo)
+{
+ bzero(info, sizeof(*info));
+ info->si_signo = signo;
+ info->si_pid = _thr_pid;
+}
+
+/*
+ * This is called by a thread when it has pending signals to deliver.
+ * It should only be called from the context of the thread.
+ */
void
-_thread_sig_check_pending(struct pthread *pthread)
+_thr_sig_rundown(struct pthread *curthread, ucontext_t *ucp,
+ struct pthread_sigframe *psf)
{
- sigset_t sigset;
- int i;
+ struct pthread_sigframe psf_save;
+ sigset_t sigset;
+ int i;
+
+ THR_SCHED_LOCK(curthread, curthread);
+ memcpy(&sigset, &curthread->sigpend, sizeof(sigset));
+ sigemptyset(&curthread->sigpend);
+ if (psf != NULL) {
+ memcpy(&psf_save, psf, sizeof(*psf));
+ SIGSETOR(sigset, psf_save.psf_sigset);
+ sigemptyset(&psf->psf_sigset);
+ }
+ THR_SCHED_UNLOCK(curthread, curthread);
+ /* Check the thread's previous state: */
+ if ((psf != NULL) && (psf_save.psf_state != PS_RUNNING)) {
+ /*
+ * Do a little cleanup handling for those threads in
+ * queues before calling the signal handler. Signals
+ * for these threads are temporarily blocked until
+ * after cleanup handling.
+ */
+ switch (psf_save.psf_state) {
+ case PS_COND_WAIT:
+ _cond_wait_backout(curthread);
+ psf_save.psf_state = PS_RUNNING;
+ break;
+
+ case PS_MUTEX_WAIT:
+ _mutex_lock_backout(curthread);
+ psf_save.psf_state = PS_RUNNING;
+ break;
+
+ default:
+ break;
+ }
+ }
/*
- * Check if there are pending signals for the running
- * thread or process that aren't blocked:
+ * Lower the priority before calling the handler in case
+ * it never returns (longjmps back):
*/
- sigset = pthread->sigpend;
- SIGSETOR(sigset, _thread_sigpending);
- SIGSETNAND(sigset, pthread->sigmask);
- SIGSETNAND(sigset, _thread_sigmask);
- if (SIGNOTEMPTY(sigset)) {
- for (i = 1; i < NSIG; i++) {
- if (sigismember(&sigset, i) != 0) {
- if (sigismember(&pthread->sigpend, i) != 0)
- thread_sig_add(pthread, i,
- /*has_args*/ 0);
- else {
- thread_sig_add(pthread, i,
- /*has_args*/ 1);
- sigdelset(&_thread_sigpending, i);
- }
- }
+ curthread->active_priority &= ~THR_SIGNAL_PRIORITY;
+
+ for (i = 1; i < NSIG; i++) {
+ if (sigismember(&sigset, i) != 0) {
+ /* Call the handler: */
+ thr_sig_invoke_handler(curthread, i,
+ &curthread->siginfo[i], ucp);
}
}
+
+ THR_SCHED_LOCK(curthread, curthread);
+ if (psf != NULL)
+ thr_sigframe_restore(curthread, &psf_save);
+ /* Restore the signal mask. */
+ curthread->tmbx.tm_context.uc_sigmask = curthread->sigmask;
+ THR_SCHED_UNLOCK(curthread, curthread);
+ _thr_sig_check_pending(curthread);
}
-#endif
-#if __XXX_NOT_YET__
/*
- * This can only be called from the kernel scheduler. It assumes that
- * all thread contexts are saved and that a signal frame can safely be
- * added to any user thread.
+ * This checks pending signals for the current thread. It should be
+ * called whenever a thread changes its signal mask. Note that this
+ * is called from a thread (using its stack).
+ *
+ * XXX - We might want to just check to see if there are pending
+ * signals for the thread here, but enter the UTS scheduler
+ * to actually install the signal handler(s).
*/
void
-_thread_sig_handle_pending(void)
+_thr_sig_check_pending(struct pthread *curthread)
{
- struct pthread *pthread;
- int sig;
+ sigset_t sigset;
+ sigset_t pending_process;
+ sigset_t pending_thread;
+ kse_critical_t crit;
+ int i;
+
+ curthread->check_pending = 0;
/*
- * Check the array of pending signals:
+ * Check if there are pending signals for the running
+ * thread or process that aren't blocked:
*/
- for (sig = 1; sig <= NSIG; sig++) {
- if (sigismember(&_thread_sigpending, sig)) {
- /* This signal is no longer pending. */
- sigdelset(&_thread_sigpending, sig);
- /* Some signals need special handling. */
- thread_sig_handle_special(sig);
- /* Deliver the signal. */
- if (sigismember(&_thread_sigmask, sig)) {
- sigaddset(&_thread_sigmask, sig);
- if ((pthread = thread_sig_find(sig)) != NULL) {
- /*
- * Setup the target thread to receive
- * the signal:
- */
- thread_sig_add(pthread, sig,
- /*has_args*/ 1);
+ crit = _kse_critical_enter();
+ KSE_LOCK_ACQUIRE(curthread->kse, &_thread_signal_lock);
+ sigset = _thr_proc_sigpending;
+ KSE_LOCK_RELEASE(curthread->kse, &_thread_signal_lock);
+ _kse_critical_leave(crit);
+
+ THR_SCHED_LOCK(curthread, curthread);
+ SIGSETOR(sigset, curthread->sigpend);
+ SIGSETNAND(sigset, curthread->tmbx.tm_context.uc_sigmask);
+ if (SIGNOTEMPTY(sigset)) {
+ ucontext_t uc;
+ volatile int once;
+
+ curthread->check_pending = 0;
+ THR_SCHED_UNLOCK(curthread, curthread);
+
+ /*
+ * Split the pending signals into those that were
+ * pending on the process and those that were pending
+ * on the thread.
+ */
+ sigfillset(&pending_process);
+ sigfillset(&pending_thread);
+ for (i = 1; i < NSIG; i++) {
+ if (sigismember(&sigset, i) != 0) {
+ if (sigismember(&curthread->sigpend, i) != 0) {
+ build_siginfo(&curthread->siginfo[i], i);
+ sigdelset(&pending_thread, i);
+ } else {
+ memcpy(&curthread->siginfo[i],
+ &_thr_proc_siginfo[i],
+ sizeof(siginfo_t));
+ sigdelset(&pending_process, i);
+ }
+ }
+ }
+ /*
+ * Remove any process pending signals that were scheduled
+ * to be delivered from the process' pending set.
+ */
+ crit = _kse_critical_enter();
+ KSE_LOCK_ACQUIRE(curthread->kse, &_thread_signal_lock);
+ SIGSETAND(_thr_proc_sigpending, pending_process);
+ KSE_LOCK_RELEASE(curthread->kse, &_thread_signal_lock);
+ _kse_critical_leave(crit);
+
+ /*
+ * Remove any thread pending signals that were scheduled
+ * to be delivered from the thread's pending set.
+ */
+ THR_SCHED_LOCK(curthread, curthread);
+ SIGSETAND(curthread->sigpend, pending_thread);
+ THR_SCHED_UNLOCK(curthread, curthread);
+
+ once = 0;
+ THR_GETCONTEXT(&uc);
+ if (once == 0) {
+ once = 1;
+ for (i = 1; i < NSIG; i++) {
+ if (sigismember(&sigset, i) != 0) {
+ /* Call the handler: */
+ thr_sig_invoke_handler(curthread, i,
+ &curthread->siginfo[i], &uc);
}
}
}
}
+ else
+ THR_SCHED_UNLOCK(curthread, curthread);
}
-#endif
/*
- * Do special processing to the thread states before we deliver
- * a signal to the application.
+ * This must be called with upcalls disabled.
*/
static void
-thread_sig_handle_special(int sig)
+handle_special_signals(struct kse *curkse, int sig)
{
- struct pthread *pthread, *pthread_next;
- int i;
-
switch (sig) {
- case SIGCHLD:
- /*
- * Enter a loop to wake up all threads waiting
- * for a process to complete:
- */
- for (pthread = TAILQ_FIRST(&_waitingq);
- pthread != NULL; pthread = pthread_next) {
- /*
- * Grab the next thread before possibly
- * destroying the link entry:
- */
- pthread_next = TAILQ_NEXT(pthread, pqe);
-
- /*
- * If this thread is waiting for a child
- * process to complete, wake it up:
- */
- if (pthread->state == PS_WAIT_WAIT) {
- /* Make the thread runnable: */
- PTHREAD_NEW_STATE(pthread,PS_RUNNING);
-
- /* Return the signal number: */
- pthread->signo = sig;
- }
- }
- break;
-
/*
* POSIX says that pending SIGCONT signals are
* discarded when one of these signals occurs.
@@ -333,13 +561,9 @@ thread_sig_handle_special(int sig)
case SIGTSTP:
case SIGTTIN:
case SIGTTOU:
- /*
- * Enter a loop to discard pending SIGCONT
- * signals:
- */
- TAILQ_FOREACH(pthread, &_thread_list, tle) {
- sigdelset(&pthread->sigpend, SIGCONT);
- }
+ KSE_LOCK_ACQUIRE(curkse, &_thread_signal_lock);
+ sigdelset(&_thr_proc_sigpending, SIGCONT);
+ KSE_LOCK_RELEASE(curkse, &_thread_signal_lock);
break;
default:
@@ -352,12 +576,17 @@ thread_sig_handle_special(int sig)
* This function is only called if there is a handler installed
* for the signal, and if the target thread has the signal
* unmasked.
+ *
+ * This must be called with the thread's scheduling lock held.
*/
static void
-thread_sig_add(struct pthread *pthread, int sig, int has_args)
+thr_sig_add(struct pthread *pthread, int sig, siginfo_t *info)
{
+ int restart;
int suppress_handler = 0;
+ restart = _thread_sigact[sig - 1].sa_flags & SA_RESTART;
+
/* Make sure this signal isn't still in the pending set: */
sigdelset(&pthread->sigpend, sig);
@@ -370,6 +599,8 @@ thread_sig_add(struct pthread *pthread, int sig, int has_args)
*/
case PS_DEAD:
case PS_DEADLOCK:
+ case PS_LOCKWAIT:
+ case PS_SUSPENDED:
case PS_STATE_MAX:
/*
* You can't call a signal handler for threads in these
@@ -387,56 +618,21 @@ thread_sig_add(struct pthread *pthread, int sig, int has_args)
* Remove the thread from the queue before changing its
* priority:
*/
- if ((pthread->flags & PTHREAD_FLAGS_IN_PRIOQ) != 0)
- PTHREAD_PRIOQ_REMOVE(pthread);
- break;
-
- case PS_SUSPENDED:
- break;
-
- case PS_SPINBLOCK:
- /* Remove the thread from the workq and waitq: */
- PTHREAD_WORKQ_REMOVE(pthread);
- PTHREAD_WAITQ_REMOVE(pthread);
- /* Make the thread runnable: */
- PTHREAD_SET_STATE(pthread, PS_RUNNING);
- break;
-
- case PS_SIGWAIT:
- /* The signal handler is not called for threads in SIGWAIT. */
- suppress_handler = 1;
- /* Wake up the thread if the signal is blocked. */
- if (sigismember(pthread->data.sigwait, sig)) {
- /* Change the state of the thread to run: */
- PTHREAD_NEW_STATE(pthread, PS_RUNNING);
-
- /* Return the signal number: */
- pthread->signo = sig;
- } else
- /* Increment the pending signal count. */
- sigaddset(&pthread->sigpend, sig);
- break;
-
- /*
- * The wait state is a special case due to the handling of
- * SIGCHLD signals.
- */
- case PS_WAIT_WAIT:
- if (sig == SIGCHLD) {
- /* Change the state of the thread to run: */
- PTHREAD_WAITQ_REMOVE(pthread);
- PTHREAD_SET_STATE(pthread, PS_RUNNING);
-
- /* Return the signal number: */
- pthread->signo = sig;
- }
+ if ((pthread->flags & THR_FLAGS_IN_RUNQ) != 0)
+ THR_RUNQ_REMOVE(pthread);
else {
/*
- * Mark the thread as interrupted only if the
- * restart flag is not set on the signal action:
+ * This thread is active; add the signal to the
+ * pending set and mark it as having pending
+ * signals.
*/
- PTHREAD_WAITQ_REMOVE(pthread);
- PTHREAD_SET_STATE(pthread, PS_RUNNING);
+ suppress_handler = 1;
+ sigaddset(&pthread->sigpend, sig);
+ build_siginfo(&pthread->siginfo[sig], sig);
+ pthread->check_pending = 1;
+ if ((pthread->blocked != 0) &&
+ !THR_IN_CRITICAL(pthread))
+ kse_thr_interrupt(&pthread->tmbx /* XXX - restart?!?! */);
}
break;
@@ -451,18 +647,7 @@ thread_sig_add(struct pthread *pthread, int sig, int has_args)
* be added back to the wait queue once all signal
* handlers have been invoked.
*/
- PTHREAD_WAITQ_REMOVE(pthread);
- break;
-
- case PS_JOIN:
- /*
- * Remove the thread from the wait queue. It will
- * be added back to the wait queue once all signal
- * handlers have been invoked.
- */
- PTHREAD_WAITQ_REMOVE(pthread);
- /* Make the thread runnable: */
- PTHREAD_SET_STATE(pthread, PS_RUNNING);
+ KSE_WAITQ_REMOVE(pthread->kse, pthread);
break;
case PS_SLEEP_WAIT:
@@ -471,55 +656,76 @@ thread_sig_add(struct pthread *pthread, int sig, int has_args)
* regardless of SA_RESTART:
*/
pthread->interrupted = 1;
- /* Remove threads in poll and select from the workq: */
- if ((pthread->flags & PTHREAD_FLAGS_IN_WORKQ) != 0)
- PTHREAD_WORKQ_REMOVE(pthread);
- PTHREAD_WAITQ_REMOVE(pthread);
- PTHREAD_SET_STATE(pthread, PS_RUNNING);
+ KSE_WAITQ_REMOVE(pthread->kse, pthread);
+ THR_SET_STATE(pthread, PS_RUNNING);
break;
+ case PS_JOIN:
case PS_SIGSUSPEND:
- PTHREAD_WAITQ_REMOVE(pthread);
- PTHREAD_SET_STATE(pthread, PS_RUNNING);
+ KSE_WAITQ_REMOVE(pthread->kse, pthread);
+ THR_SET_STATE(pthread, PS_RUNNING);
break;
- }
- DBG_MSG(">>> suppress_handler = %d\n", suppress_handler);
+ case PS_SIGWAIT:
+ /* The signal handler is not called for threads in SIGWAIT. */
+ suppress_handler = 1;
+ /* Wake up the thread if the signal is blocked. */
+ if (sigismember(pthread->data.sigwait, sig)) {
+ /* Return the signal number: */
+ pthread->signo = sig;
+
+ /* Make the thread runnable: */
+ _thr_setrunnable_unlocked(pthread);
+ } else
+ /* Increment the pending signal count. */
+ sigaddset(&pthread->sigpend, sig);
+ break;
+ }
if (suppress_handler == 0) {
- /* Setup a signal frame and save the current threads state: */
- thread_sigframe_add(pthread, sig, has_args);
-
- /*
- * Signals are deferred until just before the threads
- * signal handler is invoked:
- */
- pthread->sig_defer_count = 1;
+ if (pthread->curframe == NULL) {
+ /*
+ * This thread is active. Just add it to the
+ * thread's pending set.
+ */
+ sigaddset(&pthread->sigpend, sig);
+ pthread->check_pending = 1;
+ if (info == NULL)
+ build_siginfo(&pthread->siginfo[sig], sig);
+ else
+ memcpy(&pthread->siginfo[sig], info,
+ sizeof(*info));
+ } else {
+ /*
+ * Set up a signal frame and save the current thread's
+ * state:
+ */
+ thr_sigframe_add(pthread, sig, info);
+ }
- /* Make sure the thread is runnable: */
if (pthread->state != PS_RUNNING)
- PTHREAD_SET_STATE(pthread, PS_RUNNING);
+ THR_SET_STATE(pthread, PS_RUNNING);
+
/*
* The thread should be removed from all scheduling
- * queues at this point. Raise the priority and place
- * the thread in the run queue. It is also possible
- * for a signal to be sent to a suspended thread,
- * mostly via pthread_kill(). If a thread is suspended,
- * don't insert it into the priority queue; just set
- * its state to suspended and it will run the signal
- * handler when it is resumed.
+ * queues at this point. Raise the priority and
+ * place the thread in the run queue. It is also
+ * possible for a signal to be sent to a suspended
+ * thread, mostly via pthread_kill(). If a thread
+ * is suspended, don't insert it into the priority
+ * queue; just set its state to suspended and it
+ * will run the signal handler when it is resumed.
*/
- pthread->active_priority |= PTHREAD_SIGNAL_PRIORITY;
- if ((pthread->flags & PTHREAD_FLAGS_SUSPENDED) != 0)
- PTHREAD_SET_STATE(pthread, PS_SUSPENDED);
- else
- PTHREAD_PRIOQ_INSERT_TAIL(pthread);
+ pthread->active_priority |= THR_SIGNAL_PRIORITY;
+ if ((pthread->flags & THR_FLAGS_SUSPENDED) != 0)
+ THR_SET_STATE(pthread, PS_SUSPENDED);
+ else if ((pthread->flags & THR_FLAGS_IN_RUNQ) == 0)
+ THR_RUNQ_INSERT_TAIL(pthread);
}
}
-#if __XXX_NOT_YET__
static void
-thread_sig_check_state(struct pthread *pthread, int sig)
+thr_sig_check_state(struct pthread *pthread, int sig)
{
/*
* Process according to thread state:
@@ -528,91 +734,54 @@ thread_sig_check_state(struct pthread *pthread, int sig)
/*
* States which do not change when a signal is trapped:
*/
- case PS_DEAD:
- case PS_DEADLOCK:
- case PS_STATE_MAX:
case PS_RUNNING:
- case PS_SUSPENDED:
- case PS_SPINBLOCK:
+ case PS_LOCKWAIT:
+ case PS_MUTEX_WAIT:
case PS_COND_WAIT:
case PS_JOIN:
- case PS_MUTEX_WAIT:
+ case PS_SUSPENDED:
+ case PS_DEAD:
+ case PS_DEADLOCK:
+ case PS_STATE_MAX:
break;
case PS_SIGWAIT:
/* Wake up the thread if the signal is blocked. */
if (sigismember(pthread->data.sigwait, sig)) {
- /* Change the state of the thread to run: */
- PTHREAD_NEW_STATE(pthread, PS_RUNNING);
-
/* Return the signal number: */
pthread->signo = sig;
+
+ /* Change the state of the thread to run: */
+ _thr_setrunnable_unlocked(pthread);
} else
/* Increment the pending signal count. */
sigaddset(&pthread->sigpend, sig);
break;
- /*
- * The wait state is a special case due to the handling of
- * SIGCHLD signals.
- */
- case PS_WAIT_WAIT:
- if (sig == SIGCHLD) {
- /*
- * Remove the thread from the wait queue and
- * make it runnable:
- */
- PTHREAD_NEW_STATE(pthread, PS_RUNNING);
-
- /* Return the signal number: */
- pthread->signo = sig;
- }
- break;
-
- case PS_FDLR_WAIT:
- case PS_FDLW_WAIT:
case PS_SIGSUSPEND:
case PS_SLEEP_WAIT:
/*
* Remove the thread from the wait queue and make it
* runnable:
*/
- PTHREAD_NEW_STATE(pthread, PS_RUNNING);
-
- /* Flag the operation as interrupted: */
- pthread->interrupted = 1;
- break;
-
- /*
- * These states are additionally in the work queue:
- */
- case PS_FDR_WAIT:
- case PS_FDW_WAIT:
- case PS_FILE_WAIT:
- case PS_POLL_WAIT:
- case PS_SELECT_WAIT:
- /*
- * Remove the thread from the wait and work queues, and
- * make it runnable:
- */
- PTHREAD_WORKQ_REMOVE(pthread);
- PTHREAD_NEW_STATE(pthread, PS_RUNNING);
+ _thr_setrunnable_unlocked(pthread);
/* Flag the operation as interrupted: */
pthread->interrupted = 1;
break;
}
}
-#endif
-#if __XXX_NOT_YET__
/*
* Send a signal to a specific thread (ala pthread_kill):
*/
void
-_thread_sig_send(struct pthread *pthread, int sig)
+_thr_sig_send(struct pthread *pthread, int sig)
{
- struct pthread *curthread = _get_curthread();
+ struct pthread *curthread = _get_curthread();
+
+ /* Lock the scheduling queue of the target thread. */
+ THR_SCHED_LOCK(curthread, pthread);
/* Check for signals whose actions are SIG_DFL: */
if (_thread_sigact[sig - 1].sa_handler == SIG_DFL) {
@@ -620,17 +789,21 @@ _thread_sig_send(struct pthread *pthread, int sig)
* Check to see if a temporary signal handler is
* installed for sigwaiters:
*/
- if (_thread_dfl_count[sig] == 0)
+ if (_thread_dfl_count[sig] == 0) {
/*
* Deliver the signal to the process if a handler
* is not installed:
*/
+ THR_SCHED_UNLOCK(curthread, pthread);
kill(getpid(), sig);
+ THR_SCHED_LOCK(curthread, pthread);
+ }
/*
* Assuming we're still running after the above kill(),
* make any necessary state changes to the thread:
*/
- thread_sig_check_state(pthread, sig);
+ thr_sig_check_state(pthread, sig);
+ THR_SCHED_UNLOCK(curthread, pthread);
}
/*
* Check that the signal is not being ignored:
@@ -638,145 +811,98 @@ _thread_sig_send(struct pthread *pthread, int sig)
else if (_thread_sigact[sig - 1].sa_handler != SIG_IGN) {
if (pthread->state == PS_SIGWAIT &&
sigismember(pthread->data.sigwait, sig)) {
- /* Change the state of the thread to run: */
- PTHREAD_NEW_STATE(pthread, PS_RUNNING);
-
/* Return the signal number: */
pthread->signo = sig;
- } else if (sigismember(&pthread->sigmask, sig))
+
+ /* Change the state of the thread to run: */
+ _thr_setrunnable_unlocked(pthread);
+ THR_SCHED_UNLOCK(curthread, pthread);
+ } else if (sigismember(&pthread->tmbx.tm_context.uc_sigmask, sig)) {
/* Add the signal to the pending set: */
sigaddset(&pthread->sigpend, sig);
- else if (pthread == curthread)
- /* Call the signal handler for the current thread: */
- thread_sig_invoke_handler(sig, NULL, NULL);
- else {
- /* Protect the scheduling queues: */
- _thread_kern_sig_defer();
+ THR_SCHED_UNLOCK(curthread, pthread);
+ } else if (pthread == curthread) {
+ ucontext_t uc;
+ siginfo_t info;
+ volatile int once;
+
+ THR_SCHED_UNLOCK(curthread, pthread);
+ build_siginfo(&info, sig);
+ once = 0;
+ THR_GETCONTEXT(&uc);
+ if (once == 0) {
+ once = 1;
+ /*
+ * Call the signal handler for the current
+ * thread:
+ */
+ thr_sig_invoke_handler(curthread, sig,
+ &info, &uc);
+ }
+ } else {
/*
* Perform any state changes due to signal
* arrival:
*/
- thread_sig_add(pthread, sig, /* has args */ 0);
- /* Unprotect the scheduling queues: */
- _thread_kern_sig_undefer();
+ thr_sig_add(pthread, sig, NULL);
+ THR_SCHED_UNLOCK(curthread, pthread);
}
}
}
-#endif
-/*
- * User thread signal handler wrapper.
- *
- * thread - current running thread
- */
-void
-_thread_sig_wrapper(int sig, siginfo_t *info, ucontext_t *context)
+static void
+thr_sigframe_add(struct pthread *thread, int sig, siginfo_t *info)
{
- struct pthread_state_data psd;
- struct pthread *thread = _get_curthread();
- __siginfohandler_t *handler;
-
- /* Save the thread's previous state. */
- thread_sigframe_save(thread, &psd);
+ if (thread->curframe == NULL)
+ PANIC("Thread doesn't have signal frame ");
- /* Check the threads previous state: */
- if (psd.psd_state != PS_RUNNING) {
+ if (thread->check_pending == 0) {
/*
- * Do a little cleanup handling for those threads in
- * queues before calling the signal handler. Signals
- * for these threads are temporarily blocked until
- * after cleanup handling.
+ * Multiple signals can be added to the same signal
+ * frame. Only save the thread's state the first time.
*/
- switch (psd.psd_state) {
- case PS_COND_WAIT:
- _cond_wait_backout(thread);
- psd.psd_state = PS_RUNNING;
- break;
-
- case PS_MUTEX_WAIT:
- _mutex_lock_backout(thread);
- psd.psd_state = PS_RUNNING;
- break;
-
- default:
- break;
- }
+ thr_sigframe_save(thread, thread->curframe);
+ thread->check_pending = 1;
+ thread->flags &= THR_FLAGS_PRIVATE;
}
-
- /* Unblock the signal in case we don't return from the handler. */
- /*
- * XXX - This is totally bogus. We need to lock the signal mask
- * somehow.
- */
- sigdelset(&_thread_sigmask, sig);
-
- /*
- * Lower the priority before calling the handler in case
- * it never returns (longjmps back):
- */
- thread->active_priority &= ~PTHREAD_SIGNAL_PRIORITY;
-
- /*
- * Reenable interruptions without checking for the need to
- * context switch.
- */
- thread->sig_defer_count = 0;
-
- if (_thread_sigact[sig -1].sa_handler != NULL) {
- handler = (__siginfohandler_t *)
- _thread_sigact[sig - 1].sa_handler;
- handler(sig, info, context);
- }
-
- /* Restore the signal frame. */
- thread_sigframe_restore(thread, &psd);
-
- /* The signal mask was restored; check for any pending signals. */
- /* XXX - thread->check_pending = 1; */
-}
-
-static void
-thread_sigframe_add(struct pthread *thread, int sig, int has_args)
-{
- struct pthread_signal_frame *psf = NULL;
- unsigned long stackp;
-
- /* Add a signal frame to the stack, pointing to our signal wrapper. */
- signalcontext(&thread->mailbox.tm_context, sig,
- (__sighandler_t *)_thread_sig_wrapper);
+ sigaddset(&thread->curframe->psf_sigset, sig);
+ if (info != NULL)
+ memcpy(&thread->siginfo[sig], info, sizeof(*info));
+ else
+ build_siginfo(&thread->siginfo[sig], sig);
/* Setup the new signal mask. */
- SIGSETOR(thread->mailbox.tm_context.uc_sigmask,
+ SIGSETOR(thread->tmbx.tm_context.uc_sigmask,
_thread_sigact[sig - 1].sa_mask);
- sigaddset(&thread->mailbox.tm_context.uc_sigmask, sig);
+ sigaddset(&thread->tmbx.tm_context.uc_sigmask, sig);
}
-static void
-thread_sigframe_restore(struct pthread *thread, struct pthread_state_data *psd)
+void
+thr_sigframe_restore(struct pthread *thread, struct pthread_sigframe *psf)
{
- thread->wakeup_time = psd->psd_wakeup_time;
- thread->data = psd->psd_wait_data;
- thread->state = psd->psd_state;
- thread->flags = psd->psd_flags;
- thread->interrupted = psd->psd_interrupted;
- thread->sig_defer_count = psd->psd_sig_defer_count;
+ thread->flags = psf->psf_flags;
+ thread->interrupted = psf->psf_interrupted;
+ thread->signo = psf->psf_signo;
+ thread->state = psf->psf_state;
+ thread->data = psf->psf_wait_data;
+ thread->wakeup_time = psf->psf_wakeup_time;
+ if (thread->sigmask_seqno == psf->psf_seqno)
+ thread->tmbx.tm_context.uc_sigmask = psf->psf_sigmask;
+ else
+ thread->tmbx.tm_context.uc_sigmask = thread->sigmask;
}
static void
-thread_sigframe_save(struct pthread *thread, struct pthread_state_data *psd)
+thr_sigframe_save(struct pthread *thread, struct pthread_sigframe *psf)
{
- psd->psd_wakeup_time = thread->wakeup_time;
- psd->psd_wait_data = thread->data;
- psd->psd_state = thread->state;
- psd->psd_flags = thread->flags &
- (PTHREAD_FLAGS_PRIVATE | PTHREAD_FLAGS_TRACE);
- psd->psd_interrupted = thread->interrupted;
- psd->psd_sig_defer_count = thread->sig_defer_count;
-}
-
-void
-_thread_sig_handler(int sig, siginfo_t *info, ucontext_t *context)
-{
-
- /* Nothing. */
+ /* This has to initialize all members of the sigframe. */
+ psf->psf_flags = thread->flags & THR_FLAGS_PRIVATE;
+ psf->psf_interrupted = thread->interrupted;
+ psf->psf_signo = thread->signo;
+ psf->psf_state = thread->state;
+ psf->psf_wait_data = thread->data;
+ psf->psf_wakeup_time = thread->wakeup_time;
+ psf->psf_sigmask = thread->tmbx.tm_context.uc_sigmask;
+ psf->psf_seqno = thread->sigmask_seqno;
+ sigemptyset(&psf->psf_sigset);
}
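
The self-signal path earlier in this file (the `pthread == curthread` case) relies on THR_GETCONTEXT together with a volatile `once` flag: the handler body runs only on the first return from the context capture and is skipped if that saved context is ever resumed later. A minimal user-level sketch of the same idiom using the portable getcontext(3)/setcontext(3) interface rather than the library-internal THR_GETCONTEXT macro (all names here are illustrative, not part of this patch):

    #include <stdio.h>
    #include <ucontext.h>

    int
    main(void)
    {
    	ucontext_t uc;
    	volatile int once = 0;	/* volatile: must survive the context switch */

    	getcontext(&uc);	/* execution also resumes here on setcontext(&uc) */
    	if (once == 0) {
    		once = 1;
    		printf("first pass: handler-like work runs\n");
    		setcontext(&uc);	/* jump back; `once` is now 1 */
    	}
    	printf("second pass: the work is skipped\n");
    	return (0);
    }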
diff --git a/lib/libkse/thread/thr_sigaction.c b/lib/libkse/thread/thr_sigaction.c
index a16f859..7ede6d2 100644
--- a/lib/libkse/thread/thr_sigaction.c
+++ b/lib/libkse/thread/thr_sigaction.c
@@ -50,8 +50,8 @@ _sigaction(int sig, const struct sigaction * act, struct sigaction * oact)
errno = EINVAL;
ret = -1;
} else {
- if (_thread_initial == NULL)
- _thread_init();
+ if (_thr_initial == NULL)
+ _libpthread_init(NULL);
/*
* Check if the existing signal action structure contents are
@@ -76,14 +76,9 @@ _sigaction(int sig, const struct sigaction * act, struct sigaction * oact)
* Check if the kernel needs to be advised of a change
* in signal action:
*/
- if (act != NULL && sig != SIGCHLD) {
- /*
- * Ensure the signal handler cannot be interrupted
- * by other signals. Always request the POSIX signal
- * handler arguments.
- */
- sigfillset(&gact.sa_mask);
- gact.sa_flags = SA_SIGINFO | SA_ONSTACK;
+ if (act != NULL && sig != SIGINFO) {
+ gact.sa_mask = act->sa_mask;
+ gact.sa_flags = SA_SIGINFO | act->sa_flags;
/*
* Check if the signal handler is being set to
@@ -98,10 +93,10 @@ _sigaction(int sig, const struct sigaction * act, struct sigaction * oact)
* Specify the thread kernel signal
* handler:
*/
- gact.sa_handler = (void (*) ()) _thread_sig_handler;
+ gact.sa_handler = (void (*) ())_thr_sig_handler;
/* Change the signal action in the kernel: */
- if (__sys_sigaction(sig,&gact,NULL) != 0)
+ if (__sys_sigaction(sig, &gact, NULL) != 0)
ret = -1;
}
}
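
With this change the library forwards the caller's sa_mask and sa_flags (adding SA_SIGINFO) instead of forcing a filled mask. For reference, a plain application-level sigaction(2) call of the kind this wrapper intercepts might look as follows; the handler name and the choice of SIGUSR1 are illustrative only:

    #include <signal.h>
    #include <unistd.h>

    static void
    on_sigusr1(int sig, siginfo_t *info, void *ctx)
    {
    	(void)sig; (void)info; (void)ctx;
    	/* Only async-signal-safe calls here; write(2) is safe. */
    	write(STDOUT_FILENO, "got SIGUSR1\n", 12);
    }

    int
    main(void)
    {
    	struct sigaction act;

    	sigemptyset(&act.sa_mask);	/* this mask is now passed through */
    	act.sa_flags = SA_SIGINFO;	/* request the three-argument handler */
    	act.sa_sigaction = on_sigusr1;
    	if (sigaction(SIGUSR1, &act, NULL) != 0)
    		return (1);
    	raise(SIGUSR1);
    	return (0);
    }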
diff --git a/lib/libkse/thread/thr_sigmask.c b/lib/libkse/thread/thr_sigmask.c
index f98c421..d9cb839 100644
--- a/lib/libkse/thread/thr_sigmask.c
+++ b/lib/libkse/thread/thr_sigmask.c
@@ -36,6 +36,7 @@
#include <sys/signalvar.h>
#include <errno.h>
#include <signal.h>
+#include <string.h>
#include <pthread.h>
#include "thr_private.h"
@@ -44,32 +45,59 @@ __weak_reference(_pthread_sigmask, pthread_sigmask);
int
_pthread_sigmask(int how, const sigset_t *set, sigset_t *oset)
{
- int i;
struct pthread *curthread = _get_curthread();
+ int ret;
+ ret = 0;
if (oset != NULL)
- bcopy(&curthread->mailbox.tm_context.uc_sigmask, oset,
- sizeof(sigset_t));
- if (set == NULL)
- return (0);
- switch (how) {
- case SIG_BLOCK:
- for (i = 0; i < _SIG_WORDS; i++)
- curthread->mailbox.tm_context.uc_sigmask.__bits[i] |=
- set->__bits[i];
- break;
- case SIG_UNBLOCK:
- for (i = 0; i < _SIG_WORDS; i++)
- curthread->mailbox.tm_context.uc_sigmask.__bits[i] &=
- ~set->__bits[i];
- break;
- case SIG_SETMASK:
- bcopy(set, &curthread->mailbox.tm_context.uc_sigmask,
- sizeof(sigset_t));
- break;
- default:
- errno = EINVAL;
- return (-1);
+ /* Return the current mask: */
+ *oset = curthread->tmbx.tm_context.uc_sigmask;
+
+ /* Check if a new signal set was provided by the caller: */
+ if (set != NULL) {
+ THR_SCHED_LOCK(curthread, curthread);
+
+ /* Process according to what to do: */
+ switch (how) {
+ /* Block signals: */
+ case SIG_BLOCK:
+ /* Add signals to the existing mask: */
+ SIGSETOR(curthread->tmbx.tm_context.uc_sigmask, *set);
+ break;
+
+ /* Unblock signals: */
+ case SIG_UNBLOCK:
+ /* Clear signals from the existing mask: */
+ SIGSETNAND(curthread->tmbx.tm_context.uc_sigmask, *set);
+ break;
+
+ /* Set the signal process mask: */
+ case SIG_SETMASK:
+ /* Set the new mask: */
+ curthread->tmbx.tm_context.uc_sigmask = *set;
+ break;
+
+ /* Trap invalid actions: */
+ default:
+ /* Return an invalid argument: */
+ errno = EINVAL;
+ ret = -1;
+ break;
+ }
+
+ if (ret == 0) {
+ curthread->sigmask =
+ curthread->tmbx.tm_context.uc_sigmask;
+ curthread->sigmask_seqno++;
+ }
+
+ THR_SCHED_UNLOCK(curthread, curthread);
+
+ /*
+ * Run down any pending signals:
+ */
+ if (ret == 0)
+ _thr_sig_check_pending(curthread);
}
- return (0);
+ return (ret);
}
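
The rewritten _pthread_sigmask applies the new set directly to the thread's mailbox signal mask under the scheduling lock, bumps sigmask_seqno, and then runs down any pending signals. The application-visible semantics are the standard pthread_sigmask(3) ones; a small, hedged example of blocking SIGINT around a critical region and restoring the previous mask:

    #include <pthread.h>
    #include <signal.h>

    /* Block SIGINT in this thread for the duration of a critical region. */
    static void
    critical_region(void)
    {
    	sigset_t block, old;

    	sigemptyset(&block);
    	sigaddset(&block, SIGINT);
    	if (pthread_sigmask(SIG_BLOCK, &block, &old) != 0)
    		return;

    	/* ... work that must not observe SIGINT in this thread ... */

    	(void)pthread_sigmask(SIG_SETMASK, &old, NULL);
    }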
diff --git a/lib/libkse/thread/thr_sigpending.c b/lib/libkse/thread/thr_sigpending.c
index 5b3c02f..7f42ff3 100644
--- a/lib/libkse/thread/thr_sigpending.c
+++ b/lib/libkse/thread/thr_sigpending.c
@@ -45,6 +45,7 @@ int
_sigpending(sigset_t *set)
{
struct pthread *curthread = _get_curthread();
+ kse_critical_t crit;
int ret = 0;
/* Check for a null signal set pointer: */
@@ -54,7 +55,11 @@ _sigpending(sigset_t *set)
}
else {
*set = curthread->sigpend;
- SIGSETOR(*set, _thread_sigpending);
+ crit = _kse_critical_enter();
+ KSE_LOCK_ACQUIRE(curthread->kse, &_thread_signal_lock);
+ SIGSETOR(*set, _thr_proc_sigpending);
+ KSE_LOCK_RELEASE(curthread->kse, &_thread_signal_lock);
+ _kse_critical_leave(crit);
}
/* Return the completion status: */
return (ret);
diff --git a/lib/libkse/thread/thr_sigsuspend.c b/lib/libkse/thread/thr_sigsuspend.c
index dc805ac..7ce027a 100644
--- a/lib/libkse/thread/thr_sigsuspend.c
+++ b/lib/libkse/thread/thr_sigsuspend.c
@@ -32,22 +32,58 @@
* $FreeBSD$
*/
#include <signal.h>
-#include <sys/param.h>
-#include <sys/signalvar.h>
#include <errno.h>
#include <pthread.h>
+#include <string.h>
#include "thr_private.h"
__weak_reference(__sigsuspend, sigsuspend);
int
+_sigsuspend(const sigset_t *set)
+{
+ struct pthread *curthread = _get_curthread();
+ int ret = -1;
+
+ /* Check if a new signal set was provided by the caller: */
+ if (set != NULL) {
+ THR_SCHED_LOCK(curthread, curthread);
+
+ /* Change the caller's mask: */
+ memcpy(&curthread->tmbx.tm_context.uc_sigmask,
+ set, sizeof(sigset_t));
+
+ THR_SET_STATE(curthread, PS_SIGSUSPEND);
+
+ THR_SCHED_UNLOCK(curthread, curthread);
+
+ /* Wait for a signal: */
+ _thr_sched_switch(curthread);
+
+ /* Always return an interrupted error: */
+ errno = EINTR;
+
+ /* Restore the signal mask: */
+ memcpy(&curthread->tmbx.tm_context.uc_sigmask,
+ &curthread->sigmask, sizeof(sigset_t));
+ } else {
+ /* Return an invalid argument error: */
+ errno = EINVAL;
+ }
+
+ /* Return the completion status: */
+ return (ret);
+}
+
+int
__sigsuspend(const sigset_t * set)
{
- int ret;
+ struct pthread *curthread = _get_curthread();
+ int ret;
- _thread_enter_cancellation_point();
- ret = __sys_sigsuspend(set);
- _thread_leave_cancellation_point();
+ _thr_enter_cancellation_point(curthread);
+ ret = _sigsuspend(set);
+ _thr_leave_cancellation_point(curthread);
- return ret;
+ return (ret);
}
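
The new _sigsuspend installs the caller's mask in the thread mailbox, parks the thread in PS_SIGSUSPEND until a signal wakes it, then restores the saved mask and reports EINTR, as sigsuspend(2) requires. The classic pattern it must support, blocking a signal and then atomically unblocking it while waiting, looks roughly like this (names illustrative):

    #include <signal.h>

    static volatile sig_atomic_t got_signal;

    static void
    handler(int sig)
    {
    	(void)sig;
    	got_signal = 1;
    }

    static void
    wait_for_sigusr1(void)
    {
    	sigset_t block, waitmask;

    	signal(SIGUSR1, handler);
    	sigemptyset(&block);
    	sigaddset(&block, SIGUSR1);
    	/* Block SIGUSR1; remember the previous mask for the wait. */
    	sigprocmask(SIG_BLOCK, &block, &waitmask);
    	sigdelset(&waitmask, SIGUSR1);

    	while (!got_signal)
    		sigsuspend(&waitmask);	/* atomically unblock and sleep */
    }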
diff --git a/lib/libkse/thread/thr_sigwait.c b/lib/libkse/thread/thr_sigwait.c
index 5364d66..b955251 100644
--- a/lib/libkse/thread/thr_sigwait.c
+++ b/lib/libkse/thread/thr_sigwait.c
@@ -42,11 +42,135 @@
__weak_reference(_sigwait, sigwait);
int
-_sigwait(const sigset_t * __restrict set, int * __restrict sig)
+_sigwait(const sigset_t *set, int *sig)
{
+ struct pthread *curthread = _get_curthread();
+ int ret = 0;
+ int i;
+ sigset_t tempset, waitset;
+ struct sigaction act;
+
+ _thr_enter_cancellation_point(curthread);
/*
- * All signals are invalid for waiting.
+ * Specify the thread kernel signal handler.
*/
- return (EINVAL);
+ act.sa_handler = (void (*) ()) _thr_sig_handler;
+ act.sa_flags = SA_RESTART | SA_SIGINFO;
+ /* Ensure the signal handler cannot be interrupted by other signals: */
+ sigfillset(&act.sa_mask);
+
+ /*
+ * Initialize the set of signals that will be waited on:
+ */
+ waitset = *set;
+
+ /* These signals can't be waited on. */
+ sigdelset(&waitset, SIGKILL);
+ sigdelset(&waitset, SIGSTOP);
+
+ /*
+ * Check to see if a pending signal is in the wait mask.
+ * This has to be atomic. */
+ tempset = curthread->sigpend;
+ SIGSETOR(tempset, _thr_proc_sigpending);
+ SIGSETAND(tempset, waitset);
+ if (SIGNOTEMPTY(tempset)) {
+ /* Enter a loop to find a pending signal: */
+ for (i = 1; i < NSIG; i++) {
+ if (sigismember (&tempset, i))
+ break;
+ }
+
+ /* Clear the pending signal: */
+ if (sigismember(&curthread->sigpend,i))
+ sigdelset(&curthread->sigpend,i);
+ else
+ sigdelset(&_thr_proc_sigpending,i);
+
+ /* Return the signal number to the caller: */
+ *sig = i;
+
+ _thr_leave_cancellation_point(curthread);
+ return (0);
+ }
+
+ /*
+ * Lock the array of SIG_DFL wait counts.
+ */
+ THR_LOCK_ACQUIRE(curthread, &_thread_signal_lock);
+
+ /*
+ * Enter a loop to find the signals that are SIG_DFL. For
+ * these signals we must install a dummy signal handler in
+ * order for the kernel to pass them in to us. POSIX says
+ * that the _application_ must explicitly install a dummy
+ * handler for signals that are SIG_IGN in order to sigwait
+ * on them. Note that SIG_IGN signals are left in the
+ * mask because a subsequent sigaction could enable an
+ * ignored signal.
+ */
+ sigemptyset(&tempset);
+ for (i = 1; i < NSIG; i++) {
+ if (sigismember(&waitset, i) &&
+ (_thread_sigact[i - 1].sa_handler == SIG_DFL)) {
+ _thread_dfl_count[i]++;
+ sigaddset(&tempset, i);
+ if (_thread_dfl_count[i] == 1) {
+ if (__sys_sigaction(i, &act, NULL) != 0)
+ ret = -1;
+ }
+ }
+ }
+ /* Done accessing _thread_dfl_count for now. */
+ THR_LOCK_RELEASE(curthread, &_thread_signal_lock);
+
+ if (ret == 0) {
+ /*
+ * Save the wait signal mask. The wait signal
+		 * mask is independent of the thread's signal mask
+ * and requires separate storage.
+ */
+ curthread->data.sigwait = &waitset;
+
+ /* Wait for a signal: */
+ THR_SCHED_LOCK(curthread, curthread);
+ THR_SET_STATE(curthread, PS_SIGWAIT);
+ THR_SCHED_UNLOCK(curthread, curthread);
+ _thr_sched_switch(curthread);
+
+ /* Return the signal number to the caller: */
+ *sig = curthread->signo;
+
+ /*
+ * Probably unnecessary, but since it's in a union struct
+ * we don't know how it could be used in the future.
+ */
+ curthread->data.sigwait = NULL;
+ }
+
+ /*
+ * Relock the array of SIG_DFL wait counts.
+ */
+ THR_LOCK_ACQUIRE(curthread, &_thread_signal_lock);
+
+ /* Restore the sigactions: */
+ act.sa_handler = SIG_DFL;
+ for (i = 1; i < NSIG; i++) {
+ if (sigismember(&tempset, i)) {
+ _thread_dfl_count[i]--;
+ if ((_thread_sigact[i - 1].sa_handler == SIG_DFL) &&
+ (_thread_dfl_count[i] == 0)) {
+ if (__sys_sigaction(i, &act, NULL) != 0)
+ ret = -1;
+ }
+ }
+ }
+ /* Done accessing _thread_dfl_count. */
+ THR_LOCK_RELEASE(curthread, &_thread_signal_lock);
+
+ _thr_leave_cancellation_point(curthread);
+
+ /* Return the completion status: */
+ return (ret);
}
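
The new _sigwait first polls the thread and process pending sets, then temporarily installs a dummy handler for any SIG_DFL members of the wait set (so the kernel will actually deliver them), parks in PS_SIGWAIT, and finally restores the default actions under _thread_signal_lock. The application-visible contract is the usual sigwait(3) one; a common dedicated signal-handling-thread sketch (function names illustrative):

    #include <pthread.h>
    #include <signal.h>
    #include <stdio.h>

    static void *
    signal_thread(void *arg)
    {
    	sigset_t *waitset = arg;
    	int sig;

    	for (;;) {
    		if (sigwait(waitset, &sig) == 0)
    			printf("received signal %d\n", sig);
    	}
    	return (NULL);
    }

    int
    main(void)
    {
    	sigset_t waitset;
    	pthread_t tid;

    	sigemptyset(&waitset);
    	sigaddset(&waitset, SIGTERM);
    	sigaddset(&waitset, SIGHUP);
    	/* Block these in every thread so only sigwait() consumes them. */
    	pthread_sigmask(SIG_BLOCK, &waitset, NULL);
    	pthread_create(&tid, NULL, signal_thread, &waitset);
    	pthread_join(tid, NULL);	/* the sketch never returns */
    	return (0);
    }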
diff --git a/lib/libkse/thread/thr_sleep.c b/lib/libkse/thread/thr_sleep.c
index 70b1c9f..0f02db7 100644
--- a/lib/libkse/thread/thr_sleep.c
+++ b/lib/libkse/thread/thr_sleep.c
@@ -38,11 +38,12 @@ __weak_reference(_sleep, sleep);
unsigned int
_sleep(unsigned int seconds)
{
+ struct pthread *curthread = _get_curthread();
unsigned int ret;
- _thread_enter_cancellation_point();
+ _thr_enter_cancellation_point(curthread);
ret = __sleep(seconds);
- _thread_leave_cancellation_point();
+ _thr_leave_cancellation_point(curthread);
- return ret;
+ return (ret);
}
diff --git a/lib/libkse/thread/thr_spec.c b/lib/libkse/thread/thr_spec.c
index 07ef387..2cd18d1 100644
--- a/lib/libkse/thread/thr_spec.c
+++ b/lib/libkse/thread/thr_spec.c
@@ -39,7 +39,6 @@
#include "thr_private.h"
struct pthread_key {
- spinlock_t lock;
volatile int allocated;
volatile int count;
int seqno;
@@ -47,7 +46,7 @@ struct pthread_key {
};
/* Static variables: */
-static struct pthread_key key_table[PTHREAD_KEYS_MAX];
+static struct pthread_key key_table[PTHREAD_KEYS_MAX];
__weak_reference(_pthread_key_create, pthread_key_create);
__weak_reference(_pthread_key_delete, pthread_key_delete);
@@ -56,44 +55,47 @@ __weak_reference(_pthread_setspecific, pthread_setspecific);
int
-_pthread_key_create(pthread_key_t * key, void (*destructor) (void *))
+_pthread_key_create(pthread_key_t *key, void (*destructor) (void *))
{
+ struct pthread *curthread = _get_curthread();
+
+ /* Lock the key table: */
+ THR_LOCK_ACQUIRE(curthread, &_keytable_lock);
for ((*key) = 0; (*key) < PTHREAD_KEYS_MAX; (*key)++) {
- /* Lock the key table entry: */
- _SPINLOCK(&key_table[*key].lock);
if (key_table[(*key)].allocated == 0) {
key_table[(*key)].allocated = 1;
key_table[(*key)].destructor = destructor;
key_table[(*key)].seqno++;
- /* Unlock the key table entry: */
- _SPINUNLOCK(&key_table[*key].lock);
+ /* Unlock the key table: */
+ THR_LOCK_RELEASE(curthread, &_keytable_lock);
return (0);
}
- /* Unlock the key table entry: */
- _SPINUNLOCK(&key_table[*key].lock);
}
+ /* Unlock the key table: */
+ THR_LOCK_RELEASE(curthread, &_keytable_lock);
return (EAGAIN);
}
int
_pthread_key_delete(pthread_key_t key)
{
+ struct pthread *curthread = _get_curthread();
int ret = 0;
if (key < PTHREAD_KEYS_MAX) {
- /* Lock the key table entry: */
- _SPINLOCK(&key_table[key].lock);
+ /* Lock the key table: */
+ THR_LOCK_ACQUIRE(curthread, &_keytable_lock);
if (key_table[key].allocated)
key_table[key].allocated = 0;
else
ret = EINVAL;
- /* Unlock the key table entry: */
- _SPINUNLOCK(&key_table[key].lock);
+ /* Unlock the key table: */
+ THR_LOCK_RELEASE(curthread, &_keytable_lock);
} else
ret = EINVAL;
return (ret);
@@ -105,44 +107,41 @@ _thread_cleanupspecific(void)
struct pthread *curthread = _get_curthread();
void *data = NULL;
int key;
- int itr;
void (*destructor)( void *);
- for (itr = 0; itr < PTHREAD_DESTRUCTOR_ITERATIONS; itr++) {
- for (key = 0; key < PTHREAD_KEYS_MAX; key++) {
- if (curthread->specific_data_count > 0) {
- /* Lock the key table entry: */
- _SPINLOCK(&key_table[key].lock);
- destructor = NULL;
-
- if (key_table[key].allocated &&
- (curthread->specific[key].data != NULL)) {
- if (curthread->specific[key].seqno ==
- key_table[key].seqno) {
- data = (void *) curthread->specific[key].data;
- destructor = key_table[key].destructor;
- }
- curthread->specific[key].data = NULL;
- curthread->specific_data_count--;
+ if (curthread->specific != NULL) {
+ /* Lock the key table: */
+ THR_LOCK_ACQUIRE(curthread, &_keytable_lock);
+ for (key = 0; (key < PTHREAD_KEYS_MAX) &&
+ (curthread->specific_data_count > 0); key++) {
+ destructor = NULL;
+
+ if (key_table[key].allocated &&
+ (curthread->specific[key].data != NULL)) {
+ if (curthread->specific[key].seqno ==
+ key_table[key].seqno) {
+ data = (void *)curthread->specific[key].data;
+ destructor = key_table[key].destructor;
}
+ curthread->specific[key].data = NULL;
+ curthread->specific_data_count--;
+ }
- /* Unlock the key table entry: */
- _SPINUNLOCK(&key_table[key].lock);
-
+ /*
+			 * If there is a destructor, call it
+ * with the key table entry unlocked:
+ */
+ if (destructor != NULL) {
/*
- * If there is a destructore, call it
- * with the key table entry unlocked:
+ * Don't hold the lock while calling the
+ * destructor:
*/
- if (destructor)
- destructor(data);
- } else {
- free(curthread->specific);
- curthread->specific = NULL;
- return;
+ THR_LOCK_RELEASE(curthread, &_keytable_lock);
+ destructor(data);
+ THR_LOCK_ACQUIRE(curthread, &_keytable_lock);
}
}
- }
- if (curthread->specific != NULL) {
+ THR_LOCK_RELEASE(curthread, &_keytable_lock);
free(curthread->specific);
curthread->specific = NULL;
}
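
Thread-specific data now serializes on a single _keytable_lock and drops that lock around destructor calls. From an application's point of view this is ordinary pthread_key machinery; a minimal example of a key whose destructor frees per-thread storage at thread exit (all names illustrative):

    #include <pthread.h>
    #include <stdlib.h>

    static pthread_key_t buf_key;
    static pthread_once_t buf_once = PTHREAD_ONCE_INIT;

    static void
    buf_destructor(void *p)
    {
    	free(p);	/* runs at thread exit, key table lock not held */
    }

    static void
    buf_key_init(void)
    {
    	(void)pthread_key_create(&buf_key, buf_destructor);
    }

    /* Return a per-thread scratch buffer, allocating on first use. */
    static char *
    thread_buffer(void)
    {
    	char *buf;

    	pthread_once(&buf_once, buf_key_init);
    	if ((buf = pthread_getspecific(buf_key)) == NULL) {
    		buf = calloc(1, 256);
    		if (buf != NULL)
    			(void)pthread_setspecific(buf_key, buf);
    	}
    	return (buf);
    }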
diff --git a/lib/libkse/thread/thr_spinlock.c b/lib/libkse/thread/thr_spinlock.c
index ad7b222..cb71a46 100644
--- a/lib/libkse/thread/thr_spinlock.c
+++ b/lib/libkse/thread/thr_spinlock.c
@@ -41,9 +41,14 @@
#include <unistd.h>
#include <libc_private.h>
-
+#include "spinlock.h"
#include "thr_private.h"
+/*
+ * These are for compatibility only.  Spinlocks of this type
+ * are deprecated.
+ */
+
void
_spinunlock(spinlock_t *lck)
{
@@ -60,20 +65,14 @@ _spinunlock(spinlock_t *lck)
void
_spinlock(spinlock_t *lck)
{
- struct pthread *curthread = _get_curthread();
-
/*
* Try to grab the lock and loop if another thread grabs
* it before we do.
*/
while(_atomic_lock(&lck->access_lock)) {
- /* Block the thread until the lock. */
- curthread->data.spinlock = lck;
- _thread_kern_sched_state(PS_SPINBLOCK, __FILE__, __LINE__);
+ while (lck->access_lock)
+ ;
}
-
- /* The running thread now owns the lock: */
- lck->lock_owner = (long) curthread;
}
/*
@@ -89,30 +88,12 @@ _spinlock(spinlock_t *lck)
void
_spinlock_debug(spinlock_t *lck, char *fname, int lineno)
{
- struct pthread *curthread = _get_curthread();
- int cnt = 0;
-
/*
* Try to grab the lock and loop if another thread grabs
* it before we do.
*/
while(_atomic_lock(&lck->access_lock)) {
- cnt++;
- if (cnt > 100) {
- char str[256];
- snprintf(str, sizeof(str), "%s - Warning: Thread %p attempted to lock %p from %s (%d) was left locked from %s (%d)\n", getprogname(), curthread, lck, fname, lineno, lck->fname, lck->lineno);
- __sys_write(2,str,strlen(str));
- __sleep(1);
- cnt = 0;
- }
-
- /* Block the thread until the lock. */
- curthread->data.spinlock = lck;
- _thread_kern_sched_state(PS_SPINBLOCK, fname, lineno);
+ while (lck->access_lock)
+ ;
}
-
- /* The running thread now owns the lock: */
- lck->lock_owner = (long) curthread;
- lck->fname = fname;
- lck->lineno = lineno;
}
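
These compatibility spinlocks now busy-wait instead of blocking in the user-level scheduler: _atomic_lock attempts the acquire, and while it fails the code spins on plain reads of access_lock before retrying, a test-and-test-and-set loop. A self-contained sketch of that shape using C11 atomics (since _atomic_lock itself is library-internal, this is an approximation, not the library's code):

    #include <stdatomic.h>

    struct simple_spinlock {
    	atomic_int access_lock;		/* 0 = free, 1 = held */
    };

    static void
    simple_spin_lock(struct simple_spinlock *lck)
    {
    	/*
    	 * Try to grab the lock; on failure, spin on reads until it
    	 * looks free, then try the atomic exchange again.
    	 */
    	while (atomic_exchange(&lck->access_lock, 1) != 0) {
    		while (atomic_load(&lck->access_lock) != 0)
    			;
    	}
    }

    static void
    simple_spin_unlock(struct simple_spinlock *lck)
    {
    	atomic_store(&lck->access_lock, 0);
    }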
diff --git a/lib/libkse/thread/thr_stack.c b/lib/libkse/thread/thr_stack.c
index c75d6ee..f14289e 100644
--- a/lib/libkse/thread/thr_stack.c
+++ b/lib/libkse/thread/thr_stack.c
@@ -28,9 +28,7 @@
*/
#include <sys/types.h>
#include <sys/mman.h>
-#include <sys/param.h>
#include <sys/queue.h>
-#include <sys/user.h>
#include <stdlib.h>
#include <pthread.h>
#include "thr_private.h"
@@ -44,31 +42,32 @@ struct stack {
};
/*
- * Default sized (stack and guard) spare stack queue. Stacks are cached to
- * avoid additional complexity managing mmap()ed stack regions. Spare stacks
- * are used in LIFO order to increase cache locality.
+ * Default sized (stack and guard) spare stack queue. Stacks are cached
+ * to avoid additional complexity managing mmap()ed stack regions. Spare
+ * stacks are used in LIFO order to increase cache locality.
*/
-static LIST_HEAD(, stack) _dstackq = LIST_HEAD_INITIALIZER(_dstackq);
+static LIST_HEAD(, stack) dstackq = LIST_HEAD_INITIALIZER(dstackq);
/*
* Miscellaneous sized (non-default stack and/or guard) spare stack queue.
- * Stacks are cached to avoid additional complexity managing mmap()ed stack
- * regions. This list is unordered, since ordering on both stack size and guard
- * size would be more trouble than it's worth. Stacks are allocated from this
- * cache on a first size match basis.
+ * Stacks are cached to avoid additional complexity managing mmap()ed
+ * stack regions. This list is unordered, since ordering on both stack
+ * size and guard size would be more trouble than it's worth. Stacks are
+ * allocated from this cache on a first size match basis.
*/
-static LIST_HEAD(, stack) _mstackq = LIST_HEAD_INITIALIZER(_mstackq);
+static LIST_HEAD(, stack) mstackq = LIST_HEAD_INITIALIZER(mstackq);
/**
- * Base address of the last stack allocated (including its red zone, if there is
- * one). Stacks are allocated contiguously, starting beyond the top of the main
- * stack. When a new stack is created, a red zone is typically created
- * (actually, the red zone is simply left unmapped) above the top of the stack,
- * such that the stack will not be able to grow all the way to the bottom of the
- * next stack. This isn't fool-proof. It is possible for a stack to grow by a
- * large amount, such that it grows into the next stack, and as long as the
- * memory within the red zone is never accessed, nothing will prevent one thread
- * stack from trouncing all over the next.
+ * Base address of the last stack allocated (including its red zone, if
+ * there is one). Stacks are allocated contiguously, starting beyond the
+ * top of the main stack. When a new stack is created, a red zone is
+ * typically created (actually, the red zone is simply left unmapped) above
+ * the top of the stack, such that the stack will not be able to grow all
+ * the way to the bottom of the next stack. This isn't fool-proof. It is
+ * possible for a stack to grow by a large amount, such that it grows into
+ * the next stack, and as long as the memory within the red zone is never
+ * accessed, nothing will prevent one thread stack from trouncing all over
+ * the next.
*
* low memory
* . . . . . . . . . . . . . . . . . .
@@ -112,50 +111,51 @@ static LIST_HEAD(, stack) _mstackq = LIST_HEAD_INITIALIZER(_mstackq);
* high memory
*
*/
-static void * last_stack;
+static void *last_stack = NULL;
-void *
-_thread_stack_alloc(size_t stacksize, size_t guardsize)
+int
+_thr_stack_alloc(struct pthread_attr *attr)
{
- void *stack = NULL;
- struct stack *spare_stack;
- size_t stack_size;
+ struct stack *spare_stack;
+ struct kse *curkse;
+ kse_critical_t crit;
+ size_t stacksize;
+ size_t guardsize;
+
+ stacksize = attr->stacksize_attr;
+ guardsize = attr->guardsize_attr;
/*
- * Round up stack size to nearest multiple of _pthread_page_size,
- * so that mmap() * will work. If the stack size is not an even
- * multiple, we end up initializing things such that there is unused
- * space above the beginning of the stack, so the stack sits snugly
- * against its guard.
+ * Round up stack size to nearest multiple of _thr_page_size so
+	 * that mmap() will work.  If the stack size is not an even
+ * multiple, we end up initializing things such that there is
+ * unused space above the beginning of the stack, so the stack
+ * sits snugly against its guard.
*/
- if (stacksize % _pthread_page_size != 0)
- stack_size = ((stacksize / _pthread_page_size) + 1) *
- _pthread_page_size;
- else
- stack_size = stacksize;
+ if ((stacksize % _thr_page_size) != 0)
+ stacksize = ((stacksize / _thr_page_size) + 1) *
+ _thr_page_size;
+ attr->stackaddr_attr = NULL;
+ attr->flags &= ~THR_STACK_USER;
/*
+ * Use the garbage collector lock for synchronization of the
+ * spare stack lists and allocations from usrstack.
+ */
+ crit = _kse_critical_enter();
+ curkse = _get_curkse();
+ KSE_LOCK_ACQUIRE(curkse, &_thread_list_lock);
+ /*
* If the stack and guard sizes are default, try to allocate a stack
* from the default-size stack cache:
*/
- if (stack_size == PTHREAD_STACK_DEFAULT &&
- guardsize == _pthread_guard_default) {
- /*
- * Use the garbage collector mutex for synchronization of the
- * spare stack list.
- */
- if (pthread_mutex_lock(&_gc_mutex) != 0)
- PANIC("Cannot lock gc mutex");
-
- if ((spare_stack = LIST_FIRST(&_dstackq)) != NULL) {
- /* Use the spare stack. */
+ if ((stacksize == THR_STACK_DEFAULT) &&
+ (guardsize == _thr_guard_default)) {
+ if ((spare_stack = LIST_FIRST(&dstackq)) != NULL) {
+ /* Use the spare stack. */
LIST_REMOVE(spare_stack, qe);
- stack = spare_stack->stackaddr;
+ attr->stackaddr_attr = spare_stack->stackaddr;
}
-
- /* Unlock the garbage collector mutex. */
- if (pthread_mutex_unlock(&_gc_mutex) != 0)
- PANIC("Cannot unlock gc mutex");
}
/*
* The user specified a non-default stack and/or guard size, so try to
@@ -163,78 +163,75 @@ _thread_stack_alloc(size_t stacksize, size_t guardsize)
* rounded up stack size (stack_size) in the search:
*/
else {
- /*
- * Use the garbage collector mutex for synchronization of the
- * spare stack list.
- */
- if (pthread_mutex_lock(&_gc_mutex) != 0)
- PANIC("Cannot lock gc mutex");
-
- LIST_FOREACH(spare_stack, &_mstackq, qe) {
- if (spare_stack->stacksize == stack_size &&
+ LIST_FOREACH(spare_stack, &mstackq, qe) {
+ if (spare_stack->stacksize == stacksize &&
spare_stack->guardsize == guardsize) {
LIST_REMOVE(spare_stack, qe);
- stack = spare_stack->stackaddr;
+ attr->stackaddr_attr = spare_stack->stackaddr;
break;
}
}
-
- /* Unlock the garbage collector mutex. */
- if (pthread_mutex_unlock(&_gc_mutex) != 0)
- PANIC("Cannot unlock gc mutex");
}
-
- /* Check if a stack was not allocated from a stack cache: */
- if (stack == NULL) {
-
+ if (attr->stackaddr_attr != NULL) {
+ /* A cached stack was found. Release the lock. */
+ KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
+ _kse_critical_leave(crit);
+ }
+ else {
+ /* Allocate a stack from usrstack. */
if (last_stack == NULL)
- last_stack = _usrstack - PTHREAD_STACK_INITIAL -
- _pthread_guard_default;
+ last_stack = _usrstack - THR_STACK_INITIAL -
+ _thr_guard_default;
/* Allocate a new stack. */
- stack = last_stack - stack_size;
+ attr->stackaddr_attr = last_stack - stacksize;
/*
- * Even if stack allocation fails, we don't want to try to use
- * this location again, so unconditionally decrement
+ * Even if stack allocation fails, we don't want to try to
+ * use this location again, so unconditionally decrement
* last_stack. Under normal operating conditions, the most
- * likely reason for an mmap() error is a stack overflow of the
- * adjacent thread stack.
+ * likely reason for an mmap() error is a stack overflow of
+ * the adjacent thread stack.
*/
- last_stack -= (stack_size + guardsize);
+ last_stack -= (stacksize + guardsize);
- /* Stack: */
- if (mmap(stack, stack_size, PROT_READ | PROT_WRITE, MAP_STACK,
- -1, 0) == MAP_FAILED)
- stack = NULL;
- }
+ /* Release the lock before mmap'ing it. */
+ KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
+ _kse_critical_leave(crit);
- return (stack);
+ /* Map the stack, but not the guard page: */
+ if (mmap(attr->stackaddr_attr, stacksize,
+ PROT_READ | PROT_WRITE, MAP_STACK, -1, 0) == MAP_FAILED)
+ attr->stackaddr_attr = NULL;
+ }
+ if (attr->stackaddr_attr != NULL)
+ return (0);
+ else
+ return (-1);
}
-/* This function must be called with _gc_mutex held. */
+/* This function must be called with _thread_list_lock held. */
void
-_thread_stack_free(void *stack, size_t stacksize, size_t guardsize)
+_thr_stack_free(struct pthread_attr *attr)
{
- struct stack *spare_stack;
-
- spare_stack = (stack + stacksize - sizeof(struct stack));
- /* Round stacksize up to nearest multiple of _pthread_page_size. */
- if (stacksize % _pthread_page_size != 0) {
- spare_stack->stacksize =
- ((stacksize / _pthread_page_size) + 1) *
- _pthread_page_size;
- } else
- spare_stack->stacksize = stacksize;
- spare_stack->guardsize = guardsize;
- spare_stack->stackaddr = stack;
-
- if (spare_stack->stacksize == PTHREAD_STACK_DEFAULT &&
- spare_stack->guardsize == _pthread_guard_default) {
- /* Default stack/guard size. */
- LIST_INSERT_HEAD(&_dstackq, spare_stack, qe);
- } else {
- /* Non-default stack/guard size. */
- LIST_INSERT_HEAD(&_mstackq, spare_stack, qe);
+ struct stack *spare_stack;
+
+ if ((attr != NULL) && ((attr->flags & THR_STACK_USER) == 0)
+ && (attr->stackaddr_attr != NULL)) {
+ spare_stack = (attr->stackaddr_attr + attr->stacksize_attr
+ - sizeof(struct stack));
+ spare_stack->stacksize = attr->stacksize_attr;
+ spare_stack->guardsize = attr->guardsize_attr;
+ spare_stack->stackaddr = attr->stackaddr_attr;
+
+ if (spare_stack->stacksize == THR_STACK_DEFAULT &&
+ spare_stack->guardsize == _thr_guard_default) {
+ /* Default stack/guard size. */
+ LIST_INSERT_HEAD(&dstackq, spare_stack, qe);
+ } else {
+ /* Non-default stack/guard size. */
+ LIST_INSERT_HEAD(&mstackq, spare_stack, qe);
+ }
+ attr->stackaddr_attr = NULL;
}
}
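
Stack allocation now keeps freed stacks on two LIFO lists, one for default stack/guard sizes and one for everything else, protected by _thread_list_lock, and carves new stacks downward from _usrstack while leaving the guard region unmapped. The cache itself is a plain queue(3) LIST used in LIFO order; a stripped-down sketch of that caching pattern with made-up types (not the library's structures):

    #include <sys/queue.h>
    #include <stddef.h>

    struct cached_stack {
    	LIST_ENTRY(cached_stack) qe;
    	void	*addr;
    	size_t	 size;
    };

    static LIST_HEAD(, cached_stack) stack_cache =
        LIST_HEAD_INITIALIZER(stack_cache);

    /* Reuse the most recently freed stack, if any (LIFO for locality). */
    static struct cached_stack *
    stack_cache_get(void)
    {
    	struct cached_stack *sp;

    	if ((sp = LIST_FIRST(&stack_cache)) != NULL)
    		LIST_REMOVE(sp, qe);
    	return (sp);
    }

    /* Insert at the head so the hottest stack is handed out first. */
    static void
    stack_cache_put(struct cached_stack *sp)
    {
    	LIST_INSERT_HEAD(&stack_cache, sp, qe);
    }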
diff --git a/lib/libkse/thread/thr_suspend_np.c b/lib/libkse/thread/thr_suspend_np.c
index 4128437..3025584 100644
--- a/lib/libkse/thread/thr_suspend_np.c
+++ b/lib/libkse/thread/thr_suspend_np.c
@@ -35,7 +35,7 @@
#include <pthread.h>
#include "thr_private.h"
-static void suspend_common(struct pthread *thread);
+static void suspend_common(struct pthread *thread);
__weak_reference(_pthread_suspend_np, pthread_suspend_np);
__weak_reference(_pthread_suspend_all_np, pthread_suspend_all_np);
@@ -44,27 +44,26 @@ __weak_reference(_pthread_suspend_all_np, pthread_suspend_all_np);
int
_pthread_suspend_np(pthread_t thread)
{
+ struct pthread *curthread = _get_curthread();
int ret;
/* Suspending the current thread doesn't make sense. */
if (thread == _get_curthread())
ret = EDEADLK;
- /* Find the thread in the list of active threads: */
- else if ((ret = _find_thread(thread)) == 0) {
- /*
- * Defer signals to protect the scheduling queues from
- * access by the signal handler:
- */
- _thread_kern_sig_defer();
+ /* Add a reference to the thread: */
+ else if ((ret = _thr_ref_add(curthread, thread, /*include dead*/0))
+ == 0) {
+		/* Lock the thread's scheduling queue: */
+ THR_SCHED_LOCK(curthread, thread);
suspend_common(thread);
- /*
- * Undefer and handle pending signals, yielding if
- * necessary:
- */
- _thread_kern_sig_undefer();
+		/* Unlock the thread's scheduling queue: */
+ THR_SCHED_UNLOCK(curthread, thread);
+
+ /* Don't forget to remove the reference: */
+ _thr_ref_delete(curthread, thread);
}
return (ret);
}
@@ -74,31 +73,34 @@ _pthread_suspend_all_np(void)
{
struct pthread *curthread = _get_curthread();
struct pthread *thread;
+ kse_critical_t crit;
- /*
- * Defer signals to protect the scheduling queues from
- * access by the signal handler:
- */
- _thread_kern_sig_defer();
+ /* Take the thread list lock: */
+ crit = _kse_critical_enter();
+ KSE_LOCK_ACQUIRE(curthread->kse, &_thread_list_lock);
TAILQ_FOREACH(thread, &_thread_list, tle) {
- if (thread != curthread)
+ if ((thread != curthread) &&
+ (thread->state != PS_DEAD) &&
+ (thread->state != PS_DEADLOCK) &&
+ ((thread->flags & THR_FLAGS_EXITING) == 0)) {
+ THR_SCHED_LOCK(curthread, thread);
suspend_common(thread);
+ THR_SCHED_UNLOCK(curthread, thread);
+ }
}
- /*
- * Undefer and handle pending signals, yielding if
- * necessary:
- */
- _thread_kern_sig_undefer();
+ /* Release the thread list lock: */
+ KSE_LOCK_RELEASE(curthread->kse, &_thread_list_lock);
+ _kse_critical_leave(crit);
}
void
suspend_common(struct pthread *thread)
{
- thread->flags |= PTHREAD_FLAGS_SUSPENDED;
- if (thread->flags & PTHREAD_FLAGS_IN_PRIOQ) {
- PTHREAD_PRIOQ_REMOVE(thread);
- PTHREAD_SET_STATE(thread, PS_SUSPENDED);
+ thread->flags |= THR_FLAGS_SUSPENDED;
+ if (thread->flags & THR_FLAGS_IN_RUNQ) {
+ THR_RUNQ_REMOVE(thread);
+ THR_SET_STATE(thread, PS_SUSPENDED);
}
}
diff --git a/lib/libkse/thread/thr_switch_np.c b/lib/libkse/thread/thr_switch_np.c
index 45c289e..b70ce70 100644
--- a/lib/libkse/thread/thr_switch_np.c
+++ b/lib/libkse/thread/thr_switch_np.c
@@ -43,29 +43,11 @@ __weak_reference(_pthread_switch_delete_np, pthread_switch_delete_np);
int
_pthread_switch_add_np(pthread_switch_routine_t routine)
{
- int ret = 0;
-
- if (routine == NULL)
- /* Return an invalid argument error: */
- ret = EINVAL;
- else
- /* Shouldn't need a lock to protect this assigment. */
- _sched_switch_hook = routine;
-
- return(ret);
+ return (ENOTSUP);
}
int
_pthread_switch_delete_np(pthread_switch_routine_t routine)
{
- int ret = 0;
-
- if (routine != _sched_switch_hook)
- /* Return an invalid argument error: */
- ret = EINVAL;
- else
- /* Shouldn't need a lock to protect this assigment. */
- _sched_switch_hook = NULL;
-
- return(ret);
+ return (ENOTSUP);
}
diff --git a/lib/libkse/thread/thr_system.c b/lib/libkse/thread/thr_system.c
index 591562b..28976d3 100644
--- a/lib/libkse/thread/thr_system.c
+++ b/lib/libkse/thread/thr_system.c
@@ -38,11 +38,12 @@ __weak_reference(_system, system);
int
_system(const char *string)
{
+ struct pthread *curthread = _get_curthread();
int ret;
- _thread_enter_cancellation_point();
+ _thr_enter_cancellation_point(curthread);
ret = __system(string);
- _thread_leave_cancellation_point();
+ _thr_leave_cancellation_point(curthread);
return ret;
}
diff --git a/lib/libkse/thread/thr_tcdrain.c b/lib/libkse/thread/thr_tcdrain.c
index 140039b..6a2002b 100644
--- a/lib/libkse/thread/thr_tcdrain.c
+++ b/lib/libkse/thread/thr_tcdrain.c
@@ -38,11 +38,12 @@ __weak_reference(_tcdrain, tcdrain);
int
_tcdrain(int fd)
{
+ struct pthread *curthread = _get_curthread();
int ret;
- _thread_enter_cancellation_point();
+ _thr_enter_cancellation_point(curthread);
ret = __tcdrain(fd);
- _thread_leave_cancellation_point();
+ _thr_leave_cancellation_point(curthread);
- return ret;
+ return (ret);
}
diff --git a/lib/libkse/thread/thr_wait.c b/lib/libkse/thread/thr_wait.c
index 8e9c864..98f2c8d 100644
--- a/lib/libkse/thread/thr_wait.c
+++ b/lib/libkse/thread/thr_wait.c
@@ -37,11 +37,12 @@ __weak_reference(_wait, wait);
pid_t
_wait(int *istat)
{
+ struct pthread *curthread = _get_curthread();
pid_t ret;
- _thread_enter_cancellation_point();
+ _thr_enter_cancellation_point(curthread);
ret = __wait(istat);
- _thread_leave_cancellation_point();
+ _thr_leave_cancellation_point(curthread);
return ret;
}
diff --git a/lib/libkse/thread/thr_wait4.c b/lib/libkse/thread/thr_wait4.c
index 07b0c84..9f23584 100644
--- a/lib/libkse/thread/thr_wait4.c
+++ b/lib/libkse/thread/thr_wait4.c
@@ -43,11 +43,12 @@ __weak_reference(__wait4, wait4);
pid_t
__wait4(pid_t pid, int *istat, int options, struct rusage *rusage)
{
+ struct pthread *curthread = _get_curthread();
pid_t ret;
- _thread_enter_cancellation_point();
+ _thr_enter_cancellation_point(curthread);
ret = _wait4(pid, istat, options, rusage);
- _thread_leave_cancellation_point();
+ _thr_leave_cancellation_point(curthread);
return ret;
}
diff --git a/lib/libkse/thread/thr_waitpid.c b/lib/libkse/thread/thr_waitpid.c
index 8938a62..8ee3ce1 100644
--- a/lib/libkse/thread/thr_waitpid.c
+++ b/lib/libkse/thread/thr_waitpid.c
@@ -39,11 +39,12 @@ __weak_reference(_waitpid, waitpid);
pid_t
_waitpid(pid_t wpid, int *status, int options)
{
+ struct pthread *curthread = _get_curthread();
pid_t ret;
- _thread_enter_cancellation_point();
+ _thr_enter_cancellation_point(curthread);
ret = __waitpid(wpid, status, options);
- _thread_leave_cancellation_point();
+ _thr_leave_cancellation_point(curthread);
return ret;
}
diff --git a/lib/libkse/thread/thr_write.c b/lib/libkse/thread/thr_write.c
index 4c8c171..53d897c 100644
--- a/lib/libkse/thread/thr_write.c
+++ b/lib/libkse/thread/thr_write.c
@@ -45,11 +45,12 @@ __weak_reference(__write, write);
ssize_t
__write(int fd, const void *buf, size_t nbytes)
{
+ struct pthread *curthread = _get_curthread();
ssize_t ret;
- _thread_enter_cancellation_point();
+ _thr_enter_cancellation_point(curthread);
ret = __sys_write(fd, buf, nbytes);
- _thread_leave_cancellation_point();
+ _thr_leave_cancellation_point(curthread);
return ret;
}
diff --git a/lib/libkse/thread/thr_writev.c b/lib/libkse/thread/thr_writev.c
index cff95b1..e13c9d2 100644
--- a/lib/libkse/thread/thr_writev.c
+++ b/lib/libkse/thread/thr_writev.c
@@ -47,11 +47,12 @@ __weak_reference(__writev, writev);
ssize_t
__writev(int fd, const struct iovec *iov, int iovcnt)
{
+ struct pthread *curthread = _get_curthread();
ssize_t ret;
- _thread_enter_cancellation_point();
+ _thr_enter_cancellation_point(curthread);
ret = __sys_writev(fd, iov, iovcnt);
- _thread_leave_cancellation_point();
+ _thr_leave_cancellation_point(curthread);
return ret;
}
diff --git a/lib/libkse/thread/thr_yield.c b/lib/libkse/thread/thr_yield.c
index e652a9c..acaa3c5 100644
--- a/lib/libkse/thread/thr_yield.c
+++ b/lib/libkse/thread/thr_yield.c
@@ -46,7 +46,7 @@ _sched_yield(void)
curthread->slice_usec = -1;
/* Schedule the next thread: */
- _thread_kern_sched();
+ _thr_sched_switch(curthread);
/* Always return no error. */
return(0);
@@ -62,5 +62,5 @@ _pthread_yield(void)
curthread->slice_usec = -1;
/* Schedule the next thread: */
- _thread_kern_sched();
+ _thr_sched_switch(curthread);
}
diff --git a/lib/libpthread/Makefile b/lib/libpthread/Makefile
index bebda3f..28a4763 100644
--- a/lib/libpthread/Makefile
+++ b/lib/libpthread/Makefile
@@ -9,16 +9,18 @@
# system call stubs.
LIB=kse
SHLIB_MAJOR= 1
-CFLAGS+=-DPTHREAD_KERNEL -D_THREAD_SAFE
+CFLAGS+=-DPTHREAD_KERNEL
CFLAGS+=-I${.CURDIR}/../libc/include -I${.CURDIR}/thread \
-I${.CURDIR}/../../include
+CFLAGS+=-I${.CURDIR}/arch/${MACHINE_ARCH}/include
+CFLAGS+=-I${.CURDIR}/sys
# Uncomment this if you want libpthread to contain debug information for
# thread locking.
-CFLAGS+=-D_LOCK_DEBUG
+CFLAGS+=-D_LOCK_DEBUG -g
 # enable extra internal consistency checks
-CFLAGS+=-D_PTHREADS_INVARIANTS
+CFLAGS+=-D_PTHREADS_INVARIANTS -Wall
AINC= -I${.CURDIR}/../libc/${MACHINE_ARCH} -I${.CURDIR}/thread
PRECIOUSLIB= yes
diff --git a/lib/libpthread/thread/Makefile.inc b/lib/libpthread/thread/Makefile.inc
index 2b7800a..6b00117 100644
--- a/lib/libpthread/thread/Makefile.inc
+++ b/lib/libpthread/thread/Makefile.inc
@@ -5,6 +5,7 @@
SRCS+= \
thr_aio_suspend.c \
+ thr_autoinit.c \
thr_attr_destroy.c \
thr_attr_init.c \
thr_attr_get_np.c \
@@ -27,7 +28,6 @@ SRCS+= \
thr_attr_setstack.c \
thr_attr_setstackaddr.c \
thr_attr_setstacksize.c \
- thr_autoinit.c \
thr_cancel.c \
thr_clean.c \
thr_close.c \
@@ -43,7 +43,6 @@ SRCS+= \
thr_find_thread.c \
thr_fork.c \
thr_fsync.c \
- thr_gc.c \
thr_getprio.c \
thr_getschedparam.c \
thr_info.c \
@@ -82,6 +81,8 @@ SRCS+= \
thr_sig.c \
thr_sigaction.c \
thr_sigmask.c \
+ thr_sigpending.c \
+ thr_sigprocmask.c \
thr_sigsuspend.c \
thr_sigwait.c \
thr_single_np.c \
diff --git a/lib/libpthread/thread/thr_acl_aclcheck_fd.c b/lib/libpthread/thread/thr_acl_aclcheck_fd.c
deleted file mode 100644
index 3c30e2c..0000000
--- a/lib/libpthread/thread/thr_acl_aclcheck_fd.c
+++ /dev/null
@@ -1,47 +0,0 @@
-/*-
- * Copyright (c) 2001 Thomas Moestl
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * $FreeBSD$
- */
-
-#include <sys/types.h>
-#include <sys/acl.h>
-#include <pthread.h>
-#include "thr_private.h"
-
-__weak_reference(___acl_aclcheck_fd, __acl_aclcheck_fd);
-
-int
-___acl_aclcheck_fd(int fd, acl_type_t tp, acl_t acl)
-{
- int error;
-
- if ((error = _FD_LOCK(fd, FD_READ, NULL)) == 0) {
- error = __sys___acl_aclcheck_fd(fd, tp, (struct acl *)acl);
- _FD_UNLOCK(fd, FD_READ);
- }
- return (error);
-}
-
diff --git a/lib/libpthread/thread/thr_aio_suspend.c b/lib/libpthread/thread/thr_aio_suspend.c
index 05a6c82..94eed27 100644
--- a/lib/libpthread/thread/thr_aio_suspend.c
+++ b/lib/libpthread/thread/thr_aio_suspend.c
@@ -39,12 +39,13 @@ int
_aio_suspend(const struct aiocb * const iocbs[], int niocb, const struct
timespec *timeout)
{
- int ret;
+ struct pthread *curthread = _get_curthread();
+ int ret;
- _thread_enter_cancellation_point();
+ _thr_enter_cancellation_point(curthread);
ret = __sys_aio_suspend(iocbs, niocb, timeout);
- _thread_leave_cancellation_point();
+ _thr_leave_cancellation_point(curthread);
- return ret;
+ return (ret);
}
diff --git a/lib/libpthread/thread/thr_attr_get_np.c b/lib/libpthread/thread/thr_attr_get_np.c
index 2a37f4d..4431824 100644
--- a/lib/libpthread/thread/thr_attr_get_np.c
+++ b/lib/libpthread/thread/thr_attr_get_np.c
@@ -36,22 +36,18 @@ __weak_reference(_pthread_attr_get_np, pthread_attr_get_np);
int
_pthread_attr_get_np(pthread_t pid, pthread_attr_t *dst)
{
+ struct pthread *curthread;
int ret;
if (pid == NULL || dst == NULL || *dst == NULL)
return (EINVAL);
- if ((ret = _find_thread(pid)) != 0)
+ curthread = _get_curthread();
+ if ((ret = _thr_ref_add(curthread, pid, /*include dead*/0)) != 0)
return (ret);
memcpy(*dst, &pid->attr, sizeof(struct pthread_attr));
-
- /*
- * Special case, if stack address was not provided by caller
- * of pthread_create(), then return address allocated internally
- */
- if ((*dst)->stackaddr_attr == NULL)
- (*dst)->stackaddr_attr = pid->stack;
+ _thr_ref_delete(curthread, pid);
return (0);
}
diff --git a/lib/libpthread/thread/thr_attr_init.c b/lib/libpthread/thread/thr_attr_init.c
index a3befed..d8b701e 100644
--- a/lib/libpthread/thread/thr_attr_init.c
+++ b/lib/libpthread/thread/thr_attr_init.c
@@ -51,7 +51,8 @@ _pthread_attr_init(pthread_attr_t *attr)
ret = ENOMEM;
else {
/* Initialise the attribute object with the defaults: */
- memcpy(pattr, &pthread_attr_default, sizeof(struct pthread_attr));
+ memcpy(pattr, &_pthread_attr_default,
+ sizeof(struct pthread_attr));
/* Return a pointer to the attribute object: */
*attr = pattr;
diff --git a/lib/libpthread/thread/thr_attr_setcreatesuspend_np.c b/lib/libpthread/thread/thr_attr_setcreatesuspend_np.c
index eddfc46..28f0546 100644
--- a/lib/libpthread/thread/thr_attr_setcreatesuspend_np.c
+++ b/lib/libpthread/thread/thr_attr_setcreatesuspend_np.c
@@ -45,7 +45,7 @@ _pthread_attr_setcreatesuspend_np(pthread_attr_t *attr)
errno = EINVAL;
ret = -1;
} else {
- (*attr)->suspend = PTHREAD_CREATE_SUSPENDED;
+ (*attr)->suspend = THR_CREATE_SUSPENDED;
ret = 0;
}
return(ret);
diff --git a/lib/libpthread/thread/thr_attr_setguardsize.c b/lib/libpthread/thread/thr_attr_setguardsize.c
index 94da871..59ec908 100644
--- a/lib/libpthread/thread/thr_attr_setguardsize.c
+++ b/lib/libpthread/thread/thr_attr_setguardsize.c
@@ -47,11 +47,11 @@ _pthread_attr_setguardsize(pthread_attr_t *attr, size_t guardsize)
else {
/*
* Round guardsize up to the nearest multiple of
- * _pthread_page_size.
+ * _thr_page_size.
*/
- if (guardsize % _pthread_page_size != 0)
- guardsize = ((guardsize / _pthread_page_size) + 1) *
- _pthread_page_size;
+ if (guardsize % _thr_page_size != 0)
+ guardsize = ((guardsize / _thr_page_size) + 1) *
+ _thr_page_size;
/* Save the stack size. */
(*attr)->guardsize_attr = guardsize;
diff --git a/lib/libpthread/thread/thr_attr_setschedparam.c b/lib/libpthread/thread/thr_attr_setschedparam.c
index 2f34c77..bbb4b1e 100644
--- a/lib/libpthread/thread/thr_attr_setschedparam.c
+++ b/lib/libpthread/thread/thr_attr_setschedparam.c
@@ -46,8 +46,8 @@ _pthread_attr_setschedparam(pthread_attr_t *attr, const struct sched_param *para
ret = EINVAL;
else if (param == NULL) {
ret = ENOTSUP;
- } else if ((param->sched_priority < PTHREAD_MIN_PRIORITY) ||
- (param->sched_priority > PTHREAD_MAX_PRIORITY)) {
+ } else if ((param->sched_priority < THR_MIN_PRIORITY) ||
+ (param->sched_priority > THR_MAX_PRIORITY)) {
/* Return an unsupported value error. */
ret = ENOTSUP;
} else
diff --git a/lib/libpthread/thread/thr_attr_setscope.c b/lib/libpthread/thread/thr_attr_setscope.c
index dc9e2dd..70dd69e 100644
--- a/lib/libpthread/thread/thr_attr_setscope.c
+++ b/lib/libpthread/thread/thr_attr_setscope.c
@@ -45,12 +45,11 @@ _pthread_attr_setscope(pthread_attr_t *attr, int contentionscope)
if ((attr == NULL) || (*attr == NULL)) {
/* Return an invalid argument: */
ret = EINVAL;
- } else if ((contentionscope != PTHREAD_SCOPE_PROCESS) ||
- (contentionscope == PTHREAD_SCOPE_SYSTEM)) {
- /* We don't support PTHREAD_SCOPE_SYSTEM. */
- ret = ENOTSUP;
+ } else if ((contentionscope != PTHREAD_SCOPE_PROCESS) &&
+ (contentionscope != PTHREAD_SCOPE_SYSTEM)) {
+ ret = EINVAL;
} else
(*attr)->flags |= contentionscope;
- return(ret);
+ return (ret);
}
diff --git a/lib/libpthread/thread/thr_autoinit.c b/lib/libpthread/thread/thr_autoinit.c
index 31e2d48..95b2a85 100644
--- a/lib/libpthread/thread/thr_autoinit.c
+++ b/lib/libpthread/thread/thr_autoinit.c
@@ -38,13 +38,16 @@
* threads package at program start-up time.
*/
+#include <pthread.h>
+#include "thr_private.h"
+
void _thread_init_hack(void) __attribute__ ((constructor));
void
_thread_init_hack(void)
{
- _thread_init();
+ _libpthread_init(NULL);
}
/*
diff --git a/lib/libpthread/thread/thr_cancel.c b/lib/libpthread/thread/thr_cancel.c
index 8b0b4c0..23e0dfa 100644
--- a/lib/libpthread/thread/thr_cancel.c
+++ b/lib/libpthread/thread/thr_cancel.c
@@ -6,32 +6,32 @@
#include <pthread.h>
#include "thr_private.h"
-static void finish_cancellation(void *arg);
-
__weak_reference(_pthread_cancel, pthread_cancel);
__weak_reference(_pthread_setcancelstate, pthread_setcancelstate);
__weak_reference(_pthread_setcanceltype, pthread_setcanceltype);
__weak_reference(_pthread_testcancel, pthread_testcancel);
+static int checkcancel(struct pthread *curthread);
+static void testcancel(struct pthread *curthread);
+static void finish_cancellation(void *arg);
+
int
_pthread_cancel(pthread_t pthread)
{
+ struct pthread *curthread = _get_curthread();
int ret;
- if ((ret = _find_thread(pthread)) != 0) {
- /* NOTHING */
- } else if (pthread->state == PS_DEAD || pthread->state == PS_DEADLOCK
- || (pthread->flags & PTHREAD_EXITING) != 0) {
- ret = 0;
- } else {
- /* Protect the scheduling queues: */
- _thread_kern_sig_defer();
+ if ((ret = _thr_ref_add(curthread, pthread, /*include dead*/0)) == 0) {
+ /*
+ * Take the scheduling lock while we change the cancel flags.
+ */
+ THR_SCHED_LOCK(curthread, pthread);
if (((pthread->cancelflags & PTHREAD_CANCEL_DISABLE) != 0) ||
- (((pthread->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS) == 0) &&
- ((pthread->cancelflags & PTHREAD_AT_CANCEL_POINT) == 0)))
+ (((pthread->cancelflags & THR_AT_CANCEL_POINT) == 0) &&
+ ((pthread->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS) == 0)))
/* Just mark it for cancellation: */
- pthread->cancelflags |= PTHREAD_CANCELLING;
+ pthread->cancelflags |= THR_CANCELLING;
else {
/*
* Check if we need to kick it back into the
@@ -40,23 +40,27 @@ _pthread_cancel(pthread_t pthread)
switch (pthread->state) {
case PS_RUNNING:
/* No need to resume: */
- pthread->cancelflags |= PTHREAD_CANCELLING;
+ pthread->cancelflags |= THR_CANCELLING;
+ break;
+
+ case PS_LOCKWAIT:
+ /*
+ * These can't be removed from the queue.
+ * Just mark it as cancelling and tell it
+ * to yield once it leaves the critical
+ * region.
+ */
+ pthread->cancelflags |= THR_CANCELLING;
+ pthread->critical_yield = 1;
break;
- case PS_SPINBLOCK:
- /* Remove these threads from the work queue: */
- if ((pthread->flags & PTHREAD_FLAGS_IN_WORKQ)
- != 0)
- PTHREAD_WORKQ_REMOVE(pthread);
- /* Fall through: */
case PS_SLEEP_WAIT:
- case PS_WAIT_WAIT:
case PS_SIGSUSPEND:
case PS_SIGWAIT:
/* Interrupt and resume: */
pthread->interrupted = 1;
- pthread->cancelflags |= PTHREAD_CANCELLING;
- PTHREAD_NEW_STATE(pthread,PS_RUNNING);
+ pthread->cancelflags |= THR_CANCELLING;
+ _thr_setrunnable_unlocked(pthread);
break;
case PS_JOIN:
@@ -68,8 +72,8 @@ _pthread_cancel(pthread_t pthread)
= NULL;
pthread->join_status.thread = NULL;
}
- pthread->cancelflags |= PTHREAD_CANCELLING;
- PTHREAD_NEW_STATE(pthread, PS_RUNNING);
+ pthread->cancelflags |= THR_CANCELLING;
+ _thr_setrunnable_unlocked(pthread);
break;
case PS_SUSPENDED:
@@ -86,8 +90,8 @@ _pthread_cancel(pthread_t pthread)
* cancellation completion routine.
*/
pthread->interrupted = 1;
- pthread->cancelflags |= PTHREAD_CANCEL_NEEDED;
- PTHREAD_NEW_STATE(pthread, PS_RUNNING);
+ pthread->cancelflags |= THR_CANCEL_NEEDED;
+ _thr_setrunnable_unlocked(pthread);
pthread->continuation = finish_cancellation;
break;
@@ -97,12 +101,17 @@ _pthread_cancel(pthread_t pthread)
/* Ignore - only here to silence -Wall: */
break;
}
+ if ((pthread->blocked != 0) &&
+ ((pthread->cancelflags & THR_AT_CANCEL_POINT) != 0))
+ kse_thr_interrupt(&pthread->tmbx);
}
- /* Unprotect the scheduling queues: */
- _thread_kern_sig_undefer();
-
- ret = 0;
+ /*
+ * Release the thread's scheduling lock and remove the
+ * reference:
+ */
+ THR_SCHED_UNLOCK(curthread, pthread);
+ _thr_ref_delete(curthread, pthread);
}
return (ret);
}
@@ -113,6 +122,10 @@ _pthread_setcancelstate(int state, int *oldstate)
struct pthread *curthread = _get_curthread();
int ostate;
int ret;
+ int need_exit = 0;
+
+ /* Take the scheduling lock while fiddling with the thread's state: */
+ THR_SCHED_LOCK(curthread, curthread);
ostate = curthread->cancelflags & PTHREAD_CANCEL_DISABLE;
@@ -122,7 +135,7 @@ _pthread_setcancelstate(int state, int *oldstate)
*oldstate = ostate;
curthread->cancelflags &= ~PTHREAD_CANCEL_DISABLE;
if ((curthread->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS) != 0)
- pthread_testcancel();
+ need_exit = checkcancel(curthread);
ret = 0;
break;
case PTHREAD_CANCEL_DISABLE:
@@ -135,6 +148,12 @@ _pthread_setcancelstate(int state, int *oldstate)
ret = EINVAL;
}
+ THR_SCHED_UNLOCK(curthread, curthread);
+ if (need_exit != 0) {
+ _thr_exit_cleanup();
+ pthread_exit(PTHREAD_CANCELED);
+ PANIC("cancel");
+ }
return (ret);
}
@@ -144,6 +163,10 @@ _pthread_setcanceltype(int type, int *oldtype)
struct pthread *curthread = _get_curthread();
int otype;
int ret;
+ int need_exit = 0;
+
+ /* Take the scheduling lock while fiddling with the state: */
+ THR_SCHED_LOCK(curthread, curthread);
otype = curthread->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS;
switch (type) {
@@ -151,7 +174,7 @@ _pthread_setcanceltype(int type, int *oldtype)
if (oldtype != NULL)
*oldtype = otype;
curthread->cancelflags |= PTHREAD_CANCEL_ASYNCHRONOUS;
- pthread_testcancel();
+ need_exit = checkcancel(curthread);
ret = 0;
break;
case PTHREAD_CANCEL_DEFERRED:
@@ -164,47 +187,72 @@ _pthread_setcanceltype(int type, int *oldtype)
ret = EINVAL;
}
+ THR_SCHED_UNLOCK(curthread, curthread);
+ if (need_exit != 0) {
+ _thr_exit_cleanup();
+ pthread_exit(PTHREAD_CANCELED);
+ PANIC("cancel");
+ }
return (ret);
}
-void
-_pthread_testcancel(void)
+static int
+checkcancel(struct pthread *curthread)
{
- struct pthread *curthread = _get_curthread();
-
if (((curthread->cancelflags & PTHREAD_CANCEL_DISABLE) == 0) &&
- ((curthread->cancelflags & PTHREAD_CANCELLING) != 0) &&
- ((curthread->flags & PTHREAD_EXITING) == 0)) {
+ ((curthread->cancelflags & THR_CANCELLING) != 0)) {
/*
* It is possible for this thread to be swapped out
* while performing cancellation; do not allow it
* to be cancelled again.
*/
- curthread->cancelflags &= ~PTHREAD_CANCELLING;
- _thread_exit_cleanup();
+ curthread->cancelflags &= ~THR_CANCELLING;
+ return (1);
+ }
+ else
+ return (0);
+}
+
+static void
+testcancel(struct pthread *curthread)
+{
+ /* Take the scheduling lock while fiddling with the state: */
+ THR_SCHED_LOCK(curthread, curthread);
+
+ if (checkcancel(curthread) != 0) {
+ /* Unlock before exiting: */
+ THR_SCHED_UNLOCK(curthread, curthread);
+
+ _thr_exit_cleanup();
pthread_exit(PTHREAD_CANCELED);
PANIC("cancel");
}
+
+ THR_SCHED_UNLOCK(curthread, curthread);
}
void
-_thread_enter_cancellation_point(void)
+_pthread_testcancel(void)
{
struct pthread *curthread = _get_curthread();
- /* Look for a cancellation before we block: */
- pthread_testcancel();
- curthread->cancelflags |= PTHREAD_AT_CANCEL_POINT;
+ testcancel(curthread);
}
void
-_thread_leave_cancellation_point(void)
+_thr_enter_cancellation_point(struct pthread *thread)
{
- struct pthread *curthread = _get_curthread();
+ /* Look for a cancellation before we block: */
+ testcancel(thread);
+ thread->cancelflags |= THR_AT_CANCEL_POINT;
+}
- curthread->cancelflags &= ~PTHREAD_AT_CANCEL_POINT;
+void
+_thr_leave_cancellation_point(struct pthread *thread)
+{
+ thread->cancelflags &= ~THR_AT_CANCEL_POINT;
/* Look for a cancellation after we unblock: */
- pthread_testcancel();
+ testcancel(thread);
}
static void
@@ -215,9 +263,9 @@ finish_cancellation(void *arg)
curthread->continuation = NULL;
curthread->interrupted = 0;
- if ((curthread->cancelflags & PTHREAD_CANCEL_NEEDED) != 0) {
- curthread->cancelflags &= ~PTHREAD_CANCEL_NEEDED;
- _thread_exit_cleanup();
+ if ((curthread->cancelflags & THR_CANCEL_NEEDED) != 0) {
+ curthread->cancelflags &= ~THR_CANCEL_NEEDED;
+ _thr_exit_cleanup();
pthread_exit(PTHREAD_CANCELED);
}
}
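
The cancellation rework keeps the application-visible deferred model: a pending cancel (THR_CANCELLING) only takes effect at a cancellation point or an explicit pthread_testcancel(), at which point the thread runs its cleanup and exits with PTHREAD_CANCELED. A small usage example of that model, with a compute loop that polls for cancellation (names illustrative):

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static void *
    worker(void *arg)
    {
    	(void)arg;
    	for (;;) {
    		/* ... a chunk of work with no blocking calls ... */
    		pthread_testcancel();	/* a deferred cancel acts here */
    	}
    	return (NULL);
    }

    int
    main(void)
    {
    	pthread_t tid;
    	void *res;

    	pthread_create(&tid, NULL, worker, NULL);
    	sleep(1);
    	pthread_cancel(tid);
    	pthread_join(tid, &res);
    	printf("worker %s\n",
    	    res == PTHREAD_CANCELED ? "was cancelled" : "exited normally");
    	return (0);
    }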
diff --git a/lib/libpthread/thread/thr_clean.c b/lib/libpthread/thread/thr_clean.c
index 8ae6b42..a8cedb4 100644
--- a/lib/libpthread/thread/thr_clean.c
+++ b/lib/libpthread/thread/thr_clean.c
@@ -46,7 +46,8 @@ _pthread_cleanup_push(void (*routine) (void *), void *routine_arg)
struct pthread *curthread = _get_curthread();
struct pthread_cleanup *new;
- if ((new = (struct pthread_cleanup *) malloc(sizeof(struct pthread_cleanup))) != NULL) {
+ if ((new = (struct pthread_cleanup *)
+ malloc(sizeof(struct pthread_cleanup))) != NULL) {
new->routine = routine;
new->routine_arg = routine_arg;
new->next = curthread->cleanup;
@@ -69,4 +70,3 @@ _pthread_cleanup_pop(int execute)
free(old);
}
}
-
diff --git a/lib/libpthread/thread/thr_close.c b/lib/libpthread/thread/thr_close.c
index d03961b..269140b 100644
--- a/lib/libpthread/thread/thr_close.c
+++ b/lib/libpthread/thread/thr_close.c
@@ -44,11 +44,12 @@ __weak_reference(__close, close);
int
__close(int fd)
{
+ struct pthread *curthread = _get_curthread();
int ret;
- _thread_enter_cancellation_point();
+ _thr_enter_cancellation_point(curthread);
ret = __sys_close(fd);
- _thread_leave_cancellation_point();
+ _thr_leave_cancellation_point(curthread);
- return ret;
+ return (ret);
}
diff --git a/lib/libpthread/thread/thr_cond.c b/lib/libpthread/thread/thr_cond.c
index a22d983..1221fd8 100644
--- a/lib/libpthread/thread/thr_cond.c
+++ b/lib/libpthread/thread/thr_cond.c
@@ -37,12 +37,17 @@
#include <pthread.h>
#include "thr_private.h"
+#define THR_IN_CONDQ(thr) (((thr)->sflags & THR_FLAGS_IN_SYNCQ) != 0)
+#define THR_CONDQ_SET(thr) (thr)->sflags |= THR_FLAGS_IN_SYNCQ
+#define THR_CONDQ_CLEAR(thr) (thr)->sflags &= ~THR_FLAGS_IN_SYNCQ
+
/*
* Prototypes
*/
-static inline pthread_t cond_queue_deq(pthread_cond_t);
-static inline void cond_queue_remove(pthread_cond_t, pthread_t);
-static inline void cond_queue_enq(pthread_cond_t, pthread_t);
+static inline struct pthread *cond_queue_deq(pthread_cond_t);
+static inline void cond_queue_remove(pthread_cond_t, pthread_t);
+static inline void cond_queue_enq(pthread_cond_t, pthread_t);
__weak_reference(_pthread_cond_init, pthread_cond_init);
__weak_reference(_pthread_cond_destroy, pthread_cond_destroy);
@@ -52,35 +57,12 @@ __weak_reference(_pthread_cond_signal, pthread_cond_signal);
__weak_reference(_pthread_cond_broadcast, pthread_cond_broadcast);
-/* Reinitialize a condition variable to defaults. */
-int
-_cond_reinit(pthread_cond_t *cond)
-{
- int ret = 0;
-
- if (cond == NULL)
- ret = EINVAL;
- else if (*cond == NULL)
- ret = pthread_cond_init(cond, NULL);
- else {
- /*
- * Initialize the condition variable structure:
- */
- TAILQ_INIT(&(*cond)->c_queue);
- (*cond)->c_flags = COND_FLAGS_INITED;
- (*cond)->c_type = COND_TYPE_FAST;
- (*cond)->c_mutex = NULL;
- (*cond)->c_seqno = 0;
- memset(&(*cond)->lock, 0, sizeof((*cond)->lock));
- }
- return (ret);
-}
-
int
_pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
{
enum pthread_cond_type type;
pthread_cond_t pcond;
+ int flags;
int rval = 0;
if (cond == NULL)
@@ -93,9 +75,11 @@ _pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
if (cond_attr != NULL && *cond_attr != NULL) {
/* Default to a fast condition variable: */
type = (*cond_attr)->c_type;
+ flags = (*cond_attr)->c_flags;
} else {
/* Default to a fast condition variable: */
type = COND_TYPE_FAST;
+ flags = 0;
}
/* Process according to condition variable type: */
@@ -117,6 +101,10 @@ _pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
if ((pcond = (pthread_cond_t)
malloc(sizeof(struct pthread_cond))) == NULL) {
rval = ENOMEM;
+ } else if (_lock_init(&pcond->c_lock, LCK_ADAPTIVE,
+ _kse_lock_wait, _kse_lock_wakeup) != 0) {
+ free(pcond);
+ rval = ENOMEM;
} else {
/*
* Initialise the condition variable
@@ -127,7 +115,6 @@ _pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
pcond->c_type = type;
pcond->c_mutex = NULL;
pcond->c_seqno = 0;
- memset(&pcond->lock,0,sizeof(pcond->lock));
*cond = pcond;
}
}
@@ -139,25 +126,32 @@ _pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
int
_pthread_cond_destroy(pthread_cond_t *cond)
{
- int rval = 0;
+ struct pthread_cond *cv;
+ struct pthread *curthread = _get_curthread();
+ int rval = 0;
if (cond == NULL || *cond == NULL)
rval = EINVAL;
else {
/* Lock the condition variable structure: */
- _SPINLOCK(&(*cond)->lock);
-
- /*
- * Free the memory allocated for the condition
- * variable structure:
- */
- free(*cond);
+ THR_LOCK_ACQUIRE(curthread, &(*cond)->c_lock);
/*
* NULL the caller's pointer now that the condition
* variable has been destroyed:
*/
+ cv = *cond;
*cond = NULL;
+
+ /* Unlock the condition variable structure: */
+ THR_LOCK_RELEASE(curthread, &cv->c_lock);
+
+ /*
+ * Free the memory allocated for the condition
+ * variable structure:
+ */
+ free(cv);
+
}
/* Return the completion status: */
return (rval);
@@ -170,20 +164,25 @@ _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
int rval = 0;
int done = 0;
int interrupted = 0;
+ int unlock_mutex = 1;
int seqno;
- _thread_enter_cancellation_point();
-
- if (cond == NULL)
+ _thr_enter_cancellation_point(curthread);
+
+ if (cond == NULL) {
+ _thr_leave_cancellation_point(curthread);
return (EINVAL);
+ }
/*
* If the condition variable is statically initialized,
* perform the dynamic initialization:
*/
if (*cond == NULL &&
- (rval = pthread_cond_init(cond, NULL)) != 0)
+ (rval = pthread_cond_init(cond, NULL)) != 0) {
+ _thr_leave_cancellation_point(curthread);
return (rval);
+ }
/*
* Enter a loop waiting for a condition signal or broadcast
@@ -196,7 +195,7 @@ _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
*/
do {
/* Lock the condition variable structure: */
- _SPINLOCK(&(*cond)->lock);
+ THR_LOCK_ACQUIRE(curthread, &(*cond)->c_lock);
/*
* If the condvar was statically allocated, properly
@@ -214,7 +213,7 @@ _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
if ((mutex == NULL) || (((*cond)->c_mutex != NULL) &&
((*cond)->c_mutex != *mutex))) {
/* Unlock the condition variable structure: */
- _SPINUNLOCK(&(*cond)->lock);
+ THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);
/* Return invalid argument error: */
rval = EINVAL;
@@ -237,7 +236,8 @@ _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
curthread->wakeup_time.tv_sec = -1;
/* Unlock the mutex: */
- if ((rval = _mutex_cv_unlock(mutex)) != 0) {
+ if ((unlock_mutex != 0) &&
+ ((rval = _mutex_cv_unlock(mutex)) != 0)) {
/*
* Cannot unlock the mutex, so remove
* the running thread from the condition
@@ -246,45 +246,60 @@ _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
cond_queue_remove(*cond, curthread);
/* Check for no more waiters: */
- if (TAILQ_FIRST(&(*cond)->c_queue) ==
- NULL)
+ if (TAILQ_FIRST(&(*cond)->c_queue) == NULL)
(*cond)->c_mutex = NULL;
/* Unlock the condition variable structure: */
- _SPINUNLOCK(&(*cond)->lock);
- } else {
+ THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);
+ }
+ else {
/*
- * Schedule the next thread and unlock
- * the condition variable structure:
+ * Don't unlock the mutex the next
+ * time through the loop (if the
+ * thread has to be requeued after
+ * handling a signal).
*/
- _thread_kern_sched_state_unlock(PS_COND_WAIT,
- &(*cond)->lock, __FILE__, __LINE__);
+ unlock_mutex = 0;
- done = (seqno != (*cond)->c_seqno);
+ /*
+ * This thread is active and is in a
+ * critical region (holding the cv
+ * lock); we should be able to safely
+ * set the state.
+ */
+ THR_SET_STATE(curthread, PS_COND_WAIT);
- interrupted = curthread->interrupted;
+ /* Remember the CV: */
+ curthread->data.cond = *cond;
+
+ /* Unlock the CV structure: */
+ THR_LOCK_RELEASE(curthread,
+ &(*cond)->c_lock);
+
+ /* Schedule the next thread: */
+ _thr_sched_switch(curthread);
+
+ curthread->data.cond = NULL;
/*
- * Check if the wait was interrupted
- * (canceled) or needs to be resumed
- * after handling a signal.
+ * XXX - This really isn't a good check
+ * since there can be more than one
+ * thread waiting on the CV. Signals
+ * sent to threads waiting on mutexes
+ * or CVs should really be deferred
+ * until the threads are no longer
+ * waiting, but POSIX says that signals
+ * should be sent "as soon as possible".
*/
- if (interrupted != 0) {
- /*
- * Lock the mutex and ignore any
- * errors. Note that even
- * though this thread may have
- * been canceled, POSIX requires
- * that the mutex be reaquired
- * prior to cancellation.
- */
- (void)_mutex_cv_lock(mutex);
- } else {
+ done = (seqno != (*cond)->c_seqno);
+
+ if (THR_IN_SYNCQ(curthread)) {
/*
* Lock the condition variable
* while removing the thread.
*/
- _SPINLOCK(&(*cond)->lock);
+ THR_LOCK_ACQUIRE(curthread,
+ &(*cond)->c_lock);
cond_queue_remove(*cond,
curthread);
@@ -293,11 +308,24 @@ _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
if (TAILQ_FIRST(&(*cond)->c_queue) == NULL)
(*cond)->c_mutex = NULL;
- _SPINUNLOCK(&(*cond)->lock);
+ THR_LOCK_RELEASE(curthread,
+ &(*cond)->c_lock);
+ }
- /* Lock the mutex: */
+ /*
+ * Save the interrupted flag; locking
+ * the mutex may destroy it.
+ */
+ interrupted = curthread->interrupted;
+
+ /*
+ * Note that even though this thread may
+ * have been canceled, POSIX requires
+ * that the mutex be reacquired prior to
+ * cancellation.
+ */
+ if (done != 0)
rval = _mutex_cv_lock(mutex);
- }
}
}
break;
@@ -305,7 +333,7 @@ _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
/* Trap invalid condition variable types: */
default:
/* Unlock the condition variable structure: */
- _SPINUNLOCK(&(*cond)->lock);
+ THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);
/* Return an invalid argument error: */
rval = EINVAL;
@@ -316,13 +344,25 @@ _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
curthread->continuation((void *) curthread);
} while ((done == 0) && (rval == 0));
- _thread_leave_cancellation_point();
+ _thr_leave_cancellation_point(curthread);
/* Return the completion status: */
return (rval);
}
int
+__pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
+{
+ struct pthread *curthread = _get_curthread();
+ int ret;
+
+ _thr_enter_cancellation_point(curthread);
+ ret = _pthread_cond_wait(cond, mutex);
+ _thr_leave_cancellation_point(curthread);
+ return (ret);
+}
+
+int
_pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
const struct timespec * abstime)
{
@@ -330,19 +370,24 @@ _pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
int rval = 0;
int done = 0;
int interrupted = 0;
+ int unlock_mutex = 1;
int seqno;
- _thread_enter_cancellation_point();
-
+ _thr_enter_cancellation_point(curthread);
+
if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
- abstime->tv_nsec >= 1000000000)
+ abstime->tv_nsec >= 1000000000) {
+ _thr_leave_cancellation_point(curthread);
return (EINVAL);
+ }
/*
* If the condition variable is statically initialized, perform dynamic
* initialization.
*/
- if (*cond == NULL && (rval = pthread_cond_init(cond, NULL)) != 0)
+ if (*cond == NULL && (rval = pthread_cond_init(cond, NULL)) != 0) {
+ _thr_leave_cancellation_point(curthread);
return (rval);
+ }
/*
* Enter a loop waiting for a condition signal or broadcast
@@ -355,7 +400,7 @@ _pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
*/
do {
/* Lock the condition variable structure: */
- _SPINLOCK(&(*cond)->lock);
+ THR_LOCK_ACQUIRE(curthread, &(*cond)->c_lock);
/*
* If the condvar was statically allocated, properly
@@ -376,11 +421,10 @@ _pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
rval = EINVAL;
/* Unlock the condition variable structure: */
- _SPINUNLOCK(&(*cond)->lock);
+ THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);
} else {
/* Set the wakeup time: */
- curthread->wakeup_time.tv_sec =
- abstime->tv_sec;
+ curthread->wakeup_time.tv_sec = abstime->tv_sec;
curthread->wakeup_time.tv_nsec =
abstime->tv_nsec;
@@ -399,10 +443,11 @@ _pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
seqno = (*cond)->c_seqno;
/* Unlock the mutex: */
- if ((rval = _mutex_cv_unlock(mutex)) != 0) {
+ if ((unlock_mutex != 0) &&
+ ((rval = _mutex_cv_unlock(mutex)) != 0)) {
/*
- * Cannot unlock the mutex, so remove
- * the running thread from the condition
+ * Cannot unlock the mutex; remove the
+ * running thread from the condition
* variable queue:
*/
cond_queue_remove(*cond, curthread);
@@ -412,40 +457,55 @@ _pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
(*cond)->c_mutex = NULL;
/* Unlock the condition variable structure: */
- _SPINUNLOCK(&(*cond)->lock);
+ THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);
} else {
/*
- * Schedule the next thread and unlock
- * the condition variable structure:
+ * Don't unlock the mutex the next
+ * time through the loop (if the
+ * thread has to be requeued after
+ * handling a signal).
*/
- _thread_kern_sched_state_unlock(PS_COND_WAIT,
- &(*cond)->lock, __FILE__, __LINE__);
+ unlock_mutex = 0;
- done = (seqno != (*cond)->c_seqno);
+ /*
+ * This thread is active and is in a
+ * critical region (holding the cv
+ * lock); we should be able to safely
+ * set the state.
+ */
+ THR_SET_STATE(curthread, PS_COND_WAIT);
- interrupted = curthread->interrupted;
+ /* Remember the CV: */
+ curthread->data.cond = *cond;
+
+ /* Unlock the CV structure: */
+ THR_LOCK_RELEASE(curthread,
+ &(*cond)->c_lock);
+
+ /* Schedule the next thread: */
+ _thr_sched_switch(curthread);
+
+ curthread->data.cond = NULL;
/*
- * Check if the wait was interrupted
- * (canceled) or needs to be resumed
- * after handling a signal.
+ * XXX - This really isn't a good check
+ * since there can be more than one
+ * thread waiting on the CV. Signals
+ * sent to threads waiting on mutexes
+ * or CVs should really be deferred
+ * until the threads are no longer
+ * waiting, but POSIX says that signals
+ * should be sent "as soon as possible".
*/
- if (interrupted != 0) {
- /*
- * Lock the mutex and ignore any
- * errors. Note that even
- * though this thread may have
- * been canceled, POSIX requires
- * that the mutex be reaquired
- * prior to cancellation.
- */
- (void)_mutex_cv_lock(mutex);
- } else {
+ done = (seqno != (*cond)->c_seqno);
+
+ if (THR_IN_CONDQ(curthread)) {
/*
* Lock the condition variable
* while removing the thread.
*/
- _SPINLOCK(&(*cond)->lock);
+ THR_LOCK_ACQUIRE(curthread,
+ &(*cond)->c_lock);
cond_queue_remove(*cond,
curthread);
@@ -454,21 +514,22 @@ _pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
if (TAILQ_FIRST(&(*cond)->c_queue) == NULL)
(*cond)->c_mutex = NULL;
- _SPINUNLOCK(&(*cond)->lock);
+ THR_LOCK_RELEASE(curthread,
+ &(*cond)->c_lock);
+ }
- /* Lock the mutex: */
+ /*
+ * Save the interrupted flag; locking
+ * the mutex may destroy it.
+ */
+ interrupted = curthread->interrupted;
+ if (curthread->timeout != 0) {
+ /* The wait timed out. */
+ rval = ETIMEDOUT;
+ (void)_mutex_cv_lock(mutex);
+ } else if ((interrupted == 0) ||
+ (done != 0))
rval = _mutex_cv_lock(mutex);
-
- /*
- * Return ETIMEDOUT if the wait
- * timed out and there wasn't an
- * error locking the mutex:
- */
- if ((curthread->timeout != 0)
- && rval == 0)
- rval = ETIMEDOUT;
-
- }
}
}
break;
@@ -476,7 +537,7 @@ _pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
/* Trap invalid condition variable types: */
default:
/* Unlock the condition variable structure: */
- _SPINUNLOCK(&(*cond)->lock);
+ THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);
/* Return an invalid argument error: */
rval = EINVAL;
@@ -484,20 +545,35 @@ _pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
}
if ((interrupted != 0) && (curthread->continuation != NULL))
- curthread->continuation((void *) curthread);
+ curthread->continuation((void *)curthread);
} while ((done == 0) && (rval == 0));
- _thread_leave_cancellation_point();
+ _thr_leave_cancellation_point(curthread);
/* Return the completion status: */
return (rval);
}
int
+__pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
+ const struct timespec *abstime)
+{
+ struct pthread *curthread = _get_curthread();
+ int ret;
+
+ _thr_enter_cancellation_point(curthread);
+ ret = _pthread_cond_timedwait(cond, mutex, abstime);
+ _thr_leave_cancellation_point(curthread);
+ return (ret);
+}
+
+
+int
_pthread_cond_signal(pthread_cond_t * cond)
{
- int rval = 0;
- pthread_t pthread;
+ struct pthread *curthread = _get_curthread();
+ struct pthread *pthread;
+ int rval = 0;
if (cond == NULL)
rval = EINVAL;
@@ -506,14 +582,8 @@ _pthread_cond_signal(pthread_cond_t * cond)
* initialization.
*/
else if (*cond != NULL || (rval = pthread_cond_init(cond, NULL)) == 0) {
- /*
- * Defer signals to protect the scheduling queues
- * from access by the signal handler:
- */
- _thread_kern_sig_defer();
-
/* Lock the condition variable structure: */
- _SPINLOCK(&(*cond)->lock);
+ THR_LOCK_ACQUIRE(curthread, &(*cond)->c_lock);
/* Process according to condition variable type: */
switch ((*cond)->c_type) {
@@ -522,13 +592,19 @@ _pthread_cond_signal(pthread_cond_t * cond)
/* Increment the sequence number: */
(*cond)->c_seqno++;
- if ((pthread = cond_queue_deq(*cond)) != NULL) {
- /*
- * Wake up the signaled thread:
- */
- PTHREAD_NEW_STATE(pthread, PS_RUNNING);
+ /*
+ * Wakeups have to be done with the CV lock held;
+ * otherwise there is a race condition where the
+ * thread can time out, run on another KSE, and enter
+ * another blocking state (including blocking on a CV).
+ */
+ if ((pthread = TAILQ_FIRST(&(*cond)->c_queue))
+ != NULL) {
+ THR_SCHED_LOCK(curthread, pthread);
+ cond_queue_remove(*cond, pthread);
+ _thr_setrunnable_unlocked(pthread);
+ THR_SCHED_UNLOCK(curthread, pthread);
}
-
/* Check for no more waiters: */
if (TAILQ_FIRST(&(*cond)->c_queue) == NULL)
(*cond)->c_mutex = NULL;
@@ -542,13 +618,7 @@ _pthread_cond_signal(pthread_cond_t * cond)
}
/* Unlock the condition variable structure: */
- _SPINUNLOCK(&(*cond)->lock);
-
- /*
- * Undefer and handle pending signals, yielding if
- * necessary:
- */
- _thread_kern_sig_undefer();
+ THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);
}
/* Return the completion status: */
@@ -558,8 +628,9 @@ _pthread_cond_signal(pthread_cond_t * cond)
int
_pthread_cond_broadcast(pthread_cond_t * cond)
{
- int rval = 0;
- pthread_t pthread;
+ struct pthread *curthread = _get_curthread();
+ struct pthread *pthread;
+ int rval = 0;
if (cond == NULL)
rval = EINVAL;
@@ -568,14 +639,8 @@ _pthread_cond_broadcast(pthread_cond_t * cond)
* initialization.
*/
else if (*cond != NULL || (rval = pthread_cond_init(cond, NULL)) == 0) {
- /*
- * Defer signals to protect the scheduling queues
- * from access by the signal handler:
- */
- _thread_kern_sig_defer();
-
/* Lock the condition variable structure: */
- _SPINLOCK(&(*cond)->lock);
+ THR_LOCK_ACQUIRE(curthread, &(*cond)->c_lock);
/* Process according to condition variable type: */
switch ((*cond)->c_type) {
@@ -588,11 +653,12 @@ _pthread_cond_broadcast(pthread_cond_t * cond)
* Enter a loop to bring all threads off the
* condition queue:
*/
- while ((pthread = cond_queue_deq(*cond)) != NULL) {
- /*
- * Wake up the signaled thread:
- */
- PTHREAD_NEW_STATE(pthread, PS_RUNNING);
+ while ((pthread = TAILQ_FIRST(&(*cond)->c_queue))
+ != NULL) {
+ THR_SCHED_LOCK(curthread, pthread);
+ cond_queue_remove(*cond, pthread);
+ _thr_setrunnable_unlocked(pthread);
+ THR_SCHED_UNLOCK(curthread, pthread);
}
/* There are no more waiting threads: */
@@ -607,13 +673,7 @@ _pthread_cond_broadcast(pthread_cond_t * cond)
}
/* Unlock the condition variable structure: */
- _SPINUNLOCK(&(*cond)->lock);
-
- /*
- * Undefer and handle pending signals, yielding if
- * necessary:
- */
- _thread_kern_sig_undefer();
+ THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);
}
/* Return the completion status: */
@@ -621,26 +681,20 @@ _pthread_cond_broadcast(pthread_cond_t * cond)
}
void
-_cond_wait_backout(pthread_t pthread)
+_cond_wait_backout(struct pthread *curthread)
{
pthread_cond_t cond;
- cond = pthread->data.cond;
+ cond = curthread->data.cond;
if (cond != NULL) {
- /*
- * Defer signals to protect the scheduling queues
- * from access by the signal handler:
- */
- _thread_kern_sig_defer();
-
/* Lock the condition variable structure: */
- _SPINLOCK(&cond->lock);
+ THR_LOCK_ACQUIRE(curthread, &cond->c_lock);
/* Process according to condition variable type: */
switch (cond->c_type) {
/* Fast condition variable: */
case COND_TYPE_FAST:
- cond_queue_remove(cond, pthread);
+ cond_queue_remove(cond, curthread);
/* Check for no more waiters: */
if (TAILQ_FIRST(&cond->c_queue) == NULL)
@@ -652,13 +706,7 @@ _cond_wait_backout(pthread_t pthread)
}
/* Unlock the condition variable structure: */
- _SPINUNLOCK(&cond->lock);
-
- /*
- * Undefer and handle pending signals, yielding if
- * necessary:
- */
- _thread_kern_sig_undefer();
+ THR_LOCK_RELEASE(curthread, &cond->c_lock);
}
}
@@ -666,14 +714,14 @@ _cond_wait_backout(pthread_t pthread)
* Dequeue a waiting thread from the head of a condition queue in
* descending priority order.
*/
-static inline pthread_t
+static inline struct pthread *
cond_queue_deq(pthread_cond_t cond)
{
- pthread_t pthread;
+ struct pthread *pthread;
while ((pthread = TAILQ_FIRST(&cond->c_queue)) != NULL) {
TAILQ_REMOVE(&cond->c_queue, pthread, sqe);
- pthread->flags &= ~PTHREAD_FLAGS_IN_CONDQ;
+ THR_CONDQ_SET(pthread);
if ((pthread->timeout == 0) && (pthread->interrupted == 0))
/*
* Only exit the loop when we find a thread
@@ -684,7 +732,7 @@ cond_queue_deq(pthread_cond_t cond)
break;
}
- return(pthread);
+ return (pthread);
}
/*
@@ -692,7 +740,7 @@ cond_queue_deq(pthread_cond_t cond)
* order.
*/
static inline void
-cond_queue_remove(pthread_cond_t cond, pthread_t pthread)
+cond_queue_remove(pthread_cond_t cond, struct pthread *pthread)
{
/*
* Because pthread_cond_timedwait() can timeout as well
@@ -700,9 +748,9 @@ cond_queue_remove(pthread_cond_t cond, pthread_t pthread)
* guard against removing the thread from the queue if
* it isn't in the queue.
*/
- if (pthread->flags & PTHREAD_FLAGS_IN_CONDQ) {
+ if (THR_IN_CONDQ(pthread)) {
TAILQ_REMOVE(&cond->c_queue, pthread, sqe);
- pthread->flags &= ~PTHREAD_FLAGS_IN_CONDQ;
+ THR_CONDQ_CLEAR(pthread);
}
}
@@ -711,11 +759,12 @@ cond_queue_remove(pthread_cond_t cond, pthread_t pthread)
* order.
*/
static inline void
-cond_queue_enq(pthread_cond_t cond, pthread_t pthread)
+cond_queue_enq(pthread_cond_t cond, struct pthread *pthread)
{
- pthread_t tid = TAILQ_LAST(&cond->c_queue, cond_head);
+ struct pthread *tid = TAILQ_LAST(&cond->c_queue, cond_head);
- PTHREAD_ASSERT_NOT_IN_SYNCQ(pthread);
+ THR_ASSERT(!THR_IN_SYNCQ(pthread),
+ "cond_queue_enq: thread already queued!");
/*
* For the common case of all threads having equal priority,
@@ -730,6 +779,6 @@ cond_queue_enq(pthread_cond_t cond, pthread_t pthread)
tid = TAILQ_NEXT(tid, sqe);
TAILQ_INSERT_BEFORE(tid, pthread, sqe);
}
- pthread->flags |= PTHREAD_FLAGS_IN_CONDQ;
+ THR_CONDQ_SET(pthread);
pthread->data.cond = cond;
}
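
A minimal application-level sketch (illustrative only, not from this change) of the waiter/signaler protocol these internals serve; the while loop around pthread_cond_wait() mirrors the seqno/done re-check above and guards against spurious or stolen wakeups.

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  cv = PTHREAD_COND_INITIALIZER;
    static int ready = 0;

    static void *
    producer(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&lock);
        ready = 1;
        /* Wake one waiter; the waiter still re-tests the predicate. */
        pthread_cond_signal(&cv);
        pthread_mutex_unlock(&lock);
        return (NULL);
    }

    int
    main(void)
    {
        pthread_t td;

        pthread_create(&td, NULL, producer, NULL);
        pthread_mutex_lock(&lock);
        while (ready == 0)          /* guards against spurious wakeups */
            pthread_cond_wait(&cv, &lock);
        pthread_mutex_unlock(&lock);
        pthread_join(td, NULL);
        printf("ready=%d\n", ready);
        return (0);
    }
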
diff --git a/lib/libpthread/thread/thr_condattr_init.c b/lib/libpthread/thread/thr_condattr_init.c
index 1af12e1..7cf4c9e 100644
--- a/lib/libpthread/thread/thr_condattr_init.c
+++ b/lib/libpthread/thread/thr_condattr_init.c
@@ -46,13 +46,13 @@ _pthread_condattr_init(pthread_condattr_t *attr)
pthread_condattr_t pattr;
if ((pattr = (pthread_condattr_t)
- malloc(sizeof(struct pthread_cond_attr))) == NULL) {
+ malloc(sizeof(struct pthread_cond_attr))) == NULL) {
ret = ENOMEM;
} else {
- memcpy(pattr, &pthread_condattr_default,
- sizeof(struct pthread_cond_attr));
+ memcpy(pattr, &_pthread_condattr_default,
+ sizeof(struct pthread_cond_attr));
*attr = pattr;
ret = 0;
}
- return(ret);
+ return (ret);
}
diff --git a/lib/libpthread/thread/thr_creat.c b/lib/libpthread/thread/thr_creat.c
index 3d5be08..bba8ec3 100644
--- a/lib/libpthread/thread/thr_creat.c
+++ b/lib/libpthread/thread/thr_creat.c
@@ -38,11 +38,12 @@ __weak_reference(___creat, creat);
int
___creat(const char *path, mode_t mode)
{
- int ret;
+ struct pthread *curthread = _get_curthread();
+ int ret;
- _thread_enter_cancellation_point();
+ _thr_enter_cancellation_point(curthread);
ret = __creat(path, mode);
- _thread_leave_cancellation_point();
+ _thr_leave_cancellation_point(curthread);
return ret;
}
diff --git a/lib/libpthread/thread/thr_create.c b/lib/libpthread/thread/thr_create.c
index c9f5a42..5a435ca 100644
--- a/lib/libpthread/thread/thr_create.c
+++ b/lib/libpthread/thread/thr_create.c
@@ -50,102 +50,150 @@ int _thread_next_offset = OFF(tle.tqe_next);
int _thread_uniqueid_offset = OFF(uniqueid);
int _thread_state_offset = OFF(state);
int _thread_name_offset = OFF(name);
-int _thread_ctx_offset = OFF(mailbox.tm_context);
+int _thread_ctx_offset = OFF(tmbx.tm_context);
#undef OFF
int _thread_PS_RUNNING_value = PS_RUNNING;
int _thread_PS_DEAD_value = PS_DEAD;
+static int create_stack(struct pthread_attr *pattr);
+static void thread_start(struct pthread *curthread,
+ void *(*start_routine) (void *), void *arg);
+
__weak_reference(_pthread_create, pthread_create);
+/*
+ * Some notes on new thread creation and first time initialization
+ * to enable multi-threading.
+ *
+ * There are basically two things that need to be done.
+ *
+ * 1) The internal library variables must be initialized.
+ * 2) Upcalls need to be enabled to allow multiple threads
+ * to be run.
+ *
+ * The first may be done as a result of other pthread functions
+ * being called. When _thr_initial is null, _libpthread_init is
+ * called to initialize the internal variables; this also creates
+ * or sets the initial thread. It'd be nice to automatically
+ * have _libpthread_init called on program execution so we don't
+ * have to have checks throughout the library.
+ *
+ * The second part is only triggered by the creation of the first
+ * thread (other than the initial/main thread). If the thread
+ * being created is a scope system thread, then a new KSE/KSEG
+ * pair needs to be allocated. Also, if upcalls haven't been
+ * enabled on the initial thread's KSE, they must be now that
+ * there is more than one thread; this could be delayed until
+ * the initial KSEG has more than one thread.
+ */
int
_pthread_create(pthread_t * thread, const pthread_attr_t * attr,
void *(*start_routine) (void *), void *arg)
{
- struct pthread *curthread = _get_curthread();
- struct itimerval itimer;
- int f_gc = 0;
- int ret = 0;
- pthread_t gc_thread;
- pthread_t new_thread;
- pthread_attr_t pattr;
- void *stack;
-
- /*
- * Locking functions in libc are required when there are
- * threads other than the initial thread.
- */
- __isthreaded = 1;
+ struct kse *curkse;
+ struct pthread *curthread, *new_thread;
+ struct kse *kse = NULL;
+ struct kse_group *kseg = NULL;
+ kse_critical_t crit;
+ int i;
+ int ret = 0;
+
+ if (_thr_initial == NULL)
+ _libpthread_init(NULL);
+
+ crit = _kse_critical_enter();
+ curthread = _get_curthread();
+ curkse = curthread->kse;
/* Allocate memory for the thread structure: */
- if ((new_thread = (pthread_t) malloc(sizeof(struct pthread))) == NULL) {
+ if ((new_thread = _thr_alloc(curkse)) == NULL) {
/* Insufficient memory to create a thread: */
ret = EAGAIN;
} else {
+ /* Initialize the thread structure: */
+ memset(new_thread, 0, sizeof(struct pthread));
+
/* Check if default thread attributes are required: */
- if (attr == NULL || *attr == NULL) {
+ if (attr == NULL || *attr == NULL)
/* Use the default thread attributes: */
- pattr = &pthread_attr_default;
- } else {
- pattr = *attr;
+ new_thread->attr = _pthread_attr_default;
+ else
+ new_thread->attr = *(*attr);
+
+ if (create_stack(&new_thread->attr) != 0) {
+ /* Insufficient memory to create a stack: */
+ ret = EAGAIN;
+ _thr_free(curkse, new_thread);
}
- /* Check if a stack was specified in the thread attributes: */
- if ((stack = pattr->stackaddr_attr) != NULL) {
+ else if (((new_thread->attr.flags & PTHREAD_SCOPE_SYSTEM) != 0) &&
+ (((kse = _kse_alloc(curkse)) == NULL)
+ || ((kseg = _kseg_alloc(curkse)) == NULL))) {
+ /* Insufficient memory to create a new KSE/KSEG: */
+ ret = EAGAIN;
+ if (kse != NULL)
+ _kse_free(curkse, kse);
+ if ((new_thread->attr.flags & THR_STACK_USER) == 0) {
+ KSE_LOCK_ACQUIRE(curkse, &_thread_list_lock);
+ _thr_stack_free(&new_thread->attr);
+ KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
+ }
+ _thr_free(curkse, new_thread);
}
- /* Allocate a stack: */
else {
- stack = _thread_stack_alloc(pattr->stacksize_attr,
- pattr->guardsize_attr);
- if (stack == NULL) {
- ret = EAGAIN;
- free(new_thread);
+ if (kseg != NULL) {
+ /* Add the KSE to the KSEG's list of KSEs. */
+ TAILQ_INSERT_HEAD(&kseg->kg_kseq, kse, k_qe);
+ kse->k_kseg = kseg;
+ kse->k_schedq = &kseg->kg_schedq;
}
- }
+ /*
+ * Write a magic value to the thread structure
+ * to help identify valid ones:
+ */
+ new_thread->magic = THR_MAGIC;
- /* Check for errors: */
- if (ret != 0) {
- } else {
- /* Initialise the thread structure: */
- memset(new_thread, 0, sizeof(struct pthread));
new_thread->slice_usec = -1;
- new_thread->stack = stack;
new_thread->start_routine = start_routine;
new_thread->arg = arg;
-
new_thread->cancelflags = PTHREAD_CANCEL_ENABLE |
PTHREAD_CANCEL_DEFERRED;
- /*
- * Write a magic value to the thread structure
- * to help identify valid ones:
- */
- new_thread->magic = PTHREAD_MAGIC;
+ /* Initialize the thread for signals: */
+ new_thread->sigmask = curthread->sigmask;
+
+ /* No thread is wanting to join to this one: */
+ new_thread->joiner = NULL;
- /* Initialise the machine context: */
- getcontext(&new_thread->mailbox.tm_context);
- new_thread->mailbox.tm_context.uc_stack.ss_sp =
- new_thread->stack;
- new_thread->mailbox.tm_context.uc_stack.ss_size =
- pattr->stacksize_attr;
- makecontext(&new_thread->mailbox.tm_context,
- _thread_start, 1);
- new_thread->mailbox.tm_udata = (void *)new_thread;
+ /* Initialize the signal frame: */
+ new_thread->curframe = NULL;
- /* Copy the thread attributes: */
- memcpy(&new_thread->attr, pattr, sizeof(struct pthread_attr));
+ /* Initialize the machine context: */
+ THR_GETCONTEXT(&new_thread->tmbx.tm_context);
+ new_thread->tmbx.tm_udata = new_thread;
+ new_thread->tmbx.tm_context.uc_sigmask =
+ new_thread->sigmask;
+ new_thread->tmbx.tm_context.uc_stack.ss_size =
+ new_thread->attr.stacksize_attr;
+ new_thread->tmbx.tm_context.uc_stack.ss_sp =
+ new_thread->attr.stackaddr_attr;
+
+ makecontext(&new_thread->tmbx.tm_context,
+ (void (*)(void))thread_start, 4, new_thread,
+ start_routine, arg);
/*
* Check if this thread is to inherit the scheduling
* attributes from its parent:
*/
- if (new_thread->attr.flags & PTHREAD_INHERIT_SCHED) {
+ if ((new_thread->attr.flags & PTHREAD_INHERIT_SCHED) != 0) {
/* Copy the scheduling attributes: */
new_thread->base_priority =
curthread->base_priority &
- ~PTHREAD_SIGNAL_PRIORITY;
+ ~THR_SIGNAL_PRIORITY;
new_thread->attr.prio =
curthread->base_priority &
- ~PTHREAD_SIGNAL_PRIORITY;
+ ~THR_SIGNAL_PRIORITY;
new_thread->attr.sched_policy =
curthread->attr.sched_policy;
} else {
@@ -160,23 +208,49 @@ _pthread_create(pthread_t * thread, const pthread_attr_t * attr,
new_thread->active_priority = new_thread->base_priority;
new_thread->inherited_priority = 0;
- /* Initialize joiner to NULL (no joiner): */
- new_thread->joiner = NULL;
-
/* Initialize the mutex queue: */
TAILQ_INIT(&new_thread->mutexq);
+ /* Initialize thread locking. */
+ if (_lock_init(&new_thread->lock, LCK_ADAPTIVE,
+ _thr_lock_wait, _thr_lock_wakeup) != 0)
+ PANIC("Cannot initialize thread lock");
+ for (i = 0; i < MAX_THR_LOCKLEVEL; i++) {
+ _lockuser_init(&new_thread->lockusers[i],
+ (void *)new_thread);
+ _LCK_SET_PRIVATE2(&new_thread->lockusers[i],
+ (void *)new_thread);
+ }
+
/* Initialise hooks in the thread structure: */
new_thread->specific = NULL;
new_thread->cleanup = NULL;
new_thread->flags = 0;
new_thread->continuation = NULL;
+ if (new_thread->attr.suspend == THR_CREATE_SUSPENDED)
+ new_thread->state = PS_SUSPENDED;
+ else
+ new_thread->state = PS_RUNNING;
+
/*
- * Defer signals to protect the scheduling queues
- * from access by the signal handler:
+ * System scope threads have their own kse and
+ * kseg. Process scope threads are all hung
+ * off the main process kseg.
*/
- _thread_kern_sig_defer();
+ if ((new_thread->attr.flags & PTHREAD_SCOPE_SYSTEM) == 0) {
+ new_thread->kseg = _kse_initial->k_kseg;
+ new_thread->kse = _kse_initial;
+ }
+ else {
+ kse->k_curthread = NULL;
+ kse->k_kseg->kg_flags |= KGF_SINGLE_THREAD;
+ new_thread->kse = kse;
+ new_thread->kseg = kse->k_kseg;
+ kse->k_mbx.km_udata = kse;
+ kse->k_mbx.km_curthread = NULL;
+ }
+ KSE_LOCK_ACQUIRE(curthread->kse, &_thread_list_lock);
/*
* Initialise the unique id which GDB uses to
@@ -184,57 +258,53 @@ _pthread_create(pthread_t * thread, const pthread_attr_t * attr,
*/
new_thread->uniqueid = next_uniqueid++;
- /*
- * Check if the garbage collector thread
- * needs to be started.
- */
- f_gc = (TAILQ_FIRST(&_thread_list) == _thread_initial);
-
/* Add the thread to the linked list of all threads: */
- TAILQ_INSERT_HEAD(&_thread_list, new_thread, tle);
+ THR_LIST_ADD(new_thread);
- if (pattr->suspend == PTHREAD_CREATE_SUSPENDED) {
- new_thread->flags |= PTHREAD_FLAGS_SUSPENDED;
- new_thread->state = PS_SUSPENDED;
- } else {
- new_thread->state = PS_RUNNING;
- PTHREAD_PRIOQ_INSERT_TAIL(new_thread);
- }
+ KSE_LOCK_RELEASE(curthread->kse, &_thread_list_lock);
/*
- * Undefer and handle pending signals, yielding
- * if necessary.
+ * Schedule the new thread starting a new KSEG/KSE
+ * pair if necessary.
*/
- _thread_kern_sig_undefer();
+ _thr_schedule_add(curthread, new_thread);
/* Return a pointer to the thread structure: */
(*thread) = new_thread;
-
- /* Schedule the new user thread: */
- _thread_kern_sched();
-
- /*
- * Start a garbage collector thread
- * if necessary.
- */
- if (f_gc && pthread_create(&gc_thread,NULL,
- _thread_gc,NULL) != 0)
- PANIC("Can't create gc thread");
-
}
}
+ _kse_critical_leave(crit);
+
+ if ((ret == 0) && (_kse_isthreaded() == 0))
+ _kse_setthreaded(1);
/* Return the status: */
return (ret);
}
-void
-_thread_start(void)
+static int
+create_stack(struct pthread_attr *pattr)
{
- struct pthread *curthread = _get_curthread();
+ int ret;
+ /* Check if a stack was specified in the thread attributes: */
+ if ((pattr->stackaddr_attr) != NULL) {
+ pattr->guardsize_attr = 0;
+ pattr->flags = THR_STACK_USER;
+ ret = 0;
+ }
+ else
+ ret = _thr_stack_alloc(pattr);
+ return (ret);
+}
+
+
+static void
+thread_start(struct pthread *curthread, void *(*start_routine) (void *),
+ void *arg)
+{
/* Run the current thread's start routine with argument: */
- pthread_exit(curthread->start_routine(curthread->arg));
+ pthread_exit(start_routine(arg));
/* This point should never be reached. */
PANIC("Thread has resumed after exit");
diff --git a/lib/libpthread/thread/thr_detach.c b/lib/libpthread/thread/thr_detach.c
index 646dcbf..59d363e 100644
--- a/lib/libpthread/thread/thr_detach.c
+++ b/lib/libpthread/thread/thr_detach.c
@@ -31,6 +31,8 @@
*
* $FreeBSD$
*/
+#include <sys/types.h>
+#include <machine/atomic.h>
#include <errno.h>
#include <pthread.h>
#include "thr_private.h"
@@ -40,50 +42,60 @@ __weak_reference(_pthread_detach, pthread_detach);
int
_pthread_detach(pthread_t pthread)
{
- int rval = 0;
+ struct pthread *curthread, *joiner;
+ int rval = 0;
/* Check for invalid calling parameters: */
- if (pthread == NULL || pthread->magic != PTHREAD_MAGIC)
+ if (pthread == NULL || pthread->magic != THR_MAGIC)
/* Return an invalid argument error: */
rval = EINVAL;
- /* Check if the thread has not been detached: */
- else if ((pthread->attr.flags & PTHREAD_DETACHED) == 0) {
+ /* Check if the thread is already detached: */
+ else if ((pthread->attr.flags & PTHREAD_DETACHED) != 0)
+ /* Return an error: */
+ rval = EINVAL;
+ else {
+ /* Lock the detached thread: */
+ curthread = _get_curthread();
+ THR_SCHED_LOCK(curthread, pthread);
+
/* Flag the thread as detached: */
pthread->attr.flags |= PTHREAD_DETACHED;
- /*
- * Defer signals to protect the scheduling queues from
- * access by the signal handler:
- */
- _thread_kern_sig_defer();
+ /* Retrieve any joining thread and remove it: */
+ joiner = pthread->joiner;
+ pthread->joiner = NULL;
- /* Check if there is a joiner: */
- if (pthread->joiner != NULL) {
- struct pthread *joiner = pthread->joiner;
+ /* We are already in a critical region. */
+ KSE_LOCK_ACQUIRE(curthread->kse, &_thread_list_lock);
+ if ((pthread->flags & THR_FLAGS_GC_SAFE) != 0) {
+ THR_LIST_REMOVE(pthread);
+ THR_GCLIST_ADD(pthread);
+ atomic_store_rel_int(&_gc_check, 1);
+ if (KSE_WAITING(_kse_initial))
+ KSE_WAKEUP(_kse_initial);
+ }
+ KSE_LOCK_RELEASE(curthread->kse, &_thread_list_lock);
- /* Make the thread runnable: */
- PTHREAD_NEW_STATE(joiner, PS_RUNNING);
+ THR_SCHED_UNLOCK(curthread, pthread);
- /* Set the return value for the woken thread: */
- joiner->join_status.error = ESRCH;
- joiner->join_status.ret = NULL;
- joiner->join_status.thread = NULL;
+ /* See if there is a thread waiting in pthread_join(): */
+ if (joiner != NULL) {
+ /* Lock the joiner before fiddling with it. */
+ THR_SCHED_LOCK(curthread, joiner);
+ if (joiner->join_status.thread == pthread) {
+ /*
+ * Set the return value for the woken thread:
+ */
+ joiner->join_status.error = ESRCH;
+ joiner->join_status.ret = NULL;
+ joiner->join_status.thread = NULL;
- /*
- * Disconnect the joiner from the thread being detached:
- */
- pthread->joiner = NULL;
+ _thr_setrunnable_unlocked(joiner);
+ }
+ THR_SCHED_UNLOCK(curthread, joiner);
}
-
- /*
- * Undefer and handle pending signals, yielding if a
- * scheduling signal occurred while in the critical region.
- */
- _thread_kern_sig_undefer();
- } else
- /* Return an error: */
- rval = EINVAL;
+ }
/* Return the completion status: */
return (rval);
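
A minimal sketch (illustrative, not from this change) of detaching a thread so that its resources are reclaimed through the GC-list handling above rather than by a joiner.

    #include <pthread.h>

    static void *
    worker(void *arg)
    {
        (void)arg;
        return (NULL);
    }

    int
    main(void)
    {
        pthread_t td;

        if (pthread_create(&td, NULL, worker, NULL) == 0)
            /* Resources are reclaimed automatically; no join needed. */
            pthread_detach(td);
        /* Let any remaining threads finish before the process exits. */
        pthread_exit(NULL);
    }
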
diff --git a/lib/libpthread/thread/thr_exit.c b/lib/libpthread/thread/thr_exit.c
index 96d288e..4a82b12 100644
--- a/lib/libpthread/thread/thr_exit.c
+++ b/lib/libpthread/thread/thr_exit.c
@@ -40,31 +40,24 @@
#include <pthread.h>
#include "thr_private.h"
-#define FLAGS_IN_SCHEDQ \
- (PTHREAD_FLAGS_IN_PRIOQ|PTHREAD_FLAGS_IN_WAITQ|PTHREAD_FLAGS_IN_WORKQ)
+void _pthread_exit(void *status);
__weak_reference(_pthread_exit, pthread_exit);
void
-_thread_exit(char *fname, int lineno, char *string)
+_thr_exit(char *fname, int lineno, char *msg)
{
- char s[256];
+ char s[256];
/* Prepare an error message string: */
snprintf(s, sizeof(s),
"Fatal error '%s' at line %d in file %s (errno = %d)\n",
- string, lineno, fname, errno);
+ msg, lineno, fname, errno);
/* Write the string to the standard error file descriptor: */
__sys_write(2, s, strlen(s));
- /* Force this process to exit: */
- /* XXX - Do we want abort to be conditional on _PTHREADS_INVARIANTS? */
-#if defined(_PTHREADS_INVARIANTS)
abort();
-#else
- __sys_exit(1);
-#endif
}
/*
@@ -73,7 +66,7 @@ _thread_exit(char *fname, int lineno, char *string)
* abnormal thread termination can be found.
*/
void
-_thread_exit_cleanup(void)
+_thr_exit_cleanup(void)
{
struct pthread *curthread = _get_curthread();
@@ -96,22 +89,25 @@ _thread_exit_cleanup(void)
void
_pthread_exit(void *status)
{
- struct pthread *curthread = _get_curthread();
- pthread_t pthread;
+ struct pthread *curthread = _get_curthread();
/* Check if this thread is already in the process of exiting: */
- if ((curthread->flags & PTHREAD_EXITING) != 0) {
+ if ((curthread->flags & THR_FLAGS_EXITING) != 0) {
char msg[128];
- snprintf(msg, sizeof(msg), "Thread %p has called pthread_exit() from a destructor. POSIX 1003.1 1996 s16.2.5.2 does not allow this!",curthread);
+ snprintf(msg, sizeof(msg), "Thread %p has called "
+ "pthread_exit() from a destructor. POSIX 1003.1 "
+ "1996 s16.2.5.2 does not allow this!", curthread);
PANIC(msg);
}
- /* Flag this thread as exiting: */
- curthread->flags |= PTHREAD_EXITING;
+ /*
+ * Flag this thread as exiting. Threads should now be prevented
+ * from joining to this thread.
+ */
+ curthread->flags |= THR_FLAGS_EXITING;
/* Save the return value: */
curthread->ret = status;
-
while (curthread->cleanup != NULL) {
pthread_cleanup_pop(1);
}
@@ -124,58 +120,11 @@ _pthread_exit(void *status)
_thread_cleanupspecific();
}
- /*
- * Lock the garbage collector mutex to ensure that the garbage
- * collector is not using the dead thread list.
- */
- if (pthread_mutex_lock(&_gc_mutex) != 0)
- PANIC("Cannot lock gc mutex");
-
- /* Add this thread to the list of dead threads. */
- TAILQ_INSERT_HEAD(&_dead_list, curthread, dle);
-
- /*
- * Signal the garbage collector thread that there is something
- * to clean up.
- */
- if (pthread_cond_signal(&_gc_cond) != 0)
- PANIC("Cannot signal gc cond");
-
- /*
- * Avoid a race condition where a scheduling signal can occur
- * causing the garbage collector thread to run. If this happens,
- * the current thread can be cleaned out from under us.
- */
- _thread_kern_sig_defer();
-
- /* Unlock the garbage collector mutex: */
- if (pthread_mutex_unlock(&_gc_mutex) != 0)
- PANIC("Cannot unlock gc mutex");
-
- /* Check if there is a thread joining this one: */
- if (curthread->joiner != NULL) {
- pthread = curthread->joiner;
- curthread->joiner = NULL;
-
- /* Make the joining thread runnable: */
- PTHREAD_NEW_STATE(pthread, PS_RUNNING);
-
- /* Set the return value for the joining thread: */
- pthread->join_status.ret = curthread->ret;
- pthread->join_status.error = 0;
- pthread->join_status.thread = NULL;
-
- /* Make this thread collectable by the garbage collector. */
- PTHREAD_ASSERT(((curthread->attr.flags & PTHREAD_DETACHED) ==
- 0), "Cannot join a detached thread");
- curthread->attr.flags |= PTHREAD_DETACHED;
- }
-
- /* Remove this thread from the thread list: */
- TAILQ_REMOVE(&_thread_list, curthread, tle);
-
/* This thread will never be re-scheduled. */
- _thread_kern_sched_state(PS_DEAD, __FILE__, __LINE__);
+ THR_SCHED_LOCK(curthread, curthread);
+ THR_SET_STATE(curthread, PS_DEAD);
+ THR_SCHED_UNLOCK(curthread, curthread);
+ _thr_sched_switch(curthread);
/* This point should not be reached. */
PANIC("Dead thread has resumed");
diff --git a/lib/libpthread/thread/thr_fcntl.c b/lib/libpthread/thread/thr_fcntl.c
index 33a1c2f..0b4a990 100644
--- a/lib/libpthread/thread/thr_fcntl.c
+++ b/lib/libpthread/thread/thr_fcntl.c
@@ -32,8 +32,9 @@
* $FreeBSD$
*/
#include <stdarg.h>
-#include <unistd.h>
+#include "namespace.h"
#include <fcntl.h>
+#include "un-namespace.h"
#include <pthread.h>
#include "thr_private.h"
@@ -42,28 +43,29 @@ __weak_reference(__fcntl, fcntl);
int
__fcntl(int fd, int cmd,...)
{
+ struct pthread *curthread = _get_curthread();
int ret;
va_list ap;
- _thread_enter_cancellation_point();
+ _thr_enter_cancellation_point(curthread);
va_start(ap, cmd);
switch (cmd) {
- case F_DUPFD:
- case F_SETFD:
- case F_SETFL:
- ret = __sys_fcntl(fd, cmd, va_arg(ap, int));
- break;
- case F_GETFD:
- case F_GETFL:
- ret = __sys_fcntl(fd, cmd);
- break;
- default:
- ret = __sys_fcntl(fd, cmd, va_arg(ap, void *));
+ case F_DUPFD:
+ case F_SETFD:
+ case F_SETFL:
+ ret = __sys_fcntl(fd, cmd, va_arg(ap, int));
+ break;
+ case F_GETFD:
+ case F_GETFL:
+ ret = __sys_fcntl(fd, cmd);
+ break;
+ default:
+ ret = __sys_fcntl(fd, cmd, va_arg(ap, void *));
}
va_end(ap);
- _thread_leave_cancellation_point();
+ _thr_leave_cancellation_point(curthread);
- return ret;
+ return (ret);
}
diff --git a/lib/libpthread/thread/thr_find_thread.c b/lib/libpthread/thread/thr_find_thread.c
index 032fcfb..9b291b1 100644
--- a/lib/libpthread/thread/thr_find_thread.c
+++ b/lib/libpthread/thread/thr_find_thread.c
@@ -1,4 +1,5 @@
/*
+ * Copyright (c) 2003 Daniel Eischen <deischen@freebsd.org>
* Copyright (c) 1998 John Birrell <jb@cimlogic.com.au>.
* All rights reserved.
*
@@ -35,32 +36,65 @@
#include <pthread.h>
#include "thr_private.h"
-/* Find a thread in the linked list of active threads: */
+/*
+ * Find a thread in the linked list of active threads and add a reference
+ * to it. Threads with positive reference counts will not be deallocated
+ * until all references are released.
+ */
int
-_find_thread(pthread_t pthread)
+_thr_ref_add(struct pthread *curthread, struct pthread *thread,
+ int include_dead)
{
- pthread_t pthread1;
+ kse_critical_t crit;
+ struct pthread *pthread;
- /* Check if the caller has specified an invalid thread: */
- if (pthread == NULL || pthread->magic != PTHREAD_MAGIC)
+ if (thread == NULL)
/* Invalid thread: */
- return(EINVAL);
-
- /*
- * Defer signals to protect the thread list from access
- * by the signal handler:
- */
- _thread_kern_sig_defer();
+ return (EINVAL);
- /* Search for the specified thread: */
- TAILQ_FOREACH(pthread1, &_thread_list, tle) {
- if (pthread == pthread1)
+ crit = _kse_critical_enter();
+ KSE_LOCK_ACQUIRE(curthread->kse, &_thread_list_lock);
+ TAILQ_FOREACH(pthread, &_thread_list, tle) {
+ if (pthread == thread) {
+ if ((include_dead == 0) &&
+ ((pthread->state == PS_DEAD) ||
+ ((pthread->state == PS_DEADLOCK) ||
+ ((pthread->flags & THR_FLAGS_EXITING) != 0))))
+ pthread = NULL;
+ else {
+ thread->refcount++;
+ curthread->critical_count++;
+ }
break;
+ }
}
-
- /* Undefer and handle pending signals, yielding if necessary: */
- _thread_kern_sig_undefer();
+ KSE_LOCK_RELEASE(curthread->kse, &_thread_list_lock);
+ _kse_critical_leave(crit);
/* Return zero if the thread exists: */
- return ((pthread1 != NULL) ? 0:ESRCH);
+ return ((pthread != NULL) ? 0 : ESRCH);
+}
+
+void
+_thr_ref_delete(struct pthread *curthread, struct pthread *thread)
+{
+ kse_critical_t crit;
+
+ if (thread != NULL) {
+ crit = _kse_critical_enter();
+ KSE_LOCK_ACQUIRE(curthread->kse, &_thread_list_lock);
+ thread->refcount--;
+ curthread->critical_count--;
+ if (((thread->flags & THR_FLAGS_GC_SAFE) != 0) &&
+ (thread->refcount == 0) &&
+ ((thread->attr.flags & PTHREAD_DETACHED) != 0)) {
+ THR_LIST_REMOVE(thread);
+ THR_GCLIST_ADD(thread);
+ _gc_check = 1;
+ if (KSE_WAITING(_kse_initial))
+ KSE_WAKEUP(_kse_initial);
+ }
+ KSE_LOCK_RELEASE(curthread->kse, &_thread_list_lock);
+ _kse_critical_leave(crit);
+ }
}
diff --git a/lib/libpthread/thread/thr_fork.c b/lib/libpthread/thread/thr_fork.c
index 5bb64f7..a279621 100644
--- a/lib/libpthread/thread/thr_fork.c
+++ b/lib/libpthread/thread/thr_fork.c
@@ -31,7 +31,6 @@
*
* $FreeBSD$
*/
-#include <sys/param.h>
#include <errno.h>
#include <string.h>
#include <stdlib.h>
@@ -40,141 +39,21 @@
#include <pthread.h>
#include "thr_private.h"
-static void free_thread_resources(struct pthread *thread);
-
__weak_reference(_fork, fork);
pid_t
_fork(void)
{
- struct pthread *curthread = _get_curthread();
- int i, flags, use_deadlist = 0;
- pid_t ret;
- pthread_t pthread;
- pthread_t pthread_save;
+ struct pthread *curthread;
+ pid_t ret;
- /*
- * Defer signals to protect the scheduling queues from access
- * by the signal handler:
- */
- _thread_kern_sig_defer();
+ curthread = _get_curthread();
/* Fork a new process: */
- if ((ret = __sys_fork()) != 0) {
- /* Parent process or error. Nothing to do here. */
- } else {
- /* Reinitialize the GC mutex: */
- if (_mutex_reinit(&_gc_mutex) != 0) {
- /* Abort this application: */
- PANIC("Cannot initialize GC mutex for forked process");
- }
- /* Reinitialize the GC condition variable: */
- else if (_cond_reinit(&_gc_cond) != 0) {
- /* Abort this application: */
- PANIC("Cannot initialize GC condvar for forked process");
- }
- /* Initialize the ready queue: */
- else if (_pq_init(&_readyq) != 0) {
- /* Abort this application: */
- PANIC("Cannot initialize priority ready queue.");
- } else {
- /*
- * Enter a loop to remove all threads other than
- * the running thread from the thread list:
- */
- if ((pthread = TAILQ_FIRST(&_thread_list)) == NULL) {
- pthread = TAILQ_FIRST(&_dead_list);
- use_deadlist = 1;
- }
- while (pthread != NULL) {
- /* Save the thread to be freed: */
- pthread_save = pthread;
-
- /*
- * Advance to the next thread before
- * destroying the current thread:
- */
- if (use_deadlist != 0)
- pthread = TAILQ_NEXT(pthread, dle);
- else
- pthread = TAILQ_NEXT(pthread, tle);
-
- /* Make sure this isn't the running thread: */
- if (pthread_save != curthread) {
- /*
- * Remove this thread from the
- * appropriate list:
- */
- if (use_deadlist != 0)
- TAILQ_REMOVE(&_thread_list,
- pthread_save, dle);
- else
- TAILQ_REMOVE(&_thread_list,
- pthread_save, tle);
-
- free_thread_resources(pthread_save);
- }
-
- /*
- * Switch to the deadlist when the active
- * thread list has been consumed. This can't
- * be at the top of the loop because it is
- * used to determine to which list the thread
- * belongs (when it is removed from the list).
- */
- if (pthread == NULL) {
- pthread = TAILQ_FIRST(&_dead_list);
- use_deadlist = 1;
- }
- }
-
- /* Treat the current thread as the initial thread: */
- _thread_initial = curthread;
-
- /* Re-init the dead thread list: */
- TAILQ_INIT(&_dead_list);
-
- /* Re-init the waiting and work queues. */
- TAILQ_INIT(&_waitingq);
- TAILQ_INIT(&_workq);
-
- /* Re-init the threads mutex queue: */
- TAILQ_INIT(&curthread->mutexq);
-
- /* No spinlocks yet: */
- _spinblock_count = 0;
-
- /* Initialize the scheduling switch hook routine: */
- _sched_switch_hook = NULL;
- }
- }
-
- /*
- * Undefer and handle pending signals, yielding if necessary:
- */
- _thread_kern_sig_undefer();
+ if ((ret = __sys_fork()) == 0)
+ /* Child process */
+ _kse_single_thread(curthread);
/* Return the process ID: */
return (ret);
}
-
-static void
-free_thread_resources(struct pthread *thread)
-{
-
- /* Check to see if the threads library allocated the stack. */
- if ((thread->attr.stackaddr_attr == NULL) && (thread->stack != NULL)) {
- /*
- * Since this is being called from fork, we are currently single
- * threaded so there is no need to protect the call to
- * _thread_stack_free() with _gc_mutex.
- */
- _thread_stack_free(thread->stack, thread->attr.stacksize_attr,
- thread->attr.guardsize_attr);
- }
-
- if (thread->specific != NULL)
- free(thread->specific);
-
- free(thread);
-}
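
A minimal sketch (illustrative, not from this change) of the semantics _kse_single_thread() provides to applications: after fork() only the calling thread exists in the child, even if other threads were running in the parent.

    #include <sys/types.h>
    #include <sys/wait.h>
    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static void *
    spin(void *arg)
    {
        (void)arg;
        for (;;)
            sleep(1);
        return (NULL);
    }

    int
    main(void)
    {
        pthread_t td;
        pid_t pid;

        pthread_create(&td, NULL, spin, NULL);
        pid = fork();
        if (pid == 0) {
            /* Only the thread that called fork() exists here. */
            printf("child %d is single-threaded\n", (int)getpid());
            _exit(0);
        }
        waitpid(pid, NULL, 0);
        return (0);
    }
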
diff --git a/lib/libpthread/thread/thr_fsync.c b/lib/libpthread/thread/thr_fsync.c
index 0ede529..d5d3398 100644
--- a/lib/libpthread/thread/thr_fsync.c
+++ b/lib/libpthread/thread/thr_fsync.c
@@ -40,11 +40,12 @@ __weak_reference(__fsync, fsync);
int
__fsync(int fd)
{
+ struct pthread *curthread = _get_curthread();
int ret;
- _thread_enter_cancellation_point();
+ _thr_enter_cancellation_point(curthread);
ret = __sys_fsync(fd);
- _thread_leave_cancellation_point();
+ _thr_leave_cancellation_point(curthread);
- return ret;
+ return (ret);
}
diff --git a/lib/libpthread/thread/thr_gc.c b/lib/libpthread/thread/thr_gc.c
deleted file mode 100644
index 63e23f8..0000000
--- a/lib/libpthread/thread/thr_gc.c
+++ /dev/null
@@ -1,219 +0,0 @@
-/*
- * Copyright (c) 1998 John Birrell <jb@cimlogic.com.au>
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- * must display the following acknowledgement:
- * This product includes software developed by John Birrell.
- * 4. Neither the name of the author nor the names of any co-contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * $FreeBSD$
- *
- * Garbage collector thread. Frees memory allocated for dead threads.
- *
- */
-#include <sys/param.h>
-#include <errno.h>
-#include <time.h>
-#include <stdlib.h>
-#include <unistd.h>
-#include <sys/types.h>
-#include <pthread.h>
-#include "thr_private.h"
-
-pthread_addr_t
-_thread_gc(pthread_addr_t arg)
-{
- struct pthread *curthread = _get_curthread();
- int f_debug;
- int f_done = 0;
- int ret;
- sigset_t mask;
- pthread_t pthread;
- pthread_t pthread_cln;
- struct timespec abstime;
- void *p_stack;
-
- /* Block all signals */
- sigfillset(&mask);
- pthread_sigmask(SIG_BLOCK, &mask, NULL);
-
- /* Mark this thread as a library thread (not a user thread). */
- curthread->flags |= PTHREAD_FLAGS_PRIVATE;
-
- /* Set a debug flag based on an environment variable. */
- f_debug = (getenv("LIBC_R_DEBUG") != NULL);
-
- /* Set the name of this thread. */
- pthread_set_name_np(curthread,"GC");
-
- while (!f_done) {
- /* Check if debugging this application. */
- if (f_debug)
- /* Dump thread info to file. */
- _thread_dump_info();
-
- /*
- * Defer signals to protect the scheduling queues from
- * access by the signal handler:
- */
- _thread_kern_sig_defer();
-
- /* Check if this is the last running thread: */
- if (TAILQ_FIRST(&_thread_list) == curthread &&
- TAILQ_NEXT(curthread, tle) == NULL)
- /*
- * This is the last thread, so it can exit
- * now.
- */
- f_done = 1;
-
- /*
- * Undefer and handle pending signals, yielding if
- * necessary:
- */
- _thread_kern_sig_undefer();
-
- /* No stack of thread structure to free yet: */
- p_stack = NULL;
- pthread_cln = NULL;
-
- /*
- * Lock the garbage collector mutex which ensures that
- * this thread sees another thread exit:
- */
- if (pthread_mutex_lock(&_gc_mutex) != 0)
- PANIC("Cannot lock gc mutex");
-
- /*
- * Enter a loop to search for the first dead thread that
- * has memory to free.
- */
- for (pthread = TAILQ_FIRST(&_dead_list);
- p_stack == NULL && pthread_cln == NULL && pthread != NULL;
- pthread = TAILQ_NEXT(pthread, dle)) {
- /* Check if the initial thread: */
- if (pthread == _thread_initial) {
- /* Don't destroy the initial thread. */
- }
- /*
- * Check if this thread has detached:
- */
- else if ((pthread->attr.flags &
- PTHREAD_DETACHED) != 0) {
- /* Remove this thread from the dead list: */
- TAILQ_REMOVE(&_dead_list, pthread, dle);
-
- /*
- * Check if the stack was not specified by
- * the caller to pthread_create() and has not
- * been destroyed yet:
- */
- if (pthread->attr.stackaddr_attr == NULL &&
- pthread->stack != NULL) {
- _thread_stack_free(pthread->stack,
- pthread->attr.stacksize_attr,
- pthread->attr.guardsize_attr);
- }
-
- /*
- * Point to the thread structure that must
- * be freed outside the locks:
- */
- pthread_cln = pthread;
-
- } else {
- /*
- * This thread has not detached, so do
- * not destroy it.
- *
- * Check if the stack was not specified by
- * the caller to pthread_create() and has not
- * been destroyed yet:
- */
- if (pthread->attr.stackaddr_attr == NULL &&
- pthread->stack != NULL) {
- _thread_stack_free(pthread->stack,
- pthread->attr.stacksize_attr,
- pthread->attr.guardsize_attr);
-
- /*
- * NULL the stack pointer now that the
- * memory has been freed:
- */
- pthread->stack = NULL;
- }
- }
- }
-
- /*
- * Check if this is not the last thread and there is no
- * memory to free this time around.
- */
- if (!f_done && p_stack == NULL && pthread_cln == NULL) {
- /* Get the current time. */
- if (clock_gettime(CLOCK_REALTIME,&abstime) != 0)
- PANIC("gc cannot get time");
-
- /*
- * Do a backup poll in 10 seconds if no threads
- * die before then.
- */
- abstime.tv_sec += 10;
-
- /*
- * Wait for a signal from a dying thread or a
- * timeout (for a backup poll).
- */
- if ((ret = pthread_cond_timedwait(&_gc_cond,
- &_gc_mutex, &abstime)) != 0 && ret != ETIMEDOUT)
- PANIC("gc cannot wait for a signal");
- }
-
- /* Unlock the garbage collector mutex: */
- if (pthread_mutex_unlock(&_gc_mutex) != 0)
- PANIC("Cannot unlock gc mutex");
-
- /*
- * If there is memory to free, do it now. The call to
- * free() might block, so this must be done outside the
- * locks.
- */
- if (p_stack != NULL)
- free(p_stack);
- if (pthread_cln != NULL) {
- if (pthread_cln->name != NULL) {
- /* Free the thread name string. */
- free(pthread_cln->name);
- }
- /*
- * Free the memory allocated for the thread
- * structure.
- */
- free(pthread_cln);
- }
- }
- return (NULL);
-}
diff --git a/lib/libpthread/thread/thr_getschedparam.c b/lib/libpthread/thread/thr_getschedparam.c
index 7f1503c..d00e498 100644
--- a/lib/libpthread/thread/thr_getschedparam.c
+++ b/lib/libpthread/thread/thr_getschedparam.c
@@ -41,19 +41,33 @@ int
_pthread_getschedparam(pthread_t pthread, int *policy,
struct sched_param *param)
{
+ struct pthread *curthread = _get_curthread();
int ret;
if ((param == NULL) || (policy == NULL))
/* Return an invalid argument error: */
ret = EINVAL;
-
- /* Find the thread in the list of active threads: */
- else if ((ret = _find_thread(pthread)) == 0) {
- /* Return the threads base priority and scheduling policy: */
+ else if (pthread == curthread) {
+ /*
+ * Avoid searching the thread list when it is the current
+ * thread.
+ */
+ THR_SCHED_LOCK(curthread, curthread);
param->sched_priority =
- PTHREAD_BASE_PRIORITY(pthread->base_priority);
+ THR_BASE_PRIORITY(pthread->base_priority);
*policy = pthread->attr.sched_policy;
+ THR_SCHED_UNLOCK(curthread, curthread);
+ ret = 0;
}
-
- return(ret);
+ /* Find the thread in the list of active threads. */
+ else if ((ret = _thr_ref_add(curthread, pthread, /*include dead*/0))
+ == 0) {
+ THR_SCHED_LOCK(curthread, pthread);
+ param->sched_priority =
+ THR_BASE_PRIORITY(pthread->base_priority);
+ *policy = pthread->attr.sched_policy;
+ THR_SCHED_UNLOCK(curthread, pthread);
+ _thr_ref_delete(curthread, pthread);
+ }
+ return (ret);
}
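
A short usage sketch (illustrative, not from this change) of the exported interface; querying the calling thread takes the fast path above that avoids the thread-list search.

    #include <pthread.h>
    #include <sched.h>
    #include <stdio.h>

    int
    main(void)
    {
        struct sched_param param;
        int policy;

        if (pthread_getschedparam(pthread_self(), &policy, &param) == 0)
            printf("policy %d, priority %d\n", policy,
                param.sched_priority);
        return (0);
    }
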
diff --git a/lib/libpthread/thread/thr_info.c b/lib/libpthread/thread/thr_info.c
index 9ade157..3218b5b 100644
--- a/lib/libpthread/thread/thr_info.c
+++ b/lib/libpthread/thread/thr_info.c
@@ -56,11 +56,12 @@ struct s_thread_info {
/* Static variables: */
static const struct s_thread_info thread_info[] = {
{PS_RUNNING , "Running"},
+ {PS_LOCKWAIT , "Waiting on an internal lock"},
{PS_MUTEX_WAIT , "Waiting on a mutex"},
{PS_COND_WAIT , "Waiting on a condition variable"},
{PS_SLEEP_WAIT , "Sleeping"},
- {PS_WAIT_WAIT , "Waiting process"},
- {PS_SPINBLOCK , "Waiting for a spinlock"},
+ {PS_SIGSUSPEND , "Suspended, waiting for a signal"},
+ {PS_SIGWAIT , "Waiting for a signal"},
{PS_JOIN , "Waiting to join"},
{PS_SUSPENDED , "Suspended"},
{PS_DEAD , "Dead"},
@@ -71,12 +72,9 @@ static const struct s_thread_info thread_info[] = {
void
_thread_dump_info(void)
{
- char s[512];
- int fd;
- int i;
- pthread_t pthread;
- char tmpfile[128];
- pq_list_t *pq_list;
+ char s[512], tmpfile[128];
+ pthread_t pthread;
+ int fd, i;
for (i = 0; i < 100000; i++) {
snprintf(tmpfile, sizeof(tmpfile), "/tmp/uthread.dump.%u.%i",
@@ -102,64 +100,34 @@ _thread_dump_info(void)
/* all 100000 possibilities are in use :( */
return;
} else {
- /* Output a header for active threads: */
- strcpy(s, "\n\n=============\nACTIVE THREADS\n\n");
+ /* Dump the active threads. */
+ strcpy(s, "\n\n========\nACTIVE THREADS\n\n");
__sys_write(fd, s, strlen(s));
/* Enter a loop to report each thread in the global list: */
TAILQ_FOREACH(pthread, &_thread_list, tle) {
- dump_thread(fd, pthread, /*long_verson*/ 1);
+ if (pthread->state != PS_DEAD)
+ dump_thread(fd, pthread, /*long_verson*/ 1);
}
- /* Output a header for ready threads: */
- strcpy(s, "\n\n=============\nREADY THREADS\n\n");
- __sys_write(fd, s, strlen(s));
-
- /* Enter a loop to report each thread in the ready queue: */
- TAILQ_FOREACH (pq_list, &_readyq.pq_queue, pl_link) {
- TAILQ_FOREACH(pthread, &pq_list->pl_head, pqe) {
- dump_thread(fd, pthread, /*long_version*/ 0);
- }
- }
-
- /* Output a header for waiting threads: */
- strcpy(s, "\n\n=============\nWAITING THREADS\n\n");
+ /*
+ * Dump the ready threads.
+ * XXX - We can't easily do this because the run queues
+ * are per-KSEG.
+ */
+ strcpy(s, "\n\n========\nREADY THREADS - unimplemented\n\n");
__sys_write(fd, s, strlen(s));
- /* Enter a loop to report each thread in the waiting queue: */
- TAILQ_FOREACH (pthread, &_waitingq, pqe) {
- dump_thread(fd, pthread, /*long_version*/ 0);
- }
- /* Output a header for threads in the work queue: */
- strcpy(s, "\n\n=============\nTHREADS IN WORKQ\n\n");
+ /*
+ * Dump the waiting threads.
+ * XXX - We can't easily do this because the wait queues
+ * are per-KSEG.
+ */
+ strcpy(s, "\n\n========\nWAITING THREADS - unimplemented\n\n");
__sys_write(fd, s, strlen(s));
- /* Enter a loop to report each thread in the waiting queue: */
- TAILQ_FOREACH (pthread, &_workq, qe) {
- dump_thread(fd, pthread, /*long_version*/ 0);
- }
-
- /* Check if there are no dead threads: */
- if (TAILQ_FIRST(&_dead_list) == NULL) {
- /* Output a record: */
- strcpy(s, "\n\nTHERE ARE NO DEAD THREADS\n");
- __sys_write(fd, s, strlen(s));
- } else {
- /* Output a header for dead threads: */
- strcpy(s, "\n\nDEAD THREADS\n\n");
- __sys_write(fd, s, strlen(s));
-
- /*
- * Enter a loop to report each thread in the global
- * dead thread list:
- */
- TAILQ_FOREACH(pthread, &_dead_list, dle) {
- dump_thread(fd, pthread, /*long_version*/ 0);
- }
- }
-
- /* Close the dump file: */
+ /* Close the dump file. */
__sys_close(fd);
}
}
@@ -167,9 +135,9 @@ _thread_dump_info(void)
static void
dump_thread(int fd, pthread_t pthread, int long_version)
{
- struct pthread *curthread = _get_curthread();
- char s[512];
- int i;
+ struct pthread *curthread = _get_curthread();
+ char s[512];
+ int i;
/* Find the state: */
for (i = 0; i < NELEMENTS(thread_info) - 1; i++)
@@ -178,10 +146,11 @@ dump_thread(int fd, pthread_t pthread, int long_version)
/* Output a record for the thread: */
snprintf(s, sizeof(s),
- "--------------------\nThread %p (%s) prio %3d state %s [%s:%d]\n",
+ "--------------------\n"
+ "Thread %p (%s) prio %3d, blocked %s, state %s [%s:%d]\n",
pthread, (pthread->name == NULL) ? "" : pthread->name,
- pthread->active_priority, thread_info[i].name, pthread->fname,
- pthread->lineno);
+ pthread->active_priority, (pthread->blocked != 0) ? "yes" : "no",
+ thread_info[i].name, pthread->fname, pthread->lineno);
__sys_write(fd, s, strlen(s));
if (long_version != 0) {
@@ -192,13 +161,24 @@ dump_thread(int fd, pthread_t pthread, int long_version)
__sys_write(fd, s, strlen(s));
}
/* Check if this is the initial thread: */
- if (pthread == _thread_initial) {
+ if (pthread == _thr_initial) {
/* Output a record for the initial thread: */
strcpy(s, "This is the initial thread\n");
__sys_write(fd, s, strlen(s));
}
/* Process according to thread state: */
switch (pthread->state) {
+ case PS_SIGWAIT:
+ snprintf(s, sizeof(s), "sigmask (hi)");
+ __sys_write(fd, s, strlen(s));
+ for (i = _SIG_WORDS - 1; i >= 0; i--) {
+ snprintf(s, sizeof(s), "%08x\n",
+ pthread->sigmask.__bits[i]);
+ __sys_write(fd, s, strlen(s));
+ }
+ snprintf(s, sizeof(s), "(lo)\n");
+ __sys_write(fd, s, strlen(s));
+ break;
/*
* Trap other states that are not explicitly
* coded to dump information:
@@ -212,10 +192,10 @@ dump_thread(int fd, pthread_t pthread, int long_version)
/* Set the thread name for debug: */
void
-_pthread_set_name_np(pthread_t thread, const char *name)
+_pthread_set_name_np(pthread_t thread, char *name)
{
/* Check if the caller has specified a valid thread: */
- if (thread != NULL && thread->magic == PTHREAD_MAGIC) {
+ if (thread != NULL && thread->magic == THR_MAGIC) {
if (thread->name != NULL) {
/* Free space for previous name. */
free(thread->name);
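
dump_thread() resolves the state strings in the table above with a bounded linear scan that falls back to the table's final entry when no state matches. A reduced, standalone sketch of that lookup pattern follows; the enum and table here are illustrative stand-ins, not the library's real types.

#include <stdio.h>

enum state { S_RUNNING, S_LOCKWAIT, S_DEAD, S_STATE_MAX };

static const struct {
        enum state st;
        const char *name;
} info[] = {
        { S_RUNNING,   "Running" },
        { S_LOCKWAIT,  "Waiting on an internal lock" },
        { S_DEAD,      "Dead" },
        { S_STATE_MAX, "Unknown" }      /* fallback entry */
};
#define NELEM(x)        (sizeof(x) / sizeof((x)[0]))

static const char *
state_name(enum state st)
{
        unsigned int i;

        /* Stop one short of the end; the last entry is the default. */
        for (i = 0; i < NELEM(info) - 1; i++)
                if (info[i].st == st)
                        break;
        return (info[i].name);
}

int
main(void)
{
        printf("%s\n", state_name(S_LOCKWAIT));
        return (0);
}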
diff --git a/lib/libpthread/thread/thr_init.c b/lib/libpthread/thread/thr_init.c
index 8e19924..6ff88c8 100644
--- a/lib/libpthread/thread/thr_init.c
+++ b/lib/libpthread/thread/thr_init.c
@@ -1,4 +1,5 @@
/*
+ * Copyright (c) 2003 Daniel M. Eischen <deischen@FreeBSD.org>
* Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au>
* All rights reserved.
*
@@ -49,7 +50,6 @@
#include <sys/sysctl.h>
#include <sys/time.h>
#include <sys/ttycom.h>
-#include <sys/user.h>
#include <sys/wait.h>
#include <sys/mman.h>
#include <dirent.h>
@@ -57,6 +57,7 @@
#include <fcntl.h>
#include <paths.h>
#include <pthread.h>
+#include <pthread_np.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
@@ -64,11 +65,20 @@
#include <unistd.h>
#include "un-namespace.h"
+#include "libc_private.h"
#include "thr_private.h"
+#include "ksd.h"
+
+int __pthread_cond_wait(pthread_cond_t *, pthread_mutex_t *);
+int __pthread_mutex_lock(pthread_mutex_t *);
+int __pthread_mutex_trylock(pthread_mutex_t *);
+
+static void init_private(void);
+static void init_main_thread(struct pthread *thread);
/*
* All weak references used within libc should be in this table.
- * This will is so that static libraries will work.
+ * This is so that static libraries will work.
*/
static void *references[] = {
&_accept,
@@ -145,40 +155,64 @@ static void *libgcc_references[] = {
&_pthread_mutex_unlock
};
-int _pthread_guard_default;
-int _pthread_page_size;
+#define DUAL_ENTRY(entry) \
+ (pthread_func_t)entry, (pthread_func_t)entry
+
+static pthread_func_t jmp_table[][2] = {
+ {DUAL_ENTRY(_pthread_cond_broadcast)}, /* PJT_COND_BROADCAST */
+ {DUAL_ENTRY(_pthread_cond_destroy)}, /* PJT_COND_DESTROY */
+ {DUAL_ENTRY(_pthread_cond_init)}, /* PJT_COND_INIT */
+ {DUAL_ENTRY(_pthread_cond_signal)}, /* PJT_COND_SIGNAL */
+ {(pthread_func_t)__pthread_cond_wait,
+ (pthread_func_t)_pthread_cond_wait}, /* PJT_COND_WAIT */
+ {DUAL_ENTRY(_pthread_getspecific)}, /* PJT_GETSPECIFIC */
+ {DUAL_ENTRY(_pthread_key_create)}, /* PJT_KEY_CREATE */
+ {DUAL_ENTRY(_pthread_key_delete)}, /* PJT_KEY_DELETE*/
+ {DUAL_ENTRY(_pthread_main_np)}, /* PJT_MAIN_NP */
+ {DUAL_ENTRY(_pthread_mutex_destroy)}, /* PJT_MUTEX_DESTROY */
+ {DUAL_ENTRY(_pthread_mutex_init)}, /* PJT_MUTEX_INIT */
+ {(pthread_func_t)__pthread_mutex_lock,
+ (pthread_func_t)_pthread_mutex_lock}, /* PJT_MUTEX_LOCK */
+ {(pthread_func_t)__pthread_mutex_trylock,
+ (pthread_func_t)_pthread_mutex_trylock},/* PJT_MUTEX_TRYLOCK */
+ {DUAL_ENTRY(_pthread_mutex_unlock)}, /* PJT_MUTEX_UNLOCK */
+ {DUAL_ENTRY(_pthread_mutexattr_destroy)}, /* PJT_MUTEXATTR_DESTROY */
+ {DUAL_ENTRY(_pthread_mutexattr_init)}, /* PJT_MUTEXATTR_INIT */
+ {DUAL_ENTRY(_pthread_mutexattr_settype)}, /* PJT_MUTEXATTR_SETTYPE */
+ {DUAL_ENTRY(_pthread_once)}, /* PJT_ONCE */
+ {DUAL_ENTRY(_pthread_rwlock_destroy)}, /* PJT_RWLOCK_DESTROY */
+ {DUAL_ENTRY(_pthread_rwlock_init)}, /* PJT_RWLOCK_INIT */
+ {DUAL_ENTRY(_pthread_rwlock_rdlock)}, /* PJT_RWLOCK_RDLOCK */
+ {DUAL_ENTRY(_pthread_rwlock_tryrdlock)},/* PJT_RWLOCK_TRYRDLOCK */
+ {DUAL_ENTRY(_pthread_rwlock_trywrlock)},/* PJT_RWLOCK_TRYWRLOCK */
+ {DUAL_ENTRY(_pthread_rwlock_unlock)}, /* PJT_RWLOCK_UNLOCK */
+ {DUAL_ENTRY(_pthread_rwlock_wrlock)}, /* PJT_RWLOCK_WRLOCK */
+ {DUAL_ENTRY(_pthread_self)}, /* PJT_SELF */
+ {DUAL_ENTRY(_pthread_setspecific)}, /* PJT_SETSPECIFIC */
+ {DUAL_ENTRY(_pthread_sigmask)} /* PJT_SIGMASK */
+};
+
+static int init_once = 0;
/*
- * Threaded process initialization
+ * Threaded process initialization.
+ *
+ * This is only called under two conditions:
+ *
+ * 1) Some thread routines have detected that the library hasn't yet
+ * been initialized (_thr_initial == NULL && curthread == NULL), or
+ *
+ * 2) An explicit call to reinitialize after a fork (indicated
+ * by curthread != NULL)
*/
void
-_thread_init(void)
+_libpthread_init(struct pthread *curthread)
{
- int fd;
- int flags;
- int i;
- size_t len;
- int mib[2];
- int sched_stack_size; /* Size of scheduler stack. */
-
- struct clockinfo clockinfo;
- struct sigaction act;
+ int fd;
/* Check if this function has already been called: */
- if (_thread_initial)
- /* Only initialise the threaded application once. */
- return;
-
- _pthread_page_size = getpagesize();
- _pthread_guard_default = getpagesize();
- sched_stack_size = getpagesize();
-
- pthread_attr_default.guardsize_attr = _pthread_guard_default;
-
-
- /* Check if this function has already been called: */
- if (_thread_initial)
- /* Only initialise the threaded application once. */
+ if ((_thr_initial != NULL) && (curthread == NULL))
+ /* Only initialize the threaded application once. */
return;
/*
@@ -189,10 +223,18 @@ _thread_init(void)
PANIC("Failed loading mandatory references in _thread_init");
/*
+ * Check the size of the jump table to make sure it is preset
+ * with the correct number of entries.
+ */
+ if (sizeof(jmp_table) != (sizeof(pthread_func_t) * PJT_MAX * 2))
+ PANIC("Thread jump table not properly initialized");
+ memcpy(__thr_jtable, jmp_table, sizeof(jmp_table));
+
+ /*
* Check for the special case of this process running as
* or in place of init as pid = 1:
*/
- if (getpid() == 1) {
+ if ((_thr_pid = getpid()) == 1) {
/*
* Setup a new session for this process which is
* assumed to be running as root.
@@ -207,200 +249,271 @@ _thread_init(void)
PANIC("Can't set login to root");
if (__sys_ioctl(fd, TIOCSCTTY, (char *) NULL) == -1)
PANIC("Can't set controlling terminal");
- if (__sys_dup2(fd, 0) == -1 ||
- __sys_dup2(fd, 1) == -1 ||
- __sys_dup2(fd, 2) == -1)
- PANIC("Can't dup2");
}
- /* Allocate and initialize the ready queue: */
- if (_pq_alloc(&_readyq, PTHREAD_MIN_PRIORITY, PTHREAD_LAST_PRIORITY) !=
- 0) {
- /* Abort this application: */
- PANIC("Cannot allocate priority ready queue.");
- }
- /* Allocate memory for the thread structure of the initial thread: */
- else if ((_thread_initial = (pthread_t) malloc(sizeof(struct pthread))) == NULL) {
+ /* Initialize pthread private data. */
+ init_private();
+ _kse_init();
+
+ /* Initialize the initial kse and kseg. */
+ _kse_initial = _kse_alloc(NULL);
+ if (_kse_initial == NULL)
+ PANIC("Can't allocate initial kse.");
+ _kse_initial->k_kseg = _kseg_alloc(NULL);
+ if (_kse_initial->k_kseg == NULL)
+ PANIC("Can't allocate initial kseg.");
+ _kse_initial->k_schedq = &_kse_initial->k_kseg->kg_schedq;
+
+ /* Set the initial thread. */
+ if (curthread == NULL) {
+ /* Create and initialize the initial thread. */
+ curthread = _thr_alloc(NULL);
+ if (curthread == NULL)
+ PANIC("Can't allocate initial thread");
+ _thr_initial = curthread;
+ init_main_thread(curthread);
+ } else {
/*
- * Insufficient memory to initialise this application, so
- * abort:
+ * The initial thread is the current thread. It is
+ * assumed that the current thread is already initialized
+ * because it is left over from a fork().
*/
- PANIC("Cannot allocate memory for initial thread");
+ _thr_initial = curthread;
+ }
+ _kse_initial->k_kseg->kg_threadcount = 1;
+ _thr_initial->kse = _kse_initial;
+ _thr_initial->kseg = _kse_initial->k_kseg;
+ _thr_initial->active = 1;
+
+ /*
+ * Add the thread to the thread list and to the KSEG's thread
+ * queue.
+ */
+ THR_LIST_ADD(_thr_initial);
+ TAILQ_INSERT_TAIL(&_kse_initial->k_kseg->kg_threadq, _thr_initial, kle);
+
+ /* Setup the KSE/thread specific data for the current KSE/thread. */
+ if (_ksd_setprivate(&_thr_initial->kse->k_ksd) != 0)
+ PANIC("Can't set initial KSE specific data");
+ _set_curkse(_thr_initial->kse);
+ _thr_initial->kse->k_curthread = _thr_initial;
+ _thr_initial->kse->k_flags |= KF_INITIALIZED;
+ _kse_initial->k_curthread = _thr_initial;
+}
+
+/*
+ * This function and pthread_create() do a lot of the same things.
+ * It'd be nice to consolidate the common stuff in one place.
+ */
+static void
+init_main_thread(struct pthread *thread)
+{
+ int i;
+
+ /* Zero the initial thread structure. */
+ memset(thread, 0, sizeof(struct pthread));
+
+ /* Setup the thread attributes. */
+ thread->attr = _pthread_attr_default;
+
+ /*
+ * Set up the thread stack.
+ *
+ * Create a red zone below the main stack. All other stacks
+ * are constrained to a maximum size by the parameters
+ * passed to mmap(), but this stack is only limited by
+ * resource limits, so this stack needs an explicitly mapped
+ * red zone to protect the thread stack that is just beyond.
+ */
+ if (mmap((void *)_usrstack - THR_STACK_INITIAL -
+ _thr_guard_default, _thr_guard_default, 0, MAP_ANON,
+ -1, 0) == MAP_FAILED)
+ PANIC("Cannot allocate red zone for initial thread");
+
+ /*
+ * Mark the stack as an application supplied stack so that it
+ * isn't deallocated.
+ *
+ * XXX - I'm not sure it would hurt anything to deallocate
+ * the main thread stack because deallocation doesn't
+ * actually free() it; it just puts it in the free
+ * stack queue for later reuse.
+ */
+ thread->attr.stackaddr_attr = (void *)_usrstack - THR_STACK_INITIAL;
+ thread->attr.stacksize_attr = THR_STACK_INITIAL;
+ thread->attr.guardsize_attr = _thr_guard_default;
+ thread->attr.flags |= THR_STACK_USER;
+
+ /*
+ * Write a magic value to the thread structure
+ * to help identify valid ones:
+ */
+ thread->magic = THR_MAGIC;
+
+ thread->slice_usec = -1;
+ thread->cancelflags = PTHREAD_CANCEL_ENABLE | PTHREAD_CANCEL_DEFERRED;
+ thread->name = strdup("initial thread");
+
+ /* Initialize the thread for signals: */
+ sigemptyset(&thread->sigmask);
+
+ /*
+ * Set up the thread mailbox. The thread's saved context
+ * is also in the mailbox.
+ */
+ thread->tmbx.tm_udata = thread;
+ thread->tmbx.tm_context.uc_sigmask = thread->sigmask;
+ thread->tmbx.tm_context.uc_stack.ss_size = thread->attr.stacksize_attr;
+ thread->tmbx.tm_context.uc_stack.ss_sp = thread->attr.stackaddr_attr;
+
+ /* Default the priority of the initial thread: */
+ thread->base_priority = THR_DEFAULT_PRIORITY;
+ thread->active_priority = THR_DEFAULT_PRIORITY;
+ thread->inherited_priority = 0;
+
+ /* Initialize the mutex queue: */
+ TAILQ_INIT(&thread->mutexq);
+
+ /* Initialize thread locking. */
+ if (_lock_init(&thread->lock, LCK_ADAPTIVE,
+ _thr_lock_wait, _thr_lock_wakeup) != 0)
+ PANIC("Cannot initialize initial thread lock");
+ for (i = 0; i < MAX_THR_LOCKLEVEL; i++) {
+ _lockuser_init(&thread->lockusers[i], (void *)thread);
+ _LCK_SET_PRIVATE2(&thread->lockusers[i], (void *)thread);
}
- /* Allocate memory for the scheduler stack: */
- else if ((_thread_kern_sched_stack = malloc(sched_stack_size)) == NULL)
- PANIC("Failed to allocate stack for scheduler");
- /* Allocate memory for the idle stack: */
- else if ((_idle_thr_stack = malloc(sched_stack_size)) == NULL)
- PANIC("Failed to allocate stack for scheduler");
- else {
- /* Zero the global kernel thread structure: */
- memset(&_thread_kern_thread, 0, sizeof(struct pthread));
- _thread_kern_thread.flags = PTHREAD_FLAGS_PRIVATE;
- memset(_thread_initial, 0, sizeof(struct pthread));
-
- /* Initialize the waiting and work queues: */
- TAILQ_INIT(&_waitingq);
- TAILQ_INIT(&_workq);
-
- /* Initialize the scheduling switch hook routine: */
- _sched_switch_hook = NULL;
-
- /* Give this thread default attributes: */
- memcpy((void *) &_thread_initial->attr, &pthread_attr_default,
- sizeof(struct pthread_attr));
+ /* Initialize hooks in the thread structure: */
+ thread->specific = NULL;
+ thread->cleanup = NULL;
+ thread->flags = 0;
+ thread->continuation = NULL;
+
+ thread->state = PS_RUNNING;
+ thread->uniqueid = 0;
+}
+
+static void
+init_private(void)
+{
+ struct clockinfo clockinfo;
+ struct sigaction act;
+ size_t len;
+ int mib[2];
+ int i;
+
+ /*
+ * Avoid reinitializing some things if they don't need to be,
+ * e.g. after a fork().
+ */
+ if (init_once == 0) {
/* Find the stack top */
mib[0] = CTL_KERN;
mib[1] = KERN_USRSTACK;
len = sizeof (_usrstack);
if (sysctl(mib, 2, &_usrstack, &len, NULL, 0) == -1)
- _usrstack = (void *)USRSTACK;
+ PANIC("Cannot get kern.usrstack from sysctl");
+
/*
- * Create a red zone below the main stack. All other stacks are
- * constrained to a maximum size by the paramters passed to
- * mmap(), but this stack is only limited by resource limits, so
- * this stack needs an explicitly mapped red zone to protect the
- * thread stack that is just beyond.
+ * Create a red zone below the main stack. All other
+ * stacks are constrained to a maximum size by the
+ * parameters passed to mmap(), but this stack is only
+ * limited by resource limits, so this stack needs an
+ * explicitly mapped red zone to protect the thread stack
+ * that is just beyond.
*/
- if (mmap(_usrstack - PTHREAD_STACK_INITIAL -
- _pthread_guard_default, _pthread_guard_default, 0,
- MAP_ANON, -1, 0) == MAP_FAILED)
+ if (mmap((void *)_usrstack - THR_STACK_INITIAL -
+ _thr_guard_default, _thr_guard_default,
+ 0, MAP_ANON, -1, 0) == MAP_FAILED)
PANIC("Cannot allocate red zone for initial thread");
- /* Set the main thread stack pointer. */
- _thread_initial->stack = _usrstack - PTHREAD_STACK_INITIAL;
-
- /* Set the stack attributes. */
- _thread_initial->attr.stackaddr_attr = _thread_initial->stack;
- _thread_initial->attr.stacksize_attr = PTHREAD_STACK_INITIAL;
-
- /* Setup the context for the scheduler. */
- _thread_kern_kse_mailbox.km_stack.ss_sp =
- _thread_kern_sched_stack;
- _thread_kern_kse_mailbox.km_stack.ss_size = sched_stack_size;
- _thread_kern_kse_mailbox.km_func =
- (void *)_thread_kern_scheduler;
-
- /* Initialize the idle context. */
- bzero(&_idle_thr_mailbox, sizeof(struct kse_thr_mailbox));
- getcontext(&_idle_thr_mailbox.tm_context);
- _idle_thr_mailbox.tm_context.uc_stack.ss_sp = _idle_thr_stack;
- _idle_thr_mailbox.tm_context.uc_stack.ss_size =
- sched_stack_size;
- makecontext(&_idle_thr_mailbox.tm_context, _thread_kern_idle,
- 1);
+ /* Get the kernel clockrate: */
+ mib[0] = CTL_KERN;
+ mib[1] = KERN_CLOCKRATE;
+ len = sizeof (struct clockinfo);
+ if (sysctl(mib, 2, &clockinfo, &len, NULL, 0) == 0)
+ _clock_res_usec = clockinfo.tick;
+ else
+ _clock_res_usec = CLOCK_RES_USEC;
+
+ _thr_page_size = getpagesize();
+ _thr_guard_default = _thr_page_size;
+ init_once = 1; /* Don't do this again. */
+ } else {
/*
- * Write a magic value to the thread structure
- * to help identify valid ones:
+ * Destroy the locks before creating them. We don't
+ * know what state they are in so it is better to just
+ * recreate them.
*/
- _thread_initial->magic = PTHREAD_MAGIC;
-
- /* Set the initial cancel state */
- _thread_initial->cancelflags = PTHREAD_CANCEL_ENABLE |
- PTHREAD_CANCEL_DEFERRED;
-
- /* Setup the context for initial thread. */
- getcontext(&_thread_initial->mailbox.tm_context);
- _thread_initial->mailbox.tm_context.uc_stack.ss_sp =
- _thread_initial->stack;
- _thread_initial->mailbox.tm_context.uc_stack.ss_size =
- PTHREAD_STACK_INITIAL;
- _thread_initial->mailbox.tm_udata = (void *)_thread_initial;
-
- /* Default the priority of the initial thread: */
- _thread_initial->base_priority = PTHREAD_DEFAULT_PRIORITY;
- _thread_initial->active_priority = PTHREAD_DEFAULT_PRIORITY;
- _thread_initial->inherited_priority = 0;
-
- /* Initialise the state of the initial thread: */
- _thread_initial->state = PS_RUNNING;
-
- /* Set the name of the thread: */
- _thread_initial->name = strdup("_thread_initial");
-
- /* Initialize joiner to NULL (no joiner): */
- _thread_initial->joiner = NULL;
-
- /* Initialize the owned mutex queue and count: */
- TAILQ_INIT(&(_thread_initial->mutexq));
- _thread_initial->priority_mutex_count = 0;
-
- /* Initialize the global scheduling time: */
- _sched_ticks = 0;
- gettimeofday((struct timeval *) &_sched_tod, NULL);
-
- /* Initialize last active: */
- _thread_initial->last_active = (long) _sched_ticks;
-
- /* Initialise the rest of the fields: */
- _thread_initial->sig_defer_count = 0;
- _thread_initial->specific = NULL;
- _thread_initial->cleanup = NULL;
- _thread_initial->flags = 0;
- _thread_initial->error = 0;
- TAILQ_INIT(&_thread_list);
- TAILQ_INSERT_HEAD(&_thread_list, _thread_initial, tle);
- _set_curthread(_thread_initial);
-
- /* Clear the pending signals for the process. */
- sigemptyset(&_thread_sigpending);
-
- /* Enter a loop to get the existing signal status: */
- for (i = 1; i < NSIG; i++) {
- /* Check for signals which cannot be trapped. */
- if (i == SIGKILL || i == SIGSTOP)
- continue;
-
- /* Get the signal handler details. */
- if (__sys_sigaction(i, NULL,
- &_thread_sigact[i - 1]) != 0)
- PANIC("Cannot read signal handler info");
- }
+ _lock_destroy(&_thread_signal_lock);
+ _lock_destroy(&_mutex_static_lock);
+ _lock_destroy(&_rwlock_static_lock);
+ _lock_destroy(&_keytable_lock);
+ }
- /* Register SIGCHLD (needed for wait(2)). */
- sigfillset(&act.sa_mask);
- act.sa_handler = (void (*) ()) _thread_sig_handler;
- act.sa_flags = SA_SIGINFO | SA_RESTART;
- if (__sys_sigaction(SIGCHLD, &act, NULL) != 0)
- PANIC("Can't initialize signal handler");
- /* Get the process signal mask. */
- __sys_sigprocmask(SIG_SETMASK, NULL, &_thread_sigmask);
+ /* Initialize everything else. */
+ TAILQ_INIT(&_thread_list);
+ TAILQ_INIT(&_thread_gc_list);
- /* Get the kernel clockrate: */
- mib[0] = CTL_KERN;
- mib[1] = KERN_CLOCKRATE;
- len = sizeof (struct clockinfo);
- if (sysctl(mib, 2, &clockinfo, &len, NULL, 0) == 0)
- _clock_res_usec = clockinfo.tick > CLOCK_RES_USEC_MIN ?
- clockinfo.tick : CLOCK_RES_USEC_MIN;
-
- /* Start KSE. */
- _thread_kern_kse_mailbox.km_curthread =
- &_thread_initial->mailbox;
- if (kse_create(&_thread_kern_kse_mailbox, 0) != 0)
- PANIC("kse_new failed");
+ /* Enter a loop to get the existing signal status: */
+ for (i = 1; i < NSIG; i++) {
+ /* Check for signals which cannot be trapped: */
+ if (i == SIGKILL || i == SIGSTOP) {
+ }
+
+ /* Get the signal handler details: */
+ else if (__sys_sigaction(i, NULL,
+ &_thread_sigact[i - 1]) != 0) {
+ /*
+ * Abort this process if signal
+ * initialisation fails:
+ */
+ PANIC("Cannot read signal handler info");
+ }
+
+ /* Initialize the SIG_DFL dummy handler count. */
+ _thread_dfl_count[i] = 0;
}
- /* Initialise the garbage collector mutex and condition variable. */
- if (_pthread_mutex_init(&_gc_mutex,NULL) != 0 ||
- pthread_cond_init(&_gc_cond,NULL) != 0)
- PANIC("Failed to initialise garbage collector mutex or condvar");
-}
+ /*
+ * Install the signal handler for SIGINFO. It isn't
+ * really needed, but it is nice to have for debugging
+ * purposes.
+ */
+ if (__sys_sigaction(SIGINFO, &act, NULL) != 0) {
+ /*
+ * Abort this process if signal initialisation fails:
+ */
+ PANIC("Cannot initialize signal handler");
+ }
+ _thread_sigact[SIGINFO - 1].sa_flags = SA_SIGINFO | SA_RESTART;
-/*
- * Special start up code for NetBSD/Alpha
- */
-#if defined(__NetBSD__) && defined(__alpha__)
-int
-main(int argc, char *argv[], char *env);
+ /*
+ * Initialize the lock for temporary installation of signal
+ * handlers (to support sigwait() semantics) and for the
+ * process signal mask and pending signal sets.
+ */
+ if (_lock_init(&_thread_signal_lock, LCK_ADAPTIVE,
+ _thr_lock_wait, _thr_lock_wakeup) != 0)
+ PANIC("Cannot initialize _thread_signal_lock");
+ if (_lock_init(&_mutex_static_lock, LCK_ADAPTIVE,
+ _thr_lock_wait, _thr_lock_wakeup) != 0)
+ PANIC("Cannot initialize mutex static init lock");
+ if (_lock_init(&_rwlock_static_lock, LCK_ADAPTIVE,
+ _thr_lock_wait, _thr_lock_wakeup) != 0)
+ PANIC("Cannot initialize rwlock static init lock");
+ if (_lock_init(&_keytable_lock, LCK_ADAPTIVE,
+ _thr_lock_wait, _thr_lock_wakeup) != 0)
+ PANIC("Cannot initialize thread specific keytable lock");
+
+ /* Clear pending signals and get the process signal mask. */
+ sigemptyset(&_thr_proc_sigpending);
+ __sys_sigprocmask(SIG_SETMASK, NULL, &_thr_proc_sigmask);
-int
-_thread_main(int argc, char *argv[], char *env)
-{
- _thread_init();
- return (main(argc, argv, env));
+ /*
+ * _thread_list_lock and _kse_count are initialized
+ * by _kse_init()
+ */
}
-#endif
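
_libpthread_init() now copies jmp_table into libc's __thr_jtable, letting libc route its internal pthread calls through the threads library. Each slot carries two entries so a call site can be wired either to the cancellation-point variant (e.g. __pthread_cond_wait) or to the plain one; DUAL_ENTRY() simply fills both columns with the same routine when no separate variant exists. A small standalone sketch of that dual-entry dispatch idea follows; the slot names and dummy routines are illustrative only, not the library's symbols.

#include <stdio.h>

typedef int (*func_t)(void);

static int do_once(void)     { return (1); }
static int lock_plain(void)  { return (2); }
static int lock_cancel(void) { return (3); }

/* Same routine in both columns when no special variant exists. */
#define DUAL_ENTRY(f)   (func_t)(f), (func_t)(f)

enum { SLOT_ONCE, SLOT_LOCK, SLOT_MAX };

static func_t jtable[SLOT_MAX][2] = {
        { DUAL_ENTRY(do_once) },                        /* SLOT_ONCE */
        { (func_t)lock_cancel, (func_t)lock_plain }     /* SLOT_LOCK */
};

int
main(void)
{
        /* The column selects which variant the caller is bound to. */
        printf("%d %d\n", jtable[SLOT_ONCE][0](), jtable[SLOT_LOCK][1]());
        return (0);
}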
diff --git a/lib/libpthread/thread/thr_join.c b/lib/libpthread/thread/thr_join.c
index c2e7cec..38cc8b3 100644
--- a/lib/libpthread/thread/thr_join.c
+++ b/lib/libpthread/thread/thr_join.c
@@ -41,121 +41,91 @@ int
_pthread_join(pthread_t pthread, void **thread_return)
{
struct pthread *curthread = _get_curthread();
- int ret = 0;
- pthread_t thread;
+ int ret = 0;
- _thread_enter_cancellation_point();
+ _thr_enter_cancellation_point(curthread);
/* Check if the caller has specified an invalid thread: */
- if (pthread == NULL || pthread->magic != PTHREAD_MAGIC) {
+ if (pthread == NULL || pthread->magic != THR_MAGIC) {
/* Invalid thread: */
- _thread_leave_cancellation_point();
- return(EINVAL);
+ _thr_leave_cancellation_point(curthread);
+ return (EINVAL);
}
/* Check if the caller has specified itself: */
if (pthread == curthread) {
/* Avoid a deadlock condition: */
- _thread_leave_cancellation_point();
- return(EDEADLK);
+ _thr_leave_cancellation_point(curthread);
+ return (EDEADLK);
}
/*
- * Lock the garbage collector mutex to ensure that the garbage
- * collector is not using the dead thread list.
+ * Find the thread in the list of active threads or in the
+ * list of dead threads:
*/
- if (pthread_mutex_lock(&_gc_mutex) != 0)
- PANIC("Cannot lock gc mutex");
-
- /*
- * Defer signals to protect the thread list from access
- * by the signal handler:
- */
- _thread_kern_sig_defer();
-
- /*
- * Unlock the garbage collector mutex, now that the garbage collector
- * can't be run:
- */
- if (pthread_mutex_unlock(&_gc_mutex) != 0)
- PANIC("Cannot lock gc mutex");
-
- /*
- * Search for the specified thread in the list of active threads. This
- * is done manually here rather than calling _find_thread() because
- * the searches in _thread_list and _dead_list (as well as setting up
- * join/detach state) have to be done atomically.
- */
- TAILQ_FOREACH(thread, &_thread_list, tle) {
- if (thread == pthread)
- break;
- }
- if (thread == NULL) {
- /*
- * Search for the specified thread in the list of dead threads:
- */
- TAILQ_FOREACH(thread, &_dead_list, dle) {
- if (thread == pthread)
- break;
- }
+ if ((ret = _thr_ref_add(curthread, pthread, /*include dead*/1)) != 0) {
+ /* Return an error: */
+ _thr_leave_cancellation_point(curthread);
+ return (ESRCH);
}
- /* Check if the thread was not found or has been detached: */
- if (thread == NULL ||
- ((pthread->attr.flags & PTHREAD_DETACHED) != 0)) {
- /* Undefer and handle pending signals, yielding if necessary: */
- _thread_kern_sig_undefer();
-
- /* Return an error: */
+ /* Check if this thread has been detached: */
+ if ((pthread->attr.flags & PTHREAD_DETACHED) != 0) {
+ /* Remove the reference and return an error: */
+ _thr_ref_delete(curthread, pthread);
ret = ESRCH;
-
- } else if (pthread->joiner != NULL) {
- /* Undefer and handle pending signals, yielding if necessary: */
- _thread_kern_sig_undefer();
-
- /* Multiple joiners are not supported. */
- ret = ENOTSUP;
-
- /* Check if the thread is not dead: */
- } else if (pthread->state != PS_DEAD) {
- /* Set the running thread to be the joiner: */
- pthread->joiner = curthread;
-
- /* Keep track of which thread we're joining to: */
- curthread->join_status.thread = pthread;
-
- while (curthread->join_status.thread == pthread) {
- /* Schedule the next thread: */
- _thread_kern_sched_state(PS_JOIN, __FILE__, __LINE__);
- }
-
- /*
- * The thread return value and error are set by the thread we're
- * joining to when it exits or detaches:
- */
- ret = curthread->join_status.error;
- if ((ret == 0) && (thread_return != NULL))
- *thread_return = curthread->join_status.ret;
} else {
- /*
- * The thread exited (is dead) without being detached, and no
- * thread has joined it.
- */
-
- /* Check if the return value is required: */
- if (thread_return != NULL) {
- /* Return the thread's return value: */
- *thread_return = pthread->ret;
+ /* Lock the target thread while checking its state. */
+ THR_SCHED_LOCK(curthread, pthread);
+ if ((pthread->state == PS_DEAD) ||
+ ((pthread->flags & THR_FLAGS_EXITING) != 0)) {
+ if (thread_return != NULL)
+ /* Return the thread's return value: */
+ *thread_return = pthread->ret;
+
+ /* Unlock the thread and remove the reference. */
+ THR_SCHED_UNLOCK(curthread, pthread);
+ _thr_ref_delete(curthread, pthread);
}
+ else if (pthread->joiner != NULL) {
+ /* Unlock the thread and remove the reference. */
+ THR_SCHED_UNLOCK(curthread, pthread);
+ _thr_ref_delete(curthread, pthread);
- /* Make the thread collectable by the garbage collector. */
- pthread->attr.flags |= PTHREAD_DETACHED;
-
- /* Undefer and handle pending signals, yielding if necessary: */
- _thread_kern_sig_undefer();
+ /* Multiple joiners are not supported. */
+ ret = ENOTSUP;
+ }
+ else {
+ /* Set the running thread to be the joiner: */
+ pthread->joiner = curthread;
+
+ /* Keep track of which thread we're joining to: */
+ curthread->join_status.thread = pthread;
+
+ /* Unlock the thread and remove the reference. */
+ THR_SCHED_UNLOCK(curthread, pthread);
+ _thr_ref_delete(curthread, pthread);
+
+ THR_SCHED_LOCK(curthread, curthread);
+ if (curthread->join_status.thread == pthread)
+ THR_SET_STATE(curthread, PS_JOIN);
+ THR_SCHED_UNLOCK(curthread, curthread);
+
+ while (curthread->join_status.thread == pthread) {
+ /* Schedule the next thread: */
+ _thr_sched_switch(curthread);
+ }
+
+ /*
+ * The thread return value and error are set by the
+ * thread we're joining to when it exits or detaches:
+ */
+ ret = curthread->join_status.error;
+ if ((ret == 0) && (thread_return != NULL))
+ *thread_return = curthread->join_status.ret;
+ }
}
-
- _thread_leave_cancellation_point();
+ _thr_leave_cancellation_point(curthread);
/* Return the completion status: */
return (ret);
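
The reworked _pthread_join() keeps the usual semantics: joining yourself returns EDEADLK, a detached or unknown target returns ESRCH, and a successful join hands back the target's exit value once it reaches PS_DEAD. Below is a caller-side sketch using only the standard API (not taken from the commit); build with -pthread.

#include <pthread.h>
#include <stdio.h>

static int value = 42;

static void *
worker(void *arg)
{
        (void)arg;
        return (&value);
}

int
main(void)
{
        pthread_t td;
        void *ret;

        /* Self-join is rejected (EDEADLK) by the check above. */
        printf("self-join: %d\n", pthread_join(pthread_self(), NULL));

        pthread_create(&td, NULL, worker, NULL);
        pthread_join(td, &ret);
        printf("worker returned %d\n", *(int *)ret);
        return (0);
}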
diff --git a/lib/libpthread/thread/thr_kern.c b/lib/libpthread/thread/thr_kern.c
index 6e59a43..b87ae3d 100644
--- a/lib/libpthread/thread/thr_kern.c
+++ b/lib/libpthread/thread/thr_kern.c
@@ -1,4 +1,6 @@
/*
+ * Copyright (C) 2003 Daniel M. Eischen <deischen@freebsd.org>
+ * Copyright (C) 2002 Jonathon Mini <mini@freebsd.org>
* Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au>
* All rights reserved.
*
@@ -32,474 +34,1452 @@
* $FreeBSD$
*
*/
-#include <errno.h>
-#include <poll.h>
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD");
+
+#include <sys/types.h>
+#include <sys/kse.h>
+#include <sys/signalvar.h>
+#include <sys/queue.h>
+#include <machine/atomic.h>
+
+#include <assert.h>
+#include <signal.h>
#include <stdlib.h>
-#include <stdarg.h>
#include <string.h>
+#include <time.h>
+#include <ucontext.h>
#include <unistd.h>
-#include <sys/param.h>
-#include <sys/types.h>
-#include <sys/signalvar.h>
-#include <sys/stat.h>
-#include <sys/time.h>
-#include <sys/socket.h>
-#include <sys/uio.h>
-#include <sys/syscall.h>
-#include <fcntl.h>
-#include <pthread.h>
+
+#include "atomic_ops.h"
#include "thr_private.h"
+#include "pthread_md.h"
+#include "libc_private.h"
-/* #define DEBUG_THREAD_KERN */
+/*#define DEBUG_THREAD_KERN */
#ifdef DEBUG_THREAD_KERN
#define DBG_MSG stdout_debug
#else
#define DBG_MSG(x...)
#endif
-static int _kern_idle_running = 0;
-static struct timeval _kern_idle_timeout;
+/*
+ * Define a high water mark for the maximum number of threads that
+ * will be cached. Once this level is reached, any extra threads
+ * will be free()'d.
+ *
+ * XXX - It doesn't make sense to worry about the maximum number of
+ * KSEs that we can cache because the system will limit us to
+ * something *much* less than the maximum number of threads
+ * that we can have. Disregarding KSEs in their own group,
+ * the maximum number of KSEs is the number of processors in
+ * the system.
+ */
+#define MAX_CACHED_THREADS 100
+#define KSE_STACKSIZE 16384
-/* Static function prototype definitions: */
-static void
-thread_kern_idle(void);
+#define KSE_SET_MBOX(kse, thrd) \
+ (kse)->k_mbx.km_curthread = &(thrd)->tmbx
-static void
-dequeue_signals(void);
+#define KSE_SET_EXITED(kse) (kse)->k_flags |= KF_EXITED
-static inline void
-thread_run_switch_hook(pthread_t thread_out, pthread_t thread_in);
+/*
+ * Add/remove threads from a KSE's scheduling queue.
+ * For now the scheduling queue is hung off the KSEG.
+ */
+#define KSEG_THRQ_ADD(kseg, thr) \
+ TAILQ_INSERT_TAIL(&(kseg)->kg_threadq, thr, kle)
+#define KSEG_THRQ_REMOVE(kseg, thr) \
+ TAILQ_REMOVE(&(kseg)->kg_threadq, thr, kle)
-/* Static variables: */
-static int last_tick = 0;
+/*
+ * Macros for manipulating the run queues. The priority queue
+ * routines use the thread's pqe link and also handle the setting
+ * and clearing of the thread's THR_FLAGS_IN_RUNQ flag.
+ */
+#define KSE_RUNQ_INSERT_HEAD(kse, thrd) \
+ _pq_insert_head(&(kse)->k_schedq->sq_runq, thrd)
+#define KSE_RUNQ_INSERT_TAIL(kse, thrd) \
+ _pq_insert_tail(&(kse)->k_schedq->sq_runq, thrd)
+#define KSE_RUNQ_REMOVE(kse, thrd) \
+ _pq_remove(&(kse)->k_schedq->sq_runq, thrd)
+#define KSE_RUNQ_FIRST(kse) _pq_first(&(kse)->k_schedq->sq_runq)
+
+
+/*
+ * We've got to keep track of everything that is allocated, not only
+ * to have a speedy free list, but also so they can be deallocated
+ * after a fork().
+ */
+static TAILQ_HEAD(, kse) active_kseq;
+static TAILQ_HEAD(, kse) free_kseq;
+static TAILQ_HEAD(, kse_group) free_kse_groupq;
+static TAILQ_HEAD(, kse_group) active_kse_groupq;
+static struct lock kse_lock; /* also used for kseg queue */
+static int free_kse_count = 0;
+static int free_kseg_count = 0;
+static TAILQ_HEAD(, pthread) free_threadq;
+static struct lock thread_lock;
+static int free_thread_count = 0;
+static int inited = 0;
+static int active_kse_count = 0;
+static int active_kseg_count = 0;
+
+static void kse_check_completed(struct kse *kse);
+static void kse_check_waitq(struct kse *kse);
+static void kse_check_signals(struct kse *kse);
+static void kse_entry(struct kse_mailbox *mbx);
+static void kse_fini(struct kse *curkse);
+static void kse_sched_multi(struct kse *curkse);
+static void kse_sched_single(struct kse *curkse);
+static void kse_switchout_thread(struct kse *kse, struct pthread *thread);
+static void kse_wait(struct kse *kse);
+static void kseg_free(struct kse_group *kseg);
+static void kseg_init(struct kse_group *kseg);
+static void kse_waitq_insert(struct pthread *thread);
+static void thr_cleanup(struct kse *kse, struct pthread *curthread);
+static void thr_gc(struct kse *curkse);
+static void thr_resume_wrapper(int unused_1, siginfo_t *unused_2,
+ ucontext_t *ucp);
+static void thr_resume_check(struct pthread *curthread, ucontext_t *ucp,
+ struct pthread_sigframe *psf);
+static int thr_timedout(struct pthread *thread, struct timespec *curtime);
+
+/*
+ * This is called after a fork().
+ * No locks need to be taken here since we are guaranteed to be
+ * single threaded.
+ */
void
-_thread_kern_sched(void)
+_kse_single_thread(struct pthread *curthread)
{
- struct timespec ts;
- struct timeval tv;
- struct pthread *curthread = _get_curthread();
- unsigned int current_tick;
-
- /* Get the current time of day. */
- GET_CURRENT_TOD(tv);
- TIMEVAL_TO_TIMESPEC(&tv, &ts);
- current_tick = _sched_ticks;
+ struct kse *kse, *kse_next;
+ struct kse_group *kseg, *kseg_next;
+ struct pthread *thread, *thread_next;
+ kse_critical_t crit;
+ int i;
/*
- * Enter a critical section.
+ * Disable upcalls and clear the threaded flag.
+ * XXX - I don't think we need to disable upcalls after a fork(),
+ * but it doesn't hurt.
*/
- _thread_kern_kse_mailbox.km_curthread = NULL;
+ crit = _kse_critical_enter();
+ __isthreaded = 0;
/*
- * If this thread is becoming inactive, make note of the
- * time.
+ * Enter a loop to remove and free all threads other than
+ * the running thread from the active thread list:
*/
- if (curthread->state != PS_RUNNING) {
+ for (thread = TAILQ_FIRST(&_thread_list); thread != NULL;
+ thread = thread_next) {
/*
- * Save the current time as the time that the
- * thread became inactive:
+ * Advance to the next thread before destroying
+ * the current thread.
+ */
+ thread_next = TAILQ_NEXT(thread, tle);
+
+ /*
+ * Remove this thread from the list (the current
+ * thread will be removed but re-added by libpthread
+ * initialization).
*/
- curthread->last_inactive = (long)current_tick;
- if (curthread->last_inactive <
- curthread->last_active) {
- /* Account for a rollover: */
- curthread->last_inactive =+
- UINT_MAX + 1;
+ TAILQ_REMOVE(&_thread_list, thread, tle);
+ /* Make sure this isn't the running thread: */
+ if (thread != curthread) {
+ _thr_stack_free(&thread->attr);
+ if (thread->specific != NULL)
+ free(thread->specific);
+ for (i = 0; i < MAX_THR_LOCKLEVEL; i++) {
+ _lockuser_destroy(&thread->lockusers[i]);
+ }
+ _lock_destroy(&thread->lock);
+ free(thread);
+ }
+ }
+
+ TAILQ_INIT(&curthread->mutexq); /* initialize mutex queue */
+ curthread->joiner = NULL; /* no joining threads yet */
+ sigemptyset(&curthread->sigpend); /* clear pending signals */
+ if (curthread->specific != NULL) {
+ free(curthread->specific);
+ curthread->specific = NULL;
+ curthread->specific_data_count = 0;
+ }
+
+ /* Free the free KSEs: */
+ while ((kse = TAILQ_FIRST(&free_kseq)) != NULL) {
+ TAILQ_REMOVE(&free_kseq, kse, k_qe);
+ _ksd_destroy(&kse->k_ksd);
+ free(kse);
+ }
+ free_kse_count = 0;
+
+ /* Free the active KSEs: */
+ for (kse = TAILQ_FIRST(&active_kseq); kse != NULL; kse = kse_next) {
+ kse_next = TAILQ_NEXT(kse, k_qe);
+ TAILQ_REMOVE(&active_kseq, kse, k_qe);
+ for (i = 0; i < MAX_KSE_LOCKLEVEL; i++) {
+ _lockuser_destroy(&kse->k_lockusers[i]);
}
+ _lock_destroy(&kse->k_lock);
+ free(kse);
+ }
+ active_kse_count = 0;
+
+ /* Free the free KSEGs: */
+ while ((kseg = TAILQ_FIRST(&free_kse_groupq)) != NULL) {
+ TAILQ_REMOVE(&free_kse_groupq, kseg, kg_qe);
+ _lock_destroy(&kseg->kg_lock);
+ free(kseg);
+ }
+ free_kseg_count = 0;
+
+ /* Free the active KSEGs: */
+ for (kseg = TAILQ_FIRST(&active_kse_groupq);
+ kseg != NULL; kseg = kseg_next) {
+ kseg_next = TAILQ_NEXT(kseg, kg_qe);
+ TAILQ_REMOVE(&active_kse_groupq, kseg, kg_qe);
+ _lock_destroy(&kseg->kg_lock);
+ free(kseg);
+ }
+ active_kseg_count = 0;
+
+ /* Free the free threads. */
+ while ((thread = TAILQ_FIRST(&free_threadq)) != NULL) {
+ TAILQ_REMOVE(&free_threadq, thread, tle);
+ if (thread->specific != NULL)
+ free(thread->specific);
+ for (i = 0; i < MAX_THR_LOCKLEVEL; i++) {
+ _lockuser_destroy(&thread->lockusers[i]);
+ }
+ _lock_destroy(&thread->lock);
+ free(thread);
+ }
+ free_thread_count = 0;
+
+ /* Free the to-be-gc'd threads. */
+ while ((thread = TAILQ_FIRST(&_thread_gc_list)) != NULL) {
+ TAILQ_REMOVE(&_thread_gc_list, thread, tle);
+ free(thread);
+ }
+
+ if (inited != 0) {
+ /*
+ * Destroy these locks; they'll be recreated to assure they
+ * are in the unlocked state.
+ */
+ _lock_destroy(&kse_lock);
+ _lock_destroy(&thread_lock);
+ _lock_destroy(&_thread_list_lock);
+ inited = 0;
+ }
+
+ /*
+ * After a fork(), the leftover thread goes back to being
+ * scope process.
+ */
+ curthread->attr.flags &= ~PTHREAD_SCOPE_SYSTEM;
+ curthread->attr.flags |= PTHREAD_SCOPE_PROCESS;
+
+ /*
+ * After a fork, we are still operating on the thread's original
+ * stack. Don't clear the THR_STACK_USER flag from the thread's
+ * attribute flags.
+ */
+
+ /* Initialize the threads library. */
+ curthread->kse = NULL;
+ curthread->kseg = NULL;
+ _kse_initial = NULL;
+ _libpthread_init(curthread);
+}
+
+/*
+ * This is used to initialize housekeeping and to initialize the
+ * KSD for the KSE.
+ */
+void
+_kse_init(void)
+{
+ if (inited == 0) {
+ TAILQ_INIT(&active_kseq);
+ TAILQ_INIT(&active_kse_groupq);
+ TAILQ_INIT(&free_kseq);
+ TAILQ_INIT(&free_kse_groupq);
+ TAILQ_INIT(&free_threadq);
+ if (_lock_init(&kse_lock, LCK_ADAPTIVE,
+ _kse_lock_wait, _kse_lock_wakeup) != 0)
+ PANIC("Unable to initialize free KSE queue lock");
+ if (_lock_init(&thread_lock, LCK_ADAPTIVE,
+ _kse_lock_wait, _kse_lock_wakeup) != 0)
+ PANIC("Unable to initialize free thread queue lock");
+ if (_lock_init(&_thread_list_lock, LCK_ADAPTIVE,
+ _kse_lock_wait, _kse_lock_wakeup) != 0)
+ PANIC("Unable to initialize thread list lock");
+ active_kse_count = 0;
+ active_kseg_count = 0;
+ inited = 1;
}
+}
+
+int
+_kse_isthreaded(void)
+{
+ return (__isthreaded != 0);
+}
+
+/*
+ * This is called when the first thread (other than the initial
+ * thread) is created.
+ */
+void
+_kse_setthreaded(int threaded)
+{
+ if ((threaded != 0) && (__isthreaded == 0)) {
+ /*
+ * Locking functions in libc are required when there are
+ * threads other than the initial thread.
+ */
+ __isthreaded = 1;
+
+ /*
+ * Tell the kernel to create a KSE for the initial thread
+ * and enable upcalls in it.
+ */
+ kse_create(&_kse_initial->k_mbx, 0);
+ KSE_SET_MBOX(_kse_initial, _thr_initial);
+ }
+}
+
+/*
+ * Lock wait and wakeup handlers for KSE locks. These are only used by
+ * KSEs, and should never be used by threads. KSE locks include the
+ * KSE group lock (used for locking the scheduling queue) and the
+ * kse_lock defined above.
+ *
+ * When a KSE lock attempt blocks, the entire KSE blocks, allowing another
+ * KSE to run. For the most part, it doesn't make much sense to try and
+ * schedule another thread because you need to lock the scheduling queue
+ * in order to do that. And since the KSE lock is used to lock the scheduling
+ * queue, you would just end up blocking again.
+ */
+void
+_kse_lock_wait(struct lock *lock, struct lockuser *lu)
+{
+ struct kse *curkse = (struct kse *)_LCK_GET_PRIVATE(lu);
+ struct timespec ts;
+ kse_critical_t crit;
/*
- * Place this thread into the appropriate queue(s).
+ * Enter a loop to wait until we get the lock.
*/
- switch (curthread->state) {
- case PS_DEAD:
- case PS_STATE_MAX: /* XXX: silences -Wall */
- case PS_SUSPENDED:
- /* Dead or suspended threads are not placed in any queue. */
- break;
- case PS_RUNNING:
+ ts.tv_sec = 0;
+ ts.tv_nsec = 1000000; /* 1 msec */
+ KSE_SET_WAIT(curkse);
+ while (_LCK_BUSY(lu)) {
+ /*
+ * Yield the kse and wait to be notified when the lock
+ * is granted.
+ */
+ crit = _kse_critical_enter();
+ __sys_nanosleep(&ts, NULL);
+ _kse_critical_leave(crit);
+
+ /*
+ * Make sure that the wait flag is set again in case
+ * we woke up without the lock being granted.
+ */
+ KSE_SET_WAIT(curkse);
+ }
+ KSE_CLEAR_WAIT(curkse);
+}
+
+void
+_kse_lock_wakeup(struct lock *lock, struct lockuser *lu)
+{
+ struct kse *curkse;
+ struct kse *kse;
+
+ curkse = _get_curkse();
+ kse = (struct kse *)_LCK_GET_PRIVATE(lu);
+
+ if (kse == curkse)
+ PANIC("KSE trying to wake itself up in lock");
+ else if (KSE_WAITING(kse)) {
/*
- * Save the current time as the time that the
- * thread became inactive:
+ * Notify the owning kse that it has the lock.
*/
- current_tick = _sched_ticks;
- curthread->last_inactive = (long)current_tick;
- if (curthread->last_inactive <
- curthread->last_active) {
- /* Account for a rollover: */
- curthread->last_inactive =+ UINT_MAX + 1;
+ KSE_WAKEUP(kse);
+ }
+}
+
+/*
+ * Thread wait and wakeup handlers for thread locks. These are only used
+ * by threads, never by KSEs. Thread locks include the per-thread lock
+ * (defined in its structure), and condition variable and mutex locks.
+ */
+void
+_thr_lock_wait(struct lock *lock, struct lockuser *lu)
+{
+ struct pthread *curthread = (struct pthread *)lu->lu_private;
+ int count;
+
+ /*
+ * Spin for a bit.
+ *
+ * XXX - We probably want to make this a bit smarter. It
+ * doesn't make sense to spin unless there is more
+ * than 1 CPU. A thread that is holding one of these
+ * locks is prevented from being swapped out for another
+ * thread within the same scheduling entity.
+ */
+ count = 0;
+ while (_LCK_BUSY(lu) && count < 300)
+ count++;
+ while (_LCK_BUSY(lu)) {
+ THR_SCHED_LOCK(curthread, curthread);
+ if (_LCK_BUSY(lu)) {
+ /* Wait for the lock: */
+ atomic_store_rel_int(&curthread->need_wakeup, 1);
+ THR_SET_STATE(curthread, PS_LOCKWAIT);
+ THR_SCHED_UNLOCK(curthread, curthread);
+ _thr_sched_switch(curthread);
}
+ else
+ THR_SCHED_UNLOCK(curthread, curthread);
+ }
+}
- if ((curthread->slice_usec != -1) &&
- (curthread->attr.sched_policy != SCHED_FIFO)) {
- /*
- * Accumulate the number of microseconds for
- * which the current thread has run:
- */
- curthread->slice_usec +=
- (curthread->last_inactive -
- curthread->last_active) *
- (long)_clock_res_usec;
- /* Check for time quantum exceeded: */
- if (curthread->slice_usec > TIMESLICE_USEC)
- curthread->slice_usec = -1;
+void
+_thr_lock_wakeup(struct lock *lock, struct lockuser *lu)
+{
+ struct pthread *thread;
+ struct pthread *curthread;
+
+ curthread = _get_curthread();
+ thread = (struct pthread *)_LCK_GET_PRIVATE(lu);
+
+ THR_SCHED_LOCK(curthread, thread);
+ _thr_setrunnable_unlocked(thread);
+ atomic_store_rel_int(&thread->need_wakeup, 0);
+ THR_SCHED_UNLOCK(curthread, thread);
+}
+
+kse_critical_t
+_kse_critical_enter(void)
+{
+ kse_critical_t crit;
+
+ crit = _ksd_readandclear_tmbx;
+ return (crit);
+}
+
+void
+_kse_critical_leave(kse_critical_t crit)
+{
+ struct pthread *curthread;
+
+ _ksd_set_tmbx(crit);
+ if ((crit != NULL) && ((curthread = _get_curthread()) != NULL))
+ THR_YIELD_CHECK(curthread);
+}
+
+void
+_thr_critical_enter(struct pthread *thread)
+{
+ thread->critical_count++;
+}
+
+void
+_thr_critical_leave(struct pthread *thread)
+{
+ thread->critical_count--;
+ THR_YIELD_CHECK(thread);
+}
+
+/*
+ * XXX - We may need to take the scheduling lock before calling
+ * this, or perhaps take the lock within here before
+ * doing anything else.
+ */
+void
+_thr_sched_switch(struct pthread *curthread)
+{
+ struct pthread_sigframe psf;
+ kse_critical_t crit;
+ struct kse *curkse;
+ volatile int once = 0;
+
+ /* We're in the scheduler, 5 by 5: */
+ crit = _kse_critical_enter();
+ curkse = _get_curkse();
+
+ curthread->need_switchout = 1; /* The thread yielded on its own. */
+ curthread->critical_yield = 0; /* No need to yield anymore. */
+ curthread->slice_usec = -1; /* Restart the time slice. */
+
+ /*
+ * The signal frame is allocated off the stack because
+ * a thread can be interrupted by other signals while
+ * it is running down pending signals.
+ */
+ sigemptyset(&psf.psf_sigset);
+ curthread->curframe = &psf;
+
+ _thread_enter_uts(&curthread->tmbx, &curkse->k_mbx);
+
+ /*
+ * This thread is being resumed; check for cancellations.
+ */
+ if ((once == 0) && (!THR_IN_CRITICAL(curthread))) {
+ once = 1;
+ thr_resume_check(curthread, &curthread->tmbx.tm_context, &psf);
+ }
+}
+
+/*
+ * This is the entry point of the KSE upcall.
+ */
+static void
+kse_entry(struct kse_mailbox *mbx)
+{
+ struct kse *curkse;
+
+ /* The kernel should always clear this before making the upcall. */
+ assert(mbx->km_curthread == NULL);
+ curkse = (struct kse *)mbx->km_udata;
+
+ /* Check for first time initialization: */
+ if ((curkse->k_flags & KF_INITIALIZED) == 0) {
+ /* Setup this KSE's specific data. */
+ _ksd_setprivate(&curkse->k_ksd);
+ _set_curkse(curkse);
+
+ /* Set this before grabbing the context. */
+ curkse->k_flags |= KF_INITIALIZED;
+ }
+
+ /* Avoid checking the type of KSE more than once. */
+ if ((curkse->k_kseg->kg_flags & KGF_SINGLE_THREAD) != 0) {
+ curkse->k_mbx.km_func = (void *)kse_sched_single;
+ kse_sched_single(curkse);
+ } else {
+ curkse->k_mbx.km_func = (void *)kse_sched_multi;
+ kse_sched_multi(curkse);
+ }
+}
+
+/*
+ * This is the scheduler for a KSE which runs a scope system thread.
+ * The multi-thread KSE scheduler should also work for a single threaded
+ * KSE, but we use a separate scheduler so that it can be fine-tuned
+ * to be more efficient (and perhaps not need a separate stack for
+ * the KSE, allowing it to use the thread's stack).
+ *
+ * XXX - This probably needs some work.
+ */
+static void
+kse_sched_single(struct kse *curkse)
+{
+ struct pthread *curthread;
+ struct timespec ts;
+ int level;
+
+ /* This may have returned from a kse_release(). */
+ if (KSE_WAITING(curkse))
+ KSE_CLEAR_WAIT(curkse);
+
+ curthread = curkse->k_curthread;
+
+ if (curthread->active == 0) {
+ if (curthread->state != PS_RUNNING) {
+ /* Check to see if the thread has timed out. */
+ KSE_GET_TOD(curkse, &ts);
+ if (thr_timedout(curthread, &ts) != 0) {
+ curthread->timeout = 1;
+ curthread->state = PS_RUNNING;
+ }
}
+ } else if (curthread->need_switchout != 0) {
+ /*
+ * This has to do the job of kse_switchout_thread(), only
+ * for a single threaded KSE/KSEG.
+ */
- if (curthread->slice_usec == -1) {
- /*
- * The thread exceeded its time
- * quantum or it yielded the CPU;
- * place it at the tail of the
- * queue for its priority.
- */
- PTHREAD_PRIOQ_INSERT_TAIL(curthread);
- } else {
+ /* This thread no longer needs to yield the CPU: */
+ curthread->critical_yield = 0;
+ curthread->need_switchout = 0;
+
+ /*
+ * Lock the scheduling queue.
+ *
+ * There is no scheduling queue for single threaded KSEs,
+ * but we need a lock for protection regardless.
+ */
+ KSE_SCHED_LOCK(curkse, curkse->k_kseg);
+
+ switch (curthread->state) {
+ case PS_DEAD:
+ /* Unlock the scheduling queue and exit the KSE. */
+ KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
+ kse_fini(curkse); /* does not return */
+ break;
+
+ case PS_COND_WAIT:
+ case PS_SLEEP_WAIT:
+ /* Only insert threads that can timeout: */
+ if (curthread->wakeup_time.tv_sec != -1) {
+ /* Insert into the waiting queue: */
+ KSE_WAITQ_INSERT(curkse, curthread);
+ }
+ break;
+
+ case PS_LOCKWAIT:
+ level = curthread->locklevel - 1;
+ if (_LCK_BUSY(&curthread->lockusers[level]))
+ KSE_WAITQ_INSERT(curkse, curthread);
+ else
+ THR_SET_STATE(curthread, PS_RUNNING);
+ break;
+
+ case PS_JOIN:
+ case PS_MUTEX_WAIT:
+ case PS_RUNNING:
+ case PS_SIGSUSPEND:
+ case PS_SIGWAIT:
+ case PS_SUSPENDED:
+ case PS_DEADLOCK:
+ default:
/*
- * The thread hasn't exceeded its
- * interval. Place it at the head
- * of the queue for its priority.
+ * These states don't timeout and don't need
+ * to be in the waiting queue.
*/
- PTHREAD_PRIOQ_INSERT_HEAD(curthread);
+ break;
}
- break;
- case PS_SPINBLOCK:
- /* Increment spinblock count. */
- _spinblock_count++;
+ if (curthread->state != PS_RUNNING)
+ curthread->active = 0;
+ }
- /* No timeouts for these states. */
- curthread->wakeup_time.tv_sec = -1;
- curthread->wakeup_time.tv_nsec = -1;
+ while (curthread->state != PS_RUNNING) {
+ kse_wait(curkse);
+ }
- /* Restart the time slice. */
- curthread->slice_usec = -1;
+ /* Remove the frame reference. */
+ curthread->curframe = NULL;
- /* Insert into the work queue. */
- PTHREAD_WORKQ_INSERT(curthread);
- break;
+ /* Unlock the scheduling queue. */
+ KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
- case PS_DEADLOCK:
- case PS_JOIN:
- case PS_MUTEX_WAIT:
- case PS_WAIT_WAIT:
- /* No timeouts for these states. */
- curthread->wakeup_time.tv_sec = -1;
- curthread->wakeup_time.tv_nsec = -1;
+ /*
+ * Continue the thread at its current frame:
+ */
+ _thread_switch(&curthread->tmbx, &curkse->k_mbx.km_curthread);
+}
+
+void
+dump_queues(struct kse *curkse)
+{
+ struct pthread *thread;
+
+ DBG_MSG("Threads in waiting queue:\n");
+ TAILQ_FOREACH(thread, &curkse->k_kseg->kg_schedq.sq_waitq, pqe) {
+ DBG_MSG(" thread %p, state %d, blocked %d\n",
+ thread, thread->state, thread->blocked);
+ }
+}
+
+
+/*
+ * This is the scheduler for a KSE which runs multiple threads.
+ */
+static void
+kse_sched_multi(struct kse *curkse)
+{
+ struct pthread *curthread;
+ struct pthread_sigframe *curframe;
+ int ret;
+
+ /* This may have returned from a kse_release(). */
+ if (KSE_WAITING(curkse))
+ KSE_CLEAR_WAIT(curkse);
- /* Restart the time slice. */
- curthread->slice_usec = -1;
+ /* Lock the scheduling lock. */
+ KSE_SCHED_LOCK(curkse, curkse->k_kseg);
- /* Insert into the waiting queue. */
- PTHREAD_WAITQ_INSERT(curthread);
- break;
+ /*
+ * If the current thread was completed in another KSE, then
+ * it will be in the run queue. Don't mark it as being blocked.
+ */
+ if (((curthread = curkse->k_curthread) != NULL) &&
+ ((curthread->flags & THR_FLAGS_IN_RUNQ) == 0) &&
+ (curthread->need_switchout == 0)) {
+ /*
+ * Assume the current thread is blocked; when the
+ * completed threads are checked and if the current
+ * thread is among the completed, the blocked flag
+ * will be cleared.
+ */
+ curthread->blocked = 1;
+ }
+
+ /* Check for any unblocked threads in the kernel. */
+ kse_check_completed(curkse);
- case PS_COND_WAIT:
- case PS_SLEEP_WAIT:
- /* These states can timeout. */
- /* Restart the time slice. */
- curthread->slice_usec = -1;
+ /*
+ * Check for threads that have timed-out.
+ */
+ kse_check_waitq(curkse);
- /* Insert into the waiting queue. */
- PTHREAD_WAITQ_INSERT(curthread);
- break;
+ /*
+ * Switchout the current thread, if necessary, as the last step
+ * so that it is inserted into the run queue (if it's runnable)
+ * _after_ any other threads that were added to it above.
+ */
+ if (curthread == NULL)
+ ; /* Nothing to do here. */
+ else if ((curthread->need_switchout == 0) &&
+ (curthread->blocked == 0) && (THR_IN_CRITICAL(curthread))) {
+ /*
+ * Resume the thread and tell it to yield when
+ * it leaves the critical region.
+ */
+ curthread->critical_yield = 0;
+ curthread->active = 1;
+ if ((curthread->flags & THR_FLAGS_IN_RUNQ) != 0)
+ KSE_RUNQ_REMOVE(curkse, curthread);
+ curkse->k_curthread = curthread;
+ curthread->kse = curkse;
+ KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
+ DBG_MSG("Continuing thread %p in critical region\n",
+ curthread);
+ ret = _thread_switch(&curthread->tmbx,
+ &curkse->k_mbx.km_curthread);
+ if (ret != 0)
+ PANIC("Can't resume thread in critical region\n");
}
+ else if ((curthread->flags & THR_FLAGS_IN_RUNQ) == 0)
+ kse_switchout_thread(curkse, curthread);
+ curkse->k_curthread = NULL;
- /* Switch into the scheduler's context. */
- DBG_MSG("Calling _thread_enter_uts()\n");
- _thread_enter_uts(&curthread->mailbox, &_thread_kern_kse_mailbox);
- DBG_MSG("Returned from _thread_enter_uts, thread %p\n", curthread);
+ /* This has to be done without the scheduling lock held. */
+ KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
+ kse_check_signals(curkse);
+
+ /* Check for GC: */
+ if (_gc_check != 0)
+ thr_gc(curkse);
+ KSE_SCHED_LOCK(curkse, curkse->k_kseg);
+
+ dump_queues(curkse);
+
+ /* Check if there are no threads ready to run: */
+ while (((curthread = KSE_RUNQ_FIRST(curkse)) == NULL) &&
+ (curkse->k_kseg->kg_threadcount != 0)) {
+ /*
+ * Wait for a thread to become active or until there are
+ * no more threads.
+ */
+ kse_wait(curkse);
+ kse_check_waitq(curkse);
+ KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
+ kse_check_signals(curkse);
+ if (_gc_check != 0)
+ thr_gc(curkse);
+ KSE_SCHED_LOCK(curkse, curkse->k_kseg);
+ }
+
+ /* Check for no more threads: */
+ if (curkse->k_kseg->kg_threadcount == 0) {
+ /*
+ * Normally this shouldn't return, but it will if there
+ * are other KSEs running that create new threads that
+ * are assigned to this KSE[G]. For instance, if a scope
+ * system thread were to create a scope process thread
+ * and this kse[g] is the initial kse[g], then that newly
+ * created thread would be assigned to us (the initial
+ * kse[g]).
+ */
+ KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
+ kse_fini(curkse);
+ KSE_SCHED_LOCK(curkse, curkse->k_kseg);
+ curthread = KSE_RUNQ_FIRST(curkse);
+ }
+
+ THR_ASSERT(curthread != NULL,
+ "Return from kse_wait/fini without thread.");
+ THR_ASSERT(curthread->state != PS_DEAD,
+ "Trying to resume dead thread!");
+ KSE_RUNQ_REMOVE(curkse, curthread);
/*
- * This point is reached when _thread_switch() is called
- * to restore the state of a thread.
- *
- * This is the normal way out of the scheduler (for synchronous
- * switches).
+ * Make the selected thread the current thread.
*/
+ curkse->k_curthread = curthread;
- /* XXXKSE: Do this inside _thread_kern_scheduler() */
- if (curthread->sig_defer_count == 0) {
- if (((curthread->cancelflags &
- PTHREAD_AT_CANCEL_POINT) == 0) &&
- ((curthread->cancelflags &
- PTHREAD_CANCEL_ASYNCHRONOUS) != 0))
- /*
- * Stick a cancellation point at the
- * start of each async-cancellable
- * thread's resumption.
- *
- * We allow threads woken at cancel
- * points to do their own checks.
- */
- pthread_testcancel();
+ /*
+ * Make sure the current thread's kse points to this kse.
+ */
+ curthread->kse = curkse;
+
+ /*
+ * Reset accounting.
+ */
+ curthread->tmbx.tm_uticks = 0;
+ curthread->tmbx.tm_sticks = 0;
+
+ /*
+ * Reset the time slice if this thread is running for the first
+ * time or running again after using its full time slice allocation.
+ */
+ if (curthread->slice_usec == -1)
+ curthread->slice_usec = 0;
+
+ /* Mark the thread active. */
+ curthread->active = 1;
+
+ /* Remove the frame reference. */
+ curframe = curthread->curframe;
+ curthread->curframe = NULL;
+
+ /* Unlock the scheduling queue: */
+ KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
+
+ /*
+ * The thread's current signal frame will only be NULL if it
+ * is being resumed after being blocked in the kernel. In
+ * this case, and if the thread needs to run down pending
+ * signals or needs a cancellation check, we need to add a
+ * signal frame to the thread's context.
+ */
+#if 0
+ if ((curframe == NULL) && ((curthread->check_pending != 0) ||
+ (((curthread->cancelflags & THR_AT_CANCEL_POINT) == 0) &&
+ ((curthread->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS) != 0)))) {
+ signalcontext(&curthread->tmbx.tm_context, 0,
+ (__sighandler_t *)thr_resume_wrapper);
}
+#endif
+ /*
+ * Continue the thread at its current frame:
+ */
+ DBG_MSG("Continuing thread %p\n", curthread);
+ ret = _thread_switch(&curthread->tmbx, &curkse->k_mbx.km_curthread);
+ if (ret != 0)
+ PANIC("Thread has returned from _thread_switch");
- if (_sched_switch_hook != NULL) {
- /* Run the installed switch hook: */
- thread_run_switch_hook(_last_user_thread, curthread);
+ /* This point should not be reached. */
+ PANIC("Thread has returned from _thread_switch");
+}
+
+static void
+kse_check_signals(struct kse *curkse)
+{
+ sigset_t sigset;
+ int i;
+
+ /* Deliver posted signals. */
+ for (i = 0; i < _SIG_WORDS; i++) {
+ atomic_swap_int(&curkse->k_mbx.km_sigscaught.__bits[i],
+ 0, &sigset.__bits[i]);
+ }
+ if (SIGNOTEMPTY(sigset)) {
+ /*
+ * Dispatch each signal.
+ *
+ * XXX - There is no siginfo for any of these.
+ * I think there should be, especially for
+ * signals from other processes (si_pid, si_uid).
+ */
+ for (i = 1; i < NSIG; i++) {
+ if (sigismember(&sigset, i) != 0) {
+ DBG_MSG("Dispatching signal %d\n", i);
+ _thr_sig_dispatch(curkse, i,
+ NULL /* no siginfo */);
+ }
+ }
+ sigemptyset(&sigset);
+ __sys_sigprocmask(SIG_SETMASK, &sigset, NULL);
}
}
-void
-_thread_kern_scheduler(struct kse_mailbox *km)
+static void
+thr_resume_wrapper(int unused_1, siginfo_t *unused_2, ucontext_t *ucp)
{
- struct timespec ts;
- struct timeval tv;
- pthread_t td, pthread, pthread_h;
- unsigned int current_tick;
- struct kse_thr_mailbox *tm, *p;
- sigset_t sigset;
- int i;
+ struct pthread *curthread = _get_curthread();
- DBG_MSG("entering\n");
- while (!TAILQ_EMPTY(&_thread_list)) {
+ thr_resume_check(curthread, ucp, NULL);
+}
- /* Get the current time of day. */
- ts = km->km_timeofday;
- TIMESPEC_TO_TIMEVAL(&_sched_tod, &ts);
- current_tick = _sched_ticks;
+static void
+thr_resume_check(struct pthread *curthread, ucontext_t *ucp,
+ struct pthread_sigframe *psf)
+{
+ /* Check signals before cancellations. */
+ while (curthread->check_pending != 0) {
+ /* Clear the pending flag. */
+ curthread->check_pending = 0;
/*
- * Pick up threads that had blocked in the kernel and
- * have now completed their trap (syscall, vm fault, etc).
- * These threads were PS_RUNNING (and still are), but they
- * need to be added to the run queue so that they can be
- * scheduled again.
+ * It's perfectly valid, though not portable, for
+ * signal handlers to munge their interrupted context
+ * and expect to return to it. Ensure we use the
+ * correct context when running down signals.
*/
- DBG_MSG("Picking up km_completed\n");
- p = km->km_completed;
- km->km_completed = NULL; /* XXX: Atomic xchg here. */
- while ((tm = p) != NULL) {
- p = tm->tm_next;
- tm->tm_next = NULL;
- if (tm->tm_udata == NULL) {
- DBG_MSG("\tidle context\n");
- _kern_idle_running = 0;
- continue;
+ _thr_sig_rundown(curthread, ucp, psf);
+ }
+
+ if (((curthread->cancelflags & THR_AT_CANCEL_POINT) == 0) &&
+ ((curthread->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS) != 0))
+ pthread_testcancel();
+}
+
+/*
+ * Clean up a thread. This must be called with the thread's KSE
+ * scheduling lock held. The thread must be a thread from the
+ * KSE's group.
+ */
+static void
+thr_cleanup(struct kse *curkse, struct pthread *thread)
+{
+ struct pthread *joiner;
+ int free_thread = 0;
+
+ if ((joiner = thread->joiner) != NULL) {
+ thread->joiner = NULL;
+ if ((joiner->state == PS_JOIN) &&
+ (joiner->join_status.thread == thread)) {
+ joiner->join_status.thread = NULL;
+
+ /* Set the return status for the joining thread: */
+ joiner->join_status.ret = thread->ret;
+
+ /* Make the thread runnable. */
+ if (joiner->kseg == curkse->k_kseg)
+ _thr_setrunnable_unlocked(joiner);
+ else {
+ KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
+ KSE_SCHED_LOCK(curkse, joiner->kseg);
+ _thr_setrunnable_unlocked(joiner);
+ KSE_SCHED_UNLOCK(curkse, joiner->kseg);
+ KSE_SCHED_LOCK(curkse, curkse->k_kseg);
}
- DBG_MSG("\tmailbox=%p pthread=%p\n", tm, tm->tm_udata);
- PTHREAD_PRIOQ_INSERT_TAIL((pthread_t)tm->tm_udata);
}
+ thread->attr.flags |= PTHREAD_DETACHED;
+ }
- /* Deliver posted signals. */
- DBG_MSG("Picking up signals\n");
- bcopy(&km->km_sigscaught, &sigset, sizeof(sigset_t));
- sigemptyset(&km->km_sigscaught); /* XXX */
- if (SIGNOTEMPTY(sigset))
- for (i = 1; i < NSIG; i++)
- if (sigismember(&sigset, i) != 0)
- _thread_sig_dispatch(i);
+ thread->flags |= THR_FLAGS_GC_SAFE;
+ thread->kseg->kg_threadcount--;
+ KSE_LOCK_ACQUIRE(curkse, &_thread_list_lock);
+ _thr_stack_free(&thread->attr);
+ if ((thread->attr.flags & PTHREAD_DETACHED) != 0) {
+ /* Remove this thread from the list of all threads: */
+ THR_LIST_REMOVE(thread);
+ if (thread->refcount == 0) {
+ THR_GCLIST_REMOVE(thread);
+ TAILQ_REMOVE(&thread->kseg->kg_threadq, thread, kle);
+ free_thread = 1;
+ }
+ }
+ KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
+ if (free_thread != 0)
+ _thr_free(curkse, thread);
+}
- if (_spinblock_count != 0) {
- /*
- * Enter a loop to look for threads waiting on
- * a spinlock that is now available.
- */
- PTHREAD_WAITQ_SETACTIVE();
- TAILQ_FOREACH(pthread, &_workq, qe) {
- if (pthread->state == PS_SPINBLOCK) {
- /*
- * If the lock is available, let the
- * thread run.
- */
- if (pthread->data.spinlock->
- access_lock == 0) {
- PTHREAD_WAITQ_CLEARACTIVE();
- PTHREAD_WORKQ_REMOVE(pthread);
- PTHREAD_PRIOQ_INSERT_TAIL(
- pthread);
- PTHREAD_SET_STATE(pthread,
- PS_RUNNING);
- PTHREAD_WAITQ_SETACTIVE();
-
- /*
- * One less thread in a
- * spinblock state:
- */
- _spinblock_count--;
- }
- }
+void
+thr_gc(struct pthread *curthread)
+{
+	struct pthread *td, *joiner;
+	struct kse_group *free_kseg;
+	struct kse *curkse = _get_curkse();
+	int clean;
+
+ _gc_check = 0;
+ KSE_LOCK_ACQUIRE(curkse, &_thread_list_lock);
+ while ((td = TAILQ_FIRST(&_thread_gc_list)) != NULL) {
+ THR_GCLIST_REMOVE(td);
+ clean = (td->attr.flags & PTHREAD_DETACHED) != 0;
+ KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
+
+ KSE_SCHED_LOCK(curkse, td->kseg);
+ TAILQ_REMOVE(&td->kseg->kg_threadq, td, kle);
+ if (TAILQ_EMPTY(&td->kseg->kg_threadq))
+ free_kseg = td->kseg;
+ else
+ free_kseg = NULL;
+ joiner = NULL;
+ if ((td->joiner != NULL) && (td->joiner->state == PS_JOIN) &&
+ (td->joiner->join_status.thread == td)) {
+ joiner = td->joiner;
+ joiner->join_status.thread = NULL;
+
+ /* Set the return status for the joining thread: */
+ joiner->join_status.ret = td->ret;
+
+ /* Make the thread runnable. */
+ if (td->kseg == joiner->kseg) {
+ _thr_setrunnable_unlocked(joiner);
+ joiner = NULL;
}
- PTHREAD_WAITQ_CLEARACTIVE();
}
+ td->joiner = NULL;
+ KSE_SCHED_UNLOCK(curkse, td->kseg);
+ if (free_kseg != NULL)
+ kseg_free(free_kseg);
+ if (joiner != NULL) {
+ KSE_SCHED_LOCK(curkse, joiner->kseg);
+ _thr_setrunnable_unlocked(joiner);
+		KSE_SCHED_UNLOCK(curkse, joiner->kseg);
+ }
+ _thr_free(curkse, td);
+ KSE_LOCK_ACQUIRE(curkse, &_thread_list_lock);
+ }
+ KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
+}
+
+
+/*
+ * Only new threads that are running or suspended may be scheduled.
+ */
+void
+_thr_schedule_add(struct pthread *curthread, struct pthread *newthread)
+{
+ struct kse *curkse;
+ kse_critical_t crit;
+ int need_start;
- /* Wake up threads that have timed out. */
- DBG_MSG("setactive\n");
- PTHREAD_WAITQ_SETACTIVE();
- DBG_MSG("Picking up timeouts (%x)\n", TAILQ_FIRST(&_waitingq));
- while (((pthread = TAILQ_FIRST(&_waitingq)) != NULL) &&
- (pthread->wakeup_time.tv_sec != -1) &&
- (((pthread->wakeup_time.tv_sec == 0) &&
- (pthread->wakeup_time.tv_nsec == 0)) ||
- (pthread->wakeup_time.tv_sec < ts.tv_sec) ||
- ((pthread->wakeup_time.tv_sec == ts.tv_sec) &&
- (pthread->wakeup_time.tv_nsec <= ts.tv_nsec)))) {
- DBG_MSG("\t...\n");
+ /*
+	 * System scope threads get their own KSE and KSEG; process
+	 * scope threads are added to an existing KSE group.
+ */
+ if ((newthread->attr.flags & PTHREAD_SCOPE_SYSTEM) != 0) {
+ /*
+ * No need to lock the scheduling queue since the
+ * KSE/KSEG pair have not yet been started.
+ */
+ KSEG_THRQ_ADD(newthread->kseg, newthread);
+ if (newthread->state == PS_RUNNING)
+ THR_RUNQ_INSERT_TAIL(newthread);
+ newthread->kseg->kg_threadcount++;
+ /*
+ * This thread needs a new KSE and KSEG.
+ */
+ crit = _kse_critical_enter();
+ curkse = _get_curkse();
+ _ksd_setprivate(&newthread->kse->k_ksd);
+ kse_create(&newthread->kse->k_mbx, 1);
+ _ksd_setprivate(&curkse->k_ksd);
+ _kse_critical_leave(crit);
+ }
+ else {
+ /*
+ * Lock the KSE and add the new thread to its list of
+ * assigned threads. If the new thread is runnable, also
+ * add it to the KSE's run queue.
+ */
+ need_start = 0;
+ KSE_SCHED_LOCK(curthread->kse, newthread->kseg);
+ KSEG_THRQ_ADD(newthread->kseg, newthread);
+ if (newthread->state == PS_RUNNING)
+ THR_RUNQ_INSERT_TAIL(newthread);
+ newthread->kseg->kg_threadcount++;
+ if ((newthread->kse->k_flags & KF_STARTED) == 0) {
/*
- * Remove this thread from the waiting queue
- * (and work queue if necessary) and place it
- * in the ready queue.
+ * This KSE hasn't been started yet. Start it
+ * outside of holding the lock.
*/
- PTHREAD_WAITQ_CLEARACTIVE();
- if (pthread->flags & PTHREAD_FLAGS_IN_WORKQ)
- PTHREAD_WORKQ_REMOVE(pthread);
- DBG_MSG("\twaking thread\n");
- PTHREAD_NEW_STATE(pthread, PS_RUNNING);
- PTHREAD_WAITQ_SETACTIVE();
+ newthread->kse->k_flags |= KF_STARTED;
+ need_start = 1;
+ }
+ KSE_SCHED_UNLOCK(curthread->kse, newthread->kseg);
+
+ if (need_start != 0)
+ kse_create(&newthread->kse->k_mbx, 0);
+ else if ((newthread->state == PS_RUNNING) &&
+ KSE_WAITING(newthread->kse)) {
/*
- * Flag the timeout in the thread structure:
+ * The thread is being scheduled on another KSEG.
*/
- pthread->timeout = 1;
+ KSE_WAKEUP(newthread->kse);
}
- DBG_MSG("clearactive\n");
- PTHREAD_WAITQ_CLEARACTIVE();
+ }
+}
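+
+/*
+ * Illustrative example, not part of this change: which branch above is
+ * taken depends on the contention scope the application selects in the
+ * thread attributes before pthread_create() (start_routine and arg are
+ * placeholders, and scope system support is still being completed):
+ *
+ *	pthread_attr_t attr;
+ *	pthread_t tid;
+ *
+ *	pthread_attr_init(&attr);
+ *	pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM);
+ *	pthread_create(&tid, &attr, start_routine, arg);
+ *
+ * A PTHREAD_SCOPE_SYSTEM thread brings its own KSE/KSEG and is started
+ * with kse_create(..., 1); a process scope thread is queued on an
+ * existing KSEG and only needs its KSE started or woken.
+ */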
- /*
- * Get the highest priority thread in the ready queue.
- */
- DBG_MSG("Selecting thread\n");
- pthread_h = PTHREAD_PRIOQ_FIRST();
+void
+kse_waitq_insert(struct pthread *thread)
+{
+ struct pthread *td;
+
+ if (thread->wakeup_time.tv_sec == -1)
+ TAILQ_INSERT_TAIL(&thread->kse->k_schedq->sq_waitq, thread,
+ pqe);
+ else {
+ td = TAILQ_FIRST(&thread->kse->k_schedq->sq_waitq);
+ while ((td != NULL) && (td->wakeup_time.tv_sec != -1) &&
+ ((td->wakeup_time.tv_sec < thread->wakeup_time.tv_sec) ||
+ ((td->wakeup_time.tv_sec == thread->wakeup_time.tv_sec) &&
+ (td->wakeup_time.tv_nsec <= thread->wakeup_time.tv_nsec))))
+ td = TAILQ_NEXT(td, pqe);
+ if (td == NULL)
+ TAILQ_INSERT_TAIL(&thread->kse->k_schedq->sq_waitq,
+ thread, pqe);
+ else
+ TAILQ_INSERT_BEFORE(td, thread, pqe);
+ }
+ thread->flags |= THR_FLAGS_IN_WAITQ;
+}
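+
+/*
+ * Illustrative note, not part of this change: the wait queue is kept
+ * sorted by wakeup time, with untimed threads (tv_sec == -1) collecting
+ * at the tail.  Inserting a thread that wakes at {5, 0} into
+ *
+ *	{3, 0} -> {5, 500000000} -> {-1, -1}
+ *
+ * gives
+ *
+ *	{3, 0} -> {5, 0} -> {5, 500000000} -> {-1, -1}
+ *
+ * so kse_check_waitq() only ever has to look at the head of the queue.
+ */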
- /* Check if there are no threads ready to run: */
- if (pthread_h) {
- DBG_MSG("Scheduling thread\n");
- /* Remove the thread from the ready queue: */
- PTHREAD_PRIOQ_REMOVE(pthread_h);
+/*
+ * This must be called with the scheduling lock held.
+ */
+static void
+kse_check_completed(struct kse *kse)
+{
+ struct pthread *thread;
+ struct kse_thr_mailbox *completed;
+
+ if ((completed = kse->k_mbx.km_completed) != NULL) {
+ kse->k_mbx.km_completed = NULL;
+ while (completed != NULL) {
+ thread = completed->tm_udata;
+ DBG_MSG("Found completed thread %p, name %s\n",
+ thread,
+ (thread->name == NULL) ? "none" : thread->name);
+ thread->blocked = 0;
+ if (thread != kse->k_curthread)
+ KSE_RUNQ_INSERT_TAIL(kse, thread);
+ completed = completed->tm_next;
+ }
+ }
+}
- /* Make the selected thread the current thread: */
- _set_curthread(pthread_h);
+/*
+ * This must be called with the scheduling lock held.
+ */
+static void
+kse_check_waitq(struct kse *kse)
+{
+ struct pthread *pthread;
+ struct timespec ts;
- /*
- * Save the current time as the time that the thread
- * became active:
- */
- current_tick = _sched_ticks;
- pthread_h->last_active = (long) current_tick;
+ KSE_GET_TOD(kse, &ts);
+ /*
+	 * Wake up threads that have timed out.  This has to be
+	 * done before adding the current thread to the run queue
+	 * so that a CPU-intensive thread doesn't get preference
+ * over waiting threads.
+ */
+ while (((pthread = KSE_WAITQ_FIRST(kse)) != NULL) &&
+ thr_timedout(pthread, &ts)) {
+ /* Remove the thread from the wait queue: */
+ KSE_WAITQ_REMOVE(kse, pthread);
+ DBG_MSG("Found timedout thread %p in waitq\n", pthread);
+
+		/* Indicate that the thread timed out: */
+ pthread->timeout = 1;
+
+ /* Add the thread to the priority queue: */
+ THR_SET_STATE(pthread, PS_RUNNING);
+ KSE_RUNQ_INSERT_TAIL(kse, pthread);
+ }
+}
+
+static int
+thr_timedout(struct pthread *thread, struct timespec *curtime)
+{
+ if (thread->wakeup_time.tv_sec < 0)
+ return (0);
+ else if (thread->wakeup_time.tv_sec > curtime->tv_sec)
+ return (0);
+ else if ((thread->wakeup_time.tv_sec == curtime->tv_sec) &&
+ (thread->wakeup_time.tv_nsec > curtime->tv_nsec))
+ return (0);
+ else
+ return (1);
+}
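+
+/*
+ * Illustrative note, not part of this change: with curtime at
+ * {42, 300000000}, a wakeup_time of {42, 250000000} has timed out,
+ * {42, 300000001} has not, and {-1, -1} (no timeout) never does.
+ */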
+
+/*
+ * This must be called with the scheduling lock held.
+ *
+ * Each thread has a time slice, a wakeup time (used when it wants
+ * to wait for a specified amount of time), a run state, and an
+ * active flag.
+ *
+ * When a thread gets run by the scheduler, the active flag is
+ * set to non-zero (1). When a thread performs an explicit yield
+ * or schedules a state change, it enters the scheduler and the
+ * active flag is cleared. When the active flag is still seen
+ * set in the scheduler, that means that the thread is blocked in
+ * the kernel (because it is cleared before entering the scheduler
+ * in all other instances).
+ *
+ * The wakeup time is only set for those states that can timeout.
+ * It is set to (-1, -1) for all other instances.
+ *
+ * The thread's run state, aside from being useful when debugging,
+ * is used to place the thread in an appropriate queue. There
+ * are 2 basic queues:
+ *
+ * o run queue - queue ordered by priority for all threads
+ * that are runnable
+ * o waiting queue - queue sorted by wakeup time for all threads
+ * that are not otherwise runnable (not blocked
+ * in kernel, not waiting for locks)
+ *
+ * The thread's time slice is used for round-robin scheduling
+ * (the default scheduling policy). While a SCHED_RR thread
+ * is runnable its time slice accumulates.  When it reaches
+ * the time slice interval, the slice is reset and the thread
+ * is moved to the end of the queue of threads at its priority.
+ * When a thread is no longer runnable (blocks in the kernel,
+ * waits, etc), its
+ * time slice is reset.
+ *
+ * The job of kse_switchout_thread() is to handle all of the above.
+ */
+static void
+kse_switchout_thread(struct kse *kse, struct pthread *thread)
+{
+ int level;
+
+ /*
+ * Place the currently running thread into the
+ * appropriate queue(s).
+ */
+ DBG_MSG("Switching out thread %p, state %d\n", thread, thread->state);
+ if (thread->blocked != 0) {
+ /* This thread must have blocked in the kernel. */
+ /* thread->slice_usec = -1;*/ /* restart timeslice */
+ /*
+ * XXX - Check for pending signals for this thread to
+ * see if we need to interrupt it in the kernel.
+ */
+ /* if (thread->check_pending != 0) */
+ if ((thread->slice_usec != -1) &&
+ (thread->attr.sched_policy != SCHED_FIFO))
+ thread->slice_usec += (thread->tmbx.tm_uticks
+ + thread->tmbx.tm_sticks) * _clock_res_usec;
+ }
+ else {
+ switch (thread->state) {
+ case PS_DEAD:
/*
- * Check if this thread is running for the first time
- * or running again after using its full time slice
- * allocation:
+ * The scheduler is operating on a different
+			 * stack, so it is safe to do garbage collection
+			 * here.
*/
- if (pthread_h->slice_usec == -1) {
- /* Reset the accumulated time slice period: */
- pthread_h->slice_usec = 0;
- }
+ thr_cleanup(kse, thread);
+ return;
+ break;
+ case PS_RUNNING:
+ /* Nothing to do here. */
+ break;
+
+ case PS_COND_WAIT:
+ case PS_SLEEP_WAIT:
+ /* Insert into the waiting queue: */
+ KSE_WAITQ_INSERT(kse, thread);
+ break;
+
+ case PS_LOCKWAIT:
/*
- * If we had a context switch, run any
- * installed switch hooks.
+ * This state doesn't timeout.
*/
- if ((_sched_switch_hook != NULL) &&
- (_last_user_thread != pthread_h)) {
- thread_run_switch_hook(_last_user_thread,
- pthread_h);
- }
+ thread->wakeup_time.tv_sec = -1;
+ thread->wakeup_time.tv_nsec = -1;
+ level = thread->locklevel - 1;
+ if (_LCK_BUSY(&thread->lockusers[level]))
+ KSE_WAITQ_INSERT(kse, thread);
+ else
+ THR_SET_STATE(thread, PS_RUNNING);
+ break;
+
+ case PS_JOIN:
+ case PS_MUTEX_WAIT:
+ case PS_SIGSUSPEND:
+ case PS_SIGWAIT:
+ case PS_SUSPENDED:
+ case PS_DEADLOCK:
+ default:
/*
- * Continue the thread at its current frame:
+ * These states don't timeout.
*/
- _last_user_thread = td;
- DBG_MSG("switch in\n");
- _thread_switch(&pthread_h->mailbox,
- &_thread_kern_kse_mailbox.km_curthread);
- DBG_MSG("switch out\n");
+ thread->wakeup_time.tv_sec = -1;
+ thread->wakeup_time.tv_nsec = -1;
+
+ /* Insert into the waiting queue: */
+ KSE_WAITQ_INSERT(kse, thread);
+ break;
+ }
+ if (thread->state != PS_RUNNING) {
+ /* Restart the time slice: */
+ thread->slice_usec = -1;
} else {
- /*
- * There is nothing for us to do. Either
- * yield, or idle until something wakes up.
- */
- DBG_MSG("No runnable threads, idling.\n");
- if (_kern_idle_running) {
- DBG_MSG("kse_release");
- kse_release(NULL);
+ if (thread->need_switchout != 0)
+ /*
+ * The thread yielded on its own;
+ * restart the timeslice.
+ */
+ thread->slice_usec = -1;
+ else if ((thread->slice_usec != -1) &&
+ (thread->attr.sched_policy != SCHED_FIFO)) {
+ thread->slice_usec += (thread->tmbx.tm_uticks
+ + thread->tmbx.tm_sticks) * _clock_res_usec;
+ /* Check for time quantum exceeded: */
+ if (thread->slice_usec > TIMESLICE_USEC)
+ thread->slice_usec = -1;
}
- _kern_idle_running = 1;
- if ((pthread == NULL) ||
- (pthread->wakeup_time.tv_sec == -1))
+ if (thread->slice_usec == -1) {
+ /*
+ * The thread exceeded its time quantum or
+ * it yielded the CPU; place it at the tail
+ * of the queue for its priority.
+ */
+ KSE_RUNQ_INSERT_TAIL(kse, thread);
+ } else {
/*
- * Nothing is waiting on a timeout, so
- * idling gains us nothing; spin.
+			 * The thread hasn't exceeded its interval.
+ * Place it at the head of the queue for its
+ * priority.
*/
- continue;
- TIMESPEC_TO_TIMEVAL(&_kern_idle_timeout,
- &pthread->wakeup_time);
- _thread_switch(&_idle_thr_mailbox,
- &_thread_kern_kse_mailbox.km_curthread);
+ KSE_RUNQ_INSERT_HEAD(kse, thread);
+ }
}
- DBG_MSG("looping\n");
}
- /* There are no threads; exit. */
- DBG_MSG("No threads, exiting.\n");
- exit(0);
+ thread->active = 0;
+ thread->need_switchout = 0;
}
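+
+/*
+ * Illustrative note, not part of this change: for a SCHED_RR thread that
+ * is still runnable, the accounting above amounts to
+ *
+ *	slice_usec += (tm_uticks + tm_sticks) * _clock_res_usec;
+ *
+ * and once slice_usec exceeds TIMESLICE_USEC (or the thread yielded on
+ * its own) the slice is reset to -1 and the thread is requeued at the
+ * tail of its priority; otherwise it goes back to the head.
+ */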
-void
-_thread_kern_sched_state(enum pthread_state state, char *fname, int lineno)
+/*
+ * This function waits for the smallest timeout value of any waiting
+ * thread, or until it receives a message from another KSE.
+ *
+ * This must be called with the scheduling lock held.
+ */
+static void
+kse_wait(struct kse *kse)
{
- struct pthread *curthread = _get_curthread();
+ struct timespec *ts, ts_sleep;
+ struct pthread *td_wait, *td_run;
- /*
- * Flag the pthread kernel as executing scheduler code
- * to avoid an upcall from interrupting this execution
- * and calling the scheduler again.
- */
- _thread_kern_kse_mailbox.km_curthread = NULL;
+ ts = &kse->k_mbx.km_timeofday;
+ KSE_SET_WAIT(kse);
- /* Change the state of the current thread: */
- curthread->state = state;
- curthread->fname = fname;
- curthread->lineno = lineno;
+ td_wait = KSE_WAITQ_FIRST(kse);
+ td_run = KSE_RUNQ_FIRST(kse);
+ KSE_SCHED_UNLOCK(kse, kse->k_kseg);
- /* Schedule the next thread that is ready: */
- _thread_kern_sched();
+ if (td_run == NULL) {
+ if ((td_wait == NULL) || (td_wait->wakeup_time.tv_sec < 0)) {
+ /* Limit sleep to no more than 2 minutes. */
+ ts_sleep.tv_sec = 120;
+ ts_sleep.tv_nsec = 0;
+ } else {
+ TIMESPEC_SUB(&ts_sleep, &td_wait->wakeup_time, ts);
+ if (ts_sleep.tv_sec > 120) {
+ ts_sleep.tv_sec = 120;
+ ts_sleep.tv_nsec = 0;
+ }
+ }
+ if ((ts_sleep.tv_sec >= 0) && (ts_sleep.tv_nsec >= 0)) {
+ /* Don't sleep for negative times. */
+ kse_release(&ts_sleep);
+ /*
+ * The above never returns.
+ * XXX - Actually, it would be nice if it did
+			 * for KSEs with only one thread.
+ */
+ }
+ }
+ KSE_CLEAR_WAIT(kse);
}
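+
+/*
+ * Illustrative note, not part of this change: the sleep time handed to
+ * kse_release() is the delta to the earliest wakeup, clamped to two
+ * minutes.  With km_timeofday at {100, 0} and the first waiter waking
+ * at {100, 750000000}, TIMESPEC_SUB() yields {0, 750000000} and the
+ * KSE sleeps for at most 0.75 seconds; with no timed waiter it sleeps
+ * for the full 120 seconds.
+ */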
-void
-_thread_kern_sched_state_unlock(enum pthread_state state,
- spinlock_t *lock, char *fname, int lineno)
+/*
+ * This is named kse_fini() rather than kse_exit() so that it isn't
+ * confused with the system call of the same name.
+ */
+static void
+kse_fini(struct kse *kse)
{
- struct pthread *curthread = _get_curthread();
+ struct timespec ts;
/*
- * Flag the pthread kernel as executing scheduler code
- * to avoid an upcall from interrupting this execution
- * and calling the scheduler again.
+ * Check to see if this is the main kse.
*/
- _thread_kern_kse_mailbox.km_curthread = NULL;
-
- /* Change the state of the current thread: */
- curthread->state = state;
- curthread->fname = fname;
- curthread->lineno = lineno;
-
- _SPINUNLOCK(lock);
+ if (kse == _kse_initial) {
+ /*
+ * Wait for the last KSE/thread to exit, or for more
+ * threads to be created (it is possible for additional
+ * scope process threads to be created after the main
+ * thread exits).
+ */
+ ts.tv_sec = 120;
+ ts.tv_nsec = 0;
+ KSE_SET_WAIT(kse);
+ KSE_SCHED_LOCK(kse, kse->k_kseg);
+ if ((active_kse_count > 1) &&
+ (kse->k_kseg->kg_threadcount == 0)) {
+ KSE_SCHED_UNLOCK(kse, kse->k_kseg);
+ /*
+ * XXX - We need a way for the KSE to do a timed
+ * wait.
+ */
+ kse_release(&ts);
+ /* The above never returns. */
+ }
+ KSE_SCHED_UNLOCK(kse, kse->k_kseg);
- /* Schedule the next thread that is ready: */
- _thread_kern_sched();
+ /* There are no more threads; exit this process: */
+ if (kse->k_kseg->kg_threadcount == 0) {
+ /* kse_exit(); */
+ __isthreaded = 0;
+ exit(0);
+ }
+ } else {
+ /* Mark this KSE for GC: */
+ KSE_LOCK_ACQUIRE(kse, &_thread_list_lock);
+ TAILQ_INSERT_TAIL(&free_kseq, kse, k_qe);
+ KSE_LOCK_RELEASE(kse, &_thread_list_lock);
+ kse_exit();
+ }
}
-/*
- * Block until the next timeout.
- */
void
-_thread_kern_idle(void)
+_thr_sig_add(struct pthread *thread, int sig, siginfo_t *info, ucontext_t *ucp)
{
- struct timespec ts;
- struct timeval timeout;
+ struct kse *curkse;
- for (;;) {
- timersub(&_kern_idle_timeout, &_sched_tod, &timeout);
- TIMEVAL_TO_TIMESPEC(&timeout, &ts);
- __sys_nanosleep(&ts, NULL);
+ curkse = _get_curkse();
+
+ KSE_SCHED_LOCK(curkse, thread->kseg);
+ /*
+	 * A thread's assigned KSE can't change out from under us
+	 * while we hold the scheduling lock.
+ */
+ if (THR_IS_ACTIVE(thread)) {
+ /* Thread is active. Can't install the signal for it. */
+ /* Make a note in the thread that it has a signal. */
+ sigaddset(&thread->sigpend, sig);
+ thread->check_pending = 1;
}
+ else {
+ /* Make a note in the thread that it has a signal. */
+ sigaddset(&thread->sigpend, sig);
+ thread->check_pending = 1;
+
+ if (thread->blocked != 0) {
+ /* Tell the kernel to interrupt the thread. */
+ kse_thr_interrupt(&thread->tmbx);
+ }
+ }
+ KSE_SCHED_UNLOCK(curkse, thread->kseg);
}
void
-_thread_kern_set_timeout(const struct timespec * timeout)
+_thr_set_timeout(const struct timespec *timeout)
{
struct pthread *curthread = _get_curthread();
- struct timespec current_time;
- struct timeval tv;
+ struct timespec ts;
/* Reset the timeout flag for the running thread: */
curthread->timeout = 0;
@@ -514,94 +1494,311 @@ _thread_kern_set_timeout(const struct timespec * timeout)
curthread->wakeup_time.tv_nsec = -1;
}
/* Check if no waiting is required: */
- else if (timeout->tv_sec == 0 && timeout->tv_nsec == 0) {
+ else if ((timeout->tv_sec == 0) && (timeout->tv_nsec == 0)) {
/* Set the wake up time to 'immediately': */
curthread->wakeup_time.tv_sec = 0;
curthread->wakeup_time.tv_nsec = 0;
} else {
- /* Get the current time: */
- GET_CURRENT_TOD(tv);
- TIMEVAL_TO_TIMESPEC(&tv, &current_time);
-
- /* Calculate the time for the current thread to wake up: */
- curthread->wakeup_time.tv_sec = current_time.tv_sec + timeout->tv_sec;
- curthread->wakeup_time.tv_nsec = current_time.tv_nsec + timeout->tv_nsec;
-
- /* Check if the nanosecond field needs to wrap: */
- if (curthread->wakeup_time.tv_nsec >= 1000000000) {
- /* Wrap the nanosecond field: */
- curthread->wakeup_time.tv_sec += 1;
- curthread->wakeup_time.tv_nsec -= 1000000000;
- }
+		/* Calculate the time for the current thread to wake up: */
+ KSE_GET_TOD(curthread->kse, &ts);
+ TIMESPEC_ADD(&curthread->wakeup_time, &ts, timeout);
}
}
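+
+/*
+ * Illustrative sketch, not used by the library as written: a typical
+ * caller converts a relative timeout into an absolute wakeup time and
+ * then switches out.  The 50ms value and the use of PS_SLEEP_WAIT here
+ * are only examples.
+ */
+#if 0
+	struct timespec ts = { 0, 50000000 };	/* 50ms relative timeout */
+
+	_thr_set_timeout(&ts);
+	THR_SET_STATE(curthread, PS_SLEEP_WAIT);
+	_thr_sched_switch(curthread);
+	if (curthread->timeout != 0) {
+		/* The wakeup time passed before the thread was woken. */
+	}
+#endif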
void
-_thread_kern_sig_defer(void)
+_thr_panic_exit(char *file, int line, char *msg)
{
- struct pthread *curthread = _get_curthread();
+ char buf[256];
- /* Allow signal deferral to be recursive. */
- curthread->sig_defer_count++;
+ snprintf(buf, sizeof(buf), "(%s:%d) %s\n", file, line, msg);
+ __sys_write(2, buf, strlen(buf));
+ abort();
}
void
-_thread_kern_sig_undefer(void)
+_thr_setrunnable(struct pthread *curthread, struct pthread *thread)
{
- struct pthread *curthread = _get_curthread();
+ kse_critical_t crit;
+
+ crit = _kse_critical_enter();
+ KSE_SCHED_LOCK(curthread->kse, thread->kseg);
+ _thr_setrunnable_unlocked(thread);
+ KSE_SCHED_UNLOCK(curthread->kse, thread->kseg);
+ _kse_critical_leave(crit);
+}
+
+void
+_thr_setrunnable_unlocked(struct pthread *thread)
+{
+ if ((thread->kseg->kg_flags & KGF_SINGLE_THREAD) != 0)
+ /* No silly queues for these threads. */
+ THR_SET_STATE(thread, PS_RUNNING);
+ else {
+ if ((thread->flags & THR_FLAGS_IN_WAITQ) != 0)
+ KSE_WAITQ_REMOVE(thread->kse, thread);
+ THR_SET_STATE(thread, PS_RUNNING);
+ if ((thread->blocked == 0) &&
+ (thread->flags & THR_FLAGS_IN_RUNQ) == 0)
+ THR_RUNQ_INSERT_TAIL(thread);
+ }
+ /*
+ * XXX - Threads are not yet assigned to specific KSEs; they are
+ * assigned to the KSEG. So the fact that a thread's KSE is
+ * waiting doesn't necessarily mean that it will be the KSE
+ * that runs the thread after the lock is granted. But we
+ * don't know if the other KSEs within the same KSEG are
+	 * also in a waiting state or not, so we err on the side of
+	 * caution and wake up the thread's last known KSE.  We
+	 * ensure that a thread's KSE doesn't change while its
+	 * scheduling lock is held, so it is safe to reference it
+ * (the KSE). If the KSE wakes up and doesn't find any more
+ * work it will again go back to waiting so no harm is done.
+ */
+ if (KSE_WAITING(thread->kse))
+ KSE_WAKEUP(thread->kse);
+}
+
+struct pthread *
+_get_curthread(void)
+{
+ return (_ksd_curthread);
+}
+
+/* This assumes the caller has disabled upcalls. */
+struct kse *
+_get_curkse(void)
+{
+ return (_ksd_curkse);
+}
+
+void
+_set_curkse(struct kse *kse)
+{
+ _ksd_setprivate(&kse->k_ksd);
+}
+
+/*
+ * Allocate a new KSEG.
+ *
+ * We allow the current KSE (curkse) to be NULL in the case that this
+ * is the first time a KSEG is being created (library initialization).
+ * In this case, we don't need to (and can't) take any locks.
+ */
+struct kse_group *
+_kseg_alloc(struct kse *curkse)
+{
+ struct kse_group *kseg = NULL;
+
+ if ((curkse != NULL) && (free_kseg_count > 0)) {
+ /* Use the kse lock for the kseg queue. */
+ KSE_LOCK_ACQUIRE(curkse, &kse_lock);
+ if ((kseg = TAILQ_FIRST(&free_kse_groupq)) != NULL) {
+ TAILQ_REMOVE(&free_kse_groupq, kseg, kg_qe);
+ free_kseg_count--;
+ active_kseg_count++;
+ TAILQ_INSERT_TAIL(&active_kse_groupq, kseg, kg_qe);
+ }
+ KSE_LOCK_RELEASE(curkse, &kse_lock);
+ }
/*
- * Perform checks to yield only if we are about to undefer
- * signals.
+	 * If a KSE group wasn't found in the free list, attempt to
+	 * allocate a new one.
*/
- if (curthread->sig_defer_count > 1) {
- /* Decrement the signal deferral count. */
- curthread->sig_defer_count--;
+ if ((kseg == NULL) &&
+ ((kseg = (struct kse_group *)malloc(sizeof(*kseg))) != NULL)) {
+ THR_ASSERT(_pq_alloc(&kseg->kg_schedq.sq_runq,
+ THR_MIN_PRIORITY, THR_LAST_PRIORITY) == 0,
+ "Unable to allocate priority queue.");
+ kseg_init(kseg);
+ if (curkse != NULL)
+ KSE_LOCK_ACQUIRE(curkse, &kse_lock);
+ kseg_free(kseg);
+ if (curkse != NULL)
+ KSE_LOCK_RELEASE(curkse, &kse_lock);
+ }
+ return (kseg);
+}
+
+/*
+ * This must be called with the kse lock held and when there are
+ * no more threads that reference it.
+ */
+static void
+kseg_free(struct kse_group *kseg)
+{
+ TAILQ_INSERT_HEAD(&free_kse_groupq, kseg, kg_qe);
+ kseg_init(kseg);
+ free_kseg_count++;
+ active_kseg_count--;
+}
+
+/*
+ * Allocate a new KSE.
+ *
+ * We allow the current KSE (curkse) to be NULL in the case that this
+ * is the first time a KSE is being created (library initialization).
+ * In this case, we don't need to (and can't) take any locks.
+ */
+struct kse *
+_kse_alloc(struct kse *curkse)
+{
+ struct kse *kse = NULL;
+ int need_ksd = 0;
+ int i;
+
+ if ((curkse != NULL) && (free_kse_count > 0)) {
+ KSE_LOCK_ACQUIRE(curkse, &kse_lock);
+ /* Search for a finished KSE. */
+ kse = TAILQ_FIRST(&free_kseq);
+#define KEMBX_DONE 0x01
+ while ((kse != NULL) &&
+ ((kse->k_mbx.km_flags & KEMBX_DONE) == 0)) {
+ kse = TAILQ_NEXT(kse, k_qe);
+ }
+#undef KEMBX_DONE
+ if (kse != NULL) {
+ TAILQ_REMOVE(&free_kseq, kse, k_qe);
+ free_kse_count--;
+ active_kse_count++;
+ TAILQ_INSERT_TAIL(&active_kseq, kse, k_qe);
+ }
+ KSE_LOCK_RELEASE(curkse, &kse_lock);
}
- else if (curthread->sig_defer_count == 1) {
- /* Reenable signals: */
- curthread->sig_defer_count = 0;
+ if ((kse == NULL) &&
+ ((kse = (struct kse *)malloc(sizeof(*kse))) != NULL)) {
+ bzero(kse, sizeof(*kse));
+
+ /* Initialize the lockusers. */
+ for (i = 0; i < MAX_KSE_LOCKLEVEL; i++) {
+ _lockuser_init(&kse->k_lockusers[i], (void *)kse);
+ _LCK_SET_PRIVATE2(&kse->k_lockusers[i], NULL);
+ }
+
+ /* We had to malloc a kse; mark it as needing a new ID.*/
+ need_ksd = 1;
/*
- * Check for asynchronous cancellation before delivering any
- * pending signals:
+ * Create the KSE context.
+ *
+ * XXX - For now this is done here in the allocation.
+ * In the future, we may want to have it done
+ * outside the allocation so that scope system
+ * threads (one thread per KSE) are not required
+ * to have a stack for an unneeded kse upcall.
*/
- if (((curthread->cancelflags & PTHREAD_AT_CANCEL_POINT) == 0) &&
- ((curthread->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS) != 0))
- pthread_testcancel();
+ kse->k_mbx.km_func = kse_entry;
+ kse->k_mbx.km_stack.ss_sp = (char *)malloc(KSE_STACKSIZE);
+ kse->k_mbx.km_stack.ss_size = KSE_STACKSIZE;
+ kse->k_mbx.km_udata = (void *)kse;
+ kse->k_mbx.km_quantum = 20000;
+		if (kse->k_mbx.km_stack.ss_sp == NULL) {
+ free(kse);
+ kse = NULL;
+ }
}
+ if ((kse != NULL) && (need_ksd != 0)) {
+ /* This KSE needs initialization. */
+ if (curkse != NULL)
+ KSE_LOCK_ACQUIRE(curkse, &kse_lock);
+ /* Initialize KSD inside of the lock. */
+ if (_ksd_create(&kse->k_ksd, (void *)kse, sizeof(*kse)) != 0) {
+ if (curkse != NULL)
+ KSE_LOCK_RELEASE(curkse, &kse_lock);
+ free(kse->k_mbx.km_stack.ss_sp);
+ for (i = 0; i < MAX_KSE_LOCKLEVEL; i++) {
+ _lockuser_destroy(&kse->k_lockusers[i]);
+ }
+ free(kse);
+ return (NULL);
+ }
+ kse->k_flags = 0;
+ active_kse_count++;
+ TAILQ_INSERT_TAIL(&active_kseq, kse, k_qe);
+ if (curkse != NULL)
+ KSE_LOCK_RELEASE(curkse, &kse_lock);
+
+ }
+ return (kse);
}
-static inline void
-thread_run_switch_hook(pthread_t thread_out, pthread_t thread_in)
+void
+_kse_free(struct kse *curkse, struct kse *kse)
{
- pthread_t tid_out = thread_out;
- pthread_t tid_in = thread_in;
-
- if ((tid_out != NULL) &&
- (tid_out->flags & PTHREAD_FLAGS_PRIVATE) != 0)
- tid_out = NULL;
- if ((tid_in != NULL) &&
- (tid_in->flags & PTHREAD_FLAGS_PRIVATE) != 0)
- tid_in = NULL;
-
- if ((_sched_switch_hook != NULL) && (tid_out != tid_in)) {
- /* Run the scheduler switch hook: */
- _sched_switch_hook(tid_out, tid_in);
+ struct kse_group *kseg = NULL;
+
+ if (curkse == kse)
+ PANIC("KSE trying to free itself");
+ KSE_LOCK_ACQUIRE(curkse, &kse_lock);
+ active_kse_count--;
+ if ((kseg = kse->k_kseg) != NULL) {
+ TAILQ_REMOVE(&kseg->kg_kseq, kse, k_qe);
+ /*
+ * Free the KSEG if there are no more threads associated
+ * with it.
+ */
+ if (TAILQ_EMPTY(&kseg->kg_threadq))
+ kseg_free(kseg);
}
+ kse->k_kseg = NULL;
+ kse->k_flags &= ~KF_INITIALIZED;
+ TAILQ_INSERT_HEAD(&free_kseq, kse, k_qe);
+ free_kse_count++;
+ KSE_LOCK_RELEASE(curkse, &kse_lock);
}
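+
+/*
+ * Illustrative sketch, not used by the library as written: _kse_alloc()
+ * and _kse_free() form a simple cache.  A freed KSE stays on free_kseq
+ * and is handed out again once the kernel marks its mailbox done, so
+ * repeated KSE creation avoids the malloc() and _ksd_create() costs.
+ */
+#if 0
+	struct kse *kse;
+
+	if ((kse = _kse_alloc(curkse)) == NULL)
+		PANIC("Cannot allocate KSE");
+	/* ... attach the KSE to a KSEG and start it ... */
+	_kse_free(curkse, kse);		/* back onto free_kseq for reuse */
+#endif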
-struct pthread *
-_get_curthread(void)
+static void
+kseg_init(struct kse_group *kseg)
{
- if (_thread_initial == NULL)
- _thread_init();
+ TAILQ_INIT(&kseg->kg_kseq);
+ TAILQ_INIT(&kseg->kg_threadq);
+ TAILQ_INIT(&kseg->kg_schedq.sq_waitq);
+ TAILQ_INIT(&kseg->kg_schedq.sq_blockedq);
+ _lock_init(&kseg->kg_lock, LCK_ADAPTIVE, _kse_lock_wait,
+ _kse_lock_wakeup);
+ kseg->kg_threadcount = 0;
+ kseg->kg_idle_kses = 0;
+ kseg->kg_flags = 0;
+}
- return (_thread_run);
+struct pthread *
+_thr_alloc(struct pthread *curthread)
+{
+	kse_critical_t crit;
+	struct kse *curkse;
+ struct pthread *thread = NULL;
+
+ if (curthread != NULL) {
+ if (_gc_check != 0)
+			thr_gc(curthread);
+ if (free_thread_count > 0) {
+			crit = _kse_critical_enter();
+			curkse = _get_curkse();
+			KSE_LOCK_ACQUIRE(curkse, &thread_lock);
+ if ((thread = TAILQ_FIRST(&free_threadq)) != NULL) {
+ TAILQ_REMOVE(&free_threadq, thread, tle);
+ free_thread_count--;
+ }
+			KSE_LOCK_RELEASE(curkse, &thread_lock);
+			_kse_critical_leave(crit);
+ }
+ }
+ if (thread == NULL)
+ thread = (struct pthread *)malloc(sizeof(struct pthread));
+ return (thread);
}
void
-_set_curthread(struct pthread *newthread)
+_thr_free(struct pthread *curthread, struct pthread *thread)
{
- _thread_run = newthread;
+	kse_critical_t crit;
+	struct kse *curkse;
+
+ if ((curthread == NULL) || (free_thread_count >= MAX_CACHED_THREADS))
+ free(thread);
+ else {
+		crit = _kse_critical_enter();
+		curkse = _get_curkse();
+		KSE_LOCK_ACQUIRE(curkse, &thread_lock);
+ TAILQ_INSERT_HEAD(&free_threadq, thread, tle);
+ free_thread_count++;
+ KSE_LOCK_RELEASE(curkse, &thread_lock);
+ _kse_critical_leave(crit);
+ }
}
diff --git a/lib/libpthread/thread/thr_kill.c b/lib/libpthread/thread/thr_kill.c
index 24f9150..19f34bb 100644
--- a/lib/libpthread/thread/thr_kill.c
+++ b/lib/libpthread/thread/thr_kill.c
@@ -41,8 +41,26 @@ __weak_reference(_pthread_kill, pthread_kill);
int
_pthread_kill(pthread_t pthread, int sig)
{
+ struct pthread *curthread = _get_curthread();
+ int ret;
+
+ /* Check for invalid signal numbers: */
+ if (sig < 0 || sig >= NSIG)
+ /* Invalid signal: */
+ ret = EINVAL;
/*
- * All signals are unsupported.
+ * Ensure the thread is in the list of active threads, and the
+ * signal is valid (signal 0 specifies error checking only) and
+ * not being ignored:
*/
- return (EINVAL);
+ else if ((ret = _thr_ref_add(curthread, pthread, /*include dead*/0))
+ == 0) {
+ if ((sig > 0) &&
+ (_thread_sigact[sig - 1].sa_handler != SIG_IGN))
+ _thr_sig_send(pthread, sig);
+ _thr_ref_delete(curthread, pthread);
+ }
+
+ /* Return the completion status: */
+ return (ret);
}
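+
+/*
+ * Illustrative example, not part of this change: since signal 0 only
+ * performs the validity checks, an application can probe whether a
+ * thread still exists without delivering anything:
+ *
+ *	if (pthread_kill(tid, 0) != 0)
+ *		the thread identifier is no longer valid
+ */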
diff --git a/lib/libpthread/thread/thr_main_np.c b/lib/libpthread/thread/thr_main_np.c
index 1d5849d..0dacd48 100644
--- a/lib/libpthread/thread/thr_main_np.c
+++ b/lib/libpthread/thread/thr_main_np.c
@@ -40,8 +40,8 @@ int
_pthread_main_np()
{
- if (!_thread_initial)
+ if (!_thr_initial)
return (-1);
else
- return (pthread_equal(pthread_self(), _thread_initial) ? 1 : 0);
+ return (pthread_equal(pthread_self(), _thr_initial) ? 1 : 0);
}
diff --git a/lib/libpthread/thread/thr_mattr_init.c b/lib/libpthread/thread/thr_mattr_init.c
index 6b403d6..d5a7a18 100644
--- a/lib/libpthread/thread/thr_mattr_init.c
+++ b/lib/libpthread/thread/thr_mattr_init.c
@@ -46,13 +46,13 @@ _pthread_mutexattr_init(pthread_mutexattr_t *attr)
pthread_mutexattr_t pattr;
if ((pattr = (pthread_mutexattr_t)
- malloc(sizeof(struct pthread_mutex_attr))) == NULL) {
+ malloc(sizeof(struct pthread_mutex_attr))) == NULL) {
ret = ENOMEM;
} else {
- memcpy(pattr, &pthread_mutexattr_default,
- sizeof(struct pthread_mutex_attr));
+ memcpy(pattr, &_pthread_mutexattr_default,
+ sizeof(struct pthread_mutex_attr));
*attr = pattr;
ret = 0;
}
- return(ret);
+ return (ret);
}
diff --git a/lib/libpthread/thread/thr_msync.c b/lib/libpthread/thread/thr_msync.c
index 302f314f..24b78ec 100644
--- a/lib/libpthread/thread/thr_msync.c
+++ b/lib/libpthread/thread/thr_msync.c
@@ -16,6 +16,7 @@ __weak_reference(__msync, msync);
int
__msync(void *addr, size_t len, int flags)
{
+ struct pthread *curthread = _get_curthread();
int ret;
/*
@@ -24,9 +25,9 @@ __msync(void *addr, size_t len, int flags)
* write. The only real use of this wrapper is to guarantee
* a cancellation point, as per the standard. sigh.
*/
- _thread_enter_cancellation_point();
+ _thr_enter_cancellation_point(curthread);
ret = __sys_msync(addr, len, flags);
- _thread_leave_cancellation_point();
+ _thr_leave_cancellation_point(curthread);
return ret;
}
diff --git a/lib/libpthread/thread/thr_mutex.c b/lib/libpthread/thread/thr_mutex.c
index 7b22fb5..1ae12ea9 100644
--- a/lib/libpthread/thread/thr_mutex.c
+++ b/lib/libpthread/thread/thr_mutex.c
@@ -40,47 +40,52 @@
#include "thr_private.h"
#if defined(_PTHREADS_INVARIANTS)
-#define _MUTEX_INIT_LINK(m) do { \
+#define MUTEX_INIT_LINK(m) do { \
(m)->m_qe.tqe_prev = NULL; \
(m)->m_qe.tqe_next = NULL; \
} while (0)
-#define _MUTEX_ASSERT_IS_OWNED(m) do { \
+#define MUTEX_ASSERT_IS_OWNED(m) do { \
if ((m)->m_qe.tqe_prev == NULL) \
PANIC("mutex is not on list"); \
} while (0)
-#define _MUTEX_ASSERT_NOT_OWNED(m) do { \
+#define MUTEX_ASSERT_NOT_OWNED(m) do { \
if (((m)->m_qe.tqe_prev != NULL) || \
((m)->m_qe.tqe_next != NULL)) \
PANIC("mutex is on list"); \
} while (0)
+#define THR_ASSERT_NOT_IN_SYNCQ(thr) do { \
+ THR_ASSERT(((thr)->sflags & THR_FLAGS_IN_SYNCQ) == 0, \
+ "thread in syncq when it shouldn't be."); \
+} while (0)
#else
-#define _MUTEX_INIT_LINK(m)
-#define _MUTEX_ASSERT_IS_OWNED(m)
-#define _MUTEX_ASSERT_NOT_OWNED(m)
+#define MUTEX_INIT_LINK(m)
+#define MUTEX_ASSERT_IS_OWNED(m)
+#define MUTEX_ASSERT_NOT_OWNED(m)
+#define THR_ASSERT_NOT_IN_SYNCQ(thr)
#endif
/*
* Prototypes
*/
-static inline int mutex_self_trylock(pthread_mutex_t);
-static inline int mutex_self_lock(pthread_mutex_t);
-static inline int mutex_unlock_common(pthread_mutex_t *, int);
-static void mutex_priority_adjust(pthread_mutex_t);
-static void mutex_rescan_owned (pthread_t, pthread_mutex_t);
+static void mutex_handoff(struct pthread *, struct pthread_mutex *);
+static inline int mutex_self_trylock(struct pthread *, pthread_mutex_t);
+static inline int mutex_self_lock(struct pthread *, pthread_mutex_t);
+static int mutex_unlock_common(pthread_mutex_t *, int);
+static void mutex_priority_adjust(struct pthread *, pthread_mutex_t);
+static void mutex_rescan_owned (struct pthread *, struct pthread *,
+ struct pthread_mutex *);
static inline pthread_t mutex_queue_deq(pthread_mutex_t);
static inline void mutex_queue_remove(pthread_mutex_t, pthread_t);
static inline void mutex_queue_enq(pthread_mutex_t, pthread_t);
-static spinlock_t static_init_lock = _SPINLOCK_INITIALIZER;
-
static struct pthread_mutex_attr static_mutex_attr =
PTHREAD_MUTEXATTR_STATIC_INITIALIZER;
static pthread_mutexattr_t static_mattr = &static_mutex_attr;
/* Single underscore versions provided for libc internal usage: */
-__weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);
__weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
+__weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);
/* No difference between libc and application usage of these: */
__weak_reference(_pthread_mutex_init, pthread_mutex_init);
@@ -88,45 +93,16 @@ __weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
__weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);
-/* Reinitialize a mutex to defaults. */
-int
-_mutex_reinit(pthread_mutex_t * mutex)
-{
- int ret = 0;
-
- if (mutex == NULL)
- ret = EINVAL;
- else if (*mutex == NULL)
- ret = pthread_mutex_init(mutex, NULL);
- else {
- /*
- * Initialize the mutex structure:
- */
- (*mutex)->m_type = PTHREAD_MUTEX_DEFAULT;
- (*mutex)->m_protocol = PTHREAD_PRIO_NONE;
- TAILQ_INIT(&(*mutex)->m_queue);
- (*mutex)->m_owner = NULL;
- (*mutex)->m_data.m_count = 0;
- (*mutex)->m_flags &= MUTEX_FLAGS_PRIVATE;
- (*mutex)->m_flags |= MUTEX_FLAGS_INITED;
- (*mutex)->m_refcount = 0;
- (*mutex)->m_prio = 0;
- (*mutex)->m_saved_prio = 0;
- _MUTEX_INIT_LINK(*mutex);
- memset(&(*mutex)->lock, 0, sizeof((*mutex)->lock));
- }
- return (ret);
-}
int
-_pthread_mutex_init(pthread_mutex_t * mutex,
- const pthread_mutexattr_t * mutex_attr)
+_pthread_mutex_init(pthread_mutex_t *mutex,
+ const pthread_mutexattr_t *mutex_attr)
{
- enum pthread_mutextype type;
+ struct pthread_mutex *pmutex;
+ enum pthread_mutextype type;
int protocol;
int ceiling;
int flags;
- pthread_mutex_t pmutex;
int ret = 0;
if (mutex == NULL)
@@ -137,7 +113,7 @@ _pthread_mutex_init(pthread_mutex_t * mutex,
/* Default to a (error checking) POSIX mutex: */
type = PTHREAD_MUTEX_ERRORCHECK;
protocol = PTHREAD_PRIO_NONE;
- ceiling = PTHREAD_MAX_PRIORITY;
+ ceiling = THR_MAX_PRIORITY;
flags = 0;
}
@@ -166,7 +142,12 @@ _pthread_mutex_init(pthread_mutex_t * mutex,
if ((pmutex = (pthread_mutex_t)
malloc(sizeof(struct pthread_mutex))) == NULL)
ret = ENOMEM;
- else {
+ else if (_lock_init(&pmutex->m_lock, LCK_ADAPTIVE,
+ _thr_lock_wait, _thr_lock_wakeup) != 0) {
+ free(pmutex);
+ *mutex = NULL;
+ ret = ENOMEM;
+ } else {
/* Set the mutex flags: */
pmutex->m_flags = flags;
@@ -181,7 +162,7 @@ _pthread_mutex_init(pthread_mutex_t * mutex,
/* Single UNIX Spec 2 recursive mutex: */
case PTHREAD_MUTEX_RECURSIVE:
/* Reset the mutex count: */
- pmutex->m_data.m_count = 0;
+ pmutex->m_count = 0;
break;
/* Trap invalid mutex types: */
@@ -201,10 +182,9 @@ _pthread_mutex_init(pthread_mutex_t * mutex,
if (protocol == PTHREAD_PRIO_PROTECT)
pmutex->m_prio = ceiling;
else
- pmutex->m_prio = 0;
+ pmutex->m_prio = -1;
pmutex->m_saved_prio = 0;
- _MUTEX_INIT_LINK(pmutex);
- memset(&pmutex->lock, 0, sizeof(pmutex->lock));
+ MUTEX_INIT_LINK(pmutex);
*mutex = pmutex;
} else {
free(pmutex);
@@ -213,19 +193,21 @@ _pthread_mutex_init(pthread_mutex_t * mutex,
}
}
/* Return the completion status: */
- return(ret);
+ return (ret);
}
int
-_pthread_mutex_destroy(pthread_mutex_t * mutex)
+_pthread_mutex_destroy(pthread_mutex_t *mutex)
{
- int ret = 0;
+ struct pthread *curthread = _get_curthread();
+ pthread_mutex_t m;
+ int ret = 0;
if (mutex == NULL || *mutex == NULL)
ret = EINVAL;
else {
/* Lock the mutex structure: */
- _SPINLOCK(&(*mutex)->lock);
+ THR_LOCK_ACQUIRE(curthread, &(*mutex)->m_lock);
/*
* Check to see if this mutex is in use:
@@ -236,21 +218,24 @@ _pthread_mutex_destroy(pthread_mutex_t * mutex)
ret = EBUSY;
/* Unlock the mutex structure: */
- _SPINUNLOCK(&(*mutex)->lock);
- }
- else {
+ THR_LOCK_RELEASE(curthread, &(*mutex)->m_lock);
+ } else {
/*
- * Free the memory allocated for the mutex
- * structure:
+ * Save a pointer to the mutex so it can be free'd
+ * and set the caller's pointer to NULL:
*/
- _MUTEX_ASSERT_NOT_OWNED(*mutex);
- free(*mutex);
+ m = *mutex;
+ *mutex = NULL;
+
+ /* Unlock the mutex structure: */
+ THR_LOCK_RELEASE(curthread, &m->m_lock);
/*
- * Leave the caller's pointer NULL now that
- * the mutex has been destroyed:
+ * Free the memory allocated for the mutex
+ * structure:
*/
- *mutex = NULL;
+ MUTEX_ASSERT_NOT_OWNED(m);
+ free(m);
}
}
@@ -259,56 +244,49 @@ _pthread_mutex_destroy(pthread_mutex_t * mutex)
}
static int
-init_static(pthread_mutex_t *mutex)
+init_static(struct pthread *thread, pthread_mutex_t *mutex)
{
- int ret;
+ int ret;
- _SPINLOCK(&static_init_lock);
+ THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);
if (*mutex == NULL)
ret = pthread_mutex_init(mutex, NULL);
else
ret = 0;
- _SPINUNLOCK(&static_init_lock);
+ THR_LOCK_RELEASE(thread, &_mutex_static_lock);
- return(ret);
+ return (ret);
}
static int
-init_static_private(pthread_mutex_t *mutex)
+init_static_private(struct pthread *thread, pthread_mutex_t *mutex)
{
- int ret;
+ int ret;
- _SPINLOCK(&static_init_lock);
+ THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);
if (*mutex == NULL)
ret = pthread_mutex_init(mutex, &static_mattr);
else
ret = 0;
- _SPINUNLOCK(&static_init_lock);
+ THR_LOCK_RELEASE(thread, &_mutex_static_lock);
- return(ret);
+ return (ret);
}
static int
-mutex_trylock_common(pthread_mutex_t *mutex)
+mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex)
{
- struct pthread *curthread = _get_curthread();
- int ret = 0;
+ int ret = 0;
- PTHREAD_ASSERT((mutex != NULL) && (*mutex != NULL),
+ THR_ASSERT((mutex != NULL) && (*mutex != NULL),
"Uninitialized mutex in pthread_mutex_trylock_basic");
- /*
- * Defer signals to protect the scheduling queues from
- * access by the signal handler:
- */
- _thread_kern_sig_defer();
-
/* Lock the mutex structure: */
- _SPINLOCK(&(*mutex)->lock);
+ THR_LOCK_ACQUIRE(curthread, &(*mutex)->m_lock);
/*
* If the mutex was statically allocated, properly
@@ -316,7 +294,7 @@ mutex_trylock_common(pthread_mutex_t *mutex)
*/
if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
TAILQ_INIT(&(*mutex)->m_queue);
- _MUTEX_INIT_LINK(*mutex);
+ MUTEX_INIT_LINK(*mutex);
(*mutex)->m_flags |= MUTEX_FLAGS_INITED;
}
@@ -330,11 +308,11 @@ mutex_trylock_common(pthread_mutex_t *mutex)
(*mutex)->m_owner = curthread;
/* Add to the list of owned mutexes: */
- _MUTEX_ASSERT_NOT_OWNED(*mutex);
+ MUTEX_ASSERT_NOT_OWNED(*mutex);
TAILQ_INSERT_TAIL(&curthread->mutexq,
(*mutex), m_qe);
} else if ((*mutex)->m_owner == curthread)
- ret = mutex_self_trylock(*mutex);
+ ret = mutex_self_trylock(curthread, *mutex);
else
/* Return a busy error: */
ret = EBUSY;
@@ -359,11 +337,11 @@ mutex_trylock_common(pthread_mutex_t *mutex)
curthread->inherited_priority;
/* Add to the list of owned mutexes: */
- _MUTEX_ASSERT_NOT_OWNED(*mutex);
+ MUTEX_ASSERT_NOT_OWNED(*mutex);
TAILQ_INSERT_TAIL(&curthread->mutexq,
(*mutex), m_qe);
} else if ((*mutex)->m_owner == curthread)
- ret = mutex_self_trylock(*mutex);
+ ret = mutex_self_trylock(curthread, *mutex);
else
/* Return a busy error: */
ret = EBUSY;
@@ -395,11 +373,11 @@ mutex_trylock_common(pthread_mutex_t *mutex)
(*mutex)->m_prio;
/* Add to the list of owned mutexes: */
- _MUTEX_ASSERT_NOT_OWNED(*mutex);
+ MUTEX_ASSERT_NOT_OWNED(*mutex);
TAILQ_INSERT_TAIL(&curthread->mutexq,
(*mutex), m_qe);
} else if ((*mutex)->m_owner == curthread)
- ret = mutex_self_trylock(*mutex);
+ ret = mutex_self_trylock(curthread, *mutex);
else
/* Return a busy error: */
ret = EBUSY;
@@ -413,13 +391,7 @@ mutex_trylock_common(pthread_mutex_t *mutex)
}
/* Unlock the mutex structure: */
- _SPINUNLOCK(&(*mutex)->lock);
-
- /*
- * Undefer and handle pending signals, yielding if
- * necessary:
- */
- _thread_kern_sig_undefer();
+ THR_LOCK_RELEASE(curthread, &(*mutex)->m_lock);
/* Return the completion status: */
return (ret);
@@ -428,7 +400,8 @@ mutex_trylock_common(pthread_mutex_t *mutex)
int
__pthread_mutex_trylock(pthread_mutex_t *mutex)
{
- int ret = 0;
+ struct pthread *curthread = _get_curthread();
+ int ret = 0;
if (mutex == NULL)
ret = EINVAL;
@@ -437,8 +410,9 @@ __pthread_mutex_trylock(pthread_mutex_t *mutex)
* If the mutex is statically initialized, perform the dynamic
* initialization:
*/
- else if ((*mutex != NULL) || (ret = init_static(mutex)) == 0)
- ret = mutex_trylock_common(mutex);
+ else if ((*mutex != NULL) ||
+ ((ret = init_static(curthread, mutex)) == 0))
+ ret = mutex_trylock_common(curthread, mutex);
return (ret);
}
@@ -446,6 +420,7 @@ __pthread_mutex_trylock(pthread_mutex_t *mutex)
int
_pthread_mutex_trylock(pthread_mutex_t *mutex)
{
+ struct pthread *curthread = _get_curthread();
int ret = 0;
if (mutex == NULL)
@@ -455,19 +430,19 @@ _pthread_mutex_trylock(pthread_mutex_t *mutex)
* If the mutex is statically initialized, perform the dynamic
* initialization marking the mutex private (delete safe):
*/
- else if ((*mutex != NULL) || (ret = init_static_private(mutex)) == 0)
- ret = mutex_trylock_common(mutex);
+ else if ((*mutex != NULL) ||
+ ((ret = init_static_private(curthread, mutex)) == 0))
+ ret = mutex_trylock_common(curthread, mutex);
return (ret);
}
static int
-mutex_lock_common(pthread_mutex_t * mutex)
+mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m)
{
- struct pthread *curthread = _get_curthread();
int ret = 0;
- PTHREAD_ASSERT((mutex != NULL) && (*mutex != NULL),
+ THR_ASSERT((m != NULL) && (*m != NULL),
"Uninitialized mutex in pthread_mutex_trylock_basic");
/* Reset the interrupted flag: */
@@ -482,71 +457,68 @@ mutex_lock_common(pthread_mutex_t * mutex)
* waiting queue prior to executing the signal handler.
*/
do {
- /*
- * Defer signals to protect the scheduling queues from
- * access by the signal handler:
- */
- _thread_kern_sig_defer();
-
/* Lock the mutex structure: */
- _SPINLOCK(&(*mutex)->lock);
+ THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
/*
* If the mutex was statically allocated, properly
* initialize the tail queue.
*/
- if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
- TAILQ_INIT(&(*mutex)->m_queue);
- (*mutex)->m_flags |= MUTEX_FLAGS_INITED;
- _MUTEX_INIT_LINK(*mutex);
+ if (((*m)->m_flags & MUTEX_FLAGS_INITED) == 0) {
+ TAILQ_INIT(&(*m)->m_queue);
+ (*m)->m_flags |= MUTEX_FLAGS_INITED;
+ MUTEX_INIT_LINK(*m);
}
/* Process according to mutex type: */
- switch ((*mutex)->m_protocol) {
+ switch ((*m)->m_protocol) {
/* Default POSIX mutex: */
case PTHREAD_PRIO_NONE:
- if ((*mutex)->m_owner == NULL) {
+ if ((*m)->m_owner == NULL) {
/* Lock the mutex for this thread: */
- (*mutex)->m_owner = curthread;
+ (*m)->m_owner = curthread;
/* Add to the list of owned mutexes: */
- _MUTEX_ASSERT_NOT_OWNED(*mutex);
+ MUTEX_ASSERT_NOT_OWNED(*m);
TAILQ_INSERT_TAIL(&curthread->mutexq,
- (*mutex), m_qe);
+ (*m), m_qe);
- } else if ((*mutex)->m_owner == curthread)
- ret = mutex_self_lock(*mutex);
- else {
+ /* Unlock the mutex structure: */
+ THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
+ } else if ((*m)->m_owner == curthread) {
+ ret = mutex_self_lock(curthread, *m);
+
+ /* Unlock the mutex structure: */
+ THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
+ } else {
/*
* Join the queue of threads waiting to lock
- * the mutex:
+ * the mutex and save a pointer to the mutex.
*/
- mutex_queue_enq(*mutex, curthread);
+ mutex_queue_enq(*m, curthread);
+ curthread->data.mutex = *m;
/*
- * Keep a pointer to the mutex this thread
- * is waiting on:
+ * This thread is active and is in a critical
+ * region (holding the mutex lock); we should
+ * be able to safely set the state.
*/
- curthread->data.mutex = *mutex;
+ THR_SET_STATE(curthread, PS_MUTEX_WAIT);
- /*
- * Unlock the mutex structure and schedule the
- * next thread:
- */
- _thread_kern_sched_state_unlock(PS_MUTEX_WAIT,
- &(*mutex)->lock, __FILE__, __LINE__);
+ /* Unlock the mutex structure: */
+ THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
- /* Lock the mutex structure again: */
- _SPINLOCK(&(*mutex)->lock);
+ /* Schedule the next thread: */
+ _thr_sched_switch(curthread);
}
break;
/* POSIX priority inheritence mutex: */
case PTHREAD_PRIO_INHERIT:
/* Check if this mutex is not locked: */
- if ((*mutex)->m_owner == NULL) {
+ if ((*m)->m_owner == NULL) {
/* Lock the mutex for this thread: */
- (*mutex)->m_owner = curthread;
+ (*m)->m_owner = curthread;
/* Track number of priority mutexes owned: */
curthread->priority_mutex_count++;
@@ -554,63 +526,70 @@ mutex_lock_common(pthread_mutex_t * mutex)
/*
* The mutex takes on attributes of the
* running thread when there are no waiters.
+ * Make sure the thread's scheduling lock is
+ * held while priorities are adjusted.
*/
- (*mutex)->m_prio = curthread->active_priority;
- (*mutex)->m_saved_prio =
+ THR_SCHED_LOCK(curthread, curthread);
+ (*m)->m_prio = curthread->active_priority;
+ (*m)->m_saved_prio =
curthread->inherited_priority;
- curthread->inherited_priority =
- (*mutex)->m_prio;
+ curthread->inherited_priority = (*m)->m_prio;
+ THR_SCHED_UNLOCK(curthread, curthread);
/* Add to the list of owned mutexes: */
- _MUTEX_ASSERT_NOT_OWNED(*mutex);
+ MUTEX_ASSERT_NOT_OWNED(*m);
TAILQ_INSERT_TAIL(&curthread->mutexq,
- (*mutex), m_qe);
+ (*m), m_qe);
- } else if ((*mutex)->m_owner == curthread)
- ret = mutex_self_lock(*mutex);
- else {
+ /* Unlock the mutex structure: */
+ THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
+ } else if ((*m)->m_owner == curthread) {
+ ret = mutex_self_lock(curthread, *m);
+
+ /* Unlock the mutex structure: */
+ THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
+ } else {
/*
* Join the queue of threads waiting to lock
- * the mutex:
+ * the mutex and save a pointer to the mutex.
*/
- mutex_queue_enq(*mutex, curthread);
+ mutex_queue_enq(*m, curthread);
+ curthread->data.mutex = *m;
/*
- * Keep a pointer to the mutex this thread
- * is waiting on:
+ * This thread is active and is in a critical
+ * region (holding the mutex lock); we should
+ * be able to safely set the state.
*/
- curthread->data.mutex = *mutex;
+ THR_SET_STATE(curthread, PS_MUTEX_WAIT);
- if (curthread->active_priority >
- (*mutex)->m_prio)
+ if (curthread->active_priority > (*m)->m_prio)
/* Adjust priorities: */
- mutex_priority_adjust(*mutex);
+ mutex_priority_adjust(curthread, *m);
- /*
- * Unlock the mutex structure and schedule the
- * next thread:
- */
- _thread_kern_sched_state_unlock(PS_MUTEX_WAIT,
- &(*mutex)->lock, __FILE__, __LINE__);
+ /* Unlock the mutex structure: */
+ THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
- /* Lock the mutex structure again: */
- _SPINLOCK(&(*mutex)->lock);
+ /* Schedule the next thread: */
+ _thr_sched_switch(curthread);
}
break;
/* POSIX priority protection mutex: */
case PTHREAD_PRIO_PROTECT:
/* Check for a priority ceiling violation: */
- if (curthread->active_priority > (*mutex)->m_prio)
+ if (curthread->active_priority > (*m)->m_prio) {
+ /* Unlock the mutex structure: */
+ THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
ret = EINVAL;
-
+ }
/* Check if this mutex is not locked: */
- else if ((*mutex)->m_owner == NULL) {
+ else if ((*m)->m_owner == NULL) {
/*
* Lock the mutex for the running
* thread:
*/
- (*mutex)->m_owner = curthread;
+ (*m)->m_owner = curthread;
/* Track number of priority mutexes owned: */
curthread->priority_mutex_count++;
@@ -618,45 +597,52 @@ mutex_lock_common(pthread_mutex_t * mutex)
/*
* The running thread inherits the ceiling
* priority of the mutex and executes at that
- * priority:
+ * priority. Make sure the thread's
+ * scheduling lock is held while priorities
+ * are adjusted.
*/
- curthread->active_priority = (*mutex)->m_prio;
- (*mutex)->m_saved_prio =
+ THR_SCHED_LOCK(curthread, curthread);
+ curthread->active_priority = (*m)->m_prio;
+ (*m)->m_saved_prio =
curthread->inherited_priority;
- curthread->inherited_priority =
- (*mutex)->m_prio;
+ curthread->inherited_priority = (*m)->m_prio;
+ THR_SCHED_UNLOCK(curthread, curthread);
/* Add to the list of owned mutexes: */
- _MUTEX_ASSERT_NOT_OWNED(*mutex);
+ MUTEX_ASSERT_NOT_OWNED(*m);
TAILQ_INSERT_TAIL(&curthread->mutexq,
- (*mutex), m_qe);
- } else if ((*mutex)->m_owner == curthread)
- ret = mutex_self_lock(*mutex);
- else {
- /*
- * Join the queue of threads waiting to lock
- * the mutex:
- */
- mutex_queue_enq(*mutex, curthread);
+ (*m), m_qe);
+
+ /* Unlock the mutex structure: */
+ THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
+ } else if ((*m)->m_owner == curthread) {
+ ret = mutex_self_lock(curthread, *m);
+ /* Unlock the mutex structure: */
+ THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
+ } else {
/*
- * Keep a pointer to the mutex this thread
- * is waiting on:
+ * Join the queue of threads waiting to lock
+ * the mutex and save a pointer to the mutex.
*/
- curthread->data.mutex = *mutex;
+ mutex_queue_enq(*m, curthread);
+ curthread->data.mutex = *m;
/* Clear any previous error: */
curthread->error = 0;
/*
- * Unlock the mutex structure and schedule the
- * next thread:
+ * This thread is active and is in a critical
+ * region (holding the mutex lock); we should
+ * be able to safely set the state.
*/
- _thread_kern_sched_state_unlock(PS_MUTEX_WAIT,
- &(*mutex)->lock, __FILE__, __LINE__);
+ THR_SET_STATE(curthread, PS_MUTEX_WAIT);
+
+ /* Unlock the mutex structure: */
+ THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
- /* Lock the mutex structure again: */
- _SPINLOCK(&(*mutex)->lock);
+ /* Schedule the next thread: */
+ _thr_sched_switch(curthread);
/*
* The threads priority may have changed while
@@ -670,107 +656,111 @@ mutex_lock_common(pthread_mutex_t * mutex)
/* Trap invalid mutex types: */
default:
+ /* Unlock the mutex structure: */
+ THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
+
/* Return an invalid argument error: */
ret = EINVAL;
break;
}
- /*
- * Check to see if this thread was interrupted and
- * is still in the mutex queue of waiting threads:
- */
- if (curthread->interrupted != 0)
- mutex_queue_remove(*mutex, curthread);
-
- /* Unlock the mutex structure: */
- _SPINUNLOCK(&(*mutex)->lock);
-
- /*
- * Undefer and handle pending signals, yielding if
- * necessary:
- */
- _thread_kern_sig_undefer();
- } while (((*mutex)->m_owner != curthread) && (ret == 0) &&
+ } while (((*m)->m_owner != curthread) && (ret == 0) &&
(curthread->interrupted == 0));
- if (curthread->interrupted != 0 &&
- curthread->continuation != NULL)
- curthread->continuation((void *) curthread);
+ /*
+ * Check to see if this thread was interrupted and
+ * is still in the mutex queue of waiting threads:
+ */
+ if (curthread->interrupted != 0) {
+ /* Remove this thread from the mutex queue. */
+ THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
+ if (THR_IN_SYNCQ(curthread))
+ mutex_queue_remove(*m, curthread);
+ THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
+
+ /* Check for asynchronous cancellation. */
+ if (curthread->continuation != NULL)
+ curthread->continuation((void *) curthread);
+ }
/* Return the completion status: */
return (ret);
}
int
-__pthread_mutex_lock(pthread_mutex_t *mutex)
+__pthread_mutex_lock(pthread_mutex_t *m)
{
+ struct pthread *curthread;
int ret = 0;
- if (_thread_initial == NULL)
- _thread_init();
+ if (_thr_initial == NULL)
+ _libpthread_init(NULL);
- if (mutex == NULL)
+ curthread = _get_curthread();
+ if (m == NULL)
ret = EINVAL;
/*
* If the mutex is statically initialized, perform the dynamic
* initialization:
*/
- else if ((*mutex != NULL) || ((ret = init_static(mutex)) == 0))
- ret = mutex_lock_common(mutex);
+ else if ((*m != NULL) || ((ret = init_static(curthread, m)) == 0))
+ ret = mutex_lock_common(curthread, m);
return (ret);
}
int
-_pthread_mutex_lock(pthread_mutex_t *mutex)
+_pthread_mutex_lock(pthread_mutex_t *m)
{
+ struct pthread *curthread;
int ret = 0;
- if (_thread_initial == NULL)
- _thread_init();
+ if (_thr_initial == NULL)
+ _libpthread_init(NULL);
+ curthread = _get_curthread();
- if (mutex == NULL)
+ if (m == NULL)
ret = EINVAL;
/*
* If the mutex is statically initialized, perform the dynamic
* initialization marking it private (delete safe):
*/
- else if ((*mutex != NULL) || ((ret = init_static_private(mutex)) == 0))
- ret = mutex_lock_common(mutex);
+ else if ((*m != NULL) ||
+ ((ret = init_static_private(curthread, m)) == 0))
+ ret = mutex_lock_common(curthread, m);
return (ret);
}
int
-_pthread_mutex_unlock(pthread_mutex_t * mutex)
+_pthread_mutex_unlock(pthread_mutex_t *m)
{
- return (mutex_unlock_common(mutex, /* add reference */ 0));
+ return (mutex_unlock_common(m, /* add reference */ 0));
}
int
-_mutex_cv_unlock(pthread_mutex_t * mutex)
+_mutex_cv_unlock(pthread_mutex_t *m)
{
- return (mutex_unlock_common(mutex, /* add reference */ 1));
+ return (mutex_unlock_common(m, /* add reference */ 1));
}
int
-_mutex_cv_lock(pthread_mutex_t * mutex)
+_mutex_cv_lock(pthread_mutex_t *m)
{
int ret;
- if ((ret = pthread_mutex_lock(mutex)) == 0)
- (*mutex)->m_refcount--;
+ if ((ret = _pthread_mutex_lock(m)) == 0)
+ (*m)->m_refcount--;
return (ret);
}
static inline int
-mutex_self_trylock(pthread_mutex_t mutex)
+mutex_self_trylock(struct pthread *curthread, pthread_mutex_t m)
{
int ret = 0;
- switch (mutex->m_type) {
-
+ switch (m->m_type) {
/* case PTHREAD_MUTEX_DEFAULT: */
case PTHREAD_MUTEX_ERRORCHECK:
case PTHREAD_MUTEX_NORMAL:
@@ -778,12 +768,15 @@ mutex_self_trylock(pthread_mutex_t mutex)
* POSIX specifies that mutexes should return EDEADLK if a
* recursive lock is detected.
*/
- ret = EBUSY;
+ if (m->m_owner == curthread)
+ ret = EDEADLK;
+ else
+ ret = EBUSY;
break;
case PTHREAD_MUTEX_RECURSIVE:
/* Increment the lock count: */
- mutex->m_data.m_count++;
+ m->m_count++;
break;
default:
@@ -791,15 +784,15 @@ mutex_self_trylock(pthread_mutex_t mutex)
ret = EINVAL;
}
- return(ret);
+ return (ret);
}
static inline int
-mutex_self_lock(pthread_mutex_t mutex)
+mutex_self_lock(struct pthread *curthread, pthread_mutex_t m)
{
int ret = 0;
- switch (mutex->m_type) {
+ switch (m->m_type) {
/* case PTHREAD_MUTEX_DEFAULT: */
case PTHREAD_MUTEX_ERRORCHECK:
/*
@@ -814,13 +807,18 @@ mutex_self_lock(pthread_mutex_t mutex)
* What SS2 define as a 'normal' mutex. Intentionally
* deadlock on attempts to get a lock you already own.
*/
- _thread_kern_sched_state_unlock(PS_DEADLOCK,
- &mutex->lock, __FILE__, __LINE__);
+ THR_SET_STATE(curthread, PS_DEADLOCK);
+
+ /* Unlock the mutex structure: */
+ THR_LOCK_RELEASE(curthread, &m->m_lock);
+
+ /* Schedule the next thread: */
+ _thr_sched_switch(curthread);
break;
case PTHREAD_MUTEX_RECURSIVE:
/* Increment the lock count: */
- mutex->m_data.m_count++;
+ m->m_count++;
break;
default:
@@ -828,82 +826,58 @@ mutex_self_lock(pthread_mutex_t mutex)
ret = EINVAL;
}
- return(ret);
+ return (ret);
}
-static inline int
-mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
+static int
+mutex_unlock_common(pthread_mutex_t *m, int add_reference)
{
struct pthread *curthread = _get_curthread();
int ret = 0;
- if (mutex == NULL || *mutex == NULL) {
+ if (m == NULL || *m == NULL)
ret = EINVAL;
- } else {
- /*
- * Defer signals to protect the scheduling queues from
- * access by the signal handler:
- */
- _thread_kern_sig_defer();
-
+ else {
/* Lock the mutex structure: */
- _SPINLOCK(&(*mutex)->lock);
+ THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
/* Process according to mutex type: */
- switch ((*mutex)->m_protocol) {
+ switch ((*m)->m_protocol) {
/* Default POSIX mutex: */
case PTHREAD_PRIO_NONE:
/*
* Check if the running thread is not the owner of the
* mutex:
*/
- if ((*mutex)->m_owner != curthread) {
+ if ((*m)->m_owner != curthread)
/*
* Return an invalid argument error for no
* owner and a permission error otherwise:
*/
- ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
- }
- else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
- ((*mutex)->m_data.m_count > 0)) {
+ ret = (*m)->m_owner == NULL ? EINVAL : EPERM;
+
+ else if (((*m)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
+ ((*m)->m_count > 0))
/* Decrement the count: */
- (*mutex)->m_data.m_count--;
- } else {
+ (*m)->m_count--;
+ else {
/*
- * Clear the count in case this is recursive
+ * Clear the count in case this is a recursive
* mutex.
*/
- (*mutex)->m_data.m_count = 0;
+ (*m)->m_count = 0;
/* Remove the mutex from the threads queue. */
- _MUTEX_ASSERT_IS_OWNED(*mutex);
- TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
- (*mutex), m_qe);
- _MUTEX_INIT_LINK(*mutex);
+ MUTEX_ASSERT_IS_OWNED(*m);
+ TAILQ_REMOVE(&(*m)->m_owner->mutexq,
+ (*m), m_qe);
+ MUTEX_INIT_LINK(*m);
/*
- * Get the next thread from the queue of
- * threads waiting on the mutex:
+ * Hand off the mutex to the next waiting
+ * thread:
*/
- if (((*mutex)->m_owner =
- mutex_queue_deq(*mutex)) != NULL) {
- /* Make the new owner runnable: */
- PTHREAD_NEW_STATE((*mutex)->m_owner,
- PS_RUNNING);
-
- /*
- * Add the mutex to the threads list of
- * owned mutexes:
- */
- TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
- (*mutex), m_qe);
-
- /*
- * The owner is no longer waiting for
- * this mutex:
- */
- (*mutex)->m_owner->data.mutex = NULL;
- }
+ mutex_handoff(curthread, *m);
}
break;
@@ -913,23 +887,23 @@ mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
* Check if the running thread is not the owner of the
* mutex:
*/
- if ((*mutex)->m_owner != curthread) {
+ if ((*m)->m_owner != curthread)
/*
* Return an invalid argument error for no
* owner and a permission error otherwise:
*/
- ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
- }
- else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
- ((*mutex)->m_data.m_count > 0)) {
+ ret = (*m)->m_owner == NULL ? EINVAL : EPERM;
+
+ else if (((*m)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
+ ((*m)->m_count > 0))
/* Decrement the count: */
- (*mutex)->m_data.m_count--;
- } else {
+ (*m)->m_count--;
+ else {
/*
* Clear the count in case this is recursive
* mutex.
*/
- (*mutex)->m_data.m_count = 0;
+ (*m)->m_count = 0;
/*
* Restore the threads inherited priority and
@@ -937,11 +911,13 @@ mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
* not to override changes in the threads base
* priority subsequent to locking the mutex).
*/
+ THR_SCHED_LOCK(curthread, curthread);
curthread->inherited_priority =
- (*mutex)->m_saved_prio;
+ (*m)->m_saved_prio;
curthread->active_priority =
MAX(curthread->inherited_priority,
curthread->base_priority);
+ THR_SCHED_UNLOCK(curthread, curthread);
/*
* This thread now owns one less priority mutex.
@@ -949,69 +925,16 @@ mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
curthread->priority_mutex_count--;
/* Remove the mutex from the threads queue. */
- _MUTEX_ASSERT_IS_OWNED(*mutex);
- TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
- (*mutex), m_qe);
- _MUTEX_INIT_LINK(*mutex);
+ MUTEX_ASSERT_IS_OWNED(*m);
+ TAILQ_REMOVE(&(*m)->m_owner->mutexq,
+ (*m), m_qe);
+ MUTEX_INIT_LINK(*m);
/*
- * Get the next thread from the queue of threads
- * waiting on the mutex:
+ * Hand off the mutex to the next waiting
+ * thread:
*/
- if (((*mutex)->m_owner =
- mutex_queue_deq(*mutex)) == NULL)
- /* This mutex has no priority. */
- (*mutex)->m_prio = 0;
- else {
- /*
- * Track number of priority mutexes owned:
- */
- (*mutex)->m_owner->priority_mutex_count++;
-
- /*
- * Add the mutex to the threads list
- * of owned mutexes:
- */
- TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
- (*mutex), m_qe);
-
- /*
- * The owner is no longer waiting for
- * this mutex:
- */
- (*mutex)->m_owner->data.mutex = NULL;
-
- /*
- * Set the priority of the mutex. Since
- * our waiting threads are in descending
- * priority order, the priority of the
- * mutex becomes the active priority of
- * the thread we just dequeued.
- */
- (*mutex)->m_prio =
- (*mutex)->m_owner->active_priority;
-
- /*
- * Save the owning threads inherited
- * priority:
- */
- (*mutex)->m_saved_prio =
- (*mutex)->m_owner->inherited_priority;
-
- /*
- * The owning threads inherited priority
- * now becomes his active priority (the
- * priority of the mutex).
- */
- (*mutex)->m_owner->inherited_priority =
- (*mutex)->m_prio;
-
- /*
- * Make the new owner runnable:
- */
- PTHREAD_NEW_STATE((*mutex)->m_owner,
- PS_RUNNING);
- }
+ mutex_handoff(curthread, *m);
}
break;
@@ -1021,23 +944,23 @@ mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
* Check if the running thread is not the owner of the
* mutex:
*/
- if ((*mutex)->m_owner != curthread) {
+ if ((*m)->m_owner != curthread)
/*
* Return an invalid argument error for no
* owner and a permission error otherwise:
*/
- ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
- }
- else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
- ((*mutex)->m_data.m_count > 0)) {
+ ret = (*m)->m_owner == NULL ? EINVAL : EPERM;
+
+ else if (((*m)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
+ ((*m)->m_count > 0))
/* Decrement the count: */
- (*mutex)->m_data.m_count--;
- } else {
+ (*m)->m_count--;
+ else {
/*
- * Clear the count in case this is recursive
+ * Clear the count in case this is a recursive
* mutex.
*/
- (*mutex)->m_data.m_count = 0;
+ (*m)->m_count = 0;
/*
* Restore the threads inherited priority and
@@ -1045,11 +968,13 @@ mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
* not to override changes in the threads base
* priority subsequent to locking the mutex).
*/
+ THR_SCHED_LOCK(curthread, curthread);
curthread->inherited_priority =
- (*mutex)->m_saved_prio;
+ (*m)->m_saved_prio;
curthread->active_priority =
MAX(curthread->inherited_priority,
curthread->base_priority);
+ THR_SCHED_UNLOCK(curthread, curthread);
/*
* This thread now owns one less priority mutex.
@@ -1057,80 +982,16 @@ mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
curthread->priority_mutex_count--;
/* Remove the mutex from the threads queue. */
- _MUTEX_ASSERT_IS_OWNED(*mutex);
- TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
- (*mutex), m_qe);
- _MUTEX_INIT_LINK(*mutex);
+ MUTEX_ASSERT_IS_OWNED(*m);
+ TAILQ_REMOVE(&(*m)->m_owner->mutexq,
+ (*m), m_qe);
+ MUTEX_INIT_LINK(*m);
/*
- * Enter a loop to find a waiting thread whose
- * active priority will not cause a ceiling
- * violation:
+ * Hand off the mutex to the next waiting
+ * thread:
*/
- while ((((*mutex)->m_owner =
- mutex_queue_deq(*mutex)) != NULL) &&
- ((*mutex)->m_owner->active_priority >
- (*mutex)->m_prio)) {
- /*
- * Either the mutex ceiling priority
- * been lowered and/or this threads
- * priority has been raised subsequent
- * to this thread being queued on the
- * waiting list.
- */
- (*mutex)->m_owner->error = EINVAL;
- PTHREAD_NEW_STATE((*mutex)->m_owner,
- PS_RUNNING);
- /*
- * The thread is no longer waiting for
- * this mutex:
- */
- (*mutex)->m_owner->data.mutex = NULL;
- }
-
- /* Check for a new owner: */
- if ((*mutex)->m_owner != NULL) {
- /*
- * Track number of priority mutexes owned:
- */
- (*mutex)->m_owner->priority_mutex_count++;
-
- /*
- * Add the mutex to the threads list
- * of owned mutexes:
- */
- TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
- (*mutex), m_qe);
-
- /*
- * The owner is no longer waiting for
- * this mutex:
- */
- (*mutex)->m_owner->data.mutex = NULL;
-
- /*
- * Save the owning threads inherited
- * priority:
- */
- (*mutex)->m_saved_prio =
- (*mutex)->m_owner->inherited_priority;
-
- /*
- * The owning thread inherits the
- * ceiling priority of the mutex and
- * executes at that priority:
- */
- (*mutex)->m_owner->inherited_priority =
- (*mutex)->m_prio;
- (*mutex)->m_owner->active_priority =
- (*mutex)->m_prio;
-
- /*
- * Make the new owner runnable:
- */
- PTHREAD_NEW_STATE((*mutex)->m_owner,
- PS_RUNNING);
- }
+ mutex_handoff(curthread, *m);
}
break;
@@ -1141,19 +1002,12 @@ mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
break;
}
- if ((ret == 0) && (add_reference != 0)) {
+ if ((ret == 0) && (add_reference != 0))
/* Increment the reference count: */
- (*mutex)->m_refcount++;
- }
+ (*m)->m_refcount++;
/* Unlock the mutex structure: */
- _SPINUNLOCK(&(*mutex)->lock);
-
- /*
- * Undefer and handle pending signals, yielding if
- * necessary:
- */
- _thread_kern_sig_undefer();
+ THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
}
/* Return the completion status: */
@@ -1168,11 +1022,14 @@ mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
* changes to active priorities of other threads and to the ordering
* of mutex locking by waiting threads.
*
- * This must be called while thread scheduling is deferred.
+ * This must be called without the target thread's scheduling lock held.
*/
void
-_mutex_notify_priochange(pthread_t pthread)
+_mutex_notify_priochange(struct pthread *curthread, struct pthread *pthread,
+ int propagate_prio)
{
+ struct pthread_mutex *m;
+
	/* Adjust the priorities of any owned priority mutexes: */
if (pthread->priority_mutex_count > 0) {
/*
@@ -1180,14 +1037,29 @@ _mutex_notify_priochange(pthread_t pthread)
* their priorities to account for this threads change
* in priority. This has the side effect of changing
* the threads active priority.
+ *
+ * Be sure to lock the first mutex in the list of owned
+ * mutexes. This acts as a barrier against another
+		 * simultaneous call to change the thread's priority
+		 * and against the owning thread releasing the mutex.
*/
- mutex_rescan_owned(pthread, /* rescan all owned */ NULL);
+ m = TAILQ_FIRST(&pthread->mutexq);
+ if (m != NULL) {
+ THR_LOCK_ACQUIRE(curthread, &m->m_lock);
+ /*
+ * Make sure the thread still owns the lock.
+ */
+ if (m == TAILQ_FIRST(&pthread->mutexq))
+ mutex_rescan_owned(curthread, pthread,
+ /* rescan all owned */ NULL);
+ THR_LOCK_RELEASE(curthread, &m->m_lock);
+ }
}
/*
* If this thread is waiting on a priority inheritence mutex,
* check for priority adjustments. A change in priority can
- * also effect a ceiling violation(*) for a thread waiting on
+ * also cause a ceiling violation(*) for a thread waiting on
* a priority protection mutex; we don't perform the check here
* as it is done in pthread_mutex_unlock.
*
@@ -1196,32 +1068,53 @@ _mutex_notify_priochange(pthread_t pthread)
* does not affect ownership of that mutex; the ceiling
* priority is only checked before mutex ownership occurs.
*/
- if (pthread->state == PS_MUTEX_WAIT) {
- /* Lock the mutex structure: */
- _SPINLOCK(&pthread->data.mutex->lock);
-
+ if (propagate_prio != 0) {
/*
- * Check to make sure this thread is still in the same state
- * (the spinlock above can yield the CPU to another thread):
+ * Lock the thread's scheduling queue. This is a bit
+ * convoluted; the "in synchronization queue flag" can
+ * only be cleared with both the thread's scheduling and
+ * mutex locks held. The thread's pointer to the wanted
+ * mutex is guaranteed to be valid during this time.
*/
- if (pthread->state == PS_MUTEX_WAIT) {
+ THR_SCHED_LOCK(curthread, pthread);
+
+ if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) == 0) ||
+ ((m = pthread->data.mutex) == NULL))
+ THR_SCHED_UNLOCK(curthread, pthread);
+ else {
+ /*
+ * This thread is currently waiting on a mutex; unlock
+ * the scheduling queue lock and lock the mutex. We
+ * can't hold both at the same time because the locking
+ * order could cause a deadlock.
+ */
+ THR_SCHED_UNLOCK(curthread, pthread);
+ THR_LOCK_ACQUIRE(curthread, &m->m_lock);
+
/*
- * Remove and reinsert this thread into the list of
- * waiting threads to preserve decreasing priority
- * order.
+ * Check to make sure this thread is still in the
+ * same state (the lock above can yield the CPU to
+ * another thread or the thread may be running on
+ * another CPU).
*/
- mutex_queue_remove(pthread->data.mutex, pthread);
- mutex_queue_enq(pthread->data.mutex, pthread);
+ if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
+ (pthread->data.mutex == m)) {
+ /*
+ * Remove and reinsert this thread into
+ * the list of waiting threads to preserve
+ * decreasing priority order.
+ */
+ mutex_queue_remove(m, pthread);
+ mutex_queue_enq(m, pthread);
- if (pthread->data.mutex->m_protocol ==
- PTHREAD_PRIO_INHERIT) {
- /* Adjust priorities: */
- mutex_priority_adjust(pthread->data.mutex);
+ if (m->m_protocol == PTHREAD_PRIO_INHERIT)
+ /* Adjust priorities: */
+ mutex_priority_adjust(curthread, m);
}
- }
- /* Unlock the mutex structure: */
- _SPINUNLOCK(&pthread->data.mutex->lock);
+ /* Unlock the mutex structure: */
+ THR_LOCK_RELEASE(curthread, &m->m_lock);
+ }
}
}
@@ -1229,13 +1122,15 @@ _mutex_notify_priochange(pthread_t pthread)
* Called when a new thread is added to the mutex waiting queue or
* when a threads priority changes that is already in the mutex
* waiting queue.
+ *
+ * This must be called with the mutex locked by the current thread.
*/
static void
-mutex_priority_adjust(pthread_mutex_t mutex)
+mutex_priority_adjust(struct pthread *curthread, pthread_mutex_t mutex)
{
- pthread_t pthread_next, pthread = mutex->m_owner;
- int temp_prio;
pthread_mutex_t m = mutex;
+ struct pthread *pthread_next, *pthread = mutex->m_owner;
+ int done, temp_prio;
/*
* Calculate the mutex priority as the maximum of the highest
@@ -1260,7 +1155,12 @@ mutex_priority_adjust(pthread_mutex_t mutex)
/* Set new priority of the mutex: */
m->m_prio = temp_prio;
- while (m != NULL) {
+ /*
+ * Don't unlock the mutex passed in as an argument. It is
+ * expected to be locked and unlocked by the caller.
+ */
+ done = 1;
+ do {
/*
* Save the threads priority before rescanning the
* owned mutexes:
@@ -1268,11 +1168,26 @@ mutex_priority_adjust(pthread_mutex_t mutex)
temp_prio = pthread->active_priority;
/*
- * Fix the priorities for all the mutexes this thread has
- * locked since taking this mutex. This also has a
+ * Fix the priorities for all mutexes held by the owning
+ * thread since taking this mutex. This also has a
* potential side-effect of changing the threads priority.
+ *
+ * At this point the mutex is locked by the current thread.
+ * The owning thread can't release the mutex until it is
+ * unlocked, so we should be able to safely walk its list
+ * of owned mutexes.
+ */
+ mutex_rescan_owned(curthread, pthread, m);
+
+ /*
+ * If this isn't the first time through the loop,
+ * the current mutex needs to be unlocked.
*/
- mutex_rescan_owned(pthread, m);
+ if (done == 0)
+ THR_LOCK_RELEASE(curthread, &m->m_lock);
+
+ /* Assume we're done unless told otherwise: */
+ done = 1;
/*
* If the thread is currently waiting on a mutex, check
@@ -1280,56 +1195,70 @@ mutex_priority_adjust(pthread_mutex_t mutex)
* priority of the mutex.
*/
if ((temp_prio != pthread->active_priority) &&
- (pthread->state == PS_MUTEX_WAIT) &&
- (pthread->data.mutex->m_protocol == PTHREAD_PRIO_INHERIT)) {
- /* Grab the mutex this thread is waiting on: */
- m = pthread->data.mutex;
+ ((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
+ ((m = pthread->data.mutex) != NULL) &&
+ (m->m_protocol == PTHREAD_PRIO_INHERIT)) {
+ /* Lock the mutex structure: */
+ THR_LOCK_ACQUIRE(curthread, &m->m_lock);
/*
- * The priority for this thread has changed. Remove
- * and reinsert this thread into the list of waiting
- * threads to preserve decreasing priority order.
+ * Make sure the thread is still waiting on the
+ * mutex:
*/
- mutex_queue_remove(m, pthread);
- mutex_queue_enq(m, pthread);
-
- /* Grab the waiting thread with highest priority: */
- pthread_next = TAILQ_FIRST(&m->m_queue);
+ if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
+ (m == pthread->data.mutex)) {
+ /*
+ * The priority for this thread has changed.
+ * Remove and reinsert this thread into the
+ * list of waiting threads to preserve
+ * decreasing priority order.
+ */
+ mutex_queue_remove(m, pthread);
+ mutex_queue_enq(m, pthread);
- /*
- * Calculate the mutex priority as the maximum of the
- * highest active priority of any waiting threads and
- * the owning threads active priority.
- */
- temp_prio = MAX(pthread_next->active_priority,
- MAX(m->m_saved_prio, m->m_owner->base_priority));
+ /*
+ * Grab the waiting thread with highest
+ * priority:
+ */
+ pthread_next = TAILQ_FIRST(&m->m_queue);
- if (temp_prio != m->m_prio) {
/*
- * The priority needs to be propagated to the
- * mutex this thread is waiting on and up to
- * the owner of that mutex.
+ * Calculate the mutex priority as the maximum
+				 * of the highest active priority of any
+				 * waiting thread and the owning thread's
+				 * active priority.
*/
- m->m_prio = temp_prio;
- pthread = m->m_owner;
- }
- else
- /* We're done: */
- m = NULL;
+ temp_prio = MAX(pthread_next->active_priority,
+ MAX(m->m_saved_prio,
+ m->m_owner->base_priority));
+ if (temp_prio != m->m_prio) {
+ /*
+ * The priority needs to be propagated
+ * to the mutex this thread is waiting
+ * on and up to the owner of that mutex.
+ */
+ m->m_prio = temp_prio;
+ pthread = m->m_owner;
+
+ /* We're not done yet: */
+ done = 0;
+ }
+ }
+ /* Only release the mutex if we're done: */
+ if (done != 0)
+ THR_LOCK_RELEASE(curthread, &m->m_lock);
}
- else
- /* We're done: */
- m = NULL;
- }
+ } while (done == 0);
}
static void
-mutex_rescan_owned(pthread_t pthread, pthread_mutex_t mutex)
+mutex_rescan_owned(struct pthread *curthread, struct pthread *pthread,
+ struct pthread_mutex *mutex)
{
- int active_prio, inherited_prio;
- pthread_mutex_t m;
- pthread_t pthread_next;
+ struct pthread_mutex *m;
+ struct pthread *pthread_next;
+ int active_prio, inherited_prio;
/*
* Start walking the mutexes the thread has taken since
@@ -1344,8 +1273,7 @@ mutex_rescan_owned(pthread_t pthread, pthread_mutex_t mutex)
/* There is no inherited priority yet. */
inherited_prio = 0;
- }
- else {
+ } else {
/*
* The caller wants to start after a specific mutex. It
* is assumed that this mutex is a priority inheritence
@@ -1359,7 +1287,7 @@ mutex_rescan_owned(pthread_t pthread, pthread_mutex_t mutex)
}
active_prio = MAX(inherited_prio, pthread->base_priority);
- while (m != NULL) {
+ for (; m != NULL; m = TAILQ_NEXT(m, m_qe)) {
/*
* We only want to deal with priority inheritence
* mutexes. This might be optimized by only placing
@@ -1386,9 +1314,6 @@ mutex_rescan_owned(pthread_t pthread, pthread_mutex_t mutex)
inherited_prio = m->m_prio;
active_prio = MAX(m->m_prio, pthread->base_priority);
}
-
- /* Advance to the next mutex owned by this thread: */
- m = TAILQ_NEXT(m, m_qe);
}
/*
@@ -1399,16 +1324,22 @@ mutex_rescan_owned(pthread_t pthread, pthread_mutex_t mutex)
active_prio = MAX(inherited_prio, pthread->base_priority);
if (active_prio != pthread->active_priority) {
- /*
- * If this thread is in the priority queue, it must be
- * removed and reinserted for its new priority.
- */
- if (pthread->flags & PTHREAD_FLAGS_IN_PRIOQ) {
+ /* Lock the thread's scheduling queue: */
+ THR_SCHED_LOCK(curthread, pthread);
+
+ if ((pthread->flags & THR_FLAGS_IN_RUNQ) == 0) {
/*
- * Remove the thread from the priority queue
- * before changing its priority:
+ * This thread is not in a run queue. Just set
+ * its active priority.
*/
- PTHREAD_PRIOQ_REMOVE(pthread);
+ pthread->active_priority = active_prio;
+ }
+ else {
+ /*
+ * This thread is in a run queue. Remove it from
+ * the queue before changing its priority:
+ */
+ THR_RUNQ_REMOVE(pthread);
/*
* POSIX states that if the priority is being
@@ -1421,19 +1352,15 @@ mutex_rescan_owned(pthread_t pthread, pthread_mutex_t mutex)
/* Set the new active priority. */
pthread->active_priority = active_prio;
- PTHREAD_PRIOQ_INSERT_HEAD(pthread);
- }
- else {
+ THR_RUNQ_INSERT_HEAD(pthread);
+ } else {
/* Set the new active priority. */
pthread->active_priority = active_prio;
- PTHREAD_PRIOQ_INSERT_TAIL(pthread);
+ THR_RUNQ_INSERT_TAIL(pthread);
}
}
- else {
- /* Set the new active priority. */
- pthread->active_priority = active_prio;
- }
+ THR_SCHED_UNLOCK(curthread, pthread);
}
}
@@ -1449,36 +1376,182 @@ _mutex_unlock_private(pthread_t pthread)
}
}
+/*
+ * This is called by the current thread when it wants to back out of a
+ * mutex_lock in order to run a signal handler.
+ */
void
-_mutex_lock_backout(pthread_t pthread)
+_mutex_lock_backout(struct pthread *curthread)
{
- struct pthread_mutex *mutex;
+ struct pthread_mutex *m;
- /*
- * Defer signals to protect the scheduling queues from
- * access by the signal handler:
- */
- _thread_kern_sig_defer();
- if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) {
- mutex = pthread->data.mutex;
+ if ((curthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) {
+ /*
+ * Any other thread may clear the "in sync queue flag",
+ * but only the current thread can clear the pointer
+ * to the mutex. So if the flag is set, we can
+ * guarantee that the pointer to the mutex is valid.
+ * The only problem may be if the mutex is destroyed
+ * out from under us, but that should be considered
+ * an application bug.
+ */
+ m = curthread->data.mutex;
/* Lock the mutex structure: */
- _SPINLOCK(&mutex->lock);
+ THR_LOCK_ACQUIRE(curthread, &m->m_lock);
+
+
+ /*
+ * Check to make sure this thread doesn't already own
+ * the mutex. Since mutexes are unlocked with direct
+ * handoffs, it is possible the previous owner gave it
+ * to us after we checked the sync queue flag and before
+ * we locked the mutex structure.
+ */
+ if (m->m_owner == curthread) {
+ THR_LOCK_RELEASE(curthread, &m->m_lock);
+ mutex_unlock_common(&m, /* add_reference */ 0);
+ } else {
+ /*
+ * Remove ourselves from the mutex queue and
+ * clear the pointer to the mutex. We may no
+ * longer be in the mutex queue, but the removal
+ * function will DTRT.
+ */
+ mutex_queue_remove(m, curthread);
+ curthread->data.mutex = NULL;
+ THR_LOCK_RELEASE(curthread, &m->m_lock);
+ }
+ }
+}
+
+/*
+ * Dequeue a waiting thread from the head of a mutex queue in descending
+ * priority order.
+ *
+ * In order to properly dequeue a thread from the mutex queue and
+ * make it runnable without the possibility of errant wakeups, it
+ * is necessary to lock the thread's scheduling queue while also
+ * holding the mutex lock.
+ */
+static void
+mutex_handoff(struct pthread *curthread, struct pthread_mutex *mutex)
+{
+ struct pthread *pthread;
+
+ /* Keep dequeueing until we find a valid thread: */
+ mutex->m_owner = NULL;
+ pthread = TAILQ_FIRST(&mutex->m_queue);
+ while (pthread != NULL) {
+ /* Take the thread's scheduling lock: */
+ THR_SCHED_LOCK(curthread, pthread);
- mutex_queue_remove(mutex, pthread);
+ /* Remove the thread from the mutex queue: */
+ TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
+ pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;
- /* This thread is no longer waiting for the mutex: */
+ /* This thread is no longer waiting for this mutex. */
pthread->data.mutex = NULL;
- /* Unlock the mutex structure: */
- _SPINUNLOCK(&mutex->lock);
+ /*
+		 * Hand the mutex to this thread according to its
+		 * protocol; the loop only exits once a valid owner is found.
+ */
+ switch (mutex->m_protocol) {
+ case PTHREAD_PRIO_NONE:
+ /*
+ * Assign the new owner and add the mutex to the
+ * thread's list of owned mutexes.
+ */
+ mutex->m_owner = pthread;
+ TAILQ_INSERT_TAIL(&pthread->mutexq, mutex, m_qe);
+ break;
+ case PTHREAD_PRIO_INHERIT:
+ /*
+ * Assign the new owner and add the mutex to the
+ * thread's list of owned mutexes.
+ */
+ mutex->m_owner = pthread;
+ TAILQ_INSERT_TAIL(&pthread->mutexq, mutex, m_qe);
+
+ /* Track number of priority mutexes owned: */
+ pthread->priority_mutex_count++;
+
+ /*
+ * Set the priority of the mutex. Since our waiting
+ * threads are in descending priority order, the
+ * priority of the mutex becomes the active priority
+ * of the thread we just dequeued.
+ */
+ mutex->m_prio = pthread->active_priority;
+
+			/* Save the owning thread's inherited priority: */
+ mutex->m_saved_prio = pthread->inherited_priority;
+
+ /*
+			 * The owning thread's inherited priority now becomes
+			 * its active priority (the priority of the mutex).
+ */
+ pthread->inherited_priority = mutex->m_prio;
+ break;
+
+ case PTHREAD_PRIO_PROTECT:
+ if (pthread->active_priority > mutex->m_prio) {
+ /*
+ * Either the mutex ceiling priority has
+				 * been lowered and/or this thread's priority
+ * has been raised subsequent to the thread
+ * being queued on the waiting list.
+ */
+ pthread->error = EINVAL;
+ }
+ else {
+ /*
+ * Assign the new owner and add the mutex
+ * to the thread's list of owned mutexes.
+ */
+ mutex->m_owner = pthread;
+ TAILQ_INSERT_TAIL(&pthread->mutexq,
+ mutex, m_qe);
+
+ /* Track number of priority mutexes owned: */
+ pthread->priority_mutex_count++;
+
+ /*
+				 * Save the owning thread's inherited
+ * priority:
+ */
+ mutex->m_saved_prio =
+ pthread->inherited_priority;
+
+ /*
+ * The owning thread inherits the ceiling
+ * priority of the mutex and executes at
+ * that priority:
+ */
+ pthread->inherited_priority = mutex->m_prio;
+ pthread->active_priority = mutex->m_prio;
+
+ }
+ break;
+ }
+
+ /* Make the thread runnable and unlock the scheduling queue: */
+ _thr_setrunnable_unlocked(pthread);
+ THR_SCHED_UNLOCK(curthread, pthread);
+
+ if (mutex->m_owner == pthread)
+ /* We're done; a valid owner was found. */
+ break;
+ else
+ /* Get the next thread from the waiting queue: */
+ pthread = TAILQ_NEXT(pthread, sqe);
}
- /*
- * Undefer and handle pending signals, yielding if
- * necessary:
- */
- _thread_kern_sig_undefer();
+
+ if ((pthread == NULL) && (mutex->m_protocol == PTHREAD_PRIO_INHERIT))
+ /* This mutex has no priority: */
+ mutex->m_prio = 0;
}
/*
@@ -1486,13 +1559,13 @@ _mutex_lock_backout(pthread_t pthread)
* priority order.
*/
static inline pthread_t
-mutex_queue_deq(pthread_mutex_t mutex)
+mutex_queue_deq(struct pthread_mutex *mutex)
{
pthread_t pthread;
while ((pthread = TAILQ_FIRST(&mutex->m_queue)) != NULL) {
TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
- pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ;
+ pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;
/*
* Only exit the loop if the thread hasn't been
@@ -1502,7 +1575,7 @@ mutex_queue_deq(pthread_mutex_t mutex)
break;
}
- return(pthread);
+ return (pthread);
}
/*
@@ -1511,9 +1584,9 @@ mutex_queue_deq(pthread_mutex_t mutex)
static inline void
mutex_queue_remove(pthread_mutex_t mutex, pthread_t pthread)
{
- if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) {
+ if ((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) {
TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
- pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ;
+ pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;
}
}
@@ -1525,7 +1598,7 @@ mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread)
{
pthread_t tid = TAILQ_LAST(&mutex->m_queue, mutex_head);
- PTHREAD_ASSERT_NOT_IN_SYNCQ(pthread);
+ THR_ASSERT_NOT_IN_SYNCQ(pthread);
/*
* For the common case of all threads having equal priority,
* we perform a quick check against the priority of the thread
@@ -1539,6 +1612,5 @@ mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread)
tid = TAILQ_NEXT(tid, sqe);
TAILQ_INSERT_BEFORE(tid, pthread, sqe);
}
- pthread->flags |= PTHREAD_FLAGS_IN_MUTEXQ;
+ pthread->sflags |= THR_FLAGS_IN_SYNCQ;
}
-
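The three protocol cases reworked above (PTHREAD_PRIO_NONE, PTHREAD_PRIO_INHERIT and PTHREAD_PRIO_PROTECT) are selected by applications through the standard mutex attribute interface. A minimal sketch of how the priority-inheritance path gets exercised, using only the documented POSIX calls (the helper name init_pi_mutex is hypothetical):

#include <pthread.h>

int
init_pi_mutex(pthread_mutex_t *m)
{
	pthread_mutexattr_t attr;
	int ret;

	if ((ret = pthread_mutexattr_init(&attr)) != 0)
		return (ret);
	/* PTHREAD_PRIO_PROTECT would exercise the ceiling checks instead. */
	if ((ret = pthread_mutexattr_setprotocol(&attr,
	    PTHREAD_PRIO_INHERIT)) == 0)
		ret = pthread_mutex_init(m, &attr);
	(void)pthread_mutexattr_destroy(&attr);
	return (ret);
}

Locking such a mutex from a lower-priority thread is what drives mutex_priority_adjust() and, on unlock, the new mutex_handoff() path.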
diff --git a/lib/libpthread/thread/thr_mutex_prioceiling.c b/lib/libpthread/thread/thr_mutex_prioceiling.c
index 7d2e92e..a78b5d1 100644
--- a/lib/libpthread/thread/thr_mutex_prioceiling.c
+++ b/lib/libpthread/thread/thr_mutex_prioceiling.c
@@ -98,16 +98,14 @@ _pthread_mutex_setprioceiling(pthread_mutex_t *mutex,
ret = EINVAL;
else if ((*mutex)->m_protocol != PTHREAD_PRIO_PROTECT)
ret = EINVAL;
- else {
- /* Lock the mutex: */
- if ((ret = pthread_mutex_lock(mutex)) == 0) {
- /* Return the old ceiling and set the new ceiling: */
- *old_ceiling = (*mutex)->m_prio;
- (*mutex)->m_prio = prioceiling;
+ /* Lock the mutex: */
+ else if ((ret = pthread_mutex_lock(mutex)) == 0) {
+ /* Return the old ceiling and set the new ceiling: */
+ *old_ceiling = (*mutex)->m_prio;
+ (*mutex)->m_prio = prioceiling;
- /* Unlock the mutex: */
- ret = pthread_mutex_unlock(mutex);
- }
+ /* Unlock the mutex: */
+ ret = pthread_mutex_unlock(mutex);
}
return(ret);
}
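The simplified branch above is reached through pthread_mutex_setprioceiling(), which per the checks above is only valid for mutexes using the PTHREAD_PRIO_PROTECT protocol (EINVAL otherwise). A minimal sketch assuming such a mutex (the helper name raise_ceiling is hypothetical):

#include <pthread.h>

int
raise_ceiling(pthread_mutex_t *m, int new_ceiling)
{
	int old_ceiling;

	/* Locks the mutex, swaps the ceiling value, then unlocks it. */
	return (pthread_mutex_setprioceiling(m, new_ceiling, &old_ceiling));
}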
diff --git a/lib/libpthread/thread/thr_mutex_protocol.c b/lib/libpthread/thread/thr_mutex_protocol.c
index f7be5a6..9f0f262 100644
--- a/lib/libpthread/thread/thr_mutex_protocol.c
+++ b/lib/libpthread/thread/thr_mutex_protocol.c
@@ -63,7 +63,7 @@ _pthread_mutexattr_setprotocol(pthread_mutexattr_t *mattr, int protocol)
ret = EINVAL;
else {
(*mattr)->m_protocol = protocol;
- (*mattr)->m_ceiling = PTHREAD_MAX_PRIORITY;
+ (*mattr)->m_ceiling = THR_MAX_PRIORITY;
}
return(ret);
}
diff --git a/lib/libpthread/thread/thr_nanosleep.c b/lib/libpthread/thread/thr_nanosleep.c
index 6cccb87..bec3b66 100644
--- a/lib/libpthread/thread/thr_nanosleep.c
+++ b/lib/libpthread/thread/thr_nanosleep.c
@@ -39,57 +39,42 @@
__weak_reference(__nanosleep, nanosleep);
int
-_nanosleep(const struct timespec * time_to_sleep,
- struct timespec * time_remaining)
+_nanosleep(const struct timespec *time_to_sleep,
+ struct timespec *time_remaining)
{
struct pthread *curthread = _get_curthread();
int ret = 0;
- struct timespec current_time;
- struct timespec current_time1;
+ struct timespec ts, ts1;
struct timespec remaining_time;
- struct timeval tv;
/* Check if the time to sleep is legal: */
- if (time_to_sleep == NULL || time_to_sleep->tv_sec < 0 ||
- time_to_sleep->tv_nsec < 0 || time_to_sleep->tv_nsec >= 1000000000) {
+ if ((time_to_sleep == NULL) || (time_to_sleep->tv_sec < 0) ||
+ (time_to_sleep->tv_nsec < 0) ||
+ (time_to_sleep->tv_nsec >= 1000000000)) {
/* Return an EINVAL error : */
errno = EINVAL;
ret = -1;
} else {
- /*
- * As long as we're going to get the time of day, we
- * might as well store it in the global time of day:
- */
- gettimeofday((struct timeval *) &_sched_tod, NULL);
- GET_CURRENT_TOD(tv);
- TIMEVAL_TO_TIMESPEC(&tv, &current_time);
+ KSE_GET_TOD(curthread->kse, &ts);
/* Calculate the time for the current thread to wake up: */
- curthread->wakeup_time.tv_sec = current_time.tv_sec + time_to_sleep->tv_sec;
- curthread->wakeup_time.tv_nsec = current_time.tv_nsec + time_to_sleep->tv_nsec;
+ TIMESPEC_ADD(&curthread->wakeup_time, &ts, time_to_sleep);
- /* Check if the nanosecond field has overflowed: */
- if (curthread->wakeup_time.tv_nsec >= 1000000000) {
- /* Wrap the nanosecond field: */
- curthread->wakeup_time.tv_sec += 1;
- curthread->wakeup_time.tv_nsec -= 1000000000;
- }
+ THR_SCHED_LOCK(curthread, curthread);
curthread->interrupted = 0;
- /* Reschedule the current thread to sleep: */
- _thread_kern_sched_state(PS_SLEEP_WAIT, __FILE__, __LINE__);
+ THR_SET_STATE(curthread, PS_SLEEP_WAIT);
+ THR_SCHED_UNLOCK(curthread, curthread);
- /*
- * As long as we're going to get the time of day, we
- * might as well store it in the global time of day:
- */
- gettimeofday((struct timeval *) &_sched_tod, NULL);
- GET_CURRENT_TOD(tv);
- TIMEVAL_TO_TIMESPEC(&tv, &current_time1);
+ /* Reschedule the current thread to sleep: */
+ _thr_sched_switch(curthread);
/* Calculate the remaining time to sleep: */
- remaining_time.tv_sec = time_to_sleep->tv_sec + current_time.tv_sec - current_time1.tv_sec;
- remaining_time.tv_nsec = time_to_sleep->tv_nsec + current_time.tv_nsec - current_time1.tv_nsec;
+ KSE_GET_TOD(curthread->kse, &ts1);
+ remaining_time.tv_sec = time_to_sleep->tv_sec
+ + ts.tv_sec - ts1.tv_sec;
+ remaining_time.tv_nsec = time_to_sleep->tv_nsec
+ + ts.tv_nsec - ts1.tv_nsec;
/* Check if the nanosecond field has underflowed: */
if (remaining_time.tv_nsec < 0) {
@@ -97,9 +82,8 @@ _nanosleep(const struct timespec * time_to_sleep,
remaining_time.tv_sec -= 1;
remaining_time.tv_nsec += 1000000000;
}
-
/* Check if the nanosecond field has overflowed: */
- if (remaining_time.tv_nsec >= 1000000000) {
+ else if (remaining_time.tv_nsec >= 1000000000) {
/* Handle the overflow: */
remaining_time.tv_sec += 1;
remaining_time.tv_nsec -= 1000000000;
@@ -130,14 +114,15 @@ _nanosleep(const struct timespec * time_to_sleep,
}
int
-__nanosleep(const struct timespec * time_to_sleep, struct timespec *
- time_remaining)
+__nanosleep(const struct timespec *time_to_sleep,
+ struct timespec *time_remaining)
{
- int ret;
+ struct pthread *curthread = _get_curthread();
+ int ret;
- _thread_enter_cancellation_point();
+ _thr_enter_cancellation_point(curthread);
ret = _nanosleep(time_to_sleep, time_remaining);
- _thread_leave_cancellation_point();
+ _thr_leave_cancellation_point(curthread);
- return ret;
+ return (ret);
}
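The remaining-time calculation above normalizes tv_nsec by borrowing a second on underflow and carrying one on overflow. A small standalone sketch of the same normalization (the helper name ts_sub is hypothetical):

#include <time.h>

static struct timespec
ts_sub(struct timespec a, struct timespec b)
{
	struct timespec r;

	r.tv_sec = a.tv_sec - b.tv_sec;
	r.tv_nsec = a.tv_nsec - b.tv_nsec;
	if (r.tv_nsec < 0) {
		/* Underflow: borrow one second. */
		r.tv_sec -= 1;
		r.tv_nsec += 1000000000;
	} else if (r.tv_nsec >= 1000000000) {
		/* Overflow: carry one second. */
		r.tv_sec += 1;
		r.tv_nsec -= 1000000000;
	}
	return (r);
}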
diff --git a/lib/libpthread/thread/thr_once.c b/lib/libpthread/thread/thr_once.c
index cef478d..152fdec 100644
--- a/lib/libpthread/thread/thr_once.c
+++ b/lib/libpthread/thread/thr_once.c
@@ -31,23 +31,25 @@
*
* $FreeBSD$
*/
+#include "namespace.h"
#include <pthread.h>
+#include "un-namespace.h"
#include "thr_private.h"
__weak_reference(_pthread_once, pthread_once);
int
-_pthread_once(pthread_once_t * once_control, void (*init_routine) (void))
+_pthread_once(pthread_once_t *once_control, void (*init_routine) (void))
{
if (once_control->state == PTHREAD_NEEDS_INIT) {
- if (_thread_initial == NULL)
- _thread_init();
- pthread_mutex_lock(&(once_control->mutex));
+ if (_thr_initial == NULL)
+ _libpthread_init(NULL);
+ _pthread_mutex_lock(&(once_control->mutex));
if (once_control->state == PTHREAD_NEEDS_INIT) {
init_routine();
once_control->state = PTHREAD_DONE_INIT;
}
- pthread_mutex_unlock(&(once_control->mutex));
+ _pthread_mutex_unlock(&(once_control->mutex));
}
return (0);
}
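The double check of once_control->state above, with the mutex held for the second check, gives the usual pthread_once() guarantee: the init routine runs exactly once no matter how many threads race. A minimal usage sketch (the names once_block, init_once and ensure_init are hypothetical):

#include <pthread.h>

static pthread_once_t	once_block = PTHREAD_ONCE_INIT;
static int		initialized;

static void
init_once(void)
{
	/* Runs exactly once, even if many threads call ensure_init(). */
	initialized = 1;
}

void
ensure_init(void)
{
	(void)pthread_once(&once_block, init_once);
}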
diff --git a/lib/libpthread/thread/thr_open.c b/lib/libpthread/thread/thr_open.c
index b8a6635..ec60ba4 100644
--- a/lib/libpthread/thread/thr_open.c
+++ b/lib/libpthread/thread/thr_open.c
@@ -45,11 +45,12 @@ __weak_reference(__open, open);
int
__open(const char *path, int flags,...)
{
+ struct pthread *curthread = _get_curthread();
int ret;
int mode = 0;
va_list ap;
- _thread_enter_cancellation_point();
+ _thr_enter_cancellation_point(curthread);
/* Check if the file is being created: */
if (flags & O_CREAT) {
@@ -60,7 +61,7 @@ __open(const char *path, int flags,...)
}
ret = __sys_open(path, flags, mode);
- _thread_leave_cancellation_point();
+ _thr_leave_cancellation_point(curthread);
return ret;
}
diff --git a/lib/libpthread/thread/thr_pause.c b/lib/libpthread/thread/thr_pause.c
index 4a46be0..b6bcc51 100644
--- a/lib/libpthread/thread/thr_pause.c
+++ b/lib/libpthread/thread/thr_pause.c
@@ -38,11 +38,12 @@ __weak_reference(_pause, pause);
int
_pause(void)
{
+ struct pthread *curthread = _get_curthread();
int ret;
- _thread_enter_cancellation_point();
+ _thr_enter_cancellation_point(curthread);
ret = __pause();
- _thread_leave_cancellation_point();
+ _thr_leave_cancellation_point(curthread);
return ret;
}
diff --git a/lib/libpthread/thread/thr_poll.c b/lib/libpthread/thread/thr_poll.c
index 353b3cc..0b78047 100644
--- a/lib/libpthread/thread/thr_poll.c
+++ b/lib/libpthread/thread/thr_poll.c
@@ -46,11 +46,12 @@ __weak_reference(__poll, poll);
int
__poll(struct pollfd *fds, unsigned int nfds, int timeout)
{
+ struct pthread *curthread = _get_curthread();
int ret;
- _thread_enter_cancellation_point();
+ _thr_enter_cancellation_point(curthread);
ret = __sys_poll(fds, nfds, timeout);
- _thread_leave_cancellation_point();
+ _thr_leave_cancellation_point(curthread);
return ret;
}
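open(), pause() and poll() above all follow the same pattern: the enter/leave calls bracket the blocking system call so that a pending cancel can be acted on while the thread is blocked. A minimal sketch of a thread that can be cancelled while parked in poll() (the waiter function and its fd argument are hypothetical):

#include <pthread.h>
#include <poll.h>

static void *
waiter(void *arg)
{
	struct pollfd pfd;

	pfd.fd = *(int *)arg;
	pfd.events = POLLIN;
	for (;;)
		(void)poll(&pfd, 1, -1);	/* cancellation point */
	return (NULL);
}

Elsewhere, pthread_cancel(tid) followed by pthread_join(tid, NULL) terminates and reaps the waiter.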
diff --git a/lib/libpthread/thread/thr_printf.c b/lib/libpthread/thread/thr_printf.c
index 0da9ae5..f0791df 100644
--- a/lib/libpthread/thread/thr_printf.c
+++ b/lib/libpthread/thread/thr_printf.c
@@ -29,13 +29,9 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
-#include <sys/types.h>
-#include <sys/fcntl.h>
-#include <sys/uio.h>
-#include <errno.h>
#include <stdarg.h>
+#include <string.h>
#include <unistd.h>
-#include <pthread.h>
#include "thr_private.h"
@@ -109,7 +105,7 @@ static void
pchar(int fd, char c)
{
- write(fd, &c, 1);
+ __sys_write(fd, &c, 1);
}
/*
@@ -119,6 +115,6 @@ static void
pstr(int fd, const char *s)
{
- write(fd, s, strlen(s));
+ __sys_write(fd, s, strlen(s));
}
diff --git a/lib/libpthread/thread/thr_priority_queue.c b/lib/libpthread/thread/thr_priority_queue.c
index 7dcd752..b8bb2ca 100644
--- a/lib/libpthread/thread/thr_priority_queue.c
+++ b/lib/libpthread/thread/thr_priority_queue.c
@@ -42,47 +42,40 @@ static void pq_insert_prio_list(pq_queue_t *pq, int prio);
#if defined(_PTHREADS_INVARIANTS)
-static int _pq_active = 0;
+#define PQ_IN_SCHEDQ (THR_FLAGS_IN_RUNQ | THR_FLAGS_IN_WAITQ)
-#define _PQ_IN_SCHEDQ (PTHREAD_FLAGS_IN_PRIOQ | PTHREAD_FLAGS_IN_WAITQ | PTHREAD_FLAGS_IN_WORKQ)
-
-#define _PQ_SET_ACTIVE() _pq_active = 1
-#define _PQ_CLEAR_ACTIVE() _pq_active = 0
-#define _PQ_ASSERT_ACTIVE(msg) do { \
- if (_pq_active == 0) \
+#define PQ_SET_ACTIVE(pq) (pq)->pq_flags |= PQF_ACTIVE
+#define PQ_CLEAR_ACTIVE(pq) (pq)->pq_flags &= ~PQF_ACTIVE
+#define PQ_ASSERT_ACTIVE(pq, msg) do { \
+ if (((pq)->pq_flags & PQF_ACTIVE) == 0) \
PANIC(msg); \
} while (0)
-#define _PQ_ASSERT_INACTIVE(msg) do { \
- if (_pq_active != 0) \
+#define PQ_ASSERT_INACTIVE(pq, msg) do { \
+ if (((pq)->pq_flags & PQF_ACTIVE) != 0) \
PANIC(msg); \
} while (0)
-#define _PQ_ASSERT_IN_WAITQ(thrd, msg) do { \
- if (((thrd)->flags & PTHREAD_FLAGS_IN_WAITQ) == 0) \
+#define PQ_ASSERT_IN_WAITQ(thrd, msg) do { \
+ if (((thrd)->flags & THR_FLAGS_IN_WAITQ) == 0) \
PANIC(msg); \
} while (0)
-#define _PQ_ASSERT_IN_PRIOQ(thrd, msg) do { \
- if (((thrd)->flags & PTHREAD_FLAGS_IN_PRIOQ) == 0) \
+#define PQ_ASSERT_IN_RUNQ(thrd, msg) do { \
+ if (((thrd)->flags & THR_FLAGS_IN_RUNQ) == 0) \
PANIC(msg); \
} while (0)
-#define _PQ_ASSERT_NOT_QUEUED(thrd, msg) do { \
- if (((thrd)->flags & _PQ_IN_SCHEDQ) != 0) \
+#define PQ_ASSERT_NOT_QUEUED(thrd, msg) do { \
+ if (((thrd)->flags & PQ_IN_SCHEDQ) != 0) \
PANIC(msg); \
} while (0)
-#define _PQ_ASSERT_PROTECTED(msg) \
- PTHREAD_ASSERT((_thread_kern_kse_mailbox.km_curthread == NULL) || \
- ((_get_curthread())->sig_defer_count > 0), \
- msg);
#else
-#define _PQ_SET_ACTIVE()
-#define _PQ_CLEAR_ACTIVE()
-#define _PQ_ASSERT_ACTIVE(msg)
-#define _PQ_ASSERT_INACTIVE(msg)
-#define _PQ_ASSERT_IN_WAITQ(thrd, msg)
-#define _PQ_ASSERT_IN_PRIOQ(thrd, msg)
-#define _PQ_ASSERT_NOT_QUEUED(thrd, msg)
-#define _PQ_ASSERT_PROTECTED(msg)
+#define PQ_SET_ACTIVE(pq)
+#define PQ_CLEAR_ACTIVE(pq)
+#define PQ_ASSERT_ACTIVE(pq, msg)
+#define PQ_ASSERT_INACTIVE(pq, msg)
+#define PQ_ASSERT_IN_WAITQ(thrd, msg)
+#define PQ_ASSERT_IN_RUNQ(thrd, msg)
+#define PQ_ASSERT_NOT_QUEUED(thrd, msg)
#endif
@@ -123,10 +116,9 @@ _pq_init(pq_queue_t *pq)
pq->pq_lists[i].pl_prio = i;
pq->pq_lists[i].pl_queued = 0;
}
-
/* Initialize the priority queue: */
TAILQ_INIT(&pq->pq_queue);
- _PQ_CLEAR_ACTIVE();
+ pq->pq_flags = 0;
}
return (ret);
}
@@ -139,10 +131,9 @@ _pq_remove(pq_queue_t *pq, pthread_t pthread)
/*
* Make some assertions when debugging is enabled:
*/
- _PQ_ASSERT_INACTIVE("_pq_remove: pq_active");
- _PQ_SET_ACTIVE();
- _PQ_ASSERT_IN_PRIOQ(pthread, "_pq_remove: Not in priority queue");
- _PQ_ASSERT_PROTECTED("_pq_remove: prioq not protected!");
+ PQ_ASSERT_INACTIVE(pq, "_pq_remove: pq_active");
+ PQ_SET_ACTIVE(pq);
+ PQ_ASSERT_IN_RUNQ(pthread, "_pq_remove: Not in priority queue");
/*
* Remove this thread from priority list. Note that if
@@ -155,9 +146,9 @@ _pq_remove(pq_queue_t *pq, pthread_t pthread)
TAILQ_REMOVE(&pq->pq_lists[prio].pl_head, pthread, pqe);
	/* This thread is no longer in the priority queue. */
- pthread->flags &= ~PTHREAD_FLAGS_IN_PRIOQ;
+ pthread->flags &= ~THR_FLAGS_IN_RUNQ;
- _PQ_CLEAR_ACTIVE();
+ PQ_CLEAR_ACTIVE(pq);
}
@@ -167,34 +158,23 @@ _pq_insert_head(pq_queue_t *pq, pthread_t pthread)
int prio;
/*
- * Don't insert suspended threads into the priority queue.
- * The caller is responsible for setting the threads state.
+ * Make some assertions when debugging is enabled:
*/
- if ((pthread->flags & PTHREAD_FLAGS_SUSPENDED) != 0) {
- /* Make sure the threads state is suspended. */
- if (pthread->state != PS_SUSPENDED)
- PTHREAD_SET_STATE(pthread, PS_SUSPENDED);
- } else {
- /*
- * Make some assertions when debugging is enabled:
- */
- _PQ_ASSERT_INACTIVE("_pq_insert_head: pq_active");
- _PQ_SET_ACTIVE();
- _PQ_ASSERT_NOT_QUEUED(pthread,
- "_pq_insert_head: Already in priority queue");
- _PQ_ASSERT_PROTECTED("_pq_insert_head: prioq not protected!");
-
- prio = pthread->active_priority;
- TAILQ_INSERT_HEAD(&pq->pq_lists[prio].pl_head, pthread, pqe);
- if (pq->pq_lists[prio].pl_queued == 0)
- /* Insert the list into the priority queue: */
- pq_insert_prio_list(pq, prio);
-
- /* Mark this thread as being in the priority queue. */
- pthread->flags |= PTHREAD_FLAGS_IN_PRIOQ;
-
- _PQ_CLEAR_ACTIVE();
- }
+ PQ_ASSERT_INACTIVE(pq, "_pq_insert_head: pq_active");
+ PQ_SET_ACTIVE(pq);
+ PQ_ASSERT_NOT_QUEUED(pthread,
+ "_pq_insert_head: Already in priority queue");
+
+ prio = pthread->active_priority;
+ TAILQ_INSERT_HEAD(&pq->pq_lists[prio].pl_head, pthread, pqe);
+ if (pq->pq_lists[prio].pl_queued == 0)
+ /* Insert the list into the priority queue: */
+ pq_insert_prio_list(pq, prio);
+
+ /* Mark this thread as being in the priority queue. */
+ pthread->flags |= THR_FLAGS_IN_RUNQ;
+
+ PQ_CLEAR_ACTIVE(pq);
}
@@ -204,34 +184,23 @@ _pq_insert_tail(pq_queue_t *pq, pthread_t pthread)
int prio;
/*
- * Don't insert suspended threads into the priority queue.
- * The caller is responsible for setting the threads state.
+ * Make some assertions when debugging is enabled:
*/
- if ((pthread->flags & PTHREAD_FLAGS_SUSPENDED) != 0) {
- /* Make sure the threads state is suspended. */
- if (pthread->state != PS_SUSPENDED)
- PTHREAD_SET_STATE(pthread, PS_SUSPENDED);
- } else {
- /*
- * Make some assertions when debugging is enabled:
- */
- _PQ_ASSERT_INACTIVE("_pq_insert_tail: pq_active");
- _PQ_SET_ACTIVE();
- _PQ_ASSERT_NOT_QUEUED(pthread,
- "_pq_insert_tail: Already in priority queue");
- _PQ_ASSERT_PROTECTED("_pq_insert_tail: prioq not protected!");
-
- prio = pthread->active_priority;
- TAILQ_INSERT_TAIL(&pq->pq_lists[prio].pl_head, pthread, pqe);
- if (pq->pq_lists[prio].pl_queued == 0)
- /* Insert the list into the priority queue: */
- pq_insert_prio_list(pq, prio);
-
- /* Mark this thread as being in the priority queue. */
- pthread->flags |= PTHREAD_FLAGS_IN_PRIOQ;
-
- _PQ_CLEAR_ACTIVE();
- }
+ PQ_ASSERT_INACTIVE(pq, "_pq_insert_tail: pq_active");
+ PQ_SET_ACTIVE(pq);
+ PQ_ASSERT_NOT_QUEUED(pthread,
+ "_pq_insert_tail: Already in priority queue");
+
+ prio = pthread->active_priority;
+ TAILQ_INSERT_TAIL(&pq->pq_lists[prio].pl_head, pthread, pqe);
+ if (pq->pq_lists[prio].pl_queued == 0)
+ /* Insert the list into the priority queue: */
+ pq_insert_prio_list(pq, prio);
+
+ /* Mark this thread as being in the priority queue. */
+ pthread->flags |= THR_FLAGS_IN_RUNQ;
+
+ PQ_CLEAR_ACTIVE(pq);
}
@@ -244,9 +213,8 @@ _pq_first(pq_queue_t *pq)
/*
* Make some assertions when debugging is enabled:
*/
- _PQ_ASSERT_INACTIVE("_pq_first: pq_active");
- _PQ_SET_ACTIVE();
- _PQ_ASSERT_PROTECTED("_pq_first: prioq not protected!");
+ PQ_ASSERT_INACTIVE(pq, "_pq_first: pq_active");
+ PQ_SET_ACTIVE(pq);
while (((pql = TAILQ_FIRST(&pq->pq_queue)) != NULL) &&
(pthread == NULL)) {
@@ -259,21 +227,10 @@ _pq_first(pq_queue_t *pq)
/* Mark the list as not being in the queue: */
pql->pl_queued = 0;
- } else if ((pthread->flags & PTHREAD_FLAGS_SUSPENDED) != 0) {
- /*
- * This thread is suspended; remove it from the
- * list and ensure its state is suspended.
- */
- TAILQ_REMOVE(&pql->pl_head, pthread, pqe);
- PTHREAD_SET_STATE(pthread, PS_SUSPENDED);
-
- /* This thread is now longer in the priority queue. */
- pthread->flags &= ~PTHREAD_FLAGS_IN_PRIOQ;
- pthread = NULL;
}
}
- _PQ_CLEAR_ACTIVE();
+ PQ_CLEAR_ACTIVE(pq);
return (pthread);
}
@@ -286,8 +243,7 @@ pq_insert_prio_list(pq_queue_t *pq, int prio)
/*
* Make some assertions when debugging is enabled:
*/
- _PQ_ASSERT_ACTIVE("pq_insert_prio_list: pq_active");
- _PQ_ASSERT_PROTECTED("_pq_insert_prio_list: prioq not protected!");
+ PQ_ASSERT_ACTIVE(pq, "pq_insert_prio_list: pq_active");
/*
* The priority queue is in descending priority order. Start at
@@ -307,64 +263,3 @@ pq_insert_prio_list(pq_queue_t *pq, int prio)
/* Mark this list as being in the queue: */
pq->pq_lists[prio].pl_queued = 1;
}
-
-void
-_waitq_insert(pthread_t pthread)
-{
- pthread_t tid;
-
- /*
- * Make some assertions when debugging is enabled:
- */
- _PQ_ASSERT_INACTIVE("_waitq_insert: pq_active");
- _PQ_SET_ACTIVE();
- _PQ_ASSERT_NOT_QUEUED(pthread, "_waitq_insert: Already in queue");
-
- if (pthread->wakeup_time.tv_sec == -1)
- TAILQ_INSERT_TAIL(&_waitingq, pthread, pqe);
- else {
- tid = TAILQ_FIRST(&_waitingq);
- while ((tid != NULL) && (tid->wakeup_time.tv_sec != -1) &&
- ((tid->wakeup_time.tv_sec < pthread->wakeup_time.tv_sec) ||
- ((tid->wakeup_time.tv_sec == pthread->wakeup_time.tv_sec) &&
- (tid->wakeup_time.tv_nsec <= pthread->wakeup_time.tv_nsec))))
- tid = TAILQ_NEXT(tid, pqe);
- if (tid == NULL)
- TAILQ_INSERT_TAIL(&_waitingq, pthread, pqe);
- else
- TAILQ_INSERT_BEFORE(tid, pthread, pqe);
- }
- pthread->flags |= PTHREAD_FLAGS_IN_WAITQ;
-
- _PQ_CLEAR_ACTIVE();
-}
-
-void
-_waitq_remove(pthread_t pthread)
-{
- /*
- * Make some assertions when debugging is enabled:
- */
- _PQ_ASSERT_INACTIVE("_waitq_remove: pq_active");
- _PQ_SET_ACTIVE();
- _PQ_ASSERT_IN_WAITQ(pthread, "_waitq_remove: Not in queue");
-
- TAILQ_REMOVE(&_waitingq, pthread, pqe);
- pthread->flags &= ~PTHREAD_FLAGS_IN_WAITQ;
-
- _PQ_CLEAR_ACTIVE();
-}
-
-void
-_waitq_setactive(void)
-{
- _PQ_ASSERT_INACTIVE("_waitq_setactive: pq_active");
- _PQ_SET_ACTIVE();
-}
-
-void
-_waitq_clearactive(void)
-{
- _PQ_ASSERT_ACTIVE("_waitq_clearactive: ! pq_active");
- _PQ_CLEAR_ACTIVE();
-}
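The run queue manipulated above is an array of per-priority tail queues plus a list recording which priorities currently hold threads. A much-simplified, self-contained sketch of that shape (all toy_* names are hypothetical, and the real code additionally tracks which priority lists are queued):

#include <sys/queue.h>
#include <stdio.h>

#define TOY_MAX_PRIO	4

struct toy_thread {
	int			prio;
	TAILQ_ENTRY(toy_thread)	link;
};
TAILQ_HEAD(toy_list, toy_thread);

static struct toy_list toy_runq[TOY_MAX_PRIO];

static void
toy_init(void)
{
	int i;

	for (i = 0; i < TOY_MAX_PRIO; i++)
		TAILQ_INIT(&toy_runq[i]);
}

/* Insert at the tail of its priority list, like _pq_insert_tail(). */
static void
toy_insert_tail(struct toy_thread *t)
{
	TAILQ_INSERT_TAIL(&toy_runq[t->prio], t, link);
}

/* Return the highest-priority queued entry, like _pq_first(). */
static struct toy_thread *
toy_first(void)
{
	int i;

	for (i = TOY_MAX_PRIO - 1; i >= 0; i--)
		if (!TAILQ_EMPTY(&toy_runq[i]))
			return (TAILQ_FIRST(&toy_runq[i]));
	return (NULL);
}

int
main(void)
{
	struct toy_thread a, b;

	a.prio = 1;
	b.prio = 3;
	toy_init();
	toy_insert_tail(&a);
	toy_insert_tail(&b);
	printf("highest queued priority: %d\n", toy_first()->prio);
	return (0);
}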
diff --git a/lib/libpthread/thread/thr_private.h b/lib/libpthread/thread/thr_private.h
index 7c5cc87..41a6693 100644
--- a/lib/libpthread/thread/thr_private.h
+++ b/lib/libpthread/thread/thr_private.h
@@ -38,17 +38,9 @@
#define _THR_PRIVATE_H
/*
- * Evaluate the storage class specifier.
- */
-#ifdef GLOBAL_PTHREAD_PRIVATE
-#define SCLASS
-#else
-#define SCLASS extern
-#endif
-
-/*
* Include files.
*/
+#include <setjmp.h>
#include <signal.h>
#include <stdio.h>
#include <sys/queue.h>
@@ -57,110 +49,81 @@
#include <sys/cdefs.h>
#include <sys/kse.h>
#include <sched.h>
-#include <spinlock.h>
#include <ucontext.h>
+#include <unistd.h>
+#include <pthread.h>
#include <pthread_np.h>
+#include "ksd.h"
+#include "lock.h"
+#include "pthread_md.h"
+
+/*
+ * Evaluate the storage class specifier.
+ */
+#ifdef GLOBAL_PTHREAD_PRIVATE
+#define SCLASS
+#define SCLASS_PRESET(x...) = x
+#else
+#define SCLASS extern
+#define SCLASS_PRESET(x...)
+#endif
+
/*
* Kernel fatal error handler macro.
*/
-#define PANIC(string) _thread_exit(__FILE__,__LINE__,string)
+#define PANIC(string) _thr_exit(__FILE__,__LINE__,string)
/* Output debug messages like this: */
-#define stdout_debug(args...) _thread_printf(STDOUT_FILENO, args)
-#define stderr_debug(args...) _thread_printf(STDOUT_FILENO, args)
+#define stdout_debug(args...) _thread_printf(STDOUT_FILENO, ##args)
+#define stderr_debug(args...) _thread_printf(STDOUT_FILENO, ##args)
-/*
- * Priority queue manipulation macros (using pqe link):
- */
-#define PTHREAD_PRIOQ_INSERT_HEAD(thrd) _pq_insert_head(&_readyq,thrd)
-#define PTHREAD_PRIOQ_INSERT_TAIL(thrd) _pq_insert_tail(&_readyq,thrd)
-#define PTHREAD_PRIOQ_REMOVE(thrd) _pq_remove(&_readyq,thrd)
-#define PTHREAD_PRIOQ_FIRST() _pq_first(&_readyq)
+#define DBG_MUTEX 0x0001
+#define DBG_SIG 0x0002
-/*
- * Waiting queue manipulation macros (using pqe link):
- */
-#define PTHREAD_WAITQ_REMOVE(thrd) _waitq_remove(thrd)
-#define PTHREAD_WAITQ_INSERT(thrd) _waitq_insert(thrd)
-#if defined(_PTHREADS_INVARIANTS)
-#define PTHREAD_WAITQ_CLEARACTIVE() _waitq_clearactive()
-#define PTHREAD_WAITQ_SETACTIVE() _waitq_setactive()
-#else
-#define PTHREAD_WAITQ_CLEARACTIVE()
-#define PTHREAD_WAITQ_SETACTIVE()
-#endif
-
-/*
- * Work queue manipulation macros (using qe link):
- */
-#define PTHREAD_WORKQ_INSERT(thrd) do { \
- TAILQ_INSERT_TAIL(&_workq,thrd,qe); \
- (thrd)->flags |= PTHREAD_FLAGS_IN_WORKQ; \
-} while (0)
-#define PTHREAD_WORKQ_REMOVE(thrd) do { \
- TAILQ_REMOVE(&_workq,thrd,qe); \
- (thrd)->flags &= ~PTHREAD_FLAGS_IN_WORKQ; \
+#define THR_ASSERT(cond, msg) do { \
+ if (!(cond)) \
+ PANIC(msg); \
} while (0)
/*
* State change macro without scheduling queue change:
*/
-#define PTHREAD_SET_STATE(thrd, newstate) do { \
+#define THR_SET_STATE(thrd, newstate) do { \
(thrd)->state = newstate; \
(thrd)->fname = __FILE__; \
(thrd)->lineno = __LINE__; \
} while (0)
+
/*
- * State change macro with scheduling queue change - This must be
- * called with preemption deferred (see thread_kern_sched_[un]defer).
+ * Define the signals to be used for scheduling.
*/
-#if defined(_PTHREADS_INVARIANTS)
-#include <assert.h>
-#define PTHREAD_ASSERT(cond, msg) do { \
- if (!(cond)) \
- PANIC(msg); \
-} while (0)
-#define PTHREAD_ASSERT_NOT_IN_SYNCQ(thrd) \
- PTHREAD_ASSERT((((thrd)->flags & PTHREAD_FLAGS_IN_SYNCQ) == 0), \
- "Illegal call from signal handler");
-#define PTHREAD_NEW_STATE(thrd, newstate) do { \
- if (_thread_kern_new_state != 0) \
- PANIC("Recursive PTHREAD_NEW_STATE"); \
- _thread_kern_new_state = 1; \
- if ((thrd)->state != newstate) { \
- if ((thrd)->state == PS_RUNNING) { \
- PTHREAD_PRIOQ_REMOVE(thrd); \
- PTHREAD_SET_STATE(thrd, newstate); \
- PTHREAD_WAITQ_INSERT(thrd); \
- } else if (newstate == PS_RUNNING) { \
- PTHREAD_WAITQ_REMOVE(thrd); \
- PTHREAD_SET_STATE(thrd, newstate); \
- PTHREAD_PRIOQ_INSERT_TAIL(thrd); \
- } \
- } \
- _thread_kern_new_state = 0; \
-} while (0)
-#else
-#define PTHREAD_ASSERT(cond, msg)
-#define PTHREAD_ASSERT_NOT_IN_SYNCQ(thrd)
-#define PTHREAD_NEW_STATE(thrd, newstate) do { \
- if ((thrd)->state != newstate) { \
- if ((thrd)->state == PS_RUNNING) { \
- PTHREAD_PRIOQ_REMOVE(thrd); \
- PTHREAD_WAITQ_INSERT(thrd); \
- } else if (newstate == PS_RUNNING) { \
- PTHREAD_WAITQ_REMOVE(thrd); \
- PTHREAD_PRIOQ_INSERT_TAIL(thrd); \
- } \
- } \
- PTHREAD_SET_STATE(thrd, newstate); \
-} while (0)
-#endif
+#define _ITIMER_SCHED_TIMER ITIMER_PROF
+#define _SCHED_SIGNAL SIGPROF
+
+#define TIMESPEC_ADD(dst, src, val) \
+ do { \
+ (dst)->tv_sec = (src)->tv_sec + (val)->tv_sec; \
+ (dst)->tv_nsec = (src)->tv_nsec + (val)->tv_nsec; \
+		if ((dst)->tv_nsec >= 1000000000) {	\
+ (dst)->tv_sec++; \
+ (dst)->tv_nsec -= 1000000000; \
+ } \
+ } while (0)
+
+#define TIMESPEC_SUB(dst, src, val) \
+ do { \
+ (dst)->tv_sec = (src)->tv_sec - (val)->tv_sec; \
+ (dst)->tv_nsec = (src)->tv_nsec - (val)->tv_nsec; \
+ if ((dst)->tv_nsec < 0) { \
+ (dst)->tv_sec--; \
+ (dst)->tv_nsec += 1000000000; \
+ } \
+ } while (0)
/*
* Priority queues.
@@ -178,29 +141,167 @@ typedef struct pq_queue {
TAILQ_HEAD(, pq_list) pq_queue; /* queue of priority lists */
pq_list_t *pq_lists; /* array of all priority lists */
int pq_size; /* number of priority lists */
+#define PQF_ACTIVE 0x0001
+ int pq_flags;
} pq_queue_t;
+/*
+ * Each KSEG has a scheduling queue. For now, threads that exist in their
+ * own KSEG (system scope) will get a full priority queue. In the future
+ * this can be optimized for the single thread per KSEG case.
+ */
+struct sched_queue {
+ pq_queue_t sq_runq;
+ TAILQ_HEAD(, pthread) sq_waitq; /* waiting in userland */
+ TAILQ_HEAD(, pthread) sq_blockedq; /* waiting in kernel */
+};
+
+/* Used to maintain pending and active signals: */
+struct sigstatus {
+ siginfo_t *info; /* arg 2 to signal handler */
+ int pending; /* Is this a pending signal? */
+ int blocked; /*
+					 * This signal has occurred and hasn't
+ * yet been handled; ignore subsequent
+ * signals until the handler is done.
+ */
+ int signo;
+};
+
+typedef struct kse_thr_mailbox *kse_critical_t;
+
+struct kse_group;
+
+#define MAX_KSE_LOCKLEVEL 3
+struct kse {
+ struct kse_mailbox k_mbx; /* kernel kse mailbox */
+ /* -- location and order specific items for gdb -- */
+ struct pthread *k_curthread; /* current thread */
+ struct kse_group *k_kseg; /* parent KSEG */
+ struct sched_queue *k_schedq; /* scheduling queue */
+ /* -- end of location and order specific items -- */
+ TAILQ_ENTRY(kse) k_qe; /* link entry */
+ struct ksd k_ksd; /* KSE specific data */
+ /*
+ * Items that are only modified by the kse, or that otherwise
+ * don't need to be locked when accessed
+ */
+ struct lock k_lock;
+ struct lockuser k_lockusers[MAX_KSE_LOCKLEVEL];
+ int k_locklevel;
+ sigset_t k_sigmask;
+ struct sigstatus k_sigq[NSIG];
+ int k_check_sigq;
+ long k_resched; /* scheduling signal arrived */
+ int k_flags;
+#define KF_STARTED 0x0001 /* kernel kse created */
+#define KF_INITIALIZED 0x0002 /* initialized on 1st upcall */
+ int k_cpu; /* CPU ID when bound */
+ int k_done; /* this KSE is done */
+};
+
+/*
+ * Each KSE group contains one or more KSEs in which threads can run.
+ * At least for now, there is one scheduling queue per KSE group; KSEs
+ * within the same KSE group compete for threads from the same scheduling
+ * queue. A scope system thread has one KSE in one KSE group; the group
+ * does not use its scheduling queue.
+ */
+struct kse_group {
+ TAILQ_HEAD(, kse) kg_kseq; /* list of KSEs in group */
+ TAILQ_HEAD(, pthread) kg_threadq; /* list of threads in group */
+ TAILQ_ENTRY(kse_group) kg_qe; /* link entry */
+ struct sched_queue kg_schedq; /* scheduling queue */
+ struct lock kg_lock;
+ int kg_threadcount; /* # of assigned threads */
+ int kg_idle_kses;
+ int kg_flags;
+#define KGF_SINGLE_THREAD 0x0001 /* scope system kse group */
+#define KGF_SCHEDQ_INITED 0x0002 /* has an initialized schedq */
+};
+
+/*
+ * Lock acquire and release for KSEs.
+ */
+#define KSE_LOCK_ACQUIRE(kse, lck) \
+do { \
+ if ((kse)->k_locklevel >= MAX_KSE_LOCKLEVEL) \
+ PANIC("Exceeded maximum lock level"); \
+ else { \
+ (kse)->k_locklevel++; \
+ _lock_acquire((lck), \
+ &(kse)->k_lockusers[(kse)->k_locklevel - 1], 0); \
+ } \
+} while (0)
+
+#define KSE_LOCK_RELEASE(kse, lck) \
+do { \
+ if ((kse)->k_locklevel > 0) { \
+ _lock_release((lck), \
+ &(kse)->k_lockusers[(kse)->k_locklevel - 1]); \
+ (kse)->k_locklevel--; \
+ } \
+} while (0)
+
+/*
+ * Lock our own KSEG.
+ */
+#define KSE_LOCK(curkse) \
+ KSE_LOCK_ACQUIRE(curkse, &(curkse)->k_kseg->kg_lock)
+#define KSE_UNLOCK(curkse) \
+ KSE_LOCK_RELEASE(curkse, &(curkse)->k_kseg->kg_lock)
+
+/*
+ * Lock a potentially different KSEG.
+ */
+#define KSE_SCHED_LOCK(curkse, kseg) \
+ KSE_LOCK_ACQUIRE(curkse, &(kseg)->kg_lock)
+#define KSE_SCHED_UNLOCK(curkse, kseg) \
+ KSE_LOCK_RELEASE(curkse, &(kseg)->kg_lock)
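KSE_LOCK_ACQUIRE/KSE_LOCK_RELEASE bound lock nesting with k_locklevel and give each nesting depth its own lockuser slot; the KSE_LOCK and KSE_SCHED_LOCK wrappers only select which KSEG's kg_lock is taken. A minimal sketch of the intended pairing, assuming the structures and macros above are in scope (the helper itself is hypothetical, not part of this patch):

#include <sys/queue.h>

/* Hypothetical helper: move a thread from one KSE group to another. */
static void
kseg_requeue(struct kse *curkse, struct kse_group *from,
    struct kse_group *to, struct pthread *thd)
{
	KSE_SCHED_LOCK(curkse, from);		/* bumps curkse->k_locklevel */
	TAILQ_REMOVE(&from->kg_threadq, thd, kle);
	from->kg_threadcount--;
	KSE_SCHED_UNLOCK(curkse, from);		/* drops the level again */

	KSE_SCHED_LOCK(curkse, to);
	TAILQ_INSERT_TAIL(&to->kg_threadq, thd, kle);
	to->kg_threadcount++;
	KSE_SCHED_UNLOCK(curkse, to);
}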
+
+/*
+ * Waiting queue manipulation macros (using pqe link):
+ */
+#define KSE_WAITQ_REMOVE(kse, thrd) \
+do { \
+ if (((thrd)->flags & THR_FLAGS_IN_WAITQ) != 0) { \
+ TAILQ_REMOVE(&(kse)->k_schedq->sq_waitq, thrd, pqe); \
+ (thrd)->flags &= ~THR_FLAGS_IN_WAITQ; \
+ } \
+} while (0)
+#define KSE_WAITQ_INSERT(kse, thrd) kse_waitq_insert(thrd)
+#define KSE_WAITQ_FIRST(kse) TAILQ_FIRST(&(kse)->k_schedq->sq_waitq)
+
+#define KSE_SET_WAIT(kse) \
+ atomic_store_rel_int(&(kse)->k_mbx.km_flags, 1)
+
+#define KSE_CLEAR_WAIT(kse) \
+ atomic_set_acq_int(&(kse)->k_mbx.km_flags, 0)
+
+#define KSE_WAITING(kse) (kse)->k_mbx.km_flags != 0
+#define KSE_WAKEUP(kse) kse_wakeup(&(kse)->k_mbx)
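KSE_SET_WAIT advertises through the mailbox that a KSE is about to idle in the kernel, KSE_WAITING tests that flag, and KSE_WAKEUP asks the kernel to run the KSE again. The real protocol lives in thr_kern.c; a minimal sketch of how the macros are meant to pair (hypothetical helper, assuming the definitions above):

/* Hypothetical helper: kick a KSE only if it advertised that it is idle. */
static void
maybe_wakeup(struct kse *kse)
{
	if (KSE_WAITING(kse)) {
		KSE_CLEAR_WAIT(kse);	/* clear the mailbox flag first */
		KSE_WAKEUP(kse);	/* then ask the kernel to run it */
	}
}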
/*
* TailQ initialization values.
*/
#define TAILQ_INITIALIZER { NULL, NULL }
-/*
- * Mutex definitions.
+/*
+ * lock initialization values.
*/
-union pthread_mutex_data {
- void *m_ptr;
- int m_count;
-};
+#define LCK_INITIALIZER { NULL, NULL, LCK_DEFAULT }
struct pthread_mutex {
+ /*
+ * Lock for accesses to this structure.
+ */
+ struct lock m_lock;
enum pthread_mutextype m_type;
int m_protocol;
TAILQ_HEAD(mutex_head, pthread) m_queue;
struct pthread *m_owner;
- union pthread_mutex_data m_data;
long m_flags;
+ int m_count;
int m_refcount;
/*
@@ -221,11 +322,6 @@ struct pthread_mutex {
* Link for list of all mutexes a thread currently owns.
*/
TAILQ_ENTRY(pthread_mutex) m_qe;
-
- /*
- * Lock for accesses to this structure.
- */
- spinlock_t lock;
};
/*
@@ -238,10 +334,10 @@ struct pthread_mutex {
/*
* Static mutex initialization values.
*/
-#define PTHREAD_MUTEX_STATIC_INITIALIZER \
- { PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, TAILQ_INITIALIZER, \
- NULL, { NULL }, MUTEX_FLAGS_PRIVATE, 0, 0, 0, TAILQ_INITIALIZER, \
- _SPINLOCK_INITIALIZER }
+#define PTHREAD_MUTEX_STATIC_INITIALIZER \
+ { LCK_INITIALIZER, PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, \
+ TAILQ_INITIALIZER, NULL, MUTEX_FLAGS_PRIVATE, 0, 0, 0, 0, \
+ TAILQ_INITIALIZER }
struct pthread_mutex_attr {
enum pthread_mutextype m_type;
@@ -262,17 +358,15 @@ enum pthread_cond_type {
};
struct pthread_cond {
- enum pthread_cond_type c_type;
- TAILQ_HEAD(cond_head, pthread) c_queue;
- pthread_mutex_t c_mutex;
- void *c_data;
- long c_flags;
- int c_seqno;
-
/*
* Lock for accesses to this structure.
*/
- spinlock_t lock;
+ struct lock c_lock;
+ enum pthread_cond_type c_type;
+ TAILQ_HEAD(cond_head, pthread) c_queue;
+ struct pthread_mutex *c_mutex;
+ long c_flags;
+ long c_seqno;
};
struct pthread_cond_attr {
@@ -290,9 +384,9 @@ struct pthread_cond_attr {
/*
* Static cond initialization values.
*/
-#define PTHREAD_COND_STATIC_INITIALIZER \
- { COND_TYPE_FAST, TAILQ_INITIALIZER, NULL, NULL, \
- 0, 0, _SPINLOCK_INITIALIZER }
+#define PTHREAD_COND_STATIC_INITIALIZER \
+ { LCK_INITIALIZER, COND_TYPE_FAST, TAILQ_INITIALIZER, \
+ NULL, NULL, 0, 0 }
/*
* Semaphore definitions.
@@ -321,6 +415,7 @@ struct pthread_attr {
int sched_interval;
int prio;
int suspend;
+#define THR_STACK_USER 0x100 /* 0xFF reserved for <pthread.h> */
int flags;
void *arg_attr;
void (*cleanup_attr) ();
@@ -332,31 +427,20 @@ struct pthread_attr {
/*
* Thread creation state attributes.
*/
-#define PTHREAD_CREATE_RUNNING 0
-#define PTHREAD_CREATE_SUSPENDED 1
+#define THR_CREATE_RUNNING 0
+#define THR_CREATE_SUSPENDED 1
/*
* Miscellaneous definitions.
*/
-#define PTHREAD_STACK_DEFAULT 65536
-/*
- * Size of default red zone at the end of each stack. In actuality, this "red
- * zone" is merely an unmapped region, except in the case of the initial stack.
- * Since mmap() makes it possible to specify the maximum growth of a MAP_STACK
- * region, an unmapped gap between thread stacks achieves the same effect as
- * explicitly mapped red zones.
- * This is declared and initialized in uthread_init.c.
- */
-extern int _pthread_guard_default;
-
-extern int _pthread_page_size;
+#define THR_STACK_DEFAULT 65536
/*
* Maximum size of initial thread's stack. This perhaps deserves to be larger
* than the stacks of other threads, since many applications are likely to run
* almost entirely on this stack.
*/
-#define PTHREAD_STACK_INITIAL 0x100000
+#define THR_STACK_INITIAL 0x100000
/*
* Define the different priority ranges. All applications have thread
@@ -372,21 +456,20 @@ extern int _pthread_page_size;
* The approach taken is that, within each class, signal delivery
* always has priority over thread execution.
*/
-#define PTHREAD_DEFAULT_PRIORITY 15
-#define PTHREAD_MIN_PRIORITY 0
-#define PTHREAD_MAX_PRIORITY 31 /* 0x1F */
-#define PTHREAD_SIGNAL_PRIORITY 32 /* 0x20 */
-#define PTHREAD_RT_PRIORITY 64 /* 0x40 */
-#define PTHREAD_FIRST_PRIORITY PTHREAD_MIN_PRIORITY
-#define PTHREAD_LAST_PRIORITY \
- (PTHREAD_MAX_PRIORITY + PTHREAD_SIGNAL_PRIORITY + PTHREAD_RT_PRIORITY)
-#define PTHREAD_BASE_PRIORITY(prio) ((prio) & PTHREAD_MAX_PRIORITY)
+#define THR_DEFAULT_PRIORITY 15
+#define THR_MIN_PRIORITY 0
+#define THR_MAX_PRIORITY 31 /* 0x1F */
+#define THR_SIGNAL_PRIORITY 32 /* 0x20 */
+#define THR_RT_PRIORITY 64 /* 0x40 */
+#define THR_FIRST_PRIORITY THR_MIN_PRIORITY
+#define THR_LAST_PRIORITY \
+ (THR_MAX_PRIORITY + THR_SIGNAL_PRIORITY + THR_RT_PRIORITY)
+#define THR_BASE_PRIORITY(prio) ((prio) & THR_MAX_PRIORITY)
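The three bands compose by addition: a thread's base priority lives in 0 to 31, and signal delivery or realtime service adds THR_SIGNAL_PRIORITY or THR_RT_PRIORITY on top, while THR_BASE_PRIORITY masks the boost back off. A worked example under those definitions (illustrative only, not part of this patch):

#include <assert.h>

/* Hypothetical check of how the priority bands combine. */
static void
priority_example(void)
{
	int base = THR_DEFAULT_PRIORITY;		/* 15 */
	int boosted = base + THR_SIGNAL_PRIORITY;	/* 47: signal class */

	assert(THR_BASE_PRIORITY(boosted) == base);	/* 47 & 0x1F == 15 */
	assert(THR_LAST_PRIORITY == 127);		/* 31 + 32 + 64 */
}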
/*
* Clock resolution in microseconds.
*/
#define CLOCK_RES_USEC 10000
-#define CLOCK_RES_USEC_MIN 1000
/*
* Time slice period in microseconds.
@@ -394,16 +477,17 @@ extern int _pthread_page_size;
#define TIMESLICE_USEC 20000
/*
- * Define a thread-safe macro to get the current time of day
- * which is updated at regular intervals by the scheduling signal
- * handler.
+ * XXX - Define a thread-safe macro to get the current time of day
+ * which is updated at regular intervals by something.
+ *
+ * For now, we just make the system call to get the time.
*/
-#define GET_CURRENT_TOD(tv) \
- do { \
- tv.tv_sec = _sched_tod.tv_sec; \
- tv.tv_usec = _sched_tod.tv_usec; \
- } while (tv.tv_sec != _sched_tod.tv_sec)
-
+#define KSE_GET_TOD(curkse, tsp) \
+do { \
+ *tsp = (curkse)->k_mbx.km_timeofday; \
+ if ((tsp)->tv_sec == 0) \
+ clock_gettime(CLOCK_REALTIME, tsp); \
+} while (0)
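KSE_GET_TOD prefers the time of day the kernel writes into the KSE mailbox and falls back to clock_gettime() while that field is still zero. Combined with TIMESPEC_ADD it is enough to turn a relative timeout into an absolute wakeup time; a minimal sketch (hypothetical helper, assuming the definitions above are in scope):

/* Hypothetical helper: convert a relative timeout into wakeup_time. */
static void
thr_set_wakeup(struct kse *curkse, struct pthread *thd,
    const struct timespec *rel)
{
	struct timespec now;

	KSE_GET_TOD(curkse, &now);	/* mailbox TOD, or a syscall if unset */
	TIMESPEC_ADD(&thd->wakeup_time, &now, rel);
}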
struct pthread_rwlockattr {
int pshared;
@@ -422,13 +506,12 @@ struct pthread_rwlock {
*/
enum pthread_state {
PS_RUNNING,
+ PS_LOCKWAIT,
PS_MUTEX_WAIT,
PS_COND_WAIT,
PS_SLEEP_WAIT,
- PS_WAIT_WAIT,
PS_SIGSUSPEND,
PS_SIGWAIT,
- PS_SPINBLOCK,
PS_JOIN,
PS_SUSPENDED,
PS_DEAD,
@@ -437,19 +520,11 @@ enum pthread_state {
};
-/*
- * File descriptor locking definitions.
- */
-#define FD_READ 0x1
-#define FD_WRITE 0x2
-#define FD_RDWR (FD_READ | FD_WRITE)
-
union pthread_wait_data {
pthread_mutex_t mutex;
pthread_cond_t cond;
const sigset_t *sigwait; /* Waiting on a signal in sigwait */
- spinlock_t *spinlock;
- struct pthread *thread;
+ struct lock *lock;
};
/*
@@ -458,26 +533,37 @@ union pthread_wait_data {
*/
typedef void (*thread_continuation_t) (void *);
+/*
+ * This stores a thread's state prior to running a signal handler.
+ * It is used when a signal is delivered to a thread blocked in
+ * userland. If the signal handler returns normally, the thread's
+ * state is restored from here.
+ */
+struct pthread_sigframe {
+ int psf_flags;
+ int psf_interrupted;
+ int psf_signo;
+ enum pthread_state psf_state;
+ union pthread_wait_data psf_wait_data;
+ struct timespec psf_wakeup_time;
+ sigset_t psf_sigset;
+ sigset_t psf_sigmask;
+ int psf_seqno;
+};
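The sigframe captures the per-thread state a signal handler (or a longjmp out of one) could clobber while the thread was blocked in userland. A field-by-field sketch of the kind of save that precedes pushing a frame; the patch's real save/restore code is in thr_sig.c, so this helper is hypothetical:

/* Hypothetical helper: snapshot the state a handler might disturb. */
static void
sigframe_save(struct pthread *thd, struct pthread_sigframe *psf)
{
	psf->psf_flags = thd->flags;
	psf->psf_interrupted = thd->interrupted;
	psf->psf_signo = thd->signo;
	psf->psf_state = thd->state;
	psf->psf_wait_data = thd->data;
	psf->psf_wakeup_time = thd->wakeup_time;
	psf->psf_sigmask = thd->sigmask;
	psf->psf_seqno = thd->sigmask_seqno;
}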
+
struct join_status {
struct pthread *thread;
void *ret;
int error;
};
-struct pthread_state_data {
- struct timespec psd_wakeup_time;
- union pthread_wait_data psd_wait_data;
- enum pthread_state psd_state;
- int psd_flags;
- int psd_interrupted;
- int psd_sig_defer_count;
-};
-
struct pthread_specific_elem {
const void *data;
int seqno;
};
+
+#define MAX_THR_LOCKLEVEL 3
/*
* Thread structure.
*/
@@ -486,21 +572,27 @@ struct pthread {
* Magic value to help recognize a valid thread structure
* from an invalid one:
*/
-#define PTHREAD_MAGIC ((u_int32_t) 0xd09ba115)
+#define THR_MAGIC ((u_int32_t) 0xd09ba115)
u_int32_t magic;
char *name;
u_int64_t uniqueid; /* for gdb */
+ /* Queue entry for list of all threads: */
+ TAILQ_ENTRY(pthread) tle; /* link for all threads in process */
+ TAILQ_ENTRY(pthread) kle; /* link for all threads in KSE/KSEG */
+
+ /* Queue entry for GC lists: */
+ TAILQ_ENTRY(pthread) gcle;
+
/*
* Lock for accesses to this thread structure.
*/
- spinlock_t lock;
-
- /* Queue entry for list of all threads: */
- TAILQ_ENTRY(pthread) tle;
-
- /* Queue entry for list of dead threads: */
- TAILQ_ENTRY(pthread) dle;
+ struct lock lock;
+ struct lockuser lockusers[MAX_THR_LOCKLEVEL];
+ int locklevel;
+ kse_critical_t critical[MAX_KSE_LOCKLEVEL];
+ struct kse *kse;
+ struct kse_group *kseg;
/*
* Thread start routine, argument, stack pointer and thread
@@ -508,57 +600,67 @@ struct pthread {
*/
void *(*start_routine)(void *);
void *arg;
- void *stack;
struct pthread_attr attr;
/*
- * Machine context, including signal state.
+ * Thread mailbox.
*/
- struct kse_thr_mailbox mailbox;
+ struct kse_thr_mailbox tmbx;
+ int active; /* thread running */
+ int blocked; /* thread blocked in kernel */
+ int need_switchout;
+ int need_wakeup;
/*
+ * Used for tracking delivery of signal handlers.
+ */
+ struct pthread_sigframe *curframe;
+ siginfo_t siginfo[NSIG];
+
+ /*
* Cancelability flags - the lower 2 bits are used by cancel
* definitions in pthread.h
*/
-#define PTHREAD_AT_CANCEL_POINT 0x0004
-#define PTHREAD_CANCELLING 0x0008
-#define PTHREAD_CANCEL_NEEDED 0x0010
- int cancelflags;
+#define THR_AT_CANCEL_POINT 0x0004
+#define THR_CANCELLING 0x0008
+#define THR_CANCEL_NEEDED 0x0010
+ int cancelflags;
thread_continuation_t continuation;
- /* Currently pending signals. */
- sigset_t sigpend;
+ /*
+ * The thread's base and pending signal masks. The active
+ * signal mask is stored in the thread's context (in mailbox).
+ */
+ sigset_t sigmask;
+ sigset_t sigpend;
+ int sigmask_seqno;
+ int check_pending;
+ int refcount;
/* Thread state: */
enum pthread_state state;
- /* Scheduling clock when this thread was last made active. */
- long last_active;
-
- /* Scheduling clock when this thread was last made inactive. */
- long last_inactive;
-
/*
* Number of microseconds accumulated by this thread when
* time slicing is active.
*/
- long slice_usec;
+ long slice_usec;
/*
* Time to wake up thread. This is used for sleeping threads and
- * for any operation which may time out.
+ * for any operation which may time out (such as select).
*/
- struct timespec wakeup_time;
+ struct timespec wakeup_time;
/* TRUE if operation has timed out. */
- int timeout;
+ int timeout;
/*
* Error variable used instead of errno. The function __error()
* returns a pointer to this.
*/
- int error;
+ int error;
/*
* The joiner is the thread that is joining to this thread. The
@@ -573,28 +675,21 @@ struct pthread {
*
* o A queue of threads waiting for a mutex
* o A queue of threads waiting for a condition variable
- * o A queue of threads waiting for a file descriptor lock
- * o A queue of threads needing work done by the kernel thread
- * (waiting for a spinlock or file I/O)
- *
- * A thread can also be joining a thread (the joiner field above).
*
- * It must not be possible for a thread to belong to any of the
- * above queues while it is handling a signal. Signal handlers
- * may longjmp back to previous stack frames circumventing normal
- * control flow. This could corrupt queue integrity if the thread
- * retains membership in the queue. Therefore, if a thread is a
- * member of one of these queues when a signal handler is invoked,
- * it must remove itself from the queue before calling the signal
- * handler and reinsert itself after normal return of the handler.
+ * It is possible for a thread to belong to more than one of the
+ * above queues if it is handling a signal. A thread may only
+ * enter a mutex or condition variable queue when it is not
+	 * executing a signal handler.  If a thread is a member
+ * of one of these queues when a signal handler is invoked, it
+ * must be removed from the queue before invoking the handler
+ * and then added back to the queue after return from the handler.
*
* Use pqe for the scheduling queue link (both ready and waiting),
- * sqe for synchronization (mutex and condition variable) queue
- * links, and qe for all other links.
+ * sqe for synchronization (mutex, condition variable, and join)
+ * queue links, and qe for all other links.
*/
- TAILQ_ENTRY(pthread) pqe; /* priority queue link */
+ TAILQ_ENTRY(pthread) pqe; /* priority, wait queues link */
TAILQ_ENTRY(pthread) sqe; /* synchronization queue link */
- TAILQ_ENTRY(pthread) qe; /* all other queues link */
/* Wait data. */
union pthread_wait_data data;
@@ -603,40 +698,43 @@ struct pthread {
* Set to TRUE if a blocking operation was
* interrupted by a signal:
*/
- int interrupted;
+ int interrupted;
/* Signal number when in state PS_SIGWAIT: */
- int signo;
+ int signo;
/*
- * Set to non-zero when this thread has deferred signals.
- * We allow for recursive deferral.
+ * Set to non-zero when this thread has entered a critical
+ * region. We allow for recursive entries into critical regions.
*/
- int sig_defer_count;
-
- /* Miscellaneous flags; only set with signals deferred. */
- int flags;
-#define PTHREAD_FLAGS_PRIVATE 0x0001
-#define PTHREAD_EXITING 0x0002
-#define PTHREAD_FLAGS_IN_WAITQ 0x0004 /* in waiting queue using pqe link */
-#define PTHREAD_FLAGS_IN_PRIOQ 0x0008 /* in priority queue using pqe link */
-#define PTHREAD_FLAGS_IN_WORKQ 0x0010 /* in work queue using qe link */
-#define PTHREAD_FLAGS_IN_FILEQ 0x0020 /* in file lock queue using qe link */
- /* 0x0040 Unused. */
-#define PTHREAD_FLAGS_IN_CONDQ 0x0080 /* in condition queue using sqe link*/
-#define PTHREAD_FLAGS_IN_MUTEXQ 0x0100 /* in mutex queue using sqe link */
-#define PTHREAD_FLAGS_SUSPENDED 0x0200 /* thread is suspended */
-#define PTHREAD_FLAGS_TRACE 0x0400 /* for debugging purposes */
-#define PTHREAD_FLAGS_IN_SYNCQ \
- (PTHREAD_FLAGS_IN_CONDQ | PTHREAD_FLAGS_IN_MUTEXQ)
+ int critical_count;
/*
+ * Set to TRUE if this thread should yield after leaving a
+ * critical region to check for signals, messages, etc.
+ */
+ int critical_yield;
+
+ int sflags;
+#define THR_FLAGS_IN_SYNCQ 0x0001
+
+ /* Miscellaneous flags; only set with scheduling lock held. */
+ int flags;
+#define THR_FLAGS_PRIVATE 0x0001
+#define THR_FLAGS_IN_WAITQ 0x0002 /* in waiting queue using pqe link */
+#define THR_FLAGS_IN_RUNQ 0x0004 /* in run queue using pqe link */
+#define THR_FLAGS_EXITING 0x0008 /* thread is exiting */
+#define THR_FLAGS_SUSPENDED 0x0010 /* thread is suspended */
+#define THR_FLAGS_GC_SAFE 0x0020 /* thread safe for cleaning */
+#define THR_FLAGS_IN_TDLIST 0x0040 /* thread in all thread list */
+#define THR_FLAGS_IN_GCLIST 0x0080 /* thread in gc list */
+ /*
 	 * Base priority is the user settable and retrievable priority
* of the thread. It is only affected by explicit calls to
* set thread priority and upon thread creation via a thread
* attribute or default priority.
*/
- char base_priority;
+ char base_priority;
/*
* Inherited priority is the priority a thread inherits by
@@ -646,7 +744,7 @@ struct pthread {
* that is being waited on by any other thread whose priority
* is non-zero.
*/
- char inherited_priority;
+ char inherited_priority;
/*
* Active priority is always the maximum of the threads base
@@ -654,10 +752,10 @@ struct pthread {
* in either the base or inherited priority, the active
* priority must be recalculated.
*/
- char active_priority;
+ char active_priority;
/* Number of priority ceiling or protection mutexes owned. */
- int priority_mutex_count;
+ int priority_mutex_count;
/*
* Queue of currently owned mutexes.
@@ -675,212 +773,243 @@ struct pthread {
};
/*
- * Global variables for the uthread kernel.
+ * Critical regions can also be detected by looking at the thread's
+ * current lock level.  Ensure these macros increment and decrement
+ * the lock levels such that locks cannot be held with a lock level
+ * of 0.
*/
+#define THR_IN_CRITICAL(thrd) \
+ (((thrd)->locklevel > 0) || \
+ ((thrd)->critical_count > 0))
+
+#define THR_YIELD_CHECK(thrd) \
+do { \
+ if (((thrd)->critical_yield != 0) && \
+ !(THR_IN_CRITICAL(thrd))) \
+ _thr_sched_switch(thrd); \
+ else if (((thrd)->check_pending != 0) && \
+ !(THR_IN_CRITICAL(thrd))) \
+ _thr_sig_check_pending(thrd); \
+} while (0)
-SCLASS void *_usrstack
-#ifdef GLOBAL_PTHREAD_PRIVATE
-= (void *) USRSTACK;
-#else
-;
-#endif
-
-/* Kernel thread structure used when there are no running threads: */
-SCLASS struct pthread _thread_kern_thread;
+#define THR_LOCK_ACQUIRE(thrd, lck) \
+do { \
+ if ((thrd)->locklevel >= MAX_THR_LOCKLEVEL) \
+ PANIC("Exceeded maximum lock level"); \
+ else { \
+ (thrd)->locklevel++; \
+ _lock_acquire((lck), \
+ &(thrd)->lockusers[(thrd)->locklevel - 1], \
+ (thrd)->active_priority); \
+ } \
+} while (0)
-/* Ptr to the thread structure for the running thread: */
-SCLASS struct pthread * volatile _thread_run
-#ifdef GLOBAL_PTHREAD_PRIVATE
-= &_thread_kern_thread;
-#else
-;
-#endif
+#define THR_LOCK_RELEASE(thrd, lck) \
+do { \
+ if ((thrd)->locklevel > 0) { \
+ _lock_release((lck), \
+ &(thrd)->lockusers[(thrd)->locklevel - 1]); \
+ (thrd)->locklevel--; \
+ if ((thrd)->locklevel != 0) \
+ ; \
+ else if ((thrd)->critical_yield != 0) \
+ _thr_sched_switch(thrd); \
+ else if ((thrd)->check_pending != 0) \
+ _thr_sig_check_pending(thrd); \
+ } \
+} while (0)
-/* Ptr to the thread structure for the last user thread to run: */
-SCLASS struct pthread * volatile _last_user_thread
-#ifdef GLOBAL_PTHREAD_PRIVATE
-= &_thread_kern_thread;
-#else
-;
-#endif
+/*
+ * For now, threads will have their own lock separate from their
+ * KSE scheduling lock.
+ */
+#define THR_LOCK(thr) THR_LOCK_ACQUIRE(thr, &(thr)->lock)
+#define THR_UNLOCK(thr) THR_LOCK_RELEASE(thr, &(thr)->lock)
+#define THR_THREAD_LOCK(curthrd, thr) THR_LOCK_ACQUIRE(curthrd, &(thr)->lock)
+#define THR_THREAD_UNLOCK(curthrd, thr) THR_LOCK_RELEASE(curthrd, &(thr)->lock)
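THR_LOCK/THR_UNLOCK (and the THR_THREAD_* forms for another thread's lock) pair a low-level lock with the per-thread locklevel counter, and releasing the last level is where a deferred switch or pending-signal check runs. A minimal usage sketch (hypothetical helper, assuming the macros above):

/* Hypothetical helper: read a field protected by the target thread's lock. */
static int
thr_get_flags(struct pthread *curthread, struct pthread *thd)
{
	int flags;

	THR_THREAD_LOCK(curthread, thd);	/* locklevel++ on curthread */
	flags = thd->flags;
	THR_THREAD_UNLOCK(curthread, thd);	/* may yield once level hits 0 */
	return (flags);
}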
-/* List of all threads: */
-SCLASS TAILQ_HEAD(, pthread) _thread_list
-#ifdef GLOBAL_PTHREAD_PRIVATE
-= TAILQ_HEAD_INITIALIZER(_thread_list);
-#else
-;
-#endif
+/*
+ * Priority queue manipulation macros (using pqe link). We use
+ * the thread's kseg link instead of the kse link because a thread
+ * does not (currently) have a statically assigned kse.
+ */
+#define THR_RUNQ_INSERT_HEAD(thrd) \
+ _pq_insert_head(&(thrd)->kseg->kg_schedq.sq_runq, thrd)
+#define THR_RUNQ_INSERT_TAIL(thrd) \
+ _pq_insert_tail(&(thrd)->kseg->kg_schedq.sq_runq, thrd)
+#define THR_RUNQ_REMOVE(thrd) \
+ _pq_remove(&(thrd)->kseg->kg_schedq.sq_runq, thrd)
+#define THR_RUNQ_FIRST() \
+ _pq_first(&(thrd)->kseg->kg_schedq.sq_runq)
-/* Time of day at last scheduling timer signal: */
-SCLASS struct timeval volatile _sched_tod
-#ifdef GLOBAL_PTHREAD_PRIVATE
-= { 0, 0 };
-#else
-;
-#endif
+/*
+ * Macros to insert/remove threads to the all thread list and
+ * the gc list.
+ */
+#define THR_LIST_ADD(thrd) do { \
+ if (((thrd)->flags & THR_FLAGS_IN_TDLIST) == 0) { \
+ TAILQ_INSERT_HEAD(&_thread_list, thrd, tle); \
+ (thrd)->flags |= THR_FLAGS_IN_TDLIST; \
+ } \
+} while (0)
+#define THR_LIST_REMOVE(thrd) do { \
+ if (((thrd)->flags & THR_FLAGS_IN_TDLIST) != 0) { \
+ TAILQ_REMOVE(&_thread_list, thrd, tle); \
+ (thrd)->flags &= ~THR_FLAGS_IN_TDLIST; \
+ } \
+} while (0)
+#define THR_GCLIST_ADD(thrd) do { \
+ if (((thrd)->flags & THR_FLAGS_IN_GCLIST) == 0) { \
+ TAILQ_INSERT_HEAD(&_thread_gc_list, thrd, tle); \
+ (thrd)->flags |= THR_FLAGS_IN_GCLIST; \
+ } \
+} while (0)
+#define THR_GCLIST_REMOVE(thrd) do { \
+ if (((thrd)->flags & THR_FLAGS_IN_GCLIST) != 0) { \
+ TAILQ_REMOVE(&_thread_gc_list, thrd, tle); \
+ (thrd)->flags &= ~THR_FLAGS_IN_GCLIST; \
+ } \
+} while (0)
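The list macros are flag-guarded so that a double insertion or removal is harmless. A minimal sketch of how an exiting thread would migrate from the all-threads list to the GC list (hypothetical helper; the real code does this with _thread_list_lock held):

/* Hypothetical helper: hand a finished thread over to the garbage collector. */
static void
thr_retire(struct pthread *thd)
{
	THR_LIST_REMOVE(thd);			/* leave the all-threads list */
	thd->flags |= THR_FLAGS_GC_SAFE;	/* safe to clean up */
	THR_GCLIST_ADD(thd);			/* GC thread will reap it */
}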
/*
- * Current scheduling timer ticks; used as resource usage.
+ * Locking the scheduling queue for another thread uses that thread's
+ * KSEG lock.
*/
-SCLASS unsigned int volatile _sched_ticks
-#ifdef GLOBAL_PTHREAD_PRIVATE
-= 0;
-#else
-;
-#endif
+#define THR_SCHED_LOCK(curthr, thr) do { \
+ (curthr)->critical[(curthr)->locklevel] = _kse_critical_enter(); \
+ (curthr)->locklevel++; \
+ KSE_SCHED_LOCK((curthr)->kse, (thr)->kseg); \
+} while (0)
-/* Dead threads: */
-SCLASS TAILQ_HEAD(, pthread) _dead_list
-#ifdef GLOBAL_PTHREAD_PRIVATE
-= TAILQ_HEAD_INITIALIZER(_dead_list);
-#else
-;
-#endif
+#define THR_SCHED_UNLOCK(curthr, thr) do { \
+ KSE_SCHED_UNLOCK((curthr)->kse, (thr)->kseg); \
+ (curthr)->locklevel--; \
+ _kse_critical_leave((curthr)->critical[(curthr)->locklevel]); \
+ if ((curthr)->locklevel == 0) \
+ THR_YIELD_CHECK(curthr); \
+} while (0)
-/* Initial thread: */
-SCLASS struct pthread *_thread_initial
-#ifdef GLOBAL_PTHREAD_PRIVATE
-= NULL;
-#else
-;
-#endif
+#define THR_CRITICAL_ENTER(thr) (thr)->critical_count++
+#define THR_CRITICAL_LEAVE(thr) do { \
+ (thr)->critical_count--; \
+ if (((thr)->critical_yield != 0) && \
+ ((thr)->critical_count == 0)) { \
+ (thr)->critical_yield = 0; \
+ _thr_sched_switch(thr); \
+ } \
+} while (0)
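critical_count tracks library-level critical regions (as opposed to locklevel, which counts held low-level locks); a yield requested while inside a region is deferred until the outermost THR_CRITICAL_LEAVE. A minimal sketch (hypothetical helper, assuming the macros above):

/* Hypothetical helper: do a small non-preemptible update, then allow a yield. */
static void
bump_counter(struct pthread *curthread, int *counter)
{
	THR_CRITICAL_ENTER(curthread);	/* defer any requested switch */
	(*counter)++;
	THR_CRITICAL_LEAVE(curthread);	/* may call _thr_sched_switch() here */
}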
-/* Default thread attributes: */
-SCLASS struct pthread_attr pthread_attr_default
-#ifdef GLOBAL_PTHREAD_PRIVATE
-= { SCHED_RR, 0, TIMESLICE_USEC, PTHREAD_DEFAULT_PRIORITY,
- PTHREAD_CREATE_RUNNING, PTHREAD_CREATE_JOINABLE, NULL, NULL, NULL,
- PTHREAD_STACK_DEFAULT, -1 };
-#else
-;
-#endif
+#define THR_IS_ACTIVE(thrd) \
+ ((thrd)->kse != NULL) && ((thrd)->kse->k_curthread == (thrd))
-/* Default mutex attributes: */
-SCLASS struct pthread_mutex_attr pthread_mutexattr_default
-#ifdef GLOBAL_PTHREAD_PRIVATE
-= { PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, 0 };
-#else
-;
-#endif
+#define THR_IN_SYNCQ(thrd) (((thrd)->sflags & THR_FLAGS_IN_SYNCQ) != 0)
+
+/*
+ * Global variables for the pthread kernel.
+ */
-/* Default condition variable attributes: */
-SCLASS struct pthread_cond_attr pthread_condattr_default
-#ifdef GLOBAL_PTHREAD_PRIVATE
-= { COND_TYPE_FAST, 0 };
-#else
-;
-#endif
+SCLASS void *_usrstack SCLASS_PRESET(NULL);
+SCLASS struct kse *_kse_initial SCLASS_PRESET(NULL);
+SCLASS struct pthread *_thr_initial SCLASS_PRESET(NULL);
-SCLASS int _clock_res_usec /* Clock resolution in usec. */
-#ifdef GLOBAL_PTHREAD_PRIVATE
-= CLOCK_RES_USEC;
-#else
-;
-#endif
+/* List of all threads: */
+SCLASS TAILQ_HEAD(, pthread) _thread_list
+ SCLASS_PRESET(TAILQ_HEAD_INITIALIZER(_thread_list));
-/* Garbage collector mutex and condition variable. */
-SCLASS pthread_mutex_t _gc_mutex
-#ifdef GLOBAL_PTHREAD_PRIVATE
-= NULL
-#endif
-;
-SCLASS pthread_cond_t _gc_cond
-#ifdef GLOBAL_PTHREAD_PRIVATE
-= NULL
-#endif
-;
+/* List of threads needing GC: */
+SCLASS TAILQ_HEAD(, pthread) _thread_gc_list
+ SCLASS_PRESET(TAILQ_HEAD_INITIALIZER(_thread_gc_list));
-/*
- * Array of signal actions for this process.
- */
-SCLASS struct sigaction _thread_sigact[NSIG];
+/* Default thread attributes: */
+SCLASS struct pthread_attr _pthread_attr_default
+ SCLASS_PRESET({
+ SCHED_RR, 0, TIMESLICE_USEC, THR_DEFAULT_PRIORITY,
+ THR_CREATE_RUNNING, PTHREAD_CREATE_JOINABLE, NULL,
+ NULL, NULL, THR_STACK_DEFAULT
+ });
-/*
- * Scheduling queues:
- */
-SCLASS pq_queue_t _readyq;
-SCLASS TAILQ_HEAD(, pthread) _waitingq;
+/* Default mutex attributes: */
+SCLASS struct pthread_mutex_attr _pthread_mutexattr_default
+ SCLASS_PRESET({PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, 0 });
-/*
- * Work queue:
- */
-SCLASS TAILQ_HEAD(, pthread) _workq;
+/* Default condition variable attributes: */
+SCLASS struct pthread_cond_attr _pthread_condattr_default
+ SCLASS_PRESET({COND_TYPE_FAST, 0});
-/* Tracks the number of threads blocked while waiting for a spinlock. */
-SCLASS volatile int _spinblock_count
-#ifdef GLOBAL_PTHREAD_PRIVATE
-= 0
-#endif
-;
+/* Clock resolution in usec. */
+SCLASS int _clock_res_usec SCLASS_PRESET(CLOCK_RES_USEC);
-/* Thread switch hook. */
-SCLASS pthread_switch_routine_t _sched_switch_hook
-#ifdef GLOBAL_PTHREAD_PRIVATE
-= NULL
-#endif
-;
+/* Array of signal actions for this process: */
+SCLASS struct sigaction _thread_sigact[NSIG];
/*
- * Signals pending and masked.
+ * Array of counts of dummy handlers for SIG_DFL signals. This is used to
+ * ensure that there is always a dummy signal handler installed while there
+ * is a thread sigwait()ing on the corresponding signal.
*/
-SCLASS sigset_t _thread_sigpending;
-SCLASS sigset_t _thread_sigmask;
+SCLASS int _thread_dfl_count[NSIG];
/*
- * Declare the kernel scheduler jump buffer and stack:
+ * Lock for above count of dummy handlers and for the process signal
+ * mask and pending signal sets.
*/
-SCLASS struct kse_mailbox _thread_kern_kse_mailbox;
+SCLASS struct lock _thread_signal_lock;
-SCLASS void * _thread_kern_sched_stack
-#ifdef GLOBAL_PTHREAD_PRIVATE
-= NULL
-#endif
-;
+/* Pending signals and mask for this process: */
+SCLASS sigset_t _thr_proc_sigpending;
+SCLASS sigset_t _thr_proc_sigmask SCLASS_PRESET({{0, 0, 0, 0}});
+SCLASS siginfo_t _thr_proc_siginfo[NSIG];
-/*
- * Delcare the idle context.
- */
-SCLASS struct kse_thr_mailbox _idle_thr_mailbox;
+SCLASS pid_t _thr_pid SCLASS_PRESET(0);
-SCLASS void * _idle_thr_stack
-#ifdef GLOBAL_PTHREAD_PRIVATE
-= NULL
-#endif
-;
+/* Garbage collector lock. */
+SCLASS struct lock _gc_lock;
+SCLASS int _gc_check SCLASS_PRESET(0);
+SCLASS pthread_t _gc_thread;
+SCLASS struct lock _mutex_static_lock;
+SCLASS struct lock _rwlock_static_lock;
+SCLASS struct lock _keytable_lock;
+SCLASS struct lock _thread_list_lock;
+SCLASS int _thr_guard_default;
+SCLASS int _thr_page_size;
-/* Used for _PTHREADS_INVARIANTS checking. */
-SCLASS int _thread_kern_new_state
-#ifdef GLOBAL_PTHREAD_PRIVATE
-= 0
-#endif
-;
+SCLASS int _thr_debug_flags SCLASS_PRESET(0);
-/* Undefine the storage class specifier: */
+/* Undefine the storage class and preset specifiers: */
#undef SCLASS
+#undef SCLASS_PRESET
+
/*
* Function prototype definitions.
*/
__BEGIN_DECLS
-char *__ttyname_basic(int);
-char *__ttyname_r_basic(int, char *, size_t);
-char *ttyname_r(int, char *, size_t);
-void _cond_wait_backout(pthread_t);
-int _find_thread(pthread_t);
+int _cond_reinit(pthread_cond_t *);
+void _cond_wait_backout(struct pthread *);
struct pthread *_get_curthread(void);
-void _set_curthread(struct pthread *);
-void *_thread_stack_alloc(size_t, size_t);
-void _thread_stack_free(void *, size_t, size_t);
-int _thread_create(pthread_t *,const pthread_attr_t *,void *(*start_routine)(void *),void *,pthread_t);
+struct kse *_get_curkse(void);
+void _set_curkse(struct kse *);
+struct kse *_kse_alloc(struct kse *);
+kse_critical_t _kse_critical_enter(void);
+void _kse_critical_leave(kse_critical_t);
+void _kse_free(struct kse *, struct kse *);
+void _kse_init();
+struct kse_group *_kseg_alloc(struct kse *);
+void _kse_lock_wait(struct lock *, struct lockuser *lu);
+void _kse_lock_wakeup(struct lock *, struct lockuser *lu);
+void _kse_sig_check_pending(struct kse *);
+void _kse_single_thread(struct pthread *);
+void _kse_start(struct kse *);
+void _kse_setthreaded(int);
+int _kse_isthreaded(void);
int _mutex_cv_lock(pthread_mutex_t *);
int _mutex_cv_unlock(pthread_mutex_t *);
-void _mutex_lock_backout(pthread_t);
-void _mutex_notify_priochange(pthread_t);
-int _mutex_reinit(pthread_mutex_t *);
-void _mutex_unlock_private(pthread_t);
-int _cond_reinit(pthread_cond_t *);
+void _mutex_lock_backout(struct pthread *);
+void _mutex_notify_priochange(struct pthread *, struct pthread *, int);
+int _mutex_reinit(struct pthread_mutex *);
+void _mutex_unlock_private(struct pthread *);
+void _libpthread_init(struct pthread *);
int _pq_alloc(struct pq_queue *, int, int);
int _pq_init(struct pq_queue *);
void _pq_remove(struct pq_queue *pq, struct pthread *);
@@ -899,50 +1028,61 @@ int _pthread_mutexattr_init(pthread_mutexattr_t *);
int _pthread_mutexattr_destroy(pthread_mutexattr_t *);
int _pthread_mutexattr_settype(pthread_mutexattr_t *, int);
int _pthread_once(pthread_once_t *, void (*) (void));
-pthread_t _pthread_self(void);
+struct pthread *_pthread_self(void);
int _pthread_setspecific(pthread_key_t, const void *);
-void _waitq_insert(pthread_t pthread);
-void _waitq_remove(pthread_t pthread);
-#if defined(_PTHREADS_INVARIANTS)
-void _waitq_setactive(void);
-void _waitq_clearactive(void);
-#endif
-void _thread_exit(char *, int, char *);
-void _thread_exit_cleanup(void);
-void *_thread_cleanup(pthread_t);
+struct pthread *_thr_alloc(struct kse *);
+void _thr_exit(char *, int, char *);
+void _thr_exit_cleanup(void);
+void _thr_lock_wait(struct lock *lock, struct lockuser *lu);
+void _thr_lock_wakeup(struct lock *lock, struct lockuser *lu);
+int _thr_ref_add(struct pthread *, struct pthread *, int);
+void _thr_ref_delete(struct pthread *, struct pthread *);
+void _thr_schedule_add(struct pthread *, struct pthread *);
+void _thr_schedule_remove(struct pthread *, struct pthread *);
+void _thr_setrunnable(struct pthread *curthread, struct pthread *thread);
+void _thr_setrunnable_unlocked(struct pthread *thread);
+void _thr_sig_add(struct pthread *, int, siginfo_t *, ucontext_t *);
+void _thr_sig_dispatch(struct kse *, int, siginfo_t *);
+int _thr_stack_alloc(struct pthread_attr *);
+void _thr_stack_free(struct pthread_attr *);
+void _thr_exit_cleanup(void);
+void _thr_free(struct kse *, struct pthread *);
+void _thr_panic_exit(char *, int, char *);
void _thread_cleanupspecific(void);
void _thread_dump_info(void);
-void _thread_init(void);
-void _thread_kern_idle(void);
-void _thread_kern_sched(void);
-void _thread_kern_scheduler(struct kse_mailbox *);
-void _thread_kern_sched_state(enum pthread_state, char *fname, int lineno);
-void _thread_kern_sched_state_unlock(enum pthread_state state,
- spinlock_t *lock, char *fname, int lineno);
-void _thread_kern_set_timeout(const struct timespec *);
-void _thread_kern_sig_defer(void);
-void _thread_kern_sig_undefer(void);
-void _thread_sig_handler(int, siginfo_t *, ucontext_t *);
-void _thread_printf(int fd, const char *, ...);
-void _thread_start(void);
-void _thread_seterrno(pthread_t, int);
-int _thread_enter_uts(struct kse_thr_mailbox *tm, struct kse_mailbox *km);
-int _thread_switch(struct kse_thr_mailbox *, struct kse_thr_mailbox **);
-pthread_addr_t _thread_gc(pthread_addr_t);
-void _thread_enter_cancellation_point(void);
-void _thread_leave_cancellation_point(void);
-void _thread_cancellation_point(void);
-
+void _thread_printf(int, const char *, ...);
+void _thr_sched_frame(struct pthread_sigframe *);
+void _thr_sched_switch(struct pthread *);
+void _thr_set_timeout(const struct timespec *);
+void _thr_sig_handler(int, siginfo_t *, ucontext_t *);
+void _thr_sig_check_pending(struct pthread *);
+void _thr_sig_rundown(struct pthread *, ucontext_t *,
+ struct pthread_sigframe *);
+void _thr_sig_send(struct pthread *pthread, int sig);
+void _thr_sig_wrapper(void);
+void _thr_sigframe_restore(struct pthread *thread, struct pthread_sigframe *psf);
+void _thr_seterrno(struct pthread *, int);
+void _thr_enter_cancellation_point(struct pthread *);
+void _thr_leave_cancellation_point(struct pthread *);
+
+/* XXX - Stuff that goes away when my sources get more up to date. */
+/* #include <sys/kse.h> */
+#ifdef SYS_KSE_H
+int __sys_kse_create(struct kse_mailbox *, int);
+int __sys_kse_thr_wakeup(struct kse_mailbox *);
+int __sys_kse_exit(struct kse_mailbox *);
+int __sys_kse_release(struct kse_mailbox *);
+#endif
/* #include <sys/aio.h> */
#ifdef _SYS_AIO_H_
int __sys_aio_suspend(const struct aiocb * const[], int, const struct timespec *);
#endif
-/* #include <sys/event.h> */
-#ifdef _SYS_EVENT_H_
-int __sys_kevent(int, const struct kevent *, int, struct kevent *,
- int, const struct timespec *);
+/* #include <fcntl.h> */
+#ifdef _SYS_FCNTL_H_
+int __sys_fcntl(int, int, ...);
+int __sys_open(const char *, int, ...);
#endif
/* #include <sys/ioctl.h> */
@@ -950,95 +1090,60 @@ int __sys_kevent(int, const struct kevent *, int, struct kevent *,
int __sys_ioctl(int, unsigned long, ...);
#endif
-/* #include <sys/mman.h> */
-#ifdef _SYS_MMAN_H_
-int __sys_msync(void *, size_t, int);
+/* #include <sched.h> */
+#ifdef _SCHED_H_
+int __sys_sched_yield(void);
#endif
-/* #include <sys/mount.h> */
-#ifdef _SYS_MOUNT_H_
-int __sys_fstatfs(int, struct statfs *);
+/* #include <signal.h> */
+#ifdef _SIGNAL_H_
+int __sys_kill(pid_t, int);
+int __sys_sigaction(int, const struct sigaction *, struct sigaction *);
+int __sys_sigpending(sigset_t *);
+int __sys_sigprocmask(int, const sigset_t *, sigset_t *);
+int __sys_sigsuspend(const sigset_t *);
+int __sys_sigreturn(ucontext_t *);
+int __sys_sigaltstack(const struct sigaltstack *, struct sigaltstack *);
#endif
/* #include <sys/socket.h> */
#ifdef _SYS_SOCKET_H_
-int __sys_accept(int, struct sockaddr *, socklen_t *);
-int __sys_bind(int, const struct sockaddr *, socklen_t);
-int __sys_connect(int, const struct sockaddr *, socklen_t);
-int __sys_getpeername(int, struct sockaddr *, socklen_t *);
-int __sys_getsockname(int, struct sockaddr *, socklen_t *);
-int __sys_getsockopt(int, int, int, void *, socklen_t *);
-int __sys_listen(int, int);
-ssize_t __sys_recvfrom(int, void *, size_t, int, struct sockaddr *, socklen_t *);
-ssize_t __sys_recvmsg(int, struct msghdr *, int);
-int __sys_sendfile(int, int, off_t, size_t, struct sf_hdtr *, off_t *, int);
-ssize_t __sys_sendmsg(int, const struct msghdr *, int);
-ssize_t __sys_sendto(int, const void *,size_t, int, const struct sockaddr *, socklen_t);
-int __sys_setsockopt(int, int, int, const void *, socklen_t);
-int __sys_shutdown(int, int);
-int __sys_socket(int, int, int);
-int __sys_socketpair(int, int, int, int *);
-#endif
-
-/* #include <sys/stat.h> */
-#ifdef _SYS_STAT_H_
-int __sys_fchflags(int, u_long);
-int __sys_fchmod(int, mode_t);
-int __sys_fstat(int, struct stat *);
+int __sys_sendfile(int, int, off_t, size_t, struct sf_hdtr *,
+ off_t *, int);
#endif
/* #include <sys/uio.h> */
-#ifdef _SYS_UIO_H_
-ssize_t __sys_readv(int, const struct iovec *, int);
-ssize_t __sys_writev(int, const struct iovec *, int);
-#endif
-
-/* #include <sys/wait.h> */
-#ifdef WNOHANG
-pid_t __sys_wait4(pid_t, int *, int, struct rusage *);
+#ifdef _SYS_UIO_H_
+ssize_t __sys_readv(int, const struct iovec *, int);
+ssize_t __sys_writev(int, const struct iovec *, int);
#endif
-/* #include <dirent.h> */
-#ifdef _DIRENT_H_
-int __sys_getdirentries(int, char *, int, long *);
+/* #include <time.h> */
+#ifdef _TIME_H_
+int __sys_nanosleep(const struct timespec *, struct timespec *);
#endif
-/* #include <fcntl.h> */
-#ifdef _SYS_FCNTL_H_
-int __sys_fcntl(int, int, ...);
-int __sys_flock(int, int);
-int __sys_open(const char *, int, ...);
+/* #include <unistd.h> */
+#ifdef _UNISTD_H_
+int __sys_close(int);
+int __sys_execve(const char *, char * const *, char * const *);
+int __sys_fork(void);
+int __sys_fsync(int);
+pid_t __sys_getpid(void);
+int __sys_select(int, fd_set *, fd_set *, fd_set *, struct timeval *);
+ssize_t __sys_read(int, void *, size_t);
+ssize_t __sys_write(int, const void *, size_t);
+void __sys_exit(int);
#endif
/* #include <poll.h> */
#ifdef _SYS_POLL_H_
-int __sys_poll(struct pollfd *, unsigned, int);
-#endif
-
-/* #include <signal.h> */
-#ifdef _SIGNAL_H_
-int __sys_sigaction(int, const struct sigaction *, struct sigaction *);
-int __sys_sigaltstack(const struct sigaltstack *, struct sigaltstack *);
-int __sys_sigprocmask(int, const sigset_t *, sigset_t *);
-int __sys_sigreturn(ucontext_t *);
+int __sys_poll(struct pollfd *, unsigned, int);
#endif
-/* #include <unistd.h> */
-#ifdef _UNISTD_H_
-int __sys_close(int);
-int __sys_dup(int);
-int __sys_dup2(int, int);
-int __sys_execve(const char *, char * const *, char * const *);
-void __sys_exit(int);
-int __sys_fchown(int, uid_t, gid_t);
-pid_t __sys_fork(void);
-long __sys_fpathconf(int, int);
-int __sys_fsync(int);
-int __sys_pipe(int *);
-ssize_t __sys_read(int, void *, size_t);
-ssize_t __sys_write(int, const void *, size_t);
+/* #include <sys/mman.h> */
+#ifdef _SYS_MMAN_H_
+int __sys_msync(void *, size_t, int);
#endif
-__END_DECLS
-
-#endif /* !_PTHREAD_PRIVATE_H */
+#endif /* !_THR_PRIVATE_H */
diff --git a/lib/libpthread/thread/thr_pselect.c b/lib/libpthread/thread/thr_pselect.c
index af22337..c80a1cf 100644
--- a/lib/libpthread/thread/thr_pselect.c
+++ b/lib/libpthread/thread/thr_pselect.c
@@ -44,11 +44,12 @@ int
pselect(int count, fd_set *rfds, fd_set *wfds, fd_set *efds,
const struct timespec *timo, const sigset_t *mask)
{
+ struct pthread *curthread = _get_curthread();
int ret;
- _thread_enter_cancellation_point();
+ _thr_enter_cancellation_point(curthread);
ret = __pselect(count, rfds, wfds, efds, timo, mask);
- _thread_leave_cancellation_point();
+ _thr_leave_cancellation_point(curthread);
return (ret);
}
diff --git a/lib/libpthread/thread/thr_read.c b/lib/libpthread/thread/thr_read.c
index 8d095c4..34dabd3 100644
--- a/lib/libpthread/thread/thr_read.c
+++ b/lib/libpthread/thread/thr_read.c
@@ -45,11 +45,12 @@ __weak_reference(__read, read);
ssize_t
__read(int fd, void *buf, size_t nbytes)
{
+ struct pthread *curthread = _get_curthread();
ssize_t ret;
- _thread_enter_cancellation_point();
+ _thr_enter_cancellation_point(curthread);
ret = __sys_read(fd, buf, nbytes);
- _thread_leave_cancellation_point();
+ _thr_leave_cancellation_point(curthread);
return ret;
}
diff --git a/lib/libpthread/thread/thr_readv.c b/lib/libpthread/thread/thr_readv.c
index c3b03eb..3a8823f 100644
--- a/lib/libpthread/thread/thr_readv.c
+++ b/lib/libpthread/thread/thr_readv.c
@@ -45,11 +45,12 @@ __weak_reference(__readv, readv);
ssize_t
__readv(int fd, const struct iovec *iov, int iovcnt)
{
+ struct pthread *curthread = _get_curthread();
ssize_t ret;
- _thread_enter_cancellation_point();
+ _thr_enter_cancellation_point(curthread);
ret = __sys_readv(fd, iov, iovcnt);
- _thread_leave_cancellation_point();
+ _thr_leave_cancellation_point(curthread);
return ret;
}
diff --git a/lib/libpthread/thread/thr_resume_np.c b/lib/libpthread/thread/thr_resume_np.c
index cc00f09..d0b45b3 100644
--- a/lib/libpthread/thread/thr_resume_np.c
+++ b/lib/libpthread/thread/thr_resume_np.c
@@ -35,33 +35,32 @@
#include <pthread.h>
#include "thr_private.h"
-static void resume_common(struct pthread *);
+static void resume_common(struct pthread *);
__weak_reference(_pthread_resume_np, pthread_resume_np);
__weak_reference(_pthread_resume_all_np, pthread_resume_all_np);
+
/* Resume a thread: */
int
_pthread_resume_np(pthread_t thread)
{
+ struct pthread *curthread = _get_curthread();
int ret;
- /* Find the thread in the list of active threads: */
- if ((ret = _find_thread(thread)) == 0) {
- /*
- * Defer signals to protect the scheduling queues
- * from access by the signal handler:
- */
- _thread_kern_sig_defer();
+ /* Add a reference to the thread: */
+ if ((ret = _thr_ref_add(curthread, thread, /*include dead*/0)) == 0) {
+ /* Is it currently suspended? */
+ if ((thread->flags & THR_FLAGS_SUSPENDED) != 0) {
+			/* Lock the thread's scheduling queue: */
+ THR_SCHED_LOCK(curthread, thread);
- if ((thread->flags & PTHREAD_FLAGS_SUSPENDED) != 0)
resume_common(thread);
- /*
- * Undefer and handle pending signals, yielding if
- * necessary:
- */
- _thread_kern_sig_undefer();
+			/* Unlock the thread's scheduling queue: */
+ THR_SCHED_UNLOCK(curthread, thread);
+ }
+ _thr_ref_delete(curthread, thread);
}
return (ret);
}
@@ -69,43 +68,42 @@ _pthread_resume_np(pthread_t thread)
void
_pthread_resume_all_np(void)
{
- struct pthread *curthread = _get_curthread();
- struct pthread *thread;
+ struct pthread *curthread = _get_curthread();
+ struct pthread *thread;
+ kse_critical_t crit;
- /*
- * Defer signals to protect the scheduling queues from access
- * by the signal handler:
- */
- _thread_kern_sig_defer();
+ /* Take the thread list lock: */
+ crit = _kse_critical_enter();
+ KSE_LOCK_ACQUIRE(curthread->kse, &_thread_list_lock);
TAILQ_FOREACH(thread, &_thread_list, tle) {
if ((thread != curthread) &&
- ((thread->flags & PTHREAD_FLAGS_SUSPENDED) != 0))
+ ((thread->flags & THR_FLAGS_SUSPENDED) != 0) &&
+ (thread->state != PS_DEAD) &&
+ (thread->state != PS_DEADLOCK) &&
+ ((thread->flags & THR_FLAGS_EXITING) == 0)) {
+ THR_SCHED_LOCK(curthread, thread);
resume_common(thread);
+ THR_SCHED_UNLOCK(curthread, thread);
+ }
}
- /*
- * Undefer and handle pending signals, yielding if necessary:
- */
- _thread_kern_sig_undefer();
+ /* Release the thread list lock: */
+ KSE_LOCK_RELEASE(curthread->kse, &_thread_list_lock);
+ _kse_critical_leave(crit);
}
static void
resume_common(struct pthread *thread)
{
/* Clear the suspend flag: */
- thread->flags &= ~PTHREAD_FLAGS_SUSPENDED;
+ thread->flags &= ~THR_FLAGS_SUSPENDED;
/*
* If the thread's state is suspended, that means it is
* now runnable but not in any scheduling queue. Set the
* state to running and insert it into the run queue.
*/
- if (thread->state == PS_SUSPENDED) {
- PTHREAD_SET_STATE(thread, PS_RUNNING);
- if (thread->priority_mutex_count > 0)
- PTHREAD_PRIOQ_INSERT_HEAD(thread);
- else
- PTHREAD_PRIOQ_INSERT_TAIL(thread);
- }
+ if (thread->state == PS_SUSPENDED)
+ _thr_setrunnable_unlocked(thread);
}
diff --git a/lib/libpthread/thread/thr_rwlock.c b/lib/libpthread/thread/thr_rwlock.c
index f41e8a2..5f1d8e7 100644
--- a/lib/libpthread/thread/thr_rwlock.c
+++ b/lib/libpthread/thread/thr_rwlock.c
@@ -30,7 +30,9 @@
#include <limits.h>
#include <stdlib.h>
+#include "namespace.h"
#include <pthread.h>
+#include "un-namespace.h"
#include "thr_private.h"
/* maximum number of times a read lock may be obtained */
@@ -44,25 +46,28 @@ __weak_reference(_pthread_rwlock_trywrlock, pthread_rwlock_trywrlock);
__weak_reference(_pthread_rwlock_unlock, pthread_rwlock_unlock);
__weak_reference(_pthread_rwlock_wrlock, pthread_rwlock_wrlock);
-static int init_static (pthread_rwlock_t *rwlock);
-static spinlock_t static_init_lock = _SPINLOCK_INITIALIZER;
+/*
+ * Prototypes
+ */
+static int init_static(pthread_rwlock_t *rwlock);
+
static int
-init_static (pthread_rwlock_t *rwlock)
+init_static(pthread_rwlock_t *rwlock)
{
+ struct pthread *thread = _get_curthread();
int ret;
- _SPINLOCK(&static_init_lock);
+ THR_LOCK_ACQUIRE(thread, &_rwlock_static_lock);
if (*rwlock == NULL)
- ret = pthread_rwlock_init(rwlock, NULL);
+ ret = _pthread_rwlock_init(rwlock, NULL);
else
ret = 0;
- _SPINUNLOCK(&static_init_lock);
-
- return(ret);
+ THR_LOCK_RELEASE(thread, &_rwlock_static_lock);
+ return (ret);
}
int
@@ -77,9 +82,9 @@ _pthread_rwlock_destroy (pthread_rwlock_t *rwlock)
prwlock = *rwlock;
- pthread_mutex_destroy(&prwlock->lock);
- pthread_cond_destroy(&prwlock->read_signal);
- pthread_cond_destroy(&prwlock->write_signal);
+ _pthread_mutex_destroy(&prwlock->lock);
+ _pthread_cond_destroy(&prwlock->read_signal);
+ _pthread_cond_destroy(&prwlock->write_signal);
free(prwlock);
*rwlock = NULL;
@@ -87,7 +92,7 @@ _pthread_rwlock_destroy (pthread_rwlock_t *rwlock)
ret = 0;
}
- return(ret);
+ return (ret);
}
int
@@ -100,25 +105,25 @@ _pthread_rwlock_init (pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr
prwlock = (pthread_rwlock_t)malloc(sizeof(struct pthread_rwlock));
if (prwlock == NULL)
- return(ENOMEM);
+ return (ENOMEM);
/* initialize the lock */
- if ((ret = pthread_mutex_init(&prwlock->lock, NULL)) != 0)
+ if ((ret = _pthread_mutex_init(&prwlock->lock, NULL)) != 0)
free(prwlock);
else {
/* initialize the read condition signal */
- ret = pthread_cond_init(&prwlock->read_signal, NULL);
+ ret = _pthread_cond_init(&prwlock->read_signal, NULL);
if (ret != 0) {
- pthread_mutex_destroy(&prwlock->lock);
+ _pthread_mutex_destroy(&prwlock->lock);
free(prwlock);
} else {
/* initialize the write condition signal */
- ret = pthread_cond_init(&prwlock->write_signal, NULL);
+ ret = _pthread_cond_init(&prwlock->write_signal, NULL);
if (ret != 0) {
- pthread_cond_destroy(&prwlock->read_signal);
- pthread_mutex_destroy(&prwlock->lock);
+ _pthread_cond_destroy(&prwlock->read_signal);
+ _pthread_mutex_destroy(&prwlock->lock);
free(prwlock);
} else {
/* success */
@@ -130,7 +135,7 @@ _pthread_rwlock_init (pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr
}
}
- return(ret);
+ return (ret);
}
int
@@ -140,30 +145,30 @@ _pthread_rwlock_rdlock (pthread_rwlock_t *rwlock)
int ret;
if (rwlock == NULL)
- return(EINVAL);
+ return (EINVAL);
prwlock = *rwlock;
/* check for static initialization */
if (prwlock == NULL) {
if ((ret = init_static(rwlock)) != 0)
- return(ret);
+ return (ret);
prwlock = *rwlock;
}
/* grab the monitor lock */
- if ((ret = pthread_mutex_lock(&prwlock->lock)) != 0)
- return(ret);
+ if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
+ return (ret);
/* give writers priority over readers */
while (prwlock->blocked_writers || prwlock->state < 0) {
- ret = pthread_cond_wait(&prwlock->read_signal, &prwlock->lock);
+ ret = _pthread_cond_wait(&prwlock->read_signal, &prwlock->lock);
if (ret != 0) {
/* can't do a whole lot if this fails */
- pthread_mutex_unlock(&prwlock->lock);
- return(ret);
+ _pthread_mutex_unlock(&prwlock->lock);
+ return (ret);
}
}
@@ -179,9 +184,9 @@ _pthread_rwlock_rdlock (pthread_rwlock_t *rwlock)
* lock. Decrementing 'state' is no good because we probably
* don't have the monitor lock.
*/
- pthread_mutex_unlock(&prwlock->lock);
+ _pthread_mutex_unlock(&prwlock->lock);
- return(ret);
+ return (ret);
}
int
@@ -191,21 +196,21 @@ _pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock)
int ret;
if (rwlock == NULL)
- return(EINVAL);
+ return (EINVAL);
prwlock = *rwlock;
/* check for static initialization */
if (prwlock == NULL) {
if ((ret = init_static(rwlock)) != 0)
- return(ret);
+ return (ret);
prwlock = *rwlock;
}
/* grab the monitor lock */
- if ((ret = pthread_mutex_lock(&prwlock->lock)) != 0)
- return(ret);
+ if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
+ return (ret);
/* give writers priority over readers */
if (prwlock->blocked_writers || prwlock->state < 0)
@@ -216,9 +221,9 @@ _pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock)
++prwlock->state; /* indicate we are locked for reading */
/* see the comment on this in pthread_rwlock_rdlock */
- pthread_mutex_unlock(&prwlock->lock);
+ _pthread_mutex_unlock(&prwlock->lock);
- return(ret);
+ return (ret);
}
int
@@ -228,21 +233,21 @@ _pthread_rwlock_trywrlock (pthread_rwlock_t *rwlock)
int ret;
if (rwlock == NULL)
- return(EINVAL);
+ return (EINVAL);
prwlock = *rwlock;
/* check for static initialization */
if (prwlock == NULL) {
if ((ret = init_static(rwlock)) != 0)
- return(ret);
+ return (ret);
prwlock = *rwlock;
}
/* grab the monitor lock */
- if ((ret = pthread_mutex_lock(&prwlock->lock)) != 0)
- return(ret);
+ if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
+ return (ret);
if (prwlock->state != 0)
ret = EBUSY;
@@ -251,9 +256,9 @@ _pthread_rwlock_trywrlock (pthread_rwlock_t *rwlock)
prwlock->state = -1;
/* see the comment on this in pthread_rwlock_rdlock */
- pthread_mutex_unlock(&prwlock->lock);
+ _pthread_mutex_unlock(&prwlock->lock);
- return(ret);
+ return (ret);
}
int
@@ -263,34 +268,34 @@ _pthread_rwlock_unlock (pthread_rwlock_t *rwlock)
int ret;
if (rwlock == NULL)
- return(EINVAL);
+ return (EINVAL);
prwlock = *rwlock;
if (prwlock == NULL)
- return(EINVAL);
+ return (EINVAL);
/* grab the monitor lock */
- if ((ret = pthread_mutex_lock(&prwlock->lock)) != 0)
- return(ret);
+ if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
+ return (ret);
if (prwlock->state > 0) {
if (--prwlock->state == 0 && prwlock->blocked_writers)
- ret = pthread_cond_signal(&prwlock->write_signal);
+ ret = _pthread_cond_signal(&prwlock->write_signal);
} else if (prwlock->state < 0) {
prwlock->state = 0;
if (prwlock->blocked_writers)
- ret = pthread_cond_signal(&prwlock->write_signal);
+ ret = _pthread_cond_signal(&prwlock->write_signal);
else
- ret = pthread_cond_broadcast(&prwlock->read_signal);
+ ret = _pthread_cond_broadcast(&prwlock->read_signal);
} else
ret = EINVAL;
/* see the comment on this in pthread_rwlock_rdlock */
- pthread_mutex_unlock(&prwlock->lock);
+ _pthread_mutex_unlock(&prwlock->lock);
- return(ret);
+ return (ret);
}
int
@@ -300,31 +305,31 @@ _pthread_rwlock_wrlock (pthread_rwlock_t *rwlock)
int ret;
if (rwlock == NULL)
- return(EINVAL);
+ return (EINVAL);
prwlock = *rwlock;
/* check for static initialization */
if (prwlock == NULL) {
if ((ret = init_static(rwlock)) != 0)
- return(ret);
+ return (ret);
prwlock = *rwlock;
}
/* grab the monitor lock */
- if ((ret = pthread_mutex_lock(&prwlock->lock)) != 0)
- return(ret);
+ if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
+ return (ret);
while (prwlock->state != 0) {
++prwlock->blocked_writers;
- ret = pthread_cond_wait(&prwlock->write_signal, &prwlock->lock);
+ ret = _pthread_cond_wait(&prwlock->write_signal, &prwlock->lock);
if (ret != 0) {
--prwlock->blocked_writers;
- pthread_mutex_unlock(&prwlock->lock);
- return(ret);
+ _pthread_mutex_unlock(&prwlock->lock);
+ return (ret);
}
--prwlock->blocked_writers;
@@ -334,8 +339,7 @@ _pthread_rwlock_wrlock (pthread_rwlock_t *rwlock)
prwlock->state = -1;
/* see the comment on this in pthread_rwlock_rdlock */
- pthread_mutex_unlock(&prwlock->lock);
+ _pthread_mutex_unlock(&prwlock->lock);
- return(ret);
+ return (ret);
}
-
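The rwlock keeps a reader count in state (negative means write-locked) and gives queued writers priority over new readers. A minimal caller-side sketch of that behavior; under this implementation a NULL-initialized rwlock is set up lazily by init_static() on first use (the configuration variables here are hypothetical):

#include <pthread.h>

static pthread_rwlock_t cfg_lock = NULL;	/* initialized lazily on first use */
static int cfg_value;

static int
cfg_read(void)
{
	int v;

	pthread_rwlock_rdlock(&cfg_lock);	/* waits while writers are queued */
	v = cfg_value;
	pthread_rwlock_unlock(&cfg_lock);
	return (v);
}

static void
cfg_write(int v)
{
	pthread_rwlock_wrlock(&cfg_lock);	/* waits for state to drain to 0 */
	cfg_value = v;
	pthread_rwlock_unlock(&cfg_lock);
}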
diff --git a/lib/libpthread/thread/thr_select.c b/lib/libpthread/thread/thr_select.c
index a4f4a15..6714af0 100644
--- a/lib/libpthread/thread/thr_select.c
+++ b/lib/libpthread/thread/thr_select.c
@@ -49,11 +49,12 @@ int
__select(int numfds, fd_set *readfds, fd_set *writefds, fd_set *exceptfds,
struct timeval *timeout)
{
+ struct pthread *curthread = _get_curthread();
int ret;
- _thread_enter_cancellation_point();
+ _thr_enter_cancellation_point(curthread);
ret = __sys_select(numfds, readfds, writefds, exceptfds, timeout);
- _thread_leave_cancellation_point();
+ _thr_leave_cancellation_point(curthread);
return ret;
}
diff --git a/lib/libpthread/thread/thr_self.c b/lib/libpthread/thread/thr_self.c
index d213e5e..0c702a6 100644
--- a/lib/libpthread/thread/thr_self.c
+++ b/lib/libpthread/thread/thr_self.c
@@ -39,6 +39,9 @@ __weak_reference(_pthread_self, pthread_self);
pthread_t
_pthread_self(void)
{
+ if (_thr_initial == NULL)
+ _libpthread_init(NULL);
+
/* Return the running thread pointer: */
return (_get_curthread());
}
diff --git a/lib/libpthread/thread/thr_sem.c b/lib/libpthread/thread/thr_sem.c
index 70a9721..d6021a8 100644
--- a/lib/libpthread/thread/thr_sem.c
+++ b/lib/libpthread/thread/thr_sem.c
@@ -32,7 +32,9 @@
#include <stdlib.h>
#include <errno.h>
#include <semaphore.h>
+#include "namespace.h"
#include <pthread.h>
+#include "un-namespace.h"
#include "thr_private.h"
#define _SEM_CHECK_VALIDITY(sem) \
@@ -88,15 +90,15 @@ _sem_init(sem_t *sem, int pshared, unsigned int value)
/*
* Initialize the semaphore.
*/
- if (pthread_mutex_init(&(*sem)->lock, NULL) != 0) {
+ if (_pthread_mutex_init(&(*sem)->lock, NULL) != 0) {
free(*sem);
errno = ENOSPC;
retval = -1;
goto RETURN;
}
- if (pthread_cond_init(&(*sem)->gtzero, NULL) != 0) {
- pthread_mutex_destroy(&(*sem)->lock);
+ if (_pthread_cond_init(&(*sem)->gtzero, NULL) != 0) {
+ _pthread_mutex_destroy(&(*sem)->lock);
free(*sem);
errno = ENOSPC;
retval = -1;
@@ -109,7 +111,7 @@ _sem_init(sem_t *sem, int pshared, unsigned int value)
retval = 0;
RETURN:
- return retval;
+ return (retval);
}
int
@@ -120,71 +122,72 @@ _sem_destroy(sem_t *sem)
_SEM_CHECK_VALIDITY(sem);
/* Make sure there are no waiters. */
- pthread_mutex_lock(&(*sem)->lock);
+ _pthread_mutex_lock(&(*sem)->lock);
if ((*sem)->nwaiters > 0) {
- pthread_mutex_unlock(&(*sem)->lock);
+ _pthread_mutex_unlock(&(*sem)->lock);
errno = EBUSY;
retval = -1;
goto RETURN;
}
- pthread_mutex_unlock(&(*sem)->lock);
+ _pthread_mutex_unlock(&(*sem)->lock);
- pthread_mutex_destroy(&(*sem)->lock);
- pthread_cond_destroy(&(*sem)->gtzero);
+ _pthread_mutex_destroy(&(*sem)->lock);
+ _pthread_cond_destroy(&(*sem)->gtzero);
(*sem)->magic = 0;
free(*sem);
retval = 0;
RETURN:
- return retval;
+ return (retval);
}
sem_t *
_sem_open(const char *name, int oflag, ...)
{
errno = ENOSYS;
- return SEM_FAILED;
+ return (SEM_FAILED);
}
int
_sem_close(sem_t *sem)
{
errno = ENOSYS;
- return -1;
+ return (-1);
}
int
_sem_unlink(const char *name)
{
errno = ENOSYS;
- return -1;
+ return (-1);
}
int
_sem_wait(sem_t *sem)
{
- int retval;
+ struct pthread *curthread = _get_curthread();
+ int retval;
- _thread_enter_cancellation_point();
+ _thr_enter_cancellation_point(curthread);
_SEM_CHECK_VALIDITY(sem);
- pthread_mutex_lock(&(*sem)->lock);
+ _pthread_mutex_lock(&(*sem)->lock);
while ((*sem)->count == 0) {
(*sem)->nwaiters++;
- pthread_cond_wait(&(*sem)->gtzero, &(*sem)->lock);
+ _pthread_cond_wait(&(*sem)->gtzero, &(*sem)->lock);
(*sem)->nwaiters--;
}
(*sem)->count--;
- pthread_mutex_unlock(&(*sem)->lock);
+ _pthread_mutex_unlock(&(*sem)->lock);
retval = 0;
RETURN:
- _thread_leave_cancellation_point();
- return retval;
+ _thr_leave_cancellation_point(curthread);
+ return (retval);
}
int
@@ -194,7 +197,7 @@ _sem_trywait(sem_t *sem)
_SEM_CHECK_VALIDITY(sem);
- pthread_mutex_lock(&(*sem)->lock);
+ _pthread_mutex_lock(&(*sem)->lock);
if ((*sem)->count > 0) {
(*sem)->count--;
@@ -204,37 +207,38 @@ _sem_trywait(sem_t *sem)
retval = -1;
}
- pthread_mutex_unlock(&(*sem)->lock);
+ _pthread_mutex_unlock(&(*sem)->lock);
RETURN:
- return retval;
+ return (retval);
}
int
_sem_post(sem_t *sem)
{
- int retval;
+ kse_critical_t crit;
+ int retval;
_SEM_CHECK_VALIDITY(sem);
/*
* sem_post() is required to be safe to call from within signal
- * handlers. Thus, we must defer signals.
+ * handlers. Thus, we must enter a critical region.
*/
- _thread_kern_sig_defer();
+ crit = _kse_critical_enter();
- pthread_mutex_lock(&(*sem)->lock);
+ _pthread_mutex_lock(&(*sem)->lock);
(*sem)->count++;
if ((*sem)->nwaiters > 0)
- pthread_cond_signal(&(*sem)->gtzero);
+ _pthread_cond_signal(&(*sem)->gtzero);
- pthread_mutex_unlock(&(*sem)->lock);
+ _pthread_mutex_unlock(&(*sem)->lock);
- _thread_kern_sig_undefer();
+ _kse_critical_leave(crit);
retval = 0;
RETURN:
- return retval;
+ return (retval);
}
int
@@ -244,11 +248,11 @@ _sem_getvalue(sem_t *sem, int *sval)
_SEM_CHECK_VALIDITY(sem);
- pthread_mutex_lock(&(*sem)->lock);
+ _pthread_mutex_lock(&(*sem)->lock);
*sval = (int)(*sem)->count;
- pthread_mutex_unlock(&(*sem)->lock);
+ _pthread_mutex_unlock(&(*sem)->lock);
retval = 0;
RETURN:
- return retval;
+ return (retval);
}
diff --git a/lib/libpthread/thread/thr_seterrno.c b/lib/libpthread/thread/thr_seterrno.c
index 4f585ac..245d43f 100644
--- a/lib/libpthread/thread/thr_seterrno.c
+++ b/lib/libpthread/thread/thr_seterrno.c
@@ -47,7 +47,7 @@ void
_thread_seterrno(pthread_t thread, int error)
{
/* Check for the initial thread: */
- if (thread == _thread_initial)
+ if (thread == _thr_initial)
/* The initial thread always uses the global error variable: */
errno = error;
else
diff --git a/lib/libpthread/thread/thr_setschedparam.c b/lib/libpthread/thread/thr_setschedparam.c
index 5117a26..1be9f91 100644
--- a/lib/libpthread/thread/thr_setschedparam.c
+++ b/lib/libpthread/thread/thr_setschedparam.c
@@ -42,40 +42,55 @@ int
_pthread_setschedparam(pthread_t pthread, int policy,
const struct sched_param *param)
{
- int old_prio, in_readyq = 0, ret = 0;
+ struct pthread *curthread = _get_curthread();
+ int in_syncq;
+ int in_readyq = 0;
+ int old_prio;
+ int ret = 0;
if ((param == NULL) || (policy < SCHED_FIFO) || (policy > SCHED_RR)) {
/* Return an invalid argument error: */
ret = EINVAL;
- } else if ((param->sched_priority < PTHREAD_MIN_PRIORITY) ||
- (param->sched_priority > PTHREAD_MAX_PRIORITY)) {
+ } else if ((param->sched_priority < THR_MIN_PRIORITY) ||
+ (param->sched_priority > THR_MAX_PRIORITY)) {
/* Return an unsupported value error. */
ret = ENOTSUP;
/* Find the thread in the list of active threads: */
- } else if ((ret = _find_thread(pthread)) == 0) {
+ } else if ((ret = _thr_ref_add(curthread, pthread, /*include dead*/0))
+ == 0) {
/*
- * Defer signals to protect the scheduling queues from
- * access by the signal handler:
+ * Lock the thread's scheduling queue while we change
+ * its priority:
*/
- _thread_kern_sig_defer();
+ THR_SCHED_LOCK(curthread, pthread);
+ in_syncq = pthread->flags & THR_FLAGS_IN_SYNCQ;
- if (param->sched_priority !=
- PTHREAD_BASE_PRIORITY(pthread->base_priority)) {
+ /* Set the scheduling policy: */
+ pthread->attr.sched_policy = policy;
+
+ if (param->sched_priority ==
+ THR_BASE_PRIORITY(pthread->base_priority))
+ /*
+ * There is nothing to do; unlock the thread's
+ * scheduling queue.
+ */
+ THR_SCHED_UNLOCK(curthread, pthread);
+ else {
/*
* Remove the thread from its current priority
* queue before any adjustments are made to its
* active priority:
*/
old_prio = pthread->active_priority;
- if ((pthread->flags & PTHREAD_FLAGS_IN_PRIOQ) != 0) {
+ if ((pthread->flags & THR_FLAGS_IN_RUNQ) != 0) {
in_readyq = 1;
- PTHREAD_PRIOQ_REMOVE(pthread);
+ THR_RUNQ_REMOVE(pthread);
}
/* Set the thread base priority: */
pthread->base_priority &=
- (PTHREAD_SIGNAL_PRIORITY | PTHREAD_RT_PRIORITY);
+ (THR_SIGNAL_PRIORITY | THR_RT_PRIORITY);
pthread->base_priority = param->sched_priority;
/* Recalculate the active priority: */
@@ -92,28 +107,23 @@ _pthread_setschedparam(pthread_t pthread, int policy,
* its priority if it owns any priority
* protection or inheritance mutexes.
*/
- PTHREAD_PRIOQ_INSERT_HEAD(pthread);
+ THR_RUNQ_INSERT_HEAD(pthread);
}
else
- PTHREAD_PRIOQ_INSERT_TAIL(pthread);
+ THR_RUNQ_INSERT_TAIL(pthread);
}
+ /* Unlock the thread's scheduling queue: */
+ THR_SCHED_UNLOCK(curthread, pthread);
+
/*
* Check for any mutex priority adjustments. This
* includes checking for a priority mutex on which
* this thread is waiting.
*/
- _mutex_notify_priochange(pthread);
+ _mutex_notify_priochange(curthread, pthread, in_syncq);
}
-
- /* Set the scheduling policy: */
- pthread->attr.sched_policy = policy;
-
- /*
- * Undefer and handle pending signals, yielding if
- * necessary:
- */
- _thread_kern_sig_undefer();
+ _thr_ref_delete(curthread, pthread);
}
- return(ret);
+ return (ret);
}
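
Note: _pthread_setschedparam() now takes a reference on the target thread, holds its scheduling lock, and follows a remove/adjust/reinsert discipline on the run queue. A rough sketch of that discipline follows, with a plain TAILQ standing in for the library's priority run queue; the type and function names are made up for illustration, and the required THR_SCHED_LOCK/THR_SCHED_UNLOCK bracketing is assumed but not shown.

#include <sys/queue.h>

struct thr_sketch {
	int	active_priority;
	int	in_runq;
	TAILQ_ENTRY(thr_sketch) runq_entry;
};
TAILQ_HEAD(runq_sketch, thr_sketch);

/*
 * A queued thread is never left on the run queue while its priority
 * changes.  Reinsert at the head when the effective priority is
 * unchanged, at the tail otherwise, mirroring THR_RUNQ_INSERT_HEAD
 * and THR_RUNQ_INSERT_TAIL above.
 */
static void
requeue_after_priority_change(struct runq_sketch *runq,
    struct thr_sketch *t, int new_prio)
{
	int old_prio;

	old_prio = t->active_priority;
	if (t->in_runq)
		TAILQ_REMOVE(runq, t, runq_entry);
	t->active_priority = new_prio;
	if (t->in_runq) {
		if (new_prio == old_prio)
			TAILQ_INSERT_HEAD(runq, t, runq_entry);
		else
			TAILQ_INSERT_TAIL(runq, t, runq_entry);
	}
}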
diff --git a/lib/libpthread/thread/thr_sig.c b/lib/libpthread/thread/thr_sig.c
index fa63b5f..de177d9 100644
--- a/lib/libpthread/thread/thr_sig.c
+++ b/lib/libpthread/thread/thr_sig.c
@@ -35,25 +35,29 @@
#include <sys/types.h>
#include <sys/signalvar.h>
#include <signal.h>
+#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
-#include <setjmp.h>
-#include <errno.h>
+#include <string.h>
#include <pthread.h>
#include "thr_private.h"
+#include "pthread_md.h"
/* Prototypes: */
-static void thread_sig_handle_special(int sig);
-
-static void thread_sig_add(struct pthread *pthread, int sig, int has_args);
-static void thread_sig_check_state(struct pthread *pthread, int sig);
-static struct pthread *thread_sig_find(int sig);
-static void thread_sigframe_add(struct pthread *thread, int sig,
- int has_args);
-static void thread_sigframe_save(struct pthread *thread,
- struct pthread_state_data *psd);
-static void thread_sigframe_restore(struct pthread *thread,
- struct pthread_state_data *psd);
+static void build_siginfo(siginfo_t *info, int signo);
+static void thr_sig_add(struct pthread *pthread, int sig, siginfo_t *info);
+static void thr_sig_check_state(struct pthread *pthread, int sig);
+static struct pthread *thr_sig_find(struct kse *curkse, int sig,
+ siginfo_t *info);
+static void handle_special_signals(struct kse *curkse, int sig);
+static void thr_sigframe_add(struct pthread *thread, int sig,
+ siginfo_t *info);
+static void thr_sigframe_restore(struct pthread *thread,
+ struct pthread_sigframe *psf);
+static void thr_sigframe_save(struct pthread *thread,
+ struct pthread_sigframe *psf);
+static void thr_sig_invoke_handler(struct pthread *, int sig,
+ siginfo_t *info, ucontext_t *ucp);
/* #define DEBUG_SIGNAL */
#ifdef DEBUG_SIGNAL
@@ -63,55 +67,210 @@ static void thread_sigframe_restore(struct pthread *thread,
#endif
/*
- * Dispatch a signal to a thread, if appropriate.
+ * Signal setup and delivery.
+ *
+ * 1) Delivering signals to threads in the same KSE.
+ * These signals are sent by upcall events and are set in the
+ * km_sigscaught field of the KSE mailbox. Since these signals
+ * are received while operating on the KSE stack, they can be
+ * delivered either by using signalcontext() to add a stack frame
+ * to the target thread's stack, or by adding them in the thread's
+ * pending set and having the thread run them down after it
+ * 2) Delivering signals to threads in other KSEs/KSEGs.
+ * 3) Delivering signals to threads in critical regions.
+ * 4) Delivering signals to threads after they change their signal masks.
+ *
+ * Methods of delivering signals.
+ *
+ * 1) Add a signal frame to the thread's saved context.
+ * 2) Add the signal to the thread structure, mark the thread as
+ * having signals to handle, and let the thread run them down
+ * after it resumes from the KSE scheduler.
+ *
+ * Problem with 1). You can't do this to a running thread or a
+ * thread in a critical region.
+ *
+ * Problem with 2). You can't do this to a thread that doesn't
+ * yield in some way (explicitly enters the scheduler). A thread
+ * blocked in the kernel or a CPU-hungry thread will not see the
+ * signal without entering the scheduler.
+ *
+ * The solution is to use both 1) and 2) to deliver signals:
+ *
+ * o Thread in critical region - use 2). When the thread
+ * leaves the critical region it will check to see if it
+ * has pending signals and run them down.
+ *
+ * o Thread enters scheduler explicitly - use 2). The thread
+ * can check for pending signals after it returns from the
+ * scheduler.
+ *
+ * o Thread is running and not current thread - use 2). When the
+ * thread hits a condition specified by one of the other bullets,
+ * the signal will be delivered.
+ *
+ * o Thread is running and is current thread (e.g., the thread
+ * has just changed its signal mask and now sees that it has
+ * pending signals) - just run down the pending signals.
+ *
+ * o Thread is swapped out due to quantum expiration - use 1)
+ *
+ * o Thread is blocked in kernel - kse_thr_wakeup() and then
+ * use 1)
+ */
+
+/*
+ * Rules for selecting threads for signals received:
+ *
+ * 1) If the signal is a synchronous signal, it is delivered to
+ * the generating (current) thread. If the thread has the
+ * signal masked, it is added to the thread's pending signal
+ * set until the thread unmasks it.
+ *
+ * 2) A thread in sigwait() where the signal is in the thread's
+ * waitset.
+ *
+ * 3) A thread in sigsuspend() where the signal is not in the
+ * thread's suspended signal mask.
+ *
+ * 4) Any thread (first found/easiest to deliver) that has the
+ * signal unmasked.
+ */
+
+/*
+ * This signal handler only delivers asynchronous signals.
+ * This must be called with upcalls disabled and without
+ * holding any locks.
*/
void
-_thread_sig_dispatch(int sig)
+_thr_sig_dispatch(struct kse *curkse, int sig, siginfo_t *info)
+{
+ struct pthread *thread;
+
+ DBG_MSG(">>> _thr_sig_dispatch(%d)\n", sig);
+
+ /* Some signals need special handling: */
+ handle_special_signals(curkse, sig);
+
+ if ((thread = thr_sig_find(curkse, sig, info)) != NULL) {
+ /*
+ * Setup the target thread to receive the signal:
+ */
+ DBG_MSG("Got signal %d, selecting thread %p\n", sig, thread);
+ KSE_SCHED_LOCK(curkse, thread->kseg);
+ thr_sig_add(thread, sig, info);
+ KSE_SCHED_UNLOCK(curkse, thread->kseg);
+ }
+}
+
+void
+_thr_sig_handler(int sig, siginfo_t *info, ucontext_t *ucp)
+{
+ void (*sigfunc)(int, siginfo_t *, void *);
+ struct kse *curkse;
+
+ curkse = _get_curkse();
+ if ((curkse == NULL) || ((curkse->k_flags & KF_STARTED) == 0)) {
+ /* Upcalls are not yet started; just call the handler. */
+ sigfunc = _thread_sigact[sig - 1].sa_sigaction;
+ ucp->uc_sigmask = _thr_proc_sigmask;
+ if (((__sighandler_t *)sigfunc != SIG_DFL) &&
+ ((__sighandler_t *)sigfunc != SIG_IGN)) {
+ if (((_thread_sigact[sig - 1].sa_flags & SA_SIGINFO)
+ != 0) || (info == NULL))
+ (*(sigfunc))(sig, info, ucp);
+ else
+ (*(sigfunc))(sig, (siginfo_t *)info->si_code,
+ ucp);
+ }
+ }
+ else {
+ /* Nothing. */
+ DBG_MSG("Got signal %d\n", sig);
+ sigaddset(&curkse->k_mbx.km_sigscaught, sig);
+ ucp->uc_sigmask = _thr_proc_sigmask;
+ }
+}
+
+static void
+thr_sig_invoke_handler(struct pthread *curthread, int sig, siginfo_t *info,
+ ucontext_t *ucp)
{
- struct pthread *pthread;
+ void (*sigfunc)(int, siginfo_t *, void *);
+ sigset_t saved_mask;
+ int saved_seqno;
- DBG_MSG(">>> _thread_sig_dispatch(%d)\n", sig);
+ /* Invoke the signal handler without going through the scheduler:
+ */
+ DBG_MSG("Got signal %d, calling handler for current thread %p\n",
+ sig, curthread);
- thread_sig_handle_special(sig);
- if (sigismember(&_thread_sigmask, sig))
- /* Don't deliver the signal if it's masked. */
- return;
- /* Mask the signal until it's handled. */
- sigaddset(&_thread_sigmask, sig);
- /* This signal will be handled; clear the pending flag. */
- sigdelset(&_thread_sigpending, sig);
+ /*
+ * Set up the thread's signal mask.
+ *
+ * The mask is changed in the thread's active signal mask
+ * (in the context) and not in the base signal mask because
+ * a thread is allowed to change its signal mask within a
+ * signal handler. If it does, the signal mask restored
+ * after the handler should be the same as that set by the
+ * thread during the handler, not the original mask from
+ * before calling the handler. The thread could also
+ * modify the signal mask in the context and expect this
+ * mask to be used.
+ */
+ THR_SCHED_LOCK(curthread, curthread);
+ saved_mask = curthread->tmbx.tm_context.uc_sigmask;
+ saved_seqno = curthread->sigmask_seqno;
+ SIGSETOR(curthread->tmbx.tm_context.uc_sigmask,
+ _thread_sigact[sig - 1].sa_mask);
+ sigaddset(&curthread->tmbx.tm_context.uc_sigmask, sig);
+ THR_SCHED_UNLOCK(curthread, curthread);
/*
- * Deliver the signal to a thread.
+ * Check that a custom handler is installed and if
+ * the signal is not blocked:
*/
- if ((pthread = thread_sig_find(sig)) == NULL) {
- DBG_MSG("No thread to handle signal %d\n", sig);
- return;
+ sigfunc = _thread_sigact[sig - 1].sa_sigaction;
+ ucp->uc_sigmask = _thr_proc_sigmask;
+ if (((__sighandler_t *)sigfunc != SIG_DFL) &&
+ ((__sighandler_t *)sigfunc != SIG_IGN)) {
+ if (((_thread_sigact[sig - 1].sa_flags & SA_SIGINFO) != 0) ||
+ (info == NULL))
+ (*(sigfunc))(sig, info, ucp);
+ else
+ (*(sigfunc))(sig, (siginfo_t *)info->si_code, ucp);
}
- DBG_MSG("Got signal %d, selecting thread %p\n", sig, pthread);
- thread_sig_add(pthread, sig, /*has_args*/ 1);
+
+ /*
+ * Restore the thread's signal mask.
+ */
+ if (saved_seqno == curthread->sigmask_seqno)
+ curthread->tmbx.tm_context.uc_sigmask = saved_mask;
+ else
+ curthread->tmbx.tm_context.uc_sigmask = curthread->sigmask;
}
/*
- * Find a thread that can handle the signal.
+ * Find a thread that can handle the signal. This must be called
+ * with upcalls disabled.
*/
-static struct pthread *
-thread_sig_find(int sig)
+struct pthread *
+thr_sig_find(struct kse *curkse, int sig, siginfo_t *info)
{
- struct pthread *curthread = _get_curthread();
+ int handler_installed;
struct pthread *pthread, *pthread_next;
struct pthread *suspended_thread, *signaled_thread;
DBG_MSG("Looking for thread to handle signal %d\n", sig);
+
+ handler_installed = (_thread_sigact[sig - 1].sa_handler != SIG_IGN) &&
+ (_thread_sigact[sig - 1].sa_handler != SIG_DFL);
+
/* Check if the signal requires a dump of thread information: */
if (sig == SIGINFO) {
/* Dump thread information to file: */
_thread_dump_info();
-
- /* Unblock this signal to allow further dumps: */
- sigdelset(&_thread_sigmask, sig);
}
-
/*
* Enter a loop to look for threads that have the signal
* unmasked. POSIX specifies that a thread in a sigwait
@@ -123,13 +282,10 @@ thread_sig_find(int sig)
* installed, the signal only affects threads in sigwait.
*/
suspended_thread = NULL;
- if ((curthread != &_thread_kern_thread) &&
- !sigismember(&curthread->mailbox.tm_context.uc_sigmask, sig))
- signaled_thread = curthread;
- else
- signaled_thread = NULL;
+ signaled_thread = NULL;
- for (pthread = TAILQ_FIRST(&_waitingq);
+ KSE_LOCK_ACQUIRE(curkse, &_thread_list_lock);
+ for (pthread = TAILQ_FIRST(&_thread_list);
pthread != NULL; pthread = pthread_next) {
/*
* Grab the next thread before possibly destroying
@@ -139,19 +295,17 @@ thread_sig_find(int sig)
if ((pthread->state == PS_SIGWAIT) &&
sigismember(pthread->data.sigwait, sig)) {
- /* Change the state of the thread to run: */
- PTHREAD_NEW_STATE(pthread, PS_RUNNING);
+ /* Take the scheduling lock. */
+ KSE_SCHED_LOCK(curkse, pthread->kseg);
/*
- * A signal handler is not invoked for threads
- * in sigwait. Clear the blocked and pending
- * flags.
+ * Return the signal number and make the
+ * thread runnable.
*/
- sigdelset(&_thread_sigmask, sig);
- sigdelset(&_thread_sigpending, sig);
-
- /* Return the signal number: */
pthread->signo = sig;
+ _thr_setrunnable_unlocked(pthread);
+
+ KSE_SCHED_UNLOCK(curkse, pthread->kseg);
/*
* POSIX doesn't specify which thread
@@ -163,11 +317,18 @@ thread_sig_find(int sig)
* to other threads and do not add the signal
* to the process pending set.
*/
+ KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
+ DBG_MSG("Waking thread %p in sigwait with signal %d\n",
+ pthread, sig);
return (NULL);
}
- if (!sigismember(
- &pthread->mailbox.tm_context.uc_sigmask, sig) &&
- ((pthread->flags & PTHREAD_FLAGS_SUSPENDED) == 0)) {
+ else if ((pthread->state == PS_DEAD) ||
+ (pthread->state == PS_DEADLOCK) ||
+ ((pthread->flags & THR_FLAGS_EXITING) != 0))
+ ; /* Skip this thread. */
+ else if ((handler_installed != 0) &&
+ !sigismember(&pthread->tmbx.tm_context.uc_sigmask, sig) &&
+ ((pthread->flags & THR_FLAGS_SUSPENDED) == 0)) {
if (pthread->state == PS_SIGSUSPEND) {
if (suspended_thread == NULL)
suspended_thread = pthread;
@@ -175,34 +336,33 @@ thread_sig_find(int sig)
signaled_thread = pthread;
}
}
+ KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
/*
- * If we didn't find a thread in the waiting queue,
- * check the all threads queue:
+ * Only perform wakeups and signal delivery if there is a
+ * custom handler installed:
*/
- if (suspended_thread == NULL &&
- signaled_thread == NULL) {
+ if (handler_installed == 0) {
/*
- * Enter a loop to look for other threads
- * capable of receiving the signal:
+ * There is no handler installed; nothing to do here.
*/
- TAILQ_FOREACH(pthread, &_thread_list, tle) {
- if (!sigismember(
- &pthread->mailbox.tm_context.uc_sigmask, sig)) {
- signaled_thread = pthread;
- break;
- }
- }
- }
-
- if (suspended_thread == NULL &&
- signaled_thread == NULL)
+ } else if (suspended_thread == NULL &&
+ signaled_thread == NULL) {
/*
* Add it to the set of signals pending
* on the process:
*/
- sigaddset(&_thread_sigpending, sig);
- else {
+ KSE_LOCK_ACQUIRE(curkse, &_thread_signal_lock);
+ if (!sigismember(&_thr_proc_sigpending, sig)) {
+ sigaddset(&_thr_proc_sigpending, sig);
+ if (info == NULL)
+ build_siginfo(&_thr_proc_siginfo[sig], sig);
+ else
+ memcpy(&_thr_proc_siginfo[sig], info,
+ sizeof(*info));
+ }
+ KSE_LOCK_RELEASE(curkse, &_thread_signal_lock);
+ } else {
/*
* We only deliver the signal to one thread;
* give preference to the suspended thread:
@@ -213,119 +373,187 @@ thread_sig_find(int sig)
pthread = signaled_thread;
return (pthread);
}
-
- /* Returns nothing. */
return (NULL);
}
-#if __XXX_NOT_YET__
+static void
+build_siginfo(siginfo_t *info, int signo)
+{
+ bzero(info, sizeof(*info));
+ info->si_signo = signo;
+ info->si_pid = _thr_pid;
+}
+
+/*
+ * This is called by a thread when it has pending signals to deliver.
+ * It should only be called from the context of the thread.
+ */
void
-_thread_sig_check_pending(struct pthread *pthread)
+_thr_sig_rundown(struct pthread *curthread, ucontext_t *ucp,
+ struct pthread_sigframe *psf)
{
- sigset_t sigset;
- int i;
+ struct pthread_sigframe psf_save;
+ sigset_t sigset;
+ int i;
+
+ THR_SCHED_LOCK(curthread, curthread);
+ memcpy(&sigset, &curthread->sigpend, sizeof(sigset));
+ sigemptyset(&curthread->sigpend);
+ if (psf != NULL) {
+ memcpy(&psf_save, psf, sizeof(*psf));
+ SIGSETOR(sigset, psf_save.psf_sigset);
+ sigemptyset(&psf->psf_sigset);
+ }
+ THR_SCHED_UNLOCK(curthread, curthread);
+ /* Check the thread's previous state: */
+ if ((psf != NULL) && (psf_save.psf_state != PS_RUNNING)) {
+ /*
+ * Do a little cleanup handling for those threads in
+ * queues before calling the signal handler. Signals
+ * for these threads are temporarily blocked until
+ * after cleanup handling.
+ */
+ switch (psf_save.psf_state) {
+ case PS_COND_WAIT:
+ _cond_wait_backout(curthread);
+ psf_save.psf_state = PS_RUNNING;
+ break;
+
+ case PS_MUTEX_WAIT:
+ _mutex_lock_backout(curthread);
+ psf_save.psf_state = PS_RUNNING;
+ break;
+
+ default:
+ break;
+ }
+ }
/*
- * Check if there are pending signals for the running
- * thread or process that aren't blocked:
+ * Lower the priority before calling the handler in case
+ * it never returns (longjmps back):
*/
- sigset = pthread->sigpend;
- SIGSETOR(sigset, _thread_sigpending);
- SIGSETNAND(sigset, pthread->sigmask);
- SIGSETNAND(sigset, _thread_sigmask);
- if (SIGNOTEMPTY(sigset)) {
- for (i = 1; i < NSIG; i++) {
- if (sigismember(&sigset, i) != 0) {
- if (sigismember(&pthread->sigpend, i) != 0)
- thread_sig_add(pthread, i,
- /*has_args*/ 0);
- else {
- thread_sig_add(pthread, i,
- /*has_args*/ 1);
- sigdelset(&_thread_sigpending, i);
- }
- }
+ curthread->active_priority &= ~THR_SIGNAL_PRIORITY;
+
+ for (i = 1; i < NSIG; i++) {
+ if (sigismember(&sigset, i) != 0) {
+ /* Call the handler: */
+ thr_sig_invoke_handler(curthread, i,
+ &curthread->siginfo[i], ucp);
}
}
+
+ THR_SCHED_LOCK(curthread, curthread);
+ if (psf != NULL)
+ thr_sigframe_restore(curthread, &psf_save);
+ /* Restore the signal mask. */
+ curthread->tmbx.tm_context.uc_sigmask = curthread->sigmask;
+ THR_SCHED_UNLOCK(curthread, curthread);
+ _thr_sig_check_pending(curthread);
}
-#endif
-#if __XXX_NOT_YET__
/*
- * This can only be called from the kernel scheduler. It assumes that
- * all thread contexts are saved and that a signal frame can safely be
- * added to any user thread.
+ * This checks pending signals for the current thread. It should be
+ * called whenever a thread changes its signal mask. Note that this
+ * is called from a thread (using its stack).
+ *
+ * XXX - We might want to just check to see if there are pending
+ * signals for the thread here, but enter the UTS scheduler
+ * to actually install the signal handler(s).
*/
void
-_thread_sig_handle_pending(void)
+_thr_sig_check_pending(struct pthread *curthread)
{
- struct pthread *pthread;
- int sig;
+ sigset_t sigset;
+ sigset_t pending_process;
+ sigset_t pending_thread;
+ kse_critical_t crit;
+ int i;
+
+ curthread->check_pending = 0;
/*
- * Check the array of pending signals:
+ * Check if there are pending signals for the running
+ * thread or process that aren't blocked:
*/
- for (sig = 1; sig <= NSIG; sig++) {
- if (sigismember(&_thread_sigpending, sig)) {
- /* This signal is no longer pending. */
- sigdelset(&_thread_sigpending, sig);
- /* Some signals need special handling. */
- thread_sig_handle_special(sig);
- /* Deliver the signal. */
- if (sigismember(&_thread_sigmask, sig)) {
- sigaddset(&_thread_sigmask, sig);
- if ((pthread = thread_sig_find(sig)) != NULL) {
- /*
- * Setup the target thread to receive
- * the signal:
- */
- thread_sig_add(pthread, sig,
- /*has_args*/ 1);
+ crit = _kse_critical_enter();
+ KSE_LOCK_ACQUIRE(curthread->kse, &_thread_signal_lock);
+ sigset = _thr_proc_sigpending;
+ KSE_LOCK_RELEASE(curthread->kse, &_thread_signal_lock);
+ _kse_critical_leave(crit);
+
+ THR_SCHED_LOCK(curthread, curthread);
+ SIGSETOR(sigset, curthread->sigpend);
+ SIGSETNAND(sigset, curthread->tmbx.tm_context.uc_sigmask);
+ if (SIGNOTEMPTY(sigset)) {
+ ucontext_t uc;
+ volatile int once;
+
+ curthread->check_pending = 0;
+ THR_SCHED_UNLOCK(curthread, curthread);
+
+ /*
+ * Split the pending signals into those that were
+ * pending on the process and those that were pending
+ * on the thread.
+ */
+ sigfillset(&pending_process);
+ sigfillset(&pending_thread);
+ for (i = 1; i < NSIG; i++) {
+ if (sigismember(&sigset, i) != 0) {
+ if (sigismember(&curthread->sigpend, i) != 0) {
+ build_siginfo(&curthread->siginfo[i], i);
+ sigdelset(&pending_thread, i);
+ } else {
+ memcpy(&curthread->siginfo[i],
+ &_thr_proc_siginfo[i],
+ sizeof(siginfo_t));
+ sigdelset(&pending_process, i);
+ }
+ }
+ }
+ /*
+ * Remove any process pending signals that were scheduled
+ * to be delivered from process' pending set.
+ */
+ crit = _kse_critical_enter();
+ KSE_LOCK_ACQUIRE(curthread->kse, &_thread_signal_lock);
+ SIGSETAND(_thr_proc_sigpending, pending_process);
+ KSE_LOCK_RELEASE(curthread->kse, &_thread_signal_lock);
+ _kse_critical_leave(crit);
+
+ /*
+ * Remove any thread pending signals that were scheduled
+ * to be delivered from thread's pending set.
+ */
+ THR_SCHED_LOCK(curthread, curthread);
+ SIGSETAND(curthread->sigpend, pending_thread);
+ THR_SCHED_UNLOCK(curthread, curthread);
+
+ once = 0;
+ THR_GETCONTEXT(&uc);
+ if (once == 0) {
+ once = 1;
+ for (i = 1; i < NSIG; i++) {
+ if (sigismember(&sigset, i) != 0) {
+ /* Call the handler: */
+ thr_sig_invoke_handler(curthread, i,
+ &curthread->siginfo[i], &uc);
}
}
}
}
+ else
+ THR_SCHED_UNLOCK(curthread, curthread);
}
-#endif
/*
- * Do special processing to the thread states before we deliver
- * a signal to the application.
+ * This must be called with upcalls disabled.
*/
static void
-thread_sig_handle_special(int sig)
+handle_special_signals(struct kse *curkse, int sig)
{
- struct pthread *pthread, *pthread_next;
- int i;
-
switch (sig) {
- case SIGCHLD:
- /*
- * Enter a loop to wake up all threads waiting
- * for a process to complete:
- */
- for (pthread = TAILQ_FIRST(&_waitingq);
- pthread != NULL; pthread = pthread_next) {
- /*
- * Grab the next thread before possibly
- * destroying the link entry:
- */
- pthread_next = TAILQ_NEXT(pthread, pqe);
-
- /*
- * If this thread is waiting for a child
- * process to complete, wake it up:
- */
- if (pthread->state == PS_WAIT_WAIT) {
- /* Make the thread runnable: */
- PTHREAD_NEW_STATE(pthread,PS_RUNNING);
-
- /* Return the signal number: */
- pthread->signo = sig;
- }
- }
- break;
-
/*
* POSIX says that pending SIGCONT signals are
* discarded when one of these signals occurs.
@@ -333,13 +561,9 @@ thread_sig_handle_special(int sig)
case SIGTSTP:
case SIGTTIN:
case SIGTTOU:
- /*
- * Enter a loop to discard pending SIGCONT
- * signals:
- */
- TAILQ_FOREACH(pthread, &_thread_list, tle) {
- sigdelset(&pthread->sigpend, SIGCONT);
- }
+ KSE_LOCK_ACQUIRE(curkse, &_thread_signal_lock);
+ sigdelset(&_thr_proc_sigpending, SIGCONT);
+ KSE_LOCK_RELEASE(curkse, &_thread_signal_lock);
break;
default:
@@ -352,12 +576,17 @@ thread_sig_handle_special(int sig)
* This function is only called if there is a handler installed
* for the signal, and if the target thread has the signal
* unmasked.
+ *
+ * This must be called with the thread's scheduling lock held.
*/
static void
-thread_sig_add(struct pthread *pthread, int sig, int has_args)
+thr_sig_add(struct pthread *pthread, int sig, siginfo_t *info)
{
+ int restart;
int suppress_handler = 0;
+ restart = _thread_sigact[sig - 1].sa_flags & SA_RESTART;
+
/* Make sure this signal isn't still in the pending set: */
sigdelset(&pthread->sigpend, sig);
@@ -370,6 +599,8 @@ thread_sig_add(struct pthread *pthread, int sig, int has_args)
*/
case PS_DEAD:
case PS_DEADLOCK:
+ case PS_LOCKWAIT:
+ case PS_SUSPENDED:
case PS_STATE_MAX:
/*
* You can't call a signal handler for threads in these
@@ -387,56 +618,21 @@ thread_sig_add(struct pthread *pthread, int sig, int has_args)
* Remove the thread from the queue before changing its
* priority:
*/
- if ((pthread->flags & PTHREAD_FLAGS_IN_PRIOQ) != 0)
- PTHREAD_PRIOQ_REMOVE(pthread);
- break;
-
- case PS_SUSPENDED:
- break;
-
- case PS_SPINBLOCK:
- /* Remove the thread from the workq and waitq: */
- PTHREAD_WORKQ_REMOVE(pthread);
- PTHREAD_WAITQ_REMOVE(pthread);
- /* Make the thread runnable: */
- PTHREAD_SET_STATE(pthread, PS_RUNNING);
- break;
-
- case PS_SIGWAIT:
- /* The signal handler is not called for threads in SIGWAIT. */
- suppress_handler = 1;
- /* Wake up the thread if the signal is blocked. */
- if (sigismember(pthread->data.sigwait, sig)) {
- /* Change the state of the thread to run: */
- PTHREAD_NEW_STATE(pthread, PS_RUNNING);
-
- /* Return the signal number: */
- pthread->signo = sig;
- } else
- /* Increment the pending signal count. */
- sigaddset(&pthread->sigpend, sig);
- break;
-
- /*
- * The wait state is a special case due to the handling of
- * SIGCHLD signals.
- */
- case PS_WAIT_WAIT:
- if (sig == SIGCHLD) {
- /* Change the state of the thread to run: */
- PTHREAD_WAITQ_REMOVE(pthread);
- PTHREAD_SET_STATE(pthread, PS_RUNNING);
-
- /* Return the signal number: */
- pthread->signo = sig;
- }
+ if ((pthread->flags & THR_FLAGS_IN_RUNQ) != 0)
+ THR_RUNQ_REMOVE(pthread);
else {
/*
- * Mark the thread as interrupted only if the
- * restart flag is not set on the signal action:
+ * This thread is active; add the signal to the
+ * pending set and mark it as having pending
+ * signals.
*/
- PTHREAD_WAITQ_REMOVE(pthread);
- PTHREAD_SET_STATE(pthread, PS_RUNNING);
+ suppress_handler = 1;
+ sigaddset(&pthread->sigpend, sig);
+ build_siginfo(&pthread->siginfo[sig], sig);
+ pthread->check_pending = 1;
+ if ((pthread->blocked != 0) &&
+ !THR_IN_CRITICAL(pthread))
+ kse_thr_interrupt(&pthread->tmbx /* XXX - restart?!?! */);
}
break;
@@ -451,18 +647,7 @@ thread_sig_add(struct pthread *pthread, int sig, int has_args)
* be added back to the wait queue once all signal
* handlers have been invoked.
*/
- PTHREAD_WAITQ_REMOVE(pthread);
- break;
-
- case PS_JOIN:
- /*
- * Remove the thread from the wait queue. It will
- * be added back to the wait queue once all signal
- * handlers have been invoked.
- */
- PTHREAD_WAITQ_REMOVE(pthread);
- /* Make the thread runnable: */
- PTHREAD_SET_STATE(pthread, PS_RUNNING);
+ KSE_WAITQ_REMOVE(pthread->kse, pthread);
break;
case PS_SLEEP_WAIT:
@@ -471,55 +656,76 @@ thread_sig_add(struct pthread *pthread, int sig, int has_args)
* regardless of SA_RESTART:
*/
pthread->interrupted = 1;
- /* Remove threads in poll and select from the workq: */
- if ((pthread->flags & PTHREAD_FLAGS_IN_WORKQ) != 0)
- PTHREAD_WORKQ_REMOVE(pthread);
- PTHREAD_WAITQ_REMOVE(pthread);
- PTHREAD_SET_STATE(pthread, PS_RUNNING);
+ KSE_WAITQ_REMOVE(pthread->kse, pthread);
+ THR_SET_STATE(pthread, PS_RUNNING);
break;
+ case PS_JOIN:
case PS_SIGSUSPEND:
- PTHREAD_WAITQ_REMOVE(pthread);
- PTHREAD_SET_STATE(pthread, PS_RUNNING);
+ KSE_WAITQ_REMOVE(pthread->kse, pthread);
+ THR_SET_STATE(pthread, PS_RUNNING);
break;
- }
- DBG_MSG(">>> suppress_handler = %d\n", suppress_handler);
+ case PS_SIGWAIT:
+ /* The signal handler is not called for threads in SIGWAIT. */
+ suppress_handler = 1;
+ /* Wake up the thread if the signal is blocked. */
+ if (sigismember(pthread->data.sigwait, sig)) {
+ /* Return the signal number: */
+ pthread->signo = sig;
+
+ /* Make the thread runnable: */
+ _thr_setrunnable_unlocked(pthread);
+ } else
+ /* Increment the pending signal count. */
+ sigaddset(&pthread->sigpend, sig);
+ break;
+ }
if (suppress_handler == 0) {
- /* Setup a signal frame and save the current threads state: */
- thread_sigframe_add(pthread, sig, has_args);
-
- /*
- * Signals are deferred until just before the threads
- * signal handler is invoked:
- */
- pthread->sig_defer_count = 1;
+ if (pthread->curframe == NULL) {
+ /*
+ * This thread is active. Just add it to the
+ * thread's pending set.
+ */
+ sigaddset(&pthread->sigpend, sig);
+ pthread->check_pending = 1;
+ if (info == NULL)
+ build_siginfo(&pthread->siginfo[sig], sig);
+ else
+ memcpy(&pthread->siginfo[sig], info,
+ sizeof(*info));
+ } else {
+ /*
+ * Set up a signal frame and save the current thread's
+ * state:
+ */
+ thr_sigframe_add(pthread, sig, info);
+ }
- /* Make sure the thread is runnable: */
if (pthread->state != PS_RUNNING)
- PTHREAD_SET_STATE(pthread, PS_RUNNING);
+ THR_SET_STATE(pthread, PS_RUNNING);
+
/*
* The thread should be removed from all scheduling
- * queues at this point. Raise the priority and place
- * the thread in the run queue. It is also possible
- * for a signal to be sent to a suspended thread,
- * mostly via pthread_kill(). If a thread is suspended,
- * don't insert it into the priority queue; just set
- * its state to suspended and it will run the signal
- * handler when it is resumed.
+ * queues at this point. Raise the priority and
+ * place the thread in the run queue. It is also
+ * possible for a signal to be sent to a suspended
+ * thread, mostly via pthread_kill(). If a thread
+ * is suspended, don't insert it into the priority
+ * queue; just set its state to suspended and it
+ * will run the signal handler when it is resumed.
*/
- pthread->active_priority |= PTHREAD_SIGNAL_PRIORITY;
- if ((pthread->flags & PTHREAD_FLAGS_SUSPENDED) != 0)
- PTHREAD_SET_STATE(pthread, PS_SUSPENDED);
- else
- PTHREAD_PRIOQ_INSERT_TAIL(pthread);
+ pthread->active_priority |= THR_SIGNAL_PRIORITY;
+ if ((pthread->flags & THR_FLAGS_SUSPENDED) != 0)
+ THR_SET_STATE(pthread, PS_SUSPENDED);
+ else if ((pthread->flags & THR_FLAGS_IN_RUNQ) == 0)
+ THR_RUNQ_INSERT_TAIL(pthread);
}
}
-#if __XXX_NOT_YET__
static void
-thread_sig_check_state(struct pthread *pthread, int sig)
+thr_sig_check_state(struct pthread *pthread, int sig)
{
/*
* Process according to thread state:
@@ -528,91 +734,54 @@ thread_sig_check_state(struct pthread *pthread, int sig)
/*
* States which do not change when a signal is trapped:
*/
- case PS_DEAD:
- case PS_DEADLOCK:
- case PS_STATE_MAX:
case PS_RUNNING:
- case PS_SUSPENDED:
- case PS_SPINBLOCK:
+ case PS_LOCKWAIT:
+ case PS_MUTEX_WAIT:
case PS_COND_WAIT:
case PS_JOIN:
- case PS_MUTEX_WAIT:
+ case PS_SUSPENDED:
+ case PS_DEAD:
+ case PS_DEADLOCK:
+ case PS_STATE_MAX:
break;
case PS_SIGWAIT:
/* Wake up the thread if the signal is blocked. */
if (sigismember(pthread->data.sigwait, sig)) {
- /* Change the state of the thread to run: */
- PTHREAD_NEW_STATE(pthread, PS_RUNNING);
-
/* Return the signal number: */
pthread->signo = sig;
+
+ /* Change the state of the thread to run: */
+ _thr_setrunnable_unlocked(pthread);
} else
/* Increment the pending signal count. */
sigaddset(&pthread->sigpend, sig);
break;
- /*
- * The wait state is a special case due to the handling of
- * SIGCHLD signals.
- */
- case PS_WAIT_WAIT:
- if (sig == SIGCHLD) {
- /*
- * Remove the thread from the wait queue and
- * make it runnable:
- */
- PTHREAD_NEW_STATE(pthread, PS_RUNNING);
-
- /* Return the signal number: */
- pthread->signo = sig;
- }
- break;
-
- case PS_FDLR_WAIT:
- case PS_FDLW_WAIT:
case PS_SIGSUSPEND:
case PS_SLEEP_WAIT:
/*
* Remove the thread from the wait queue and make it
* runnable:
*/
- PTHREAD_NEW_STATE(pthread, PS_RUNNING);
-
- /* Flag the operation as interrupted: */
- pthread->interrupted = 1;
- break;
-
- /*
- * These states are additionally in the work queue:
- */
- case PS_FDR_WAIT:
- case PS_FDW_WAIT:
- case PS_FILE_WAIT:
- case PS_POLL_WAIT:
- case PS_SELECT_WAIT:
- /*
- * Remove the thread from the wait and work queues, and
- * make it runnable:
- */
- PTHREAD_WORKQ_REMOVE(pthread);
- PTHREAD_NEW_STATE(pthread, PS_RUNNING);
+ _thr_setrunnable_unlocked(pthread);
/* Flag the operation as interrupted: */
pthread->interrupted = 1;
break;
}
}
-#endif
-#if __XXX_NOT_YET__
/*
* Send a signal to a specific thread (ala pthread_kill):
*/
void
-_thread_sig_send(struct pthread *pthread, int sig)
+_thr_sig_send(struct pthread *pthread, int sig)
{
- struct pthread *curthread = _get_curthread();
+ struct pthread *curthread = _get_curthread();
+
+ /* Lock the scheduling queue of the target thread. */
+ THR_SCHED_LOCK(curthread, pthread);
/* Check for signals whose actions are SIG_DFL: */
if (_thread_sigact[sig - 1].sa_handler == SIG_DFL) {
@@ -620,17 +789,21 @@ _thread_sig_send(struct pthread *pthread, int sig)
* Check to see if a temporary signal handler is
* installed for sigwaiters:
*/
- if (_thread_dfl_count[sig] == 0)
+ if (_thread_dfl_count[sig] == 0) {
/*
* Deliver the signal to the process if a handler
* is not installed:
*/
+ THR_SCHED_UNLOCK(curthread, pthread);
kill(getpid(), sig);
+ THR_SCHED_LOCK(curthread, pthread);
+ }
/*
* Assuming we're still running after the above kill(),
* make any necessary state changes to the thread:
*/
- thread_sig_check_state(pthread, sig);
+ thr_sig_check_state(pthread, sig);
+ THR_SCHED_UNLOCK(curthread, pthread);
}
/*
* Check that the signal is not being ignored:
@@ -638,145 +811,98 @@ _thread_sig_send(struct pthread *pthread, int sig)
else if (_thread_sigact[sig - 1].sa_handler != SIG_IGN) {
if (pthread->state == PS_SIGWAIT &&
sigismember(pthread->data.sigwait, sig)) {
- /* Change the state of the thread to run: */
- PTHREAD_NEW_STATE(pthread, PS_RUNNING);
-
/* Return the signal number: */
pthread->signo = sig;
- } else if (sigismember(&pthread->sigmask, sig))
+
+ /* Change the state of the thread to run: */
+ _thr_setrunnable_unlocked(pthread);
+ THR_SCHED_UNLOCK(curthread, pthread);
+ } else if (sigismember(&pthread->tmbx.tm_context.uc_sigmask, sig)) {
/* Add the signal to the pending set: */
sigaddset(&pthread->sigpend, sig);
- else if (pthread == curthread)
- /* Call the signal handler for the current thread: */
- thread_sig_invoke_handler(sig, NULL, NULL);
- else {
- /* Protect the scheduling queues: */
- _thread_kern_sig_defer();
+ THR_SCHED_UNLOCK(curthread, pthread);
+ } else if (pthread == curthread) {
+ ucontext_t uc;
+ siginfo_t info;
+ volatile int once;
+
+ THR_SCHED_UNLOCK(curthread, pthread);
+ build_siginfo(&info, sig);
+ once = 0;
+ THR_GETCONTEXT(&uc);
+ if (once == 0) {
+ once = 1;
+ /*
+ * Call the signal handler for the current
+ * thread:
+ */
+ thr_sig_invoke_handler(curthread, sig,
+ &info, &uc);
+ }
+ } else {
/*
* Perform any state changes due to signal
* arrival:
*/
- thread_sig_add(pthread, sig, /* has args */ 0);
- /* Unprotect the scheduling queues: */
- _thread_kern_sig_undefer();
+ thr_sig_add(pthread, sig, NULL);
+ THR_SCHED_UNLOCK(curthread, pthread);
}
}
}
-#endif
-/*
- * User thread signal handler wrapper.
- *
- * thread - current running thread
- */
-void
-_thread_sig_wrapper(int sig, siginfo_t *info, ucontext_t *context)
+static void
+thr_sigframe_add(struct pthread *thread, int sig, siginfo_t *info)
{
- struct pthread_state_data psd;
- struct pthread *thread = _get_curthread();
- __siginfohandler_t *handler;
-
- /* Save the thread's previous state. */
- thread_sigframe_save(thread, &psd);
+ if (thread->curframe == NULL)
+ PANIC("Thread doesn't have signal frame ");
- /* Check the threads previous state: */
- if (psd.psd_state != PS_RUNNING) {
+ if (thread->check_pending == 0) {
/*
- * Do a little cleanup handling for those threads in
- * queues before calling the signal handler. Signals
- * for these threads are temporarily blocked until
- * after cleanup handling.
+ * Multiple signals can be added to the same signal
+ * frame. Only save the thread's state the first time.
*/
- switch (psd.psd_state) {
- case PS_COND_WAIT:
- _cond_wait_backout(thread);
- psd.psd_state = PS_RUNNING;
- break;
-
- case PS_MUTEX_WAIT:
- _mutex_lock_backout(thread);
- psd.psd_state = PS_RUNNING;
- break;
-
- default:
- break;
- }
+ thr_sigframe_save(thread, thread->curframe);
+ thread->check_pending = 1;
+ thread->flags &= THR_FLAGS_PRIVATE;
}
-
- /* Unblock the signal in case we don't return from the handler. */
- /*
- * XXX - This is totally bogus. We need to lock the signal mask
- * somehow.
- */
- sigdelset(&_thread_sigmask, sig);
-
- /*
- * Lower the priority before calling the handler in case
- * it never returns (longjmps back):
- */
- thread->active_priority &= ~PTHREAD_SIGNAL_PRIORITY;
-
- /*
- * Reenable interruptions without checking for the need to
- * context switch.
- */
- thread->sig_defer_count = 0;
-
- if (_thread_sigact[sig -1].sa_handler != NULL) {
- handler = (__siginfohandler_t *)
- _thread_sigact[sig - 1].sa_handler;
- handler(sig, info, context);
- }
-
- /* Restore the signal frame. */
- thread_sigframe_restore(thread, &psd);
-
- /* The signal mask was restored; check for any pending signals. */
- /* XXX - thread->check_pending = 1; */
-}
-
-static void
-thread_sigframe_add(struct pthread *thread, int sig, int has_args)
-{
- struct pthread_signal_frame *psf = NULL;
- unsigned long stackp;
-
- /* Add a signal frame to the stack, pointing to our signal wrapper. */
- signalcontext(&thread->mailbox.tm_context, sig,
- (__sighandler_t *)_thread_sig_wrapper);
+ sigaddset(&thread->curframe->psf_sigset, sig);
+ if (info != NULL)
+ memcpy(&thread->siginfo[sig], info, sizeof(*info));
+ else
+ build_siginfo(&thread->siginfo[sig], sig);
/* Setup the new signal mask. */
- SIGSETOR(thread->mailbox.tm_context.uc_sigmask,
+ SIGSETOR(thread->tmbx.tm_context.uc_sigmask,
_thread_sigact[sig - 1].sa_mask);
- sigaddset(&thread->mailbox.tm_context.uc_sigmask, sig);
+ sigaddset(&thread->tmbx.tm_context.uc_sigmask, sig);
}
-static void
-thread_sigframe_restore(struct pthread *thread, struct pthread_state_data *psd)
+void
+thr_sigframe_restore(struct pthread *thread, struct pthread_sigframe *psf)
{
- thread->wakeup_time = psd->psd_wakeup_time;
- thread->data = psd->psd_wait_data;
- thread->state = psd->psd_state;
- thread->flags = psd->psd_flags;
- thread->interrupted = psd->psd_interrupted;
- thread->sig_defer_count = psd->psd_sig_defer_count;
+ thread->flags = psf->psf_flags;
+ thread->interrupted = psf->psf_interrupted;
+ thread->signo = psf->psf_signo;
+ thread->state = psf->psf_state;
+ thread->data = psf->psf_wait_data;
+ thread->wakeup_time = psf->psf_wakeup_time;
+ if (thread->sigmask_seqno == psf->psf_seqno)
+ thread->tmbx.tm_context.uc_sigmask = psf->psf_sigmask;
+ else
+ thread->tmbx.tm_context.uc_sigmask = thread->sigmask;
}
static void
-thread_sigframe_save(struct pthread *thread, struct pthread_state_data *psd)
+thr_sigframe_save(struct pthread *thread, struct pthread_sigframe *psf)
{
- psd->psd_wakeup_time = thread->wakeup_time;
- psd->psd_wait_data = thread->data;
- psd->psd_state = thread->state;
- psd->psd_flags = thread->flags &
- (PTHREAD_FLAGS_PRIVATE | PTHREAD_FLAGS_TRACE);
- psd->psd_interrupted = thread->interrupted;
- psd->psd_sig_defer_count = thread->sig_defer_count;
-}
-
-void
-_thread_sig_handler(int sig, siginfo_t *info, ucontext_t *context)
-{
-
- /* Nothing. */
+ /* This has to initialize all members of the sigframe. */
+ psf->psf_flags = thread->flags & THR_FLAGS_PRIVATE;
+ psf->psf_interrupted = thread->interrupted;
+ psf->psf_signo = thread->signo;
+ psf->psf_state = thread->state;
+ psf->psf_wait_data = thread->data;
+ psf->psf_wakeup_time = thread->wakeup_time;
+ psf->psf_sigmask = thread->tmbx.tm_context.uc_sigmask;
+ psf->psf_seqno = thread->sigmask_seqno;
+ sigemptyset(&psf->psf_sigset);
}
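
Note: the delivery scheme described in the long comment at the top of thr_sig.c relies on threads running down their own pending signals when they leave a critical region or change their signal mask (_thr_sig_check_pending()). A simplified sketch of that rundown step follows, using only portable sigset_t operations; the handlers array and the function name are illustrative stand-ins for the library's internal _thread_sigact state, not real interfaces.

#include <signal.h>

static void
run_down_pending(const sigset_t *thr_pending, const sigset_t *proc_pending,
    const sigset_t *thr_mask, void (*handlers[NSIG])(int))
{
	int sig;

	for (sig = 1; sig < NSIG; sig++) {
		/* Deliverable: pending on the thread or process and unmasked. */
		if ((sigismember(thr_pending, sig) ||
		    sigismember(proc_pending, sig)) &&
		    !sigismember(thr_mask, sig) && handlers[sig] != NULL)
			handlers[sig](sig);
	}
}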
diff --git a/lib/libpthread/thread/thr_sigaction.c b/lib/libpthread/thread/thr_sigaction.c
index a16f859..7ede6d2 100644
--- a/lib/libpthread/thread/thr_sigaction.c
+++ b/lib/libpthread/thread/thr_sigaction.c
@@ -50,8 +50,8 @@ _sigaction(int sig, const struct sigaction * act, struct sigaction * oact)
errno = EINVAL;
ret = -1;
} else {
- if (_thread_initial == NULL)
- _thread_init();
+ if (_thr_initial == NULL)
+ _libpthread_init(NULL);
/*
* Check if the existing signal action structure contents are
@@ -76,14 +76,9 @@ _sigaction(int sig, const struct sigaction * act, struct sigaction * oact)
* Check if the kernel needs to be advised of a change
* in signal action:
*/
- if (act != NULL && sig != SIGCHLD) {
- /*
- * Ensure the signal handler cannot be interrupted
- * by other signals. Always request the POSIX signal
- * handler arguments.
- */
- sigfillset(&gact.sa_mask);
- gact.sa_flags = SA_SIGINFO | SA_ONSTACK;
+ if (act != NULL && sig != SIGINFO) {
+ gact.sa_mask = act->sa_mask;
+ gact.sa_flags = SA_SIGINFO | act->sa_flags;
/*
* Check if the signal handler is being set to
@@ -98,10 +93,10 @@ _sigaction(int sig, const struct sigaction * act, struct sigaction * oact)
* Specify the thread kernel signal
* handler:
*/
- gact.sa_handler = (void (*) ()) _thread_sig_handler;
+ gact.sa_handler = (void (*) ())_thr_sig_handler;
/* Change the signal action in the kernel: */
- if (__sys_sigaction(sig,&gact,NULL) != 0)
+ if (__sys_sigaction(sig, &gact, NULL) != 0)
ret = -1;
}
}
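
Note: _sigaction() now forwards the caller's mask and flags to the kernel (always adding SA_SIGINFO) and points the kernel at _thr_sig_handler. From an application's point of view nothing changes; a minimal example of a handler that would be wrapped this way is sketched below. The function and variable names are illustrative, and the handler body only records si_pid since anything heavier would not be async-signal-safe.

#include <signal.h>

static volatile sig_atomic_t usr1_sender;

static void
usr1_info_handler(int sig, siginfo_t *info, void *uap)
{
	(void)sig;
	(void)uap;
	/* info is always valid here because the library forces SA_SIGINFO. */
	usr1_sender = (sig_atomic_t)info->si_pid;
}

static int
install_usr1_handler(void)
{
	struct sigaction sa;

	sa.sa_sigaction = usr1_info_handler;
	sa.sa_flags = SA_SIGINFO | SA_RESTART;
	sigemptyset(&sa.sa_mask);
	return (sigaction(SIGUSR1, &sa, NULL));
}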
diff --git a/lib/libpthread/thread/thr_sigmask.c b/lib/libpthread/thread/thr_sigmask.c
index f98c421..d9cb839 100644
--- a/lib/libpthread/thread/thr_sigmask.c
+++ b/lib/libpthread/thread/thr_sigmask.c
@@ -36,6 +36,7 @@
#include <sys/signalvar.h>
#include <errno.h>
#include <signal.h>
+#include <string.h>
#include <pthread.h>
#include "thr_private.h"
@@ -44,32 +45,59 @@ __weak_reference(_pthread_sigmask, pthread_sigmask);
int
_pthread_sigmask(int how, const sigset_t *set, sigset_t *oset)
{
- int i;
struct pthread *curthread = _get_curthread();
+ int ret;
+ ret = 0;
if (oset != NULL)
- bcopy(&curthread->mailbox.tm_context.uc_sigmask, oset,
- sizeof(sigset_t));
- if (set == NULL)
- return (0);
- switch (how) {
- case SIG_BLOCK:
- for (i = 0; i < _SIG_WORDS; i++)
- curthread->mailbox.tm_context.uc_sigmask.__bits[i] |=
- set->__bits[i];
- break;
- case SIG_UNBLOCK:
- for (i = 0; i < _SIG_WORDS; i++)
- curthread->mailbox.tm_context.uc_sigmask.__bits[i] &=
- ~set->__bits[i];
- break;
- case SIG_SETMASK:
- bcopy(set, &curthread->mailbox.tm_context.uc_sigmask,
- sizeof(sigset_t));
- break;
- default:
- errno = EINVAL;
- return (-1);
+ /* Return the current mask: */
+ *oset = curthread->tmbx.tm_context.uc_sigmask;
+
+ /* Check if a new signal set was provided by the caller: */
+ if (set != NULL) {
+ THR_SCHED_LOCK(curthread, curthread);
+
+ /* Process according to what to do: */
+ switch (how) {
+ /* Block signals: */
+ case SIG_BLOCK:
+ /* Add signals to the existing mask: */
+ SIGSETOR(curthread->tmbx.tm_context.uc_sigmask, *set);
+ break;
+
+ /* Unblock signals: */
+ case SIG_UNBLOCK:
+ /* Clear signals from the existing mask: */
+ SIGSETNAND(curthread->tmbx.tm_context.uc_sigmask, *set);
+ break;
+
+ /* Set the signal process mask: */
+ case SIG_SETMASK:
+ /* Set the new mask: */
+ curthread->tmbx.tm_context.uc_sigmask = *set;
+ break;
+
+ /* Trap invalid actions: */
+ default:
+ /* Return an invalid argument: */
+ errno = EINVAL;
+ ret = -1;
+ break;
+ }
+
+ if (ret == 0) {
+ curthread->sigmask =
+ curthread->tmbx.tm_context.uc_sigmask;
+ curthread->sigmask_seqno++;
+ }
+
+ THR_SCHED_UNLOCK(curthread, curthread);
+
+ /*
+ * Run down any pending signals:
+ */
+ if (ret == 0)
+ _thr_sig_check_pending(curthread);
}
- return (0);
+ return (ret);
}
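
Note: pthread_sigmask() now edits the mask stored in the thread mailbox context under the scheduling lock and then runs down any newly unblocked pending signals. A small usage example against that behaviour follows; block_sigusr1_around_work() and work() are illustrative names, not part of the library.

#include <pthread.h>
#include <signal.h>

static void
block_sigusr1_around_work(void (*work)(void))
{
	sigset_t set, oset;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);

	/* SIG_BLOCK ORs the set into the calling thread's current mask. */
	pthread_sigmask(SIG_BLOCK, &set, &oset);
	work();
	/*
	 * Restoring the old mask may unblock a signal that arrived in the
	 * meantime; the implementation above then runs it down through
	 * _thr_sig_check_pending().
	 */
	pthread_sigmask(SIG_SETMASK, &oset, NULL);
}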
diff --git a/lib/libpthread/thread/thr_sigpending.c b/lib/libpthread/thread/thr_sigpending.c
index 5b3c02f..7f42ff3 100644
--- a/lib/libpthread/thread/thr_sigpending.c
+++ b/lib/libpthread/thread/thr_sigpending.c
@@ -45,6 +45,7 @@ int
_sigpending(sigset_t *set)
{
struct pthread *curthread = _get_curthread();
+ kse_critical_t crit;
int ret = 0;
/* Check for a null signal set pointer: */
@@ -54,7 +55,11 @@ _sigpending(sigset_t *set)
}
else {
*set = curthread->sigpend;
- SIGSETOR(*set, _thread_sigpending);
+ crit = _kse_critical_enter();
+ KSE_LOCK_ACQUIRE(curthread->kse, &_thread_signal_lock);
+ SIGSETOR(*set, _thr_proc_sigpending);
+ KSE_LOCK_RELEASE(curthread->kse, &_thread_signal_lock);
+ _kse_critical_leave(crit);
}
/* Return the completion status: */
return (ret);
diff --git a/lib/libpthread/thread/thr_sigsuspend.c b/lib/libpthread/thread/thr_sigsuspend.c
index dc805ac..7ce027a 100644
--- a/lib/libpthread/thread/thr_sigsuspend.c
+++ b/lib/libpthread/thread/thr_sigsuspend.c
@@ -32,22 +32,58 @@
* $FreeBSD$
*/
#include <signal.h>
-#include <sys/param.h>
-#include <sys/signalvar.h>
#include <errno.h>
#include <pthread.h>
+#include <string.h>
#include "thr_private.h"
__weak_reference(__sigsuspend, sigsuspend);
int
+_sigsuspend(const sigset_t *set)
+{
+ struct pthread *curthread = _get_curthread();
+ int ret = -1;
+
+ /* Check if a new signal set was provided by the caller: */
+ if (set != NULL) {
+ THR_SCHED_LOCK(curthread, curthread);
+
+ /* Change the caller's mask: */
+ memcpy(&curthread->tmbx.tm_context.uc_sigmask,
+ set, sizeof(sigset_t));
+
+ THR_SET_STATE(curthread, PS_SIGSUSPEND);
+
+ THR_SCHED_UNLOCK(curthread, curthread);
+
+ /* Wait for a signal: */
+ _thr_sched_switch(curthread);
+
+ /* Always return an interrupted error: */
+ errno = EINTR;
+
+ /* Restore the signal mask: */
+ memcpy(&curthread->tmbx.tm_context.uc_sigmask,
+ &curthread->sigmask, sizeof(sigset_t));
+ } else {
+ /* Return an invalid argument error: */
+ errno = EINVAL;
+ }
+
+ /* Return the completion status: */
+ return (ret);
+}
+
+int
__sigsuspend(const sigset_t * set)
{
- int ret;
+ struct pthread *curthread = _get_curthread();
+ int ret;
- _thread_enter_cancellation_point();
- ret = __sys_sigsuspend(set);
- _thread_leave_cancellation_point();
+ _thr_enter_cancellation_point(curthread);
+ ret = _sigsuspend(set);
+ _thr_leave_cancellation_point(curthread);
- return ret;
+ return (ret);
}
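
Note: _sigsuspend() swaps in the caller's mask, parks the thread in PS_SIGSUSPEND, and always returns -1 with errno set to EINTR once a signal has been handled. The classic calling pattern against that behaviour is sketched below with SIGUSR1; usr1_handler() and wait_for_sigusr1() are example names only.

#include <pthread.h>
#include <signal.h>

static volatile sig_atomic_t got_sigusr1;

static void
usr1_handler(int sig)
{
	(void)sig;
	got_sigusr1 = 1;
}

static void
wait_for_sigusr1(void)
{
	struct sigaction sa;
	sigset_t block, waitmask;

	sa.sa_handler = usr1_handler;
	sa.sa_flags = 0;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGUSR1, &sa, NULL);

	/* Block SIGUSR1 so it can only be delivered inside sigsuspend(). */
	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	pthread_sigmask(SIG_BLOCK, &block, &waitmask);
	sigdelset(&waitmask, SIGUSR1);

	while (!got_sigusr1)
		sigsuspend(&waitmask);	/* always returns -1, errno == EINTR */
}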
diff --git a/lib/libpthread/thread/thr_sigwait.c b/lib/libpthread/thread/thr_sigwait.c
index 5364d66..b955251 100644
--- a/lib/libpthread/thread/thr_sigwait.c
+++ b/lib/libpthread/thread/thr_sigwait.c
@@ -42,11 +42,135 @@
__weak_reference(_sigwait, sigwait);
int
-_sigwait(const sigset_t * __restrict set, int * __restrict sig)
+_sigwait(const sigset_t *set, int *sig)
{
+ struct pthread *curthread = _get_curthread();
+ int ret = 0;
+ int i;
+ sigset_t tempset, waitset;
+ struct sigaction act;
+
+ _thr_enter_cancellation_point(curthread);
/*
- * All signals are invalid for waiting.
+ * Specify the thread kernel signal handler.
*/
- return (EINVAL);
+ act.sa_handler = (void (*) ()) _thr_sig_handler;
+ act.sa_flags = SA_RESTART | SA_SIGINFO;
+ /* Ensure the signal handler cannot be interrupted by other signals: */
+ sigfillset(&act.sa_mask);
+
+ /*
+ * Initialize the set of signals that will be waited on:
+ */
+ waitset = *set;
+
+ /* These signals can't be waited on. */
+ sigdelset(&waitset, SIGKILL);
+ sigdelset(&waitset, SIGSTOP);
+
+ /*
+ * Check to see if a pending signal is in the wait mask.
+ * This has to be atomic.
+ */
+ tempset = curthread->sigpend;
+ SIGSETOR(tempset, _thr_proc_sigpending);
+ SIGSETAND(tempset, waitset);
+ if (SIGNOTEMPTY(tempset)) {
+ /* Enter a loop to find a pending signal: */
+ for (i = 1; i < NSIG; i++) {
+ if (sigismember(&tempset, i))
+ break;
+ }
+
+ /* Clear the pending signal: */
+ if (sigismember(&curthread->sigpend, i))
+ sigdelset(&curthread->sigpend, i);
+ else
+ sigdelset(&_thr_proc_sigpending, i);
+
+ /* Return the signal number to the caller: */
+ *sig = i;
+
+ _thr_leave_cancellation_point(curthread);
+ return (0);
+ }
+
+ /*
+ * Lock the array of SIG_DFL wait counts.
+ */
+ THR_LOCK_ACQUIRE(curthread, &_thread_signal_lock);
+
+ /*
+ * Enter a loop to find the signals that are SIG_DFL. For
+ * these signals we must install a dummy signal handler in
+ * order for the kernel to pass them in to us. POSIX says
+ * that the _application_ must explicitly install a dummy
+ * handler for signals that are SIG_IGN in order to sigwait
+ * on them. Note that SIG_IGN signals are left in the
+ * mask because a subsequent sigaction could enable an
+ * ignored signal.
+ */
+ sigemptyset(&tempset);
+ for (i = 1; i < NSIG; i++) {
+ if (sigismember(&waitset, i) &&
+ (_thread_sigact[i - 1].sa_handler == SIG_DFL)) {
+ _thread_dfl_count[i]++;
+ sigaddset(&tempset, i);
+ if (_thread_dfl_count[i] == 1) {
+ if (__sys_sigaction(i, &act, NULL) != 0)
+ ret = -1;
+ }
+ }
+ }
+ /* Done accessing _thread_dfl_count for now. */
+ THR_LOCK_RELEASE(curthread, &_thread_signal_lock);
+
+ if (ret == 0) {
+ /*
+ * Save the wait signal mask. The wait signal
+ * mask is independent of the thread's signal mask
+ * and requires separate storage.
+ */
+ curthread->data.sigwait = &waitset;
+
+ /* Wait for a signal: */
+ THR_SCHED_LOCK(curthread, curthread);
+ THR_SET_STATE(curthread, PS_SIGWAIT);
+ THR_SCHED_UNLOCK(curthread, curthread);
+ _thr_sched_switch(curthread);
+
+ /* Return the signal number to the caller: */
+ *sig = curthread->signo;
+
+ /*
+ * Probably unnecessary, but since it's in a union struct
+ * we don't know how it could be used in the future.
+ */
+ curthread->data.sigwait = NULL;
+ }
+
+ /*
+ * Relock the array of SIG_DFL wait counts.
+ */
+ THR_LOCK_ACQUIRE(curthread, &_thread_signal_lock);
+
+ /* Restore the sigactions: */
+ act.sa_handler = SIG_DFL;
+ for (i = 1; i < NSIG; i++) {
+ if (sigismember(&tempset, i)) {
+ _thread_dfl_count[i]--;
+ if ((_thread_sigact[i - 1].sa_handler == SIG_DFL) &&
+ (_thread_dfl_count[i] == 0)) {
+ if (__sys_sigaction(i, &act, NULL) != 0)
+ ret = -1;
+ }
+ }
+ }
+ /* Done accessing _thread_dfl_count. */
+ THR_LOCK_RELEASE(curthread, &_thread_signal_lock);
+
+ _thr_leave_cancellation_point(curthread);
+
+ /* Return the completion status: */
+ return (ret);
}
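
Note: the new _sigwait() temporarily installs a dummy handler for SIG_DFL members of the wait set (tracked in _thread_dfl_count) and parks the caller in PS_SIGWAIT until _thr_sig_dispatch() hands it a signal number. Application usage is unchanged; a typical dedicated signal-consumer thread looks roughly like the sketch below, where signal_consumer() is an illustrative name.

#include <pthread.h>
#include <signal.h>
#include <stdio.h>

static void *
signal_consumer(void *arg)
{
	sigset_t waitset;
	int sig;

	(void)arg;
	sigemptyset(&waitset);
	sigaddset(&waitset, SIGUSR1);
	for (;;) {
		/* Blocks in PS_SIGWAIT until the library delivers a signal. */
		if (sigwait(&waitset, &sig) == 0)
			printf("consumed signal %d\n", sig);
	}
	return (NULL);
}

All threads should block SIGUSR1 with pthread_sigmask() before this thread is created, so the signal is only ever consumed by sigwait().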
diff --git a/lib/libpthread/thread/thr_sleep.c b/lib/libpthread/thread/thr_sleep.c
index 70b1c9f..0f02db7 100644
--- a/lib/libpthread/thread/thr_sleep.c
+++ b/lib/libpthread/thread/thr_sleep.c
@@ -38,11 +38,12 @@ __weak_reference(_sleep, sleep);
unsigned int
_sleep(unsigned int seconds)
{
+ struct pthread *curthread = _get_curthread();
unsigned int ret;
- _thread_enter_cancellation_point();
+ _thr_enter_cancellation_point(curthread);
ret = __sleep(seconds);
- _thread_leave_cancellation_point();
+ _thr_leave_cancellation_point(curthread);
- return ret;
+ return (ret);
}
diff --git a/lib/libpthread/thread/thr_spec.c b/lib/libpthread/thread/thr_spec.c
index 07ef387..2cd18d1 100644
--- a/lib/libpthread/thread/thr_spec.c
+++ b/lib/libpthread/thread/thr_spec.c
@@ -39,7 +39,6 @@
#include "thr_private.h"
struct pthread_key {
- spinlock_t lock;
volatile int allocated;
volatile int count;
int seqno;
@@ -47,7 +46,7 @@ struct pthread_key {
};
/* Static variables: */
-static struct pthread_key key_table[PTHREAD_KEYS_MAX];
+static struct pthread_key key_table[PTHREAD_KEYS_MAX];
__weak_reference(_pthread_key_create, pthread_key_create);
__weak_reference(_pthread_key_delete, pthread_key_delete);
@@ -56,44 +55,47 @@ __weak_reference(_pthread_setspecific, pthread_setspecific);
int
-_pthread_key_create(pthread_key_t * key, void (*destructor) (void *))
+_pthread_key_create(pthread_key_t *key, void (*destructor) (void *))
{
+ struct pthread *curthread = _get_curthread();
+
+ /* Lock the key table: */
+ THR_LOCK_ACQUIRE(curthread, &_keytable_lock);
for ((*key) = 0; (*key) < PTHREAD_KEYS_MAX; (*key)++) {
- /* Lock the key table entry: */
- _SPINLOCK(&key_table[*key].lock);
if (key_table[(*key)].allocated == 0) {
key_table[(*key)].allocated = 1;
key_table[(*key)].destructor = destructor;
key_table[(*key)].seqno++;
- /* Unlock the key table entry: */
- _SPINUNLOCK(&key_table[*key].lock);
+ /* Unlock the key table: */
+ THR_LOCK_RELEASE(curthread, &_keytable_lock);
return (0);
}
- /* Unlock the key table entry: */
- _SPINUNLOCK(&key_table[*key].lock);
}
+ /* Unlock the key table: */
+ THR_LOCK_RELEASE(curthread, &_keytable_lock);
return (EAGAIN);
}
int
_pthread_key_delete(pthread_key_t key)
{
+ struct pthread *curthread = _get_curthread();
int ret = 0;
if (key < PTHREAD_KEYS_MAX) {
- /* Lock the key table entry: */
- _SPINLOCK(&key_table[key].lock);
+ /* Lock the key table: */
+ THR_LOCK_ACQUIRE(curthread, &_keytable_lock);
if (key_table[key].allocated)
key_table[key].allocated = 0;
else
ret = EINVAL;
- /* Unlock the key table entry: */
- _SPINUNLOCK(&key_table[key].lock);
+ /* Unlock the key table: */
+ THR_LOCK_RELEASE(curthread, &_keytable_lock);
} else
ret = EINVAL;
return (ret);
@@ -105,44 +107,41 @@ _thread_cleanupspecific(void)
struct pthread *curthread = _get_curthread();
void *data = NULL;
int key;
- int itr;
void (*destructor)( void *);
- for (itr = 0; itr < PTHREAD_DESTRUCTOR_ITERATIONS; itr++) {
- for (key = 0; key < PTHREAD_KEYS_MAX; key++) {
- if (curthread->specific_data_count > 0) {
- /* Lock the key table entry: */
- _SPINLOCK(&key_table[key].lock);
- destructor = NULL;
-
- if (key_table[key].allocated &&
- (curthread->specific[key].data != NULL)) {
- if (curthread->specific[key].seqno ==
- key_table[key].seqno) {
- data = (void *) curthread->specific[key].data;
- destructor = key_table[key].destructor;
- }
- curthread->specific[key].data = NULL;
- curthread->specific_data_count--;
+ if (curthread->specific != NULL) {
+ /* Lock the key table: */
+ THR_LOCK_ACQUIRE(curthread, &_keytable_lock);
+ for (key = 0; (key < PTHREAD_KEYS_MAX) &&
+ (curthread->specific_data_count > 0); key++) {
+ destructor = NULL;
+
+ if (key_table[key].allocated &&
+ (curthread->specific[key].data != NULL)) {
+ if (curthread->specific[key].seqno ==
+ key_table[key].seqno) {
+ data = (void *)curthread->specific[key].data;
+ destructor = key_table[key].destructor;
}
+ curthread->specific[key].data = NULL;
+ curthread->specific_data_count--;
+ }
- /* Unlock the key table entry: */
- _SPINUNLOCK(&key_table[key].lock);
-
+ /*
+ * If there is a destructor, call it
+ * with the key table entry unlocked:
+ */
+ if (destructor != NULL) {
/*
- * If there is a destructore, call it
- * with the key table entry unlocked:
+ * Don't hold the lock while calling the
+ * destructor:
*/
- if (destructor)
- destructor(data);
- } else {
- free(curthread->specific);
- curthread->specific = NULL;
- return;
+ THR_LOCK_RELEASE(curthread, &_keytable_lock);
+ destructor(data);
+ THR_LOCK_ACQUIRE(curthread, &_keytable_lock);
}
}
- }
- if (curthread->specific != NULL) {
+ THR_LOCK_RELEASE(curthread, &_keytable_lock);
free(curthread->specific);
curthread->specific = NULL;
}
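
Note: the key table is now guarded by a single _keytable_lock, and _thread_cleanupspecific() makes one pass over the keys while the per-thread data count is positive, dropping the lock around each destructor call. Application-level use of the thread-specific data API is unaffected; a small example of the create/get/set/destructor cycle these functions manage follows, where per_thread_buffer() and the key names are illustrative.

#include <pthread.h>
#include <stdlib.h>

static pthread_key_t buf_key;
static pthread_once_t buf_once = PTHREAD_ONCE_INIT;

static void
buf_destructor(void *p)
{
	free(p);	/* invoked by the cleanup pass at thread exit */
}

static void
buf_key_init(void)
{
	(void)pthread_key_create(&buf_key, buf_destructor);
}

static char *
per_thread_buffer(size_t size)
{
	char *buf;

	(void)pthread_once(&buf_once, buf_key_init);
	if ((buf = pthread_getspecific(buf_key)) == NULL) {
		buf = malloc(size);
		(void)pthread_setspecific(buf_key, buf);
	}
	return (buf);
}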
diff --git a/lib/libpthread/thread/thr_spinlock.c b/lib/libpthread/thread/thr_spinlock.c
index ad7b222..cb71a46 100644
--- a/lib/libpthread/thread/thr_spinlock.c
+++ b/lib/libpthread/thread/thr_spinlock.c
@@ -41,9 +41,14 @@
#include <unistd.h>
#include <libc_private.h>
-
+#include "spinlock.h"
#include "thr_private.h"
+/*
+ * These are for compatibility only. Spinlocks of this type
+ * are deprecated.
+ */
+
void
_spinunlock(spinlock_t *lck)
{
@@ -60,20 +65,14 @@ _spinunlock(spinlock_t *lck)
void
_spinlock(spinlock_t *lck)
{
- struct pthread *curthread = _get_curthread();
-
/*
* Try to grab the lock and loop if another thread grabs
* it before we do.
*/
while(_atomic_lock(&lck->access_lock)) {
- /* Block the thread until the lock. */
- curthread->data.spinlock = lck;
- _thread_kern_sched_state(PS_SPINBLOCK, __FILE__, __LINE__);
+ while (lck->access_lock)
+ ;
}
-
- /* The running thread now owns the lock: */
- lck->lock_owner = (long) curthread;
}
/*
@@ -89,30 +88,12 @@ _spinlock(spinlock_t *lck)
void
_spinlock_debug(spinlock_t *lck, char *fname, int lineno)
{
- struct pthread *curthread = _get_curthread();
- int cnt = 0;
-
/*
* Try to grab the lock and loop if another thread grabs
* it before we do.
*/
while(_atomic_lock(&lck->access_lock)) {
- cnt++;
- if (cnt > 100) {
- char str[256];
- snprintf(str, sizeof(str), "%s - Warning: Thread %p attempted to lock %p from %s (%d) was left locked from %s (%d)\n", getprogname(), curthread, lck, fname, lineno, lck->fname, lck->lineno);
- __sys_write(2,str,strlen(str));
- __sleep(1);
- cnt = 0;
- }
-
- /* Block the thread until the lock. */
- curthread->data.spinlock = lck;
- _thread_kern_sched_state(PS_SPINBLOCK, fname, lineno);
+ while (lck->access_lock)
+ ;
}
-
- /* The running thread now owns the lock: */
- lck->lock_owner = (long) curthread;
- lck->fname = fname;
- lck->lineno = lineno;
}
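
Note: the compatibility spinlocks above no longer park the owner in the scheduler (PS_SPINBLOCK is gone); a contended locker spins on a plain read of access_lock and then retries the atomic acquire. A sketch of that test-and-test-and-set loop follows, written with C11 atomics since the machine-dependent _atomic_lock() is not shown in this patch; the function names are illustrative.

#include <stdatomic.h>

static void
spin_lock_sketch(atomic_int *lock)
{
	/* Acquire attempt; on contention, spin on plain reads first. */
	while (atomic_exchange(lock, 1) != 0) {
		while (atomic_load_explicit(lock, memory_order_relaxed) != 0)
			;	/* wait until the lock looks free, then retry */
	}
}

static void
spin_unlock_sketch(atomic_int *lock)
{
	atomic_store(lock, 0);
}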
diff --git a/lib/libpthread/thread/thr_stack.c b/lib/libpthread/thread/thr_stack.c
index c75d6ee..f14289e 100644
--- a/lib/libpthread/thread/thr_stack.c
+++ b/lib/libpthread/thread/thr_stack.c
@@ -28,9 +28,7 @@
*/
#include <sys/types.h>
#include <sys/mman.h>
-#include <sys/param.h>
#include <sys/queue.h>
-#include <sys/user.h>
#include <stdlib.h>
#include <pthread.h>
#include "thr_private.h"
@@ -44,31 +42,32 @@ struct stack {
};
/*
- * Default sized (stack and guard) spare stack queue. Stacks are cached to
- * avoid additional complexity managing mmap()ed stack regions. Spare stacks
- * are used in LIFO order to increase cache locality.
+ * Default sized (stack and guard) spare stack queue. Stacks are cached
+ * to avoid additional complexity managing mmap()ed stack regions. Spare
+ * stacks are used in LIFO order to increase cache locality.
*/
-static LIST_HEAD(, stack) _dstackq = LIST_HEAD_INITIALIZER(_dstackq);
+static LIST_HEAD(, stack) dstackq = LIST_HEAD_INITIALIZER(dstackq);
/*
* Miscellaneous sized (non-default stack and/or guard) spare stack queue.
- * Stacks are cached to avoid additional complexity managing mmap()ed stack
- * regions. This list is unordered, since ordering on both stack size and guard
- * size would be more trouble than it's worth. Stacks are allocated from this
- * cache on a first size match basis.
+ * Stacks are cached to avoid additional complexity managing mmap()ed
+ * stack regions. This list is unordered, since ordering on both stack
+ * size and guard size would be more trouble than it's worth. Stacks are
+ * allocated from this cache on a first size match basis.
*/
-static LIST_HEAD(, stack) _mstackq = LIST_HEAD_INITIALIZER(_mstackq);
+static LIST_HEAD(, stack) mstackq = LIST_HEAD_INITIALIZER(mstackq);
/**
- * Base address of the last stack allocated (including its red zone, if there is
- * one). Stacks are allocated contiguously, starting beyond the top of the main
- * stack. When a new stack is created, a red zone is typically created
- * (actually, the red zone is simply left unmapped) above the top of the stack,
- * such that the stack will not be able to grow all the way to the bottom of the
- * next stack. This isn't fool-proof. It is possible for a stack to grow by a
- * large amount, such that it grows into the next stack, and as long as the
- * memory within the red zone is never accessed, nothing will prevent one thread
- * stack from trouncing all over the next.
+ * Base address of the last stack allocated (including its red zone, if
+ * there is one). Stacks are allocated contiguously, starting beyond the
+ * top of the main stack. When a new stack is created, a red zone is
+ * typically created (actually, the red zone is simply left unmapped) above
+ * the top of the stack, such that the stack will not be able to grow all
+ * the way to the bottom of the next stack. This isn't fool-proof. It is
+ * possible for a stack to grow by a large amount, such that it grows into
+ * the next stack, and as long as the memory within the red zone is never
+ * accessed, nothing will prevent one thread stack from trouncing all over
+ * the next.
*
* low memory
* . . . . . . . . . . . . . . . . . .
@@ -112,50 +111,51 @@ static LIST_HEAD(, stack) _mstackq = LIST_HEAD_INITIALIZER(_mstackq);
* high memory
*
*/
-static void * last_stack;
+static void *last_stack = NULL;
-void *
-_thread_stack_alloc(size_t stacksize, size_t guardsize)
+int
+_thr_stack_alloc(struct pthread_attr *attr)
{
- void *stack = NULL;
- struct stack *spare_stack;
- size_t stack_size;
+ struct stack *spare_stack;
+ struct kse *curkse;
+ kse_critical_t crit;
+ size_t stacksize;
+ size_t guardsize;
+
+ stacksize = attr->stacksize_attr;
+ guardsize = attr->guardsize_attr;
/*
- * Round up stack size to nearest multiple of _pthread_page_size,
- * so that mmap() * will work. If the stack size is not an even
- * multiple, we end up initializing things such that there is unused
- * space above the beginning of the stack, so the stack sits snugly
- * against its guard.
+ * Round up stack size to nearest multiple of _thr_page_size so
+	 * that mmap() will work.  If the stack size is not an even
+ * multiple, we end up initializing things such that there is
+ * unused space above the beginning of the stack, so the stack
+ * sits snugly against its guard.
*/
- if (stacksize % _pthread_page_size != 0)
- stack_size = ((stacksize / _pthread_page_size) + 1) *
- _pthread_page_size;
- else
- stack_size = stacksize;
+ if ((stacksize % _thr_page_size) != 0)
+ stacksize = ((stacksize / _thr_page_size) + 1) *
+ _thr_page_size;
+ attr->stackaddr_attr = NULL;
+ attr->flags &= ~THR_STACK_USER;
/*
+ * Use the garbage collector lock for synchronization of the
+ * spare stack lists and allocations from usrstack.
+ */
+ crit = _kse_critical_enter();
+ curkse = _get_curkse();
+ KSE_LOCK_ACQUIRE(curkse, &_thread_list_lock);
+ /*
* If the stack and guard sizes are default, try to allocate a stack
* from the default-size stack cache:
*/
- if (stack_size == PTHREAD_STACK_DEFAULT &&
- guardsize == _pthread_guard_default) {
- /*
- * Use the garbage collector mutex for synchronization of the
- * spare stack list.
- */
- if (pthread_mutex_lock(&_gc_mutex) != 0)
- PANIC("Cannot lock gc mutex");
-
- if ((spare_stack = LIST_FIRST(&_dstackq)) != NULL) {
- /* Use the spare stack. */
+ if ((stacksize == THR_STACK_DEFAULT) &&
+ (guardsize == _thr_guard_default)) {
+ if ((spare_stack = LIST_FIRST(&dstackq)) != NULL) {
+ /* Use the spare stack. */
LIST_REMOVE(spare_stack, qe);
- stack = spare_stack->stackaddr;
+ attr->stackaddr_attr = spare_stack->stackaddr;
}
-
- /* Unlock the garbage collector mutex. */
- if (pthread_mutex_unlock(&_gc_mutex) != 0)
- PANIC("Cannot unlock gc mutex");
}
/*
* The user specified a non-default stack and/or guard size, so try to
@@ -163,78 +163,75 @@ _thread_stack_alloc(size_t stacksize, size_t guardsize)
* rounded up stack size (stack_size) in the search:
*/
else {
- /*
- * Use the garbage collector mutex for synchronization of the
- * spare stack list.
- */
- if (pthread_mutex_lock(&_gc_mutex) != 0)
- PANIC("Cannot lock gc mutex");
-
- LIST_FOREACH(spare_stack, &_mstackq, qe) {
- if (spare_stack->stacksize == stack_size &&
+ LIST_FOREACH(spare_stack, &mstackq, qe) {
+ if (spare_stack->stacksize == stacksize &&
spare_stack->guardsize == guardsize) {
LIST_REMOVE(spare_stack, qe);
- stack = spare_stack->stackaddr;
+ attr->stackaddr_attr = spare_stack->stackaddr;
break;
}
}
-
- /* Unlock the garbage collector mutex. */
- if (pthread_mutex_unlock(&_gc_mutex) != 0)
- PANIC("Cannot unlock gc mutex");
}
-
- /* Check if a stack was not allocated from a stack cache: */
- if (stack == NULL) {
-
+ if (attr->stackaddr_attr != NULL) {
+ /* A cached stack was found. Release the lock. */
+ KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
+ _kse_critical_leave(crit);
+ }
+ else {
+ /* Allocate a stack from usrstack. */
if (last_stack == NULL)
- last_stack = _usrstack - PTHREAD_STACK_INITIAL -
- _pthread_guard_default;
+ last_stack = _usrstack - THR_STACK_INITIAL -
+ _thr_guard_default;
/* Allocate a new stack. */
- stack = last_stack - stack_size;
+ attr->stackaddr_attr = last_stack - stacksize;
/*
- * Even if stack allocation fails, we don't want to try to use
- * this location again, so unconditionally decrement
+ * Even if stack allocation fails, we don't want to try to
+ * use this location again, so unconditionally decrement
* last_stack. Under normal operating conditions, the most
- * likely reason for an mmap() error is a stack overflow of the
- * adjacent thread stack.
+ * likely reason for an mmap() error is a stack overflow of
+ * the adjacent thread stack.
*/
- last_stack -= (stack_size + guardsize);
+ last_stack -= (stacksize + guardsize);
- /* Stack: */
- if (mmap(stack, stack_size, PROT_READ | PROT_WRITE, MAP_STACK,
- -1, 0) == MAP_FAILED)
- stack = NULL;
- }
+ /* Release the lock before mmap'ing it. */
+ KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
+ _kse_critical_leave(crit);
- return (stack);
+ /* Map the stack, but not the guard page: */
+ if (mmap(attr->stackaddr_attr, stacksize,
+ PROT_READ | PROT_WRITE, MAP_STACK, -1, 0) == MAP_FAILED)
+ attr->stackaddr_attr = NULL;
+ }
+ if (attr->stackaddr_attr != NULL)
+ return (0);
+ else
+ return (-1);
}
-/* This function must be called with _gc_mutex held. */
+/* This function must be called with _thread_list_lock held. */
void
-_thread_stack_free(void *stack, size_t stacksize, size_t guardsize)
+_thr_stack_free(struct pthread_attr *attr)
{
- struct stack *spare_stack;
-
- spare_stack = (stack + stacksize - sizeof(struct stack));
- /* Round stacksize up to nearest multiple of _pthread_page_size. */
- if (stacksize % _pthread_page_size != 0) {
- spare_stack->stacksize =
- ((stacksize / _pthread_page_size) + 1) *
- _pthread_page_size;
- } else
- spare_stack->stacksize = stacksize;
- spare_stack->guardsize = guardsize;
- spare_stack->stackaddr = stack;
-
- if (spare_stack->stacksize == PTHREAD_STACK_DEFAULT &&
- spare_stack->guardsize == _pthread_guard_default) {
- /* Default stack/guard size. */
- LIST_INSERT_HEAD(&_dstackq, spare_stack, qe);
- } else {
- /* Non-default stack/guard size. */
- LIST_INSERT_HEAD(&_mstackq, spare_stack, qe);
+ struct stack *spare_stack;
+
+ if ((attr != NULL) && ((attr->flags & THR_STACK_USER) == 0)
+ && (attr->stackaddr_attr != NULL)) {
+ spare_stack = (attr->stackaddr_attr + attr->stacksize_attr
+ - sizeof(struct stack));
+ spare_stack->stacksize = attr->stacksize_attr;
+ spare_stack->guardsize = attr->guardsize_attr;
+ spare_stack->stackaddr = attr->stackaddr_attr;
+
+ if (spare_stack->stacksize == THR_STACK_DEFAULT &&
+ spare_stack->guardsize == _thr_guard_default) {
+ /* Default stack/guard size. */
+ LIST_INSERT_HEAD(&dstackq, spare_stack, qe);
+ } else {
+ /* Non-default stack/guard size. */
+ LIST_INSERT_HEAD(&mstackq, spare_stack, qe);
+ }
+ attr->stackaddr_attr = NULL;
}
}
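_thr_stack_alloc() above now takes the whole pthread_attr, rounds the requested size up to whole pages, tries the spare-stack caches, and only then carves a new region below last_stack with mmap(MAP_STACK). The following is a simplified, self-contained sketch of just the rounding and first-size-match cache lookup; the queue, page-size handling, and names are illustrative stand-ins, and the locking and mmap() fallback are omitted.

/*
 * Illustrative only: round a stack request to whole pages and reuse a
 * cached spare stack on a first size match, as in the hunk above.
 */
#include <sys/queue.h>
#include <stddef.h>

struct demo_stack {
	LIST_ENTRY(demo_stack)	qe;
	void			*stackaddr;
	size_t			stacksize;
	size_t			guardsize;
};

static LIST_HEAD(, demo_stack) demo_cache = LIST_HEAD_INITIALIZER(demo_cache);

static void *
demo_stack_lookup(size_t stacksize, size_t guardsize, size_t pagesize)
{
	struct demo_stack	*sp;

	/* Round the request up to a whole number of pages. */
	if ((stacksize % pagesize) != 0)
		stacksize = ((stacksize / pagesize) + 1) * pagesize;

	/* First size match wins; spare stacks are reused in LIFO order. */
	LIST_FOREACH(sp, &demo_cache, qe) {
		if (sp->stacksize == stacksize &&
		    sp->guardsize == guardsize) {
			LIST_REMOVE(sp, qe);
			return (sp->stackaddr);
		}
	}
	return (NULL);
}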
diff --git a/lib/libpthread/thread/thr_suspend_np.c b/lib/libpthread/thread/thr_suspend_np.c
index 4128437..3025584 100644
--- a/lib/libpthread/thread/thr_suspend_np.c
+++ b/lib/libpthread/thread/thr_suspend_np.c
@@ -35,7 +35,7 @@
#include <pthread.h>
#include "thr_private.h"
-static void suspend_common(struct pthread *thread);
+static void suspend_common(struct pthread *thread);
__weak_reference(_pthread_suspend_np, pthread_suspend_np);
__weak_reference(_pthread_suspend_all_np, pthread_suspend_all_np);
@@ -44,27 +44,26 @@ __weak_reference(_pthread_suspend_all_np, pthread_suspend_all_np);
int
_pthread_suspend_np(pthread_t thread)
{
+ struct pthread *curthread = _get_curthread();
int ret;
/* Suspending the current thread doesn't make sense. */
if (thread == _get_curthread())
ret = EDEADLK;
- /* Find the thread in the list of active threads: */
- else if ((ret = _find_thread(thread)) == 0) {
- /*
- * Defer signals to protect the scheduling queues from
- * access by the signal handler:
- */
- _thread_kern_sig_defer();
+ /* Add a reference to the thread: */
+ else if ((ret = _thr_ref_add(curthread, thread, /*include dead*/0))
+ == 0) {
+		/* Lock the thread's scheduling queue: */
+ THR_SCHED_LOCK(curthread, thread);
suspend_common(thread);
- /*
- * Undefer and handle pending signals, yielding if
- * necessary:
- */
- _thread_kern_sig_undefer();
+		/* Unlock the thread's scheduling queue: */
+ THR_SCHED_UNLOCK(curthread, thread);
+
+ /* Don't forget to remove the reference: */
+ _thr_ref_delete(curthread, thread);
}
return (ret);
}
@@ -74,31 +73,34 @@ _pthread_suspend_all_np(void)
{
struct pthread *curthread = _get_curthread();
struct pthread *thread;
+ kse_critical_t crit;
- /*
- * Defer signals to protect the scheduling queues from
- * access by the signal handler:
- */
- _thread_kern_sig_defer();
+ /* Take the thread list lock: */
+ crit = _kse_critical_enter();
+ KSE_LOCK_ACQUIRE(curthread->kse, &_thread_list_lock);
TAILQ_FOREACH(thread, &_thread_list, tle) {
- if (thread != curthread)
+ if ((thread != curthread) &&
+ (thread->state != PS_DEAD) &&
+ (thread->state != PS_DEADLOCK) &&
+ ((thread->flags & THR_FLAGS_EXITING) == 0)) {
+ THR_SCHED_LOCK(curthread, thread);
suspend_common(thread);
+ THR_SCHED_UNLOCK(curthread, thread);
+ }
}
- /*
- * Undefer and handle pending signals, yielding if
- * necessary:
- */
- _thread_kern_sig_undefer();
+ /* Release the thread list lock: */
+ KSE_LOCK_RELEASE(curthread->kse, &_thread_list_lock);
+ _kse_critical_leave(crit);
}
void
suspend_common(struct pthread *thread)
{
- thread->flags |= PTHREAD_FLAGS_SUSPENDED;
- if (thread->flags & PTHREAD_FLAGS_IN_PRIOQ) {
- PTHREAD_PRIOQ_REMOVE(thread);
- PTHREAD_SET_STATE(thread, PS_SUSPENDED);
+ thread->flags |= THR_FLAGS_SUSPENDED;
+ if (thread->flags & THR_FLAGS_IN_RUNQ) {
+ THR_RUNQ_REMOVE(thread);
+ THR_SET_STATE(thread, PS_SUSPENDED);
}
}
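_pthread_suspend_np() now brackets the state change with _thr_ref_add()/_thr_ref_delete() and the target's scheduling lock instead of deferring signals. A rough stand-alone approximation of that bracket follows; the reference-count step is elided, the scheduling lock is a plain pthread mutex, and all names are illustrative only.

/*
 * Illustrative only: refuse to suspend the caller, lock the target's
 * scheduling state, mark it suspended, and pull it off the run queue.
 */
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>

struct demo_thread {
	pthread_mutex_t	sched_lock;	/* init with pthread_mutex_init() */
	bool		suspended;
	bool		on_runq;
};

static void
demo_suspend_common(struct demo_thread *t)
{
	/* Mark the thread suspended; dequeue it if it was runnable. */
	t->suspended = true;
	t->on_runq = false;
}

static int
demo_suspend(struct demo_thread *self, struct demo_thread *target)
{
	if (target == self)
		return (EDEADLK);	/* suspending yourself makes no sense */
	pthread_mutex_lock(&target->sched_lock);
	demo_suspend_common(target);
	pthread_mutex_unlock(&target->sched_lock);
	return (0);
}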
diff --git a/lib/libpthread/thread/thr_switch_np.c b/lib/libpthread/thread/thr_switch_np.c
index 45c289e..b70ce70 100644
--- a/lib/libpthread/thread/thr_switch_np.c
+++ b/lib/libpthread/thread/thr_switch_np.c
@@ -43,29 +43,11 @@ __weak_reference(_pthread_switch_delete_np, pthread_switch_delete_np);
int
_pthread_switch_add_np(pthread_switch_routine_t routine)
{
- int ret = 0;
-
- if (routine == NULL)
- /* Return an invalid argument error: */
- ret = EINVAL;
- else
- /* Shouldn't need a lock to protect this assigment. */
- _sched_switch_hook = routine;
-
- return(ret);
+ return (ENOTSUP);
}
int
_pthread_switch_delete_np(pthread_switch_routine_t routine)
{
- int ret = 0;
-
- if (routine != _sched_switch_hook)
- /* Return an invalid argument error: */
- ret = EINVAL;
- else
- /* Shouldn't need a lock to protect this assigment. */
- _sched_switch_hook = NULL;
-
- return(ret);
+ return (ENOTSUP);
}
diff --git a/lib/libpthread/thread/thr_system.c b/lib/libpthread/thread/thr_system.c
index 591562b..28976d3 100644
--- a/lib/libpthread/thread/thr_system.c
+++ b/lib/libpthread/thread/thr_system.c
@@ -38,11 +38,12 @@ __weak_reference(_system, system);
int
_system(const char *string)
{
+ struct pthread *curthread = _get_curthread();
int ret;
- _thread_enter_cancellation_point();
+ _thr_enter_cancellation_point(curthread);
ret = __system(string);
- _thread_leave_cancellation_point();
+ _thr_leave_cancellation_point(curthread);
return ret;
}
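The remaining wrappers in this commit (tcdrain, wait, wait4, waitpid, write, writev) all repeat the same bracket shown above: fetch curthread, enter a cancellation point, call the underlying function, leave the cancellation point. A rough stand-alone approximation of that shape using the public pthread cancellation API rather than the internal _thr_enter/_thr_leave helpers is sketched below; note that write() is already a cancellation point in practice, so this only illustrates the bracketing.

/*
 * Illustrative only: make the wrapped call cancellable, then restore
 * the caller's cancellation state.
 */
#include <pthread.h>
#include <unistd.h>

static ssize_t
demo_write(int fd, const void *buf, size_t nbytes)
{
	ssize_t	ret;
	int	oldstate;

	pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &oldstate);
	ret = write(fd, buf, nbytes);
	pthread_setcancelstate(oldstate, NULL);
	return (ret);
}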
diff --git a/lib/libpthread/thread/thr_tcdrain.c b/lib/libpthread/thread/thr_tcdrain.c
index 140039b..6a2002b 100644
--- a/lib/libpthread/thread/thr_tcdrain.c
+++ b/lib/libpthread/thread/thr_tcdrain.c
@@ -38,11 +38,12 @@ __weak_reference(_tcdrain, tcdrain);
int
_tcdrain(int fd)
{
+ struct pthread *curthread = _get_curthread();
int ret;
- _thread_enter_cancellation_point();
+ _thr_enter_cancellation_point(curthread);
ret = __tcdrain(fd);
- _thread_leave_cancellation_point();
+ _thr_leave_cancellation_point(curthread);
- return ret;
+ return (ret);
}
diff --git a/lib/libpthread/thread/thr_wait.c b/lib/libpthread/thread/thr_wait.c
index 8e9c864..98f2c8d 100644
--- a/lib/libpthread/thread/thr_wait.c
+++ b/lib/libpthread/thread/thr_wait.c
@@ -37,11 +37,12 @@ __weak_reference(_wait, wait);
pid_t
_wait(int *istat)
{
+ struct pthread *curthread = _get_curthread();
pid_t ret;
- _thread_enter_cancellation_point();
+ _thr_enter_cancellation_point(curthread);
ret = __wait(istat);
- _thread_leave_cancellation_point();
+ _thr_leave_cancellation_point(curthread);
return ret;
}
diff --git a/lib/libpthread/thread/thr_wait4.c b/lib/libpthread/thread/thr_wait4.c
index 07b0c84..9f23584 100644
--- a/lib/libpthread/thread/thr_wait4.c
+++ b/lib/libpthread/thread/thr_wait4.c
@@ -43,11 +43,12 @@ __weak_reference(__wait4, wait4);
pid_t
__wait4(pid_t pid, int *istat, int options, struct rusage *rusage)
{
+ struct pthread *curthread = _get_curthread();
pid_t ret;
- _thread_enter_cancellation_point();
+ _thr_enter_cancellation_point(curthread);
ret = _wait4(pid, istat, options, rusage);
- _thread_leave_cancellation_point();
+ _thr_leave_cancellation_point(curthread);
return ret;
}
diff --git a/lib/libpthread/thread/thr_waitpid.c b/lib/libpthread/thread/thr_waitpid.c
index 8938a62..8ee3ce1 100644
--- a/lib/libpthread/thread/thr_waitpid.c
+++ b/lib/libpthread/thread/thr_waitpid.c
@@ -39,11 +39,12 @@ __weak_reference(_waitpid, waitpid);
pid_t
_waitpid(pid_t wpid, int *status, int options)
{
+ struct pthread *curthread = _get_curthread();
pid_t ret;
- _thread_enter_cancellation_point();
+ _thr_enter_cancellation_point(curthread);
ret = __waitpid(wpid, status, options);
- _thread_leave_cancellation_point();
+ _thr_leave_cancellation_point(curthread);
return ret;
}
diff --git a/lib/libpthread/thread/thr_write.c b/lib/libpthread/thread/thr_write.c
index 4c8c171..53d897c 100644
--- a/lib/libpthread/thread/thr_write.c
+++ b/lib/libpthread/thread/thr_write.c
@@ -45,11 +45,12 @@ __weak_reference(__write, write);
ssize_t
__write(int fd, const void *buf, size_t nbytes)
{
+ struct pthread *curthread = _get_curthread();
ssize_t ret;
- _thread_enter_cancellation_point();
+ _thr_enter_cancellation_point(curthread);
ret = __sys_write(fd, buf, nbytes);
- _thread_leave_cancellation_point();
+ _thr_leave_cancellation_point(curthread);
return ret;
}
diff --git a/lib/libpthread/thread/thr_writev.c b/lib/libpthread/thread/thr_writev.c
index cff95b1..e13c9d2 100644
--- a/lib/libpthread/thread/thr_writev.c
+++ b/lib/libpthread/thread/thr_writev.c
@@ -47,11 +47,12 @@ __weak_reference(__writev, writev);
ssize_t
__writev(int fd, const struct iovec *iov, int iovcnt)
{
+ struct pthread *curthread = _get_curthread();
ssize_t ret;
- _thread_enter_cancellation_point();
+ _thr_enter_cancellation_point(curthread);
ret = __sys_writev(fd, iov, iovcnt);
- _thread_leave_cancellation_point();
+ _thr_leave_cancellation_point(curthread);
return ret;
}
diff --git a/lib/libpthread/thread/thr_yield.c b/lib/libpthread/thread/thr_yield.c
index e652a9c..acaa3c5 100644
--- a/lib/libpthread/thread/thr_yield.c
+++ b/lib/libpthread/thread/thr_yield.c
@@ -46,7 +46,7 @@ _sched_yield(void)
curthread->slice_usec = -1;
/* Schedule the next thread: */
- _thread_kern_sched();
+ _thr_sched_switch(curthread);
/* Always return no error. */
return(0);
@@ -62,5 +62,5 @@ _pthread_yield(void)
curthread->slice_usec = -1;
/* Schedule the next thread: */
- _thread_kern_sched();
+ _thr_sched_switch(curthread);
}