From e7efcb5302ff3b4faef7cf619f51a4b4a509f09a Mon Sep 17 00:00:00 2001 From: alfred Date: Sun, 28 Nov 1999 05:38:13 +0000 Subject: add pthread_cancel, obtained from OpenBSD. eischen (Daniel Eischen) added wrappers to protect against cancled threads orphaning internal resources. the cancelability code is still a bit fuzzy but works for test programs of my own, OpenBSD's and some examples from ORA's books. add readdir_r to both libc and libc_r add some 'const' attributes to function parameters Reviewed by: eischen, jasone --- lib/libc_r/uthread/Makefile.inc | 3 + lib/libc_r/uthread/pthread_private.h | 33 +++- lib/libc_r/uthread/uthread_attr_getinheritsched.c | 2 +- lib/libc_r/uthread/uthread_attr_getschedparam.c | 2 +- lib/libc_r/uthread/uthread_attr_getschedpolicy.c | 2 +- lib/libc_r/uthread/uthread_attr_getscope.c | 2 +- lib/libc_r/uthread/uthread_cancel.c | 179 ++++++++++++++++++ lib/libc_r/uthread/uthread_close.c | 4 +- lib/libc_r/uthread/uthread_cond.c | 102 +++++++--- lib/libc_r/uthread/uthread_create.c | 5 +- lib/libc_r/uthread/uthread_exit.c | 40 +++- lib/libc_r/uthread/uthread_fchflags.c | 25 +++ lib/libc_r/uthread/uthread_fcntl.c | 3 + lib/libc_r/uthread/uthread_fd.c | 216 +++++++++++++++++++--- lib/libc_r/uthread/uthread_file.c | 121 ++++++++++-- lib/libc_r/uthread/uthread_fsync.c | 2 + lib/libc_r/uthread/uthread_getschedparam.c | 3 +- lib/libc_r/uthread/uthread_init.c | 5 + lib/libc_r/uthread/uthread_join.c | 31 +++- lib/libc_r/uthread/uthread_kern.c | 27 ++- lib/libc_r/uthread/uthread_msync.c | 40 ++++ lib/libc_r/uthread/uthread_mutex.c | 47 ++++- lib/libc_r/uthread/uthread_nanosleep.c | 2 + lib/libc_r/uthread/uthread_open.c | 11 +- lib/libc_r/uthread/uthread_read.c | 8 +- lib/libc_r/uthread/uthread_setschedparam.c | 3 +- lib/libc_r/uthread/uthread_sigwait.c | 3 + lib/libc_r/uthread/uthread_wait4.c | 2 + lib/libc_r/uthread/uthread_write.c | 9 +- 29 files changed, 829 insertions(+), 103 deletions(-) create mode 100644 lib/libc_r/uthread/uthread_cancel.c create mode 100644 lib/libc_r/uthread/uthread_fchflags.c create mode 100644 lib/libc_r/uthread/uthread_msync.c (limited to 'lib/libc_r/uthread') diff --git a/lib/libc_r/uthread/Makefile.inc b/lib/libc_r/uthread/Makefile.inc index d42c04b..4697305 100644 --- a/lib/libc_r/uthread/Makefile.inc +++ b/lib/libc_r/uthread/Makefile.inc @@ -24,6 +24,7 @@ SRCS+= \ uthread_attr_setstacksize.c \ uthread_autoinit.cc \ uthread_bind.c \ + uthread_cancel.c \ uthread_clean.c \ uthread_close.c \ uthread_cond.c \ @@ -37,6 +38,7 @@ SRCS+= \ uthread_equal.c \ uthread_execve.c \ uthread_exit.c \ + uthread_fchflags.c \ uthread_fchmod.c \ uthread_fchown.c \ uthread_fcntl.c \ @@ -64,6 +66,7 @@ SRCS+= \ uthread_listen.c \ uthread_mattr_init.c \ uthread_mattr_kind_np.c \ + uthread_msync.c \ uthread_multi_np.c \ uthread_mutex.c \ uthread_mutex_prioceiling.c \ diff --git a/lib/libc_r/uthread/pthread_private.h b/lib/libc_r/uthread/pthread_private.h index a58c02f..0f27063 100644 --- a/lib/libc_r/uthread/pthread_private.h +++ b/lib/libc_r/uthread/pthread_private.h @@ -253,7 +253,7 @@ struct pthread_mutex { */ #define PTHREAD_MUTEX_STATIC_INITIALIZER \ { PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, TAILQ_INITIALIZER, \ - NULL, { NULL }, 0, 0, 0, 0, TAILQ_INITIALIZER, \ + NULL, { NULL }, MUTEX_FLAGS_PRIVATE, 0, 0, 0, TAILQ_INITIALIZER, \ _SPINLOCK_INITIALIZER } struct pthread_mutex_attr { @@ -513,6 +513,15 @@ struct pthread { */ int sig_saved; + /* + * Cancelability flags - the lower 2 bits are used by cancel + * definitions in pthread.h + */ +#define 
PTHREAD_AT_CANCEL_POINT 0x0004 +#define PTHREAD_CANCELLING 0x0008 +#define PTHREAD_CANCEL_NEEDED 0x0010 + int cancelflags; + /* * Current signal mask and pending signals. */ @@ -610,15 +619,18 @@ struct pthread { */ int yield_on_sig_undefer; - /* Miscellaneous data. */ + /* Miscellaneous flags; only set with signals deferred. */ int flags; #define PTHREAD_FLAGS_PRIVATE 0x0001 #define PTHREAD_EXITING 0x0002 #define PTHREAD_FLAGS_IN_CONDQ 0x0004 /* in condition queue using qe link*/ #define PTHREAD_FLAGS_IN_WORKQ 0x0008 /* in work queue using qe link */ -#define PTHREAD_FLAGS_IN_WAITQ 0x0010 /* in waiting queue using pqe link*/ -#define PTHREAD_FLAGS_IN_PRIOQ 0x0020 /* in priority queue using pqe link*/ -#define PTHREAD_FLAGS_TRACE 0x0040 /* for debugging purposes */ +#define PTHREAD_FLAGS_IN_WAITQ 0x0010 /* in waiting queue using pqe link */ +#define PTHREAD_FLAGS_IN_PRIOQ 0x0020 /* in priority queue using pqe link */ +#define PTHREAD_FLAGS_IN_MUTEXQ 0x0040 /* in mutex queue using qe link */ +#define PTHREAD_FLAGS_IN_FILEQ 0x0080 /* in file lock queue using qe link */ +#define PTHREAD_FLAGS_IN_FDQ 0x0100 /* in fd lock queue using qe link */ +#define PTHREAD_FLAGS_TRACE 0x0200 /* for debugging purposes */ /* * Base priority is the user setable and retrievable priority @@ -925,6 +937,7 @@ char *__ttyname_r_basic(int, char *, size_t); char *ttyname_r(int, char *, size_t); int _find_dead_thread(pthread_t); int _find_thread(pthread_t); +void _funlock_owned(pthread_t); int _thread_create(pthread_t *,const pthread_attr_t *,void *(*start_routine)(void *),void *,pthread_t); int _thread_fd_lock(int, int, struct timespec *); int _thread_fd_lock_debug(int, int, struct timespec *,char *fname,int lineno); @@ -932,8 +945,9 @@ void _dispatch_signals(void); void _thread_signal(pthread_t, int); int _mutex_cv_lock(pthread_mutex_t *); int _mutex_cv_unlock(pthread_mutex_t *); +void _mutex_notify_priochange(pthread_t); int _mutex_reinit(pthread_mutex_t *); -void _mutex_notify_priochange(struct pthread *); +void _mutex_unlock_private(pthread_t); int _cond_reinit(pthread_cond_t *); int _pq_alloc(struct pq_queue *, int, int); int _pq_init(struct pq_queue *); @@ -948,8 +962,10 @@ void _waitq_setactive(void); void _waitq_clearactive(void); #endif void _thread_exit(char *, int, char *); +void _thread_exit_cleanup(void); void _thread_fd_unlock(int, int); void _thread_fd_unlock_debug(int, int, char *, int); +void _thread_fd_unlock_owned(pthread_t); void *_thread_cleanup(pthread_t); void _thread_cleanupspecific(void); void _thread_dump_info(void); @@ -969,6 +985,9 @@ void _thread_start_sig_handler(void); void _thread_seterrno(pthread_t,int); int _thread_fd_table_init(int fd); pthread_addr_t _thread_gc(pthread_addr_t); +void _thread_enter_cancellation_point(void); +void _thread_leave_cancellation_point(void); +void _thread_cancellation_point(void); /* #include */ int _thread_sys_sigaction(int, const struct sigaction *, struct sigaction *); @@ -1148,6 +1167,8 @@ pid_t _thread_sys_wait4(pid_t, int *, int, struct rusage *); #ifdef _SYS_POLL_H_ int _thread_sys_poll(struct pollfd *, unsigned, int); #endif +/* #include */ +int _thread_sys_msync(void *, size_t, int); __END_DECLS #endif /* !_PTHREAD_PRIVATE_H */ diff --git a/lib/libc_r/uthread/uthread_attr_getinheritsched.c b/lib/libc_r/uthread/uthread_attr_getinheritsched.c index f98fde7..7e243ed 100644 --- a/lib/libc_r/uthread/uthread_attr_getinheritsched.c +++ b/lib/libc_r/uthread/uthread_attr_getinheritsched.c @@ -37,7 +37,7 @@ #include "pthread_private.h" int 
-pthread_attr_getinheritsched(pthread_attr_t *attr, int *sched_inherit) +pthread_attr_getinheritsched(const pthread_attr_t *attr, int *sched_inherit) { int ret = 0; diff --git a/lib/libc_r/uthread/uthread_attr_getschedparam.c b/lib/libc_r/uthread/uthread_attr_getschedparam.c index 61c6891..46586ff 100644 --- a/lib/libc_r/uthread/uthread_attr_getschedparam.c +++ b/lib/libc_r/uthread/uthread_attr_getschedparam.c @@ -37,7 +37,7 @@ #include "pthread_private.h" int -pthread_attr_getschedparam(pthread_attr_t *attr, struct sched_param *param) +pthread_attr_getschedparam(const pthread_attr_t *attr, struct sched_param *param) { int ret = 0; diff --git a/lib/libc_r/uthread/uthread_attr_getschedpolicy.c b/lib/libc_r/uthread/uthread_attr_getschedpolicy.c index 6ebc764..19f835c 100644 --- a/lib/libc_r/uthread/uthread_attr_getschedpolicy.c +++ b/lib/libc_r/uthread/uthread_attr_getschedpolicy.c @@ -37,7 +37,7 @@ #include "pthread_private.h" int -pthread_attr_getschedpolicy(pthread_attr_t *attr, int *policy) +pthread_attr_getschedpolicy(const pthread_attr_t *attr, int *policy) { int ret = 0; diff --git a/lib/libc_r/uthread/uthread_attr_getscope.c b/lib/libc_r/uthread/uthread_attr_getscope.c index f456585..176f01b 100644 --- a/lib/libc_r/uthread/uthread_attr_getscope.c +++ b/lib/libc_r/uthread/uthread_attr_getscope.c @@ -37,7 +37,7 @@ #include "pthread_private.h" int -pthread_attr_getscope(pthread_attr_t *attr, int *contentionscope) +pthread_attr_getscope(const pthread_attr_t *attr, int *contentionscope) { int ret = 0; diff --git a/lib/libc_r/uthread/uthread_cancel.c b/lib/libc_r/uthread/uthread_cancel.c new file mode 100644 index 0000000..bad5533 --- /dev/null +++ b/lib/libc_r/uthread/uthread_cancel.c @@ -0,0 +1,179 @@ +/* + * David Leonard , 1999. Public domain. + * $FreeBSD$ + */ + +#include +#include +#include "pthread_private.h" + +int +pthread_cancel(pthread_t pthread) +{ + int ret; + + if ((ret = _find_thread(pthread)) != 0) { + /* NOTHING */ + } else if (pthread->state == PS_DEAD || pthread->state == PS_DEADLOCK) { + ret = 0; + } else { + /* Protect the scheduling queues: */ + _thread_kern_sig_defer(); + + /* Check if we need to kick it back into the run queue: */ + if ((pthread->cancelflags & PTHREAD_CANCEL_DISABLE) == 0) + switch (pthread->state) { + case PS_RUNNING: + /* No need to resume: */ + pthread->cancelflags |= PTHREAD_CANCELLING; + break; + + case PS_SPINBLOCK: + case PS_FDR_WAIT: + case PS_FDW_WAIT: + case PS_POLL_WAIT: + case PS_SELECT_WAIT: + /* Remove these threads from the work queue: */ + if ((pthread->flags & PTHREAD_FLAGS_IN_WORKQ) + != 0) + PTHREAD_WORKQ_REMOVE(pthread); + /* Fall through: */ + case PS_SIGTHREAD: + case PS_SLEEP_WAIT: + case PS_WAIT_WAIT: + case PS_SIGSUSPEND: + case PS_SIGWAIT: + case PS_SUSPENDED: + /* Interrupt and resume: */ + pthread->interrupted = 1; + pthread->cancelflags |= PTHREAD_CANCELLING; + PTHREAD_NEW_STATE(pthread,PS_RUNNING); + break; + + case PS_MUTEX_WAIT: + case PS_COND_WAIT: + case PS_FDLR_WAIT: + case PS_FDLW_WAIT: + case PS_FILE_WAIT: + case PS_JOIN: + /* + * Threads in these states may be in queues. + * In order to preserve queue integrity, the + * cancelled thread must remove itself from the + * queue. Mark the thread as interrupted and + * needing cancellation, and set the state to + * running. When the thread resumes, it will + * exit after removing itself from the queue. 
+ */ + pthread->interrupted = 1; + pthread->cancelflags |= PTHREAD_CANCEL_NEEDED; + PTHREAD_NEW_STATE(pthread,PS_RUNNING); + break; + + case PS_DEAD: + case PS_DEADLOCK: + case PS_STATE_MAX: + /* Ignore - only here to silence -Wall: */ + break; + } + /* Unprotect the scheduling queues: */ + _thread_kern_sig_undefer(); + + ret = 0; + } + return (ret); +} + +int +pthread_setcancelstate(int state, int *oldstate) +{ + int ostate; + int ret; + + ostate = _thread_run->cancelflags & PTHREAD_CANCEL_DISABLE; + + switch (state) { + case PTHREAD_CANCEL_ENABLE: + if (oldstate != NULL) + *oldstate = ostate; + _thread_run->cancelflags &= PTHREAD_CANCEL_ENABLE; + if ((_thread_run->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS) != 0) + pthread_testcancel(); + ret = 0; + break; + case PTHREAD_CANCEL_DISABLE: + if (oldstate != NULL) + *oldstate = ostate; + _thread_run->cancelflags |= PTHREAD_CANCEL_DISABLE; + ret = 0; + break; + default: + ret = EINVAL; + } + + return (ret); +} + +int +pthread_setcanceltype(int type, int *oldtype) +{ + int otype; + int ret; + + otype = _thread_run->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS; + switch (type) { + case PTHREAD_CANCEL_ASYNCHRONOUS: + if (oldtype != NULL) + *oldtype = otype; + _thread_run->cancelflags |= PTHREAD_CANCEL_ASYNCHRONOUS; + pthread_testcancel(); + ret = 0; + break; + case PTHREAD_CANCEL_DEFERRED: + if (oldtype != NULL) + *oldtype = otype; + _thread_run->cancelflags &= ~PTHREAD_CANCEL_ASYNCHRONOUS; + ret = 0; + break; + default: + ret = EINVAL; + } + + return (ret); +} + +void +pthread_testcancel(void) +{ + + if (((_thread_run->cancelflags & PTHREAD_CANCEL_DISABLE) == 0) && + ((_thread_run->cancelflags & PTHREAD_CANCELLING) != 0)) { + /* + * It is possible for this thread to be swapped out + * while performing cancellation; do not allow it + * to be cancelled again. + */ + _thread_run->cancelflags &= ~PTHREAD_CANCELLING; + _thread_exit_cleanup(); + pthread_exit(PTHREAD_CANCELED); + PANIC("cancel"); + } +} + +void +_thread_enter_cancellation_point(void) +{ + + /* Look for a cancellation before we block: */ + pthread_testcancel(); + _thread_run->cancelflags |= PTHREAD_AT_CANCEL_POINT; +} + +void +_thread_leave_cancellation_point(void) +{ + + _thread_run->cancelflags &= ~PTHREAD_AT_CANCEL_POINT; + /* Look for a cancellation after we unblock: */ + pthread_testcancel(); +} diff --git a/lib/libc_r/uthread/uthread_close.c b/lib/libc_r/uthread/uthread_close.c index 9eba433..2580fce 100644 --- a/lib/libc_r/uthread/uthread_close.c +++ b/lib/libc_r/uthread/uthread_close.c @@ -45,10 +45,11 @@ close(int fd) { int flags; int ret; - int status; struct stat sb; struct fd_table_entry *entry; + _thread_enter_cancellation_point(); + if ((fd == _thread_kern_pipe[0]) || (fd == _thread_kern_pipe[1])) { /* * Don't allow silly programs to close the kernel pipe. 
@@ -98,6 +99,7 @@ close(int fd) /* Close the file descriptor: */ ret = _thread_sys_close(fd); } + _thread_leave_cancellation_point(); return (ret); } #endif diff --git a/lib/libc_r/uthread/uthread_cond.c b/lib/libc_r/uthread/uthread_cond.c index 2eb05f9..09c5f22 100644 --- a/lib/libc_r/uthread/uthread_cond.c +++ b/lib/libc_r/uthread/uthread_cond.c @@ -157,8 +157,7 @@ pthread_cond_destroy(pthread_cond_t * cond) int pthread_cond_wait(pthread_cond_t * cond, pthread_mutex_t * mutex) { - int rval = 0; - int status; + int rval = 0; if (cond == NULL) rval = EINVAL; @@ -169,6 +168,9 @@ pthread_cond_wait(pthread_cond_t * cond, pthread_mutex_t * mutex) */ else if (*cond != NULL || (rval = pthread_cond_init(cond,NULL)) == 0) { + + _thread_enter_cancellation_point(); + /* Lock the condition variable structure: */ _SPINLOCK(&(*cond)->lock); @@ -193,8 +195,9 @@ pthread_cond_wait(pthread_cond_t * cond, pthread_mutex_t * mutex) /* Return invalid argument error: */ rval = EINVAL; } else { - /* Reset the timeout flag: */ + /* Reset the timeout and interrupted flags: */ _thread_run->timeout = 0; + _thread_run->interrupted = 0; /* * Queue the running thread for the condition @@ -233,7 +236,28 @@ pthread_cond_wait(pthread_cond_t * cond, pthread_mutex_t * mutex) _thread_kern_sched_state_unlock(PS_COND_WAIT, &(*cond)->lock, __FILE__, __LINE__); - /* Lock the mutex: */ + if (_thread_run->interrupted != 0) { + /* + * Lock the condition variable + * while removing the thread. + */ + _SPINLOCK(&(*cond)->lock); + + cond_queue_remove(*cond, + _thread_run); + + /* Check for no more waiters: */ + if (TAILQ_FIRST(&(*cond)->c_queue) == NULL) + (*cond)->c_mutex = NULL; + + _SPINUNLOCK(&(*cond)->lock); + } + + /* + * Note that even though this thread may have + * been canceled, POSIX requires that the mutex + * be reaquired prior to cancellation. 
+ */ rval = _mutex_cv_lock(mutex); } } @@ -248,6 +272,13 @@ pthread_cond_wait(pthread_cond_t * cond, pthread_mutex_t * mutex) rval = EINVAL; break; } + + if ((_thread_run->cancelflags & PTHREAD_CANCEL_NEEDED) != 0) { + _thread_exit_cleanup(); + pthread_exit(PTHREAD_CANCELED); + } + + _thread_leave_cancellation_point(); } /* Return the completion status: */ @@ -258,8 +289,7 @@ int pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex, const struct timespec * abstime) { - int rval = 0; - int status; + int rval = 0; if (cond == NULL || abstime == NULL) rval = EINVAL; @@ -276,6 +306,9 @@ pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex, */ if (*cond != NULL || (rval = pthread_cond_init(cond,NULL)) == 0) { + + _thread_enter_cancellation_point(); + /* Lock the condition variable structure: */ _SPINLOCK(&(*cond)->lock); @@ -306,8 +339,9 @@ pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex, _thread_run->wakeup_time.tv_nsec = abstime->tv_nsec; - /* Reset the timeout flag: */ + /* Reset the timeout and interrupted flags: */ _thread_run->timeout = 0; + _thread_run->interrupted = 0; /* * Queue the running thread for the condition @@ -341,12 +375,16 @@ pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex, _thread_kern_sched_state_unlock(PS_COND_WAIT, &(*cond)->lock, __FILE__, __LINE__); - /* Check if the wait timedout: */ - if (_thread_run->timeout == 0) { + /* + * Check if the wait timedout or was + * interrupted (canceled): + */ + if ((_thread_run->timeout == 0) && + (_thread_run->interrupted == 0)) { /* Lock the mutex: */ rval = _mutex_cv_lock(mutex); - } - else { + + } else { /* Lock the condition variable structure: */ _SPINLOCK(&(*cond)->lock); @@ -369,8 +407,12 @@ pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex, rval = ETIMEDOUT; /* - * Lock the mutex and ignore - * any errors: + * Lock the mutex and ignore any + * errors. Note that even though + * this thread may have been + * canceled, POSIX requires that + * the mutex be reaquired prior + * to cancellation. */ (void)_mutex_cv_lock(mutex); } @@ -388,6 +430,12 @@ pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex, break; } + if ((_thread_run->cancelflags & PTHREAD_CANCEL_NEEDED) != 0) { + _thread_exit_cleanup(); + pthread_exit(PTHREAD_CANCELED); + } + + _thread_leave_cancellation_point(); } /* Return the completion status: */ @@ -416,16 +464,7 @@ pthread_cond_signal(pthread_cond_t * cond) switch ((*cond)->c_type) { /* Fast condition variable: */ case COND_TYPE_FAST: - /* - * Enter a loop to dequeue threads from the condition - * queue until we find one that hasn't previously - * timed out. - */ - while (((pthread = cond_queue_deq(*cond)) != NULL) && - (pthread->timeout != 0)) { - } - - if (pthread != NULL) + if ((pthread = cond_queue_deq(*cond)) != NULL) /* Allow the thread to run: */ PTHREAD_NEW_STATE(pthread,PS_RUNNING); @@ -482,12 +521,7 @@ pthread_cond_broadcast(pthread_cond_t * cond) * condition queue: */ while ((pthread = cond_queue_deq(*cond)) != NULL) { - /* - * The thread is already running if the - * timeout flag is set. 
- */ - if (pthread->timeout == 0) - PTHREAD_NEW_STATE(pthread,PS_RUNNING); + PTHREAD_NEW_STATE(pthread,PS_RUNNING); } /* There are no more waiting threads: */ @@ -524,9 +558,17 @@ cond_queue_deq(pthread_cond_t cond) { pthread_t pthread; - if ((pthread = TAILQ_FIRST(&cond->c_queue)) != NULL) { + while ((pthread = TAILQ_FIRST(&cond->c_queue)) != NULL) { TAILQ_REMOVE(&cond->c_queue, pthread, qe); pthread->flags &= ~PTHREAD_FLAGS_IN_CONDQ; + if ((pthread->timeout == 0) && (pthread->interrupted == 0)) + /* + * Only exit the loop when we find a thread + * that hasn't timed out or been canceled; + * those threads are already running and don't + * need their run state changed. + */ + break; } return(pthread); diff --git a/lib/libc_r/uthread/uthread_create.c b/lib/libc_r/uthread/uthread_create.c index b3ea391..f7c95d0 100644 --- a/lib/libc_r/uthread/uthread_create.c +++ b/lib/libc_r/uthread/uthread_create.c @@ -50,9 +50,7 @@ pthread_create(pthread_t * thread, const pthread_attr_t * attr, void *(*start_routine) (void *), void *arg) { int f_gc = 0; - int i; int ret = 0; - int status; pthread_t gc_thread; pthread_t new_thread; pthread_attr_t pattr; @@ -166,6 +164,9 @@ pthread_create(pthread_t * thread, const pthread_attr_t * attr, new_thread->start_routine = start_routine; new_thread->arg = arg; + new_thread->cancelflags = PTHREAD_CANCEL_ENABLE | + PTHREAD_CANCEL_DEFERRED; + /* * Write a magic value to the thread structure * to help identify valid ones: diff --git a/lib/libc_r/uthread/uthread_exit.c b/lib/libc_r/uthread/uthread_exit.c index 795decc..abe4b27 100644 --- a/lib/libc_r/uthread/uthread_exit.c +++ b/lib/libc_r/uthread/uthread_exit.c @@ -34,6 +34,8 @@ #include #include #include +#include +#include #include #ifdef _THREAD_SAFE #include @@ -101,17 +103,45 @@ _thread_exit(char *fname, int lineno, char *string) #endif } +/* + * Only called when a thread is cancelled. It may be more useful + * to call it from pthread_exit() if other ways of asynchronous or + * abnormal thread termination can be found. + */ +void +_thread_exit_cleanup(void) +{ + /* + * POSIX states that cancellation/termination of a thread should + * not release any visible resources (such as mutexes) and that + * it is the applications responsibility. Resources that are + * internal to the threads library, including file and fd locks, + * are not visible to the application and need to be released. + */ + /* Unlock all owned fd locks: */ + _thread_fd_unlock_owned(_thread_run); + + /* Unlock all owned file locks: */ + _funlock_owned(_thread_run); + + /* Unlock all private mutexes: */ + _mutex_unlock_private(_thread_run); + + /* + * This still isn't quite correct because we don't account + * for held spinlocks (see libc/stdlib/malloc.c). + */ +} + void pthread_exit(void *status) { - int sig; - long l; - pthread_t pthread; + pthread_t pthread; /* Check if this thread is already in the process of exiting: */ if ((_thread_run->flags & PTHREAD_EXITING) != 0) { char msg[128]; - snprintf(msg,"Thread %p has called pthread_exit() from a destructor. POSIX 1003.1 1996 s16.2.5.2 does not allow this!",_thread_run); + snprintf(msg, sizeof(msg), "Thread %p has called pthread_exit() from a destructor. 
POSIX 1003.1 1996 s16.2.5.2 does not allow this!",_thread_run); PANIC(msg); } @@ -134,7 +164,7 @@ pthread_exit(void *status) _thread_cleanupspecific(); } - /* Free thread-specific poll_data structure, if allocated */ + /* Free thread-specific poll_data structure, if allocated: */ if (_thread_run->poll_data.fds != NULL) { free(_thread_run->poll_data.fds); _thread_run->poll_data.fds = NULL; diff --git a/lib/libc_r/uthread/uthread_fchflags.c b/lib/libc_r/uthread/uthread_fchflags.c new file mode 100644 index 0000000..ec4e747 --- /dev/null +++ b/lib/libc_r/uthread/uthread_fchflags.c @@ -0,0 +1,25 @@ +/* + * David Leonard , 1999. Public Domain. + * + * $OpenBSD: uthread_fchflags.c,v 1.1 1999/01/08 05:42:18 d Exp $ + * $FreeBSD$ + */ + +#include +#include +#ifdef _THREAD_SAFE +#include +#include "pthread_private.h" + +int +fchflags(int fd, u_long flags) +{ + int ret; + + if ((ret = _FD_LOCK(fd, FD_WRITE, NULL)) == 0) { + ret = _thread_sys_fchflags(fd, flags); + _FD_UNLOCK(fd, FD_WRITE); + } + return (ret); +} +#endif diff --git a/lib/libc_r/uthread/uthread_fcntl.c b/lib/libc_r/uthread/uthread_fcntl.c index 9a6293c..878554c 100644 --- a/lib/libc_r/uthread/uthread_fcntl.c +++ b/lib/libc_r/uthread/uthread_fcntl.c @@ -47,6 +47,8 @@ fcntl(int fd, int cmd,...) int ret; va_list ap; + _thread_enter_cancellation_point(); + /* Lock the file descriptor: */ if ((ret = _FD_LOCK(fd, FD_RDWR, NULL)) == 0) { /* Initialise the variable argument list: */ @@ -135,6 +137,7 @@ fcntl(int fd, int cmd,...) /* Unlock the file descriptor: */ _FD_UNLOCK(fd, FD_RDWR); } + _thread_leave_cancellation_point(); /* Return the completion status: */ return (ret); diff --git a/lib/libc_r/uthread/uthread_fd.c b/lib/libc_r/uthread/uthread_fd.c index 76069bf..3b69639 100644 --- a/lib/libc_r/uthread/uthread_fd.c +++ b/lib/libc_r/uthread/uthread_fd.c @@ -40,9 +40,29 @@ #include #include "pthread_private.h" +#define FDQ_INSERT(q,p) \ +do { \ + TAILQ_INSERT_TAIL(q,p,qe); \ + p->flags |= PTHREAD_FLAGS_IN_FDQ; \ +} while (0) + +#define FDQ_REMOVE(q,p) \ +do { \ + if ((p->flags & PTHREAD_FLAGS_IN_FDQ) != 0) { \ + TAILQ_REMOVE(q,p,qe); \ + p->flags &= ~PTHREAD_FLAGS_IN_FDQ; \ + } \ +} while (0) + + /* Static variables: */ static spinlock_t fd_table_lock = _SPINLOCK_INITIALIZER; +/* Prototypes: */ +static inline pthread_t fd_next_reader(int fd); +static inline pthread_t fd_next_writer(int fd); + + /* * This function *must* return -1 and set the thread specific errno * as a system call. 
This is because the error return from this @@ -201,11 +221,11 @@ _thread_fd_unlock(int fd, int lock_type) * Get the next thread in the queue for a * read lock on this file descriptor: */ - else if ((_thread_fd_table[fd]->r_owner = TAILQ_FIRST(&_thread_fd_table[fd]->r_queue)) == NULL) { + else if ((_thread_fd_table[fd]->r_owner = fd_next_reader(fd)) == NULL) { } else { /* Remove this thread from the queue: */ - TAILQ_REMOVE(&_thread_fd_table[fd]->r_queue, - _thread_fd_table[fd]->r_owner, qe); + FDQ_REMOVE(&_thread_fd_table[fd]->r_queue, + _thread_fd_table[fd]->r_owner); /* * Set the state of the new owner of @@ -243,11 +263,11 @@ _thread_fd_unlock(int fd, int lock_type) * Get the next thread in the queue for a * write lock on this file descriptor: */ - else if ((_thread_fd_table[fd]->w_owner = TAILQ_FIRST(&_thread_fd_table[fd]->w_queue)) == NULL) { + else if ((_thread_fd_table[fd]->w_owner = fd_next_writer(fd)) == NULL) { } else { /* Remove this thread from the queue: */ - TAILQ_REMOVE(&_thread_fd_table[fd]->w_queue, - _thread_fd_table[fd]->w_owner, qe); + FDQ_REMOVE(&_thread_fd_table[fd]->w_queue, + _thread_fd_table[fd]->w_owner); /* * Set the state of the new owner of @@ -290,6 +310,9 @@ _thread_fd_lock(int fd, int lock_type, struct timespec * timeout) * entry: */ if ((ret = _thread_fd_table_init(fd)) == 0) { + /* Clear the interrupted flag: */ + _thread_run->interrupted = 0; + /* * Lock the file descriptor table entry to prevent * other threads for clashing with the current @@ -300,10 +323,10 @@ _thread_fd_lock(int fd, int lock_type, struct timespec * timeout) /* Check the file descriptor and lock types: */ if (lock_type == FD_READ || lock_type == FD_RDWR) { /* - * Enter a loop to wait for the file descriptor to be - * locked for read for the current thread: + * Wait for the file descriptor to be locked + * for read for the current thread: */ - while (_thread_fd_table[fd]->r_owner != _thread_run) { + if (_thread_fd_table[fd]->r_owner != _thread_run) { /* * Check if the file descriptor is locked by * another thread: @@ -315,7 +338,7 @@ _thread_fd_lock(int fd, int lock_type, struct timespec * timeout) * queue of threads waiting for a * read lock on this file descriptor: */ - TAILQ_INSERT_TAIL(&_thread_fd_table[fd]->r_queue, _thread_run, qe); + FDQ_INSERT(&_thread_fd_table[fd]->r_queue, _thread_run); /* * Save the file descriptor details @@ -350,6 +373,10 @@ _thread_fd_lock(int fd, int lock_type, struct timespec * timeout) */ _SPINLOCK(&_thread_fd_table[fd]->lock); + if (_thread_run->interrupted != 0) { + FDQ_REMOVE(&_thread_fd_table[fd]->r_queue, + _thread_run); + } } else { /* * The running thread now owns the @@ -365,8 +392,9 @@ _thread_fd_lock(int fd, int lock_type, struct timespec * timeout) } } - /* Increment the read lock count: */ - _thread_fd_table[fd]->r_lockcount++; + if (_thread_fd_table[fd]->r_owner == _thread_run) + /* Increment the read lock count: */ + _thread_fd_table[fd]->r_lockcount++; } /* Check the file descriptor and lock types: */ @@ -388,7 +416,7 @@ _thread_fd_lock(int fd, int lock_type, struct timespec * timeout) * write lock on this file * descriptor: */ - TAILQ_INSERT_TAIL(&_thread_fd_table[fd]->w_queue, _thread_run, qe); + FDQ_INSERT(&_thread_fd_table[fd]->w_queue, _thread_run); /* * Save the file descriptor details @@ -421,6 +449,11 @@ _thread_fd_lock(int fd, int lock_type, struct timespec * timeout) * table entry again: */ _SPINLOCK(&_thread_fd_table[fd]->lock); + + if (_thread_run->interrupted != 0) { + FDQ_REMOVE(&_thread_fd_table[fd]->w_queue, + 
_thread_run); + } } else { /* * The running thread now owns the @@ -437,12 +470,23 @@ _thread_fd_lock(int fd, int lock_type, struct timespec * timeout) } } - /* Increment the write lock count: */ - _thread_fd_table[fd]->w_lockcount++; + if (_thread_fd_table[fd]->w_owner == _thread_run) + /* Increment the write lock count: */ + _thread_fd_table[fd]->w_lockcount++; } /* Unlock the file descriptor table entry: */ _SPINUNLOCK(&_thread_fd_table[fd]->lock); + + if (_thread_run->interrupted != 0) { + if ((_thread_run->cancelflags & PTHREAD_CANCEL_NEEDED) == 0) { + ret = -1; + errno = EINTR; + } else { + _thread_exit_cleanup(); + pthread_exit(PTHREAD_CANCELED); + } + } } /* Return the completion status: */ @@ -492,11 +536,11 @@ _thread_fd_unlock_debug(int fd, int lock_type, char *fname, int lineno) * Get the next thread in the queue for a * read lock on this file descriptor: */ - else if ((_thread_fd_table[fd]->r_owner = TAILQ_FIRST(&_thread_fd_table[fd]->r_queue)) == NULL) { + else if ((_thread_fd_table[fd]->r_owner = fd_next_reader(fd)) == NULL) { } else { /* Remove this thread from the queue: */ - TAILQ_REMOVE(&_thread_fd_table[fd]->r_queue, - _thread_fd_table[fd]->r_owner, qe); + FDQ_REMOVE(&_thread_fd_table[fd]->r_queue, + _thread_fd_table[fd]->r_owner); /* * Set the state of the new owner of @@ -534,11 +578,11 @@ _thread_fd_unlock_debug(int fd, int lock_type, char *fname, int lineno) * Get the next thread in the queue for a * write lock on this file descriptor: */ - else if ((_thread_fd_table[fd]->w_owner = TAILQ_FIRST(&_thread_fd_table[fd]->w_queue)) == NULL) { + else if ((_thread_fd_table[fd]->w_owner = fd_next_writer(fd)) == NULL) { } else { /* Remove this thread from the queue: */ - TAILQ_REMOVE(&_thread_fd_table[fd]->w_queue, - _thread_fd_table[fd]->w_owner, qe); + FDQ_REMOVE(&_thread_fd_table[fd]->w_queue, + _thread_fd_table[fd]->w_owner); /* * Set the state of the new owner of @@ -582,6 +626,9 @@ _thread_fd_lock_debug(int fd, int lock_type, struct timespec * timeout, * entry: */ if ((ret = _thread_fd_table_init(fd)) == 0) { + /* Clear the interrupted flag: */ + _thread_run->interrupted = 0; + /* * Lock the file descriptor table entry to prevent * other threads for clashing with the current @@ -607,7 +654,7 @@ _thread_fd_lock_debug(int fd, int lock_type, struct timespec * timeout, * queue of threads waiting for a * read lock on this file descriptor: */ - TAILQ_INSERT_TAIL(&_thread_fd_table[fd]->r_queue, _thread_run, qe); + FDQ_INSERT(&_thread_fd_table[fd]->r_queue, _thread_run); /* * Save the file descriptor details @@ -689,7 +736,7 @@ _thread_fd_lock_debug(int fd, int lock_type, struct timespec * timeout, * write lock on this file * descriptor: */ - TAILQ_INSERT_TAIL(&_thread_fd_table[fd]->w_queue, _thread_run, qe); + FDQ_INSERT(&_thread_fd_table[fd]->w_queue, _thread_run); /* * Save the file descriptor details @@ -753,9 +800,132 @@ _thread_fd_lock_debug(int fd, int lock_type, struct timespec * timeout, /* Unlock the file descriptor table entry: */ _SPINUNLOCK(&_thread_fd_table[fd]->lock); + + if (_thread_run->interrupted != 0) { + if ((_thread_run->cancelflags & PTHREAD_CANCEL_NEEDED) == 0) { + ret = -1; + errno = EINTR; + } else { + _thread_exit_cleanup(); + pthread_exit(PTHREAD_CANCELED); + } + } } /* Return the completion status: */ return (ret); } + +void +_thread_fd_unlock_owned(pthread_t pthread) +{ + int fd; + + for (fd = 0; fd < _thread_dtablesize; fd++) { + if ((_thread_fd_table[fd] != NULL) && + ((_thread_fd_table[fd]->r_owner == pthread) || + (_thread_fd_table[fd]->w_owner 
== pthread))) { + /* + * Defer signals to protect the scheduling queues + * from access by the signal handler: + */ + _thread_kern_sig_defer(); + + /* + * Lock the file descriptor table entry to prevent + * other threads for clashing with the current + * thread's accesses: + */ + _SPINLOCK(&_thread_fd_table[fd]->lock); + + /* Check if the thread owns the read lock: */ + if (_thread_fd_table[fd]->r_owner == pthread) { + /* Clear the read lock count: */ + _thread_fd_table[fd]->r_lockcount = 0; + + /* + * Get the next thread in the queue for a + * read lock on this file descriptor: + */ + if ((_thread_fd_table[fd]->r_owner = fd_next_reader(fd)) != NULL) { + /* Remove this thread from the queue: */ + FDQ_REMOVE(&_thread_fd_table[fd]->r_queue, + _thread_fd_table[fd]->r_owner); + + /* + * Set the state of the new owner of + * the thread to running: + */ + PTHREAD_NEW_STATE(_thread_fd_table[fd]->r_owner,PS_RUNNING); + } + } + + /* Check if the thread owns the write lock: */ + if (_thread_fd_table[fd]->w_owner == pthread) { + /* Clear the write lock count: */ + _thread_fd_table[fd]->w_lockcount = 0; + + /* + * Get the next thread in the queue for a + * write lock on this file descriptor: + */ + if ((_thread_fd_table[fd]->w_owner = fd_next_writer(fd)) != NULL) { + /* Remove this thread from the queue: */ + FDQ_REMOVE(&_thread_fd_table[fd]->w_queue, + _thread_fd_table[fd]->w_owner); + + /* + * Set the state of the new owner of + * the thread to running: + */ + PTHREAD_NEW_STATE(_thread_fd_table[fd]->w_owner,PS_RUNNING); + + } + } + + /* Unlock the file descriptor table entry: */ + _SPINUNLOCK(&_thread_fd_table[fd]->lock); + + /* + * Undefer and handle pending signals, yielding if + * necessary. + */ + _thread_kern_sig_undefer(); + } + } +} + +static inline pthread_t +fd_next_reader(int fd) +{ + pthread_t pthread; + + while (((pthread = TAILQ_FIRST(&_thread_fd_table[fd]->r_queue)) != NULL) && + (pthread->interrupted != 0)) { + /* + * This thread has either been interrupted by a signal or + * it has been canceled. Remove it from the queue. + */ + FDQ_REMOVE(&_thread_fd_table[fd]->r_queue, pthread); + } + + return (pthread); +} + +static inline pthread_t +fd_next_writer(int fd) +{ + pthread_t pthread; + + while (((pthread = TAILQ_FIRST(&_thread_fd_table[fd]->w_queue)) != NULL) && + (pthread->interrupted != 0)) { + /* + * This thread has either been interrupted by a signal or + * it has been canceled. Remove it from the queue. + */ + FDQ_REMOVE(&_thread_fd_table[fd]->w_queue, pthread); + } + + return (pthread); +} #endif diff --git a/lib/libc_r/uthread/uthread_file.c b/lib/libc_r/uthread/uthread_file.c index 709c8f6..5c89b09 100644 --- a/lib/libc_r/uthread/uthread_file.c +++ b/lib/libc_r/uthread/uthread_file.c @@ -225,18 +225,41 @@ _flockfile_debug(FILE * fp, char *fname, int lineno) /* Unlock the hash table: */ _SPINUNLOCK(&hash_lock); } else { + /* Clear the interrupted flag: */ + _thread_run->interrupted = 0; + + /* + * Prevent being context switched out while + * adding this thread to the file lock queue. + */ + _thread_kern_sig_defer(); + /* * The file is locked for another thread. * Append this thread to the queue of * threads waiting on the lock. 
*/ TAILQ_INSERT_TAIL(&p->l_head,_thread_run,qe); + _thread_run->flags |= PTHREAD_FLAGS_IN_FILEQ; /* Unlock the hash table: */ _SPINUNLOCK(&hash_lock); /* Wait on the FILE lock: */ _thread_kern_sched_state(PS_FILE_WAIT, fname, lineno); + + if ((_thread_run->flags & PTHREAD_FLAGS_IN_FILEQ) != 0) { + TAILQ_REMOVE(&p->l_head,_thread_run,qe); + _thread_run->flags &= ~PTHREAD_FLAGS_IN_FILEQ; + } + + _thread_kern_sig_undefer(); + + if (((_thread_run->cancelflags & PTHREAD_CANCEL_NEEDED) != 0) && + (_thread_run->cancelflags & PTHREAD_CANCEL_DISABLE) != 0) { + _thread_exit_cleanup(); + pthread_exit(PTHREAD_CANCELED); + } } } return; @@ -304,7 +327,6 @@ _ftrylockfile(FILE * fp) void _funlockfile(FILE * fp) { - int status; int idx = file_idx(fp); struct file_lock *p; @@ -344,18 +366,27 @@ _funlockfile(FILE * fp) p->count = 0; /* Get the new owner of the lock: */ - if ((p->owner = TAILQ_FIRST(&p->l_head)) != NULL) { + while ((p->owner = TAILQ_FIRST(&p->l_head)) != NULL) { /* Pop the thread off the queue: */ TAILQ_REMOVE(&p->l_head,p->owner,qe); - - /* - * This is the first lock for the new - * owner: - */ - p->count = 1; - - /* Allow the new owner to run: */ - PTHREAD_NEW_STATE(p->owner,PS_RUNNING); + p->owner->flags &= ~PTHREAD_FLAGS_IN_FILEQ; + + if (p->owner->interrupted == 0) { + /* + * This is the first lock for + * the new owner: + */ + p->count = 1; + + /* Allow the new owner to run: */ + PTHREAD_NEW_STATE(p->owner,PS_RUNNING); + + /* End the loop when we find a + * thread that hasn't been + * cancelled or interrupted; + */ + break; + } } } } @@ -372,4 +403,72 @@ _funlockfile(FILE * fp) return; } +void +_funlock_owned(pthread_t pthread) +{ + int idx; + struct file_lock *p, *next_p; + + /* + * Defer signals to protect the scheduling queues from + * access by the signal handler: + */ + _thread_kern_sig_defer(); + + /* Lock the hash table: */ + _SPINLOCK(&hash_lock); + + for (idx = 0; idx < NUM_HEADS; idx++) { + /* Check the static file lock first: */ + p = &flh[idx].fl; + next_p = LIST_FIRST(&flh[idx].head); + + while (p != NULL) { + if (p->owner == pthread) { + /* + * The running thread will release the + * lock now: + */ + p->count = 0; + + /* Get the new owner of the lock: */ + while ((p->owner = TAILQ_FIRST(&p->l_head)) != NULL) { + /* Pop the thread off the queue: */ + TAILQ_REMOVE(&p->l_head,p->owner,qe); + p->owner->flags &= ~PTHREAD_FLAGS_IN_FILEQ; + + if (p->owner->interrupted == 0) { + /* + * This is the first lock for + * the new owner: + */ + p->count = 1; + + /* Allow the new owner to run: */ + PTHREAD_NEW_STATE(p->owner,PS_RUNNING); + + /* End the loop when we find a + * thread that hasn't been + * cancelled or interrupted; + */ + break; + } + } + } + p = next_p; + if (next_p != NULL) + next_p = LIST_NEXT(next_p, entry); + } + } + + /* Unlock the hash table: */ + _SPINUNLOCK(&hash_lock); + + /* + * Undefer and handle pending signals, yielding if + * necessary: + */ + _thread_kern_sig_undefer(); +} + #endif diff --git a/lib/libc_r/uthread/uthread_fsync.c b/lib/libc_r/uthread/uthread_fsync.c index 3287edd..21c3b56 100644 --- a/lib/libc_r/uthread/uthread_fsync.c +++ b/lib/libc_r/uthread/uthread_fsync.c @@ -41,10 +41,12 @@ fsync(int fd) { int ret; + _thread_enter_cancellation_point(); if ((ret = _FD_LOCK(fd, FD_RDWR, NULL)) == 0) { ret = _thread_sys_fsync(fd); _FD_UNLOCK(fd, FD_RDWR); } + _thread_leave_cancellation_point(); return (ret); } #endif diff --git a/lib/libc_r/uthread/uthread_getschedparam.c b/lib/libc_r/uthread/uthread_getschedparam.c index b6c0c35..09d8c1b 100644 --- 
a/lib/libc_r/uthread/uthread_getschedparam.c +++ b/lib/libc_r/uthread/uthread_getschedparam.c @@ -37,7 +37,8 @@ #include "pthread_private.h" int -pthread_getschedparam(pthread_t pthread, int *policy, struct sched_param *param) +pthread_getschedparam(pthread_t pthread, int *policy, + struct sched_param *param) { int ret; diff --git a/lib/libc_r/uthread/uthread_init.c b/lib/libc_r/uthread/uthread_init.c index a2ce493..bab7e5b 100644 --- a/lib/libc_r/uthread/uthread_init.c +++ b/lib/libc_r/uthread/uthread_init.c @@ -42,6 +42,7 @@ #include #include #include +#include #include #include #include @@ -199,6 +200,10 @@ _thread_init(void) */ _thread_initial->magic = PTHREAD_MAGIC; + /* Set the initial cancel state */ + _thread_initial->cancelflags = PTHREAD_CANCEL_ENABLE | + PTHREAD_CANCEL_DEFERRED; + /* Default the priority of the initial thread: */ _thread_initial->base_priority = PTHREAD_DEFAULT_PRIORITY; _thread_initial->active_priority = PTHREAD_DEFAULT_PRIORITY; diff --git a/lib/libc_r/uthread/uthread_join.c b/lib/libc_r/uthread/uthread_join.c index 8b00082..d149cf1 100644 --- a/lib/libc_r/uthread/uthread_join.c +++ b/lib/libc_r/uthread/uthread_join.c @@ -41,16 +41,22 @@ pthread_join(pthread_t pthread, void **thread_return) { int ret = 0; pthread_t pthread1 = NULL; + + _thread_enter_cancellation_point(); /* Check if the caller has specified an invalid thread: */ - if (pthread == NULL || pthread->magic != PTHREAD_MAGIC) + if (pthread == NULL || pthread->magic != PTHREAD_MAGIC) { /* Invalid thread: */ + _thread_leave_cancellation_point(); return(EINVAL); + } /* Check if the caller has specified itself: */ - if (pthread == _thread_run) + if (pthread == _thread_run) { /* Avoid a deadlock condition: */ + _thread_leave_cancellation_point(); return(EDEADLK); + } /* * Find the thread in the list of active threads or in the @@ -71,12 +77,31 @@ pthread_join(pthread_t pthread, void **thread_return) /* Check if the thread is not dead: */ else if (pthread->state != PS_DEAD) { + /* Clear the interrupted flag: */ + _thread_run->interrupted = 0; + + /* + * Protect against being context switched out while + * adding this thread to the join queue. 
+ */ + _thread_kern_sig_defer(); + /* Add the running thread to the join queue: */ TAILQ_INSERT_TAIL(&(pthread->join_queue), _thread_run, qe); /* Schedule the next thread: */ _thread_kern_sched_state(PS_JOIN, __FILE__, __LINE__); + if (_thread_run->interrupted != 0) + TAILQ_REMOVE(&(pthread->join_queue), _thread_run, qe); + + _thread_kern_sig_undefer(); + + if ((_thread_run->cancelflags & PTHREAD_CANCEL_NEEDED) != 0) { + _thread_exit_cleanup(); + pthread_exit(PTHREAD_CANCELED); + } + /* Check if the thread is not detached: */ if ((pthread->attr.flags & PTHREAD_DETACHED) == 0) { /* Check if the return value is required: */ @@ -93,6 +118,8 @@ pthread_join(pthread_t pthread, void **thread_return) /* Return the thread's return value: */ *thread_return = pthread->ret; + _thread_leave_cancellation_point(); + /* Return the completion status: */ return (ret); } diff --git a/lib/libc_r/uthread/uthread_kern.c b/lib/libc_r/uthread/uthread_kern.c index a4717fa..cdb84a5 100644 --- a/lib/libc_r/uthread/uthread_kern.c +++ b/lib/libc_r/uthread/uthread_kern.c @@ -67,11 +67,10 @@ _thread_kern_sched(ucontext_t * scp) char *fdata; #endif pthread_t pthread, pthread_h = NULL; - pthread_t last_thread = NULL; struct itimerval itimer; struct timespec ts, ts1; struct timeval tv, tv1; - int i, set_timer = 0; + int set_timer = 0; /* * Flag the pthread kernel as executing scheduler code @@ -109,6 +108,20 @@ __asm__("fnsave %0": :"m"(*fdata)); */ _thread_kern_in_sched = 0; + if (((_thread_run->cancelflags & PTHREAD_AT_CANCEL_POINT) == 0) && + ((_thread_run->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS) != 0)) { + /* + * Cancelations override signals. + * + * Stick a cancellation point at the start of + * each async-cancellable thread's resumption. + * + * We allow threads woken at cancel points to do their + * own checks. + */ + pthread_testcancel(); + } + if (_sched_switch_hook != NULL) { /* Run the installed switch hook: */ thread_run_switch_hook(_last_user_thread, _thread_run); @@ -161,6 +174,7 @@ __asm__("fnsave %0": :"m"(*fdata)); */ switch (_thread_run->state) { case PS_DEAD: + case PS_STATE_MAX: /* to silence -Wall */ /* * Dead threads are not placed in any queue: */ @@ -249,6 +263,7 @@ __asm__("fnsave %0": :"m"(*fdata)); /* Insert into the work queue: */ PTHREAD_WORKQ_INSERT(_thread_run); + break; } } @@ -627,14 +642,12 @@ _thread_kern_sched_state_unlock(enum pthread_state state, static void _thread_kern_poll(int wait_reqd) { - char bufr[128]; int count = 0; int i, found; int kern_pipe_added = 0; int nfds = 0; int timeout_ms = 0; - struct pthread *pthread, *pthread_next; - ssize_t num; + struct pthread *pthread; struct timespec ts; struct timeval tv; @@ -1103,10 +1116,10 @@ thread_run_switch_hook(pthread_t thread_out, pthread_t thread_in) pthread_t tid_in = thread_in; if ((tid_out != NULL) && - (tid_out->flags & PTHREAD_FLAGS_PRIVATE != 0)) + (tid_out->flags & PTHREAD_FLAGS_PRIVATE) != 0) tid_out = NULL; if ((tid_in != NULL) && - (tid_in->flags & PTHREAD_FLAGS_PRIVATE != 0)) + (tid_in->flags & PTHREAD_FLAGS_PRIVATE) != 0) tid_in = NULL; if ((_sched_switch_hook != NULL) && (tid_out != tid_in)) { diff --git a/lib/libc_r/uthread/uthread_msync.c b/lib/libc_r/uthread/uthread_msync.c new file mode 100644 index 0000000..209286d --- /dev/null +++ b/lib/libc_r/uthread/uthread_msync.c @@ -0,0 +1,40 @@ +/* + * David Leonard , 1999. Public Domain. 
+ * + * $OpenBSD: uthread_msync.c,v 1.2 1999/06/09 07:16:17 d Exp $ + * + * $FreeBSD$ + */ + +#include +#include +#ifdef _THREAD_SAFE +#include +#include "pthread_private.h" + +int +msync(addr, len, flags) + void *addr; + size_t len; + int flags; +{ + int ret; + + /* + * XXX This is quite pointless unless we know how to get the + * file descriptor associated with the memory, and lock it for + * write. The only real use of this wrapper is to guarantee + * a cancellation point, as per the standard. sigh. + */ + + /* This is a cancellation point: */ + _thread_enter_cancellation_point(); + + ret = _thread_sys_msync(addr, len, flags); + + /* No longer in a cancellation point: */ + _thread_leave_cancellation_point(); + + return (ret); +} +#endif diff --git a/lib/libc_r/uthread/uthread_mutex.c b/lib/libc_r/uthread/uthread_mutex.c index a402d4b..b2a06f2 100644 --- a/lib/libc_r/uthread/uthread_mutex.c +++ b/lib/libc_r/uthread/uthread_mutex.c @@ -94,7 +94,8 @@ _mutex_reinit(pthread_mutex_t * mutex) TAILQ_INIT(&(*mutex)->m_queue); (*mutex)->m_owner = NULL; (*mutex)->m_data.m_count = 0; - (*mutex)->m_flags = MUTEX_FLAGS_INITED; + (*mutex)->m_flags &= MUTEX_FLAGS_PRIVATE; + (*mutex)->m_flags |= MUTEX_FLAGS_INITED; (*mutex)->m_refcount = 0; (*mutex)->m_prio = 0; (*mutex)->m_saved_prio = 0; @@ -428,6 +429,9 @@ pthread_mutex_lock(pthread_mutex_t * mutex) _MUTEX_INIT_LINK(*mutex); } + /* Reset the interrupted flag: */ + _thread_run->interrupted = 0; + /* Process according to mutex type: */ switch ((*mutex)->m_protocol) { /* Default POSIX mutex: */ @@ -602,6 +606,13 @@ pthread_mutex_lock(pthread_mutex_t * mutex) break; } + /* + * Check to see if this thread was interrupted and + * is still in the mutex queue of waiting threads: + */ + if (_thread_run->interrupted != 0) + mutex_queue_remove(*mutex, _thread_run); + /* Unlock the mutex structure: */ _SPINUNLOCK(&(*mutex)->lock); @@ -610,6 +621,11 @@ pthread_mutex_lock(pthread_mutex_t * mutex) * necessary: */ _thread_kern_sig_undefer(); + + if ((_thread_run->cancelflags & PTHREAD_CANCEL_NEEDED) != 0) { + _thread_exit_cleanup(); + pthread_exit(PTHREAD_CANCELED); + } } /* Return the completion status: */ @@ -1314,6 +1330,18 @@ mutex_rescan_owned (pthread_t pthread, pthread_mutex_t mutex) } } +void +_mutex_unlock_private(pthread_t pthread) +{ + struct pthread_mutex *m, *m_next; + + for (m = TAILQ_FIRST(&pthread->mutexq); m != NULL; m = m_next) { + m_next = TAILQ_NEXT(m, m_qe); + if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0) + pthread_mutex_unlock(&m); + } +} + /* * Dequeue a waiting thread from the head of a mutex queue in descending * priority order. @@ -1323,8 +1351,17 @@ mutex_queue_deq(pthread_mutex_t mutex) { pthread_t pthread; - if ((pthread = TAILQ_FIRST(&mutex->m_queue)) != NULL) + while ((pthread = TAILQ_FIRST(&mutex->m_queue)) != NULL) { TAILQ_REMOVE(&mutex->m_queue, pthread, qe); + pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ; + + /* + * Only exit the loop if the thread hasn't been + * cancelled. 
+ */ + if (pthread->interrupted == 0) + break; + } return(pthread); } @@ -1335,7 +1372,10 @@ mutex_queue_deq(pthread_mutex_t mutex) static inline void mutex_queue_remove(pthread_mutex_t mutex, pthread_t pthread) { - TAILQ_REMOVE(&mutex->m_queue, pthread, qe); + if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) { + TAILQ_REMOVE(&mutex->m_queue, pthread, qe); + pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ; + } } /* @@ -1359,6 +1399,7 @@ mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread) tid = TAILQ_NEXT(tid, qe); TAILQ_INSERT_BEFORE(tid, pthread, qe); } + pthread->flags |= PTHREAD_FLAGS_IN_MUTEXQ; } #endif diff --git a/lib/libc_r/uthread/uthread_nanosleep.c b/lib/libc_r/uthread/uthread_nanosleep.c index a922040..e4772b4 100644 --- a/lib/libc_r/uthread/uthread_nanosleep.c +++ b/lib/libc_r/uthread/uthread_nanosleep.c @@ -47,6 +47,7 @@ nanosleep(const struct timespec * time_to_sleep, struct timespec remaining_time; struct timeval tv; + _thread_enter_cancellation_point(); /* Check if the time to sleep is legal: */ if (time_to_sleep == NULL || time_to_sleep->tv_sec < 0 || time_to_sleep->tv_nsec < 0 || time_to_sleep->tv_nsec >= 1000000000) { @@ -116,6 +117,7 @@ nanosleep(const struct timespec * time_to_sleep, ret = -1; } } + _thread_leave_cancellation_point(); return (ret); } #endif diff --git a/lib/libc_r/uthread/uthread_open.c b/lib/libc_r/uthread/uthread_open.c index 2f30ae7..4e9993e 100644 --- a/lib/libc_r/uthread/uthread_open.c +++ b/lib/libc_r/uthread/uthread_open.c @@ -44,10 +44,11 @@ int open(const char *path, int flags,...) { - int fd; - int mode = 0; - int status; - va_list ap; + int fd; + int mode = 0; + va_list ap; + + _thread_enter_cancellation_point(); /* Check if the file is being created: */ if (flags & O_CREAT) { @@ -68,6 +69,8 @@ open(const char *path, int flags,...) 
fd = -1; } + _thread_leave_cancellation_point(); + /* Return the file descriptor or -1 on error: */ return (fd); } diff --git a/lib/libc_r/uthread/uthread_read.c b/lib/libc_r/uthread/uthread_read.c index 0376827..8cbb5be 100644 --- a/lib/libc_r/uthread/uthread_read.c +++ b/lib/libc_r/uthread/uthread_read.c @@ -47,9 +47,13 @@ read(int fd, void *buf, size_t nbytes) int ret; int type; + _thread_enter_cancellation_point(); + /* POSIX says to do just this: */ - if (nbytes == 0) + if (nbytes == 0) { + _thread_leave_cancellation_point(); return (0); + } /* Lock the file descriptor for read: */ if ((ret = _FD_LOCK(fd, FD_READ, NULL)) == 0) { @@ -61,6 +65,7 @@ read(int fd, void *buf, size_t nbytes) /* File is not open for read: */ errno = EBADF; _FD_UNLOCK(fd, FD_READ); + _thread_leave_cancellation_point(); return (-1); } @@ -92,6 +97,7 @@ read(int fd, void *buf, size_t nbytes) } _FD_UNLOCK(fd, FD_READ); } + _thread_leave_cancellation_point(); return (ret); } #endif diff --git a/lib/libc_r/uthread/uthread_setschedparam.c b/lib/libc_r/uthread/uthread_setschedparam.c index bb66fe5..57e24e8 100644 --- a/lib/libc_r/uthread/uthread_setschedparam.c +++ b/lib/libc_r/uthread/uthread_setschedparam.c @@ -38,7 +38,8 @@ #include "pthread_private.h" int -pthread_setschedparam(pthread_t pthread, int policy, struct sched_param *param) +pthread_setschedparam(pthread_t pthread, int policy, + const struct sched_param *param) { int old_prio, in_readyq = 0, ret = 0; diff --git a/lib/libc_r/uthread/uthread_sigwait.c b/lib/libc_r/uthread/uthread_sigwait.c index e08b298..a67a57f 100644 --- a/lib/libc_r/uthread/uthread_sigwait.c +++ b/lib/libc_r/uthread/uthread_sigwait.c @@ -47,6 +47,7 @@ sigwait(const sigset_t * set, int *sig) sigset_t tempset, waitset; struct sigaction act; + _thread_enter_cancellation_point(); /* * Specify the thread kernel signal handler. 
*/ @@ -85,6 +86,7 @@ sigwait(const sigset_t * set, int *sig) /* Return the signal number to the caller: */ *sig = i; + _thread_leave_cancellation_point(); return (0); } @@ -137,6 +139,7 @@ sigwait(const sigset_t * set, int *sig) } } + _thread_leave_cancellation_point(); /* Return the completion status: */ return (ret); } diff --git a/lib/libc_r/uthread/uthread_wait4.c b/lib/libc_r/uthread/uthread_wait4.c index fc6cfba..baa697c 100644 --- a/lib/libc_r/uthread/uthread_wait4.c +++ b/lib/libc_r/uthread/uthread_wait4.c @@ -42,6 +42,7 @@ wait4(pid_t pid, int *istat, int options, struct rusage * rusage) { pid_t ret; + _thread_enter_cancellation_point(); _thread_kern_sig_defer(); /* Perform a non-blocking wait4 syscall: */ @@ -61,6 +62,7 @@ wait4(pid_t pid, int *istat, int options, struct rusage * rusage) } _thread_kern_sig_undefer(); + _thread_leave_cancellation_point(); return (ret); } diff --git a/lib/libc_r/uthread/uthread_write.c b/lib/libc_r/uthread/uthread_write.c index 9292add..09b09cd 100644 --- a/lib/libc_r/uthread/uthread_write.c +++ b/lib/libc_r/uthread/uthread_write.c @@ -50,9 +50,12 @@ write(int fd, const void *buf, size_t nbytes) ssize_t num = 0; ssize_t ret; + _thread_enter_cancellation_point(); /* POSIX says to do just this: */ - if (nbytes == 0) + if (nbytes == 0) { + _thread_leave_cancellation_point(); return (0); + } /* Lock the file descriptor for write: */ if ((ret = _FD_LOCK(fd, FD_WRITE, NULL)) == 0) { @@ -64,7 +67,8 @@ write(int fd, const void *buf, size_t nbytes) /* File is not open for write: */ errno = EBADF; _FD_UNLOCK(fd, FD_WRITE); - return (-1); + _thread_leave_cancellation_point(); + return (-1); } /* Check if file operations are to block */ @@ -129,6 +133,7 @@ write(int fd, const void *buf, size_t nbytes) } _FD_UNLOCK(fd, FD_RDWR); } + _thread_leave_cancellation_point(); return (ret); } #endif -- cgit v1.1
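
The hunks above apply one shape to every blocking system-call wrapper: bracket the existing body with _thread_enter_cancellation_point() and _thread_leave_cancellation_point(), so a deferred cancel is honoured just before the thread blocks and again as soon as it resumes. A minimal sketch of that shape, reconstructed from the fsync() hunk above; the #include lines are assumptions, since the diff context does not show them:

#include <unistd.h>
#ifdef _THREAD_SAFE
#include <pthread.h>
#include "pthread_private.h"

int
fsync(int fd)
{
	int	ret;

	/* Honour any pending (deferred) cancellation before blocking: */
	_thread_enter_cancellation_point();

	/* Serialize access to the descriptor, then do the real syscall: */
	if ((ret = _FD_LOCK(fd, FD_RDWR, NULL)) == 0) {
		ret = _thread_sys_fsync(fd);
		_FD_UNLOCK(fd, FD_RDWR);
	}

	/* Check again in case the cancel arrived while we were blocked: */
	_thread_leave_cancellation_point();

	return (ret);
}
#endif

The same bracketing appears in close(), fcntl(), nanosleep(), open(), read(), write(), sigwait(), wait4() and the new msync() wrapper; only the locked region in the middle differs.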
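
From the application side, the commit gives libc_r the POSIX deferred-cancellation behaviour: pthread_cancel() marks the target, the blocking call it is sitting in (now a cancellation point) is interrupted, the thread's internal fd, file and private-mutex locks are released by _thread_exit_cleanup(), and pthread_join() observes PTHREAD_CANCELED. A small, hypothetical test program in the spirit of the ones mentioned in the commit message — not part of the patch, and assuming only the standard pthread API implemented above:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static void *
worker(void *arg)
{
	int	fd = *(int *)arg;
	char	buf[64];

	/* Deferred cancellation is already the default set by pthread_create(). */
	pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, NULL);

	for (;;) {
		/*
		 * read() is now a cancellation point, so a pthread_cancel()
		 * aimed at this thread takes effect here rather than at an
		 * arbitrary instruction.
		 */
		if (read(fd, buf, sizeof(buf)) <= 0)
			break;
		pthread_testcancel();	/* explicit check between reads */
	}
	return (NULL);
}

int
main(void)
{
	pthread_t	tid;
	void		*ret;
	int		fds[2];

	if (pipe(fds) != 0)
		return (1);

	pthread_create(&tid, NULL, worker, &fds[0]);
	sleep(1);			/* let the worker block in read() */

	pthread_cancel(tid);		/* interrupts the PS_FDR_WAIT state */
	pthread_join(tid, &ret);

	printf("worker %s\n", ret == PTHREAD_CANCELED ?
	    "was cancelled" : "exited normally");
	return (0);
}

A region that must not be unwound (for example, around a non-reentrant library call) can be protected with pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &old) and later restored with pthread_setcancelstate(old, NULL), both of which the new uthread_cancel.c provides.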