Diffstat (limited to 'lib/libc_r')
-rw-r--r--  lib/libc_r/Makefile                                   10
-rw-r--r--  lib/libc_r/man/Makefile.inc                            7
-rw-r--r--  lib/libc_r/man/pthread_cancel.3                       70
-rw-r--r--  lib/libc_r/man/pthread_testcancel.3                  187
-rw-r--r--  lib/libc_r/uthread/Makefile.inc                        3
-rw-r--r--  lib/libc_r/uthread/pthread_private.h                  33
-rw-r--r--  lib/libc_r/uthread/uthread_attr_getinheritsched.c      2
-rw-r--r--  lib/libc_r/uthread/uthread_attr_getschedparam.c        2
-rw-r--r--  lib/libc_r/uthread/uthread_attr_getschedpolicy.c       2
-rw-r--r--  lib/libc_r/uthread/uthread_attr_getscope.c             2
-rw-r--r--  lib/libc_r/uthread/uthread_cancel.c                  179
-rw-r--r--  lib/libc_r/uthread/uthread_close.c                     4
-rw-r--r--  lib/libc_r/uthread/uthread_cond.c                    102
-rw-r--r--  lib/libc_r/uthread/uthread_create.c                    5
-rw-r--r--  lib/libc_r/uthread/uthread_exit.c                     40
-rw-r--r--  lib/libc_r/uthread/uthread_fchflags.c                 25
-rw-r--r--  lib/libc_r/uthread/uthread_fcntl.c                     3
-rw-r--r--  lib/libc_r/uthread/uthread_fd.c                      216
-rw-r--r--  lib/libc_r/uthread/uthread_file.c                    121
-rw-r--r--  lib/libc_r/uthread/uthread_fsync.c                     2
-rw-r--r--  lib/libc_r/uthread/uthread_getschedparam.c             3
-rw-r--r--  lib/libc_r/uthread/uthread_init.c                      5
-rw-r--r--  lib/libc_r/uthread/uthread_join.c                     31
-rw-r--r--  lib/libc_r/uthread/uthread_kern.c                     27
-rw-r--r--  lib/libc_r/uthread/uthread_msync.c                    40
-rw-r--r--  lib/libc_r/uthread/uthread_mutex.c                    47
-rw-r--r--  lib/libc_r/uthread/uthread_nanosleep.c                 2
-rw-r--r--  lib/libc_r/uthread/uthread_open.c                     11
-rw-r--r--  lib/libc_r/uthread/uthread_read.c                      8
-rw-r--r--  lib/libc_r/uthread/uthread_setschedparam.c             3
-rw-r--r--  lib/libc_r/uthread/uthread_sigwait.c                   3
-rw-r--r--  lib/libc_r/uthread/uthread_wait4.c                     2
-rw-r--r--  lib/libc_r/uthread/uthread_write.c                     9
33 files changed, 1099 insertions, 107 deletions
diff --git a/lib/libc_r/Makefile b/lib/libc_r/Makefile
index 28ce615..87110f0 100644
--- a/lib/libc_r/Makefile
+++ b/lib/libc_r/Makefile
@@ -15,6 +15,9 @@ CFLAGS+=-DPTHREAD_KERNEL -D_THREAD_SAFE -I${.CURDIR}/uthread
# thread locking.
CFLAGS+=-D_LOCK_DEBUG
+# enable extra internal consistency checks
+# CFLAGS+=-D_PTHREADS_INVARIANTS
+
AINC= -I${.CURDIR}/../libc/${MACHINE_ARCH} -I${.CURDIR}/uthread
PRECIOUSLIB= yes
@@ -25,11 +28,12 @@ PRECIOUSLIB= yes
HIDDEN_SYSCALLS= accept.o bind.o close.o connect.o dup.o dup2.o \
execve.o fchflags.o fchmod.o fchown.o fcntl.o \
flock.o fpathconf.o fstat.o fstatfs.o fsync.o getdirentries.o \
- getpeername.o getsockname.o getsockopt.o ioctl.o listen.o \
- nanosleep.o nfssvc.o open.o poll.o read.o readv.o recvfrom.o \
+ getlogin.o getpeername.o getsockname.o getsockopt.o ioctl.o listen.o \
+ msync.o nanosleep.o nfssvc.o open.o poll.o read.o readv.o recvfrom.o \
recvmsg.o sched_yield.o select.o sendmsg.o sendto.o \
setsockopt.o shutdown.o sigaction.o sigaltstack.o \
- signanosleep.o sigpending.o sigprocmask.o sigsuspend.o socket.o \
+ signanosleep.o sigpending.o sigprocmask.o sigreturn.o sigsetmask.o \
+ sigsuspend.o socket.o \
socketpair.o wait4.o write.o writev.o
.include "${.CURDIR}/../libc/Makefile.inc"
diff --git a/lib/libc_r/man/Makefile.inc b/lib/libc_r/man/Makefile.inc
index 68129f6..b4c9370 100644
--- a/lib/libc_r/man/Makefile.inc
+++ b/lib/libc_r/man/Makefile.inc
@@ -12,6 +12,7 @@ MAN3+= pthread_cleanup_pop.3 \
pthread_cond_signal.3 \
pthread_cond_timedwait.3 \
pthread_cond_wait.3 \
+ pthread_cancel.3 \
pthread_create.3 \
pthread_detach.3 \
pthread_equal.3 \
@@ -36,4 +37,8 @@ MAN3+= pthread_cleanup_pop.3 \
pthread_rwlockattr_init.3 \
pthread_rwlockattr_setpshared.3 \
pthread_self.3 \
- pthread_setspecific.3
+ pthread_setspecific.3 \
+ pthread_testcancel.3
+
+MLINKS+= pthread_cancel.3 pthread_setcancelstate.3 \
+ pthread_cancel.3 pthread_getcancelstate.3
diff --git a/lib/libc_r/man/pthread_cancel.3 b/lib/libc_r/man/pthread_cancel.3
new file mode 100644
index 0000000..5755f89
--- /dev/null
+++ b/lib/libc_r/man/pthread_cancel.3
@@ -0,0 +1,70 @@
+.\" $FreeBSD$
+.Dd January 17, 1999
+.Dt PTHREAD_CANCEL 3
+.Os
+.Sh NAME
+.Nm pthread_cancel
+.Nd cancel execution of a thread
+.Sh SYNOPSIS
+.Fd #include <pthread.h>
+.Ft int
+.Fn pthread_cancel "pthread_t thread"
+.Sh DESCRIPTION
+The
+.Fn pthread_cancel
+function requests that
+.Fa thread
+be canceled. The target thread's cancelability state and type determine
+when the cancellation takes effect. When the cancellation is acted on,
+the cancellation cleanup handlers for
+.Fa thread
+are called. When the last cancellation cleanup handler returns,
+the thread-specific data destructor functions will be called for
+.Fa thread .
+When the last destructor function returns,
+.Fa thread
+will be terminated.
+.Pp
+The cancellation processing in the target thread runs asynchronously with
+respect to the calling thread returning from
+.Fn pthread_cancel .
+.Pp
+A status of
+.Dv PTHREAD_CANCELED
+is made available to any threads joining with the target. The symbolic
+constant
+.Dv PTHREAD_CANCELED
+expands to a constant expression of type
+.Ft "(void *)" ,
+whose value matches no pointer to an object in memory nor the value
+.Dv NULL .
+.Sh RETURN VALUES
+If successful, the
+.Fn pthread_cancel
+function will return zero. Otherwise an error number will be returned to
+indicate the error.
+.Sh ERRORS
+.Fn pthread_cancel
+will fail if:
+.Bl -tag -width Er
+.It Bq Er ESRCH
+No thread could be found corresponding to that specified by the given
+thread ID.
+.El
+.Sh SEE ALSO
+.Xr pthread_cleanup_pop 3 ,
+.Xr pthread_cleanup_push 3 ,
+.Xr pthread_exit 3 ,
+.Xr pthread_join 3 ,
+.Xr pthread_setcancelstate 3 ,
+.Xr pthread_setcanceltype 3 ,
+.Xr pthread_testcancel 3
+.Sh STANDARDS
+.Fn pthread_cancel
+conforms to ISO/IEC 9945-1 ANSI/IEEE
+.Pq Dq Tn POSIX
+Std 1003.1 Second Edition 1996-07-12.
+.Sh AUTHORS
+This man page was written by
+.An David Leonard <d@openbsd.org>
+for the OpenBSD implementation of pthread_cancel.
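
A minimal usage sketch of the interface documented above (the worker function, its one-second loop, and the diagnostic message are illustrative only and are not part of this change): a deferred cancellation request is delivered at the sleep() cancellation point, and the joining thread observes PTHREAD_CANCELED as the exit status.

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static void *
worker(void *arg)
{
	/* Loop forever; sleep() is a cancellation point, so a pending
	 * cancellation request is acted on inside it. */
	for (;;)
		sleep(1);
	return (NULL);
}

int
main(void)
{
	pthread_t tid;
	void *status;

	pthread_create(&tid, NULL, worker, NULL);
	pthread_cancel(tid);		/* request cancellation */
	pthread_join(tid, &status);	/* wait for it to take effect */
	if (status == PTHREAD_CANCELED)
		printf("worker was canceled\n");
	return (0);
}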
diff --git a/lib/libc_r/man/pthread_testcancel.3 b/lib/libc_r/man/pthread_testcancel.3
new file mode 100644
index 0000000..670c47c
--- /dev/null
+++ b/lib/libc_r/man/pthread_testcancel.3
@@ -0,0 +1,187 @@
+.\" $FreeBSD$
+.Dd January 17, 1999
+.Dt PTHREAD_TESTCANCEL 3
+.Os
+.Sh NAME
+.Nm pthread_setcancelstate ,
+.Nm pthread_setcanceltype ,
+.Nm pthread_testcancel
+.Nd set cancelability state
+.Sh SYNOPSIS
+.Fd #include <pthread.h>
+.Ft int
+.Fn pthread_setcancelstate "int state" "int *oldstate"
+.Ft int
+.Fn pthread_setcanceltype "int type" "int *oldtype"
+.Ft void
+.Fn pthread_testcancel "void"
+.Sh DESCRIPTION
+The
+.Fn pthread_setcancelstate
+function atomically both sets the calling thread's cancelability state
+to the indicated
+.Fa state
+and returns the previous cancelability state at the location referenced by
+.Fa oldstate .
+Legal values for
+.Fa state
+are
+.Dv PTHREAD_CANCEL_ENABLE
+and
+.Dv PTHREAD_CANCEL_DISABLE .
+.Pp
+The
+.Fn pthread_setcanceltype
+function atomically both sets the calling thread's cancelability type
+to the indicated
+.Fa type
+and returns the previous cancelability type at the location referenced by
+.Fa oldtype .
+Legal values for
+.Fa type
+are
+.Dv PTHREAD_CANCEL_DEFERRED
+and
+.Dv PTHREAD_CANCEL_ASYNCHRONOUS .
+.Pp
+The cancelability state and type of any newly created threads, including the
+thread in which
+.Fn main
+was first invoked, are
+.Dv PTHREAD_CANCEL_ENABLE
+and
+.Dv PTHREAD_CANCEL_DEFERRED
+respectively.
+.Pp
+The
+.Fn pthread_testcancel
+function creates a cancellation point in the calling thread. The
+.Fn pthread_testcancel
+function has no effect if cancelability is disabled.
+.Pp
+.Ss Cancelability States
+The cancelability state of a thread determines the action taken upon
+receipt of a cancellation request. The thread may control cancellation in
+a number of ways.
+.Pp
+Each thread maintains its own
+.Dq cancelability state
+which may be encoded in two bits:
+.Bl -hang
+.It Em Cancelability Enable
+When cancelability is
+.Dv PTHREAD_CANCEL_DISABLE ,
+cancellation requests against the target thread are held pending.
+.It Em Cancelability Type
+When cancelability is enabled and the cancelability type is
+.Dv PTHREAD_CANCEL_ASYNCHRONOUS ,
+new or pending cancellation requests may be acted upon at any time.
+When cancelability is enabled and the cancelability type is
+.Dv PTHREAD_CANCEL_DEFERRED ,
+cancellation requests are held pending until a cancellation point (see
+below) is reached. If cancelability is disabled, the setting of the
+cancelability type has no immediate effect as all cancellation requests
+are held pending; however, once cancelability is enabled again the new
+type will be in effect.
+.El
+.Ss Cancellation Points
+Cancellation points will occur when a thread is executing the following
+functions:
+.Fn close ,
+.Fn creat ,
+.Fn fcntl ,
+.Fn fsync ,
+.Fn msync ,
+.Fn nanosleep ,
+.Fn open ,
+.Fn pause ,
+.Fn pthread_cond_timedwait ,
+.Fn pthread_cond_wait ,
+.Fn pthread_join ,
+.Fn pthread_testcancel ,
+.Fn read ,
+.Fn sigwaitinfo ,
+.Fn sigsuspend ,
+.Fn sigwait ,
+.Fn sleep ,
+.Fn system ,
+.Fn tcdrain ,
+.Fn wait ,
+.Fn waitpid ,
+.Fn write .
+.Sh RETURN VALUES
+If successful, the
+.Fn pthread_setcancelstate
+and
+.Fn pthread_setcanceltype
+functions will return zero. Otherwise, an error number shall be returned to
+indicate the error.
+.Pp
+The
+.Fn pthread_setcancelstate
+and
+.Fn pthread_setcanceltype
+functions are used to control the points at which a thread may be
+asynchronously canceled. For cancellation control to be usable in modular
+fashion, some rules must be followed.
+.Pp
+For purposes of this discussion, consider an object to be a generalization
+of a procedure. It is a set of procedures and global variables written as
+a unit and called by clients not known by the object. Objects may depend
+on other objects.
+.Pp
+First, cancelability should only be disabled on entry to an object, never
+explicitly enabled. On exit from an object, the cancelability state should
+always be restored to its value on entry to the object.
+.Pp
+This follows from a modularity argument: if the client of an object (or the
+client of an object that uses that object) has disabled cancelability, it is
+because the client doesn't want to have to worry about how to clean up if the
+thread is canceled while executing some sequence of actions. If an object
+is called in such a state and it enables cancelability and a cancellation
+request is pending for that thread, then the thread will be canceled,
+contrary to the wish of the client that disabled cancelability.
+.Pp
+Second, the cancelability type may be explicitly set to either
+.Em deferred
+or
+.Em asynchronous
+upon entry to an object. But as with the cancelability state, on exit from
+an object that cancelability type should always be restored to its value on
+entry to the object.
+.Pp
+Finally, only functions that are cancel-safe may be called from a thread that
+is asynchronously cancelable.
+.Sh ERRORS
+The function
+.Fn pthread_setcancelstate
+may fail with:
+.Bl -tag -width Er
+.It Bq Er EINVAL
+The specified state is not
+.Dv PTHREAD_CANCEL_ENABLE
+or
+.Dv PTHREAD_CANCEL_DISABLE .
+.El
+.Pp
+The function
+.Fn pthread_setcanceltype
+may fail with:
+.Bl -tag -width Er
+.It Bq Er EINVAL
+The specified type is not
+.Dv PTHREAD_CANCEL_DEFERRED
+or
+.Dv PTHREAD_CANCEL_ASYNCHRONOUS .
+.El
+.Sh SEE ALSO
+.Xr pthread_cancel 3
+.Sh STANDARDS
+.Fn pthread_testcancel
+conforms to ISO/IEC 9945-1 ANSI/IEEE
+.Pq Dq Tn POSIX
+Std 1003.1 Second Edition 1996-07-12.
+.Sh AUTHORS
+This man page was written by
+.An David Leonard <d@openbsd.org>
+for the OpenBSD implementation of pthread_cancel.
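
The modularity rules above reduce to a save-and-restore discipline. The sketch below assumes nothing beyond the documented interface; update_shared_state() and crunch() are placeholder names, not functions provided by this library.

#include <pthread.h>

/* Object-internal routine: disable cancellation on entry and restore the
 * caller's previous state on exit, as the discussion above recommends. */
static void
update_shared_state(void)
{
	int oldstate;

	pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
	/* ... modify data structures that must not be left half-updated ... */
	pthread_setcancelstate(oldstate, NULL);
}

/* A long computation with no natural cancellation points can poll for
 * pending requests explicitly. */
static void
crunch(long iterations)
{
	long i;

	for (i = 0; i < iterations; i++) {
		/* ... one unit of work ... */
		pthread_testcancel();	/* deferred requests are honored here */
	}
}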
diff --git a/lib/libc_r/uthread/Makefile.inc b/lib/libc_r/uthread/Makefile.inc
index d42c04b..4697305 100644
--- a/lib/libc_r/uthread/Makefile.inc
+++ b/lib/libc_r/uthread/Makefile.inc
@@ -24,6 +24,7 @@ SRCS+= \
uthread_attr_setstacksize.c \
uthread_autoinit.cc \
uthread_bind.c \
+ uthread_cancel.c \
uthread_clean.c \
uthread_close.c \
uthread_cond.c \
@@ -37,6 +38,7 @@ SRCS+= \
uthread_equal.c \
uthread_execve.c \
uthread_exit.c \
+ uthread_fchflags.c \
uthread_fchmod.c \
uthread_fchown.c \
uthread_fcntl.c \
@@ -64,6 +66,7 @@ SRCS+= \
uthread_listen.c \
uthread_mattr_init.c \
uthread_mattr_kind_np.c \
+ uthread_msync.c \
uthread_multi_np.c \
uthread_mutex.c \
uthread_mutex_prioceiling.c \
diff --git a/lib/libc_r/uthread/pthread_private.h b/lib/libc_r/uthread/pthread_private.h
index a58c02f..0f27063 100644
--- a/lib/libc_r/uthread/pthread_private.h
+++ b/lib/libc_r/uthread/pthread_private.h
@@ -253,7 +253,7 @@ struct pthread_mutex {
*/
#define PTHREAD_MUTEX_STATIC_INITIALIZER \
{ PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, TAILQ_INITIALIZER, \
- NULL, { NULL }, 0, 0, 0, 0, TAILQ_INITIALIZER, \
+ NULL, { NULL }, MUTEX_FLAGS_PRIVATE, 0, 0, 0, TAILQ_INITIALIZER, \
_SPINLOCK_INITIALIZER }
struct pthread_mutex_attr {
@@ -513,6 +513,15 @@ struct pthread {
*/
int sig_saved;
+ /*
+ * Cancelability flags - the lower 2 bits are used by cancel
+ * definitions in pthread.h
+ */
+#define PTHREAD_AT_CANCEL_POINT 0x0004
+#define PTHREAD_CANCELLING 0x0008
+#define PTHREAD_CANCEL_NEEDED 0x0010
+ int cancelflags;
+
/*
* Current signal mask and pending signals.
*/
@@ -610,15 +619,18 @@ struct pthread {
*/
int yield_on_sig_undefer;
- /* Miscellaneous data. */
+ /* Miscellaneous flags; only set with signals deferred. */
int flags;
#define PTHREAD_FLAGS_PRIVATE 0x0001
#define PTHREAD_EXITING 0x0002
#define PTHREAD_FLAGS_IN_CONDQ 0x0004 /* in condition queue using qe link*/
#define PTHREAD_FLAGS_IN_WORKQ 0x0008 /* in work queue using qe link */
-#define PTHREAD_FLAGS_IN_WAITQ 0x0010 /* in waiting queue using pqe link*/
-#define PTHREAD_FLAGS_IN_PRIOQ 0x0020 /* in priority queue using pqe link*/
-#define PTHREAD_FLAGS_TRACE 0x0040 /* for debugging purposes */
+#define PTHREAD_FLAGS_IN_WAITQ 0x0010 /* in waiting queue using pqe link */
+#define PTHREAD_FLAGS_IN_PRIOQ 0x0020 /* in priority queue using pqe link */
+#define PTHREAD_FLAGS_IN_MUTEXQ 0x0040 /* in mutex queue using qe link */
+#define PTHREAD_FLAGS_IN_FILEQ 0x0080 /* in file lock queue using qe link */
+#define PTHREAD_FLAGS_IN_FDQ 0x0100 /* in fd lock queue using qe link */
+#define PTHREAD_FLAGS_TRACE 0x0200 /* for debugging purposes */
/*
	 * Base priority is the user settable and retrievable priority
@@ -925,6 +937,7 @@ char *__ttyname_r_basic(int, char *, size_t);
char *ttyname_r(int, char *, size_t);
int _find_dead_thread(pthread_t);
int _find_thread(pthread_t);
+void _funlock_owned(pthread_t);
int _thread_create(pthread_t *,const pthread_attr_t *,void *(*start_routine)(void *),void *,pthread_t);
int _thread_fd_lock(int, int, struct timespec *);
int _thread_fd_lock_debug(int, int, struct timespec *,char *fname,int lineno);
@@ -932,8 +945,9 @@ void _dispatch_signals(void);
void _thread_signal(pthread_t, int);
int _mutex_cv_lock(pthread_mutex_t *);
int _mutex_cv_unlock(pthread_mutex_t *);
+void _mutex_notify_priochange(pthread_t);
int _mutex_reinit(pthread_mutex_t *);
-void _mutex_notify_priochange(struct pthread *);
+void _mutex_unlock_private(pthread_t);
int _cond_reinit(pthread_cond_t *);
int _pq_alloc(struct pq_queue *, int, int);
int _pq_init(struct pq_queue *);
@@ -948,8 +962,10 @@ void _waitq_setactive(void);
void _waitq_clearactive(void);
#endif
void _thread_exit(char *, int, char *);
+void _thread_exit_cleanup(void);
void _thread_fd_unlock(int, int);
void _thread_fd_unlock_debug(int, int, char *, int);
+void _thread_fd_unlock_owned(pthread_t);
void *_thread_cleanup(pthread_t);
void _thread_cleanupspecific(void);
void _thread_dump_info(void);
@@ -969,6 +985,9 @@ void _thread_start_sig_handler(void);
void _thread_seterrno(pthread_t,int);
int _thread_fd_table_init(int fd);
pthread_addr_t _thread_gc(pthread_addr_t);
+void _thread_enter_cancellation_point(void);
+void _thread_leave_cancellation_point(void);
+void _thread_cancellation_point(void);
/* #include <signal.h> */
int _thread_sys_sigaction(int, const struct sigaction *, struct sigaction *);
@@ -1148,6 +1167,8 @@ pid_t _thread_sys_wait4(pid_t, int *, int, struct rusage *);
#ifdef _SYS_POLL_H_
int _thread_sys_poll(struct pollfd *, unsigned, int);
#endif
+/* #include <sys/mman.h> */
+int _thread_sys_msync(void *, size_t, int);
__END_DECLS
#endif /* !_PTHREAD_PRIVATE_H */
diff --git a/lib/libc_r/uthread/uthread_attr_getinheritsched.c b/lib/libc_r/uthread/uthread_attr_getinheritsched.c
index f98fde7..7e243ed 100644
--- a/lib/libc_r/uthread/uthread_attr_getinheritsched.c
+++ b/lib/libc_r/uthread/uthread_attr_getinheritsched.c
@@ -37,7 +37,7 @@
#include "pthread_private.h"
int
-pthread_attr_getinheritsched(pthread_attr_t *attr, int *sched_inherit)
+pthread_attr_getinheritsched(const pthread_attr_t *attr, int *sched_inherit)
{
int ret = 0;
diff --git a/lib/libc_r/uthread/uthread_attr_getschedparam.c b/lib/libc_r/uthread/uthread_attr_getschedparam.c
index 61c6891..46586ff 100644
--- a/lib/libc_r/uthread/uthread_attr_getschedparam.c
+++ b/lib/libc_r/uthread/uthread_attr_getschedparam.c
@@ -37,7 +37,7 @@
#include "pthread_private.h"
int
-pthread_attr_getschedparam(pthread_attr_t *attr, struct sched_param *param)
+pthread_attr_getschedparam(const pthread_attr_t *attr, struct sched_param *param)
{
int ret = 0;
diff --git a/lib/libc_r/uthread/uthread_attr_getschedpolicy.c b/lib/libc_r/uthread/uthread_attr_getschedpolicy.c
index 6ebc764..19f835c 100644
--- a/lib/libc_r/uthread/uthread_attr_getschedpolicy.c
+++ b/lib/libc_r/uthread/uthread_attr_getschedpolicy.c
@@ -37,7 +37,7 @@
#include "pthread_private.h"
int
-pthread_attr_getschedpolicy(pthread_attr_t *attr, int *policy)
+pthread_attr_getschedpolicy(const pthread_attr_t *attr, int *policy)
{
int ret = 0;
diff --git a/lib/libc_r/uthread/uthread_attr_getscope.c b/lib/libc_r/uthread/uthread_attr_getscope.c
index f456585..176f01b 100644
--- a/lib/libc_r/uthread/uthread_attr_getscope.c
+++ b/lib/libc_r/uthread/uthread_attr_getscope.c
@@ -37,7 +37,7 @@
#include "pthread_private.h"
int
-pthread_attr_getscope(pthread_attr_t *attr, int *contentionscope)
+pthread_attr_getscope(const pthread_attr_t *attr, int *contentionscope)
{
int ret = 0;
diff --git a/lib/libc_r/uthread/uthread_cancel.c b/lib/libc_r/uthread/uthread_cancel.c
new file mode 100644
index 0000000..bad5533
--- /dev/null
+++ b/lib/libc_r/uthread/uthread_cancel.c
@@ -0,0 +1,179 @@
+/*
+ * David Leonard <d@openbsd.org>, 1999. Public domain.
+ * $FreeBSD$
+ */
+
+#include <sys/errno.h>
+#include <pthread.h>
+#include "pthread_private.h"
+
+int
+pthread_cancel(pthread_t pthread)
+{
+ int ret;
+
+ if ((ret = _find_thread(pthread)) != 0) {
+ /* NOTHING */
+ } else if (pthread->state == PS_DEAD || pthread->state == PS_DEADLOCK) {
+ ret = 0;
+ } else {
+ /* Protect the scheduling queues: */
+ _thread_kern_sig_defer();
+
+ /* Check if we need to kick it back into the run queue: */
+ if ((pthread->cancelflags & PTHREAD_CANCEL_DISABLE) == 0)
+ switch (pthread->state) {
+ case PS_RUNNING:
+ /* No need to resume: */
+ pthread->cancelflags |= PTHREAD_CANCELLING;
+ break;
+
+ case PS_SPINBLOCK:
+ case PS_FDR_WAIT:
+ case PS_FDW_WAIT:
+ case PS_POLL_WAIT:
+ case PS_SELECT_WAIT:
+ /* Remove these threads from the work queue: */
+ if ((pthread->flags & PTHREAD_FLAGS_IN_WORKQ)
+ != 0)
+ PTHREAD_WORKQ_REMOVE(pthread);
+ /* Fall through: */
+ case PS_SIGTHREAD:
+ case PS_SLEEP_WAIT:
+ case PS_WAIT_WAIT:
+ case PS_SIGSUSPEND:
+ case PS_SIGWAIT:
+ case PS_SUSPENDED:
+ /* Interrupt and resume: */
+ pthread->interrupted = 1;
+ pthread->cancelflags |= PTHREAD_CANCELLING;
+ PTHREAD_NEW_STATE(pthread,PS_RUNNING);
+ break;
+
+ case PS_MUTEX_WAIT:
+ case PS_COND_WAIT:
+ case PS_FDLR_WAIT:
+ case PS_FDLW_WAIT:
+ case PS_FILE_WAIT:
+ case PS_JOIN:
+ /*
+ * Threads in these states may be in queues.
+ * In order to preserve queue integrity, the
+ * cancelled thread must remove itself from the
+ * queue. Mark the thread as interrupted and
+ * needing cancellation, and set the state to
+ * running. When the thread resumes, it will
+ * exit after removing itself from the queue.
+ */
+ pthread->interrupted = 1;
+ pthread->cancelflags |= PTHREAD_CANCEL_NEEDED;
+ PTHREAD_NEW_STATE(pthread,PS_RUNNING);
+ break;
+
+ case PS_DEAD:
+ case PS_DEADLOCK:
+ case PS_STATE_MAX:
+ /* Ignore - only here to silence -Wall: */
+ break;
+ }
+ /* Unprotect the scheduling queues: */
+ _thread_kern_sig_undefer();
+
+ ret = 0;
+ }
+ return (ret);
+}
+
+int
+pthread_setcancelstate(int state, int *oldstate)
+{
+ int ostate;
+ int ret;
+
+ ostate = _thread_run->cancelflags & PTHREAD_CANCEL_DISABLE;
+
+ switch (state) {
+ case PTHREAD_CANCEL_ENABLE:
+ if (oldstate != NULL)
+ *oldstate = ostate;
+		_thread_run->cancelflags &= ~PTHREAD_CANCEL_DISABLE;
+ if ((_thread_run->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS) != 0)
+ pthread_testcancel();
+ ret = 0;
+ break;
+ case PTHREAD_CANCEL_DISABLE:
+ if (oldstate != NULL)
+ *oldstate = ostate;
+ _thread_run->cancelflags |= PTHREAD_CANCEL_DISABLE;
+ ret = 0;
+ break;
+ default:
+ ret = EINVAL;
+ }
+
+ return (ret);
+}
+
+int
+pthread_setcanceltype(int type, int *oldtype)
+{
+ int otype;
+ int ret;
+
+ otype = _thread_run->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS;
+ switch (type) {
+ case PTHREAD_CANCEL_ASYNCHRONOUS:
+ if (oldtype != NULL)
+ *oldtype = otype;
+ _thread_run->cancelflags |= PTHREAD_CANCEL_ASYNCHRONOUS;
+ pthread_testcancel();
+ ret = 0;
+ break;
+ case PTHREAD_CANCEL_DEFERRED:
+ if (oldtype != NULL)
+ *oldtype = otype;
+ _thread_run->cancelflags &= ~PTHREAD_CANCEL_ASYNCHRONOUS;
+ ret = 0;
+ break;
+ default:
+ ret = EINVAL;
+ }
+
+ return (ret);
+}
+
+void
+pthread_testcancel(void)
+{
+
+ if (((_thread_run->cancelflags & PTHREAD_CANCEL_DISABLE) == 0) &&
+ ((_thread_run->cancelflags & PTHREAD_CANCELLING) != 0)) {
+ /*
+ * It is possible for this thread to be swapped out
+ * while performing cancellation; do not allow it
+ * to be cancelled again.
+ */
+ _thread_run->cancelflags &= ~PTHREAD_CANCELLING;
+ _thread_exit_cleanup();
+ pthread_exit(PTHREAD_CANCELED);
+ PANIC("cancel");
+ }
+}
+
+void
+_thread_enter_cancellation_point(void)
+{
+
+ /* Look for a cancellation before we block: */
+ pthread_testcancel();
+ _thread_run->cancelflags |= PTHREAD_AT_CANCEL_POINT;
+}
+
+void
+_thread_leave_cancellation_point(void)
+{
+
+ _thread_run->cancelflags &= ~PTHREAD_AT_CANCEL_POINT;
+ /* Look for a cancellation after we unblock: */
+ pthread_testcancel();
+}
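
The _thread_enter_cancellation_point()/_thread_leave_cancellation_point() pair defined above is applied to the blocking system-call wrappers throughout the rest of this change. Condensed to its essentials (the wrapper and syscall names here are placeholders; the real uses follow in uthread_close.c, uthread_fsync.c, uthread_msync.c and friends), the pattern is:

int
blocking_wrapper(int fd)			/* placeholder wrapper name */
{
	int ret;

	/* Flag the thread as being at a cancellation point, acting on any
	 * cancellation request that is already pending: */
	_thread_enter_cancellation_point();

	ret = _thread_sys_blocking_call(fd);	/* placeholder syscall stub */

	/* Clear the flag and check again for a request that arrived while
	 * the thread was blocked: */
	_thread_leave_cancellation_point();

	return (ret);
}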
diff --git a/lib/libc_r/uthread/uthread_close.c b/lib/libc_r/uthread/uthread_close.c
index 9eba433..2580fce 100644
--- a/lib/libc_r/uthread/uthread_close.c
+++ b/lib/libc_r/uthread/uthread_close.c
@@ -45,10 +45,11 @@ close(int fd)
{
int flags;
int ret;
- int status;
struct stat sb;
struct fd_table_entry *entry;
+ _thread_enter_cancellation_point();
+
if ((fd == _thread_kern_pipe[0]) || (fd == _thread_kern_pipe[1])) {
/*
* Don't allow silly programs to close the kernel pipe.
@@ -98,6 +99,7 @@ close(int fd)
/* Close the file descriptor: */
ret = _thread_sys_close(fd);
}
+ _thread_leave_cancellation_point();
return (ret);
}
#endif
diff --git a/lib/libc_r/uthread/uthread_cond.c b/lib/libc_r/uthread/uthread_cond.c
index 2eb05f9..09c5f22 100644
--- a/lib/libc_r/uthread/uthread_cond.c
+++ b/lib/libc_r/uthread/uthread_cond.c
@@ -157,8 +157,7 @@ pthread_cond_destroy(pthread_cond_t * cond)
int
pthread_cond_wait(pthread_cond_t * cond, pthread_mutex_t * mutex)
{
- int rval = 0;
- int status;
+ int rval = 0;
if (cond == NULL)
rval = EINVAL;
@@ -169,6 +168,9 @@ pthread_cond_wait(pthread_cond_t * cond, pthread_mutex_t * mutex)
*/
else if (*cond != NULL ||
(rval = pthread_cond_init(cond,NULL)) == 0) {
+
+ _thread_enter_cancellation_point();
+
/* Lock the condition variable structure: */
_SPINLOCK(&(*cond)->lock);
@@ -193,8 +195,9 @@ pthread_cond_wait(pthread_cond_t * cond, pthread_mutex_t * mutex)
/* Return invalid argument error: */
rval = EINVAL;
} else {
- /* Reset the timeout flag: */
+ /* Reset the timeout and interrupted flags: */
_thread_run->timeout = 0;
+ _thread_run->interrupted = 0;
/*
* Queue the running thread for the condition
@@ -233,7 +236,28 @@ pthread_cond_wait(pthread_cond_t * cond, pthread_mutex_t * mutex)
_thread_kern_sched_state_unlock(PS_COND_WAIT,
&(*cond)->lock, __FILE__, __LINE__);
- /* Lock the mutex: */
+ if (_thread_run->interrupted != 0) {
+ /*
+ * Lock the condition variable
+ * while removing the thread.
+ */
+ _SPINLOCK(&(*cond)->lock);
+
+ cond_queue_remove(*cond,
+ _thread_run);
+
+ /* Check for no more waiters: */
+ if (TAILQ_FIRST(&(*cond)->c_queue) == NULL)
+ (*cond)->c_mutex = NULL;
+
+ _SPINUNLOCK(&(*cond)->lock);
+ }
+
+ /*
+ * Note that even though this thread may have
+ * been canceled, POSIX requires that the mutex
+				 * be reacquired prior to cancellation.
+ */
rval = _mutex_cv_lock(mutex);
}
}
@@ -248,6 +272,13 @@ pthread_cond_wait(pthread_cond_t * cond, pthread_mutex_t * mutex)
rval = EINVAL;
break;
}
+
+ if ((_thread_run->cancelflags & PTHREAD_CANCEL_NEEDED) != 0) {
+ _thread_exit_cleanup();
+ pthread_exit(PTHREAD_CANCELED);
+ }
+
+ _thread_leave_cancellation_point();
}
/* Return the completion status: */
@@ -258,8 +289,7 @@ int
pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
const struct timespec * abstime)
{
- int rval = 0;
- int status;
+ int rval = 0;
if (cond == NULL || abstime == NULL)
rval = EINVAL;
@@ -276,6 +306,9 @@ pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
*/
if (*cond != NULL ||
(rval = pthread_cond_init(cond,NULL)) == 0) {
+
+ _thread_enter_cancellation_point();
+
/* Lock the condition variable structure: */
_SPINLOCK(&(*cond)->lock);
@@ -306,8 +339,9 @@ pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
_thread_run->wakeup_time.tv_nsec =
abstime->tv_nsec;
- /* Reset the timeout flag: */
+ /* Reset the timeout and interrupted flags: */
_thread_run->timeout = 0;
+ _thread_run->interrupted = 0;
/*
* Queue the running thread for the condition
@@ -341,12 +375,16 @@ pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
_thread_kern_sched_state_unlock(PS_COND_WAIT,
&(*cond)->lock, __FILE__, __LINE__);
- /* Check if the wait timedout: */
- if (_thread_run->timeout == 0) {
+ /*
+ * Check if the wait timedout or was
+ * interrupted (canceled):
+ */
+ if ((_thread_run->timeout == 0) &&
+ (_thread_run->interrupted == 0)) {
/* Lock the mutex: */
rval = _mutex_cv_lock(mutex);
- }
- else {
+
+ } else {
/* Lock the condition variable structure: */
_SPINLOCK(&(*cond)->lock);
@@ -369,8 +407,12 @@ pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
rval = ETIMEDOUT;
/*
- * Lock the mutex and ignore
- * any errors:
+ * Lock the mutex and ignore any
+ * errors. Note that even though
+ * this thread may have been
+ * canceled, POSIX requires that
+						 * the mutex be reacquired prior
+ * to cancellation.
*/
(void)_mutex_cv_lock(mutex);
}
@@ -388,6 +430,12 @@ pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
break;
}
+ if ((_thread_run->cancelflags & PTHREAD_CANCEL_NEEDED) != 0) {
+ _thread_exit_cleanup();
+ pthread_exit(PTHREAD_CANCELED);
+ }
+
+ _thread_leave_cancellation_point();
}
/* Return the completion status: */
@@ -416,16 +464,7 @@ pthread_cond_signal(pthread_cond_t * cond)
switch ((*cond)->c_type) {
/* Fast condition variable: */
case COND_TYPE_FAST:
- /*
- * Enter a loop to dequeue threads from the condition
- * queue until we find one that hasn't previously
- * timed out.
- */
- while (((pthread = cond_queue_deq(*cond)) != NULL) &&
- (pthread->timeout != 0)) {
- }
-
- if (pthread != NULL)
+ if ((pthread = cond_queue_deq(*cond)) != NULL)
/* Allow the thread to run: */
PTHREAD_NEW_STATE(pthread,PS_RUNNING);
@@ -482,12 +521,7 @@ pthread_cond_broadcast(pthread_cond_t * cond)
* condition queue:
*/
while ((pthread = cond_queue_deq(*cond)) != NULL) {
- /*
- * The thread is already running if the
- * timeout flag is set.
- */
- if (pthread->timeout == 0)
- PTHREAD_NEW_STATE(pthread,PS_RUNNING);
+ PTHREAD_NEW_STATE(pthread,PS_RUNNING);
}
/* There are no more waiting threads: */
@@ -524,9 +558,17 @@ cond_queue_deq(pthread_cond_t cond)
{
pthread_t pthread;
- if ((pthread = TAILQ_FIRST(&cond->c_queue)) != NULL) {
+ while ((pthread = TAILQ_FIRST(&cond->c_queue)) != NULL) {
TAILQ_REMOVE(&cond->c_queue, pthread, qe);
pthread->flags &= ~PTHREAD_FLAGS_IN_CONDQ;
+ if ((pthread->timeout == 0) && (pthread->interrupted == 0))
+ /*
+ * Only exit the loop when we find a thread
+ * that hasn't timed out or been canceled;
+ * those threads are already running and don't
+ * need their run state changed.
+ */
+ break;
}
return(pthread);
diff --git a/lib/libc_r/uthread/uthread_create.c b/lib/libc_r/uthread/uthread_create.c
index b3ea391..f7c95d0 100644
--- a/lib/libc_r/uthread/uthread_create.c
+++ b/lib/libc_r/uthread/uthread_create.c
@@ -50,9 +50,7 @@ pthread_create(pthread_t * thread, const pthread_attr_t * attr,
void *(*start_routine) (void *), void *arg)
{
int f_gc = 0;
- int i;
int ret = 0;
- int status;
pthread_t gc_thread;
pthread_t new_thread;
pthread_attr_t pattr;
@@ -166,6 +164,9 @@ pthread_create(pthread_t * thread, const pthread_attr_t * attr,
new_thread->start_routine = start_routine;
new_thread->arg = arg;
+ new_thread->cancelflags = PTHREAD_CANCEL_ENABLE |
+ PTHREAD_CANCEL_DEFERRED;
+
/*
* Write a magic value to the thread structure
* to help identify valid ones:
diff --git a/lib/libc_r/uthread/uthread_exit.c b/lib/libc_r/uthread/uthread_exit.c
index 795decc..abe4b27 100644
--- a/lib/libc_r/uthread/uthread_exit.c
+++ b/lib/libc_r/uthread/uthread_exit.c
@@ -34,6 +34,8 @@
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
#include <string.h>
#ifdef _THREAD_SAFE
#include <pthread.h>
@@ -101,17 +103,45 @@ _thread_exit(char *fname, int lineno, char *string)
#endif
}
+/*
+ * Only called when a thread is cancelled. It may be more useful
+ * to call it from pthread_exit() if other ways of asynchronous or
+ * abnormal thread termination can be found.
+ */
+void
+_thread_exit_cleanup(void)
+{
+ /*
+ * POSIX states that cancellation/termination of a thread should
+ * not release any visible resources (such as mutexes) and that
+	 * it is the application's responsibility. Resources that are
+ * internal to the threads library, including file and fd locks,
+ * are not visible to the application and need to be released.
+ */
+ /* Unlock all owned fd locks: */
+ _thread_fd_unlock_owned(_thread_run);
+
+ /* Unlock all owned file locks: */
+ _funlock_owned(_thread_run);
+
+ /* Unlock all private mutexes: */
+ _mutex_unlock_private(_thread_run);
+
+ /*
+ * This still isn't quite correct because we don't account
+ * for held spinlocks (see libc/stdlib/malloc.c).
+ */
+}
+
void
pthread_exit(void *status)
{
- int sig;
- long l;
- pthread_t pthread;
+ pthread_t pthread;
/* Check if this thread is already in the process of exiting: */
if ((_thread_run->flags & PTHREAD_EXITING) != 0) {
char msg[128];
- snprintf(msg,"Thread %p has called pthread_exit() from a destructor. POSIX 1003.1 1996 s16.2.5.2 does not allow this!",_thread_run);
+ snprintf(msg, sizeof(msg), "Thread %p has called pthread_exit() from a destructor. POSIX 1003.1 1996 s16.2.5.2 does not allow this!",_thread_run);
PANIC(msg);
}
@@ -134,7 +164,7 @@ pthread_exit(void *status)
_thread_cleanupspecific();
}
- /* Free thread-specific poll_data structure, if allocated */
+ /* Free thread-specific poll_data structure, if allocated: */
if (_thread_run->poll_data.fds != NULL) {
free(_thread_run->poll_data.fds);
_thread_run->poll_data.fds = NULL;
diff --git a/lib/libc_r/uthread/uthread_fchflags.c b/lib/libc_r/uthread/uthread_fchflags.c
new file mode 100644
index 0000000..ec4e747
--- /dev/null
+++ b/lib/libc_r/uthread/uthread_fchflags.c
@@ -0,0 +1,25 @@
+/*
+ * David Leonard <d@openbsd.org>, 1999. Public Domain.
+ *
+ * $OpenBSD: uthread_fchflags.c,v 1.1 1999/01/08 05:42:18 d Exp $
+ * $FreeBSD$
+ */
+
+#include <sys/stat.h>
+#include <unistd.h>
+#ifdef _THREAD_SAFE
+#include <pthread.h>
+#include "pthread_private.h"
+
+int
+fchflags(int fd, u_long flags)
+{
+ int ret;
+
+ if ((ret = _FD_LOCK(fd, FD_WRITE, NULL)) == 0) {
+ ret = _thread_sys_fchflags(fd, flags);
+ _FD_UNLOCK(fd, FD_WRITE);
+ }
+ return (ret);
+}
+#endif
diff --git a/lib/libc_r/uthread/uthread_fcntl.c b/lib/libc_r/uthread/uthread_fcntl.c
index 9a6293c..878554c 100644
--- a/lib/libc_r/uthread/uthread_fcntl.c
+++ b/lib/libc_r/uthread/uthread_fcntl.c
@@ -47,6 +47,8 @@ fcntl(int fd, int cmd,...)
int ret;
va_list ap;
+ _thread_enter_cancellation_point();
+
/* Lock the file descriptor: */
if ((ret = _FD_LOCK(fd, FD_RDWR, NULL)) == 0) {
/* Initialise the variable argument list: */
@@ -135,6 +137,7 @@ fcntl(int fd, int cmd,...)
/* Unlock the file descriptor: */
_FD_UNLOCK(fd, FD_RDWR);
}
+ _thread_leave_cancellation_point();
/* Return the completion status: */
return (ret);
diff --git a/lib/libc_r/uthread/uthread_fd.c b/lib/libc_r/uthread/uthread_fd.c
index 76069bf..3b69639 100644
--- a/lib/libc_r/uthread/uthread_fd.c
+++ b/lib/libc_r/uthread/uthread_fd.c
@@ -40,9 +40,29 @@
#include <pthread.h>
#include "pthread_private.h"
+#define FDQ_INSERT(q,p) \
+do { \
+ TAILQ_INSERT_TAIL(q,p,qe); \
+ p->flags |= PTHREAD_FLAGS_IN_FDQ; \
+} while (0)
+
+#define FDQ_REMOVE(q,p) \
+do { \
+ if ((p->flags & PTHREAD_FLAGS_IN_FDQ) != 0) { \
+ TAILQ_REMOVE(q,p,qe); \
+ p->flags &= ~PTHREAD_FLAGS_IN_FDQ; \
+ } \
+} while (0)
+
+
/* Static variables: */
static spinlock_t fd_table_lock = _SPINLOCK_INITIALIZER;
+/* Prototypes: */
+static inline pthread_t fd_next_reader(int fd);
+static inline pthread_t fd_next_writer(int fd);
+
+
/*
* This function *must* return -1 and set the thread specific errno
* as a system call. This is because the error return from this
@@ -201,11 +221,11 @@ _thread_fd_unlock(int fd, int lock_type)
* Get the next thread in the queue for a
* read lock on this file descriptor:
*/
- else if ((_thread_fd_table[fd]->r_owner = TAILQ_FIRST(&_thread_fd_table[fd]->r_queue)) == NULL) {
+ else if ((_thread_fd_table[fd]->r_owner = fd_next_reader(fd)) == NULL) {
} else {
/* Remove this thread from the queue: */
- TAILQ_REMOVE(&_thread_fd_table[fd]->r_queue,
- _thread_fd_table[fd]->r_owner, qe);
+ FDQ_REMOVE(&_thread_fd_table[fd]->r_queue,
+ _thread_fd_table[fd]->r_owner);
/*
* Set the state of the new owner of
@@ -243,11 +263,11 @@ _thread_fd_unlock(int fd, int lock_type)
* Get the next thread in the queue for a
* write lock on this file descriptor:
*/
- else if ((_thread_fd_table[fd]->w_owner = TAILQ_FIRST(&_thread_fd_table[fd]->w_queue)) == NULL) {
+ else if ((_thread_fd_table[fd]->w_owner = fd_next_writer(fd)) == NULL) {
} else {
/* Remove this thread from the queue: */
- TAILQ_REMOVE(&_thread_fd_table[fd]->w_queue,
- _thread_fd_table[fd]->w_owner, qe);
+ FDQ_REMOVE(&_thread_fd_table[fd]->w_queue,
+ _thread_fd_table[fd]->w_owner);
/*
* Set the state of the new owner of
@@ -290,6 +310,9 @@ _thread_fd_lock(int fd, int lock_type, struct timespec * timeout)
* entry:
*/
if ((ret = _thread_fd_table_init(fd)) == 0) {
+ /* Clear the interrupted flag: */
+ _thread_run->interrupted = 0;
+
/*
* Lock the file descriptor table entry to prevent
* other threads for clashing with the current
@@ -300,10 +323,10 @@ _thread_fd_lock(int fd, int lock_type, struct timespec * timeout)
/* Check the file descriptor and lock types: */
if (lock_type == FD_READ || lock_type == FD_RDWR) {
/*
- * Enter a loop to wait for the file descriptor to be
- * locked for read for the current thread:
+ * Wait for the file descriptor to be locked
+ * for read for the current thread:
*/
- while (_thread_fd_table[fd]->r_owner != _thread_run) {
+ if (_thread_fd_table[fd]->r_owner != _thread_run) {
/*
* Check if the file descriptor is locked by
* another thread:
@@ -315,7 +338,7 @@ _thread_fd_lock(int fd, int lock_type, struct timespec * timeout)
* queue of threads waiting for a
* read lock on this file descriptor:
*/
- TAILQ_INSERT_TAIL(&_thread_fd_table[fd]->r_queue, _thread_run, qe);
+ FDQ_INSERT(&_thread_fd_table[fd]->r_queue, _thread_run);
/*
* Save the file descriptor details
@@ -350,6 +373,10 @@ _thread_fd_lock(int fd, int lock_type, struct timespec * timeout)
*/
_SPINLOCK(&_thread_fd_table[fd]->lock);
+ if (_thread_run->interrupted != 0) {
+ FDQ_REMOVE(&_thread_fd_table[fd]->r_queue,
+ _thread_run);
+ }
} else {
/*
* The running thread now owns the
@@ -365,8 +392,9 @@ _thread_fd_lock(int fd, int lock_type, struct timespec * timeout)
}
}
- /* Increment the read lock count: */
- _thread_fd_table[fd]->r_lockcount++;
+ if (_thread_fd_table[fd]->r_owner == _thread_run)
+ /* Increment the read lock count: */
+ _thread_fd_table[fd]->r_lockcount++;
}
/* Check the file descriptor and lock types: */
@@ -388,7 +416,7 @@ _thread_fd_lock(int fd, int lock_type, struct timespec * timeout)
* write lock on this file
* descriptor:
*/
- TAILQ_INSERT_TAIL(&_thread_fd_table[fd]->w_queue, _thread_run, qe);
+ FDQ_INSERT(&_thread_fd_table[fd]->w_queue, _thread_run);
/*
* Save the file descriptor details
@@ -421,6 +449,11 @@ _thread_fd_lock(int fd, int lock_type, struct timespec * timeout)
* table entry again:
*/
_SPINLOCK(&_thread_fd_table[fd]->lock);
+
+ if (_thread_run->interrupted != 0) {
+ FDQ_REMOVE(&_thread_fd_table[fd]->w_queue,
+ _thread_run);
+ }
} else {
/*
* The running thread now owns the
@@ -437,12 +470,23 @@ _thread_fd_lock(int fd, int lock_type, struct timespec * timeout)
}
}
- /* Increment the write lock count: */
- _thread_fd_table[fd]->w_lockcount++;
+ if (_thread_fd_table[fd]->w_owner == _thread_run)
+ /* Increment the write lock count: */
+ _thread_fd_table[fd]->w_lockcount++;
}
/* Unlock the file descriptor table entry: */
_SPINUNLOCK(&_thread_fd_table[fd]->lock);
+
+ if (_thread_run->interrupted != 0) {
+ if ((_thread_run->cancelflags & PTHREAD_CANCEL_NEEDED) == 0) {
+ ret = -1;
+ errno = EINTR;
+ } else {
+ _thread_exit_cleanup();
+ pthread_exit(PTHREAD_CANCELED);
+ }
+ }
}
/* Return the completion status: */
@@ -492,11 +536,11 @@ _thread_fd_unlock_debug(int fd, int lock_type, char *fname, int lineno)
* Get the next thread in the queue for a
* read lock on this file descriptor:
*/
- else if ((_thread_fd_table[fd]->r_owner = TAILQ_FIRST(&_thread_fd_table[fd]->r_queue)) == NULL) {
+ else if ((_thread_fd_table[fd]->r_owner = fd_next_reader(fd)) == NULL) {
} else {
/* Remove this thread from the queue: */
- TAILQ_REMOVE(&_thread_fd_table[fd]->r_queue,
- _thread_fd_table[fd]->r_owner, qe);
+ FDQ_REMOVE(&_thread_fd_table[fd]->r_queue,
+ _thread_fd_table[fd]->r_owner);
/*
* Set the state of the new owner of
@@ -534,11 +578,11 @@ _thread_fd_unlock_debug(int fd, int lock_type, char *fname, int lineno)
* Get the next thread in the queue for a
* write lock on this file descriptor:
*/
- else if ((_thread_fd_table[fd]->w_owner = TAILQ_FIRST(&_thread_fd_table[fd]->w_queue)) == NULL) {
+ else if ((_thread_fd_table[fd]->w_owner = fd_next_writer(fd)) == NULL) {
} else {
/* Remove this thread from the queue: */
- TAILQ_REMOVE(&_thread_fd_table[fd]->w_queue,
- _thread_fd_table[fd]->w_owner, qe);
+ FDQ_REMOVE(&_thread_fd_table[fd]->w_queue,
+ _thread_fd_table[fd]->w_owner);
/*
* Set the state of the new owner of
@@ -582,6 +626,9 @@ _thread_fd_lock_debug(int fd, int lock_type, struct timespec * timeout,
* entry:
*/
if ((ret = _thread_fd_table_init(fd)) == 0) {
+ /* Clear the interrupted flag: */
+ _thread_run->interrupted = 0;
+
/*
* Lock the file descriptor table entry to prevent
* other threads for clashing with the current
@@ -607,7 +654,7 @@ _thread_fd_lock_debug(int fd, int lock_type, struct timespec * timeout,
* queue of threads waiting for a
* read lock on this file descriptor:
*/
- TAILQ_INSERT_TAIL(&_thread_fd_table[fd]->r_queue, _thread_run, qe);
+ FDQ_INSERT(&_thread_fd_table[fd]->r_queue, _thread_run);
/*
* Save the file descriptor details
@@ -689,7 +736,7 @@ _thread_fd_lock_debug(int fd, int lock_type, struct timespec * timeout,
* write lock on this file
* descriptor:
*/
- TAILQ_INSERT_TAIL(&_thread_fd_table[fd]->w_queue, _thread_run, qe);
+ FDQ_INSERT(&_thread_fd_table[fd]->w_queue, _thread_run);
/*
* Save the file descriptor details
@@ -753,9 +800,132 @@ _thread_fd_lock_debug(int fd, int lock_type, struct timespec * timeout,
/* Unlock the file descriptor table entry: */
_SPINUNLOCK(&_thread_fd_table[fd]->lock);
+
+ if (_thread_run->interrupted != 0) {
+ if ((_thread_run->cancelflags & PTHREAD_CANCEL_NEEDED) == 0) {
+ ret = -1;
+ errno = EINTR;
+ } else {
+ _thread_exit_cleanup();
+ pthread_exit(PTHREAD_CANCELED);
+ }
+ }
}
/* Return the completion status: */
return (ret);
}
+
+void
+_thread_fd_unlock_owned(pthread_t pthread)
+{
+ int fd;
+
+ for (fd = 0; fd < _thread_dtablesize; fd++) {
+ if ((_thread_fd_table[fd] != NULL) &&
+ ((_thread_fd_table[fd]->r_owner == pthread) ||
+ (_thread_fd_table[fd]->w_owner == pthread))) {
+ /*
+ * Defer signals to protect the scheduling queues
+ * from access by the signal handler:
+ */
+ _thread_kern_sig_defer();
+
+ /*
+ * Lock the file descriptor table entry to prevent
+ * other threads for clashing with the current
+ * thread's accesses:
+ */
+ _SPINLOCK(&_thread_fd_table[fd]->lock);
+
+ /* Check if the thread owns the read lock: */
+ if (_thread_fd_table[fd]->r_owner == pthread) {
+ /* Clear the read lock count: */
+ _thread_fd_table[fd]->r_lockcount = 0;
+
+ /*
+ * Get the next thread in the queue for a
+ * read lock on this file descriptor:
+ */
+ if ((_thread_fd_table[fd]->r_owner = fd_next_reader(fd)) != NULL) {
+ /* Remove this thread from the queue: */
+ FDQ_REMOVE(&_thread_fd_table[fd]->r_queue,
+ _thread_fd_table[fd]->r_owner);
+
+ /*
+ * Set the state of the new owner of
+ * the thread to running:
+ */
+ PTHREAD_NEW_STATE(_thread_fd_table[fd]->r_owner,PS_RUNNING);
+ }
+ }
+
+ /* Check if the thread owns the write lock: */
+ if (_thread_fd_table[fd]->w_owner == pthread) {
+ /* Clear the write lock count: */
+ _thread_fd_table[fd]->w_lockcount = 0;
+
+ /*
+ * Get the next thread in the queue for a
+ * write lock on this file descriptor:
+ */
+ if ((_thread_fd_table[fd]->w_owner = fd_next_writer(fd)) != NULL) {
+ /* Remove this thread from the queue: */
+ FDQ_REMOVE(&_thread_fd_table[fd]->w_queue,
+ _thread_fd_table[fd]->w_owner);
+
+ /*
+ * Set the state of the new owner of
+ * the thread to running:
+ */
+ PTHREAD_NEW_STATE(_thread_fd_table[fd]->w_owner,PS_RUNNING);
+
+ }
+ }
+
+ /* Unlock the file descriptor table entry: */
+ _SPINUNLOCK(&_thread_fd_table[fd]->lock);
+
+ /*
+ * Undefer and handle pending signals, yielding if
+ * necessary.
+ */
+ _thread_kern_sig_undefer();
+ }
+ }
+}
+
+static inline pthread_t
+fd_next_reader(int fd)
+{
+ pthread_t pthread;
+
+ while (((pthread = TAILQ_FIRST(&_thread_fd_table[fd]->r_queue)) != NULL) &&
+ (pthread->interrupted != 0)) {
+ /*
+ * This thread has either been interrupted by a signal or
+ * it has been canceled. Remove it from the queue.
+ */
+ FDQ_REMOVE(&_thread_fd_table[fd]->r_queue, pthread);
+ }
+
+ return (pthread);
+}
+
+static inline pthread_t
+fd_next_writer(int fd)
+{
+ pthread_t pthread;
+
+ while (((pthread = TAILQ_FIRST(&_thread_fd_table[fd]->w_queue)) != NULL) &&
+ (pthread->interrupted != 0)) {
+ /*
+ * This thread has either been interrupted by a signal or
+ * it has been canceled. Remove it from the queue.
+ */
+ FDQ_REMOVE(&_thread_fd_table[fd]->w_queue, pthread);
+ }
+
+ return (pthread);
+}
#endif
diff --git a/lib/libc_r/uthread/uthread_file.c b/lib/libc_r/uthread/uthread_file.c
index 709c8f6..5c89b09 100644
--- a/lib/libc_r/uthread/uthread_file.c
+++ b/lib/libc_r/uthread/uthread_file.c
@@ -225,18 +225,41 @@ _flockfile_debug(FILE * fp, char *fname, int lineno)
/* Unlock the hash table: */
_SPINUNLOCK(&hash_lock);
} else {
+ /* Clear the interrupted flag: */
+ _thread_run->interrupted = 0;
+
+ /*
+ * Prevent being context switched out while
+ * adding this thread to the file lock queue.
+ */
+ _thread_kern_sig_defer();
+
/*
* The file is locked for another thread.
* Append this thread to the queue of
* threads waiting on the lock.
*/
TAILQ_INSERT_TAIL(&p->l_head,_thread_run,qe);
+ _thread_run->flags |= PTHREAD_FLAGS_IN_FILEQ;
/* Unlock the hash table: */
_SPINUNLOCK(&hash_lock);
/* Wait on the FILE lock: */
_thread_kern_sched_state(PS_FILE_WAIT, fname, lineno);
+
+ if ((_thread_run->flags & PTHREAD_FLAGS_IN_FILEQ) != 0) {
+ TAILQ_REMOVE(&p->l_head,_thread_run,qe);
+ _thread_run->flags &= ~PTHREAD_FLAGS_IN_FILEQ;
+ }
+
+ _thread_kern_sig_undefer();
+
+ if (((_thread_run->cancelflags & PTHREAD_CANCEL_NEEDED) != 0) &&
+			    (_thread_run->cancelflags & PTHREAD_CANCEL_DISABLE) == 0) {
+ _thread_exit_cleanup();
+ pthread_exit(PTHREAD_CANCELED);
+ }
}
}
return;
@@ -304,7 +327,6 @@ _ftrylockfile(FILE * fp)
void
_funlockfile(FILE * fp)
{
- int status;
int idx = file_idx(fp);
struct file_lock *p;
@@ -344,18 +366,27 @@ _funlockfile(FILE * fp)
p->count = 0;
/* Get the new owner of the lock: */
- if ((p->owner = TAILQ_FIRST(&p->l_head)) != NULL) {
+ while ((p->owner = TAILQ_FIRST(&p->l_head)) != NULL) {
/* Pop the thread off the queue: */
TAILQ_REMOVE(&p->l_head,p->owner,qe);
-
- /*
- * This is the first lock for the new
- * owner:
- */
- p->count = 1;
-
- /* Allow the new owner to run: */
- PTHREAD_NEW_STATE(p->owner,PS_RUNNING);
+ p->owner->flags &= ~PTHREAD_FLAGS_IN_FILEQ;
+
+ if (p->owner->interrupted == 0) {
+ /*
+ * This is the first lock for
+ * the new owner:
+ */
+ p->count = 1;
+
+ /* Allow the new owner to run: */
+ PTHREAD_NEW_STATE(p->owner,PS_RUNNING);
+
+ /* End the loop when we find a
+ * thread that hasn't been
+ * cancelled or interrupted;
+ */
+ break;
+ }
}
}
}
@@ -372,4 +403,72 @@ _funlockfile(FILE * fp)
return;
}
+void
+_funlock_owned(pthread_t pthread)
+{
+ int idx;
+ struct file_lock *p, *next_p;
+
+ /*
+ * Defer signals to protect the scheduling queues from
+ * access by the signal handler:
+ */
+ _thread_kern_sig_defer();
+
+ /* Lock the hash table: */
+ _SPINLOCK(&hash_lock);
+
+ for (idx = 0; idx < NUM_HEADS; idx++) {
+ /* Check the static file lock first: */
+ p = &flh[idx].fl;
+ next_p = LIST_FIRST(&flh[idx].head);
+
+ while (p != NULL) {
+ if (p->owner == pthread) {
+ /*
+ * The running thread will release the
+ * lock now:
+ */
+ p->count = 0;
+
+ /* Get the new owner of the lock: */
+ while ((p->owner = TAILQ_FIRST(&p->l_head)) != NULL) {
+ /* Pop the thread off the queue: */
+ TAILQ_REMOVE(&p->l_head,p->owner,qe);
+ p->owner->flags &= ~PTHREAD_FLAGS_IN_FILEQ;
+
+ if (p->owner->interrupted == 0) {
+ /*
+ * This is the first lock for
+ * the new owner:
+ */
+ p->count = 1;
+
+ /* Allow the new owner to run: */
+ PTHREAD_NEW_STATE(p->owner,PS_RUNNING);
+
+ /* End the loop when we find a
+ * thread that hasn't been
+ * cancelled or interrupted;
+ */
+ break;
+ }
+ }
+ }
+ p = next_p;
+ if (next_p != NULL)
+ next_p = LIST_NEXT(next_p, entry);
+ }
+ }
+
+ /* Unlock the hash table: */
+ _SPINUNLOCK(&hash_lock);
+
+ /*
+ * Undefer and handle pending signals, yielding if
+ * necessary:
+ */
+ _thread_kern_sig_undefer();
+}
+
#endif
diff --git a/lib/libc_r/uthread/uthread_fsync.c b/lib/libc_r/uthread/uthread_fsync.c
index 3287edd..21c3b56 100644
--- a/lib/libc_r/uthread/uthread_fsync.c
+++ b/lib/libc_r/uthread/uthread_fsync.c
@@ -41,10 +41,12 @@ fsync(int fd)
{
int ret;
+ _thread_enter_cancellation_point();
if ((ret = _FD_LOCK(fd, FD_RDWR, NULL)) == 0) {
ret = _thread_sys_fsync(fd);
_FD_UNLOCK(fd, FD_RDWR);
}
+ _thread_leave_cancellation_point();
return (ret);
}
#endif
diff --git a/lib/libc_r/uthread/uthread_getschedparam.c b/lib/libc_r/uthread/uthread_getschedparam.c
index b6c0c35..09d8c1b 100644
--- a/lib/libc_r/uthread/uthread_getschedparam.c
+++ b/lib/libc_r/uthread/uthread_getschedparam.c
@@ -37,7 +37,8 @@
#include "pthread_private.h"
int
-pthread_getschedparam(pthread_t pthread, int *policy, struct sched_param *param)
+pthread_getschedparam(pthread_t pthread, int *policy,
+ struct sched_param *param)
{
int ret;
diff --git a/lib/libc_r/uthread/uthread_init.c b/lib/libc_r/uthread/uthread_init.c
index a2ce493..bab7e5b 100644
--- a/lib/libc_r/uthread/uthread_init.c
+++ b/lib/libc_r/uthread/uthread_init.c
@@ -42,6 +42,7 @@
#include <paths.h>
#include <poll.h>
#include <unistd.h>
+#include <sys/ioctl.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#include <sys/ttycom.h>
@@ -199,6 +200,10 @@ _thread_init(void)
*/
_thread_initial->magic = PTHREAD_MAGIC;
+ /* Set the initial cancel state */
+ _thread_initial->cancelflags = PTHREAD_CANCEL_ENABLE |
+ PTHREAD_CANCEL_DEFERRED;
+
/* Default the priority of the initial thread: */
_thread_initial->base_priority = PTHREAD_DEFAULT_PRIORITY;
_thread_initial->active_priority = PTHREAD_DEFAULT_PRIORITY;
diff --git a/lib/libc_r/uthread/uthread_join.c b/lib/libc_r/uthread/uthread_join.c
index 8b00082..d149cf1 100644
--- a/lib/libc_r/uthread/uthread_join.c
+++ b/lib/libc_r/uthread/uthread_join.c
@@ -41,16 +41,22 @@ pthread_join(pthread_t pthread, void **thread_return)
{
int ret = 0;
pthread_t pthread1 = NULL;
+
+ _thread_enter_cancellation_point();
/* Check if the caller has specified an invalid thread: */
- if (pthread == NULL || pthread->magic != PTHREAD_MAGIC)
+ if (pthread == NULL || pthread->magic != PTHREAD_MAGIC) {
/* Invalid thread: */
+ _thread_leave_cancellation_point();
return(EINVAL);
+ }
/* Check if the caller has specified itself: */
- if (pthread == _thread_run)
+ if (pthread == _thread_run) {
/* Avoid a deadlock condition: */
+ _thread_leave_cancellation_point();
return(EDEADLK);
+ }
/*
* Find the thread in the list of active threads or in the
@@ -71,12 +77,31 @@ pthread_join(pthread_t pthread, void **thread_return)
/* Check if the thread is not dead: */
else if (pthread->state != PS_DEAD) {
+ /* Clear the interrupted flag: */
+ _thread_run->interrupted = 0;
+
+ /*
+ * Protect against being context switched out while
+ * adding this thread to the join queue.
+ */
+ _thread_kern_sig_defer();
+
/* Add the running thread to the join queue: */
TAILQ_INSERT_TAIL(&(pthread->join_queue), _thread_run, qe);
/* Schedule the next thread: */
_thread_kern_sched_state(PS_JOIN, __FILE__, __LINE__);
+ if (_thread_run->interrupted != 0)
+ TAILQ_REMOVE(&(pthread->join_queue), _thread_run, qe);
+
+ _thread_kern_sig_undefer();
+
+ if ((_thread_run->cancelflags & PTHREAD_CANCEL_NEEDED) != 0) {
+ _thread_exit_cleanup();
+ pthread_exit(PTHREAD_CANCELED);
+ }
+
/* Check if the thread is not detached: */
if ((pthread->attr.flags & PTHREAD_DETACHED) == 0) {
/* Check if the return value is required: */
@@ -93,6 +118,8 @@ pthread_join(pthread_t pthread, void **thread_return)
/* Return the thread's return value: */
*thread_return = pthread->ret;
+ _thread_leave_cancellation_point();
+
/* Return the completion status: */
return (ret);
}
diff --git a/lib/libc_r/uthread/uthread_kern.c b/lib/libc_r/uthread/uthread_kern.c
index a4717fa..cdb84a5 100644
--- a/lib/libc_r/uthread/uthread_kern.c
+++ b/lib/libc_r/uthread/uthread_kern.c
@@ -67,11 +67,10 @@ _thread_kern_sched(ucontext_t * scp)
char *fdata;
#endif
pthread_t pthread, pthread_h = NULL;
- pthread_t last_thread = NULL;
struct itimerval itimer;
struct timespec ts, ts1;
struct timeval tv, tv1;
- int i, set_timer = 0;
+ int set_timer = 0;
/*
* Flag the pthread kernel as executing scheduler code
@@ -109,6 +108,20 @@ __asm__("fnsave %0": :"m"(*fdata));
*/
_thread_kern_in_sched = 0;
+ if (((_thread_run->cancelflags & PTHREAD_AT_CANCEL_POINT) == 0) &&
+ ((_thread_run->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS) != 0)) {
+ /*
+		 * Cancellations override signals.
+ *
+ * Stick a cancellation point at the start of
+ * each async-cancellable thread's resumption.
+ *
+ * We allow threads woken at cancel points to do their
+ * own checks.
+ */
+ pthread_testcancel();
+ }
+
if (_sched_switch_hook != NULL) {
/* Run the installed switch hook: */
thread_run_switch_hook(_last_user_thread, _thread_run);
@@ -161,6 +174,7 @@ __asm__("fnsave %0": :"m"(*fdata));
*/
switch (_thread_run->state) {
case PS_DEAD:
+ case PS_STATE_MAX: /* to silence -Wall */
/*
* Dead threads are not placed in any queue:
*/
@@ -249,6 +263,7 @@ __asm__("fnsave %0": :"m"(*fdata));
/* Insert into the work queue: */
PTHREAD_WORKQ_INSERT(_thread_run);
+ break;
}
}
@@ -627,14 +642,12 @@ _thread_kern_sched_state_unlock(enum pthread_state state,
static void
_thread_kern_poll(int wait_reqd)
{
- char bufr[128];
int count = 0;
int i, found;
int kern_pipe_added = 0;
int nfds = 0;
int timeout_ms = 0;
- struct pthread *pthread, *pthread_next;
- ssize_t num;
+ struct pthread *pthread;
struct timespec ts;
struct timeval tv;
@@ -1103,10 +1116,10 @@ thread_run_switch_hook(pthread_t thread_out, pthread_t thread_in)
pthread_t tid_in = thread_in;
if ((tid_out != NULL) &&
- (tid_out->flags & PTHREAD_FLAGS_PRIVATE != 0))
+ (tid_out->flags & PTHREAD_FLAGS_PRIVATE) != 0)
tid_out = NULL;
if ((tid_in != NULL) &&
- (tid_in->flags & PTHREAD_FLAGS_PRIVATE != 0))
+ (tid_in->flags & PTHREAD_FLAGS_PRIVATE) != 0)
tid_in = NULL;
if ((_sched_switch_hook != NULL) && (tid_out != tid_in)) {
diff --git a/lib/libc_r/uthread/uthread_msync.c b/lib/libc_r/uthread/uthread_msync.c
new file mode 100644
index 0000000..209286d
--- /dev/null
+++ b/lib/libc_r/uthread/uthread_msync.c
@@ -0,0 +1,40 @@
+/*
+ * David Leonard <d@openbsd.org>, 1999. Public Domain.
+ *
+ * $OpenBSD: uthread_msync.c,v 1.2 1999/06/09 07:16:17 d Exp $
+ *
+ * $FreeBSD$
+ */
+
+#include <sys/types.h>
+#include <sys/mman.h>
+#ifdef _THREAD_SAFE
+#include <pthread.h>
+#include "pthread_private.h"
+
+int
+msync(addr, len, flags)
+ void *addr;
+ size_t len;
+ int flags;
+{
+ int ret;
+
+ /*
+ * XXX This is quite pointless unless we know how to get the
+ * file descriptor associated with the memory, and lock it for
+ * write. The only real use of this wrapper is to guarantee
+ * a cancellation point, as per the standard. sigh.
+ */
+
+ /* This is a cancellation point: */
+ _thread_enter_cancellation_point();
+
+ ret = _thread_sys_msync(addr, len, flags);
+
+ /* No longer in a cancellation point: */
+ _thread_leave_cancellation_point();
+
+ return (ret);
+}
+#endif
diff --git a/lib/libc_r/uthread/uthread_mutex.c b/lib/libc_r/uthread/uthread_mutex.c
index a402d4b..b2a06f2 100644
--- a/lib/libc_r/uthread/uthread_mutex.c
+++ b/lib/libc_r/uthread/uthread_mutex.c
@@ -94,7 +94,8 @@ _mutex_reinit(pthread_mutex_t * mutex)
TAILQ_INIT(&(*mutex)->m_queue);
(*mutex)->m_owner = NULL;
(*mutex)->m_data.m_count = 0;
- (*mutex)->m_flags = MUTEX_FLAGS_INITED;
+ (*mutex)->m_flags &= MUTEX_FLAGS_PRIVATE;
+ (*mutex)->m_flags |= MUTEX_FLAGS_INITED;
(*mutex)->m_refcount = 0;
(*mutex)->m_prio = 0;
(*mutex)->m_saved_prio = 0;
@@ -428,6 +429,9 @@ pthread_mutex_lock(pthread_mutex_t * mutex)
_MUTEX_INIT_LINK(*mutex);
}
+ /* Reset the interrupted flag: */
+ _thread_run->interrupted = 0;
+
/* Process according to mutex type: */
switch ((*mutex)->m_protocol) {
/* Default POSIX mutex: */
@@ -602,6 +606,13 @@ pthread_mutex_lock(pthread_mutex_t * mutex)
break;
}
+ /*
+ * Check to see if this thread was interrupted and
+ * is still in the mutex queue of waiting threads:
+ */
+ if (_thread_run->interrupted != 0)
+ mutex_queue_remove(*mutex, _thread_run);
+
/* Unlock the mutex structure: */
_SPINUNLOCK(&(*mutex)->lock);
@@ -610,6 +621,11 @@ pthread_mutex_lock(pthread_mutex_t * mutex)
* necessary:
*/
_thread_kern_sig_undefer();
+
+ if ((_thread_run->cancelflags & PTHREAD_CANCEL_NEEDED) != 0) {
+ _thread_exit_cleanup();
+ pthread_exit(PTHREAD_CANCELED);
+ }
}
/* Return the completion status: */
@@ -1314,6 +1330,18 @@ mutex_rescan_owned (pthread_t pthread, pthread_mutex_t mutex)
}
}
+void
+_mutex_unlock_private(pthread_t pthread)
+{
+ struct pthread_mutex *m, *m_next;
+
+ for (m = TAILQ_FIRST(&pthread->mutexq); m != NULL; m = m_next) {
+ m_next = TAILQ_NEXT(m, m_qe);
+ if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
+ pthread_mutex_unlock(&m);
+ }
+}
+
/*
* Dequeue a waiting thread from the head of a mutex queue in descending
* priority order.
@@ -1323,8 +1351,17 @@ mutex_queue_deq(pthread_mutex_t mutex)
{
pthread_t pthread;
- if ((pthread = TAILQ_FIRST(&mutex->m_queue)) != NULL)
+ while ((pthread = TAILQ_FIRST(&mutex->m_queue)) != NULL) {
TAILQ_REMOVE(&mutex->m_queue, pthread, qe);
+ pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ;
+
+ /*
+ * Only exit the loop if the thread hasn't been
+ * cancelled.
+ */
+ if (pthread->interrupted == 0)
+ break;
+ }
return(pthread);
}
@@ -1335,7 +1372,10 @@ mutex_queue_deq(pthread_mutex_t mutex)
static inline void
mutex_queue_remove(pthread_mutex_t mutex, pthread_t pthread)
{
- TAILQ_REMOVE(&mutex->m_queue, pthread, qe);
+ if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) {
+ TAILQ_REMOVE(&mutex->m_queue, pthread, qe);
+ pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ;
+ }
}
/*
@@ -1359,6 +1399,7 @@ mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread)
tid = TAILQ_NEXT(tid, qe);
TAILQ_INSERT_BEFORE(tid, pthread, qe);
}
+ pthread->flags |= PTHREAD_FLAGS_IN_MUTEXQ;
}
#endif
diff --git a/lib/libc_r/uthread/uthread_nanosleep.c b/lib/libc_r/uthread/uthread_nanosleep.c
index a922040..e4772b4 100644
--- a/lib/libc_r/uthread/uthread_nanosleep.c
+++ b/lib/libc_r/uthread/uthread_nanosleep.c
@@ -47,6 +47,7 @@ nanosleep(const struct timespec * time_to_sleep,
struct timespec remaining_time;
struct timeval tv;
+ _thread_enter_cancellation_point();
/* Check if the time to sleep is legal: */
if (time_to_sleep == NULL || time_to_sleep->tv_sec < 0 ||
time_to_sleep->tv_nsec < 0 || time_to_sleep->tv_nsec >= 1000000000) {
@@ -116,6 +117,7 @@ nanosleep(const struct timespec * time_to_sleep,
ret = -1;
}
}
+ _thread_leave_cancellation_point();
return (ret);
}
#endif
diff --git a/lib/libc_r/uthread/uthread_open.c b/lib/libc_r/uthread/uthread_open.c
index 2f30ae7..4e9993e 100644
--- a/lib/libc_r/uthread/uthread_open.c
+++ b/lib/libc_r/uthread/uthread_open.c
@@ -44,10 +44,11 @@
int
open(const char *path, int flags,...)
{
- int fd;
- int mode = 0;
- int status;
- va_list ap;
+ int fd;
+ int mode = 0;
+ va_list ap;
+
+ _thread_enter_cancellation_point();
/* Check if the file is being created: */
if (flags & O_CREAT) {
@@ -68,6 +69,8 @@ open(const char *path, int flags,...)
fd = -1;
}
+ _thread_leave_cancellation_point();
+
/* Return the file descriptor or -1 on error: */
return (fd);
}
diff --git a/lib/libc_r/uthread/uthread_read.c b/lib/libc_r/uthread/uthread_read.c
index 0376827..8cbb5be 100644
--- a/lib/libc_r/uthread/uthread_read.c
+++ b/lib/libc_r/uthread/uthread_read.c
@@ -47,9 +47,13 @@ read(int fd, void *buf, size_t nbytes)
int ret;
int type;
+ _thread_enter_cancellation_point();
+
/* POSIX says to do just this: */
- if (nbytes == 0)
+ if (nbytes == 0) {
+ _thread_leave_cancellation_point();
return (0);
+ }
/* Lock the file descriptor for read: */
if ((ret = _FD_LOCK(fd, FD_READ, NULL)) == 0) {
@@ -61,6 +65,7 @@ read(int fd, void *buf, size_t nbytes)
/* File is not open for read: */
errno = EBADF;
_FD_UNLOCK(fd, FD_READ);
+ _thread_leave_cancellation_point();
return (-1);
}
@@ -92,6 +97,7 @@ read(int fd, void *buf, size_t nbytes)
}
_FD_UNLOCK(fd, FD_READ);
}
+ _thread_leave_cancellation_point();
return (ret);
}
#endif
diff --git a/lib/libc_r/uthread/uthread_setschedparam.c b/lib/libc_r/uthread/uthread_setschedparam.c
index bb66fe5..57e24e8 100644
--- a/lib/libc_r/uthread/uthread_setschedparam.c
+++ b/lib/libc_r/uthread/uthread_setschedparam.c
@@ -38,7 +38,8 @@
#include "pthread_private.h"
int
-pthread_setschedparam(pthread_t pthread, int policy, struct sched_param *param)
+pthread_setschedparam(pthread_t pthread, int policy,
+ const struct sched_param *param)
{
int old_prio, in_readyq = 0, ret = 0;
diff --git a/lib/libc_r/uthread/uthread_sigwait.c b/lib/libc_r/uthread/uthread_sigwait.c
index e08b298..a67a57f 100644
--- a/lib/libc_r/uthread/uthread_sigwait.c
+++ b/lib/libc_r/uthread/uthread_sigwait.c
@@ -47,6 +47,7 @@ sigwait(const sigset_t * set, int *sig)
sigset_t tempset, waitset;
struct sigaction act;
+ _thread_enter_cancellation_point();
/*
* Specify the thread kernel signal handler.
*/
@@ -85,6 +86,7 @@ sigwait(const sigset_t * set, int *sig)
/* Return the signal number to the caller: */
*sig = i;
+ _thread_leave_cancellation_point();
return (0);
}
@@ -137,6 +139,7 @@ sigwait(const sigset_t * set, int *sig)
}
}
+ _thread_leave_cancellation_point();
/* Return the completion status: */
return (ret);
}
diff --git a/lib/libc_r/uthread/uthread_wait4.c b/lib/libc_r/uthread/uthread_wait4.c
index fc6cfba..baa697c 100644
--- a/lib/libc_r/uthread/uthread_wait4.c
+++ b/lib/libc_r/uthread/uthread_wait4.c
@@ -42,6 +42,7 @@ wait4(pid_t pid, int *istat, int options, struct rusage * rusage)
{
pid_t ret;
+ _thread_enter_cancellation_point();
_thread_kern_sig_defer();
/* Perform a non-blocking wait4 syscall: */
@@ -61,6 +62,7 @@ wait4(pid_t pid, int *istat, int options, struct rusage * rusage)
}
_thread_kern_sig_undefer();
+ _thread_leave_cancellation_point();
return (ret);
}
diff --git a/lib/libc_r/uthread/uthread_write.c b/lib/libc_r/uthread/uthread_write.c
index 9292add..09b09cd 100644
--- a/lib/libc_r/uthread/uthread_write.c
+++ b/lib/libc_r/uthread/uthread_write.c
@@ -50,9 +50,12 @@ write(int fd, const void *buf, size_t nbytes)
ssize_t num = 0;
ssize_t ret;
+ _thread_enter_cancellation_point();
/* POSIX says to do just this: */
- if (nbytes == 0)
+ if (nbytes == 0) {
+ _thread_leave_cancellation_point();
return (0);
+ }
/* Lock the file descriptor for write: */
if ((ret = _FD_LOCK(fd, FD_WRITE, NULL)) == 0) {
@@ -64,7 +67,8 @@ write(int fd, const void *buf, size_t nbytes)
/* File is not open for write: */
errno = EBADF;
_FD_UNLOCK(fd, FD_WRITE);
- return (-1);
+ _thread_leave_cancellation_point();
+ return (-1);
}
/* Check if file operations are to block */
@@ -129,6 +133,7 @@ write(int fd, const void *buf, size_t nbytes)
}
_FD_UNLOCK(fd, FD_RDWR);
}
+ _thread_leave_cancellation_point();
return (ret);
}
#endif