author    jb <jb@FreeBSD.org>    1999-06-20 08:28:48 +0000
committer jb <jb@FreeBSD.org>    1999-06-20 08:28:48 +0000
commit    5e17641ba44c5097022dd1815914b2c362554edc (patch)
tree      fead0b6cac3fd121466f4c964b2f486beb2a0128 /lib
parent    aa61764b70413d3d4c1c15cc23915b1b8bfcc2eb (diff)
In the words of the author:
o The polling mechanism for I/O readiness was changed from select() to poll(). In addition, a wrapped version of poll() is now provided.
o The wrapped select routine now converts each fd_set to a poll array so that the thread scheduler doesn't have to perform a bitwise search for selected fds each time file descriptors are polled for I/O readiness.
o The thread scheduler was modified to use a new queue (_workq) for threads that need work. Threads waiting for I/O readiness and spinblocks are added to the work queue in addition to the waiting queue. This reduces the time spent forming/searching the array of file descriptors being polled.
o The waiting queue (_waitingq) is now maintained in order of thread wakeup time. This allows the thread scheduler to find the nearest wakeup time by looking at the first thread in the queue instead of searching the entire queue.
o Removed file descriptor locking for select/poll routines. An application should not rely on the threads library for providing this locking; if necessary, the application should use mutexes to protect selecting/polling of file descriptors.
o Retrieve and use the kernel clock rate/resolution at startup instead of hardcoding the clock resolution to 10 msec (tested with kernel running at 1000 HZ).
o All queues have been changed to use queue.h macros. These include the queues of all threads, dead threads, and threads waiting for file descriptor locks.
o Added reinitialization of the GC mutex and condition variable after a fork. Also prevented reallocation of the ready queue after a fork.
o Prevented the wrapped close routine from closing the thread kernel pipes.
o Initialized file descriptor table for stdio entries at thread init.
o Provided additional flags to indicate to which queues threads belong.
o Moved TAILQ initialization for statically allocated mutex and condition variables to after the spinlock.
o Added dispatching of signals to pthread_kill. Removing the dispatching of signals from thread activation broke sigsuspend when pthread_kill was used to send a signal to a thread.
o Temporarily set the state of a thread to PS_SUSPENDED when it is first created and placed in the list of threads so that it will not be accidentally scheduled before becoming a member of one of the scheduling queues.
o Change the signal handler to queue signals to the thread kernel pipe if the scheduling queues are protected. When scheduling queues are unprotected, signals are then dequeued and handled.
o Ensured that all installed signal handlers block the scheduling signal and that the scheduling signal handler blocks all other signals. This ensures that the signal handler is only interruptible for and by non-scheduling signals. An atomic lock is used to decide which instance of the signal handler will handle pending signals.
o Removed _lock_thread_list and _unlock_thread_list as they are no longer used to protect the thread list.
o Added missing RCS IDs to modified files.
o Added checks for appropriate queue membership and activity when adding, removing, and searching the scheduling queues. These checks add very little overhead and are enabled when compiled with _PTHREADS_INVARIANTS defined. Suggested and implemented by Tor Egge with some modification by me.
o Close a race condition in uthread_close. (Tor Egge)
o Protect the scheduling queues while modifying them in pthread_cond_signal and _thread_fd_unlock. (Tor Egge)
o Ensure that when a thread gets a mutex, the mutex is on that thread's list of owned mutexes. (Tor Egge)
o Set the kernel-in-scheduler flag in _thread_kern_sched_state and _thread_kern_sched_state_unlock to prevent a scheduling signal from calling the scheduler again. (Tor Egge)
o Don't use the TAILQ_FOREACH macro while searching the waiting queue for threads in a sigwait state, because a change of state destroys the TAILQ link. It is actually safe to do so, though, because once a sigwaiting thread is found, the loop ends and the function returns. (Tor Egge)
o When dispatching signals to threads, make the thread inherit the signal deferral flag of the currently running thread. (Tor Egge)

Submitted by: Daniel Eischen <eischen@vigrid.com> and Tor Egge <Tor.Egge@fast.no>
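[Editorial aside] To make the fd_set-to-pollfd conversion above concrete, here is a minimal sketch of the kind of translation the wrapped select routine performs before the scheduler polls for I/O readiness. The helper name and layout are invented for illustration; the real code is in uthread_select.c in the diff below.

    #include <poll.h>
    #include <sys/select.h>

    /*
     * Illustrative sketch (not the actual libc_r internals): build a
     * pollfd array from the three fd_sets so the scheduler can poll
     * once per cycle instead of bit-scanning each fd_set.
     */
    static int
    fdsets_to_pollfds(int nfds, fd_set *rfds, fd_set *wfds, fd_set *efds,
        struct pollfd *pfds)
    {
            int fd, n = 0;

            for (fd = 0; fd < nfds; fd++) {
                    short events = 0;

                    if (rfds != NULL && FD_ISSET(fd, rfds))
                            events |= POLLIN;
                    if (wfds != NULL && FD_ISSET(fd, wfds))
                            events |= POLLOUT;
                    if (efds != NULL && FD_ISSET(fd, efds))
                            events |= POLLPRI;
                    if (events != 0) {
                            pfds[n].fd = fd;
                            pfds[n].events = events;
                            pfds[n].revents = 0;
                            n++;
                    }
            }
            /* Number of pollfd entries filled in. */
            return (n);
    }

On the way back out, the revents of each entry would be mapped into the caller's fd_sets, which is why the scheduler itself no longer needs a bitwise search.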
Diffstat (limited to 'lib')
-rw-r--r--  lib/libc_r/uthread/Makefile.inc | 4
-rw-r--r--  lib/libc_r/uthread/pthread_private.h | 257
-rw-r--r--  lib/libc_r/uthread/uthread_close.c | 30
-rw-r--r--  lib/libc_r/uthread/uthread_cond.c | 72
-rw-r--r--  lib/libc_r/uthread/uthread_create.c | 38
-rw-r--r--  lib/libc_r/uthread/uthread_detach.c | 19
-rw-r--r--  lib/libc_r/uthread/uthread_execve.c | 4
-rw-r--r--  lib/libc_r/uthread/uthread_exit.c | 39
-rw-r--r--  lib/libc_r/uthread/uthread_fd.c | 70
-rw-r--r--  lib/libc_r/uthread/uthread_file.c | 14
-rw-r--r--  lib/libc_r/uthread/uthread_find_thread.c | 34
-rw-r--r--  lib/libc_r/uthread/uthread_fork.c | 96
-rw-r--r--  lib/libc_r/uthread/uthread_gc.c | 111
-rw-r--r--  lib/libc_r/uthread/uthread_info.c | 31
-rw-r--r--  lib/libc_r/uthread/uthread_init.c | 53
-rw-r--r--  lib/libc_r/uthread/uthread_join.c | 3
-rw-r--r--  lib/libc_r/uthread/uthread_kern.c | 1481
-rw-r--r--  lib/libc_r/uthread/uthread_kill.c | 65
-rw-r--r--  lib/libc_r/uthread/uthread_mutex.c | 192
-rw-r--r--  lib/libc_r/uthread/uthread_poll.c | 95
-rw-r--r--  lib/libc_r/uthread/uthread_priority_queue.c | 192
-rw-r--r--  lib/libc_r/uthread/uthread_queue.c | 124
-rw-r--r--  lib/libc_r/uthread/uthread_resume_np.c | 14
-rw-r--r--  lib/libc_r/uthread/uthread_select.c | 184
-rw-r--r--  lib/libc_r/uthread/uthread_setschedparam.c | 16
-rw-r--r--  lib/libc_r/uthread/uthread_sig.c | 210
-rw-r--r--  lib/libc_r/uthread/uthread_sigaction.c | 4
-rw-r--r--  lib/libc_r/uthread/uthread_sigwait.c | 37
-rw-r--r--  lib/libc_r/uthread/uthread_suspend_np.c | 14
-rw-r--r--  lib/libkse/thread/Makefile.inc | 4
-rw-r--r--  lib/libkse/thread/thr_close.c | 30
-rw-r--r--  lib/libkse/thread/thr_cond.c | 72
-rw-r--r--  lib/libkse/thread/thr_create.c | 38
-rw-r--r--  lib/libkse/thread/thr_detach.c | 19
-rw-r--r--  lib/libkse/thread/thr_exit.c | 39
-rw-r--r--  lib/libkse/thread/thr_find_thread.c | 34
-rw-r--r--  lib/libkse/thread/thr_fork.c | 96
-rw-r--r--  lib/libkse/thread/thr_info.c | 31
-rw-r--r--  lib/libkse/thread/thr_init.c | 53
-rw-r--r--  lib/libkse/thread/thr_join.c | 3
-rw-r--r--  lib/libkse/thread/thr_kern.c | 1481
-rw-r--r--  lib/libkse/thread/thr_kill.c | 65
-rw-r--r--  lib/libkse/thread/thr_mutex.c | 192
-rw-r--r--  lib/libkse/thread/thr_poll.c | 95
-rw-r--r--  lib/libkse/thread/thr_priority_queue.c | 192
-rw-r--r--  lib/libkse/thread/thr_private.h | 257
-rw-r--r--  lib/libkse/thread/thr_resume_np.c | 14
-rw-r--r--  lib/libkse/thread/thr_select.c | 184
-rw-r--r--  lib/libkse/thread/thr_setschedparam.c | 16
-rw-r--r--  lib/libkse/thread/thr_sig.c | 210
-rw-r--r--  lib/libkse/thread/thr_sigaction.c | 4
-rw-r--r--  lib/libkse/thread/thr_sigwait.c | 37
-rw-r--r--  lib/libkse/thread/thr_suspend_np.c | 14
-rw-r--r--  lib/libpthread/thread/Makefile.inc | 4
-rw-r--r--  lib/libpthread/thread/thr_close.c | 30
-rw-r--r--  lib/libpthread/thread/thr_cond.c | 72
-rw-r--r--  lib/libpthread/thread/thr_create.c | 38
-rw-r--r--  lib/libpthread/thread/thr_detach.c | 19
-rw-r--r--  lib/libpthread/thread/thr_exit.c | 39
-rw-r--r--  lib/libpthread/thread/thr_find_thread.c | 34
-rw-r--r--  lib/libpthread/thread/thr_fork.c | 96
-rw-r--r--  lib/libpthread/thread/thr_gc.c | 111
-rw-r--r--  lib/libpthread/thread/thr_info.c | 31
-rw-r--r--  lib/libpthread/thread/thr_init.c | 53
-rw-r--r--  lib/libpthread/thread/thr_join.c | 3
-rw-r--r--  lib/libpthread/thread/thr_kern.c | 1481
-rw-r--r--  lib/libpthread/thread/thr_kill.c | 65
-rw-r--r--  lib/libpthread/thread/thr_mutex.c | 192
-rw-r--r--  lib/libpthread/thread/thr_poll.c | 95
-rw-r--r--  lib/libpthread/thread/thr_priority_queue.c | 192
-rw-r--r--  lib/libpthread/thread/thr_private.h | 257
-rw-r--r--  lib/libpthread/thread/thr_resume_np.c | 14
-rw-r--r--  lib/libpthread/thread/thr_select.c | 184
-rw-r--r--  lib/libpthread/thread/thr_setschedparam.c | 16
-rw-r--r--  lib/libpthread/thread/thr_sig.c | 210
-rw-r--r--  lib/libpthread/thread/thr_sigaction.c | 4
-rw-r--r--  lib/libpthread/thread/thr_sigwait.c | 37
-rw-r--r--  lib/libpthread/thread/thr_suspend_np.c | 14
78 files changed, 5601 insertions, 4373 deletions
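[Editorial aside] Many hunks below replace _thread_kern_sched_defer()/_thread_kern_sched_undefer() with _thread_kern_sig_defer()/_thread_kern_sig_undefer(). The sketch below shows the recursive deferral pattern those routines implement, per the commit message; the struct and helper names are hypothetical stand-ins, the real state being sig_defer_count and yield_on_sig_undefer in struct pthread.

    /*
     * Hypothetical sketch of recursive signal deferral; not the actual
     * uthread_kern.c code.
     */
    struct thread {
            int sig_defer_count;        /* nesting depth of deferrals */
            int yield_on_sig_undefer;   /* signal arrived while deferred */
    };

    static struct thread *curthread;    /* stand-in for _thread_run */

    static void
    sig_defer(void)
    {
            /* Deferral may nest; only the outermost undefer re-enables. */
            curthread->sig_defer_count++;
    }

    static void
    sig_undefer(void)
    {
            if (curthread->sig_defer_count > 0 &&
                --curthread->sig_defer_count == 0 &&
                curthread->yield_on_sig_undefer != 0) {
                    curthread->yield_on_sig_undefer = 0;
                    /* Hand control to the scheduler here (omitted). */
            }
    }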
diff --git a/lib/libc_r/uthread/Makefile.inc b/lib/libc_r/uthread/Makefile.inc
index 16799cf..22e9548 100644
--- a/lib/libc_r/uthread/Makefile.inc
+++ b/lib/libc_r/uthread/Makefile.inc
@@ -1,4 +1,4 @@
-# $Id: Makefile.inc,v 1.16 1998/09/30 06:36:55 jb Exp $
+# $Id: Makefile.inc,v 1.17 1999/03/23 05:07:54 jb Exp $
# uthread sources
.PATH: ${.CURDIR}/uthread
@@ -73,8 +73,8 @@ SRCS+= \
uthread_once.c \
uthread_open.c \
uthread_pipe.c \
+ uthread_poll.c \
uthread_priority_queue.c \
- uthread_queue.c \
uthread_read.c \
uthread_readv.c \
uthread_recvfrom.c \
diff --git a/lib/libc_r/uthread/pthread_private.h b/lib/libc_r/uthread/pthread_private.h
index 6cacb27..036ea80 100644
--- a/lib/libc_r/uthread/pthread_private.h
+++ b/lib/libc_r/uthread/pthread_private.h
@@ -31,6 +31,7 @@
*
* Private thread definitions for the uthread kernel.
*
+ * $Id$
*/
#ifndef _PTHREAD_PRIVATE_H
@@ -68,33 +69,88 @@
/*
- * Priority queue manipulation macros:
+ * Priority queue manipulation macros (using pqe link):
*/
#define PTHREAD_PRIOQ_INSERT_HEAD(thrd) _pq_insert_head(&_readyq,thrd)
#define PTHREAD_PRIOQ_INSERT_TAIL(thrd) _pq_insert_tail(&_readyq,thrd)
#define PTHREAD_PRIOQ_REMOVE(thrd) _pq_remove(&_readyq,thrd)
-#define PTHREAD_PRIOQ_FIRST _pq_first(&_readyq)
+#define PTHREAD_PRIOQ_FIRST() _pq_first(&_readyq)
/*
- * Waiting queue manipulation macros:
+ * Waiting queue manipulation macros (using pqe link):
*/
-#define PTHREAD_WAITQ_INSERT(thrd) TAILQ_INSERT_TAIL(&_waitingq,thrd,pqe)
+#if defined(_PTHREADS_INVARIANTS)
+#define PTHREAD_WAITQ_REMOVE(thrd) _waitq_remove(thrd)
+#define PTHREAD_WAITQ_INSERT(thrd) _waitq_insert(thrd)
+#define PTHREAD_WAITQ_CLEARACTIVE() _waitq_clearactive()
+#define PTHREAD_WAITQ_SETACTIVE() _waitq_setactive()
+#else
#define PTHREAD_WAITQ_REMOVE(thrd) TAILQ_REMOVE(&_waitingq,thrd,pqe)
+#define PTHREAD_WAITQ_INSERT(thrd) do { \
+ if ((thrd)->wakeup_time.tv_sec == -1) \
+ TAILQ_INSERT_TAIL(&_waitingq,thrd,pqe); \
+ else { \
+ pthread_t tid = TAILQ_FIRST(&_waitingq); \
+ while ((tid != NULL) && (tid->wakeup_time.tv_sec != -1) && \
+ ((tid->wakeup_time.tv_sec < (thrd)->wakeup_time.tv_sec) || \
+ ((tid->wakeup_time.tv_sec == (thrd)->wakeup_time.tv_sec) && \
+ (tid->wakeup_time.tv_nsec <= (thrd)->wakeup_time.tv_nsec)))) \
+ tid = TAILQ_NEXT(tid, pqe); \
+ if (tid == NULL) \
+ TAILQ_INSERT_TAIL(&_waitingq,thrd,pqe); \
+ else \
+ TAILQ_INSERT_BEFORE(tid,thrd,pqe); \
+ } \
+} while (0)
+#define PTHREAD_WAITQ_CLEARACTIVE()
+#define PTHREAD_WAITQ_SETACTIVE()
+#endif
+
+/*
+ * Work queue manipulation macros (using qe link):
+ */
+#define PTHREAD_WORKQ_INSERT(thrd) do { \
+ TAILQ_INSERT_TAIL(&_workq,thrd,qe); \
+ (thrd)->flags |= PTHREAD_FLAGS_IN_WORKQ; \
+} while (0)
+#define PTHREAD_WORKQ_REMOVE(thrd) do { \
+ TAILQ_REMOVE(&_workq,thrd,qe); \
+ (thrd)->flags &= ~PTHREAD_FLAGS_IN_WORKQ; \
+} while (0)
+
/*
* State change macro without scheduling queue change:
*/
-#define PTHREAD_SET_STATE(thrd, newstate) { \
+#define PTHREAD_SET_STATE(thrd, newstate) do { \
(thrd)->state = newstate; \
(thrd)->fname = __FILE__; \
(thrd)->lineno = __LINE__; \
-}
+} while (0)
/*
* State change macro with scheduling queue change - This must be
* called with preemption deferred (see thread_kern_sched_[un]defer).
*/
-#define PTHREAD_NEW_STATE(thrd, newstate) { \
+#if defined(_PTHREADS_INVARIANTS)
+#define PTHREAD_NEW_STATE(thrd, newstate) do { \
+ if (_thread_kern_new_state != 0) \
+ PANIC("Recursive PTHREAD_NEW_STATE"); \
+ _thread_kern_new_state = 1; \
+ if ((thrd)->state != newstate) { \
+ if ((thrd)->state == PS_RUNNING) { \
+ PTHREAD_PRIOQ_REMOVE(thrd); \
+ PTHREAD_WAITQ_INSERT(thrd); \
+ } else if (newstate == PS_RUNNING) { \
+ PTHREAD_WAITQ_REMOVE(thrd); \
+ PTHREAD_PRIOQ_INSERT_TAIL(thrd); \
+ } \
+ } \
+ _thread_kern_new_state = 0; \
+ PTHREAD_SET_STATE(thrd, newstate); \
+} while (0)
+#else
+#define PTHREAD_NEW_STATE(thrd, newstate) do { \
if ((thrd)->state != newstate) { \
if ((thrd)->state == PS_RUNNING) { \
PTHREAD_PRIOQ_REMOVE(thrd); \
@@ -105,7 +161,8 @@
} \
} \
PTHREAD_SET_STATE(thrd, newstate); \
-}
+} while (0)
+#endif
/*
* Define the signals to be used for scheduling.
@@ -119,15 +176,6 @@
#endif
/*
- * Queue definitions.
- */
-struct pthread_queue {
- struct pthread *q_next;
- struct pthread *q_last;
- void *q_data;
-};
-
-/*
* Priority queues.
*
* XXX It'd be nice if these were contained in uthread_priority_queue.[ch].
@@ -147,14 +195,9 @@ typedef struct pq_queue {
/*
- * Static queue initialization values.
- */
-#define PTHREAD_QUEUE_INITIALIZER { NULL, NULL, NULL }
-
-/*
* TailQ initialization values.
*/
-#define TAILQ_INITIALIZER { NULL, NULL }
+#define TAILQ_INITIALIZER { NULL, NULL }
/*
* Mutex definitions.
@@ -257,7 +300,7 @@ struct pthread_cond_attr {
* Static cond initialization values.
*/
#define PTHREAD_COND_STATIC_INITIALIZER \
- { COND_TYPE_FAST, PTHREAD_QUEUE_INITIALIZER, NULL, NULL, \
+ { COND_TYPE_FAST, TAILQ_INITIALIZER, NULL, NULL, \
0, _SPINLOCK_INITIALIZER }
/*
@@ -303,12 +346,6 @@ struct pthread_attr {
#define CLOCK_RES_NSEC 10000000
/*
- * Number of microseconds between incremental priority updates for
- * threads that are ready to run, but denied being run.
- */
-#define INC_PRIO_USEC 500000
-
-/*
* Time slice period in microseconds.
*/
#define TIMESLICE_USEC 100000
@@ -345,6 +382,7 @@ enum pthread_state {
PS_FDR_WAIT,
PS_FDW_WAIT,
PS_FILE_WAIT,
+ PS_POLL_WAIT,
PS_SELECT_WAIT,
PS_SLEEP_WAIT,
PS_WAIT_WAIT,
@@ -377,8 +415,8 @@ struct fd_table_entry {
* state of the lock on the file descriptor.
*/
spinlock_t lock;
- struct pthread_queue r_queue; /* Read queue. */
- struct pthread_queue w_queue; /* Write queue. */
+ TAILQ_HEAD(, pthread) r_queue; /* Read queue. */
+ TAILQ_HEAD(, pthread) w_queue; /* Write queue. */
struct pthread *r_owner; /* Ptr to thread owning read lock. */
struct pthread *w_owner; /* Ptr to thread owning write lock. */
char *r_fname; /* Ptr to read lock source file name */
@@ -390,11 +428,9 @@ struct fd_table_entry {
int flags; /* Flags used in open. */
};
-struct pthread_select_data {
+struct pthread_poll_data {
int nfds;
- fd_set readfds;
- fd_set writefds;
- fd_set exceptfds;
+ struct pollfd *fds;
};
union pthread_wait_data {
@@ -406,7 +442,7 @@ union pthread_wait_data {
short branch; /* Line number, for debugging. */
char *fname; /* Source file name for debugging.*/
} fd;
- struct pthread_select_data * select_data;
+ struct pthread_poll_data * poll_data;
spinlock_t *spinlock;
};
@@ -427,15 +463,11 @@ struct pthread {
*/
spinlock_t lock;
- /*
- * Pointer to the next thread in the thread linked list.
- */
- struct pthread *nxt;
+ /* Queue entry for list of all threads: */
+ TAILQ_ENTRY(pthread) tle;
- /*
- * Pointer to the next thread in the dead thread linked list.
- */
- struct pthread *nxt_dead;
+ /* Queue entry for list of dead threads: */
+ TAILQ_ENTRY(pthread) dle;
/*
* Thread start routine, argument, stack pointer and thread
@@ -514,25 +546,25 @@ struct pthread {
*/
int error;
- /* Join queue for waiting threads: */
- struct pthread_queue join_queue;
+ /* Join queue head and link for waiting threads: */
+ TAILQ_HEAD(join_head, pthread) join_queue;
/*
- * The current thread can belong to only one scheduling queue
- * at a time (ready or waiting queue). It can also belong to
- * a queue of threads waiting on mutexes or condition variables.
- * Use pqe for the scheduling queue link (both ready and waiting),
- * and qe for other links (mutexes and condition variables).
+ * The current thread can belong to only one scheduling queue at
+ * a time (ready or waiting queue). It can also belong to (only)
+ * one of:
*
- * Pointer to queue (if any) on which the current thread is waiting.
+ * o A queue of threads waiting for a mutex
+ * o A queue of threads waiting for a condition variable
+ * o A queue of threads waiting for another thread to terminate
+ * (the join queue above)
+ * o A queue of threads waiting for a file descriptor lock
+ * o A queue of threads needing work done by the kernel thread
+ * (waiting for a spinlock or file I/O)
*
- * XXX The queuing should be changed to use the TAILQ entry below.
- * XXX For the time being, it's hybrid.
+ * Use pqe for the scheduling queue link (both ready and waiting),
+ * and qe for other links.
*/
- struct pthread_queue *queue;
-
- /* Pointer to next element in queue. */
- struct pthread *qnxt;
/* Priority queue entry for this thread: */
TAILQ_ENTRY(pthread) pqe;
@@ -544,6 +576,11 @@ struct pthread {
union pthread_wait_data data;
/*
+ * Allocated for converting select into poll.
+ */
+ struct pthread_poll_data poll_data;
+
+ /*
* Set to TRUE if a blocking operation was
* interrupted by a signal:
*/
@@ -553,23 +590,26 @@ struct pthread {
int signo;
/*
- * Set to non-zero when this thread has deferred thread
- * scheduling. We allow for recursive deferral.
+ * Set to non-zero when this thread has deferred signals.
+ * We allow for recursive deferral.
*/
- int sched_defer_count;
+ int sig_defer_count;
/*
* Set to TRUE if this thread should yield after undeferring
- * thread scheduling.
+ * signals.
*/
- int yield_on_sched_undefer;
+ int yield_on_sig_undefer;
/* Miscellaneous data. */
int flags;
#define PTHREAD_FLAGS_PRIVATE 0x0001
#define PTHREAD_EXITING 0x0002
-#define PTHREAD_FLAGS_QUEUED 0x0004 /* in queue (qe is used) */
-#define PTHREAD_FLAGS_TRACE 0x0008
+#define PTHREAD_FLAGS_IN_CONDQ 0x0004 /* in condition queue using qe link*/
+#define PTHREAD_FLAGS_IN_WORKQ 0x0008 /* in work queue using qe link */
+#define PTHREAD_FLAGS_IN_WAITQ 0x0010 /* in waiting queue using pqe link*/
+#define PTHREAD_FLAGS_IN_PRIOQ 0x0020 /* in priority queue using pqe link*/
+#define PTHREAD_FLAGS_TRACE 0x0040 /* for debugging purposes */
/*
* Base priority is the user setable and retrievable priority
@@ -592,7 +632,7 @@ struct pthread {
/*
* Active priority is always the maximum of the threads base
* priority and inherited priority. When there is a change
- * in either the real or inherited priority, the active
+ * in either the base or inherited priority, the active
* priority must be recalculated.
*/
char active_priority;
@@ -649,10 +689,10 @@ SCLASS struct pthread * volatile _thread_single
;
#endif
-/* Ptr to the first thread in the thread linked list: */
-SCLASS struct pthread * volatile _thread_link_list
+/* List of all threads: */
+SCLASS TAILQ_HEAD(, pthread) _thread_list
#ifdef GLOBAL_PTHREAD_PRIVATE
-= NULL;
+= TAILQ_HEAD_INITIALIZER(_thread_list);
#else
;
#endif
@@ -661,7 +701,7 @@ SCLASS struct pthread * volatile _thread_link_list
* Array of kernel pipe file descriptors that are used to ensure that
* no signals are missed in calls to _select.
*/
-SCLASS int _thread_kern_pipe[2]
+SCLASS int _thread_kern_pipe[2]
#ifdef GLOBAL_PTHREAD_PRIVATE
= {
-1,
@@ -670,7 +710,7 @@ SCLASS int _thread_kern_pipe[2]
#else
;
#endif
-SCLASS int _thread_kern_in_select
+SCLASS int volatile _queue_signals
#ifdef GLOBAL_PTHREAD_PRIVATE
= 0;
#else
@@ -692,9 +732,9 @@ SCLASS struct timeval kern_inc_prio_time
#endif
/* Dead threads: */
-SCLASS struct pthread * volatile _thread_dead
+SCLASS TAILQ_HEAD(, pthread) _dead_list
#ifdef GLOBAL_PTHREAD_PRIVATE
-= NULL;
+= TAILQ_HEAD_INITIALIZER(_dead_list);
#else
;
#endif
@@ -747,6 +787,14 @@ SCLASS struct fd_table_entry **_thread_fd_table
;
#endif
+/* Table for polling file descriptors: */
+SCLASS struct pollfd *_thread_pfd_table
+#ifdef GLOBAL_PTHREAD_PRIVATE
+= NULL;
+#else
+;
+#endif
+
SCLASS const int dtablecount
#ifdef GLOBAL_PTHREAD_PRIVATE
= 4096/sizeof(struct fd_table_entry);
@@ -760,6 +808,13 @@ SCLASS int _thread_dtablesize /* Descriptor table size. */
;
#endif
+SCLASS int _clock_res_nsec /* Clock resolution in nsec. */
+#ifdef GLOBAL_PTHREAD_PRIVATE
+= CLOCK_RES_NSEC;
+#else
+;
+#endif
+
/* Garbage collector mutex and condition variable. */
SCLASS pthread_mutex_t _gc_mutex
#ifdef GLOBAL_PTHREAD_PRIVATE
@@ -783,8 +838,20 @@ struct sigaction _thread_sigact[NSIG];
SCLASS pq_queue_t _readyq;
SCLASS TAILQ_HEAD(, pthread) _waitingq;
-/* Indicates that the waitingq now has threads ready to run. */
-SCLASS volatile int _waitingq_check_reqd
+/*
+ * Work queue:
+ */
+SCLASS TAILQ_HEAD(, pthread) _workq;
+
+/* Tracks the number of threads blocked while waiting for a spinlock. */
+SCLASS volatile int _spinblock_count
+#ifdef GLOBAL_PTHREAD_PRIVATE
+= 0
+#endif
+;
+
+/* Indicates that the signal queue needs to be checked. */
+SCLASS volatile int _sigq_check_reqd
#ifdef GLOBAL_PTHREAD_PRIVATE
= 0
#endif
@@ -797,6 +864,13 @@ SCLASS pthread_switch_routine_t _sched_switch_hook
#endif
;
+/* Used for _PTHREADS_INVARIANTS checking. */
+SCLASS int _thread_kern_new_state
+#ifdef GLOBAL_PTHREAD_PRIVATE
+= 0
+#endif
+;
+
/* Undefine the storage class specifier: */
#undef SCLASS
@@ -825,18 +899,23 @@ int _thread_fd_lock(int, int, struct timespec *);
int _thread_fd_lock_debug(int, int, struct timespec *,char *fname,int lineno);
void _dispatch_signals(void);
void _thread_signal(pthread_t, int);
-void _lock_thread(void);
-void _lock_thread_list(void);
-void _unlock_thread(void);
-void _unlock_thread_list(void);
int _mutex_cv_lock(pthread_mutex_t *);
int _mutex_cv_unlock(pthread_mutex_t *);
+int _mutex_reinit(pthread_mutex_t *);
void _mutex_notify_priochange(struct pthread *);
-int _pq_init(struct pq_queue *pq, int, int);
+int _cond_reinit(pthread_cond_t *);
+int _pq_alloc(struct pq_queue *, int, int);
+int _pq_init(struct pq_queue *);
void _pq_remove(struct pq_queue *pq, struct pthread *);
void _pq_insert_head(struct pq_queue *pq, struct pthread *);
void _pq_insert_tail(struct pq_queue *pq, struct pthread *);
struct pthread *_pq_first(struct pq_queue *pq);
+#if defined(_PTHREADS_INVARIANTS)
+void _waitq_insert(pthread_t pthread);
+void _waitq_remove(pthread_t pthread);
+void _waitq_setactive(void);
+void _waitq_clearactive(void);
+#endif
void _thread_exit(char *, int, char *);
void _thread_fd_unlock(int, int);
void _thread_fd_unlock_debug(int, int, char *, int);
@@ -849,18 +928,15 @@ void _thread_kern_sched_state(enum pthread_state,char *fname,int lineno);
void _thread_kern_sched_state_unlock(enum pthread_state state,
spinlock_t *lock, char *fname, int lineno);
void _thread_kern_set_timeout(struct timespec *);
-void _thread_kern_sched_defer(void);
-void _thread_kern_sched_undefer(void);
+void _thread_kern_sig_defer(void);
+void _thread_kern_sig_undefer(void);
void _thread_sig_handler(int, int, struct sigcontext *);
+void _thread_sig_handle(int, struct sigcontext *);
+void _thread_sig_init(void);
void _thread_start(void);
void _thread_start_sig_handler(void);
void _thread_seterrno(pthread_t,int);
-void _thread_queue_init(struct pthread_queue *);
-void _thread_queue_enq(struct pthread_queue *, struct pthread *);
-int _thread_queue_remove(struct pthread_queue *, struct pthread *);
int _thread_fd_table_init(int fd);
-struct pthread *_thread_queue_get(struct pthread_queue *);
-struct pthread *_thread_queue_deq(struct pthread_queue *);
pthread_addr_t _thread_gc(pthread_addr_t);
/* #include <signal.h> */
@@ -1036,6 +1112,11 @@ pid_t _thread_sys_waitpid(pid_t, int *, int);
pid_t _thread_sys_wait3(int *, int, struct rusage *);
pid_t _thread_sys_wait4(pid_t, int *, int, struct rusage *);
#endif
+
+/* #include <poll.h> */
+#ifdef _SYS_POLL_H_
+int _thread_sys_poll(struct pollfd *, unsigned, int);
+#endif
__END_DECLS
#endif /* !_PTHREAD_PRIVATE_H */
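[Editorial aside] The header above drops the hand-rolled pthread_queue in favor of queue.h tail queues throughout. For readers unfamiliar with the idiom, here is a self-contained example of the TAILQ macros, analogous to the tle/dle/pqe/qe links above; it is illustrative only and not part of the patch.

    #include <sys/queue.h>
    #include <stdio.h>

    struct node {
            int value;
            TAILQ_ENTRY(node) link;     /* list linkage, like tle/dle/pqe/qe */
    };

    TAILQ_HEAD(node_head, node);

    int
    main(void)
    {
            struct node_head head = TAILQ_HEAD_INITIALIZER(head);
            struct node a = { 1 }, b = { 2 };
            struct node *np;

            TAILQ_INSERT_TAIL(&head, &a, link);
            TAILQ_INSERT_HEAD(&head, &b, link);
            TAILQ_FOREACH(np, &head, link)
                    printf("%d\n", np->value);  /* prints 2 then 1 */
            return (0);
    }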
diff --git a/lib/libc_r/uthread/uthread_close.c b/lib/libc_r/uthread/uthread_close.c
index 7e21853..4f55b0a 100644
--- a/lib/libc_r/uthread/uthread_close.c
+++ b/lib/libc_r/uthread/uthread_close.c
@@ -29,7 +29,9 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
+ * $Id$
*/
+#include <errno.h>
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>
@@ -45,12 +47,21 @@ close(int fd)
int ret;
int status;
struct stat sb;
+ struct fd_table_entry *entry;
- /* Lock the file descriptor while the file is closed: */
- if ((ret = _FD_LOCK(fd, FD_RDWR, NULL)) == 0) {
- /* Get file descriptor status. */
- _thread_sys_fstat(fd, &sb);
-
+ if ((fd == _thread_kern_pipe[0]) || (fd == _thread_kern_pipe[1])) {
+ /*
+ * Don't allow silly programs to close the kernel pipe.
+ */
+ errno = EBADF;
+ ret = -1;
+ }
+ /*
+ * Lock the file descriptor while the file is closed and get
+ * the file descriptor status:
+ */
+ else if (((ret = _FD_LOCK(fd, FD_RDWR, NULL)) == 0) &&
+ ((ret = _thread_sys_fstat(fd, &sb)) == 0)) {
/*
* Check if the file should be left as blocking.
*
@@ -78,11 +89,14 @@ close(int fd)
_thread_sys_fcntl(fd, F_SETFL, flags & ~O_NONBLOCK);
}
+ /* XXX: Assumes well behaved threads. */
+ /* XXX: Defer real close to avoid race condition */
+ entry = _thread_fd_table[fd];
+ _thread_fd_table[fd] = NULL;
+ free(entry);
+
/* Close the file descriptor: */
ret = _thread_sys_close(fd);
-
- free(_thread_fd_table[fd]);
- _thread_fd_table[fd] = NULL;
}
return (ret);
}
diff --git a/lib/libc_r/uthread/uthread_cond.c b/lib/libc_r/uthread/uthread_cond.c
index e0360dd..bacfb72 100644
--- a/lib/libc_r/uthread/uthread_cond.c
+++ b/lib/libc_r/uthread/uthread_cond.c
@@ -29,6 +29,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
+ * $Id$
*/
#include <stdlib.h>
#include <errno.h>
@@ -44,6 +45,28 @@ static inline pthread_t cond_queue_deq(pthread_cond_t);
static inline void cond_queue_remove(pthread_cond_t, pthread_t);
static inline void cond_queue_enq(pthread_cond_t, pthread_t);
+/* Reinitialize a condition variable to defaults. */
+int
+_cond_reinit(pthread_cond_t * cond)
+{
+ int ret = 0;
+
+ if (cond == NULL)
+ ret = EINVAL;
+ else if (*cond == NULL)
+ ret = pthread_cond_init(cond, NULL);
+ else {
+ /*
+ * Initialize the condition variable structure:
+ */
+ TAILQ_INIT(&(*cond)->c_queue);
+ (*cond)->c_flags = COND_FLAGS_INITED;
+ (*cond)->c_type = COND_TYPE_FAST;
+ (*cond)->c_mutex = NULL;
+ memset(&(*cond)->lock, 0, sizeof((*cond)->lock));
+ }
+ return (ret);
+}
int
pthread_cond_init(pthread_cond_t * cond, const pthread_condattr_t * cond_attr)
@@ -146,6 +169,9 @@ pthread_cond_wait(pthread_cond_t * cond, pthread_mutex_t * mutex)
*/
else if (*cond != NULL ||
(rval = pthread_cond_init(cond,NULL)) == 0) {
+ /* Lock the condition variable structure: */
+ _SPINLOCK(&(*cond)->lock);
+
/*
* If the condvar was statically allocated, properly
* initialize the tail queue.
@@ -155,9 +181,6 @@ pthread_cond_wait(pthread_cond_t * cond, pthread_mutex_t * mutex)
(*cond)->c_flags |= COND_FLAGS_INITED;
}
- /* Lock the condition variable structure: */
- _SPINLOCK(&(*cond)->lock);
-
/* Process according to condition variable type: */
switch ((*cond)->c_type) {
/* Fast condition variable: */
@@ -247,6 +270,9 @@ pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
*/
else if (*cond != NULL ||
(rval = pthread_cond_init(cond,NULL)) == 0) {
+ /* Lock the condition variable structure: */
+ _SPINLOCK(&(*cond)->lock);
+
/*
* If the condvar was statically allocated, properly
* initialize the tail queue.
@@ -256,10 +282,6 @@ pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
(*cond)->c_flags |= COND_FLAGS_INITED;
}
-
- /* Lock the condition variable structure: */
- _SPINLOCK(&(*cond)->lock);
-
/* Process according to condition variable type: */
switch ((*cond)->c_type) {
/* Fast condition variable: */
@@ -375,6 +397,12 @@ pthread_cond_signal(pthread_cond_t * cond)
if (cond == NULL || *cond == NULL)
rval = EINVAL;
else {
+ /*
+ * Defer signals to protect the scheduling queues
+ * from access by the signal handler:
+ */
+ _thread_kern_sig_defer();
+
/* Lock the condition variable structure: */
_SPINLOCK(&(*cond)->lock);
@@ -409,6 +437,12 @@ pthread_cond_signal(pthread_cond_t * cond)
/* Unlock the condition variable structure: */
_SPINUNLOCK(&(*cond)->lock);
+
+ /*
+ * Undefer and handle pending signals, yielding if
+ * necessary:
+ */
+ _thread_kern_sig_undefer();
}
/* Return the completion status: */
@@ -425,14 +459,10 @@ pthread_cond_broadcast(pthread_cond_t * cond)
rval = EINVAL;
else {
/*
- * Guard against preemption by a scheduling signal.
- * A change of thread state modifies the waiting
- * and priority queues. In addition, we must assure
- * that all threads currently waiting on the condition
- * variable are signaled and are not timedout by a
- * scheduling signal that causes a preemption.
+ * Defer signals to protect the scheduling queues
+ * from access by the signal handler:
*/
- _thread_kern_sched_defer();
+ _thread_kern_sig_defer();
/* Lock the condition variable structure: */
_SPINLOCK(&(*cond)->lock);
@@ -468,9 +498,11 @@ pthread_cond_broadcast(pthread_cond_t * cond)
/* Unlock the condition variable structure: */
_SPINUNLOCK(&(*cond)->lock);
- /* Reenable preemption and yield if necessary.
+ /*
+ * Undefer and handle pending signals, yielding if
+ * necessary:
*/
- _thread_kern_sched_undefer();
+ _thread_kern_sig_undefer();
}
/* Return the completion status: */
@@ -488,7 +520,7 @@ cond_queue_deq(pthread_cond_t cond)
if ((pthread = TAILQ_FIRST(&cond->c_queue)) != NULL) {
TAILQ_REMOVE(&cond->c_queue, pthread, qe);
- pthread->flags &= ~PTHREAD_FLAGS_QUEUED;
+ pthread->flags &= ~PTHREAD_FLAGS_IN_CONDQ;
}
return(pthread);
@@ -507,9 +539,9 @@ cond_queue_remove(pthread_cond_t cond, pthread_t pthread)
* guard against removing the thread from the queue if
* it isn't in the queue.
*/
- if (pthread->flags & PTHREAD_FLAGS_QUEUED) {
+ if (pthread->flags & PTHREAD_FLAGS_IN_CONDQ) {
TAILQ_REMOVE(&cond->c_queue, pthread, qe);
- pthread->flags &= ~PTHREAD_FLAGS_QUEUED;
+ pthread->flags &= ~PTHREAD_FLAGS_IN_CONDQ;
}
}
@@ -535,6 +567,6 @@ cond_queue_enq(pthread_cond_t cond, pthread_t pthread)
tid = TAILQ_NEXT(tid, qe);
TAILQ_INSERT_BEFORE(tid, pthread, qe);
}
- pthread->flags |= PTHREAD_FLAGS_QUEUED;
+ pthread->flags |= PTHREAD_FLAGS_IN_CONDQ;
}
#endif
diff --git a/lib/libc_r/uthread/uthread_create.c b/lib/libc_r/uthread/uthread_create.c
index 438e527..f5e0b63 100644
--- a/lib/libc_r/uthread/uthread_create.c
+++ b/lib/libc_r/uthread/uthread_create.c
@@ -29,6 +29,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
+ * $Id$
*/
#include <errno.h>
#include <stdlib.h>
@@ -171,7 +172,7 @@ pthread_create(pthread_t * thread, const pthread_attr_t * attr,
new_thread->inherited_priority = 0;
/* Initialise the join queue for the new thread: */
- _thread_queue_init(&(new_thread->join_queue));
+ TAILQ_INIT(&(new_thread->join_queue));
/* Initialize the mutex queue: */
TAILQ_INIT(&new_thread->mutexq);
@@ -179,46 +180,39 @@ pthread_create(pthread_t * thread, const pthread_attr_t * attr,
/* Initialise hooks in the thread structure: */
new_thread->specific_data = NULL;
new_thread->cleanup = NULL;
- new_thread->queue = NULL;
- new_thread->qnxt = NULL;
new_thread->flags = 0;
+ new_thread->poll_data.nfds = 0;
+ new_thread->poll_data.fds = NULL;
- /* Lock the thread list: */
- _lock_thread_list();
+ /*
+ * Defer signals to protect the scheduling queues
+ * from access by the signal handler:
+ */
+ _thread_kern_sig_defer();
/*
* Check if the garbage collector thread
* needs to be started.
*/
- f_gc = (_thread_link_list == _thread_initial);
+ f_gc = (TAILQ_FIRST(&_thread_list) == _thread_initial);
/* Add the thread to the linked list of all threads: */
- new_thread->nxt = _thread_link_list;
- _thread_link_list = new_thread;
-
- /* Unlock the thread list: */
- _unlock_thread_list();
-
- /*
- * Guard against preemption by a scheduling signal.
- * A change of thread state modifies the waiting
- * and priority queues.
- */
- _thread_kern_sched_defer();
+ TAILQ_INSERT_HEAD(&_thread_list, new_thread, tle);
if (pattr->suspend == PTHREAD_CREATE_SUSPENDED) {
new_thread->state = PS_SUSPENDED;
PTHREAD_WAITQ_INSERT(new_thread);
- } else {
+ }
+ else {
new_thread->state = PS_RUNNING;
PTHREAD_PRIOQ_INSERT_TAIL(new_thread);
}
/*
- * Reenable preemption and yield if a scheduling
- * signal occurred while in the critical region.
+ * Undefer and handle pending signals, yielding
+ * if necessary.
*/
- _thread_kern_sched_undefer();
+ _thread_kern_sig_undefer();
/* Return a pointer to the thread structure: */
(*thread) = new_thread;
diff --git a/lib/libc_r/uthread/uthread_detach.c b/lib/libc_r/uthread/uthread_detach.c
index 05da832..73a58de2 100644
--- a/lib/libc_r/uthread/uthread_detach.c
+++ b/lib/libc_r/uthread/uthread_detach.c
@@ -29,6 +29,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
+ * $Id$
*/
#include <errno.h>
#ifdef _THREAD_SAFE
@@ -53,23 +54,25 @@ pthread_detach(pthread_t pthread)
pthread->attr.flags |= PTHREAD_DETACHED;
/*
- * Guard against preemption by a scheduling signal.
- * A change of thread state modifies the waiting
- * and priority queues.
+ * Defer signals to protect the scheduling queues from
+ * access by the signal handler:
*/
- _thread_kern_sched_defer();
+ _thread_kern_sig_defer();
/* Enter a loop to bring all threads off the join queue: */
- while ((next_thread = _thread_queue_deq(&pthread->join_queue)) != NULL) {
+ while ((next_thread = TAILQ_FIRST(&pthread->join_queue)) != NULL) {
+ /* Remove the thread from the queue: */
+ TAILQ_REMOVE(&pthread->join_queue, next_thread, qe);
+
/* Make the thread run: */
PTHREAD_NEW_STATE(next_thread,PS_RUNNING);
}
/*
- * Reenable preemption and yield if a scheduling signal
- * occurred while in the critical region.
+ * Undefer and handle pending signals, yielding if a
+ * scheduling signal occurred while in the critical region.
*/
- _thread_kern_sched_undefer();
+ _thread_kern_sig_undefer();
} else
/* Return an error: */
rval = EINVAL;
diff --git a/lib/libc_r/uthread/uthread_execve.c b/lib/libc_r/uthread/uthread_execve.c
index 0dbd467..86c55f3 100644
--- a/lib/libc_r/uthread/uthread_execve.c
+++ b/lib/libc_r/uthread/uthread_execve.c
@@ -29,6 +29,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
+ * $Id$
*/
#include <errno.h>
#include <fcntl.h>
@@ -92,6 +93,9 @@ execve(const char *name, char *const * argv, char *const * envp)
act.sa_mask = _thread_sigact[i - 1].sa_mask;
act.sa_flags = _thread_sigact[i - 1].sa_flags;
+ /* Ensure the scheduling signal is masked: */
+ sigaddset(&act.sa_mask, _SCHED_SIGNAL);
+
/* Change the signal action for the process: */
_thread_sys_sigaction(i, &act, &oact);
}
diff --git a/lib/libc_r/uthread/uthread_exit.c b/lib/libc_r/uthread/uthread_exit.c
index c54dbda..a5f706d 100644
--- a/lib/libc_r/uthread/uthread_exit.c
+++ b/lib/libc_r/uthread/uthread_exit.c
@@ -29,6 +29,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
+ * $Id$
*/
#include <errno.h>
#include <unistd.h>
@@ -92,7 +93,12 @@ _thread_exit(char *fname, int lineno, char *string)
_thread_sys_write(2, s, strlen(s));
/* Force this process to exit: */
+ /* XXX - Do we want abort to be conditional on _PTHREADS_INVARIANTS? */
+#if defined(_PTHREADS_INVARIANTS)
+ abort();
+#else
_exit(1);
+#endif
}
void
@@ -129,22 +135,24 @@ pthread_exit(void *status)
}
/*
- * Guard against preemption by a scheduling signal. A change of
- * thread state modifies the waiting and priority queues.
+ * Defer signals to protect the scheduling queues from access
+ * by the signal handler:
*/
- _thread_kern_sched_defer();
+ _thread_kern_sig_defer();
/* Check if there are any threads joined to this one: */
- while ((pthread = _thread_queue_deq(&(_thread_run->join_queue))) != NULL) {
+ while ((pthread = TAILQ_FIRST(&(_thread_run->join_queue))) != NULL) {
+ /* Remove the thread from the queue: */
+ TAILQ_REMOVE(&_thread_run->join_queue, pthread, qe);
+
/* Wake the joined thread and let it detach this thread: */
PTHREAD_NEW_STATE(pthread,PS_RUNNING);
}
/*
- * Reenable preemption and yield if a scheduling signal
- * occurred while in the critical region.
+ * Undefer and handle pending signals, yielding if necessary:
*/
- _thread_kern_sched_undefer();
+ _thread_kern_sig_undefer();
/*
* Lock the garbage collector mutex to ensure that the garbage
@@ -154,8 +162,21 @@ pthread_exit(void *status)
PANIC("Cannot lock gc mutex");
/* Add this thread to the list of dead threads. */
- _thread_run->nxt_dead = _thread_dead;
- _thread_dead = _thread_run;
+ TAILQ_INSERT_HEAD(&_dead_list, _thread_run, dle);
+
+ /*
+ * Defer signals to protect the scheduling queues from access
+ * by the signal handler:
+ */
+ _thread_kern_sig_defer();
+
+ /* Remove this thread from the thread list: */
+ TAILQ_REMOVE(&_thread_list, _thread_run, tle);
+
+ /*
+ * Undefer and handle pending signals, yielding if necessary:
+ */
+ _thread_kern_sig_undefer();
/*
* Signal the garbage collector thread that there is something
diff --git a/lib/libc_r/uthread/uthread_fd.c b/lib/libc_r/uthread/uthread_fd.c
index e9ec27b..f0c973b 100644
--- a/lib/libc_r/uthread/uthread_fd.c
+++ b/lib/libc_r/uthread/uthread_fd.c
@@ -29,7 +29,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: uthread_fd.c,v 1.9 1998/09/13 15:33:42 dt Exp $
+ * $Id: uthread_fd.c,v 1.10 1999/03/23 05:07:55 jb Exp $
*
*/
#include <errno.h>
@@ -84,14 +84,14 @@ _thread_fd_table_init(int fd)
entry->w_owner = NULL;
entry->r_fname = NULL;
entry->w_fname = NULL;
- entry->r_lineno = 0;;
- entry->w_lineno = 0;;
- entry->r_lockcount = 0;;
- entry->w_lockcount = 0;;
+ entry->r_lineno = 0;
+ entry->w_lineno = 0;
+ entry->r_lockcount = 0;
+ entry->w_lockcount = 0;
/* Initialise the read/write queues: */
- _thread_queue_init(&entry->r_queue);
- _thread_queue_init(&entry->w_queue);
+ TAILQ_INIT(&entry->r_queue);
+ TAILQ_INIT(&entry->w_queue);
/* Get the flags for the file: */
if (fd >= 3 && (entry->flags =
@@ -169,6 +169,12 @@ _thread_fd_unlock(int fd, int lock_type)
*/
if ((ret = _thread_fd_table_init(fd)) == 0) {
/*
+ * Defer signals to protect the scheduling queues from
+ * access by the signal handler:
+ */
+ _thread_kern_sig_defer();
+
+ /*
* Lock the file descriptor table entry to prevent
* other threads for clashing with the current
* thread's accesses:
@@ -195,8 +201,12 @@ _thread_fd_unlock(int fd, int lock_type)
* Get the next thread in the queue for a
* read lock on this file descriptor:
*/
- else if ((_thread_fd_table[fd]->r_owner = _thread_queue_deq(&_thread_fd_table[fd]->r_queue)) == NULL) {
+ else if ((_thread_fd_table[fd]->r_owner = TAILQ_FIRST(&_thread_fd_table[fd]->r_queue)) == NULL) {
} else {
+ /* Remove this thread from the queue: */
+ TAILQ_REMOVE(&_thread_fd_table[fd]->r_queue,
+ _thread_fd_table[fd]->r_owner, qe);
+
/*
* Set the state of the new owner of
* the thread to running:
@@ -233,8 +243,12 @@ _thread_fd_unlock(int fd, int lock_type)
* Get the next thread in the queue for a
* write lock on this file descriptor:
*/
- else if ((_thread_fd_table[fd]->w_owner = _thread_queue_deq(&_thread_fd_table[fd]->w_queue)) == NULL) {
+ else if ((_thread_fd_table[fd]->w_owner = TAILQ_FIRST(&_thread_fd_table[fd]->w_queue)) == NULL) {
} else {
+ /* Remove this thread from the queue: */
+ TAILQ_REMOVE(&_thread_fd_table[fd]->w_queue,
+ _thread_fd_table[fd]->w_owner, qe);
+
/*
* Set the state of the new owner of
* the thread to running:
@@ -254,6 +268,12 @@ _thread_fd_unlock(int fd, int lock_type)
/* Unlock the file descriptor table entry: */
_SPINUNLOCK(&_thread_fd_table[fd]->lock);
+
+ /*
+ * Undefer and handle pending signals, yielding if
+ * necessary:
+ */
+ _thread_kern_sig_undefer();
}
/* Nothing to return. */
@@ -295,7 +315,7 @@ _thread_fd_lock(int fd, int lock_type, struct timespec * timeout)
* queue of threads waiting for a
* read lock on this file descriptor:
*/
- _thread_queue_enq(&_thread_fd_table[fd]->r_queue, _thread_run);
+ TAILQ_INSERT_TAIL(&_thread_fd_table[fd]->r_queue, _thread_run, qe);
/*
* Save the file descriptor details
@@ -368,7 +388,7 @@ _thread_fd_lock(int fd, int lock_type, struct timespec * timeout)
* write lock on this file
* descriptor:
*/
- _thread_queue_enq(&_thread_fd_table[fd]->w_queue, _thread_run);
+ TAILQ_INSERT_TAIL(&_thread_fd_table[fd]->w_queue, _thread_run, qe);
/*
* Save the file descriptor details
@@ -440,6 +460,12 @@ _thread_fd_unlock_debug(int fd, int lock_type, char *fname, int lineno)
*/
if ((ret = _thread_fd_table_init(fd)) == 0) {
/*
+ * Defer signals to protect the scheduling queues from
+ * access by the signal handler:
+ */
+ _thread_kern_sig_defer();
+
+ /*
* Lock the file descriptor table entry to prevent
* other threads for clashing with the current
* thread's accesses:
@@ -466,8 +492,12 @@ _thread_fd_unlock_debug(int fd, int lock_type, char *fname, int lineno)
* Get the next thread in the queue for a
* read lock on this file descriptor:
*/
- else if ((_thread_fd_table[fd]->r_owner = _thread_queue_deq(&_thread_fd_table[fd]->r_queue)) == NULL) {
+ else if ((_thread_fd_table[fd]->r_owner = TAILQ_FIRST(&_thread_fd_table[fd]->r_queue)) == NULL) {
} else {
+ /* Remove this thread from the queue: */
+ TAILQ_REMOVE(&_thread_fd_table[fd]->r_queue,
+ _thread_fd_table[fd]->r_owner, qe);
+
/*
* Set the state of the new owner of
* the thread to running:
@@ -504,8 +534,12 @@ _thread_fd_unlock_debug(int fd, int lock_type, char *fname, int lineno)
* Get the next thread in the queue for a
* write lock on this file descriptor:
*/
- else if ((_thread_fd_table[fd]->w_owner = _thread_queue_deq(&_thread_fd_table[fd]->w_queue)) == NULL) {
+ else if ((_thread_fd_table[fd]->w_owner = TAILQ_FIRST(&_thread_fd_table[fd]->w_queue)) == NULL) {
} else {
+ /* Remove this thread from the queue: */
+ TAILQ_REMOVE(&_thread_fd_table[fd]->w_queue,
+ _thread_fd_table[fd]->w_owner, qe);
+
/*
* Set the state of the new owner of
* the thread to running:
@@ -525,6 +559,12 @@ _thread_fd_unlock_debug(int fd, int lock_type, char *fname, int lineno)
/* Unlock the file descriptor table entry: */
_SPINUNLOCK(&_thread_fd_table[fd]->lock);
+
+ /*
+ * Undefer and handle pending signals, yielding if
+ * necessary.
+ */
+ _thread_kern_sig_undefer();
}
/* Nothing to return. */
@@ -567,7 +607,7 @@ _thread_fd_lock_debug(int fd, int lock_type, struct timespec * timeout,
* queue of threads waiting for a
* read lock on this file descriptor:
*/
- _thread_queue_enq(&_thread_fd_table[fd]->r_queue, _thread_run);
+ TAILQ_INSERT_TAIL(&_thread_fd_table[fd]->r_queue, _thread_run, qe);
/*
* Save the file descriptor details
@@ -649,7 +689,7 @@ _thread_fd_lock_debug(int fd, int lock_type, struct timespec * timeout,
* write lock on this file
* descriptor:
*/
- _thread_queue_enq(&_thread_fd_table[fd]->w_queue, _thread_run);
+ TAILQ_INSERT_TAIL(&_thread_fd_table[fd]->w_queue, _thread_run, qe);
/*
* Save the file descriptor details
diff --git a/lib/libc_r/uthread/uthread_file.c b/lib/libc_r/uthread/uthread_file.c
index 0fb52a9..d755bc8 100644
--- a/lib/libc_r/uthread/uthread_file.c
+++ b/lib/libc_r/uthread/uthread_file.c
@@ -29,7 +29,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: uthread_file.c,v 1.5 1998/09/07 21:55:01 alex Exp $
+ * $Id: uthread_file.c,v 1.6 1998/09/09 16:50:33 dt Exp $
*
* POSIX stdio FILE locking functions. These assume that the locking
* is only required at FILE structure level, not at file descriptor
@@ -310,6 +310,12 @@ _funlockfile(FILE * fp)
/* Check if this is a real file: */
if (fp->_file >= 0) {
+ /*
+ * Defer signals to protect the scheduling queues from
+ * access by the signal handler:
+ */
+ _thread_kern_sig_defer();
+
/* Lock the hash table: */
_SPINLOCK(&hash_lock);
@@ -356,6 +362,12 @@ _funlockfile(FILE * fp)
/* Unlock the hash table: */
_SPINUNLOCK(&hash_lock);
+
+ /*
+ * Undefer and handle pending signals, yielding if
+ * necessary:
+ */
+ _thread_kern_sig_undefer();
}
return;
}
diff --git a/lib/libc_r/uthread/uthread_find_thread.c b/lib/libc_r/uthread/uthread_find_thread.c
index e4a59a0..a010a7e 100644
--- a/lib/libc_r/uthread/uthread_find_thread.c
+++ b/lib/libc_r/uthread/uthread_find_thread.c
@@ -29,6 +29,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
+ * $Id$
*/
#include <errno.h>
#ifdef _THREAD_SAFE
@@ -46,20 +47,20 @@ _find_thread(pthread_t pthread)
/* Invalid thread: */
return(EINVAL);
- /* Lock the thread list: */
- _lock_thread_list();
-
- /* Point to the first thread in the list: */
- pthread1 = _thread_link_list;
+ /*
+ * Defer signals to protect the thread list from access
+ * by the signal handler:
+ */
+ _thread_kern_sig_defer();
- /* Search for the thread to join to: */
- while (pthread1 != NULL && pthread1 != pthread) {
- /* Point to the next thread: */
- pthread1 = pthread1->nxt;
+ /* Search for the specified thread: */
+ TAILQ_FOREACH(pthread1, &_thread_list, tle) {
+ if (pthread == pthread1)
+ break;
}
- /* Unlock the thread list: */
- _unlock_thread_list();
+ /* Undefer and handle pending signals, yielding if necessary: */
+ _thread_kern_sig_undefer();
/* Return zero if the thread exists: */
return ((pthread1 != NULL) ? 0:ESRCH);
@@ -83,13 +84,10 @@ _find_dead_thread(pthread_t pthread)
if (pthread_mutex_lock(&_gc_mutex) != 0)
PANIC("Cannot lock gc mutex");
- /* Point to the first thread in the list: */
- pthread1 = _thread_dead;
-
- /* Search for the thread to join to: */
- while (pthread1 != NULL && pthread1 != pthread) {
- /* Point to the next thread: */
- pthread1 = pthread1->nxt_dead;
+ /* Search for the specified thread: */
+ TAILQ_FOREACH(pthread1, &_dead_list, dle) {
+ if (pthread1 == pthread)
+ break;
}
/* Unlock the garbage collector mutex: */
diff --git a/lib/libc_r/uthread/uthread_fork.c b/lib/libc_r/uthread/uthread_fork.c
index 5582c1e..8d3322e 100644
--- a/lib/libc_r/uthread/uthread_fork.c
+++ b/lib/libc_r/uthread/uthread_fork.c
@@ -29,6 +29,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
+ * $Id$
*/
#include <errno.h>
#include <string.h>
@@ -44,10 +45,13 @@ fork(void)
int i, flags;
pid_t ret;
pthread_t pthread;
- pthread_t pthread_next;
+ pthread_t pthread_save;
- /* Lock the thread list: */
- _lock_thread_list();
+ /*
+ * Defer signals to protect the scheduling queues from access
+ * by the signal handler:
+ */
+ _thread_kern_sig_defer();
/* Fork a new process: */
if ((ret = _thread_sys_fork()) != 0) {
@@ -88,45 +92,79 @@ fork(void)
else if (_thread_sys_fcntl(_thread_kern_pipe[1], F_SETFL, flags | O_NONBLOCK) == -1) {
/* Abort this application: */
abort();
+ }
+ /* Reinitialize the GC mutex: */
+ else if (_mutex_reinit(&_gc_mutex) != 0) {
+ /* Abort this application: */
+ PANIC("Cannot initialize GC mutex for forked process");
+ }
+ /* Reinitialize the GC condition variable: */
+ else if (_cond_reinit(&_gc_cond) != 0) {
+ /* Abort this application: */
+ PANIC("Cannot initialize GC condvar for forked process");
+ }
/* Initialize the ready queue: */
- } else if (_pq_init(&_readyq, PTHREAD_MIN_PRIORITY,
- PTHREAD_MAX_PRIORITY) != 0) {
+ else if (_pq_init(&_readyq) != 0) {
/* Abort this application: */
- PANIC("Cannot allocate priority ready queue.");
+ PANIC("Cannot initialize priority ready queue.");
} else {
- /* Point to the first thread in the list: */
- pthread = _thread_link_list;
-
/*
* Enter a loop to remove all threads other than
* the running thread from the thread list:
*/
+ pthread = TAILQ_FIRST(&_thread_list);
while (pthread != NULL) {
- pthread_next = pthread->nxt;
- if (pthread == _thread_run) {
- _thread_link_list = pthread;
- pthread->nxt = NULL;
- } else {
- if (pthread->attr.stackaddr_attr ==
- NULL && pthread->stack != NULL)
+ /* Save the thread to be freed: */
+ pthread_save = pthread;
+
+ /*
+ * Advance to the next thread before
+ * destroying the current thread:
+ */
+ pthread = TAILQ_NEXT(pthread, dle);
+
+ /* Make sure this isn't the running thread: */
+ if (pthread_save != _thread_run) {
+ /* Remove this thread from the list: */
+ TAILQ_REMOVE(&_thread_list,
+ pthread_save, tle);
+
+ if (pthread_save->attr.stackaddr_attr ==
+ NULL && pthread_save->stack != NULL)
/*
* Free the stack of the
* dead thread:
*/
- free(pthread->stack);
+ free(pthread_save->stack);
- if (pthread->specific_data != NULL)
- free(pthread->specific_data);
+ if (pthread_save->specific_data != NULL)
+ free(pthread_save->specific_data);
- free(pthread);
- }
+ if (pthread_save->poll_data.fds != NULL)
+ free(pthread_save->poll_data.fds);
- /* Point to the next thread: */
- pthread = pthread_next;
+ free(pthread_save);
+ }
}
- /* Re-init the waiting queues. */
+ /* Re-init the dead thread list: */
+ TAILQ_INIT(&_dead_list);
+
+ /* Re-init the waiting and work queues. */
TAILQ_INIT(&_waitingq);
+ TAILQ_INIT(&_workq);
+
+ /* Re-init the threads mutex queue: */
+ TAILQ_INIT(&_thread_run->mutexq);
+
+ /* No spinlocks yet: */
+ _spinblock_count = 0;
+
+ /* Don't queue signals yet: */
+ _queue_signals = 0;
+
+ /* Initialize signal handling: */
+ _thread_sig_init();
/* Initialize the scheduling switch hook routine: */
_sched_switch_hook = NULL;
@@ -147,15 +185,17 @@ fork(void)
_thread_fd_table[i]->w_lockcount = 0;;
/* Initialise the read/write queues: */
- _thread_queue_init(&_thread_fd_table[i]->r_queue);
- _thread_queue_init(&_thread_fd_table[i]->w_queue);
+ TAILQ_INIT(&_thread_fd_table[i]->r_queue);
+ TAILQ_INIT(&_thread_fd_table[i]->w_queue);
}
}
}
}
- /* Unock the thread list: */
- _unlock_thread_list();
+ /*
+ * Undefer and handle pending signals, yielding if necessary:
+ */
+ _thread_kern_sig_undefer();
/* Return the process ID: */
return (ret);
diff --git a/lib/libc_r/uthread/uthread_gc.c b/lib/libc_r/uthread/uthread_gc.c
index 510c51f..e29e29b 100644
--- a/lib/libc_r/uthread/uthread_gc.c
+++ b/lib/libc_r/uthread/uthread_gc.c
@@ -29,7 +29,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: uthread_gc.c,v 1.2 1998/09/30 19:17:51 dt Exp $
+ * $Id: uthread_gc.c,v 1.3 1999/03/23 05:07:55 jb Exp $
*
* Garbage collector thread. Frees memory allocated for dead threads.
*
@@ -74,20 +74,26 @@ _thread_gc(pthread_addr_t arg)
/* Dump thread info to file. */
_thread_dump_info();
- /* Lock the thread list: */
- _lock_thread_list();
+ /*
+ * Defer signals to protect the scheduling queues from
+ * access by the signal handler:
+ */
+ _thread_kern_sig_defer();
/* Check if this is the last running thread: */
- if (_thread_link_list == _thread_run &&
- _thread_link_list->nxt == NULL)
+ if (TAILQ_FIRST(&_thread_list) == _thread_run &&
+ TAILQ_NEXT(_thread_run, tle) == NULL)
/*
* This is the last thread, so it can exit
* now.
*/
f_done = 1;
- /* Unlock the thread list: */
- _unlock_thread_list();
+ /*
+ * Undefer and handle pending signals, yielding if
+ * necessary:
+ */
+ _thread_kern_sig_undefer();
/*
* Lock the garbage collector mutex which ensures that
@@ -100,48 +106,24 @@ _thread_gc(pthread_addr_t arg)
p_stack = NULL;
pthread_cln = NULL;
- /* Point to the first dead thread (if there are any): */
- pthread = _thread_dead;
-
- /* There is no previous dead thread: */
- pthread_prv = NULL;
-
/*
* Enter a loop to search for the first dead thread that
* has memory to free.
*/
- while (p_stack == NULL && pthread_cln == NULL &&
- pthread != NULL) {
- /* Save a pointer to the next thread: */
- pthread_nxt = pthread->nxt_dead;
-
+ for (pthread = TAILQ_FIRST(&_dead_list);
+ p_stack == NULL && pthread_cln == NULL && pthread != NULL;
+ pthread = TAILQ_NEXT(pthread, dle)) {
/* Check if the initial thread: */
- if (pthread == _thread_initial)
+ if (pthread == _thread_initial) {
/* Don't destroy the initial thread. */
- pthread_prv = pthread;
-
+ }
/*
* Check if this thread has detached:
*/
else if ((pthread->attr.flags &
PTHREAD_DETACHED) != 0) {
- /*
- * Check if there is no previous dead
- * thread:
- */
- if (pthread_prv == NULL)
- /*
- * The dead thread is at the head
- * of the list:
- */
- _thread_dead = pthread_nxt;
- else
- /*
- * The dead thread is not at the
- * head of the list:
- */
- pthread_prv->nxt_dead =
- pthread->nxt_dead;
+ /* Remove this thread from the dead list: */
+ TAILQ_REMOVE(&_dead_list, pthread, dle);
/*
* Check if the stack was not specified by
@@ -162,14 +144,12 @@ _thread_gc(pthread_addr_t arg)
* be freed outside the locks:
*/
pthread_cln = pthread;
+
} else {
/*
* This thread has not detached, so do
- * not destroy it:
- */
- pthread_prv = pthread;
-
- /*
+ * not destroy it.
+ *
* Check if the stack was not specified by
* the caller to pthread_create and has not
* been destroyed yet:
@@ -189,9 +169,6 @@ _thread_gc(pthread_addr_t arg)
pthread->stack = NULL;
}
}
-
- /* Point to the next thread: */
- pthread = pthread_nxt;
}
/*
@@ -229,52 +206,12 @@ _thread_gc(pthread_addr_t arg)
*/
if (p_stack != NULL)
free(p_stack);
- if (pthread_cln != NULL) {
- /* Lock the thread list: */
- _lock_thread_list();
-
- /*
- * Check if the thread is at the head of the
- * linked list.
- */
- if (_thread_link_list == pthread_cln)
- /* There is no previous thread: */
- _thread_link_list = pthread_cln->nxt;
- else {
- /* Point to the first thread in the list: */
- pthread = _thread_link_list;
-
- /*
- * Enter a loop to find the thread in the
- * linked list before the thread that is
- * about to be freed.
- */
- while (pthread != NULL &&
- pthread->nxt != pthread_cln)
- /* Point to the next thread: */
- pthread = pthread->nxt;
-
- /* Check that a previous thread was found: */
- if (pthread != NULL) {
- /*
- * Point the previous thread to
- * the one after the thread being
- * freed:
- */
- pthread->nxt = pthread_cln->nxt;
- }
- }
-
- /* Unlock the thread list: */
- _unlock_thread_list();
-
+ if (pthread_cln != NULL)
/*
* Free the memory allocated for the thread
* structure.
*/
free(pthread_cln);
- }
-
}
return (NULL);
}
diff --git a/lib/libc_r/uthread/uthread_info.c b/lib/libc_r/uthread/uthread_info.c
index d2d97da..8c0787a 100644
--- a/lib/libc_r/uthread/uthread_info.c
+++ b/lib/libc_r/uthread/uthread_info.c
@@ -29,6 +29,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
+ * $Id$
*/
#include <stdio.h>
#include <fcntl.h>
@@ -55,6 +56,7 @@ static const struct s_thread_info thread_info[] = {
{PS_FDR_WAIT , "Waiting for read"},
{PS_FDW_WAIT , "Waiting for write"},
{PS_FILE_WAIT , "Waiting for FILE lock"},
+ {PS_POLL_WAIT , "Waiting on poll"},
{PS_SELECT_WAIT , "Waiting on select"},
{PS_SLEEP_WAIT , "Sleeping"},
{PS_WAIT_WAIT , "Waiting process"},
@@ -108,8 +110,7 @@ _thread_dump_info(void)
_thread_sys_write(fd, s, strlen(s));
/* Enter a loop to report each thread in the global list: */
- for (pthread = _thread_link_list; pthread != NULL;
- pthread = pthread->nxt) {
+ TAILQ_FOREACH(pthread, &_thread_list, tle) {
/* Find the state: */
for (j = 0; j < (sizeof(thread_info) /
sizeof(struct s_thread_info)) - 1; j++)
@@ -214,8 +215,29 @@ _thread_dump_info(void)
_thread_sys_write(fd, s, strlen(s));
}
+ /* Output a header for threads in the work queue: */
+ strcpy(s, "\n\n=============\nTHREADS IN WORKQ\n\n");
+ _thread_sys_write(fd, s, strlen(s));
+
+ /* Enter a loop to report each thread in the work queue: */
+ TAILQ_FOREACH (pthread, &_workq, qe) {
+ /* Find the state: */
+ for (j = 0; j < (sizeof(thread_info) /
+ sizeof(struct s_thread_info)) - 1; j++)
+ if (thread_info[j].state == pthread->state)
+ break;
+ /* Output a record for the current thread: */
+ snprintf(s, sizeof(s),
+ "--------------------\nThread %p (%s) prio %3d state %s [%s:%d]\n",
+ pthread, (pthread->name == NULL) ?
+ "":pthread->name, pthread->base_priority,
+ thread_info[j].name,
+ pthread->fname,pthread->lineno);
+ _thread_sys_write(fd, s, strlen(s));
+ }
+
/* Check if there are no dead threads: */
- if (_thread_dead == NULL) {
+ if (TAILQ_FIRST(&_dead_list) == NULL) {
/* Output a record: */
strcpy(s, "\n\nTHERE ARE NO DEAD THREADS\n");
_thread_sys_write(fd, s, strlen(s));
@@ -228,8 +250,7 @@ _thread_dump_info(void)
* Enter a loop to report each thread in the global
* dead thread list:
*/
- for (pthread = _thread_dead; pthread != NULL;
- pthread = pthread->nxt_dead) {
+ TAILQ_FOREACH(pthread, &_dead_list, dle) {
/* Output a record for the current thread: */
snprintf(s, sizeof(s),
"Thread %p prio %3d [%s:%d]\n",
diff --git a/lib/libc_r/uthread/uthread_init.c b/lib/libc_r/uthread/uthread_init.c
index e4411ce..d470301 100644
--- a/lib/libc_r/uthread/uthread_init.c
+++ b/lib/libc_r/uthread/uthread_init.c
@@ -29,6 +29,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
+ * $Id$
*/
/* Allocate space for global thread variables here: */
@@ -39,7 +40,9 @@
#include <string.h>
#include <fcntl.h>
#include <paths.h>
+#include <poll.h>
#include <unistd.h>
+#include <sys/sysctl.h>
#include <sys/time.h>
#include <sys/ttycom.h>
#ifdef _THREAD_SAFE
@@ -81,6 +84,9 @@ _thread_init(void)
int fd;
int flags;
int i;
+ int len;
+ int mib[2];
+ struct clockinfo clockinfo;
struct sigaction act;
/* Check if this function has already been called: */
@@ -147,8 +153,8 @@ _thread_init(void)
/* Abort this application: */
PANIC("Cannot get kernel write pipe flags");
}
- /* Initialize the ready queue: */
- else if (_pq_init(&_readyq, PTHREAD_MIN_PRIORITY, PTHREAD_MAX_PRIORITY) != 0) {
+ /* Allocate and initialize the ready queue: */
+ else if (_pq_alloc(&_readyq, PTHREAD_MIN_PRIORITY, PTHREAD_MAX_PRIORITY) != 0) {
/* Abort this application: */
PANIC("Cannot allocate priority ready queue.");
}
@@ -165,8 +171,9 @@ _thread_init(void)
_thread_kern_thread.flags = PTHREAD_FLAGS_PRIVATE;
memset(_thread_initial, 0, sizeof(struct pthread));
- /* Initialize the waiting queue: */
+ /* Initialize the waiting and work queues: */
TAILQ_INIT(&_waitingq);
+ TAILQ_INIT(&_workq);
/* Initialize the scheduling switch hook routine: */
_sched_switch_hook = NULL;
@@ -186,23 +193,23 @@ _thread_init(void)
_thread_initial->state = PS_RUNNING;
/* Initialise the queue: */
- _thread_queue_init(&(_thread_initial->join_queue));
+ TAILQ_INIT(&(_thread_initial->join_queue));
/* Initialize the owned mutex queue and count: */
TAILQ_INIT(&(_thread_initial->mutexq));
_thread_initial->priority_mutex_count = 0;
/* Initialise the rest of the fields: */
- _thread_initial->sched_defer_count = 0;
- _thread_initial->yield_on_sched_undefer = 0;
+ _thread_initial->poll_data.nfds = 0;
+ _thread_initial->poll_data.fds = NULL;
+ _thread_initial->sig_defer_count = 0;
+ _thread_initial->yield_on_sig_undefer = 0;
_thread_initial->specific_data = NULL;
_thread_initial->cleanup = NULL;
- _thread_initial->queue = NULL;
- _thread_initial->qnxt = NULL;
- _thread_initial->nxt = NULL;
_thread_initial->flags = 0;
_thread_initial->error = 0;
- _thread_link_list = _thread_initial;
+ TAILQ_INIT(&_thread_list);
+ TAILQ_INSERT_HEAD(&_thread_list, _thread_initial, tle);
_thread_run = _thread_initial;
/* Initialise the global signal action structure: */
@@ -210,6 +217,9 @@ _thread_init(void)
act.sa_handler = (void (*) ()) _thread_sig_handler;
act.sa_flags = 0;
+ /* Initialize signal handling: */
+ _thread_sig_init();
+
/* Enter a loop to get the existing signal status: */
for (i = 1; i < NSIG; i++) {
/* Check for signals which cannot be trapped: */
@@ -241,6 +251,13 @@ _thread_init(void)
PANIC("Cannot initialise signal handler");
}
+ /* Get the kernel clockrate: */
+ mib[0] = CTL_KERN;
+ mib[1] = KERN_CLOCKRATE;
+ len = sizeof (struct clockinfo);
+ if (sysctl(mib, 2, &clockinfo, &len, NULL, 0) == 0)
+ _clock_res_nsec = clockinfo.tick * 1000;
+
/* Get the table size: */
if ((_thread_dtablesize = getdtablesize()) < 0) {
/*
@@ -256,6 +273,14 @@ _thread_init(void)
* table, so abort this process.
*/
PANIC("Cannot allocate memory for file descriptor table");
+ }
+ /* Allocate memory for the pollfd table: */
+ if ((_thread_pfd_table = (struct pollfd *) malloc(sizeof(struct pollfd) * _thread_dtablesize)) == NULL) {
+ /*
+ * Cannot allocate memory for the file descriptor
+ * table, so abort this process.
+ */
+ PANIC("Cannot allocate memory for pollfd table");
} else {
/*
* Enter a loop to initialise the file descriptor
@@ -265,6 +290,14 @@ _thread_init(void)
/* Initialise the file descriptor table: */
_thread_fd_table[i] = NULL;
}
+
+ /* Initialize stdio file descriptor table entries: */
+ if ((_thread_fd_table_init(0) != 0) ||
+ (_thread_fd_table_init(1) != 0) ||
+ (_thread_fd_table_init(2) != 0)) {
+ PANIC("Cannot initialize stdio file descriptor "
+ "table entries");
+ }
}
}
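
The KERN_CLOCKRATE hunk above replaces the hard-coded 10 msec resolution with whatever the kernel reports; clockinfo.tick is the tick length in microseconds, hence the "* 1000" to get nanoseconds. A hedged standalone sketch of the same query, assuming a BSD system (note that sysctl(3) takes the length as a size_t):

    #include <sys/types.h>
    #include <sys/time.h>
    #include <sys/sysctl.h>
    #include <stdio.h>

    int
    main(void)
    {
        int mib[2] = { CTL_KERN, KERN_CLOCKRATE };
        struct clockinfo ci = { 0 };
        size_t len = sizeof(ci);    /* sysctl(3) wants a size_t here */
        long res_nsec = 10000000;   /* the old hard-coded 10 msec */

        if (sysctl(mib, 2, &ci, &len, NULL, 0) == 0)
            res_nsec = (long)ci.tick * 1000;    /* tick is in usec */

        printf("kernel tick: %d usec -> resolution %ld nsec\n",
            ci.tick, res_nsec);
        return (0);
    }
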
diff --git a/lib/libc_r/uthread/uthread_join.c b/lib/libc_r/uthread/uthread_join.c
index 2043b76..f36d7f7 100644
--- a/lib/libc_r/uthread/uthread_join.c
+++ b/lib/libc_r/uthread/uthread_join.c
@@ -29,6 +29,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
+ * $Id$
*/
#include <errno.h>
#ifdef _THREAD_SAFE
@@ -71,7 +72,7 @@ pthread_join(pthread_t pthread, void **thread_return)
/* Check if the thread is not dead: */
else if (pthread->state != PS_DEAD) {
/* Add the running thread to the join queue: */
- _thread_queue_enq(&(pthread->join_queue), _thread_run);
+ TAILQ_INSERT_TAIL(&(pthread->join_queue), _thread_run, qe);
/* Schedule the next thread: */
_thread_kern_sched_state(PS_JOIN, __FILE__, __LINE__);
diff --git a/lib/libc_r/uthread/uthread_kern.c b/lib/libc_r/uthread/uthread_kern.c
index f1e97f9..8718906 100644
--- a/lib/libc_r/uthread/uthread_kern.c
+++ b/lib/libc_r/uthread/uthread_kern.c
@@ -29,10 +29,11 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: uthread_kern.c,v 1.17 1999/05/07 07:59:44 jasone Exp $
+ * $Id: uthread_kern.c,v 1.16 1999/03/23 05:07:56 jb Exp $
*
*/
#include <errno.h>
+#include <poll.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
@@ -51,7 +52,10 @@
/* Static function prototype definitions: */
static void
-_thread_kern_select(int wait_reqd);
+_thread_kern_poll(int wait_reqd);
+
+static void
+dequeue_signals(void);
static inline void
thread_run_switch_hook(pthread_t thread_out, pthread_t thread_in);
@@ -62,14 +66,12 @@ _thread_kern_sched(struct sigcontext * scp)
#ifndef __alpha__
char *fdata;
#endif
- pthread_t pthread;
- pthread_t pthread_h = NULL;
+ pthread_t pthread, pthread_h = NULL;
pthread_t last_thread = NULL;
struct itimerval itimer;
- struct timespec ts;
- struct timespec ts1;
- struct timeval tv;
- struct timeval tv1;
+ struct timespec ts, ts1;
+ struct timeval tv, tv1;
+ int i, set_timer = 0;
/*
* Flag the pthread kernel as executing scheduler code
@@ -111,6 +113,7 @@ __asm__("fnsave %0": :"m"(*fdata));
/* Run the installed switch hook: */
thread_run_switch_hook(_last_user_thread, _thread_run);
}
+
return;
} else
/* Flag the jump buffer was the last state saved: */
@@ -127,238 +130,208 @@ __asm__("fnsave %0": :"m"(*fdata));
* either a sigreturn (if the state was saved as a sigcontext) or a
* longjmp (if the state was saved by a setjmp).
*/
- while (_thread_link_list != NULL) {
+ while (!(TAILQ_EMPTY(&_thread_list))) {
/* Get the current time of day: */
gettimeofday(&tv, NULL);
TIMEVAL_TO_TIMESPEC(&tv, &ts);
/*
- * Poll file descriptors to update the state of threads
- * waiting on file I/O where data may be available:
+ * Protect the scheduling queues from access by the signal
+ * handler.
*/
- _thread_kern_select(0);
+ _queue_signals = 1;
- /*
- * Define the maximum time before a scheduling signal
- * is required:
- */
- itimer.it_value.tv_sec = 0;
- itimer.it_value.tv_usec = TIMESLICE_USEC;
-
- /*
- * The interval timer is not reloaded when it
- * times out. The interval time needs to be
- * calculated every time.
- */
- itimer.it_interval.tv_sec = 0;
- itimer.it_interval.tv_usec = 0;
-
- /*
- * Enter a loop to look for sleeping threads that are ready
- * or timedout. While we're at it, also find the smallest
- * timeout value for threads waiting for a time.
- */
- _waitingq_check_reqd = 0; /* reset flag before loop */
- TAILQ_FOREACH(pthread, &_waitingq, pqe) {
- /* Check if this thread is ready: */
- if (pthread->state == PS_RUNNING) {
- PTHREAD_WAITQ_REMOVE(pthread);
- PTHREAD_PRIOQ_INSERT_TAIL(pthread);
- }
+ if (_thread_run != &_thread_kern_thread) {
/*
- * Check if this thread is blocked by an
- * atomic lock:
+ * This thread no longer needs to yield the CPU.
+ */
+ _thread_run->yield_on_sig_undefer = 0;
+
+ /*
+ * Save the current time as the time that the thread
+ * became inactive:
*/
- else if (pthread->state == PS_SPINBLOCK) {
+ _thread_run->last_inactive.tv_sec = tv.tv_sec;
+ _thread_run->last_inactive.tv_usec = tv.tv_usec;
+
+ /*
+ * Place the currently running thread into the
+ * appropriate queue(s).
+ */
+ switch (_thread_run->state) {
+ case PS_DEAD:
/*
- * If the lock is available, let
- * the thread run.
+ * Dead threads are not placed in any queue:
*/
- if (pthread->data.spinlock->access_lock == 0) {
- PTHREAD_NEW_STATE(pthread,PS_RUNNING);
- }
+ break;
- /* Check if this thread is to timeout: */
- } else if (pthread->state == PS_COND_WAIT ||
- pthread->state == PS_SLEEP_WAIT ||
- pthread->state == PS_FDR_WAIT ||
- pthread->state == PS_FDW_WAIT ||
- pthread->state == PS_SELECT_WAIT) {
- /* Check if this thread is to wait forever: */
- if (pthread->wakeup_time.tv_sec == -1) {
- }
+ case PS_RUNNING:
/*
- * Check if this thread is to wakeup
- * immediately or if it is past its wakeup
- * time:
+ * Runnable threads can't be placed in the
+ * priority queue until after waiting threads
+ * are polled (to preserve round-robin
+ * scheduling).
*/
- else if ((pthread->wakeup_time.tv_sec == 0 &&
- pthread->wakeup_time.tv_nsec == 0) ||
- (ts.tv_sec > pthread->wakeup_time.tv_sec) ||
- ((ts.tv_sec == pthread->wakeup_time.tv_sec) &&
- (ts.tv_nsec >= pthread->wakeup_time.tv_nsec))) {
- /*
- * Check if this thread is waiting on
- * select:
- */
- if (pthread->state == PS_SELECT_WAIT) {
- /*
- * The select has timed out, so
- * zero the file descriptor
- * sets:
- */
- FD_ZERO(&pthread->data.select_data->readfds);
- FD_ZERO(&pthread->data.select_data->writefds);
- FD_ZERO(&pthread->data.select_data->exceptfds);
- pthread->data.select_data->nfds = 0;
- }
+ if ((_thread_run->slice_usec != -1) &&
+ (_thread_run->attr.sched_policy != SCHED_FIFO)) {
/*
- * Return an error as an interrupted
- * wait:
+ * Accumulate the number of microseconds that
+ * this thread has run for:
*/
- _thread_seterrno(pthread, EINTR);
+ _thread_run->slice_usec +=
+ (_thread_run->last_inactive.tv_sec -
+ _thread_run->last_active.tv_sec) * 1000000 +
+ _thread_run->last_inactive.tv_usec -
+ _thread_run->last_active.tv_usec;
+
+ /* Check for time quantum exceeded: */
+ if (_thread_run->slice_usec > TIMESLICE_USEC)
+ _thread_run->slice_usec = -1;
+ }
+ break;
- /*
- * Flag the timeout in the thread
- * structure:
- */
- pthread->timeout = 1;
+ /*
+ * States which do not depend on file descriptor I/O
+ * operations or timeouts:
+ */
+ case PS_DEADLOCK:
+ case PS_FDLR_WAIT:
+ case PS_FDLW_WAIT:
+ case PS_FILE_WAIT:
+ case PS_JOIN:
+ case PS_MUTEX_WAIT:
+ case PS_SIGSUSPEND:
+ case PS_SIGTHREAD:
+ case PS_SIGWAIT:
+ case PS_SUSPENDED:
+ case PS_WAIT_WAIT:
+ /* No timeouts for these states: */
+ _thread_run->wakeup_time.tv_sec = -1;
+ _thread_run->wakeup_time.tv_nsec = -1;
- /*
- * Change the threads state to allow
- * it to be restarted:
- */
- PTHREAD_NEW_STATE(pthread,PS_RUNNING);
- } else {
- /*
- * Calculate the time until this thread
- * is ready, allowing for the clock
- * resolution:
- */
- ts1.tv_sec = pthread->wakeup_time.tv_sec
- - ts.tv_sec;
- ts1.tv_nsec = pthread->wakeup_time.tv_nsec
- - ts.tv_nsec + CLOCK_RES_NSEC;
+ /* Restart the time slice: */
+ _thread_run->slice_usec = -1;
- /*
- * Check for underflow of the
- * nanosecond field:
- */
- if (ts1.tv_nsec < 0) {
- /*
- * Allow for the underflow
- * of the nanosecond field:
- */
- ts1.tv_sec--;
- ts1.tv_nsec += 1000000000;
- }
- /*
- * Check for overflow of the nanosecond
- * field:
- */
- if (ts1.tv_nsec >= 1000000000) {
- /*
- * Allow for the overflow of
- * the nanosecond field:
- */
- ts1.tv_sec++;
- ts1.tv_nsec -= 1000000000;
- }
- /*
- * Convert the timespec structure
- * to a timeval structure:
- */
- TIMESPEC_TO_TIMEVAL(&tv1, &ts1);
+ /* Insert into the waiting queue: */
+ PTHREAD_WAITQ_INSERT(_thread_run);
+ break;
- /*
- * Check if the thread will be ready
- * sooner than the earliest ones found
- * so far:
- */
- if (timercmp(&tv1, &itimer.it_value, <)) {
- /*
- * Update the time value:
- */
- itimer.it_value.tv_sec = tv1.tv_sec;
- itimer.it_value.tv_usec = tv1.tv_usec;
- }
- }
+ /* States which can timeout: */
+ case PS_COND_WAIT:
+ case PS_SLEEP_WAIT:
+ /* Restart the time slice: */
+ _thread_run->slice_usec = -1;
+
+ /* Insert into the waiting queue: */
+ PTHREAD_WAITQ_INSERT(_thread_run);
+ break;
+
+ /* States that require periodic work: */
+ case PS_SPINBLOCK:
+ /* No timeouts for this state: */
+ _thread_run->wakeup_time.tv_sec = -1;
+ _thread_run->wakeup_time.tv_nsec = -1;
+
+ /* Increment spinblock count: */
+ _spinblock_count++;
+ /* fall through */
+ case PS_FDR_WAIT:
+ case PS_FDW_WAIT:
+ case PS_POLL_WAIT:
+ case PS_SELECT_WAIT:
+ /* Restart the time slice: */
+ _thread_run->slice_usec = -1;
+
+ /* Insert into the waiting queue: */
+ PTHREAD_WAITQ_INSERT(_thread_run);
+
+ /* Insert into the work queue: */
+ PTHREAD_WORKQ_INSERT(_thread_run);
}
}
- /* Check if there is a current thread: */
- if (_thread_run != &_thread_kern_thread) {
- /*
- * This thread no longer needs to yield the CPU.
- */
- _thread_run->yield_on_sched_undefer = 0;
+ /* Unprotect the scheduling queues: */
+ _queue_signals = 0;
- /*
- * Save the current time as the time that the thread
- * became inactive:
- */
- _thread_run->last_inactive.tv_sec = tv.tv_sec;
- _thread_run->last_inactive.tv_usec = tv.tv_usec;
+ /*
+ * Poll file descriptors to update the state of threads
+ * waiting on file I/O where data may be available:
+ */
+ _thread_kern_poll(0);
+
+ /* Protect the scheduling queues: */
+ _queue_signals = 1;
+ /*
+ * Wake up threads that have timed out. This has to be
+ * done after polling in case a thread does a poll or
+ * select with zero time.
+ */
+ PTHREAD_WAITQ_SETACTIVE();
+ while (((pthread = TAILQ_FIRST(&_waitingq)) != NULL) &&
+ (pthread->wakeup_time.tv_sec != -1) &&
+ (((pthread->wakeup_time.tv_sec == 0) &&
+ (pthread->wakeup_time.tv_nsec == 0)) ||
+ (pthread->wakeup_time.tv_sec < ts.tv_sec) ||
+ ((pthread->wakeup_time.tv_sec == ts.tv_sec) &&
+ (pthread->wakeup_time.tv_nsec <= ts.tv_nsec)))) {
+ switch (pthread->state) {
+ case PS_POLL_WAIT:
+ case PS_SELECT_WAIT:
+ /* Return zero file descriptors ready: */
+ pthread->data.poll_data->nfds = 0;
+ /* fall through */
+ default:
+ /*
+ * Remove this thread from the waiting queue
+ * (and work queue if necessary) and place it
+ * in the ready queue.
+ */
+ PTHREAD_WAITQ_CLEARACTIVE();
+ if (pthread->flags & PTHREAD_FLAGS_IN_WORKQ)
+ PTHREAD_WORKQ_REMOVE(pthread);
+ PTHREAD_NEW_STATE(pthread, PS_RUNNING);
+ PTHREAD_WAITQ_SETACTIVE();
+ break;
+ }
/*
- * Accumulate the number of microseconds that this
- * thread has run for:
+ * Flag the timeout in the thread structure:
*/
- if ((_thread_run->slice_usec != -1) &&
- (_thread_run->attr.sched_policy != SCHED_FIFO)) {
- _thread_run->slice_usec +=
- (_thread_run->last_inactive.tv_sec -
- _thread_run->last_active.tv_sec) * 1000000 +
- _thread_run->last_inactive.tv_usec -
- _thread_run->last_active.tv_usec;
-
- /* Check for time quantum exceeded: */
- if (_thread_run->slice_usec > TIMESLICE_USEC)
- _thread_run->slice_usec = -1;
- }
- if (_thread_run->state == PS_RUNNING) {
- if (_thread_run->slice_usec == -1) {
- /*
- * The thread exceeded its time
- * quantum or it yielded the CPU;
- * place it at the tail of the
- * queue for its priority.
- */
- PTHREAD_PRIOQ_INSERT_TAIL(_thread_run);
- } else {
- /*
- * The thread hasn't exceeded its
- * interval. Place it at the head
- * of the queue for its priority.
- */
- PTHREAD_PRIOQ_INSERT_HEAD(_thread_run);
- }
- }
- else if (_thread_run->state == PS_DEAD) {
+ pthread->timeout = 1;
+ }
+ PTHREAD_WAITQ_CLEARACTIVE();
+
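
The wakeup loop above relies on _waitingq being kept ordered by wakeup time, so only the queue head ever needs checking; tv_sec == -1 marks a thread with no timeout, and (0, 0) one that wakes immediately. A small self-contained sketch of that head-of-queue sweep, with illustrative struct and field names:

    #include <sys/queue.h>
    #include <stdio.h>
    #include <time.h>

    struct sleeper {
        const char *name;
        struct timespec wakeup_time;    /* tv_sec == -1: waits forever */
        TAILQ_ENTRY(sleeper) qe;
    };

    TAILQ_HEAD(sleepq, sleeper);

    static int
    expired(const struct timespec *wake, const struct timespec *now)
    {
        if (wake->tv_sec == -1)
            return (0);     /* no timeout at all */
        if (wake->tv_sec == 0 && wake->tv_nsec == 0)
            return (1);     /* wakes immediately */
        return (wake->tv_sec < now->tv_sec ||
            (wake->tv_sec == now->tv_sec &&
            wake->tv_nsec <= now->tv_nsec));
    }

    int
    main(void)
    {
        struct sleepq q = TAILQ_HEAD_INITIALIZER(q);
        struct sleeper a = { "a", { 5, 0 } }, b = { "b", { 9, 0 } };
        struct timespec now = { 7, 0 };
        struct sleeper *s;

        /* The queue is kept ordered by wakeup time, earliest first: */
        TAILQ_INSERT_TAIL(&q, &a, qe);
        TAILQ_INSERT_TAIL(&q, &b, qe);

        /* Only the head can hold the earliest wakeup, so stop early: */
        while ((s = TAILQ_FIRST(&q)) != NULL &&
            expired(&s->wakeup_time, &now)) {
            TAILQ_REMOVE(&q, s, qe);
            printf("%s woke up\n", s->name);    /* prints a, not b */
        }
        return (0);
    }
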
+ /*
+ * Check if there is a current runnable thread that isn't
+ * already in the ready queue:
+ */
+ if ((_thread_run != &_thread_kern_thread) &&
+ (_thread_run->state == PS_RUNNING) &&
+ ((_thread_run->flags & PTHREAD_FLAGS_IN_PRIOQ) == 0)) {
+ if (_thread_run->slice_usec == -1) {
/*
- * Don't add dead threads to the waiting
- * queue, because when they're reaped, it
- * will corrupt the queue.
+ * The thread exceeded its time
+ * quantum or it yielded the CPU;
+ * place it at the tail of the
+ * queue for its priority.
*/
- }
- else {
+ PTHREAD_PRIOQ_INSERT_TAIL(_thread_run);
+ } else {
/*
- * This thread has changed state and needs
- * to be placed in the waiting queue.
+ * The thread hasn't exceeded its
+ * interval. Place it at the head
+ * of the queue for its priority.
*/
- PTHREAD_WAITQ_INSERT(_thread_run);
-
- /* Restart the time slice: */
- _thread_run->slice_usec = -1;
+ PTHREAD_PRIOQ_INSERT_HEAD(_thread_run);
}
}
/*
* Get the highest priority thread in the ready queue.
*/
- pthread_h = PTHREAD_PRIOQ_FIRST;
+ pthread_h = PTHREAD_PRIOQ_FIRST();
/* Check if there are no threads ready to run: */
if (pthread_h == NULL) {
@@ -369,18 +342,84 @@ __asm__("fnsave %0": :"m"(*fdata));
*/
_thread_run = &_thread_kern_thread;
+ /* Unprotect the scheduling queues: */
+ _queue_signals = 0;
+
/*
* There are no threads ready to run, so wait until
* something happens that changes this condition:
*/
- _thread_kern_select(1);
- } else {
+ _thread_kern_poll(1);
+ }
+ else {
+ /* Remove the thread from the ready queue: */
+ PTHREAD_PRIOQ_REMOVE(pthread_h);
+
+ /* Get first thread on the waiting list: */
+ pthread = TAILQ_FIRST(&_waitingq);
+
+ /* Check to see if there is more than one thread: */
+ if (pthread_h != TAILQ_FIRST(&_thread_list) ||
+ TAILQ_NEXT(pthread_h, tle) != NULL)
+ set_timer = 1;
+ else
+ set_timer = 0;
+
+ /* Unprotect the scheduling queues: */
+ _queue_signals = 0;
+
+ /*
+ * Check for signals queued while the scheduling
+ * queues were protected:
+ */
+ while (_sigq_check_reqd != 0) {
+ /* Clear before handling queued signals: */
+ _sigq_check_reqd = 0;
+
+ /* Protect the scheduling queues again: */
+ _queue_signals = 1;
+
+ dequeue_signals();
+
+ /*
+ * Check for a higher priority thread that
+ * became runnable due to signal handling.
+ */
+ if (((pthread = PTHREAD_PRIOQ_FIRST()) != NULL) &&
+ (pthread->active_priority > pthread_h->active_priority)) {
+ /*
+ * Insert the lower priority thread
+ * at the head of its priority list:
+ */
+ PTHREAD_PRIOQ_INSERT_HEAD(pthread_h);
+
+ /* Remove the thread from the ready queue: */
+ PTHREAD_PRIOQ_REMOVE(pthread);
+
+ /* There's a new thread in town: */
+ pthread_h = pthread;
+ }
+
+ /* Get first thread on the waiting list: */
+ pthread = TAILQ_FIRST(&_waitingq);
+
+ /*
+ * Check to see if there is more than one
+ * thread:
+ */
+ if (pthread_h != TAILQ_FIRST(&_thread_list) ||
+ TAILQ_NEXT(pthread_h, tle) != NULL)
+ set_timer = 1;
+ else
+ set_timer = 0;
+
+ /* Unprotect the scheduling queues: */
+ _queue_signals = 0;
+ }
+
/* Make the selected thread the current thread: */
_thread_run = pthread_h;
- /* Remove the thread from the ready queue. */
- PTHREAD_PRIOQ_REMOVE(_thread_run);
-
/*
* Save the current time as the time that the thread
* became active:
@@ -389,6 +428,76 @@ __asm__("fnsave %0": :"m"(*fdata));
_thread_run->last_active.tv_usec = tv.tv_usec;
/*
+ * Define the maximum time before a scheduling signal
+ * is required:
+ */
+ itimer.it_value.tv_sec = 0;
+ itimer.it_value.tv_usec = TIMESLICE_USEC;
+
+ /*
+ * The interval timer is not reloaded when it
+ * times out. The interval time needs to be
+ * calculated every time.
+ */
+ itimer.it_interval.tv_sec = 0;
+ itimer.it_interval.tv_usec = 0;
+
+ /* Get first thread on the waiting list: */
+ if ((pthread != NULL) &&
+ (pthread->wakeup_time.tv_sec != -1)) {
+ /*
+ * Calculate the time until this thread
+ * is ready, allowing for the clock
+ * resolution:
+ */
+ ts1.tv_sec = pthread->wakeup_time.tv_sec
+ - ts.tv_sec;
+ ts1.tv_nsec = pthread->wakeup_time.tv_nsec
+ - ts.tv_nsec + _clock_res_nsec;
+
+ /*
+ * Check for underflow of the nanosecond field:
+ */
+ if (ts1.tv_nsec < 0) {
+ /*
+ * Allow for the underflow of the
+ * nanosecond field:
+ */
+ ts1.tv_sec--;
+ ts1.tv_nsec += 1000000000;
+ }
+ /*
+ * Check for overflow of the nanosecond field:
+ */
+ if (ts1.tv_nsec >= 1000000000) {
+ /*
+ * Allow for the overflow of the
+ * nanosecond field:
+ */
+ ts1.tv_sec++;
+ ts1.tv_nsec -= 1000000000;
+ }
+ /*
+ * Convert the timespec structure to a
+ * timeval structure:
+ */
+ TIMESPEC_TO_TIMEVAL(&tv1, &ts1);
+
+ /*
+ * Check if the thread will be ready
+ * sooner than the earliest ones found
+ * so far:
+ */
+ if (timercmp(&tv1, &itimer.it_value, <)) {
+ /*
+ * Update the time value:
+ */
+ itimer.it_value.tv_sec = tv1.tv_sec;
+ itimer.it_value.tv_usec = tv1.tv_usec;
+ }
+ }
+
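
The arithmetic above is plain timespec subtraction plus one clock-resolution slack term, followed by a single borrow/carry to renormalize the nanosecond field; one adjustment suffices because the intermediate value stays within (-1e9, 2e9). As a standalone worked example:

    #include <stdio.h>
    #include <time.h>

    #define NSEC_PER_SEC 1000000000L

    static struct timespec
    time_until(struct timespec wakeup, struct timespec now, long res_nsec)
    {
        struct timespec ts1;

        ts1.tv_sec = wakeup.tv_sec - now.tv_sec;
        ts1.tv_nsec = wakeup.tv_nsec - now.tv_nsec + res_nsec;

        if (ts1.tv_nsec < 0) {              /* borrow from seconds */
            ts1.tv_sec--;
            ts1.tv_nsec += NSEC_PER_SEC;
        }
        if (ts1.tv_nsec >= NSEC_PER_SEC) {  /* carry into seconds */
            ts1.tv_sec++;
            ts1.tv_nsec -= NSEC_PER_SEC;
        }
        return (ts1);
    }

    int
    main(void)
    {
        struct timespec wakeup = { 2, 100000000 };
        struct timespec now = { 1, 900000000 };
        struct timespec left = time_until(wakeup, now, 10000000);

        /* 0.2s remain, plus 10 msec of resolution slack: 0.21s. */
        printf("%ld.%09ld\n", (long)left.tv_sec, left.tv_nsec);
        return (0);
    }
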
+ /*
* Check if this thread is running for the first time
* or running again after using its full time slice
* allocation:
@@ -399,7 +508,7 @@ __asm__("fnsave %0": :"m"(*fdata));
}
/* Check if there is more than one thread: */
- if (_thread_run != _thread_link_list || _thread_run->nxt != NULL) {
+ if (set_timer != 0) {
/*
* Start the interval timer for the
* calculated time interval:
@@ -462,6 +571,19 @@ __asm__("fnsave %0": :"m"(*fdata));
void
_thread_kern_sched_state(enum pthread_state state, char *fname, int lineno)
{
+ /*
+ * Flag the pthread kernel as executing scheduler code
+ * to avoid a scheduler signal from interrupting this
+ * execution and calling the scheduler again.
+ */
+ _thread_kern_in_sched = 1;
+
+ /*
+ * Prevent the signal handler from fiddling with this thread
+ * before its state is set and is placed into the proper queue.
+ */
+ _queue_signals = 1;
+
/* Change the state of the current thread: */
_thread_run->state = state;
_thread_run->fname = fname;
@@ -476,6 +598,20 @@ void
_thread_kern_sched_state_unlock(enum pthread_state state,
spinlock_t *lock, char *fname, int lineno)
{
+ /*
+ * Flag the pthread kernel as executing scheduler code
+ * to avoid a scheduler signal from interrupting this
+ * execution and calling the scheduler again.
+ */
+ _thread_kern_in_sched = 1;
+
+ /*
+ * Prevent the signal handler from fiddling with this thread
+ * before its state is set and it is placed into the proper
+ * queue(s).
+ */
+ _queue_signals = 1;
+
/* Change the state of the current thread: */
_thread_run->state = state;
_thread_run->fname = fname;
@@ -489,384 +625,193 @@ _thread_kern_sched_state_unlock(enum pthread_state state,
}
static void
-_thread_kern_select(int wait_reqd)
+_thread_kern_poll(int wait_reqd)
{
char bufr[128];
- fd_set fd_set_except;
- fd_set fd_set_read;
- fd_set fd_set_write;
int count = 0;
- int count_dec;
- int found_one;
- int i;
- int nfds = -1;
- int settimeout;
- pthread_t pthread;
+ int i, found;
+ int kern_pipe_added = 0;
+ int nfds = 0;
+ int timeout_ms = 0;
+ struct pthread *pthread, *pthread_next;
ssize_t num;
struct timespec ts;
- struct timespec ts1;
- struct timeval *p_tv;
struct timeval tv;
- struct timeval tv1;
-
- /* Zero the file descriptor sets: */
- FD_ZERO(&fd_set_read);
- FD_ZERO(&fd_set_write);
- FD_ZERO(&fd_set_except);
/* Check if the caller wants to wait: */
- if (wait_reqd) {
- /*
- * Add the pthread kernel pipe file descriptor to the read
- * set:
- */
- FD_SET(_thread_kern_pipe[0], &fd_set_read);
- nfds = _thread_kern_pipe[0];
-
+ if (wait_reqd == 0) {
+ timeout_ms = 0;
+ }
+ else {
/* Get the current time of day: */
gettimeofday(&tv, NULL);
TIMEVAL_TO_TIMESPEC(&tv, &ts);
+
+ _queue_signals = 1;
+ pthread = TAILQ_FIRST(&_waitingq);
+ _queue_signals = 0;
+
+ if ((pthread == NULL) || (pthread->wakeup_time.tv_sec == -1)) {
+ /*
+ * Either there are no threads in the waiting queue,
+ * or there are no threads that can timeout.
+ */
+ timeout_ms = INFTIM;
+ }
+ else {
+ /*
+ * Calculate the time left for the next thread to
+ * timeout allowing for the clock resolution:
+ */
+ timeout_ms = ((pthread->wakeup_time.tv_sec - ts.tv_sec) *
+ 1000) + ((pthread->wakeup_time.tv_nsec - ts.tv_nsec +
+ _clock_res_nsec) / 1000000);
+ /*
+ * Don't allow negative timeouts:
+ */
+ if (timeout_ms < 0)
+ timeout_ms = 0;
+ }
}
- /* Initialise the time value structure: */
- tv.tv_sec = 0;
- tv.tv_usec = 0;
+
+ /* Protect the scheduling queues: */
+ _queue_signals = 1;
/*
- * Enter a loop to process threads waiting on either file descriptors
- * or times:
+ * Check to see if the signal queue needs to be walked to look
+ * for threads awoken by a signal while in the scheduler. Only
+ * do this if a wait is specified; otherwise, the waiting queue
+ * will be checked after the zero-timed _poll.
*/
- _waitingq_check_reqd = 0; /* reset flag before loop */
- TAILQ_FOREACH (pthread, &_waitingq, pqe) {
- /* Assume that this state does not time out: */
- settimeout = 0;
+ while ((_sigq_check_reqd != 0) && (timeout_ms != 0)) {
+ /* Reset flag before handling queued signals: */
+ _sigq_check_reqd = 0;
+
+ dequeue_signals();
- /* Process according to thread state: */
- switch (pthread->state) {
/*
- * States which do not depend on file descriptor I/O
- * operations or timeouts:
+ * Check for a thread that became runnable due to
+ * a signal:
*/
- case PS_DEAD:
- case PS_DEADLOCK:
- case PS_FDLR_WAIT:
- case PS_FDLW_WAIT:
- case PS_FILE_WAIT:
- case PS_JOIN:
- case PS_MUTEX_WAIT:
- case PS_SIGTHREAD:
- case PS_SIGWAIT:
- case PS_STATE_MAX:
- case PS_WAIT_WAIT:
- case PS_SUSPENDED:
- /* Nothing to do here. */
- break;
-
- case PS_RUNNING:
+ if (PTHREAD_PRIOQ_FIRST() != NULL) {
/*
- * A signal occurred and made this thread ready
- * while in the scheduler or while the scheduling
- * queues were protected.
+ * Since there is at least one runnable thread,
+ * disable the wait.
*/
- PTHREAD_WAITQ_REMOVE(pthread);
- PTHREAD_PRIOQ_INSERT_TAIL(pthread);
- break;
+ timeout_ms = 0;
+ }
+ }
- /* File descriptor read wait: */
- case PS_FDR_WAIT:
- /* Add the file descriptor to the read set: */
- FD_SET(pthread->data.fd.fd, &fd_set_read);
+ /*
+ * Form the poll table:
+ */
+ nfds = 0;
+ if (timeout_ms != 0) {
+ /* Add the kernel pipe to the poll table: */
+ _thread_pfd_table[nfds].fd = _thread_kern_pipe[0];
+ _thread_pfd_table[nfds].events = POLLRDNORM;
+ _thread_pfd_table[nfds].revents = 0;
+ nfds++;
+ kern_pipe_added = 1;
+ }
+ PTHREAD_WAITQ_SETACTIVE();
+ TAILQ_FOREACH(pthread, &_workq, qe) {
+ switch (pthread->state) {
+ case PS_SPINBLOCK:
/*
- * Check if this file descriptor is greater than any
- * of those seen so far:
+ * If the lock is available, let the thread run.
*/
- if (pthread->data.fd.fd > nfds) {
- /* Remember this file descriptor: */
- nfds = pthread->data.fd.fd;
+ if (pthread->data.spinlock->access_lock == 0) {
+ PTHREAD_WAITQ_CLEARACTIVE();
+ PTHREAD_WORKQ_REMOVE(pthread);
+ PTHREAD_NEW_STATE(pthread,PS_RUNNING);
+ PTHREAD_WAITQ_SETACTIVE();
+ /* One less thread in a spinblock state: */
+ _spinblock_count--;
}
- /* Increment the file descriptor count: */
- count++;
+ break;
- /* This state can time out: */
- settimeout = 1;
+ /* File descriptor read wait: */
+ case PS_FDR_WAIT:
+ /* Limit number of polled files to table size: */
+ if (nfds < _thread_dtablesize) {
+ _thread_pfd_table[nfds].events = POLLRDNORM;
+ _thread_pfd_table[nfds].fd = pthread->data.fd.fd;
+ nfds++;
+ }
break;
/* File descriptor write wait: */
case PS_FDW_WAIT:
- /* Add the file descriptor to the write set: */
- FD_SET(pthread->data.fd.fd, &fd_set_write);
-
- /*
- * Check if this file descriptor is greater than any
- * of those seen so far:
- */
- if (pthread->data.fd.fd > nfds) {
- /* Remember this file descriptor: */
- nfds = pthread->data.fd.fd;
+ /* Limit number of polled files to table size: */
+ if (nfds < _thread_dtablesize) {
+ _thread_pfd_table[nfds].events = POLLWRNORM;
+ _thread_pfd_table[nfds].fd = pthread->data.fd.fd;
+ nfds++;
}
- /* Increment the file descriptor count: */
- count++;
-
- /* This state can time out: */
- settimeout = 1;
break;
- /* States that time out: */
- case PS_SLEEP_WAIT:
- case PS_COND_WAIT:
- /* Flag a timeout as required: */
- settimeout = 1;
- break;
-
- /* Select wait: */
+ /* File descriptor poll or select wait: */
+ case PS_POLL_WAIT:
case PS_SELECT_WAIT:
- /*
- * Enter a loop to process each file descriptor in
- * the thread-specific file descriptor sets:
- */
- for (i = 0; i < pthread->data.select_data->nfds; i++) {
- /*
- * Check if this file descriptor is set for
- * exceptions:
- */
- if (FD_ISSET(i, &pthread->data.select_data->exceptfds)) {
- /*
- * Add the file descriptor to the
- * exception set:
- */
- FD_SET(i, &fd_set_except);
-
- /*
- * Increment the file descriptor
- * count:
- */
- count++;
-
- /*
- * Check if this file descriptor is
- * greater than any of those seen so
- * far:
- */
- if (i > nfds) {
- /*
- * Remember this file
- * descriptor:
- */
- nfds = i;
- }
- }
- /*
- * Check if this file descriptor is set for
- * write:
- */
- if (FD_ISSET(i, &pthread->data.select_data->writefds)) {
- /*
- * Add the file descriptor to the
- * write set:
- */
- FD_SET(i, &fd_set_write);
-
- /*
- * Increment the file descriptor
- * count:
- */
- count++;
-
- /*
- * Check if this file descriptor is
- * greater than any of those seen so
- * far:
- */
- if (i > nfds) {
- /*
- * Remember this file
- * descriptor:
- */
- nfds = i;
- }
- }
- /*
- * Check if this file descriptor is set for
- * read:
- */
- if (FD_ISSET(i, &pthread->data.select_data->readfds)) {
- /*
- * Add the file descriptor to the
- * read set:
- */
- FD_SET(i, &fd_set_read);
-
- /*
- * Increment the file descriptor
- * count:
- */
- count++;
-
- /*
- * Check if this file descriptor is
- * greater than any of those seen so
- * far:
- */
- if (i > nfds) {
- /*
- * Remember this file
- * descriptor:
- */
- nfds = i;
- }
+ /* Limit number of polled files to table size: */
+ if (pthread->data.poll_data->nfds + nfds <
+ _thread_dtablesize) {
+ for (i = 0; i < pthread->data.poll_data->nfds; i++) {
+ _thread_pfd_table[nfds + i].fd =
+ pthread->data.poll_data->fds[i].fd;
+ _thread_pfd_table[nfds + i].events =
+ pthread->data.poll_data->fds[i].events;
}
+ nfds += pthread->data.poll_data->nfds;
}
-
- /* This state can time out: */
- settimeout = 1;
break;
- }
-
- /*
- * Check if the caller wants to wait and if the thread state
- * is one that times out:
- */
- if (wait_reqd && settimeout) {
- /* Check if this thread wants to wait forever: */
- if (pthread->wakeup_time.tv_sec == -1) {
- }
- /* Check if this thread doesn't want to wait at all: */
- else if (pthread->wakeup_time.tv_sec == 0 &&
- pthread->wakeup_time.tv_nsec == 0) {
- /* Override the caller's request to wait: */
- wait_reqd = 0;
- } else {
- /*
- * Calculate the time until this thread is
- * ready, allowing for the clock resolution:
- */
- ts1.tv_sec = pthread->wakeup_time.tv_sec - ts.tv_sec;
- ts1.tv_nsec = pthread->wakeup_time.tv_nsec - ts.tv_nsec +
- CLOCK_RES_NSEC;
-
- /*
- * Check for underflow of the nanosecond
- * field:
- */
- if (ts1.tv_nsec < 0) {
- /*
- * Allow for the underflow of the
- * nanosecond field:
- */
- ts1.tv_sec--;
- ts1.tv_nsec += 1000000000;
- }
- /*
- * Check for overflow of the nanosecond
- * field:
- */
- if (ts1.tv_nsec >= 1000000000) {
- /*
- * Allow for the overflow of the
- * nanosecond field:
- */
- ts1.tv_sec++;
- ts1.tv_nsec -= 1000000000;
- }
- /*
- * Convert the timespec structure to a
- * timeval structure:
- */
- TIMESPEC_TO_TIMEVAL(&tv1, &ts1);
- /*
- * Check if no time value has been found yet,
- * or if the thread will be ready sooner that
- * the earliest one found so far:
- */
- if ((tv.tv_sec == 0 && tv.tv_usec == 0) || timercmp(&tv1, &tv, <)) {
- /* Update the time value: */
- tv.tv_sec = tv1.tv_sec;
- tv.tv_usec = tv1.tv_usec;
- }
- }
+ /* Other states do not depend on file I/O. */
+ default:
+ break;
}
}
+ PTHREAD_WAITQ_CLEARACTIVE();
- /* Check if the caller wants to wait: */
- if (wait_reqd) {
- /* Check if no threads were found with timeouts: */
- if (tv.tv_sec == 0 && tv.tv_usec == 0) {
- /* Wait forever: */
- p_tv = NULL;
- } else {
- /*
- * Point to the time value structure which contains
- * the earliest time that a thread will be ready:
- */
- p_tv = &tv;
- }
+ /*
+ * Wait for a file descriptor to be ready for read, write, or
+ * an exception, or a timeout to occur:
+ */
+ count = _thread_sys_poll(_thread_pfd_table, nfds, timeout_ms);
+ if (kern_pipe_added != 0)
/*
- * Flag the pthread kernel as in a select. This is to avoid
- * the window between the next statement that unblocks
- * signals and the select statement which follows.
+ * Remove the pthread kernel pipe file descriptor
+ * from the pollfd table:
*/
- _thread_kern_in_select = 1;
+ nfds = 1;
+ else
+ nfds = 0;
+ /*
+ * Check if it is possible that there are bytes in the kernel
+ * read pipe waiting to be read:
+ */
+ if (count < 0 || ((kern_pipe_added != 0) &&
+ (_thread_pfd_table[0].revents & POLLRDNORM))) {
/*
- * Wait for a file descriptor to be ready for read, write, or
- * an exception, or a timeout to occur:
+ * If the kernel read pipe was included in the
+ * count:
*/
- count = _thread_sys_select(nfds + 1, &fd_set_read, &fd_set_write, &fd_set_except, p_tv);
+ if (count > 0) {
+ /* Decrement the count of file descriptors: */
+ count--;
+ }
- /* Reset the kernel in select flag: */
- _thread_kern_in_select = 0;
+ if (_sigq_check_reqd != 0) {
+ /* Reset flag before handling signals: */
+ _sigq_check_reqd = 0;
- /*
- * Check if it is possible that there are bytes in the kernel
- * read pipe waiting to be read:
- */
- if (count < 0 || FD_ISSET(_thread_kern_pipe[0], &fd_set_read)) {
- /*
- * Check if the kernel read pipe was included in the
- * count:
- */
- if (count > 0) {
- /*
- * Remove the kernel read pipe from the
- * count:
- */
- FD_CLR(_thread_kern_pipe[0], &fd_set_read);
-
- /* Decrement the count of file descriptors: */
- count--;
- }
- /*
- * Enter a loop to read (and trash) bytes from the
- * pthread kernel pipe:
- */
- while ((num = _thread_sys_read(_thread_kern_pipe[0], bufr, sizeof(bufr))) > 0) {
- /*
- * The buffer read contains one byte per
- * signal and each byte is the signal number.
- * This data is not used, but the fact that
- * the signal handler wrote to the pipe *is*
- * used to cause the _select call
- * to complete if the signal occurred between
- * the time when signals were unblocked and
- * the _select select call being
- * made.
- */
- }
+ dequeue_signals();
}
}
- /* Check if there are file descriptors to poll: */
- else if (count > 0) {
- /*
- * Point to the time value structure which has been zeroed so
- * that the call to _select will not wait:
- */
- p_tv = &tv;
-
- /* Poll file descrptors without wait: */
- count = _thread_sys_select(nfds + 1, &fd_set_read, &fd_set_write, &fd_set_except, p_tv);
- }
/*
* Check if any file descriptors are ready:
@@ -875,301 +820,133 @@ _thread_kern_select(int wait_reqd)
/*
* Enter a loop to look for threads waiting on file
* descriptors that are flagged as available by the
- * _select syscall:
+ * _poll syscall:
*/
- TAILQ_FOREACH (pthread, &_waitingq, pqe) {
- /* Process according to thread state: */
+ PTHREAD_WAITQ_SETACTIVE();
+ TAILQ_FOREACH(pthread, &_workq, qe) {
switch (pthread->state) {
- /*
- * States which do not depend on file
- * descriptor I/O operations:
- */
- case PS_COND_WAIT:
- case PS_DEAD:
- case PS_DEADLOCK:
- case PS_FDLR_WAIT:
- case PS_FDLW_WAIT:
- case PS_FILE_WAIT:
- case PS_JOIN:
- case PS_MUTEX_WAIT:
- case PS_SIGWAIT:
- case PS_SLEEP_WAIT:
- case PS_WAIT_WAIT:
- case PS_SIGTHREAD:
- case PS_STATE_MAX:
- case PS_SUSPENDED:
- /* Nothing to do here. */
- break;
-
- case PS_RUNNING:
+ case PS_SPINBLOCK:
/*
- * A signal occurred and made this thread
- * ready while in the scheduler.
+ * If the lock is available, let the thread run.
*/
- PTHREAD_WAITQ_REMOVE(pthread);
- PTHREAD_PRIOQ_INSERT_TAIL(pthread);
- break;
+ if (pthread->data.spinlock->access_lock == 0) {
+ PTHREAD_WAITQ_CLEARACTIVE();
+ PTHREAD_WORKQ_REMOVE(pthread);
+ PTHREAD_NEW_STATE(pthread,PS_RUNNING);
+ PTHREAD_WAITQ_SETACTIVE();
- /* File descriptor read wait: */
- case PS_FDR_WAIT:
- /*
- * Check if the file descriptor is available
- * for read:
- */
- if (FD_ISSET(pthread->data.fd.fd, &fd_set_read)) {
/*
- * Change the thread state to allow
- * it to read from the file when it
- * is scheduled next:
+ * One less thread in a spinblock state:
*/
- pthread->state = PS_RUNNING;
+ _spinblock_count--;
+ }
+ break;
- /*
- * Remove it from the waiting queue
- * and add it to the ready queue:
- */
- PTHREAD_WAITQ_REMOVE(pthread);
- PTHREAD_PRIOQ_INSERT_TAIL(pthread);
+ /* File descriptor read wait: */
+ case PS_FDR_WAIT:
+ if ((nfds < _thread_dtablesize) &&
+ (_thread_pfd_table[nfds].revents & POLLRDNORM)) {
+ PTHREAD_WAITQ_CLEARACTIVE();
+ PTHREAD_WORKQ_REMOVE(pthread);
+ PTHREAD_NEW_STATE(pthread,PS_RUNNING);
+ PTHREAD_WAITQ_SETACTIVE();
}
+ nfds++;
break;
/* File descriptor write wait: */
case PS_FDW_WAIT:
- /*
- * Check if the file descriptor is available
- * for write:
- */
- if (FD_ISSET(pthread->data.fd.fd, &fd_set_write)) {
- /*
- * Change the thread state to allow
- * it to write to the file when it is
- * scheduled next:
- */
- pthread->state = PS_RUNNING;
-
- /*
- * Remove it from the waiting queue
- * and add it to the ready queue:
- */
- PTHREAD_WAITQ_REMOVE(pthread);
- PTHREAD_PRIOQ_INSERT_TAIL(pthread);
+ if ((nfds < _thread_dtablesize) &&
+ (_thread_pfd_table[nfds].revents & POLLWRNORM)) {
+ PTHREAD_WAITQ_CLEARACTIVE();
+ PTHREAD_WORKQ_REMOVE(pthread);
+ PTHREAD_NEW_STATE(pthread,PS_RUNNING);
+ PTHREAD_WAITQ_SETACTIVE();
}
+ nfds++;
break;
- /* Select wait: */
+ /* File descriptor poll or select wait: */
+ case PS_POLL_WAIT:
case PS_SELECT_WAIT:
- /*
- * Reset the flag that indicates if a file
- * descriptor is ready for some type of
- * operation:
- */
- count_dec = 0;
-
- /*
- * Enter a loop to search though the
- * thread-specific select file descriptors
- * for the first descriptor that is ready:
- */
- for (i = 0; i < pthread->data.select_data->nfds && count_dec == 0; i++) {
+ if (pthread->data.poll_data->nfds + nfds <
+ _thread_dtablesize) {
/*
- * Check if this file descriptor does
- * not have an exception:
+ * Enter a loop looking for I/O
+ * readiness:
*/
- if (FD_ISSET(i, &pthread->data.select_data->exceptfds) && FD_ISSET(i, &fd_set_except)) {
- /*
- * Flag this file descriptor
- * as ready:
- */
- count_dec = 1;
- }
- /*
- * Check if this file descriptor is
- * not ready for write:
- */
- if (FD_ISSET(i, &pthread->data.select_data->writefds) && FD_ISSET(i, &fd_set_write)) {
- /*
- * Flag this file descriptor
- * as ready:
- */
- count_dec = 1;
+ found = 0;
+ for (i = 0; i < pthread->data.poll_data->nfds; i++) {
+ if (_thread_pfd_table[nfds + i].revents != 0) {
+ pthread->data.poll_data->fds[i].revents =
+ _thread_pfd_table[nfds + i].revents;
+ found++;
+ }
}
- /*
- * Check if this file descriptor is
- * not ready for read:
- */
- if (FD_ISSET(i, &pthread->data.select_data->readfds) && FD_ISSET(i, &fd_set_read)) {
- /*
- * Flag this file descriptor
- * as ready:
- */
- count_dec = 1;
+
+ /* Increment nfds before poll_data->nfds is overwritten: */
+ nfds += pthread->data.poll_data->nfds;
+
+ if (found != 0) {
+ pthread->data.poll_data->nfds = found;
+ PTHREAD_WAITQ_CLEARACTIVE();
+ PTHREAD_WORKQ_REMOVE(pthread);
+ PTHREAD_NEW_STATE(pthread,PS_RUNNING);
+ PTHREAD_WAITQ_SETACTIVE();
}
}
+ else
+ nfds += pthread->data.poll_data->nfds;
+ break;
+ /* Other states do not depend on file I/O. */
+ default:
+ break;
+ }
+ }
+ PTHREAD_WAITQ_CLEARACTIVE();
+ }
+ else if (_spinblock_count != 0) {
+ /*
+ * Enter a loop to look for threads waiting on a spinlock
+ * that is now available.
+ */
+ PTHREAD_WAITQ_SETACTIVE();
+ TAILQ_FOREACH(pthread, &_workq, qe) {
+ if (pthread->state == PS_SPINBLOCK) {
/*
- * Check if any file descriptors are ready
- * for the current thread:
+ * If the lock is available, let the thread run.
*/
- if (count_dec) {
- /*
- * Reset the count of file
- * descriptors that are ready for
- * this thread:
- */
- found_one = 0;
-
- /*
- * Enter a loop to search though the
- * thread-specific select file
- * descriptors:
- */
- for (i = 0; i < pthread->data.select_data->nfds; i++) {
- /*
- * Reset the count of
- * operations for which the
- * current file descriptor is
- * ready:
- */
- count_dec = 0;
-
- /*
- * Check if this file
- * descriptor is selected for
- * exceptions:
- */
- if (FD_ISSET(i, &pthread->data.select_data->exceptfds)) {
- /*
- * Check if this file
- * descriptor has an
- * exception:
- */
- if (FD_ISSET(i, &fd_set_except)) {
- /*
- * Increment
- * the count
- * for this
- * file:
- */
- count_dec++;
- } else {
- /*
- * Clear the
- * file
- * descriptor
- * in the
- * thread-spec
- * ific file
- * descriptor
- * set:
- */
- FD_CLR(i, &pthread->data.select_data->exceptfds);
- }
- }
- /*
- * Check if this file
- * descriptor is selected for
- * write:
- */
- if (FD_ISSET(i, &pthread->data.select_data->writefds)) {
- /*
- * Check if this file
- * descriptor is
- * ready for write:
- */
- if (FD_ISSET(i, &fd_set_write)) {
- /*
- * Increment
- * the count
- * for this
- * file:
- */
- count_dec++;
- } else {
- /*
- * Clear the
- * file
- * descriptor
- * in the
- * thread-spec
- * ific file
- * descriptor
- * set:
- */
- FD_CLR(i, &pthread->data.select_data->writefds);
- }
- }
- /*
- * Check if this file
- * descriptor is selected for
- * read:
- */
- if (FD_ISSET(i, &pthread->data.select_data->readfds)) {
- /*
- * Check if this file
- * descriptor is
- * ready for read:
- */
- if (FD_ISSET(i, &fd_set_read)) {
- /*
- * Increment
- * the count
- * for this
- * file:
- */
- count_dec++;
- } else {
- /*
- * Clear the
- * file
- * descriptor
- * in the
- * thread-spec
- * ific file
- * descriptor
- * set:
- */
- FD_CLR(i, &pthread->data.select_data->readfds);
- }
- }
- /*
- * Check if the current file
- * descriptor is ready for
- * any one of the operations:
- */
- if (count_dec > 0) {
- /*
- * Increment the
- * count of file
- * descriptors that
- * are ready for the
- * current thread:
- */
- found_one++;
- }
- }
-
- /*
- * Return the number of file
- * descriptors that are ready:
- */
- pthread->data.select_data->nfds = found_one;
-
- /*
- * Change the state of the current
- * thread to run:
- */
- pthread->state = PS_RUNNING;
+ if (pthread->data.spinlock->access_lock == 0) {
+ PTHREAD_WAITQ_CLEARACTIVE();
+ PTHREAD_WORKQ_REMOVE(pthread);
+ PTHREAD_NEW_STATE(pthread,PS_RUNNING);
+ PTHREAD_WAITQ_SETACTIVE();
/*
- * Remove it from the waiting queue
- * and add it to the ready queue:
+ * One less thread in a spinblock state:
*/
- PTHREAD_WAITQ_REMOVE(pthread);
- PTHREAD_PRIOQ_INSERT_TAIL(pthread);
+ _spinblock_count--;
}
- break;
}
}
+ PTHREAD_WAITQ_CLEARACTIVE();
+ }
+
+ /* Unprotect the scheduling queues: */
+ _queue_signals = 0;
+
+ while (_sigq_check_reqd != 0) {
+ /* Handle queued signals: */
+ _sigq_check_reqd = 0;
+
+ /* Protect the scheduling queues: */
+ _queue_signals = 1;
+
+ dequeue_signals();
+
+ /* Unprotect the scheduling queues: */
+ _queue_signals = 0;
}
/* Nothing to return. */
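
Pulling the pieces of _thread_kern_poll together: slot 0 of the pollfd table is the thread kernel pipe (so a signal handler can wake the poll by writing a byte), the remaining slots carry the waiters' descriptors, and pipe traffic is subtracted from the ready count. A runnable sketch of just that mechanism, assuming a BSD-style <poll.h> and using illustrative names:

    #include <poll.h>
    #include <stdio.h>
    #include <unistd.h>

    int
    main(void)
    {
        int kern_pipe[2];
        struct pollfd pfd[2];
        char bufr[128];
        int count;

        if (pipe(kern_pipe) != 0)
            return (1);

        /* Slot 0: the thread kernel pipe, as in _thread_kern_poll(): */
        pfd[0].fd = kern_pipe[0];
        pfd[0].events = POLLRDNORM;
        pfd[0].revents = 0;

        /* Slot 1: a descriptor some thread is blocked reading: */
        pfd[1].fd = STDIN_FILENO;
        pfd[1].events = POLLRDNORM;
        pfd[1].revents = 0;

        /* A signal handler would do this write to wake the poll: */
        (void)write(kern_pipe[1], "x", 1);

        count = poll(pfd, 2, 100);  /* timeout_ms from the waiting queue */

        /* Pipe traffic isn't a ready waiter; take it out of the count: */
        if (count > 0 && (pfd[0].revents & POLLRDNORM)) {
            count--;
            (void)read(kern_pipe[0], bufr, sizeof(bufr));   /* drain */
        }
        printf("%d waiter(s) ready\n", count);
        return (0);
    }
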
@@ -1219,59 +996,101 @@ _thread_kern_set_timeout(struct timespec * timeout)
}
void
-_thread_kern_sched_defer(void)
+_thread_kern_sig_defer(void)
{
- /* Allow scheduling deferral to be recursive. */
- _thread_run->sched_defer_count++;
+ /* Allow signal deferral to be recursive. */
+ _thread_run->sig_defer_count++;
}
void
-_thread_kern_sched_undefer(void)
+_thread_kern_sig_undefer(void)
{
pthread_t pthread;
int need_resched = 0;
/*
* Perform checks to yield only if we are about to undefer
- * scheduling.
+ * signals.
*/
- if (_thread_run->sched_defer_count == 1) {
+ if (_thread_run->sig_defer_count > 1) {
+ /* Decrement the signal deferral count. */
+ _thread_run->sig_defer_count--;
+ }
+ else if (_thread_run->sig_defer_count == 1) {
+ /* Reenable signals: */
+ _thread_run->sig_defer_count = 0;
+
/*
- * Check if the waiting queue needs to be examined for
- * threads that are now ready:
+ * Check if there are queued signals:
*/
- while (_waitingq_check_reqd != 0) {
- /* Clear the flag before checking the waiting queue: */
- _waitingq_check_reqd = 0;
-
- TAILQ_FOREACH(pthread, &_waitingq, pqe) {
- if (pthread->state == PS_RUNNING) {
- PTHREAD_WAITQ_REMOVE(pthread);
- PTHREAD_PRIOQ_INSERT_TAIL(pthread);
- }
+ while (_sigq_check_reqd != 0) {
+ /* Defer scheduling while we process queued signals: */
+ _thread_run->sig_defer_count = 1;
+
+ /* Clear the flag before checking the signal queue: */
+ _sigq_check_reqd = 0;
+
+ /* Dequeue and handle signals: */
+ dequeue_signals();
+
+ /*
+ * Unless a reschedule is already pending, check
+ * whether signal handling caused a higher priority
+ * thread to become ready.
+ */
+ if ((need_resched == 0) &&
+ (((pthread = PTHREAD_PRIOQ_FIRST()) != NULL) &&
+ (pthread->active_priority > _thread_run->active_priority))) {
+ need_resched = 1;
}
+
+ /* Reenable signals: */
+ _thread_run->sig_defer_count = 0;
}
- /*
- * We need to yield if a thread change of state caused a
- * higher priority thread to become ready, or if a
- * scheduling signal occurred while preemption was disabled.
- */
- if ((((pthread = PTHREAD_PRIOQ_FIRST) != NULL) &&
- (pthread->active_priority > _thread_run->active_priority)) ||
- (_thread_run->yield_on_sched_undefer != 0)) {
- _thread_run->yield_on_sched_undefer = 0;
- need_resched = 1;
+ /* Yield the CPU if necessary: */
+ if (need_resched || _thread_run->yield_on_sig_undefer != 0) {
+ _thread_run->yield_on_sig_undefer = 0;
+ _thread_kern_sched(NULL);
}
}
+}
- if (_thread_run->sched_defer_count > 0) {
- /* Decrement the scheduling deferral count. */
- _thread_run->sched_defer_count--;
+static void
+dequeue_signals(void)
+{
+ char bufr[128];
+ int i, num;
- /* Yield the CPU if necessary: */
- if (need_resched)
- _thread_kern_sched(NULL);
+ /*
+ * Enter a loop to read and handle queued signals from the
+ * pthread kernel pipe:
+ */
+ while (((num = _thread_sys_read(_thread_kern_pipe[0], bufr,
+ sizeof(bufr))) > 0) || (num == -1 && errno == EINTR)) {
+ /*
+ * The buffer read contains one byte per signal and
+ * each byte is the signal number.
+ */
+ for (i = 0; i < num; i++) {
+ if ((int) bufr[i] == _SCHED_SIGNAL) {
+ /*
+ * Scheduling signals shouldn't ever be
+ * queued; just ignore it for now.
+ */
+ }
+ else {
+ /* Handle this signal: */
+ _thread_sig_handle((int) bufr[i], NULL);
+ }
+ }
+ }
+ if ((num < 0) && (errno != EAGAIN)) {
+ /*
+ * The only error we should expect is if there is
+ * no data to read.
+ */
+ PANIC("Unable to read from thread kernel pipe");
}
}
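
The sig_defer/sig_undefer pair above is a recursive critical-section counter: inner undefers only decrement, and the outermost one drains any signals queued in the meantime and decides whether to yield. A stripped-down illustration of the counter discipline; the names and the printf stand-in are mine, not the library's:

    #include <stdio.h>

    static int sig_defer_count;
    static int work_pending;

    static void
    sig_defer(void)
    {
        sig_defer_count++;      /* nesting is allowed */
    }

    static void
    sig_undefer(void)
    {
        if (sig_defer_count > 1) {
            sig_defer_count--;  /* still inside an outer region */
            return;
        }
        if (sig_defer_count == 1) {
            sig_defer_count = 0;
            while (work_pending) {  /* drain work queued meanwhile */
                work_pending = 0;
                printf("handling deferred work\n");
            }
        }
    }

    int
    main(void)
    {
        sig_defer();
        sig_defer();        /* nested critical region */
        work_pending = 1;   /* e.g. a queued signal */
        sig_undefer();      /* inner: just decrements */
        sig_undefer();      /* outer: drains the work */
        return (0);
    }
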
diff --git a/lib/libc_r/uthread/uthread_kill.c b/lib/libc_r/uthread/uthread_kill.c
index c729179..f5bb6e2 100644
--- a/lib/libc_r/uthread/uthread_kill.c
+++ b/lib/libc_r/uthread/uthread_kill.c
@@ -29,6 +29,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
+ * $Id$
*/
#include <errno.h>
#include <signal.h>
@@ -46,18 +47,18 @@ pthread_kill(pthread_t pthread, int sig)
/* Invalid signal: */
ret = EINVAL;
- /* Ignored signals get dropped on the floor. */
- else if (_thread_sigact[sig - 1].sa_handler == SIG_IGN)
- ret = 0;
-
- /* Find the thread in the list of active threads: */
- else if ((ret = _find_thread(pthread)) == 0) {
+ /*
+ * Ensure the thread is in the list of active threads, and the
+ * signal is valid (signal 0 specifies error checking only) and
+ * not being ignored:
+ */
+ else if (((ret = _find_thread(pthread)) == 0) && (sig > 0) &&
+ (_thread_sigact[sig - 1].sa_handler != SIG_IGN)) {
/*
- * Guard against preemption by a scheduling signal.
- * A change of thread state modifies the waiting
- * and priority queues.
+ * Defer signals to protect the scheduling queues from
+ * access by the signal handler:
*/
- _thread_kern_sched_defer();
+ _thread_kern_sig_defer();
switch (pthread->state) {
case PS_SIGSUSPEND:
@@ -90,15 +91,19 @@ pthread_kill(pthread_t pthread, int sig)
sigaddset(&pthread->sigpend,sig);
break;
- case PS_SELECT_WAIT:
case PS_FDR_WAIT:
case PS_FDW_WAIT:
+ case PS_POLL_WAIT:
case PS_SLEEP_WAIT:
+ case PS_SELECT_WAIT:
if (!sigismember(&pthread->sigmask, sig) &&
(_thread_sigact[sig - 1].sa_handler != SIG_IGN)) {
/* Flag the operation as interrupted: */
pthread->interrupted = 1;
+ if (pthread->flags & PTHREAD_FLAGS_IN_WORKQ)
+ PTHREAD_WORKQ_REMOVE(pthread);
+
/* Change the state of the thread to run: */
PTHREAD_NEW_STATE(pthread,PS_RUNNING);
@@ -116,11 +121,43 @@ pthread_kill(pthread_t pthread, int sig)
break;
}
+
+ /*
+ * Check that a custom handler is installed
+ * and that the signal is not blocked:
+ */
+ if (_thread_sigact[sig - 1].sa_handler != SIG_DFL &&
+ _thread_sigact[sig - 1].sa_handler != SIG_IGN &&
+ sigismember(&pthread->sigpend, sig) &&
+ !sigismember(&pthread->sigmask, sig)) {
+ pthread_t pthread_saved = _thread_run;
+
+ /* Current thread inside critical region? */
+ if (_thread_run->sig_defer_count > 0)
+ pthread->sig_defer_count++;
+
+ _thread_run = pthread;
+
+ /* Clear the pending signal: */
+ sigdelset(&pthread->sigpend, sig);
+
+ /*
+ * Dispatch the signal via the custom signal
+ * handler:
+ */
+ (*(_thread_sigact[sig - 1].sa_handler))(sig);
+
+ _thread_run = pthread_saved;
+
+ if (_thread_run->sig_defer_count > 0)
+ pthread->sig_defer_count--;
+ }
+
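
The block above dispatches an unblocked, pending signal directly from pthread_kill by temporarily making the target thread the current one, which is what keeps sigsuspend working per the commit log. A toy sketch of the save/swap/restore shape, with invented names:

    #include <stdio.h>

    struct thr { const char *name; };

    static struct thr *current;

    static void
    handler(int sig)
    {
        printf("signal %d delivered to %s\n", sig, current->name);
    }

    static void
    dispatch(struct thr *target, int sig)
    {
        struct thr *saved = current;    /* like pthread_saved above */

        current = target;   /* the handler sees the target thread */
        handler(sig);
        current = saved;    /* restore the real current thread */
    }

    int
    main(void)
    {
        struct thr a = { "main" }, b = { "worker" };

        current = &a;
        dispatch(&b, 1);
        return (0);
    }
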
/*
- * Reenable preemption and yield if a scheduling signal
- * occurred while in the critical region.
+ * Undefer and handle pending signals, yielding if
+ * necessary:
*/
- _thread_kern_sched_undefer();
+ _thread_kern_sig_undefer();
}
/* Return the completion status: */
diff --git a/lib/libc_r/uthread/uthread_mutex.c b/lib/libc_r/uthread/uthread_mutex.c
index fa9c8cf..c967b46 100644
--- a/lib/libc_r/uthread/uthread_mutex.c
+++ b/lib/libc_r/uthread/uthread_mutex.c
@@ -29,6 +29,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
+ * $Id$
*/
#include <stdlib.h>
#include <errno.h>
@@ -39,6 +40,25 @@
#include <pthread.h>
#include "pthread_private.h"
+#if defined(_PTHREADS_INVARIANTS)
+#define _MUTEX_INIT_LINK(m) do { \
+ (m)->m_qe.tqe_prev = NULL; \
+ (m)->m_qe.tqe_next = NULL; \
+} while (0)
+#define _MUTEX_ASSERT_IS_OWNED(m) do { \
+ if ((m)->m_qe.tqe_prev == NULL) \
+ PANIC("mutex is not on list"); \
+} while (0)
+#define _MUTEX_ASSERT_NOT_OWNED(m) do { \
+ if (((m)->m_qe.tqe_prev != NULL) || \
+ ((m)->m_qe.tqe_next != NULL)) \
+ PANIC("mutex is on list"); \
+} while (0)
+#else
+#define _MUTEX_INIT_LINK(m)
+#define _MUTEX_ASSERT_IS_OWNED(m)
+#define _MUTEX_ASSERT_NOT_OWNED(m)
+#endif
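
These _PTHREADS_INVARIANTS macros exploit a TAILQ detail: an element that is on no list can have its tqe_prev/tqe_next cleared, so double insertion and double removal become cheap panics. A self-contained demo of the same trick; PANIC here is a local stand-in, not the library's:

    #include <sys/queue.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define PANIC(msg) do { fprintf(stderr, "%s\n", msg); abort(); } while (0)

    struct mutex {
        TAILQ_ENTRY(mutex) m_qe;
    };

    TAILQ_HEAD(mutexq, mutex);

    #define MUTEX_INIT_LINK(m) do {            \
        (m)->m_qe.tqe_prev = NULL;             \
        (m)->m_qe.tqe_next = NULL;             \
    } while (0)
    #define MUTEX_ASSERT_IS_OWNED(m) do {      \
        if ((m)->m_qe.tqe_prev == NULL)        \
            PANIC("mutex is not on list");     \
    } while (0)
    #define MUTEX_ASSERT_NOT_OWNED(m) do {     \
        if ((m)->m_qe.tqe_prev != NULL ||      \
            (m)->m_qe.tqe_next != NULL)        \
            PANIC("mutex is on list");         \
    } while (0)

    int
    main(void)
    {
        struct mutexq owned = TAILQ_HEAD_INITIALIZER(owned);
        struct mutex m;

        MUTEX_INIT_LINK(&m);
        MUTEX_ASSERT_NOT_OWNED(&m); /* ok: links are cleared */
        TAILQ_INSERT_TAIL(&owned, &m, m_qe);
        MUTEX_ASSERT_IS_OWNED(&m);  /* ok: prev now points into list */
        TAILQ_REMOVE(&owned, &m, m_qe);
        MUTEX_INIT_LINK(&m);        /* re-clear the stale links */
        return (0);
    }
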
/*
* Prototypes
@@ -55,6 +75,34 @@ static inline void mutex_queue_enq(pthread_mutex_t, pthread_t);
static spinlock_t static_init_lock = _SPINLOCK_INITIALIZER;
+/* Reinitialize a mutex to defaults. */
+int
+_mutex_reinit(pthread_mutex_t * mutex)
+{
+ int ret = 0;
+
+ if (mutex == NULL)
+ ret = EINVAL;
+ else if (*mutex == NULL)
+ ret = pthread_mutex_init(mutex, NULL);
+ else {
+ /*
+ * Initialize the mutex structure:
+ */
+ (*mutex)->m_type = PTHREAD_MUTEX_DEFAULT;
+ (*mutex)->m_protocol = PTHREAD_PRIO_NONE;
+ TAILQ_INIT(&(*mutex)->m_queue);
+ (*mutex)->m_owner = NULL;
+ (*mutex)->m_data.m_count = 0;
+ (*mutex)->m_flags = MUTEX_FLAGS_INITED;
+ (*mutex)->m_refcount = 0;
+ (*mutex)->m_prio = 0;
+ (*mutex)->m_saved_prio = 0;
+ _MUTEX_INIT_LINK(*mutex);
+ memset(&(*mutex)->lock, 0, sizeof((*mutex)->lock));
+ }
+ return (ret);
+}
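
_mutex_reinit exists for the post-fork case from the commit log: after fork only the calling thread survives in the child, so a mutex held by any other thread would never be unlocked. A portable, hedged sketch of the idea using pthread_atfork; the real library resets the struct fields directly as above, and this initializer-copy trick merely assumes a mutex type that tolerates re-initialization:

    #include <pthread.h>
    #include <unistd.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    static void
    child_reinit(void)
    {
        /*
         * In the child no other thread exists to unlock 'lock';
         * reset it to a known state, as _mutex_reinit() does above.
         */
        pthread_mutex_t fresh = PTHREAD_MUTEX_INITIALIZER;

        lock = fresh;
    }

    int
    main(void)
    {
        pthread_atfork(NULL, NULL, child_reinit);
        if (fork() == 0)
            _exit(pthread_mutex_lock(&lock));   /* child: lock works */
        return (0);
    }
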
int
pthread_mutex_init(pthread_mutex_t * mutex,
@@ -138,6 +186,7 @@ pthread_mutex_init(pthread_mutex_t * mutex,
else
pmutex->m_prio = 0;
pmutex->m_saved_prio = 0;
+ _MUTEX_INIT_LINK(pmutex);
memset(&pmutex->lock, 0, sizeof(pmutex->lock));
*mutex = pmutex;
} else {
@@ -147,7 +196,7 @@ pthread_mutex_init(pthread_mutex_t * mutex,
}
}
/* Return the completion status: */
- return (ret);
+ return(ret);
}
int
@@ -177,6 +226,7 @@ pthread_mutex_destroy(pthread_mutex_t * mutex)
* Free the memory allocated for the mutex
* structure:
*/
+ _MUTEX_ASSERT_NOT_OWNED(*mutex);
free(*mutex);
/*
@@ -222,28 +272,24 @@ pthread_mutex_trylock(pthread_mutex_t * mutex)
*/
else if (*mutex != NULL || (ret = init_static(mutex)) == 0) {
/*
+ * Defer signals to protect the scheduling queues from
+ * access by the signal handler:
+ */
+ _thread_kern_sig_defer();
+
+ /* Lock the mutex structure: */
+ _SPINLOCK(&(*mutex)->lock);
+
+ /*
* If the mutex was statically allocated, properly
* initialize the tail queue.
*/
if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
TAILQ_INIT(&(*mutex)->m_queue);
+ _MUTEX_INIT_LINK(*mutex);
(*mutex)->m_flags |= MUTEX_FLAGS_INITED;
}
- /*
- * Guard against being preempted by a scheduling signal.
- * To support priority inheritence mutexes, we need to
- * maintain lists of mutex ownerships for each thread as
- * well as lists of waiting threads for each mutex. In
- * order to propagate priorities we need to atomically
- * walk these lists and cannot rely on a single mutex
- * lock to provide protection against modification.
- */
- _thread_kern_sched_defer();
-
- /* Lock the mutex structure: */
- _SPINLOCK(&(*mutex)->lock);
-
/* Process according to mutex type: */
switch ((*mutex)->m_protocol) {
/* Default POSIX mutex: */
@@ -254,6 +300,7 @@ pthread_mutex_trylock(pthread_mutex_t * mutex)
(*mutex)->m_owner = _thread_run;
/* Add to the list of owned mutexes: */
+ _MUTEX_ASSERT_NOT_OWNED(*mutex);
TAILQ_INSERT_TAIL(&_thread_run->mutexq,
(*mutex), m_qe);
} else if ((*mutex)->m_owner == _thread_run)
@@ -282,6 +329,7 @@ pthread_mutex_trylock(pthread_mutex_t * mutex)
_thread_run->inherited_priority;
/* Add to the list of owned mutexes: */
+ _MUTEX_ASSERT_NOT_OWNED(*mutex);
TAILQ_INSERT_TAIL(&_thread_run->mutexq,
(*mutex), m_qe);
} else if ((*mutex)->m_owner == _thread_run)
@@ -317,6 +365,7 @@ pthread_mutex_trylock(pthread_mutex_t * mutex)
(*mutex)->m_prio;
/* Add to the list of owned mutexes: */
+ _MUTEX_ASSERT_NOT_OWNED(*mutex);
TAILQ_INSERT_TAIL(&_thread_run->mutexq,
(*mutex), m_qe);
} else if ((*mutex)->m_owner == _thread_run)
@@ -337,10 +386,10 @@ pthread_mutex_trylock(pthread_mutex_t * mutex)
_SPINUNLOCK(&(*mutex)->lock);
/*
- * Renable preemption and yield if a scheduling signal
- * arrived while in the critical region:
+ * Undefer and handle pending signals, yielding if
+ * necessary:
*/
- _thread_kern_sched_undefer();
+ _thread_kern_sig_undefer();
}
/* Return the completion status: */
@@ -361,28 +410,24 @@ pthread_mutex_lock(pthread_mutex_t * mutex)
*/
else if (*mutex != NULL || (ret = init_static(mutex)) == 0) {
/*
+ * Defer signals to protect the scheduling queues from
+ * access by the signal handler:
+ */
+ _thread_kern_sig_defer();
+
+ /* Lock the mutex structure: */
+ _SPINLOCK(&(*mutex)->lock);
+
+ /*
* If the mutex was statically allocated, properly
* initialize the tail queue.
*/
if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
TAILQ_INIT(&(*mutex)->m_queue);
(*mutex)->m_flags |= MUTEX_FLAGS_INITED;
+ _MUTEX_INIT_LINK(*mutex);
}
- /*
- * Guard against being preempted by a scheduling signal.
- * To support priority inheritence mutexes, we need to
- * maintain lists of mutex ownerships for each thread as
- * well as lists of waiting threads for each mutex. In
- * order to propagate priorities we need to atomically
- * walk these lists and cannot rely on a single mutex
- * lock to provide protection against modification.
- */
- _thread_kern_sched_defer();
-
- /* Lock the mutex structure: */
- _SPINLOCK(&(*mutex)->lock);
-
/* Process according to mutex type: */
switch ((*mutex)->m_protocol) {
/* Default POSIX mutex: */
@@ -392,6 +437,7 @@ pthread_mutex_lock(pthread_mutex_t * mutex)
(*mutex)->m_owner = _thread_run;
/* Add to the list of owned mutexes: */
+ _MUTEX_ASSERT_NOT_OWNED(*mutex);
TAILQ_INSERT_TAIL(&_thread_run->mutexq,
(*mutex), m_qe);
@@ -419,12 +465,6 @@ pthread_mutex_lock(pthread_mutex_t * mutex)
/* Lock the mutex structure again: */
_SPINLOCK(&(*mutex)->lock);
-
- /*
- * This thread is no longer waiting for
- * the mutex:
- */
- _thread_run->data.mutex = NULL;
}
break;
@@ -449,6 +489,7 @@ pthread_mutex_lock(pthread_mutex_t * mutex)
(*mutex)->m_prio;
/* Add to the list of owned mutexes: */
+ _MUTEX_ASSERT_NOT_OWNED(*mutex);
TAILQ_INSERT_TAIL(&_thread_run->mutexq,
(*mutex), m_qe);
@@ -481,12 +522,6 @@ pthread_mutex_lock(pthread_mutex_t * mutex)
/* Lock the mutex structure again: */
_SPINLOCK(&(*mutex)->lock);
-
- /*
- * This thread is no longer waiting for
- * the mutex:
- */
- _thread_run->data.mutex = NULL;
}
break;
@@ -519,6 +554,7 @@ pthread_mutex_lock(pthread_mutex_t * mutex)
(*mutex)->m_prio;
/* Add to the list of owned mutexes: */
+ _MUTEX_ASSERT_NOT_OWNED(*mutex);
TAILQ_INSERT_TAIL(&_thread_run->mutexq,
(*mutex), m_qe);
} else if ((*mutex)->m_owner == _thread_run)
@@ -556,12 +592,6 @@ pthread_mutex_lock(pthread_mutex_t * mutex)
*/
ret = _thread_run->error;
_thread_run->error = 0;
-
- /*
- * This thread is no longer waiting for
- * the mutex:
- */
- _thread_run->data.mutex = NULL;
}
break;
@@ -576,10 +606,10 @@ pthread_mutex_lock(pthread_mutex_t * mutex)
_SPINUNLOCK(&(*mutex)->lock);
/*
- * Renable preemption and yield if a scheduling signal
- * arrived while in the critical region:
+ * Undefer and handle pending signals, yielding if
+ * necessary:
*/
- _thread_kern_sched_undefer();
+ _thread_kern_sig_undefer();
}
/* Return the completion status: */
@@ -683,15 +713,10 @@ mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
ret = EINVAL;
} else {
/*
- * Guard against being preempted by a scheduling signal.
- * To support priority inheritence mutexes, we need to
- * maintain lists of mutex ownerships for each thread as
- * well as lists of waiting threads for each mutex. In
- * order to propagate priorities we need to atomically
- * walk these lists and cannot rely on a single mutex
- * lock to provide protection against modification.
+ * Defer signals to protect the scheduling queues from
+ * access by the signal handler:
*/
- _thread_kern_sched_defer();
+ _thread_kern_sig_defer();
/* Lock the mutex structure: */
_SPINLOCK(&(*mutex)->lock);
@@ -723,8 +748,10 @@ mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
(*mutex)->m_data.m_count = 0;
/* Remove the mutex from the threads queue. */
+ _MUTEX_ASSERT_IS_OWNED(*mutex);
TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
(*mutex), m_qe);
+ _MUTEX_INIT_LINK(*mutex);
/*
* Get the next thread from the queue of
@@ -738,6 +765,19 @@ mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
*/
PTHREAD_NEW_STATE((*mutex)->m_owner,
PS_RUNNING);
+
+ /*
+ * Add the mutex to the threads list of
+ * owned mutexes:
+ */
+ TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
+ (*mutex), m_qe);
+
+ /*
+ * The owner is no longer waiting for
+ * this mutex:
+ */
+ (*mutex)->m_owner->data.mutex = NULL;
}
}
break;
@@ -784,8 +824,10 @@ mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
_thread_run->priority_mutex_count--;
/* Remove the mutex from the threads queue. */
+ _MUTEX_ASSERT_IS_OWNED(*mutex);
TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
(*mutex), m_qe);
+ _MUTEX_INIT_LINK(*mutex);
/*
* Get the next thread from the queue of threads
@@ -891,8 +933,10 @@ mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
_thread_run->priority_mutex_count--;
/* Remove the mutex from the threads queue. */
+ _MUTEX_ASSERT_IS_OWNED(*mutex);
TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
- (*mutex), m_qe);
+ (*mutex), m_qe);
+ _MUTEX_INIT_LINK(*mutex);
/*
* Enter a loop to find a waiting thread whose
@@ -913,6 +957,11 @@ mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
(*mutex)->m_owner->error = EINVAL;
PTHREAD_NEW_STATE((*mutex)->m_owner,
PS_RUNNING);
+ /*
+ * The thread is no longer waiting for
+ * this mutex:
+ */
+ (*mutex)->m_owner->data.mutex = NULL;
}
/* Check for a new owner: */
@@ -978,10 +1027,10 @@ mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
_SPINUNLOCK(&(*mutex)->lock);
/*
- * Renable preemption and yield if a scheduling signal
- * arrived while in the critical region:
+ * Undefer and handle pending signals, yielding if
+ * necessary:
*/
- _thread_kern_sched_undefer();
+ _thread_kern_sig_undefer();
}
/* Return the completion status: */
@@ -990,11 +1039,11 @@ mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
/*
- * This function is called when a change in base priority occurs
- * for a thread that is thread holding, or waiting for, a priority
- * protection or inheritence mutex. A change in a threads base
- * priority can effect changes to active priorities of other threads
- * and to the ordering of mutex locking by waiting threads.
+ * This function is called when a change in base priority occurs for
+ * a thread that is holding or waiting for a priority protection or
+ * inheritance mutex. A change in a thread's base priority can effect
+ * changes to active priorities of other threads and to the ordering
+ * of mutex locking by waiting threads.
*
* This must be called while thread scheduling is deferred.
*/
@@ -1231,8 +1280,7 @@ mutex_rescan_owned (pthread_t pthread, pthread_mutex_t mutex)
* If this thread is in the priority queue, it must be
* removed and reinserted for its new priority.
*/
- if ((pthread != _thread_run) &&
- (pthread->state == PS_RUNNING)) {
+ if (pthread->flags & PTHREAD_FLAGS_IN_PRIOQ) {
/*
* Remove the thread from the priority queue
* before changing its priority:
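The old test inferred priority-queue membership from thread state; the PTHREAD_FLAGS_IN_PRIOQ flag makes membership explicit. A hedged sketch of the remove/reinsert step this hunk leads into (new_prio is a hypothetical stand-in for the recomputed priority):

	if (pthread->flags & PTHREAD_FLAGS_IN_PRIOQ) {
		PTHREAD_PRIOQ_REMOVE(pthread);		/* out at the old priority */
		pthread->active_priority = new_prio;	/* hypothetical new value */
		PTHREAD_PRIOQ_INSERT_TAIL(pthread);	/* back in at the new one */
	}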
diff --git a/lib/libc_r/uthread/uthread_poll.c b/lib/libc_r/uthread/uthread_poll.c
new file mode 100644
index 0000000..a54d023
--- /dev/null
+++ b/lib/libc_r/uthread/uthread_poll.c
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 1999 Daniel Eischen <eischen@vigrid.com>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Daniel Eischen.
+ * 4. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id$
+ */
+#include <unistd.h>
+#include <errno.h>
+#include <string.h>
+#include <poll.h>
+#include <sys/types.h>
+#include <sys/time.h>
+#include <sys/fcntl.h>
+#ifdef _THREAD_SAFE
+#include <pthread.h>
+#include "pthread_private.h"
+
+
+int
+poll(struct pollfd *fds, unsigned int nfds, int timeout)
+{
+ struct timespec ts;
+ int numfds = nfds;
+ int i, ret = 0, found = 0;
+ struct pthread_poll_data data;
+
+ if (numfds > _thread_dtablesize) {
+ numfds = _thread_dtablesize;
+ }
+ /* Check if a timeout was specified: */
+ if (timeout == INFTIM) {
+ /* Wait forever: */
+ _thread_kern_set_timeout(NULL);
+ } else if (timeout != 0) {
+ /* Convert the timeout in msec to a timespec: */
+ ts.tv_sec = timeout / 1000;
+ ts.tv_nsec = (timeout % 1000) * 1000000;
+
+ /* Set the wake up time: */
+ _thread_kern_set_timeout(&ts);
+ }
+
+ if (((ret = _thread_sys_poll(fds, numfds, 0)) == 0) && (timeout != 0)) {
+ data.nfds = numfds;
+ data.fds = fds;
+
+ /*
+ * Clear revents in case of a timeout which leaves fds
+ * unchanged:
+ */
+ for (i = 0; i < numfds; i++) {
+ fds[i].revents = 0;
+ }
+
+ _thread_run->data.poll_data = &data;
+ _thread_run->interrupted = 0;
+ _thread_kern_sched_state(PS_POLL_WAIT, __FILE__, __LINE__);
+ if (_thread_run->interrupted) {
+ errno = EINTR;
+ ret = -1;
+ } else {
+ ret = data.nfds;
+ }
+ }
+
+ return (ret);
+}
+#endif
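The millisecond-to-timespec conversion above is easy to get wrong: tv_nsec holds nanoseconds, so the millisecond remainder must be scaled by one million. A standalone sketch of the conversion for checking against the wrapper (the helper name is hypothetical):

	#include <time.h>

	/* Convert a poll(2)-style millisecond timeout to a timespec. */
	static struct timespec
	msec_to_timespec(int timeout)
	{
		struct timespec ts;

		ts.tv_sec = timeout / 1000;		 /* whole seconds */
		ts.tv_nsec = (timeout % 1000) * 1000000; /* msec -> nsec */
		return (ts);
	}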
diff --git a/lib/libc_r/uthread/uthread_priority_queue.c b/lib/libc_r/uthread/uthread_priority_queue.c
index 516a1e0..0a00a9d3 100644
--- a/lib/libc_r/uthread/uthread_priority_queue.c
+++ b/lib/libc_r/uthread/uthread_priority_queue.c
@@ -29,6 +29,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
+ * $Id$
*/
#include <stdlib.h>
#include <sys/queue.h>
@@ -40,9 +41,51 @@
/* Prototypes: */
static void pq_insert_prio_list(pq_queue_t *pq, int prio);
+#if defined(_PTHREADS_INVARIANTS)
+
+static int _pq_active = 0;
+
+#define _PQ_IN_SCHEDQ (PTHREAD_FLAGS_IN_PRIOQ | PTHREAD_FLAGS_IN_WAITQ | PTHREAD_FLAGS_IN_WORKQ)
+
+#define _PQ_SET_ACTIVE() _pq_active = 1
+#define _PQ_CLEAR_ACTIVE() _pq_active = 0
+#define _PQ_ASSERT_ACTIVE(msg) do { \
+ if (_pq_active == 0) \
+ PANIC(msg); \
+} while (0)
+#define _PQ_ASSERT_INACTIVE(msg) do { \
+ if (_pq_active != 0) \
+ PANIC(msg); \
+} while (0)
+#define _PQ_ASSERT_IN_WAITQ(thrd, msg) do { \
+ if (((thrd)->flags & PTHREAD_FLAGS_IN_WAITQ) == 0) \
+ PANIC(msg); \
+} while (0)
+#define _PQ_ASSERT_IN_PRIOQ(thrd, msg) do { \
+ if (((thrd)->flags & PTHREAD_FLAGS_IN_PRIOQ) == 0) \
+ PANIC(msg); \
+} while (0)
+#define _PQ_ASSERT_NOT_QUEUED(thrd, msg) do { \
+ if ((thrd)->flags & _PQ_IN_SCHEDQ) \
+ PANIC(msg); \
+} while (0)
+
+#else
+
+#define _PQ_SET_ACTIVE()
+#define _PQ_CLEAR_ACTIVE()
+#define _PQ_ASSERT_ACTIVE(msg)
+#define _PQ_ASSERT_INACTIVE(msg)
+#define _PQ_ASSERT_IN_WAITQ(thrd, msg)
+#define _PQ_ASSERT_IN_PRIOQ(thrd, msg)
+#define _PQ_ASSERT_NOT_QUEUED(thrd, msg)
+#define _PQ_CHECK_PRIO()
+
+#endif
+
int
-_pq_init(pq_queue_t *pq, int minprio, int maxprio)
+_pq_alloc(pq_queue_t *pq, int minprio, int maxprio)
{
int i, ret = 0;
int prioslots = maxprio - minprio + 1;
@@ -56,8 +99,26 @@ _pq_init(pq_queue_t *pq, int minprio, int maxprio)
ret = -1;
else {
+ /* Remember the queue size: */
+ pq->pq_size = prioslots;
+
+ ret = _pq_init(pq);
+
+ }
+ return (ret);
+}
+
+int
+_pq_init(pq_queue_t *pq)
+{
+ int i, ret = 0;
+
+ if ((pq == NULL) || (pq->pq_lists == NULL))
+ ret = -1;
+
+ else {
/* Initialize the queue for each priority slot: */
- for (i = 0; i < prioslots; i++) {
+ for (i = 0; i < pq->pq_size; i++) {
TAILQ_INIT(&pq->pq_lists[i].pl_head);
pq->pq_lists[i].pl_prio = i;
pq->pq_lists[i].pl_queued = 0;
@@ -65,9 +126,7 @@ _pq_init(pq_queue_t *pq, int minprio, int maxprio)
/* Initialize the priority queue: */
TAILQ_INIT(&pq->pq_queue);
-
- /* Remember the queue size: */
- pq->pq_size = prioslots;
+ _PQ_CLEAR_ACTIVE();
}
return (ret);
}
@@ -77,7 +136,27 @@ _pq_remove(pq_queue_t *pq, pthread_t pthread)
{
int prio = pthread->active_priority;
+ /*
+ * Make some assertions when debugging is enabled:
+ */
+ _PQ_ASSERT_INACTIVE("_pq_remove: pq_active");
+ _PQ_SET_ACTIVE();
+ _PQ_ASSERT_IN_PRIOQ(pthread, "_pq_remove: Not in priority queue");
+
+ /*
+ * Remove this thread from the priority list. Note that if
+ * the priority list becomes empty, it is not removed
+ * from the priority queue because another thread may be
+ * added to the priority list (resulting in a needless
+ * removal/insertion). Priority lists are only removed
+ * from the priority queue when _pq_first is called.
+ */
TAILQ_REMOVE(&pq->pq_lists[prio].pl_head, pthread, pqe);
+
+ /* This thread is no longer in the priority queue. */
+ pthread->flags &= ~PTHREAD_FLAGS_IN_PRIOQ;
+
+ _PQ_CLEAR_ACTIVE();
}
@@ -86,10 +165,23 @@ _pq_insert_head(pq_queue_t *pq, pthread_t pthread)
{
int prio = pthread->active_priority;
+ /*
+ * Make some assertions when debugging is enabled:
+ */
+ _PQ_ASSERT_INACTIVE("_pq_insert_head: pq_active");
+ _PQ_SET_ACTIVE();
+ _PQ_ASSERT_NOT_QUEUED(pthread,
+ "_pq_insert_head: Already in priority queue");
+
TAILQ_INSERT_HEAD(&pq->pq_lists[prio].pl_head, pthread, pqe);
if (pq->pq_lists[prio].pl_queued == 0)
/* Insert the list into the priority queue: */
pq_insert_prio_list(pq, prio);
+
+ /* Mark this thread as being in the priority queue. */
+ pthread->flags |= PTHREAD_FLAGS_IN_PRIOQ;
+
+ _PQ_CLEAR_ACTIVE();
}
@@ -98,10 +190,23 @@ _pq_insert_tail(pq_queue_t *pq, pthread_t pthread)
{
int prio = pthread->active_priority;
+ /*
+ * Make some assertions when debugging is enabled:
+ */
+ _PQ_ASSERT_INACTIVE("_pq_insert_tail: pq_active");
+ _PQ_SET_ACTIVE();
+ _PQ_ASSERT_NOT_QUEUED(pthread,
+ "_pq_insert_tail: Already in priority queue");
+
TAILQ_INSERT_TAIL(&pq->pq_lists[prio].pl_head, pthread, pqe);
if (pq->pq_lists[prio].pl_queued == 0)
/* Insert the list into the priority queue: */
pq_insert_prio_list(pq, prio);
+
+ /* Mark this thread as being in the priority queue. */
+ pthread->flags |= PTHREAD_FLAGS_IN_PRIOQ;
+
+ _PQ_CLEAR_ACTIVE();
}
@@ -111,6 +216,12 @@ _pq_first(pq_queue_t *pq)
pq_list_t *pql;
pthread_t pthread = NULL;
+ /*
+ * Make some assertions when debugging is enabled:
+ */
+ _PQ_ASSERT_INACTIVE("_pq_first: pq_active");
+ _PQ_SET_ACTIVE();
+
while (((pql = TAILQ_FIRST(&pq->pq_queue)) != NULL) &&
(pthread == NULL)) {
if ((pthread = TAILQ_FIRST(&pql->pl_head)) == NULL) {
@@ -124,6 +235,8 @@ _pq_first(pq_queue_t *pq)
pql->pl_queued = 0;
}
}
+
+ _PQ_CLEAR_ACTIVE();
return (pthread);
}
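_pq_first is where an empty priority list finally leaves the queue; together with the lazy _pq_remove above, the two-level structure can be pictured as follows (an illustration, not code from the tree):

	/*
	 * pq_queue:  [prio 31 list] -> [prio 15 list] -> [prio 0 list]
	 *                  |                 |                 |
	 *              thread A           (empty)          thread C
	 *              thread B
	 *
	 * _pq_remove() only unlinks a thread, possibly leaving an empty
	 * list queued; _pq_first() skips and dequeues empty lists while
	 * searching for a runnable thread.
	 */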
@@ -134,9 +247,14 @@ pq_insert_prio_list(pq_queue_t *pq, int prio)
pq_list_t *pql;
/*
+ * Make some assertions when debugging is enabled:
+ */
+ _PQ_ASSERT_ACTIVE("pq_insert_prio_list: pq_active");
+
+ /*
* The priority queue is in descending priority order. Start at
* the beginning of the queue and find the list before which the
- * new list should to be inserted.
+ * new list should be inserted.
*/
pql = TAILQ_FIRST(&pq->pq_queue);
while ((pql != NULL) && (pql->pl_prio > prio))
@@ -152,4 +270,66 @@ pq_insert_prio_list(pq_queue_t *pq, int prio)
pq->pq_lists[prio].pl_queued = 1;
}
+#if defined(_PTHREADS_INVARIANTS)
+void
+_waitq_insert(pthread_t pthread)
+{
+ pthread_t tid;
+
+ /*
+ * Make some assertions when debugging is enabled:
+ */
+ _PQ_ASSERT_INACTIVE("_waitq_insert: pq_active");
+ _PQ_SET_ACTIVE();
+ _PQ_ASSERT_NOT_QUEUED(pthread, "_waitq_insert: Already in queue");
+
+ if (pthread->wakeup_time.tv_sec == -1)
+ TAILQ_INSERT_TAIL(&_waitingq, pthread, pqe);
+ else {
+ tid = TAILQ_FIRST(&_waitingq);
+ while ((tid != NULL) && (tid->wakeup_time.tv_sec != -1) &&
+ ((tid->wakeup_time.tv_sec < pthread->wakeup_time.tv_sec) ||
+ ((tid->wakeup_time.tv_sec == pthread->wakeup_time.tv_sec) &&
+ (tid->wakeup_time.tv_nsec <= pthread->wakeup_time.tv_nsec))))
+ tid = TAILQ_NEXT(tid, pqe);
+ if (tid == NULL)
+ TAILQ_INSERT_TAIL(&_waitingq, pthread, pqe);
+ else
+ TAILQ_INSERT_BEFORE(tid, pthread, pqe);
+ }
+ pthread->flags |= PTHREAD_FLAGS_IN_WAITQ;
+
+ _PQ_CLEAR_ACTIVE();
+}
+
+void
+_waitq_remove(pthread_t pthread)
+{
+ /*
+ * Make some assertions when debugging is enabled:
+ */
+ _PQ_ASSERT_INACTIVE("_waitq_remove: pq_active");
+ _PQ_SET_ACTIVE();
+ _PQ_ASSERT_IN_WAITQ(pthread, "_waitq_remove: Not in queue");
+
+ TAILQ_REMOVE(&_waitingq, pthread, pqe);
+ pthread->flags &= ~PTHREAD_FLAGS_IN_WAITQ;
+
+ _PQ_CLEAR_ACTIVE();
+}
+
+void
+_waitq_setactive(void)
+{
+ _PQ_ASSERT_INACTIVE("_waitq_setactive: pq_active");
+ _PQ_SET_ACTIVE();
+}
+
+void
+_waitq_clearactive(void)
+{
+ _PQ_ASSERT_ACTIVE("_waitq_clearactive: ! pq_active");
+ _PQ_CLEAR_ACTIVE();
+}
+#endif
#endif
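_waitq_insert keeps _waitingq sorted by wakeup time, with tv_sec == -1 meaning "no timeout" and therefore sorting after every finite time. The comparison it performs inline can be read as the following predicate (the helper name is hypothetical):

	#include <time.h>

	/* Does a wake up no later than b?  (tv_sec == -1 sorts last.) */
	static int
	wakeup_le(const struct timespec *a, const struct timespec *b)
	{
		if (a->tv_sec == -1)
			return (0);
		if (b->tv_sec == -1)
			return (1);
		return ((a->tv_sec < b->tv_sec) ||
		    ((a->tv_sec == b->tv_sec) && (a->tv_nsec <= b->tv_nsec)));
	}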
diff --git a/lib/libc_r/uthread/uthread_queue.c b/lib/libc_r/uthread/uthread_queue.c
deleted file mode 100644
index 5225ab8..0000000
--- a/lib/libc_r/uthread/uthread_queue.c
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- * must display the following acknowledgement:
- * This product includes software developed by John Birrell.
- * 4. Neither the name of the author nor the names of any co-contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- */
-#include <stdio.h>
-#ifdef _THREAD_SAFE
-#include <pthread.h>
-#include "pthread_private.h"
-
-void
-_thread_queue_init(struct pthread_queue * queue)
-{
- /* Initialise the pointers in the queue structure: */
- queue->q_next = NULL;
- queue->q_last = NULL;
- queue->q_data = NULL;
- return;
-}
-
-void
-_thread_queue_enq(struct pthread_queue * queue, struct pthread * thread)
-{
- if (queue->q_last) {
- queue->q_last->qnxt = thread;
- } else {
- queue->q_next = thread;
- }
- queue->q_last = thread;
- thread->queue = queue;
- thread->qnxt = NULL;
- return;
-}
-
-struct pthread *
-_thread_queue_get(struct pthread_queue * queue)
-{
- /* Return the pointer to the next thread in the queue: */
- return (queue->q_next);
-}
-
-struct pthread *
-_thread_queue_deq(struct pthread_queue * queue)
-{
- struct pthread *thread = NULL;
-
- if (queue->q_next) {
- thread = queue->q_next;
- if (!(queue->q_next = queue->q_next->qnxt)) {
- queue->q_last = NULL;
- }
- thread->queue = NULL;
- thread->qnxt = NULL;
- }
- return (thread);
-}
-
-int
-_thread_queue_remove(struct pthread_queue * queue, struct pthread * thread)
-{
- struct pthread **current = &(queue->q_next);
- struct pthread *prev = NULL;
- int ret = -1;
-
- while (*current) {
- if (*current == thread) {
- if ((*current)->qnxt) {
- *current = (*current)->qnxt;
- } else {
- queue->q_last = prev;
- *current = NULL;
- }
- ret = 0;
- break;
- }
- prev = *current;
- current = &((*current)->qnxt);
- }
- thread->queue = NULL;
- thread->qnxt = NULL;
- return (ret);
-}
-
-int
-pthread_llist_remove(struct pthread ** llist, struct pthread * thread)
-{
- while (*llist) {
- if (*llist == thread) {
- *llist = thread->qnxt;
- return (0);
- }
- llist = &(*llist)->qnxt;
- }
- return (-1);
-}
-
-#endif
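The file removed above was the last user of the hand-rolled pthread_queue; its entry points map onto the <sys/queue.h> TAILQ macros used throughout this change. Roughly, with q, td and the qe link field as stand-ins:

	TAILQ_INIT(&q);				/* was _thread_queue_init(&q)   */
	TAILQ_INSERT_TAIL(&q, td, qe);		/* was _thread_queue_enq(&q,td) */
	td = TAILQ_FIRST(&q);			/* was _thread_queue_get(&q)    */
	if (td != NULL)
		TAILQ_REMOVE(&q, td, qe);	/* ...completing _deq()/remove  */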
diff --git a/lib/libc_r/uthread/uthread_resume_np.c b/lib/libc_r/uthread/uthread_resume_np.c
index 885a457..b12baba 100644
--- a/lib/libc_r/uthread/uthread_resume_np.c
+++ b/lib/libc_r/uthread/uthread_resume_np.c
@@ -29,6 +29,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
+ * $Id$
*/
#include <errno.h>
#ifdef _THREAD_SAFE
@@ -46,20 +47,19 @@ pthread_resume_np(pthread_t thread)
/* The thread exists. Is it suspended? */
if (thread->state != PS_SUSPENDED) {
/*
- * Guard against preemption by a scheduling signal.
- * A change of thread state modifies the waiting
- * and priority queues.
+ * Defer signals to protect the scheduling queues
+ * from access by the signal handler:
*/
- _thread_kern_sched_defer();
+ _thread_kern_sig_defer();
/* Allow the thread to run. */
PTHREAD_NEW_STATE(thread,PS_RUNNING);
/*
- * Reenable preemption and yield if a scheduling
- * signal occurred while in the critical region.
+ * Undefer and handle pending signals, yielding if
+ * necessary:
*/
- _thread_kern_sched_undefer();
+ _thread_kern_sig_undefer();
}
}
return(ret);
diff --git a/lib/libc_r/uthread/uthread_select.c b/lib/libc_r/uthread/uthread_select.c
index 6d7d7dc..c2f86c5 100644
--- a/lib/libc_r/uthread/uthread_select.c
+++ b/lib/libc_r/uthread/uthread_select.c
@@ -29,10 +29,13 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
+ * $Id$
*/
#include <unistd.h>
#include <errno.h>
+#include <poll.h>
#include <string.h>
+#include <sys/param.h>
#include <sys/types.h>
#include <sys/time.h>
#include <sys/fcntl.h>
@@ -44,12 +47,10 @@ int
select(int numfds, fd_set * readfds, fd_set * writefds,
fd_set * exceptfds, struct timeval * timeout)
{
- fd_set read_locks, write_locks, rdwr_locks;
struct timespec ts;
- struct timeval zero_timeout = {0, 0};
- int i, ret = 0, got_all_locks = 1;
- int f_wait = 1;
- struct pthread_select_data data;
+ int i, ret = 0, f_wait = 1;
+ int pfd_index, got_one = 0, fd_count = 0;
+ struct pthread_poll_data data;
if (numfds > _thread_dtablesize) {
numfds = _thread_dtablesize;
@@ -68,112 +69,129 @@ select(int numfds, fd_set * readfds, fd_set * writefds,
_thread_kern_set_timeout(NULL);
}
- FD_ZERO(&read_locks);
- FD_ZERO(&write_locks);
- FD_ZERO(&rdwr_locks);
-
- /* lock readfds */
+ /* Count the number of file descriptors to be polled: */
if (readfds || writefds || exceptfds) {
for (i = 0; i < numfds; i++) {
- if ((readfds && (FD_ISSET(i, readfds))) || (exceptfds && FD_ISSET(i, exceptfds))) {
- if (writefds && FD_ISSET(i, writefds)) {
- if ((ret = _FD_LOCK(i, FD_RDWR, NULL)) != 0) {
- got_all_locks = 0;
- break;
- }
- FD_SET(i, &rdwr_locks);
- } else {
- if ((ret = _FD_LOCK(i, FD_READ, NULL)) != 0) {
- got_all_locks = 0;
- break;
- }
- FD_SET(i, &read_locks);
- }
- } else {
- if (writefds && FD_ISSET(i, writefds)) {
- if ((ret = _FD_LOCK(i, FD_WRITE, NULL)) != 0) {
- got_all_locks = 0;
- break;
- }
- FD_SET(i, &write_locks);
- }
+ if ((readfds && FD_ISSET(i, readfds)) ||
+ (exceptfds && FD_ISSET(i, exceptfds)) ||
+ (writefds && FD_ISSET(i, writefds))) {
+ fd_count++;
}
}
}
- if (got_all_locks) {
- data.nfds = numfds;
- FD_ZERO(&data.readfds);
- FD_ZERO(&data.writefds);
- FD_ZERO(&data.exceptfds);
- if (readfds != NULL) {
- memcpy(&data.readfds, readfds, sizeof(data.readfds));
- }
- if (writefds != NULL) {
- memcpy(&data.writefds, writefds, sizeof(data.writefds));
+
+ /*
+ * Allocate memory for poll data if it hasn't already been
+ * allocated or if previously allocated memory is insufficient.
+ */
+ if ((_thread_run->poll_data.fds == NULL) ||
+ (_thread_run->poll_data.nfds < fd_count)) {
+ data.fds = (struct pollfd *) realloc(_thread_run->poll_data.fds,
+ sizeof(struct pollfd) * MAX(128, fd_count));
+ if (data.fds == NULL) {
+ errno = ENOMEM;
+ ret = -1;
}
- if (exceptfds != NULL) {
- memcpy(&data.exceptfds, exceptfds, sizeof(data.exceptfds));
+ else {
+ /*
+ * Note that the thread's poll data always
+ * indicates what is allocated, not what is
+ * currently being polled.
+ */
+ _thread_run->poll_data.fds = data.fds;
+ _thread_run->poll_data.nfds = MAX(128, fd_count);
}
- if ((ret = _thread_sys_select(data.nfds, &data.readfds, &data.writefds, &data.exceptfds, &zero_timeout)) == 0 && f_wait) {
- data.nfds = numfds;
- FD_ZERO(&data.readfds);
- FD_ZERO(&data.writefds);
- FD_ZERO(&data.exceptfds);
- if (readfds != NULL) {
- memcpy(&data.readfds, readfds, sizeof(data.readfds));
+ }
+ if (ret == 0) {
+ /* Setup the wait data. */
+ data.fds = _thread_run->poll_data.fds;
+ data.nfds = fd_count;
+
+ /*
+ * Setup the array of pollfds. Optimize this by
+ * running the loop in reverse and stopping when
+ * the number of selected file descriptors is reached.
+ */
+ for (i = numfds - 1, pfd_index = fd_count - 1;
+ (i >= 0) && (pfd_index >= 0); i--) {
+ data.fds[pfd_index].events = 0;
+ if (readfds && FD_ISSET(i, readfds)) {
+ data.fds[pfd_index].events = POLLRDNORM;
}
- if (writefds != NULL) {
- memcpy(&data.writefds, writefds, sizeof(data.writefds));
+ if (exceptfds && FD_ISSET(i, exceptfds)) {
+ data.fds[pfd_index].events |= POLLRDBAND;
}
- if (exceptfds != NULL) {
- memcpy(&data.exceptfds, exceptfds, sizeof(data.exceptfds));
+ if (writefds && FD_ISSET(i, writefds)) {
+ data.fds[pfd_index].events |= POLLWRNORM;
+ }
+ if (data.fds[pfd_index].events != 0) {
+ /*
+ * Set the file descriptor to be polled and
+ * clear revents in case of a timeout which
+ * leaves fds unchanged:
+ */
+ data.fds[pfd_index].fd = i;
+ data.fds[pfd_index].revents = 0;
+ pfd_index--;
}
- _thread_run->data.select_data = &data;
+ }
+ if (((ret = _thread_sys_poll(data.fds, data.nfds, 0)) == 0) &&
+ (f_wait != 0)) {
+ _thread_run->data.poll_data = &data;
_thread_run->interrupted = 0;
_thread_kern_sched_state(PS_SELECT_WAIT, __FILE__, __LINE__);
if (_thread_run->interrupted) {
errno = EINTR;
+ data.nfds = 0;
ret = -1;
} else
ret = data.nfds;
}
}
- /* clean up the locks */
- for (i = 0; i < numfds; i++)
- if (FD_ISSET(i, &read_locks))
- _FD_UNLOCK(i, FD_READ);
- for (i = 0; i < numfds; i++)
- if (FD_ISSET(i, &rdwr_locks))
- _FD_UNLOCK(i, FD_RDWR);
- for (i = 0; i < numfds; i++)
- if (FD_ISSET(i, &write_locks))
- _FD_UNLOCK(i, FD_WRITE);
if (ret >= 0) {
- if (readfds != NULL) {
- for (i = 0; i < numfds; i++) {
- if (FD_ISSET(i, readfds) &&
- !FD_ISSET(i, &data.readfds)) {
- FD_CLR(i, readfds);
+ numfds = 0;
+ for (i = 0; i < fd_count; i++) {
+ /*
+ * Check the results of the poll and clear
+ * this file descriptor from the fdset if
+ * the requested event wasn't ready.
+ */
+ got_one = 0;
+ if (readfds != NULL) {
+ if (FD_ISSET(data.fds[i].fd, readfds)) {
+ if (data.fds[i].revents & (POLLIN |
+ POLLRDNORM))
+ got_one = 1;
+ else
+ FD_CLR(data.fds[i].fd, readfds);
}
}
- }
- if (writefds != NULL) {
- for (i = 0; i < numfds; i++) {
- if (FD_ISSET(i, writefds) &&
- !FD_ISSET(i, &data.writefds)) {
- FD_CLR(i, writefds);
+ if (writefds != NULL) {
+ if (FD_ISSET(data.fds[i].fd, writefds)) {
+ if (data.fds[i].revents & (POLLOUT |
+ POLLWRNORM | POLLWRBAND))
+ got_one = 1;
+ else
+ FD_CLR(data.fds[i].fd,
+ writefds);
}
}
- }
- if (exceptfds != NULL) {
- for (i = 0; i < numfds; i++) {
- if (FD_ISSET(i, exceptfds) &&
- !FD_ISSET(i, &data.exceptfds)) {
- FD_CLR(i, exceptfds);
+ if (exceptfds != NULL) {
+ if (FD_ISSET(data.fds[i].fd, exceptfds)) {
+ if (data.fds[i].revents & (POLLRDBAND |
+ POLLPRI | POLLHUP | POLLERR |
+ POLLNVAL))
+ got_one = 1;
+ else
+ FD_CLR(data.fds[i].fd,
+ exceptfds);
}
}
+ if (got_one)
+ numfds++;
}
+ ret = numfds;
}
return (ret);
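The rewritten select() translates each fd_set into poll events on the way in and filters revents on the way out. The mapping, distilled from the code above (fd, events and revents are stand-ins for the pollfd fields):

	/* fd_set interest -> poll() events requested: */
	if (readfds != NULL && FD_ISSET(fd, readfds))
		events |= POLLRDNORM;
	if (exceptfds != NULL && FD_ISSET(fd, exceptfds))
		events |= POLLRDBAND;
	if (writefds != NULL && FD_ISSET(fd, writefds))
		events |= POLLWRNORM;

	/* poll() results -> readiness reported back through select(): */
	readable    = revents & (POLLIN | POLLRDNORM);
	writable    = revents & (POLLOUT | POLLWRNORM | POLLWRBAND);
	exceptional = revents & (POLLRDBAND | POLLPRI | POLLHUP |
	    POLLERR | POLLNVAL);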
diff --git a/lib/libc_r/uthread/uthread_setschedparam.c b/lib/libc_r/uthread/uthread_setschedparam.c
index 93635da..1ba0e20 100644
--- a/lib/libc_r/uthread/uthread_setschedparam.c
+++ b/lib/libc_r/uthread/uthread_setschedparam.c
@@ -29,6 +29,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
+ * $Id$
*/
#include <errno.h>
#include <sys/param.h>
@@ -50,10 +51,10 @@ pthread_setschedparam(pthread_t pthread, int policy, struct sched_param *param)
/* Find the thread in the list of active threads: */
else if ((ret = _find_thread(pthread)) == 0) {
/*
- * Guard against being preempted by a scheduling
- * signal:
+ * Defer signals to protect the scheduling queues from
+ * access by the signal handler:
*/
- _thread_kern_sched_defer();
+ _thread_kern_sig_defer();
if (param->sched_priority != pthread->base_priority) {
/*
@@ -61,8 +62,7 @@ pthread_setschedparam(pthread_t pthread, int policy, struct sched_param *param)
* queue before any adjustments are made to its
* active priority:
*/
- if ((pthread != _thread_run) &&
- (pthread->state == PS_RUNNING)) {
+ if ((pthread->flags & PTHREAD_FLAGS_IN_PRIOQ) != 0) {
in_readyq = 1;
old_prio = pthread->active_priority;
PTHREAD_PRIOQ_REMOVE(pthread);
@@ -103,10 +103,10 @@ pthread_setschedparam(pthread_t pthread, int policy, struct sched_param *param)
pthread->attr.sched_policy = policy;
/*
- * Renable preemption and yield if a scheduling signal
- * arrived while in the critical region:
+ * Undefer and handle pending signals, yielding if
+ * necessary:
*/
- _thread_kern_sched_undefer();
+ _thread_kern_sig_undefer();
}
return(ret);
}
diff --git a/lib/libc_r/uthread/uthread_sig.c b/lib/libc_r/uthread/uthread_sig.c
index e51d949..ac475a4 100644
--- a/lib/libc_r/uthread/uthread_sig.c
+++ b/lib/libc_r/uthread/uthread_sig.c
@@ -29,6 +29,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
+ * $Id$
*/
#include <signal.h>
#include <fcntl.h>
@@ -38,107 +39,51 @@
#include <pthread.h>
#include "pthread_private.h"
-/*
- * State change macro for signal handler:
- */
-#define PTHREAD_SIG_NEW_STATE(thrd, newstate) { \
- if ((_thread_run->sched_defer_count == 0) && \
- (_thread_kern_in_sched == 0)) { \
- PTHREAD_NEW_STATE(thrd, newstate); \
- } else { \
- _waitingq_check_reqd = 1; \
- PTHREAD_SET_STATE(thrd, newstate); \
- } \
-}
-
/* Static variables: */
-static int volatile yield_on_unlock_thread = 0;
-static spinlock_t thread_link_list_lock = _SPINLOCK_INITIALIZER;
-
-/* Lock the thread list: */
-void
-_lock_thread_list()
-{
- /* Lock the thread list: */
- _SPINLOCK(&thread_link_list_lock);
-}
+static spinlock_t signal_lock = _SPINLOCK_INITIALIZER;
+unsigned int pending_sigs[NSIG];
+unsigned int handled_sigs[NSIG];
+int volatile check_pending = 0;
-/* Lock the thread list: */
+/* Initialize signal handling facility: */
void
-_unlock_thread_list()
+_thread_sig_init(void)
{
- /* Unlock the thread list: */
- _SPINUNLOCK(&thread_link_list_lock);
-
- /*
- * Check if a scheduler interrupt occurred while the thread
- * list was locked:
- */
- if (yield_on_unlock_thread) {
- /* Reset the interrupt flag: */
- yield_on_unlock_thread = 0;
+ int i;
- /* This thread has overstayed it's welcome: */
- sched_yield();
+ /* Clear pending and handled signal counts: */
+ for (i = 1; i < NSIG; i++) {
+ pending_sigs[i - 1] = 0;
+ handled_sigs[i - 1] = 0;
}
+
+ /* Clear the lock: */
+ signal_lock.access_lock = 0;
}
void
_thread_sig_handler(int sig, int code, struct sigcontext * scp)
{
- char c;
- int i;
- pthread_t pthread;
-
- /*
- * Check if the pthread kernel has unblocked signals (or is about to)
- * and was on its way into a _select when the current
- * signal interrupted it:
- */
- if (_thread_kern_in_select) {
- /* Cast the signal number to a character variable: */
- c = sig;
-
- /*
- * Write the signal number to the kernel pipe so that it will
- * be ready to read when this signal handler returns. This
- * means that the _select call will complete
- * immediately.
- */
- _thread_sys_write(_thread_kern_pipe[1], &c, 1);
- }
- /* Check if the signal requires a dump of thread information: */
- if (sig == SIGINFO)
- /* Dump thread information to file: */
- _thread_dump_info();
+ char c;
+ int i;
/* Check if an interval timer signal: */
- else if (sig == _SCHED_SIGNAL) {
- /* Check if the scheduler interrupt has come at an
- * unfortunate time which one of the threads is
- * modifying the thread list:
- */
- if (thread_link_list_lock.access_lock)
+ if (sig == _SCHED_SIGNAL) {
+ if (_thread_kern_in_sched != 0) {
/*
- * Set a flag so that the thread that has
- * the lock yields when it unlocks the
- * thread list:
+ * The scheduler is already running; ignore this
+ * signal.
*/
- yield_on_unlock_thread = 1;
-
+ }
/*
* Check if the scheduler interrupt has come when
* the currently running thread has deferred thread
- * scheduling.
+ * signals.
*/
- else if (_thread_run->sched_defer_count)
- _thread_run->yield_on_sched_undefer = 1;
+ else if (_thread_run->sig_defer_count > 0)
+ _thread_run->yield_on_sig_undefer = 1;
- /*
- * Check if the kernel has not been interrupted while
- * executing scheduler code:
- */
- else if (!_thread_kern_in_sched) {
+ else {
/*
* Schedule the next thread. This function is not
* expected to return because it will do a longjmp
@@ -152,6 +97,72 @@ _thread_sig_handler(int sig, int code, struct sigcontext * scp)
*/
PANIC("Returned to signal function from scheduler");
}
+ }
+ /*
+ * Check if the kernel has been interrupted while the scheduler
+ * is accessing the scheduling queues or if there is a currently
+ * running thread that has deferred signals.
+ */
+ else if ((_queue_signals != 0) || ((_thread_kern_in_sched == 0) &&
+ (_thread_run->sig_defer_count > 0))) {
+ /* Cast the signal number to a character variable: */
+ c = sig;
+
+ /*
+ * Write the signal number to the kernel pipe so that it will
+ * be ready to read when this signal handler returns.
+ */
+ _thread_sys_write(_thread_kern_pipe[1], &c, 1);
+
+ /* Indicate that there are queued signals in the pipe. */
+ _sigq_check_reqd = 1;
+ }
+ else {
+ if (_atomic_lock(&signal_lock.access_lock)) {
+ /* There is another signal handler running: */
+ pending_sigs[sig - 1]++;
+ check_pending = 1;
+ }
+ else {
+ /* It's safe to handle the signal now. */
+ _thread_sig_handle(sig, scp);
+
+ /* Reset the pending and handled count back to 0: */
+ pending_sigs[sig - 1] = 0;
+ handled_sigs[sig - 1] = 0;
+
+ signal_lock.access_lock = 0;
+ }
+
+ /* Enter a loop to process pending signals: */
+ while ((check_pending != 0) &&
+ (_atomic_lock(&signal_lock.access_lock) == 0)) {
+ check_pending = 0;
+ for (i = 1; i < NSIG; i++) {
+ if (pending_sigs[i - 1] > handled_sigs[i - 1])
+ _thread_sig_handle(i, scp);
+ }
+ signal_lock.access_lock = 0;
+ }
+ }
+}
+
+void
+_thread_sig_handle(int sig, struct sigcontext * scp)
+{
+ int i;
+ pthread_t pthread, pthread_next;
+
+ /* Check if the signal requires a dump of thread information: */
+ if (sig == SIGINFO)
+ /* Dump thread information to file: */
+ _thread_dump_info();
+
+ /* Check if an interval timer signal: */
+ else if (sig == _SCHED_SIGNAL) {
+ /*
+ * This shouldn't ever occur (should this panic?).
+ */
} else {
/* Check if a child has terminated: */
if (sig == SIGCHLD) {
@@ -183,10 +194,9 @@ _thread_sig_handler(int sig, int code, struct sigcontext * scp)
* Enter a loop to discard pending SIGCONT
* signals:
*/
- for (pthread = _thread_link_list;
- pthread != NULL;
- pthread = pthread->nxt)
+ TAILQ_FOREACH(pthread, &_thread_list, tle) {
sigdelset(&pthread->sigpend,SIGCONT);
+ }
}
/*
@@ -196,11 +206,18 @@ _thread_sig_handler(int sig, int code, struct sigcontext * scp)
* if there are multiple waiters, we'll give it to the
* first one we find.
*/
- TAILQ_FOREACH(pthread, &_waitingq, pqe) {
+ for (pthread = TAILQ_FIRST(&_waitingq);
+ pthread != NULL; pthread = pthread_next) {
+ /*
+ * Grab the next thread before possibly destroying
+ * the link entry.
+ */
+ pthread_next = TAILQ_NEXT(pthread, pqe);
+
if ((pthread->state == PS_SIGWAIT) &&
sigismember(pthread->data.sigwait, sig)) {
/* Change the state of the thread to run: */
- PTHREAD_SIG_NEW_STATE(pthread,PS_RUNNING);
+ PTHREAD_NEW_STATE(pthread,PS_RUNNING);
/* Return the signal number: */
pthread->signo = sig;
@@ -219,10 +236,13 @@ _thread_sig_handler(int sig, int code, struct sigcontext * scp)
* Enter a loop to process each thread in the linked
* list:
*/
- for (pthread = _thread_link_list; pthread != NULL;
- pthread = pthread->nxt) {
+ TAILQ_FOREACH(pthread, &_thread_list, tle) {
pthread_t pthread_saved = _thread_run;
+ /* Current thread inside critical region? */
+ if (_thread_run->sig_defer_count > 0)
+ pthread->sig_defer_count++;
+
_thread_run = pthread;
_thread_signal(pthread,sig);
@@ -232,6 +252,10 @@ _thread_sig_handler(int sig, int code, struct sigcontext * scp)
*/
_dispatch_signals();
_thread_run = pthread_saved;
+
+ /* Current thread inside critical region? */
+ if (_thread_run->sig_defer_count > 0)
+ pthread->sig_defer_count--;
}
}
@@ -284,7 +308,7 @@ _thread_signal(pthread_t pthread, int sig)
pthread->interrupted = 1;
/* Change the state of the thread to run: */
- PTHREAD_SIG_NEW_STATE(pthread,PS_RUNNING);
+ PTHREAD_NEW_STATE(pthread,PS_RUNNING);
/* Return the signal number: */
pthread->signo = sig;
@@ -296,6 +320,7 @@ _thread_signal(pthread_t pthread, int sig)
*/
case PS_FDR_WAIT:
case PS_FDW_WAIT:
+ case PS_POLL_WAIT:
case PS_SLEEP_WAIT:
case PS_SELECT_WAIT:
if (sig != SIGCHLD ||
@@ -303,8 +328,11 @@ _thread_signal(pthread_t pthread, int sig)
/* Flag the operation as interrupted: */
pthread->interrupted = 1;
+ if (pthread->flags & PTHREAD_FLAGS_IN_WORKQ)
+ PTHREAD_WORKQ_REMOVE(pthread);
+
/* Change the state of the thread to run: */
- PTHREAD_SIG_NEW_STATE(pthread,PS_RUNNING);
+ PTHREAD_NEW_STATE(pthread,PS_RUNNING);
/* Return the signal number: */
pthread->signo = sig;
@@ -319,7 +347,7 @@ _thread_signal(pthread_t pthread, int sig)
if (!sigismember(&pthread->sigmask, sig) &&
_thread_sigact[sig - 1].sa_handler != SIG_DFL) {
/* Change the state of the thread to run: */
- PTHREAD_SIG_NEW_STATE(pthread,PS_RUNNING);
+ PTHREAD_NEW_STATE(pthread,PS_RUNNING);
/* Return the signal number: */
pthread->signo = sig;
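The handler above elects a single running instance with _atomic_lock: a loser only counts its signal as pending, while the winner handles its own signal and then loops, draining any counters that grew while it held the lock. The protocol in miniature (same variables as in the hunks above):

	if (_atomic_lock(&signal_lock.access_lock)) {
		pending_sigs[sig - 1]++;	/* the winner will handle it */
		check_pending = 1;
	} else {
		_thread_sig_handle(sig, scp);	/* safe to handle it now */
		signal_lock.access_lock = 0;
	}
	while (check_pending && _atomic_lock(&signal_lock.access_lock) == 0) {
		check_pending = 0;
		for (i = 1; i < NSIG; i++)
			if (pending_sigs[i - 1] > handled_sigs[i - 1])
				_thread_sig_handle(i, scp);
		signal_lock.access_lock = 0;
	}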
diff --git a/lib/libc_r/uthread/uthread_sigaction.c b/lib/libc_r/uthread/uthread_sigaction.c
index 73a3b21..ed390d1 100644
--- a/lib/libc_r/uthread/uthread_sigaction.c
+++ b/lib/libc_r/uthread/uthread_sigaction.c
@@ -29,6 +29,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
+ * $Id$
*/
#include <signal.h>
#include <errno.h>
@@ -77,6 +78,9 @@ sigaction(int sig, const struct sigaction * act, struct sigaction * oact)
gact.sa_mask = act->sa_mask;
gact.sa_flags = 0;
+ /* Ensure the scheduling signal is masked: */
+ sigaddset(&gact.sa_mask, _SCHED_SIGNAL);
+
/*
* Check if the signal handler is being set to
* the default or ignore handlers:
diff --git a/lib/libc_r/uthread/uthread_sigwait.c b/lib/libc_r/uthread/uthread_sigwait.c
index 98a5359..6c7d8d3 100644
--- a/lib/libc_r/uthread/uthread_sigwait.c
+++ b/lib/libc_r/uthread/uthread_sigwait.c
@@ -29,6 +29,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
+ * $Id$
*/
#include <signal.h>
#include <errno.h>
@@ -41,7 +42,7 @@ sigwait(const sigset_t * set, int *sig)
{
int ret = 0;
int i;
- sigset_t tempset;
+ sigset_t tempset, waitset;
struct sigaction act;
/*
@@ -51,17 +52,23 @@ sigwait(const sigset_t * set, int *sig)
act.sa_flags = SA_RESTART;
act.sa_mask = *set;
+ /* Ensure the scheduling signal is masked: */
+ sigaddset(&act.sa_mask, _SCHED_SIGNAL);
+
/*
- * These signals can't be waited on.
+ * Initialize the set of signals that will be waited on:
*/
- sigdelset(&act.sa_mask, SIGKILL);
- sigdelset(&act.sa_mask, SIGSTOP);
- sigdelset(&act.sa_mask, _SCHED_SIGNAL);
- sigdelset(&act.sa_mask, SIGCHLD);
- sigdelset(&act.sa_mask, SIGINFO);
+ waitset = *set;
+
+ /* These signals can't be waited on. */
+ sigdelset(&waitset, SIGKILL);
+ sigdelset(&waitset, SIGSTOP);
+ sigdelset(&waitset, _SCHED_SIGNAL);
+ sigdelset(&waitset, SIGCHLD);
+ sigdelset(&waitset, SIGINFO);
/* Check to see if a pending signal is in the wait mask. */
- if (tempset = (_thread_run->sigpend & act.sa_mask)) {
+ if (tempset = (_thread_run->sigpend & waitset)) {
/* Enter a loop to find a pending signal: */
for (i = 1; i < NSIG; i++) {
if (sigismember (&tempset, i))
@@ -81,17 +88,17 @@ sigwait(const sigset_t * set, int *sig)
* Enter a loop to find the signals that are SIG_DFL. For
* these signals we must install a dummy signal handler in
* order for the kernel to pass them in to us. POSIX says
- * that the application must explicitly install a dummy
+ * that the _application_ must explicitly install a dummy
* handler for signals that are SIG_IGN in order to sigwait
* on them. Note that SIG_IGN signals are left in the
* mask because a subsequent sigaction could enable an
* ignored signal.
*/
for (i = 1; i < NSIG; i++) {
- if (sigismember(&act.sa_mask, i)) {
- if (_thread_sigact[i - 1].sa_handler == SIG_DFL)
- if (_thread_sys_sigaction(i,&act,NULL) != 0)
- ret = -1;
+ if (sigismember(&waitset, i) &&
+ (_thread_sigact[i - 1].sa_handler == SIG_DFL)) {
+ if (_thread_sys_sigaction(i,&act,NULL) != 0)
+ ret = -1;
}
}
if (ret == 0) {
@@ -101,7 +108,7 @@ sigwait(const sigset_t * set, int *sig)
* mask is independent of the threads signal mask
* and requires separate storage.
*/
- _thread_run->data.sigwait = &act.sa_mask;
+ _thread_run->data.sigwait = &waitset;
/* Wait for a signal: */
_thread_kern_sched_state(PS_SIGWAIT, __FILE__, __LINE__);
@@ -119,7 +126,7 @@ sigwait(const sigset_t * set, int *sig)
/* Restore the sigactions: */
act.sa_handler = SIG_DFL;
for (i = 1; i < NSIG; i++) {
- if (sigismember(&act.sa_mask, i) &&
+ if (sigismember(&waitset, i) &&
(_thread_sigact[i - 1].sa_handler == SIG_DFL)) {
if (_thread_sys_sigaction(i,&act,NULL) != 0)
ret = -1;
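Two masks now do two different jobs: act.sa_mask is what the dummy handler blocks (the caller's set plus the scheduling signal), while waitset is what sigwait() actually waits for (the caller's set minus signals that can never be waited on). In brief:

	act.sa_mask = *set;
	sigaddset(&act.sa_mask, _SCHED_SIGNAL);	/* handler blocks sched signal */

	waitset = *set;
	sigdelset(&waitset, SIGKILL);		/* can't be caught or waited on */
	sigdelset(&waitset, SIGSTOP);
	sigdelset(&waitset, _SCHED_SIGNAL);	/* reserved for the scheduler  */
	sigdelset(&waitset, SIGCHLD);
	sigdelset(&waitset, SIGINFO);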
diff --git a/lib/libc_r/uthread/uthread_suspend_np.c b/lib/libc_r/uthread/uthread_suspend_np.c
index 6a6eaf4..02943bb 100644
--- a/lib/libc_r/uthread/uthread_suspend_np.c
+++ b/lib/libc_r/uthread/uthread_suspend_np.c
@@ -29,6 +29,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
+ * $Id$
*/
#include <errno.h>
#ifdef _THREAD_SAFE
@@ -52,20 +53,19 @@ pthread_suspend_np(pthread_t thread)
}
/*
- * Guard against preemption by a scheduling signal.
- * A change of thread state modifies the waiting
- * and priority queues.
+ * Defer signals to protect the scheduling queues from
+ * access by the signal handler:
*/
- _thread_kern_sched_defer();
+ _thread_kern_sig_defer();
/* Suspend the thread. */
PTHREAD_NEW_STATE(thread,PS_SUSPENDED);
/*
- * Reenable preemption and yield if a scheduling signal
- * occurred while in the critical region.
+ * Undefer and handle pending signals, yielding if
+ * necessary:
*/
- _thread_kern_sched_undefer();
+ _thread_kern_sig_undefer();
}
return(ret);
}
diff --git a/lib/libkse/thread/Makefile.inc b/lib/libkse/thread/Makefile.inc
index 16799cf..22e9548 100644
--- a/lib/libkse/thread/Makefile.inc
+++ b/lib/libkse/thread/Makefile.inc
@@ -1,4 +1,4 @@
-# $Id: Makefile.inc,v 1.16 1998/09/30 06:36:55 jb Exp $
+# $Id: Makefile.inc,v 1.17 1999/03/23 05:07:54 jb Exp $
# uthread sources
.PATH: ${.CURDIR}/uthread
@@ -73,8 +73,8 @@ SRCS+= \
uthread_once.c \
uthread_open.c \
uthread_pipe.c \
+ uthread_poll.c \
uthread_priority_queue.c \
- uthread_queue.c \
uthread_read.c \
uthread_readv.c \
uthread_recvfrom.c \
diff --git a/lib/libkse/thread/thr_close.c b/lib/libkse/thread/thr_close.c
index 7e21853..4f55b0a 100644
--- a/lib/libkse/thread/thr_close.c
+++ b/lib/libkse/thread/thr_close.c
@@ -29,7 +29,9 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
+ * $Id$
*/
+#include <errno.h>
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>
@@ -45,12 +47,21 @@ close(int fd)
int ret;
int status;
struct stat sb;
+ struct fd_table_entry *entry;
- /* Lock the file descriptor while the file is closed: */
- if ((ret = _FD_LOCK(fd, FD_RDWR, NULL)) == 0) {
- /* Get file descriptor status. */
- _thread_sys_fstat(fd, &sb);
-
+ if ((fd == _thread_kern_pipe[0]) || (fd == _thread_kern_pipe[1])) {
+ /*
+ * Don't allow silly programs to close the kernel pipe.
+ */
+ errno = EBADF;
+ ret = -1;
+ }
+ /*
+ * Lock the file descriptor while the file is closed and get
+ * the file descriptor status:
+ */
+ else if (((ret = _FD_LOCK(fd, FD_RDWR, NULL)) == 0) &&
+ ((ret = _thread_sys_fstat(fd, &sb)) == 0)) {
/*
* Check if the file should be left as blocking.
*
@@ -78,11 +89,14 @@ close(int fd)
_thread_sys_fcntl(fd, F_SETFL, flags & ~O_NONBLOCK);
}
+ /* XXX: Assumes well behaved threads. */
+ /* XXX: Defer real close to avoid race condition */
+ entry = _thread_fd_table[fd];
+ _thread_fd_table[fd] = NULL;
+ free(entry);
+
/* Close the file descriptor: */
ret = _thread_sys_close(fd);
-
- free(_thread_fd_table[fd]);
- _thread_fd_table[fd] = NULL;
}
return (ret);
}
diff --git a/lib/libkse/thread/thr_cond.c b/lib/libkse/thread/thr_cond.c
index e0360dd..bacfb72 100644
--- a/lib/libkse/thread/thr_cond.c
+++ b/lib/libkse/thread/thr_cond.c
@@ -29,6 +29,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
+ * $Id$
*/
#include <stdlib.h>
#include <errno.h>
@@ -44,6 +45,28 @@ static inline pthread_t cond_queue_deq(pthread_cond_t);
static inline void cond_queue_remove(pthread_cond_t, pthread_t);
static inline void cond_queue_enq(pthread_cond_t, pthread_t);
+/* Reinitialize a condition variable to defaults. */
+int
+_cond_reinit(pthread_cond_t * cond)
+{
+ int ret = 0;
+
+ if (cond == NULL)
+ ret = EINVAL;
+ else if (*cond == NULL)
+ ret = pthread_cond_init(cond, NULL);
+ else {
+ /*
+ * Initialize the condition variable structure:
+ */
+ TAILQ_INIT(&(*cond)->c_queue);
+ (*cond)->c_flags = COND_FLAGS_INITED;
+ (*cond)->c_type = COND_TYPE_FAST;
+ (*cond)->c_mutex = NULL;
+ memset(&(*cond)->lock, 0, sizeof((*cond)->lock));
+ }
+ return (ret);
+}
int
pthread_cond_init(pthread_cond_t * cond, const pthread_condattr_t * cond_attr)
@@ -146,6 +169,9 @@ pthread_cond_wait(pthread_cond_t * cond, pthread_mutex_t * mutex)
*/
else if (*cond != NULL ||
(rval = pthread_cond_init(cond,NULL)) == 0) {
+ /* Lock the condition variable structure: */
+ _SPINLOCK(&(*cond)->lock);
+
/*
* If the condvar was statically allocated, properly
* initialize the tail queue.
@@ -155,9 +181,6 @@ pthread_cond_wait(pthread_cond_t * cond, pthread_mutex_t * mutex)
(*cond)->c_flags |= COND_FLAGS_INITED;
}
- /* Lock the condition variable structure: */
- _SPINLOCK(&(*cond)->lock);
-
/* Process according to condition variable type: */
switch ((*cond)->c_type) {
/* Fast condition variable: */
@@ -247,6 +270,9 @@ pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
*/
else if (*cond != NULL ||
(rval = pthread_cond_init(cond,NULL)) == 0) {
+ /* Lock the condition variable structure: */
+ _SPINLOCK(&(*cond)->lock);
+
/*
* If the condvar was statically allocated, properly
* initialize the tail queue.
@@ -256,10 +282,6 @@ pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
(*cond)->c_flags |= COND_FLAGS_INITED;
}
-
- /* Lock the condition variable structure: */
- _SPINLOCK(&(*cond)->lock);
-
/* Process according to condition variable type: */
switch ((*cond)->c_type) {
/* Fast condition variable: */
@@ -375,6 +397,12 @@ pthread_cond_signal(pthread_cond_t * cond)
if (cond == NULL || *cond == NULL)
rval = EINVAL;
else {
+ /*
+ * Defer signals to protect the scheduling queues
+ * from access by the signal handler:
+ */
+ _thread_kern_sig_defer();
+
/* Lock the condition variable structure: */
_SPINLOCK(&(*cond)->lock);
@@ -409,6 +437,12 @@ pthread_cond_signal(pthread_cond_t * cond)
/* Unlock the condition variable structure: */
_SPINUNLOCK(&(*cond)->lock);
+
+ /*
+ * Undefer and handle pending signals, yielding if
+ * necessary:
+ */
+ _thread_kern_sig_undefer();
}
/* Return the completion status: */
@@ -425,14 +459,10 @@ pthread_cond_broadcast(pthread_cond_t * cond)
rval = EINVAL;
else {
/*
- * Guard against preemption by a scheduling signal.
- * A change of thread state modifies the waiting
- * and priority queues. In addition, we must assure
- * that all threads currently waiting on the condition
- * variable are signaled and are not timedout by a
- * scheduling signal that causes a preemption.
+ * Defer signals to protect the scheduling queues
+ * from access by the signal handler:
*/
- _thread_kern_sched_defer();
+ _thread_kern_sig_defer();
/* Lock the condition variable structure: */
_SPINLOCK(&(*cond)->lock);
@@ -468,9 +498,11 @@ pthread_cond_broadcast(pthread_cond_t * cond)
/* Unlock the condition variable structure: */
_SPINUNLOCK(&(*cond)->lock);
- /* Reenable preemption and yield if necessary.
+ /*
+ * Undefer and handle pending signals, yielding if
+ * necessary:
*/
- _thread_kern_sched_undefer();
+ _thread_kern_sig_undefer();
}
/* Return the completion status: */
@@ -488,7 +520,7 @@ cond_queue_deq(pthread_cond_t cond)
if ((pthread = TAILQ_FIRST(&cond->c_queue)) != NULL) {
TAILQ_REMOVE(&cond->c_queue, pthread, qe);
- pthread->flags &= ~PTHREAD_FLAGS_QUEUED;
+ pthread->flags &= ~PTHREAD_FLAGS_IN_CONDQ;
}
return(pthread);
@@ -507,9 +539,9 @@ cond_queue_remove(pthread_cond_t cond, pthread_t pthread)
* guard against removing the thread from the queue if
* it isn't in the queue.
*/
- if (pthread->flags & PTHREAD_FLAGS_QUEUED) {
+ if (pthread->flags & PTHREAD_FLAGS_IN_CONDQ) {
TAILQ_REMOVE(&cond->c_queue, pthread, qe);
- pthread->flags &= ~PTHREAD_FLAGS_QUEUED;
+ pthread->flags &= ~PTHREAD_FLAGS_IN_CONDQ;
}
}
@@ -535,6 +567,6 @@ cond_queue_enq(pthread_cond_t cond, pthread_t pthread)
tid = TAILQ_NEXT(tid, qe);
TAILQ_INSERT_BEFORE(tid, pthread, qe);
}
- pthread->flags |= PTHREAD_FLAGS_QUEUED;
+ pthread->flags |= PTHREAD_FLAGS_IN_CONDQ;
}
#endif
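The tail of cond_queue_enq shown above ends an ordered insertion; judging from the matching wait-queue code, the condition variable's queue appears to be kept in descending active-priority order. A hedged reconstruction of the loop the hunk truncates:

	tid = TAILQ_FIRST(&cond->c_queue);
	while ((tid != NULL) &&
	    (tid->active_priority >= pthread->active_priority))
		tid = TAILQ_NEXT(tid, qe);
	if (tid == NULL)
		TAILQ_INSERT_TAIL(&cond->c_queue, pthread, qe);
	else
		TAILQ_INSERT_BEFORE(tid, pthread, qe);
	pthread->flags |= PTHREAD_FLAGS_IN_CONDQ;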
diff --git a/lib/libkse/thread/thr_create.c b/lib/libkse/thread/thr_create.c
index 438e527..f5e0b63 100644
--- a/lib/libkse/thread/thr_create.c
+++ b/lib/libkse/thread/thr_create.c
@@ -29,6 +29,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
+ * $Id$
*/
#include <errno.h>
#include <stdlib.h>
@@ -171,7 +172,7 @@ pthread_create(pthread_t * thread, const pthread_attr_t * attr,
new_thread->inherited_priority = 0;
/* Initialise the join queue for the new thread: */
- _thread_queue_init(&(new_thread->join_queue));
+ TAILQ_INIT(&(new_thread->join_queue));
/* Initialize the mutex queue: */
TAILQ_INIT(&new_thread->mutexq);
@@ -179,46 +180,39 @@ pthread_create(pthread_t * thread, const pthread_attr_t * attr,
/* Initialise hooks in the thread structure: */
new_thread->specific_data = NULL;
new_thread->cleanup = NULL;
- new_thread->queue = NULL;
- new_thread->qnxt = NULL;
new_thread->flags = 0;
+ new_thread->poll_data.nfds = 0;
+ new_thread->poll_data.fds = NULL;
- /* Lock the thread list: */
- _lock_thread_list();
+ /*
+ * Defer signals to protect the scheduling queues
+ * from access by the signal handler:
+ */
+ _thread_kern_sig_defer();
/*
* Check if the garbage collector thread
* needs to be started.
*/
- f_gc = (_thread_link_list == _thread_initial);
+ f_gc = (TAILQ_FIRST(&_thread_list) == _thread_initial);
/* Add the thread to the linked list of all threads: */
- new_thread->nxt = _thread_link_list;
- _thread_link_list = new_thread;
-
- /* Unlock the thread list: */
- _unlock_thread_list();
-
- /*
- * Guard against preemption by a scheduling signal.
- * A change of thread state modifies the waiting
- * and priority queues.
- */
- _thread_kern_sched_defer();
+ TAILQ_INSERT_HEAD(&_thread_list, new_thread, tle);
if (pattr->suspend == PTHREAD_CREATE_SUSPENDED) {
new_thread->state = PS_SUSPENDED;
PTHREAD_WAITQ_INSERT(new_thread);
- } else {
+ }
+ else {
new_thread->state = PS_RUNNING;
PTHREAD_PRIOQ_INSERT_TAIL(new_thread);
}
/*
- * Reenable preemption and yield if a scheduling
- * signal occurred while in the critical region.
+ * Undefer and handle pending signals, yielding
+ * if necessary.
*/
- _thread_kern_sched_undefer();
+ _thread_kern_sig_undefer();
/* Return a pointer to the thread structure: */
(*thread) = new_thread;
diff --git a/lib/libkse/thread/thr_detach.c b/lib/libkse/thread/thr_detach.c
index 05da832..73a58de2 100644
--- a/lib/libkse/thread/thr_detach.c
+++ b/lib/libkse/thread/thr_detach.c
@@ -29,6 +29,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
+ * $Id$
*/
#include <errno.h>
#ifdef _THREAD_SAFE
@@ -53,23 +54,25 @@ pthread_detach(pthread_t pthread)
pthread->attr.flags |= PTHREAD_DETACHED;
/*
- * Guard against preemption by a scheduling signal.
- * A change of thread state modifies the waiting
- * and priority queues.
+ * Defer signals to protect the scheduling queues from
+ * access by the signal handler:
*/
- _thread_kern_sched_defer();
+ _thread_kern_sig_defer();
/* Enter a loop to bring all threads off the join queue: */
- while ((next_thread = _thread_queue_deq(&pthread->join_queue)) != NULL) {
+ while ((next_thread = TAILQ_FIRST(&pthread->join_queue)) != NULL) {
+ /* Remove the thread from the queue: */
+ TAILQ_REMOVE(&pthread->join_queue, next_thread, qe);
+
/* Make the thread run: */
PTHREAD_NEW_STATE(next_thread,PS_RUNNING);
}
/*
- * Reenable preemption and yield if a scheduling signal
- * occurred while in the critical region.
+ * Undefer and handle pending signals, yielding if a
+ * scheduling signal occurred while in the critical region.
*/
- _thread_kern_sched_undefer();
+ _thread_kern_sig_undefer();
} else
/* Return an error: */
rval = EINVAL;
diff --git a/lib/libkse/thread/thr_exit.c b/lib/libkse/thread/thr_exit.c
index c54dbda..a5f706d 100644
--- a/lib/libkse/thread/thr_exit.c
+++ b/lib/libkse/thread/thr_exit.c
@@ -29,6 +29,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
+ * $Id$
*/
#include <errno.h>
#include <unistd.h>
@@ -92,7 +93,12 @@ _thread_exit(char *fname, int lineno, char *string)
_thread_sys_write(2, s, strlen(s));
/* Force this process to exit: */
+ /* XXX - Do we want abort to be conditional on _PTHREADS_INVARIANTS? */
+#if defined(_PTHREADS_INVARIANTS)
+ abort();
+#else
_exit(1);
+#endif
}
void
@@ -129,22 +135,24 @@ pthread_exit(void *status)
}
/*
- * Guard against preemption by a scheduling signal. A change of
- * thread state modifies the waiting and priority queues.
+ * Defer signals to protect the scheduling queues from access
+ * by the signal handler:
*/
- _thread_kern_sched_defer();
+ _thread_kern_sig_defer();
/* Check if there are any threads joined to this one: */
- while ((pthread = _thread_queue_deq(&(_thread_run->join_queue))) != NULL) {
+ while ((pthread = TAILQ_FIRST(&(_thread_run->join_queue))) != NULL) {
+ /* Remove the thread from the queue: */
+ TAILQ_REMOVE(&_thread_run->join_queue, pthread, qe);
+
/* Wake the joined thread and let it detach this thread: */
PTHREAD_NEW_STATE(pthread,PS_RUNNING);
}
/*
- * Reenable preemption and yield if a scheduling signal
- * occurred while in the critical region.
+ * Undefer and handle pending signals, yielding if necessary:
*/
- _thread_kern_sched_undefer();
+ _thread_kern_sig_undefer();
/*
* Lock the garbage collector mutex to ensure that the garbage
@@ -154,8 +162,21 @@ pthread_exit(void *status)
PANIC("Cannot lock gc mutex");
/* Add this thread to the list of dead threads. */
- _thread_run->nxt_dead = _thread_dead;
- _thread_dead = _thread_run;
+ TAILQ_INSERT_HEAD(&_dead_list, _thread_run, dle);
+
+ /*
+ * Defer signals to protect the scheduling queues from access
+ * by the signal handler:
+ */
+ _thread_kern_sig_defer();
+
+ /* Remove this thread from the thread list: */
+ TAILQ_REMOVE(&_thread_list, _thread_run, tle);
+
+ /*
+ * Undefer and handle pending signals, yielding if necessary:
+ */
+ _thread_kern_sig_undefer();
/*
* Signal the garbage collector thread that there is something
diff --git a/lib/libkse/thread/thr_find_thread.c b/lib/libkse/thread/thr_find_thread.c
index e4a59a0..a010a7e 100644
--- a/lib/libkse/thread/thr_find_thread.c
+++ b/lib/libkse/thread/thr_find_thread.c
@@ -29,6 +29,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
+ * $Id$
*/
#include <errno.h>
#ifdef _THREAD_SAFE
@@ -46,20 +47,20 @@ _find_thread(pthread_t pthread)
/* Invalid thread: */
return(EINVAL);
- /* Lock the thread list: */
- _lock_thread_list();
-
- /* Point to the first thread in the list: */
- pthread1 = _thread_link_list;
+ /*
+ * Defer signals to protect the thread list from access
+ * by the signal handler:
+ */
+ _thread_kern_sig_defer();
- /* Search for the thread to join to: */
- while (pthread1 != NULL && pthread1 != pthread) {
- /* Point to the next thread: */
- pthread1 = pthread1->nxt;
+ /* Search for the specified thread: */
+ TAILQ_FOREACH(pthread1, &_thread_list, tle) {
+ if (pthread == pthread1)
+ break;
}
- /* Unlock the thread list: */
- _unlock_thread_list();
+ /* Undefer and handle pending signals, yielding if necessary: */
+ _thread_kern_sig_undefer();
/* Return zero if the thread exists: */
return ((pthread1 != NULL) ? 0:ESRCH);
@@ -83,13 +84,10 @@ _find_dead_thread(pthread_t pthread)
if (pthread_mutex_lock(&_gc_mutex) != 0)
PANIC("Cannot lock gc mutex");
- /* Point to the first thread in the list: */
- pthread1 = _thread_dead;
-
- /* Search for the thread to join to: */
- while (pthread1 != NULL && pthread1 != pthread) {
- /* Point to the next thread: */
- pthread1 = pthread1->nxt_dead;
+ /* Search for the specified thread: */
+ TAILQ_FOREACH(pthread1, &_dead_list, dle) {
+ if (pthread1 == pthread)
+ break;
}
/* Unlock the garbage collector mutex: */
diff --git a/lib/libkse/thread/thr_fork.c b/lib/libkse/thread/thr_fork.c
index 5582c1e..8d3322e 100644
--- a/lib/libkse/thread/thr_fork.c
+++ b/lib/libkse/thread/thr_fork.c
@@ -29,6 +29,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
+ * $Id$
*/
#include <errno.h>
#include <string.h>
@@ -44,10 +45,13 @@ fork(void)
int i, flags;
pid_t ret;
pthread_t pthread;
- pthread_t pthread_next;
+ pthread_t pthread_save;
- /* Lock the thread list: */
- _lock_thread_list();
+ /*
+ * Defer signals to protect the scheduling queues from access
+ * by the signal handler:
+ */
+ _thread_kern_sig_defer();
/* Fork a new process: */
if ((ret = _thread_sys_fork()) != 0) {
@@ -88,45 +92,79 @@ fork(void)
else if (_thread_sys_fcntl(_thread_kern_pipe[1], F_SETFL, flags | O_NONBLOCK) == -1) {
/* Abort this application: */
abort();
+ }
+ /* Reinitialize the GC mutex: */
+ else if (_mutex_reinit(&_gc_mutex) != 0) {
+ /* Abort this application: */
+ PANIC("Cannot initialize GC mutex for forked process");
+ }
+ /* Reinitialize the GC condition variable: */
+ else if (_cond_reinit(&_gc_cond) != 0) {
+ /* Abort this application: */
+ PANIC("Cannot initialize GC condvar for forked process");
+ }
/* Initialize the ready queue: */
- } else if (_pq_init(&_readyq, PTHREAD_MIN_PRIORITY,
- PTHREAD_MAX_PRIORITY) != 0) {
+ else if (_pq_init(&_readyq) != 0) {
/* Abort this application: */
- PANIC("Cannot allocate priority ready queue.");
+ PANIC("Cannot initialize priority ready queue.");
} else {
- /* Point to the first thread in the list: */
- pthread = _thread_link_list;
-
/*
* Enter a loop to remove all threads other than
* the running thread from the thread list:
*/
+ pthread = TAILQ_FIRST(&_thread_list);
while (pthread != NULL) {
- pthread_next = pthread->nxt;
- if (pthread == _thread_run) {
- _thread_link_list = pthread;
- pthread->nxt = NULL;
- } else {
- if (pthread->attr.stackaddr_attr ==
- NULL && pthread->stack != NULL)
+ /* Save the thread to be freed: */
+ pthread_save = pthread;
+
+ /*
+ * Advance to the next thread before
+ * destroying the current thread:
+ */
+ pthread = TAILQ_NEXT(pthread, tle);
+
+ /* Make sure this isn't the running thread: */
+ if (pthread_save != _thread_run) {
+ /* Remove this thread from the list: */
+ TAILQ_REMOVE(&_thread_list,
+ pthread_save, tle);
+
+ if (pthread_save->attr.stackaddr_attr ==
+ NULL && pthread_save->stack != NULL)
/*
* Free the stack of the
* dead thread:
*/
- free(pthread->stack);
+ free(pthread_save->stack);
- if (pthread->specific_data != NULL)
- free(pthread->specific_data);
+ if (pthread_save->specific_data != NULL)
+ free(pthread_save->specific_data);
- free(pthread);
- }
+ if (pthread_save->poll_data.fds != NULL)
+ free(pthread_save->poll_data.fds);
- /* Point to the next thread: */
- pthread = pthread_next;
+ free(pthread_save);
+ }
}
- /* Re-init the waiting queues. */
+ /* Re-init the dead thread list: */
+ TAILQ_INIT(&_dead_list);
+
+ /* Re-init the waiting and work queues. */
TAILQ_INIT(&_waitingq);
+ TAILQ_INIT(&_workq);
+
+ /* Re-init the threads mutex queue: */
+ TAILQ_INIT(&_thread_run->mutexq);
+
+ /* No spinlocks yet: */
+ _spinblock_count = 0;
+
+ /* Don't queue signals yet: */
+ _queue_signals = 0;
+
+ /* Initialize signal handling: */
+ _thread_sig_init();
/* Initialize the scheduling switch hook routine: */
_sched_switch_hook = NULL;
@@ -147,15 +185,17 @@ fork(void)
_thread_fd_table[i]->w_lockcount = 0;
/* Initialise the read/write queues: */
- _thread_queue_init(&_thread_fd_table[i]->r_queue);
- _thread_queue_init(&_thread_fd_table[i]->w_queue);
+ TAILQ_INIT(&_thread_fd_table[i]->r_queue);
+ TAILQ_INIT(&_thread_fd_table[i]->w_queue);
}
}
}
}
- /* Unlock the thread list: */
- _unlock_thread_list();
+ /*
+ * Undefer and handle pending signals, yielding if necessary:
+ */
+ _thread_kern_sig_undefer();
/* Return the process ID: */
return (ret);
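The defer/undefer bracket that fork() now uses recurses safely, unlike the old list lock. A hedged sketch of the counting discipline behind _thread_kern_sig_defer()/_thread_kern_sig_undefer(); the names mirror this patch, but the bodies are simplified and the counters here are plain globals rather than per-thread fields:

	static int sig_defer_count;		/* per-thread in the real library */
	static volatile int sigq_check_reqd;	/* stands in for _sigq_check_reqd */

	static void
	sig_defer(void)
	{
		sig_defer_count++;	/* nests; callees may defer again */
	}

	static void
	sig_undefer(void)
	{
		if (sig_defer_count > 1)
			sig_defer_count--;	/* still inside an outer bracket */
		else if (sig_defer_count == 1) {
			sig_defer_count = 0;
			while (sigq_check_reqd != 0) {
				sigq_check_reqd = 0;
				/* dequeue and handle signals queued meanwhile */
			}
		}
	}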
diff --git a/lib/libkse/thread/thr_info.c b/lib/libkse/thread/thr_info.c
index d2d97da..8c0787a 100644
--- a/lib/libkse/thread/thr_info.c
+++ b/lib/libkse/thread/thr_info.c
@@ -29,6 +29,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
+ * $Id$
*/
#include <stdio.h>
#include <fcntl.h>
@@ -55,6 +56,7 @@ static const struct s_thread_info thread_info[] = {
{PS_FDR_WAIT , "Waiting for read"},
{PS_FDW_WAIT , "Waiting for write"},
{PS_FILE_WAIT , "Waiting for FILE lock"},
+ {PS_POLL_WAIT , "Waiting on poll"},
{PS_SELECT_WAIT , "Waiting on select"},
{PS_SLEEP_WAIT , "Sleeping"},
{PS_WAIT_WAIT , "Waiting process"},
@@ -108,8 +110,7 @@ _thread_dump_info(void)
_thread_sys_write(fd, s, strlen(s));
/* Enter a loop to report each thread in the global list: */
- for (pthread = _thread_link_list; pthread != NULL;
- pthread = pthread->nxt) {
+ TAILQ_FOREACH(pthread, &_thread_list, tle) {
/* Find the state: */
for (j = 0; j < (sizeof(thread_info) /
sizeof(struct s_thread_info)) - 1; j++)
@@ -214,8 +215,29 @@ _thread_dump_info(void)
_thread_sys_write(fd, s, strlen(s));
}
+ /* Output a header for threads in the work queue: */
+ strcpy(s, "\n\n=============\nTHREADS IN WORKQ\n\n");
+ _thread_sys_write(fd, s, strlen(s));
+
+ /* Enter a loop to report each thread in the work queue: */
+ TAILQ_FOREACH (pthread, &_workq, qe) {
+ /* Find the state: */
+ for (j = 0; j < (sizeof(thread_info) /
+ sizeof(struct s_thread_info)) - 1; j++)
+ if (thread_info[j].state == pthread->state)
+ break;
+ /* Output a record for the current thread: */
+ snprintf(s, sizeof(s),
+ "--------------------\nThread %p (%s) prio %3d state %s [%s:%d]\n",
+ pthread, (pthread->name == NULL) ?
+ "":pthread->name, pthread->base_priority,
+ thread_info[j].name,
+ pthread->fname,pthread->lineno);
+ _thread_sys_write(fd, s, strlen(s));
+ }
+
/* Check if there are no dead threads: */
- if (_thread_dead == NULL) {
+ if (TAILQ_FIRST(&_dead_list) == NULL) {
/* Output a record: */
strcpy(s, "\n\nTHERE ARE NO DEAD THREADS\n");
_thread_sys_write(fd, s, strlen(s));
@@ -228,8 +250,7 @@ _thread_dump_info(void)
* Enter a loop to report each thread in the global
* dead thread list:
*/
- for (pthread = _thread_dead; pthread != NULL;
- pthread = pthread->nxt_dead) {
+ TAILQ_FOREACH(pthread, &_dead_list, dle) {
/* Output a record for the current thread: */
snprintf(s, sizeof(s),
"Thread %p prio %3d [%s:%d]\n",
diff --git a/lib/libkse/thread/thr_init.c b/lib/libkse/thread/thr_init.c
index e4411ce..d470301 100644
--- a/lib/libkse/thread/thr_init.c
+++ b/lib/libkse/thread/thr_init.c
@@ -29,6 +29,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
+ * $Id$
*/
/* Allocate space for global thread variables here: */
@@ -39,7 +40,9 @@
#include <string.h>
#include <fcntl.h>
#include <paths.h>
+#include <poll.h>
#include <unistd.h>
+#include <sys/sysctl.h>
#include <sys/time.h>
#include <sys/ttycom.h>
#ifdef _THREAD_SAFE
@@ -81,6 +84,9 @@ _thread_init(void)
int fd;
int flags;
int i;
+ int len;
+ int mib[2];
+ struct clockinfo clockinfo;
struct sigaction act;
/* Check if this function has already been called: */
@@ -147,8 +153,8 @@ _thread_init(void)
/* Abort this application: */
PANIC("Cannot get kernel write pipe flags");
}
- /* Initialize the ready queue: */
- else if (_pq_init(&_readyq, PTHREAD_MIN_PRIORITY, PTHREAD_MAX_PRIORITY) != 0) {
+ /* Allocate and initialize the ready queue: */
+ else if (_pq_alloc(&_readyq, PTHREAD_MIN_PRIORITY, PTHREAD_MAX_PRIORITY) != 0) {
/* Abort this application: */
PANIC("Cannot allocate priority ready queue.");
}
@@ -165,8 +171,9 @@ _thread_init(void)
_thread_kern_thread.flags = PTHREAD_FLAGS_PRIVATE;
memset(_thread_initial, 0, sizeof(struct pthread));
- /* Initialize the waiting queue: */
+ /* Initialize the waiting and work queues: */
TAILQ_INIT(&_waitingq);
+ TAILQ_INIT(&_workq);
/* Initialize the scheduling switch hook routine: */
_sched_switch_hook = NULL;
@@ -186,23 +193,23 @@ _thread_init(void)
_thread_initial->state = PS_RUNNING;
/* Initialise the queue: */
- _thread_queue_init(&(_thread_initial->join_queue));
+ TAILQ_INIT(&(_thread_initial->join_queue));
/* Initialize the owned mutex queue and count: */
TAILQ_INIT(&(_thread_initial->mutexq));
_thread_initial->priority_mutex_count = 0;
/* Initialise the rest of the fields: */
- _thread_initial->sched_defer_count = 0;
- _thread_initial->yield_on_sched_undefer = 0;
+ _thread_initial->poll_data.nfds = 0;
+ _thread_initial->poll_data.fds = NULL;
+ _thread_initial->sig_defer_count = 0;
+ _thread_initial->yield_on_sig_undefer = 0;
_thread_initial->specific_data = NULL;
_thread_initial->cleanup = NULL;
- _thread_initial->queue = NULL;
- _thread_initial->qnxt = NULL;
- _thread_initial->nxt = NULL;
_thread_initial->flags = 0;
_thread_initial->error = 0;
- _thread_link_list = _thread_initial;
+ TAILQ_INIT(&_thread_list);
+ TAILQ_INSERT_HEAD(&_thread_list, _thread_initial, tle);
_thread_run = _thread_initial;
/* Initialise the global signal action structure: */
@@ -210,6 +217,9 @@ _thread_init(void)
act.sa_handler = (void (*) ()) _thread_sig_handler;
act.sa_flags = 0;
+ /* Initialize signal handling: */
+ _thread_sig_init();
+
/* Enter a loop to get the existing signal status: */
for (i = 1; i < NSIG; i++) {
/* Check for signals which cannot be trapped: */
@@ -241,6 +251,13 @@ _thread_init(void)
PANIC("Cannot initialise signal handler");
}
+ /* Get the kernel clockrate: */
+ mib[0] = CTL_KERN;
+ mib[1] = KERN_CLOCKRATE;
+ len = sizeof (struct clockinfo);
+ if (sysctl(mib, 2, &clockinfo, &len, NULL, 0) == 0)
+ _clock_res_nsec = clockinfo.tick * 1000;
+
/* Get the table size: */
if ((_thread_dtablesize = getdtablesize()) < 0) {
/*
@@ -256,6 +273,14 @@ _thread_init(void)
* table, so abort this process.
*/
PANIC("Cannot allocate memory for file descriptor table");
+ }
+ /* Allocate memory for the pollfd table: */
+ if ((_thread_pfd_table = (struct pollfd *) malloc(sizeof(struct pollfd) * _thread_dtablesize)) == NULL) {
+ /*
+ * Cannot allocate memory for the pollfd
+ * table, so abort this process.
+ */
+ PANIC("Cannot allocate memory for pollfd table");
} else {
/*
* Enter a loop to initialise the file descriptor
@@ -265,6 +290,14 @@ _thread_init(void)
/* Initialise the file descriptor table: */
_thread_fd_table[i] = NULL;
}
+
+ /* Initialize stdio file descriptor table entries: */
+ if ((_thread_fd_table_init(0) != 0) ||
+ (_thread_fd_table_init(1) != 0) ||
+ (_thread_fd_table_init(2) != 0)) {
+ PANIC("Cannot initialize stdio file descriptor "
+ "table entries");
+ }
}
}
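The KERN_CLOCKRATE query above is what lets the library adapt to kernels running at other than 100 HZ. The same query in isolation — standard FreeBSD sysctl usage, with error handling reduced to a bare exit:

	#include <sys/types.h>
	#include <sys/sysctl.h>
	#include <sys/time.h>
	#include <stdio.h>

	int
	main(void)
	{
		struct clockinfo ci;
		int mib[2] = { CTL_KERN, KERN_CLOCKRATE };
		size_t len = sizeof(ci);

		if (sysctl(mib, 2, &ci, &len, NULL, 0) == -1)
			return (1);
		/* ci.tick is the clock period in microseconds. */
		printf("clock resolution: %ld nsec\n", (long)ci.tick * 1000);
		return (0);
	}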
diff --git a/lib/libkse/thread/thr_join.c b/lib/libkse/thread/thr_join.c
index 2043b76..f36d7f7 100644
--- a/lib/libkse/thread/thr_join.c
+++ b/lib/libkse/thread/thr_join.c
@@ -29,6 +29,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
+ * $Id$
*/
#include <errno.h>
#ifdef _THREAD_SAFE
@@ -71,7 +72,7 @@ pthread_join(pthread_t pthread, void **thread_return)
/* Check if the thread is not dead: */
else if (pthread->state != PS_DEAD) {
/* Add the running thread to the join queue: */
- _thread_queue_enq(&(pthread->join_queue), _thread_run);
+ TAILQ_INSERT_TAIL(&(pthread->join_queue), _thread_run, qe);
/* Schedule the next thread: */
_thread_kern_sched_state(PS_JOIN, __FILE__, __LINE__);
diff --git a/lib/libkse/thread/thr_kern.c b/lib/libkse/thread/thr_kern.c
index f1e97f9..8718906 100644
--- a/lib/libkse/thread/thr_kern.c
+++ b/lib/libkse/thread/thr_kern.c
@@ -29,10 +29,11 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: uthread_kern.c,v 1.17 1999/05/07 07:59:44 jasone Exp $
+ * $Id: uthread_kern.c,v 1.16 1999/03/23 05:07:56 jb Exp $
*
*/
#include <errno.h>
+#include <poll.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
@@ -51,7 +52,10 @@
/* Static function prototype definitions: */
static void
-_thread_kern_select(int wait_reqd);
+_thread_kern_poll(int wait_reqd);
+
+static void
+dequeue_signals(void);
static inline void
thread_run_switch_hook(pthread_t thread_out, pthread_t thread_in);
@@ -62,14 +66,12 @@ _thread_kern_sched(struct sigcontext * scp)
#ifndef __alpha__
char *fdata;
#endif
- pthread_t pthread;
- pthread_t pthread_h = NULL;
+ pthread_t pthread, pthread_h = NULL;
pthread_t last_thread = NULL;
struct itimerval itimer;
- struct timespec ts;
- struct timespec ts1;
- struct timeval tv;
- struct timeval tv1;
+ struct timespec ts, ts1;
+ struct timeval tv, tv1;
+ int i, set_timer = 0;
/*
* Flag the pthread kernel as executing scheduler code
@@ -111,6 +113,7 @@ __asm__("fnsave %0": :"m"(*fdata));
/* Run the installed switch hook: */
thread_run_switch_hook(_last_user_thread, _thread_run);
}
+
return;
} else
/* Flag the jump buffer was the last state saved: */
@@ -127,238 +130,208 @@ __asm__("fnsave %0": :"m"(*fdata));
* either a sigreturn (if the state was saved as a sigcontext) or a
* longjmp (if the state was saved by a setjmp).
*/
- while (_thread_link_list != NULL) {
+ while (!(TAILQ_EMPTY(&_thread_list))) {
/* Get the current time of day: */
gettimeofday(&tv, NULL);
TIMEVAL_TO_TIMESPEC(&tv, &ts);
/*
- * Poll file descriptors to update the state of threads
- * waiting on file I/O where data may be available:
+ * Protect the scheduling queues from access by the signal
+ * handler.
*/
- _thread_kern_select(0);
+ _queue_signals = 1;
- /*
- * Define the maximum time before a scheduling signal
- * is required:
- */
- itimer.it_value.tv_sec = 0;
- itimer.it_value.tv_usec = TIMESLICE_USEC;
-
- /*
- * The interval timer is not reloaded when it
- * times out. The interval time needs to be
- * calculated every time.
- */
- itimer.it_interval.tv_sec = 0;
- itimer.it_interval.tv_usec = 0;
-
- /*
- * Enter a loop to look for sleeping threads that are ready
- * or timedout. While we're at it, also find the smallest
- * timeout value for threads waiting for a time.
- */
- _waitingq_check_reqd = 0; /* reset flag before loop */
- TAILQ_FOREACH(pthread, &_waitingq, pqe) {
- /* Check if this thread is ready: */
- if (pthread->state == PS_RUNNING) {
- PTHREAD_WAITQ_REMOVE(pthread);
- PTHREAD_PRIOQ_INSERT_TAIL(pthread);
- }
+ if (_thread_run != &_thread_kern_thread) {
/*
- * Check if this thread is blocked by an
- * atomic lock:
+ * This thread no longer needs to yield the CPU.
+ */
+ _thread_run->yield_on_sig_undefer = 0;
+
+ /*
+ * Save the current time as the time that the thread
+ * became inactive:
*/
- else if (pthread->state == PS_SPINBLOCK) {
+ _thread_run->last_inactive.tv_sec = tv.tv_sec;
+ _thread_run->last_inactive.tv_usec = tv.tv_usec;
+
+ /*
+ * Place the currently running thread into the
+ * appropriate queue(s).
+ */
+ switch (_thread_run->state) {
+ case PS_DEAD:
/*
- * If the lock is available, let
- * the thread run.
+ * Dead threads are not placed in any queue:
*/
- if (pthread->data.spinlock->access_lock == 0) {
- PTHREAD_NEW_STATE(pthread,PS_RUNNING);
- }
+ break;
- /* Check if this thread is to timeout: */
- } else if (pthread->state == PS_COND_WAIT ||
- pthread->state == PS_SLEEP_WAIT ||
- pthread->state == PS_FDR_WAIT ||
- pthread->state == PS_FDW_WAIT ||
- pthread->state == PS_SELECT_WAIT) {
- /* Check if this thread is to wait forever: */
- if (pthread->wakeup_time.tv_sec == -1) {
- }
+ case PS_RUNNING:
/*
- * Check if this thread is to wakeup
- * immediately or if it is past its wakeup
- * time:
+ * Runnable threads can't be placed in the
+ * priority queue until after waiting threads
+ * are polled (to preserve round-robin
+ * scheduling).
*/
- else if ((pthread->wakeup_time.tv_sec == 0 &&
- pthread->wakeup_time.tv_nsec == 0) ||
- (ts.tv_sec > pthread->wakeup_time.tv_sec) ||
- ((ts.tv_sec == pthread->wakeup_time.tv_sec) &&
- (ts.tv_nsec >= pthread->wakeup_time.tv_nsec))) {
- /*
- * Check if this thread is waiting on
- * select:
- */
- if (pthread->state == PS_SELECT_WAIT) {
- /*
- * The select has timed out, so
- * zero the file descriptor
- * sets:
- */
- FD_ZERO(&pthread->data.select_data->readfds);
- FD_ZERO(&pthread->data.select_data->writefds);
- FD_ZERO(&pthread->data.select_data->exceptfds);
- pthread->data.select_data->nfds = 0;
- }
+ if ((_thread_run->slice_usec != -1) &&
+ (_thread_run->attr.sched_policy != SCHED_FIFO)) {
/*
- * Return an error as an interrupted
- * wait:
+ * Accumulate the number of microseconds that
+ * this thread has run for:
*/
- _thread_seterrno(pthread, EINTR);
+ _thread_run->slice_usec +=
+ (_thread_run->last_inactive.tv_sec -
+ _thread_run->last_active.tv_sec) * 1000000 +
+ _thread_run->last_inactive.tv_usec -
+ _thread_run->last_active.tv_usec;
+
+ /* Check for time quantum exceeded: */
+ if (_thread_run->slice_usec > TIMESLICE_USEC)
+ _thread_run->slice_usec = -1;
+ }
+ break;
- /*
- * Flag the timeout in the thread
- * structure:
- */
- pthread->timeout = 1;
+ /*
+ * States which do not depend on file descriptor I/O
+ * operations or timeouts:
+ */
+ case PS_DEADLOCK:
+ case PS_FDLR_WAIT:
+ case PS_FDLW_WAIT:
+ case PS_FILE_WAIT:
+ case PS_JOIN:
+ case PS_MUTEX_WAIT:
+ case PS_SIGSUSPEND:
+ case PS_SIGTHREAD:
+ case PS_SIGWAIT:
+ case PS_SUSPENDED:
+ case PS_WAIT_WAIT:
+ /* No timeouts for these states: */
+ _thread_run->wakeup_time.tv_sec = -1;
+ _thread_run->wakeup_time.tv_nsec = -1;
- /*
- * Change the threads state to allow
- * it to be restarted:
- */
- PTHREAD_NEW_STATE(pthread,PS_RUNNING);
- } else {
- /*
- * Calculate the time until this thread
- * is ready, allowing for the clock
- * resolution:
- */
- ts1.tv_sec = pthread->wakeup_time.tv_sec
- - ts.tv_sec;
- ts1.tv_nsec = pthread->wakeup_time.tv_nsec
- - ts.tv_nsec + CLOCK_RES_NSEC;
+ /* Restart the time slice: */
+ _thread_run->slice_usec = -1;
- /*
- * Check for underflow of the
- * nanosecond field:
- */
- if (ts1.tv_nsec < 0) {
- /*
- * Allow for the underflow
- * of the nanosecond field:
- */
- ts1.tv_sec--;
- ts1.tv_nsec += 1000000000;
- }
- /*
- * Check for overflow of the nanosecond
- * field:
- */
- if (ts1.tv_nsec >= 1000000000) {
- /*
- * Allow for the overflow of
- * the nanosecond field:
- */
- ts1.tv_sec++;
- ts1.tv_nsec -= 1000000000;
- }
- /*
- * Convert the timespec structure
- * to a timeval structure:
- */
- TIMESPEC_TO_TIMEVAL(&tv1, &ts1);
+ /* Insert into the waiting queue: */
+ PTHREAD_WAITQ_INSERT(_thread_run);
+ break;
- /*
- * Check if the thread will be ready
- * sooner than the earliest ones found
- * so far:
- */
- if (timercmp(&tv1, &itimer.it_value, <)) {
- /*
- * Update the time value:
- */
- itimer.it_value.tv_sec = tv1.tv_sec;
- itimer.it_value.tv_usec = tv1.tv_usec;
- }
- }
+ /* States which can timeout: */
+ case PS_COND_WAIT:
+ case PS_SLEEP_WAIT:
+ /* Restart the time slice: */
+ _thread_run->slice_usec = -1;
+
+ /* Insert into the waiting queue: */
+ PTHREAD_WAITQ_INSERT(_thread_run);
+ break;
+
+ /* States that require periodic work: */
+ case PS_SPINBLOCK:
+ /* No timeouts for this state: */
+ _thread_run->wakeup_time.tv_sec = -1;
+ _thread_run->wakeup_time.tv_nsec = -1;
+
+ /* Increment spinblock count: */
+ _spinblock_count++;
+ /* fall through */
+ case PS_FDR_WAIT:
+ case PS_FDW_WAIT:
+ case PS_POLL_WAIT:
+ case PS_SELECT_WAIT:
+ /* Restart the time slice: */
+ _thread_run->slice_usec = -1;
+
+ /* Insert into the waiting queue: */
+ PTHREAD_WAITQ_INSERT(_thread_run);
+
+ /* Insert into the work queue: */
+ PTHREAD_WORKQ_INSERT(_thread_run);
}
}
- /* Check if there is a current thread: */
- if (_thread_run != &_thread_kern_thread) {
- /*
- * This thread no longer needs to yield the CPU.
- */
- _thread_run->yield_on_sched_undefer = 0;
+ /* Unprotect the scheduling queues: */
+ _queue_signals = 0;
- /*
- * Save the current time as the time that the thread
- * became inactive:
- */
- _thread_run->last_inactive.tv_sec = tv.tv_sec;
- _thread_run->last_inactive.tv_usec = tv.tv_usec;
+ /*
+ * Poll file descriptors to update the state of threads
+ * waiting on file I/O where data may be available:
+ */
+ _thread_kern_poll(0);
+
+ /* Protect the scheduling queues: */
+ _queue_signals = 1;
+ /*
+ * Wake up threads that have timed out. This has to be
+ * done after polling in case a thread does a poll or
+ * select with a zero timeout.
+ */
+ PTHREAD_WAITQ_SETACTIVE();
+ while (((pthread = TAILQ_FIRST(&_waitingq)) != NULL) &&
+ (pthread->wakeup_time.tv_sec != -1) &&
+ (((pthread->wakeup_time.tv_sec == 0) &&
+ (pthread->wakeup_time.tv_nsec == 0)) ||
+ (pthread->wakeup_time.tv_sec < ts.tv_sec) ||
+ ((pthread->wakeup_time.tv_sec == ts.tv_sec) &&
+ (pthread->wakeup_time.tv_nsec <= ts.tv_nsec)))) {
+ switch (pthread->state) {
+ case PS_POLL_WAIT:
+ case PS_SELECT_WAIT:
+ /* Return zero file descriptors ready: */
+ pthread->data.poll_data->nfds = 0;
+ /* fall through */
+ default:
+ /*
+ * Remove this thread from the waiting queue
+ * (and work queue if necessary) and place it
+ * in the ready queue.
+ */
+ PTHREAD_WAITQ_CLEARACTIVE();
+ if (pthread->flags & PTHREAD_FLAGS_IN_WORKQ)
+ PTHREAD_WORKQ_REMOVE(pthread);
+ PTHREAD_NEW_STATE(pthread, PS_RUNNING);
+ PTHREAD_WAITQ_SETACTIVE();
+ break;
+ }
/*
- * Accumulate the number of microseconds that this
- * thread has run for:
+ * Flag the timeout in the thread structure:
*/
- if ((_thread_run->slice_usec != -1) &&
- (_thread_run->attr.sched_policy != SCHED_FIFO)) {
- _thread_run->slice_usec +=
- (_thread_run->last_inactive.tv_sec -
- _thread_run->last_active.tv_sec) * 1000000 +
- _thread_run->last_inactive.tv_usec -
- _thread_run->last_active.tv_usec;
-
- /* Check for time quantum exceeded: */
- if (_thread_run->slice_usec > TIMESLICE_USEC)
- _thread_run->slice_usec = -1;
- }
- if (_thread_run->state == PS_RUNNING) {
- if (_thread_run->slice_usec == -1) {
- /*
- * The thread exceeded its time
- * quantum or it yielded the CPU;
- * place it at the tail of the
- * queue for its priority.
- */
- PTHREAD_PRIOQ_INSERT_TAIL(_thread_run);
- } else {
- /*
- * The thread hasn't exceeded its
- * interval. Place it at the head
- * of the queue for its priority.
- */
- PTHREAD_PRIOQ_INSERT_HEAD(_thread_run);
- }
- }
- else if (_thread_run->state == PS_DEAD) {
+ pthread->timeout = 1;
+ }
+ PTHREAD_WAITQ_CLEARACTIVE();
+
+ /*
+ * Check if there is a current runnable thread that isn't
+ * already in the ready queue:
+ */
+ if ((_thread_run != &_thread_kern_thread) &&
+ (_thread_run->state == PS_RUNNING) &&
+ ((_thread_run->flags & PTHREAD_FLAGS_IN_PRIOQ) == 0)) {
+ if (_thread_run->slice_usec == -1) {
/*
- * Don't add dead threads to the waiting
- * queue, because when they're reaped, it
- * will corrupt the queue.
+ * The thread exceeded its time
+ * quantum or it yielded the CPU;
+ * place it at the tail of the
+ * queue for its priority.
*/
- }
- else {
+ PTHREAD_PRIOQ_INSERT_TAIL(_thread_run);
+ } else {
/*
- * This thread has changed state and needs
- * to be placed in the waiting queue.
+ * The thread hasn't exceeded its
+ * interval. Place it at the head
+ * of the queue for its priority.
*/
- PTHREAD_WAITQ_INSERT(_thread_run);
-
- /* Restart the time slice: */
- _thread_run->slice_usec = -1;
+ PTHREAD_PRIOQ_INSERT_HEAD(_thread_run);
}
}
/*
* Get the highest priority thread in the ready queue.
*/
- pthread_h = PTHREAD_PRIOQ_FIRST;
+ pthread_h = PTHREAD_PRIOQ_FIRST();
/* Check if there are no threads ready to run: */
if (pthread_h == NULL) {
@@ -369,18 +342,84 @@ __asm__("fnsave %0": :"m"(*fdata));
*/
_thread_run = &_thread_kern_thread;
+ /* Unprotect the scheduling queues: */
+ _queue_signals = 0;
+
/*
* There are no threads ready to run, so wait until
* something happens that changes this condition:
*/
- _thread_kern_select(1);
- } else {
+ _thread_kern_poll(1);
+ }
+ else {
+ /* Remove the thread from the ready queue: */
+ PTHREAD_PRIOQ_REMOVE(pthread_h);
+
+ /* Get first thread on the waiting list: */
+ pthread = TAILQ_FIRST(&_waitingq);
+
+ /* Check to see if there is more than one thread: */
+ if (pthread_h != TAILQ_FIRST(&_thread_list) ||
+ TAILQ_NEXT(pthread_h, tle) != NULL)
+ set_timer = 1;
+ else
+ set_timer = 0;
+
+ /* Unprotect the scheduling queues: */
+ _queue_signals = 0;
+
+ /*
+ * Check for signals queued while the scheduling
+ * queues were protected:
+ */
+ while (_sigq_check_reqd != 0) {
+ /* Clear before handling queued signals: */
+ _sigq_check_reqd = 0;
+
+ /* Protect the scheduling queues again: */
+ _queue_signals = 1;
+
+ dequeue_signals();
+
+ /*
+ * Check for a higher priority thread that
+ * became runnable due to signal handling.
+ */
+ if (((pthread = PTHREAD_PRIOQ_FIRST()) != NULL) &&
+ (pthread->active_priority > pthread_h->active_priority)) {
+ /*
+ * Insert the lower priority thread
+ * at the head of its priority list:
+ */
+ PTHREAD_PRIOQ_INSERT_HEAD(pthread_h);
+
+ /* Remove the thread from the ready queue: */
+ PTHREAD_PRIOQ_REMOVE(pthread);
+
+ /* There's a new thread in town: */
+ pthread_h = pthread;
+ }
+
+ /* Get first thread on the waiting list: */
+ pthread = TAILQ_FIRST(&_waitingq);
+
+ /*
+ * Check to see if there is more than one
+ * thread:
+ */
+ if (pthread_h != TAILQ_FIRST(&_thread_list) ||
+ TAILQ_NEXT(pthread_h, tle) != NULL)
+ set_timer = 1;
+ else
+ set_timer = 0;
+
+ /* Unprotect the scheduling queues: */
+ _queue_signals = 0;
+ }
+
/* Make the selected thread the current thread: */
_thread_run = pthread_h;
- /* Remove the thread from the ready queue. */
- PTHREAD_PRIOQ_REMOVE(_thread_run);
-
/*
* Save the current time as the time that the thread
* became active:
@@ -389,6 +428,76 @@ __asm__("fnsave %0": :"m"(*fdata));
_thread_run->last_active.tv_usec = tv.tv_usec;
/*
+ * Define the maximum time before a scheduling signal
+ * is required:
+ */
+ itimer.it_value.tv_sec = 0;
+ itimer.it_value.tv_usec = TIMESLICE_USEC;
+
+ /*
+ * The interval timer is not reloaded when it
+ * times out. The interval time needs to be
+ * calculated every time.
+ */
+ itimer.it_interval.tv_sec = 0;
+ itimer.it_interval.tv_usec = 0;
+
+ /* Get first thread on the waiting list: */
+ if ((pthread != NULL) &&
+ (pthread->wakeup_time.tv_sec != -1)) {
+ /*
+ * Calculate the time until this thread
+ * is ready, allowing for the clock
+ * resolution:
+ */
+ ts1.tv_sec = pthread->wakeup_time.tv_sec
+ - ts.tv_sec;
+ ts1.tv_nsec = pthread->wakeup_time.tv_nsec
+ - ts.tv_nsec + _clock_res_nsec;
+
+ /*
+ * Check for underflow of the nanosecond field:
+ */
+ if (ts1.tv_nsec < 0) {
+ /*
+ * Allow for the underflow of the
+ * nanosecond field:
+ */
+ ts1.tv_sec--;
+ ts1.tv_nsec += 1000000000;
+ }
+ /*
+ * Check for overflow of the nanosecond field:
+ */
+ if (ts1.tv_nsec >= 1000000000) {
+ /*
+ * Allow for the overflow of the
+ * nanosecond field:
+ */
+ ts1.tv_sec++;
+ ts1.tv_nsec -= 1000000000;
+ }
+ /*
+ * Convert the timespec structure to a
+ * timeval structure:
+ */
+ TIMESPEC_TO_TIMEVAL(&tv1, &ts1);
+
+ /*
+ * Check if the thread will be ready
+ * sooner than the earliest ones found
+ * so far:
+ */
+ if (timercmp(&tv1, &itimer.it_value, <)) {
+ /*
+ * Update the time value:
+ */
+ itimer.it_value.tv_sec = tv1.tv_sec;
+ itimer.it_value.tv_usec = tv1.tv_usec;
+ }
+ }
+
+ /*
* Check if this thread is running for the first time
* or running again after using its full time slice
* allocation:
@@ -399,7 +508,7 @@ __asm__("fnsave %0": :"m"(*fdata));
}
/* Check if there is more than one thread: */
- if (_thread_run != _thread_link_list || _thread_run->nxt != NULL) {
+ if (set_timer != 0) {
/*
* Start the interval timer for the
* calculated time interval:
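The underflow/overflow handling above reappears in _thread_kern_poll(); both reduce to one normalized timespec subtraction. An equivalent helper, assuming normalized inputs (slop_nsec stands in for _clock_res_nsec):

	#include <time.h>

	/* d = a - b + slop_nsec, with tv_nsec renormalized to [0, 1e9). */
	static struct timespec
	ts_delta(struct timespec a, struct timespec b, long slop_nsec)
	{
		struct timespec d;

		d.tv_sec = a.tv_sec - b.tv_sec;
		d.tv_nsec = a.tv_nsec - b.tv_nsec + slop_nsec;
		if (d.tv_nsec < 0) {			/* borrow a second */
			d.tv_sec--;
			d.tv_nsec += 1000000000;
		} else if (d.tv_nsec >= 1000000000) {	/* carry a second */
			d.tv_sec++;
			d.tv_nsec -= 1000000000;
		}
		return (d);
	}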
@@ -462,6 +571,19 @@ __asm__("fnsave %0": :"m"(*fdata));
void
_thread_kern_sched_state(enum pthread_state state, char *fname, int lineno)
{
+ /*
+ * Flag the pthread kernel as executing scheduler code
+ * to avoid a scheduler signal from interrupting this
+ * execution and calling the scheduler again.
+ */
+ _thread_kern_in_sched = 1;
+
+ /*
+ * Prevent the signal handler from fiddling with this thread
+ * before its state is set and is placed into the proper queue.
+ */
+ _queue_signals = 1;
+
/* Change the state of the current thread: */
_thread_run->state = state;
_thread_run->fname = fname;
@@ -476,6 +598,20 @@ void
_thread_kern_sched_state_unlock(enum pthread_state state,
spinlock_t *lock, char *fname, int lineno)
{
+ /*
+ * Flag the pthread kernel as executing scheduler code
+ * to avoid a scheduler signal from interrupting this
+ * execution and calling the scheduler again.
+ */
+ _thread_kern_in_sched = 1;
+
+ /*
+ * Prevent the signal handler from fiddling with this thread
+ * before its state is set and it is placed into the proper
+ * queue(s).
+ */
+ _queue_signals = 1;
+
/* Change the state of the current thread: */
_thread_run->state = state;
_thread_run->fname = fname;
@@ -489,384 +625,193 @@ _thread_kern_sched_state_unlock(enum pthread_state state,
}
static void
-_thread_kern_select(int wait_reqd)
+_thread_kern_poll(int wait_reqd)
{
char bufr[128];
- fd_set fd_set_except;
- fd_set fd_set_read;
- fd_set fd_set_write;
int count = 0;
- int count_dec;
- int found_one;
- int i;
- int nfds = -1;
- int settimeout;
- pthread_t pthread;
+ int i, found;
+ int kern_pipe_added = 0;
+ int nfds = 0;
+ int timeout_ms = 0;
+ struct pthread *pthread, *pthread_next;
ssize_t num;
struct timespec ts;
- struct timespec ts1;
- struct timeval *p_tv;
struct timeval tv;
- struct timeval tv1;
-
- /* Zero the file descriptor sets: */
- FD_ZERO(&fd_set_read);
- FD_ZERO(&fd_set_write);
- FD_ZERO(&fd_set_except);
/* Check if the caller wants to wait: */
- if (wait_reqd) {
- /*
- * Add the pthread kernel pipe file descriptor to the read
- * set:
- */
- FD_SET(_thread_kern_pipe[0], &fd_set_read);
- nfds = _thread_kern_pipe[0];
-
+ if (wait_reqd == 0) {
+ timeout_ms = 0;
+ }
+ else {
/* Get the current time of day: */
gettimeofday(&tv, NULL);
TIMEVAL_TO_TIMESPEC(&tv, &ts);
+
+ _queue_signals = 1;
+ pthread = TAILQ_FIRST(&_waitingq);
+ _queue_signals = 0;
+
+ if ((pthread == NULL) || (pthread->wakeup_time.tv_sec == -1)) {
+ /*
+ * Either there are no threads in the waiting queue,
+ * or there are no threads that can timeout.
+ */
+ timeout_ms = INFTIM;
+ }
+ else {
+ /*
+ * Calculate the time left for the next thread to
+ * timeout allowing for the clock resolution:
+ */
+ timeout_ms = ((pthread->wakeup_time.tv_sec - ts.tv_sec) *
+ 1000) + ((pthread->wakeup_time.tv_nsec - ts.tv_nsec +
+ _clock_res_nsec) / 1000000);
+ /*
+ * Don't allow negative timeouts:
+ */
+ if (timeout_ms < 0)
+ timeout_ms = 0;
+ }
}
- /* Initialise the time value structure: */
- tv.tv_sec = 0;
- tv.tv_usec = 0;
+
+ /* Protect the scheduling queues: */
+ _queue_signals = 1;
/*
- * Enter a loop to process threads waiting on either file descriptors
- * or times:
+ * Check to see if the signal queue needs to be walked to look
+ * for threads awoken by a signal while in the scheduler. Only
+ * do this if a wait is specified; otherwise, the waiting queue
+ * will be checked after the zero-timed _poll.
*/
- _waitingq_check_reqd = 0; /* reset flag before loop */
- TAILQ_FOREACH (pthread, &_waitingq, pqe) {
- /* Assume that this state does not time out: */
- settimeout = 0;
+ while ((_sigq_check_reqd != 0) && (timeout_ms != 0)) {
+ /* Reset flag before handling queued signals: */
+ _sigq_check_reqd = 0;
+
+ dequeue_signals();
- /* Process according to thread state: */
- switch (pthread->state) {
/*
- * States which do not depend on file descriptor I/O
- * operations or timeouts:
+ * Check for a thread that became runnable due to
+ * a signal:
*/
- case PS_DEAD:
- case PS_DEADLOCK:
- case PS_FDLR_WAIT:
- case PS_FDLW_WAIT:
- case PS_FILE_WAIT:
- case PS_JOIN:
- case PS_MUTEX_WAIT:
- case PS_SIGTHREAD:
- case PS_SIGWAIT:
- case PS_STATE_MAX:
- case PS_WAIT_WAIT:
- case PS_SUSPENDED:
- /* Nothing to do here. */
- break;
-
- case PS_RUNNING:
+ if (PTHREAD_PRIOQ_FIRST() != NULL) {
/*
- * A signal occurred and made this thread ready
- * while in the scheduler or while the scheduling
- * queues were protected.
+ * Since there is at least one runnable thread,
+ * disable the wait.
*/
- PTHREAD_WAITQ_REMOVE(pthread);
- PTHREAD_PRIOQ_INSERT_TAIL(pthread);
- break;
+ timeout_ms = 0;
+ }
+ }
- /* File descriptor read wait: */
- case PS_FDR_WAIT:
- /* Add the file descriptor to the read set: */
- FD_SET(pthread->data.fd.fd, &fd_set_read);
+ /*
+ * Form the poll table:
+ */
+ nfds = 0;
+ if (timeout_ms != 0) {
+ /* Add the kernel pipe to the poll table: */
+ _thread_pfd_table[nfds].fd = _thread_kern_pipe[0];
+ _thread_pfd_table[nfds].events = POLLRDNORM;
+ _thread_pfd_table[nfds].revents = 0;
+ nfds++;
+ kern_pipe_added = 1;
+ }
+ PTHREAD_WAITQ_SETACTIVE();
+ TAILQ_FOREACH(pthread, &_workq, qe) {
+ switch (pthread->state) {
+ case PS_SPINBLOCK:
/*
- * Check if this file descriptor is greater than any
- * of those seen so far:
+ * If the lock is available, let the thread run.
*/
- if (pthread->data.fd.fd > nfds) {
- /* Remember this file descriptor: */
- nfds = pthread->data.fd.fd;
+ if (pthread->data.spinlock->access_lock == 0) {
+ PTHREAD_WAITQ_CLEARACTIVE();
+ PTHREAD_WORKQ_REMOVE(pthread);
+ PTHREAD_NEW_STATE(pthread,PS_RUNNING);
+ PTHREAD_WAITQ_SETACTIVE();
+ /* One less thread in a spinblock state: */
+ _spinblock_count--;
}
- /* Increment the file descriptor count: */
- count++;
+ break;
- /* This state can time out: */
- settimeout = 1;
+ /* File descriptor read wait: */
+ case PS_FDR_WAIT:
+ /* Limit number of polled files to table size: */
+ if (nfds < _thread_dtablesize) {
+ _thread_pfd_table[nfds].events = POLLRDNORM;
+ _thread_pfd_table[nfds].fd = pthread->data.fd.fd;
+ nfds++;
+ }
break;
/* File descriptor write wait: */
case PS_FDW_WAIT:
- /* Add the file descriptor to the write set: */
- FD_SET(pthread->data.fd.fd, &fd_set_write);
-
- /*
- * Check if this file descriptor is greater than any
- * of those seen so far:
- */
- if (pthread->data.fd.fd > nfds) {
- /* Remember this file descriptor: */
- nfds = pthread->data.fd.fd;
+ /* Limit number of polled files to table size: */
+ if (nfds < _thread_dtablesize) {
+ _thread_pfd_table[nfds].events = POLLWRNORM;
+ _thread_pfd_table[nfds].fd = pthread->data.fd.fd;
+ nfds++;
}
- /* Increment the file descriptor count: */
- count++;
-
- /* This state can time out: */
- settimeout = 1;
break;
- /* States that time out: */
- case PS_SLEEP_WAIT:
- case PS_COND_WAIT:
- /* Flag a timeout as required: */
- settimeout = 1;
- break;
-
- /* Select wait: */
+ /* File descriptor poll or select wait: */
+ case PS_POLL_WAIT:
case PS_SELECT_WAIT:
- /*
- * Enter a loop to process each file descriptor in
- * the thread-specific file descriptor sets:
- */
- for (i = 0; i < pthread->data.select_data->nfds; i++) {
- /*
- * Check if this file descriptor is set for
- * exceptions:
- */
- if (FD_ISSET(i, &pthread->data.select_data->exceptfds)) {
- /*
- * Add the file descriptor to the
- * exception set:
- */
- FD_SET(i, &fd_set_except);
-
- /*
- * Increment the file descriptor
- * count:
- */
- count++;
-
- /*
- * Check if this file descriptor is
- * greater than any of those seen so
- * far:
- */
- if (i > nfds) {
- /*
- * Remember this file
- * descriptor:
- */
- nfds = i;
- }
- }
- /*
- * Check if this file descriptor is set for
- * write:
- */
- if (FD_ISSET(i, &pthread->data.select_data->writefds)) {
- /*
- * Add the file descriptor to the
- * write set:
- */
- FD_SET(i, &fd_set_write);
-
- /*
- * Increment the file descriptor
- * count:
- */
- count++;
-
- /*
- * Check if this file descriptor is
- * greater than any of those seen so
- * far:
- */
- if (i > nfds) {
- /*
- * Remember this file
- * descriptor:
- */
- nfds = i;
- }
- }
- /*
- * Check if this file descriptor is set for
- * read:
- */
- if (FD_ISSET(i, &pthread->data.select_data->readfds)) {
- /*
- * Add the file descriptor to the
- * read set:
- */
- FD_SET(i, &fd_set_read);
-
- /*
- * Increment the file descriptor
- * count:
- */
- count++;
-
- /*
- * Check if this file descriptor is
- * greater than any of those seen so
- * far:
- */
- if (i > nfds) {
- /*
- * Remember this file
- * descriptor:
- */
- nfds = i;
- }
+ /* Limit number of polled files to table size: */
+ if (pthread->data.poll_data->nfds + nfds <
+ _thread_dtablesize) {
+ for (i = 0; i < pthread->data.poll_data->nfds; i++) {
+ _thread_pfd_table[nfds + i].fd =
+ pthread->data.poll_data->fds[i].fd;
+ _thread_pfd_table[nfds + i].events =
+ pthread->data.poll_data->fds[i].events;
}
+ nfds += pthread->data.poll_data->nfds;
}
-
- /* This state can time out: */
- settimeout = 1;
break;
- }
-
- /*
- * Check if the caller wants to wait and if the thread state
- * is one that times out:
- */
- if (wait_reqd && settimeout) {
- /* Check if this thread wants to wait forever: */
- if (pthread->wakeup_time.tv_sec == -1) {
- }
- /* Check if this thread doesn't want to wait at all: */
- else if (pthread->wakeup_time.tv_sec == 0 &&
- pthread->wakeup_time.tv_nsec == 0) {
- /* Override the caller's request to wait: */
- wait_reqd = 0;
- } else {
- /*
- * Calculate the time until this thread is
- * ready, allowing for the clock resolution:
- */
- ts1.tv_sec = pthread->wakeup_time.tv_sec - ts.tv_sec;
- ts1.tv_nsec = pthread->wakeup_time.tv_nsec - ts.tv_nsec +
- CLOCK_RES_NSEC;
-
- /*
- * Check for underflow of the nanosecond
- * field:
- */
- if (ts1.tv_nsec < 0) {
- /*
- * Allow for the underflow of the
- * nanosecond field:
- */
- ts1.tv_sec--;
- ts1.tv_nsec += 1000000000;
- }
- /*
- * Check for overflow of the nanosecond
- * field:
- */
- if (ts1.tv_nsec >= 1000000000) {
- /*
- * Allow for the overflow of the
- * nanosecond field:
- */
- ts1.tv_sec++;
- ts1.tv_nsec -= 1000000000;
- }
- /*
- * Convert the timespec structure to a
- * timeval structure:
- */
- TIMESPEC_TO_TIMEVAL(&tv1, &ts1);
- /*
- * Check if no time value has been found yet,
- * or if the thread will be ready sooner that
- * the earliest one found so far:
- */
- if ((tv.tv_sec == 0 && tv.tv_usec == 0) || timercmp(&tv1, &tv, <)) {
- /* Update the time value: */
- tv.tv_sec = tv1.tv_sec;
- tv.tv_usec = tv1.tv_usec;
- }
- }
+ /* Other states do not depend on file I/O. */
+ default:
+ break;
}
}
+ PTHREAD_WAITQ_CLEARACTIVE();
- /* Check if the caller wants to wait: */
- if (wait_reqd) {
- /* Check if no threads were found with timeouts: */
- if (tv.tv_sec == 0 && tv.tv_usec == 0) {
- /* Wait forever: */
- p_tv = NULL;
- } else {
- /*
- * Point to the time value structure which contains
- * the earliest time that a thread will be ready:
- */
- p_tv = &tv;
- }
+ /*
+ * Wait for a file descriptor to be ready for read, write, or
+ * an exception, or a timeout to occur:
+ */
+ count = _thread_sys_poll(_thread_pfd_table, nfds, timeout_ms);
+ if (kern_pipe_added != 0)
/*
- * Flag the pthread kernel as in a select. This is to avoid
- * the window between the next statement that unblocks
- * signals and the select statement which follows.
+ * Remove the pthread kernel pipe file descriptor
+ * from the pollfd table:
*/
- _thread_kern_in_select = 1;
+ nfds = 1;
+ else
+ nfds = 0;
+ /*
+ * Check if it is possible that there are bytes in the kernel
+ * read pipe waiting to be read:
+ */
+ if (count < 0 || ((kern_pipe_added != 0) &&
+ (_thread_pfd_table[0].revents & POLLRDNORM))) {
/*
- * Wait for a file descriptor to be ready for read, write, or
- * an exception, or a timeout to occur:
+ * If the kernel read pipe was included in the
+ * count:
*/
- count = _thread_sys_select(nfds + 1, &fd_set_read, &fd_set_write, &fd_set_except, p_tv);
+ if (count > 0) {
+ /* Decrement the count of file descriptors: */
+ count--;
+ }
- /* Reset the kernel in select flag: */
- _thread_kern_in_select = 0;
+ if (_sigq_check_reqd != 0) {
+ /* Reset flag before handling signals: */
+ _sigq_check_reqd = 0;
- /*
- * Check if it is possible that there are bytes in the kernel
- * read pipe waiting to be read:
- */
- if (count < 0 || FD_ISSET(_thread_kern_pipe[0], &fd_set_read)) {
- /*
- * Check if the kernel read pipe was included in the
- * count:
- */
- if (count > 0) {
- /*
- * Remove the kernel read pipe from the
- * count:
- */
- FD_CLR(_thread_kern_pipe[0], &fd_set_read);
-
- /* Decrement the count of file descriptors: */
- count--;
- }
- /*
- * Enter a loop to read (and trash) bytes from the
- * pthread kernel pipe:
- */
- while ((num = _thread_sys_read(_thread_kern_pipe[0], bufr, sizeof(bufr))) > 0) {
- /*
- * The buffer read contains one byte per
- * signal and each byte is the signal number.
- * This data is not used, but the fact that
- * the signal handler wrote to the pipe *is*
- * used to cause the _select call
- * to complete if the signal occurred between
- * the time when signals were unblocked and
- * the _select select call being
- * made.
- */
- }
+ dequeue_signals();
}
}
- /* Check if there are file descriptors to poll: */
- else if (count > 0) {
- /*
- * Point to the time value structure which has been zeroed so
- * that the call to _select will not wait:
- */
- p_tv = &tv;
-
- /* Poll file descrptors without wait: */
- count = _thread_sys_select(nfds + 1, &fd_set_read, &fd_set_write, &fd_set_except, p_tv);
- }
/*
* Check if any file descriptors are ready:
@@ -875,301 +820,133 @@ _thread_kern_select(int wait_reqd)
/*
* Enter a loop to look for threads waiting on file
* descriptors that are flagged as available by the
- * _select syscall:
+ * _poll syscall:
*/
- TAILQ_FOREACH (pthread, &_waitingq, pqe) {
- /* Process according to thread state: */
+ PTHREAD_WAITQ_SETACTIVE();
+ TAILQ_FOREACH(pthread, &_workq, qe) {
switch (pthread->state) {
- /*
- * States which do not depend on file
- * descriptor I/O operations:
- */
- case PS_COND_WAIT:
- case PS_DEAD:
- case PS_DEADLOCK:
- case PS_FDLR_WAIT:
- case PS_FDLW_WAIT:
- case PS_FILE_WAIT:
- case PS_JOIN:
- case PS_MUTEX_WAIT:
- case PS_SIGWAIT:
- case PS_SLEEP_WAIT:
- case PS_WAIT_WAIT:
- case PS_SIGTHREAD:
- case PS_STATE_MAX:
- case PS_SUSPENDED:
- /* Nothing to do here. */
- break;
-
- case PS_RUNNING:
+ case PS_SPINBLOCK:
/*
- * A signal occurred and made this thread
- * ready while in the scheduler.
+ * If the lock is available, let the thread run.
*/
- PTHREAD_WAITQ_REMOVE(pthread);
- PTHREAD_PRIOQ_INSERT_TAIL(pthread);
- break;
+ if (pthread->data.spinlock->access_lock == 0) {
+ PTHREAD_WAITQ_CLEARACTIVE();
+ PTHREAD_WORKQ_REMOVE(pthread);
+ PTHREAD_NEW_STATE(pthread,PS_RUNNING);
+ PTHREAD_WAITQ_SETACTIVE();
- /* File descriptor read wait: */
- case PS_FDR_WAIT:
- /*
- * Check if the file descriptor is available
- * for read:
- */
- if (FD_ISSET(pthread->data.fd.fd, &fd_set_read)) {
/*
- * Change the thread state to allow
- * it to read from the file when it
- * is scheduled next:
+ * One less thread in a spinblock state:
*/
- pthread->state = PS_RUNNING;
+ _spinblock_count--;
+ }
+ break;
- /*
- * Remove it from the waiting queue
- * and add it to the ready queue:
- */
- PTHREAD_WAITQ_REMOVE(pthread);
- PTHREAD_PRIOQ_INSERT_TAIL(pthread);
+ /* File descriptor read wait: */
+ case PS_FDR_WAIT:
+ if ((nfds < _thread_dtablesize) &&
+ (_thread_pfd_table[nfds].revents & POLLRDNORM)) {
+ PTHREAD_WAITQ_CLEARACTIVE();
+ PTHREAD_WORKQ_REMOVE(pthread);
+ PTHREAD_NEW_STATE(pthread,PS_RUNNING);
+ PTHREAD_WAITQ_SETACTIVE();
}
+ nfds++;
break;
/* File descriptor write wait: */
case PS_FDW_WAIT:
- /*
- * Check if the file descriptor is available
- * for write:
- */
- if (FD_ISSET(pthread->data.fd.fd, &fd_set_write)) {
- /*
- * Change the thread state to allow
- * it to write to the file when it is
- * scheduled next:
- */
- pthread->state = PS_RUNNING;
-
- /*
- * Remove it from the waiting queue
- * and add it to the ready queue:
- */
- PTHREAD_WAITQ_REMOVE(pthread);
- PTHREAD_PRIOQ_INSERT_TAIL(pthread);
+ if ((nfds < _thread_dtablesize) &&
+ (_thread_pfd_table[nfds].revents & POLLWRNORM)) {
+ PTHREAD_WAITQ_CLEARACTIVE();
+ PTHREAD_WORKQ_REMOVE(pthread);
+ PTHREAD_NEW_STATE(pthread,PS_RUNNING);
+ PTHREAD_WAITQ_SETACTIVE();
}
+ nfds++;
break;
- /* Select wait: */
+ /* File descriptor poll or select wait: */
+ case PS_POLL_WAIT:
case PS_SELECT_WAIT:
- /*
- * Reset the flag that indicates if a file
- * descriptor is ready for some type of
- * operation:
- */
- count_dec = 0;
-
- /*
- * Enter a loop to search though the
- * thread-specific select file descriptors
- * for the first descriptor that is ready:
- */
- for (i = 0; i < pthread->data.select_data->nfds && count_dec == 0; i++) {
+ if (pthread->data.poll_data->nfds + nfds <
+ _thread_dtablesize) {
/*
- * Check if this file descriptor does
- * not have an exception:
+ * Enter a loop looking for I/O
+ * readiness:
*/
- if (FD_ISSET(i, &pthread->data.select_data->exceptfds) && FD_ISSET(i, &fd_set_except)) {
- /*
- * Flag this file descriptor
- * as ready:
- */
- count_dec = 1;
- }
- /*
- * Check if this file descriptor is
- * not ready for write:
- */
- if (FD_ISSET(i, &pthread->data.select_data->writefds) && FD_ISSET(i, &fd_set_write)) {
- /*
- * Flag this file descriptor
- * as ready:
- */
- count_dec = 1;
+ found = 0;
+ for (i = 0; i < pthread->data.poll_data->nfds; i++) {
+ if (_thread_pfd_table[nfds + i].revents != 0) {
+ pthread->data.poll_data->fds[i].revents =
+ _thread_pfd_table[nfds + i].revents;
+ found++;
+ }
}
- /*
- * Check if this file descriptor is
- * not ready for read:
- */
- if (FD_ISSET(i, &pthread->data.select_data->readfds) && FD_ISSET(i, &fd_set_read)) {
- /*
- * Flag this file descriptor
- * as ready:
- */
- count_dec = 1;
+
+ /* Advance nfds before the thread's count is overwritten: */
+ nfds += pthread->data.poll_data->nfds;
+
+ if (found != 0) {
+ pthread->data.poll_data->nfds = found;
+ PTHREAD_WAITQ_CLEARACTIVE();
+ PTHREAD_WORKQ_REMOVE(pthread);
+ PTHREAD_NEW_STATE(pthread,PS_RUNNING);
+ PTHREAD_WAITQ_SETACTIVE();
}
}
+ else
+ nfds += pthread->data.poll_data->nfds;
+ break;
+ /* Other states do not depend on file I/O. */
+ default:
+ break;
+ }
+ }
+ PTHREAD_WAITQ_CLEARACTIVE();
+ }
+ else if (_spinblock_count != 0) {
+ /*
+ * Enter a loop to look for threads waiting on a spinlock
+ * that is now available.
+ */
+ PTHREAD_WAITQ_SETACTIVE();
+ TAILQ_FOREACH(pthread, &_workq, qe) {
+ if (pthread->state == PS_SPINBLOCK) {
/*
- * Check if any file descriptors are ready
- * for the current thread:
+ * If the lock is available, let the thread run.
*/
- if (count_dec) {
- /*
- * Reset the count of file
- * descriptors that are ready for
- * this thread:
- */
- found_one = 0;
-
- /*
- * Enter a loop to search though the
- * thread-specific select file
- * descriptors:
- */
- for (i = 0; i < pthread->data.select_data->nfds; i++) {
- /*
- * Reset the count of
- * operations for which the
- * current file descriptor is
- * ready:
- */
- count_dec = 0;
-
- /*
- * Check if this file
- * descriptor is selected for
- * exceptions:
- */
- if (FD_ISSET(i, &pthread->data.select_data->exceptfds)) {
- /*
- * Check if this file
- * descriptor has an
- * exception:
- */
- if (FD_ISSET(i, &fd_set_except)) {
- /*
- * Increment
- * the count
- * for this
- * file:
- */
- count_dec++;
- } else {
- /*
- * Clear the
- * file
- * descriptor
- * in the
- * thread-spec
- * ific file
- * descriptor
- * set:
- */
- FD_CLR(i, &pthread->data.select_data->exceptfds);
- }
- }
- /*
- * Check if this file
- * descriptor is selected for
- * write:
- */
- if (FD_ISSET(i, &pthread->data.select_data->writefds)) {
- /*
- * Check if this file
- * descriptor is
- * ready for write:
- */
- if (FD_ISSET(i, &fd_set_write)) {
- /*
- * Increment
- * the count
- * for this
- * file:
- */
- count_dec++;
- } else {
- /*
- * Clear the
- * file
- * descriptor
- * in the
- * thread-spec
- * ific file
- * descriptor
- * set:
- */
- FD_CLR(i, &pthread->data.select_data->writefds);
- }
- }
- /*
- * Check if this file
- * descriptor is selected for
- * read:
- */
- if (FD_ISSET(i, &pthread->data.select_data->readfds)) {
- /*
- * Check if this file
- * descriptor is
- * ready for read:
- */
- if (FD_ISSET(i, &fd_set_read)) {
- /*
- * Increment
- * the count
- * for this
- * file:
- */
- count_dec++;
- } else {
- /*
- * Clear the
- * file
- * descriptor
- * in the
- * thread-spec
- * ific file
- * descriptor
- * set:
- */
- FD_CLR(i, &pthread->data.select_data->readfds);
- }
- }
- /*
- * Check if the current file
- * descriptor is ready for
- * any one of the operations:
- */
- if (count_dec > 0) {
- /*
- * Increment the
- * count of file
- * descriptors that
- * are ready for the
- * current thread:
- */
- found_one++;
- }
- }
-
- /*
- * Return the number of file
- * descriptors that are ready:
- */
- pthread->data.select_data->nfds = found_one;
-
- /*
- * Change the state of the current
- * thread to run:
- */
- pthread->state = PS_RUNNING;
+ if (pthread->data.spinlock->access_lock == 0) {
+ PTHREAD_WAITQ_CLEARACTIVE();
+ PTHREAD_WORKQ_REMOVE(pthread);
+ PTHREAD_NEW_STATE(pthread,PS_RUNNING);
+ PTHREAD_WAITQ_SETACTIVE();
/*
- * Remove it from the waiting queue
- * and add it to the ready queue:
+ * One less thread in a spinblock state:
*/
- PTHREAD_WAITQ_REMOVE(pthread);
- PTHREAD_PRIOQ_INSERT_TAIL(pthread);
+ _spinblock_count--;
}
- break;
}
}
+ PTHREAD_WAITQ_CLEARACTIVE();
+ }
+
+ /* Unprotect the scheduling queues: */
+ _queue_signals = 0;
+
+ while (_sigq_check_reqd != 0) {
+ /* Handle queued signals: */
+ _sigq_check_reqd = 0;
+
+ /* Protect the scheduling queues: */
+ _queue_signals = 1;
+
+ dequeue_signals();
+
+ /* Unprotect the scheduling queues: */
+ _queue_signals = 0;
}
/* Nothing to return. */
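The timeout passed to _thread_sys_poll() above comes from the nearest wakeup time at the head of the (sorted) waiting queue. A hedged sketch of that conversion; INFTIM is poll()'s wait-forever constant:

	#include <poll.h>
	#include <time.h>

	/*
	 * Milliseconds from `now` until `wakeup`, padded by the clock
	 * resolution; a wakeup of tv_sec == -1 means "no timeout".
	 */
	static int
	wakeup_to_ms(const struct timespec *wakeup, const struct timespec *now,
	    long res_nsec)
	{
		long ms;

		if (wakeup == NULL || wakeup->tv_sec == -1)
			return (INFTIM);	/* nothing can time out */
		ms = (wakeup->tv_sec - now->tv_sec) * 1000 +
		    (wakeup->tv_nsec - now->tv_nsec + res_nsec) / 1000000;
		if (ms < 0)
			ms = 0;		/* don't allow negative timeouts */
		return ((int)ms);
	}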
@@ -1219,59 +996,101 @@ _thread_kern_set_timeout(struct timespec * timeout)
}
void
-_thread_kern_sched_defer(void)
+_thread_kern_sig_defer(void)
{
- /* Allow scheduling deferral to be recursive. */
- _thread_run->sched_defer_count++;
+ /* Allow signal deferral to be recursive. */
+ _thread_run->sig_defer_count++;
}
void
-_thread_kern_sched_undefer(void)
+_thread_kern_sig_undefer(void)
{
pthread_t pthread;
int need_resched = 0;
/*
* Perform checks to yield only if we are about to undefer
- * scheduling.
+ * signals.
*/
- if (_thread_run->sched_defer_count == 1) {
+ if (_thread_run->sig_defer_count > 1) {
+ /* Decrement the signal deferral count. */
+ _thread_run->sig_defer_count--;
+ }
+ else if (_thread_run->sig_defer_count == 1) {
+ /* Reenable signals: */
+ _thread_run->sig_defer_count = 0;
+
/*
- * Check if the waiting queue needs to be examined for
- * threads that are now ready:
+ * Check if there are queued signals:
*/
- while (_waitingq_check_reqd != 0) {
- /* Clear the flag before checking the waiting queue: */
- _waitingq_check_reqd = 0;
-
- TAILQ_FOREACH(pthread, &_waitingq, pqe) {
- if (pthread->state == PS_RUNNING) {
- PTHREAD_WAITQ_REMOVE(pthread);
- PTHREAD_PRIOQ_INSERT_TAIL(pthread);
- }
+ while (_sigq_check_reqd != 0) {
+ /* Defer scheduling while we process queued signals: */
+ _thread_run->sig_defer_count = 1;
+
+ /* Clear the flag before checking the signal queue: */
+ _sigq_check_reqd = 0;
+
+ /* Dequeue and handle signals: */
+ dequeue_signals();
+
+ /*
+ * Unless a reschedule is already pending, check
+ * whether signal handling caused a higher priority
+ * thread to become ready.
+ */
+ if ((need_resched == 0) &&
+ (((pthread = PTHREAD_PRIOQ_FIRST()) != NULL) &&
+ (pthread->active_priority > _thread_run->active_priority))) {
+ need_resched = 1;
}
+
+ /* Reenable signals: */
+ _thread_run->sig_defer_count = 0;
}
- /*
- * We need to yield if a thread change of state caused a
- * higher priority thread to become ready, or if a
- * scheduling signal occurred while preemption was disabled.
- */
- if ((((pthread = PTHREAD_PRIOQ_FIRST) != NULL) &&
- (pthread->active_priority > _thread_run->active_priority)) ||
- (_thread_run->yield_on_sched_undefer != 0)) {
- _thread_run->yield_on_sched_undefer = 0;
- need_resched = 1;
+ /* Yield the CPU if necessary: */
+ if (need_resched || _thread_run->yield_on_sig_undefer != 0) {
+ _thread_run->yield_on_sig_undefer = 0;
+ _thread_kern_sched(NULL);
}
}
+}
- if (_thread_run->sched_defer_count > 0) {
- /* Decrement the scheduling deferral count. */
- _thread_run->sched_defer_count--;
+static void
+dequeue_signals(void)
+{
+ char bufr[128];
+ int i, num;
- /* Yield the CPU if necessary: */
- if (need_resched)
- _thread_kern_sched(NULL);
+ /*
+ * Enter a loop to read and handle queued signals from the
+ * pthread kernel pipe:
+ */
+ while (((num = _thread_sys_read(_thread_kern_pipe[0], bufr,
+ sizeof(bufr))) > 0) || (num == -1 && errno == EINTR)) {
+ /*
+ * The buffer read contains one byte per signal and
+ * each byte is the signal number.
+ */
+ for (i = 0; i < num; i++) {
+ if ((int) bufr[i] == _SCHED_SIGNAL) {
+ /*
+ * Scheduling signals shouldn't ever be
+ * queued; just ignore it for now.
+ */
+ }
+ else {
+ /* Handle this signal: */
+ _thread_sig_handle((int) bufr[i], NULL);
+ }
+ }
+ }
+ if ((num < 0) && (errno != EAGAIN)) {
+ /*
+ * The only error we should expect is if there is
+ * no data to read.
+ */
+ PANIC("Unable to read from thread kernel pipe");
}
}
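dequeue_signals() above drains the non-blocking kernel pipe, one byte per queued signal. The read-loop idiom on its own, assuming the pipe's write end stays open (so an empty pipe yields EAGAIN rather than EOF):

	#include <errno.h>
	#include <unistd.h>

	/* Drain fd, retrying on EINTR; returns 0 once empty, -1 on error. */
	static int
	drain_pipe(int fd)
	{
		char	bufr[128];
		ssize_t	num;

		while ((num = read(fd, bufr, sizeof(bufr))) > 0 ||
		    (num == -1 && errno == EINTR))
			;	/* each byte is a signal number; discarded here */
		return ((num == -1 && errno == EAGAIN) ? 0 : -1);
	}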
diff --git a/lib/libkse/thread/thr_kill.c b/lib/libkse/thread/thr_kill.c
index c729179..f5bb6e2 100644
--- a/lib/libkse/thread/thr_kill.c
+++ b/lib/libkse/thread/thr_kill.c
@@ -29,6 +29,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
+ * $Id$
*/
#include <errno.h>
#include <signal.h>
@@ -46,18 +47,18 @@ pthread_kill(pthread_t pthread, int sig)
/* Invalid signal: */
ret = EINVAL;
- /* Ignored signals get dropped on the floor. */
- else if (_thread_sigact[sig - 1].sa_handler == SIG_IGN)
- ret = 0;
-
- /* Find the thread in the list of active threads: */
- else if ((ret = _find_thread(pthread)) == 0) {
+ /*
+ * Ensure the thread is in the list of active threads, and the
+ * signal is valid (signal 0 specifies error checking only) and
+ * not being ignored:
+ */
+ else if (((ret = _find_thread(pthread)) == 0) && (sig > 0) &&
+ (_thread_sigact[sig - 1].sa_handler != SIG_IGN)) {
/*
- * Guard against preemption by a scheduling signal.
- * A change of thread state modifies the waiting
- * and priority queues.
+ * Defer signals to protect the scheduling queues from
+ * access by the signal handler:
*/
- _thread_kern_sched_defer();
+ _thread_kern_sig_defer();
switch (pthread->state) {
case PS_SIGSUSPEND:
@@ -90,15 +91,19 @@ pthread_kill(pthread_t pthread, int sig)
sigaddset(&pthread->sigpend,sig);
break;
- case PS_SELECT_WAIT:
case PS_FDR_WAIT:
case PS_FDW_WAIT:
+ case PS_POLL_WAIT:
case PS_SLEEP_WAIT:
+ case PS_SELECT_WAIT:
if (!sigismember(&pthread->sigmask, sig) &&
(_thread_sigact[sig - 1].sa_handler != SIG_IGN)) {
/* Flag the operation as interrupted: */
pthread->interrupted = 1;
+ if (pthread->flags & PTHREAD_FLAGS_IN_WORKQ)
+ PTHREAD_WORKQ_REMOVE(pthread);
+
/* Change the state of the thread to run: */
PTHREAD_NEW_STATE(pthread,PS_RUNNING);
@@ -116,11 +121,43 @@ pthread_kill(pthread_t pthread, int sig)
break;
}
+
+ /*
+ * Check that a custom handler is installed
+ * and that the signal is not blocked:
+ */
+ if (_thread_sigact[sig - 1].sa_handler != SIG_DFL &&
+ _thread_sigact[sig - 1].sa_handler != SIG_IGN &&
+ sigismember(&pthread->sigpend, sig) &&
+ !sigismember(&pthread->sigmask, sig)) {
+ pthread_t pthread_saved = _thread_run;
+
+ /* Current thread inside critical region? */
+ if (_thread_run->sig_defer_count > 0)
+ pthread->sig_defer_count++;
+
+ _thread_run = pthread;
+
+ /* Clear the pending signal: */
+ sigdelset(&pthread->sigpend, sig);
+
+ /*
+ * Dispatch the signal via the custom signal
+ * handler:
+ */
+ (*(_thread_sigact[sig - 1].sa_handler))(sig);
+
+ _thread_run = pthread_saved;
+
+ if (_thread_run->sig_defer_count > 0)
+ pthread->sig_defer_count--;
+ }
+
/*
- * Reenable preemption and yield if a scheduling signal
- * occurred while in the critical region.
+ * Undefer and handle pending signals, yielding if
+ * necessary:
*/
- _thread_kern_sched_undefer();
+ _thread_kern_sig_undefer();
}
/* Return the completion status: */
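The dispatch added to pthread_kill() above momentarily makes the target thread look like the running thread so the handler executes in the right context, and propagates the caller's critical region into the target. A simplified sketch with stand-in types and globals:

	#include <signal.h>

	struct thr {
		sigset_t	sigpend;	/* pending signals */
		int		sig_defer_count;
	};

	static struct thr	*cur_thread;		/* stands in for _thread_run */
	static void		(*handler[NSIG])(int);	/* installed handlers */

	static void
	dispatch(struct thr *target, int sig)
	{
		struct thr *saved = cur_thread;

		if (cur_thread->sig_defer_count > 0)
			target->sig_defer_count++;	/* inherit critical region */

		cur_thread = target;
		sigdelset(&target->sigpend, sig);	/* no longer pending */
		handler[sig](sig);			/* run the custom handler */
		cur_thread = saved;

		if (cur_thread->sig_defer_count > 0)
			target->sig_defer_count--;
	}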
diff --git a/lib/libkse/thread/thr_mutex.c b/lib/libkse/thread/thr_mutex.c
index fa9c8cf..c967b46 100644
--- a/lib/libkse/thread/thr_mutex.c
+++ b/lib/libkse/thread/thr_mutex.c
@@ -29,6 +29,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
+ * $Id$
*/
#include <stdlib.h>
#include <errno.h>
@@ -39,6 +40,25 @@
#include <pthread.h>
#include "pthread_private.h"
+#if defined(_PTHREADS_INVARIANTS)
+#define _MUTEX_INIT_LINK(m) do { \
+ (m)->m_qe.tqe_prev = NULL; \
+ (m)->m_qe.tqe_next = NULL; \
+} while (0)
+#define _MUTEX_ASSERT_IS_OWNED(m) do { \
+ if ((m)->m_qe.tqe_prev == NULL) \
+ PANIC("mutex is not on list"); \
+} while (0)
+#define _MUTEX_ASSERT_NOT_OWNED(m) do { \
+ if (((m)->m_qe.tqe_prev != NULL) || \
+ ((m)->m_qe.tqe_next != NULL)) \
+ PANIC("mutex is on list"); \
+} while (0)
+#else
+#define _MUTEX_INIT_LINK(m)
+#define _MUTEX_ASSERT_IS_OWNED(m)
+#define _MUTEX_ASSERT_NOT_OWNED(m)
+#endif
/*
* Prototypes
@@ -55,6 +75,34 @@ static inline void mutex_queue_enq(pthread_mutex_t, pthread_t);
static spinlock_t static_init_lock = _SPINLOCK_INITIALIZER;
+/* Reinitialize a mutex to defaults. */
+int
+_mutex_reinit(pthread_mutex_t * mutex)
+{
+ int ret = 0;
+
+ if (mutex == NULL)
+ ret = EINVAL;
+ else if (*mutex == NULL)
+ ret = pthread_mutex_init(mutex, NULL);
+ else {
+ /*
+ * Initialize the mutex structure:
+ */
+ (*mutex)->m_type = PTHREAD_MUTEX_DEFAULT;
+ (*mutex)->m_protocol = PTHREAD_PRIO_NONE;
+ TAILQ_INIT(&(*mutex)->m_queue);
+ (*mutex)->m_owner = NULL;
+ (*mutex)->m_data.m_count = 0;
+ (*mutex)->m_flags = MUTEX_FLAGS_INITED;
+ (*mutex)->m_refcount = 0;
+ (*mutex)->m_prio = 0;
+ (*mutex)->m_saved_prio = 0;
+ _MUTEX_INIT_LINK(*mutex);
+ memset(&(*mutex)->lock, 0, sizeof((*mutex)->lock));
+ }
+ return (ret);
+}
int
pthread_mutex_init(pthread_mutex_t * mutex,
@@ -138,6 +186,7 @@ pthread_mutex_init(pthread_mutex_t * mutex,
else
pmutex->m_prio = 0;
pmutex->m_saved_prio = 0;
+ _MUTEX_INIT_LINK(pmutex);
memset(&pmutex->lock, 0, sizeof(pmutex->lock));
*mutex = pmutex;
} else {
@@ -147,7 +196,7 @@ pthread_mutex_init(pthread_mutex_t * mutex,
}
}
/* Return the completion status: */
- return (ret);
+ return(ret);
}
int
@@ -177,6 +226,7 @@ pthread_mutex_destroy(pthread_mutex_t * mutex)
* Free the memory allocated for the mutex
* structure:
*/
+ _MUTEX_ASSERT_NOT_OWNED(*mutex);
free(*mutex);
/*
@@ -222,28 +272,24 @@ pthread_mutex_trylock(pthread_mutex_t * mutex)
*/
else if (*mutex != NULL || (ret = init_static(mutex)) == 0) {
/*
+ * Defer signals to protect the scheduling queues from
+ * access by the signal handler:
+ */
+ _thread_kern_sig_defer();
+
+ /* Lock the mutex structure: */
+ _SPINLOCK(&(*mutex)->lock);
+
+ /*
* If the mutex was statically allocated, properly
* initialize the tail queue.
*/
if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
TAILQ_INIT(&(*mutex)->m_queue);
+ _MUTEX_INIT_LINK(*mutex);
(*mutex)->m_flags |= MUTEX_FLAGS_INITED;
}
- /*
- * Guard against being preempted by a scheduling signal.
- * To support priority inheritence mutexes, we need to
- * maintain lists of mutex ownerships for each thread as
- * well as lists of waiting threads for each mutex. In
- * order to propagate priorities we need to atomically
- * walk these lists and cannot rely on a single mutex
- * lock to provide protection against modification.
- */
- _thread_kern_sched_defer();
-
- /* Lock the mutex structure: */
- _SPINLOCK(&(*mutex)->lock);
-
/* Process according to mutex type: */
switch ((*mutex)->m_protocol) {
/* Default POSIX mutex: */
@@ -254,6 +300,7 @@ pthread_mutex_trylock(pthread_mutex_t * mutex)
(*mutex)->m_owner = _thread_run;
/* Add to the list of owned mutexes: */
+ _MUTEX_ASSERT_NOT_OWNED(*mutex);
TAILQ_INSERT_TAIL(&_thread_run->mutexq,
(*mutex), m_qe);
} else if ((*mutex)->m_owner == _thread_run)
@@ -282,6 +329,7 @@ pthread_mutex_trylock(pthread_mutex_t * mutex)
_thread_run->inherited_priority;
/* Add to the list of owned mutexes: */
+ _MUTEX_ASSERT_NOT_OWNED(*mutex);
TAILQ_INSERT_TAIL(&_thread_run->mutexq,
(*mutex), m_qe);
} else if ((*mutex)->m_owner == _thread_run)
@@ -317,6 +365,7 @@ pthread_mutex_trylock(pthread_mutex_t * mutex)
(*mutex)->m_prio;
/* Add to the list of owned mutexes: */
+ _MUTEX_ASSERT_NOT_OWNED(*mutex);
TAILQ_INSERT_TAIL(&_thread_run->mutexq,
(*mutex), m_qe);
} else if ((*mutex)->m_owner == _thread_run)
@@ -337,10 +386,10 @@ pthread_mutex_trylock(pthread_mutex_t * mutex)
_SPINUNLOCK(&(*mutex)->lock);
/*
- * Renable preemption and yield if a scheduling signal
- * arrived while in the critical region:
+ * Undefer and handle pending signals, yielding if
+ * necessary:
*/
- _thread_kern_sched_undefer();
+ _thread_kern_sig_undefer();
}
/* Return the completion status: */
@@ -361,28 +410,24 @@ pthread_mutex_lock(pthread_mutex_t * mutex)
*/
else if (*mutex != NULL || (ret = init_static(mutex)) == 0) {
/*
+ * Defer signals to protect the scheduling queues from
+ * access by the signal handler:
+ */
+ _thread_kern_sig_defer();
+
+ /* Lock the mutex structure: */
+ _SPINLOCK(&(*mutex)->lock);
+
+ /*
* If the mutex was statically allocated, properly
* initialize the tail queue.
*/
if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
TAILQ_INIT(&(*mutex)->m_queue);
(*mutex)->m_flags |= MUTEX_FLAGS_INITED;
+ _MUTEX_INIT_LINK(*mutex);
}
- /*
- * Guard against being preempted by a scheduling signal.
- * To support priority inheritence mutexes, we need to
- * maintain lists of mutex ownerships for each thread as
- * well as lists of waiting threads for each mutex. In
- * order to propagate priorities we need to atomically
- * walk these lists and cannot rely on a single mutex
- * lock to provide protection against modification.
- */
- _thread_kern_sched_defer();
-
- /* Lock the mutex structure: */
- _SPINLOCK(&(*mutex)->lock);
-
/* Process according to mutex type: */
switch ((*mutex)->m_protocol) {
/* Default POSIX mutex: */
@@ -392,6 +437,7 @@ pthread_mutex_lock(pthread_mutex_t * mutex)
(*mutex)->m_owner = _thread_run;
/* Add to the list of owned mutexes: */
+ _MUTEX_ASSERT_NOT_OWNED(*mutex);
TAILQ_INSERT_TAIL(&_thread_run->mutexq,
(*mutex), m_qe);
@@ -419,12 +465,6 @@ pthread_mutex_lock(pthread_mutex_t * mutex)
/* Lock the mutex structure again: */
_SPINLOCK(&(*mutex)->lock);
-
- /*
- * This thread is no longer waiting for
- * the mutex:
- */
- _thread_run->data.mutex = NULL;
}
break;
@@ -449,6 +489,7 @@ pthread_mutex_lock(pthread_mutex_t * mutex)
(*mutex)->m_prio;
/* Add to the list of owned mutexes: */
+ _MUTEX_ASSERT_NOT_OWNED(*mutex);
TAILQ_INSERT_TAIL(&_thread_run->mutexq,
(*mutex), m_qe);
@@ -481,12 +522,6 @@ pthread_mutex_lock(pthread_mutex_t * mutex)
/* Lock the mutex structure again: */
_SPINLOCK(&(*mutex)->lock);
-
- /*
- * This thread is no longer waiting for
- * the mutex:
- */
- _thread_run->data.mutex = NULL;
}
break;
@@ -519,6 +554,7 @@ pthread_mutex_lock(pthread_mutex_t * mutex)
(*mutex)->m_prio;
/* Add to the list of owned mutexes: */
+ _MUTEX_ASSERT_NOT_OWNED(*mutex);
TAILQ_INSERT_TAIL(&_thread_run->mutexq,
(*mutex), m_qe);
} else if ((*mutex)->m_owner == _thread_run)
@@ -556,12 +592,6 @@ pthread_mutex_lock(pthread_mutex_t * mutex)
*/
ret = _thread_run->error;
_thread_run->error = 0;
-
- /*
- * This thread is no longer waiting for
- * the mutex:
- */
- _thread_run->data.mutex = NULL;
}
break;
@@ -576,10 +606,10 @@ pthread_mutex_lock(pthread_mutex_t * mutex)
_SPINUNLOCK(&(*mutex)->lock);
/*
- * Renable preemption and yield if a scheduling signal
- * arrived while in the critical region:
+ * Undefer and handle pending signals, yielding if
+ * necessary:
*/
- _thread_kern_sched_undefer();
+ _thread_kern_sig_undefer();
}
/* Return the completion status: */
@@ -683,15 +713,10 @@ mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
ret = EINVAL;
} else {
/*
- * Guard against being preempted by a scheduling signal.
- * To support priority inheritence mutexes, we need to
- * maintain lists of mutex ownerships for each thread as
- * well as lists of waiting threads for each mutex. In
- * order to propagate priorities we need to atomically
- * walk these lists and cannot rely on a single mutex
- * lock to provide protection against modification.
+ * Defer signals to protect the scheduling queues from
+ * access by the signal handler:
*/
- _thread_kern_sched_defer();
+ _thread_kern_sig_defer();
/* Lock the mutex structure: */
_SPINLOCK(&(*mutex)->lock);
@@ -723,8 +748,10 @@ mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
(*mutex)->m_data.m_count = 0;
/* Remove the mutex from the thread's queue. */
+ _MUTEX_ASSERT_IS_OWNED(*mutex);
TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
(*mutex), m_qe);
+ _MUTEX_INIT_LINK(*mutex);
/*
* Get the next thread from the queue of
@@ -738,6 +765,19 @@ mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
*/
PTHREAD_NEW_STATE((*mutex)->m_owner,
PS_RUNNING);
+
+ /*
+ * Add the mutex to the thread's list of
+ * owned mutexes:
+ */
+ TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
+ (*mutex), m_qe);
+
+ /*
+ * The owner is no longer waiting for
+ * this mutex:
+ */
+ (*mutex)->m_owner->data.mutex = NULL;
}
}
break;
@@ -784,8 +824,10 @@ mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
_thread_run->priority_mutex_count--;
/* Remove the mutex from the thread's queue. */
+ _MUTEX_ASSERT_IS_OWNED(*mutex);
TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
(*mutex), m_qe);
+ _MUTEX_INIT_LINK(*mutex);
/*
* Get the next thread from the queue of threads
@@ -891,8 +933,10 @@ mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
_thread_run->priority_mutex_count--;
/* Remove the mutex from the thread's queue. */
+ _MUTEX_ASSERT_IS_OWNED(*mutex);
TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
- (*mutex), m_qe);
+ (*mutex), m_qe);
+ _MUTEX_INIT_LINK(*mutex);
/*
* Enter a loop to find a waiting thread whose
@@ -913,6 +957,11 @@ mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
(*mutex)->m_owner->error = EINVAL;
PTHREAD_NEW_STATE((*mutex)->m_owner,
PS_RUNNING);
+ /*
+ * The thread is no longer waiting for
+ * this mutex:
+ */
+ (*mutex)->m_owner->data.mutex = NULL;
}
/* Check for a new owner: */
@@ -978,10 +1027,10 @@ mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
_SPINUNLOCK(&(*mutex)->lock);
/*
- * Renable preemption and yield if a scheduling signal
- * arrived while in the critical region:
+ * Undefer and handle pending signals, yielding if
+ * necessary:
*/
- _thread_kern_sched_undefer();
+ _thread_kern_sig_undefer();
}
/* Return the completion status: */
@@ -990,11 +1039,11 @@ mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
/*
- * This function is called when a change in base priority occurs
- * for a thread that is thread holding, or waiting for, a priority
- * protection or inheritence mutex. A change in a threads base
- * priority can effect changes to active priorities of other threads
- * and to the ordering of mutex locking by waiting threads.
+ * This function is called when a change in base priority occurs for
+ * a thread that is holding or waiting for a priority protection or
+ * inheritance mutex. A change in a thread's base priority can effect
+ * changes to active priorities of other threads and to the ordering
+ * of mutex locking by waiting threads.
*
* This must be called while thread scheduling is deferred.
*/
@@ -1231,8 +1280,7 @@ mutex_rescan_owned (pthread_t pthread, pthread_mutex_t mutex)
* If this thread is in the priority queue, it must be
* removed and reinserted for its new priority.
*/
- if ((pthread != _thread_run) &&
- (pthread->state == PS_RUNNING)) {
+ if (pthread->flags & PTHREAD_FLAGS_IN_PRIOQ) {
/*
* Remove the thread from the priority queue
* before changing its priority:
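The invariant macros above exploit a property of <sys/queue.h> tail queues: an element's tqe_prev pointer is non-NULL exactly while the element is linked into some list, so the link fields themselves can double as an "is owned" flag, provided they are NULLed at init time and again after every TAILQ_REMOVE (which is why the patch adds _MUTEX_INIT_LINK after each removal). A minimal standalone sketch of the idiom; the struct, PANIC(), and main() here are illustrative stand-ins, not library code:

    #include <sys/queue.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct mutex {
            TAILQ_ENTRY(mutex) m_qe;        /* link into an owner's mutexq */
    };
    TAILQ_HEAD(mutexq, mutex);

    #define PANIC(msg) do { fprintf(stderr, "%s\n", msg); abort(); } while (0)

    int
    main(void)
    {
            struct mutexq owned = TAILQ_HEAD_INITIALIZER(owned);
            struct mutex m;

            /* _MUTEX_INIT_LINK: mark the mutex as being on no list. */
            m.m_qe.tqe_prev = NULL;
            m.m_qe.tqe_next = NULL;

            /* _MUTEX_ASSERT_NOT_OWNED must pass before insertion... */
            if (m.m_qe.tqe_prev != NULL || m.m_qe.tqe_next != NULL)
                    PANIC("mutex is on list");
            TAILQ_INSERT_TAIL(&owned, &m, m_qe);

            /* ...and _MUTEX_ASSERT_IS_OWNED before removal. */
            if (m.m_qe.tqe_prev == NULL)
                    PANIC("mutex is not on list");
            TAILQ_REMOVE(&owned, &m, m_qe);

            /* TAILQ_REMOVE leaves stale pointers; re-init the link. */
            m.m_qe.tqe_prev = NULL;
            m.m_qe.tqe_next = NULL;
            return (0);
    }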
diff --git a/lib/libkse/thread/thr_poll.c b/lib/libkse/thread/thr_poll.c
new file mode 100644
index 0000000..a54d023
--- /dev/null
+++ b/lib/libkse/thread/thr_poll.c
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 1999 Daniel Eischen <eischen@vigrid.com>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Daniel Eischen.
+ * 4. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id$
+ */
+#include <unistd.h>
+#include <errno.h>
+#include <string.h>
+#include <poll.h>
+#include <sys/types.h>
+#include <sys/time.h>
+#include <sys/fcntl.h>
+#ifdef _THREAD_SAFE
+#include <pthread.h>
+#include "pthread_private.h"
+
+
+int
+poll(struct pollfd *fds, unsigned int nfds, int timeout)
+{
+ struct timespec ts;
+ int numfds = nfds;
+ int i, ret = 0, found = 0;
+ struct pthread_poll_data data;
+
+ if (numfds > _thread_dtablesize) {
+ numfds = _thread_dtablesize;
+ }
+ /* Check if a timeout was specified: */
+ if (timeout == INFTIM) {
+ /* Wait forever: */
+ _thread_kern_set_timeout(NULL);
+ } else if (timeout != 0) {
+ /* Convert the timeout in msec to a timespec: */
+ ts.tv_sec = timeout / 1000;
+ ts.tv_nsec = (timeout % 1000) * 1000000;
+
+ /* Set the wake up time: */
+ _thread_kern_set_timeout(&ts);
+ }
+
+ if (((ret = _thread_sys_poll(fds, numfds, 0)) == 0) && (timeout != 0)) {
+ data.nfds = numfds;
+ data.fds = fds;
+
+ /*
+ * Clear revents in case of a timeout which leaves fds
+ * unchanged:
+ */
+ for (i = 0; i < numfds; i++) {
+ fds[i].revents = 0;
+ }
+
+ _thread_run->data.poll_data = &data;
+ _thread_run->interrupted = 0;
+ _thread_kern_sched_state(PS_POLL_WAIT, __FILE__, __LINE__);
+ if (_thread_run->interrupted) {
+ errno = EINTR;
+ ret = -1;
+ } else {
+ ret = data.nfds;
+ }
+ }
+
+ return (ret);
+}
+#endif
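The millisecond-to-timespec conversion above is easy to get wrong: the sub-second remainder must be scaled by 1,000,000 to reach nanoseconds, not by 1,000 (which yields microseconds). A self-contained helper showing just the arithmetic; the function name is illustrative:

    #include <time.h>

    /* Convert a poll()-style timeout in milliseconds to a timespec. */
    static struct timespec
    msec_to_timespec(int msec)
    {
            struct timespec ts;

            ts.tv_sec = msec / 1000;                /* whole seconds */
            ts.tv_nsec = (msec % 1000) * 1000000;   /* msec -> nsec */
            return (ts);
    }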
diff --git a/lib/libkse/thread/thr_priority_queue.c b/lib/libkse/thread/thr_priority_queue.c
index 516a1e0..0a00a9d3 100644
--- a/lib/libkse/thread/thr_priority_queue.c
+++ b/lib/libkse/thread/thr_priority_queue.c
@@ -29,6 +29,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
+ * $Id$
*/
#include <stdlib.h>
#include <sys/queue.h>
@@ -40,9 +41,51 @@
/* Prototypes: */
static void pq_insert_prio_list(pq_queue_t *pq, int prio);
+#if defined(_PTHREADS_INVARIANTS)
+
+static int _pq_active = 0;
+
+#define _PQ_IN_SCHEDQ (PTHREAD_FLAGS_IN_PRIOQ | PTHREAD_FLAGS_IN_WAITQ | PTHREAD_FLAGS_IN_WORKQ)
+
+#define _PQ_SET_ACTIVE() _pq_active = 1
+#define _PQ_CLEAR_ACTIVE() _pq_active = 0
+#define _PQ_ASSERT_ACTIVE(msg) do { \
+ if (_pq_active == 0) \
+ PANIC(msg); \
+} while (0)
+#define _PQ_ASSERT_INACTIVE(msg) do { \
+ if (_pq_active != 0) \
+ PANIC(msg); \
+} while (0)
+#define _PQ_ASSERT_IN_WAITQ(thrd, msg) do { \
+ if (((thrd)->flags & PTHREAD_FLAGS_IN_WAITQ) == 0) \
+ PANIC(msg); \
+} while (0)
+#define _PQ_ASSERT_IN_PRIOQ(thrd, msg) do { \
+ if (((thrd)->flags & PTHREAD_FLAGS_IN_PRIOQ) == 0) \
+ PANIC(msg); \
+} while (0)
+#define _PQ_ASSERT_NOT_QUEUED(thrd, msg) do { \
+ if ((thrd)->flags & _PQ_IN_SCHEDQ) \
+ PANIC(msg); \
+} while (0)
+
+#else
+
+#define _PQ_SET_ACTIVE()
+#define _PQ_CLEAR_ACTIVE()
+#define _PQ_ASSERT_ACTIVE(msg)
+#define _PQ_ASSERT_INACTIVE(msg)
+#define _PQ_ASSERT_IN_WAITQ(thrd, msg)
+#define _PQ_ASSERT_IN_PRIOQ(thrd, msg)
+#define _PQ_ASSERT_NOT_QUEUED(thrd, msg)
+#define _PQ_CHECK_PRIO()
+
+#endif
+
int
-_pq_init(pq_queue_t *pq, int minprio, int maxprio)
+_pq_alloc(pq_queue_t *pq, int minprio, int maxprio)
{
int i, ret = 0;
int prioslots = maxprio - minprio + 1;
@@ -56,8 +99,26 @@ _pq_init(pq_queue_t *pq, int minprio, int maxprio)
ret = -1;
else {
+ /* Remember the queue size: */
+ pq->pq_size = prioslots;
+
+ ret = _pq_init(pq);
+
+ }
+ return (ret);
+}
+
+int
+_pq_init(pq_queue_t *pq)
+{
+ int i, ret = 0;
+
+ if ((pq == NULL) || (pq->pq_lists == NULL))
+ ret = -1;
+
+ else {
/* Initialize the queue for each priority slot: */
- for (i = 0; i < prioslots; i++) {
+ for (i = 0; i < pq->pq_size; i++) {
TAILQ_INIT(&pq->pq_lists[i].pl_head);
pq->pq_lists[i].pl_prio = i;
pq->pq_lists[i].pl_queued = 0;
@@ -65,9 +126,7 @@ _pq_init(pq_queue_t *pq, int minprio, int maxprio)
/* Initialize the priority queue: */
TAILQ_INIT(&pq->pq_queue);
-
- /* Remember the queue size: */
- pq->pq_size = prioslots;
+ _PQ_CLEAR_ACTIVE();
}
return (ret);
}
@@ -77,7 +136,27 @@ _pq_remove(pq_queue_t *pq, pthread_t pthread)
{
int prio = pthread->active_priority;
+ /*
+ * Make some assertions when debugging is enabled:
+ */
+ _PQ_ASSERT_INACTIVE("_pq_remove: pq_active");
+ _PQ_SET_ACTIVE();
+ _PQ_ASSERT_IN_PRIOQ(pthread, "_pq_remove: Not in priority queue");
+
+ /*
+ * Remove this thread from the priority list. Note that if
+ * the priority list becomes empty, it is not removed
+ * from the priority queue because another thread may be
+ * added to the priority list (resulting in a needless
+ * removal/insertion). Priority lists are only removed
+ * from the priority queue when _pq_first is called.
+ */
TAILQ_REMOVE(&pq->pq_lists[prio].pl_head, pthread, pqe);
+
+ /* This thread is no longer in the priority queue. */
+ pthread->flags &= ~PTHREAD_FLAGS_IN_PRIOQ;
+
+ _PQ_CLEAR_ACTIVE();
}
@@ -86,10 +165,23 @@ _pq_insert_head(pq_queue_t *pq, pthread_t pthread)
{
int prio = pthread->active_priority;
+ /*
+ * Make some assertions when debugging is enabled:
+ */
+ _PQ_ASSERT_INACTIVE("_pq_insert_head: pq_active");
+ _PQ_SET_ACTIVE();
+ _PQ_ASSERT_NOT_QUEUED(pthread,
+ "_pq_insert_head: Already in priority queue");
+
TAILQ_INSERT_HEAD(&pq->pq_lists[prio].pl_head, pthread, pqe);
if (pq->pq_lists[prio].pl_queued == 0)
/* Insert the list into the priority queue: */
pq_insert_prio_list(pq, prio);
+
+ /* Mark this thread as being in the priority queue. */
+ pthread->flags |= PTHREAD_FLAGS_IN_PRIOQ;
+
+ _PQ_CLEAR_ACTIVE();
}
@@ -98,10 +190,23 @@ _pq_insert_tail(pq_queue_t *pq, pthread_t pthread)
{
int prio = pthread->active_priority;
+ /*
+ * Make some assertions when debugging is enabled:
+ */
+ _PQ_ASSERT_INACTIVE("_pq_insert_tail: pq_active");
+ _PQ_SET_ACTIVE();
+ _PQ_ASSERT_NOT_QUEUED(pthread,
+ "_pq_insert_tail: Already in priority queue");
+
TAILQ_INSERT_TAIL(&pq->pq_lists[prio].pl_head, pthread, pqe);
if (pq->pq_lists[prio].pl_queued == 0)
/* Insert the list into the priority queue: */
pq_insert_prio_list(pq, prio);
+
+ /* Mark this thread as being in the priority queue. */
+ pthread->flags |= PTHREAD_FLAGS_IN_PRIOQ;
+
+ _PQ_CLEAR_ACTIVE();
}
@@ -111,6 +216,12 @@ _pq_first(pq_queue_t *pq)
pq_list_t *pql;
pthread_t pthread = NULL;
+ /*
+ * Make some assertions when debugging is enabled:
+ */
+ _PQ_ASSERT_INACTIVE("_pq_first: pq_active");
+ _PQ_SET_ACTIVE();
+
while (((pql = TAILQ_FIRST(&pq->pq_queue)) != NULL) &&
(pthread == NULL)) {
if ((pthread = TAILQ_FIRST(&pql->pl_head)) == NULL) {
@@ -124,6 +235,8 @@ _pq_first(pq_queue_t *pq)
pql->pl_queued = 0;
}
}
+
+ _PQ_CLEAR_ACTIVE();
return (pthread);
}
@@ -134,9 +247,14 @@ pq_insert_prio_list(pq_queue_t *pq, int prio)
pq_list_t *pql;
/*
+ * Make some assertions when debugging is enabled:
+ */
+ _PQ_ASSERT_ACTIVE("pq_insert_prio_list: pq_active");
+
+ /*
* The priority queue is in descending priority order. Start at
* the beginning of the queue and find the list before which the
- * new list should to be inserted.
+ * new list should be inserted.
*/
pql = TAILQ_FIRST(&pq->pq_queue);
while ((pql != NULL) && (pql->pl_prio > prio))
@@ -152,4 +270,66 @@ pq_insert_prio_list(pq_queue_t *pq, int prio)
pq->pq_lists[prio].pl_queued = 1;
}
+#if defined(_PTHREADS_INVARIANTS)
+void
+_waitq_insert(pthread_t pthread)
+{
+ pthread_t tid;
+
+ /*
+ * Make some assertions when debugging is enabled:
+ */
+ _PQ_ASSERT_INACTIVE("_waitq_insert: pq_active");
+ _PQ_SET_ACTIVE();
+ _PQ_ASSERT_NOT_QUEUED(pthread, "_waitq_insert: Already in queue");
+
+ if (pthread->wakeup_time.tv_sec == -1)
+ TAILQ_INSERT_TAIL(&_waitingq, pthread, pqe);
+ else {
+ tid = TAILQ_FIRST(&_waitingq);
+ while ((tid != NULL) && (tid->wakeup_time.tv_sec != -1) &&
+ ((tid->wakeup_time.tv_sec < pthread->wakeup_time.tv_sec) ||
+ ((tid->wakeup_time.tv_sec == pthread->wakeup_time.tv_sec) &&
+ (tid->wakeup_time.tv_nsec <= pthread->wakeup_time.tv_nsec))))
+ tid = TAILQ_NEXT(tid, pqe);
+ if (tid == NULL)
+ TAILQ_INSERT_TAIL(&_waitingq, pthread, pqe);
+ else
+ TAILQ_INSERT_BEFORE(tid, pthread, pqe);
+ }
+ pthread->flags |= PTHREAD_FLAGS_IN_WAITQ;
+
+ _PQ_CLEAR_ACTIVE();
+}
+
+void
+_waitq_remove(pthread_t pthread)
+{
+ /*
+ * Make some assertions when debugging is enabled:
+ */
+ _PQ_ASSERT_INACTIVE("_waitq_remove: pq_active");
+ _PQ_SET_ACTIVE();
+ _PQ_ASSERT_IN_WAITQ(pthread, "_waitq_remove: Not in queue");
+
+ TAILQ_REMOVE(&_waitingq, pthread, pqe);
+ pthread->flags &= ~PTHREAD_FLAGS_IN_WAITQ;
+
+ _PQ_CLEAR_ACTIVE();
+}
+
+void
+_waitq_setactive(void)
+{
+ _PQ_ASSERT_INACTIVE("_waitq_setactive: pq_active");
+ _PQ_SET_ACTIVE();
+}
+
+void
+_waitq_clearactive(void)
+{
+ _PQ_ASSERT_ACTIVE("_waitq_clearactive: ! pq_active");
+ _PQ_CLEAR_ACTIVE();
+}
+#endif
#endif
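As _waitq_insert shows, the waiting queue is now kept sorted by wakeup time, with untimed waiters (tv_sec == -1) collecting at the tail, so the scheduler finds the nearest wakeup by looking only at the queue head. A standalone sketch of the same ordering discipline; the thread type here is an illustrative stand-in:

    #include <sys/queue.h>
    #include <time.h>

    struct thread {
            struct timespec wakeup_time;    /* tv_sec == -1: no timeout */
            TAILQ_ENTRY(thread) pqe;
    };
    TAILQ_HEAD(waitq, thread);

    static void
    waitq_insert(struct waitq *wq, struct thread *t)
    {
            struct thread *tid;

            /* Untimed waiters sort after every timed waiter. */
            if (t->wakeup_time.tv_sec == -1) {
                    TAILQ_INSERT_TAIL(wq, t, pqe);
                    return;
            }
            TAILQ_FOREACH(tid, wq, pqe) {
                    if (tid->wakeup_time.tv_sec == -1 ||
                        tid->wakeup_time.tv_sec > t->wakeup_time.tv_sec ||
                        (tid->wakeup_time.tv_sec == t->wakeup_time.tv_sec &&
                        tid->wakeup_time.tv_nsec > t->wakeup_time.tv_nsec)) {
                            /* First entry waking later than t: go before it. */
                            TAILQ_INSERT_BEFORE(tid, t, pqe);
                            return;
                    }
            }
            TAILQ_INSERT_TAIL(wq, t, pqe);
    }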
diff --git a/lib/libkse/thread/thr_private.h b/lib/libkse/thread/thr_private.h
index 6cacb27..036ea80 100644
--- a/lib/libkse/thread/thr_private.h
+++ b/lib/libkse/thread/thr_private.h
@@ -31,6 +31,7 @@
*
* Private thread definitions for the uthread kernel.
*
+ * $Id$
*/
#ifndef _PTHREAD_PRIVATE_H
@@ -68,33 +69,88 @@
/*
- * Priority queue manipulation macros:
+ * Priority queue manipulation macros (using pqe link):
*/
#define PTHREAD_PRIOQ_INSERT_HEAD(thrd) _pq_insert_head(&_readyq,thrd)
#define PTHREAD_PRIOQ_INSERT_TAIL(thrd) _pq_insert_tail(&_readyq,thrd)
#define PTHREAD_PRIOQ_REMOVE(thrd) _pq_remove(&_readyq,thrd)
-#define PTHREAD_PRIOQ_FIRST _pq_first(&_readyq)
+#define PTHREAD_PRIOQ_FIRST() _pq_first(&_readyq)
/*
- * Waiting queue manipulation macros:
+ * Waiting queue manipulation macros (using pqe link):
*/
-#define PTHREAD_WAITQ_INSERT(thrd) TAILQ_INSERT_TAIL(&_waitingq,thrd,pqe)
+#if defined(_PTHREADS_INVARIANTS)
+#define PTHREAD_WAITQ_REMOVE(thrd) _waitq_remove(thrd)
+#define PTHREAD_WAITQ_INSERT(thrd) _waitq_insert(thrd)
+#define PTHREAD_WAITQ_CLEARACTIVE() _waitq_clearactive()
+#define PTHREAD_WAITQ_SETACTIVE() _waitq_setactive()
+#else
#define PTHREAD_WAITQ_REMOVE(thrd) TAILQ_REMOVE(&_waitingq,thrd,pqe)
+#define PTHREAD_WAITQ_INSERT(thrd) do { \
+ if ((thrd)->wakeup_time.tv_sec == -1) \
+ TAILQ_INSERT_TAIL(&_waitingq,thrd,pqe); \
+ else { \
+ pthread_t tid = TAILQ_FIRST(&_waitingq); \
+ while ((tid != NULL) && (tid->wakeup_time.tv_sec != -1) && \
+ ((tid->wakeup_time.tv_sec < (thrd)->wakeup_time.tv_sec) || \
+ ((tid->wakeup_time.tv_sec == (thrd)->wakeup_time.tv_sec) && \
+ (tid->wakeup_time.tv_nsec <= (thrd)->wakeup_time.tv_nsec)))) \
+ tid = TAILQ_NEXT(tid, pqe); \
+ if (tid == NULL) \
+ TAILQ_INSERT_TAIL(&_waitingq,thrd,pqe); \
+ else \
+ TAILQ_INSERT_BEFORE(tid,thrd,pqe); \
+ } \
+} while (0)
+#define PTHREAD_WAITQ_CLEARACTIVE()
+#define PTHREAD_WAITQ_SETACTIVE()
+#endif
+
+/*
+ * Work queue manipulation macros (using qe link):
+ */
+#define PTHREAD_WORKQ_INSERT(thrd) do { \
+ TAILQ_INSERT_TAIL(&_workq,thrd,qe); \
+ (thrd)->flags |= PTHREAD_FLAGS_IN_WORKQ; \
+} while (0)
+#define PTHREAD_WORKQ_REMOVE(thrd) do { \
+ TAILQ_REMOVE(&_workq,thrd,qe); \
+ (thrd)->flags &= ~PTHREAD_FLAGS_IN_WORKQ; \
+} while (0)
+
/*
* State change macro without scheduling queue change:
*/
-#define PTHREAD_SET_STATE(thrd, newstate) { \
+#define PTHREAD_SET_STATE(thrd, newstate) do { \
(thrd)->state = newstate; \
(thrd)->fname = __FILE__; \
(thrd)->lineno = __LINE__; \
-}
+} while (0)
/*
* State change macro with scheduling queue change - This must be
* called with preemption deferred (see thread_kern_sched_[un]defer).
*/
-#define PTHREAD_NEW_STATE(thrd, newstate) { \
+#if defined(_PTHREADS_INVARIANTS)
+#define PTHREAD_NEW_STATE(thrd, newstate) do { \
+ if (_thread_kern_new_state != 0) \
+ PANIC("Recursive PTHREAD_NEW_STATE"); \
+ _thread_kern_new_state = 1; \
+ if ((thrd)->state != newstate) { \
+ if ((thrd)->state == PS_RUNNING) { \
+ PTHREAD_PRIOQ_REMOVE(thrd); \
+ PTHREAD_WAITQ_INSERT(thrd); \
+ } else if (newstate == PS_RUNNING) { \
+ PTHREAD_WAITQ_REMOVE(thrd); \
+ PTHREAD_PRIOQ_INSERT_TAIL(thrd); \
+ } \
+ } \
+ _thread_kern_new_state = 0; \
+ PTHREAD_SET_STATE(thrd, newstate); \
+} while (0)
+#else
+#define PTHREAD_NEW_STATE(thrd, newstate) do { \
if ((thrd)->state != newstate) { \
if ((thrd)->state == PS_RUNNING) { \
PTHREAD_PRIOQ_REMOVE(thrd); \
@@ -105,7 +161,8 @@
} \
} \
PTHREAD_SET_STATE(thrd, newstate); \
-}
+} while (0)
+#endif
/*
* Define the signals to be used for scheduling.
@@ -119,15 +176,6 @@
#endif
/*
- * Queue definitions.
- */
-struct pthread_queue {
- struct pthread *q_next;
- struct pthread *q_last;
- void *q_data;
-};
-
-/*
* Priority queues.
*
* XXX It'd be nice if these were contained in uthread_priority_queue.[ch].
@@ -147,14 +195,9 @@ typedef struct pq_queue {
/*
- * Static queue initialization values.
- */
-#define PTHREAD_QUEUE_INITIALIZER { NULL, NULL, NULL }
-
-/*
* TailQ initialization values.
*/
-#define TAILQ_INITIALIZER { NULL, NULL }
+#define TAILQ_INITIALIZER { NULL, NULL }
/*
* Mutex definitions.
@@ -257,7 +300,7 @@ struct pthread_cond_attr {
* Static cond initialization values.
*/
#define PTHREAD_COND_STATIC_INITIALIZER \
- { COND_TYPE_FAST, PTHREAD_QUEUE_INITIALIZER, NULL, NULL, \
+ { COND_TYPE_FAST, TAILQ_INITIALIZER, NULL, NULL, \
0, _SPINLOCK_INITIALIZER }
/*
@@ -303,12 +346,6 @@ struct pthread_attr {
#define CLOCK_RES_NSEC 10000000
/*
- * Number of microseconds between incremental priority updates for
- * threads that are ready to run, but denied being run.
- */
-#define INC_PRIO_USEC 500000
-
-/*
* Time slice period in microseconds.
*/
#define TIMESLICE_USEC 100000
@@ -345,6 +382,7 @@ enum pthread_state {
PS_FDR_WAIT,
PS_FDW_WAIT,
PS_FILE_WAIT,
+ PS_POLL_WAIT,
PS_SELECT_WAIT,
PS_SLEEP_WAIT,
PS_WAIT_WAIT,
@@ -377,8 +415,8 @@ struct fd_table_entry {
* state of the lock on the file descriptor.
*/
spinlock_t lock;
- struct pthread_queue r_queue; /* Read queue. */
- struct pthread_queue w_queue; /* Write queue. */
+ TAILQ_HEAD(, pthread) r_queue; /* Read queue. */
+ TAILQ_HEAD(, pthread) w_queue; /* Write queue. */
struct pthread *r_owner; /* Ptr to thread owning read lock. */
struct pthread *w_owner; /* Ptr to thread owning write lock. */
char *r_fname; /* Ptr to read lock source file name */
@@ -390,11 +428,9 @@ struct fd_table_entry {
int flags; /* Flags used in open. */
};
-struct pthread_select_data {
+struct pthread_poll_data {
int nfds;
- fd_set readfds;
- fd_set writefds;
- fd_set exceptfds;
+ struct pollfd *fds;
};
union pthread_wait_data {
@@ -406,7 +442,7 @@ union pthread_wait_data {
short branch; /* Line number, for debugging. */
char *fname; /* Source file name for debugging.*/
} fd;
- struct pthread_select_data * select_data;
+ struct pthread_poll_data * poll_data;
spinlock_t *spinlock;
};
@@ -427,15 +463,11 @@ struct pthread {
*/
spinlock_t lock;
- /*
- * Pointer to the next thread in the thread linked list.
- */
- struct pthread *nxt;
+ /* Queue entry for list of all threads: */
+ TAILQ_ENTRY(pthread) tle;
- /*
- * Pointer to the next thread in the dead thread linked list.
- */
- struct pthread *nxt_dead;
+ /* Queue entry for list of dead threads: */
+ TAILQ_ENTRY(pthread) dle;
/*
* Thread start routine, argument, stack pointer and thread
@@ -514,25 +546,25 @@ struct pthread {
*/
int error;
- /* Join queue for waiting threads: */
- struct pthread_queue join_queue;
+ /* Join queue head and link for waiting threads: */
+ TAILQ_HEAD(join_head, pthread) join_queue;
/*
- * The current thread can belong to only one scheduling queue
- * at a time (ready or waiting queue). It can also belong to
- * a queue of threads waiting on mutexes or condition variables.
- * Use pqe for the scheduling queue link (both ready and waiting),
- * and qe for other links (mutexes and condition variables).
+ * The current thread can belong to only one scheduling queue at
+ * a time (ready or waiting queue). It can also belong to (only)
+ * one of:
*
- * Pointer to queue (if any) on which the current thread is waiting.
+ * o A queue of threads waiting for a mutex
+ * o A queue of threads waiting for a condition variable
+ * o A queue of threads waiting for another thread to terminate
+ * (the join queue above)
+ * o A queue of threads waiting for a file descriptor lock
+ * o A queue of threads needing work done by the kernel thread
+ * (waiting for a spinlock or file I/O)
*
- * XXX The queuing should be changed to use the TAILQ entry below.
- * XXX For the time being, it's hybrid.
+ * Use pqe for the scheduling queue link (both ready and waiting),
+ * and qe for other links.
*/
- struct pthread_queue *queue;
-
- /* Pointer to next element in queue. */
- struct pthread *qnxt;
/* Priority queue entry for this thread: */
TAILQ_ENTRY(pthread) pqe;
@@ -544,6 +576,11 @@ struct pthread {
union pthread_wait_data data;
/*
+ * Allocated for converting select into poll.
+ */
+ struct pthread_poll_data poll_data;
+
+ /*
* Set to TRUE if a blocking operation was
* interrupted by a signal:
*/
@@ -553,23 +590,26 @@ struct pthread {
int signo;
/*
- * Set to non-zero when this thread has deferred thread
- * scheduling. We allow for recursive deferral.
+ * Set to non-zero when this thread has deferred signals.
+ * We allow for recursive deferral.
*/
- int sched_defer_count;
+ int sig_defer_count;
/*
* Set to TRUE if this thread should yield after undeferring
- * thread scheduling.
+ * signals.
*/
- int yield_on_sched_undefer;
+ int yield_on_sig_undefer;
/* Miscellaneous data. */
int flags;
#define PTHREAD_FLAGS_PRIVATE 0x0001
#define PTHREAD_EXITING 0x0002
-#define PTHREAD_FLAGS_QUEUED 0x0004 /* in queue (qe is used) */
-#define PTHREAD_FLAGS_TRACE 0x0008
+#define PTHREAD_FLAGS_IN_CONDQ 0x0004 /* in condition queue using qe link */
+#define PTHREAD_FLAGS_IN_WORKQ 0x0008 /* in work queue using qe link */
+#define PTHREAD_FLAGS_IN_WAITQ 0x0010 /* in waiting queue using pqe link */
+#define PTHREAD_FLAGS_IN_PRIOQ 0x0020 /* in priority queue using pqe link */
+#define PTHREAD_FLAGS_TRACE 0x0040 /* for debugging purposes */
/*
* Base priority is the user-settable and retrievable priority
@@ -592,7 +632,7 @@ struct pthread {
/*
* Active priority is always the maximum of the threads base
* priority and inherited priority. When there is a change
- * in either the real or inherited priority, the active
+ * in either the base or inherited priority, the active
* priority must be recalculated.
*/
char active_priority;
@@ -649,10 +689,10 @@ SCLASS struct pthread * volatile _thread_single
;
#endif
-/* Ptr to the first thread in the thread linked list: */
-SCLASS struct pthread * volatile _thread_link_list
+/* List of all threads: */
+SCLASS TAILQ_HEAD(, pthread) _thread_list
#ifdef GLOBAL_PTHREAD_PRIVATE
-= NULL;
+= TAILQ_HEAD_INITIALIZER(_thread_list);
#else
;
#endif
@@ -661,7 +701,7 @@ SCLASS struct pthread * volatile _thread_link_list
* Array of kernel pipe file descriptors that are used to ensure that
* no signals are missed in calls to _select.
*/
-SCLASS int _thread_kern_pipe[2]
+SCLASS int _thread_kern_pipe[2]
#ifdef GLOBAL_PTHREAD_PRIVATE
= {
-1,
@@ -670,7 +710,7 @@ SCLASS int _thread_kern_pipe[2]
#else
;
#endif
-SCLASS int _thread_kern_in_select
+SCLASS int volatile _queue_signals
#ifdef GLOBAL_PTHREAD_PRIVATE
= 0;
#else
@@ -692,9 +732,9 @@ SCLASS struct timeval kern_inc_prio_time
#endif
/* Dead threads: */
-SCLASS struct pthread * volatile _thread_dead
+SCLASS TAILQ_HEAD(, pthread) _dead_list
#ifdef GLOBAL_PTHREAD_PRIVATE
-= NULL;
+= TAILQ_HEAD_INITIALIZER(_dead_list);
#else
;
#endif
@@ -747,6 +787,14 @@ SCLASS struct fd_table_entry **_thread_fd_table
;
#endif
+/* Table for polling file descriptors: */
+SCLASS struct pollfd *_thread_pfd_table
+#ifdef GLOBAL_PTHREAD_PRIVATE
+= NULL;
+#else
+;
+#endif
+
SCLASS const int dtablecount
#ifdef GLOBAL_PTHREAD_PRIVATE
= 4096/sizeof(struct fd_table_entry);
@@ -760,6 +808,13 @@ SCLASS int _thread_dtablesize /* Descriptor table size. */
;
#endif
+SCLASS int _clock_res_nsec /* Clock resolution in nsec. */
+#ifdef GLOBAL_PTHREAD_PRIVATE
+= CLOCK_RES_NSEC;
+#else
+;
+#endif
+
/* Garbage collector mutex and condition variable. */
SCLASS pthread_mutex_t _gc_mutex
#ifdef GLOBAL_PTHREAD_PRIVATE
@@ -783,8 +838,20 @@ struct sigaction _thread_sigact[NSIG];
SCLASS pq_queue_t _readyq;
SCLASS TAILQ_HEAD(, pthread) _waitingq;
-/* Indicates that the waitingq now has threads ready to run. */
-SCLASS volatile int _waitingq_check_reqd
+/*
+ * Work queue:
+ */
+SCLASS TAILQ_HEAD(, pthread) _workq;
+
+/* Tracks the number of threads blocked while waiting for a spinlock. */
+SCLASS volatile int _spinblock_count
+#ifdef GLOBAL_PTHREAD_PRIVATE
+= 0
+#endif
+;
+
+/* Indicates that the signal queue needs to be checked. */
+SCLASS volatile int _sigq_check_reqd
#ifdef GLOBAL_PTHREAD_PRIVATE
= 0
#endif
@@ -797,6 +864,13 @@ SCLASS pthread_switch_routine_t _sched_switch_hook
#endif
;
+/* Used for _PTHREADS_INVARIANTS checking. */
+SCLASS int _thread_kern_new_state
+#ifdef GLOBAL_PTHREAD_PRIVATE
+= 0
+#endif
+;
+
/* Undefine the storage class specifier: */
#undef SCLASS
@@ -825,18 +899,23 @@ int _thread_fd_lock(int, int, struct timespec *);
int _thread_fd_lock_debug(int, int, struct timespec *,char *fname,int lineno);
void _dispatch_signals(void);
void _thread_signal(pthread_t, int);
-void _lock_thread(void);
-void _lock_thread_list(void);
-void _unlock_thread(void);
-void _unlock_thread_list(void);
int _mutex_cv_lock(pthread_mutex_t *);
int _mutex_cv_unlock(pthread_mutex_t *);
+int _mutex_reinit(pthread_mutex_t *);
void _mutex_notify_priochange(struct pthread *);
-int _pq_init(struct pq_queue *pq, int, int);
+int _cond_reinit(pthread_cond_t *);
+int _pq_alloc(struct pq_queue *, int, int);
+int _pq_init(struct pq_queue *);
void _pq_remove(struct pq_queue *pq, struct pthread *);
void _pq_insert_head(struct pq_queue *pq, struct pthread *);
void _pq_insert_tail(struct pq_queue *pq, struct pthread *);
struct pthread *_pq_first(struct pq_queue *pq);
+#if defined(_PTHREADS_INVARIANTS)
+void _waitq_insert(pthread_t pthread);
+void _waitq_remove(pthread_t pthread);
+void _waitq_setactive(void);
+void _waitq_clearactive(void);
+#endif
void _thread_exit(char *, int, char *);
void _thread_fd_unlock(int, int);
void _thread_fd_unlock_debug(int, int, char *, int);
@@ -849,18 +928,15 @@ void _thread_kern_sched_state(enum pthread_state,char *fname,int lineno);
void _thread_kern_sched_state_unlock(enum pthread_state state,
spinlock_t *lock, char *fname, int lineno);
void _thread_kern_set_timeout(struct timespec *);
-void _thread_kern_sched_defer(void);
-void _thread_kern_sched_undefer(void);
+void _thread_kern_sig_defer(void);
+void _thread_kern_sig_undefer(void);
void _thread_sig_handler(int, int, struct sigcontext *);
+void _thread_sig_handle(int, struct sigcontext *);
+void _thread_sig_init(void);
void _thread_start(void);
void _thread_start_sig_handler(void);
void _thread_seterrno(pthread_t,int);
-void _thread_queue_init(struct pthread_queue *);
-void _thread_queue_enq(struct pthread_queue *, struct pthread *);
-int _thread_queue_remove(struct pthread_queue *, struct pthread *);
int _thread_fd_table_init(int fd);
-struct pthread *_thread_queue_get(struct pthread_queue *);
-struct pthread *_thread_queue_deq(struct pthread_queue *);
pthread_addr_t _thread_gc(pthread_addr_t);
/* #include <signal.h> */
@@ -1036,6 +1112,11 @@ pid_t _thread_sys_waitpid(pid_t, int *, int);
pid_t _thread_sys_wait3(int *, int, struct rusage *);
pid_t _thread_sys_wait4(pid_t, int *, int, struct rusage *);
#endif
+
+/* #include <poll.h> */
+#ifdef _SYS_POLL_H_
+int _thread_sys_poll(struct pollfd *, unsigned, int);
+#endif
__END_DECLS
#endif /* !_PTHREAD_PRIVATE_H */
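The renamed sig_defer_count / yield_on_sig_undefer fields describe the new critical-section protocol: instead of blocking signals around queue manipulation, a thread bumps a recursion counter, and the signal handler defers its work whenever the counter is non-zero. A much-simplified sketch of the defer/undefer pair (the real _thread_kern_sig_undefer also drains signals queued to the kernel pipe):

    #include <sched.h>

    static int sig_defer_count;             /* critical-section depth */
    static volatile int yield_on_sig_undefer;

    static void
    sig_defer(void)
    {
            sig_defer_count++;              /* nesting is allowed */
    }

    static void
    sig_undefer(void)
    {
            if (--sig_defer_count == 0 && yield_on_sig_undefer) {
                    /* A scheduling signal arrived while we were deferred. */
                    yield_on_sig_undefer = 0;
                    sched_yield();
            }
    }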
diff --git a/lib/libkse/thread/thr_resume_np.c b/lib/libkse/thread/thr_resume_np.c
index 885a457..b12baba 100644
--- a/lib/libkse/thread/thr_resume_np.c
+++ b/lib/libkse/thread/thr_resume_np.c
@@ -29,6 +29,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
+ * $Id$
*/
#include <errno.h>
#ifdef _THREAD_SAFE
@@ -46,20 +47,19 @@ pthread_resume_np(pthread_t thread)
/* The thread exists. Is it suspended? */
if (thread->state != PS_SUSPENDED) {
/*
- * Guard against preemption by a scheduling signal.
- * A change of thread state modifies the waiting
- * and priority queues.
+ * Defer signals to protect the scheduling queues
+ * from access by the signal handler:
*/
- _thread_kern_sched_defer();
+ _thread_kern_sig_defer();
/* Allow the thread to run. */
PTHREAD_NEW_STATE(thread,PS_RUNNING);
/*
- * Reenable preemption and yield if a scheduling
- * signal occurred while in the critical region.
+ * Undefer and handle pending signals, yielding if
+ * necessary:
*/
- _thread_kern_sched_undefer();
+ _thread_kern_sig_undefer();
}
}
return(ret);
diff --git a/lib/libkse/thread/thr_select.c b/lib/libkse/thread/thr_select.c
index 6d7d7dc..c2f86c5 100644
--- a/lib/libkse/thread/thr_select.c
+++ b/lib/libkse/thread/thr_select.c
@@ -29,10 +29,13 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
+ * $Id$
*/
#include <unistd.h>
#include <errno.h>
+#include <poll.h>
#include <string.h>
+#include <sys/param.h>
#include <sys/types.h>
#include <sys/time.h>
#include <sys/fcntl.h>
@@ -44,12 +47,10 @@ int
select(int numfds, fd_set * readfds, fd_set * writefds,
fd_set * exceptfds, struct timeval * timeout)
{
- fd_set read_locks, write_locks, rdwr_locks;
struct timespec ts;
- struct timeval zero_timeout = {0, 0};
- int i, ret = 0, got_all_locks = 1;
- int f_wait = 1;
- struct pthread_select_data data;
+ int i, ret = 0, f_wait = 1;
+ int pfd_index, got_one = 0, fd_count = 0;
+ struct pthread_poll_data data;
if (numfds > _thread_dtablesize) {
numfds = _thread_dtablesize;
@@ -68,112 +69,129 @@ select(int numfds, fd_set * readfds, fd_set * writefds,
_thread_kern_set_timeout(NULL);
}
- FD_ZERO(&read_locks);
- FD_ZERO(&write_locks);
- FD_ZERO(&rdwr_locks);
-
- /* lock readfds */
+ /* Count the number of file descriptors to be polled: */
if (readfds || writefds || exceptfds) {
for (i = 0; i < numfds; i++) {
- if ((readfds && (FD_ISSET(i, readfds))) || (exceptfds && FD_ISSET(i, exceptfds))) {
- if (writefds && FD_ISSET(i, writefds)) {
- if ((ret = _FD_LOCK(i, FD_RDWR, NULL)) != 0) {
- got_all_locks = 0;
- break;
- }
- FD_SET(i, &rdwr_locks);
- } else {
- if ((ret = _FD_LOCK(i, FD_READ, NULL)) != 0) {
- got_all_locks = 0;
- break;
- }
- FD_SET(i, &read_locks);
- }
- } else {
- if (writefds && FD_ISSET(i, writefds)) {
- if ((ret = _FD_LOCK(i, FD_WRITE, NULL)) != 0) {
- got_all_locks = 0;
- break;
- }
- FD_SET(i, &write_locks);
- }
+ if ((readfds && FD_ISSET(i, readfds)) ||
+ (exceptfds && FD_ISSET(i, exceptfds)) ||
+ (writefds && FD_ISSET(i, writefds))) {
+ fd_count++;
}
}
}
- if (got_all_locks) {
- data.nfds = numfds;
- FD_ZERO(&data.readfds);
- FD_ZERO(&data.writefds);
- FD_ZERO(&data.exceptfds);
- if (readfds != NULL) {
- memcpy(&data.readfds, readfds, sizeof(data.readfds));
- }
- if (writefds != NULL) {
- memcpy(&data.writefds, writefds, sizeof(data.writefds));
+
+ /*
+ * Allocate memory for poll data if it hasn't already been
+ * allocated or if previously allocated memory is insufficient.
+ */
+ if ((_thread_run->poll_data.fds == NULL) ||
+ (_thread_run->poll_data.nfds < fd_count)) {
+ data.fds = (struct pollfd *) realloc(_thread_run->poll_data.fds,
+ sizeof(struct pollfd) * MAX(128, fd_count));
+ if (data.fds == NULL) {
+ errno = ENOMEM;
+ ret = -1;
}
- if (exceptfds != NULL) {
- memcpy(&data.exceptfds, exceptfds, sizeof(data.exceptfds));
+ else {
+ /*
+ * Note that the thread's poll data always
+ * indicates what is allocated, not what is
+ * currently being polled.
+ */
+ _thread_run->poll_data.fds = data.fds;
+ _thread_run->poll_data.nfds = MAX(128, fd_count);
}
- if ((ret = _thread_sys_select(data.nfds, &data.readfds, &data.writefds, &data.exceptfds, &zero_timeout)) == 0 && f_wait) {
- data.nfds = numfds;
- FD_ZERO(&data.readfds);
- FD_ZERO(&data.writefds);
- FD_ZERO(&data.exceptfds);
- if (readfds != NULL) {
- memcpy(&data.readfds, readfds, sizeof(data.readfds));
+ }
+ if (ret == 0) {
+ /* Setup the wait data. */
+ data.fds = _thread_run->poll_data.fds;
+ data.nfds = fd_count;
+
+ /*
+ * Setup the array of pollfds. Optimize this by
+ * running the loop in reverse and stopping when
+ * the number of selected file descriptors is reached.
+ */
+ for (i = numfds - 1, pfd_index = fd_count - 1;
+ (i >= 0) && (pfd_index >= 0); i--) {
+ data.fds[pfd_index].events = 0;
+ if (readfds && FD_ISSET(i, readfds)) {
+ data.fds[pfd_index].events = POLLRDNORM;
}
- if (writefds != NULL) {
- memcpy(&data.writefds, writefds, sizeof(data.writefds));
+ if (exceptfds && FD_ISSET(i, exceptfds)) {
+ data.fds[pfd_index].events |= POLLRDBAND;
}
- if (exceptfds != NULL) {
- memcpy(&data.exceptfds, exceptfds, sizeof(data.exceptfds));
+ if (writefds && FD_ISSET(i, writefds)) {
+ data.fds[pfd_index].events |= POLLWRNORM;
+ }
+ if (data.fds[pfd_index].events != 0) {
+ /*
+ * Set the file descriptor to be polled and
+ * clear revents in case of a timeout which
+ * leaves fds unchanged:
+ */
+ data.fds[pfd_index].fd = i;
+ data.fds[pfd_index].revents = 0;
+ pfd_index--;
}
- _thread_run->data.select_data = &data;
+ }
+ if (((ret = _thread_sys_poll(data.fds, data.nfds, 0)) == 0) &&
+ (f_wait != 0)) {
+ _thread_run->data.poll_data = &data;
_thread_run->interrupted = 0;
_thread_kern_sched_state(PS_SELECT_WAIT, __FILE__, __LINE__);
if (_thread_run->interrupted) {
errno = EINTR;
+ data.nfds = 0;
ret = -1;
} else
ret = data.nfds;
}
}
- /* clean up the locks */
- for (i = 0; i < numfds; i++)
- if (FD_ISSET(i, &read_locks))
- _FD_UNLOCK(i, FD_READ);
- for (i = 0; i < numfds; i++)
- if (FD_ISSET(i, &rdwr_locks))
- _FD_UNLOCK(i, FD_RDWR);
- for (i = 0; i < numfds; i++)
- if (FD_ISSET(i, &write_locks))
- _FD_UNLOCK(i, FD_WRITE);
if (ret >= 0) {
- if (readfds != NULL) {
- for (i = 0; i < numfds; i++) {
- if (FD_ISSET(i, readfds) &&
- !FD_ISSET(i, &data.readfds)) {
- FD_CLR(i, readfds);
+ numfds = 0;
+ for (i = 0; i < fd_count; i++) {
+ /*
+ * Check the results of the poll and clear
+ * this file descriptor from the fdset if
+ * the requested event wasn't ready.
+ */
+ got_one = 0;
+ if (readfds != NULL) {
+ if (FD_ISSET(data.fds[i].fd, readfds)) {
+ if (data.fds[i].revents & (POLLIN |
+ POLLRDNORM))
+ got_one = 1;
+ else
+ FD_CLR(data.fds[i].fd, readfds);
}
}
- }
- if (writefds != NULL) {
- for (i = 0; i < numfds; i++) {
- if (FD_ISSET(i, writefds) &&
- !FD_ISSET(i, &data.writefds)) {
- FD_CLR(i, writefds);
+ if (writefds != NULL) {
+ if (FD_ISSET(data.fds[i].fd, writefds)) {
+ if (data.fds[i].revents & (POLLOUT |
+ POLLWRNORM | POLLWRBAND))
+ got_one = 1;
+ else
+ FD_CLR(data.fds[i].fd,
+ writefds);
}
}
- }
- if (exceptfds != NULL) {
- for (i = 0; i < numfds; i++) {
- if (FD_ISSET(i, exceptfds) &&
- !FD_ISSET(i, &data.exceptfds)) {
- FD_CLR(i, exceptfds);
+ if (exceptfds != NULL) {
+ if (FD_ISSET(data.fds[i].fd, exceptfds)) {
+ if (data.fds[i].revents & (POLLRDBAND |
+ POLLPRI | POLLHUP | POLLERR |
+ POLLNVAL))
+ got_one = 1;
+ else
+ FD_CLR(data.fds[i].fd,
+ exceptfds);
}
}
+ if (got_one)
+ numfds++;
}
+ ret = numfds;
}
return (ret);
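The wrapped select above converts each fd_set into a pollfd array and caches the allocation per thread, so steady-state calls never touch malloc; the cached nfds records how many slots are allocated, not how many are being polled. A sketch of that reservation policy (the helper name is illustrative; the 128-slot floor and MAX() follow the code above, and pd->fds must start out NULL so realloc acts as malloc):

    #include <errno.h>
    #include <poll.h>
    #include <stdlib.h>
    #include <sys/param.h>          /* MAX() */

    struct poll_data {
            int nfds;               /* slots allocated, not in use */
            struct pollfd *fds;
    };

    static int
    pfd_reserve(struct poll_data *pd, int want)
    {
            struct pollfd *p;
            int n = MAX(128, want);

            if (pd->fds != NULL && pd->nfds >= want)
                    return (0);     /* existing allocation suffices */
            if ((p = realloc(pd->fds, sizeof(*p) * n)) == NULL)
                    return (ENOMEM);
            pd->fds = p;
            pd->nfds = n;
            return (0);
    }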
diff --git a/lib/libkse/thread/thr_setschedparam.c b/lib/libkse/thread/thr_setschedparam.c
index 93635da..1ba0e20 100644
--- a/lib/libkse/thread/thr_setschedparam.c
+++ b/lib/libkse/thread/thr_setschedparam.c
@@ -29,6 +29,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
+ * $Id$
*/
#include <errno.h>
#include <sys/param.h>
@@ -50,10 +51,10 @@ pthread_setschedparam(pthread_t pthread, int policy, struct sched_param *param)
/* Find the thread in the list of active threads: */
else if ((ret = _find_thread(pthread)) == 0) {
/*
- * Guard against being preempted by a scheduling
- * signal:
+ * Defer signals to protect the scheduling queues from
+ * access by the signal handler:
*/
- _thread_kern_sched_defer();
+ _thread_kern_sig_defer();
if (param->sched_priority != pthread->base_priority) {
/*
@@ -61,8 +62,7 @@ pthread_setschedparam(pthread_t pthread, int policy, struct sched_param *param)
* queue before any adjustments are made to its
* active priority:
*/
- if ((pthread != _thread_run) &&
- (pthread->state == PS_RUNNING)) {
+ if ((pthread->flags & PTHREAD_FLAGS_IN_PRIOQ) != 0) {
in_readyq = 1;
old_prio = pthread->active_priority;
PTHREAD_PRIOQ_REMOVE(pthread);
@@ -103,10 +103,10 @@ pthread_setschedparam(pthread_t pthread, int policy, struct sched_param *param)
pthread->attr.sched_policy = policy;
/*
- * Renable preemption and yield if a scheduling signal
- * arrived while in the critical region:
+ * Undefer and handle pending signals, yielding if
+ * necessary:
*/
- _thread_kern_sched_undefer();
+ _thread_kern_sig_undefer();
}
return(ret);
}
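The PTHREAD_FLAGS_IN_PRIOQ test above replaces a guess based on thread state: a PS_RUNNING thread is not necessarily sitting in the ready queue, but the flag says definitively whether a requeue is needed. The remove/adjust/reinsert discipline, sketched with stand-in declarations for the library's queue macros (this omits the head-versus-tail placement refinement the real routine applies):

    struct thread {
            int flags;
            int base_priority, inherited_priority, active_priority;
    };
    #define THR_IN_PRIOQ 0x0020     /* stand-in for PTHREAD_FLAGS_IN_PRIOQ */

    void prioq_remove(struct thread *);        /* PTHREAD_PRIOQ_REMOVE */
    void prioq_insert_tail(struct thread *);   /* PTHREAD_PRIOQ_INSERT_TAIL */

    void
    set_base_priority(struct thread *t, int prio)
    {
            int in_readyq = (t->flags & THR_IN_PRIOQ) != 0;

            if (in_readyq)
                    prioq_remove(t);        /* old priority slot is stale */
            t->base_priority = prio;
            /* Active priority is the max of base and inherited: */
            t->active_priority = (t->inherited_priority > prio) ?
                t->inherited_priority : prio;
            if (in_readyq)
                    prioq_insert_tail(t);
    }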
diff --git a/lib/libkse/thread/thr_sig.c b/lib/libkse/thread/thr_sig.c
index e51d949..ac475a4 100644
--- a/lib/libkse/thread/thr_sig.c
+++ b/lib/libkse/thread/thr_sig.c
@@ -29,6 +29,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
+ * $Id$
*/
#include <signal.h>
#include <fcntl.h>
@@ -38,107 +39,51 @@
#include <pthread.h>
#include "pthread_private.h"
-/*
- * State change macro for signal handler:
- */
-#define PTHREAD_SIG_NEW_STATE(thrd, newstate) { \
- if ((_thread_run->sched_defer_count == 0) && \
- (_thread_kern_in_sched == 0)) { \
- PTHREAD_NEW_STATE(thrd, newstate); \
- } else { \
- _waitingq_check_reqd = 1; \
- PTHREAD_SET_STATE(thrd, newstate); \
- } \
-}
-
/* Static variables: */
-static int volatile yield_on_unlock_thread = 0;
-static spinlock_t thread_link_list_lock = _SPINLOCK_INITIALIZER;
-
-/* Lock the thread list: */
-void
-_lock_thread_list()
-{
- /* Lock the thread list: */
- _SPINLOCK(&thread_link_list_lock);
-}
+static spinlock_t signal_lock = _SPINLOCK_INITIALIZER;
+unsigned int pending_sigs[NSIG];
+unsigned int handled_sigs[NSIG];
+int volatile check_pending = 0;
-/* Lock the thread list: */
+/* Initialize signal handling facility: */
void
-_unlock_thread_list()
+_thread_sig_init(void)
{
- /* Unlock the thread list: */
- _SPINUNLOCK(&thread_link_list_lock);
-
- /*
- * Check if a scheduler interrupt occurred while the thread
- * list was locked:
- */
- if (yield_on_unlock_thread) {
- /* Reset the interrupt flag: */
- yield_on_unlock_thread = 0;
+ int i;
- /* This thread has overstayed it's welcome: */
- sched_yield();
+ /* Clear pending and handled signal counts: */
+ for (i = 1; i < NSIG; i++) {
+ pending_sigs[i - 1] = 0;
+ handled_sigs[i - 1] = 0;
}
+
+ /* Clear the lock: */
+ signal_lock.access_lock = 0;
}
void
_thread_sig_handler(int sig, int code, struct sigcontext * scp)
{
- char c;
- int i;
- pthread_t pthread;
-
- /*
- * Check if the pthread kernel has unblocked signals (or is about to)
- * and was on its way into a _select when the current
- * signal interrupted it:
- */
- if (_thread_kern_in_select) {
- /* Cast the signal number to a character variable: */
- c = sig;
-
- /*
- * Write the signal number to the kernel pipe so that it will
- * be ready to read when this signal handler returns. This
- * means that the _select call will complete
- * immediately.
- */
- _thread_sys_write(_thread_kern_pipe[1], &c, 1);
- }
- /* Check if the signal requires a dump of thread information: */
- if (sig == SIGINFO)
- /* Dump thread information to file: */
- _thread_dump_info();
+ char c;
+ int i;
/* Check if an interval timer signal: */
- else if (sig == _SCHED_SIGNAL) {
- /* Check if the scheduler interrupt has come at an
- * unfortunate time which one of the threads is
- * modifying the thread list:
- */
- if (thread_link_list_lock.access_lock)
+ if (sig == _SCHED_SIGNAL) {
+ if (_thread_kern_in_sched != 0) {
/*
- * Set a flag so that the thread that has
- * the lock yields when it unlocks the
- * thread list:
+ * The scheduler is already running; ignore this
+ * signal.
*/
- yield_on_unlock_thread = 1;
-
+ }
/*
* Check if the scheduler interrupt has come when
* the currently running thread has deferred thread
- * scheduling.
+ * signals.
*/
- else if (_thread_run->sched_defer_count)
- _thread_run->yield_on_sched_undefer = 1;
+ else if (_thread_run->sig_defer_count > 0)
+ _thread_run->yield_on_sig_undefer = 1;
- /*
- * Check if the kernel has not been interrupted while
- * executing scheduler code:
- */
- else if (!_thread_kern_in_sched) {
+ else {
/*
* Schedule the next thread. This function is not
* expected to return because it will do a longjmp
@@ -152,6 +97,72 @@ _thread_sig_handler(int sig, int code, struct sigcontext * scp)
*/
PANIC("Returned to signal function from scheduler");
}
+ }
+ /*
+ * Check if the kernel has been interrupted while the scheduler
+ * is accessing the scheduling queues or if there is a currently
+ * running thread that has deferred signals.
+ */
+ else if ((_queue_signals != 0) || ((_thread_kern_in_sched == 0) &&
+ (_thread_run->sig_defer_count > 0))) {
+ /* Cast the signal number to a character variable: */
+ c = sig;
+
+ /*
+ * Write the signal number to the kernel pipe so that it will
+ * be ready to read when this signal handler returns.
+ */
+ _thread_sys_write(_thread_kern_pipe[1], &c, 1);
+
+ /* Indicate that there are queued signals in the pipe. */
+ _sigq_check_reqd = 1;
+ }
+ else {
+ if (_atomic_lock(&signal_lock.access_lock)) {
+ /* There is another signal handler running: */
+ pending_sigs[sig - 1]++;
+ check_pending = 1;
+ }
+ else {
+ /* It's safe to handle the signal now. */
+ _thread_sig_handle(sig, scp);
+
+ /* Reset the pending and handled count back to 0: */
+ pending_sigs[sig - 1] = 0;
+ handled_sigs[sig - 1] = 0;
+
+ signal_lock.access_lock = 0;
+ }
+
+ /* Enter a loop to process pending signals: */
+ while ((check_pending != 0) &&
+ (_atomic_lock(&signal_lock.access_lock) == 0)) {
+ check_pending = 0;
+ for (i = 1; i < NSIG; i++) {
+ if (pending_sigs[i - 1] > handled_sigs[i - 1])
+ _thread_sig_handle(i, scp);
+ }
+ signal_lock.access_lock = 0;
+ }
+ }
+}
+
+void
+_thread_sig_handle(int sig, struct sigcontext * scp)
+{
+ int i;
+ pthread_t pthread, pthread_next;
+
+ /* Check if the signal requires a dump of thread information: */
+ if (sig == SIGINFO)
+ /* Dump thread information to file: */
+ _thread_dump_info();
+
+ /* Check if an interval timer signal: */
+ else if (sig == _SCHED_SIGNAL) {
+ /*
+ * This shouldn't ever occur (should this panic?).
+ */
} else {
/* Check if a child has terminated: */
if (sig == SIGCHLD) {
@@ -183,10 +194,9 @@ _thread_sig_handler(int sig, int code, struct sigcontext * scp)
* Enter a loop to discard pending SIGCONT
* signals:
*/
- for (pthread = _thread_link_list;
- pthread != NULL;
- pthread = pthread->nxt)
+ TAILQ_FOREACH(pthread, &_thread_list, tle) {
sigdelset(&pthread->sigpend,SIGCONT);
+ }
}
/*
@@ -196,11 +206,18 @@ _thread_sig_handler(int sig, int code, struct sigcontext * scp)
* if there are multiple waiters, we'll give it to the
* first one we find.
*/
- TAILQ_FOREACH(pthread, &_waitingq, pqe) {
+ for (pthread = TAILQ_FIRST(&_waitingq);
+ pthread != NULL; pthread = pthread_next) {
+ /*
+ * Grab the next thread before possibly destroying
+ * the link entry.
+ */
+ pthread_next = TAILQ_NEXT(pthread, pqe);
+
if ((pthread->state == PS_SIGWAIT) &&
sigismember(pthread->data.sigwait, sig)) {
/* Change the state of the thread to run: */
- PTHREAD_SIG_NEW_STATE(pthread,PS_RUNNING);
+ PTHREAD_NEW_STATE(pthread,PS_RUNNING);
/* Return the signal number: */
pthread->signo = sig;
@@ -219,10 +236,13 @@ _thread_sig_handler(int sig, int code, struct sigcontext * scp)
* Enter a loop to process each thread in the linked
* list:
*/
- for (pthread = _thread_link_list; pthread != NULL;
- pthread = pthread->nxt) {
+ TAILQ_FOREACH(pthread, &_thread_list, tle) {
pthread_t pthread_saved = _thread_run;
+ /* Current thread inside critical region? */
+ if (_thread_run->sig_defer_count > 0)
+ pthread->sig_defer_count++;
+
_thread_run = pthread;
_thread_signal(pthread,sig);
@@ -232,6 +252,10 @@ _thread_sig_handler(int sig, int code, struct sigcontext * scp)
*/
_dispatch_signals();
_thread_run = pthread_saved;
+
+ /* Current thread inside critical region? */
+ if (_thread_run->sig_defer_count > 0)
+ pthread->sig_defer_count--;
}
}
@@ -284,7 +308,7 @@ _thread_signal(pthread_t pthread, int sig)
pthread->interrupted = 1;
/* Change the state of the thread to run: */
- PTHREAD_SIG_NEW_STATE(pthread,PS_RUNNING);
+ PTHREAD_NEW_STATE(pthread,PS_RUNNING);
/* Return the signal number: */
pthread->signo = sig;
@@ -296,6 +320,7 @@ _thread_signal(pthread_t pthread, int sig)
*/
case PS_FDR_WAIT:
case PS_FDW_WAIT:
+ case PS_POLL_WAIT:
case PS_SLEEP_WAIT:
case PS_SELECT_WAIT:
if (sig != SIGCHLD ||
@@ -303,8 +328,11 @@ _thread_signal(pthread_t pthread, int sig)
/* Flag the operation as interrupted: */
pthread->interrupted = 1;
+ if (pthread->flags & PTHREAD_FLAGS_IN_WORKQ)
+ PTHREAD_WORKQ_REMOVE(pthread);
+
/* Change the state of the thread to run: */
- PTHREAD_SIG_NEW_STATE(pthread,PS_RUNNING);
+ PTHREAD_NEW_STATE(pthread,PS_RUNNING);
/* Return the signal number: */
pthread->signo = sig;
@@ -319,7 +347,7 @@ _thread_signal(pthread_t pthread, int sig)
if (!sigismember(&pthread->sigmask, sig) &&
_thread_sigact[sig - 1].sa_handler != SIG_DFL) {
/* Change the state of the thread to run: */
- PTHREAD_SIG_NEW_STATE(pthread,PS_RUNNING);
+ PTHREAD_NEW_STATE(pthread,PS_RUNNING);
/* Return the signal number: */
pthread->signo = sig;
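The rewritten handler uses an atomic lock to elect one concurrent handler instance as the worker: a loser only counts its signal as pending, and whichever instance can retake the lock afterwards drains the pending set. A standalone sketch of that election-and-drain loop, substituting C11 atomics for the library's _atomic_lock and a trivial handle_one() for _thread_sig_handle (NSIG as provided by <signal.h> on BSD):

    #include <signal.h>
    #include <stdatomic.h>

    static atomic_flag sig_lock = ATOMIC_FLAG_INIT;
    static volatile sig_atomic_t pending[NSIG], handled[NSIG];
    static volatile sig_atomic_t check_pending;

    static void
    handle_one(int sig)
    {
            handled[sig - 1]++;     /* real code: _thread_sig_handle() */
    }

    static void
    handler(int sig)
    {
            int i;

            if (atomic_flag_test_and_set(&sig_lock)) {
                    /* Another instance is active: queue and fall through. */
                    pending[sig - 1]++;
                    check_pending = 1;
            } else {
                    handle_one(sig);
                    pending[sig - 1] = handled[sig - 1] = 0;
                    atomic_flag_clear(&sig_lock);
            }
            /* Whoever can take the lock drains signals queued meanwhile. */
            while (check_pending && !atomic_flag_test_and_set(&sig_lock)) {
                    check_pending = 0;
                    for (i = 1; i < NSIG; i++)
                            if (pending[i - 1] > handled[i - 1])
                                    handle_one(i);
                    atomic_flag_clear(&sig_lock);
            }
    }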
diff --git a/lib/libkse/thread/thr_sigaction.c b/lib/libkse/thread/thr_sigaction.c
index 73a3b21..ed390d1 100644
--- a/lib/libkse/thread/thr_sigaction.c
+++ b/lib/libkse/thread/thr_sigaction.c
@@ -29,6 +29,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
+ * $Id$
*/
#include <signal.h>
#include <errno.h>
@@ -77,6 +78,9 @@ sigaction(int sig, const struct sigaction * act, struct sigaction * oact)
gact.sa_mask = act->sa_mask;
gact.sa_flags = 0;
+ /* Ensure the scheduling signal is masked: */
+ sigaddset(&gact.sa_mask, _SCHED_SIGNAL);
+
/*
* Check if the signal handler is being set to
* the default or ignore handlers:
diff --git a/lib/libkse/thread/thr_sigwait.c b/lib/libkse/thread/thr_sigwait.c
index 98a5359..6c7d8d3 100644
--- a/lib/libkse/thread/thr_sigwait.c
+++ b/lib/libkse/thread/thr_sigwait.c
@@ -29,6 +29,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
+ * $Id$
*/
#include <signal.h>
#include <errno.h>
@@ -41,7 +42,7 @@ sigwait(const sigset_t * set, int *sig)
{
int ret = 0;
int i;
- sigset_t tempset;
+ sigset_t tempset, waitset;
struct sigaction act;
/*
@@ -51,17 +52,23 @@ sigwait(const sigset_t * set, int *sig)
act.sa_flags = SA_RESTART;
act.sa_mask = *set;
+ /* Ensure the scheduling signal is masked: */
+ sigaddset(&act.sa_mask, _SCHED_SIGNAL);
+
/*
- * These signals can't be waited on.
+ * Initialize the set of signals that will be waited on:
*/
- sigdelset(&act.sa_mask, SIGKILL);
- sigdelset(&act.sa_mask, SIGSTOP);
- sigdelset(&act.sa_mask, _SCHED_SIGNAL);
- sigdelset(&act.sa_mask, SIGCHLD);
- sigdelset(&act.sa_mask, SIGINFO);
+ waitset = *set;
+
+ /* These signals can't be waited on. */
+ sigdelset(&waitset, SIGKILL);
+ sigdelset(&waitset, SIGSTOP);
+ sigdelset(&waitset, _SCHED_SIGNAL);
+ sigdelset(&waitset, SIGCHLD);
+ sigdelset(&waitset, SIGINFO);
/* Check to see if a pending signal is in the wait mask. */
- if (tempset = (_thread_run->sigpend & act.sa_mask)) {
+ if (tempset = (_thread_run->sigpend & waitset)) {
/* Enter a loop to find a pending signal: */
for (i = 1; i < NSIG; i++) {
if (sigismember (&tempset, i))
@@ -81,17 +88,17 @@ sigwait(const sigset_t * set, int *sig)
* Enter a loop to find the signals that are SIG_DFL. For
* these signals we must install a dummy signal handler in
* order for the kernel to pass them in to us. POSIX says
- * that the application must explicitly install a dummy
+ * that the _application_ must explicitly install a dummy
* handler for signals that are SIG_IGN in order to sigwait
* on them. Note that SIG_IGN signals are left in the
* mask because a subsequent sigaction could enable an
* ignored signal.
*/
for (i = 1; i < NSIG; i++) {
- if (sigismember(&act.sa_mask, i)) {
- if (_thread_sigact[i - 1].sa_handler == SIG_DFL)
- if (_thread_sys_sigaction(i,&act,NULL) != 0)
- ret = -1;
+ if (sigismember(&waitset, i) &&
+ (_thread_sigact[i - 1].sa_handler == SIG_DFL)) {
+ if (_thread_sys_sigaction(i,&act,NULL) != 0)
+ ret = -1;
}
}
if (ret == 0) {
@@ -101,7 +108,7 @@ sigwait(const sigset_t * set, int *sig)
* mask is independent of the thread's signal mask
* and requires separate storage.
*/
- _thread_run->data.sigwait = &act.sa_mask;
+ _thread_run->data.sigwait = &waitset;
/* Wait for a signal: */
_thread_kern_sched_state(PS_SIGWAIT, __FILE__, __LINE__);
@@ -119,7 +126,7 @@ sigwait(const sigset_t * set, int *sig)
/* Restore the sigactions: */
act.sa_handler = SIG_DFL;
for (i = 1; i < NSIG; i++) {
- if (sigismember(&act.sa_mask, i) &&
+ if (sigismember(&waitset, i) &&
(_thread_sigact[i - 1].sa_handler == SIG_DFL)) {
if (_thread_sys_sigaction(i,&act,NULL) != 0)
ret = -1;
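From the caller's side, the net effect of these changes is that sigwait installs any needed dummy handlers itself and keeps the wait set separate from the handler mask. A minimal caller's-eye example of standard POSIX usage (blocking the signal first is good practice, even though this implementation tracks the wait set independently of the thread's signal mask):

    #include <pthread.h>
    #include <signal.h>
    #include <stdio.h>

    int
    main(void)
    {
            sigset_t set;
            int sig;

            sigemptyset(&set);
            sigaddset(&set, SIGUSR1);
            pthread_sigmask(SIG_BLOCK, &set, NULL);

            if (sigwait(&set, &sig) == 0)
                    printf("woke for signal %d\n", sig);
            return (0);
    }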
diff --git a/lib/libkse/thread/thr_suspend_np.c b/lib/libkse/thread/thr_suspend_np.c
index 6a6eaf4..02943bb 100644
--- a/lib/libkse/thread/thr_suspend_np.c
+++ b/lib/libkse/thread/thr_suspend_np.c
@@ -29,6 +29,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
+ * $Id$
*/
#include <errno.h>
#ifdef _THREAD_SAFE
@@ -52,20 +53,19 @@ pthread_suspend_np(pthread_t thread)
}
/*
- * Guard against preemption by a scheduling signal.
- * A change of thread state modifies the waiting
- * and priority queues.
+ * Defer signals to protect the scheduling queues from
+ * access by the signal handler:
*/
- _thread_kern_sched_defer();
+ _thread_kern_sig_defer();
/* Suspend the thread. */
PTHREAD_NEW_STATE(thread,PS_SUSPENDED);
/*
- * Reenable preemption and yield if a scheduling signal
- * occurred while in the critical region.
+ * Undefer and handle pending signals, yielding if
+ * necessary:
*/
- _thread_kern_sched_undefer();
+ _thread_kern_sig_undefer();
}
return(ret);
}
diff --git a/lib/libpthread/thread/Makefile.inc b/lib/libpthread/thread/Makefile.inc
index 16799cf..22e9548 100644
--- a/lib/libpthread/thread/Makefile.inc
+++ b/lib/libpthread/thread/Makefile.inc
@@ -1,4 +1,4 @@
-# $Id: Makefile.inc,v 1.16 1998/09/30 06:36:55 jb Exp $
+# $Id: Makefile.inc,v 1.17 1999/03/23 05:07:54 jb Exp $
# uthread sources
.PATH: ${.CURDIR}/uthread
@@ -73,8 +73,8 @@ SRCS+= \
uthread_once.c \
uthread_open.c \
uthread_pipe.c \
+ uthread_poll.c \
uthread_priority_queue.c \
- uthread_queue.c \
uthread_read.c \
uthread_readv.c \
uthread_recvfrom.c \
diff --git a/lib/libpthread/thread/thr_close.c b/lib/libpthread/thread/thr_close.c
index 7e21853..4f55b0a 100644
--- a/lib/libpthread/thread/thr_close.c
+++ b/lib/libpthread/thread/thr_close.c
@@ -29,7 +29,9 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
+ * $Id$
*/
+#include <errno.h>
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>
@@ -45,12 +47,21 @@ close(int fd)
int ret;
int status;
struct stat sb;
+ struct fd_table_entry *entry;
- /* Lock the file descriptor while the file is closed: */
- if ((ret = _FD_LOCK(fd, FD_RDWR, NULL)) == 0) {
- /* Get file descriptor status. */
- _thread_sys_fstat(fd, &sb);
-
+ if ((fd == _thread_kern_pipe[0]) || (fd == _thread_kern_pipe[1])) {
+ /*
+ * Don't allow silly programs to close the kernel pipe.
+ */
+ errno = EBADF;
+ ret = -1;
+ }
+ /*
+ * Lock the file descriptor while the file is closed and get
+ * the file descriptor status:
+ */
+ else if (((ret = _FD_LOCK(fd, FD_RDWR, NULL)) == 0) &&
+ ((ret = _thread_sys_fstat(fd, &sb)) == 0)) {
/*
* Check if the file should be left as blocking.
*
@@ -78,11 +89,14 @@ close(int fd)
_thread_sys_fcntl(fd, F_SETFL, flags & ~O_NONBLOCK);
}
+ /* XXX: Assumes well behaved threads. */
+ /* XXX: Defer real close to avoid race condition */
+ entry = _thread_fd_table[fd];
+ _thread_fd_table[fd] = NULL;
+ free(entry);
+
/* Close the file descriptor: */
ret = _thread_sys_close(fd);
-
- free(_thread_fd_table[fd]);
- _thread_fd_table[fd] = NULL;
}
return (ret);
}
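The new kernel-pipe guard keeps "silly programs" from closing descriptors the library depends on. Reduced to its essentials (array and function names illustrative, error path as in the hunk):

#include <errno.h>
#include <unistd.h>

/* Stand-in for the library's internal pipe descriptors. */
static int kern_pipe[2] = { -1, -1 };

static int
guarded_close(int fd)
{
	/* Refuse to close descriptors the threads library owns. */
	if (fd == kern_pipe[0] || fd == kern_pipe[1]) {
		errno = EBADF;
		return (-1);
	}
	return (close(fd));
}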
diff --git a/lib/libpthread/thread/thr_cond.c b/lib/libpthread/thread/thr_cond.c
index e0360dd..bacfb72 100644
--- a/lib/libpthread/thread/thr_cond.c
+++ b/lib/libpthread/thread/thr_cond.c
@@ -29,6 +29,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
+ * $Id$
*/
#include <stdlib.h>
#include <errno.h>
@@ -44,6 +45,28 @@ static inline pthread_t cond_queue_deq(pthread_cond_t);
static inline void cond_queue_remove(pthread_cond_t, pthread_t);
static inline void cond_queue_enq(pthread_cond_t, pthread_t);
+/* Reinitialize a condition variable to defaults. */
+int
+_cond_reinit(pthread_cond_t * cond)
+{
+ int ret = 0;
+
+ if (cond == NULL)
+ ret = EINVAL;
+ else if (*cond == NULL)
+ ret = pthread_cond_init(cond, NULL);
+ else {
+ /*
+ * Initialize the condition variable structure:
+ */
+ TAILQ_INIT(&(*cond)->c_queue);
+ (*cond)->c_flags = COND_FLAGS_INITED;
+ (*cond)->c_type = COND_TYPE_FAST;
+ (*cond)->c_mutex = NULL;
+ memset(&(*cond)->lock, 0, sizeof((*cond)->lock));
+ }
+ return (ret);
+}
int
pthread_cond_init(pthread_cond_t * cond, const pthread_condattr_t * cond_attr)
@@ -146,6 +169,9 @@ pthread_cond_wait(pthread_cond_t * cond, pthread_mutex_t * mutex)
*/
else if (*cond != NULL ||
(rval = pthread_cond_init(cond,NULL)) == 0) {
+ /* Lock the condition variable structure: */
+ _SPINLOCK(&(*cond)->lock);
+
/*
* If the condvar was statically allocated, properly
* initialize the tail queue.
@@ -155,9 +181,6 @@ pthread_cond_wait(pthread_cond_t * cond, pthread_mutex_t * mutex)
(*cond)->c_flags |= COND_FLAGS_INITED;
}
- /* Lock the condition variable structure: */
- _SPINLOCK(&(*cond)->lock);
-
/* Process according to condition variable type: */
switch ((*cond)->c_type) {
/* Fast condition variable: */
@@ -247,6 +270,9 @@ pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
*/
else if (*cond != NULL ||
(rval = pthread_cond_init(cond,NULL)) == 0) {
+ /* Lock the condition variable structure: */
+ _SPINLOCK(&(*cond)->lock);
+
/*
* If the condvar was statically allocated, properly
* initialize the tail queue.
@@ -256,10 +282,6 @@ pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
(*cond)->c_flags |= COND_FLAGS_INITED;
}
-
- /* Lock the condition variable structure: */
- _SPINLOCK(&(*cond)->lock);
-
/* Process according to condition variable type: */
switch ((*cond)->c_type) {
/* Fast condition variable: */
@@ -375,6 +397,12 @@ pthread_cond_signal(pthread_cond_t * cond)
if (cond == NULL || *cond == NULL)
rval = EINVAL;
else {
+ /*
+ * Defer signals to protect the scheduling queues
+ * from access by the signal handler:
+ */
+ _thread_kern_sig_defer();
+
/* Lock the condition variable structure: */
_SPINLOCK(&(*cond)->lock);
@@ -409,6 +437,12 @@ pthread_cond_signal(pthread_cond_t * cond)
/* Unlock the condition variable structure: */
_SPINUNLOCK(&(*cond)->lock);
+
+ /*
+ * Undefer and handle pending signals, yielding if
+ * necessary:
+ */
+ _thread_kern_sig_undefer();
}
/* Return the completion status: */
@@ -425,14 +459,10 @@ pthread_cond_broadcast(pthread_cond_t * cond)
rval = EINVAL;
else {
/*
- * Guard against preemption by a scheduling signal.
- * A change of thread state modifies the waiting
- * and priority queues. In addition, we must assure
- * that all threads currently waiting on the condition
- * variable are signaled and are not timedout by a
- * scheduling signal that causes a preemption.
+ * Defer signals to protect the scheduling queues
+ * from access by the signal handler:
*/
- _thread_kern_sched_defer();
+ _thread_kern_sig_defer();
/* Lock the condition variable structure: */
_SPINLOCK(&(*cond)->lock);
@@ -468,9 +498,11 @@ pthread_cond_broadcast(pthread_cond_t * cond)
/* Unlock the condition variable structure: */
_SPINUNLOCK(&(*cond)->lock);
- /* Reenable preemption and yield if necessary.
+ /*
+ * Undefer and handle pending signals, yielding if
+ * necessary:
*/
- _thread_kern_sched_undefer();
+ _thread_kern_sig_undefer();
}
/* Return the completion status: */
@@ -488,7 +520,7 @@ cond_queue_deq(pthread_cond_t cond)
if ((pthread = TAILQ_FIRST(&cond->c_queue)) != NULL) {
TAILQ_REMOVE(&cond->c_queue, pthread, qe);
- pthread->flags &= ~PTHREAD_FLAGS_QUEUED;
+ pthread->flags &= ~PTHREAD_FLAGS_IN_CONDQ;
}
return(pthread);
@@ -507,9 +539,9 @@ cond_queue_remove(pthread_cond_t cond, pthread_t pthread)
* guard against removing the thread from the queue if
* it isn't in the queue.
*/
- if (pthread->flags & PTHREAD_FLAGS_QUEUED) {
+ if (pthread->flags & PTHREAD_FLAGS_IN_CONDQ) {
TAILQ_REMOVE(&cond->c_queue, pthread, qe);
- pthread->flags &= ~PTHREAD_FLAGS_QUEUED;
+ pthread->flags &= ~PTHREAD_FLAGS_IN_CONDQ;
}
}
@@ -535,6 +567,6 @@ cond_queue_enq(pthread_cond_t cond, pthread_t pthread)
tid = TAILQ_NEXT(tid, qe);
TAILQ_INSERT_BEFORE(tid, pthread, qe);
}
- pthread->flags |= PTHREAD_FLAGS_QUEUED;
+ pthread->flags |= PTHREAD_FLAGS_IN_CONDQ;
}
#endif
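cond_queue_enq keeps the wait queue sorted so that pthread_cond_signal wakes the highest priority waiter first. A self-contained sketch of that insertion discipline with the <sys/queue.h> macros (types and field names invented):

#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>

struct waiter {
	int			prio;
	TAILQ_ENTRY(waiter)	qe;
};
TAILQ_HEAD(waitq, waiter);

/* Insert 'w' before the first waiter of strictly lower priority, so
 * the queue stays highest-priority-first with FIFO order on ties. */
static void
waitq_enq(struct waitq *q, struct waiter *w)
{
	struct waiter *tid;

	TAILQ_FOREACH(tid, q, qe)
		if (tid->prio < w->prio)
			break;
	if (tid == NULL)
		TAILQ_INSERT_TAIL(q, w, qe);
	else
		TAILQ_INSERT_BEFORE(tid, w, qe);
}

int
main(void)
{
	struct waitq q = TAILQ_HEAD_INITIALIZER(q);
	int prios[] = { 5, 20, 10 };
	struct waiter *w;
	size_t i;

	for (i = 0; i < sizeof(prios) / sizeof(prios[0]); i++) {
		if ((w = malloc(sizeof(*w))) == NULL)
			return (1);
		w->prio = prios[i];
		waitq_enq(&q, w);
	}
	TAILQ_FOREACH(w, &q, qe)
		printf("%d\n", w->prio);	/* prints 20, 10, 5 */
	return (0);
}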
diff --git a/lib/libpthread/thread/thr_create.c b/lib/libpthread/thread/thr_create.c
index 438e527..f5e0b63 100644
--- a/lib/libpthread/thread/thr_create.c
+++ b/lib/libpthread/thread/thr_create.c
@@ -29,6 +29,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
+ * $Id$
*/
#include <errno.h>
#include <stdlib.h>
@@ -171,7 +172,7 @@ pthread_create(pthread_t * thread, const pthread_attr_t * attr,
new_thread->inherited_priority = 0;
/* Initialise the join queue for the new thread: */
- _thread_queue_init(&(new_thread->join_queue));
+ TAILQ_INIT(&(new_thread->join_queue));
/* Initialize the mutex queue: */
TAILQ_INIT(&new_thread->mutexq);
@@ -179,46 +180,39 @@ pthread_create(pthread_t * thread, const pthread_attr_t * attr,
/* Initialise hooks in the thread structure: */
new_thread->specific_data = NULL;
new_thread->cleanup = NULL;
- new_thread->queue = NULL;
- new_thread->qnxt = NULL;
new_thread->flags = 0;
+ new_thread->poll_data.nfds = 0;
+ new_thread->poll_data.fds = NULL;
- /* Lock the thread list: */
- _lock_thread_list();
+ /*
+ * Defer signals to protect the scheduling queues
+ * from access by the signal handler:
+ */
+ _thread_kern_sig_defer();
/*
* Check if the garbage collector thread
* needs to be started.
*/
- f_gc = (_thread_link_list == _thread_initial);
+ f_gc = (TAILQ_FIRST(&_thread_list) == _thread_initial);
/* Add the thread to the linked list of all threads: */
- new_thread->nxt = _thread_link_list;
- _thread_link_list = new_thread;
-
- /* Unlock the thread list: */
- _unlock_thread_list();
-
- /*
- * Guard against preemption by a scheduling signal.
- * A change of thread state modifies the waiting
- * and priority queues.
- */
- _thread_kern_sched_defer();
+ TAILQ_INSERT_HEAD(&_thread_list, new_thread, tle);
if (pattr->suspend == PTHREAD_CREATE_SUSPENDED) {
new_thread->state = PS_SUSPENDED;
PTHREAD_WAITQ_INSERT(new_thread);
- } else {
+ }
+ else {
new_thread->state = PS_RUNNING;
PTHREAD_PRIOQ_INSERT_TAIL(new_thread);
}
/*
- * Reenable preemption and yield if a scheduling
- * signal occurred while in the critical region.
+ * Undefer and handle pending signals, yielding
+ * if necessary.
*/
- _thread_kern_sched_undefer();
+ _thread_kern_sig_undefer();
/* Return a pointer to the thread structure: */
(*thread) = new_thread;
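Each new thread now starts with an empty poll_data (nfds of 0 and a NULL fds pointer); the array is only allocated when the thread first blocks in the select/poll wrappers. A hypothetical sketch of that on-demand growth (struct and field names invented; the diff's poll_data plays this role):

#include <poll.h>
#include <stdlib.h>

struct poll_cache {
	int		capacity;	/* slots allocated in fds[] */
	struct pollfd	*fds;		/* NULL until first use */
};

/* Grow the cached pollfd array to at least 'need' slots.
 * Returns 0 on success, -1 if allocation fails. */
static int
poll_cache_reserve(struct poll_cache *pc, int need)
{
	struct pollfd *p;

	if (pc->capacity >= need)
		return (0);
	p = realloc(pc->fds, (size_t)need * sizeof(*p));
	if (p == NULL)
		return (-1);
	pc->fds = p;
	pc->capacity = need;
	return (0);
}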
diff --git a/lib/libpthread/thread/thr_detach.c b/lib/libpthread/thread/thr_detach.c
index 05da832..73a58de2 100644
--- a/lib/libpthread/thread/thr_detach.c
+++ b/lib/libpthread/thread/thr_detach.c
@@ -29,6 +29,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
+ * $Id$
*/
#include <errno.h>
#ifdef _THREAD_SAFE
@@ -53,23 +54,25 @@ pthread_detach(pthread_t pthread)
pthread->attr.flags |= PTHREAD_DETACHED;
/*
- * Guard against preemption by a scheduling signal.
- * A change of thread state modifies the waiting
- * and priority queues.
+ * Defer signals to protect the scheduling queues from
+ * access by the signal handler:
*/
- _thread_kern_sched_defer();
+ _thread_kern_sig_defer();
/* Enter a loop to bring all threads off the join queue: */
- while ((next_thread = _thread_queue_deq(&pthread->join_queue)) != NULL) {
+ while ((next_thread = TAILQ_FIRST(&pthread->join_queue)) != NULL) {
+ /* Remove the thread from the queue: */
+ TAILQ_REMOVE(&pthread->join_queue, next_thread, qe);
+
/* Make the thread run: */
PTHREAD_NEW_STATE(next_thread,PS_RUNNING);
}
/*
- * Reenable preemption and yield if a scheduling signal
- * occurred while in the critical region.
+ * Undefer and handle pending signals, yielding if a
+ * scheduling signal occurred while in the critical region.
*/
- _thread_kern_sched_undefer();
+ _thread_kern_sig_undefer();
} else
/* Return an error: */
rval = EINVAL;
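pthread_detach and pthread_exit now drain the join queue with the same two-step idiom, removing each waiter before waking it so the queue linkage is never reused while the thread is runnable. In isolation (types invented):

#include <sys/queue.h>

struct thr {
	TAILQ_ENTRY(thr) qe;
};
TAILQ_HEAD(thrq, thr);

/* Pop every waiter off the queue; 'wake' supplies the state change. */
static void
join_queue_drain(struct thrq *q, void (*wake)(struct thr *))
{
	struct thr *t;

	while ((t = TAILQ_FIRST(q)) != NULL) {
		TAILQ_REMOVE(q, t, qe);	/* unlink before waking */
		wake(t);
	}
}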
diff --git a/lib/libpthread/thread/thr_exit.c b/lib/libpthread/thread/thr_exit.c
index c54dbda..a5f706d 100644
--- a/lib/libpthread/thread/thr_exit.c
+++ b/lib/libpthread/thread/thr_exit.c
@@ -29,6 +29,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
+ * $Id$
*/
#include <errno.h>
#include <unistd.h>
@@ -92,7 +93,12 @@ _thread_exit(char *fname, int lineno, char *string)
_thread_sys_write(2, s, strlen(s));
/* Force this process to exit: */
+ /* XXX - Do we want abort to be conditional on _PTHREADS_INVARIANTS? */
+#if defined(_PTHREADS_INVARIANTS)
+ abort();
+#else
_exit(1);
+#endif
}
void
@@ -129,22 +135,24 @@ pthread_exit(void *status)
}
/*
- * Guard against preemption by a scheduling signal. A change of
- * thread state modifies the waiting and priority queues.
+ * Defer signals to protect the scheduling queues from access
+ * by the signal handler:
*/
- _thread_kern_sched_defer();
+ _thread_kern_sig_defer();
/* Check if there are any threads joined to this one: */
- while ((pthread = _thread_queue_deq(&(_thread_run->join_queue))) != NULL) {
+ while ((pthread = TAILQ_FIRST(&(_thread_run->join_queue))) != NULL) {
+ /* Remove the thread from the queue: */
+ TAILQ_REMOVE(&_thread_run->join_queue, pthread, qe);
+
/* Wake the joined thread and let it detach this thread: */
PTHREAD_NEW_STATE(pthread,PS_RUNNING);
}
/*
- * Reenable preemption and yield if a scheduling signal
- * occurred while in the critical region.
+ * Undefer and handle pending signals, yielding if necessary:
*/
- _thread_kern_sched_undefer();
+ _thread_kern_sig_undefer();
/*
* Lock the garbage collector mutex to ensure that the garbage
@@ -154,8 +162,21 @@ pthread_exit(void *status)
PANIC("Cannot lock gc mutex");
/* Add this thread to the list of dead threads. */
- _thread_run->nxt_dead = _thread_dead;
- _thread_dead = _thread_run;
+ TAILQ_INSERT_HEAD(&_dead_list, _thread_run, dle);
+
+ /*
+ * Defer signals to protect the scheduling queues from access
+ * by the signal handler:
+ */
+ _thread_kern_sig_defer();
+
+ /* Remove this thread from the thread list: */
+ TAILQ_REMOVE(&_thread_list, _thread_run, tle);
+
+ /*
+ * Undefer and handle pending signals, yielding if necessary:
+ */
+ _thread_kern_sig_undefer();
/*
* Signal the garbage collector thread that there is something
diff --git a/lib/libpthread/thread/thr_find_thread.c b/lib/libpthread/thread/thr_find_thread.c
index e4a59a0..a010a7e 100644
--- a/lib/libpthread/thread/thr_find_thread.c
+++ b/lib/libpthread/thread/thr_find_thread.c
@@ -29,6 +29,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
+ * $Id$
*/
#include <errno.h>
#ifdef _THREAD_SAFE
@@ -46,20 +47,20 @@ _find_thread(pthread_t pthread)
/* Invalid thread: */
return(EINVAL);
- /* Lock the thread list: */
- _lock_thread_list();
-
- /* Point to the first thread in the list: */
- pthread1 = _thread_link_list;
+ /*
+ * Defer signals to protect the thread list from access
+ * by the signal handler:
+ */
+ _thread_kern_sig_defer();
- /* Search for the thread to join to: */
- while (pthread1 != NULL && pthread1 != pthread) {
- /* Point to the next thread: */
- pthread1 = pthread1->nxt;
+ /* Search for the specified thread: */
+ TAILQ_FOREACH(pthread1, &_thread_list, tle) {
+ if (pthread == pthread1)
+ break;
}
- /* Unlock the thread list: */
- _unlock_thread_list();
+ /* Undefer and handle pending signals, yielding if necessary: */
+ _thread_kern_sig_undefer();
/* Return zero if the thread exists: */
return ((pthread1 != NULL) ? 0:ESRCH);
@@ -83,13 +84,10 @@ _find_dead_thread(pthread_t pthread)
if (pthread_mutex_lock(&_gc_mutex) != 0)
PANIC("Cannot lock gc mutex");
- /* Point to the first thread in the list: */
- pthread1 = _thread_dead;
-
- /* Search for the thread to join to: */
- while (pthread1 != NULL && pthread1 != pthread) {
- /* Point to the next thread: */
- pthread1 = pthread1->nxt_dead;
+ /* Search for the specified thread: */
+ TAILQ_FOREACH(pthread1, &_dead_list, dle) {
+ if (pthread1 == pthread)
+ break;
}
/* Unlock the garbage collector mutex: */
diff --git a/lib/libpthread/thread/thr_fork.c b/lib/libpthread/thread/thr_fork.c
index 5582c1e..8d3322e 100644
--- a/lib/libpthread/thread/thr_fork.c
+++ b/lib/libpthread/thread/thr_fork.c
@@ -29,6 +29,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
+ * $Id$
*/
#include <errno.h>
#include <string.h>
@@ -44,10 +45,13 @@ fork(void)
int i, flags;
pid_t ret;
pthread_t pthread;
- pthread_t pthread_next;
+ pthread_t pthread_save;
- /* Lock the thread list: */
- _lock_thread_list();
+ /*
+ * Defer signals to protect the scheduling queues from access
+ * by the signal handler:
+ */
+ _thread_kern_sig_defer();
/* Fork a new process: */
if ((ret = _thread_sys_fork()) != 0) {
@@ -88,45 +92,79 @@ fork(void)
else if (_thread_sys_fcntl(_thread_kern_pipe[1], F_SETFL, flags | O_NONBLOCK) == -1) {
/* Abort this application: */
abort();
+ }
+ /* Reinitialize the GC mutex: */
+ else if (_mutex_reinit(&_gc_mutex) != 0) {
+ /* Abort this application: */
+ PANIC("Cannot initialize GC mutex for forked process");
+ }
+ /* Reinitialize the GC condition variable: */
+ else if (_cond_reinit(&_gc_cond) != 0) {
+ /* Abort this application: */
+ PANIC("Cannot initialize GC condvar for forked process");
+ }
/* Initialize the ready queue: */
- } else if (_pq_init(&_readyq, PTHREAD_MIN_PRIORITY,
- PTHREAD_MAX_PRIORITY) != 0) {
+ else if (_pq_init(&_readyq) != 0) {
/* Abort this application: */
- PANIC("Cannot allocate priority ready queue.");
+ PANIC("Cannot initialize priority ready queue.");
} else {
- /* Point to the first thread in the list: */
- pthread = _thread_link_list;
-
/*
* Enter a loop to remove all threads other than
* the running thread from the thread list:
*/
+ pthread = TAILQ_FIRST(&_thread_list);
while (pthread != NULL) {
- pthread_next = pthread->nxt;
- if (pthread == _thread_run) {
- _thread_link_list = pthread;
- pthread->nxt = NULL;
- } else {
- if (pthread->attr.stackaddr_attr ==
- NULL && pthread->stack != NULL)
+ /* Save the thread to be freed: */
+ pthread_save = pthread;
+
+ /*
+ * Advance to the next thread before
+ * destroying the current thread:
+ */
+ pthread = TAILQ_NEXT(pthread, tle);
+
+ /* Make sure this isn't the running thread: */
+ if (pthread_save != _thread_run) {
+ /* Remove this thread from the list: */
+ TAILQ_REMOVE(&_thread_list,
+ pthread_save, tle);
+
+ if (pthread_save->attr.stackaddr_attr ==
+ NULL && pthread_save->stack != NULL)
/*
* Free the stack of the
* dead thread:
*/
- free(pthread->stack);
+ free(pthread_save->stack);
- if (pthread->specific_data != NULL)
- free(pthread->specific_data);
+ if (pthread_save->specific_data != NULL)
+ free(pthread_save->specific_data);
- free(pthread);
- }
+ if (pthread_save->poll_data.fds != NULL)
+ free(pthread_save->poll_data.fds);
- /* Point to the next thread: */
- pthread = pthread_next;
+ free(pthread_save);
+ }
}
- /* Re-init the waiting queues. */
+ /* Re-init the dead thread list: */
+ TAILQ_INIT(&_dead_list);
+
+ /* Re-init the waiting and work queues. */
TAILQ_INIT(&_waitingq);
+ TAILQ_INIT(&_workq);
+
+ /* Re-init the threads mutex queue: */
+ TAILQ_INIT(&_thread_run->mutexq);
+
+ /* No spinlocks yet: */
+ _spinblock_count = 0;
+
+ /* Don't queue signals yet: */
+ _queue_signals = 0;
+
+ /* Initialize signal handling: */
+ _thread_sig_init();
/* Initialize the scheduling switch hook routine: */
_sched_switch_hook = NULL;
@@ -147,15 +185,17 @@ fork(void)
_thread_fd_table[i]->w_lockcount = 0;
/* Initialise the read/write queues: */
- _thread_queue_init(&_thread_fd_table[i]->r_queue);
- _thread_queue_init(&_thread_fd_table[i]->w_queue);
+ TAILQ_INIT(&_thread_fd_table[i]->r_queue);
+ TAILQ_INIT(&_thread_fd_table[i]->w_queue);
}
}
}
}
- /* Unock the thread list: */
- _unlock_thread_list();
+ /*
+ * Undefer and handle pending signals, yielding if necessary:
+ */
+ _thread_kern_sig_undefer();
/* Return the process ID: */
return (ret);
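The fork cleanup loop above uses the save-the-next-pointer idiom: the iterator is advanced before the current element can be freed. The generic form (names invented):

#include <sys/queue.h>
#include <stdlib.h>

struct node {
	TAILQ_ENTRY(node) link;
};
TAILQ_HEAD(nodeq, node);

/* Free every element except 'keep', advancing before each free()
 * so the walk never touches released memory. */
static void
nodeq_purge_except(struct nodeq *q, struct node *keep)
{
	struct node *n, *next;

	for (n = TAILQ_FIRST(q); n != NULL; n = next) {
		next = TAILQ_NEXT(n, link);
		if (n != keep) {
			TAILQ_REMOVE(q, n, link);
			free(n);
		}
	}
}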
diff --git a/lib/libpthread/thread/thr_gc.c b/lib/libpthread/thread/thr_gc.c
index 510c51f..e29e29b 100644
--- a/lib/libpthread/thread/thr_gc.c
+++ b/lib/libpthread/thread/thr_gc.c
@@ -29,7 +29,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: uthread_gc.c,v 1.2 1998/09/30 19:17:51 dt Exp $
+ * $Id: uthread_gc.c,v 1.3 1999/03/23 05:07:55 jb Exp $
*
* Garbage collector thread. Frees memory allocated for dead threads.
*
@@ -74,20 +74,26 @@ _thread_gc(pthread_addr_t arg)
/* Dump thread info to file. */
_thread_dump_info();
- /* Lock the thread list: */
- _lock_thread_list();
+ /*
+ * Defer signals to protect the scheduling queues from
+ * access by the signal handler:
+ */
+ _thread_kern_sig_defer();
/* Check if this is the last running thread: */
- if (_thread_link_list == _thread_run &&
- _thread_link_list->nxt == NULL)
+ if (TAILQ_FIRST(&_thread_list) == _thread_run &&
+ TAILQ_NEXT(_thread_run, tle) == NULL)
/*
* This is the last thread, so it can exit
* now.
*/
f_done = 1;
- /* Unlock the thread list: */
- _unlock_thread_list();
+ /*
+ * Undefer and handle pending signals, yielding if
+ * necessary:
+ */
+ _thread_kern_sig_undefer();
/*
* Lock the garbage collector mutex which ensures that
@@ -100,48 +106,24 @@ _thread_gc(pthread_addr_t arg)
p_stack = NULL;
pthread_cln = NULL;
- /* Point to the first dead thread (if there are any): */
- pthread = _thread_dead;
-
- /* There is no previous dead thread: */
- pthread_prv = NULL;
-
/*
* Enter a loop to search for the first dead thread that
* has memory to free.
*/
- while (p_stack == NULL && pthread_cln == NULL &&
- pthread != NULL) {
- /* Save a pointer to the next thread: */
- pthread_nxt = pthread->nxt_dead;
-
+ for (pthread = TAILQ_FIRST(&_dead_list);
+ p_stack == NULL && pthread_cln == NULL && pthread != NULL;
+ pthread = TAILQ_NEXT(pthread, dle)) {
/* Check if the initial thread: */
- if (pthread == _thread_initial)
+ if (pthread == _thread_initial) {
/* Don't destroy the initial thread. */
- pthread_prv = pthread;
-
+ }
/*
* Check if this thread has detached:
*/
else if ((pthread->attr.flags &
PTHREAD_DETACHED) != 0) {
- /*
- * Check if there is no previous dead
- * thread:
- */
- if (pthread_prv == NULL)
- /*
- * The dead thread is at the head
- * of the list:
- */
- _thread_dead = pthread_nxt;
- else
- /*
- * The dead thread is not at the
- * head of the list:
- */
- pthread_prv->nxt_dead =
- pthread->nxt_dead;
+ /* Remove this thread from the dead list: */
+ TAILQ_REMOVE(&_dead_list, pthread, dle);
/*
* Check if the stack was not specified by
@@ -162,14 +144,12 @@ _thread_gc(pthread_addr_t arg)
* be freed outside the locks:
*/
pthread_cln = pthread;
+
} else {
/*
* This thread has not detached, so do
- * not destroy it:
- */
- pthread_prv = pthread;
-
- /*
+ * not destroy it.
+ *
* Check if the stack was not specified by
* the caller to pthread_create and has not
* been destroyed yet:
@@ -189,9 +169,6 @@ _thread_gc(pthread_addr_t arg)
pthread->stack = NULL;
}
}
-
- /* Point to the next thread: */
- pthread = pthread_nxt;
}
/*
@@ -229,52 +206,12 @@ _thread_gc(pthread_addr_t arg)
*/
if (p_stack != NULL)
free(p_stack);
- if (pthread_cln != NULL) {
- /* Lock the thread list: */
- _lock_thread_list();
-
- /*
- * Check if the thread is at the head of the
- * linked list.
- */
- if (_thread_link_list == pthread_cln)
- /* There is no previous thread: */
- _thread_link_list = pthread_cln->nxt;
- else {
- /* Point to the first thread in the list: */
- pthread = _thread_link_list;
-
- /*
- * Enter a loop to find the thread in the
- * linked list before the thread that is
- * about to be freed.
- */
- while (pthread != NULL &&
- pthread->nxt != pthread_cln)
- /* Point to the next thread: */
- pthread = pthread->nxt;
-
- /* Check that a previous thread was found: */
- if (pthread != NULL) {
- /*
- * Point the previous thread to
- * the one after the thread being
- * freed:
- */
- pthread->nxt = pthread_cln->nxt;
- }
- }
-
- /* Unlock the thread list: */
- _unlock_thread_list();
-
+ if (pthread_cln != NULL)
/*
* Free the memory allocated for the thread
* structure.
*/
free(pthread_cln);
- }
-
}
return (NULL);
}
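The collector unlinks a dead thread's resources while the locks are held but calls free() only after dropping them (note p_stack and pthread_cln being carried out of the locked region). That discipline in miniature, using a plain pthread mutex purely for illustration:

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t gc_lock = PTHREAD_MUTEX_INITIALIZER;

/* Detach the resource under the lock; release the memory outside it
 * so free() can never run while the shared structures are locked. */
static void
reap(void **slot)
{
	void *doomed;

	pthread_mutex_lock(&gc_lock);
	doomed = *slot;
	*slot = NULL;
	pthread_mutex_unlock(&gc_lock);

	free(doomed);
}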
diff --git a/lib/libpthread/thread/thr_info.c b/lib/libpthread/thread/thr_info.c
index d2d97da..8c0787a 100644
--- a/lib/libpthread/thread/thr_info.c
+++ b/lib/libpthread/thread/thr_info.c
@@ -29,6 +29,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
+ * $Id$
*/
#include <stdio.h>
#include <fcntl.h>
@@ -55,6 +56,7 @@ static const struct s_thread_info thread_info[] = {
{PS_FDR_WAIT , "Waiting for read"},
{PS_FDW_WAIT , "Waiting for write"},
{PS_FILE_WAIT , "Waiting for FILE lock"},
+ {PS_POLL_WAIT , "Waiting on poll"},
{PS_SELECT_WAIT , "Waiting on select"},
{PS_SLEEP_WAIT , "Sleeping"},
{PS_WAIT_WAIT , "Waiting process"},
@@ -108,8 +110,7 @@ _thread_dump_info(void)
_thread_sys_write(fd, s, strlen(s));
/* Enter a loop to report each thread in the global list: */
- for (pthread = _thread_link_list; pthread != NULL;
- pthread = pthread->nxt) {
+ TAILQ_FOREACH(pthread, &_thread_list, tle) {
/* Find the state: */
for (j = 0; j < (sizeof(thread_info) /
sizeof(struct s_thread_info)) - 1; j++)
@@ -214,8 +215,29 @@ _thread_dump_info(void)
_thread_sys_write(fd, s, strlen(s));
}
+ /* Output a header for threads in the work queue: */
+ strcpy(s, "\n\n=============\nTHREADS IN WORKQ\n\n");
+ _thread_sys_write(fd, s, strlen(s));
+
+ /* Enter a loop to report each thread in the work queue: */
+ TAILQ_FOREACH (pthread, &_workq, qe) {
+ /* Find the state: */
+ for (j = 0; j < (sizeof(thread_info) /
+ sizeof(struct s_thread_info)) - 1; j++)
+ if (thread_info[j].state == pthread->state)
+ break;
+ /* Output a record for the current thread: */
+ snprintf(s, sizeof(s),
+ "--------------------\nThread %p (%s) prio %3d state %s [%s:%d]\n",
+ pthread, (pthread->name == NULL) ?
+ "":pthread->name, pthread->base_priority,
+ thread_info[j].name,
+ pthread->fname,pthread->lineno);
+ _thread_sys_write(fd, s, strlen(s));
+ }
+
/* Check if there are no dead threads: */
- if (_thread_dead == NULL) {
+ if (TAILQ_FIRST(&_dead_list) == NULL) {
/* Output a record: */
strcpy(s, "\n\nTHERE ARE NO DEAD THREADS\n");
_thread_sys_write(fd, s, strlen(s));
@@ -228,8 +250,7 @@ _thread_dump_info(void)
* Enter a loop to report each thread in the global
* dead thread list:
*/
- for (pthread = _thread_dead; pthread != NULL;
- pthread = pthread->nxt_dead) {
+ TAILQ_FOREACH(pthread, &_dead_list, dle) {
/* Output a record for the current thread: */
snprintf(s, sizeof(s),
"Thread %p prio %3d [%s:%d]\n",
diff --git a/lib/libpthread/thread/thr_init.c b/lib/libpthread/thread/thr_init.c
index e4411ce..d470301 100644
--- a/lib/libpthread/thread/thr_init.c
+++ b/lib/libpthread/thread/thr_init.c
@@ -29,6 +29,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
+ * $Id$
*/
/* Allocate space for global thread variables here: */
@@ -39,7 +40,9 @@
#include <string.h>
#include <fcntl.h>
#include <paths.h>
+#include <poll.h>
#include <unistd.h>
+#include <sys/sysctl.h>
#include <sys/time.h>
#include <sys/ttycom.h>
#ifdef _THREAD_SAFE
@@ -81,6 +84,9 @@ _thread_init(void)
int fd;
int flags;
int i;
+ int len;
+ int mib[2];
+ struct clockinfo clockinfo;
struct sigaction act;
/* Check if this function has already been called: */
@@ -147,8 +153,8 @@ _thread_init(void)
/* Abort this application: */
PANIC("Cannot get kernel write pipe flags");
}
- /* Initialize the ready queue: */
- else if (_pq_init(&_readyq, PTHREAD_MIN_PRIORITY, PTHREAD_MAX_PRIORITY) != 0) {
+ /* Allocate and initialize the ready queue: */
+ else if (_pq_alloc(&_readyq, PTHREAD_MIN_PRIORITY, PTHREAD_MAX_PRIORITY) != 0) {
/* Abort this application: */
PANIC("Cannot allocate priority ready queue.");
}
@@ -165,8 +171,9 @@ _thread_init(void)
_thread_kern_thread.flags = PTHREAD_FLAGS_PRIVATE;
memset(_thread_initial, 0, sizeof(struct pthread));
- /* Initialize the waiting queue: */
+ /* Initialize the waiting and work queues: */
TAILQ_INIT(&_waitingq);
+ TAILQ_INIT(&_workq);
/* Initialize the scheduling switch hook routine: */
_sched_switch_hook = NULL;
@@ -186,23 +193,23 @@ _thread_init(void)
_thread_initial->state = PS_RUNNING;
/* Initialise the queue: */
- _thread_queue_init(&(_thread_initial->join_queue));
+ TAILQ_INIT(&(_thread_initial->join_queue));
/* Initialize the owned mutex queue and count: */
TAILQ_INIT(&(_thread_initial->mutexq));
_thread_initial->priority_mutex_count = 0;
/* Initialise the rest of the fields: */
- _thread_initial->sched_defer_count = 0;
- _thread_initial->yield_on_sched_undefer = 0;
+ _thread_initial->poll_data.nfds = 0;
+ _thread_initial->poll_data.fds = NULL;
+ _thread_initial->sig_defer_count = 0;
+ _thread_initial->yield_on_sig_undefer = 0;
_thread_initial->specific_data = NULL;
_thread_initial->cleanup = NULL;
- _thread_initial->queue = NULL;
- _thread_initial->qnxt = NULL;
- _thread_initial->nxt = NULL;
_thread_initial->flags = 0;
_thread_initial->error = 0;
- _thread_link_list = _thread_initial;
+ TAILQ_INIT(&_thread_list);
+ TAILQ_INSERT_HEAD(&_thread_list, _thread_initial, tle);
_thread_run = _thread_initial;
/* Initialise the global signal action structure: */
@@ -210,6 +217,9 @@ _thread_init(void)
act.sa_handler = (void (*) ()) _thread_sig_handler;
act.sa_flags = 0;
+ /* Initialize signal handling: */
+ _thread_sig_init();
+
/* Enter a loop to get the existing signal status: */
for (i = 1; i < NSIG; i++) {
/* Check for signals which cannot be trapped: */
@@ -241,6 +251,13 @@ _thread_init(void)
PANIC("Cannot initialise signal handler");
}
+ /* Get the kernel clockrate: */
+ mib[0] = CTL_KERN;
+ mib[1] = KERN_CLOCKRATE;
+ len = sizeof (struct clockinfo);
+ if (sysctl(mib, 2, &clockinfo, &len, NULL, 0) == 0)
+ _clock_res_nsec = clockinfo.tick * 1000;
+
/* Get the table size: */
if ((_thread_dtablesize = getdtablesize()) < 0) {
/*
@@ -256,6 +273,14 @@ _thread_init(void)
* table, so abort this process.
*/
PANIC("Cannot allocate memory for file descriptor table");
+ }
+ /* Allocate memory for the pollfd table: */
+ if ((_thread_pfd_table = (struct pollfd *) malloc(sizeof(struct pollfd) * _thread_dtablesize)) == NULL) {
+ /*
+ * Cannot allocate memory for the file descriptor
+ * table, so abort this process.
+ */
+ PANIC("Cannot allocate memory for pollfd table");
} else {
/*
* Enter a loop to initialise the file descriptor
@@ -265,6 +290,14 @@ _thread_init(void)
/* Initialise the file descriptor table: */
_thread_fd_table[i] = NULL;
}
+
+ /* Initialize stdio file descriptor table entries: */
+ if ((_thread_fd_table_init(0) != 0) ||
+ (_thread_fd_table_init(1) != 0) ||
+ (_thread_fd_table_init(2) != 0)) {
+ PANIC("Cannot initialize stdio file descriptor "
+ "table entries");
+ }
}
}
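The clock-resolution probe added to _thread_init is the stock sysctl(3) idiom and can be tried standalone; only the computed value's use is library specific (sysctl takes a size_t length, so the sketch uses size_t rather than the hunk's int):

#include <sys/types.h>
#include <sys/time.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	struct clockinfo ci;
	int mib[2] = { CTL_KERN, KERN_CLOCKRATE };
	size_t len = sizeof(ci);

	if (sysctl(mib, 2, &ci, &len, NULL, 0) != 0)
		return (1);
	/* ci.tick is microseconds per hardclock tick, so the clock
	 * resolution in nanoseconds is tick * 1000. */
	printf("clock resolution: %ld ns\n", (long)ci.tick * 1000);
	return (0);
}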
diff --git a/lib/libpthread/thread/thr_join.c b/lib/libpthread/thread/thr_join.c
index 2043b76..f36d7f7 100644
--- a/lib/libpthread/thread/thr_join.c
+++ b/lib/libpthread/thread/thr_join.c
@@ -29,6 +29,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
+ * $Id$
*/
#include <errno.h>
#ifdef _THREAD_SAFE
@@ -71,7 +72,7 @@ pthread_join(pthread_t pthread, void **thread_return)
/* Check if the thread is not dead: */
else if (pthread->state != PS_DEAD) {
/* Add the running thread to the join queue: */
- _thread_queue_enq(&(pthread->join_queue), _thread_run);
+ TAILQ_INSERT_TAIL(&(pthread->join_queue), _thread_run, qe);
/* Schedule the next thread: */
_thread_kern_sched_state(PS_JOIN, __FILE__, __LINE__);
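The uthread_kern.c changes below build a single pollfd table from the work queue and turn the nearest wakeup time into a poll(2) timeout in milliseconds. The timeout arithmetic in isolation (function name invented; clock resolution passed in as a parameter):

#include <poll.h>
#include <time.h>

#ifndef INFTIM
#define INFTIM	(-1)
#endif

/* Milliseconds until 'wakeup' as poll(2) expects them: INFTIM for the
 * no-timeout sentinel (tv_sec of -1), never negative, and padded by
 * the clock resolution so a thread is not woken a tick early. */
static int
poll_timeout_ms(const struct timespec *wakeup, const struct timespec *now,
    long clock_res_nsec)
{
	long ms;

	if (wakeup->tv_sec == -1)
		return (INFTIM);
	ms = (wakeup->tv_sec - now->tv_sec) * 1000 +
	    (wakeup->tv_nsec - now->tv_nsec + clock_res_nsec) / 1000000;
	return (ms < 0 ? 0 : (int)ms);
}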
diff --git a/lib/libpthread/thread/thr_kern.c b/lib/libpthread/thread/thr_kern.c
index f1e97f9..8718906 100644
--- a/lib/libpthread/thread/thr_kern.c
+++ b/lib/libpthread/thread/thr_kern.c
@@ -29,10 +29,11 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: uthread_kern.c,v 1.17 1999/05/07 07:59:44 jasone Exp $
+ * $Id: uthread_kern.c,v 1.16 1999/03/23 05:07:56 jb Exp $
*
*/
#include <errno.h>
+#include <poll.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
@@ -51,7 +52,10 @@
/* Static function prototype definitions: */
static void
-_thread_kern_select(int wait_reqd);
+_thread_kern_poll(int wait_reqd);
+
+static void
+dequeue_signals(void);
static inline void
thread_run_switch_hook(pthread_t thread_out, pthread_t thread_in);
@@ -62,14 +66,12 @@ _thread_kern_sched(struct sigcontext * scp)
#ifndef __alpha__
char *fdata;
#endif
- pthread_t pthread;
- pthread_t pthread_h = NULL;
+ pthread_t pthread, pthread_h = NULL;
pthread_t last_thread = NULL;
struct itimerval itimer;
- struct timespec ts;
- struct timespec ts1;
- struct timeval tv;
- struct timeval tv1;
+ struct timespec ts, ts1;
+ struct timeval tv, tv1;
+ int i, set_timer = 0;
/*
* Flag the pthread kernel as executing scheduler code
@@ -111,6 +113,7 @@ __asm__("fnsave %0": :"m"(*fdata));
/* Run the installed switch hook: */
thread_run_switch_hook(_last_user_thread, _thread_run);
}
+
return;
} else
/* Flag the jump buffer was the last state saved: */
@@ -127,238 +130,208 @@ __asm__("fnsave %0": :"m"(*fdata));
* either a sigreturn (if the state was saved as a sigcontext) or a
* longjmp (if the state was saved by a setjmp).
*/
- while (_thread_link_list != NULL) {
+ while (!(TAILQ_EMPTY(&_thread_list))) {
/* Get the current time of day: */
gettimeofday(&tv, NULL);
TIMEVAL_TO_TIMESPEC(&tv, &ts);
/*
- * Poll file descriptors to update the state of threads
- * waiting on file I/O where data may be available:
+ * Protect the scheduling queues from access by the signal
+ * handler.
*/
- _thread_kern_select(0);
+ _queue_signals = 1;
- /*
- * Define the maximum time before a scheduling signal
- * is required:
- */
- itimer.it_value.tv_sec = 0;
- itimer.it_value.tv_usec = TIMESLICE_USEC;
-
- /*
- * The interval timer is not reloaded when it
- * times out. The interval time needs to be
- * calculated every time.
- */
- itimer.it_interval.tv_sec = 0;
- itimer.it_interval.tv_usec = 0;
-
- /*
- * Enter a loop to look for sleeping threads that are ready
- * or timedout. While we're at it, also find the smallest
- * timeout value for threads waiting for a time.
- */
- _waitingq_check_reqd = 0; /* reset flag before loop */
- TAILQ_FOREACH(pthread, &_waitingq, pqe) {
- /* Check if this thread is ready: */
- if (pthread->state == PS_RUNNING) {
- PTHREAD_WAITQ_REMOVE(pthread);
- PTHREAD_PRIOQ_INSERT_TAIL(pthread);
- }
+ if (_thread_run != &_thread_kern_thread) {
/*
- * Check if this thread is blocked by an
- * atomic lock:
+ * This thread no longer needs to yield the CPU.
+ */
+ _thread_run->yield_on_sig_undefer = 0;
+
+ /*
+ * Save the current time as the time that the thread
+ * became inactive:
*/
- else if (pthread->state == PS_SPINBLOCK) {
+ _thread_run->last_inactive.tv_sec = tv.tv_sec;
+ _thread_run->last_inactive.tv_usec = tv.tv_usec;
+
+ /*
+ * Place the currently running thread into the
+ * appropriate queue(s).
+ */
+ switch (_thread_run->state) {
+ case PS_DEAD:
/*
- * If the lock is available, let
- * the thread run.
+ * Dead threads are not placed in any queue:
*/
- if (pthread->data.spinlock->access_lock == 0) {
- PTHREAD_NEW_STATE(pthread,PS_RUNNING);
- }
+ break;
- /* Check if this thread is to timeout: */
- } else if (pthread->state == PS_COND_WAIT ||
- pthread->state == PS_SLEEP_WAIT ||
- pthread->state == PS_FDR_WAIT ||
- pthread->state == PS_FDW_WAIT ||
- pthread->state == PS_SELECT_WAIT) {
- /* Check if this thread is to wait forever: */
- if (pthread->wakeup_time.tv_sec == -1) {
- }
+ case PS_RUNNING:
/*
- * Check if this thread is to wakeup
- * immediately or if it is past its wakeup
- * time:
+ * Runnable threads can't be placed in the
+ * priority queue until after waiting threads
+ * are polled (to preserve round-robin
+ * scheduling).
*/
- else if ((pthread->wakeup_time.tv_sec == 0 &&
- pthread->wakeup_time.tv_nsec == 0) ||
- (ts.tv_sec > pthread->wakeup_time.tv_sec) ||
- ((ts.tv_sec == pthread->wakeup_time.tv_sec) &&
- (ts.tv_nsec >= pthread->wakeup_time.tv_nsec))) {
- /*
- * Check if this thread is waiting on
- * select:
- */
- if (pthread->state == PS_SELECT_WAIT) {
- /*
- * The select has timed out, so
- * zero the file descriptor
- * sets:
- */
- FD_ZERO(&pthread->data.select_data->readfds);
- FD_ZERO(&pthread->data.select_data->writefds);
- FD_ZERO(&pthread->data.select_data->exceptfds);
- pthread->data.select_data->nfds = 0;
- }
+ if ((_thread_run->slice_usec != -1) &&
+ (_thread_run->attr.sched_policy != SCHED_FIFO)) {
/*
- * Return an error as an interrupted
- * wait:
+ * Accumulate the number of microseconds that
+ * this thread has run for:
*/
- _thread_seterrno(pthread, EINTR);
+ _thread_run->slice_usec +=
+ (_thread_run->last_inactive.tv_sec -
+ _thread_run->last_active.tv_sec) * 1000000 +
+ _thread_run->last_inactive.tv_usec -
+ _thread_run->last_active.tv_usec;
+
+ /* Check for time quantum exceeded: */
+ if (_thread_run->slice_usec > TIMESLICE_USEC)
+ _thread_run->slice_usec = -1;
+ }
+ break;
- /*
- * Flag the timeout in the thread
- * structure:
- */
- pthread->timeout = 1;
+ /*
+ * States which do not depend on file descriptor I/O
+ * operations or timeouts:
+ */
+ case PS_DEADLOCK:
+ case PS_FDLR_WAIT:
+ case PS_FDLW_WAIT:
+ case PS_FILE_WAIT:
+ case PS_JOIN:
+ case PS_MUTEX_WAIT:
+ case PS_SIGSUSPEND:
+ case PS_SIGTHREAD:
+ case PS_SIGWAIT:
+ case PS_SUSPENDED:
+ case PS_WAIT_WAIT:
+ /* No timeouts for these states: */
+ _thread_run->wakeup_time.tv_sec = -1;
+ _thread_run->wakeup_time.tv_nsec = -1;
- /*
- * Change the threads state to allow
- * it to be restarted:
- */
- PTHREAD_NEW_STATE(pthread,PS_RUNNING);
- } else {
- /*
- * Calculate the time until this thread
- * is ready, allowing for the clock
- * resolution:
- */
- ts1.tv_sec = pthread->wakeup_time.tv_sec
- - ts.tv_sec;
- ts1.tv_nsec = pthread->wakeup_time.tv_nsec
- - ts.tv_nsec + CLOCK_RES_NSEC;
+ /* Restart the time slice: */
+ _thread_run->slice_usec = -1;
- /*
- * Check for underflow of the
- * nanosecond field:
- */
- if (ts1.tv_nsec < 0) {
- /*
- * Allow for the underflow
- * of the nanosecond field:
- */
- ts1.tv_sec--;
- ts1.tv_nsec += 1000000000;
- }
- /*
- * Check for overflow of the nanosecond
- * field:
- */
- if (ts1.tv_nsec >= 1000000000) {
- /*
- * Allow for the overflow of
- * the nanosecond field:
- */
- ts1.tv_sec++;
- ts1.tv_nsec -= 1000000000;
- }
- /*
- * Convert the timespec structure
- * to a timeval structure:
- */
- TIMESPEC_TO_TIMEVAL(&tv1, &ts1);
+ /* Insert into the waiting queue: */
+ PTHREAD_WAITQ_INSERT(_thread_run);
+ break;
- /*
- * Check if the thread will be ready
- * sooner than the earliest ones found
- * so far:
- */
- if (timercmp(&tv1, &itimer.it_value, <)) {
- /*
- * Update the time value:
- */
- itimer.it_value.tv_sec = tv1.tv_sec;
- itimer.it_value.tv_usec = tv1.tv_usec;
- }
- }
+ /* States which can timeout: */
+ case PS_COND_WAIT:
+ case PS_SLEEP_WAIT:
+ /* Restart the time slice: */
+ _thread_run->slice_usec = -1;
+
+ /* Insert into the waiting queue: */
+ PTHREAD_WAITQ_INSERT(_thread_run);
+ break;
+
+ /* States that require periodic work: */
+ case PS_SPINBLOCK:
+ /* No timeouts for this state: */
+ _thread_run->wakeup_time.tv_sec = -1;
+ _thread_run->wakeup_time.tv_nsec = -1;
+
+ /* Increment spinblock count: */
+ _spinblock_count++;
+ /* fall through */
+ case PS_FDR_WAIT:
+ case PS_FDW_WAIT:
+ case PS_POLL_WAIT:
+ case PS_SELECT_WAIT:
+ /* Restart the time slice: */
+ _thread_run->slice_usec = -1;
+
+ /* Insert into the waiting queue: */
+ PTHREAD_WAITQ_INSERT(_thread_run);
+
+ /* Insert into the work queue: */
+ PTHREAD_WORKQ_INSERT(_thread_run);
}
}
- /* Check if there is a current thread: */
- if (_thread_run != &_thread_kern_thread) {
- /*
- * This thread no longer needs to yield the CPU.
- */
- _thread_run->yield_on_sched_undefer = 0;
+ /* Unprotect the scheduling queues: */
+ _queue_signals = 0;
- /*
- * Save the current time as the time that the thread
- * became inactive:
- */
- _thread_run->last_inactive.tv_sec = tv.tv_sec;
- _thread_run->last_inactive.tv_usec = tv.tv_usec;
+ /*
+ * Poll file descriptors to update the state of threads
+ * waiting on file I/O where data may be available:
+ */
+ _thread_kern_poll(0);
+
+ /* Protect the scheduling queues: */
+ _queue_signals = 1;
+ /*
+ * Wake up threads that have timedout. This has to be
+ * done after polling in case a thread does a poll or
+ * select with zero time.
+ */
+ PTHREAD_WAITQ_SETACTIVE();
+ while (((pthread = TAILQ_FIRST(&_waitingq)) != NULL) &&
+ (pthread->wakeup_time.tv_sec != -1) &&
+ (((pthread->wakeup_time.tv_sec == 0) &&
+ (pthread->wakeup_time.tv_nsec == 0)) ||
+ (pthread->wakeup_time.tv_sec < ts.tv_sec) ||
+ ((pthread->wakeup_time.tv_sec == ts.tv_sec) &&
+ (pthread->wakeup_time.tv_nsec <= ts.tv_nsec)))) {
+ switch (pthread->state) {
+ case PS_POLL_WAIT:
+ case PS_SELECT_WAIT:
+ /* Return zero file descriptors ready: */
+ pthread->data.poll_data->nfds = 0;
+ /* fall through */
+ default:
+ /*
+ * Remove this thread from the waiting queue
+ * (and work queue if necessary) and place it
+ * in the ready queue.
+ */
+ PTHREAD_WAITQ_CLEARACTIVE();
+ if (pthread->flags & PTHREAD_FLAGS_IN_WORKQ)
+ PTHREAD_WORKQ_REMOVE(pthread);
+ PTHREAD_NEW_STATE(pthread, PS_RUNNING);
+ PTHREAD_WAITQ_SETACTIVE();
+ break;
+ }
/*
- * Accumulate the number of microseconds that this
- * thread has run for:
+ * Flag the timeout in the thread structure:
*/
- if ((_thread_run->slice_usec != -1) &&
- (_thread_run->attr.sched_policy != SCHED_FIFO)) {
- _thread_run->slice_usec +=
- (_thread_run->last_inactive.tv_sec -
- _thread_run->last_active.tv_sec) * 1000000 +
- _thread_run->last_inactive.tv_usec -
- _thread_run->last_active.tv_usec;
-
- /* Check for time quantum exceeded: */
- if (_thread_run->slice_usec > TIMESLICE_USEC)
- _thread_run->slice_usec = -1;
- }
- if (_thread_run->state == PS_RUNNING) {
- if (_thread_run->slice_usec == -1) {
- /*
- * The thread exceeded its time
- * quantum or it yielded the CPU;
- * place it at the tail of the
- * queue for its priority.
- */
- PTHREAD_PRIOQ_INSERT_TAIL(_thread_run);
- } else {
- /*
- * The thread hasn't exceeded its
- * interval. Place it at the head
- * of the queue for its priority.
- */
- PTHREAD_PRIOQ_INSERT_HEAD(_thread_run);
- }
- }
- else if (_thread_run->state == PS_DEAD) {
+ pthread->timeout = 1;
+ }
+ PTHREAD_WAITQ_CLEARACTIVE();
+
+ /*
+ * Check if there is a current runnable thread that isn't
+ * already in the ready queue:
+ */
+ if ((_thread_run != &_thread_kern_thread) &&
+ (_thread_run->state == PS_RUNNING) &&
+ ((_thread_run->flags & PTHREAD_FLAGS_IN_PRIOQ) == 0)) {
+ if (_thread_run->slice_usec == -1) {
/*
- * Don't add dead threads to the waiting
- * queue, because when they're reaped, it
- * will corrupt the queue.
+ * The thread exceeded its time
+ * quantum or it yielded the CPU;
+ * place it at the tail of the
+ * queue for its priority.
*/
- }
- else {
+ PTHREAD_PRIOQ_INSERT_TAIL(_thread_run);
+ } else {
/*
- * This thread has changed state and needs
- * to be placed in the waiting queue.
+ * The thread hasn't exceeded its
+ * interval. Place it at the head
+ * of the queue for its priority.
*/
- PTHREAD_WAITQ_INSERT(_thread_run);
-
- /* Restart the time slice: */
- _thread_run->slice_usec = -1;
+ PTHREAD_PRIOQ_INSERT_HEAD(_thread_run);
}
}
/*
* Get the highest priority thread in the ready queue.
*/
- pthread_h = PTHREAD_PRIOQ_FIRST;
+ pthread_h = PTHREAD_PRIOQ_FIRST();
/* Check if there are no threads ready to run: */
if (pthread_h == NULL) {
@@ -369,18 +342,84 @@ __asm__("fnsave %0": :"m"(*fdata));
*/
_thread_run = &_thread_kern_thread;
+ /* Unprotect the scheduling queues: */
+ _queue_signals = 0;
+
/*
* There are no threads ready to run, so wait until
* something happens that changes this condition:
*/
- _thread_kern_select(1);
- } else {
+ _thread_kern_poll(1);
+ }
+ else {
+ /* Remove the thread from the ready queue: */
+ PTHREAD_PRIOQ_REMOVE(pthread_h);
+
+ /* Get first thread on the waiting list: */
+ pthread = TAILQ_FIRST(&_waitingq);
+
+ /* Check to see if there is more than one thread: */
+ if (pthread_h != TAILQ_FIRST(&_thread_list) ||
+ TAILQ_NEXT(pthread_h, tle) != NULL)
+ set_timer = 1;
+ else
+ set_timer = 0;
+
+ /* Unprotect the scheduling queues: */
+ _queue_signals = 0;
+
+ /*
+ * Check for signals queued while the scheduling
+ * queues were protected:
+ */
+ while (_sigq_check_reqd != 0) {
+ /* Clear before handling queued signals: */
+ _sigq_check_reqd = 0;
+
+ /* Protect the scheduling queues again: */
+ _queue_signals = 1;
+
+ dequeue_signals();
+
+ /*
+ * Check for a higher priority thread that
+ * became runnable due to signal handling.
+ */
+ if (((pthread = PTHREAD_PRIOQ_FIRST()) != NULL) &&
+ (pthread->active_priority > pthread_h->active_priority)) {
+ /*
+ * Insert the lower priority thread
+ * at the head of its priority list:
+ */
+ PTHREAD_PRIOQ_INSERT_HEAD(pthread_h);
+
+ /* Remove the thread from the ready queue: */
+ PTHREAD_PRIOQ_REMOVE(pthread);
+
+ /* There's a new thread in town: */
+ pthread_h = pthread;
+ }
+
+ /* Get first thread on the waiting list: */
+ pthread = TAILQ_FIRST(&_waitingq);
+
+ /*
+ * Check to see if there is more than one
+ * thread:
+ */
+ if (pthread_h != TAILQ_FIRST(&_thread_list) ||
+ TAILQ_NEXT(pthread_h, tle) != NULL)
+ set_timer = 1;
+ else
+ set_timer = 0;
+
+ /* Unprotect the scheduling queues: */
+ _queue_signals = 0;
+ }
+
/* Make the selected thread the current thread: */
_thread_run = pthread_h;
- /* Remove the thread from the ready queue. */
- PTHREAD_PRIOQ_REMOVE(_thread_run);
-
/*
* Save the current time as the time that the thread
* became active:
@@ -389,6 +428,76 @@ __asm__("fnsave %0": :"m"(*fdata));
_thread_run->last_active.tv_usec = tv.tv_usec;
/*
+ * Define the maximum time before a scheduling signal
+ * is required:
+ */
+ itimer.it_value.tv_sec = 0;
+ itimer.it_value.tv_usec = TIMESLICE_USEC;
+
+ /*
+ * The interval timer is not reloaded when it
+ * times out. The interval time needs to be
+ * calculated every time.
+ */
+ itimer.it_interval.tv_sec = 0;
+ itimer.it_interval.tv_usec = 0;
+
+ /* Get first thread on the waiting list: */
+ if ((pthread != NULL) &&
+ (pthread->wakeup_time.tv_sec != -1)) {
+ /*
+ * Calculate the time until this thread
+ * is ready, allowing for the clock
+ * resolution:
+ */
+ ts1.tv_sec = pthread->wakeup_time.tv_sec
+ - ts.tv_sec;
+ ts1.tv_nsec = pthread->wakeup_time.tv_nsec
+ - ts.tv_nsec + _clock_res_nsec;
+
+ /*
+ * Check for underflow of the nanosecond field:
+ */
+ if (ts1.tv_nsec < 0) {
+ /*
+ * Allow for the underflow of the
+ * nanosecond field:
+ */
+ ts1.tv_sec--;
+ ts1.tv_nsec += 1000000000;
+ }
+ /*
+ * Check for overflow of the nanosecond field:
+ */
+ if (ts1.tv_nsec >= 1000000000) {
+ /*
+ * Allow for the overflow of the
+ * nanosecond field:
+ */
+ ts1.tv_sec++;
+ ts1.tv_nsec -= 1000000000;
+ }
+ /*
+ * Convert the timespec structure to a
+ * timeval structure:
+ */
+ TIMESPEC_TO_TIMEVAL(&tv1, &ts1);
+
+ /*
+ * Check if the thread will be ready
+ * sooner than the earliest ones found
+ * so far:
+ */
+ if (timercmp(&tv1, &itimer.it_value, <)) {
+ /*
+ * Update the time value:
+ */
+ itimer.it_value.tv_sec = tv1.tv_sec;
+ itimer.it_value.tv_usec = tv1.tv_usec;
+ }
+ }
+
+ /*
* Check if this thread is running for the first time
* or running again after using its full time slice
* allocation:
@@ -399,7 +508,7 @@ __asm__("fnsave %0": :"m"(*fdata));
}
/* Check if there is more than one thread: */
- if (_thread_run != _thread_link_list || _thread_run->nxt != NULL) {
+ if (set_timer != 0) {
/*
* Start the interval timer for the
* calculated time interval:
@@ -462,6 +571,19 @@ __asm__("fnsave %0": :"m"(*fdata));
void
_thread_kern_sched_state(enum pthread_state state, char *fname, int lineno)
{
+ /*
+ * Flag the pthread kernel as executing scheduler code
+ * to avoid a scheduler signal from interrupting this
+ * execution and calling the scheduler again.
+ */
+ _thread_kern_in_sched = 1;
+
+ /*
+ * Prevent the signal handler from fiddling with this thread
+ * before its state is set and is placed into the proper queue.
+ */
+ _queue_signals = 1;
+
/* Change the state of the current thread: */
_thread_run->state = state;
_thread_run->fname = fname;
@@ -476,6 +598,20 @@ void
_thread_kern_sched_state_unlock(enum pthread_state state,
spinlock_t *lock, char *fname, int lineno)
{
+ /*
+ * Flag the pthread kernel as executing scheduler code
+ * to avoid a scheduler signal from interrupting this
+ * execution and calling the scheduler again.
+ */
+ _thread_kern_in_sched = 1;
+
+ /*
+ * Prevent the signal handler from fiddling with this thread
+ * before its state is set and it is placed into the proper
+ * queue(s).
+ */
+ _queue_signals = 1;
+
/* Change the state of the current thread: */
_thread_run->state = state;
_thread_run->fname = fname;
@@ -489,384 +625,193 @@ _thread_kern_sched_state_unlock(enum pthread_state state,
}
static void
-_thread_kern_select(int wait_reqd)
+_thread_kern_poll(int wait_reqd)
{
char bufr[128];
- fd_set fd_set_except;
- fd_set fd_set_read;
- fd_set fd_set_write;
int count = 0;
- int count_dec;
- int found_one;
- int i;
- int nfds = -1;
- int settimeout;
- pthread_t pthread;
+ int i, found;
+ int kern_pipe_added = 0;
+ int nfds = 0;
+ int timeout_ms = 0;
+ struct pthread *pthread, *pthread_next;
ssize_t num;
struct timespec ts;
- struct timespec ts1;
- struct timeval *p_tv;
struct timeval tv;
- struct timeval tv1;
-
- /* Zero the file descriptor sets: */
- FD_ZERO(&fd_set_read);
- FD_ZERO(&fd_set_write);
- FD_ZERO(&fd_set_except);
/* Check if the caller wants to wait: */
- if (wait_reqd) {
- /*
- * Add the pthread kernel pipe file descriptor to the read
- * set:
- */
- FD_SET(_thread_kern_pipe[0], &fd_set_read);
- nfds = _thread_kern_pipe[0];
-
+ if (wait_reqd == 0) {
+ timeout_ms = 0;
+ }
+ else {
/* Get the current time of day: */
gettimeofday(&tv, NULL);
TIMEVAL_TO_TIMESPEC(&tv, &ts);
+
+ _queue_signals = 1;
+ pthread = TAILQ_FIRST(&_waitingq);
+ _queue_signals = 0;
+
+ if ((pthread == NULL) || (pthread->wakeup_time.tv_sec == -1)) {
+ /*
+ * Either there are no threads in the waiting queue,
+ * or there are no threads that can timeout.
+ */
+ timeout_ms = INFTIM;
+ }
+ else {
+ /*
+ * Calculate the time left for the next thread to
+ * timeout allowing for the clock resolution:
+ */
+ timeout_ms = ((pthread->wakeup_time.tv_sec - ts.tv_sec) *
+ 1000) + ((pthread->wakeup_time.tv_nsec - ts.tv_nsec +
+ _clock_res_nsec) / 1000000);
+ /*
+ * Don't allow negative timeouts:
+ */
+ if (timeout_ms < 0)
+ timeout_ms = 0;
+ }
}
- /* Initialise the time value structure: */
- tv.tv_sec = 0;
- tv.tv_usec = 0;
+
+ /* Protect the scheduling queues: */
+ _queue_signals = 1;
/*
- * Enter a loop to process threads waiting on either file descriptors
- * or times:
+ * Check to see if the signal queue needs to be walked to look
+ * for threads awoken by a signal while in the scheduler. Only
+ * do this if a wait is specified; otherwise, the waiting queue
+ * will be checked after the zero-timed _poll.
*/
- _waitingq_check_reqd = 0; /* reset flag before loop */
- TAILQ_FOREACH (pthread, &_waitingq, pqe) {
- /* Assume that this state does not time out: */
- settimeout = 0;
+ while ((_sigq_check_reqd != 0) && (timeout_ms != 0)) {
+ /* Reset flag before handling queued signals: */
+ _sigq_check_reqd = 0;
+
+ dequeue_signals();
- /* Process according to thread state: */
- switch (pthread->state) {
/*
- * States which do not depend on file descriptor I/O
- * operations or timeouts:
+ * Check for a thread that became runnable due to
+ * a signal:
*/
- case PS_DEAD:
- case PS_DEADLOCK:
- case PS_FDLR_WAIT:
- case PS_FDLW_WAIT:
- case PS_FILE_WAIT:
- case PS_JOIN:
- case PS_MUTEX_WAIT:
- case PS_SIGTHREAD:
- case PS_SIGWAIT:
- case PS_STATE_MAX:
- case PS_WAIT_WAIT:
- case PS_SUSPENDED:
- /* Nothing to do here. */
- break;
-
- case PS_RUNNING:
+ if (PTHREAD_PRIOQ_FIRST() != NULL) {
/*
- * A signal occurred and made this thread ready
- * while in the scheduler or while the scheduling
- * queues were protected.
+ * Since there is at least one runnable thread,
+ * disable the wait.
*/
- PTHREAD_WAITQ_REMOVE(pthread);
- PTHREAD_PRIOQ_INSERT_TAIL(pthread);
- break;
+ timeout_ms = 0;
+ }
+ }
- /* File descriptor read wait: */
- case PS_FDR_WAIT:
- /* Add the file descriptor to the read set: */
- FD_SET(pthread->data.fd.fd, &fd_set_read);
+ /*
+ * Form the poll table:
+ */
+ nfds = 0;
+ if (timeout_ms != 0) {
+ /* Add the kernel pipe to the poll table: */
+ _thread_pfd_table[nfds].fd = _thread_kern_pipe[0];
+ _thread_pfd_table[nfds].events = POLLRDNORM;
+ _thread_pfd_table[nfds].revents = 0;
+ nfds++;
+ kern_pipe_added = 1;
+ }
+ PTHREAD_WAITQ_SETACTIVE();
+ TAILQ_FOREACH(pthread, &_workq, qe) {
+ switch (pthread->state) {
+ case PS_SPINBLOCK:
/*
- * Check if this file descriptor is greater than any
- * of those seen so far:
+ * If the lock is available, let the thread run.
*/
- if (pthread->data.fd.fd > nfds) {
- /* Remember this file descriptor: */
- nfds = pthread->data.fd.fd;
+ if (pthread->data.spinlock->access_lock == 0) {
+ PTHREAD_WAITQ_CLEARACTIVE();
+ PTHREAD_WORKQ_REMOVE(pthread);
+ PTHREAD_NEW_STATE(pthread,PS_RUNNING);
+ PTHREAD_WAITQ_SETACTIVE();
+ /* One less thread in a spinblock state: */
+ _spinblock_count--;
}
- /* Increment the file descriptor count: */
- count++;
+ break;
- /* This state can time out: */
- settimeout = 1;
+ /* File descriptor read wait: */
+ case PS_FDR_WAIT:
+ /* Limit number of polled files to table size: */
+ if (nfds < _thread_dtablesize) {
+ _thread_pfd_table[nfds].events = POLLRDNORM;
+ _thread_pfd_table[nfds].fd = pthread->data.fd.fd;
+ nfds++;
+ }
break;
/* File descriptor write wait: */
case PS_FDW_WAIT:
- /* Add the file descriptor to the write set: */
- FD_SET(pthread->data.fd.fd, &fd_set_write);
-
- /*
- * Check if this file descriptor is greater than any
- * of those seen so far:
- */
- if (pthread->data.fd.fd > nfds) {
- /* Remember this file descriptor: */
- nfds = pthread->data.fd.fd;
+ /* Limit number of polled files to table size: */
+ if (nfds < _thread_dtablesize) {
+ _thread_pfd_table[nfds].events = POLLWRNORM;
+ _thread_pfd_table[nfds].fd = pthread->data.fd.fd;
+ nfds++;
}
- /* Increment the file descriptor count: */
- count++;
-
- /* This state can time out: */
- settimeout = 1;
break;
- /* States that time out: */
- case PS_SLEEP_WAIT:
- case PS_COND_WAIT:
- /* Flag a timeout as required: */
- settimeout = 1;
- break;
-
- /* Select wait: */
+ /* File descriptor poll or select wait: */
+ case PS_POLL_WAIT:
case PS_SELECT_WAIT:
- /*
- * Enter a loop to process each file descriptor in
- * the thread-specific file descriptor sets:
- */
- for (i = 0; i < pthread->data.select_data->nfds; i++) {
- /*
- * Check if this file descriptor is set for
- * exceptions:
- */
- if (FD_ISSET(i, &pthread->data.select_data->exceptfds)) {
- /*
- * Add the file descriptor to the
- * exception set:
- */
- FD_SET(i, &fd_set_except);
-
- /*
- * Increment the file descriptor
- * count:
- */
- count++;
-
- /*
- * Check if this file descriptor is
- * greater than any of those seen so
- * far:
- */
- if (i > nfds) {
- /*
- * Remember this file
- * descriptor:
- */
- nfds = i;
- }
- }
- /*
- * Check if this file descriptor is set for
- * write:
- */
- if (FD_ISSET(i, &pthread->data.select_data->writefds)) {
- /*
- * Add the file descriptor to the
- * write set:
- */
- FD_SET(i, &fd_set_write);
-
- /*
- * Increment the file descriptor
- * count:
- */
- count++;
-
- /*
- * Check if this file descriptor is
- * greater than any of those seen so
- * far:
- */
- if (i > nfds) {
- /*
- * Remember this file
- * descriptor:
- */
- nfds = i;
- }
- }
- /*
- * Check if this file descriptor is set for
- * read:
- */
- if (FD_ISSET(i, &pthread->data.select_data->readfds)) {
- /*
- * Add the file descriptor to the
- * read set:
- */
- FD_SET(i, &fd_set_read);
-
- /*
- * Increment the file descriptor
- * count:
- */
- count++;
-
- /*
- * Check if this file descriptor is
- * greater than any of those seen so
- * far:
- */
- if (i > nfds) {
- /*
- * Remember this file
- * descriptor:
- */
- nfds = i;
- }
+ /* Limit number of polled files to table size: */
+ if (pthread->data.poll_data->nfds + nfds <
+ _thread_dtablesize) {
+ for (i = 0; i < pthread->data.poll_data->nfds; i++) {
+ _thread_pfd_table[nfds + i].fd =
+ pthread->data.poll_data->fds[i].fd;
+ _thread_pfd_table[nfds + i].events =
+ pthread->data.poll_data->fds[i].events;
}
+ nfds += pthread->data.poll_data->nfds;
}
-
- /* This state can time out: */
- settimeout = 1;
break;
- }
-
- /*
- * Check if the caller wants to wait and if the thread state
- * is one that times out:
- */
- if (wait_reqd && settimeout) {
- /* Check if this thread wants to wait forever: */
- if (pthread->wakeup_time.tv_sec == -1) {
- }
- /* Check if this thread doesn't want to wait at all: */
- else if (pthread->wakeup_time.tv_sec == 0 &&
- pthread->wakeup_time.tv_nsec == 0) {
- /* Override the caller's request to wait: */
- wait_reqd = 0;
- } else {
- /*
- * Calculate the time until this thread is
- * ready, allowing for the clock resolution:
- */
- ts1.tv_sec = pthread->wakeup_time.tv_sec - ts.tv_sec;
- ts1.tv_nsec = pthread->wakeup_time.tv_nsec - ts.tv_nsec +
- CLOCK_RES_NSEC;
-
- /*
- * Check for underflow of the nanosecond
- * field:
- */
- if (ts1.tv_nsec < 0) {
- /*
- * Allow for the underflow of the
- * nanosecond field:
- */
- ts1.tv_sec--;
- ts1.tv_nsec += 1000000000;
- }
- /*
- * Check for overflow of the nanosecond
- * field:
- */
- if (ts1.tv_nsec >= 1000000000) {
- /*
- * Allow for the overflow of the
- * nanosecond field:
- */
- ts1.tv_sec++;
- ts1.tv_nsec -= 1000000000;
- }
- /*
- * Convert the timespec structure to a
- * timeval structure:
- */
- TIMESPEC_TO_TIMEVAL(&tv1, &ts1);
- /*
- * Check if no time value has been found yet,
- * or if the thread will be ready sooner that
- * the earliest one found so far:
- */
- if ((tv.tv_sec == 0 && tv.tv_usec == 0) || timercmp(&tv1, &tv, <)) {
- /* Update the time value: */
- tv.tv_sec = tv1.tv_sec;
- tv.tv_usec = tv1.tv_usec;
- }
- }
+ /* Other states do not depend on file I/O. */
+ default:
+ break;
}
}
+ PTHREAD_WAITQ_CLEARACTIVE();
- /* Check if the caller wants to wait: */
- if (wait_reqd) {
- /* Check if no threads were found with timeouts: */
- if (tv.tv_sec == 0 && tv.tv_usec == 0) {
- /* Wait forever: */
- p_tv = NULL;
- } else {
- /*
- * Point to the time value structure which contains
- * the earliest time that a thread will be ready:
- */
- p_tv = &tv;
- }
+ /*
+ * Wait for a file descriptor to be ready for read, write, or
+ * an exception, or a timeout to occur:
+ */
+ count = _thread_sys_poll(_thread_pfd_table, nfds, timeout_ms);
+ if (kern_pipe_added != 0)
/*
- * Flag the pthread kernel as in a select. This is to avoid
- * the window between the next statement that unblocks
- * signals and the select statement which follows.
+ * Remove the pthread kernel pipe file descriptor
+ * from the pollfd table:
*/
- _thread_kern_in_select = 1;
+ nfds = 1;
+ else
+ nfds = 0;
+ /*
+ * Check if it is possible that there are bytes in the kernel
+ * read pipe waiting to be read:
+ */
+ if (count < 0 || ((kern_pipe_added != 0) &&
+ (_thread_pfd_table[0].revents & POLLRDNORM))) {
/*
- * Wait for a file descriptor to be ready for read, write, or
- * an exception, or a timeout to occur:
+ * If the kernel read pipe was included in the
+ * count:
*/
- count = _thread_sys_select(nfds + 1, &fd_set_read, &fd_set_write, &fd_set_except, p_tv);
+ if (count > 0) {
+ /* Decrement the count of file descriptors: */
+ count--;
+ }
- /* Reset the kernel in select flag: */
- _thread_kern_in_select = 0;
+ if (_sigq_check_reqd != 0) {
+ /* Reset flag before handling signals: */
+ _sigq_check_reqd = 0;
- /*
- * Check if it is possible that there are bytes in the kernel
- * read pipe waiting to be read:
- */
- if (count < 0 || FD_ISSET(_thread_kern_pipe[0], &fd_set_read)) {
- /*
- * Check if the kernel read pipe was included in the
- * count:
- */
- if (count > 0) {
- /*
- * Remove the kernel read pipe from the
- * count:
- */
- FD_CLR(_thread_kern_pipe[0], &fd_set_read);
-
- /* Decrement the count of file descriptors: */
- count--;
- }
- /*
- * Enter a loop to read (and trash) bytes from the
- * pthread kernel pipe:
- */
- while ((num = _thread_sys_read(_thread_kern_pipe[0], bufr, sizeof(bufr))) > 0) {
- /*
- * The buffer read contains one byte per
- * signal and each byte is the signal number.
- * This data is not used, but the fact that
- * the signal handler wrote to the pipe *is*
- * used to cause the _select call
- * to complete if the signal occurred between
- * the time when signals were unblocked and
- * the _select select call being
- * made.
- */
- }
+ dequeue_signals();
}
}
- /* Check if there are file descriptors to poll: */
- else if (count > 0) {
- /*
- * Point to the time value structure which has been zeroed so
- * that the call to _select will not wait:
- */
- p_tv = &tv;
-
- /* Poll file descrptors without wait: */
- count = _thread_sys_select(nfds + 1, &fd_set_read, &fd_set_write, &fd_set_except, p_tv);
- }
/*
* Check if any file descriptors are ready:
@@ -875,301 +820,133 @@ _thread_kern_select(int wait_reqd)
/*
* Enter a loop to look for threads waiting on file
* descriptors that are flagged as available by the
- * _select syscall:
+ * _poll syscall:
*/
- TAILQ_FOREACH (pthread, &_waitingq, pqe) {
- /* Process according to thread state: */
+ PTHREAD_WAITQ_SETACTIVE();
+ TAILQ_FOREACH(pthread, &_workq, qe) {
switch (pthread->state) {
- /*
- * States which do not depend on file
- * descriptor I/O operations:
- */
- case PS_COND_WAIT:
- case PS_DEAD:
- case PS_DEADLOCK:
- case PS_FDLR_WAIT:
- case PS_FDLW_WAIT:
- case PS_FILE_WAIT:
- case PS_JOIN:
- case PS_MUTEX_WAIT:
- case PS_SIGWAIT:
- case PS_SLEEP_WAIT:
- case PS_WAIT_WAIT:
- case PS_SIGTHREAD:
- case PS_STATE_MAX:
- case PS_SUSPENDED:
- /* Nothing to do here. */
- break;
-
- case PS_RUNNING:
+ case PS_SPINBLOCK:
/*
- * A signal occurred and made this thread
- * ready while in the scheduler.
+ * If the lock is available, let the thread run.
*/
- PTHREAD_WAITQ_REMOVE(pthread);
- PTHREAD_PRIOQ_INSERT_TAIL(pthread);
- break;
+ if (pthread->data.spinlock->access_lock == 0) {
+ PTHREAD_WAITQ_CLEARACTIVE();
+ PTHREAD_WORKQ_REMOVE(pthread);
+ PTHREAD_NEW_STATE(pthread,PS_RUNNING);
+ PTHREAD_WAITQ_SETACTIVE();
- /* File descriptor read wait: */
- case PS_FDR_WAIT:
- /*
- * Check if the file descriptor is available
- * for read:
- */
- if (FD_ISSET(pthread->data.fd.fd, &fd_set_read)) {
/*
- * Change the thread state to allow
- * it to read from the file when it
- * is scheduled next:
+ * One less thread in a spinblock state:
*/
- pthread->state = PS_RUNNING;
+ _spinblock_count--;
+ }
+ break;
- /*
- * Remove it from the waiting queue
- * and add it to the ready queue:
- */
- PTHREAD_WAITQ_REMOVE(pthread);
- PTHREAD_PRIOQ_INSERT_TAIL(pthread);
+ /* File descriptor read wait: */
+ case PS_FDR_WAIT:
+ if ((nfds < _thread_dtablesize) &&
+ (_thread_pfd_table[nfds].revents & POLLRDNORM)) {
+ PTHREAD_WAITQ_CLEARACTIVE();
+ PTHREAD_WORKQ_REMOVE(pthread);
+ PTHREAD_NEW_STATE(pthread,PS_RUNNING);
+ PTHREAD_WAITQ_SETACTIVE();
}
+ nfds++;
break;
/* File descriptor write wait: */
case PS_FDW_WAIT:
- /*
- * Check if the file descriptor is available
- * for write:
- */
- if (FD_ISSET(pthread->data.fd.fd, &fd_set_write)) {
- /*
- * Change the thread state to allow
- * it to write to the file when it is
- * scheduled next:
- */
- pthread->state = PS_RUNNING;
-
- /*
- * Remove it from the waiting queue
- * and add it to the ready queue:
- */
- PTHREAD_WAITQ_REMOVE(pthread);
- PTHREAD_PRIOQ_INSERT_TAIL(pthread);
+ if ((nfds < _thread_dtablesize) &&
+ (_thread_pfd_table[nfds].revents & POLLWRNORM)) {
+ PTHREAD_WAITQ_CLEARACTIVE();
+ PTHREAD_WORKQ_REMOVE(pthread);
+ PTHREAD_NEW_STATE(pthread,PS_RUNNING);
+ PTHREAD_WAITQ_SETACTIVE();
}
+ nfds++;
break;
- /* Select wait: */
+ /* File descriptor poll or select wait: */
+ case PS_POLL_WAIT:
case PS_SELECT_WAIT:
- /*
- * Reset the flag that indicates if a file
- * descriptor is ready for some type of
- * operation:
- */
- count_dec = 0;
-
- /*
- * Enter a loop to search though the
- * thread-specific select file descriptors
- * for the first descriptor that is ready:
- */
- for (i = 0; i < pthread->data.select_data->nfds && count_dec == 0; i++) {
+ if (pthread->data.poll_data->nfds + nfds <
+ _thread_dtablesize) {
/*
- * Check if this file descriptor does
- * not have an exception:
+ * Enter a loop looking for I/O
+ * readiness:
*/
- if (FD_ISSET(i, &pthread->data.select_data->exceptfds) && FD_ISSET(i, &fd_set_except)) {
- /*
- * Flag this file descriptor
- * as ready:
- */
- count_dec = 1;
- }
- /*
- * Check if this file descriptor is
- * not ready for write:
- */
- if (FD_ISSET(i, &pthread->data.select_data->writefds) && FD_ISSET(i, &fd_set_write)) {
- /*
- * Flag this file descriptor
- * as ready:
- */
- count_dec = 1;
+ found = 0;
+ for (i = 0; i < pthread->data.poll_data->nfds; i++) {
+ if (_thread_pfd_table[nfds + i].revents != 0) {
+ pthread->data.poll_data->fds[i].revents =
+ _thread_pfd_table[nfds + i].revents;
+ found++;
+ }
}
- /*
- * Check if this file descriptor is
- * not ready for read:
- */
- if (FD_ISSET(i, &pthread->data.select_data->readfds) && FD_ISSET(i, &fd_set_read)) {
- /*
- * Flag this file descriptor
- * as ready:
- */
- count_dec = 1;
+
+ /* Advance past this thread's entries before poll_data->nfds is overwritten: */
+ nfds += pthread->data.poll_data->nfds;
+
+ if (found != 0) {
+ pthread->data.poll_data->nfds = found;
+ PTHREAD_WAITQ_CLEARACTIVE();
+ PTHREAD_WORKQ_REMOVE(pthread);
+ PTHREAD_NEW_STATE(pthread,PS_RUNNING);
+ PTHREAD_WAITQ_SETACTIVE();
}
}
+ else
+ nfds += pthread->data.poll_data->nfds;
+ break;
+ /* Other states do not depend on file I/O. */
+ default:
+ break;
+ }
+ }
+ PTHREAD_WAITQ_CLEARACTIVE();
+ }
+ else if (_spinblock_count != 0) {
+ /*
+ * Enter a loop to look for threads waiting on a spinlock
+ * that is now available.
+ */
+ PTHREAD_WAITQ_SETACTIVE();
+ TAILQ_FOREACH(pthread, &_workq, qe) {
+ if (pthread->state == PS_SPINBLOCK) {
/*
- * Check if any file descriptors are ready
- * for the current thread:
+ * If the lock is available, let the thread run.
*/
- if (count_dec) {
- /*
- * Reset the count of file
- * descriptors that are ready for
- * this thread:
- */
- found_one = 0;
-
- /*
- * Enter a loop to search though the
- * thread-specific select file
- * descriptors:
- */
- for (i = 0; i < pthread->data.select_data->nfds; i++) {
- /*
- * Reset the count of
- * operations for which the
- * current file descriptor is
- * ready:
- */
- count_dec = 0;
-
- /*
- * Check if this file
- * descriptor is selected for
- * exceptions:
- */
- if (FD_ISSET(i, &pthread->data.select_data->exceptfds)) {
- /*
- * Check if this file
- * descriptor has an
- * exception:
- */
- if (FD_ISSET(i, &fd_set_except)) {
- /*
- * Increment
- * the count
- * for this
- * file:
- */
- count_dec++;
- } else {
- /*
- * Clear the
- * file
- * descriptor
- * in the
- * thread-spec
- * ific file
- * descriptor
- * set:
- */
- FD_CLR(i, &pthread->data.select_data->exceptfds);
- }
- }
- /*
- * Check if this file
- * descriptor is selected for
- * write:
- */
- if (FD_ISSET(i, &pthread->data.select_data->writefds)) {
- /*
- * Check if this file
- * descriptor is
- * ready for write:
- */
- if (FD_ISSET(i, &fd_set_write)) {
- /*
- * Increment
- * the count
- * for this
- * file:
- */
- count_dec++;
- } else {
- /*
- * Clear the
- * file
- * descriptor
- * in the
- * thread-spec
- * ific file
- * descriptor
- * set:
- */
- FD_CLR(i, &pthread->data.select_data->writefds);
- }
- }
- /*
- * Check if this file
- * descriptor is selected for
- * read:
- */
- if (FD_ISSET(i, &pthread->data.select_data->readfds)) {
- /*
- * Check if this file
- * descriptor is
- * ready for read:
- */
- if (FD_ISSET(i, &fd_set_read)) {
- /*
- * Increment
- * the count
- * for this
- * file:
- */
- count_dec++;
- } else {
- /*
- * Clear the
- * file
- * descriptor
- * in the
- * thread-spec
- * ific file
- * descriptor
- * set:
- */
- FD_CLR(i, &pthread->data.select_data->readfds);
- }
- }
- /*
- * Check if the current file
- * descriptor is ready for
- * any one of the operations:
- */
- if (count_dec > 0) {
- /*
- * Increment the
- * count of file
- * descriptors that
- * are ready for the
- * current thread:
- */
- found_one++;
- }
- }
-
- /*
- * Return the number of file
- * descriptors that are ready:
- */
- pthread->data.select_data->nfds = found_one;
-
- /*
- * Change the state of the current
- * thread to run:
- */
- pthread->state = PS_RUNNING;
+ if (pthread->data.spinlock->access_lock == 0) {
+ PTHREAD_WAITQ_CLEARACTIVE();
+ PTHREAD_WORKQ_REMOVE(pthread);
+ PTHREAD_NEW_STATE(pthread,PS_RUNNING);
+ PTHREAD_WAITQ_SETACTIVE();
/*
- * Remove it from the waiting queue
- * and add it to the ready queue:
+ * One less thread in a spinblock state:
*/
- PTHREAD_WAITQ_REMOVE(pthread);
- PTHREAD_PRIOQ_INSERT_TAIL(pthread);
+ _spinblock_count--;
}
- break;
}
}
+ PTHREAD_WAITQ_CLEARACTIVE();
+ }
+
+ /* Unprotect the scheduling queues: */
+ _queue_signals = 0;
+
+ while (_sigq_check_reqd != 0) {
+ /* Handle queued signals: */
+ _sigq_check_reqd = 0;
+
+ /* Protect the scheduling queues: */
+ _queue_signals = 1;
+
+ dequeue_signals();
+
+ /* Unprotect the scheduling queues: */
+ _queue_signals = 0;
}
/* Nothing to return. */
@@ -1219,59 +996,101 @@ _thread_kern_set_timeout(struct timespec * timeout)
}
void
-_thread_kern_sched_defer(void)
+_thread_kern_sig_defer(void)
{
- /* Allow scheduling deferral to be recursive. */
- _thread_run->sched_defer_count++;
+ /* Allow signal deferral to be recursive. */
+ _thread_run->sig_defer_count++;
}
void
-_thread_kern_sched_undefer(void)
+_thread_kern_sig_undefer(void)
{
pthread_t pthread;
int need_resched = 0;
/*
* Perform checks to yield only if we are about to undefer
- * scheduling.
+ * signals.
*/
- if (_thread_run->sched_defer_count == 1) {
+ if (_thread_run->sig_defer_count > 1) {
+ /* Decrement the signal deferral count. */
+ _thread_run->sig_defer_count--;
+ }
+ else if (_thread_run->sig_defer_count == 1) {
+ /* Reenable signals: */
+ _thread_run->sig_defer_count = 0;
+
/*
- * Check if the waiting queue needs to be examined for
- * threads that are now ready:
+ * Check if there are queued signals:
*/
- while (_waitingq_check_reqd != 0) {
- /* Clear the flag before checking the waiting queue: */
- _waitingq_check_reqd = 0;
-
- TAILQ_FOREACH(pthread, &_waitingq, pqe) {
- if (pthread->state == PS_RUNNING) {
- PTHREAD_WAITQ_REMOVE(pthread);
- PTHREAD_PRIOQ_INSERT_TAIL(pthread);
- }
+ while (_sigq_check_reqd != 0) {
+ /* Defer scheduling while we process queued signals: */
+ _thread_run->sig_defer_count = 1;
+
+ /* Clear the flag before checking the signal queue: */
+ _sigq_check_reqd = 0;
+
+ /* Dequeue and handle signals: */
+ dequeue_signals();
+
+ /*
+ * To avoid an unnecessary reschedule check later, note
+ * now whether signal handling caused a higher priority
+ * thread to become ready.
+ */
+ if ((need_resched == 0) &&
+ (((pthread = PTHREAD_PRIOQ_FIRST()) != NULL) &&
+ (pthread->active_priority > _thread_run->active_priority))) {
+ need_resched = 1;
}
+
+ /* Reenable signals: */
+ _thread_run->sig_defer_count = 0;
}
- /*
- * We need to yield if a thread change of state caused a
- * higher priority thread to become ready, or if a
- * scheduling signal occurred while preemption was disabled.
- */
- if ((((pthread = PTHREAD_PRIOQ_FIRST) != NULL) &&
- (pthread->active_priority > _thread_run->active_priority)) ||
- (_thread_run->yield_on_sched_undefer != 0)) {
- _thread_run->yield_on_sched_undefer = 0;
- need_resched = 1;
+ /* Yield the CPU if necessary: */
+ if (need_resched || _thread_run->yield_on_sig_undefer != 0) {
+ _thread_run->yield_on_sig_undefer = 0;
+ _thread_kern_sched(NULL);
}
}
+}
- if (_thread_run->sched_defer_count > 0) {
- /* Decrement the scheduling deferral count. */
- _thread_run->sched_defer_count--;
+static void
+dequeue_signals(void)
+{
+ char bufr[128];
+ int i, num;
- /* Yield the CPU if necessary: */
- if (need_resched)
- _thread_kern_sched(NULL);
+ /*
+ * Enter a loop to read and handle queued signals from the
+ * pthread kernel pipe:
+ */
+ while (((num = _thread_sys_read(_thread_kern_pipe[0], bufr,
+ sizeof(bufr))) > 0) || (num == -1 && errno == EINTR)) {
+ /*
+ * The buffer read contains one byte per signal and
+ * each byte is the signal number.
+ */
+ for (i = 0; i < num; i++) {
+ if ((int) bufr[i] == _SCHED_SIGNAL) {
+ /*
+ * Scheduling signals shouldn't ever be
+ * queued; just ignore it for now.
+ */
+ }
+ else {
+ /* Handle this signal: */
+ _thread_sig_handle((int) bufr[i], NULL);
+ }
+ }
+ }
+ if ((num < 0) && (errno != EAGAIN)) {
+ /*
+ * The only error we should expect is if there is
+ * no data to read.
+ */
+ PANIC("Unable to read from thread kernel pipe");
}
}
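
The dequeue_signals() routine above drains the thread kernel pipe, one byte per queued signal. A minimal standalone sketch of that drain pattern, independent of the uthread internals (the pipe, the byte values, and the printf dispatch are illustrative only):

    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int
    main(void)
    {
    	char bufr[128];
    	int fds[2], i;
    	ssize_t num;

    	/* A non-blocking pipe standing in for the thread kernel pipe. */
    	if (pipe(fds) != 0)
    		return (1);
    	fcntl(fds[0], F_SETFL, O_NONBLOCK);

    	/* A signal handler would write one byte per queued signal. */
    	write(fds[1], "\002\017", 2);	/* e.g. signals 2 and 15 */

    	/* Drain the pipe, handling one signal number per byte. */
    	while ((num = read(fds[0], bufr, sizeof(bufr))) > 0 ||
    	    (num == -1 && errno == EINTR)) {
    		for (i = 0; i < num; i++)
    			printf("would dispatch signal %d\n", (int)bufr[i]);
    	}
    	/* The only expected error is "no data to read" (EAGAIN). */
    	if (num < 0 && errno != EAGAIN)
    		perror("read");
    	return (0);
    }
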
diff --git a/lib/libpthread/thread/thr_kill.c b/lib/libpthread/thread/thr_kill.c
index c729179..f5bb6e2 100644
--- a/lib/libpthread/thread/thr_kill.c
+++ b/lib/libpthread/thread/thr_kill.c
@@ -29,6 +29,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
+ * $Id$
*/
#include <errno.h>
#include <signal.h>
@@ -46,18 +47,18 @@ pthread_kill(pthread_t pthread, int sig)
/* Invalid signal: */
ret = EINVAL;
- /* Ignored signals get dropped on the floor. */
- else if (_thread_sigact[sig - 1].sa_handler == SIG_IGN)
- ret = 0;
-
- /* Find the thread in the list of active threads: */
- else if ((ret = _find_thread(pthread)) == 0) {
+ /*
+ * Ensure the thread is in the list of active threads, and the
+ * signal is valid (signal 0 specifies error checking only) and
+ * not being ignored:
+ */
+ else if (((ret = _find_thread(pthread)) == 0) && (sig > 0) &&
+ (_thread_sigact[sig - 1].sa_handler != SIG_IGN)) {
/*
- * Guard against preemption by a scheduling signal.
- * A change of thread state modifies the waiting
- * and priority queues.
+ * Defer signals to protect the scheduling queues from
+ * access by the signal handler:
*/
- _thread_kern_sched_defer();
+ _thread_kern_sig_defer();
switch (pthread->state) {
case PS_SIGSUSPEND:
@@ -90,15 +91,19 @@ pthread_kill(pthread_t pthread, int sig)
sigaddset(&pthread->sigpend,sig);
break;
- case PS_SELECT_WAIT:
case PS_FDR_WAIT:
case PS_FDW_WAIT:
+ case PS_POLL_WAIT:
case PS_SLEEP_WAIT:
+ case PS_SELECT_WAIT:
if (!sigismember(&pthread->sigmask, sig) &&
(_thread_sigact[sig - 1].sa_handler != SIG_IGN)) {
/* Flag the operation as interrupted: */
pthread->interrupted = 1;
+ if (pthread->flags & PTHREAD_FLAGS_IN_WORKQ)
+ PTHREAD_WORKQ_REMOVE(pthread);
+
/* Change the state of the thread to run: */
PTHREAD_NEW_STATE(pthread,PS_RUNNING);
@@ -116,11 +121,43 @@ pthread_kill(pthread_t pthread, int sig)
break;
}
+
+ /*
+ * Check that a custom handler is installed
+ * and that the signal is not blocked:
+ */
+ if (_thread_sigact[sig - 1].sa_handler != SIG_DFL &&
+ _thread_sigact[sig - 1].sa_handler != SIG_IGN &&
+ sigismember(&pthread->sigpend, sig) &&
+ !sigismember(&pthread->sigmask, sig)) {
+ pthread_t pthread_saved = _thread_run;
+
+ /* Current thread inside critical region? */
+ if (_thread_run->sig_defer_count > 0)
+ pthread->sig_defer_count++;
+
+ _thread_run = pthread;
+
+ /* Clear the pending signal: */
+ sigdelset(&pthread->sigpend, sig);
+
+ /*
+ * Dispatch the signal via the custom signal
+ * handler:
+ */
+ (*(_thread_sigact[sig - 1].sa_handler))(sig);
+
+ _thread_run = pthread_saved;
+
+ if (_thread_run->sig_defer_count > 0)
+ pthread->sig_defer_count--;
+ }
+
/*
- * Reenable preemption and yield if a scheduling signal
- * occurred while in the critical region.
+ * Undefer and handle pending signals, yielding if
+ * necessary:
*/
- _thread_kern_sched_undefer();
+ _thread_kern_sig_undefer();
}
/* Return the completion status: */
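
The PS_POLL_WAIT and PS_SELECT_WAIT cases above are what let pthread_kill() interrupt a thread blocked in the new poll() wrapper: the target is flagged interrupted, pulled off the work queue, and made runnable. A hedged usage sketch of the observable behavior (SIGUSR1 and the empty handler are arbitrary choices; INFTIM is the BSD spelling of an infinite poll timeout):

    #include <errno.h>
    #include <poll.h>
    #include <pthread.h>
    #include <signal.h>
    #include <stdio.h>
    #include <unistd.h>

    static void
    handler(int sig)
    {
    	(void)sig;	/* empty: its only job is to interrupt poll() */
    }

    static void *
    waiter(void *arg)
    {
    	struct pollfd pfd;

    	(void)arg;
    	pfd.fd = STDIN_FILENO;
    	pfd.events = POLLRDNORM;
    	pfd.revents = 0;

    	/* Block "forever"; pthread_kill() should interrupt this. */
    	if (poll(&pfd, 1, INFTIM) == -1 && errno == EINTR)
    		printf("poll interrupted by signal\n");
    	return (NULL);
    }

    int
    main(void)
    {
    	pthread_t td;

    	signal(SIGUSR1, handler);
    	pthread_create(&td, NULL, waiter, NULL);
    	sleep(1);		/* let the waiter block in poll() */
    	pthread_kill(td, SIGUSR1);
    	pthread_join(td, NULL);
    	return (0);
    }
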
diff --git a/lib/libpthread/thread/thr_mutex.c b/lib/libpthread/thread/thr_mutex.c
index fa9c8cf..c967b46 100644
--- a/lib/libpthread/thread/thr_mutex.c
+++ b/lib/libpthread/thread/thr_mutex.c
@@ -29,6 +29,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
+ * $Id$
*/
#include <stdlib.h>
#include <errno.h>
@@ -39,6 +40,25 @@
#include <pthread.h>
#include "pthread_private.h"
+#if defined(_PTHREADS_INVARIANTS)
+#define _MUTEX_INIT_LINK(m) do { \
+ (m)->m_qe.tqe_prev = NULL; \
+ (m)->m_qe.tqe_next = NULL; \
+} while (0)
+#define _MUTEX_ASSERT_IS_OWNED(m) do { \
+ if ((m)->m_qe.tqe_prev == NULL) \
+ PANIC("mutex is not on list"); \
+} while (0)
+#define _MUTEX_ASSERT_NOT_OWNED(m) do { \
+ if (((m)->m_qe.tqe_prev != NULL) || \
+ ((m)->m_qe.tqe_next != NULL)) \
+ PANIC("mutex is on list"); \
+} while (0)
+#else
+#define _MUTEX_INIT_LINK(m)
+#define _MUTEX_ASSERT_IS_OWNED(m)
+#define _MUTEX_ASSERT_NOT_OWNED(m)
+#endif
/*
* Prototypes
@@ -55,6 +75,34 @@ static inline void mutex_queue_enq(pthread_mutex_t, pthread_t);
static spinlock_t static_init_lock = _SPINLOCK_INITIALIZER;
+/* Reinitialize a mutex to defaults. */
+int
+_mutex_reinit(pthread_mutex_t * mutex)
+{
+ int ret = 0;
+
+ if (mutex == NULL)
+ ret = EINVAL;
+ else if (*mutex == NULL)
+ ret = pthread_mutex_init(mutex, NULL);
+ else {
+ /*
+ * Initialize the mutex structure:
+ */
+ (*mutex)->m_type = PTHREAD_MUTEX_DEFAULT;
+ (*mutex)->m_protocol = PTHREAD_PRIO_NONE;
+ TAILQ_INIT(&(*mutex)->m_queue);
+ (*mutex)->m_owner = NULL;
+ (*mutex)->m_data.m_count = 0;
+ (*mutex)->m_flags = MUTEX_FLAGS_INITED;
+ (*mutex)->m_refcount = 0;
+ (*mutex)->m_prio = 0;
+ (*mutex)->m_saved_prio = 0;
+ _MUTEX_INIT_LINK(*mutex);
+ memset(&(*mutex)->lock, 0, sizeof((*mutex)->lock));
+ }
+ return (ret);
+}
int
pthread_mutex_init(pthread_mutex_t * mutex,
@@ -138,6 +186,7 @@ pthread_mutex_init(pthread_mutex_t * mutex,
else
pmutex->m_prio = 0;
pmutex->m_saved_prio = 0;
+ _MUTEX_INIT_LINK(pmutex);
memset(&pmutex->lock, 0, sizeof(pmutex->lock));
*mutex = pmutex;
} else {
@@ -147,7 +196,7 @@ pthread_mutex_init(pthread_mutex_t * mutex,
}
}
/* Return the completion status: */
- return (ret);
+ return(ret);
}
int
@@ -177,6 +226,7 @@ pthread_mutex_destroy(pthread_mutex_t * mutex)
* Free the memory allocated for the mutex
* structure:
*/
+ _MUTEX_ASSERT_NOT_OWNED(*mutex);
free(*mutex);
/*
@@ -222,28 +272,24 @@ pthread_mutex_trylock(pthread_mutex_t * mutex)
*/
else if (*mutex != NULL || (ret = init_static(mutex)) == 0) {
/*
+ * Defer signals to protect the scheduling queues from
+ * access by the signal handler:
+ */
+ _thread_kern_sig_defer();
+
+ /* Lock the mutex structure: */
+ _SPINLOCK(&(*mutex)->lock);
+
+ /*
* If the mutex was statically allocated, properly
* initialize the tail queue.
*/
if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
TAILQ_INIT(&(*mutex)->m_queue);
+ _MUTEX_INIT_LINK(*mutex);
(*mutex)->m_flags |= MUTEX_FLAGS_INITED;
}
- /*
- * Guard against being preempted by a scheduling signal.
- * To support priority inheritence mutexes, we need to
- * maintain lists of mutex ownerships for each thread as
- * well as lists of waiting threads for each mutex. In
- * order to propagate priorities we need to atomically
- * walk these lists and cannot rely on a single mutex
- * lock to provide protection against modification.
- */
- _thread_kern_sched_defer();
-
- /* Lock the mutex structure: */
- _SPINLOCK(&(*mutex)->lock);
-
/* Process according to mutex type: */
switch ((*mutex)->m_protocol) {
/* Default POSIX mutex: */
@@ -254,6 +300,7 @@ pthread_mutex_trylock(pthread_mutex_t * mutex)
(*mutex)->m_owner = _thread_run;
/* Add to the list of owned mutexes: */
+ _MUTEX_ASSERT_NOT_OWNED(*mutex);
TAILQ_INSERT_TAIL(&_thread_run->mutexq,
(*mutex), m_qe);
} else if ((*mutex)->m_owner == _thread_run)
@@ -282,6 +329,7 @@ pthread_mutex_trylock(pthread_mutex_t * mutex)
_thread_run->inherited_priority;
/* Add to the list of owned mutexes: */
+ _MUTEX_ASSERT_NOT_OWNED(*mutex);
TAILQ_INSERT_TAIL(&_thread_run->mutexq,
(*mutex), m_qe);
} else if ((*mutex)->m_owner == _thread_run)
@@ -317,6 +365,7 @@ pthread_mutex_trylock(pthread_mutex_t * mutex)
(*mutex)->m_prio;
/* Add to the list of owned mutexes: */
+ _MUTEX_ASSERT_NOT_OWNED(*mutex);
TAILQ_INSERT_TAIL(&_thread_run->mutexq,
(*mutex), m_qe);
} else if ((*mutex)->m_owner == _thread_run)
@@ -337,10 +386,10 @@ pthread_mutex_trylock(pthread_mutex_t * mutex)
_SPINUNLOCK(&(*mutex)->lock);
/*
- * Renable preemption and yield if a scheduling signal
- * arrived while in the critical region:
+ * Undefer and handle pending signals, yielding if
+ * necessary:
*/
- _thread_kern_sched_undefer();
+ _thread_kern_sig_undefer();
}
/* Return the completion status: */
@@ -361,28 +410,24 @@ pthread_mutex_lock(pthread_mutex_t * mutex)
*/
else if (*mutex != NULL || (ret = init_static(mutex)) == 0) {
/*
+ * Defer signals to protect the scheduling queues from
+ * access by the signal handler:
+ */
+ _thread_kern_sig_defer();
+
+ /* Lock the mutex structure: */
+ _SPINLOCK(&(*mutex)->lock);
+
+ /*
* If the mutex was statically allocated, properly
* initialize the tail queue.
*/
if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
TAILQ_INIT(&(*mutex)->m_queue);
(*mutex)->m_flags |= MUTEX_FLAGS_INITED;
+ _MUTEX_INIT_LINK(*mutex);
}
- /*
- * Guard against being preempted by a scheduling signal.
- * To support priority inheritence mutexes, we need to
- * maintain lists of mutex ownerships for each thread as
- * well as lists of waiting threads for each mutex. In
- * order to propagate priorities we need to atomically
- * walk these lists and cannot rely on a single mutex
- * lock to provide protection against modification.
- */
- _thread_kern_sched_defer();
-
- /* Lock the mutex structure: */
- _SPINLOCK(&(*mutex)->lock);
-
/* Process according to mutex type: */
switch ((*mutex)->m_protocol) {
/* Default POSIX mutex: */
@@ -392,6 +437,7 @@ pthread_mutex_lock(pthread_mutex_t * mutex)
(*mutex)->m_owner = _thread_run;
/* Add to the list of owned mutexes: */
+ _MUTEX_ASSERT_NOT_OWNED(*mutex);
TAILQ_INSERT_TAIL(&_thread_run->mutexq,
(*mutex), m_qe);
@@ -419,12 +465,6 @@ pthread_mutex_lock(pthread_mutex_t * mutex)
/* Lock the mutex structure again: */
_SPINLOCK(&(*mutex)->lock);
-
- /*
- * This thread is no longer waiting for
- * the mutex:
- */
- _thread_run->data.mutex = NULL;
}
break;
@@ -449,6 +489,7 @@ pthread_mutex_lock(pthread_mutex_t * mutex)
(*mutex)->m_prio;
/* Add to the list of owned mutexes: */
+ _MUTEX_ASSERT_NOT_OWNED(*mutex);
TAILQ_INSERT_TAIL(&_thread_run->mutexq,
(*mutex), m_qe);
@@ -481,12 +522,6 @@ pthread_mutex_lock(pthread_mutex_t * mutex)
/* Lock the mutex structure again: */
_SPINLOCK(&(*mutex)->lock);
-
- /*
- * This thread is no longer waiting for
- * the mutex:
- */
- _thread_run->data.mutex = NULL;
}
break;
@@ -519,6 +554,7 @@ pthread_mutex_lock(pthread_mutex_t * mutex)
(*mutex)->m_prio;
/* Add to the list of owned mutexes: */
+ _MUTEX_ASSERT_NOT_OWNED(*mutex);
TAILQ_INSERT_TAIL(&_thread_run->mutexq,
(*mutex), m_qe);
} else if ((*mutex)->m_owner == _thread_run)
@@ -556,12 +592,6 @@ pthread_mutex_lock(pthread_mutex_t * mutex)
*/
ret = _thread_run->error;
_thread_run->error = 0;
-
- /*
- * This thread is no longer waiting for
- * the mutex:
- */
- _thread_run->data.mutex = NULL;
}
break;
@@ -576,10 +606,10 @@ pthread_mutex_lock(pthread_mutex_t * mutex)
_SPINUNLOCK(&(*mutex)->lock);
/*
- * Renable preemption and yield if a scheduling signal
- * arrived while in the critical region:
+ * Undefer and handle pending signals, yielding if
+ * necessary:
*/
- _thread_kern_sched_undefer();
+ _thread_kern_sig_undefer();
}
/* Return the completion status: */
@@ -683,15 +713,10 @@ mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
ret = EINVAL;
} else {
/*
- * Guard against being preempted by a scheduling signal.
- * To support priority inheritence mutexes, we need to
- * maintain lists of mutex ownerships for each thread as
- * well as lists of waiting threads for each mutex. In
- * order to propagate priorities we need to atomically
- * walk these lists and cannot rely on a single mutex
- * lock to provide protection against modification.
+ * Defer signals to protect the scheduling queues from
+ * access by the signal handler:
*/
- _thread_kern_sched_defer();
+ _thread_kern_sig_defer();
/* Lock the mutex structure: */
_SPINLOCK(&(*mutex)->lock);
@@ -723,8 +748,10 @@ mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
(*mutex)->m_data.m_count = 0;
/* Remove the mutex from the threads queue. */
+ _MUTEX_ASSERT_IS_OWNED(*mutex);
TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
(*mutex), m_qe);
+ _MUTEX_INIT_LINK(*mutex);
/*
* Get the next thread from the queue of
@@ -738,6 +765,19 @@ mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
*/
PTHREAD_NEW_STATE((*mutex)->m_owner,
PS_RUNNING);
+
+ /*
+ * Add the mutex to the thread's list of
+ * owned mutexes:
+ */
+ TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
+ (*mutex), m_qe);
+
+ /*
+ * The owner is no longer waiting for
+ * this mutex:
+ */
+ (*mutex)->m_owner->data.mutex = NULL;
}
}
break;
@@ -784,8 +824,10 @@ mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
_thread_run->priority_mutex_count--;
/* Remove the mutex from the threads queue. */
+ _MUTEX_ASSERT_IS_OWNED(*mutex);
TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
(*mutex), m_qe);
+ _MUTEX_INIT_LINK(*mutex);
/*
* Get the next thread from the queue of threads
@@ -891,8 +933,10 @@ mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
_thread_run->priority_mutex_count--;
/* Remove the mutex from the threads queue. */
+ _MUTEX_ASSERT_IS_OWNED(*mutex);
TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
- (*mutex), m_qe);
+ (*mutex), m_qe);
+ _MUTEX_INIT_LINK(*mutex);
/*
* Enter a loop to find a waiting thread whose
@@ -913,6 +957,11 @@ mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
(*mutex)->m_owner->error = EINVAL;
PTHREAD_NEW_STATE((*mutex)->m_owner,
PS_RUNNING);
+ /*
+ * The thread is no longer waiting for
+ * this mutex:
+ */
+ (*mutex)->m_owner->data.mutex = NULL;
}
/* Check for a new owner: */
@@ -978,10 +1027,10 @@ mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
_SPINUNLOCK(&(*mutex)->lock);
/*
- * Renable preemption and yield if a scheduling signal
- * arrived while in the critical region:
+ * Undefer and handle pending signals, yielding if
+ * necessary:
*/
- _thread_kern_sched_undefer();
+ _thread_kern_sig_undefer();
}
/* Return the completion status: */
@@ -990,11 +1039,11 @@ mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
/*
- * This function is called when a change in base priority occurs
- * for a thread that is thread holding, or waiting for, a priority
- * protection or inheritence mutex. A change in a threads base
- * priority can effect changes to active priorities of other threads
- * and to the ordering of mutex locking by waiting threads.
+ * This function is called when a change in base priority occurs for
+ * a thread that is holding or waiting for a priority protection or
+ * inheritance mutex. A change in a thread's base priority can effect
+ * changes to active priorities of other threads and to the ordering
+ * of mutex locking by waiting threads.
*
* This must be called while thread scheduling is deferred.
*/
@@ -1231,8 +1280,7 @@ mutex_rescan_owned (pthread_t pthread, pthread_mutex_t mutex)
* If this thread is in the priority queue, it must be
* removed and reinserted for its new priority.
*/
- if ((pthread != _thread_run) &&
- (pthread->state == PS_RUNNING)) {
+ if (pthread->flags & PTHREAD_FLAGS_IN_PRIOQ) {
/*
* Remove the thread from the priority queue
* before changing its priority:
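
The _MUTEX_INIT_LINK/_MUTEX_ASSERT_* macros added above rely on a TAILQ property: an element's link fields are non-NULL while it is on a list, and TAILQ_REMOVE() leaves them stale, so they must be re-cleared before the next membership check. A toy sketch of the same invariant (struct mtx and the abort() stand in for the library's types and PANIC()):

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/queue.h>

    struct mtx {
    	TAILQ_ENTRY(mtx) m_qe;
    	int id;
    };
    TAILQ_HEAD(mtx_head, mtx);

    #define	MTX_INIT_LINK(m) do {				\
    	(m)->m_qe.tqe_prev = NULL;			\
    	(m)->m_qe.tqe_next = NULL;			\
    } while (0)

    #define	MTX_ASSERT_NOT_OWNED(m) do {			\
    	if ((m)->m_qe.tqe_prev != NULL ||		\
    	    (m)->m_qe.tqe_next != NULL)			\
    		abort();	/* "mutex is on list" */	\
    } while (0)

    int
    main(void)
    {
    	struct mtx_head owned = TAILQ_HEAD_INITIALIZER(owned);
    	struct mtx m = { .id = 1 };

    	MTX_INIT_LINK(&m);
    	MTX_ASSERT_NOT_OWNED(&m);	/* ok: links are NULL */
    	TAILQ_INSERT_TAIL(&owned, &m, m_qe);
    	/* MTX_ASSERT_NOT_OWNED(&m) would now abort(). */
    	TAILQ_REMOVE(&owned, &m, m_qe);
    	MTX_INIT_LINK(&m);		/* re-clear stale links */
    	MTX_ASSERT_NOT_OWNED(&m);
    	printf("invariants held\n");
    	return (0);
    }
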
diff --git a/lib/libpthread/thread/thr_poll.c b/lib/libpthread/thread/thr_poll.c
new file mode 100644
index 0000000..a54d023
--- /dev/null
+++ b/lib/libpthread/thread/thr_poll.c
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 1999 Daniel Eischen <eischen@vigrid.com>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Daniel Eischen.
+ * 4. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id$
+ */
+#include <unistd.h>
+#include <errno.h>
+#include <string.h>
+#include <poll.h>
+#include <sys/types.h>
+#include <sys/time.h>
+#include <sys/fcntl.h>
+#ifdef _THREAD_SAFE
+#include <pthread.h>
+#include "pthread_private.h"
+
+
+int
+poll(struct pollfd *fds, unsigned int nfds, int timeout)
+{
+ struct timespec ts;
+ int numfds = nfds;
+ int i, ret = 0, found = 0;
+ struct pthread_poll_data data;
+
+ if (numfds > _thread_dtablesize) {
+ numfds = _thread_dtablesize;
+ }
+ /* Check if a timeout was specified: */
+ if (timeout == INFTIM) {
+ /* Wait forever: */
+ _thread_kern_set_timeout(NULL);
+ } else if (timeout != 0) {
+ /* Convert the timeout in msec to a timespec: */
+ ts.tv_sec = timeout / 1000;
+ ts.tv_nsec = (timeout % 1000) * 1000000;
+
+ /* Set the wake up time: */
+ _thread_kern_set_timeout(&ts);
+ }
+
+ if (((ret = _thread_sys_poll(fds, numfds, 0)) == 0) && (timeout != 0)) {
+ data.nfds = numfds;
+ data.fds = fds;
+
+ /*
+ * Clear revents in case of a timeout which leaves fds
+ * unchanged:
+ */
+ for (i = 0; i < numfds; i++) {
+ fds[i].revents = 0;
+ }
+
+ _thread_run->data.poll_data = &data;
+ _thread_run->interrupted = 0;
+ _thread_kern_sched_state(PS_POLL_WAIT, __FILE__, __LINE__);
+ if (_thread_run->interrupted) {
+ errno = EINTR;
+ ret = -1;
+ } else {
+ ret = data.nfds;
+ }
+ }
+
+ return (ret);
+}
+#endif
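
A short usage sketch of the wrapper above: a millisecond timeout is tried non-blocking first, and only if nothing is ready is it converted to a timespec wakeup (1500 ms becomes tv_sec = 1, tv_nsec = 500000000) before the thread sleeps in PS_POLL_WAIT. The descriptor choice here is arbitrary:

    #include <poll.h>
    #include <stdio.h>
    #include <unistd.h>

    int
    main(void)
    {
    	struct pollfd pfd;
    	int n;

    	pfd.fd = STDIN_FILENO;
    	pfd.events = POLLRDNORM;
    	pfd.revents = 0;

    	/* 1500 ms: converted to tv_sec = 1, tv_nsec = 500000000. */
    	n = poll(&pfd, 1, 1500);
    	if (n > 0 && (pfd.revents & POLLRDNORM))
    		printf("stdin readable\n");
    	else if (n == 0)
    		printf("timed out\n");
    	return (0);
    }
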
diff --git a/lib/libpthread/thread/thr_priority_queue.c b/lib/libpthread/thread/thr_priority_queue.c
index 516a1e0..0a00a9d3 100644
--- a/lib/libpthread/thread/thr_priority_queue.c
+++ b/lib/libpthread/thread/thr_priority_queue.c
@@ -29,6 +29,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
+ * $Id$
*/
#include <stdlib.h>
#include <sys/queue.h>
@@ -40,9 +41,51 @@
/* Prototypes: */
static void pq_insert_prio_list(pq_queue_t *pq, int prio);
+#if defined(_PTHREADS_INVARIANTS)
+
+static int _pq_active = 0;
+
+#define _PQ_IN_SCHEDQ (PTHREAD_FLAGS_IN_PRIOQ | PTHREAD_FLAGS_IN_WAITQ | PTHREAD_FLAGS_IN_WORKQ)
+
+#define _PQ_SET_ACTIVE() _pq_active = 1
+#define _PQ_CLEAR_ACTIVE() _pq_active = 0
+#define _PQ_ASSERT_ACTIVE(msg) do { \
+ if (_pq_active == 0) \
+ PANIC(msg); \
+} while (0)
+#define _PQ_ASSERT_INACTIVE(msg) do { \
+ if (_pq_active != 0) \
+ PANIC(msg); \
+} while (0)
+#define _PQ_ASSERT_IN_WAITQ(thrd, msg) do { \
+ if (((thrd)->flags & PTHREAD_FLAGS_IN_WAITQ) == 0) \
+ PANIC(msg); \
+} while (0)
+#define _PQ_ASSERT_IN_PRIOQ(thrd, msg) do { \
+ if (((thrd)->flags & PTHREAD_FLAGS_IN_PRIOQ) == 0) \
+ PANIC(msg); \
+} while (0)
+#define _PQ_ASSERT_NOT_QUEUED(thrd, msg) do { \
+ if ((thrd)->flags & _PQ_IN_SCHEDQ) \
+ PANIC(msg); \
+} while (0)
+
+#else
+
+#define _PQ_SET_ACTIVE()
+#define _PQ_CLEAR_ACTIVE()
+#define _PQ_ASSERT_ACTIVE(msg)
+#define _PQ_ASSERT_INACTIVE(msg)
+#define _PQ_ASSERT_IN_WAITQ(thrd, msg)
+#define _PQ_ASSERT_IN_PRIOQ(thrd, msg)
+#define _PQ_ASSERT_NOT_QUEUED(thrd, msg)
+#define _PQ_CHECK_PRIO()
+
+#endif
+
int
-_pq_init(pq_queue_t *pq, int minprio, int maxprio)
+_pq_alloc(pq_queue_t *pq, int minprio, int maxprio)
{
int i, ret = 0;
int prioslots = maxprio - minprio + 1;
@@ -56,8 +99,26 @@ _pq_init(pq_queue_t *pq, int minprio, int maxprio)
ret = -1;
else {
+ /* Remember the queue size: */
+ pq->pq_size = prioslots;
+
+ ret = _pq_init(pq);
+
+ }
+ return (ret);
+}
+
+int
+_pq_init(pq_queue_t *pq)
+{
+ int i, ret = 0;
+
+ if ((pq == NULL) || (pq->pq_lists == NULL))
+ ret = -1;
+
+ else {
/* Initialize the queue for each priority slot: */
- for (i = 0; i < prioslots; i++) {
+ for (i = 0; i < pq->pq_size; i++) {
TAILQ_INIT(&pq->pq_lists[i].pl_head);
pq->pq_lists[i].pl_prio = i;
pq->pq_lists[i].pl_queued = 0;
@@ -65,9 +126,7 @@ _pq_init(pq_queue_t *pq, int minprio, int maxprio)
/* Initialize the priority queue: */
TAILQ_INIT(&pq->pq_queue);
-
- /* Remember the queue size: */
- pq->pq_size = prioslots;
+ _PQ_CLEAR_ACTIVE();
}
return (ret);
}
@@ -77,7 +136,27 @@ _pq_remove(pq_queue_t *pq, pthread_t pthread)
{
int prio = pthread->active_priority;
+ /*
+ * Make some assertions when debugging is enabled:
+ */
+ _PQ_ASSERT_INACTIVE("_pq_remove: pq_active");
+ _PQ_SET_ACTIVE();
+ _PQ_ASSERT_IN_PRIOQ(pthread, "_pq_remove: Not in priority queue");
+
+ /*
+ * Remove this thread from the priority list. Note that if
+ * the priority list becomes empty, it is not removed
+ * from the priority queue because another thread may be
+ * added to the priority list (resulting in a needless
+ * removal/insertion). Priority lists are only removed
+ * from the priority queue when _pq_first is called.
+ */
TAILQ_REMOVE(&pq->pq_lists[prio].pl_head, pthread, pqe);
+
+ /* This thread is no longer in the priority queue. */
+ pthread->flags &= ~PTHREAD_FLAGS_IN_PRIOQ;
+
+ _PQ_CLEAR_ACTIVE();
}
@@ -86,10 +165,23 @@ _pq_insert_head(pq_queue_t *pq, pthread_t pthread)
{
int prio = pthread->active_priority;
+ /*
+ * Make some assertions when debugging is enabled:
+ */
+ _PQ_ASSERT_INACTIVE("_pq_insert_head: pq_active");
+ _PQ_SET_ACTIVE();
+ _PQ_ASSERT_NOT_QUEUED(pthread,
+ "_pq_insert_head: Already in priority queue");
+
TAILQ_INSERT_HEAD(&pq->pq_lists[prio].pl_head, pthread, pqe);
if (pq->pq_lists[prio].pl_queued == 0)
/* Insert the list into the priority queue: */
pq_insert_prio_list(pq, prio);
+
+ /* Mark this thread as being in the priority queue. */
+ pthread->flags |= PTHREAD_FLAGS_IN_PRIOQ;
+
+ _PQ_CLEAR_ACTIVE();
}
@@ -98,10 +190,23 @@ _pq_insert_tail(pq_queue_t *pq, pthread_t pthread)
{
int prio = pthread->active_priority;
+ /*
+ * Make some assertions when debugging is enabled:
+ */
+ _PQ_ASSERT_INACTIVE("_pq_insert_tail: pq_active");
+ _PQ_SET_ACTIVE();
+ _PQ_ASSERT_NOT_QUEUED(pthread,
+ "_pq_insert_tail: Already in priority queue");
+
TAILQ_INSERT_TAIL(&pq->pq_lists[prio].pl_head, pthread, pqe);
if (pq->pq_lists[prio].pl_queued == 0)
/* Insert the list into the priority queue: */
pq_insert_prio_list(pq, prio);
+
+ /* Mark this thread as being in the priority queue. */
+ pthread->flags |= PTHREAD_FLAGS_IN_PRIOQ;
+
+ _PQ_CLEAR_ACTIVE();
}
@@ -111,6 +216,12 @@ _pq_first(pq_queue_t *pq)
pq_list_t *pql;
pthread_t pthread = NULL;
+ /*
+ * Make some assertions when debugging is enabled:
+ */
+ _PQ_ASSERT_INACTIVE("_pq_first: pq_active");
+ _PQ_SET_ACTIVE();
+
while (((pql = TAILQ_FIRST(&pq->pq_queue)) != NULL) &&
(pthread == NULL)) {
if ((pthread = TAILQ_FIRST(&pql->pl_head)) == NULL) {
@@ -124,6 +235,8 @@ _pq_first(pq_queue_t *pq)
pql->pl_queued = 0;
}
}
+
+ _PQ_CLEAR_ACTIVE();
return (pthread);
}
@@ -134,9 +247,14 @@ pq_insert_prio_list(pq_queue_t *pq, int prio)
pq_list_t *pql;
/*
+ * Make some assertions when debugging is enabled:
+ */
+ _PQ_ASSERT_ACTIVE("pq_insert_prio_list: pq_active");
+
+ /*
* The priority queue is in descending priority order. Start at
* the beginning of the queue and find the list before which the
- * new list should to be inserted.
+ * new list should be inserted.
*/
pql = TAILQ_FIRST(&pq->pq_queue);
while ((pql != NULL) && (pql->pl_prio > prio))
@@ -152,4 +270,66 @@ pq_insert_prio_list(pq_queue_t *pq, int prio)
pq->pq_lists[prio].pl_queued = 1;
}
+#if defined(_PTHREADS_INVARIANTS)
+void
+_waitq_insert(pthread_t pthread)
+{
+ pthread_t tid;
+
+ /*
+ * Make some assertions when debugging is enabled:
+ */
+ _PQ_ASSERT_INACTIVE("_waitq_insert: pq_active");
+ _PQ_SET_ACTIVE();
+ _PQ_ASSERT_NOT_QUEUED(pthread, "_waitq_insert: Already in queue");
+
+ if (pthread->wakeup_time.tv_sec == -1)
+ TAILQ_INSERT_TAIL(&_waitingq, pthread, pqe);
+ else {
+ tid = TAILQ_FIRST(&_waitingq);
+ while ((tid != NULL) && (tid->wakeup_time.tv_sec != -1) &&
+ ((tid->wakeup_time.tv_sec < pthread->wakeup_time.tv_sec) ||
+ ((tid->wakeup_time.tv_sec == pthread->wakeup_time.tv_sec) &&
+ (tid->wakeup_time.tv_nsec <= pthread->wakeup_time.tv_nsec))))
+ tid = TAILQ_NEXT(tid, pqe);
+ if (tid == NULL)
+ TAILQ_INSERT_TAIL(&_waitingq, pthread, pqe);
+ else
+ TAILQ_INSERT_BEFORE(tid, pthread, pqe);
+ }
+ pthread->flags |= PTHREAD_FLAGS_IN_WAITQ;
+
+ _PQ_CLEAR_ACTIVE();
+}
+
+void
+_waitq_remove(pthread_t pthread)
+{
+ /*
+ * Make some assertions when debugging is enabled:
+ */
+ _PQ_ASSERT_INACTIVE("_waitq_remove: pq_active");
+ _PQ_SET_ACTIVE();
+ _PQ_ASSERT_IN_WAITQ(pthread, "_waitq_remove: Not in queue");
+
+ TAILQ_REMOVE(&_waitingq, pthread, pqe);
+ pthread->flags &= ~PTHREAD_FLAGS_IN_WAITQ;
+
+ _PQ_CLEAR_ACTIVE();
+}
+
+void
+_waitq_setactive(void)
+{
+ _PQ_ASSERT_INACTIVE("_waitq_setactive: pq_active");
+ _PQ_SET_ACTIVE();
+}
+
+void
+_waitq_clearactive(void)
+{
+ _PQ_ASSERT_ACTIVE("_waitq_clearactive: ! pq_active");
+ _PQ_CLEAR_ACTIVE();
+}
+#endif
#endif
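
Splitting _pq_init() out of _pq_alloc() above is what allows a forked child to reset the ready queue in place instead of reallocating it. A toy mirror of that split, under the same assumptions (struct pq here is a simplification of pq_queue_t, not the library's definition):

    #include <stdlib.h>
    #include <sys/queue.h>

    struct item { TAILQ_ENTRY(item) qe; };
    struct plist { TAILQ_HEAD(, item) head; int queued; };
    struct pq { struct plist *lists; int size; };

    static int
    pq_init(struct pq *pq)
    {
    	int i;

    	if (pq == NULL || pq->lists == NULL)
    		return (-1);
    	for (i = 0; i < pq->size; i++) {
    		TAILQ_INIT(&pq->lists[i].head);
    		pq->lists[i].queued = 0;
    	}
    	return (0);
    }

    static int
    pq_alloc(struct pq *pq, int minprio, int maxprio)
    {
    	pq->size = maxprio - minprio + 1;
    	pq->lists = calloc(pq->size, sizeof(*pq->lists));
    	return (pq->lists == NULL ? -1 : pq_init(pq));
    }

    int
    main(void)
    {
    	struct pq readyq;

    	if (pq_alloc(&readyq, 0, 31) != 0)	/* once, at startup */
    		return (1);
    	/*
    	 * After fork(), a child would call only pq_init(): the lists
    	 * are reset in place and the memory is never reallocated.
    	 */
    	return (pq_init(&readyq) == 0 ? 0 : 1);
    }
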
diff --git a/lib/libpthread/thread/thr_private.h b/lib/libpthread/thread/thr_private.h
index 6cacb27..036ea80 100644
--- a/lib/libpthread/thread/thr_private.h
+++ b/lib/libpthread/thread/thr_private.h
@@ -31,6 +31,7 @@
*
* Private thread definitions for the uthread kernel.
*
+ * $Id$
*/
#ifndef _PTHREAD_PRIVATE_H
@@ -68,33 +69,88 @@
/*
- * Priority queue manipulation macros:
+ * Priority queue manipulation macros (using pqe link):
*/
#define PTHREAD_PRIOQ_INSERT_HEAD(thrd) _pq_insert_head(&_readyq,thrd)
#define PTHREAD_PRIOQ_INSERT_TAIL(thrd) _pq_insert_tail(&_readyq,thrd)
#define PTHREAD_PRIOQ_REMOVE(thrd) _pq_remove(&_readyq,thrd)
-#define PTHREAD_PRIOQ_FIRST _pq_first(&_readyq)
+#define PTHREAD_PRIOQ_FIRST() _pq_first(&_readyq)
/*
- * Waiting queue manipulation macros:
+ * Waiting queue manipulation macros (using pqe link):
*/
-#define PTHREAD_WAITQ_INSERT(thrd) TAILQ_INSERT_TAIL(&_waitingq,thrd,pqe)
+#if defined(_PTHREADS_INVARIANTS)
+#define PTHREAD_WAITQ_REMOVE(thrd) _waitq_remove(thrd)
+#define PTHREAD_WAITQ_INSERT(thrd) _waitq_insert(thrd)
+#define PTHREAD_WAITQ_CLEARACTIVE() _waitq_clearactive()
+#define PTHREAD_WAITQ_SETACTIVE() _waitq_setactive()
+#else
#define PTHREAD_WAITQ_REMOVE(thrd) TAILQ_REMOVE(&_waitingq,thrd,pqe)
+#define PTHREAD_WAITQ_INSERT(thrd) do { \
+ if ((thrd)->wakeup_time.tv_sec == -1) \
+ TAILQ_INSERT_TAIL(&_waitingq,thrd,pqe); \
+ else { \
+ pthread_t tid = TAILQ_FIRST(&_waitingq); \
+ while ((tid != NULL) && (tid->wakeup_time.tv_sec != -1) && \
+ ((tid->wakeup_time.tv_sec < (thrd)->wakeup_time.tv_sec) || \
+ ((tid->wakeup_time.tv_sec == (thrd)->wakeup_time.tv_sec) && \
+ (tid->wakeup_time.tv_nsec <= (thrd)->wakeup_time.tv_nsec)))) \
+ tid = TAILQ_NEXT(tid, pqe); \
+ if (tid == NULL) \
+ TAILQ_INSERT_TAIL(&_waitingq,thrd,pqe); \
+ else \
+ TAILQ_INSERT_BEFORE(tid,thrd,pqe); \
+ } \
+} while (0)
+#define PTHREAD_WAITQ_CLEARACTIVE()
+#define PTHREAD_WAITQ_SETACTIVE()
+#endif
+
+/*
+ * Work queue manipulation macros (using qe link):
+ */
+#define PTHREAD_WORKQ_INSERT(thrd) do { \
+ TAILQ_INSERT_TAIL(&_workq,thrd,qe); \
+ (thrd)->flags |= PTHREAD_FLAGS_IN_WORKQ; \
+} while (0)
+#define PTHREAD_WORKQ_REMOVE(thrd) do { \
+ TAILQ_REMOVE(&_workq,thrd,qe); \
+ (thrd)->flags &= ~PTHREAD_FLAGS_IN_WORKQ; \
+} while (0)
+
/*
* State change macro without scheduling queue change:
*/
-#define PTHREAD_SET_STATE(thrd, newstate) { \
+#define PTHREAD_SET_STATE(thrd, newstate) do { \
(thrd)->state = newstate; \
(thrd)->fname = __FILE__; \
(thrd)->lineno = __LINE__; \
-}
+} while (0)
/*
* State change macro with scheduling queue change - This must be
* called with preemption deferred (see thread_kern_sched_[un]defer).
*/
-#define PTHREAD_NEW_STATE(thrd, newstate) { \
+#if defined(_PTHREADS_INVARIANTS)
+#define PTHREAD_NEW_STATE(thrd, newstate) do { \
+ if (_thread_kern_new_state != 0) \
+ PANIC("Recursive PTHREAD_NEW_STATE"); \
+ _thread_kern_new_state = 1; \
+ if ((thrd)->state != newstate) { \
+ if ((thrd)->state == PS_RUNNING) { \
+ PTHREAD_PRIOQ_REMOVE(thrd); \
+ PTHREAD_WAITQ_INSERT(thrd); \
+ } else if (newstate == PS_RUNNING) { \
+ PTHREAD_WAITQ_REMOVE(thrd); \
+ PTHREAD_PRIOQ_INSERT_TAIL(thrd); \
+ } \
+ } \
+ _thread_kern_new_state = 0; \
+ PTHREAD_SET_STATE(thrd, newstate); \
+} while (0)
+#else
+#define PTHREAD_NEW_STATE(thrd, newstate) do { \
if ((thrd)->state != newstate) { \
if ((thrd)->state == PS_RUNNING) { \
PTHREAD_PRIOQ_REMOVE(thrd); \
@@ -105,7 +161,8 @@
} \
} \
PTHREAD_SET_STATE(thrd, newstate); \
-}
+} while (0)
+#endif
/*
* Define the signals to be used for scheduling.
@@ -119,15 +176,6 @@
#endif
/*
- * Queue definitions.
- */
-struct pthread_queue {
- struct pthread *q_next;
- struct pthread *q_last;
- void *q_data;
-};
-
-/*
* Priority queues.
*
* XXX It'd be nice if these were contained in uthread_priority_queue.[ch].
@@ -147,14 +195,9 @@ typedef struct pq_queue {
/*
- * Static queue initialization values.
- */
-#define PTHREAD_QUEUE_INITIALIZER { NULL, NULL, NULL }
-
-/*
* TailQ initialization values.
*/
-#define TAILQ_INITIALIZER { NULL, NULL }
+#define TAILQ_INITIALIZER { NULL, NULL }
/*
* Mutex definitions.
@@ -257,7 +300,7 @@ struct pthread_cond_attr {
* Static cond initialization values.
*/
#define PTHREAD_COND_STATIC_INITIALIZER \
- { COND_TYPE_FAST, PTHREAD_QUEUE_INITIALIZER, NULL, NULL, \
+ { COND_TYPE_FAST, TAILQ_INITIALIZER, NULL, NULL, \
0, _SPINLOCK_INITIALIZER }
/*
@@ -303,12 +346,6 @@ struct pthread_attr {
#define CLOCK_RES_NSEC 10000000
/*
- * Number of microseconds between incremental priority updates for
- * threads that are ready to run, but denied being run.
- */
-#define INC_PRIO_USEC 500000
-
-/*
* Time slice period in microseconds.
*/
#define TIMESLICE_USEC 100000
@@ -345,6 +382,7 @@ enum pthread_state {
PS_FDR_WAIT,
PS_FDW_WAIT,
PS_FILE_WAIT,
+ PS_POLL_WAIT,
PS_SELECT_WAIT,
PS_SLEEP_WAIT,
PS_WAIT_WAIT,
@@ -377,8 +415,8 @@ struct fd_table_entry {
* state of the lock on the file descriptor.
*/
spinlock_t lock;
- struct pthread_queue r_queue; /* Read queue. */
- struct pthread_queue w_queue; /* Write queue. */
+ TAILQ_HEAD(, pthread) r_queue; /* Read queue. */
+ TAILQ_HEAD(, pthread) w_queue; /* Write queue. */
struct pthread *r_owner; /* Ptr to thread owning read lock. */
struct pthread *w_owner; /* Ptr to thread owning write lock. */
char *r_fname; /* Ptr to read lock source file name */
@@ -390,11 +428,9 @@ struct fd_table_entry {
int flags; /* Flags used in open. */
};
-struct pthread_select_data {
+struct pthread_poll_data {
int nfds;
- fd_set readfds;
- fd_set writefds;
- fd_set exceptfds;
+ struct pollfd *fds;
};
union pthread_wait_data {
@@ -406,7 +442,7 @@ union pthread_wait_data {
short branch; /* Line number, for debugging. */
char *fname; /* Source file name for debugging.*/
} fd;
- struct pthread_select_data * select_data;
+ struct pthread_poll_data * poll_data;
spinlock_t *spinlock;
};
@@ -427,15 +463,11 @@ struct pthread {
*/
spinlock_t lock;
- /*
- * Pointer to the next thread in the thread linked list.
- */
- struct pthread *nxt;
+ /* Queue entry for list of all threads: */
+ TAILQ_ENTRY(pthread) tle;
- /*
- * Pointer to the next thread in the dead thread linked list.
- */
- struct pthread *nxt_dead;
+ /* Queue entry for list of dead threads: */
+ TAILQ_ENTRY(pthread) dle;
/*
* Thread start routine, argument, stack pointer and thread
@@ -514,25 +546,25 @@ struct pthread {
*/
int error;
- /* Join queue for waiting threads: */
- struct pthread_queue join_queue;
+ /* Join queue head and link for waiting threads: */
+ TAILQ_HEAD(join_head, pthread) join_queue;
/*
- * The current thread can belong to only one scheduling queue
- * at a time (ready or waiting queue). It can also belong to
- * a queue of threads waiting on mutexes or condition variables.
- * Use pqe for the scheduling queue link (both ready and waiting),
- * and qe for other links (mutexes and condition variables).
+ * The current thread can belong to only one scheduling queue at
+ * a time (ready or waiting queue). It can also belong to (only)
+ * one of:
*
- * Pointer to queue (if any) on which the current thread is waiting.
+ * o A queue of threads waiting for a mutex
+ * o A queue of threads waiting for a condition variable
+ * o A queue of threads waiting for another thread to terminate
+ * (the join queue above)
+ * o A queue of threads waiting for a file descriptor lock
+ * o A queue of threads needing work done by the kernel thread
+ * (waiting for a spinlock or file I/O)
*
- * XXX The queuing should be changed to use the TAILQ entry below.
- * XXX For the time being, it's hybrid.
+ * Use pqe for the scheduling queue link (both ready and waiting),
+ * and qe for other links.
*/
- struct pthread_queue *queue;
-
- /* Pointer to next element in queue. */
- struct pthread *qnxt;
/* Priority queue entry for this thread: */
TAILQ_ENTRY(pthread) pqe;
@@ -544,6 +576,11 @@ struct pthread {
union pthread_wait_data data;
/*
+ * Allocated for converting select into poll.
+ */
+ struct pthread_poll_data poll_data;
+
+ /*
* Set to TRUE if a blocking operation was
* interrupted by a signal:
*/
@@ -553,23 +590,26 @@ struct pthread {
int signo;
/*
- * Set to non-zero when this thread has deferred thread
- * scheduling. We allow for recursive deferral.
+ * Set to non-zero when this thread has deferred signals.
+ * We allow for recursive deferral.
*/
- int sched_defer_count;
+ int sig_defer_count;
/*
* Set to TRUE if this thread should yield after undeferring
- * thread scheduling.
+ * signals.
*/
- int yield_on_sched_undefer;
+ int yield_on_sig_undefer;
/* Miscellaneous data. */
int flags;
#define PTHREAD_FLAGS_PRIVATE 0x0001
#define PTHREAD_EXITING 0x0002
-#define PTHREAD_FLAGS_QUEUED 0x0004 /* in queue (qe is used) */
-#define PTHREAD_FLAGS_TRACE 0x0008
+#define PTHREAD_FLAGS_IN_CONDQ 0x0004 /* in condition queue using qe link */
+#define PTHREAD_FLAGS_IN_WORKQ 0x0008 /* in work queue using qe link */
+#define PTHREAD_FLAGS_IN_WAITQ 0x0010 /* in waiting queue using pqe link */
+#define PTHREAD_FLAGS_IN_PRIOQ 0x0020 /* in priority queue using pqe link */
+#define PTHREAD_FLAGS_TRACE 0x0040 /* for debugging purposes */
/*
* Base priority is the user setable and retrievable priority
@@ -592,7 +632,7 @@ struct pthread {
/*
* Active priority is always the maximum of the threads base
* priority and inherited priority. When there is a change
- * in either the real or inherited priority, the active
+ * in either the base or inherited priority, the active
* priority must be recalculated.
*/
char active_priority;
@@ -649,10 +689,10 @@ SCLASS struct pthread * volatile _thread_single
;
#endif
-/* Ptr to the first thread in the thread linked list: */
-SCLASS struct pthread * volatile _thread_link_list
+/* List of all threads: */
+SCLASS TAILQ_HEAD(, pthread) _thread_list
#ifdef GLOBAL_PTHREAD_PRIVATE
-= NULL;
+= TAILQ_HEAD_INITIALIZER(_thread_list);
#else
;
#endif
@@ -661,7 +701,7 @@ SCLASS struct pthread * volatile _thread_link_list
* Array of kernel pipe file descriptors that are used to ensure that
* no signals are missed in calls to _select.
*/
-SCLASS int _thread_kern_pipe[2]
+SCLASS int _thread_kern_pipe[2]
#ifdef GLOBAL_PTHREAD_PRIVATE
= {
-1,
@@ -670,7 +710,7 @@ SCLASS int _thread_kern_pipe[2]
#else
;
#endif
-SCLASS int _thread_kern_in_select
+SCLASS int volatile _queue_signals
#ifdef GLOBAL_PTHREAD_PRIVATE
= 0;
#else
@@ -692,9 +732,9 @@ SCLASS struct timeval kern_inc_prio_time
#endif
/* Dead threads: */
-SCLASS struct pthread * volatile _thread_dead
+SCLASS TAILQ_HEAD(, pthread) _dead_list
#ifdef GLOBAL_PTHREAD_PRIVATE
-= NULL;
+= TAILQ_HEAD_INITIALIZER(_dead_list);
#else
;
#endif
@@ -747,6 +787,14 @@ SCLASS struct fd_table_entry **_thread_fd_table
;
#endif
+/* Table for polling file descriptors: */
+SCLASS struct pollfd *_thread_pfd_table
+#ifdef GLOBAL_PTHREAD_PRIVATE
+= NULL;
+#else
+;
+#endif
+
SCLASS const int dtablecount
#ifdef GLOBAL_PTHREAD_PRIVATE
= 4096/sizeof(struct fd_table_entry);
@@ -760,6 +808,13 @@ SCLASS int _thread_dtablesize /* Descriptor table size. */
;
#endif
+SCLASS int _clock_res_nsec /* Clock resolution in nsec. */
+#ifdef GLOBAL_PTHREAD_PRIVATE
+= CLOCK_RES_NSEC;
+#else
+;
+#endif
+
/* Garbage collector mutex and condition variable. */
SCLASS pthread_mutex_t _gc_mutex
#ifdef GLOBAL_PTHREAD_PRIVATE
@@ -783,8 +838,20 @@ struct sigaction _thread_sigact[NSIG];
SCLASS pq_queue_t _readyq;
SCLASS TAILQ_HEAD(, pthread) _waitingq;
-/* Indicates that the waitingq now has threads ready to run. */
-SCLASS volatile int _waitingq_check_reqd
+/*
+ * Work queue:
+ */
+SCLASS TAILQ_HEAD(, pthread) _workq;
+
+/* Tracks the number of threads blocked while waiting for a spinlock. */
+SCLASS volatile int _spinblock_count
+#ifdef GLOBAL_PTHREAD_PRIVATE
+= 0
+#endif
+;
+
+/* Indicates that the signal queue needs to be checked. */
+SCLASS volatile int _sigq_check_reqd
#ifdef GLOBAL_PTHREAD_PRIVATE
= 0
#endif
@@ -797,6 +864,13 @@ SCLASS pthread_switch_routine_t _sched_switch_hook
#endif
;
+/* Used for _PTHREADS_INVARIANTS checking. */
+SCLASS int _thread_kern_new_state
+#ifdef GLOBAL_PTHREAD_PRIVATE
+= 0
+#endif
+;
+
/* Undefine the storage class specifier: */
#undef SCLASS
@@ -825,18 +899,23 @@ int _thread_fd_lock(int, int, struct timespec *);
int _thread_fd_lock_debug(int, int, struct timespec *,char *fname,int lineno);
void _dispatch_signals(void);
void _thread_signal(pthread_t, int);
-void _lock_thread(void);
-void _lock_thread_list(void);
-void _unlock_thread(void);
-void _unlock_thread_list(void);
int _mutex_cv_lock(pthread_mutex_t *);
int _mutex_cv_unlock(pthread_mutex_t *);
+int _mutex_reinit(pthread_mutex_t *);
void _mutex_notify_priochange(struct pthread *);
-int _pq_init(struct pq_queue *pq, int, int);
+int _cond_reinit(pthread_cond_t *);
+int _pq_alloc(struct pq_queue *, int, int);
+int _pq_init(struct pq_queue *);
void _pq_remove(struct pq_queue *pq, struct pthread *);
void _pq_insert_head(struct pq_queue *pq, struct pthread *);
void _pq_insert_tail(struct pq_queue *pq, struct pthread *);
struct pthread *_pq_first(struct pq_queue *pq);
+#if defined(_PTHREADS_INVARIANTS)
+void _waitq_insert(pthread_t pthread);
+void _waitq_remove(pthread_t pthread);
+void _waitq_setactive(void);
+void _waitq_clearactive(void);
+#endif
void _thread_exit(char *, int, char *);
void _thread_fd_unlock(int, int);
void _thread_fd_unlock_debug(int, int, char *, int);
@@ -849,18 +928,15 @@ void _thread_kern_sched_state(enum pthread_state,char *fname,int lineno);
void _thread_kern_sched_state_unlock(enum pthread_state state,
spinlock_t *lock, char *fname, int lineno);
void _thread_kern_set_timeout(struct timespec *);
-void _thread_kern_sched_defer(void);
-void _thread_kern_sched_undefer(void);
+void _thread_kern_sig_defer(void);
+void _thread_kern_sig_undefer(void);
void _thread_sig_handler(int, int, struct sigcontext *);
+void _thread_sig_handle(int, struct sigcontext *);
+void _thread_sig_init(void);
void _thread_start(void);
void _thread_start_sig_handler(void);
void _thread_seterrno(pthread_t,int);
-void _thread_queue_init(struct pthread_queue *);
-void _thread_queue_enq(struct pthread_queue *, struct pthread *);
-int _thread_queue_remove(struct pthread_queue *, struct pthread *);
int _thread_fd_table_init(int fd);
-struct pthread *_thread_queue_get(struct pthread_queue *);
-struct pthread *_thread_queue_deq(struct pthread_queue *);
pthread_addr_t _thread_gc(pthread_addr_t);
/* #include <signal.h> */
@@ -1036,6 +1112,11 @@ pid_t _thread_sys_waitpid(pid_t, int *, int);
pid_t _thread_sys_wait3(int *, int, struct rusage *);
pid_t _thread_sys_wait4(pid_t, int *, int, struct rusage *);
#endif
+
+/* #include <poll.h> */
+#ifdef _SYS_POLL_H_
+int _thread_sys_poll(struct pollfd *, unsigned, int);
+#endif
__END_DECLS
#endif /* !_PTHREAD_PRIVATE_H */
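
The PTHREAD_SET_STATE/PTHREAD_NEW_STATE macros above switch from bare braces to do { ... } while (0). A minimal sketch of why: the brace-only form breaks when the macro is used as a single statement before an else. All names here are illustrative:

    #include <stdio.h>

    static int state, lineno;

    /* Brace-only version: a trailing ';' after it ends the 'if'. */
    #define	SET_STATE_BAD(s)	{ state = (s); lineno = __LINE__; }

    /* do/while(0) version, as adopted in the diff: always one statement. */
    #define	SET_STATE(s)	do {		\
    	state = (s);			\
    	lineno = __LINE__;		\
    } while (0)

    int
    main(void)
    {
    	int ready = 1;

    	/* With SET_STATE_BAD(1); here, the 'else' would not compile. */
    	if (ready)
    		SET_STATE(1);
    	else
    		SET_STATE(0);
    	printf("state %d set at line %d\n", state, lineno);
    	return (0);
    }
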
diff --git a/lib/libpthread/thread/thr_resume_np.c b/lib/libpthread/thread/thr_resume_np.c
index 885a457..b12baba 100644
--- a/lib/libpthread/thread/thr_resume_np.c
+++ b/lib/libpthread/thread/thr_resume_np.c
@@ -29,6 +29,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
+ * $Id$
*/
#include <errno.h>
#ifdef _THREAD_SAFE
@@ -46,20 +47,19 @@ pthread_resume_np(pthread_t thread)
/* The thread exists. Is it suspended? */
if (thread->state != PS_SUSPENDED) {
/*
- * Guard against preemption by a scheduling signal.
- * A change of thread state modifies the waiting
- * and priority queues.
+ * Defer signals to protect the scheduling queues
+ * from access by the signal handler:
*/
- _thread_kern_sched_defer();
+ _thread_kern_sig_defer();
/* Allow the thread to run. */
PTHREAD_NEW_STATE(thread,PS_RUNNING);
/*
- * Reenable preemption and yield if a scheduling
- * signal occurred while in the critical region.
+ * Undefer and handle pending signals, yielding if
+ * necessary:
*/
- _thread_kern_sched_undefer();
+ _thread_kern_sig_undefer();
}
}
return(ret);
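
The thr_select.c diff that follows converts each fd_set membership into poll events: read maps to POLLRDNORM, except to POLLRDBAND, and write to POLLWRNORM. A self-contained sketch of that per-descriptor mapping, assuming nothing beyond <poll.h> and <sys/select.h>:

    #include <poll.h>
    #include <string.h>
    #include <sys/select.h>

    /* Map one descriptor's fd_set membership to poll events. */
    static int
    fdset_to_pollfd(int fd, const fd_set *r, const fd_set *w,
        const fd_set *e, struct pollfd *pfd)
    {
    	memset(pfd, 0, sizeof(*pfd));
    	if (r != NULL && FD_ISSET(fd, r))
    		pfd->events |= POLLRDNORM;
    	if (e != NULL && FD_ISSET(fd, e))
    		pfd->events |= POLLRDBAND;
    	if (w != NULL && FD_ISSET(fd, w))
    		pfd->events |= POLLWRNORM;
    	if (pfd->events == 0)
    		return (0);	/* descriptor was not selected; skip it */
    	pfd->fd = fd;
    	return (1);
    }

    int
    main(void)
    {
    	fd_set rfds;
    	struct pollfd pfd;

    	FD_ZERO(&rfds);
    	FD_SET(0, &rfds);	/* select stdin for read */
    	if (fdset_to_pollfd(0, &rfds, NULL, NULL, &pfd))
    		return (poll(&pfd, 1, 0) < 0);
    	return (0);
    }
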
diff --git a/lib/libpthread/thread/thr_select.c b/lib/libpthread/thread/thr_select.c
index 6d7d7dc..c2f86c5 100644
--- a/lib/libpthread/thread/thr_select.c
+++ b/lib/libpthread/thread/thr_select.c
@@ -29,10 +29,13 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
+ * $Id$
*/
#include <unistd.h>
#include <errno.h>
+#include <poll.h>
#include <string.h>
+#include <sys/param.h>
#include <sys/types.h>
#include <sys/time.h>
#include <sys/fcntl.h>
@@ -44,12 +47,10 @@ int
select(int numfds, fd_set * readfds, fd_set * writefds,
fd_set * exceptfds, struct timeval * timeout)
{
- fd_set read_locks, write_locks, rdwr_locks;
struct timespec ts;
- struct timeval zero_timeout = {0, 0};
- int i, ret = 0, got_all_locks = 1;
- int f_wait = 1;
- struct pthread_select_data data;
+ int i, ret = 0, f_wait = 1;
+ int pfd_index, got_one = 0, fd_count = 0;
+ struct pthread_poll_data data;
if (numfds > _thread_dtablesize) {
numfds = _thread_dtablesize;
@@ -68,112 +69,129 @@ select(int numfds, fd_set * readfds, fd_set * writefds,
_thread_kern_set_timeout(NULL);
}
- FD_ZERO(&read_locks);
- FD_ZERO(&write_locks);
- FD_ZERO(&rdwr_locks);
-
- /* lock readfds */
+ /* Count the number of file descriptors to be polled: */
if (readfds || writefds || exceptfds) {
for (i = 0; i < numfds; i++) {
- if ((readfds && (FD_ISSET(i, readfds))) || (exceptfds && FD_ISSET(i, exceptfds))) {
- if (writefds && FD_ISSET(i, writefds)) {
- if ((ret = _FD_LOCK(i, FD_RDWR, NULL)) != 0) {
- got_all_locks = 0;
- break;
- }
- FD_SET(i, &rdwr_locks);
- } else {
- if ((ret = _FD_LOCK(i, FD_READ, NULL)) != 0) {
- got_all_locks = 0;
- break;
- }
- FD_SET(i, &read_locks);
- }
- } else {
- if (writefds && FD_ISSET(i, writefds)) {
- if ((ret = _FD_LOCK(i, FD_WRITE, NULL)) != 0) {
- got_all_locks = 0;
- break;
- }
- FD_SET(i, &write_locks);
- }
+ if ((readfds && FD_ISSET(i, readfds)) ||
+ (exceptfds && FD_ISSET(i, exceptfds)) ||
+ (writefds && FD_ISSET(i, writefds))) {
+ fd_count++;
}
}
}
- if (got_all_locks) {
- data.nfds = numfds;
- FD_ZERO(&data.readfds);
- FD_ZERO(&data.writefds);
- FD_ZERO(&data.exceptfds);
- if (readfds != NULL) {
- memcpy(&data.readfds, readfds, sizeof(data.readfds));
- }
- if (writefds != NULL) {
- memcpy(&data.writefds, writefds, sizeof(data.writefds));
+
+ /*
+ * Allocate memory for poll data if it hasn't already been
+ * allocated or if previously allocated memory is insufficient.
+ */
+ if ((_thread_run->poll_data.fds == NULL) ||
+ (_thread_run->poll_data.nfds < fd_count)) {
+ data.fds = (struct pollfd *) realloc(_thread_run->poll_data.fds,
+ sizeof(struct pollfd) * MAX(128, fd_count));
+ if (data.fds == NULL) {
+ errno = ENOMEM;
+ ret = -1;
}
- if (exceptfds != NULL) {
- memcpy(&data.exceptfds, exceptfds, sizeof(data.exceptfds));
+ else {
+ /*
+				 * Note that the thread's poll data always
+ * indicates what is allocated, not what is
+ * currently being polled.
+ */
+ _thread_run->poll_data.fds = data.fds;
+ _thread_run->poll_data.nfds = MAX(128, fd_count);
}
- if ((ret = _thread_sys_select(data.nfds, &data.readfds, &data.writefds, &data.exceptfds, &zero_timeout)) == 0 && f_wait) {
- data.nfds = numfds;
- FD_ZERO(&data.readfds);
- FD_ZERO(&data.writefds);
- FD_ZERO(&data.exceptfds);
- if (readfds != NULL) {
- memcpy(&data.readfds, readfds, sizeof(data.readfds));
+ }
+ if (ret == 0) {
+ /* Setup the wait data. */
+ data.fds = _thread_run->poll_data.fds;
+ data.nfds = fd_count;
+
+ /*
+ * Setup the array of pollfds. Optimize this by
+ * running the loop in reverse and stopping when
+ * the number of selected file descriptors is reached.
+ */
+ for (i = numfds - 1, pfd_index = fd_count - 1;
+ (i >= 0) && (pfd_index >= 0); i--) {
+ data.fds[pfd_index].events = 0;
+ if (readfds && FD_ISSET(i, readfds)) {
+ data.fds[pfd_index].events = POLLRDNORM;
}
- if (writefds != NULL) {
- memcpy(&data.writefds, writefds, sizeof(data.writefds));
+ if (exceptfds && FD_ISSET(i, exceptfds)) {
+ data.fds[pfd_index].events |= POLLRDBAND;
}
- if (exceptfds != NULL) {
- memcpy(&data.exceptfds, exceptfds, sizeof(data.exceptfds));
+ if (writefds && FD_ISSET(i, writefds)) {
+ data.fds[pfd_index].events |= POLLWRNORM;
+ }
+ if (data.fds[pfd_index].events != 0) {
+ /*
+ * Set the file descriptor to be polled and
+ * clear revents in case of a timeout which
+ * leaves fds unchanged:
+ */
+ data.fds[pfd_index].fd = i;
+ data.fds[pfd_index].revents = 0;
+ pfd_index--;
}
- _thread_run->data.select_data = &data;
+ }
+ if (((ret = _thread_sys_poll(data.fds, data.nfds, 0)) == 0) &&
+ (f_wait != 0)) {
+ _thread_run->data.poll_data = &data;
_thread_run->interrupted = 0;
_thread_kern_sched_state(PS_SELECT_WAIT, __FILE__, __LINE__);
if (_thread_run->interrupted) {
errno = EINTR;
+ data.nfds = 0;
ret = -1;
} else
ret = data.nfds;
}
}
- /* clean up the locks */
- for (i = 0; i < numfds; i++)
- if (FD_ISSET(i, &read_locks))
- _FD_UNLOCK(i, FD_READ);
- for (i = 0; i < numfds; i++)
- if (FD_ISSET(i, &rdwr_locks))
- _FD_UNLOCK(i, FD_RDWR);
- for (i = 0; i < numfds; i++)
- if (FD_ISSET(i, &write_locks))
- _FD_UNLOCK(i, FD_WRITE);
if (ret >= 0) {
- if (readfds != NULL) {
- for (i = 0; i < numfds; i++) {
- if (FD_ISSET(i, readfds) &&
- !FD_ISSET(i, &data.readfds)) {
- FD_CLR(i, readfds);
+ numfds = 0;
+ for (i = 0; i < fd_count; i++) {
+ /*
+ * Check the results of the poll and clear
+ * this file descriptor from the fdset if
+ * the requested event wasn't ready.
+ */
+ got_one = 0;
+ if (readfds != NULL) {
+ if (FD_ISSET(data.fds[i].fd, readfds)) {
+ if (data.fds[i].revents & (POLLIN |
+ POLLRDNORM))
+ got_one = 1;
+ else
+ FD_CLR(data.fds[i].fd, readfds);
}
}
- }
- if (writefds != NULL) {
- for (i = 0; i < numfds; i++) {
- if (FD_ISSET(i, writefds) &&
- !FD_ISSET(i, &data.writefds)) {
- FD_CLR(i, writefds);
+ if (writefds != NULL) {
+ if (FD_ISSET(data.fds[i].fd, writefds)) {
+ if (data.fds[i].revents & (POLLOUT |
+ POLLWRNORM | POLLWRBAND))
+ got_one = 1;
+ else
+ FD_CLR(data.fds[i].fd,
+ writefds);
}
}
- }
- if (exceptfds != NULL) {
- for (i = 0; i < numfds; i++) {
- if (FD_ISSET(i, exceptfds) &&
- !FD_ISSET(i, &data.exceptfds)) {
- FD_CLR(i, exceptfds);
+ if (exceptfds != NULL) {
+ if (FD_ISSET(data.fds[i].fd, exceptfds)) {
+ if (data.fds[i].revents & (POLLRDBAND |
+ POLLPRI | POLLHUP | POLLERR |
+ POLLNVAL))
+ got_one = 1;
+ else
+ FD_CLR(data.fds[i].fd,
+ exceptfds);
}
}
+ if (got_one)
+ numfds++;
}
+ ret = numfds;
}
return (ret);
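The rewritten select() above boils down to a translation between the fd_set and pollfd representations. The following standalone helper (not in the patch; the name is invented) shows the forward version of that mapping; the patch runs the same loop in reverse so it can stop as soon as fd_count descriptors have been found:

    #include <poll.h>
    #include <sys/select.h>

    /*
     * Translate up to nfds descriptors from three fd_sets into a
     * caller-supplied pollfd array; returns the number of entries
     * filled in.
     */
    static int
    fdsets_to_pollfds(int nfds, fd_set *r, fd_set *w, fd_set *e,
        struct pollfd *pfds)
    {
        int fd, n = 0;

        for (fd = 0; fd < nfds; fd++) {
            short events = 0;

            if (r != NULL && FD_ISSET(fd, r))
                events |= POLLRDNORM;       /* readable */
            if (w != NULL && FD_ISSET(fd, w))
                events |= POLLWRNORM;       /* writable */
            if (e != NULL && FD_ISSET(fd, e))
                events |= POLLRDBAND;       /* exceptional condition */
            if (events != 0) {
                pfds[n].fd = fd;
                pfds[n].events = events;
                pfds[n].revents = 0;        /* clear in case of timeout */
                n++;
            }
        }
        return (n);
    }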
diff --git a/lib/libpthread/thread/thr_setschedparam.c b/lib/libpthread/thread/thr_setschedparam.c
index 93635da..1ba0e20 100644
--- a/lib/libpthread/thread/thr_setschedparam.c
+++ b/lib/libpthread/thread/thr_setschedparam.c
@@ -29,6 +29,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
+ * $Id$
*/
#include <errno.h>
#include <sys/param.h>
@@ -50,10 +51,10 @@ pthread_setschedparam(pthread_t pthread, int policy, struct sched_param *param)
/* Find the thread in the list of active threads: */
else if ((ret = _find_thread(pthread)) == 0) {
/*
- * Guard against being preempted by a scheduling
- * signal:
+ * Defer signals to protect the scheduling queues from
+ * access by the signal handler:
*/
- _thread_kern_sched_defer();
+ _thread_kern_sig_defer();
if (param->sched_priority != pthread->base_priority) {
/*
@@ -61,8 +62,7 @@ pthread_setschedparam(pthread_t pthread, int policy, struct sched_param *param)
* queue before any adjustments are made to its
* active priority:
*/
- if ((pthread != _thread_run) &&
- (pthread->state == PS_RUNNING)) {
+ if ((pthread->flags & PTHREAD_FLAGS_IN_PRIOQ) != 0) {
in_readyq = 1;
old_prio = pthread->active_priority;
PTHREAD_PRIOQ_REMOVE(pthread);
@@ -103,10 +103,10 @@ pthread_setschedparam(pthread_t pthread, int policy, struct sched_param *param)
pthread->attr.sched_policy = policy;
/*
- * Renable preemption and yield if a scheduling signal
- * arrived while in the critical region:
+ * Undefer and handle pending signals, yielding if
+ * necessary:
*/
- _thread_kern_sched_undefer();
+ _thread_kern_sig_undefer();
}
return(ret);
}
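The switch from the old state test to the PTHREAD_FLAGS_IN_PRIOQ flag matters because a priority must never be edited while the thread is still linked into the priority queue; the entry has to come out, change, and go back in at its new position. A generic sketch of that remove/modify/reinsert pattern over a queue.h TAILQ (all names here are invented for illustration):

    #include <sys/queue.h>

    struct task {
        TAILQ_ENTRY(task) link;
        int prio;
        int in_queue;       /* analogue of PTHREAD_FLAGS_IN_PRIOQ */
    };
    TAILQ_HEAD(taskq, task);

    /* Assumes the queue is kept sorted, highest priority first. */
    static void
    task_setprio(struct taskq *q, struct task *t, int newprio)
    {
        struct task *it;

        if (t->in_queue)
            TAILQ_REMOVE(q, t, link);   /* never adjust prio in place */
        t->prio = newprio;
        if (t->in_queue) {
            /* Re-insert before the first lower-priority element. */
            TAILQ_FOREACH(it, q, link)
                if (it->prio < newprio)
                    break;
            if (it != NULL)
                TAILQ_INSERT_BEFORE(it, t, link);
            else
                TAILQ_INSERT_TAIL(q, t, link);
        }
    }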
diff --git a/lib/libpthread/thread/thr_sig.c b/lib/libpthread/thread/thr_sig.c
index e51d949..ac475a4 100644
--- a/lib/libpthread/thread/thr_sig.c
+++ b/lib/libpthread/thread/thr_sig.c
@@ -29,6 +29,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
+ * $Id$
*/
#include <signal.h>
#include <fcntl.h>
@@ -38,107 +39,51 @@
#include <pthread.h>
#include "pthread_private.h"
-/*
- * State change macro for signal handler:
- */
-#define PTHREAD_SIG_NEW_STATE(thrd, newstate) { \
- if ((_thread_run->sched_defer_count == 0) && \
- (_thread_kern_in_sched == 0)) { \
- PTHREAD_NEW_STATE(thrd, newstate); \
- } else { \
- _waitingq_check_reqd = 1; \
- PTHREAD_SET_STATE(thrd, newstate); \
- } \
-}
-
/* Static variables: */
-static int volatile yield_on_unlock_thread = 0;
-static spinlock_t thread_link_list_lock = _SPINLOCK_INITIALIZER;
-
-/* Lock the thread list: */
-void
-_lock_thread_list()
-{
- /* Lock the thread list: */
- _SPINLOCK(&thread_link_list_lock);
-}
+static spinlock_t signal_lock = _SPINLOCK_INITIALIZER;
+unsigned int pending_sigs[NSIG];
+unsigned int handled_sigs[NSIG];
+int volatile check_pending = 0;
-/* Lock the thread list: */
+/* Initialize signal handling facility: */
void
-_unlock_thread_list()
+_thread_sig_init(void)
{
- /* Unlock the thread list: */
- _SPINUNLOCK(&thread_link_list_lock);
-
- /*
- * Check if a scheduler interrupt occurred while the thread
- * list was locked:
- */
- if (yield_on_unlock_thread) {
- /* Reset the interrupt flag: */
- yield_on_unlock_thread = 0;
+ int i;
- /* This thread has overstayed it's welcome: */
- sched_yield();
+ /* Clear pending and handled signal counts: */
+ for (i = 1; i < NSIG; i++) {
+ pending_sigs[i - 1] = 0;
+ handled_sigs[i - 1] = 0;
}
+
+ /* Clear the lock: */
+ signal_lock.access_lock = 0;
}
void
_thread_sig_handler(int sig, int code, struct sigcontext * scp)
{
- char c;
- int i;
- pthread_t pthread;
-
- /*
- * Check if the pthread kernel has unblocked signals (or is about to)
- * and was on its way into a _select when the current
- * signal interrupted it:
- */
- if (_thread_kern_in_select) {
- /* Cast the signal number to a character variable: */
- c = sig;
-
- /*
- * Write the signal number to the kernel pipe so that it will
- * be ready to read when this signal handler returns. This
- * means that the _select call will complete
- * immediately.
- */
- _thread_sys_write(_thread_kern_pipe[1], &c, 1);
- }
- /* Check if the signal requires a dump of thread information: */
- if (sig == SIGINFO)
- /* Dump thread information to file: */
- _thread_dump_info();
+ char c;
+ int i;
/* Check if an interval timer signal: */
- else if (sig == _SCHED_SIGNAL) {
- /* Check if the scheduler interrupt has come at an
- * unfortunate time which one of the threads is
- * modifying the thread list:
- */
- if (thread_link_list_lock.access_lock)
+ if (sig == _SCHED_SIGNAL) {
+ if (_thread_kern_in_sched != 0) {
/*
- * Set a flag so that the thread that has
- * the lock yields when it unlocks the
- * thread list:
+ * The scheduler is already running; ignore this
+ * signal.
*/
- yield_on_unlock_thread = 1;
-
+ }
/*
* Check if the scheduler interrupt has come when
* the currently running thread has deferred thread
- * scheduling.
+ * signals.
*/
- else if (_thread_run->sched_defer_count)
- _thread_run->yield_on_sched_undefer = 1;
+ else if (_thread_run->sig_defer_count > 0)
+ _thread_run->yield_on_sig_undefer = 1;
- /*
- * Check if the kernel has not been interrupted while
- * executing scheduler code:
- */
- else if (!_thread_kern_in_sched) {
+ else {
/*
* Schedule the next thread. This function is not
* expected to return because it will do a longjmp
@@ -152,6 +97,72 @@ _thread_sig_handler(int sig, int code, struct sigcontext * scp)
*/
PANIC("Returned to signal function from scheduler");
}
+ }
+ /*
+ * Check if the kernel has been interrupted while the scheduler
+ * is accessing the scheduling queues or if there is a currently
+ * running thread that has deferred signals.
+ */
+ else if ((_queue_signals != 0) || ((_thread_kern_in_sched == 0) &&
+ (_thread_run->sig_defer_count > 0))) {
+ /* Cast the signal number to a character variable: */
+ c = sig;
+
+ /*
+ * Write the signal number to the kernel pipe so that it will
+ * be ready to read when this signal handler returns.
+ */
+ _thread_sys_write(_thread_kern_pipe[1], &c, 1);
+
+ /* Indicate that there are queued signals in the pipe. */
+ _sigq_check_reqd = 1;
+ }
+ else {
+ if (_atomic_lock(&signal_lock.access_lock)) {
+ /* There is another signal handler running: */
+ pending_sigs[sig - 1]++;
+ check_pending = 1;
+ }
+ else {
+ /* It's safe to handle the signal now. */
+ _thread_sig_handle(sig, scp);
+
+ /* Reset the pending and handled count back to 0: */
+ pending_sigs[sig - 1] = 0;
+ handled_sigs[sig - 1] = 0;
+
+ signal_lock.access_lock = 0;
+ }
+
+ /* Enter a loop to process pending signals: */
+ while ((check_pending != 0) &&
+ (_atomic_lock(&signal_lock.access_lock) == 0)) {
+ check_pending = 0;
+ for (i = 1; i < NSIG; i++) {
+ if (pending_sigs[i - 1] > handled_sigs[i - 1])
+ _thread_sig_handle(i, scp);
+ }
+ signal_lock.access_lock = 0;
+ }
+ }
+}
+
+void
+_thread_sig_handle(int sig, struct sigcontext * scp)
+{
+ int i;
+ pthread_t pthread, pthread_next;
+
+ /* Check if the signal requires a dump of thread information: */
+ if (sig == SIGINFO)
+ /* Dump thread information to file: */
+ _thread_dump_info();
+
+ /* Check if an interval timer signal: */
+ else if (sig == _SCHED_SIGNAL) {
+ /*
+ * This shouldn't ever occur (should this panic?).
+ */
} else {
/* Check if a child has terminated: */
if (sig == SIGCHLD) {
@@ -183,10 +194,9 @@ _thread_sig_handler(int sig, int code, struct sigcontext * scp)
* Enter a loop to discard pending SIGCONT
* signals:
*/
- for (pthread = _thread_link_list;
- pthread != NULL;
- pthread = pthread->nxt)
+ TAILQ_FOREACH(pthread, &_thread_list, tle) {
sigdelset(&pthread->sigpend,SIGCONT);
+ }
}
/*
@@ -196,11 +206,18 @@ _thread_sig_handler(int sig, int code, struct sigcontext * scp)
* if there are multiple waiters, we'll give it to the
* first one we find.
*/
- TAILQ_FOREACH(pthread, &_waitingq, pqe) {
+ for (pthread = TAILQ_FIRST(&_waitingq);
+ pthread != NULL; pthread = pthread_next) {
+ /*
+ * Grab the next thread before possibly destroying
+ * the link entry.
+ */
+ pthread_next = TAILQ_NEXT(pthread, pqe);
+
if ((pthread->state == PS_SIGWAIT) &&
sigismember(pthread->data.sigwait, sig)) {
/* Change the state of the thread to run: */
- PTHREAD_SIG_NEW_STATE(pthread,PS_RUNNING);
+ PTHREAD_NEW_STATE(pthread,PS_RUNNING);
/* Return the signal number: */
pthread->signo = sig;
@@ -219,10 +236,13 @@ _thread_sig_handler(int sig, int code, struct sigcontext * scp)
* Enter a loop to process each thread in the linked
* list:
*/
- for (pthread = _thread_link_list; pthread != NULL;
- pthread = pthread->nxt) {
+ TAILQ_FOREACH(pthread, &_thread_list, tle) {
pthread_t pthread_saved = _thread_run;
+ /* Current thread inside critical region? */
+ if (_thread_run->sig_defer_count > 0)
+ pthread->sig_defer_count++;
+
_thread_run = pthread;
_thread_signal(pthread,sig);
@@ -232,6 +252,10 @@ _thread_sig_handler(int sig, int code, struct sigcontext * scp)
*/
_dispatch_signals();
_thread_run = pthread_saved;
+
+ /* Current thread inside critical region? */
+ if (_thread_run->sig_defer_count > 0)
+ pthread->sig_defer_count--;
}
}
@@ -284,7 +308,7 @@ _thread_signal(pthread_t pthread, int sig)
pthread->interrupted = 1;
/* Change the state of the thread to run: */
- PTHREAD_SIG_NEW_STATE(pthread,PS_RUNNING);
+ PTHREAD_NEW_STATE(pthread,PS_RUNNING);
/* Return the signal number: */
pthread->signo = sig;
@@ -296,6 +320,7 @@ _thread_signal(pthread_t pthread, int sig)
*/
case PS_FDR_WAIT:
case PS_FDW_WAIT:
+ case PS_POLL_WAIT:
case PS_SLEEP_WAIT:
case PS_SELECT_WAIT:
if (sig != SIGCHLD ||
@@ -303,8 +328,11 @@ _thread_signal(pthread_t pthread, int sig)
/* Flag the operation as interrupted: */
pthread->interrupted = 1;
+ if (pthread->flags & PTHREAD_FLAGS_IN_WORKQ)
+ PTHREAD_WORKQ_REMOVE(pthread);
+
/* Change the state of the thread to run: */
- PTHREAD_SIG_NEW_STATE(pthread,PS_RUNNING);
+ PTHREAD_NEW_STATE(pthread,PS_RUNNING);
/* Return the signal number: */
pthread->signo = sig;
@@ -319,7 +347,7 @@ _thread_signal(pthread_t pthread, int sig)
if (!sigismember(&pthread->sigmask, sig) &&
_thread_sigact[sig - 1].sa_handler != SIG_DFL) {
/* Change the state of the thread to run: */
- PTHREAD_SIG_NEW_STATE(pthread,PS_RUNNING);
+ PTHREAD_NEW_STATE(pthread,PS_RUNNING);
/* Return the signal number: */
pthread->signo = sig;
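The new handler above serializes overlapping handler instances with a single test-and-set word: the instance that wins the lock does the real work, and any instance that loses merely counts its signal as pending for the winner to drain afterwards. A compressed sketch of that scheme follows; try_lock() is a stand-in for _atomic_lock() and is not actually atomic as written, since a real implementation needs a hardware test-and-set:

    #include <signal.h>

    static volatile int lock_word;
    static volatile unsigned int pending[NSIG];
    static volatile unsigned int handled[NSIG];
    static volatile int check_pending;

    static int
    try_lock(volatile int *w)
    {
        /* NOT atomic as written; stands in for _atomic_lock(). */
        int old = *w;

        *w = 1;
        return (old);           /* nonzero means someone else holds it */
    }

    static void
    handle_sig(int sig)
    {
        /* The real work: dispatch the signal to the right thread. */
        (void)sig;
    }

    static void
    handler(int sig)
    {
        int i;

        if (try_lock(&lock_word)) {
            /* Another instance is active; just record the signal. */
            pending[sig - 1]++;
            check_pending = 1;
        } else {
            handle_sig(sig);
            pending[sig - 1] = handled[sig - 1] = 0;
            lock_word = 0;
        }

        /* Drain signals that arrived while the lock was held. */
        while (check_pending != 0 && try_lock(&lock_word) == 0) {
            check_pending = 0;
            for (i = 1; i < NSIG; i++)
                if (pending[i - 1] > handled[i - 1])
                    handle_sig(i);
            lock_word = 0;
        }
    }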
diff --git a/lib/libpthread/thread/thr_sigaction.c b/lib/libpthread/thread/thr_sigaction.c
index 73a3b21..ed390d1 100644
--- a/lib/libpthread/thread/thr_sigaction.c
+++ b/lib/libpthread/thread/thr_sigaction.c
@@ -29,6 +29,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
+ * $Id$
*/
#include <signal.h>
#include <errno.h>
@@ -77,6 +78,9 @@ sigaction(int sig, const struct sigaction * act, struct sigaction * oact)
gact.sa_mask = act->sa_mask;
gact.sa_flags = 0;
+ /* Ensure the scheduling signal is masked: */
+ sigaddset(&gact.sa_mask, _SCHED_SIGNAL);
+
/*
* Check if the signal handler is being set to
* the default or ignore handlers:
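Adding _SCHED_SIGNAL to the sa_mask of every handler the application installs guarantees that a user handler can never be preempted by the scheduler's timer signal. The same pre-masking technique, shown standalone with SIGALRM standing in for the internal _SCHED_SIGNAL:

    #include <signal.h>
    #include <stddef.h>

    static void
    on_usr1(int sig)
    {
        (void)sig;      /* SIGALRM stays blocked for the duration */
    }

    int
    install(void)
    {
        struct sigaction sa;

        sa.sa_handler = on_usr1;
        sa.sa_flags = 0;
        sigemptyset(&sa.sa_mask);
        sigaddset(&sa.sa_mask, SIGALRM);    /* masked while handling */
        return (sigaction(SIGUSR1, &sa, NULL));
    }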
diff --git a/lib/libpthread/thread/thr_sigwait.c b/lib/libpthread/thread/thr_sigwait.c
index 98a5359..6c7d8d3 100644
--- a/lib/libpthread/thread/thr_sigwait.c
+++ b/lib/libpthread/thread/thr_sigwait.c
@@ -29,6 +29,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
+ * $Id$
*/
#include <signal.h>
#include <errno.h>
@@ -41,7 +42,7 @@ sigwait(const sigset_t * set, int *sig)
{
int ret = 0;
int i;
- sigset_t tempset;
+ sigset_t tempset, waitset;
struct sigaction act;
/*
@@ -51,17 +52,23 @@ sigwait(const sigset_t * set, int *sig)
act.sa_flags = SA_RESTART;
act.sa_mask = *set;
+ /* Ensure the scheduling signal is masked: */
+ sigaddset(&act.sa_mask, _SCHED_SIGNAL);
+
/*
- * These signals can't be waited on.
+ * Initialize the set of signals that will be waited on:
*/
- sigdelset(&act.sa_mask, SIGKILL);
- sigdelset(&act.sa_mask, SIGSTOP);
- sigdelset(&act.sa_mask, _SCHED_SIGNAL);
- sigdelset(&act.sa_mask, SIGCHLD);
- sigdelset(&act.sa_mask, SIGINFO);
+ waitset = *set;
+
+ /* These signals can't be waited on. */
+ sigdelset(&waitset, SIGKILL);
+ sigdelset(&waitset, SIGSTOP);
+ sigdelset(&waitset, _SCHED_SIGNAL);
+ sigdelset(&waitset, SIGCHLD);
+ sigdelset(&waitset, SIGINFO);
/* Check to see if a pending signal is in the wait mask. */
- if (tempset = (_thread_run->sigpend & act.sa_mask)) {
+ if (tempset = (_thread_run->sigpend & waitset)) {
/* Enter a loop to find a pending signal: */
for (i = 1; i < NSIG; i++) {
if (sigismember (&tempset, i))
@@ -81,17 +88,17 @@ sigwait(const sigset_t * set, int *sig)
* Enter a loop to find the signals that are SIG_DFL. For
* these signals we must install a dummy signal handler in
* order for the kernel to pass them in to us. POSIX says
- * that the application must explicitly install a dummy
+ * that the _application_ must explicitly install a dummy
* handler for signals that are SIG_IGN in order to sigwait
* on them. Note that SIG_IGN signals are left in the
* mask because a subsequent sigaction could enable an
* ignored signal.
*/
for (i = 1; i < NSIG; i++) {
- if (sigismember(&act.sa_mask, i)) {
- if (_thread_sigact[i - 1].sa_handler == SIG_DFL)
- if (_thread_sys_sigaction(i,&act,NULL) != 0)
- ret = -1;
+ if (sigismember(&waitset, i) &&
+ (_thread_sigact[i - 1].sa_handler == SIG_DFL)) {
+ if (_thread_sys_sigaction(i,&act,NULL) != 0)
+ ret = -1;
}
}
if (ret == 0) {
@@ -101,7 +108,7 @@ sigwait(const sigset_t * set, int *sig)
* mask is independent of the threads signal mask
* and requires separate storage.
*/
- _thread_run->data.sigwait = &act.sa_mask;
+ _thread_run->data.sigwait = &waitset;
/* Wait for a signal: */
_thread_kern_sched_state(PS_SIGWAIT, __FILE__, __LINE__);
@@ -119,7 +126,7 @@ sigwait(const sigset_t * set, int *sig)
/* Restore the sigactions: */
act.sa_handler = SIG_DFL;
for (i = 1; i < NSIG; i++) {
- if (sigismember(&act.sa_mask, i) &&
+ if (sigismember(&waitset, i) &&
(_thread_sigact[i - 1].sa_handler == SIG_DFL)) {
if (_thread_sys_sigaction(i,&act,NULL) != 0)
ret = -1;
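The loop above installs the dummy handler only for wait-set signals whose disposition is still SIG_DFL, because without some handler the kernel would apply the default action instead of letting the thread library see the signal. An application-level sketch of the same probe-then-install loop (dummy_handler and install_dummies are invented names; the library itself consults its _thread_sigact table rather than calling sigaction() to query):

    #include <signal.h>
    #include <stddef.h>

    static void
    dummy_handler(int sig)
    {
        (void)sig;      /* the handler's presence is all that matters */
    }

    static int
    install_dummies(const sigset_t *waitset)
    {
        struct sigaction act, old;
        int i, ret = 0;

        act.sa_handler = dummy_handler;
        act.sa_flags = SA_RESTART;
        act.sa_mask = *waitset;     /* block the set while handling */

        for (i = 1; i < NSIG; i++) {
            if (!sigismember(waitset, i))
                continue;
            /* Query the current disposition without changing it. */
            if (sigaction(i, NULL, &old) != 0)
                continue;
            if (old.sa_handler == SIG_DFL &&
                sigaction(i, &act, NULL) != 0)
                ret = -1;
        }
        return (ret);
    }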
diff --git a/lib/libpthread/thread/thr_suspend_np.c b/lib/libpthread/thread/thr_suspend_np.c
index 6a6eaf4..02943bb 100644
--- a/lib/libpthread/thread/thr_suspend_np.c
+++ b/lib/libpthread/thread/thr_suspend_np.c
@@ -29,6 +29,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
+ * $Id$
*/
#include <errno.h>
#ifdef _THREAD_SAFE
@@ -52,20 +53,19 @@ pthread_suspend_np(pthread_t thread)
}
/*
- * Guard against preemption by a scheduling signal.
- * A change of thread state modifies the waiting
- * and priority queues.
+ * Defer signals to protect the scheduling queues from
+ * access by the signal handler:
*/
- _thread_kern_sched_defer();
+ _thread_kern_sig_defer();
/* Suspend the thread. */
PTHREAD_NEW_STATE(thread,PS_SUSPENDED);
/*
- * Reenable preemption and yield if a scheduling signal
- * occurred while in the critical region.
+ * Undefer and handle pending signals, yielding if
+ * necessary:
*/
- _thread_kern_sched_undefer();
+ _thread_kern_sig_undefer();
}
return(ret);
}