summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--lib/libc_r/uthread/Makefile.inc17
-rw-r--r--lib/libc_r/uthread/pthread_private.h235
-rw-r--r--lib/libc_r/uthread/uthread_attr_getinheritsched.c50
-rw-r--r--lib/libc_r/uthread/uthread_attr_getschedparam.c50
-rw-r--r--lib/libc_r/uthread/uthread_attr_getschedpolicy.c50
-rw-r--r--lib/libc_r/uthread/uthread_attr_getscope.c53
-rw-r--r--lib/libc_r/uthread/uthread_attr_setinheritsched.c50
-rw-r--r--lib/libc_r/uthread/uthread_attr_setschedparam.c50
-rw-r--r--lib/libc_r/uthread/uthread_attr_setschedpolicy.c51
-rw-r--r--lib/libc_r/uthread/uthread_attr_setscope.c62
-rw-r--r--lib/libc_r/uthread/uthread_cond.c300
-rw-r--r--lib/libc_r/uthread/uthread_create.c40
-rw-r--r--lib/libc_r/uthread/uthread_detach.c13
-rw-r--r--lib/libc_r/uthread/uthread_execve.c2
-rw-r--r--lib/libc_r/uthread/uthread_exit.c15
-rw-r--r--lib/libc_r/uthread/uthread_fd.c4
-rw-r--r--lib/libc_r/uthread/uthread_fork.c34
-rw-r--r--lib/libc_r/uthread/uthread_gc.c10
-rw-r--r--lib/libc_r/uthread/uthread_getprio.c11
-rw-r--r--lib/libc_r/uthread/uthread_getschedparam.c56
-rw-r--r--lib/libc_r/uthread/uthread_info.c51
-rw-r--r--lib/libc_r/uthread/uthread_init.c38
-rw-r--r--lib/libc_r/uthread/uthread_kern.c619
-rw-r--r--lib/libc_r/uthread/uthread_kill.c13
-rw-r--r--lib/libc_r/uthread/uthread_mattr_init.c2
-rw-r--r--lib/libc_r/uthread/uthread_mutex.c1258
-rw-r--r--lib/libc_r/uthread/uthread_mutex_prioceiling.c109
-rw-r--r--lib/libc_r/uthread/uthread_mutex_protocol.c68
-rw-r--r--lib/libc_r/uthread/uthread_priority_queue.c155
-rw-r--r--lib/libc_r/uthread/uthread_resume_np.c13
-rw-r--r--lib/libc_r/uthread/uthread_select.c6
-rw-r--r--lib/libc_r/uthread/uthread_setprio.c18
-rw-r--r--lib/libc_r/uthread/uthread_setschedparam.c113
-rw-r--r--lib/libc_r/uthread/uthread_sig.c51
-rw-r--r--lib/libc_r/uthread/uthread_sigaction.c2
-rw-r--r--lib/libc_r/uthread/uthread_sigpending.c55
-rw-r--r--lib/libc_r/uthread/uthread_sigwait.c2
-rw-r--r--lib/libc_r/uthread/uthread_spinlock.c32
-rw-r--r--lib/libc_r/uthread/uthread_suspend_np.c13
-rw-r--r--lib/libc_r/uthread/uthread_switch_np.c69
-rw-r--r--lib/libkse/thread/Makefile.inc17
-rw-r--r--lib/libkse/thread/thr_attr_getinheritsched.c50
-rw-r--r--lib/libkse/thread/thr_attr_getschedparam.c50
-rw-r--r--lib/libkse/thread/thr_attr_getschedpolicy.c50
-rw-r--r--lib/libkse/thread/thr_attr_getscope.c53
-rw-r--r--lib/libkse/thread/thr_attr_setinheritsched.c50
-rw-r--r--lib/libkse/thread/thr_attr_setschedparam.c50
-rw-r--r--lib/libkse/thread/thr_attr_setschedpolicy.c51
-rw-r--r--lib/libkse/thread/thr_attr_setscope.c62
-rw-r--r--lib/libkse/thread/thr_cond.c300
-rw-r--r--lib/libkse/thread/thr_create.c40
-rw-r--r--lib/libkse/thread/thr_detach.c13
-rw-r--r--lib/libkse/thread/thr_exit.c15
-rw-r--r--lib/libkse/thread/thr_fork.c34
-rw-r--r--lib/libkse/thread/thr_getprio.c11
-rw-r--r--lib/libkse/thread/thr_getschedparam.c56
-rw-r--r--lib/libkse/thread/thr_info.c51
-rw-r--r--lib/libkse/thread/thr_init.c38
-rw-r--r--lib/libkse/thread/thr_kern.c619
-rw-r--r--lib/libkse/thread/thr_kill.c13
-rw-r--r--lib/libkse/thread/thr_mattr_init.c2
-rw-r--r--lib/libkse/thread/thr_mutex.c1258
-rw-r--r--lib/libkse/thread/thr_mutex_prioceiling.c109
-rw-r--r--lib/libkse/thread/thr_mutex_protocol.c68
-rw-r--r--lib/libkse/thread/thr_priority_queue.c155
-rw-r--r--lib/libkse/thread/thr_private.h235
-rw-r--r--lib/libkse/thread/thr_resume_np.c13
-rw-r--r--lib/libkse/thread/thr_select.c6
-rw-r--r--lib/libkse/thread/thr_setprio.c18
-rw-r--r--lib/libkse/thread/thr_setschedparam.c113
-rw-r--r--lib/libkse/thread/thr_sig.c51
-rw-r--r--lib/libkse/thread/thr_sigaction.c2
-rw-r--r--lib/libkse/thread/thr_sigpending.c55
-rw-r--r--lib/libkse/thread/thr_sigwait.c2
-rw-r--r--lib/libkse/thread/thr_spinlock.c32
-rw-r--r--lib/libkse/thread/thr_suspend_np.c13
-rw-r--r--lib/libkse/thread/thr_switch_np.c69
-rw-r--r--lib/libpthread/thread/Makefile.inc17
-rw-r--r--lib/libpthread/thread/thr_attr_getinheritsched.c50
-rw-r--r--lib/libpthread/thread/thr_attr_getschedparam.c50
-rw-r--r--lib/libpthread/thread/thr_attr_getschedpolicy.c50
-rw-r--r--lib/libpthread/thread/thr_attr_getscope.c53
-rw-r--r--lib/libpthread/thread/thr_attr_setinheritsched.c50
-rw-r--r--lib/libpthread/thread/thr_attr_setschedparam.c50
-rw-r--r--lib/libpthread/thread/thr_attr_setschedpolicy.c51
-rw-r--r--lib/libpthread/thread/thr_attr_setscope.c62
-rw-r--r--lib/libpthread/thread/thr_cond.c300
-rw-r--r--lib/libpthread/thread/thr_create.c40
-rw-r--r--lib/libpthread/thread/thr_detach.c13
-rw-r--r--lib/libpthread/thread/thr_exit.c15
-rw-r--r--lib/libpthread/thread/thr_fork.c34
-rw-r--r--lib/libpthread/thread/thr_gc.c10
-rw-r--r--lib/libpthread/thread/thr_getprio.c11
-rw-r--r--lib/libpthread/thread/thr_getschedparam.c56
-rw-r--r--lib/libpthread/thread/thr_info.c51
-rw-r--r--lib/libpthread/thread/thr_init.c38
-rw-r--r--lib/libpthread/thread/thr_kern.c619
-rw-r--r--lib/libpthread/thread/thr_kill.c13
-rw-r--r--lib/libpthread/thread/thr_mattr_init.c2
-rw-r--r--lib/libpthread/thread/thr_mutex.c1258
-rw-r--r--lib/libpthread/thread/thr_mutex_prioceiling.c109
-rw-r--r--lib/libpthread/thread/thr_mutex_protocol.c68
-rw-r--r--lib/libpthread/thread/thr_priority_queue.c155
-rw-r--r--lib/libpthread/thread/thr_private.h235
-rw-r--r--lib/libpthread/thread/thr_resume_np.c13
-rw-r--r--lib/libpthread/thread/thr_select.c6
-rw-r--r--lib/libpthread/thread/thr_setprio.c18
-rw-r--r--lib/libpthread/thread/thr_setschedparam.c113
-rw-r--r--lib/libpthread/thread/thr_sig.c51
-rw-r--r--lib/libpthread/thread/thr_sigaction.c2
-rw-r--r--lib/libpthread/thread/thr_sigpending.c55
-rw-r--r--lib/libpthread/thread/thr_sigwait.c2
-rw-r--r--lib/libpthread/thread/thr_spinlock.c32
-rw-r--r--lib/libpthread/thread/thr_suspend_np.c13
-rw-r--r--lib/libpthread/thread/thr_switch_np.c69
115 files changed, 9492 insertions, 2006 deletions
diff --git a/lib/libc_r/uthread/Makefile.inc b/lib/libc_r/uthread/Makefile.inc
index 004982a..16799cf 100644
--- a/lib/libc_r/uthread/Makefile.inc
+++ b/lib/libc_r/uthread/Makefile.inc
@@ -1,4 +1,4 @@
-# $Id: Makefile.inc,v 1.15 1998/09/12 22:03:20 dt Exp $
+# $Id: Makefile.inc,v 1.16 1998/09/30 06:36:55 jb Exp $
# uthread sources
.PATH: ${.CURDIR}/uthread
@@ -8,10 +8,18 @@ SRCS+= \
uthread_attr_destroy.c \
uthread_attr_init.c \
uthread_attr_getdetachstate.c \
+ uthread_attr_getinheritsched.c \
+ uthread_attr_getschedparam.c \
+ uthread_attr_getschedpolicy.c \
+ uthread_attr_getscope.c \
uthread_attr_getstackaddr.c \
uthread_attr_getstacksize.c \
uthread_attr_setcreatesuspend_np.c \
uthread_attr_setdetachstate.c \
+ uthread_attr_setinheritsched.c \
+ uthread_attr_setschedparam.c \
+ uthread_attr_setschedpolicy.c \
+ uthread_attr_setscope.c \
uthread_attr_setstackaddr.c \
uthread_attr_setstacksize.c \
uthread_autoinit.cc \
@@ -44,6 +52,7 @@ SRCS+= \
uthread_getdirentries.c \
uthread_getpeername.c \
uthread_getprio.c \
+ uthread_getschedparam.c \
uthread_getsockname.c \
uthread_getsockopt.c \
uthread_info.c \
@@ -57,11 +66,14 @@ SRCS+= \
uthread_mattr_kind_np.c \
uthread_multi_np.c \
uthread_mutex.c \
+ uthread_mutex_prioceiling.c \
+ uthread_mutex_protocol.c \
uthread_mutexattr_destroy.c \
uthread_nanosleep.c \
uthread_once.c \
uthread_open.c \
uthread_pipe.c \
+ uthread_priority_queue.c \
uthread_queue.c \
uthread_read.c \
uthread_readv.c \
@@ -76,12 +88,14 @@ SRCS+= \
uthread_sendto.c \
uthread_seterrno.c \
uthread_setprio.c \
+ uthread_setschedparam.c \
uthread_setsockopt.c \
uthread_shutdown.c \
uthread_sig.c \
uthread_sigaction.c \
uthread_sigblock.c \
uthread_sigmask.c \
+ uthread_sigpending.c \
uthread_sigprocmask.c \
uthread_sigsetmask.c \
uthread_sigsuspend.c \
@@ -92,6 +106,7 @@ SRCS+= \
uthread_spec.c \
uthread_spinlock.c \
uthread_suspend_np.c \
+ uthread_switch_np.c \
uthread_vfork.c \
uthread_wait4.c \
uthread_write.c \
diff --git a/lib/libc_r/uthread/pthread_private.h b/lib/libc_r/uthread/pthread_private.h
index 2d7e723..bf99a3b 100644
--- a/lib/libc_r/uthread/pthread_private.h
+++ b/lib/libc_r/uthread/pthread_private.h
@@ -55,6 +55,7 @@
#include <sys/time.h>
#include <sched.h>
#include <spinlock.h>
+#include <pthread_np.h>
/*
* Kernel fatal error handler macro.
@@ -65,16 +66,59 @@
#define stdout_debug(_x) _thread_sys_write(1,_x,strlen(_x));
#define stderr_debug(_x) _thread_sys_write(2,_x,strlen(_x));
+
/*
- * State change macro:
+ * Priority queue manipulation macros:
*/
-#define PTHREAD_NEW_STATE(thrd, newstate) { \
+#define PTHREAD_PRIOQ_INSERT_HEAD(thrd) _pq_insert_head(&_readyq,thrd)
+#define PTHREAD_PRIOQ_INSERT_TAIL(thrd) _pq_insert_tail(&_readyq,thrd)
+#define PTHREAD_PRIOQ_REMOVE(thrd) _pq_remove(&_readyq,thrd)
+#define PTHREAD_PRIOQ_FIRST _pq_first(&_readyq)
+
+/*
+ * Waiting queue manipulation macros:
+ */
+#define PTHREAD_WAITQ_INSERT(thrd) TAILQ_INSERT_TAIL(&_waitingq,thrd,pqe)
+#define PTHREAD_WAITQ_REMOVE(thrd) TAILQ_REMOVE(&_waitingq,thrd,pqe)
+
+/*
+ * State change macro without scheduling queue change:
+ */
+#define PTHREAD_SET_STATE(thrd, newstate) { \
(thrd)->state = newstate; \
(thrd)->fname = __FILE__; \
(thrd)->lineno = __LINE__; \
}
/*
+ * State change macro with scheduling queue change - This must be
+ * called with preemption deferred (see thread_kern_sched_[un]defer).
+ */
+#define PTHREAD_NEW_STATE(thrd, newstate) { \
+ if ((thrd)->state != newstate) { \
+ if ((thrd)->state == PS_RUNNING) { \
+ PTHREAD_PRIOQ_REMOVE(thrd); \
+ PTHREAD_WAITQ_INSERT(thrd); \
+ } else if (newstate == PS_RUNNING) { \
+ PTHREAD_WAITQ_REMOVE(thrd); \
+ PTHREAD_PRIOQ_INSERT_TAIL(thrd); \
+ } \
+ } \
+ PTHREAD_SET_STATE(thrd, newstate); \
+}
+
+/*
+ * Define the signals to be used for scheduling.
+ */
+#if defined(_PTHREADS_COMPAT_SCHED)
+#define _ITIMER_SCHED_TIMER ITIMER_VIRTUAL
+#define _SCHED_SIGNAL SIGVTALRM
+#else
+#define _ITIMER_SCHED_TIMER ITIMER_PROF
+#define _SCHED_SIGNAL SIGPROF
+#endif
+
+/*
* Queue definitions.
*/
struct pthread_queue {
@@ -84,10 +128,34 @@ struct pthread_queue {
};
/*
+ * Priority queues.
+ *
+ * XXX It'd be nice if these were contained in uthread_priority_queue.[ch].
+ */
+typedef struct pq_list {
+ TAILQ_HEAD(, pthread) pl_head; /* list of threads at this priority */
+ TAILQ_ENTRY(pq_list) pl_link; /* link for queue of priority lists */
+ int pl_prio; /* the priority of this list */
+ int pl_queued; /* is this in the priority queue */
+} pq_list_t;
+
+typedef struct pq_queue {
+ TAILQ_HEAD(, pq_list) pq_queue; /* queue of priority lists */
+ pq_list_t *pq_lists; /* array of all priority lists */
+ int pq_size; /* number of priority lists */
+} pq_queue_t;
+
+
+/*
* Static queue initialization values.
*/
#define PTHREAD_QUEUE_INITIALIZER { NULL, NULL, NULL }
+/*
+ * TailQ initialization values.
+ */
+#define TAILQ_INITIALIZER { NULL, NULL }
+
/*
* Mutex definitions.
*/
@@ -98,10 +166,31 @@ union pthread_mutex_data {
struct pthread_mutex {
enum pthread_mutextype m_type;
- struct pthread_queue m_queue;
+ int m_protocol;
+ TAILQ_HEAD(mutex_head, pthread) m_queue;
struct pthread *m_owner;
union pthread_mutex_data m_data;
long m_flags;
+ int m_refcount;
+
+ /*
+ * Used for priority inheritance and protection.
+ *
+ * m_prio - For priority inheritance, the highest active
+ * priority (threads locking the mutex inherit
+ * this priority). For priority protection, the
+ * ceiling priority of this mutex.
+ * m_saved_prio - mutex owner's inherited priority before
+ * taking the mutex, restored when the owner
+ * unlocks the mutex.
+ */
+ int m_prio;
+ int m_saved_prio;
+
+ /*
+ * Link for list of all mutexes a thread currently owns.
+ */
+ TAILQ_ENTRY(pthread_mutex) m_qe;
/*
* Lock for accesses to this structure.
@@ -120,11 +209,13 @@ struct pthread_mutex {
* Static mutex initialization values.
*/
#define PTHREAD_MUTEX_STATIC_INITIALIZER \
- { MUTEX_TYPE_FAST, PTHREAD_QUEUE_INITIALIZER, \
- NULL, { NULL }, MUTEX_FLAGS_INITED }
+ { PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, TAILQ_INITIALIZER, \
+ NULL, { NULL }, MUTEX_FLAGS_INITED, 0, 0, 0, TAILQ_INITIALIZER }
struct pthread_mutex_attr {
enum pthread_mutextype m_type;
+ int m_protocol;
+ int m_ceiling;
long m_flags;
};
@@ -137,15 +228,16 @@ enum pthread_cond_type {
};
struct pthread_cond {
- enum pthread_cond_type c_type;
- struct pthread_queue c_queue;
- void *c_data;
- long c_flags;
+ enum pthread_cond_type c_type;
+ TAILQ_HEAD(cond_head, pthread) c_queue;
+ pthread_mutex_t c_mutex;
+ void *c_data;
+ long c_flags;
/*
* Lock for accesses to this structure.
*/
- spinlock_t lock;
+ spinlock_t lock;
};
struct pthread_cond_attr {
@@ -164,7 +256,8 @@ struct pthread_cond_attr {
* Static cond initialization values.
*/
#define PTHREAD_COND_STATIC_INITIALIZER \
- { COND_TYPE_FAST, PTHREAD_QUEUE_INITIALIZER, NULL, COND_FLAGS_INITED }
+ { COND_TYPE_FAST, PTHREAD_QUEUE_INITIALIZER, NULL, NULL, \
+ COND_FLAGS_INITED }
/*
* Cleanup definitions.
@@ -176,7 +269,9 @@ struct pthread_cleanup {
};
struct pthread_attr {
- int schedparam_policy;
+ int sched_policy;
+ int sched_inherit;
+ int sched_interval;
int prio;
int suspend;
int flags;
@@ -254,9 +349,11 @@ enum pthread_state {
PS_WAIT_WAIT,
PS_SIGSUSPEND,
PS_SIGWAIT,
+ PS_SPINBLOCK,
PS_JOIN,
PS_SUSPENDED,
PS_DEAD,
+ PS_DEADLOCK,
PS_STATE_MAX
};
@@ -300,8 +397,8 @@ struct pthread_select_data {
};
union pthread_wait_data {
- pthread_mutex_t *mutex;
- pthread_cond_t *cond;
+ pthread_mutex_t mutex;
+ pthread_cond_t cond;
const sigset_t *sigwait; /* Waiting on a signal in sigwait */
struct {
short fd; /* Used when thread waiting on fd */
@@ -309,6 +406,7 @@ union pthread_wait_data {
char *fname; /* Source file name for debugging.*/
} fd;
struct pthread_select_data * select_data;
+ spinlock_t *spinlock;
};
/*
@@ -419,7 +517,11 @@ struct pthread {
struct pthread_queue join_queue;
/*
- * The current thread can belong to only one queue at a time.
+ * The current thread can belong to only one scheduling queue
+ * at a time (ready or waiting queue). It can also belong to
+ * a queue of threads waiting on mutexes or condition variables.
+ * Use pqe for the scheduling queue link (both ready and waiting),
+ * and qe for other links (mutexes and condition variables).
*
* Pointer to queue (if any) on which the current thread is waiting.
*
@@ -431,8 +533,11 @@ struct pthread {
/* Pointer to next element in queue. */
struct pthread *qnxt;
+ /* Priority queue entry for this thread: */
+ TAILQ_ENTRY(pthread) pqe;
+
/* Queue entry for this thread: */
- TAILQ_ENTRY(pthread) qe;
+ TAILQ_ENTRY(pthread) qe;
/* Wait data. */
union pthread_wait_data data;
@@ -446,10 +551,59 @@ struct pthread {
/* Signal number when in state PS_SIGWAIT: */
int signo;
+ /*
+ * Set to non-zero when this thread has deferred thread
+ * scheduling. We allow for recursive deferral.
+ */
+ int sched_defer_count;
+
+ /*
+ * Set to TRUE if this thread should yield after undeferring
+ * thread scheduling.
+ */
+ int yield_on_sched_undefer;
+
/* Miscellaneous data. */
- int flags;
-#define PTHREAD_EXITING 0x0100
- char pthread_priority;
+ int flags;
+#define PTHREAD_FLAGS_PRIVATE 0x0001
+#define PTHREAD_EXITING 0x0002
+#define PTHREAD_FLAGS_QUEUED 0x0004 /* in queue (qe is used) */
+#define PTHREAD_FLAGS_TRACE 0x0008
+
+ /*
+ * Base priority is the user-settable and retrievable priority
+ * of the thread. It is only affected by explicit calls to
+ * set thread priority and upon thread creation via a thread
+ * attribute or default priority.
+ */
+ char base_priority;
+
+ /*
+ * Inherited priority is the priority a thread inherits by
+ * taking a priority inheritance or protection mutex. It
+ * is not affected by base priority changes. Inherited
+ * priority defaults to and remains 0 until a mutex is taken
+ * that is being waited on by any other thread whose priority
+ * is non-zero.
+ */
+ char inherited_priority;
+
+ /*
+ * Active priority is always the maximum of the threads base
+ * priority and inherited priority. When there is a change
+ * in either the real or inherited priority, the active
+ * priority must be recalculated.
+ */
+ char active_priority;
+
+ /* Number of priority ceiling or protection mutexes owned. */
+ int priority_mutex_count;
+
+ /*
+ * Queue of currently owned mutexes.
+ */
+ TAILQ_HEAD(, pthread_mutex) mutexq;
+
void *ret;
const void **specific_data;
int specific_data_count;
@@ -475,6 +629,14 @@ SCLASS struct pthread * volatile _thread_run
;
#endif
+/* Ptr to the thread structure for the last user thread to run: */
+SCLASS struct pthread * volatile _last_user_thread
+#ifdef GLOBAL_PTHREAD_PRIVATE
+= &_thread_kern_thread;
+#else
+;
+#endif
+
/*
* Ptr to the thread running in single-threaded mode or NULL if
* running multi-threaded (default POSIX behaviour).
@@ -547,7 +709,7 @@ SCLASS struct pthread *_thread_initial
/* Default thread attributes: */
SCLASS struct pthread_attr pthread_attr_default
#ifdef GLOBAL_PTHREAD_PRIVATE
-= { SCHED_RR, PTHREAD_DEFAULT_PRIORITY, PTHREAD_CREATE_RUNNING,
+= { SCHED_RR, 0, TIMESLICE_USEC, PTHREAD_DEFAULT_PRIORITY, PTHREAD_CREATE_RUNNING,
PTHREAD_CREATE_JOINABLE, NULL, NULL, NULL, PTHREAD_STACK_DEFAULT };
#else
;
@@ -556,7 +718,7 @@ SCLASS struct pthread_attr pthread_attr_default
/* Default mutex attributes: */
SCLASS struct pthread_mutex_attr pthread_mutexattr_default
#ifdef GLOBAL_PTHREAD_PRIVATE
-= { MUTEX_TYPE_FAST, 0 };
+= { PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, 0 };
#else
;
#endif
@@ -614,6 +776,27 @@ SCLASS pthread_cond_t _gc_cond
*/
struct sigaction _thread_sigact[NSIG];
+/*
+ * Scheduling queues:
+ */
+SCLASS pq_queue_t _readyq;
+SCLASS TAILQ_HEAD(, pthread) _waitingq;
+
+/* Indicates that the waitingq now has threads ready to run. */
+SCLASS volatile int _waitingq_check_reqd
+#ifdef GLOBAL_PTHREAD_PRIVATE
+= 0
+#endif
+;
+
+/* Thread switch hook. */
+SCLASS pthread_switch_routine_t _sched_switch_hook
+#ifdef GLOBAL_PTHREAD_PRIVATE
+= NULL
+#endif
+;
+
+
/* Undefine the storage class specifier: */
#undef SCLASS
@@ -645,6 +828,14 @@ void _lock_thread(void);
void _lock_thread_list(void);
void _unlock_thread(void);
void _unlock_thread_list(void);
+int _mutex_cv_lock(pthread_mutex_t *);
+int _mutex_cv_unlock(pthread_mutex_t *);
+void _mutex_notify_priochange(struct pthread *);
+int _pq_init(struct pq_queue *pq, int, int);
+void _pq_remove(struct pq_queue *pq, struct pthread *);
+void _pq_insert_head(struct pq_queue *pq, struct pthread *);
+void _pq_insert_tail(struct pq_queue *pq, struct pthread *);
+struct pthread *_pq_first(struct pq_queue *pq);
void _thread_exit(char *, int, char *);
void _thread_fd_unlock(int, int);
void _thread_fd_unlock_debug(int, int, char *, int);
@@ -657,6 +848,8 @@ void _thread_kern_sched_state(enum pthread_state,char *fname,int lineno);
void _thread_kern_sched_state_unlock(enum pthread_state state,
spinlock_t *lock, char *fname, int lineno);
void _thread_kern_set_timeout(struct timespec *);
+void _thread_kern_sched_defer(void);
+void _thread_kern_sched_undefer(void);
void _thread_sig_handler(int, int, struct sigcontext *);
void _thread_start(void);
void _thread_start_sig_handler(void);
diff --git a/lib/libc_r/uthread/uthread_attr_getinheritsched.c b/lib/libc_r/uthread/uthread_attr_getinheritsched.c
new file mode 100644
index 0000000..38851ca
--- /dev/null
+++ b/lib/libc_r/uthread/uthread_attr_getinheritsched.c
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Daniel Eischen.
+ * 4. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+#include <errno.h>
+#ifdef _THREAD_SAFE
+#include <pthread.h>
+#include "pthread_private.h"
+
+int
+pthread_attr_getinheritsched(pthread_attr_t *attr, int *sched_inherit)
+{
+ int ret = 0;
+
+ if ((attr == NULL) || (*attr == NULL))
+ ret = EINVAL;
+ else
+ *sched_inherit = (*attr)->sched_inherit;
+
+ return(ret);
+}
+#endif
diff --git a/lib/libc_r/uthread/uthread_attr_getschedparam.c b/lib/libc_r/uthread/uthread_attr_getschedparam.c
new file mode 100644
index 0000000..ea5e19d
--- /dev/null
+++ b/lib/libc_r/uthread/uthread_attr_getschedparam.c
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Daniel Eischen.
+ * 4. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+#include <errno.h>
+#ifdef _THREAD_SAFE
+#include <pthread.h>
+#include "pthread_private.h"
+
+int
+pthread_attr_getschedparam(pthread_attr_t *attr, struct sched_param *param)
+{
+ int ret = 0;
+
+ if ((attr == NULL) || (*attr == NULL) || (param == NULL))
+ ret = EINVAL;
+ else
+ param->sched_priority = (*attr)->prio;
+
+ return(ret);
+}
+#endif
diff --git a/lib/libc_r/uthread/uthread_attr_getschedpolicy.c b/lib/libc_r/uthread/uthread_attr_getschedpolicy.c
new file mode 100644
index 0000000..0b9ff59
--- /dev/null
+++ b/lib/libc_r/uthread/uthread_attr_getschedpolicy.c
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Daniel Eischen.
+ * 4. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+#include <errno.h>
+#ifdef _THREAD_SAFE
+#include <pthread.h>
+#include "pthread_private.h"
+
+int
+pthread_attr_getschedpolicy(pthread_attr_t *attr, int *policy)
+{
+ int ret = 0;
+
+ if ((attr == NULL) || (*attr == NULL) || (policy == NULL))
+ ret = EINVAL;
+ else
+ *policy = (*attr)->sched_policy;
+
+ return(ret);
+}
+#endif
diff --git a/lib/libc_r/uthread/uthread_attr_getscope.c b/lib/libc_r/uthread/uthread_attr_getscope.c
new file mode 100644
index 0000000..f84b104
--- /dev/null
+++ b/lib/libc_r/uthread/uthread_attr_getscope.c
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Daniel Eischen.
+ * 4. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+#include <errno.h>
+#ifdef _THREAD_SAFE
+#include <pthread.h>
+#include "pthread_private.h"
+
+int
+pthread_attr_getscope(pthread_attr_t *attr, int *contentionscope)
+{
+ int ret = 0;
+
+ if ((attr == NULL) || (*attr == NULL) || (contentionscope == NULL))
+ /* Return an invalid argument: */
+ ret = EINVAL;
+
+ else
+ *contentionscope = (*attr)->flags & PTHREAD_SCOPE_SYSTEM ?
+ PTHREAD_SCOPE_SYSTEM : PTHREAD_SCOPE_PROCESS;
+
+ return(ret);
+}
+#endif
diff --git a/lib/libc_r/uthread/uthread_attr_setinheritsched.c b/lib/libc_r/uthread/uthread_attr_setinheritsched.c
new file mode 100644
index 0000000..017b7df
--- /dev/null
+++ b/lib/libc_r/uthread/uthread_attr_setinheritsched.c
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Daniel Eischen.
+ * 4. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+#include <errno.h>
+#ifdef _THREAD_SAFE
+#include <pthread.h>
+#include "pthread_private.h"
+
+int
+pthread_attr_setinheritsched(pthread_attr_t *attr, int sched_inherit)
+{
+ int ret = 0;
+
+ if ((attr == NULL) || (*attr == NULL))
+ ret = EINVAL;
+ else
+ (*attr)->sched_inherit = sched_inherit;
+
+ return(ret);
+}
+#endif
diff --git a/lib/libc_r/uthread/uthread_attr_setschedparam.c b/lib/libc_r/uthread/uthread_attr_setschedparam.c
new file mode 100644
index 0000000..5c860a8
--- /dev/null
+++ b/lib/libc_r/uthread/uthread_attr_setschedparam.c
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Daniel Eischen.
+ * 4. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+#include <errno.h>
+#ifdef _THREAD_SAFE
+#include <pthread.h>
+#include "pthread_private.h"
+
+int
+pthread_attr_setschedparam(pthread_attr_t *attr, struct sched_param *param)
+{
+ int ret = 0;
+
+ if ((attr == NULL) || (*attr == NULL) || (param == NULL))
+ ret = EINVAL;
+ else
+ (*attr)->prio = param->sched_priority;
+
+ return(ret);
+}
+#endif
diff --git a/lib/libc_r/uthread/uthread_attr_setschedpolicy.c b/lib/libc_r/uthread/uthread_attr_setschedpolicy.c
new file mode 100644
index 0000000..3d5aa3c
--- /dev/null
+++ b/lib/libc_r/uthread/uthread_attr_setschedpolicy.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Daniel Eischen.
+ * 4. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+#include <errno.h>
+#ifdef _THREAD_SAFE
+#include <pthread.h>
+#include "pthread_private.h"
+
+int
+pthread_attr_setschedpolicy(pthread_attr_t *attr, int policy)
+{
+ int ret = 0;
+
+ if ((attr == NULL) || (*attr == NULL) || (policy < SCHED_FIFO) ||
+ (policy > SCHED_RR))
+ ret = EINVAL;
+ else
+ (*attr)->sched_policy = policy;
+
+ return(ret);
+}
+#endif
diff --git a/lib/libc_r/uthread/uthread_attr_setscope.c b/lib/libc_r/uthread/uthread_attr_setscope.c
new file mode 100644
index 0000000..24dead6
--- /dev/null
+++ b/lib/libc_r/uthread/uthread_attr_setscope.c
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Daniel Eischen.
+ * 4. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+#include <errno.h>
+#ifdef _THREAD_SAFE
+#include <pthread.h>
+#include "pthread_private.h"
+
+int
+pthread_attr_setscope(pthread_attr_t *attr, int contentionscope)
+{
+	int	ret = 0;
+
+	if ((attr == NULL) || (*attr == NULL) ||
+	    ((contentionscope != PTHREAD_SCOPE_PROCESS) &&
+	    (contentionscope != PTHREAD_SCOPE_SYSTEM)))
+		/* Return an invalid argument: */
+		ret = EINVAL;
+
+	else if (contentionscope == PTHREAD_SCOPE_SYSTEM)
+		/* We don't support system wide contention: */
+#ifdef NOT_YET
+		ret = ENOTSUP;
+#else
+		ret = EOPNOTSUPP;
+#endif
+
+	else
+		(*attr)->flags |= contentionscope;
+
+	return(ret);
+}
+#endif
diff --git a/lib/libc_r/uthread/uthread_cond.c b/lib/libc_r/uthread/uthread_cond.c
index a085ea6..c090d79 100644
--- a/lib/libc_r/uthread/uthread_cond.c
+++ b/lib/libc_r/uthread/uthread_cond.c
@@ -20,7 +20,7 @@
* THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@@ -37,6 +37,14 @@
#include <pthread.h>
#include "pthread_private.h"
+/*
+ * Prototypes
+ */
+static inline pthread_t cond_queue_deq(pthread_cond_t);
+static inline void cond_queue_remove(pthread_cond_t, pthread_t);
+static inline void cond_queue_enq(pthread_cond_t, pthread_t);
+
+
int
pthread_cond_init(pthread_cond_t * cond, const pthread_condattr_t * cond_attr)
{
@@ -83,9 +91,10 @@ pthread_cond_init(pthread_cond_t * cond, const pthread_condattr_t * cond_attr)
* Initialise the condition variable
* structure:
*/
- _thread_queue_init(&pcond->c_queue);
+ TAILQ_INIT(&pcond->c_queue);
pcond->c_flags |= COND_FLAGS_INITED;
pcond->c_type = type;
+ pcond->c_mutex = NULL;
memset(&pcond->lock,0,sizeof(pcond->lock));
*cond = pcond;
}
@@ -144,33 +153,57 @@ pthread_cond_wait(pthread_cond_t * cond, pthread_mutex_t * mutex)
switch ((*cond)->c_type) {
/* Fast condition variable: */
case COND_TYPE_FAST:
- /* Wait forever: */
- _thread_run->wakeup_time.tv_sec = -1;
-
- /*
- * Queue the running thread for the condition
- * variable:
- */
- _thread_queue_enq(&(*cond)->c_queue, _thread_run);
-
- /* Unlock the mutex: */
- if ((rval = pthread_mutex_unlock(mutex)) != 0) {
- /*
- * Cannot unlock the mutex, so remove the
- * running thread from the condition
- * variable queue:
- */
- _thread_queue_deq(&(*cond)->c_queue);
-
+ if ((mutex == NULL) || (((*cond)->c_mutex != NULL) &&
+ ((*cond)->c_mutex != *mutex))) {
/* Unlock the condition variable structure: */
_SPINUNLOCK(&(*cond)->lock);
+
+ /* Return invalid argument error: */
+ rval = EINVAL;
} else {
- /* Schedule the next thread: */
- _thread_kern_sched_state_unlock(PS_COND_WAIT,
- &(*cond)->lock, __FILE__, __LINE__);
+ /* Reset the timeout flag: */
+ _thread_run->timeout = 0;
- /* Lock the mutex: */
- rval = pthread_mutex_lock(mutex);
+ /*
+ * Queue the running thread for the condition
+ * variable:
+ */
+ cond_queue_enq(*cond, _thread_run);
+
+ /* Remember the mutex that is being used: */
+ (*cond)->c_mutex = *mutex;
+
+ /* Wait forever: */
+ _thread_run->wakeup_time.tv_sec = -1;
+
+ /* Unlock the mutex: */
+ if ((rval = _mutex_cv_unlock(mutex)) != 0) {
+ /*
+ * Cannot unlock the mutex, so remove
+ * the running thread from the condition
+ * variable queue:
+ */
+ cond_queue_remove(*cond, _thread_run);
+
+ /* Check for no more waiters: */
+ if (TAILQ_FIRST(&(*cond)->c_queue) ==
+ NULL)
+ (*cond)->c_mutex = NULL;
+
+ /* Unlock the condition variable structure: */
+ _SPINUNLOCK(&(*cond)->lock);
+ }
+ else {
+ /*
+ * Schedule the next thread and unlock
+ * the condition variable structure:
+ */
+ _thread_kern_sched_state_unlock(PS_COND_WAIT,
+ &(*cond)->lock, __FILE__, __LINE__);
+
+ /* Lock the mutex: */
+ rval = _mutex_cv_lock(mutex);
+ }
}
break;
@@ -183,7 +216,6 @@ pthread_cond_wait(pthread_cond_t * cond, pthread_mutex_t * mutex)
rval = EINVAL;
break;
}
-
}
/* Return the completion status: */
@@ -213,42 +245,88 @@ pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
switch ((*cond)->c_type) {
/* Fast condition variable: */
case COND_TYPE_FAST:
- /* Set the wakeup time: */
- _thread_run->wakeup_time.tv_sec = abstime->tv_sec;
- _thread_run->wakeup_time.tv_nsec = abstime->tv_nsec;
-
- /* Reset the timeout flag: */
- _thread_run->timeout = 0;
-
- /*
- * Queue the running thread for the condition
- * variable:
- */
- _thread_queue_enq(&(*cond)->c_queue, _thread_run);
-
- /* Unlock the mutex: */
- if ((rval = pthread_mutex_unlock(mutex)) != 0) {
- /*
- * Cannot unlock the mutex, so remove the
- * running thread from the condition
- * variable queue:
- */
- _thread_queue_deq(&(*cond)->c_queue);
+ if ((mutex == NULL) || (((*cond)->c_mutex != NULL) &&
+ ((*cond)->c_mutex != *mutex))) {
+ /* Return invalid argument error: */
+ rval = EINVAL;
/* Unlock the condition variable structure: */
_SPINUNLOCK(&(*cond)->lock);
} else {
- /* Schedule the next thread: */
- _thread_kern_sched_state_unlock(PS_COND_WAIT,
- &(*cond)->lock, __FILE__, __LINE__);
+ /* Set the wakeup time: */
+ _thread_run->wakeup_time.tv_sec =
+ abstime->tv_sec;
+ _thread_run->wakeup_time.tv_nsec =
+ abstime->tv_nsec;
- /* Lock the mutex: */
- if ((rval = pthread_mutex_lock(mutex)) != 0) {
- }
- /* Check if the wait timed out: */
- else if (_thread_run->timeout) {
- /* Return a timeout error: */
- rval = ETIMEDOUT;
+ /* Reset the timeout flag: */
+ _thread_run->timeout = 0;
+
+ /*
+ * Queue the running thread for the condition
+ * variable:
+ */
+ cond_queue_enq(*cond, _thread_run);
+
+ /* Remember the mutex that is being used: */
+ (*cond)->c_mutex = *mutex;
+
+ /* Unlock the mutex: */
+ if ((rval = _mutex_cv_unlock(mutex)) != 0) {
+ /*
+ * Cannot unlock the mutex, so remove
+ * the running thread from the condition
+ * variable queue:
+ */
+ cond_queue_remove(*cond, _thread_run);
+
+ /* Check for no more waiters: */
+ if (TAILQ_FIRST(&(*cond)->c_queue) == NULL)
+ (*cond)->c_mutex = NULL;
+
+ /* Unlock the condition variable structure: */
+ _SPINUNLOCK(&(*cond)->lock);
+ } else {
+ /*
+ * Schedule the next thread and unlock
+ * the condition variable structure:
+ */
+ _thread_kern_sched_state_unlock(PS_COND_WAIT,
+ &(*cond)->lock, __FILE__, __LINE__);
+
+				/* Check if the wait timed out: */
+ if (_thread_run->timeout == 0) {
+ /* Lock the mutex: */
+ rval = _mutex_cv_lock(mutex);
+ }
+ else {
+ /* Lock the condition variable structure: */
+ _SPINLOCK(&(*cond)->lock);
+
+ /*
+ * The wait timed out; remove
+ * the thread from the condition
+ * variable queue:
+ */
+ cond_queue_remove(*cond,
+ _thread_run);
+
+ /* Check for no more waiters: */
+ if (TAILQ_FIRST(&(*cond)->c_queue) == NULL)
+ (*cond)->c_mutex = NULL;
+
+					/* Unlock the condition variable structure: */
+ _SPINUNLOCK(&(*cond)->lock);
+
+ /* Return a timeout error: */
+ rval = ETIMEDOUT;
+
+ /*
+ * Lock the mutex and ignore
+ * any errors:
+ */
+ (void)_mutex_cv_lock(mutex);
+ }
}
}
break;
@@ -273,7 +351,6 @@ int
pthread_cond_signal(pthread_cond_t * cond)
{
int rval = 0;
- int status;
pthread_t pthread;
if (cond == NULL || *cond == NULL)
@@ -286,11 +363,22 @@ pthread_cond_signal(pthread_cond_t * cond)
switch ((*cond)->c_type) {
/* Fast condition variable: */
case COND_TYPE_FAST:
- /* Bring the next thread off the condition queue: */
- if ((pthread = _thread_queue_deq(&(*cond)->c_queue)) != NULL) {
+ /*
+ * Enter a loop to dequeue threads from the condition
+ * queue until we find one that hasn't previously
+ * timed out.
+ */
+ while (((pthread = cond_queue_deq(*cond)) != NULL) &&
+ (pthread->timeout != 0)) {
+ }
+
+ if (pthread != NULL)
/* Allow the thread to run: */
PTHREAD_NEW_STATE(pthread,PS_RUNNING);
- }
+
+ /* Check for no more waiters: */
+ if (TAILQ_FIRST(&(*cond)->c_queue) == NULL)
+ (*cond)->c_mutex = NULL;
break;
/* Trap invalid condition variable types: */
@@ -312,12 +400,21 @@ int
pthread_cond_broadcast(pthread_cond_t * cond)
{
int rval = 0;
- int status;
pthread_t pthread;
if (cond == NULL || *cond == NULL)
rval = EINVAL;
else {
+ /*
+ * Guard against preemption by a scheduling signal.
+ * A change of thread state modifies the waiting
+ * and priority queues. In addition, we must assure
+ * that all threads currently waiting on the condition
+ * variable are signaled and are not timedout by a
+ * scheduling signal that causes a preemption.
+ */
+ _thread_kern_sched_defer();
+
/* Lock the condition variable structure: */
_SPINLOCK(&(*cond)->lock);
@@ -329,11 +426,17 @@ pthread_cond_broadcast(pthread_cond_t * cond)
* Enter a loop to bring all threads off the
* condition queue:
*/
- while ((pthread =
- _thread_queue_deq(&(*cond)->c_queue)) != NULL) {
- /* Allow the thread to run: */
- PTHREAD_NEW_STATE(pthread,PS_RUNNING);
+ while ((pthread = cond_queue_deq(*cond)) != NULL) {
+ /*
+ * The thread is already running if the
+ * timeout flag is set.
+ */
+ if (pthread->timeout == 0)
+ PTHREAD_NEW_STATE(pthread,PS_RUNNING);
}
+
+ /* There are no more waiting threads: */
+ (*cond)->c_mutex = NULL;
break;
/* Trap invalid condition variable types: */
@@ -345,9 +448,74 @@ pthread_cond_broadcast(pthread_cond_t * cond)
/* Unlock the condition variable structure: */
_SPINUNLOCK(&(*cond)->lock);
+
+ /* Reenable preemption and yield if necessary.
+ */
+ _thread_kern_sched_undefer();
}
/* Return the completion status: */
return (rval);
}
+
+/*
+ * Dequeue a waiting thread from the head of a condition queue in
+ * descending priority order.
+ */
+static inline pthread_t
+cond_queue_deq(pthread_cond_t cond)
+{
+ pthread_t pthread;
+
+ if ((pthread = TAILQ_FIRST(&cond->c_queue)) != NULL) {
+ TAILQ_REMOVE(&cond->c_queue, pthread, qe);
+ pthread->flags &= ~PTHREAD_FLAGS_QUEUED;
+ }
+
+ return(pthread);
+}
+
+/*
+ * Remove a waiting thread from a condition queue in descending priority
+ * order.
+ */
+static inline void
+cond_queue_remove(pthread_cond_t cond, pthread_t pthread)
+{
+ /*
+ * Because pthread_cond_timedwait() can timeout as well
+ * as be signaled by another thread, it is necessary to
+ * guard against removing the thread from the queue if
+ * it isn't in the queue.
+ */
+ if (pthread->flags & PTHREAD_FLAGS_QUEUED) {
+ TAILQ_REMOVE(&cond->c_queue, pthread, qe);
+ pthread->flags &= ~PTHREAD_FLAGS_QUEUED;
+ }
+}
+
+/*
+ * Enqueue a waiting thread to a condition queue in descending priority
+ * order.
+ */
+static inline void
+cond_queue_enq(pthread_cond_t cond, pthread_t pthread)
+{
+ pthread_t tid = TAILQ_LAST(&cond->c_queue, cond_head);
+
+ /*
+ * For the common case of all threads having equal priority,
+ * we perform a quick check against the priority of the thread
+ * at the tail of the queue.
+ */
+ if ((tid == NULL) || (pthread->active_priority <= tid->active_priority))
+ TAILQ_INSERT_TAIL(&cond->c_queue, pthread, qe);
+ else {
+ tid = TAILQ_FIRST(&cond->c_queue);
+ while (pthread->active_priority <= tid->active_priority)
+ tid = TAILQ_NEXT(tid, qe);
+ TAILQ_INSERT_BEFORE(tid, pthread, qe);
+ }
+ pthread->flags |= PTHREAD_FLAGS_QUEUED;
+}
#endif
diff --git a/lib/libc_r/uthread/uthread_create.c b/lib/libc_r/uthread/uthread_create.c
index 4169461..438e527 100644
--- a/lib/libc_r/uthread/uthread_create.c
+++ b/lib/libc_r/uthread/uthread_create.c
@@ -99,12 +99,6 @@ pthread_create(pthread_t * thread, const pthread_attr_t * attr,
*/
new_thread->magic = PTHREAD_MAGIC;
- if (pattr->suspend == PTHREAD_CREATE_SUSPENDED) {
- PTHREAD_NEW_STATE(new_thread,PS_SUSPENDED);
- } else {
- PTHREAD_NEW_STATE(new_thread,PS_RUNNING);
- }
-
/* Initialise the thread for signals: */
new_thread->sigmask = _thread_run->sigmask;
@@ -162,21 +156,26 @@ pthread_create(pthread_t * thread, const pthread_attr_t * attr,
*/
if (new_thread->attr.flags & PTHREAD_INHERIT_SCHED) {
/* Copy the scheduling attributes: */
- new_thread->pthread_priority = _thread_run->pthread_priority;
- new_thread->attr.prio = _thread_run->pthread_priority;
- new_thread->attr.schedparam_policy = _thread_run->attr.schedparam_policy;
+ new_thread->base_priority = _thread_run->base_priority;
+ new_thread->attr.prio = _thread_run->base_priority;
+ new_thread->attr.sched_policy = _thread_run->attr.sched_policy;
} else {
/*
* Use just the thread priority, leaving the
* other scheduling attributes as their
* default values:
*/
- new_thread->pthread_priority = new_thread->attr.prio;
+ new_thread->base_priority = new_thread->attr.prio;
}
+ new_thread->active_priority = new_thread->base_priority;
+ new_thread->inherited_priority = 0;
/* Initialise the join queue for the new thread: */
_thread_queue_init(&(new_thread->join_queue));
+ /* Initialize the mutex queue: */
+ TAILQ_INIT(&new_thread->mutexq);
+
/* Initialise hooks in the thread structure: */
new_thread->specific_data = NULL;
new_thread->cleanup = NULL;
@@ -200,6 +199,27 @@ pthread_create(pthread_t * thread, const pthread_attr_t * attr,
/* Unlock the thread list: */
_unlock_thread_list();
+ /*
+ * Guard against preemption by a scheduling signal.
+ * A change of thread state modifies the waiting
+ * and priority queues.
+ */
+ _thread_kern_sched_defer();
+
+ if (pattr->suspend == PTHREAD_CREATE_SUSPENDED) {
+ new_thread->state = PS_SUSPENDED;
+ PTHREAD_WAITQ_INSERT(new_thread);
+ } else {
+ new_thread->state = PS_RUNNING;
+ PTHREAD_PRIOQ_INSERT_TAIL(new_thread);
+ }
+
+ /*
+ * Reenable preemption and yield if a scheduling
+ * signal occurred while in the critical region.
+ */
+ _thread_kern_sched_undefer();
+
/* Return a pointer to the thread structure: */
(*thread) = new_thread;
diff --git a/lib/libc_r/uthread/uthread_detach.c b/lib/libc_r/uthread/uthread_detach.c
index da456bf..05da832 100644
--- a/lib/libc_r/uthread/uthread_detach.c
+++ b/lib/libc_r/uthread/uthread_detach.c
@@ -52,11 +52,24 @@ pthread_detach(pthread_t pthread)
/* Flag the thread as detached: */
pthread->attr.flags |= PTHREAD_DETACHED;
+ /*
+ * Guard against preemption by a scheduling signal.
+ * A change of thread state modifies the waiting
+ * and priority queues.
+ */
+ _thread_kern_sched_defer();
+
/* Enter a loop to bring all threads off the join queue: */
while ((next_thread = _thread_queue_deq(&pthread->join_queue)) != NULL) {
/* Make the thread run: */
PTHREAD_NEW_STATE(next_thread,PS_RUNNING);
}
+
+ /*
+ * Reenable preemption and yield if a scheduling signal
+ * occurred while in the critical region.
+ */
+ _thread_kern_sched_undefer();
} else
/* Return an error: */
rval = EINVAL;
diff --git a/lib/libc_r/uthread/uthread_execve.c b/lib/libc_r/uthread/uthread_execve.c
index 0d289a4..0dbd467 100644
--- a/lib/libc_r/uthread/uthread_execve.c
+++ b/lib/libc_r/uthread/uthread_execve.c
@@ -52,7 +52,7 @@ execve(const char *name, char *const * argv, char *const * envp)
itimer.it_interval.tv_usec = 0;
itimer.it_value.tv_sec = 0;
itimer.it_value.tv_usec = 0;
- setitimer(ITIMER_VIRTUAL, &itimer, NULL);
+ setitimer(_ITIMER_SCHED_TIMER, &itimer, NULL);
/* Close the pthread kernel pipe: */
_thread_sys_close(_thread_kern_pipe[0]);
diff --git a/lib/libc_r/uthread/uthread_exit.c b/lib/libc_r/uthread/uthread_exit.c
index a5fc400..93b8b83 100644
--- a/lib/libc_r/uthread/uthread_exit.c
+++ b/lib/libc_r/uthread/uthread_exit.c
@@ -49,7 +49,7 @@ void _exit(int status)
itimer.it_interval.tv_usec = 0;
itimer.it_value.tv_sec = 0;
itimer.it_value.tv_usec = 0;
- setitimer(ITIMER_VIRTUAL, &itimer, NULL);
+ setitimer(_ITIMER_SCHED_TIMER, &itimer, NULL);
/* Close the pthread kernel pipe: */
_thread_sys_close(_thread_kern_pipe[0]);
@@ -127,6 +127,13 @@ pthread_exit(void *status)
/* Run the thread-specific data destructors: */
_thread_cleanupspecific();
}
+
+ /*
+ * Guard against preemption by a scheduling signal. A change of
+ * thread state modifies the waiting and priority queues.
+ */
+ _thread_kern_sched_defer();
+
/* Check if there are any threads joined to this one: */
while ((pthread = _thread_queue_deq(&(_thread_run->join_queue))) != NULL) {
/* Wake the joined thread and let it detach this thread: */
@@ -134,6 +141,12 @@ pthread_exit(void *status)
}
/*
+ * Reenable preemption and yield if a scheduling signal
+ * occurred while in the critical region.
+ */
+ _thread_kern_sched_undefer();
+
+ /*
* Lock the garbage collector mutex to ensure that the garbage
* collector is not using the dead thread list.
*/
diff --git a/lib/libc_r/uthread/uthread_fd.c b/lib/libc_r/uthread/uthread_fd.c
index fa8b88b..e9ec27b 100644
--- a/lib/libc_r/uthread/uthread_fd.c
+++ b/lib/libc_r/uthread/uthread_fd.c
@@ -29,7 +29,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: uthread_fd.c,v 1.8 1998/06/09 23:16:53 jb Exp $
+ * $Id: uthread_fd.c,v 1.9 1998/09/13 15:33:42 dt Exp $
*
*/
#include <errno.h>
@@ -199,7 +199,7 @@ _thread_fd_unlock(int fd, int lock_type)
} else {
/*
* Set the state of the new owner of
- * the thread to running:
+ * the thread to running:
*/
PTHREAD_NEW_STATE(_thread_fd_table[fd]->r_owner,PS_RUNNING);
diff --git a/lib/libc_r/uthread/uthread_fork.c b/lib/libc_r/uthread/uthread_fork.c
index 960c1de..5582c1e 100644
--- a/lib/libc_r/uthread/uthread_fork.c
+++ b/lib/libc_r/uthread/uthread_fork.c
@@ -41,7 +41,7 @@
pid_t
fork(void)
{
- int flags;
+ int i, flags;
pid_t ret;
pthread_t pthread;
pthread_t pthread_next;
@@ -88,6 +88,11 @@ fork(void)
else if (_thread_sys_fcntl(_thread_kern_pipe[1], F_SETFL, flags | O_NONBLOCK) == -1) {
/* Abort this application: */
abort();
+ /* Initialize the ready queue: */
+ } else if (_pq_init(&_readyq, PTHREAD_MIN_PRIORITY,
+ PTHREAD_MAX_PRIORITY) != 0) {
+ /* Abort this application: */
+ PANIC("Cannot allocate priority ready queue.");
} else {
/* Point to the first thread in the list: */
pthread = _thread_link_list;
@@ -119,6 +124,33 @@ fork(void)
/* Point to the next thread: */
pthread = pthread_next;
}
+
+ /* Re-init the waiting queues. */
+ TAILQ_INIT(&_waitingq);
+
+ /* Initialize the scheduling switch hook routine: */
+ _sched_switch_hook = NULL;
+
+ /* Clear out any locks in the file descriptor table: */
+ for (i = 0; i < _thread_dtablesize; i++) {
+ if (_thread_fd_table[i] != NULL) {
+ /* Initialise the file locks: */
+ memset(&_thread_fd_table[i]->lock, 0,
+ sizeof(_thread_fd_table[i]->lock));
+ _thread_fd_table[i]->r_owner = NULL;
+ _thread_fd_table[i]->w_owner = NULL;
+ _thread_fd_table[i]->r_fname = NULL;
+ _thread_fd_table[i]->w_fname = NULL;
+			_thread_fd_table[i]->r_lineno = 0;
+			_thread_fd_table[i]->w_lineno = 0;
+			_thread_fd_table[i]->r_lockcount = 0;
+			_thread_fd_table[i]->w_lockcount = 0;
+
+ /* Initialise the read/write queues: */
+ _thread_queue_init(&_thread_fd_table[i]->r_queue);
+ _thread_queue_init(&_thread_fd_table[i]->w_queue);
+ }
+ }
}
}
diff --git a/lib/libc_r/uthread/uthread_gc.c b/lib/libc_r/uthread/uthread_gc.c
index f297fa8..510c51f 100644
--- a/lib/libc_r/uthread/uthread_gc.c
+++ b/lib/libc_r/uthread/uthread_gc.c
@@ -29,7 +29,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: uthread_gc.c,v 1.1 1998/09/30 06:36:56 jb Exp $
+ * $Id: uthread_gc.c,v 1.2 1998/09/30 19:17:51 dt Exp $
*
* Garbage collector thread. Frees memory allocated for dead threads.
*
@@ -47,6 +47,7 @@ _thread_gc(pthread_addr_t arg)
int f_debug;
int f_done = 0;
int ret;
+ sigset_t mask;
pthread_t pthread;
pthread_t pthread_cln;
pthread_t pthread_nxt;
@@ -54,6 +55,13 @@ _thread_gc(pthread_addr_t arg)
struct timespec abstime;
void *p_stack;
+ /* Block all signals */
+ sigfillset (&mask);
+ sigprocmask (SIG_BLOCK, &mask, NULL);
+
+ /* Mark this thread as a library thread (not a user thread). */
+ _thread_run->flags |= PTHREAD_FLAGS_PRIVATE;
+
/* Set a debug flag based on an environment variable. */
f_debug = (getenv("LIBC_R_DEBUG") != NULL);
diff --git a/lib/libc_r/uthread/uthread_getprio.c b/lib/libc_r/uthread/uthread_getprio.c
index 708b8f1..b2c94d6 100644
--- a/lib/libc_r/uthread/uthread_getprio.c
+++ b/lib/libc_r/uthread/uthread_getprio.c
@@ -20,7 +20,7 @@
* THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@@ -38,12 +38,11 @@
int
pthread_getprio(pthread_t pthread)
{
- int ret;
+ int policy, ret;
+ struct sched_param param;
- /* Find the thread in the list of active threads: */
- if ((ret = _find_thread(pthread)) == 0)
- /* Get the thread priority: */
- ret = pthread->pthread_priority;
+ if ((ret = pthread_getschedparam(pthread, &policy, &param)) == 0)
+ ret = param.sched_priority;
else {
/* Invalid thread: */
errno = ret;
diff --git a/lib/libc_r/uthread/uthread_getschedparam.c b/lib/libc_r/uthread/uthread_getschedparam.c
new file mode 100644
index 0000000..e7d18d9
--- /dev/null
+++ b/lib/libc_r/uthread/uthread_getschedparam.c
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Daniel Eischen.
+ * 4. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+#include <errno.h>
+#ifdef _THREAD_SAFE
+#include <pthread.h>
+#include "pthread_private.h"
+
+int
+pthread_getschedparam(pthread_t pthread, int *policy, struct sched_param *param)
+{
+ int ret;
+
+ if ((param == NULL) || (policy == NULL))
+ /* Return an invalid argument error: */
+ ret = EINVAL;
+
+ /* Find the thread in the list of active threads: */
+ else if ((ret = _find_thread(pthread)) == 0) {
+ /* Return the threads base priority and scheduling policy: */
+ param->sched_priority = pthread->base_priority;
+ *policy = pthread->attr.sched_policy;
+ }
+
+ return(ret);
+}
+#endif
diff --git a/lib/libc_r/uthread/uthread_info.c b/lib/libc_r/uthread/uthread_info.c
index f66dd2d..d2d97da 100644
--- a/lib/libc_r/uthread/uthread_info.c
+++ b/lib/libc_r/uthread/uthread_info.c
@@ -60,9 +60,11 @@ static const struct s_thread_info thread_info[] = {
{PS_WAIT_WAIT , "Waiting process"},
{PS_SIGSUSPEND , "Suspended, waiting for a signal"},
{PS_SIGWAIT , "Waiting for a signal"},
+ {PS_SPINBLOCK , "Waiting for a spinlock"},
{PS_JOIN , "Waiting to join"},
{PS_SUSPENDED , "Suspended"},
{PS_DEAD , "Dead"},
+ {PS_DEADLOCK , "Deadlocked"},
{PS_STATE_MAX , "Not a real state!"}
};
@@ -75,6 +77,7 @@ _thread_dump_info(void)
int j;
pthread_t pthread;
char tmpfile[128];
+ pq_list_t *pq_list;
for (i = 0; i < 100000; i++) {
snprintf(tmpfile, sizeof(tmpfile), "/tmp/uthread.dump.%u.%i",
@@ -116,7 +119,7 @@ _thread_dump_info(void)
snprintf(s, sizeof(s),
"--------------------\nThread %p (%s) prio %3d state %s [%s:%d]\n",
pthread, (pthread->name == NULL) ?
- "":pthread->name, pthread->pthread_priority,
+ "":pthread->name, pthread->base_priority,
thread_info[j].name,
pthread->fname,pthread->lineno);
_thread_sys_write(fd, s, strlen(s));
@@ -167,6 +170,50 @@ _thread_dump_info(void)
}
}
+ /* Output a header for ready threads: */
+ strcpy(s, "\n\n=============\nREADY THREADS\n\n");
+ _thread_sys_write(fd, s, strlen(s));
+
+ /* Enter a loop to report each thread in the ready queue: */
+ TAILQ_FOREACH (pq_list, &_readyq.pq_queue, pl_link) {
+ TAILQ_FOREACH(pthread, &pq_list->pl_head, pqe) {
+ /* Find the state: */
+ for (j = 0; j < (sizeof(thread_info) /
+ sizeof(struct s_thread_info)) - 1; j++)
+ if (thread_info[j].state == pthread->state)
+ break;
+ /* Output a record for the current thread: */
+ snprintf(s, sizeof(s),
+ "--------------------\nThread %p (%s) prio %3d state %s [%s:%d]\n",
+ pthread, (pthread->name == NULL) ?
+ "":pthread->name, pthread->base_priority,
+ thread_info[j].name,
+ pthread->fname,pthread->lineno);
+ _thread_sys_write(fd, s, strlen(s));
+ }
+ }
+
+ /* Output a header for waiting threads: */
+ strcpy(s, "\n\n=============\nWAITING THREADS\n\n");
+ _thread_sys_write(fd, s, strlen(s));
+
+ /* Enter a loop to report each thread in the waiting queue: */
+ TAILQ_FOREACH (pthread, &_waitingq, pqe) {
+ /* Find the state: */
+ for (j = 0; j < (sizeof(thread_info) /
+ sizeof(struct s_thread_info)) - 1; j++)
+ if (thread_info[j].state == pthread->state)
+ break;
+ /* Output a record for the current thread: */
+ snprintf(s, sizeof(s),
+ "--------------------\nThread %p (%s) prio %3d state %s [%s:%d]\n",
+ pthread, (pthread->name == NULL) ?
+ "":pthread->name, pthread->base_priority,
+ thread_info[j].name,
+ pthread->fname,pthread->lineno);
+ _thread_sys_write(fd, s, strlen(s));
+ }
+
/* Check if there are no dead threads: */
if (_thread_dead == NULL) {
/* Output a record: */
@@ -186,7 +233,7 @@ _thread_dump_info(void)
/* Output a record for the current thread: */
snprintf(s, sizeof(s),
"Thread %p prio %3d [%s:%d]\n",
- pthread, pthread->pthread_priority,
+ pthread, pthread->base_priority,
pthread->fname,pthread->lineno);
_thread_sys_write(fd, s, strlen(s));
}
diff --git a/lib/libc_r/uthread/uthread_init.c b/lib/libc_r/uthread/uthread_init.c
index 50f3bef..e4411ce 100644
--- a/lib/libc_r/uthread/uthread_init.c
+++ b/lib/libc_r/uthread/uthread_init.c
@@ -20,7 +20,7 @@
* THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@@ -147,6 +147,11 @@ _thread_init(void)
/* Abort this application: */
PANIC("Cannot get kernel write pipe flags");
}
+ /* Initialize the ready queue: */
+ else if (_pq_init(&_readyq, PTHREAD_MIN_PRIORITY, PTHREAD_MAX_PRIORITY) != 0) {
+ /* Abort this application: */
+ PANIC("Cannot allocate priority ready queue.");
+ }
/* Allocate memory for the thread structure of the initial thread: */
else if ((_thread_initial = (pthread_t) malloc(sizeof(struct pthread))) == NULL) {
/*
@@ -157,10 +162,25 @@ _thread_init(void)
} else {
/* Zero the global kernel thread structure: */
memset(&_thread_kern_thread, 0, sizeof(struct pthread));
+ _thread_kern_thread.flags = PTHREAD_FLAGS_PRIVATE;
memset(_thread_initial, 0, sizeof(struct pthread));
+ /* Initialize the waiting queue: */
+ TAILQ_INIT(&_waitingq);
+
+ /* Initialize the scheduling switch hook routine: */
+ _sched_switch_hook = NULL;
+
+ /*
+ * Write a magic value to the thread structure
+ * to help identify valid ones:
+ */
+ _thread_initial->magic = PTHREAD_MAGIC;
+
/* Default the priority of the initial thread: */
- _thread_initial->pthread_priority = PTHREAD_DEFAULT_PRIORITY;
+ _thread_initial->base_priority = PTHREAD_DEFAULT_PRIORITY;
+ _thread_initial->active_priority = PTHREAD_DEFAULT_PRIORITY;
+ _thread_initial->inherited_priority = 0;
/* Initialise the state of the initial thread: */
_thread_initial->state = PS_RUNNING;
@@ -168,7 +188,13 @@ _thread_init(void)
/* Initialise the queue: */
_thread_queue_init(&(_thread_initial->join_queue));
+ /* Initialize the owned mutex queue and count: */
+ TAILQ_INIT(&(_thread_initial->mutexq));
+ _thread_initial->priority_mutex_count = 0;
+
/* Initialise the rest of the fields: */
+ _thread_initial->sched_defer_count = 0;
+ _thread_initial->yield_on_sched_undefer = 0;
_thread_initial->specific_data = NULL;
_thread_initial->cleanup = NULL;
_thread_initial->queue = NULL;
@@ -206,9 +232,9 @@ _thread_init(void)
* signals that the user-thread kernel needs. Actually
* SIGINFO isn't really needed, but it is nice to have.
*/
- if (_thread_sys_sigaction(SIGVTALRM, &act, NULL) != 0 ||
- _thread_sys_sigaction(SIGINFO , &act, NULL) != 0 ||
- _thread_sys_sigaction(SIGCHLD , &act, NULL) != 0) {
+ if (_thread_sys_sigaction(_SCHED_SIGNAL, &act, NULL) != 0 ||
+ _thread_sys_sigaction(SIGINFO, &act, NULL) != 0 ||
+ _thread_sys_sigaction(SIGCHLD, &act, NULL) != 0) {
/*
* Abort this process if signal initialisation fails:
*/
@@ -256,6 +282,8 @@ _thread_init(void)
pthread_cond_init(&_gc_cond,NULL) != 0)
PANIC("Failed to initialise garbage collector mutex or condvar");
+ gettimeofday(&kern_inc_prio_time, NULL);
+
return;
}
diff --git a/lib/libc_r/uthread/uthread_kern.c b/lib/libc_r/uthread/uthread_kern.c
index 3a6966b..626f1d4 100644
--- a/lib/libc_r/uthread/uthread_kern.c
+++ b/lib/libc_r/uthread/uthread_kern.c
@@ -20,7 +20,7 @@
* THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@@ -53,16 +53,18 @@
static void
_thread_kern_select(int wait_reqd);
+static inline void
+thread_run_switch_hook(pthread_t thread_out, pthread_t thread_in);
+
void
_thread_kern_sched(struct sigcontext * scp)
{
#ifndef __alpha__
char *fdata;
#endif
- int prio = -1;
pthread_t pthread;
pthread_t pthread_h = NULL;
- pthread_t pthread_s = NULL;
+ pthread_t last_thread = NULL;
struct itimerval itimer;
struct timespec ts;
struct timespec ts1;
@@ -105,18 +107,21 @@ __asm__("fnsave %0": :"m"(*fdata));
*/
_thread_kern_in_sched = 0;
- /*
- * There might be pending signals for this thread, so
- * dispatch any that aren't blocked:
- */
- _dispatch_signals();
+ if (_sched_switch_hook != NULL) {
+ /* Run the installed switch hook: */
+ thread_run_switch_hook(_last_user_thread, _thread_run);
+ }
return;
} else
/* Flag the jump buffer was the last state saved: */
_thread_run->sig_saved = 0;
+ /* If the currently running thread is a user thread, save it: */
+ if ((_thread_run->flags & PTHREAD_FLAGS_PRIVATE) == 0)
+ _last_user_thread = _thread_run;
+
/*
- * Enter a the scheduling loop that finds the next thread that is
+ * Enter a scheduling loop that finds the next thread that is
* ready to run. This loop completes when there are no more threads
* in the global list or when a thread has its state restored by
* either a sigreturn (if the state was saved as a sigcontext) or a
@@ -134,12 +139,48 @@ __asm__("fnsave %0": :"m"(*fdata));
_thread_kern_select(0);
/*
- * Enter a loop to look for sleeping threads that are ready:
+ * Define the maximum time before a scheduling signal
+ * is required:
+ */
+ itimer.it_value.tv_sec = 0;
+ itimer.it_value.tv_usec = TIMESLICE_USEC;
+
+ /*
+ * The interval timer is not reloaded when it
+ * times out. The interval time needs to be
+ * calculated every time.
+ */
+ itimer.it_interval.tv_sec = 0;
+ itimer.it_interval.tv_usec = 0;
+
+ /*
+ * Enter a loop to look for sleeping threads that are ready
+ * or timedout. While we're at it, also find the smallest
+ * timeout value for threads waiting for a time.
*/
- for (pthread = _thread_link_list; pthread != NULL;
- pthread = pthread->nxt) {
+ _waitingq_check_reqd = 0; /* reset flag before loop */
+ TAILQ_FOREACH(pthread, &_waitingq, pqe) {
+ /* Check if this thread is ready: */
+ if (pthread->state == PS_RUNNING) {
+ PTHREAD_WAITQ_REMOVE(pthread);
+ PTHREAD_PRIOQ_INSERT_TAIL(pthread);
+ }
+
+ /*
+ * Check if this thread is blocked by an
+ * atomic lock:
+ */
+ else if (pthread->state == PS_SPINBLOCK) {
+ /*
+ * If the lock is available, let
+ * the thread run.
+ */
+ if (pthread->data.spinlock->access_lock == 0) {
+ PTHREAD_NEW_STATE(pthread,PS_RUNNING);
+ }
+
/* Check if this thread is to timeout: */
- if (pthread->state == PS_COND_WAIT ||
+ } else if (pthread->state == PS_COND_WAIT ||
pthread->state == PS_SLEEP_WAIT ||
pthread->state == PS_FDR_WAIT ||
pthread->state == PS_FDW_WAIT ||
@@ -163,9 +204,9 @@ __asm__("fnsave %0": :"m"(*fdata));
*/
if (pthread->state == PS_SELECT_WAIT) {
/*
- * The select has timed out,
- * so zero the file
- * descriptor sets:
+ * The select has timed out, so
+ * zero the file descriptor
+ * sets:
*/
FD_ZERO(&pthread->data.select_data->readfds);
FD_ZERO(&pthread->data.select_data->writefds);
@@ -189,13 +230,72 @@ __asm__("fnsave %0": :"m"(*fdata));
* it to be restarted:
*/
PTHREAD_NEW_STATE(pthread,PS_RUNNING);
+ } else {
+ /*
+ * Calculate the time until this thread
+ * is ready, allowing for the clock
+ * resolution:
+ */
+ ts1.tv_sec = pthread->wakeup_time.tv_sec
+ - ts.tv_sec;
+ ts1.tv_nsec = pthread->wakeup_time.tv_nsec
+ - ts.tv_nsec + CLOCK_RES_NSEC;
+
+ /*
+ * Check for underflow of the
+ * nanosecond field:
+ */
+ if (ts1.tv_nsec < 0) {
+ /*
+ * Allow for the underflow
+ * of the nanosecond field:
+ */
+ ts1.tv_sec--;
+ ts1.tv_nsec += 1000000000;
+ }
+ /*
+ * Check for overflow of the nanosecond
+ * field:
+ */
+ if (ts1.tv_nsec >= 1000000000) {
+ /*
+ * Allow for the overflow of
+ * the nanosecond field:
+ */
+ ts1.tv_sec++;
+ ts1.tv_nsec -= 1000000000;
+ }
+ /*
+ * Convert the timespec structure
+ * to a timeval structure:
+ */
+ TIMESPEC_TO_TIMEVAL(&tv1, &ts1);
+
+ /*
+ * Check if the thread will be ready
+ * sooner than the earliest ones found
+ * so far:
+ */
+ if (timercmp(&tv1, &itimer.it_value, <)) {
+ /*
+ * Update the time value:
+ */
+ itimer.it_value.tv_sec = tv1.tv_sec;
+ itimer.it_value.tv_usec = tv1.tv_usec;
+ }
}
+
}
}
/* Check if there is a current thread: */
if (_thread_run != &_thread_kern_thread) {
/*
+ * This thread no longer needs to yield the CPU.
+ */
+ _thread_run->yield_on_sched_undefer = 0;
+
+ /*
* Save the current time as the time that the thread
* became inactive:
*/
@@ -204,194 +304,64 @@ __asm__("fnsave %0": :"m"(*fdata));
/*
* Accumulate the number of microseconds that this
- * thread has run for:
+ * thread has run for:
*/
- if (_thread_run->slice_usec != -1) {
- _thread_run->slice_usec += (_thread_run->last_inactive.tv_sec -
- _thread_run->last_active.tv_sec) * 1000000 +
- _thread_run->last_inactive.tv_usec -
- _thread_run->last_active.tv_usec;
- }
-
- /*
- * Check if this thread has reached its allocated
- * time slice period:
- */
- if (_thread_run->slice_usec > TIMESLICE_USEC) {
- /*
- * Flag the allocated time slice period as
- * up:
- */
- _thread_run->slice_usec = -1;
+ if ((_thread_run->slice_usec != -1) &&
+ (_thread_run->attr.sched_policy != SCHED_FIFO)) {
+ _thread_run->slice_usec +=
+ (_thread_run->last_inactive.tv_sec -
+ _thread_run->last_active.tv_sec) * 1000000 +
+ _thread_run->last_inactive.tv_usec -
+ _thread_run->last_active.tv_usec;
+
+ /* Check for time quantum exceeded: */
+ if (_thread_run->slice_usec > TIMESLICE_USEC)
+ _thread_run->slice_usec = -1;
}
- }
- /* Check if an incremental priority update is required: */
- if (((tv.tv_sec - kern_inc_prio_time.tv_sec) * 1000000 +
- tv.tv_usec - kern_inc_prio_time.tv_usec) > INC_PRIO_USEC) {
- /*
- * Enter a loop to look for run-enabled threads that
- * have not run since the last time that an
- * incremental priority update was performed:
- */
- for (pthread = _thread_link_list; pthread != NULL; pthread = pthread->nxt) {
- /* Check if this thread is unable to run: */
- if (pthread->state != PS_RUNNING) {
- }
- /*
- * Check if the last time that this thread
- * was run (as indicated by the last time it
- * became inactive) is before the time that
- * the last incremental priority check was
- * made:
- */
- else if (timercmp(&pthread->last_inactive, &kern_inc_prio_time, <)) {
+ if (_thread_run->state == PS_RUNNING) {
+ if (_thread_run->slice_usec == -1) {
+ /*
+ * The thread exceeded its time
+ * quantum or it yielded the CPU;
+ * place it at the tail of the
+ * queue for its priority.
+ */
+ PTHREAD_PRIOQ_INSERT_TAIL(_thread_run);
+ } else {
/*
- * Increment the incremental priority
- * for this thread in the hope that
- * it will eventually get a chance to
- * run:
+ * The thread hasn't exceeded its
+ * interval. Place it at the head
+ * of the queue for its priority.
*/
- (pthread->inc_prio)++;
+ PTHREAD_PRIOQ_INSERT_HEAD(_thread_run);
}
}
-
- /* Save the new incremental priority update time: */
- kern_inc_prio_time.tv_sec = tv.tv_sec;
- kern_inc_prio_time.tv_usec = tv.tv_usec;
- }
- /*
- * Enter a loop to look for the first thread of the highest
- * priority that is ready to run:
- */
- for (pthread = _thread_link_list; pthread != NULL; pthread = pthread->nxt) {
- /* Check if the current thread is unable to run: */
- if (pthread->state != PS_RUNNING) {
- }
- /*
- * Check if no run-enabled thread has been seen or if
- * the current thread has a priority higher than the
- * highest seen so far:
- */
- else if (pthread_h == NULL || (pthread->pthread_priority + pthread->inc_prio) > prio) {
+ else if (_thread_run->state == PS_DEAD) {
/*
- * Save this thread as the highest priority
- * thread seen so far:
+ * Don't add dead threads to the waiting
+ * queue, because when they're reaped, it
+ * will corrupt the queue.
*/
- pthread_h = pthread;
- prio = pthread->pthread_priority + pthread->inc_prio;
}
- }
-
- /*
- * Enter a loop to look for a thread that: 1. Is run-enabled.
- * 2. Has the required agregate priority. 3. Has not been
- * allocated its allocated time slice. 4. Became inactive
- * least recently.
- */
- for (pthread = _thread_link_list; pthread != NULL; pthread = pthread->nxt) {
- /* Check if the current thread is unable to run: */
- if (pthread->state != PS_RUNNING) {
- /* Ignore threads that are not ready to run. */
- }
-
- /*
- * Check if the current thread as an agregate
- * priority not equal to the highest priority found
- * above:
- */
- else if ((pthread->pthread_priority + pthread->inc_prio) != prio) {
+ else {
/*
- * Ignore threads which have lower agregate
- * priority.
+ * This thread has changed state and needs
+ * to be placed in the waiting queue.
*/
- }
-
- /*
- * Check if the current thread reached its time slice
- * allocation last time it ran (or if it has not run
- * yet):
- */
- else if (pthread->slice_usec == -1) {
- }
+ PTHREAD_WAITQ_INSERT(_thread_run);
- /*
- * Check if an eligible thread has not been found
- * yet, or if the current thread has an inactive time
- * earlier than the last one seen:
- */
- else if (pthread_s == NULL || timercmp(&pthread->last_inactive, &tv1, <)) {
- /*
- * Save the pointer to the current thread as
- * the most eligible thread seen so far:
- */
- pthread_s = pthread;
-
- /*
- * Save the time that the selected thread
- * became inactive:
- */
- tv1.tv_sec = pthread->last_inactive.tv_sec;
- tv1.tv_usec = pthread->last_inactive.tv_usec;
+ /* Restart the time slice: */
+ _thread_run->slice_usec = -1;
}
}
/*
- * Check if no thread was selected according to incomplete
- * time slice allocation:
+ * Get the highest priority thread in the ready queue.
*/
- if (pthread_s == NULL) {
- /*
- * Enter a loop to look for any other thread that: 1.
- * Is run-enabled. 2. Has the required agregate
- * priority. 3. Became inactive least recently.
- */
- for (pthread = _thread_link_list; pthread != NULL; pthread = pthread->nxt) {
- /*
- * Check if the current thread is unable to
- * run:
- */
- if (pthread->state != PS_RUNNING) {
- /*
- * Ignore threads that are not ready
- * to run.
- */
- }
- /*
- * Check if the current thread as an agregate
- * priority not equal to the highest priority
- * found above:
- */
- else if ((pthread->pthread_priority + pthread->inc_prio) != prio) {
- /*
- * Ignore threads which have lower
- * agregate priority.
- */
- }
- /*
- * Check if an eligible thread has not been
- * found yet, or if the current thread has an
- * inactive time earlier than the last one
- * seen:
- */
- else if (pthread_s == NULL || timercmp(&pthread->last_inactive, &tv1, <)) {
- /*
- * Save the pointer to the current
- * thread as the most eligible thread
- * seen so far:
- */
- pthread_s = pthread;
+ pthread_h = PTHREAD_PRIOQ_FIRST;
- /*
- * Save the time that the selected
- * thread became inactive:
- */
- tv1.tv_sec = pthread->last_inactive.tv_sec;
- tv1.tv_usec = pthread->last_inactive.tv_usec;
- }
- }
- }
/* Check if there are no threads ready to run: */
- if (pthread_s == NULL) {
+ if (pthread_h == NULL) {
/*
* Lock the pthread kernel by changing the pointer to
* the running thread to point to the global kernel
@@ -406,7 +376,10 @@ __asm__("fnsave %0": :"m"(*fdata));
_thread_kern_select(1);
} else {
/* Make the selected thread the current thread: */
- _thread_run = pthread_s;
+ _thread_run = pthread_h;
+
+ /* Remove the thread from the ready queue. */
+ PTHREAD_PRIOQ_REMOVE(_thread_run);
/*
* Save the current time as the time that the thread
@@ -424,149 +397,22 @@ __asm__("fnsave %0": :"m"(*fdata));
/* Reset the accumulated time slice period: */
_thread_run->slice_usec = 0;
}
- /*
- * Reset the incremental priority now that this
- * thread has been given the chance to run:
- */
- _thread_run->inc_prio = 0;
/* Check if there is more than one thread: */
if (_thread_run != _thread_link_list || _thread_run->nxt != NULL) {
/*
- * Define the maximum time before a SIGVTALRM
- * is required:
- */
- itimer.it_value.tv_sec = 0;
- itimer.it_value.tv_usec = TIMESLICE_USEC;
-
- /*
- * The interval timer is not reloaded when it
- * times out. The interval time needs to be
- * calculated every time.
- */
- itimer.it_interval.tv_sec = 0;
- itimer.it_interval.tv_usec = 0;
-
- /*
- * Enter a loop to look for threads waiting
- * for a time:
- */
- for (pthread = _thread_link_list; pthread != NULL; pthread = pthread->nxt) {
- /*
- * Check if this thread is to
- * timeout:
- */
- if (pthread->state == PS_COND_WAIT ||
- pthread->state == PS_SLEEP_WAIT ||
- pthread->state == PS_FDR_WAIT ||
- pthread->state == PS_FDW_WAIT ||
- pthread->state == PS_SELECT_WAIT) {
- /*
- * Check if this thread is to
- * wait forever:
- */
- if (pthread->wakeup_time.tv_sec == -1) {
- }
- /*
- * Check if this thread is to
- * wakeup immediately:
- */
- else if (pthread->wakeup_time.tv_sec == 0 &&
- pthread->wakeup_time.tv_nsec == 0) {
- }
- /*
- * Check if the current time
- * is after the wakeup time:
- */
- else if ((ts.tv_sec > pthread->wakeup_time.tv_sec) ||
- ((ts.tv_sec == pthread->wakeup_time.tv_sec) &&
- (ts.tv_nsec > pthread->wakeup_time.tv_nsec))) {
- } else {
- /*
- * Calculate the time
- * until this thread
- * is ready, allowing
- * for the clock
- * resolution:
- */
- ts1.tv_sec = pthread->wakeup_time.tv_sec - ts.tv_sec;
- ts1.tv_nsec = pthread->wakeup_time.tv_nsec - ts.tv_nsec +
- CLOCK_RES_NSEC;
-
- /*
- * Check for
- * underflow of the
- * nanosecond field:
- */
- if (ts1.tv_nsec < 0) {
- /*
- * Allow for
- * the
- * underflow
- * of the
- * nanosecond
- * field:
- */
- ts1.tv_sec--;
- ts1.tv_nsec += 1000000000;
- }
- /*
- * Check for overflow
- * of the nanosecond
- * field:
- */
- if (ts1.tv_nsec >= 1000000000) {
- /*
- * Allow for
- * the
- * overflow
- * of the
- * nanosecond
- * field:
- */
- ts1.tv_sec++;
- ts1.tv_nsec -= 1000000000;
- }
- /*
- * Convert the
- * timespec structure
- * to a timeval
- * structure:
- */
- TIMESPEC_TO_TIMEVAL(&tv, &ts1);
-
- /*
- * Check if the
- * thread will be
- * ready sooner than
- * the earliest one
- * found so far:
- */
- if (timercmp(&tv, &itimer.it_value, <)) {
- /*
- * Update the
- * time
- * value:
- */
- itimer.it_value.tv_sec = tv.tv_sec;
- itimer.it_value.tv_usec = tv.tv_usec;
- }
- }
- }
- }
-
- /*
* Start the interval timer for the
* calculated time interval:
*/
- if (setitimer(ITIMER_VIRTUAL, &itimer, NULL) != 0) {
+ if (setitimer(_ITIMER_SCHED_TIMER, &itimer, NULL) != 0) {
/*
* Cannot initialise the timer, so
* abort this process:
*/
- PANIC("Cannot set virtual timer");
+ PANIC("Cannot set scheduling timer");
}
}
+
/* Check if a signal context was saved: */
if (_thread_run->sig_saved == 1) {
#ifndef __alpha__
@@ -579,20 +425,30 @@ __asm__("fnsave %0": :"m"(*fdata));
/* Restore the floating point state: */
__asm__("frstor %0": :"m"(*fdata));
#endif
-
/*
* Do a sigreturn to restart the thread that
* was interrupted by a signal:
*/
- _thread_kern_in_sched = 0;
+ _thread_kern_in_sched = 0;
+
+ /*
+ * If we had a context switch, run any
+ * installed switch hooks.
+ */
+ if ((_sched_switch_hook != NULL) &&
+ (_last_user_thread != _thread_run)) {
+ thread_run_switch_hook(_last_user_thread,
+ _thread_run);
+ }
_thread_sys_sigreturn(&_thread_run->saved_sigcontext);
- } else
+ } else {
/*
* Do a longjmp to restart the thread that
* was context switched out (by a longjmp to
* a different thread):
*/
longjmp(_thread_run->saved_jmp_buf, 1);
+ }
/* This point should not be reached. */
PANIC("Thread has returned from sigreturn or longjmp");
@@ -679,7 +535,8 @@ _thread_kern_select(int wait_reqd)
* Enter a loop to process threads waiting on either file descriptors
* or times:
*/
- for (pthread = _thread_link_list; pthread != NULL; pthread = pthread->nxt) {
+ _waitingq_check_reqd = 0; /* reset flag before loop */
+ TAILQ_FOREACH (pthread, &_waitingq, pqe) {
/* Assume that this state does not time out: */
settimeout = 0;
@@ -690,12 +547,12 @@ _thread_kern_select(int wait_reqd)
* operations or timeouts:
*/
case PS_DEAD:
+ case PS_DEADLOCK:
case PS_FDLR_WAIT:
case PS_FDLW_WAIT:
case PS_FILE_WAIT:
case PS_JOIN:
case PS_MUTEX_WAIT:
- case PS_RUNNING:
case PS_SIGTHREAD:
case PS_SIGWAIT:
case PS_STATE_MAX:
@@ -704,6 +561,16 @@ _thread_kern_select(int wait_reqd)
/* Nothing to do here. */
break;
+ case PS_RUNNING:
+ /*
+ * A signal occurred and made this thread ready
+ * while in the scheduler or while the scheduling
+ * queues were protected.
+ */
+ PTHREAD_WAITQ_REMOVE(pthread);
+ PTHREAD_PRIOQ_INSERT_TAIL(pthread);
+ break;
+
/* File descriptor read wait: */
case PS_FDR_WAIT:
/* Add the file descriptor to the read set: */
@@ -1010,16 +877,16 @@ _thread_kern_select(int wait_reqd)
* descriptors that are flagged as available by the
* _select syscall:
*/
- for (pthread = _thread_link_list; pthread != NULL; pthread = pthread->nxt) {
+ TAILQ_FOREACH (pthread, &_waitingq, pqe) {
/* Process according to thread state: */
switch (pthread->state) {
/*
* States which do not depend on file
* descriptor I/O operations:
*/
- case PS_RUNNING:
case PS_COND_WAIT:
case PS_DEAD:
+ case PS_DEADLOCK:
case PS_FDLR_WAIT:
case PS_FDLW_WAIT:
case PS_FILE_WAIT:
@@ -1034,6 +901,15 @@ _thread_kern_select(int wait_reqd)
/* Nothing to do here. */
break;
+ case PS_RUNNING:
+ /*
+ * A signal occurred and made this thread
+ * ready while in the scheduler.
+ */
+ PTHREAD_WAITQ_REMOVE(pthread);
+ PTHREAD_PRIOQ_INSERT_TAIL(pthread);
+ break;
+
/* File descriptor read wait: */
case PS_FDR_WAIT:
/*
@@ -1047,6 +923,13 @@ _thread_kern_select(int wait_reqd)
* is scheduled next:
*/
pthread->state = PS_RUNNING;
+
+ /*
+ * Remove it from the waiting queue
+ * and add it to the ready queue:
+ */
+ PTHREAD_WAITQ_REMOVE(pthread);
+ PTHREAD_PRIOQ_INSERT_TAIL(pthread);
}
break;
@@ -1063,6 +946,13 @@ _thread_kern_select(int wait_reqd)
* scheduled next:
*/
pthread->state = PS_RUNNING;
+
+ /*
+ * Remove it from the waiting queue
+ * and add it to the ready queue:
+ */
+ PTHREAD_WAITQ_REMOVE(pthread);
+ PTHREAD_PRIOQ_INSERT_TAIL(pthread);
}
break;
@@ -1269,6 +1159,13 @@ _thread_kern_select(int wait_reqd)
* thread to run:
*/
pthread->state = PS_RUNNING;
+
+ /*
+ * Remove it from the waiting queue
+ * and add it to the ready queue:
+ */
+ PTHREAD_WAITQ_REMOVE(pthread);
+ PTHREAD_PRIOQ_INSERT_TAIL(pthread);
}
break;
}
@@ -1320,4 +1217,80 @@ _thread_kern_set_timeout(struct timespec * timeout)
}
return;
}
+
+void
+_thread_kern_sched_defer(void)
+{
+	/* Nestable deferral count; preemption resumes when it drops to 0: */
+	_thread_run->sched_defer_count++;
+}
+
+void
+_thread_kern_sched_undefer(void)
+{
+	pthread_t pthread;
+	int need_resched = 0;
+
+	/*
+	 * Only check whether a yield is needed when this call is about
+	 * to drop the last (outermost) level of scheduling deferral.
+	 */
+	if (_thread_run->sched_defer_count == 1) {
+		/*
+		 * Check if the waiting queue needs to be examined for
+		 * threads that are now ready:
+		 */
+		while (_waitingq_check_reqd != 0) {
+			/* Clear first; a signal may set it again mid-scan: */
+			_waitingq_check_reqd = 0;
+
+			TAILQ_FOREACH(pthread, &_waitingq, pqe) {
+				if (pthread->state == PS_RUNNING) {
+					PTHREAD_WAITQ_REMOVE(pthread);
+					PTHREAD_PRIOQ_INSERT_TAIL(pthread);
+				}
+			}
+		}
+
+		/*
+		 * We need to yield if a thread change of state caused a
+		 * higher priority thread to become ready, or if a
+		 * scheduling signal occurred while preemption was disabled.
+		 */
+		if ((((pthread = PTHREAD_PRIOQ_FIRST) != NULL) &&
+		    (pthread->active_priority > _thread_run->active_priority)) ||
+		    (_thread_run->yield_on_sched_undefer != 0)) {
+			_thread_run->yield_on_sched_undefer = 0;
+			need_resched = 1;
+		}
+	}
+
+	if (_thread_run->sched_defer_count > 0) {
+		/* Decrement the scheduling deferral count. */
+		_thread_run->sched_defer_count--;
+
+		/* Yield the CPU if necessary: */
+		if (need_resched)
+			_thread_kern_sched(NULL);
+	}
+}
+
+static inline void
+thread_run_switch_hook(pthread_t thread_out, pthread_t thread_in)
+{
+	pthread_t tid_out = thread_out;
+	pthread_t tid_in = thread_in;
+	/* Report private (kernel) threads to the hook as NULL: */
+	if ((tid_out != NULL) &&
+	    ((tid_out->flags & PTHREAD_FLAGS_PRIVATE) != 0))
+		tid_out = NULL;
+	if ((tid_in != NULL) &&
+	    ((tid_in->flags & PTHREAD_FLAGS_PRIVATE) != 0))
+		tid_in = NULL;
+
+	if ((_sched_switch_hook != NULL) && (tid_out != tid_in)) {
+		/* Run the scheduler switch hook: */
+		_sched_switch_hook(tid_out, tid_in);
+	}
+}
#endif
diff --git a/lib/libc_r/uthread/uthread_kill.c b/lib/libc_r/uthread/uthread_kill.c
index 7572c05..c729179 100644
--- a/lib/libc_r/uthread/uthread_kill.c
+++ b/lib/libc_r/uthread/uthread_kill.c
@@ -52,6 +52,13 @@ pthread_kill(pthread_t pthread, int sig)
/* Find the thread in the list of active threads: */
else if ((ret = _find_thread(pthread)) == 0) {
+ /*
+ * Guard against preemption by a scheduling signal.
+ * A change of thread state modifies the waiting
+ * and priority queues.
+ */
+ _thread_kern_sched_defer();
+
switch (pthread->state) {
case PS_SIGSUSPEND:
/*
@@ -108,6 +115,12 @@ pthread_kill(pthread_t pthread, int sig)
sigaddset(&pthread->sigpend,sig);
break;
}
+
+ /*
+ * Reenable preemption and yield if a scheduling signal
+ * occurred while in the critical region.
+ */
+ _thread_kern_sched_undefer();
}
/* Return the completion status: */
diff --git a/lib/libc_r/uthread/uthread_mattr_init.c b/lib/libc_r/uthread/uthread_mattr_init.c
index 73226a6..206485f 100644
--- a/lib/libc_r/uthread/uthread_mattr_init.c
+++ b/lib/libc_r/uthread/uthread_mattr_init.c
@@ -20,7 +20,7 @@
* THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/lib/libc_r/uthread/uthread_mutex.c b/lib/libc_r/uthread/uthread_mutex.c
index d3801f1..0103a6c 100644
--- a/lib/libc_r/uthread/uthread_mutex.c
+++ b/lib/libc_r/uthread/uthread_mutex.c
@@ -20,7 +20,7 @@
* THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@@ -33,78 +33,116 @@
#include <stdlib.h>
#include <errno.h>
#include <string.h>
+#include <sys/param.h>
+#include <sys/queue.h>
#ifdef _THREAD_SAFE
#include <pthread.h>
#include "pthread_private.h"
+
+/*
+ * Prototypes
+ */
+static inline int mutex_self_trylock(pthread_mutex_t);
+static inline int mutex_self_lock(pthread_mutex_t);
+static inline int mutex_unlock_common(pthread_mutex_t *, int);
+static void mutex_priority_adjust(pthread_mutex_t);
+static void mutex_rescan_owned (pthread_t, pthread_mutex_t);
+static inline pthread_t mutex_queue_deq(pthread_mutex_t);
+static inline void mutex_queue_remove(pthread_mutex_t, pthread_t);
+static inline void mutex_queue_enq(pthread_mutex_t, pthread_t);
+
+
static spinlock_t static_init_lock = _SPINLOCK_INITIALIZER;
+
int
pthread_mutex_init(pthread_mutex_t * mutex,
const pthread_mutexattr_t * mutex_attr)
{
- enum pthread_mutextype type;
+ enum pthread_mutextype type;
+ int protocol;
+ int ceiling;
pthread_mutex_t pmutex;
int ret = 0;
- if (mutex == NULL) {
+ if (mutex == NULL)
ret = EINVAL;
- } else {
- /* Check if default mutex attributes: */
- if (mutex_attr == NULL || *mutex_attr == NULL)
- /* Default to a fast mutex: */
- type = PTHREAD_MUTEX_DEFAULT;
- else if ((*mutex_attr)->m_type >= MUTEX_TYPE_MAX)
- /* Return an invalid argument error: */
- ret = EINVAL;
- else
- /* Use the requested mutex type: */
- type = (*mutex_attr)->m_type;
-
- /* Check no errors so far: */
- if (ret == 0) {
- if ((pmutex = (pthread_mutex_t)
- malloc(sizeof(struct pthread_mutex))) == NULL)
- ret = ENOMEM;
- else {
- /* Reset the mutex flags: */
- pmutex->m_flags = 0;
-
- /* Process according to mutex type: */
- switch (type) {
- /* Fast mutex: */
- case PTHREAD_MUTEX_DEFAULT:
- case PTHREAD_MUTEX_NORMAL:
- case PTHREAD_MUTEX_ERRORCHECK:
- /* Nothing to do here. */
- break;
-
- /* Counting mutex: */
- case PTHREAD_MUTEX_RECURSIVE:
- /* Reset the mutex count: */
- pmutex->m_data.m_count = 0;
- break;
-
- /* Trap invalid mutex types: */
- default:
- /* Return an invalid argument error: */
- ret = EINVAL;
- break;
- }
- if (ret == 0) {
- /* Initialise the rest of the mutex: */
- _thread_queue_init(&pmutex->m_queue);
- pmutex->m_flags |= MUTEX_FLAGS_INITED;
- pmutex->m_owner = NULL;
- pmutex->m_type = type;
- memset(&pmutex->lock, 0,
- sizeof(pmutex->lock));
- *mutex = pmutex;
- } else {
- free(pmutex);
- *mutex = NULL;
- }
+ /* Check if default mutex attributes: */
+ else if (mutex_attr == NULL || *mutex_attr == NULL) {
+ /* Default to a (error checking) POSIX mutex: */
+ type = PTHREAD_MUTEX_ERRORCHECK;
+ protocol = PTHREAD_PRIO_NONE;
+ ceiling = PTHREAD_MAX_PRIORITY;
+ }
+
+ /* Check mutex type: */
+ else if (((*mutex_attr)->m_type < PTHREAD_MUTEX_ERRORCHECK) ||
+ ((*mutex_attr)->m_type >= MUTEX_TYPE_MAX))
+ /* Return an invalid argument error: */
+ ret = EINVAL;
+
+	/* Check mutex protocol (PTHREAD_PRIO_PROTECT is the highest value): */
+	else if (((*mutex_attr)->m_protocol < PTHREAD_PRIO_NONE) ||
+	    ((*mutex_attr)->m_protocol > PTHREAD_PRIO_PROTECT))
+		/* Return an invalid argument error: */
+		ret = EINVAL;
+
+ else {
+ /* Use the requested mutex type and protocol: */
+ type = (*mutex_attr)->m_type;
+ protocol = (*mutex_attr)->m_protocol;
+ ceiling = (*mutex_attr)->m_ceiling;
+ }
+
+ /* Check no errors so far: */
+ if (ret == 0) {
+ if ((pmutex = (pthread_mutex_t)
+ malloc(sizeof(struct pthread_mutex))) == NULL)
+ ret = ENOMEM;
+ else {
+ /* Reset the mutex flags: */
+ pmutex->m_flags = 0;
+
+ /* Process according to mutex type: */
+ switch (type) {
+ /* case PTHREAD_MUTEX_DEFAULT: */
+ case PTHREAD_MUTEX_ERRORCHECK:
+ case PTHREAD_MUTEX_NORMAL:
+ /* Nothing to do here. */
+ break;
+
+ /* Single UNIX Spec 2 recursive mutex: */
+ case PTHREAD_MUTEX_RECURSIVE:
+ /* Reset the mutex count: */
+ pmutex->m_data.m_count = 0;
+ break;
+
+ /* Trap invalid mutex types: */
+ default:
+ /* Return an invalid argument error: */
+ ret = EINVAL;
+ break;
+ }
+ if (ret == 0) {
+ /* Initialise the rest of the mutex: */
+ TAILQ_INIT(&pmutex->m_queue);
+ pmutex->m_flags |= MUTEX_FLAGS_INITED;
+ pmutex->m_owner = NULL;
+ pmutex->m_type = type;
+ pmutex->m_protocol = protocol;
+ pmutex->m_refcount = 0;
+ if (protocol == PTHREAD_PRIO_PROTECT)
+ pmutex->m_prio = ceiling;
+ else
+ pmutex->m_prio = 0;
+ pmutex->m_saved_prio = 0;
+ memset(&pmutex->lock, 0, sizeof(pmutex->lock));
+ *mutex = pmutex;
+ } else {
+ free(pmutex);
+ *mutex = NULL;
}
}
}
@@ -124,16 +162,29 @@ pthread_mutex_destroy(pthread_mutex_t * mutex)
_SPINLOCK(&(*mutex)->lock);
/*
- * Free the memory allocated for the mutex
- * structure:
+ * Check to see if this mutex is in use:
*/
- free(*mutex);
+ if (((*mutex)->m_owner != NULL) ||
+ (TAILQ_FIRST(&(*mutex)->m_queue) != NULL) ||
+ ((*mutex)->m_refcount != 0)) {
+ ret = EBUSY;
- /*
- * Leave the caller's pointer NULL now that
- * the mutex has been destroyed:
- */
- *mutex = NULL;
+ /* Unlock the mutex structure: */
+ _SPINUNLOCK(&(*mutex)->lock);
+ }
+ else {
+ /*
+ * Free the memory allocated for the mutex
+ * structure:
+ */
+ free(*mutex);
+
+ /*
+ * Leave the caller's pointer NULL now that
+ * the mutex has been destroyed:
+ */
+ *mutex = NULL;
+ }
}
/* Return the completion status: */
@@ -170,44 +221,100 @@ pthread_mutex_trylock(pthread_mutex_t * mutex)
* initialization:
*/
else if (*mutex != NULL || (ret = init_static(mutex)) == 0) {
+ /*
+ * Guard against being preempted by a scheduling signal.
+ * To support priority inheritance mutexes, we need to
+ * maintain lists of mutex ownerships for each thread as
+ * well as lists of waiting threads for each mutex. In
+ * order to propagate priorities we need to atomically
+ * walk these lists and cannot rely on a single mutex
+ * lock to provide protection against modification.
+ */
+ _thread_kern_sched_defer();
+
/* Lock the mutex structure: */
_SPINLOCK(&(*mutex)->lock);
/* Process according to mutex type: */
- switch ((*mutex)->m_type) {
- /* Fast mutex: */
- case PTHREAD_MUTEX_NORMAL:
- case PTHREAD_MUTEX_DEFAULT:
- case PTHREAD_MUTEX_ERRORCHECK:
+ switch ((*mutex)->m_protocol) {
+ /* Default POSIX mutex: */
+ case PTHREAD_PRIO_NONE:
/* Check if this mutex is not locked: */
if ((*mutex)->m_owner == NULL) {
/* Lock the mutex for the running thread: */
(*mutex)->m_owner = _thread_run;
- } else {
+
+ /* Add to the list of owned mutexes: */
+ TAILQ_INSERT_TAIL(&_thread_run->mutexq,
+ (*mutex), m_qe);
+ } else if ((*mutex)->m_owner == _thread_run)
+ ret = mutex_self_trylock(*mutex);
+ else
/* Return a busy error: */
ret = EBUSY;
- }
break;
- /* Counting mutex: */
- case PTHREAD_MUTEX_RECURSIVE:
- /* Check if this mutex is locked: */
- if ((*mutex)->m_owner != NULL) {
+ /* POSIX priority inheritence mutex: */
+ case PTHREAD_PRIO_INHERIT:
+ /* Check if this mutex is not locked: */
+ if ((*mutex)->m_owner == NULL) {
+ /* Lock the mutex for the running thread: */
+ (*mutex)->m_owner = _thread_run;
+
+ /* Track number of priority mutexes owned: */
+ _thread_run->priority_mutex_count++;
+
/*
- * Check if the mutex is locked by the running
- * thread:
+ * The mutex takes on the attributes of the
+ * running thread when there are no waiters.
*/
- if ((*mutex)->m_owner == _thread_run) {
- /* Increment the lock count: */
- (*mutex)->m_data.m_count++;
- } else {
- /* Return a busy error: */
- ret = EBUSY;
- }
- } else {
+ (*mutex)->m_prio = _thread_run->active_priority;
+ (*mutex)->m_saved_prio =
+ _thread_run->inherited_priority;
+
+ /* Add to the list of owned mutexes: */
+ TAILQ_INSERT_TAIL(&_thread_run->mutexq,
+ (*mutex), m_qe);
+ } else if ((*mutex)->m_owner == _thread_run)
+ ret = mutex_self_trylock(*mutex);
+ else
+ /* Return a busy error: */
+ ret = EBUSY;
+ break;
+
+ /* POSIX priority protection mutex: */
+ case PTHREAD_PRIO_PROTECT:
+ /* Check for a priority ceiling violation: */
+ if (_thread_run->active_priority > (*mutex)->m_prio)
+ ret = EINVAL;
+
+ /* Check if this mutex is not locked: */
+ else if ((*mutex)->m_owner == NULL) {
/* Lock the mutex for the running thread: */
(*mutex)->m_owner = _thread_run;
- }
+
+ /* Track number of priority mutexes owned: */
+ _thread_run->priority_mutex_count++;
+
+ /*
+ * The running thread inherits the ceiling
+ * priority of the mutex and executes at that
+ * priority.
+ */
+ _thread_run->active_priority = (*mutex)->m_prio;
+ (*mutex)->m_saved_prio =
+ _thread_run->inherited_priority;
+ _thread_run->inherited_priority =
+ (*mutex)->m_prio;
+
+ /* Add to the list of owned mutexes: */
+ TAILQ_INSERT_TAIL(&_thread_run->mutexq,
+ (*mutex), m_qe);
+ } else if ((*mutex)->m_owner == _thread_run)
+ ret = mutex_self_trylock(*mutex);
+ else
+ /* Return a busy error: */
+ ret = EBUSY;
break;
/* Trap invalid mutex types: */
@@ -219,6 +326,12 @@ pthread_mutex_trylock(pthread_mutex_t * mutex)
/* Unlock the mutex structure: */
_SPINUNLOCK(&(*mutex)->lock);
+
+ /*
+ * Re-enable preemption and yield if a scheduling signal
+ * arrived while in the critical region:
+ */
+ _thread_kern_sched_undefer();
}
/* Return the completion status: */
@@ -238,91 +351,200 @@ pthread_mutex_lock(pthread_mutex_t * mutex)
* initialization:
*/
else if (*mutex != NULL || (ret = init_static(mutex)) == 0) {
+ /*
+ * Guard against being preempted by a scheduling signal.
+ * To support priority inheritance mutexes, we need to
+ * maintain lists of mutex ownerships for each thread as
+ * well as lists of waiting threads for each mutex. In
+ * order to propagate priorities we need to atomically
+ * walk these lists and cannot rely on a single mutex
+ * lock to provide protection against modification.
+ */
+ _thread_kern_sched_defer();
+
/* Lock the mutex structure: */
_SPINLOCK(&(*mutex)->lock);
/* Process according to mutex type: */
- switch ((*mutex)->m_type) {
- /* What SS2 define as a 'normal' mutex. This has to deadlock
- on attempts to get a lock you already own. */
- case PTHREAD_MUTEX_NORMAL:
- if ((*mutex)->m_owner == _thread_run) {
- /* Intetionally deadlock */
- for (;;)
- _thread_kern_sched_state(PS_MUTEX_WAIT, __FILE__, __LINE__);
- }
- goto COMMON_LOCK;
-
- /* Return error (not OK) on attempting to re-lock */
- case PTHREAD_MUTEX_ERRORCHECK:
- if ((*mutex)->m_owner == _thread_run) {
- ret = EDEADLK;
- break;
- }
-
- /* Fast mutexes do not check for any error conditions: */
- case PTHREAD_MUTEX_DEFAULT:
- COMMON_LOCK:
- /*
- * Enter a loop to wait for the mutex to be locked by the
- * current thread:
- */
- while ((*mutex)->m_owner != _thread_run) {
- /* Check if the mutex is not locked: */
- if ((*mutex)->m_owner == NULL) {
- /* Lock the mutex for this thread: */
- (*mutex)->m_owner = _thread_run;
- } else {
- /*
- * Join the queue of threads waiting to lock
- * the mutex:
- */
- _thread_queue_enq(&(*mutex)->m_queue, _thread_run);
+ switch ((*mutex)->m_protocol) {
+ /* Default POSIX mutex: */
+ case PTHREAD_PRIO_NONE:
+ if ((*mutex)->m_owner == NULL) {
+ /* Lock the mutex for this thread: */
+ (*mutex)->m_owner = _thread_run;
- /* Wait for the mutex: */
- _thread_kern_sched_state_unlock(
- PS_MUTEX_WAIT, &(*mutex)->lock,
- __FILE__, __LINE__);
+ /* Add to the list of owned mutexes: */
+ TAILQ_INSERT_TAIL(&_thread_run->mutexq,
+ (*mutex), m_qe);
- /* Lock the mutex again: */
- _SPINLOCK(&(*mutex)->lock);
- }
+ } else if ((*mutex)->m_owner == _thread_run)
+ ret = mutex_self_lock(*mutex);
+ else {
+ /*
+ * Join the queue of threads waiting to lock
+ * the mutex:
+ */
+ mutex_queue_enq(*mutex, _thread_run);
+
+ /*
+ * Keep a pointer to the mutex this thread
+ * is waiting on:
+ */
+ _thread_run->data.mutex = *mutex;
+
+ /*
+ * Unlock the mutex structure and schedule the
+ * next thread:
+ */
+ _thread_kern_sched_state_unlock(PS_MUTEX_WAIT,
+ &(*mutex)->lock, __FILE__, __LINE__);
+
+ /* Lock the mutex structure again: */
+ _SPINLOCK(&(*mutex)->lock);
+
+ /*
+ * This thread is no longer waiting for
+ * the mutex:
+ */
+ _thread_run->data.mutex = NULL;
}
break;
- /* Counting mutex: */
- case PTHREAD_MUTEX_RECURSIVE:
- /*
- * Enter a loop to wait for the mutex to be locked by the
- * current thread:
- */
- while ((*mutex)->m_owner != _thread_run) {
- /* Check if the mutex is not locked: */
- if ((*mutex)->m_owner == NULL) {
- /* Lock the mutex for this thread: */
- (*mutex)->m_owner = _thread_run;
-
- /* Reset the lock count for this mutex: */
- (*mutex)->m_data.m_count = 0;
- } else {
- /*
- * Join the queue of threads waiting to lock
- * the mutex:
- */
- _thread_queue_enq(&(*mutex)->m_queue, _thread_run);
+ /* POSIX priority inheritence mutex: */
+ case PTHREAD_PRIO_INHERIT:
+ /* Check if this mutex is not locked: */
+ if ((*mutex)->m_owner == NULL) {
+ /* Lock the mutex for this thread: */
+ (*mutex)->m_owner = _thread_run;
- /* Wait for the mutex: */
- _thread_kern_sched_state_unlock(
- PS_MUTEX_WAIT, &(*mutex)->lock,
- __FILE__, __LINE__);
+ /* Track number of priority mutexes owned: */
+ _thread_run->priority_mutex_count++;
- /* Lock the mutex again: */
- _SPINLOCK(&(*mutex)->lock);
- }
+ /*
+ * The mutex takes on attributes of the
+ * running thread when there are no waiters.
+ */
+ (*mutex)->m_prio = _thread_run->active_priority;
+ (*mutex)->m_saved_prio =
+ _thread_run->inherited_priority;
+ _thread_run->inherited_priority =
+ (*mutex)->m_prio;
+
+ /* Add to the list of owned mutexes: */
+ TAILQ_INSERT_TAIL(&_thread_run->mutexq,
+ (*mutex), m_qe);
+
+ } else if ((*mutex)->m_owner == _thread_run)
+ ret = mutex_self_lock(*mutex);
+ else {
+ /*
+ * Join the queue of threads waiting to lock
+ * the mutex:
+ */
+ mutex_queue_enq(*mutex, _thread_run);
+
+ /*
+ * Keep a pointer to the mutex this thread
+ * is waiting on:
+ */
+ _thread_run->data.mutex = *mutex;
+
+ if (_thread_run->active_priority >
+ (*mutex)->m_prio)
+ /* Adjust priorities: */
+ mutex_priority_adjust(*mutex);
+
+ /*
+ * Unlock the mutex structure and schedule the
+ * next thread:
+ */
+ _thread_kern_sched_state_unlock(PS_MUTEX_WAIT,
+ &(*mutex)->lock, __FILE__, __LINE__);
+
+ /* Lock the mutex structure again: */
+ _SPINLOCK(&(*mutex)->lock);
+
+ /*
+ * This thread is no longer waiting for
+ * the mutex:
+ */
+ _thread_run->data.mutex = NULL;
}
+ break;
+
+ /* POSIX priority protection mutex: */
+ case PTHREAD_PRIO_PROTECT:
+ /* Check for a priority ceiling violation: */
+ if (_thread_run->active_priority > (*mutex)->m_prio)
+ ret = EINVAL;
+
+ /* Check if this mutex is not locked: */
+ else if ((*mutex)->m_owner == NULL) {
+ /*
+ * Lock the mutex for the running
+ * thread:
+ */
+ (*mutex)->m_owner = _thread_run;
+
+ /* Track number of priority mutexes owned: */
+ _thread_run->priority_mutex_count++;
- /* Increment the lock count for this mutex: */
- (*mutex)->m_data.m_count++;
+ /*
+ * The running thread inherits the ceiling
+ * priority of the mutex and executes at that
+ * priority:
+ */
+ _thread_run->active_priority = (*mutex)->m_prio;
+ (*mutex)->m_saved_prio =
+ _thread_run->inherited_priority;
+ _thread_run->inherited_priority =
+ (*mutex)->m_prio;
+
+ /* Add to the list of owned mutexes: */
+ TAILQ_INSERT_TAIL(&_thread_run->mutexq,
+ (*mutex), m_qe);
+ } else if ((*mutex)->m_owner == _thread_run)
+ ret = mutex_self_lock(*mutex);
+ else {
+ /*
+ * Join the queue of threads waiting to lock
+ * the mutex:
+ */
+ mutex_queue_enq(*mutex, _thread_run);
+
+ /*
+ * Keep a pointer to the mutex this thread
+ * is waiting on:
+ */
+ _thread_run->data.mutex = *mutex;
+
+ /* Clear any previous error: */
+ _thread_run->error = 0;
+
+ /*
+ * Unlock the mutex structure and schedule the
+ * next thread:
+ */
+ _thread_kern_sched_state_unlock(PS_MUTEX_WAIT,
+ &(*mutex)->lock, __FILE__, __LINE__);
+
+ /* Lock the mutex structure again: */
+ _SPINLOCK(&(*mutex)->lock);
+
+ /*
+ * The threads priority may have changed while
+ * waiting for the mutex causing a ceiling
+ * violation.
+ */
+ ret = _thread_run->error;
+ _thread_run->error = 0;
+
+ /*
+ * This thread is no longer waiting for
+ * the mutex:
+ */
+ _thread_run->data.mutex = NULL;
+ }
break;
/* Trap invalid mutex types: */
@@ -334,6 +556,12 @@ pthread_mutex_lock(pthread_mutex_t * mutex)
/* Unlock the mutex structure: */
_SPINUNLOCK(&(*mutex)->lock);
+
+ /*
+ * Re-enable preemption and yield if a scheduling signal
+ * arrived while in the critical region:
+ */
+ _thread_kern_sched_undefer();
}
/* Return the completion status: */
@@ -343,56 +571,375 @@ pthread_mutex_lock(pthread_mutex_t * mutex)
int
pthread_mutex_unlock(pthread_mutex_t * mutex)
{
- int ret = 0;
+ return (mutex_unlock_common(mutex, /* add reference */ 0));
+}
+
+int
+_mutex_cv_unlock(pthread_mutex_t * mutex)
+{
+ return (mutex_unlock_common(mutex, /* add reference */ 1));
+}
+
+int
+_mutex_cv_lock(pthread_mutex_t * mutex)
+{
+ int ret;
+ if ((ret = pthread_mutex_lock(mutex)) == 0)
+ (*mutex)->m_refcount--;
+ return (ret);
+}
+
+static inline int
+mutex_self_trylock(pthread_mutex_t mutex)
+{
+ int ret = 0;
+
+ switch (mutex->m_type) {
+
+ /* case PTHREAD_MUTEX_DEFAULT: */
+ case PTHREAD_MUTEX_ERRORCHECK:
+ case PTHREAD_MUTEX_NORMAL:
+ /*
+ * POSIX specifies that trylock on a mutex the caller already
+ * owns fails with EBUSY (EDEADLK applies to pthread_mutex_lock,
+ * not trylock).
+ */
+ ret = EBUSY;
+ break;
+
+ case PTHREAD_MUTEX_RECURSIVE:
+ /* Increment the lock count: */
+ mutex->m_data.m_count++;
+ break;
+
+ default:
+ /* Trap invalid mutex types; */
+ ret = EINVAL;
+ }
+
+ return(ret);
+}
+
+static inline int
+mutex_self_lock(pthread_mutex_t mutex)
+{
+ int ret = 0;
+
+ switch (mutex->m_type) {
+ /* case PTHREAD_MUTEX_DEFAULT: */
+ case PTHREAD_MUTEX_ERRORCHECK:
+ /*
+ * POSIX specifies that mutexes should return EDEADLK if a
+ * recursive lock is detected.
+ */
+ ret = EDEADLK;
+ break;
+
+ case PTHREAD_MUTEX_NORMAL:
+ /*
+ * What SS2 define as a 'normal' mutex. Intentionally
+ * deadlock on attempts to get a lock you already own.
+ */
+ _thread_kern_sched_state_unlock(PS_DEADLOCK,
+ &mutex->lock, __FILE__, __LINE__);
+ break;
+
+ case PTHREAD_MUTEX_RECURSIVE:
+ /* Increment the lock count: */
+ mutex->m_data.m_count++;
+ break;
+
+ default:
+ /* Trap invalid mutex types; */
+ ret = EINVAL;
+ }
+
+ return(ret);
+}
+
+static inline int
+mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
+{
+ int ret = 0;
if (mutex == NULL || *mutex == NULL) {
ret = EINVAL;
} else {
+ /*
+ * Guard against being preempted by a scheduling signal.
+ * To support priority inheritance mutexes, we need to
+ * maintain lists of mutex ownerships for each thread as
+ * well as lists of waiting threads for each mutex. In
+ * order to propagate priorities we need to atomically
+ * walk these lists and cannot rely on a single mutex
+ * lock to provide protection against modification.
+ */
+ _thread_kern_sched_defer();
+
/* Lock the mutex structure: */
_SPINLOCK(&(*mutex)->lock);
/* Process according to mutex type: */
- switch ((*mutex)->m_type) {
- /* Default & normal mutexes do not really need to check for
- any error conditions: */
- case PTHREAD_MUTEX_NORMAL:
- case PTHREAD_MUTEX_DEFAULT:
- case PTHREAD_MUTEX_ERRORCHECK:
- /* Check if the running thread is not the owner of the mutex: */
+ switch ((*mutex)->m_protocol) {
+ /* Default POSIX mutex: */
+ case PTHREAD_PRIO_NONE:
+ /*
+ * Check if the running thread is not the owner of the
+ * mutex:
+ */
if ((*mutex)->m_owner != _thread_run) {
- /* Return an invalid argument error: */
- ret = (*mutex)->m_owner ? EPERM : EINVAL;
+ /*
+ * Return an invalid argument error for no
+ * owner and a permission error otherwise:
+ */
+ ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
}
+ else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
+ ((*mutex)->m_data.m_count > 1)) {
+ /* Decrement the count: */
+ (*mutex)->m_data.m_count--;
+ } else {
+ /*
+ * Clear the count in case this is recursive
+ * mutex.
+ */
+ (*mutex)->m_data.m_count = 0;
+
+ /* Remove the mutex from the threads queue. */
+ TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
+ (*mutex), m_qe);
+
+ /*
+ * Get the next thread from the queue of
+ * threads waiting on the mutex:
+ */
+ if (((*mutex)->m_owner =
+ mutex_queue_deq(*mutex)) != NULL) {
+ /*
+ * Allow the new owner of the mutex to
+ * run:
+ */
+ PTHREAD_NEW_STATE((*mutex)->m_owner,
+ PS_RUNNING);
+ }
+ }
+ break;
+
+ /* POSIX priority inheritence mutex: */
+ case PTHREAD_PRIO_INHERIT:
/*
- * Get the next thread from the queue of threads waiting on
- * the mutex:
+ * Check if the running thread is not the owner of the
+ * mutex:
*/
- else if (((*mutex)->m_owner = _thread_queue_deq(&(*mutex)->m_queue)) != NULL) {
- /* Allow the new owner of the mutex to run: */
- PTHREAD_NEW_STATE((*mutex)->m_owner,PS_RUNNING);
+ if ((*mutex)->m_owner != _thread_run) {
+ /*
+ * Return an invalid argument error for no
+ * owner and a permission error otherwise:
+ */
+ ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
+ }
+ else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
+ ((*mutex)->m_data.m_count > 1)) {
+ /* Decrement the count: */
+ (*mutex)->m_data.m_count--;
+ } else {
+ /*
+ * Clear the count in case this is recursive
+ * mutex.
+ */
+ (*mutex)->m_data.m_count = 0;
+
+ /*
+ * Restore the threads inherited priority and
+ * recompute the active priority (being careful
+ * not to override changes in the threads base
+ * priority subsequent to locking the mutex).
+ */
+ _thread_run->inherited_priority =
+ (*mutex)->m_saved_prio;
+ _thread_run->active_priority =
+ MAX(_thread_run->inherited_priority,
+ _thread_run->base_priority);
+
+ /*
+ * This thread now owns one less priority mutex.
+ */
+ _thread_run->priority_mutex_count--;
+
+ /* Remove the mutex from the threads queue. */
+ TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
+ (*mutex), m_qe);
+
+ /*
+ * Get the next thread from the queue of threads
+ * waiting on the mutex:
+ */
+ if (((*mutex)->m_owner =
+ mutex_queue_deq(*mutex)) == NULL)
+ /* This mutex has no priority. */
+ (*mutex)->m_prio = 0;
+ else {
+ /*
+ * Track number of priority mutexes owned:
+ */
+ (*mutex)->m_owner->priority_mutex_count++;
+
+ /*
+ * Add the mutex to the threads list
+ * of owned mutexes:
+ */
+ TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
+ (*mutex), m_qe);
+
+ /*
+ * The owner is no longer waiting for
+ * this mutex:
+ */
+ (*mutex)->m_owner->data.mutex = NULL;
+
+ /*
+ * Set the priority of the mutex. Since
+ * our waiting threads are in descending
+ * priority order, the priority of the
+ * mutex becomes the active priority of
+ * the thread we just dequeued.
+ */
+ (*mutex)->m_prio =
+ (*mutex)->m_owner->active_priority;
+
+ /*
+ * Save the owning threads inherited
+ * priority:
+ */
+ (*mutex)->m_saved_prio =
+ (*mutex)->m_owner->inherited_priority;
+
+ /*
+ * The owning threads inherited priority
+ * now becomes his active priority (the
+ * priority of the mutex).
+ */
+ (*mutex)->m_owner->inherited_priority =
+ (*mutex)->m_prio;
+
+ /*
+ * Allow the new owner of the mutex to
+ * run:
+ */
+ PTHREAD_NEW_STATE((*mutex)->m_owner,
+ PS_RUNNING);
+ }
}
break;
- /* Counting mutex: */
- case PTHREAD_MUTEX_RECURSIVE:
- /* Check if the running thread is not the owner of the mutex: */
+ /* POSIX priority ceiling mutex: */
+ case PTHREAD_PRIO_PROTECT:
+ /*
+ * Check if the running thread is not the owner of the
+ * mutex:
+ */
if ((*mutex)->m_owner != _thread_run) {
- /* Return an invalid argument error: */
- ret = EINVAL;
+ /*
+ * Return an invalid argument error for no
+ * owner and a permission error otherwise:
+ */
+ ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
}
- /* Check if there are still counts: */
- else if ((*mutex)->m_data.m_count > 1) {
+ else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
+ ((*mutex)->m_data.m_count > 1)) {
/* Decrement the count: */
(*mutex)->m_data.m_count--;
} else {
+ /*
+ * Clear the count in case this is recursive
+ * mutex.
+ */
(*mutex)->m_data.m_count = 0;
+
/*
- * Get the next thread from the queue of threads waiting on
- * the mutex:
+ * Restore the threads inherited priority and
+ * recompute the active priority (being careful
+ * not to override changes in the threads base
+ * priority subsequent to locking the mutex).
+ */
+ _thread_run->inherited_priority =
+ (*mutex)->m_saved_prio;
+ _thread_run->active_priority =
+ MAX(_thread_run->inherited_priority,
+ _thread_run->base_priority);
+
+ /*
+ * This thread now owns one less priority mutex.
*/
- if (((*mutex)->m_owner = _thread_queue_deq(&(*mutex)->m_queue)) != NULL) {
- /* Allow the new owner of the mutex to run: */
- PTHREAD_NEW_STATE((*mutex)->m_owner,PS_RUNNING);
+ _thread_run->priority_mutex_count--;
+
+ /* Remove the mutex from the threads queue. */
+ TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
+ (*mutex), m_qe);
+
+ /*
+ * Enter a loop to find a waiting thread whose
+ * active priority will not cause a ceiling
+ * violation:
+ */
+ while ((((*mutex)->m_owner =
+ mutex_queue_deq(*mutex)) != NULL) &&
+ ((*mutex)->m_owner->active_priority >
+ (*mutex)->m_prio)) {
+ /*
+ * Either the mutex ceiling priority
+ * has been lowered and/or this thread's
+ * priority has been raised subsequent
+ * to this thread being queued on the
+ * waiting list.
+ */
+ (*mutex)->m_owner->error = EINVAL;
+ PTHREAD_NEW_STATE((*mutex)->m_owner,
+ PS_RUNNING);
+ }
+
+ /* Check for a new owner: */
+ if ((*mutex)->m_owner != NULL) {
+ /*
+ * Track number of priority mutexes owned:
+ */
+ (*mutex)->m_owner->priority_mutex_count++;
+
+ /*
+ * Add the mutex to the threads list
+ * of owned mutexes:
+ */
+ TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
+ (*mutex), m_qe);
+
+ /*
+ * The owner is no longer waiting for
+ * this mutex:
+ */
+ (*mutex)->m_owner->data.mutex = NULL;
+
+ /*
+ * Save the owning threads inherited
+ * priority:
+ */
+ (*mutex)->m_saved_prio =
+ (*mutex)->m_owner->inherited_priority;
+
+ /*
+ * The owning thread inherits the
+ * ceiling priority of the mutex and
+ * executes at that priority:
+ */
+ (*mutex)->m_owner->inherited_priority =
+ (*mutex)->m_prio;
+ (*mutex)->m_owner->active_priority =
+ (*mutex)->m_prio;
+
+ /*
+ * Allow the new owner of the mutex to
+ * run:
+ */
+ PTHREAD_NEW_STATE((*mutex)->m_owner,
+ PS_RUNNING);
}
}
break;
@@ -404,11 +951,348 @@ pthread_mutex_unlock(pthread_mutex_t * mutex)
break;
}
+ if ((ret == 0) && (add_reference != 0)) {
+ /* Increment the reference count: */
+ (*mutex)->m_refcount++;
+ }
+
/* Unlock the mutex structure: */
_SPINUNLOCK(&(*mutex)->lock);
+
+ /*
+ * Re-enable preemption and yield if a scheduling signal
+ * arrived while in the critical region:
+ */
+ _thread_kern_sched_undefer();
}
/* Return the completion status: */
return (ret);
}
+
+
+/*
+ * This function is called when a change in base priority occurs
+ * for a thread that is holding, or waiting for, a priority
+ * protection or inheritance mutex.  A change in a thread's base
+ * priority can effect changes to active priorities of other threads
+ * and to the ordering of mutex locking by waiting threads.
+ *
+ * This must be called while thread scheduling is deferred.
+ */
+void
+_mutex_notify_priochange(pthread_t pthread)
+{
+ /* Adjust the priorities of any owned priority mutexes: */
+ if (pthread->priority_mutex_count > 0) {
+ /*
+ * Rescan the mutexes owned by this thread and correct
+ * their priorities to account for this threads change
+ * in priority. This has the side effect of changing
+ * the threads active priority.
+ */
+ mutex_rescan_owned(pthread, /* rescan all owned */ NULL);
+ }
+
+ /*
+ * If this thread is waiting on a priority inheritance mutex,
+ * check for priority adjustments. A change in priority can
+ * also effect a ceiling violation(*) for a thread waiting on
+ * a priority protection mutex; we don't perform the check here
+ * as it is done in pthread_mutex_unlock.
+ *
+ * (*) It should be noted that a priority change to a thread
+ * _after_ taking and owning a priority ceiling mutex
+ * does not affect ownership of that mutex; the ceiling
+ * priority is only checked before mutex ownership occurs.
+ */
+ if (pthread->state == PS_MUTEX_WAIT) {
+ /* Lock the mutex structure: */
+ _SPINLOCK(&pthread->data.mutex->lock);
+
+ /*
+ * Check to make sure this thread is still in the same state
+ * (the spinlock above can yield the CPU to another thread):
+ */
+ if (pthread->state == PS_MUTEX_WAIT) {
+ /*
+ * Remove and reinsert this thread into the list of
+ * waiting threads to preserve decreasing priority
+ * order.
+ */
+ mutex_queue_remove(pthread->data.mutex, pthread);
+ mutex_queue_enq(pthread->data.mutex, pthread);
+
+ if (pthread->data.mutex->m_protocol ==
+ PTHREAD_PRIO_INHERIT) {
+ /* Adjust priorities: */
+ mutex_priority_adjust(pthread->data.mutex);
+ }
+ }
+
+ /* Unlock the mutex structure: */
+ _SPINUNLOCK(&pthread->data.mutex->lock);
+ }
+}
+
+/*
+ * Called when a new thread is added to the mutex waiting queue or
+ * when a threads priority changes that is already in the mutex
+ * waiting queue.
+ */
+static void
+mutex_priority_adjust(pthread_mutex_t mutex)
+{
+ pthread_t pthread_next, pthread = mutex->m_owner;
+ int temp_prio;
+ pthread_mutex_t m = mutex;
+
+ /*
+ * Calculate the mutex priority as the maximum of the highest
+ * active priority of any waiting threads and the owning threads
+ * active priority(*).
+ *
+ * (*) Because the owning threads current active priority may
+ * reflect priority inherited from this mutex (and the mutex
+ * priority may have changed) we must recalculate the active
+ * priority based on the threads saved inherited priority
+ * and its base priority.
+ */
+ pthread_next = TAILQ_FIRST(&m->m_queue); /* should never be NULL */
+ temp_prio = MAX(pthread_next->active_priority,
+ MAX(m->m_saved_prio, pthread->base_priority));
+
+ /* See if this mutex really needs adjusting: */
+ if (temp_prio == m->m_prio)
+ /* No need to propagate the priority: */
+ return;
+
+ /* Set new priority of the mutex: */
+ m->m_prio = temp_prio;
+
+ while (m != NULL) {
+ /*
+ * Save the threads priority before rescanning the
+ * owned mutexes:
+ */
+ temp_prio = pthread->active_priority;
+
+ /*
+ * Fix the priorities for all the mutexes this thread has
+ * locked since taking this mutex. This also has a
+ * potential side-effect of changing the threads priority.
+ */
+ mutex_rescan_owned(pthread, m);
+
+ /*
+ * If the thread is currently waiting on a mutex, check
+ * to see if the threads new priority has affected the
+ * priority of the mutex.
+ */
+ if ((temp_prio != pthread->active_priority) &&
+ (pthread->state == PS_MUTEX_WAIT) &&
+ (pthread->data.mutex->m_protocol == PTHREAD_PRIO_INHERIT)) {
+ /* Grab the mutex this thread is waiting on: */
+ m = pthread->data.mutex;
+
+ /*
+ * The priority for this thread has changed. Remove
+ * and reinsert this thread into the list of waiting
+ * threads to preserve decreasing priority order.
+ */
+ mutex_queue_remove(m, pthread);
+ mutex_queue_enq(m, pthread);
+
+ /* Grab the waiting thread with highest priority: */
+ pthread_next = TAILQ_FIRST(&m->m_queue);
+
+ /*
+ * Calculate the mutex priority as the maximum of the
+ * highest active priority of any waiting threads and
+ * the owning threads active priority.
+ */
+ temp_prio = MAX(pthread_next->active_priority,
+ MAX(m->m_saved_prio, m->m_owner->base_priority));
+
+ if (temp_prio != m->m_prio) {
+ /*
+ * The priority needs to be propagated to the
+ * mutex this thread is waiting on and up to
+ * the owner of that mutex.
+ */
+ m->m_prio = temp_prio;
+ pthread = m->m_owner;
+ }
+ else
+ /* We're done: */
+ m = NULL;
+
+ }
+ else
+ /* We're done: */
+ m = NULL;
+ }
+}
+
+static void
+mutex_rescan_owned (pthread_t pthread, pthread_mutex_t mutex)
+{
+ int active_prio, inherited_prio;
+ pthread_mutex_t m;
+ pthread_t pthread_next;
+
+ /*
+ * Start walking the mutexes the thread has taken since
+ * taking this mutex.
+ */
+ if (mutex == NULL) {
+ /*
+ * A null mutex means start at the beginning of the owned
+ * mutex list.
+ */
+ m = TAILQ_FIRST(&pthread->mutexq);
+
+ /* There is no inherited priority yet. */
+ inherited_prio = 0;
+ }
+ else {
+ /*
+ * The caller wants to start after a specific mutex. It
+ * is assumed that this mutex is a priority inheritence
+ * mutex and that its priority has been correctly
+ * calculated.
+ */
+ m = TAILQ_NEXT(mutex, m_qe);
+
+ /* Start inheriting priority from the specified mutex. */
+ inherited_prio = mutex->m_prio;
+ }
+ active_prio = MAX(inherited_prio, pthread->base_priority);
+
+ while (m != NULL) {
+ /*
+ * We only want to deal with priority inheritence
+ * mutexes. This might be optimized by only placing
+ * priority inheritence mutexes into the owned mutex
+ * list, but it may prove to be useful having all
+ * owned mutexes in this list. Consider a thread
+ * exiting while holding mutexes...
+ */
+ if (m->m_protocol == PTHREAD_PRIO_INHERIT) {
+ /*
+ * Fix the owners saved (inherited) priority to
+ * reflect the priority of the previous mutex.
+ */
+ m->m_saved_prio = inherited_prio;
+
+ if ((pthread_next = TAILQ_FIRST(&m->m_queue)) != NULL)
+ /* Recalculate the priority of the mutex: */
+ m->m_prio = MAX(active_prio,
+ pthread_next->active_priority);
+ else
+ m->m_prio = active_prio;
+
+ /* Recalculate new inherited and active priorities: */
+ inherited_prio = m->m_prio;
+ active_prio = MAX(m->m_prio, pthread->base_priority);
+ }
+
+ /* Advance to the next mutex owned by this thread: */
+ m = TAILQ_NEXT(m, m_qe);
+ }
+
+ /*
+ * Fix the threads inherited priority and recalculate its
+ * active priority.
+ */
+ pthread->inherited_priority = inherited_prio;
+ active_prio = MAX(inherited_prio, pthread->base_priority);
+
+ if (active_prio != pthread->active_priority) {
+ /*
+ * If this thread is in the priority queue, it must be
+ * removed and reinserted for its new priority.
+ */
+ if ((pthread != _thread_run) &&
+ (pthread->state == PS_RUNNING)) {
+ /*
+ * Remove the thread from the priority queue
+ * before changing its priority:
+ */
+ PTHREAD_PRIOQ_REMOVE(pthread);
+
+ /*
+ * POSIX states that if the priority is being
+ * lowered, the thread must be inserted at the
+ * head of the queue for its priority if it owns
+ * any priority protection or inheritence mutexes.
+ */
+ if ((active_prio < pthread->active_priority) &&
+ (pthread->priority_mutex_count > 0)) {
+ /* Set the new active priority. */
+ pthread->active_priority = active_prio;
+
+ PTHREAD_PRIOQ_INSERT_HEAD(pthread);
+ }
+ else {
+ /* Set the new active priority. */
+ pthread->active_priority = active_prio;
+
+ PTHREAD_PRIOQ_INSERT_TAIL(pthread);
+ }
+ }
+ else {
+ /* Set the new active priority. */
+ pthread->active_priority = active_prio;
+ }
+ }
+}
+
+/*
+ * Dequeue a waiting thread from the head of a mutex queue in descending
+ * priority order.
+ */
+static inline pthread_t
+mutex_queue_deq(pthread_mutex_t mutex)
+{
+ pthread_t pthread;
+
+ if ((pthread = TAILQ_FIRST(&mutex->m_queue)) != NULL)
+ TAILQ_REMOVE(&mutex->m_queue, pthread, qe);
+
+ return(pthread);
+}
+
+/*
+ * Remove a waiting thread from a mutex queue in descending priority order.
+ */
+static inline void
+mutex_queue_remove(pthread_mutex_t mutex, pthread_t pthread)
+{
+ TAILQ_REMOVE(&mutex->m_queue, pthread, qe);
+}
+
+/*
+ * Enqueue a waiting thread to a queue in descending priority order.
+ */
+static inline void
+mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread)
+{
+ pthread_t tid = TAILQ_LAST(&mutex->m_queue, mutex_head);
+
+ /*
+ * For the common case of all threads having equal priority,
+ * we perform a quick check against the priority of the thread
+ * at the tail of the queue.
+ */
+ if ((tid == NULL) || (pthread->active_priority <= tid->active_priority))
+ TAILQ_INSERT_TAIL(&mutex->m_queue, pthread, qe);
+ else {
+ tid = TAILQ_FIRST(&mutex->m_queue);
+ while (pthread->active_priority <= tid->active_priority)
+ tid = TAILQ_NEXT(tid, qe);
+ TAILQ_INSERT_BEFORE(tid, pthread, qe);
+ }
+}
+
#endif
diff --git a/lib/libc_r/uthread/uthread_mutex_prioceiling.c b/lib/libc_r/uthread/uthread_mutex_prioceiling.c
new file mode 100644
index 0000000..edd9fb5
--- /dev/null
+++ b/lib/libc_r/uthread/uthread_mutex_prioceiling.c
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Daniel Eischen.
+ * 4. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+#include <string.h>
+#include <stdlib.h>
+#include <errno.h>
+#ifdef _THREAD_SAFE
+#include <pthread.h>
+#include "pthread_private.h"
+
+int
+pthread_mutexattr_getprioceiling(pthread_mutexattr_t *mattr, int *prioceiling)
+{
+ int ret = 0;
+
+ if ((mattr == NULL) || (*mattr == NULL))
+ ret = EINVAL;
+ else if ((*mattr)->m_protocol != PTHREAD_PRIO_PROTECT)
+ ret = EINVAL;
+ else
+ *prioceiling = (*mattr)->m_ceiling;
+
+ return(ret);
+}
+
+int
+pthread_mutexattr_setprioceiling(pthread_mutexattr_t *mattr, int prioceiling)
+{
+ int ret = 0;
+
+ if ((mattr == NULL) || (*mattr == NULL))
+ ret = EINVAL;
+ else if ((*mattr)->m_protocol != PTHREAD_PRIO_PROTECT)
+ ret = EINVAL;
+ else
+ (*mattr)->m_ceiling = prioceiling;
+
+ return(ret);
+}
+
+int
+pthread_mutex_getprioceiling(pthread_mutex_t *mutex,
+			     int *prioceiling)
+{
+	int ret = 0;
+
+	if ((mutex == NULL) || (*mutex == NULL) || (prioceiling == NULL))
+		ret = EINVAL;
+	else if ((*mutex)->m_protocol != PTHREAD_PRIO_PROTECT)
+		ret = EINVAL;
+	else
+		*prioceiling = (*mutex)->m_prio;
+
+	return(ret);
+}
+
+int
+pthread_mutex_setprioceiling(pthread_mutex_t *mutex,
+ int prioceiling, int *old_ceiling)
+{
+ int ret = 0;
+
+ if ((mutex == NULL) || (*mutex == NULL))
+ ret = EINVAL;
+ else if ((*mutex)->m_protocol != PTHREAD_PRIO_PROTECT)
+ ret = EINVAL;
+ else {
+ /* Lock the mutex: */
+ if ((ret = pthread_mutex_lock(mutex)) == 0) {
+ /* Return the old ceiling and set the new ceiling: */
+ *old_ceiling = (*mutex)->m_prio;
+ (*mutex)->m_prio = prioceiling;
+
+ /* Unlock the mutex: */
+ ret = pthread_mutex_unlock(mutex);
+ }
+ }
+ return(ret);
+}
+#endif
diff --git a/lib/libc_r/uthread/uthread_mutex_protocol.c b/lib/libc_r/uthread/uthread_mutex_protocol.c
new file mode 100644
index 0000000..56c5542
--- /dev/null
+++ b/lib/libc_r/uthread/uthread_mutex_protocol.c
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Daniel Eischen.
+ * 4. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+#include <string.h>
+#include <stdlib.h>
+#include <errno.h>
+#ifdef _THREAD_SAFE
+#include <pthread.h>
+#include "pthread_private.h"
+
+int
+pthread_mutexattr_getprotocol(pthread_mutexattr_t *mattr, int *protocol)
+{
+ int ret = 0;
+
+ if ((mattr == NULL) || (*mattr == NULL))
+ ret = EINVAL;
+ else
+ *protocol = (*mattr)->m_protocol;
+
+ return(ret);
+}
+
+int
+pthread_mutexattr_setprotocol(pthread_mutexattr_t *mattr, int protocol)
+{
+ int ret = 0;
+
+ if ((mattr == NULL) || (*mattr == NULL) ||
+ (protocol < PTHREAD_PRIO_NONE) || (protocol > PTHREAD_PRIO_PROTECT))
+ ret = EINVAL;
+ else {
+ (*mattr)->m_protocol = protocol;
+ (*mattr)->m_ceiling = PTHREAD_MAX_PRIORITY;
+ }
+ return(ret);
+}
+
+#endif
diff --git a/lib/libc_r/uthread/uthread_priority_queue.c b/lib/libc_r/uthread/uthread_priority_queue.c
new file mode 100644
index 0000000..516a1e0
--- /dev/null
+++ b/lib/libc_r/uthread/uthread_priority_queue.c
@@ -0,0 +1,155 @@
+/*
+ * Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Daniel Eischen.
+ * 4. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+#include <stdlib.h>
+#include <sys/queue.h>
+#include <string.h>
+#ifdef _THREAD_SAFE
+#include <pthread.h>
+#include "pthread_private.h"
+
+/* Prototypes: */
+static void pq_insert_prio_list(pq_queue_t *pq, int prio);
+
+
+int
+_pq_init(pq_queue_t *pq, int minprio, int maxprio)
+{
+ int i, ret = 0;
+ int prioslots = maxprio - minprio + 1;
+
+ if (pq == NULL)
+ ret = -1;
+
+ /* Create the priority queue with (maxprio - minprio + 1) slots: */
+ else if ((pq->pq_lists =
+ (pq_list_t *) malloc(sizeof(pq_list_t) * prioslots)) == NULL)
+ ret = -1;
+
+ else {
+ /* Initialize the queue for each priority slot: */
+ for (i = 0; i < prioslots; i++) {
+ TAILQ_INIT(&pq->pq_lists[i].pl_head);
+ pq->pq_lists[i].pl_prio = i;
+ pq->pq_lists[i].pl_queued = 0;
+ }
+
+ /* Initialize the priority queue: */
+ TAILQ_INIT(&pq->pq_queue);
+
+ /* Remember the queue size: */
+ pq->pq_size = prioslots;
+ }
+ return (ret);
+}
+
+void
+_pq_remove(pq_queue_t *pq, pthread_t pthread)
+{
+ int prio = pthread->active_priority;
+
+ TAILQ_REMOVE(&pq->pq_lists[prio].pl_head, pthread, pqe);
+}
+
+
+void
+_pq_insert_head(pq_queue_t *pq, pthread_t pthread)
+{
+ int prio = pthread->active_priority;
+
+ TAILQ_INSERT_HEAD(&pq->pq_lists[prio].pl_head, pthread, pqe);
+ if (pq->pq_lists[prio].pl_queued == 0)
+ /* Insert the list into the priority queue: */
+ pq_insert_prio_list(pq, prio);
+}
+
+
+void
+_pq_insert_tail(pq_queue_t *pq, pthread_t pthread)
+{
+ int prio = pthread->active_priority;
+
+ TAILQ_INSERT_TAIL(&pq->pq_lists[prio].pl_head, pthread, pqe);
+ if (pq->pq_lists[prio].pl_queued == 0)
+ /* Insert the list into the priority queue: */
+ pq_insert_prio_list(pq, prio);
+}
+
+
+pthread_t
+_pq_first(pq_queue_t *pq)
+{
+ pq_list_t *pql;
+ pthread_t pthread = NULL;
+
+ while (((pql = TAILQ_FIRST(&pq->pq_queue)) != NULL) &&
+ (pthread == NULL)) {
+ if ((pthread = TAILQ_FIRST(&pql->pl_head)) == NULL) {
+ /*
+ * The priority list is empty; remove the list
+ * from the queue.
+ */
+ TAILQ_REMOVE(&pq->pq_queue, pql, pl_link);
+
+ /* Mark the list as not being in the queue: */
+ pql->pl_queued = 0;
+ }
+ }
+ return (pthread);
+}
+
+
+static void
+pq_insert_prio_list(pq_queue_t *pq, int prio)
+{
+ pq_list_t *pql;
+
+ /*
+ * The priority queue is in descending priority order. Start at
+ * the beginning of the queue and find the list before which the
+	 * new list should be inserted.
+ */
+ pql = TAILQ_FIRST(&pq->pq_queue);
+ while ((pql != NULL) && (pql->pl_prio > prio))
+ pql = TAILQ_NEXT(pql, pl_link);
+
+ /* Insert the list: */
+ if (pql == NULL)
+ TAILQ_INSERT_TAIL(&pq->pq_queue, &pq->pq_lists[prio], pl_link);
+ else
+ TAILQ_INSERT_BEFORE(pql, &pq->pq_lists[prio], pl_link);
+
+ /* Mark this list as being in the queue: */
+ pq->pq_lists[prio].pl_queued = 1;
+}
+
+#endif
diff --git a/lib/libc_r/uthread/uthread_resume_np.c b/lib/libc_r/uthread/uthread_resume_np.c
index 7c5f46a..885a457 100644
--- a/lib/libc_r/uthread/uthread_resume_np.c
+++ b/lib/libc_r/uthread/uthread_resume_np.c
@@ -45,8 +45,21 @@ pthread_resume_np(pthread_t thread)
if ((ret = _find_thread(thread)) == 0) {
/* The thread exists. Is it suspended? */
if (thread->state != PS_SUSPENDED) {
+ /*
+ * Guard against preemption by a scheduling signal.
+ * A change of thread state modifies the waiting
+ * and priority queues.
+ */
+ _thread_kern_sched_defer();
+
/* Allow the thread to run. */
PTHREAD_NEW_STATE(thread,PS_RUNNING);
+
+ /*
+ * Reenable preemption and yield if a scheduling
+ * signal occurred while in the critical region.
+ */
+ _thread_kern_sched_undefer();
}
}
return(ret);
diff --git a/lib/libc_r/uthread/uthread_select.c b/lib/libc_r/uthread/uthread_select.c
index d6202db..6d7d7dc 100644
--- a/lib/libc_r/uthread/uthread_select.c
+++ b/lib/libc_r/uthread/uthread_select.c
@@ -35,6 +35,7 @@
#include <string.h>
#include <sys/types.h>
#include <sys/time.h>
+#include <sys/fcntl.h>
#ifdef _THREAD_SAFE
#include <pthread.h>
#include "pthread_private.h"
@@ -47,6 +48,7 @@ select(int numfds, fd_set * readfds, fd_set * writefds,
struct timespec ts;
struct timeval zero_timeout = {0, 0};
int i, ret = 0, got_all_locks = 1;
+ int f_wait = 1;
struct pthread_select_data data;
if (numfds > _thread_dtablesize) {
@@ -59,6 +61,8 @@ select(int numfds, fd_set * readfds, fd_set * writefds,
/* Set the wake up time: */
_thread_kern_set_timeout(&ts);
+ if (ts.tv_sec == 0 && ts.tv_nsec == 0)
+ f_wait = 0;
} else {
/* Wait for ever: */
_thread_kern_set_timeout(NULL);
@@ -110,7 +114,7 @@ select(int numfds, fd_set * readfds, fd_set * writefds,
if (exceptfds != NULL) {
memcpy(&data.exceptfds, exceptfds, sizeof(data.exceptfds));
}
- if ((ret = _thread_sys_select(data.nfds, &data.readfds, &data.writefds, &data.exceptfds, &zero_timeout)) == 0) {
+ if ((ret = _thread_sys_select(data.nfds, &data.readfds, &data.writefds, &data.exceptfds, &zero_timeout)) == 0 && f_wait) {
data.nfds = numfds;
FD_ZERO(&data.readfds);
FD_ZERO(&data.writefds);
diff --git a/lib/libc_r/uthread/uthread_setprio.c b/lib/libc_r/uthread/uthread_setprio.c
index dd89f15..008b6b0 100644
--- a/lib/libc_r/uthread/uthread_setprio.c
+++ b/lib/libc_r/uthread/uthread_setprio.c
@@ -20,7 +20,7 @@
* THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@@ -38,17 +38,13 @@
int
pthread_setprio(pthread_t pthread, int prio)
{
- int ret;
+ int ret, policy;
+ struct sched_param param;
- /* Check if the priority is invalid: */
- if (prio < PTHREAD_MIN_PRIORITY || prio > PTHREAD_MAX_PRIORITY)
- /* Return an invalid argument error: */
- ret = EINVAL;
-
- /* Find the thread in the list of active threads: */
- else if ((ret = _find_thread(pthread)) == 0)
- /* Set the thread priority: */
- pthread->pthread_priority = prio;
+ if ((ret = pthread_getschedparam(pthread, &policy, &param)) == 0) {
+ param.sched_priority = prio;
+ ret = pthread_setschedparam(pthread, policy, &param);
+ }
/* Return the error status: */
return (ret);
diff --git a/lib/libc_r/uthread/uthread_setschedparam.c b/lib/libc_r/uthread/uthread_setschedparam.c
new file mode 100644
index 0000000..93635da
--- /dev/null
+++ b/lib/libc_r/uthread/uthread_setschedparam.c
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Daniel Eischen.
+ * 4. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+#include <errno.h>
+#include <sys/param.h>
+#ifdef _THREAD_SAFE
+#include <pthread.h>
+#include "pthread_private.h"
+
+int
+pthread_setschedparam(pthread_t pthread, int policy, struct sched_param *param)
+{
+ int old_prio, in_readyq = 0, ret = 0;
+
+ if ((param == NULL) || (param->sched_priority < PTHREAD_MIN_PRIORITY) ||
+ (param->sched_priority > PTHREAD_MAX_PRIORITY) ||
+ (policy < SCHED_FIFO) || (policy > SCHED_RR))
+ /* Return an invalid argument error: */
+ ret = EINVAL;
+
+ /* Find the thread in the list of active threads: */
+ else if ((ret = _find_thread(pthread)) == 0) {
+ /*
+ * Guard against being preempted by a scheduling
+ * signal:
+ */
+ _thread_kern_sched_defer();
+
+ if (param->sched_priority != pthread->base_priority) {
+ /*
+ * Remove the thread from its current priority
+ * queue before any adjustments are made to its
+ * active priority:
+ */
+ if ((pthread != _thread_run) &&
+ (pthread->state == PS_RUNNING)) {
+ in_readyq = 1;
+ old_prio = pthread->active_priority;
+ PTHREAD_PRIOQ_REMOVE(pthread);
+ }
+
+ /* Set the thread base priority: */
+ pthread->base_priority = param->sched_priority;
+
+ /* Recalculate the active priority: */
+ pthread->active_priority = MAX(pthread->base_priority,
+ pthread->inherited_priority);
+
+ if (in_readyq) {
+ if ((pthread->priority_mutex_count > 0) &&
+ (old_prio > pthread->active_priority)) {
+ /*
+ * POSIX states that if the priority is
+ * being lowered, the thread must be
+ * inserted at the head of the queue for
+ * its priority if it owns any priority
+				 * protection or inheritance mutexes.
+ */
+ PTHREAD_PRIOQ_INSERT_HEAD(pthread);
+ }
+ else
+ PTHREAD_PRIOQ_INSERT_TAIL(pthread);
+ }
+
+ /*
+ * Check for any mutex priority adjustments. This
+ * includes checking for a priority mutex on which
+ * this thread is waiting.
+ */
+ _mutex_notify_priochange(pthread);
+ }
+
+ /* Set the scheduling policy: */
+ pthread->attr.sched_policy = policy;
+
+ /*
+		 * Re-enable preemption and yield if a scheduling signal
+ * arrived while in the critical region:
+ */
+ _thread_kern_sched_undefer();
+ }
+ return(ret);
+}
+#endif
diff --git a/lib/libc_r/uthread/uthread_sig.c b/lib/libc_r/uthread/uthread_sig.c
index 3e55d65..e51d949 100644
--- a/lib/libc_r/uthread/uthread_sig.c
+++ b/lib/libc_r/uthread/uthread_sig.c
@@ -38,6 +38,19 @@
#include <pthread.h>
#include "pthread_private.h"
+/*
+ * State change macro for signal handler:
+ */
+#define PTHREAD_SIG_NEW_STATE(thrd, newstate) { \
+ if ((_thread_run->sched_defer_count == 0) && \
+ (_thread_kern_in_sched == 0)) { \
+ PTHREAD_NEW_STATE(thrd, newstate); \
+ } else { \
+ _waitingq_check_reqd = 1; \
+ PTHREAD_SET_STATE(thrd, newstate); \
+ } \
+}
+
/* Static variables: */
static int volatile yield_on_unlock_thread = 0;
static spinlock_t thread_link_list_lock = _SPINLOCK_INITIALIZER;
@@ -94,14 +107,13 @@ _thread_sig_handler(int sig, int code, struct sigcontext * scp)
*/
_thread_sys_write(_thread_kern_pipe[1], &c, 1);
}
-
/* Check if the signal requires a dump of thread information: */
if (sig == SIGINFO)
/* Dump thread information to file: */
_thread_dump_info();
/* Check if an interval timer signal: */
- else if (sig == SIGVTALRM) {
+ else if (sig == _SCHED_SIGNAL) {
/* Check if the scheduler interrupt has come at an
* unfortunate time which one of the threads is
* modifying the thread list:
@@ -115,6 +127,14 @@ _thread_sig_handler(int sig, int code, struct sigcontext * scp)
yield_on_unlock_thread = 1;
/*
+ * Check if the scheduler interrupt has come when
+ * the currently running thread has deferred thread
+ * scheduling.
+ */
+ else if (_thread_run->sched_defer_count)
+ _thread_run->yield_on_sched_undefer = 1;
+
+ /*
* Check if the kernel has not been interrupted while
* executing scheduler code:
*/
@@ -170,18 +190,17 @@ _thread_sig_handler(int sig, int code, struct sigcontext * scp)
}
/*
- * Enter a loop to process each thread in the linked
+ * Enter a loop to process each thread in the waiting
* list that is sigwait-ing on a signal. Since POSIX
* doesn't specify which thread will get the signal
* if there are multiple waiters, we'll give it to the
* first one we find.
*/
- for (pthread = _thread_link_list; pthread != NULL;
- pthread = pthread->nxt) {
+ TAILQ_FOREACH(pthread, &_waitingq, pqe) {
if ((pthread->state == PS_SIGWAIT) &&
sigismember(pthread->data.sigwait, sig)) {
/* Change the state of the thread to run: */
- PTHREAD_NEW_STATE(pthread,PS_RUNNING);
+ PTHREAD_SIG_NEW_STATE(pthread,PS_RUNNING);
/* Return the signal number: */
pthread->signo = sig;
@@ -201,11 +220,19 @@ _thread_sig_handler(int sig, int code, struct sigcontext * scp)
* list:
*/
for (pthread = _thread_link_list; pthread != NULL;
- pthread = pthread->nxt)
+ pthread = pthread->nxt) {
+ pthread_t pthread_saved = _thread_run;
+
+ _thread_run = pthread;
_thread_signal(pthread,sig);
- /* Dispatch pending signals to the running thread: */
- _dispatch_signals();
+ /*
+ * Dispatch pending signals to the
+ * running thread:
+ */
+ _dispatch_signals();
+ _thread_run = pthread_saved;
+ }
}
/* Returns nothing. */
@@ -257,7 +284,7 @@ _thread_signal(pthread_t pthread, int sig)
pthread->interrupted = 1;
/* Change the state of the thread to run: */
- PTHREAD_NEW_STATE(pthread,PS_RUNNING);
+ PTHREAD_SIG_NEW_STATE(pthread,PS_RUNNING);
/* Return the signal number: */
pthread->signo = sig;
@@ -277,7 +304,7 @@ _thread_signal(pthread_t pthread, int sig)
pthread->interrupted = 1;
/* Change the state of the thread to run: */
- PTHREAD_NEW_STATE(pthread,PS_RUNNING);
+ PTHREAD_SIG_NEW_STATE(pthread,PS_RUNNING);
/* Return the signal number: */
pthread->signo = sig;
@@ -292,7 +319,7 @@ _thread_signal(pthread_t pthread, int sig)
if (!sigismember(&pthread->sigmask, sig) &&
_thread_sigact[sig - 1].sa_handler != SIG_DFL) {
/* Change the state of the thread to run: */
- PTHREAD_NEW_STATE(pthread,PS_RUNNING);
+ PTHREAD_SIG_NEW_STATE(pthread,PS_RUNNING);
/* Return the signal number: */
pthread->signo = sig;
diff --git a/lib/libc_r/uthread/uthread_sigaction.c b/lib/libc_r/uthread/uthread_sigaction.c
index 40f3850..73a3b21 100644
--- a/lib/libc_r/uthread/uthread_sigaction.c
+++ b/lib/libc_r/uthread/uthread_sigaction.c
@@ -71,7 +71,7 @@ sigaction(int sig, const struct sigaction * act, struct sigaction * oact)
* Check if the kernel needs to be advised of a change
* in signal action:
*/
- if (act != NULL && sig != SIGVTALRM && sig != SIGCHLD &&
+ if (act != NULL && sig != _SCHED_SIGNAL && sig != SIGCHLD &&
sig != SIGINFO) {
/* Initialise the global signal action structure: */
gact.sa_mask = act->sa_mask;
diff --git a/lib/libc_r/uthread/uthread_sigpending.c b/lib/libc_r/uthread/uthread_sigpending.c
new file mode 100644
index 0000000..44a39a6
--- /dev/null
+++ b/lib/libc_r/uthread/uthread_sigpending.c
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 1999 Daniel Eischen <eischen@vigrid.com>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by John Birrell.
+ * 4. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+#include <signal.h>
+#include <errno.h>
+#ifdef _THREAD_SAFE
+#include <pthread.h>
+#include "pthread_private.h"
+
+int
+sigpending(sigset_t * set)
+{
+ int ret = 0;
+
+ /* Check for a null signal set pointer: */
+ if (set == NULL) {
+ /* Return an invalid argument: */
+ ret = EINVAL;
+ }
+ else {
+ *set = _thread_run->sigpend;
+ }
+ /* Return the completion status: */
+ return (ret);
+}
+#endif
diff --git a/lib/libc_r/uthread/uthread_sigwait.c b/lib/libc_r/uthread/uthread_sigwait.c
index 590f9db..98a5359 100644
--- a/lib/libc_r/uthread/uthread_sigwait.c
+++ b/lib/libc_r/uthread/uthread_sigwait.c
@@ -56,7 +56,7 @@ sigwait(const sigset_t * set, int *sig)
*/
sigdelset(&act.sa_mask, SIGKILL);
sigdelset(&act.sa_mask, SIGSTOP);
- sigdelset(&act.sa_mask, SIGVTALRM);
+ sigdelset(&act.sa_mask, _SCHED_SIGNAL);
sigdelset(&act.sa_mask, SIGCHLD);
sigdelset(&act.sa_mask, SIGINFO);
diff --git a/lib/libc_r/uthread/uthread_spinlock.c b/lib/libc_r/uthread/uthread_spinlock.c
index 9da115e..4da3f8c 100644
--- a/lib/libc_r/uthread/uthread_spinlock.c
+++ b/lib/libc_r/uthread/uthread_spinlock.c
@@ -29,7 +29,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: uthread_spinlock.c,v 1.3 1998/06/06 07:27:06 jb Exp $
+ * $Id: uthread_spinlock.c,v 1.4 1998/06/09 23:13:10 jb Exp $
*
*/
@@ -56,12 +56,9 @@ _spinlock(spinlock_t *lck)
* it before we do.
*/
while(_atomic_lock(&lck->access_lock)) {
- /* Give up the time slice: */
- sched_yield();
-
- /* Check if already locked by the running thread: */
- if (lck->lock_owner == (long) _thread_run)
- return;
+		/* Block the thread until the lock is released. */
+ _thread_run->data.spinlock = lck;
+ _thread_kern_sched_state(PS_SPINBLOCK, __FILE__, __LINE__);
}
/* The running thread now owns the lock: */
@@ -81,24 +78,25 @@ _spinlock(spinlock_t *lck)
void
_spinlock_debug(spinlock_t *lck, char *fname, int lineno)
{
+ int cnt = 0;
+
/*
* Try to grab the lock and loop if another thread grabs
* it before we do.
*/
while(_atomic_lock(&lck->access_lock)) {
- /* Give up the time slice: */
- sched_yield();
-
- /* Check if already locked by the running thread: */
- if (lck->lock_owner == (long) _thread_run) {
+ cnt++;
+ if (cnt > 100) {
char str[256];
- snprintf(str, sizeof(str), "%s - Warning: Thread %p attempted to lock %p from %s (%d) which it had already locked in %s (%d)\n", __progname, _thread_run, lck, fname, lineno, lck->fname, lck->lineno);
+ snprintf(str, sizeof(str), "%s - Warning: Thread %p attempted to lock %p from %s (%d) was left locked from %s (%d)\n", __progname, _thread_run, lck, fname, lineno, lck->fname, lck->lineno);
_thread_sys_write(2,str,strlen(str));
-
- /* Create a thread dump to help debug this problem: */
- _thread_dump_info();
- return;
+ sleep(1);
+ cnt = 0;
}
+
+		/* Block the thread until the lock is released. */
+ _thread_run->data.spinlock = lck;
+ _thread_kern_sched_state(PS_SPINBLOCK, fname, lineno);
}
/* The running thread now owns the lock: */
diff --git a/lib/libc_r/uthread/uthread_suspend_np.c b/lib/libc_r/uthread/uthread_suspend_np.c
index 871683a..6a6eaf4 100644
--- a/lib/libc_r/uthread/uthread_suspend_np.c
+++ b/lib/libc_r/uthread/uthread_suspend_np.c
@@ -51,8 +51,21 @@ pthread_suspend_np(pthread_t thread)
thread->interrupted = 1;
}
+ /*
+ * Guard against preemption by a scheduling signal.
+ * A change of thread state modifies the waiting
+ * and priority queues.
+ */
+ _thread_kern_sched_defer();
+
/* Suspend the thread. */
PTHREAD_NEW_STATE(thread,PS_SUSPENDED);
+
+ /*
+ * Reenable preemption and yield if a scheduling signal
+ * occurred while in the critical region.
+ */
+ _thread_kern_sched_undefer();
}
return(ret);
}
diff --git a/lib/libc_r/uthread/uthread_switch_np.c b/lib/libc_r/uthread/uthread_switch_np.c
new file mode 100644
index 0000000..8373214
--- /dev/null
+++ b/lib/libc_r/uthread/uthread_switch_np.c
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Daniel Eischen.
+ * 4. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+#include <errno.h>
+#ifdef _THREAD_SAFE
+#include <pthread.h>
+#include <pthread_np.h>
+#include "pthread_private.h"
+
+
+int
+pthread_switch_add_np(pthread_switch_routine_t routine)
+{
+ int ret = 0;
+
+ if (routine == NULL)
+ /* Return an invalid argument error: */
+ ret = EINVAL;
+ else
+		/* Shouldn't need a lock to protect this assignment. */
+ _sched_switch_hook = routine;
+
+ return(ret);
+}
+
+int
+pthread_switch_delete_np(pthread_switch_routine_t routine)
+{
+ int ret = 0;
+
+ if (routine != _sched_switch_hook)
+ /* Return an invalid argument error: */
+ ret = EINVAL;
+ else
+		/* Shouldn't need a lock to protect this assignment. */
+ _sched_switch_hook = NULL;
+
+ return(ret);
+}
+#endif
diff --git a/lib/libkse/thread/Makefile.inc b/lib/libkse/thread/Makefile.inc
index 004982a..16799cf 100644
--- a/lib/libkse/thread/Makefile.inc
+++ b/lib/libkse/thread/Makefile.inc
@@ -1,4 +1,4 @@
-# $Id: Makefile.inc,v 1.15 1998/09/12 22:03:20 dt Exp $
+# $Id: Makefile.inc,v 1.16 1998/09/30 06:36:55 jb Exp $
# uthread sources
.PATH: ${.CURDIR}/uthread
@@ -8,10 +8,18 @@ SRCS+= \
uthread_attr_destroy.c \
uthread_attr_init.c \
uthread_attr_getdetachstate.c \
+ uthread_attr_getinheritsched.c \
+ uthread_attr_getschedparam.c \
+ uthread_attr_getschedpolicy.c \
+ uthread_attr_getscope.c \
uthread_attr_getstackaddr.c \
uthread_attr_getstacksize.c \
uthread_attr_setcreatesuspend_np.c \
uthread_attr_setdetachstate.c \
+ uthread_attr_setinheritsched.c \
+ uthread_attr_setschedparam.c \
+ uthread_attr_setschedpolicy.c \
+ uthread_attr_setscope.c \
uthread_attr_setstackaddr.c \
uthread_attr_setstacksize.c \
uthread_autoinit.cc \
@@ -44,6 +52,7 @@ SRCS+= \
uthread_getdirentries.c \
uthread_getpeername.c \
uthread_getprio.c \
+ uthread_getschedparam.c \
uthread_getsockname.c \
uthread_getsockopt.c \
uthread_info.c \
@@ -57,11 +66,14 @@ SRCS+= \
uthread_mattr_kind_np.c \
uthread_multi_np.c \
uthread_mutex.c \
+ uthread_mutex_prioceiling.c \
+ uthread_mutex_protocol.c \
uthread_mutexattr_destroy.c \
uthread_nanosleep.c \
uthread_once.c \
uthread_open.c \
uthread_pipe.c \
+ uthread_priority_queue.c \
uthread_queue.c \
uthread_read.c \
uthread_readv.c \
@@ -76,12 +88,14 @@ SRCS+= \
uthread_sendto.c \
uthread_seterrno.c \
uthread_setprio.c \
+ uthread_setschedparam.c \
uthread_setsockopt.c \
uthread_shutdown.c \
uthread_sig.c \
uthread_sigaction.c \
uthread_sigblock.c \
uthread_sigmask.c \
+ uthread_sigpending.c \
uthread_sigprocmask.c \
uthread_sigsetmask.c \
uthread_sigsuspend.c \
@@ -92,6 +106,7 @@ SRCS+= \
uthread_spec.c \
uthread_spinlock.c \
uthread_suspend_np.c \
+ uthread_switch_np.c \
uthread_vfork.c \
uthread_wait4.c \
uthread_write.c \
diff --git a/lib/libkse/thread/thr_attr_getinheritsched.c b/lib/libkse/thread/thr_attr_getinheritsched.c
new file mode 100644
index 0000000..38851ca
--- /dev/null
+++ b/lib/libkse/thread/thr_attr_getinheritsched.c
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Daniel Eischen.
+ * 4. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+#include <errno.h>
+#ifdef _THREAD_SAFE
+#include <pthread.h>
+#include "pthread_private.h"
+
+int
+pthread_attr_getinheritsched(pthread_attr_t *attr, int *sched_inherit)
+{
+ int ret = 0;
+
+ if ((attr == NULL) || (*attr == NULL))
+ ret = EINVAL;
+ else
+ *sched_inherit = (*attr)->sched_inherit;
+
+ return(ret);
+}
+#endif
diff --git a/lib/libkse/thread/thr_attr_getschedparam.c b/lib/libkse/thread/thr_attr_getschedparam.c
new file mode 100644
index 0000000..ea5e19d
--- /dev/null
+++ b/lib/libkse/thread/thr_attr_getschedparam.c
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Daniel Eischen.
+ * 4. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+#include <errno.h>
+#ifdef _THREAD_SAFE
+#include <pthread.h>
+#include "pthread_private.h"
+
+int
+pthread_attr_getschedparam(pthread_attr_t *attr, struct sched_param *param)
+{
+ int ret = 0;
+
+ if ((attr == NULL) || (*attr == NULL) || (param == NULL))
+ ret = EINVAL;
+ else
+ param->sched_priority = (*attr)->prio;
+
+ return(ret);
+}
+#endif
diff --git a/lib/libkse/thread/thr_attr_getschedpolicy.c b/lib/libkse/thread/thr_attr_getschedpolicy.c
new file mode 100644
index 0000000..0b9ff59
--- /dev/null
+++ b/lib/libkse/thread/thr_attr_getschedpolicy.c
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Daniel Eischen.
+ * 4. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+#include <errno.h>
+#ifdef _THREAD_SAFE
+#include <pthread.h>
+#include "pthread_private.h"
+
+int
+pthread_attr_getschedpolicy(pthread_attr_t *attr, int *policy)
+{
+ int ret = 0;
+
+ if ((attr == NULL) || (*attr == NULL) || (policy == NULL))
+ ret = EINVAL;
+ else
+ *policy = (*attr)->sched_policy;
+
+ return(ret);
+}
+#endif
diff --git a/lib/libkse/thread/thr_attr_getscope.c b/lib/libkse/thread/thr_attr_getscope.c
new file mode 100644
index 0000000..f84b104
--- /dev/null
+++ b/lib/libkse/thread/thr_attr_getscope.c
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Daniel Eischen.
+ * 4. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+#include <errno.h>
+#ifdef _THREAD_SAFE
+#include <pthread.h>
+#include "pthread_private.h"
+
+int
+pthread_attr_getscope(pthread_attr_t *attr, int *contentionscope)
+{
+ int ret = 0;
+
+ if ((attr == NULL) || (*attr == NULL) || (contentionscope == NULL))
+ /* Return an invalid argument: */
+ ret = EINVAL;
+
+ else
+ *contentionscope = (*attr)->flags & PTHREAD_SCOPE_SYSTEM ?
+ PTHREAD_SCOPE_SYSTEM : PTHREAD_SCOPE_PROCESS;
+
+ return(ret);
+}
+#endif
diff --git a/lib/libkse/thread/thr_attr_setinheritsched.c b/lib/libkse/thread/thr_attr_setinheritsched.c
new file mode 100644
index 0000000..017b7df
--- /dev/null
+++ b/lib/libkse/thread/thr_attr_setinheritsched.c
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Daniel Eischen.
+ * 4. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+#include <errno.h>
+#ifdef _THREAD_SAFE
+#include <pthread.h>
+#include "pthread_private.h"
+
+int
+pthread_attr_setinheritsched(pthread_attr_t *attr, int sched_inherit)
+{
+ int ret = 0;
+
+ if ((attr == NULL) || (*attr == NULL))
+ ret = EINVAL;
+ else
+ (*attr)->sched_inherit = sched_inherit;
+
+ return(ret);
+}
+#endif
diff --git a/lib/libkse/thread/thr_attr_setschedparam.c b/lib/libkse/thread/thr_attr_setschedparam.c
new file mode 100644
index 0000000..5c860a8
--- /dev/null
+++ b/lib/libkse/thread/thr_attr_setschedparam.c
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Daniel Eischen.
+ * 4. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+#include <errno.h>
+#ifdef _THREAD_SAFE
+#include <pthread.h>
+#include "pthread_private.h"
+
+int
+pthread_attr_setschedparam(pthread_attr_t *attr, struct sched_param *param)
+{
+ int ret = 0;
+
+ if ((attr == NULL) || (*attr == NULL) || (param == NULL))
+ ret = EINVAL;
+ else
+ (*attr)->prio = param->sched_priority;
+
+ return(ret);
+}
+#endif
diff --git a/lib/libkse/thread/thr_attr_setschedpolicy.c b/lib/libkse/thread/thr_attr_setschedpolicy.c
new file mode 100644
index 0000000..3d5aa3c
--- /dev/null
+++ b/lib/libkse/thread/thr_attr_setschedpolicy.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Daniel Eischen.
+ * 4. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+#include <errno.h>
+#ifdef _THREAD_SAFE
+#include <pthread.h>
+#include "pthread_private.h"
+
+int
+pthread_attr_setschedpolicy(pthread_attr_t *attr, int policy)
+{
+ int ret = 0;
+
+ if ((attr == NULL) || (*attr == NULL) || (policy < SCHED_FIFO) ||
+ (policy > SCHED_RR))
+ ret = EINVAL;
+ else
+ (*attr)->sched_policy = policy;
+
+ return(ret);
+}
+#endif
diff --git a/lib/libkse/thread/thr_attr_setscope.c b/lib/libkse/thread/thr_attr_setscope.c
new file mode 100644
index 0000000..24dead6
--- /dev/null
+++ b/lib/libkse/thread/thr_attr_setscope.c
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Daniel Eischen.
+ * 4. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+#include <errno.h>
+#ifdef _THREAD_SAFE
+#include <pthread.h>
+#include "pthread_private.h"
+
+int
+pthread_attr_setscope(pthread_attr_t *attr, int contentionscope)
+{
+ int ret = 0;
+
+ if ((attr == NULL) || (*attr == NULL) ||
+ ((contentionscope != PTHREAD_SCOPE_PROCESS) &&
+ (contentionscope != PTHREAD_SCOPE_SYSTEM)))
+ /* Return an invalid argument: */
+ ret = EINVAL;
+
+ else if (contentionscope == PTHREAD_SCOPE_SYSTEM)
+ /* We don't support system wide contention: */
+#ifdef NOT_YET
+ ret = ENOTSUP;
+#else
+ ret = EOPNOTSUPP;
+#endif
+
+ else
+ (*attr)->flags |= contentionscope;
+
+ return(ret);
+}
+#endif
diff --git a/lib/libkse/thread/thr_cond.c b/lib/libkse/thread/thr_cond.c
index a085ea6..c090d79 100644
--- a/lib/libkse/thread/thr_cond.c
+++ b/lib/libkse/thread/thr_cond.c
@@ -20,7 +20,7 @@
* THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@@ -37,6 +37,14 @@
#include <pthread.h>
#include "pthread_private.h"
+/*
+ * Prototypes
+ */
+static inline pthread_t cond_queue_deq(pthread_cond_t);
+static inline void cond_queue_remove(pthread_cond_t, pthread_t);
+static inline void cond_queue_enq(pthread_cond_t, pthread_t);
+
+
int
pthread_cond_init(pthread_cond_t * cond, const pthread_condattr_t * cond_attr)
{
@@ -83,9 +91,10 @@ pthread_cond_init(pthread_cond_t * cond, const pthread_condattr_t * cond_attr)
* Initialise the condition variable
* structure:
*/
- _thread_queue_init(&pcond->c_queue);
+ TAILQ_INIT(&pcond->c_queue);
pcond->c_flags |= COND_FLAGS_INITED;
pcond->c_type = type;
+ pcond->c_mutex = NULL;
memset(&pcond->lock,0,sizeof(pcond->lock));
*cond = pcond;
}
@@ -144,33 +153,57 @@ pthread_cond_wait(pthread_cond_t * cond, pthread_mutex_t * mutex)
switch ((*cond)->c_type) {
/* Fast condition variable: */
case COND_TYPE_FAST:
- /* Wait forever: */
- _thread_run->wakeup_time.tv_sec = -1;
-
- /*
- * Queue the running thread for the condition
- * variable:
- */
- _thread_queue_enq(&(*cond)->c_queue, _thread_run);
-
- /* Unlock the mutex: */
- if ((rval = pthread_mutex_unlock(mutex)) != 0) {
- /*
- * Cannot unlock the mutex, so remove the
- * running thread from the condition
- * variable queue:
- */
- _thread_queue_deq(&(*cond)->c_queue);
-
+ if ((mutex == NULL) || (((*cond)->c_mutex != NULL) &&
+ ((*cond)->c_mutex != *mutex))) {
/* Unlock the condition variable structure: */
_SPINUNLOCK(&(*cond)->lock);
+
+ /* Return invalid argument error: */
+ rval = EINVAL;
} else {
- /* Schedule the next thread: */
- _thread_kern_sched_state_unlock(PS_COND_WAIT,
- &(*cond)->lock, __FILE__, __LINE__);
+ /* Reset the timeout flag: */
+ _thread_run->timeout = 0;
- /* Lock the mutex: */
- rval = pthread_mutex_lock(mutex);
+ /*
+ * Queue the running thread for the condition
+ * variable:
+ */
+ cond_queue_enq(*cond, _thread_run);
+
+ /* Remember the mutex that is being used: */
+ (*cond)->c_mutex = *mutex;
+
+ /* Wait forever: */
+ _thread_run->wakeup_time.tv_sec = -1;
+
+ /* Unlock the mutex: */
+ if ((rval = _mutex_cv_unlock(mutex)) != 0) {
+ /*
+ * Cannot unlock the mutex, so remove
+ * the running thread from the condition
+ * variable queue:
+ */
+ cond_queue_remove(*cond, _thread_run);
+
+ /* Check for no more waiters: */
+ if (TAILQ_FIRST(&(*cond)->c_queue) ==
+ NULL)
+ (*cond)->c_mutex = NULL;
+
+ /* Unlock the condition variable structure: */
+ _SPINUNLOCK(&(*cond)->lock);
+ }
+ else {
+ /*
+ * Schedule the next thread and unlock
+ * the condition variable structure:
+ */
+ _thread_kern_sched_state_unlock(PS_COND_WAIT,
+ &(*cond)->lock, __FILE__, __LINE__);
+
+ /* Lock the mutex: */
+ rval = _mutex_cv_lock(mutex);
+ }
}
break;
@@ -183,7 +216,6 @@ pthread_cond_wait(pthread_cond_t * cond, pthread_mutex_t * mutex)
rval = EINVAL;
break;
}
-
}
/* Return the completion status: */
@@ -213,42 +245,88 @@ pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
switch ((*cond)->c_type) {
/* Fast condition variable: */
case COND_TYPE_FAST:
- /* Set the wakeup time: */
- _thread_run->wakeup_time.tv_sec = abstime->tv_sec;
- _thread_run->wakeup_time.tv_nsec = abstime->tv_nsec;
-
- /* Reset the timeout flag: */
- _thread_run->timeout = 0;
-
- /*
- * Queue the running thread for the condition
- * variable:
- */
- _thread_queue_enq(&(*cond)->c_queue, _thread_run);
-
- /* Unlock the mutex: */
- if ((rval = pthread_mutex_unlock(mutex)) != 0) {
- /*
- * Cannot unlock the mutex, so remove the
- * running thread from the condition
- * variable queue:
- */
- _thread_queue_deq(&(*cond)->c_queue);
+ if ((mutex == NULL) || (((*cond)->c_mutex != NULL) &&
+ ((*cond)->c_mutex != *mutex))) {
+ /* Return invalid argument error: */
+ rval = EINVAL;
/* Unlock the condition variable structure: */
_SPINUNLOCK(&(*cond)->lock);
} else {
- /* Schedule the next thread: */
- _thread_kern_sched_state_unlock(PS_COND_WAIT,
- &(*cond)->lock, __FILE__, __LINE__);
+ /* Set the wakeup time: */
+ _thread_run->wakeup_time.tv_sec =
+ abstime->tv_sec;
+ _thread_run->wakeup_time.tv_nsec =
+ abstime->tv_nsec;
- /* Lock the mutex: */
- if ((rval = pthread_mutex_lock(mutex)) != 0) {
- }
- /* Check if the wait timed out: */
- else if (_thread_run->timeout) {
- /* Return a timeout error: */
- rval = ETIMEDOUT;
+ /* Reset the timeout flag: */
+ _thread_run->timeout = 0;
+
+ /*
+ * Queue the running thread for the condition
+ * variable:
+ */
+ cond_queue_enq(*cond, _thread_run);
+
+ /* Remember the mutex that is being used: */
+ (*cond)->c_mutex = *mutex;
+
+ /* Unlock the mutex: */
+ if ((rval = _mutex_cv_unlock(mutex)) != 0) {
+ /*
+ * Cannot unlock the mutex, so remove
+ * the running thread from the condition
+ * variable queue:
+ */
+ cond_queue_remove(*cond, _thread_run);
+
+ /* Check for no more waiters: */
+ if (TAILQ_FIRST(&(*cond)->c_queue) == NULL)
+ (*cond)->c_mutex = NULL;
+
+ /* Unlock the condition variable structure: */
+ _SPINUNLOCK(&(*cond)->lock);
+ } else {
+ /*
+ * Schedule the next thread and unlock
+ * the condition variable structure:
+ */
+ _thread_kern_sched_state_unlock(PS_COND_WAIT,
+ &(*cond)->lock, __FILE__, __LINE__);
+
+ /* Check if the wait timed out: */
+ if (_thread_run->timeout == 0) {
+ /* Lock the mutex: */
+ rval = _mutex_cv_lock(mutex);
+ }
+ else {
+ /* Lock the condition variable structure: */
+ _SPINLOCK(&(*cond)->lock);
+
+ /*
+ * The wait timed out; remove
+ * the thread from the condition
+ * variable queue:
+ */
+ cond_queue_remove(*cond,
+ _thread_run);
+
+ /* Check for no more waiters: */
+ if (TAILQ_FIRST(&(*cond)->c_queue) == NULL)
+ (*cond)->c_mutex = NULL;
+
+ /* Unlock the condition variable structure: */
+ _SPINUNLOCK(&(*cond)->lock);
+
+ /* Return a timeout error: */
+ rval = ETIMEDOUT;
+
+ /*
+ * Lock the mutex and ignore
+ * any errors:
+ */
+ (void)_mutex_cv_lock(mutex);
+ }
}
}
break;
@@ -273,7 +351,6 @@ int
pthread_cond_signal(pthread_cond_t * cond)
{
int rval = 0;
- int status;
pthread_t pthread;
if (cond == NULL || *cond == NULL)
@@ -286,11 +363,22 @@ pthread_cond_signal(pthread_cond_t * cond)
switch ((*cond)->c_type) {
/* Fast condition variable: */
case COND_TYPE_FAST:
- /* Bring the next thread off the condition queue: */
- if ((pthread = _thread_queue_deq(&(*cond)->c_queue)) != NULL) {
+ /*
+ * Enter a loop to dequeue threads from the condition
+ * queue until we find one that hasn't previously
+ * timed out.
+ */
+ while (((pthread = cond_queue_deq(*cond)) != NULL) &&
+ (pthread->timeout != 0)) {
+ }
+
+ if (pthread != NULL)
/* Allow the thread to run: */
PTHREAD_NEW_STATE(pthread,PS_RUNNING);
- }
+
+ /* Check for no more waiters: */
+ if (TAILQ_FIRST(&(*cond)->c_queue) == NULL)
+ (*cond)->c_mutex = NULL;
break;
/* Trap invalid condition variable types: */
@@ -312,12 +400,21 @@ int
pthread_cond_broadcast(pthread_cond_t * cond)
{
int rval = 0;
- int status;
pthread_t pthread;
if (cond == NULL || *cond == NULL)
rval = EINVAL;
else {
+ /*
+ * Guard against preemption by a scheduling signal.
+ * A change of thread state modifies the waiting
+ * and priority queues. In addition, we must assure
+ * that all threads currently waiting on the condition
+ * variable are signaled and are not timed out by a
+ * scheduling signal that causes a preemption.
+ */
+ _thread_kern_sched_defer();
+
/* Lock the condition variable structure: */
_SPINLOCK(&(*cond)->lock);
@@ -329,11 +426,17 @@ pthread_cond_broadcast(pthread_cond_t * cond)
* Enter a loop to bring all threads off the
* condition queue:
*/
- while ((pthread =
- _thread_queue_deq(&(*cond)->c_queue)) != NULL) {
- /* Allow the thread to run: */
- PTHREAD_NEW_STATE(pthread,PS_RUNNING);
+ while ((pthread = cond_queue_deq(*cond)) != NULL) {
+ /*
+ * The thread is already running if the
+ * timeout flag is set.
+ */
+ if (pthread->timeout == 0)
+ PTHREAD_NEW_STATE(pthread,PS_RUNNING);
}
+
+ /* There are no more waiting threads: */
+ (*cond)->c_mutex = NULL;
break;
/* Trap invalid condition variable types: */
@@ -345,9 +448,74 @@ pthread_cond_broadcast(pthread_cond_t * cond)
/* Unlock the condition variable structure: */
_SPINUNLOCK(&(*cond)->lock);
+
+ /* Reenable preemption and yield if necessary.
+ */
+ _thread_kern_sched_undefer();
}
/* Return the completion status: */
return (rval);
}
+
+/*
+ * Dequeue a waiting thread from the head of a condition queue in
+ * descending priority order.
+ */
+static inline pthread_t
+cond_queue_deq(pthread_cond_t cond)
+{
+ pthread_t pthread;
+
+ if ((pthread = TAILQ_FIRST(&cond->c_queue)) != NULL) {
+ TAILQ_REMOVE(&cond->c_queue, pthread, qe);
+ pthread->flags &= ~PTHREAD_FLAGS_QUEUED;
+ }
+
+ return(pthread);
+}
+
+/*
+ * Remove a waiting thread from a condition queue in descending priority
+ * order.
+ */
+static inline void
+cond_queue_remove(pthread_cond_t cond, pthread_t pthread)
+{
+ /*
+ * Because pthread_cond_timedwait() can timeout as well
+ * as be signaled by another thread, it is necessary to
+ * guard against removing the thread from the queue if
+ * it isn't in the queue.
+ */
+ if (pthread->flags & PTHREAD_FLAGS_QUEUED) {
+ TAILQ_REMOVE(&cond->c_queue, pthread, qe);
+ pthread->flags &= ~PTHREAD_FLAGS_QUEUED;
+ }
+}
+
+/*
+ * Enqueue a waiting thread to a condition queue in descending priority
+ * order.
+ */
+static inline void
+cond_queue_enq(pthread_cond_t cond, pthread_t pthread)
+{
+ pthread_t tid = TAILQ_LAST(&cond->c_queue, cond_head);
+
+ /*
+ * For the common case of all threads having equal priority,
+ * we perform a quick check against the priority of the thread
+ * at the tail of the queue.
+ */
+ if ((tid == NULL) || (pthread->active_priority <= tid->active_priority))
+ TAILQ_INSERT_TAIL(&cond->c_queue, pthread, qe);
+ else {
+ tid = TAILQ_FIRST(&cond->c_queue);
+ while (pthread->active_priority <= tid->active_priority)
+ tid = TAILQ_NEXT(tid, qe);
+ TAILQ_INSERT_BEFORE(tid, pthread, qe);
+ }
+ pthread->flags |= PTHREAD_FLAGS_QUEUED;
+}
#endif
diff --git a/lib/libkse/thread/thr_create.c b/lib/libkse/thread/thr_create.c
index 4169461..438e527 100644
--- a/lib/libkse/thread/thr_create.c
+++ b/lib/libkse/thread/thr_create.c
@@ -99,12 +99,6 @@ pthread_create(pthread_t * thread, const pthread_attr_t * attr,
*/
new_thread->magic = PTHREAD_MAGIC;
- if (pattr->suspend == PTHREAD_CREATE_SUSPENDED) {
- PTHREAD_NEW_STATE(new_thread,PS_SUSPENDED);
- } else {
- PTHREAD_NEW_STATE(new_thread,PS_RUNNING);
- }
-
/* Initialise the thread for signals: */
new_thread->sigmask = _thread_run->sigmask;
@@ -162,21 +156,26 @@ pthread_create(pthread_t * thread, const pthread_attr_t * attr,
*/
if (new_thread->attr.flags & PTHREAD_INHERIT_SCHED) {
/* Copy the scheduling attributes: */
- new_thread->pthread_priority = _thread_run->pthread_priority;
- new_thread->attr.prio = _thread_run->pthread_priority;
- new_thread->attr.schedparam_policy = _thread_run->attr.schedparam_policy;
+ new_thread->base_priority = _thread_run->base_priority;
+ new_thread->attr.prio = _thread_run->base_priority;
+ new_thread->attr.sched_policy = _thread_run->attr.sched_policy;
} else {
/*
* Use just the thread priority, leaving the
* other scheduling attributes as their
* default values:
*/
- new_thread->pthread_priority = new_thread->attr.prio;
+ new_thread->base_priority = new_thread->attr.prio;
}
+ new_thread->active_priority = new_thread->base_priority;
+ new_thread->inherited_priority = 0;
/* Initialise the join queue for the new thread: */
_thread_queue_init(&(new_thread->join_queue));
+ /* Initialize the mutex queue: */
+ TAILQ_INIT(&new_thread->mutexq);
+
/* Initialise hooks in the thread structure: */
new_thread->specific_data = NULL;
new_thread->cleanup = NULL;
@@ -200,6 +199,27 @@ pthread_create(pthread_t * thread, const pthread_attr_t * attr,
/* Unlock the thread list: */
_unlock_thread_list();
+ /*
+ * Guard against preemption by a scheduling signal.
+ * A change of thread state modifies the waiting
+ * and priority queues.
+ */
+ _thread_kern_sched_defer();
+
+ if (pattr->suspend == PTHREAD_CREATE_SUSPENDED) {
+ new_thread->state = PS_SUSPENDED;
+ PTHREAD_WAITQ_INSERT(new_thread);
+ } else {
+ new_thread->state = PS_RUNNING;
+ PTHREAD_PRIOQ_INSERT_TAIL(new_thread);
+ }
+
+ /*
+ * Reenable preemption and yield if a scheduling
+ * signal occurred while in the critical region.
+ */
+ _thread_kern_sched_undefer();
+
/* Return a pointer to the thread structure: */
(*thread) = new_thread;
diff --git a/lib/libkse/thread/thr_detach.c b/lib/libkse/thread/thr_detach.c
index da456bf..05da832 100644
--- a/lib/libkse/thread/thr_detach.c
+++ b/lib/libkse/thread/thr_detach.c
@@ -52,11 +52,24 @@ pthread_detach(pthread_t pthread)
/* Flag the thread as detached: */
pthread->attr.flags |= PTHREAD_DETACHED;
+ /*
+ * Guard against preemption by a scheduling signal.
+ * A change of thread state modifies the waiting
+ * and priority queues.
+ */
+ _thread_kern_sched_defer();
+
/* Enter a loop to bring all threads off the join queue: */
while ((next_thread = _thread_queue_deq(&pthread->join_queue)) != NULL) {
/* Make the thread run: */
PTHREAD_NEW_STATE(next_thread,PS_RUNNING);
}
+
+ /*
+ * Reenable preemption and yield if a scheduling signal
+ * occurred while in the critical region.
+ */
+ _thread_kern_sched_undefer();
} else
/* Return an error: */
rval = EINVAL;
diff --git a/lib/libkse/thread/thr_exit.c b/lib/libkse/thread/thr_exit.c
index a5fc400..93b8b83 100644
--- a/lib/libkse/thread/thr_exit.c
+++ b/lib/libkse/thread/thr_exit.c
@@ -49,7 +49,7 @@ void _exit(int status)
itimer.it_interval.tv_usec = 0;
itimer.it_value.tv_sec = 0;
itimer.it_value.tv_usec = 0;
- setitimer(ITIMER_VIRTUAL, &itimer, NULL);
+ setitimer(_ITIMER_SCHED_TIMER, &itimer, NULL);
/* Close the pthread kernel pipe: */
_thread_sys_close(_thread_kern_pipe[0]);
@@ -127,6 +127,13 @@ pthread_exit(void *status)
/* Run the thread-specific data destructors: */
_thread_cleanupspecific();
}
+
+ /*
+ * Guard against preemption by a scheduling signal. A change of
+ * thread state modifies the waiting and priority queues.
+ */
+ _thread_kern_sched_defer();
+
/* Check if there are any threads joined to this one: */
while ((pthread = _thread_queue_deq(&(_thread_run->join_queue))) != NULL) {
/* Wake the joined thread and let it detach this thread: */
@@ -134,6 +141,12 @@ pthread_exit(void *status)
}
/*
+ * Reenable preemption and yield if a scheduling signal
+ * occurred while in the critical region.
+ */
+ _thread_kern_sched_undefer();
+
+ /*
* Lock the garbage collector mutex to ensure that the garbage
* collector is not using the dead thread list.
*/
diff --git a/lib/libkse/thread/thr_fork.c b/lib/libkse/thread/thr_fork.c
index 960c1de..5582c1e 100644
--- a/lib/libkse/thread/thr_fork.c
+++ b/lib/libkse/thread/thr_fork.c
@@ -41,7 +41,7 @@
pid_t
fork(void)
{
- int flags;
+ int i, flags;
pid_t ret;
pthread_t pthread;
pthread_t pthread_next;
@@ -88,6 +88,11 @@ fork(void)
else if (_thread_sys_fcntl(_thread_kern_pipe[1], F_SETFL, flags | O_NONBLOCK) == -1) {
/* Abort this application: */
abort();
+ /* Initialize the ready queue: */
+ } else if (_pq_init(&_readyq, PTHREAD_MIN_PRIORITY,
+ PTHREAD_MAX_PRIORITY) != 0) {
+ /* Abort this application: */
+ PANIC("Cannot allocate priority ready queue.");
} else {
/* Point to the first thread in the list: */
pthread = _thread_link_list;
@@ -119,6 +124,33 @@ fork(void)
/* Point to the next thread: */
pthread = pthread_next;
}
+
+ /* Re-init the waiting queues. */
+ TAILQ_INIT(&_waitingq);
+
+ /* Initialize the scheduling switch hook routine: */
+ _sched_switch_hook = NULL;
+
+ /* Clear out any locks in the file descriptor table: */
+ for (i = 0; i < _thread_dtablesize; i++) {
+ if (_thread_fd_table[i] != NULL) {
+ /* Initialise the file locks: */
+ memset(&_thread_fd_table[i]->lock, 0,
+ sizeof(_thread_fd_table[i]->lock));
+ _thread_fd_table[i]->r_owner = NULL;
+ _thread_fd_table[i]->w_owner = NULL;
+ _thread_fd_table[i]->r_fname = NULL;
+ _thread_fd_table[i]->w_fname = NULL;
+ _thread_fd_table[i]->r_lineno = 0;
+ _thread_fd_table[i]->w_lineno = 0;
+ _thread_fd_table[i]->r_lockcount = 0;
+ _thread_fd_table[i]->w_lockcount = 0;
+
+ /* Initialise the read/write queues: */
+ _thread_queue_init(&_thread_fd_table[i]->r_queue);
+ _thread_queue_init(&_thread_fd_table[i]->w_queue);
+ }
+ }
}
}
diff --git a/lib/libkse/thread/thr_getprio.c b/lib/libkse/thread/thr_getprio.c
index 708b8f1..b2c94d6 100644
--- a/lib/libkse/thread/thr_getprio.c
+++ b/lib/libkse/thread/thr_getprio.c
@@ -20,7 +20,7 @@
* THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@@ -38,12 +38,11 @@
int
pthread_getprio(pthread_t pthread)
{
- int ret;
+ int policy, ret;
+ struct sched_param param;
- /* Find the thread in the list of active threads: */
- if ((ret = _find_thread(pthread)) == 0)
- /* Get the thread priority: */
- ret = pthread->pthread_priority;
+ if ((ret = pthread_getschedparam(pthread, &policy, &param)) == 0)
+ ret = param.sched_priority;
else {
/* Invalid thread: */
errno = ret;
diff --git a/lib/libkse/thread/thr_getschedparam.c b/lib/libkse/thread/thr_getschedparam.c
new file mode 100644
index 0000000..e7d18d9
--- /dev/null
+++ b/lib/libkse/thread/thr_getschedparam.c
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Daniel Eischen.
+ * 4. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+#include <errno.h>
+#ifdef _THREAD_SAFE
+#include <pthread.h>
+#include "pthread_private.h"
+
+int
+pthread_getschedparam(pthread_t pthread, int *policy, struct sched_param *param)
+{
+ int ret;
+
+ if ((param == NULL) || (policy == NULL))
+ /* Return an invalid argument error: */
+ ret = EINVAL;
+
+ /* Find the thread in the list of active threads: */
+ else if ((ret = _find_thread(pthread)) == 0) {
+ /* Return the threads base priority and scheduling policy: */
+ param->sched_priority = pthread->base_priority;
+ *policy = pthread->attr.sched_policy;
+ }
+
+ return(ret);
+}
+#endif
diff --git a/lib/libkse/thread/thr_info.c b/lib/libkse/thread/thr_info.c
index f66dd2d..d2d97da 100644
--- a/lib/libkse/thread/thr_info.c
+++ b/lib/libkse/thread/thr_info.c
@@ -60,9 +60,11 @@ static const struct s_thread_info thread_info[] = {
{PS_WAIT_WAIT , "Waiting process"},
{PS_SIGSUSPEND , "Suspended, waiting for a signal"},
{PS_SIGWAIT , "Waiting for a signal"},
+ {PS_SPINBLOCK , "Waiting for a spinlock"},
{PS_JOIN , "Waiting to join"},
{PS_SUSPENDED , "Suspended"},
{PS_DEAD , "Dead"},
+ {PS_DEADLOCK , "Deadlocked"},
{PS_STATE_MAX , "Not a real state!"}
};
@@ -75,6 +77,7 @@ _thread_dump_info(void)
int j;
pthread_t pthread;
char tmpfile[128];
+ pq_list_t *pq_list;
for (i = 0; i < 100000; i++) {
snprintf(tmpfile, sizeof(tmpfile), "/tmp/uthread.dump.%u.%i",
@@ -116,7 +119,7 @@ _thread_dump_info(void)
snprintf(s, sizeof(s),
"--------------------\nThread %p (%s) prio %3d state %s [%s:%d]\n",
pthread, (pthread->name == NULL) ?
- "":pthread->name, pthread->pthread_priority,
+ "":pthread->name, pthread->base_priority,
thread_info[j].name,
pthread->fname,pthread->lineno);
_thread_sys_write(fd, s, strlen(s));
@@ -167,6 +170,50 @@ _thread_dump_info(void)
}
}
+ /* Output a header for ready threads: */
+ strcpy(s, "\n\n=============\nREADY THREADS\n\n");
+ _thread_sys_write(fd, s, strlen(s));
+
+ /* Enter a loop to report each thread in the ready queue: */
+ TAILQ_FOREACH (pq_list, &_readyq.pq_queue, pl_link) {
+ TAILQ_FOREACH(pthread, &pq_list->pl_head, pqe) {
+ /* Find the state: */
+ for (j = 0; j < (sizeof(thread_info) /
+ sizeof(struct s_thread_info)) - 1; j++)
+ if (thread_info[j].state == pthread->state)
+ break;
+ /* Output a record for the current thread: */
+ snprintf(s, sizeof(s),
+ "--------------------\nThread %p (%s) prio %3d state %s [%s:%d]\n",
+ pthread, (pthread->name == NULL) ?
+ "":pthread->name, pthread->base_priority,
+ thread_info[j].name,
+ pthread->fname,pthread->lineno);
+ _thread_sys_write(fd, s, strlen(s));
+ }
+ }
+
+ /* Output a header for waiting threads: */
+ strcpy(s, "\n\n=============\nWAITING THREADS\n\n");
+ _thread_sys_write(fd, s, strlen(s));
+
+ /* Enter a loop to report each thread in the waiting queue: */
+ TAILQ_FOREACH (pthread, &_waitingq, pqe) {
+ /* Find the state: */
+ for (j = 0; j < (sizeof(thread_info) /
+ sizeof(struct s_thread_info)) - 1; j++)
+ if (thread_info[j].state == pthread->state)
+ break;
+ /* Output a record for the current thread: */
+ snprintf(s, sizeof(s),
+ "--------------------\nThread %p (%s) prio %3d state %s [%s:%d]\n",
+ pthread, (pthread->name == NULL) ?
+ "":pthread->name, pthread->base_priority,
+ thread_info[j].name,
+ pthread->fname,pthread->lineno);
+ _thread_sys_write(fd, s, strlen(s));
+ }
+
/* Check if there are no dead threads: */
if (_thread_dead == NULL) {
/* Output a record: */
@@ -186,7 +233,7 @@ _thread_dump_info(void)
/* Output a record for the current thread: */
snprintf(s, sizeof(s),
"Thread %p prio %3d [%s:%d]\n",
- pthread, pthread->pthread_priority,
+ pthread, pthread->base_priority,
pthread->fname,pthread->lineno);
_thread_sys_write(fd, s, strlen(s));
}
diff --git a/lib/libkse/thread/thr_init.c b/lib/libkse/thread/thr_init.c
index 50f3bef..e4411ce 100644
--- a/lib/libkse/thread/thr_init.c
+++ b/lib/libkse/thread/thr_init.c
@@ -20,7 +20,7 @@
* THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@@ -147,6 +147,11 @@ _thread_init(void)
/* Abort this application: */
PANIC("Cannot get kernel write pipe flags");
}
+ /* Initialize the ready queue: */
+ else if (_pq_init(&_readyq, PTHREAD_MIN_PRIORITY, PTHREAD_MAX_PRIORITY) != 0) {
+ /* Abort this application: */
+ PANIC("Cannot allocate priority ready queue.");
+ }
/* Allocate memory for the thread structure of the initial thread: */
else if ((_thread_initial = (pthread_t) malloc(sizeof(struct pthread))) == NULL) {
/*
@@ -157,10 +162,25 @@ _thread_init(void)
} else {
/* Zero the global kernel thread structure: */
memset(&_thread_kern_thread, 0, sizeof(struct pthread));
+ _thread_kern_thread.flags = PTHREAD_FLAGS_PRIVATE;
memset(_thread_initial, 0, sizeof(struct pthread));
+ /* Initialize the waiting queue: */
+ TAILQ_INIT(&_waitingq);
+
+ /* Initialize the scheduling switch hook routine: */
+ _sched_switch_hook = NULL;
+
+ /*
+ * Write a magic value to the thread structure
+ * to help identify valid ones:
+ */
+ _thread_initial->magic = PTHREAD_MAGIC;
+
/* Default the priority of the initial thread: */
- _thread_initial->pthread_priority = PTHREAD_DEFAULT_PRIORITY;
+ _thread_initial->base_priority = PTHREAD_DEFAULT_PRIORITY;
+ _thread_initial->active_priority = PTHREAD_DEFAULT_PRIORITY;
+ _thread_initial->inherited_priority = 0;
/* Initialise the state of the initial thread: */
_thread_initial->state = PS_RUNNING;
@@ -168,7 +188,13 @@ _thread_init(void)
/* Initialise the queue: */
_thread_queue_init(&(_thread_initial->join_queue));
+ /* Initialize the owned mutex queue and count: */
+ TAILQ_INIT(&(_thread_initial->mutexq));
+ _thread_initial->priority_mutex_count = 0;
+
/* Initialise the rest of the fields: */
+ _thread_initial->sched_defer_count = 0;
+ _thread_initial->yield_on_sched_undefer = 0;
_thread_initial->specific_data = NULL;
_thread_initial->cleanup = NULL;
_thread_initial->queue = NULL;
@@ -206,9 +232,9 @@ _thread_init(void)
* signals that the user-thread kernel needs. Actually
* SIGINFO isn't really needed, but it is nice to have.
*/
- if (_thread_sys_sigaction(SIGVTALRM, &act, NULL) != 0 ||
- _thread_sys_sigaction(SIGINFO , &act, NULL) != 0 ||
- _thread_sys_sigaction(SIGCHLD , &act, NULL) != 0) {
+ if (_thread_sys_sigaction(_SCHED_SIGNAL, &act, NULL) != 0 ||
+ _thread_sys_sigaction(SIGINFO, &act, NULL) != 0 ||
+ _thread_sys_sigaction(SIGCHLD, &act, NULL) != 0) {
/*
* Abort this process if signal initialisation fails:
*/
@@ -256,6 +282,8 @@ _thread_init(void)
pthread_cond_init(&_gc_cond,NULL) != 0)
PANIC("Failed to initialise garbage collector mutex or condvar");
+ gettimeofday(&kern_inc_prio_time, NULL);
+
return;
}
diff --git a/lib/libkse/thread/thr_kern.c b/lib/libkse/thread/thr_kern.c
index 3a6966b..626f1d4 100644
--- a/lib/libkse/thread/thr_kern.c
+++ b/lib/libkse/thread/thr_kern.c
@@ -20,7 +20,7 @@
* THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@@ -53,16 +53,18 @@
static void
_thread_kern_select(int wait_reqd);
+static inline void
+thread_run_switch_hook(pthread_t thread_out, pthread_t thread_in);
+
void
_thread_kern_sched(struct sigcontext * scp)
{
#ifndef __alpha__
char *fdata;
#endif
- int prio = -1;
pthread_t pthread;
pthread_t pthread_h = NULL;
- pthread_t pthread_s = NULL;
+ pthread_t last_thread = NULL;
struct itimerval itimer;
struct timespec ts;
struct timespec ts1;
@@ -105,18 +107,21 @@ __asm__("fnsave %0": :"m"(*fdata));
*/
_thread_kern_in_sched = 0;
- /*
- * There might be pending signals for this thread, so
- * dispatch any that aren't blocked:
- */
- _dispatch_signals();
+ if (_sched_switch_hook != NULL) {
+ /* Run the installed switch hook: */
+ thread_run_switch_hook(_last_user_thread, _thread_run);
+ }
return;
} else
/* Flag the jump buffer was the last state saved: */
_thread_run->sig_saved = 0;
+ /* If the currently running thread is a user thread, save it: */
+ if ((_thread_run->flags & PTHREAD_FLAGS_PRIVATE) == 0)
+ _last_user_thread = _thread_run;
+
/*
- * Enter a the scheduling loop that finds the next thread that is
+ * Enter a scheduling loop that finds the next thread that is
* ready to run. This loop completes when there are no more threads
* in the global list or when a thread has its state restored by
* either a sigreturn (if the state was saved as a sigcontext) or a
@@ -134,12 +139,48 @@ __asm__("fnsave %0": :"m"(*fdata));
_thread_kern_select(0);
/*
- * Enter a loop to look for sleeping threads that are ready:
+ * Define the maximum time before a scheduling signal
+ * is required:
+ */
+ itimer.it_value.tv_sec = 0;
+ itimer.it_value.tv_usec = TIMESLICE_USEC;
+
+ /*
+ * The interval timer is not reloaded when it
+ * times out. The interval time needs to be
+ * calculated every time.
+ */
+ itimer.it_interval.tv_sec = 0;
+ itimer.it_interval.tv_usec = 0;
+
+ /*
+ * Enter a loop to look for sleeping threads that are ready
+ * or timedout. While we're at it, also find the smallest
+ * timeout value for threads waiting for a time.
*/
- for (pthread = _thread_link_list; pthread != NULL;
- pthread = pthread->nxt) {
+ _waitingq_check_reqd = 0; /* reset flag before loop */
+ TAILQ_FOREACH(pthread, &_waitingq, pqe) {
+ /* Check if this thread is ready: */
+ if (pthread->state == PS_RUNNING) {
+ PTHREAD_WAITQ_REMOVE(pthread);
+ PTHREAD_PRIOQ_INSERT_TAIL(pthread);
+ }
+
+ /*
+ * Check if this thread is blocked by an
+ * atomic lock:
+ */
+ else if (pthread->state == PS_SPINBLOCK) {
+ /*
+ * If the lock is available, let
+ * the thread run.
+ */
+ if (pthread->data.spinlock->access_lock == 0) {
+ PTHREAD_NEW_STATE(pthread,PS_RUNNING);
+ }
+
/* Check if this thread is to timeout: */
- if (pthread->state == PS_COND_WAIT ||
+ } else if (pthread->state == PS_COND_WAIT ||
pthread->state == PS_SLEEP_WAIT ||
pthread->state == PS_FDR_WAIT ||
pthread->state == PS_FDW_WAIT ||
@@ -163,9 +204,9 @@ __asm__("fnsave %0": :"m"(*fdata));
*/
if (pthread->state == PS_SELECT_WAIT) {
/*
- * The select has timed out,
- * so zero the file
- * descriptor sets:
+ * The select has timed out, so
+ * zero the file descriptor
+ * sets:
*/
FD_ZERO(&pthread->data.select_data->readfds);
FD_ZERO(&pthread->data.select_data->writefds);
@@ -189,13 +230,72 @@ __asm__("fnsave %0": :"m"(*fdata));
* it to be restarted:
*/
PTHREAD_NEW_STATE(pthread,PS_RUNNING);
+ } else {
+ /*
+ * Calculate the time until this thread
+ * is ready, allowing for the clock
+ * resolution:
+ */
+ ts1.tv_sec = pthread->wakeup_time.tv_sec
+ - ts.tv_sec;
+ ts1.tv_nsec = pthread->wakeup_time.tv_nsec
+ - ts.tv_nsec + CLOCK_RES_NSEC;
+
+ /*
+ * Check for underflow of the
+ * nanosecond field:
+ */
+ if (ts1.tv_nsec < 0) {
+ /*
+ * Allow for the underflow
+ * of the nanosecond field:
+ */
+ ts1.tv_sec--;
+ ts1.tv_nsec += 1000000000;
+ }
+ /*
+ * Check for overflow of the nanosecond
+ * field:
+ */
+ if (ts1.tv_nsec >= 1000000000) {
+ /*
+ * Allow for the overflow of
+ * the nanosecond field:
+ */
+ ts1.tv_sec++;
+ ts1.tv_nsec -= 1000000000;
+ }
+ /*
+ * Convert the timespec structure
+ * to a timeval structure:
+ */
+ TIMESPEC_TO_TIMEVAL(&tv1, &ts1);
+
+ /*
+ * Check if the thread will be ready
+ * sooner than the earliest ones found
+ * so far:
+ */
+ if (timercmp(&tv1, &itimer.it_value, <)) {
+ /*
+ * Update the time value:
+ */
+ itimer.it_value.tv_sec = tv1.tv_sec;
+ itimer.it_value.tv_usec = tv1.tv_usec;
+ }
}
+
}
}
/* Check if there is a current thread: */
if (_thread_run != &_thread_kern_thread) {
/*
+ * This thread no longer needs to yield the CPU.
+ */
+ _thread_run->yield_on_sched_undefer = 0;
+
+ /*
* Save the current time as the time that the thread
* became inactive:
*/
@@ -204,194 +304,64 @@ __asm__("fnsave %0": :"m"(*fdata));
/*
* Accumulate the number of microseconds that this
- * thread has run for:
+ * thread has run for:
*/
- if (_thread_run->slice_usec != -1) {
- _thread_run->slice_usec += (_thread_run->last_inactive.tv_sec -
- _thread_run->last_active.tv_sec) * 1000000 +
- _thread_run->last_inactive.tv_usec -
- _thread_run->last_active.tv_usec;
- }
-
- /*
- * Check if this thread has reached its allocated
- * time slice period:
- */
- if (_thread_run->slice_usec > TIMESLICE_USEC) {
- /*
- * Flag the allocated time slice period as
- * up:
- */
- _thread_run->slice_usec = -1;
+ if ((_thread_run->slice_usec != -1) &&
+ (_thread_run->attr.sched_policy != SCHED_FIFO)) {
+ _thread_run->slice_usec +=
+ (_thread_run->last_inactive.tv_sec -
+ _thread_run->last_active.tv_sec) * 1000000 +
+ _thread_run->last_inactive.tv_usec -
+ _thread_run->last_active.tv_usec;
+
+ /* Check for time quantum exceeded: */
+ if (_thread_run->slice_usec > TIMESLICE_USEC)
+ _thread_run->slice_usec = -1;
}
- }
- /* Check if an incremental priority update is required: */
- if (((tv.tv_sec - kern_inc_prio_time.tv_sec) * 1000000 +
- tv.tv_usec - kern_inc_prio_time.tv_usec) > INC_PRIO_USEC) {
- /*
- * Enter a loop to look for run-enabled threads that
- * have not run since the last time that an
- * incremental priority update was performed:
- */
- for (pthread = _thread_link_list; pthread != NULL; pthread = pthread->nxt) {
- /* Check if this thread is unable to run: */
- if (pthread->state != PS_RUNNING) {
- }
- /*
- * Check if the last time that this thread
- * was run (as indicated by the last time it
- * became inactive) is before the time that
- * the last incremental priority check was
- * made:
- */
- else if (timercmp(&pthread->last_inactive, &kern_inc_prio_time, <)) {
+ if (_thread_run->state == PS_RUNNING) {
+ if (_thread_run->slice_usec == -1) {
+ /*
+ * The thread exceeded its time
+ * quantum or it yielded the CPU;
+ * place it at the tail of the
+ * queue for its priority.
+ */
+ PTHREAD_PRIOQ_INSERT_TAIL(_thread_run);
+ } else {
/*
- * Increment the incremental priority
- * for this thread in the hope that
- * it will eventually get a chance to
- * run:
+ * The thread hasn't exceeded its
+ * interval. Place it at the head
+ * of the queue for its priority.
*/
- (pthread->inc_prio)++;
+ PTHREAD_PRIOQ_INSERT_HEAD(_thread_run);
}
}
-
- /* Save the new incremental priority update time: */
- kern_inc_prio_time.tv_sec = tv.tv_sec;
- kern_inc_prio_time.tv_usec = tv.tv_usec;
- }
- /*
- * Enter a loop to look for the first thread of the highest
- * priority that is ready to run:
- */
- for (pthread = _thread_link_list; pthread != NULL; pthread = pthread->nxt) {
- /* Check if the current thread is unable to run: */
- if (pthread->state != PS_RUNNING) {
- }
- /*
- * Check if no run-enabled thread has been seen or if
- * the current thread has a priority higher than the
- * highest seen so far:
- */
- else if (pthread_h == NULL || (pthread->pthread_priority + pthread->inc_prio) > prio) {
+ else if (_thread_run->state == PS_DEAD) {
/*
- * Save this thread as the highest priority
- * thread seen so far:
+ * Don't add dead threads to the waiting
+ * queue, because when they're reaped, it
+ * will corrupt the queue.
*/
- pthread_h = pthread;
- prio = pthread->pthread_priority + pthread->inc_prio;
}
- }
-
- /*
- * Enter a loop to look for a thread that: 1. Is run-enabled.
- * 2. Has the required agregate priority. 3. Has not been
- * allocated its allocated time slice. 4. Became inactive
- * least recently.
- */
- for (pthread = _thread_link_list; pthread != NULL; pthread = pthread->nxt) {
- /* Check if the current thread is unable to run: */
- if (pthread->state != PS_RUNNING) {
- /* Ignore threads that are not ready to run. */
- }
-
- /*
- * Check if the current thread as an agregate
- * priority not equal to the highest priority found
- * above:
- */
- else if ((pthread->pthread_priority + pthread->inc_prio) != prio) {
+ else {
/*
- * Ignore threads which have lower agregate
- * priority.
+ * This thread has changed state and needs
+ * to be placed in the waiting queue.
*/
- }
-
- /*
- * Check if the current thread reached its time slice
- * allocation last time it ran (or if it has not run
- * yet):
- */
- else if (pthread->slice_usec == -1) {
- }
+ PTHREAD_WAITQ_INSERT(_thread_run);
- /*
- * Check if an eligible thread has not been found
- * yet, or if the current thread has an inactive time
- * earlier than the last one seen:
- */
- else if (pthread_s == NULL || timercmp(&pthread->last_inactive, &tv1, <)) {
- /*
- * Save the pointer to the current thread as
- * the most eligible thread seen so far:
- */
- pthread_s = pthread;
-
- /*
- * Save the time that the selected thread
- * became inactive:
- */
- tv1.tv_sec = pthread->last_inactive.tv_sec;
- tv1.tv_usec = pthread->last_inactive.tv_usec;
+ /* Restart the time slice: */
+ _thread_run->slice_usec = -1;
}
}
/*
- * Check if no thread was selected according to incomplete
- * time slice allocation:
+ * Get the highest priority thread in the ready queue.
*/
- if (pthread_s == NULL) {
- /*
- * Enter a loop to look for any other thread that: 1.
- * Is run-enabled. 2. Has the required agregate
- * priority. 3. Became inactive least recently.
- */
- for (pthread = _thread_link_list; pthread != NULL; pthread = pthread->nxt) {
- /*
- * Check if the current thread is unable to
- * run:
- */
- if (pthread->state != PS_RUNNING) {
- /*
- * Ignore threads that are not ready
- * to run.
- */
- }
- /*
- * Check if the current thread as an agregate
- * priority not equal to the highest priority
- * found above:
- */
- else if ((pthread->pthread_priority + pthread->inc_prio) != prio) {
- /*
- * Ignore threads which have lower
- * agregate priority.
- */
- }
- /*
- * Check if an eligible thread has not been
- * found yet, or if the current thread has an
- * inactive time earlier than the last one
- * seen:
- */
- else if (pthread_s == NULL || timercmp(&pthread->last_inactive, &tv1, <)) {
- /*
- * Save the pointer to the current
- * thread as the most eligible thread
- * seen so far:
- */
- pthread_s = pthread;
+ pthread_h = PTHREAD_PRIOQ_FIRST;
- /*
- * Save the time that the selected
- * thread became inactive:
- */
- tv1.tv_sec = pthread->last_inactive.tv_sec;
- tv1.tv_usec = pthread->last_inactive.tv_usec;
- }
- }
- }
/* Check if there are no threads ready to run: */
- if (pthread_s == NULL) {
+ if (pthread_h == NULL) {
/*
* Lock the pthread kernel by changing the pointer to
* the running thread to point to the global kernel
@@ -406,7 +376,10 @@ __asm__("fnsave %0": :"m"(*fdata));
_thread_kern_select(1);
} else {
/* Make the selected thread the current thread: */
- _thread_run = pthread_s;
+ _thread_run = pthread_h;
+
+ /* Remove the thread from the ready queue. */
+ PTHREAD_PRIOQ_REMOVE(_thread_run);
/*
* Save the current time as the time that the thread
@@ -424,149 +397,22 @@ __asm__("fnsave %0": :"m"(*fdata));
/* Reset the accumulated time slice period: */
_thread_run->slice_usec = 0;
}
- /*
- * Reset the incremental priority now that this
- * thread has been given the chance to run:
- */
- _thread_run->inc_prio = 0;
/* Check if there is more than one thread: */
if (_thread_run != _thread_link_list || _thread_run->nxt != NULL) {
/*
- * Define the maximum time before a SIGVTALRM
- * is required:
- */
- itimer.it_value.tv_sec = 0;
- itimer.it_value.tv_usec = TIMESLICE_USEC;
-
- /*
- * The interval timer is not reloaded when it
- * times out. The interval time needs to be
- * calculated every time.
- */
- itimer.it_interval.tv_sec = 0;
- itimer.it_interval.tv_usec = 0;
-
- /*
- * Enter a loop to look for threads waiting
- * for a time:
- */
- for (pthread = _thread_link_list; pthread != NULL; pthread = pthread->nxt) {
- /*
- * Check if this thread is to
- * timeout:
- */
- if (pthread->state == PS_COND_WAIT ||
- pthread->state == PS_SLEEP_WAIT ||
- pthread->state == PS_FDR_WAIT ||
- pthread->state == PS_FDW_WAIT ||
- pthread->state == PS_SELECT_WAIT) {
- /*
- * Check if this thread is to
- * wait forever:
- */
- if (pthread->wakeup_time.tv_sec == -1) {
- }
- /*
- * Check if this thread is to
- * wakeup immediately:
- */
- else if (pthread->wakeup_time.tv_sec == 0 &&
- pthread->wakeup_time.tv_nsec == 0) {
- }
- /*
- * Check if the current time
- * is after the wakeup time:
- */
- else if ((ts.tv_sec > pthread->wakeup_time.tv_sec) ||
- ((ts.tv_sec == pthread->wakeup_time.tv_sec) &&
- (ts.tv_nsec > pthread->wakeup_time.tv_nsec))) {
- } else {
- /*
- * Calculate the time
- * until this thread
- * is ready, allowing
- * for the clock
- * resolution:
- */
- ts1.tv_sec = pthread->wakeup_time.tv_sec - ts.tv_sec;
- ts1.tv_nsec = pthread->wakeup_time.tv_nsec - ts.tv_nsec +
- CLOCK_RES_NSEC;
-
- /*
- * Check for
- * underflow of the
- * nanosecond field:
- */
- if (ts1.tv_nsec < 0) {
- /*
- * Allow for
- * the
- * underflow
- * of the
- * nanosecond
- * field:
- */
- ts1.tv_sec--;
- ts1.tv_nsec += 1000000000;
- }
- /*
- * Check for overflow
- * of the nanosecond
- * field:
- */
- if (ts1.tv_nsec >= 1000000000) {
- /*
- * Allow for
- * the
- * overflow
- * of the
- * nanosecond
- * field:
- */
- ts1.tv_sec++;
- ts1.tv_nsec -= 1000000000;
- }
- /*
- * Convert the
- * timespec structure
- * to a timeval
- * structure:
- */
- TIMESPEC_TO_TIMEVAL(&tv, &ts1);
-
- /*
- * Check if the
- * thread will be
- * ready sooner than
- * the earliest one
- * found so far:
- */
- if (timercmp(&tv, &itimer.it_value, <)) {
- /*
- * Update the
- * time
- * value:
- */
- itimer.it_value.tv_sec = tv.tv_sec;
- itimer.it_value.tv_usec = tv.tv_usec;
- }
- }
- }
- }
-
- /*
* Start the interval timer for the
* calculated time interval:
*/
- if (setitimer(ITIMER_VIRTUAL, &itimer, NULL) != 0) {
+ if (setitimer(_ITIMER_SCHED_TIMER, &itimer, NULL) != 0) {
/*
* Cannot initialise the timer, so
* abort this process:
*/
- PANIC("Cannot set virtual timer");
+ PANIC("Cannot set scheduling timer");
}
}
+
/* Check if a signal context was saved: */
if (_thread_run->sig_saved == 1) {
#ifndef __alpha__
@@ -579,20 +425,30 @@ __asm__("fnsave %0": :"m"(*fdata));
/* Restore the floating point state: */
__asm__("frstor %0": :"m"(*fdata));
#endif
-
/*
* Do a sigreturn to restart the thread that
* was interrupted by a signal:
*/
- _thread_kern_in_sched = 0;
+ _thread_kern_in_sched = 0;
+
+ /*
+ * If we had a context switch, run any
+ * installed switch hooks.
+ */
+ if ((_sched_switch_hook != NULL) &&
+ (_last_user_thread != _thread_run)) {
+ thread_run_switch_hook(_last_user_thread,
+ _thread_run);
+ }
_thread_sys_sigreturn(&_thread_run->saved_sigcontext);
- } else
+ } else {
/*
* Do a longjmp to restart the thread that
* was context switched out (by a longjmp to
* a different thread):
*/
longjmp(_thread_run->saved_jmp_buf, 1);
+ }
/* This point should not be reached. */
PANIC("Thread has returned from sigreturn or longjmp");
@@ -679,7 +535,8 @@ _thread_kern_select(int wait_reqd)
* Enter a loop to process threads waiting on either file descriptors
* or times:
*/
- for (pthread = _thread_link_list; pthread != NULL; pthread = pthread->nxt) {
+ _waitingq_check_reqd = 0; /* reset flag before loop */
+ TAILQ_FOREACH (pthread, &_waitingq, pqe) {
/* Assume that this state does not time out: */
settimeout = 0;
@@ -690,12 +547,12 @@ _thread_kern_select(int wait_reqd)
* operations or timeouts:
*/
case PS_DEAD:
+ case PS_DEADLOCK:
case PS_FDLR_WAIT:
case PS_FDLW_WAIT:
case PS_FILE_WAIT:
case PS_JOIN:
case PS_MUTEX_WAIT:
- case PS_RUNNING:
case PS_SIGTHREAD:
case PS_SIGWAIT:
case PS_STATE_MAX:
@@ -704,6 +561,16 @@ _thread_kern_select(int wait_reqd)
/* Nothing to do here. */
break;
+ case PS_RUNNING:
+ /*
+ * A signal occurred and made this thread ready
+ * while in the scheduler or while the scheduling
+ * queues were protected.
+ */
+ PTHREAD_WAITQ_REMOVE(pthread);
+ PTHREAD_PRIOQ_INSERT_TAIL(pthread);
+ break;
+
/* File descriptor read wait: */
case PS_FDR_WAIT:
/* Add the file descriptor to the read set: */
@@ -1010,16 +877,16 @@ _thread_kern_select(int wait_reqd)
* descriptors that are flagged as available by the
* _select syscall:
*/
- for (pthread = _thread_link_list; pthread != NULL; pthread = pthread->nxt) {
+ TAILQ_FOREACH (pthread, &_waitingq, pqe) {
/* Process according to thread state: */
switch (pthread->state) {
/*
* States which do not depend on file
* descriptor I/O operations:
*/
- case PS_RUNNING:
case PS_COND_WAIT:
case PS_DEAD:
+ case PS_DEADLOCK:
case PS_FDLR_WAIT:
case PS_FDLW_WAIT:
case PS_FILE_WAIT:
@@ -1034,6 +901,15 @@ _thread_kern_select(int wait_reqd)
/* Nothing to do here. */
break;
+ case PS_RUNNING:
+ /*
+ * A signal occurred and made this thread
+ * ready while in the scheduler.
+ */
+ PTHREAD_WAITQ_REMOVE(pthread);
+ PTHREAD_PRIOQ_INSERT_TAIL(pthread);
+ break;
+
/* File descriptor read wait: */
case PS_FDR_WAIT:
/*
@@ -1047,6 +923,13 @@ _thread_kern_select(int wait_reqd)
* is scheduled next:
*/
pthread->state = PS_RUNNING;
+
+ /*
+ * Remove it from the waiting queue
+ * and add it to the ready queue:
+ */
+ PTHREAD_WAITQ_REMOVE(pthread);
+ PTHREAD_PRIOQ_INSERT_TAIL(pthread);
}
break;
@@ -1063,6 +946,13 @@ _thread_kern_select(int wait_reqd)
* scheduled next:
*/
pthread->state = PS_RUNNING;
+
+ /*
+ * Remove it from the waiting queue
+ * and add it to the ready queue:
+ */
+ PTHREAD_WAITQ_REMOVE(pthread);
+ PTHREAD_PRIOQ_INSERT_TAIL(pthread);
}
break;
@@ -1269,6 +1159,13 @@ _thread_kern_select(int wait_reqd)
* thread to run:
*/
pthread->state = PS_RUNNING;
+
+ /*
+ * Remove it from the waiting queue
+ * and add it to the ready queue:
+ */
+ PTHREAD_WAITQ_REMOVE(pthread);
+ PTHREAD_PRIOQ_INSERT_TAIL(pthread);
}
break;
}
@@ -1320,4 +1217,80 @@ _thread_kern_set_timeout(struct timespec * timeout)
}
return;
}
+
+void
+_thread_kern_sched_defer(void)
+{
+ /* Allow scheduling deferral to be recursive. */
+ _thread_run->sched_defer_count++;
+}
+
+void
+_thread_kern_sched_undefer(void)
+{
+ pthread_t pthread;
+ int need_resched = 0;
+
+ /*
+ * Perform checks to yield only if we are about to undefer
+ * scheduling.
+ */
+ if (_thread_run->sched_defer_count == 1) {
+ /*
+ * Check if the waiting queue needs to be examined for
+ * threads that are now ready:
+ */
+ while (_waitingq_check_reqd != 0) {
+ /* Clear the flag before checking the waiting queue: */
+ _waitingq_check_reqd = 0;
+
+ TAILQ_FOREACH(pthread, &_waitingq, pqe) {
+ if (pthread->state == PS_RUNNING) {
+ PTHREAD_WAITQ_REMOVE(pthread);
+ PTHREAD_PRIOQ_INSERT_TAIL(pthread);
+ }
+ }
+ }
+
+ /*
+ * We need to yield if a thread change of state caused a
+ * higher priority thread to become ready, or if a
+ * scheduling signal occurred while preemption was disabled.
+ */
+ if ((((pthread = PTHREAD_PRIOQ_FIRST) != NULL) &&
+ (pthread->active_priority > _thread_run->active_priority)) ||
+ (_thread_run->yield_on_sched_undefer != 0)) {
+ _thread_run->yield_on_sched_undefer = 0;
+ need_resched = 1;
+ }
+ }
+
+ if (_thread_run->sched_defer_count > 0) {
+ /* Decrement the scheduling deferral count. */
+ _thread_run->sched_defer_count--;
+
+ /* Yield the CPU if necessary: */
+ if (need_resched)
+ _thread_kern_sched(NULL);
+ }
+}
+
+static inline void
+thread_run_switch_hook(pthread_t thread_out, pthread_t thread_in)
+{
+ pthread_t tid_out = thread_out;
+ pthread_t tid_in = thread_in;
+
+ if ((tid_out != NULL) &&
+ (tid_out->flags & PTHREAD_FLAGS_PRIVATE != 0))
+ tid_out = NULL;
+ if ((tid_in != NULL) &&
+ (tid_in->flags & PTHREAD_FLAGS_PRIVATE != 0))
+ tid_in = NULL;
+
+ if ((_sched_switch_hook != NULL) && (tid_out != tid_in)) {
+ /* Run the scheduler switch hook: */
+ _sched_switch_hook(tid_out, tid_in);
+ }
+}
#endif
diff --git a/lib/libkse/thread/thr_kill.c b/lib/libkse/thread/thr_kill.c
index 7572c05..c729179 100644
--- a/lib/libkse/thread/thr_kill.c
+++ b/lib/libkse/thread/thr_kill.c
@@ -52,6 +52,13 @@ pthread_kill(pthread_t pthread, int sig)
/* Find the thread in the list of active threads: */
else if ((ret = _find_thread(pthread)) == 0) {
+ /*
+ * Guard against preemption by a scheduling signal.
+ * A change of thread state modifies the waiting
+ * and priority queues.
+ */
+ _thread_kern_sched_defer();
+
switch (pthread->state) {
case PS_SIGSUSPEND:
/*
@@ -108,6 +115,12 @@ pthread_kill(pthread_t pthread, int sig)
sigaddset(&pthread->sigpend,sig);
break;
}
+
+ /*
+ * Reenable preemption and yield if a scheduling signal
+ * occurred while in the critical region.
+ */
+ _thread_kern_sched_undefer();
}
/* Return the completion status: */
diff --git a/lib/libkse/thread/thr_mattr_init.c b/lib/libkse/thread/thr_mattr_init.c
index 73226a6..206485f 100644
--- a/lib/libkse/thread/thr_mattr_init.c
+++ b/lib/libkse/thread/thr_mattr_init.c
@@ -20,7 +20,7 @@
* THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/lib/libkse/thread/thr_mutex.c b/lib/libkse/thread/thr_mutex.c
index d3801f1..0103a6c 100644
--- a/lib/libkse/thread/thr_mutex.c
+++ b/lib/libkse/thread/thr_mutex.c
@@ -20,7 +20,7 @@
* THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@@ -33,78 +33,116 @@
#include <stdlib.h>
#include <errno.h>
#include <string.h>
+#include <sys/param.h>
+#include <sys/queue.h>
#ifdef _THREAD_SAFE
#include <pthread.h>
#include "pthread_private.h"
+
+/*
+ * Prototypes
+ */
+static inline int mutex_self_trylock(pthread_mutex_t);
+static inline int mutex_self_lock(pthread_mutex_t);
+static inline int mutex_unlock_common(pthread_mutex_t *, int);
+static void mutex_priority_adjust(pthread_mutex_t);
+static void mutex_rescan_owned (pthread_t, pthread_mutex_t);
+static inline pthread_t mutex_queue_deq(pthread_mutex_t);
+static inline void mutex_queue_remove(pthread_mutex_t, pthread_t);
+static inline void mutex_queue_enq(pthread_mutex_t, pthread_t);
+
+
static spinlock_t static_init_lock = _SPINLOCK_INITIALIZER;
+
int
pthread_mutex_init(pthread_mutex_t * mutex,
const pthread_mutexattr_t * mutex_attr)
{
- enum pthread_mutextype type;
+ enum pthread_mutextype type;
+ int protocol;
+ int ceiling;
pthread_mutex_t pmutex;
int ret = 0;
- if (mutex == NULL) {
+ if (mutex == NULL)
ret = EINVAL;
- } else {
- /* Check if default mutex attributes: */
- if (mutex_attr == NULL || *mutex_attr == NULL)
- /* Default to a fast mutex: */
- type = PTHREAD_MUTEX_DEFAULT;
- else if ((*mutex_attr)->m_type >= MUTEX_TYPE_MAX)
- /* Return an invalid argument error: */
- ret = EINVAL;
- else
- /* Use the requested mutex type: */
- type = (*mutex_attr)->m_type;
-
- /* Check no errors so far: */
- if (ret == 0) {
- if ((pmutex = (pthread_mutex_t)
- malloc(sizeof(struct pthread_mutex))) == NULL)
- ret = ENOMEM;
- else {
- /* Reset the mutex flags: */
- pmutex->m_flags = 0;
-
- /* Process according to mutex type: */
- switch (type) {
- /* Fast mutex: */
- case PTHREAD_MUTEX_DEFAULT:
- case PTHREAD_MUTEX_NORMAL:
- case PTHREAD_MUTEX_ERRORCHECK:
- /* Nothing to do here. */
- break;
-
- /* Counting mutex: */
- case PTHREAD_MUTEX_RECURSIVE:
- /* Reset the mutex count: */
- pmutex->m_data.m_count = 0;
- break;
-
- /* Trap invalid mutex types: */
- default:
- /* Return an invalid argument error: */
- ret = EINVAL;
- break;
- }
- if (ret == 0) {
- /* Initialise the rest of the mutex: */
- _thread_queue_init(&pmutex->m_queue);
- pmutex->m_flags |= MUTEX_FLAGS_INITED;
- pmutex->m_owner = NULL;
- pmutex->m_type = type;
- memset(&pmutex->lock, 0,
- sizeof(pmutex->lock));
- *mutex = pmutex;
- } else {
- free(pmutex);
- *mutex = NULL;
- }
+ /* Check if default mutex attributes: */
+ else if (mutex_attr == NULL || *mutex_attr == NULL) {
+ /* Default to a (error checking) POSIX mutex: */
+ type = PTHREAD_MUTEX_ERRORCHECK;
+ protocol = PTHREAD_PRIO_NONE;
+ ceiling = PTHREAD_MAX_PRIORITY;
+ }
+
+ /* Check mutex type: */
+ else if (((*mutex_attr)->m_type < PTHREAD_MUTEX_ERRORCHECK) ||
+ ((*mutex_attr)->m_type >= MUTEX_TYPE_MAX))
+ /* Return an invalid argument error: */
+ ret = EINVAL;
+
+ /* Check mutex protocol: */
+ else if (((*mutex_attr)->m_protocol < PTHREAD_PRIO_NONE) ||
+ ((*mutex_attr)->m_protocol > PTHREAD_MUTEX_RECURSIVE))
+ /* Return an invalid argument error: */
+ ret = EINVAL;
+
+ else {
+ /* Use the requested mutex type and protocol: */
+ type = (*mutex_attr)->m_type;
+ protocol = (*mutex_attr)->m_protocol;
+ ceiling = (*mutex_attr)->m_ceiling;
+ }
+
+ /* Check no errors so far: */
+ if (ret == 0) {
+ if ((pmutex = (pthread_mutex_t)
+ malloc(sizeof(struct pthread_mutex))) == NULL)
+ ret = ENOMEM;
+ else {
+ /* Reset the mutex flags: */
+ pmutex->m_flags = 0;
+
+ /* Process according to mutex type: */
+ switch (type) {
+ /* case PTHREAD_MUTEX_DEFAULT: */
+ case PTHREAD_MUTEX_ERRORCHECK:
+ case PTHREAD_MUTEX_NORMAL:
+ /* Nothing to do here. */
+ break;
+
+ /* Single UNIX Spec 2 recursive mutex: */
+ case PTHREAD_MUTEX_RECURSIVE:
+ /* Reset the mutex count: */
+ pmutex->m_data.m_count = 0;
+ break;
+
+ /* Trap invalid mutex types: */
+ default:
+ /* Return an invalid argument error: */
+ ret = EINVAL;
+ break;
+ }
+ if (ret == 0) {
+ /* Initialise the rest of the mutex: */
+ TAILQ_INIT(&pmutex->m_queue);
+ pmutex->m_flags |= MUTEX_FLAGS_INITED;
+ pmutex->m_owner = NULL;
+ pmutex->m_type = type;
+ pmutex->m_protocol = protocol;
+ pmutex->m_refcount = 0;
+ if (protocol == PTHREAD_PRIO_PROTECT)
+ pmutex->m_prio = ceiling;
+ else
+ pmutex->m_prio = 0;
+ pmutex->m_saved_prio = 0;
+ memset(&pmutex->lock, 0, sizeof(pmutex->lock));
+ *mutex = pmutex;
+ } else {
+ free(pmutex);
+ *mutex = NULL;
}
}
}
@@ -124,16 +162,29 @@ pthread_mutex_destroy(pthread_mutex_t * mutex)
_SPINLOCK(&(*mutex)->lock);
/*
- * Free the memory allocated for the mutex
- * structure:
+ * Check to see if this mutex is in use:
*/
- free(*mutex);
+ if (((*mutex)->m_owner != NULL) ||
+ (TAILQ_FIRST(&(*mutex)->m_queue) != NULL) ||
+ ((*mutex)->m_refcount != 0)) {
+ ret = EBUSY;
- /*
- * Leave the caller's pointer NULL now that
- * the mutex has been destroyed:
- */
- *mutex = NULL;
+ /* Unlock the mutex structure: */
+ _SPINUNLOCK(&(*mutex)->lock);
+ }
+ else {
+ /*
+ * Free the memory allocated for the mutex
+ * structure:
+ */
+ free(*mutex);
+
+ /*
+ * Leave the caller's pointer NULL now that
+ * the mutex has been destroyed:
+ */
+ *mutex = NULL;
+ }
}
/* Return the completion status: */
@@ -170,44 +221,100 @@ pthread_mutex_trylock(pthread_mutex_t * mutex)
* initialization:
*/
else if (*mutex != NULL || (ret = init_static(mutex)) == 0) {
+ /*
+ * Guard against being preempted by a scheduling signal.
+ * To support priority inheritence mutexes, we need to
+ * maintain lists of mutex ownerships for each thread as
+ * well as lists of waiting threads for each mutex. In
+ * order to propagate priorities we need to atomically
+ * walk these lists and cannot rely on a single mutex
+ * lock to provide protection against modification.
+ */
+ _thread_kern_sched_defer();
+
/* Lock the mutex structure: */
_SPINLOCK(&(*mutex)->lock);
/* Process according to mutex type: */
- switch ((*mutex)->m_type) {
- /* Fast mutex: */
- case PTHREAD_MUTEX_NORMAL:
- case PTHREAD_MUTEX_DEFAULT:
- case PTHREAD_MUTEX_ERRORCHECK:
+ switch ((*mutex)->m_protocol) {
+ /* Default POSIX mutex: */
+ case PTHREAD_PRIO_NONE:
/* Check if this mutex is not locked: */
if ((*mutex)->m_owner == NULL) {
/* Lock the mutex for the running thread: */
(*mutex)->m_owner = _thread_run;
- } else {
+
+ /* Add to the list of owned mutexes: */
+ TAILQ_INSERT_TAIL(&_thread_run->mutexq,
+ (*mutex), m_qe);
+ } else if ((*mutex)->m_owner == _thread_run)
+ ret = mutex_self_trylock(*mutex);
+ else
/* Return a busy error: */
ret = EBUSY;
- }
break;
- /* Counting mutex: */
- case PTHREAD_MUTEX_RECURSIVE:
- /* Check if this mutex is locked: */
- if ((*mutex)->m_owner != NULL) {
+ /* POSIX priority inheritence mutex: */
+ case PTHREAD_PRIO_INHERIT:
+ /* Check if this mutex is not locked: */
+ if ((*mutex)->m_owner == NULL) {
+ /* Lock the mutex for the running thread: */
+ (*mutex)->m_owner = _thread_run;
+
+ /* Track number of priority mutexes owned: */
+ _thread_run->priority_mutex_count++;
+
/*
- * Check if the mutex is locked by the running
- * thread:
+ * The mutex takes on the attributes of the
+ * running thread when there are no waiters.
*/
- if ((*mutex)->m_owner == _thread_run) {
- /* Increment the lock count: */
- (*mutex)->m_data.m_count++;
- } else {
- /* Return a busy error: */
- ret = EBUSY;
- }
- } else {
+ (*mutex)->m_prio = _thread_run->active_priority;
+ (*mutex)->m_saved_prio =
+ _thread_run->inherited_priority;
+
+ /* Add to the list of owned mutexes: */
+ TAILQ_INSERT_TAIL(&_thread_run->mutexq,
+ (*mutex), m_qe);
+ } else if ((*mutex)->m_owner == _thread_run)
+ ret = mutex_self_trylock(*mutex);
+ else
+ /* Return a busy error: */
+ ret = EBUSY;
+ break;
+
+ /* POSIX priority protection mutex: */
+ case PTHREAD_PRIO_PROTECT:
+ /* Check for a priority ceiling violation: */
+ if (_thread_run->active_priority > (*mutex)->m_prio)
+ ret = EINVAL;
+
+ /* Check if this mutex is not locked: */
+ else if ((*mutex)->m_owner == NULL) {
/* Lock the mutex for the running thread: */
(*mutex)->m_owner = _thread_run;
- }
+
+ /* Track number of priority mutexes owned: */
+ _thread_run->priority_mutex_count++;
+
+ /*
+ * The running thread inherits the ceiling
+ * priority of the mutex and executes at that
+ * priority.
+ */
+ _thread_run->active_priority = (*mutex)->m_prio;
+ (*mutex)->m_saved_prio =
+ _thread_run->inherited_priority;
+ _thread_run->inherited_priority =
+ (*mutex)->m_prio;
+
+ /* Add to the list of owned mutexes: */
+ TAILQ_INSERT_TAIL(&_thread_run->mutexq,
+ (*mutex), m_qe);
+ } else if ((*mutex)->m_owner == _thread_run)
+ ret = mutex_self_trylock(*mutex);
+ else
+ /* Return a busy error: */
+ ret = EBUSY;
break;
/* Trap invalid mutex types: */
@@ -219,6 +326,12 @@ pthread_mutex_trylock(pthread_mutex_t * mutex)
/* Unlock the mutex structure: */
_SPINUNLOCK(&(*mutex)->lock);
+
+ /*
+ * Renable preemption and yield if a scheduling signal
+ * arrived while in the critical region:
+ */
+ _thread_kern_sched_undefer();
}
/* Return the completion status: */
@@ -238,91 +351,200 @@ pthread_mutex_lock(pthread_mutex_t * mutex)
* initialization:
*/
else if (*mutex != NULL || (ret = init_static(mutex)) == 0) {
+ /*
+ * Guard against being preempted by a scheduling signal.
+ * To support priority inheritence mutexes, we need to
+ * maintain lists of mutex ownerships for each thread as
+ * well as lists of waiting threads for each mutex. In
+ * order to propagate priorities we need to atomically
+ * walk these lists and cannot rely on a single mutex
+ * lock to provide protection against modification.
+ */
+ _thread_kern_sched_defer();
+
/* Lock the mutex structure: */
_SPINLOCK(&(*mutex)->lock);
/* Process according to mutex type: */
- switch ((*mutex)->m_type) {
- /* What SS2 define as a 'normal' mutex. This has to deadlock
- on attempts to get a lock you already own. */
- case PTHREAD_MUTEX_NORMAL:
- if ((*mutex)->m_owner == _thread_run) {
- /* Intetionally deadlock */
- for (;;)
- _thread_kern_sched_state(PS_MUTEX_WAIT, __FILE__, __LINE__);
- }
- goto COMMON_LOCK;
-
- /* Return error (not OK) on attempting to re-lock */
- case PTHREAD_MUTEX_ERRORCHECK:
- if ((*mutex)->m_owner == _thread_run) {
- ret = EDEADLK;
- break;
- }
-
- /* Fast mutexes do not check for any error conditions: */
- case PTHREAD_MUTEX_DEFAULT:
- COMMON_LOCK:
- /*
- * Enter a loop to wait for the mutex to be locked by the
- * current thread:
- */
- while ((*mutex)->m_owner != _thread_run) {
- /* Check if the mutex is not locked: */
- if ((*mutex)->m_owner == NULL) {
- /* Lock the mutex for this thread: */
- (*mutex)->m_owner = _thread_run;
- } else {
- /*
- * Join the queue of threads waiting to lock
- * the mutex:
- */
- _thread_queue_enq(&(*mutex)->m_queue, _thread_run);
+ switch ((*mutex)->m_protocol) {
+ /* Default POSIX mutex: */
+ case PTHREAD_PRIO_NONE:
+ if ((*mutex)->m_owner == NULL) {
+ /* Lock the mutex for this thread: */
+ (*mutex)->m_owner = _thread_run;
- /* Wait for the mutex: */
- _thread_kern_sched_state_unlock(
- PS_MUTEX_WAIT, &(*mutex)->lock,
- __FILE__, __LINE__);
+ /* Add to the list of owned mutexes: */
+ TAILQ_INSERT_TAIL(&_thread_run->mutexq,
+ (*mutex), m_qe);
- /* Lock the mutex again: */
- _SPINLOCK(&(*mutex)->lock);
- }
+ } else if ((*mutex)->m_owner == _thread_run)
+ ret = mutex_self_lock(*mutex);
+ else {
+ /*
+ * Join the queue of threads waiting to lock
+ * the mutex:
+ */
+ mutex_queue_enq(*mutex, _thread_run);
+
+ /*
+ * Keep a pointer to the mutex this thread
+ * is waiting on:
+ */
+ _thread_run->data.mutex = *mutex;
+
+ /*
+ * Unlock the mutex structure and schedule the
+ * next thread:
+ */
+ _thread_kern_sched_state_unlock(PS_MUTEX_WAIT,
+ &(*mutex)->lock, __FILE__, __LINE__);
+
+ /* Lock the mutex structure again: */
+ _SPINLOCK(&(*mutex)->lock);
+
+ /*
+ * This thread is no longer waiting for
+ * the mutex:
+ */
+ _thread_run->data.mutex = NULL;
}
break;
- /* Counting mutex: */
- case PTHREAD_MUTEX_RECURSIVE:
- /*
- * Enter a loop to wait for the mutex to be locked by the
- * current thread:
- */
- while ((*mutex)->m_owner != _thread_run) {
- /* Check if the mutex is not locked: */
- if ((*mutex)->m_owner == NULL) {
- /* Lock the mutex for this thread: */
- (*mutex)->m_owner = _thread_run;
-
- /* Reset the lock count for this mutex: */
- (*mutex)->m_data.m_count = 0;
- } else {
- /*
- * Join the queue of threads waiting to lock
- * the mutex:
- */
- _thread_queue_enq(&(*mutex)->m_queue, _thread_run);
+ /* POSIX priority inheritence mutex: */
+ case PTHREAD_PRIO_INHERIT:
+ /* Check if this mutex is not locked: */
+ if ((*mutex)->m_owner == NULL) {
+ /* Lock the mutex for this thread: */
+ (*mutex)->m_owner = _thread_run;
- /* Wait for the mutex: */
- _thread_kern_sched_state_unlock(
- PS_MUTEX_WAIT, &(*mutex)->lock,
- __FILE__, __LINE__);
+ /* Track number of priority mutexes owned: */
+ _thread_run->priority_mutex_count++;
- /* Lock the mutex again: */
- _SPINLOCK(&(*mutex)->lock);
- }
+ /*
+ * The mutex takes on attributes of the
+ * running thread when there are no waiters.
+ */
+ (*mutex)->m_prio = _thread_run->active_priority;
+ (*mutex)->m_saved_prio =
+ _thread_run->inherited_priority;
+ _thread_run->inherited_priority =
+ (*mutex)->m_prio;
+
+ /* Add to the list of owned mutexes: */
+ TAILQ_INSERT_TAIL(&_thread_run->mutexq,
+ (*mutex), m_qe);
+
+ } else if ((*mutex)->m_owner == _thread_run)
+ ret = mutex_self_lock(*mutex);
+ else {
+ /*
+ * Join the queue of threads waiting to lock
+ * the mutex:
+ */
+ mutex_queue_enq(*mutex, _thread_run);
+
+ /*
+ * Keep a pointer to the mutex this thread
+ * is waiting on:
+ */
+ _thread_run->data.mutex = *mutex;
+
+ if (_thread_run->active_priority >
+ (*mutex)->m_prio)
+ /* Adjust priorities: */
+ mutex_priority_adjust(*mutex);
+
+ /*
+ * Unlock the mutex structure and schedule the
+ * next thread:
+ */
+ _thread_kern_sched_state_unlock(PS_MUTEX_WAIT,
+ &(*mutex)->lock, __FILE__, __LINE__);
+
+ /* Lock the mutex structure again: */
+ _SPINLOCK(&(*mutex)->lock);
+
+ /*
+ * This thread is no longer waiting for
+ * the mutex:
+ */
+ _thread_run->data.mutex = NULL;
}
+ break;
+
+ /* POSIX priority protection mutex: */
+ case PTHREAD_PRIO_PROTECT:
+ /* Check for a priority ceiling violation: */
+ if (_thread_run->active_priority > (*mutex)->m_prio)
+ ret = EINVAL;
+
+ /* Check if this mutex is not locked: */
+ else if ((*mutex)->m_owner == NULL) {
+ /*
+ * Lock the mutex for the running
+ * thread:
+ */
+ (*mutex)->m_owner = _thread_run;
+
+ /* Track number of priority mutexes owned: */
+ _thread_run->priority_mutex_count++;
- /* Increment the lock count for this mutex: */
- (*mutex)->m_data.m_count++;
+ /*
+ * The running thread inherits the ceiling
+ * priority of the mutex and executes at that
+ * priority:
+ */
+ _thread_run->active_priority = (*mutex)->m_prio;
+ (*mutex)->m_saved_prio =
+ _thread_run->inherited_priority;
+ _thread_run->inherited_priority =
+ (*mutex)->m_prio;
+
+ /* Add to the list of owned mutexes: */
+ TAILQ_INSERT_TAIL(&_thread_run->mutexq,
+ (*mutex), m_qe);
+ } else if ((*mutex)->m_owner == _thread_run)
+ ret = mutex_self_lock(*mutex);
+ else {
+ /*
+ * Join the queue of threads waiting to lock
+ * the mutex:
+ */
+ mutex_queue_enq(*mutex, _thread_run);
+
+ /*
+ * Keep a pointer to the mutex this thread
+ * is waiting on:
+ */
+ _thread_run->data.mutex = *mutex;
+
+ /* Clear any previous error: */
+ _thread_run->error = 0;
+
+ /*
+ * Unlock the mutex structure and schedule the
+ * next thread:
+ */
+ _thread_kern_sched_state_unlock(PS_MUTEX_WAIT,
+ &(*mutex)->lock, __FILE__, __LINE__);
+
+ /* Lock the mutex structure again: */
+ _SPINLOCK(&(*mutex)->lock);
+
+ /*
+ * The threads priority may have changed while
+ * waiting for the mutex causing a ceiling
+ * violation.
+ */
+ ret = _thread_run->error;
+ _thread_run->error = 0;
+
+ /*
+ * This thread is no longer waiting for
+ * the mutex:
+ */
+ _thread_run->data.mutex = NULL;
+ }
break;
/* Trap invalid mutex types: */
@@ -334,6 +556,12 @@ pthread_mutex_lock(pthread_mutex_t * mutex)
/* Unlock the mutex structure: */
_SPINUNLOCK(&(*mutex)->lock);
+
+ /*
+ * Renable preemption and yield if a scheduling signal
+ * arrived while in the critical region:
+ */
+ _thread_kern_sched_undefer();
}
/* Return the completion status: */
@@ -343,56 +571,375 @@ pthread_mutex_lock(pthread_mutex_t * mutex)
int
pthread_mutex_unlock(pthread_mutex_t * mutex)
{
- int ret = 0;
+ return (mutex_unlock_common(mutex, /* add reference */ 0));
+}
+
+int
+_mutex_cv_unlock(pthread_mutex_t * mutex)
+{
+ return (mutex_unlock_common(mutex, /* add reference */ 1));
+}
+
+int
+_mutex_cv_lock(pthread_mutex_t * mutex)
+{
+ int ret;
+ if ((ret = pthread_mutex_lock(mutex)) == 0)
+ (*mutex)->m_refcount--;
+ return (ret);
+}
+
+static inline int
+mutex_self_trylock(pthread_mutex_t mutex)
+{
+ int ret = 0;
+
+ switch (mutex->m_type) {
+
+ /* case PTHREAD_MUTEX_DEFAULT: */
+ case PTHREAD_MUTEX_ERRORCHECK:
+ case PTHREAD_MUTEX_NORMAL:
+ /*
+ * POSIX specifies that mutexes should return EDEADLK if a
+ * recursive lock is detected.
+ */
+ ret = EBUSY;
+ break;
+
+ case PTHREAD_MUTEX_RECURSIVE:
+ /* Increment the lock count: */
+ mutex->m_data.m_count++;
+ break;
+
+ default:
+ /* Trap invalid mutex types; */
+ ret = EINVAL;
+ }
+
+ return(ret);
+}
+
+static inline int
+mutex_self_lock(pthread_mutex_t mutex)
+{
+ int ret = 0;
+
+ switch (mutex->m_type) {
+ /* case PTHREAD_MUTEX_DEFAULT: */
+ case PTHREAD_MUTEX_ERRORCHECK:
+ /*
+ * POSIX specifies that mutexes should return EDEADLK if a
+ * recursive lock is detected.
+ */
+ ret = EDEADLK;
+ break;
+
+ case PTHREAD_MUTEX_NORMAL:
+ /*
+ * What SS2 define as a 'normal' mutex. Intentionally
+ * deadlock on attempts to get a lock you already own.
+ */
+ _thread_kern_sched_state_unlock(PS_DEADLOCK,
+ &mutex->lock, __FILE__, __LINE__);
+ break;
+
+ case PTHREAD_MUTEX_RECURSIVE:
+ /* Increment the lock count: */
+ mutex->m_data.m_count++;
+ break;
+
+ default:
+ /* Trap invalid mutex types; */
+ ret = EINVAL;
+ }
+
+ return(ret);
+}
+
+static inline int
+mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
+{
+ int ret = 0;
if (mutex == NULL || *mutex == NULL) {
ret = EINVAL;
} else {
+ /*
+ * Guard against being preempted by a scheduling signal.
+ * To support priority inheritence mutexes, we need to
+ * maintain lists of mutex ownerships for each thread as
+ * well as lists of waiting threads for each mutex. In
+ * order to propagate priorities we need to atomically
+ * walk these lists and cannot rely on a single mutex
+ * lock to provide protection against modification.
+ */
+ _thread_kern_sched_defer();
+
/* Lock the mutex structure: */
_SPINLOCK(&(*mutex)->lock);
/* Process according to mutex type: */
- switch ((*mutex)->m_type) {
- /* Default & normal mutexes do not really need to check for
- any error conditions: */
- case PTHREAD_MUTEX_NORMAL:
- case PTHREAD_MUTEX_DEFAULT:
- case PTHREAD_MUTEX_ERRORCHECK:
- /* Check if the running thread is not the owner of the mutex: */
+ switch ((*mutex)->m_protocol) {
+ /* Default POSIX mutex: */
+ case PTHREAD_PRIO_NONE:
+ /*
+ * Check if the running thread is not the owner of the
+ * mutex:
+ */
if ((*mutex)->m_owner != _thread_run) {
- /* Return an invalid argument error: */
- ret = (*mutex)->m_owner ? EPERM : EINVAL;
+ /*
+ * Return an invalid argument error for no
+ * owner and a permission error otherwise:
+ */
+ ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
}
+ else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
+ ((*mutex)->m_data.m_count > 1)) {
+ /* Decrement the count: */
+ (*mutex)->m_data.m_count--;
+ } else {
+ /*
+ * Clear the count in case this is recursive
+ * mutex.
+ */
+ (*mutex)->m_data.m_count = 0;
+
+ /* Remove the mutex from the threads queue. */
+ TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
+ (*mutex), m_qe);
+
+ /*
+ * Get the next thread from the queue of
+ * threads waiting on the mutex:
+ */
+ if (((*mutex)->m_owner =
+ mutex_queue_deq(*mutex)) != NULL) {
+ /*
+ * Allow the new owner of the mutex to
+ * run:
+ */
+ PTHREAD_NEW_STATE((*mutex)->m_owner,
+ PS_RUNNING);
+ }
+ }
+ break;
+
+ /* POSIX priority inheritence mutex: */
+ case PTHREAD_PRIO_INHERIT:
/*
- * Get the next thread from the queue of threads waiting on
- * the mutex:
+ * Check if the running thread is not the owner of the
+ * mutex:
*/
- else if (((*mutex)->m_owner = _thread_queue_deq(&(*mutex)->m_queue)) != NULL) {
- /* Allow the new owner of the mutex to run: */
- PTHREAD_NEW_STATE((*mutex)->m_owner,PS_RUNNING);
+ if ((*mutex)->m_owner != _thread_run) {
+ /*
+ * Return an invalid argument error for no
+ * owner and a permission error otherwise:
+ */
+ ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
+ }
+ else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
+ ((*mutex)->m_data.m_count > 1)) {
+ /* Decrement the count: */
+ (*mutex)->m_data.m_count--;
+ } else {
+ /*
+ * Clear the count in case this is recursive
+ * mutex.
+ */
+ (*mutex)->m_data.m_count = 0;
+
+ /*
+ * Restore the threads inherited priority and
+ * recompute the active priority (being careful
+ * not to override changes in the threads base
+ * priority subsequent to locking the mutex).
+ */
+ _thread_run->inherited_priority =
+ (*mutex)->m_saved_prio;
+ _thread_run->active_priority =
+ MAX(_thread_run->inherited_priority,
+ _thread_run->base_priority);
+
+ /*
+ * This thread now owns one less priority mutex.
+ */
+ _thread_run->priority_mutex_count--;
+
+ /* Remove the mutex from the threads queue. */
+ TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
+ (*mutex), m_qe);
+
+ /*
+ * Get the next thread from the queue of threads
+ * waiting on the mutex:
+ */
+ if (((*mutex)->m_owner =
+ mutex_queue_deq(*mutex)) == NULL)
+ /* This mutex has no priority. */
+ (*mutex)->m_prio = 0;
+ else {
+ /*
+ * Track number of priority mutexes owned:
+ */
+ (*mutex)->m_owner->priority_mutex_count++;
+
+ /*
+ * Add the mutex to the threads list
+ * of owned mutexes:
+ */
+ TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
+ (*mutex), m_qe);
+
+ /*
+ * The owner is no longer waiting for
+ * this mutex:
+ */
+ (*mutex)->m_owner->data.mutex = NULL;
+
+ /*
+ * Set the priority of the mutex. Since
+ * our waiting threads are in descending
+ * priority order, the priority of the
+ * mutex becomes the active priority of
+ * the thread we just dequeued.
+ */
+ (*mutex)->m_prio =
+ (*mutex)->m_owner->active_priority;
+
+ /*
+ * Save the owning threads inherited
+ * priority:
+ */
+ (*mutex)->m_saved_prio =
+ (*mutex)->m_owner->inherited_priority;
+
+ /*
+ * The owning threads inherited priority
+ * now becomes his active priority (the
+ * priority of the mutex).
+ */
+ (*mutex)->m_owner->inherited_priority =
+ (*mutex)->m_prio;
+
+ /*
+ * Allow the new owner of the mutex to
+ * run:
+ */
+ PTHREAD_NEW_STATE((*mutex)->m_owner,
+ PS_RUNNING);
+ }
}
break;
- /* Counting mutex: */
- case PTHREAD_MUTEX_RECURSIVE:
- /* Check if the running thread is not the owner of the mutex: */
+ /* POSIX priority ceiling mutex: */
+ case PTHREAD_PRIO_PROTECT:
+ /*
+ * Check if the running thread is not the owner of the
+ * mutex:
+ */
if ((*mutex)->m_owner != _thread_run) {
- /* Return an invalid argument error: */
- ret = EINVAL;
+ /*
+ * Return an invalid argument error for no
+ * owner and a permission error otherwise:
+ */
+ ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
}
- /* Check if there are still counts: */
- else if ((*mutex)->m_data.m_count > 1) {
+ else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
+ ((*mutex)->m_data.m_count > 1)) {
/* Decrement the count: */
(*mutex)->m_data.m_count--;
} else {
+ /*
+ * Clear the count in case this is recursive
+ * mutex.
+ */
(*mutex)->m_data.m_count = 0;
+
/*
- * Get the next thread from the queue of threads waiting on
- * the mutex:
+ * Restore the threads inherited priority and
+ * recompute the active priority (being careful
+ * not to override changes in the threads base
+ * priority subsequent to locking the mutex).
+ */
+ _thread_run->inherited_priority =
+ (*mutex)->m_saved_prio;
+ _thread_run->active_priority =
+ MAX(_thread_run->inherited_priority,
+ _thread_run->base_priority);
+
+ /*
+ * This thread now owns one less priority mutex.
*/
- if (((*mutex)->m_owner = _thread_queue_deq(&(*mutex)->m_queue)) != NULL) {
- /* Allow the new owner of the mutex to run: */
- PTHREAD_NEW_STATE((*mutex)->m_owner,PS_RUNNING);
+ _thread_run->priority_mutex_count--;
+
+ /* Remove the mutex from the threads queue. */
+ TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
+ (*mutex), m_qe);
+
+ /*
+ * Enter a loop to find a waiting thread whose
+ * active priority will not cause a ceiling
+ * violation:
+ */
+ while ((((*mutex)->m_owner =
+ mutex_queue_deq(*mutex)) != NULL) &&
+ ((*mutex)->m_owner->active_priority >
+ (*mutex)->m_prio)) {
+ /*
+ * Either the mutex ceiling priority
+ * been lowered and/or this threads
+ * priority has been raised subsequent
+ * to this thread being queued on the
+ * waiting list.
+ */
+ (*mutex)->m_owner->error = EINVAL;
+ PTHREAD_NEW_STATE((*mutex)->m_owner,
+ PS_RUNNING);
+ }
+
+ /* Check for a new owner: */
+ if ((*mutex)->m_owner != NULL) {
+ /*
+ * Track number of priority mutexes owned:
+ */
+ (*mutex)->m_owner->priority_mutex_count++;
+
+ /*
+ * Add the mutex to the threads list
+ * of owned mutexes:
+ */
+ TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
+ (*mutex), m_qe);
+
+ /*
+ * The owner is no longer waiting for
+ * this mutex:
+ */
+ (*mutex)->m_owner->data.mutex = NULL;
+
+ /*
+ * Save the owning threads inherited
+ * priority:
+ */
+ (*mutex)->m_saved_prio =
+ (*mutex)->m_owner->inherited_priority;
+
+ /*
+ * The owning thread inherits the
+ * ceiling priority of the mutex and
+ * executes at that priority:
+ */
+ (*mutex)->m_owner->inherited_priority =
+ (*mutex)->m_prio;
+ (*mutex)->m_owner->active_priority =
+ (*mutex)->m_prio;
+
+ /*
+ * Allow the new owner of the mutex to
+ * run:
+ */
+ PTHREAD_NEW_STATE((*mutex)->m_owner,
+ PS_RUNNING);
}
}
break;
@@ -404,11 +951,348 @@ pthread_mutex_unlock(pthread_mutex_t * mutex)
break;
}
+ if ((ret == 0) && (add_reference != 0)) {
+ /* Increment the reference count: */
+ (*mutex)->m_refcount++;
+ }
+
/* Unlock the mutex structure: */
_SPINUNLOCK(&(*mutex)->lock);
+
+ /*
+	 * Re-enable preemption and yield if a scheduling signal
+ * arrived while in the critical region:
+ */
+ _thread_kern_sched_undefer();
}
/* Return the completion status: */
return (ret);
}
+
+
+/*
+ * This function is called when a change in base priority occurs
+ * for a thread that is holding, or waiting for, a priority
+ * protection or inheritance mutex. A change in a thread's base
+ * priority can effect changes to active priorities of other threads
+ * and to the ordering of mutex locking by waiting threads.
+ *
+ * This must be called while thread scheduling is deferred.
+ */
+void
+_mutex_notify_priochange(pthread_t pthread)
+{
+	/* Adjust the priorities of any owned priority mutexes: */
+ if (pthread->priority_mutex_count > 0) {
+ /*
+ * Rescan the mutexes owned by this thread and correct
+ * their priorities to account for this threads change
+ * in priority. This has the side effect of changing
+ * the threads active priority.
+ */
+ mutex_rescan_owned(pthread, /* rescan all owned */ NULL);
+ }
+
+ /*
+	 * If this thread is waiting on a priority inheritance mutex,
+ * check for priority adjustments. A change in priority can
+ * also effect a ceiling violation(*) for a thread waiting on
+ * a priority protection mutex; we don't perform the check here
+ * as it is done in pthread_mutex_unlock.
+ *
+ * (*) It should be noted that a priority change to a thread
+ * _after_ taking and owning a priority ceiling mutex
+ * does not affect ownership of that mutex; the ceiling
+ * priority is only checked before mutex ownership occurs.
+ */
+ if (pthread->state == PS_MUTEX_WAIT) {
+ /* Lock the mutex structure: */
+ _SPINLOCK(&pthread->data.mutex->lock);
+
+ /*
+ * Check to make sure this thread is still in the same state
+ * (the spinlock above can yield the CPU to another thread):
+ */
+ if (pthread->state == PS_MUTEX_WAIT) {
+ /*
+ * Remove and reinsert this thread into the list of
+ * waiting threads to preserve decreasing priority
+ * order.
+ */
+ mutex_queue_remove(pthread->data.mutex, pthread);
+ mutex_queue_enq(pthread->data.mutex, pthread);
+
+ if (pthread->data.mutex->m_protocol ==
+ PTHREAD_PRIO_INHERIT) {
+ /* Adjust priorities: */
+ mutex_priority_adjust(pthread->data.mutex);
+ }
+ }
+
+ /* Unlock the mutex structure: */
+ _SPINUNLOCK(&pthread->data.mutex->lock);
+ }
+}
+
+/*
+ * Called when a new thread is added to the mutex waiting queue or
+ * when a threads priority changes that is already in the mutex
+ * waiting queue.
+ */
+static void
+mutex_priority_adjust(pthread_mutex_t mutex)
+{
+ pthread_t pthread_next, pthread = mutex->m_owner;
+ int temp_prio;
+ pthread_mutex_t m = mutex;
+
+ /*
+ * Calculate the mutex priority as the maximum of the highest
+ * active priority of any waiting threads and the owning threads
+ * active priority(*).
+ *
+ * (*) Because the owning threads current active priority may
+ * reflect priority inherited from this mutex (and the mutex
+ * priority may have changed) we must recalculate the active
+ * priority based on the threads saved inherited priority
+ * and its base priority.
+ */
+ pthread_next = TAILQ_FIRST(&m->m_queue); /* should never be NULL */
+ temp_prio = MAX(pthread_next->active_priority,
+ MAX(m->m_saved_prio, pthread->base_priority));
+
+ /* See if this mutex really needs adjusting: */
+ if (temp_prio == m->m_prio)
+ /* No need to propagate the priority: */
+ return;
+
+ /* Set new priority of the mutex: */
+ m->m_prio = temp_prio;
+
+ while (m != NULL) {
+ /*
+ * Save the threads priority before rescanning the
+ * owned mutexes:
+ */
+ temp_prio = pthread->active_priority;
+
+ /*
+ * Fix the priorities for all the mutexes this thread has
+ * locked since taking this mutex. This also has a
+ * potential side-effect of changing the threads priority.
+ */
+ mutex_rescan_owned(pthread, m);
+
+ /*
+ * If the thread is currently waiting on a mutex, check
+ * to see if the threads new priority has affected the
+ * priority of the mutex.
+ */
+ if ((temp_prio != pthread->active_priority) &&
+ (pthread->state == PS_MUTEX_WAIT) &&
+ (pthread->data.mutex->m_protocol == PTHREAD_PRIO_INHERIT)) {
+ /* Grab the mutex this thread is waiting on: */
+ m = pthread->data.mutex;
+
+ /*
+ * The priority for this thread has changed. Remove
+ * and reinsert this thread into the list of waiting
+ * threads to preserve decreasing priority order.
+ */
+ mutex_queue_remove(m, pthread);
+ mutex_queue_enq(m, pthread);
+
+ /* Grab the waiting thread with highest priority: */
+ pthread_next = TAILQ_FIRST(&m->m_queue);
+
+ /*
+ * Calculate the mutex priority as the maximum of the
+ * highest active priority of any waiting threads and
+ * the owning threads active priority.
+ */
+ temp_prio = MAX(pthread_next->active_priority,
+ MAX(m->m_saved_prio, m->m_owner->base_priority));
+
+ if (temp_prio != m->m_prio) {
+ /*
+ * The priority needs to be propagated to the
+ * mutex this thread is waiting on and up to
+ * the owner of that mutex.
+ */
+ m->m_prio = temp_prio;
+ pthread = m->m_owner;
+ }
+ else
+ /* We're done: */
+ m = NULL;
+
+ }
+ else
+ /* We're done: */
+ m = NULL;
+ }
+}
+
+static void
+mutex_rescan_owned (pthread_t pthread, pthread_mutex_t mutex)
+{
+ int active_prio, inherited_prio;
+ pthread_mutex_t m;
+ pthread_t pthread_next;
+
+ /*
+ * Start walking the mutexes the thread has taken since
+ * taking this mutex.
+ */
+ if (mutex == NULL) {
+ /*
+ * A null mutex means start at the beginning of the owned
+ * mutex list.
+ */
+ m = TAILQ_FIRST(&pthread->mutexq);
+
+ /* There is no inherited priority yet. */
+ inherited_prio = 0;
+ }
+ else {
+ /*
+ * The caller wants to start after a specific mutex. It
+		 * is assumed that this mutex is a priority inheritance
+ * mutex and that its priority has been correctly
+ * calculated.
+ */
+ m = TAILQ_NEXT(mutex, m_qe);
+
+ /* Start inheriting priority from the specified mutex. */
+ inherited_prio = mutex->m_prio;
+ }
+ active_prio = MAX(inherited_prio, pthread->base_priority);
+
+ while (m != NULL) {
+ /*
+		 * We only want to deal with priority inheritance
+		 * mutexes. This might be optimized by only placing
+		 * priority inheritance mutexes into the owned mutex
+ * list, but it may prove to be useful having all
+ * owned mutexes in this list. Consider a thread
+ * exiting while holding mutexes...
+ */
+ if (m->m_protocol == PTHREAD_PRIO_INHERIT) {
+ /*
+ * Fix the owners saved (inherited) priority to
+ * reflect the priority of the previous mutex.
+ */
+ m->m_saved_prio = inherited_prio;
+
+ if ((pthread_next = TAILQ_FIRST(&m->m_queue)) != NULL)
+ /* Recalculate the priority of the mutex: */
+ m->m_prio = MAX(active_prio,
+ pthread_next->active_priority);
+ else
+ m->m_prio = active_prio;
+
+ /* Recalculate new inherited and active priorities: */
+ inherited_prio = m->m_prio;
+ active_prio = MAX(m->m_prio, pthread->base_priority);
+ }
+
+ /* Advance to the next mutex owned by this thread: */
+ m = TAILQ_NEXT(m, m_qe);
+ }
+
+ /*
+ * Fix the threads inherited priority and recalculate its
+ * active priority.
+ */
+ pthread->inherited_priority = inherited_prio;
+ active_prio = MAX(inherited_prio, pthread->base_priority);
+
+ if (active_prio != pthread->active_priority) {
+ /*
+ * If this thread is in the priority queue, it must be
+ * removed and reinserted for its new priority.
+ */
+ if ((pthread != _thread_run) &&
+ (pthread->state == PS_RUNNING)) {
+ /*
+ * Remove the thread from the priority queue
+ * before changing its priority:
+ */
+ PTHREAD_PRIOQ_REMOVE(pthread);
+
+ /*
+ * POSIX states that if the priority is being
+ * lowered, the thread must be inserted at the
+ * head of the queue for its priority if it owns
+			 * any priority protection or inheritance mutexes.
+ */
+ if ((active_prio < pthread->active_priority) &&
+ (pthread->priority_mutex_count > 0)) {
+ /* Set the new active priority. */
+ pthread->active_priority = active_prio;
+
+ PTHREAD_PRIOQ_INSERT_HEAD(pthread);
+ }
+ else {
+ /* Set the new active priority. */
+ pthread->active_priority = active_prio;
+
+ PTHREAD_PRIOQ_INSERT_TAIL(pthread);
+ }
+ }
+ else {
+ /* Set the new active priority. */
+ pthread->active_priority = active_prio;
+ }
+ }
+}
+
+/*
+ * Dequeue a waiting thread from the head of a mutex queue in descending
+ * priority order.
+ */
+static inline pthread_t
+mutex_queue_deq(pthread_mutex_t mutex)
+{
+ pthread_t pthread;
+
+ if ((pthread = TAILQ_FIRST(&mutex->m_queue)) != NULL)
+ TAILQ_REMOVE(&mutex->m_queue, pthread, qe);
+
+ return(pthread);
+}
+
+/*
+ * Remove a waiting thread from a mutex queue in descending priority order.
+ */
+static inline void
+mutex_queue_remove(pthread_mutex_t mutex, pthread_t pthread)
+{
+ TAILQ_REMOVE(&mutex->m_queue, pthread, qe);
+}
+
+/*
+ * Enqueue a waiting thread to a queue in descending priority order.
+ */
+static inline void
+mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread)
+{
+ pthread_t tid = TAILQ_LAST(&mutex->m_queue, mutex_head);
+
+ /*
+ * For the common case of all threads having equal priority,
+ * we perform a quick check against the priority of the thread
+ * at the tail of the queue.
+ */
+ if ((tid == NULL) || (pthread->active_priority <= tid->active_priority))
+ TAILQ_INSERT_TAIL(&mutex->m_queue, pthread, qe);
+ else {
+ tid = TAILQ_FIRST(&mutex->m_queue);
+ while (pthread->active_priority <= tid->active_priority)
+ tid = TAILQ_NEXT(tid, qe);
+ TAILQ_INSERT_BEFORE(tid, pthread, qe);
+ }
+}
+
#endif
diff --git a/lib/libkse/thread/thr_mutex_prioceiling.c b/lib/libkse/thread/thr_mutex_prioceiling.c
new file mode 100644
index 0000000..edd9fb5
--- /dev/null
+++ b/lib/libkse/thread/thr_mutex_prioceiling.c
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Daniel Eischen.
+ * 4. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+#include <string.h>
+#include <stdlib.h>
+#include <errno.h>
+#ifdef _THREAD_SAFE
+#include <pthread.h>
+#include "pthread_private.h"
+
+int
+pthread_mutexattr_getprioceiling(pthread_mutexattr_t *mattr, int *prioceiling)
+{
+ int ret = 0;
+
+ if ((mattr == NULL) || (*mattr == NULL))
+ ret = EINVAL;
+ else if ((*mattr)->m_protocol != PTHREAD_PRIO_PROTECT)
+ ret = EINVAL;
+ else
+ *prioceiling = (*mattr)->m_ceiling;
+
+ return(ret);
+}
+
+int
+pthread_mutexattr_setprioceiling(pthread_mutexattr_t *mattr, int prioceiling)
+{
+ int ret = 0;
+
+ if ((mattr == NULL) || (*mattr == NULL))
+ ret = EINVAL;
+ else if ((*mattr)->m_protocol != PTHREAD_PRIO_PROTECT)
+ ret = EINVAL;
+ else
+ (*mattr)->m_ceiling = prioceiling;
+
+ return(ret);
+}
+
+int
+pthread_mutex_getprioceiling(pthread_mutex_t *mutex,
+			     int *prioceiling)
+{
+	int ret = 0;
+
+	if ((mutex == NULL) || (*mutex == NULL))
+		ret = EINVAL;
+	else if ((*mutex)->m_protocol != PTHREAD_PRIO_PROTECT)
+		ret = EINVAL;
+	else	/* Return the ceiling via the out parameter: */
+		*prioceiling = (*mutex)->m_prio;
+
+	return(ret);
+}
+
+int
+pthread_mutex_setprioceiling(pthread_mutex_t *mutex,
+ int prioceiling, int *old_ceiling)
+{
+ int ret = 0;
+
+ if ((mutex == NULL) || (*mutex == NULL))
+ ret = EINVAL;
+ else if ((*mutex)->m_protocol != PTHREAD_PRIO_PROTECT)
+ ret = EINVAL;
+ else {
+ /* Lock the mutex: */
+ if ((ret = pthread_mutex_lock(mutex)) == 0) {
+ /* Return the old ceiling and set the new ceiling: */
+ *old_ceiling = (*mutex)->m_prio;
+ (*mutex)->m_prio = prioceiling;
+
+ /* Unlock the mutex: */
+ ret = pthread_mutex_unlock(mutex);
+ }
+ }
+ return(ret);
+}
+#endif
diff --git a/lib/libkse/thread/thr_mutex_protocol.c b/lib/libkse/thread/thr_mutex_protocol.c
new file mode 100644
index 0000000..56c5542
--- /dev/null
+++ b/lib/libkse/thread/thr_mutex_protocol.c
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Daniel Eischen.
+ * 4. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+#include <string.h>
+#include <stdlib.h>
+#include <errno.h>
+#ifdef _THREAD_SAFE
+#include <pthread.h>
+#include "pthread_private.h"
+
+int
+pthread_mutexattr_getprotocol(pthread_mutexattr_t *mattr, int *protocol)
+{
+ int ret = 0;
+
+ if ((mattr == NULL) || (*mattr == NULL))
+ ret = EINVAL;
+ else
+ *protocol = (*mattr)->m_protocol;
+
+ return(ret);
+}
+
+int
+pthread_mutexattr_setprotocol(pthread_mutexattr_t *mattr, int protocol)
+{
+ int ret = 0;
+
+ if ((mattr == NULL) || (*mattr == NULL) ||
+ (protocol < PTHREAD_PRIO_NONE) || (protocol > PTHREAD_PRIO_PROTECT))
+ ret = EINVAL;
+ else {
+ (*mattr)->m_protocol = protocol;
+ (*mattr)->m_ceiling = PTHREAD_MAX_PRIORITY;
+ }
+ return(ret);
+}
+
+#endif
diff --git a/lib/libkse/thread/thr_priority_queue.c b/lib/libkse/thread/thr_priority_queue.c
new file mode 100644
index 0000000..516a1e0
--- /dev/null
+++ b/lib/libkse/thread/thr_priority_queue.c
@@ -0,0 +1,155 @@
+/*
+ * Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Daniel Eischen.
+ * 4. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+#include <stdlib.h>
+#include <sys/queue.h>
+#include <string.h>
+#ifdef _THREAD_SAFE
+#include <pthread.h>
+#include "pthread_private.h"
+
+/* Prototypes: */
+static void pq_insert_prio_list(pq_queue_t *pq, int prio);
+
+
+int
+_pq_init(pq_queue_t *pq, int minprio, int maxprio)
+{
+ int i, ret = 0;
+ int prioslots = maxprio - minprio + 1;
+
+ if (pq == NULL)
+ ret = -1;
+
+ /* Create the priority queue with (maxprio - minprio + 1) slots: */
+ else if ((pq->pq_lists =
+ (pq_list_t *) malloc(sizeof(pq_list_t) * prioslots)) == NULL)
+ ret = -1;
+
+ else {
+ /* Initialize the queue for each priority slot: */
+ for (i = 0; i < prioslots; i++) {
+ TAILQ_INIT(&pq->pq_lists[i].pl_head);
+ pq->pq_lists[i].pl_prio = i;
+ pq->pq_lists[i].pl_queued = 0;
+ }
+
+ /* Initialize the priority queue: */
+ TAILQ_INIT(&pq->pq_queue);
+
+ /* Remember the queue size: */
+ pq->pq_size = prioslots;
+ }
+ return (ret);
+}
+
+void
+_pq_remove(pq_queue_t *pq, pthread_t pthread)
+{
+ int prio = pthread->active_priority;
+
+ TAILQ_REMOVE(&pq->pq_lists[prio].pl_head, pthread, pqe);
+}
+
+
+void
+_pq_insert_head(pq_queue_t *pq, pthread_t pthread)
+{
+ int prio = pthread->active_priority;
+
+ TAILQ_INSERT_HEAD(&pq->pq_lists[prio].pl_head, pthread, pqe);
+ if (pq->pq_lists[prio].pl_queued == 0)
+ /* Insert the list into the priority queue: */
+ pq_insert_prio_list(pq, prio);
+}
+
+
+void
+_pq_insert_tail(pq_queue_t *pq, pthread_t pthread)
+{
+ int prio = pthread->active_priority;
+
+ TAILQ_INSERT_TAIL(&pq->pq_lists[prio].pl_head, pthread, pqe);
+ if (pq->pq_lists[prio].pl_queued == 0)
+ /* Insert the list into the priority queue: */
+ pq_insert_prio_list(pq, prio);
+}
+
+
+pthread_t
+_pq_first(pq_queue_t *pq)
+{
+ pq_list_t *pql;
+ pthread_t pthread = NULL;
+
+ while (((pql = TAILQ_FIRST(&pq->pq_queue)) != NULL) &&
+ (pthread == NULL)) {
+ if ((pthread = TAILQ_FIRST(&pql->pl_head)) == NULL) {
+ /*
+ * The priority list is empty; remove the list
+ * from the queue.
+ */
+ TAILQ_REMOVE(&pq->pq_queue, pql, pl_link);
+
+ /* Mark the list as not being in the queue: */
+ pql->pl_queued = 0;
+ }
+ }
+ return (pthread);
+}
+
+
+static void
+pq_insert_prio_list(pq_queue_t *pq, int prio)
+{
+ pq_list_t *pql;
+
+ /*
+ * The priority queue is in descending priority order. Start at
+ * the beginning of the queue and find the list before which the
+	 * new list should be inserted.
+ */
+ pql = TAILQ_FIRST(&pq->pq_queue);
+ while ((pql != NULL) && (pql->pl_prio > prio))
+ pql = TAILQ_NEXT(pql, pl_link);
+
+ /* Insert the list: */
+ if (pql == NULL)
+ TAILQ_INSERT_TAIL(&pq->pq_queue, &pq->pq_lists[prio], pl_link);
+ else
+ TAILQ_INSERT_BEFORE(pql, &pq->pq_lists[prio], pl_link);
+
+ /* Mark this list as being in the queue: */
+ pq->pq_lists[prio].pl_queued = 1;
+}
+
+#endif
diff --git a/lib/libkse/thread/thr_private.h b/lib/libkse/thread/thr_private.h
index 2d7e723..bf99a3b 100644
--- a/lib/libkse/thread/thr_private.h
+++ b/lib/libkse/thread/thr_private.h
@@ -55,6 +55,7 @@
#include <sys/time.h>
#include <sched.h>
#include <spinlock.h>
+#include <pthread_np.h>
/*
* Kernel fatal error handler macro.
@@ -65,16 +66,59 @@
#define stdout_debug(_x) _thread_sys_write(1,_x,strlen(_x));
#define stderr_debug(_x) _thread_sys_write(2,_x,strlen(_x));
+
/*
- * State change macro:
+ * Priority queue manipulation macros:
*/
-#define PTHREAD_NEW_STATE(thrd, newstate) { \
+#define PTHREAD_PRIOQ_INSERT_HEAD(thrd) _pq_insert_head(&_readyq,thrd)
+#define PTHREAD_PRIOQ_INSERT_TAIL(thrd) _pq_insert_tail(&_readyq,thrd)
+#define PTHREAD_PRIOQ_REMOVE(thrd) _pq_remove(&_readyq,thrd)
+#define PTHREAD_PRIOQ_FIRST _pq_first(&_readyq)
+
+/*
+ * Waiting queue manipulation macros:
+ */
+#define PTHREAD_WAITQ_INSERT(thrd) TAILQ_INSERT_TAIL(&_waitingq,thrd,pqe)
+#define PTHREAD_WAITQ_REMOVE(thrd) TAILQ_REMOVE(&_waitingq,thrd,pqe)
+
+/*
+ * State change macro without scheduling queue change:
+ */
+#define PTHREAD_SET_STATE(thrd, newstate) { \
(thrd)->state = newstate; \
(thrd)->fname = __FILE__; \
(thrd)->lineno = __LINE__; \
}
/*
+ * State change macro with scheduling queue change - This must be
+ * called with preemption deferred (see thread_kern_sched_[un]defer).
+ */
+#define PTHREAD_NEW_STATE(thrd, newstate) { \
+ if ((thrd)->state != newstate) { \
+ if ((thrd)->state == PS_RUNNING) { \
+ PTHREAD_PRIOQ_REMOVE(thrd); \
+ PTHREAD_WAITQ_INSERT(thrd); \
+ } else if (newstate == PS_RUNNING) { \
+ PTHREAD_WAITQ_REMOVE(thrd); \
+ PTHREAD_PRIOQ_INSERT_TAIL(thrd); \
+ } \
+ } \
+ PTHREAD_SET_STATE(thrd, newstate); \
+}
+
+/*
+ * Define the signals to be used for scheduling.
+ */
+#if defined(_PTHREADS_COMPAT_SCHED)
+#define _ITIMER_SCHED_TIMER ITIMER_VIRTUAL
+#define _SCHED_SIGNAL SIGVTALRM
+#else
+#define _ITIMER_SCHED_TIMER ITIMER_PROF
+#define _SCHED_SIGNAL SIGPROF
+#endif
+
+/*
* Queue definitions.
*/
struct pthread_queue {
@@ -84,10 +128,34 @@ struct pthread_queue {
};
/*
+ * Priority queues.
+ *
+ * XXX It'd be nice if these were contained in uthread_priority_queue.[ch].
+ */
+typedef struct pq_list {
+ TAILQ_HEAD(, pthread) pl_head; /* list of threads at this priority */
+ TAILQ_ENTRY(pq_list) pl_link; /* link for queue of priority lists */
+ int pl_prio; /* the priority of this list */
+ int pl_queued; /* is this in the priority queue */
+} pq_list_t;
+
+typedef struct pq_queue {
+ TAILQ_HEAD(, pq_list) pq_queue; /* queue of priority lists */
+ pq_list_t *pq_lists; /* array of all priority lists */
+ int pq_size; /* number of priority lists */
+} pq_queue_t;
+
+
+/*
* Static queue initialization values.
*/
#define PTHREAD_QUEUE_INITIALIZER { NULL, NULL, NULL }
+/*
+ * TailQ initialization values.
+ */
+#define TAILQ_INITIALIZER { NULL, NULL }
+
/*
* Mutex definitions.
*/
@@ -98,10 +166,31 @@ union pthread_mutex_data {
struct pthread_mutex {
enum pthread_mutextype m_type;
- struct pthread_queue m_queue;
+ int m_protocol;
+ TAILQ_HEAD(mutex_head, pthread) m_queue;
struct pthread *m_owner;
union pthread_mutex_data m_data;
long m_flags;
+ int m_refcount;
+
+ /*
+	 * Used for priority inheritance and protection.
+ *
+ * m_prio - For priority inheritence, the highest active
+ * priority (threads locking the mutex inherit
+ * this priority). For priority protection, the
+ * ceiling priority of this mutex.
+ * m_saved_prio - mutex owners inherited priority before
+ * taking the mutex, restored when the owner
+ * unlocks the mutex.
+ */
+ int m_prio;
+ int m_saved_prio;
+
+ /*
+ * Link for list of all mutexes a thread currently owns.
+ */
+ TAILQ_ENTRY(pthread_mutex) m_qe;
/*
* Lock for accesses to this structure.
@@ -120,11 +209,13 @@ struct pthread_mutex {
* Static mutex initialization values.
*/
#define PTHREAD_MUTEX_STATIC_INITIALIZER \
- { MUTEX_TYPE_FAST, PTHREAD_QUEUE_INITIALIZER, \
- NULL, { NULL }, MUTEX_FLAGS_INITED }
+ { PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, TAILQ_INITIALIZER, \
+ NULL, { NULL }, MUTEX_FLAGS_INITED, 0, 0, 0, TAILQ_INITIALIZER }
struct pthread_mutex_attr {
enum pthread_mutextype m_type;
+ int m_protocol;
+ int m_ceiling;
long m_flags;
};
@@ -137,15 +228,16 @@ enum pthread_cond_type {
};
struct pthread_cond {
- enum pthread_cond_type c_type;
- struct pthread_queue c_queue;
- void *c_data;
- long c_flags;
+ enum pthread_cond_type c_type;
+ TAILQ_HEAD(cond_head, pthread) c_queue;
+ pthread_mutex_t c_mutex;
+ void *c_data;
+ long c_flags;
/*
* Lock for accesses to this structure.
*/
- spinlock_t lock;
+ spinlock_t lock;
};
struct pthread_cond_attr {
@@ -164,7 +256,8 @@ struct pthread_cond_attr {
* Static cond initialization values.
*/
#define PTHREAD_COND_STATIC_INITIALIZER \
- { COND_TYPE_FAST, PTHREAD_QUEUE_INITIALIZER, NULL, COND_FLAGS_INITED }
+	{ COND_TYPE_FAST, TAILQ_INITIALIZER, NULL, NULL, \
+	  COND_FLAGS_INITED }
/*
* Cleanup definitions.
@@ -176,7 +269,9 @@ struct pthread_cleanup {
};
struct pthread_attr {
- int schedparam_policy;
+ int sched_policy;
+ int sched_inherit;
+ int sched_interval;
int prio;
int suspend;
int flags;
@@ -254,9 +349,11 @@ enum pthread_state {
PS_WAIT_WAIT,
PS_SIGSUSPEND,
PS_SIGWAIT,
+ PS_SPINBLOCK,
PS_JOIN,
PS_SUSPENDED,
PS_DEAD,
+ PS_DEADLOCK,
PS_STATE_MAX
};
@@ -300,8 +397,8 @@ struct pthread_select_data {
};
union pthread_wait_data {
- pthread_mutex_t *mutex;
- pthread_cond_t *cond;
+ pthread_mutex_t mutex;
+ pthread_cond_t cond;
const sigset_t *sigwait; /* Waiting on a signal in sigwait */
struct {
short fd; /* Used when thread waiting on fd */
@@ -309,6 +406,7 @@ union pthread_wait_data {
char *fname; /* Source file name for debugging.*/
} fd;
struct pthread_select_data * select_data;
+ spinlock_t *spinlock;
};
/*
@@ -419,7 +517,11 @@ struct pthread {
struct pthread_queue join_queue;
/*
- * The current thread can belong to only one queue at a time.
+ * The current thread can belong to only one scheduling queue
+ * at a time (ready or waiting queue). It can also belong to
+ * a queue of threads waiting on mutexes or condition variables.
+ * Use pqe for the scheduling queue link (both ready and waiting),
+ * and qe for other links (mutexes and condition variables).
*
* Pointer to queue (if any) on which the current thread is waiting.
*
@@ -431,8 +533,11 @@ struct pthread {
/* Pointer to next element in queue. */
struct pthread *qnxt;
+ /* Priority queue entry for this thread: */
+ TAILQ_ENTRY(pthread) pqe;
+
/* Queue entry for this thread: */
- TAILQ_ENTRY(pthread) qe;
+ TAILQ_ENTRY(pthread) qe;
/* Wait data. */
union pthread_wait_data data;
@@ -446,10 +551,59 @@ struct pthread {
/* Signal number when in state PS_SIGWAIT: */
int signo;
+ /*
+ * Set to non-zero when this thread has deferred thread
+ * scheduling. We allow for recursive deferral.
+ */
+ int sched_defer_count;
+
+ /*
+ * Set to TRUE if this thread should yield after undeferring
+ * thread scheduling.
+ */
+ int yield_on_sched_undefer;
+
/* Miscellaneous data. */
- int flags;
-#define PTHREAD_EXITING 0x0100
- char pthread_priority;
+ int flags;
+#define PTHREAD_FLAGS_PRIVATE 0x0001
+#define PTHREAD_EXITING 0x0002
+#define PTHREAD_FLAGS_QUEUED 0x0004 /* in queue (qe is used) */
+#define PTHREAD_FLAGS_TRACE 0x0008
+
+ /*
+	 * Base priority is the user-settable and retrievable priority
+ * of the thread. It is only affected by explicit calls to
+ * set thread priority and upon thread creation via a thread
+ * attribute or default priority.
+ */
+ char base_priority;
+
+ /*
+ * Inherited priority is the priority a thread inherits by
+	 * taking a priority inheritance or protection mutex. It
+ * is not affected by base priority changes. Inherited
+ * priority defaults to and remains 0 until a mutex is taken
+ * that is being waited on by any other thread whose priority
+ * is non-zero.
+ */
+ char inherited_priority;
+
+ /*
+ * Active priority is always the maximum of the threads base
+ * priority and inherited priority. When there is a change
+ * in either the real or inherited priority, the active
+ * priority must be recalculated.
+ */
+ char active_priority;
+
+ /* Number of priority ceiling or protection mutexes owned. */
+ int priority_mutex_count;
+
+ /*
+ * Queue of currently owned mutexes.
+ */
+ TAILQ_HEAD(, pthread_mutex) mutexq;
+
void *ret;
const void **specific_data;
int specific_data_count;
@@ -475,6 +629,14 @@ SCLASS struct pthread * volatile _thread_run
;
#endif
+/* Ptr to the thread structure for the last user thread to run: */
+SCLASS struct pthread * volatile _last_user_thread
+#ifdef GLOBAL_PTHREAD_PRIVATE
+= &_thread_kern_thread;
+#else
+;
+#endif
+
/*
* Ptr to the thread running in single-threaded mode or NULL if
* running multi-threaded (default POSIX behaviour).
@@ -547,7 +709,7 @@ SCLASS struct pthread *_thread_initial
/* Default thread attributes: */
SCLASS struct pthread_attr pthread_attr_default
#ifdef GLOBAL_PTHREAD_PRIVATE
-= { SCHED_RR, PTHREAD_DEFAULT_PRIORITY, PTHREAD_CREATE_RUNNING,
+= { SCHED_RR, 0, TIMESLICE_USEC, PTHREAD_DEFAULT_PRIORITY, PTHREAD_CREATE_RUNNING,
PTHREAD_CREATE_JOINABLE, NULL, NULL, NULL, PTHREAD_STACK_DEFAULT };
#else
;
@@ -556,7 +718,7 @@ SCLASS struct pthread_attr pthread_attr_default
/* Default mutex attributes: */
SCLASS struct pthread_mutex_attr pthread_mutexattr_default
#ifdef GLOBAL_PTHREAD_PRIVATE
-= { MUTEX_TYPE_FAST, 0 };
+= { PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, 0 };
#else
;
#endif
@@ -614,6 +776,27 @@ SCLASS pthread_cond_t _gc_cond
*/
struct sigaction _thread_sigact[NSIG];
+/*
+ * Scheduling queues:
+ */
+SCLASS pq_queue_t _readyq;
+SCLASS TAILQ_HEAD(, pthread) _waitingq;
+
+/* Indicates that the waitingq now has threads ready to run. */
+SCLASS volatile int _waitingq_check_reqd
+#ifdef GLOBAL_PTHREAD_PRIVATE
+= 0
+#endif
+;
+
+/* Thread switch hook. */
+SCLASS pthread_switch_routine_t _sched_switch_hook
+#ifdef GLOBAL_PTHREAD_PRIVATE
+= NULL
+#endif
+;
+
+
/* Undefine the storage class specifier: */
#undef SCLASS
@@ -645,6 +828,14 @@ void _lock_thread(void);
void _lock_thread_list(void);
void _unlock_thread(void);
void _unlock_thread_list(void);
+int _mutex_cv_lock(pthread_mutex_t *);
+int _mutex_cv_unlock(pthread_mutex_t *);
+void _mutex_notify_priochange(struct pthread *);
+int _pq_init(struct pq_queue *pq, int, int);
+void _pq_remove(struct pq_queue *pq, struct pthread *);
+void _pq_insert_head(struct pq_queue *pq, struct pthread *);
+void _pq_insert_tail(struct pq_queue *pq, struct pthread *);
+struct pthread *_pq_first(struct pq_queue *pq);
void _thread_exit(char *, int, char *);
void _thread_fd_unlock(int, int);
void _thread_fd_unlock_debug(int, int, char *, int);
@@ -657,6 +848,8 @@ void _thread_kern_sched_state(enum pthread_state,char *fname,int lineno);
void _thread_kern_sched_state_unlock(enum pthread_state state,
spinlock_t *lock, char *fname, int lineno);
void _thread_kern_set_timeout(struct timespec *);
+void _thread_kern_sched_defer(void);
+void _thread_kern_sched_undefer(void);
void _thread_sig_handler(int, int, struct sigcontext *);
void _thread_start(void);
void _thread_start_sig_handler(void);
diff --git a/lib/libkse/thread/thr_resume_np.c b/lib/libkse/thread/thr_resume_np.c
index 7c5f46a..885a457 100644
--- a/lib/libkse/thread/thr_resume_np.c
+++ b/lib/libkse/thread/thr_resume_np.c
@@ -45,8 +45,21 @@ pthread_resume_np(pthread_t thread)
if ((ret = _find_thread(thread)) == 0) {
/* The thread exists. Is it suspended? */
if (thread->state != PS_SUSPENDED) {
+ /*
+ * Guard against preemption by a scheduling signal.
+ * A change of thread state modifies the waiting
+ * and priority queues.
+ */
+ _thread_kern_sched_defer();
+
/* Allow the thread to run. */
PTHREAD_NEW_STATE(thread,PS_RUNNING);
+
+ /*
+ * Reenable preemption and yield if a scheduling
+ * signal occurred while in the critical region.
+ */
+ _thread_kern_sched_undefer();
}
}
return(ret);
diff --git a/lib/libkse/thread/thr_select.c b/lib/libkse/thread/thr_select.c
index d6202db..6d7d7dc 100644
--- a/lib/libkse/thread/thr_select.c
+++ b/lib/libkse/thread/thr_select.c
@@ -35,6 +35,7 @@
#include <string.h>
#include <sys/types.h>
#include <sys/time.h>
+#include <sys/fcntl.h>
#ifdef _THREAD_SAFE
#include <pthread.h>
#include "pthread_private.h"
@@ -47,6 +48,7 @@ select(int numfds, fd_set * readfds, fd_set * writefds,
struct timespec ts;
struct timeval zero_timeout = {0, 0};
int i, ret = 0, got_all_locks = 1;
+ int f_wait = 1;
struct pthread_select_data data;
if (numfds > _thread_dtablesize) {
@@ -59,6 +61,8 @@ select(int numfds, fd_set * readfds, fd_set * writefds,
/* Set the wake up time: */
_thread_kern_set_timeout(&ts);
+ if (ts.tv_sec == 0 && ts.tv_nsec == 0)
+ f_wait = 0;
} else {
/* Wait for ever: */
_thread_kern_set_timeout(NULL);
@@ -110,7 +114,7 @@ select(int numfds, fd_set * readfds, fd_set * writefds,
if (exceptfds != NULL) {
memcpy(&data.exceptfds, exceptfds, sizeof(data.exceptfds));
}
- if ((ret = _thread_sys_select(data.nfds, &data.readfds, &data.writefds, &data.exceptfds, &zero_timeout)) == 0) {
+ if ((ret = _thread_sys_select(data.nfds, &data.readfds, &data.writefds, &data.exceptfds, &zero_timeout)) == 0 && f_wait) {
data.nfds = numfds;
FD_ZERO(&data.readfds);
FD_ZERO(&data.writefds);
diff --git a/lib/libkse/thread/thr_setprio.c b/lib/libkse/thread/thr_setprio.c
index dd89f15..008b6b0 100644
--- a/lib/libkse/thread/thr_setprio.c
+++ b/lib/libkse/thread/thr_setprio.c
@@ -20,7 +20,7 @@
* THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@@ -38,17 +38,13 @@
int
pthread_setprio(pthread_t pthread, int prio)
{
- int ret;
+ int ret, policy;
+ struct sched_param param;
- /* Check if the priority is invalid: */
- if (prio < PTHREAD_MIN_PRIORITY || prio > PTHREAD_MAX_PRIORITY)
- /* Return an invalid argument error: */
- ret = EINVAL;
-
- /* Find the thread in the list of active threads: */
- else if ((ret = _find_thread(pthread)) == 0)
- /* Set the thread priority: */
- pthread->pthread_priority = prio;
+ if ((ret = pthread_getschedparam(pthread, &policy, &param)) == 0) {
+ param.sched_priority = prio;
+ ret = pthread_setschedparam(pthread, policy, &param);
+ }
/* Return the error status: */
return (ret);
diff --git a/lib/libkse/thread/thr_setschedparam.c b/lib/libkse/thread/thr_setschedparam.c
new file mode 100644
index 0000000..93635da
--- /dev/null
+++ b/lib/libkse/thread/thr_setschedparam.c
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Daniel Eischen.
+ * 4. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+#include <errno.h>
+#include <sys/param.h>
+#ifdef _THREAD_SAFE
+#include <pthread.h>
+#include "pthread_private.h"
+
+int
+pthread_setschedparam(pthread_t pthread, int policy, struct sched_param *param)
+{
+ int old_prio, in_readyq = 0, ret = 0;
+
+ if ((param == NULL) || (param->sched_priority < PTHREAD_MIN_PRIORITY) ||
+ (param->sched_priority > PTHREAD_MAX_PRIORITY) ||
+ (policy < SCHED_FIFO) || (policy > SCHED_RR))
+ /* Return an invalid argument error: */
+ ret = EINVAL;
+
+ /* Find the thread in the list of active threads: */
+ else if ((ret = _find_thread(pthread)) == 0) {
+ /*
+ * Guard against being preempted by a scheduling
+ * signal:
+ */
+ _thread_kern_sched_defer();
+
+ if (param->sched_priority != pthread->base_priority) {
+ /*
+ * Remove the thread from its current priority
+ * queue before any adjustments are made to its
+ * active priority:
+ */
+ if ((pthread != _thread_run) &&
+ (pthread->state == PS_RUNNING)) {
+ in_readyq = 1;
+ old_prio = pthread->active_priority;
+ PTHREAD_PRIOQ_REMOVE(pthread);
+ }
+
+ /* Set the thread base priority: */
+ pthread->base_priority = param->sched_priority;
+
+ /* Recalculate the active priority: */
+ pthread->active_priority = MAX(pthread->base_priority,
+ pthread->inherited_priority);
+
+ if (in_readyq) {
+ if ((pthread->priority_mutex_count > 0) &&
+ (old_prio > pthread->active_priority)) {
+ /*
+ * POSIX states that if the priority is
+ * being lowered, the thread must be
+ * inserted at the head of the queue for
+ * its priority if it owns any priority
+ * protection or inheritence mutexes.
+ */
+ PTHREAD_PRIOQ_INSERT_HEAD(pthread);
+ }
+ else
+ PTHREAD_PRIOQ_INSERT_TAIL(pthread);
+ }
+
+ /*
+ * Check for any mutex priority adjustments. This
+ * includes checking for a priority mutex on which
+ * this thread is waiting.
+ */
+ _mutex_notify_priochange(pthread);
+ }
+
+ /* Set the scheduling policy: */
+ pthread->attr.sched_policy = policy;
+
+ /*
+ * Renable preemption and yield if a scheduling signal
+ * arrived while in the critical region:
+ */
+ _thread_kern_sched_undefer();
+ }
+ return(ret);
+}
+#endif
diff --git a/lib/libkse/thread/thr_sig.c b/lib/libkse/thread/thr_sig.c
index 3e55d65..e51d949 100644
--- a/lib/libkse/thread/thr_sig.c
+++ b/lib/libkse/thread/thr_sig.c
@@ -38,6 +38,19 @@
#include <pthread.h>
#include "pthread_private.h"
+/*
+ * State change macro for signal handler:
+ */
+#define PTHREAD_SIG_NEW_STATE(thrd, newstate) { \
+ if ((_thread_run->sched_defer_count == 0) && \
+ (_thread_kern_in_sched == 0)) { \
+ PTHREAD_NEW_STATE(thrd, newstate); \
+ } else { \
+ _waitingq_check_reqd = 1; \
+ PTHREAD_SET_STATE(thrd, newstate); \
+ } \
+}
+
/* Static variables: */
static int volatile yield_on_unlock_thread = 0;
static spinlock_t thread_link_list_lock = _SPINLOCK_INITIALIZER;
@@ -94,14 +107,13 @@ _thread_sig_handler(int sig, int code, struct sigcontext * scp)
*/
_thread_sys_write(_thread_kern_pipe[1], &c, 1);
}
-
/* Check if the signal requires a dump of thread information: */
if (sig == SIGINFO)
/* Dump thread information to file: */
_thread_dump_info();
/* Check if an interval timer signal: */
- else if (sig == SIGVTALRM) {
+ else if (sig == _SCHED_SIGNAL) {
/* Check if the scheduler interrupt has come at an
* unfortunate time which one of the threads is
* modifying the thread list:
@@ -115,6 +127,14 @@ _thread_sig_handler(int sig, int code, struct sigcontext * scp)
yield_on_unlock_thread = 1;
/*
+ * Check if the scheduler interrupt has come when
+ * the currently running thread has deferred thread
+ * scheduling.
+ */
+ else if (_thread_run->sched_defer_count)
+ _thread_run->yield_on_sched_undefer = 1;
+
+ /*
* Check if the kernel has not been interrupted while
* executing scheduler code:
*/
@@ -170,18 +190,17 @@ _thread_sig_handler(int sig, int code, struct sigcontext * scp)
}
/*
- * Enter a loop to process each thread in the linked
+ * Enter a loop to process each thread in the waiting
* list that is sigwait-ing on a signal. Since POSIX
* doesn't specify which thread will get the signal
* if there are multiple waiters, we'll give it to the
* first one we find.
*/
- for (pthread = _thread_link_list; pthread != NULL;
- pthread = pthread->nxt) {
+ TAILQ_FOREACH(pthread, &_waitingq, pqe) {
if ((pthread->state == PS_SIGWAIT) &&
sigismember(pthread->data.sigwait, sig)) {
/* Change the state of the thread to run: */
- PTHREAD_NEW_STATE(pthread,PS_RUNNING);
+ PTHREAD_SIG_NEW_STATE(pthread,PS_RUNNING);
/* Return the signal number: */
pthread->signo = sig;
@@ -201,11 +220,19 @@ _thread_sig_handler(int sig, int code, struct sigcontext * scp)
* list:
*/
for (pthread = _thread_link_list; pthread != NULL;
- pthread = pthread->nxt)
+ pthread = pthread->nxt) {
+ pthread_t pthread_saved = _thread_run;
+
+ _thread_run = pthread;
_thread_signal(pthread,sig);
- /* Dispatch pending signals to the running thread: */
- _dispatch_signals();
+ /*
+ * Dispatch pending signals to the
+ * running thread:
+ */
+ _dispatch_signals();
+ _thread_run = pthread_saved;
+ }
}
/* Returns nothing. */
@@ -257,7 +284,7 @@ _thread_signal(pthread_t pthread, int sig)
pthread->interrupted = 1;
/* Change the state of the thread to run: */
- PTHREAD_NEW_STATE(pthread,PS_RUNNING);
+ PTHREAD_SIG_NEW_STATE(pthread,PS_RUNNING);
/* Return the signal number: */
pthread->signo = sig;
@@ -277,7 +304,7 @@ _thread_signal(pthread_t pthread, int sig)
pthread->interrupted = 1;
/* Change the state of the thread to run: */
- PTHREAD_NEW_STATE(pthread,PS_RUNNING);
+ PTHREAD_SIG_NEW_STATE(pthread,PS_RUNNING);
/* Return the signal number: */
pthread->signo = sig;
@@ -292,7 +319,7 @@ _thread_signal(pthread_t pthread, int sig)
if (!sigismember(&pthread->sigmask, sig) &&
_thread_sigact[sig - 1].sa_handler != SIG_DFL) {
/* Change the state of the thread to run: */
- PTHREAD_NEW_STATE(pthread,PS_RUNNING);
+ PTHREAD_SIG_NEW_STATE(pthread,PS_RUNNING);
/* Return the signal number: */
pthread->signo = sig;
diff --git a/lib/libkse/thread/thr_sigaction.c b/lib/libkse/thread/thr_sigaction.c
index 40f3850..73a3b21 100644
--- a/lib/libkse/thread/thr_sigaction.c
+++ b/lib/libkse/thread/thr_sigaction.c
@@ -71,7 +71,7 @@ sigaction(int sig, const struct sigaction * act, struct sigaction * oact)
* Check if the kernel needs to be advised of a change
* in signal action:
*/
- if (act != NULL && sig != SIGVTALRM && sig != SIGCHLD &&
+ if (act != NULL && sig != _SCHED_SIGNAL && sig != SIGCHLD &&
sig != SIGINFO) {
/* Initialise the global signal action structure: */
gact.sa_mask = act->sa_mask;
diff --git a/lib/libkse/thread/thr_sigpending.c b/lib/libkse/thread/thr_sigpending.c
new file mode 100644
index 0000000..44a39a6
--- /dev/null
+++ b/lib/libkse/thread/thr_sigpending.c
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 1999 Daniel Eischen <eischen@vigrid.com>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by John Birrell.
+ * 4. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+#include <signal.h>
+#include <errno.h>
+#ifdef _THREAD_SAFE
+#include <pthread.h>
+#include "pthread_private.h"
+
+int
+sigpending(sigset_t * set)
+{
+ int ret = 0;
+
+ /* Check for a null signal set pointer: */
+ if (set == NULL) {
+ /* Return an invalid argument: */
+ ret = EINVAL;
+ }
+ else {
+ *set = _thread_run->sigpend;
+ }
+ /* Return the completion status: */
+ return (ret);
+}
+#endif
diff --git a/lib/libkse/thread/thr_sigwait.c b/lib/libkse/thread/thr_sigwait.c
index 590f9db..98a5359 100644
--- a/lib/libkse/thread/thr_sigwait.c
+++ b/lib/libkse/thread/thr_sigwait.c
@@ -56,7 +56,7 @@ sigwait(const sigset_t * set, int *sig)
*/
sigdelset(&act.sa_mask, SIGKILL);
sigdelset(&act.sa_mask, SIGSTOP);
- sigdelset(&act.sa_mask, SIGVTALRM);
+ sigdelset(&act.sa_mask, _SCHED_SIGNAL);
sigdelset(&act.sa_mask, SIGCHLD);
sigdelset(&act.sa_mask, SIGINFO);
diff --git a/lib/libkse/thread/thr_spinlock.c b/lib/libkse/thread/thr_spinlock.c
index 9da115e..4da3f8c 100644
--- a/lib/libkse/thread/thr_spinlock.c
+++ b/lib/libkse/thread/thr_spinlock.c
@@ -29,7 +29,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: uthread_spinlock.c,v 1.3 1998/06/06 07:27:06 jb Exp $
+ * $Id: uthread_spinlock.c,v 1.4 1998/06/09 23:13:10 jb Exp $
*
*/
@@ -56,12 +56,9 @@ _spinlock(spinlock_t *lck)
* it before we do.
*/
while(_atomic_lock(&lck->access_lock)) {
- /* Give up the time slice: */
- sched_yield();
-
- /* Check if already locked by the running thread: */
- if (lck->lock_owner == (long) _thread_run)
- return;
+ /* Block the thread until the lock. */
+ _thread_run->data.spinlock = lck;
+ _thread_kern_sched_state(PS_SPINBLOCK, __FILE__, __LINE__);
}
/* The running thread now owns the lock: */
@@ -81,24 +78,25 @@ _spinlock(spinlock_t *lck)
void
_spinlock_debug(spinlock_t *lck, char *fname, int lineno)
{
+ int cnt = 0;
+
/*
* Try to grab the lock and loop if another thread grabs
* it before we do.
*/
while(_atomic_lock(&lck->access_lock)) {
- /* Give up the time slice: */
- sched_yield();
-
- /* Check if already locked by the running thread: */
- if (lck->lock_owner == (long) _thread_run) {
+ cnt++;
+ if (cnt > 100) {
char str[256];
- snprintf(str, sizeof(str), "%s - Warning: Thread %p attempted to lock %p from %s (%d) which it had already locked in %s (%d)\n", __progname, _thread_run, lck, fname, lineno, lck->fname, lck->lineno);
+ snprintf(str, sizeof(str), "%s - Warning: Thread %p attempted to lock %p from %s (%d) was left locked from %s (%d)\n", __progname, _thread_run, lck, fname, lineno, lck->fname, lck->lineno);
_thread_sys_write(2,str,strlen(str));
-
- /* Create a thread dump to help debug this problem: */
- _thread_dump_info();
- return;
+ sleep(1);
+ cnt = 0;
}
+
+ /* Block the thread until the lock. */
+ _thread_run->data.spinlock = lck;
+ _thread_kern_sched_state(PS_SPINBLOCK, fname, lineno);
}
/* The running thread now owns the lock: */
diff --git a/lib/libkse/thread/thr_suspend_np.c b/lib/libkse/thread/thr_suspend_np.c
index 871683a..6a6eaf4 100644
--- a/lib/libkse/thread/thr_suspend_np.c
+++ b/lib/libkse/thread/thr_suspend_np.c
@@ -51,8 +51,21 @@ pthread_suspend_np(pthread_t thread)
thread->interrupted = 1;
}
+ /*
+ * Guard against preemption by a scheduling signal.
+ * A change of thread state modifies the waiting
+ * and priority queues.
+ */
+ _thread_kern_sched_defer();
+
/* Suspend the thread. */
PTHREAD_NEW_STATE(thread,PS_SUSPENDED);
+
+ /*
+ * Reenable preemption and yield if a scheduling signal
+ * occurred while in the critical region.
+ */
+ _thread_kern_sched_undefer();
}
return(ret);
}
diff --git a/lib/libkse/thread/thr_switch_np.c b/lib/libkse/thread/thr_switch_np.c
new file mode 100644
index 0000000..8373214
--- /dev/null
+++ b/lib/libkse/thread/thr_switch_np.c
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Daniel Eischen.
+ * 4. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+#include <errno.h>
+#ifdef _THREAD_SAFE
+#include <pthread.h>
+#include <pthread_np.h>
+#include "pthread_private.h"
+
+
+int
+pthread_switch_add_np(pthread_switch_routine_t routine)
+{
+ int ret = 0;
+
+ if (routine == NULL)
+ /* Return an invalid argument error: */
+ ret = EINVAL;
+ else
+ /* Shouldn't need a lock to protect this assigment. */
+ _sched_switch_hook = routine;
+
+ return(ret);
+}
+
+int
+pthread_switch_delete_np(pthread_switch_routine_t routine)
+{
+ int ret = 0;
+
+ if (routine != _sched_switch_hook)
+ /* Return an invalid argument error: */
+ ret = EINVAL;
+ else
+ /* Shouldn't need a lock to protect this assigment. */
+ _sched_switch_hook = NULL;
+
+ return(ret);
+}
+#endif
diff --git a/lib/libpthread/thread/Makefile.inc b/lib/libpthread/thread/Makefile.inc
index 004982a..16799cf 100644
--- a/lib/libpthread/thread/Makefile.inc
+++ b/lib/libpthread/thread/Makefile.inc
@@ -1,4 +1,4 @@
-# $Id: Makefile.inc,v 1.15 1998/09/12 22:03:20 dt Exp $
+# $Id: Makefile.inc,v 1.16 1998/09/30 06:36:55 jb Exp $
# uthread sources
.PATH: ${.CURDIR}/uthread
@@ -8,10 +8,18 @@ SRCS+= \
uthread_attr_destroy.c \
uthread_attr_init.c \
uthread_attr_getdetachstate.c \
+ uthread_attr_getinheritsched.c \
+ uthread_attr_getschedparam.c \
+ uthread_attr_getschedpolicy.c \
+ uthread_attr_getscope.c \
uthread_attr_getstackaddr.c \
uthread_attr_getstacksize.c \
uthread_attr_setcreatesuspend_np.c \
uthread_attr_setdetachstate.c \
+ uthread_attr_setinheritsched.c \
+ uthread_attr_setschedparam.c \
+ uthread_attr_setschedpolicy.c \
+ uthread_attr_setscope.c \
uthread_attr_setstackaddr.c \
uthread_attr_setstacksize.c \
uthread_autoinit.cc \
@@ -44,6 +52,7 @@ SRCS+= \
uthread_getdirentries.c \
uthread_getpeername.c \
uthread_getprio.c \
+ uthread_getschedparam.c \
uthread_getsockname.c \
uthread_getsockopt.c \
uthread_info.c \
@@ -57,11 +66,14 @@ SRCS+= \
uthread_mattr_kind_np.c \
uthread_multi_np.c \
uthread_mutex.c \
+ uthread_mutex_prioceiling.c \
+ uthread_mutex_protocol.c \
uthread_mutexattr_destroy.c \
uthread_nanosleep.c \
uthread_once.c \
uthread_open.c \
uthread_pipe.c \
+ uthread_priority_queue.c \
uthread_queue.c \
uthread_read.c \
uthread_readv.c \
@@ -76,12 +88,14 @@ SRCS+= \
uthread_sendto.c \
uthread_seterrno.c \
uthread_setprio.c \
+ uthread_setschedparam.c \
uthread_setsockopt.c \
uthread_shutdown.c \
uthread_sig.c \
uthread_sigaction.c \
uthread_sigblock.c \
uthread_sigmask.c \
+ uthread_sigpending.c \
uthread_sigprocmask.c \
uthread_sigsetmask.c \
uthread_sigsuspend.c \
@@ -92,6 +106,7 @@ SRCS+= \
uthread_spec.c \
uthread_spinlock.c \
uthread_suspend_np.c \
+ uthread_switch_np.c \
uthread_vfork.c \
uthread_wait4.c \
uthread_write.c \
diff --git a/lib/libpthread/thread/thr_attr_getinheritsched.c b/lib/libpthread/thread/thr_attr_getinheritsched.c
new file mode 100644
index 0000000..38851ca
--- /dev/null
+++ b/lib/libpthread/thread/thr_attr_getinheritsched.c
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Daniel Eischen.
+ * 4. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+#include <errno.h>
+#ifdef _THREAD_SAFE
+#include <pthread.h>
+#include "pthread_private.h"
+
+int
+pthread_attr_getinheritsched(pthread_attr_t *attr, int *sched_inherit)
+{
+ int ret = 0;
+
+ if ((attr == NULL) || (*attr == NULL))
+ ret = EINVAL;
+ else
+ *sched_inherit = (*attr)->sched_inherit;
+
+ return(ret);
+}
+#endif
diff --git a/lib/libpthread/thread/thr_attr_getschedparam.c b/lib/libpthread/thread/thr_attr_getschedparam.c
new file mode 100644
index 0000000..ea5e19d
--- /dev/null
+++ b/lib/libpthread/thread/thr_attr_getschedparam.c
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Daniel Eischen.
+ * 4. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+#include <errno.h>
+#ifdef _THREAD_SAFE
+#include <pthread.h>
+#include "pthread_private.h"
+
+int
+pthread_attr_getschedparam(pthread_attr_t *attr, struct sched_param *param)
+{
+ int ret = 0;
+
+ if ((attr == NULL) || (*attr == NULL) || (param == NULL))
+ ret = EINVAL;
+ else
+ param->sched_priority = (*attr)->prio;
+
+ return(ret);
+}
+#endif
diff --git a/lib/libpthread/thread/thr_attr_getschedpolicy.c b/lib/libpthread/thread/thr_attr_getschedpolicy.c
new file mode 100644
index 0000000..0b9ff59
--- /dev/null
+++ b/lib/libpthread/thread/thr_attr_getschedpolicy.c
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Daniel Eischen.
+ * 4. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+#include <errno.h>
+#ifdef _THREAD_SAFE
+#include <pthread.h>
+#include "pthread_private.h"
+
+int
+pthread_attr_getschedpolicy(pthread_attr_t *attr, int *policy)
+{
+ int ret = 0;
+
+ if ((attr == NULL) || (*attr == NULL) || (policy == NULL))
+ ret = EINVAL;
+ else
+ *policy = (*attr)->sched_policy;
+
+ return(ret);
+}
+#endif
diff --git a/lib/libpthread/thread/thr_attr_getscope.c b/lib/libpthread/thread/thr_attr_getscope.c
new file mode 100644
index 0000000..f84b104
--- /dev/null
+++ b/lib/libpthread/thread/thr_attr_getscope.c
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Daniel Eischen.
+ * 4. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+#include <errno.h>
+#ifdef _THREAD_SAFE
+#include <pthread.h>
+#include "pthread_private.h"
+
+int
+pthread_attr_getscope(pthread_attr_t *attr, int *contentionscope)
+{
+ int ret = 0;
+
+ if ((attr == NULL) || (*attr == NULL) || (contentionscope == NULL))
+ /* Return an invalid argument: */
+ ret = EINVAL;
+
+ else
+ *contentionscope = (*attr)->flags & PTHREAD_SCOPE_SYSTEM ?
+ PTHREAD_SCOPE_SYSTEM : PTHREAD_SCOPE_PROCESS;
+
+ return(ret);
+}
+#endif
diff --git a/lib/libpthread/thread/thr_attr_setinheritsched.c b/lib/libpthread/thread/thr_attr_setinheritsched.c
new file mode 100644
index 0000000..017b7df
--- /dev/null
+++ b/lib/libpthread/thread/thr_attr_setinheritsched.c
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Daniel Eischen.
+ * 4. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+#include <errno.h>
+#ifdef _THREAD_SAFE
+#include <pthread.h>
+#include "pthread_private.h"
+
+int
+pthread_attr_setinheritsched(pthread_attr_t *attr, int sched_inherit)
+{
+ int ret = 0;
+
+ if ((attr == NULL) || (*attr == NULL))
+ ret = EINVAL;
+ else
+ (*attr)->sched_inherit = sched_inherit;
+
+ return(ret);
+}
+#endif
diff --git a/lib/libpthread/thread/thr_attr_setschedparam.c b/lib/libpthread/thread/thr_attr_setschedparam.c
new file mode 100644
index 0000000..5c860a8
--- /dev/null
+++ b/lib/libpthread/thread/thr_attr_setschedparam.c
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Daniel Eischen.
+ * 4. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+#include <errno.h>
+#ifdef _THREAD_SAFE
+#include <pthread.h>
+#include "pthread_private.h"
+
+int
+pthread_attr_setschedparam(pthread_attr_t *attr, struct sched_param *param)
+{
+ int ret = 0;
+
+ if ((attr == NULL) || (*attr == NULL) || (param == NULL))
+ ret = EINVAL;
+ else
+ (*attr)->prio = param->sched_priority;
+
+ return(ret);
+}
+#endif
diff --git a/lib/libpthread/thread/thr_attr_setschedpolicy.c b/lib/libpthread/thread/thr_attr_setschedpolicy.c
new file mode 100644
index 0000000..3d5aa3c
--- /dev/null
+++ b/lib/libpthread/thread/thr_attr_setschedpolicy.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Daniel Eischen.
+ * 4. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+#include <errno.h>
+#ifdef _THREAD_SAFE
+#include <pthread.h>
+#include "pthread_private.h"
+
+int
+pthread_attr_setschedpolicy(pthread_attr_t *attr, int policy)
+{
+ int ret = 0;
+
+ if ((attr == NULL) || (*attr == NULL) || (policy < SCHED_FIFO) ||
+ (policy > SCHED_RR))
+ ret = EINVAL;
+ else
+ (*attr)->sched_policy = policy;
+
+ return(ret);
+}
+#endif
diff --git a/lib/libpthread/thread/thr_attr_setscope.c b/lib/libpthread/thread/thr_attr_setscope.c
new file mode 100644
index 0000000..24dead6
--- /dev/null
+++ b/lib/libpthread/thread/thr_attr_setscope.c
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Daniel Eischen.
+ * 4. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+#include <errno.h>
+#ifdef _THREAD_SAFE
+#include <pthread.h>
+#include "pthread_private.h"
+
+int
+pthread_attr_setscope(pthread_attr_t *attr, int contentionscope)
+{
+	int	ret = 0;
+
+	if ((attr == NULL) || (*attr == NULL) ||
+	    ((contentionscope != PTHREAD_SCOPE_PROCESS) &&
+	    (contentionscope != PTHREAD_SCOPE_SYSTEM)))
+		/* Return an invalid argument: */
+		ret = EINVAL;
+
+	else if (contentionscope == PTHREAD_SCOPE_SYSTEM)
+		/* We don't support system wide contention: */
+#ifdef NOT_YET
+		ret = ENOTSUP;
+#else
+		ret = EOPNOTSUPP;
+#endif
+
+	else
+		(*attr)->flags |= contentionscope;
+
+	return(ret);
+}
+#endif
diff --git a/lib/libpthread/thread/thr_cond.c b/lib/libpthread/thread/thr_cond.c
index a085ea6..c090d79 100644
--- a/lib/libpthread/thread/thr_cond.c
+++ b/lib/libpthread/thread/thr_cond.c
@@ -20,7 +20,7 @@
* THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@@ -37,6 +37,14 @@
#include <pthread.h>
#include "pthread_private.h"
+/*
+ * Prototypes
+ */
+static inline pthread_t cond_queue_deq(pthread_cond_t);
+static inline void cond_queue_remove(pthread_cond_t, pthread_t);
+static inline void cond_queue_enq(pthread_cond_t, pthread_t);
+
+
int
pthread_cond_init(pthread_cond_t * cond, const pthread_condattr_t * cond_attr)
{
@@ -83,9 +91,10 @@ pthread_cond_init(pthread_cond_t * cond, const pthread_condattr_t * cond_attr)
* Initialise the condition variable
* structure:
*/
- _thread_queue_init(&pcond->c_queue);
+ TAILQ_INIT(&pcond->c_queue);
pcond->c_flags |= COND_FLAGS_INITED;
pcond->c_type = type;
+ pcond->c_mutex = NULL;
memset(&pcond->lock,0,sizeof(pcond->lock));
*cond = pcond;
}
@@ -144,33 +153,57 @@ pthread_cond_wait(pthread_cond_t * cond, pthread_mutex_t * mutex)
switch ((*cond)->c_type) {
/* Fast condition variable: */
case COND_TYPE_FAST:
- /* Wait forever: */
- _thread_run->wakeup_time.tv_sec = -1;
-
- /*
- * Queue the running thread for the condition
- * variable:
- */
- _thread_queue_enq(&(*cond)->c_queue, _thread_run);
-
- /* Unlock the mutex: */
- if ((rval = pthread_mutex_unlock(mutex)) != 0) {
- /*
- * Cannot unlock the mutex, so remove the
- * running thread from the condition
- * variable queue:
- */
- _thread_queue_deq(&(*cond)->c_queue);
-
+ if ((mutex == NULL) || (((*cond)->c_mutex != NULL) &&
+ ((*cond)->c_mutex != *mutex))) {
/* Unlock the condition variable structure: */
_SPINUNLOCK(&(*cond)->lock);
+
+ /* Return invalid argument error: */
+ rval = EINVAL;
} else {
- /* Schedule the next thread: */
- _thread_kern_sched_state_unlock(PS_COND_WAIT,
- &(*cond)->lock, __FILE__, __LINE__);
+ /* Reset the timeout flag: */
+ _thread_run->timeout = 0;
- /* Lock the mutex: */
- rval = pthread_mutex_lock(mutex);
+ /*
+ * Queue the running thread for the condition
+ * variable:
+ */
+ cond_queue_enq(*cond, _thread_run);
+
+ /* Remember the mutex that is being used: */
+ (*cond)->c_mutex = *mutex;
+
+ /* Wait forever: */
+ _thread_run->wakeup_time.tv_sec = -1;
+
+ /* Unlock the mutex: */
+ if ((rval = _mutex_cv_unlock(mutex)) != 0) {
+ /*
+ * Cannot unlock the mutex, so remove
+ * the running thread from the condition
+ * variable queue:
+ */
+ cond_queue_remove(*cond, _thread_run);
+
+ /* Check for no more waiters: */
+ if (TAILQ_FIRST(&(*cond)->c_queue) ==
+ NULL)
+ (*cond)->c_mutex = NULL;
+
+ /* Unlock the condition variable structure: */
+ _SPINUNLOCK(&(*cond)->lock);
+ }
+ else {
+ /*
+ * Schedule the next thread and unlock
+ * the condition variable structure:
+ */
+ _thread_kern_sched_state_unlock(PS_COND_WAIT,
+ &(*cond)->lock, __FILE__, __LINE__);
+
+ /* Lock the mutex: */
+ rval = _mutex_cv_lock(mutex);
+ }
}
break;
@@ -183,7 +216,6 @@ pthread_cond_wait(pthread_cond_t * cond, pthread_mutex_t * mutex)
rval = EINVAL;
break;
}
-
}
/* Return the completion status: */
@@ -213,42 +245,88 @@ pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
switch ((*cond)->c_type) {
/* Fast condition variable: */
case COND_TYPE_FAST:
- /* Set the wakeup time: */
- _thread_run->wakeup_time.tv_sec = abstime->tv_sec;
- _thread_run->wakeup_time.tv_nsec = abstime->tv_nsec;
-
- /* Reset the timeout flag: */
- _thread_run->timeout = 0;
-
- /*
- * Queue the running thread for the condition
- * variable:
- */
- _thread_queue_enq(&(*cond)->c_queue, _thread_run);
-
- /* Unlock the mutex: */
- if ((rval = pthread_mutex_unlock(mutex)) != 0) {
- /*
- * Cannot unlock the mutex, so remove the
- * running thread from the condition
- * variable queue:
- */
- _thread_queue_deq(&(*cond)->c_queue);
+ if ((mutex == NULL) || (((*cond)->c_mutex != NULL) &&
+ ((*cond)->c_mutex != *mutex))) {
+ /* Return invalid argument error: */
+ rval = EINVAL;
/* Unlock the condition variable structure: */
_SPINUNLOCK(&(*cond)->lock);
} else {
- /* Schedule the next thread: */
- _thread_kern_sched_state_unlock(PS_COND_WAIT,
- &(*cond)->lock, __FILE__, __LINE__);
+ /* Set the wakeup time: */
+ _thread_run->wakeup_time.tv_sec =
+ abstime->tv_sec;
+ _thread_run->wakeup_time.tv_nsec =
+ abstime->tv_nsec;
- /* Lock the mutex: */
- if ((rval = pthread_mutex_lock(mutex)) != 0) {
- }
- /* Check if the wait timed out: */
- else if (_thread_run->timeout) {
- /* Return a timeout error: */
- rval = ETIMEDOUT;
+ /* Reset the timeout flag: */
+ _thread_run->timeout = 0;
+
+ /*
+ * Queue the running thread for the condition
+ * variable:
+ */
+ cond_queue_enq(*cond, _thread_run);
+
+ /* Remember the mutex that is being used: */
+ (*cond)->c_mutex = *mutex;
+
+ /* Unlock the mutex: */
+ if ((rval = _mutex_cv_unlock(mutex)) != 0) {
+ /*
+ * Cannot unlock the mutex, so remove
+ * the running thread from the condition
+ * variable queue:
+ */
+ cond_queue_remove(*cond, _thread_run);
+
+ /* Check for no more waiters: */
+ if (TAILQ_FIRST(&(*cond)->c_queue) == NULL)
+ (*cond)->c_mutex = NULL;
+
+ /* Unlock the condition variable structure: */
+ _SPINUNLOCK(&(*cond)->lock);
+ } else {
+ /*
+ * Schedule the next thread and unlock
+ * the condition variable structure:
+ */
+ _thread_kern_sched_state_unlock(PS_COND_WAIT,
+ &(*cond)->lock, __FILE__, __LINE__);
+
+				/* Check if the wait timed out: */
+ if (_thread_run->timeout == 0) {
+ /* Lock the mutex: */
+ rval = _mutex_cv_lock(mutex);
+ }
+ else {
+ /* Lock the condition variable structure: */
+ _SPINLOCK(&(*cond)->lock);
+
+ /*
+ * The wait timed out; remove
+ * the thread from the condition
+ * variable queue:
+ */
+ cond_queue_remove(*cond,
+ _thread_run);
+
+ /* Check for no more waiters: */
+ if (TAILQ_FIRST(&(*cond)->c_queue) == NULL)
+ (*cond)->c_mutex = NULL;
+
+					/* Unlock the condition variable structure: */
+ _SPINUNLOCK(&(*cond)->lock);
+
+ /* Return a timeout error: */
+ rval = ETIMEDOUT;
+
+ /*
+ * Lock the mutex and ignore
+ * any errors:
+ */
+ (void)_mutex_cv_lock(mutex);
+ }
}
}
break;
@@ -273,7 +351,6 @@ int
pthread_cond_signal(pthread_cond_t * cond)
{
int rval = 0;
- int status;
pthread_t pthread;
if (cond == NULL || *cond == NULL)
@@ -286,11 +363,22 @@ pthread_cond_signal(pthread_cond_t * cond)
switch ((*cond)->c_type) {
/* Fast condition variable: */
case COND_TYPE_FAST:
- /* Bring the next thread off the condition queue: */
- if ((pthread = _thread_queue_deq(&(*cond)->c_queue)) != NULL) {
+ /*
+ * Enter a loop to dequeue threads from the condition
+ * queue until we find one that hasn't previously
+ * timed out.
+ */
+ while (((pthread = cond_queue_deq(*cond)) != NULL) &&
+ (pthread->timeout != 0)) {
+ }
+
+ if (pthread != NULL)
/* Allow the thread to run: */
PTHREAD_NEW_STATE(pthread,PS_RUNNING);
- }
+
+ /* Check for no more waiters: */
+ if (TAILQ_FIRST(&(*cond)->c_queue) == NULL)
+ (*cond)->c_mutex = NULL;
break;
/* Trap invalid condition variable types: */
@@ -312,12 +400,21 @@ int
pthread_cond_broadcast(pthread_cond_t * cond)
{
int rval = 0;
- int status;
pthread_t pthread;
if (cond == NULL || *cond == NULL)
rval = EINVAL;
else {
+ /*
+ * Guard against preemption by a scheduling signal.
+ * A change of thread state modifies the waiting
+ * and priority queues. In addition, we must assure
+ * that all threads currently waiting on the condition
+ * variable are signaled and are not timedout by a
+ * scheduling signal that causes a preemption.
+ */
+ _thread_kern_sched_defer();
+
/* Lock the condition variable structure: */
_SPINLOCK(&(*cond)->lock);
@@ -329,11 +426,17 @@ pthread_cond_broadcast(pthread_cond_t * cond)
* Enter a loop to bring all threads off the
* condition queue:
*/
- while ((pthread =
- _thread_queue_deq(&(*cond)->c_queue)) != NULL) {
- /* Allow the thread to run: */
- PTHREAD_NEW_STATE(pthread,PS_RUNNING);
+ while ((pthread = cond_queue_deq(*cond)) != NULL) {
+ /*
+ * The thread is already running if the
+ * timeout flag is set.
+ */
+ if (pthread->timeout == 0)
+ PTHREAD_NEW_STATE(pthread,PS_RUNNING);
}
+
+ /* There are no more waiting threads: */
+ (*cond)->c_mutex = NULL;
break;
/* Trap invalid condition variable types: */
@@ -345,9 +448,74 @@ pthread_cond_broadcast(pthread_cond_t * cond)
/* Unlock the condition variable structure: */
_SPINUNLOCK(&(*cond)->lock);
+
+ /* Reenable preemption and yield if necessary.
+ */
+ _thread_kern_sched_undefer();
}
/* Return the completion status: */
return (rval);
}
+
+/*
+ * Dequeue a waiting thread from the head of a condition queue in
+ * descending priority order.
+ */
+static inline pthread_t
+cond_queue_deq(pthread_cond_t cond)
+{
+ pthread_t pthread;
+
+ if ((pthread = TAILQ_FIRST(&cond->c_queue)) != NULL) {
+ TAILQ_REMOVE(&cond->c_queue, pthread, qe);
+ pthread->flags &= ~PTHREAD_FLAGS_QUEUED;
+ }
+
+ return(pthread);
+}
+
+/*
+ * Remove a waiting thread from a condition queue in descending priority
+ * order.
+ */
+static inline void
+cond_queue_remove(pthread_cond_t cond, pthread_t pthread)
+{
+ /*
+ * Because pthread_cond_timedwait() can timeout as well
+ * as be signaled by another thread, it is necessary to
+ * guard against removing the thread from the queue if
+ * it isn't in the queue.
+ */
+ if (pthread->flags & PTHREAD_FLAGS_QUEUED) {
+ TAILQ_REMOVE(&cond->c_queue, pthread, qe);
+ pthread->flags &= ~PTHREAD_FLAGS_QUEUED;
+ }
+}
+
+/*
+ * Enqueue a waiting thread to a condition queue in descending priority
+ * order.
+ */
+static inline void
+cond_queue_enq(pthread_cond_t cond, pthread_t pthread)
+{
+ pthread_t tid = TAILQ_LAST(&cond->c_queue, cond_head);
+
+ /*
+ * For the common case of all threads having equal priority,
+ * we perform a quick check against the priority of the thread
+ * at the tail of the queue.
+ */
+ if ((tid == NULL) || (pthread->active_priority <= tid->active_priority))
+ TAILQ_INSERT_TAIL(&cond->c_queue, pthread, qe);
+ else {
+ tid = TAILQ_FIRST(&cond->c_queue);
+ while (pthread->active_priority <= tid->active_priority)
+ tid = TAILQ_NEXT(tid, qe);
+ TAILQ_INSERT_BEFORE(tid, pthread, qe);
+ }
+ pthread->flags |= PTHREAD_FLAGS_QUEUED;
+}
#endif
diff --git a/lib/libpthread/thread/thr_create.c b/lib/libpthread/thread/thr_create.c
index 4169461..438e527 100644
--- a/lib/libpthread/thread/thr_create.c
+++ b/lib/libpthread/thread/thr_create.c
@@ -99,12 +99,6 @@ pthread_create(pthread_t * thread, const pthread_attr_t * attr,
*/
new_thread->magic = PTHREAD_MAGIC;
- if (pattr->suspend == PTHREAD_CREATE_SUSPENDED) {
- PTHREAD_NEW_STATE(new_thread,PS_SUSPENDED);
- } else {
- PTHREAD_NEW_STATE(new_thread,PS_RUNNING);
- }
-
/* Initialise the thread for signals: */
new_thread->sigmask = _thread_run->sigmask;
@@ -162,21 +156,26 @@ pthread_create(pthread_t * thread, const pthread_attr_t * attr,
*/
if (new_thread->attr.flags & PTHREAD_INHERIT_SCHED) {
/* Copy the scheduling attributes: */
- new_thread->pthread_priority = _thread_run->pthread_priority;
- new_thread->attr.prio = _thread_run->pthread_priority;
- new_thread->attr.schedparam_policy = _thread_run->attr.schedparam_policy;
+ new_thread->base_priority = _thread_run->base_priority;
+ new_thread->attr.prio = _thread_run->base_priority;
+ new_thread->attr.sched_policy = _thread_run->attr.sched_policy;
} else {
/*
* Use just the thread priority, leaving the
* other scheduling attributes as their
* default values:
*/
- new_thread->pthread_priority = new_thread->attr.prio;
+ new_thread->base_priority = new_thread->attr.prio;
}
+ new_thread->active_priority = new_thread->base_priority;
+ new_thread->inherited_priority = 0;
/* Initialise the join queue for the new thread: */
_thread_queue_init(&(new_thread->join_queue));
+ /* Initialize the mutex queue: */
+ TAILQ_INIT(&new_thread->mutexq);
+
/* Initialise hooks in the thread structure: */
new_thread->specific_data = NULL;
new_thread->cleanup = NULL;
@@ -200,6 +199,27 @@ pthread_create(pthread_t * thread, const pthread_attr_t * attr,
/* Unlock the thread list: */
_unlock_thread_list();
+ /*
+ * Guard against preemption by a scheduling signal.
+ * A change of thread state modifies the waiting
+ * and priority queues.
+ */
+ _thread_kern_sched_defer();
+
+ if (pattr->suspend == PTHREAD_CREATE_SUSPENDED) {
+ new_thread->state = PS_SUSPENDED;
+ PTHREAD_WAITQ_INSERT(new_thread);
+ } else {
+ new_thread->state = PS_RUNNING;
+ PTHREAD_PRIOQ_INSERT_TAIL(new_thread);
+ }
+
+ /*
+ * Reenable preemption and yield if a scheduling
+ * signal occurred while in the critical region.
+ */
+ _thread_kern_sched_undefer();
+
/* Return a pointer to the thread structure: */
(*thread) = new_thread;
diff --git a/lib/libpthread/thread/thr_detach.c b/lib/libpthread/thread/thr_detach.c
index da456bf..05da832 100644
--- a/lib/libpthread/thread/thr_detach.c
+++ b/lib/libpthread/thread/thr_detach.c
@@ -52,11 +52,24 @@ pthread_detach(pthread_t pthread)
/* Flag the thread as detached: */
pthread->attr.flags |= PTHREAD_DETACHED;
+ /*
+ * Guard against preemption by a scheduling signal.
+ * A change of thread state modifies the waiting
+ * and priority queues.
+ */
+ _thread_kern_sched_defer();
+
/* Enter a loop to bring all threads off the join queue: */
while ((next_thread = _thread_queue_deq(&pthread->join_queue)) != NULL) {
/* Make the thread run: */
PTHREAD_NEW_STATE(next_thread,PS_RUNNING);
}
+
+ /*
+ * Reenable preemption and yield if a scheduling signal
+ * occurred while in the critical region.
+ */
+ _thread_kern_sched_undefer();
} else
/* Return an error: */
rval = EINVAL;
diff --git a/lib/libpthread/thread/thr_exit.c b/lib/libpthread/thread/thr_exit.c
index a5fc400..93b8b83 100644
--- a/lib/libpthread/thread/thr_exit.c
+++ b/lib/libpthread/thread/thr_exit.c
@@ -49,7 +49,7 @@ void _exit(int status)
itimer.it_interval.tv_usec = 0;
itimer.it_value.tv_sec = 0;
itimer.it_value.tv_usec = 0;
- setitimer(ITIMER_VIRTUAL, &itimer, NULL);
+ setitimer(_ITIMER_SCHED_TIMER, &itimer, NULL);
/* Close the pthread kernel pipe: */
_thread_sys_close(_thread_kern_pipe[0]);
@@ -127,6 +127,13 @@ pthread_exit(void *status)
/* Run the thread-specific data destructors: */
_thread_cleanupspecific();
}
+
+ /*
+ * Guard against preemption by a scheduling signal. A change of
+ * thread state modifies the waiting and priority queues.
+ */
+ _thread_kern_sched_defer();
+
/* Check if there are any threads joined to this one: */
while ((pthread = _thread_queue_deq(&(_thread_run->join_queue))) != NULL) {
/* Wake the joined thread and let it detach this thread: */
@@ -134,6 +141,12 @@ pthread_exit(void *status)
}
/*
+ * Reenable preemption and yield if a scheduling signal
+ * occurred while in the critical region.
+ */
+ _thread_kern_sched_undefer();
+
+ /*
* Lock the garbage collector mutex to ensure that the garbage
* collector is not using the dead thread list.
*/
diff --git a/lib/libpthread/thread/thr_fork.c b/lib/libpthread/thread/thr_fork.c
index 960c1de..5582c1e 100644
--- a/lib/libpthread/thread/thr_fork.c
+++ b/lib/libpthread/thread/thr_fork.c
@@ -41,7 +41,7 @@
pid_t
fork(void)
{
- int flags;
+ int i, flags;
pid_t ret;
pthread_t pthread;
pthread_t pthread_next;
@@ -88,6 +88,11 @@ fork(void)
else if (_thread_sys_fcntl(_thread_kern_pipe[1], F_SETFL, flags | O_NONBLOCK) == -1) {
/* Abort this application: */
abort();
+ /* Initialize the ready queue: */
+ } else if (_pq_init(&_readyq, PTHREAD_MIN_PRIORITY,
+ PTHREAD_MAX_PRIORITY) != 0) {
+ /* Abort this application: */
+ PANIC("Cannot allocate priority ready queue.");
} else {
/* Point to the first thread in the list: */
pthread = _thread_link_list;
@@ -119,6 +124,33 @@ fork(void)
/* Point to the next thread: */
pthread = pthread_next;
}
+
+ /* Re-init the waiting queues. */
+ TAILQ_INIT(&_waitingq);
+
+ /* Initialize the scheduling switch hook routine: */
+ _sched_switch_hook = NULL;
+
+ /* Clear out any locks in the file descriptor table: */
+ for (i = 0; i < _thread_dtablesize; i++) {
+ if (_thread_fd_table[i] != NULL) {
+ /* Initialise the file locks: */
+ memset(&_thread_fd_table[i]->lock, 0,
+ sizeof(_thread_fd_table[i]->lock));
+ _thread_fd_table[i]->r_owner = NULL;
+ _thread_fd_table[i]->w_owner = NULL;
+ _thread_fd_table[i]->r_fname = NULL;
+ _thread_fd_table[i]->w_fname = NULL;
+ _thread_fd_table[i]->r_lineno = 0;;
+ _thread_fd_table[i]->w_lineno = 0;;
+ _thread_fd_table[i]->r_lockcount = 0;;
+ _thread_fd_table[i]->w_lockcount = 0;;
+
+ /* Initialise the read/write queues: */
+ _thread_queue_init(&_thread_fd_table[i]->r_queue);
+ _thread_queue_init(&_thread_fd_table[i]->w_queue);
+ }
+ }
}
}
diff --git a/lib/libpthread/thread/thr_gc.c b/lib/libpthread/thread/thr_gc.c
index f297fa8..510c51f 100644
--- a/lib/libpthread/thread/thr_gc.c
+++ b/lib/libpthread/thread/thr_gc.c
@@ -29,7 +29,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: uthread_gc.c,v 1.1 1998/09/30 06:36:56 jb Exp $
+ * $Id: uthread_gc.c,v 1.2 1998/09/30 19:17:51 dt Exp $
*
* Garbage collector thread. Frees memory allocated for dead threads.
*
@@ -47,6 +47,7 @@ _thread_gc(pthread_addr_t arg)
int f_debug;
int f_done = 0;
int ret;
+ sigset_t mask;
pthread_t pthread;
pthread_t pthread_cln;
pthread_t pthread_nxt;
@@ -54,6 +55,13 @@ _thread_gc(pthread_addr_t arg)
struct timespec abstime;
void *p_stack;
+ /* Block all signals */
+ sigfillset (&mask);
+ sigprocmask (SIG_BLOCK, &mask, NULL);
+
+ /* Mark this thread as a library thread (not a user thread). */
+ _thread_run->flags |= PTHREAD_FLAGS_PRIVATE;
+
/* Set a debug flag based on an environment variable. */
f_debug = (getenv("LIBC_R_DEBUG") != NULL);
diff --git a/lib/libpthread/thread/thr_getprio.c b/lib/libpthread/thread/thr_getprio.c
index 708b8f1..b2c94d6 100644
--- a/lib/libpthread/thread/thr_getprio.c
+++ b/lib/libpthread/thread/thr_getprio.c
@@ -20,7 +20,7 @@
* THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@@ -38,12 +38,11 @@
int
pthread_getprio(pthread_t pthread)
{
- int ret;
+ int policy, ret;
+ struct sched_param param;
- /* Find the thread in the list of active threads: */
- if ((ret = _find_thread(pthread)) == 0)
- /* Get the thread priority: */
- ret = pthread->pthread_priority;
+ if ((ret = pthread_getschedparam(pthread, &policy, &param)) == 0)
+ ret = param.sched_priority;
else {
/* Invalid thread: */
errno = ret;
diff --git a/lib/libpthread/thread/thr_getschedparam.c b/lib/libpthread/thread/thr_getschedparam.c
new file mode 100644
index 0000000..e7d18d9
--- /dev/null
+++ b/lib/libpthread/thread/thr_getschedparam.c
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Daniel Eischen.
+ * 4. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+#include <errno.h>
+#ifdef _THREAD_SAFE
+#include <pthread.h>
+#include "pthread_private.h"
+
+int
+pthread_getschedparam(pthread_t pthread, int *policy, struct sched_param *param)
+{
+ int ret;
+
+ if ((param == NULL) || (policy == NULL))
+ /* Return an invalid argument error: */
+ ret = EINVAL;
+
+ /* Find the thread in the list of active threads: */
+ else if ((ret = _find_thread(pthread)) == 0) {
+ /* Return the threads base priority and scheduling policy: */
+ param->sched_priority = pthread->base_priority;
+ *policy = pthread->attr.sched_policy;
+ }
+
+ return(ret);
+}
+#endif
diff --git a/lib/libpthread/thread/thr_info.c b/lib/libpthread/thread/thr_info.c
index f66dd2d..d2d97da 100644
--- a/lib/libpthread/thread/thr_info.c
+++ b/lib/libpthread/thread/thr_info.c
@@ -60,9 +60,11 @@ static const struct s_thread_info thread_info[] = {
{PS_WAIT_WAIT , "Waiting process"},
{PS_SIGSUSPEND , "Suspended, waiting for a signal"},
{PS_SIGWAIT , "Waiting for a signal"},
+ {PS_SPINBLOCK , "Waiting for a spinlock"},
{PS_JOIN , "Waiting to join"},
{PS_SUSPENDED , "Suspended"},
{PS_DEAD , "Dead"},
+ {PS_DEADLOCK , "Deadlocked"},
{PS_STATE_MAX , "Not a real state!"}
};
@@ -75,6 +77,7 @@ _thread_dump_info(void)
int j;
pthread_t pthread;
char tmpfile[128];
+ pq_list_t *pq_list;
for (i = 0; i < 100000; i++) {
snprintf(tmpfile, sizeof(tmpfile), "/tmp/uthread.dump.%u.%i",
@@ -116,7 +119,7 @@ _thread_dump_info(void)
snprintf(s, sizeof(s),
"--------------------\nThread %p (%s) prio %3d state %s [%s:%d]\n",
pthread, (pthread->name == NULL) ?
- "":pthread->name, pthread->pthread_priority,
+ "":pthread->name, pthread->base_priority,
thread_info[j].name,
pthread->fname,pthread->lineno);
_thread_sys_write(fd, s, strlen(s));
@@ -167,6 +170,50 @@ _thread_dump_info(void)
}
}
+ /* Output a header for ready threads: */
+ strcpy(s, "\n\n=============\nREADY THREADS\n\n");
+ _thread_sys_write(fd, s, strlen(s));
+
+ /* Enter a loop to report each thread in the ready queue: */
+ TAILQ_FOREACH (pq_list, &_readyq.pq_queue, pl_link) {
+ TAILQ_FOREACH(pthread, &pq_list->pl_head, pqe) {
+ /* Find the state: */
+ for (j = 0; j < (sizeof(thread_info) /
+ sizeof(struct s_thread_info)) - 1; j++)
+ if (thread_info[j].state == pthread->state)
+ break;
+ /* Output a record for the current thread: */
+ snprintf(s, sizeof(s),
+ "--------------------\nThread %p (%s) prio %3d state %s [%s:%d]\n",
+ pthread, (pthread->name == NULL) ?
+ "":pthread->name, pthread->base_priority,
+ thread_info[j].name,
+ pthread->fname,pthread->lineno);
+ _thread_sys_write(fd, s, strlen(s));
+ }
+ }
+
+ /* Output a header for waiting threads: */
+ strcpy(s, "\n\n=============\nWAITING THREADS\n\n");
+ _thread_sys_write(fd, s, strlen(s));
+
+ /* Enter a loop to report each thread in the waiting queue: */
+ TAILQ_FOREACH (pthread, &_waitingq, pqe) {
+ /* Find the state: */
+ for (j = 0; j < (sizeof(thread_info) /
+ sizeof(struct s_thread_info)) - 1; j++)
+ if (thread_info[j].state == pthread->state)
+ break;
+ /* Output a record for the current thread: */
+ snprintf(s, sizeof(s),
+ "--------------------\nThread %p (%s) prio %3d state %s [%s:%d]\n",
+ pthread, (pthread->name == NULL) ?
+ "":pthread->name, pthread->base_priority,
+ thread_info[j].name,
+ pthread->fname,pthread->lineno);
+ _thread_sys_write(fd, s, strlen(s));
+ }
+
/* Check if there are no dead threads: */
if (_thread_dead == NULL) {
/* Output a record: */
@@ -186,7 +233,7 @@ _thread_dump_info(void)
/* Output a record for the current thread: */
snprintf(s, sizeof(s),
"Thread %p prio %3d [%s:%d]\n",
- pthread, pthread->pthread_priority,
+ pthread, pthread->base_priority,
pthread->fname,pthread->lineno);
_thread_sys_write(fd, s, strlen(s));
}
diff --git a/lib/libpthread/thread/thr_init.c b/lib/libpthread/thread/thr_init.c
index 50f3bef..e4411ce 100644
--- a/lib/libpthread/thread/thr_init.c
+++ b/lib/libpthread/thread/thr_init.c
@@ -20,7 +20,7 @@
* THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@@ -147,6 +147,11 @@ _thread_init(void)
/* Abort this application: */
PANIC("Cannot get kernel write pipe flags");
}
+ /* Initialize the ready queue: */
+ else if (_pq_init(&_readyq, PTHREAD_MIN_PRIORITY, PTHREAD_MAX_PRIORITY) != 0) {
+ /* Abort this application: */
+ PANIC("Cannot allocate priority ready queue.");
+ }
/* Allocate memory for the thread structure of the initial thread: */
else if ((_thread_initial = (pthread_t) malloc(sizeof(struct pthread))) == NULL) {
/*
@@ -157,10 +162,25 @@ _thread_init(void)
} else {
/* Zero the global kernel thread structure: */
memset(&_thread_kern_thread, 0, sizeof(struct pthread));
+ _thread_kern_thread.flags = PTHREAD_FLAGS_PRIVATE;
memset(_thread_initial, 0, sizeof(struct pthread));
+ /* Initialize the waiting queue: */
+ TAILQ_INIT(&_waitingq);
+
+ /* Initialize the scheduling switch hook routine: */
+ _sched_switch_hook = NULL;
+
+ /*
+ * Write a magic value to the thread structure
+ * to help identify valid ones:
+ */
+ _thread_initial->magic = PTHREAD_MAGIC;
+
/* Default the priority of the initial thread: */
- _thread_initial->pthread_priority = PTHREAD_DEFAULT_PRIORITY;
+ _thread_initial->base_priority = PTHREAD_DEFAULT_PRIORITY;
+ _thread_initial->active_priority = PTHREAD_DEFAULT_PRIORITY;
+ _thread_initial->inherited_priority = 0;
/* Initialise the state of the initial thread: */
_thread_initial->state = PS_RUNNING;
@@ -168,7 +188,13 @@ _thread_init(void)
/* Initialise the queue: */
_thread_queue_init(&(_thread_initial->join_queue));
+ /* Initialize the owned mutex queue and count: */
+ TAILQ_INIT(&(_thread_initial->mutexq));
+ _thread_initial->priority_mutex_count = 0;
+
/* Initialise the rest of the fields: */
+ _thread_initial->sched_defer_count = 0;
+ _thread_initial->yield_on_sched_undefer = 0;
_thread_initial->specific_data = NULL;
_thread_initial->cleanup = NULL;
_thread_initial->queue = NULL;
@@ -206,9 +232,9 @@ _thread_init(void)
* signals that the user-thread kernel needs. Actually
* SIGINFO isn't really needed, but it is nice to have.
*/
- if (_thread_sys_sigaction(SIGVTALRM, &act, NULL) != 0 ||
- _thread_sys_sigaction(SIGINFO , &act, NULL) != 0 ||
- _thread_sys_sigaction(SIGCHLD , &act, NULL) != 0) {
+ if (_thread_sys_sigaction(_SCHED_SIGNAL, &act, NULL) != 0 ||
+ _thread_sys_sigaction(SIGINFO, &act, NULL) != 0 ||
+ _thread_sys_sigaction(SIGCHLD, &act, NULL) != 0) {
/*
* Abort this process if signal initialisation fails:
*/
@@ -256,6 +282,8 @@ _thread_init(void)
pthread_cond_init(&_gc_cond,NULL) != 0)
PANIC("Failed to initialise garbage collector mutex or condvar");
+ gettimeofday(&kern_inc_prio_time, NULL);
+
return;
}
diff --git a/lib/libpthread/thread/thr_kern.c b/lib/libpthread/thread/thr_kern.c
index 3a6966b..626f1d4 100644
--- a/lib/libpthread/thread/thr_kern.c
+++ b/lib/libpthread/thread/thr_kern.c
@@ -20,7 +20,7 @@
* THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@@ -53,16 +53,18 @@
static void
_thread_kern_select(int wait_reqd);
+static inline void
+thread_run_switch_hook(pthread_t thread_out, pthread_t thread_in);
+
void
_thread_kern_sched(struct sigcontext * scp)
{
#ifndef __alpha__
char *fdata;
#endif
- int prio = -1;
pthread_t pthread;
pthread_t pthread_h = NULL;
- pthread_t pthread_s = NULL;
+ pthread_t last_thread = NULL;
struct itimerval itimer;
struct timespec ts;
struct timespec ts1;
@@ -105,18 +107,21 @@ __asm__("fnsave %0": :"m"(*fdata));
*/
_thread_kern_in_sched = 0;
- /*
- * There might be pending signals for this thread, so
- * dispatch any that aren't blocked:
- */
- _dispatch_signals();
+ if (_sched_switch_hook != NULL) {
+ /* Run the installed switch hook: */
+ thread_run_switch_hook(_last_user_thread, _thread_run);
+ }
return;
} else
/* Flag the jump buffer was the last state saved: */
_thread_run->sig_saved = 0;
+ /* If the currently running thread is a user thread, save it: */
+ if ((_thread_run->flags & PTHREAD_FLAGS_PRIVATE) == 0)
+ _last_user_thread = _thread_run;
+
/*
- * Enter a the scheduling loop that finds the next thread that is
+ * Enter a scheduling loop that finds the next thread that is
* ready to run. This loop completes when there are no more threads
* in the global list or when a thread has its state restored by
* either a sigreturn (if the state was saved as a sigcontext) or a
@@ -134,12 +139,48 @@ __asm__("fnsave %0": :"m"(*fdata));
_thread_kern_select(0);
/*
- * Enter a loop to look for sleeping threads that are ready:
+ * Define the maximum time before a scheduling signal
+ * is required:
+ */
+ itimer.it_value.tv_sec = 0;
+ itimer.it_value.tv_usec = TIMESLICE_USEC;
+
+ /*
+ * The interval timer is not reloaded when it
+ * times out. The interval time needs to be
+ * calculated every time.
+ */
+ itimer.it_interval.tv_sec = 0;
+ itimer.it_interval.tv_usec = 0;
+
+ /*
+ * Enter a loop to look for sleeping threads that are ready
+ * or timedout. While we're at it, also find the smallest
+ * timeout value for threads waiting for a time.
*/
- for (pthread = _thread_link_list; pthread != NULL;
- pthread = pthread->nxt) {
+ _waitingq_check_reqd = 0; /* reset flag before loop */
+ TAILQ_FOREACH(pthread, &_waitingq, pqe) {
+ /* Check if this thread is ready: */
+ if (pthread->state == PS_RUNNING) {
+ PTHREAD_WAITQ_REMOVE(pthread);
+ PTHREAD_PRIOQ_INSERT_TAIL(pthread);
+ }
+
+ /*
+ * Check if this thread is blocked by an
+ * atomic lock:
+ */
+ else if (pthread->state == PS_SPINBLOCK) {
+ /*
+ * If the lock is available, let
+ * the thread run.
+ */
+ if (pthread->data.spinlock->access_lock == 0) {
+ PTHREAD_NEW_STATE(pthread,PS_RUNNING);
+ }
+
/* Check if this thread is to timeout: */
- if (pthread->state == PS_COND_WAIT ||
+ } else if (pthread->state == PS_COND_WAIT ||
pthread->state == PS_SLEEP_WAIT ||
pthread->state == PS_FDR_WAIT ||
pthread->state == PS_FDW_WAIT ||
@@ -163,9 +204,9 @@ __asm__("fnsave %0": :"m"(*fdata));
*/
if (pthread->state == PS_SELECT_WAIT) {
/*
- * The select has timed out,
- * so zero the file
- * descriptor sets:
+ * The select has timed out, so
+ * zero the file descriptor
+ * sets:
*/
FD_ZERO(&pthread->data.select_data->readfds);
FD_ZERO(&pthread->data.select_data->writefds);
@@ -189,13 +230,72 @@ __asm__("fnsave %0": :"m"(*fdata));
* it to be restarted:
*/
PTHREAD_NEW_STATE(pthread,PS_RUNNING);
+ } else {
+ /*
+ * Calculate the time until this thread
+ * is ready, allowing for the clock
+ * resolution:
+ */
+ ts1.tv_sec = pthread->wakeup_time.tv_sec
+ - ts.tv_sec;
+ ts1.tv_nsec = pthread->wakeup_time.tv_nsec
+ - ts.tv_nsec + CLOCK_RES_NSEC;
+
+ /*
+ * Check for underflow of the
+ * nanosecond field:
+ */
+ if (ts1.tv_nsec < 0) {
+ /*
+ * Allow for the underflow
+ * of the nanosecond field:
+ */
+ ts1.tv_sec--;
+ ts1.tv_nsec += 1000000000;
+ }
+ /*
+ * Check for overflow of the nanosecond
+ * field:
+ */
+ if (ts1.tv_nsec >= 1000000000) {
+ /*
+ * Allow for the overflow of
+ * the nanosecond field:
+ */
+ ts1.tv_sec++;
+ ts1.tv_nsec -= 1000000000;
+ }
+ /*
+ * Convert the timespec structure
+ * to a timeval structure:
+ */
+ TIMESPEC_TO_TIMEVAL(&tv1, &ts1);
+
+ /*
+ * Check if the thread will be ready
+ * sooner than the earliest ones found
+ * so far:
+ */
+ if (timercmp(&tv1, &itimer.it_value, <)) {
+ /*
+ * Update the time value:
+ */
+ itimer.it_value.tv_sec = tv1.tv_sec;
+ itimer.it_value.tv_usec = tv1.tv_usec;
+ }
}
+
}
}
/* Check if there is a current thread: */
if (_thread_run != &_thread_kern_thread) {
/*
+ * This thread no longer needs to yield the CPU.
+ */
+ _thread_run->yield_on_sched_undefer = 0;
+
+ /*
* Save the current time as the time that the thread
* became inactive:
*/
@@ -204,194 +304,64 @@ __asm__("fnsave %0": :"m"(*fdata));
/*
* Accumulate the number of microseconds that this
- * thread has run for:
+ * thread has run for:
*/
- if (_thread_run->slice_usec != -1) {
- _thread_run->slice_usec += (_thread_run->last_inactive.tv_sec -
- _thread_run->last_active.tv_sec) * 1000000 +
- _thread_run->last_inactive.tv_usec -
- _thread_run->last_active.tv_usec;
- }
-
- /*
- * Check if this thread has reached its allocated
- * time slice period:
- */
- if (_thread_run->slice_usec > TIMESLICE_USEC) {
- /*
- * Flag the allocated time slice period as
- * up:
- */
- _thread_run->slice_usec = -1;
+ if ((_thread_run->slice_usec != -1) &&
+ (_thread_run->attr.sched_policy != SCHED_FIFO)) {
+ _thread_run->slice_usec +=
+ (_thread_run->last_inactive.tv_sec -
+ _thread_run->last_active.tv_sec) * 1000000 +
+ _thread_run->last_inactive.tv_usec -
+ _thread_run->last_active.tv_usec;
+
+ /* Check for time quantum exceeded: */
+ if (_thread_run->slice_usec > TIMESLICE_USEC)
+ _thread_run->slice_usec = -1;
}
- }
- /* Check if an incremental priority update is required: */
- if (((tv.tv_sec - kern_inc_prio_time.tv_sec) * 1000000 +
- tv.tv_usec - kern_inc_prio_time.tv_usec) > INC_PRIO_USEC) {
- /*
- * Enter a loop to look for run-enabled threads that
- * have not run since the last time that an
- * incremental priority update was performed:
- */
- for (pthread = _thread_link_list; pthread != NULL; pthread = pthread->nxt) {
- /* Check if this thread is unable to run: */
- if (pthread->state != PS_RUNNING) {
- }
- /*
- * Check if the last time that this thread
- * was run (as indicated by the last time it
- * became inactive) is before the time that
- * the last incremental priority check was
- * made:
- */
- else if (timercmp(&pthread->last_inactive, &kern_inc_prio_time, <)) {
+ if (_thread_run->state == PS_RUNNING) {
+ if (_thread_run->slice_usec == -1) {
+ /*
+ * The thread exceeded its time
+ * quantum or it yielded the CPU;
+ * place it at the tail of the
+ * queue for its priority.
+ */
+ PTHREAD_PRIOQ_INSERT_TAIL(_thread_run);
+ } else {
/*
- * Increment the incremental priority
- * for this thread in the hope that
- * it will eventually get a chance to
- * run:
+ * The thread hasn't exceeded its
+ * interval. Place it at the head
+ * of the queue for its priority.
*/
- (pthread->inc_prio)++;
+ PTHREAD_PRIOQ_INSERT_HEAD(_thread_run);
}
}
-
- /* Save the new incremental priority update time: */
- kern_inc_prio_time.tv_sec = tv.tv_sec;
- kern_inc_prio_time.tv_usec = tv.tv_usec;
- }
- /*
- * Enter a loop to look for the first thread of the highest
- * priority that is ready to run:
- */
- for (pthread = _thread_link_list; pthread != NULL; pthread = pthread->nxt) {
- /* Check if the current thread is unable to run: */
- if (pthread->state != PS_RUNNING) {
- }
- /*
- * Check if no run-enabled thread has been seen or if
- * the current thread has a priority higher than the
- * highest seen so far:
- */
- else if (pthread_h == NULL || (pthread->pthread_priority + pthread->inc_prio) > prio) {
+ else if (_thread_run->state == PS_DEAD) {
/*
- * Save this thread as the highest priority
- * thread seen so far:
+ * Don't add dead threads to the waiting
+ * queue, because when they're reaped, it
+ * will corrupt the queue.
*/
- pthread_h = pthread;
- prio = pthread->pthread_priority + pthread->inc_prio;
}
- }
-
- /*
- * Enter a loop to look for a thread that: 1. Is run-enabled.
- * 2. Has the required agregate priority. 3. Has not been
- * allocated its allocated time slice. 4. Became inactive
- * least recently.
- */
- for (pthread = _thread_link_list; pthread != NULL; pthread = pthread->nxt) {
- /* Check if the current thread is unable to run: */
- if (pthread->state != PS_RUNNING) {
- /* Ignore threads that are not ready to run. */
- }
-
- /*
- * Check if the current thread as an agregate
- * priority not equal to the highest priority found
- * above:
- */
- else if ((pthread->pthread_priority + pthread->inc_prio) != prio) {
+ else {
/*
- * Ignore threads which have lower agregate
- * priority.
+ * This thread has changed state and needs
+ * to be placed in the waiting queue.
*/
- }
-
- /*
- * Check if the current thread reached its time slice
- * allocation last time it ran (or if it has not run
- * yet):
- */
- else if (pthread->slice_usec == -1) {
- }
+ PTHREAD_WAITQ_INSERT(_thread_run);
- /*
- * Check if an eligible thread has not been found
- * yet, or if the current thread has an inactive time
- * earlier than the last one seen:
- */
- else if (pthread_s == NULL || timercmp(&pthread->last_inactive, &tv1, <)) {
- /*
- * Save the pointer to the current thread as
- * the most eligible thread seen so far:
- */
- pthread_s = pthread;
-
- /*
- * Save the time that the selected thread
- * became inactive:
- */
- tv1.tv_sec = pthread->last_inactive.tv_sec;
- tv1.tv_usec = pthread->last_inactive.tv_usec;
+ /* Restart the time slice: */
+ _thread_run->slice_usec = -1;
}
}
/*
- * Check if no thread was selected according to incomplete
- * time slice allocation:
+ * Get the highest priority thread in the ready queue.
*/
- if (pthread_s == NULL) {
- /*
- * Enter a loop to look for any other thread that: 1.
- * Is run-enabled. 2. Has the required agregate
- * priority. 3. Became inactive least recently.
- */
- for (pthread = _thread_link_list; pthread != NULL; pthread = pthread->nxt) {
- /*
- * Check if the current thread is unable to
- * run:
- */
- if (pthread->state != PS_RUNNING) {
- /*
- * Ignore threads that are not ready
- * to run.
- */
- }
- /*
- * Check if the current thread as an agregate
- * priority not equal to the highest priority
- * found above:
- */
- else if ((pthread->pthread_priority + pthread->inc_prio) != prio) {
- /*
- * Ignore threads which have lower
- * agregate priority.
- */
- }
- /*
- * Check if an eligible thread has not been
- * found yet, or if the current thread has an
- * inactive time earlier than the last one
- * seen:
- */
- else if (pthread_s == NULL || timercmp(&pthread->last_inactive, &tv1, <)) {
- /*
- * Save the pointer to the current
- * thread as the most eligible thread
- * seen so far:
- */
- pthread_s = pthread;
+ pthread_h = PTHREAD_PRIOQ_FIRST;
- /*
- * Save the time that the selected
- * thread became inactive:
- */
- tv1.tv_sec = pthread->last_inactive.tv_sec;
- tv1.tv_usec = pthread->last_inactive.tv_usec;
- }
- }
- }
/* Check if there are no threads ready to run: */
- if (pthread_s == NULL) {
+ if (pthread_h == NULL) {
/*
* Lock the pthread kernel by changing the pointer to
* the running thread to point to the global kernel
@@ -406,7 +376,10 @@ __asm__("fnsave %0": :"m"(*fdata));
_thread_kern_select(1);
} else {
/* Make the selected thread the current thread: */
- _thread_run = pthread_s;
+ _thread_run = pthread_h;
+
+ /* Remove the thread from the ready queue. */
+ PTHREAD_PRIOQ_REMOVE(_thread_run);
/*
* Save the current time as the time that the thread
@@ -424,149 +397,22 @@ __asm__("fnsave %0": :"m"(*fdata));
/* Reset the accumulated time slice period: */
_thread_run->slice_usec = 0;
}
- /*
- * Reset the incremental priority now that this
- * thread has been given the chance to run:
- */
- _thread_run->inc_prio = 0;
/* Check if there is more than one thread: */
if (_thread_run != _thread_link_list || _thread_run->nxt != NULL) {
/*
- * Define the maximum time before a SIGVTALRM
- * is required:
- */
- itimer.it_value.tv_sec = 0;
- itimer.it_value.tv_usec = TIMESLICE_USEC;
-
- /*
- * The interval timer is not reloaded when it
- * times out. The interval time needs to be
- * calculated every time.
- */
- itimer.it_interval.tv_sec = 0;
- itimer.it_interval.tv_usec = 0;
-
- /*
- * Enter a loop to look for threads waiting
- * for a time:
- */
- for (pthread = _thread_link_list; pthread != NULL; pthread = pthread->nxt) {
- /*
- * Check if this thread is to
- * timeout:
- */
- if (pthread->state == PS_COND_WAIT ||
- pthread->state == PS_SLEEP_WAIT ||
- pthread->state == PS_FDR_WAIT ||
- pthread->state == PS_FDW_WAIT ||
- pthread->state == PS_SELECT_WAIT) {
- /*
- * Check if this thread is to
- * wait forever:
- */
- if (pthread->wakeup_time.tv_sec == -1) {
- }
- /*
- * Check if this thread is to
- * wakeup immediately:
- */
- else if (pthread->wakeup_time.tv_sec == 0 &&
- pthread->wakeup_time.tv_nsec == 0) {
- }
- /*
- * Check if the current time
- * is after the wakeup time:
- */
- else if ((ts.tv_sec > pthread->wakeup_time.tv_sec) ||
- ((ts.tv_sec == pthread->wakeup_time.tv_sec) &&
- (ts.tv_nsec > pthread->wakeup_time.tv_nsec))) {
- } else {
- /*
- * Calculate the time
- * until this thread
- * is ready, allowing
- * for the clock
- * resolution:
- */
- ts1.tv_sec = pthread->wakeup_time.tv_sec - ts.tv_sec;
- ts1.tv_nsec = pthread->wakeup_time.tv_nsec - ts.tv_nsec +
- CLOCK_RES_NSEC;
-
- /*
- * Check for
- * underflow of the
- * nanosecond field:
- */
- if (ts1.tv_nsec < 0) {
- /*
- * Allow for
- * the
- * underflow
- * of the
- * nanosecond
- * field:
- */
- ts1.tv_sec--;
- ts1.tv_nsec += 1000000000;
- }
- /*
- * Check for overflow
- * of the nanosecond
- * field:
- */
- if (ts1.tv_nsec >= 1000000000) {
- /*
- * Allow for
- * the
- * overflow
- * of the
- * nanosecond
- * field:
- */
- ts1.tv_sec++;
- ts1.tv_nsec -= 1000000000;
- }
- /*
- * Convert the
- * timespec structure
- * to a timeval
- * structure:
- */
- TIMESPEC_TO_TIMEVAL(&tv, &ts1);
-
- /*
- * Check if the
- * thread will be
- * ready sooner than
- * the earliest one
- * found so far:
- */
- if (timercmp(&tv, &itimer.it_value, <)) {
- /*
- * Update the
- * time
- * value:
- */
- itimer.it_value.tv_sec = tv.tv_sec;
- itimer.it_value.tv_usec = tv.tv_usec;
- }
- }
- }
- }
-
- /*
* Start the interval timer for the
* calculated time interval:
*/
- if (setitimer(ITIMER_VIRTUAL, &itimer, NULL) != 0) {
+ if (setitimer(_ITIMER_SCHED_TIMER, &itimer, NULL) != 0) {
/*
* Cannot initialise the timer, so
* abort this process:
*/
- PANIC("Cannot set virtual timer");
+ PANIC("Cannot set scheduling timer");
}
}
+
/* Check if a signal context was saved: */
if (_thread_run->sig_saved == 1) {
#ifndef __alpha__
@@ -579,20 +425,30 @@ __asm__("fnsave %0": :"m"(*fdata));
/* Restore the floating point state: */
__asm__("frstor %0": :"m"(*fdata));
#endif
-
/*
* Do a sigreturn to restart the thread that
* was interrupted by a signal:
*/
- _thread_kern_in_sched = 0;
+ _thread_kern_in_sched = 0;
+
+ /*
+ * If we had a context switch, run any
+ * installed switch hooks.
+ */
+ if ((_sched_switch_hook != NULL) &&
+ (_last_user_thread != _thread_run)) {
+ thread_run_switch_hook(_last_user_thread,
+ _thread_run);
+ }
_thread_sys_sigreturn(&_thread_run->saved_sigcontext);
- } else
+ } else {
/*
* Do a longjmp to restart the thread that
* was context switched out (by a longjmp to
* a different thread):
*/
longjmp(_thread_run->saved_jmp_buf, 1);
+ }
/* This point should not be reached. */
PANIC("Thread has returned from sigreturn or longjmp");
@@ -679,7 +535,8 @@ _thread_kern_select(int wait_reqd)
* Enter a loop to process threads waiting on either file descriptors
* or times:
*/
- for (pthread = _thread_link_list; pthread != NULL; pthread = pthread->nxt) {
+ _waitingq_check_reqd = 0; /* reset flag before loop */
+ TAILQ_FOREACH (pthread, &_waitingq, pqe) {
/* Assume that this state does not time out: */
settimeout = 0;
@@ -690,12 +547,12 @@ _thread_kern_select(int wait_reqd)
* operations or timeouts:
*/
case PS_DEAD:
+ case PS_DEADLOCK:
case PS_FDLR_WAIT:
case PS_FDLW_WAIT:
case PS_FILE_WAIT:
case PS_JOIN:
case PS_MUTEX_WAIT:
- case PS_RUNNING:
case PS_SIGTHREAD:
case PS_SIGWAIT:
case PS_STATE_MAX:
@@ -704,6 +561,16 @@ _thread_kern_select(int wait_reqd)
/* Nothing to do here. */
break;
+ case PS_RUNNING:
+ /*
+ * A signal occurred and made this thread ready
+ * while in the scheduler or while the scheduling
+ * queues were protected.
+ */
+ PTHREAD_WAITQ_REMOVE(pthread);
+ PTHREAD_PRIOQ_INSERT_TAIL(pthread);
+ break;
+
/* File descriptor read wait: */
case PS_FDR_WAIT:
/* Add the file descriptor to the read set: */
@@ -1010,16 +877,16 @@ _thread_kern_select(int wait_reqd)
* descriptors that are flagged as available by the
* _select syscall:
*/
- for (pthread = _thread_link_list; pthread != NULL; pthread = pthread->nxt) {
+ TAILQ_FOREACH (pthread, &_waitingq, pqe) {
/* Process according to thread state: */
switch (pthread->state) {
/*
* States which do not depend on file
* descriptor I/O operations:
*/
- case PS_RUNNING:
case PS_COND_WAIT:
case PS_DEAD:
+ case PS_DEADLOCK:
case PS_FDLR_WAIT:
case PS_FDLW_WAIT:
case PS_FILE_WAIT:
@@ -1034,6 +901,15 @@ _thread_kern_select(int wait_reqd)
/* Nothing to do here. */
break;
+ case PS_RUNNING:
+ /*
+ * A signal occurred and made this thread
+ * ready while in the scheduler.
+ */
+ PTHREAD_WAITQ_REMOVE(pthread);
+ PTHREAD_PRIOQ_INSERT_TAIL(pthread);
+ break;
+
/* File descriptor read wait: */
case PS_FDR_WAIT:
/*
@@ -1047,6 +923,13 @@ _thread_kern_select(int wait_reqd)
* is scheduled next:
*/
pthread->state = PS_RUNNING;
+
+ /*
+ * Remove it from the waiting queue
+ * and add it to the ready queue:
+ */
+ PTHREAD_WAITQ_REMOVE(pthread);
+ PTHREAD_PRIOQ_INSERT_TAIL(pthread);
}
break;
@@ -1063,6 +946,13 @@ _thread_kern_select(int wait_reqd)
* scheduled next:
*/
pthread->state = PS_RUNNING;
+
+ /*
+ * Remove it from the waiting queue
+ * and add it to the ready queue:
+ */
+ PTHREAD_WAITQ_REMOVE(pthread);
+ PTHREAD_PRIOQ_INSERT_TAIL(pthread);
}
break;
@@ -1269,6 +1159,13 @@ _thread_kern_select(int wait_reqd)
* thread to run:
*/
pthread->state = PS_RUNNING;
+
+ /*
+ * Remove it from the waiting queue
+ * and add it to the ready queue:
+ */
+ PTHREAD_WAITQ_REMOVE(pthread);
+ PTHREAD_PRIOQ_INSERT_TAIL(pthread);
}
break;
}
@@ -1320,4 +1217,80 @@ _thread_kern_set_timeout(struct timespec * timeout)
}
return;
}
+
+void
+_thread_kern_sched_defer(void)
+{
+ /* Allow scheduling deferral to be recursive. */
+ _thread_run->sched_defer_count++;
+}
+
+void
+_thread_kern_sched_undefer(void)
+{
+ pthread_t pthread;
+ int need_resched = 0;
+
+ /*
+ * Perform checks to yield only if we are about to undefer
+ * scheduling.
+ */
+ if (_thread_run->sched_defer_count == 1) {
+ /*
+ * Check if the waiting queue needs to be examined for
+ * threads that are now ready:
+ */
+ while (_waitingq_check_reqd != 0) {
+ /* Clear the flag before checking the waiting queue: */
+ _waitingq_check_reqd = 0;
+
+ TAILQ_FOREACH(pthread, &_waitingq, pqe) {
+ if (pthread->state == PS_RUNNING) {
+ PTHREAD_WAITQ_REMOVE(pthread);
+ PTHREAD_PRIOQ_INSERT_TAIL(pthread);
+ }
+ }
+ }
+
+ /*
+ * We need to yield if a thread change of state caused a
+ * higher priority thread to become ready, or if a
+ * scheduling signal occurred while preemption was disabled.
+ */
+ if ((((pthread = PTHREAD_PRIOQ_FIRST) != NULL) &&
+ (pthread->active_priority > _thread_run->active_priority)) ||
+ (_thread_run->yield_on_sched_undefer != 0)) {
+ _thread_run->yield_on_sched_undefer = 0;
+ need_resched = 1;
+ }
+ }
+
+ if (_thread_run->sched_defer_count > 0) {
+ /* Decrement the scheduling deferral count. */
+ _thread_run->sched_defer_count--;
+
+ /* Yield the CPU if necessary: */
+ if (need_resched)
+ _thread_kern_sched(NULL);
+ }
+}
+
+static inline void
+thread_run_switch_hook(pthread_t thread_out, pthread_t thread_in)
+{
+ pthread_t tid_out = thread_out;
+ pthread_t tid_in = thread_in;
+
+ if ((tid_out != NULL) &&
+ (tid_out->flags & PTHREAD_FLAGS_PRIVATE != 0))
+ tid_out = NULL;
+ if ((tid_in != NULL) &&
+ (tid_in->flags & PTHREAD_FLAGS_PRIVATE != 0))
+ tid_in = NULL;
+
+ if ((_sched_switch_hook != NULL) && (tid_out != tid_in)) {
+ /* Run the scheduler switch hook: */
+ _sched_switch_hook(tid_out, tid_in);
+ }
+}
#endif
diff --git a/lib/libpthread/thread/thr_kill.c b/lib/libpthread/thread/thr_kill.c
index 7572c05..c729179 100644
--- a/lib/libpthread/thread/thr_kill.c
+++ b/lib/libpthread/thread/thr_kill.c
@@ -52,6 +52,13 @@ pthread_kill(pthread_t pthread, int sig)
/* Find the thread in the list of active threads: */
else if ((ret = _find_thread(pthread)) == 0) {
+ /*
+ * Guard against preemption by a scheduling signal.
+ * A change of thread state modifies the waiting
+ * and priority queues.
+ */
+ _thread_kern_sched_defer();
+
switch (pthread->state) {
case PS_SIGSUSPEND:
/*
@@ -108,6 +115,12 @@ pthread_kill(pthread_t pthread, int sig)
sigaddset(&pthread->sigpend,sig);
break;
}
+
+ /*
+ * Reenable preemption and yield if a scheduling signal
+ * occurred while in the critical region.
+ */
+ _thread_kern_sched_undefer();
}
/* Return the completion status: */
diff --git a/lib/libpthread/thread/thr_mattr_init.c b/lib/libpthread/thread/thr_mattr_init.c
index 73226a6..206485f 100644
--- a/lib/libpthread/thread/thr_mattr_init.c
+++ b/lib/libpthread/thread/thr_mattr_init.c
@@ -20,7 +20,7 @@
* THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/lib/libpthread/thread/thr_mutex.c b/lib/libpthread/thread/thr_mutex.c
index d3801f1..0103a6c 100644
--- a/lib/libpthread/thread/thr_mutex.c
+++ b/lib/libpthread/thread/thr_mutex.c
@@ -20,7 +20,7 @@
* THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@@ -33,78 +33,116 @@
#include <stdlib.h>
#include <errno.h>
#include <string.h>
+#include <sys/param.h>
+#include <sys/queue.h>
#ifdef _THREAD_SAFE
#include <pthread.h>
#include "pthread_private.h"
+
+/*
+ * Prototypes
+ */
+static inline int mutex_self_trylock(pthread_mutex_t);
+static inline int mutex_self_lock(pthread_mutex_t);
+static inline int mutex_unlock_common(pthread_mutex_t *, int);
+static void mutex_priority_adjust(pthread_mutex_t);
+static void mutex_rescan_owned (pthread_t, pthread_mutex_t);
+static inline pthread_t mutex_queue_deq(pthread_mutex_t);
+static inline void mutex_queue_remove(pthread_mutex_t, pthread_t);
+static inline void mutex_queue_enq(pthread_mutex_t, pthread_t);
+
+
static spinlock_t static_init_lock = _SPINLOCK_INITIALIZER;
+
int
pthread_mutex_init(pthread_mutex_t * mutex,
const pthread_mutexattr_t * mutex_attr)
{
- enum pthread_mutextype type;
+ enum pthread_mutextype type;
+ int protocol;
+ int ceiling;
pthread_mutex_t pmutex;
int ret = 0;
- if (mutex == NULL) {
+ if (mutex == NULL)
ret = EINVAL;
- } else {
- /* Check if default mutex attributes: */
- if (mutex_attr == NULL || *mutex_attr == NULL)
- /* Default to a fast mutex: */
- type = PTHREAD_MUTEX_DEFAULT;
- else if ((*mutex_attr)->m_type >= MUTEX_TYPE_MAX)
- /* Return an invalid argument error: */
- ret = EINVAL;
- else
- /* Use the requested mutex type: */
- type = (*mutex_attr)->m_type;
-
- /* Check no errors so far: */
- if (ret == 0) {
- if ((pmutex = (pthread_mutex_t)
- malloc(sizeof(struct pthread_mutex))) == NULL)
- ret = ENOMEM;
- else {
- /* Reset the mutex flags: */
- pmutex->m_flags = 0;
-
- /* Process according to mutex type: */
- switch (type) {
- /* Fast mutex: */
- case PTHREAD_MUTEX_DEFAULT:
- case PTHREAD_MUTEX_NORMAL:
- case PTHREAD_MUTEX_ERRORCHECK:
- /* Nothing to do here. */
- break;
-
- /* Counting mutex: */
- case PTHREAD_MUTEX_RECURSIVE:
- /* Reset the mutex count: */
- pmutex->m_data.m_count = 0;
- break;
-
- /* Trap invalid mutex types: */
- default:
- /* Return an invalid argument error: */
- ret = EINVAL;
- break;
- }
- if (ret == 0) {
- /* Initialise the rest of the mutex: */
- _thread_queue_init(&pmutex->m_queue);
- pmutex->m_flags |= MUTEX_FLAGS_INITED;
- pmutex->m_owner = NULL;
- pmutex->m_type = type;
- memset(&pmutex->lock, 0,
- sizeof(pmutex->lock));
- *mutex = pmutex;
- } else {
- free(pmutex);
- *mutex = NULL;
- }
+ /* Check if default mutex attributes: */
+ else if (mutex_attr == NULL || *mutex_attr == NULL) {
+ /* Default to a (error checking) POSIX mutex: */
+ type = PTHREAD_MUTEX_ERRORCHECK;
+ protocol = PTHREAD_PRIO_NONE;
+ ceiling = PTHREAD_MAX_PRIORITY;
+ }
+
+ /* Check mutex type: */
+ else if (((*mutex_attr)->m_type < PTHREAD_MUTEX_ERRORCHECK) ||
+ ((*mutex_attr)->m_type >= MUTEX_TYPE_MAX))
+ /* Return an invalid argument error: */
+ ret = EINVAL;
+
+ /* Check mutex protocol: */
+ else if (((*mutex_attr)->m_protocol < PTHREAD_PRIO_NONE) ||
+	    ((*mutex_attr)->m_protocol > PTHREAD_PRIO_PROTECT))
+ /* Return an invalid argument error: */
+ ret = EINVAL;
+
+ else {
+ /* Use the requested mutex type and protocol: */
+ type = (*mutex_attr)->m_type;
+ protocol = (*mutex_attr)->m_protocol;
+ ceiling = (*mutex_attr)->m_ceiling;
+ }
+
+ /* Check no errors so far: */
+ if (ret == 0) {
+ if ((pmutex = (pthread_mutex_t)
+ malloc(sizeof(struct pthread_mutex))) == NULL)
+ ret = ENOMEM;
+ else {
+ /* Reset the mutex flags: */
+ pmutex->m_flags = 0;
+
+ /* Process according to mutex type: */
+ switch (type) {
+ /* case PTHREAD_MUTEX_DEFAULT: */
+ case PTHREAD_MUTEX_ERRORCHECK:
+ case PTHREAD_MUTEX_NORMAL:
+ /* Nothing to do here. */
+ break;
+
+ /* Single UNIX Spec 2 recursive mutex: */
+ case PTHREAD_MUTEX_RECURSIVE:
+ /* Reset the mutex count: */
+ pmutex->m_data.m_count = 0;
+ break;
+
+ /* Trap invalid mutex types: */
+ default:
+ /* Return an invalid argument error: */
+ ret = EINVAL;
+ break;
+ }
+ if (ret == 0) {
+ /* Initialise the rest of the mutex: */
+ TAILQ_INIT(&pmutex->m_queue);
+ pmutex->m_flags |= MUTEX_FLAGS_INITED;
+ pmutex->m_owner = NULL;
+ pmutex->m_type = type;
+ pmutex->m_protocol = protocol;
+ pmutex->m_refcount = 0;
+ if (protocol == PTHREAD_PRIO_PROTECT)
+ pmutex->m_prio = ceiling;
+ else
+ pmutex->m_prio = 0;
+ pmutex->m_saved_prio = 0;
+ memset(&pmutex->lock, 0, sizeof(pmutex->lock));
+ *mutex = pmutex;
+ } else {
+ free(pmutex);
+ *mutex = NULL;
}
}
}
@@ -124,16 +162,29 @@ pthread_mutex_destroy(pthread_mutex_t * mutex)
_SPINLOCK(&(*mutex)->lock);
/*
- * Free the memory allocated for the mutex
- * structure:
+ * Check to see if this mutex is in use:
*/
- free(*mutex);
+ if (((*mutex)->m_owner != NULL) ||
+ (TAILQ_FIRST(&(*mutex)->m_queue) != NULL) ||
+ ((*mutex)->m_refcount != 0)) {
+ ret = EBUSY;
- /*
- * Leave the caller's pointer NULL now that
- * the mutex has been destroyed:
- */
- *mutex = NULL;
+ /* Unlock the mutex structure: */
+ _SPINUNLOCK(&(*mutex)->lock);
+ }
+ else {
+ /*
+ * Free the memory allocated for the mutex
+ * structure:
+ */
+ free(*mutex);
+
+ /*
+ * Leave the caller's pointer NULL now that
+ * the mutex has been destroyed:
+ */
+ *mutex = NULL;
+ }
}
/* Return the completion status: */
@@ -170,44 +221,100 @@ pthread_mutex_trylock(pthread_mutex_t * mutex)
* initialization:
*/
else if (*mutex != NULL || (ret = init_static(mutex)) == 0) {
+ /*
+ * Guard against being preempted by a scheduling signal.
+		 * To support priority inheritance mutexes, we need to
+ * maintain lists of mutex ownerships for each thread as
+ * well as lists of waiting threads for each mutex. In
+ * order to propagate priorities we need to atomically
+ * walk these lists and cannot rely on a single mutex
+ * lock to provide protection against modification.
+ */
+ _thread_kern_sched_defer();
+
/* Lock the mutex structure: */
_SPINLOCK(&(*mutex)->lock);
/* Process according to mutex type: */
- switch ((*mutex)->m_type) {
- /* Fast mutex: */
- case PTHREAD_MUTEX_NORMAL:
- case PTHREAD_MUTEX_DEFAULT:
- case PTHREAD_MUTEX_ERRORCHECK:
+ switch ((*mutex)->m_protocol) {
+ /* Default POSIX mutex: */
+ case PTHREAD_PRIO_NONE:
/* Check if this mutex is not locked: */
if ((*mutex)->m_owner == NULL) {
/* Lock the mutex for the running thread: */
(*mutex)->m_owner = _thread_run;
- } else {
+
+ /* Add to the list of owned mutexes: */
+ TAILQ_INSERT_TAIL(&_thread_run->mutexq,
+ (*mutex), m_qe);
+ } else if ((*mutex)->m_owner == _thread_run)
+ ret = mutex_self_trylock(*mutex);
+ else
/* Return a busy error: */
ret = EBUSY;
- }
break;
- /* Counting mutex: */
- case PTHREAD_MUTEX_RECURSIVE:
- /* Check if this mutex is locked: */
- if ((*mutex)->m_owner != NULL) {
+		/* POSIX priority inheritance mutex: */
+ case PTHREAD_PRIO_INHERIT:
+ /* Check if this mutex is not locked: */
+ if ((*mutex)->m_owner == NULL) {
+ /* Lock the mutex for the running thread: */
+ (*mutex)->m_owner = _thread_run;
+
+ /* Track number of priority mutexes owned: */
+ _thread_run->priority_mutex_count++;
+
/*
- * Check if the mutex is locked by the running
- * thread:
+ * The mutex takes on the attributes of the
+ * running thread when there are no waiters.
*/
- if ((*mutex)->m_owner == _thread_run) {
- /* Increment the lock count: */
- (*mutex)->m_data.m_count++;
- } else {
- /* Return a busy error: */
- ret = EBUSY;
- }
- } else {
+ (*mutex)->m_prio = _thread_run->active_priority;
+ (*mutex)->m_saved_prio =
+ _thread_run->inherited_priority;
+
+ /* Add to the list of owned mutexes: */
+ TAILQ_INSERT_TAIL(&_thread_run->mutexq,
+ (*mutex), m_qe);
+ } else if ((*mutex)->m_owner == _thread_run)
+ ret = mutex_self_trylock(*mutex);
+ else
+ /* Return a busy error: */
+ ret = EBUSY;
+ break;
+
+ /* POSIX priority protection mutex: */
+ case PTHREAD_PRIO_PROTECT:
+ /* Check for a priority ceiling violation: */
+ if (_thread_run->active_priority > (*mutex)->m_prio)
+ ret = EINVAL;
+
+ /* Check if this mutex is not locked: */
+ else if ((*mutex)->m_owner == NULL) {
/* Lock the mutex for the running thread: */
(*mutex)->m_owner = _thread_run;
- }
+
+ /* Track number of priority mutexes owned: */
+ _thread_run->priority_mutex_count++;
+
+ /*
+ * The running thread inherits the ceiling
+ * priority of the mutex and executes at that
+ * priority.
+ */
+ _thread_run->active_priority = (*mutex)->m_prio;
+ (*mutex)->m_saved_prio =
+ _thread_run->inherited_priority;
+ _thread_run->inherited_priority =
+ (*mutex)->m_prio;
+
+ /* Add to the list of owned mutexes: */
+ TAILQ_INSERT_TAIL(&_thread_run->mutexq,
+ (*mutex), m_qe);
+ } else if ((*mutex)->m_owner == _thread_run)
+ ret = mutex_self_trylock(*mutex);
+ else
+ /* Return a busy error: */
+ ret = EBUSY;
break;
/* Trap invalid mutex types: */
@@ -219,6 +326,12 @@ pthread_mutex_trylock(pthread_mutex_t * mutex)
/* Unlock the mutex structure: */
_SPINUNLOCK(&(*mutex)->lock);
+
+ /*
+		 * Re-enable preemption and yield if a scheduling signal
+ * arrived while in the critical region:
+ */
+ _thread_kern_sched_undefer();
}
/* Return the completion status: */
@@ -238,91 +351,200 @@ pthread_mutex_lock(pthread_mutex_t * mutex)
* initialization:
*/
else if (*mutex != NULL || (ret = init_static(mutex)) == 0) {
+ /*
+ * Guard against being preempted by a scheduling signal.
+		 * To support priority inheritance mutexes, we need to
+ * maintain lists of mutex ownerships for each thread as
+ * well as lists of waiting threads for each mutex. In
+ * order to propagate priorities we need to atomically
+ * walk these lists and cannot rely on a single mutex
+ * lock to provide protection against modification.
+ */
+ _thread_kern_sched_defer();
+
/* Lock the mutex structure: */
_SPINLOCK(&(*mutex)->lock);
/* Process according to mutex type: */
- switch ((*mutex)->m_type) {
- /* What SS2 define as a 'normal' mutex. This has to deadlock
- on attempts to get a lock you already own. */
- case PTHREAD_MUTEX_NORMAL:
- if ((*mutex)->m_owner == _thread_run) {
- /* Intetionally deadlock */
- for (;;)
- _thread_kern_sched_state(PS_MUTEX_WAIT, __FILE__, __LINE__);
- }
- goto COMMON_LOCK;
-
- /* Return error (not OK) on attempting to re-lock */
- case PTHREAD_MUTEX_ERRORCHECK:
- if ((*mutex)->m_owner == _thread_run) {
- ret = EDEADLK;
- break;
- }
-
- /* Fast mutexes do not check for any error conditions: */
- case PTHREAD_MUTEX_DEFAULT:
- COMMON_LOCK:
- /*
- * Enter a loop to wait for the mutex to be locked by the
- * current thread:
- */
- while ((*mutex)->m_owner != _thread_run) {
- /* Check if the mutex is not locked: */
- if ((*mutex)->m_owner == NULL) {
- /* Lock the mutex for this thread: */
- (*mutex)->m_owner = _thread_run;
- } else {
- /*
- * Join the queue of threads waiting to lock
- * the mutex:
- */
- _thread_queue_enq(&(*mutex)->m_queue, _thread_run);
+ switch ((*mutex)->m_protocol) {
+ /* Default POSIX mutex: */
+ case PTHREAD_PRIO_NONE:
+ if ((*mutex)->m_owner == NULL) {
+ /* Lock the mutex for this thread: */
+ (*mutex)->m_owner = _thread_run;
- /* Wait for the mutex: */
- _thread_kern_sched_state_unlock(
- PS_MUTEX_WAIT, &(*mutex)->lock,
- __FILE__, __LINE__);
+ /* Add to the list of owned mutexes: */
+ TAILQ_INSERT_TAIL(&_thread_run->mutexq,
+ (*mutex), m_qe);
- /* Lock the mutex again: */
- _SPINLOCK(&(*mutex)->lock);
- }
+ } else if ((*mutex)->m_owner == _thread_run)
+ ret = mutex_self_lock(*mutex);
+ else {
+ /*
+ * Join the queue of threads waiting to lock
+ * the mutex:
+ */
+ mutex_queue_enq(*mutex, _thread_run);
+
+ /*
+ * Keep a pointer to the mutex this thread
+ * is waiting on:
+ */
+ _thread_run->data.mutex = *mutex;
+
+ /*
+ * Unlock the mutex structure and schedule the
+ * next thread:
+ */
+ _thread_kern_sched_state_unlock(PS_MUTEX_WAIT,
+ &(*mutex)->lock, __FILE__, __LINE__);
+
+ /* Lock the mutex structure again: */
+ _SPINLOCK(&(*mutex)->lock);
+
+ /*
+ * This thread is no longer waiting for
+ * the mutex:
+ */
+ _thread_run->data.mutex = NULL;
}
break;
- /* Counting mutex: */
- case PTHREAD_MUTEX_RECURSIVE:
- /*
- * Enter a loop to wait for the mutex to be locked by the
- * current thread:
- */
- while ((*mutex)->m_owner != _thread_run) {
- /* Check if the mutex is not locked: */
- if ((*mutex)->m_owner == NULL) {
- /* Lock the mutex for this thread: */
- (*mutex)->m_owner = _thread_run;
-
- /* Reset the lock count for this mutex: */
- (*mutex)->m_data.m_count = 0;
- } else {
- /*
- * Join the queue of threads waiting to lock
- * the mutex:
- */
- _thread_queue_enq(&(*mutex)->m_queue, _thread_run);
+		/* POSIX priority inheritance mutex: */
+ case PTHREAD_PRIO_INHERIT:
+ /* Check if this mutex is not locked: */
+ if ((*mutex)->m_owner == NULL) {
+ /* Lock the mutex for this thread: */
+ (*mutex)->m_owner = _thread_run;
- /* Wait for the mutex: */
- _thread_kern_sched_state_unlock(
- PS_MUTEX_WAIT, &(*mutex)->lock,
- __FILE__, __LINE__);
+ /* Track number of priority mutexes owned: */
+ _thread_run->priority_mutex_count++;
- /* Lock the mutex again: */
- _SPINLOCK(&(*mutex)->lock);
- }
+ /*
+ * The mutex takes on attributes of the
+ * running thread when there are no waiters.
+ */
+ (*mutex)->m_prio = _thread_run->active_priority;
+ (*mutex)->m_saved_prio =
+ _thread_run->inherited_priority;
+ _thread_run->inherited_priority =
+ (*mutex)->m_prio;
+
+ /* Add to the list of owned mutexes: */
+ TAILQ_INSERT_TAIL(&_thread_run->mutexq,
+ (*mutex), m_qe);
+
+ } else if ((*mutex)->m_owner == _thread_run)
+ ret = mutex_self_lock(*mutex);
+ else {
+ /*
+ * Join the queue of threads waiting to lock
+ * the mutex:
+ */
+ mutex_queue_enq(*mutex, _thread_run);
+
+ /*
+ * Keep a pointer to the mutex this thread
+ * is waiting on:
+ */
+ _thread_run->data.mutex = *mutex;
+
+ if (_thread_run->active_priority >
+ (*mutex)->m_prio)
+ /* Adjust priorities: */
+ mutex_priority_adjust(*mutex);
+
+ /*
+ * Unlock the mutex structure and schedule the
+ * next thread:
+ */
+ _thread_kern_sched_state_unlock(PS_MUTEX_WAIT,
+ &(*mutex)->lock, __FILE__, __LINE__);
+
+ /* Lock the mutex structure again: */
+ _SPINLOCK(&(*mutex)->lock);
+
+ /*
+ * This thread is no longer waiting for
+ * the mutex:
+ */
+ _thread_run->data.mutex = NULL;
}
+ break;
+
+ /* POSIX priority protection mutex: */
+ case PTHREAD_PRIO_PROTECT:
+ /* Check for a priority ceiling violation: */
+ if (_thread_run->active_priority > (*mutex)->m_prio)
+ ret = EINVAL;
+
+ /* Check if this mutex is not locked: */
+ else if ((*mutex)->m_owner == NULL) {
+ /*
+ * Lock the mutex for the running
+ * thread:
+ */
+ (*mutex)->m_owner = _thread_run;
+
+ /* Track number of priority mutexes owned: */
+ _thread_run->priority_mutex_count++;
- /* Increment the lock count for this mutex: */
- (*mutex)->m_data.m_count++;
+ /*
+ * The running thread inherits the ceiling
+ * priority of the mutex and executes at that
+ * priority:
+ */
+ _thread_run->active_priority = (*mutex)->m_prio;
+ (*mutex)->m_saved_prio =
+ _thread_run->inherited_priority;
+ _thread_run->inherited_priority =
+ (*mutex)->m_prio;
+
+ /* Add to the list of owned mutexes: */
+ TAILQ_INSERT_TAIL(&_thread_run->mutexq,
+ (*mutex), m_qe);
+ } else if ((*mutex)->m_owner == _thread_run)
+ ret = mutex_self_lock(*mutex);
+ else {
+ /*
+ * Join the queue of threads waiting to lock
+ * the mutex:
+ */
+ mutex_queue_enq(*mutex, _thread_run);
+
+ /*
+ * Keep a pointer to the mutex this thread
+ * is waiting on:
+ */
+ _thread_run->data.mutex = *mutex;
+
+ /* Clear any previous error: */
+ _thread_run->error = 0;
+
+ /*
+ * Unlock the mutex structure and schedule the
+ * next thread:
+ */
+ _thread_kern_sched_state_unlock(PS_MUTEX_WAIT,
+ &(*mutex)->lock, __FILE__, __LINE__);
+
+ /* Lock the mutex structure again: */
+ _SPINLOCK(&(*mutex)->lock);
+
+ /*
+ * The threads priority may have changed while
+ * waiting for the mutex causing a ceiling
+ * violation.
+ */
+ ret = _thread_run->error;
+ _thread_run->error = 0;
+
+ /*
+ * This thread is no longer waiting for
+ * the mutex:
+ */
+ _thread_run->data.mutex = NULL;
+ }
break;
/* Trap invalid mutex types: */
@@ -334,6 +556,12 @@ pthread_mutex_lock(pthread_mutex_t * mutex)
/* Unlock the mutex structure: */
_SPINUNLOCK(&(*mutex)->lock);
+
+ /*
+		 * Re-enable preemption and yield if a scheduling signal
+ * arrived while in the critical region:
+ */
+ _thread_kern_sched_undefer();
}
/* Return the completion status: */
@@ -343,56 +571,375 @@ pthread_mutex_lock(pthread_mutex_t * mutex)
int
pthread_mutex_unlock(pthread_mutex_t * mutex)
{
- int ret = 0;
+ return (mutex_unlock_common(mutex, /* add reference */ 0));
+}
+
+int
+_mutex_cv_unlock(pthread_mutex_t * mutex)
+{
+ return (mutex_unlock_common(mutex, /* add reference */ 1));
+}
+
+int
+_mutex_cv_lock(pthread_mutex_t * mutex)
+{
+ int ret;
+ if ((ret = pthread_mutex_lock(mutex)) == 0)
+ (*mutex)->m_refcount--;
+ return (ret);
+}
+
+static inline int
+mutex_self_trylock(pthread_mutex_t mutex)
+{
+ int ret = 0;
+
+ switch (mutex->m_type) {
+
+ /* case PTHREAD_MUTEX_DEFAULT: */
+ case PTHREAD_MUTEX_ERRORCHECK:
+ case PTHREAD_MUTEX_NORMAL:
+ /*
+		 * POSIX specifies that a trylock on an already-locked
+		 * mutex fails with EBUSY rather than deadlocking.
+ */
+ ret = EBUSY;
+ break;
+
+ case PTHREAD_MUTEX_RECURSIVE:
+ /* Increment the lock count: */
+ mutex->m_data.m_count++;
+ break;
+
+ default:
+ /* Trap invalid mutex types; */
+ ret = EINVAL;
+ }
+
+ return(ret);
+}
+
+static inline int
+mutex_self_lock(pthread_mutex_t mutex)
+{
+ int ret = 0;
+
+ switch (mutex->m_type) {
+ /* case PTHREAD_MUTEX_DEFAULT: */
+ case PTHREAD_MUTEX_ERRORCHECK:
+ /*
+ * POSIX specifies that mutexes should return EDEADLK if a
+ * recursive lock is detected.
+ */
+ ret = EDEADLK;
+ break;
+
+ case PTHREAD_MUTEX_NORMAL:
+ /*
+ * What SS2 define as a 'normal' mutex. Intentionally
+ * deadlock on attempts to get a lock you already own.
+ */
+ _thread_kern_sched_state_unlock(PS_DEADLOCK,
+ &mutex->lock, __FILE__, __LINE__);
+ break;
+
+ case PTHREAD_MUTEX_RECURSIVE:
+ /* Increment the lock count: */
+ mutex->m_data.m_count++;
+ break;
+
+ default:
+ /* Trap invalid mutex types; */
+ ret = EINVAL;
+ }
+
+ return(ret);
+}
+
+static inline int
+mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
+{
+ int ret = 0;
if (mutex == NULL || *mutex == NULL) {
ret = EINVAL;
} else {
+ /*
+ * Guard against being preempted by a scheduling signal.
+		 * To support priority inheritance mutexes, we need to
+ * maintain lists of mutex ownerships for each thread as
+ * well as lists of waiting threads for each mutex. In
+ * order to propagate priorities we need to atomically
+ * walk these lists and cannot rely on a single mutex
+ * lock to provide protection against modification.
+ */
+ _thread_kern_sched_defer();
+
/* Lock the mutex structure: */
_SPINLOCK(&(*mutex)->lock);
/* Process according to mutex type: */
- switch ((*mutex)->m_type) {
- /* Default & normal mutexes do not really need to check for
- any error conditions: */
- case PTHREAD_MUTEX_NORMAL:
- case PTHREAD_MUTEX_DEFAULT:
- case PTHREAD_MUTEX_ERRORCHECK:
- /* Check if the running thread is not the owner of the mutex: */
+ switch ((*mutex)->m_protocol) {
+ /* Default POSIX mutex: */
+ case PTHREAD_PRIO_NONE:
+ /*
+ * Check if the running thread is not the owner of the
+ * mutex:
+ */
if ((*mutex)->m_owner != _thread_run) {
- /* Return an invalid argument error: */
- ret = (*mutex)->m_owner ? EPERM : EINVAL;
+ /*
+ * Return an invalid argument error for no
+ * owner and a permission error otherwise:
+ */
+ ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
}
+ else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
+ ((*mutex)->m_data.m_count > 1)) {
+ /* Decrement the count: */
+ (*mutex)->m_data.m_count--;
+ } else {
+ /*
+ * Clear the count in case this is recursive
+ * mutex.
+ */
+ (*mutex)->m_data.m_count = 0;
+
+ /* Remove the mutex from the threads queue. */
+ TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
+ (*mutex), m_qe);
+
+ /*
+ * Get the next thread from the queue of
+ * threads waiting on the mutex:
+ */
+ if (((*mutex)->m_owner =
+ mutex_queue_deq(*mutex)) != NULL) {
+ /*
+ * Allow the new owner of the mutex to
+ * run:
+ */
+ PTHREAD_NEW_STATE((*mutex)->m_owner,
+ PS_RUNNING);
+ }
+ }
+ break;
+
+ /* POSIX priority inheritence mutex: */
+ case PTHREAD_PRIO_INHERIT:
/*
- * Get the next thread from the queue of threads waiting on
- * the mutex:
+ * Check if the running thread is not the owner of the
+ * mutex:
*/
- else if (((*mutex)->m_owner = _thread_queue_deq(&(*mutex)->m_queue)) != NULL) {
- /* Allow the new owner of the mutex to run: */
- PTHREAD_NEW_STATE((*mutex)->m_owner,PS_RUNNING);
+ if ((*mutex)->m_owner != _thread_run) {
+ /*
+ * Return an invalid argument error for no
+ * owner and a permission error otherwise:
+ */
+ ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
+ }
+ else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
+ ((*mutex)->m_data.m_count > 1)) {
+ /* Decrement the count: */
+ (*mutex)->m_data.m_count--;
+ } else {
+ /*
+ * Clear the count in case this is recursive
+ * mutex.
+ */
+ (*mutex)->m_data.m_count = 0;
+
+ /*
+ * Restore the threads inherited priority and
+ * recompute the active priority (being careful
+ * not to override changes in the threads base
+ * priority subsequent to locking the mutex).
+ */
+ _thread_run->inherited_priority =
+ (*mutex)->m_saved_prio;
+ _thread_run->active_priority =
+ MAX(_thread_run->inherited_priority,
+ _thread_run->base_priority);
+
+ /*
+ * This thread now owns one less priority mutex.
+ */
+ _thread_run->priority_mutex_count--;
+
+ /* Remove the mutex from the threads queue. */
+ TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
+ (*mutex), m_qe);
+
+ /*
+ * Get the next thread from the queue of threads
+ * waiting on the mutex:
+ */
+ if (((*mutex)->m_owner =
+ mutex_queue_deq(*mutex)) == NULL)
+ /* This mutex has no priority. */
+ (*mutex)->m_prio = 0;
+ else {
+ /*
+ * Track number of priority mutexes owned:
+ */
+ (*mutex)->m_owner->priority_mutex_count++;
+
+ /*
+ * Add the mutex to the threads list
+ * of owned mutexes:
+ */
+ TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
+ (*mutex), m_qe);
+
+ /*
+ * The owner is no longer waiting for
+ * this mutex:
+ */
+ (*mutex)->m_owner->data.mutex = NULL;
+
+ /*
+ * Set the priority of the mutex. Since
+ * our waiting threads are in descending
+ * priority order, the priority of the
+ * mutex becomes the active priority of
+ * the thread we just dequeued.
+ */
+ (*mutex)->m_prio =
+ (*mutex)->m_owner->active_priority;
+
+ /*
+ * Save the owning threads inherited
+ * priority:
+ */
+ (*mutex)->m_saved_prio =
+ (*mutex)->m_owner->inherited_priority;
+
+ /*
+ * The owning threads inherited priority
+ * now becomes his active priority (the
+ * priority of the mutex).
+ */
+ (*mutex)->m_owner->inherited_priority =
+ (*mutex)->m_prio;
+
+ /*
+ * Allow the new owner of the mutex to
+ * run:
+ */
+ PTHREAD_NEW_STATE((*mutex)->m_owner,
+ PS_RUNNING);
+ }
}
break;
- /* Counting mutex: */
- case PTHREAD_MUTEX_RECURSIVE:
- /* Check if the running thread is not the owner of the mutex: */
+ /* POSIX priority ceiling mutex: */
+ case PTHREAD_PRIO_PROTECT:
+ /*
+ * Check if the running thread is not the owner of the
+ * mutex:
+ */
if ((*mutex)->m_owner != _thread_run) {
- /* Return an invalid argument error: */
- ret = EINVAL;
+ /*
+ * Return an invalid argument error for no
+ * owner and a permission error otherwise:
+ */
+ ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
}
- /* Check if there are still counts: */
- else if ((*mutex)->m_data.m_count > 1) {
+ else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
+ ((*mutex)->m_data.m_count > 1)) {
/* Decrement the count: */
(*mutex)->m_data.m_count--;
} else {
+ /*
+ * Clear the count in case this is recursive
+ * mutex.
+ */
(*mutex)->m_data.m_count = 0;
+
/*
- * Get the next thread from the queue of threads waiting on
- * the mutex:
+ * Restore the threads inherited priority and
+ * recompute the active priority (being careful
+ * not to override changes in the threads base
+ * priority subsequent to locking the mutex).
+ */
+ _thread_run->inherited_priority =
+ (*mutex)->m_saved_prio;
+ _thread_run->active_priority =
+ MAX(_thread_run->inherited_priority,
+ _thread_run->base_priority);
+
+ /*
+ * This thread now owns one less priority mutex.
*/
- if (((*mutex)->m_owner = _thread_queue_deq(&(*mutex)->m_queue)) != NULL) {
- /* Allow the new owner of the mutex to run: */
- PTHREAD_NEW_STATE((*mutex)->m_owner,PS_RUNNING);
+ _thread_run->priority_mutex_count--;
+
+ /* Remove the mutex from the threads queue. */
+ TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
+ (*mutex), m_qe);
+
+ /*
+ * Enter a loop to find a waiting thread whose
+ * active priority will not cause a ceiling
+ * violation:
+ */
+ while ((((*mutex)->m_owner =
+ mutex_queue_deq(*mutex)) != NULL) &&
+ ((*mutex)->m_owner->active_priority >
+ (*mutex)->m_prio)) {
+ /*
+ * Either the mutex ceiling priority
+ * been lowered and/or this threads
+ * priority has been raised subsequent
+ * to this thread being queued on the
+ * waiting list.
+ */
+ (*mutex)->m_owner->error = EINVAL;
+ PTHREAD_NEW_STATE((*mutex)->m_owner,
+ PS_RUNNING);
+ }
+
+ /* Check for a new owner: */
+ if ((*mutex)->m_owner != NULL) {
+ /*
+ * Track number of priority mutexes owned:
+ */
+ (*mutex)->m_owner->priority_mutex_count++;
+
+ /*
+ * Add the mutex to the threads list
+ * of owned mutexes:
+ */
+ TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
+ (*mutex), m_qe);
+
+ /*
+ * The owner is no longer waiting for
+ * this mutex:
+ */
+ (*mutex)->m_owner->data.mutex = NULL;
+
+ /*
+ * Save the owning threads inherited
+ * priority:
+ */
+ (*mutex)->m_saved_prio =
+ (*mutex)->m_owner->inherited_priority;
+
+ /*
+ * The owning thread inherits the
+ * ceiling priority of the mutex and
+ * executes at that priority:
+ */
+ (*mutex)->m_owner->inherited_priority =
+ (*mutex)->m_prio;
+ (*mutex)->m_owner->active_priority =
+ (*mutex)->m_prio;
+
+ /*
+ * Allow the new owner of the mutex to
+ * run:
+ */
+ PTHREAD_NEW_STATE((*mutex)->m_owner,
+ PS_RUNNING);
}
}
break;
@@ -404,11 +951,348 @@ pthread_mutex_unlock(pthread_mutex_t * mutex)
break;
}
+ if ((ret == 0) && (add_reference != 0)) {
+ /* Increment the reference count: */
+ (*mutex)->m_refcount++;
+ }
+
/* Unlock the mutex structure: */
_SPINUNLOCK(&(*mutex)->lock);
+
+ /*
+		 * Re-enable preemption and yield if a scheduling signal
+ * arrived while in the critical region:
+ */
+ _thread_kern_sched_undefer();
}
/* Return the completion status: */
return (ret);
}
+
+
+/*
+ * This function is called when a change in base priority occurs
+ * for a thread that is holding, or waiting for, a priority
+ * protection or inheritance mutex.  A change in a thread's base
+ * priority can effect changes to active priorities of other threads
+ * and to the ordering of mutex locking by waiting threads.
+ *
+ * This must be called while thread scheduling is deferred.
+ */
+void
+_mutex_notify_priochange(pthread_t pthread)
+{
+	/* Adjust the priorities of any owned priority mutexes: */
+ if (pthread->priority_mutex_count > 0) {
+ /*
+ * Rescan the mutexes owned by this thread and correct
+ * their priorities to account for this threads change
+ * in priority. This has the side effect of changing
+ * the threads active priority.
+ */
+ mutex_rescan_owned(pthread, /* rescan all owned */ NULL);
+ }
+
+ /*
+ * If this thread is waiting on a priority inheritance mutex,
+ * check for priority adjustments. A change in priority can
+ * also effect a ceiling violation(*) for a thread waiting on
+ * a priority protection mutex; we don't perform the check here
+ * as it is done in pthread_mutex_unlock.
+ *
+ * (*) It should be noted that a priority change to a thread
+ * _after_ taking and owning a priority ceiling mutex
+ * does not affect ownership of that mutex; the ceiling
+ * priority is only checked before mutex ownership occurs.
+ */
+ if (pthread->state == PS_MUTEX_WAIT) {
+ /* Lock the mutex structure: */
+ _SPINLOCK(&pthread->data.mutex->lock);
+
+ /*
+ * Check to make sure this thread is still in the same state
+ * (the spinlock above can yield the CPU to another thread):
+ */
+ if (pthread->state == PS_MUTEX_WAIT) {
+ /*
+ * Remove and reinsert this thread into the list of
+ * waiting threads to preserve decreasing priority
+ * order.
+ */
+ mutex_queue_remove(pthread->data.mutex, pthread);
+ mutex_queue_enq(pthread->data.mutex, pthread);
+
+ if (pthread->data.mutex->m_protocol ==
+ PTHREAD_PRIO_INHERIT) {
+ /* Adjust priorities: */
+ mutex_priority_adjust(pthread->data.mutex);
+ }
+ }
+
+ /* Unlock the mutex structure: */
+ _SPINUNLOCK(&pthread->data.mutex->lock);
+ }
+}
+
+/*
+ * Called when a new thread is added to the mutex waiting queue or
+ * when a threads priority changes that is already in the mutex
+ * waiting queue.
+ */
+static void
+mutex_priority_adjust(pthread_mutex_t mutex)
+{
+ pthread_t pthread_next, pthread = mutex->m_owner;
+ int temp_prio;
+ pthread_mutex_t m = mutex;
+
+ /*
+ * Calculate the mutex priority as the maximum of the highest
+ * active priority of any waiting threads and the owning threads
+ * active priority(*).
+ *
+ * (*) Because the owning threads current active priority may
+ * reflect priority inherited from this mutex (and the mutex
+ * priority may have changed) we must recalculate the active
+ * priority based on the threads saved inherited priority
+ * and its base priority.
+ */
+ pthread_next = TAILQ_FIRST(&m->m_queue); /* should never be NULL */
+ temp_prio = MAX(pthread_next->active_priority,
+ MAX(m->m_saved_prio, pthread->base_priority));
+
+ /* See if this mutex really needs adjusting: */
+ if (temp_prio == m->m_prio)
+ /* No need to propagate the priority: */
+ return;
+
+ /* Set new priority of the mutex: */
+ m->m_prio = temp_prio;
+
+ while (m != NULL) {
+ /*
+ * Save the threads priority before rescanning the
+ * owned mutexes:
+ */
+ temp_prio = pthread->active_priority;
+
+ /*
+ * Fix the priorities for all the mutexes this thread has
+ * locked since taking this mutex. This also has a
+ * potential side-effect of changing the threads priority.
+ */
+ mutex_rescan_owned(pthread, m);
+
+ /*
+ * If the thread is currently waiting on a mutex, check
+ * to see if the threads new priority has affected the
+ * priority of the mutex.
+ */
+ if ((temp_prio != pthread->active_priority) &&
+ (pthread->state == PS_MUTEX_WAIT) &&
+ (pthread->data.mutex->m_protocol == PTHREAD_PRIO_INHERIT)) {
+ /* Grab the mutex this thread is waiting on: */
+ m = pthread->data.mutex;
+
+ /*
+ * The priority for this thread has changed. Remove
+ * and reinsert this thread into the list of waiting
+ * threads to preserve decreasing priority order.
+ */
+ mutex_queue_remove(m, pthread);
+ mutex_queue_enq(m, pthread);
+
+ /* Grab the waiting thread with highest priority: */
+ pthread_next = TAILQ_FIRST(&m->m_queue);
+
+ /*
+ * Calculate the mutex priority as the maximum of the
+ * highest active priority of any waiting threads and
+ * the owning threads active priority.
+ */
+ temp_prio = MAX(pthread_next->active_priority,
+ MAX(m->m_saved_prio, m->m_owner->base_priority));
+
+ if (temp_prio != m->m_prio) {
+ /*
+ * The priority needs to be propagated to the
+ * mutex this thread is waiting on and up to
+ * the owner of that mutex.
+ */
+ m->m_prio = temp_prio;
+ pthread = m->m_owner;
+ }
+ else
+ /* We're done: */
+ m = NULL;
+
+ }
+ else
+ /* We're done: */
+ m = NULL;
+ }
+}
+
+static void
+mutex_rescan_owned (pthread_t pthread, pthread_mutex_t mutex)
+{
+ int active_prio, inherited_prio;
+ pthread_mutex_t m;
+ pthread_t pthread_next;
+
+ /*
+ * Start walking the mutexes the thread has taken since
+ * taking this mutex.
+ */
+ if (mutex == NULL) {
+ /*
+ * A null mutex means start at the beginning of the owned
+ * mutex list.
+ */
+ m = TAILQ_FIRST(&pthread->mutexq);
+
+ /* There is no inherited priority yet. */
+ inherited_prio = 0;
+ }
+ else {
+ /*
+ * The caller wants to start after a specific mutex. It
+		 * is assumed that this mutex is a priority inheritance
+ * mutex and that its priority has been correctly
+ * calculated.
+ */
+ m = TAILQ_NEXT(mutex, m_qe);
+
+ /* Start inheriting priority from the specified mutex. */
+ inherited_prio = mutex->m_prio;
+ }
+ active_prio = MAX(inherited_prio, pthread->base_priority);
+
+ while (m != NULL) {
+ /*
+		 * We only want to deal with priority inheritance
+ * mutexes. This might be optimized by only placing
+ * priority inheritence mutexes into the owned mutex
+ * list, but it may prove to be useful having all
+ * owned mutexes in this list. Consider a thread
+ * exiting while holding mutexes...
+ */
+ if (m->m_protocol == PTHREAD_PRIO_INHERIT) {
+ /*
+ * Fix the owners saved (inherited) priority to
+ * reflect the priority of the previous mutex.
+ */
+ m->m_saved_prio = inherited_prio;
+
+ if ((pthread_next = TAILQ_FIRST(&m->m_queue)) != NULL)
+ /* Recalculate the priority of the mutex: */
+ m->m_prio = MAX(active_prio,
+ pthread_next->active_priority);
+ else
+ m->m_prio = active_prio;
+
+ /* Recalculate new inherited and active priorities: */
+ inherited_prio = m->m_prio;
+ active_prio = MAX(m->m_prio, pthread->base_priority);
+ }
+
+ /* Advance to the next mutex owned by this thread: */
+ m = TAILQ_NEXT(m, m_qe);
+ }
+
+ /*
+ * Fix the threads inherited priority and recalculate its
+ * active priority.
+ */
+ pthread->inherited_priority = inherited_prio;
+ active_prio = MAX(inherited_prio, pthread->base_priority);
+
+ if (active_prio != pthread->active_priority) {
+ /*
+ * If this thread is in the priority queue, it must be
+ * removed and reinserted for its new priority.
+ */
+ if ((pthread != _thread_run) &&
+ (pthread->state == PS_RUNNING)) {
+ /*
+ * Remove the thread from the priority queue
+ * before changing its priority:
+ */
+ PTHREAD_PRIOQ_REMOVE(pthread);
+
+ /*
+ * POSIX states that if the priority is being
+ * lowered, the thread must be inserted at the
+ * head of the queue for its priority if it owns
+			 * any priority protection or inheritance mutexes.
+ */
+ if ((active_prio < pthread->active_priority) &&
+ (pthread->priority_mutex_count > 0)) {
+ /* Set the new active priority. */
+ pthread->active_priority = active_prio;
+
+ PTHREAD_PRIOQ_INSERT_HEAD(pthread);
+ }
+ else {
+ /* Set the new active priority. */
+ pthread->active_priority = active_prio;
+
+ PTHREAD_PRIOQ_INSERT_TAIL(pthread);
+ }
+ }
+ else {
+ /* Set the new active priority. */
+ pthread->active_priority = active_prio;
+ }
+ }
+}
+
+/*
+ * Dequeue a waiting thread from the head of a mutex queue in descending
+ * priority order.
+ */
+static inline pthread_t
+mutex_queue_deq(pthread_mutex_t mutex)
+{
+ pthread_t pthread;
+
+ if ((pthread = TAILQ_FIRST(&mutex->m_queue)) != NULL)
+ TAILQ_REMOVE(&mutex->m_queue, pthread, qe);
+
+ return(pthread);
+}
+
+/*
+ * Remove a waiting thread from a mutex queue in descending priority order.
+ */
+static inline void
+mutex_queue_remove(pthread_mutex_t mutex, pthread_t pthread)
+{
+ TAILQ_REMOVE(&mutex->m_queue, pthread, qe);
+}
+
+/*
+ * Enqueue a waiting thread to a queue in descending priority order.
+ */
+static inline void
+mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread)
+{
+ pthread_t tid = TAILQ_LAST(&mutex->m_queue, mutex_head);
+
+ /*
+ * For the common case of all threads having equal priority,
+ * we perform a quick check against the priority of the thread
+ * at the tail of the queue.
+ */
+ if ((tid == NULL) || (pthread->active_priority <= tid->active_priority))
+ TAILQ_INSERT_TAIL(&mutex->m_queue, pthread, qe);
+ else {
+ tid = TAILQ_FIRST(&mutex->m_queue);
+ while (pthread->active_priority <= tid->active_priority)
+ tid = TAILQ_NEXT(tid, qe);
+ TAILQ_INSERT_BEFORE(tid, pthread, qe);
+ }
+}
+
#endif
diff --git a/lib/libpthread/thread/thr_mutex_prioceiling.c b/lib/libpthread/thread/thr_mutex_prioceiling.c
new file mode 100644
index 0000000..edd9fb5
--- /dev/null
+++ b/lib/libpthread/thread/thr_mutex_prioceiling.c
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Daniel Eischen.
+ * 4. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+#include <string.h>
+#include <stdlib.h>
+#include <errno.h>
+#ifdef _THREAD_SAFE
+#include <pthread.h>
+#include "pthread_private.h"
+
+int
+pthread_mutexattr_getprioceiling(pthread_mutexattr_t *mattr, int *prioceiling)
+{
+ int ret = 0;
+
+ if ((mattr == NULL) || (*mattr == NULL))
+ ret = EINVAL;
+ else if ((*mattr)->m_protocol != PTHREAD_PRIO_PROTECT)
+ ret = EINVAL;
+ else
+ *prioceiling = (*mattr)->m_ceiling;
+
+ return(ret);
+}
+
+int
+pthread_mutexattr_setprioceiling(pthread_mutexattr_t *mattr, int prioceiling)
+{
+ int ret = 0;
+
+ if ((mattr == NULL) || (*mattr == NULL))
+ ret = EINVAL;
+ else if ((*mattr)->m_protocol != PTHREAD_PRIO_PROTECT)
+ ret = EINVAL;
+ else
+ (*mattr)->m_ceiling = prioceiling;
+
+ return(ret);
+}
+
+int
+pthread_mutex_getprioceiling(pthread_mutex_t *mutex,
+    int *prioceiling)
+{
+	int ret = 0;
+
+	if ((mutex == NULL) || (*mutex == NULL))
+		ret = EINVAL;
+	else if ((*mutex)->m_protocol != PTHREAD_PRIO_PROTECT)
+		ret = EINVAL;
+	else
+		*prioceiling = (*mutex)->m_prio;
+
+	return(ret);
+}
+
+int
+pthread_mutex_setprioceiling(pthread_mutex_t *mutex,
+ int prioceiling, int *old_ceiling)
+{
+ int ret = 0;
+
+ if ((mutex == NULL) || (*mutex == NULL))
+ ret = EINVAL;
+ else if ((*mutex)->m_protocol != PTHREAD_PRIO_PROTECT)
+ ret = EINVAL;
+ else {
+ /* Lock the mutex: */
+ if ((ret = pthread_mutex_lock(mutex)) == 0) {
+ /* Return the old ceiling and set the new ceiling: */
+ *old_ceiling = (*mutex)->m_prio;
+ (*mutex)->m_prio = prioceiling;
+
+ /* Unlock the mutex: */
+ ret = pthread_mutex_unlock(mutex);
+ }
+ }
+ return(ret);
+}
+#endif
diff --git a/lib/libpthread/thread/thr_mutex_protocol.c b/lib/libpthread/thread/thr_mutex_protocol.c
new file mode 100644
index 0000000..56c5542
--- /dev/null
+++ b/lib/libpthread/thread/thr_mutex_protocol.c
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Daniel Eischen.
+ * 4. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+#include <string.h>
+#include <stdlib.h>
+#include <errno.h>
+#ifdef _THREAD_SAFE
+#include <pthread.h>
+#include "pthread_private.h"
+
+int
+pthread_mutexattr_getprotocol(pthread_mutexattr_t *mattr, int *protocol)
+{
+ int ret = 0;
+
+ if ((mattr == NULL) || (*mattr == NULL))
+ ret = EINVAL;
+ else
+ *protocol = (*mattr)->m_protocol;
+
+ return(ret);
+}
+
+int
+pthread_mutexattr_setprotocol(pthread_mutexattr_t *mattr, int protocol)
+{
+ int ret = 0;
+
+ if ((mattr == NULL) || (*mattr == NULL) ||
+ (protocol < PTHREAD_PRIO_NONE) || (protocol > PTHREAD_PRIO_PROTECT))
+ ret = EINVAL;
+ else {
+ (*mattr)->m_protocol = protocol;
+ (*mattr)->m_ceiling = PTHREAD_MAX_PRIORITY;
+ }
+ return(ret);
+}
+
+#endif
diff --git a/lib/libpthread/thread/thr_priority_queue.c b/lib/libpthread/thread/thr_priority_queue.c
new file mode 100644
index 0000000..516a1e0
--- /dev/null
+++ b/lib/libpthread/thread/thr_priority_queue.c
@@ -0,0 +1,155 @@
+/*
+ * Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Daniel Eischen.
+ * 4. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+#include <stdlib.h>
+#include <sys/queue.h>
+#include <string.h>
+#ifdef _THREAD_SAFE
+#include <pthread.h>
+#include "pthread_private.h"
+
+/* Prototypes: */
+static void pq_insert_prio_list(pq_queue_t *pq, int prio);
+
+
+int
+_pq_init(pq_queue_t *pq, int minprio, int maxprio)
+{
+ int i, ret = 0;
+ int prioslots = maxprio - minprio + 1;
+
+ if (pq == NULL)
+ ret = -1;
+
+ /* Create the priority queue with (maxprio - minprio + 1) slots: */
+ else if ((pq->pq_lists =
+ (pq_list_t *) malloc(sizeof(pq_list_t) * prioslots)) == NULL)
+ ret = -1;
+
+ else {
+ /* Initialize the queue for each priority slot: */
+ for (i = 0; i < prioslots; i++) {
+ TAILQ_INIT(&pq->pq_lists[i].pl_head);
+ pq->pq_lists[i].pl_prio = i;
+ pq->pq_lists[i].pl_queued = 0;
+ }
+
+ /* Initialize the priority queue: */
+ TAILQ_INIT(&pq->pq_queue);
+
+ /* Remember the queue size: */
+ pq->pq_size = prioslots;
+ }
+ return (ret);
+}
+
+void
+_pq_remove(pq_queue_t *pq, pthread_t pthread)
+{
+ int prio = pthread->active_priority;
+
+ TAILQ_REMOVE(&pq->pq_lists[prio].pl_head, pthread, pqe);
+}
+
+
+void
+_pq_insert_head(pq_queue_t *pq, pthread_t pthread)
+{
+ int prio = pthread->active_priority;
+
+ TAILQ_INSERT_HEAD(&pq->pq_lists[prio].pl_head, pthread, pqe);
+ if (pq->pq_lists[prio].pl_queued == 0)
+ /* Insert the list into the priority queue: */
+ pq_insert_prio_list(pq, prio);
+}
+
+
+void
+_pq_insert_tail(pq_queue_t *pq, pthread_t pthread)
+{
+ int prio = pthread->active_priority;
+
+ TAILQ_INSERT_TAIL(&pq->pq_lists[prio].pl_head, pthread, pqe);
+ if (pq->pq_lists[prio].pl_queued == 0)
+ /* Insert the list into the priority queue: */
+ pq_insert_prio_list(pq, prio);
+}
+
+
+pthread_t
+_pq_first(pq_queue_t *pq)
+{
+ pq_list_t *pql;
+ pthread_t pthread = NULL;
+
+ while (((pql = TAILQ_FIRST(&pq->pq_queue)) != NULL) &&
+ (pthread == NULL)) {
+ if ((pthread = TAILQ_FIRST(&pql->pl_head)) == NULL) {
+ /*
+ * The priority list is empty; remove the list
+ * from the queue.
+ */
+ TAILQ_REMOVE(&pq->pq_queue, pql, pl_link);
+
+ /* Mark the list as not being in the queue: */
+ pql->pl_queued = 0;
+ }
+ }
+ return (pthread);
+}
+
+
+static void
+pq_insert_prio_list(pq_queue_t *pq, int prio)
+{
+ pq_list_t *pql;
+
+ /*
+ * The priority queue is in descending priority order. Start at
+ * the beginning of the queue and find the list before which the
+ * new list should be inserted.
+ */
+ pql = TAILQ_FIRST(&pq->pq_queue);
+ while ((pql != NULL) && (pql->pl_prio > prio))
+ pql = TAILQ_NEXT(pql, pl_link);
+
+ /* Insert the list: */
+ if (pql == NULL)
+ TAILQ_INSERT_TAIL(&pq->pq_queue, &pq->pq_lists[prio], pl_link);
+ else
+ TAILQ_INSERT_BEFORE(pql, &pq->pq_lists[prio], pl_link);
+
+ /* Mark this list as being in the queue: */
+ pq->pq_lists[prio].pl_queued = 1;
+}
+
+#endif
diff --git a/lib/libpthread/thread/thr_private.h b/lib/libpthread/thread/thr_private.h
index 2d7e723..bf99a3b 100644
--- a/lib/libpthread/thread/thr_private.h
+++ b/lib/libpthread/thread/thr_private.h
@@ -55,6 +55,7 @@
#include <sys/time.h>
#include <sched.h>
#include <spinlock.h>
+#include <pthread_np.h>
/*
* Kernel fatal error handler macro.
@@ -65,16 +66,59 @@
#define stdout_debug(_x) _thread_sys_write(1,_x,strlen(_x));
#define stderr_debug(_x) _thread_sys_write(2,_x,strlen(_x));
+
/*
- * State change macro:
+ * Priority queue manipulation macros:
*/
-#define PTHREAD_NEW_STATE(thrd, newstate) { \
+#define PTHREAD_PRIOQ_INSERT_HEAD(thrd) _pq_insert_head(&_readyq,thrd)
+#define PTHREAD_PRIOQ_INSERT_TAIL(thrd) _pq_insert_tail(&_readyq,thrd)
+#define PTHREAD_PRIOQ_REMOVE(thrd) _pq_remove(&_readyq,thrd)
+#define PTHREAD_PRIOQ_FIRST _pq_first(&_readyq)
+
+/*
+ * Waiting queue manipulation macros:
+ */
+#define PTHREAD_WAITQ_INSERT(thrd) TAILQ_INSERT_TAIL(&_waitingq,thrd,pqe)
+#define PTHREAD_WAITQ_REMOVE(thrd) TAILQ_REMOVE(&_waitingq,thrd,pqe)
+
+/*
+ * State change macro without scheduling queue change:
+ */
+#define PTHREAD_SET_STATE(thrd, newstate) { \
(thrd)->state = newstate; \
(thrd)->fname = __FILE__; \
(thrd)->lineno = __LINE__; \
}
/*
+ * State change macro with scheduling queue change - This must be
+ * called with preemption deferred (see thread_kern_sched_[un]defer).
+ */
+#define PTHREAD_NEW_STATE(thrd, newstate) { \
+ if ((thrd)->state != newstate) { \
+ if ((thrd)->state == PS_RUNNING) { \
+ PTHREAD_PRIOQ_REMOVE(thrd); \
+ PTHREAD_WAITQ_INSERT(thrd); \
+ } else if (newstate == PS_RUNNING) { \
+ PTHREAD_WAITQ_REMOVE(thrd); \
+ PTHREAD_PRIOQ_INSERT_TAIL(thrd); \
+ } \
+ } \
+ PTHREAD_SET_STATE(thrd, newstate); \
+}
+
+/*
+ * Define the signals to be used for scheduling.
+ */
+#if defined(_PTHREADS_COMPAT_SCHED)
+#define _ITIMER_SCHED_TIMER ITIMER_VIRTUAL
+#define _SCHED_SIGNAL SIGVTALRM
+#else
+#define _ITIMER_SCHED_TIMER ITIMER_PROF
+#define _SCHED_SIGNAL SIGPROF
+#endif
+
+/*
* Queue definitions.
*/
struct pthread_queue {
@@ -84,10 +128,34 @@ struct pthread_queue {
};
/*
+ * Priority queues.
+ *
+ * XXX It'd be nice if these were contained in uthread_priority_queue.[ch].
+ */
+typedef struct pq_list {
+ TAILQ_HEAD(, pthread) pl_head; /* list of threads at this priority */
+ TAILQ_ENTRY(pq_list) pl_link; /* link for queue of priority lists */
+ int pl_prio; /* the priority of this list */
+ int pl_queued; /* is this in the priority queue */
+} pq_list_t;
+
+typedef struct pq_queue {
+ TAILQ_HEAD(, pq_list) pq_queue; /* queue of priority lists */
+ pq_list_t *pq_lists; /* array of all priority lists */
+ int pq_size; /* number of priority lists */
+} pq_queue_t;
+
+
+/*
* Static queue initialization values.
*/
#define PTHREAD_QUEUE_INITIALIZER { NULL, NULL, NULL }
+/*
+ * TailQ initialization values.
+ */
+#define TAILQ_INITIALIZER { NULL, NULL }
+
/*
* Mutex definitions.
*/
@@ -98,10 +166,31 @@ union pthread_mutex_data {
struct pthread_mutex {
enum pthread_mutextype m_type;
- struct pthread_queue m_queue;
+ int m_protocol;
+ TAILQ_HEAD(mutex_head, pthread) m_queue;
struct pthread *m_owner;
union pthread_mutex_data m_data;
long m_flags;
+ int m_refcount;
+
+ /*
+	 * Used for priority inheritance and protection.
+	 *
+	 * m_prio	- For priority inheritance, the highest active
+ * priority (threads locking the mutex inherit
+ * this priority). For priority protection, the
+ * ceiling priority of this mutex.
+ * m_saved_prio - mutex owners inherited priority before
+ * taking the mutex, restored when the owner
+ * unlocks the mutex.
+ */
+ int m_prio;
+ int m_saved_prio;
+
+ /*
+ * Link for list of all mutexes a thread currently owns.
+ */
+ TAILQ_ENTRY(pthread_mutex) m_qe;
/*
* Lock for accesses to this structure.
@@ -120,11 +209,13 @@ struct pthread_mutex {
* Static mutex initialization values.
*/
#define PTHREAD_MUTEX_STATIC_INITIALIZER \
- { MUTEX_TYPE_FAST, PTHREAD_QUEUE_INITIALIZER, \
- NULL, { NULL }, MUTEX_FLAGS_INITED }
+ { PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, TAILQ_INITIALIZER, \
+ NULL, { NULL }, MUTEX_FLAGS_INITED, 0, 0, 0, TAILQ_INITIALIZER }
struct pthread_mutex_attr {
enum pthread_mutextype m_type;
+ int m_protocol;
+ int m_ceiling;
long m_flags;
};
@@ -137,15 +228,16 @@ enum pthread_cond_type {
};
struct pthread_cond {
- enum pthread_cond_type c_type;
- struct pthread_queue c_queue;
- void *c_data;
- long c_flags;
+ enum pthread_cond_type c_type;
+ TAILQ_HEAD(cond_head, pthread) c_queue;
+ pthread_mutex_t c_mutex;
+ void *c_data;
+ long c_flags;
/*
* Lock for accesses to this structure.
*/
- spinlock_t lock;
+ spinlock_t lock;
};
struct pthread_cond_attr {
@@ -164,7 +256,8 @@ struct pthread_cond_attr {
* Static cond initialization values.
*/
#define PTHREAD_COND_STATIC_INITIALIZER \
- { COND_TYPE_FAST, PTHREAD_QUEUE_INITIALIZER, NULL, COND_FLAGS_INITED }
+	{ COND_TYPE_FAST, TAILQ_INITIALIZER, NULL, NULL, \
+	  COND_FLAGS_INITED }
/*
* Cleanup definitions.
@@ -176,7 +269,9 @@ struct pthread_cleanup {
};
struct pthread_attr {
- int schedparam_policy;
+ int sched_policy;
+ int sched_inherit;
+ int sched_interval;
int prio;
int suspend;
int flags;
@@ -254,9 +349,11 @@ enum pthread_state {
PS_WAIT_WAIT,
PS_SIGSUSPEND,
PS_SIGWAIT,
+ PS_SPINBLOCK,
PS_JOIN,
PS_SUSPENDED,
PS_DEAD,
+ PS_DEADLOCK,
PS_STATE_MAX
};
@@ -300,8 +397,8 @@ struct pthread_select_data {
};
union pthread_wait_data {
- pthread_mutex_t *mutex;
- pthread_cond_t *cond;
+ pthread_mutex_t mutex;
+ pthread_cond_t cond;
const sigset_t *sigwait; /* Waiting on a signal in sigwait */
struct {
short fd; /* Used when thread waiting on fd */
@@ -309,6 +406,7 @@ union pthread_wait_data {
char *fname; /* Source file name for debugging.*/
} fd;
struct pthread_select_data * select_data;
+ spinlock_t *spinlock;
};
/*
@@ -419,7 +517,11 @@ struct pthread {
struct pthread_queue join_queue;
/*
- * The current thread can belong to only one queue at a time.
+ * The current thread can belong to only one scheduling queue
+ * at a time (ready or waiting queue). It can also belong to
+ * a queue of threads waiting on mutexes or condition variables.
+ * Use pqe for the scheduling queue link (both ready and waiting),
+ * and qe for other links (mutexes and condition variables).
*
* Pointer to queue (if any) on which the current thread is waiting.
*
@@ -431,8 +533,11 @@ struct pthread {
/* Pointer to next element in queue. */
struct pthread *qnxt;
+ /* Priority queue entry for this thread: */
+ TAILQ_ENTRY(pthread) pqe;
+
/* Queue entry for this thread: */
- TAILQ_ENTRY(pthread) qe;
+ TAILQ_ENTRY(pthread) qe;
/* Wait data. */
union pthread_wait_data data;
@@ -446,10 +551,59 @@ struct pthread {
/* Signal number when in state PS_SIGWAIT: */
int signo;
+ /*
+ * Set to non-zero when this thread has deferred thread
+ * scheduling. We allow for recursive deferral.
+ */
+ int sched_defer_count;
+
+ /*
+ * Set to TRUE if this thread should yield after undeferring
+ * thread scheduling.
+ */
+ int yield_on_sched_undefer;
+
/* Miscellaneous data. */
- int flags;
-#define PTHREAD_EXITING 0x0100
- char pthread_priority;
+ int flags;
+#define PTHREAD_FLAGS_PRIVATE 0x0001
+#define PTHREAD_EXITING 0x0002
+#define PTHREAD_FLAGS_QUEUED 0x0004 /* in queue (qe is used) */
+#define PTHREAD_FLAGS_TRACE 0x0008
+
+ /*
+	 * Base priority is the user settable and retrievable priority
+ * of the thread. It is only affected by explicit calls to
+ * set thread priority and upon thread creation via a thread
+ * attribute or default priority.
+ */
+ char base_priority;
+
+ /*
+ * Inherited priority is the priority a thread inherits by
+ * taking a priority inheritence or protection mutex. It
+ * is not affected by base priority changes. Inherited
+ * priority defaults to and remains 0 until a mutex is taken
+ * that is being waited on by any other thread whose priority
+ * is non-zero.
+ */
+ char inherited_priority;
+
+ /*
+ * Active priority is always the maximum of the threads base
+ * priority and inherited priority. When there is a change
+ * in either the real or inherited priority, the active
+ * priority must be recalculated.
+ */
+ char active_priority;
+
+ /* Number of priority ceiling or protection mutexes owned. */
+ int priority_mutex_count;
+
+ /*
+ * Queue of currently owned mutexes.
+ */
+ TAILQ_HEAD(, pthread_mutex) mutexq;
+
void *ret;
const void **specific_data;
int specific_data_count;
@@ -475,6 +629,14 @@ SCLASS struct pthread * volatile _thread_run
;
#endif
+/* Ptr to the thread structure for the last user thread to run: */
+SCLASS struct pthread * volatile _last_user_thread
+#ifdef GLOBAL_PTHREAD_PRIVATE
+= &_thread_kern_thread;
+#else
+;
+#endif
+
/*
* Ptr to the thread running in single-threaded mode or NULL if
* running multi-threaded (default POSIX behaviour).
@@ -547,7 +709,7 @@ SCLASS struct pthread *_thread_initial
/* Default thread attributes: */
SCLASS struct pthread_attr pthread_attr_default
#ifdef GLOBAL_PTHREAD_PRIVATE
-= { SCHED_RR, PTHREAD_DEFAULT_PRIORITY, PTHREAD_CREATE_RUNNING,
+= { SCHED_RR, 0, TIMESLICE_USEC, PTHREAD_DEFAULT_PRIORITY, PTHREAD_CREATE_RUNNING,
PTHREAD_CREATE_JOINABLE, NULL, NULL, NULL, PTHREAD_STACK_DEFAULT };
#else
;
@@ -556,7 +718,7 @@ SCLASS struct pthread_attr pthread_attr_default
/* Default mutex attributes: */
SCLASS struct pthread_mutex_attr pthread_mutexattr_default
#ifdef GLOBAL_PTHREAD_PRIVATE
-= { MUTEX_TYPE_FAST, 0 };
+= { PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, 0 };
#else
;
#endif
@@ -614,6 +776,27 @@ SCLASS pthread_cond_t _gc_cond
*/
struct sigaction _thread_sigact[NSIG];
+/*
+ * Scheduling queues:
+ */
+SCLASS pq_queue_t _readyq;
+SCLASS TAILQ_HEAD(, pthread) _waitingq;
+
+/* Indicates that the waitingq now has threads ready to run. */
+SCLASS volatile int _waitingq_check_reqd
+#ifdef GLOBAL_PTHREAD_PRIVATE
+= 0
+#endif
+;
+
+/* Thread switch hook. */
+SCLASS pthread_switch_routine_t _sched_switch_hook
+#ifdef GLOBAL_PTHREAD_PRIVATE
+= NULL
+#endif
+;
+
+
/* Undefine the storage class specifier: */
#undef SCLASS
@@ -645,6 +828,14 @@ void _lock_thread(void);
void _lock_thread_list(void);
void _unlock_thread(void);
void _unlock_thread_list(void);
+int _mutex_cv_lock(pthread_mutex_t *);
+int _mutex_cv_unlock(pthread_mutex_t *);
+void _mutex_notify_priochange(struct pthread *);
+int _pq_init(struct pq_queue *pq, int, int);
+void _pq_remove(struct pq_queue *pq, struct pthread *);
+void _pq_insert_head(struct pq_queue *pq, struct pthread *);
+void _pq_insert_tail(struct pq_queue *pq, struct pthread *);
+struct pthread *_pq_first(struct pq_queue *pq);
void _thread_exit(char *, int, char *);
void _thread_fd_unlock(int, int);
void _thread_fd_unlock_debug(int, int, char *, int);
@@ -657,6 +848,8 @@ void _thread_kern_sched_state(enum pthread_state,char *fname,int lineno);
void _thread_kern_sched_state_unlock(enum pthread_state state,
spinlock_t *lock, char *fname, int lineno);
void _thread_kern_set_timeout(struct timespec *);
+void _thread_kern_sched_defer(void);
+void _thread_kern_sched_undefer(void);
void _thread_sig_handler(int, int, struct sigcontext *);
void _thread_start(void);
void _thread_start_sig_handler(void);
diff --git a/lib/libpthread/thread/thr_resume_np.c b/lib/libpthread/thread/thr_resume_np.c
index 7c5f46a..885a457 100644
--- a/lib/libpthread/thread/thr_resume_np.c
+++ b/lib/libpthread/thread/thr_resume_np.c
@@ -45,8 +45,21 @@ pthread_resume_np(pthread_t thread)
if ((ret = _find_thread(thread)) == 0) {
/* The thread exists. Is it suspended? */
if (thread->state != PS_SUSPENDED) {
+ /*
+ * Guard against preemption by a scheduling signal.
+ * A change of thread state modifies the waiting
+ * and priority queues.
+ */
+ _thread_kern_sched_defer();
+
/* Allow the thread to run. */
PTHREAD_NEW_STATE(thread,PS_RUNNING);
+
+ /*
+ * Reenable preemption and yield if a scheduling
+ * signal occurred while in the critical region.
+ */
+ _thread_kern_sched_undefer();
}
}
return(ret);
diff --git a/lib/libpthread/thread/thr_select.c b/lib/libpthread/thread/thr_select.c
index d6202db..6d7d7dc 100644
--- a/lib/libpthread/thread/thr_select.c
+++ b/lib/libpthread/thread/thr_select.c
@@ -35,6 +35,7 @@
#include <string.h>
#include <sys/types.h>
#include <sys/time.h>
+#include <sys/fcntl.h>
#ifdef _THREAD_SAFE
#include <pthread.h>
#include "pthread_private.h"
@@ -47,6 +48,7 @@ select(int numfds, fd_set * readfds, fd_set * writefds,
struct timespec ts;
struct timeval zero_timeout = {0, 0};
int i, ret = 0, got_all_locks = 1;
+ int f_wait = 1;
struct pthread_select_data data;
if (numfds > _thread_dtablesize) {
@@ -59,6 +61,8 @@ select(int numfds, fd_set * readfds, fd_set * writefds,
/* Set the wake up time: */
_thread_kern_set_timeout(&ts);
+ if (ts.tv_sec == 0 && ts.tv_nsec == 0)
+ f_wait = 0;
} else {
/* Wait for ever: */
_thread_kern_set_timeout(NULL);
@@ -110,7 +114,7 @@ select(int numfds, fd_set * readfds, fd_set * writefds,
if (exceptfds != NULL) {
memcpy(&data.exceptfds, exceptfds, sizeof(data.exceptfds));
}
- if ((ret = _thread_sys_select(data.nfds, &data.readfds, &data.writefds, &data.exceptfds, &zero_timeout)) == 0) {
+ if ((ret = _thread_sys_select(data.nfds, &data.readfds, &data.writefds, &data.exceptfds, &zero_timeout)) == 0 && f_wait) {
data.nfds = numfds;
FD_ZERO(&data.readfds);
FD_ZERO(&data.writefds);
diff --git a/lib/libpthread/thread/thr_setprio.c b/lib/libpthread/thread/thr_setprio.c
index dd89f15..008b6b0 100644
--- a/lib/libpthread/thread/thr_setprio.c
+++ b/lib/libpthread/thread/thr_setprio.c
@@ -20,7 +20,7 @@
* THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@@ -38,17 +38,13 @@
int
pthread_setprio(pthread_t pthread, int prio)
{
- int ret;
+ int ret, policy;
+ struct sched_param param;
- /* Check if the priority is invalid: */
- if (prio < PTHREAD_MIN_PRIORITY || prio > PTHREAD_MAX_PRIORITY)
- /* Return an invalid argument error: */
- ret = EINVAL;
-
- /* Find the thread in the list of active threads: */
- else if ((ret = _find_thread(pthread)) == 0)
- /* Set the thread priority: */
- pthread->pthread_priority = prio;
+ if ((ret = pthread_getschedparam(pthread, &policy, &param)) == 0) {
+ param.sched_priority = prio;
+ ret = pthread_setschedparam(pthread, policy, &param);
+ }
/* Return the error status: */
return (ret);
diff --git a/lib/libpthread/thread/thr_setschedparam.c b/lib/libpthread/thread/thr_setschedparam.c
new file mode 100644
index 0000000..93635da
--- /dev/null
+++ b/lib/libpthread/thread/thr_setschedparam.c
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Daniel Eischen.
+ * 4. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+#include <errno.h>
+#include <sys/param.h>
+#ifdef _THREAD_SAFE
+#include <pthread.h>
+#include "pthread_private.h"
+
+int
+pthread_setschedparam(pthread_t pthread, int policy, struct sched_param *param)
+{
+ int old_prio, in_readyq = 0, ret = 0;
+
+ if ((param == NULL) || (param->sched_priority < PTHREAD_MIN_PRIORITY) ||
+ (param->sched_priority > PTHREAD_MAX_PRIORITY) ||
+ (policy < SCHED_FIFO) || (policy > SCHED_RR))
+ /* Return an invalid argument error: */
+ ret = EINVAL;
+
+ /* Find the thread in the list of active threads: */
+ else if ((ret = _find_thread(pthread)) == 0) {
+ /*
+ * Guard against being preempted by a scheduling
+ * signal:
+ */
+ _thread_kern_sched_defer();
+
+ if (param->sched_priority != pthread->base_priority) {
+ /*
+ * Remove the thread from its current priority
+ * queue before any adjustments are made to its
+ * active priority:
+ */
+ if ((pthread != _thread_run) &&
+ (pthread->state == PS_RUNNING)) {
+ in_readyq = 1;
+ old_prio = pthread->active_priority;
+ PTHREAD_PRIOQ_REMOVE(pthread);
+ }
+
+ /* Set the thread base priority: */
+ pthread->base_priority = param->sched_priority;
+
+ /* Recalculate the active priority: */
+ pthread->active_priority = MAX(pthread->base_priority,
+ pthread->inherited_priority);
+
+ if (in_readyq) {
+ if ((pthread->priority_mutex_count > 0) &&
+ (old_prio > pthread->active_priority)) {
+ /*
+ * POSIX states that if the priority is
+ * being lowered, the thread must be
+ * inserted at the head of the queue for
+ * its priority if it owns any priority
+					 * protection or inheritance mutexes.
+ */
+ PTHREAD_PRIOQ_INSERT_HEAD(pthread);
+ }
+ else
+ PTHREAD_PRIOQ_INSERT_TAIL(pthread);
+ }
+
+ /*
+ * Check for any mutex priority adjustments. This
+ * includes checking for a priority mutex on which
+ * this thread is waiting.
+ */
+ _mutex_notify_priochange(pthread);
+ }
+
+ /* Set the scheduling policy: */
+ pthread->attr.sched_policy = policy;
+
+ /*
+		 * Re-enable preemption and yield if a scheduling signal
+ * arrived while in the critical region:
+ */
+ _thread_kern_sched_undefer();
+ }
+ return(ret);
+}
+#endif
diff --git a/lib/libpthread/thread/thr_sig.c b/lib/libpthread/thread/thr_sig.c
index 3e55d65..e51d949 100644
--- a/lib/libpthread/thread/thr_sig.c
+++ b/lib/libpthread/thread/thr_sig.c
@@ -38,6 +38,19 @@
#include <pthread.h>
#include "pthread_private.h"
+/*
+ * State change macro for signal handler:
+ */
+#define PTHREAD_SIG_NEW_STATE(thrd, newstate) { \
+ if ((_thread_run->sched_defer_count == 0) && \
+ (_thread_kern_in_sched == 0)) { \
+ PTHREAD_NEW_STATE(thrd, newstate); \
+ } else { \
+ _waitingq_check_reqd = 1; \
+ PTHREAD_SET_STATE(thrd, newstate); \
+ } \
+}
+
/* Static variables: */
static int volatile yield_on_unlock_thread = 0;
static spinlock_t thread_link_list_lock = _SPINLOCK_INITIALIZER;
@@ -94,14 +107,13 @@ _thread_sig_handler(int sig, int code, struct sigcontext * scp)
*/
_thread_sys_write(_thread_kern_pipe[1], &c, 1);
}
-
/* Check if the signal requires a dump of thread information: */
if (sig == SIGINFO)
/* Dump thread information to file: */
_thread_dump_info();
/* Check if an interval timer signal: */
- else if (sig == SIGVTALRM) {
+ else if (sig == _SCHED_SIGNAL) {
/* Check if the scheduler interrupt has come at an
* unfortunate time which one of the threads is
* modifying the thread list:
@@ -115,6 +127,14 @@ _thread_sig_handler(int sig, int code, struct sigcontext * scp)
yield_on_unlock_thread = 1;
/*
+ * Check if the scheduler interrupt has come when
+ * the currently running thread has deferred thread
+ * scheduling.
+ */
+ else if (_thread_run->sched_defer_count)
+ _thread_run->yield_on_sched_undefer = 1;
+
+ /*
* Check if the kernel has not been interrupted while
* executing scheduler code:
*/
@@ -170,18 +190,17 @@ _thread_sig_handler(int sig, int code, struct sigcontext * scp)
}
/*
- * Enter a loop to process each thread in the linked
+ * Enter a loop to process each thread in the waiting
* list that is sigwait-ing on a signal. Since POSIX
* doesn't specify which thread will get the signal
* if there are multiple waiters, we'll give it to the
* first one we find.
*/
- for (pthread = _thread_link_list; pthread != NULL;
- pthread = pthread->nxt) {
+ TAILQ_FOREACH(pthread, &_waitingq, pqe) {
if ((pthread->state == PS_SIGWAIT) &&
sigismember(pthread->data.sigwait, sig)) {
/* Change the state of the thread to run: */
- PTHREAD_NEW_STATE(pthread,PS_RUNNING);
+ PTHREAD_SIG_NEW_STATE(pthread,PS_RUNNING);
/* Return the signal number: */
pthread->signo = sig;
@@ -201,11 +220,19 @@ _thread_sig_handler(int sig, int code, struct sigcontext * scp)
* list:
*/
for (pthread = _thread_link_list; pthread != NULL;
- pthread = pthread->nxt)
+ pthread = pthread->nxt) {
+ pthread_t pthread_saved = _thread_run;
+
+ _thread_run = pthread;
_thread_signal(pthread,sig);
- /* Dispatch pending signals to the running thread: */
- _dispatch_signals();
+ /*
+ * Dispatch pending signals to the
+ * running thread:
+ */
+ _dispatch_signals();
+ _thread_run = pthread_saved;
+ }
}
/* Returns nothing. */
@@ -257,7 +284,7 @@ _thread_signal(pthread_t pthread, int sig)
pthread->interrupted = 1;
/* Change the state of the thread to run: */
- PTHREAD_NEW_STATE(pthread,PS_RUNNING);
+ PTHREAD_SIG_NEW_STATE(pthread,PS_RUNNING);
/* Return the signal number: */
pthread->signo = sig;
@@ -277,7 +304,7 @@ _thread_signal(pthread_t pthread, int sig)
pthread->interrupted = 1;
/* Change the state of the thread to run: */
- PTHREAD_NEW_STATE(pthread,PS_RUNNING);
+ PTHREAD_SIG_NEW_STATE(pthread,PS_RUNNING);
/* Return the signal number: */
pthread->signo = sig;
@@ -292,7 +319,7 @@ _thread_signal(pthread_t pthread, int sig)
if (!sigismember(&pthread->sigmask, sig) &&
_thread_sigact[sig - 1].sa_handler != SIG_DFL) {
/* Change the state of the thread to run: */
- PTHREAD_NEW_STATE(pthread,PS_RUNNING);
+ PTHREAD_SIG_NEW_STATE(pthread,PS_RUNNING);
/* Return the signal number: */
pthread->signo = sig;
diff --git a/lib/libpthread/thread/thr_sigaction.c b/lib/libpthread/thread/thr_sigaction.c
index 40f3850..73a3b21 100644
--- a/lib/libpthread/thread/thr_sigaction.c
+++ b/lib/libpthread/thread/thr_sigaction.c
@@ -71,7 +71,7 @@ sigaction(int sig, const struct sigaction * act, struct sigaction * oact)
* Check if the kernel needs to be advised of a change
* in signal action:
*/
- if (act != NULL && sig != SIGVTALRM && sig != SIGCHLD &&
+ if (act != NULL && sig != _SCHED_SIGNAL && sig != SIGCHLD &&
sig != SIGINFO) {
/* Initialise the global signal action structure: */
gact.sa_mask = act->sa_mask;
diff --git a/lib/libpthread/thread/thr_sigpending.c b/lib/libpthread/thread/thr_sigpending.c
new file mode 100644
index 0000000..44a39a6
--- /dev/null
+++ b/lib/libpthread/thread/thr_sigpending.c
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 1999 Daniel Eischen <eischen@vigrid.com>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by John Birrell.
+ * 4. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+#include <signal.h>
+#include <errno.h>
+#ifdef _THREAD_SAFE
+#include <pthread.h>
+#include "pthread_private.h"
+
+int
+sigpending(sigset_t * set)
+{
+ int ret = 0;
+
+ /* Check for a null signal set pointer: */
+ if (set == NULL) {
+ /* Return an invalid argument: */
+ ret = EINVAL;
+ }
+ else {
+ *set = _thread_run->sigpend;
+ }
+ /* Return the completion status: */
+ return (ret);
+}
+#endif
diff --git a/lib/libpthread/thread/thr_sigwait.c b/lib/libpthread/thread/thr_sigwait.c
index 590f9db..98a5359 100644
--- a/lib/libpthread/thread/thr_sigwait.c
+++ b/lib/libpthread/thread/thr_sigwait.c
@@ -56,7 +56,7 @@ sigwait(const sigset_t * set, int *sig)
*/
sigdelset(&act.sa_mask, SIGKILL);
sigdelset(&act.sa_mask, SIGSTOP);
- sigdelset(&act.sa_mask, SIGVTALRM);
+ sigdelset(&act.sa_mask, _SCHED_SIGNAL);
sigdelset(&act.sa_mask, SIGCHLD);
sigdelset(&act.sa_mask, SIGINFO);
diff --git a/lib/libpthread/thread/thr_spinlock.c b/lib/libpthread/thread/thr_spinlock.c
index 9da115e..4da3f8c 100644
--- a/lib/libpthread/thread/thr_spinlock.c
+++ b/lib/libpthread/thread/thr_spinlock.c
@@ -29,7 +29,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: uthread_spinlock.c,v 1.3 1998/06/06 07:27:06 jb Exp $
+ * $Id: uthread_spinlock.c,v 1.4 1998/06/09 23:13:10 jb Exp $
*
*/
@@ -56,12 +56,9 @@ _spinlock(spinlock_t *lck)
* it before we do.
*/
while(_atomic_lock(&lck->access_lock)) {
- /* Give up the time slice: */
- sched_yield();
-
- /* Check if already locked by the running thread: */
- if (lck->lock_owner == (long) _thread_run)
- return;
+		/* Block the thread until the lock becomes available. */
+ _thread_run->data.spinlock = lck;
+ _thread_kern_sched_state(PS_SPINBLOCK, __FILE__, __LINE__);
}
/* The running thread now owns the lock: */
@@ -81,24 +78,25 @@ _spinlock(spinlock_t *lck)
void
_spinlock_debug(spinlock_t *lck, char *fname, int lineno)
{
+ int cnt = 0;
+
/*
* Try to grab the lock and loop if another thread grabs
* it before we do.
*/
while(_atomic_lock(&lck->access_lock)) {
- /* Give up the time slice: */
- sched_yield();
-
- /* Check if already locked by the running thread: */
- if (lck->lock_owner == (long) _thread_run) {
+ cnt++;
+ if (cnt > 100) {
char str[256];
- snprintf(str, sizeof(str), "%s - Warning: Thread %p attempted to lock %p from %s (%d) which it had already locked in %s (%d)\n", __progname, _thread_run, lck, fname, lineno, lck->fname, lck->lineno);
+ snprintf(str, sizeof(str), "%s - Warning: Thread %p attempted to lock %p from %s (%d) was left locked from %s (%d)\n", __progname, _thread_run, lck, fname, lineno, lck->fname, lck->lineno);
_thread_sys_write(2,str,strlen(str));
-
- /* Create a thread dump to help debug this problem: */
- _thread_dump_info();
- return;
+ sleep(1);
+ cnt = 0;
}
+
+		/* Block the thread until the lock becomes available. */
+ _thread_run->data.spinlock = lck;
+ _thread_kern_sched_state(PS_SPINBLOCK, fname, lineno);
}
/* The running thread now owns the lock: */
diff --git a/lib/libpthread/thread/thr_suspend_np.c b/lib/libpthread/thread/thr_suspend_np.c
index 871683a..6a6eaf4 100644
--- a/lib/libpthread/thread/thr_suspend_np.c
+++ b/lib/libpthread/thread/thr_suspend_np.c
@@ -51,8 +51,21 @@ pthread_suspend_np(pthread_t thread)
thread->interrupted = 1;
}
+ /*
+ * Guard against preemption by a scheduling signal.
+ * A change of thread state modifies the waiting
+ * and priority queues.
+ */
+ _thread_kern_sched_defer();
+
/* Suspend the thread. */
PTHREAD_NEW_STATE(thread,PS_SUSPENDED);
+
+ /*
+ * Reenable preemption and yield if a scheduling signal
+ * occurred while in the critical region.
+ */
+ _thread_kern_sched_undefer();
}
return(ret);
}
diff --git a/lib/libpthread/thread/thr_switch_np.c b/lib/libpthread/thread/thr_switch_np.c
new file mode 100644
index 0000000..8373214
--- /dev/null
+++ b/lib/libpthread/thread/thr_switch_np.c
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Daniel Eischen.
+ * 4. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+#include <errno.h>
+#ifdef _THREAD_SAFE
+#include <pthread.h>
+#include <pthread_np.h>
+#include "pthread_private.h"
+
+
+int
+pthread_switch_add_np(pthread_switch_routine_t routine)
+{
+ int ret = 0;
+
+ if (routine == NULL)
+ /* Return an invalid argument error: */
+ ret = EINVAL;
+ else
+		/* Shouldn't need a lock to protect this assignment. */
+ _sched_switch_hook = routine;
+
+ return(ret);
+}
+
+int
+pthread_switch_delete_np(pthread_switch_routine_t routine)
+{
+ int ret = 0;
+
+ if (routine != _sched_switch_hook)
+ /* Return an invalid argument error: */
+ ret = EINVAL;
+ else
+		/* Shouldn't need a lock to protect this assignment. */
+ _sched_switch_hook = NULL;
+
+ return(ret);
+}
+#endif
OpenPOWER on IntegriCloud