summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authordavidxu <davidxu@FreeBSD.org>2005-04-02 01:20:00 +0000
committerdavidxu <davidxu@FreeBSD.org>2005-04-02 01:20:00 +0000
commitf066519e91e2290cb79ef12fe7c958ee462cda6c (patch)
tree6aaef5f553a6539306bd6f5679d039ed3c2abcce
parent3cc412b7837a105c757df856c422eb5f497bad67 (diff)
downloadFreeBSD-src-f066519e91e2290cb79ef12fe7c958ee462cda6c.zip
FreeBSD-src-f066519e91e2290cb79ef12fe7c958ee462cda6c.tar.gz
Import my recent 1:1 threading work. Some improved features include:
1. fast simple type mutex. 2. __thread TLS works. 3. asynchronous cancellation works (using signals). 4. thread synchronization is fully based on umtx; in particular, condition variables and other synchronization objects were rewritten to use umtx directly. Those objects can be shared between processes via shared memory, but that requires an ABI change which has not happened yet. 5. default stack size is increased to 1M on 32-bit platforms, 2M on 64-bit platforms. As a result, some mysql super-smack benchmarks show performance is improved massively. Okayed by: jeff, mtm, rwatson, scottl
-rw-r--r--lib/libthr/Makefile18
-rw-r--r--lib/libthr/arch/alpha/Makefile.inc4
-rw-r--r--lib/libthr/arch/alpha/alpha/pthread_md.c53
-rw-r--r--lib/libthr/arch/alpha/include/pthread_md.h (renamed from lib/libthr/arch/ia64/ia64/_curthread.c)54
-rw-r--r--lib/libthr/arch/amd64/Makefile.inc4
-rw-r--r--lib/libthr/arch/amd64/amd64/_setcurthread.c101
-rw-r--r--lib/libthr/arch/amd64/amd64/pthread_md.c57
-rw-r--r--lib/libthr/arch/amd64/include/pthread_md.h103
-rw-r--r--lib/libthr/arch/arm/Makefile.inc7
-rw-r--r--lib/libthr/arch/arm/arm/pthread_md.c (renamed from lib/libthr/arch/sparc64/sparc64/_setcurthread.c)47
-rw-r--r--lib/libthr/arch/arm/include/pthread_md.h106
-rw-r--r--lib/libthr/arch/i386/Makefile.inc4
-rw-r--r--lib/libthr/arch/i386/i386/_curthread.S17
-rw-r--r--lib/libthr/arch/i386/i386/_setcurthread.c136
-rw-r--r--lib/libthr/arch/i386/i386/pthread_md.c84
-rw-r--r--lib/libthr/arch/i386/include/pthread_md.h116
-rw-r--r--lib/libthr/arch/ia64/Makefile.inc4
-rw-r--r--lib/libthr/arch/ia64/ia64/pthread_md.c58
-rw-r--r--lib/libthr/arch/ia64/include/pthread_md.h (renamed from lib/libthr/arch/powerpc/powerpc/_curthread.c)60
-rw-r--r--lib/libthr/arch/powerpc/Makefile.inc4
-rw-r--r--lib/libthr/arch/powerpc/include/pthread_md.h80
-rw-r--r--lib/libthr/arch/powerpc/powerpc/pthread_md.c (renamed from lib/libthr/arch/alpha/alpha/_curthread.c)49
-rw-r--r--lib/libthr/arch/sparc64/Makefile.inc4
-rw-r--r--lib/libthr/arch/sparc64/include/pthread_md.h87
-rw-r--r--lib/libthr/arch/sparc64/sparc64/pthread_md.c56
-rw-r--r--lib/libthr/pthread.map365
-rw-r--r--lib/libthr/sys/Makefile.inc4
-rw-r--r--lib/libthr/sys/thr_error.c19
-rw-r--r--lib/libthr/thread/Makefile.inc22
-rw-r--r--lib/libthr/thread/thr_atfork.c17
-rw-r--r--lib/libthr/thread/thr_attr.c575
-rw-r--r--lib/libthr/thread/thr_barrier.c122
-rw-r--r--lib/libthr/thread/thr_barrierattr.c79
-rw-r--r--lib/libthr/thread/thr_cancel.c194
-rw-r--r--lib/libthr/thread/thr_clean.c12
-rw-r--r--lib/libthr/thread/thr_concurrency.c3
-rw-r--r--lib/libthr/thread/thr_cond.c608
-rw-r--r--lib/libthr/thread/thr_condattr.c128
-rw-r--r--lib/libthr/thread/thr_create.c227
-rw-r--r--lib/libthr/thread/thr_detach.c51
-rw-r--r--lib/libthr/thread/thr_exit.c154
-rw-r--r--lib/libthr/thread/thr_fork.c222
-rw-r--r--lib/libthr/thread/thr_getschedparam.c77
-rw-r--r--lib/libthr/thread/thr_info.c89
-rw-r--r--lib/libthr/thread/thr_init.c375
-rw-r--r--lib/libthr/thread/thr_join.c187
-rw-r--r--lib/libthr/thread/thr_kern.c141
-rw-r--r--lib/libthr/thread/thr_kill.c (renamed from lib/libthr/thread/thr_condattr_init.c)39
-rw-r--r--lib/libthr/thread/thr_list.c342
-rw-r--r--lib/libthr/thread/thr_main_np.c7
-rw-r--r--lib/libthr/thread/thr_mutex.c1974
-rw-r--r--lib/libthr/thread/thr_mutex_prioceiling.c57
-rw-r--r--lib/libthr/thread/thr_mutex_protocol.c30
-rw-r--r--lib/libthr/thread/thr_mutexattr.c (renamed from lib/libthr/thread/thr_mattr_kind_np.c)75
-rw-r--r--lib/libthr/thread/thr_once.c65
-rw-r--r--lib/libthr/thread/thr_printf.c46
-rw-r--r--lib/libthr/thread/thr_private.h1098
-rw-r--r--lib/libthr/thread/thr_pspinlock.c133
-rw-r--r--lib/libthr/thread/thr_resume_np.c55
-rw-r--r--lib/libthr/thread/thr_rwlock.c579
-rw-r--r--lib/libthr/thread/thr_self.c6
-rw-r--r--lib/libthr/thread/thr_sem.c332
-rw-r--r--lib/libthr/thread/thr_seterrno.c6
-rw-r--r--lib/libthr/thread/thr_setschedparam.c153
-rw-r--r--lib/libthr/thread/thr_sig.c246
-rw-r--r--lib/libthr/thread/thr_sigmask.c (renamed from lib/libthr/thread/thr_condattr_destroy.c)23
-rw-r--r--lib/libthr/thread/thr_single_np.c (renamed from lib/libthr/thread/thr_mutexattr_destroy.c)29
-rw-r--r--lib/libthr/thread/thr_spec.c159
-rw-r--r--lib/libthr/thread/thr_spinlock.c168
-rw-r--r--lib/libthr/thread/thr_stack.c230
-rw-r--r--lib/libthr/thread/thr_subr.c91
-rw-r--r--lib/libthr/thread/thr_suspend_np.c52
-rw-r--r--lib/libthr/thread/thr_switch_np.c (renamed from lib/libthr/thread/thr_mattr_init.c)33
-rw-r--r--lib/libthr/thread/thr_symbols.c (renamed from lib/libthr/thread/thr_find_thread.c)49
-rw-r--r--lib/libthr/thread/thr_syscalls.c619
-rw-r--r--lib/libthr/thread/thr_umtx.c80
-rw-r--r--lib/libthr/thread/thr_umtx.h81
77 files changed, 7200 insertions, 4641 deletions
diff --git a/lib/libthr/Makefile b/lib/libthr/Makefile
index 93e86e2..9ee6287 100644
--- a/lib/libthr/Makefile
+++ b/lib/libthr/Makefile
@@ -10,20 +10,26 @@
LIB=thr
SHLIB_MAJOR= 1
DEBUG_FLAGS=-g
-CFLAGS+=-DPTHREAD_KERNEL -D_THREAD_SAFE
+CFLAGS+=-DPTHREAD_KERNEL
CFLAGS+=-I${.CURDIR}/../libc/include -I${.CURDIR}/thread \
-I${.CURDIR}/../../include
+CFLAGS+=-I${.CURDIR}/arch/${MACHINE_ARCH}/include
+CFLAGS+=-I${.CURDIR}/sys
CFLAGS+=-I${.CURDIR}/../../libexec/rtld-elf
+CFLAGS+=-I${.CURDIR}/../../libexec/rtld-elf/${MACHINE_ARCH}
+CFLAGS+=-Winline
+
+# CFLAGS+=-DSYSTEM_SCOPE_ONLY
+
+LDFLAGS= -Wl,--version-script=${.CURDIR}/pthread.map
# enable extra internal consistancy checks
-CFLAGS+=-D_PTHREADS_INVARIANTS
+CFLAGS+=-D_PTHREADS_INVARIANTS -Wall
PRECIOUSLIB=
-WARNS?= 2
-
-.include "${.CURDIR}/thread/Makefile.inc"
-.include "${.CURDIR}/sys/Makefile.inc"
.include "${.CURDIR}/arch/${MACHINE_ARCH}/Makefile.inc"
+.include "${.CURDIR}/sys/Makefile.inc"
+.include "${.CURDIR}/thread/Makefile.inc"
.include <bsd.lib.mk>
diff --git a/lib/libthr/arch/alpha/Makefile.inc b/lib/libthr/arch/alpha/Makefile.inc
index 9f6f72c..508d2b4 100644
--- a/lib/libthr/arch/alpha/Makefile.inc
+++ b/lib/libthr/arch/alpha/Makefile.inc
@@ -1,5 +1,5 @@
# $FreeBSD$
-.PATH: ${.CURDIR}/sys ${.CURDIR}/arch/${MACHINE_ARCH}/${MACHINE_ARCH}
+.PATH: ${.CURDIR}/arch/${MACHINE_ARCH}/${MACHINE_ARCH}
-SRCS+= _curthread.c
+SRCS+= pthread_md.c
diff --git a/lib/libthr/arch/alpha/alpha/pthread_md.c b/lib/libthr/arch/alpha/alpha/pthread_md.c
new file mode 100644
index 0000000..1a82341
--- /dev/null
+++ b/lib/libthr/arch/alpha/alpha/pthread_md.c
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2003 Daniel Eischen <deischen@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Neither the name of the author nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "pthread_md.h"
+
+/*
+ * The constructors.
+ */
+struct tcb *
+_tcb_ctor(struct pthread *thread, int initial)
+{
+ struct tcb *tcb;
+
+ if ((tcb = malloc(sizeof(struct tcb))) != NULL) {
+ memset(tcb, 0, sizeof(struct tcb));
+ tcb->tcb_thread = thread;
+ }
+ return (tcb);
+}
+
+void
+_tcb_dtor(struct tcb *tcb)
+{
+ free(tcb);
+}
diff --git a/lib/libthr/arch/ia64/ia64/_curthread.c b/lib/libthr/arch/alpha/include/pthread_md.h
index 6c3de0e..003cba0 100644
--- a/lib/libthr/arch/ia64/ia64/_curthread.c
+++ b/lib/libthr/arch/alpha/include/pthread_md.h
@@ -22,38 +22,54 @@
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
*/
-#include <sys/cdefs.h>
-__FBSDID("$FreeBSD$");
+#ifndef _PTHREAD_MD_H_
+#define _PTHREAD_MD_H_
+#include <stddef.h>
#include <sys/types.h>
-#include <sys/ucontext.h>
-#include <pthread.h>
-#include "thr_private.h"
+#define DTV_OFFSET offsetof(struct tcb, tcb_dtv)
-register struct pthread *_tp __asm("%r13");
+/*
+ * Variant I tcb. The structure layout is fixed, don't blindly
+ * change it!
+ */
+struct tcb {
+ void *tcb_dtv;
+ struct pthread *tcb_thread;
+};
-struct pthread *
-_get_curthread(void)
-{
+#define _tp __builtin_thread_pointer()
+#define _tcb ((struct tcb *)_tp)
- return (_tp);
+struct tcb *_tcb_ctor(struct pthread *, int);
+void _tcb_dtor(struct tcb *);
+
+/* Called from the thread to set its private data. */
+static __inline void
+_tcb_set(struct tcb *tcb)
+{
+ __builtin_set_thread_pointer(tcb);
}
-void
-_retire_thread(void *v)
+static __inline struct tcb *
+_tcb_get(void)
{
+ return (_tcb);
}
-void *
-_set_curthread(ucontext_t *uc, struct pthread *thread, int *err)
+extern struct pthread *_thr_initial;
+
+static __inline struct pthread *
+_get_curthread(void)
{
- *err = 0;
- if (uc != NULL)
- uc->uc_mcontext.mc_special.tp = (uint64_t)thread;
- else
- _tp = thread;
+ if (_thr_initial)
+ return (_tcb->tcb_thread);
return (NULL);
}
+
+#endif /* _PTHREAD_MD_H_ */
diff --git a/lib/libthr/arch/amd64/Makefile.inc b/lib/libthr/arch/amd64/Makefile.inc
index c822c8c..6e6d577 100644
--- a/lib/libthr/arch/amd64/Makefile.inc
+++ b/lib/libthr/arch/amd64/Makefile.inc
@@ -1,5 +1,5 @@
#$FreeBSD$
-.PATH: ${.CURDIR}/sys ${.CURDIR}/arch/${MACHINE_ARCH}/${MACHINE_ARCH}
+.PATH: ${.CURDIR}/arch/${MACHINE_ARCH}/${MACHINE_ARCH}
-SRCS+= _setcurthread.c
+SRCS+= pthread_md.c
diff --git a/lib/libthr/arch/amd64/amd64/_setcurthread.c b/lib/libthr/arch/amd64/amd64/_setcurthread.c
deleted file mode 100644
index 51395c1..0000000
--- a/lib/libthr/arch/amd64/amd64/_setcurthread.c
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * Copyright (c) 2004, David Xu <davidxu@freebsd.org>
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice unmodified, this list of conditions, and the following
- * disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * $FreeBSD$
- */
-
-#include <sys/types.h>
-#include <sys/ucontext.h>
-
-#include <pthread.h>
-#include <machine/sysarch.h>
-
-#include "thr_private.h"
-#include "rtld_tls.h"
-
-struct tcb {
- struct tcb *tcb_self; /* required by rtld */
- void *tcb_dtv; /* required by rtld */
- struct pthread *tcb_thread;
-};
-
-void
-_retire_thread(void *entry)
-{
- struct tcb *tcb = (struct tcb *)entry;
-
- _rtld_free_tls(tcb, sizeof(struct tcb), 16);
-}
-
-void *
-_set_curthread(ucontext_t *uc, struct pthread *thr, int *err)
-{
- struct tcb *tcb;
- void *oldtls;
-
- *err = 0;
-
- if (thr->arch_id != NULL && uc == NULL) {
- amd64_set_fsbase(thr->arch_id);
- return (thr->arch_id);
- }
-
- if (uc == NULL) {
- __asm __volatile("movq %%fs:0, %0" : "=r" (oldtls));
- } else {
- oldtls = NULL;
- }
-
- /*
- * Allocate and initialise a new TLS block with enough extra
- * space for our self pointer.
- */
- tcb = _rtld_allocate_tls(oldtls, sizeof(struct tcb), 16);
-
- /*
- * Cache the address of the thread structure here, after
- * rtld's two words of private space.
- */
- tcb->tcb_thread = thr;
-
- if (uc == NULL)
- amd64_set_fsbase(tcb);
- return (tcb);
-}
-
-pthread_t
-_get_curthread(void)
-{
- extern pthread_t _thread_initial;
- pthread_t td;
-
- if (_thread_initial == NULL)
- return (NULL);
- __asm __volatile("movq %%fs:%1, %0" \
- : "=r" (td) \
- : "m" (*(long *)(__offsetof(struct tcb, tcb_thread))));
-
- return (td);
-}
diff --git a/lib/libthr/arch/amd64/amd64/pthread_md.c b/lib/libthr/arch/amd64/amd64/pthread_md.c
new file mode 100644
index 0000000..d2477df
--- /dev/null
+++ b/lib/libthr/arch/amd64/amd64/pthread_md.c
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2003 Daniel Eischen <deischen@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Neither the name of the author nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <sys/types.h>
+#include <rtld_tls.h>
+
+#include "pthread_md.h"
+
+/*
+ * The constructors.
+ */
+struct tcb *
+_tcb_ctor(struct pthread *thread, int initial)
+{
+ struct tcb *tcb;
+ void *oldtls;
+
+ if (initial)
+ __asm __volatile("movq %%fs:0, %0" : "=r" (oldtls));
+ else
+ oldtls = NULL;
+ tcb = _rtld_allocate_tls(oldtls, sizeof(struct tcb), 16);
+ if (tcb)
+ tcb->tcb_thread = thread;
+ return (tcb);
+}
+
+void
+_tcb_dtor(struct tcb *tcb)
+{
+ _rtld_free_tls(tcb, sizeof(struct tcb), 16);
+}
diff --git a/lib/libthr/arch/amd64/include/pthread_md.h b/lib/libthr/arch/amd64/include/pthread_md.h
new file mode 100644
index 0000000..4500f6b
--- /dev/null
+++ b/lib/libthr/arch/amd64/include/pthread_md.h
@@ -0,0 +1,103 @@
+/*-
+ * Copyright (C) 2003 David Xu <davidxu@freebsd.org>
+ * Copyright (c) 2001 Daniel Eischen <deischen@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Neither the name of the author nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * Machine-dependent thread prototypes/definitions.
+ */
+#ifndef _PTHREAD_MD_H_
+#define _PTHREAD_MD_H_
+
+#include <stddef.h>
+#include <sys/types.h>
+#include <machine/sysarch.h>
+#include <ucontext.h>
+
+#define DTV_OFFSET offsetof(struct tcb, tcb_dtv)
+
+/*
+ * Variant II tcb, first two members are required by rtld,
+ * %fs points to the structure.
+ */
+struct tcb {
+ struct tcb *tcb_self; /* required by rtld */
+ void *tcb_dtv; /* required by rtld */
+ struct pthread *tcb_thread;
+ void *tcb_spare[1];
+};
+
+/*
+ * Evaluates to the byte offset of the per-tcb variable name.
+ */
+#define __tcb_offset(name) __offsetof(struct tcb, name)
+
+/*
+ * Evaluates to the type of the per-tcb variable name.
+ */
+#define __tcb_type(name) __typeof(((struct tcb *)0)->name)
+
+/*
+ * Evaluates to the value of the per-tcb variable name.
+ */
+#define TCB_GET64(name) ({ \
+ __tcb_type(name) __result; \
+ \
+ u_long __i; \
+ __asm __volatile("movq %%fs:%1, %0" \
+ : "=r" (__i) \
+ : "m" (*(u_long *)(__tcb_offset(name)))); \
+ __result = (__tcb_type(name))__i; \
+ \
+ __result; \
+})
+
+struct tcb *_tcb_ctor(struct pthread *, int);
+void _tcb_dtor(struct tcb *tcb);
+
+static __inline void
+_tcb_set(struct tcb *tcb)
+{
+ amd64_set_fsbase(tcb);
+}
+
+static __inline struct tcb *
+_tcb_get(void)
+{
+ return (TCB_GET64(tcb_self));
+}
+
+extern struct pthread *_thr_initial;
+
+static __inline struct pthread *
+_get_curthread(void)
+{
+ if (_thr_initial)
+ return (TCB_GET64(tcb_thread));
+ return (NULL);
+}
+#endif
diff --git a/lib/libthr/arch/arm/Makefile.inc b/lib/libthr/arch/arm/Makefile.inc
new file mode 100644
index 0000000..5a959d3
--- /dev/null
+++ b/lib/libthr/arch/arm/Makefile.inc
@@ -0,0 +1,7 @@
+# $FreeBSD$
+
+.PATH: ${.CURDIR}/arch/${MACHINE_ARCH}/${MACHINE_ARCH}
+
+CFLAGS+= -DARM_HAS_ATOMIC_CMPSET_32
+
+SRCS+= pthread_md.c
diff --git a/lib/libthr/arch/sparc64/sparc64/_setcurthread.c b/lib/libthr/arch/arm/arm/pthread_md.c
index 4c0fb88..69cf57e 100644
--- a/lib/libthr/arch/sparc64/sparc64/_setcurthread.c
+++ b/lib/libthr/arch/arm/arm/pthread_md.c
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2002 Jake Burkholder.
+ * Copyright (C) 2005 David Xu <davidxu@freebsd.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -7,9 +7,9 @@
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
+ * 2. Neither the name of the author nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
@@ -22,38 +22,33 @@
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
+ *
+ * $FreeBSD$
*/
-#include <sys/cdefs.h>
-__FBSDID("$FreeBSD$");
-
+#include <stdlib.h>
#include <sys/types.h>
-#include <sys/ucontext.h>
+#include <rtld_tls.h>
-#include <pthread.h>
-#include "thr_private.h"
+#include "pthread_md.h"
-register struct pthread *_curthread __asm("%g6");
+struct umtx arm_umtx = {
+ .u_owner = UMTX_UNOWNED
+};
-struct pthread *
-_get_curthread(void)
+struct tcb *
+_tcb_ctor(struct pthread *thread, int initial)
{
+ struct tcb *tcb;
- return (_curthread);
+ tcb = malloc(sizeof(struct tcb));
+ if (tcb)
+ tcb->tcb_thread = thread;
+ return (tcb);
}
void
-_retire_thread(void *v)
-{
-}
-
-void *
-_set_curthread(ucontext_t *uc, struct pthread *thread, int *err)
+_tcb_dtor(struct tcb *tcb)
{
- *err = 0;
- if (uc != NULL)
- uc->uc_mcontext.mc_global[6] = (uint64_t)thread;
- else
- _curthread = thread;
- return (NULL);
+ free(tcb);
}
diff --git a/lib/libthr/arch/arm/include/pthread_md.h b/lib/libthr/arch/arm/include/pthread_md.h
new file mode 100644
index 0000000..626d567
--- /dev/null
+++ b/lib/libthr/arch/arm/include/pthread_md.h
@@ -0,0 +1,106 @@
+/*-
+ * Copyright (c) 2005 David Xu <davidxu@freebsd.org>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * Machine-dependent thread prototypes/definitions.
+ */
+#ifndef _PTHREAD_MD_H_
+#define _PTHREAD_MD_H_
+
+#include <sys/types.h>
+#include <machine/sysarch.h>
+#include <stddef.h>
+#include <errno.h>
+
+static __inline int atomic_cmpset_32(volatile uint32_t *, uint32_t, uint32_t);
+
+#include <sys/umtx.h>
+
+#define DTV_OFFSET offsetof(struct tcb, tcb_dtv)
+
+/*
+ * Variant II tcb, first two members are required by rtld.
+ */
+struct tcb {
+ struct tcb *tcb_self; /* required by rtld */
+ void *tcb_dtv; /* required by rtld */
+ struct pthread *tcb_thread; /* our hook */
+ void *tcb_spare[1];
+};
+
+/*
+ * The tcb constructors.
+ */
+struct tcb *_tcb_ctor(struct pthread *, int);
+void _tcb_dtor(struct tcb *);
+
+/* Called from the thread to set its private data. */
+static __inline void
+_tcb_set(struct tcb *tcb)
+{
+ *((struct tcb **)ARM_TP_ADDRESS) = tcb;
+}
+
+/*
+ * Get the current tcb.
+ */
+static __inline struct tcb *
+_tcb_get(void)
+{
+ return (*((struct tcb **)ARM_TP_ADDRESS));
+}
+
+extern struct pthread *_thr_initial;
+
+static __inline struct pthread *
+_get_curthread(void)
+{
+ if (_thr_initial)
+ return (_tcb_get()->tcb_thread);
+ return (NULL);
+}
+
+extern struct umtx arm_umtx;
+
+static __inline int
+atomic_cmpset_32(volatile uint32_t *dst, uint32_t old, uint32_t newval)
+{
+ int ret;
+
+ _umtx_lock(&arm_umtx);
+ arm_umtx.u_owner = (void*)((uint32_t)arm_umtx.u_owner | UMTX_CONTESTED);
+ if (*dst == old) {
+ *dst = newval;
+ ret = 1;
+ } else
+ ret = 0;
+ _umtx_unlock(&arm_umtx);
+ return (ret);
+}
+
+#endif /* _PTHREAD_MD_H_ */
diff --git a/lib/libthr/arch/i386/Makefile.inc b/lib/libthr/arch/i386/Makefile.inc
index 0c86284..508d2b4 100644
--- a/lib/libthr/arch/i386/Makefile.inc
+++ b/lib/libthr/arch/i386/Makefile.inc
@@ -1,5 +1,5 @@
# $FreeBSD$
-.PATH: ${.CURDIR}/sys ${.CURDIR}/arch/${MACHINE_ARCH}/${MACHINE_ARCH}
+.PATH: ${.CURDIR}/arch/${MACHINE_ARCH}/${MACHINE_ARCH}
-SRCS+= _setcurthread.c _curthread.S
+SRCS+= pthread_md.c
diff --git a/lib/libthr/arch/i386/i386/_curthread.S b/lib/libthr/arch/i386/i386/_curthread.S
deleted file mode 100644
index eb658df..0000000
--- a/lib/libthr/arch/i386/i386/_curthread.S
+++ /dev/null
@@ -1,17 +0,0 @@
-/* $FreeBSD$ */
-
-#include <machine/asm.h>
-
-ENTRY(_get_curthread)
- cmpl $0, _thread_initial
- je nothreads
- movl %gs:8, %eax
- ret
-nothreads:
- xor %eax, %eax
- ret
-
-ENTRY(_set_gs)
- movl 4(%esp), %eax
- movl %eax, %gs
- ret
diff --git a/lib/libthr/arch/i386/i386/_setcurthread.c b/lib/libthr/arch/i386/i386/_setcurthread.c
deleted file mode 100644
index d91c64f..0000000
--- a/lib/libthr/arch/i386/i386/_setcurthread.c
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * Copyright (c) 2003, Jeffrey Roberson <jeff@freebsd.org>
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice unmodified, this list of conditions, and the following
- * disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * $FreeBSD$
- */
-
-#include <sys/types.h>
-#include <sys/ucontext.h>
-
-#include <pthread.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-
-#include <machine/sysarch.h>
-#include <machine/segments.h>
-
-#include "thr_private.h"
-#include "rtld_tls.h"
-
-/* in _curthread.S */
-extern void _set_gs(int);
-
-struct tcb {
- struct tcb *tcb_self; /* required by rtld */
- void *tcb_dtv; /* required by rtld */
- struct pthread *tcb_thread;
- int tcb_ldt;
-};
-
-void
-_retire_thread(void *entry)
-{
- struct tcb *tcb = (struct tcb *)entry;
-
- i386_set_ldt(tcb->tcb_ldt, NULL, 1);
- _rtld_free_tls(tcb, sizeof(struct tcb), 16);
-}
-
-void *
-_set_curthread(ucontext_t *uc, struct pthread *thr, int *err)
-{
-#ifndef COMPAT_32BIT
- union descriptor desc;
-#endif
- struct tcb *tcb;
- void *oldtls;
-#ifndef COMPAT_32BIT
- int ldt_index;
-#endif
-
- *err = 0;
-
- if (uc == NULL && thr->arch_id != NULL) {
-#ifdef COMPAT_32BIT
- _amd64_set_gsbase(thr->arch_id);
-#endif
- return (thr->arch_id);
- }
-
- if (uc == NULL) {
- __asm __volatile("movl %%gs:0, %0" : "=r" (oldtls));
- } else {
- oldtls = NULL;
- }
-
- /*
- * Allocate and initialise a new TLS block with enough extra
- * space for our self pointer.
- */
- tcb = _rtld_allocate_tls(oldtls, sizeof(struct tcb), 16);
-
- /*
- * Cache the address of the thread structure here, after
- * rtld's two words of private space.
- */
- tcb->tcb_thread = thr;
-
-#ifndef COMPAT_32BIT
- bzero(&desc, sizeof(desc));
-
- /*
- * Set up the descriptor to point at the TLS block.
- */
- desc.sd.sd_lolimit = 0xFFFF;
- desc.sd.sd_lobase = (unsigned int)tcb & 0xFFFFFF;
- desc.sd.sd_type = SDT_MEMRW;
- desc.sd.sd_dpl = SEL_UPL;
- desc.sd.sd_p = 1;
- desc.sd.sd_hilimit = 0xF;
- desc.sd.sd_xx = 0;
- desc.sd.sd_def32 = 1;
- desc.sd.sd_gran = 1;
- desc.sd.sd_hibase = (unsigned int)tcb >> 24;
-
- /* Get a slot from the process' LDT list */
- ldt_index = i386_set_ldt(LDT_AUTO_ALLOC, &desc, 1);
- if (ldt_index == -1)
- abort();
- tcb->tcb_ldt = ldt_index;
- /*
- * Set up our gs with the index into the ldt for this entry.
- */
- if (uc != NULL)
- uc->uc_mcontext.mc_gs = LSEL(ldt_index, SEL_UPL);
- else
- _set_gs(LSEL(ldt_index, SEL_UPL));
-#else
- if (uc == NULL)
- _amd64_set_gsbase(tcb);
-#endif
-
- return (tcb);
-}
diff --git a/lib/libthr/arch/i386/i386/pthread_md.c b/lib/libthr/arch/i386/i386/pthread_md.c
new file mode 100644
index 0000000..e0adb25
--- /dev/null
+++ b/lib/libthr/arch/i386/i386/pthread_md.c
@@ -0,0 +1,84 @@
+/*-
+ * Copyright (C) 2003 David Xu <davidxu@freebsd.org>
+ * Copyright (c) 2001,2003 Daniel Eischen <deischen@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Neither the name of the author nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <sys/types.h>
+#include <machine/segments.h>
+#include <machine/sysarch.h>
+#include <string.h>
+#include <rtld_tls.h>
+
+#include "pthread_md.h"
+
+struct tcb *
+_tcb_ctor(struct pthread *thread, int initial)
+{
+#ifndef COMPAT_32BIT
+ union descriptor ldt;
+#endif
+ struct tcb *tcb;
+ void *oldtls;
+
+ if (initial)
+ __asm __volatile("movl %%gs:0, %0" : "=r" (oldtls));
+ else
+ oldtls = NULL;
+
+ tcb = _rtld_allocate_tls(oldtls, sizeof(struct tcb), 16);
+ if (tcb) {
+ tcb->tcb_thread = thread;
+#ifndef COMPAT_32BIT
+ ldt.sd.sd_hibase = (unsigned int)tcb >> 24;
+ ldt.sd.sd_lobase = (unsigned int)tcb & 0xFFFFFF;
+ ldt.sd.sd_hilimit = (sizeof(struct tcb) >> 16) & 0xF;
+ ldt.sd.sd_lolimit = sizeof(struct tcb) & 0xFFFF;
+ ldt.sd.sd_type = SDT_MEMRWA;
+ ldt.sd.sd_dpl = SEL_UPL;
+ ldt.sd.sd_p = 1;
+ ldt.sd.sd_xx = 0;
+ ldt.sd.sd_def32 = 1;
+ ldt.sd.sd_gran = 0; /* no more than 1M */
+ tcb->tcb_ldt = i386_set_ldt(LDT_AUTO_ALLOC, &ldt, 1);
+ if (tcb->tcb_ldt < 0) {
+ _rtld_free_tls(tcb, sizeof(struct tcb), 16);
+ tcb = NULL;
+ }
+#endif
+ }
+ return (tcb);
+}
+
+void
+_tcb_dtor(struct tcb *tcb)
+{
+#ifndef COMPAT_32BIT
+ if (tcb->tcb_ldt >= 0)
+ i386_set_ldt(tcb->tcb_ldt, NULL, 1);
+#endif
+ _rtld_free_tls(tcb, sizeof(struct tcb), 16);
+}
diff --git a/lib/libthr/arch/i386/include/pthread_md.h b/lib/libthr/arch/i386/include/pthread_md.h
new file mode 100644
index 0000000..721ddac
--- /dev/null
+++ b/lib/libthr/arch/i386/include/pthread_md.h
@@ -0,0 +1,116 @@
+/*-
+ * Copyright (c) 2002 Daniel Eischen <deischen@freebsd.org>.
+ * Copyright (c) 2005 David Xu <davidxu@freebsd.org>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * Machine-dependent thread prototypes/definitions.
+ */
+#ifndef _PTHREAD_MD_H_
+#define _PTHREAD_MD_H_
+
+#include <stddef.h>
+#include <sys/types.h>
+#include <machine/sysarch.h>
+
+#define DTV_OFFSET offsetof(struct tcb, tcb_dtv)
+
+/*
+ * Variant II tcb, first two members are required by rtld,
+ * %gs points to the structure.
+ */
+struct tcb {
+ struct tcb *tcb_self; /* required by rtld */
+ void *tcb_dtv; /* required by rtld */
+ struct pthread *tcb_thread;
+ int tcb_ldt;
+};
+
+/*
+ * Evaluates to the byte offset of the per-tcb variable name.
+ */
+#define __tcb_offset(name) __offsetof(struct tcb, name)
+
+/*
+ * Evaluates to the type of the per-tcb variable name.
+ */
+#define __tcb_type(name) __typeof(((struct tcb *)0)->name)
+
+/*
+ * Evaluates to the value of the per-tcb variable name.
+ */
+#define TCB_GET32(name) ({ \
+ __tcb_type(name) __result; \
+ \
+ u_int __i; \
+ __asm __volatile("movl %%gs:%1, %0" \
+ : "=r" (__i) \
+ : "m" (*(u_int *)(__tcb_offset(name)))); \
+ __result = (__tcb_type(name))__i; \
+ \
+ __result; \
+})
+
+/*
+ * The constructors.
+ */
+struct tcb *_tcb_ctor(struct pthread *, int);
+void _tcb_dtor(struct tcb *tcb);
+
+/* Called from the thread to set its private data. */
+static __inline void
+_tcb_set(struct tcb *tcb)
+{
+#ifndef COMPAT_32BIT
+ int val;
+
+ val = (tcb->tcb_ldt << 3) | 7;
+ __asm __volatile("movl %0, %%gs" : : "r" (val));
+#else
+ _amd64_set_gsbase(tcb);
+#endif
+
+}
+
+/* Get the current tcb. */
+static __inline struct tcb *
+_tcb_get(void)
+{
+ return (TCB_GET32(tcb_self));
+}
+
+extern struct pthread *_thr_initial;
+
+/* Get the current thread. */
+static __inline struct pthread *
+_get_curthread(void)
+{
+ if (_thr_initial)
+ return (TCB_GET32(tcb_thread));
+ return (NULL);
+}
+#endif
diff --git a/lib/libthr/arch/ia64/Makefile.inc b/lib/libthr/arch/ia64/Makefile.inc
index 6599098..c07c097 100644
--- a/lib/libthr/arch/ia64/Makefile.inc
+++ b/lib/libthr/arch/ia64/Makefile.inc
@@ -1,5 +1,5 @@
# $FreeBSD$
-.PATH: ${.CURDIR}/sys ${.CURDIR}/arch/${MACHINE_ARCH}/${MACHINE_ARCH}
+.PATH: ${.CURDIR}/arch/${MACHINE_ARCH}/${MACHINE_ARCH}
-SRCS+= _curthread.c
+SRCS+= pthread_md.c
diff --git a/lib/libthr/arch/ia64/ia64/pthread_md.c b/lib/libthr/arch/ia64/ia64/pthread_md.c
new file mode 100644
index 0000000..a6f7def
--- /dev/null
+++ b/lib/libthr/arch/ia64/ia64/pthread_md.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2003 Daniel Eischen <deischen@freebsd.org>
+ * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Neither the name of the author nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <sys/types.h>
+#include <rtld_tls.h>
+
+#include "pthread_md.h"
+
+/*
+ * The constructors.
+ */
+struct tcb *
+_tcb_ctor(struct pthread *thread, int initial)
+{
+ struct tcb *tcb;
+ void *oldtls;
+
+ if (initial)
+ oldtls = _tp;
+ else
+ oldtls = NULL;
+ tcb = _rtld_allocate_tls(oldtls, sizeof(struct tcb), 16);
+ if (tcb)
+ tcb->tcb_thread = thread;
+ return (tcb);
+}
+
+void
+_tcb_dtor(struct tcb *tcb)
+{
+	_rtld_free_tls(tcb, sizeof(struct tcb), 16);
+}
diff --git a/lib/libthr/arch/powerpc/powerpc/_curthread.c b/lib/libthr/arch/ia64/include/pthread_md.h
index 3782e24..aee5dd2 100644
--- a/lib/libthr/arch/powerpc/powerpc/_curthread.c
+++ b/lib/libthr/arch/ia64/include/pthread_md.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2004 Suleiman Souhlal
+ * Copyright (c) 2003 Marcel Moolenaar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -22,37 +22,57 @@
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
*/
-#include <sys/cdefs.h>
-__FBSDID("$FreeBSD$");
+#ifndef _PTHREAD_MD_H_
+#define _PTHREAD_MD_H_
+
+#include <stddef.h>
+
+#define DTV_OFFSET offsetof(struct tcb, tcb_dtv)
-#include <sys/types.h>
-#include <sys/ucontext.h>
+/*
+ * Variant I tcb. The structure layout is fixed, don't blindly
+ * change it!
+ */
+struct tcb {
+ void *tcb_dtv;
+ struct pthread *tcb_thread;
+};
-#include <pthread.h>
-#include "thr_private.h"
+register struct tcb *_tp __asm("%r13");
-register struct pthread *_curthread __asm("%r2");
+#define _tcb _tp
-struct pthread *
-_get_curthread(void)
+/*
+ * The tcb constructors.
+ */
+struct tcb *_tcb_ctor(struct pthread *, int);
+void _tcb_dtor(struct tcb *);
+
+/* Called from the thread to set its private data. */
+static __inline void
+_tcb_set(struct tcb *tcb)
{
- return (_curthread);
+ _tp = tcb;
}
-void
-_retire_thread(void *v)
+static __inline struct tcb *
+_tcb_get(void)
{
+ return (_tcb);
}
-void *
-_set_curthread(ucontext_t *uc, struct pthread *thread, int *err)
+extern struct pthread *_thr_initial;
+
+static __inline struct pthread *
+_get_curthread(void)
{
- *err = 0;
- if (uc != NULL)
- uc->uc_mcontext.mc_gpr[2] = (uint32_t)thread;
- else
- _curthread = thread;
+ if (_thr_initial)
+ return (_tcb->tcb_thread);
return (NULL);
}
+
+#endif /* _PTHREAD_MD_H_ */
diff --git a/lib/libthr/arch/powerpc/Makefile.inc b/lib/libthr/arch/powerpc/Makefile.inc
index 9f6f72c..508d2b4 100644
--- a/lib/libthr/arch/powerpc/Makefile.inc
+++ b/lib/libthr/arch/powerpc/Makefile.inc
@@ -1,5 +1,5 @@
# $FreeBSD$
-.PATH: ${.CURDIR}/sys ${.CURDIR}/arch/${MACHINE_ARCH}/${MACHINE_ARCH}
+.PATH: ${.CURDIR}/arch/${MACHINE_ARCH}/${MACHINE_ARCH}
-SRCS+= _curthread.c
+SRCS+= pthread_md.c
diff --git a/lib/libthr/arch/powerpc/include/pthread_md.h b/lib/libthr/arch/powerpc/include/pthread_md.h
new file mode 100644
index 0000000..008c4cd
--- /dev/null
+++ b/lib/libthr/arch/powerpc/include/pthread_md.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2004 by Peter Grehan. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * Machine-dependent thread prototypes/definitions.
+ */
+#ifndef _PTHREAD_MD_H_
+#define _PTHREAD_MD_H_
+
+#include <stddef.h>
+#include <sys/types.h>
+
+#define DTV_OFFSET offsetof(struct tcb, tcb_dtv)
+
+/*
+ * Variant I tcb. The structure layout is fixed, don't blindly
+ * change it.
+ * %r2 points to end of the structure.
+ */
+struct tcb {
+ void *tcb_dtv;
+ struct pthread *tcb_thread;
+};
+
+register uint8_t *_tp __asm("%r2");
+
+#define _tcb ((struct tcb *)(_tp - sizeof(struct tcb)))
+
+struct tcb *_tcb_ctor(struct pthread *, int);
+void _tcb_dtor(struct tcb *);
+
+static __inline void
+_tcb_set(struct tcb *tcb)
+{
+ _tp = (uint8_t *)tcb + sizeof(struct tcb);
+}
+
+static __inline struct tcb *
+_tcb_get(void)
+{
+ return (_tcb);
+}
+
+extern struct pthread *_thr_initial;
+
+static __inline struct pthread *
+_get_curthread(void)
+{
+ if (_thr_initial)
+ return (_tcb->tcb_thread);
+ return (NULL);
+}
+
+#endif /* _PTHREAD_MD_H_ */
diff --git a/lib/libthr/arch/alpha/alpha/_curthread.c b/lib/libthr/arch/powerpc/powerpc/pthread_md.c
index 56166a7..aa02a8d 100644
--- a/lib/libthr/arch/alpha/alpha/_curthread.c
+++ b/lib/libthr/arch/powerpc/powerpc/pthread_md.c
@@ -1,14 +1,16 @@
/*
- * Copyright (c) 2003 The FreeBSD Project. All rights reserved.
+ * Copyright (c) 2003 Daniel Eischen <deischen@freebsd.org>
+ * All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
+ *
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
+ * 2. Neither the name of the author nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
@@ -21,35 +23,36 @@
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
+ * $FreeBSD$
*/
-#include <sys/cdefs.h>
-__FBSDID("$FreeBSD$");
-
#include <sys/types.h>
-#include <sys/ucontext.h>
+#include <rtld_tls.h>
-#include <pthread.h>
-#include "thr_private.h"
+#include "pthread_md.h"
-void *
-_set_curthread(ucontext_t *uc, struct pthread *thread, int *err)
+/*
+ * The constructors.
+ */
+struct tcb *
+_tcb_ctor(struct pthread *thread, int initial)
{
- *err = 0;
- if (uc != NULL)
- uc->uc_mcontext.mc_thrptr = (uint64_t)thread;
+ struct tcb *tcb;
+ void *oldtls;
+
+ if (initial)
+ oldtls = _tp;
else
- __builtin_set_thread_pointer(thread);
- return (NULL);
-}
+ oldtls = NULL;
+ tcb = _rtld_allocate_tls(oldtls, sizeof(struct tcb), 16);
+ if (tcb)
+ tcb->tcb_thread = thread;
+ return (tcb);
-struct pthread *
-_get_curthread(void)
-{
- return (__builtin_thread_pointer());
}
void
-_retire_thread(void *v)
+_tcb_dtor(struct tcb *tcb)
{
+	_rtld_free_tls(tcb, sizeof(struct tcb), 16);
}
diff --git a/lib/libthr/arch/sparc64/Makefile.inc b/lib/libthr/arch/sparc64/Makefile.inc
index ee22813..508d2b4 100644
--- a/lib/libthr/arch/sparc64/Makefile.inc
+++ b/lib/libthr/arch/sparc64/Makefile.inc
@@ -1,5 +1,5 @@
# $FreeBSD$
-.PATH: ${.CURDIR}/sys ${.CURDIR}/arch/${MACHINE_ARCH}/${MACHINE_ARCH}
+.PATH: ${.CURDIR}/arch/${MACHINE_ARCH}/${MACHINE_ARCH}
-SRCS+= _setcurthread.c
+SRCS+= pthread_md.c
diff --git a/lib/libthr/arch/sparc64/include/pthread_md.h b/lib/libthr/arch/sparc64/include/pthread_md.h
new file mode 100644
index 0000000..054c2be
--- /dev/null
+++ b/lib/libthr/arch/sparc64/include/pthread_md.h
@@ -0,0 +1,87 @@
+/*-
+ * Copyright (c) 2003 Jake Burkholder <jake@freebsd.org>.
+ * Copyright (c) 2003 Marcel Moolenaar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * Machine-dependent thread prototypes/definitions.
+ */
+#ifndef _PTHREAD_MD_H_
+#define _PTHREAD_MD_H_
+
+#include <stddef.h>
+
+#define DTV_OFFSET offsetof(struct tcb, tcb_dtv)
+
+/*
+ * Variant II tcb, first two members are required by rtld.
+ * %g7 points to the structure.
+ */
+struct tcb {
+ struct tcb *tcb_self; /* required by rtld */
+ void *tcb_dtv; /* required by rtld */
+ struct pthread *tcb_thread; /* our hook */
+ void *tcb_spare[1];
+};
+
+register struct tcb *_tp __asm("%g7");
+
+#define _tcb (_tp)
+
+/*
+ * The tcb constructors.
+ */
+struct tcb *_tcb_ctor(struct pthread *, int);
+void _tcb_dtor(struct tcb *);
+
+/* Called from the thread to set its private data. */
+static __inline void
+_tcb_set(struct tcb *tcb)
+{
+ _tp = tcb;
+}
+
+/*
+ * Get the current tcb.
+ */
+static __inline struct tcb *
+_tcb_get(void)
+{
+ return (_tcb);
+}
+
+extern struct pthread *_thr_initial;
+
+static __inline struct pthread *
+_get_curthread(void)
+{
+ if (_thr_initial)
+ return (_tcb->tcb_thread);
+ return (NULL);
+}
+
+#endif /* _PTHREAD_MD_H_ */
diff --git a/lib/libthr/arch/sparc64/sparc64/pthread_md.c b/lib/libthr/arch/sparc64/sparc64/pthread_md.c
new file mode 100644
index 0000000..3f8e105
--- /dev/null
+++ b/lib/libthr/arch/sparc64/sparc64/pthread_md.c
@@ -0,0 +1,56 @@
+/*-
+ * Copyright (C) 2003 Jake Burkholder <jake@freebsd.org>
+ * Copyright (C) 2003 David Xu <davidxu@freebsd.org>
+ * Copyright (c) 2001,2003 Daniel Eischen <deischen@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Neither the name of the author nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <sys/types.h>
+#include <rtld_tls.h>
+
+#include "pthread_md.h"
+
+struct tcb *
+_tcb_ctor(struct pthread *thread, int initial)
+{
+ struct tcb *tcb;
+ void *oldtls;
+
+ if (initial)
+ oldtls = _tp;
+ else
+ oldtls = NULL;
+ tcb = _rtld_allocate_tls(oldtls, sizeof(struct tcb), 16);
+ if (tcb)
+ tcb->tcb_thread = thread;
+ return (tcb);
+}
+
+void
+_tcb_dtor(struct tcb *tcb)
+{
+ _rtld_free_tls(tcb, sizeof(struct tcb), 16);
+}
diff --git a/lib/libthr/pthread.map b/lib/libthr/pthread.map
new file mode 100644
index 0000000..52dd62b
--- /dev/null
+++ b/lib/libthr/pthread.map
@@ -0,0 +1,365 @@
+# $FreeBSD$
+LIBPTHREAD_1_0 {
+global:
+ ___creat;
+ __accept;
+ __close;
+ __connect;
+ __error;
+ __fcntl;
+ __fsync;
+ __msync;
+ __nanosleep;
+ __open;
+ __poll;
+ __pthread_cond_timedwait;
+ __pthread_cond_wait;
+ __pthread_mutex_init;
+ __pthread_mutex_lock;
+ __pthread_mutex_trylock;
+ __pthread_mutex_timedlock;
+ __read;
+ __readv;
+ __recvfrom;
+ __recvmsg;
+ __select;
+ __sendmsg;
+ __sendto;
+ __sigsuspend;
+ __wait4;
+ __write;
+ __writev;
+ _aio_suspend;
+ _execve;
+ _fork;
+ _nanosleep;
+ _pause;
+ _pselect;
+ _pthread_atfork;
+ _pthread_barrier_destroy;
+ _pthread_barrier_init;
+ _pthread_barrier_wait;
+ _pthread_barrierattr_destroy;
+ _pthread_barrierattr_getpshared;
+ _pthread_barrierattr_init;
+ _pthread_barrierattr_setpshared;
+ _pthread_attr_default;
+ _pthread_attr_destroy;
+ _pthread_attr_get_np;
+ _pthread_attr_getdetachstate;
+ _pthread_attr_getguardsize;
+ _pthread_attr_getinheritsched;
+ _pthread_attr_getschedparam;
+ _pthread_attr_getschedpolicy;
+ _pthread_attr_getscope;
+ _pthread_attr_getstack;
+ _pthread_attr_getstackaddr;
+ _pthread_attr_getstacksize;
+ _pthread_attr_init;
+ _pthread_attr_setcreatesuspend_np;
+ _pthread_attr_setdetachstate;
+ _pthread_attr_setguardsize;
+ _pthread_attr_setinheritsched;
+ _pthread_attr_setschedparam;
+ _pthread_attr_setschedpolicy;
+ _pthread_attr_setscope;
+ _pthread_attr_setstack;
+ _pthread_attr_setstackaddr;
+ _pthread_attr_setstacksize;
+ _pthread_cancel;
+ _pthread_cleanup_pop;
+ _pthread_cleanup_push;
+ _pthread_cond_broadcast;
+ _pthread_cond_destroy;
+ _pthread_cond_init;
+ _pthread_cond_signal;
+ _pthread_cond_timedwait;
+ _pthread_cond_wait;
+ _pthread_condattr_default;
+ _pthread_condattr_destroy;
+ _pthread_condattr_getclock;
+ _pthread_condattr_getpshared;
+ _pthread_condattr_init;
+ _pthread_condattr_setclock;
+ _pthread_condattr_setpshared;
+ _pthread_create;
+ _pthread_detach;
+ _pthread_equal;
+ _pthread_exit;
+ _pthread_getconcurrency;
+ _pthread_getprio;
+ _pthread_getschedparam;
+ _pthread_getspecific;
+ _pthread_join;
+ _pthread_key_create;
+ _pthread_key_delete;
+ _pthread_kill;
+ _pthread_main_np;
+ _pthread_multi_np;
+ _pthread_mutex_destroy;
+ _pthread_mutex_getprioceiling;
+ _pthread_mutex_init;
+ _pthread_mutex_lock;
+ _pthread_mutex_setprioceiling;
+ _pthread_mutex_timedlock;
+ _pthread_mutex_trylock;
+ _pthread_mutex_unlock;
+ _pthread_mutexattr_default;
+ _pthread_mutexattr_destroy;
+ _pthread_mutexattr_getkind_np;
+ _pthread_mutexattr_getprioceiling;
+ _pthread_mutexattr_getprotocol;
+ _pthread_mutexattr_gettype;
+ _pthread_mutexattr_init;
+ _pthread_mutexattr_setkind_np;
+ _pthread_mutexattr_setprioceiling;
+ _pthread_mutexattr_setprotocol;
+ _pthread_mutexattr_settype;
+ _pthread_once;
+ _pthread_resume_all_np;
+ _pthread_resume_np;
+ _pthread_rwlock_destroy;
+ _pthread_rwlock_init;
+ _pthread_rwlock_rdlock;
+ _pthread_rwlock_timedrdlock;
+ _pthread_rwlock_timedwrlock;
+ _pthread_rwlock_tryrdlock;
+ _pthread_rwlock_trywrlock;
+ _pthread_rwlock_unlock;
+ _pthread_rwlock_wrlock;
+ _pthread_rwlockattr_destroy;
+ _pthread_rwlockattr_getpshared;
+ _pthread_rwlockattr_init;
+ _pthread_rwlockattr_setpshared;
+ _pthread_self;
+ _pthread_set_name_np;
+ _pthread_setcancelstate;
+ _pthread_setcanceltype;
+ _pthread_setconcurrency;
+ _pthread_setprio;
+ _pthread_setschedparam;
+ _pthread_setspecific;
+ _pthread_sigmask;
+ _pthread_single_np;
+ _pthread_spin_destroy;
+ _pthread_spin_init;
+ _pthread_spin_lock;
+ _pthread_spin_trylock;
+ _pthread_spin_unlock;
+ _pthread_suspend_all_np;
+ _pthread_suspend_np;
+ _pthread_switch_add_np;
+ _pthread_switch_delete_np;
+ _pthread_testcancel;
+ _pthread_yield;
+ _raise;
+ _sem_close;
+ _sem_destroy;
+ _sem_getvalue;
+ _sem_init;
+ _sem_open;
+ _sem_post;
+ _sem_timedwait;
+ _sem_trywait;
+ _sem_unlink;
+ _sem_wait;
+ _sigaction;
+ _sigprocmask;
+ _sigsuspend;
+ _sigwait;
+ _sigtimedwait;
+ _sigwaitinfo;
+ _sleep;
+ _spinlock;
+ _spinlock_debug;
+ _spinunlock;
+ _system;
+ _tcdrain;
+ _vfork;
+ _wait;
+ _waitpid;
+ accept;
+ aio_suspend;
+ close;
+ connect;
+ creat;
+ execve;
+ fcntl;
+ fork;
+ fsync;
+ msync;
+ nanosleep;
+ open;
+ pause;
+ poll;
+ pselect;
+ pthread_atfork;
+ pthread_barrier_destroy;
+ pthread_barrier_init;
+ pthread_barrier_wait;
+ pthread_barrierattr_destroy;
+ pthread_barrierattr_getpshared;
+ pthread_barrierattr_init;
+ pthread_barrierattr_setpshared;
+ pthread_attr_destroy;
+ pthread_attr_get_np;
+ pthread_attr_getdetachstate;
+ pthread_attr_getguardsize;
+ pthread_attr_getinheritsched;
+ pthread_attr_getschedparam;
+ pthread_attr_getschedpolicy;
+ pthread_attr_getscope;
+ pthread_attr_getstack;
+ pthread_attr_getstackaddr;
+ pthread_attr_getstacksize;
+ pthread_attr_init;
+ pthread_attr_setcreatesuspend_np;
+ pthread_attr_setdetachstate;
+ pthread_attr_setguardsize;
+ pthread_attr_setinheritsched;
+ pthread_attr_setschedparam;
+ pthread_attr_setschedpolicy;
+ pthread_attr_setscope;
+ pthread_attr_setstack;
+ pthread_attr_setstackaddr;
+ pthread_attr_setstacksize;
+ pthread_cancel;
+ pthread_cleanup_pop;
+ pthread_cleanup_push;
+ pthread_cond_broadcast;
+ pthread_cond_destroy;
+ pthread_cond_init;
+ pthread_cond_signal;
+ pthread_cond_timedwait;
+ pthread_cond_wait;
+ pthread_condattr_destroy;
+ pthread_condattr_init;
+ pthread_create;
+ pthread_detach;
+ pthread_equal;
+ pthread_exit;
+ pthread_getconcurrency;
+ pthread_getprio;
+ pthread_getschedparam;
+ pthread_getspecific;
+ pthread_join;
+ pthread_key_create;
+ pthread_key_delete;
+ pthread_kill;
+ pthread_main_np;
+ pthread_multi_np;
+ pthread_mutex_destroy;
+ pthread_mutex_getprioceiling;
+ pthread_mutex_init;
+ pthread_mutex_lock;
+ pthread_mutex_setprioceiling;
+ pthread_mutex_timedlock;
+ pthread_mutex_trylock;
+ pthread_mutex_unlock;
+ pthread_mutexattr_destroy;
+ pthread_mutexattr_getkind_np;
+ pthread_mutexattr_getprioceiling;
+ pthread_mutexattr_getprotocol;
+ pthread_mutexattr_gettype;
+ pthread_mutexattr_init;
+ pthread_mutexattr_setkind_np;
+ pthread_mutexattr_setprioceiling;
+ pthread_mutexattr_setprotocol;
+ pthread_mutexattr_settype;
+ pthread_once;
+ pthread_resume_all_np;
+ pthread_resume_np;
+ pthread_rwlock_destroy;
+ pthread_rwlock_init;
+ pthread_rwlock_rdlock;
+ pthread_rwlock_timedrdlock;
+ pthread_rwlock_timedwrlock;
+ pthread_rwlock_tryrdlock;
+ pthread_rwlock_trywrlock;
+ pthread_rwlock_unlock;
+ pthread_rwlock_wrlock;
+ pthread_rwlockattr_destroy;
+ pthread_rwlockattr_getpshared;
+ pthread_rwlockattr_init;
+ pthread_rwlockattr_setpshared;
+ pthread_self;
+ pthread_set_name_np;
+ pthread_setcancelstate;
+ pthread_setcanceltype;
+ pthread_setconcurrency;
+ pthread_setprio;
+ pthread_setschedparam;
+ pthread_setspecific;
+ pthread_sigmask;
+ pthread_single_np;
+ pthread_spin_destroy;
+ pthread_spin_init;
+ pthread_spin_lock;
+ pthread_spin_trylock;
+ pthread_spin_unlock;
+ pthread_suspend_all_np;
+ pthread_suspend_np;
+ pthread_switch_add_np;
+ pthread_switch_delete_np;
+ pthread_testcancel;
+ pthread_yield;
+ raise;
+ read;
+ readv;
+ recvfrom;
+ recvmsg;
+ select;
+ sem_close;
+ sem_destroy;
+ sem_getvalue;
+ sem_init;
+ sem_open;
+ sem_post;
+ sem_timedwait;
+ sem_trywait;
+ sem_unlink;
+ sem_wait;
+ sendmsg;
+ sendto;
+ sigaction;
+ sigaltstack;
+ sigpending;
+ sigprocmask;
+ sigsuspend;
+ sigwait;
+ sigwaitinfo;
+ sigtimedwait;
+ sleep;
+ system;
+ tcdrain;
+ vfork;
+ wait;
+ wait4;
+ waitpid;
+ write;
+ writev;
+
+ # Debugger needs these.
+ _libthr_debug;
+ _thread_active_threads;
+ _thread_keytable;
+ _thread_list;
+ _thread_max_keys;
+ _thread_off_attr_flags;
+ _thread_off_dtv;
+ _thread_off_linkmap;
+ _thread_off_next;
+ _thread_off_tcb;
+ _thread_off_tid;
+ _thread_off_key_allocated;
+ _thread_off_key_destructor;
+ _thread_off_state;
+ _thread_off_thr_locklevel;
+ _thread_off_tlsindex;
+ _thread_off_isdead;
+ _thread_size_key;
+ _thread_state_running;
+ _thread_state_zoombie;
+local:
+ *;
+};
diff --git a/lib/libthr/sys/Makefile.inc b/lib/libthr/sys/Makefile.inc
index 59018f7..70c6dda 100644
--- a/lib/libthr/sys/Makefile.inc
+++ b/lib/libthr/sys/Makefile.inc
@@ -1,5 +1,5 @@
# $FreeBSD$
-.PATH: ${.CURDIR}/sys ${.CURDIR}/arch/${MACHINE_ARCH}/${MACHINE_ARCH}
+.PATH: ${.CURDIR}/sys
-SRCS+= thr_error.c
+SRCS+= thr_error.c
diff --git a/lib/libthr/sys/thr_error.c b/lib/libthr/sys/thr_error.c
index 726c0df..902c054 100644
--- a/lib/libthr/sys/thr_error.c
+++ b/lib/libthr/sys/thr_error.c
@@ -35,25 +35,20 @@
*/
#include <pthread.h>
+
+#include "libc_private.h"
#include "thr_private.h"
#undef errno
extern int errno;
-__weak_reference(___error, __error);
-
int *
-___error()
+__error(void)
{
- struct pthread *pthread;
-
- if (_thread_initial == NULL)
- return (&errno);
-
- pthread = _get_curthread();
+ struct pthread *curthread = _get_curthread();
- if (pthread == _thread_initial)
+ if (curthread != NULL && curthread != _thr_initial)
+ return (&curthread->error);
+ else
return (&errno);
-
- return (&pthread->error);
}
diff --git a/lib/libthr/thread/Makefile.inc b/lib/libthr/thread/Makefile.inc
index 395cefc..d756a32 100644
--- a/lib/libthr/thread/Makefile.inc
+++ b/lib/libthr/thread/Makefile.inc
@@ -4,36 +4,35 @@
.PATH: ${.CURDIR}/thread
SRCS+= \
- thr_atfork.c \
thr_attr.c \
- thr_autoinit.c \
thr_barrier.c \
thr_barrierattr.c \
thr_cancel.c \
thr_clean.c \
thr_concurrency.c \
thr_cond.c \
- thr_condattr_destroy.c \
- thr_condattr_init.c \
+ thr_condattr.c \
thr_create.c \
thr_detach.c \
thr_equal.c \
thr_exit.c \
- thr_find_thread.c \
+ thr_fork.c \
thr_getprio.c \
+ thr_getschedparam.c \
thr_init.c \
thr_join.c \
+ thr_list.c \
thr_kern.c \
+ thr_kill.c \
thr_main_np.c \
- thr_mattr_init.c \
- thr_mattr_kind_np.c \
thr_multi_np.c \
thr_mutex.c \
+ thr_mutexattr.c \
thr_mutex_prioceiling.c \
thr_mutex_protocol.c \
- thr_mutexattr_destroy.c \
thr_once.c \
thr_printf.c \
+ thr_pspinlock.c \
thr_resume_np.c \
thr_rwlock.c \
thr_rwlockattr.c \
@@ -43,10 +42,13 @@ SRCS+= \
thr_setprio.c \
thr_setschedparam.c \
thr_sig.c \
+ thr_single_np.c \
thr_spec.c \
thr_spinlock.c \
thr_stack.c \
- thr_subr.c \
- thr_suspend_np.c \
thr_syscalls.c \
+ thr_suspend_np.c \
+ thr_switch_np.c \
+ thr_symbols.c \
+ thr_umtx.c \
thr_yield.c
diff --git a/lib/libthr/thread/thr_atfork.c b/lib/libthr/thread/thr_atfork.c
index 214424a..370623a 100644
--- a/lib/libthr/thread/thr_atfork.c
+++ b/lib/libthr/thread/thr_atfork.c
@@ -23,16 +23,13 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
+ * $FreeBSD$
*/
-#include <sys/cdefs.h>
-__FBSDID("$FreeBSD$");
-
#include <errno.h>
#include <stdlib.h>
#include <pthread.h>
#include <sys/queue.h>
-
#include "thr_private.h"
__weak_reference(_pthread_atfork, pthread_atfork);
@@ -41,20 +38,20 @@ int
_pthread_atfork(void (*prepare)(void), void (*parent)(void),
void (*child)(void))
{
+ struct pthread *curthread;
struct pthread_atfork *af;
- if (_thread_initial == NULL)
- _thread_init();
+ _thr_check_init();
if ((af = malloc(sizeof(struct pthread_atfork))) == NULL)
return (ENOMEM);
+ curthread = _get_curthread();
af->prepare = prepare;
af->parent = parent;
af->child = child;
- _pthread_mutex_lock(&_atfork_mutex);
- TAILQ_INSERT_TAIL(&_atfork_list, af, qe);
- _pthread_mutex_unlock(&_atfork_mutex);
+ THR_UMTX_LOCK(curthread, &_thr_atfork_lock);
+ TAILQ_INSERT_TAIL(&_thr_atfork_list, af, qe);
+ THR_UMTX_UNLOCK(curthread, &_thr_atfork_lock);
return (0);
}
-
diff --git a/lib/libthr/thread/thr_attr.c b/lib/libthr/thread/thr_attr.c
index 1db7bf3..9e65548 100644
--- a/lib/libthr/thread/thr_attr.c
+++ b/lib/libthr/thread/thr_attr.c
@@ -1,38 +1,4 @@
/*
- * Copyright (c) 1995-1997 John Birrell <jb@cimlogic.com.au>.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- * must display the following acknowledgement:
- * This product includes software developed by John Birrell.
- * 4. Neither the name of the author nor the names of any co-contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * $FreeBSD$
- */
-
-/*
* Copyright (c) 2003 Craig Rodrigues <rodrigc@attbi.com>.
* All rights reserved.
*
@@ -69,7 +35,6 @@
* Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>.
* Copyright (C) 2001 Jason Evans <jasone@freebsd.org>.
* Copyright (c) 2002,2003 Alexey Zelkin <phantom@FreeBSD.org>
- * Copyright (c) 2003 Jeff Roberson <jeff@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -97,333 +62,471 @@
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-/* XXXTHR I rewrote the entire file, can we lose some of the copyrights? */
-
-#include <sys/param.h>
+/*
+ * Copyright (c) 1996 John Birrell <jb@cimlogic.com.au>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by John Birrell.
+ * 4. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
#include <errno.h>
#include <pthread.h>
-#include <pthread_np.h>
#include <stdlib.h>
#include <string.h>
+#include <pthread_np.h>
#include "thr_private.h"
__weak_reference(_pthread_attr_destroy, pthread_attr_destroy);
-__weak_reference(_pthread_attr_init, pthread_attr_init);
-__weak_reference(_pthread_attr_setcreatesuspend_np,
- pthread_attr_setcreatesuspend_np);
-__weak_reference(_pthread_attr_setdetachstate, pthread_attr_setdetachstate);
-__weak_reference(_pthread_attr_setguardsize, pthread_attr_setguardsize);
-__weak_reference(_pthread_attr_setinheritsched, pthread_attr_setinheritsched);
-__weak_reference(_pthread_attr_setschedparam, pthread_attr_setschedparam);
-__weak_reference(_pthread_attr_setschedpolicy, pthread_attr_setschedpolicy);
-__weak_reference(_pthread_attr_setscope, pthread_attr_setscope);
-__weak_reference(_pthread_attr_setstack, pthread_attr_setstack);
-__weak_reference(_pthread_attr_setstackaddr, pthread_attr_setstackaddr);
-__weak_reference(_pthread_attr_setstacksize, pthread_attr_setstacksize);
-__weak_reference(_pthread_attr_get_np, pthread_attr_get_np);
-__weak_reference(_pthread_attr_getdetachstate, pthread_attr_getdetachstate);
-__weak_reference(_pthread_attr_getguardsize, pthread_attr_getguardsize);
-__weak_reference(_pthread_attr_getinheritsched, pthread_attr_getinheritsched);
-__weak_reference(_pthread_attr_getschedparam, pthread_attr_getschedparam);
-__weak_reference(_pthread_attr_getschedpolicy, pthread_attr_getschedpolicy);
-__weak_reference(_pthread_attr_getscope, pthread_attr_getscope);
-__weak_reference(_pthread_attr_getstack, pthread_attr_getstack);
-__weak_reference(_pthread_attr_getstackaddr, pthread_attr_getstackaddr);
-__weak_reference(_pthread_attr_getstacksize, pthread_attr_getstacksize);
int
-_pthread_attr_init(pthread_attr_t *attr)
+_pthread_attr_destroy(pthread_attr_t *attr)
{
- pthread_attr_t pattr;
-
- if ((pattr = (pthread_attr_t)
- malloc(sizeof(struct pthread_attr))) == NULL)
- return (ENOMEM);
-
- memcpy(pattr, &pthread_attr_default, sizeof(struct pthread_attr));
- *attr = pattr;
+ int ret;
- return (0);
+ /* Check for invalid arguments: */
+ if (attr == NULL || *attr == NULL)
+ /* Invalid argument: */
+ ret = EINVAL;
+ else {
+ /* Free the memory allocated to the attribute object: */
+ free(*attr);
+
+ /*
+ * Leave the attribute pointer NULL now that the memory
+ * has been freed:
+ */
+ *attr = NULL;
+ ret = 0;
+ }
+ return(ret);
}
+__weak_reference(_pthread_attr_get_np, pthread_attr_get_np);
+
int
-_pthread_attr_destroy(pthread_attr_t *attr)
+_pthread_attr_get_np(pthread_t pid, pthread_attr_t *dst)
{
- if (attr == NULL || *attr == NULL)
+ struct pthread *curthread;
+ struct pthread_attr attr;
+ int ret;
+
+ if (pid == NULL || dst == NULL || *dst == NULL)
return (EINVAL);
- free(*attr);
- *attr = NULL;
+ curthread = _get_curthread();
+ if ((ret = _thr_ref_add(curthread, pid, /*include dead*/0)) != 0)
+ return (ret);
+ attr = pid->attr;
+ _thr_ref_delete(curthread, pid);
+ memcpy(*dst, &attr, sizeof(struct pthread_attr));
return (0);
}
+__weak_reference(_pthread_attr_getdetachstate, pthread_attr_getdetachstate);
+
int
-_pthread_attr_setcreatesuspend_np(pthread_attr_t *attr)
+_pthread_attr_getdetachstate(const pthread_attr_t *attr, int *detachstate)
{
- if (attr == NULL || *attr == NULL) {
- errno = EINVAL;
- return (-1);
- }
- (*attr)->suspend = PTHREAD_CREATE_SUSPENDED;
+ int ret;
- return (0);
+ /* Check for invalid arguments: */
+ if (attr == NULL || *attr == NULL || detachstate == NULL)
+ ret = EINVAL;
+ else {
+ /* Check if the detached flag is set: */
+ if ((*attr)->flags & PTHREAD_DETACHED)
+ /* Return detached: */
+ *detachstate = PTHREAD_CREATE_DETACHED;
+ else
+ /* Return joinable: */
+ *detachstate = PTHREAD_CREATE_JOINABLE;
+ ret = 0;
+ }
+ return(ret);
}
+__weak_reference(_pthread_attr_getguardsize, pthread_attr_getguardsize);
+
int
-_pthread_attr_setdetachstate(pthread_attr_t *attr, int detachstate)
+_pthread_attr_getguardsize(const pthread_attr_t *attr, size_t *guardsize)
{
- if (attr == NULL || *attr == NULL ||
- (detachstate != PTHREAD_CREATE_DETACHED &&
- detachstate != PTHREAD_CREATE_JOINABLE))
- return (EINVAL);
-
- if (detachstate == PTHREAD_CREATE_DETACHED)
- (*attr)->flags |= PTHREAD_DETACHED;
- else
- (*attr)->flags &= ~PTHREAD_DETACHED;
+ int ret;
- return (0);
+ /* Check for invalid arguments: */
+ if (attr == NULL || *attr == NULL || guardsize == NULL)
+ ret = EINVAL;
+ else {
+ /* Return the guard size: */
+ *guardsize = (*attr)->guardsize_attr;
+ ret = 0;
+ }
+ return(ret);
}
+__weak_reference(_pthread_attr_getinheritsched, pthread_attr_getinheritsched);
+
int
-_pthread_attr_setguardsize(pthread_attr_t *attr, size_t guardsize)
+_pthread_attr_getinheritsched(const pthread_attr_t *attr, int *sched_inherit)
{
+ int ret = 0;
- if (attr == NULL || *attr == NULL)
- return (EINVAL);
-
- (*attr)->guardsize_attr = roundup(guardsize, _pthread_page_size);
+ if ((attr == NULL) || (*attr == NULL))
+ ret = EINVAL;
+ else
+ *sched_inherit = (*attr)->sched_inherit;
- return (0);
+ return(ret);
}
+__weak_reference(_pthread_attr_getschedparam, pthread_attr_getschedparam);
+
int
-_pthread_attr_setinheritsched(pthread_attr_t *attr, int sched_inherit)
+_pthread_attr_getschedparam(const pthread_attr_t *attr, struct sched_param *param)
{
- if (attr == NULL || *attr == NULL)
- return (EINVAL);
+ int ret = 0;
- (*attr)->sched_inherit = sched_inherit;
+ if ((attr == NULL) || (*attr == NULL) || (param == NULL))
+ ret = EINVAL;
+ else
+ param->sched_priority = (*attr)->prio;
- return (0);
+ return(ret);
}
+__weak_reference(_pthread_attr_getschedpolicy, pthread_attr_getschedpolicy);
+
int
-_pthread_attr_setschedparam(pthread_attr_t *attr,
- const struct sched_param *param)
+_pthread_attr_getschedpolicy(const pthread_attr_t *attr, int *policy)
{
- if (attr == NULL || *attr == NULL)
- return (EINVAL);
-
- if (param == NULL)
- return (ENOTSUP);
+ int ret = 0;
- if (param->sched_priority < PTHREAD_MIN_PRIORITY ||
- param->sched_priority > PTHREAD_MAX_PRIORITY)
- return (ENOTSUP);
-
- (*attr)->prio = param->sched_priority;
+ if ((attr == NULL) || (*attr == NULL) || (policy == NULL))
+ ret = EINVAL;
+ else
+ *policy = (*attr)->sched_policy;
- return (0);
+ return(ret);
}
+__weak_reference(_pthread_attr_getscope, pthread_attr_getscope);
+
int
-_pthread_attr_setschedpolicy(pthread_attr_t *attr, int policy)
+_pthread_attr_getscope(const pthread_attr_t *attr, int *contentionscope)
{
- if (attr == NULL || *attr == NULL)
- return (EINVAL);
+ int ret = 0;
- if (policy < SCHED_FIFO || policy > SCHED_RR)
- return (ENOTSUP);
+ if ((attr == NULL) || (*attr == NULL) || (contentionscope == NULL))
+ /* Return an invalid argument: */
+ ret = EINVAL;
- (*attr)->sched_policy = policy;
+ else
+ *contentionscope = (*attr)->flags & PTHREAD_SCOPE_SYSTEM ?
+ PTHREAD_SCOPE_SYSTEM : PTHREAD_SCOPE_PROCESS;
- return (0);
+ return(ret);
}
+__weak_reference(_pthread_attr_getstack, pthread_attr_getstack);
+
int
-_pthread_attr_setscope(pthread_attr_t *attr, int contentionscope)
+_pthread_attr_getstack(const pthread_attr_t * __restrict attr,
+ void ** __restrict stackaddr,
+ size_t * __restrict stacksize)
{
- if (attr == NULL || *attr == NULL)
- return (EINVAL);
-
- if (contentionscope != PTHREAD_SCOPE_PROCESS ||
- contentionscope == PTHREAD_SCOPE_SYSTEM)
- /* We don't support PTHREAD_SCOPE_SYSTEM. */
- return (ENOTSUP);
-
- (*attr)->flags |= contentionscope;
+ int ret;
- return (0);
+ /* Check for invalid arguments: */
+ if (attr == NULL || *attr == NULL || stackaddr == NULL
+ || stacksize == NULL )
+ ret = EINVAL;
+ else {
+ /* Return the stack address and size */
+ *stackaddr = (*attr)->stackaddr_attr;
+ *stacksize = (*attr)->stacksize_attr;
+ ret = 0;
+ }
+ return(ret);
}
+__weak_reference(_pthread_attr_getstackaddr, pthread_attr_getstackaddr);
+
int
-_pthread_attr_setstack(pthread_attr_t *attr, void *stackaddr,
- size_t stacksize)
+_pthread_attr_getstackaddr(const pthread_attr_t *attr, void **stackaddr)
{
- if (attr == NULL || *attr == NULL || stackaddr == NULL
- || stacksize < PTHREAD_STACK_MIN)
- return (EINVAL);
-
- (*attr)->stackaddr_attr = stackaddr;
- (*attr)->stacksize_attr = stacksize;
+ int ret;
- return (0);
+ /* Check for invalid arguments: */
+ if (attr == NULL || *attr == NULL || stackaddr == NULL)
+ ret = EINVAL;
+ else {
+ /* Return the stack address: */
+ *stackaddr = (*attr)->stackaddr_attr;
+ ret = 0;
+ }
+ return(ret);
}
+__weak_reference(_pthread_attr_getstacksize, pthread_attr_getstacksize);
+
int
-_pthread_attr_setstackaddr(pthread_attr_t *attr, void *stackaddr)
+_pthread_attr_getstacksize(const pthread_attr_t *attr, size_t *stacksize)
{
- if (attr == NULL || *attr == NULL || stackaddr == NULL)
- return (EINVAL);
-
- (*attr)->stackaddr_attr = stackaddr;
+ int ret;
- return (0);
+ /* Check for invalid arguments: */
+ if (attr == NULL || *attr == NULL || stacksize == NULL)
+ ret = EINVAL;
+ else {
+ /* Return the stack size: */
+ *stacksize = (*attr)->stacksize_attr;
+ ret = 0;
+ }
+ return(ret);
}
+__weak_reference(_pthread_attr_init, pthread_attr_init);
+
int
-_pthread_attr_setstacksize(pthread_attr_t *attr, size_t stacksize)
+_pthread_attr_init(pthread_attr_t *attr)
{
- if (attr == NULL || *attr == NULL || stacksize < PTHREAD_STACK_MIN)
- return (EINVAL);
+ int ret;
+ pthread_attr_t pattr;
- (*attr)->stacksize_attr = stacksize;
+ _thr_check_init();
- return (0);
+ /* Allocate memory for the attribute object: */
+ if ((pattr = (pthread_attr_t) malloc(sizeof(struct pthread_attr))) == NULL)
+ /* Insufficient memory: */
+ ret = ENOMEM;
+ else {
+ /* Initialise the attribute object with the defaults: */
+ memcpy(pattr, &_pthread_attr_default, sizeof(struct pthread_attr));
+
+ /* Return a pointer to the attribute object: */
+ *attr = pattr;
+ ret = 0;
+ }
+ return(ret);
}
+__weak_reference(_pthread_attr_setcreatesuspend_np, pthread_attr_setcreatesuspend_np);
+
int
-_pthread_attr_get_np(pthread_t pid, pthread_attr_t *dst)
+_pthread_attr_setcreatesuspend_np(pthread_attr_t *attr)
{
int ret;
- if (pid == NULL || dst == NULL || *dst == NULL)
- return (EINVAL);
-
- if ((ret = _find_thread(pid)) != 0)
- return (ret);
-
- memcpy(*dst, &pid->attr, sizeof(struct pthread_attr));
-
- /*
- * Special case, if stack address was not provided by caller
- * of pthread_create(), then return address allocated internally
- */
- if ((*dst)->stackaddr_attr == NULL)
- (*dst)->stackaddr_attr = pid->stack;
-
- return (0);
+ if (attr == NULL || *attr == NULL) {
+ ret = EINVAL;
+ } else {
+ (*attr)->suspend = THR_CREATE_SUSPENDED;
+ ret = 0;
+ }
+ return(ret);
}
+__weak_reference(_pthread_attr_setdetachstate, pthread_attr_setdetachstate);
+
int
-_pthread_attr_getdetachstate(const pthread_attr_t *attr, int *detachstate)
+_pthread_attr_setdetachstate(pthread_attr_t *attr, int detachstate)
{
+ int ret;
- if (attr == NULL || *attr == NULL || detachstate == NULL)
- return (EINVAL);
-
- /* Check if the detached flag is set: */
- if ((*attr)->flags & PTHREAD_DETACHED)
- *detachstate = PTHREAD_CREATE_DETACHED;
- else
- *detachstate = PTHREAD_CREATE_JOINABLE;
-
- return (0);
+ /* Check for invalid arguments: */
+ if (attr == NULL || *attr == NULL ||
+ (detachstate != PTHREAD_CREATE_DETACHED &&
+ detachstate != PTHREAD_CREATE_JOINABLE))
+ ret = EINVAL;
+ else {
+ /* Check if detached state: */
+ if (detachstate == PTHREAD_CREATE_DETACHED)
+ /* Set the detached flag: */
+ (*attr)->flags |= PTHREAD_DETACHED;
+ else
+ /* Reset the detached flag: */
+ (*attr)->flags &= ~PTHREAD_DETACHED;
+ ret = 0;
+ }
+ return(ret);
}
+__weak_reference(_pthread_attr_setguardsize, pthread_attr_setguardsize);
+
int
-_pthread_attr_getguardsize(const pthread_attr_t *attr, size_t *guardsize)
+_pthread_attr_setguardsize(pthread_attr_t *attr, size_t guardsize)
{
- if (attr == NULL || *attr == NULL || guardsize == NULL)
- return (EINVAL);
-
- *guardsize = (*attr)->guardsize_attr;
+ int ret;
- return (0);
+ /* Check for invalid arguments. */
+ if (attr == NULL || *attr == NULL)
+ ret = EINVAL;
+ else {
+		/* Save the guard size. */
+ (*attr)->guardsize_attr = guardsize;
+ ret = 0;
+ }
+ return(ret);
}
+__weak_reference(_pthread_attr_setinheritsched, pthread_attr_setinheritsched);
+
int
-_pthread_attr_getinheritsched(const pthread_attr_t *attr, int *sched_inherit)
+_pthread_attr_setinheritsched(pthread_attr_t *attr, int sched_inherit)
{
- if (attr == NULL || *attr == NULL)
- return (EINVAL);
+ int ret = 0;
- *sched_inherit = (*attr)->sched_inherit;
+ if ((attr == NULL) || (*attr == NULL))
+ ret = EINVAL;
+ else if (sched_inherit != PTHREAD_INHERIT_SCHED &&
+ sched_inherit != PTHREAD_EXPLICIT_SCHED)
+ ret = ENOTSUP;
+ else
+ (*attr)->sched_inherit = sched_inherit;
- return (0);
+ return(ret);
}
+__weak_reference(_pthread_attr_setschedparam, pthread_attr_setschedparam);
+
int
-_pthread_attr_getschedparam(const pthread_attr_t *attr, struct sched_param *param)
+_pthread_attr_setschedparam(pthread_attr_t *attr, const struct sched_param *param)
{
- if (attr == NULL || *attr == NULL || param == NULL)
- return (EINVAL);
-
- param->sched_priority = (*attr)->prio;
-
- return (0);
+ int ret = 0;
+
+ if ((attr == NULL) || (*attr == NULL))
+ ret = EINVAL;
+ else if (param == NULL) {
+ ret = ENOTSUP;
+ } else if ((param->sched_priority < THR_MIN_PRIORITY) ||
+ (param->sched_priority > THR_MAX_PRIORITY)) {
+ /* Return an unsupported value error. */
+ ret = ENOTSUP;
+ } else
+ (*attr)->prio = param->sched_priority;
+
+ return(ret);
}
+__weak_reference(_pthread_attr_setschedpolicy, pthread_attr_setschedpolicy);
+
int
-_pthread_attr_getschedpolicy(const pthread_attr_t *attr, int *policy)
+_pthread_attr_setschedpolicy(pthread_attr_t *attr, int policy)
{
- if (attr == NULL || *attr == NULL || policy == NULL)
- return (EINVAL);
+ int ret = 0;
- *policy = (*attr)->sched_policy;
+ if ((attr == NULL) || (*attr == NULL))
+ ret = EINVAL;
+ else if ((policy < SCHED_FIFO) || (policy > SCHED_RR)) {
+ ret = ENOTSUP;
+ } else
+ (*attr)->sched_policy = policy;
- return (0);
+ return(ret);
}
+__weak_reference(_pthread_attr_setscope, pthread_attr_setscope);
+
int
-_pthread_attr_getscope(const pthread_attr_t *attr, int *contentionscope)
+_pthread_attr_setscope(pthread_attr_t *attr, int contentionscope)
{
- if (attr == NULL || *attr == NULL || contentionscope == NULL)
- return (EINVAL);
-
- *contentionscope = (*attr)->flags & PTHREAD_SCOPE_SYSTEM ?
- PTHREAD_SCOPE_SYSTEM : PTHREAD_SCOPE_PROCESS;
-
- return (0);
+ int ret = 0;
+
+ if ((attr == NULL) || (*attr == NULL)) {
+ /* Return an invalid argument: */
+ ret = EINVAL;
+ } else if ((contentionscope != PTHREAD_SCOPE_PROCESS) &&
+ (contentionscope != PTHREAD_SCOPE_SYSTEM)) {
+ ret = EINVAL;
+ } else if (contentionscope == PTHREAD_SCOPE_SYSTEM) {
+ (*attr)->flags |= contentionscope;
+ } else {
+ (*attr)->flags &= ~PTHREAD_SCOPE_SYSTEM;
+ }
+ return (ret);
}
+__weak_reference(_pthread_attr_setstack, pthread_attr_setstack);
+
int
-_pthread_attr_getstack(const pthread_attr_t * __restrict attr,
- void ** __restrict stackaddr,
- size_t * __restrict stacksize)
+_pthread_attr_setstack(pthread_attr_t *attr, void *stackaddr,
+ size_t stacksize)
{
- if (attr == NULL || *attr == NULL || stackaddr == NULL
- || stacksize == NULL)
- return (EINVAL);
-
- *stackaddr = (*attr)->stackaddr_attr;
- *stacksize = (*attr)->stacksize_attr;
+ int ret;
- return (0);
+ /* Check for invalid arguments: */
+ if (attr == NULL || *attr == NULL || stackaddr == NULL
+ || stacksize < PTHREAD_STACK_MIN)
+ ret = EINVAL;
+ else {
+ /* Save the stack address and stack size */
+ (*attr)->stackaddr_attr = stackaddr;
+ (*attr)->stacksize_attr = stacksize;
+ ret = 0;
+ }
+ return(ret);
}
+__weak_reference(_pthread_attr_setstackaddr, pthread_attr_setstackaddr);
+
int
-_pthread_attr_getstackaddr(const pthread_attr_t *attr, void **stackaddr)
+_pthread_attr_setstackaddr(pthread_attr_t *attr, void *stackaddr)
{
- if (attr == NULL || *attr == NULL || stackaddr == NULL)
- return (EINVAL);
-
- *stackaddr = (*attr)->stackaddr_attr;
+ int ret;
- return (0);
+ /* Check for invalid arguments: */
+ if (attr == NULL || *attr == NULL || stackaddr == NULL)
+ ret = EINVAL;
+ else {
+ /* Save the stack address: */
+ (*attr)->stackaddr_attr = stackaddr;
+ ret = 0;
+ }
+ return(ret);
}
+__weak_reference(_pthread_attr_setstacksize, pthread_attr_setstacksize);
+
int
-_pthread_attr_getstacksize(const pthread_attr_t *attr, size_t *stacksize)
+_pthread_attr_setstacksize(pthread_attr_t *attr, size_t stacksize)
{
- if (attr == NULL || *attr == NULL || stacksize == NULL)
- return (EINVAL);
-
- *stacksize = (*attr)->stacksize_attr;
+ int ret;
- return (0);
+ /* Check for invalid arguments: */
+ if (attr == NULL || *attr == NULL || stacksize < PTHREAD_STACK_MIN)
+ ret = EINVAL;
+ else {
+ /* Save the stack size: */
+ (*attr)->stacksize_attr = stacksize;
+ ret = 0;
+ }
+ return(ret);
}
diff --git a/lib/libthr/thread/thr_barrier.c b/lib/libthr/thread/thr_barrier.c
index 547a721..afb6a40 100644
--- a/lib/libthr/thread/thr_barrier.c
+++ b/lib/libthr/thread/thr_barrier.c
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2004 Michael Telahun Makonnen <mtm@FreeBSD.Org>
+ * Copyright (c) 2003 David Xu <davidxu@freebsd.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -26,104 +26,82 @@
* $FreeBSD$
*/
-#include <pthread.h>
+#include <errno.h>
#include <stdlib.h>
-#include <string.h>
+#include <pthread.h>
#include "thr_private.h"
-__weak_reference(_pthread_barrier_destroy, pthread_barrier_destroy);
-__weak_reference(_pthread_barrier_init, pthread_barrier_init);
-__weak_reference(_pthread_barrier_wait, pthread_barrier_wait);
+__weak_reference(_pthread_barrier_init, pthread_barrier_init);
+__weak_reference(_pthread_barrier_wait, pthread_barrier_wait);
+__weak_reference(_pthread_barrier_destroy, pthread_barrier_destroy);
int
_pthread_barrier_destroy(pthread_barrier_t *barrier)
{
- if (*barrier == NULL)
+ pthread_barrier_t bar;
+
+ if (barrier == NULL || *barrier == NULL)
return (EINVAL);
- if ((*barrier)->b_subtotal > 0)
+
+ bar = *barrier;
+ if (bar->b_waiters > 0)
return (EBUSY);
- PTHREAD_ASSERT((*barrier)->b_subtotal == 0,
- "barrier count must be zero when destroyed");
- free(*barrier);
*barrier = NULL;
+ free(bar);
return (0);
}
int
_pthread_barrier_init(pthread_barrier_t *barrier,
- const pthread_barrierattr_t attr, unsigned int count)
+ const pthread_barrierattr_t *attr, int count)
{
- if (count < 1)
+ pthread_barrier_t bar;
+
+ if (barrier == NULL || count <= 0)
return (EINVAL);
- *barrier =
- (struct pthread_barrier *)malloc(sizeof(struct pthread_barrier));
- if (*barrier == NULL)
+
+ bar = malloc(sizeof(struct pthread_barrier));
+ if (bar == NULL)
return (ENOMEM);
- memset((void *)*barrier, 0, sizeof(struct pthread_barrier));
- (*barrier)->b_total = count;
- TAILQ_INIT(&(*barrier)->b_barrq);
+
+ _thr_umtx_init(&bar->b_lock);
+ bar->b_cycle = 0;
+ bar->b_waiters = 0;
+ bar->b_count = count;
+ *barrier = bar;
+
return (0);
}
int
_pthread_barrier_wait(pthread_barrier_t *barrier)
{
- struct pthread_barrier *b;
- struct pthread *ptd;
- int error;
+ struct pthread *curthread = _get_curthread();
+ pthread_barrier_t bar;
+ long cycle;
+ int ret;
- if (*barrier == NULL)
+ if (barrier == NULL || *barrier == NULL)
return (EINVAL);
- /*
- * Check if threads waiting on the barrier can be released. If
- * so, release them and make this last thread the special thread.
- */
- error = 0;
- b = *barrier;
- UMTX_LOCK(&b->b_lock);
- if (b->b_subtotal == (b->b_total - 1)) {
- TAILQ_FOREACH(ptd, &b->b_barrq, sqe) {
- PTHREAD_LOCK(ptd);
- TAILQ_REMOVE(&b->b_barrq, ptd, sqe);
- ptd->flags &= ~PTHREAD_FLAGS_IN_BARRQ;
- ptd->flags |= PTHREAD_FLAGS_BARR_REL;
- PTHREAD_WAKE(ptd);
- PTHREAD_UNLOCK(ptd);
- }
- b->b_subtotal = 0;
- UMTX_UNLOCK(&b->b_lock);
- return (PTHREAD_BARRIER_SERIAL_THREAD);
+ bar = *barrier;
+ THR_UMTX_LOCK(curthread, &bar->b_lock);
+ if (++bar->b_waiters == bar->b_count) {
+		/* Current thread is the last thread to reach the barrier */
+ bar->b_waiters = 0;
+ bar->b_cycle++;
+ _thr_umtx_wake(&bar->b_cycle, bar->b_count);
+ THR_UMTX_UNLOCK(curthread, &bar->b_lock);
+ ret = PTHREAD_BARRIER_SERIAL_THREAD;
+ } else {
+ cycle = bar->b_cycle;
+ THR_UMTX_UNLOCK(curthread, &bar->b_lock);
+ do {
+ _thr_umtx_wait(&bar->b_cycle, cycle, NULL);
+			/* re-check cycle to guard against spurious wakeups */
+ } while (cycle == bar->b_cycle);
+ ret = 0;
}
-
- /*
- * More threads need to reach the barrier. Suspend this thread.
- */
- PTHREAD_LOCK(curthread);
- TAILQ_INSERT_HEAD(&b->b_barrq, curthread, sqe);
- curthread->flags |= PTHREAD_FLAGS_IN_BARRQ;
- PTHREAD_UNLOCK(curthread);
- b->b_subtotal++;
- PTHREAD_ASSERT(b->b_subtotal < b->b_total,
- "the number of threads waiting at a barrier is too large");
- UMTX_UNLOCK(&b->b_lock);
- do {
- error = _thread_suspend(curthread, NULL);
- if (error == EINTR) {
- /*
- * Make sure this thread wasn't released from
- * the barrier while it was handling the signal.
- */
- PTHREAD_LOCK(curthread);
- if ((curthread->flags & PTHREAD_FLAGS_BARR_REL) != 0) {
- curthread->flags &= ~PTHREAD_FLAGS_BARR_REL;
- PTHREAD_UNLOCK(curthread);
- error = 0;
- break;
- }
- PTHREAD_UNLOCK(curthread);
- }
- } while (error == EINTR);
- return (error);
+ return (ret);
}
diff --git a/lib/libthr/thread/thr_barrierattr.c b/lib/libthr/thread/thr_barrierattr.c
index 2fe3955..6785937 100644
--- a/lib/libthr/thread/thr_barrierattr.c
+++ b/lib/libthr/thread/thr_barrierattr.c
@@ -1,79 +1,94 @@
-/*-
- * Copyright (c) 2004 Michael Telahun Makonnen <mtm@FreeBSD.Org>
+/*
+ * Copyright (c) 2003 David Xu <davidxu@freebsd.org>.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
+ * notice(s), this list of conditions and the following disclaimer as
+ * the first lines of this file unmodified other than the possible
+ * addition of one or more copyright notices.
* 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
+ * notice(s), this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
*
* $FreeBSD$
*/
-#include <pthread.h>
+#include <errno.h>
#include <stdlib.h>
+#include <pthread.h>
#include "thr_private.h"
__weak_reference(_pthread_barrierattr_destroy, pthread_barrierattr_destroy);
__weak_reference(_pthread_barrierattr_init, pthread_barrierattr_init);
-__weak_reference(_pthread_barrierattr_getpshared,
- pthread_barrierattr_getpshared);
__weak_reference(_pthread_barrierattr_setpshared,
- pthread_barrierattr_setpshared);
+ pthread_barrierattr_setpshared);
+__weak_reference(_pthread_barrierattr_getpshared,
+ pthread_barrierattr_getpshared);
int
_pthread_barrierattr_destroy(pthread_barrierattr_t *attr)
{
- if (*attr == NULL)
+
+ if (attr == NULL || *attr == NULL)
return (EINVAL);
+
free(*attr);
- *attr = NULL;
return (0);
}
int
-_pthread_barrierattr_init(pthread_barrierattr_t *attr)
+_pthread_barrierattr_getpshared(const pthread_barrierattr_t *attr,
+ int *pshared)
{
- *attr =
- (pthread_barrierattr_t)malloc(sizeof(struct pthread_barrierattr));
- if ((*attr) == NULL)
- return (ENOMEM);
- (*attr)->ba_pshared = PTHREAD_PROCESS_PRIVATE;
+
+ if (attr == NULL || *attr == NULL)
+ return (EINVAL);
+
+ *pshared = (*attr)->pshared;
return (0);
}
int
-_pthread_barrierattr_getpshared(const pthread_barrierattr_t *attr, int *pshared)
+_pthread_barrierattr_init(pthread_barrierattr_t *attr)
{
- if (*attr == NULL)
+
+ if (attr == NULL)
return (EINVAL);
- *pshared = (*attr)->ba_pshared;
+
+ if ((*attr = malloc(sizeof(struct pthread_barrierattr))) == NULL)
+ return (ENOMEM);
+
+ (*attr)->pshared = PTHREAD_PROCESS_PRIVATE;
return (0);
}
int
_pthread_barrierattr_setpshared(pthread_barrierattr_t *attr, int pshared)
{
- if (*attr == NULL || (pshared != PTHREAD_PROCESS_PRIVATE &&
- pshared != PTHREAD_PROCESS_SHARED))
+
+ if (attr == NULL || *attr == NULL)
return (EINVAL);
- (*attr)->ba_pshared = pshared;
+
+ /* Only PTHREAD_PROCESS_PRIVATE is supported. */
+ if (pshared != PTHREAD_PROCESS_PRIVATE)
+ return (EINVAL);
+
+ (*attr)->pshared = pshared;
return (0);
}
diff --git a/lib/libthr/thread/thr_cancel.c b/lib/libthr/thread/thr_cancel.c
index 5880e1f..5a3960e 100644
--- a/lib/libthr/thread/thr_cancel.c
+++ b/lib/libthr/thread/thr_cancel.c
@@ -1,143 +1,163 @@
/*
- * David Leonard <d@openbsd.org>, 1999. Public domain.
+ * Copyright (c) 2005, David Xu <davidxu@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
* $FreeBSD$
+ *
*/
-#include <sys/errno.h>
+
#include <pthread.h>
-#include <stdlib.h>
-#include "thr_private.h"
-/*
- * Static prototypes
- */
-static void testcancel(void);
+#include "thr_private.h"
__weak_reference(_pthread_cancel, pthread_cancel);
__weak_reference(_pthread_setcancelstate, pthread_setcancelstate);
__weak_reference(_pthread_setcanceltype, pthread_setcanceltype);
__weak_reference(_pthread_testcancel, pthread_testcancel);
-/*
- * Posix requires this function to be async-cancel-safe, so it
- * may not aquire any type of resource or call any functions
- * that might do so.
- */
+int _pthread_setcanceltype(int type, int *oldtype);
+
int
_pthread_cancel(pthread_t pthread)
{
- /* Don't continue if cancellation has already been set. */
- if (atomic_cmpset_int(&pthread->cancellation, (int)CS_NULL,
- (int)CS_PENDING) != 1)
- return (0);
+ struct pthread *curthread = _get_curthread();
+ int oldval, newval = 0;
+ int oldtype;
+ int ret;
/*
- * Only wakeup threads that are in cancellation points or
- * have set async cancel.
- * XXX - access to pthread->flags is not safe. We should just
- * unconditionally wake the thread and make sure that
- * the the library correctly handles spurious wakeups.
+ * POSIX says _pthread_cancel should be async cancellation safe,
+ * so we temporarily disable async cancellation.
*/
- if ((pthread->cancellationpoint || pthread->cancelmode == M_ASYNC) &&
- (pthread->flags & PTHREAD_FLAGS_NOT_RUNNING) != 0)
- PTHREAD_WAKE(pthread);
+ _pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, &oldtype);
+ if ((ret = _thr_ref_add(curthread, pthread, 0)) != 0) {
+ _pthread_setcanceltype(oldtype, NULL);
+ return (ret);
+ }
+
+ do {
+ oldval = pthread->cancelflags;
+ if (oldval & THR_CANCEL_NEEDED)
+ break;
+ newval = oldval | THR_CANCEL_NEEDED;
+ } while (!atomic_cmpset_acq_int(&pthread->cancelflags, oldval, newval));
+
+ if (!(oldval & THR_CANCEL_NEEDED) && SHOULD_ASYNC_CANCEL(newval))
+ _thr_send_sig(pthread, SIGCANCEL);
+
+ _thr_ref_delete(curthread, pthread);
+ _pthread_setcanceltype(oldtype, NULL);
return (0);
}
-/*
- * Posix requires this function to be async-cancel-safe, so it
- * may not aquire any type of resource or call any functions
- * that might do so.
- */
+/*
+ * If a cancellation request is pending on curthread and its current
+ * cancel flags allow acting on it (as decided by the SHOULD_CANCEL
+ * macro), terminate the thread with the PTHREAD_CANCELED exit value.
+ * Otherwise this is a no-op.
+ */
+static inline void
+testcancel(struct pthread *curthread)
+{
+	int newval;
+
+	newval = curthread->cancelflags;
+	if (SHOULD_CANCEL(newval))
+		pthread_exit(PTHREAD_CANCELED);
+}
+
int
_pthread_setcancelstate(int state, int *oldstate)
{
- int ostate;
+ struct pthread *curthread = _get_curthread();
+ int oldval, ret;
- ostate = (curthread->cancelmode == M_OFF) ? PTHREAD_CANCEL_DISABLE :
- PTHREAD_CANCEL_ENABLE;
+ oldval = curthread->cancelflags;
+ if (oldstate != NULL)
+ *oldstate = ((oldval & THR_CANCEL_DISABLE) ?
+ PTHREAD_CANCEL_DISABLE : PTHREAD_CANCEL_ENABLE);
switch (state) {
- case PTHREAD_CANCEL_ENABLE:
- curthread->cancelmode = curthread->cancelstate;
- break;
case PTHREAD_CANCEL_DISABLE:
- if (curthread->cancelmode != M_OFF) {
- curthread->cancelstate = curthread->cancelmode;
- curthread->cancelmode = M_OFF;
- }
+ atomic_set_int(&curthread->cancelflags, THR_CANCEL_DISABLE);
+ ret = 0;
+ break;
+ case PTHREAD_CANCEL_ENABLE:
+ atomic_clear_int(&curthread->cancelflags, THR_CANCEL_DISABLE);
+ testcancel(curthread);
+ ret = 0;
break;
default:
- return (EINVAL);
+ ret = EINVAL;
}
- if (oldstate != NULL)
- *oldstate = ostate;
- return (0);
+
+ return (ret);
}
-/*
- * Posix requires this function to be async-cancel-safe, so it
- * may not aquire any type of resource or call any functions that
- * might do so.
- */
int
_pthread_setcanceltype(int type, int *oldtype)
{
- enum cancel_mode omode;
+ struct pthread *curthread = _get_curthread();
+ int oldval, ret;
- omode = curthread->cancelstate;
+ oldval = curthread->cancelflags;
+ if (oldtype != NULL)
+ *oldtype = ((oldval & THR_CANCEL_AT_POINT) ?
+ PTHREAD_CANCEL_ASYNCHRONOUS :
+ PTHREAD_CANCEL_DEFERRED);
switch (type) {
case PTHREAD_CANCEL_ASYNCHRONOUS:
- if (curthread->cancelmode != M_OFF)
- curthread->cancelmode = M_ASYNC;
- curthread->cancelstate = M_ASYNC;
+ atomic_set_int(&curthread->cancelflags, THR_CANCEL_AT_POINT);
+ testcancel(curthread);
+ ret = 0;
break;
case PTHREAD_CANCEL_DEFERRED:
- if (curthread->cancelmode != M_OFF)
- curthread->cancelmode = M_DEFERRED;
- curthread->cancelstate = M_DEFERRED;
+ atomic_clear_int(&curthread->cancelflags, THR_CANCEL_AT_POINT);
+ ret = 0;
break;
default:
- return (EINVAL);
+ ret = EINVAL;
}
- if (oldtype != NULL) {
- if (omode == M_DEFERRED)
- *oldtype = PTHREAD_CANCEL_DEFERRED;
- else if (omode == M_ASYNC)
- *oldtype = PTHREAD_CANCEL_ASYNCHRONOUS;
- }
- return (0);
+
+ return (ret);
}
void
_pthread_testcancel(void)
{
- testcancel();
+ testcancel(_get_curthread());
}
-static void
-testcancel()
+int
+_thr_cancel_enter(struct pthread *curthread)
{
- if (curthread->cancelmode != M_OFF) {
-
- /* Cleanup a canceled thread only once. */
- if (atomic_cmpset_int(&curthread->cancellation,
- (int)CS_PENDING, (int)CS_SET) == 1) {
- _thread_exit_cleanup();
- pthread_exit(PTHREAD_CANCELED);
- PANIC("cancel");
- }
- }
-}
+ int oldval;
-void
-_thread_enter_cancellation_point(void)
-{
- testcancel();
- curthread->cancellationpoint = 1;
+ oldval = curthread->cancelflags;
+ if (!(oldval & THR_CANCEL_AT_POINT)) {
+ atomic_set_int(&curthread->cancelflags, THR_CANCEL_AT_POINT);
+ testcancel(curthread);
+ }
+ return (oldval);
}
void
-_thread_leave_cancellation_point(void)
+_thr_cancel_leave(struct pthread *curthread, int previous)
{
- curthread->cancellationpoint = 0;
- testcancel();
+ if (!(previous & THR_CANCEL_AT_POINT))
+ atomic_clear_int(&curthread->cancelflags, THR_CANCEL_AT_POINT);
}
diff --git a/lib/libthr/thread/thr_clean.c b/lib/libthr/thread/thr_clean.c
index cc6f34f..33fec2e 100644
--- a/lib/libthr/thread/thr_clean.c
+++ b/lib/libthr/thread/thr_clean.c
@@ -31,10 +31,12 @@
*
* $FreeBSD$
*/
+
#include <signal.h>
#include <errno.h>
#include <stdlib.h>
#include <pthread.h>
+
#include "thr_private.h"
__weak_reference(_pthread_cleanup_push, pthread_cleanup_push);
@@ -43,11 +45,14 @@ __weak_reference(_pthread_cleanup_pop, pthread_cleanup_pop);
void
_pthread_cleanup_push(void (*routine) (void *), void *routine_arg)
{
+ struct pthread *curthread = _get_curthread();
struct pthread_cleanup *new;
- if ((new = (struct pthread_cleanup *) malloc(sizeof(struct pthread_cleanup))) != NULL) {
+ if ((new = (struct pthread_cleanup *)
+ malloc(sizeof(struct pthread_cleanup))) != NULL) {
new->routine = routine;
new->routine_arg = routine_arg;
+ new->onstack = 0;
new->next = curthread->cleanup;
curthread->cleanup = new;
@@ -57,6 +62,7 @@ _pthread_cleanup_push(void (*routine) (void *), void *routine_arg)
void
_pthread_cleanup_pop(int execute)
{
+ struct pthread *curthread = _get_curthread();
struct pthread_cleanup *old;
if ((old = curthread->cleanup) != NULL) {
@@ -64,7 +70,7 @@ _pthread_cleanup_pop(int execute)
if (execute) {
old->routine(old->routine_arg);
}
- free(old);
+ if (old->onstack == 0)
+ free(old);
}
}
-
diff --git a/lib/libthr/thread/thr_concurrency.c b/lib/libthr/thread/thr_concurrency.c
index 9f2315b..7f019fe 100644
--- a/lib/libthr/thread/thr_concurrency.c
+++ b/lib/libthr/thread/thr_concurrency.c
@@ -33,6 +33,9 @@
*/
#include <errno.h>
+#include <pthread.h>
+
+#include "thr_private.h"
static int current_concurrency = 0;
diff --git a/lib/libthr/thread/thr_cond.c b/lib/libthr/thread/thr_cond.c
index b2cbb83..cd50c4d 100644
--- a/lib/libthr/thread/thr_cond.c
+++ b/lib/libthr/thread/thr_cond.c
@@ -1,476 +1,344 @@
/*
- * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
+ * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- * must display the following acknowledgement:
- * This product includes software developed by John Birrell.
- * 4. Neither the name of the author nor the names of any co-contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
*
- * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*/
+
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <pthread.h>
+#include <limits.h>
+
#include "thr_private.h"
/*
- * Proctect two different threads calling a pthread_cond_* function
- * from accidentally initializing the condition variable twice.
+ * Prototypes
*/
-static spinlock_t static_cond_lock = _SPINLOCK_INITIALIZER;
+static int cond_init(pthread_cond_t *cond, const pthread_condattr_t *attr);
+static int cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex,
+ const struct timespec *abstime, int cancel);
+static int cond_signal_common(pthread_cond_t *cond, int broadcast);
/*
- * Prototypes
+ * Double underscore versions are cancellation points. Single underscore
+ * versions are not and are provided for libc internal usage (which
+ * shouldn't introduce cancellation points).
*/
-static inline int cond_init(pthread_cond_t *);
-static pthread_t cond_queue_deq(pthread_cond_t);
-static void cond_queue_remove(pthread_cond_t, pthread_t);
-static void cond_queue_enq(pthread_cond_t, pthread_t);
-static int cond_signal(pthread_cond_t *, int);
-static int cond_wait_common(pthread_cond_t *,
- pthread_mutex_t *, const struct timespec *);
+__weak_reference(__pthread_cond_wait, pthread_cond_wait);
+__weak_reference(__pthread_cond_timedwait, pthread_cond_timedwait);
__weak_reference(_pthread_cond_init, pthread_cond_init);
__weak_reference(_pthread_cond_destroy, pthread_cond_destroy);
-__weak_reference(_pthread_cond_wait, pthread_cond_wait);
-__weak_reference(_pthread_cond_timedwait, pthread_cond_timedwait);
__weak_reference(_pthread_cond_signal, pthread_cond_signal);
__weak_reference(_pthread_cond_broadcast, pthread_cond_broadcast);
-#define COND_LOCK(c) \
-do { \
- if (umtx_lock(&(c)->c_lock, curthread->thr_id)) \
- abort(); \
-} while (0)
-
-#define COND_UNLOCK(c) \
-do { \
- if (umtx_unlock(&(c)->c_lock, curthread->thr_id)) \
- abort(); \
-} while (0)
-
-
-/* Reinitialize a condition variable to defaults. */
-int
-_cond_reinit(pthread_cond_t *cond)
+static int
+cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
{
- if (cond == NULL)
- return (EINVAL);
-
- if (*cond == NULL)
- return (pthread_cond_init(cond, NULL));
+ pthread_cond_t pcond;
+ int rval = 0;
- /*
- * Initialize the condition variable structure:
- */
- TAILQ_INIT(&(*cond)->c_queue);
- (*cond)->c_flags = COND_FLAGS_INITED;
- (*cond)->c_type = COND_TYPE_FAST;
- (*cond)->c_mutex = NULL;
- (*cond)->c_seqno = 0;
- bzero(&(*cond)->c_lock, sizeof((*cond)->c_lock));
-
- return (0);
+ if ((pcond = (pthread_cond_t)
+ malloc(sizeof(struct pthread_cond))) == NULL) {
+ rval = ENOMEM;
+ } else {
+ /*
+ * Initialise the condition variable structure:
+ */
+ _thr_umtx_init(&pcond->c_lock);
+ pcond->c_seqno = 0;
+ pcond->c_waiters = 0;
+ pcond->c_wakeups = 0;
+ if (cond_attr == NULL || *cond_attr == NULL) {
+ pcond->c_pshared = 0;
+ pcond->c_clockid = CLOCK_REALTIME;
+ } else {
+ pcond->c_pshared = (*cond_attr)->c_pshared;
+ pcond->c_clockid = (*cond_attr)->c_clockid;
+ }
+ *cond = pcond;
+ }
+ /* Return the completion status: */
+ return (rval);
}
-int
-_pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
+static int
+init_static(struct pthread *thread, pthread_cond_t *cond)
{
- enum pthread_cond_type type;
- pthread_cond_t pcond;
+ int ret;
- if (cond == NULL)
- return (EINVAL);
+ THR_LOCK_ACQUIRE(thread, &_cond_static_lock);
- /*
- * Check if a pointer to a condition variable attribute
- * structure was passed by the caller:
- */
- if (cond_attr != NULL && *cond_attr != NULL)
- type = (*cond_attr)->c_type;
+ if (*cond == NULL)
+ ret = cond_init(cond, NULL);
else
- /* Default to a fast condition variable: */
- type = COND_TYPE_FAST;
-
- /* Process according to condition variable type: */
- switch (type) {
- case COND_TYPE_FAST:
- break;
- default:
- return (EINVAL);
- break;
- }
+ ret = 0;
- if ((pcond = (pthread_cond_t)
- malloc(sizeof(struct pthread_cond))) == NULL)
- return (ENOMEM);
- /*
- * Initialise the condition variable
- * structure:
- */
- TAILQ_INIT(&pcond->c_queue);
- pcond->c_flags |= COND_FLAGS_INITED;
- pcond->c_type = type;
- pcond->c_mutex = NULL;
- pcond->c_seqno = 0;
- bzero(&pcond->c_lock, sizeof(pcond->c_lock));
+ THR_LOCK_RELEASE(thread, &_cond_static_lock);
- *cond = pcond;
-
- return (0);
+ return (ret);
}
int
-_pthread_cond_destroy(pthread_cond_t *cond)
+_pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
{
- /*
- * Short circuit for a statically initialized condvar
- * that is being destroyed without having been used.
- */
- if (*cond == PTHREAD_COND_INITIALIZER)
- return (0);
-
- COND_LOCK(*cond);
-
- /*
- * Free the memory allocated for the condition
- * variable structure:
- */
- free(*cond);
- /*
- * NULL the caller's pointer now that the condition
- * variable has been destroyed:
- */
*cond = NULL;
-
- return (0);
+ return (cond_init(cond, cond_attr));
}
int
-_pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
+_pthread_cond_destroy(pthread_cond_t *cond)
{
- int rval;
+ struct pthread_cond *cv;
+ struct pthread *curthread = _get_curthread();
+ int rval = 0;
+
+ if (*cond == NULL)
+ rval = EINVAL;
+ else {
+ /* Lock the condition variable structure: */
+ THR_LOCK_ACQUIRE(curthread, &(*cond)->c_lock);
+ if ((*cond)->c_waiters + (*cond)->c_wakeups != 0) {
+ THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);
+ return (EBUSY);
+ }
+
+ /*
+ * NULL the caller's pointer now that the condition
+ * variable has been destroyed:
+ */
+ cv = *cond;
+ *cond = NULL;
- rval = cond_wait_common(cond, mutex, NULL);
+ /* Unlock the condition variable structure: */
+ THR_LOCK_RELEASE(curthread, &cv->c_lock);
- /* This should never happen. */
- if (rval == ETIMEDOUT)
- abort();
+ /* Free the cond lock structure: */
+ /*
+ * Free the memory allocated for the condition
+ * variable structure:
+ */
+ free(cv);
+
+ }
+ /* Return the completion status: */
return (rval);
}
-int
-_pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
- const struct timespec * abstime)
+struct cond_cancel_info
{
- if (abstime == NULL || abstime->tv_nsec >= 1000000000)
- return (EINVAL);
+ pthread_mutex_t *mutex;
+ pthread_cond_t *cond;
+ long seqno;
+};
+
+/*
+ * Cleanup handler pushed around the umtx wait in cond_wait_common().
+ * It runs when the waiting thread is cancelled: if the sequence number
+ * advanced and a wakeup is outstanding, this thread may have consumed
+ * a signal meant for another waiter, so the wakeup is passed on (or
+ * the wakeup count is dropped); otherwise the thread simply removes
+ * itself from the waiter count.  Finally the caller's mutex is
+ * reacquired, as POSIX requires before cancellation cleanup proceeds.
+ */
+static void
+cond_cancel_handler(void *arg)
+{
+	struct pthread *curthread = _get_curthread();
+	struct cond_cancel_info *cci = (struct cond_cancel_info *)arg;
+	pthread_cond_t cv;
+
+	cv = *(cci->cond);
+	THR_LOCK_ACQUIRE(curthread, &cv->c_lock);
+	/* Seqno moved and a wakeup is pending: we may have eaten a signal. */
+	if (cv->c_seqno != cci->seqno && cv->c_wakeups != 0) {
+		if (cv->c_waiters > 0) {
+			/* Hand the wakeup on to another waiter. */
+			cv->c_seqno++;
+			_thr_umtx_wake(&cv->c_seqno, 1);
+		} else
+			cv->c_wakeups--;
+	} else {
+		/* No signal consumed; just stop counting as a waiter. */
+		cv->c_waiters--;
+	}
+	THR_LOCK_RELEASE(curthread, &cv->c_lock);
-	return (cond_wait_common(cond, mutex, abstime));
+	/* POSIX: the mutex must be held again when cleanup continues. */
+	_mutex_cv_lock(cci->mutex);
}
static int
-cond_wait_common(pthread_cond_t * cond, pthread_mutex_t * mutex,
- const struct timespec * abstime)
+cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex,
+ const struct timespec *abstime, int cancel)
{
- int rval = 0;
- int mtxrval;
+ struct pthread *curthread = _get_curthread();
+ struct timespec ts, ts2, *tsp;
+ struct cond_cancel_info cci;
+ pthread_cond_t cv;
+ long seq, oldseq;
+ int oldcancel;
+ int ret = 0;
-
- if (cond == NULL)
- return (EINVAL);
/*
- * If the condition variable is statically initialized, perform dynamic
- * initialization.
+ * If the condition variable is statically initialized,
+ * perform the dynamic initialization:
*/
- if (*cond == PTHREAD_COND_INITIALIZER && (rval = cond_init(cond)) != 0)
- return (rval);
-
- if ((*cond)->c_type != COND_TYPE_FAST)
- return (EINVAL);
-
- COND_LOCK(*cond);
-
- /*
- * If the condvar was statically allocated, properly
- * initialize the tail queue.
- */
- if (((*cond)->c_flags & COND_FLAGS_INITED) == 0) {
- TAILQ_INIT(&(*cond)->c_queue);
- (*cond)->c_flags |= COND_FLAGS_INITED;
- }
-
- if ((mutex == NULL) || (((*cond)->c_mutex != NULL) &&
- ((*cond)->c_mutex != *mutex))) {
- COND_UNLOCK(*cond);
- return (EINVAL);
- }
- /* Remember the mutex */
- (*cond)->c_mutex = *mutex;
-
- _thread_enter_cancellation_point();
- if ((rval = _mutex_cv_unlock(mutex)) != 0) {
- if (rval == -1){
- printf("mutex unlock by condvar failed!");
- fflush(stdout);
- abort();
- }
- _thread_leave_cancellation_point();
- COND_UNLOCK(*cond);
- return (rval);
+ if (__predict_false(*cond == NULL &&
+ (ret = init_static(curthread, cond)) != 0))
+ return (ret);
+
+ cv = *cond;
+ THR_LOCK_ACQUIRE(curthread, &cv->c_lock);
+ ret = _mutex_cv_unlock(mutex);
+ if (ret) {
+ THR_LOCK_RELEASE(curthread, &cv->c_lock);
+ return (ret);
}
+ oldseq = seq = cv->c_seqno;
+ cci.mutex = mutex;
+ cci.cond = cond;
+ cci.seqno = oldseq;
- /*
- * We need to protect the queue operations. It also
- * protects the pthread flag field. This is
- * dropped before calling _thread_suspend() and reaquired
- * when we return.
- */
- PTHREAD_LOCK(curthread);
-
- /*
- * Queue the running thread on the condition
- * variable and wait to be signaled.
- */
- cond_queue_enq(*cond, curthread);
+ cv->c_waiters++;
do {
- PTHREAD_UNLOCK(curthread);
- COND_UNLOCK(*cond);
- if (curthread->cancellation == CS_PENDING) {
- /*
- * Posix says we must lock the mutex
- * even if we're being canceled.
- */
- _mutex_cv_lock(mutex);
- _thread_leave_cancellation_point();
- PANIC("Shouldn't have come back.");
- }
- rval = _thread_suspend(curthread, (struct timespec *)abstime);
- if (rval != 0 && rval != ETIMEDOUT && rval != EINTR) {
- printf("thread suspend returned an invalid value");
- fflush(stdout);
- abort();
- }
- COND_LOCK(*cond);
- PTHREAD_LOCK(curthread);
- if (rval == ETIMEDOUT) {
- /*
- * Condition may have been signaled between the
- * time the thread timed out and locked the condvar.
- * If it wasn't, manually remove it from the queue.
- */
- if ((curthread->flags & PTHREAD_FLAGS_IN_CONDQ) == 0)
- rval = 0;
- else
- cond_queue_remove(*cond, curthread);
+ THR_LOCK_RELEASE(curthread, &cv->c_lock);
+
+ if (abstime != NULL) {
+ clock_gettime(cv->c_clockid, &ts);
+ TIMESPEC_SUB(&ts2, abstime, &ts);
+ tsp = &ts2;
+ } else
+ tsp = NULL;
+
+ if (cancel) {
+ THR_CLEANUP_PUSH(curthread, cond_cancel_handler, &cci);
+ oldcancel = _thr_cancel_enter(curthread);
+ ret = _thr_umtx_wait(&cv->c_seqno, seq, tsp);
+ _thr_cancel_leave(curthread, oldcancel);
+ THR_CLEANUP_POP(curthread, 0);
+ } else {
+ ret = _thr_umtx_wait(&cv->c_seqno, seq, tsp);
}
- } while ((curthread->flags & PTHREAD_FLAGS_IN_CONDQ) != 0);
- PTHREAD_UNLOCK(curthread);
- COND_UNLOCK(*cond);
- mtxrval = _mutex_cv_lock(mutex);
+ THR_LOCK_ACQUIRE(curthread, &cv->c_lock);
+ seq = cv->c_seqno;
+ if (abstime != NULL && ret == ETIMEDOUT)
+ break;
- /* If the mutex failed return that error. */
- if (mtxrval == -1) {
- printf("mutex lock from condvar failed!");
- fflush(stdout);
- abort();
+ /*
+ * loop if we have never been told to wake up
+ * or we lost a race.
+ */
+ } while (seq == oldseq || cv->c_wakeups == 0);
+
+ if (seq != oldseq && cv->c_wakeups != 0) {
+ cv->c_wakeups--;
+ ret = 0;
+ } else {
+ cv->c_waiters--;
}
- if (mtxrval != 0)
- rval = mtxrval;
-
- _thread_leave_cancellation_point();
- return (rval);
+ THR_LOCK_RELEASE(curthread, &cv->c_lock);
+ _mutex_cv_lock(mutex);
+ return (ret);
}
int
-_pthread_cond_signal(pthread_cond_t * cond)
+_pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
{
- return (cond_signal(cond, 0));
+
+ return (cond_wait_common(cond, mutex, NULL, 0));
}
int
-_pthread_cond_broadcast(pthread_cond_t * cond)
+__pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
{
- return (cond_signal(cond, 1));
+
+ return (cond_wait_common(cond, mutex, NULL, 1));
}
-static int
-cond_signal(pthread_cond_t * cond, int broadcast)
+int
+_pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
+ const struct timespec * abstime)
{
- int rval = 0;
- pthread_t pthread;
- if (cond == NULL)
- return (EINVAL);
- /*
- * If the condition variable is statically initialized, perform dynamic
- * initialization.
- */
- if (*cond == PTHREAD_COND_INITIALIZER && (rval = cond_init(cond)) != 0)
- return (rval);
-
- if ((*cond)->c_type != COND_TYPE_FAST)
+ if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
+ abstime->tv_nsec >= 1000000000)
return (EINVAL);
- COND_LOCK(*cond);
- /*
- * Enter a loop to bring all (or only one) threads off the
- * condition queue:
- */
- do {
- /*
- * Wake up the signaled thread. It will be returned
- * to us locked.
- */
- if ((pthread = cond_queue_deq(*cond)) != NULL) {
- PTHREAD_WAKE(pthread);
- PTHREAD_UNLOCK(pthread);
- }
- } while (broadcast && pthread != NULL);
-
- COND_UNLOCK(*cond);
- return (rval);
+ return (cond_wait_common(cond, mutex, abstime, 0));
}
-void
-_cond_wait_backout(pthread_t pthread)
-{
- pthread_cond_t cond;
-
- cond = pthread->data.cond;
- if (cond == NULL)
- return;
-
- /* Process according to condition variable type: */
- switch (cond->c_type) {
- /* Fast condition variable: */
- case COND_TYPE_FAST:
- cond_queue_remove(cond, pthread);
- break;
- default:
- break;
- }
-}
-
-/*
- * Dequeue a waiting thread from the head of a condition queue in
- * descending priority order.
- */
-static pthread_t
-cond_queue_deq(pthread_cond_t cond)
+int
+__pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
+ const struct timespec *abstime)
{
- pthread_t pthread;
- while ((pthread = TAILQ_FIRST(&cond->c_queue)) != NULL) {
- PTHREAD_LOCK(pthread);
- cond_queue_remove(cond, pthread);
-
- /*
- * Only exit the loop when we find a thread
- * that hasn't been canceled.
- */
- if (pthread->cancellation == CS_NULL)
- break;
- else
- PTHREAD_UNLOCK(pthread);
- }
+ if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
+ abstime->tv_nsec >= 1000000000)
+ return (EINVAL);
- return(pthread);
+ return (cond_wait_common(cond, mutex, abstime, 1));
}
-/*
- * Remove a waiting thread from a condition queue in descending priority
- * order.
- */
-static void
-cond_queue_remove(pthread_cond_t cond, pthread_t pthread)
+static int
+cond_signal_common(pthread_cond_t *cond, int broadcast)
{
+ struct pthread *curthread = _get_curthread();
+ pthread_cond_t cv;
+ int ret = 0, oldwaiters;
+
/*
- * Because pthread_cond_timedwait() can timeout as well
- * as be signaled by another thread, it is necessary to
- * guard against removing the thread from the queue if
- * it isn't in the queue.
+ * If the condition variable is statically initialized, perform dynamic
+ * initialization.
*/
- if (pthread->flags & PTHREAD_FLAGS_IN_CONDQ) {
- TAILQ_REMOVE(&cond->c_queue, pthread, sqe);
- pthread->flags &= ~PTHREAD_FLAGS_IN_CONDQ;
+ if (__predict_false(*cond == NULL &&
+ (ret = init_static(curthread, cond)) != 0))
+ return (ret);
+
+ cv = *cond;
+ /* Lock the condition variable structure. */
+ THR_LOCK_ACQUIRE(curthread, &cv->c_lock);
+ if (cv->c_waiters) {
+ if (!broadcast) {
+ cv->c_wakeups++;
+ cv->c_waiters--;
+ cv->c_seqno++;
+ _thr_umtx_wake(&cv->c_seqno, 1);
+ } else {
+ oldwaiters = cv->c_waiters;
+ cv->c_wakeups += cv->c_waiters;
+ cv->c_waiters = 0;
+ cv->c_seqno++;
+ _thr_umtx_wake(&cv->c_seqno, oldwaiters);
+ }
}
- /* Check for no more waiters. */
- if (TAILQ_FIRST(&cond->c_queue) == NULL)
- cond->c_mutex = NULL;
+ THR_LOCK_RELEASE(curthread, &cv->c_lock);
+ return (ret);
}
-/*
- * Enqueue a waiting thread to a condition queue in descending priority
- * order.
- */
-static void
-cond_queue_enq(pthread_cond_t cond, pthread_t pthread)
+int
+_pthread_cond_signal(pthread_cond_t * cond)
{
- pthread_t tid = TAILQ_LAST(&cond->c_queue, cond_head);
- char *name;
-
- name = pthread->name ? pthread->name : "unknown";
- if ((pthread->flags & PTHREAD_FLAGS_IN_CONDQ) != 0)
- _thread_printf(2, "Thread (%s:%ld) already on condq\n",
- pthread->name, pthread->thr_id);
- if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0)
- _thread_printf(2, "Thread (%s:%ld) already on mutexq\n",
- pthread->name, pthread->thr_id);
- PTHREAD_ASSERT_NOT_IN_SYNCQ(pthread);
- /*
- * For the common case of all threads having equal priority,
- * we perform a quick check against the priority of the thread
- * at the tail of the queue.
- */
- if ((tid == NULL) || (pthread->active_priority <= tid->active_priority))
- TAILQ_INSERT_TAIL(&cond->c_queue, pthread, sqe);
- else {
- tid = TAILQ_FIRST(&cond->c_queue);
- while (pthread->active_priority <= tid->active_priority)
- tid = TAILQ_NEXT(tid, sqe);
- TAILQ_INSERT_BEFORE(tid, pthread, sqe);
- }
- pthread->flags |= PTHREAD_FLAGS_IN_CONDQ;
- pthread->data.cond = cond;
+ return (cond_signal_common(cond, 0));
}
-static inline int
-cond_init(pthread_cond_t *cond)
+int
+_pthread_cond_broadcast(pthread_cond_t * cond)
{
- int error = 0;
- _SPINLOCK(&static_cond_lock);
- if (*cond == PTHREAD_COND_INITIALIZER)
- error = _pthread_cond_init(cond, NULL);
- _SPINUNLOCK(&static_cond_lock);
- return (error);
-}
+ return (cond_signal_common(cond, 1));
+}
diff --git a/lib/libthr/thread/thr_condattr.c b/lib/libthr/thread/thr_condattr.c
new file mode 100644
index 0000000..46b9ec0
--- /dev/null
+++ b/lib/libthr/thread/thr_condattr.c
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 1997 John Birrell <jb@cimlogic.com.au>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by John Birrell.
+ * 4. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <pthread.h>
+
+#include "thr_private.h"
+
+__weak_reference(_pthread_condattr_init, pthread_condattr_init);
+__weak_reference(_pthread_condattr_destroy, pthread_condattr_destroy);
+__weak_reference(_pthread_condattr_getclock, pthread_condattr_getclock);
+__weak_reference(_pthread_condattr_setclock, pthread_condattr_setclock);
+__weak_reference(_pthread_condattr_getpshared, pthread_condattr_getpshared);
+__weak_reference(_pthread_condattr_setpshared, pthread_condattr_setpshared);
+
+/*
+ * Allocate a condition-variable attribute object and initialize it as
+ * a copy of _pthread_condattr_default.  On success *attr points at the
+ * new object and 0 is returned; ENOMEM is returned if the allocation
+ * fails (in which case *attr is left untouched).
+ */
+int
+_pthread_condattr_init(pthread_condattr_t *attr)
+{
+	pthread_condattr_t pattr;
+	int ret;
+
+	if ((pattr = (pthread_condattr_t)
+	    malloc(sizeof(struct pthread_cond_attr))) == NULL) {
+		ret = ENOMEM;
+	} else {
+		memcpy(pattr, &_pthread_condattr_default,
+		    sizeof(struct pthread_cond_attr));
+		*attr = pattr;
+		ret = 0;
+	}
+	return (ret);
+}
+
+/*
+ * Destroy a condition-variable attribute object: free the heap copy
+ * allocated by _pthread_condattr_init() and clear the caller's handle.
+ * Returns EINVAL for a NULL or already-destroyed attribute, else 0.
+ */
+int
+_pthread_condattr_destroy(pthread_condattr_t *attr)
+{
+	if (attr == NULL || *attr == NULL)
+		return (EINVAL);
+
+	free(*attr);
+	*attr = NULL;
+	return (0);
+}
+
+/*
+ * Report the clock id stored in the attribute (used by timed condvar
+ * waits on condition variables created with it).  Returns EINVAL for
+ * a NULL or uninitialized attribute; otherwise stores the clock id
+ * through clock_id and returns 0.
+ */
+int
+_pthread_condattr_getclock(const pthread_condattr_t *attr,
+	clockid_t *clock_id)
+{
+	if (attr == NULL || *attr == NULL)
+		return (EINVAL);
+	*clock_id = (*attr)->c_clockid;
+	return (0);
+}
+
+/*
+ * Set the clock id used for timed waits on condition variables created
+ * with this attribute.  Only CLOCK_REALTIME, CLOCK_VIRTUAL, CLOCK_PROF
+ * and CLOCK_MONOTONIC are accepted; anything else yields EINVAL, as
+ * does a NULL or uninitialized attribute.
+ */
+int
+_pthread_condattr_setclock(pthread_condattr_t *attr,
+	clockid_t clock_id)
+{
+	if (attr == NULL || *attr == NULL)
+		return (EINVAL);
+	if (clock_id != CLOCK_REALTIME &&
+	    clock_id != CLOCK_VIRTUAL &&
+	    clock_id != CLOCK_PROF &&
+	    clock_id != CLOCK_MONOTONIC) {
+		return (EINVAL);
+	}
+	(*attr)->c_clockid = clock_id;
+	return (0);
+}
+
+/*
+ * Report the process-shared setting of the attribute.  Only
+ * PTHREAD_PROCESS_PRIVATE is supported by this implementation.
+ * Returns EINVAL for a NULL or uninitialized attribute, else 0.
+ */
+int
+_pthread_condattr_getpshared(const pthread_condattr_t *attr,
+	int *pshared)
+{
+	if (attr == NULL || *attr == NULL)
+		return (EINVAL);
+	/* Write through the pointer; assigning to the parameter itself
+	 * (as the previous code did) never reached the caller. */
+	*pshared = PTHREAD_PROCESS_PRIVATE;
+	return (0);
+}
+
+/*
+ * Set the process-shared attribute.  Only PTHREAD_PROCESS_PRIVATE is
+ * accepted; the value is recorded in the attribute for consistency
+ * with _pthread_condattr_setclock() and because cond_init() reads
+ * c_pshared when creating the condition variable.
+ */
+int
+_pthread_condattr_setpshared(pthread_condattr_t *attr, int pshared)
+{
+	if (attr == NULL || *attr == NULL)
+		return (EINVAL);
+
+	if (pshared != PTHREAD_PROCESS_PRIVATE)
+		return (EINVAL);
+	(*attr)->c_pshared = pshared;
+	return (0);
+}
diff --git a/lib/libthr/thread/thr_create.c b/lib/libthr/thread/thr_create.c
index c5ad8c9..d5e9a62 100644
--- a/lib/libthr/thread/thr_create.c
+++ b/lib/libthr/thread/thr_create.c
@@ -1,4 +1,5 @@
/*
+ * Copyright (c) 2003 Daniel M. Eischen <deischen@gdeb.com>
* Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au>
* All rights reserved.
*
@@ -31,27 +32,20 @@
*
* $FreeBSD$
*/
+
#include <errno.h>
#include <stdlib.h>
#include <string.h>
-#include <fcntl.h>
-#include <unistd.h>
#include <stddef.h>
-#include <sys/time.h>
-#include <machine/reg.h>
#include <pthread.h>
-#include "thr_private.h"
-#include "libc_private.h"
+#include <sys/signalvar.h>
-#define OFF(f) offsetof(struct pthread, f)
-int _thread_thr_id_offset = OFF(thr_id);
-int _thread_next_offset = OFF(tle.tqe_next);
-int _thread_name_offset = OFF(name);
-int _thread_ctx_offset = OFF(ctx);
-#undef OFF
+#include "thr_private.h"
-int _thread_PS_RUNNING_value = PS_RUNNING;
-int _thread_PS_DEAD_value = PS_DEAD;
+static void free_thread(struct pthread *curthread, struct pthread *thread);
+static int create_stack(struct pthread_attr *pattr);
+static void free_stack(struct pthread *curthread, struct pthread_attr *pattr);
+static void thread_start(struct pthread *curthread);
__weak_reference(_pthread_create, pthread_create);
@@ -59,73 +53,80 @@ int
_pthread_create(pthread_t * thread, const pthread_attr_t * attr,
void *(*start_routine) (void *), void *arg)
{
- int ret = 0;
- pthread_t new_thread;
- pthread_attr_t pattr;
- int flags;
- void *stack;
+ ucontext_t uc;
+ sigset_t sigmask, oldsigmask;
+ struct pthread *curthread, *new_thread;
+ int ret = 0;
+
+ _thr_check_init();
/*
- * Locking functions in libc are required when there are
- * threads other than the initial thread.
+ * Tell libc and others now they need lock to protect their data.
*/
- __isthreaded = 1;
+ if (_thr_isthreaded() == 0 && _thr_setthreaded(1))
+ return (EAGAIN);
- /* Allocate memory for the thread structure: */
- if ((new_thread = (pthread_t) malloc(sizeof(struct pthread))) == NULL)
+ curthread = _get_curthread();
+ if ((new_thread = _thr_alloc(curthread)) == NULL)
return (EAGAIN);
- /* Check if default thread attributes are required: */
- if (attr == NULL || *attr == NULL)
- pattr = &pthread_attr_default;
- else
- pattr = *attr;
-
- /* Check if a stack was specified in the thread attributes: */
- if ((stack = pattr->stackaddr_attr) == NULL) {
- stack = _thread_stack_alloc(pattr->stacksize_attr,
- pattr->guardsize_attr);
- if (stack == NULL) {
- free(new_thread);
- return (EAGAIN);
- }
+ if (attr == NULL || *attr == NULL)
+ /* Use the default thread attributes: */
+ new_thread->attr = _pthread_attr_default;
+ else
+ new_thread->attr = *(*attr);
+ if (new_thread->attr.sched_inherit == PTHREAD_INHERIT_SCHED) {
+ /* inherit scheduling contention scope */
+ if (curthread->attr.flags & PTHREAD_SCOPE_SYSTEM)
+ new_thread->attr.flags |= PTHREAD_SCOPE_SYSTEM;
+ else
+ new_thread->attr.flags &= ~PTHREAD_SCOPE_SYSTEM;
+ /*
+ * scheduling policy and scheduling parameters will be
+ * inherited in following code.
+ */
}
- /* Initialise the thread structure: */
- init_td_common(new_thread, pattr, 0);
- new_thread->stack = stack;
- new_thread->start_routine = start_routine;
- new_thread->arg = arg;
+ if (_thr_scope_system > 0)
+ new_thread->attr.flags |= PTHREAD_SCOPE_SYSTEM;
+ else if (_thr_scope_system < 0)
+ new_thread->attr.flags &= ~PTHREAD_SCOPE_SYSTEM;
- /* Initialise the machine context: */
- getcontext(&new_thread->ctx);
- new_thread->savedsig = new_thread->ctx.uc_sigmask;
- new_thread->ctx.uc_stack.ss_sp = new_thread->stack;
- new_thread->ctx.uc_stack.ss_size = pattr->stacksize_attr;
- makecontext(&new_thread->ctx, (void (*)(void))_thread_start, 1, new_thread);
- new_thread->arch_id = _set_curthread(&new_thread->ctx, new_thread, &ret);
- if (ret != 0) {
- if (pattr->stackaddr_attr == NULL) {
- STACK_LOCK;
- _thread_stack_free(new_thread->stack,
- pattr->stacksize_attr, pattr->guardsize_attr);
- STACK_UNLOCK;
- }
- free(new_thread);
- return (ret);
+ if (create_stack(&new_thread->attr) != 0) {
+ /* Insufficient memory to create a stack: */
+ new_thread->terminated = 1;
+ _thr_free(curthread, new_thread);
+ return (EAGAIN);
}
+ /*
+ * Write a magic value to the thread structure
+ * to help identify valid ones:
+ */
+ new_thread->magic = THR_MAGIC;
+ new_thread->start_routine = start_routine;
+ new_thread->arg = arg;
+ new_thread->cancelflags = PTHREAD_CANCEL_ENABLE |
+ PTHREAD_CANCEL_DEFERRED;
+ getcontext(&uc);
+ SIGFILLSET(uc.uc_sigmask);
+ uc.uc_stack.ss_sp = new_thread->attr.stackaddr_attr;
+ uc.uc_stack.ss_size = new_thread->attr.stacksize_attr;
+ makecontext(&uc, (void (*)(void))thread_start, 1, new_thread);
/*
* Check if this thread is to inherit the scheduling
* attributes from its parent:
*/
- if (new_thread->attr.flags & PTHREAD_INHERIT_SCHED) {
- /* Copy the scheduling attributes: */
- new_thread->base_priority = curthread->base_priority &
- ~PTHREAD_SIGNAL_PRIORITY;
- new_thread->attr.prio = curthread->base_priority &
- ~PTHREAD_SIGNAL_PRIORITY;
+ if (new_thread->attr.sched_inherit == PTHREAD_INHERIT_SCHED) {
+ /*
+ * Copy the scheduling attributes. Lock the scheduling
+ * lock to get consistent scheduling parameters.
+ */
+ THR_LOCK(curthread);
+ new_thread->base_priority = curthread->base_priority;
+ new_thread->attr.prio = curthread->base_priority;
new_thread->attr.sched_policy = curthread->attr.sched_policy;
+ THR_UNLOCK(curthread);
} else {
/*
* Use just the thread priority, leaving the
@@ -136,53 +137,87 @@ _pthread_create(pthread_t * thread, const pthread_attr_t * attr,
}
new_thread->active_priority = new_thread->base_priority;
- THREAD_LIST_LOCK;
-
- /* Add the thread to the linked list of all threads: */
- TAILQ_INSERT_HEAD(&_thread_list, new_thread, tle);
+ /* Initialize the mutex queue: */
+ TAILQ_INIT(&new_thread->mutexq);
+ TAILQ_INIT(&new_thread->pri_mutexq);
+ /* Initialise hooks in the thread structure: */
+ if (new_thread->attr.suspend == THR_CREATE_SUSPENDED)
+ new_thread->flags = THR_FLAGS_SUSPENDED;
+ new_thread->state = PS_RUNNING;
/*
- * Create the thread.
+	 * A thread created by thr_create() inherits the current thread's
+	 * signal mask; however, until the new thread has set itself up
+	 * correctly it cannot handle signals, so mask all signals here.
*/
- if (pattr->suspend == PTHREAD_CREATE_SUSPENDED)
- new_thread->flags |= PTHREAD_FLAGS_SUSPENDED;
- /* new thread inherits signal mask in kernel */
- _thread_sigblock();
- ret = thr_create(&new_thread->ctx, &new_thread->thr_id, flags);
- /* restore my signal mask */
- _thread_sigunblock();
+ SIGFILLSET(sigmask);
+ __sys_sigprocmask(SIG_SETMASK, &sigmask, &oldsigmask);
+ new_thread->sigmask = oldsigmask;
+ /* Add the new thread. */
+ _thr_link(curthread, new_thread);
+	/* Return the thread pointer early so that the new thread can use it. */
+ (*thread) = new_thread;
+ /* Schedule the new thread. */
+ ret = thr_create(&uc, &new_thread->tid, 0);
+ __sys_sigprocmask(SIG_SETMASK, &oldsigmask, NULL);
if (ret != 0) {
- _thread_printf(STDERR_FILENO, "thr_create() == %d\n", ret);
- PANIC("thr_create");
+ _thr_unlink(curthread, new_thread);
+ free_thread(curthread, new_thread);
+ (*thread) = 0;
+ ret = EAGAIN;
}
+ return (ret);
+}
- THREAD_LIST_UNLOCK;
+static void
+free_thread(struct pthread *curthread, struct pthread *thread)
+{
+	free_stack(curthread, &thread->attr);
+	thread->terminated = 1;	/* mark the thread being freed, not the caller */
+	_thr_free(curthread, thread);
+}
- /* Return a pointer to the thread structure: */
- (*thread) = new_thread;
+static int
+create_stack(struct pthread_attr *pattr)
+{
+ int ret;
- return (0);
+ /* Check if a stack was specified in the thread attributes: */
+ if ((pattr->stackaddr_attr) != NULL) {
+ pattr->guardsize_attr = 0;
+ pattr->flags |= THR_STACK_USER;
+ ret = 0;
+ }
+ else
+ ret = _thr_stack_alloc(pattr);
+ return (ret);
}
-void
-_thread_start(pthread_t td)
+static void
+free_stack(struct pthread *curthread, struct pthread_attr *pattr)
{
- int ret;
+ if ((pattr->flags & THR_STACK_USER) == 0) {
+ THREAD_LIST_LOCK(curthread);
+ /* Stack routines don't use malloc/free. */
+ _thr_stack_free(pattr);
+ THREAD_LIST_UNLOCK(curthread);
+ }
+}
- /*
- * for AMD64, we need to set fsbase by thread itself, before
- * fsbase is set, we can not run any other code, for example
- * signal code.
- */
- _set_curthread(NULL, td, &ret);
+static void
+thread_start(struct pthread *curthread)
+{
+ _tcb_set(curthread->tcb);
- /* restore signal mask inherited before */
- __sys_sigprocmask(SIG_SETMASK, &td->savedsig, NULL);
+ /* Thread was created with all signals blocked, unblock them. */
+ __sys_sigprocmask(SIG_SETMASK, &curthread->sigmask, NULL);
- if ((curthread->flags & PTHREAD_FLAGS_SUSPENDED) != 0)
- _thread_suspend(curthread, NULL);
+ if (curthread->flags & THR_FLAGS_NEED_SUSPEND)
+ _thr_suspend_check(curthread);
+ /* Run the current thread's start routine with argument: */
pthread_exit(curthread->start_routine(curthread->arg));
+
/* This point should never be reached. */
PANIC("Thread has resumed after exit");
}
diff --git a/lib/libthr/thread/thr_detach.c b/lib/libthr/thread/thr_detach.c
index 1d7d334..9ff5f84 100644
--- a/lib/libthr/thread/thr_detach.c
+++ b/lib/libthr/thread/thr_detach.c
@@ -31,9 +31,11 @@
*
* $FreeBSD$
*/
+
+#include <sys/types.h>
#include <errno.h>
#include <pthread.h>
-#include <stdlib.h>
+
#include "thr_private.h"
__weak_reference(_pthread_detach, pthread_detach);
@@ -41,42 +43,31 @@ __weak_reference(_pthread_detach, pthread_detach);
int
_pthread_detach(pthread_t pthread)
{
- int error;
+ struct pthread *curthread = _get_curthread();
+ int rval;
- if (pthread->magic != PTHREAD_MAGIC)
+ if (pthread == NULL)
return (EINVAL);
- PTHREAD_LOCK(pthread);
-
- if ((pthread->attr.flags & PTHREAD_DETACHED) != 0) {
- _thread_sigblock();
- DEAD_LIST_LOCK;
- error = pthread->isdead ? ESRCH : EINVAL;
- DEAD_LIST_UNLOCK;
- _thread_sigunblock();
- PTHREAD_UNLOCK(pthread);
- return (error);
+ THREAD_LIST_LOCK(curthread);
+ if ((rval = _thr_find_thread(curthread, pthread,
+ /*include dead*/1)) != 0) {
+ THREAD_LIST_UNLOCK(curthread);
+ return (rval);
}
- pthread->attr.flags |= PTHREAD_DETACHED;
-
- /* Check if there is a joiner: */
- if (pthread->joiner != NULL) {
- struct pthread *joiner = pthread->joiner;
-
- /* Set the return value for the woken thread: */
- joiner->join_status.error = ESRCH;
- joiner->join_status.ret = NULL;
- joiner->join_status.thread = NULL;
-
- /*
- * Disconnect the joiner from the thread being detached:
- */
- pthread->joiner = NULL;
- PTHREAD_WAKE(joiner);
+ /* Check if the thread is already detached or has a joiner. */
+ if ((pthread->tlflags & TLFLAGS_DETACHED) != 0 ||
+ (pthread->joiner != NULL)) {
+ THREAD_LIST_UNLOCK(curthread);
+ return (EINVAL);
}
- PTHREAD_UNLOCK(pthread);
+ /* Flag the thread as detached. */
+ pthread->tlflags |= TLFLAGS_DETACHED;
+ if (pthread->state == PS_DEAD)
+ THR_GCLIST_ADD(pthread);
+ THREAD_LIST_UNLOCK(curthread);
return (0);
}
diff --git a/lib/libthr/thread/thr_exit.c b/lib/libthr/thread/thr_exit.c
index f74b5e1..554646b 100644
--- a/lib/libthr/thread/thr_exit.c
+++ b/lib/libthr/thread/thr_exit.c
@@ -31,39 +31,28 @@
*
* $FreeBSD$
*/
+
#include <errno.h>
-#include <unistd.h>
-#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
-#include <string.h>
#include <pthread.h>
+
#include "thr_private.h"
-__weak_reference(_pthread_exit, pthread_exit);
+void _pthread_exit(void *status);
-static void deadlist_free_threads();
+__weak_reference(_pthread_exit, pthread_exit);
void
-_thread_exit(char *fname, int lineno, char *string)
+_thread_exit(char *fname, int lineno, char *msg)
{
- char s[256];
- /* Prepare an error message string: */
- snprintf(s, sizeof(s),
+ /* Write an error message to the standard error file descriptor: */
+ _thread_printf(2,
"Fatal error '%s' at line %d in file %s (errno = %d)\n",
- string, lineno, fname, errno);
-
- /* Write the string to the standard error file descriptor: */
- __sys_write(2, s, strlen(s));
+ msg, lineno, fname, errno);
- /* Force this process to exit: */
- /* XXX - Do we want abort to be conditional on _PTHREADS_INVARIANTS? */
-#if defined(_PTHREADS_INVARIANTS)
abort();
-#else
- __sys_exit(1);
-#endif
}
/*
@@ -72,8 +61,10 @@ _thread_exit(char *fname, int lineno, char *string)
* abnormal thread termination can be found.
*/
void
-_thread_exit_cleanup(void)
+_thr_exit_cleanup(void)
{
+ struct pthread *curthread = _get_curthread();
+
/*
* POSIX states that cancellation/termination of a thread should
* not release any visible resources (such as mutexes) and that
@@ -93,27 +84,24 @@ _thread_exit_cleanup(void)
void
_pthread_exit(void *status)
{
- struct pthread *pthread;
- int exitNow = 0;
-
- /*
- * This thread will no longer handle any signals.
- */
- _thread_sigblock();
+ struct pthread *curthread = _get_curthread();
/* Check if this thread is already in the process of exiting: */
- if (curthread->exiting) {
+ if ((curthread->cancelflags & THR_CANCEL_EXITING) != 0) {
char msg[128];
- snprintf(msg, sizeof(msg), "Thread %p has called pthread_exit() from a destructor. POSIX 1003.1 1996 s16.2.5.2 does not allow this!",curthread);
+ snprintf(msg, sizeof(msg), "Thread %p has called "
+ "pthread_exit() from a destructor. POSIX 1003.1 "
+ "1996 s16.2.5.2 does not allow this!", curthread);
PANIC(msg);
}
- /* Flag this thread as exiting: */
- curthread->exiting = 1;
+ /* Flag this thread as exiting. */
+ atomic_set_int(&curthread->cancelflags, THR_CANCEL_EXITING);
+
+ _thr_exit_cleanup();
/* Save the return value: */
curthread->ret = status;
-
while (curthread->cleanup != NULL) {
pthread_cleanup_pop(1);
}
@@ -126,93 +114,23 @@ _pthread_exit(void *status)
_thread_cleanupspecific();
}
- /*
- * Remove read-write lock list. It is allocated as-needed.
- * Therefore, it must be checked for validity before freeing.
- */
- if (curthread->rwlockList != NULL)
- free(curthread->rwlockList);
-
- /* Lock the dead list first to maintain correct lock order */
- DEAD_LIST_LOCK;
- THREAD_LIST_LOCK;
-
- /* Check if there is a thread joining this one: */
- if (curthread->joiner != NULL) {
- pthread = curthread->joiner;
- curthread->joiner = NULL;
-
- /* Set the return value for the joining thread: */
- pthread->join_status.ret = curthread->ret;
- pthread->join_status.error = 0;
- pthread->join_status.thread = NULL;
-
- /* Make the joining thread runnable: */
- PTHREAD_WAKE(pthread);
-
- curthread->attr.flags |= PTHREAD_DETACHED;
- }
-
- /*
- * Free any memory allocated for dead threads.
- * Add this thread to the list of dead threads, and
- * also remove it from the active threads list.
- */
- deadlist_free_threads();
- TAILQ_INSERT_HEAD(&_dead_list, curthread, dle);
- TAILQ_REMOVE(&_thread_list, curthread, tle);
-
- /* If we're the last thread, call it quits */
- if (TAILQ_EMPTY(&_thread_list))
- exitNow = 1;
-
- THREAD_LIST_UNLOCK;
- DEAD_LIST_UNLOCK;
-
- if (exitNow)
+ if (!_thr_isthreaded())
exit(0);
- /*
- * This function will not return unless we are the last
- * thread, which we can't be because we've already checked
- * for that.
- */
- thr_exit((long *)&curthread->isdead);
-
- /* This point should not be reached. */
- PANIC("Dead thread has resumed");
-}
-
-/*
- * Note: this function must be called with the dead thread list
- * locked.
- */
-static void
-deadlist_free_threads()
-{
- struct pthread *ptd, *ptdTemp;
-
- TAILQ_FOREACH_SAFE(ptd, &_dead_list, dle, ptdTemp) {
- /* Don't destroy the initial thread or non-detached threads. */
- if (ptd == _thread_initial ||
- (ptd->attr.flags & PTHREAD_DETACHED) == 0 ||
- !ptd->isdead)
- continue;
- TAILQ_REMOVE(&_dead_list, ptd, dle);
- deadlist_free_onethread(ptd);
- }
-}
-
-void
-deadlist_free_onethread(struct pthread *ptd)
-{
-
- if (ptd->attr.stackaddr_attr == NULL && ptd->stack != NULL) {
- STACK_LOCK;
- _thread_stack_free(ptd->stack, ptd->attr.stacksize_attr,
- ptd->attr.guardsize_attr);
- STACK_UNLOCK;
+ THREAD_LIST_LOCK(curthread);
+ _thread_active_threads--;
+ if (_thread_active_threads == 0) {
+ THREAD_LIST_UNLOCK(curthread);
+ exit(0);
+ /* Never reach! */
}
- _retire_thread(ptd->arch_id);
- free(ptd);
+ if (curthread->tlflags & TLFLAGS_DETACHED)
+ THR_GCLIST_ADD(curthread);
+ curthread->state = PS_DEAD;
+ THREAD_LIST_UNLOCK(curthread);
+ if (curthread->joiner)
+ _thr_umtx_wake(&curthread->state, INT_MAX);
+ thr_exit(&curthread->terminated);
+ PANIC("thr_exit() returned");
+ /* Never reach! */
}
diff --git a/lib/libthr/thread/thr_fork.c b/lib/libthr/thread/thr_fork.c
new file mode 100644
index 0000000..8b311cb
--- /dev/null
+++ b/lib/libthr/thread/thr_fork.c
@@ -0,0 +1,222 @@
+/*
+ * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
+ * Copyright (c) 2003 Daniel Eischen <deischen@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by John Birrell.
+ * 4. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <errno.h>
+#include <string.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <pthread.h>
+#include <spinlock.h>
+
+#include "libc_private.h"
+#include "thr_private.h"
+
+__weak_reference(_pthread_atfork, pthread_atfork);
+
+int
+_pthread_atfork(void (*prepare)(void), void (*parent)(void),
+ void (*child)(void))
+{
+ struct pthread *curthread;
+ struct pthread_atfork *af;
+
+ _thr_check_init();
+
+ if ((af = malloc(sizeof(struct pthread_atfork))) == NULL)
+ return (ENOMEM);
+
+ curthread = _get_curthread();
+ af->prepare = prepare;
+ af->parent = parent;
+ af->child = child;
+ THR_UMTX_LOCK(curthread, &_thr_atfork_lock);
+ TAILQ_INSERT_TAIL(&_thr_atfork_list, af, qe);
+ THR_UMTX_UNLOCK(curthread, &_thr_atfork_lock);
+ return (0);
+}
+
+/*
+ * For a while, allow libpthread to work with a libc that doesn't
+ * export the malloc lock.
+ */
+#pragma weak __malloc_lock
+
+__weak_reference(_fork, fork);
+
+pid_t
+_fork(void)
+{
+	static umtx_t inprogress;
+	static int waiters;
+	umtx_t tmp;
+
+	struct pthread *curthread;
+	struct pthread_atfork *af;
+	pid_t ret;
+	int errsave;
+	int unlock_malloc;
+
+	if (!_thr_is_inited())
+		return (__sys_fork());
+
+	curthread = _get_curthread();
+
+	/*
+	 * Block all signals until we reach a safe point.
+	 */
+	_thr_signal_block(curthread);
+
+	THR_UMTX_LOCK(curthread, &_thr_atfork_lock);
+	tmp = inprogress;
+	while (tmp) {
+		waiters++;
+		THR_UMTX_UNLOCK(curthread, &_thr_atfork_lock);
+		_thr_umtx_wait(&inprogress, tmp, NULL);
+		THR_UMTX_LOCK(curthread, &_thr_atfork_lock);
+		waiters--;
+		tmp = inprogress;
+	}
+	inprogress = 1;
+
+	/* Unlock the mutex so new hooks can be added while the hooks run. */
+	THR_UMTX_UNLOCK(curthread, &_thr_atfork_lock);
+
+	/* Run down atfork prepare handlers. */
+	TAILQ_FOREACH_REVERSE(af, &_thr_atfork_list, atfork_head, qe) {
+		if (af->prepare != NULL)
+			af->prepare();
+	}
+
+	/*
+	 * Try our best to protect memory from being corrupted in
+	 * child process because another thread in malloc code will
+	 * simply be killed by fork().
+	 */
+	if ((_thr_isthreaded() != 0) && (__malloc_lock != NULL)) {
+		unlock_malloc = 1;
+		_spinlock(__malloc_lock);
+	} else {
+		unlock_malloc = 0;
+	}
+
+	/* Fork a new process: */
+	if ((ret = __sys_fork()) == 0) {
+		/* Child process */
+		errsave = errno;
+		inprogress = 0;
+		curthread->cancelflags &= ~THR_CANCEL_NEEDED;
+		/*
+		 * Thread list will be reinitialized, and later we call
+		 * _libpthread_init(), it will add us back to list.
+		 */
+		curthread->tlflags &= ~(TLFLAGS_IN_TDLIST | TLFLAGS_DETACHED);
+
+		/* child is a new kernel thread. */
+		thr_self(&curthread->tid);
+
+		/* Reset locks that dead threads may have been holding. */
+		_thr_umtx_init(&curthread->lock);
+		_thr_umtx_init(&_thr_atfork_lock);
+		_thr_setthreaded(0);
+
+		/* reinitialize libc spinlocks, this includes __malloc_lock. */
+		_thr_spinlock_init();
+		_mutex_fork(curthread);
+
+		/* reinitialize library. */
+		_libpthread_init(curthread);
+
+		/* Ready to continue, unblock signals. */
+		_thr_signal_unblock(curthread);
+
+		/* Run down atfork child handlers. */
+		TAILQ_FOREACH(af, &_thr_atfork_list, qe) {
+			if (af->child != NULL)
+				af->child();
+		}
+	} else {
+		/* Parent process */
+		errsave = errno;
+
+		if (unlock_malloc)
+			_spinunlock(__malloc_lock);
+
+		/* Ready to continue, unblock signals. */
+		_thr_signal_unblock(curthread);
+
+		/* Run down atfork parent handlers. */
+		TAILQ_FOREACH(af, &_thr_atfork_list, qe) {
+			if (af->parent != NULL)
+				af->parent();
+		}
+
+		THR_UMTX_LOCK(curthread, &_thr_atfork_lock);
+		inprogress = 0;
+		if (waiters)
+			_thr_umtx_wake(&inprogress, waiters);
+		THR_UMTX_UNLOCK(curthread, &_thr_atfork_lock);
+	}
+	errno = errsave;
+
+	/* Return the process ID: */
+	return (ret);
+}
diff --git a/lib/libthr/thread/thr_getschedparam.c b/lib/libthr/thread/thr_getschedparam.c
new file mode 100644
index 0000000..907174c
--- /dev/null
+++ b/lib/libthr/thread/thr_getschedparam.c
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Daniel Eischen.
+ * 4. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <errno.h>
+#include <pthread.h>
+
+#include "thr_private.h"
+
+__weak_reference(_pthread_getschedparam, pthread_getschedparam);
+
+int
+_pthread_getschedparam(pthread_t pthread, int *policy,
+	struct sched_param *param)
+{
+	struct pthread *curthread = _get_curthread();
+	int ret, tmp;
+
+	if ((param == NULL) || (policy == NULL))
+		/* NULL output pointer: invalid argument. */
+		ret = EINVAL;
+	else if (pthread == curthread) {
+		/*
+		 * Fast path: the target is the current thread, so no
+		 * thread-list search or reference is needed.
+		 */
+		THR_THREAD_LOCK(curthread, curthread);
+		param->sched_priority =
+		    THR_BASE_PRIORITY(pthread->base_priority);
+		tmp = pthread->attr.sched_policy;
+		THR_THREAD_UNLOCK(curthread, curthread);
+		*policy = tmp;
+		ret = 0;
+	}
+	/* Find the thread in the list of active threads. */
+	else if ((ret = _thr_ref_add(curthread, pthread, /*include dead*/0))
+	    == 0) {
+		THR_THREAD_LOCK(curthread, pthread);
+		param->sched_priority =
+		    THR_BASE_PRIORITY(pthread->base_priority);
+		tmp = pthread->attr.sched_policy;
+		THR_THREAD_UNLOCK(curthread, pthread);
+		_thr_ref_delete(curthread, pthread);
+		*policy = tmp;
+	}
+	return (ret);
+}
diff --git a/lib/libthr/thread/thr_info.c b/lib/libthr/thread/thr_info.c
index b0fae83..7142f03 100644
--- a/lib/libthr/thread/thr_info.c
+++ b/lib/libthr/thread/thr_info.c
@@ -31,6 +31,7 @@
*
* $FreeBSD$
*/
+
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
@@ -38,6 +39,7 @@
#include <unistd.h>
#include <pthread.h>
#include <errno.h>
+
#include "thr_private.h"
#ifndef NELEMENTS
@@ -57,10 +59,8 @@ struct s_thread_info {
static const struct s_thread_info thread_info[] = {
{PS_RUNNING , "Running"},
{PS_MUTEX_WAIT , "Waiting on a mutex"},
- {PS_COND_WAIT , "Waiting on a condition variable"},
- {PS_SLEEP_WAIT , "Sleeping"},
- {PS_WAIT_WAIT , "Waiting process"},
{PS_JOIN , "Waiting to join"},
+ {PS_SUSPENDED , "Suspended"},
{PS_DEAD , "Dead"},
{PS_DEADLOCK , "Deadlocked"},
{PS_STATE_MAX , "Not a real state!"}
@@ -69,14 +69,12 @@ static const struct s_thread_info thread_info[] = {
void
_thread_dump_info(void)
{
- char s[512];
- int fd;
- int i;
- pthread_t pthread;
- char tmpfile[128];
+ char s[512], tmpfile[128];
+ pthread_t pthread;
+ int fd, i;
for (i = 0; i < 100000; i++) {
- snprintf(tmpfile, sizeof(tmpfile), "/tmp/uthread.dump.%u.%i",
+ snprintf(tmpfile, sizeof(tmpfile), "/tmp/pthread.dump.%u.%i",
getpid(), i);
/* Open the dump file for append and create it if necessary: */
if ((fd = __sys_open(tmpfile, O_RDWR | O_CREAT | O_EXCL,
@@ -99,37 +97,34 @@ _thread_dump_info(void)
/* all 100000 possibilities are in use :( */
return;
} else {
- /* Output a header for active threads: */
- strcpy(s, "\n\n=============\nACTIVE THREADS\n\n");
+ /* Dump the active threads. */
+ strcpy(s, "\n\n========\nACTIVE THREADS\n\n");
__sys_write(fd, s, strlen(s));
/* Enter a loop to report each thread in the global list: */
TAILQ_FOREACH(pthread, &_thread_list, tle) {
- dump_thread(fd, pthread, /*long_verson*/ 1);
+ if (pthread->state != PS_DEAD)
+ dump_thread(fd, pthread, /*long_verson*/ 1);
}
- /* Check if there are no dead threads: */
- DEAD_LIST_LOCK;
- if (TAILQ_FIRST(&_dead_list) == NULL) {
- /* Output a record: */
- strcpy(s, "\n\nTHERE ARE NO DEAD THREADS\n");
- __sys_write(fd, s, strlen(s));
- } else {
- /* Output a header for dead threads: */
- strcpy(s, "\n\nDEAD THREADS\n\n");
- __sys_write(fd, s, strlen(s));
+ /*
+ * Dump the ready threads.
+ * XXX - We can't easily do this because the run queues
+ * are per-KSEG.
+ */
+ strcpy(s, "\n\n========\nREADY THREADS - unimplemented\n\n");
+ __sys_write(fd, s, strlen(s));
- /*
- * Enter a loop to report each thread in the global
- * dead thread list:
- */
- TAILQ_FOREACH(pthread, &_dead_list, dle) {
- dump_thread(fd, pthread, /*long_version*/ 0);
- }
- }
- DEAD_LIST_UNLOCK;
- /* Close the dump file: */
+ /*
+ * Dump the waiting threads.
+ * XXX - We can't easily do this because the wait queues
+ * are per-KSEG.
+ */
+ strcpy(s, "\n\n========\nWAITING THREADS - unimplemented\n\n");
+ __sys_write(fd, s, strlen(s));
+
+ /* Close the dump file. */
__sys_close(fd);
}
}
@@ -137,8 +132,9 @@ _thread_dump_info(void)
static void
dump_thread(int fd, pthread_t pthread, int long_version)
{
- char s[512];
- int i;
+ struct pthread *curthread = _get_curthread();
+ char s[512];
+ int i;
/* Find the state: */
for (i = 0; i < NELEMENTS(thread_info) - 1; i++)
@@ -147,10 +143,12 @@ dump_thread(int fd, pthread_t pthread, int long_version)
/* Output a record for the thread: */
snprintf(s, sizeof(s),
- "--------------------\nThread %p (%s) prio %3d state %s [%s:%d]\n",
+ "--------------------\n"
+ "Thread %p (%s), scope %s, prio %3d, state %s [%s:%d]\n",
pthread, (pthread->name == NULL) ? "" : pthread->name,
- pthread->active_priority, thread_info[i].name, pthread->fname,
- pthread->lineno);
+ pthread->attr.flags & PTHREAD_SCOPE_SYSTEM ? "system" : "process",
+ pthread->active_priority,
+ thread_info[i].name, pthread->fname, pthread->lineno);
__sys_write(fd, s, strlen(s));
if (long_version != 0) {
@@ -161,11 +159,12 @@ dump_thread(int fd, pthread_t pthread, int long_version)
__sys_write(fd, s, strlen(s));
}
/* Check if this is the initial thread: */
- if (pthread == _thread_initial) {
+ if (pthread == _thr_initial) {
/* Output a record for the initial thread: */
strcpy(s, "This is the initial thread\n");
__sys_write(fd, s, strlen(s));
}
+
/* Process according to thread state: */
switch (pthread->state) {
/*
@@ -173,7 +172,15 @@ dump_thread(int fd, pthread_t pthread, int long_version)
* coded to dump information:
*/
default:
- /* Nothing to do here. */
+ snprintf(s, sizeof(s), "sigmask (hi) ");
+ __sys_write(fd, s, strlen(s));
+ for (i = _SIG_WORDS - 1; i >= 0; i--) {
+ snprintf(s, sizeof(s), "%08x ",
+ pthread->sigmask.__bits[i]);
+ __sys_write(fd, s, strlen(s));
+ }
+ snprintf(s, sizeof(s), "(lo)\n");
+ __sys_write(fd, s, strlen(s));
break;
}
}
@@ -181,10 +188,10 @@ dump_thread(int fd, pthread_t pthread, int long_version)
/* Set the thread name for debug: */
void
-_pthread_set_name_np(pthread_t thread, const char *name)
+_pthread_set_name_np(pthread_t thread, char *name)
{
/* Check if the caller has specified a valid thread: */
- if (thread != NULL && thread->magic == PTHREAD_MAGIC) {
+ if (thread != NULL && thread->magic == THR_MAGIC) {
if (thread->name != NULL) {
/* Free space for previous name. */
free(thread->name);
diff --git a/lib/libthr/thread/thr_init.c b/lib/libthr/thread/thr_init.c
index 45d238c..f9545c2 100644
--- a/lib/libthr/thread/thr_init.c
+++ b/lib/libthr/thread/thr_init.c
@@ -1,4 +1,5 @@
/*
+ * Copyright (c) 2003 Daniel M. Eischen <deischen@freebsd.org>
* Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au>
* All rights reserved.
*
@@ -38,6 +39,7 @@
#include "namespace.h"
#include <sys/param.h>
#include <sys/types.h>
+#include <sys/signalvar.h>
#include <machine/reg.h>
#include <sys/ioctl.h>
@@ -56,6 +58,7 @@
#include <fcntl.h>
#include <paths.h>
#include <pthread.h>
+#include <pthread_np.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
@@ -63,19 +66,22 @@
#include <unistd.h>
#include "un-namespace.h"
+#include "libc_private.h"
#include "thr_private.h"
-extern void _thread_init_hack(void);
+int __pthread_cond_wait(pthread_cond_t *, pthread_mutex_t *);
+int __pthread_mutex_lock(pthread_mutex_t *);
+int __pthread_mutex_trylock(pthread_mutex_t *);
+void _thread_init_hack(void) __attribute__ ((constructor));
+
+static void init_private(void);
+static void init_main_thread(struct pthread *thread);
/*
* All weak references used within libc should be in this table.
- * This will is so that static libraries will work.
- *
- * XXXTHR - Check this list.
+ * This is so that static libraries will work.
*/
static void *references[] = {
- &_thread_init_hack,
- &_thread_init,
&_accept,
&_bind,
&_close,
@@ -126,6 +132,7 @@ static void *references[] = {
&_sigsuspend,
&_socket,
&_socketpair,
+ &_thread_init_hack,
&_wait4,
&_write,
&_writev
@@ -138,8 +145,6 @@ static void *references[] = {
* libraries, then the actual functions will not be loaded.
*/
static void *libgcc_references[] = {
- &_thread_init_hack,
- &_thread_init,
&_pthread_once,
&_pthread_key_create,
&_pthread_key_delete,
@@ -149,123 +154,93 @@ static void *libgcc_references[] = {
&_pthread_mutex_destroy,
&_pthread_mutex_lock,
&_pthread_mutex_trylock,
- &_pthread_mutex_unlock
+ &_pthread_mutex_unlock,
+ &_pthread_create
};
-int _pthread_guard_default;
-int _pthread_page_size;
-int _pthread_stack_default;
-int _pthread_stack_initial;
+#define DUAL_ENTRY(entry) \
+ (pthread_func_t)entry, (pthread_func_t)entry
-/*
- * Initialize the current thread.
- */
-void
-init_td_common(struct pthread *td, struct pthread_attr *attrp, int reinit)
-{
- /*
- * Some parts of a pthread are initialized only once.
- */
- if (!reinit) {
- memset(td, 0, sizeof(struct pthread));
- td->cancelmode = M_DEFERRED;
- td->cancelstate = M_DEFERRED;
- td->cancellation = CS_NULL;
- memcpy(&td->attr, attrp, sizeof(struct pthread_attr));
- td->magic = PTHREAD_MAGIC;
- TAILQ_INIT(&td->mutexq);
- td->base_priority = PTHREAD_DEFAULT_PRIORITY;
- td->active_priority = PTHREAD_DEFAULT_PRIORITY;
- td->inherited_priority = PTHREAD_MIN_PRIORITY;
- } else {
- memset(&td->join_status, 0, sizeof(struct join_status));
- }
- td->joiner = NULL;
- td->error = 0;
- td->flags = 0;
-}
+static pthread_func_t jmp_table[][2] = {
+ {DUAL_ENTRY(_pthread_cond_broadcast)}, /* PJT_COND_BROADCAST */
+ {DUAL_ENTRY(_pthread_cond_destroy)}, /* PJT_COND_DESTROY */
+ {DUAL_ENTRY(_pthread_cond_init)}, /* PJT_COND_INIT */
+ {DUAL_ENTRY(_pthread_cond_signal)}, /* PJT_COND_SIGNAL */
+ {(pthread_func_t)__pthread_cond_wait,
+ (pthread_func_t)_pthread_cond_wait}, /* PJT_COND_WAIT */
+ {DUAL_ENTRY(_pthread_getspecific)}, /* PJT_GETSPECIFIC */
+ {DUAL_ENTRY(_pthread_key_create)}, /* PJT_KEY_CREATE */
+	{DUAL_ENTRY(_pthread_key_delete)},	/* PJT_KEY_DELETE */
+ {DUAL_ENTRY(_pthread_main_np)}, /* PJT_MAIN_NP */
+ {DUAL_ENTRY(_pthread_mutex_destroy)}, /* PJT_MUTEX_DESTROY */
+ {DUAL_ENTRY(_pthread_mutex_init)}, /* PJT_MUTEX_INIT */
+ {(pthread_func_t)__pthread_mutex_lock,
+ (pthread_func_t)_pthread_mutex_lock}, /* PJT_MUTEX_LOCK */
+ {(pthread_func_t)__pthread_mutex_trylock,
+ (pthread_func_t)_pthread_mutex_trylock},/* PJT_MUTEX_TRYLOCK */
+ {DUAL_ENTRY(_pthread_mutex_unlock)}, /* PJT_MUTEX_UNLOCK */
+ {DUAL_ENTRY(_pthread_mutexattr_destroy)}, /* PJT_MUTEXATTR_DESTROY */
+ {DUAL_ENTRY(_pthread_mutexattr_init)}, /* PJT_MUTEXATTR_INIT */
+ {DUAL_ENTRY(_pthread_mutexattr_settype)}, /* PJT_MUTEXATTR_SETTYPE */
+ {DUAL_ENTRY(_pthread_once)}, /* PJT_ONCE */
+ {DUAL_ENTRY(_pthread_rwlock_destroy)}, /* PJT_RWLOCK_DESTROY */
+ {DUAL_ENTRY(_pthread_rwlock_init)}, /* PJT_RWLOCK_INIT */
+ {DUAL_ENTRY(_pthread_rwlock_rdlock)}, /* PJT_RWLOCK_RDLOCK */
+ {DUAL_ENTRY(_pthread_rwlock_tryrdlock)},/* PJT_RWLOCK_TRYRDLOCK */
+ {DUAL_ENTRY(_pthread_rwlock_trywrlock)},/* PJT_RWLOCK_TRYWRLOCK */
+ {DUAL_ENTRY(_pthread_rwlock_unlock)}, /* PJT_RWLOCK_UNLOCK */
+ {DUAL_ENTRY(_pthread_rwlock_wrlock)}, /* PJT_RWLOCK_WRLOCK */
+ {DUAL_ENTRY(_pthread_self)}, /* PJT_SELF */
+ {DUAL_ENTRY(_pthread_setspecific)}, /* PJT_SETSPECIFIC */
+ {DUAL_ENTRY(_pthread_sigmask)} /* PJT_SIGMASK */
+};
+
+extern int _thread_state_running;
+static int init_once = 0;
/*
- * Initialize the active and dead threads list. Any threads in the active
- * list will be removed and the thread td * will be marked as the
- * initial thread and inserted in the list as the only thread. Any threads
- * in the dead threads list will also be removed.
+ * For the shared version of the threads library, the above is sufficient.
+ * But for the archive version of the library, we need a little bit more.
+ * Namely, we must arrange for this particular module to be pulled in from
+ * the archive library at link time. To accomplish that, we define and
+ * initialize a variable, "_thread_autoinit_dummy_decl". This variable is
+ * referenced (as an extern) from libc/stdlib/exit.c. This will always
+ * create a need for this module, ensuring that it is present in the
+ * executable.
*/
+extern int _thread_autoinit_dummy_decl;
+int _thread_autoinit_dummy_decl = 0;
+
void
-init_tdlist(struct pthread *td, int reinit)
+_thread_init_hack(void)
{
- struct pthread *tdTemp, *tdTemp2;
-
- _thread_initial = td;
- td->name = strdup("_thread_initial");
-
- /*
- * If this is not the first initialization, remove any entries
- * that may be in the list and deallocate their memory. Also
- * destroy any global pthread primitives (they will be recreated).
- */
- if (reinit) {
- TAILQ_FOREACH_SAFE(tdTemp, &_thread_list, tle, tdTemp2) {
- if (tdTemp != NULL && tdTemp != td) {
- TAILQ_REMOVE(&_thread_list, tdTemp, tle);
- free(tdTemp);
- }
- }
- TAILQ_FOREACH_SAFE(tdTemp, &_dead_list, dle, tdTemp2) {
- if (tdTemp != NULL) {
- TAILQ_REMOVE(&_dead_list, tdTemp, dle);
- free(tdTemp);
- }
- }
- _pthread_mutex_destroy(&dead_list_lock);
- } else {
- TAILQ_INIT(&_thread_list);
- TAILQ_INIT(&_dead_list);
-
- /* Insert this thread as the first thread in the active list */
- TAILQ_INSERT_HEAD(&_thread_list, td, tle);
- }
- /*
- * Initialize the active thread list lock and the
- * dead threads list lock.
- */
- memset(&thread_list_lock, 0, sizeof(spinlock_t));
- if (_pthread_mutex_init(&dead_list_lock,NULL) != 0)
- PANIC("Failed to initialize garbage collector primitives");
+ _libpthread_init(NULL);
}
+
/*
- * Threaded process initialization
+ * Threaded process initialization.
+ *
+ * This is only called under two conditions:
+ *
+ * 1) Some thread routines have detected that the library hasn't yet
+ * been initialized (_thr_initial == NULL && curthread == NULL), or
+ *
+ * 2) An explicit call to reinitialize after a fork (indicated
+ * by curthread != NULL)
*/
void
-_thread_init(void)
+_libpthread_init(struct pthread *curthread)
{
- struct pthread *pthread;
- int fd;
- size_t len;
- int mib[2];
- int error;
+ int fd, first = 0;
+ sigset_t sigset, oldset;
/* Check if this function has already been called: */
- if (_thread_initial)
- /* Only initialise the threaded application once. */
+ if ((_thr_initial != NULL) && (curthread == NULL))
+ /* Only initialize the threaded application once. */
return;
- _pthread_page_size = getpagesize();
- _pthread_guard_default = getpagesize();
- if (sizeof(void *) == 8) {
- _pthread_stack_default = PTHREAD_STACK64_DEFAULT;
- _pthread_stack_initial = PTHREAD_STACK64_INITIAL;
- }
- else {
- _pthread_stack_default = PTHREAD_STACK32_DEFAULT;
- _pthread_stack_initial = PTHREAD_STACK32_INITIAL;
- }
-
- pthread_attr_default.guardsize_attr = _pthread_guard_default;
- pthread_attr_default.stacksize_attr = _pthread_stack_default;
-
/*
* Make gcc quiescent about {,libgcc_}references not being
* referenced:
@@ -273,11 +248,22 @@ _thread_init(void)
if ((references[0] == NULL) || (libgcc_references[0] == NULL))
PANIC("Failed loading mandatory references in _thread_init");
+ /* Pull debug symbols in for static binary */
+ _thread_state_running = PS_RUNNING;
+
+ /*
+ * Check the size of the jump table to make sure it is preset
+ * with the correct number of entries.
+ */
+ if (sizeof(jmp_table) != (sizeof(pthread_func_t) * PJT_MAX * 2))
+ PANIC("Thread jump table not properly initialized");
+ memcpy(__thr_jtable, jmp_table, sizeof(jmp_table));
+
/*
* Check for the special case of this process running as
* or in place of init as pid = 1:
*/
- if (getpid() == 1) {
+ if ((_thr_pid = getpid()) == 1) {
/*
* Setup a new session for this process which is
* assumed to be running as root.
@@ -292,74 +278,141 @@ _thread_init(void)
PANIC("Can't set login to root");
if (__sys_ioctl(fd, TIOCSCTTY, (char *) NULL) == -1)
PANIC("Can't set controlling terminal");
- if (__sys_dup2(fd, 0) == -1 ||
- __sys_dup2(fd, 1) == -1 ||
- __sys_dup2(fd, 2) == -1)
- PANIC("Can't dup2");
}
- /* Allocate memory for the thread structure of the initial thread: */
- if ((pthread = (pthread_t) malloc(sizeof(struct pthread))) == NULL) {
- /*
- * Insufficient memory to initialise this application, so
- * abort:
- */
- PANIC("Cannot allocate memory for initial thread");
+ /* Initialize pthread private data. */
+ init_private();
+
+ /* Set the initial thread. */
+ if (curthread == NULL) {
+ first = 1;
+ /* Create and initialize the initial thread. */
+ curthread = _thr_alloc(NULL);
+ if (curthread == NULL)
+ PANIC("Can't allocate initial thread");
+ init_main_thread(curthread);
}
+ /*
+ * Add the thread to the thread list queue.
+ */
+ THR_LIST_ADD(curthread);
+ _thread_active_threads = 1;
- init_tdlist(pthread, 0);
- init_td_common(pthread, &pthread_attr_default, 0);
- pthread->arch_id = _set_curthread(NULL, pthread, &error);
+ /* Setup the thread specific data */
+ _tcb_set(curthread->tcb);
- /* Get our thread id. */
- thr_self(&pthread->thr_id);
+ if (first) {
+ SIGFILLSET(sigset);
+ SIGDELSET(sigset, SIGTRAP);
+ __sys_sigprocmask(SIG_SETMASK, &sigset, &oldset);
+ _thr_signal_init();
+ _thr_initial = curthread;
+ SIGDELSET(oldset, SIGCANCEL);
+ __sys_sigprocmask(SIG_SETMASK, &oldset, NULL);
+ }
+}
- /* Find the stack top */
- mib[0] = CTL_KERN;
- mib[1] = KERN_USRSTACK;
- len = sizeof (_usrstack);
- if (sysctl(mib, 2, &_usrstack, &len, NULL, 0) == -1)
- _usrstack = (void *)USRSTACK;
+/*
+ * This function and pthread_create() do a lot of the same things.
+ * It'd be nice to consolidate the common stuff in one place.
+ */
+static void
+init_main_thread(struct pthread *thread)
+{
+ /* Setup the thread attributes. */
+ thr_self(&thread->tid);
+ thread->attr = _pthread_attr_default;
/*
- * Create a red zone below the main stack. All other stacks are
- * constrained to a maximum size by the paramters passed to
- * mmap(), but this stack is only limited by resource limits, so
- * this stack needs an explicitly mapped red zone to protect the
- * thread stack that is just beyond.
+ * Set up the thread stack.
+ *
+ * Create a red zone below the main stack. All other stacks
+ * are constrained to a maximum size by the parameters
+ * passed to mmap(), but this stack is only limited by
+ * resource limits, so this stack needs an explicitly mapped
+ * red zone to protect the thread stack that is just beyond.
*/
- if (mmap(_usrstack - _pthread_stack_initial -
- _pthread_guard_default, _pthread_guard_default, 0,
- MAP_ANON, -1, 0) == MAP_FAILED)
+ if (mmap((void *)_usrstack - _thr_stack_initial -
+ _thr_guard_default, _thr_guard_default, 0, MAP_ANON,
+ -1, 0) == MAP_FAILED)
PANIC("Cannot allocate red zone for initial thread");
- /* Set the main thread stack pointer. */
- pthread->stack = _usrstack - _pthread_stack_initial;
+ /*
+ * Mark the stack as an application supplied stack so that it
+ * isn't deallocated.
+ *
+ * XXX - I'm not sure it would hurt anything to deallocate
+ * the main thread stack because deallocation doesn't
+ * actually free() it; it just puts it in the free
+ * stack queue for later reuse.
+ */
+ thread->attr.stackaddr_attr = (void *)_usrstack - _thr_stack_initial;
+ thread->attr.stacksize_attr = _thr_stack_initial;
+ thread->attr.guardsize_attr = _thr_guard_default;
+ thread->attr.flags |= THR_STACK_USER;
+
+ /*
+ * Write a magic value to the thread structure
+ * to help identify valid ones:
+ */
+ thread->magic = THR_MAGIC;
- /* Set the stack attributes. */
- pthread->attr.stackaddr_attr = pthread->stack;
- pthread->attr.stacksize_attr = _pthread_stack_initial;
+ thread->cancelflags = PTHREAD_CANCEL_ENABLE | PTHREAD_CANCEL_DEFERRED;
+ thread->name = strdup("initial thread");
- /* Setup the context for initial thread. */
- getcontext(&pthread->ctx);
- pthread->ctx.uc_stack.ss_sp = pthread->stack;
- pthread->ctx.uc_stack.ss_size = _pthread_stack_initial;
+ /* Default the priority of the initial thread: */
+ thread->base_priority = THR_DEFAULT_PRIORITY;
+ thread->active_priority = THR_DEFAULT_PRIORITY;
+ thread->inherited_priority = 0;
- /* Initialize the atfork list and mutex */
- TAILQ_INIT(&_atfork_list);
- _pthread_mutex_init(&_atfork_mutex, NULL);
-}
+ /* Initialize the mutex queue: */
+ TAILQ_INIT(&thread->mutexq);
+ TAILQ_INIT(&thread->pri_mutexq);
-/*
- * Special start up code for NetBSD/Alpha
- */
-#if defined(__NetBSD__) && defined(__alpha__)
-int
-main(int argc, char *argv[], char *env);
+ thread->state = PS_RUNNING;
+ thread->uniqueid = 0;
-int
-_thread_main(int argc, char *argv[], char *env)
-{
- _thread_init();
- return (main(argc, argv, env));
+ /* Others cleared to zero by thr_alloc() */
}
+
+static void
+init_private(void)
+{
+ size_t len;
+ int mib[2];
+
+ _thr_umtx_init(&_mutex_static_lock);
+ _thr_umtx_init(&_cond_static_lock);
+ _thr_umtx_init(&_rwlock_static_lock);
+ _thr_umtx_init(&_keytable_lock);
+ _thr_umtx_init(&_thr_atfork_lock);
+ _thr_spinlock_init();
+ _thr_list_init();
+
+ /*
+ * Avoid reinitializing some things if they don't need to be,
+ * e.g. after a fork().
+ */
+ if (init_once == 0) {
+ /* Find the stack top */
+ mib[0] = CTL_KERN;
+ mib[1] = KERN_USRSTACK;
+ len = sizeof (_usrstack);
+ if (sysctl(mib, 2, &_usrstack, &len, NULL, 0) == -1)
+ PANIC("Cannot get kern.usrstack from sysctl");
+ _thr_page_size = getpagesize();
+ _thr_guard_default = _thr_page_size;
+ _pthread_attr_default.guardsize_attr = _thr_guard_default;
+ _pthread_attr_default.stacksize_attr = _thr_stack_default;
+
+ TAILQ_INIT(&_thr_atfork_list);
+#ifdef SYSTEM_SCOPE_ONLY
+ _thr_scope_system = 1;
+#else
+ if (getenv("LIBPTHREAD_SYSTEM_SCOPE") != NULL)
+ _thr_scope_system = 1;
+ else if (getenv("LIBPTHREAD_PROCESS_SCOPE") != NULL)
+ _thr_scope_system = -1;
#endif
+ }
+ init_once = 1;
+}
diff --git a/lib/libthr/thread/thr_join.c b/lib/libthr/thread/thr_join.c
index 29dd421..c44b261 100644
--- a/lib/libthr/thread/thr_join.c
+++ b/lib/libthr/thread/thr_join.c
@@ -31,170 +31,75 @@
*
* $FreeBSD$
*/
+
#include <errno.h>
#include <pthread.h>
-#include <stdlib.h>
+
#include "thr_private.h"
__weak_reference(_pthread_join, pthread_join);
+static void backout_join(void *arg)
+{
+ struct pthread *curthread = _get_curthread();
+ struct pthread *pthread = (struct pthread *)arg;
+
+ THREAD_LIST_LOCK(curthread);
+ pthread->joiner = NULL;
+ THREAD_LIST_UNLOCK(curthread);
+}
+
int
_pthread_join(pthread_t pthread, void **thread_return)
{
- int ret, dead;
- pthread_t thread;
+ struct pthread *curthread = _get_curthread();
+ void *tmp;
+ long state;
+ int oldcancel;
+ int ret = 0;
- /* Check if the caller has specified an invalid thread: */
- if (pthread->magic != PTHREAD_MAGIC)
- /* Invalid thread: */
- return(EINVAL);
+ if (pthread == NULL)
+ return (EINVAL);
- /* Check if the caller has specified itself: */
if (pthread == curthread)
- /* Avoid a deadlock condition: */
- return(EDEADLK);
+ return (EDEADLK);
- /*
- * Search for the specified thread in the list of active threads. This
- * is done manually here rather than calling _find_thread() because
- * the searches in _thread_list and _dead_list (as well as setting up
- * join/detach state) have to be done atomically.
- */
- ret = 0;
- dead = 0;
- thread = NULL;
- _thread_sigblock();
- DEAD_LIST_LOCK;
- THREAD_LIST_LOCK;
- if (!pthread->isdead) {
- TAILQ_FOREACH(thread, &_thread_list, tle) {
- if (thread == pthread) {
- PTHREAD_LOCK(pthread);
- break;
- }
- }
- }
- if (thread == NULL) {
- TAILQ_FOREACH(thread, &_dead_list, dle) {
- if (thread == pthread) {
- PTHREAD_LOCK(pthread);
- dead = 1;
- break;
- }
- }
- }
-
- /* Check if the thread was not found or has been detached: */
- if (thread == NULL) {
- THREAD_LIST_UNLOCK;
- DEAD_LIST_UNLOCK;
- _thread_sigunblock();
+ THREAD_LIST_LOCK(curthread);
+ if ((ret = _thr_find_thread(curthread, pthread, 1)) != 0) {
ret = ESRCH;
- goto out;
- }
- if ((pthread->attr.flags & PTHREAD_DETACHED) != 0) {
- PTHREAD_UNLOCK(pthread);
- THREAD_LIST_UNLOCK;
- DEAD_LIST_UNLOCK;
- _thread_sigunblock();
- ret = EINVAL;
- goto out;
- }
-
- if (pthread->joiner != NULL) {
+ } else if ((pthread->tlflags & TLFLAGS_DETACHED) != 0) {
+ ret = ESRCH;
+ } else if (pthread->joiner != NULL) {
/* Multiple joiners are not supported. */
- /* XXXTHR - support multiple joiners. */
- PTHREAD_UNLOCK(pthread);
- THREAD_LIST_UNLOCK;
- DEAD_LIST_UNLOCK;
- _thread_sigunblock();
ret = ENOTSUP;
- goto out;
-
}
+ if (ret) {
+ THREAD_LIST_UNLOCK(curthread);
+ return (ret);
+ }
+ /* Set the running thread to be the joiner: */
+ pthread->joiner = curthread;
- /* Check if the thread is not dead: */
- if (!dead) {
- /* Set the running thread to be the joiner: */
- pthread->joiner = curthread;
- PTHREAD_UNLOCK(pthread);
-
- /* Keep track of which thread we're joining to: */
- curthread->join_status.thread = pthread;
-
- while (curthread->join_status.thread == pthread) {
- /* Wait for our signal to wake up. */
- THREAD_LIST_UNLOCK;
- DEAD_LIST_UNLOCK;
- _thread_sigunblock();
- if (curthread->cancellation != CS_NULL)
- pthread->joiner = NULL;
- _thread_enter_cancellation_point();
-
- /*
- * XXX - Workaround to make a join a cancellation
- * point. Must find a better solution.
- */
- PTHREAD_LOCK(curthread);
- curthread->flags |= PTHREAD_FLAGS_SUSPENDED;
- PTHREAD_UNLOCK(curthread);
- ret = _thread_suspend(curthread, NULL);
- if (ret != 0 && ret != EAGAIN && ret != EINTR)
- PANIC("Unable to suspend in join.");
- PTHREAD_LOCK(curthread);
- curthread->flags &= ~PTHREAD_FLAGS_SUSPENDED;
- PTHREAD_UNLOCK(curthread);
- if (curthread->cancellation != CS_NULL)
- pthread->joiner = NULL;
- _thread_leave_cancellation_point();
+ THREAD_LIST_UNLOCK(curthread);
- /*
- * XXX - For correctness reasons.
- * We must aquire these in the same order and also
- * importantly, release in the same order because
- * otherwise we might deadlock with the joined thread
- * when we attempt to release one of these locks.
- */
- _thread_sigblock();
- DEAD_LIST_LOCK;
- THREAD_LIST_LOCK;
- }
+ THR_CLEANUP_PUSH(curthread, backout_join, pthread);
+ oldcancel = _thr_cancel_enter(curthread);
- /*
- * The thread return value and error are set by the thread we're
- * joining to when it exits or detaches:
- */
- ret = curthread->join_status.error;
- if ((ret == 0) && (thread_return != NULL))
- *thread_return = curthread->join_status.ret;
- THREAD_LIST_UNLOCK;
- DEAD_LIST_UNLOCK;
- _thread_sigunblock();
- } else {
- /*
- * The thread exited (is dead) without being detached, and no
- * thread has joined it.
- */
+ while ((state = pthread->state) != PS_DEAD) {
+ _thr_umtx_wait(&pthread->state, state, NULL);
+ }
- /* Check if the return value is required: */
- if (thread_return != NULL) {
- /* Return the thread's return value: */
- *thread_return = pthread->ret;
- }
+ _thr_cancel_leave(curthread, oldcancel);
+ THR_CLEANUP_POP(curthread, 0);
- /* Free all remaining memory allocated to the thread. */
- pthread->attr.flags |= PTHREAD_DETACHED;
- PTHREAD_UNLOCK(pthread);
- TAILQ_REMOVE(&_dead_list, pthread, dle);
- deadlist_free_onethread(pthread);
- THREAD_LIST_UNLOCK;
- DEAD_LIST_UNLOCK;
- _thread_sigunblock();
- }
+ tmp = pthread->ret;
+ THREAD_LIST_LOCK(curthread);
+ pthread->tlflags |= TLFLAGS_DETACHED;
+ THR_GCLIST_ADD(pthread);
+ THREAD_LIST_UNLOCK(curthread);
-out:
- _thread_leave_cancellation_point();
+ if (thread_return != NULL)
+ *thread_return = tmp;
- /* Return the completion status: */
return (ret);
}
diff --git a/lib/libthr/thread/thr_kern.c b/lib/libthr/thread/thr_kern.c
index 0f0305e..4c451f1 100644
--- a/lib/libthr/thread/thr_kern.c
+++ b/lib/libthr/thread/thr_kern.c
@@ -1,5 +1,6 @@
/*
- * Copyright (c) 2003 Jeffrey Roberson <jeff@freebsd.org>
+ * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
+ * Copyright (C) 2003 Daniel M. Eischen <deischen@freebsd.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -26,120 +27,74 @@
* $FreeBSD$
*/
-#include <sys/cdefs.h>
#include <sys/types.h>
#include <sys/signalvar.h>
-#include <sys/time.h>
-#include <sys/timespec.h>
#include <pthread.h>
-#include <signal.h>
-#include <stdlib.h>
-#include <errno.h>
-#include <unistd.h>
#include "thr_private.h"
-/* XXX Why can't I get this from time.h? :-( */
-#define timespecsub(vvp, uvp) \
- do { \
- (vvp)->tv_sec -= (uvp)->tv_sec; \
- (vvp)->tv_nsec -= (uvp)->tv_nsec; \
- if ((vvp)->tv_nsec < 0) { \
- (vvp)->tv_sec--; \
- (vvp)->tv_nsec += 1000000000; \
- } \
- } while (0)
+/*#define DEBUG_THREAD_KERN */
+#ifdef DEBUG_THREAD_KERN
+#define DBG_MSG stdout_debug
+#else
+#define DBG_MSG(x...)
+#endif
-void
-_thread_critical_enter(pthread_t pthread)
-{
- _thread_sigblock();
- UMTX_LOCK(&pthread->lock);
-}
-
-void
-_thread_critical_exit(pthread_t pthread)
+/*
+ * This is called when the first thread (other than the initial
+ * thread) is created.
+ */
+int
+_thr_setthreaded(int threaded)
{
- UMTX_UNLOCK(&pthread->lock);
- _thread_sigunblock();
+ if (((threaded == 0) ^ (__isthreaded == 0)) == 0)
+ return (0);
+
+ __isthreaded = threaded;
+#if 0
+ if (threaded != 0) {
+ _thr_rtld_init();
+ } else {
+ _thr_rtld_fini();
+ }
+#endif
+ return (0);
}
void
-_thread_sigblock()
+_thr_signal_block(struct pthread *curthread)
{
sigset_t set;
- sigset_t sav;
-
- /*
- * Block all signals.
- */
- SIGFILLSET(set);
- SIGDELSET(set, SIGTRAP);
-
- /* If we have already blocked signals, just up the refcount */
- if (++curthread->signest > 1)
+
+ if (curthread->sigblock > 0) {
+ curthread->sigblock++;
return;
- PTHREAD_ASSERT(curthread->signest == 1,
- ("Blocked signal nesting level must be 1!"));
-
- if (__sys_sigprocmask(SIG_SETMASK, &set, &sav)) {
- _thread_printf(STDERR_FILENO, "Critical Enter: sig err %d\n",
- errno);
- abort();
}
- curthread->savedsig = sav;
+ SIGFILLSET(set);
+ SIGDELSET(set, SIGBUS);
+ SIGDELSET(set, SIGILL);
+ SIGDELSET(set, SIGFPE);
+ SIGDELSET(set, SIGSEGV);
+ SIGDELSET(set, SIGTRAP);
+ __sys_sigprocmask(SIG_BLOCK, &set, &curthread->sigmask);
+ curthread->sigblock++;
}
void
-_thread_sigunblock()
+_thr_signal_unblock(struct pthread *curthread)
{
- sigset_t set;
-
- /* We might be in a nested 'blocked signal' section */
- if (--curthread->signest > 0)
- return;
- PTHREAD_ASSERT(curthread->signest == 0,
- ("Non-Zero blocked signal nesting level."));
-
- /*
- * Restore signals.
- */
- set = curthread->savedsig;
- if (__sys_sigprocmask(SIG_SETMASK, &set, NULL)) {
- _thread_printf(STDERR_FILENO, "Critical Exit: sig err %d\n",
- errno);
- abort();
- }
+ if (--curthread->sigblock == 0)
+ __sys_sigprocmask(SIG_SETMASK, &curthread->sigmask, NULL);
}
int
-_thread_suspend(pthread_t pthread, const struct timespec *abstime)
+_thr_send_sig(struct pthread *thread, int sig)
{
- struct timespec remaining;
- struct timespec *ts;
- int error;
-
- /*
- * Compute the remainder of the run time.
- */
- if (abstime) {
- struct timespec now;
- struct timeval tv;
-
- GET_CURRENT_TOD(tv);
- TIMEVAL_TO_TIMESPEC(&tv, &now);
-
- remaining = *abstime;
- timespecsub(&remaining, &now);
- ts = &remaining;
+ return thr_kill(thread->tid, sig);
+}
- /*
- * NOTE: timespecsub() makes sure the tv_nsec member >= 0.
- */
- if (ts->tv_sec < 0)
- return (ETIMEDOUT);
- } else
- ts = NULL;
- error = thr_suspend(ts);
- return (error == -1 ? errno : error);
+void
+_thr_assert_lock_level()
+{
+ PANIC("locklevel <= 0");
}
diff --git a/lib/libthr/thread/thr_condattr_init.c b/lib/libthr/thread/thr_kill.c
index 1af12e1..38ec80f 100644
--- a/lib/libthr/thread/thr_condattr_init.c
+++ b/lib/libthr/thread/thr_kill.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997 John Birrell <jb@cimlogic.com.au>
+ * Copyright (c) 1997 John Birrell <jb@cimlogic.com.au>.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,28 +31,37 @@
*
* $FreeBSD$
*/
-#include <string.h>
-#include <stdlib.h>
+
#include <errno.h>
+#include <signal.h>
#include <pthread.h>
+
#include "thr_private.h"
-__weak_reference(_pthread_condattr_init, pthread_condattr_init);
+__weak_reference(_pthread_kill, pthread_kill);
int
-_pthread_condattr_init(pthread_condattr_t *attr)
+_pthread_kill(pthread_t pthread, int sig)
{
+ struct pthread *curthread = _get_curthread();
int ret;
- pthread_condattr_t pattr;
- if ((pattr = (pthread_condattr_t)
- malloc(sizeof(struct pthread_cond_attr))) == NULL) {
- ret = ENOMEM;
- } else {
- memcpy(pattr, &pthread_condattr_default,
- sizeof(struct pthread_cond_attr));
- *attr = pattr;
- ret = 0;
+ /* Check for invalid signal numbers: */
+ if (sig < 0 || sig > _SIG_MAXSIG)
+ /* Invalid signal: */
+ ret = EINVAL;
+ /*
+ * Ensure the thread is in the list of active threads, and the
+ * signal is valid (signal 0 specifies error checking only) and
+ * not being ignored:
+ */
+ else if ((ret = _thr_ref_add(curthread, pthread, /*include dead*/0))
+ == 0) {
+ if (sig > 0)
+ _thr_send_sig(pthread, sig);
+ _thr_ref_delete(curthread, pthread);
}
- return(ret);
+
+ /* Return the completion status: */
+ return (ret);
}
diff --git a/lib/libthr/thread/thr_list.c b/lib/libthr/thread/thr_list.c
new file mode 100644
index 0000000..905ec77
--- /dev/null
+++ b/lib/libthr/thread/thr_list.c
@@ -0,0 +1,342 @@
+/*
+ * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
+ * Copyright (C) 2003 Daniel M. Eischen <deischen@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <sys/types.h>
+#include <sys/queue.h>
+
+#include <stdlib.h>
+#include <string.h>
+#include <pthread.h>
+
+#include "thr_private.h"
+#include "libc_private.h"
+
+/*#define DEBUG_THREAD_LIST */
+#ifdef DEBUG_THREAD_LIST
+#define DBG_MSG stdout_debug
+#else
+#define DBG_MSG(x...)
+#endif
+
+/*
+ * Define a high water mark for the maximum number of threads that
+ * will be cached. Once this level is reached, any extra threads
+ * will be free()'d.
+ */
+#define MAX_CACHED_THREADS 100
+
+/*
+ * We've got to keep track of everything that is allocated, not only
+ * to have a speedy free list, but also so they can be deallocated
+ * after a fork().
+ */
+static TAILQ_HEAD(, pthread) free_threadq;
+static umtx_t free_thread_lock;
+static umtx_t tcb_lock;
+static int free_thread_count = 0;
+static int inited = 0;
+static u_int64_t next_uniqueid = 1;
+
+LIST_HEAD(thread_hash_head, pthread);
+#define HASH_QUEUES 128
+static struct thread_hash_head thr_hashtable[HASH_QUEUES];
+#define THREAD_HASH(thrd) (((unsigned long)thrd >> 12) % HASH_QUEUES)
+
+static void thr_destroy(struct pthread *curthread, struct pthread *thread);
+
+void
+_thr_list_init(void)
+{
+ int i;
+
+ _gc_count = 0;
+ _thr_umtx_init(&_thr_list_lock);
+ TAILQ_INIT(&_thread_list);
+ TAILQ_INIT(&free_threadq);
+ _thr_umtx_init(&free_thread_lock);
+ _thr_umtx_init(&tcb_lock);
+ if (inited) {
+ for (i = 0; i < HASH_QUEUES; ++i)
+ LIST_INIT(&thr_hashtable[i]);
+ }
+ inited = 1;
+}
+
+void
+_thr_gc(struct pthread *curthread)
+{
+ struct pthread *td, *td_next;
+ TAILQ_HEAD(, pthread) worklist;
+
+ TAILQ_INIT(&worklist);
+ THREAD_LIST_LOCK(curthread);
+
+ /* Check the threads waiting for GC. */
+ for (td = TAILQ_FIRST(&_thread_gc_list); td != NULL; td = td_next) {
+ td_next = TAILQ_NEXT(td, gcle);
+ if (td->terminated == 0) {
+ /* make sure we are not still in userland */
+ continue;
+ }
+ _thr_stack_free(&td->attr);
+ if (((td->tlflags & TLFLAGS_DETACHED) != 0) &&
+ (td->refcount == 0)) {
+ THR_GCLIST_REMOVE(td);
+ /*
+ * The thread has detached and is no longer
+ * referenced. It is safe to remove all
+ * remnants of the thread.
+ */
+ THR_LIST_REMOVE(td);
+ TAILQ_INSERT_HEAD(&worklist, td, gcle);
+ }
+ }
+ THREAD_LIST_UNLOCK(curthread);
+
+ while ((td = TAILQ_FIRST(&worklist)) != NULL) {
+ TAILQ_REMOVE(&worklist, td, gcle);
+ /*
+ * XXX we don't free initial thread, because there might
+ * have some code referencing initial thread.
+ */
+ if (td == _thr_initial) {
+ DBG_MSG("Initial thread won't be freed\n");
+ continue;
+ }
+
+ DBG_MSG("Freeing thread %p\n", td);
+ _thr_free(curthread, td);
+ }
+}
+
+struct pthread *
+_thr_alloc(struct pthread *curthread)
+{
+ struct pthread *thread = NULL;
+ struct tcb *tcb;
+
+ if (curthread != NULL) {
+ if (GC_NEEDED())
+ _thr_gc(curthread);
+ if (free_thread_count > 0) {
+ THR_LOCK_ACQUIRE(curthread, &free_thread_lock);
+ if ((thread = TAILQ_FIRST(&free_threadq)) != NULL) {
+ TAILQ_REMOVE(&free_threadq, thread, tle);
+ free_thread_count--;
+ }
+ THR_LOCK_RELEASE(curthread, &free_thread_lock);
+ }
+ }
+ if (thread == NULL) {
+ thread = malloc(sizeof(struct pthread));
+ if (thread == NULL)
+ return (NULL);
+ }
+ if (curthread != NULL) {
+ THR_LOCK_ACQUIRE(curthread, &tcb_lock);
+ tcb = _tcb_ctor(thread, 0 /* not initial tls */);
+ THR_LOCK_RELEASE(curthread, &tcb_lock);
+ } else {
+ tcb = _tcb_ctor(thread, 1 /* initial tls */);
+ }
+ if (tcb != NULL) {
+ memset(thread, 0, sizeof(*thread));
+ thread->tcb = tcb;
+ } else {
+ thr_destroy(curthread, thread);
+ thread = NULL;
+ }
+ return (thread);
+}
+
+void
+_thr_free(struct pthread *curthread, struct pthread *thread)
+{
+ DBG_MSG("Freeing thread %p\n", thread);
+ if (thread->name) {
+ free(thread->name);
+ thread->name = NULL;
+ }
+ /*
+ * Always free tcb, as we only know it is part of RTLD TLS
+ * block, but don't know its detail and can not assume how
+ * it works, so better to avoid caching it here.
+ */
+ if (curthread != NULL) {
+ THR_LOCK_ACQUIRE(curthread, &tcb_lock);
+ _tcb_dtor(thread->tcb);
+ THR_LOCK_RELEASE(curthread, &tcb_lock);
+ } else {
+ _tcb_dtor(thread->tcb);
+ }
+ thread->tcb = NULL;
+ if ((curthread == NULL) || (free_thread_count >= MAX_CACHED_THREADS)) {
+ thr_destroy(curthread, thread);
+ } else {
+ /*
+	 * Add the thread to the free thread list; this also avoids
+	 * reusing pthread ids too quickly, which may help some buggy apps.
+ */
+ THR_LOCK_ACQUIRE(curthread, &free_thread_lock);
+ TAILQ_INSERT_TAIL(&free_threadq, thread, tle);
+ free_thread_count++;
+ THR_LOCK_RELEASE(curthread, &free_thread_lock);
+ }
+}
+
+static void
+thr_destroy(struct pthread *curthread __unused, struct pthread *thread)
+{
+ free(thread);
+}
+
+/*
+ * Add an active thread:
+ *
+ * o Assign the thread a unique id (which GDB uses to track
+ * threads.
+ * o Add the thread to the list of all threads and increment
+ * number of active threads.
+ */
+void
+_thr_link(struct pthread *curthread, struct pthread *thread)
+{
+ THREAD_LIST_LOCK(curthread);
+ /*
+ * Initialize the unique id (which GDB uses to track
+	 * threads), add the thread to the list of all threads,
+	 * and increment the number of active threads.
+ */
+ thread->uniqueid = next_uniqueid++;
+ THR_LIST_ADD(thread);
+ if (thread->attr.flags & PTHREAD_DETACHED)
+ thread->tlflags |= TLFLAGS_DETACHED;
+ _thread_active_threads++;
+ THREAD_LIST_UNLOCK(curthread);
+}
+
+/*
+ * Remove an active thread.
+ */
+void
+_thr_unlink(struct pthread *curthread, struct pthread *thread)
+{
+ THREAD_LIST_LOCK(curthread);
+ THR_LIST_REMOVE(thread);
+ _thread_active_threads--;
+ THREAD_LIST_UNLOCK(curthread);
+}
+
+void
+_thr_hash_add(struct pthread *thread)
+{
+ struct thread_hash_head *head;
+
+ head = &thr_hashtable[THREAD_HASH(thread)];
+ LIST_INSERT_HEAD(head, thread, hle);
+}
+
+void
+_thr_hash_remove(struct pthread *thread)
+{
+ LIST_REMOVE(thread, hle);
+}
+
+struct pthread *
+_thr_hash_find(struct pthread *thread)
+{
+ struct pthread *td;
+ struct thread_hash_head *head;
+
+ head = &thr_hashtable[THREAD_HASH(thread)];
+ LIST_FOREACH(td, head, hle) {
+ if (td == thread)
+ return (thread);
+ }
+ return (NULL);
+}
+
+/*
+ * Find a thread in the linked list of active threads and add a reference
+ * to it. Threads with positive reference counts will not be deallocated
+ * until all references are released.
+ */
+int
+_thr_ref_add(struct pthread *curthread, struct pthread *thread,
+ int include_dead)
+{
+ int ret;
+
+ if (thread == NULL)
+ /* Invalid thread: */
+ return (EINVAL);
+
+ THREAD_LIST_LOCK(curthread);
+ if ((ret = _thr_find_thread(curthread, thread, include_dead)) == 0) {
+ thread->refcount++;
+ }
+ THREAD_LIST_UNLOCK(curthread);
+
+ /* Return zero if the thread exists: */
+ return (ret);
+}
+
+void
+_thr_ref_delete(struct pthread *curthread, struct pthread *thread)
+{
+ if (thread != NULL) {
+ THREAD_LIST_LOCK(curthread);
+ thread->refcount--;
+ if ((thread->refcount == 0) &&
+ (thread->tlflags & TLFLAGS_GC_SAFE) != 0)
+ THR_GCLIST_ADD(thread);
+ THREAD_LIST_UNLOCK(curthread);
+ }
+}
+
+int
+_thr_find_thread(struct pthread *curthread, struct pthread *thread,
+ int include_dead)
+{
+ struct pthread *pthread;
+
+ if (thread == NULL)
+ /* Invalid thread: */
+ return (EINVAL);
+
+ pthread = _thr_hash_find(thread);
+ if (pthread) {
+ if (include_dead == 0 && pthread->state == PS_DEAD) {
+ pthread = NULL;
+ }
+ }
+
+ /* Return zero if the thread exists: */
+ return ((pthread != NULL) ? 0 : ESRCH);
+}
diff --git a/lib/libthr/thread/thr_main_np.c b/lib/libthr/thread/thr_main_np.c
index 1d5849d..6b4626e 100644
--- a/lib/libthr/thread/thr_main_np.c
+++ b/lib/libthr/thread/thr_main_np.c
@@ -24,11 +24,12 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $FreeBSD$
+ * $FreeBSD$
*/
#include <pthread.h>
#include <pthread_np.h>
+
#include "thr_private.h"
__weak_reference(_pthread_main_np, pthread_main_np);
@@ -40,8 +41,8 @@ int
_pthread_main_np()
{
- if (!_thread_initial)
+ if (!_thr_initial)
return (-1);
else
- return (pthread_equal(pthread_self(), _thread_initial) ? 1 : 0);
+ return (pthread_equal(pthread_self(), _thr_initial) ? 1 : 0);
}
diff --git a/lib/libthr/thread/thr_mutex.c b/lib/libthr/thread/thr_mutex.c
index ec28931..2126080 100644
--- a/lib/libthr/thread/thr_mutex.c
+++ b/lib/libthr/thread/thr_mutex.c
@@ -31,575 +31,887 @@
*
* $FreeBSD$
*/
+
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <pthread.h>
-#include <time.h>
#include "thr_private.h"
#if defined(_PTHREADS_INVARIANTS)
-#define _MUTEX_INIT_LINK(m) do { \
+#define MUTEX_INIT_LINK(m) do { \
(m)->m_qe.tqe_prev = NULL; \
(m)->m_qe.tqe_next = NULL; \
} while (0)
-#define _MUTEX_ASSERT_IS_OWNED(m) do { \
+#define MUTEX_ASSERT_IS_OWNED(m) do { \
if ((m)->m_qe.tqe_prev == NULL) \
PANIC("mutex is not on list"); \
} while (0)
-#define _MUTEX_ASSERT_NOT_OWNED(m) do { \
+#define MUTEX_ASSERT_NOT_OWNED(m) do { \
if (((m)->m_qe.tqe_prev != NULL) || \
((m)->m_qe.tqe_next != NULL)) \
PANIC("mutex is on list"); \
} while (0)
+#define THR_ASSERT_NOT_IN_SYNCQ(thr) do { \
+ THR_ASSERT(((thr)->sflags & THR_FLAGS_IN_SYNCQ) == 0, \
+ "thread in syncq when it shouldn't be."); \
+} while (0);
#else
-#define _MUTEX_INIT_LINK(m)
-#define _MUTEX_ASSERT_IS_OWNED(m)
-#define _MUTEX_ASSERT_NOT_OWNED(m)
+#define MUTEX_INIT_LINK(m)
+#define MUTEX_ASSERT_IS_OWNED(m)
+#define MUTEX_ASSERT_NOT_OWNED(m)
+#define THR_ASSERT_NOT_IN_SYNCQ(thr)
#endif
+#define THR_IN_MUTEXQ(thr) (((thr)->sflags & THR_FLAGS_IN_SYNCQ) != 0)
+#define MUTEX_DESTROY(m) do { \
+ free(m); \
+} while (0)
+
/*
* Prototypes
*/
-static void acquire_mutex(struct pthread_mutex *, struct pthread *);
-static int get_mcontested(pthread_mutex_t,
- const struct timespec *);
-static void mutex_attach_to_next_pthread(struct pthread_mutex *);
-static int mutex_init(pthread_mutex_t *, int);
-static int mutex_lock_common(pthread_mutex_t *, int,
- const struct timespec *);
-static inline int mutex_self_lock(pthread_mutex_t, int);
-static inline int mutex_unlock_common(pthread_mutex_t *, int);
-static inline pthread_t mutex_queue_deq(pthread_mutex_t);
-static inline void mutex_queue_remove(pthread_mutex_t, pthread_t);
-static inline void mutex_queue_enq(pthread_mutex_t, pthread_t);
-static void restore_prio_inheritance(struct pthread *);
-static void restore_prio_protection(struct pthread *);
-
-
-static spinlock_t static_init_lock = _SPINLOCK_INITIALIZER;
-
-static struct pthread_mutex_attr static_mutex_attr =
- PTHREAD_MUTEXATTR_STATIC_INITIALIZER;
-static pthread_mutexattr_t static_mattr = &static_mutex_attr;
+static long mutex_handoff(struct pthread *, struct pthread_mutex *);
+static int mutex_self_trylock(struct pthread *, pthread_mutex_t);
+static int mutex_self_lock(struct pthread *, pthread_mutex_t,
+ const struct timespec *abstime);
+static int mutex_unlock_common(pthread_mutex_t *, int);
+static void mutex_priority_adjust(struct pthread *, pthread_mutex_t);
+static void mutex_rescan_owned (struct pthread *, struct pthread *,
+ struct pthread_mutex *);
+#if 0
+static pthread_t mutex_queue_deq(pthread_mutex_t);
+#endif
+static void mutex_queue_remove(pthread_mutex_t, pthread_t);
+static void mutex_queue_enq(pthread_mutex_t, pthread_t);
-/* Single underscore versions provided for libc internal usage: */
-__weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);
+__weak_reference(__pthread_mutex_init, pthread_mutex_init);
__weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
-__weak_reference(__pthread_mutex_unlock, pthread_mutex_unlock);
+__weak_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock);
+__weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);
+/* Single underscore versions provided for libc internal usage: */
/* No difference between libc and application usage of these: */
-__weak_reference(_pthread_mutex_init, pthread_mutex_init);
__weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
-__weak_reference(_pthread_mutex_timedlock, pthread_mutex_timedlock);
+__weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);
-
-/*
- * Reinitialize a private mutex; this is only used for internal mutexes.
- */
-int
-_mutex_reinit(pthread_mutex_t * mutex)
+static int
+mutex_init(pthread_mutex_t *mutex,
+ const pthread_mutexattr_t *mutex_attr, int private)
{
- int ret = 0;
+ struct pthread_mutex *pmutex;
+ enum pthread_mutextype type;
+ int protocol;
+ int ceiling;
+ int flags;
+ int ret = 0;
+
+ /* Check if default mutex attributes: */
+ if (mutex_attr == NULL || *mutex_attr == NULL) {
+ /* Default to a (error checking) POSIX mutex: */
+ type = PTHREAD_MUTEX_ERRORCHECK;
+ protocol = PTHREAD_PRIO_NONE;
+ ceiling = THR_MAX_PRIORITY;
+ flags = 0;
+ }
+
+ /* Check mutex type: */
+ else if (((*mutex_attr)->m_type < PTHREAD_MUTEX_ERRORCHECK) ||
+ ((*mutex_attr)->m_type >= MUTEX_TYPE_MAX))
+ /* Return an invalid argument error: */
+ ret = EINVAL;
+
+ /* Check mutex protocol: */
+ else if (((*mutex_attr)->m_protocol < PTHREAD_PRIO_NONE) ||
+ ((*mutex_attr)->m_protocol > PTHREAD_PRIO_PROTECT))
+ /* Return an invalid argument error: */
+ ret = EINVAL;
- if (*mutex == PTHREAD_MUTEX_INITIALIZER)
- ret = _pthread_mutex_init(mutex, NULL);
else {
- /*
- * Initialize the mutex structure:
- */
- (*mutex)->m_type = PTHREAD_MUTEX_DEFAULT;
- (*mutex)->m_protocol = PTHREAD_PRIO_NONE;
- TAILQ_INIT(&(*mutex)->m_queue);
- (*mutex)->m_owner = NULL;
- (*mutex)->m_data.m_count = 0;
- (*mutex)->m_flags |= MUTEX_FLAGS_INITED | MUTEX_FLAGS_PRIVATE;
- (*mutex)->m_refcount = 0;
- (*mutex)->m_prio = 0;
- (*mutex)->m_saved_prio = 0;
- _MUTEX_INIT_LINK(*mutex);
- memset(&(*mutex)->lock, 0, sizeof((*mutex)->lock));
+ /* Use the requested mutex type and protocol: */
+ type = (*mutex_attr)->m_type;
+ protocol = (*mutex_attr)->m_protocol;
+ ceiling = (*mutex_attr)->m_ceiling;
+ flags = (*mutex_attr)->m_flags;
+ }
+
+ /* Check no errors so far: */
+ if (ret == 0) {
+ if ((pmutex = (pthread_mutex_t)
+ malloc(sizeof(struct pthread_mutex))) == NULL) {
+ ret = ENOMEM;
+ } else {
+ _thr_umtx_init(&pmutex->m_lock);
+ /* Set the mutex flags: */
+ pmutex->m_flags = flags;
+
+ /* Process according to mutex type: */
+ switch (type) {
+ /* case PTHREAD_MUTEX_DEFAULT: */
+ case PTHREAD_MUTEX_ERRORCHECK:
+ case PTHREAD_MUTEX_NORMAL:
+ /* Nothing to do here. */
+ break;
+
+ /* Single UNIX Spec 2 recursive mutex: */
+ case PTHREAD_MUTEX_RECURSIVE:
+ /* Reset the mutex count: */
+ pmutex->m_count = 0;
+ break;
+
+ /* Trap invalid mutex types: */
+ default:
+ /* Return an invalid argument error: */
+ ret = EINVAL;
+ break;
+ }
+ if (ret == 0) {
+ /* Initialise the rest of the mutex: */
+ TAILQ_INIT(&pmutex->m_queue);
+ pmutex->m_flags |= MUTEX_FLAGS_INITED;
+ if (private)
+ pmutex->m_flags |= MUTEX_FLAGS_PRIVATE;
+ pmutex->m_owner = NULL;
+ pmutex->m_type = type;
+ pmutex->m_protocol = protocol;
+ pmutex->m_refcount = 0;
+ if (protocol == PTHREAD_PRIO_PROTECT)
+ pmutex->m_prio = ceiling;
+ else
+ pmutex->m_prio = -1;
+ pmutex->m_saved_prio = 0;
+ MUTEX_INIT_LINK(pmutex);
+ *mutex = pmutex;
+ } else {
+ /* Free the mutex lock structure: */
+ MUTEX_DESTROY(pmutex);
+ *mutex = NULL;
+ }
+ }
}
+ /* Return the completion status: */
return (ret);
}
-int
-_pthread_mutex_init(pthread_mutex_t * mutex,
- const pthread_mutexattr_t * mutex_attr)
+static int
+init_static(struct pthread *thread, pthread_mutex_t *mutex)
{
- struct pthread_mutex_attr default_attr = {PTHREAD_MUTEX_ERRORCHECK,
- PTHREAD_PRIO_NONE, PTHREAD_MAX_PRIORITY, 0 };
- struct pthread_mutex_attr *attr;
+ int ret;
- if (mutex_attr == NULL) {
- attr = &default_attr;
- } else {
- /*
- * Check that the given mutex attribute is valid.
- */
- if (((*mutex_attr)->m_type < PTHREAD_MUTEX_ERRORCHECK) ||
- ((*mutex_attr)->m_type >= MUTEX_TYPE_MAX))
- return (EINVAL);
- else if (((*mutex_attr)->m_protocol < PTHREAD_PRIO_NONE) ||
- ((*mutex_attr)->m_protocol > PTHREAD_MUTEX_RECURSIVE))
- return (EINVAL);
- attr = *mutex_attr;
- }
- if ((*mutex =
- (pthread_mutex_t)malloc(sizeof(struct pthread_mutex))) == NULL)
- return (ENOMEM);
- memset((void *)(*mutex), 0, sizeof(struct pthread_mutex));
+ THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);
- /* Initialise the rest of the mutex: */
- TAILQ_INIT(&(*mutex)->m_queue);
- _MUTEX_INIT_LINK(*mutex);
- (*mutex)->m_protocol = attr->m_protocol;
- (*mutex)->m_flags = (attr->m_flags | MUTEX_FLAGS_INITED);
- (*mutex)->m_type = attr->m_type;
- if ((*mutex)->m_protocol == PTHREAD_PRIO_PROTECT)
- (*mutex)->m_prio = attr->m_ceiling;
- return (0);
+ if (*mutex == NULL)
+ ret = mutex_init(mutex, NULL, 0);
+ else
+ ret = 0;
+
+ THR_LOCK_RELEASE(thread, &_mutex_static_lock);
+
+ return (ret);
}
-int
-_pthread_mutex_destroy(pthread_mutex_t * mutex)
+static int
+init_static_private(struct pthread *thread, pthread_mutex_t *mutex)
{
- /*
- * If this mutex was statically initialized, don't bother
- * initializing it in order to destroy it immediately.
- */
- if (*mutex == PTHREAD_MUTEX_INITIALIZER)
- return (0);
+ int ret;
- /* Lock the mutex structure: */
- _SPINLOCK(&(*mutex)->lock);
+ THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);
- /*
- * Check to see if this mutex is in use:
- */
- if (((*mutex)->m_owner != NULL) ||
- (TAILQ_FIRST(&(*mutex)->m_queue) != NULL) ||
- ((*mutex)->m_refcount != 0)) {
- /* Unlock the mutex structure: */
- _SPINUNLOCK(&(*mutex)->lock);
- return (EBUSY);
- }
+ if (*mutex == NULL)
+ ret = mutex_init(mutex, NULL, 1);
+ else
+ ret = 0;
- /*
- * Free the memory allocated for the mutex
- * structure:
- */
- _MUTEX_ASSERT_NOT_OWNED(*mutex);
- _SPINUNLOCK(&(*mutex)->lock);
- free(*mutex);
+ THR_LOCK_RELEASE(thread, &_mutex_static_lock);
- /*
- * Leave the caller's pointer NULL now that
- * the mutex has been destroyed:
- */
- *mutex = NULL;
+ return (ret);
+}
+
+int
+_pthread_mutex_init(pthread_mutex_t *mutex,
+ const pthread_mutexattr_t *mutex_attr)
+{
+ return mutex_init(mutex, mutex_attr, 1);
+}
+
+int
+__pthread_mutex_init(pthread_mutex_t *mutex,
+ const pthread_mutexattr_t *mutex_attr)
+{
+ return mutex_init(mutex, mutex_attr, 0);
+}
+int
+_mutex_reinit(pthread_mutex_t *mutex)
+{
+ _thr_umtx_init(&(*mutex)->m_lock);
+ TAILQ_INIT(&(*mutex)->m_queue);
+ MUTEX_INIT_LINK(*mutex);
+ (*mutex)->m_owner = NULL;
+ (*mutex)->m_count = 0;
+ (*mutex)->m_refcount = 0;
+ (*mutex)->m_prio = 0;
+ (*mutex)->m_saved_prio = 0;
return (0);
}
-static int
-mutex_init(pthread_mutex_t *mutex, int private)
+void
+_mutex_fork(struct pthread *curthread)
{
- pthread_mutexattr_t *pma;
- int error;
-
- error = 0;
- pma = private ? &static_mattr : NULL;
- _SPINLOCK(&static_init_lock);
- if (*mutex == PTHREAD_MUTEX_INITIALIZER)
- error = _pthread_mutex_init(mutex, pma);
- _SPINUNLOCK(&static_init_lock);
- return (error);
+ TAILQ_INIT(&curthread->mutexq);
+ TAILQ_INIT(&curthread->pri_mutexq);
+ curthread->priority_mutex_count = 0;
+#if 0
+ struct pthread_mutex *m;
+
+ TAILQ_FOREACH(m, &curthread->mutexq, m_qe) {
+ m->m_lock = (umtx_t)curthread->tid;
+ }
+
+ /* Clear contender for priority mutexes */
+ TAILQ_FOREACH(m, &curthread->pri_mutexq, m_qe) {
+ /* clear another thread locked us */
+ _thr_umtx_init(&m->m_lock);
+ TAILQ_INIT(&m->m_queue);
+ }
+#endif
}
-/*
- * Acquires a mutex for the current thread. The caller must
- * lock the mutex before calling this function.
- */
-static void
-acquire_mutex(struct pthread_mutex *mtx, struct pthread *ptd)
+int
+_pthread_mutex_destroy(pthread_mutex_t *mutex)
{
- mtx->m_owner = ptd;
- _MUTEX_ASSERT_NOT_OWNED(mtx);
- PTHREAD_LOCK(ptd);
- TAILQ_INSERT_TAIL(&ptd->mutexq, mtx, m_qe);
- PTHREAD_UNLOCK(ptd);
+ struct pthread *curthread = _get_curthread();
+ pthread_mutex_t m;
+ int ret = 0;
+
+ if (mutex == NULL || *mutex == NULL)
+ ret = EINVAL;
+ else {
+ /*
+ * Try to lock the mutex structure, we only need to
+ * try once, if failed, the mutex is in use.
+ */
+ ret = THR_UMTX_TRYLOCK(curthread, &(*mutex)->m_lock);
+ if (ret)
+ return (ret);
+
+ /*
+ * Check mutex other fields to see if this mutex is
+ * in use. Mostly for priority mutex types, or there
+ * are condition variables referencing it.
+ */
+ if (((*mutex)->m_owner != NULL) ||
+ (TAILQ_FIRST(&(*mutex)->m_queue) != NULL) ||
+ ((*mutex)->m_refcount != 0)) {
+ THR_UMTX_UNLOCK(curthread, &(*mutex)->m_lock);
+ ret = EBUSY;
+ } else {
+ /*
+ * Save a pointer to the mutex so it can be free'd
+ * and set the caller's pointer to NULL:
+ */
+ m = *mutex;
+ *mutex = NULL;
+
+ /* Unlock the mutex structure: */
+ _thr_umtx_unlock(&m->m_lock, curthread->tid);
+
+ /*
+ * Free the memory allocated for the mutex
+ * structure:
+ */
+ MUTEX_ASSERT_NOT_OWNED(m);
+ MUTEX_DESTROY(m);
+ }
+ }
+
+ /* Return the completion status: */
+ return (ret);
}
-/*
- * Releases a mutex from the current thread. The owner must
- * lock the mutex. The next thread on the queue will be returned
- * locked by the current thread. The caller must take care to
- * unlock it.
- */
-static void
-mutex_attach_to_next_pthread(struct pthread_mutex *mtx)
+static int
+mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex)
{
- struct pthread *ptd;
+ int ret = 0;
+
+ THR_ASSERT((mutex != NULL) && (*mutex != NULL),
+ "Uninitialized mutex in mutex_trylock_common");
+
+ /* Short cut for simple mutex. */
+ if ((*mutex)->m_protocol == PTHREAD_PRIO_NONE) {
+ ret = THR_UMTX_TRYLOCK(curthread, &(*mutex)->m_lock);
+ if (ret == 0) {
+ (*mutex)->m_owner = curthread;
+ /* Add to the list of owned mutexes: */
+ MUTEX_ASSERT_NOT_OWNED(*mutex);
+ TAILQ_INSERT_TAIL(&curthread->mutexq,
+ (*mutex), m_qe);
+ } else if ((*mutex)->m_owner == curthread) {
+ ret = mutex_self_trylock(curthread, *mutex);
+ } /* else {} */
+
+ return (ret);
+ }
- _MUTEX_ASSERT_IS_OWNED(mtx);
- TAILQ_REMOVE(&mtx->m_owner->mutexq, (mtx), m_qe);
- _MUTEX_INIT_LINK(mtx);
+ /* Code for priority mutex */
+
+ /* Lock the mutex structure: */
+ THR_LOCK_ACQUIRE(curthread, &(*mutex)->m_lock);
/*
- * Deque next thread waiting for this mutex and attach
- * the mutex to it. The thread will already be locked.
+ * If the mutex was statically allocated, properly
+ * initialize the tail queue.
*/
- if ((ptd = mutex_queue_deq(mtx)) != NULL) {
- TAILQ_INSERT_TAIL(&ptd->mutexq, mtx, m_qe);
- ptd->data.mutex = NULL;
- PTHREAD_WAKE(ptd);
+ if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
+ TAILQ_INIT(&(*mutex)->m_queue);
+ MUTEX_INIT_LINK(*mutex);
+ (*mutex)->m_flags |= MUTEX_FLAGS_INITED;
}
- mtx->m_owner = ptd;
+
+ /* Process according to mutex type: */
+ switch ((*mutex)->m_protocol) {
+ /* POSIX priority inheritance mutex: */
+ case PTHREAD_PRIO_INHERIT:
+ /* Check if this mutex is not locked: */
+ if ((*mutex)->m_owner == NULL) {
+ /* Lock the mutex for the running thread: */
+ (*mutex)->m_owner = curthread;
+
+ THR_LOCK(curthread);
+ /* Track number of priority mutexes owned: */
+ curthread->priority_mutex_count++;
+
+ /*
+ * The mutex takes on the attributes of the
+ * running thread when there are no waiters.
+ */
+ (*mutex)->m_prio = curthread->active_priority;
+ (*mutex)->m_saved_prio =
+ curthread->inherited_priority;
+ curthread->inherited_priority = (*mutex)->m_prio;
+ THR_UNLOCK(curthread);
+
+ /* Add to the list of owned mutexes: */
+ MUTEX_ASSERT_NOT_OWNED(*mutex);
+ TAILQ_INSERT_TAIL(&curthread->pri_mutexq,
+ (*mutex), m_qe);
+ } else if ((*mutex)->m_owner == curthread)
+ ret = mutex_self_trylock(curthread, *mutex);
+ else
+ /* Return a busy error: */
+ ret = EBUSY;
+ break;
+
+ /* POSIX priority protection mutex: */
+ case PTHREAD_PRIO_PROTECT:
+ /* Check for a priority ceiling violation: */
+ if (curthread->active_priority > (*mutex)->m_prio)
+ ret = EINVAL;
+
+ /* Check if this mutex is not locked: */
+ else if ((*mutex)->m_owner == NULL) {
+ /* Lock the mutex for the running thread: */
+ (*mutex)->m_owner = curthread;
+
+ THR_LOCK(curthread);
+ /* Track number of priority mutexes owned: */
+ curthread->priority_mutex_count++;
+
+ /*
+ * The running thread inherits the ceiling
+ * priority of the mutex and executes at that
+ * priority.
+ */
+ curthread->active_priority = (*mutex)->m_prio;
+ (*mutex)->m_saved_prio =
+ curthread->inherited_priority;
+ curthread->inherited_priority =
+ (*mutex)->m_prio;
+ THR_UNLOCK(curthread);
+ /* Add to the list of owned mutexes: */
+ MUTEX_ASSERT_NOT_OWNED(*mutex);
+ TAILQ_INSERT_TAIL(&curthread->pri_mutexq,
+ (*mutex), m_qe);
+ } else if ((*mutex)->m_owner == curthread)
+ ret = mutex_self_trylock(curthread, *mutex);
+ else
+ /* Return a busy error: */
+ ret = EBUSY;
+ break;
+
+ /* Trap invalid mutex types: */
+ default:
+ /* Return an invalid argument error: */
+ ret = EINVAL;
+ break;
+ }
+
+ /* Unlock the mutex structure: */
+ THR_LOCK_RELEASE(curthread, &(*mutex)->m_lock);
+
+ /* Return the completion status: */
+ return (ret);
}
int
__pthread_mutex_trylock(pthread_mutex_t *mutex)
{
- int ret = 0;
+ struct pthread *curthread = _get_curthread();
+ int ret = 0;
/*
* If the mutex is statically initialized, perform the dynamic
* initialization:
*/
- if ((*mutex != PTHREAD_MUTEX_INITIALIZER) ||
- (ret = mutex_init(mutex, 0)) == 0)
- ret = mutex_lock_common(mutex, 1, NULL);
+ if ((*mutex != NULL) ||
+ ((ret = init_static(curthread, mutex)) == 0))
+ ret = mutex_trylock_common(curthread, mutex);
return (ret);
}
-/*
- * Libc internal.
- */
int
_pthread_mutex_trylock(pthread_mutex_t *mutex)
{
+ struct pthread *curthread = _get_curthread();
int ret = 0;
/*
* If the mutex is statically initialized, perform the dynamic
* initialization marking the mutex private (delete safe):
*/
- if ((*mutex != PTHREAD_MUTEX_INITIALIZER) ||
- (ret = mutex_init(mutex, 1)) == 0)
- ret = mutex_lock_common(mutex, 1, NULL);
+ if ((*mutex != NULL) ||
+ ((ret = init_static_private(curthread, mutex)) == 0))
+ ret = mutex_trylock_common(curthread, mutex);
return (ret);
}
static int
-mutex_lock_common(pthread_mutex_t * mutex, int nonblock,
- const struct timespec *abstime)
+mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m,
+ const struct timespec * abstime)
{
- int error;
+ struct timespec ts, ts2;
+ long cycle;
+ int ret = 0;
- error = 0;
- PTHREAD_ASSERT((mutex != NULL) && (*mutex != NULL),
+ THR_ASSERT((m != NULL) && (*m != NULL),
"Uninitialized mutex in mutex_lock_common");
- PTHREAD_ASSERT(((*mutex)->m_protocol >= PTHREAD_PRIO_NONE &&
- (*mutex)->m_protocol <= PTHREAD_PRIO_PROTECT),
- "Invalid mutex protocol");
- _SPINLOCK(&(*mutex)->lock);
- /*
- * If the mutex was statically allocated, properly
- * initialize the tail queue.
- */
- if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
- TAILQ_INIT(&(*mutex)->m_queue);
- (*mutex)->m_flags |= MUTEX_FLAGS_INITED;
- _MUTEX_INIT_LINK(*mutex);
+ if (abstime != NULL && (abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
+ abstime->tv_nsec >= 1000000000))
+ return (EINVAL);
+
+ /* Short cut for simple mutex. */
+
+ if ((*m)->m_protocol == PTHREAD_PRIO_NONE) {
+ /* Default POSIX mutex: */
+ ret = THR_UMTX_TRYLOCK(curthread, &(*m)->m_lock);
+ if (ret == 0) {
+ (*m)->m_owner = curthread;
+ /* Add to the list of owned mutexes: */
+ MUTEX_ASSERT_NOT_OWNED(*m);
+ TAILQ_INSERT_TAIL(&curthread->mutexq,
+ (*m), m_qe);
+ } else if ((*m)->m_owner == curthread) {
+ ret = mutex_self_lock(curthread, *m, abstime);
+ } else {
+ if (abstime == NULL) {
+ THR_UMTX_LOCK(curthread, &(*m)->m_lock);
+ ret = 0;
+ } else {
+ clock_gettime(CLOCK_REALTIME, &ts);
+ TIMESPEC_SUB(&ts2, abstime, &ts);
+ ret = THR_UMTX_TIMEDLOCK(curthread,
+ &(*m)->m_lock, &ts2);
+ /*
+ * Timed out wait is not restarted if
+ * it was interrupted, not worth to do it.
+ */
+ if (ret == EINTR)
+ ret = ETIMEDOUT;
+ }
+ if (ret == 0) {
+ (*m)->m_owner = curthread;
+ /* Add to the list of owned mutexes: */
+ MUTEX_ASSERT_NOT_OWNED(*m);
+ TAILQ_INSERT_TAIL(&curthread->mutexq,
+ (*m), m_qe);
+ }
+ }
+ return (ret);
}
-retry:
+ /* Code for priority mutex */
+
/*
- * If the mutex is a priority protected mutex the thread's
- * priority may not be higher than that of the mutex.
+ * Enter a loop waiting to become the mutex owner. We need a
+ * loop in case the waiting thread is interrupted by a signal
+ * to execute a signal handler. It is not (currently) possible
+ * to remain in the waiting queue while running a handler.
+ * Instead, the thread is interrupted and backed out of the
+ * waiting queue prior to executing the signal handler.
*/
- if ((*mutex)->m_protocol == PTHREAD_PRIO_PROTECT &&
- curthread->active_priority > (*mutex)->m_prio) {
- _SPINUNLOCK(&(*mutex)->lock);
- return (EINVAL);
- }
- if ((*mutex)->m_owner == NULL) {
- /*
- * Mutex is currently unowned.
- */
- acquire_mutex(*mutex, curthread);
- } else if ((*mutex)->m_owner == curthread) {
- /*
- * Mutex is owned by curthread. We must test against
- * certain conditions in such a case.
- */
- if ((error = mutex_self_lock((*mutex), nonblock)) != 0) {
- _SPINUNLOCK(&(*mutex)->lock);
- return (error);
- }
- } else {
- if (nonblock) {
- error = EBUSY;
- goto out;
- }
+ do {
+ /* Lock the mutex structure: */
+ THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
/*
- * Another thread owns the mutex. This thread must
- * wait for that thread to unlock the mutex. This
- * thread must not return to the caller if it was
- * interrupted by a signal.
+ * If the mutex was statically allocated, properly
+ * initialize the tail queue.
*/
- error = get_mcontested(*mutex, abstime);
- if (error == EINTR)
- goto retry;
- else if (error == ETIMEDOUT)
- goto out;
- }
+ if (((*m)->m_flags & MUTEX_FLAGS_INITED) == 0) {
+ TAILQ_INIT(&(*m)->m_queue);
+ (*m)->m_flags |= MUTEX_FLAGS_INITED;
+ MUTEX_INIT_LINK(*m);
+ }
- if ((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE)
- (*mutex)->m_data.m_count++;
+ /* Process according to mutex type: */
+ switch ((*m)->m_protocol) {
+ /* POSIX priority inheritance mutex: */
+ case PTHREAD_PRIO_INHERIT:
+ /* Check if this mutex is not locked: */
+ if ((*m)->m_owner == NULL) {
+ /* Lock the mutex for this thread: */
+ (*m)->m_owner = curthread;
+
+ THR_LOCK(curthread);
+ /* Track number of priority mutexes owned: */
+ curthread->priority_mutex_count++;
+
+ /*
+ * The mutex takes on attributes of the
+ * running thread when there are no waiters.
+ * Make sure the thread's scheduling lock is
+ * held while priorities are adjusted.
+ */
+ (*m)->m_prio = curthread->active_priority;
+ (*m)->m_saved_prio =
+ curthread->inherited_priority;
+ curthread->inherited_priority = (*m)->m_prio;
+ THR_UNLOCK(curthread);
+
+ /* Add to the list of owned mutexes: */
+ MUTEX_ASSERT_NOT_OWNED(*m);
+ TAILQ_INSERT_TAIL(&curthread->pri_mutexq,
+ (*m), m_qe);
+
+ /* Unlock the mutex structure: */
+ THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
+ } else if ((*m)->m_owner == curthread) {
+ ret = mutex_self_lock(curthread, *m, abstime);
+
+ /* Unlock the mutex structure: */
+ THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
+ } else {
+ /*
+ * Join the queue of threads waiting to lock
+ * the mutex and save a pointer to the mutex.
+ */
+ mutex_queue_enq(*m, curthread);
+ curthread->data.mutex = *m;
+
+ if (curthread->active_priority > (*m)->m_prio)
+ /* Adjust priorities: */
+ mutex_priority_adjust(curthread, *m);
+
+ THR_LOCK(curthread);
+ cycle = curthread->cycle;
+ THR_UNLOCK(curthread);
+
+ /* Unlock the mutex structure: */
+ THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
+
+ clock_gettime(CLOCK_REALTIME, &ts);
+ TIMESPEC_SUB(&ts2, abstime, &ts);
+ ret = _thr_umtx_wait(&curthread->cycle, cycle,
+ &ts2);
+ if (ret == EINTR)
+ ret = 0;
+
+ if (THR_IN_MUTEXQ(curthread)) {
+ THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
+ mutex_queue_remove(*m, curthread);
+ THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
+ }
+ /*
+ * Only clear these after assuring the
+ * thread is dequeued.
+ */
+ curthread->data.mutex = NULL;
+ }
+ break;
- /*
- * The mutex is now owned by curthread.
- */
- PTHREAD_LOCK(curthread);
+ /* POSIX priority protection mutex: */
+ case PTHREAD_PRIO_PROTECT:
+ /* Check for a priority ceiling violation: */
+ if (curthread->active_priority > (*m)->m_prio) {
+ /* Unlock the mutex structure: */
+ THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
+ ret = EINVAL;
+ }
+ /* Check if this mutex is not locked: */
+ else if ((*m)->m_owner == NULL) {
+ /*
+ * Lock the mutex for the running
+ * thread:
+ */
+ (*m)->m_owner = curthread;
+
+ THR_LOCK(curthread);
+ /* Track number of priority mutexes owned: */
+ curthread->priority_mutex_count++;
+
+ /*
+ * The running thread inherits the ceiling
+ * priority of the mutex and executes at that
+ * priority. Make sure the thread's
+ * scheduling lock is held while priorities
+ * are adjusted.
+ */
+ curthread->active_priority = (*m)->m_prio;
+ (*m)->m_saved_prio =
+ curthread->inherited_priority;
+ curthread->inherited_priority = (*m)->m_prio;
+ THR_UNLOCK(curthread);
+
+ /* Add to the list of owned mutexes: */
+ MUTEX_ASSERT_NOT_OWNED(*m);
+ TAILQ_INSERT_TAIL(&curthread->pri_mutexq,
+ (*m), m_qe);
+
+ /* Unlock the mutex structure: */
+ THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
+ } else if ((*m)->m_owner == curthread) {
+ ret = mutex_self_lock(curthread, *m, abstime);
+
+ /* Unlock the mutex structure: */
+ THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
+ } else {
+ /*
+ * Join the queue of threads waiting to lock
+ * the mutex and save a pointer to the mutex.
+ */
+ mutex_queue_enq(*m, curthread);
+ curthread->data.mutex = *m;
+
+ /* Clear any previous error: */
+ curthread->error = 0;
+
+ THR_LOCK(curthread);
+ cycle = curthread->cycle;
+ THR_UNLOCK(curthread);
+
+ /* Unlock the mutex structure: */
+ THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
+
+ clock_gettime(CLOCK_REALTIME, &ts);
+ TIMESPEC_SUB(&ts2, abstime, &ts);
+ ret = _thr_umtx_wait(&curthread->cycle, cycle,
+ &ts2);
+ if (ret == EINTR)
+ ret = 0;
+
+ curthread->data.mutex = NULL;
+ if (THR_IN_MUTEXQ(curthread)) {
+ THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
+ mutex_queue_remove(*m, curthread);
+ THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
+ }
+ /*
+ * Only clear these after assuring the
+ * thread is dequeued.
+ */
+ curthread->data.mutex = NULL;
+
+ /*
+ * The threads priority may have changed while
+ * waiting for the mutex causing a ceiling
+ * violation.
+ */
+ ret = curthread->error;
+ curthread->error = 0;
+ }
+ break;
- /*
- * The mutex's priority may have changed while waiting for it.
- */
- if ((*mutex)->m_protocol == PTHREAD_PRIO_PROTECT &&
- curthread->active_priority > (*mutex)->m_prio) {
- mutex_attach_to_next_pthread(*mutex);
- if ((*mutex)->m_owner != NULL)
- PTHREAD_UNLOCK((*mutex)->m_owner);
- PTHREAD_UNLOCK(curthread);
- _SPINUNLOCK(&(*mutex)->lock);
- return (EINVAL);
- }
+ /* Trap invalid mutex types: */
+ default:
+ /* Unlock the mutex structure: */
+ THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
- switch ((*mutex)->m_protocol) {
- case PTHREAD_PRIO_INHERIT:
- curthread->prio_inherit_count++;
- break;
- case PTHREAD_PRIO_PROTECT:
- PTHREAD_ASSERT((curthread->active_priority <=
- (*mutex)->m_prio), "priority protection violation");
- curthread->prio_protect_count++;
- if ((*mutex)->m_prio > curthread->active_priority) {
- curthread->inherited_priority = (*mutex)->m_prio;
- curthread->active_priority = (*mutex)->m_prio;
+ /* Return an invalid argument error: */
+ ret = EINVAL;
+ break;
}
- break;
- default:
- /* Nothing */
- break;
- }
- PTHREAD_UNLOCK(curthread);
-out:
- _SPINUNLOCK(&(*mutex)->lock);
- return (error);
-}
-/*
- * Caller must lock thread.
- */
-void
-adjust_prio_inheritance(struct pthread *ptd)
-{
- struct pthread_mutex *tempMtx;
- struct pthread *tempTd;
-
- /*
- * Scan owned mutexes's wait queue and execute at the
- * higher of thread's current priority or the priority of
- * the highest priority thread waiting on any of the the
- * mutexes the thread owns. Note: the highest priority thread
- * on a queue is always at the head of the queue.
- */
- TAILQ_FOREACH(tempMtx, &ptd->mutexq, m_qe) {
- if (tempMtx->m_protocol != PTHREAD_PRIO_INHERIT)
- continue;
+ } while (((*m)->m_owner != curthread) && (ret == 0));
- /*
- * XXX LOR with respect to tempMtx and ptd.
- * Order should be: 1. mutex
- * 2. pthread
- */
- _SPINLOCK(&tempMtx->lock);
-
- tempTd = TAILQ_FIRST(&tempMtx->m_queue);
- if (tempTd != NULL) {
- PTHREAD_LOCK(tempTd);
- if (tempTd->active_priority > ptd->active_priority) {
- ptd->inherited_priority =
- tempTd->active_priority;
- ptd->active_priority =
- tempTd->active_priority;
- }
- PTHREAD_UNLOCK(tempTd);
- }
- _SPINUNLOCK(&tempMtx->lock);
- }
+ /* Return the completion status: */
+ return (ret);
}
-/*
- * Caller must lock thread.
- */
-static void
-restore_prio_inheritance(struct pthread *ptd)
+int
+__pthread_mutex_lock(pthread_mutex_t *m)
{
- ptd->inherited_priority = PTHREAD_MIN_PRIORITY;
- ptd->active_priority = ptd->base_priority;
- adjust_prio_inheritance(ptd);
-}
+ struct pthread *curthread;
+ int ret = 0;
-/*
- * Caller must lock thread.
- */
-void
-adjust_prio_protection(struct pthread *ptd)
-{
- struct pthread_mutex *tempMtx;
+ _thr_check_init();
+
+ curthread = _get_curthread();
/*
- * The thread shall execute at the higher of its priority or
- * the highest priority ceiling of all the priority protection
- * mutexes it owns.
+ * If the mutex is statically initialized, perform the dynamic
+ * initialization:
*/
- TAILQ_FOREACH(tempMtx, &ptd->mutexq, m_qe) {
- if (tempMtx->m_protocol != PTHREAD_PRIO_PROTECT)
- continue;
- if (ptd->active_priority < tempMtx->m_prio) {
- ptd->inherited_priority = tempMtx->m_prio;
- ptd->active_priority = tempMtx->m_prio;
- }
- }
-}
+ if ((*m != NULL) || ((ret = init_static(curthread, m)) == 0))
+ ret = mutex_lock_common(curthread, m, NULL);
-/*
- * Caller must lock thread.
- */
-static void
-restore_prio_protection(struct pthread *ptd)
-{
- ptd->inherited_priority = PTHREAD_MIN_PRIORITY;
- ptd->active_priority = ptd->base_priority;
- adjust_prio_protection(ptd);
+ return (ret);
}
int
-__pthread_mutex_lock(pthread_mutex_t *mutex)
+_pthread_mutex_lock(pthread_mutex_t *m)
{
+ struct pthread *curthread;
int ret = 0;
- if (_thread_initial == NULL)
- _thread_init();
+ _thr_check_init();
+
+ curthread = _get_curthread();
/*
* If the mutex is statically initialized, perform the dynamic
- * initialization:
+ * initialization marking it private (delete safe):
*/
- if ((*mutex != PTHREAD_MUTEX_INITIALIZER) ||
- ((ret = mutex_init(mutex, 0)) == 0))
- ret = mutex_lock_common(mutex, 0, NULL);
+ if ((*m != NULL) ||
+ ((ret = init_static_private(curthread, m)) == 0))
+ ret = mutex_lock_common(curthread, m, NULL);
return (ret);
}
-/*
- * Libc internal.
- */
int
-_pthread_mutex_lock(pthread_mutex_t *mutex)
+__pthread_mutex_timedlock(pthread_mutex_t *m,
+ const struct timespec *abs_timeout)
{
+ struct pthread *curthread;
int ret = 0;
- if (_thread_initial == NULL)
- _thread_init();
+ _thr_check_init();
+
+ curthread = _get_curthread();
/*
* If the mutex is statically initialized, perform the dynamic
- * initialization marking it private (delete safe):
+ * initialization:
*/
- if ((*mutex != PTHREAD_MUTEX_INITIALIZER) ||
- ((ret = mutex_init(mutex, 1)) == 0))
- ret = mutex_lock_common(mutex, 0, NULL);
+ if ((*m != NULL) || ((ret = init_static(curthread, m)) == 0))
+ ret = mutex_lock_common(curthread, m, abs_timeout);
return (ret);
}
int
-_pthread_mutex_timedlock(pthread_mutex_t *mutex, const struct timespec *abstime)
+_pthread_mutex_timedlock(pthread_mutex_t *m,
+ const struct timespec *abs_timeout)
{
- int error;
+ struct pthread *curthread;
+ int ret = 0;
+
+ _thr_check_init();
- error = 0;
- if (_thread_initial == NULL)
- _thread_init();
+ curthread = _get_curthread();
/*
- * Initialize it if it's a valid statically inited mutex.
+ * If the mutex is statically initialized, perform the dynamic
+ * initialization marking it private (delete safe):
*/
- if ((*mutex != PTHREAD_MUTEX_INITIALIZER) ||
- ((error = mutex_init(mutex, 0)) == 0))
- error = mutex_lock_common(mutex, 0, abstime);
+ if ((*m != NULL) ||
+ ((ret = init_static_private(curthread, m)) == 0))
+ ret = mutex_lock_common(curthread, m, abs_timeout);
- PTHREAD_ASSERT(error != EINTR, "According to SUSv3 this function shall not return an error code of EINTR");
- return (error);
+ return (ret);
}
int
-__pthread_mutex_unlock(pthread_mutex_t * mutex)
+_pthread_mutex_unlock(pthread_mutex_t *m)
{
- return (mutex_unlock_common(mutex, /* add reference */ 0));
+ return (mutex_unlock_common(m, /* add reference */ 0));
}
-/*
- * Libc internal
- */
int
-_pthread_mutex_unlock(pthread_mutex_t * mutex)
+_mutex_cv_unlock(pthread_mutex_t *m)
{
- return (mutex_unlock_common(mutex, /* add reference */ 0));
+ return (mutex_unlock_common(m, /* add reference */ 1));
}
int
-_mutex_cv_unlock(pthread_mutex_t * mutex)
+_mutex_cv_lock(pthread_mutex_t *m)
{
- return (mutex_unlock_common(mutex, /* add reference */ 1));
+ struct pthread *curthread;
+ int ret;
+
+ curthread = _get_curthread();
+ if ((ret = _pthread_mutex_lock(m)) == 0)
+ (*m)->m_refcount--;
+ return (ret);
}
-int
-_mutex_cv_lock(pthread_mutex_t * mutex)
+static int
+mutex_self_trylock(struct pthread *curthread, pthread_mutex_t m)
{
int ret;
- if ((ret = _pthread_mutex_lock(mutex)) == 0)
- (*mutex)->m_refcount--;
+
+ switch (m->m_type) {
+ /* case PTHREAD_MUTEX_DEFAULT: */
+ case PTHREAD_MUTEX_ERRORCHECK:
+ case PTHREAD_MUTEX_NORMAL:
+ ret = EBUSY;
+ break;
+
+ case PTHREAD_MUTEX_RECURSIVE:
+ /* Increment the lock count: */
+ if (m->m_count + 1 > 0) {
+ m->m_count++;
+ ret = 0;
+ } else
+ ret = EAGAIN;
+ break;
+
+ default:
+ /* Trap invalid mutex types; */
+ ret = EINVAL;
+ }
+
return (ret);
}
-/*
- * Caller must lock mutex and then disable signals and lock curthread.
- */
-static inline int
-mutex_self_lock(pthread_mutex_t mutex, int noblock)
+static int
+mutex_self_lock(struct pthread *curthread, pthread_mutex_t m,
+ const struct timespec *abstime)
{
- switch (mutex->m_type) {
+ struct timespec ts1, ts2;
+ int ret;
+
+ switch (m->m_type) {
+ /* case PTHREAD_MUTEX_DEFAULT: */
case PTHREAD_MUTEX_ERRORCHECK:
- /*
- * POSIX specifies that mutexes should return EDEADLK if a
- * recursive lock is detected.
- */
- if (noblock)
- return (EBUSY);
- return (EDEADLK);
+ if (abstime) {
+ clock_gettime(CLOCK_REALTIME, &ts1);
+ TIMESPEC_SUB(&ts2, abstime, &ts1);
+ __sys_nanosleep(&ts2, NULL);
+ ret = ETIMEDOUT;
+ } else {
+ /*
+ * POSIX specifies that mutexes should return
+ * EDEADLK if a recursive lock is detected.
+ */
+ ret = EDEADLK;
+ }
break;
case PTHREAD_MUTEX_NORMAL:
@@ -607,84 +919,565 @@ mutex_self_lock(pthread_mutex_t mutex, int noblock)
* What SS2 define as a 'normal' mutex. Intentionally
* deadlock on attempts to get a lock you already own.
*/
- if (noblock)
- return (EBUSY);
- curthread->isdeadlocked = 1;
- _SPINUNLOCK(&(mutex)->lock);
- _thread_suspend(curthread, NULL);
- PANIC("Shouldn't resume here?\n");
+ ret = 0;
+ if (m->m_protocol != PTHREAD_PRIO_NONE) {
+ /* Unlock the mutex structure: */
+ THR_LOCK_RELEASE(curthread, &m->m_lock);
+ }
+ if (abstime) {
+ clock_gettime(CLOCK_REALTIME, &ts1);
+ TIMESPEC_SUB(&ts2, abstime, &ts1);
+ __sys_nanosleep(&ts2, NULL);
+ ret = ETIMEDOUT;
+ } else {
+ ts1.tv_sec = 30;
+ ts1.tv_nsec = 0;
+ for (;;)
+ __sys_nanosleep(&ts1, NULL);
+ }
break;
- default:
- /* Do Nothing */
+ case PTHREAD_MUTEX_RECURSIVE:
+ /* Increment the lock count: */
+ if (m->m_count + 1 > 0) {
+ m->m_count++;
+ ret = 0;
+ } else
+ ret = EAGAIN;
break;
+
+ default:
+ /* Trap invalid mutex types; */
+ ret = EINVAL;
}
- return (0);
+
+ return (ret);
}
-static inline int
-mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
+static int
+mutex_unlock_common(pthread_mutex_t *m, int add_reference)
{
+ struct pthread *curthread = _get_curthread();
+ long tid = -1;
+ int ret = 0;
+
+ if (m == NULL || *m == NULL)
+ ret = EINVAL;
+ else {
+ /* Short cut for simple mutex. */
+
+ if ((*m)->m_protocol == PTHREAD_PRIO_NONE) {
+ /*
+ * Check if the running thread is not the owner of the
+ * mutex:
+ */
+ if (__predict_false((*m)->m_owner != curthread)) {
+ ret = EPERM;
+ } else if (__predict_false(
+ (*m)->m_type == PTHREAD_MUTEX_RECURSIVE &&
+ (*m)->m_count > 0)) {
+ /* Decrement the count: */
+ (*m)->m_count--;
+ if (add_reference)
+ (*m)->m_refcount++;
+ } else {
+ /*
+ * Clear the count in case this is a recursive
+ * mutex.
+ */
+ (*m)->m_count = 0;
+ (*m)->m_owner = NULL;
+ /* Remove the mutex from the threads queue. */
+ MUTEX_ASSERT_IS_OWNED(*m);
+ TAILQ_REMOVE(&curthread->mutexq, (*m), m_qe);
+ MUTEX_INIT_LINK(*m);
+ if (add_reference)
+ (*m)->m_refcount++;
+ /*
+ * Hand off the mutex to the next waiting
+ * thread.
+ */
+ _thr_umtx_unlock(&(*m)->m_lock, curthread->tid);
+ }
+ return (ret);
+ }
+
+ /* Code for priority mutex */
+
+ /* Lock the mutex structure: */
+ THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
+
+ /* Process according to mutex type: */
+ switch ((*m)->m_protocol) {
+ /* POSIX priority inheritence mutex: */
+ case PTHREAD_PRIO_INHERIT:
+ /*
+ * Check if the running thread is not the owner of the
+ * mutex:
+ */
+ if ((*m)->m_owner != curthread)
+ ret = EPERM;
+ else if (((*m)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
+ ((*m)->m_count > 0))
+ /* Decrement the count: */
+ (*m)->m_count--;
+ else {
+ /*
+ * Clear the count in case this is recursive
+ * mutex.
+ */
+ (*m)->m_count = 0;
+
+ /*
+ * Restore the threads inherited priority and
+ * recompute the active priority (being careful
+ * not to override changes in the threads base
+ * priority subsequent to locking the mutex).
+ */
+ THR_LOCK(curthread);
+ curthread->inherited_priority =
+ (*m)->m_saved_prio;
+ curthread->active_priority =
+ MAX(curthread->inherited_priority,
+ curthread->base_priority);
+
+ /*
+ * This thread now owns one less priority mutex.
+ */
+ curthread->priority_mutex_count--;
+ THR_UNLOCK(curthread);
+
+ /* Remove the mutex from the threads queue. */
+ MUTEX_ASSERT_IS_OWNED(*m);
+ TAILQ_REMOVE(&(*m)->m_owner->pri_mutexq,
+ (*m), m_qe);
+ MUTEX_INIT_LINK(*m);
+
+ /*
+ * Hand off the mutex to the next waiting
+ * thread:
+ */
+ tid = mutex_handoff(curthread, *m);
+ }
+ break;
+
+ /* POSIX priority ceiling mutex: */
+ case PTHREAD_PRIO_PROTECT:
+ /*
+ * Check if the running thread is not the owner of the
+ * mutex:
+ */
+ if ((*m)->m_owner != curthread)
+ ret = EPERM;
+ else if (((*m)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
+ ((*m)->m_count > 0))
+ /* Decrement the count: */
+ (*m)->m_count--;
+ else {
+ /*
+ * Clear the count in case this is a recursive
+ * mutex.
+ */
+ (*m)->m_count = 0;
+
+ /*
+ * Restore the threads inherited priority and
+ * recompute the active priority (being careful
+ * not to override changes in the threads base
+ * priority subsequent to locking the mutex).
+ */
+ THR_LOCK(curthread);
+ curthread->inherited_priority =
+ (*m)->m_saved_prio;
+ curthread->active_priority =
+ MAX(curthread->inherited_priority,
+ curthread->base_priority);
+
+ /*
+ * This thread now owns one less priority mutex.
+ */
+ curthread->priority_mutex_count--;
+ THR_UNLOCK(curthread);
+
+ /* Remove the mutex from the threads queue. */
+ MUTEX_ASSERT_IS_OWNED(*m);
+ TAILQ_REMOVE(&(*m)->m_owner->pri_mutexq,
+ (*m), m_qe);
+ MUTEX_INIT_LINK(*m);
+
+ /*
+ * Hand off the mutex to the next waiting
+ * thread:
+ */
+ tid = mutex_handoff(curthread, *m);
+ }
+ break;
+
+ /* Trap invalid mutex types: */
+ default:
+ /* Return an invalid argument error: */
+ ret = EINVAL;
+ break;
+ }
+
+ if ((ret == 0) && (add_reference != 0))
+ /* Increment the reference count: */
+ (*m)->m_refcount++;
+
+ /* Unlock the mutex structure: */
+ THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
+ }
+
+ /* Return the completion status: */
+ return (ret);
+}
+
+
+/*
+ * This function is called when a change in base priority occurs for
+ * a thread that is holding or waiting for a priority protection or
+ * inheritence mutex. A change in a threads base priority can effect
+ * changes to active priorities of other threads and to the ordering
+ * of mutex locking by waiting threads.
+ *
+ * This must be called without the target thread's scheduling lock held.
+ */
+void
+_mutex_notify_priochange(struct pthread *curthread, struct pthread *pthread,
+ int propagate_prio)
+{
+ struct pthread_mutex *m;
+
+ /* Adjust the priorites of any owned priority mutexes: */
+ if (pthread->priority_mutex_count > 0) {
+ /*
+ * Rescan the mutexes owned by this thread and correct
+ * their priorities to account for this threads change
+ * in priority. This has the side effect of changing
+ * the threads active priority.
+ *
+ * Be sure to lock the first mutex in the list of owned
+ * mutexes. This acts as a barrier against another
+ * simultaneous call to change the threads priority
+ * and from the owning thread releasing the mutex.
+ */
+ m = TAILQ_FIRST(&pthread->pri_mutexq);
+ if (m != NULL) {
+ THR_LOCK_ACQUIRE(curthread, &m->m_lock);
+ /*
+ * Make sure the thread still owns the lock.
+ */
+ if (m == TAILQ_FIRST(&pthread->pri_mutexq))
+ mutex_rescan_owned(curthread, pthread,
+ /* rescan all owned */ NULL);
+ THR_LOCK_RELEASE(curthread, &m->m_lock);
+ }
+ }
+
/*
- * Error checking.
+ * If this thread is waiting on a priority inheritence mutex,
+ * check for priority adjustments. A change in priority can
+ * also cause a ceiling violation(*) for a thread waiting on
+ * a priority protection mutex; we don't perform the check here
+ * as it is done in pthread_mutex_unlock.
+ *
+ * (*) It should be noted that a priority change to a thread
+ * _after_ taking and owning a priority ceiling mutex
+ * does not affect ownership of that mutex; the ceiling
+ * priority is only checked before mutex ownership occurs.
*/
- if ((*mutex)->m_owner != curthread)
- return (EPERM);
- PTHREAD_ASSERT(((*mutex)->m_protocol >= PTHREAD_PRIO_NONE &&
- (*mutex)->m_protocol <= PTHREAD_PRIO_PROTECT),
- "Invalid mutex protocol");
-
- _SPINLOCK(&(*mutex)->lock);
- if ((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) {
- (*mutex)->m_data.m_count--;
- PTHREAD_ASSERT((*mutex)->m_data.m_count >= 0,
- "The mutex recurse count cannot be less than zero");
- if ((*mutex)->m_data.m_count > 0) {
- _SPINUNLOCK(&(*mutex)->lock);
- return (0);
+ if (propagate_prio != 0) {
+ /*
+ * Lock the thread's scheduling queue. This is a bit
+ * convoluted; the "in synchronization queue flag" can
+ * only be cleared with both the thread's scheduling and
+ * mutex locks held. The thread's pointer to the wanted
+ * mutex is guaranteed to be valid during this time.
+ */
+ THR_THREAD_LOCK(curthread, pthread);
+
+ if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) == 0) ||
+ ((m = pthread->data.mutex) == NULL))
+ THR_THREAD_UNLOCK(curthread, pthread);
+ else {
+ /*
+ * This thread is currently waiting on a mutex; unlock
+ * the scheduling queue lock and lock the mutex. We
+ * can't hold both at the same time because the locking
+ * order could cause a deadlock.
+ */
+ THR_THREAD_UNLOCK(curthread, pthread);
+ THR_LOCK_ACQUIRE(curthread, &m->m_lock);
+
+ /*
+ * Check to make sure this thread is still in the
+ * same state (the lock above can yield the CPU to
+ * another thread or the thread may be running on
+ * another CPU).
+ */
+ if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
+ (pthread->data.mutex == m)) {
+ /*
+ * Remove and reinsert this thread into
+ * the list of waiting threads to preserve
+ * decreasing priority order.
+ */
+ mutex_queue_remove(m, pthread);
+ mutex_queue_enq(m, pthread);
+
+ if (m->m_protocol == PTHREAD_PRIO_INHERIT)
+ /* Adjust priorities: */
+ mutex_priority_adjust(curthread, m);
+ }
+
+ /* Unlock the mutex structure: */
+ THR_LOCK_RELEASE(curthread, &m->m_lock);
}
}
+}
+
+/*
+ * Called when a new thread is added to the mutex waiting queue or
+ * when a threads priority changes that is already in the mutex
+ * waiting queue.
+ *
+ * This must be called with the mutex locked by the current thread.
+ */
+static void
+mutex_priority_adjust(struct pthread *curthread, pthread_mutex_t mutex)
+{
+ pthread_mutex_t m = mutex;
+ struct pthread *pthread_next, *pthread = mutex->m_owner;
+ int done, temp_prio;
+
+ /*
+ * Calculate the mutex priority as the maximum of the highest
+ * active priority of any waiting threads and the owning threads
+ * active priority(*).
+ *
+ * (*) Because the owning threads current active priority may
+ * reflect priority inherited from this mutex (and the mutex
+ * priority may have changed) we must recalculate the active
+ * priority based on the threads saved inherited priority
+ * and its base priority.
+ */
+ pthread_next = TAILQ_FIRST(&m->m_queue); /* should never be NULL */
+ temp_prio = MAX(pthread_next->active_priority,
+ MAX(m->m_saved_prio, pthread->base_priority));
+
+ /* See if this mutex really needs adjusting: */
+ if (temp_prio == m->m_prio)
+ /* No need to propagate the priority: */
+ return;
+
+ /* Set new priority of the mutex: */
+ m->m_prio = temp_prio;
+
+ /*
+ * Don't unlock the mutex passed in as an argument. It is
+ * expected to be locked and unlocked by the caller.
+ */
+ done = 1;
+ do {
+ /*
+ * Save the threads priority before rescanning the
+ * owned mutexes:
+ */
+ temp_prio = pthread->active_priority;
+
+ /*
+ * Fix the priorities for all mutexes held by the owning
+ * thread since taking this mutex. This also has a
+ * potential side-effect of changing the threads priority.
+ *
+ * At this point the mutex is locked by the current thread.
+ * The owning thread can't release the mutex until it is
+ * unlocked, so we should be able to safely walk its list
+ * of owned mutexes.
+ */
+ mutex_rescan_owned(curthread, pthread, m);
+
+ /*
+ * If this isn't the first time through the loop,
+ * the current mutex needs to be unlocked.
+ */
+ if (done == 0)
+ THR_LOCK_RELEASE(curthread, &m->m_lock);
+
+ /* Assume we're done unless told otherwise: */
+ done = 1;
+
+ /*
+ * If the thread is currently waiting on a mutex, check
+ * to see if the threads new priority has affected the
+ * priority of the mutex.
+ */
+ if ((temp_prio != pthread->active_priority) &&
+ ((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
+ ((m = pthread->data.mutex) != NULL) &&
+ (m->m_protocol == PTHREAD_PRIO_INHERIT)) {
+ /* Lock the mutex structure: */
+ THR_LOCK_ACQUIRE(curthread, &m->m_lock);
+
+ /*
+ * Make sure the thread is still waiting on the
+ * mutex:
+ */
+ if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
+ (m == pthread->data.mutex)) {
+ /*
+ * The priority for this thread has changed.
+ * Remove and reinsert this thread into the
+ * list of waiting threads to preserve
+ * decreasing priority order.
+ */
+ mutex_queue_remove(m, pthread);
+ mutex_queue_enq(m, pthread);
+
+ /*
+ * Grab the waiting thread with highest
+ * priority:
+ */
+ pthread_next = TAILQ_FIRST(&m->m_queue);
+
+ /*
+ * Calculate the mutex priority as the maximum
+ * of the highest active priority of any
+ * waiting threads and the owning threads
+ * active priority.
+ */
+ temp_prio = MAX(pthread_next->active_priority,
+ MAX(m->m_saved_prio,
+ m->m_owner->base_priority));
+
+ if (temp_prio != m->m_prio) {
+ /*
+ * The priority needs to be propagated
+ * to the mutex this thread is waiting
+ * on and up to the owner of that mutex.
+ */
+ m->m_prio = temp_prio;
+ pthread = m->m_owner;
+
+ /* We're not done yet: */
+ done = 0;
+ }
+ }
+ /* Only release the mutex if we're done: */
+ if (done != 0)
+ THR_LOCK_RELEASE(curthread, &m->m_lock);
+ }
+ } while (done == 0);
+}
+
+static void
+mutex_rescan_owned(struct pthread *curthread, struct pthread *pthread,
+ struct pthread_mutex *mutex)
+{
+ struct pthread_mutex *m;
+ struct pthread *pthread_next;
+ int active_prio, inherited_prio;
/*
- * Release the mutex from this thread and attach it to
- * the next thread in the queue, if there is one waiting.
+ * Start walking the mutexes the thread has taken since
+ * taking this mutex.
*/
- PTHREAD_LOCK(curthread);
- mutex_attach_to_next_pthread(*mutex);
- if ((*mutex)->m_owner != NULL)
- PTHREAD_UNLOCK((*mutex)->m_owner);
- if (add_reference != 0) {
- /* Increment the reference count: */
- (*mutex)->m_refcount++;
+ if (mutex == NULL) {
+ /*
+ * A null mutex means start at the beginning of the owned
+ * mutex list.
+ */
+ m = TAILQ_FIRST(&pthread->pri_mutexq);
+
+ /* There is no inherited priority yet. */
+ inherited_prio = 0;
+ } else {
+ /*
+ * The caller wants to start after a specific mutex. It
+ * is assumed that this mutex is a priority inheritence
+ * mutex and that its priority has been correctly
+ * calculated.
+ */
+ m = TAILQ_NEXT(mutex, m_qe);
+
+ /* Start inheriting priority from the specified mutex. */
+ inherited_prio = mutex->m_prio;
+ }
+ active_prio = MAX(inherited_prio, pthread->base_priority);
+
+ for (; m != NULL; m = TAILQ_NEXT(m, m_qe)) {
+ /*
+ * We only want to deal with priority inheritence
+ * mutexes. This might be optimized by only placing
+ * priority inheritence mutexes into the owned mutex
+ * list, but it may prove to be useful having all
+ * owned mutexes in this list. Consider a thread
+ * exiting while holding mutexes...
+ */
+ if (m->m_protocol == PTHREAD_PRIO_INHERIT) {
+ /*
+ * Fix the owners saved (inherited) priority to
+ * reflect the priority of the previous mutex.
+ */
+ m->m_saved_prio = inherited_prio;
+
+ if ((pthread_next = TAILQ_FIRST(&m->m_queue)) != NULL)
+ /* Recalculate the priority of the mutex: */
+ m->m_prio = MAX(active_prio,
+ pthread_next->active_priority);
+ else
+ m->m_prio = active_prio;
+
+ /* Recalculate new inherited and active priorities: */
+ inherited_prio = m->m_prio;
+ active_prio = MAX(m->m_prio, pthread->base_priority);
+ }
}
- _SPINUNLOCK(&(*mutex)->lock);
/*
- * Fix priority of the thread that just released the mutex.
+ * Fix the threads inherited priority and recalculate its
+ * active priority.
*/
- switch ((*mutex)->m_protocol) {
- case PTHREAD_PRIO_INHERIT:
- curthread->prio_inherit_count--;
- PTHREAD_ASSERT(curthread->prio_inherit_count >= 0,
- "priority inheritance counter cannot be less than zero");
- restore_prio_inheritance(curthread);
- if (curthread->prio_protect_count > 0)
- restore_prio_protection(curthread);
- break;
- case PTHREAD_PRIO_PROTECT:
- curthread->prio_protect_count--;
- PTHREAD_ASSERT(curthread->prio_protect_count >= 0,
- "priority protection counter cannot be less than zero");
- restore_prio_protection(curthread);
- if (curthread->prio_inherit_count > 0)
- restore_prio_inheritance(curthread);
- break;
- default:
- /* Nothing */
- break;
+ pthread->inherited_priority = inherited_prio;
+ active_prio = MAX(inherited_prio, pthread->base_priority);
+
+ if (active_prio != pthread->active_priority) {
+ /* Lock the thread's scheduling queue: */
+ THR_THREAD_LOCK(curthread, pthread);
+
+ /* if ((pthread->flags & THR_FLAGS_IN_RUNQ) == 0) */
+ if (1) {
+ /*
+ * This thread is not in a run queue. Just set
+ * its active priority.
+ */
+ pthread->active_priority = active_prio;
+ }
+ else {
+ /*
+ * This thread is in a run queue. Remove it from
+ * the queue before changing its priority:
+ */
+ /* THR_RUNQ_REMOVE(pthread);*/
+ /*
+ * POSIX states that if the priority is being
+ * lowered, the thread must be inserted at the
+ * head of the queue for its priority if it owns
+ * any priority protection or inheritence mutexes.
+ */
+ if ((active_prio < pthread->active_priority) &&
+ (pthread->priority_mutex_count > 0)) {
+ /* Set the new active priority. */
+ pthread->active_priority = active_prio;
+ /* THR_RUNQ_INSERT_HEAD(pthread); */
+ } else {
+ /* Set the new active priority. */
+ pthread->active_priority = active_prio;
+ /* THR_RUNQ_INSERT_TAIL(pthread);*/
+ }
+ }
+ THR_THREAD_UNLOCK(curthread, pthread);
}
- PTHREAD_UNLOCK(curthread);
- return (0);
}
void
@@ -692,85 +1485,182 @@ _mutex_unlock_private(pthread_t pthread)
{
struct pthread_mutex *m, *m_next;
- for (m = TAILQ_FIRST(&pthread->mutexq); m != NULL; m = m_next) {
+ for (m = TAILQ_FIRST(&pthread->pri_mutexq); m != NULL; m = m_next) {
m_next = TAILQ_NEXT(m, m_qe);
if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
- _pthread_mutex_unlock(&m);
+ pthread_mutex_unlock(&m);
}
}
-void
-_mutex_lock_backout(pthread_t pthread)
+/*
+ * Dequeue a waiting thread from the head of a mutex queue in descending
+ * priority order.
+ *
+ * In order to properly dequeue a thread from the mutex queue and
+ * make it runnable without the possibility of errant wakeups, it
+ * is necessary to lock the thread's scheduling queue while also
+ * holding the mutex lock.
+ */
+static long
+mutex_handoff(struct pthread *curthread, struct pthread_mutex *mutex)
{
- struct pthread_mutex *mutex;
+ struct pthread *pthread;
+ long tid = -1;
+
+ /* Keep dequeueing until we find a valid thread: */
+ mutex->m_owner = NULL;
+ pthread = TAILQ_FIRST(&mutex->m_queue);
+ while (pthread != NULL) {
+ /* Take the thread's scheduling lock: */
+ THR_THREAD_LOCK(curthread, pthread);
- mutex = pthread->data.mutex;
- if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) {
+ /* Remove the thread from the mutex queue: */
+ TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
+ pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;
+
+ /*
+ * Only exit the loop if the thread hasn't been
+ * cancelled.
+ */
+ switch (mutex->m_protocol) {
+ case PTHREAD_PRIO_NONE:
+ /*
+ * Assign the new owner and add the mutex to the
+ * thread's list of owned mutexes.
+ */
+ mutex->m_owner = pthread;
+ TAILQ_INSERT_TAIL(&pthread->pri_mutexq, mutex, m_qe);
+ break;
- mutex_queue_remove(mutex, pthread);
+ case PTHREAD_PRIO_INHERIT:
+ /*
+ * Assign the new owner and add the mutex to the
+ * thread's list of owned mutexes.
+ */
+ mutex->m_owner = pthread;
+ TAILQ_INSERT_TAIL(&pthread->pri_mutexq, mutex, m_qe);
- /* This thread is no longer waiting for the mutex: */
- pthread->data.mutex = NULL;
+ /* Track number of priority mutexes owned: */
+ pthread->priority_mutex_count++;
+ /*
+ * Set the priority of the mutex. Since our waiting
+ * threads are in descending priority order, the
+ * priority of the mutex becomes the active priority
+ * of the thread we just dequeued.
+ */
+ mutex->m_prio = pthread->active_priority;
+
+ /* Save the owning threads inherited priority: */
+ mutex->m_saved_prio = pthread->inherited_priority;
+
+ /*
+ * The owning threads inherited priority now becomes
+ * his active priority (the priority of the mutex).
+ */
+ pthread->inherited_priority = mutex->m_prio;
+ break;
+
+ case PTHREAD_PRIO_PROTECT:
+ if (pthread->active_priority > mutex->m_prio) {
+ /*
+ * Either the mutex ceiling priority has
+ * been lowered and/or this threads priority
+ * has been raised subsequent to the thread
+ * being queued on the waiting list.
+ */
+ pthread->error = EINVAL;
+ }
+ else {
+ /*
+ * Assign the new owner and add the mutex
+ * to the thread's list of owned mutexes.
+ */
+ mutex->m_owner = pthread;
+ TAILQ_INSERT_TAIL(&pthread->pri_mutexq,
+ mutex, m_qe);
+
+ /* Track number of priority mutexes owned: */
+ pthread->priority_mutex_count++;
+
+ /*
+ * Save the owning threads inherited
+ * priority:
+ */
+ mutex->m_saved_prio =
+ pthread->inherited_priority;
+
+ /*
+ * The owning thread inherits the ceiling
+ * priority of the mutex and executes at
+ * that priority:
+ */
+ pthread->inherited_priority = mutex->m_prio;
+ pthread->active_priority = mutex->m_prio;
+
+ }
+ break;
+ }
+
+ /* Make the thread runnable and unlock the scheduling queue: */
+ pthread->cycle++;
+ _thr_umtx_wake(&pthread->cycle, 1);
+
+ THR_THREAD_UNLOCK(curthread, pthread);
+ if (mutex->m_owner == pthread)
+ /* We're done; a valid owner was found. */
+ break;
+ else
+ /* Get the next thread from the waiting queue: */
+ pthread = TAILQ_NEXT(pthread, sqe);
}
+
+ if ((pthread == NULL) && (mutex->m_protocol == PTHREAD_PRIO_INHERIT))
+ /* This mutex has no priority: */
+ mutex->m_prio = 0;
+ return (tid);
}
+#if 0
/*
* Dequeue a waiting thread from the head of a mutex queue in descending
- * priority order. This funtion will return with the thread locked.
+ * priority order.
*/
-static inline pthread_t
-mutex_queue_deq(pthread_mutex_t mutex)
+static pthread_t
+mutex_queue_deq(struct pthread_mutex *mutex)
{
pthread_t pthread;
while ((pthread = TAILQ_FIRST(&mutex->m_queue)) != NULL) {
- PTHREAD_LOCK(pthread);
TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
- pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ;
-
- /*
- * Only exit the loop if the thread hasn't been
- * asynchronously cancelled.
- */
- if (pthread->cancelmode == M_ASYNC &&
- pthread->cancellation != CS_NULL)
- continue;
- else
- break;
+ pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;
}
+
return (pthread);
}
+#endif
/*
* Remove a waiting thread from a mutex queue in descending priority order.
*/
-static inline void
+static void
mutex_queue_remove(pthread_mutex_t mutex, pthread_t pthread)
{
- if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) {
+ if ((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) {
TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
- pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ;
+ pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;
}
}
/*
* Enqueue a waiting thread to a queue in descending priority order.
*/
-static inline void
+static void
mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread)
{
pthread_t tid = TAILQ_LAST(&mutex->m_queue, mutex_head);
- char *name;
-
- name = pthread->name ? pthread->name : "unknown";
- if ((pthread->flags & PTHREAD_FLAGS_IN_CONDQ) != 0)
- _thread_printf(2, "Thread (%s:%ld) already on condq\n",
- pthread->name, pthread->thr_id);
- if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0)
- _thread_printf(2, "Thread (%s:%ld) already on mutexq\n",
- pthread->name, pthread->thr_id);
- PTHREAD_ASSERT_NOT_IN_SYNCQ(pthread);
+
+ THR_ASSERT_NOT_IN_SYNCQ(pthread);
/*
* For the common case of all threads having equal priority,
* we perform a quick check against the priority of the thread
@@ -784,99 +1674,5 @@ mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread)
tid = TAILQ_NEXT(tid, sqe);
TAILQ_INSERT_BEFORE(tid, pthread, sqe);
}
- if (mutex->m_protocol == PTHREAD_PRIO_INHERIT &&
- pthread == TAILQ_FIRST(&mutex->m_queue)) {
- PTHREAD_LOCK(mutex->m_owner);
- if (pthread->active_priority >
- mutex->m_owner->active_priority) {
- mutex->m_owner->inherited_priority =
- pthread->active_priority;
- mutex->m_owner->active_priority =
- pthread->active_priority;
- }
- PTHREAD_UNLOCK(mutex->m_owner);
- }
- pthread->flags |= PTHREAD_FLAGS_IN_MUTEXQ;
-}
-
-/*
- * Caller must lock mutex and pthread.
- */
-void
-readjust_priorities(struct pthread *pthread, struct pthread_mutex *mtx)
-{
- if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) {
- PTHREAD_ASSERT(mtx != NULL,
- "mutex is NULL when it should not be");
- mutex_queue_remove(mtx, pthread);
- mutex_queue_enq(mtx, pthread);
- PTHREAD_LOCK(mtx->m_owner);
- adjust_prio_inheritance(mtx->m_owner);
- if (mtx->m_owner->prio_protect_count > 0)
- adjust_prio_protection(mtx->m_owner);
- PTHREAD_UNLOCK(mtx->m_owner);
- }
- if (pthread->prio_inherit_count > 0)
- adjust_prio_inheritance(pthread);
- if (pthread->prio_protect_count > 0)
- adjust_prio_protection(pthread);
-}
-
-/*
- * Returns with the lock owned and on the thread's mutexq. If
- * the mutex is currently owned by another thread it will sleep
- * until it is available.
- */
-static int
-get_mcontested(pthread_mutex_t mutexp, const struct timespec *abstime)
-{
- int error;
-
- /*
- * If the timeout is invalid this thread is not allowed
- * to block;
- */
- if (abstime != NULL) {
- if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
- return (EINVAL);
- }
-
- /*
- * Put this thread on the mutex's list of waiting threads.
- * The lock on the thread ensures atomic (as far as other
- * threads are concerned) setting of the thread state with
- * it's status on the mutex queue.
- */
- PTHREAD_LOCK(curthread);
- mutex_queue_enq(mutexp, curthread);
- do {
- if (curthread->cancelmode == M_ASYNC &&
- curthread->cancellation != CS_NULL) {
- mutex_queue_remove(mutexp, curthread);
- PTHREAD_UNLOCK(curthread);
- _SPINUNLOCK(&mutexp->lock);
- pthread_testcancel();
- }
- curthread->data.mutex = mutexp;
- PTHREAD_UNLOCK(curthread);
- _SPINUNLOCK(&mutexp->lock);
- error = _thread_suspend(curthread, abstime);
- if (error != 0 && error != ETIMEDOUT && error != EINTR)
- PANIC("Cannot suspend on mutex.");
- _SPINLOCK(&mutexp->lock);
- PTHREAD_LOCK(curthread);
- if (error == ETIMEDOUT) {
- /*
- * Between the timeout and when the mutex was
- * locked the previous owner may have released
- * the mutex to this thread. Or not.
- */
- if (mutexp->m_owner == curthread)
- error = 0;
- else
- _mutex_lock_backout(curthread);
- }
- } while ((curthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0);
- PTHREAD_UNLOCK(curthread);
- return (error);
+ pthread->sflags |= THR_FLAGS_IN_SYNCQ;
}
diff --git a/lib/libthr/thread/thr_mutex_prioceiling.c b/lib/libthr/thread/thr_mutex_prioceiling.c
index c7396a4..edea124 100644
--- a/lib/libthr/thread/thr_mutex_prioceiling.c
+++ b/lib/libthr/thread/thr_mutex_prioceiling.c
@@ -31,6 +31,7 @@
*
* $FreeBSD$
*/
+
#include <string.h>
#include <stdlib.h>
#include <errno.h>
@@ -47,7 +48,9 @@ _pthread_mutexattr_getprioceiling(pthread_mutexattr_t *mattr, int *prioceiling)
{
int ret = 0;
- if (*mattr == NULL)
+ if ((mattr == NULL) || (*mattr == NULL))
+ ret = EINVAL;
+ else if ((*mattr)->m_protocol != PTHREAD_PRIO_PROTECT)
ret = EINVAL;
else
*prioceiling = (*mattr)->m_ceiling;
@@ -60,26 +63,30 @@ _pthread_mutexattr_setprioceiling(pthread_mutexattr_t *mattr, int prioceiling)
{
int ret = 0;
- if (*mattr == NULL)
+ if ((mattr == NULL) || (*mattr == NULL))
ret = EINVAL;
- else if (prioceiling <= PTHREAD_MAX_PRIORITY &&
- prioceiling >= PTHREAD_MIN_PRIORITY)
- (*mattr)->m_ceiling = prioceiling;
- else
+ else if ((*mattr)->m_protocol != PTHREAD_PRIO_PROTECT)
ret = EINVAL;
+ else
+ (*mattr)->m_ceiling = prioceiling;
- return (ret);
+ return(ret);
}
int
_pthread_mutex_getprioceiling(pthread_mutex_t *mutex,
int *prioceiling)
{
- if (*mutex == NULL)
- return (EINVAL);
+ int ret;
+
+ if ((mutex == NULL) || (*mutex == NULL))
+ ret = EINVAL;
+ else if ((*mutex)->m_protocol != PTHREAD_PRIO_PROTECT)
+ ret = EINVAL;
else
- *prioceiling = (*mutex)->m_prio;
- return (0);
+ ret = (*mutex)->m_prio;
+
+ return(ret);
}
int
@@ -87,27 +94,23 @@ _pthread_mutex_setprioceiling(pthread_mutex_t *mutex,
int prioceiling, int *old_ceiling)
{
int ret = 0;
+ int tmp;
- if (*mutex == NULL)
- return (EINVAL);
- else if (prioceiling > PTHREAD_MAX_PRIORITY ||
- prioceiling < PTHREAD_MIN_PRIORITY)
- return (EINVAL);
-
- /*
- * Because of the use of pthread_mutex_unlock(), the
- * priority ceiling of a mutex cannot be changed
- * while the mutex is held by another thread. It also,
- * means that the the thread trying to change the
- * priority ceiling must adhere to prio protection rules.
- */
- if ((ret = pthread_mutex_lock(mutex)) == 0) {
- /* Return the old ceiling and set the new ceiling: */
- *old_ceiling = (*mutex)->m_prio;
+ if ((mutex == NULL) || (*mutex == NULL))
+ ret = EINVAL;
+ else if ((*mutex)->m_protocol != PTHREAD_PRIO_PROTECT)
+ ret = EINVAL;
+ /* Lock the mutex: */
+ else if ((ret = pthread_mutex_lock(mutex)) == 0) {
+ tmp = (*mutex)->m_prio;
+ /* Set the new ceiling: */
(*mutex)->m_prio = prioceiling;
/* Unlock the mutex: */
ret = pthread_mutex_unlock(mutex);
+
+ /* Return the old ceiling: */
+ *old_ceiling = tmp;
}
return(ret);
}
diff --git a/lib/libthr/thread/thr_mutex_protocol.c b/lib/libthr/thread/thr_mutex_protocol.c
index 56c8622..526cbe0 100644
--- a/lib/libthr/thread/thr_mutex_protocol.c
+++ b/lib/libthr/thread/thr_mutex_protocol.c
@@ -31,6 +31,7 @@
*
* $FreeBSD$
*/
+
#include <string.h>
#include <stdlib.h>
#include <errno.h>
@@ -43,19 +44,28 @@ __weak_reference(_pthread_mutexattr_setprotocol, pthread_mutexattr_setprotocol);
int
_pthread_mutexattr_getprotocol(pthread_mutexattr_t *mattr, int *protocol)
{
- if (*mattr == NULL)
- return (EINVAL);
- *protocol = (*mattr)->m_protocol;
- return(0);
+ int ret = 0;
+
+ if ((mattr == NULL) || (*mattr == NULL))
+ ret = EINVAL;
+ else
+ *protocol = (*mattr)->m_protocol;
+
+ return(ret);
}
int
_pthread_mutexattr_setprotocol(pthread_mutexattr_t *mattr, int protocol)
{
- if (*mattr == NULL || protocol < PTHREAD_PRIO_NONE ||
- protocol > PTHREAD_PRIO_PROTECT)
- return (EINVAL);
- (*mattr)->m_protocol = protocol;
- (*mattr)->m_ceiling = PTHREAD_MAX_PRIORITY;
- return(0);
+ int ret = 0;
+
+ if ((mattr == NULL) || (*mattr == NULL) ||
+ (protocol < PTHREAD_PRIO_NONE) || (protocol > PTHREAD_PRIO_PROTECT))
+ ret = EINVAL;
+ else {
+ (*mattr)->m_protocol = protocol;
+ (*mattr)->m_ceiling = THR_MAX_PRIORITY;
+ }
+ return(ret);
}
+
diff --git a/lib/libthr/thread/thr_mattr_kind_np.c b/lib/libthr/thread/thr_mutexattr.c
index 2b1e8af..180afd3 100644
--- a/lib/libthr/thread/thr_mattr_kind_np.c
+++ b/lib/libthr/thread/thr_mutexattr.c
@@ -31,14 +31,70 @@
*
* $FreeBSD$
*/
+
+/*
+ * Copyright (c) 1997 John Birrell <jb@cimlogic.com.au>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by John Birrell.
+ * 4. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <string.h>
+#include <stdlib.h>
#include <errno.h>
#include <pthread.h>
#include "thr_private.h"
+__weak_reference(_pthread_mutexattr_init, pthread_mutexattr_init);
__weak_reference(_pthread_mutexattr_setkind_np, pthread_mutexattr_setkind_np);
__weak_reference(_pthread_mutexattr_getkind_np, pthread_mutexattr_getkind_np);
__weak_reference(_pthread_mutexattr_gettype, pthread_mutexattr_gettype);
__weak_reference(_pthread_mutexattr_settype, pthread_mutexattr_settype);
+__weak_reference(_pthread_mutexattr_destroy, pthread_mutexattr_destroy);
+
+int
+_pthread_mutexattr_init(pthread_mutexattr_t *attr)
+{
+ int ret;
+ pthread_mutexattr_t pattr;
+
+ if ((pattr = (pthread_mutexattr_t)
+ malloc(sizeof(struct pthread_mutex_attr))) == NULL) {
+ ret = ENOMEM;
+ } else {
+ memcpy(pattr, &_pthread_mutexattr_default,
+ sizeof(struct pthread_mutex_attr));
+ *attr = pattr;
+ ret = 0;
+ }
+ return (ret);
+}
int
_pthread_mutexattr_setkind_np(pthread_mutexattr_t *attr, int kind)
@@ -71,10 +127,9 @@ int
_pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type)
{
int ret;
- if (*attr == NULL || type < PTHREAD_MUTEX_ERRORCHECK ||
- type >= MUTEX_TYPE_MAX) {
+ if (attr == NULL || *attr == NULL || type >= MUTEX_TYPE_MAX) {
errno = EINVAL;
- ret = EINVAL;
+ ret = -1;
} else {
(*attr)->m_type = type;
ret = 0;
@@ -96,3 +151,17 @@ _pthread_mutexattr_gettype(pthread_mutexattr_t *attr, int *type)
}
return ret;
}
+
+int
+_pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
+{
+ int ret;
+ if (attr == NULL || *attr == NULL) {
+ ret = EINVAL;
+ } else {
+ free(*attr);
+ *attr = NULL;
+ ret = 0;
+ }
+ return(ret);
+}
diff --git a/lib/libthr/thread/thr_once.c b/lib/libthr/thread/thr_once.c
index cef478d..8716e75 100644
--- a/lib/libthr/thread/thr_once.c
+++ b/lib/libthr/thread/thr_once.c
@@ -31,23 +31,68 @@
*
* $FreeBSD$
*/
+
+#include "namespace.h"
#include <pthread.h>
+#include "un-namespace.h"
+
#include "thr_private.h"
__weak_reference(_pthread_once, pthread_once);
+#define ONCE_NEVER_DONE PTHREAD_NEEDS_INIT
+#define ONCE_DONE PTHREAD_DONE_INIT
+#define ONCE_IN_PROGRESS 0x02
+#define ONCE_MASK 0x03
+
+static pthread_mutex_t once_lock = PTHREAD_MUTEX_INITIALIZER;
+static pthread_cond_t once_cv = PTHREAD_COND_INITIALIZER;
+
+/*
+ * POSIX:
+ * The pthread_once() function is not a cancellation point. However,
+ * if init_routine is a cancellation point and is canceled, the effect
+ * on once_control shall be as if pthread_once() was never called.
+ */
+
+static void
+once_cancel_handler(void *arg)
+{
+ pthread_once_t *once_control = arg;
+
+ _pthread_mutex_lock(&once_lock);
+ once_control->state = ONCE_NEVER_DONE;
+ _pthread_mutex_unlock(&once_lock);
+ _pthread_cond_broadcast(&once_cv);
+}
+
int
-_pthread_once(pthread_once_t * once_control, void (*init_routine) (void))
+_pthread_once(pthread_once_t *once_control, void (*init_routine) (void))
{
- if (once_control->state == PTHREAD_NEEDS_INIT) {
- if (_thread_initial == NULL)
- _thread_init();
- pthread_mutex_lock(&(once_control->mutex));
- if (once_control->state == PTHREAD_NEEDS_INIT) {
- init_routine();
- once_control->state = PTHREAD_DONE_INIT;
- }
- pthread_mutex_unlock(&(once_control->mutex));
+ int wakeup = 0;
+
+ if (once_control->state == ONCE_DONE)
+ return (0);
+ _pthread_mutex_lock(&once_lock);
+ while (*(volatile int *)&(once_control->state) == ONCE_IN_PROGRESS)
+ _pthread_cond_wait(&once_cv, &once_lock);
+ /*
+ * If previous thread was canceled, then the state still
+	 * could still be ONCE_NEVER_DONE, so we need to check it again.
+ */
+ if (*(volatile int *)&(once_control->state) == ONCE_NEVER_DONE) {
+ once_control->state = ONCE_IN_PROGRESS;
+ _pthread_mutex_unlock(&once_lock);
+ _pthread_cleanup_push(once_cancel_handler, once_control);
+ init_routine();
+ _pthread_cleanup_pop(0);
+ _pthread_mutex_lock(&once_lock);
+ once_control->state = ONCE_DONE;
+ wakeup = 1;
}
+ _pthread_mutex_unlock(&once_lock);
+ if (wakeup)
+ _pthread_cond_broadcast(&once_cv);
return (0);
}
+
diff --git a/lib/libthr/thread/thr_printf.c b/lib/libthr/thread/thr_printf.c
index e6f1a84..7d32ae7 100644
--- a/lib/libthr/thread/thr_printf.c
+++ b/lib/libthr/thread/thr_printf.c
@@ -22,15 +22,10 @@
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
+ *
+ * $FreeBSD$
*/
-#include <sys/cdefs.h>
-__FBSDID("$FreeBSD$");
-
-#include <sys/types.h>
-#include <sys/fcntl.h>
-#include <sys/uio.h>
-#include <errno.h>
#include <stdarg.h>
#include <string.h>
#include <unistd.h>
@@ -57,15 +52,20 @@ _thread_printf(int fd, const char *fmt, ...)
{
static const char digits[16] = "0123456789abcdef";
va_list ap;
- char buf[10];
+ char buf[20];
char *s;
- unsigned r, u;
- int c, d;
+ unsigned long r, u;
+ int c;
+ long d;
+ int islong;
va_start(ap, fmt);
while ((c = *fmt++)) {
+ islong = 0;
if (c == '%') {
- c = *fmt++;
+next: c = *fmt++;
+ if (c == '\0')
+ goto out;
switch (c) {
case 'c':
pchar(fd, va_arg(ap, int));
@@ -73,20 +73,31 @@ _thread_printf(int fd, const char *fmt, ...)
case 's':
pstr(fd, va_arg(ap, char *));
continue;
+ case 'l':
+ islong = 1;
+ goto next;
+ case 'p':
+ islong = 1;
case 'd':
case 'u':
- case 'p':
case 'x':
r = ((c == 'u') || (c == 'd')) ? 10 : 16;
if (c == 'd') {
- d = va_arg(ap, unsigned);
+ if (islong)
+ d = va_arg(ap, unsigned long);
+ else
+ d = va_arg(ap, unsigned);
if (d < 0) {
pchar(fd, '-');
- u = (unsigned)(d * -1);
+ u = (unsigned long)(d * -1);
} else
- u = (unsigned)d;
- } else
- u = va_arg(ap, unsigned);
+ u = (unsigned long)d;
+ } else {
+ if (islong)
+ u = va_arg(ap, unsigned long);
+ else
+ u = va_arg(ap, unsigned);
+ }
s = buf;
do {
*s++ = digits[u % r];
@@ -98,6 +109,7 @@ _thread_printf(int fd, const char *fmt, ...)
}
pchar(fd, c);
}
+out:
va_end(ap);
}
diff --git a/lib/libthr/thread/thr_private.h b/lib/libthr/thread/thr_private.h
index c6ddb79..5763007 100644
--- a/lib/libthr/thread/thr_private.h
+++ b/lib/libthr/thread/thr_private.h
@@ -29,8 +29,6 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * Private thread definitions for the uthread kernel.
- *
* $FreeBSD$
*/
@@ -38,113 +36,118 @@
#define _THR_PRIVATE_H
/*
- * Evaluate the storage class specifier.
- */
-#ifdef GLOBAL_PTHREAD_PRIVATE
-#define SCLASS
-#else
-#define SCLASS extern
-#endif
-
-/*
* Include files.
*/
#include <sys/types.h>
-#include <sys/cdefs.h>
-#include <sys/errno.h>
-#include <sys/msg.h>
#include <sys/time.h>
-#include <sys/param.h>
+#include <sys/cdefs.h>
#include <sys/queue.h>
-#include <pthread_np.h>
-#include <sched.h>
+#include <machine/atomic.h>
+#include <errno.h>
+#include <limits.h>
#include <signal.h>
-#include <spinlock.h>
+#include <stddef.h>
#include <stdio.h>
-#include <ucontext.h>
+#include <sched.h>
#include <unistd.h>
-#if defined(_PTHREADS_INVARIANTS)
-#include <assert.h>
-#endif
-
-#include <machine/atomic.h>
+#include <ucontext.h>
#include <sys/thr.h>
-#include <sys/umtx.h>
-#include <vm/vm.h>
-#include <vm/vm_param.h>
-#include <vm/pmap.h>
-#include <vm/vm_map.h>
+#include <pthread.h>
+
+#include "pthread_md.h"
+#include "thr_umtx.h"
-#if defined(_PTHREADS_INVARIANTS)
/*
- * Kernel fatal error handler macro.
+ * Evaluate the storage class specifier.
*/
-#define PANIC(string) \
- do { \
- _thread_printf(STDOUT_FILENO, (string)); \
- _thread_printf(STDOUT_FILENO, \
- "\nAbnormal termination, file: %s, line: %d\n", \
- __FILE__, __LINE__); \
- abort(); \
- } while (0)
-
-#define PTHREAD_ASSERT(cond, msg) do { \
- if (!(cond)) \
- PANIC(msg); \
-} while (0)
+#ifdef GLOBAL_PTHREAD_PRIVATE
+#define SCLASS
+#define SCLASS_PRESET(x...) = x
+#else
+#define SCLASS extern
+#define SCLASS_PRESET(x...)
+#endif
-#define PTHREAD_ASSERT_NOT_IN_SYNCQ(thrd) \
- PTHREAD_ASSERT((((thrd)->flags & PTHREAD_FLAGS_IN_SYNCQ) == 0), \
- "Illegal call from signal handler");
+/* Signal to do cancellation */
+#define SIGCANCEL 32
-#else /* !_PTHREADS_INVARIANTS */
-#define PANIC(string) _thread_exit(__FILE__, __LINE__, (string))
-#define PTHREAD_ASSERT(cond, msg)
-#define PTHREAD_ASSERT_NOT_IN_SYNCQ(thrd)
-#endif /* _PTHREADS_INVARIANTS */
+/*
+ * Kernel fatal error handler macro.
+ */
+#define PANIC(string) _thread_exit(__FILE__,__LINE__,string)
/* Output debug messages like this: */
-#define stdout_debug(args...) _thread_printf(STDOUT_FILENO, args)
-#define stderr_debug(args...) _thread_printf(STDOUT_FILENO, args)
+#define stdout_debug(args...) _thread_printf(STDOUT_FILENO, ##args)
+#define stderr_debug(args...) _thread_printf(STDOUT_FILENO, ##args)
-/*
- * Currently executing thread.
- */
-#define curthread _get_curthread()
+#ifdef _PTHREADS_INVARIANTS
+#define THR_ASSERT(cond, msg) do { \
+ if (__predict_false(!(cond))) \
+ PANIC(msg); \
+} while (0)
+#else
+#define THR_ASSERT(cond, msg)
+#endif
-/*
- * Locking macros
- */
-#define UMTX_LOCK(m) \
- do { \
- if (umtx_lock((m), curthread->thr_id) != 0) \
- abort(); \
+#define TIMESPEC_ADD(dst, src, val) \
+ do { \
+ (dst)->tv_sec = (src)->tv_sec + (val)->tv_sec; \
+ (dst)->tv_nsec = (src)->tv_nsec + (val)->tv_nsec; \
+ if ((dst)->tv_nsec > 1000000000) { \
+ (dst)->tv_sec++; \
+ (dst)->tv_nsec -= 1000000000; \
+ } \
} while (0)
-#define UMTX_TRYLOCK(m, r) \
- do { \
- (r) = umtx_trylock((m), curthread->thr_id); \
- if ((r) != 0 && (r) != EBUSY) \
- abort(); \
+#define TIMESPEC_SUB(dst, src, val) \
+ do { \
+ (dst)->tv_sec = (src)->tv_sec - (val)->tv_sec; \
+ (dst)->tv_nsec = (src)->tv_nsec - (val)->tv_nsec; \
+ if ((dst)->tv_nsec < 0) { \
+ (dst)->tv_sec--; \
+ (dst)->tv_nsec += 1000000000; \
+ } \
} while (0)
-#define UMTX_UNLOCK(m) \
- do { \
- if (umtx_unlock((m), curthread->thr_id) != 0) \
- abort(); \
- } while (0)
+struct pthread_mutex {
+ /*
+ * Lock for accesses to this structure.
+ */
+ volatile umtx_t m_lock;
+ enum pthread_mutextype m_type;
+ int m_protocol;
+ TAILQ_HEAD(mutex_head, pthread) m_queue;
+ struct pthread *m_owner;
+ long m_flags;
+ int m_count;
+ int m_refcount;
-#define PTHREAD_LOCK(p) UMTX_LOCK(&(p)->lock)
-#define PTHREAD_UNLOCK(p) UMTX_UNLOCK(&(p)->lock)
+ /*
+	 * Used for priority inheritance and protection.
+ *
+	 * m_prio - For priority inheritance, the highest active
+ * priority (threads locking the mutex inherit
+ * this priority). For priority protection, the
+ * ceiling priority of this mutex.
+ * m_saved_prio - mutex owners inherited priority before
+ * taking the mutex, restored when the owner
+ * unlocks the mutex.
+ */
+ int m_prio;
+ int m_saved_prio;
-#define PTHREAD_WAKE(ptd) thr_wake((ptd)->thr_id)
+ /*
+ * Link for list of all mutexes a thread currently owns.
+ */
+ TAILQ_ENTRY(pthread_mutex) m_qe;
+};
/*
- * TailQ initialization values.
+ * Flags for mutexes.
*/
-#define TAILQ_INITIALIZER { NULL, NULL }
-
-#define UMTX_INITIALIZER { NULL }
+#define MUTEX_FLAGS_PRIVATE 0x01
+#define MUTEX_FLAGS_INITED 0x02
+#define MUTEX_FLAGS_BUSY 0x04
struct pthread_mutex_attr {
enum pthread_mutextype m_type;
@@ -153,128 +156,73 @@ struct pthread_mutex_attr {
long m_flags;
};
-/*
- * Static mutex initialization values.
- */
-
#define PTHREAD_MUTEXATTR_STATIC_INITIALIZER \
{ PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, MUTEX_FLAGS_PRIVATE }
-#define PTHREAD_MUTEX_STATIC_INITIALIZER \
- { PTHREAD_MUTEXATTR_STATIC_INITIALIZER, UMTX_INITIALIZER, NULL, \
- 0, 0, TAILQ_INITIALIZER }
-
-union pthread_mutex_data {
- void *m_ptr;
- int m_count;
-};
-
-struct pthread_mutex {
- enum pthread_mutextype m_type;
- int m_protocol;
- TAILQ_HEAD(mutex_head, pthread) m_queue;
- struct pthread *m_owner;
- union pthread_mutex_data m_data;
- long m_flags;
- int m_refcount;
-
- /*
- * Used for priority inheritence and protection.
- *
- * m_prio - For priority inheritence, the highest active
- * priority (threads locking the mutex inherit
- * this priority). For priority protection, the
- * ceiling priority of this mutex.
- * m_saved_prio - mutex owners inherited priority before
- * taking the mutex, restored when the owner
- * unlocks the mutex.
- */
- int m_prio;
- int m_saved_prio;
-
- /*
- * Link for list of all mutexes a thread currently owns.
- */
- TAILQ_ENTRY(pthread_mutex) m_qe;
-
- /*
- * Lock for accesses to this structure.
- */
- spinlock_t lock;
-};
-
-struct pthread_spinlock {
- void *s_owner;
- unsigned int s_magic;
-};
-
-/*
- * Flags for mutexes.
- */
-#define MUTEX_FLAGS_PRIVATE 0x01
-#define MUTEX_FLAGS_INITED 0x02
-#define MUTEX_FLAGS_BUSY 0x04
-
-/*
- * Condition variable definitions.
- */
-enum pthread_cond_type {
- COND_TYPE_FAST,
- COND_TYPE_MAX
-};
-
struct pthread_cond {
- enum pthread_cond_type c_type;
- TAILQ_HEAD(cond_head, pthread) c_queue;
- pthread_mutex_t c_mutex;
- void *c_data;
- long c_flags;
- int c_seqno;
-
/*
* Lock for accesses to this structure.
*/
- struct umtx c_lock;
+ volatile umtx_t c_lock;
+ volatile umtx_t c_seqno;
+ volatile int c_waiters;
+ volatile int c_wakeups;
+ int c_pshared;
+ int c_clockid;
};
struct pthread_cond_attr {
- enum pthread_cond_type c_type;
- long c_flags;
+ int c_pshared;
+ int c_clockid;
};
-/*
- * Flags for condition variables.
- */
-#define COND_FLAGS_INITED 0x01
+struct pthread_barrier {
+ volatile umtx_t b_lock;
+ volatile umtx_t b_cycle;
+ volatile int b_count;
+ volatile int b_waiters;
+};
-/*
- * Static cond initialization values.
- */
-#define PTHREAD_COND_STATIC_INITIALIZER \
- { COND_TYPE_FAST, TAILQ_INITIALIZER, NULL, NULL, \
- 0, 0, UMTX_INITIALIZER }
+struct pthread_barrierattr {
+ int pshared;
+};
+
+struct pthread_spinlock {
+ volatile umtx_t s_lock;
+};
/*
- * Semaphore definitions.
+ * Flags for condition variables.
*/
-struct sem {
-#define SEM_MAGIC ((u_int32_t) 0x09fa4012)
- u_int32_t magic;
- pthread_mutex_t lock;
- pthread_cond_t gtzero;
- u_int32_t count;
- u_int32_t nwaiters;
-};
+#define COND_FLAGS_PRIVATE 0x01
+#define COND_FLAGS_INITED 0x02
+#define COND_FLAGS_BUSY 0x04
/*
* Cleanup definitions.
*/
struct pthread_cleanup {
struct pthread_cleanup *next;
- void (*routine) ();
+ void (*routine)();
void *routine_arg;
+ int onstack;
};
+#define THR_CLEANUP_PUSH(td, func, arg) { \
+ struct pthread_cleanup __cup; \
+ \
+ __cup.routine = func; \
+ __cup.routine_arg = arg; \
+ __cup.onstack = 1; \
+ __cup.next = (td)->cleanup; \
+ (td)->cleanup = &__cup;
+
+#define THR_CLEANUP_POP(td, exec) \
+ (td)->cleanup = __cup.next; \
+ if ((exec) != 0) \
+ __cup.routine(__cup.routine_arg); \
+}
+
struct pthread_atfork {
TAILQ_ENTRY(pthread_atfork) qe;
void (*prepare)(void);
@@ -288,9 +236,10 @@ struct pthread_attr {
int sched_interval;
int prio;
int suspend;
+#define THR_STACK_USER 0x100 /* 0xFF reserved for <pthread.h> */
int flags;
void *arg_attr;
- void (*cleanup_attr) ();
+ void (*cleanup_attr)();
void *stackaddr_attr;
size_t stacksize_attr;
size_t guardsize_attr;
@@ -299,37 +248,20 @@ struct pthread_attr {
/*
* Thread creation state attributes.
*/
-#define PTHREAD_CREATE_RUNNING 0
-#define PTHREAD_CREATE_SUSPENDED 1
+#define THR_CREATE_RUNNING 0
+#define THR_CREATE_SUSPENDED 1
/*
* Miscellaneous definitions.
*/
-#define PTHREAD_STACK32_DEFAULT (1 * 1024 * 1024)
-#define PTHREAD_STACK64_DEFAULT (2 * 1024 * 1024)
-/*
- * Size of default red zone at the end of each stack. In actuality, this "red
- * zone" is merely an unmapped region, except in the case of the initial stack.
- * Since mmap() makes it possible to specify the maximum growth of a MAP_STACK
- * region, an unmapped gap between thread stacks achieves the same effect as
- * explicitly mapped red zones.
- * This is declared and initialized in uthread_init.c.
- */
-extern int _pthread_guard_default;
-
-extern int _pthread_page_size;
-
-extern int _pthread_stack_default;
-
-extern int _pthread_stack_initial;
+#define THR_STACK_DEFAULT (sizeof(void *) / 4 * 1024 * 1024)
/*
* Maximum size of initial thread's stack. This perhaps deserves to be larger
* than the stacks of other threads, since many applications are likely to run
* almost entirely on this stack.
*/
-#define PTHREAD_STACK32_INITIAL (2 * 1024 * 1024)
-#define PTHREAD_STACK64_INITIAL (4 * 1024 * 1024)
+#define THR_STACK_INITIAL (THR_STACK_DEFAULT * 2)
/*
* Define the different priority ranges. All applications have thread
@@ -345,60 +277,30 @@ extern int _pthread_stack_initial;
* The approach taken is that, within each class, signal delivery
* always has priority over thread execution.
*/
-#define PTHREAD_DEFAULT_PRIORITY 15
-#define PTHREAD_MIN_PRIORITY 0
-#define PTHREAD_MAX_PRIORITY 31 /* 0x1F */
-#define PTHREAD_SIGNAL_PRIORITY 32 /* 0x20 */
-#define PTHREAD_RT_PRIORITY 64 /* 0x40 */
-#define PTHREAD_FIRST_PRIORITY PTHREAD_MIN_PRIORITY
-#define PTHREAD_LAST_PRIORITY \
- (PTHREAD_MAX_PRIORITY + PTHREAD_SIGNAL_PRIORITY + PTHREAD_RT_PRIORITY)
-#define PTHREAD_BASE_PRIORITY(prio) ((prio) & PTHREAD_MAX_PRIORITY)
-
-/*
- * Clock resolution in microseconds.
- */
-#define CLOCK_RES_USEC 10000
-#define CLOCK_RES_USEC_MIN 1000
+#define THR_DEFAULT_PRIORITY 15
+#define THR_MIN_PRIORITY 0
+#define THR_MAX_PRIORITY 31 /* 0x1F */
+#define THR_SIGNAL_PRIORITY 32 /* 0x20 */
+#define THR_RT_PRIORITY 64 /* 0x40 */
+#define THR_FIRST_PRIORITY THR_MIN_PRIORITY
+#define THR_LAST_PRIORITY \
+ (THR_MAX_PRIORITY + THR_SIGNAL_PRIORITY + THR_RT_PRIORITY)
+#define THR_BASE_PRIORITY(prio) ((prio) & THR_MAX_PRIORITY)
/*
* Time slice period in microseconds.
*/
#define TIMESLICE_USEC 20000
-/*
- * XXX Define a thread-safe macro to get the current time of day
- * which is updated at regular intervals by the scheduling signal
- * handler.
- */
-#define GET_CURRENT_TOD(tv) gettimeofday(&(tv), NULL)
-
-struct pthread_barrierattr {
- int ba_pshared;
-};
-
-/*
- * POSIX Threads barrier object.
- * Lock order:
- * 1. pthread_barrier
- * 2. pthread
- */
-struct pthread_barrier {
- TAILQ_HEAD(barrq_head, pthread) b_barrq;
- struct umtx b_lock;
- int b_total;
- int b_subtotal;
-};
-
struct pthread_rwlockattr {
int pshared;
};
struct pthread_rwlock {
pthread_mutex_t lock; /* monitor lock */
- int state; /* 0 = idle >0 = # of readers -1 = writer */
pthread_cond_t read_signal;
pthread_cond_t write_signal;
+ int state; /* 0 = idle >0 = # of readers -1 = writer */
int blocked_writers;
};
@@ -407,42 +309,11 @@ struct pthread_rwlock {
*/
enum pthread_state {
PS_RUNNING,
- PS_MUTEX_WAIT,
- PS_COND_WAIT,
- PS_BARRIER_WAIT,
- PS_SLEEP_WAIT, /* XXX We need to wrap syscalls to set this state */
- PS_WAIT_WAIT,
- PS_JOIN,
- PS_DEAD,
- PS_DEADLOCK,
- PS_STATE_MAX
+ PS_DEAD
};
-
-/*
- * File descriptor locking definitions.
- */
-#define FD_READ 0x1
-#define FD_WRITE 0x2
-#define FD_RDWR (FD_READ | FD_WRITE)
-
union pthread_wait_data {
pthread_mutex_t mutex;
- pthread_cond_t cond;
- spinlock_t *spinlock;
- struct pthread *thread;
-};
-
-struct join_status {
- struct pthread *thread;
- void *ret;
- int error;
-};
-
-struct pthread_state_data {
- union pthread_wait_data psd_wait_data;
- enum pthread_state psd_state;
- int psd_flags;
};
struct pthread_specific_elem {
@@ -450,35 +321,13 @@ struct pthread_specific_elem {
int seqno;
};
-struct rwlock_held {
- LIST_ENTRY(rwlock_held) rh_link;
- struct pthread_rwlock *rh_rwlock;
- int rh_rdcount;
- int rh_wrcount;
+struct pthread_key {
+ volatile int allocated;
+ volatile int count;
+ int seqno;
+ void (*destructor)(void *);
};
-LIST_HEAD(rwlock_listhead, rwlock_held);
-
-/*
- * The cancel mode a thread is in is determined by the
- * the cancel type and state it is set in. The two values
- * are combined into one mode:
- * Mode State Type
- * ---- ----- ----
- * off disabled deferred
- * off disabled async
- * deferred enabled deferred
- * async enabled async
- */
-enum cancel_mode { M_OFF, M_DEFERRED, M_ASYNC };
-
-/*
- * A thread's cancellation is pending until the cancel
- * mode has been tested to determine if the thread can be
- * cancelled immediately.
- */
-enum cancellation_state { CS_NULL, CS_PENDING, CS_SET };
-
/*
* Thread structure.
*/
@@ -487,29 +336,42 @@ struct pthread {
* Magic value to help recognize a valid thread structure
* from an invalid one:
*/
-#define PTHREAD_MAGIC ((u_int32_t) 0xd09ba115)
+#define THR_MAGIC ((u_int32_t) 0xd09ba115)
u_int32_t magic;
char *name;
- long thr_id;
- sigset_t savedsig;
- int signest; /* blocked signal netsting level */
- int ptdflags; /* used by other other threads
- to signal this thread */
- int isdead;
- int isdeadlocked;
- int exiting;
- int cancellationpoint;
+ u_int64_t uniqueid; /* for gdb */
/*
* Lock for accesses to this thread structure.
*/
- struct umtx lock;
+ umtx_t lock;
+
+ /* Thread is terminated in kernel, written by kernel. */
+ long terminated;
+
+ /* Kernel thread id. */
+ long tid;
+
+ /* Internal condition variable cycle number. */
+ umtx_t cycle;
+
+ /* How many low level locks the thread held. */
+ int locklevel;
+
+ /* Signal blocked counter. */
+ int sigblock;
+
+ /* Queue entry for list of all threads. */
+ TAILQ_ENTRY(pthread) tle; /* link for all threads in process */
+
+ /* Queue entry for GC lists. */
+ TAILQ_ENTRY(pthread) gcle;
- /* Queue entry for list of all threads: */
- TAILQ_ENTRY(pthread) tle;
+ /* Hash queue entry. */
+ LIST_ENTRY(pthread) hle;
- /* Queue entry for list of dead threads: */
- TAILQ_ENTRY(pthread) dle;
+ /* Threads reference count. */
+ int refcount;
/*
* Thread start routine, argument, stack pointer and thread
@@ -517,68 +379,67 @@ struct pthread {
*/
void *(*start_routine)(void *);
void *arg;
- void *stack;
struct pthread_attr attr;
- /*
- * Machine context, including signal state.
- */
- ucontext_t ctx;
-
- /*
- * The primary method of obtaining a thread's cancel state
- * and type is through cancelmode. The cancelstate field is
- * only so we don't loose the cancel state when the mode is
- * turned off.
+ /*
+ * Cancelability flags
*/
- enum cancel_mode cancelmode;
- enum cancel_mode cancelstate;
-
- /* Specifies if cancellation is pending, acted upon, or neither. */
- enum cancellation_state cancellation;
+#define THR_CANCEL_DISABLE 0x0001
+#define THR_CANCEL_EXITING 0x0002
+#define THR_CANCEL_AT_POINT 0x0004
+#define THR_CANCEL_NEEDED 0x0008
+#define SHOULD_CANCEL(val) \
+ (((val) & (THR_CANCEL_DISABLE | THR_CANCEL_EXITING | \
+ THR_CANCEL_NEEDED)) == THR_CANCEL_NEEDED)
+
+#define SHOULD_ASYNC_CANCEL(val) \
+ (((val) & (THR_CANCEL_DISABLE | THR_CANCEL_EXITING | \
+ THR_CANCEL_NEEDED | THR_CANCEL_AT_POINT)) == \
+ (THR_CANCEL_NEEDED | THR_CANCEL_AT_POINT))
+ int cancelflags;
+
+ /* Thread temporary signal mask. */
+ sigset_t sigmask;
+
+ /* Thread state: */
+ umtx_t state;
/*
* Error variable used instead of errno. The function __error()
* returns a pointer to this.
*/
- int error;
+ int error;
/*
* The joiner is the thread that is joining to this thread. The
* join status keeps track of a join operation to another thread.
*/
struct pthread *joiner;
- struct join_status join_status;
/*
- * A thread can belong to:
- *
- * o A queue of threads waiting for a mutex
- * o A queue of threads waiting for a condition variable
- *
- * A thread can also be joining a thread (the joiner field above).
- *
- * Use sqe for synchronization (mutex and condition variable) queue
- * links.
+ * The current thread can belong to a priority mutex queue.
+ * This is the synchronization queue link.
*/
- TAILQ_ENTRY(pthread) sqe; /* synchronization queue link */
+ TAILQ_ENTRY(pthread) sqe;
/* Wait data. */
union pthread_wait_data data;
- /* Miscellaneous flags; only set with signals deferred. */
- int flags;
-#define PTHREAD_FLAGS_PRIVATE 0x0001
-#define PTHREAD_FLAGS_BARR_REL 0x0004 /* has been released from barrier */
-#define PTHREAD_FLAGS_IN_BARRQ 0x0008 /* in barrier queue using sqe link */
-#define PTHREAD_FLAGS_IN_CONDQ 0x0080 /* in condition queue using sqe link*/
-#define PTHREAD_FLAGS_IN_MUTEXQ 0x0100 /* in mutex queue using sqe link */
-#define PTHREAD_FLAGS_SUSPENDED 0x0200 /* thread is suspended */
-#define PTHREAD_FLAGS_TRACE 0x0400 /* for debugging purposes */
-#define PTHREAD_FLAGS_IN_SYNCQ \
- (PTHREAD_FLAGS_IN_CONDQ | PTHREAD_FLAGS_IN_MUTEXQ | PTHREAD_FLAGS_IN_BARRQ)
-#define PTHREAD_FLAGS_NOT_RUNNING \
- (PTHREAD_FLAGS_IN_SYNCQ | PTHREAD_FLAGS_SUSPENDED)
+ int sflags;
+#define THR_FLAGS_IN_SYNCQ 0x0001
+
+ /* Miscellaneous flags; only set with scheduling lock held. */
+ int flags;
+#define THR_FLAGS_PRIVATE 0x0001
+#define THR_FLAGS_NEED_SUSPEND 0x0002 /* thread should be suspended */
+#define THR_FLAGS_SUSPENDED 0x0004 /* thread is suspended */
+
+ /* Thread list flags; only set with thread list lock held. */
+ int tlflags;
+#define TLFLAGS_GC_SAFE 0x0001 /* thread safe for cleaning */
+#define TLFLAGS_IN_TDLIST 0x0002 /* thread in all thread list */
+#define TLFLAGS_IN_GCLIST 0x0004 /* thread in gc list */
+#define TLFLAGS_DETACHED 0x0008 /* thread is detached */
/*
* Base priority is the user setable and retrievable priority
@@ -586,7 +447,7 @@ struct pthread {
* set thread priority and upon thread creation via a thread
* attribute or default priority.
*/
- char base_priority;
+ char base_priority;
/*
* Inherited priority is the priority a thread inherits by
@@ -596,7 +457,7 @@ struct pthread {
* that is being waited on by any other thread whose priority
* is non-zero.
*/
- char inherited_priority;
+ char inherited_priority;
/*
* Active priority is always the maximum of the threads base
@@ -604,190 +465,212 @@ struct pthread {
* in either the base or inherited priority, the active
* priority must be recalculated.
*/
- char active_priority;
+ char active_priority;
/* Number of priority ceiling or protection mutexes owned. */
- int prio_inherit_count;
- int prio_protect_count;
+ int priority_mutex_count;
- /*
- * Queue of currently owned mutexes.
- */
+ /* Queue of currently owned simple type mutexes. */
TAILQ_HEAD(, pthread_mutex) mutexq;
- /*
- * List of read-write locks owned for reading _OR_ writing.
- * This is accessed only by the current thread, so there's
- * no need for mutual exclusion.
- */
- struct rwlock_listhead *rwlockList;
+ /* Queue of currently owned priority type mutexs. */
+ TAILQ_HEAD(, pthread_mutex) pri_mutexq;
void *ret;
struct pthread_specific_elem *specific;
int specific_data_count;
+ /* Number rwlocks rdlocks held. */
+ int rdlock_count;
+
/*
- * Architecture specific id field used for _{get, set}_curthread()
- * interface.
- */
- void *arch_id;
+ * Current locks bitmap for rtld. */
+ int rtld_bits;
+
+ /* Thread control block */
+ struct tcb *tcb;
/* Cleanup handlers Link List */
- struct pthread_cleanup *cleanup;
- char *fname; /* Ptr to source file name */
- int lineno; /* Source line number. */
+ struct pthread_cleanup *cleanup;
};
-/*
- * Global variables for the uthread kernel.
- */
+#define THR_UMTX_TRYLOCK(thrd, lck) \
+ _thr_umtx_trylock((lck), (thrd)->tid)
-SCLASS void *_usrstack
-#ifdef GLOBAL_PTHREAD_PRIVATE
-= (void *) USRSTACK;
-#else
-;
-#endif
+#define THR_UMTX_LOCK(thrd, lck) \
+ _thr_umtx_lock((lck), (thrd)->tid)
-SCLASS spinlock_t stack_lock
-#ifdef GLOBAL_PTHREAD_PRIVATE
-= _SPINLOCK_INITIALIZER
-#endif
-;
-#define STACK_LOCK _SPINLOCK(&stack_lock);
-#define STACK_UNLOCK _SPINUNLOCK(&stack_lock);
+#define THR_UMTX_TIMEDLOCK(thrd, lck, timo) \
+ _thr_umtx_timedlock((lck), (thrd)->tid, (timo))
-/* List of all threads: */
-SCLASS TAILQ_HEAD(, pthread) _thread_list
-#ifdef GLOBAL_PTHREAD_PRIVATE
-= TAILQ_HEAD_INITIALIZER(_thread_list);
-#else
-;
-#endif
+#define THR_UMTX_UNLOCK(thrd, lck) \
+ _thr_umtx_unlock((lck), (thrd)->tid)
-/* Dead threads: */
-SCLASS TAILQ_HEAD(, pthread) _dead_list
-#ifdef GLOBAL_PTHREAD_PRIVATE
-= TAILQ_HEAD_INITIALIZER(_dead_list);
-#else
-;
-#endif
+#define THR_LOCK_ACQUIRE(thrd, lck) \
+do { \
+ (thrd)->locklevel++; \
+ _thr_umtx_lock(lck, (thrd)->tid); \
+} while (0)
-/*
- * These two locks protect the global active threads list and
- * the global dead threads list, respectively. Combining these
- * into one lock for both lists doesn't seem wise, since it
- * would likely increase contention during busy thread creation
- * and destruction for very little savings in space.
- *
- * The lock for the "dead threads list" must be a pthread mutex
- * because it is used with condition variables to synchronize
- * the gc thread with active threads in the process of exiting or
- * dead threads who have just been joined.
- */
-SCLASS spinlock_t thread_list_lock
-#ifdef GLOBAL_PTHREAD_PRIVATE
-= _SPINLOCK_INITIALIZER
-#endif
-;
-SCLASS pthread_mutex_t dead_list_lock
-#ifdef GLOBAL_PTHREAD_PRIVATE
-= NULL
-#endif
-;
+#define THR_LOCK_RELEASE(thrd, lck) \
+do { \
+ if ((thrd)->locklevel > 0) { \
+ _thr_umtx_unlock((lck), (thrd)->tid); \
+ (thrd)->locklevel--; \
+ } else { \
+ _thr_assert_lock_level(); \
+ } \
+} while (0)
-#define THREAD_LIST_LOCK _SPINLOCK(&thread_list_lock)
-#define THREAD_LIST_UNLOCK _SPINUNLOCK(&thread_list_lock)
-#define DEAD_LIST_LOCK _pthread_mutex_lock(&dead_list_lock)
-#define DEAD_LIST_UNLOCK _pthread_mutex_unlock(&dead_list_lock)
+#define THR_LOCK(curthrd) THR_LOCK_ACQUIRE(curthrd, &(curthrd)->lock)
+#define THR_UNLOCK(curthrd) THR_LOCK_RELEASE(curthrd, &(curthrd)->lock)
+#define THR_THREAD_LOCK(curthrd, thr) THR_LOCK_ACQUIRE(curthrd, &(thr)->lock)
+#define THR_THREAD_UNLOCK(curthrd, thr) THR_LOCK_RELEASE(curthrd, &(thr)->lock)
-/* Initial thread: */
-SCLASS struct pthread *_thread_initial
-#ifdef GLOBAL_PTHREAD_PRIVATE
-= NULL;
-#else
-;
-#endif
+#define THREAD_LIST_LOCK(curthrd) \
+do { \
+ THR_LOCK_ACQUIRE((curthrd), &_thr_list_lock); \
+} while (0)
-SCLASS TAILQ_HEAD(atfork_head, pthread_atfork) _atfork_list;
-SCLASS pthread_mutex_t _atfork_mutex;
+#define THREAD_LIST_UNLOCK(curthrd) \
+do { \
+ THR_LOCK_RELEASE((curthrd), &_thr_list_lock); \
+} while (0)
-/* Default thread attributes: */
-SCLASS struct pthread_attr pthread_attr_default
-#ifdef GLOBAL_PTHREAD_PRIVATE
-= { SCHED_RR, 0, TIMESLICE_USEC, PTHREAD_DEFAULT_PRIORITY,
- PTHREAD_CREATE_RUNNING, PTHREAD_CREATE_JOINABLE, NULL, NULL, NULL,
- -1, -1 };
-#else
-;
-#endif
+/*
+ * Macros to insert/remove threads to the all thread list and
+ * the gc list.
+ */
+#define THR_LIST_ADD(thrd) do { \
+ if (((thrd)->tlflags & TLFLAGS_IN_TDLIST) == 0) { \
+ TAILQ_INSERT_HEAD(&_thread_list, thrd, tle); \
+ _thr_hash_add(thrd); \
+ (thrd)->tlflags |= TLFLAGS_IN_TDLIST; \
+ } \
+} while (0)
+#define THR_LIST_REMOVE(thrd) do { \
+ if (((thrd)->tlflags & TLFLAGS_IN_TDLIST) != 0) { \
+ TAILQ_REMOVE(&_thread_list, thrd, tle); \
+ _thr_hash_remove(thrd); \
+ (thrd)->tlflags &= ~TLFLAGS_IN_TDLIST; \
+ } \
+} while (0)
+#define THR_GCLIST_ADD(thrd) do { \
+ if (((thrd)->tlflags & TLFLAGS_IN_GCLIST) == 0) { \
+ TAILQ_INSERT_HEAD(&_thread_gc_list, thrd, gcle);\
+ (thrd)->tlflags |= TLFLAGS_IN_GCLIST; \
+ _gc_count++; \
+ } \
+} while (0)
+#define THR_GCLIST_REMOVE(thrd) do { \
+ if (((thrd)->tlflags & TLFLAGS_IN_GCLIST) != 0) { \
+ TAILQ_REMOVE(&_thread_gc_list, thrd, gcle); \
+ (thrd)->tlflags &= ~TLFLAGS_IN_GCLIST; \
+ _gc_count--; \
+ } \
+} while (0)
-/* Default mutex attributes: */
-SCLASS struct pthread_mutex_attr pthread_mutexattr_default
-#ifdef GLOBAL_PTHREAD_PRIVATE
-= { PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, 0 };
-#else
-;
-#endif
+#define GC_NEEDED() (_gc_count >= 5)
-/* Default condition variable attributes: */
-SCLASS struct pthread_cond_attr pthread_condattr_default
-#ifdef GLOBAL_PTHREAD_PRIVATE
-= { COND_TYPE_FAST, 0 };
-#else
-;
-#endif
+#define THR_IN_SYNCQ(thrd) (((thrd)->sflags & THR_FLAGS_IN_SYNCQ) != 0)
+
+extern int __isthreaded;
/*
- * Array of signal actions for this process.
+ * Global variables for the pthread kernel.
*/
-SCLASS struct sigaction _thread_sigact[NSIG];
-/* Precomputed signal set for _thread_suspend. */
-SCLASS sigset_t _thread_suspend_sigset;
+SCLASS void *_usrstack SCLASS_PRESET(NULL);
+SCLASS struct pthread *_thr_initial SCLASS_PRESET(NULL);
+/* For debugger */
+SCLASS int _libthr_debug SCLASS_PRESET(0);
+SCLASS int _thr_scope_system SCLASS_PRESET(0);
-/* Tracks the number of threads blocked while waiting for a spinlock. */
-SCLASS volatile int _spinblock_count
-#ifdef GLOBAL_PTHREAD_PRIVATE
-= 0
-#endif
-;
+/* List of all threads: */
+SCLASS TAILQ_HEAD(, pthread) _thread_list
+ SCLASS_PRESET(TAILQ_HEAD_INITIALIZER(_thread_list));
-/*
- * libthread_db.so support.
- */
-SCLASS int _libthr_debug
-#ifdef GLOBAL_PTHREAD_PRIVATE
-= 0
-#endif
-;
+/* List of threads needing GC: */
+SCLASS TAILQ_HEAD(, pthread) _thread_gc_list
+ SCLASS_PRESET(TAILQ_HEAD_INITIALIZER(_thread_gc_list));
+
+SCLASS int _thread_active_threads SCLASS_PRESET(1);
+
+SCLASS TAILQ_HEAD(atfork_head, pthread_atfork) _thr_atfork_list;
+SCLASS umtx_t _thr_atfork_lock;
+
+/* Default thread attributes: */
+SCLASS struct pthread_attr _pthread_attr_default
+ SCLASS_PRESET({
+ .sched_policy = SCHED_RR,
+ .sched_inherit = 0,
+ .sched_interval = TIMESLICE_USEC,
+ .prio = THR_DEFAULT_PRIORITY,
+ .suspend = THR_CREATE_RUNNING,
+ .flags = 0,
+ .arg_attr = NULL,
+ .cleanup_attr = NULL,
+ .stackaddr_attr = NULL,
+ .stacksize_attr = THR_STACK_DEFAULT,
+ .guardsize_attr = 0
+ });
+
+/* Default mutex attributes: */
+SCLASS struct pthread_mutex_attr _pthread_mutexattr_default
+ SCLASS_PRESET({
+ .m_type = PTHREAD_MUTEX_DEFAULT,
+ .m_protocol = PTHREAD_PRIO_NONE,
+ .m_ceiling = 0,
+ .m_flags = 0
+ });
-/* Undefine the storage class specifier: */
+/* Default condition variable attributes: */
+SCLASS struct pthread_cond_attr _pthread_condattr_default
+ SCLASS_PRESET({
+ .c_pshared = PTHREAD_PROCESS_PRIVATE,
+ .c_clockid = CLOCK_REALTIME
+ });
+
+SCLASS pid_t _thr_pid SCLASS_PRESET(0);
+SCLASS int _thr_guard_default;
+SCLASS int _thr_stack_default SCLASS_PRESET(THR_STACK_DEFAULT);
+SCLASS int _thr_stack_initial SCLASS_PRESET(THR_STACK_INITIAL);
+SCLASS int _thr_page_size;
+/* Garbage thread count. */
+SCLASS int _gc_count SCLASS_PRESET(0);
+
+SCLASS umtx_t _mutex_static_lock;
+SCLASS umtx_t _cond_static_lock;
+SCLASS umtx_t _rwlock_static_lock;
+SCLASS umtx_t _keytable_lock;
+SCLASS umtx_t _thr_list_lock;
+
+/* Undefine the storage class and preset specifiers: */
#undef SCLASS
+#undef SCLASS_PRESET
/*
* Function prototype definitions.
*/
__BEGIN_DECLS
-char *__ttyname_basic(int);
-char *__ttyname_r_basic(int, char *, size_t);
-char *ttyname_r(int, char *, size_t);
-void _cond_wait_backout(pthread_t);
-int _find_thread(pthread_t);
-pthread_t _get_curthread(void);
-void *_set_curthread(ucontext_t *, struct pthread *, int *);
-void _retire_thread(void *arch_id);
-void *_thread_stack_alloc(size_t, size_t);
-void _thread_stack_free(void *, size_t, size_t);
-int _thread_create(pthread_t *,const pthread_attr_t *,void *(*start_routine)(void *),void *,pthread_t);
+int _thr_setthreaded(int);
int _mutex_cv_lock(pthread_mutex_t *);
int _mutex_cv_unlock(pthread_mutex_t *);
-void _mutex_lock_backout(pthread_t);
-void _mutex_notify_priochange(pthread_t);
+void _mutex_notify_priochange(struct pthread *, struct pthread *, int);
int _mutex_reinit(pthread_mutex_t *);
-void _mutex_unlock_private(pthread_t);
-int _cond_reinit(pthread_cond_t *);
+void _mutex_fork(struct pthread *curthread);
+void _mutex_unlock_private(struct pthread *);
+void _libpthread_init(struct pthread *);
void *_pthread_getspecific(pthread_key_t);
+int _pthread_cond_init(pthread_cond_t *, const pthread_condattr_t *);
+int _pthread_cond_destroy(pthread_cond_t *);
+int _pthread_cond_wait(pthread_cond_t *, pthread_mutex_t *);
+int _pthread_cond_timedwait(pthread_cond_t *, pthread_mutex_t *,
+ const struct timespec *);
+int _pthread_cond_signal(pthread_cond_t *);
+int _pthread_cond_broadcast(pthread_cond_t *);
+int _pthread_create(pthread_t * thread, const pthread_attr_t * attr,
+ void *(*start_routine) (void *), void *arg);
int _pthread_key_create(pthread_key_t *, void (*) (void *));
int _pthread_key_delete(pthread_key_t);
int _pthread_mutex_destroy(pthread_mutex_t *);
@@ -799,45 +682,55 @@ int _pthread_mutexattr_init(pthread_mutexattr_t *);
int _pthread_mutexattr_destroy(pthread_mutexattr_t *);
int _pthread_mutexattr_settype(pthread_mutexattr_t *, int);
int _pthread_once(pthread_once_t *, void (*) (void));
-pthread_t _pthread_self(void);
+int _pthread_rwlock_init(pthread_rwlock_t *, const pthread_rwlockattr_t *);
+int _pthread_rwlock_destroy (pthread_rwlock_t *);
+struct pthread *_pthread_self(void);
int _pthread_setspecific(pthread_key_t, const void *);
-int _spintrylock(spinlock_t *);
-void _thread_exit(char *, int, char *);
-void _thread_exit_cleanup(void);
-void *_thread_cleanup(pthread_t);
+void _pthread_testcancel(void);
+void _pthread_yield(void);
+void _pthread_cleanup_push(void (*routine) (void *), void *routine_arg);
+void _pthread_cleanup_pop(int execute);
+struct pthread *_thr_alloc(struct pthread *);
+void _thread_exit(char *, int, char *) __dead2;
+void _thr_exit_cleanup(void);
+int _thr_ref_add(struct pthread *, struct pthread *, int);
+void _thr_ref_delete(struct pthread *, struct pthread *);
+int _thr_find_thread(struct pthread *, struct pthread *, int);
+void _thr_rtld_init(void);
+void _thr_rtld_fini(void);
+int _thr_stack_alloc(struct pthread_attr *);
+void _thr_stack_free(struct pthread_attr *);
+void _thr_free(struct pthread *, struct pthread *);
+void _thr_gc(struct pthread *);
void _thread_cleanupspecific(void);
void _thread_dump_info(void);
-void _thread_init(void);
-void _thread_printf(int fd, const char *, ...);
-void _thread_start(pthread_t td);
-void _thread_seterrno(pthread_t, int);
-void _thread_enter_cancellation_point(void);
-void _thread_leave_cancellation_point(void);
-void _thread_cancellation_point(void);
-int _thread_suspend(pthread_t thread, const struct timespec *abstime);
-void _thread_critical_enter(pthread_t);
-void _thread_critical_exit(pthread_t);
-void _thread_sigblock();
-void _thread_sigunblock();
-void adjust_prio_inheritance(struct pthread *);
-void adjust_prio_protection(struct pthread *);
-void deadlist_free_onethread(struct pthread *);
-void init_td_common(struct pthread *, struct pthread_attr *, int);
-void init_tdlist(struct pthread *, int);
-void proc_sigact_copyin(int, const struct sigaction *);
-void proc_sigact_copyout(int, struct sigaction *);
-void readjust_priorities(struct pthread *, struct pthread_mutex *);
-struct sigaction *proc_sigact_sigaction(int);
+void _thread_printf(int, const char *, ...);
+void _thr_spinlock_init(void);
+int _thr_cancel_enter(struct pthread *);
+void _thr_cancel_leave(struct pthread *, int);
+void _thr_signal_block(struct pthread *);
+void _thr_signal_unblock(struct pthread *);
+void _thr_signal_init(void);
+void _thr_signal_deinit(void);
+int _thr_send_sig(struct pthread *, int sig);
+void _thr_list_init();
+void _thr_hash_add(struct pthread *);
+void _thr_hash_remove(struct pthread *);
+struct pthread *_thr_hash_find(struct pthread *);
+void _thr_link(struct pthread *curthread, struct pthread *thread);
+void _thr_unlink(struct pthread *curthread, struct pthread *thread);
+void _thr_suspend_check(struct pthread *curthread);
+void _thr_assert_lock_level() __dead2;
/* #include <sys/aio.h> */
#ifdef _SYS_AIO_H_
int __sys_aio_suspend(const struct aiocb * const[], int, const struct timespec *);
#endif
-/* #include <sys/event.h> */
-#ifdef _SYS_EVENT_H_
-int __sys_kevent(int, const struct kevent *, int, struct kevent *,
- int, const struct timespec *);
+/* #include <fcntl.h> */
+#ifdef _SYS_FCNTL_H_
+int __sys_fcntl(int, int, ...);
+int __sys_open(const char *, int, ...);
#endif
/* #include <sys/ioctl.h> */
@@ -845,115 +738,92 @@ int __sys_kevent(int, const struct kevent *, int, struct kevent *,
int __sys_ioctl(int, unsigned long, ...);
#endif
-/* #include <sys/msg.h> */
-#ifdef _SYS_MSG_H_
-int __sys_msgrcv(int, void *, size_t, long, int);
-int __sys_msgsnd(int, const void *, size_t, int);
-#endif
-
-/* #include <sys/mman.h> */
-#ifdef _SYS_MMAN_H_
-int __sys_msync(void *, size_t, int);
+/* #include <sched.h> */
+#ifdef _SCHED_H_
+int __sys_sched_yield(void);
#endif
-/* #include <sys/mount.h> */
-#ifdef _SYS_MOUNT_H_
-int __sys_fstatfs(int, struct statfs *);
+/* #include <signal.h> */
+#ifdef _SIGNAL_H_
+int __sys_kill(pid_t, int);
+int __sys_sigaction(int, const struct sigaction *, struct sigaction *);
+int __sys_sigpending(sigset_t *);
+int __sys_sigprocmask(int, const sigset_t *, sigset_t *);
+int __sys_sigsuspend(const sigset_t *);
+int __sys_sigreturn(ucontext_t *);
+int __sys_sigaltstack(const struct sigaltstack *, struct sigaltstack *);
#endif
/* #include <sys/socket.h> */
#ifdef _SYS_SOCKET_H_
int __sys_accept(int, struct sockaddr *, socklen_t *);
-int __sys_bind(int, const struct sockaddr *, socklen_t);
int __sys_connect(int, const struct sockaddr *, socklen_t);
-int __sys_getpeername(int, struct sockaddr *, socklen_t *);
-int __sys_getsockname(int, struct sockaddr *, socklen_t *);
-int __sys_getsockopt(int, int, int, void *, socklen_t *);
-int __sys_listen(int, int);
ssize_t __sys_recv(int, void *, size_t, int);
-ssize_t __sys_recvfrom(int, void *, size_t, int, struct sockaddr *, socklen_t *);
-ssize_t __sys_recvmsg(int, struct msghdr *, int);
-int __sys_sendfile(int, int, off_t, size_t, struct sf_hdtr *, off_t *, int);
-ssize_t __sys_sendmsg(int, const struct msghdr *, int);
-ssize_t __sys_sendto(int, const void *,size_t, int, const struct sockaddr *, socklen_t);
-int __sys_setsockopt(int, int, int, const void *, socklen_t);
-int __sys_shutdown(int, int);
-int __sys_socket(int, int, int);
-int __sys_socketpair(int, int, int, int *);
-#endif
-
-/* #include <sys/stat.h> */
-#ifdef _SYS_STAT_H_
-int __sys_fchflags(int, u_long);
-int __sys_fchmod(int, mode_t);
-int __sys_fstat(int, struct stat *);
+ssize_t __sys_recvfrom(int, void *, size_t, int, struct sockaddr *, socklen_t *);
+ssize_t __sys_recvmsg(int, struct msghdr *, int);
+int __sys_sendfile(int, int, off_t, size_t, struct sf_hdtr *,
+ off_t *, int);
+ssize_t __sys_sendmsg(int, const struct msghdr *, int);
+ssize_t __sys_sendto(int, const void *,size_t, int, const struct sockaddr *, socklen_t);
#endif
/* #include <sys/uio.h> */
-#ifdef _SYS_UIO_H_
-ssize_t __sys_readv(int, const struct iovec *, int);
-ssize_t __sys_writev(int, const struct iovec *, int);
-#endif
-
-/* #include <sys/wait.h> */
-#ifdef WNOHANG
-pid_t __sys_wait4(pid_t, int *, int, struct rusage *);
+#ifdef _SYS_UIO_H_
+ssize_t __sys_readv(int, const struct iovec *, int);
+ssize_t __sys_writev(int, const struct iovec *, int);
#endif
-/* #include <dirent.h> */
-#ifdef _DIRENT_H_
-int __sys_getdirentries(int, char *, int, long *);
+/* #include <time.h> */
+#ifdef _TIME_H_
+int __sys_nanosleep(const struct timespec *, struct timespec *);
#endif
-/* #include <fcntl.h> */
-#ifdef _SYS_FCNTL_H_
-int __sys_fcntl(int, int, ...);
-int __sys_flock(int, int);
-int __sys_open(const char *, int, ...);
+/* #include <unistd.h> */
+#ifdef _UNISTD_H_
+int __sys_close(int);
+int __sys_execve(const char *, char * const *, char * const *);
+int __sys_fork(void);
+int __sys_fsync(int);
+pid_t __sys_getpid(void);
+int __sys_select(int, fd_set *, fd_set *, fd_set *, struct timeval *);
+ssize_t __sys_read(int, void *, size_t);
+ssize_t __sys_write(int, const void *, size_t);
+void __sys_exit(int);
+int __sys_sigwait(const sigset_t *, int *);
+int __sys_sigtimedwait(const sigset_t *, siginfo_t *,
+ const struct timespec *);
+int __sys_sigwaitinfo(const sigset_t *set, siginfo_t *info);
#endif
/* #include <poll.h> */
#ifdef _SYS_POLL_H_
-int __sys_poll(struct pollfd *, unsigned, int);
-#endif
-
-/* #include <semaphore.h> */
-#ifdef _SEMAPHORE_H_
-int __sem_timedwait(sem_t * __restrict, const struct timespec * __restrict);
-int __sem_wait(sem_t *);
+int __sys_poll(struct pollfd *, unsigned, int);
#endif
-/* #include <signal.h> */
-#ifdef _SIGNAL_H_
-int __sys_sigaction(int, const struct sigaction *, struct sigaction *);
-int __sys_sigaltstack(const struct sigaltstack *, struct sigaltstack *);
-int __sys_sigprocmask(int, const sigset_t *, sigset_t *);
-int __sys_sigreturn(ucontext_t *);
-int __sys_sigsuspend(const sigset_t *);
-int __sys_sigtimedwait(const sigset_t * __restrict, siginfo_t * __restrict,
- const struct timespec * __restrict);
-int __sys_sigwait(const sigset_t * __restrict, int * __restrict);
-int __sys_sigwaitinfo(const sigset_t * __restrict, siginfo_t * __restrict);
+/* #include <sys/mman.h> */
+#ifdef _SYS_MMAN_H_
+int __sys_msync(void *, size_t, int);
#endif
-/* #include <unistd.h> */
-#ifdef _UNISTD_H_
-int __sys_close(int);
-int __sys_dup(int);
-int __sys_dup2(int, int);
-int __sys_execve(const char *, char * const *, char * const *);
-void __sys_exit(int);
-int __sys_fchown(int, uid_t, gid_t);
-pid_t __sys_fork(void);
-long __sys_fpathconf(int, int);
-int __sys_fsync(int);
-int __sys_pipe(int *);
-ssize_t __sys_pread(int, void *, size_t, off_t);
-ssize_t __sys_pwrite(int, const void *, size_t, off_t);
-ssize_t __sys_read(int, void *, size_t);
-ssize_t __sys_write(int, const void *, size_t);
-#endif
+static inline int
+_thr_isthreaded(void)
+{
+ return (__isthreaded != 0);
+}
+
+static inline int
+_thr_is_inited(void)
+{
+ return (_thr_initial != NULL);
+}
+
+static inline void
+_thr_check_init(void)
+{
+ if (_thr_initial == NULL)
+ _libpthread_init(NULL);
+}
__END_DECLS
-#endif /* !_PTHREAD_PRIVATE_H */
+#endif /* !_THR_PRIVATE_H */
diff --git a/lib/libthr/thread/thr_pspinlock.c b/lib/libthr/thread/thr_pspinlock.c
new file mode 100644
index 0000000..9343fdc
--- /dev/null
+++ b/lib/libthr/thread/thr_pspinlock.c
@@ -0,0 +1,133 @@
+/*-
+ * Copyright (c) 2003 David Xu <davidxu@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <errno.h>
+#include <stdlib.h>
+#include <pthread.h>
+#include "thr_private.h"
+
+#define SPIN_COUNT 100000
+
+__weak_reference(_pthread_spin_init, pthread_spin_init);
+__weak_reference(_pthread_spin_destroy, pthread_spin_destroy);
+__weak_reference(_pthread_spin_trylock, pthread_spin_trylock);
+__weak_reference(_pthread_spin_lock, pthread_spin_lock);
+__weak_reference(_pthread_spin_unlock, pthread_spin_unlock);
+
+int
+_pthread_spin_init(pthread_spinlock_t *lock, int pshared)
+{
+ struct pthread_spinlock *lck;
+ int ret;
+
+ if (lock == NULL || pshared != PTHREAD_PROCESS_PRIVATE)
+ ret = EINVAL;
+ else if ((lck = malloc(sizeof(struct pthread_spinlock))) == NULL)
+ ret = ENOMEM;
+ else {
+ _thr_umtx_init(&lck->s_lock);
+ *lock = lck;
+ ret = 0;
+ }
+
+ return (ret);
+}
+
+int
+_pthread_spin_destroy(pthread_spinlock_t *lock)
+{
+ int ret;
+
+ if (lock == NULL || *lock == NULL)
+ ret = EINVAL;
+ else {
+ free(*lock);
+ *lock = NULL;
+ ret = 0;
+ }
+
+ return (ret);
+}
+
+int
+_pthread_spin_trylock(pthread_spinlock_t *lock)
+{
+ struct pthread *curthread = _get_curthread();
+ struct pthread_spinlock *lck;
+ int ret;
+
+ if (lock == NULL || (lck = *lock) == NULL)
+ ret = EINVAL;
+ else
+ ret = THR_UMTX_TRYLOCK(curthread, &lck->s_lock);
+ return (ret);
+}
+
+int
+_pthread_spin_lock(pthread_spinlock_t *lock)
+{
+ struct pthread *curthread = _get_curthread();
+ struct pthread_spinlock *lck;
+ int ret, count;
+
+ if (lock == NULL || (lck = *lock) == NULL)
+ ret = EINVAL;
+ else {
+ count = SPIN_COUNT;
+ while ((ret = THR_UMTX_TRYLOCK(curthread, &lck->s_lock)) != 0) {
+ while (lck->s_lock) {
+#ifdef __i386__
+ /* tell cpu we are spinning */
+ __asm __volatile("pause");
+#endif
+ if (--count <= 0) {
+ count = SPIN_COUNT;
+ _pthread_yield();
+ }
+ }
+ }
+ ret = 0;
+ }
+
+ return (ret);
+}
+
+int
+_pthread_spin_unlock(pthread_spinlock_t *lock)
+{
+ struct pthread *curthread = _get_curthread();
+ struct pthread_spinlock *lck;
+ int ret;
+
+ if (lock == NULL || (lck = *lock) == NULL)
+ ret = EINVAL;
+ else {
+ ret = THR_UMTX_UNLOCK(curthread, &lck->s_lock);
+ }
+ return (ret);
+}
diff --git a/lib/libthr/thread/thr_resume_np.c b/lib/libthr/thread/thr_resume_np.c
index a18d57d..8fe6de9 100644
--- a/lib/libthr/thread/thr_resume_np.c
+++ b/lib/libthr/thread/thr_resume_np.c
@@ -31,30 +31,31 @@
*
* $FreeBSD$
*/
+
#include <errno.h>
#include <pthread.h>
-#include <stdlib.h>
-#include "thr_private.h"
-static void resume_common(struct pthread *);
+#include "thr_private.h"
__weak_reference(_pthread_resume_np, pthread_resume_np);
__weak_reference(_pthread_resume_all_np, pthread_resume_all_np);
+static void resume_common(struct pthread *thread);
+
/* Resume a thread: */
int
_pthread_resume_np(pthread_t thread)
{
+ struct pthread *curthread = _get_curthread();
int ret;
- /* Find the thread in the list of active threads: */
- if ((ret = _find_thread(thread)) == 0) {
- PTHREAD_LOCK(thread);
-
- if ((thread->flags & PTHREAD_FLAGS_SUSPENDED) != 0)
- resume_common(thread);
-
- PTHREAD_UNLOCK(thread);
+ /* Add a reference to the thread: */
+ if ((ret = _thr_ref_add(curthread, thread, /*include dead*/0)) == 0) {
+ /* Lock the threads scheduling queue: */
+ THR_THREAD_LOCK(curthread, thread);
+ resume_common(thread);
+ THR_THREAD_UNLOCK(curthread, thread);
+ _thr_ref_delete(curthread, thread);
}
return (ret);
}
@@ -62,28 +63,30 @@ _pthread_resume_np(pthread_t thread)
void
_pthread_resume_all_np(void)
{
- struct pthread *thread;
+ struct pthread *curthread = _get_curthread();
+ struct pthread *thread;
+
+ /* Take the thread list lock: */
+ THREAD_LIST_LOCK(curthread);
- _thread_sigblock();
- THREAD_LIST_LOCK;
TAILQ_FOREACH(thread, &_thread_list, tle) {
- PTHREAD_LOCK(thread);
- if ((thread != curthread) &&
- ((thread->flags & PTHREAD_FLAGS_SUSPENDED) != 0))
+ if (thread != curthread) {
+ THR_THREAD_LOCK(curthread, thread);
resume_common(thread);
- PTHREAD_UNLOCK(thread);
+ THR_THREAD_UNLOCK(curthread, thread);
+ }
}
- THREAD_LIST_UNLOCK;
- _thread_sigunblock();
+
+ /* Release the thread list lock: */
+ THREAD_LIST_UNLOCK(curthread);
}
-/*
- * The caller is required to have locked the thread before
- * calling this function.
- */
static void
resume_common(struct pthread *thread)
{
- thread->flags &= ~PTHREAD_FLAGS_SUSPENDED;
- thr_wake(thread->thr_id);
+ /* Clear the suspend flag: */
+ thread->flags &= ~THR_FLAGS_NEED_SUSPEND;
+ thread->cycle++;
+ _thr_umtx_wake(&thread->cycle, 1);
+ _thr_send_sig(thread, SIGCANCEL);
}
diff --git a/lib/libthr/thread/thr_rwlock.c b/lib/libthr/thread/thr_rwlock.c
index 73c489e..6881ef8 100644
--- a/lib/libthr/thread/thr_rwlock.c
+++ b/lib/libthr/thread/thr_rwlock.c
@@ -1,6 +1,5 @@
/*-
* Copyright (c) 1998 Alex Nash
- * Copyright (c) 2004 Michael Telahun Makonnen
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,180 +30,183 @@
#include <limits.h>
#include <stdlib.h>
+#include "namespace.h"
#include <pthread.h>
+#include "un-namespace.h"
#include "thr_private.h"
/* maximum number of times a read lock may be obtained */
#define MAX_READ_LOCKS (INT_MAX - 1)
-/*
- * For distinguishing operations on read and write locks.
- */
-enum rwlock_type {RWT_READ, RWT_WRITE};
-
-/* Support for staticaly initialized mutexes. */
-static struct umtx init_lock = UMTX_INITIALIZER;
-
__weak_reference(_pthread_rwlock_destroy, pthread_rwlock_destroy);
__weak_reference(_pthread_rwlock_init, pthread_rwlock_init);
__weak_reference(_pthread_rwlock_rdlock, pthread_rwlock_rdlock);
__weak_reference(_pthread_rwlock_timedrdlock, pthread_rwlock_timedrdlock);
-__weak_reference(_pthread_rwlock_timedwrlock, pthread_rwlock_timedwrlock);
__weak_reference(_pthread_rwlock_tryrdlock, pthread_rwlock_tryrdlock);
__weak_reference(_pthread_rwlock_trywrlock, pthread_rwlock_trywrlock);
__weak_reference(_pthread_rwlock_unlock, pthread_rwlock_unlock);
__weak_reference(_pthread_rwlock_wrlock, pthread_rwlock_wrlock);
+__weak_reference(_pthread_rwlock_timedwrlock, pthread_rwlock_timedwrlock);
-static int insert_rwlock(struct pthread_rwlock *, enum rwlock_type);
-static int rwlock_init_static(struct pthread_rwlock **rwlock);
-static int rwlock_rdlock_common(pthread_rwlock_t *, int,
- const struct timespec *);
-static int rwlock_wrlock_common(pthread_rwlock_t *, int,
- const struct timespec *);
+/*
+ * Prototypes
+ */
-int
-_pthread_rwlock_destroy (pthread_rwlock_t *rwlock)
+static int
+rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
{
pthread_rwlock_t prwlock;
+ int ret;
- if (rwlock == NULL || *rwlock == NULL)
- return (EINVAL);
-
- prwlock = *rwlock;
-
- if (prwlock->state != 0)
- return (EBUSY);
+ /* allocate rwlock object */
+ prwlock = (pthread_rwlock_t)malloc(sizeof(struct pthread_rwlock));
- pthread_mutex_destroy(&prwlock->lock);
- pthread_cond_destroy(&prwlock->read_signal);
- pthread_cond_destroy(&prwlock->write_signal);
- free(prwlock);
+ if (prwlock == NULL)
+ return (ENOMEM);
- *rwlock = NULL;
+ /* initialize the lock */
+ if ((ret = _pthread_mutex_init(&prwlock->lock, NULL)) != 0)
+ free(prwlock);
+ else {
+ /* initialize the read condition signal */
+ ret = _pthread_cond_init(&prwlock->read_signal, NULL);
+
+ if (ret != 0) {
+ _pthread_mutex_destroy(&prwlock->lock);
+ free(prwlock);
+ } else {
+ /* initialize the write condition signal */
+ ret = _pthread_cond_init(&prwlock->write_signal, NULL);
+
+ if (ret != 0) {
+ _pthread_cond_destroy(&prwlock->read_signal);
+ _pthread_mutex_destroy(&prwlock->lock);
+ free(prwlock);
+ } else {
+ /* success */
+ prwlock->state = 0;
+ prwlock->blocked_writers = 0;
+ *rwlock = prwlock;
+ }
+ }
+ }
- return (0);
+ return (ret);
}
int
-_pthread_rwlock_init (pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
+_pthread_rwlock_destroy (pthread_rwlock_t *rwlock)
{
- pthread_rwlock_t prwlock;
- int ret;
+ int ret;
- /* allocate rwlock object */
- prwlock = (pthread_rwlock_t)malloc(sizeof(struct pthread_rwlock));
+ if (rwlock == NULL)
+ ret = EINVAL;
+ else {
+ pthread_rwlock_t prwlock;
- if (prwlock == NULL) {
- ret = ENOMEM;
- goto out;
- }
+ prwlock = *rwlock;
- /* initialize the lock */
- if ((ret = pthread_mutex_init(&prwlock->lock, NULL)) != 0)
- goto out;
-
- /* initialize the read condition signal */
- if ((ret = pthread_cond_init(&prwlock->read_signal, NULL)) != 0)
- goto out_readcond;
-
- /* initialize the write condition signal */
- if ((ret = pthread_cond_init(&prwlock->write_signal, NULL)) != 0)
- goto out_writecond;
-
- /* success */
- prwlock->state = 0;
- prwlock->blocked_writers = 0;
-
- *rwlock = prwlock;
- return (0);
-
-out_writecond:
- pthread_cond_destroy(&prwlock->read_signal);
-out_readcond:
- pthread_mutex_destroy(&prwlock->lock);
-out:
- if (prwlock != NULL)
+ _pthread_mutex_destroy(&prwlock->lock);
+ _pthread_cond_destroy(&prwlock->read_signal);
+ _pthread_cond_destroy(&prwlock->write_signal);
free(prwlock);
- return(ret);
+
+ *rwlock = NULL;
+
+ ret = 0;
+ }
+ return (ret);
}
-/*
- * If nonblocking is 0 this function will wait on the lock. If
- * it is greater than 0 it will return immediately with EBUSY.
- */
static int
-rwlock_rdlock_common(pthread_rwlock_t *rwlock, int nonblocking,
- const struct timespec *timeout)
+init_static(struct pthread *thread, pthread_rwlock_t *rwlock)
{
- struct rwlock_held *rh;
- pthread_rwlock_t prwlock;
- int ret;
+ int ret;
- rh = NULL;
- if (rwlock == NULL)
- return(EINVAL);
+ THR_LOCK_ACQUIRE(thread, &_rwlock_static_lock);
- /*
- * Check for validity of the timeout parameter.
- */
- if (timeout != NULL &&
- (timeout->tv_nsec < 0 || timeout->tv_nsec >= 1000000000))
+ if (*rwlock == NULL)
+ ret = rwlock_init(rwlock, NULL);
+ else
+ ret = 0;
+
+ THR_LOCK_RELEASE(thread, &_rwlock_static_lock);
+
+ return (ret);
+}
+
+int
+_pthread_rwlock_init (pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
+{
+ *rwlock = NULL;
+ return (rwlock_init(rwlock, attr));
+}
+
+static int
+rwlock_rdlock_common (pthread_rwlock_t *rwlock, const struct timespec *abstime)
+{
+ struct pthread *curthread = _get_curthread();
+ pthread_rwlock_t prwlock;
+ int ret;
+
+ if (rwlock == NULL)
return (EINVAL);
- if ((ret = rwlock_init_static(rwlock)) !=0 )
- return (ret);
prwlock = *rwlock;
+ /* check for static initialization */
+ if (prwlock == NULL) {
+ if ((ret = init_static(curthread, rwlock)) != 0)
+ return (ret);
+
+ prwlock = *rwlock;
+ }
+
/* grab the monitor lock */
- if ((ret = pthread_mutex_lock(&prwlock->lock)) != 0)
- return(ret);
+ if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
+ return (ret);
/* check lock count */
if (prwlock->state == MAX_READ_LOCKS) {
- pthread_mutex_unlock(&prwlock->lock);
+ _pthread_mutex_unlock(&prwlock->lock);
return (EAGAIN);
}
- /* give writers priority over readers */
- while (prwlock->blocked_writers || prwlock->state < 0) {
- if (nonblocking) {
- pthread_mutex_unlock(&prwlock->lock);
- return (EBUSY);
- }
-
+ curthread = _get_curthread();
+ if ((curthread->rdlock_count > 0) && (prwlock->state > 0)) {
/*
- * If this lock is already held for writing we have
- * a deadlock situation.
+ * To avoid having to track all the rdlocks held by
+ * a thread or all of the threads that hold a rdlock,
+ * we keep a simple count of all the rdlocks held by
+ * a thread. If a thread holds any rdlocks it is
+ * possible that it is attempting to take a recursive
+ * rdlock. If there are blocked writers and precedence
+ * is given to them, then that would result in the thread
+ * deadlocking. So allowing a thread to take the rdlock
+ * when it already has one or more rdlocks avoids the
+ * deadlock. I hope the reader can follow that logic ;-)
*/
- if (curthread->rwlockList != NULL && prwlock->state < 0) {
- LIST_FOREACH(rh, curthread->rwlockList, rh_link) {
- if (rh->rh_rwlock == prwlock &&
- rh->rh_wrcount > 0) {
- pthread_mutex_unlock(&prwlock->lock);
- return (EDEADLK);
- }
- }
- }
- if (timeout == NULL)
- ret = pthread_cond_wait(&prwlock->read_signal,
+ ; /* nothing needed */
+ } else {
+ /* give writers priority over readers */
+ while (prwlock->blocked_writers || prwlock->state < 0) {
+ if (abstime)
+ ret = _pthread_cond_timedwait
+ (&prwlock->read_signal,
+ &prwlock->lock, abstime);
+ else
+ ret = _pthread_cond_wait(&prwlock->read_signal,
&prwlock->lock);
- else
- ret = pthread_cond_timedwait(&prwlock->read_signal,
- &prwlock->lock, timeout);
-
- if (ret != 0 && ret != EINTR) {
- /* can't do a whole lot if this fails */
- pthread_mutex_unlock(&prwlock->lock);
- return(ret);
+ if (ret != 0) {
+ /* can't do a whole lot if this fails */
+ _pthread_mutex_unlock(&prwlock->lock);
+ return (ret);
+ }
}
}
- ++prwlock->state; /* indicate we are locked for reading */
- ret = insert_rwlock(prwlock, RWT_READ);
- if (ret != 0) {
- pthread_mutex_unlock(&prwlock->lock);
- return (ret);
- }
+ curthread->rdlock_count++;
+ prwlock->state++; /* indicate we are locked for reading */
/*
* Something is really wrong if this call fails. Returning
@@ -212,262 +214,207 @@ rwlock_rdlock_common(pthread_rwlock_t *rwlock, int nonblocking,
* lock. Decrementing 'state' is no good because we probably
* don't have the monitor lock.
*/
- pthread_mutex_unlock(&prwlock->lock);
+ _pthread_mutex_unlock(&prwlock->lock);
- return(0);
+ return (ret);
}
int
_pthread_rwlock_rdlock (pthread_rwlock_t *rwlock)
{
- return (rwlock_rdlock_common(rwlock, 0, NULL));
+ return (rwlock_rdlock_common(rwlock, NULL));
}
int
-_pthread_rwlock_timedrdlock(pthread_rwlock_t *rwlock,
- const struct timespec *timeout)
+_pthread_rwlock_timedrdlock (pthread_rwlock_t *rwlock,
+ const struct timespec *abstime)
{
- return (rwlock_rdlock_common(rwlock, 0, timeout));
+ return (rwlock_rdlock_common(rwlock, abstime));
}
int
_pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock)
{
- return (rwlock_rdlock_common(rwlock, 1, NULL));
-}
-
-int
-_pthread_rwlock_unlock (pthread_rwlock_t *rwlock)
-{
- struct rwlock_held *rh;
- pthread_rwlock_t prwlock;
- int ret;
+ struct pthread *curthread = _get_curthread();
+ pthread_rwlock_t prwlock;
+ int ret;
- rh = NULL;
- if (rwlock == NULL || *rwlock == NULL)
- return(EINVAL);
+ if (rwlock == NULL)
+ return (EINVAL);
prwlock = *rwlock;
- /* grab the monitor lock */
- if ((ret = pthread_mutex_lock(&prwlock->lock)) != 0)
- return(ret);
+ /* check for static initialization */
+ if (prwlock == NULL) {
+ if ((ret = init_static(curthread, rwlock)) != 0)
+ return (ret);
- if (curthread->rwlockList != NULL) {
- LIST_FOREACH(rh, curthread->rwlockList, rh_link) {
- if (rh->rh_rwlock == prwlock)
- break;
- }
+ prwlock = *rwlock;
}
- if (rh == NULL) {
- ret = EPERM;
- goto out;
- }
- if (prwlock->state > 0) {
- PTHREAD_ASSERT(rh->rh_wrcount == 0,
- "write count on a readlock should be zero!");
- rh->rh_rdcount--;
- if (--prwlock->state == 0 && prwlock->blocked_writers)
- ret = pthread_cond_signal(&prwlock->write_signal);
- } else if (prwlock->state < 0) {
- PTHREAD_ASSERT(rh->rh_rdcount == 0,
- "read count on a writelock should be zero!");
- rh->rh_wrcount--;
- prwlock->state = 0;
- if (prwlock->blocked_writers)
- ret = pthread_cond_signal(&prwlock->write_signal);
- else
- ret = pthread_cond_broadcast(&prwlock->read_signal);
- } else {
- /*
- * No thread holds this lock. We should never get here.
- */
- PTHREAD_ASSERT(0, "state=0 on read-write lock held by thread");
- ret = EPERM;
- goto out;
+
+ /* grab the monitor lock */
+ if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
+ return (ret);
+
+ curthread = _get_curthread();
+ if (prwlock->state == MAX_READ_LOCKS)
+ ret = EAGAIN;
+ else if ((curthread->rdlock_count > 0) && (prwlock->state > 0)) {
+ /* see comment for pthread_rwlock_rdlock() */
+ curthread->rdlock_count++;
+ prwlock->state++;
}
- if (rh->rh_wrcount == 0 && rh->rh_rdcount == 0) {
- LIST_REMOVE(rh, rh_link);
- free(rh);
+ /* give writers priority over readers */
+ else if (prwlock->blocked_writers || prwlock->state < 0)
+ ret = EBUSY;
+ else {
+ curthread->rdlock_count++;
+ prwlock->state++; /* indicate we are locked for reading */
}
-out:
- /* see the comment on this in rwlock_rdlock_common */
- pthread_mutex_unlock(&prwlock->lock);
+ /* see the comment on this in pthread_rwlock_rdlock */
+ _pthread_mutex_unlock(&prwlock->lock);
- return(ret);
+ return (ret);
}
int
-_pthread_rwlock_wrlock (pthread_rwlock_t *rwlock)
+_pthread_rwlock_trywrlock (pthread_rwlock_t *rwlock)
{
- return (rwlock_wrlock_common(rwlock, 0, NULL));
-}
+ struct pthread *curthread = _get_curthread();
+ pthread_rwlock_t prwlock;
+ int ret;
-int
-_pthread_rwlock_timedwrlock (pthread_rwlock_t *rwlock,
- const struct timespec *timeout)
-{
- return (rwlock_wrlock_common(rwlock, 0, timeout));
+ if (rwlock == NULL)
+ return (EINVAL);
+
+ prwlock = *rwlock;
+
+ /* check for static initialization */
+ if (prwlock == NULL) {
+ if ((ret = init_static(curthread, rwlock)) != 0)
+ return (ret);
+
+ prwlock = *rwlock;
+ }
+
+ /* grab the monitor lock */
+ if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
+ return (ret);
+
+ if (prwlock->state != 0)
+ ret = EBUSY;
+ else
+ /* indicate we are locked for writing */
+ prwlock->state = -1;
+
+ /* see the comment on this in pthread_rwlock_rdlock */
+ _pthread_mutex_unlock(&prwlock->lock);
+
+ return (ret);
}
int
-_pthread_rwlock_trywrlock (pthread_rwlock_t *rwlock)
+_pthread_rwlock_unlock (pthread_rwlock_t *rwlock)
{
- return (rwlock_wrlock_common(rwlock, 1, NULL));
+ struct pthread *curthread;
+ pthread_rwlock_t prwlock;
+ int ret;
+
+ if (rwlock == NULL)
+ return (EINVAL);
+
+ prwlock = *rwlock;
+
+ if (prwlock == NULL)
+ return (EINVAL);
+
+ /* grab the monitor lock */
+ if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
+ return (ret);
+
+ curthread = _get_curthread();
+ if (prwlock->state > 0) {
+ curthread->rdlock_count--;
+ prwlock->state--;
+ if (prwlock->state == 0 && prwlock->blocked_writers)
+ ret = _pthread_cond_signal(&prwlock->write_signal);
+ } else if (prwlock->state < 0) {
+ prwlock->state = 0;
+
+ if (prwlock->blocked_writers)
+ ret = _pthread_cond_signal(&prwlock->write_signal);
+ else
+ ret = _pthread_cond_broadcast(&prwlock->read_signal);
+ } else
+ ret = EINVAL;
+
+ /* see the comment on this in pthread_rwlock_rdlock */
+ _pthread_mutex_unlock(&prwlock->lock);
+
+ return (ret);
}
-/*
- * If nonblocking is 0 this function will wait on the lock. If
- * it is greater than 0 it will return immediately with EBUSY.
- */
static int
-rwlock_wrlock_common(pthread_rwlock_t *rwlock, int nonblocking,
- const struct timespec *timeout)
+rwlock_wrlock_common (pthread_rwlock_t *rwlock, const struct timespec *abstime)
{
- struct rwlock_held *rh;
- pthread_rwlock_t prwlock;
- int ret;
+ struct pthread *curthread = _get_curthread();
+ pthread_rwlock_t prwlock;
+ int ret;
- rh = NULL;
if (rwlock == NULL)
- return(EINVAL);
-
- /*
- * Check the timeout value for validity.
- */
- if (timeout != NULL &&
- (timeout->tv_nsec < 0 || timeout->tv_nsec >= 1000000000))
return (EINVAL);
- if ((ret = rwlock_init_static(rwlock)) !=0 )
- return (ret);
prwlock = *rwlock;
- /* grab the monitor lock */
- if ((ret = pthread_mutex_lock(&prwlock->lock)) != 0)
- return(ret);
+ /* check for static initialization */
+ if (prwlock == NULL) {
+ if ((ret = init_static(curthread, rwlock)) != 0)
+ return (ret);
- while (prwlock->state != 0) {
- if (nonblocking) {
- pthread_mutex_unlock(&prwlock->lock);
- return (EBUSY);
- }
+ prwlock = *rwlock;
+ }
- /*
- * If this thread already holds the lock for reading
- * or writing we have a deadlock situation.
- */
- if (curthread->rwlockList != NULL) {
- LIST_FOREACH(rh, curthread->rwlockList, rh_link) {
- if (rh->rh_rwlock == prwlock) {
- PTHREAD_ASSERT((rh->rh_rdcount > 0 ||
- rh->rh_wrcount > 0),
- "Invalid 0 R/RW count!");
- pthread_mutex_unlock(&prwlock->lock);
- return (EDEADLK);
- break;
- }
- }
- }
+ /* grab the monitor lock */
+ if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
+ return (ret);
- ++prwlock->blocked_writers;
+ while (prwlock->state != 0) {
+ prwlock->blocked_writers++;
- if (timeout == NULL)
- ret = pthread_cond_wait(&prwlock->write_signal,
- &prwlock->lock);
+ if (abstime != NULL)
+ ret = _pthread_cond_timedwait(&prwlock->write_signal,
+ &prwlock->lock, abstime);
else
- ret = pthread_cond_timedwait(&prwlock->write_signal,
- &prwlock->lock, timeout);
-
- if (ret != 0 && ret != EINTR) {
- --prwlock->blocked_writers;
- pthread_mutex_unlock(&prwlock->lock);
- return(ret);
+ ret = _pthread_cond_wait(&prwlock->write_signal,
+ &prwlock->lock);
+ if (ret != 0) {
+ prwlock->blocked_writers--;
+ _pthread_mutex_unlock(&prwlock->lock);
+ return (ret);
}
- --prwlock->blocked_writers;
+ prwlock->blocked_writers--;
}
/* indicate we are locked for writing */
prwlock->state = -1;
- ret = insert_rwlock(prwlock, RWT_WRITE);
- if (ret != 0) {
- pthread_mutex_unlock(&prwlock->lock);
- return (ret);
- }
/* see the comment on this in pthread_rwlock_rdlock */
- pthread_mutex_unlock(&prwlock->lock);
+ _pthread_mutex_unlock(&prwlock->lock);
- return(0);
+ return (ret);
}
-static int
-insert_rwlock(struct pthread_rwlock *prwlock, enum rwlock_type rwt)
+int
+_pthread_rwlock_wrlock (pthread_rwlock_t *rwlock)
{
- struct rwlock_held *rh;
-
- /*
- * Initialize the rwlock list in the thread. Although this function
- * may be called for many read-write locks, the initialization
- * of the the head happens only once during the lifetime of
- * the thread.
- */
- if (curthread->rwlockList == NULL) {
- curthread->rwlockList =
- (struct rwlock_listhead *)malloc(sizeof(struct rwlock_listhead));
- if (curthread->rwlockList == NULL) {
- return (ENOMEM);
- }
- LIST_INIT(curthread->rwlockList);
- }
-
- LIST_FOREACH(rh, curthread->rwlockList, rh_link) {
- if (rh->rh_rwlock == prwlock) {
- if (rwt == RWT_READ)
- rh->rh_rdcount++;
- else if (rwt == RWT_WRITE)
- rh->rh_wrcount++;
- return (0);
- }
- }
-
- /*
- * This is the first time we're holding this lock,
- * create a new entry.
- */
- rh = (struct rwlock_held *)malloc(sizeof(struct rwlock_held));
- if (rh == NULL)
- return (ENOMEM);
- rh->rh_rwlock = prwlock;
- rh->rh_rdcount = 0;
- rh->rh_wrcount = 0;
- if (rwt == RWT_READ)
- rh->rh_rdcount = 1;
- else if (rwt == RWT_WRITE)
- rh->rh_wrcount = 1;
- LIST_INSERT_HEAD(curthread->rwlockList, rh, rh_link);
- return (0);
+ return (rwlock_wrlock_common (rwlock, NULL));
}
-/*
- * There are consumers of rwlocks, inluding our own libc, that depend on
- * a PTHREAD_RWLOCK_INITIALIZER to do for rwlocks what
- * a similarly named symbol does for statically initialized mutexes.
- * This symbol was dropped in The Open Group Base Specifications Issue 6
- * and does not exist in IEEE Std 1003.1, 2003, but it should still be
- * supported for backwards compatibility.
- */
-static int
-rwlock_init_static(struct pthread_rwlock **rwlock)
+int
+_pthread_rwlock_timedwrlock (pthread_rwlock_t *rwlock,
+ const struct timespec *abstime)
{
- int error;
-
- error = 0;
- UMTX_LOCK(&init_lock);
- if (*rwlock == PTHREAD_RWLOCK_INITIALIZER)
- error = _pthread_rwlock_init(rwlock, NULL);
- UMTX_UNLOCK(&init_lock);
- return (error);
+ return (rwlock_wrlock_common (rwlock, abstime));
}
diff --git a/lib/libthr/thread/thr_self.c b/lib/libthr/thread/thr_self.c
index 0d69538..baaf44c 100644
--- a/lib/libthr/thread/thr_self.c
+++ b/lib/libthr/thread/thr_self.c
@@ -31,7 +31,9 @@
*
* $FreeBSD$
*/
+
#include <pthread.h>
+
#include "thr_private.h"
__weak_reference(_pthread_self, pthread_self);
@@ -39,6 +41,8 @@ __weak_reference(_pthread_self, pthread_self);
pthread_t
_pthread_self(void)
{
+ _thr_check_init();
+
/* Return the running thread pointer: */
- return (curthread);
+ return (_get_curthread());
}
diff --git a/lib/libthr/thread/thr_sem.c b/lib/libthr/thread/thr_sem.c
index c634034..bd952ef 100644
--- a/lib/libthr/thread/thr_sem.c
+++ b/lib/libthr/thread/thr_sem.c
@@ -1,4 +1,5 @@
/*
+ * Copyright (C) 2005 David Xu <davidxu@freebsd.org>.
* Copyright (C) 2000 Jason Evans <jasone@freebsd.org>.
* All rights reserved.
*
@@ -29,227 +30,240 @@
* $FreeBSD$
*/
-#include <stdlib.h>
+#include "namespace.h"
+#include <sys/queue.h>
#include <errno.h>
-#include <semaphore.h>
+#include <fcntl.h>
#include <pthread.h>
+#include <semaphore.h>
+#include <stdlib.h>
+#include <time.h>
+#include <_semaphore.h>
+#include "un-namespace.h"
+
#include "thr_private.h"
-#define _SEM_CHECK_VALIDITY(sem) \
- if ((*(sem))->magic != SEM_MAGIC) { \
- errno = EINVAL; \
- retval = -1; \
- goto RETURN; \
- }
__weak_reference(_sem_init, sem_init);
__weak_reference(_sem_destroy, sem_destroy);
-__weak_reference(_sem_open, sem_open);
-__weak_reference(_sem_close, sem_close);
-__weak_reference(_sem_unlink, sem_unlink);
-__weak_reference(_sem_wait, sem_wait);
+__weak_reference(_sem_getvalue, sem_getvalue);
__weak_reference(_sem_trywait, sem_trywait);
+__weak_reference(_sem_wait, sem_wait);
+__weak_reference(_sem_timedwait, sem_timedwait);
__weak_reference(_sem_post, sem_post);
-__weak_reference(_sem_getvalue, sem_getvalue);
-int
-_sem_init(sem_t *sem, int pshared, unsigned int value)
+static inline int
+sem_check_validity(sem_t *sem)
{
- int retval;
- /*
- * Range check the arguments.
- */
- if (pshared != 0) {
- /*
- * The user wants a semaphore that can be shared among
- * processes, which this implementation can't do. Sounds like a
- * permissions problem to me (yeah right).
- */
- errno = EPERM;
- retval = -1;
- goto RETURN;
+ if ((sem != NULL) && ((*sem)->magic == SEM_MAGIC))
+ return (0);
+ else {
+ errno = EINVAL;
+ return (-1);
}
+}
+
+static sem_t
+sem_alloc(unsigned int value, semid_t semid, int system_sem)
+{
+ sem_t sem;
if (value > SEM_VALUE_MAX) {
errno = EINVAL;
- retval = -1;
- goto RETURN;
+ return (NULL);
}
- *sem = (sem_t)malloc(sizeof(struct sem));
- if (*sem == NULL) {
+ sem = (sem_t)malloc(sizeof(struct sem));
+ if (sem == NULL) {
errno = ENOSPC;
- retval = -1;
- goto RETURN;
+ return (NULL);
}
-
+ _thr_umtx_init((umtx_t *)&sem->lock);
/*
- * Initialize the semaphore.
+	 * Fortunately, count and nwaiters are adjacent, so we can
+	 * use umtx_wait to wait on them; umtx_wait needs an address
+	 * that can be accessed as a long integer.
*/
- if (pthread_mutex_init(&(*sem)->lock, NULL) != 0) {
- free(*sem);
- errno = ENOSPC;
- retval = -1;
- goto RETURN;
- }
-
- if (pthread_cond_init(&(*sem)->gtzero, NULL) != 0) {
- pthread_mutex_destroy(&(*sem)->lock);
- free(*sem);
- errno = ENOSPC;
- retval = -1;
- goto RETURN;
- }
-
- (*sem)->count = (u_int32_t)value;
- (*sem)->nwaiters = 0;
- (*sem)->magic = SEM_MAGIC;
-
- retval = 0;
- RETURN:
- return retval;
+ sem->count = (u_int32_t)value;
+ sem->nwaiters = 0;
+ sem->magic = SEM_MAGIC;
+ sem->semid = semid;
+ sem->syssem = system_sem;
+ return (sem);
}
int
-_sem_destroy(sem_t *sem)
+_sem_init(sem_t *sem, int pshared, unsigned int value)
{
- int retval;
-
- _SEM_CHECK_VALIDITY(sem);
-
- /* Make sure there are no waiters. */
- pthread_mutex_lock(&(*sem)->lock);
- if ((*sem)->nwaiters > 0) {
- pthread_mutex_unlock(&(*sem)->lock);
- errno = EBUSY;
- retval = -1;
- goto RETURN;
- }
- pthread_mutex_unlock(&(*sem)->lock);
-
- pthread_mutex_destroy(&(*sem)->lock);
- pthread_cond_destroy(&(*sem)->gtzero);
- (*sem)->magic = 0;
+ semid_t semid;
- free(*sem);
+ semid = (semid_t)SEM_USER;
+ if ((pshared != 0) && (ksem_init(&semid, value) != 0))
+ return (-1);
- retval = 0;
- RETURN:
- return retval;
-}
-
-sem_t *
-_sem_open(const char *name, int oflag, ...)
-{
- errno = ENOSYS;
- return SEM_FAILED;
+ (*sem) = sem_alloc(value, semid, pshared);
+ if ((*sem) == NULL) {
+ if (pshared != 0)
+ ksem_destroy(semid);
+ return (-1);
+ }
+ return (0);
}
int
-_sem_close(sem_t *sem)
+_sem_destroy(sem_t *sem)
{
- errno = ENOSYS;
- return -1;
-}
+ int retval;
-int
-_sem_unlink(const char *name)
-{
- errno = ENOSYS;
- return -1;
+ if (sem_check_validity(sem) != 0)
+ return (-1);
+
+ /*
+	 * If this is a system semaphore, let the kernel track it;
+	 * otherwise, make sure there are no waiters.
+ */
+ if ((*sem)->syssem != 0)
+ retval = ksem_destroy((*sem)->semid);
+ else {
+ retval = 0;
+ (*sem)->magic = 0;
+ }
+ if (retval == 0)
+ free(*sem);
+ return (retval);
}
int
-_sem_wait(sem_t *sem)
+_sem_getvalue(sem_t * __restrict sem, int * __restrict sval)
{
- int retval;
+ int retval;
- _thread_enter_cancellation_point();
-
- _SEM_CHECK_VALIDITY(sem);
-
- pthread_mutex_lock(&(*sem)->lock);
+ if (sem_check_validity(sem) != 0)
+ return (-1);
- while ((*sem)->count == 0) {
- (*sem)->nwaiters++;
- pthread_cond_wait(&(*sem)->gtzero, &(*sem)->lock);
- (*sem)->nwaiters--;
+ if ((*sem)->syssem != 0)
+ retval = ksem_getvalue((*sem)->semid, sval);
+ else {
+ *sval = (int)(*sem)->count;
+ retval = 0;
}
- (*sem)->count--;
-
- pthread_mutex_unlock(&(*sem)->lock);
-
- retval = 0;
- RETURN:
- _thread_leave_cancellation_point();
- return retval;
+ return (retval);
}
int
_sem_trywait(sem_t *sem)
{
- int retval;
+ int val;
- _SEM_CHECK_VALIDITY(sem);
+ if (sem_check_validity(sem) != 0)
+ return (-1);
- pthread_mutex_lock(&(*sem)->lock);
+ if ((*sem)->syssem != 0)
+ return (ksem_trywait((*sem)->semid));
- if ((*sem)->count > 0) {
- (*sem)->count--;
- retval = 0;
- } else {
- errno = EAGAIN;
- retval = -1;
+ while ((val = (*sem)->count) > 0) {
+ if (atomic_cmpset_acq_int(&(*sem)->count, val, val - 1))
+ return (0);
}
-
- pthread_mutex_unlock(&(*sem)->lock);
-
- RETURN:
- return retval;
+ errno = EAGAIN;
+ return (-1);
}
int
-_sem_post(sem_t *sem)
+_sem_wait(sem_t *sem)
{
- int retval;
+ struct pthread *curthread;
+ int val, oldcancel, retval;
+
+ if (sem_check_validity(sem) != 0)
+ return (-1);
+
+ curthread = _get_curthread();
+ if ((*sem)->syssem != 0) {
+ oldcancel = _thr_cancel_enter(curthread);
+ retval = ksem_wait((*sem)->semid);
+ _thr_cancel_leave(curthread, oldcancel);
+ return (retval);
+ }
- _SEM_CHECK_VALIDITY(sem);
+ _pthread_testcancel();
+ do {
+ while ((val = (*sem)->count) > 0) {
+ if (atomic_cmpset_acq_int(&(*sem)->count, val, val - 1))
+ return (0);
+ }
+ oldcancel = _thr_cancel_enter(curthread);
+ retval = _thr_umtx_wait((umtx_t *)&(*sem)->count, 0, NULL);
+ _thr_cancel_leave(curthread, oldcancel);
+ } while (retval == 0);
+ errno = retval;
+ return (-1);
+}
+
+int
+_sem_timedwait(sem_t * __restrict sem, struct timespec * __restrict abstime)
+{
+ struct timespec ts, ts2;
+ struct pthread *curthread;
+ int val, oldcancel, retval;
+
+ if (sem_check_validity(sem) != 0)
+ return (-1);
+
+ curthread = _get_curthread();
+ if ((*sem)->syssem != 0) {
+ oldcancel = _thr_cancel_enter(curthread);
+ retval = ksem_timedwait((*sem)->semid, abstime);
+ _thr_cancel_leave(curthread, oldcancel);
+ return (retval);
+ }
/*
- * sem_post() is required to be safe to call from within signal
- * handlers. Thus, we must defer signals.
+ * The timeout argument is only supposed to
+ * be checked if the thread would have blocked.
*/
- pthread_mutex_lock(&(*sem)->lock);
-
- /* GIANT_LOCK(curthread); */
-
- (*sem)->count++;
- if ((*sem)->nwaiters > 0)
- pthread_cond_signal(&(*sem)->gtzero);
-
- /* GIANT_UNLOCK(curthread); */
-
- pthread_mutex_unlock(&(*sem)->lock);
-
- retval = 0;
- RETURN:
- return retval;
+ _pthread_testcancel();
+ do {
+ while ((val = (*sem)->count) > 0) {
+ if (atomic_cmpset_acq_int(&(*sem)->count, val, val - 1))
+ return (0);
+ }
+ if (abstime == NULL) {
+ errno = EINVAL;
+ return (-1);
+ }
+ clock_gettime(CLOCK_REALTIME, &ts);
+ TIMESPEC_SUB(&ts2, abstime, &ts);
+ oldcancel = _thr_cancel_enter(curthread);
+ retval = _thr_umtx_wait((umtx_t *)&(*sem)->count, 0, &ts2);
+ _thr_cancel_leave(curthread, oldcancel);
+ } while (retval == 0);
+ errno = retval;
+ return (-1);
}
int
-_sem_getvalue(sem_t *sem, int *sval)
+_sem_post(sem_t *sem)
{
- int retval;
-
- _SEM_CHECK_VALIDITY(sem);
+ int val, retval;
+
+ if (sem_check_validity(sem) != 0)
+ return (-1);
- pthread_mutex_lock(&(*sem)->lock);
- *sval = (int)(*sem)->count;
- pthread_mutex_unlock(&(*sem)->lock);
+ if ((*sem)->syssem != 0)
+ return (ksem_post((*sem)->semid));
- retval = 0;
- RETURN:
- return retval;
+ /*
+	 * sem_post() is required to be safe to call from within
+	 * signal handlers; this lock-free code satisfies that requirement.
+ */
+ do {
+ val = (*sem)->count;
+ } while (!atomic_cmpset_acq_int(&(*sem)->count, val, val + 1));
+ retval = _thr_umtx_wake((umtx_t *)&(*sem)->count, val + 1);
+ if (retval > 0)
+ retval = 0;
+ return (retval);
}
diff --git a/lib/libthr/thread/thr_seterrno.c b/lib/libthr/thread/thr_seterrno.c
index ec801d6..f481799 100644
--- a/lib/libthr/thread/thr_seterrno.c
+++ b/lib/libthr/thread/thr_seterrno.c
@@ -31,23 +31,23 @@
*
* $FreeBSD$
*/
+
#include <pthread.h>
+
#include "thr_private.h"
/*
* This function needs to reference the global error variable which is
* normally hidden from the user.
*/
-#ifdef errno
#undef errno
-#endif
extern int errno;
void
_thread_seterrno(pthread_t thread, int error)
{
/* Check for the initial thread: */
- if (thread == _thread_initial)
+ if (thread == NULL || thread == _thr_initial)
/* The initial thread always uses the global error variable: */
errno = error;
else
diff --git a/lib/libthr/thread/thr_setschedparam.c b/lib/libthr/thread/thr_setschedparam.c
index fff1abf..4f0a60d 100644
--- a/lib/libthr/thread/thr_setschedparam.c
+++ b/lib/libthr/thread/thr_setschedparam.c
@@ -31,93 +31,106 @@
*
* $FreeBSD$
*/
+
#include <errno.h>
#include <sys/param.h>
#include <pthread.h>
-#include <stdlib.h>
+
#include "thr_private.h"
-__weak_reference(_pthread_getschedparam, pthread_getschedparam);
__weak_reference(_pthread_setschedparam, pthread_setschedparam);
int
-_pthread_getschedparam(pthread_t pthread, int *policy,
- struct sched_param *param)
-{
- if (param == NULL || policy == NULL)
- return (EINVAL);
- if (_find_thread(pthread) == ESRCH)
- return (ESRCH);
- param->sched_priority = pthread->base_priority;
- *policy = pthread->attr.sched_policy;
- return(0);
-}
-
-int
_pthread_setschedparam(pthread_t pthread, int policy,
const struct sched_param *param)
{
- struct pthread_mutex *mtx;
- int old_prio;
+ struct pthread *curthread = _get_curthread();
+ int in_syncq;
+ int in_readyq = 0;
+ int old_prio;
+ int ret = 0;
- mtx = NULL;
- old_prio = 0;
- if ((param == NULL) || (policy < SCHED_FIFO) || (policy > SCHED_RR))
- return (EINVAL);
- if ((param->sched_priority < PTHREAD_MIN_PRIORITY) ||
- (param->sched_priority > PTHREAD_MAX_PRIORITY))
- return (ENOTSUP);
- if (_find_thread(pthread) != 0)
- return (ESRCH);
-
- /*
- * If the pthread is waiting on a mutex grab it now. Doing it now
- * even though we do not need it immediately greatly simplifies the
- * LOR avoidance code.
- */
- do {
- PTHREAD_LOCK(pthread);
- if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) {
- mtx = pthread->data.mutex;
- if (_spintrylock(&mtx->lock) == EBUSY)
- PTHREAD_UNLOCK(pthread);
- else
- break;
- } else {
- mtx = NULL;
- break;
- }
- } while (1);
+ if ((param == NULL) || (policy < SCHED_FIFO) || (policy > SCHED_RR)) {
+ /* Return an invalid argument error: */
+ ret = EINVAL;
+ } else if ((param->sched_priority < THR_MIN_PRIORITY) ||
+ (param->sched_priority > THR_MAX_PRIORITY)) {
+ /* Return an unsupported value error. */
+ ret = ENOTSUP;
- PTHREAD_ASSERT(pthread->active_priority >= pthread->inherited_priority,
- "active priority cannot be less than inherited priority");
- old_prio = pthread->base_priority;
- pthread->base_priority = param->sched_priority;
- if (param->sched_priority <= pthread->active_priority) {
+ /* Find the thread in the list of active threads: */
+ } else if ((ret = _thr_ref_add(curthread, pthread, /*include dead*/0))
+ == 0) {
/*
- * Active priority is affected only if it was the
- * base priority and the new base priority is lower.
+ * Lock the threads scheduling queue while we change
+ * its priority:
*/
- if (pthread->active_priority == old_prio &&
- pthread->active_priority != pthread->inherited_priority) {
- pthread->active_priority = param->sched_priority;
- readjust_priorities(pthread, mtx);
+ THR_THREAD_LOCK(curthread, pthread);
+ if (pthread->state == PS_DEAD) {
+ THR_THREAD_UNLOCK(curthread, pthread);
+ _thr_ref_delete(curthread, pthread);
+ return (ESRCH);
}
+ in_syncq = pthread->sflags & THR_FLAGS_IN_SYNCQ;
- } else {
- /*
- * New base priority is greater than active priority. This
- * only affects threads that are holding priority inheritance
- * mutexes this thread is waiting on and its position in the
- * queue.
- */
- pthread->active_priority = param->sched_priority;
- readjust_priorities(pthread, mtx);
+ /* Set the scheduling policy: */
+ pthread->attr.sched_policy = policy;
+
+ if (param->sched_priority ==
+ THR_BASE_PRIORITY(pthread->base_priority))
+ /*
+ * There is nothing to do; unlock the threads
+ * scheduling queue.
+ */
+ THR_THREAD_UNLOCK(curthread, pthread);
+ else {
+ /*
+ * Remove the thread from its current priority
+ * queue before any adjustments are made to its
+ * active priority:
+ */
+ old_prio = pthread->active_priority;
+ /* if ((pthread->flags & THR_FLAGS_IN_RUNQ) != 0) */ {
+ in_readyq = 1;
+ /* THR_RUNQ_REMOVE(pthread); */
+ }
+
+ /* Set the thread base priority: */
+ pthread->base_priority &=
+ (THR_SIGNAL_PRIORITY | THR_RT_PRIORITY);
+ pthread->base_priority = param->sched_priority;
+ /* Recalculate the active priority: */
+ pthread->active_priority = MAX(pthread->base_priority,
+ pthread->inherited_priority);
+
+ if (in_readyq) {
+ if ((pthread->priority_mutex_count > 0) &&
+ (old_prio > pthread->active_priority)) {
+ /*
+ * POSIX states that if the priority is
+ * being lowered, the thread must be
+ * inserted at the head of the queue for
+ * its priority if it owns any priority
+	 * protection or inheritance mutexes.
+ */
+ /* THR_RUNQ_INSERT_HEAD(pthread); */
+ }
+ else
+ /* THR_RUNQ_INSERT_TAIL(pthread)*/ ;
+ }
+
+ /* Unlock the threads scheduling queue: */
+ THR_THREAD_UNLOCK(curthread, pthread);
+
+ /*
+ * Check for any mutex priority adjustments. This
+ * includes checking for a priority mutex on which
+ * this thread is waiting.
+ */
+ _mutex_notify_priochange(curthread, pthread, in_syncq);
+ }
+ _thr_ref_delete(curthread, pthread);
}
- pthread->attr.sched_policy = policy;
- PTHREAD_UNLOCK(pthread);
- if (mtx != NULL)
- _SPINUNLOCK(&mtx->lock);
- return(0);
+ return (ret);
}
diff --git a/lib/libthr/thread/thr_sig.c b/lib/libthr/thread/thr_sig.c
index 8a805af..1410059 100644
--- a/lib/libthr/thread/thr_sig.c
+++ b/lib/libthr/thread/thr_sig.c
@@ -1,28 +1,33 @@
/*
- * Copyright (c) 2003 Jeffrey Roberson <jeff@freebsd.org>
- * Copyright (c) 2003 Jonathan Mini <mini@freebsd.org>
+ * Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
- * notice unmodified, this list of conditions, and the following
- * disclaimer.
+ * notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by John Birrell.
+ * 4. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
*
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
*
* $FreeBSD$
*/
@@ -31,12 +36,11 @@
#include <sys/types.h>
#include <sys/signalvar.h>
#include <signal.h>
+#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
-#include <setjmp.h>
-#include <errno.h>
+#include <string.h>
#include <pthread.h>
-#include <stdlib.h>
#include "thr_private.h"
@@ -47,49 +51,195 @@
#define DBG_MSG(x...)
#endif
+static void
+sigcancel_handler(int sig, siginfo_t *info, ucontext_t *ucp)
+{
+ struct pthread *curthread = _get_curthread();
+
+ if (curthread->cancelflags & THR_CANCEL_AT_POINT)
+ pthread_testcancel();
+ if (curthread->flags & THR_FLAGS_NEED_SUSPEND) {
+ __sys_sigprocmask(SIG_SETMASK, &ucp->uc_sigmask, NULL);
+ _thr_suspend_check(curthread);
+ }
+}
+
+void
+_thr_suspend_check(struct pthread *curthread)
+{
+ long cycle;
+
+ /* Async suspend. */
+ _thr_signal_block(curthread);
+ THR_LOCK(curthread);
+ if ((curthread->flags & (THR_FLAGS_NEED_SUSPEND | THR_FLAGS_SUSPENDED))
+ == THR_FLAGS_NEED_SUSPEND) {
+ curthread->flags |= THR_FLAGS_SUSPENDED;
+ while (curthread->flags & THR_FLAGS_NEED_SUSPEND) {
+ cycle = curthread->cycle;
+ THR_UNLOCK(curthread);
+ _thr_signal_unblock(curthread);
+ _thr_umtx_wait(&curthread->cycle, cycle, NULL);
+ _thr_signal_block(curthread);
+ THR_LOCK(curthread);
+ }
+ curthread->flags &= ~THR_FLAGS_SUSPENDED;
+ }
+ THR_UNLOCK(curthread);
+ _thr_signal_unblock(curthread);
+}
+
+void
+_thr_signal_init(void)
+{
+ struct sigaction act;
+
+ /* Install cancel handler. */
+ SIGEMPTYSET(act.sa_mask);
+ act.sa_flags = SA_SIGINFO | SA_RESTART;
+ act.sa_sigaction = (__siginfohandler_t *)&sigcancel_handler;
+ __sys_sigaction(SIGCANCEL, &act, NULL);
+}
+
+void
+_thr_signal_deinit(void)
+{
+}
+
+__weak_reference(_sigaction, sigaction);
+
+int
+_sigaction(int sig, const struct sigaction * act, struct sigaction * oact)
+{
+ /* Check if the signal number is out of range: */
+ if (sig < 1 || sig > _SIG_MAXSIG || sig == SIGCANCEL) {
+ /* Return an invalid argument: */
+ errno = EINVAL;
+ return (-1);
+ }
+
+ return __sys_sigaction(sig, act, oact);
+}
+
+__weak_reference(_sigprocmask, sigprocmask);
+
+int
+_sigprocmask(int how, const sigset_t *set, sigset_t *oset)
+{
+ const sigset_t *p = set;
+ sigset_t newset;
+
+ if (how != SIG_UNBLOCK) {
+ if (set != NULL) {
+ newset = *set;
+ SIGDELSET(newset, SIGCANCEL);
+ p = &newset;
+ }
+ }
+ return (__sys_sigprocmask(how, p, oset));
+}
+
__weak_reference(_pthread_sigmask, pthread_sigmask);
int
_pthread_sigmask(int how, const sigset_t *set, sigset_t *oset)
{
- int error;
+ if (_sigprocmask(how, set, oset))
+ return (errno);
+ return (0);
+}
+
+__weak_reference(_sigsuspend, sigsuspend);
+
+int
+_sigsuspend(const sigset_t * set)
+{
+ struct pthread *curthread = _get_curthread();
+ sigset_t newset;
+ const sigset_t *pset;
+ int oldcancel;
+ int ret;
- /*
- * This always sets the mask on the current thread.
- */
- error = sigprocmask(how, set, oset);
+ if (SIGISMEMBER(*set, SIGCANCEL)) {
+ newset = *set;
+ SIGDELSET(newset, SIGCANCEL);
+ pset = &newset;
+ } else
+ pset = set;
- /*
- * pthread_sigmask returns errno or success while sigprocmask returns
- * -1 and sets errno.
- */
- if (error == -1)
- error = errno;
+ oldcancel = _thr_cancel_enter(curthread);
+ ret = __sys_sigsuspend(pset);
+ _thr_cancel_leave(curthread, oldcancel);
- return (error);
+ return (ret);
}
+__weak_reference(__sigwait, sigwait);
+__weak_reference(__sigtimedwait, sigtimedwait);
+__weak_reference(__sigwaitinfo, sigwaitinfo);
-__weak_reference(_pthread_kill, pthread_kill);
+int
+__sigtimedwait(const sigset_t *set, siginfo_t *info,
+ const struct timespec * timeout)
+{
+ struct pthread *curthread = _get_curthread();
+ sigset_t newset;
+ const sigset_t *pset;
+ int oldcancel;
+ int ret;
+
+ if (SIGISMEMBER(*set, SIGCANCEL)) {
+ newset = *set;
+ SIGDELSET(newset, SIGCANCEL);
+ pset = &newset;
+ } else
+ pset = set;
+ oldcancel = _thr_cancel_enter(curthread);
+ ret = __sys_sigtimedwait(pset, info, timeout);
+ _thr_cancel_leave(curthread, oldcancel);
+ return (ret);
+}
int
-_pthread_kill(pthread_t pthread, int sig)
+__sigwaitinfo(const sigset_t *set, siginfo_t *info)
{
- int error;
-
- if (sig < 0 || sig > NSIG)
- return (EINVAL);
- if (_thread_initial == NULL)
- _thread_init();
- error = _find_thread(pthread);
- if (error != 0)
- return (error);
-
- /*
- * A 0 signal means do error-checking but don't send signal.
- */
- if (sig == 0)
- return (0);
-
- return (thr_kill(pthread->thr_id, sig));
+ struct pthread *curthread = _get_curthread();
+ sigset_t newset;
+ const sigset_t *pset;
+ int oldcancel;
+ int ret;
+
+ if (SIGISMEMBER(*set, SIGCANCEL)) {
+ newset = *set;
+ SIGDELSET(newset, SIGCANCEL);
+ pset = &newset;
+ } else
+ pset = set;
+
+ oldcancel = _thr_cancel_enter(curthread);
+ ret = __sys_sigwaitinfo(pset, info);
+ _thr_cancel_leave(curthread, oldcancel);
+ return (ret);
+}
+
+int
+__sigwait(const sigset_t *set, int *sig)
+{
+ struct pthread *curthread = _get_curthread();
+ sigset_t newset;
+ const sigset_t *pset;
+ int oldcancel;
+ int ret;
+
+ if (SIGISMEMBER(*set, SIGCANCEL)) {
+ newset = *set;
+ SIGDELSET(newset, SIGCANCEL);
+ pset = &newset;
+ } else
+ pset = set;
+
+ oldcancel = _thr_cancel_enter(curthread);
+ ret = __sys_sigwait(pset, sig);
+ _thr_cancel_leave(curthread, oldcancel);
+ return (ret);
}
diff --git a/lib/libthr/thread/thr_condattr_destroy.c b/lib/libthr/thread/thr_sigmask.c
index e0ade00..2024cc0 100644
--- a/lib/libthr/thread/thr_condattr_destroy.c
+++ b/lib/libthr/thread/thr_sigmask.c
@@ -31,23 +31,22 @@
*
* $FreeBSD$
*/
-#include <stdlib.h>
+
#include <errno.h>
+#include <signal.h>
#include <pthread.h>
#include "thr_private.h"
-__weak_reference(_pthread_condattr_destroy, pthread_condattr_destroy);
+__weak_reference(_pthread_sigmask, pthread_sigmask);
+
+extern int
+_sigprocmask(int how, const sigset_t *set, sigset_t *oset);
int
-_pthread_condattr_destroy(pthread_condattr_t *attr)
+_pthread_sigmask(int how, const sigset_t *set, sigset_t *oset)
{
- int ret;
- if (attr == NULL || *attr == NULL) {
- ret = EINVAL;
- } else {
- free(*attr);
- *attr = NULL;
- ret = 0;
- }
- return(ret);
+	/* use our overridden version of _sigprocmask */
+ if (_sigprocmask(how, set, oset))
+ return (errno);
+ return (0);
}
diff --git a/lib/libthr/thread/thr_mutexattr_destroy.c b/lib/libthr/thread/thr_single_np.c
index b9852b5..c8e5a94 100644
--- a/lib/libthr/thread/thr_mutexattr_destroy.c
+++ b/lib/libthr/thread/thr_single_np.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997 John Birrell <jb@cimlogic.com.au>.
+ * Copyright (c) 1996 John Birrell <jb@cimlogic.com.au>.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,23 +31,20 @@
*
* $FreeBSD$
*/
-#include <stdlib.h>
-#include <errno.h>
+
#include <pthread.h>
-#include "thr_private.h"
+#include <pthread_np.h>
-__weak_reference(_pthread_mutexattr_destroy, pthread_mutexattr_destroy);
+__weak_reference(_pthread_single_np, pthread_single_np);
-int
-_pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
+int _pthread_single_np()
{
- int ret;
- if (attr == NULL || *attr == NULL) {
- ret = EINVAL;
- } else {
- free(*attr);
- *attr = NULL;
- ret = 0;
- }
- return(ret);
+
+ /* Enter single-threaded (non-POSIX) scheduling mode: */
+ pthread_suspend_all_np();
+ /*
+ * XXX - Do we want to do this?
+ * __is_threaded = 0;
+ */
+ return (0);
}
diff --git a/lib/libthr/thread/thr_spec.c b/lib/libthr/thread/thr_spec.c
index eefae51..c5464b5 100644
--- a/lib/libthr/thread/thr_spec.c
+++ b/lib/libthr/thread/thr_spec.c
@@ -31,23 +31,17 @@
*
* $FreeBSD$
*/
+
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <pthread.h>
-#include "thr_private.h"
-struct pthread_key {
- spinlock_t lock;
- volatile int allocated;
- volatile int count;
- int seqno;
- void (*destructor) ();
-};
+#include "thr_private.h"
/* Static variables: */
-static struct pthread_key key_table[PTHREAD_KEYS_MAX];
+struct pthread_key _thread_keytable[PTHREAD_KEYS_MAX];
__weak_reference(_pthread_key_create, pthread_key_create);
__weak_reference(_pthread_key_delete, pthread_key_delete);
@@ -56,44 +50,49 @@ __weak_reference(_pthread_setspecific, pthread_setspecific);
int
-_pthread_key_create(pthread_key_t * key, void (*destructor) (void *))
+_pthread_key_create(pthread_key_t *key, void (*destructor) (void *))
{
- for ((*key) = 0; (*key) < PTHREAD_KEYS_MAX; (*key)++) {
- /* Lock the key table entry: */
- _SPINLOCK(&key_table[*key].lock);
+ struct pthread *curthread = _get_curthread();
+ int i;
+
+ /* Lock the key table: */
+ THR_LOCK_ACQUIRE(curthread, &_keytable_lock);
+ for (i = 0; i < PTHREAD_KEYS_MAX; i++) {
- if (key_table[(*key)].allocated == 0) {
- key_table[(*key)].allocated = 1;
- key_table[(*key)].destructor = destructor;
- key_table[(*key)].seqno++;
+ if (_thread_keytable[i].allocated == 0) {
+ _thread_keytable[i].allocated = 1;
+ _thread_keytable[i].destructor = destructor;
+ _thread_keytable[i].seqno++;
- /* Unlock the key table entry: */
- _SPINUNLOCK(&key_table[*key].lock);
+ /* Unlock the key table: */
+ THR_LOCK_RELEASE(curthread, &_keytable_lock);
+ *key = i;
return (0);
}
- /* Unlock the key table entry: */
- _SPINUNLOCK(&key_table[*key].lock);
}
+ /* Unlock the key table: */
+ THR_LOCK_RELEASE(curthread, &_keytable_lock);
return (EAGAIN);
}
int
_pthread_key_delete(pthread_key_t key)
{
+ struct pthread *curthread = _get_curthread();
int ret = 0;
- if (key < PTHREAD_KEYS_MAX) {
- /* Lock the key table entry: */
- _SPINLOCK(&key_table[key].lock);
+ if ((unsigned int)key < PTHREAD_KEYS_MAX) {
+ /* Lock the key table: */
+ THR_LOCK_ACQUIRE(curthread, &_keytable_lock);
- if (key_table[key].allocated)
- key_table[key].allocated = 0;
+ if (_thread_keytable[key].allocated)
+ _thread_keytable[key].allocated = 0;
else
ret = EINVAL;
- /* Unlock the key table entry: */
- _SPINUNLOCK(&key_table[key].lock);
+ /* Unlock the key table: */
+ THR_LOCK_RELEASE(curthread, &_keytable_lock);
} else
ret = EINVAL;
return (ret);
@@ -102,49 +101,57 @@ _pthread_key_delete(pthread_key_t key)
void
_thread_cleanupspecific(void)
{
+ struct pthread *curthread = _get_curthread();
+ void (*destructor)( void *);
void *data = NULL;
int key;
- int itr;
- void (*destructor)( void *);
-
- for (itr = 0; itr < PTHREAD_DESTRUCTOR_ITERATIONS; itr++) {
- for (key = 0; key < PTHREAD_KEYS_MAX; key++) {
- if (curthread->specific_data_count > 0) {
- /* Lock the key table entry: */
- _SPINLOCK(&key_table[key].lock);
- destructor = NULL;
-
- if (key_table[key].allocated &&
- (curthread->specific[key].data != NULL)) {
- if (curthread->specific[key].seqno ==
- key_table[key].seqno) {
- data = (void *) curthread->specific[key].data;
- destructor = key_table[key].destructor;
- }
- curthread->specific[key].data = NULL;
- curthread->specific_data_count--;
+ int i;
+
+ if (curthread->specific == NULL)
+ return;
+
+ /* Lock the key table: */
+ THR_LOCK_ACQUIRE(curthread, &_keytable_lock);
+ for (i = 0; (i < PTHREAD_DESTRUCTOR_ITERATIONS) &&
+ (curthread->specific_data_count > 0); i++) {
+ for (key = 0; (key < PTHREAD_KEYS_MAX) &&
+ (curthread->specific_data_count > 0); key++) {
+ destructor = NULL;
+
+ if (_thread_keytable[key].allocated &&
+ (curthread->specific[key].data != NULL)) {
+ if (curthread->specific[key].seqno ==
+ _thread_keytable[key].seqno) {
+ data = (void *)
+ curthread->specific[key].data;
+ destructor = _thread_keytable[key].destructor;
}
+ curthread->specific[key].data = NULL;
+ curthread->specific_data_count--;
+ }
- /* Unlock the key table entry: */
- _SPINUNLOCK(&key_table[key].lock);
-
+ /*
+			 * If there is a destructor, call it
+ * with the key table entry unlocked:
+ */
+ if (destructor != NULL) {
/*
- * If there is a destructore, call it
- * with the key table entry unlocked:
+ * Don't hold the lock while calling the
+ * destructor:
*/
- if (destructor)
- destructor(data);
- } else {
- free(curthread->specific);
- curthread->specific = NULL;
- return;
+ THR_LOCK_RELEASE(curthread, &_keytable_lock);
+ destructor(data);
+ THR_LOCK_ACQUIRE(curthread, &_keytable_lock);
}
}
}
- if (curthread->specific != NULL) {
- free(curthread->specific);
- curthread->specific = NULL;
- }
+ THR_LOCK_RELEASE(curthread, &_keytable_lock);
+ free(curthread->specific);
+ curthread->specific = NULL;
+ if (curthread->specific_data_count > 0)
+ stderr_debug("Thread %p has exited with leftover "
+ "thread-specific data after %d destructor iterations\n",
+ curthread, PTHREAD_DESTRUCTOR_ITERATIONS);
}
static inline struct pthread_specific_elem *
@@ -164,23 +171,24 @@ pthread_key_allocate_data(void)
int
_pthread_setspecific(pthread_key_t key, const void *value)
{
+ struct pthread *pthread;
int ret = 0;
- pthread_t pthread = curthread;
+
+ /* Point to the running thread: */
+ pthread = _get_curthread();
if ((pthread->specific) ||
(pthread->specific = pthread_key_allocate_data())) {
- if (key < PTHREAD_KEYS_MAX) {
- if (key_table[key].allocated) {
+ if ((unsigned int)key < PTHREAD_KEYS_MAX) {
+ if (_thread_keytable[key].allocated) {
if (pthread->specific[key].data == NULL) {
if (value != NULL)
pthread->specific_data_count++;
- } else {
- if (value == NULL)
- pthread->specific_data_count--;
- }
+ } else if (value == NULL)
+ pthread->specific_data_count--;
pthread->specific[key].data = value;
pthread->specific[key].seqno =
- key_table[key].seqno;
+ _thread_keytable[key].seqno;
ret = 0;
} else
ret = EINVAL;
@@ -194,14 +202,17 @@ _pthread_setspecific(pthread_key_t key, const void *value)
void *
_pthread_getspecific(pthread_key_t key)
{
- pthread_t pthread = curthread;
+ struct pthread *pthread;
void *data;
+ /* Point to the running thread: */
+ pthread = _get_curthread();
+
/* Check if there is specific data: */
- if (pthread->specific != NULL && key < PTHREAD_KEYS_MAX) {
+ if (pthread->specific != NULL && (unsigned int)key < PTHREAD_KEYS_MAX) {
/* Check if this key has been used before: */
- if (key_table[key].allocated &&
- (pthread->specific[key].seqno == key_table[key].seqno)) {
+ if (_thread_keytable[key].allocated &&
+ (pthread->specific[key].seqno == _thread_keytable[key].seqno)) {
/* Return the value: */
data = (void *) pthread->specific[key].data;
} else {
diff --git a/lib/libthr/thread/thr_spinlock.c b/lib/libthr/thread/thr_spinlock.c
index d590ad5..7d5cd84 100644
--- a/lib/libthr/thread/thr_spinlock.c
+++ b/lib/libthr/thread/thr_spinlock.c
@@ -1,5 +1,4 @@
/*
- * Copyright (c) 2004 Michael Telahun Makonnen <mtm@FreeBSD.Org>
* Copyright (c) 1997 John Birrell <jb@cimlogic.com.au>.
* All rights reserved.
*
@@ -35,141 +34,92 @@
*/
#include <sys/types.h>
-#include <machine/atomic.h>
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <string.h>
-#include <sched.h>
#include <pthread.h>
-#include <unistd.h>
-
#include <libc_private.h>
+#include <spinlock.h>
#include "thr_private.h"
-#define THR_SPIN_MAGIC 0xdadadada
-#define THR_SPIN_UNOWNED (void *)0
-#define MAGIC_TEST_RETURN_ON_FAIL(l) \
- do { \
- if ((l) == NULL || (l)->s_magic != THR_SPIN_MAGIC) \
- return (EINVAL); \
- } while(0)
+#define MAX_SPINLOCKS 20
-__weak_reference(_pthread_spin_destroy, pthread_spin_destroy);
-__weak_reference(_pthread_spin_init, pthread_spin_init);
-__weak_reference(_pthread_spin_lock, pthread_spin_lock);
-__weak_reference(_pthread_spin_trylock, pthread_spin_trylock);
-__weak_reference(_pthread_spin_unlock, pthread_spin_unlock);
+/*
+ * These data structures are used to trace all spinlocks
+ * in libc.
+ */
+struct spinlock_extra {
+ spinlock_t *owner;
+};
-int
-_pthread_spin_destroy(pthread_spinlock_t *lock)
-{
- MAGIC_TEST_RETURN_ON_FAIL((*lock));
- if ((*lock)->s_owner == THR_SPIN_UNOWNED) {
- (*lock)->s_magic = 0;
- free((*lock));
- *lock = NULL;
- return (0);
- }
- return (EBUSY);
-}
+static umtx_t spinlock_static_lock;
+static struct spinlock_extra extra[MAX_SPINLOCKS];
+static int spinlock_count;
+static int initialized;
-int
-_pthread_spin_init(pthread_spinlock_t *lock, int pshared)
-{
- struct pthread_spinlock *s;
-
- s = (struct pthread_spinlock *)malloc(sizeof(struct pthread_spinlock));
- if (s == NULL)
- return (ENOMEM);
- s->s_magic = THR_SPIN_MAGIC;
- s->s_owner = THR_SPIN_UNOWNED;
- *lock = s;
- return (0);
-}
+static void init_spinlock(spinlock_t *lck);
/*
- * If the caller sets nonblocking to 1, this function will return
- * immediately without acquiring the lock it is owned by another thread.
- * If set to 0, it will keep spinning until it acquires the lock.
+ * These are for compatibility only.  Spinlocks of this type
+ * are deprecated.
*/
-int
-_pthread_spin_lock(pthread_spinlock_t *lock)
-{
- MAGIC_TEST_RETURN_ON_FAIL(*lock);
- if ((*lock)->s_owner == curthread)
- return (EDEADLK);
- while (atomic_cmpset_acq_ptr(&(*lock)->s_owner, THR_SPIN_UNOWNED,
- (void *)curthread) != 1)
- ; /* SPIN */
- return (0);
-}
-
-int
-_pthread_spin_trylock(pthread_spinlock_t *lock)
-{
- MAGIC_TEST_RETURN_ON_FAIL(*lock);
- if (atomic_cmpset_acq_ptr(&(*lock)->s_owner, THR_SPIN_UNOWNED,
- (void *)curthread) == 1)
- return (0);
- return (EBUSY);
-}
-int
-_pthread_spin_unlock(pthread_spinlock_t *lock)
+void
+_spinunlock(spinlock_t *lck)
{
- MAGIC_TEST_RETURN_ON_FAIL(*lock);
- if (atomic_cmpset_rel_ptr(&(*lock)->s_owner, (void *)curthread,
- THR_SPIN_UNOWNED) == 1)
- return (0);
- return (EPERM);
+ THR_UMTX_UNLOCK(_get_curthread(), (umtx_t *)&lck->access_lock);
}
void
-_spinunlock(spinlock_t *lck)
+_spinlock(spinlock_t *lck)
{
- if (umtx_unlock((struct umtx *)lck, curthread->thr_id))
- abort();
+ if (!__isthreaded)
+ PANIC("Spinlock called when not threaded.");
+ if (!initialized)
+ PANIC("Spinlocks not initialized.");
+ if (lck->fname == NULL)
+ init_spinlock(lck);
+ THR_UMTX_LOCK(_get_curthread(), (umtx_t *)&lck->access_lock);
}
-/*
- * Lock a location for the running thread. Yield to allow other
- * threads to run if this thread is blocked because the lock is
- * not available. Note that this function does not sleep. It
- * assumes that the lock will be available very soon.
- */
void
-_spinlock(spinlock_t *lck)
+_spinlock_debug(spinlock_t *lck, char *fname, int lineno)
{
- if (umtx_lock((struct umtx *)lck, curthread->thr_id))
- abort();
+ _spinlock(lck);
}
-int
-_spintrylock(spinlock_t *lck)
+static void
+init_spinlock(spinlock_t *lck)
{
- int error;
+ static int count = 0;
- error = umtx_lock((struct umtx *)lck, curthread->thr_id);
- if (error != 0 && error != EBUSY)
- abort();
- return (error);
+ THR_UMTX_LOCK(_get_curthread(), &spinlock_static_lock);
+ if ((lck->fname == NULL) && (spinlock_count < MAX_SPINLOCKS)) {
+ lck->fname = (char *)&extra[spinlock_count];
+ extra[spinlock_count].owner = lck;
+ spinlock_count++;
+ }
+ THR_UMTX_UNLOCK(_get_curthread(), &spinlock_static_lock);
+ if (lck->fname == NULL && ++count < 5)
+ stderr_debug("Warning: exceeded max spinlocks");
}
-/*
- * Lock a location for the running thread. Yield to allow other
- * threads to run if this thread is blocked because the lock is
- * not available. Note that this function does not sleep. It
- * assumes that the lock will be available very soon.
- *
- * This function checks if the running thread has already locked the
- * location, warns if this occurs and creates a thread dump before
- * returning.
- */
void
-_spinlock_debug(spinlock_t *lck, char *fname, int lineno)
+_thr_spinlock_init(void)
{
- if (umtx_lock((struct umtx *)lck, curthread->thr_id))
- abort();
+ int i;
+
+ _thr_umtx_init(&spinlock_static_lock);
+ if (initialized != 0) {
+ /*
+ * called after fork() to reset state of libc spin locks,
+ * it is not quite right since libc may be in inconsistent
+ * state, resetting the locks to allow current thread to be
+ * able to hold them may not help things too much, but
+ * anyway, we do our best.
+ * it is better to do pthread_atfork in libc.
+ */
+ for (i = 0; i < spinlock_count; i++)
+ _thr_umtx_init((umtx_t *)&extra[i].owner->access_lock);
+ } else {
+ initialized = 1;
+ }
}
diff --git a/lib/libthr/thread/thr_stack.c b/lib/libthr/thread/thr_stack.c
index bd82157..3c5503f 100644
--- a/lib/libthr/thread/thr_stack.c
+++ b/lib/libthr/thread/thr_stack.c
@@ -26,12 +26,13 @@
*
* $FreeBSD$
*/
+
#include <sys/types.h>
#include <sys/mman.h>
-#include <sys/param.h>
#include <sys/queue.h>
#include <stdlib.h>
#include <pthread.h>
+
#include "thr_private.h"
/* Spare thread stack. */
@@ -43,31 +44,32 @@ struct stack {
};
/*
- * Default sized (stack and guard) spare stack queue. Stacks are cached to
- * avoid additional complexity managing mmap()ed stack regions. Spare stacks
- * are used in LIFO order to increase cache locality.
+ * Default sized (stack and guard) spare stack queue. Stacks are cached
+ * to avoid additional complexity managing mmap()ed stack regions. Spare
+ * stacks are used in LIFO order to increase cache locality.
*/
-static LIST_HEAD(, stack) _dstackq = LIST_HEAD_INITIALIZER(_dstackq);
+static LIST_HEAD(, stack) dstackq = LIST_HEAD_INITIALIZER(dstackq);
/*
* Miscellaneous sized (non-default stack and/or guard) spare stack queue.
- * Stacks are cached to avoid additional complexity managing mmap()ed stack
- * regions. This list is unordered, since ordering on both stack size and guard
- * size would be more trouble than it's worth. Stacks are allocated from this
- * cache on a first size match basis.
+ * Stacks are cached to avoid additional complexity managing mmap()ed
+ * stack regions. This list is unordered, since ordering on both stack
+ * size and guard size would be more trouble than it's worth. Stacks are
+ * allocated from this cache on a first size match basis.
*/
-static LIST_HEAD(, stack) _mstackq = LIST_HEAD_INITIALIZER(_mstackq);
+static LIST_HEAD(, stack) mstackq = LIST_HEAD_INITIALIZER(mstackq);
/**
- * Base address of the last stack allocated (including its red zone, if there is
- * one). Stacks are allocated contiguously, starting beyond the top of the main
- * stack. When a new stack is created, a red zone is typically created
- * (actually, the red zone is simply left unmapped) above the top of the stack,
- * such that the stack will not be able to grow all the way to the bottom of the
- * next stack. This isn't fool-proof. It is possible for a stack to grow by a
- * large amount, such that it grows into the next stack, and as long as the
- * memory within the red zone is never accessed, nothing will prevent one thread
- * stack from trouncing all over the next.
+ * Base address of the last stack allocated (including its red zone, if
+ * there is one). Stacks are allocated contiguously, starting beyond the
+ * top of the main stack. When a new stack is created, a red zone is
+ * typically created (actually, the red zone is mapped with PROT_NONE) above
+ * the top of the stack, such that the stack will not be able to grow all
+ * the way to the bottom of the next stack. This isn't fool-proof. It is
+ * possible for a stack to grow by a large amount, such that it grows into
+ * the next stack, and as long as the memory within the red zone is never
+ * accessed, nothing will prevent one thread stack from trouncing all over
+ * the next.
*
* low memory
* . . . . . . . . . . . . . . . . . .
@@ -78,7 +80,7 @@ static LIST_HEAD(, stack) _mstackq = LIST_HEAD_INITIALIZER(_mstackq);
* | Red Zone (guard page) | red zone for 2nd thread
* | |
* +-----------------------------------+
- * | stack 2 - _pthread_stack_default | top of 2nd thread stack
+ * | stack 2 - _thr_stack_default | top of 2nd thread stack
* | |
* | |
* | |
@@ -89,7 +91,7 @@ static LIST_HEAD(, stack) _mstackq = LIST_HEAD_INITIALIZER(_mstackq);
* | Red Zone | red zone for 1st thread
* | |
* +-----------------------------------+
- * | stack 1 - _pthread_stack_default | top of 1st thread stack
+ * | stack 1 - _thr_stack_default | top of 1st thread stack
* | |
* | |
* | |
@@ -100,7 +102,7 @@ static LIST_HEAD(, stack) _mstackq = LIST_HEAD_INITIALIZER(_mstackq);
* | Red Zone |
* | | red zone for main thread
* +-----------------------------------+
- * | USRSTACK - _pthread_stack_initial | top of main thread stack
+ * | USRSTACK - _thr_stack_initial | top of main thread stack
* | | ^
* | | |
* | | |
@@ -111,48 +113,59 @@ static LIST_HEAD(, stack) _mstackq = LIST_HEAD_INITIALIZER(_mstackq);
* high memory
*
*/
-static void * last_stack;
+static void *last_stack = NULL;
+
+/*
+ * Round size up to the nearest multiple of
+ * _thr_page_size.
+ */
+static inline size_t
+round_up(size_t size)
+{
+ if (size % _thr_page_size != 0)
+ size = ((size / _thr_page_size) + 1) *
+ _thr_page_size;
+ return size;
+}
-void *
-_thread_stack_alloc(size_t stacksize, size_t guardsize)
+int
+_thr_stack_alloc(struct pthread_attr *attr)
{
- void *stack = NULL;
- struct stack *spare_stack;
- size_t stack_size;
+ struct pthread *curthread = _get_curthread();
+ struct stack *spare_stack;
+ size_t stacksize;
+ size_t guardsize;
+ char *stackaddr;
/*
- * Round up stack size to nearest multiple of _pthread_page_size,
- * so that mmap() * will work. If the stack size is not an even
- * multiple, we end up initializing things such that there is unused
- * space above the beginning of the stack, so the stack sits snugly
- * against its guard.
+ * Round up stack size to nearest multiple of _thr_page_size so
+ * that mmap() * will work. If the stack size is not an even
+ * multiple, we end up initializing things such that there is
+ * unused space above the beginning of the stack, so the stack
+ * sits snugly against its guard.
*/
- if (stacksize % _pthread_page_size != 0)
- stack_size = ((stacksize / _pthread_page_size) + 1) *
- _pthread_page_size;
- else
- stack_size = stacksize;
+ stacksize = round_up(attr->stacksize_attr);
+ guardsize = round_up(attr->guardsize_attr);
+
+ attr->stackaddr_attr = NULL;
+ attr->flags &= ~THR_STACK_USER;
/*
+ * Use the garbage collector lock for synchronization of the
+ * spare stack lists and allocations from usrstack.
+ */
+ THREAD_LIST_LOCK(curthread);
+ /*
* If the stack and guard sizes are default, try to allocate a stack
* from the default-size stack cache:
*/
- if (stack_size == _pthread_stack_default &&
- guardsize == _pthread_guard_default) {
- /*
- * Use the garbage collector mutex for synchronization of the
- * spare stack list.
- */
- STACK_LOCK;
-
- if ((spare_stack = LIST_FIRST(&_dstackq)) != NULL) {
- /* Use the spare stack. */
+ if ((stacksize == THR_STACK_DEFAULT) &&
+ (guardsize == _thr_guard_default)) {
+ if ((spare_stack = LIST_FIRST(&dstackq)) != NULL) {
+ /* Use the spare stack. */
LIST_REMOVE(spare_stack, qe);
- stack = spare_stack->stackaddr;
+ attr->stackaddr_attr = spare_stack->stackaddr;
}
-
- /* Unlock the garbage collector mutex. */
- STACK_UNLOCK;
}
/*
* The user specified a non-default stack and/or guard size, so try to
@@ -160,76 +173,83 @@ _thread_stack_alloc(size_t stacksize, size_t guardsize)
* rounded up stack size (stack_size) in the search:
*/
else {
- /*
- * Use the garbage collector mutex for synchronization of the
- * spare stack list.
- */
- STACK_LOCK;
-
- LIST_FOREACH(spare_stack, &_mstackq, qe) {
- if (spare_stack->stacksize == stack_size &&
+ LIST_FOREACH(spare_stack, &mstackq, qe) {
+ if (spare_stack->stacksize == stacksize &&
spare_stack->guardsize == guardsize) {
LIST_REMOVE(spare_stack, qe);
- stack = spare_stack->stackaddr;
+ attr->stackaddr_attr = spare_stack->stackaddr;
break;
}
}
-
- /* Unlock the garbage collector mutex. */
- STACK_UNLOCK;
}
-
- /* Check if a stack was not allocated from a stack cache: */
- if (stack == NULL) {
-
+ if (attr->stackaddr_attr != NULL) {
+ /* A cached stack was found. Release the lock. */
+ THREAD_LIST_UNLOCK(curthread);
+ }
+ else {
+ /* Allocate a stack from usrstack. */
if (last_stack == NULL)
- last_stack = _usrstack - _pthread_stack_initial -
- _pthread_guard_default;
+ last_stack = _usrstack - _thr_stack_initial -
+ _thr_guard_default;
/* Allocate a new stack. */
- stack = last_stack - stack_size;
+ stackaddr = last_stack - stacksize - guardsize;
/*
- * Even if stack allocation fails, we don't want to try to use
- * this location again, so unconditionally decrement
+ * Even if stack allocation fails, we don't want to try to
+ * use this location again, so unconditionally decrement
* last_stack. Under normal operating conditions, the most
- * likely reason for an mmap() error is a stack overflow of the
- * adjacent thread stack.
+ * likely reason for an mmap() error is a stack overflow of
+ * the adjacent thread stack.
*/
- last_stack -= (stack_size + guardsize);
-
- /* Stack: */
- if (mmap(stack, stack_size, PROT_READ | PROT_WRITE, MAP_STACK,
- -1, 0) == MAP_FAILED)
- stack = NULL;
+ last_stack -= (stacksize + guardsize);
+
+ /* Release the lock before mmap'ing it. */
+ THREAD_LIST_UNLOCK(curthread);
+
+ /* Map the stack and guard page together, and split guard
+ page from allocated space: */
+ if ((stackaddr = mmap(stackaddr, stacksize+guardsize,
+ PROT_READ | PROT_WRITE, MAP_STACK,
+ -1, 0)) != MAP_FAILED &&
+ (guardsize == 0 ||
+ mprotect(stackaddr, guardsize, PROT_NONE) == 0)) {
+ stackaddr += guardsize;
+ } else {
+ if (stackaddr != MAP_FAILED)
+ munmap(stackaddr, stacksize + guardsize);
+ stackaddr = NULL;
+ }
+ attr->stackaddr_attr = stackaddr;
}
-
- return (stack);
+ if (attr->stackaddr_attr != NULL)
+ return (0);
+ else
+ return (-1);
}
-/* This function must be called with the 'dead thread list' lock held. */
+/* This function must be called with _thread_list_lock held. */
void
-_thread_stack_free(void *stack, size_t stacksize, size_t guardsize)
+_thr_stack_free(struct pthread_attr *attr)
{
- struct stack *spare_stack;
-
- spare_stack = (stack + stacksize - sizeof(struct stack));
- /* Round stacksize up to nearest multiple of _pthread_page_size. */
- if (stacksize % _pthread_page_size != 0) {
- spare_stack->stacksize =
- ((stacksize / _pthread_page_size) + 1) *
- _pthread_page_size;
- } else
- spare_stack->stacksize = stacksize;
- spare_stack->guardsize = guardsize;
- spare_stack->stackaddr = stack;
-
- if (spare_stack->stacksize == _pthread_stack_default &&
- spare_stack->guardsize == _pthread_guard_default) {
- /* Default stack/guard size. */
- LIST_INSERT_HEAD(&_dstackq, spare_stack, qe);
- } else {
- /* Non-default stack/guard size. */
- LIST_INSERT_HEAD(&_mstackq, spare_stack, qe);
+ struct stack *spare_stack;
+
+ if ((attr != NULL) && ((attr->flags & THR_STACK_USER) == 0)
+ && (attr->stackaddr_attr != NULL)) {
+ spare_stack = (attr->stackaddr_attr + attr->stacksize_attr
+ - sizeof(struct stack));
+ spare_stack->stacksize = round_up(attr->stacksize_attr);
+ spare_stack->guardsize = round_up(attr->guardsize_attr);
+ spare_stack->stackaddr = attr->stackaddr_attr;
+
+ if (spare_stack->stacksize == THR_STACK_DEFAULT &&
+ spare_stack->guardsize == _thr_guard_default) {
+ /* Default stack/guard size. */
+ LIST_INSERT_HEAD(&dstackq, spare_stack, qe);
+ } else {
+ /* Non-default stack/guard size. */
+ LIST_INSERT_HEAD(&mstackq, spare_stack, qe);
+ }
+ attr->stackaddr_attr = NULL;
}
}
diff --git a/lib/libthr/thread/thr_subr.c b/lib/libthr/thread/thr_subr.c
deleted file mode 100644
index c777d17..0000000
--- a/lib/libthr/thread/thr_subr.c
+++ /dev/null
@@ -1,91 +0,0 @@
-/*-
- * Copyright (c) 2003 Michael Telahun Makonnen
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * Problems/Questions to: Mike Makonnen <mtm@FreeBSD.Org>
- *
- * $FreeBSD$
- */
-
-#include <stdlib.h>
-#include <errno.h>
-#include <string.h>
-#include <sys/param.h>
-#include <sys/queue.h>
-#include <pthread.h>
-
-#include "thr_private.h"
-
-/*
- * Lock for the process global signal actions list.
- * This lock does NOT insure up-to-date-ness, only integrity.
- */
-struct umtx sigactList_lock = UMTX_INITIALIZER;
-
-/*
- * proc_sigact_copyin(sig, actp)
- * Copy the contents of actp into the process global
- * action for signal sig.
- */
-void
-proc_sigact_copyin(int sig, const struct sigaction *actp)
-{
- UMTX_LOCK(&sigactList_lock);
- bcopy((const void *)actp, (void *)&_thread_sigact[sig - 1],
- sizeof(struct sigaction));
- UMTX_UNLOCK(&sigactList_lock);
-}
-
-/*
- * proc_sigact_copyout(sig, sigact)
- * Copy the contents of the process global action for
- * signal sig into sigact.
- */
-void
-proc_sigact_copyout(int sig, struct sigaction *actp)
-{
- UMTX_LOCK(&sigactList_lock);
- bcopy((const void *)&_thread_sigact[sig - 1], (void *)actp,
- sizeof(struct sigaction));
- UMTX_UNLOCK(&sigactList_lock);
-}
-
-/*
- * proc_sigact_sigaction(sig)
- * Obtains the struct sigaction associated with signal sig.
- * The address of the structure is the return value. It is
- * upto the caller to check the value of the structure at
- * that address against SIG_IGN and SIG_DFL before trying
- * to dereference it.
- */
-struct sigaction *
-proc_sigact_sigaction(int sig)
-{
- struct sigaction *actp;
-
- UMTX_LOCK(&sigactList_lock);
- actp = &_thread_sigact[sig - 1];
- UMTX_UNLOCK(&sigactList_lock);
- return (actp);
-}
diff --git a/lib/libthr/thread/thr_suspend_np.c b/lib/libthr/thread/thr_suspend_np.c
index de95735..fd929ad 100644
--- a/lib/libthr/thread/thr_suspend_np.c
+++ b/lib/libthr/thread/thr_suspend_np.c
@@ -31,10 +31,14 @@
*
* $FreeBSD$
*/
+
#include <errno.h>
#include <pthread.h>
+
#include "thr_private.h"
+static void suspend_common(struct pthread *thread);
+
__weak_reference(_pthread_suspend_np, pthread_suspend_np);
__weak_reference(_pthread_suspend_all_np, pthread_suspend_all_np);
@@ -42,12 +46,54 @@ __weak_reference(_pthread_suspend_all_np, pthread_suspend_all_np);
int
_pthread_suspend_np(pthread_t thread)
{
- /* XXXTHR */
- return (ENOTSUP);
+ struct pthread *curthread = _get_curthread();
+ int ret;
+
+ /* Suspending the current thread doesn't make sense. */
+ if (thread == _get_curthread())
+ ret = EDEADLK;
+
+ /* Add a reference to the thread: */
+ else if ((ret = _thr_ref_add(curthread, thread, /*include dead*/0))
+ == 0) {
+ /* Lock the threads scheduling queue: */
+ THR_THREAD_LOCK(curthread, thread);
+ suspend_common(thread);
+ /* Unlock the threads scheduling queue: */
+ THR_THREAD_UNLOCK(curthread, thread);
+
+ /* Don't forget to remove the reference: */
+ _thr_ref_delete(curthread, thread);
+ }
+ return (ret);
}
void
_pthread_suspend_all_np(void)
{
- /* XXXTHR */
+ struct pthread *curthread = _get_curthread();
+ struct pthread *thread;
+
+ /* Take the thread list lock: */
+ THREAD_LIST_LOCK(curthread);
+
+ TAILQ_FOREACH(thread, &_thread_list, tle) {
+ if (thread != curthread) {
+ THR_THREAD_LOCK(curthread, thread);
+ suspend_common(thread);
+ THR_THREAD_UNLOCK(curthread, thread);
+ }
+ }
+
+ /* Release the thread list lock: */
+ THREAD_LIST_UNLOCK(curthread);
+}
+
+static void
+suspend_common(struct pthread *thread)
+{
+ if (thread->state != PS_DEAD) {
+ thread->flags |= THR_FLAGS_NEED_SUSPEND;
+ _thr_send_sig(thread, SIGCANCEL);
+ }
}
diff --git a/lib/libthr/thread/thr_mattr_init.c b/lib/libthr/thread/thr_switch_np.c
index 3d4195c..98497b7 100644
--- a/lib/libthr/thread/thr_mattr_init.c
+++ b/lib/libthr/thread/thr_switch_np.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1996 Jeffrey Hsu <hsu@freebsd.org>.
+ * Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -12,12 +12,12 @@
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
- * This product includes software developed by John Birrell.
+ * This product includes software developed by Daniel Eischen.
* 4. Neither the name of the author nor the names of any co-contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
- * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
+ * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
@@ -31,26 +31,25 @@
*
* $FreeBSD$
*/
-#include <string.h>
-#include <stdlib.h>
+
#include <errno.h>
#include <pthread.h>
+#include <pthread_np.h>
+
#include "thr_private.h"
-__weak_reference(_pthread_mutexattr_init, pthread_mutexattr_init);
+
+__weak_reference(_pthread_switch_add_np, pthread_switch_add_np);
+__weak_reference(_pthread_switch_delete_np, pthread_switch_delete_np);
int
-_pthread_mutexattr_init(pthread_mutexattr_t *attr)
+_pthread_switch_add_np(pthread_switch_routine_t routine)
{
- pthread_mutexattr_t pattr;
-
- if ((pattr = (pthread_mutexattr_t)
- malloc(sizeof(struct pthread_mutex_attr))) == NULL)
- return (ENOMEM);
-
- memcpy(pattr, &pthread_mutexattr_default,
- sizeof(struct pthread_mutex_attr));
- *attr = pattr;
+ return (ENOTSUP);
+}
- return (0);
+int
+_pthread_switch_delete_np(pthread_switch_routine_t routine)
+{
+ return (ENOTSUP);
}
diff --git a/lib/libthr/thread/thr_find_thread.c b/lib/libthr/thread/thr_symbols.c
index a00d19e..313cad7 100644
--- a/lib/libthr/thread/thr_find_thread.c
+++ b/lib/libthr/thread/thr_symbols.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998 John Birrell <jb@cimlogic.com.au>.
+ * Copyright (c) 2004 David Xu <davidxu@freebsd.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,32 +31,29 @@
*
* $FreeBSD$
*/
-#include <errno.h>
-#include <pthread.h>
-#include "thr_private.h"
-
-/* Find a thread in the linked list of active threads: */
-int
-_find_thread(pthread_t pthread)
-{
- pthread_t pthread1;
- if (pthread == NULL)
- return(EINVAL);
-
- THREAD_LIST_LOCK;
+#include <stddef.h>
+#include <pthread.h>
+#include <rtld.h>
- /* Search for the specified thread: */
- pthread1 = NULL;
- TAILQ_FOREACH(pthread1, &_thread_list, tle) {
- if (pthread == pthread1)
- break;
- }
+#include "thr_private.h"
- THREAD_LIST_UNLOCK;
- if (pthread1 != NULL && pthread1->magic != PTHREAD_MAGIC)
- return (EINVAL);
+/* A collection of symbols needed by debugger */
- /* Return zero if the thread exists: */
- return ((pthread1 != NULL) ? 0:ESRCH);
-}
+/* int _libthr_debug */
+int _thread_off_tcb = offsetof(struct pthread, tcb);
+int _thread_off_tid = offsetof(struct pthread, tid);
+int _thread_off_next = offsetof(struct pthread, tle.tqe_next);
+int _thread_off_attr_flags = offsetof(struct pthread, attr.flags);
+int _thread_off_thr_locklevel = offsetof(struct pthread, locklevel);
+int _thread_off_linkmap = offsetof(Obj_Entry, linkmap);
+int _thread_off_tlsindex = offsetof(Obj_Entry, tlsindex);
+int _thread_off_isdead = offsetof(struct pthread, terminated);
+int _thread_size_key = sizeof(struct pthread_key);
+int _thread_off_key_allocated = offsetof(struct pthread_key, allocated);
+int _thread_off_key_destructor = offsetof(struct pthread_key, destructor);
+int _thread_max_keys = PTHREAD_KEYS_MAX;
+int _thread_off_dtv = DTV_OFFSET;
+int _thread_off_state = offsetof(struct pthread, state);
+int _thread_state_running = PS_RUNNING;
+int _thread_state_zoombie = PS_DEAD;
diff --git a/lib/libthr/thread/thr_syscalls.c b/lib/libthr/thread/thr_syscalls.c
index 7e2b075..b67eeb8 100644
--- a/lib/libthr/thread/thr_syscalls.c
+++ b/lib/libthr/thread/thr_syscalls.c
@@ -1,7 +1,7 @@
/*
- * Copyright (c) 2000 Jason Evans <jasone@freebsd.org>.
- * Copyright (c) 2002 Daniel M. Eischen <deischen@freebsd.org>
- * Copyright (c) 2003 Jeff Roberson <jeff@freebsd.org>
+ * Copyright (C) 2005 David Xu <davidxu@freebsd.org>.
+ * Copyright (c) 2003 Daniel Eischen <deischen@freebsd.org>.
+ * Copyright (C) 2000 Jason Evans <jasone@freebsd.org>.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -64,24 +64,21 @@
*
*/
-#include <sys/cdefs.h>
-#include <sys/fcntl.h>
+#include <sys/types.h>
#include <sys/mman.h>
#include <sys/param.h>
#include <sys/select.h>
+#include <sys/signalvar.h>
#include <sys/socket.h>
+#include <sys/stat.h>
#include <sys/time.h>
-#include <sys/types.h>
#include <sys/uio.h>
#include <sys/wait.h>
-
#include <aio.h>
#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <poll.h>
-#include <pthread.h>
-#include <semaphore.h>
#include <signal.h>
#include <stdarg.h>
#include <stdio.h>
@@ -89,33 +86,35 @@
#include <string.h>
#include <termios.h>
#include <unistd.h>
+#include <pthread.h>
#include "thr_private.h"
-extern spinlock_t *__malloc_lock;
-
extern int __creat(const char *, mode_t);
-extern int __sleep(unsigned int);
-extern int __sys_nanosleep(const struct timespec *, struct timespec *);
-extern int __sys_select(int, fd_set *, fd_set *, fd_set *, struct timeval *);
-extern int __sys_sigaction(int, const struct sigaction *, struct sigaction *);
+extern int __pause(void);
+extern int __pselect(int count, fd_set *rfds, fd_set *wfds, fd_set *efds,
+ const struct timespec *timo, const sigset_t *mask);
+extern unsigned int __sleep(unsigned int);
extern int __system(const char *);
extern int __tcdrain(int);
extern pid_t __wait(int *);
extern pid_t __sys_wait4(pid_t, int *, int, struct rusage *);
extern pid_t __waitpid(pid_t, int *, int);
-__weak_reference(_accept, accept);
-
+__weak_reference(__accept, accept);
int
-_accept(int s, struct sockaddr *addr, socklen_t *addrlen)
+__accept(int s, struct sockaddr *addr, socklen_t *addrlen)
{
+ struct pthread *curthread;
+ int oldcancel;
int ret;
- _thread_enter_cancellation_point();
+ curthread = _get_curthread();
+ oldcancel = _thr_cancel_enter(curthread);
ret = __sys_accept(s, addr, addrlen);
- _thread_leave_cancellation_point();
- return (ret);
+ _thr_cancel_leave(curthread, oldcancel);
+
+ return (ret);
}
__weak_reference(_aio_suspend, aio_suspend);
@@ -124,222 +123,162 @@ int
_aio_suspend(const struct aiocb * const iocbs[], int niocb, const struct
timespec *timeout)
{
- int ret;
+ struct pthread *curthread = _get_curthread();
+ int oldcancel;
+ int ret;
- _thread_enter_cancellation_point();
+ oldcancel = _thr_cancel_enter(curthread);
ret = __sys_aio_suspend(iocbs, niocb, timeout);
- _thread_leave_cancellation_point();
+ _thr_cancel_leave(curthread, oldcancel);
- return ret;
+ return (ret);
}
-__weak_reference(_close, close);
+__weak_reference(__close, close);
int
-_close(int fd)
+__close(int fd)
{
+ struct pthread *curthread = _get_curthread();
+ int oldcancel;
int ret;
- _thread_enter_cancellation_point();
+ oldcancel = _thr_cancel_enter(curthread);
ret = __sys_close(fd);
- _thread_leave_cancellation_point();
+ _thr_cancel_leave(curthread, oldcancel);
- return ret;
+ return (ret);
}
-__weak_reference(_connect, connect);
+__weak_reference(__connect, connect);
int
-_connect(int s, const struct sockaddr *n, socklen_t l)
+__connect(int fd, const struct sockaddr *name, socklen_t namelen)
{
+ struct pthread *curthread = _get_curthread();
+ int oldcancel;
int ret;
- _thread_enter_cancellation_point();
- ret = __sys_connect(s, n, l);
- _thread_leave_cancellation_point();
- return ret;
+ curthread = _get_curthread();
+ oldcancel = _thr_cancel_enter(curthread);
+ ret = __sys_connect(fd, name, namelen);
+ _thr_cancel_leave(curthread, oldcancel);
+
+ return (ret);
}
-
-__weak_reference(_creat, creat);
+
+__weak_reference(___creat, creat);
int
-_creat(const char *path, mode_t mode)
+___creat(const char *path, mode_t mode)
{
- int ret;
+ struct pthread *curthread = _get_curthread();
+ int oldcancel;
+ int ret;
- _thread_enter_cancellation_point();
+ oldcancel = _thr_cancel_enter(curthread);
ret = __creat(path, mode);
- _thread_leave_cancellation_point();
+ _thr_cancel_leave(curthread, oldcancel);
return ret;
}
-__weak_reference(_fcntl, fcntl);
+__weak_reference(__fcntl, fcntl);
int
-_fcntl(int fd, int cmd,...)
+__fcntl(int fd, int cmd,...)
{
+ struct pthread *curthread = _get_curthread();
+ int oldcancel;
int ret;
va_list ap;
- _thread_enter_cancellation_point();
+ oldcancel = _thr_cancel_enter(curthread);
va_start(ap, cmd);
switch (cmd) {
- case F_DUPFD:
- case F_SETFD:
- case F_SETFL:
- ret = __sys_fcntl(fd, cmd, va_arg(ap, int));
- break;
- case F_GETFD:
- case F_GETFL:
- ret = __sys_fcntl(fd, cmd);
- break;
- default:
- ret = __sys_fcntl(fd, cmd, va_arg(ap, void *));
+ case F_DUPFD:
+ ret = __sys_fcntl(fd, cmd, va_arg(ap, int));
+ break;
+ case F_SETFD:
+ case F_SETFL:
+ ret = __sys_fcntl(fd, cmd, va_arg(ap, int));
+ break;
+ case F_GETFD:
+ case F_GETFL:
+ ret = __sys_fcntl(fd, cmd);
+ break;
+ default:
+ ret = __sys_fcntl(fd, cmd, va_arg(ap, void *));
}
va_end(ap);
- _thread_leave_cancellation_point();
-
- return ret;
-}
-
-__weak_reference(_fork, fork);
+ _thr_cancel_leave(curthread, oldcancel);
-int
-_fork(int fd)
-{
- int ret;
- struct pthread_atfork *af;
-
- _pthread_mutex_lock(&_atfork_mutex);
-
- /* Run down atfork prepare handlers. */
- TAILQ_FOREACH_REVERSE(af, &_atfork_list, atfork_head, qe) {
- if (af->prepare != NULL)
- af->prepare();
- }
-
- /*
- * Fork a new process.
- * XXX - The correct way to handle __malloc_lock is to have
- * the threads libraries (or libc) install fork handlers for it
- * in their initialization routine. We should probably
- * do that for all the locks in libc.
- */
- if (__isthreaded && __malloc_lock != NULL)
- _SPINLOCK(__malloc_lock);
- ret = __sys_fork();
- if (ret == 0) {
- __isthreaded = 0;
- if (__malloc_lock != NULL)
- memset(__malloc_lock, 0, sizeof(spinlock_t));
- init_tdlist(curthread, 1);
- init_td_common(curthread, NULL, 1);
- _mutex_reinit(&_atfork_mutex);
-
- /* Run down atfork child handlers. */
- TAILQ_FOREACH(af, &_atfork_list, qe) {
- if (af->child != NULL)
- af->child();
- }
- } else if (ret != -1) {
- /* Run down atfork parent handlers. */
- TAILQ_FOREACH(af, &_atfork_list, qe) {
- if (af->parent != NULL)
- af->parent();
- }
- }
-
- if (ret != 0) {
- if (__isthreaded && __malloc_lock != NULL)
- _SPINUNLOCK(__malloc_lock);
- _pthread_mutex_unlock(&_atfork_mutex);
- }
- return ret;
+ return (ret);
}
-
-__weak_reference(_fsync, fsync);
+__weak_reference(__fsync, fsync);
int
-_fsync(int fd)
+__fsync(int fd)
{
+ struct pthread *curthread = _get_curthread();
+ int oldcancel;
int ret;
- _thread_enter_cancellation_point();
+ oldcancel = _thr_cancel_enter(curthread);
ret = __sys_fsync(fd);
- _thread_leave_cancellation_point();
+ _thr_cancel_leave(curthread, oldcancel);
- return ret;
-}
-
-__weak_reference(_msgrcv, msgrcv);
-
-int
-_msgrcv(int id, void *p, size_t sz, long t, int f)
-{
- int ret;
-
- _thread_enter_cancellation_point();
- ret = __sys_msgrcv(id, p, sz, t, f);
- _thread_leave_cancellation_point();
- return ret;
-}
-
-__weak_reference(_msgsnd, msgsnd);
-
-int
-_msgsnd(int id, const void *p, size_t sz, int f)
-{
- int ret;
-
- _thread_enter_cancellation_point();
- ret = __sys_msgsnd(id, p, sz, f);
- _thread_leave_cancellation_point();
- return ret;
+ return (ret);
}
-__weak_reference(_msync, msync);
+__weak_reference(__msync, msync);
int
-_msync(void *addr, size_t len, int flags)
+__msync(void *addr, size_t len, int flags)
{
+ struct pthread *curthread = _get_curthread();
+ int oldcancel;
int ret;
- _thread_enter_cancellation_point();
+ oldcancel = _thr_cancel_enter(curthread);
ret = __sys_msync(addr, len, flags);
- _thread_leave_cancellation_point();
+ _thr_cancel_leave(curthread, oldcancel);
return ret;
}
-__weak_reference(_nanosleep, nanosleep);
+__weak_reference(__nanosleep, nanosleep);
int
-_nanosleep(const struct timespec * time_to_sleep, struct timespec *
- time_remaining)
+__nanosleep(const struct timespec *time_to_sleep,
+ struct timespec *time_remaining)
{
- int ret;
+ struct pthread *curthread = _get_curthread();
+ int oldcancel;
+ int ret;
- _thread_enter_cancellation_point();
+ oldcancel = _thr_cancel_enter(curthread);
ret = __sys_nanosleep(time_to_sleep, time_remaining);
- _thread_leave_cancellation_point();
+ _thr_cancel_leave(curthread, oldcancel);
- return ret;
+ return (ret);
}
-__weak_reference(_open, open);
+__weak_reference(__open, open);
int
-_open(const char *path, int flags,...)
+__open(const char *path, int flags,...)
{
+ struct pthread *curthread = _get_curthread();
+ int oldcancel;
int ret;
int mode = 0;
va_list ap;
- _thread_enter_cancellation_point();
+ oldcancel = _thr_cancel_enter(curthread);
/* Check if the file is being created: */
if (flags & O_CREAT) {
@@ -350,325 +289,202 @@ _open(const char *path, int flags,...)
}
ret = __sys_open(path, flags, mode);
- _thread_leave_cancellation_point();
+
+ _thr_cancel_leave(curthread, oldcancel);
return ret;
}
-/*
- * The implementation in libc calls sigpause(), which is also
- * a cancellation point.
- */
-#if 0
__weak_reference(_pause, pause);
int
_pause(void)
{
- _thread_enter_cancellation_point();
- __pause();
- _thread_leave_cancellation_point();
+ struct pthread *curthread = _get_curthread();
+ int oldcancel;
+ int ret;
+
+ oldcancel = _thr_cancel_enter(curthread);
+ ret = __pause();
+ _thr_cancel_leave(curthread, oldcancel);
+
+ return ret;
}
-#endif
-__weak_reference(_poll, poll);
+__weak_reference(__poll, poll);
int
-_poll(struct pollfd *fds, unsigned int nfds, int timeout)
+__poll(struct pollfd *fds, unsigned int nfds, int timeout)
{
+ struct pthread *curthread = _get_curthread();
+ int oldcancel;
int ret;
- _thread_enter_cancellation_point();
+ oldcancel = _thr_cancel_enter(curthread);
ret = __sys_poll(fds, nfds, timeout);
- _thread_leave_cancellation_point();
+ _thr_cancel_leave(curthread, oldcancel);
return ret;
}
-/* XXXFix */
-#if 0
-__weak_reference(_pread, pread);
-
-ssize_t
-_pread(int d, void *b, size_t n, off_t o)
-{
- ssize_t ret;
-
- _thread_enter_cancellation_point();
- ret = __sys_pread(d, b, n, o);
- _thread_leave_cancellation_point();
- return (ret);
-}
-#endif
-
-/* The libc version calls select(), which is also a cancellation point. */
-#if 0
-extern int __pselect(int count, fd_set *rfds, fd_set *wfds, fd_set *efds,
- const struct timespec *timo, const sigset_t *mask);
+__weak_reference(_pselect, pselect);
int
-pselect(int count, fd_set *rfds, fd_set *wfds, fd_set *efds,
+_pselect(int count, fd_set *rfds, fd_set *wfds, fd_set *efds,
const struct timespec *timo, const sigset_t *mask)
{
+ struct pthread *curthread = _get_curthread();
+ int oldcancel;
int ret;
- _thread_enter_cancellation_point();
+ oldcancel = _thr_cancel_enter(curthread);
ret = __pselect(count, rfds, wfds, efds, timo, mask);
- _thread_leave_cancellation_point();
+ _thr_cancel_leave(curthread, oldcancel);
return (ret);
}
-#endif
-
-/* XXXFix */
-#if 0
-__weak_reference(_pwrite, pwrite);
-
-ssize_t
-_pwrite(int d, const void *b, size_t n, off_t o)
-{
- ssize_t ret;
-
- _thread_enter_cancellation_point();
- ret = __sys_pwrite(d, b, n, o);
- _thread_leave_cancellation_point();
- return (ret);
-}
-#endif
__weak_reference(_raise, raise);
int
_raise(int sig)
{
- int error;
+ int ret;
- error = pthread_kill(pthread_self(), sig);
- if (error != 0) {
- errno = error;
- error = -1;
+ if (!_thr_isthreaded())
+ ret = kill(getpid(), sig);
+ else {
+ ret = pthread_kill(pthread_self(), sig);
+ if (ret != 0) {
+ errno = ret;
+ ret = -1;
+ }
}
- return (error);
+ return (ret);
}
-__weak_reference(_read, read);
+__weak_reference(__read, read);
ssize_t
-_read(int fd, void *buf, size_t nbytes)
+__read(int fd, void *buf, size_t nbytes)
{
+ struct pthread *curthread = _get_curthread();
+ int oldcancel;
ssize_t ret;
- _thread_enter_cancellation_point();
+ oldcancel = _thr_cancel_enter(curthread);
ret = __sys_read(fd, buf, nbytes);
- _thread_leave_cancellation_point();
+ _thr_cancel_leave(curthread, oldcancel);
return ret;
}
-__weak_reference(_readv, readv);
+__weak_reference(__readv, readv);
ssize_t
-_readv(int fd, const struct iovec *iov, int iovcnt)
+__readv(int fd, const struct iovec *iov, int iovcnt)
{
+ struct pthread *curthread = _get_curthread();
+ int oldcancel;
ssize_t ret;
- _thread_enter_cancellation_point();
+ oldcancel = _thr_cancel_enter(curthread);
ret = __sys_readv(fd, iov, iovcnt);
- _thread_leave_cancellation_point();
+ _thr_cancel_leave(curthread, oldcancel);
return ret;
}
-/*
- * The libc implementation of recv() calls recvfrom, which
- * is also a cancellation point.
- */
-#if 0
-__weak_reference(_recv, recv);
+__weak_reference(__recvfrom, recvfrom);
ssize_t
-_recv(int s, void *b, size_t l, int f)
-{
- ssize_t ret;
-
- _thread_enter_cancellation_point();
- ret = __sys_recv(s, b, l, f);
- _thread_leave_cancellation_point();
- return (ret);
-}
-#endif
-
-__weak_reference(_recvfrom, recvfrom);
-
-ssize_t
-_recvfrom(int s, void *b, size_t l, int f, struct sockaddr *from,
+__recvfrom(int s, void *b, size_t l, int f, struct sockaddr *from,
socklen_t *fl)
{
+ struct pthread *curthread = _get_curthread();
+ int oldcancel;
ssize_t ret;
- _thread_enter_cancellation_point();
+ oldcancel = _thr_cancel_enter(curthread);
ret = __sys_recvfrom(s, b, l, f, from, fl);
- _thread_leave_cancellation_point();
+ _thr_cancel_leave(curthread, oldcancel);
return (ret);
}
-__weak_reference(_recvmsg, recvmsg);
+__weak_reference(__recvmsg, recvmsg);
ssize_t
-_recvmsg(int s, struct msghdr *m, int f)
+__recvmsg(int s, struct msghdr *m, int f)
{
+ struct pthread *curthread = _get_curthread();
ssize_t ret;
+ int oldcancel;
- _thread_enter_cancellation_point();
+ oldcancel = _thr_cancel_enter(curthread);
ret = __sys_recvmsg(s, m, f);
- _thread_leave_cancellation_point();
+ _thr_cancel_leave(curthread, oldcancel);
return (ret);
}
-__weak_reference(_select, select);
+__weak_reference(__select, select);
int
-_select(int numfds, fd_set *readfds, fd_set *writefds, fd_set *exceptfds,
+__select(int numfds, fd_set *readfds, fd_set *writefds, fd_set *exceptfds,
struct timeval *timeout)
{
+ struct pthread *curthread = _get_curthread();
+ int oldcancel;
int ret;
- _thread_enter_cancellation_point();
+ oldcancel = _thr_cancel_enter(curthread);
ret = __sys_select(numfds, readfds, writefds, exceptfds, timeout);
- _thread_leave_cancellation_point();
-
+ _thr_cancel_leave(curthread, oldcancel);
return ret;
}
-/*
- * Libc implements this by calling _sendto(), which is also a
- * cancellation point.
- */
-#if 0
-__weak_reference(_send, send);
+__weak_reference(__sendmsg, sendmsg);
ssize_t
-_send(int s, const void *m, size_t l, int f)
+__sendmsg(int s, const struct msghdr *m, int f)
{
+ struct pthread *curthread = _get_curthread();
ssize_t ret;
+ int oldcancel;
- _thread_enter_cancellation_point();
- ret = _sendto(s, m, l, f, NULL, 0);
- _thread_leave_cancellation_point();
- return (ret);
-}
-#endif
-
-__weak_reference(_sendmsg, sendmsg);
-
-ssize_t
-_sendmsg(int s, const struct msghdr *m, int f)
-{
- ssize_t ret;
-
- _thread_enter_cancellation_point();
+ oldcancel = _thr_cancel_enter(curthread);
ret = __sys_sendmsg(s, m, f);
- _thread_leave_cancellation_point();
+ _thr_cancel_leave(curthread, oldcancel);
return (ret);
}
-__weak_reference(_sendto, sendto);
+__weak_reference(__sendto, sendto);
ssize_t
-_sendto(int s, const void *m, size_t l, int f, const struct sockaddr *t,
+__sendto(int s, const void *m, size_t l, int f, const struct sockaddr *t,
socklen_t tl)
{
+ struct pthread *curthread = _get_curthread();
ssize_t ret;
+ int oldcancel;
- _thread_enter_cancellation_point();
+ oldcancel = _thr_cancel_enter(curthread);
ret = __sys_sendto(s, m, l, f, t, tl);
- _thread_leave_cancellation_point();
- return (ret);
-}
-
-/*
- * The implementation in libc calls sigsuspend(), which is also
- * a cancellation point.
- */
-#if 0
-__weak_reference(_sigpause, sigpause);
-
-int
-_sigpause(int m)
-{
- int ret;
-
- _thread_enter_cancellation_point();
- ret = __sys_sigpause(m);
- _thread_leave_cancellation_point();
- return (ret);
-}
-#endif
-
-__weak_reference(_sigsuspend, sigsuspend);
-
-int
-_sigsuspend(const sigset_t *m)
-{
- int ret;
-
- _thread_enter_cancellation_point();
- ret = __sys_sigsuspend(m);
- _thread_leave_cancellation_point();
+ _thr_cancel_leave(curthread, oldcancel);
return (ret);
}
-__weak_reference(_sigtimedwait, sigtimedwait);
-
-int
-_sigtimedwait(const sigset_t *s, siginfo_t *i, const struct timespec *t)
-{
- int ret;
-
- _thread_enter_cancellation_point();
- ret = __sys_sigtimedwait(s, i, t);
- _thread_leave_cancellation_point();
- return (ret);
-}
-
-__weak_reference(_sigwait, sigwait);
-
-int
-_sigwait(const sigset_t *s, int *i)
-{
- int ret;
-
- _thread_enter_cancellation_point();
- ret = __sys_sigwait(s, i);
- _thread_leave_cancellation_point();
- return (ret);
-}
-
-__weak_reference(_sigwaitinfo, sigwaitinfo);
-
-int
-_sigwaitinfo(const sigset_t *s, siginfo_t *i)
-{
- int ret;
-
- _thread_enter_cancellation_point();
- ret = __sys_sigwaitinfo(s, i);
- _thread_leave_cancellation_point();
- return (ret);
-}
-
-__weak_reference(_sleep, sleep);
-
unsigned int
_sleep(unsigned int seconds)
{
+ struct pthread *curthread = _get_curthread();
+ int oldcancel;
unsigned int ret;
- _thread_enter_cancellation_point();
+ oldcancel = _thr_cancel_enter(curthread);
ret = __sleep(seconds);
- _thread_leave_cancellation_point();
+ _thr_cancel_leave(curthread, oldcancel);
- return ret;
+ return (ret);
}
__weak_reference(_system, system);
@@ -676,120 +492,117 @@ __weak_reference(_system, system);
int
_system(const char *string)
{
+ struct pthread *curthread = _get_curthread();
+ int oldcancel;
int ret;
- _thread_enter_cancellation_point();
+ oldcancel = _thr_cancel_enter(curthread);
ret = __system(string);
- _thread_leave_cancellation_point();
+ _thr_cancel_leave(curthread, oldcancel);
return ret;
}
-
__weak_reference(_tcdrain, tcdrain);
int
_tcdrain(int fd)
{
+ struct pthread *curthread = _get_curthread();
+ int oldcancel;
int ret;
- _thread_enter_cancellation_point();
+ oldcancel = _thr_cancel_enter(curthread);
ret = __tcdrain(fd);
- _thread_leave_cancellation_point();
+ _thr_cancel_leave(curthread, oldcancel);
- return ret;
+ return (ret);
}
-/*
- * The usleep() implementation calls nanosleep(), which is also
- * a cancellation point.
- */
-#if 0
-__weak_reference(_usleep, usleep);
+__weak_reference(_vfork, vfork);
int
-_usleep(useconds_t u)
+_vfork(void)
{
- int ret;
-
- _thread_enter_cancellation_point();
- ret = __sys_usleep(u);
- _thread_leave_cancellation_point();
- return (ret);
+ return (fork());
}
-#endif
__weak_reference(_wait, wait);
pid_t
_wait(int *istat)
{
+ struct pthread *curthread = _get_curthread();
+ int oldcancel;
pid_t ret;
- _thread_enter_cancellation_point();
+ oldcancel = _thr_cancel_enter(curthread);
ret = __wait(istat);
- _thread_leave_cancellation_point();
+ _thr_cancel_leave(curthread, oldcancel);
return ret;
}
-__weak_reference(_wait4, wait4);
+__weak_reference(__wait4, wait4);
pid_t
-_wait4(pid_t pid, int *istat, int options, struct rusage *rusage)
+__wait4(pid_t pid, int *istat, int options, struct rusage *rusage)
{
+ struct pthread *curthread = _get_curthread();
+ int oldcancel;
pid_t ret;
- _thread_enter_cancellation_point();
+ oldcancel = _thr_cancel_enter(curthread);
ret = __sys_wait4(pid, istat, options, rusage);
- _thread_leave_cancellation_point();
+ _thr_cancel_leave(curthread, oldcancel);
return ret;
}
-/*
- * The libc implementation of waitpid calls wait4().
- */
-#if 0
__weak_reference(_waitpid, waitpid);
pid_t
_waitpid(pid_t wpid, int *status, int options)
{
+ struct pthread *curthread = _get_curthread();
+ int oldcancel;
pid_t ret;
- _thread_enter_cancellation_point();
+ oldcancel = _thr_cancel_enter(curthread);
ret = __waitpid(wpid, status, options);
- _thread_leave_cancellation_point();
+ _thr_cancel_leave(curthread, oldcancel);
return ret;
}
-#endif
-__weak_reference(_write, write);
+__weak_reference(__write, write);
ssize_t
-_write(int fd, const void *buf, size_t nbytes)
+__write(int fd, const void *buf, size_t nbytes)
{
+ struct pthread *curthread = _get_curthread();
+ int oldcancel;
ssize_t ret;
- _thread_enter_cancellation_point();
+ oldcancel = _thr_cancel_enter(curthread);
ret = __sys_write(fd, buf, nbytes);
- _thread_leave_cancellation_point();
+ _thr_cancel_leave(curthread, oldcancel);
return ret;
}
-__weak_reference(_writev, writev);
+__weak_reference(__writev, writev);
ssize_t
-_writev(int fd, const struct iovec *iov, int iovcnt)
+__writev(int fd, const struct iovec *iov, int iovcnt)
{
+ struct pthread *curthread = _get_curthread();
+ int oldcancel;
ssize_t ret;
- _thread_enter_cancellation_point();
+ oldcancel = _thr_cancel_enter(curthread);
ret = __sys_writev(fd, iov, iovcnt);
- _thread_leave_cancellation_point();
+ _thr_cancel_leave(curthread, oldcancel);
return ret;
}
diff --git a/lib/libthr/thread/thr_umtx.c b/lib/libthr/thread/thr_umtx.c
new file mode 100644
index 0000000..ba73de8
--- /dev/null
+++ b/lib/libthr/thread/thr_umtx.c
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ *
+ */
+
+#include "thr_private.h"
+#include "thr_umtx.h"
+
+int
+__thr_umtx_lock(volatile umtx_t *mtx, long id)
+{
+ while (_umtx_op((struct umtx *)mtx, UMTX_OP_LOCK, id, 0, 0))
+ ;
+ return (0);
+}
+
+int
+__thr_umtx_timedlock(volatile umtx_t *mtx, long id,
+ const struct timespec *timeout)
+{
+ if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 &&
+ timeout->tv_nsec <= 0)))
+ return (ETIMEDOUT);
+ if (_umtx_op((struct umtx *)mtx, UMTX_OP_LOCK, id, 0,
+ (void *)timeout) == 0)
+ return (0);
+ return (errno);
+}
+
+int
+__thr_umtx_unlock(volatile umtx_t *mtx, long id)
+{
+ if (_umtx_op((struct umtx *)mtx, UMTX_OP_UNLOCK, id, 0, 0) == 0)
+ return (0);
+ return (errno);
+}
+
+int
+_thr_umtx_wait(volatile umtx_t *mtx, long id, const struct timespec *timeout)
+{
+ if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 &&
+ timeout->tv_nsec <= 0)))
+ return (ETIMEDOUT);
+ if (_umtx_op((struct umtx *)mtx, UMTX_OP_WAIT, id, 0,
+ (void*) timeout) == 0)
+ return (0);
+ return (errno);
+}
+
+int
+_thr_umtx_wake(volatile umtx_t *mtx, int nr_wakeup)
+{
+ if (_umtx_op((struct umtx *)mtx, UMTX_OP_WAKE, nr_wakeup, 0, 0) == 0)
+ return (0);
+ return (errno);
+}
diff --git a/lib/libthr/thread/thr_umtx.h b/lib/libthr/thread/thr_umtx.h
new file mode 100644
index 0000000..4ccfb69
--- /dev/null
+++ b/lib/libthr/thread/thr_umtx.h
@@ -0,0 +1,81 @@
+/*-
+ * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _THR_FBSD_UMTX_H_
+#define _THR_FBSD_UMTX_H_
+
+#include <sys/umtx.h>
+
+typedef long umtx_t;
+
+int __thr_umtx_lock(volatile umtx_t *mtx, long id);
+int __thr_umtx_timedlock(volatile umtx_t *mtx, long id,
+ const struct timespec *timeout);
+int __thr_umtx_unlock(volatile umtx_t *mtx, long id);
+
+static inline void
+_thr_umtx_init(volatile umtx_t *mtx)
+{
+ *mtx = 0;
+}
+
+static inline int
+_thr_umtx_trylock(volatile umtx_t *mtx, long id)
+{
+ return umtx_trylock((struct umtx *)mtx, id);
+}
+
+static inline int
+_thr_umtx_lock(volatile umtx_t *mtx, long id)
+{
+ if (atomic_cmpset_acq_ptr(mtx, (void *)UMTX_UNOWNED, (void *)id))
+ return (0);
+ return __thr_umtx_lock(mtx, id);
+}
+
+static inline int
+_thr_umtx_timedlock(volatile umtx_t *mtx, long id,
+ const struct timespec *timeout)
+{
+ if (atomic_cmpset_acq_ptr(mtx, (void *)UMTX_UNOWNED, (void *)id))
+ return (0);
+ return __thr_umtx_timedlock(mtx, id, timeout);
+}
+
+static inline int
+_thr_umtx_unlock(volatile umtx_t *mtx, long id)
+{
+ if (atomic_cmpset_rel_ptr(mtx, (void *)id, (void *)UMTX_UNOWNED))
+ return (0);
+ return __thr_umtx_unlock(mtx, id);
+}
+
+int _thr_umtx_wait(volatile umtx_t *mtx, umtx_t exp,
+ const struct timespec *timeout);
+int _thr_umtx_wake(volatile umtx_t *mtx, int count);
+#endif
OpenPOWER on IntegriCloud