summaryrefslogtreecommitdiffstats
path: root/lib/libpthread/arch
diff options
context:
space:
mode:
Diffstat (limited to 'lib/libpthread/arch')
-rw-r--r--lib/libpthread/arch/alpha/Makefile.inc5
-rw-r--r--lib/libpthread/arch/alpha/alpha/context.S353
-rw-r--r--lib/libpthread/arch/alpha/alpha/enter_uts.S42
-rw-r--r--lib/libpthread/arch/alpha/alpha/pthread_md.c76
-rw-r--r--lib/libpthread/arch/alpha/include/atomic_ops.h75
-rw-r--r--lib/libpthread/arch/alpha/include/pthread_md.h247
-rw-r--r--lib/libpthread/arch/amd64/Makefile.inc5
-rw-r--r--lib/libpthread/arch/amd64/amd64/context.S217
-rw-r--r--lib/libpthread/arch/amd64/amd64/enter_uts.S41
-rw-r--r--lib/libpthread/arch/amd64/amd64/pthread_md.c82
-rw-r--r--lib/libpthread/arch/amd64/include/atomic_ops.h57
-rw-r--r--lib/libpthread/arch/amd64/include/pthread_md.h268
-rw-r--r--lib/libpthread/arch/arm/Makefile.inc7
-rw-r--r--lib/libpthread/arch/arm/arm/context.S79
-rw-r--r--lib/libpthread/arch/arm/arm/pthread_md.c86
-rw-r--r--lib/libpthread/arch/arm/include/atomic_ops.h53
-rw-r--r--lib/libpthread/arch/arm/include/pthread_md.h257
-rw-r--r--lib/libpthread/arch/i386/Makefile.inc5
-rw-r--r--lib/libpthread/arch/i386/i386/pthread_md.c100
-rw-r--r--lib/libpthread/arch/i386/i386/thr_enter_uts.S44
-rw-r--r--lib/libpthread/arch/i386/i386/thr_getcontext.S156
-rw-r--r--lib/libpthread/arch/i386/include/atomic_ops.h51
-rw-r--r--lib/libpthread/arch/i386/include/pthread_md.h264
-rw-r--r--lib/libpthread/arch/ia64/Makefile.inc5
-rw-r--r--lib/libpthread/arch/ia64/ia64/context.S351
-rw-r--r--lib/libpthread/arch/ia64/ia64/enter_uts.S60
-rw-r--r--lib/libpthread/arch/ia64/ia64/pthread_md.c75
-rw-r--r--lib/libpthread/arch/ia64/include/atomic_ops.h47
-rw-r--r--lib/libpthread/arch/ia64/include/pthread_md.h252
-rw-r--r--lib/libpthread/arch/powerpc/Makefile.inc8
-rw-r--r--lib/libpthread/arch/powerpc/include/atomic_ops.h62
-rw-r--r--lib/libpthread/arch/powerpc/include/pthread_md.h258
-rw-r--r--lib/libpthread/arch/powerpc/powerpc/assym.c113
-rw-r--r--lib/libpthread/arch/powerpc/powerpc/assym.s113
-rw-r--r--lib/libpthread/arch/powerpc/powerpc/context.S151
-rw-r--r--lib/libpthread/arch/powerpc/powerpc/enter_uts.S40
-rw-r--r--lib/libpthread/arch/powerpc/powerpc/pthread_md.c76
-rw-r--r--lib/libpthread/arch/sparc64/Makefile.inc5
-rw-r--r--lib/libpthread/arch/sparc64/include/atomic_ops.h75
-rw-r--r--lib/libpthread/arch/sparc64/include/pthread_md.h254
-rw-r--r--lib/libpthread/arch/sparc64/sparc64/assym.s15
-rw-r--r--lib/libpthread/arch/sparc64/sparc64/pthread_md.c91
-rw-r--r--lib/libpthread/arch/sparc64/sparc64/thr_getcontext.S87
43 files changed, 4708 insertions, 0 deletions
diff --git a/lib/libpthread/arch/alpha/Makefile.inc b/lib/libpthread/arch/alpha/Makefile.inc
new file mode 100644
index 0000000..7bb3ad9
--- /dev/null
+++ b/lib/libpthread/arch/alpha/Makefile.inc
@@ -0,0 +1,5 @@
+# $FreeBSD$
+
+.PATH: ${.CURDIR}/arch/${MACHINE_ARCH}/${MACHINE_ARCH}
+
+SRCS+= enter_uts.S context.S pthread_md.c
diff --git a/lib/libpthread/arch/alpha/alpha/context.S b/lib/libpthread/arch/alpha/alpha/context.S
new file mode 100644
index 0000000..6ef42b6
--- /dev/null
+++ b/lib/libpthread/arch/alpha/alpha/context.S
@@ -0,0 +1,353 @@
+/*
+ * Copyright (c) 2001,3 Daniel Eischen <deischen@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Neither the name of the author nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+/*
+ * Copyright (c) 1994, 1995 Carnegie-Mellon University.
+ * All rights reserved.
+ *
+ * Author: Chris G. Demetriou
+ *
+ * Permission to use, copy, modify and distribute this software and
+ * its documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ *
+ */
+
+#include <machine/asm.h>
+__FBSDID("$FreeBSD$");
+
+/* #include <machine/frame.h> */
+#define FRAME_V0 0
+#define FRAME_T0 1
+#define FRAME_T1 2
+#define FRAME_T2 3
+#define FRAME_T3 4
+#define FRAME_T4 5
+#define FRAME_T5 6
+#define FRAME_T6 7
+#define FRAME_T7 8
+#define FRAME_S0 9
+#define FRAME_S1 10
+#define FRAME_S2 11
+#define FRAME_S3 12
+#define FRAME_S4 13
+#define FRAME_S5 14
+#define FRAME_S6 15
+#define FRAME_A3 16
+#define FRAME_A4 17
+#define FRAME_A5 18
+#define FRAME_RA 23
+#define FRAME_T12 24
+#define FRAME_AT 25
+#define FRAME_SP 26
+#define FRAME_TRAPARG_A0 28
+#define FRAME_TRAPARG_A1 29
+#define FRAME_TRAPARG_A2 30
+#define FRAME_PC (FRAME_TRAPARG_A2 + 1 + 1)
+
+/* #include <machine/reg.h> */
+#define R_V0 0
+#define R_T0 1
+#define R_T1 2
+#define R_T2 3
+#define R_T3 4
+#define R_T4 5
+#define R_T5 6
+#define R_T6 7
+#define R_T7 8
+#define R_S0 9
+#define R_S1 10
+#define R_S2 11
+#define R_S3 12
+#define R_S4 13
+#define R_S5 14
+#define R_S6 15
+#define R_A0 16
+#define R_A1 17
+#define R_A2 18
+#define R_A3 19
+#define R_A4 20
+#define R_A5 21
+#define R_T8 22
+#define R_T9 23
+#define R_T10 24
+#define R_T11 25
+#define R_RA 26
+#define R_T12 27
+#define R_SP 30
+#define R_ZERO 31
+
+/*
+ * XXX - The rev id's are defined in <machine/ucontext.h>
+ */
+#define MC_FMT_OFFSET 73*8 /* offset to format from mcontext */
+#define REV0_SIGFRAME 0x0001 /* rev R0 sigcontext format */
+#define REV0_TRAPFRAME 0x0002 /* rev R0 trapframe format */
+
+/*
+ * int _alpha_restore_context(const mcontext_t *mcp,
+ * intptr_t val, intptr_t *loc);
+ *
+ * The format of the context is verified at the beginning.
+ * Returns -1 if invalid format.
+ */
+ .set noreorder
+LEAF(_alpha_restore_context, 3)
+	LDGP(pv)
+	bne	a0, Lsc1		/* argument null? */
+Lscbad:	ldiq	v0, -1			/* return -1 */
+	br	Lscend
+Lsc1:	ldq	t1, MC_FMT_OFFSET(a0)	/* is mcontext valid format? */
+	ldiq	t0, REV0_TRAPFRAME
+	cmpeq	t0, t1, t0		/* is it trapframe format? */
+	bne	t0, Lsc_fp		/* if so, check fp state */
+	ldiq	t0, REV0_SIGFRAME
+	cmpeq	t0, t1, t0		/* is it sigcontext format? */
+	beq	t0, Lscbad
+	/* supposedly sigcontext format, check magic number */
+	ldiq	t0, 0xACEDBADE		/* check magic number */
+	ldq	t1, ((R_ZERO + 1) * 8)(a0) /* magic in mc_regs[R_ZERO] */
+	cmpeq	t0, t1, t0
+	beq	t0, Lscbad
+	/* restore floating point regs first */
+Lsc_fp:	ldq	t0, ((71 + 1) * 8)(a0)	/* if FP regs not saved (mc_ownedfp == 0), */
+	beq	t0, Lsc2		/* skip setting FP regs */
+	ldt	$f0, ((37 + 1) * 8)(a0)	/* restore FP regs using */
+	ldt	$f1, ((38 + 1) * 8)(a0)	/* hw name */
+	ldt	$f2, ((39 + 1) * 8)(a0)
+	ldt	$f3, ((40 + 1) * 8)(a0)
+	ldt	$f4, ((41 + 1) * 8)(a0)
+	ldt	$f5, ((42 + 1) * 8)(a0)
+	ldt	$f6, ((43 + 1) * 8)(a0)
+	ldt	$f7, ((44 + 1) * 8)(a0)
+	ldt	$f8, ((45 + 1) * 8)(a0)
+	ldt	$f9, ((46 + 1) * 8)(a0)
+	ldt	$f10, ((47 + 1) * 8)(a0)
+	ldt	$f11, ((48 + 1) * 8)(a0)
+	ldt	$f12, ((49 + 1) * 8)(a0)
+	ldt	$f13, ((50 + 1) * 8)(a0)
+	ldt	$f14, ((51 + 1) * 8)(a0)
+	ldt	$f15, ((52 + 1) * 8)(a0)
+	ldt	$f16, ((53 + 1) * 8)(a0)
+	ldt	$f17, ((54 + 1) * 8)(a0)
+	ldt	$f18, ((55 + 1) * 8)(a0)
+	ldt	$f19, ((56 + 1) * 8)(a0)
+	ldt	$f20, ((57 + 1) * 8)(a0)
+	ldt	$f21, ((58 + 1) * 8)(a0)
+	ldt	$f22, ((59 + 1) * 8)(a0)
+	ldt	$f23, ((60 + 1) * 8)(a0)
+	ldt	$f24, ((61 + 1) * 8)(a0)
+	ldt	$f25, ((62 + 1) * 8)(a0)
+	ldt	$f26, ((63 + 1) * 8)(a0)
+	ldt	$f27, ((64 + 1) * 8)(a0)
+	.set	noat
+	ldt	$f28, ((65 + 1) * 8)(a0)	/* $f28 aliases AT; disable AT expansion */
+	.set	at
+	ldt	$f29, ((66 + 1) * 8)(a0)
+	ldt	$f30, ((67 + 1) * 8)(a0)
+	/* $f31 is hardwired zero */
+	ldt	ft0, ((69 + 1) * 8)(a0)	/* restore FP control reg */
+	mt_fpcr	ft0
+Lsc2:	ldiq	t0, REV0_SIGFRAME	/* check the context format */
+	ldq	t1, MC_FMT_OFFSET(a0)	/* again. */
+	cmpeq	t0, t1, t0		/* is it sigcontext format? */
+	bne	t0, Lsc_sc
+	/* trapframe format */
+	ldq	v0, ((FRAME_V0 + 1) * 8)(a0)	/* restore v0 */
+	ldq	t0, ((FRAME_T0 + 1) * 8)(a0)	/* restore t0-t7 */
+	ldq	t1, ((FRAME_T1 + 1) * 8)(a0)
+	ldq	t2, ((FRAME_T2 + 1) * 8)(a0)
+	ldq	t3, ((FRAME_T3 + 1) * 8)(a0)
+	ldq	t4, ((FRAME_T4 + 1) * 8)(a0)
+	ldq	t5, ((FRAME_T5 + 1) * 8)(a0)
+	ldq	t6, ((FRAME_T6 + 1) * 8)(a0)
+	ldq	t7, ((FRAME_T7 + 1) * 8)(a0)
+	ldq	s0, ((FRAME_S0 + 1) * 8)(a0)	/* restore s0-s6 */
+	ldq	s1, ((FRAME_S1 + 1) * 8)(a0)
+	ldq	s2, ((FRAME_S2 + 1) * 8)(a0)
+	ldq	s3, ((FRAME_S3 + 1) * 8)(a0)
+	ldq	s4, ((FRAME_S4 + 1) * 8)(a0)
+	ldq	s5, ((FRAME_S5 + 1) * 8)(a0)
+	ldq	s6, ((FRAME_S6 + 1) * 8)(a0)
+	ldq	a4, ((FRAME_A4 + 1) * 8)(a0)	/* restore a4, a5 */
+	ldq	a5, ((FRAME_A5 + 1) * 8)(a0)
+	ldq	ra, ((FRAME_RA + 1) * 8)(a0)
+	ldq	sp, ((FRAME_SP + 1) * 8)(a0)
+	subq	sp, 16, sp		/* save room on stack */
+	ldq	a3, ((FRAME_TRAPARG_A1 + 1) * 8)(a0)
+	stq	a3, 0(a0)		/* XXX comment claimed "save a1 on stack", but this stores at 0(a0) (mcontext) while Lsc4 reloads a1 from 0(sp) -- verify (should this be 0(sp)?) */
+	ldq	a3, ((FRAME_TRAPARG_A2 + 1) * 8)(a0)
+	stq	a3, 8(a0)		/* XXX same concern: stored at 8(a0), Lsc4 reloads a2 from 8(sp) -- verify */
+	.set	noat
+	ldq	at_reg, ((FRAME_PC + 1) * 8)(a0)	/* PC at time of trap? XXX at_reg appears unused after this load -- verify */
+	.set	at
+	ldq	a3, ((FRAME_A3 + 1) * 8)(a0)	/* restore a3 */
+	ldq	a0, ((FRAME_TRAPARG_A0 + 1) * 8)(a0)	/* restore a0 (last: a0 was the mcontext base) */
+	br	Lsc3
+Lsc_sc:	/* sigcontext format */
+	ldq	v0, ((R_V0 + 1) * 8)(a0)	/* restore v0 */
+	ldq	t0, ((R_T0 + 1) * 8)(a0)	/* restore t0-t7 */
+	ldq	t1, ((R_T1 + 1) * 8)(a0)
+	ldq	t2, ((R_T2 + 1) * 8)(a0)
+	ldq	t3, ((R_T3 + 1) * 8)(a0)
+	ldq	t4, ((R_T4 + 1) * 8)(a0)
+	ldq	t5, ((R_T5 + 1) * 8)(a0)
+	ldq	t6, ((R_T6 + 1) * 8)(a0)
+	ldq	t7, ((R_T7 + 1) * 8)(a0)
+	ldq	s0, ((R_S0 + 1) * 8)(a0)	/* restore s0-s6 */
+	ldq	s1, ((R_S1 + 1) * 8)(a0)
+	ldq	s2, ((R_S2 + 1) * 8)(a0)
+	ldq	s3, ((R_S3 + 1) * 8)(a0)
+	ldq	s4, ((R_S4 + 1) * 8)(a0)
+	ldq	s5, ((R_S5 + 1) * 8)(a0)
+	ldq	s6, ((R_S6 + 1) * 8)(a0)
+	ldq	a4, ((R_A4 + 1) * 8)(a0)	/* restore a4, a5 */
+	ldq	a5, ((R_A5 + 1) * 8)(a0)
+	ldq	ra, ((R_RA + 1) * 8)(a0)
+	ldq	sp, ((R_SP + 1) * 8)(a0)
+	subq	sp, 16, sp		/* save room on stack */
+	ldq	a3, ((R_A1 + 1) * 8)(a0)	/* get a1 */
+	stq	a3, 0(a0)		/* XXX comment claimed "save a1 on stack"; stores at 0(a0) but Lsc4 reads 0(sp) -- verify */
+	ldq	a3, ((R_A2 + 1) * 8)(a0)	/* get a2 */
+	stq	a3, 8(a0)		/* XXX same concern: stored at 8(a0), reloaded from 8(sp) -- verify */
+	ldq	a3, ((R_A3 + 1) * 8)(a0)	/* restore a3 */
+	ldq	a0, ((R_A0 + 1) * 8)(a0)	/* restore a0 (last: a0 was the mcontext base) */
+Lsc3:	beq	a2, Lsc4		/* loc == NULL? then skip *loc = val */
+	stq	a1, 0(a2)		/* *loc = val (a1), after regs are restored */
+Lsc4:	ldq	a1, 0(sp)		/* restore a1, a2 */
+	ldq	a2, 8(sp)
+	addq	sp, 16, sp		/* restore stack */
+Lscend:	RET
+END(_alpha_restore_context)
+
+
+/*
+ * int _alpha_save_context(mcontext_t *);
+ *
+ * Always save in trapframe format. Floating point registers are
+ * saved but may be optimized away later (see comments below).
+ */
+LEAF(_alpha_save_context, 1)
+	LDGP(pv)
+	bne	a0, Lgc1		/* argument null? */
+	ldiq	v0, -1			/* return -1 */
+	br	Lgcend
+Lgc1:	ldiq	v0, 1			/* save_context returns 1 (resume via restore returns 0), */
+	stq	v0, ((FRAME_V0 + 1) * 8)(a0)	/* so save 1 in v0 */
+	stq	t0, ((FRAME_T0 + 1) * 8)(a0)	/* save t0-t7 */
+	stq	t1, ((FRAME_T1 + 1) * 8)(a0)
+	stq	t2, ((FRAME_T2 + 1) * 8)(a0)
+	stq	t3, ((FRAME_T3 + 1) * 8)(a0)
+	stq	t4, ((FRAME_T4 + 1) * 8)(a0)
+	stq	t5, ((FRAME_T5 + 1) * 8)(a0)
+	stq	t6, ((FRAME_T6 + 1) * 8)(a0)
+	stq	t7, ((FRAME_T7 + 1) * 8)(a0)
+	stq	s0, ((FRAME_S0 + 1) * 8)(a0)	/* save s0-s6 */
+	stq	s1, ((FRAME_S1 + 1) * 8)(a0)
+	stq	s2, ((FRAME_S2 + 1) * 8)(a0)
+	stq	s3, ((FRAME_S3 + 1) * 8)(a0)
+	stq	s4, ((FRAME_S4 + 1) * 8)(a0)
+	stq	s5, ((FRAME_S5 + 1) * 8)(a0)
+	stq	s6, ((FRAME_S6 + 1) * 8)(a0)
+	stq	a0, ((FRAME_TRAPARG_A0 + 1) * 8)(a0)	/* save a0-a5 */
+	stq	a1, ((FRAME_TRAPARG_A1 + 1) * 8)(a0)
+	stq	a2, ((FRAME_TRAPARG_A2 + 1) * 8)(a0)
+	stq	a3, ((FRAME_A3 + 1) * 8)(a0)
+	stq	a4, ((FRAME_A4 + 1) * 8)(a0)
+	stq	a5, ((FRAME_A5 + 1) * 8)(a0)
+	stq	ra, ((FRAME_RA + 1) * 8)(a0)
+	stq	sp, ((FRAME_SP + 1) * 8)(a0)
+	ldiq	t0, REV0_TRAPFRAME	/* store trapframe format in */
+	stq	t0, MC_FMT_OFFSET(a0)	/* mcp->mc_format */
+	/*
+	 * XXX - Do we really need to save floating point registers?
+	 *
+	 * This is an explicit call to get the current context, so
+	 * shouldn't the caller be done with the floating point registers?
+	 * Contexts formed by involuntary switches, such as signal delivery,
+	 * should have floating point registers saved by the kernel.
+	 */
+#if 1
+	stq	zero, ((71 + 1) * 8)(a0)	/* FP regs are not saved (mc_ownedfp = 0) */
+#else
+	ldiq	t0, 1			/* say we've used FP, */
+	stq	t0, ((71 + 1) * 8)(a0)	/* mc_ownedfp = 1 */
+	stt	$f0, ((37 + 1) * 8)(a0)	/* save first register, using */
+	stt	$f1, ((38 + 1) * 8)(a0)	/* hw name etc. */
+	stt	$f2, ((39 + 1) * 8)(a0)
+	stt	$f3, ((40 + 1) * 8)(a0)
+	stt	$f4, ((41 + 1) * 8)(a0)
+	stt	$f5, ((42 + 1) * 8)(a0)
+	stt	$f6, ((43 + 1) * 8)(a0)
+	stt	$f7, ((44 + 1) * 8)(a0)
+	stt	$f8, ((45 + 1) * 8)(a0)
+	stt	$f9, ((46 + 1) * 8)(a0)
+	stt	$f10, ((47 + 1) * 8)(a0)
+	stt	$f11, ((48 + 1) * 8)(a0)
+	stt	$f12, ((49 + 1) * 8)(a0)
+	stt	$f13, ((50 + 1) * 8)(a0)
+	stt	$f14, ((51 + 1) * 8)(a0)
+	stt	$f15, ((52 + 1) * 8)(a0)
+	stt	$f16, ((53 + 1) * 8)(a0)
+	stt	$f17, ((54 + 1) * 8)(a0)
+	stt	$f18, ((55 + 1) * 8)(a0)
+	stt	$f19, ((56 + 1) * 8)(a0)
+	stt	$f20, ((57 + 1) * 8)(a0)
+	stt	$f21, ((58 + 1) * 8)(a0)
+	stt	$f22, ((59 + 1) * 8)(a0)
+	stt	$f23, ((60 + 1) * 8)(a0)
+	stt	$f24, ((61 + 1) * 8)(a0)
+	stt	$f25, ((62 + 1) * 8)(a0)
+	stt	$f26, ((63 + 1) * 8)(a0)
+	stt	$f27, ((64 + 1) * 8)(a0)
+	.set	noat
+	stt	$f28, ((65 + 1) * 8)(a0)	/* $f28 aliases AT; disable AT expansion */
+	.set	at
+	stt	$f29, ((66 + 1) * 8)(a0)
+	stt	$f30, ((67 + 1) * 8)(a0)
+	/* $f31 is hardwired zero */
+#endif
+	mf_fpcr	ft0			/* get FP control reg */
+	stt	ft0, ((69 + 1) * 8)(a0)	/* and store it in mc_fpcr */
+	stq	zero, ((70 + 1) * 8)(a0)	/* FP software control XXX */
+	mov	zero, v0		/* return zero */
+Lgcend:	RET
+END(_alpha_save_context)
diff --git a/lib/libpthread/arch/alpha/alpha/enter_uts.S b/lib/libpthread/arch/alpha/alpha/enter_uts.S
new file mode 100644
index 0000000..6de3bd7
--- /dev/null
+++ b/lib/libpthread/arch/alpha/alpha/enter_uts.S
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2003 Daniel Eischen <deischen@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Neither the name of the author nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <machine/asm.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * _alpha_enter_uts(struct kse_mailbox *km, kse_func_t uts, void *stack,
+ * long stacksz);
+ */
+LEAF(_alpha_enter_uts, 4)
+	addq	a2, a3, a2		/* a2 = stack + stacksz (top of UTS stack) */
+	ldiq	a3, ~0xf		/* 16-byte alignment mask */
+	and	a2, a3, a2		/* round stack top down to 16 bytes */
+	mov	a2, sp			/* switch onto the UTS stack */
+	mov	a1, ra			/* RET target = uts function */
+	mov	a1, t12			/* t12/pv = entry address, for callee's LDGP */
+	RET				/* "return" into uts(km); a0 (km) unchanged */
+	END(_alpha_enter_uts)
diff --git a/lib/libpthread/arch/alpha/alpha/pthread_md.c b/lib/libpthread/arch/alpha/alpha/pthread_md.c
new file mode 100644
index 0000000..c8445b1
--- /dev/null
+++ b/lib/libpthread/arch/alpha/alpha/pthread_md.c
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2003 Daniel Eischen <deischen@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Neither the name of the author nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <stdlib.h>
+#include <strings.h>
+#include "pthread_md.h"
+
+/*
+ * The constructors.
+ */
+struct tcb *
+_tcb_ctor(struct pthread *thread, int initial)	/* 'initial' is unused here */
+{
+	struct tcb *tcb;
+
+	if ((tcb = malloc(sizeof(struct tcb))) != NULL) {
+		bzero(tcb, sizeof(struct tcb));	/* zero all fields, incl. tcb_tmbx */
+		tcb->tcb_thread = thread;	/* back-pointer to owning thread */
+		/* Allocate TDV */		/* XXX dynamic TLS vector not implemented yet */
+	}
+	return (tcb);	/* NULL on allocation failure */
+}
+
+void
+_tcb_dtor(struct tcb *tcb)	/* release a tcb made by _tcb_ctor() */
+{
+	/* Free TDV */	/* XXX nothing to free until TDV allocation exists */
+	free(tcb);
+}
+
+struct kcb *
+_kcb_ctor(struct kse *kse)	/* allocate and initialize a per-KSE control block */
+{
+	struct kcb *kcb;
+
+	if ((kcb = malloc(sizeof(struct kcb))) != NULL) {
+		bzero(kcb, sizeof(struct kcb));	/* zero all fields, incl. kcb_kmbx */
+		kcb->kcb_faketcb.tcb_isfake = 1;	/* fake tcb marks "no current thread" */
+		kcb->kcb_faketcb.tcb_tmbx.tm_flags = TMF_NOUPCALL;	/* never upcall while on fake tcb */
+		kcb->kcb_curtcb = &kcb->kcb_faketcb;	/* start with no real thread installed */
+		kcb->kcb_kse = kse;	/* back-pointer to owning KSE */
+	}
+	return (kcb);	/* NULL on allocation failure */
+}
+
+void
+_kcb_dtor(struct kcb *kcb)	/* release a kcb made by _kcb_ctor() */
+{
+	free(kcb);
+}
diff --git a/lib/libpthread/arch/alpha/include/atomic_ops.h b/lib/libpthread/arch/alpha/include/atomic_ops.h
new file mode 100644
index 0000000..7c3e62b
--- /dev/null
+++ b/lib/libpthread/arch/alpha/include/atomic_ops.h
@@ -0,0 +1,75 @@
+/*-
+ * Copyright (c) 2003 Daniel Eischen <deischen@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Neither the name of the author nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _ATOMIC_OPS_H_
+#define _ATOMIC_OPS_H_
+
+/*
+ * Atomic swap:
+ * Atomic (tmp = *dst, *dst = val), then *res = tmp
+ *
+ * void atomic_swap_long(long *dst, long val, long *res);
+ *
+ * Implemented with alpha load-locked/store-conditional (ldq_l/stq_c);
+ * retries until the conditional store succeeds.
+ */
+static inline void
+atomic_swap_long(long *dst, long val, long *res)
+{
+	/* $1 and $2 are t0 and t1 respectively. */
+	__asm __volatile (
+	"	ldq	$1, %1\n"	/* get cache line before lock */
+	"1:	ldq_l	$1, %1\n"	/* load *dst asserting lock */
+	"	mov	%2, $2\n"	/* save value to be swapped */
+	"	stq_c	$2, %1\n"	/* attempt the store; $2 clobbered (1 on success, 0 on failure) */
+	"	beq	$2, 1b\n"	/* it didn't work, loop */
+	"	stq	$1, %0\n"	/* save value of *dst in *res */
+	"	mb	\n"		/* memory barrier: order the swap before later accesses */
+	: "+m"(*res)
+	: "m"(*dst), "r"(val)
+	: "memory", "$1", "$2");	/* clobber t0 and t1 */
+}
+
+static inline void
+atomic_swap_int(int *dst, int val, int *res)	/* 32-bit (ldl_l/stl_c) variant of atomic_swap_long */
+{
+	/* $1 and $2 are t0 and t1 respectively. */
+	__asm __volatile (
+	"	ldl	$1, %1\n"	/* get cache line before lock */
+	"1:	ldl_l	$1, %1\n"	/* load *dst asserting lock */
+	"	mov	%2, $2\n"	/* save value to be swapped */
+	"	stl_c	$2, %1\n"	/* attempt the store; $2 clobbered (1 on success, 0 on failure) */
+	"	beq	$2, 1b\n"	/* it didn't work, loop */
+	"	stl	$1, %0\n"	/* save value of *dst in *res */
+	"	mb	\n"		/* memory barrier: order the swap before later accesses */
+	: "+m"(*res)
+	: "m"(*dst), "r"(val)
+	: "memory", "$1", "$2");	/* clobber t0 and t1 */
+}
+
+#define atomic_swap_ptr(d, v, r) \
+ atomic_swap_long((long *)(d), (long)(v), (long *)(r))
+
+#endif
diff --git a/lib/libpthread/arch/alpha/include/pthread_md.h b/lib/libpthread/arch/alpha/include/pthread_md.h
new file mode 100644
index 0000000..c7a85f1
--- /dev/null
+++ b/lib/libpthread/arch/alpha/include/pthread_md.h
@@ -0,0 +1,247 @@
+/*
+ * Copyright (c) 2003 Marcel Moolenaar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _PTHREAD_MD_H_
+#define _PTHREAD_MD_H_
+
+#include <sys/kse.h>
+#include <stddef.h>
+#include <ucontext.h>
+
+#define KSE_STACKSIZE 16384
+#define DTV_OFFSET offsetof(struct tcb, tcb_tp.tp_tdv)
+
+#define THR_GETCONTEXT(ucp) _alpha_save_context(&(ucp)->uc_mcontext)
+#define THR_SETCONTEXT(ucp) PANIC("THR_SETCONTEXT() now in use!\n")
+
+#define PER_THREAD
+
+struct kcb;
+struct kse;
+struct pthread;
+struct tcb;
+struct tdv; /* We don't know what this is yet? */
+
+/*
+ * tp points to one of these. We define the static TLS as an array
+ * of long double to enforce 16-byte alignment of the TLS memory,
+ * struct alpha_tp, struct tcb and also struct kcb. Both static and
+ * dynamic allocation of any of these structures will result in a
+ * valid, well-aligned thread pointer.
+ */
+struct alpha_tp {
+	struct tdv *tp_tdv;	/* dynamic TLS */
+	uint64_t _reserved_;	/* pad so tp_tls starts 16 bytes into the struct */
+	long double tp_tls[0];	/* static TLS (16-byte aligned, see comment above) */
+};
+
+struct tcb {
+	struct kse_thr_mailbox tcb_tmbx;	/* thread's kernel mailbox */
+	struct pthread *tcb_thread;	/* owning pthread */
+	struct kcb *tcb_curkcb;	/* kcb this thread currently runs on */
+	long tcb_isfake;	/* nonzero for a kcb's built-in fake tcb */
+	struct alpha_tp tcb_tp;	/* the thread pointer (tp) points here */
+};
+
+struct kcb {
+	struct kse_mailbox kcb_kmbx;	/* KSE's kernel mailbox */
+	struct tcb kcb_faketcb;	/* stand-in tcb used when no thread is current */
+	struct tcb *kcb_curtcb;	/* currently installed tcb (fake or real) */
+	struct kse *kcb_kse;	/* owning KSE */
+};
+
+#define _tp __builtin_thread_pointer()
+#define _tcb ((struct tcb*)((char*)(_tp) - offsetof(struct tcb, tcb_tp)))
+
+/*
+ * The kcb and tcb constructors.
+ */
+struct tcb *_tcb_ctor(struct pthread *, int);
+void _tcb_dtor(struct tcb *);
+struct kcb *_kcb_ctor(struct kse *kse);
+void _kcb_dtor(struct kcb *);
+
+/* Called from the KSE to set its private data. */
+static __inline void
+_kcb_set(struct kcb *kcb)
+{
+	/* There is no thread yet; make tp point at the fake tcb's alpha_tp. */
+	__builtin_set_thread_pointer(&kcb->kcb_faketcb.tcb_tp);
+}
+
+/*
+ * Get the current kcb.
+ *
+ * This can only be called while in a critical region; don't
+ * worry about having the kcb changed out from under us.
+ */
+static __inline struct kcb *
+_kcb_get(void)
+{
+	return (_tcb->tcb_curkcb);	/* stable only inside a critical region */
+}
+
+/*
+ * Enter a critical region.
+ *
+ * Read and clear km_curthread in the kse mailbox.
+ */
+static __inline struct kse_thr_mailbox *
+_kcb_critical_enter(void)
+{
+	struct kse_thr_mailbox *crit;
+	uint32_t flags;
+
+	if (_tcb->tcb_isfake != 0) {
+		/*
+		 * We already are in a critical region since
+		 * there is no current thread.
+		 */
+		crit = NULL;
+	} else {
+		flags = _tcb->tcb_tmbx.tm_flags;
+		_tcb->tcb_tmbx.tm_flags |= TMF_NOUPCALL;	/* block upcalls while touching km_curthread */
+		crit = _tcb->tcb_curkcb->kcb_kmbx.km_curthread;	/* token returned to _kcb_critical_leave() */
+		_tcb->tcb_curkcb->kcb_kmbx.km_curthread = NULL;	/* NULL km_curthread == in critical region */
+		_tcb->tcb_tmbx.tm_flags = flags;	/* restore caller's flags */
+	}
+	return (crit);
+}
+
+static __inline void
+_kcb_critical_leave(struct kse_thr_mailbox *crit)
+{
+	/* No need to do anything if this is a fake tcb. */
+	if (_tcb->tcb_isfake == 0)
+		_tcb->tcb_curkcb->kcb_kmbx.km_curthread = crit;	/* restore token from _kcb_critical_enter() */
+}
+
+static __inline int
+_kcb_in_critical(void)
+{
+	uint32_t flags;
+	int ret;
+
+	if (_tcb->tcb_isfake != 0) {
+		/*
+		 * We are in a critical region since there is no
+		 * current thread.
+		 */
+		ret = 1;
+	} else {
+		flags = _tcb->tcb_tmbx.tm_flags;
+		_tcb->tcb_tmbx.tm_flags |= TMF_NOUPCALL;	/* block upcalls while reading km_curthread */
+		ret = (_tcb->tcb_curkcb->kcb_kmbx.km_curthread == NULL);	/* NULL == critical (see _kcb_critical_enter) */
+		_tcb->tcb_tmbx.tm_flags = flags;	/* restore caller's flags */
+	}
+	return (ret);
+}
+
+static __inline void
+_tcb_set(struct kcb *kcb, struct tcb *tcb)
+{
+	if (tcb == NULL)
+		tcb = &kcb->kcb_faketcb;	/* NULL means "no thread": install the fake tcb */
+	kcb->kcb_curtcb = tcb;	/* link kcb -> tcb ... */
+	tcb->tcb_curkcb = kcb;	/* ... and tcb -> kcb */
+	__builtin_set_thread_pointer(&tcb->tcb_tp);	/* make it the current thread pointer */
+}
+
+static __inline struct tcb *
+_tcb_get(void)
+{
+	return (_tcb);	/* recovered from the thread pointer; see _tcb macro */
+}
+
+static __inline struct pthread *
+_get_curthread(void)
+{
+	return (_tcb->tcb_thread);	/* NULL while running on a fake tcb (bzero'd in _kcb_ctor) */
+}
+
+/*
+ * Get the current kse.
+ *
+ * Like _kcb_get(), this can only be called while in a critical region.
+ */
+static __inline struct kse *
+_get_curkse(void)
+{
+	return (_tcb->tcb_curkcb->kcb_kse);	/* valid only inside a critical region */
+}
+
+void _alpha_enter_uts(struct kse_mailbox *km, kse_func_t uts, void *stack,
+ size_t stacksz);
+int _alpha_restore_context(mcontext_t *mc, intptr_t val, intptr_t *loc);
+int _alpha_save_context(mcontext_t *mc);
+
+static __inline int
+_thread_enter_uts(struct tcb *tcb, struct kcb *kcb)
+{
+	if (_alpha_save_context(&tcb->tcb_tmbx.tm_context.uc_mcontext) == 0) {
+		/* Make the fake tcb the current thread. */
+		kcb->kcb_curtcb = &kcb->kcb_faketcb;
+		__builtin_set_thread_pointer(&kcb->kcb_faketcb.tcb_tp);
+		_alpha_enter_uts(&kcb->kcb_kmbx, kcb->kcb_kmbx.km_func,
+		    kcb->kcb_kmbx.km_stack.ss_sp,
+		    kcb->kcb_kmbx.km_stack.ss_size);	/* switch to UTS stack and call km_func */
+		/* We should not reach here. */
+		return (-1);
+	}
+	return (0);	/* save_context returned 1: we were resumed via restore_context */
+}
+
+static __inline int
+_thread_switch(struct kcb *kcb, struct tcb *tcb, int setmbox)
+{
+	extern int _libkse_debug;
+
+	_tcb_set(kcb, tcb);	/* install tcb as the current thread first */
+	if (_libkse_debug == 0) {
+		tcb->tcb_tmbx.tm_lwp = kcb->kcb_kmbx.km_lwp;
+		if (setmbox != 0)
+			_alpha_restore_context(
+			    &tcb->tcb_tmbx.tm_context.uc_mcontext,
+			    (intptr_t)&tcb->tcb_tmbx,
+			    (intptr_t *)&kcb->kcb_kmbx.km_curthread);	/* also set km_curthread = &tm_mbx during restore */
+		else
+			_alpha_restore_context(
+			    &tcb->tcb_tmbx.tm_context.uc_mcontext,
+			    0, NULL);	/* plain restore; km_curthread untouched */
+	} else {
+		/* debugger attached: let the kernel do the switch-in */
+		if (setmbox)
+			kse_switchin(&tcb->tcb_tmbx, KSE_SWITCHIN_SETTMBX);
+		else
+			kse_switchin(&tcb->tcb_tmbx, 0);
+	}
+
+	/* We should not reach here. */
+	return (-1);
+}
+
+#endif /* _PTHREAD_MD_H_ */
diff --git a/lib/libpthread/arch/amd64/Makefile.inc b/lib/libpthread/arch/amd64/Makefile.inc
new file mode 100644
index 0000000..c8b0362
--- /dev/null
+++ b/lib/libpthread/arch/amd64/Makefile.inc
@@ -0,0 +1,5 @@
+# $FreeBSD$
+
+.PATH: ${.CURDIR}/arch/${MACHINE_ARCH}/${MACHINE_ARCH}
+
+SRCS+= context.S enter_uts.S pthread_md.c
diff --git a/lib/libpthread/arch/amd64/amd64/context.S b/lib/libpthread/arch/amd64/amd64/context.S
new file mode 100644
index 0000000..6a6b558
--- /dev/null
+++ b/lib/libpthread/arch/amd64/amd64/context.S
@@ -0,0 +1,217 @@
+/*
+ * Copyright (c) 2003 Daniel Eischen <deischen@freebsd.org>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Neither the name of the author nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <machine/asm.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * The following notes ("cheat sheet") were provided by Peter Wemm.
+ *
+ * scratch:
+ * rax (1st return)
+ * rcx (4th arg)
+ * rdx (3rd arg, 2nd return)
+ * rsi (2nd arg)
+ * rdi (1st arg)
+ * r8 (5th arg)
+ * r9 (6th arg)
+ * r10 (temp, static chain?)
+ * r11 (temp)
+ *
+ * preserved:
+ * rbx (base pointer)
+ * rsp (stack)
+ * rbp (frame)
+ * r12-r15 (general)
+ *
+ * calls:
+ * rdi 1
+ * rsi 2
+ * rdx 3
+ * rcx 4
+ * r8 5
+ * r9 6
+ *
+ * return:
+ * rax 1
+ * rdx 2
+ *
+ * This means:
+ * arg1 goes in %rdi, arg2 in %rsi, etc. return value is %rax (and
+ * secondary return, eg: pipe(2), in %rdx) %rcx,%rsi,%rdi etc are
+ * trashed by making a call to something. %rbx,%rbp,%r12-15 are the
+ * only registers preserved across a call. Note that unlike i386,
+ * %rsi and %rdi are scratch rather than preserved. FPU is
+ * different, args are in SSE registers rather than the x87 stack.
+ *
+ * Aside from the register calling conventions, amd64 can be treated
+ * very much like i386. Things like setjmp/longjmp etc were literal
+ * translations from i386 but with the register names updated, etc.
+ * The main gotcha is that FPU save/restore is in SSE format, which
+ * means a sparse 512 byte FPU context.
+ */
+
+
+/*
+ * Where do we define these?
+ */
+#define MC_SIZE 800 /* sizeof mcontext_t */
+#define MC_LEN_OFFSET (25*8) /* offset to mc_len from mcontext */
+#define MC_FPFMT_OFFSET (26*8) /* offset to mc_fpformat from mcontext */
+#define MC_FPFMT_NODEV 0x10000
+#define MC_OWNEDFP_OFFSET (27*8) /* offset to mc_ownedfp from mcontext */
+#define MC_OWNEDFP_NONE 0x20000
+#define MC_OWNEDFP_FPU 0x20001
+#define MC_OWNEDFP_PCB 0x20002
+#define MC_FPREGS_OFFSET (28*8) /* offset to FP registers */
+#define MC_FP_CW_OFFSET (28*8) /* offset to FP control word */
+
+#define MC_RDI (1 * 8)
+#define MC_RSI (2 * 8)
+#define MC_RDX (3 * 8)
+#define MC_RCX (4 * 8)
+#define MC_R8 (5 * 8)
+#define MC_R9 (6 * 8)
+#define MC_RAX (7 * 8)
+#define MC_RBX (8 * 8)
+#define MC_RBP (9 * 8)
+#define MC_R10 (10 * 8)
+#define MC_R11 (11 * 8)
+#define MC_R12 (12 * 8)
+#define MC_R13 (13 * 8)
+#define MC_R14 (14 * 8)
+#define MC_R15 (15 * 8)
+#define MC_FLAGS (18 * 8)
+#define MC_RIP (20 * 8)
+#define MC_CS (21 * 8)
+#define MC_RFLAGS (22 * 8)
+#define MC_RSP (23 * 8)
+#define MC_SS (24 * 8)
+
+#define REDZONE 128 /* size of the red zone */
+
/*
 * int _amd64_save_context(mcontext_t *mcp)
 *
 * Saves the caller's machine context into *mcp so that a later
 * _amd64_restore_context() resumes just after this call site.
 *
 * Returns -1 if mcp is NULL and 0 after a direct save; when the
 * saved context is resumed, the call appears to return 1 (MC_RAX
 * is pre-loaded with 1 below).
 *
 * No values are saved to mc_trapno, mc_addr, mc_err and mc_cs.
 * For the FPU state, only the floating point control word is stored.
 */
ENTRY(_amd64_save_context)
	cmpq	$0, %rdi		/* check for null pointer */
	jne	1f
	movq	$-1, %rax
	jmp	2f
1:	movq	%rdi, MC_RDI(%rdi)
	movq	%rsi, MC_RSI(%rdi)
	movq	%rdx, MC_RDX(%rdi)
	movq	%rcx, MC_RCX(%rdi)
	movq	%r8, MC_R8(%rdi)
	movq	%r9, MC_R9(%rdi)
	movq	$1, MC_RAX(%rdi)	/* return 1 when restored */
	movq	%rbx, MC_RBX(%rdi)
	movq	%rbp, MC_RBP(%rdi)
	movq	%r10, MC_R10(%rdi)
	movq	%r11, MC_R11(%rdi)
	movq	%r12, MC_R12(%rdi)
	movq	%r13, MC_R13(%rdi)
	movq	%r14, MC_R14(%rdi)
	movq	%r15, MC_R15(%rdi)
	movq	(%rsp), %rax		/* get return address */
	movq	%rax, MC_RIP(%rdi)	/* save return address (%rip) */
	pushfq				/* get flags */
	popq	%rax
	movq	%rax, MC_RFLAGS(%rdi)	/* save flags */
	movq	%rsp, %rax		/* save %rsp as it will be after */
	addq	$8, %rax		/* our return address has been */
	movq	%rax, MC_RSP(%rdi)	/* popped off by ret */
	movw	%ss, MC_SS(%rdi)
	fnstcw	MC_FP_CW_OFFSET(%rdi)	/* save FPU control word */
	movq	$MC_OWNEDFP_NONE, MC_OWNEDFP_OFFSET(%rdi) /* no FP */
	movq	$MC_FPFMT_NODEV, MC_FPFMT_OFFSET(%rdi)
	movq	$MC_SIZE, MC_LEN_OFFSET(%rdi)	/* mark the context valid */
	xorq	%rax, %rax		/* return 0 */
2:	ret
+
/*
 * int _amd64_restore_context(mcontext_t *mcp, intptr_t val, intptr_t *loc)
 *
 * Resumes the context saved in *mcp.  If loc is non-NULL, *loc is set
 * to val once the context has been consumed (used to publish the
 * thread mailbox atomically with the switch).
 *
 * Returns -1 if mcp is NULL or *mcp is not a valid context; on
 * success it does not return to the caller -- execution resumes at
 * the saved %rip, where %rax holds the value stored there by
 * _amd64_save_context() (normally 1).
 */
ENTRY(_amd64_restore_context)
	cmpq	$0, %rdi		/* check for null pointer */
	jne	1f
	movq	$-1, %rax
	ret				/* BUGFIX: was "jmp 2f", which fell
					 * into the restore path below and
					 * dereferenced the NULL context */
1:	cmpq	$MC_SIZE, MC_LEN_OFFSET(%rdi) /* is context valid? */
	je	2f
	movq	$-1, %rax		/* bzzzt, invalid context */
	ret
2:	movq	MC_RCX(%rdi), %rcx
	movq	MC_R8(%rdi), %r8
	movq	MC_R9(%rdi), %r9
	movq	MC_RBX(%rdi), %rbx
	movq	MC_RBP(%rdi), %rbp
	movq	MC_R10(%rdi), %r10
	movq	MC_R11(%rdi), %r11
	movq	MC_R12(%rdi), %r12
	movq	MC_R13(%rdi), %r13
	movq	MC_R14(%rdi), %r14
	movq	MC_R15(%rdi), %r15
	/*
	 * If mc_ownedfp == MC_OWNEDFP_FPU || mc_ownedfp == MC_OWNEDFP_PCB,
	 * restore the saved XMM/SSE FP register state; otherwise just
	 * reinitialize the FPU and reload the control word.
	 */
	cmpq	$MC_OWNEDFP_NONE, MC_OWNEDFP_OFFSET(%rdi)
	je	4f
	cmpq	$MC_OWNEDFP_PCB, MC_OWNEDFP_OFFSET(%rdi)
	je	3f
	cmpq	$MC_OWNEDFP_FPU, MC_OWNEDFP_OFFSET(%rdi)
	jne	4f
3:	fxrstor	MC_FPREGS_OFFSET(%rdi)	/* restore XMM FP regs */
	jmp	5f
4:	fninit
	fldcw	MC_FP_CW_OFFSET(%rdi)
5:	movq	MC_RSP(%rdi), %rsp	/* switch to context stack */
	subq	$REDZONE, %rsp		/* scratch area below final %rsp */
	movq	MC_RIP(%rdi), %rax	/* return address on stack */
	pushq	%rax
	movq	MC_RDI(%rdi), %rax	/* rdi on stack */
	pushq	%rax
	movq	MC_RDX(%rdi), %rax	/* rdx on stack */
	pushq	%rax
	movq	MC_RSI(%rdi), %rax	/* rsi on stack */
	pushq	%rax
	movq	MC_RFLAGS(%rdi), %rax	/* flags on stack */
	pushq	%rax
	movq	MC_RAX(%rdi), %rax	/* restore rax */
	/* At this point we're done with the context. */
	cmpq	$0, %rdx		/* %rdx still holds the loc arg */
	je	6f
	movq	%rsi, (%rdx)		/* *loc = val (still in %rsi) */
6:	popfq				/* restore flags */
	popq	%rsi			/* restore rsi, rdx, and rdi */
	popq	%rdx
	popq	%rdi
	ret	$REDZONE		/* pop saved %rip, discard the
					 * REDZONE scratch area */
+
diff --git a/lib/libpthread/arch/amd64/amd64/enter_uts.S b/lib/libpthread/arch/amd64/amd64/enter_uts.S
new file mode 100644
index 0000000..fb0df87
--- /dev/null
+++ b/lib/libpthread/arch/amd64/amd64/enter_uts.S
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2003 Daniel Eischen <deischen@freebsd.org>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Neither the name of the author nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <machine/asm.h>
+__FBSDID("$FreeBSD$");
+
+
/*
 * void _amd64_enter_uts(struct kse_mailbox *km, kse_func_t uts,
 *     void *stack, size_t stacksz);
 *
 * Switch to the top of the UTS stack (stack + stacksz, aligned down
 * to 16 bytes) and call uts(km).  km is already in %rdi (arg 1) and
 * is left untouched for the callee.  Not expected to return.
 */
ENTRY(_amd64_enter_uts)
	addq	%rcx, %rdx		/* get stack base + size */
	andq	$~0xf, %rdx		/* align to 16 bytes */
	movq	%rdx, %rsp		/* switch to UTS stack */
	movq	%rdx, %rbp		/* set frame pointer */
	callq	*%rsi			/* uts(km) */
	ret
diff --git a/lib/libpthread/arch/amd64/amd64/pthread_md.c b/lib/libpthread/arch/amd64/amd64/pthread_md.c
new file mode 100644
index 0000000..3aceec7
--- /dev/null
+++ b/lib/libpthread/arch/amd64/amd64/pthread_md.c
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2003 Daniel Eischen <deischen@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Neither the name of the author nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <stdlib.h>
+#include <strings.h>
+#include "rtld_tls.h"
+#include "pthread_md.h"
+
+/*
+ * The constructors.
+ */
/*
 * Allocate and initialize a thread control block.
 *
 * thread  - the pthread this tcb will describe.
 * initial - nonzero for the initial (main) thread, whose static TLS
 *           base is read from %fs:0 and handed to _rtld_allocate_tls()
 *           so existing TLS data is preserved.
 *
 * Returns the new tcb (16-byte aligned so tcb_tmbx is aligned), or
 * NULL on allocation failure.
 */
struct tcb *
_tcb_ctor(struct pthread *thread, int initial)
{
	struct tcb *tcb;
	void *oldtls;

	if (initial) {
		/* %fs:0 is the current TCB self pointer (amd64 TLS). */
		__asm __volatile("movq %%fs:0, %0" : "=r" (oldtls));
	} else {
		oldtls = NULL;
	}

	tcb = _rtld_allocate_tls(oldtls, sizeof(struct tcb), 16);
	if (tcb) {
		tcb->tcb_thread = thread;
		/* Start with a clean kernel thread mailbox. */
		bzero(&tcb->tcb_tmbx, sizeof(tcb->tcb_tmbx));
	}

	return (tcb);
}
+
/* Release a tcb allocated by _tcb_ctor() (and its TLS block). */
void
_tcb_dtor(struct tcb *tcb)
{
	_rtld_free_tls(tcb, sizeof(struct tcb), 16);
}
+
+struct kcb *
+_kcb_ctor(struct kse *kse)
+{
+ struct kcb *kcb;
+
+ kcb = malloc(sizeof(struct kcb));
+ if (kcb != NULL) {
+ bzero(kcb, sizeof(struct kcb));
+ kcb->kcb_self = kcb;
+ kcb->kcb_kse = kse;
+ }
+ return (kcb);
+}
+
/* Release a kcb allocated by _kcb_ctor(). */
void
_kcb_dtor(struct kcb *kcb)
{
	free(kcb);
}
diff --git a/lib/libpthread/arch/amd64/include/atomic_ops.h b/lib/libpthread/arch/amd64/include/atomic_ops.h
new file mode 100644
index 0000000..980eb8e
--- /dev/null
+++ b/lib/libpthread/arch/amd64/include/atomic_ops.h
@@ -0,0 +1,57 @@
+/*-
+ * Copyright (c) 2001 Daniel Eischen <deischen@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Neither the name of the author nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _ATOMIC_OPS_H_
+#define _ATOMIC_OPS_H_
+
/*
 * Atomic swap:
 *	Atomic (tmp = *dst, *dst = val), then *res = tmp
 *
 * void atomic_swap64(intptr_t *dst, intptr_t val, intptr_t *res);
 */
static inline void
atomic_swap64(intptr_t *dst, intptr_t val, intptr_t *res)
{
	/*
	 * xchgq both reads and writes *dst and the val register, so
	 * they must be read/write operands ("+m"/"+r"); declaring
	 * them input-only lets the compiler assume they are unchanged.
	 * xchg with a memory operand is implicitly locked, which makes
	 * the swap atomic.
	 */
	__asm __volatile(
	    "xchgq %2, %1; movq %2, %0"
	    : "=m" (*res), "+m" (*dst), "+r" (val) : : "memory");
}
+
/*
 * Atomic (tmp = *dst, *dst = val), then *res = tmp, for plain ints.
 */
static inline void
atomic_swap_int(int *dst, int val, int *res)
{
	/* See atomic_swap64: xchgl modifies both *dst and val. */
	__asm __volatile(
	    "xchgl %2, %1; movl %2, %0"
	    : "=m" (*res), "+m" (*dst), "+r" (val) : : "memory");
}
+
+#define atomic_swap_ptr(d, v, r) \
+ atomic_swap64((intptr_t *)(d), (intptr_t)(v), (intptr_t *)(r))
+
+#endif
diff --git a/lib/libpthread/arch/amd64/include/pthread_md.h b/lib/libpthread/arch/amd64/include/pthread_md.h
new file mode 100644
index 0000000..a7da5df
--- /dev/null
+++ b/lib/libpthread/arch/amd64/include/pthread_md.h
@@ -0,0 +1,268 @@
+/*-
+ * Copyright (C) 2003 David Xu <davidxu@freebsd.org>
+ * Copyright (c) 2001 Daniel Eischen <deischen@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Neither the name of the author nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+/*
+ * Machine-dependent thread prototypes/definitions for the thread kernel.
+ */
+#ifndef _PTHREAD_MD_H_
+#define _PTHREAD_MD_H_
+
+#include <stddef.h>
+#include <sys/types.h>
+#include <sys/kse.h>
+#include <machine/sysarch.h>
+#include <ucontext.h>
+
+#define KSE_STACKSIZE 16384
+#define DTV_OFFSET offsetof(struct tcb, tcb_dtv)
+
+#define THR_GETCONTEXT(ucp) \
+ (void)_amd64_save_context(&(ucp)->uc_mcontext)
+#define THR_SETCONTEXT(ucp) \
+ (void)_amd64_restore_context(&(ucp)->uc_mcontext, 0, NULL)
+
+#define PER_KSE
+#undef PER_THREAD
+
+struct kse;
+struct pthread;
+struct tdv;
+
/*
 * %fs points to a struct kcb.
 */
struct kcb {
	struct tcb		*kcb_curtcb;	/* tcb of running thread */
	struct kcb		*kcb_self;	/* self reference */
	struct kse		*kcb_kse;	/* owning KSE */
	struct kse_mailbox	kcb_kmbx;	/* kernel upcall mailbox */
};

struct tcb {
	struct tcb		*tcb_self;	/* required by rtld */
	void			*tcb_dtv;	/* required by rtld */
	struct pthread		*tcb_thread;	/* owning pthread */
	void			*tcb_spare[1];	/* align tcb_tmbx to 16 bytes */
	struct kse_thr_mailbox	tcb_tmbx;	/* per-thread kernel mailbox */
};
+
+/*
+ * Evaluates to the byte offset of the per-kse variable name.
+ */
+#define __kcb_offset(name) __offsetof(struct kcb, name)
+
+/*
+ * Evaluates to the type of the per-kse variable name.
+ */
+#define __kcb_type(name) __typeof(((struct kcb *)0)->name)
+
+/*
+ * Evaluates to the value of the per-kse variable name.
+ */
+#define KCB_GET64(name) ({ \
+ __kcb_type(name) __result; \
+ \
+ u_long __i; \
+ __asm __volatile("movq %%fs:%1, %0" \
+ : "=r" (__i) \
+ : "m" (*(u_long *)(__kcb_offset(name)))); \
+ __result = (__kcb_type(name))__i; \
+ \
+ __result; \
+})
+
+/*
+ * Sets the value of the per-kse variable name to value val.
+ */
+#define KCB_SET64(name, val) ({ \
+ __kcb_type(name) __val = (val); \
+ \
+ u_long __i; \
+ __i = (u_long)__val; \
+ __asm __volatile("movq %1,%%fs:%0" \
+ : "=m" (*(u_long *)(__kcb_offset(name))) \
+ : "r" (__i)); \
+})
+
/*
 * Atomically read the 64-bit %fs-relative word at offset addr,
 * replace it with zero, and return the old value.  xchgq with a
 * memory operand is implicitly locked, so the read-and-clear is
 * atomic with respect to concurrent updates of the mailbox.
 */
static __inline u_long
__kcb_readandclear64(volatile u_long *addr)
{
	u_long result;

	__asm __volatile (
	    "	xorq	%0, %0;"
	    "	xchgq	%%fs:%1, %0;"
	    "# __kcb_readandclear64"
	    : "=&r" (result)
	    : "m" (*addr));
	return (result);
}
+
+#define KCB_READANDCLEAR64(name) ({ \
+ __kcb_type(name) __result; \
+ \
+ __result = (__kcb_type(name)) \
+ __kcb_readandclear64((u_long *)__kcb_offset(name)); \
+ __result; \
+})
+
+
+#define _kcb_curkcb() KCB_GET64(kcb_self)
+#define _kcb_curtcb() KCB_GET64(kcb_curtcb)
+#define _kcb_curkse() ((struct kse *)KCB_GET64(kcb_kmbx.km_udata))
+#define _kcb_get_tmbx() KCB_GET64(kcb_kmbx.km_curthread)
+#define _kcb_set_tmbx(value) KCB_SET64(kcb_kmbx.km_curthread, (void *)value)
+#define _kcb_readandclear_tmbx() KCB_READANDCLEAR64(kcb_kmbx.km_curthread)
+
+/*
+ * The constructors.
+ */
+struct tcb *_tcb_ctor(struct pthread *, int);
+void _tcb_dtor(struct tcb *tcb);
+struct kcb *_kcb_ctor(struct kse *);
+void _kcb_dtor(struct kcb *);
+
/*
 * Called from the KSE to set its private data: point the %fs base
 * at this kcb so the KCB_GET64/SET64 accessors reach it.
 */
static __inline void
_kcb_set(struct kcb *kcb)
{
	amd64_set_fsbase(kcb);
}
+
/* Get the current kcb (read via kcb_self through %fs). */
static __inline struct kcb *
_kcb_get(void)
{
	return (_kcb_curkcb());
}
+
/*
 * Enter a KSE critical region: atomically clear the mailbox's
 * km_curthread pointer and return the old value, which the caller
 * must later hand to _kcb_critical_leave().
 */
static __inline struct kse_thr_mailbox *
_kcb_critical_enter(void)
{
	return (_kcb_readandclear_tmbx());
}
+
/* Leave a critical region by restoring the saved mailbox pointer. */
static __inline void
_kcb_critical_leave(struct kse_thr_mailbox *crit)
{
	_kcb_set_tmbx(crit);
}
+
/* A cleared km_curthread pointer means we are in a critical region. */
static __inline int
_kcb_in_critical(void)
{
	return (_kcb_get_tmbx() == NULL);
}
+
/* Make tcb the current thread of kcb. */
static __inline void
_tcb_set(struct kcb *kcb, struct tcb *tcb)
{
	kcb->kcb_curtcb = tcb;
}
+
/* Return the tcb of the thread currently running on this KSE. */
static __inline struct tcb *
_tcb_get(void)
{
	return (_kcb_curtcb());
}
+
+static __inline struct pthread *
+_get_curthread(void)
+{
+ struct tcb *tcb;
+
+ tcb = _kcb_curtcb();
+ if (tcb != NULL)
+ return (tcb->tcb_thread);
+ else
+ return (NULL);
+}
+
/* Return the KSE stored in the current mailbox's km_udata. */
static __inline struct kse *
_get_curkse(void)
{
	return ((struct kse *)_kcb_curkse());
}
+
/*
 * Machine-dependent context primitives, implemented in assembly
 * (context.S / enter_uts.S).  _amd64_save_context() returns 0 on the
 * initial save and 1 when the saved context is resumed.
 */
void _amd64_enter_uts(struct kse_mailbox *km, kse_func_t uts, void *stack,
    size_t stacksz);
int _amd64_restore_context(mcontext_t *mc, intptr_t val, intptr_t *loc);
int _amd64_save_context(mcontext_t *mc);
+
/*
 * Save the current thread's context and enter the UTS (upcall)
 * function on the KSE's upcall stack.
 *
 * Returns 0 when the saved context is later resumed, -1 on error
 * (a failed save, or an unexpected return from the UTS).
 */
static __inline int
_thread_enter_uts(struct tcb *tcb, struct kcb *kcb)
{
	int ret;

	ret = _amd64_save_context(&tcb->tcb_tmbx.tm_context.uc_mcontext);
	if (ret == 0) {
		/* Initial save: hand control to the upcall function. */
		_amd64_enter_uts(&kcb->kcb_kmbx, kcb->kcb_kmbx.km_func,
		    kcb->kcb_kmbx.km_stack.ss_sp,
		    kcb->kcb_kmbx.km_stack.ss_size);
		/* We should not reach here. */
		return (-1);
	}
	else if (ret < 0)
		return (-1);
	/* ret > 0: we were resumed via a context restore. */
	return (0);
}
+
/*
 * Switch this KSE to thread tcb, making it the current thread.
 *
 * Without a debugger attached (_libkse_debug == 0) the context is
 * restored in userland by _amd64_restore_context(); when setmbox is
 * nonzero, km_curthread is set to the thread's mailbox atomically
 * with the restore.  Under a debugger the kernel performs the switch
 * via kse_switchin() so the switch can be observed.
 *
 * Does not return on success; returns -1 on bad arguments or if the
 * switch failed.
 */
static __inline int
_thread_switch(struct kcb *kcb, struct tcb *tcb, int setmbox)
{
	extern int _libkse_debug;

	if ((kcb == NULL) || (tcb == NULL))
		return (-1);
	kcb->kcb_curtcb = tcb;

	if (_libkse_debug == 0) {
		tcb->tcb_tmbx.tm_lwp = kcb->kcb_kmbx.km_lwp;
		if (setmbox != 0)
			_amd64_restore_context(
			    &tcb->tcb_tmbx.tm_context.uc_mcontext,
			    (intptr_t)&tcb->tcb_tmbx,
			    (intptr_t *)&kcb->kcb_kmbx.km_curthread);
		else
			_amd64_restore_context(
			    &tcb->tcb_tmbx.tm_context.uc_mcontext,
			    0, NULL);
		/* We should not reach here. */
	} else {
		if (setmbox)
			kse_switchin(&tcb->tcb_tmbx, KSE_SWITCHIN_SETTMBX);
		else
			kse_switchin(&tcb->tcb_tmbx, 0);
	}

	return (-1);
}
+#endif
diff --git a/lib/libpthread/arch/arm/Makefile.inc b/lib/libpthread/arch/arm/Makefile.inc
new file mode 100644
index 0000000..ced7063
--- /dev/null
+++ b/lib/libpthread/arch/arm/Makefile.inc
@@ -0,0 +1,7 @@
+# $FreeBSD$
+
+.PATH: ${.CURDIR}/arch/${MACHINE_ARCH}/${MACHINE_ARCH}
+
+CFLAGS+=-DSYSTEM_SCOPE_ONLY
+
+SRCS+= pthread_md.c context.S
diff --git a/lib/libpthread/arch/arm/arm/context.S b/lib/libpthread/arch/arm/arm/context.S
new file mode 100644
index 0000000..c638804
--- /dev/null
+++ b/lib/libpthread/arch/arm/arm/context.S
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) Olivier Houchard
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Neither the name of the author nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <machine/asm.h>
+__FBSDID("$FreeBSD$");
+
/*
 * int thr_setcontext(mcontext_t *mcp, intptr_t val, intptr_t *loc)
 *
 * Restores the context in mcp.  If loc is non-NULL, *loc is set to
 * val just before the registers are reloaded.
 *
 * Returns -1 if mcp is NULL; on success it does not return, as
 * r0-r15 (including pc) are reloaded from the context.
 */

.weak _C_LABEL(_thr_setcontext)
.set _C_LABEL(_thr_setcontext), _C_LABEL(__thr_setcontext)

ENTRY(__thr_setcontext)
/* Check for NULL pointer. */
	cmp	r0, #0
	moveq	r0, #-1			/* return -1 */
	moveq	pc, lr
	cmp	r2, #0			/* if (loc != NULL) */
	strne	r1, [r2]		/*	*loc = val; */
	ldr	r1, [r0, #(16 * 4)]	/* CPSR */
	msr	cpsr, r1		/* NOTE(review): writes cpsr with no
					 * field mask -- confirm cpsr_c/_f
					 * is not intended */
	ldmia	r0, {r0-r15}		/* reload all regs, incl. pc */
	mov	pc, lr			/* unreachable: pc set by ldmia */
	/* XXX: FP bits ? */
+
/*
 * int thr_getcontext(mcontext_t *mcp);
 *
 * Saves the machine context into mcp.
 *
 * Returns -1 if there is an error, 0 no errors; 1 upon return
 * from a setcontext() (slot 0, reloaded into r0, is pre-set to 1).
 */
.weak _C_LABEL(_thr_getcontext)
.set _C_LABEL(_thr_getcontext), _C_LABEL(__thr_getcontext)

ENTRY(__thr_getcontext)
/* Check for NULL pointer. */
	cmp	r0, #0
	moveq	r0, #-1
	moveq	pc, lr
	stmia	r0, {r1-r14}		/* NOTE(review): stores r1 into the
					 * slot setcontext reloads into r0;
					 * register slots look shifted by
					 * one vs ldmia {r0-r15} -- confirm
					 * intended mcontext layout */
	mov	r1, #1
	str	r1, [r0]		/* Return 1 from setcontext */
	str	lr, [r0, #(15 * 4)]	/* PC */
	mrs	r1, cpsr
	str	r1, [r0, #(16 * 4)]	/* CPSR */
	mov	r0, #0			/* Return 0. */
	mov	pc, lr
+
/*
 * Switch to the UTS stack (stack base + size) and jump to the upcall
 * function; the mailbox argument is already in r0.  Does not return.
 */
ENTRY(_arm_enter_uts)
	add	sp, r2, r3		/* Stack addr + size. */
	mov	pc, r1			/* jump to uts(km) */
diff --git a/lib/libpthread/arch/arm/arm/pthread_md.c b/lib/libpthread/arch/arm/arm/pthread_md.c
new file mode 100644
index 0000000..72426d4
--- /dev/null
+++ b/lib/libpthread/arch/arm/arm/pthread_md.c
@@ -0,0 +1,86 @@
+/*-
+ * Copyright (C) 2003 Jake Burkholder <jake@freebsd.org>
+ * Copyright (C) 2003 David Xu <davidxu@freebsd.org>
+ * Copyright (c) 2001,2003 Daniel Eischen <deischen@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Neither the name of the author nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/types.h>
+
+#include <unistd.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <string.h>
+#include <ucontext.h>
+
+#include <machine/sysarch.h>
+
+#include "pthread_md.h"
+
+struct arm_tp **arm_tp = (struct arm_tp **)ARM_TP_ADDRESS;
+
+struct tcb *
+_tcb_ctor(struct pthread *thread, int initial)
+{
+ struct tcb *tcb;
+
+ if ((tcb = malloc(sizeof(struct tcb)))) {
+ bzero(tcb, sizeof(struct tcb));
+ tcb->tcb_thread = thread;
+ /* XXX - Allocate tdv/tls */
+ }
+ return (tcb);
+}
+
/* Release a tcb allocated by _tcb_ctor(). */
void
_tcb_dtor(struct tcb *tcb)
{

	free(tcb);
}
+
+struct kcb *
+_kcb_ctor(struct kse *kse)
+{
+ struct kcb *kcb;
+
+ kcb = malloc(sizeof(struct kcb));
+ if (kcb != NULL) {
+ bzero(kcb, sizeof(struct kcb));
+ kcb->kcb_faketcb.tcb_isfake = 1;
+ kcb->kcb_faketcb.tcb_tmbx.tm_flags = TMF_NOUPCALL;
+ kcb->kcb_curtcb = &kcb->kcb_faketcb;
+ kcb->kcb_kse = kse;
+ }
+ return (kcb);
+}
+
/* Release a kcb allocated by _kcb_ctor(). */
void
_kcb_dtor(struct kcb *kcb)
{
	free(kcb);
}
diff --git a/lib/libpthread/arch/arm/include/atomic_ops.h b/lib/libpthread/arch/arm/include/atomic_ops.h
new file mode 100644
index 0000000..3a209b3
--- /dev/null
+++ b/lib/libpthread/arch/arm/include/atomic_ops.h
@@ -0,0 +1,53 @@
+/*-
+ * Copyright (c) 2001 Daniel Eischen <deischen@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Neither the name of the author nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _ATOMIC_OPS_H_
+#define _ATOMIC_OPS_H_
+
+#include <machine/atomic.h>
+#include "thr_private.h"
+
/*
 * Atomic swap:
 *	Atomic (tmp = *dst, *dst = val), then *res = tmp
 *
 * void atomic_swap32(intptr_t *dst, intptr_t val, intptr_t *res);
 *
 * Implemented with __swp() from <machine/atomic.h> (the arm swp
 * instruction).
 */
static inline void
atomic_swap32(intptr_t *dst, intptr_t val, intptr_t *res)
{
	*res = __swp(val, dst);
}

/* Pointers and ints are both 32 bits here; reuse the 32-bit swap. */
#define atomic_swap_ptr(d, v, r) \
	atomic_swap32((intptr_t *)d, (intptr_t)v, (intptr_t *)r)

#define atomic_swap_int(d, v, r) \
	atomic_swap32((intptr_t *)d, (intptr_t)v, (intptr_t *)r)
+#endif
+
diff --git a/lib/libpthread/arch/arm/include/pthread_md.h b/lib/libpthread/arch/arm/include/pthread_md.h
new file mode 100644
index 0000000..857fa1b
--- /dev/null
+++ b/lib/libpthread/arch/arm/include/pthread_md.h
@@ -0,0 +1,257 @@
+/*-
+ * Copyright (c) 2003 Jake Burkholder <jake@freebsd.org>.
+ * Copyright (c) 2003 Marcel Moolenaar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * Machine-dependent thread prototypes/definitions for the thread kernel.
+ */
+#ifndef _PTHREAD_MD_H_
+#define _PTHREAD_MD_H_
+
+#include <sys/kse.h>
+#include <stddef.h>
+#include <ucontext.h>
+
+#define KSE_STACKSIZE 16384
+#define DTV_OFFSET offsetof(struct tcb, tcb_tp.tp_tdv)
+
+int _thr_setcontext(mcontext_t *, intptr_t, intptr_t *);
+int _thr_getcontext(mcontext_t *);
+
+#define THR_GETCONTEXT(ucp) _thr_getcontext(&(ucp)->uc_mcontext)
+#define THR_SETCONTEXT(ucp) _thr_setcontext(&(ucp)->uc_mcontext, 0, NULL)
+
+#define PER_THREAD
+
+struct kcb;
+struct kse;
+struct pthread;
+struct tcb;
+struct tdv; /* We don't know what this is yet? */
+
+
+/*
+ * %r6 points to one of these; C code reaches it through _tp below.
+ *
+ * XXX - It still needs verifying that both static and dynamic
+ * allocation of any of these structures yields a valid, well-aligned
+ * thread pointer.  (The "static TLS defined as an array of long
+ * double to enforce 16-byte alignment" mentioned by the original
+ * comment is not defined anywhere in this file.)
+ */
+struct arm_tp {
+ struct tdv *tp_tdv; /* dynamic TLS */
+};
+
+/* Per-thread control block; tcb_tp is what the thread pointer addresses. */
+struct tcb {
+ struct pthread *tcb_thread; /* owning pthread */
+ struct kcb *tcb_curkcb; /* kcb we are currently running on */
+ uint32_t tcb_isfake; /* != 0: this is a kcb's fake tcb */
+ struct kse_thr_mailbox tcb_tmbx; /* needs 32-byte alignment */
+ struct arm_tp tcb_tp;
+};
+
+/* Per-KSE control block; kcb_faketcb stands in when no thread is current. */
+struct kcb {
+ struct kse_mailbox kcb_kmbx; /* kernel upcall mailbox */
+ struct tcb kcb_faketcb; /* used while no thread is bound */
+ struct tcb *kcb_curtcb;
+ struct kse *kcb_kse;
+};
+
+extern struct arm_tp **arm_tp;
+#define _tp (*arm_tp)
+
+#define _tcb ((struct tcb*)((char*)(_tp) - offsetof(struct tcb, tcb_tp)))
+
+/*
+ * The kcb and tcb constructors.
+ */
+struct tcb *_tcb_ctor(struct pthread *, int);
+void _tcb_dtor(struct tcb *);
+struct kcb *_kcb_ctor(struct kse *kse);
+void _kcb_dtor(struct kcb *);
+
+/*
+ * Atomically exchange the word at *ptr with val using the ARM SWP
+ * instruction; returns the previous contents of *ptr.
+ */
+static __inline uint32_t
+__kcb_swp(uint32_t val, void *ptr)
+{
+
+ __asm __volatile("swp %0, %1, [%2]"
+ : "=r" (val) : "r" (val) , "r" (ptr) : "memory");
+ return (val);
+}
+
+/* Called from the KSE to set its private data. */
+static __inline void
+_kcb_set(struct kcb *kcb)
+{
+ /* There is no thread yet; use the fake tcb. */
+ __kcb_swp((uint32_t)&kcb->kcb_faketcb.tcb_tp, &_tp);
+}
+
+/*
+ * Get the current kcb.
+ *
+ * This can only be called while in a critical region; don't
+ * worry about having the kcb changed out from under us.
+ */
+static __inline struct kcb *
+_kcb_get(void)
+{
+ return (_tcb->tcb_curkcb);
+}
+
+/*
+ * Enter a critical region.
+ *
+ * Read and clear km_curthread in the kse mailbox.
+ */
+static __inline struct kse_thr_mailbox *
+_kcb_critical_enter(void)
+{
+ struct kse_thr_mailbox *crit;
+
+ /* The fake tcb has no current thread; nothing to clear. */
+ if (_tcb->tcb_isfake)
+  return (NULL);
+ crit = (struct kse_thr_mailbox *)__kcb_swp((uint32_t)NULL,
+     &_tcb->tcb_curkcb->kcb_kmbx.km_curthread);
+ return (crit);
+}
+
+/* Leave a critical region: restore km_curthread saved by enter. */
+static __inline void
+_kcb_critical_leave(struct kse_thr_mailbox *crit)
+{
+
+ if (_tcb->tcb_isfake == 0)
+  __kcb_swp((uint32_t)crit,
+      &_tcb->tcb_curkcb->kcb_kmbx.km_curthread);
+}
+
+/*
+ * Return non-zero iff the current KSE is in a critical region.
+ *
+ * A critical region is entered by clearing km_curthread in the KSE
+ * mailbox (see _kcb_critical_enter() above), so testing it for NULL
+ * suffices.  The original body also carried a TMF_NOUPCALL variant of
+ * this check, but it sat AFTER an unconditional return and was
+ * unreachable (dead code, plus unused locals); it has been removed.
+ * Runtime behaviour is unchanged.
+ */
+static __inline int
+_kcb_in_critical(void)
+{
+ return (_tcb->tcb_curkcb->kcb_kmbx.km_curthread == NULL);
+}
+
+/* Install tcb (or, if NULL, the kcb's fake tcb) as the current thread. */
+static __inline void
+_tcb_set(struct kcb *kcb, struct tcb *tcb)
+{
+ if (tcb == NULL)
+  tcb = &kcb->kcb_faketcb;
+ /* Point the thread pointer at the new tcb's tp before linking. */
+ __kcb_swp((uint32_t)&tcb->tcb_tp, &_tp);
+ kcb->kcb_curtcb = tcb;
+ tcb->tcb_curkcb = kcb;
+}
+
+/* Get the current tcb (derived from the thread pointer). */
+static __inline struct tcb *
+_tcb_get(void)
+{
+ return (_tcb);
+}
+
+/* Get the pthread bound to the current tcb. */
+static __inline struct pthread *
+_get_curthread(void)
+{
+ return (_tcb->tcb_thread);
+}
+
+/*
+ * Get the current kse.
+ *
+ * Like _kcb_get(), this can only be called while in a critical region.
+ */
+static __inline struct kse *
+_get_curkse(void)
+{
+ return (_tcb->tcb_curkcb->kcb_kse);
+}
+
+void _arm_enter_uts(struct kse_mailbox *km, kse_func_t uts, void *stack,
+ size_t stacksz);
+
+/*
+ * Save the current thread's context and enter the UTS (scheduler
+ * upcall) on the KSE's own stack.  Returns 0 when the saved context
+ * is resumed later, -1 on error; the initial call does not return
+ * normally.
+ */
+static __inline int
+_thread_enter_uts(struct tcb *tcb, struct kcb *kcb)
+{
+ int ret;
+
+ if ((ret = _thr_getcontext(&tcb->tcb_tmbx.tm_context.uc_mcontext))
+     == 0) {
+  kcb->kcb_curtcb = &kcb->kcb_faketcb;
+  /*
+   * Cast is uint32_t to match __kcb_swp()'s parameter type
+   * (the original (int) cast was inconsistent with the other
+   * call sites in this file).
+   */
+  __kcb_swp((uint32_t)&kcb->kcb_faketcb.tcb_tp, &_tp);
+  _arm_enter_uts(&kcb->kcb_kmbx, kcb->kcb_kmbx.km_func,
+      kcb->kcb_kmbx.km_stack.ss_sp,
+      kcb->kcb_kmbx.km_stack.ss_size);
+  /* We should not reach here. */
+  return (-1);
+ } else if (ret < 0)
+  return (-1);
+ return (0);
+}
+
+/*
+ * Switch to the context saved in tcb's thread mailbox.  When
+ * _libkse_debug is set, the kernel performs the switch-in via
+ * kse_switchin(2) so a debugger can observe it.  If setmbox is
+ * non-zero, km_curthread is set to the new thread's mailbox as part
+ * of the switch.  Returns only on failure (-1).
+ */
+static __inline int
+_thread_switch(struct kcb *kcb, struct tcb *tcb, int setmbox)
+{
+ extern int _libkse_debug;
+ mcontext_t *mc;
+
+ if (!tcb || !kcb)
+  return (-1);
+ _tcb_set(kcb, tcb);
+ mc = &tcb->tcb_tmbx.tm_context.uc_mcontext;
+ if (_libkse_debug == 0) {
+  tcb->tcb_tmbx.tm_lwp = kcb->kcb_kmbx.km_lwp;
+  if (setmbox)
+   /* Publish the mailbox atomically with the switch. */
+   _thr_setcontext(mc, (intptr_t)&tcb->tcb_tmbx,
+       (intptr_t *)&kcb->kcb_kmbx.km_curthread);
+  else
+   _thr_setcontext(mc, 0, NULL);
+ } else {
+  if (setmbox)
+   kse_switchin(&tcb->tcb_tmbx, KSE_SWITCHIN_SETTMBX);
+  else
+   kse_switchin(&tcb->tcb_tmbx, 0);
+ }
+
+ /* We should not reach here. */
+ return (-1);
+}
+
+#endif /* _PTHREAD_MD_H_ */
diff --git a/lib/libpthread/arch/i386/Makefile.inc b/lib/libpthread/arch/i386/Makefile.inc
new file mode 100644
index 0000000..73a9a8a
--- /dev/null
+++ b/lib/libpthread/arch/i386/Makefile.inc
@@ -0,0 +1,5 @@
+# $FreeBSD$
+
+.PATH: ${.CURDIR}/arch/${MACHINE_ARCH}/${MACHINE_ARCH}
+
+SRCS+= thr_enter_uts.S thr_getcontext.S pthread_md.c
diff --git a/lib/libpthread/arch/i386/i386/pthread_md.c b/lib/libpthread/arch/i386/i386/pthread_md.c
new file mode 100644
index 0000000..cbea6d4
--- /dev/null
+++ b/lib/libpthread/arch/i386/i386/pthread_md.c
@@ -0,0 +1,100 @@
+/*-
+ * Copyright (C) 2003 David Xu <davidxu@freebsd.org>
+ * Copyright (c) 2001,2003 Daniel Eischen <deischen@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Neither the name of the author nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/types.h>
+#include <machine/cpufunc.h>
+#include <machine/sysarch.h>
+
+#include <unistd.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <string.h>
+#include <ucontext.h>
+
+#include "rtld_tls.h"
+#include "pthread_md.h"
+
+/*
+ * Allocate and initialize a thread's tcb.  For the initial thread the
+ * TLS block already installed by rtld (reachable at %gs:0) is handed
+ * to _rtld_allocate_tls() so existing TLS data is preserved; other
+ * threads get a fresh block.  Returns NULL on allocation failure.
+ */
+struct tcb *
+_tcb_ctor(struct pthread *thread, int initial)
+{
+ struct tcb *tcb;
+ void *oldtls;
+
+ if (initial) {
+  __asm __volatile("movl %%gs:0, %0" : "=r" (oldtls));
+ } else {
+  oldtls = NULL;
+ }
+
+ /* 16-byte alignment keeps tcb_tmbx correctly aligned. */
+ tcb = _rtld_allocate_tls(oldtls, sizeof(struct tcb), 16);
+ if (tcb) {
+  tcb->tcb_thread = thread;
+  tcb->tcb_spare = 0;
+  bzero(&tcb->tcb_tmbx, sizeof(tcb->tcb_tmbx));
+ }
+
+ return (tcb);
+}
+
+/* Release a tcb allocated by _tcb_ctor(). */
+void
+_tcb_dtor(struct tcb *tcb)
+{
+ _rtld_free_tls(tcb, sizeof(struct tcb), 16);
+}
+
+/*
+ * Initialize KSD.  This also includes setting up the LDT.
+ *
+ * Allocates and zeroes the per-KSE control block; kcb_self lets
+ * %gs-relative code recover the kcb's linear address.  Returns NULL
+ * on allocation failure.
+ */
+struct kcb *
+_kcb_ctor(struct kse *kse)
+{
+ struct kcb *kcb;
+
+ /* calloc(3) replaces the original malloc+bzero pair. */
+ kcb = calloc(1, sizeof(struct kcb));
+ if (kcb != NULL) {
+  kcb->kcb_self = kcb;
+  kcb->kcb_kse = kse;
+ }
+ return (kcb);
+}
+
+/* Release a kcb allocated by _kcb_ctor(). */
+void
+_kcb_dtor(struct kcb *kcb)
+{
+ free(kcb);
+}
+
+/*
+ * Set the base address of the %gs segment to addr via sysarch(2).
+ * Note sysarch takes a pointer to the argument, hence &addr.
+ * (Presumably a local fallback for systems whose libc lacks
+ * i386_set_gsbase() -- TODO confirm.)
+ */
+int
+i386_set_gsbase(void *addr)
+{
+
+ return (sysarch(I386_SET_GSBASE, &addr));
+}
diff --git a/lib/libpthread/arch/i386/i386/thr_enter_uts.S b/lib/libpthread/arch/i386/i386/thr_enter_uts.S
new file mode 100644
index 0000000..bf6df8f
--- /dev/null
+++ b/lib/libpthread/arch/i386/i386/thr_enter_uts.S
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2002 Jonathan Mini <mini@freebsd.org>.
+ * Copyright (c) 2001 Daniel Eischen <deischen@freebsd.org>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Neither the name of the author nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <machine/asm.h>
+__FBSDID("$FreeBSD$");
+
+
+/*
+ * _i386_enter_uts(struct kse_mailbox *km, kse_func_t uts, void *stack,
+ * long stacksz);
+ * +4 = km, +8 = uts, +12 = stack, +16 = size
+ *
+ * Switches to the top of the supplied UTS stack and calls uts(km).
+ * NOTE(review): nothing valid remains on the new stack for the final
+ * ret -- the UTS function is evidently expected never to return.
+ */
+ENTRY(_i386_enter_uts)
+ movl %esp, %edx /* save stack */
+ movl 12(%edx), %eax /* get bottom of stack */
+ addl 16(%edx), %eax /* add length */
+ movl %eax, %esp /* switch to uts stack */
+ pushl 4(%edx) /* push the address of the mailbox */
+ call *8(%edx)
+ ret
diff --git a/lib/libpthread/arch/i386/i386/thr_getcontext.S b/lib/libpthread/arch/i386/i386/thr_getcontext.S
new file mode 100644
index 0000000..d9d300f
--- /dev/null
+++ b/lib/libpthread/arch/i386/i386/thr_getcontext.S
@@ -0,0 +1,156 @@
+/*-
+ * Copyright (c) 2001 Daniel Eischen <deischen@freebsd.org>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Neither the name of the author nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <machine/asm.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * Where do we define these?
+ */
+#define MC_LEN_OFFSET 80 /* offset to mc_len from mcontext */
+#define MC_LEN 640 /* mc_len <machine/ucontext.h> */
+#define MC_FPFMT_OFFSET 84
+#define MC_FPFMT_NODEV 0x10000
+#define MC_FPFMT_387 0x10001
+#define MC_FPFMT_XMM 0x10002
+#define MC_OWNEDFP_OFFSET 88
+#define MC_OWNEDFP_NONE 0x20000
+#define MC_OWNEDFP_FPU 0x20001
+#define MC_OWNEDFP_PCB 0x20002
+#define MC_FPREGS_OFFSET 96 /* offset to FP regs from mcontext */
+#define MC_FP_CW_OFFSET 96 /* offset to FP control word */
+
+/*
+ * int thr_setcontext(mcontext_t *mcp, intptr_t val, intptr_t *loc)
+ *
+ * Restores the context in mcp.
+ *
+ * Returns 0 if there are no errors; -1 otherwise
+ */
+/*
+ * Register use: %edx = mcontext, %ecx = caller's old %esp (used to
+ * fetch the val/loc arguments after the stack switch).  On success
+ * control transfers to the saved %eip via the pushed return address;
+ * the fall-through return at label 8 is reached only for a NULL or
+ * invalid mcp.
+ */
+ .weak CNAME(_thr_setcontext)
+ .set CNAME(_thr_setcontext),CNAME(__thr_setcontext)
+ENTRY(__thr_setcontext)
+ movl 4(%esp), %edx /* get address of mcontext */
+ cmpl $0, %edx /* check for null pointer */
+ jne 1f
+ movl $-1, %eax
+ jmp 8f
+1: cmpl $MC_LEN, MC_LEN_OFFSET(%edx) /* is context valid? */
+ je 2f
+ movl $-1, %eax /* bzzzt, invalid context */
+ jmp 8f
+2: /*movl 4(%edx), %gs*/ /* we don't touch %gs */
+ movw 8(%edx), %fs
+ movw 12(%edx), %es
+ movw 16(%edx), %ds
+ movw 76(%edx), %ss
+ movl 20(%edx), %edi
+ movl 24(%edx), %esi
+ movl 28(%edx), %ebp
+ movl %esp, %ecx /* save current stack in ecx */
+ movl 72(%edx), %esp /* switch to context defined stack */
+ pushl 60(%edx) /* push return address on stack */
+ pushl 44(%edx) /* push ecx on stack */
+ pushl 48(%edx) /* push eax on stack */
+ /*
+ * if (mc_fpowned == MC_OWNEDFP_FPU || mc_fpowned == MC_OWNEDFP_PCB) {
+ * if (mc_fpformat == MC_FPFMT_387)
+ * restore 387 FP register format
+ * else if (mc_fpformat == MC_FPFMT_XMM)
+ * restore XMM/SSE FP register format
+ * }
+ */
+ cmpl $MC_OWNEDFP_FPU, MC_OWNEDFP_OFFSET(%edx)
+ je 3f
+ cmpl $MC_OWNEDFP_PCB, MC_OWNEDFP_OFFSET(%edx)
+ jne 5f
+3: cmpl $MC_FPFMT_387, MC_FPFMT_OFFSET(%edx)
+ jne 4f
+ frstor MC_FPREGS_OFFSET(%edx) /* restore 387 FP regs */
+ jmp 6f
+4: cmpl $MC_FPFMT_XMM, MC_FPFMT_OFFSET(%edx)
+ jne 5f
+ fxrstor MC_FPREGS_OFFSET(%edx) /* restore XMM FP regs */
+ jmp 6f
+5: fninit /* no saved FP state; reset FPU */
+ fldcw MC_FP_CW_OFFSET(%edx)
+6: pushl 68(%edx) /* push flags register on stack*/
+ movl 36(%edx), %ebx /* restore ebx and edx */
+ movl 40(%edx), %edx
+ movl 12(%ecx), %eax /* get 3rd arg (loc) */
+ cmpl $0, %eax /* do nothing if loc == null */
+ je 7f
+ movl 8(%ecx), %ecx /* get 2nd arg (val) */
+ movl %ecx, (%eax) /* set loc = val */
+7: popfl /* restore flags after test */
+ popl %eax /* restore eax and ecx last */
+ popl %ecx
+8: ret
+
+/*
+ * int thr_getcontext(mcontext_t *mcp);
+ *
+ * Returns -1 if there is an error, 0 no errors; 1 upon return
+ * from a setcontext().
+ */
+/*
+ * Saves the caller-visible state into mcp.  mc_eax is pre-set to 1 so
+ * that a later _thr_setcontext() on this context appears to return 1
+ * from this call; the direct path returns 0 (or -1 for a NULL mcp).
+ */
+ .weak CNAME(_thr_getcontext)
+ .set CNAME(_thr_getcontext),CNAME(__thr_getcontext)
+ENTRY(__thr_getcontext)
+ pushl %edx /* save edx */
+ movl 8(%esp), %edx /* get address of mcontext */
+ cmpl $0, %edx /* check for null pointer */
+ jne 1f
+ popl %edx /* restore edx and stack */
+ movl $-1, %eax
+ jmp 2f
+1: /*movw %gs, 4(%edx)*/ /* we don't touch %gs */
+ movw %fs, 8(%edx)
+ movw %es, 12(%edx)
+ movw %ds, 16(%edx)
+ movw %ss, 76(%edx)
+ movl %edi, 20(%edx)
+ movl %esi, 24(%edx)
+ movl %ebp, 28(%edx)
+ movl %ebx, 36(%edx)
+ movl $1, 48(%edx) /* store successful return in eax */
+ popl %eax /* get saved value of edx */
+ movl %eax, 40(%edx) /* save edx */
+ movl %ecx, 44(%edx)
+ movl (%esp), %eax /* get return address */
+ movl %eax, 60(%edx) /* save return address */
+ fnstcw MC_FP_CW_OFFSET(%edx)
+ movl $MC_LEN, MC_LEN_OFFSET(%edx)
+ movl $MC_FPFMT_NODEV, MC_FPFMT_OFFSET(%edx) /* no FP */
+ movl $MC_OWNEDFP_NONE, MC_OWNEDFP_OFFSET(%edx) /* no FP */
+ pushfl
+ popl %eax /* get eflags */
+ movl %eax, 68(%edx) /* store eflags */
+ movl %esp, %eax /* setcontext pushes the return */
+ addl $4, %eax /* address onto the top of the */
+ movl %eax, 72(%edx) /* stack; account for this */
+ movl 40(%edx), %edx /* restore edx -- is this needed? */
+ xorl %eax, %eax /* return 0 */
+2: ret
diff --git a/lib/libpthread/arch/i386/include/atomic_ops.h b/lib/libpthread/arch/i386/include/atomic_ops.h
new file mode 100644
index 0000000..7bc3d1b
--- /dev/null
+++ b/lib/libpthread/arch/i386/include/atomic_ops.h
@@ -0,0 +1,51 @@
+/*-
+ * Copyright (c) 2001 Daniel Eischen <deischen@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Neither the name of the author nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _ATOMIC_OPS_H_
+#define _ATOMIC_OPS_H_
+
+/*
+ * Atomic swap:
+ * Atomic (tmp = *dst, *dst = val), then *res = tmp
+ *
+ * void atomic_swap32(intptr_t *dst, intptr_t val, intptr_t *res);
+ */
+/*
+ * Exchange *dst and val with xchgl (implicitly locked when one
+ * operand is memory), then store the previous *dst in *res.
+ *
+ * NOTE(review): *dst is declared as a plain input "m" although xchgl
+ * writes it; the "memory" clobber papers over this, but a "+m"
+ * read-write operand would express it exactly -- verify before
+ * building with newer compilers.
+ */
+static inline void
+atomic_swap32(intptr_t *dst, intptr_t val, intptr_t *res)
+{
+ __asm __volatile(
+ "xchgl %2, %1; movl %2, %0"
+ : "=m" (*res) : "m" (*dst), "r" (val) : "memory");
+}
+
+/* Pointer- and int-flavoured wrappers; both are 32 bits on i386. */
+#define atomic_swap_ptr(d, v, r) \
+ atomic_swap32((intptr_t *)d, (intptr_t)v, (intptr_t *)r)
+
+#define atomic_swap_int(d, v, r) \
+ atomic_swap32((intptr_t *)d, (intptr_t)v, (intptr_t *)r)
+#endif
diff --git a/lib/libpthread/arch/i386/include/pthread_md.h b/lib/libpthread/arch/i386/include/pthread_md.h
new file mode 100644
index 0000000..52afd6a
--- /dev/null
+++ b/lib/libpthread/arch/i386/include/pthread_md.h
@@ -0,0 +1,264 @@
+/*-
+ * Copyright (c) 2002 Daniel Eischen <deischen@freebsd.org>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+/*
+ * Machine-dependent thread prototypes/definitions for the thread kernel.
+ */
+#ifndef _PTHREAD_MD_H_
+#define _PTHREAD_MD_H_
+
+#include <stddef.h>
+#include <sys/types.h>
+#include <sys/kse.h>
+#include <machine/sysarch.h>
+#include <ucontext.h>
+
+extern int _thr_setcontext(mcontext_t *, intptr_t, intptr_t *);
+extern int _thr_getcontext(mcontext_t *);
+
+#define KSE_STACKSIZE 16384
+#define DTV_OFFSET offsetof(struct tcb, tcb_dtv)
+
+#define THR_GETCONTEXT(ucp) _thr_getcontext(&(ucp)->uc_mcontext)
+#define THR_SETCONTEXT(ucp) _thr_setcontext(&(ucp)->uc_mcontext, 0, NULL)
+
+#define PER_KSE
+#undef PER_THREAD
+
+struct kse;
+struct pthread;
+
+/*
+ * %gs points to a struct kcb.
+ */
+struct kcb {
+ struct tcb *kcb_curtcb; /* currently installed tcb */
+ struct kcb *kcb_self; /* self reference */
+ struct kse *kcb_kse; /* owning KSE */
+ struct kse_mailbox kcb_kmbx; /* kernel upcall mailbox */
+};
+
+/* Thread control block; the first two fields are dictated by rtld. */
+struct tcb {
+ struct tcb *tcb_self; /* required by rtld */
+ void *tcb_dtv; /* required by rtld */
+ struct pthread *tcb_thread; /* owning pthread */
+ void *tcb_spare; /* align tcb_tmbx to 16 bytes */
+ struct kse_thr_mailbox tcb_tmbx;
+};
+
+/*
+ * Evaluates to the byte offset of the per-kse variable name.
+ */
+#define __kcb_offset(name) __offsetof(struct kcb, name)
+
+/*
+ * Evaluates to the type of the per-kse variable name.
+ */
+#define __kcb_type(name) __typeof(((struct kcb *)0)->name)
+
+/*
+ * Evaluates to the value of the per-kse variable name.
+ */
+#define KCB_GET32(name) ({ \
+ __kcb_type(name) __result; \
+ \
+ u_int __i; \
+ __asm __volatile("movl %%gs:%1, %0" \
+ : "=r" (__i) \
+ : "m" (*(u_int *)(__kcb_offset(name)))); \
+ __result = (__kcb_type(name))__i; \
+ \
+ __result; \
+})
+
+/*
+ * Sets the value of the per-kse variable name to value val.
+ */
+#define KCB_SET32(name, val) ({ \
+ __kcb_type(name) __val = (val); \
+ \
+ u_int __i; \
+ __i = (u_int)__val; \
+ __asm __volatile("movl %1,%%gs:%0" \
+ : "=m" (*(u_int *)(__kcb_offset(name))) \
+ : "r" (__i)); \
+})
+
+/*
+ * Atomically read and zero the 32-bit word at the given %gs-relative
+ * address (xchg with a memory operand is implicitly locked).
+ */
+static __inline u_long
+__kcb_readandclear32(volatile u_long *addr)
+{
+ u_long result;
+
+ __asm __volatile (
+ " xorl %0, %0;"
+ " xchgl %%gs:%1, %0;"
+ "# __kcb_readandclear32"
+ : "=&r" (result)
+ : "m" (*addr));
+ return (result);
+}
+
+/* Atomically read-and-clear the per-kse variable name. */
+#define KCB_READANDCLEAR32(name) ({ \
+ __kcb_type(name) __result; \
+ \
+ __result = (__kcb_type(name)) \
+ __kcb_readandclear32((u_long *)__kcb_offset(name)); \
+ __result; \
+})
+
+
+/* Convenience accessors for the %gs-based kcb fields. */
+#define _kcb_curkcb() KCB_GET32(kcb_self)
+#define _kcb_curtcb() KCB_GET32(kcb_curtcb)
+#define _kcb_curkse() ((struct kse *)KCB_GET32(kcb_kmbx.km_udata))
+#define _kcb_get_tmbx() KCB_GET32(kcb_kmbx.km_curthread)
+#define _kcb_set_tmbx(value) KCB_SET32(kcb_kmbx.km_curthread, (void *)value)
+#define _kcb_readandclear_tmbx() KCB_READANDCLEAR32(kcb_kmbx.km_curthread)
+
+
+/*
+ * The constructors.
+ */
+struct tcb *_tcb_ctor(struct pthread *, int);
+void _tcb_dtor(struct tcb *tcb);
+struct kcb *_kcb_ctor(struct kse *);
+void _kcb_dtor(struct kcb *);
+
+/* Called from the KSE to set its private data. */
+static __inline void
+_kcb_set(struct kcb *kcb)
+{
+ /* Point %gs at the kcb; all KCB_* accessors depend on this. */
+ i386_set_gsbase(kcb);
+}
+
+/* Get the current kcb. */
+static __inline struct kcb *
+_kcb_get(void)
+{
+ return (_kcb_curkcb());
+}
+
+/* Enter a critical region: read and clear km_curthread. */
+static __inline struct kse_thr_mailbox *
+_kcb_critical_enter(void)
+{
+ struct kse_thr_mailbox *crit;
+
+ crit = _kcb_readandclear_tmbx();
+ return (crit);
+}
+
+/* Leave a critical region: restore the mailbox saved by enter. */
+static __inline void
+_kcb_critical_leave(struct kse_thr_mailbox *crit)
+{
+ _kcb_set_tmbx(crit);
+}
+
+/* Non-zero iff km_curthread is clear, i.e. we are in a critical region. */
+static __inline int
+_kcb_in_critical(void)
+{
+ return (_kcb_get_tmbx() == NULL);
+}
+
+/* Install tcb as the kcb's current thread. */
+static __inline void
+_tcb_set(struct kcb *kcb, struct tcb *tcb)
+{
+ kcb->kcb_curtcb = tcb;
+}
+
+/* Get the current tcb. */
+static __inline struct tcb *
+_tcb_get(void)
+{
+ return (_kcb_curtcb());
+}
+
+/* Get the pthread bound to the current tcb, or NULL if none. */
+static __inline struct pthread *
+_get_curthread(void)
+{
+ struct tcb *tcb;
+
+ tcb = _kcb_curtcb();
+ if (tcb != NULL)
+  return (tcb->tcb_thread);
+ else
+  return (NULL);
+}
+
+/* Get the current kse (from km_udata in the mailbox). */
+static __inline struct kse *
+_get_curkse(void)
+{
+ return ((struct kse *)_kcb_curkse());
+}
+
+void _i386_enter_uts(struct kse_mailbox *km, kse_func_t uts, void *stack,
+    size_t stacksz);
+
+/*
+ * Save the current context in the thread's mailbox and enter the UTS
+ * on the KSE's own stack.  Returns 0 when the saved context is later
+ * resumed, -1 on error; the initial call does not return normally.
+ */
+static __inline int
+_thread_enter_uts(struct tcb *tcb, struct kcb *kcb)
+{
+ int ret;
+
+ ret = _thr_getcontext(&tcb->tcb_tmbx.tm_context.uc_mcontext);
+ if (ret == 0) {
+  _i386_enter_uts(&kcb->kcb_kmbx, kcb->kcb_kmbx.km_func,
+      kcb->kcb_kmbx.km_stack.ss_sp,
+      kcb->kcb_kmbx.km_stack.ss_size);
+  /* We should not reach here. */
+  return (-1);
+ }
+ else if (ret < 0)
+  return (-1);
+ return (0);
+}
+
+/*
+ * Switch to the context in tcb's mailbox.  With _libkse_debug set the
+ * kernel performs the switch (kse_switchin(2)) so debuggers can track
+ * it; setmbox additionally publishes the new thread's mailbox in
+ * km_curthread as part of the switch.  Returns only on failure (-1).
+ */
+static __inline int
+_thread_switch(struct kcb *kcb, struct tcb *tcb, int setmbox)
+{
+ extern int _libkse_debug;
+
+ if ((kcb == NULL) || (tcb == NULL))
+  return (-1);
+ kcb->kcb_curtcb = tcb;
+ if (_libkse_debug == 0) {
+  tcb->tcb_tmbx.tm_lwp = kcb->kcb_kmbx.km_lwp;
+  if (setmbox != 0)
+   _thr_setcontext(&tcb->tcb_tmbx.tm_context.uc_mcontext,
+       (intptr_t)&tcb->tcb_tmbx,
+       (intptr_t *)&kcb->kcb_kmbx.km_curthread);
+  else
+   _thr_setcontext(&tcb->tcb_tmbx.tm_context.uc_mcontext,
+       0, NULL);
+ } else {
+  if (setmbox)
+   kse_switchin(&tcb->tcb_tmbx, KSE_SWITCHIN_SETTMBX);
+  else
+   kse_switchin(&tcb->tcb_tmbx, 0);
+ }
+
+ /* We should not reach here. */
+ return (-1);
+}
+
+#endif
diff --git a/lib/libpthread/arch/ia64/Makefile.inc b/lib/libpthread/arch/ia64/Makefile.inc
new file mode 100644
index 0000000..c8b0362
--- /dev/null
+++ b/lib/libpthread/arch/ia64/Makefile.inc
@@ -0,0 +1,5 @@
+# $FreeBSD$
+
+.PATH: ${.CURDIR}/arch/${MACHINE_ARCH}/${MACHINE_ARCH}
+
+SRCS+= context.S enter_uts.S pthread_md.c
diff --git a/lib/libpthread/arch/ia64/ia64/context.S b/lib/libpthread/arch/ia64/ia64/context.S
new file mode 100644
index 0000000..9411293
--- /dev/null
+++ b/lib/libpthread/arch/ia64/ia64/context.S
@@ -0,0 +1,351 @@
+/*
+ * Copyright (c) 2003 Marcel Moolenaar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <machine/asm.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/syscall.h>
+
+#define SIZEOF_SPECIAL (18*8)
+
+/*
+ * int _ia64_restore_context(mcontext_t *mc, intptr_t val, intptr_t *loc);
+ *
+ * Restore the register state saved in mc and resume at its saved rp.
+ * Immediately before the final branch, if loc is non-NULL, val is
+ * stored to *loc (callers use this to publish km_curthread together
+ * with the context switch; compare _thread_switch() on other arches).
+ */
+ENTRY(_ia64_restore_context, 3)
+{ .mmi
+ invala
+ mov ar.rsc=0xc
+ add r32=16,r32
+ ;;
+}
+{ .mmi
+ loadrs
+ ld8 r12=[r32] // sp
+ add r31=8,r32
+ ;;
+}
+{ .mii
+ ld8 r16=[r31],16 // unat (before)
+ add r30=16,r32
+ add r14=SIZEOF_SPECIAL,r32
+ ;;
+}
+{ .mmi
+ ld8 r17=[r30],16 // rp
+ ld8 r18=[r31],16 // pr
+ mov r2=r33
+ ;;
+}
+{ .mmi
+ ld8 r19=[r30],16 // pfs
+ ld8 r20=[r31],32 // bspstore
+ mov rp=r17
+ ;;
+}
+{ .mmi
+ ld8 r21=[r30],32 // rnat
+ ld8 r22=[r31],16 // rsc
+ mov pr=r18,0x1fffe
+ ;;
+}
+{ .mmi
+ ld8 r23=[r30] // fpsr
+ ld8 r24=[r31] // psr -- not used
+ mov r3=r34
+ ;;
+}
+{ .mmi
+ ld8 r17=[r14],8 // unat (after)
+ mov ar.bspstore=r20
+ cmp.ne p15,p0=r0,r3
+ ;;
+}
+{ .mmi
+ mov ar.rnat=r21
+ mov ar.unat=r17
+ add r15=8,r14
+ ;;
+}
+{ .mmi
+ ld8.fill r4=[r14],16 // r4
+ ld8.fill r5=[r15],16 // r5
+ mov ar.pfs=r19
+ ;;
+}
+{ .mmi
+ ld8.fill r6=[r14],16 // r6
+ ld8.fill r7=[r15],16 // r7
+ nop 0
+ ;;
+}
+{ .mmi
+ mov ar.unat=r16
+ mov ar.rsc=r22
+ nop 0
+}
+{ .mmi
+ ld8 r17=[r14],16 // b1
+ ld8 r18=[r15],16 // b2
+ nop 0
+ ;;
+}
+{ .mmi
+ ld8 r19=[r14],16 // b3
+ ld8 r20=[r15],16 // b4
+ mov b1=r17
+ ;;
+}
+{ .mmi
+ ld8 r16=[r14],24 // b5
+ ld8 r17=[r15],32 // lc
+ mov b2=r18
+ ;;
+}
+{ .mmi
+ ldf.fill f2=[r14],32
+ ldf.fill f3=[r15],32
+ mov b3=r19
+ ;;
+}
+{ .mmi
+ ldf.fill f4=[r14],32
+ ldf.fill f5=[r15],32
+ mov b4=r20
+ ;;
+}
+{ .mmi
+ ldf.fill f16=[r14],32
+ ldf.fill f17=[r15],32
+ mov b5=r16
+ ;;
+}
+{ .mmi
+ ldf.fill f18=[r14],32
+ ldf.fill f19=[r15],32
+ mov ar.lc=r17
+ ;;
+}
+ ldf.fill f20=[r14],32
+ ldf.fill f21=[r15],32
+ ;;
+ ldf.fill f22=[r14],32
+ ldf.fill f23=[r15],32
+ ;;
+ ldf.fill f24=[r14],32
+ ldf.fill f25=[r15],32
+ ;;
+ ldf.fill f26=[r14],32
+ ldf.fill f27=[r15],32
+ ;;
+ ldf.fill f28=[r14],32
+ ldf.fill f29=[r15],32
+ ;;
+ ldf.fill f30=[r14],32+24
+ ldf.fill f31=[r15],24+24
+ ;;
+ ld8 r8=[r14],16
+ ld8 r9=[r15],16
+ ;;
+ ld8 r10=[r14]
+ ld8 r11=[r15]
+ ;;
+{ .mmb
+(p15) st8 [r3]=r2
+ mov ar.fpsr=r23
+ br.ret.sptk rp
+ ;;
+}
+END(_ia64_restore_context)
+
+/*
+ * int _ia64_save_context(mcontext_t *mc);
+ *
+ * Save the callee-visible register state into mc.  Returns 0 to the
+ * direct caller; the saved context has r8 pre-set to 1, so resuming
+ * it via _ia64_restore_context() appears to return 1 from this call.
+ */
+ENTRY(_ia64_save_context, 1)
+{ .mmi
+ mov r14=ar.rsc
+ mov r15=ar.fpsr
+ add r31=8,r32
+ ;;
+}
+{ .mmi
+ st8 [r32]=r0,16
+ st8 [r31]=r0,16
+ nop 0
+ ;;
+}
+{ .mmi
+ mov ar.rsc=0xc
+ mov r16=ar.unat
+ nop 0
+ ;;
+}
+{ .mmi
+ flushrs
+ st8 [r32]=sp,16 // sp
+ mov r17=rp
+ ;;
+}
+{ .mmi
+ st8 [r31]=r16,16 // unat (before)
+ st8 [r32]=r17,16 // rp
+ mov r16=pr
+ ;;
+}
+{ .mmi
+ st8 [r31]=r16,16 // pr
+ mov r17=ar.bsp
+ mov r16=ar.pfs
+ ;;
+}
+{ .mmi
+ st8 [r32]=r16,16 // pfs
+ st8 [r31]=r17,16 // bspstore
+ nop 0
+ ;;
+}
+{ .mmi
+ mov r16=ar.rnat
+ mov ar.rsc=r14
+ add r30=SIZEOF_SPECIAL-(6*8),r32
+ ;;
+}
+{ .mmi
+ st8 [r32]=r16,16 // rnat
+ st8 [r31]=r0,16 // __spare
+ nop 0
+ ;;
+}
+{ .mmi
+ st8 [r32]=r13,16 // tp -- not used
+ st8 [r31]=r14,16 // rsc
+ mov r16=b1
+ ;;
+}
+{ .mmi
+ st8 [r32]=r15,10*8 // fpr
+ st8 [r31]=r0,8*8 // psr
+ nop 0
+ ;;
+}
+ /* callee_saved */
+{ .mmi
+ .mem.offset 8,0
+ st8.spill [r31]=r4,16 // r4
+ .mem.offset 16,0
+ st8.spill [r32]=r5,16 // r5
+ mov r17=b2
+ ;;
+}
+{ .mmi
+ .mem.offset 24,0
+ st8.spill [r31]=r6,16 // r6
+ .mem.offset 32,0
+ st8.spill [r32]=r7,16 // r7
+ mov r18=b3
+ ;;
+}
+{ .mmi
+ st8 [r31]=r16,16 // b1
+ mov r16=ar.unat
+ mov r19=b4
+ ;;
+}
+{ .mmi
+ st8 [r30]=r16 // unat (after)
+ st8 [r32]=r17,16 // b2
+ mov r16=b5
+ ;;
+}
+{ .mmi
+ st8 [r31]=r18,16 // b3
+ st8 [r32]=r19,16 // b4
+ mov r17=ar.lc
+ ;;
+}
+ st8 [r31]=r16,16 // b5
+ st8 [r32]=r17,16 // lc
+ ;;
+ st8 [r31]=r0,24 // __spare
+ stf.spill [r32]=f2,32
+ ;;
+ stf.spill [r31]=f3,32
+ stf.spill [r32]=f4,32
+ ;;
+ stf.spill [r31]=f5,32
+ stf.spill [r32]=f16,32
+ ;;
+ stf.spill [r31]=f17,32
+ stf.spill [r32]=f18,32
+ ;;
+ stf.spill [r31]=f19,32
+ stf.spill [r32]=f20,32
+ ;;
+ stf.spill [r31]=f21,32
+ stf.spill [r32]=f22,32
+ ;;
+ stf.spill [r31]=f23,32
+ stf.spill [r32]=f24,32
+ ;;
+ stf.spill [r31]=f25,32
+ stf.spill [r32]=f26,32
+ ;;
+ stf.spill [r31]=f27,32
+ stf.spill [r32]=f28,32
+ ;;
+{ .mmi
+ stf.spill [r31]=f29,32
+ stf.spill [r32]=f30,32+24
+ add r14=1,r0
+ ;;
+}
+{ .mmi
+ stf.spill [r31]=f31,24+24
+ st8 [r32]=r14,16 // r8
+ add r8=0,r0
+ ;;
+}
+ st8 [r31]=r0,16 // r9
+ st8 [r32]=r0 // r10
+ ;;
+{ .mmb
+ st8 [r31]=r0 // r11
+ mf
+ br.ret.sptk rp
+ ;;
+}
+END(_ia64_save_context)
+
+/*
+ * void _ia64_break_setcontext(mcontext_t *mc);
+ *
+ * Hand mc to the kernel: the mcontext pointer is placed in r8 and
+ * break 0x180000 traps into the kernel (presumably the setcontext
+ * break vector, per the function name -- TODO confirm).
+ */
+ENTRY(_ia64_break_setcontext, 1)
+{ .mmi
+ mov r8=r32
+ break 0x180000
+ nop 0
+ ;;
+}
+END(_ia64_break_setcontext)
diff --git a/lib/libpthread/arch/ia64/ia64/enter_uts.S b/lib/libpthread/arch/ia64/ia64/enter_uts.S
new file mode 100644
index 0000000..5df4d93
--- /dev/null
+++ b/lib/libpthread/arch/ia64/ia64/enter_uts.S
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2003 Marcel Moolenaar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <machine/asm.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * void _ia64_enter_uts(kse_func_t uts, struct kse_mailbox *km, void *stack,
+ *	size_t stacksz);
+ *
+ * Switch to the KSE's own stack and jump to the UTS entry point.
+ * in0 = uts (a function descriptor: entry ip, then gp), in1 = km,
+ * in2 = stack base, in3 = stack size.  Never returns.
+ */
+ENTRY(_ia64_enter_uts, 4)
+{ .mmi
+	ld8 r14=[in0],8			// entry ip from the function descriptor
+	mov ar.rsc=0xc			// RSE enforced-lazy while switching bspstore
+	add r15=in2,in3			// r15 = top of the new stack
+	;;
+}
+{ .mmi
+	flushrs				// flush dirty stacked regs to the old backing store
+	ld8 r1=[in0]			// gp from the function descriptor
+	mov b7=r14
+	;;
+}
+{ .mii
+	mov ar.bspstore=in2		// new register backing store grows up from the base
+	add sp=-16,r15			// memory stack grows down; 16-byte scratch slot
+	mov rp=r14
+	;;
+}
+{ .mib
+	mov ar.rsc=0xf			// re-enable eager RSE mode
+	mov in0=in1			// first argument to uts() is the mailbox
+	br.cond.sptk b7
+	;;
+}
+1:	br.cond.sptk 1b			// spin: uts() must never return
+END(_ia64_enter_uts)
diff --git a/lib/libpthread/arch/ia64/ia64/pthread_md.c b/lib/libpthread/arch/ia64/ia64/pthread_md.c
new file mode 100644
index 0000000..00e9a40
--- /dev/null
+++ b/lib/libpthread/arch/ia64/ia64/pthread_md.c
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2003 Daniel Eischen <deischen@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Neither the name of the author nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <stdlib.h>
+#include <strings.h>
+#include "pthread_md.h"
+
+/*
+ * The constructors.
+ */
+struct tcb *
+_tcb_ctor(struct pthread *thread, int initial)
+{
+	struct tcb *tcb;
+
+	/* calloc hands back the same zero-filled block as malloc + bzero. */
+	tcb = calloc(1, sizeof(struct tcb));
+	if (tcb == NULL)
+		return (NULL);
+	tcb->tcb_thread = thread;
+	/* Allocate TDV */
+	return (tcb);
+}
+
+/*
+ * Release a tcb allocated by _tcb_ctor().  The TDV, once it is
+ * actually allocated, must be released here first.
+ */
+void
+_tcb_dtor(struct tcb *tcb)
+{
+	/* Free TDV */
+	free(tcb);
+}
+
+struct kcb *
+_kcb_ctor(struct kse *kse)
+{
+	struct kcb *kcb;
+
+	/* calloc hands back the same zero-filled block as malloc + bzero. */
+	kcb = calloc(1, sizeof(struct kcb));
+	if (kcb == NULL)
+		return (NULL);
+	/*
+	 * Until a real thread runs on this KSE, the embedded fake tcb
+	 * stands in as the current tcb; it never takes upcalls.
+	 */
+	kcb->kcb_faketcb.tcb_isfake = 1;
+	kcb->kcb_faketcb.tcb_tmbx.tm_flags = TMF_NOUPCALL;
+	kcb->kcb_curtcb = &kcb->kcb_faketcb;
+	kcb->kcb_kse = kse;
+	return (kcb);
+}
+
+/* Release a kcb allocated by _kcb_ctor(). */
+void
+_kcb_dtor(struct kcb *kcb)
+{
+	free(kcb);
+}
diff --git a/lib/libpthread/arch/ia64/include/atomic_ops.h b/lib/libpthread/arch/ia64/include/atomic_ops.h
new file mode 100644
index 0000000..483c905
--- /dev/null
+++ b/lib/libpthread/arch/ia64/include/atomic_ops.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2003 Marcel Moolenaar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _ATOMIC_OPS_H_
+#define _ATOMIC_OPS_H_
+
+/*
+ * Atomically exchange *dst and val; the previous value of *dst is
+ * written to *res (ia64 xchg4 is inherently atomic, with acquire
+ * semantics).
+ *
+ * The asm must be __volatile and carry a "memory" clobber: these
+ * primitives are used for synchronization, so the compiler may
+ * neither elide the exchange nor reorder surrounding memory
+ * accesses across it.
+ */
+static inline void
+atomic_swap_int(int *dst, int val, int *res)
+{
+	__asm __volatile("xchg4 %0=[%2],%1"
+	    : "=r"(*res) : "r"(val), "r"(dst) : "memory");
+}
+
+/*
+ * 64-bit atomic exchange via xchg8.  __volatile plus the "memory"
+ * clobber keep the compiler from eliding the exchange or reordering
+ * surrounding memory accesses across it.
+ */
+static inline void
+atomic_swap_long(long *dst, long val, long *res)
+{
+	__asm __volatile("xchg8 %0=[%2],%1"
+	    : "=r"(*res) : "r"(val), "r"(dst) : "memory");
+}
+
+/*
+ * Pointers are 64-bit on ia64.  Arguments are parenthesized so that
+ * expression arguments expand safely.
+ */
+#define atomic_swap_ptr(d,v,r) \
+	atomic_swap_long((long *)(d), (long)(v), (long *)(r))
+
+#endif /* _ATOMIC_OPS_H_ */
diff --git a/lib/libpthread/arch/ia64/include/pthread_md.h b/lib/libpthread/arch/ia64/include/pthread_md.h
new file mode 100644
index 0000000..1df5046
--- /dev/null
+++ b/lib/libpthread/arch/ia64/include/pthread_md.h
@@ -0,0 +1,252 @@
+/*
+ * Copyright (c) 2003 Marcel Moolenaar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _PTHREAD_MD_H_
+#define _PTHREAD_MD_H_
+
+#include <sys/kse.h>
+#include <stddef.h>
+#include <ucontext.h>
+
+/* Default stack size for a KSE (upcall) stack. */
+#define KSE_STACKSIZE 16384
+/* Offset of the dynamic-TLS vector pointer within the tcb. */
+#define DTV_OFFSET offsetof(struct tcb, tcb_tp.tp_tdv)
+
+#define THR_GETCONTEXT(ucp) _ia64_save_context(&(ucp)->uc_mcontext)
+/* Never used on ia64: all switches go through _thread_switch(). */
+#define THR_SETCONTEXT(ucp) PANIC("THR_SETCONTEXT() now in use!\n")
+
+#define PER_THREAD
+
+struct kcb;
+struct kse;
+struct pthread;
+struct tcb;
+struct tdv; /* We don't know what this is yet? */
+
+/*
+ * tp points to one of these. We define the static TLS as an array
+ * of long double to enforce 16-byte alignment of the TLS memory,
+ * struct ia64_tp, struct tcb and also struct kcb. Both static and
+ * dynamic allocation of any of these structures will result in a
+ * valid, well-aligned thread pointer.
+ */
+struct ia64_tp {
+	struct tdv *tp_tdv; /* dynamic TLS */
+	uint64_t _reserved_;
+	long double tp_tls[0]; /* static TLS */
+};
+
+/* Per-thread control block; tcb_tp must stay last (tp_tls is flexible). */
+struct tcb {
+	struct kse_thr_mailbox tcb_tmbx;	/* kernel thread mailbox */
+	struct pthread *tcb_thread;		/* back pointer to the pthread */
+	struct kcb *tcb_curkcb;			/* kcb this thread runs on */
+	long tcb_isfake;			/* nonzero for a kcb's built-in tcb */
+	struct ia64_tp tcb_tp;			/* what the tp register addresses */
+};
+
+/* Per-KSE control block. */
+struct kcb {
+	struct kse_mailbox kcb_kmbx;		/* kernel KSE mailbox */
+	struct tcb kcb_faketcb;			/* current tcb when no thread runs */
+	struct tcb *kcb_curtcb;			/* currently active tcb */
+	struct kse *kcb_kse;			/* back pointer to the kse */
+};
+
+/* r13 is the ia64 thread pointer; it addresses the current tcb's tcb_tp. */
+register struct ia64_tp *_tp __asm("%r13");
+
+/* Recover the enclosing tcb from the thread pointer. */
+#define _tcb ((struct tcb*)((char*)(_tp) - offsetof(struct tcb, tcb_tp)))
+
+/*
+ * The kcb and tcb constructors.
+ */
+struct tcb *_tcb_ctor(struct pthread *, int);
+void _tcb_dtor(struct tcb *);
+struct kcb *_kcb_ctor(struct kse *kse);
+void _kcb_dtor(struct kcb *);
+
+/* Called from the KSE to set its private data. */
+static __inline void
+_kcb_set(struct kcb *kcb)
+{
+	/* There is no thread yet; use the fake tcb. */
+	_tp = &kcb->kcb_faketcb.tcb_tp;	/* point r13 at the fake tcb */
+}
+
+/*
+ * Get the current kcb.
+ *
+ * This can only be called while in a critical region; don't
+ * worry about having the kcb changed out from under us.
+ */
+static __inline struct kcb *
+_kcb_get(void)
+{
+	return (_tcb->tcb_curkcb);
+}
+
+/*
+ * Enter a critical region.
+ *
+ * Read and clear km_curthread in the kse mailbox.  Returns the
+ * previous value, to be handed back to _kcb_critical_leave(), or
+ * NULL if we were already in a critical region.
+ */
+static __inline struct kse_thr_mailbox *
+_kcb_critical_enter(void)
+{
+	struct kse_thr_mailbox *crit;
+	uint32_t flags;
+
+	if (_tcb->tcb_isfake != 0) {
+		/*
+		 * We already are in a critical region since
+		 * there is no current thread.
+		 */
+		crit = NULL;
+	} else {
+		/*
+		 * Set TMF_NOUPCALL while km_curthread is read and
+		 * cleared, then restore the saved flags.  The exact
+		 * ordering of these statements is load-bearing.
+		 */
+		flags = _tcb->tcb_tmbx.tm_flags;
+		_tcb->tcb_tmbx.tm_flags |= TMF_NOUPCALL;
+		crit = _tcb->tcb_curkcb->kcb_kmbx.km_curthread;
+		_tcb->tcb_curkcb->kcb_kmbx.km_curthread = NULL;
+		_tcb->tcb_tmbx.tm_flags = flags;
+	}
+	return (crit);
+}
+
+/* Leave a critical region: restore the km_curthread saved by enter. */
+static __inline void
+_kcb_critical_leave(struct kse_thr_mailbox *crit)
+{
+	/* No need to do anything if this is a fake tcb. */
+	if (_tcb->tcb_isfake == 0)
+		_tcb->tcb_curkcb->kcb_kmbx.km_curthread = crit;
+}
+
+/* Nonzero iff we are currently inside a critical region. */
+static __inline int
+_kcb_in_critical(void)
+{
+	uint32_t flags;
+	int ret;
+
+	if (_tcb->tcb_isfake != 0) {
+		/*
+		 * We are in a critical region since there is no
+		 * current thread.
+		 */
+		ret = 1;
+	} else {
+		/* Same TMF_NOUPCALL dance as _kcb_critical_enter(). */
+		flags = _tcb->tcb_tmbx.tm_flags;
+		_tcb->tcb_tmbx.tm_flags |= TMF_NOUPCALL;
+		ret = (_tcb->tcb_curkcb->kcb_kmbx.km_curthread == NULL);
+		_tcb->tcb_tmbx.tm_flags = flags;
+	}
+	return (ret);
+}
+
+/*
+ * Install tcb as the current thread on kcb; a NULL tcb installs the
+ * kcb's fake tcb.  Also redirects the thread pointer register.
+ */
+static __inline void
+_tcb_set(struct kcb *kcb, struct tcb *tcb)
+{
+	if (tcb == NULL)
+		tcb = &kcb->kcb_faketcb;
+	kcb->kcb_curtcb = tcb;
+	tcb->tcb_curkcb = kcb;
+	_tp = &tcb->tcb_tp;
+}
+
+/* Get the current tcb (derived from the thread pointer). */
+static __inline struct tcb *
+_tcb_get(void)
+{
+	return (_tcb);
+}
+
+/* Get the current pthread; NULL while a (zero-filled) fake tcb is current. */
+static __inline struct pthread *
+_get_curthread(void)
+{
+	return (_tcb->tcb_thread);
+}
+
+/*
+ * Get the current kse.
+ *
+ * Like _kcb_get(), this can only be called while in a critical region.
+ */
+static __inline struct kse *
+_get_curkse(void)
+{
+	return (_tcb->tcb_curkcb->kcb_kse);
+}
+
+/* MD context primitives, implemented in context.S and enter_uts.S. */
+void _ia64_break_setcontext(mcontext_t *mc);
+void _ia64_enter_uts(kse_func_t uts, struct kse_mailbox *km, void *stack,
+    size_t stacksz);
+int _ia64_restore_context(mcontext_t *mc, intptr_t val, intptr_t *loc);
+int _ia64_save_context(mcontext_t *mc);
+
+/*
+ * Save the current thread's context and enter the UTS on the KSE's
+ * upcall stack.  Returns 0 when the saved context is resumed later
+ * (_ia64_save_context() returns nonzero on the resume path), or -1
+ * if the switch into the UTS ever falls through.
+ */
+static __inline int
+_thread_enter_uts(struct tcb *tcb, struct kcb *kcb)
+{
+	if (_ia64_save_context(&tcb->tcb_tmbx.tm_context.uc_mcontext) == 0) {
+		/* Make the fake tcb the current thread. */
+		kcb->kcb_curtcb = &kcb->kcb_faketcb;
+		_tp = &kcb->kcb_faketcb.tcb_tp;
+		_ia64_enter_uts(kcb->kcb_kmbx.km_func, &kcb->kcb_kmbx,
+		    kcb->kcb_kmbx.km_stack.ss_sp,
+		    kcb->kcb_kmbx.km_stack.ss_size);
+		/* We should not reach here. */
+		return (-1);
+	}
+	return (0);
+}
+
+/*
+ * Switch to thread tcb on kcb; does not return on success.  There are
+ * three restore paths:
+ *  - async contexts must be restored by the kernel, via
+ *    _ia64_break_setcontext();
+ *  - syscall contexts are restored with kse_switchin(2);
+ *  - ordinary (voluntarily saved) contexts are restored in userland
+ *    with _ia64_restore_context().
+ * With setmbox, km_curthread is set to the thread mailbox as part of
+ * the switch on every path.
+ */
+static __inline int
+_thread_switch(struct kcb *kcb, struct tcb *tcb, int setmbox)
+{
+	mcontext_t *mc;
+
+	_tcb_set(kcb, tcb);
+	mc = &tcb->tcb_tmbx.tm_context.uc_mcontext;
+	if (mc->mc_flags & _MC_FLAGS_ASYNC_CONTEXT) {
+		if (setmbox) {
+			mc->mc_flags |= _MC_FLAGS_KSE_SET_MBOX;
+			/* ifa/isr (repurposed) carry the mbox address/value. */
+			mc->mc_special.ifa =
+			    (intptr_t)&kcb->kcb_kmbx.km_curthread;
+			mc->mc_special.isr = (intptr_t)&tcb->tcb_tmbx;
+		}
+		_ia64_break_setcontext(mc);
+	} else if (mc->mc_flags & _MC_FLAGS_SYSCALL_CONTEXT) {
+		if (setmbox)
+			kse_switchin(&tcb->tcb_tmbx, KSE_SWITCHIN_SETTMBX);
+		else
+			kse_switchin(&tcb->tcb_tmbx, 0);
+	} else {
+		if (setmbox)
+			_ia64_restore_context(mc, (intptr_t)&tcb->tcb_tmbx,
+			    (intptr_t *)&kcb->kcb_kmbx.km_curthread);
+		else
+			_ia64_restore_context(mc, 0, NULL);
+	}
+	/* We should not reach here. */
+	return (-1);
+}
+
+#endif /* _PTHREAD_MD_H_ */
diff --git a/lib/libpthread/arch/powerpc/Makefile.inc b/lib/libpthread/arch/powerpc/Makefile.inc
new file mode 100644
index 0000000..f4417a6
--- /dev/null
+++ b/lib/libpthread/arch/powerpc/Makefile.inc
@@ -0,0 +1,8 @@
+# $FreeBSD$
+
+# XXX temporary -- presumably until M:N scheduling works on powerpc
+CFLAGS+=-DSYSTEM_SCOPE_ONLY
+
+# Pick up machine-dependent sources from arch/powerpc/powerpc
+.PATH: ${.CURDIR}/arch/${MACHINE_ARCH}/${MACHINE_ARCH}
+
+# MD context-switch and TLS glue
+SRCS+= enter_uts.S context.S pthread_md.c
diff --git a/lib/libpthread/arch/powerpc/include/atomic_ops.h b/lib/libpthread/arch/powerpc/include/atomic_ops.h
new file mode 100644
index 0000000..8068e6f
--- /dev/null
+++ b/lib/libpthread/arch/powerpc/include/atomic_ops.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2004 by Peter Grehan. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _ATOMIC_OPS_H_
+#define _ATOMIC_OPS_H_
+
+/*
+ * Atomic swap:
+ * Atomic (tmp = *dst, *dst = val), then *res = tmp
+ *
+ * void atomic_swap32(intptr_t *dst, intptr_t val, intptr_t *res);
+ *
+ * Implemented with a lwarx/stwcx. reservation loop; the old value is
+ * stored to *res only once the reservation succeeds.
+ *
+ * NOTE(review): without __GNUC__ this compiles to a silent no-op and
+ * *res is never written -- flag loudly if another compiler is used.
+ */
+static inline void
+atomic_swap32(intptr_t *dst, intptr_t val, intptr_t *res)
+{
+	int tmp;
+
+	tmp = 0; /* should be a better way to quieten cc1... */
+#ifdef __GNUC__
+	__asm __volatile(
+	"1:	lwarx	%0, 0, %4\n"	/* load with reservation */
+	"	stwcx.	%3, 0, %4\n"	/* attempt to store val */
+	"	bne- 1b\n"		/* interrupted? retry */
+	"	stw	%0, %1\n"	/* else, *dst -> *res */
+	: "=&r" (tmp), "=m" (*res), "+m" (*dst)
+	: "r" (val), "r" (dst)
+	: "cc", "memory");
+#endif
+}
+
+/*
+ * Arguments are parenthesized so that expression arguments (e.g.
+ * "p + 1") expand safely inside the casts.
+ */
+#define atomic_swap_ptr(d, v, r) \
+	atomic_swap32((intptr_t *)(d), (intptr_t)(v), (intptr_t *)(r))
+
+#define atomic_swap_int(d, v, r) \
+	atomic_swap32((intptr_t *)(d), (intptr_t)(v), (intptr_t *)(r))
+#endif
diff --git a/lib/libpthread/arch/powerpc/include/pthread_md.h b/lib/libpthread/arch/powerpc/include/pthread_md.h
new file mode 100644
index 0000000..33a58b5
--- /dev/null
+++ b/lib/libpthread/arch/powerpc/include/pthread_md.h
@@ -0,0 +1,258 @@
+/*
+ * Copyright 2004 by Peter Grehan. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * Machine-dependent thread prototypes/definitions for the thread kernel.
+ */
+#ifndef _PTHREAD_MD_H_
+#define _PTHREAD_MD_H_
+
+#include <sys/kse.h>
+#include <stddef.h>
+#include <ucontext.h>
+
+/* MD context primitives, implemented in context.S and enter_uts.S. */
+extern void _ppc32_enter_uts(struct kse_mailbox *, kse_func_t, void *, size_t);
+extern int _ppc32_setcontext(mcontext_t *, intptr_t, intptr_t *);
+extern int _ppc32_getcontext(mcontext_t *);
+
+/* Default stack size for a KSE (upcall) stack. */
+#define KSE_STACKSIZE 16384
+/* Offset of the dynamic-TLS vector pointer within the tcb. */
+#define DTV_OFFSET offsetof(struct tcb, tcb_tp.tp_tdv)
+
+#define THR_GETCONTEXT(ucp) _ppc32_getcontext(&(ucp)->uc_mcontext)
+#define THR_SETCONTEXT(ucp) _ppc32_setcontext(&(ucp)->uc_mcontext, 0, NULL)
+
+#define PER_THREAD
+
+struct kcb;
+struct kse;
+struct pthread;
+struct tcb;
+struct tdv;
+
+/*
+ * %r2 points to a struct kcb.
+ *
+ * NOTE(review): the line above looks stale -- per TP_OFFSET below,
+ * r2 points 0x7008 past a struct ppc32_tp, not at a kcb.  Confirm.
+ */
+struct ppc32_tp {
+	struct tdv *tp_tdv; /* dynamic TLS */
+	uint32_t _reserved_;
+	long double tp_tls[0]; /* static TLS */
+};
+
+/* Per-thread control block; tcb_tp must stay last (tp_tls is flexible). */
+struct tcb {
+	struct kse_thr_mailbox tcb_tmbx;	/* kernel thread mailbox */
+	struct pthread *tcb_thread;		/* back pointer to the pthread */
+	struct kcb *tcb_curkcb;			/* kcb this thread runs on */
+	long tcb_isfake;			/* nonzero for a kcb's built-in tcb */
+	struct ppc32_tp tcb_tp;			/* what r2 (biased) addresses */
+};
+
+/* Per-KSE control block. */
+struct kcb {
+	struct kse_mailbox kcb_kmbx;		/* kernel KSE mailbox */
+	struct tcb kcb_faketcb;			/* current tcb when no thread runs */
+	struct tcb *kcb_curtcb;			/* currently active tcb */
+	struct kse *kcb_kse;			/* back pointer to the kse */
+};
+
+/*
+ * From the PowerPC32 TLS spec:
+ *
+ * "r2 is the thread pointer, and points 0x7000 past the end of the
+ * thread control block." Or, 0x7008 past the start of the 8-byte tcb
+ */
+#define TP_OFFSET 0x7008
+register uint8_t *_tpr __asm("%r2");
+
+/* Recover the enclosing tcb from the biased thread pointer. */
+#define _tcb ((struct tcb *)(_tpr - TP_OFFSET - offsetof(struct tcb, tcb_tp)))
+
+/*
+ * The kcb and tcb constructors.
+ */
+struct tcb *_tcb_ctor(struct pthread *, int);
+void _tcb_dtor(struct tcb *);
+struct kcb *_kcb_ctor(struct kse *kse);
+void _kcb_dtor(struct kcb *);
+
+/* Called from the KSE to set its private data. */
+static __inline void
+_kcb_set(struct kcb *kcb)
+{
+	/* There is no thread yet; use the fake tcb. */
+	_tpr = (uint8_t *)&kcb->kcb_faketcb.tcb_tp + TP_OFFSET;	/* bias r2 per the TLS ABI */
+}
+
+/*
+ * Get the current kcb.
+ *
+ * This can only be called while in a critical region; don't
+ * worry about having the kcb changed out from under us.
+ */
+static __inline struct kcb *
+_kcb_get(void)
+{
+	return (_tcb->tcb_curkcb);
+}
+
+/*
+ * Enter a critical region.
+ *
+ * Read and clear km_curthread in the kse mailbox.  Returns the
+ * previous value, to be handed back to _kcb_critical_leave(), or
+ * NULL if we were already in a critical region.
+ */
+static __inline struct kse_thr_mailbox *
+_kcb_critical_enter(void)
+{
+	struct kse_thr_mailbox *crit;
+	uint32_t flags;
+
+	if (_tcb->tcb_isfake != 0) {
+		/*
+		 * We already are in a critical region since
+		 * there is no current thread.
+		 */
+		crit = NULL;
+	} else {
+		/*
+		 * Set TMF_NOUPCALL while km_curthread is read and
+		 * cleared, then restore the saved flags.  The exact
+		 * ordering of these statements is load-bearing.
+		 */
+		flags = _tcb->tcb_tmbx.tm_flags;
+		_tcb->tcb_tmbx.tm_flags |= TMF_NOUPCALL;
+		crit = _tcb->tcb_curkcb->kcb_kmbx.km_curthread;
+		_tcb->tcb_curkcb->kcb_kmbx.km_curthread = NULL;
+		_tcb->tcb_tmbx.tm_flags = flags;
+	}
+	return (crit);
+}
+
+/* Leave a critical region: restore the km_curthread saved by enter. */
+static __inline void
+_kcb_critical_leave(struct kse_thr_mailbox *crit)
+{
+	/* No need to do anything if this is a fake tcb. */
+	if (_tcb->tcb_isfake == 0)
+		_tcb->tcb_curkcb->kcb_kmbx.km_curthread = crit;
+}
+
+/* Nonzero iff we are currently inside a critical region. */
+static __inline int
+_kcb_in_critical(void)
+{
+	uint32_t flags;
+	int ret;
+
+	if (_tcb->tcb_isfake != 0) {
+		/*
+		 * We are in a critical region since there is no
+		 * current thread.
+		 */
+		ret = 1;
+	} else {
+		/* Same TMF_NOUPCALL dance as _kcb_critical_enter(). */
+		flags = _tcb->tcb_tmbx.tm_flags;
+		_tcb->tcb_tmbx.tm_flags |= TMF_NOUPCALL;
+		ret = (_tcb->tcb_curkcb->kcb_kmbx.km_curthread == NULL);
+		_tcb->tcb_tmbx.tm_flags = flags;
+	}
+	return (ret);
+}
+
+/*
+ * Install tcb as the current thread on kcb; a NULL tcb installs the
+ * kcb's fake tcb.  Also redirects the (biased) thread pointer.
+ */
+static __inline void
+_tcb_set(struct kcb *kcb, struct tcb *tcb)
+{
+	if (tcb == NULL)
+		tcb = &kcb->kcb_faketcb;
+	kcb->kcb_curtcb = tcb;
+	tcb->tcb_curkcb = kcb;
+	_tpr = (uint8_t *)&tcb->tcb_tp + TP_OFFSET;
+}
+
+/* Get the current tcb (derived from the thread pointer). */
+static __inline struct tcb *
+_tcb_get(void)
+{
+	return (_tcb);
+}
+
+/* Get the current pthread; NULL while a (zero-filled) fake tcb is current. */
+static __inline struct pthread *
+_get_curthread(void)
+{
+	return (_tcb->tcb_thread);
+}
+
+/*
+ * Get the current kse.
+ *
+ * Like _kcb_get(), this can only be called while in a critical region.
+ */
+static __inline struct kse *
+_get_curkse(void)
+{
+	return (_tcb->tcb_curkcb->kcb_kse);
+}
+
+/*
+ * Save the current thread's context and enter the UTS on the KSE's
+ * upcall stack.  Returns 0 when the saved context is resumed later,
+ * or -1 if the switch into the UTS ever falls through.
+ */
+static __inline int
+_thread_enter_uts(struct tcb *tcb, struct kcb *kcb)
+{
+	if (_ppc32_getcontext(&tcb->tcb_tmbx.tm_context.uc_mcontext) == 0) {
+		/* Make the fake tcb the current thread. */
+		kcb->kcb_curtcb = &kcb->kcb_faketcb;
+		_tpr = (uint8_t *)&kcb->kcb_faketcb.tcb_tp + TP_OFFSET;
+		/*
+		 * ss_size - 32: keep room at the stack top for an
+		 * initial frame -- presumably; confirm vs enter_uts.S.
+		 */
+		_ppc32_enter_uts(&kcb->kcb_kmbx, kcb->kcb_kmbx.km_func,
+		    kcb->kcb_kmbx.km_stack.ss_sp,
+		    kcb->kcb_kmbx.km_stack.ss_size - 32);
+		/* We should not reach here. */
+		return (-1);
+	}
+	return (0);
+}
+
+/*
+ * Switch to thread tcb on kcb; does not return on success.  Full
+ * (kernel-saved) contexts are restored via kse_switchin(2); partial
+ * contexts saved by _ppc32_getcontext() are restored in userland
+ * with _ppc32_setcontext().  With setmbox, km_curthread is set to
+ * the thread mailbox as part of the switch.
+ */
+static __inline int
+_thread_switch(struct kcb *kcb, struct tcb *tcb, int setmbox)
+{
+	mcontext_t *mc;
+	extern int _libkse_debug;
+
+	_tcb_set(kcb, tcb);
+	mc = &tcb->tcb_tmbx.tm_context.uc_mcontext;
+
+	/*
+	 * A full context needs a system call to restore, so use
+	 * kse_switchin. Otherwise, the partial context can be
+	 * restored with _ppc32_setcontext
+	 *
+	 * NOTE(review): the condition below takes the syscall path only
+	 * when the context is full AND the debug hook is active; the
+	 * comment above implies a full context alone should suffice --
+	 * confirm the intended logic.
+	 */
+	if (mc->mc_vers != _MC_VERSION_KSE && _libkse_debug != 0) {
+		if (setmbox)
+			kse_switchin(&tcb->tcb_tmbx, KSE_SWITCHIN_SETTMBX);
+		else
+			kse_switchin(&tcb->tcb_tmbx, 0);
+	} else {
+		/* Pass the kcb's lwp id along in the thread mailbox. */
+		tcb->tcb_tmbx.tm_lwp = kcb->kcb_kmbx.km_lwp;
+		if (setmbox)
+			_ppc32_setcontext(mc, (intptr_t)&tcb->tcb_tmbx,
+			    (intptr_t *)&kcb->kcb_kmbx.km_curthread);
+		else
+			_ppc32_setcontext(mc, 0, NULL);
+	}
+
+	/* We should not reach here. */
+	return (-1);
+}
+
+#endif /* _PTHREAD_MD_H_ */
diff --git a/lib/libpthread/arch/powerpc/powerpc/assym.c b/lib/libpthread/arch/powerpc/powerpc/assym.c
new file mode 100644
index 0000000..a8479e7
--- /dev/null
+++ b/lib/libpthread/arch/powerpc/powerpc/assym.c
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2004 Peter Grehan.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/* Used to generate mcontext_t offsets */
+
+#include <sys/types.h>
+#include <sys/assym.h>
+#include <sys/ucontext.h>
+
+#include <stddef.h>
+
+/* mcontext layout version identifiers. */
+ASSYM(_MC_VERSION, _MC_VERSION);
+ASSYM(_MC_VERSION_KSE, _MC_VERSION_KSE);
+ASSYM(_MC_FP_VALID, _MC_FP_VALID);
+
+/* Scalar header fields. */
+ASSYM(_MC_VERS, offsetof(mcontext_t, mc_vers));
+ASSYM(_MC_FLAGS, offsetof(mcontext_t, mc_flags));
+
+/* GPRs r0-r31 live in mc_frame[0..31]; LR/CR/XER/CTR follow in [32..35]. */
+ASSYM(_MC_R0, offsetof(mcontext_t, mc_frame[0]));
+ASSYM(_MC_R1, offsetof(mcontext_t, mc_frame[1]));
+ASSYM(_MC_R2, offsetof(mcontext_t, mc_frame[2]));
+ASSYM(_MC_R3, offsetof(mcontext_t, mc_frame[3]));
+ASSYM(_MC_R4, offsetof(mcontext_t, mc_frame[4]));
+ASSYM(_MC_R5, offsetof(mcontext_t, mc_frame[5]));
+ASSYM(_MC_R6, offsetof(mcontext_t, mc_frame[6]));
+ASSYM(_MC_R7, offsetof(mcontext_t, mc_frame[7]));
+ASSYM(_MC_R8, offsetof(mcontext_t, mc_frame[8]));
+ASSYM(_MC_R9, offsetof(mcontext_t, mc_frame[9]));
+ASSYM(_MC_R10, offsetof(mcontext_t, mc_frame[10]));
+ASSYM(_MC_R11, offsetof(mcontext_t, mc_frame[11]));
+ASSYM(_MC_R12, offsetof(mcontext_t, mc_frame[12]));
+ASSYM(_MC_R13, offsetof(mcontext_t, mc_frame[13]));
+ASSYM(_MC_R14, offsetof(mcontext_t, mc_frame[14]));
+ASSYM(_MC_R15, offsetof(mcontext_t, mc_frame[15]));
+ASSYM(_MC_R16, offsetof(mcontext_t, mc_frame[16]));
+ASSYM(_MC_R17, offsetof(mcontext_t, mc_frame[17]));
+ASSYM(_MC_R18, offsetof(mcontext_t, mc_frame[18]));
+ASSYM(_MC_R19, offsetof(mcontext_t, mc_frame[19]));
+ASSYM(_MC_R20, offsetof(mcontext_t, mc_frame[20]));
+ASSYM(_MC_R21, offsetof(mcontext_t, mc_frame[21]));
+ASSYM(_MC_R22, offsetof(mcontext_t, mc_frame[22]));
+ASSYM(_MC_R23, offsetof(mcontext_t, mc_frame[23]));
+ASSYM(_MC_R24, offsetof(mcontext_t, mc_frame[24]));
+ASSYM(_MC_R25, offsetof(mcontext_t, mc_frame[25]));
+ASSYM(_MC_R26, offsetof(mcontext_t, mc_frame[26]));
+ASSYM(_MC_R27, offsetof(mcontext_t, mc_frame[27]));
+ASSYM(_MC_R28, offsetof(mcontext_t, mc_frame[28]));
+ASSYM(_MC_R29, offsetof(mcontext_t, mc_frame[29]));
+ASSYM(_MC_R30, offsetof(mcontext_t, mc_frame[30]));
+ASSYM(_MC_R31, offsetof(mcontext_t, mc_frame[31]));
+ASSYM(_MC_LR, offsetof(mcontext_t, mc_frame[32]));
+ASSYM(_MC_CR, offsetof(mcontext_t, mc_frame[33]));
+ASSYM(_MC_XER, offsetof(mcontext_t, mc_frame[34]));
+ASSYM(_MC_CTR, offsetof(mcontext_t, mc_frame[35]));
+
+/* FPRs f0-f31 live in mc_fpreg[0..31]; FPSCR is stored at index 32. */
+ASSYM(_MC_FPSCR, offsetof(mcontext_t, mc_fpreg[32]));
+ASSYM(_MC_F0, offsetof(mcontext_t, mc_fpreg[0]));
+ASSYM(_MC_F1, offsetof(mcontext_t, mc_fpreg[1]));
+ASSYM(_MC_F2, offsetof(mcontext_t, mc_fpreg[2]));
+ASSYM(_MC_F3, offsetof(mcontext_t, mc_fpreg[3]));
+ASSYM(_MC_F4, offsetof(mcontext_t, mc_fpreg[4]));
+ASSYM(_MC_F5, offsetof(mcontext_t, mc_fpreg[5]));
+ASSYM(_MC_F6, offsetof(mcontext_t, mc_fpreg[6]));
+ASSYM(_MC_F7, offsetof(mcontext_t, mc_fpreg[7]));
+ASSYM(_MC_F8, offsetof(mcontext_t, mc_fpreg[8]));
+ASSYM(_MC_F9, offsetof(mcontext_t, mc_fpreg[9]));
+ASSYM(_MC_F10, offsetof(mcontext_t, mc_fpreg[10]));
+ASSYM(_MC_F11, offsetof(mcontext_t, mc_fpreg[11]));
+ASSYM(_MC_F12, offsetof(mcontext_t, mc_fpreg[12]));
+ASSYM(_MC_F13, offsetof(mcontext_t, mc_fpreg[13]));
+ASSYM(_MC_F14, offsetof(mcontext_t, mc_fpreg[14]));
+ASSYM(_MC_F15, offsetof(mcontext_t, mc_fpreg[15]));
+ASSYM(_MC_F16, offsetof(mcontext_t, mc_fpreg[16]));
+ASSYM(_MC_F17, offsetof(mcontext_t, mc_fpreg[17]));
+ASSYM(_MC_F18, offsetof(mcontext_t, mc_fpreg[18]));
+ASSYM(_MC_F19, offsetof(mcontext_t, mc_fpreg[19]));
+ASSYM(_MC_F20, offsetof(mcontext_t, mc_fpreg[20]));
+ASSYM(_MC_F21, offsetof(mcontext_t, mc_fpreg[21]));
+ASSYM(_MC_F22, offsetof(mcontext_t, mc_fpreg[22]));
+ASSYM(_MC_F23, offsetof(mcontext_t, mc_fpreg[23]));
+ASSYM(_MC_F24, offsetof(mcontext_t, mc_fpreg[24]));
+ASSYM(_MC_F25, offsetof(mcontext_t, mc_fpreg[25]));
+ASSYM(_MC_F26, offsetof(mcontext_t, mc_fpreg[26]));
+ASSYM(_MC_F27, offsetof(mcontext_t, mc_fpreg[27]));
+ASSYM(_MC_F28, offsetof(mcontext_t, mc_fpreg[28]));
+ASSYM(_MC_F29, offsetof(mcontext_t, mc_fpreg[29]));
+ASSYM(_MC_F30, offsetof(mcontext_t, mc_fpreg[30]));
+ASSYM(_MC_F31, offsetof(mcontext_t, mc_fpreg[31]));
diff --git a/lib/libpthread/arch/powerpc/powerpc/assym.s b/lib/libpthread/arch/powerpc/powerpc/assym.s
new file mode 100644
index 0000000..7017c15
--- /dev/null
+++ b/lib/libpthread/arch/powerpc/powerpc/assym.s
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2004 Peter Grehan.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * Struct offsets for version 0x1 of the mcontext struct.
+ * Generated with
+ *	cc -c assym.c
+ *	${SYSSRC}/kern/genassym.sh assym.o > assym_syms.s
+ *	hand-edit output
+ *
+ * These values must stay in sync with the ASSYM() list in assym.c:
+ * mc_frame[] is an array of 32-bit words starting at 0x218 (r0) with
+ * LR/CR/XER/CTR at indices 32..35; mc_fpreg[] is an array of 64-bit
+ * doubles starting at 0x2c0 (f0), with the FPSCR image at index 32.
+ */
+#define _MC_VERSION 0x1
+#define _MC_VERSION_KSE 0xee
+#define _MC_FP_VALID 0x1
+
+#define _MC_VERS 0x0
+#define _MC_FLAGS 0x4
+
+/*
+ * mc_frame[0]: 0x218 (= _MC_R1 - 4).  The hand-edited value 0x298
+ * duplicated _MC_LR and did not match assym.c's mc_frame[0].
+ */
+#define _MC_R0 0x218
+#define _MC_R1 0x21c
+#define _MC_R2 0x220
+#define _MC_R3 0x224
+#define _MC_R4 0x228
+#define _MC_R5 0x22c
+#define _MC_R6 0x230
+#define _MC_R7 0x234
+#define _MC_R8 0x238
+#define _MC_R9 0x23c
+#define _MC_R10 0x240
+#define _MC_R11 0x244
+#define _MC_R12 0x248
+#define _MC_R13 0x24c
+#define _MC_R14 0x250
+#define _MC_R15 0x254
+#define _MC_R16 0x258
+#define _MC_R17 0x25c
+#define _MC_R18 0x260
+#define _MC_R19 0x264
+#define _MC_R20 0x268
+#define _MC_R21 0x26c
+#define _MC_R22 0x270
+#define _MC_R23 0x274
+#define _MC_R24 0x278
+#define _MC_R25 0x27c
+#define _MC_R26 0x280
+#define _MC_R27 0x284
+#define _MC_R28 0x288
+#define _MC_R29 0x28c
+#define _MC_R30 0x290
+#define _MC_R31 0x294
+#define _MC_LR 0x298
+#define _MC_CR 0x29c
+#define _MC_XER 0x2a0
+#define _MC_CTR 0x2a4
+
+/* FPSCR image sits after f0-f31 (0x2c0 + 32*8 = 0x3c0). */
+#define _MC_FPSCR 0x3c0
+#define _MC_F0 0x2c0
+#define _MC_F1 0x2c8
+#define _MC_F2 0x2d0
+#define _MC_F3 0x2d8
+#define _MC_F4 0x2e0
+#define _MC_F5 0x2e8
+#define _MC_F6 0x2f0
+#define _MC_F7 0x2f8
+#define _MC_F8 0x300
+#define _MC_F9 0x308
+#define _MC_F10 0x310
+#define _MC_F11 0x318
+#define _MC_F12 0x320
+#define _MC_F13 0x328
+#define _MC_F14 0x330
+#define _MC_F15 0x338
+#define _MC_F16 0x340
+#define _MC_F17 0x348
+#define _MC_F18 0x350
+#define _MC_F19 0x358
+#define _MC_F20 0x360
+#define _MC_F21 0x368
+#define _MC_F22 0x370
+#define _MC_F23 0x378
+#define _MC_F24 0x380
+#define _MC_F25 0x388
+#define _MC_F26 0x390
+#define _MC_F27 0x398
+#define _MC_F28 0x3a0
+#define _MC_F29 0x3a8
+#define _MC_F30 0x3b0
+#define _MC_F31 0x3b8
+
diff --git a/lib/libpthread/arch/powerpc/powerpc/context.S b/lib/libpthread/arch/powerpc/powerpc/context.S
new file mode 100644
index 0000000..34d175a
--- /dev/null
+++ b/lib/libpthread/arch/powerpc/powerpc/context.S
@@ -0,0 +1,151 @@
+/*
+ * Copyright (c) 2004 Peter Grehan.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <machine/asm.h>
+__FBSDID("$FreeBSD$");
+
+#include "assym.s"
+
+/*
+ * int _ppc32_getcontext(mcontext_t *mcp)
+ *
+ * Save register state from a voluntary context switch.
+ * Only volatile registers, and those needed to complete
+ * a setcontext call, need to be saved.
+ *
+ * r1
+ * r14-31
+ * f14-31 XXX
+ * lr
+ *
+ * Return 0 for this call, and set up the context so it will return
+ * 1 when restored with _ppc32_setcontext().
+ *
+ * XXX XXX
+ * Floating-point is a big issue. Since there's no way to determine
+ * if the caller has used FP, all volatile register need to be saved.
+ * If FP hasn't been used, this results in a lazy FP exception in
+ * the kernel and from that point on FP is always switched in/out
+ * for the thread, which may be a big performance drag for the system.
+ * An alternative is to use a system call to get the context, which
+ * will do the right thing for floating point, but will save all
+ * registers rather than the caller-saved subset, and has the overhead
+ * of a syscall.
+ * Maybe another option would be to give a light-weight way for a
+ * thread to determine if FP is in used: perhaps a syscall that
+ * returns in the asm traphandler, or an OSX-style read-only page
+ * with a flag to indicate FP state.
+ *
+ * For now, punt the issue ala Alpha 1:1 model and fix in the future.
+ */
+ENTRY(_ppc32_getcontext)
+	stw %r1, _MC_R1(%r3)		/* save stack pointer */
+	stw %r13, _MC_R13(%r3)		/* callee-saved r13..r31 */
+	stw %r14, _MC_R14(%r3)
+	stw %r15, _MC_R15(%r3)
+	stw %r16, _MC_R16(%r3)
+	stw %r17, _MC_R17(%r3)
+	stw %r18, _MC_R18(%r3)
+	stw %r19, _MC_R19(%r3)
+	stw %r20, _MC_R20(%r3)
+	stw %r21, _MC_R21(%r3)
+	stw %r22, _MC_R22(%r3)
+	stw %r23, _MC_R23(%r3)
+	stw %r24, _MC_R24(%r3)
+	stw %r25, _MC_R25(%r3)
+	stw %r26, _MC_R26(%r3)
+	stw %r27, _MC_R27(%r3)
+	stw %r28, _MC_R28(%r3)
+	stw %r29, _MC_R29(%r3)		/* fix: was _MC_R28, clobbering r28's slot */
+	stw %r30, _MC_R30(%r3)
+	stw %r31, _MC_R31(%r3)
+	mflr %r4			/* r4 is volatile scratch */
+	stw %r4, _MC_LR(%r3)		/* resume address */
+	mfcr %r4
+	stw %r4, _MC_CR(%r3)
+
+	/* XXX f14-31 ? */
+
+	li %r4, _MC_VERSION_KSE	/* partial ucontext version */
+	stw %r4, _MC_VERS(%r3)
+
+	/* Return 0 */
+	li %r3, 0
+	blr
+
+/*
+ * int _ppc32_setcontext(const mcontext_t *mcp, intptr_t val,
+ * intptr_t *loc);
+ *
+ * Should only be called for partial KSE contexts. The full context
+ * case is handled by kse_switchin() in _thread_switch()
+ *
+ * Returns -1 on error and 1 for return from a saved context
+ */
+
+ENTRY(_ppc32_setcontext)
+	lwz %r6, _MC_VERS(%r3)
+	cmpwi %r6, _MC_VERSION_KSE	/* KSE partial context ? */
+	beq 1f
+	li %r3, -1			/* invalid context type, return -1 */
+	blr
+
+1:	/* partial format, callee-saved regs assumed */
+	lwz %r1, _MC_R1(%r3)		/* restore stack pointer */
+	lwz %r13, _MC_R13(%r3)		/* callee-saved r13..r31 */
+	lwz %r14, _MC_R14(%r3)
+	lwz %r15, _MC_R15(%r3)
+	lwz %r16, _MC_R16(%r3)
+	lwz %r17, _MC_R17(%r3)
+	lwz %r18, _MC_R18(%r3)
+	lwz %r19, _MC_R19(%r3)
+	lwz %r20, _MC_R20(%r3)
+	lwz %r21, _MC_R21(%r3)
+	lwz %r22, _MC_R22(%r3)
+	lwz %r23, _MC_R23(%r3)
+	lwz %r24, _MC_R24(%r3)
+	lwz %r25, _MC_R25(%r3)
+	lwz %r26, _MC_R26(%r3)
+	lwz %r27, _MC_R27(%r3)
+	lwz %r28, _MC_R28(%r3)
+	lwz %r29, _MC_R29(%r3)		/* fix: was _MC_R28, loading r28's value */
+	lwz %r30, _MC_R30(%r3)
+	lwz %r31, _MC_R31(%r3)
+	lwz %r6, _MC_LR(%r3)
+	mtlr %r6			/* resume address */
+	lwz %r6, _MC_CR(%r3)
+	mtcr %r6
+
+	/* XXX f14-31 ? */
+
+	/* if (loc != NULL) *loc = val */
+	cmpwi %r5, 0
+	beq 2f
+	stw %r4, 0(%r5)
+
+	/* Return 1 */
+2:	li %r3, 1
+	blr
diff --git a/lib/libpthread/arch/powerpc/powerpc/enter_uts.S b/lib/libpthread/arch/powerpc/powerpc/enter_uts.S
new file mode 100644
index 0000000..7cc4d7f
--- /dev/null
+++ b/lib/libpthread/arch/powerpc/powerpc/enter_uts.S
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2004 Peter Grehan.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <machine/asm.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * _ppc32_enter_uts(struct kse_mailbox *km, kse_func_t uts, void *stack,
+ * long stacksz);
+ *
+ * Call (*uts)(km) on the requested stack. This function doesn't
+ * return. The km parameter stays in %r3.
+ */
+ENTRY(_ppc32_enter_uts)
+	/*
+	 * NOTE(review): the new %r1 is used as-is -- no initial frame is
+	 * reserved, the back chain word is not nulled, and stack+stacksz
+	 * is assumed suitably aligned.  Confirm the UTS entry establishes
+	 * its own frame before anything is stored relative to %r1.
+	 */
+	add %r1,%r5,%r6 /* new stack = stack + stacksz */
+	mtlr %r4 /* link register = uts */
+	blrl /* (*uts)(km) -- does not return */
diff --git a/lib/libpthread/arch/powerpc/powerpc/pthread_md.c b/lib/libpthread/arch/powerpc/powerpc/pthread_md.c
new file mode 100644
index 0000000..c8445b1
--- /dev/null
+++ b/lib/libpthread/arch/powerpc/powerpc/pthread_md.c
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2003 Daniel Eischen <deischen@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Neither the name of the author nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <stdlib.h>
+#include <strings.h>
+#include "pthread_md.h"
+
+/*
+ * The constructors.
+ */
+struct tcb *
+_tcb_ctor(struct pthread *thread, int initial)
+{
+	struct tcb *newtcb;
+
+	/* Zeroed allocation: every field starts out cleared. */
+	newtcb = calloc(1, sizeof(*newtcb));
+	if (newtcb == NULL)
+		return (NULL);
+	newtcb->tcb_thread = thread;
+	/* Allocate TDV */
+	return (newtcb);
+}
+
+void
+_tcb_dtor(struct tcb *tcb)
+{
+	/* Free TDV */
+
+	/* Release the control block itself (free(NULL) is a no-op). */
+	if (tcb != NULL)
+		free(tcb);
+}
+
+struct kcb *
+_kcb_ctor(struct kse *kse)
+{
+	struct kcb *newkcb;
+
+	newkcb = calloc(1, sizeof(*newkcb));
+	if (newkcb == NULL)
+		return (NULL);
+	/* Until a real thread runs, the embedded fake tcb is current. */
+	newkcb->kcb_faketcb.tcb_isfake = 1;
+	newkcb->kcb_faketcb.tcb_tmbx.tm_flags = TMF_NOUPCALL;
+	newkcb->kcb_curtcb = &newkcb->kcb_faketcb;
+	newkcb->kcb_kse = kse;
+	return (newkcb);
+}
+
+void
+_kcb_dtor(struct kcb *kcb)
+{
+	/* free(NULL) is a no-op, so no guard is needed. */
+	free(kcb);
+}
diff --git a/lib/libpthread/arch/sparc64/Makefile.inc b/lib/libpthread/arch/sparc64/Makefile.inc
new file mode 100644
index 0000000..07107b4
--- /dev/null
+++ b/lib/libpthread/arch/sparc64/Makefile.inc
@@ -0,0 +1,5 @@
+# $FreeBSD$
+
+# Pick up the sparc64 machine-dependent sources.
+.PATH: ${.CURDIR}/arch/${MACHINE_ARCH}/${MACHINE_ARCH}
+
+SRCS+= pthread_md.c thr_getcontext.S
diff --git a/lib/libpthread/arch/sparc64/include/atomic_ops.h b/lib/libpthread/arch/sparc64/include/atomic_ops.h
new file mode 100644
index 0000000..4f4d8af
--- /dev/null
+++ b/lib/libpthread/arch/sparc64/include/atomic_ops.h
@@ -0,0 +1,75 @@
+/*-
+ * Copyright (c) 2003 Jake Burkholder <jake@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Neither the name of the author nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _ATOMIC_OPS_H_
+#define _ATOMIC_OPS_H_
+
+#include <machine/atomic.h>
+
+/*
+ * Atomic swap:
+ * Atomic (tmp = *dst, *dst = val), then *res = tmp
+ *
+ * void atomic_swap_long(long *dst, long val, long *res);
+ */
+static __inline void
+atomic_swap_long(long *dst, long val, long *res)
+{
+	long expect, actual;
+
+	/*
+	 * Unconditional swap built from CAS: retry until the compare
+	 * value matches the memory contents; that matching value is
+	 * the old *dst, which is handed back through *res.
+	 */
+	expect = *dst;
+	while ((actual = atomic_cas_64(dst, expect, val)) != expect)
+		expect = actual;
+	*res = expect;
+}
+
+static __inline void
+atomic_swap_int(int *dst, int val, int *res)
+{
+	int expect, actual;
+
+	/* 32-bit variant of the CAS-based swap loop above. */
+	expect = *dst;
+	while ((actual = atomic_cas_32(dst, expect, val)) != expect)
+		expect = actual;
+	*res = expect;
+}
+
+/* Pointer swap in terms of the 64-bit swap (sparc64 is LP64). */
+#define atomic_swap_ptr(dst, val, res) \
+	atomic_swap_long((long *)dst, (long)val, (long *)res)
+
+#endif
diff --git a/lib/libpthread/arch/sparc64/include/pthread_md.h b/lib/libpthread/arch/sparc64/include/pthread_md.h
new file mode 100644
index 0000000..fac62c2
--- /dev/null
+++ b/lib/libpthread/arch/sparc64/include/pthread_md.h
@@ -0,0 +1,254 @@
+/*-
+ * Copyright (c) 2003 Jake Burkholder <jake@freebsd.org>.
+ * Copyright (c) 2003 Marcel Moolenaar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * Machine-dependent thread prototypes/definitions for the thread kernel.
+ */
+#ifndef _PTHREAD_MD_H_
+#define _PTHREAD_MD_H_
+
+#include <sys/kse.h>
+#include <stddef.h>
+#include <ucontext.h>
+
+#define KSE_STACKSIZE 16384
+#define DTV_OFFSET offsetof(struct tcb, tcb_tp.tp_tdv)
+
+int _thr_setcontext(mcontext_t *, intptr_t, intptr_t *);
+int _thr_getcontext(mcontext_t *);
+
+#define THR_GETCONTEXT(ucp) _thr_getcontext(&(ucp)->uc_mcontext)
+#define THR_SETCONTEXT(ucp) _thr_setcontext(&(ucp)->uc_mcontext, 0, NULL)
+
+#define PER_THREAD
+
+struct kcb;
+struct kse;
+struct pthread;
+struct tcb;
+struct tdv; /* We don't know what this is yet? */
+
+
+/*
+ * %g6 points to one of these. We define the static TLS as an array
+ * of long double to enforce 16-byte alignment of the TLS memory.
+ *
+ * XXX - Both static and dynamic allocation of any of these structures
+ * will result in a valid, well-aligned thread pointer???
+ */
+struct sparc64_tp {
+	struct tdv *tp_tdv; /* dynamic TLS */
+	uint64_t _reserved_; /* keeps tp_tls 16-byte aligned */
+	long double tp_tls[0]; /* static TLS */
+};
+
+/*
+ * Per-thread control block.  tcb_tmbx requires 64-byte alignment,
+ * hence the __aligned(64) on the whole structure.
+ */
+struct tcb {
+	struct pthread *tcb_thread; /* back pointer to owning thread */
+	void *tcb_addr; /* allocated tcb address */
+	struct kcb *tcb_curkcb; /* kcb currently running this thread */
+	uint64_t tcb_isfake; /* nonzero: kcb-embedded bootstrap tcb */
+	uint64_t tcb_spare[4];
+	struct kse_thr_mailbox tcb_tmbx; /* needs 64-byte alignment */
+	struct sparc64_tp tcb_tp; /* %g6 points here */
+} __aligned(64);
+
+/* Per-KSE control block; embeds a fake tcb used when no thread runs. */
+struct kcb {
+	struct kse_mailbox kcb_kmbx;
+	struct tcb kcb_faketcb;
+	struct tcb *kcb_curtcb;
+	struct kse *kcb_kse;
+};
+
+register struct sparc64_tp *_tp __asm("%g6");
+
+/* Recover the enclosing tcb from the %g6 thread pointer. */
+#define _tcb ((struct tcb*)((char*)(_tp) - offsetof(struct tcb, tcb_tp)))
+
+/*
+ * The kcb and tcb constructors.
+ */
+struct tcb *_tcb_ctor(struct pthread *, int);
+void _tcb_dtor(struct tcb *);
+struct kcb *_kcb_ctor(struct kse *kse);
+void _kcb_dtor(struct kcb *);
+
+/* Called from the KSE to set its private data. */
+static __inline void
+_kcb_set(struct kcb *kcb)
+{
+	struct tcb *fake;
+
+	/* No thread is running yet; make the embedded fake tcb current. */
+	fake = &kcb->kcb_faketcb;
+	_tp = &fake->tcb_tp;
+}
+
+/*
+ * Get the current kcb.
+ *
+ * This can only be called while in a critical region; don't
+ * worry about having the kcb changed out from under us.
+ */
+static __inline struct kcb *
+_kcb_get(void)
+{
+	struct tcb *cur;
+
+	/* Safe only inside a critical region (see comment above). */
+	cur = _tcb;
+	return (cur->tcb_curkcb);
+}
+
+/*
+ * Enter a critical region.
+ *
+ * Read and clear km_curthread in the kse mailbox.
+ */
+static __inline struct kse_thr_mailbox *
+_kcb_critical_enter(void)
+{
+	struct kse_thr_mailbox *crit;
+	uint32_t flags;
+
+	if (_tcb->tcb_isfake != 0) {
+		/*
+		 * We already are in a critical region since
+		 * there is no current thread.
+		 */
+		crit = NULL;
+	} else {
+		/*
+		 * NOTE(review): TMF_NOUPCALL is held set for the duration
+		 * of the km_curthread swap, presumably so the kernel does
+		 * not deliver an upcall mid-update -- confirm vs kse(2).
+		 * The statement order here is load-dependent; do not
+		 * reorder.
+		 */
+		flags = _tcb->tcb_tmbx.tm_flags;
+		_tcb->tcb_tmbx.tm_flags |= TMF_NOUPCALL;
+		crit = _tcb->tcb_curkcb->kcb_kmbx.km_curthread;
+		_tcb->tcb_curkcb->kcb_kmbx.km_curthread = NULL;
+		_tcb->tcb_tmbx.tm_flags = flags; /* restore saved flags */
+	}
+	return (crit);
+}
+
+static __inline void
+_kcb_critical_leave(struct kse_thr_mailbox *crit)
+{
+	struct tcb *cur;
+
+	cur = _tcb;
+	/* A fake tcb is permanently critical; nothing to restore. */
+	if (cur->tcb_isfake != 0)
+		return;
+	cur->tcb_curkcb->kcb_kmbx.km_curthread = crit;
+}
+
+/*
+ * Nonzero iff we are currently in a critical region (running on a
+ * fake tcb, or km_curthread has been cleared).
+ */
+static __inline int
+_kcb_in_critical(void)
+{
+	uint32_t flags;
+	int ret;
+
+	if (_tcb->tcb_isfake != 0) {
+		/*
+		 * We are in a critical region since there is no
+		 * current thread.
+		 */
+		ret = 1;
+	} else {
+		/* Hold TMF_NOUPCALL while peeking at km_curthread. */
+		flags = _tcb->tcb_tmbx.tm_flags;
+		_tcb->tcb_tmbx.tm_flags |= TMF_NOUPCALL;
+		ret = (_tcb->tcb_curkcb->kcb_kmbx.km_curthread == NULL);
+		_tcb->tcb_tmbx.tm_flags = flags;
+	}
+	return (ret);
+}
+
+static __inline void
+_tcb_set(struct kcb *kcb, struct tcb *tcb)
+{
+	struct tcb *newtcb;
+
+	/* NULL means "no user thread": switch to the kcb's fake tcb. */
+	newtcb = (tcb != NULL) ? tcb : &kcb->kcb_faketcb;
+	kcb->kcb_curtcb = newtcb;
+	newtcb->tcb_curkcb = kcb;
+	_tp = &newtcb->tcb_tp;
+}
+
+static __inline struct tcb *
+_tcb_get(void)
+{
+	struct tcb *cur;
+
+	/* _tcb derives the tcb address from the %g6 thread pointer. */
+	cur = _tcb;
+	return (cur);
+}
+
+static __inline struct pthread *
+_get_curthread(void)
+{
+	struct tcb *cur;
+
+	cur = _tcb;
+	return (cur->tcb_thread);
+}
+
+/*
+ * Get the current kse.
+ *
+ * Like _kcb_get(), this can only be called while in a critical region.
+ */
+static __inline struct kse *
+_get_curkse(void)
+{
+	struct kcb *curkcb;
+
+	/* Critical region required, as with _kcb_get(). */
+	curkcb = _tcb->tcb_curkcb;
+	return (curkcb->kcb_kse);
+}
+
+void _sparc64_enter_uts(kse_func_t uts, struct kse_mailbox *km, void *stack,
+ size_t stacksz);
+
+/*
+ * Save the current thread's context into its mailbox, then enter the
+ * UTS (upcall) function on the KSE's upcall stack.  Returns 0 when the
+ * saved context is later resumed; -1 only if the UTS entry falls
+ * through, which should not happen.
+ */
+static __inline int
+_thread_enter_uts(struct tcb *tcb, struct kcb *kcb)
+{
+	/* _thr_getcontext() returns 0 now and nonzero when resumed. */
+	if (_thr_getcontext(&tcb->tcb_tmbx.tm_context.uc_mcontext) == 0) {
+		/* Make the fake tcb the current thread. */
+		kcb->kcb_curtcb = &kcb->kcb_faketcb;
+		_tp = &kcb->kcb_faketcb.tcb_tp;
+		_sparc64_enter_uts(kcb->kcb_kmbx.km_func, &kcb->kcb_kmbx,
+		    kcb->kcb_kmbx.km_stack.ss_sp,
+		    kcb->kcb_kmbx.km_stack.ss_size);
+		/* We should not reach here. */
+		return (-1);
+	}
+	return (0);
+}
+
+/*
+ * Resume tcb's saved context on the KSE owning kcb.  With a debugger
+ * attached (_libkse_debug != 0) the switch goes through kse_switchin()
+ * so it is visible to the kernel; otherwise the userland fast path
+ * restores the mcontext directly.  When setmbox is set, the thread
+ * mailbox pointer is stored into km_curthread just before resuming
+ * (via _thr_setcontext's val/loc arguments).  Does not return on
+ * success.
+ */
+static __inline int
+_thread_switch(struct kcb *kcb, struct tcb *tcb, int setmbox)
+{
+	extern int _libkse_debug;
+	mcontext_t *mc;
+
+	_tcb_set(kcb, tcb);
+	mc = &tcb->tcb_tmbx.tm_context.uc_mcontext;
+	if (_libkse_debug == 0) {
+		tcb->tcb_tmbx.tm_lwp = kcb->kcb_kmbx.km_lwp;
+		if (setmbox)
+			_thr_setcontext(mc, (intptr_t)&tcb->tcb_tmbx,
+			    (intptr_t *)&kcb->kcb_kmbx.km_curthread);
+		else
+			_thr_setcontext(mc, 0, NULL);
+	} else {
+		if (setmbox)
+			kse_switchin(&tcb->tcb_tmbx, KSE_SWITCHIN_SETTMBX);
+		else
+			kse_switchin(&tcb->tcb_tmbx, 0);
+	}
+
+	/* We should not reach here. */
+	return (-1);
+}
+
+#endif /* _PTHREAD_MD_H_ */
diff --git a/lib/libpthread/arch/sparc64/sparc64/assym.s b/lib/libpthread/arch/sparc64/sparc64/assym.s
new file mode 100644
index 0000000..3e22c9f
--- /dev/null
+++ b/lib/libpthread/arch/sparc64/sparc64/assym.s
@@ -0,0 +1,15 @@
+/*
+ * Offsets into structures used from asm. Must be kept in sync with
+ * appropriate headers.
+ *
+ * $FreeBSD$
+ */
+
+#define UC_MCONTEXT 0x40	/* offsetof(ucontext_t, uc_mcontext) */
+
+#define MC_FLAGS 0x0	/* mc_flags -- NOTE(review): appears to overlay mc_global[0]; confirm vs <machine/ucontext.h> */
+#define MC_VALID_FLAGS 0x1	/* value stored to mark a valid context */
+#define MC_GLOBAL 0x0	/* mc_global[8]: %g0..%g7 */
+#define MC_OUT 0x40	/* mc_out[8]: %o0..%o7 (%o6 == %sp) */
+#define MC_TPC 0xc8	/* resume pc */
+#define MC_TNPC 0xc0	/* resume npc (stored before tpc in the struct) */
diff --git a/lib/libpthread/arch/sparc64/sparc64/pthread_md.c b/lib/libpthread/arch/sparc64/sparc64/pthread_md.c
new file mode 100644
index 0000000..d6bf95d
--- /dev/null
+++ b/lib/libpthread/arch/sparc64/sparc64/pthread_md.c
@@ -0,0 +1,91 @@
+/*-
+ * Copyright (C) 2003 Jake Burkholder <jake@freebsd.org>
+ * Copyright (C) 2003 David Xu <davidxu@freebsd.org>
+ * Copyright (c) 2001,2003 Daniel Eischen <deischen@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Neither the name of the author nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/types.h>
+
+#include <unistd.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <string.h>
+#include <ucontext.h>
+
+#include "pthread_md.h"
+
+struct tcb *
+_tcb_ctor(struct pthread *thread, int initial)
+{
+	struct tcb *tcb;
+	void *base;
+
+	/*
+	 * The tcb must be 64-byte aligned (tcb_tmbx requires it), so
+	 * over-allocate by 63 bytes and round the pointer up by hand;
+	 * the raw allocation address is kept for the destructor.
+	 */
+	base = malloc(sizeof(struct tcb) + 63);
+	if (base == NULL)
+		return (NULL);
+	tcb = (struct tcb *)(((uintptr_t)base + 63) & ~(uintptr_t)63);
+	memset(tcb, 0, sizeof(*tcb));
+	tcb->tcb_addr = base;
+	tcb->tcb_thread = thread;
+	/* XXX - Allocate tdv/tls */
+	return (tcb);
+}
+
+void
+_tcb_dtor(struct tcb *tcb)
+{
+	void *base;
+
+	/* Free via the original (pre-alignment) allocation address. */
+	base = tcb->tcb_addr;
+	tcb->tcb_addr = NULL;
+	free(base);
+}
+
+struct kcb *
+_kcb_ctor(struct kse *kse)
+{
+	struct kcb *newkcb;
+
+	newkcb = calloc(1, sizeof(*newkcb));
+	if (newkcb == NULL)
+		return (NULL);
+	/* Until a real thread runs, the embedded fake tcb is current. */
+	newkcb->kcb_faketcb.tcb_isfake = 1;
+	newkcb->kcb_faketcb.tcb_tmbx.tm_flags = TMF_NOUPCALL;
+	newkcb->kcb_curtcb = &newkcb->kcb_faketcb;
+	newkcb->kcb_kse = kse;
+	return (newkcb);
+}
+
+void
+_kcb_dtor(struct kcb *kcb)
+{
+	/* free(NULL) is a no-op, so no guard is needed. */
+	free(kcb);
+}
diff --git a/lib/libpthread/arch/sparc64/sparc64/thr_getcontext.S b/lib/libpthread/arch/sparc64/sparc64/thr_getcontext.S
new file mode 100644
index 0000000..ca6473a
--- /dev/null
+++ b/lib/libpthread/arch/sparc64/sparc64/thr_getcontext.S
@@ -0,0 +1,87 @@
+/*-
+ * Copyright (C) 2003 Jake Burkholder <jake@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Neither the name of the author nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <machine/asm.h>
+__FBSDID("$FreeBSD$");
+
+#include "assym.s"
+
+	.weak CNAME(_thr_getcontext)
+	.set CNAME(_thr_getcontext),CNAME(__thr_getcontext)
+/*
+ * int _thr_getcontext(mcontext_t *mcp)	-- %o0 = mcp
+ * Save minimal voluntary-switch state: %sp and a resume pc/npc just
+ * past this call.  Returns 0 now; resuming via _thr_setcontext()
+ * returns 1 (pre-stored in the mc_out[0] slot).
+ * Leaf routine: only %o scratch registers are used (the original
+ * clobbered %l0, which belongs to the caller's register window).
+ */
+ENTRY(__thr_getcontext)
+	add %o7, 8, %o1			/* resume pc: past call + delay slot */
+	add %o1, 4, %o2			/* resume npc = pc + 4 */
+	stx %sp, [%o0 + MC_OUT + (6 * 8)]	/* save %sp (%o6) */
+	stx %o1, [%o0 + MC_TPC]
+	stx %o2, [%o0 + MC_TNPC]
+	mov MC_VALID_FLAGS, %o3 /* Validate the context. */
+	stx %o3, [%o0 + MC_FLAGS]
+	mov 1, %o3
+	stx %o3, [%o0 + MC_OUT + (0 * 8)] /* return 1 when resumed */
+	retl
+	 mov 0, %o0 /* return 0 */
+END(__thr_getcontext)
+
+	.weak CNAME(_thr_setcontext)
+	.set CNAME(_thr_setcontext),CNAME(__thr_setcontext)
+/*
+ * int _thr_setcontext(mcontext_t *mcp, intptr_t val, intptr_t *loc)
+ * Restore globals, outs and pc/npc from mcp; if loc != NULL, store
+ * val to *loc just before resuming.  Does not return to the caller.
+ */
+ENTRY(__thr_setcontext)
+	save %sp, -CCFSZ, %sp
+	flushw				/* spill all windows to the stack */
+	mov %i0, %l0			/* l0 = mcp */
+	mov %i1, %l1			/* l1 = val */
+	mov %i2, %l2			/* l2 = loc */
+	ldx [%l0 + MC_GLOBAL + (1 * 8)], %g1
+	ldx [%l0 + MC_GLOBAL + (2 * 8)], %g2
+	ldx [%l0 + MC_GLOBAL + (3 * 8)], %g3
+	ldx [%l0 + MC_GLOBAL + (4 * 8)], %g4
+	ldx [%l0 + MC_GLOBAL + (5 * 8)], %g5
+	ldx [%l0 + MC_GLOBAL + (6 * 8)], %g6
+	ldx [%l0 + MC_GLOBAL + (7 * 8)], %g7
+	/* Loads land in our ins; "return" rotates them back to outs. */
+	ldx [%l0 + MC_OUT + (0 * 8)], %i0
+	ldx [%l0 + MC_OUT + (1 * 8)], %i1
+	ldx [%l0 + MC_OUT + (2 * 8)], %i2
+	ldx [%l0 + MC_OUT + (3 * 8)], %i3
+	ldx [%l0 + MC_OUT + (4 * 8)], %i4
+	ldx [%l0 + MC_OUT + (5 * 8)], %i5
+	ldx [%l0 + MC_OUT + (6 * 8)], %i6
+	ldx [%l0 + MC_OUT + (7 * 8)], %i7
+	ldx [%l0 + MC_TPC], %l4
+	ldx [%l0 + MC_TNPC], %l3
+	brz %l2, 1f			/* skip store if loc == NULL */
+	 nop
+	stx %l1, [%l2]			/* *loc = val */
+	/*
+	 * DCTI couple: resumes with PC = tpc (%l4) and nPC = tnpc (%l3).
+	 * Fix: the jmpl target must be tpc; the original jumped to tnpc
+	 * first, skewing the resumed instruction stream by one insn.
+	 */
+1:	jmpl %l4, %g0
+	 return %l3
+END(__thr_setcontext)
+
+/*
+ * void _sparc64_enter_uts(kse_func_t uts, struct kse_mailbox *km,
+ *     void *stack, size_t stacksz)
+ * Switch to the given upcall stack and jump to (*uts)(km).
+ * Does not return.
+ */
+ENTRY(_sparc64_enter_uts)
+	save %sp, -CCFSZ, %sp
+	flushw				/* spill windows before changing stacks */
+	add %i2, %i3, %i2		/* top = stack + stacksz */
+	sub %i2, SPOFF + CCFSZ, %sp	/* stack bias + initial frame */
+	jmpl %i0, %g0			/* jump to uts */
+	 mov %i1, %o0			/* delay slot: arg0 = km */
+END(_sparc64_enter_uts)
OpenPOWER on IntegriCloud