summaryrefslogtreecommitdiffstats
path: root/lib
diff options
context:
space:
mode:
authordeischen <deischen@FreeBSD.org>2003-07-19 04:44:21 +0000
committerdeischen <deischen@FreeBSD.org>2003-07-19 04:44:21 +0000
commitf34d7dc27d87419ac6129c42ef8ff4f92882d1e6 (patch)
tree8604245f0b55b0d2f7f608966f8b24d8af164c17 /lib
parent0800e2f8d165a62038aea1d18fc367168e60b617 (diff)
downloadFreeBSD-src-f34d7dc27d87419ac6129c42ef8ff4f92882d1e6.zip
FreeBSD-src-f34d7dc27d87419ac6129c42ef8ff4f92882d1e6.tar.gz
Add some very beta amd64 bits. These will also need some tweaking.
Diffstat (limited to 'lib')
-rw-r--r--lib/libkse/arch/amd64/Makefile.inc5
-rw-r--r--lib/libkse/arch/amd64/amd64/context.S218
-rw-r--r--lib/libkse/arch/amd64/amd64/enter_uts.S41
-rw-r--r--lib/libkse/arch/amd64/include/atomic_ops.h49
-rw-r--r--lib/libkse/arch/amd64/include/pthread_md.h80
-rw-r--r--lib/libpthread/arch/amd64/Makefile.inc5
-rw-r--r--lib/libpthread/arch/amd64/amd64/context.S218
-rw-r--r--lib/libpthread/arch/amd64/amd64/enter_uts.S41
-rw-r--r--lib/libpthread/arch/amd64/include/atomic_ops.h49
-rw-r--r--lib/libpthread/arch/amd64/include/ksd.h135
-rw-r--r--lib/libpthread/arch/amd64/include/pthread_md.h80
11 files changed, 921 insertions, 0 deletions
diff --git a/lib/libkse/arch/amd64/Makefile.inc b/lib/libkse/arch/amd64/Makefile.inc
new file mode 100644
index 0000000..c0e4c47
--- /dev/null
+++ b/lib/libkse/arch/amd64/Makefile.inc
@@ -0,0 +1,5 @@
+# $FreeBSD$
+
+.PATH: ${.CURDIR}/arch/${MACHINE_ARCH}/${MACHINE_ARCH}
+
+SRCS+= context.S enter_uts.S
diff --git a/lib/libkse/arch/amd64/amd64/context.S b/lib/libkse/arch/amd64/amd64/context.S
new file mode 100644
index 0000000..50616e0
--- /dev/null
+++ b/lib/libkse/arch/amd64/amd64/context.S
@@ -0,0 +1,218 @@
+/*
+ * Copyright (c) 2003 Daniel Eischen <deischen@freebsd.org>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Neither the name of the author nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <machine/asm.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * The following notes ("cheat sheet") were provided by Peter Wemm.
+ *
+ * scratch:
+ * rax (1st return)
+ * rcx (4th arg)
+ * rdx (3rd arg, 2nd return)
+ * rsi (2nd arg)
+ * rdi (1st arg)
+ * r8 (5th arg)
+ * r9 (6th arg)
+ * r10 (temp, static chain?)
+ * r11 (temp)
+ *
+ * preserved:
+ * rbx (base pointer)
+ * rsp (stack)
+ * rbp (frame)
+ * r12-r15 (general)
+ *
+ * calls:
+ * rdi 1
+ * rsi 2
+ * rdx 3
+ * rcx 4
+ * r8 5
+ * r9 6
+ *
+ * return:
+ * rax 1
+ * rdx 2
+ *
+ * This means:
+ * arg1 goes in %rdi, arg2 in %rsi, etc. return value is %rax (and
+ * secondary return, eg: pipe(2), in %rdx) %rcx,%rsi,%rdi etc are
+ * trashed by making a call to something. %rbx,%rbp,%r12-15 are the
+ * only registers preserved across a call. Note that unlike i386,
+ * %rsi and %rdi are scratch rather than preserved. FPU is
+ * different, args are in SSE registers rather than the x87 stack.
+ *
+ * Aside from the register calling conventions, amd64 can be treated
+ * very much like i386. Things like setjmp/longjmp etc were literal
+ * translations from i386 but with the register names updated, etc.
+ * The main gotcha is that FPU save/restore is in SSE format, which
+ * means a sparse 512 byte FPU context.
+ *
+ * Note that the FPU is supposed to be 512 bytes but that the
+ * definition for the FPU bits in struct mcontext does not
+ * agree:
+ *
+ * long mc_fpstate[128] __aligned(16);
+ *
+ * This would actually use 1024 bytes, not 512, since long is
+ * 8 bytes on amd64.
+ */
+
+
+/*
+ * Where do we define these?
+ */
+#define MC_SIZE 1312 /* sizeof mcontext_t */
+#define MC_LEN_OFFSET (24*8) /* offset to mc_len from mcontext */
+#define MC_FPFMT_OFFSET (25*8) /* offset to mc_fpformat from mcontext */
+#define MC_FPFMT_NODEV 0x10000
+#define MC_OWNEDFP_OFFSET (26*8) /* offset to mc_ownedfp from mcontext */
+#define MC_OWNEDFP_NONE 0x20000
+#define MC_OWNEDFP_FPU 0x20001
+#define MC_OWNEDFP_PCB 0x20002
+#define MC_FPREGS_OFFSET (28*8) /* offset to FP registers */
+#define MC_FP_CW_OFFSET (28*8) /* offset to FP control word */
+
+#define MC_RDI (1 * 8)
+#define MC_RSI (2 * 8)
+#define MC_RDX (3 * 8)
+#define MC_RCX (4 * 8)
+#define MC_R8 (5 * 8)
+#define MC_R9 (6 * 8)
+#define MC_RAX (7 * 8)
+#define MC_RBX (8 * 8)
+#define MC_RBP (9 * 8)
+#define MC_R10 (10 * 8)
+#define MC_R11 (11 * 8)
+#define MC_R12 (12 * 8)
+#define MC_R13 (13 * 8)
+#define MC_R14 (14 * 8)
+#define MC_R15 (15 * 8)
+#define MC_RIP (19 * 8)
+#define MC_RFLAGS (21 * 8)
+#define MC_RSP (22 * 8)
+
+/*
+ * _amd64_save_context(mcontext_t *mcp)
+ *
+ * No values are saved to mc_trapno, mc_addr, mc_err, mc_cs, or mc_ss.
+ * For the FPU state, only the floating point control word is stored.
+ *
+ * Returns 0 on the initial call and 1 when the saved context is later
+ * resumed (MC_RAX is pre-loaded with 1 below).  Named to match the
+ * _amd64_save_context() prototype in arch/amd64/include/pthread_md.h.
+ */
+ENTRY(_amd64_save_context)
+	cmpq	$0, %rdi		/* check for null pointer */
+	jne	1f
+	movq	$-1, %rax
+	jmp	2f
+1:	movq	%rdi, MC_RDI(%rdi)
+	movq	%rsi, MC_RSI(%rdi)
+	movq	%rdx, MC_RDX(%rdi)
+	movq	%rcx, MC_RCX(%rdi)
+	movq	%r8, MC_R8(%rdi)
+	movq	%r9, MC_R9(%rdi)
+	movq	$1, MC_RAX(%rdi)	/* return 1 when restored */
+	movq	%rbx, MC_RBX(%rdi)
+	movq	%rbp, MC_RBP(%rdi)
+	movq	%r10, MC_R10(%rdi)
+	movq	%r11, MC_R11(%rdi)
+	movq	%r12, MC_R12(%rdi)
+	movq	%r13, MC_R13(%rdi)
+	movq	%r14, MC_R14(%rdi)
+	movq	%r15, MC_R15(%rdi)
+	movq	(%rsp), %rax		/* get return address */
+	movq	%rax, MC_RIP(%rdi)	/* save return address (%rip) */
+	pushfq				/* get flags */
+	popq	%rax
+	movq	%rax, MC_RFLAGS(%rdi)	/* save flags */
+	movq	%rsp, %rax		/* setcontext pushes the return */
+	addq	$8, %rax		/* address onto the stack; a stack */
+	movq	%rax, MC_RSP(%rdi)	/* slot is 8 bytes on amd64, not 4 */
+	fnstcw	MC_FP_CW_OFFSET(%rdi)	/* save FPU control word */
+	movq	$MC_OWNEDFP_NONE, MC_OWNEDFP_OFFSET(%rdi) /* no FP */
+	/*movq	$MC_FPFMT_NODEV, MC_FPFMT_OFFSET(%rdi)*/ /* unused for amd64 */
+	movq	$MC_SIZE, MC_LEN_OFFSET(%rdi)
+	xorq	%rax, %rax		/* return 0 */
+2:	ret
+
+/*
+ * _amd64_restore_context(mcontext_t *mcp, intptr_t val, intptr_t *loc);
+ *
+ * Resumes the context saved in *mcp.  If loc is non-NULL, *loc is set
+ * to val just before the context is resumed, so the resumed side can
+ * observe the handoff.  Does not return on success; returns -1 on a
+ * NULL or invalid (wrong mc_len) context.  Named to match the
+ * prototype in arch/amd64/include/pthread_md.h.
+ */
+ENTRY(_amd64_restore_context)
+	cmpq	$0, %rdi		/* check for null pointer */
+	jne	1f
+	movq	$-1, %rax
+	jmp	7f
+1:	cmpq	$MC_SIZE, MC_LEN_OFFSET(%rdi) /* is context valid? */
+	je	2f
+	movq	$-1, %rax		/* bzzzt, invalid context */
+	jmp	7f
+2:	movq	MC_RCX(%rdi), %rcx
+	movq	MC_R8(%rdi), %r8
+	movq	MC_R9(%rdi), %r9
+	movq	MC_RBX(%rdi), %rbx
+	movq	MC_RBP(%rdi), %rbp
+	movq	MC_R10(%rdi), %r10
+	movq	MC_R11(%rdi), %r11
+	movq	MC_R12(%rdi), %r12
+	movq	MC_R13(%rdi), %r13
+	movq	MC_R14(%rdi), %r14
+	movq	MC_R15(%rdi), %r15
+	/*
+	 * if (mc_fpowned == MC_OWNEDFP_FPU || mc_fpowned == MC_OWNEDFP_PCB)
+	 *	restore XMM/SSE FP register format
+	 */
+	cmpq	$MC_OWNEDFP_NONE, MC_OWNEDFP_OFFSET(%rdi)
+	je	4f
+	cmpq	$MC_OWNEDFP_PCB, MC_OWNEDFP_OFFSET(%rdi)
+	je	3f
+	cmpq	$MC_OWNEDFP_FPU, MC_OWNEDFP_OFFSET(%rdi)
+	jne	4f
+3:	fxrstor	MC_FPREGS_OFFSET(%rdi)	/* restore XMM FP regs */
+	jmp	5f
+4:	fninit
+	fldcw	MC_FP_CW_OFFSET(%rdi)
+	/*
+	 * Build the resume frame on the context's stack.  Push order is
+	 * the exact reverse of the pops at 6f below:
+	 * %rip, %rdi, %rdx, %rsi, then rflags on top.
+	 */
+5:	movq	MC_RSP(%rdi), %rsp	/* switch to context stack */
+	movq	MC_RIP(%rdi), %rax	/* return address on stack */
+	pushq	%rax
+	movq	MC_RDI(%rdi), %rax	/* rdi on stack (was MC_RSI, which */
+	pushq	%rax			/* left %rdi never restored) */
+	movq	MC_RDX(%rdi), %rax	/* rdx on stack */
+	pushq	%rax
+	movq	MC_RSI(%rdi), %rax	/* rsi on stack */
+	pushq	%rax
+	movq	MC_RFLAGS(%rdi), %rax	/* flags on stack */
+	pushq	%rax
+	movq	MC_RAX(%rdi), %rax	/* restore rax */
+	/* At this point we're done with the context. */
+	cmpq	$0, %rdx		/* set *loc to val */
+	je	6f			/* (jumping to 7f here would ret with
+					   the pushed rflags as the return
+					   address; we must still pop) */
+	movq	%rsi, (%rdx)
+6:	popfq				/* restore flags */
+	popq	%rsi			/* restore rsi, rdx, and rdi */
+	popq	%rdx
+	popq	%rdi
+7:	ret
diff --git a/lib/libkse/arch/amd64/amd64/enter_uts.S b/lib/libkse/arch/amd64/amd64/enter_uts.S
new file mode 100644
index 0000000..b36eb98
--- /dev/null
+++ b/lib/libkse/arch/amd64/amd64/enter_uts.S
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2003 Daniel Eischen <deischen@freebsd.org>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Neither the name of the author nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <machine/asm.h>
+__FBSDID("$FreeBSD$");
+
+
+/*
+ * _amd64_enter_uts(struct kse_mailbox *km, kse_func_t uts, void *stack,
+ *	size_t stacksz);
+ *
+ * Switches onto the top of the given UTS stack (aligned down to a
+ * 16-byte boundary per the amd64 ABI) and calls uts.  %rdi still holds
+ * km, so the UTS entry point receives the mailbox as its first
+ * argument.  Not expected to return.
+ */
+ENTRY(_amd64_enter_uts)
+	addq	%rcx, %rdx	/* get stack base */
+	andq	$~15, %rdx	/* align to 16 bytes; "andq -15" was a
+				   memory operand (missing '$') and the
+				   wrong mask (-15, not ~15 == -16) */
+	movq	%rdx, %rsp	/* switch to UTS stack */
+	movq	%rdx, %rbp	/* set frame */
+	callq	*%rsi		/* call UTS entry point; uts IS the
+				   function pointer, so "*(%rsi)" would
+				   have jumped through a load from it */
+	ret
diff --git a/lib/libkse/arch/amd64/include/atomic_ops.h b/lib/libkse/arch/amd64/include/atomic_ops.h
new file mode 100644
index 0000000..5edb533
--- /dev/null
+++ b/lib/libkse/arch/amd64/include/atomic_ops.h
@@ -0,0 +1,49 @@
+/*-
+ * Copyright (c) 2001 Daniel Eischen <deischen@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Neither the name of the author nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _ATOMIC_OPS_H_
+#define _ATOMIC_OPS_H_
+
+/*
+ * Atomic swap:
+ *	Atomic (tmp = *dst, *dst = val), then *res = tmp
+ *
+ * void atomic_swap_long(long *dst, long val, long *res);
+ */
+static inline void
+atomic_swap_long(long *dst, long val, long *res)
+{
+	/*
+	 * xchgq modifies BOTH operands, so *dst and val must be
+	 * read-write ("+") constraints; the original declared them
+	 * input-only, allowing the compiler to assume val's register
+	 * and *dst were unchanged.  xchg with a memory operand is
+	 * implicitly locked on x86, so no "lock" prefix is needed.
+	 * After the exchange, val holds the old *dst.
+	 */
+	__asm __volatile("xchgq %1, %0"
+	    : "+m" (*dst), "+r" (val) : : "memory");
+	*res = val;
+}
+
+#define	atomic_swap_ptr(d, v, r) \
+	atomic_swap_long((long *)(d), (long)(v), (long *)(r))
+
+#endif
diff --git a/lib/libkse/arch/amd64/include/pthread_md.h b/lib/libkse/arch/amd64/include/pthread_md.h
new file mode 100644
index 0000000..e14357f
--- /dev/null
+++ b/lib/libkse/arch/amd64/include/pthread_md.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2003 Marcel Moolenaar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+/*
+ * Machine-dependent thread prototypes/definitions for the thread kernel.
+ */
+#ifndef _PTHREAD_MD_H_
+#define _PTHREAD_MD_H_
+
+#include <ucontext.h>
+
+#define	THR_GETCONTEXT(ucp)	(void)_amd64_save_context(&(ucp)->uc_mcontext)
+/*
+ * _amd64_restore_context() takes (mc, val, loc); pass val = 0 and
+ * loc = NULL for a plain resume.  The original macro passed only one
+ * argument, which does not match the 3-argument prototype below and
+ * cannot compile.
+ */
+#define	THR_SETCONTEXT(ucp)	\
+	(void)_amd64_restore_context(&(ucp)->uc_mcontext, 0, NULL)
+
+#define	THR_ALIGNBYTES	15
+#define	THR_ALIGN(td)	(((uintptr_t)(td) + THR_ALIGNBYTES) & ~THR_ALIGNBYTES)
+
+/*
+ * KSE Specific Data.
+ */
+struct ksd {
+	void	*base;	/* base address of the KSD block */
+	long	size;	/* size of the block in bytes */
+};
+
+void	_amd64_enter_uts(struct kse_mailbox *km, kse_func_t uts, void *stack,
+	    size_t stacksz);
+int	_amd64_restore_context(mcontext_t *mc, intptr_t val, intptr_t *loc);
+int	_amd64_save_context(mcontext_t *mc);
+
+/*
+ * Save the current thread's context into its mailbox and enter the UTS
+ * on the KSE's UTS stack.  _amd64_save_context() returns 0 on the
+ * initial save and 1 when the context is resumed, so control only
+ * falls past the if-block (returning 0) once the thread is resumed.
+ * Returns -1 if tm is NULL.  NOTE(review): km is dereferenced without
+ * a NULL check — presumably callers guarantee it; confirm.
+ */
+static __inline int
+_thread_enter_uts(struct kse_thr_mailbox *tm, struct kse_mailbox *km)
+{
+	if (tm == NULL)
+		return (-1);
+	if (!_amd64_save_context(&tm->tm_context.uc_mcontext)) {
+		_amd64_enter_uts(km, km->km_func, km->km_stack.ss_sp,
+		    km->km_stack.ss_size);
+		/* We should not reach here. */
+		return (-1);
+	}
+	return (0);
+}
+
+/*
+ * Resume the context saved in tm's mailbox.  The val/loc side channel
+ * of _amd64_restore_context() stores tm into *thrp immediately before
+ * the switch, so the resumed side can learn which thread mailbox is
+ * current.  Never returns on success; returns -1 only if tm is NULL
+ * (or the restore itself fails on an invalid context).
+ */
+static __inline int
+_thread_switch(struct kse_thr_mailbox *tm, struct kse_thr_mailbox **thrp)
+{
+	if (tm == NULL)
+		return (-1);
+	_amd64_restore_context(&tm->tm_context.uc_mcontext, (intptr_t)tm,
+	    (intptr_t*)thrp);
+	/* We should not reach here. */
+	return (-1);
+}
+
+#endif
diff --git a/lib/libpthread/arch/amd64/Makefile.inc b/lib/libpthread/arch/amd64/Makefile.inc
new file mode 100644
index 0000000..c0e4c47
--- /dev/null
+++ b/lib/libpthread/arch/amd64/Makefile.inc
@@ -0,0 +1,5 @@
+# $FreeBSD$
+
+.PATH: ${.CURDIR}/arch/${MACHINE_ARCH}/${MACHINE_ARCH}
+
+SRCS+= context.S enter_uts.S
diff --git a/lib/libpthread/arch/amd64/amd64/context.S b/lib/libpthread/arch/amd64/amd64/context.S
new file mode 100644
index 0000000..50616e0
--- /dev/null
+++ b/lib/libpthread/arch/amd64/amd64/context.S
@@ -0,0 +1,218 @@
+/*
+ * Copyright (c) 2003 Daniel Eischen <deischen@freebsd.org>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Neither the name of the author nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <machine/asm.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * The following notes ("cheat sheet") were provided by Peter Wemm.
+ *
+ * scratch:
+ * rax (1st return)
+ * rcx (4th arg)
+ * rdx (3rd arg, 2nd return)
+ * rsi (2nd arg)
+ * rdi (1st arg)
+ * r8 (5th arg)
+ * r9 (6th arg)
+ * r10 (temp, static chain?)
+ * r11 (temp)
+ *
+ * preserved:
+ * rbx (base pointer)
+ * rsp (stack)
+ * rbp (frame)
+ * r12-r15 (general)
+ *
+ * calls:
+ * rdi 1
+ * rsi 2
+ * rdx 3
+ * rcx 4
+ * r8 5
+ * r9 6
+ *
+ * return:
+ * rax 1
+ * rdx 2
+ *
+ * This means:
+ * arg1 goes in %rdi, arg2 in %rsi, etc. return value is %rax (and
+ * secondary return, eg: pipe(2), in %rdx) %rcx,%rsi,%rdi etc are
+ * trashed by making a call to something. %rbx,%rbp,%r12-15 are the
+ * only registers preserved across a call. Note that unlike i386,
+ * %rsi and %rdi are scratch rather than preserved. FPU is
+ * different, args are in SSE registers rather than the x87 stack.
+ *
+ * Aside from the register calling conventions, amd64 can be treated
+ * very much like i386. Things like setjmp/longjmp etc were literal
+ * translations from i386 but with the register names updated, etc.
+ * The main gotcha is that FPU save/restore is in SSE format, which
+ * means a sparse 512 byte FPU context.
+ *
+ * Note that the FPU is supposed to be 512 bytes but that the
+ * definition for the FPU bits in struct mcontext does not
+ * agree:
+ *
+ * long mc_fpstate[128] __aligned(16);
+ *
+ * This would actually use 1024 bytes, not 512, since long is
+ * 8 bytes on amd64.
+ */
+
+
+/*
+ * Where do we define these?
+ */
+#define MC_SIZE 1312 /* sizeof mcontext_t */
+#define MC_LEN_OFFSET (24*8) /* offset to mc_len from mcontext */
+#define MC_FPFMT_OFFSET (25*8) /* offset to mc_fpformat from mcontext */
+#define MC_FPFMT_NODEV 0x10000
+#define MC_OWNEDFP_OFFSET (26*8) /* offset to mc_ownedfp from mcontext */
+#define MC_OWNEDFP_NONE 0x20000
+#define MC_OWNEDFP_FPU 0x20001
+#define MC_OWNEDFP_PCB 0x20002
+#define MC_FPREGS_OFFSET (28*8) /* offset to FP registers */
+#define MC_FP_CW_OFFSET (28*8) /* offset to FP control word */
+
+#define MC_RDI (1 * 8)
+#define MC_RSI (2 * 8)
+#define MC_RDX (3 * 8)
+#define MC_RCX (4 * 8)
+#define MC_R8 (5 * 8)
+#define MC_R9 (6 * 8)
+#define MC_RAX (7 * 8)
+#define MC_RBX (8 * 8)
+#define MC_RBP (9 * 8)
+#define MC_R10 (10 * 8)
+#define MC_R11 (11 * 8)
+#define MC_R12 (12 * 8)
+#define MC_R13 (13 * 8)
+#define MC_R14 (14 * 8)
+#define MC_R15 (15 * 8)
+#define MC_RIP (19 * 8)
+#define MC_RFLAGS (21 * 8)
+#define MC_RSP (22 * 8)
+
+/*
+ * _amd64_save_context(mcontext_t *mcp)
+ *
+ * No values are saved to mc_trapno, mc_addr, mc_err, mc_cs, or mc_ss.
+ * For the FPU state, only the floating point control word is stored.
+ *
+ * Returns 0 on the initial call and 1 when the saved context is later
+ * resumed (MC_RAX is pre-loaded with 1 below).  Named to match the
+ * _amd64_save_context() prototype in arch/amd64/include/pthread_md.h.
+ */
+ENTRY(_amd64_save_context)
+	cmpq	$0, %rdi		/* check for null pointer */
+	jne	1f
+	movq	$-1, %rax
+	jmp	2f
+1:	movq	%rdi, MC_RDI(%rdi)
+	movq	%rsi, MC_RSI(%rdi)
+	movq	%rdx, MC_RDX(%rdi)
+	movq	%rcx, MC_RCX(%rdi)
+	movq	%r8, MC_R8(%rdi)
+	movq	%r9, MC_R9(%rdi)
+	movq	$1, MC_RAX(%rdi)	/* return 1 when restored */
+	movq	%rbx, MC_RBX(%rdi)
+	movq	%rbp, MC_RBP(%rdi)
+	movq	%r10, MC_R10(%rdi)
+	movq	%r11, MC_R11(%rdi)
+	movq	%r12, MC_R12(%rdi)
+	movq	%r13, MC_R13(%rdi)
+	movq	%r14, MC_R14(%rdi)
+	movq	%r15, MC_R15(%rdi)
+	movq	(%rsp), %rax		/* get return address */
+	movq	%rax, MC_RIP(%rdi)	/* save return address (%rip) */
+	pushfq				/* get flags */
+	popq	%rax
+	movq	%rax, MC_RFLAGS(%rdi)	/* save flags */
+	movq	%rsp, %rax		/* setcontext pushes the return */
+	addq	$8, %rax		/* address onto the stack; a stack */
+	movq	%rax, MC_RSP(%rdi)	/* slot is 8 bytes on amd64, not 4 */
+	fnstcw	MC_FP_CW_OFFSET(%rdi)	/* save FPU control word */
+	movq	$MC_OWNEDFP_NONE, MC_OWNEDFP_OFFSET(%rdi) /* no FP */
+	/*movq	$MC_FPFMT_NODEV, MC_FPFMT_OFFSET(%rdi)*/ /* unused for amd64 */
+	movq	$MC_SIZE, MC_LEN_OFFSET(%rdi)
+	xorq	%rax, %rax		/* return 0 */
+2:	ret
+
+/*
+ * _amd64_restore_context(mcontext_t *mcp, intptr_t val, intptr_t *loc);
+ *
+ * Resumes the context saved in *mcp.  If loc is non-NULL, *loc is set
+ * to val just before the context is resumed, so the resumed side can
+ * observe the handoff.  Does not return on success; returns -1 on a
+ * NULL or invalid (wrong mc_len) context.  Named to match the
+ * prototype in arch/amd64/include/pthread_md.h.
+ */
+ENTRY(_amd64_restore_context)
+	cmpq	$0, %rdi		/* check for null pointer */
+	jne	1f
+	movq	$-1, %rax
+	jmp	7f
+1:	cmpq	$MC_SIZE, MC_LEN_OFFSET(%rdi) /* is context valid? */
+	je	2f
+	movq	$-1, %rax		/* bzzzt, invalid context */
+	jmp	7f
+2:	movq	MC_RCX(%rdi), %rcx
+	movq	MC_R8(%rdi), %r8
+	movq	MC_R9(%rdi), %r9
+	movq	MC_RBX(%rdi), %rbx
+	movq	MC_RBP(%rdi), %rbp
+	movq	MC_R10(%rdi), %r10
+	movq	MC_R11(%rdi), %r11
+	movq	MC_R12(%rdi), %r12
+	movq	MC_R13(%rdi), %r13
+	movq	MC_R14(%rdi), %r14
+	movq	MC_R15(%rdi), %r15
+	/*
+	 * if (mc_fpowned == MC_OWNEDFP_FPU || mc_fpowned == MC_OWNEDFP_PCB)
+	 *	restore XMM/SSE FP register format
+	 */
+	cmpq	$MC_OWNEDFP_NONE, MC_OWNEDFP_OFFSET(%rdi)
+	je	4f
+	cmpq	$MC_OWNEDFP_PCB, MC_OWNEDFP_OFFSET(%rdi)
+	je	3f
+	cmpq	$MC_OWNEDFP_FPU, MC_OWNEDFP_OFFSET(%rdi)
+	jne	4f
+3:	fxrstor	MC_FPREGS_OFFSET(%rdi)	/* restore XMM FP regs */
+	jmp	5f
+4:	fninit
+	fldcw	MC_FP_CW_OFFSET(%rdi)
+	/*
+	 * Build the resume frame on the context's stack.  Push order is
+	 * the exact reverse of the pops at 6f below:
+	 * %rip, %rdi, %rdx, %rsi, then rflags on top.
+	 */
+5:	movq	MC_RSP(%rdi), %rsp	/* switch to context stack */
+	movq	MC_RIP(%rdi), %rax	/* return address on stack */
+	pushq	%rax
+	movq	MC_RDI(%rdi), %rax	/* rdi on stack (was MC_RSI, which */
+	pushq	%rax			/* left %rdi never restored) */
+	movq	MC_RDX(%rdi), %rax	/* rdx on stack */
+	pushq	%rax
+	movq	MC_RSI(%rdi), %rax	/* rsi on stack */
+	pushq	%rax
+	movq	MC_RFLAGS(%rdi), %rax	/* flags on stack */
+	pushq	%rax
+	movq	MC_RAX(%rdi), %rax	/* restore rax */
+	/* At this point we're done with the context. */
+	cmpq	$0, %rdx		/* set *loc to val */
+	je	6f			/* (jumping to 7f here would ret with
+					   the pushed rflags as the return
+					   address; we must still pop) */
+	movq	%rsi, (%rdx)
+6:	popfq				/* restore flags */
+	popq	%rsi			/* restore rsi, rdx, and rdi */
+	popq	%rdx
+	popq	%rdi
+7:	ret
diff --git a/lib/libpthread/arch/amd64/amd64/enter_uts.S b/lib/libpthread/arch/amd64/amd64/enter_uts.S
new file mode 100644
index 0000000..b36eb98
--- /dev/null
+++ b/lib/libpthread/arch/amd64/amd64/enter_uts.S
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2003 Daniel Eischen <deischen@freebsd.org>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Neither the name of the author nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <machine/asm.h>
+__FBSDID("$FreeBSD$");
+
+
+/*
+ * _amd64_enter_uts(struct kse_mailbox *km, kse_func_t uts, void *stack,
+ *	size_t stacksz);
+ *
+ * Switches onto the top of the given UTS stack (aligned down to a
+ * 16-byte boundary per the amd64 ABI) and calls uts.  %rdi still holds
+ * km, so the UTS entry point receives the mailbox as its first
+ * argument.  Not expected to return.
+ */
+ENTRY(_amd64_enter_uts)
+	addq	%rcx, %rdx	/* get stack base */
+	andq	$~15, %rdx	/* align to 16 bytes; "andq -15" was a
+				   memory operand (missing '$') and the
+				   wrong mask (-15, not ~15 == -16) */
+	movq	%rdx, %rsp	/* switch to UTS stack */
+	movq	%rdx, %rbp	/* set frame */
+	callq	*%rsi		/* call UTS entry point; uts IS the
+				   function pointer, so "*(%rsi)" would
+				   have jumped through a load from it */
+	ret
diff --git a/lib/libpthread/arch/amd64/include/atomic_ops.h b/lib/libpthread/arch/amd64/include/atomic_ops.h
new file mode 100644
index 0000000..5edb533
--- /dev/null
+++ b/lib/libpthread/arch/amd64/include/atomic_ops.h
@@ -0,0 +1,49 @@
+/*-
+ * Copyright (c) 2001 Daniel Eischen <deischen@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Neither the name of the author nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _ATOMIC_OPS_H_
+#define _ATOMIC_OPS_H_
+
+/*
+ * Atomic swap:
+ *	Atomic (tmp = *dst, *dst = val), then *res = tmp
+ *
+ * void atomic_swap_long(long *dst, long val, long *res);
+ */
+static inline void
+atomic_swap_long(long *dst, long val, long *res)
+{
+	/*
+	 * xchgq modifies BOTH operands, so *dst and val must be
+	 * read-write ("+") constraints; the original declared them
+	 * input-only, allowing the compiler to assume val's register
+	 * and *dst were unchanged.  xchg with a memory operand is
+	 * implicitly locked on x86, so no "lock" prefix is needed.
+	 * After the exchange, val holds the old *dst.
+	 */
+	__asm __volatile("xchgq %1, %0"
+	    : "+m" (*dst), "+r" (val) : : "memory");
+	*res = val;
+}
+
+#define	atomic_swap_ptr(d, v, r) \
+	atomic_swap_long((long *)(d), (long)(v), (long *)(r))
+
+#endif
diff --git a/lib/libpthread/arch/amd64/include/ksd.h b/lib/libpthread/arch/amd64/include/ksd.h
new file mode 100644
index 0000000..26725ad
--- /dev/null
+++ b/lib/libpthread/arch/amd64/include/ksd.h
@@ -0,0 +1,135 @@
+/*-
+ * Copyright (C) 2003 David Xu <davidxu@freebsd.org>
+ * Copyright (c) 2001 Daniel Eischen <deischen@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Neither the name of the author nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * $FreeBSD$
+ */
+
+#ifndef _KSD_H_
+#define _KSD_H_
+
+#include <sys/types.h>
+
+struct kse;
+struct pthread;
+
+/*
+ * Evaluates to the byte offset of the per-kse variable name.
+ */
+#define __ksd_offset(name) __offsetof(struct kse, name)
+
+/*
+ * Evaluates to the type of the per-kse variable name.
+ */
+#define __ksd_type(name) __typeof(((struct kse *)0)->name)
+
+
+/*
+ * Evaluates to the value of the per-kse variable name.
+ */
+#define KSD_GET64(name) ({ \
+ __ksd_type(name) __result; \
+ \
+ u_long __i; \
+ __asm __volatile("movq %%gs:%1, %0" \
+ : "=r" (__i) \
+ : "m" (*(u_long *)(__ksd_offset(name)))); \
+ __result = *(__ksd_type(name) *)&__i; \
+ \
+ __result; \
+})
+
+/*
+ * Sets the value of the per-kse variable name to value val.
+ * (The store is %gs-relative at the variable's struct kse offset.)
+ */
+#define KSD_SET64(name, val) ({ \
+	__ksd_type(name) __val = (val); \
+ \
+	u_long __i; \
+	__i = *(u_long *)&__val; \
+	__asm __volatile("movq %1,%%gs:%0" \
+	    : "=m" (*(u_long *)(__ksd_offset(name))) \
+	    : "r" (__i)); \
+})
+
+/*
+ * Atomically fetch and zero a %gs-relative u_long.  NOTE(review): the
+ * callers pass the struct kse field OFFSET cast to a pointer, so
+ * "%%gs:%1" addresses %gs:offset rather than dereferencing addr
+ * directly — confirm against KSD_READANDCLEAR64 usage.  xchgq with a
+ * memory operand is implicitly locked on x86.
+ */
+static __inline u_long
+__ksd_readandclear64(volatile u_long *addr)
+{
+	u_long result;
+
+	__asm __volatile (
+	"	xorq	%0, %0;"
+	"	xchgq	%%gs:%1, %0;"
+	"# __ksd_readandclear64"
+	: "=&r" (result)
+	: "m" (*addr));
+	return (result);
+}
+
+#define KSD_READANDCLEAR64(name) ({ \
+ __ksd_type(name) __result; \
+ \
+ __result = (__ksd_type(name)) \
+ __ksd_readandclear64((u_long *)__ksd_offset(name)); \
+ __result; \
+})
+
+
+#define _ksd_curkse() ((struct kse *)KSD_GET64(k_mbx.km_udata))
+#define _ksd_curthread() KSD_GET64(k_curthread)
+#define _ksd_get_tmbx() KSD_GET64(k_mbx.km_curthread)
+#define _ksd_set_tmbx(value) KSD_SET64(k_mbx.km_curthread, (void *)value);
+#define _ksd_readandclear_tmbx() KSD_READANDCLEAR64(k_mbx.km_curthread)
+
+
+/*
+ * Record the KSD block (base address and size) in *ksd.
+ * Always succeeds (returns 0); no allocation is performed here.
+ */
+static __inline int
+_ksd_create(struct ksd *ksd, void *base, int size)
+{
+	ksd->base = base;
+	ksd->size = size;
+	return (0);
+}
+
+/*
+ * Clear the recorded KSD block; frees nothing (creation allocated
+ * nothing either).
+ */
+static __inline void
+_ksd_destroy(struct ksd *ksd)
+{
+	ksd->base = 0;
+	ksd->size = 0;
+}
+
+/*
+ * Activate *ksd as the current KSE's private data area.
+ * Deliberately always fails on amd64; see the comment below.
+ */
+static __inline int
+_ksd_setprivate(struct ksd *ksd)
+{
+	/*
+	 * Make it fail; only the kernel can do this on amd64.
+	 * This interface is going to be removed.  The KSD
+	 * will be set by the kernel when the kse is created.
+	 */
+	return (-1);
+}
+
+#endif
diff --git a/lib/libpthread/arch/amd64/include/pthread_md.h b/lib/libpthread/arch/amd64/include/pthread_md.h
new file mode 100644
index 0000000..e14357f
--- /dev/null
+++ b/lib/libpthread/arch/amd64/include/pthread_md.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2003 Marcel Moolenaar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+/*
+ * Machine-dependent thread prototypes/definitions for the thread kernel.
+ */
+#ifndef _PTHREAD_MD_H_
+#define _PTHREAD_MD_H_
+
+#include <ucontext.h>
+
+#define	THR_GETCONTEXT(ucp)	(void)_amd64_save_context(&(ucp)->uc_mcontext)
+/*
+ * _amd64_restore_context() takes (mc, val, loc); pass val = 0 and
+ * loc = NULL for a plain resume.  The original macro passed only one
+ * argument, which does not match the 3-argument prototype below and
+ * cannot compile.
+ */
+#define	THR_SETCONTEXT(ucp)	\
+	(void)_amd64_restore_context(&(ucp)->uc_mcontext, 0, NULL)
+
+#define	THR_ALIGNBYTES	15
+#define	THR_ALIGN(td)	(((uintptr_t)(td) + THR_ALIGNBYTES) & ~THR_ALIGNBYTES)
+
+/*
+ * KSE Specific Data.
+ */
+struct ksd {
+	void	*base;	/* base address of the KSD block */
+	long	size;	/* size of the block in bytes */
+};
+
+void	_amd64_enter_uts(struct kse_mailbox *km, kse_func_t uts, void *stack,
+	    size_t stacksz);
+int	_amd64_restore_context(mcontext_t *mc, intptr_t val, intptr_t *loc);
+int	_amd64_save_context(mcontext_t *mc);
+
+/*
+ * Save the current thread's context into its mailbox and enter the UTS
+ * on the KSE's UTS stack.  _amd64_save_context() returns 0 on the
+ * initial save and 1 when the context is resumed, so control only
+ * falls past the if-block (returning 0) once the thread is resumed.
+ * Returns -1 if tm is NULL.  NOTE(review): km is dereferenced without
+ * a NULL check — presumably callers guarantee it; confirm.
+ */
+static __inline int
+_thread_enter_uts(struct kse_thr_mailbox *tm, struct kse_mailbox *km)
+{
+	if (tm == NULL)
+		return (-1);
+	if (!_amd64_save_context(&tm->tm_context.uc_mcontext)) {
+		_amd64_enter_uts(km, km->km_func, km->km_stack.ss_sp,
+		    km->km_stack.ss_size);
+		/* We should not reach here. */
+		return (-1);
+	}
+	return (0);
+}
+
+/*
+ * Resume the context saved in tm's mailbox.  The val/loc side channel
+ * of _amd64_restore_context() stores tm into *thrp immediately before
+ * the switch, so the resumed side can learn which thread mailbox is
+ * current.  Never returns on success; returns -1 only if tm is NULL
+ * (or the restore itself fails on an invalid context).
+ */
+static __inline int
+_thread_switch(struct kse_thr_mailbox *tm, struct kse_thr_mailbox **thrp)
+{
+	if (tm == NULL)
+		return (-1);
+	_amd64_restore_context(&tm->tm_context.uc_mcontext, (intptr_t)tm,
+	    (intptr_t*)thrp);
+	/* We should not reach here. */
+	return (-1);
+}
+
+#endif
OpenPOWER on IntegriCloud