path: root/lib/libpthread/arch/amd64

Diffstat (limited to 'lib/libpthread/arch/amd64')
-rw-r--r--  lib/libpthread/arch/amd64/Makefile.inc            5
-rw-r--r--  lib/libpthread/arch/amd64/amd64/context.S       217
-rw-r--r--  lib/libpthread/arch/amd64/amd64/enter_uts.S      41
-rw-r--r--  lib/libpthread/arch/amd64/amd64/pthread_md.c     82
-rw-r--r--  lib/libpthread/arch/amd64/include/atomic_ops.h   57
-rw-r--r--  lib/libpthread/arch/amd64/include/pthread_md.h  268
6 files changed, 670 insertions, 0 deletions
diff --git a/lib/libpthread/arch/amd64/Makefile.inc b/lib/libpthread/arch/amd64/Makefile.inc
new file mode 100644
index 0000000..c8b0362
--- /dev/null
+++ b/lib/libpthread/arch/amd64/Makefile.inc
@@ -0,0 +1,5 @@
+# $FreeBSD$
+
+.PATH: ${.CURDIR}/arch/${MACHINE_ARCH}/${MACHINE_ARCH}
+
+SRCS+= context.S enter_uts.S pthread_md.c
diff --git a/lib/libpthread/arch/amd64/amd64/context.S b/lib/libpthread/arch/amd64/amd64/context.S
new file mode 100644
index 0000000..6a6b558
--- /dev/null
+++ b/lib/libpthread/arch/amd64/amd64/context.S
@@ -0,0 +1,217 @@
+/*
+ * Copyright (c) 2003 Daniel Eischen <deischen@freebsd.org>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Neither the name of the author nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <machine/asm.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * The following notes ("cheat sheet") were provided by Peter Wemm.
+ *
+ * scratch:
+ * rax (1st return)
+ * rcx (4th arg)
+ * rdx (3rd arg, 2nd return)
+ * rsi (2nd arg)
+ * rdi (1st arg)
+ * r8 (5th arg)
+ * r9 (6th arg)
+ * r10 (temp, static chain?)
+ * r11 (temp)
+ *
+ * preserved:
+ * rbx (base pointer)
+ * rsp (stack)
+ * rbp (frame)
+ * r12-r15 (general)
+ *
+ * calls:
+ * rdi 1
+ * rsi 2
+ * rdx 3
+ * rcx 4
+ * r8 5
+ * r9 6
+ *
+ * return:
+ * rax 1
+ * rdx 2
+ *
+ * This means:
+ *	arg1 goes in %rdi, arg2 in %rsi, etc.  The return value is %rax
+ *	(and a secondary return, e.g. pipe(2), is in %rdx).  %rcx, %rsi,
+ *	%rdi, etc. are trashed by making a call to something.  %rbx, %rbp,
+ *	and %r12-%r15 are the only registers preserved across a call.
+ *	Note that unlike i386, %rsi and %rdi are scratch rather than
+ *	preserved.  The FPU is different: args are passed in SSE registers
+ *	rather than on the x87 stack.
+ *
+ * Aside from the register calling conventions, amd64 can be treated
+ * very much like i386. Things like setjmp/longjmp etc were literal
+ * translations from i386 but with the register names updated, etc.
+ * The main gotcha is that FPU save/restore is in SSE format, which
+ * means a sparse 512 byte FPU context.
+ */
+
+
+/*
+ * Where do we define these?
+ */
+#define MC_SIZE 800 /* sizeof mcontext_t */
+#define MC_LEN_OFFSET (25*8) /* offset to mc_len from mcontext */
+#define MC_FPFMT_OFFSET (26*8) /* offset to mc_fpformat from mcontext */
+#define MC_FPFMT_NODEV 0x10000
+#define MC_OWNEDFP_OFFSET (27*8) /* offset to mc_ownedfp from mcontext */
+#define MC_OWNEDFP_NONE 0x20000
+#define MC_OWNEDFP_FPU 0x20001
+#define MC_OWNEDFP_PCB 0x20002
+#define MC_FPREGS_OFFSET (28*8) /* offset to FP registers */
+#define MC_FP_CW_OFFSET (28*8) /* offset to FP control word */
+
+#define MC_RDI (1 * 8)
+#define MC_RSI (2 * 8)
+#define MC_RDX (3 * 8)
+#define MC_RCX (4 * 8)
+#define MC_R8 (5 * 8)
+#define MC_R9 (6 * 8)
+#define MC_RAX (7 * 8)
+#define MC_RBX (8 * 8)
+#define MC_RBP (9 * 8)
+#define MC_R10 (10 * 8)
+#define MC_R11 (11 * 8)
+#define MC_R12 (12 * 8)
+#define MC_R13 (13 * 8)
+#define MC_R14 (14 * 8)
+#define MC_R15 (15 * 8)
+#define MC_FLAGS (18 * 8)
+#define MC_RIP (20 * 8)
+#define MC_CS (21 * 8)
+#define MC_RFLAGS (22 * 8)
+#define MC_RSP (23 * 8)
+#define MC_SS (24 * 8)
+
+#define REDZONE 128 /* size of the red zone */
+
+/*
+ * _amd64_save_context(mcontext_t *mcp)
+ *
+ * No values are saved to mc_trapno, mc_addr, mc_err and mc_cs.
+ * For the FPU state, only the floating point control word is stored.
+ */
+ENTRY(_amd64_save_context)
+ cmpq $0, %rdi /* check for null pointer */
+ jne 1f
+ movq $-1, %rax
+ jmp 2f
+1: movq %rdi, MC_RDI(%rdi)
+ movq %rsi, MC_RSI(%rdi)
+ movq %rdx, MC_RDX(%rdi)
+ movq %rcx, MC_RCX(%rdi)
+ movq %r8, MC_R8(%rdi)
+ movq %r9, MC_R9(%rdi)
+ movq $1, MC_RAX(%rdi) /* return 1 when restored */
+ movq %rbx, MC_RBX(%rdi)
+ movq %rbp, MC_RBP(%rdi)
+ movq %r10, MC_R10(%rdi)
+ movq %r11, MC_R11(%rdi)
+ movq %r12, MC_R12(%rdi)
+ movq %r13, MC_R13(%rdi)
+ movq %r14, MC_R14(%rdi)
+ movq %r15, MC_R15(%rdi)
+ movq (%rsp), %rax /* get return address */
+ movq %rax, MC_RIP(%rdi) /* save return address (%rip) */
+ pushfq /* get flags */
+ popq %rax
+ movq %rax, MC_RFLAGS(%rdi) /* save flags */
+ movq %rsp, %rax /* setcontext pushes the return */
+ addq $8, %rax /* address onto the stack; */
+ movq %rax, MC_RSP(%rdi) /* account for this -- ???. */
+ movw %ss, MC_SS(%rdi)
+ fnstcw MC_FP_CW_OFFSET(%rdi) /* save FPU control word */
+ movq $MC_OWNEDFP_NONE, MC_OWNEDFP_OFFSET(%rdi) /* no FP */
+ movq $MC_FPFMT_NODEV, MC_FPFMT_OFFSET(%rdi)
+ movq $MC_SIZE, MC_LEN_OFFSET(%rdi)
+ xorq %rax, %rax /* return 0 */
+2: ret
+
+/*
+ * _amd64_restore_context(mcontext_t *mcp, intptr_t val, intptr_t *loc);
+ */
+ENTRY(_amd64_restore_context)
+ cmpq $0, %rdi /* check for null pointer */
+ jne 1f
+ movq $-1, %rax
+ jmp 2f
+1: cmpq $MC_SIZE, MC_LEN_OFFSET(%rdi) /* is context valid? */
+ je 2f
+ movq $-1, %rax /* bzzzt, invalid context */
+ ret
+2: movq MC_RCX(%rdi), %rcx
+ movq MC_R8(%rdi), %r8
+ movq MC_R9(%rdi), %r9
+ movq MC_RBX(%rdi), %rbx
+ movq MC_RBP(%rdi), %rbp
+ movq MC_R10(%rdi), %r10
+ movq MC_R11(%rdi), %r11
+ movq MC_R12(%rdi), %r12
+ movq MC_R13(%rdi), %r13
+ movq MC_R14(%rdi), %r14
+ movq MC_R15(%rdi), %r15
+ /*
+ * if (mc_ownedfp == MC_OWNEDFP_FPU || mc_ownedfp == MC_OWNEDFP_PCB)
+ * restore XMM/SSE FP register format
+ */
+ cmpq $MC_OWNEDFP_NONE, MC_OWNEDFP_OFFSET(%rdi)
+ je 4f
+ cmpq $MC_OWNEDFP_PCB, MC_OWNEDFP_OFFSET(%rdi)
+ je 3f
+ cmpq $MC_OWNEDFP_FPU, MC_OWNEDFP_OFFSET(%rdi)
+ jne 4f
+3: fxrstor MC_FPREGS_OFFSET(%rdi) /* restore XMM FP regs */
+ jmp 5f
+4: fninit
+ fldcw MC_FP_CW_OFFSET(%rdi)
+5: movq MC_RSP(%rdi), %rsp /* switch to context stack */
+ subq $REDZONE, %rsp
+ movq MC_RIP(%rdi), %rax /* return address on stack */
+ pushq %rax
+ movq MC_RDI(%rdi), %rax /* rdi on stack */
+ pushq %rax
+ movq MC_RDX(%rdi), %rax /* rdx on stack */
+ pushq %rax
+ movq MC_RSI(%rdi), %rax /* rsi on stack */
+ pushq %rax
+ movq MC_RFLAGS(%rdi), %rax /* flags on stack */
+ pushq %rax
+ movq MC_RAX(%rdi), %rax /* restore rax */
+ /* At this point we're done with the context. */
+ cmpq $0, %rdx /* set *loc to val */
+ je 6f
+ movq %rsi, (%rdx)
+6: popfq /* restore flags */
+ popq %rsi /* restore rsi, rdx, and rdi */
+ popq %rdx
+ popq %rdi
+ ret $REDZONE
+
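
The save/restore pair above behaves like setjmp/longjmp: _amd64_save_context() returns 0 after capturing the context, and appears to return 1 when the context is later resumed, because the save path pre-stores 1 in MC_RAX. A minimal sketch of how a caller might use the pair; the two prototypes are the ones declared in pthread_md.h below, everything else here is illustrative:

#include <stdint.h>
#include <ucontext.h>

int _amd64_save_context(mcontext_t *mc);
int _amd64_restore_context(mcontext_t *mc, intptr_t val, intptr_t *loc);

static mcontext_t saved_ctx;

void
save_restore_example(void)
{
	if (_amd64_save_context(&saved_ctx) == 0) {
		/* First return: the context was captured. */
		_amd64_restore_context(&saved_ctx, 0, NULL); /* does not return */
	}
	/* Second return: resumed here with %rax == 1. */
}
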
diff --git a/lib/libpthread/arch/amd64/amd64/enter_uts.S b/lib/libpthread/arch/amd64/amd64/enter_uts.S
new file mode 100644
index 0000000..fb0df87
--- /dev/null
+++ b/lib/libpthread/arch/amd64/amd64/enter_uts.S
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2003 Daniel Eischen <deischen@freebsd.org>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Neither the name of the author nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <machine/asm.h>
+__FBSDID("$FreeBSD$");
+
+
+/*
+ * _amd64_enter_uts(struct kse_mailbox *km, kse_func_t uts, void *stack,
+ * size_t stacksz);
+ */
+ENTRY(_amd64_enter_uts)
+ addq %rcx, %rdx /* get stack base */
+ andq $~0xf, %rdx /* align to 16 bytes */
+ movq %rdx, %rsp /* switch to UTS stack */
+ movq %rdx, %rbp /* set frame */
+ callq *%rsi
+ ret
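
In C terms, _amd64_enter_uts() computes the top of the UTS stack, aligns it down to the 16-byte boundary the amd64 ABI requires, makes it the current stack, and calls uts(km), which never returns. A rough equivalent, illustrative only since replacing %rsp itself can only be done in assembly:

#include <sys/types.h>
#include <sys/kse.h>
#include <stdint.h>

void
enter_uts_sketch(struct kse_mailbox *km, kse_func_t uts, void *stack,
    size_t stacksz)
{
	uintptr_t sp;

	sp = (uintptr_t)stack + stacksz;	/* stacks grow down; start at the top */
	sp &= ~(uintptr_t)0xf;			/* 16-byte alignment per the ABI */
	/* ...switch %rsp and %rbp to sp here (assembly only)... */
	uts(km);				/* the UTS loop never returns */
}
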
diff --git a/lib/libpthread/arch/amd64/amd64/pthread_md.c b/lib/libpthread/arch/amd64/amd64/pthread_md.c
new file mode 100644
index 0000000..3aceec7
--- /dev/null
+++ b/lib/libpthread/arch/amd64/amd64/pthread_md.c
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2003 Daniel Eischen <deischen@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Neither the name of the author nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <stdlib.h>
+#include <strings.h>
+#include "rtld_tls.h"
+#include "pthread_md.h"
+
+/*
+ * The constructors.
+ */
+struct tcb *
+_tcb_ctor(struct pthread *thread, int initial)
+{
+ struct tcb *tcb;
+ void *oldtls;
+
+ if (initial) {
+ __asm __volatile("movq %%fs:0, %0" : "=r" (oldtls));
+ } else {
+ oldtls = NULL;
+ }
+
+ tcb = _rtld_allocate_tls(oldtls, sizeof(struct tcb), 16);
+ if (tcb) {
+ tcb->tcb_thread = thread;
+ bzero(&tcb->tcb_tmbx, sizeof(tcb->tcb_tmbx));
+ }
+
+ return (tcb);
+}
+
+void
+_tcb_dtor(struct tcb *tcb)
+{
+ _rtld_free_tls(tcb, sizeof(struct tcb), 16);
+}
+
+struct kcb *
+_kcb_ctor(struct kse *kse)
+{
+ struct kcb *kcb;
+
+ kcb = malloc(sizeof(struct kcb));
+ if (kcb != NULL) {
+ bzero(kcb, sizeof(struct kcb));
+ kcb->kcb_self = kcb;
+ kcb->kcb_kse = kse;
+ }
+ return (kcb);
+}
+
+void
+_kcb_dtor(struct kcb *kcb)
+{
+ free(kcb);
+}
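
Two details above are worth spelling out. The TCB must come from _rtld_allocate_tls() with 16-byte alignment so that tcb_tmbx is properly aligned, and because amd64 uses TLS variant II, %fs points at the TCB and its first word is a self-pointer. That self-pointer is also why the initial thread can recover the TLS block rtld already created by reading %fs:0. A minimal sketch of that read, assuming the layouts declared in pthread_md.h:

#include <sys/cdefs.h>

/*
 * Because the first word of the block %fs points at is a self-pointer
 * (tcb_self/kcb_self), loading %fs:0 yields the base address of the
 * current thread's TCB.  This mirrors what _tcb_ctor() does for the
 * initial thread.
 */
static __inline void *
current_tcb_base(void)
{
	void *self;

	__asm __volatile("movq %%fs:0, %0" : "=r" (self));
	return (self);
}
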
diff --git a/lib/libpthread/arch/amd64/include/atomic_ops.h b/lib/libpthread/arch/amd64/include/atomic_ops.h
new file mode 100644
index 0000000..980eb8e
--- /dev/null
+++ b/lib/libpthread/arch/amd64/include/atomic_ops.h
@@ -0,0 +1,57 @@
+/*-
+ * Copyright (c) 2001 Daniel Eischen <deischen@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Neither the name of the author nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _ATOMIC_OPS_H_
+#define _ATOMIC_OPS_H_
+
+/*
+ * Atomic swap:
+ * Atomic (tmp = *dst, *dst = val), then *res = tmp
+ *
+ * void atomic_swap64(intptr_t *dst, intptr_t val, intptr_t *res);
+ */
+static inline void
+atomic_swap64(intptr_t *dst, intptr_t val, intptr_t *res)
+{
+ __asm __volatile(
+ "xchgq %2, %1; movq %2, %0"
+ : "=m" (*res) : "m" (*dst), "r" (val) : "memory");
+}
+
+static inline void
+atomic_swap_int(int *dst, int val, int *res)
+{
+ __asm __volatile(
+ "xchgl %2, %1; movl %2, %0"
+ : "=m" (*res) : "m" (*dst), "r" (val) : "memory");
+}
+
+#define atomic_swap_ptr(d, v, r) \
+ atomic_swap64((intptr_t *)(d), (intptr_t)(v), (intptr_t *)(r))
+
+#endif
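
The xchgq/xchgl instructions are implicitly locked, so these swaps are full atomic read-modify-write operations. That makes atomic_swap64() sufficient for a classic test-and-set spinlock; a minimal sketch using only the primitive defined above (the lock word and function names are illustrative):

#include "atomic_ops.h"

static intptr_t lock_word;		/* 0 = free, 1 = held */

static void
spin_acquire(void)
{
	intptr_t old;

	do {
		atomic_swap64(&lock_word, 1, &old);	/* swap 1 in, old value out */
	} while (old != 0);				/* loop until we saw it free */
}

static void
spin_release(void)
{
	intptr_t old;

	atomic_swap64(&lock_word, 0, &old);	/* the locked swap is also a barrier */
}
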
diff --git a/lib/libpthread/arch/amd64/include/pthread_md.h b/lib/libpthread/arch/amd64/include/pthread_md.h
new file mode 100644
index 0000000..a7da5df
--- /dev/null
+++ b/lib/libpthread/arch/amd64/include/pthread_md.h
@@ -0,0 +1,268 @@
+/*-
+ * Copyright (C) 2003 David Xu <davidxu@freebsd.org>
+ * Copyright (c) 2001 Daniel Eischen <deischen@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Neither the name of the author nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+/*
+ * Machine-dependent thread prototypes/definitions for the thread kernel.
+ */
+#ifndef _PTHREAD_MD_H_
+#define _PTHREAD_MD_H_
+
+#include <stddef.h>
+#include <sys/types.h>
+#include <sys/kse.h>
+#include <machine/sysarch.h>
+#include <ucontext.h>
+
+#define KSE_STACKSIZE 16384
+#define DTV_OFFSET offsetof(struct tcb, tcb_dtv)
+
+#define THR_GETCONTEXT(ucp) \
+ (void)_amd64_save_context(&(ucp)->uc_mcontext)
+#define THR_SETCONTEXT(ucp) \
+ (void)_amd64_restore_context(&(ucp)->uc_mcontext, 0, NULL)
+
+#define PER_KSE
+#undef PER_THREAD
+
+struct kse;
+struct pthread;
+struct tdv;
+
+/*
+ * %fs points to a struct kcb.
+ */
+struct kcb {
+ struct tcb *kcb_curtcb;
+ struct kcb *kcb_self; /* self reference */
+ struct kse *kcb_kse;
+ struct kse_mailbox kcb_kmbx;
+};
+
+struct tcb {
+ struct tcb *tcb_self; /* required by rtld */
+ void *tcb_dtv; /* required by rtld */
+ struct pthread *tcb_thread;
+ void *tcb_spare[1]; /* align tcb_tmbx to 16 bytes */
+ struct kse_thr_mailbox tcb_tmbx;
+};
+
+/*
+ * Evaluates to the byte offset of the per-kse variable name.
+ */
+#define __kcb_offset(name) __offsetof(struct kcb, name)
+
+/*
+ * Evaluates to the type of the per-kse variable name.
+ */
+#define __kcb_type(name) __typeof(((struct kcb *)0)->name)
+
+/*
+ * Evaluates to the value of the per-kse variable name.
+ */
+#define KCB_GET64(name) ({ \
+ __kcb_type(name) __result; \
+ \
+ u_long __i; \
+ __asm __volatile("movq %%fs:%1, %0" \
+ : "=r" (__i) \
+ : "m" (*(u_long *)(__kcb_offset(name)))); \
+ __result = (__kcb_type(name))__i; \
+ \
+ __result; \
+})
+
+/*
+ * Sets the value of the per-kse variable name to value val.
+ */
+#define KCB_SET64(name, val) ({ \
+ __kcb_type(name) __val = (val); \
+ \
+ u_long __i; \
+ __i = (u_long)__val; \
+ __asm __volatile("movq %1,%%fs:%0" \
+ : "=m" (*(u_long *)(__kcb_offset(name))) \
+ : "r" (__i)); \
+})
+
+static __inline u_long
+__kcb_readandclear64(volatile u_long *addr)
+{
+ u_long result;
+
+ __asm __volatile (
+ " xorq %0, %0;"
+ " xchgq %%fs:%1, %0;"
+ "# __kcb_readandclear64"
+ : "=&r" (result)
+ : "m" (*addr));
+ return (result);
+}
+
+#define KCB_READANDCLEAR64(name) ({ \
+ __kcb_type(name) __result; \
+ \
+ __result = (__kcb_type(name)) \
+ __kcb_readandclear64((u_long *)__kcb_offset(name)); \
+ __result; \
+})
+
+
+#define _kcb_curkcb() KCB_GET64(kcb_self)
+#define _kcb_curtcb() KCB_GET64(kcb_curtcb)
+#define _kcb_curkse() ((struct kse *)KCB_GET64(kcb_kmbx.km_udata))
+#define _kcb_get_tmbx() KCB_GET64(kcb_kmbx.km_curthread)
+#define _kcb_set_tmbx(value) KCB_SET64(kcb_kmbx.km_curthread, (void *)value)
+#define _kcb_readandclear_tmbx() KCB_READANDCLEAR64(kcb_kmbx.km_curthread)
+
+/*
+ * The constructors.
+ */
+struct tcb *_tcb_ctor(struct pthread *, int);
+void _tcb_dtor(struct tcb *tcb);
+struct kcb *_kcb_ctor(struct kse *);
+void _kcb_dtor(struct kcb *);
+
+/* Called from the KSE to set its private data. */
+static __inline void
+_kcb_set(struct kcb *kcb)
+{
+ amd64_set_fsbase(kcb);
+}
+
+/* Get the current kcb. */
+static __inline struct kcb *
+_kcb_get(void)
+{
+ return (_kcb_curkcb());
+}
+
+static __inline struct kse_thr_mailbox *
+_kcb_critical_enter(void)
+{
+ struct kse_thr_mailbox *crit;
+
+ crit = _kcb_readandclear_tmbx();
+ return (crit);
+}
+
+static __inline void
+_kcb_critical_leave(struct kse_thr_mailbox *crit)
+{
+ _kcb_set_tmbx(crit);
+}
+
+static __inline int
+_kcb_in_critical(void)
+{
+ return (_kcb_get_tmbx() == NULL);
+}
+
+static __inline void
+_tcb_set(struct kcb *kcb, struct tcb *tcb)
+{
+ kcb->kcb_curtcb = tcb;
+}
+
+static __inline struct tcb *
+_tcb_get(void)
+{
+ return (_kcb_curtcb());
+}
+
+static __inline struct pthread *
+_get_curthread(void)
+{
+ struct tcb *tcb;
+
+ tcb = _kcb_curtcb();
+ if (tcb != NULL)
+ return (tcb->tcb_thread);
+ else
+ return (NULL);
+}
+
+static __inline struct kse *
+_get_curkse(void)
+{
+ return ((struct kse *)_kcb_curkse());
+}
+
+void _amd64_enter_uts(struct kse_mailbox *km, kse_func_t uts, void *stack,
+ size_t stacksz);
+int _amd64_restore_context(mcontext_t *mc, intptr_t val, intptr_t *loc);
+int _amd64_save_context(mcontext_t *mc);
+
+static __inline int
+_thread_enter_uts(struct tcb *tcb, struct kcb *kcb)
+{
+ int ret;
+
+ ret = _amd64_save_context(&tcb->tcb_tmbx.tm_context.uc_mcontext);
+ if (ret == 0) {
+ _amd64_enter_uts(&kcb->kcb_kmbx, kcb->kcb_kmbx.km_func,
+ kcb->kcb_kmbx.km_stack.ss_sp,
+ kcb->kcb_kmbx.km_stack.ss_size);
+ /* We should not reach here. */
+ return (-1);
+ }
+ else if (ret < 0)
+ return (-1);
+ return (0);
+}
+
+static __inline int
+_thread_switch(struct kcb *kcb, struct tcb *tcb, int setmbox)
+{
+ extern int _libkse_debug;
+
+ if ((kcb == NULL) || (tcb == NULL))
+ return (-1);
+ kcb->kcb_curtcb = tcb;
+
+ if (_libkse_debug == 0) {
+ tcb->tcb_tmbx.tm_lwp = kcb->kcb_kmbx.km_lwp;
+ if (setmbox != 0)
+ _amd64_restore_context(
+ &tcb->tcb_tmbx.tm_context.uc_mcontext,
+ (intptr_t)&tcb->tcb_tmbx,
+ (intptr_t *)&kcb->kcb_kmbx.km_curthread);
+ else
+ _amd64_restore_context(
+ &tcb->tcb_tmbx.tm_context.uc_mcontext,
+ 0, NULL);
+ /* We should not reach here. */
+ } else {
+ if (setmbox)
+ kse_switchin(&tcb->tcb_tmbx, KSE_SWITCHIN_SETTMBX);
+ else
+ kse_switchin(&tcb->tcb_tmbx, 0);
+ }
+
+ return (-1);
+}
+#endif
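
Putting the pieces together: a KSE drops into its scheduler with _thread_enter_uts(), which snapshots the current thread's context and jumps onto the UTS stack, and the scheduler resumes a chosen thread with _thread_switch(). When setmbox is nonzero, the restore atomically publishes the thread's mailbox pointer into km_curthread as it switches, so the kernel can tell which thread is now running. The shape of the resulting loop, with pick_next_thread() as a hypothetical scheduler hook; only the kcb/tcb types and _thread_switch() come from the header above:

#include "pthread_md.h"

struct tcb *pick_next_thread(void);	/* hypothetical: choose a runnable thread */

void
uts_loop_sketch(struct kcb *kcb)
{
	struct tcb *next;

	for (;;) {
		next = pick_next_thread();
		if (next != NULL)
			_thread_switch(kcb, next, 1);	/* does not return on success */
	}
}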