Diffstat (limited to 'lib/libpthread/arch/ia64')
-rw-r--r--	lib/libpthread/arch/ia64/Makefile.inc              5
-rw-r--r--	lib/libpthread/arch/ia64/ia64/context.S          351
-rw-r--r--	lib/libpthread/arch/ia64/ia64/enter_uts.S         60
-rw-r--r--	lib/libpthread/arch/ia64/ia64/pthread_md.c        75
-rw-r--r--	lib/libpthread/arch/ia64/include/atomic_ops.h     47
-rw-r--r--	lib/libpthread/arch/ia64/include/pthread_md.h    252
6 files changed, 790 insertions, 0 deletions
diff --git a/lib/libpthread/arch/ia64/Makefile.inc b/lib/libpthread/arch/ia64/Makefile.inc
new file mode 100644
index 0000000..c8b0362
--- /dev/null
+++ b/lib/libpthread/arch/ia64/Makefile.inc
@@ -0,0 +1,5 @@
+# $FreeBSD$
+
+.PATH: ${.CURDIR}/arch/${MACHINE_ARCH}/${MACHINE_ARCH}
+
+SRCS+= context.S enter_uts.S pthread_md.c
diff --git a/lib/libpthread/arch/ia64/ia64/context.S b/lib/libpthread/arch/ia64/ia64/context.S
new file mode 100644
index 0000000..9411293
--- /dev/null
+++ b/lib/libpthread/arch/ia64/ia64/context.S
@@ -0,0 +1,351 @@
+/*
+ * Copyright (c) 2003 Marcel Moolenaar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <machine/asm.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/syscall.h>
+
+#define	SIZEOF_SPECIAL	(18*8)
+
+/*
+ * int _ia64_restore_context(mcontext_t *mc, intptr_t val, intptr_t *loc);
+ */
+ENTRY(_ia64_restore_context, 3)
+{	.mmi
+	invala
+	mov	ar.rsc=0xc
+	add	r32=16,r32
+	;;
+}
+{	.mmi
+	loadrs
+	ld8	r12=[r32]		// sp
+	add	r31=8,r32
+	;;
+}
+{	.mii
+	ld8	r16=[r31],16		// unat (before)
+	add	r30=16,r32
+	add	r14=SIZEOF_SPECIAL,r32
+	;;
+}
+{	.mmi
+	ld8	r17=[r30],16		// rp
+	ld8	r18=[r31],16		// pr
+	mov	r2=r33
+	;;
+}
+{	.mmi
+	ld8	r19=[r30],16		// pfs
+	ld8	r20=[r31],32		// bspstore
+	mov	rp=r17
+	;;
+}
+{	.mmi
+	ld8	r21=[r30],32		// rnat
+	ld8	r22=[r31],16		// rsc
+	mov	pr=r18,0x1fffe
+	;;
+}
+{	.mmi
+	ld8	r23=[r30]		// fpsr
+	ld8	r24=[r31]		// psr -- not used
+	mov	r3=r34
+	;;
+}
+{	.mmi
+	ld8	r17=[r14],8		// unat (after)
+	mov	ar.bspstore=r20
+	cmp.ne	p15,p0=r0,r3
+	;;
+}
+{	.mmi
+	mov	ar.rnat=r21
+	mov	ar.unat=r17
+	add	r15=8,r14
+	;;
+}
+{	.mmi
+	ld8.fill	r4=[r14],16	// r4
+	ld8.fill	r5=[r15],16	// r5
+	mov	ar.pfs=r19
+	;;
+}
+{	.mmi
+	ld8.fill	r6=[r14],16	// r6
+	ld8.fill	r7=[r15],16	// r7
+	nop	0
+	;;
+}
+{	.mmi
+	mov	ar.unat=r16
+	mov	ar.rsc=r22
+	nop	0
+}
+{	.mmi
+	ld8	r17=[r14],16		// b1
+	ld8	r18=[r15],16		// b2
+	nop	0
+	;;
+}
+{	.mmi
+	ld8	r19=[r14],16		// b3
+	ld8	r20=[r15],16		// b4
+	mov	b1=r17
+	;;
+}
+{	.mmi
+	ld8	r16=[r14],24		// b5
+	ld8	r17=[r15],32		// lc
+	mov	b2=r18
+	;;
+}
+{	.mmi
+	ldf.fill	f2=[r14],32
+	ldf.fill	f3=[r15],32
+	mov	b3=r19
+	;;
+}
+{	.mmi
+	ldf.fill	f4=[r14],32
+	ldf.fill	f5=[r15],32
+	mov	b4=r20
+	;;
+}
+{	.mmi
+	ldf.fill	f16=[r14],32
+	ldf.fill	f17=[r15],32
+	mov	b5=r16
+	;;
+}
+{	.mmi
+	ldf.fill	f18=[r14],32
+	ldf.fill	f19=[r15],32
+	mov	ar.lc=r17
+	;;
+}
+	ldf.fill	f20=[r14],32
+	ldf.fill	f21=[r15],32
+	;;
+	ldf.fill	f22=[r14],32
+	ldf.fill	f23=[r15],32
+	;;
+	ldf.fill	f24=[r14],32
+	ldf.fill	f25=[r15],32
+	;;
+	ldf.fill	f26=[r14],32
+	ldf.fill	f27=[r15],32
+	;;
+	ldf.fill	f28=[r14],32
+	ldf.fill	f29=[r15],32
+	;;
+	ldf.fill	f30=[r14],32+24
+	ldf.fill	f31=[r15],24+24
+	;;
+	ld8	r8=[r14],16
+	ld8	r9=[r15],16
+	;;
+	ld8	r10=[r14]
+	ld8	r11=[r15]
+	;;
+{	.mmb
+(p15)	st8	[r3]=r2
+	mov	ar.fpsr=r23
+	br.ret.sptk rp
+	;;
+}
+END(_ia64_restore_context)
+
+/*
+ * int _ia64_save_context(mcontext_t *mc);
+ */
+ENTRY(_ia64_save_context, 1)
+{	.mmi
+	mov	r14=ar.rsc
+	mov	r15=ar.fpsr
+	add	r31=8,r32
+	;;
+}
+{	.mmi
+	st8	[r32]=r0,16
+	st8	[r31]=r0,16
+	nop	0
+	;;
+}
+{	.mmi
+	mov	ar.rsc=0xc
+	mov	r16=ar.unat
+	nop	0
+	;;
+}
+{	.mmi
+	flushrs
+	st8	[r32]=sp,16		// sp
+	mov	r17=rp
+	;;
+}
+{	.mmi
+	st8	[r31]=r16,16		// unat (before)
+	st8	[r32]=r17,16		// rp
+	mov	r16=pr
+	;;
+}
+{	.mmi
+	st8	[r31]=r16,16		// pr
+	mov	r17=ar.bsp
+	mov	r16=ar.pfs
+	;;
+}
+{	.mmi
+	st8	[r32]=r16,16		// pfs
+	st8	[r31]=r17,16		// bspstore
+	nop	0
+	;;
+}
+{	.mmi
+	mov	r16=ar.rnat
+	mov	ar.rsc=r14
+	add	r30=SIZEOF_SPECIAL-(6*8),r32
+	;;
+}
+{	.mmi
+	st8	[r32]=r16,16		// rnat
+	st8	[r31]=r0,16		// __spare
+	nop	0
+	;;
+}
+{	.mmi
+	st8	[r32]=r13,16		// tp -- not used
+	st8	[r31]=r14,16		// rsc
+	mov	r16=b1
+	;;
+}
+{	.mmi
+	st8	[r32]=r15,10*8		// fpr
+	st8	[r31]=r0,8*8		// psr
+	nop	0
+	;;
+}
+	/* callee_saved */
+{	.mmi
+	.mem.offset	8,0
+	st8.spill	[r31]=r4,16	// r4
+	.mem.offset	16,0
+	st8.spill	[r32]=r5,16	// r5
+	mov	r17=b2
+	;;
+}
+{	.mmi
+	.mem.offset	24,0
+	st8.spill	[r31]=r6,16	// r6
+	.mem.offset	32,0
+	st8.spill	[r32]=r7,16	// r7
+	mov	r18=b3
+	;;
+}
+{	.mmi
+	st8	[r31]=r16,16		// b1
+	mov	r16=ar.unat
+	mov	r19=b4
+	;;
+}
+{	.mmi
+	st8	[r30]=r16		// unat (after)
+	st8	[r32]=r17,16		// b2
+	mov	r16=b5
+	;;
+}
+{	.mmi
+	st8	[r31]=r18,16		// b3
+	st8	[r32]=r19,16		// b4
+	mov	r17=ar.lc
+	;;
+}
+	st8	[r31]=r16,16		// b5
+	st8	[r32]=r17,16		// lc
+	;;
+	st8	[r31]=r0,24		// __spare
+	stf.spill	[r32]=f2,32
+	;;
+	stf.spill	[r31]=f3,32
+	stf.spill	[r32]=f4,32
+	;;
+	stf.spill	[r31]=f5,32
+	stf.spill	[r32]=f16,32
+	;;
+	stf.spill	[r31]=f17,32
+	stf.spill	[r32]=f18,32
+	;;
+	stf.spill	[r31]=f19,32
+	stf.spill	[r32]=f20,32
+	;;
+	stf.spill	[r31]=f21,32
+	stf.spill	[r32]=f22,32
+	;;
+	stf.spill	[r31]=f23,32
+	stf.spill	[r32]=f24,32
+	;;
+	stf.spill	[r31]=f25,32
+	stf.spill	[r32]=f26,32
+	;;
+	stf.spill	[r31]=f27,32
+	stf.spill	[r32]=f28,32
+	;;
+{	.mmi
+	stf.spill	[r31]=f29,32
+	stf.spill	[r32]=f30,32+24
+	add	r14=1,r0
+	;;
+}
+{	.mmi
+	stf.spill	[r31]=f31,24+24
+	st8	[r32]=r14,16		// r8
+	add	r8=0,r0
+	;;
+}
+	st8	[r31]=r0,16		// r9
+	st8	[r32]=r0		// r10
+	;;
+{	.mmb
+	st8	[r31]=r0		// r11
+	mf
+	br.ret.sptk rp
+	;;
+}
+END(_ia64_save_context)
+
+/*
+ * void _ia64_break_setcontext(mcontext_t *mc);
+ */
+ENTRY(_ia64_break_setcontext, 1)
+{	.mmi
+	mov	r8=r32
+	break	0x180000
+	nop	0
+	;;
+}
+END(_ia64_break_setcontext)
diff --git a/lib/libpthread/arch/ia64/ia64/enter_uts.S b/lib/libpthread/arch/ia64/ia64/enter_uts.S
new file mode 100644
index 0000000..5df4d93
--- /dev/null
+++ b/lib/libpthread/arch/ia64/ia64/enter_uts.S
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2003 Marcel Moolenaar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <machine/asm.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * void _ia64_enter_uts(kse_func_t uts, struct kse_mailbox *km, void *stack,
+ *	size_t stacksz);
+ */
+ENTRY(_ia64_enter_uts, 4)
+{	.mmi
+	ld8	r14=[in0],8
+	mov	ar.rsc=0xc
+	add	r15=in2,in3
+	;;
+}
+{	.mmi
+	flushrs
+	ld8	r1=[in0]
+	mov	b7=r14
+	;;
+}
+{	.mii
+	mov	ar.bspstore=in2
+	add	sp=-16,r15
+	mov	rp=r14
+	;;
+}
+{	.mib
+	mov	ar.rsc=0xf
+	mov	in0=in1
+	br.cond.sptk b7
+	;;
+}
+1:	br.cond.sptk 1b
+END(_ia64_enter_uts)
diff --git a/lib/libpthread/arch/ia64/ia64/pthread_md.c b/lib/libpthread/arch/ia64/ia64/pthread_md.c
new file mode 100644
index 0000000..00e9a40
--- /dev/null
+++ b/lib/libpthread/arch/ia64/ia64/pthread_md.c
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2003 Daniel Eischen <deischen@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Neither the name of the author nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <stdlib.h>
+#include <strings.h>
+#include "pthread_md.h"
+
+/*
+ * The constructors.
+ */
+struct tcb *
+_tcb_ctor(struct pthread *thread, int initial)
+{
+	struct tcb *tcb;
+
+	if ((tcb = malloc(sizeof(struct tcb))) != NULL) {
+		bzero(tcb, sizeof(struct tcb));
+		tcb->tcb_thread = thread;
+		/* Allocate TDV */
+	}
+	return (tcb);
+}
+
+void
+_tcb_dtor(struct tcb *tcb)
+{
+	/* Free TDV */
+	free(tcb);
+}
+
+struct kcb *
+_kcb_ctor(struct kse *kse)
+{
+	struct kcb *kcb;
+
+	if ((kcb = malloc(sizeof(struct kcb))) != NULL) {
+		bzero(kcb, sizeof(struct kcb));
+		kcb->kcb_faketcb.tcb_isfake = 1;
+		kcb->kcb_faketcb.tcb_tmbx.tm_flags = TMF_NOUPCALL;
+		kcb->kcb_curtcb = &kcb->kcb_faketcb;
+		kcb->kcb_kse = kse;
+	}
+	return (kcb);
+}
+
+void
+_kcb_dtor(struct kcb *kcb)
+{
+	free(kcb);
+}
diff --git a/lib/libpthread/arch/ia64/include/atomic_ops.h b/lib/libpthread/arch/ia64/include/atomic_ops.h
new file mode 100644
index 0000000..483c905
--- /dev/null
+++ b/lib/libpthread/arch/ia64/include/atomic_ops.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2003 Marcel Moolenaar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _ATOMIC_OPS_H_
+#define _ATOMIC_OPS_H_
+
+static inline void
+atomic_swap_int(int *dst, int val, int *res)
+{
+	__asm("xchg4 %0=[%2],%1" : "=r"(*res) : "r"(val), "r"(dst));
+}
+
+static inline void
+atomic_swap_long(long *dst, long val, long *res)
+{
+	__asm("xchg8 %0=[%2],%1" : "=r"(*res) : "r"(val), "r"(dst));
+}
+
+#define	atomic_swap_ptr(d,v,r) \
+	atomic_swap_long((long*)d, (long)v, (long*)r)
+
+#endif /* _ATOMIC_OPS_H_ */
diff --git a/lib/libpthread/arch/ia64/include/pthread_md.h b/lib/libpthread/arch/ia64/include/pthread_md.h
new file mode 100644
index 0000000..1df5046
--- /dev/null
+++ b/lib/libpthread/arch/ia64/include/pthread_md.h
@@ -0,0 +1,252 @@
+/*
+ * Copyright (c) 2003 Marcel Moolenaar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _PTHREAD_MD_H_
+#define _PTHREAD_MD_H_
+
+#include <sys/kse.h>
+#include <stddef.h>
+#include <ucontext.h>
+
+#define	KSE_STACKSIZE		16384
+#define	DTV_OFFSET		offsetof(struct tcb, tcb_tp.tp_tdv)
+
+#define	THR_GETCONTEXT(ucp)	_ia64_save_context(&(ucp)->uc_mcontext)
+#define	THR_SETCONTEXT(ucp)	PANIC("THR_SETCONTEXT() now in use!\n")
+
+#define	PER_THREAD
+
+struct kcb;
+struct kse;
+struct pthread;
+struct tcb;
+struct tdv;			/* We don't know what this is yet? */
+
+/*
+ * tp points to one of these. We define the static TLS as an array
+ * of long double to enforce 16-byte alignment of the TLS memory,
+ * struct ia64_tp, struct tcb and also struct kcb. Both static and
+ * dynamic allocation of any of these structures will result in a
+ * valid, well-aligned thread pointer.
+ */
+struct ia64_tp {
+	struct tdv		*tp_tdv;	/* dynamic TLS */
+	uint64_t		_reserved_;
+	long double		tp_tls[0];	/* static TLS */
+};
+
+struct tcb {
+	struct kse_thr_mailbox	tcb_tmbx;
+	struct pthread		*tcb_thread;
+	struct kcb		*tcb_curkcb;
+	long			tcb_isfake;
+	struct ia64_tp		tcb_tp;
+};
+
+struct kcb {
+	struct kse_mailbox	kcb_kmbx;
+	struct tcb		kcb_faketcb;
+	struct tcb		*kcb_curtcb;
+	struct kse		*kcb_kse;
+};
+
+register struct ia64_tp *_tp __asm("%r13");
+
+#define	_tcb	((struct tcb*)((char*)(_tp) - offsetof(struct tcb, tcb_tp)))
+
+/*
+ * The kcb and tcb constructors.
+ */
+struct tcb	*_tcb_ctor(struct pthread *, int);
+void	_tcb_dtor(struct tcb *);
+struct kcb	*_kcb_ctor(struct kse *kse);
+void	_kcb_dtor(struct kcb *);
+
+/* Called from the KSE to set its private data. */
+static __inline void
+_kcb_set(struct kcb *kcb)
+{
+	/* There is no thread yet; use the fake tcb. */
+	_tp = &kcb->kcb_faketcb.tcb_tp;
+}
+
+/*
+ * Get the current kcb.
+ *
+ * This can only be called while in a critical region; don't
+ * worry about having the kcb changed out from under us.
+ */
+static __inline struct kcb *
+_kcb_get(void)
+{
+	return (_tcb->tcb_curkcb);
+}
+
+/*
+ * Enter a critical region.
+ *
+ * Read and clear km_curthread in the kse mailbox.
+ */
+static __inline struct kse_thr_mailbox *
+_kcb_critical_enter(void)
+{
+	struct kse_thr_mailbox *crit;
+	uint32_t flags;
+
+	if (_tcb->tcb_isfake != 0) {
+		/*
+		 * We already are in a critical region since
+		 * there is no current thread.
+		 */
+		crit = NULL;
+	} else {
+		flags = _tcb->tcb_tmbx.tm_flags;
+		_tcb->tcb_tmbx.tm_flags |= TMF_NOUPCALL;
+		crit = _tcb->tcb_curkcb->kcb_kmbx.km_curthread;
+		_tcb->tcb_curkcb->kcb_kmbx.km_curthread = NULL;
+		_tcb->tcb_tmbx.tm_flags = flags;
+	}
+	return (crit);
+}
+
+static __inline void
+_kcb_critical_leave(struct kse_thr_mailbox *crit)
+{
+	/* No need to do anything if this is a fake tcb. */
+	if (_tcb->tcb_isfake == 0)
+		_tcb->tcb_curkcb->kcb_kmbx.km_curthread = crit;
+}
+
+static __inline int
+_kcb_in_critical(void)
+{
+	uint32_t flags;
+	int ret;
+
+	if (_tcb->tcb_isfake != 0) {
+		/*
+		 * We are in a critical region since there is no
+		 * current thread.
+		 */
+		ret = 1;
+	} else {
+		flags = _tcb->tcb_tmbx.tm_flags;
+		_tcb->tcb_tmbx.tm_flags |= TMF_NOUPCALL;
+		ret = (_tcb->tcb_curkcb->kcb_kmbx.km_curthread == NULL);
+		_tcb->tcb_tmbx.tm_flags = flags;
+	}
+	return (ret);
+}
+
+static __inline void
+_tcb_set(struct kcb *kcb, struct tcb *tcb)
+{
+	if (tcb == NULL)
+		tcb = &kcb->kcb_faketcb;
+	kcb->kcb_curtcb = tcb;
+	tcb->tcb_curkcb = kcb;
+	_tp = &tcb->tcb_tp;
+}
+
+static __inline struct tcb *
+_tcb_get(void)
+{
+	return (_tcb);
+}
+
+static __inline struct pthread *
+_get_curthread(void)
+{
+	return (_tcb->tcb_thread);
+}
+
+/*
+ * Get the current kse.
+ *
+ * Like _kcb_get(), this can only be called while in a critical region.
+ */
+static __inline struct kse *
+_get_curkse(void)
+{
+	return (_tcb->tcb_curkcb->kcb_kse);
+}
+
+void _ia64_break_setcontext(mcontext_t *mc);
+void _ia64_enter_uts(kse_func_t uts, struct kse_mailbox *km, void *stack,
+    size_t stacksz);
+int _ia64_restore_context(mcontext_t *mc, intptr_t val, intptr_t *loc);
+int _ia64_save_context(mcontext_t *mc);
+
+static __inline int
+_thread_enter_uts(struct tcb *tcb, struct kcb *kcb)
+{
+	if (_ia64_save_context(&tcb->tcb_tmbx.tm_context.uc_mcontext) == 0) {
+		/* Make the fake tcb the current thread. */
+		kcb->kcb_curtcb = &kcb->kcb_faketcb;
+		_tp = &kcb->kcb_faketcb.tcb_tp;
+		_ia64_enter_uts(kcb->kcb_kmbx.km_func, &kcb->kcb_kmbx,
+		    kcb->kcb_kmbx.km_stack.ss_sp,
+		    kcb->kcb_kmbx.km_stack.ss_size);
+		/* We should not reach here. */
+		return (-1);
+	}
+	return (0);
+}
+
+static __inline int
+_thread_switch(struct kcb *kcb, struct tcb *tcb, int setmbox)
+{
+	mcontext_t *mc;
+
+	_tcb_set(kcb, tcb);
+	mc = &tcb->tcb_tmbx.tm_context.uc_mcontext;
+	if (mc->mc_flags & _MC_FLAGS_ASYNC_CONTEXT) {
+		if (setmbox) {
+			mc->mc_flags |= _MC_FLAGS_KSE_SET_MBOX;
+			mc->mc_special.ifa =
+			    (intptr_t)&kcb->kcb_kmbx.km_curthread;
+			mc->mc_special.isr = (intptr_t)&tcb->tcb_tmbx;
+		}
+		_ia64_break_setcontext(mc);
+	} else if (mc->mc_flags & _MC_FLAGS_SYSCALL_CONTEXT) {
+		if (setmbox)
+			kse_switchin(&tcb->tcb_tmbx, KSE_SWITCHIN_SETTMBX);
+		else
+			kse_switchin(&tcb->tcb_tmbx, 0);
+	} else {
+		if (setmbox)
+			_ia64_restore_context(mc, (intptr_t)&tcb->tcb_tmbx,
+			    (intptr_t *)&kcb->kcb_kmbx.km_curthread);
+		else
+			_ia64_restore_context(mc, 0, NULL);
+	}
+	/* We should not reach here. */
+	return (-1);
+}
+
+#endif /* _PTHREAD_MD_H_ */
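Editor's note: the sketch below is not part of the commit. It is a minimal, portable illustration of how pthread_md.h recovers the current tcb from the ia64 thread pointer (register r13): the tp points at the tcb_tp member embedded in the tcb, and the _tcb macro subtracts offsetof(struct tcb, tcb_tp) to get back to the enclosing structure. The struct layouts here are simplified stand-ins (the real tcb embeds struct kse_thr_mailbox and more), and an ordinary global replaces the r13 register variable so the sketch builds anywhere.

/*
 * Simplified stand-in structures; only the pointer arithmetic is the point.
 */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct tdv;				/* opaque, as in the header */

struct ia64_tp {
	struct tdv	*tp_tdv;	/* dynamic TLS */
	unsigned long	_reserved_;
};

struct tcb {
	long		tcb_isfake;	/* placeholder for the real fields */
	struct ia64_tp	tcb_tp;		/* the thread pointer aims here */
};

/* Portable stand-in for: register struct ia64_tp *_tp __asm("%r13"); */
static struct ia64_tp *_tp;

#define	_tcb	((struct tcb *)((char *)(_tp) - offsetof(struct tcb, tcb_tp)))

int
main(void)
{
	struct tcb *tcb = calloc(1, sizeof(*tcb));

	_tp = &tcb->tcb_tp;		/* what _tcb_set() effectively does */
	printf("round trip ok: %d\n", _tcb == tcb);	/* prints 1 */
	free(tcb);
	return (0);
}

Because tp always points at an embedded member, any tcb (static fake tcb inside the kcb, or one malloc'd by _tcb_ctor) yields a valid thread pointer, which is what the layout comment in pthread_md.h is getting at.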
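Editor's note: atomic_ops.h only builds on ia64, since xchg4/xchg8 are IPF instructions. The following is a hedged, portable sketch of the same three-argument swap interface, using the GCC/Clang __atomic builtins as an assumed stand-in, plus a trivial test-and-set lock to show how such a swap primitive is typically consumed; none of this code is in the commit.

#include <stdio.h>

/* Same shape as the ia64 atomic_swap_int(): old value comes back via *res. */
static inline void
atomic_swap_int(int *dst, int val, int *res)
{
	*res = __atomic_exchange_n(dst, val, __ATOMIC_SEQ_CST);
}

static int lock_word;			/* 0 = free, 1 = held */

static void
lock_acquire(void)
{
	int old;

	do
		atomic_swap_int(&lock_word, 1, &old);
	while (old != 0);		/* spin until we swapped 1 into a 0 */
}

static void
lock_release(void)
{
	__atomic_store_n(&lock_word, 0, __ATOMIC_SEQ_CST);
}

int
main(void)
{
	lock_acquire();
	printf("lock held\n");
	lock_release();
	return (0);
}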