Diffstat (limited to 'lib/libpthread/arch/i386/include')
-rw-r--r--   lib/libpthread/arch/i386/include/atomic_ops.h |  51
-rw-r--r--   lib/libpthread/arch/i386/include/pthread_md.h | 264
2 files changed, 315 insertions, 0 deletions
diff --git a/lib/libpthread/arch/i386/include/atomic_ops.h b/lib/libpthread/arch/i386/include/atomic_ops.h
new file mode 100644
index 0000000..7bc3d1b
--- /dev/null
+++ b/lib/libpthread/arch/i386/include/atomic_ops.h
@@ -0,0 +1,51 @@
+/*-
+ * Copyright (c) 2001 Daniel Eischen <deischen@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Neither the name of the author nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _ATOMIC_OPS_H_
+#define _ATOMIC_OPS_H_
+
+/*
+ * Atomic swap:
+ *	Atomic (tmp = *dst, *dst = val), then *res = tmp
+ *
+ * void atomic_swap32(intptr_t *dst, intptr_t val, intptr_t *res);
+ */
+static inline void
+atomic_swap32(intptr_t *dst, intptr_t val, intptr_t *res)
+{
+	__asm __volatile(
+	    "xchgl %2, %1; movl %2, %0"
+	    : "=m" (*res) : "m" (*dst), "r" (val) : "memory");
+}
+
+#define atomic_swap_ptr(d, v, r) \
+	atomic_swap32((intptr_t *)d, (intptr_t)v, (intptr_t *)r)
+
+#define atomic_swap_int(d, v, r) \
+	atomic_swap32((intptr_t *)d, (intptr_t)v, (intptr_t *)r)
+#endif
diff --git a/lib/libpthread/arch/i386/include/pthread_md.h b/lib/libpthread/arch/i386/include/pthread_md.h
new file mode 100644
index 0000000..52afd6a
--- /dev/null
+++ b/lib/libpthread/arch/i386/include/pthread_md.h
@@ -0,0 +1,264 @@
+/*-
+ * Copyright (c) 2002 Daniel Eischen <deischen@freebsd.org>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+/*
+ * Machine-dependent thread prototypes/definitions for the thread kernel.
+ */
+#ifndef _PTHREAD_MD_H_
+#define _PTHREAD_MD_H_
+
+#include <stddef.h>
+#include <sys/types.h>
+#include <sys/kse.h>
+#include <machine/sysarch.h>
+#include <ucontext.h>
+
+extern int _thr_setcontext(mcontext_t *, intptr_t, intptr_t *);
+extern int _thr_getcontext(mcontext_t *);
+
+#define KSE_STACKSIZE		16384
+#define DTV_OFFSET		offsetof(struct tcb, tcb_dtv)
+
+#define THR_GETCONTEXT(ucp)	_thr_getcontext(&(ucp)->uc_mcontext)
+#define THR_SETCONTEXT(ucp)	_thr_setcontext(&(ucp)->uc_mcontext, 0, NULL)
+
+#define PER_KSE
+#undef PER_THREAD
+
+struct kse;
+struct pthread;
+
+/*
+ * %gs points to a struct kcb.
+ */
+struct kcb {
+	struct tcb		*kcb_curtcb;
+	struct kcb		*kcb_self;	/* self reference */
+	struct kse		*kcb_kse;
+	struct kse_mailbox	kcb_kmbx;
+};
+
+struct tcb {
+	struct tcb		*tcb_self;	/* required by rtld */
+	void			*tcb_dtv;	/* required by rtld */
+	struct pthread		*tcb_thread;
+	void			*tcb_spare;	/* align tcb_tmbx to 16 bytes */
+	struct kse_thr_mailbox	tcb_tmbx;
+};
+
+/*
+ * Evaluates to the byte offset of the per-kse variable name.
+ */
+#define __kcb_offset(name)	__offsetof(struct kcb, name)
+
+/*
+ * Evaluates to the type of the per-kse variable name.
+ */
+#define __kcb_type(name)	__typeof(((struct kcb *)0)->name)
+
+/*
+ * Evaluates to the value of the per-kse variable name.
+ */
+#define KCB_GET32(name) ({					\
+	__kcb_type(name) __result;				\
+								\
+	u_int __i;						\
+	__asm __volatile("movl %%gs:%1, %0"			\
+	    : "=r" (__i)					\
+	    : "m" (*(u_int *)(__kcb_offset(name))));		\
+	__result = (__kcb_type(name))__i;			\
+								\
+	__result;						\
+})

+
+/*
+ * Sets the value of the per-kse variable name to value val.
+ */
+#define KCB_SET32(name, val) ({					\
+	__kcb_type(name) __val = (val);				\
+								\
+	u_int __i;						\
+	__i = (u_int)__val;					\
+	__asm __volatile("movl %1,%%gs:%0"			\
+	    : "=m" (*(u_int *)(__kcb_offset(name)))		\
+	    : "r" (__i));					\
+})
+
+static __inline u_long
+__kcb_readandclear32(volatile u_long *addr)
+{
+	u_long result;
+
+	__asm __volatile (
+	    "	xorl	%0, %0;"
+	    "	xchgl	%%gs:%1, %0;"
+	    "# __kcb_readandclear32"
+	    : "=&r" (result)
+	    : "m" (*addr));
+	return (result);
+}
+
+#define KCB_READANDCLEAR32(name) ({				\
+	__kcb_type(name) __result;				\
+								\
+	__result = (__kcb_type(name))				\
+	    __kcb_readandclear32((u_long *)__kcb_offset(name)); \
+	__result;						\
+})
+
+
+#define _kcb_curkcb()		KCB_GET32(kcb_self)
+#define _kcb_curtcb()		KCB_GET32(kcb_curtcb)
+#define _kcb_curkse()		((struct kse *)KCB_GET32(kcb_kmbx.km_udata))
+#define _kcb_get_tmbx()		KCB_GET32(kcb_kmbx.km_curthread)
+#define _kcb_set_tmbx(value)	KCB_SET32(kcb_kmbx.km_curthread, (void *)value)
+#define _kcb_readandclear_tmbx() KCB_READANDCLEAR32(kcb_kmbx.km_curthread)
+
+
+/*
+ * The constructors.
+ */
+struct tcb	*_tcb_ctor(struct pthread *, int);
+void		_tcb_dtor(struct tcb *tcb);
+struct kcb	*_kcb_ctor(struct kse *);
+void		_kcb_dtor(struct kcb *);
+
+/* Called from the KSE to set its private data. */
+static __inline void
+_kcb_set(struct kcb *kcb)
+{
+	i386_set_gsbase(kcb);
+}
+
+/* Get the current kcb. */
+static __inline struct kcb *
+_kcb_get(void)
+{
+	return (_kcb_curkcb());
+}
+
+static __inline struct kse_thr_mailbox *
+_kcb_critical_enter(void)
+{
+	struct kse_thr_mailbox *crit;
+
+	crit = _kcb_readandclear_tmbx();
+	return (crit);
+}
+
+static __inline void
+_kcb_critical_leave(struct kse_thr_mailbox *crit)
+{
+	_kcb_set_tmbx(crit);
+}
+
+static __inline int
+_kcb_in_critical(void)
+{
+	return (_kcb_get_tmbx() == NULL);
+}
+
+static __inline void
+_tcb_set(struct kcb *kcb, struct tcb *tcb)
+{
+	kcb->kcb_curtcb = tcb;
+}
+
+static __inline struct tcb *
+_tcb_get(void)
+{
+	return (_kcb_curtcb());
+}
+
+static __inline struct pthread *
+_get_curthread(void)
+{
+	struct tcb *tcb;
+
+	tcb = _kcb_curtcb();
+	if (tcb != NULL)
+		return (tcb->tcb_thread);
+	else
+		return (NULL);
+}
+
+static __inline struct kse *
+_get_curkse(void)
+{
+	return ((struct kse *)_kcb_curkse());
+}
+
+void _i386_enter_uts(struct kse_mailbox *km, kse_func_t uts, void *stack,
+    size_t stacksz);
+
+static __inline int
+_thread_enter_uts(struct tcb *tcb, struct kcb *kcb)
+{
+	int ret;
+
+	ret = _thr_getcontext(&tcb->tcb_tmbx.tm_context.uc_mcontext);
+	if (ret == 0) {
+		_i386_enter_uts(&kcb->kcb_kmbx, kcb->kcb_kmbx.km_func,
+		    kcb->kcb_kmbx.km_stack.ss_sp,
+		    kcb->kcb_kmbx.km_stack.ss_size);
+		/* We should not reach here. */
+		return (-1);
+	}
+	else if (ret < 0)
+		return (-1);
+	return (0);
+}
+
+static __inline int
+_thread_switch(struct kcb *kcb, struct tcb *tcb, int setmbox)
+{
+	extern int _libkse_debug;
+
+	if ((kcb == NULL) || (tcb == NULL))
+		return (-1);
+	kcb->kcb_curtcb = tcb;
+	if (_libkse_debug == 0) {
+		tcb->tcb_tmbx.tm_lwp = kcb->kcb_kmbx.km_lwp;
+		if (setmbox != 0)
+			_thr_setcontext(&tcb->tcb_tmbx.tm_context.uc_mcontext,
+			    (intptr_t)&tcb->tcb_tmbx,
+			    (intptr_t *)&kcb->kcb_kmbx.km_curthread);
+		else
+			_thr_setcontext(&tcb->tcb_tmbx.tm_context.uc_mcontext,
+			    0, NULL);
+	} else {
+		if (setmbox)
+			kse_switchin(&tcb->tcb_tmbx, KSE_SWITCHIN_SETTMBX);
+		else
+			kse_switchin(&tcb->tcb_tmbx, 0);
+	}
+
+	/* We should not reach here. */
+	return (-1);
+}
+
+#endif
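
The swap primitive in atomic_ops.h is the only atomic the i386 KSE code needs: xchgl with a memory operand is implicitly locked, so atomic_swap32(dst, val, res) stores val into *dst and returns the previous contents through *res in one indivisible step. As an illustration only (not part of this commit), the same semantics can be written with C11 atomics, which did not exist when this header was written; the names swap32_equivalent and the ownership-token use below are hypothetical, a minimal sketch rather than how libpthread itself calls the macro.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Portable stand-in for atomic_swap32(): same observable behavior. */
static void
swap32_equivalent(_Atomic intptr_t *dst, intptr_t val, intptr_t *res)
{
        /* atomic_exchange is a full read-modify-write, like a locked xchgl. */
        *res = atomic_exchange(dst, val);
}

int
main(void)
{
        _Atomic intptr_t owner = 0;     /* 0 == unowned */
        intptr_t prev;

        /* Claim the token and, in the same step, learn who held it before. */
        swap32_equivalent(&owner, (intptr_t)42, &prev);
        printf("previous owner token: %ld\n", (long)prev);
        return (0);
}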
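The other machine-dependent building block in pthread_md.h is %gs-relative addressing: _kcb_set() installs a struct kcb as the KSE's %gs base through i386_set_gsbase(), after which KCB_GET32()/KCB_SET32() reach any kcb field with a single "movl %gs:offset" and no function call or lock, the data being per-KSE by construction. The sketch below (hypothetical, FreeBSD/i386 only, not part of this commit) shows that pattern standalone; demo_kcb is an invented structure, and the original %gs base is saved with i386_get_gsbase() and restored before calling back into libc so rtld's own use of %gs keeps working.

#include <sys/types.h>
#include <machine/sysarch.h>    /* i386_get_gsbase(), i386_set_gsbase() */
#include <stdio.h>

struct demo_kcb {
        struct demo_kcb *kcb_self;      /* offset 0, a self pointer as in struct kcb */
        unsigned int     kcb_value;     /* offset 4 with 4-byte i386 pointers */
};

int
main(void)
{
        static struct demo_kcb kcb;
        void *oldbase;
        unsigned int v;

        kcb.kcb_self = &kcb;
        kcb.kcb_value = 1234;

        if (i386_get_gsbase(&oldbase) != 0)
                return (1);
        if (i386_set_gsbase(&kcb) != 0)         /* what _kcb_set() does */
                return (1);
        /* Equivalent of KCB_GET32(kcb_value): one %gs-relative load. */
        __asm __volatile("movl %%gs:4, %0" : "=r" (v));
        i386_set_gsbase(oldbase);               /* restore libc/rtld's %gs base */

        printf("kcb_value read via %%gs: %u\n", v);
        return (0);
}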