author     cognet <cognet@FreeBSD.org>  2004-05-14 12:21:29 +0000
committer  cognet <cognet@FreeBSD.org>  2004-05-14 12:21:29 +0000
commit     0ff01bbcf834678a7bdfe37cd91f67f0ef79f809 (patch)
tree       4291176523e22937ec50ea1983716b8bc605267c /lib/libkse/arch/arm/include
parent     0bf8d71f621b5d2707d81b544ee812ca9e4d3ed5 (diff)
download   FreeBSD-src-0ff01bbcf834678a7bdfe37cd91f67f0ef79f809.zip
           FreeBSD-src-0ff01bbcf834678a7bdfe37cd91f67f0ef79f809.tar.gz
ARM bits for libpthread. It has no chance of working yet and should be
considered stubs.
Diffstat (limited to 'lib/libkse/arch/arm/include')
-rw-r--r--  lib/libkse/arch/arm/include/atomic_ops.h    51
-rw-r--r--  lib/libkse/arch/arm/include/pthread_md.h   243
2 files changed, 294 insertions, 0 deletions
diff --git a/lib/libkse/arch/arm/include/atomic_ops.h b/lib/libkse/arch/arm/include/atomic_ops.h
new file mode 100644
index 0000000..e36a6bf
--- /dev/null
+++ b/lib/libkse/arch/arm/include/atomic_ops.h
@@ -0,0 +1,51 @@
+/*-
+ * Copyright (c) 2001 Daniel Eischen <deischen@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Neither the name of the author nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _ATOMIC_OPS_H_
+#define _ATOMIC_OPS_H_
+
+#include <sys/types.h>	/* for intptr_t */
+
+/*
+ * Atomic swap:
+ * Atomic (tmp = *dst, *dst = val), then *res = tmp
+ *
+ * void atomic_swap32(intptr_t *dst, intptr_t val, intptr_t *res);
+ */
+static inline void
+atomic_swap32(intptr_t *dst, intptr_t val, intptr_t *res)
+{
+	/* swp loads the old word from [dst] into *res and stores val. */
+	__asm __volatile(
+	    "swp %0, %2, [%1]"
+	    : "=&r" (*res) : "r" (dst), "r" (val) : "memory");
+}
+
+#define	atomic_swap_ptr(d, v, r)	\
+	atomic_swap32((intptr_t *)(d), (intptr_t)(v), (intptr_t *)(r))
+
+#define	atomic_swap_int(d, v, r)	\
+	atomic_swap32((intptr_t *)(d), (intptr_t)(v), (intptr_t *)(r))
+
+#endif /* _ATOMIC_OPS_H_ */
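
As a rough usage sketch (not part of this commit), the swap primitive above is enough to build a simple test-and-set spinlock. The lock word and helper names below are made up, and the include assumes atomic_ops.h is on the include path:

    #include "atomic_ops.h"

    /* Hypothetical lock word: 0 = free, 1 = held. */
    static intptr_t demo_lock_word = 0;

    static void
    demo_lock(void)
    {
    	intptr_t old;

    	/* Spin until the value we swap out is 0, i.e. the lock was free. */
    	do {
    		atomic_swap_int(&demo_lock_word, 1, &old);
    	} while (old != 0);
    }

    static void
    demo_unlock(void)
    {
    	intptr_t old;

    	/* Swap 0 back in to release the lock. */
    	atomic_swap_int(&demo_lock_word, 0, &old);
    }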
diff --git a/lib/libkse/arch/arm/include/pthread_md.h b/lib/libkse/arch/arm/include/pthread_md.h
new file mode 100644
index 0000000..107eb03
--- /dev/null
+++ b/lib/libkse/arch/arm/include/pthread_md.h
@@ -0,0 +1,243 @@
+/*-
+ * Copyright (c) 2003 Jake Burkholder <jake@freebsd.org>.
+ * Copyright (c) 2003 Marcel Moolenaar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * Machine-dependent thread prototypes/definitions for the thread kernel.
+ */
+#ifndef _PTHREAD_MD_H_
+#define _PTHREAD_MD_H_
+
+#include <sys/kse.h>
+#include <stddef.h>
+#include <ucontext.h>
+
+#define KSE_STACKSIZE 16384
+
+int _thr_setcontext(mcontext_t *, intptr_t, intptr_t *);
+int _thr_getcontext(mcontext_t *);
+
+#define THR_GETCONTEXT(ucp) _thr_getcontext(&(ucp)->uc_mcontext)
+#define THR_SETCONTEXT(ucp) _thr_setcontext(&(ucp)->uc_mcontext, 0, NULL)
+
+#define PER_THREAD
+
+struct kcb;
+struct kse;
+struct pthread;
+struct tcb;
+struct tdv; /* We don't know what this is yet? */
+
+
+/*
+ * %r6 points to one of these. We define the static TLS as an array
+ * of long double to enforce 16-byte alignment of the TLS memory.
+ *
+ * XXX - Both static and dynamic allocation of any of these structures
+ * will result in a valid, well-aligned thread pointer???
+ */
+struct arm_tp {
+ struct tdv *tp_tdv; /* dynamic TLS */
+ uint32_t _reserved_;
+ long double tp_tls[0]; /* static TLS */
+};
+
+struct tcb {
+ struct pthread *tcb_thread;
+ void *tcb_addr; /* allocated tcb address */
+ struct kcb *tcb_curkcb;
+ uint32_t tcb_isfake;
+ uint32_t tcb_spare[4];
+ struct kse_thr_mailbox tcb_tmbx; /* needs 32-byte alignment */
+ struct arm_tp tcb_tp;
+} __aligned(32);
+
+struct kcb {
+ struct kse_mailbox kcb_kmbx;
+ struct tcb kcb_faketcb;
+ struct tcb *kcb_curtcb;
+ struct kse *kcb_kse;
+};
+
+register struct arm_tp *_tp __asm("%r6");
+
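+/*
+ * Recover the current thread's tcb from the thread pointer: _tp points
+ * at the arm_tp embedded near the end of the tcb, so subtracting that
+ * member's offset yields the start of the enclosing structure.
+ */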
+#define _tcb ((struct tcb*)((char*)(_tp) - offsetof(struct tcb, tcb_tp)))
+
+/*
+ * The kcb and tcb constructors.
+ */
+struct tcb *_tcb_ctor(struct pthread *);
+void _tcb_dtor(struct tcb *);
+struct kcb *_kcb_ctor(struct kse *kse);
+void _kcb_dtor(struct kcb *);
+
+/* Called from the KSE to set its private data. */
+static __inline void
+_kcb_set(struct kcb *kcb)
+{
+ /* There is no thread yet; use the fake tcb. */
+ _tp = &kcb->kcb_faketcb.tcb_tp;
+}
+
+/*
+ * Get the current kcb.
+ *
+ * This can only be called while in a critical region; don't
+ * worry about having the kcb changed out from under us.
+ */
+static __inline struct kcb *
+_kcb_get(void)
+{
+ return (_tcb->tcb_curkcb);
+}
+
+/*
+ * Enter a critical region.
+ *
+ * Read and clear km_curthread in the kse mailbox.
+ */
+static __inline struct kse_thr_mailbox *
+_kcb_critical_enter(void)
+{
+ struct kse_thr_mailbox *crit;
+ uint32_t flags;
+
+ if (_tcb->tcb_isfake != 0) {
+ /*
+ * We already are in a critical region since
+ * there is no current thread.
+ */
+ crit = NULL;
+ } else {
+ flags = _tcb->tcb_tmbx.tm_flags;
+ _tcb->tcb_tmbx.tm_flags |= TMF_NOUPCALL;
+ crit = _tcb->tcb_curkcb->kcb_kmbx.km_curthread;
+ _tcb->tcb_curkcb->kcb_kmbx.km_curthread = NULL;
+ _tcb->tcb_tmbx.tm_flags = flags;
+ }
+ return (crit);
+}
+
+static __inline void
+_kcb_critical_leave(struct kse_thr_mailbox *crit)
+{
+ /* No need to do anything if this is a fake tcb. */
+ if (_tcb->tcb_isfake == 0)
+ _tcb->tcb_curkcb->kcb_kmbx.km_curthread = crit;
+}
+
+static __inline int
+_kcb_in_critical(void)
+{
+ uint32_t flags;
+ int ret;
+
+ if (_tcb->tcb_isfake != 0) {
+ /*
+ * We are in a critical region since there is no
+ * current thread.
+ */
+ ret = 1;
+ } else {
+ flags = _tcb->tcb_tmbx.tm_flags;
+ _tcb->tcb_tmbx.tm_flags |= TMF_NOUPCALL;
+ ret = (_tcb->tcb_curkcb->kcb_kmbx.km_curthread == NULL);
+ _tcb->tcb_tmbx.tm_flags = flags;
+ }
+ return (ret);
+}
+
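+/*
+ * Install tcb as the current thread on kcb; a NULL tcb selects the
+ * kcb's fake tcb.  The thread pointer is updated to match.
+ */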
+static __inline void
+_tcb_set(struct kcb *kcb, struct tcb *tcb)
+{
+ if (tcb == NULL)
+ tcb = &kcb->kcb_faketcb;
+ kcb->kcb_curtcb = tcb;
+ tcb->tcb_curkcb = kcb;
+ _tp = &tcb->tcb_tp;
+}
+
+static __inline struct tcb *
+_tcb_get(void)
+{
+ return (_tcb);
+}
+
+static __inline struct pthread *
+_get_curthread(void)
+{
+ return (_tcb->tcb_thread);
+}
+
+/*
+ * Get the current kse.
+ *
+ * Like _kcb_get(), this can only be called while in a critical region.
+ */
+static __inline struct kse *
+_get_curkse(void)
+{
+ return (_tcb->tcb_curkcb->kcb_kse);
+}
+
+void _arm_enter_uts(kse_func_t uts, struct kse_mailbox *km, void *stack,
+ size_t stacksz);
+
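+/*
+ * Save the current thread's context and enter the UTS (upcall handler)
+ * on the KSE's own stack.  Returns 0 when the saved context is later
+ * resumed, or -1 if the UTS could not be entered.
+ */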
+static __inline int
+_thread_enter_uts(struct tcb *tcb, struct kcb *kcb)
+{
+ if (_thr_getcontext(&tcb->tcb_tmbx.tm_context.uc_mcontext) == 0) {
+ /* Make the fake tcb the current thread. */
+ kcb->kcb_curtcb = &kcb->kcb_faketcb;
+ _tp = &kcb->kcb_faketcb.tcb_tp;
+ _arm_enter_uts(kcb->kcb_kmbx.km_func, &kcb->kcb_kmbx,
+ kcb->kcb_kmbx.km_stack.ss_sp,
+ kcb->kcb_kmbx.km_stack.ss_size);
+ /* We should not reach here. */
+ return (-1);
+ }
+ return (0);
+}
+
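+/*
+ * Switch to the thread described by tcb: make it current on kcb and
+ * restore its saved context.  With setmbox set, km_curthread is pointed
+ * at the thread's mailbox as part of restoring the context.
+ */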
+static __inline int
+_thread_switch(struct kcb *kcb, struct tcb *tcb, int setmbox)
+{
+ mcontext_t *mc;
+
+ _tcb_set(kcb, tcb);
+ mc = &tcb->tcb_tmbx.tm_context.uc_mcontext;
+ if (setmbox)
+ _thr_setcontext(mc, (intptr_t)&tcb->tcb_tmbx,
+ (intptr_t *)&kcb->kcb_kmbx.km_curthread);
+ else
+ _thr_setcontext(mc, 0, NULL);
+ /* We should not reach here. */
+ return (-1);
+}
+
+#endif /* _PTHREAD_MD_H_ */
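
For reference, the offsetof() arithmetic behind the _tcb macro above can be seen in isolation with the standalone sketch below; the demo_ structures are simplified stand-ins, not the real tcb layout:

    #include <stddef.h>
    #include <stdio.h>

    /* Simplified stand-ins for arm_tp and tcb from pthread_md.h. */
    struct demo_tp {
    	void		*tp_tdv;
    };

    struct demo_tcb {
    	int		 tcb_isfake;
    	struct demo_tp	 tcb_tp;	/* the thread pointer targets this member */
    };

    /* Recover the enclosing tcb from a pointer to its embedded tp member. */
    #define	DEMO_TCB(tp)						\
    	((struct demo_tcb *)((char *)(tp) - offsetof(struct demo_tcb, tcb_tp)))

    int
    main(void)
    {
    	struct demo_tcb tcb = { .tcb_isfake = 1 };
    	struct demo_tp *tp = &tcb.tcb_tp;

    	/* Prints 1: DEMO_TCB(tp) points back at the original tcb. */
    	printf("%d\n", DEMO_TCB(tp)->tcb_isfake);
    	return (0);
    }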