author     gonzo <gonzo@FreeBSD.org>    2012-08-15 03:03:03 +0000
committer  gonzo <gonzo@FreeBSD.org>    2012-08-15 03:03:03 +0000
commit     032427f3e9854fccfdddaea8fb15ae4603391a11 (patch)
tree       68d86df1ea7d9bfea335c91632747716f5a0df4a  /sys/arm/include/atomic.h
parent     eca813ad76756aea4f70787cf7827d4b319cfe94 (diff)
Merging projects/armv6, part 1
Cumulative patch of changes that are not vendor-specific:

- ARMv6 and ARMv7 architecture support
- ARM SMP support
- VFP/Neon support
- ARM Generic Interrupt Controller driver
- Simplification of startup code for all platforms
Diffstat (limited to 'sys/arm/include/atomic.h')
 sys/arm/include/atomic.h | 514
 1 file changed, 455 insertions(+), 59 deletions(-)
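Before the diff itself, the core of the change: on ARMv6 and later, each primitive becomes a load-exclusive/store-exclusive (ldrex/strex) retry loop, and the _acq/_rel variants layer an explicit data memory barrier on top. The following is a minimal standalone sketch of that pattern, not part of the patch; it assumes GCC-style inline asm and an ARMv6/v7 target, and demo_fetchadd_32 is a hypothetical name:

#include <stdint.h>

static inline uint32_t
demo_fetchadd_32(volatile uint32_t *p, uint32_t val)
{
	uint32_t old, tmp, fail;

	__asm__ __volatile__(
	    "1:	ldrex	%0, [%3]\n"	/* old = *p; open the monitor */
	    "	add	%1, %0, %4\n"	/* tmp = old + val */
	    "	strex	%2, %1, [%3]\n"	/* try *p = tmp; %2 = 0 on success */
	    "	cmp	%2, #0\n"
	    "	bne	1b\n"		/* monitor was broken: retry */
	    : "=&r" (old), "=&r" (tmp), "=&r" (fail)
	    : "r" (p), "r" (val)
	    : "memory", "cc");
	return (old);
}

strex writes 0 to its status register when the store succeeded and 1 when another observer broke the exclusive monitor, so the loop retries until the whole read-modify-write has happened atomically. That is exactly the shape of the atomic_set/clear/add/subtract/fetchadd loops added below.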
diff --git a/sys/arm/include/atomic.h b/sys/arm/include/atomic.h
index a64fc4a..72d91e2 100644
--- a/sys/arm/include/atomic.h
+++ b/sys/arm/include/atomic.h
@@ -39,17 +39,17 @@
#ifndef _MACHINE_ATOMIC_H_
#define _MACHINE_ATOMIC_H_
-#ifndef _LOCORE
-
#include <sys/types.h>
#ifndef _KERNEL
#include <machine/sysarch.h>
+#else
+#include <machine/cpuconf.h>
#endif
-#define mb()
-#define wmb()
-#define rmb()
+#define mb()
+#define wmb()
+#define rmb()
#ifndef I32_bit
#define I32_bit (1 << 7) /* IRQ disable */
@@ -58,6 +58,356 @@
#define F32_bit (1 << 6) /* FIQ disable */
#endif
+/*
+ * It would be nice to use _HAVE_ARMv6_INSTRUCTIONS from machine/asm.h
+ * here, but that header can't be included here because this is C
+ * code. I would like to move the _HAVE_ARMv6_INSTRUCTIONS definition
+ * out of asm.h so it can be used in both asm and C code. - kientzle@
+ */
+#if defined (__ARM_ARCH_7__) || \
+ defined (__ARM_ARCH_7A__) || \
+ defined (__ARM_ARCH_6__) || \
+ defined (__ARM_ARCH_6J__) || \
+ defined (__ARM_ARCH_6K__) || \
+ defined (__ARM_ARCH_6Z__) || \
+ defined (__ARM_ARCH_6ZK__)
+static __inline void
+__do_dmb(void)
+{
+
+#if defined (__ARM_ARCH_7__) || defined (__ARM_ARCH_7A__)
+ __asm __volatile("dmb" : : : "memory");
+#else
+ __asm __volatile("mcr p15, 0, r0, c7, c10, 5" : : : "memory");
+#endif
+}
+
+#define ATOMIC_ACQ_REL_LONG(NAME) \
+static __inline void \
+atomic_##NAME##_acq_long(__volatile u_long *p, u_long v) \
+{ \
+ atomic_##NAME##_long(p, v); \
+ __do_dmb(); \
+} \
+ \
+static __inline void \
+atomic_##NAME##_rel_long(__volatile u_long *p, u_long v) \
+{ \
+ __do_dmb(); \
+ atomic_##NAME##_long(p, v); \
+}
+
+#define ATOMIC_ACQ_REL(NAME, WIDTH) \
+static __inline void \
+atomic_##NAME##_acq_##WIDTH(__volatile uint##WIDTH##_t *p, uint##WIDTH##_t v)\
+{ \
+ atomic_##NAME##_##WIDTH(p, v); \
+ __do_dmb(); \
+} \
+ \
+static __inline void \
+atomic_##NAME##_rel_##WIDTH(__volatile uint##WIDTH##_t *p, uint##WIDTH##_t v)\
+{ \
+ __do_dmb(); \
+ atomic_##NAME##_##WIDTH(p, v); \
+}
+
+static __inline void
+atomic_set_32(volatile uint32_t *address, uint32_t setmask)
+{
+ uint32_t tmp = 0, tmp2 = 0;
+
+ __asm __volatile("1: ldrex %0, [%2]\n"
+ "orr %0, %0, %3\n"
+ "strex %1, %0, [%2]\n"
+ "cmp %1, #0\n"
+ "bne 1b\n"
+ : "=&r" (tmp), "+r" (tmp2)
+ , "+r" (address), "+r" (setmask) : : "memory");
+
+}
+
+static __inline void
+atomic_set_long(volatile u_long *address, u_long setmask)
+{
+ u_long tmp = 0, tmp2 = 0;
+
+ __asm __volatile("1: ldrex %0, [%2]\n"
+ "orr %0, %0, %3\n"
+ "strex %1, %0, [%2]\n"
+ "cmp %1, #0\n"
+ "bne 1b\n"
+ : "=&r" (tmp), "+r" (tmp2)
+ , "+r" (address), "+r" (setmask) : : "memory");
+
+}
+
+static __inline void
+atomic_clear_32(volatile uint32_t *address, uint32_t setmask)
+{
+ uint32_t tmp = 0, tmp2 = 0;
+
+ __asm __volatile("1: ldrex %0, [%2]\n"
+ "bic %0, %0, %3\n"
+ "strex %1, %0, [%2]\n"
+ "cmp %1, #0\n"
+ "bne 1b\n"
+ : "=&r" (tmp), "+r" (tmp2)
+ ,"+r" (address), "+r" (setmask) : : "memory");
+}
+
+static __inline void
+atomic_clear_long(volatile u_long *address, u_long setmask)
+{
+ u_long tmp = 0, tmp2 = 0;
+
+ __asm __volatile("1: ldrex %0, [%2]\n"
+ "bic %0, %0, %3\n"
+ "strex %1, %0, [%2]\n"
+ "cmp %1, #0\n"
+ "bne 1b\n"
+ : "=&r" (tmp), "+r" (tmp2)
+ ,"+r" (address), "+r" (setmask) : : "memory");
+}
+
+static __inline u_int32_t
+atomic_cmpset_32(volatile u_int32_t *p, volatile u_int32_t cmpval, volatile u_int32_t newval)
+{
+ uint32_t ret;
+
+ __asm __volatile("1: ldrex %0, [%1]\n"
+ "cmp %0, %2\n"
+ "movne %0, #0\n"
+ "bne 2f\n"
+ "strex %0, %3, [%1]\n"
+ "cmp %0, #0\n"
+ "bne 1b\n"
+ "moveq %0, #1\n"
+ "2:"
+ : "=&r" (ret)
+ ,"+r" (p), "+r" (cmpval), "+r" (newval) : : "memory");
+ return (ret);
+}
+
+static __inline u_long
+atomic_cmpset_long(volatile u_long *p, volatile u_long cmpval, volatile u_long newval)
+{
+ u_long ret;
+
+ __asm __volatile("1: ldrex %0, [%1]\n"
+ "cmp %0, %2\n"
+ "movne %0, #0\n"
+ "bne 2f\n"
+ "strex %0, %3, [%1]\n"
+ "cmp %0, #0\n"
+ "bne 1b\n"
+ "moveq %0, #1\n"
+ "2:"
+ : "=&r" (ret)
+ ,"+r" (p), "+r" (cmpval), "+r" (newval) : : "memory");
+ return (ret);
+}
+
+static __inline u_int32_t
+atomic_cmpset_acq_32(volatile u_int32_t *p, volatile u_int32_t cmpval, volatile u_int32_t newval)
+{
+ u_int32_t ret = atomic_cmpset_32(p, cmpval, newval);
+
+ __do_dmb();
+ return (ret);
+}
+
+static __inline u_long
+atomic_cmpset_acq_long(volatile u_long *p, volatile u_long cmpval, volatile u_long newval)
+{
+ u_long ret = atomic_cmpset_long(p, cmpval, newval);
+
+ __do_dmb();
+ return (ret);
+}
+
+static __inline u_int32_t
+atomic_cmpset_rel_32(volatile u_int32_t *p, volatile u_int32_t cmpval, volatile u_int32_t newval)
+{
+
+ __do_dmb();
+ return (atomic_cmpset_32(p, cmpval, newval));
+}
+
+static __inline u_long
+atomic_cmpset_rel_long(volatile u_long *p, volatile u_long cmpval, volatile u_long newval)
+{
+
+ __do_dmb();
+ return (atomic_cmpset_long(p, cmpval, newval));
+}
+
+
+static __inline void
+atomic_add_32(volatile u_int32_t *p, u_int32_t val)
+{
+ uint32_t tmp = 0, tmp2 = 0;
+
+ __asm __volatile("1: ldrex %0, [%2]\n"
+ "add %0, %0, %3\n"
+ "strex %1, %0, [%2]\n"
+ "cmp %1, #0\n"
+ "bne 1b\n"
+ : "=&r" (tmp), "+r" (tmp2)
+ ,"+r" (p), "+r" (val) : : "memory");
+}
+
+static __inline void
+atomic_add_long(volatile u_long *p, u_long val)
+{
+ u_long tmp = 0, tmp2 = 0;
+
+ __asm __volatile("1: ldrex %0, [%2]\n"
+ "add %0, %0, %3\n"
+ "strex %1, %0, [%2]\n"
+ "cmp %1, #0\n"
+ "bne 1b\n"
+ : "=&r" (tmp), "+r" (tmp2)
+ ,"+r" (p), "+r" (val) : : "memory");
+}
+
+static __inline void
+atomic_subtract_32(volatile u_int32_t *p, u_int32_t val)
+{
+ uint32_t tmp = 0, tmp2 = 0;
+
+ __asm __volatile("1: ldrex %0, [%2]\n"
+ "sub %0, %0, %3\n"
+ "strex %1, %0, [%2]\n"
+ "cmp %1, #0\n"
+ "bne 1b\n"
+ : "=&r" (tmp), "+r" (tmp2)
+ ,"+r" (p), "+r" (val) : : "memory");
+}
+
+static __inline void
+atomic_subtract_long(volatile u_long *p, u_long val)
+{
+ u_long tmp = 0, tmp2 = 0;
+
+ __asm __volatile("1: ldrex %0, [%2]\n"
+ "sub %0, %0, %3\n"
+ "strex %1, %0, [%2]\n"
+ "cmp %1, #0\n"
+ "bne 1b\n"
+ : "=&r" (tmp), "+r" (tmp2)
+ ,"+r" (p), "+r" (val) : : "memory");
+}
+
+ATOMIC_ACQ_REL(clear, 32)
+ATOMIC_ACQ_REL(add, 32)
+ATOMIC_ACQ_REL(subtract, 32)
+ATOMIC_ACQ_REL(set, 32)
+ATOMIC_ACQ_REL_LONG(clear)
+ATOMIC_ACQ_REL_LONG(add)
+ATOMIC_ACQ_REL_LONG(subtract)
+ATOMIC_ACQ_REL_LONG(set)
+
+#undef ATOMIC_ACQ_REL
+#undef ATOMIC_ACQ_REL_LONG
+
+static __inline uint32_t
+atomic_fetchadd_32(volatile uint32_t *p, uint32_t val)
+{
+ uint32_t tmp = 0, tmp2 = 0, ret = 0;
+
+ __asm __volatile("1: ldrex %0, [%3]\n"
+ "add %1, %0, %4\n"
+ "strex %2, %1, [%3]\n"
+ "cmp %2, #0\n"
+ "bne 1b\n"
+ : "+r" (ret), "=&r" (tmp), "+r" (tmp2)
+ ,"+r" (p), "+r" (val) : : "memory");
+ return (ret);
+}
+
+static __inline uint32_t
+atomic_readandclear_32(volatile u_int32_t *p)
+{
+ uint32_t ret, tmp = 0, tmp2 = 0;
+
+ __asm __volatile("1: ldrex %0, [%3]\n"
+ "mov %1, #0\n"
+ "strex %2, %1, [%3]\n"
+ "cmp %2, #0\n"
+ "bne 1b\n"
+ : "=r" (ret), "=&r" (tmp), "+r" (tmp2)
+ ,"+r" (p) : : "memory");
+ return (ret);
+}
+
+static __inline uint32_t
+atomic_load_acq_32(volatile uint32_t *p)
+{
+ uint32_t v;
+
+ v = *p;
+ __do_dmb();
+ return (v);
+}
+
+static __inline void
+atomic_store_rel_32(volatile uint32_t *p, uint32_t v)
+{
+
+ __do_dmb();
+ *p = v;
+}
+
+static __inline u_long
+atomic_fetchadd_long(volatile u_long *p, u_long val)
+{
+ u_long tmp = 0, tmp2 = 0, ret = 0;
+
+ __asm __volatile("1: ldrex %0, [%3]\n"
+ "add %1, %0, %4\n"
+ "strex %2, %1, [%3]\n"
+ "cmp %2, #0\n"
+ "bne 1b\n"
+ : "+r" (ret), "=&r" (tmp), "+r" (tmp2)
+ ,"+r" (p), "+r" (val) : : "memory");
+ return (ret);
+}
+
+static __inline u_long
+atomic_readandclear_long(volatile u_long *p)
+{
+ u_long ret, tmp = 0, tmp2 = 0;
+
+ __asm __volatile("1: ldrex %0, [%3]\n"
+ "mov %1, #0\n"
+ "strex %2, %1, [%3]\n"
+ "cmp %2, #0\n"
+ "bne 1b\n"
+ : "=r" (ret), "=&r" (tmp), "+r" (tmp2)
+ ,"+r" (p) : : "memory");
+ return (ret);
+}
+
+static __inline u_long
+atomic_load_acq_long(volatile u_long *p)
+{
+ u_long v;
+
+ v = *p;
+ __do_dmb();
+ return (v);
+}
+
+static __inline void
+atomic_store_rel_long(volatile u_long *p, u_long v)
+{
+
+ __do_dmb();
+ *p = v;
+}
+#else /* < armv6 */
+
#define __with_interrupts_disabled(expr) \
do { \
u_int cpsr_save, tmp; \
@@ -287,6 +637,83 @@ atomic_fetchadd_32(volatile uint32_t *p, uint32_t v)
#endif /* _KERNEL */
+
+static __inline uint32_t
+atomic_readandclear_32(volatile u_int32_t *p)
+{
+
+ return (__swp(0, p));
+}
+
+#define atomic_cmpset_rel_32 atomic_cmpset_32
+#define atomic_cmpset_acq_32 atomic_cmpset_32
+#define atomic_set_rel_32 atomic_set_32
+#define atomic_set_acq_32 atomic_set_32
+#define atomic_clear_rel_32 atomic_clear_32
+#define atomic_clear_acq_32 atomic_clear_32
+#define atomic_add_rel_32 atomic_add_32
+#define atomic_add_acq_32 atomic_add_32
+#define atomic_subtract_rel_32 atomic_subtract_32
+#define atomic_subtract_acq_32 atomic_subtract_32
+#define atomic_store_rel_32 atomic_store_32
+#define atomic_store_rel_long atomic_store_long
+#define atomic_load_acq_32 atomic_load_32
+#define atomic_load_acq_long atomic_load_long
+#undef __with_interrupts_disabled
+
+static __inline void
+atomic_add_long(volatile u_long *p, u_long v)
+{
+
+ atomic_add_32((volatile uint32_t *)p, v);
+}
+
+static __inline void
+atomic_clear_long(volatile u_long *p, u_long v)
+{
+
+ atomic_clear_32((volatile uint32_t *)p, v);
+}
+
+static __inline int
+atomic_cmpset_long(volatile u_long *dst, u_long old, u_long newe)
+{
+
+ return (atomic_cmpset_32((volatile uint32_t *)dst, old, newe));
+}
+
+static __inline u_long
+atomic_fetchadd_long(volatile u_long *p, u_long v)
+{
+
+ return (atomic_fetchadd_32((volatile uint32_t *)p, v));
+}
+
+static __inline void
+atomic_readandclear_long(volatile u_long *p)
+{
+
+ atomic_readandclear_32((volatile uint32_t *)p);
+}
+
+static __inline void
+atomic_set_long(volatile u_long *p, u_long v)
+{
+
+ atomic_set_32((volatile uint32_t *)p, v);
+}
+
+static __inline void
+atomic_subtract_long(volatile u_long *p, u_long v)
+{
+
+ atomic_subtract_32((volatile uint32_t *)p, v);
+}
+
+
+
+#endif /* Arch >= v6 */
+
static __inline int
atomic_load_32(volatile uint32_t *v)
{
@@ -300,88 +727,57 @@ atomic_store_32(volatile uint32_t *dst, uint32_t src)
*dst = src;
}
-static __inline uint32_t
-atomic_readandclear_32(volatile u_int32_t *p)
+static __inline int
+atomic_load_long(volatile u_long *v)
{
- return (__swp(0, p));
+ return (*v);
}
-#undef __with_interrupts_disabled
-
-#endif /* _LOCORE */
+static __inline void
+atomic_store_long(volatile u_long *dst, u_long src)
+{
+ *dst = src;
+}
-#define atomic_add_long(p, v) \
- atomic_add_32((volatile u_int *)(p), (u_int)(v))
#define atomic_add_acq_long atomic_add_long
#define atomic_add_rel_long atomic_add_long
-#define atomic_subtract_long(p, v) \
- atomic_subtract_32((volatile u_int *)(p), (u_int)(v))
#define atomic_subtract_acq_long atomic_subtract_long
#define atomic_subtract_rel_long atomic_subtract_long
-#define atomic_clear_long(p, v) \
- atomic_clear_32((volatile u_int *)(p), (u_int)(v))
#define atomic_clear_acq_long atomic_clear_long
#define atomic_clear_rel_long atomic_clear_long
-#define atomic_set_long(p, v) \
- atomic_set_32((volatile u_int *)(p), (u_int)(v))
#define atomic_set_acq_long atomic_set_long
#define atomic_set_rel_long atomic_set_long
-#define atomic_cmpset_long(dst, old, new) \
- atomic_cmpset_32((volatile u_int *)(dst), (u_int)(old), (u_int)(new))
#define atomic_cmpset_acq_long atomic_cmpset_long
#define atomic_cmpset_rel_long atomic_cmpset_long
-#define atomic_fetchadd_long(p, v) \
- atomic_fetchadd_32((volatile u_int *)(p), (u_int)(v))
-#define atomic_readandclear_long(p) \
- atomic_readandclear_long((volatile u_int *)(p))
-#define atomic_load_long(p) \
- atomic_load_32((volatile u_int *)(p))
#define atomic_load_acq_long atomic_load_long
-#define atomic_store_rel_long(p, v) \
- atomic_store_rel_32((volatile u_int *)(p), (u_int)(v))
-
#define atomic_clear_ptr atomic_clear_32
#define atomic_set_ptr atomic_set_32
-#define atomic_cmpset_ptr(dst, old, new) \
- atomic_cmpset_32((volatile u_int *)(dst), (u_int)(old), (u_int)(new))
-#define atomic_cmpset_rel_ptr atomic_cmpset_ptr
-#define atomic_cmpset_acq_ptr atomic_cmpset_ptr
+#define atomic_cmpset_ptr atomic_cmpset_32
+#define atomic_cmpset_rel_ptr atomic_cmpset_rel_32
+#define atomic_cmpset_acq_ptr atomic_cmpset_acq_32
#define atomic_store_ptr atomic_store_32
#define atomic_store_rel_ptr atomic_store_ptr
#define atomic_add_int atomic_add_32
-#define atomic_add_acq_int atomic_add_int
-#define atomic_add_rel_int atomic_add_int
+#define atomic_add_acq_int atomic_add_acq_32
+#define atomic_add_rel_int atomic_add_rel_32
#define atomic_subtract_int atomic_subtract_32
-#define atomic_subtract_acq_int atomic_subtract_int
-#define atomic_subtract_rel_int atomic_subtract_int
+#define atomic_subtract_acq_int atomic_subtract_acq_32
+#define atomic_subtract_rel_int atomic_subtract_rel_32
#define atomic_clear_int atomic_clear_32
-#define atomic_clear_acq_int atomic_clear_int
-#define atomic_clear_rel_int atomic_clear_int
+#define atomic_clear_acq_int atomic_clear_acq_32
+#define atomic_clear_rel_int atomic_clear_rel_32
#define atomic_set_int atomic_set_32
-#define atomic_set_acq_int atomic_set_int
-#define atomic_set_rel_int atomic_set_int
+#define atomic_set_acq_int atomic_set_acq_32
+#define atomic_set_rel_int atomic_set_rel_32
#define atomic_cmpset_int atomic_cmpset_32
-#define atomic_cmpset_acq_int atomic_cmpset_int
-#define atomic_cmpset_rel_int atomic_cmpset_int
+#define atomic_cmpset_acq_int atomic_cmpset_acq_32
+#define atomic_cmpset_rel_int atomic_cmpset_rel_32
#define atomic_fetchadd_int atomic_fetchadd_32
#define atomic_readandclear_int atomic_readandclear_32
-#define atomic_load_acq_int atomic_load_32
-#define atomic_store_rel_int atomic_store_32
-
-#define atomic_add_acq_32 atomic_add_32
-#define atomic_add_rel_32 atomic_add_32
-#define atomic_subtract_acq_32 atomic_subtract_32
-#define atomic_subtract_rel_32 atomic_subtract_32
-#define atomic_clear_acq_32 atomic_clear_32
-#define atomic_clear_rel_32 atomic_clear_32
-#define atomic_set_acq_32 atomic_set_32
-#define atomic_set_rel_32 atomic_set_32
-#define atomic_cmpset_acq_32 atomic_cmpset_32
-#define atomic_cmpset_rel_32 atomic_cmpset_32
-#define atomic_load_acq_32 atomic_load_32
-#define atomic_store_rel_32 atomic_store_32
+#define atomic_load_acq_int atomic_load_acq_32
+#define atomic_store_rel_int atomic_store_rel_32
#endif /* _MACHINE_ATOMIC_H_ */
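For context on how the new _acq/_rel variants are meant to be consumed (illustrative only, not from this commit): the acquire barrier after taking a lock keeps critical-section accesses from being reordered before it, and the release barrier before dropping the lock keeps them from being reordered after it. A minimal sketch, assuming a kernel context where <machine/atomic.h> is includable; demo_lock, demo_acquire, and demo_release are hypothetical names:

#include <machine/atomic.h>

static volatile uint32_t demo_lock = 0;

static __inline void
demo_acquire(void)
{

	/* cmpset_acq_32: success implies a barrier, so critical-section
	 * loads cannot be hoisted above the lock acquisition. */
	while (atomic_cmpset_acq_32(&demo_lock, 0, 1) == 0)
		continue;		/* spin until we install the 1 */
}

static __inline void
demo_release(void)
{

	/* store_rel_32: barrier first, then the store, so critical-section
	 * stores cannot sink below the unlock. */
	atomic_store_rel_32(&demo_lock, 0);
}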