author     mjg <mjg@FreeBSD.org>  2017-03-16 06:00:27 +0000
committer  mjg <mjg@FreeBSD.org>  2017-03-16 06:00:27 +0000
commit     b8af6b78be197b6a24bf372442dcb6e044d729b3 (patch)
tree       0496f4e01ec297971a9b02afb62b80c63b0dd969 /sys/mips
parent     3100d6f6dfd7bea82a2fe7136cf72d8423b715bd (diff)
MFC r311169,r311898,r312925,r312973,r312975,r313007,r313040,r313080,
r313254,r313341

amd64: add atomic_fcmpset
==
sparc64: add atomic_fcmpset
==
Implement atomic_fcmpset_* for arm and arm64.
==
Add atomic_fcmpset_*() inlines for powerpc

Summary: atomic_fcmpset_*() is analogous to atomic_cmpset(), but saves
off the read value from the target memory location into the 'old'
pointer in the case of failure.
==
i386: add atomic_fcmpset
==
Don't retry a lost reservation in atomic_fcmpset()

The desired behavior of atomic_fcmpset_() is to always exit on error.
Instead of retrying on lost reservation, leave the retry to the caller,
and return
==
Add atomic_fcmpset_*() inlines for MIPS

atomic_fcmpset_*() is analogous to atomic_cmpset(), but saves off the
read value from the target memory location into the 'old' pointer.
==
i386: fixup fcmpset

An incorrect output specifier was used which worked with clang by
accident, but breaks with the in-tree gcc version. While here plug a
whitespace nit.
==
Implement atomic_fcmpset_*() for RISC-V.
==
Use 64bit store instruction in atomic_fcmpset_64.
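The point of the fcmpset contract described above is that the caller,
not the primitive, owns the retry loop: on a compare failure the value
actually observed is written back through the 'old' pointer, so the
next attempt needs no extra load. A minimal caller-side sketch (an
illustration only, not part of this patch; counter_add is a
hypothetical helper):

#include <machine/atomic.h>

static void
counter_add(volatile u_int *counter, u_int delta)
{
	u_int old;

	old = *counter;		/* one plain read seeds 'old' */
	while (!atomic_fcmpset_int(counter, &old, old + delta)) {
		/*
		 * The failed fcmpset already refreshed 'old' with the
		 * value it observed; recompute the new value and retry.
		 */
	}
}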
Diffstat (limited to 'sys/mips')
-rw-r--r--  sys/mips/include/atomic.h  103
1 file changed, 101 insertions, 2 deletions
diff --git a/sys/mips/include/atomic.h b/sys/mips/include/atomic.h
index d4b951e..f29ad8c 100644
--- a/sys/mips/include/atomic.h
+++ b/sys/mips/include/atomic.h
@@ -362,7 +362,7 @@ atomic_load_64(__volatile uint64_t *p, uint64_t *v)
* zero if the compare failed, nonzero otherwise.
*/
static __inline uint32_t
-atomic_cmpset_32(__volatile uint32_t* p, uint32_t cmpval, uint32_t newval)
+atomic_cmpset_32(__volatile uint32_t *p, uint32_t cmpval, uint32_t newval)
{
uint32_t ret;
@@ -405,6 +405,46 @@ atomic_cmpset_rel_32(__volatile uint32_t *p, uint32_t cmpval, uint32_t newval)
return (atomic_cmpset_32(p, cmpval, newval));
}
+static __inline uint32_t
+atomic_fcmpset_32(__volatile uint32_t *p, uint32_t *cmpval, uint32_t newval)
+{
+ uint32_t ret;
+
+ __asm __volatile (
+ "1:\n\t"
+ "ll %0, %1\n\t" /* load old value */
+ "bne %0, %4, 2f\n\t" /* compare */
+ "move %0, %3\n\t" /* value to store */
+ "sc %0, %1\n\t" /* attempt to store */
+ "beqz %0, 1b\n\t" /* if it failed, spin */
+ "j 3f\n\t"
+ "2:\n\t"
+ "sw %0, %2\n\t" /* save old value */
+ "li %0, 0\n\t"
+ "3:\n"
+ : "=&r" (ret), "+m" (*p), "=m" (*cmpval)
+ : "r" (newval), "r" (*cmpval)
+ : "memory");
+ return ret;
+}
+
+static __inline uint32_t
+atomic_fcmpset_acq_32(__volatile uint32_t *p, uint32_t *cmpval, uint32_t newval)
+{
+ int retval;
+
+ retval = atomic_fcmpset_32(p, cmpval, newval);
+ mips_sync();
+ return (retval);
+}
+
+static __inline uint32_t
+atomic_fcmpset_rel_32(__volatile uint32_t *p, uint32_t *cmpval, uint32_t newval)
+{
+ mips_sync();
+ return (atomic_fcmpset_32(p, cmpval, newval));
+}
+
/*
* Atomically add the value of v to the integer pointed to by p and return
* the previous value of *p.
@@ -431,7 +471,7 @@ atomic_fetchadd_32(__volatile uint32_t *p, uint32_t v)
* zero if the compare failed, nonzero otherwise.
*/
static __inline uint64_t
-atomic_cmpset_64(__volatile uint64_t* p, uint64_t cmpval, uint64_t newval)
+atomic_cmpset_64(__volatile uint64_t *p, uint64_t cmpval, uint64_t newval)
{
uint64_t ret;
@@ -475,6 +515,47 @@ atomic_cmpset_rel_64(__volatile uint64_t *p, uint64_t cmpval, uint64_t newval)
return (atomic_cmpset_64(p, cmpval, newval));
}
+static __inline uint32_t
+atomic_fcmpset_64(__volatile uint64_t *p, uint64_t *cmpval, uint64_t newval)
+{
+ uint32_t ret;
+
+ __asm __volatile (
+ "1:\n\t"
+ "lld %0, %1\n\t" /* load old value */
+ "bne %0, %4, 2f\n\t" /* compare */
+ "move %0, %3\n\t" /* value to store */
+ "scd %0, %1\n\t" /* attempt to store */
+ "beqz %0, 1b\n\t" /* if it failed, spin */
+ "j 3f\n\t"
+ "2:\n\t"
+ "sd %0, %2\n\t" /* save old value */
+ "li %0, 0\n\t"
+ "3:\n"
+ : "=&r" (ret), "+m" (*p), "=m" (*cmpval)
+ : "r" (newval), "r" (*cmpval)
+ : "memory");
+
+ return ret;
+}
+
+static __inline uint64_t
+atomic_fcmpset_acq_64(__volatile uint64_t *p, uint64_t *cmpval, uint64_t newval)
+{
+ int retval;
+
+ retval = atomic_fcmpset_64(p, cmpval, newval);
+ mips_sync();
+ return (retval);
+}
+
+static __inline uint64_t
+atomic_fcmpset_rel_64(__volatile uint64_t *p, uint64_t *cmpval, uint64_t newval)
+{
+ mips_sync();
+ return (atomic_fcmpset_64(p, cmpval, newval));
+}
+
/*
* Atomically add the value of v to the integer pointed to by p and return
* the previous value of *p.
@@ -568,6 +649,9 @@ atomic_thread_fence_seq_cst(void)
#define atomic_cmpset_int atomic_cmpset_32
#define atomic_cmpset_acq_int atomic_cmpset_acq_32
#define atomic_cmpset_rel_int atomic_cmpset_rel_32
+#define atomic_fcmpset_int atomic_fcmpset_32
+#define atomic_fcmpset_acq_int atomic_fcmpset_acq_32
+#define atomic_fcmpset_rel_int atomic_fcmpset_rel_32
#define atomic_load_acq_int atomic_load_acq_32
#define atomic_store_rel_int atomic_store_rel_32
#define atomic_readandclear_int atomic_readandclear_32
@@ -597,6 +681,9 @@ atomic_thread_fence_seq_cst(void)
#define atomic_cmpset_long atomic_cmpset_64
#define atomic_cmpset_acq_long atomic_cmpset_acq_64
#define atomic_cmpset_rel_long atomic_cmpset_rel_64
+#define atomic_fcmpset_long atomic_fcmpset_64
+#define atomic_fcmpset_acq_long atomic_fcmpset_acq_64
+#define atomic_fcmpset_rel_long atomic_fcmpset_rel_64
#define atomic_load_acq_long atomic_load_acq_64
#define atomic_store_rel_long atomic_store_rel_64
#define atomic_fetchadd_long atomic_fetchadd_64
@@ -638,6 +725,15 @@ atomic_thread_fence_seq_cst(void)
#define atomic_cmpset_rel_long(p, cmpval, newval) \
atomic_cmpset_rel_32((volatile u_int *)(p), (u_int)(cmpval), \
(u_int)(newval))
+#define atomic_fcmpset_long(p, cmpval, newval) \
+ atomic_fcmpset_32((volatile u_int *)(p), (u_int *)(cmpval), \
+ (u_int)(newval))
+#define atomic_fcmpset_acq_long(p, cmpval, newval) \
+ atomic_fcmpset_acq_32((volatile u_int *)(p), (u_int *)(cmpval), \
+ (u_int)(newval))
+#define atomic_fcmpset_rel_long(p, cmpval, newval) \
+ atomic_fcmpset_rel_32((volatile u_int *)(p), (u_int *)(cmpval), \
+ (u_int)(newval))
#define atomic_load_acq_long(p) \
(u_long)atomic_load_acq_32((volatile u_int *)(p))
#define atomic_store_rel_long(p, v) \
@@ -665,6 +761,9 @@ atomic_thread_fence_seq_cst(void)
#define atomic_cmpset_ptr atomic_cmpset_long
#define atomic_cmpset_acq_ptr atomic_cmpset_acq_long
#define atomic_cmpset_rel_ptr atomic_cmpset_rel_long
+#define atomic_fcmpset_ptr atomic_fcmpset_long
+#define atomic_fcmpset_acq_ptr atomic_fcmpset_acq_long
+#define atomic_fcmpset_rel_ptr atomic_fcmpset_rel_long
#define atomic_load_acq_ptr atomic_load_acq_long
#define atomic_store_rel_ptr atomic_store_rel_long
#define atomic_readandclear_ptr atomic_readandclear_long
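For comparison, the contract implemented by the LL/SC sequences above
is the same one C11 gives atomic_compare_exchange_strong(): on failure
the observed value is stored back through the expected-value pointer
and false is returned, leaving any retry to the caller. A rough
userland model (an illustration under that assumption, not how the
kernel implements it; the in-kernel primitive must remain a single
inline-assembly sequence):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

static inline bool
fcmpset_32_model(_Atomic uint32_t *p, uint32_t *cmpval, uint32_t newval)
{
	/*
	 * On failure this writes the observed value into *cmpval and
	 * returns false -- exactly the fcmpset behavior described in
	 * the commit message.
	 */
	return (atomic_compare_exchange_strong(p, cmpval, newval));
}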