author     mjg <mjg@FreeBSD.org>  2017-03-16 06:00:27 +0000
committer  mjg <mjg@FreeBSD.org>  2017-03-16 06:00:27 +0000
commit     b8af6b78be197b6a24bf372442dcb6e044d729b3
tree       0496f4e01ec297971a9b02afb62b80c63b0dd969
parent     3100d6f6dfd7bea82a2fe7136cf72d8423b715bd
MFC r311169,r311898,r312925,r312973,r312975,r313007,r313040,r313080,
r313254,r313341

amd64: add atomic_fcmpset

==

sparc64: add atomic_fcmpset

==

Implement atomic_fcmpset_* for arm and arm64.

==

Add atomic_fcmpset_*() inlines for powerpc

Summary: atomic_fcmpset_*() is analogous to atomic_cmpset(), but saves
off the read value from the target memory location into the 'old'
pointer in the case of failure.

==

i386: add atomic_fcmpset

==

Don't retry a lost reservation in atomic_fcmpset()

The desired behavior of atomic_fcmpset_() is to always exit on error.
Instead of retrying on lost reservation, leave the retry to the caller,
and return.

==

Add atomic_fcmpset_*() inlines for MIPS

atomic_fcmpset_*() is analogous to atomic_cmpset(), but saves off the
read value from the target memory location into the 'old' pointer.

==

i386: fixup fcmpset

An incorrect output specifier was used which worked with clang by
accident, but breaks with the in-tree gcc version. While here plug a
whitespace nit.

==

Implement atomic_fcmpset_*() for RISC-V.

==

Use 64bit store instruction in atomic_fcmpset_64.
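The point of the fcmpset semantics described above is that a failed
compare-and-swap writes the observed value back through the 'old'
pointer, so a retry loop never needs to re-read the target explicitly.
A minimal sketch of such a loop, assuming a kernel context where
<machine/atomic.h> is available; the counter variable and the
counter_inc() helper are illustrative, not part of this commit:

	#include <sys/types.h>
	#include <machine/atomic.h>

	static volatile u_int counter;

	static void
	counter_inc(void)
	{
		u_int old, new;

		old = counter;		/* one explicit read up front */
		do {
			new = old + 1;	/* compute the desired value */
			/*
			 * On failure (including a lost reservation on
			 * LL/SC architectures such as powerpc),
			 * atomic_fcmpset_int() returns 0 and refreshes
			 * 'old' with the value read from the target,
			 * so no separate re-read is needed.
			 */
		} while (atomic_fcmpset_int(&counter, &old, new) == 0);
	}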
Diffstat (limited to 'sys/powerpc')
-rw-r--r--  sys/powerpc/include/atomic.h | 123 +
1 file changed, 123 insertions(+), 0 deletions(-)
diff --git a/sys/powerpc/include/atomic.h b/sys/powerpc/include/atomic.h
index c4db5ff..cd9fa0d 100644
--- a/sys/powerpc/include/atomic.h
+++ b/sys/powerpc/include/atomic.h
@@ -674,6 +674,129 @@ atomic_cmpset_rel_long(volatile u_long *p, u_long cmpval, u_long newval)
#define atomic_cmpset_rel_ptr atomic_cmpset_rel_int
#endif
+/*
+ * Atomically compare the value stored at *p with *cmpval and if the
+ * two values are equal, update the value of *p with newval. Returns
+ * zero if the compare failed and sets *cmpval to the read value from *p,
+ * nonzero otherwise.
+ */
+static __inline int
+atomic_fcmpset_int(volatile u_int *p, u_int *cmpval, u_int newval)
+{
+ int ret;
+
+#ifdef __GNUCLIKE_ASM
+ __asm __volatile (
+ "lwarx %0, 0, %3\n\t" /* load old value */
+ "cmplw %4, %0\n\t" /* compare */
+ "bne 1f\n\t" /* exit if not equal */
+ "stwcx. %5, 0, %3\n\t" /* attempt to store */
+ "bne- 1f\n\t" /* exit if failed */
+ "li %0, 1\n\t" /* success - retval = 1 */
+ "b 2f\n\t" /* we've succeeded */
+ "1:\n\t"
+ "stwcx. %0, 0, %3\n\t" /* clear reservation (74xx) */
+ "stwx %0, 0, %7\n\t"
+ "li %0, 0\n\t" /* failure - retval = 0 */
+ "2:\n\t"
+ : "=&r" (ret), "=m" (*p), "=m" (*cmpval)
+ : "r" (p), "r" (*cmpval), "r" (newval), "m" (*p), "r"(cmpval)
+ : "cr0", "memory");
+#endif
+
+ return (ret);
+}
+
+static __inline int
+atomic_fcmpset_long(volatile u_long *p, u_long *cmpval, u_long newval)
+{
+ int ret;
+
+#ifdef __GNUCLIKE_ASM
+ __asm __volatile (
+ #ifdef __powerpc64__
+ "ldarx %0, 0, %3\n\t" /* load old value */
+ "cmpld %4, %0\n\t" /* compare */
+ "bne 1f\n\t" /* exit if not equal */
+ "stdcx. %5, 0, %3\n\t" /* attempt to store */
+ #else
+ "lwarx %0, 0, %3\n\t" /* load old value */
+ "cmplw %4, %0\n\t" /* compare */
+ "bne 1f\n\t" /* exit if not equal */
+ "stwcx. %5, 0, %3\n\t" /* attempt to store */
+ #endif
+ "bne- 1f\n\t" /* exit if failed */
+ "li %0, 1\n\t" /* success - retval = 1 */
+ "b 2f\n\t" /* we've succeeded */
+ "1:\n\t"
+ #ifdef __powerpc64__
+ "stdcx. %0, 0, %3\n\t" /* clear reservation (74xx) */
+ "stdx %0, 0, %7\n\t"
+ #else
+ "stwcx. %0, 0, %3\n\t" /* clear reservation (74xx) */
+ "stwx %0, 0, %7\n\t"
+ #endif
+ "li %0, 0\n\t" /* failure - retval = 0 */
+ "2:\n\t"
+ : "=&r" (ret), "=m" (*p), "=m" (*cmpval)
+ : "r" (p), "r" (*cmpval), "r" (newval), "m" (*p), "r"(cmpval)
+ : "cr0", "memory");
+#endif
+
+ return (ret);
+}
+
+static __inline int
+atomic_fcmpset_acq_int(volatile u_int *p, u_int *cmpval, u_int newval)
+{
+ int retval;
+
+ retval = atomic_fcmpset_int(p, cmpval, newval);
+ __ATOMIC_ACQ();
+ return (retval);
+}
+
+static __inline int
+atomic_fcmpset_rel_int(volatile u_int *p, u_int *cmpval, u_int newval)
+{
+ __ATOMIC_REL();
+ return (atomic_fcmpset_int(p, cmpval, newval));
+}
+
+static __inline int
+atomic_fcmpset_acq_long(volatile u_long *p, u_long *cmpval, u_long newval)
+{
+ u_long retval;
+
+ retval = atomic_fcmpset_long(p, cmpval, newval);
+ __ATOMIC_ACQ();
+ return (retval);
+}
+
+static __inline int
+atomic_fcmpset_rel_long(volatile u_long *p, u_long *cmpval, u_long newval)
+{
+ __ATOMIC_REL();
+ return (atomic_fcmpset_long(p, cmpval, newval));
+}
+
+#define atomic_fcmpset_32 atomic_fcmpset_int
+#define atomic_fcmpset_acq_32 atomic_fcmpset_acq_int
+#define atomic_fcmpset_rel_32 atomic_fcmpset_rel_int
+
+#ifdef __powerpc64__
+#define atomic_fcmpset_64 atomic_fcmpset_long
+#define atomic_fcmpset_acq_64 atomic_fcmpset_acq_long
+#define atomic_fcmpset_rel_64 atomic_fcmpset_rel_long
+
+#define atomic_fcmpset_ptr atomic_fcmpset_long
+#define atomic_fcmpset_acq_ptr atomic_fcmpset_acq_long
+#define atomic_fcmpset_rel_ptr atomic_fcmpset_rel_long
+#else
+#define atomic_fcmpset_ptr atomic_fcmpset_int
+#define atomic_fcmpset_acq_ptr atomic_fcmpset_acq_int
+#define atomic_fcmpset_rel_ptr atomic_fcmpset_rel_int
+#endif
+
static __inline u_int
atomic_fetchadd_int(volatile u_int *p, u_int v)
{
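For comparison, the behavior the hand-written lwarx/stwcx. sequences
above implement can be approximated with the GCC/Clang __atomic
builtins: a *weak* compare-exchange is the closest match, since it is
likewise allowed to fail spuriously (e.g., on a lost reservation,
leaving the retry to the caller) and also stores the observed value
through the 'expected' pointer. A hedged sketch, not part of the
FreeBSD sources; the fcmpset_sketch() name is illustrative:

	#include <stdbool.h>

	/* Illustrative stand-in for atomic_fcmpset_int(); not FreeBSD code. */
	static inline int
	fcmpset_sketch(volatile unsigned int *p, unsigned int *cmpval,
	    unsigned int newval)
	{
		/*
		 * weak = true mirrors fcmpset: the operation may fail
		 * even when *p == *cmpval, and on any failure *cmpval
		 * is updated with the value read from *p.  Relaxed
		 * ordering matches the plain (non _acq/_rel) variant,
		 * which layers barriers on top via __ATOMIC_ACQ()/
		 * __ATOMIC_REL().
		 */
		return (__atomic_compare_exchange_n(p, cmpval, newval,
		    true, __ATOMIC_RELAXED, __ATOMIC_RELAXED));
	}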