summary | refs | log | tree | commit | diff | stats
path: root/sys/powerpc/include
diff options
context:
space:
mode:
authornwhitehorn <nwhitehorn@FreeBSD.org>2012-04-22 21:11:01 +0000
committernwhitehorn <nwhitehorn@FreeBSD.org>2012-04-22 21:11:01 +0000
commitf4ccf1d6d01abe8a14bcf252ce5721ccec28c4c4 (patch)
tree522290899c0ba89d09703a8850b32252b711a6bc /sys/powerpc/include
parent9735a829855418c1921d6e55aef20eae4cb177be (diff)
downloadFreeBSD-src-f4ccf1d6d01abe8a14bcf252ce5721ccec28c4c4.zip
FreeBSD-src-f4ccf1d6d01abe8a14bcf252ce5721ccec28c4c4.tar.gz
Clarify what we are doing in r234583 a little better: eieio and isync do
not provide general barriers, but only barriers in the context of the atomic sequences here. As such, make them private and keep the global *mb() routines using a variant of sync.
Diffstat (limited to 'sys/powerpc/include')
-rw-r--r--sys/powerpc/include/atomic.h38
1 file changed, 23 insertions, 15 deletions
diff --git a/sys/powerpc/include/atomic.h b/sys/powerpc/include/atomic.h
index 5ff64fb..ce347e9 100644
--- a/sys/powerpc/include/atomic.h
+++ b/sys/powerpc/include/atomic.h
@@ -37,13 +37,21 @@
#endif
/* NOTE: lwsync is equivalent to sync on systems without lwsync */
-#define mb() __asm __volatile("lwsync" : : : "memory")
+#define mb() __asm __volatile("lwsync" : : : "memory")
+#define wmb() __asm __volatile("lwsync" : : : "memory")
+#define rmb() __asm __volatile("lwsync" : : : "memory")
+
+/*
+ * The __ATOMIC_XMB() macros provide memory barriers only in conjunction
+ * with the atomic lXarx/stXcx. sequences below. See Appendix B.2 of Book II
+ * of the architecture manual.
+ */
#ifdef __powerpc64__
-#define wmb() __asm __volatile("lwsync" : : : "memory")
-#define rmb() __asm __volatile("lwsync" : : : "memory")
+#define __ATOMIC_WMB() __asm __volatile("lwsync" : : : "memory")
+#define __ATOMIC_RMB() __asm __volatile("lwsync" : : : "memory")
#else
-#define wmb() __asm __volatile("eieio" : : : "memory")
-#define rmb() __asm __volatile("isync" : : : "memory")
+#define __ATOMIC_WMB() __asm __volatile("eieio" : : : "memory")
+#define __ATOMIC_RMB() __asm __volatile("isync" : : : "memory")
#endif
/*
@@ -97,13 +105,13 @@
atomic_add_acq_##type(volatile u_##type *p, u_##type v) { \
u_##type t; \
__atomic_add_##type(p, v, t); \
- rmb(); \
+ __ATOMIC_RMB(); \
} \
\
static __inline void \
atomic_add_rel_##type(volatile u_##type *p, u_##type v) { \
u_##type t; \
- wmb(); \
+ __ATOMIC_WMB(); \
__atomic_add_##type(p, v, t); \
} \
/* _ATOMIC_ADD */
@@ -183,13 +191,13 @@ _ATOMIC_ADD(long)
atomic_clear_acq_##type(volatile u_##type *p, u_##type v) { \
u_##type t; \
__atomic_clear_##type(p, v, t); \
- rmb(); \
+ __ATOMIC_RMB(); \
} \
\
static __inline void \
atomic_clear_rel_##type(volatile u_##type *p, u_##type v) { \
u_##type t; \
- wmb(); \
+ __ATOMIC_WMB(); \
__atomic_clear_##type(p, v, t); \
} \
/* _ATOMIC_CLEAR */
@@ -285,13 +293,13 @@ _ATOMIC_CLEAR(long)
atomic_set_acq_##type(volatile u_##type *p, u_##type v) { \
u_##type t; \
__atomic_set_##type(p, v, t); \
- rmb(); \
+ __ATOMIC_RMB(); \
} \
\
static __inline void \
atomic_set_rel_##type(volatile u_##type *p, u_##type v) { \
u_##type t; \
- wmb(); \
+ __ATOMIC_WMB(); \
__atomic_set_##type(p, v, t); \
} \
/* _ATOMIC_SET */
@@ -371,13 +379,13 @@ _ATOMIC_SET(long)
atomic_subtract_acq_##type(volatile u_##type *p, u_##type v) { \
u_##type t; \
__atomic_subtract_##type(p, v, t); \
- rmb(); \
+ __ATOMIC_RMB(); \
} \
\
static __inline void \
atomic_subtract_rel_##type(volatile u_##type *p, u_##type v) { \
u_##type t; \
- wmb(); \
+ __ATOMIC_WMB(); \
__atomic_subtract_##type(p, v, t); \
} \
/* _ATOMIC_SUBTRACT */
@@ -601,7 +609,7 @@ atomic_cmpset_acq_int(volatile u_int *p, u_int cmpval, u_int newval)
int retval;
retval = atomic_cmpset_int(p, cmpval, newval);
- rmb();
+ __ATOMIC_RMB();
return (retval);
}
@@ -618,7 +626,7 @@ atomic_cmpset_acq_long(volatile u_long *p, u_long cmpval, u_long newval)
u_long retval;
retval = atomic_cmpset_long(p, cmpval, newval);
- rmb();
+ __ATOMIC_RMB();
return (retval);
}
OpenPOWER on IntegriCloud