path: root/sys/powerpc/include
author		marcel <marcel@FreeBSD.org>	2012-05-24 20:45:44 +0000
committer	marcel <marcel@FreeBSD.org>	2012-05-24 20:45:44 +0000
commit		c933e51f6c2b709f1a98f25c0f49570cbad0f12f
tree		2f32ea4188d977812092dc53aea48154792e91b5
parent		01d5f155a139065732bdf67658a39743e192332b
Fix the memory barriers for CPUs that do not like lwsync and wedge or cause
exceptions early enough during boot that the kernel will do the same. Use
lwsync only when compiling for LP64 and revert to the more proven isync when
compiling for ILP32. Note that in the end (i.e. between revision 222198 and
this change) ILP32 changed from using sync to using isync. As per Nathan, the
isync is needed to make sure I/O accesses are properly serialized with locks,
and isync tends to be more efficient than sync.

While here, undefine __ATOMIC_ACQ and __ATOMIC_REL at the end of the file so
as not to leak their definitions.

Discussed with:	nwhitehorn
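For context on what the patch below rearranges: the __ATOMIC_ACQ()/__ATOMIC_REL()
macros are emitted after/before the lXarx/stXcx. sequences in atomic.h to give
the atomic operations acquire/release semantics. Below is a minimal sketch,
assuming a 32-bit target (the function name is illustrative, not the FreeBSD
implementation; see sys/powerpc/include/atomic.h for the real sequences), of
where the acquire barrier lands relative to a compare-and-set loop:

#include <stdint.h>

/*
 * Sketch only: an acquire-semantics compare-and-set for 32-bit PowerPC.
 * The lwarx/stwcx. loop performs the atomic update; the trailing barrier
 * is what __ATOMIC_ACQ() expands to.
 */
static __inline int
atomic_cmpset_acq_sketch(volatile uint32_t *p, uint32_t cmpval, uint32_t newval)
{
	int ret;

	__asm __volatile(
	    "1: lwarx %0, 0, %2\n\t"	/* load old value, set reservation */
	    "cmplw %3, %0\n\t"		/* does it match cmpval? */
	    "bne 2f\n\t"		/* no: fail */
	    "stwcx. %4, 0, %2\n\t"	/* try to store newval */
	    "bne- 1b\n\t"		/* reservation lost: retry */
	    "li %0, 1\n\t"		/* success */
	    "b 3f\n\t"
	    "2: stwcx. %0, 0, %2\n\t"	/* clear the reservation */
	    "li %0, 0\n\t"		/* failure */
	    "3:\n\t"
	    : "=&r" (ret), "+m" (*p)
	    : "r" (p), "r" (cmpval), "r" (newval)
	    : "cr0", "memory");

	/* __ATOMIC_ACQ(): isync on ILP32 after this change, lwsync on LP64. */
	__asm __volatile("isync" : : : "memory");

	return (ret);
}

On an LP64 build the trailing barrier becomes lwsync; the reason ILP32 keeps
isync is precisely that early e500 cores wedge or trap on lwsync.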
Diffstat (limited to 'sys/powerpc/include')
-rw-r--r--	sys/powerpc/include/atomic.h	34
1 file changed, 22 insertions(+), 12 deletions(-)
diff --git a/sys/powerpc/include/atomic.h b/sys/powerpc/include/atomic.h
index 8ec3ea0..20cffba 100644
--- a/sys/powerpc/include/atomic.h
+++ b/sys/powerpc/include/atomic.h
@@ -36,23 +36,30 @@
 #error this file needs sys/cdefs.h as a prerequisite
 #endif
 
-/* NOTE: lwsync is equivalent to sync on systems without lwsync */
-#define mb() __asm __volatile("lwsync" : : : "memory")
-#ifdef __powerpc64__
-#define rmb() __asm __volatile("lwsync" : : : "memory")
-#define wmb() __asm __volatile("lwsync" : : : "memory")
-#else
-#define rmb() __asm __volatile("lwsync" : : : "memory")
-#define wmb() __asm __volatile("eieio" : : : "memory")
-#endif
-
 /*
  * The __ATOMIC_REL/ACQ() macros provide memory barriers only in conjunction
- * with the atomic lXarx/stXcx. sequences below. See Appendix B.2 of Book II
- * of the architecture manual.
+ * with the atomic lXarx/stXcx. sequences below. They are not exposed outside
+ * of this file. See also Appendix B.2 of Book II of the architecture manual.
+ *
+ * Note that not all Book-E processors accept the light-weight sync variant.
+ * In particular, early models of E500 cores are known to wedge. Bank on all
+ * 64-bit capable CPUs to accept lwsync properly and pessimize 32-bit CPUs
+ * to use the heavier-weight sync.
  */
+
+#ifdef __powerpc64__
+#define mb() __asm __volatile("lwsync" : : : "memory")
+#define rmb() __asm __volatile("lwsync" : : : "memory")
+#define wmb() __asm __volatile("lwsync" : : : "memory")
 #define __ATOMIC_REL() __asm __volatile("lwsync" : : : "memory")
+#define __ATOMIC_ACQ() __asm __volatile("lwsync" : : : "memory")
+#else
+#define mb() __asm __volatile("isync" : : : "memory")
+#define rmb() __asm __volatile("isync" : : : "memory")
+#define wmb() __asm __volatile("isync" : : : "memory")
+#define __ATOMIC_REL() __asm __volatile("isync" : : : "memory")
 #define __ATOMIC_ACQ() __asm __volatile("isync" : : : "memory")
+#endif
 
 /*
  * atomic_add(p, v)
@@ -683,4 +690,7 @@ atomic_fetchadd_long(volatile u_long *p, u_long v)
 #define atomic_fetchadd_64 atomic_fetchadd_long
 #endif
 
+#undef __ATOMIC_REL
+#undef __ATOMIC_ACQ
+
 #endif /* ! _MACHINE_ATOMIC_H_ */
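The commit message's remark about I/O accesses being serialized with locks can
be illustrated with a hypothetical lock-protected device-register read
(lock_word, device_reg and the helper below are invented for the example; they
are not FreeBSD interfaces):

#include <stdint.h>

extern volatile uint32_t lock_word;	/* hypothetical spinlock word, 0 = free */
extern volatile uint32_t *device_reg;	/* hypothetical memory-mapped register */

static __inline uint32_t
read_reg_locked(void)
{
	uint32_t v;

	/* Acquire the lock; the GCC builtin is used here only for brevity. */
	while (__sync_lock_test_and_set(&lock_word, 1) != 0)
		;

	/*
	 * Acquire barrier, isync on ILP32 per this change: it keeps the
	 * device read below from being performed before the lock is held.
	 */
	__asm __volatile("isync" : : : "memory");

	v = *device_reg;

	/* Release barrier before dropping the lock (also isync on ILP32). */
	__asm __volatile("isync" : : : "memory");
	lock_word = 0;

	return (v);
}

isync is not a storage barrier in the architectural sense, but in the acquire
position after a successful lock primitive it discards speculatively started
loads, which is what keeps the register access inside the critical section.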