From f4ccf1d6d01abe8a14bcf252ce5721ccec28c4c4 Mon Sep 17 00:00:00 2001
From: nwhitehorn
Date: Sun, 22 Apr 2012 21:11:01 +0000
Subject: Clarify what we are doing in r234583 a little better: eieio and
 isync do not provide general barriers, but only barriers in the context of
 the atomic sequences here. As such, make them private and keep the global
 *mb() routines using a variant of sync.

---
 sys/powerpc/include/atomic.h | 38 +++++++++++++++++++++++---------------
 1 file changed, 23 insertions(+), 15 deletions(-)

(limited to 'sys/powerpc/include')

diff --git a/sys/powerpc/include/atomic.h b/sys/powerpc/include/atomic.h
index 5ff64fb..ce347e9 100644
--- a/sys/powerpc/include/atomic.h
+++ b/sys/powerpc/include/atomic.h
@@ -37,13 +37,21 @@
 #endif
 
 /* NOTE: lwsync is equivalent to sync on systems without lwsync */
-#define mb()	__asm __volatile("lwsync" : : : "memory")
+#define mb()	__asm __volatile("lwsync" : : : "memory")
+#define wmb()	__asm __volatile("lwsync" : : : "memory")
+#define rmb()	__asm __volatile("lwsync" : : : "memory")
+
+/*
+ * The __ATOMIC_XMB() macros provide memory barriers only in conjunction
+ * with the atomic lXarx/stXcx. sequences below. See Appendix B.2 of Book II
+ * of the architecture manual.
+ */
 #ifdef __powerpc64__
-#define wmb()	__asm __volatile("lwsync" : : : "memory")
-#define rmb()	__asm __volatile("lwsync" : : : "memory")
+#define __ATOMIC_WMB()	__asm __volatile("lwsync" : : : "memory")
+#define __ATOMIC_RMB()	__asm __volatile("lwsync" : : : "memory")
 #else
-#define wmb()	__asm __volatile("eieio" : : : "memory")
-#define rmb()	__asm __volatile("isync" : : : "memory")
+#define __ATOMIC_WMB()	__asm __volatile("eieio" : : : "memory")
+#define __ATOMIC_RMB()	__asm __volatile("isync" : : : "memory")
 #endif
 
 /*
@@ -97,13 +105,13 @@
     atomic_add_acq_##type(volatile u_##type *p, u_##type v) {	\
 	u_##type t;						\
 	__atomic_add_##type(p, v, t);				\
-	rmb();							\
+	__ATOMIC_RMB();						\
     }								\
								\
     static __inline void					\
     atomic_add_rel_##type(volatile u_##type *p, u_##type v) {	\
 	u_##type t;						\
-	wmb();							\
+	__ATOMIC_WMB();						\
 	__atomic_add_##type(p, v, t);				\
     }								\
     /* _ATOMIC_ADD */
@@ -183,13 +191,13 @@ _ATOMIC_ADD(long)
     atomic_clear_acq_##type(volatile u_##type *p, u_##type v) {	\
 	u_##type t;						\
 	__atomic_clear_##type(p, v, t);				\
-	rmb();							\
+	__ATOMIC_RMB();						\
     }								\
								\
     static __inline void					\
     atomic_clear_rel_##type(volatile u_##type *p, u_##type v) {	\
 	u_##type t;						\
-	wmb();							\
+	__ATOMIC_WMB();						\
 	__atomic_clear_##type(p, v, t);				\
     }								\
     /* _ATOMIC_CLEAR */
@@ -285,13 +293,13 @@ _ATOMIC_CLEAR(long)
     atomic_set_acq_##type(volatile u_##type *p, u_##type v) {	\
 	u_##type t;						\
 	__atomic_set_##type(p, v, t);				\
-	rmb();							\
+	__ATOMIC_RMB();						\
     }								\
								\
     static __inline void					\
     atomic_set_rel_##type(volatile u_##type *p, u_##type v) {	\
 	u_##type t;						\
-	wmb();							\
+	__ATOMIC_WMB();						\
 	__atomic_set_##type(p, v, t);				\
     }								\
     /* _ATOMIC_SET */
@@ -371,13 +379,13 @@ _ATOMIC_SET(long)
     atomic_subtract_acq_##type(volatile u_##type *p, u_##type v) {	\
 	u_##type t;						\
 	__atomic_subtract_##type(p, v, t);			\
-	rmb();							\
+	__ATOMIC_RMB();						\
     }								\
								\
     static __inline void					\
     atomic_subtract_rel_##type(volatile u_##type *p, u_##type v) {	\
 	u_##type t;						\
-	wmb();							\
+	__ATOMIC_WMB();						\
 	__atomic_subtract_##type(p, v, t);			\
     }								\
     /* _ATOMIC_SUBTRACT */
@@ -601,7 +609,7 @@ atomic_cmpset_acq_int(volatile u_int *p, u_int cmpval, u_int newval)
 	int retval;
 
 	retval = atomic_cmpset_int(p, cmpval, newval);
-	rmb();
+	__ATOMIC_RMB();
 	return (retval);
 }
 
@@ -618,7 +626,7 @@ atomic_cmpset_acq_long(volatile u_long *p, u_long cmpval, u_long newval)
 	u_long retval;
 
 	retval = atomic_cmpset_long(p, cmpval, newval);
-	rmb();
+	__ATOMIC_RMB();
 	return (retval);
 }
 
--
cgit v1.1
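
[Editor's note] To make the new __ATOMIC_XMB() comment concrete, here is a
minimal, non-authoritative sketch of the kind of larx/stcx. sequence these
barriers pair with on 32-bit PowerPC. This code is not from the commit: the
function name atomic_load_acq_sketch is invented, and the asm constraints are
one plausible way to write the sequence. The point it illustrates is that
isync is not a general read barrier; it acts as an acquire fence only
immediately after a lwarx/stwcx. pair, because the conditional branch on the
stwcx. result makes every later instruction dependent on the load (Book II,
Appendix B.2).

#include <sys/types.h>			/* u_int, as used by this header */

/*
 * Hypothetical acquire load in the style of the sequences atomic.h
 * builds with __ATOMIC_RMB().  isync by itself only discards
 * speculatively executed instructions; it orders memory here solely
 * because the bne- cannot resolve until the lwarx/stwcx. pair
 * completes, so no later access can be performed early.
 */
static __inline u_int
atomic_load_acq_sketch(volatile u_int *p)
{
	u_int v;

	__asm __volatile(
	    "1:	lwarx	%0, 0, %2\n"	/* load word, set reservation */
	    "	stwcx.	%0, 0, %2\n"	/* conditionally store it back */
	    "	bne-	1b\n"		/* reservation lost: retry */
	    "	isync\n"		/* acquire barrier for this sequence */
	    : "=&r" (v), "+m" (*p)
	    : "r" (p)
	    : "cr0", "memory");
	return (v);
}

The sketch also shows why the global barriers stay on a variant of sync:
eieio orders stores to ordinary memory amongst themselves but places no
ordering on loads there, and isync orders nothing without a dependency chain
like the one above, so neither can implement a general mb(), wmb(), or rmb().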