diff options
author | marcel <marcel@FreeBSD.org> | 2002-05-19 20:19:07 +0000 |
---|---|---|
committer | marcel <marcel@FreeBSD.org> | 2002-05-19 20:19:07 +0000 |
commit | be8327ddfe88e208db8b8ab9b57a43b0dd4b2c61 (patch) | |
tree | 4ba1af45a70f240ce2d8dc2646d594a321a0e635 /sys/ia64/include/atomic.h | |
parent | 982963acb935ae0389976769dc093c1c286a132e (diff) | |
download | FreeBSD-src-be8327ddfe88e208db8b8ab9b57a43b0dd4b2c61.zip FreeBSD-src-be8327ddfe88e208db8b8ab9b57a43b0dd4b2c61.tar.gz |
Simplify IA64_CMPXCHG to avoid having braced-groups in expressions.
As a minor positive side-effect, code at -O0 is more optimal. As a
minor negative side-effect, certain boundary cases yield no better
code than non-boundary cases. For example, atomic_set_acq_32(p, 0)
does a useless logical OR with value 0. This was previously eliminated
as part of if/while optimizations. Non-boundary cases yield
identical code at -O1 and -O2.
Diffstat (limited to 'sys/ia64/include/atomic.h')
-rw-r--r-- | sys/ia64/include/atomic.h | 42 |
1 files changed, 22 insertions, 20 deletions
diff --git a/sys/ia64/include/atomic.h b/sys/ia64/include/atomic.h
index 4fbe8f5..f919ae6 100644
--- a/sys/ia64/include/atomic.h
+++ b/sys/ia64/include/atomic.h
@@ -37,21 +37,13 @@
 /*
  * Everything is built out of cmpxchg.
  */
-#define IA64_CMPXCHG(sz, sem, type, p, cmpval, newval)		\
-({								\
-	type _cmpval = cmpval;					\
-	type _newval = newval;					\
-	volatile type *_p = (volatile type *) p;		\
-	type _ret;						\
-								\
+#define IA64_CMPXCHG(sz, sem, p, cmpval, newval, ret)		\
 	__asm __volatile (					\
 		"mov ar.ccv=%2;;\n\t"				\
 		"cmpxchg" #sz "." #sem " %0=%4,%3,ar.ccv\n\t"	\
-		: "=r" (_ret), "=m" (*_p)			\
-		: "r" (_cmpval), "r" (_newval), "m" (*_p)	\
-		: "memory");					\
-	_ret;							\
-})
+		: "=r" (ret), "=m" (*p)				\
+		: "r" (cmpval), "r" (newval), "m" (*p)		\
+		: "memory")
 
 /*
  * Some common forms of cmpxch.
@@ -59,25 +51,33 @@
 static __inline u_int32_t
 ia64_cmpxchg_acq_32(volatile u_int32_t* p, u_int32_t cmpval, u_int32_t newval)
 {
-	return IA64_CMPXCHG(4, acq, u_int32_t, p, cmpval, newval);
+	u_int32_t ret;
+	IA64_CMPXCHG(4, acq, p, cmpval, newval, ret);
+	return (ret);
 }
 
 static __inline u_int32_t
 ia64_cmpxchg_rel_32(volatile u_int32_t* p, u_int32_t cmpval, u_int32_t newval)
 {
-	return IA64_CMPXCHG(4, rel, u_int32_t, p, cmpval, newval);
+	u_int32_t ret;
+	IA64_CMPXCHG(4, rel, p, cmpval, newval, ret);
+	return (ret);
 }
 
 static __inline u_int64_t
 ia64_cmpxchg_acq_64(volatile u_int64_t* p, u_int64_t cmpval, u_int64_t newval)
 {
-	return IA64_CMPXCHG(8, acq, u_int64_t, p, cmpval, newval);
+	u_int64_t ret;
+	IA64_CMPXCHG(8, acq, p, cmpval, newval, ret);
+	return (ret);
 }
 
 static __inline u_int64_t
 ia64_cmpxchg_rel_64(volatile u_int64_t* p, u_int64_t cmpval, u_int64_t newval)
 {
-	return IA64_CMPXCHG(8, rel, u_int64_t, p, cmpval, newval);
+	u_int64_t ret;
+	IA64_CMPXCHG(8, rel, p, cmpval, newval, ret);
+	return (ret);
 }
 
 #define ATOMIC_STORE_LOAD(type, width, size)	\
@@ -156,19 +156,21 @@ ATOMIC_STORE_LOAD(long, 64, "8")
 static __inline void						\
 atomic_##name##_acq_##width(volatile type *p, type v)		\
 {								\
-	type old;						\
+	type old, ret;						\
 	do {							\
 		old = *p;					\
-	} while (IA64_CMPXCHG(sz, acq, type, p, old, old op v) != old); \
+		IA64_CMPXCHG(sz, acq, p, old, old op v, ret);	\
+	} while (ret != old);					\
 }								\
								\
 static __inline void						\
 atomic_##name##_rel_##width(volatile type *p, type v)		\
 {								\
-	type old;						\
+	type old, ret;						\
 	do {							\
 		old = *p;					\
-	} while (IA64_CMPXCHG(sz, rel, type, p, old, old op v) != old); \
+		IA64_CMPXCHG(sz, rel, p, old, old op v, ret);	\
+	} while (ret != old);					\
 }
 
 IA64_ATOMIC(1, u_int8_t,  set, 8, |)