summaryrefslogtreecommitdiffstats
path: root/sys
diff options
context:
space:
mode:
authormarcel <marcel@FreeBSD.org>2004-09-22 19:47:42 +0000
committermarcel <marcel@FreeBSD.org>2004-09-22 19:47:42 +0000
commite6d3324397962a33439920b6d079a2da2c2fa177 (patch)
tree15021e3e701546de6c05e125d091d0fdc9d82538 /sys
parent116dc0a8ccfec1265291af53605a03bb20122df1 (diff)
downloadFreeBSD-src-e6d3324397962a33439920b6d079a2da2c2fa177.zip
FreeBSD-src-e6d3324397962a33439920b6d079a2da2c2fa177.tar.gz
MFp4: various style fixes, including
o s/u_int/uint/g
o s/#define<sp>/#define<tab>/g
o indent macro definitions
o Improve vertical spacing
o Globally align line continuation character
Diffstat (limited to 'sys')
-rw-r--r--sys/ia64/include/atomic.h487
1 file changed, 239 insertions, 248 deletions
diff --git a/sys/ia64/include/atomic.h b/sys/ia64/include/atomic.h
index f919ae6..f3b496c 100644
--- a/sys/ia64/include/atomic.h
+++ b/sys/ia64/include/atomic.h
@@ -27,7 +27,7 @@
*/
#ifndef _MACHINE_ATOMIC_H_
-#define _MACHINE_ATOMIC_H_
+#define _MACHINE_ATOMIC_H_
/*
* Various simple arithmetic on memory which is atomic in the presence
@@ -37,252 +37,239 @@
/*
* Everything is built out of cmpxchg.
*/
-#define IA64_CMPXCHG(sz, sem, p, cmpval, newval, ret) \
- __asm __volatile ( \
- "mov ar.ccv=%2;;\n\t" \
- "cmpxchg" #sz "." #sem " %0=%4,%3,ar.ccv\n\t" \
- : "=r" (ret), "=m" (*p) \
- : "r" (cmpval), "r" (newval), "m" (*p) \
+#define IA64_CMPXCHG(sz, sem, p, cmpval, newval, ret) \
+ __asm __volatile ( \
+ "mov ar.ccv=%2;;\n\t" \
+ "cmpxchg" #sz "." #sem " %0=%4,%3,ar.ccv\n\t" \
+ : "=r" (ret), "=m" (*p) \
+ : "r" (cmpval), "r" (newval), "m" (*p) \
: "memory")
/*
 * Some common forms of cmpxchg.
*/
-static __inline u_int32_t
-ia64_cmpxchg_acq_32(volatile u_int32_t* p, u_int32_t cmpval, u_int32_t newval)
+static __inline uint32_t
+ia64_cmpxchg_acq_32(volatile uint32_t* p, uint32_t cmpval, uint32_t newval)
{
- u_int32_t ret;
+ uint32_t ret;
IA64_CMPXCHG(4, acq, p, cmpval, newval, ret);
return (ret);
}
-static __inline u_int32_t
-ia64_cmpxchg_rel_32(volatile u_int32_t* p, u_int32_t cmpval, u_int32_t newval)
+static __inline uint32_t
+ia64_cmpxchg_rel_32(volatile uint32_t* p, uint32_t cmpval, uint32_t newval)
{
- u_int32_t ret;
+ uint32_t ret;
IA64_CMPXCHG(4, rel, p, cmpval, newval, ret);
return (ret);
}
-static __inline u_int64_t
-ia64_cmpxchg_acq_64(volatile u_int64_t* p, u_int64_t cmpval, u_int64_t newval)
+static __inline uint64_t
+ia64_cmpxchg_acq_64(volatile uint64_t* p, uint64_t cmpval, uint64_t newval)
{
- u_int64_t ret;
+ uint64_t ret;
IA64_CMPXCHG(8, acq, p, cmpval, newval, ret);
return (ret);
}
-static __inline u_int64_t
-ia64_cmpxchg_rel_64(volatile u_int64_t* p, u_int64_t cmpval, u_int64_t newval)
+static __inline uint64_t
+ia64_cmpxchg_rel_64(volatile uint64_t* p, uint64_t cmpval, uint64_t newval)
{
- u_int64_t ret;
+ uint64_t ret;
IA64_CMPXCHG(8, rel, p, cmpval, newval, ret);
return (ret);
}
-#define ATOMIC_STORE_LOAD(type, width, size) \
-static __inline u_int##width##_t \
-ia64_ld_acq_##width(volatile u_int##width##_t* p) \
-{ \
- u_int##width##_t v; \
- \
- __asm __volatile ("ld" size ".acq %0=%1" \
- : "=r" (v) \
- : "m" (*p) \
- : "memory"); \
- return (v); \
-} \
- \
-static __inline u_int##width##_t \
-atomic_load_acq_##width(volatile u_int##width##_t* p) \
-{ \
- u_int##width##_t v; \
- \
- __asm __volatile ("ld" size ".acq %0=%1" \
- : "=r" (v) \
- : "m" (*p) \
- : "memory"); \
- return (v); \
-} \
- \
-static __inline u_int##width##_t \
-atomic_load_acq_##type(volatile u_int##width##_t* p) \
-{ \
- u_int##width##_t v; \
- \
- __asm __volatile ("ld" size ".acq %0=%1" \
- : "=r" (v) \
- : "m" (*p) \
- : "memory"); \
- return (v); \
-} \
- \
-static __inline void \
-ia64_st_rel_##width(volatile u_int##width##_t* p, u_int##width##_t v)\
-{ \
- __asm __volatile ("st" size ".rel %0=%1" \
- : "=m" (*p) \
- : "r" (v) \
- : "memory"); \
-} \
- \
-static __inline void \
-atomic_store_rel_##width(volatile u_int##width##_t* p, u_int##width##_t v)\
-{ \
- __asm __volatile ("st" size ".rel %0=%1" \
- : "=m" (*p) \
- : "r" (v) \
- : "memory"); \
-} \
- \
-static __inline void \
-atomic_store_rel_##type(volatile u_int##width##_t* p, u_int##width##_t v)\
-{ \
- __asm __volatile ("st" size ".rel %0=%1" \
- : "=m" (*p) \
- : "r" (v) \
- : "memory"); \
-}
-
-ATOMIC_STORE_LOAD(char, 8, "1")
-ATOMIC_STORE_LOAD(short, 16, "2")
-ATOMIC_STORE_LOAD(int, 32, "4")
-ATOMIC_STORE_LOAD(long, 64, "8")
-
-#undef ATOMIC_STORE_LOAD
-
-#define IA64_ATOMIC(sz, type, name, width, op) \
+#define ATOMIC_STORE_LOAD(type, width, size) \
+ static __inline uint##width##_t \
+ ia64_ld_acq_##width(volatile uint##width##_t* p) \
+ { \
+ uint##width##_t v; \
+ __asm __volatile ("ld" size ".acq %0=%1" : "=r" (v) \
+ : "m" (*p) : "memory"); \
+ return (v); \
+ } \
\
-static __inline void \
-atomic_##name##_acq_##width(volatile type *p, type v) \
-{ \
- type old, ret; \
- do { \
- old = *p; \
- IA64_CMPXCHG(sz, acq, p, old, old op v, ret); \
- } while (ret != old); \
-} \
+ static __inline uint##width##_t \
+ atomic_load_acq_##width(volatile uint##width##_t* p) \
+ { \
+ uint##width##_t v; \
+ __asm __volatile ("ld" size ".acq %0=%1" : "=r" (v) \
+ : "m" (*p) : "memory"); \
+ return (v); \
+ } \
\
-static __inline void \
-atomic_##name##_rel_##width(volatile type *p, type v) \
-{ \
- type old, ret; \
- do { \
- old = *p; \
- IA64_CMPXCHG(sz, rel, p, old, old op v, ret); \
- } while (ret != old); \
-}
+ static __inline uint##width##_t \
+ atomic_load_acq_##type(volatile uint##width##_t* p) \
+ { \
+ uint##width##_t v; \
+ __asm __volatile ("ld" size ".acq %0=%1" : "=r" (v) \
+ : "m" (*p) : "memory"); \
+ return (v); \
+ } \
+ \
+ static __inline void \
+ ia64_st_rel_##width(volatile uint##width##_t* p, uint##width##_t v) \
+ { \
+ __asm __volatile ("st" size ".rel %0=%1" : "=m" (*p) \
+ : "r" (v) : "memory"); \
+ } \
+ \
+ static __inline void \
+ atomic_store_rel_##width(volatile uint##width##_t* p, \
+ uint##width##_t v) \
+ { \
+ __asm __volatile ("st" size ".rel %0=%1" : "=m" (*p) \
+ : "r" (v) : "memory"); \
+ } \
+ \
+ static __inline void \
+ atomic_store_rel_##type(volatile uint##width##_t* p, \
+ uint##width##_t v) \
+ { \
+ __asm __volatile ("st" size ".rel %0=%1" : "=m" (*p) \
+ : "r" (v) : "memory"); \
+ }
+
+ATOMIC_STORE_LOAD(char, 8, "1")
+ATOMIC_STORE_LOAD(short, 16, "2")
+ATOMIC_STORE_LOAD(int, 32, "4")
+ATOMIC_STORE_LOAD(long, 64, "8")
-IA64_ATOMIC(1, u_int8_t, set, 8, |)
-IA64_ATOMIC(2, u_int16_t, set, 16, |)
-IA64_ATOMIC(4, u_int32_t, set, 32, |)
-IA64_ATOMIC(8, u_int64_t, set, 64, |)
+#undef ATOMIC_STORE_LOAD
-IA64_ATOMIC(1, u_int8_t, clear, 8, &~)
-IA64_ATOMIC(2, u_int16_t, clear, 16, &~)
-IA64_ATOMIC(4, u_int32_t, clear, 32, &~)
-IA64_ATOMIC(8, u_int64_t, clear, 64, &~)
+#define IA64_ATOMIC(sz, type, name, width, op) \
+ static __inline void \
+ atomic_##name##_acq_##width(volatile type *p, type v) \
+ { \
+ type old, ret; \
+ do { \
+ old = *p; \
+ IA64_CMPXCHG(sz, acq, p, old, old op v, ret); \
+ } while (ret != old); \
+ } \
+ \
+ static __inline void \
+ atomic_##name##_rel_##width(volatile type *p, type v) \
+ { \
+ type old, ret; \
+ do { \
+ old = *p; \
+ IA64_CMPXCHG(sz, rel, p, old, old op v, ret); \
+ } while (ret != old); \
+ }
+
+IA64_ATOMIC(1, uint8_t, set, 8, |)
+IA64_ATOMIC(2, uint16_t, set, 16, |)
+IA64_ATOMIC(4, uint32_t, set, 32, |)
+IA64_ATOMIC(8, uint64_t, set, 64, |)
+
+IA64_ATOMIC(1, uint8_t, clear, 8, &~)
+IA64_ATOMIC(2, uint16_t, clear, 16, &~)
+IA64_ATOMIC(4, uint32_t, clear, 32, &~)
+IA64_ATOMIC(8, uint64_t, clear, 64, &~)
+
+IA64_ATOMIC(1, uint8_t, add, 8, +)
+IA64_ATOMIC(2, uint16_t, add, 16, +)
+IA64_ATOMIC(4, uint32_t, add, 32, +)
+IA64_ATOMIC(8, uint64_t, add, 64, +)
+
+IA64_ATOMIC(1, uint8_t, subtract, 8, -)
+IA64_ATOMIC(2, uint16_t, subtract, 16, -)
+IA64_ATOMIC(4, uint32_t, subtract, 32, -)
+IA64_ATOMIC(8, uint64_t, subtract, 64, -)
-IA64_ATOMIC(1, u_int8_t, add, 8, +)
-IA64_ATOMIC(2, u_int16_t, add, 16, +)
-IA64_ATOMIC(4, u_int32_t, add, 32, +)
-IA64_ATOMIC(8, u_int64_t, add, 64, +)
+#undef IA64_ATOMIC
-IA64_ATOMIC(1, u_int8_t, subtract, 8, -)
-IA64_ATOMIC(2, u_int16_t, subtract, 16, -)
-IA64_ATOMIC(4, u_int32_t, subtract, 32, -)
-IA64_ATOMIC(8, u_int64_t, subtract, 64, -)
+#define atomic_set_8 atomic_set_acq_8
+#define atomic_clear_8 atomic_clear_acq_8
+#define atomic_add_8 atomic_add_acq_8
+#define atomic_subtract_8 atomic_subtract_acq_8
+
+#define atomic_set_16 atomic_set_acq_16
+#define atomic_clear_16 atomic_clear_acq_16
+#define atomic_add_16 atomic_add_acq_16
+#define atomic_subtract_16 atomic_subtract_acq_16
+
+#define atomic_set_32 atomic_set_acq_32
+#define atomic_clear_32 atomic_clear_acq_32
+#define atomic_add_32 atomic_add_acq_32
+#define atomic_subtract_32 atomic_subtract_acq_32
+
+#define atomic_set_64 atomic_set_acq_64
+#define atomic_clear_64 atomic_clear_acq_64
+#define atomic_add_64 atomic_add_acq_64
+#define atomic_subtract_64 atomic_subtract_acq_64
+
+#define atomic_set_char atomic_set_8
+#define atomic_clear_char atomic_clear_8
+#define atomic_add_char atomic_add_8
+#define atomic_subtract_char atomic_subtract_8
+#define atomic_set_acq_char atomic_set_acq_8
+#define atomic_clear_acq_char atomic_clear_acq_8
+#define atomic_add_acq_char atomic_add_acq_8
+#define atomic_subtract_acq_char atomic_subtract_acq_8
+#define atomic_set_rel_char atomic_set_rel_8
+#define atomic_clear_rel_char atomic_clear_rel_8
+#define atomic_add_rel_char atomic_add_rel_8
+#define atomic_subtract_rel_char atomic_subtract_rel_8
+
+#define atomic_set_short atomic_set_16
+#define atomic_clear_short atomic_clear_16
+#define atomic_add_short atomic_add_16
+#define atomic_subtract_short atomic_subtract_16
+#define atomic_set_acq_short atomic_set_acq_16
+#define atomic_clear_acq_short atomic_clear_acq_16
+#define atomic_add_acq_short atomic_add_acq_16
+#define atomic_subtract_acq_short atomic_subtract_acq_16
+#define atomic_set_rel_short atomic_set_rel_16
+#define atomic_clear_rel_short atomic_clear_rel_16
+#define atomic_add_rel_short atomic_add_rel_16
+#define atomic_subtract_rel_short atomic_subtract_rel_16
+
+#define atomic_set_int atomic_set_32
+#define atomic_clear_int atomic_clear_32
+#define atomic_add_int atomic_add_32
+#define atomic_subtract_int atomic_subtract_32
+#define atomic_set_acq_int atomic_set_acq_32
+#define atomic_clear_acq_int atomic_clear_acq_32
+#define atomic_add_acq_int atomic_add_acq_32
+#define atomic_subtract_acq_int atomic_subtract_acq_32
+#define atomic_set_rel_int atomic_set_rel_32
+#define atomic_clear_rel_int atomic_clear_rel_32
+#define atomic_add_rel_int atomic_add_rel_32
+#define atomic_subtract_rel_int atomic_subtract_rel_32
+
+#define atomic_set_long atomic_set_64
+#define atomic_clear_long atomic_clear_64
+#define atomic_add_long atomic_add_64
+#define atomic_subtract_long atomic_subtract_64
+#define atomic_set_acq_long atomic_set_acq_64
+#define atomic_clear_acq_long atomic_clear_acq_64
+#define atomic_add_acq_long atomic_add_acq_64
+#define atomic_subtract_acq_long atomic_subtract_acq_64
+#define atomic_set_rel_long atomic_set_rel_64
+#define atomic_clear_rel_long atomic_clear_rel_64
+#define atomic_add_rel_long atomic_add_rel_64
+#define atomic_subtract_rel_long atomic_subtract_rel_64
-#undef IA64_ATOMIC
#undef IA64_CMPXCHG
-#define atomic_set_8 atomic_set_acq_8
-#define atomic_clear_8 atomic_clear_acq_8
-#define atomic_add_8 atomic_add_acq_8
-#define atomic_subtract_8 atomic_subtract_acq_8
-
-#define atomic_set_16 atomic_set_acq_16
-#define atomic_clear_16 atomic_clear_acq_16
-#define atomic_add_16 atomic_add_acq_16
-#define atomic_subtract_16 atomic_subtract_acq_16
-
-#define atomic_set_32 atomic_set_acq_32
-#define atomic_clear_32 atomic_clear_acq_32
-#define atomic_add_32 atomic_add_acq_32
-#define atomic_subtract_32 atomic_subtract_acq_32
-
-#define atomic_set_64 atomic_set_acq_64
-#define atomic_clear_64 atomic_clear_acq_64
-#define atomic_add_64 atomic_add_acq_64
-#define atomic_subtract_64 atomic_subtract_acq_64
-
-#define atomic_set_char atomic_set_8
-#define atomic_clear_char atomic_clear_8
-#define atomic_add_char atomic_add_8
-#define atomic_subtract_char atomic_subtract_8
-#define atomic_set_acq_char atomic_set_acq_8
-#define atomic_clear_acq_char atomic_clear_acq_8
-#define atomic_add_acq_char atomic_add_acq_8
-#define atomic_subtract_acq_char atomic_subtract_acq_8
-#define atomic_set_rel_char atomic_set_rel_8
-#define atomic_clear_rel_char atomic_clear_rel_8
-#define atomic_add_rel_char atomic_add_rel_8
-#define atomic_subtract_rel_char atomic_subtract_rel_8
-
-#define atomic_set_short atomic_set_16
-#define atomic_clear_short atomic_clear_16
-#define atomic_add_short atomic_add_16
-#define atomic_subtract_short atomic_subtract_16
-#define atomic_set_acq_short atomic_set_acq_16
-#define atomic_clear_acq_short atomic_clear_acq_16
-#define atomic_add_acq_short atomic_add_acq_16
-#define atomic_subtract_acq_short atomic_subtract_acq_16
-#define atomic_set_rel_short atomic_set_rel_16
-#define atomic_clear_rel_short atomic_clear_rel_16
-#define atomic_add_rel_short atomic_add_rel_16
-#define atomic_subtract_rel_short atomic_subtract_rel_16
-
-#define atomic_set_int atomic_set_32
-#define atomic_clear_int atomic_clear_32
-#define atomic_add_int atomic_add_32
-#define atomic_subtract_int atomic_subtract_32
-#define atomic_set_acq_int atomic_set_acq_32
-#define atomic_clear_acq_int atomic_clear_acq_32
-#define atomic_add_acq_int atomic_add_acq_32
-#define atomic_subtract_acq_int atomic_subtract_acq_32
-#define atomic_set_rel_int atomic_set_rel_32
-#define atomic_clear_rel_int atomic_clear_rel_32
-#define atomic_add_rel_int atomic_add_rel_32
-#define atomic_subtract_rel_int atomic_subtract_rel_32
-
-#define atomic_set_long atomic_set_64
-#define atomic_clear_long atomic_clear_64
-#define atomic_add_long atomic_add_64
-#define atomic_subtract_long atomic_subtract_64
-#define atomic_set_acq_long atomic_set_acq_64
-#define atomic_clear_acq_long atomic_clear_acq_64
-#define atomic_add_acq_long atomic_add_acq_64
-#define atomic_subtract_acq_long atomic_subtract_acq_64
-#define atomic_set_rel_long atomic_set_rel_64
-#define atomic_clear_rel_long atomic_clear_rel_64
-#define atomic_add_rel_long atomic_add_rel_64
-#define atomic_subtract_rel_long atomic_subtract_rel_64
-
/*
* Atomically compare the value stored at *p with cmpval and if the
* two values are equal, update the value of *p with newval. Returns
* zero if the compare failed, nonzero otherwise.
*/
static __inline int
-atomic_cmpset_acq_32(volatile u_int32_t* p, u_int32_t cmpval, u_int32_t newval)
+atomic_cmpset_acq_32(volatile uint32_t* p, uint32_t cmpval, uint32_t newval)
{
- return ia64_cmpxchg_acq_32(p, cmpval, newval) == cmpval;
+ return (ia64_cmpxchg_acq_32(p, cmpval, newval) == cmpval);
}
static __inline int
-atomic_cmpset_rel_32(volatile u_int32_t* p, u_int32_t cmpval, u_int32_t newval)
+atomic_cmpset_rel_32(volatile uint32_t* p, uint32_t cmpval, uint32_t newval)
{
- return ia64_cmpxchg_rel_32(p, cmpval, newval) == cmpval;
+ return (ia64_cmpxchg_rel_32(p, cmpval, newval) == cmpval);
}
/*
@@ -291,46 +278,50 @@ atomic_cmpset_rel_32(volatile u_int32_t* p, u_int32_t cmpval, u_int32_t newval)
* zero if the compare failed, nonzero otherwise.
*/
static __inline int
-atomic_cmpset_acq_64(volatile u_int64_t* p, u_int64_t cmpval, u_int64_t newval)
+atomic_cmpset_acq_64(volatile uint64_t* p, uint64_t cmpval, uint64_t newval)
{
- return ia64_cmpxchg_acq_64(p, cmpval, newval) == cmpval;
+ return (ia64_cmpxchg_acq_64(p, cmpval, newval) == cmpval);
}
static __inline int
-atomic_cmpset_rel_64(volatile u_int64_t* p, u_int64_t cmpval, u_int64_t newval)
+atomic_cmpset_rel_64(volatile uint64_t* p, uint64_t cmpval, uint64_t newval)
{
- return ia64_cmpxchg_rel_64(p, cmpval, newval) == cmpval;
+ return (ia64_cmpxchg_rel_64(p, cmpval, newval) == cmpval);
}
-#define atomic_cmpset_32 atomic_cmpset_acq_32
-#define atomic_cmpset_64 atomic_cmpset_acq_64
-#define atomic_cmpset_int atomic_cmpset_32
-#define atomic_cmpset_long atomic_cmpset_64
-#define atomic_cmpset_acq_int atomic_cmpset_acq_32
-#define atomic_cmpset_rel_int atomic_cmpset_rel_32
-#define atomic_cmpset_acq_long atomic_cmpset_acq_64
-#define atomic_cmpset_rel_long atomic_cmpset_rel_64
+#define atomic_cmpset_32 atomic_cmpset_acq_32
+#define atomic_cmpset_64 atomic_cmpset_acq_64
+#define atomic_cmpset_int atomic_cmpset_32
+#define atomic_cmpset_long atomic_cmpset_64
+#define atomic_cmpset_acq_int atomic_cmpset_acq_32
+#define atomic_cmpset_rel_int atomic_cmpset_rel_32
+#define atomic_cmpset_acq_long atomic_cmpset_acq_64
+#define atomic_cmpset_rel_long atomic_cmpset_rel_64
static __inline int
atomic_cmpset_acq_ptr(volatile void *dst, void *exp, void *src)
{
- return atomic_cmpset_acq_long((volatile u_long *)dst,
- (u_long)exp, (u_long)src);
+ int ret;
+ ret = atomic_cmpset_acq_long((volatile u_long *)dst, (u_long)exp,
+ (u_long)src);
+ return (ret);
}
static __inline int
atomic_cmpset_rel_ptr(volatile void *dst, void *exp, void *src)
{
- return atomic_cmpset_rel_long((volatile u_long *)dst,
- (u_long)exp, (u_long)src);
+ int ret;
+ ret = atomic_cmpset_rel_long((volatile u_long *)dst, (u_long)exp,
+ (u_long)src);
+ return (ret);
}
-#define atomic_cmpset_ptr atomic_cmpset_acq_ptr
+#define atomic_cmpset_ptr atomic_cmpset_acq_ptr
static __inline void *
atomic_load_acq_ptr(volatile void *p)
{
- return (void *)atomic_load_acq_long((volatile u_long *)p);
+ return ((void *)atomic_load_acq_long((volatile u_long *)p));
}
static __inline void
@@ -339,24 +330,24 @@ atomic_store_rel_ptr(volatile void *p, void *v)
atomic_store_rel_long((volatile u_long *)p, (u_long)v);
}
-#define ATOMIC_PTR(NAME) \
-static __inline void \
-atomic_##NAME##_ptr(volatile void *p, uintptr_t v) \
-{ \
- atomic_##NAME##_long((volatile u_long *)p, v); \
-} \
- \
-static __inline void \
-atomic_##NAME##_acq_ptr(volatile void *p, uintptr_t v) \
-{ \
- atomic_##NAME##_acq_long((volatile u_long *)p, v);\
-} \
- \
-static __inline void \
-atomic_##NAME##_rel_ptr(volatile void *p, uintptr_t v) \
-{ \
- atomic_##NAME##_rel_long((volatile u_long *)p, v);\
-}
+#define ATOMIC_PTR(NAME) \
+ static __inline void \
+ atomic_##NAME##_ptr(volatile void *p, uintptr_t v) \
+ { \
+ atomic_##NAME##_long((volatile u_long *)p, v); \
+ } \
+ \
+ static __inline void \
+ atomic_##NAME##_acq_ptr(volatile void *p, uintptr_t v) \
+ { \
+ atomic_##NAME##_acq_long((volatile u_long *)p, v); \
+ } \
+ \
+ static __inline void \
+ atomic_##NAME##_rel_ptr(volatile void *p, uintptr_t v) \
+ { \
+ atomic_##NAME##_rel_long((volatile u_long *)p, v); \
+ }
ATOMIC_PTR(set)
ATOMIC_PTR(clear)
@@ -365,27 +356,27 @@ ATOMIC_PTR(subtract)
#undef ATOMIC_PTR
-static __inline u_int32_t
-atomic_readandclear_32(volatile u_int32_t* p)
+static __inline uint32_t
+atomic_readandclear_32(volatile uint32_t* p)
{
- u_int32_t val;
+ uint32_t val;
do {
val = *p;
} while (!atomic_cmpset_32(p, val, 0));
- return val;
+ return (val);
}
-static __inline u_int64_t
-atomic_readandclear_64(volatile u_int64_t* p)
+static __inline uint64_t
+atomic_readandclear_64(volatile uint64_t* p)
{
- u_int64_t val;
+ uint64_t val;
do {
val = *p;
} while (!atomic_cmpset_64(p, val, 0));
- return val;
+ return (val);
}
-#define atomic_readandclear_int atomic_readandclear_32
-#define atomic_readandclear_long atomic_readandclear_64
+#define atomic_readandclear_int atomic_readandclear_32
+#define atomic_readandclear_long atomic_readandclear_64
#endif /* ! _MACHINE_ATOMIC_H_ */
OpenPOWER on IntegriCloud