-rw-r--r--  sys/alpha/include/atomic.h  169
-rw-r--r--  sys/amd64/include/atomic.h  110
-rw-r--r--  sys/i386/include/atomic.h   110
-rw-r--r--  sys/ia64/include/atomic.h   264
4 files changed, 592 insertions, 61 deletions
diff --git a/sys/alpha/include/atomic.h b/sys/alpha/include/atomic.h
index 3532a06..9a6adc9 100644
--- a/sys/alpha/include/atomic.h
+++ b/sys/alpha/include/atomic.h
@@ -29,6 +29,8 @@
#ifndef _MACHINE_ATOMIC_H_
#define _MACHINE_ATOMIC_H_
+#include <machine/alpha_cpu.h>
+
/*
* Various simple arithmetic on memory which is atomic in the presence
* of interrupts and SMP safe.
@@ -250,6 +252,92 @@ static __inline u_int64_t atomic_readandclear_64(volatile u_int64_t *addr)
#define atomic_subtract_long atomic_subtract_64
#define atomic_readandclear_long atomic_readandclear_64
+#define ATOMIC_ACQ_REL(NAME, WIDTH, TYPE) \
+static __inline void \
+atomic_##NAME##_acq_##WIDTH(volatile u_int##WIDTH##_t *p, u_int##WIDTH##_t v)\
+{ \
+ alpha_mb(); \
+ atomic_##NAME##_##WIDTH(p, v); \
+} \
+ \
+static __inline void \
+atomic_##NAME##_rel_##WIDTH(volatile u_int##WIDTH##_t *p, u_int##WIDTH##_t v)\
+{ \
+ atomic_##NAME##_##WIDTH(p, v); \
+ alpha_wmb(); \
+} \
+ \
+static __inline void \
+atomic_##NAME##_acq_##TYPE(volatile u_int##WIDTH##_t *p, u_int##WIDTH##_t v)\
+{ \
+ alpha_mb(); \
+ atomic_##NAME##_##WIDTH(p, v); \
+} \
+ \
+static __inline void \
+atomic_##NAME##_rel_##TYPE(volatile u_int##WIDTH##_t *p, u_int##WIDTH##_t v)\
+{ \
+ atomic_##NAME##_##WIDTH(p, v); \
+ alpha_wmb(); \
+}
+
+ATOMIC_ACQ_REL(set, 8, char)
+ATOMIC_ACQ_REL(clear, 8, char)
+ATOMIC_ACQ_REL(add, 8, char)
+ATOMIC_ACQ_REL(subtract, 8, char)
+ATOMIC_ACQ_REL(set, 16, short)
+ATOMIC_ACQ_REL(clear, 16, short)
+ATOMIC_ACQ_REL(add, 16, short)
+ATOMIC_ACQ_REL(subtract, 16, short)
+ATOMIC_ACQ_REL(set, 32, int)
+ATOMIC_ACQ_REL(clear, 32, int)
+ATOMIC_ACQ_REL(add, 32, int)
+ATOMIC_ACQ_REL(subtract, 32, int)
+ATOMIC_ACQ_REL(set, 64, long)
+ATOMIC_ACQ_REL(clear, 64, long)
+ATOMIC_ACQ_REL(add, 64, long)
+ATOMIC_ACQ_REL(subtract, 64, long)
+
+#undef ATOMIC_ACQ_REL
+
+/*
+ * We assume that a = b will do atomic loads and stores.
+ */
+#define ATOMIC_STORE_LOAD(TYPE, WIDTH) \
+static __inline u_##TYPE \
+atomic_load_acq_##WIDTH(volatile u_##TYPE *p) \
+{ \
+ alpha_mb(); \
+ return (*p); \
+} \
+ \
+static __inline void \
+atomic_store_rel_##WIDTH(volatile u_##TYPE *p, u_##TYPE v)\
+{ \
+ *p = v; \
+ alpha_wmb(); \
+} \
+static __inline u_##TYPE \
+atomic_load_acq_##TYPE(volatile u_##TYPE *p) \
+{ \
+ alpha_mb(); \
+ return (*p); \
+} \
+ \
+static __inline void \
+atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
+{ \
+ *p = v; \
+ alpha_wmb(); \
+}
+
+ATOMIC_STORE_LOAD(char, 8)
+ATOMIC_STORE_LOAD(short, 16)
+ATOMIC_STORE_LOAD(int, 32)
+ATOMIC_STORE_LOAD(long, 64)
+
+#undef ATOMIC_STORE_LOAD
+
/*
* Atomically compare the value stored at *p with cmpval and if the
* two values are equal, update the value of *p with newval. Returns
@@ -317,4 +405,85 @@ atomic_cmpset_ptr(volatile void *dst, void *exp, void *src)
atomic_cmpset_long((volatile u_long *)dst, (u_long)exp, (u_long)src));
}
+static __inline u_int32_t
+atomic_cmpset_acq_32(volatile u_int32_t *p, u_int32_t cmpval, u_int32_t newval)
+{
+ alpha_mb();
+ return (atomic_cmpset_32(p, cmpval, newval));
+}
+
+static __inline u_int32_t
+atomic_cmpset_rel_32(volatile u_int32_t *p, u_int32_t cmpval, u_int32_t newval)
+{
+ int retval;
+
+ retval = atomic_cmpset_32(p, cmpval, newval);
+ alpha_wmb();
+ return (retval);
+}
+
+static __inline u_int64_t
+atomic_cmpset_acq_64(volatile u_int64_t *p, u_int64_t cmpval, u_int64_t newval)
+{
+ alpha_mb();
+ return (atomic_cmpset_64(p, cmpval, newval));
+}
+
+static __inline u_int64_t
+atomic_cmpset_rel_64(volatile u_int64_t *p, u_int64_t cmpval, u_int64_t newval)
+{
+ int retval;
+
+ retval = atomic_cmpset_64(p, cmpval, newval);
+ alpha_wmb();
+ return (retval);
+}
+
+#define atomic_cmpset_acq_int atomic_cmpset_acq_32
+#define atomic_cmpset_rel_int atomic_cmpset_rel_32
+#define atomic_cmpset_acq_long atomic_cmpset_acq_64
+#define atomic_cmpset_rel_long atomic_cmpset_rel_64
+#define atomic_cmpset_acq_ptr atomic_cmpset_acq_long
+#define atomic_cmpset_rel_ptr atomic_cmpset_rel_long
+
+static __inline void *
+atomic_load_acq_ptr(volatile void *p)
+{
+ return (void *)atomic_load_acq_long((volatile u_long *)p);
+}
+
+static __inline void
+atomic_store_rel_ptr(volatile void *p, void *v)
+{
+ atomic_store_rel_long((volatile u_long *)p, (u_long)v);
+}
+
+#define ATOMIC_PTR(NAME) \
+static __inline void \
+atomic_##NAME##_ptr(volatile void *p, uintptr_t v) \
+{ \
+ atomic_##NAME##_long((volatile u_long *)p, v); \
+} \
+ \
+static __inline void \
+atomic_##NAME##_acq_ptr(volatile void *p, uintptr_t v) \
+{ \
+ alpha_mb(); \
+ atomic_##NAME##_acq_long((volatile u_long *)p, v);\
+} \
+ \
+static __inline void \
+atomic_##NAME##_rel_ptr(volatile void *p, uintptr_t v) \
+{ \
+ atomic_##NAME##_rel_long((volatile u_long *)p, v);\
+ alpha_wmb(); \
+}
+
+ATOMIC_PTR(set)
+ATOMIC_PTR(clear)
+ATOMIC_PTR(add)
+ATOMIC_PTR(subtract)
+
+#undef ATOMIC_PTR
+
#endif /* ! _MACHINE_ATOMIC_H_ */
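The acquire and release variants added above pair the memory barrier with the operation itself, so lock primitives no longer have to issue alpha_mb()/alpha_wmb() by hand around each atomic op. Below is a minimal usage sketch, assuming only the interfaces declared in this header; the sketch_mtx type and function names are illustrative and not part of this commit.

#include <machine/atomic.h>

struct sketch_mtx {
        volatile u_int32_t mtx_lock;    /* 0 = free, 1 = held */
};

/*
 * Spin until the 0 -> 1 transition succeeds.  atomic_cmpset_acq_32()
 * combines the compare-and-set with an acquire barrier, so loads and
 * stores inside the critical section cannot move before the lock is
 * taken.
 */
static __inline void
sketch_mtx_lock(struct sketch_mtx *m)
{
        while (atomic_cmpset_acq_32(&m->mtx_lock, 0, 1) == 0)
                ;       /* spin */
}

/*
 * The release store makes every write performed while the lock was
 * held visible before the lock word itself is cleared.
 */
static __inline void
sketch_mtx_unlock(struct sketch_mtx *m)
{
        atomic_store_rel_32(&m->mtx_lock, 0);
}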
diff --git a/sys/amd64/include/atomic.h b/sys/amd64/include/atomic.h
index b38fd83..b791da9 100644
--- a/sys/amd64/include/atomic.h
+++ b/sys/amd64/include/atomic.h
@@ -65,7 +65,7 @@
*/
#if defined(KLD_MODULE)
#define ATOMIC_ASM(NAME, TYPE, OP, V) \
- void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v);
+void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v);
int atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src);
@@ -151,6 +151,9 @@ atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src)
}
#endif /* defined(I386_CPU) */
+#define atomic_cmpset_acq_int atomic_cmpset_int
+#define atomic_cmpset_rel_int atomic_cmpset_int
+
#else
/* gcc <= 2.8 version */
#define ATOMIC_ASM(NAME, TYPE, OP, V) \
@@ -160,7 +163,9 @@ atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
__asm __volatile(MPLOCKED OP \
: "=m" (*p) \
: "ir" (V)); \
-}
+} \
+ \
+
#endif
#endif /* KLD_MODULE */
@@ -212,7 +217,67 @@ ATOMIC_ASM(subtract, long, "subl %1,%0", v)
#endif
+#undef ATOMIC_ASM
+
#ifndef WANT_FUNCTIONS
+#define ATOMIC_ACQ_REL(NAME, TYPE) \
+static __inline void \
+atomic_##NAME##_acq_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
+{ \
+ __asm __volatile("lock; addl $0,0(%%esp)" : : : "memory");\
+ atomic_##NAME##_##TYPE(p, v); \
+} \
+ \
+static __inline void \
+atomic_##NAME##_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
+{ \
+ atomic_##NAME##_##TYPE(p, v); \
+}
+
+ATOMIC_ACQ_REL(set, char)
+ATOMIC_ACQ_REL(clear, char)
+ATOMIC_ACQ_REL(add, char)
+ATOMIC_ACQ_REL(subtract, char)
+ATOMIC_ACQ_REL(set, short)
+ATOMIC_ACQ_REL(clear, short)
+ATOMIC_ACQ_REL(add, short)
+ATOMIC_ACQ_REL(subtract, short)
+ATOMIC_ACQ_REL(set, int)
+ATOMIC_ACQ_REL(clear, int)
+ATOMIC_ACQ_REL(add, int)
+ATOMIC_ACQ_REL(subtract, int)
+ATOMIC_ACQ_REL(set, long)
+ATOMIC_ACQ_REL(clear, long)
+ATOMIC_ACQ_REL(add, long)
+ATOMIC_ACQ_REL(subtract, long)
+
+#undef ATOMIC_ACQ_REL
+
+/*
+ * We assume that a = b will do atomic loads and stores.
+ */
+#define ATOMIC_STORE_LOAD(TYPE) \
+static __inline u_##TYPE \
+atomic_load_acq_##TYPE(volatile u_##TYPE *p) \
+{ \
+ __asm __volatile("lock; addl $0,0(%%esp)" : : : "memory");\
+ return (*p); \
+} \
+ \
+static __inline void \
+atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
+{ \
+ *p = v; \
+ __asm __volatile("" : : : "memory"); \
+}
+
+ATOMIC_STORE_LOAD(char)
+ATOMIC_STORE_LOAD(short)
+ATOMIC_STORE_LOAD(int)
+ATOMIC_STORE_LOAD(long)
+
+#undef ATOMIC_STORE_LOAD
+
static __inline int
atomic_cmpset_ptr(volatile void *dst, void *exp, void *src)
{
@@ -221,6 +286,47 @@ atomic_cmpset_ptr(volatile void *dst, void *exp, void *src)
atomic_cmpset_int((volatile u_int *)dst, (u_int)exp, (u_int)src));
}
+#define atomic_cmpset_acq_ptr atomic_cmpset_ptr
+#define atomic_cmpset_rel_ptr atomic_cmpset_ptr
+
+static __inline void *
+atomic_load_acq_ptr(volatile void *p)
+{
+ return (void *)atomic_load_acq_int((volatile u_int *)p);
+}
+
+static __inline void
+atomic_store_rel_ptr(volatile void *p, void *v)
+{
+ atomic_store_rel_int((volatile u_int *)p, (u_int)v);
+}
+
+#define ATOMIC_PTR(NAME) \
+static __inline void \
+atomic_##NAME##_ptr(volatile void *p, uintptr_t v) \
+{ \
+ atomic_##NAME##_int((volatile u_int *)p, v); \
+} \
+ \
+static __inline void \
+atomic_##NAME##_acq_ptr(volatile void *p, uintptr_t v) \
+{ \
+ atomic_##NAME##_acq_int((volatile u_int *)p, v);\
+} \
+ \
+static __inline void \
+atomic_##NAME##_rel_ptr(volatile void *p, uintptr_t v) \
+{ \
+ atomic_##NAME##_rel_int((volatile u_int *)p, v);\
+}
+
+ATOMIC_PTR(set)
+ATOMIC_PTR(clear)
+ATOMIC_PTR(add)
+ATOMIC_PTR(subtract)
+
+#undef ATOMIC_PTR
+
static __inline u_int
atomic_readandclear_int(volatile u_int *addr)
{
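On i386/amd64, plain aligned loads and stores are already atomic (as the comment in the diff notes), so atomic_load_acq_*() only needs the serializing "lock; addl $0,0(%%esp)" no-op and atomic_store_rel_*() only needs a compiler barrier. A hedged sketch of the producer/consumer handoff these routines support; the variable and function names are illustrative, not from this commit.

#include <machine/atomic.h>

static volatile u_int data_value;       /* payload */
static volatile u_int data_ready;       /* 0 = not published, 1 = published */

/*
 * Producer: write the payload first, then publish the flag with
 * release semantics so the payload store cannot be reordered past it.
 */
static void
sketch_publish(u_int v)
{
        data_value = v;
        atomic_store_rel_int(&data_ready, 1);
}

/*
 * Consumer: poll the flag with acquire semantics; once it reads 1,
 * the producer's payload store is guaranteed to be visible.
 */
static u_int
sketch_consume(void)
{
        while (atomic_load_acq_int(&data_ready) == 0)
                ;       /* spin */
        return (data_value);
}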
diff --git a/sys/i386/include/atomic.h b/sys/i386/include/atomic.h
index b38fd83..b791da9 100644
--- a/sys/i386/include/atomic.h
+++ b/sys/i386/include/atomic.h
@@ -65,7 +65,7 @@
*/
#if defined(KLD_MODULE)
#define ATOMIC_ASM(NAME, TYPE, OP, V) \
- void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v);
+void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v);
int atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src);
@@ -151,6 +151,9 @@ atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src)
}
#endif /* defined(I386_CPU) */
+#define atomic_cmpset_acq_int atomic_cmpset_int
+#define atomic_cmpset_rel_int atomic_cmpset_int
+
#else
/* gcc <= 2.8 version */
#define ATOMIC_ASM(NAME, TYPE, OP, V) \
@@ -160,7 +163,9 @@ atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
__asm __volatile(MPLOCKED OP \
: "=m" (*p) \
: "ir" (V)); \
-}
+} \
+ \
+
#endif
#endif /* KLD_MODULE */
@@ -212,7 +217,67 @@ ATOMIC_ASM(subtract, long, "subl %1,%0", v)
#endif
+#undef ATOMIC_ASM
+
#ifndef WANT_FUNCTIONS
+#define ATOMIC_ACQ_REL(NAME, TYPE) \
+static __inline void \
+atomic_##NAME##_acq_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
+{ \
+ __asm __volatile("lock; addl $0,0(%%esp)" : : : "memory");\
+ atomic_##NAME##_##TYPE(p, v); \
+} \
+ \
+static __inline void \
+atomic_##NAME##_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
+{ \
+ atomic_##NAME##_##TYPE(p, v); \
+}
+
+ATOMIC_ACQ_REL(set, char)
+ATOMIC_ACQ_REL(clear, char)
+ATOMIC_ACQ_REL(add, char)
+ATOMIC_ACQ_REL(subtract, char)
+ATOMIC_ACQ_REL(set, short)
+ATOMIC_ACQ_REL(clear, short)
+ATOMIC_ACQ_REL(add, short)
+ATOMIC_ACQ_REL(subtract, short)
+ATOMIC_ACQ_REL(set, int)
+ATOMIC_ACQ_REL(clear, int)
+ATOMIC_ACQ_REL(add, int)
+ATOMIC_ACQ_REL(subtract, int)
+ATOMIC_ACQ_REL(set, long)
+ATOMIC_ACQ_REL(clear, long)
+ATOMIC_ACQ_REL(add, long)
+ATOMIC_ACQ_REL(subtract, long)
+
+#undef ATOMIC_ACQ_REL
+
+/*
+ * We assume that a = b will do atomic loads and stores.
+ */
+#define ATOMIC_STORE_LOAD(TYPE) \
+static __inline u_##TYPE \
+atomic_load_acq_##TYPE(volatile u_##TYPE *p) \
+{ \
+ __asm __volatile("lock; addl $0,0(%%esp)" : : : "memory");\
+ return (*p); \
+} \
+ \
+static __inline void \
+atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
+{ \
+ *p = v; \
+ __asm __volatile("" : : : "memory"); \
+}
+
+ATOMIC_STORE_LOAD(char)
+ATOMIC_STORE_LOAD(short)
+ATOMIC_STORE_LOAD(int)
+ATOMIC_STORE_LOAD(long)
+
+#undef ATOMIC_STORE_LOAD
+
static __inline int
atomic_cmpset_ptr(volatile void *dst, void *exp, void *src)
{
@@ -221,6 +286,47 @@ atomic_cmpset_ptr(volatile void *dst, void *exp, void *src)
atomic_cmpset_int((volatile u_int *)dst, (u_int)exp, (u_int)src));
}
+#define atomic_cmpset_acq_ptr atomic_cmpset_ptr
+#define atomic_cmpset_rel_ptr atomic_cmpset_ptr
+
+static __inline void *
+atomic_load_acq_ptr(volatile void *p)
+{
+ return (void *)atomic_load_acq_int((volatile u_int *)p);
+}
+
+static __inline void
+atomic_store_rel_ptr(volatile void *p, void *v)
+{
+ atomic_store_rel_int((volatile u_int *)p, (u_int)v);
+}
+
+#define ATOMIC_PTR(NAME) \
+static __inline void \
+atomic_##NAME##_ptr(volatile void *p, uintptr_t v) \
+{ \
+ atomic_##NAME##_int((volatile u_int *)p, v); \
+} \
+ \
+static __inline void \
+atomic_##NAME##_acq_ptr(volatile void *p, uintptr_t v) \
+{ \
+ atomic_##NAME##_acq_int((volatile u_int *)p, v);\
+} \
+ \
+static __inline void \
+atomic_##NAME##_rel_ptr(volatile void *p, uintptr_t v) \
+{ \
+ atomic_##NAME##_rel_int((volatile u_int *)p, v);\
+}
+
+ATOMIC_PTR(set)
+ATOMIC_PTR(clear)
+ATOMIC_PTR(add)
+ATOMIC_PTR(subtract)
+
+#undef ATOMIC_PTR
+
static __inline u_int
atomic_readandclear_int(volatile u_int *addr)
{
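The i386 hunks mirror the amd64 file exactly; the new _ptr wrappers simply cast through the machine word type. One place the compare-and-set pointer interface is useful is lock-free list insertion. A rough sketch, assuming only atomic_cmpset_ptr() from this header; struct node and sketch_push() are illustrative names.

#include <sys/types.h>
#include <machine/atomic.h>

struct node {
        struct node *next;
        int value;
};

/*
 * Push a node onto a LIFO list without taking a lock: reload the head
 * and retry whenever another CPU changed it between the read and the
 * compare-and-set.
 */
static void
sketch_push(struct node * volatile *head, struct node *n)
{
        struct node *old;

        do {
                old = *head;
                n->next = old;
        } while (atomic_cmpset_ptr((volatile void *)head, old, n) == 0);
}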
diff --git a/sys/ia64/include/atomic.h b/sys/ia64/include/atomic.h
index d4d2bbc..5966930 100644
--- a/sys/ia64/include/atomic.h
+++ b/sys/ia64/include/atomic.h
@@ -80,80 +80,191 @@ ia64_cmpxchg_rel_64(volatile u_int64_t* p, u_int64_t cmpval, u_int64_t newval)
return IA64_CMPXCHG(8, rel, u_int64_t, p, cmpval, newval);
}
-/*
- * Store with release semantics is used to release locks.
- */
-static __inline void
-ia64_st_rel_32(volatile u_int32_t* p, u_int32_t v)
-{
- __asm __volatile ("st4.rel %0=%1"
- : "=m" (*p)
- : "r" (v)
- : "memory");
+#define ATOMIC_STORE_LOAD(type, width, size) \
+static __inline u_int##width##_t \
+ia64_ld_acq_##width(volatile u_int##width##_t* p) \
+{ \
+ u_int##width##_t v; \
+ \
+ __asm __volatile ("ld" size ".acq %0=%1" \
+ : "=r" (v) \
+ : "m" (*p) \
+ : "memory"); \
+ return (v); \
+} \
+ \
+static __inline u_int##width##_t \
+atomic_load_acq_##width(volatile u_int##width##_t* p) \
+{ \
+ u_int##width##_t v; \
+ \
+ __asm __volatile ("ld" size ".acq %0=%1" \
+ : "=r" (v) \
+ : "m" (*p) \
+ : "memory"); \
+ return (v); \
+} \
+ \
+static __inline u_int##width##_t \
+atomic_load_acq_##type(volatile u_int##width##_t* p) \
+{ \
+ u_int##width##_t v; \
+ \
+ __asm __volatile ("ld" size ".acq %0=%1" \
+ : "=r" (v) \
+ : "m" (*p) \
+ : "memory"); \
+ return (v); \
+} \
+ \
+static __inline void \
+ia64_st_rel_##width(volatile u_int##width##_t* p, u_int##width##_t v)\
+{ \
+ __asm __volatile ("st" size ".rel %0=%1" \
+ : "=m" (*p) \
+ : "r" (v) \
+ : "memory"); \
+} \
+ \
+static __inline void \
+atomic_store_rel_##width(volatile u_int##width##_t* p, u_int##width##_t v)\
+{ \
+ __asm __volatile ("st" size ".rel %0=%1" \
+ : "=m" (*p) \
+ : "r" (v) \
+ : "memory"); \
+} \
+ \
+static __inline void \
+atomic_store_rel_##type(volatile u_int##width##_t* p, u_int##width##_t v)\
+{ \
+ __asm __volatile ("st" size ".rel %0=%1" \
+ : "=m" (*p) \
+ : "r" (v) \
+ : "memory"); \
}
-static __inline void
-ia64_st_rel_64(volatile u_int64_t* p, u_int64_t v)
-{
- __asm __volatile ("st8.rel %0=%1"
- : "=m" (*p)
- : "r" (v)
- : "memory");
-}
+ATOMIC_STORE_LOAD(char, 8, "1")
+ATOMIC_STORE_LOAD(short, 16, "2")
+ATOMIC_STORE_LOAD(int, 32, "4")
+ATOMIC_STORE_LOAD(long, 64, "8")
+
+#undef ATOMIC_STORE_LOAD
-#define IA64_ATOMIC(sz, type, name, op) \
+#define IA64_ATOMIC(sz, type, name, width, op) \
\
static __inline void \
-atomic_##name(volatile type *p, type v) \
+atomic_##name##_acq_##width(volatile type *p, type v) \
{ \
type old; \
do { \
old = *p; \
} while (IA64_CMPXCHG(sz, acq, type, p, old, old op v) != old); \
+} \
+ \
+static __inline void \
+atomic_##name##_rel_##width(volatile type *p, type v) \
+{ \
+ type old; \
+ do { \
+ old = *p; \
+ } while (IA64_CMPXCHG(sz, rel, type, p, old, old op v) != old); \
}
-IA64_ATOMIC(1, u_int8_t, set_8, |)
-IA64_ATOMIC(2, u_int16_t, set_16, |)
-IA64_ATOMIC(4, u_int32_t, set_32, |)
-IA64_ATOMIC(8, u_int64_t, set_64, |)
+IA64_ATOMIC(1, u_int8_t, set, 8, |)
+IA64_ATOMIC(2, u_int16_t, set, 16, |)
+IA64_ATOMIC(4, u_int32_t, set, 32, |)
+IA64_ATOMIC(8, u_int64_t, set, 64, |)
-IA64_ATOMIC(1, u_int8_t, clear_8, &~)
-IA64_ATOMIC(2, u_int16_t, clear_16, &~)
-IA64_ATOMIC(4, u_int32_t, clear_32, &~)
-IA64_ATOMIC(8, u_int64_t, clear_64, &~)
+IA64_ATOMIC(1, u_int8_t, clear, 8, &~)
+IA64_ATOMIC(2, u_int16_t, clear, 16, &~)
+IA64_ATOMIC(4, u_int32_t, clear, 32, &~)
+IA64_ATOMIC(8, u_int64_t, clear, 64, &~)
-IA64_ATOMIC(1, u_int8_t, add_8, +)
-IA64_ATOMIC(2, u_int16_t, add_16, +)
-IA64_ATOMIC(4, u_int32_t, add_32, +)
-IA64_ATOMIC(8, u_int64_t, add_64, +)
+IA64_ATOMIC(1, u_int8_t, add, 8, +)
+IA64_ATOMIC(2, u_int16_t, add, 16, +)
+IA64_ATOMIC(4, u_int32_t, add, 32, +)
+IA64_ATOMIC(8, u_int64_t, add, 64, +)
-IA64_ATOMIC(1, u_int8_t, subtract_8, -)
-IA64_ATOMIC(2, u_int16_t, subtract_16, -)
-IA64_ATOMIC(4, u_int32_t, subtract_32, -)
-IA64_ATOMIC(8, u_int64_t, subtract_64, -)
+IA64_ATOMIC(1, u_int8_t, subtract, 8, -)
+IA64_ATOMIC(2, u_int16_t, subtract, 16, -)
+IA64_ATOMIC(4, u_int32_t, subtract, 32, -)
+IA64_ATOMIC(8, u_int64_t, subtract, 64, -)
#undef IA64_ATOMIC
#undef IA64_CMPXCHG
-#define atomic_set_char atomic_set_8
-#define atomic_clear_char atomic_clear_8
-#define atomic_add_char atomic_add_8
-#define atomic_subtract_char atomic_subtract_8
+#define atomic_set_8 atomic_set_acq_8
+#define atomic_clear_8 atomic_clear_acq_8
+#define atomic_add_8 atomic_add_acq_8
+#define atomic_subtract_8 atomic_subtract_acq_8
+
+#define atomic_set_16 atomic_set_acq_16
+#define atomic_clear_16 atomic_clear_acq_16
+#define atomic_add_16 atomic_add_acq_16
+#define atomic_subtract_16 atomic_subtract_acq_16
+
+#define atomic_set_32 atomic_set_acq_32
+#define atomic_clear_32 atomic_clear_acq_32
+#define atomic_add_32 atomic_add_acq_32
+#define atomic_subtract_32 atomic_subtract_acq_32
+
+#define atomic_set_64 atomic_set_acq_64
+#define atomic_clear_64 atomic_clear_acq_64
+#define atomic_add_64 atomic_add_acq_64
+#define atomic_subtract_64 atomic_subtract_acq_64
-#define atomic_set_short atomic_set_16
-#define atomic_clear_short atomic_clear_16
-#define atomic_add_short atomic_add_16
-#define atomic_subtract_short atomic_subtract_16
+#define atomic_set_char atomic_set_8
+#define atomic_clear_char atomic_clear_8
+#define atomic_add_char atomic_add_8
+#define atomic_subtract_char atomic_subtract_8
+#define atomic_set_acq_char atomic_set_acq_8
+#define atomic_clear_acq_char atomic_clear_acq_8
+#define atomic_add_acq_char atomic_add_acq_8
+#define atomic_subtract_acq_char atomic_subtract_acq_8
+#define atomic_set_rel_char atomic_set_rel_8
+#define atomic_clear_rel_char atomic_clear_rel_8
+#define atomic_add_rel_char atomic_add_rel_8
+#define atomic_subtract_rel_char atomic_subtract_rel_8
-#define atomic_set_int atomic_set_32
-#define atomic_clear_int atomic_clear_32
-#define atomic_add_int atomic_add_32
-#define atomic_subtract_int atomic_subtract_32
+#define atomic_set_short atomic_set_16
+#define atomic_clear_short atomic_clear_16
+#define atomic_add_short atomic_add_16
+#define atomic_subtract_short atomic_subtract_16
+#define atomic_set_acq_short atomic_set_acq_16
+#define atomic_clear_acq_short atomic_clear_acq_16
+#define atomic_add_acq_short atomic_add_acq_16
+#define atomic_subtract_acq_short atomic_subtract_acq_16
+#define atomic_set_rel_short atomic_set_rel_16
+#define atomic_clear_rel_short atomic_clear_rel_16
+#define atomic_add_rel_short atomic_add_rel_16
+#define atomic_subtract_rel_short atomic_subtract_rel_16
-#define atomic_set_long atomic_set_64
-#define atomic_clear_long atomic_clear_64
-#define atomic_add_long atomic_add_64
-#define atomic_subtract_long atomic_subtract_64
+#define atomic_set_int atomic_set_32
+#define atomic_clear_int atomic_clear_32
+#define atomic_add_int atomic_add_32
+#define atomic_subtract_int atomic_subtract_32
+#define atomic_set_acq_int atomic_set_acq_32
+#define atomic_clear_acq_int atomic_clear_acq_32
+#define atomic_add_acq_int atomic_add_acq_32
+#define atomic_subtract_acq_int atomic_subtract_acq_32
+#define atomic_set_rel_int atomic_set_rel_32
+#define atomic_clear_rel_int atomic_clear_rel_32
+#define atomic_add_rel_int atomic_add_rel_32
+#define atomic_subtract_rel_int atomic_subtract_rel_32
+
+#define atomic_set_long atomic_set_64
+#define atomic_clear_long atomic_clear_64
+#define atomic_add_long atomic_add_64
+#define atomic_subtract_long atomic_subtract_64
+#define atomic_set_acq_long atomic_set_acq_64
+#define atomic_clear_acq_long atomic_clear_acq_64
+#define atomic_add_acq_long atomic_add_acq_64
+#define atomic_subtract_acq_long atomic_subtract_acq_64
+#define atomic_set_rel_long atomic_set_rel_64
+#define atomic_clear_rel_long atomic_clear_rel_64
+#define atomic_add_rel_long atomic_add_rel_64
+#define atomic_subtract_rel_long atomic_subtract_rel_64
/*
* Atomically compare the value stored at *p with cmpval and if the
@@ -161,30 +272,69 @@ IA64_ATOMIC(8, u_int64_t, subtract_64, -)
* zero if the compare failed, nonzero otherwise.
*/
static __inline int
-atomic_cmpset_32(volatile u_int32_t* p, u_int32_t cmpval, u_int32_t newval)
+atomic_cmpset_acq_32(volatile u_int32_t* p, u_int32_t cmpval, u_int32_t newval)
{
return ia64_cmpxchg_acq_32(p, cmpval, newval) == cmpval;
}
+static __inline int
+atomic_cmpset_rel_32(volatile u_int32_t* p, u_int32_t cmpval, u_int32_t newval)
+{
+ return ia64_cmpxchg_rel_32(p, cmpval, newval) == cmpval;
+}
+
/*
* Atomically compare the value stored at *p with cmpval and if the
* two values are equal, update the value of *p with newval. Returns
* zero if the compare failed, nonzero otherwise.
*/
static __inline int
-atomic_cmpset_64(volatile u_int64_t* p, u_int64_t cmpval, u_int64_t newval)
+atomic_cmpset_acq_64(volatile u_int64_t* p, u_int64_t cmpval, u_int64_t newval)
{
return ia64_cmpxchg_acq_64(p, cmpval, newval) == cmpval;
}
+static __inline int
+atomic_cmpset_rel_64(volatile u_int64_t* p, u_int64_t cmpval, u_int64_t newval)
+{
+ return ia64_cmpxchg_rel_64(p, cmpval, newval) == cmpval;
+}
+
+#define atomic_cmpset_32 atomic_cmpset_acq_32
+#define atomic_cmpset_64 atomic_cmpset_acq_64
#define atomic_cmpset_int atomic_cmpset_32
#define atomic_cmpset_long atomic_cmpset_64
+#define atomic_cmpset_acq_int atomic_cmpset_acq_32
+#define atomic_cmpset_rel_int atomic_cmpset_rel_32
+#define atomic_cmpset_acq_long atomic_cmpset_acq_64
+#define atomic_cmpset_rel_long atomic_cmpset_rel_64
static __inline int
-atomic_cmpset_ptr(volatile void *dst, void *exp, void *src)
+atomic_cmpset_acq_ptr(volatile void *dst, void *exp, void *src)
+{
+ return atomic_cmpset_acq_long((volatile u_long *)dst,
+ (u_long)exp, (u_long)src);
+}
+
+static __inline int
+atomic_cmpset_rel_ptr(volatile void *dst, void *exp, void *src)
+{
+ return atomic_cmpset_rel_long((volatile u_long *)dst,
+ (u_long)exp, (u_long)src);
+}
+
+#define atomic_cmpset_ptr atomic_cmpset_acq_ptr
+
+static __inline void *
+atomic_load_acq_ptr(volatile void *p)
+{
+ return (void *)atomic_load_acq_long((volatile u_long *)p);
+}
+
+static __inline void
+atomic_store_rel_ptr(volatile void *p, void *v)
{
- return atomic_cmpset_long((volatile u_long *)dst,
- (u_long)exp, (u_long)src);
+ atomic_store_rel_long((volatile u_long *)p, (u_long)v);
}
static __inline u_int32_t
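For reference, the IA64_ATOMIC macro above turns every arithmetic operation into a compare-exchange retry loop. Below is a sketch of roughly what the acquire form of the 32-bit add expands to, written out long-hand in terms of ia64_cmpxchg_acq_32() from this header; the sketch_ name is illustrative only.

#include <machine/atomic.h>

/*
 * Read the current value, compute the new one, and retry the
 * compare-exchange until no other CPU modified the word in between.
 * ia64_cmpxchg_acq_32() returns the value it found at *p, so the loop
 * exits only when that value matches the one the update was based on.
 */
static __inline void
sketch_atomic_add_acq_32(volatile u_int32_t *p, u_int32_t v)
{
        u_int32_t old;

        do {
                old = *p;
        } while (ia64_cmpxchg_acq_32(p, old, old + v) != old);
}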