summaryrefslogtreecommitdiffstats
path: root/sys/amd64/include
diff options
context:
space:
mode:
authorjhb <jhb@FreeBSD.org>2000-10-20 07:00:48 +0000
committerjhb <jhb@FreeBSD.org>2000-10-20 07:00:48 +0000
commit787712af1c7ffb2642f8250f2111301ad77fdaf8 (patch)
treef7bc16926cd7cd48b8522813ef1ec7be7e9992ae /sys/amd64/include
parent16168b364a2ba06b962dc30e7a667864b5900d63 (diff)
downloadFreeBSD-src-787712af1c7ffb2642f8250f2111301ad77fdaf8.zip
FreeBSD-src-787712af1c7ffb2642f8250f2111301ad77fdaf8.tar.gz
- Expand the set of atomic operations to optionally include memory barriers
in most of the atomic operations. Now for these operations, you can use the
normal atomic operation, you can use the operation with a read barrier, or
you can use the operation with a write barrier. The function names follow
the same semantics used in the ia64 instruction set. An atomic operation
with a read barrier has the extra suffix 'acq', due to it having "acquire"
semantics. An atomic operation with a write barrier has the extra suffix
'rel'. These suffixes are inserted between the name of the operation to
perform and the typename. For example, the atomic_add_int() function now
has 3 variants:
- atomic_add_int() - this is the same as the previous function
- atomic_add_acq_int() - this function combines the add operation with a
  read memory barrier
- atomic_add_rel_int() - this function combines the add operation with a
  write memory barrier
- Add 'ptr' to the list of types that we can perform atomic operations on.
This allows one to do atomic operations on uintptr_t's. This is useful in
the mutex code, for example, because the actual mutex lock is a pointer.
- Add two new operations for doing loads and stores with memory barriers.
The new load operations use a read barrier before the load, and the new
store operations use a write barrier after the store. For example,
atomic_load_acq_int() will atomically load an integer as well as enforcing
a read barrier.
Diffstat (limited to 'sys/amd64/include')
-rw-r--r--sys/amd64/include/atomic.h110
1 file changed, 108 insertions, 2 deletions
diff --git a/sys/amd64/include/atomic.h b/sys/amd64/include/atomic.h
index b38fd83..b791da9 100644
--- a/sys/amd64/include/atomic.h
+++ b/sys/amd64/include/atomic.h
@@ -65,7 +65,7 @@
*/
#if defined(KLD_MODULE)
#define ATOMIC_ASM(NAME, TYPE, OP, V) \
- void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v);
+void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v);
int atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src);
@@ -151,6 +151,9 @@ atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src)
}
#endif /* defined(I386_CPU) */
+#define atomic_cmpset_acq_int atomic_cmpset_int
+#define atomic_cmpset_rel_int atomic_cmpset_int
+
#else
/* gcc <= 2.8 version */
#define ATOMIC_ASM(NAME, TYPE, OP, V) \
@@ -160,7 +163,9 @@ atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
__asm __volatile(MPLOCKED OP \
: "=m" (*p) \
: "ir" (V)); \
-}
+} \
+ \
+
#endif
#endif /* KLD_MODULE */
@@ -212,7 +217,67 @@ ATOMIC_ASM(subtract, long, "subl %1,%0", v)
#endif
+#undef ATOMIC_ASM
+
#ifndef WANT_FUNCTIONS
+#define ATOMIC_ACQ_REL(NAME, TYPE) \
+static __inline void \
+atomic_##NAME##_acq_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
+{ \
+ __asm __volatile("lock; addl $0,0(%esp)" : : : "memory");\
+ atomic_##NAME##_##TYPE(p, v); \
+} \
+ \
+static __inline void \
+atomic_##NAME##_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
+{ \
+ atomic_##NAME##_##TYPE(p, v); \
+}
+
+ATOMIC_ACQ_REL(set, char)
+ATOMIC_ACQ_REL(clear, char)
+ATOMIC_ACQ_REL(add, char)
+ATOMIC_ACQ_REL(subtract, char)
+ATOMIC_ACQ_REL(set, short)
+ATOMIC_ACQ_REL(clear, short)
+ATOMIC_ACQ_REL(add, short)
+ATOMIC_ACQ_REL(subtract, short)
+ATOMIC_ACQ_REL(set, int)
+ATOMIC_ACQ_REL(clear, int)
+ATOMIC_ACQ_REL(add, int)
+ATOMIC_ACQ_REL(subtract, int)
+ATOMIC_ACQ_REL(set, long)
+ATOMIC_ACQ_REL(clear, long)
+ATOMIC_ACQ_REL(add, long)
+ATOMIC_ACQ_REL(subtract, long)
+
+#undef ATOMIC_ACQ_REL
+
+/*
+ * We assume that a = b will do atomic loads and stores.
+ */
+#define ATOMIC_STORE_LOAD(TYPE) \
+static __inline u_##TYPE \
+atomic_load_acq_##TYPE(volatile u_##TYPE *p) \
+{ \
+ __asm __volatile("lock; addl $0,0(%esp)" : : : "memory");\
+ return (*p); \
+} \
+ \
+static __inline void \
+atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
+{ \
+ *p = v; \
+ __asm __volatile("" : : : "memory"); \
+}
+
+ATOMIC_STORE_LOAD(char)
+ATOMIC_STORE_LOAD(short)
+ATOMIC_STORE_LOAD(int)
+ATOMIC_STORE_LOAD(long)
+
+#undef ATOMIC_STORE_LOAD
+
static __inline int
atomic_cmpset_ptr(volatile void *dst, void *exp, void *src)
{
@@ -221,6 +286,47 @@ atomic_cmpset_ptr(volatile void *dst, void *exp, void *src)
atomic_cmpset_int((volatile u_int *)dst, (u_int)exp, (u_int)src));
}
+#define atomic_cmpset_acq_ptr atomic_cmpset_ptr
+#define atomic_cmpset_rel_ptr atomic_cmpset_ptr
+
+static __inline void *
+atomic_load_acq_ptr(volatile void *p)
+{
+ return (void *)atomic_load_acq_int((volatile u_int *)p);
+}
+
+static __inline void
+atomic_store_rel_ptr(volatile void *p, void *v)
+{
+ atomic_store_rel_int((volatile u_int *)p, (u_int)v);
+}
+
+#define ATOMIC_PTR(NAME) \
+static __inline void \
+atomic_##NAME##_ptr(volatile void *p, uintptr_t v) \
+{ \
+ atomic_##NAME##_int((volatile u_int *)p, v); \
+} \
+ \
+static __inline void \
+atomic_##NAME##_acq_ptr(volatile void *p, uintptr_t v) \
+{ \
+ atomic_##NAME##_acq_int((volatile u_int *)p, v);\
+} \
+ \
+static __inline void \
+atomic_##NAME##_rel_ptr(volatile void *p, uintptr_t v) \
+{ \
+ atomic_##NAME##_rel_int((volatile u_int *)p, v);\
+}
+
+ATOMIC_PTR(set)
+ATOMIC_PTR(clear)
+ATOMIC_PTR(add)
+ATOMIC_PTR(subtract)
+
+#undef ATOMIC_PTR
+
static __inline u_int
atomic_readandclear_int(volatile u_int *addr)
{
OpenPOWER on IntegriCloud