summaryrefslogtreecommitdiffstats
path: root/sys/amd64/include/atomic.h
diff options
context:
space:
mode:
author: kib <kib@FreeBSD.org> 2015-06-29 09:58:40 +0000
committer: kib <kib@FreeBSD.org> 2015-06-29 09:58:40 +0000
commit: f6cfae6dab50dc678aaac4311af58a17c322cf97 (patch)
tree: 84c4fa88f9644193bcb94c03b48ab4d8554cae1d /sys/amd64/include/atomic.h
parent: e85612a06dab6982c547776bf09273b97d2f995f (diff)
downloadFreeBSD-src-f6cfae6dab50dc678aaac4311af58a17c322cf97.zip
FreeBSD-src-f6cfae6dab50dc678aaac4311af58a17c322cf97.tar.gz
Add a comment about too strong semantic of atomic_load_acq() on x86.
Submitted by: bde
MFC after: 2 weeks
Diffstat (limited to 'sys/amd64/include/atomic.h')
-rw-r--r-- sys/amd64/include/atomic.h | 9
1 file changed, 9 insertions(+), 0 deletions(-)
diff --git a/sys/amd64/include/atomic.h b/sys/amd64/include/atomic.h
index dceb3dc..6d5c6b6 100644
--- a/sys/amd64/include/atomic.h
+++ b/sys/amd64/include/atomic.h
@@ -296,6 +296,15 @@ __storeload_barrier(void)
}
#endif /* _KERNEL*/
+/*
+ * C11-standard acq/rel semantics only apply when the variable in the
+ * call is the same for acq as it is for rel. However, our previous
+ * (x86) implementations provided much stronger ordering than required
+ * (essentially what is called seq_cst order in C11). This
+ * implementation provides the historical strong ordering since some
+ * callers depend on it.
+ */
+
#define ATOMIC_LOAD(TYPE) \
static __inline u_##TYPE \
atomic_load_acq_##TYPE(volatile u_##TYPE *p) \
OpenPOWER on IntegriCloud