author     alc <alc@FreeBSD.org>    2015-07-24 19:43:18 +0000
committer  alc <alc@FreeBSD.org>    2015-07-24 19:43:18 +0000
commit     268d180ee5877ca35783a2e7bb0aa730c60dffef (patch)
tree       39f04b47647a37949a882ec5f13c43db94f5d3c9
parent     b634dbb0e0152c8de4c418d309bb6cecaf6ccc84 (diff)
Add a comment discussing the appropriate use of the atomic_*() functions
with acquire and release semantics versus the *mb() functions on amd64
processors.

Reviewed by:	bde (an earlier version), kib
Sponsored by:	EMC / Isilon Storage Division
 sys/amd64/include/atomic.h | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+), 0 deletions(-)
diff --git a/sys/amd64/include/atomic.h b/sys/amd64/include/atomic.h
index 7892d51..016aa70 100644
--- a/sys/amd64/include/atomic.h
+++ b/sys/amd64/include/atomic.h
@@ -32,6 +32,25 @@
#error this file needs sys/cdefs.h as a prerequisite
#endif
+/*
+ * To express interprocessor (as opposed to processor and device) memory
+ * ordering constraints, use the atomic_*() functions with acquire and release
+ * semantics rather than the *mb() functions. An architecture's memory
+ * ordering (or memory consistency) model governs the order in which a
+ * program's accesses to different locations may be performed by an
+ * implementation of that architecture. In general, for memory regions
+ * defined as writeback cacheable, the memory ordering implemented by amd64
+ * processors preserves the program ordering of a load followed by a load, a
+ * load followed by a store, and a store followed by a store. Only a store
+ * followed by a load to a different memory location may be reordered.
+ * Therefore, except for special cases, like non-temporal memory accesses or
+ * memory regions defined as write combining, the memory ordering effects
+ * provided by the sfence instruction in the wmb() function and the lfence
+ * instruction in the rmb() function are redundant. In contrast, the
+ * atomic_*() functions with acquire and release semantics do not perform
+ * redundant instructions for ordinary cases of interprocessor memory
+ * ordering on any architecture.
+ */
#define mb() __asm __volatile("mfence;" : : : "memory")
#define wmb() __asm __volatile("sfence;" : : : "memory")
#define rmb() __asm __volatile("lfence;" : : : "memory")
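
As a minimal illustration of the guidance in the new comment (not part of
the commit itself), a producer/consumer pair can express interprocessor
ordering with the acquire/release variants of FreeBSD's atomic(9)
interface instead of wmb()/rmb().  The names publish, consume, ready, and
payload below are hypothetical, and cpu_spinwait() is assumed to be
available via <machine/cpu.h>:

#include <sys/types.h>
#include <machine/atomic.h>
#include <machine/cpu.h>	/* cpu_spinwait() */

static volatile u_int ready;	/* publication flag (hypothetical) */
static int payload;		/* data made visible by the flag */

/*
 * Producer: the release store orders the write to payload before the
 * store to ready, so no sfence/wmb() is required.
 */
static void
publish(int value)
{
	payload = value;
	atomic_store_rel_int(&ready, 1);
}

/*
 * Consumer: the acquire load orders the read of ready before the read
 * of payload, so no lfence/rmb() is required.
 */
static int
consume(void)
{
	while (atomic_load_acq_int(&ready) == 0)
		cpu_spinwait();
	return (payload);
}

On amd64, for writeback cacheable memory, such acquire/release operations
typically compile down to ordinary loads and stores plus a compiler
barrier, which is the efficiency point the committed comment makes about
the redundancy of lfence and sfence in this case.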