path: root/sys/sys/seq.h
author		kib <kib@FreeBSD.org>	2015-07-08 18:37:08 +0000
committer	kib <kib@FreeBSD.org>	2015-07-08 18:37:08 +0000
commit		042a10eee09b21f323507b317235d996816bf52b (patch)
tree		c26e129451975cecd2f8d4e8e913427f3cc380d1 /sys/sys/seq.h
parent		aa3c990c7d11ba901b301e1e445dfdfce2ffbf03 (diff)
Use atomic_thread_fence_rel() to ensure ordering in seq_write_begin(),
instead of the load_rmb/rmb_load functions.  The update itself does not
need to be atomic because the write lock is owned.

Similarly, in seq_write_end(), the update of *seqp need not be atomic;
only the store must be atomic with release semantics.

For seq_read(), the natural operation is a load acquire of the sequence
value; express this directly with atomic_load_acq_int() instead of using
the custom partial-fence implementation atomic_load_rmb_int().

In seq_consistent(), use atomic_thread_fence_acq(), which provides the
desired semantics: the reads are ordered before the fence, and the fence
before the re-read of *seqp, instead of the custom atomic_rmb_load_int().

Reviewed by:	alc, bde
Sponsored by:	The FreeBSD Foundation
MFC after:	3 weeks
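For context, a minimal sketch of the writer side under the new semantics.
This is hypothetical illustration, not part of the commit: struct foo and
foo_update() are invented names; only the seq_* calls come from sys/seq.h,
and the caller is assumed to hold an exclusive lock serializing writers,
which is what lets the counter updates be non-atomic.

	/* Hypothetical example; kernel-only, as sys/seq.h is kernel code. */
	#include <sys/param.h>
	#include <sys/systm.h>
	#include <sys/seq.h>

	struct foo {
		seq_t	f_seq;	/* even: stable, odd: update in progress */
		u_int	f_a;
		u_int	f_b;
	};

	static void
	foo_update(struct foo *fp, u_int a, u_int b)
	{
		/*
		 * Makes the counter odd; the release fence inside orders
		 * the increment before the field stores below, so a reader
		 * that observes the new fields also observes the odd count.
		 */
		seq_write_begin(&fp->f_seq);
		fp->f_a = a;
		fp->f_b = b;
		/*
		 * Store-release back to an even value: the field stores
		 * above are ordered before the counter store.
		 */
		seq_write_end(&fp->f_seq);
	}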
Diffstat (limited to 'sys/sys/seq.h')
-rw-r--r--	sys/sys/seq.h	39
1 file changed, 6 insertions, 33 deletions
diff --git a/sys/sys/seq.h b/sys/sys/seq.h
index 21067cb..c5dcaeb 100644
--- a/sys/sys/seq.h
+++ b/sys/sys/seq.h
@@ -69,35 +69,6 @@ typedef uint32_t seq_t;
#include <machine/cpu.h>
-/*
- * Stuff below is going away when we gain suitable memory barriers.
- *
- * atomic_load_acq_int at least on amd64 provides a full memory barrier,
- * in a way which affects performance.
- *
- * Hack below covers all architectures and avoids most of the penalty at least
- * on amd64 but still has unnecessary cost.
- */
-static __inline int
-atomic_load_rmb_int(volatile const u_int *p)
-{
- volatile u_int v;
-
- v = *p;
- atomic_load_acq_int(&v);
- return (v);
-}
-
-static __inline int
-atomic_rmb_load_int(volatile const u_int *p)
-{
- volatile u_int v = 0;
-
- atomic_load_acq_int(&v);
- v = *p;
- return (v);
-}
-
static __inline bool
seq_in_modify(seq_t seqp)
{
@@ -110,14 +81,15 @@ seq_write_begin(seq_t *seqp)
{
MPASS(!seq_in_modify(*seqp));
- atomic_add_acq_int(seqp, 1);
+ *seqp += 1;
+ atomic_thread_fence_rel();
}
static __inline void
seq_write_end(seq_t *seqp)
{
- atomic_add_rel_int(seqp, 1);
+ atomic_store_rel_int(seqp, *seqp + 1);
MPASS(!seq_in_modify(*seqp));
}
@@ -127,7 +99,7 @@ seq_read(const seq_t *seqp)
seq_t ret;
for (;;) {
- ret = atomic_load_rmb_int(seqp);
+ ret = atomic_load_acq_int(__DECONST(seq_t *, seqp));
if (seq_in_modify(ret)) {
cpu_spinwait();
continue;
@@ -142,7 +114,8 @@ static __inline seq_t
seq_consistent(const seq_t *seqp, seq_t oldseq)
{
- return (atomic_rmb_load_int(seqp) == oldseq);
+ atomic_thread_fence_acq();
+ return (*seqp == oldseq);
}
static __inline seq_t
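And a matching reader-side sketch (same hypothetical struct foo as above),
following the retry pattern this API is built for: take an acquire-ordered
snapshot of the counter, copy the fields, and retry if a writer raced.

	static void
	foo_read(const struct foo *fp, u_int *ap, u_int *bp)
	{
		seq_t seq;

		for (;;) {
			/*
			 * seq_read() spins while an update is in progress
			 * and returns the (even) sequence value with acquire
			 * ordering, so the field loads below cannot be
			 * reordered before it.
			 */
			seq = seq_read(&fp->f_seq);
			*ap = fp->f_a;
			*bp = fp->f_b;
			/*
			 * seq_consistent() issues an acquire fence and then
			 * re-reads the counter; if it changed, the snapshot
			 * may be torn, so retry.
			 */
			if (seq_consistent(&fp->f_seq, seq))
				return;
			cpu_spinwait();
		}
	}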