summaryrefslogtreecommitdiffstats
path: root/sys
diff options
context:
space:
mode:
author: mjg <mjg@FreeBSD.org> 2014-11-03 13:14:34 +0000
committer: mjg <mjg@FreeBSD.org> 2014-11-03 13:14:34 +0000
commit: 8969e775550978c1c5fed290d41cf9bcb171e735 (patch)
tree: c1173afc62708a9648fb1621d13e1fc01a4a6763 /sys
parent: a8147b2f482df00c3d01addaa0681face98b9b88 (diff)
download: FreeBSD-src-8969e775550978c1c5fed290d41cf9bcb171e735.zip
download: FreeBSD-src-8969e775550978c1c5fed290d41cf9bcb171e735.tar.gz
Fix misplaced read memory barrier in seq.
Impact on capability races was small: it was possible to get a spurious ENOTCAPABLE (early return), but it was not possible to bypass checks. Tidy up some comments.
Diffstat (limited to 'sys')
-rw-r--r--  sys/sys/seq.h  22
1 file changed, 16 insertions, 6 deletions
diff --git a/sys/sys/seq.h b/sys/sys/seq.h
index 799b3a9..eaab86e 100644
--- a/sys/sys/seq.h
+++ b/sys/sys/seq.h
@@ -70,16 +70,16 @@ typedef uint32_t seq_t;
#include <machine/cpu.h>
/*
- * This is a temporary hack until memory barriers are cleaned up.
+ * Stuff below is going away when we gain suitable memory barriers.
*
* atomic_load_acq_int at least on amd64 provides a full memory barrier,
- * in a way which affects perforance.
+ * in a way which affects performance.
*
* Hack below covers all architectures and avoids most of the penalty at least
- * on amd64.
+ * on amd64 but still has unnecessary cost.
*/
static __inline int
-atomic_load_acq_rmb_int(volatile u_int *p)
+atomic_load_rmb_int(volatile u_int *p)
{
volatile u_int v;
@@ -88,6 +88,16 @@ atomic_load_acq_rmb_int(volatile u_int *p)
return (v);
}
+static __inline int
+atomic_rmb_load_int(volatile u_int *p)
+{
+ volatile u_int v = 0;
+
+ atomic_load_acq_int(&v);
+ v = *p;
+ return (v);
+}
+
static __inline bool
seq_in_modify(seq_t seqp)
{
@@ -117,7 +127,7 @@ seq_read(seq_t *seqp)
seq_t ret;
for (;;) {
- ret = atomic_load_acq_rmb_int(seqp);
+ ret = atomic_load_rmb_int(seqp);
if (seq_in_modify(ret)) {
cpu_spinwait();
continue;
@@ -132,7 +142,7 @@ static __inline seq_t
seq_consistent(seq_t *seqp, seq_t oldseq)
{
- return (atomic_load_acq_rmb_int(seqp) == oldseq);
+ return (atomic_rmb_load_int(seqp) == oldseq);
}
static __inline seq_t
OpenPOWER on IntegriCloud