author		jhb <jhb@FreeBSD.org>	2004-11-11 22:42:25 +0000
committer	jhb <jhb@FreeBSD.org>	2004-11-11 22:42:25 +0000
commit		dabf0d0c4fca853271757af24e434aa6926addbf (patch)
tree		886cc72f942f530d2855c532d90cf1dfd0217cd1 /sys/i386/include
parent		feb4d6ee906dc73878fcd6dc412376db11f445a5 (diff)
- Place the gcc memory barrier hint in the right place in the 80386 version
  of atomic_store_rel().
- Use the 80386 versions of atomic_load_acq() and atomic_store_rel() that do
  not use serializing instructions on all UP kernels, since a UP machine does
  not need to synchronize with other CPUs. This trims lots of cycles from
  spin locks on UP kernels, among other things.

Benchmarked by:	rwatson
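The hunk below only shows the store side of the UP macro, so here is a minimal
sketch of what the !SMP expansion of ATOMIC_STORE_LOAD boils down to for u_int
(assuming FreeBSD's <sys/cdefs.h> and <sys/types.h>; the _up-suffixed names are
hypothetical, not the real atomic.h identifiers): ordering is enforced purely
with gcc compiler barriers, placed after the acquire load and before the
release store. Putting the barrier after the store, as the old code did, would
let the compiler sink earlier stores past the release store and defeat the
release semantics.

#include <sys/cdefs.h>
#include <sys/types.h>

/*
 * Minimal sketch of the UP (!SMP) idea, not the verbatim FreeBSD macro:
 * on a uniprocessor, plain loads and stores of an aligned u_int are already
 * atomic, so only the compiler has to be kept from reordering accesses.
 */
static __inline u_int
atomic_load_acq_int_up(volatile u_int *p)
{
	u_int v;

	v = *p;					/* plain atomic load */
	__asm __volatile("" : : : "memory");	/* later accesses stay after the load */
	return (v);
}

static __inline void
atomic_store_rel_int_up(volatile u_int *p, u_int v)
{
	__asm __volatile("" : : : "memory");	/* earlier accesses stay before the store */
	*p = v;					/* plain atomic store */
}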
Diffstat (limited to 'sys/i386/include')
-rw-r--r--  sys/i386/include/atomic.h | 17
1 file changed, 9 insertions(+), 8 deletions(-)
diff --git a/sys/i386/include/atomic.h b/sys/i386/include/atomic.h
index feba606..5f5f197 100644
--- a/sys/i386/include/atomic.h
+++ b/sys/i386/include/atomic.h
@@ -172,13 +172,14 @@ atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src)
#if defined(__GNUC__) || defined(__INTEL_COMPILER)
-#if defined(I386_CPU)
+#if !defined(SMP)
/*
- * We assume that a = b will do atomic loads and stores.
- *
- * XXX: This is _NOT_ safe on a P6 or higher because it does not guarantee
- * memory ordering. These should only be used on a 386.
+ * We assume that a = b will do atomic loads and stores. However, on a
+ * PentiumPro or higher, reads may pass writes, so for that case we have
+ * to use a serializing instruction (i.e. with LOCK) to do the load in
+ * SMP kernels. For UP kernels, however, the cache of the single processor
+ * is always consistent, so we don't need any memory barriers.
*/
#define ATOMIC_STORE_LOAD(TYPE, LOP, SOP) \
static __inline u_##TYPE \
@@ -190,12 +191,12 @@ atomic_load_acq_##TYPE(volatile u_##TYPE *p) \
static __inline void \
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{ \
- *p = v; \
__asm __volatile("" : : : "memory"); \
+ *p = v; \
} \
struct __hack
-#else /* !defined(I386_CPU) */
+#else /* defined(SMP) */
#define ATOMIC_STORE_LOAD(TYPE, LOP, SOP) \
static __inline u_##TYPE \
@@ -224,7 +225,7 @@ atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
} \
struct __hack
-#endif /* defined(I386_CPU) */
+#endif /* !defined(SMP) */
#else /* !(defined(__GNUC__) || defined(__INTEL_COMPILER)) */
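For contrast, the SMP side of the macro (its body is not shown in this
excerpt) cannot rely on a compiler barrier alone, because on a PentiumPro or
newer a plain read may pass an earlier write. Below is a hedged sketch of the
usual technique, a LOCK-prefixed read-modify-write that leaves the value in
memory unchanged; the exact instruction and constraint strings passed to
ATOMIC_STORE_LOAD in atomic.h may differ, and the _smp name is made up for
illustration.

#include <sys/cdefs.h>
#include <sys/types.h>

/*
 * Hypothetical sketch of the SMP acquire load.  LOCK CMPXCHG compares %eax
 * with *p: on a mismatch %eax is loaded from *p, on a match *p is rewritten
 * with its own value.  Either way %eax ends up holding the current contents
 * of *p, and the LOCK prefix makes the access serializing.
 */
static __inline u_int
atomic_load_acq_int_smp(volatile u_int *p)
{
	u_int res;

	res = 0;
	__asm __volatile(
	    "lock; cmpxchgl %0,%1"
	    : "+a" (res), "+m" (*p)
	    : : "memory");
	return (res);
}

Because the locked instruction drains the store buffer before completing,
reads that follow the acquire cannot be satisfied with stale data from
another CPU.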