diff options
author | jhb <jhb@FreeBSD.org> | 2001-01-14 09:55:21 +0000 |
---|---|---|
committer | jhb <jhb@FreeBSD.org> | 2001-01-14 09:55:21 +0000 |
commit | 7fa2e618323810ab15555badb802f18dee979944 (patch) | |
tree | 1f61c9a67edf0715df2629176002395c7914993a /sys/amd64 | |
parent | b8126f74e59f8a20309260d608149d1af95efdbf (diff) | |
download | FreeBSD-src-7fa2e618323810ab15555badb802f18dee979944.zip FreeBSD-src-7fa2e618323810ab15555badb802f18dee979944.tar.gz |
Fix the atomic_load_acq() and atomic_store_rel() functions to properly
implement memory fences for the 486+. The 386 still uses versions without
memory fences; those versions do not enforce ordering of memory operations
and are therefore not MP safe.
Diffstat (limited to 'sys/amd64')
-rw-r--r-- | sys/amd64/include/atomic.h | 48 |
1 file changed, 43 insertions(+), 5 deletions(-)
diff --git a/sys/amd64/include/atomic.h b/sys/amd64/include/atomic.h
index 252cb0d..54ed51d 100644
--- a/sys/amd64/include/atomic.h
+++ b/sys/amd64/include/atomic.h
@@ -255,10 +255,19 @@ ATOMIC_ACQ_REL(subtract, long)

 #undef ATOMIC_ACQ_REL

+#if defined(KLD_MODULE)
+#define ATOMIC_STORE_LOAD(TYPE, LOP, SOP)			\
+u_##TYPE atomic_load_acq_##TYPE(volatile u_##TYPE *p);		\
+void atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v);
+#else
+#if defined(I386_CPU)
 /*
  * We assume that a = b will do atomic loads and stores.
+ *
+ * XXX: This is _NOT_ safe on a P6 or higher because it does not guarantee
+ * memory ordering.  These should only be used on a 386.
  */
-#define ATOMIC_STORE_LOAD(TYPE)				\
+#define ATOMIC_STORE_LOAD(TYPE, LOP, SOP)		\
 static __inline u_##TYPE				\
 atomic_load_acq_##TYPE(volatile u_##TYPE *p)		\
 {							\
@@ -271,11 +280,40 @@ atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
 	*p = v;						\
 	__asm __volatile("" : : : "memory");		\
 }
+#else
+
+#define ATOMIC_STORE_LOAD(TYPE, LOP, SOP)		\
+static __inline u_##TYPE				\
+atomic_load_acq_##TYPE(volatile u_##TYPE *p)		\
+{							\
+	u_##TYPE res;					\
+							\
+	__asm __volatile(MPLOCKED LOP			\
+	: "+a" (res),		/* 0 (result) */	\
+	  "+m" (*p)		/* 1 */			\
+	: : "memory");					\
+							\
+	return (res);					\
+}							\
+							\
+/*							\
+ * The XCHG instruction asserts LOCK automagically.	\
+ */							\
+static __inline void					\
+atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
+{							\
+	__asm __volatile(SOP				\
+	: "+m" (*p),		/* 0 */			\
+	  "+r" (v)		/* 1 */			\
+	: : "memory");					\
+}
+#endif	/* defined(I386_CPU) */
+#endif	/* defined(KLD_MODULE) */

-ATOMIC_STORE_LOAD(char)
-ATOMIC_STORE_LOAD(short)
-ATOMIC_STORE_LOAD(int)
-ATOMIC_STORE_LOAD(long)
+ATOMIC_STORE_LOAD(char,	"cmpxchgb %b0,%1",  "xchgb %b1,%0")
+ATOMIC_STORE_LOAD(short,"cmpxchgw %w0,%1",  "xchgw %w1,%0")
+ATOMIC_STORE_LOAD(int,	"cmpxchgl %0,%1",   "xchgl %1,%0")
+ATOMIC_STORE_LOAD(long,	"cmpxchgl %0,%1",   "xchgl %1,%0")

 #undef ATOMIC_STORE_LOAD