diff options
author | peter <peter@FreeBSD.org> | 2005-07-21 22:35:02 +0000 |
---|---|---|
committer | peter <peter@FreeBSD.org> | 2005-07-21 22:35:02 +0000 |
commit | c0016138302575371346a4f1f21c21640b64ef0e (patch) | |
tree | 568cd8c5ab4735dc35f5775af7e4d7ac46c0ead8 /sys/amd64 | |
parent | 0ebd5ed0df9182971867e22d5cbe279e964eb499 (diff) | |
download | FreeBSD-src-c0016138302575371346a4f1f21c21640b64ef0e.zip FreeBSD-src-c0016138302575371346a4f1f21c21640b64ef0e.tar.gz |
Like on i386, bypass lock prefix for atomic ops on !SMP kernels.
Diffstat (limited to 'sys/amd64')
-rw-r--r-- | sys/amd64/include/atomic.h | 27 |
1 file changed, 27 insertions, 0 deletions
diff --git a/sys/amd64/include/atomic.h b/sys/amd64/include/atomic.h index c737a22..be3a20d 100644 --- a/sys/amd64/include/atomic.h +++ b/sys/amd64/include/atomic.h @@ -152,6 +152,31 @@ atomic_cmpset_long(volatile u_long *dst, u_long exp, u_long src) return (res); } +#if defined(_KERNEL) && !defined(SMP) + +/* + * We assume that a = b will do atomic loads and stores. However, on a + * PentiumPro or higher, reads may pass writes, so for that case we have + * to use a serializing instruction (i.e. with LOCK) to do the load in + * SMP kernels. For UP kernels, however, the cache of the single processor + * is always consistent, so we don't need any memory barriers. + */ +#define ATOMIC_STORE_LOAD(TYPE, LOP, SOP) \ +static __inline u_##TYPE \ +atomic_load_acq_##TYPE(volatile u_##TYPE *p) \ +{ \ + return (*p); \ +} \ + \ +static __inline void \ +atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\ +{ \ + *p = v; \ +} \ +struct __hack + +#else /* defined(SMP) */ + #define ATOMIC_STORE_LOAD(TYPE, LOP, SOP) \ static __inline u_##TYPE \ atomic_load_acq_##TYPE(volatile u_##TYPE *p) \ @@ -179,6 +204,8 @@ atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\ } \ struct __hack +#endif /* SMP */ + #endif /* KLD_MODULE || !(__GNUCLIKE_ASM && __CC_SUPPORTS___INLINE) */ ATOMIC_ASM(set, char, "orb %b1,%0", "iq", v); |