author    | ian <ian@FreeBSD.org> | 2014-08-01 22:56:41 +0000
committer | ian <ian@FreeBSD.org> | 2014-08-01 22:56:41 +0000
commit    | 0ad8d826c35ff14064f7d5eb2fc7702dc7099ada
tree      | ec8b31389ab23421dee620e1401e7470fa20fdef /sys/arm/include/atomic.h
parent    | 3582d4b4c2b42c1ec0bcb73779e4f08ec87cbf4d
Add 64-bit atomic ops for armv4, only for kernel code, mostly so that we
don't need any #ifdef stuff to use atomic_load/store_64() elsewhere in
the kernel. For armv4 the atomics are trivial to implement for kernel
code (just disable interrupts), less so for user mode, so this only has
the kernel mode implementations for now.
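For context, every implementation in the diff below leans on the header's existing __with_interrupts_disabled() macro. As a rough, hedged sketch of that pattern (not the verbatim FreeBSD macro; the CPSR bit values and the macro name are defined locally here so the sketch is self-contained), disabling interrupts around the expression is what makes the operation atomic on a uniprocessor armv4 core:

```c
/*
 * Hedged sketch of the disable-interrupts pattern; the real macro
 * lives in sys/arm/include/atomic.h and differs in detail.
 */
#include <sys/types.h>

#define	CPSR_IRQ_DISABLE	0x00000080	/* I bit: IRQs masked */
#define	CPSR_FIQ_DISABLE	0x00000040	/* F bit: FIQs masked */

#define	__with_interrupts_disabled_sketch(expr)			\
	do {							\
		u_int cpsr_save, tmp;				\
								\
		__asm __volatile(				\
		    "mrs %0, cpsr\n"	/* save current CPSR */	\
		    "orr %1, %0, %2\n"	/* set I and F bits  */	\
		    "msr cpsr_c, %1\n"	/* mask IRQ and FIQ  */	\
		    : "=r" (cpsr_save), "=r" (tmp)		\
		    : "I" (CPSR_IRQ_DISABLE | CPSR_FIQ_DISABLE)	\
		    : "cc");					\
		(expr);	  /* no interrupt can intervene here */	\
		__asm __volatile(				\
		    "msr cpsr_c, %0"	/* restore saved CPSR */\
		    : : "r" (cpsr_save) : "cc");		\
	} while (0)
```

This only works as a uniprocessor, kernel-mode technique, which is exactly why the commit restricts the 64-bit ops to kernel code: user mode cannot mask interrupts and would need a different mechanism.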
Diffstat (limited to 'sys/arm/include/atomic.h')
-rw-r--r-- | sys/arm/include/atomic.h | 69
1 file changed, 69 insertions(+), 0 deletions(-)
```diff
diff --git a/sys/arm/include/atomic.h b/sys/arm/include/atomic.h
index f00600b..c6b0008 100644
--- a/sys/arm/include/atomic.h
+++ b/sys/arm/include/atomic.h
@@ -729,11 +729,23 @@ atomic_set_32(volatile uint32_t *address, uint32_t setmask)
 }
 
 static __inline void
+atomic_set_64(volatile uint64_t *address, uint64_t setmask)
+{
+	__with_interrupts_disabled(*address |= setmask);
+}
+
+static __inline void
 atomic_clear_32(volatile uint32_t *address, uint32_t clearmask)
 {
 	__with_interrupts_disabled(*address &= ~clearmask);
 }
 
+static __inline void
+atomic_clear_64(volatile uint64_t *address, uint64_t clearmask)
+{
+	__with_interrupts_disabled(*address &= ~clearmask);
+}
+
 static __inline u_int32_t
 atomic_cmpset_32(volatile u_int32_t *p, volatile u_int32_t cmpval, volatile u_int32_t newval)
 {
@@ -751,6 +763,23 @@ atomic_cmpset_32(volatile u_int32_t *p, volatile u_int32_t cmpval, volatile u_in
 	return (ret);
 }
 
+static __inline u_int64_t
+atomic_cmpset_64(volatile u_int64_t *p, volatile u_int64_t cmpval, volatile u_int64_t newval)
+{
+	int ret;
+
+	__with_interrupts_disabled(
+	{
+		if (*p == cmpval) {
+			*p = newval;
+			ret = 1;
+		} else {
+			ret = 0;
+		}
+	});
+	return (ret);
+}
+
 static __inline void
 atomic_add_32(volatile u_int32_t *p, u_int32_t val)
 {
@@ -758,11 +787,23 @@ atomic_add_32(volatile u_int32_t *p, u_int32_t val)
 }
 
 static __inline void
+atomic_add_64(volatile u_int64_t *p, u_int64_t val)
+{
+	__with_interrupts_disabled(*p += val);
+}
+
+static __inline void
 atomic_subtract_32(volatile u_int32_t *p, u_int32_t val)
 {
 	__with_interrupts_disabled(*p -= val);
 }
 
+static __inline void
+atomic_subtract_64(volatile u_int64_t *p, u_int64_t val)
+{
+	__with_interrupts_disabled(*p -= val);
+}
+
 static __inline uint32_t
 atomic_fetchadd_32(volatile uint32_t *p, uint32_t v)
 {
@@ -776,6 +817,34 @@ atomic_fetchadd_32(volatile uint32_t *p, uint32_t v)
 	return (value);
 }
 
+static __inline uint64_t
+atomic_fetchadd_64(volatile uint64_t *p, uint64_t v)
+{
+	uint64_t value;
+
+	__with_interrupts_disabled(
+	{
+		value = *p;
+		*p += v;
+	});
+	return (value);
+}
+
+static __inline uint64_t
+atomic_load_64(volatile uint64_t *p)
+{
+	uint64_t value;
+
+	__with_interrupts_disabled(value = *p);
+	return (value);
+}
+
+static __inline void
+atomic_store_64(volatile uint64_t *p, uint64_t value)
+{
+	__with_interrupts_disabled(*p = value);
+}
+
 #else /* !_KERNEL */
 
 static __inline u_int32_t
```
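With these additions, armv4 kernel code can call the 64-bit ops unconditionally rather than hiding them behind #ifdef, which is the stated point of the commit. A hypothetical consumer (the counter name and functions are invented for illustration; only atomic_add_64() and atomic_load_64() come from the diff above):

```c
#include <sys/types.h>
#include <machine/atomic.h>

/* Hypothetical 64-bit event counter; name invented for illustration. */
static volatile uint64_t	widget_events;

static void
widget_intr(void *arg)
{
	/* Atomic even though a 32-bit CPU cannot add 64 bits in one step. */
	atomic_add_64(&widget_events, 1);
}

static uint64_t
widget_event_snapshot(void)
{
	/* A plain 64-bit read could tear on armv4; use the atomic load. */
	return (atomic_load_64(&widget_events));
}
```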