Diffstat (limited to 'arch/arm/include/asm/atomic.h')
-rw-r--r-- | arch/arm/include/asm/atomic.h | 26
1 file changed, 7 insertions, 19 deletions
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index 9ed2377..d0daeab 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -19,31 +19,21 @@
 
 #ifdef __KERNEL__
 
+/*
+ * On ARM, ordinary assignment (str instruction) doesn't clear the local
+ * strex/ldrex monitor on some implementations. The reason we can use it for
+ * atomic_set() is the clrex or dummy strex done on every exception return.
+ */
 #define atomic_read(v)	((v)->counter)
+#define atomic_set(v,i)	(((v)->counter) = (i))
 
 #if __LINUX_ARM_ARCH__ >= 6
 
 /*
  * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
  * store exclusive to ensure that these are atomic.  We may loop
- * to ensure that the update happens.  Writing to 'v->counter'
- * without using the following operations WILL break the atomic
- * nature of these ops.
+ * to ensure that the update happens.
  */
-static inline void atomic_set(atomic_t *v, int i)
-{
-	unsigned long tmp;
-
-	__asm__ __volatile__("@ atomic_set\n"
-"1:	ldrex	%0, [%1]\n"
-"	strex	%0, %2, [%1]\n"
-"	teq	%0, #0\n"
-"	bne	1b"
-	: "=&r" (tmp)
-	: "r" (&v->counter), "r" (i)
-	: "cc");
-}
-
 static inline void atomic_add(int i, atomic_t *v)
 {
 	unsigned long tmp;
@@ -163,8 +153,6 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
 #error SMP not supported on pre-ARMv6 CPUs
 #endif
 
-#define atomic_set(v,i)	(((v)->counter) = (i))
-
 static inline int atomic_add_return(int i, atomic_t *v)
 {
 	unsigned long flags;
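Why this is safe (an editor's sketch, not part of the commit): atomic_set() can become a plain
assignment only because, as the comment added in the first hunk says, every exception return
path now clears the local exclusive monitor with clrex or a dummy strex. The sketch below, in
the same C style as the header, walks through the interaction; the variable v and the function
example() are hypothetical names introduced purely for illustration.

    /*
     * After this patch, on ARMv6 and later:
     *
     *   atomic_set(&v, 42)  compiles to a plain store:  str rX, [&v->counter]
     *   atomic_add(3, &v)   is still a retry loop, roughly:
     *
     *       1: ldrex  %0, [&v->counter]     @ load, arm the local exclusive monitor
     *          add    %0, %0, #3
     *          strex  %1, %0, [&v->counter] @ fails if the monitor was cleared
     *          teq    %1, #0
     *          bne    1b                    @ retry until the strex succeeds
     *
     * If an exception is taken between the ldrex and the strex, and its
     * handler calls atomic_set(&v, ...), the clrex/dummy strex on the
     * exception return path clears the monitor; the interrupted strex then
     * fails and the loop re-reads the freshly stored value. Since a plain
     * str doesn't clear the monitor on some implementations, without that
     * exception-return clearing the strex could succeed and silently
     * overwrite the atomic_set() store.
     */
    static atomic_t v = ATOMIC_INIT(0);

    static void example(void)
    {
    	atomic_set(&v, 42);	/* plain store: ((&v)->counter) = 42 */
    	atomic_add(3, &v);	/* ldrex/strex loop sketched above */
    }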