author    | Andi Kleen <ak@suse.de>             | 2006-09-26 10:52:32 +0200
committer | Andi Kleen <andi@basil.nowhere.org> | 2006-09-26 10:52:32 +0200
commit    | fb2e28485679418e459583605f9b19807a72ceca (patch)
tree      | c3e6bf7f75fb9c6ed286ef09eebf281388761cfe /include/asm-i386
parent    | 8b059d2373c16b6d32787a49daf8ccf72dc61b71 (diff)
[PATCH] i386: Clean up spin/rwlocks
- Inline spinlock strings into their inline functions
- Convert macros to typesafe inlines
- Replace some leftover __asm__ __volatile__s with asm volatile
Signed-off-by: Andi Kleen <ak@suse.de>
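
To make the second bullet concrete, here is a minimal stand-alone sketch of the macro-to-inline conversion pattern this patch applies (the demo_* names are hypothetical; the real conversions are in the diff below). A function-like macro accepts anything that happens to compile after expansion; a static inline gives the compiler a real prototype to check against, and the symbol is visible to debuggers.

    #include <stdio.h>

    typedef struct {
            volatile signed char slock;     /* >0 means unlocked, as in raw_spinlock_t */
    } demo_spinlock_t;

    /* Macro version: no type checking; any argument with a ->slock
     * member expands and compiles, right or wrong. */
    #define demo_spin_is_locked(x) \
            (*(volatile signed char *)(&(x)->slock) <= 0)

    /* Typesafe inline version: passing anything other than a
     * demo_spinlock_t * is a compile-time error. */
    static inline int demo_spin_is_locked_inline(demo_spinlock_t *x)
    {
            return *(volatile signed char *)(&x->slock) <= 0;
    }

    int main(void)
    {
            demo_spinlock_t lock = { .slock = 1 };
            printf("%d %d\n", demo_spin_is_locked(&lock),
                   demo_spin_is_locked_inline(&lock));  /* prints "0 0" */
            return 0;
    }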
Diffstat (limited to 'include/asm-i386')
-rw-r--r-- | include/asm-i386/rwlock.h   |  14
-rw-r--r-- | include/asm-i386/spinlock.h | 131
2 files changed, 68 insertions, 77 deletions
diff --git a/include/asm-i386/rwlock.h b/include/asm-i386/rwlock.h
index f40ccbd..c3e5db3 100644
--- a/include/asm-i386/rwlock.h
+++ b/include/asm-i386/rwlock.h
@@ -20,18 +20,6 @@
 #define RW_LOCK_BIAS		 0x01000000
 #define RW_LOCK_BIAS_STR	"0x01000000"
 
-#define __build_read_lock(rw, helper)	\
-	asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t" \
-		     "jns 1f\n" \
-		     "call " helper "\n\t" \
-		     "1:\n" \
-		     ::"a" (rw) : "memory")
-
-#define __build_write_lock(rw, helper)	\
-	asm volatile(LOCK_PREFIX " subl $" RW_LOCK_BIAS_STR ",(%0)\n\t" \
-		     "jz 1f\n" \
-		     "call " helper "\n\t" \
-		     "1:\n" \
-		     ::"a" (rw) : "memory")
+/* Code is in asm-i386/spinlock.h */
 
 #endif
diff --git a/include/asm-i386/spinlock.h b/include/asm-i386/spinlock.h
index d102036..3243293 100644
--- a/include/asm-i386/spinlock.h
+++ b/include/asm-i386/spinlock.h
@@ -4,6 +4,7 @@
 #include <asm/atomic.h>
 #include <asm/rwlock.h>
 #include <asm/page.h>
+#include <asm/processor.h>
 #include <linux/compiler.h>
 
 /*
@@ -17,67 +18,64 @@
  * (the type definitions are in asm/spinlock_types.h)
  */
 
-#define __raw_spin_is_locked(x) \
-		(*(volatile signed char *)(&(x)->slock) <= 0)
-
-#define __raw_spin_lock_string \
-	"\n1:\t" \
-	LOCK_PREFIX " ; decb %0\n\t" \
-	"jns 3f\n" \
-	"2:\t" \
-	"rep;nop\n\t" \
-	"cmpb $0,%0\n\t" \
-	"jle 2b\n\t" \
-	"jmp 1b\n" \
-	"3:\n\t"
-
-/*
- * NOTE: there's an irqs-on section here, which normally would have to be
- * irq-traced, but on CONFIG_TRACE_IRQFLAGS we never use
- * __raw_spin_lock_string_flags().
- */
-#define __raw_spin_lock_string_flags \
-	"\n1:\t" \
-	LOCK_PREFIX " ; decb %0\n\t" \
-	"jns 5f\n" \
-	"2:\t" \
-	"testl $0x200, %1\n\t" \
-	"jz 4f\n\t" \
-	"sti\n" \
-	"3:\t" \
-	"rep;nop\n\t" \
-	"cmpb $0, %0\n\t" \
-	"jle 3b\n\t" \
-	"cli\n\t" \
-	"jmp 1b\n" \
-	"4:\t" \
-	"rep;nop\n\t" \
-	"cmpb $0, %0\n\t" \
-	"jg 1b\n\t" \
-	"jmp 4b\n" \
-	"5:\n\t"
+static inline int __raw_spin_is_locked(raw_spinlock_t *x)
+{
+	return *(volatile signed char *)(&(x)->slock) <= 0;
+}
 
 static inline void __raw_spin_lock(raw_spinlock_t *lock)
 {
-	asm(__raw_spin_lock_string : "+m" (lock->slock) : : "memory");
+	asm volatile("\n1:\t"
+		     LOCK_PREFIX " ; decb %0\n\t"
+		     "jns 3f\n"
+		     "2:\t"
+		     "rep;nop\n\t"
+		     "cmpb $0,%0\n\t"
+		     "jle 2b\n\t"
+		     "jmp 1b\n"
+		     "3:\n\t"
+		     : "+m" (lock->slock) : : "memory");
 }
 
 /*
  * It is easier for the lock validator if interrupts are not re-enabled
  * in the middle of a lock-acquire. This is a performance feature anyway
  * so we turn it off:
+ *
+ * NOTE: there's an irqs-on section here, which normally would have to be
+ * irq-traced, but on CONFIG_TRACE_IRQFLAGS we never use this variant.
  */
 #ifndef CONFIG_PROVE_LOCKING
 static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
 {
-	asm(__raw_spin_lock_string_flags : "+m" (lock->slock) : "r" (flags) : "memory");
+	asm volatile(
+		"\n1:\t"
+		LOCK_PREFIX " ; decb %0\n\t"
+		"jns 5f\n"
+		"2:\t"
+		"testl $0x200, %1\n\t"
+		"jz 4f\n\t"
+		"sti\n"
+		"3:\t"
+		"rep;nop\n\t"
+		"cmpb $0, %0\n\t"
+		"jle 3b\n\t"
+		"cli\n\t"
+		"jmp 1b\n"
+		"4:\t"
+		"rep;nop\n\t"
+		"cmpb $0, %0\n\t"
+		"jg 1b\n\t"
+		"jmp 4b\n"
+		"5:\n\t"
+		: "+m" (lock->slock) : "r" (flags) : "memory");
 }
 #endif
 
 static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 {
 	char oldval;
-	__asm__ __volatile__(
+	asm volatile(
 		"xchgb %b0,%1"
 		:"=q" (oldval), "+m" (lock->slock)
 		:"0" (0) : "memory");
@@ -93,38 +91,29 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 
 #if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)
 
-#define __raw_spin_unlock_string \
-	"movb $1,%0" \
-		:"+m" (lock->slock) : : "memory"
-
-
 static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
-	__asm__ __volatile__(
-		__raw_spin_unlock_string
-	);
+	asm volatile("movb $1,%0" : "+m" (lock->slock) :: "memory");
 }
 
 #else
 
-#define __raw_spin_unlock_string \
-	"xchgb %b0, %1" \
-		:"=q" (oldval), "+m" (lock->slock) \
-		:"0" (oldval) : "memory"
-
 static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
 	char oldval = 1;
-	__asm__ __volatile__(
-		__raw_spin_unlock_string
-	);
+	asm volatile("xchgb %b0, %1"
+		     : "=q" (oldval), "+m" (lock->slock)
+		     : "0" (oldval) : "memory");
 }
 
 #endif
 
-#define __raw_spin_unlock_wait(lock) \
-	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
+static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+{
+	while (__raw_spin_is_locked(lock))
+		cpu_relax();
+}
 
 /*
  * Read-write spinlocks, allowing multiple readers
@@ -151,22 +140,36 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
  * read_can_lock - would read_trylock() succeed?
  * @lock: the rwlock in question.
  */
-#define __raw_read_can_lock(x)		((int)(x)->lock > 0)
+static inline int __raw_read_can_lock(raw_rwlock_t *x)
+{
+	return (int)(x)->lock > 0;
+}
 
 /**
  * write_can_lock - would write_trylock() succeed?
  * @lock: the rwlock in question.
 */
-#define __raw_write_can_lock(x)		((x)->lock == RW_LOCK_BIAS)
+static inline int __raw_write_can_lock(raw_rwlock_t *x)
+{
+	return (x)->lock == RW_LOCK_BIAS;
+}
 
 static inline void __raw_read_lock(raw_rwlock_t *rw)
 {
-	__build_read_lock(rw, "__read_lock_failed");
+	asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
+		     "jns 1f\n"
+		     "call __read_lock_failed\n\t"
+		     "1:\n"
+		     ::"a" (rw) : "memory");
 }
 
 static inline void __raw_write_lock(raw_rwlock_t *rw)
 {
-	__build_write_lock(rw, "__write_lock_failed");
+	asm volatile(LOCK_PREFIX " subl $" RW_LOCK_BIAS_STR ",(%0)\n\t"
+		     "jz 1f\n"
+		     "call __write_lock_failed\n\t"
+		     "1:\n"
+		     ::"a" (rw) : "memory");
 }
 
 static inline int __raw_read_trylock(raw_rwlock_t *lock)
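
For readers who want to poke at the fast paths outside the kernel, the trylock/unlock pair above translates almost directly to user space. The sketch below (hypothetical demo_* names; x86 with GCC or Clang assumed) uses the same xchgb idiom as __raw_spin_trylock: atomically swap 0 into the lock byte and inspect what was there before; a positive old value means the lock was free and is now held.

    #include <stdio.h>

    typedef struct {
            volatile signed char slock;     /* 1 = unlocked, <= 0 = locked */
    } demo_spinlock_t;

    /* xchgb is implicitly locked on x86, so swapping 0 in and reading
     * the old value is a single atomic step. */
    static inline int demo_trylock(demo_spinlock_t *lock)
    {
            char oldval;
            asm volatile("xchgb %b0,%1"
                         : "=q" (oldval), "+m" (lock->slock)
                         : "0" (0) : "memory");
            return oldval > 0;
    }

    /* Mirrors the simple __raw_spin_unlock: a plain byte store suffices
     * on processors with ordered stores, which is why the kernel guards
     * it with !CONFIG_X86_OOSTORE && !CONFIG_X86_PPRO_FENCE. */
    static inline void demo_unlock(demo_spinlock_t *lock)
    {
            asm volatile("movb $1,%0" : "+m" (lock->slock) : : "memory");
    }

    int main(void)
    {
            demo_spinlock_t lock = { .slock = 1 };
            printf("%d", demo_trylock(&lock));      /* 1: acquired */
            printf("%d", demo_trylock(&lock));      /* 0: already held */
            demo_unlock(&lock);
            printf("%d\n", demo_trylock(&lock));    /* 1: acquired again */
            return 0;
    }

The rwlock paths follow the same shape but operate on a 32-bit counter initialized to RW_LOCK_BIAS (0x01000000): a reader does subl $1 and checks the sign flag, so on the order of 2^24 concurrent readers fit below the bias, while a writer subtracts the whole bias and succeeds only if the result is exactly zero, meaning no readers or writers were present.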