author    | Martin Schwidefsky <schwidefsky@de.ibm.com> | 2014-09-22 16:34:38 +0200
committer | Martin Schwidefsky <schwidefsky@de.ibm.com> | 2014-09-25 10:52:13 +0200
commit    | bbae71bf9c2fe90dc5642d4cddbbc1994861fd92 (patch)
tree      | 80ce9213a904817502ffc588eb31d5731cc9a250 /arch
parent    | 94232a4332de3bc210e7067fd43521b3eb12336a (diff)
download  | op-kernel-dev-bbae71bf9c2fe90dc5642d4cddbbc1994861fd92.zip
          | op-kernel-dev-bbae71bf9c2fe90dc5642d4cddbbc1994861fd92.tar.gz
s390/rwlock: use the interlocked-access facility 1 instructions
Make use of the load-and-add, load-and-or and load-and-and instructions
to atomically update the read-write lock without a compare-and-swap loop.
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
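For context, here is a rough user-space sketch (not part of the patch, and not the kernel's actual API) of what the change buys on the reader side: the old fast path needs a compare-and-swap retry loop, while the interlocked-access facility lets a single load-and-add ("laa") bump the reader count and return the previous lock value in one shot. GCC/Clang __atomic builtins stand in for the s390 instructions, and the lock layout (bit 31 = writer, lower bits = reader count) mirrors the arch_rwlock_t convention used in the patch.

/* Illustration only: reader fast path as a CAS loop vs. one fetch-and-add. */
#include <stdio.h>

static unsigned int lock_word;		/* stand-in for arch_rwlock_t.lock */

/* Pre-z196 style: retry a compare-and-swap until it succeeds. */
static int read_trylock_cas(unsigned int *lock)
{
	unsigned int old = __atomic_load_n(lock, __ATOMIC_RELAXED);

	while ((int) old >= 0) {	/* no writer holds the lock */
		if (__atomic_compare_exchange_n(lock, &old, old + 1, 0,
						__ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
			return 1;	/* reader count bumped atomically */
	}
	return 0;			/* writer active, caller must wait */
}

/* z196 style: one interlocked add, no retry loop in the fast path. */
static int read_trylock_laa(unsigned int *lock)
{
	/* fetch_add returns the previous value, just as "laa" does */
	unsigned int old = __atomic_fetch_add(lock, 1, __ATOMIC_ACQUIRE);

	/* a negative old value means a writer was in; the kernel's slow
	 * path (_raw_read_lock_wait) then backs the increment out and spins */
	return (int) old >= 0;
}

int main(void)
{
	printf("cas fast path: %d\n", read_trylock_cas(&lock_word));
	printf("laa fast path: %d\n", read_trylock_laa(&lock_word));
	printf("reader count : %u\n", lock_word & 0x7fffffff);
	return 0;
}

The patch keeps the CAS-based variant in the tree under the #else branch, so machines without the z196 facility continue to use the old code path.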
Diffstat (limited to 'arch')
-rw-r--r-- | arch/s390/include/asm/spinlock.h | 76
-rw-r--r-- | arch/s390/lib/spinlock.c         | 34
2 files changed, 108 insertions(+), 2 deletions(-)
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index f9537b9..d6bdf90 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -130,8 +130,6 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
  */
 #define arch_write_can_lock(x)	((x)->lock == 0)
 
-extern void _raw_read_lock_wait(arch_rwlock_t *lp);
-extern void _raw_write_lock_wait(arch_rwlock_t *lp);
 extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
 extern int _raw_write_trylock_retry(arch_rwlock_t *lp);
 
@@ -152,6 +150,78 @@ static inline int arch_write_trylock_once(arch_rwlock_t *rw)
 		    _raw_compare_and_swap(&rw->lock, 0, 0x80000000));
 }
 
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+
+#define __RAW_OP_OR	"lao"
+#define __RAW_OP_AND	"lan"
+#define __RAW_OP_ADD	"laa"
+
+#define __RAW_LOCK(ptr, op_val, op_string)		\
+({							\
+	unsigned int old_val;				\
+							\
+	typecheck(unsigned int *, ptr);			\
+	asm volatile(					\
+		op_string "	%0,%2,%1\n"		\
+		"bcr	14,0\n"				\
+		: "=d" (old_val), "+Q" (*ptr)		\
+		: "d" (op_val)				\
+		: "cc", "memory");			\
+	old_val;					\
+})
+
+#define __RAW_UNLOCK(ptr, op_val, op_string)		\
+({							\
+	unsigned int old_val;				\
+							\
+	typecheck(unsigned int *, ptr);			\
+	asm volatile(					\
+		"bcr	14,0\n"				\
+		op_string "	%0,%2,%1\n"		\
+		: "=d" (old_val), "+Q" (*ptr)		\
+		: "d" (op_val)				\
+		: "cc", "memory");			\
+	old_val;					\
+})
+
+extern void _raw_read_lock_wait(arch_rwlock_t *lp);
+extern void _raw_write_lock_wait(arch_rwlock_t *lp, unsigned int prev);
+
+static inline void arch_read_lock(arch_rwlock_t *rw)
+{
+	unsigned int old;
+
+	old = __RAW_LOCK(&rw->lock, 1, __RAW_OP_ADD);
+	if ((int) old < 0)
+		_raw_read_lock_wait(rw);
+}
+
+static inline void arch_read_unlock(arch_rwlock_t *rw)
+{
+	__RAW_UNLOCK(&rw->lock, -1, __RAW_OP_ADD);
+}
+
+static inline void arch_write_lock(arch_rwlock_t *rw)
+{
+	unsigned int old;
+
+	old = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
+	if (old != 0)
+		_raw_write_lock_wait(rw, old);
+	rw->owner = SPINLOCK_LOCKVAL;
+}
+
+static inline void arch_write_unlock(arch_rwlock_t *rw)
+{
+	rw->owner = 0;
+	__RAW_UNLOCK(&rw->lock, 0x7fffffff, __RAW_OP_AND);
+}
+
+#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
+extern void _raw_read_lock_wait(arch_rwlock_t *lp);
+extern void _raw_write_lock_wait(arch_rwlock_t *lp);
+
 static inline void arch_read_lock(arch_rwlock_t *rw)
 {
 	if (!arch_read_trylock_once(rw))
@@ -187,6 +257,8 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
 		: "cc", "memory");
 }
 
+#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
 static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
 	if (!arch_read_trylock_once(rw))
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index 01f29bb..034a35a 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -114,6 +114,9 @@ void _raw_read_lock_wait(arch_rwlock_t *rw)
 	unsigned int owner, old;
 	int count = spin_retry;
 
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+	__RAW_LOCK(&rw->lock, -1, __RAW_OP_ADD);
+#endif
 	owner = 0;
 	while (1) {
 		if (count-- <= 0) {
@@ -147,6 +150,35 @@ int _raw_read_trylock_retry(arch_rwlock_t *rw)
 }
 EXPORT_SYMBOL(_raw_read_trylock_retry);
 
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+
+void _raw_write_lock_wait(arch_rwlock_t *rw, unsigned int prev)
+{
+	unsigned int owner, old;
+	int count = spin_retry;
+
+	owner = 0;
+	while (1) {
+		if (count-- <= 0) {
+			if (owner && !smp_vcpu_scheduled(~owner))
+				smp_yield_cpu(~owner);
+			count = spin_retry;
+		}
+		old = ACCESS_ONCE(rw->lock);
+		owner = ACCESS_ONCE(rw->owner);
+		smp_rmb();
+		if ((int) old >= 0) {
+			prev = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
+			old = prev;
+		}
+		if ((old & 0x7fffffff) == 0 && (int) prev >= 0)
+			break;
+	}
+}
+EXPORT_SYMBOL(_raw_write_lock_wait);
+
+#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
 void _raw_write_lock_wait(arch_rwlock_t *rw)
 {
 	unsigned int owner, old, prev;
@@ -173,6 +205,8 @@ void _raw_write_lock_wait(arch_rwlock_t *rw)
 }
 EXPORT_SYMBOL(_raw_write_lock_wait);
 
+#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
 int _raw_write_trylock_retry(arch_rwlock_t *rw)
 {
 	unsigned int old;
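A similar user-space sketch (again an illustration under the same assumptions, not kernel code) for the writer side of the new fast path: "lao" sets the writer bit and hands back the previous lock word, and "lan" with mask 0x7fffffff clears it again on unlock. The bcr 14,0 paired with each interlocked instruction in the patch is the serialization point, modelled here by the __ATOMIC_ACQUIRE/__ATOMIC_RELEASE orderings.

/* Illustration only: writer fast path with atomic fetch-or / fetch-and. */
#include <stdio.h>

#define WRITER_BIT	0x80000000u

static unsigned int lock_word;		/* stand-in for arch_rwlock_t.lock */

static int write_trylock_fast(unsigned int *lock)
{
	/* like "lao": set the writer bit, get back what was there before */
	unsigned int old = __atomic_fetch_or(lock, WRITER_BIT, __ATOMIC_ACQUIRE);

	/* only the caller that saw 0 owns the lock; everyone else has to
	 * fall back to a wait loop such as _raw_write_lock_wait() */
	return old == 0;
}

static void write_unlock_fast(unsigned int *lock)
{
	/* like "lan" with 0x7fffffff: clear the writer bit, keep reader bits */
	__atomic_fetch_and(lock, ~WRITER_BIT, __ATOMIC_RELEASE);
}

int main(void)
{
	printf("first writer : %d\n", write_trylock_fast(&lock_word));	/* 1 */
	printf("second writer: %d\n", write_trylock_fast(&lock_word));	/* 0 */
	write_unlock_fast(&lock_word);
	printf("after unlock : %d\n", write_trylock_fast(&lock_word));	/* 1 */
	return 0;
}

Unlike a CAS loop, a "failed" lao still leaves the writer bit set while readers hold the lock, which is why the new _raw_write_lock_wait() takes the previous value as an argument and keeps polling until the reader count drains and the bit it set was genuinely its own.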