author	Chris Metcalf <cmetcalf@tilera.com>	2012-03-30 15:46:29 -0400
committer	Chris Metcalf <cmetcalf@tilera.com>	2012-04-02 12:13:49 -0400
commit	ab306cae660e524edbeb8889e4e23d3c97717b9c (patch)
tree	c588cb46c9497e9271e8bc5b5b4f5e53244c6cf9
parent	b14f21906774be181627412fed5b6b5fae2b53a2 (diff)
arch/tile: use atomic exchange in arch_write_unlock()
This idiom is used elsewhere when we do an unlock by writing a zero, but I missed it here. Using an atomic operation avoids waiting on the write buffer for the unlocking write to be sent to the home cache.

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
-rw-r--r--	arch/tile/include/asm/spinlock_64.h	2
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/arch/tile/include/asm/spinlock_64.h b/arch/tile/include/asm/spinlock_64.h
index 72be590..5f8b6a0 100644
--- a/arch/tile/include/asm/spinlock_64.h
+++ b/arch/tile/include/asm/spinlock_64.h
@@ -137,7 +137,7 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
 static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
 	__insn_mf();
-	rw->lock = 0;
+	__insn_exch4(&rw->lock, 0); /* Avoid waiting in the write buffer. */
 }
 
 static inline int arch_read_trylock(arch_rwlock_t *rw)
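
For context, the unlock-via-atomic-exchange pattern used here can be sketched with portable GCC atomic builtins. This is an illustrative analogue only, not the tile code: the demo_rwlock_t type, the function names, and the __atomic_thread_fence()/__atomic_exchange_n() builtins are stand-ins for the tile-specific __insn_mf() and __insn_exch4() intrinsics in the actual patch.

/*
 * Illustrative sketch (assumed analogue, not the tile implementation):
 * releasing a write lock with an atomic exchange instead of a plain store,
 * so the zeroing write is performed as an atomic operation rather than
 * lingering in the local write buffer.
 */
#include <stdint.h>

typedef struct {
	uint32_t lock;
} demo_rwlock_t;

/* Old style: memory fence, then a plain store of zero. */
static inline void demo_write_unlock_plain(demo_rwlock_t *rw)
{
	__atomic_thread_fence(__ATOMIC_RELEASE);	/* analogue of __insn_mf() */
	rw->lock = 0;					/* ordinary store */
}

/* New style: memory fence, then an atomic exchange; the old value is discarded. */
static inline void demo_write_unlock_exch(demo_rwlock_t *rw)
{
	__atomic_thread_fence(__ATOMIC_RELEASE);	/* analogue of __insn_mf() */
	(void)__atomic_exchange_n(&rw->lock, 0, __ATOMIC_RELAXED);	/* analogue of __insn_exch4() */
}

The design point is the same one the commit message makes: the exchange form completes the unlocking write at the memory system instead of merely queueing it locally, so other CPUs observe the released lock sooner.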