author		Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2017-06-29 15:53:02 -0700
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2017-08-17 08:08:59 -0700
commit		952111d7db02573e7165e338de8d4871fa447b21 (patch)
tree		9b66fe680204959a22ef8cf72c97180de2a6643b /arch/tile/lib
parent		d3a024abbc438277851c510b51ec9b158821488b (diff)
arch: Remove spin_unlock_wait() arch-specific definitions
There is no agreed-upon definition of spin_unlock_wait()'s semantics, and it appears that all callers could do just as well with a lock/unlock pair. This commit therefore removes the underlying arch-specific arch_spin_unlock_wait() for all architectures providing them.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: <linux-arch@vger.kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Alan Stern <stern@rowland.harvard.edu>
Cc: Andrea Parri <parri.andrea@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Acked-by: Will Deacon <will.deacon@arm.com>
Acked-by: Boqun Feng <boqun.feng@gmail.com>
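As an illustration of the replacement pattern the changelog describes (a hypothetical sketch, not part of this patch; the lock name and wrapper functions below are made up), a caller that previously waited for the current lock holder via spin_unlock_wait() can instead briefly acquire and release the lock:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);		/* hypothetical lock */

/* Old pattern: relies on the ill-defined spin_unlock_wait() semantics. */
static void wait_for_current_holder_old(void)
{
	spin_unlock_wait(&example_lock);
}

/* New pattern: a lock/unlock pair waits for the current holder using
 * the lock's ordinary, well-defined acquire/release semantics.
 */
static void wait_for_current_holder_new(void)
{
	spin_lock(&example_lock);
	spin_unlock(&example_lock);
}

The lock/unlock pair may cost slightly more than the old busy-wait, but it avoids the architecture-specific ordering reasoning seen in the deleted TILE code below.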
Diffstat (limited to 'arch/tile/lib')
-rw-r--r--	arch/tile/lib/spinlock_32.c	23
-rw-r--r--	arch/tile/lib/spinlock_64.c	22
2 files changed, 0 insertions, 45 deletions
diff --git a/arch/tile/lib/spinlock_32.c b/arch/tile/lib/spinlock_32.c
index 076c6cc..db9333f 100644
--- a/arch/tile/lib/spinlock_32.c
+++ b/arch/tile/lib/spinlock_32.c
@@ -62,29 +62,6 @@ int arch_spin_trylock(arch_spinlock_t *lock)
}
EXPORT_SYMBOL(arch_spin_trylock);
-void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
-	u32 iterations = 0;
-	int curr = READ_ONCE(lock->current_ticket);
-	int next = READ_ONCE(lock->next_ticket);
-
-	/* Return immediately if unlocked. */
-	if (next == curr)
-		return;
-
-	/* Wait until the current locker has released the lock. */
-	do {
-		delay_backoff(iterations++);
-	} while (READ_ONCE(lock->current_ticket) == curr);
-
-	/*
-	 * The TILE architecture doesn't do read speculation; therefore
-	 * a control dependency guarantees a LOAD->{LOAD,STORE} order.
-	 */
-	barrier();
-}
-EXPORT_SYMBOL(arch_spin_unlock_wait);
-
/*
* The low byte is always reserved to be the marker for a "tns" operation
* since the low bit is set to "1" by a tns. The next seven bits are
diff --git a/arch/tile/lib/spinlock_64.c b/arch/tile/lib/spinlock_64.c
index a4b5b2c..de414c2 100644
--- a/arch/tile/lib/spinlock_64.c
+++ b/arch/tile/lib/spinlock_64.c
@@ -62,28 +62,6 @@ int arch_spin_trylock(arch_spinlock_t *lock)
}
EXPORT_SYMBOL(arch_spin_trylock);
-void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
-	u32 iterations = 0;
-	u32 val = READ_ONCE(lock->lock);
-	u32 curr = arch_spin_current(val);
-
-	/* Return immediately if unlocked. */
-	if (arch_spin_next(val) == curr)
-		return;
-
-	/* Wait until the current locker has released the lock. */
-	do {
-		delay_backoff(iterations++);
-	} while (arch_spin_current(READ_ONCE(lock->lock)) == curr);
-
-	/*
-	 * The TILE architecture doesn't do read speculation; therefore
-	 * a control dependency guarantees a LOAD->{LOAD,STORE} order.
-	 */
-	barrier();
-}
-EXPORT_SYMBOL(arch_spin_unlock_wait);
/*
* If the read lock fails due to a writer, we retry periodically