author	Michel Lespinasse <walken@google.com>	2013-05-07 06:45:55 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-05-07 07:20:16 -0700
commit	5ede972df1cd9294c82e9515949fd2103be81d7b (patch)
tree	2c6df4c16fbe4430ea2723f2aec284105aab08db
parent	ed00f64346631dff035adfb9b0240daaa8b46c4e (diff)
rwsem: use cmpxchg for trying to steal write lock
Using rwsem_atomic_update to try stealing the write lock forced us to undo
the adjustment in the failure path. We can have simpler and faster code by
using cmpxchg instead.

Signed-off-by: Michel Lespinasse <walken@google.com>
Reviewed-by: Peter Hurley <peter@hurleysoftware.com>
Acked-by: Davidlohr Bueso <davidlohr.bueso@hp.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
 lib/rwsem.c | 26 ++++++--------------------
 1 file changed, 6 insertions(+), 20 deletions(-)
diff --git a/lib/rwsem.c b/lib/rwsem.c
index 2360bf20..64c2dc0 100644
--- a/lib/rwsem.c
+++ b/lib/rwsem.c
@@ -142,25 +142,6 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wake_type)
 	return sem;
 }
 
-/* Try to get write sem, caller holds sem->wait_lock: */
-static int try_get_writer_sem(struct rw_semaphore *sem)
-{
-	long oldcount, adjustment;
-
-	adjustment = RWSEM_ACTIVE_WRITE_BIAS;
-	if (list_is_singular(&sem->wait_list))
-		adjustment -= RWSEM_WAITING_BIAS;
-
-try_again_write:
-	oldcount = rwsem_atomic_update(adjustment, sem) - adjustment;
-	if (!(oldcount & RWSEM_ACTIVE_MASK))
-		return 1;
-	/* some one grabbed the sem already */
-	if (rwsem_atomic_update(-adjustment, sem) & RWSEM_ACTIVE_MASK)
-		return 0;
-	goto try_again_write;
-}
-
 /*
  * wait for the read lock to be granted
  */
@@ -236,7 +217,12 @@ struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem)
 	while (true) {
 		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
-		if (try_get_writer_sem(sem))
+		/* Try acquiring the write lock. */
+		count = RWSEM_ACTIVE_WRITE_BIAS;
+		if (!list_is_singular(&sem->wait_list))
+			count += RWSEM_WAITING_BIAS;
+		if (cmpxchg(&sem->count, RWSEM_WAITING_BIAS, count) ==
+							RWSEM_WAITING_BIAS)
 			break;
 
 		raw_spin_unlock_irq(&sem->wait_lock);
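
For readers who want to experiment with the idea outside the kernel, below is a minimal user-space sketch of the same cmpxchg-based steal, written with C11 atomics. It is illustrative only: demo_rwsem, DEMO_* and try_steal_write() are invented names, the bias values merely imitate the kernel's 32-bit count layout, and atomic_compare_exchange_strong() stands in for the kernel's cmpxchg().

#include <stdatomic.h>
#include <stdbool.h>

/* Toy count encoding, loosely imitating the kernel's 32-bit rwsem layout. */
#define DEMO_ACTIVE_BIAS	1L
#define DEMO_WAITING_BIAS	(-65536L)	/* wait list is non-empty */
#define DEMO_ACTIVE_WRITE_BIAS	(DEMO_WAITING_BIAS + DEMO_ACTIVE_BIAS)

struct demo_rwsem {
	atomic_long count;	/* 0 = unlocked, DEMO_WAITING_BIAS = queued waiters only */
};

/*
 * Like the hunk above, this assumes the caller already holds the wait-list
 * lock and knows whether it is the only queued waiter.  It computes the
 * count value that "a writer owns the lock" should have and tries to
 * install it with a single compare-and-swap.
 */
bool try_steal_write(struct demo_rwsem *sem, bool only_waiter)
{
	long expected = DEMO_WAITING_BIAS;	/* waiters queued, nobody active */
	long newcount = DEMO_ACTIVE_WRITE_BIAS;

	if (!only_waiter)
		newcount += DEMO_WAITING_BIAS;	/* keep the bias for the rest */

	/*
	 * Single shot: succeed only if the count still says "waiters, no
	 * active lockers".  On failure the count is left untouched, so
	 * there is no undo step, unlike the removed rwsem_atomic_update()
	 * approach.
	 */
	return atomic_compare_exchange_strong(&sem->count, &expected, newcount);
}

The failure case is the whole point of the patch: with the old add-then-undo scheme a losing waiter had to subtract its adjustment again (and recheck for races while doing so), whereas a failed compare-and-swap simply reports the current count and the waiter goes back to sleep.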