author		Davidlohr Bueso <dave@stgolabs.net>	2015-09-14 00:37:22 -0700
committer	Ingo Molnar <mingo@kernel.org>	2015-09-18 09:27:29 +0200
commit		6e1e5196975fb7ecc501b3fe1075b77aea2b7839 (patch)
tree		42ff6a299f0b1d30da4d9c992ba438c883aac9ff /kernel/locking/qrwlock.c
parent		e58cdf585a38412f9f56c512d20e8e12637d9892 (diff)
download	op-kernel-dev-6e1e5196975fb7ecc501b3fe1075b77aea2b7839.zip
		op-kernel-dev-6e1e5196975fb7ecc501b3fe1075b77aea2b7839.tar.gz
locking/qrwlock: Rename ->lock to ->wait_lock
... trivial, but reads a little nicer when we name our actual primitive 'lock'.

Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Waiman Long <Waiman.Long@hpe.com>
Link: http://lkml.kernel.org/r/1442216244-4409-1-git-send-email-dave@stgolabs.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/locking/qrwlock.c')
-rw-r--r--	kernel/locking/qrwlock.c	8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/kernel/locking/qrwlock.c b/kernel/locking/qrwlock.c
index f17a3e3..fec0823 100644
--- a/kernel/locking/qrwlock.c
+++ b/kernel/locking/qrwlock.c
@@ -86,7 +86,7 @@ void queued_read_lock_slowpath(struct qrwlock *lock, u32 cnts)
 	/*
 	 * Put the reader into the wait queue
 	 */
-	arch_spin_lock(&lock->lock);
+	arch_spin_lock(&lock->wait_lock);
 
 	/*
 	 * The ACQUIRE semantics of the following spinning code ensure
@@ -99,7 +99,7 @@ void queued_read_lock_slowpath(struct qrwlock *lock, u32 cnts)
 	/*
 	 * Signal the next one in queue to become queue head
 	 */
-	arch_spin_unlock(&lock->lock);
+	arch_spin_unlock(&lock->wait_lock);
 }
 EXPORT_SYMBOL(queued_read_lock_slowpath);
 
@@ -112,7 +112,7 @@ void queued_write_lock_slowpath(struct qrwlock *lock)
 	u32 cnts;
 
 	/* Put the writer into the wait queue */
-	arch_spin_lock(&lock->lock);
+	arch_spin_lock(&lock->wait_lock);
 
 	/* Try to acquire the lock directly if no reader is present */
 	if (!atomic_read(&lock->cnts) &&
@@ -144,6 +144,6 @@ void queued_write_lock_slowpath(struct qrwlock *lock)
 		cpu_relax_lowlatency();
 	}
 unlock:
-	arch_spin_unlock(&lock->lock);
+	arch_spin_unlock(&lock->wait_lock);
 }
 EXPORT_SYMBOL(queued_write_lock_slowpath);
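
For context, the field being renamed is the internal spinlock that serializes waiting readers and writers inside struct qrwlock; the structure itself lives in include/asm-generic/qrwlock_types.h and is not part of this diff. The sketch below shows roughly what the type looks like after the rename, assuming the layout of that header around this commit; apart from wait_lock, which this patch introduces, the field names and comments are an approximation, not a quote of the header change.

/*
 * Approximate queue rwlock layout after this commit (assumed from
 * include/asm-generic/qrwlock_types.h of the same era).
 */
typedef struct qrwlock {
	atomic_t	cnts;		/* reader count and writer state */
	arch_spinlock_t	wait_lock;	/* serializes queued waiters */
} arch_rwlock_t;

With the field named wait_lock, a call such as arch_spin_lock(&lock->wait_lock) in the slowpaths above reads as taking the waiters' queue lock rather than the rwlock itself, which is exactly the readability point made in the commit message.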