author    Mauro Carvalho Chehab <mchehab@s-opensource.com>  2016-07-23 07:59:19 -0300
committer Mauro Carvalho Chehab <mchehab@s-opensource.com>  2016-07-23 07:59:19 -0300
commit    c278256d05a2fc75b427fa6a5dc0024faa93465d
tree      2c09c6c65a4b6c597a568ec2425adb75eff7d5d5  /kernel/locking/qspinlock.c
parent    7e5b7d1b3a8facd4dc1ddb5d9ec53c0687d13de7
parent    009a620848218d521f008141c62f56bf19294dd9
Merge branch 'patchwork' into topic/docs-next
* patchwork: (1492 commits)
  [media] cec: always check all_device_types and features
  [media] cec: poll should check if there is room in the tx queue
  [media] vivid: support monitor all mode
  [media] cec: fix test for unconfigured adapter in main message loop
  [media] cec: limit the size of the transmit queue
  [media] cec: zero unused msg part after msg->len
  [media] cec: don't set fh to NULL in CEC_TRANSMIT
  [media] cec: clear all status fields before transmit and always fill in sequence
  [media] cec: CEC_RECEIVE overwrote the timeout field
  [media] cxd2841er: Reading SNR for DVB-C added
  [media] cxd2841er: Reading BER and UCB for DVB-C added
  [media] cxd2841er: fix switch-case for DVB-C
  [media] cxd2841er: fix signal strength scale for ISDB-T
  [media] cxd2841er: adjust the dB scale for DVB-C
  [media] cxd2841er: provide signal strength for DVB-C
  [media] cxd2841er: fix BER report via DVBv5 stats API
  [media] mb86a20s: apply mask to val after checking for read failure
  [media] airspy: fix error logic during device register
  [media] s5p-cec/TODO: add TODO item
  [media] cec/TODO: drop comment about sphinx documentation
  ...

Signed-off-by: Mauro Carvalho Chehab <mchehab@s-opensource.com>
Diffstat (limited to 'kernel/locking/qspinlock.c')
-rw-r--r--  kernel/locking/qspinlock.c | 60
1 file changed, 60 insertions(+), 0 deletions(-)
diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index ce2f75e..5fc8c31 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -267,6 +267,66 @@ static __always_inline u32 __pv_wait_head_or_lock(struct qspinlock *lock,
#define queued_spin_lock_slowpath native_queued_spin_lock_slowpath
#endif
+/*
+ * queued_spin_lock_slowpath() can (load-)ACQUIRE the lock before
+ * issuing an _unordered_ store to set _Q_LOCKED_VAL.
+ *
+ * This means that the store can be delayed, but no later than the
+ * store-release from the unlock. This means that simply observing
+ * _Q_LOCKED_VAL is not sufficient to determine if the lock is acquired.
+ *
+ * There are two paths that can issue the unordered store:
+ *
+ * (1) clear_pending_set_locked(): *,1,0 -> *,0,1
+ *
+ * (2) set_locked(): t,0,0 -> t,0,1 ; t != 0
+ * atomic_cmpxchg_relaxed(): t,0,0 -> 0,0,1
+ *
+ * However, in both cases we have other !0 state we've set before to queue
+ * ourselves:
+ *
+ * For (1) we have the atomic_cmpxchg_acquire() that set _Q_PENDING_VAL, our
+ * load is constrained by that ACQUIRE to not pass before that, and thus must
+ * observe the store.
+ *
+ * For (2) we have a more interesting scenario. We enqueue ourselves using
+ * xchg_tail(), which ends up being a RELEASE. This in itself is not
+ * sufficient, however that is followed by an smp_cond_acquire() on the same
+ * word, giving a RELEASE->ACQUIRE ordering. This again constrains our load and
+ * guarantees we must observe that store.
+ *
+ * Therefore both cases have other !0 state that is observable before the
+ * unordered locked byte store comes through. This means we can use that to
+ * wait for the lock store, and then wait for an unlock.
+ */
+#ifndef queued_spin_unlock_wait
+void queued_spin_unlock_wait(struct qspinlock *lock)
+{
+ u32 val;
+
+ for (;;) {
+ val = atomic_read(&lock->val);
+
+ if (!val) /* not locked, we're done */
+ goto done;
+
+ if (val & _Q_LOCKED_MASK) /* locked, go wait for unlock */
+ break;
+
+ /* not locked, but pending, wait until we observe the lock */
+ cpu_relax();
+ }
+
+ /* any unlock is good */
+ while (atomic_read(&lock->val) & _Q_LOCKED_MASK)
+ cpu_relax();
+
+done:
+ smp_rmb(); /* CTRL + RMB -> ACQUIRE */
+}
+EXPORT_SYMBOL(queued_spin_unlock_wait);
+#endif
+
#endif /* _GEN_PV_LOCK_SLOWPATH */
/**
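
As a rough illustration of the pattern described in the comment added above, below is a userspace sketch of the wait-for-unlock loop written with C11 atomics instead of the kernel's atomic_t and smp_rmb() primitives. The bit layout (locked byte in bits 0-7, pending bit at bit 8) and all MODEL_/model_ names are assumptions chosen to mirror the usual qspinlock encoding; this is a reading aid, not the kernel implementation.

/*
 * Userspace model of queued_spin_unlock_wait() (illustration only).
 * Assumptions: C11 atomics stand in for atomic_read()/smp_rmb(), and the
 * lock-word layout mirrors the common qspinlock encoding with the locked
 * byte in bits 0-7 and the pending bit at bit 8.
 */
#include <stdatomic.h>
#include <stdint.h>

#define MODEL_LOCKED_MASK   0xffu       /* locked byte */
#define MODEL_PENDING_MASK  (1u << 8)   /* pending bit (assumed position) */

struct model_qspinlock {
	_Atomic uint32_t val;
};

static inline void model_cpu_relax(void)
{
	/* a real build might use a pause/yield instruction here */
}

/* Spin until the lock has been observed unlocked at least once. */
static void model_spin_unlock_wait(struct model_qspinlock *lock)
{
	uint32_t val;

	for (;;) {
		val = atomic_load_explicit(&lock->val, memory_order_relaxed);

		if (!val)			/* not locked, we're done */
			goto done;

		if (val & MODEL_LOCKED_MASK)	/* locked, go wait for the unlock */
			break;

		/* pending set, but the locked-byte store is not visible yet */
		model_cpu_relax();
	}

	/* any unlock is good */
	while (atomic_load_explicit(&lock->val, memory_order_relaxed) & MODEL_LOCKED_MASK)
		model_cpu_relax();

done:
	/* stand-in for smp_rmb(): order later accesses after the observation above */
	atomic_thread_fence(memory_order_acquire);
}

The two-phase structure mirrors the patch: a locker that has won the lock may at first be visible only through the pending bit or the tail, so the caller first waits for the locked byte to appear and only then waits for it to clear, rather than returning as soon as it happens to sample a moment where the locked byte is zero.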