author    | Thomas Gleixner <tglx@linutronix.de> | 2017-11-14 10:01:49 +0100
committer | Thomas Gleixner <tglx@linutronix.de> | 2017-11-14 10:01:49 +0100
commit    | d4bfeabe9ff7967f4b8c24aabf2de1ce3a909cd9 (patch)
tree      | 6b419b8497c7d57ddec20a3558697ef36ea37b11 /kernel/locking/qspinlock_paravirt.h
parent    | 8a7a8e1eab929eb3a5b735a788a23b9731139046 (diff)
parent    | b29c6ef7bb1257853c1e31616d84f55e561cf631 (diff)
Merge branch 'linus' into timers/urgent
Get upstream changes so dependent patches can be applied.
Diffstat (limited to 'kernel/locking/qspinlock_paravirt.h')
-rw-r--r-- | kernel/locking/qspinlock_paravirt.h | 47
1 file changed, 38 insertions(+), 9 deletions(-)
diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h
index 15b6a39..6ee4777 100644
--- a/kernel/locking/qspinlock_paravirt.h
+++ b/kernel/locking/qspinlock_paravirt.h
@@ -61,21 +61,50 @@ struct pv_node {
 #include "qspinlock_stat.h"
 
 /*
+ * Hybrid PV queued/unfair lock
+ *
  * By replacing the regular queued_spin_trylock() with the function below,
  * it will be called once when a lock waiter enter the PV slowpath before
- * being queued. By allowing one lock stealing attempt here when the pending
- * bit is off, it helps to reduce the performance impact of lock waiter
- * preemption without the drawback of lock starvation.
+ * being queued.
+ *
+ * The pending bit is set by the queue head vCPU of the MCS wait queue in
+ * pv_wait_head_or_lock() to signal that it is ready to spin on the lock.
+ * When that bit becomes visible to the incoming waiters, no lock stealing
+ * is allowed. The function will return immediately to make the waiters
+ * enter the MCS wait queue. So lock starvation shouldn't happen as long
+ * as the queued mode vCPUs are actively running to set the pending bit
+ * and hence disabling lock stealing.
+ *
+ * When the pending bit isn't set, the lock waiters will stay in the unfair
+ * mode spinning on the lock unless the MCS wait queue is empty. In this
+ * case, the lock waiters will enter the queued mode slowpath trying to
+ * become the queue head and set the pending bit.
+ *
+ * This hybrid PV queued/unfair lock combines the best attributes of a
+ * queued lock (no lock starvation) and an unfair lock (good performance
+ * on not heavily contended locks).
  */
-#define queued_spin_trylock(l)	pv_queued_spin_steal_lock(l)
-static inline bool pv_queued_spin_steal_lock(struct qspinlock *lock)
+#define queued_spin_trylock(l)	pv_hybrid_queued_unfair_trylock(l)
+static inline bool pv_hybrid_queued_unfair_trylock(struct qspinlock *lock)
 {
 	struct __qspinlock *l = (void *)lock;
 
-	if (!(atomic_read(&lock->val) & _Q_LOCKED_PENDING_MASK) &&
-	    (cmpxchg_acquire(&l->locked, 0, _Q_LOCKED_VAL) == 0)) {
-		qstat_inc(qstat_pv_lock_stealing, true);
-		return true;
+	/*
+	 * Stay in unfair lock mode as long as queued mode waiters are
+	 * present in the MCS wait queue but the pending bit isn't set.
+	 */
+	for (;;) {
+		int val = atomic_read(&lock->val);
+
+		if (!(val & _Q_LOCKED_PENDING_MASK) &&
+		    (cmpxchg_acquire(&l->locked, 0, _Q_LOCKED_VAL) == 0)) {
+			qstat_inc(qstat_pv_lock_stealing, true);
+			return true;
+		}
+		if (!(val & _Q_TAIL_MASK) || (val & _Q_PENDING_MASK))
+			break;
+
+		cpu_relax();
 	}
 
 	return false;
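To make the decision logic of the hybrid trylock easier to follow, below is a minimal user-space C sketch of the same three-way choice: steal the lock when both the locked and pending bits are clear, give up and queue when there is no MCS tail or when the queue head has raised the pending bit, and otherwise keep spinning in unfair mode. Everything here is a hypothetical stand-in, not kernel API: `struct demo_qspinlock`, the `DEMO_*` masks and `demo_hybrid_trylock()` are simplified substitutes for `struct qspinlock`, the `_Q_*_MASK` constants and `pv_hybrid_queued_unfair_trylock()`, and the CAS is done on the whole lock word rather than on the locked byte as the kernel does.

```c
/*
 * Illustrative model of the hybrid queued/unfair trylock decision logic.
 * All names and the bit layout are assumptions for this sketch only.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define DEMO_LOCKED_MASK	0x000000ffU	/* lock byte */
#define DEMO_PENDING_MASK	0x0000ff00U	/* pending byte */
#define DEMO_TAIL_MASK		0xffff0000U	/* MCS wait queue tail */
#define DEMO_LOCKED_VAL		0x00000001U

struct demo_qspinlock {
	_Atomic unsigned int val;
};

static bool demo_hybrid_trylock(struct demo_qspinlock *lock)
{
	for (;;) {
		unsigned int val = atomic_load(&lock->val);
		unsigned int expected = val;

		/*
		 * Unfair path: try to steal the lock while both the locked
		 * and pending bits are clear, preserving any queued tail.
		 */
		if (!(val & (DEMO_LOCKED_MASK | DEMO_PENDING_MASK)) &&
		    atomic_compare_exchange_strong(&lock->val, &expected,
						   val | DEMO_LOCKED_VAL))
			return true;

		/*
		 * Fall back to queueing when the MCS queue is empty (become
		 * the queue head) or when the queue head has set the pending
		 * bit to shut off further lock stealing.
		 */
		if (!(val & DEMO_TAIL_MASK) || (val & DEMO_PENDING_MASK))
			return false;

		/* Otherwise keep spinning on the lock in unfair mode. */
	}
}

int main(void)
{
	struct demo_qspinlock lock = { 0 };

	/* Uncontended: the lock is stolen immediately (prints 1). */
	printf("uncontended: %d\n", demo_hybrid_trylock(&lock));

	/* Pending bit visible: stealing is refused, caller must queue (prints 0). */
	atomic_store(&lock.val, DEMO_PENDING_MASK | DEMO_TAIL_MASK);
	printf("pending set: %d\n", demo_hybrid_trylock(&lock));
	return 0;
}
```

The sketch mirrors the commit's key design point: stealing is self-limiting because the queue head disables it via the pending bit, while lightly contended locks still get the cheap unfair fast path.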