path: root/sys/kern/kern_lock.c
author	mjg <mjg@FreeBSD.org>	2016-06-01 18:32:20 +0000
committer	mjg <mjg@FreeBSD.org>	2016-06-01 18:32:20 +0000
commit	25669dd1d9562b9b1717d5ef59b15e1716c81634 (patch)
tree	fd835dd80fc66ba43f6cdb85dc37e7ec1c12c055 /sys/kern/kern_lock.c
parent	1a924c729cc82916157766dbb0389b73fd4a79d8 (diff)
download	FreeBSD-src-25669dd1d9562b9b1717d5ef59b15e1716c81634.zip
	FreeBSD-src-25669dd1d9562b9b1717d5ef59b15e1716c81634.tar.gz
Microoptimize locking primitives by avoiding unnecessary atomic ops.
The inline versions of the primitives do an atomic op and, if it fails, fall back to the actual primitives, which immediately retry the atomic op. The obvious optimisation is to check whether the lock is free and only then proceed with the atomic op.

Reviewed by:	jhb, vangyzen
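For illustration only, below is a minimal sketch of the "check before cmpset" pattern the commit message describes, using hypothetical names (example_lock, example_trylock) and standard C11 atomics rather than the kernel's atomic_cmpset_acq_ptr(); it is not code from the FreeBSD tree.

/*
 * Sketch of the micro-optimisation: read the lock word first and only
 * attempt the atomic compare-and-swap when the lock appears free, so a
 * contended lock does not pay for a failing atomic op on every retry.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define EX_UNLOCKED	((uintptr_t)0)

struct example_lock {
	_Atomic uintptr_t lk_word;
};

static bool
example_trylock(struct example_lock *lk, uintptr_t tid)
{
	uintptr_t expected = EX_UNLOCKED;

	/*
	 * A plain load is much cheaper than a failed compare-and-swap
	 * that bounces the cache line between CPUs.
	 */
	if (atomic_load_explicit(&lk->lk_word, memory_order_relaxed) !=
	    EX_UNLOCKED)
		return (false);

	/* The lock looks free: now try the acquiring compare-and-swap. */
	return (atomic_compare_exchange_strong_explicit(&lk->lk_word,
	    &expected, tid, memory_order_acquire, memory_order_relaxed));
}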
Diffstat (limited to 'sys/kern/kern_lock.c')
-rw-r--r--	sys/kern/kern_lock.c	12
1 file changed, 9 insertions(+), 3 deletions(-)
diff --git a/sys/kern/kern_lock.c b/sys/kern/kern_lock.c
index 34d7b34..1b10623 100644
--- a/sys/kern/kern_lock.c
+++ b/sys/kern/kern_lock.c
@@ -787,8 +787,10 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
break;
}
- while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED,
- tid)) {
+ for (;;) {
+ if (lk->lk_lock == LK_UNLOCKED &&
+ atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid))
+ break;
#ifdef HWPMC_HOOKS
PMC_SOFT_CALL( , , lock, failed);
#endif
@@ -1124,7 +1126,11 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
__func__, iwmesg, file, line);
}
- while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
+ for (;;) {
+ if (lk->lk_lock == LK_UNLOCKED &&
+ atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid))
+ break;
+
#ifdef HWPMC_HOOKS
PMC_SOFT_CALL( , , lock, failed);
#endif