From 25669dd1d9562b9b1717d5ef59b15e1716c81634 Mon Sep 17 00:00:00 2001
From: mjg
Date: Wed, 1 Jun 2016 18:32:20 +0000
Subject: Microoptimize locking primitives by avoiding unnecessary atomic ops.

The inline versions of the primitives do an atomic op and, if it fails, fall
back to the actual primitives, which immediately retry the atomic op.

The obvious optimisation is to check whether the lock is free and only then
proceed to do the atomic op.

Reviewed by:	jhb, vangyzen
---
 sys/kern/kern_lock.c | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)

(limited to 'sys/kern/kern_lock.c')

diff --git a/sys/kern/kern_lock.c b/sys/kern/kern_lock.c
index 34d7b34..1b10623 100644
--- a/sys/kern/kern_lock.c
+++ b/sys/kern/kern_lock.c
@@ -787,8 +787,10 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
 			break;
 		}
 
-		while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED,
-		    tid)) {
+		for (;;) {
+			if (lk->lk_lock == LK_UNLOCKED &&
+			    atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid))
+				break;
 #ifdef HWPMC_HOOKS
 			PMC_SOFT_CALL( , , lock, failed);
 #endif
@@ -1124,7 +1126,11 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
 			    __func__, iwmesg, file, line);
 		}
 
-		while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
+		for (;;) {
+			if (lk->lk_lock == LK_UNLOCKED &&
+			    atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid))
+				break;
+
 #ifdef HWPMC_HOOKS
 			PMC_SOFT_CALL( , , lock, failed);
 #endif
--
cgit v1.1
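
For illustration, here is a minimal standalone sketch of the same "check before CAS" (test-and-test-and-set) idea, written with C11 atomics rather than FreeBSD's atomic_cmpset_acq_ptr(); the toy_lock structure and the try_acquire_* names are assumptions made up for this example, not part of the kernel code above.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TL_UNLOCKED	((uintptr_t)0)	/* sentinel: nobody holds the lock */

struct toy_lock {
	_Atomic uintptr_t owner;	/* TL_UNLOCKED or the owning thread id */
};

/*
 * Naive acquire: issue the compare-and-swap unconditionally.  Under
 * contention every attempt is an atomic read-modify-write, which keeps the
 * lock's cache line bouncing between CPUs even though the CAS mostly fails.
 */
static bool
try_acquire_naive(struct toy_lock *lk, uintptr_t tid)
{
	uintptr_t expected = TL_UNLOCKED;

	return (atomic_compare_exchange_strong_explicit(&lk->owner,
	    &expected, tid, memory_order_acquire, memory_order_relaxed));
}

/*
 * Optimized acquire, mirroring the patch above: first do a plain load and
 * only attempt the CAS when the lock looks free.  Contending waiters then
 * mostly perform shared reads instead of failing atomic ops.
 */
static bool
try_acquire_checked(struct toy_lock *lk, uintptr_t tid)
{
	uintptr_t expected;

	if (atomic_load_explicit(&lk->owner, memory_order_relaxed) !=
	    TL_UNLOCKED)
		return (false);		/* looks owned; skip the atomic op */

	expected = TL_UNLOCKED;
	return (atomic_compare_exchange_strong_explicit(&lk->owner,
	    &expected, tid, memory_order_acquire, memory_order_relaxed));
}

int
main(void)
{
	struct toy_lock lk = { .owner = TL_UNLOCKED };
	uintptr_t tid = 1;

	/* Uncontended case: the checked variant succeeds on the first try. */
	printf("first attempt:  %d\n", try_acquire_checked(&lk, tid));
	/* Lock now held, so a retry bails out without issuing a CAS. */
	printf("second attempt: %d\n", try_acquire_checked(&lk, tid));
	(void)try_acquire_naive;
	return (0);
}

As in the kernel change, the uncontended fast path still costs exactly one atomic op; only the contended path changes, trading a failed CAS per spin for an ordinary load.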