path: root/sys/kern/kern_sx.c
author:    mjg <mjg@FreeBSD.org>  2016-06-01 18:32:20 +0000
committer: mjg <mjg@FreeBSD.org>  2016-06-01 18:32:20 +0000
commit:    25669dd1d9562b9b1717d5ef59b15e1716c81634 (patch)
tree:      fd835dd80fc66ba43f6cdb85dc37e7ec1c12c055 /sys/kern/kern_sx.c
parent:    1a924c729cc82916157766dbb0389b73fd4a79d8 (diff)
Microoptimize locking primitives by avoiding unnecessary atomic ops.
The inline versions of the primitives do an atomic op and, if it fails, fall back to the actual primitives, which immediately retry the atomic op. The obvious optimization is to first check whether the lock is free and only then attempt the atomic op.

Reviewed by:	jhb, vangyzen
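For illustration, here is a minimal C11 sketch of the same fast-path idea, not taken from the FreeBSD tree; the lock word, the LOCK_UNLOCKED constant, and the function names are hypothetical. The old path issues the compare-and-swap unconditionally, while the new path first does a plain read of the lock word and only attempts the CAS when the lock looks free, so a contended lock is not pulled into exclusive cache state on every retry. The hunk below applies the same check inside the retry loop of _sx_xlock_hard().

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* Hypothetical lock word: 0 is unlocked, otherwise the owner's tid. */
static _Atomic uintptr_t lock_word;
#define	LOCK_UNLOCKED	((uintptr_t)0)

/* Before: always attempt the CAS, even when the lock is visibly held. */
static bool
try_acquire_old(uintptr_t tid)
{
	uintptr_t expected = LOCK_UNLOCKED;

	return (atomic_compare_exchange_strong_explicit(&lock_word,
	    &expected, tid, memory_order_acquire, memory_order_relaxed));
}

/* After: only attempt the CAS when a plain read says the lock is free. */
static bool
try_acquire_new(uintptr_t tid)
{
	uintptr_t expected = LOCK_UNLOCKED;

	if (atomic_load_explicit(&lock_word, memory_order_relaxed) !=
	    LOCK_UNLOCKED)
		return (false);
	return (atomic_compare_exchange_strong_explicit(&lock_word,
	    &expected, tid, memory_order_acquire, memory_order_relaxed));
}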
Diffstat (limited to 'sys/kern/kern_sx.c')
-rw-r--r--	sys/kern/kern_sx.c	| 5
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/sys/kern/kern_sx.c b/sys/kern/kern_sx.c
index 96e117b..2a81c04 100644
--- a/sys/kern/kern_sx.c
+++ b/sys/kern/kern_sx.c
@@ -544,7 +544,10 @@ _sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
all_time -= lockstat_nsecs(&sx->lock_object);
state = sx->sx_lock;
#endif
- while (!atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED, tid)) {
+ for (;;) {
+ if (sx->sx_lock == SX_LOCK_UNLOCKED &&
+ atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED, tid))
+ break;
#ifdef KDTRACE_HOOKS
spin_cnt++;
#endif