path: root/sys/kern/kern_rwlock.c
author     mjg <mjg@FreeBSD.org>  2016-06-01 18:32:20 +0000
committer  mjg <mjg@FreeBSD.org>  2016-06-01 18:32:20 +0000
commit     25669dd1d9562b9b1717d5ef59b15e1716c81634 (patch)
tree       fd835dd80fc66ba43f6cdb85dc37e7ec1c12c055 /sys/kern/kern_rwlock.c
parent     1a924c729cc82916157766dbb0389b73fd4a79d8 (diff)
download   FreeBSD-src-25669dd1d9562b9b1717d5ef59b15e1716c81634.zip
           FreeBSD-src-25669dd1d9562b9b1717d5ef59b15e1716c81634.tar.gz
Microoptimize locking primitives by avoiding unnecessary atomic ops.
The inline versions of the primitives do an atomic op and, if it fails, fall back to the actual primitives, which immediately retry the atomic op. The obvious optimisation is to check whether the lock is free and only then proceed with the atomic op.

Reviewed by:	jhb, vangyzen
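For illustration, the pattern reads roughly as in the following minimal, self-contained sketch using C11 atomics. The names (struct lk, lk_try_acquire, lk_acquire_hard) are hypothetical stand-ins for the kernel's rw lock primitives, not the FreeBSD API itself.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define	LK_UNLOCKED	((uintptr_t)0)

struct lk {
	_Atomic uintptr_t word;		/* 0 = unlocked, otherwise owner tid */
};

/* One compare-and-swap attempt; plays the role _rw_write_lock() plays below. */
static bool
lk_try_acquire(struct lk *l, uintptr_t tid)
{
	uintptr_t v = LK_UNLOCKED;

	return (atomic_compare_exchange_strong(&l->word, &v, tid));
}

/*
 * Slow path.  Before this change the loop body started with another CAS;
 * now a plain load first checks that the lock looks free, and only then
 * is the atomic op issued.
 */
static void
lk_acquire_hard(struct lk *l, uintptr_t tid)
{
	for (;;) {
		if (atomic_load_explicit(&l->word, memory_order_relaxed) ==
		    LK_UNLOCKED && lk_try_acquire(l, tid))
			break;
		/* The real primitives spin, sleep and record profiling here. */
	}
}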
Diffstat (limited to 'sys/kern/kern_rwlock.c')
-rw-r--r--  sys/kern/kern_rwlock.c  4
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/sys/kern/kern_rwlock.c b/sys/kern/kern_rwlock.c
index 6541724..6a904d2 100644
--- a/sys/kern/kern_rwlock.c
+++ b/sys/kern/kern_rwlock.c
@@ -771,7 +771,9 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t tid, const char *file,
all_time -= lockstat_nsecs(&rw->lock_object);
state = rw->rw_lock;
#endif
- while (!_rw_write_lock(rw, tid)) {
+ for (;;) {
+ if (rw->rw_lock == RW_UNLOCKED && _rw_write_lock(rw, tid))
+ break;
#ifdef KDTRACE_HOOKS
spin_cnt++;
#endif
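Continuing the hypothetical sketch above, the inline fast path that the commit message refers to would look roughly like this; on a failed CAS it falls back to the hard path, which before this change immediately retried the same atomic op:

static inline void
lk_acquire(struct lk *l, uintptr_t tid)
{
	/* Inline fast path: a single CAS covers the uncontended case. */
	if (!lk_try_acquire(l, tid))
		lk_acquire_hard(l, tid);	/* contended: fall back to the slow path */
}

The benefit comes from cache coherence: on most architectures a compare-and-swap, even a failing one, needs the cache line in exclusive state, whereas spinning on a plain read lets the line stay shared until the holder drops the lock, so the cheap load filters out attempts that were bound to fail.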