author    mjg <mjg@FreeBSD.org>    2016-12-31 16:37:47 +0000
committer mjg <mjg@FreeBSD.org>    2016-12-31 16:37:47 +0000
commit    4d5f81f319b8c07d6c5caff90308903ca20a6668 (patch)
tree      4326353ab58864342ca1345f0d4c12b8891f25e5 /sys/kern
parent    f3626becbf8107ecc4188b9c807fa02b06527c46 (diff)
MFC r301157:
Microoptimize locking primitives by avoiding unnecessary atomic ops. The inline versions of the primitives perform an atomic op and, if it fails, fall back to the actual primitives, which immediately retry the atomic op. The obvious optimization is to check whether the lock is free and only then attempt the atomic op.
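The change below is the classic test-and-test-and-set reshaping of an acquire loop. As a rough illustration only, here is a minimal user-space sketch of the before/after shape using C11 atomics rather than the kernel's atomic(9) and locking APIs; struct toy_lock, toy_lock_old() and toy_lock_new() are made-up names for this sketch, not FreeBSD interfaces.

#include <stdatomic.h>
#include <stdint.h>

/*
 * Toy spinlock used only to illustrate the pattern; 0 means unlocked,
 * any other value is the owning thread's id.
 */
struct toy_lock {
        _Atomic uintptr_t owner;
};

#define TOY_UNLOCKED    ((uintptr_t)0)

/* Old pattern: fire the compare-and-set on every iteration. */
void
toy_lock_old(struct toy_lock *lk, uintptr_t tid)
{
        uintptr_t exp;

        for (;;) {
                exp = TOY_UNLOCKED;
                if (atomic_compare_exchange_weak_explicit(&lk->owner, &exp,
                    tid, memory_order_acquire, memory_order_relaxed))
                        return;
                /* The atomic op failed; retry it immediately. */
        }
}

/* New pattern: plain read first, atomic op only when the lock looks free. */
void
toy_lock_new(struct toy_lock *lk, uintptr_t tid)
{
        uintptr_t exp;

        for (;;) {
                if (atomic_load_explicit(&lk->owner, memory_order_relaxed) ==
                    TOY_UNLOCKED) {
                        exp = TOY_UNLOCKED;
                        if (atomic_compare_exchange_weak_explicit(&lk->owner,
                            &exp, tid, memory_order_acquire,
                            memory_order_relaxed))
                                return;
                }
                /* Lock is held or the cmpset lost a race; spin on reads. */
        }
}

The win under contention is that a waiting CPU mostly spins on ordinary loads against its cached copy of the lock word instead of repeatedly issuing locked compare-and-swap instructions that bounce the cache line between cores; the diff applies this reshaping to lockmgr, mutexes, rwlocks and sx locks.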
Diffstat (limited to 'sys/kern')
-rw-r--r--   sys/kern/kern_lock.c     12
-rw-r--r--   sys/kern/kern_mutex.c    13
-rw-r--r--   sys/kern/kern_rwlock.c    4
-rw-r--r--   sys/kern/kern_sx.c        5
4 files changed, 25 insertions, 9 deletions
diff --git a/sys/kern/kern_lock.c b/sys/kern/kern_lock.c
index e3e946e..9681c87 100644
--- a/sys/kern/kern_lock.c
+++ b/sys/kern/kern_lock.c
@@ -792,8 +792,10 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
break;
}
- while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED,
- tid)) {
+ for (;;) {
+ if (lk->lk_lock == LK_UNLOCKED &&
+ atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid))
+ break;
#ifdef HWPMC_HOOKS
PMC_SOFT_CALL( , , lock, failed);
#endif
@@ -1129,7 +1131,11 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
__func__, iwmesg, file, line);
}
- while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
+ for (;;) {
+ if (lk->lk_lock == LK_UNLOCKED &&
+ atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid))
+ break;
+
#ifdef HWPMC_HOOKS
PMC_SOFT_CALL( , , lock, failed);
#endif
diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c
index 8d19f2e..6107e79 100644
--- a/sys/kern/kern_mutex.c
+++ b/sys/kern/kern_mutex.c
@@ -451,7 +451,9 @@ __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t tid, int opts,
all_time -= lockstat_nsecs(&m->lock_object);
#endif
- while (!_mtx_obtain_lock(m, tid)) {
+ for (;;) {
+ if (m->mtx_lock == MTX_UNOWNED && _mtx_obtain_lock(m, tid))
+ break;
#ifdef KDTRACE_HOOKS
spin_cnt++;
#endif
@@ -634,8 +636,9 @@ _mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t tid, int opts,
#ifdef KDTRACE_HOOKS
spin_time -= lockstat_nsecs(&m->lock_object);
#endif
- while (!_mtx_obtain_lock(m, tid)) {
-
+ for (;;) {
+ if (m->mtx_lock == MTX_UNOWNED && _mtx_obtain_lock(m, tid))
+ break;
/* Give interrupts a chance while we spin. */
spinlock_exit();
while (m->mtx_lock != MTX_UNOWNED) {
@@ -714,7 +717,9 @@ retry:
m->lock_object.lo_name, file, line));
WITNESS_CHECKORDER(&m->lock_object,
opts | LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);
- while (!_mtx_obtain_lock(m, tid)) {
+ for (;;) {
+ if (m->mtx_lock == MTX_UNOWNED && _mtx_obtain_lock(m, tid))
+ break;
if (m->mtx_lock == tid) {
m->mtx_recurse++;
break;
diff --git a/sys/kern/kern_rwlock.c b/sys/kern/kern_rwlock.c
index 334d83d..3a42d7a 100644
--- a/sys/kern/kern_rwlock.c
+++ b/sys/kern/kern_rwlock.c
@@ -768,7 +768,9 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t tid, const char *file,
all_time -= lockstat_nsecs(&rw->lock_object);
state = rw->rw_lock;
#endif
- while (!_rw_write_lock(rw, tid)) {
+ for (;;) {
+ if (rw->rw_lock == RW_UNLOCKED && _rw_write_lock(rw, tid))
+ break;
#ifdef KDTRACE_HOOKS
spin_cnt++;
#endif
diff --git a/sys/kern/kern_sx.c b/sys/kern/kern_sx.c
index 952f7d4..3ba482b 100644
--- a/sys/kern/kern_sx.c
+++ b/sys/kern/kern_sx.c
@@ -547,7 +547,10 @@ _sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
all_time -= lockstat_nsecs(&sx->lock_object);
state = sx->sx_lock;
#endif
- while (!atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED, tid)) {
+ for (;;) {
+ if (sx->sx_lock == SX_LOCK_UNLOCKED &&
+ atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED, tid))
+ break;
#ifdef KDTRACE_HOOKS
spin_cnt++;
#endif