summaryrefslogtreecommitdiffstats
path: root/sys/kern/kern_lock.c
diff options
context:
space:
mode:
author: attilio <attilio@FreeBSD.org> 2009-09-02 17:33:51 +0000
committer: attilio <attilio@FreeBSD.org> 2009-09-02 17:33:51 +0000
commit ce3b4baf3c197905cedc13d17c7a220f68240510 (patch)
tree 6d6740433b14bf13d7e3fd94a480ec836e8f5f6c /sys/kern/kern_lock.c
parent 7cb9716a594877e05d088ad1c888a1b184340af0 (diff)
downloadFreeBSD-src-ce3b4baf3c197905cedc13d17c7a220f68240510.zip
FreeBSD-src-ce3b4baf3c197905cedc13d17c7a220f68240510.tar.gz
Fix some bugs related to adaptive spinning:
In the lockmgr support: - GIANT_RESTORE() is just called when the sleep finishes, so the current code can end up with a Giant unlock problem. Fix it by calling GIANT_RESTORE() appropriately when needed. Note that this is not exactly ideal because for any iteration of the adaptive spinning we drop and restore Giant, but the overhead should not be a factor. - In the lock held in exclusive mode case, after the adaptive spinning is brought to completion, we should just retry to acquire the lock instead of falling through. Fix that. - Fix a style nit. In the sx support: - Call GIANT_SAVE() before looping. This saves some overhead because in the current code GIANT_SAVE() is called several times. Tested by: Giovanni Trematerra <giovanni dot trematerra at gmail dot com>
Diffstat (limited to 'sys/kern/kern_lock.c')
-rw-r--r--sys/kern/kern_lock.c13
1 files changed, 11 insertions, 2 deletions
diff --git a/sys/kern/kern_lock.c b/sys/kern/kern_lock.c
index 29ae4ac..e6f2f53 100644
--- a/sys/kern/kern_lock.c
+++ b/sys/kern/kern_lock.c
@@ -467,7 +467,10 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
/*
* If the owner is running on another CPU, spin until
* the owner stops running or the state of the lock
- * changes.
+ * changes. We need a double-state handle here
+ * because for a failed acquisition the lock can be
+ * either held in exclusive mode or shared mode
+ * (for the writer starvation avoidance technique).
*/
if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
LK_HOLDER(x) != LK_KERNPROC) {
@@ -491,8 +494,10 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
while (LK_HOLDER(lk->lk_lock) ==
(uintptr_t)owner && TD_IS_RUNNING(owner))
cpu_spinwait();
+ GIANT_RESTORE();
+ continue;
} else if (LK_CAN_ADAPT(lk, flags) &&
- (x & LK_SHARE) !=0 && LK_SHARERS(x) &&
+ (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
spintries < alk_retries) {
if (flags & LK_INTERLOCK) {
class->lc_unlock(ilk);
@@ -511,6 +516,7 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
break;
cpu_spinwait();
}
+ GIANT_RESTORE();
if (i != alk_loops)
continue;
}
@@ -704,6 +710,8 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
while (LK_HOLDER(lk->lk_lock) ==
(uintptr_t)owner && TD_IS_RUNNING(owner))
cpu_spinwait();
+ GIANT_RESTORE();
+ continue;
} else if (LK_CAN_ADAPT(lk, flags) &&
(x & LK_SHARE) != 0 && LK_SHARERS(x) &&
spintries < alk_retries) {
@@ -727,6 +735,7 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
break;
cpu_spinwait();
}
+ GIANT_RESTORE();
if (i != alk_loops)
continue;
}
OpenPOWER on IntegriCloud