author     jhb <jhb@FreeBSD.org>    2008-08-05 20:02:31 +0000
committer  jhb <jhb@FreeBSD.org>    2008-08-05 20:02:31 +0000
commit     8af56fb6875120edaae014a4cc547f5e14609a12 (patch)
tree       80c45ebfc07976bf62d8de55267fa69fa52fdc1d /sys/kern/kern_lock.c
parent     be95b0fe3c10d07ed8c0887e7695a358ee6a2e17 (diff)
If a thread that is swapped out is made runnable, then the setrunnable()
routine wakes up proc0 so that proc0 can swap the thread back in.
Historically, this has been done by waking up proc0 directly from
setrunnable() itself via a wakeup(). When waking up a sleeping thread that
was swapped out (the usual case when waking proc0, since only sleeping
threads are eligible to be swapped out), this resulted in a bit of
recursion (e.g. wakeup() -> setrunnable() -> wakeup()).

With sleep queues having separate locks in 6.x and later, this caused a
spin lock LOR (sleepq lock -> sched_lock/thread lock -> sleepq lock). An
attempt was made to fix this in 7.0 by making the proc0 wakeup use the
ithread mechanism for doing the wakeup. However, this required grabbing
proc0's thread lock to perform the wakeup. If proc0 was asleep elsewhere
in the kernel (e.g. waiting for disk I/O), then this degenerated into the
same LOR since the thread lock would be some other sleepq lock.

Fix this by deferring the wakeup of the swapper until after the sleepq
lock held by the upper layer has been released. The setrunnable() routine
now returns a boolean value to indicate whether or not proc0 needs to be
woken up. The end result is that consumers of the sleepq API such as
*sleep/wakeup, condition variables, sx locks, and lockmgr have to wake up
proc0 if they get a non-zero return value from sleepq_abort(),
sleepq_broadcast(), or sleepq_signal().

Discussed with:  jeff
Glanced at by:   sam
Tested by:       Jurgen Weber  jurgen - ish com au
MFC after:       2 weeks
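For illustration, the pattern this change imposes on sleepq consumers looks
roughly like the sketch below. It mirrors what the lockmgr hunks in this diff
do: capture the return value of sleepq_broadcast(), drop the sleep queue lock,
and only then kick the swapper. The wrapper name (example_wakeup_all) and the
wait channel are hypothetical; sleepq_lock(), sleepq_broadcast(),
sleepq_release(), and kick_proc0() are the interfaces named in the commit
message.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/sleepqueue.h>

/*
 * Hypothetical sleepq consumer: wake every thread sleeping on 'wchan'.
 * sleepq_broadcast() now reports whether a swapped-out thread was made
 * runnable, so the caller must wake proc0, but only after the sleep
 * queue chain lock has been dropped (avoiding the sleepq lock ->
 * thread lock -> sleepq lock LOR described above).
 */
static void
example_wakeup_all(void *wchan)
{
	int wakeup_swapper;

	sleepq_lock(wchan);
	wakeup_swapper = sleepq_broadcast(wchan, SLEEPQ_SLEEP, 0, 0);
	sleepq_release(wchan);

	/* No sleepq lock is held here, so it is safe to wake the swapper. */
	if (wakeup_swapper)
		kick_proc0();
}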
Diffstat (limited to 'sys/kern/kern_lock.c')
-rw-r--r--  sys/kern/kern_lock.c  25
1 file changed, 16 insertions(+), 9 deletions(-)
diff --git a/sys/kern/kern_lock.c b/sys/kern/kern_lock.c
index b3f0e25..2f71efa 100644
--- a/sys/kern/kern_lock.c
+++ b/sys/kern/kern_lock.c
@@ -196,17 +196,18 @@ sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
 	return (error);
 }
 
-static __inline void
+static __inline int
 wakeupshlk(struct lock *lk, const char *file, int line)
 {
 	uintptr_t v, x;
-	int queue;
+	int queue, wakeup_swapper;
 
 	TD_LOCKS_DEC(curthread);
 	TD_SLOCKS_DEC(curthread);
 	WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
 	LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);
 
+	wakeup_swapper = 0;
 	for (;;) {
 		x = lk->lk_lock;
@@ -261,12 +262,14 @@ wakeupshlk(struct lock *lk, const char *file, int line)
 		LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
 		    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
 		    "exclusive");
-		sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, 0, queue);
+		wakeup_swapper = sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
+		    0, queue);
 		sleepq_release(&lk->lock_object);
 		break;
 	}
 
 	lock_profile_release_lock(&lk->lock_object);
+	return (wakeup_swapper);
 }
 
 static void
@@ -335,7 +338,7 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
 	const char *iwmesg;
 	uintptr_t tid, v, x;
 	u_int op;
-	int contested, error, ipri, itimo, queue;
+	int contested, error, ipri, itimo, queue, wakeup_swapper;
 
 	contested = 0;
 	error = 0;
@@ -367,6 +370,7 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
 	if (op == LK_SHARED && (lk->lock_object.lo_flags & LK_NOSHARE))
 		op = LK_EXCLUSIVE;
 
+	wakeup_swapper = 0;
 	switch (op) {
 	case LK_SHARED:
 		if (LK_CAN_WITNESS(flags))
@@ -495,7 +499,7 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
 		 * We have been unable to succeed in upgrading, so just
 		 * give up the shared lock.
 		 */
-		wakeupshlk(lk, file, line);
+		wakeup_swapper += wakeupshlk(lk, file, line);
 
 		/* FALLTHROUGH */
 	case LK_EXCLUSIVE:
@@ -710,11 +714,12 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
 			    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
 			    "exclusive");
 			atomic_store_rel_ptr(&lk->lk_lock, v);
-			sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, 0, queue);
+			wakeup_swapper = sleepq_broadcast(&lk->lock_object,
+			    SLEEPQ_LK, 0, queue);
 			sleepq_release(&lk->lock_object);
 			break;
 		} else
-			wakeupshlk(lk, file, line);
+			wakeup_swapper = wakeupshlk(lk, file, line);
 		break;
 	case LK_DRAIN:
 		if (LK_CAN_WITNESS(flags))
@@ -782,8 +787,8 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
"%s: %p waking up all threads on the %s queue",
__func__, lk, queue == SQ_SHARED_QUEUE ?
"shared" : "exclusive");
- sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
- 0, queue);
+ wakeup_swapper += sleepq_broadcast(
+ &lk->lock_object, SLEEPQ_LK, 0, queue);
/*
* If shared waiters have been woken up we need
@@ -850,6 +855,8 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
 	if (flags & LK_INTERLOCK)
 		class->lc_unlock(ilk);
+	if (wakeup_swapper)
+		kick_proc0();
 
 	return (error);
 }