author		jhb <jhb@FreeBSD.org>	2008-08-05 20:02:31 +0000
committer	jhb <jhb@FreeBSD.org>	2008-08-05 20:02:31 +0000
commit		8af56fb6875120edaae014a4cc547f5e14609a12 (patch)
tree		80c45ebfc07976bf62d8de55267fa69fa52fdc1d	/sys/kern/kern_sx.c
parent		be95b0fe3c10d07ed8c0887e7695a358ee6a2e17 (diff)
If a thread that is swapped out is made runnable, then the setrunnable()
routine wakes up proc0 so that proc0 can swap the thread back in.
Historically, this has been done by waking up proc0 directly from
setrunnable() itself via a wakeup().  When waking up a sleeping thread
that was swapped out (the usual case when waking proc0 since only
sleeping threads are eligible to be swapped out), this resulted in a bit
of recursion (e.g. wakeup() -> setrunnable() -> wakeup()).

With sleep queues having separate locks in 6.x and later, this caused a
spin lock LOR (sleepq lock -> sched_lock/thread lock -> sleepq lock).
An attempt was made to fix this in 7.0 by making the proc0 wakeup use
the ithread mechanism for doing the wakeup.  However, this required
grabbing proc0's thread lock to perform the wakeup.  If proc0 was asleep
elsewhere in the kernel (e.g. waiting for disk I/O), then this
degenerated into the same LOR since the thread lock would be some other
sleepq lock.

Fix this by deferring the wakeup of the swapper until after the sleepq
lock held by the upper layer has been released.  The setrunnable()
routine now returns a boolean value to indicate whether or not proc0
needs to be woken up.  The end result is that consumers of the sleepq
API such as *sleep/wakeup, condition variables, sx locks, and lockmgr
have to wake up proc0 if they get a non-zero return value from
sleepq_abort(), sleepq_broadcast(), or sleepq_signal().

Discussed with:	jeff
Glanced at by:	sam
Tested by:	Jurgen Weber  jurgen - ish com au
MFC after:	2 weeks
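The pattern is the same at all three kick_proc0() call sites in the diff
below: record the broadcast's return value while the sleep-queue lock is
held, drop the lock, and only then poke the swapper.  Here is a minimal
user-space sketch of that deferred-wakeup shape; wait_queue,
wq_broadcast(), and kick_swapper() are hypothetical stand-ins for the
sleepq API and kick_proc0(), and a pthread mutex models the sleep-queue
chain lock.  This is an illustration of the ordering, not the kernel
implementation.

/*
 * Minimal user-space sketch of the deferred-wakeup pattern described
 * above.  wait_queue, wq_broadcast, and kick_swapper are hypothetical
 * stand-ins for the sleepq API and kick_proc0().
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct wait_queue {
	pthread_mutex_t	wq_lock;	/* models the sleepq chain lock */
	bool		wq_swapped;	/* some waiter is swapped out */
};

/*
 * Make all waiters runnable.  Returns true if one of them was swapped
 * out, i.e. the swapper must be poked -- but the caller may only do
 * that after dropping wq_lock, or the old lock order reappears.
 */
static bool
wq_broadcast(struct wait_queue *wq)
{
	bool wakeup_swapper;

	wakeup_swapper = wq->wq_swapped;
	wq->wq_swapped = false;
	/* ... mark the queued waiters runnable here ... */
	return (wakeup_swapper);
}

static void
kick_swapper(void)
{
	/* Stands in for kick_proc0(): wake the in-kernel swapper. */
	printf("swapper kicked\n");
}

int
main(void)
{
	struct wait_queue wq = {
		.wq_lock = PTHREAD_MUTEX_INITIALIZER,
		.wq_swapped = true,
	};
	bool wakeup_swapper;

	pthread_mutex_lock(&wq.wq_lock);
	wakeup_swapper = wq_broadcast(&wq);
	pthread_mutex_unlock(&wq.wq_lock);	/* drop the queue lock first, */
	if (wakeup_swapper)			/* ...then wake the swapper */
		kick_swapper();
	return (0);
}

The ordering is the whole point: the swapper is kicked only once the
queue lock has been dropped, so no path holds one sleep-queue lock while
taking the lock covering another sleep queue, which is exactly the LOR
the commit message describes.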
Diffstat (limited to 'sys/kern/kern_sx.c')
-rw-r--r--	sys/kern/kern_sx.c | 23 +++++++++++++++++------
1 file changed, 17 insertions(+), 6 deletions(-)
diff --git a/sys/kern/kern_sx.c b/sys/kern/kern_sx.c
index 1f0011c..5006793 100644
--- a/sys/kern/kern_sx.c
+++ b/sys/kern/kern_sx.c
@@ -361,6 +361,7 @@ void
 _sx_downgrade(struct sx *sx, const char *file, int line)
 {
 	uintptr_t x;
+	int wakeup_swapper;
 
 	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
 	    ("sx_downgrade() of destroyed sx @ %s:%d", file, line));
@@ -401,15 +402,19 @@ _sx_downgrade(struct sx *sx, const char *file, int line)
 	 * Preserve SX_LOCK_EXCLUSIVE_WAITERS while downgraded to a single
 	 * shared lock.  If there are any shared waiters, wake them up.
 	 */
+	wakeup_swapper = 0;
 	x = sx->sx_lock;
 	atomic_store_rel_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) |
 	    (x & SX_LOCK_EXCLUSIVE_WAITERS));
 	if (x & SX_LOCK_SHARED_WAITERS)
-		sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0,
-		    SQ_SHARED_QUEUE);
+		wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX,
+		    0, SQ_SHARED_QUEUE);
 	sleepq_release(&sx->lock_object);
 
 	LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line);
+
+	if (wakeup_swapper)
+		kick_proc0();
 }
 
 /*
@@ -589,7 +594,7 @@ void
 _sx_xunlock_hard(struct sx *sx, uintptr_t tid, const char *file, int line)
 {
 	uintptr_t x;
-	int queue;
+	int queue, wakeup_swapper;
 
 	MPASS(!(sx->sx_lock & SX_LOCK_SHARED));
 
@@ -627,8 +632,11 @@ _sx_xunlock_hard(struct sx *sx, uintptr_t tid, const char *file, int line)
 		    __func__, sx, queue == SQ_SHARED_QUEUE ? "shared" :
 		    "exclusive");
 	atomic_store_rel_ptr(&sx->sx_lock, x);
-	sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0, queue);
+	wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0,
+	    queue);
 	sleepq_release(&sx->lock_object);
+	if (wakeup_swapper)
+		kick_proc0();
 }
 
 /*
@@ -795,6 +803,7 @@ void
 _sx_sunlock_hard(struct sx *sx, const char *file, int line)
 {
 	uintptr_t x;
+	int wakeup_swapper;
 
 	for (;;) {
 		x = sx->sx_lock;
@@ -862,9 +871,11 @@ _sx_sunlock_hard(struct sx *sx, const char *file, int line)
 		if (LOCK_LOG_TEST(&sx->lock_object, 0))
 			CTR2(KTR_LOCK, "%s: %p waking up all thread on"
 			    "exclusive queue", __func__, sx);
-		sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0,
-		    SQ_EXCLUSIVE_QUEUE);
+		wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX,
+		    0, SQ_EXCLUSIVE_QUEUE);
 		sleepq_release(&sx->lock_object);
+		if (wakeup_swapper)
+			kick_proc0();
 		break;
 	}
 }