Diffstat (limited to 'sys')
-rw-r--r--  sys/kern/kern_condvar.c     13
-rw-r--r--  sys/kern/kern_lock.c        25
-rw-r--r--  sys/kern/kern_sig.c         12
-rw-r--r--  sys/kern/kern_sx.c          23
-rw-r--r--  sys/kern/kern_synch.c       32
-rw-r--r--  sys/kern/kern_thread.c      14
-rw-r--r--  sys/kern/subr_sleepqueue.c  67
-rw-r--r--  sys/sys/proc.h               2
-rw-r--r--  sys/sys/sleepqueue.h         6
-rw-r--r--  sys/vm/vm_glue.c            53
10 files changed, 150 insertions, 97 deletions
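Every file below applies the same pattern: the sleepqueue wakeup primitives (sleepq_signal(), sleepq_broadcast(), sleepq_abort()) and setrunnable() now return nonzero when a resumed thread is swapped out, and each caller defers kick_proc0() until all sleepqueue and thread locks have been dropped. A minimal sketch of the new calling convention, modeled on the wakeup() hunk in kern_synch.c below (example_wakeup is an invented name, not part of the diff):

        /*
         * Sketch of the deferred-kick convention this diff introduces.
         * Assumes the usual kernel headers (sys/param.h, sys/systm.h,
         * sys/proc.h, sys/sleepqueue.h).
         */
        static void
        example_wakeup(void *ident)
        {
                int wakeup_swapper;

                sleepq_lock(ident);
                /* Nonzero means a woken thread must be swapped back in. */
                wakeup_swapper = sleepq_broadcast(ident, SLEEPQ_SLEEP, 0, 0);
                sleepq_release(ident);
                if (wakeup_swapper)
                        kick_proc0();   /* safe: no sleepqueue locks held */
        }

Kicking proc0 only after sleepq_release() avoids the lock-order reversal between the thread lock and the sleepqueue chain locks that the old in-line kick inside setrunnable() had to work around.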
diff --git a/sys/kern/kern_condvar.c b/sys/kern/kern_condvar.c
index eb804ca..5ee40a3 100644
--- a/sys/kern/kern_condvar.c
+++ b/sys/kern/kern_condvar.c
@@ -389,13 +389,17 @@ _cv_timedwait_sig(struct cv *cvp, struct lock_object *lock, int timo)
void
cv_signal(struct cv *cvp)
{
+ int wakeup_swapper;
+ wakeup_swapper = 0;
sleepq_lock(cvp);
if (cvp->cv_waiters > 0) {
cvp->cv_waiters--;
- sleepq_signal(cvp, SLEEPQ_CONDVAR, 0, 0);
+ wakeup_swapper = sleepq_signal(cvp, SLEEPQ_CONDVAR, 0, 0);
}
sleepq_release(cvp);
+ if (wakeup_swapper)
+ kick_proc0();
}
/*
@@ -405,16 +409,21 @@ cv_signal(struct cv *cvp)
void
cv_broadcastpri(struct cv *cvp, int pri)
{
+ int wakeup_swapper;
+
/*
* XXX sleepq_broadcast pri argument changed from -1 meaning
* no pri to 0 meaning no pri.
*/
+ wakeup_swapper = 0;
if (pri == -1)
pri = 0;
sleepq_lock(cvp);
if (cvp->cv_waiters > 0) {
cvp->cv_waiters = 0;
- sleepq_broadcast(cvp, SLEEPQ_CONDVAR, pri, 0);
+ wakeup_swapper = sleepq_broadcast(cvp, SLEEPQ_CONDVAR, pri, 0);
}
sleepq_release(cvp);
+ if (wakeup_swapper)
+ kick_proc0();
}
diff --git a/sys/kern/kern_lock.c b/sys/kern/kern_lock.c
index b3f0e25..2f71efa 100644
--- a/sys/kern/kern_lock.c
+++ b/sys/kern/kern_lock.c
@@ -196,17 +196,18 @@ sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
return (error);
}
-static __inline void
+static __inline int
wakeupshlk(struct lock *lk, const char *file, int line)
{
uintptr_t v, x;
- int queue;
+ int queue, wakeup_swapper;
TD_LOCKS_DEC(curthread);
TD_SLOCKS_DEC(curthread);
WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);
+ wakeup_swapper = 0;
for (;;) {
x = lk->lk_lock;
@@ -261,12 +262,14 @@ wakeupshlk(struct lock *lk, const char *file, int line)
LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
__func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
"exclusive");
- sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, 0, queue);
+ wakeup_swapper = sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
+ 0, queue);
sleepq_release(&lk->lock_object);
break;
}
lock_profile_release_lock(&lk->lock_object);
+ return (wakeup_swapper);
}
static void
@@ -335,7 +338,7 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
const char *iwmesg;
uintptr_t tid, v, x;
u_int op;
- int contested, error, ipri, itimo, queue;
+ int contested, error, ipri, itimo, queue, wakeup_swapper;
contested = 0;
error = 0;
@@ -367,6 +370,7 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
if (op == LK_SHARED && (lk->lock_object.lo_flags & LK_NOSHARE))
op = LK_EXCLUSIVE;
+ wakeup_swapper = 0;
switch (op) {
case LK_SHARED:
if (LK_CAN_WITNESS(flags))
@@ -495,7 +499,7 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
* We have been unable to succeed in upgrading, so just
* give up the shared lock.
*/
- wakeupshlk(lk, file, line);
+ wakeup_swapper += wakeupshlk(lk, file, line);
/* FALLTHROUGH */
case LK_EXCLUSIVE:
@@ -710,11 +714,12 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
__func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
"exclusive");
atomic_store_rel_ptr(&lk->lk_lock, v);
- sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, 0, queue);
+ wakeup_swapper = sleepq_broadcast(&lk->lock_object,
+ SLEEPQ_LK, 0, queue);
sleepq_release(&lk->lock_object);
break;
} else
- wakeupshlk(lk, file, line);
+ wakeup_swapper = wakeupshlk(lk, file, line);
break;
case LK_DRAIN:
if (LK_CAN_WITNESS(flags))
@@ -782,8 +787,8 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
"%s: %p waking up all threads on the %s queue",
__func__, lk, queue == SQ_SHARED_QUEUE ?
"shared" : "exclusive");
- sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
- 0, queue);
+ wakeup_swapper += sleepq_broadcast(
+ &lk->lock_object, SLEEPQ_LK, 0, queue);
/*
* If shared waiters have been woken up we need
@@ -850,6 +855,8 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
if (flags & LK_INTERLOCK)
class->lc_unlock(ilk);
+ if (wakeup_swapper)
+ kick_proc0();
return (error);
}
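__lockmgr_args() can wake threads at several points in a single call (the failed-upgrade path, exclusive unlock, and LK_DRAIN), so it accumulates the results with += and issues one kick_proc0() after the interlock is dropped. A condensed, hypothetical illustration of that accumulation (the function and its control flow are invented, not the kernel's):

        /*
         * Hypothetical condensation of the wakeup flow in __lockmgr_args():
         * each wakeup site folds its result into wakeup_swapper and proc0
         * is kicked at most once, after the interlock is released.
         */
        static void
        lockmgr_wakeups_example(struct lock *lk, struct lock_object *ilk,
            struct lock_class *class)
        {
                int wakeup_swapper;

                wakeup_swapper = 0;
                sleepq_lock(&lk->lock_object);
                /* e.g. the shared-unlock site (cf. wakeupshlk()) ... */
                wakeup_swapper += sleepq_broadcast(&lk->lock_object,
                    SLEEPQ_LK, 0, SQ_SHARED_QUEUE);
                /* ... and the LK_DRAIN site on the exclusive queue. */
                wakeup_swapper += sleepq_broadcast(&lk->lock_object,
                    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
                sleepq_release(&lk->lock_object);
                class->lc_unlock(ilk);          /* drop the interlock first */
                if (wakeup_swapper)
                        kick_proc0();           /* then kick proc0 once */
        }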
diff --git a/sys/kern/kern_sig.c b/sys/kern/kern_sig.c
index ff02ee3..00c29d1 100644
--- a/sys/kern/kern_sig.c
+++ b/sys/kern/kern_sig.c
@@ -1957,6 +1957,7 @@ tdsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi)
struct sigacts *ps;
int intrval;
int ret = 0;
+ int wakeup_swapper;
PROC_LOCK_ASSERT(p, MA_OWNED);
@@ -2165,12 +2166,15 @@ tdsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi)
* the PROCESS runnable, leave it stopped.
* It may run a bit until it hits a thread_suspend_check().
*/
+ wakeup_swapper = 0;
PROC_SLOCK(p);
thread_lock(td);
if (TD_ON_SLEEPQ(td) && (td->td_flags & TDF_SINTR))
- sleepq_abort(td, intrval);
+ wakeup_swapper = sleepq_abort(td, intrval);
thread_unlock(td);
PROC_SUNLOCK(p);
+ if (wakeup_swapper)
+ kick_proc0();
goto out;
/*
* Mutexes are short lived. Threads waiting on them will
@@ -2237,7 +2241,9 @@ tdsigwakeup(struct thread *td, int sig, sig_t action, int intrval)
{
struct proc *p = td->td_proc;
register int prop;
+ int wakeup_swapper;
+ wakeup_swapper = 0;
PROC_LOCK_ASSERT(p, MA_OWNED);
prop = sigprop(sig);
@@ -2281,7 +2287,7 @@ tdsigwakeup(struct thread *td, int sig, sig_t action, int intrval)
if (td->td_priority > PUSER)
sched_prio(td, PUSER);
- sleepq_abort(td, intrval);
+ wakeup_swapper = sleepq_abort(td, intrval);
} else {
/*
* Other states do nothing with the signal immediately,
@@ -2296,6 +2302,8 @@ tdsigwakeup(struct thread *td, int sig, sig_t action, int intrval)
out:
PROC_SUNLOCK(p);
thread_unlock(td);
+ if (wakeup_swapper)
+ kick_proc0();
}
static void
diff --git a/sys/kern/kern_sx.c b/sys/kern/kern_sx.c
index 1f0011c..5006793 100644
--- a/sys/kern/kern_sx.c
+++ b/sys/kern/kern_sx.c
@@ -361,6 +361,7 @@ void
_sx_downgrade(struct sx *sx, const char *file, int line)
{
uintptr_t x;
+ int wakeup_swapper;
KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
("sx_downgrade() of destroyed sx @ %s:%d", file, line));
@@ -401,15 +402,19 @@ _sx_downgrade(struct sx *sx, const char *file, int line)
* Preserve SX_LOCK_EXCLUSIVE_WAITERS while downgraded to a single
* shared lock. If there are any shared waiters, wake them up.
*/
+ wakeup_swapper = 0;
x = sx->sx_lock;
atomic_store_rel_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) |
(x & SX_LOCK_EXCLUSIVE_WAITERS));
if (x & SX_LOCK_SHARED_WAITERS)
- sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0,
- SQ_SHARED_QUEUE);
+ wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX,
+ 0, SQ_SHARED_QUEUE);
sleepq_release(&sx->lock_object);
LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line);
+
+ if (wakeup_swapper)
+ kick_proc0();
}
/*
@@ -589,7 +594,7 @@ void
_sx_xunlock_hard(struct sx *sx, uintptr_t tid, const char *file, int line)
{
uintptr_t x;
- int queue;
+ int queue, wakeup_swapper;
MPASS(!(sx->sx_lock & SX_LOCK_SHARED));
@@ -627,8 +632,11 @@ _sx_xunlock_hard(struct sx *sx, uintptr_t tid, const char *file, int line)
__func__, sx, queue == SQ_SHARED_QUEUE ? "shared" :
"exclusive");
atomic_store_rel_ptr(&sx->sx_lock, x);
- sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0, queue);
+ wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0,
+ queue);
sleepq_release(&sx->lock_object);
+ if (wakeup_swapper)
+ kick_proc0();
}
/*
@@ -795,6 +803,7 @@ void
_sx_sunlock_hard(struct sx *sx, const char *file, int line)
{
uintptr_t x;
+ int wakeup_swapper;
for (;;) {
x = sx->sx_lock;
@@ -862,9 +871,11 @@ _sx_sunlock_hard(struct sx *sx, const char *file, int line)
if (LOCK_LOG_TEST(&sx->lock_object, 0))
CTR2(KTR_LOCK, "%s: %p waking up all thread on"
"exclusive queue", __func__, sx);
- sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0,
- SQ_EXCLUSIVE_QUEUE);
+ wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX,
+ 0, SQ_EXCLUSIVE_QUEUE);
sleepq_release(&sx->lock_object);
+ if (wakeup_swapper)
+ kick_proc0();
break;
}
}
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index c322ace..6c10ba4 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -327,10 +327,13 @@ pause(const char *wmesg, int timo)
void
wakeup(void *ident)
{
+ int wakeup_swapper;
sleepq_lock(ident);
- sleepq_broadcast(ident, SLEEPQ_SLEEP, 0, 0);
+ wakeup_swapper = sleepq_broadcast(ident, SLEEPQ_SLEEP, 0, 0);
sleepq_release(ident);
+ if (wakeup_swapper)
+ kick_proc0();
}
/*
@@ -341,10 +344,13 @@ wakeup(void *ident)
void
wakeup_one(void *ident)
{
+ int wakeup_swapper;
sleepq_lock(ident);
- sleepq_signal(ident, SLEEPQ_SLEEP, 0, 0);
+ wakeup_swapper = sleepq_signal(ident, SLEEPQ_SLEEP, 0, 0);
sleepq_release(ident);
+ if (wakeup_swapper)
+ kick_proc0();
}
static void
@@ -440,11 +446,11 @@ mi_switch(int flags, struct thread *newtd)
}
/*
- * Change process state to be runnable,
- * placing it on the run queue if it is in memory,
- * and awakening the swapper if it isn't in memory.
+ * Change thread state to be runnable, placing it on the run queue if
+ * it is in memory. If it is swapped out, return true so our caller
+ * will know to awaken the swapper.
*/
-void
+int
setrunnable(struct thread *td)
{
@@ -454,15 +460,15 @@ setrunnable(struct thread *td)
switch (td->td_state) {
case TDS_RUNNING:
case TDS_RUNQ:
- return;
+ return (0);
case TDS_INHIBITED:
/*
* If we are only inhibited because we are swapped out
* then arrange to swap in this process. Otherwise just return.
*/
if (td->td_inhibitors != TDI_SWAPPED)
- return;
- /* XXX: intentional fall-through ? */
+ return (0);
+ /* FALLTHROUGH */
case TDS_CAN_RUN:
break;
default:
@@ -472,15 +478,11 @@ setrunnable(struct thread *td)
if ((td->td_flags & TDF_INMEM) == 0) {
if ((td->td_flags & TDF_SWAPINREQ) == 0) {
td->td_flags |= TDF_SWAPINREQ;
- /*
- * due to a LOR between the thread lock and
- * the sleepqueue chain locks, use
- * lower level scheduling functions.
- */
- kick_proc0();
+ return (1);
}
} else
sched_wakeup(td);
+ return (0);
}
/*
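setrunnable() now reports the swapper wakeup instead of performing it: the deleted comment explains that kick_proc0() could not be called with the thread lock held because of the lock-order reversal against the sleepqueue chain locks, so the caller does it after unlocking. An illustrative caller of the new contract (make_runnable_example is invented):

        static void
        make_runnable_example(struct thread *td)
        {
                int wakeup_swapper;

                thread_lock(td);
                /*
                 * Returns 1 iff td is not in memory and a swap-in was
                 * newly requested (TDF_SWAPINREQ just set).
                 */
                wakeup_swapper = setrunnable(td);
                thread_unlock(td);
                if (wakeup_swapper)
                        kick_proc0();   /* no LOR: thread lock dropped */
        }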
diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c
index a3d5da7..6b4ea6f 100644
--- a/sys/kern/kern_thread.c
+++ b/sys/kern/kern_thread.c
@@ -508,7 +508,7 @@ thread_single(int mode)
struct thread *td;
struct thread *td2;
struct proc *p;
- int remaining;
+ int remaining, wakeup_swapper;
td = curthread;
p = td->td_proc;
@@ -545,6 +545,7 @@ thread_single(int mode)
while (remaining != 1) {
if (P_SHOULDSTOP(p) != P_STOPPED_SINGLE)
goto stopme;
+ wakeup_swapper = 0;
FOREACH_THREAD_IN_PROC(p, td2) {
if (td2 == td)
continue;
@@ -559,7 +560,8 @@ thread_single(int mode)
thread_unsuspend_one(td2);
if (TD_ON_SLEEPQ(td2) &&
(td2->td_flags & TDF_SINTR))
- sleepq_abort(td2, EINTR);
+ wakeup_swapper =
+ sleepq_abort(td2, EINTR);
break;
case SINGLE_BOUNDARY:
break;
@@ -585,6 +587,8 @@ thread_single(int mode)
#endif
thread_unlock(td2);
}
+ if (wakeup_swapper)
+ kick_proc0();
if (mode == SINGLE_EXIT)
remaining = p->p_numthreads;
else if (mode == SINGLE_BOUNDARY)
@@ -787,7 +791,11 @@ thread_unsuspend_one(struct thread *td)
KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended"));
TD_CLR_SUSPENDED(td);
p->p_suspcount--;
- setrunnable(td);
+ if (setrunnable(td)) {
+#ifdef INVARIANTS
+ panic("not waking up swapper");
+#endif
+ }
}
/*
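thread_unsuspend_one() resumes threads that were suspended while resident in memory, so a nonzero return from setrunnable() would indicate a bug rather than real work to do; the same assertion idiom appears in subr_sleepqueue.c and vm_glue.c below. A sketch of the idiom (unsuspend_example is invented):

        static void
        unsuspend_example(struct thread *td)
        {
                THREAD_LOCK_ASSERT(td, MA_OWNED);
                if (setrunnable(td)) {
        #ifdef INVARIANTS
                        /* The thread was in memory, so this cannot fire. */
                        panic("not waking up swapper");
        #endif
                }
                /* Without INVARIANTS the result is deliberately ignored. */
        }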
diff --git a/sys/kern/subr_sleepqueue.c b/sys/kern/subr_sleepqueue.c
index 3878d81..313786a 100644
--- a/sys/kern/subr_sleepqueue.c
+++ b/sys/kern/subr_sleepqueue.c
@@ -160,7 +160,7 @@ static int sleepq_check_timeout(void);
static void sleepq_dtor(void *mem, int size, void *arg);
#endif
static int sleepq_init(void *mem, int size, int flags);
-static void sleepq_resume_thread(struct sleepqueue *sq, struct thread *td,
+static int sleepq_resume_thread(struct sleepqueue *sq, struct thread *td,
int pri);
static void sleepq_switch(void *wchan, int pri);
static void sleepq_timeout(void *arg);
@@ -434,7 +434,15 @@ sleepq_catch_signals(void *wchan, int pri)
*/
if (TD_ON_SLEEPQ(td)) {
sq = sleepq_lookup(wchan);
- sleepq_resume_thread(sq, td, 0);
+ if (sleepq_resume_thread(sq, td, 0)) {
+#ifdef INVARIANTS
+ /*
+ * This thread hasn't gone to sleep yet, so it
+ * should not be swapped out.
+ */
+ panic("not waking up swapper");
+#endif
+ }
}
mtx_unlock_spin(&sc->sc_lock);
MPASS(td->td_lock != &sc->sc_lock);
@@ -474,7 +482,15 @@ sleepq_switch(void *wchan, int pri)
if (td->td_flags & TDF_TIMEOUT) {
MPASS(TD_ON_SLEEPQ(td));
sq = sleepq_lookup(wchan);
- sleepq_resume_thread(sq, td, 0);
+ if (sleepq_resume_thread(sq, td, 0)) {
+#ifdef INVARIANTS
+ /*
+ * This thread hasn't gone to sleep yet, so it
+ * should not be swapped out.
+ */
+ panic("not waking up swapper");
+#endif
+ }
mtx_unlock_spin(&sc->sc_lock);
return;
}
@@ -631,7 +647,7 @@ sleepq_timedwait_sig(void *wchan, int pri)
* Removes a thread from a sleep queue and makes it
* runnable.
*/
-static void
+static int
sleepq_resume_thread(struct sleepqueue *sq, struct thread *td, int pri)
{
struct sleepqueue_chain *sc;
@@ -683,7 +699,7 @@ sleepq_resume_thread(struct sleepqueue *sq, struct thread *td, int pri)
MPASS(pri == 0 || (pri >= PRI_MIN && pri <= PRI_MAX));
if (pri != 0 && td->td_priority > pri)
sched_prio(td, pri);
- setrunnable(td);
+ return (setrunnable(td));
}
#ifdef INVARIANTS
@@ -722,18 +738,19 @@ sleepq_init(void *mem, int size, int flags)
/*
* Find the highest priority thread sleeping on a wait channel and resume it.
*/
-void
+int
sleepq_signal(void *wchan, int flags, int pri, int queue)
{
struct sleepqueue *sq;
struct thread *td, *besttd;
+ int wakeup_swapper;
CTR2(KTR_PROC, "sleepq_signal(%p, %d)", wchan, flags);
KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
MPASS((queue >= 0) && (queue < NR_SLEEPQS));
sq = sleepq_lookup(wchan);
if (sq == NULL)
- return;
+ return (0);
KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
("%s: mismatch between sleep/wakeup and cv_*", __func__));
@@ -750,35 +767,40 @@ sleepq_signal(void *wchan, int flags, int pri, int queue)
}
MPASS(besttd != NULL);
thread_lock(besttd);
- sleepq_resume_thread(sq, besttd, pri);
+ wakeup_swapper = sleepq_resume_thread(sq, besttd, pri);
thread_unlock(besttd);
+ return (wakeup_swapper);
}
/*
* Resume all threads sleeping on a specified wait channel.
*/
-void
+int
sleepq_broadcast(void *wchan, int flags, int pri, int queue)
{
struct sleepqueue *sq;
struct thread *td;
+ int wakeup_swapper;
CTR2(KTR_PROC, "sleepq_broadcast(%p, %d)", wchan, flags);
KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
MPASS((queue >= 0) && (queue < NR_SLEEPQS));
sq = sleepq_lookup(wchan);
if (sq == NULL)
- return;
+ return (0);
KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
("%s: mismatch between sleep/wakeup and cv_*", __func__));
/* Resume all blocked threads on the sleep queue. */
+ wakeup_swapper = 0;
while (!TAILQ_EMPTY(&sq->sq_blocked[queue])) {
td = TAILQ_FIRST(&sq->sq_blocked[queue]);
thread_lock(td);
- sleepq_resume_thread(sq, td, pri);
+ if (sleepq_resume_thread(sq, td, pri))
+ wakeup_swapper = 1;
thread_unlock(td);
}
+ return (wakeup_swapper);
}
/*
@@ -792,8 +814,10 @@ sleepq_timeout(void *arg)
struct sleepqueue *sq;
struct thread *td;
void *wchan;
+ int wakeup_swapper;
td = arg;
+ wakeup_swapper = 0;
CTR3(KTR_PROC, "sleepq_timeout: thread %p (pid %ld, %s)",
(void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);
@@ -809,8 +833,10 @@ sleepq_timeout(void *arg)
sq = sleepq_lookup(wchan);
MPASS(sq != NULL);
td->td_flags |= TDF_TIMEOUT;
- sleepq_resume_thread(sq, td, 0);
+ wakeup_swapper = sleepq_resume_thread(sq, td, 0);
thread_unlock(td);
+ if (wakeup_swapper)
+ kick_proc0();
return;
}
@@ -839,10 +865,12 @@ sleepq_timeout(void *arg)
MPASS(TD_IS_SLEEPING(td));
td->td_flags &= ~TDF_TIMEOUT;
TD_CLR_SLEEPING(td);
- setrunnable(td);
+ wakeup_swapper = setrunnable(td);
} else
td->td_flags |= TDF_TIMOFAIL;
thread_unlock(td);
+ if (wakeup_swapper)
+ kick_proc0();
}
/*
@@ -853,6 +881,7 @@ void
sleepq_remove(struct thread *td, void *wchan)
{
struct sleepqueue *sq;
+ int wakeup_swapper;
/*
* Look up the sleep queue for this wait channel, then re-check
@@ -876,16 +905,18 @@ sleepq_remove(struct thread *td, void *wchan)
thread_lock(td);
MPASS(sq != NULL);
MPASS(td->td_wchan == wchan);
- sleepq_resume_thread(sq, td, 0);
+ wakeup_swapper = sleepq_resume_thread(sq, td, 0);
thread_unlock(td);
sleepq_release(wchan);
+ if (wakeup_swapper)
+ kick_proc0();
}
/*
* Abort a thread as if an interrupt had occurred. Only abort
* interruptible waits (unfortunately it isn't safe to abort others).
*/
-void
+int
sleepq_abort(struct thread *td, int intrval)
{
struct sleepqueue *sq;
@@ -901,7 +932,7 @@ sleepq_abort(struct thread *td, int intrval)
* timeout is scheduled anyhow.
*/
if (td->td_flags & TDF_TIMEOUT)
- return;
+ return (0);
CTR3(KTR_PROC, "sleepq_abort: thread %p (pid %ld, %s)",
(void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);
@@ -913,14 +944,14 @@ sleepq_abort(struct thread *td, int intrval)
* we have to do it here.
*/
if (!TD_IS_SLEEPING(td))
- return;
+ return (0);
wchan = td->td_wchan;
MPASS(wchan != NULL);
sq = sleepq_lookup(wchan);
MPASS(sq != NULL);
/* Thread is asleep on sleep queue sq, so wake it up. */
- sleepq_resume_thread(sq, td, 0);
+ return (sleepq_resume_thread(sq, td, 0));
}
#ifdef SLEEPQUEUE_PROFILING
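sleepq_broadcast() differs from the single-thread paths in that it must aggregate results across every sleeper on the queue: one swapped-out thread is enough to require a kick. A hypothetical reduction of that loop (resume_all_example is invented; sleepq_resume_thread() is the file-local helper changed above):

        static int
        resume_all_example(struct sleepqueue *sq, int queue, int pri)
        {
                struct thread *td;
                int wakeup_swapper;

                wakeup_swapper = 0;
                while (!TAILQ_EMPTY(&sq->sq_blocked[queue])) {
                        td = TAILQ_FIRST(&sq->sq_blocked[queue]);
                        thread_lock(td);
                        /* OR the per-thread results into a single flag. */
                        if (sleepq_resume_thread(sq, td, pri))
                                wakeup_swapper = 1;
                        thread_unlock(td);
                }
                return (wakeup_swapper);
        }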
diff --git a/sys/sys/proc.h b/sys/sys/proc.h
index 654e3bf..3779591 100644
--- a/sys/sys/proc.h
+++ b/sys/sys/proc.h
@@ -808,7 +808,7 @@ void pstats_free(struct pstats *ps);
int securelevel_ge(struct ucred *cr, int level);
int securelevel_gt(struct ucred *cr, int level);
void sessrele(struct session *);
-void setrunnable(struct thread *);
+int setrunnable(struct thread *);
void setsugid(struct proc *p);
int sigonstack(size_t sp);
void sleepinit(void);
diff --git a/sys/sys/sleepqueue.h b/sys/sys/sleepqueue.h
index 57b2c43..dfd960f 100644
--- a/sys/sys/sleepqueue.h
+++ b/sys/sys/sleepqueue.h
@@ -91,17 +91,17 @@ struct thread;
#define SLEEPQ_INTERRUPTIBLE 0x100 /* Sleep is interruptible. */
void init_sleepqueues(void);
-void sleepq_abort(struct thread *td, int intrval);
+int sleepq_abort(struct thread *td, int intrval);
void sleepq_add(void *wchan, struct lock_object *lock, const char *wmesg,
int flags, int queue);
struct sleepqueue *sleepq_alloc(void);
-void sleepq_broadcast(void *wchan, int flags, int pri, int queue);
+int sleepq_broadcast(void *wchan, int flags, int pri, int queue);
void sleepq_free(struct sleepqueue *sq);
void sleepq_lock(void *wchan);
struct sleepqueue *sleepq_lookup(void *wchan);
void sleepq_release(void *wchan);
void sleepq_remove(struct thread *td, void *wchan);
-void sleepq_signal(void *wchan, int flags, int pri, int queue);
+int sleepq_signal(void *wchan, int flags, int pri, int queue);
void sleepq_set_timeout(void *wchan, int timo);
int sleepq_timedwait(void *wchan, int pri);
int sleepq_timedwait_sig(void *wchan, int pri);
diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c
index 462c460..d6ec2ba 100644
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -116,10 +116,6 @@ static int swapout(struct proc *);
static void swapclear(struct proc *);
#endif
-
-static volatile int proc0_rescan;
-
-
/*
* MPSAFE
*
@@ -683,9 +679,6 @@ scheduler(dummy)
loop:
if (vm_page_count_min()) {
VM_WAIT;
- thread_lock(&thread0);
- proc0_rescan = 0;
- thread_unlock(&thread0);
goto loop;
}
@@ -732,13 +725,7 @@ loop:
* Nothing to do, back to sleep.
*/
if ((p = pp) == NULL) {
- thread_lock(&thread0);
- if (!proc0_rescan) {
- TD_SET_IWAIT(&thread0);
- mi_switch(SW_VOL | SWT_IWAIT, NULL);
- }
- proc0_rescan = 0;
- thread_unlock(&thread0);
+ tsleep(&proc0, PVM, "sched", maxslp * hz / 2);
goto loop;
}
PROC_LOCK(p);
@@ -750,9 +737,6 @@ loop:
*/
if (p->p_flag & (P_INMEM | P_SWAPPINGOUT | P_SWAPPINGIN)) {
PROC_UNLOCK(p);
- thread_lock(&thread0);
- proc0_rescan = 0;
- thread_unlock(&thread0);
goto loop;
}
@@ -762,31 +746,15 @@ loop:
*/
faultin(p);
PROC_UNLOCK(p);
- thread_lock(&thread0);
- proc0_rescan = 0;
- thread_unlock(&thread0);
goto loop;
}
-void kick_proc0(void)
+void
+kick_proc0(void)
{
- struct thread *td = &thread0;
-
- /* XXX This will probably cause a LOR in some cases */
- thread_lock(td);
- if (TD_AWAITING_INTR(td)) {
- CTR2(KTR_INTR, "%s: sched_add %d", __func__, 0);
- TD_CLR_IWAIT(td);
- sched_add(td, SRQ_INTR);
- } else {
- proc0_rescan = 1;
- CTR2(KTR_INTR, "%s: state %d",
- __func__, td->td_state);
- }
- thread_unlock(td);
-
-}
+ wakeup(&proc0);
+}
#ifndef NO_SWAPPING
@@ -980,7 +948,16 @@ swapclear(p)
td->td_flags &= ~TDF_SWAPINREQ;
TD_CLR_SWAPPED(td);
if (TD_CAN_RUN(td))
- setrunnable(td);
+ if (setrunnable(td)) {
+#ifdef INVARIANTS
+ /*
+ * XXX: We just cleared TDI_SWAPPED
+ * above and set TDF_INMEM, so this
+ * should never happen.
+ */
+ panic("not waking up swapper");
+#endif
+ }
thread_unlock(td);
}
p->p_flag &= ~(P_SWAPPINGIN|P_SWAPPINGOUT);
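With proc0_rescan and the TD_SET_IWAIT machinery removed, the swapper's idle path becomes an ordinary tsleep()/wakeup() handshake on &proc0, with the maxslp-based timeout as a periodic-retry backstop. A condensed, hypothetical view of the loop in scheduler() above (find_candidate_example stands in for the real scan over swapped-out processes):

        static struct proc *find_candidate_example(void);

        static void
        scheduler_loop_example(void)
        {
                struct proc *p;

                for (;;) {
                        p = find_candidate_example();
                        if (p == NULL) {
                                /*
                                 * Wait for kick_proc0() -> wakeup(&proc0),
                                 * or retry after the backstop timeout.
                                 */
                                tsleep(&proc0, PVM, "sched",
                                    maxslp * hz / 2);
                                continue;
                        }
                        PROC_LOCK(p);
                        faultin(p);     /* swap the process back in */
                        PROC_UNLOCK(p);
                }
        }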