author     tanimura <tanimura@FreeBSD.org>    2002-07-30 06:54:05 +0000
committer  tanimura <tanimura@FreeBSD.org>    2002-07-30 06:54:05 +0000
commit     8eb7238cade123cbd553b5d57bbb5117f538adbb (patch)
tree       0c83be03d1e2e71dfad7f8b959ebd86dd9b9546f
parent     18acb2ea275ccf40f6db01964cd3438104000486 (diff)
- Optimize wakeup() and its friends; if a thread being woken up is already
  being swapped in, we do not have to ask the scheduler thread to do that.
- Assert that a process is not swapped out in the runq functions and in
  swapout().
- Introduce thread_safetoswapout() for readability.
- In swapout_procs(), perform the test that may block (the check of a thread
  working on its vm map) first.  This lets us call swapout() with the
  sched_lock held, providing better atomicity.
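
The heart of the wakeup() change is the PS_SWAPPINGIN test: if the swapper is
already faulting the process back in, there is no point in setting PS_SWAPINREQ
and calling wakeup(&proc0) again. Below is a minimal userland sketch of that
flag logic only; the flag values and the request_swapin() helper are
illustrative stand-ins, not the kernel's own code.

```c
#include <stdio.h>

#define PS_INMEM       0x01	/* process is resident in memory */
#define PS_SWAPINREQ   0x02	/* swap-in has been requested from the swapper */
#define PS_SWAPPINGIN  0x04	/* swapper is already faulting the process in */

/*
 * Model of the optimized wakeup() path: only poke the swapper
 * (wakeup(&proc0) in the kernel) when no swap-in is in progress.
 * Returns 1 if the swapper would have been woken, 0 otherwise.
 */
static int
request_swapin(int *p_sflag)
{
	if ((*p_sflag & PS_SWAPPINGIN) == 0) {
		*p_sflag |= PS_SWAPINREQ;
		return (1);		/* would call wakeup(&proc0) */
	}
	return (0);			/* swapper is already working on it */
}

int
main(void)
{
	int sflag;

	sflag = 0;			/* swapped out, nothing in progress */
	printf("idle: wake swapper = %d\n", request_swapin(&sflag));

	sflag = PS_SWAPPINGIN;		/* swap-in already under way */
	printf("in progress: wake swapper = %d\n", request_swapin(&sflag));
	return (0);
}
```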
-rw-r--r--  sys/kern/kern_condvar.c |   6
-rw-r--r--  sys/kern/kern_switch.c  |   6
-rw-r--r--  sys/kern/kern_synch.c   |  18
-rw-r--r--  sys/sys/proc.h          |   5
-rw-r--r--  sys/vm/vm_glue.c        | 129
5 files changed, 92 insertions(+), 72 deletions(-)
diff --git a/sys/kern/kern_condvar.c b/sys/kern/kern_condvar.c
index 822a4c9..55fba60 100644
--- a/sys/kern/kern_condvar.c
+++ b/sys/kern/kern_condvar.c
@@ -534,8 +534,10 @@ cv_wakeup(struct cv *cvp)
maybe_resched(td);
} else {
td->td_state = TDS_SWAPPED;
- td->td_proc->p_sflag |= PS_SWAPINREQ;
- wakeup(&proc0); /* XXXKSE */
+ if ((td->td_proc->p_sflag & PS_SWAPPINGIN) == 0) {
+ td->td_proc->p_sflag |= PS_SWAPINREQ;
+ wakeup(&proc0);
+ }
}
/* END INLINE EXPANSION */
}
diff --git a/sys/kern/kern_switch.c b/sys/kern/kern_switch.c
index 9629ac2..97dd771 100644
--- a/sys/kern/kern_switch.c
+++ b/sys/kern/kern_switch.c
@@ -525,6 +525,8 @@ runq_add(struct runq *rq, struct kse *ke)
KASSERT(ke->ke_state != KES_ONRUNQ,
("runq_add: kse %p (%s) already in run queue", ke,
ke->ke_proc->p_comm));
+ KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
+ ("runq_add: process swapped out"));
pri = ke->ke_thread->td_priority / RQ_PPQ;
ke->ke_rqindex = pri;
runq_setbit(rq, pri);
@@ -590,6 +592,8 @@ runq_choose(struct runq *rq)
("runq_choose: No thread on KSE"));
KASSERT((ke->ke_thread->td_kse != NULL),
("runq_choose: No KSE on thread"));
+ KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
+ ("runq_choose: process swapped out"));
return (ke);
}
CTR1(KTR_RUNQ, "runq_choose: idleproc pri=%d", pri);
@@ -610,6 +614,8 @@ runq_remove(struct runq *rq, struct kse *ke)
KASSERT((ke->ke_state == KES_ONRUNQ), ("KSE not on run queue"));
mtx_assert(&sched_lock, MA_OWNED);
+ KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
+ ("runq_remove: process swapped out"));
pri = ke->ke_rqindex;
rqh = &rq->rq_queues[pri];
CTR4(KTR_RUNQ, "runq_remove: p=%p pri=%d %d rqh=%p",
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index e4bef85..9e60459 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -719,8 +719,10 @@ restart:
maybe_resched(td);
} else {
td->td_state = TDS_SWAPPED;
- p->p_sflag |= PS_SWAPINREQ;
- wakeup(&proc0);
+ if ((p->p_sflag & PS_SWAPPINGIN) == 0) {
+ p->p_sflag |= PS_SWAPINREQ;
+ wakeup(&proc0);
+ }
}
/* END INLINE EXPANSION */
}
@@ -766,8 +768,10 @@ restart:
break;
} else {
td->td_state = TDS_SWAPPED;
- p->p_sflag |= PS_SWAPINREQ;
- wakeup(&proc0);
+ if ((p->p_sflag & PS_SWAPPINGIN) == 0) {
+ p->p_sflag |= PS_SWAPINREQ;
+ wakeup(&proc0);
+ }
}
/* END INLINE EXPANSION */
goto restart;
@@ -941,8 +945,10 @@ setrunnable(struct thread *td)
td->td_ksegrp->kg_slptime = 0;
if ((p->p_sflag & PS_INMEM) == 0) {
td->td_state = TDS_SWAPPED;
- p->p_sflag |= PS_SWAPINREQ;
- wakeup(&proc0);
+ if ((p->p_sflag & PS_SWAPPINGIN) == 0) {
+ p->p_sflag |= PS_SWAPINREQ;
+ wakeup(&proc0);
+ }
} else {
if (td->td_state != TDS_RUNQ)
setrunqueue(td); /* XXXKSE */
diff --git a/sys/sys/proc.h b/sys/sys/proc.h
index af931ab..b8730e2 100644
--- a/sys/sys/proc.h
+++ b/sys/sys/proc.h
@@ -730,6 +730,11 @@ sigonstack(size_t sp)
(--(p)->p_lock); \
} while (0)
+/* Check whether a thread is safe to be swapped out. */
+#define thread_safetoswapout(td) \
+ ((td)->td_state == TDS_RUNQ || \
+ (td)->td_state == TDS_SLP)
+
/* Lock and unlock process arguments. */
#define PARGS_LOCK(p) mtx_lock(&pargs_ref_lock)
#define PARGS_UNLOCK(p) mtx_unlock(&pargs_ref_lock)
diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c
index 8572a23..ff8b3df 100644
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -511,12 +511,12 @@ faultin(p)
PROC_LOCK(p);
mtx_lock_spin(&sched_lock);
+ p->p_sflag &= ~PS_SWAPPINGIN;
+ p->p_sflag |= PS_INMEM;
FOREACH_THREAD_IN_PROC (p, td)
if (td->td_state == TDS_SWAPPED)
setrunqueue(td);
- p->p_sflag &= ~PS_SWAPPINGIN;
- p->p_sflag |= PS_INMEM;
wakeup(&p->p_sflag);
/* undo the effect of setting SLOCK above */
@@ -671,32 +671,42 @@ retry:
struct vmspace *vm;
int minslptime = 100000;
+ /*
+ * Do not swapout a process that
+ * is waiting for VM data
+ * structures there is a possible
+ * deadlock. Test this first as
+ * this may block.
+ *
+ * Lock the map until swapout
+ * finishes, or a thread of this
+ * process may attempt to alter
+ * the map.
+ */
+ vm = p->p_vmspace;
+ ++vm->vm_refcnt;
+ if (!vm_map_trylock(&vm->vm_map))
+ goto nextproc1;
+
PROC_LOCK(p);
if (p->p_lock != 0 ||
(p->p_flag & (P_STOPPED_SNGL|P_TRACED|P_SYSTEM|P_WEXIT)) != 0) {
- PROC_UNLOCK(p);
- continue;
+ goto nextproc2;
}
/*
* only aiod changes vmspace, however it will be
* skipped because of the if statement above checking
* for P_SYSTEM
*/
- vm = p->p_vmspace;
mtx_lock_spin(&sched_lock);
- if ((p->p_sflag & (PS_INMEM|PS_SWAPPING|PS_SWAPPINGIN)) != PS_INMEM) {
- mtx_unlock_spin(&sched_lock);
- PROC_UNLOCK(p);
- continue;
- }
+ if ((p->p_sflag & (PS_INMEM|PS_SWAPPING|PS_SWAPPINGIN)) != PS_INMEM)
+ goto nextproc;
switch (p->p_state) {
default:
/* Don't swap out processes in any sort
* of 'special' state. */
- mtx_unlock_spin(&sched_lock);
- PROC_UNLOCK(p);
- continue;
+ goto nextproc;
case PRS_NORMAL:
/*
@@ -704,39 +714,29 @@ retry:
* Check all the thread groups..
*/
FOREACH_KSEGRP_IN_PROC(p, kg) {
- if (PRI_IS_REALTIME(kg->kg_pri_class)) {
- mtx_unlock_spin(&sched_lock);
- PROC_UNLOCK(p);
+ if (PRI_IS_REALTIME(kg->kg_pri_class))
goto nextproc;
- }
/*
- * Do not swapout a process waiting
- * on a critical event of some kind.
- * Also guarantee swap_idle_threshold1
+ * Guarantee swap_idle_threshold1
* time in memory.
*/
- if (kg->kg_slptime < swap_idle_threshold1) {
- mtx_unlock_spin(&sched_lock);
- PROC_UNLOCK(p);
+ if (kg->kg_slptime < swap_idle_threshold1)
goto nextproc;
- }
+
/*
- * Do not swapout a process if there is
- * a thread whose pageable memory may
- * be accessed.
+ * Do not swapout a process if it is
+ * waiting on a critical event of some
+ * kind or there is a thread whose
+ * pageable memory may be accessed.
*
* This could be refined to support
* swapping out a thread.
*/
- FOREACH_THREAD_IN_PROC(p, td) {
+ FOREACH_THREAD_IN_GROUP(kg, td) {
if ((td->td_priority) < PSOCK ||
- !(td->td_state == TDS_SLP ||
- td->td_state == TDS_RUNQ)) {
- mtx_unlock_spin(&sched_lock);
- PROC_UNLOCK(p);
+ !thread_safetoswapout(td))
goto nextproc;
- }
}
/*
* If the system is under memory stress,
@@ -746,29 +746,13 @@ retry:
*/
if (((action & VM_SWAP_NORMAL) == 0) &&
(((action & VM_SWAP_IDLE) == 0) ||
- (kg->kg_slptime < swap_idle_threshold2))) {
- mtx_unlock_spin(&sched_lock);
- PROC_UNLOCK(p);
+ (kg->kg_slptime < swap_idle_threshold2)))
goto nextproc;
- }
+
if (minslptime > kg->kg_slptime)
minslptime = kg->kg_slptime;
}
- mtx_unlock_spin(&sched_lock);
- ++vm->vm_refcnt;
- /*
- * do not swapout a process that
- * is waiting for VM
- * data structures there is a
- * possible deadlock.
- */
- if (!vm_map_trylock(&vm->vm_map)) {
- vmspace_free(vm);
- PROC_UNLOCK(p);
- goto nextproc;
- }
- vm_map_unlock(&vm->vm_map);
/*
* If the process has been asleep for awhile and had
* most of its pages taken away already, swap it out.
@@ -776,16 +760,27 @@ retry:
if ((action & VM_SWAP_NORMAL) ||
((action & VM_SWAP_IDLE) &&
(minslptime > swap_idle_threshold2))) {
- sx_sunlock(&allproc_lock);
swapout(p);
- vmspace_free(vm);
didswap++;
+
+ /*
+ * swapout() unlocks a proc lock. This is
+ * ugly, but avoids superfluous lock.
+ */
+ mtx_unlock_spin(&sched_lock);
+ vm_map_unlock(&vm->vm_map);
+ vmspace_free(vm);
+ sx_sunlock(&allproc_lock);
goto retry;
}
- PROC_UNLOCK(p);
- vmspace_free(vm);
}
nextproc:
+ mtx_unlock_spin(&sched_lock);
+nextproc2:
+ PROC_UNLOCK(p);
+ vm_map_unlock(&vm->vm_map);
+nextproc1:
+ vmspace_free(vm);
continue;
}
sx_sunlock(&allproc_lock);
@@ -804,23 +799,30 @@ swapout(p)
struct thread *td;
PROC_LOCK_ASSERT(p, MA_OWNED);
+ mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
#if defined(SWAP_DEBUG)
printf("swapping out %d\n", p->p_pid);
#endif
- mtx_lock_spin(&sched_lock);
/*
+ * The states of this process and its threads may have changed
+ * by now. Assuming that there is only one pageout daemon thread,
+ * this process should still be in memory.
+ */
+ KASSERT((p->p_sflag & (PS_INMEM|PS_SWAPPING|PS_SWAPPINGIN)) == PS_INMEM,
+ ("swapout: lost a swapout race?"));
+
+#if defined(INVARIANTS)
+ /*
* Make sure that all threads are safe to be swapped out.
*
* Alternatively, we could swap out only safe threads.
*/
FOREACH_THREAD_IN_PROC(p, td) {
- if (!(td->td_state == TDS_SLP ||
- td->td_state == TDS_RUNQ)) {
- mtx_unlock_spin(&sched_lock);
- return;
- }
+ KASSERT(thread_safetoswapout(td),
+ ("swapout: there is a thread not safe for swapout"));
}
+#endif /* INVARIANTS */
++p->p_stats->p_ru.ru_nswap;
/*
@@ -828,14 +830,14 @@ swapout(p)
*/
p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);
- p->p_sflag &= ~PS_INMEM;
- p->p_sflag |= PS_SWAPPING;
PROC_UNLOCK(p);
FOREACH_THREAD_IN_PROC (p, td)
if (td->td_state == TDS_RUNQ) { /* XXXKSE */
remrunqueue(td); /* XXXKSE */
td->td_state = TDS_SWAPPED;
}
+ p->p_sflag &= ~PS_INMEM;
+ p->p_sflag |= PS_SWAPPING;
mtx_unlock_spin(&sched_lock);
vm_proc_swapout(p);
@@ -844,6 +846,5 @@ swapout(p)
mtx_lock_spin(&sched_lock);
p->p_sflag &= ~PS_SWAPPING;
p->p_swtime = 0;
- mtx_unlock_spin(&sched_lock);
}
#endif /* !NO_SWAPPING */