diff options
author | jhb <jhb@FreeBSD.org> | 2001-05-15 22:20:44 +0000 |
---|---|---|
committer | jhb <jhb@FreeBSD.org> | 2001-05-15 22:20:44 +0000 |
commit | 8865204035447b7da08d8f8b4dba8f20776349a3 (patch) | |
tree | 1e2f769cb4bd80d1f6324c7ee7e495c9731416ec /sys/vm | |
parent | a0d918e1e9b4c6103a17651082f6677c188d6235 (diff) | |
download | FreeBSD-src-8865204035447b7da08d8f8b4dba8f20776349a3.zip FreeBSD-src-8865204035447b7da08d8f8b4dba8f20776349a3.tar.gz |
- Use PROC_LOCK_ASSERT instead of a direct mtx_assert.
- Don't hold Giant in the swapper daemon while we walk the list of
processes looking for a process to swap back in.
- Don't bother grabbing the sched_lock while checking a process' sleep
time in swapout_procs() to ensure that a process has been idle for at
least swap_idle_threshold2 before swapping it out. If we lose the race
we just let a process stay in memory until the next call of
swapout_procs().
- Remove some unneeded spl's, sched_lock does all the locking needed in
this case.
Diffstat (limited to 'sys/vm')
-rw-r--r-- | sys/vm/vm_glue.c | 14 |
1 file changed, 6 insertions, 8 deletions
diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c index 87b13de..f59a461 100644 --- a/sys/vm/vm_glue.c +++ b/sys/vm/vm_glue.c @@ -313,7 +313,7 @@ faultin(p) struct proc *p; { - mtx_assert(&p->p_mtx, MA_OWNED); + PROC_LOCK_ASSERT(p, MA_OWNED); mtx_lock_spin(&sched_lock); if ((p->p_sflag & PS_INMEM) == 0) { @@ -355,7 +355,7 @@ scheduler(dummy) struct proc *pp; int ppri; - mtx_assert(&Giant, MA_OWNED); + mtx_assert(&Giant, MA_OWNED | MA_NOTRECURSED); loop: if (vm_page_count_min()) { @@ -363,6 +363,7 @@ loop: goto loop; } + mtx_unlock(&Giant); pp = NULL; ppri = INT_MIN; sx_slock(&allproc_lock); @@ -395,6 +396,7 @@ loop: */ if ((p = pp) == NULL) { tsleep(&proc0, PVM, "sched", 0); + mtx_lock(&Giant); goto loop; } mtx_lock_spin(&sched_lock); @@ -404,6 +406,7 @@ loop: /* * We would like to bring someone in. (only if there is space). */ + mtx_lock(&Giant); PROC_LOCK(p); faultin(p); PROC_UNLOCK(p); @@ -523,17 +526,14 @@ retry: * If the process has been asleep for awhile and had * most of its pages taken away already, swap it out. */ - mtx_lock_spin(&sched_lock); if ((action & VM_SWAP_NORMAL) || ((action & VM_SWAP_IDLE) && (p->p_slptime > swap_idle_threshold2))) { - mtx_unlock_spin(&sched_lock); swapout(p); vmspace_free(vm); didswap++; goto retry; - } else - mtx_unlock_spin(&sched_lock); + } } } sx_sunlock(&allproc_lock); @@ -559,14 +559,12 @@ swapout(p) */ p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace); - (void) splhigh(); mtx_lock_spin(&sched_lock); p->p_sflag &= ~PS_INMEM; p->p_sflag |= PS_SWAPPING; if (p->p_stat == SRUN) remrunqueue(p); mtx_unlock_spin(&sched_lock); - (void) spl0(); pmap_swapout_proc(p); |