Diffstat (limited to 'sys/vm/vm_glue.c')
-rw-r--r--  sys/vm/vm_glue.c  38
1 file changed, 35 insertions, 3 deletions
diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c
index a180ae3..37c580a 100644
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -161,6 +161,7 @@ useracc(addr, len, rw)
|| (vm_offset_t) addr + len < (vm_offset_t) addr) {
return (FALSE);
}
+ mtx_lock(&vm_mtx);
map = &curproc->p_vmspace->vm_map;
vm_map_lock_read(map);
/*
@@ -172,6 +173,7 @@ useracc(addr, len, rw)
trunc_page((vm_offset_t)addr), round_page((vm_offset_t)addr + len), prot);
map->hint = save_hint;
vm_map_unlock_read(map);
+ mtx_unlock(&vm_mtx);
return (rv == TRUE);
}
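
The pattern in useracc() is lock bracketing: the new vm_mtx is taken before the map's shared read lock, giving a consistent order of subsystem lock first, then map lock. A minimal userland sketch of that ordering (a pthread analogue with hypothetical names, not the kernel mutex(9) KPI):

    /*
     * Userland analogue of the useracc() bracketing: the subsystem
     * mutex is taken before the per-map read lock, so the order is
     * always vm_mtx -> map lock.  Hypothetical names; map->lock is
     * assumed to have been set up with pthread_rwlock_init().
     */
    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t vm_mtx = PTHREAD_MUTEX_INITIALIZER;

    struct vm_map {
            pthread_rwlock_t lock;
            /* ... map entries ... */
    };

    static bool
    check_range(struct vm_map *map)
    {
            bool ok;

            pthread_mutex_lock(&vm_mtx);        /* subsystem lock first */
            pthread_rwlock_rdlock(&map->lock);  /* then the map, shared */
            ok = true;                          /* ... protection checks ... */
            pthread_rwlock_unlock(&map->lock);
            pthread_mutex_unlock(&vm_mtx);      /* drop before returning */
            return (ok);
    }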
@@ -181,8 +183,12 @@ vslock(addr, len)
caddr_t addr;
u_int len;
{
- vm_map_pageable(&curproc->p_vmspace->vm_map, trunc_page((vm_offset_t)addr),
+
+ mtx_lock(&vm_mtx);
+ vm_map_pageable(&curproc->p_vmspace->vm_map,
+ trunc_page((vm_offset_t)addr),
round_page((vm_offset_t)addr + len), FALSE);
+ mtx_unlock(&vm_mtx);
}
void
@@ -190,8 +196,12 @@ vsunlock(addr, len)
caddr_t addr;
u_int len;
{
- vm_map_pageable(&curproc->p_vmspace->vm_map, trunc_page((vm_offset_t)addr),
+
+ mtx_lock(&vm_mtx);
+ vm_map_pageable(&curproc->p_vmspace->vm_map,
+ trunc_page((vm_offset_t)addr),
round_page((vm_offset_t)addr + len), TRUE);
+ mtx_unlock(&vm_mtx);
}
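
Both vslock() and vsunlock() wire and unwire whole pages, so the range handed to vm_map_pageable() is widened with trunc_page()/round_page(). A small runnable sketch of that rounding (assumes a 4 KB page size; the macro names mirror the kernel's but are redefined locally here):

    /*
     * Runnable sketch of the page rounding used by vslock()/vsunlock();
     * assumes a 4 KB page.  trunc_page() aligns the start down and
     * round_page() aligns the end up, so whole pages are always wired.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE      4096UL
    #define PAGE_MASK      (PAGE_SIZE - 1)
    #define trunc_page(x)  ((uintptr_t)(x) & ~PAGE_MASK)
    #define round_page(x)  (((uintptr_t)(x) + PAGE_MASK) & ~PAGE_MASK)

    int
    main(void)
    {
            uintptr_t addr = 0x12345, len = 100;

            printf("wire [%#lx, %#lx)\n",
                (unsigned long)trunc_page(addr),
                (unsigned long)round_page(addr + len));
            return (0);
    }

This prints wire [0x12000, 0x13000): the 100 bytes starting at 0x12345 fall within a single page, so exactly one page is wired.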
/*
@@ -201,6 +211,8 @@ vsunlock(addr, len)
* machine-dependent layer to fill those in and make the new process
* ready to run. The new process is set up so that it returns directly
* to user mode to avoid stack copying and relocation problems.
+ *
+ * Called without vm_mtx.
*/
void
vm_fork(p1, p2, flags)
@@ -209,6 +221,7 @@ vm_fork(p1, p2, flags)
{
register struct user *up;
+ mtx_lock(&vm_mtx);
if ((flags & RFPROC) == 0) {
/*
* Divorce the memory, if it is shared, essentially
@@ -221,6 +234,7 @@ vm_fork(p1, p2, flags)
}
}
cpu_fork(p1, p2, flags);
+ mtx_unlock(&vm_mtx);
return;
}
@@ -275,6 +289,7 @@ vm_fork(p1, p2, flags)
* and make the child ready to run.
*/
cpu_fork(p1, p2, flags);
+ mtx_unlock(&vm_mtx);
}
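
vm_fork() is documented above as called without vm_mtx, so it must acquire the lock itself and release it on every exit path, including the early return taken when RFPROC is clear. A sketch of that both-paths discipline (pthread analogue, hypothetical names):

    /*
     * Pthread sketch of the "called without vm_mtx" convention:
     * the function takes the lock itself and must release it on
     * every exit path, including the early no-new-process return.
     * Hypothetical names.
     */
    #include <pthread.h>

    static pthread_mutex_t vm_mtx = PTHREAD_MUTEX_INITIALIZER;

    static void
    fork_vm_state(int no_new_proc)
    {
            pthread_mutex_lock(&vm_mtx);
            if (no_new_proc) {
                    /* ... share or divorce the address space ... */
                    pthread_mutex_unlock(&vm_mtx);  /* early return path */
                    return;
            }
            /* ... copy the address space, set up the new upages ... */
            pthread_mutex_unlock(&vm_mtx);          /* normal path */
    }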
/*
@@ -360,10 +375,13 @@ scheduler(dummy)
mtx_assert(&Giant, MA_OWNED | MA_NOTRECURSED);
loop:
+ mtx_lock(&vm_mtx);
if (vm_page_count_min()) {
VM_WAIT;
+ mtx_unlock(&vm_mtx);
goto loop;
}
+ mtx_unlock(&vm_mtx);
mtx_unlock(&Giant);
pp = NULL;
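
scheduler() now performs the page-shortage test under vm_mtx and drops the lock before looping or proceeding; VM_WAIT sleeps until the pageout daemon frees pages. A condition-variable analogue of that check/sleep/retry loop (hypothetical names standing in for VM_WAIT, which is a kernel macro):

    /*
     * Condition-variable analogue of the scheduler() retry loop:
     * test the shortage under the lock, sleep while pages are
     * short, and always drop the lock before looping or proceeding.
     */
    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t vm_mtx = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  vm_pages_freed = PTHREAD_COND_INITIALIZER;
    static bool page_count_min;     /* true while free pages are scarce */

    static void
    wait_for_pages(void)
    {
    loop:
            pthread_mutex_lock(&vm_mtx);
            if (page_count_min) {
                    /* Sleep until pages are freed, then re-test. */
                    pthread_cond_wait(&vm_pages_freed, &vm_mtx);
                    pthread_mutex_unlock(&vm_mtx);
                    goto loop;
            }
            pthread_mutex_unlock(&vm_mtx);
            /* ... safe to pick a process to swap in ... */
    }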
@@ -442,6 +460,9 @@ SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2,
* If any procs have been sleeping/stopped for at least maxslp seconds,
* they are swapped. Else, we swap the longest-sleeping or stopped process,
* if any, otherwise the longest-resident process.
+ *
+ * Can block.
+ * Must be called with vm_mtx held.
*/
void
swapout_procs(action)
@@ -452,6 +473,8 @@ int action;
int outpri, outpri2;
int didswap = 0;
+ mtx_assert(&vm_mtx, MA_OWNED);
+ mtx_unlock(&vm_mtx);
outp = outp2 = NULL;
outpri = outpri2 = INT_MIN;
sx_slock(&allproc_lock);
@@ -465,6 +488,11 @@ retry:
PROC_UNLOCK(p);
continue;
}
+ /*
+ * Only aiod changes vmspace; however, it will be
+ * skipped because the if statement above checks
+ * for P_SYSTEM.
+ */
vm = p->p_vmspace;
mtx_lock_spin(&sched_lock);
if ((p->p_sflag & (PS_INMEM|PS_SWAPPING)) != PS_INMEM) {
@@ -516,6 +544,7 @@ retry:
}
mtx_unlock_spin(&sched_lock);
+ mtx_lock(&vm_mtx);
#if 0
/*
* XXX: This is broken. We release the lock we
@@ -531,7 +560,7 @@ retry:
*/
if (lockmgr(&vm->vm_map.lock,
LK_EXCLUSIVE | LK_NOWAIT,
- (void *)0, curproc)) {
+ NULL, curproc)) {
vmspace_free(vm);
PROC_UNLOCK(p);
continue;
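
The lockmgr() call above uses LK_NOWAIT: if the map's exclusive lock cannot be taken immediately, the process is skipped rather than slept on, avoiding a sleep with locks held. A trylock sketch of the same idea (pthread analogue, hypothetical names):

    /*
     * Trylock sketch of the LK_EXCLUSIVE | LK_NOWAIT idea: if the
     * map lock is busy, skip this process instead of sleeping.
     */
    #include <pthread.h>

    static int
    try_swapout_map(pthread_mutex_t *map_lock)
    {
            if (pthread_mutex_trylock(map_lock) != 0)
                    return (0);             /* contended: skip it */
            /* ... examine the map and unwire the upages ... */
            pthread_mutex_unlock(map_lock);
            return (1);
    }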
@@ -548,8 +577,10 @@ retry:
swapout(p);
vmspace_free(vm);
didswap++;
+ mtx_unlock(&vm_mtx);
goto retry;
}
+ mtx_unlock(&vm_mtx);
PROC_UNLOCK(p);
}
}
@@ -558,6 +589,7 @@ retry:
* If we swapped something out, and another process needed memory,
* then wakeup the sched process.
*/
+ mtx_lock(&vm_mtx);
if (didswap)
wakeup(&proc0);
}
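
Taken together, swapout_procs() is entered with vm_mtx held, drops it across the long allproc scan (during which it sleeps on sx and process locks), and reacquires it only around the actual swapout and again before returning. A sketch of that hand-off (pthread analogue, hypothetical names; the caller is assumed to hold the lock on entry):

    /*
     * Shape of the swapout_procs() lock hand-off: entered with
     * vm_mtx held, dropped across the long process scan, retaken
     * only around the swapout itself and again before returning.
     */
    #include <pthread.h>

    static pthread_mutex_t vm_mtx = PTHREAD_MUTEX_INITIALIZER;

    static void
    swapout_candidates(void)
    {
            /* Caller holds vm_mtx on entry. */
            pthread_mutex_unlock(&vm_mtx);          /* drop for the scan */
            for (;;) {
                    /* ... scan processes, pick a victim ... */
                    pthread_mutex_lock(&vm_mtx);    /* needed for swapout */
                    /* ... swapout(victim) ... */
                    pthread_mutex_unlock(&vm_mtx);
                    break;                          /* or rescan */
            }
            pthread_mutex_lock(&vm_mtx);            /* exit as entered */
    }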