-rw-r--r--  sys/alpha/alpha/pmap.c      9
-rw-r--r--  sys/amd64/amd64/pmap.c      3
-rw-r--r--  sys/i386/i386/pmap.c        3
-rw-r--r--  sys/ia64/ia64/pmap.c        3
-rw-r--r--  sys/kern/imgact_elf.c      11
-rw-r--r--  sys/kern/init_main.c        2
-rw-r--r--  sys/kern/kern_exit.c        4
-rw-r--r--  sys/kern/kern_fork.c       14
-rw-r--r--  sys/kern/kern_ktrace.c      4
-rw-r--r--  sys/kern/kern_proc.c       15
-rw-r--r--  sys/kern/kern_resource.c    4
-rw-r--r--  sys/kern/kern_sig.c         6
-rw-r--r--  sys/kern/kern_synch.c       2
-rw-r--r--  sys/kern/vfs_extattr.c      2
-rw-r--r--  sys/kern/vfs_syscalls.c     2
-rw-r--r--  sys/sys/proc.h              3
-rw-r--r--  sys/vm/vm_glue.c            4
-rw-r--r--  sys/vm/vm_meter.c           4
-rw-r--r--  sys/vm/vm_object.c          6
-rw-r--r--  sys/vm/vm_pageout.c         4
20 files changed, 90 insertions, 15 deletions
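
Every hunk below applies the same pattern: the global allproc (and zombproc) process list gains a new lockmgr lock, allproc_lock, taken shared around read-only traversals and exclusive where processes are inserted or removed (fork1()/exit1()). The following is a minimal sketch of that pattern for orientation only; it is not taken verbatim from the committed files, and the function names example_init/example_scan are placeholders.

	#include <sys/param.h>
	#include <sys/lock.h>
	#include <sys/proc.h>

	extern struct lock allproc_lock;	/* declared in sys/sys/proc.h by this patch */

	static void
	example_init(void)
	{
		/* done once at boot, as in procinit() in kern_proc.c */
		lockinit(&allproc_lock, PZERO, "allproc", 0, 0);
	}

	static void
	example_scan(void)
	{
		struct proc *p;

		/* readers hold the lock shared for the whole traversal */
		lockmgr(&allproc_lock, LK_SHARED, NULL, CURPROC);
		LIST_FOREACH(p, &allproc, p_list) {
			/* ... inspect p ... */
		}
		lockmgr(&allproc_lock, LK_RELEASE, NULL, CURPROC);

		/* writers take it exclusive around list manipulation, e.g.
		 * LIST_INSERT_HEAD(&allproc, p, p_list) in fork1() or
		 * LIST_REMOVE(p, p_list) in exit1() */
		lockmgr(&allproc_lock, LK_EXCLUSIVE, NULL, CURPROC);
		lockmgr(&allproc_lock, LK_RELEASE, NULL, CURPROC);
	}
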
diff --git a/sys/alpha/alpha/pmap.c b/sys/alpha/alpha/pmap.c
index 8164ca6..09cf8ec 100644
--- a/sys/alpha/alpha/pmap.c
+++ b/sys/alpha/alpha/pmap.c
@@ -723,12 +723,16 @@ pmap_get_asn(pmap_t pmap)
printf("pmap_get_asn: generation rollover\n");
#endif
PCPU_GET(current_asngen) = 1;
+ lockmgr(&allproc_lock, LK_SHARED, NULL,
+ CURPROC);
LIST_FOREACH(p, &allproc, p_list) {
if (p->p_vmspace) {
tpmap = vmspace_pmap(p->p_vmspace);
tpmap->pm_asn[PCPU_GET(cpuno)].gen = 0;
}
}
+ lockmgr(&allproc_lock, LK_RELEASE, NULL,
+ CURPROC);
}
/*
@@ -1553,12 +1557,14 @@ pmap_growkernel(vm_offset_t addr)
newlev1 = pmap_phys_to_pte(pa)
| PG_V | PG_ASM | PG_KRE | PG_KWE;
+ lockmgr(&allproc_lock, LK_SHARED, NULL, CURPROC);
LIST_FOREACH(p, &allproc, p_list) {
if (p->p_vmspace) {
pmap = vmspace_pmap(p->p_vmspace);
*pmap_lev1pte(pmap, kernel_vm_end) = newlev1;
}
}
+ lockmgr(&allproc_lock, LK_RELEASE, NULL, CURPROC);
*pte = newlev1;
pmap_invalidate_all(kernel_pmap);
}
@@ -3057,6 +3063,7 @@ pmap_pid_dump(int pid)
struct proc *p;
int npte = 0;
int index;
+ lockmgr(&allproc_lock, LK_SHARED, NULL, CURPROC);
LIST_FOREACH(p, &allproc, p_list) {
if (p->p_pid != pid)
continue;
@@ -3079,6 +3086,7 @@ pmap_pid_dump(int pid)
index = 0;
printf("\n");
}
+ lockmgr(&allproc_lock, LK_RELEASE, NULL, CURPROC);
return npte;
}
pte = pmap_pte_quick( pmap, va);
@@ -3103,6 +3111,7 @@ pmap_pid_dump(int pid)
}
}
}
+ lockmgr(&allproc_lock, LK_RELEASE, NULL, CURPROC);
return npte;
}
#endif
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 4aefc1c..12a702a 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -3324,6 +3324,7 @@ pmap_pid_dump(int pid)
struct proc *p;
int npte = 0;
int index;
+ lockmgr(&allproc_lock, LK_SHARED, NULL, CURPROC);
LIST_FOREACH(p, &allproc, p_list) {
if (p->p_pid != pid)
continue;
@@ -3346,6 +3347,7 @@ pmap_pid_dump(int pid)
index = 0;
printf("\n");
}
+ lockmgr(&allproc_lock, LK_RELEASE, NULL, CURPROC);
return npte;
}
pte = pmap_pte_quick( pmap, va);
@@ -3370,6 +3372,7 @@ pmap_pid_dump(int pid)
}
}
}
+ lockmgr(&allproc_lock, LK_RELEASE, NULL, CURPROC);
return npte;
}
#endif
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index 4aefc1c..12a702a 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -3324,6 +3324,7 @@ pmap_pid_dump(int pid)
struct proc *p;
int npte = 0;
int index;
+ lockmgr(&allproc_lock, LK_SHARED, NULL, CURPROC);
LIST_FOREACH(p, &allproc, p_list) {
if (p->p_pid != pid)
continue;
@@ -3346,6 +3347,7 @@ pmap_pid_dump(int pid)
index = 0;
printf("\n");
}
+ lockmgr(&allproc_lock, LK_RELEASE, NULL, CURPROC);
return npte;
}
pte = pmap_pte_quick( pmap, va);
@@ -3370,6 +3372,7 @@ pmap_pid_dump(int pid)
}
}
}
+ lockmgr(&allproc_lock, LK_RELEASE, NULL, CURPROC);
return npte;
}
#endif
diff --git a/sys/ia64/ia64/pmap.c b/sys/ia64/ia64/pmap.c
index b438906..f01064c 100644
--- a/sys/ia64/ia64/pmap.c
+++ b/sys/ia64/ia64/pmap.c
@@ -2244,6 +2244,7 @@ pmap_pid_dump(int pid)
struct proc *p;
int npte = 0;
int index;
+ lockmgr(&allproc_lock, LK_SHARED, NULL, CURPROC);
LIST_FOREACH(p, &allproc, p_list) {
if (p->p_pid != pid)
continue;
@@ -2266,6 +2267,7 @@ pmap_pid_dump(int pid)
index = 0;
printf("\n");
}
+ lockmgr(&allproc_lock, LK_RELEASE, NULL, CURPROC);
return npte;
}
pte = pmap_pte_quick( pmap, va);
@@ -2290,6 +2292,7 @@ pmap_pid_dump(int pid)
}
}
}
+ lockmgr(&allproc_lock, LK_RELEASE, NULL, CURPROC);
return npte;
}
#endif
diff --git a/sys/kern/imgact_elf.c b/sys/kern/imgact_elf.c
index 9706d67..90c9716 100644
--- a/sys/kern/imgact_elf.c
+++ b/sys/kern/imgact_elf.c
@@ -152,13 +152,18 @@ int
elf_brand_inuse(Elf_Brandinfo *entry)
{
struct proc *p;
+ int rval = FALSE;
+ lockmgr(&allproc_lock, LK_SHARED, NULL, CURPROC);
LIST_FOREACH(p, &allproc, p_list) {
- if (p->p_sysent == entry->sysvec)
- return TRUE;
+ if (p->p_sysent == entry->sysvec) {
+ rval = TRUE;
+ break;
+ }
}
+ lockmgr(&allproc_lock, LK_RELEASE, NULL, CURPROC);
- return FALSE;
+ return (rval);
}
static int
diff --git a/sys/kern/init_main.c b/sys/kern/init_main.c
index de702bc..c4d1767 100644
--- a/sys/kern/init_main.c
+++ b/sys/kern/init_main.c
@@ -405,10 +405,12 @@ proc0_post(void *dummy __unused)
* Now we can look at the time, having had a chance to verify the
* time from the file system. Pretend that proc0 started now.
*/
+ lockmgr(&allproc_lock, LK_SHARED, NULL, CURPROC);
LIST_FOREACH(p, &allproc, p_list) {
microtime(&p->p_stats->p_start);
p->p_runtime = 0;
}
+ lockmgr(&allproc_lock, LK_RELEASE, NULL, CURPROC);
microuptime(&switchtime);
PCPU_SET(switchticks, ticks);
diff --git a/sys/kern/kern_exit.c b/sys/kern/kern_exit.c
index 489633a..9e5d488 100644
--- a/sys/kern/kern_exit.c
+++ b/sys/kern/kern_exit.c
@@ -264,11 +264,13 @@ exit1(p, rv)
* Remove proc from allproc queue and pidhash chain.
* Place onto zombproc. Unlink from parent's child list.
*/
+ lockmgr(&allproc_lock, LK_EXCLUSIVE, NULL, CURPROC);
LIST_REMOVE(p, p_list);
LIST_INSERT_HEAD(&zombproc, p, p_list);
p->p_stat = SZOMB;
LIST_REMOVE(p, p_hash);
+ lockmgr(&allproc_lock, LK_RELEASE, NULL, CURPROC);
q = LIST_FIRST(&p->p_children);
if (q) /* only need this if any child is S_ZOMB */
@@ -510,7 +512,9 @@ loop:
* Unlink it from its process group and free it.
*/
leavepgrp(p);
+ lockmgr(&allproc_lock, LK_EXCLUSIVE, NULL, CURPROC);
LIST_REMOVE(p, p_list); /* off zombproc */
+ lockmgr(&allproc_lock, LK_RELEASE, NULL, CURPROC);
LIST_REMOVE(p, p_sibling);
if (--p->p_procsig->ps_refcnt == 0) {
diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c
index 70d883c..55324c6 100644
--- a/sys/kern/kern_fork.c
+++ b/sys/kern/kern_fork.c
@@ -287,6 +287,7 @@ fork1(p1, flags, procp)
* If RFHIGHPID is set (used during system boot), do not allocate
* low-numbered pids.
*/
+ lockmgr(&allproc_lock, LK_EXCLUSIVE, NULL, CURPROC);
trypid = nextpid + 1;
if (flags & RFHIGHPID) {
if (trypid < 10) {
@@ -343,12 +344,6 @@ again:
}
}
- p2 = newproc;
- p2->p_stat = SIDL; /* protect against others */
- p2->p_pid = trypid;
- LIST_INSERT_HEAD(&allproc, p2, p_list);
- LIST_INSERT_HEAD(PIDHASH(p2->p_pid), p2, p_hash);
-
/*
* RFHIGHPID does not mess with the nextpid counter during boot.
*/
@@ -357,6 +352,13 @@ again:
else
nextpid = trypid;
+ p2 = newproc;
+ p2->p_stat = SIDL; /* protect against others */
+ p2->p_pid = trypid;
+ LIST_INSERT_HEAD(&allproc, p2, p_list);
+ LIST_INSERT_HEAD(PIDHASH(p2->p_pid), p2, p_hash);
+ lockmgr(&allproc_lock, LK_RELEASE, NULL, CURPROC);
+
/*
* Make a proc table entry for the new process.
* Start by zeroing the section of proc that is zero-initialized,
diff --git a/sys/kern/kern_ktrace.c b/sys/kern/kern_ktrace.c
index 7d2e75a..2d3b08d 100644
--- a/sys/kern/kern_ktrace.c
+++ b/sys/kern/kern_ktrace.c
@@ -278,6 +278,7 @@ ktrace(curp, uap)
* Clear all uses of the tracefile
*/
if (ops == KTROP_CLEARFILE) {
+ lockmgr(&allproc_lock, LK_SHARED, NULL, CURPROC);
LIST_FOREACH(p, &allproc, p_list) {
if (p->p_tracep == vp) {
if (ktrcanset(curp, p)) {
@@ -289,6 +290,7 @@ ktrace(curp, uap)
error = EPERM;
}
}
+ lockmgr(&allproc_lock, LK_RELEASE, NULL, CURPROC);
goto done;
}
/*
@@ -494,6 +496,7 @@ ktrwrite(vp, kth, uio)
*/
log(LOG_NOTICE, "ktrace write failed, errno %d, tracing stopped\n",
error);
+ lockmgr(&allproc_lock, LK_SHARED, NULL, CURPROC);
LIST_FOREACH(p, &allproc, p_list) {
if (p->p_tracep == vp) {
p->p_tracep = NULL;
@@ -501,6 +504,7 @@ ktrwrite(vp, kth, uio)
vrele(vp);
}
}
+ lockmgr(&allproc_lock, LK_RELEASE, NULL, CURPROC);
}
/*
diff --git a/sys/kern/kern_proc.c b/sys/kern/kern_proc.c
index 4800747..ac30ba6 100644
--- a/sys/kern/kern_proc.c
+++ b/sys/kern/kern_proc.c
@@ -72,6 +72,7 @@ struct pgrphashhead *pgrphashtbl;
u_long pgrphash;
struct proclist allproc;
struct proclist zombproc;
+struct lock allproc_lock;
vm_zone_t proc_zone;
vm_zone_t ithread_zone;
@@ -82,6 +83,7 @@ void
procinit()
{
+ lockinit(&allproc_lock, PZERO, "allproc", 0, 0);
LIST_INIT(&allproc);
LIST_INIT(&zombproc);
pidhashtbl = hashinit(maxproc / 4, M_PROC, &pidhash);
@@ -113,10 +115,12 @@ pfind(pid)
{
register struct proc *p;
+ lockmgr(&allproc_lock, LK_SHARED, NULL, CURPROC);
LIST_FOREACH(p, PIDHASH(pid), p_hash)
if (p->p_pid == pid)
- return (p);
- return (NULL);
+ break;
+ lockmgr(&allproc_lock, LK_RELEASE, NULL, CURPROC);
+ return (p);
}
/*
@@ -470,6 +474,7 @@ sysctl_kern_proc(SYSCTL_HANDLER_ARGS)
if (error)
return (error);
}
+ lockmgr(&allproc_lock, LK_SHARED, NULL, CURPROC);
for (doingzomb=0 ; doingzomb < 2 ; doingzomb++) {
if (!doingzomb)
p = LIST_FIRST(&allproc);
@@ -525,10 +530,14 @@ sysctl_kern_proc(SYSCTL_HANDLER_ARGS)
continue;
error = sysctl_out_proc(p, req, doingzomb);
- if (error)
+ if (error) {
+ lockmgr(&allproc_lock, LK_RELEASE, NULL,
+ CURPROC);
return (error);
+ }
}
}
+ lockmgr(&allproc_lock, LK_RELEASE, NULL, CURPROC);
return (0);
}
diff --git a/sys/kern/kern_resource.c b/sys/kern/kern_resource.c
index b0d38c0..f37d49d 100644
--- a/sys/kern/kern_resource.c
+++ b/sys/kern/kern_resource.c
@@ -119,11 +119,13 @@ getpriority(curp, uap)
case PRIO_USER:
if (uap->who == 0)
uap->who = curp->p_ucred->cr_uid;
+ lockmgr(&allproc_lock, LK_SHARED, NULL, CURPROC);
LIST_FOREACH(p, &allproc, p_list)
if (!p_can(curp, p, P_CAN_SEE, NULL) &&
p->p_ucred->cr_uid == uap->who &&
p->p_nice < low)
low = p->p_nice;
+ lockmgr(&allproc_lock, LK_RELEASE, NULL, CURPROC);
break;
default:
@@ -185,12 +187,14 @@ setpriority(curp, uap)
case PRIO_USER:
if (uap->who == 0)
uap->who = curp->p_ucred->cr_uid;
+ lockmgr(&allproc_lock, LK_SHARED, NULL, CURPROC);
LIST_FOREACH(p, &allproc, p_list)
if (p->p_ucred->cr_uid == uap->who &&
!p_can(curp, p, P_CAN_SEE, NULL)) {
error = donice(curp, p, uap->prio);
found++;
}
+ lockmgr(&allproc_lock, LK_RELEASE, NULL, CURPROC);
break;
default:
diff --git a/sys/kern/kern_sig.c b/sys/kern/kern_sig.c
index a49964e..345bc31 100644
--- a/sys/kern/kern_sig.c
+++ b/sys/kern/kern_sig.c
@@ -850,10 +850,11 @@ killpg1(cp, sig, pgid, all)
struct pgrp *pgrp;
int nfound = 0;
- if (all)
+ if (all) {
/*
* broadcast
*/
+ lockmgr(&allproc_lock, LK_SHARED, NULL, CURPROC);
LIST_FOREACH(p, &allproc, p_list) {
if (p->p_pid <= 1 || p->p_flag & P_SYSTEM ||
p == cp || !CANSIGNAL(cp, p, sig))
@@ -862,7 +863,8 @@ killpg1(cp, sig, pgid, all)
if (sig)
psignal(p, sig);
}
- else {
+ lockmgr(&allproc_lock, LK_RELEASE, NULL, CURPROC);
+ } else {
if (pgid == 0)
/*
* zero pgid means send to my process group.
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index 972b682..585d1ff 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -281,6 +281,7 @@ schedcpu(arg)
register int realstathz, s;
realstathz = stathz ? stathz : hz;
+ lockmgr(&allproc_lock, LK_SHARED, NULL, CURPROC);
LIST_FOREACH(p, &allproc, p_list) {
/*
* Increment time in/out of memory and sleep time
@@ -340,6 +341,7 @@ schedcpu(arg)
mtx_exit(&sched_lock, MTX_SPIN);
splx(s);
}
+ lockmgr(&allproc_lock, LK_RELEASE, NULL, CURPROC);
vmmeter();
wakeup((caddr_t)&lbolt);
timeout(schedcpu, (void *)0, hz);
diff --git a/sys/kern/vfs_extattr.c b/sys/kern/vfs_extattr.c
index 7cf4663..59aacb5 100644
--- a/sys/kern/vfs_extattr.c
+++ b/sys/kern/vfs_extattr.c
@@ -366,6 +366,7 @@ checkdirs(olddp)
return;
if (VFS_ROOT(olddp->v_mountedhere, &newdp))
panic("mount: lost mount");
+ lockmgr(&allproc_lock, LK_SHARED, NULL, CURPROC);
LIST_FOREACH(p, &allproc, p_list) {
fdp = p->p_fd;
if (fdp->fd_cdir == olddp) {
@@ -379,6 +380,7 @@ checkdirs(olddp)
fdp->fd_rdir = newdp;
}
}
+ lockmgr(&allproc_lock, LK_RELEASE, NULL, CURPROC);
if (rootvnode == olddp) {
vrele(rootvnode);
VREF(newdp);
diff --git a/sys/kern/vfs_syscalls.c b/sys/kern/vfs_syscalls.c
index 7cf4663..59aacb5 100644
--- a/sys/kern/vfs_syscalls.c
+++ b/sys/kern/vfs_syscalls.c
@@ -366,6 +366,7 @@ checkdirs(olddp)
return;
if (VFS_ROOT(olddp->v_mountedhere, &newdp))
panic("mount: lost mount");
+ lockmgr(&allproc_lock, LK_SHARED, NULL, CURPROC);
LIST_FOREACH(p, &allproc, p_list) {
fdp = p->p_fd;
if (fdp->fd_cdir == olddp) {
@@ -379,6 +380,7 @@ checkdirs(olddp)
fdp->fd_rdir = newdp;
}
}
+ lockmgr(&allproc_lock, LK_RELEASE, NULL, CURPROC);
if (rootvnode == olddp) {
vrele(rootvnode);
VREF(newdp);
diff --git a/sys/sys/proc.h b/sys/sys/proc.h
index 6534f90..f570802 100644
--- a/sys/sys/proc.h
+++ b/sys/sys/proc.h
@@ -45,6 +45,7 @@
#include <machine/proc.h> /* Machine-dependent proc substruct. */
#include <sys/callout.h> /* For struct callout_handle. */
#include <sys/filedesc.h>
+#include <sys/lock.h> /* For lockmgr. */
#include <sys/queue.h>
#include <sys/rtprio.h> /* For struct rtprio. */
#include <sys/signal.h>
@@ -484,6 +485,8 @@ struct pgrp *pgfind __P((pid_t)); /* Find process group by id. */
struct vm_zone;
extern struct vm_zone *proc_zone;
+extern struct lock allproc_lock;
+
int enterpgrp __P((struct proc *p, pid_t pgid, int mksess));
void fixjobc __P((struct proc *p, struct pgrp *pgrp, int entering));
int inferior __P((struct proc *p));
diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c
index 82e22ba..3c9e941 100644
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -360,6 +360,7 @@ loop:
pp = NULL;
ppri = INT_MIN;
+ lockmgr(&allproc_lock, LK_SHARED, NULL, CURPROC);
for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
if (p->p_stat == SRUN &&
(p->p_flag & (P_INMEM | P_SWAPPING)) == 0) {
@@ -380,6 +381,7 @@ loop:
}
}
}
+ lockmgr(&allproc_lock, LK_RELEASE, NULL, CURPROC);
/*
* Nothing to do, back to sleep.
@@ -439,6 +441,7 @@ int action;
outp = outp2 = NULL;
outpri = outpri2 = INT_MIN;
+ lockmgr(&allproc_lock, LK_SHARED, NULL, CURPROC);
retry:
for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
struct vmspace *vm;
@@ -504,6 +507,7 @@ retry:
}
}
}
+ lockmgr(&allproc_lock, LK_RELEASE, NULL, CURPROC);
/*
* If we swapped something out, and another process needed memory,
* then wakeup the sched process.
diff --git a/sys/vm/vm_meter.c b/sys/vm/vm_meter.c
index 8b68a65..9ed122e 100644
--- a/sys/vm/vm_meter.c
+++ b/sys/vm/vm_meter.c
@@ -78,6 +78,7 @@ loadav(struct loadavg *avg)
register int i, nrun;
register struct proc *p;
+ lockmgr(&allproc_lock, LK_SHARED, NULL, CURPROC);
for (nrun = 0, p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
switch (p->p_stat) {
case SSLEEP:
@@ -92,6 +93,7 @@ loadav(struct loadavg *avg)
nrun++;
}
}
+ lockmgr(&allproc_lock, LK_RELEASE, NULL, CURPROC);
for (i = 0; i < 3; i++)
avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;
@@ -149,6 +151,7 @@ vmtotal(SYSCTL_HANDLER_ARGS)
/*
* Calculate process statistics.
*/
+ lockmgr(&allproc_lock, LK_SHARED, NULL, CURPROC);
for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
if (p->p_flag & P_SYSTEM)
continue;
@@ -199,6 +202,7 @@ vmtotal(SYSCTL_HANDLER_ARGS)
if (paging)
totalp->t_pw++;
}
+ lockmgr(&allproc_lock, LK_RELEASE, NULL, CURPROC);
/*
* Calculate object memory usage statistics.
*/
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index e5403d1..0b079c3 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -1620,12 +1620,16 @@ vm_object_in_map( object)
vm_object_t object;
{
struct proc *p;
+ lockmgr(&allproc_lock, LK_SHARED, NULL, CURPROC);
for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
if( !p->p_vmspace /* || (p->p_flag & (P_SYSTEM|P_WEXIT)) */)
continue;
- if( _vm_object_in_map(&p->p_vmspace->vm_map, object, 0))
+ if( _vm_object_in_map(&p->p_vmspace->vm_map, object, 0)) {
+ lockmgr(&allproc_lock, LK_RELEASE, NULL, CURPROC);
return 1;
+ }
}
+ lockmgr(&allproc_lock, LK_RELEASE, NULL, CURPROC);
if( _vm_object_in_map( kernel_map, object, 0))
return 1;
if( _vm_object_in_map( kmem_map, object, 0))
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index 4ab3930..3b0c7cc 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -1129,6 +1129,7 @@ rescan0:
if ((vm_swap_size < 64 || swap_pager_full) && vm_page_count_min()) {
bigproc = NULL;
bigsize = 0;
+ lockmgr(&allproc_lock, LK_SHARED, NULL, CURPROC);
for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
/*
* if this is a system process, skip it
@@ -1158,6 +1159,7 @@ rescan0:
bigsize = size;
}
}
+ lockmgr(&allproc_lock, LK_RELEASE, NULL, CURPROC);
if (bigproc != NULL) {
killproc(bigproc, "out of swap space");
bigproc->p_estcpu = 0;
@@ -1442,6 +1444,7 @@ vm_daemon()
* process is swapped out -- deactivate pages
*/
+ lockmgr(&allproc_lock, LK_SHARED, NULL, CURPROC);
for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
vm_pindex_t limit, size;
@@ -1480,6 +1483,7 @@ vm_daemon()
&p->p_vmspace->vm_map, limit);
}
}
+ lockmgr(&allproc_lock, LK_RELEASE, NULL, CURPROC);
}
}
#endif