-rw-r--r--  sys/alpha/alpha/vm_machdep.c       28
-rw-r--r--  sys/amd64/amd64/vm_machdep.c       26
-rw-r--r--  sys/i386/i386/vm_machdep.c         26
-rw-r--r--  sys/ia64/ia64/vm_machdep.c         33
-rw-r--r--  sys/kern/kern_exit.c               33
-rw-r--r--  sys/kern/kern_fork.c                8
-rw-r--r--  sys/powerpc/aim/vm_machdep.c       33
-rw-r--r--  sys/powerpc/powerpc/vm_machdep.c   33
-rw-r--r--  sys/sparc64/sparc64/vm_machdep.c   28
-rw-r--r--  sys/sys/proc.h                      2
-rw-r--r--  sys/vm/vm_extern.h                  3
-rw-r--r--  sys/vm/vm_glue.c                   18
12 files changed, 75 insertions, 196 deletions
diff --git a/sys/alpha/alpha/vm_machdep.c b/sys/alpha/alpha/vm_machdep.c
index a40ea11..0aec807 100644
--- a/sys/alpha/alpha/vm_machdep.c
+++ b/sys/alpha/alpha/vm_machdep.c
@@ -241,40 +241,14 @@ void
cpu_exit(p)
register struct proc *p;
{
- alpha_fpstate_drop(p);
-
- PROC_LOCK(p);
- mtx_lock_spin(&sched_lock);
- while (mtx_owned(&Giant))
- mtx_unlock_flags(&Giant, MTX_NOSWITCH);
-
- /*
- * We have to wait until after releasing all locks before
- * changing p_stat. If we block on a mutex then we will be
- * back at SRUN when we resume and our parent will never
- * harvest us.
- */
- p->p_stat = SZOMB;
- wakeup(p->p_pptr);
- PROC_UNLOCK_NOSWITCH(p);
-
- cnt.v_swtch++;
- cpu_switch();
- panic("cpu_exit");
+ alpha_fpstate_drop(p);
}
void
cpu_wait(p)
struct proc *p;
{
- GIANT_REQUIRED;
-
- /* drop per-process resources */
- pmap_dispose_proc(p);
-
- /* and clean-out the vmspace */
- vmspace_free(p->p_vmspace);
}
/*
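After this hunk, the alpha MD hooks shrink to almost nothing; a sketch of the resulting functions, assuming the MI exit1() now performs the zombie transition shown in the kern_exit.c hunk below:

    /* sys/alpha/alpha/vm_machdep.c after this commit (sketch) */
    void
    cpu_exit(struct proc *p)
    {
            alpha_fpstate_drop(p);  /* drop lazy FP state; exit1() does the rest */
    }

    void
    cpu_wait(struct proc *p)
    {
            /* empty: vm_waitproc() now reclaims the pmap and vmspace */
    }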
diff --git a/sys/amd64/amd64/vm_machdep.c b/sys/amd64/amd64/vm_machdep.c
index 4fc91dd..53cedff 100644
--- a/sys/amd64/amd64/vm_machdep.c
+++ b/sys/amd64/amd64/vm_machdep.c
@@ -268,38 +268,12 @@ cpu_exit(p)
reset_dbregs();
pcb->pcb_flags &= ~PCB_DBREGS;
}
- PROC_LOCK(p);
- mtx_lock_spin(&sched_lock);
- while (mtx_owned(&Giant))
- mtx_unlock_flags(&Giant, MTX_NOSWITCH);
-
- /*
- * We have to wait until after releasing all locks before
- * changing p_stat. If we block on a mutex then we will be
- * back at SRUN when we resume and our parent will never
- * harvest us.
- */
- p->p_stat = SZOMB;
-
- wakeup(p->p_pptr);
- PROC_UNLOCK_NOSWITCH(p);
-
- cnt.v_swtch++;
- cpu_throw();
- panic("cpu_exit");
}
void
cpu_wait(p)
struct proc *p;
{
- GIANT_REQUIRED;
-
- /* drop per-process resources */
- pmap_dispose_proc(p);
-
- /* and clean-out the vmspace */
- vmspace_free(p->p_vmspace);
}
/*
diff --git a/sys/i386/i386/vm_machdep.c b/sys/i386/i386/vm_machdep.c
index 4fc91dd..53cedff 100644
--- a/sys/i386/i386/vm_machdep.c
+++ b/sys/i386/i386/vm_machdep.c
@@ -268,38 +268,12 @@ cpu_exit(p)
reset_dbregs();
pcb->pcb_flags &= ~PCB_DBREGS;
}
- PROC_LOCK(p);
- mtx_lock_spin(&sched_lock);
- while (mtx_owned(&Giant))
- mtx_unlock_flags(&Giant, MTX_NOSWITCH);
-
- /*
- * We have to wait until after releasing all locks before
- * changing p_stat. If we block on a mutex then we will be
- * back at SRUN when we resume and our parent will never
- * harvest us.
- */
- p->p_stat = SZOMB;
-
- wakeup(p->p_pptr);
- PROC_UNLOCK_NOSWITCH(p);
-
- cnt.v_swtch++;
- cpu_throw();
- panic("cpu_exit");
}
void
cpu_wait(p)
struct proc *p;
{
- GIANT_REQUIRED;
-
- /* drop per-process resources */
- pmap_dispose_proc(p);
-
- /* and clean-out the vmspace */
- vmspace_free(p->p_vmspace);
}
/*
diff --git a/sys/ia64/ia64/vm_machdep.c b/sys/ia64/ia64/vm_machdep.c
index d7443a0..3774212 100644
--- a/sys/ia64/ia64/vm_machdep.c
+++ b/sys/ia64/ia64/vm_machdep.c
@@ -284,40 +284,23 @@ void
cpu_exit(p)
register struct proc *p;
{
- ia64_fpstate_drop(p);
-
- PROC_LOCK(p);
- mtx_lock_spin(&sched_lock);
- while (mtx_owned(&Giant))
- mtx_unlock_flags(&Giant, MTX_NOSWITCH);
- /*
- * We have to wait until after releasing all locks before
- * changing p_stat. If we block on a mutex then we will be
- * back at SRUN when we resume and our parent will never
- * harvest us.
- */
- p->p_stat = SZOMB;
-
- wakeup(p->p_pptr);
- PROC_UNLOCK_NOSWITCH(p);
-
- cnt.v_swtch++;
- cpu_switch();
- panic("cpu_exit");
+ ia64_fpstate_drop(p);
}
void
cpu_wait(p)
struct proc *p;
{
- GIANT_REQUIRED;
+}
- /* drop per-process resources */
- pmap_dispose_proc(p);
+/* Temporary helper */
+void
+cpu_throw(void)
+{
- /* and clean-out the vmspace */
- vmspace_free(p->p_vmspace);
+ cpu_switch();
+ panic("cpu_throw() didn't");
}
/*
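The "temporary helper" above matters because ia64 (and powerpc, below) have no native cpu_throw() yet; the deleted amd64, i386 and sparc64 code already called a real one, and alpha's is not shown in this diff. A hedged sketch of the intended semantics:

    /*
     * cpu_switch() saves the outgoing context and resumes another
     * runnable process, so the caller can later be switched back to.
     * cpu_throw() must switch away WITHOUT preserving the caller,
     * which must never run again.  The stopgap reuses cpu_switch()
     * and relies on the process being SZOMB (never rescheduled);
     * the panic() is a backstop in case it does come back.
     */
    void
    cpu_throw(void)
    {
            cpu_switch();
            panic("cpu_throw() didn't");
    }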
diff --git a/sys/kern/kern_exit.c b/sys/kern/kern_exit.c
index 8022cb5..812b20e 100644
--- a/sys/kern/kern_exit.c
+++ b/sys/kern/kern_exit.c
@@ -54,6 +54,7 @@
#include <sys/tty.h>
#include <sys/wait.h>
#include <sys/vnode.h>
+#include <sys/vmmeter.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/sx.h>
@@ -67,6 +68,7 @@
#include <vm/vm.h>
#include <vm/vm_param.h>
+#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_zone.h>
@@ -380,13 +382,30 @@ exit1(p, rv)
/*
* Finally, call machine-dependent code to release the remaining
* resources including address space, the kernel stack and pcb.
- * The address space is released by "vmspace_free(p->p_vmspace)";
- * This is machine-dependent, as we may have to change stacks
- * or ensure that the current one isn't reallocated before we
- * finish. cpu_exit will end with a call to cpu_switch(), finishing
- * our execution (pun intended).
+ * The address space is released by "vmspace_free(p->p_vmspace)"
+ * in vm_waitproc().
*/
cpu_exit(p);
+
+ PROC_LOCK(p);
+ mtx_lock_spin(&sched_lock);
+ while (mtx_owned(&Giant))
+ mtx_unlock_flags(&Giant, MTX_NOSWITCH);
+
+ /*
+ * We have to wait until after releasing all locks before
+ * changing p_stat. If we block on a mutex then we will be
+ * back at SRUN when we resume and our parent will never
+ * harvest us.
+ */
+ p->p_stat = SZOMB;
+
+ wakeup(p->p_pptr);
+ PROC_UNLOCK_NOSWITCH(p);
+
+ cnt.v_swtch++;
+ cpu_throw();
+ panic("exit1");
}
#ifdef COMPAT_43
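The ordering in the new exit1() tail is load-bearing: every lock that could block must be dropped before p_stat changes. An illustrative comment on the failure mode (not code from the tree):

    /*
     * Hypothetical wrong ordering:
     *
     *      p->p_stat = SZOMB;
     *      mtx_unlock(&Giant);     <- may switch away without MTX_NOSWITCH
     *
     * If the process blocks or is switched out here, it is back at
     * SRUN when it resumes, silently undoing SZOMB, and the parent's
     * wait(2) never sees a zombie.  Hence Giant is released with
     * MTX_NOSWITCH while sched_lock is held, and cpu_throw() follows
     * the p_stat assignment with nothing that can sleep in between.
     */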
@@ -571,11 +590,11 @@ loop:
}
/*
- * Give machine-dependent layer a chance
+ * Give vm and machine-dependent layer a chance
* to free anything that cpu_exit couldn't
* release while still running in process context.
*/
- cpu_wait(p);
+ vm_waitproc(p);
mtx_destroy(&p->p_mtx);
zfree(proc_zone, p);
nprocs--;
diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c
index dea8ff0..af154c3 100644
--- a/sys/kern/kern_fork.c
+++ b/sys/kern/kern_fork.c
@@ -235,7 +235,7 @@ fork1(p1, flags, procp)
* certain parts of a process from itself.
*/
if ((flags & RFPROC) == 0) {
- vm_fork(p1, 0, flags);
+ vm_forkproc(p1, 0, flags);
/*
* Close all file descriptors.
@@ -412,7 +412,7 @@ again:
/*
* Duplicate sub-structures as needed.
* Increase reference counts on shared objects.
- * The p_stats and p_sigacts substructs are set in vm_fork.
+ * The p_stats and p_sigacts substructs are set in vm_forkproc.
*/
p2->p_flag = 0;
mtx_lock_spin(&sched_lock);
@@ -461,7 +461,7 @@ again:
PROC_LOCK(p1);
bcopy(p1->p_procsig, p2->p_procsig, sizeof(*p2->p_procsig));
p2->p_procsig->ps_refcnt = 1;
- p2->p_sigacts = NULL; /* finished in vm_fork() */
+ p2->p_sigacts = NULL; /* finished in vm_forkproc() */
}
if (flags & RFLINUXTHPN)
p2->p_sigparent = SIGUSR1;
@@ -573,7 +573,7 @@ again:
* Finish creating the child process. It will return via a different
* execution path later. (ie: directly into user mode)
*/
- vm_fork(p1, p2, flags);
+ vm_forkproc(p1, p2, flags);
if (flags == (RFFDG | RFPROC)) {
cnt.v_forks++;
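The vm_fork() -> vm_forkproc() rename pairs the fork-time VM hook with the new reap-time one; a sketch of the lifecycle using the names from this commit:

    vm_forkproc(p1, p2, flags);   /* fork: set up vmspace, p_stats, p_sigacts */
    /* ... p2 runs, calls exit1(), parent wait(2)s ... */
    vm_waitproc(p2);              /* reap: cpu_wait(), pmap_dispose_proc(), vmspace_free() */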
diff --git a/sys/powerpc/aim/vm_machdep.c b/sys/powerpc/aim/vm_machdep.c
index e811fc8..630f9bf 100644
--- a/sys/powerpc/aim/vm_machdep.c
+++ b/sys/powerpc/aim/vm_machdep.c
@@ -159,38 +159,21 @@ void
cpu_exit(p)
register struct proc *p;
{
- PROC_LOCK(p);
- mtx_lock_spin(&sched_lock);
- while (mtx_owned(&Giant))
- mtx_unlock_flags(&Giant, MTX_NOSWITCH);
-
- /*
- * We have to wait until after releasing all locks before
- * changing p_stat. If we block on a mutex then we will be
- * back at SRUN when we resume and our parent will never
- * harvest us.
- */
- p->p_stat = SZOMB;
-
- wakeup(p->p_pptr);
- PROC_UNLOCK_NOSWITCH(p);
-
- cnt.v_swtch++;
- cpu_switch();
- panic("cpu_exit");
}
void
cpu_wait(p)
struct proc *p;
{
- GIANT_REQUIRED;
-
- /* drop per-process resources */
- pmap_dispose_proc(p);
+}
+
+/* Temporary helper */
+void
+cpu_throw(void)
+{
- /* and clean-out the vmspace */
- vmspace_free(p->p_vmspace);
+ cpu_switch();
+ panic("cpu_throw() didn't");
}
/*
diff --git a/sys/powerpc/powerpc/vm_machdep.c b/sys/powerpc/powerpc/vm_machdep.c
index e811fc8..630f9bf 100644
--- a/sys/powerpc/powerpc/vm_machdep.c
+++ b/sys/powerpc/powerpc/vm_machdep.c
@@ -159,38 +159,21 @@ void
cpu_exit(p)
register struct proc *p;
{
- PROC_LOCK(p);
- mtx_lock_spin(&sched_lock);
- while (mtx_owned(&Giant))
- mtx_unlock_flags(&Giant, MTX_NOSWITCH);
-
- /*
- * We have to wait until after releasing all locks before
- * changing p_stat. If we block on a mutex then we will be
- * back at SRUN when we resume and our parent will never
- * harvest us.
- */
- p->p_stat = SZOMB;
-
- wakeup(p->p_pptr);
- PROC_UNLOCK_NOSWITCH(p);
-
- cnt.v_swtch++;
- cpu_switch();
- panic("cpu_exit");
}
void
cpu_wait(p)
struct proc *p;
{
- GIANT_REQUIRED;
-
- /* drop per-process resources */
- pmap_dispose_proc(p);
+}
+
+/* Temporary helper */
+void
+cpu_throw(void)
+{
- /* and clean-out the vmspace */
- vmspace_free(p->p_vmspace);
+ cpu_switch();
+ panic("cpu_throw() didn't");
}
/*
diff --git a/sys/sparc64/sparc64/vm_machdep.c b/sys/sparc64/sparc64/vm_machdep.c
index 2365c32..addec1b 100644
--- a/sys/sparc64/sparc64/vm_machdep.c
+++ b/sys/sparc64/sparc64/vm_machdep.c
@@ -63,30 +63,9 @@
#include <machine/md_var.h>
#include <machine/tstate.h>
-/* XXX: it seems that all that is in here should really be MI... */
void
cpu_exit(struct proc *p)
{
-
- PROC_LOCK(p);
- mtx_lock_spin(&sched_lock);
- while (mtx_owned(&Giant))
- mtx_unlock_flags(&Giant, MTX_NOSWITCH);
-
- /*
- * We have to wait until after releasing all locks before
- * changing p_stat. If we block on a mutex then we will be
- * back at SRUN when we resume and our parent will never
- * harvest us.
- */
- p->p_stat = SZOMB;
-
- wakeup(p->p_pptr);
- PROC_UNLOCK_NOSWITCH(p);
-
- cnt.v_swtch++;
- cpu_throw();
- panic("cpu_exit");
}
/*
@@ -178,13 +157,6 @@ cpu_set_fork_handler(struct proc *p, void (*func)(void *), void *arg)
void
cpu_wait(struct proc *p)
{
- GIANT_REQUIRED;
-
- /* drop per-process resources */
- pmap_dispose_proc(p);
-
- /* and clean-out the vmspace */
- vmspace_free(p->p_vmspace);
}
void
diff --git a/sys/sys/proc.h b/sys/sys/proc.h
index 9de9392..0a321a5 100644
--- a/sys/sys/proc.h
+++ b/sys/sys/proc.h
@@ -501,7 +501,7 @@ void updatepri __P((struct proc *));
void userret __P((struct proc *, struct trapframe *, u_int));
void maybe_resched __P((struct proc *));
-void cpu_exit __P((struct proc *)) __dead2;
+void cpu_exit __P((struct proc *));
void exit1 __P((struct proc *, int)) __dead2;
void cpu_fork __P((struct proc *, struct proc *, int));
void cpu_set_fork_handler __P((struct proc *, void (*)(void *), void *));
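cpu_exit() loses __dead2 because it now returns to exit1(), which keeps the attribute since it ends in cpu_throw(). The resulting prototypes (the cpu_throw() declaration is an assumption; this hunk does not show where it is declared):

    void	cpu_exit __P((struct proc *));             /* MD cleanup; returns */
    void	exit1 __P((struct proc *, int)) __dead2;   /* never returns */
    void	cpu_throw __P((void)) __dead2;             /* assumed declaration */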
diff --git a/sys/vm/vm_extern.h b/sys/vm/vm_extern.h
index 307192f..9ff0191 100644
--- a/sys/vm/vm_extern.h
+++ b/sys/vm/vm_extern.h
@@ -80,7 +80,8 @@ void vm_fault_copy_entry __P((vm_map_t, vm_map_t, vm_map_entry_t, vm_map_entry_t
void vm_fault_unwire __P((vm_map_t, vm_offset_t, vm_offset_t));
int vm_fault_wire __P((vm_map_t, vm_offset_t, vm_offset_t));
int vm_fault_user_wire __P((vm_map_t, vm_offset_t, vm_offset_t));
-void vm_fork __P((struct proc *, struct proc *, int));
+void vm_forkproc __P((struct proc *, struct proc *, int));
+void vm_waitproc __P((struct proc *));
int vm_mmap __P((vm_map_t, vm_offset_t *, vm_size_t, vm_prot_t, vm_prot_t, int, void *, vm_ooffset_t));
vm_offset_t vm_page_alloc_contig __P((vm_offset_t, vm_offset_t, vm_offset_t, vm_offset_t));
void vm_set_page_size __P((void));
diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c
index 8ac0dde..5638175 100644
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -209,7 +209,7 @@ vsunlock(addr, len)
* to user mode to avoid stack copying and relocation problems.
*/
void
-vm_fork(p1, p2, flags)
+vm_forkproc(p1, p2, flags)
struct proc *p1, *p2;
int flags;
{
@@ -286,6 +286,22 @@ vm_fork(p1, p2, flags)
}
/*
+ * Called after the process has been wait(2)'ed upon and is being reaped.
+ * The idea is to reclaim resources that we could not reclaim while
+ * the process was still executing.
+ */
+void
+vm_waitproc(p)
+ struct proc *p;
+{
+
+ GIANT_REQUIRED;
+ cpu_wait(p);
+ pmap_dispose_proc(p); /* drop per-process resources */
+ vmspace_free(p->p_vmspace); /* and clean-out the vmspace */
+}
+
+/*
* Set default limits for VM system.
* Called for proc 0, and then inherited by all others.
*
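Taken together, teardown is now split across two contexts; a condensed sketch assembled from the hunks above:

    /* In the dying process (MI code in exit1(), still on its own stack): */
    cpu_exit(p);            /* MD-only cleanup, e.g. alpha_fpstate_drop(p) */
    p->p_stat = SZOMB;      /* only after all blocking locks are released */
    wakeup(p->p_pptr);
    cpu_throw();            /* switch away for good; never returns */

    /* In the parent, once wait(2) has harvested the zombie: */
    vm_waitproc(p);         /* cpu_wait(p); pmap_dispose_proc(p); vmspace_free(p->p_vmspace) */
    zfree(proc_zone, p);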