path: root/sys/vm
author     dyson <dyson@FreeBSD.org>    1997-04-13 01:48:35 +0000
committer  dyson <dyson@FreeBSD.org>    1997-04-13 01:48:35 +0000
commit     61955ab83033e59cfabb8590ba5ccbf0669d9a47 (patch)
tree       8d2081b02ed11701596e498b2013cf9f5a5625dd /sys/vm
parent     438bdd8a9c9d390f13a4b05980c19b18e974d16c (diff)
download   FreeBSD-src-61955ab83033e59cfabb8590ba5ccbf0669d9a47.zip
           FreeBSD-src-61955ab83033e59cfabb8590ba5ccbf0669d9a47.tar.gz
Fully implement vfork. Vfork is now much, much faster than even our fork. (On my machine, fork takes about 240 usecs; vfork takes 78 usecs.)

Implement rfork(!RFPROC !RFMEM), which allows a thread to divorce its memory from the other threads of a group.

Implement rfork(!RFPROC RFCFDG), which closes all file descriptors, eliminating possible existing shares with other threads/processes.

Implement rfork(!RFPROC RFFDG), which divorces the file descriptors for a thread from the rest of the group.

Fix the case where a thread does an exec. It is almost nonsense for a thread to modify the other threads' address space by an exec, so we now automatically divorce the address space before modifying it.
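For illustration only, here is a minimal userland sketch of the semantics described above; it is not part of this commit, and everything beyond the documented vfork(2)/rfork(2) interfaces and the RF* flag names from <unistd.h> is assumed rather than taken from the diff.

#include <sys/types.h>
#include <sys/wait.h>
#include <err.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
        pid_t pid;

        /*
         * vfork(): the child borrows the parent's address space and the
         * parent is suspended until the child calls _exit() or exec*(),
         * which is why it can be so much cheaper than fork().
         */
        pid = vfork();
        if (pid == -1)
                err(1, "vfork");
        if (pid == 0)
                _exit(0);
        if (waitpid(pid, NULL, 0) == -1)
                err(1, "waitpid");

        /*
         * Without RFPROC the request applies to the calling process itself:
         * RFFDG gives the caller a private copy of its descriptor table,
         * and since RFMEM is also absent the address space is unshared
         * (copy-on-write), as the commit message describes.
         */
        if (rfork(RFFDG) == -1)
                err(1, "rfork(RFFDG)");

        printf("descriptor table and address space are now private\n");
        return (0);
}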
Diffstat (limited to 'sys/vm')
-rw-r--r--  sys/vm/pmap.h        3
-rw-r--r--  sys/vm/vm_extern.h   4
-rw-r--r--  sys/vm/vm_glue.c    12
-rw-r--r--  sys/vm/vm_map.c     54
4 files changed, 65 insertions, 8 deletions
diff --git a/sys/vm/pmap.h b/sys/vm/pmap.h
index 64a7063..7a7233f 100644
--- a/sys/vm/pmap.h
+++ b/sys/vm/pmap.h
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id$
+ * $Id: pmap.h,v 1.19 1997/02/22 09:48:04 peter Exp $
*/
/*
@@ -129,6 +129,7 @@ void pmap_new_proc __P((struct proc *p));
void pmap_dispose_proc __P((struct proc *p));
void pmap_swapout_proc __P((struct proc *p));
void pmap_swapin_proc __P((struct proc *p));
+void pmap_activate __P((struct proc *p));
#endif /* KERNEL */
diff --git a/sys/vm/vm_extern.h b/sys/vm/vm_extern.h
index 795cfc4..4d6169c 100644
--- a/sys/vm/vm_extern.h
+++ b/sys/vm/vm_extern.h
@@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)vm_extern.h 8.2 (Berkeley) 1/12/94
- * $Id: vm_extern.h,v 1.32 1997/04/06 02:30:56 dyson Exp $
+ * $Id: vm_extern.h,v 1.33 1997/04/07 07:16:04 peter Exp $
*/
#ifndef _VM_EXTERN_H_
@@ -88,6 +88,8 @@ void vm_set_page_size __P((void));
void vmmeter __P((void));
struct vmspace *vmspace_alloc __P((vm_offset_t, vm_offset_t, int));
struct vmspace *vmspace_fork __P((struct vmspace *));
+void vmspace_exec __P((struct proc *));
+void vmspace_unshare __P((struct proc *));
void vmspace_free __P((struct vmspace *));
void vnode_pager_setsize __P((struct vnode *, vm_ooffset_t));
void vnode_pager_umount __P((struct mount *));
diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c
index 2116a0e..b49b25f 100644
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -59,7 +59,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_glue.c,v 1.61 1997/02/22 09:48:17 peter Exp $
+ * $Id: vm_glue.c,v 1.62 1997/04/07 07:16:04 peter Exp $
*/
#include "opt_rlimit.h"
@@ -211,14 +211,16 @@ vm_fork(p1, p2, flags)
pmap_t pvp;
vm_object_t upobj;
+ if (flags & RFMEM) {
+ p2->p_vmspace = p1->p_vmspace;
+ p1->p_vmspace->vm_refcnt++;
+ }
+
while ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min) {
VM_WAIT;
}
- if (flags & RFMEM) {
- p2->p_vmspace = p1->p_vmspace;
- p1->p_vmspace->vm_refcnt++;
- } else {
+ if ((flags & RFMEM) == 0) {
p2->p_vmspace = vmspace_fork(p1->p_vmspace);
if (p1->p_vmspace->vm_shm)
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index 7a1de30..beb4c40 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_map.c,v 1.74 1997/04/06 03:04:31 dyson Exp $
+ * $Id: vm_map.c,v 1.75 1997/04/07 07:16:05 peter Exp $
*/
/*
@@ -2231,6 +2231,58 @@ vmspace_fork(vm1)
}
/*
+ * Unshare the specified VM space for exec. If other processes are
+ * mapped to it, then create a new one. The new vmspace is null.
+ */
+
+void
+vmspace_exec(struct proc *p) {
+ struct vmspace *oldvmspace = p->p_vmspace;
+ struct vmspace *newvmspace;
+ vm_map_t map = &p->p_vmspace->vm_map;
+
+ newvmspace = vmspace_alloc(map->min_offset, map->max_offset,
+ map->entries_pageable);
+ bcopy(&oldvmspace->vm_startcopy, &newvmspace->vm_startcopy,
+ (caddr_t) (newvmspace + 1) - (caddr_t) &newvmspace->vm_startcopy);
+ /*
+ * This code is written like this for prototype purposes. The
+ * goal is to avoid running down the vmspace here, but let the
+ * other process's that are still using the vmspace to finally
+ * run it down. Even though there is little or no chance of blocking
+ * here, it is a good idea to keep this form for future mods.
+ */
+ vm_map_reference(&oldvmspace->vm_map);
+ vmspace_free(oldvmspace);
+ p->p_vmspace = newvmspace;
+ if (p == curproc)
+ pmap_activate(p);
+ vm_map_deallocate(&oldvmspace->vm_map);
+}
+
+/*
+ * Unshare the specified VM space for forcing COW. This
+ * is called by rfork, for the (RFMEM|RFPROC) == 0 case.
+ */
+
+void
+vmspace_unshare(struct proc *p) {
+ struct vmspace *oldvmspace = p->p_vmspace;
+ struct vmspace *newvmspace;
+
+ if (oldvmspace->vm_refcnt == 1)
+ return;
+ newvmspace = vmspace_fork(oldvmspace);
+ vm_map_reference(&oldvmspace->vm_map);
+ vmspace_free(oldvmspace);
+ p->p_vmspace = newvmspace;
+ if (p == curproc)
+ pmap_activate(p);
+ vm_map_deallocate(&oldvmspace->vm_map);
+}
+
+
+/*
* vm_map_lookup:
*
* Finds the VM object, offset, and
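As a purely hypothetical sketch of how the new interface is meant to be consumed on exec (not part of this diff), an exec path could divorce a shared address space along these lines; only vmspace_exec() and the vm_refcnt check come from the sources above, while the helper name and the header set are assumptions.

#include <sys/param.h>
#include <sys/proc.h>
#include <vm/vm.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>

/*
 * Hypothetical helper: make sure the process owns its address space before
 * exec tears it down and rebuilds it.
 */
static void
exec_divorce_vmspace(struct proc *p)
{
        /*
         * If other rfork(RFMEM) sharers still hold references, swap in a
         * fresh, empty vmspace; the old one is only run down when the last
         * sharer drops it, as the comment in vmspace_exec() explains.
         */
        if (p->p_vmspace->vm_refcnt > 1)
                vmspace_exec(p);

        /* From here on, modifying p->p_vmspace cannot affect other threads. */
}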