summaryrefslogtreecommitdiffstats
path: root/sys
diff options
context:
space:
mode:
authordyson <dyson@FreeBSD.org>1996-04-03 05:23:44 +0000
committerdyson <dyson@FreeBSD.org>1996-04-03 05:23:44 +0000
commitceb23bccb60c64de9d170e4b901a67f038e021fc (patch)
tree293823deb47769ff95cf0e675a7ce280e7ad2cf2 /sys
parent14009d1ae054998506dbe2ac7d4b3b8d471e0df0 (diff)
downloadFreeBSD-src-ceb23bccb60c64de9d170e4b901a67f038e021fc.zip
FreeBSD-src-ceb23bccb60c64de9d170e4b901a67f038e021fc.tar.gz
Fixed a problem where the UPAGES of a process were being run down
in a suboptimal manner. I had also noticed some panics that appeared to be at least superficially caused by this problem. Also included are some minor mods to support more general handling of page-table page faulting. More details in a future commit.
Diffstat (limited to 'sys')
-rw-r--r--sys/amd64/include/pmap.h6
-rw-r--r--sys/i386/include/pmap.h6
-rw-r--r--sys/vm/vm_glue.c48
-rw-r--r--sys/vm/vm_map.c10
4 files changed, 50 insertions, 20 deletions
diff --git a/sys/amd64/include/pmap.h b/sys/amd64/include/pmap.h
index 388eb99..3f54f18 100644
--- a/sys/amd64/include/pmap.h
+++ b/sys/amd64/include/pmap.h
@@ -42,7 +42,7 @@
*
* from: hp300: @(#)pmap.h 7.2 (Berkeley) 12/16/90
* from: @(#)pmap.h 7.4 (Berkeley) 5/12/91
- * $Id: pmap.h,v 1.33 1996/02/04 21:20:53 davidg Exp $
+ * $Id: pmap.h,v 1.34 1996/02/25 03:02:53 dyson Exp $
*/
#ifndef _MACHINE_PMAP_H_
@@ -52,6 +52,7 @@
typedef unsigned int *pd_entry_t;
typedef unsigned int *pt_entry_t;
+struct vm_map;
/*
* NKPDE controls the virtual space of the kernel, what ever is left, minus
@@ -145,11 +146,10 @@ pmap_kextract(vm_offset_t va)
struct pmap {
pd_entry_t *pm_pdir; /* KVA of page directory */
- boolean_t pm_pdchanged; /* pdir changed */
short pm_dref; /* page directory ref count */
short pm_count; /* pmap reference count */
struct pmap_statistics pm_stats; /* pmap statistics */
- long pm_ptpages; /* more stats: PT pages */
+ struct vm_map *pm_map; /* map that owns this pmap */
};
typedef struct pmap *pmap_t;
diff --git a/sys/i386/include/pmap.h b/sys/i386/include/pmap.h
index 388eb99..3f54f18 100644
--- a/sys/i386/include/pmap.h
+++ b/sys/i386/include/pmap.h
@@ -42,7 +42,7 @@
*
* from: hp300: @(#)pmap.h 7.2 (Berkeley) 12/16/90
* from: @(#)pmap.h 7.4 (Berkeley) 5/12/91
- * $Id: pmap.h,v 1.33 1996/02/04 21:20:53 davidg Exp $
+ * $Id: pmap.h,v 1.34 1996/02/25 03:02:53 dyson Exp $
*/
#ifndef _MACHINE_PMAP_H_
@@ -52,6 +52,7 @@
typedef unsigned int *pd_entry_t;
typedef unsigned int *pt_entry_t;
+struct vm_map;
/*
* NKPDE controls the virtual space of the kernel, what ever is left, minus
@@ -145,11 +146,10 @@ pmap_kextract(vm_offset_t va)
struct pmap {
pd_entry_t *pm_pdir; /* KVA of page directory */
- boolean_t pm_pdchanged; /* pdir changed */
short pm_dref; /* page directory ref count */
short pm_count; /* pmap reference count */
struct pmap_statistics pm_stats; /* pmap statistics */
- long pm_ptpages; /* more stats: PT pages */
+ struct vm_map *pm_map; /* map that owns this pmap */
};
typedef struct pmap *pmap_t;
diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c
index 999c356..96abfb2 100644
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -59,7 +59,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_glue.c,v 1.42 1996/03/09 06:57:53 dyson Exp $
+ * $Id: vm_glue.c,v 1.43 1996/03/11 06:11:39 hsu Exp $
*/
#include "opt_ddb.h"
@@ -245,39 +245,70 @@ vm_fork(p1, p2)
map = &p2->p_vmspace->vm_map;
pvp = &p2->p_vmspace->vm_pmap;
- /* get new pagetables and kernel stack */
+ /*
+ * allocate object for the upages
+ */
+ p2->p_vmspace->vm_upages_obj = vm_object_allocate( OBJT_DEFAULT,
+ UPAGES);
+
+ /*
+ * put upages into the address space
+ */
+ error = vm_map_find(map, p2->p_vmspace->vm_upages_obj, 0,
+ &addr, UPT_MIN_ADDRESS - addr, FALSE, VM_PROT_ALL,
+ VM_PROT_ALL, 0);
+ if (error != KERN_SUCCESS)
+ panic("vm_fork: vm_map_find (UPAGES) failed, addr=0x%x, error=%d", addr, error);
+
+ addr += UPAGES * PAGE_SIZE;
+ /* allocate space for page tables */
error = vm_map_find(map, NULL, 0, &addr, UPT_MAX_ADDRESS - addr, FALSE,
VM_PROT_ALL, VM_PROT_ALL, 0);
if (error != KERN_SUCCESS)
- panic("vm_fork: vm_map_find failed, addr=0x%x, error=%d", addr, error);
+ panic("vm_fork: vm_map_find (PTES) failed, addr=0x%x, error=%d", addr, error);
/* get a kernel virtual address for the UPAGES for this proc */
up = (struct user *) kmem_alloc_pageable(u_map, UPAGES * PAGE_SIZE);
if (up == NULL)
panic("vm_fork: u_map allocation failed");
- p2->p_vmspace->vm_upages_obj = vm_object_allocate( OBJT_DEFAULT,
- UPAGES);
-
+ /*
+ * create a pagetable page for the UPAGES in the process address space
+ */
ptaddr = trunc_page((u_int) vtopte(kstack));
(void) vm_fault(map, ptaddr, VM_PROT_READ|VM_PROT_WRITE, FALSE);
ptpa = pmap_extract(pvp, ptaddr);
if (ptpa == 0) {
panic("vm_fork: no pte for UPAGES");
}
+
+ /*
+ * hold the page table page for the kernel stack, and fault them in
+ */
stkm = PHYS_TO_VM_PAGE(ptpa);
vm_page_hold(stkm);
for(i=0;i<UPAGES;i++) {
vm_page_t m;
+ /*
+ * Get a kernel stack page
+ */
while ((m = vm_page_alloc(p2->p_vmspace->vm_upages_obj,
i, VM_ALLOC_NORMAL)) == NULL) {
VM_WAIT;
}
+ /*
+ * Wire the page
+ */
vm_page_wire(m);
m->flags &= ~PG_BUSY;
+
+ /*
+ * Enter the page into both the kernel and the process
+ * address space.
+ */
pmap_enter( pvp, (vm_offset_t) kstack + i * PAGE_SIZE,
VM_PAGE_TO_PHYS(m), VM_PROT_READ|VM_PROT_WRITE, 1);
pmap_kenter(((vm_offset_t) up) + i * PAGE_SIZE,
@@ -285,9 +316,12 @@ vm_fork(p1, p2)
m->flags &= ~PG_ZERO;
m->valid = VM_PAGE_BITS_ALL;
}
+ /*
+ * The page table page for the kernel stack should be held in memory
+ * now.
+ */
vm_page_unhold(stkm);
-
p2->p_addr = up;
/*
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index f3ad976..26b6446 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_map.c,v 1.40 1996/03/28 04:22:17 dyson Exp $
+ * $Id: vm_map.c,v 1.41 1996/03/28 04:53:24 dyson Exp $
*/
/*
@@ -226,7 +226,8 @@ vmspace_alloc(min, max, pageable)
bzero(vm, (caddr_t) &vm->vm_startcopy - (caddr_t) vm);
vm_map_init(&vm->vm_map, min, max, pageable);
pmap_pinit(&vm->vm_pmap);
- vm->vm_map.pmap = &vm->vm_pmap; /* XXX */
+ vm->vm_map.pmap = &vm->vm_pmap; /* XXX */
+ vm->vm_pmap.pm_map = &vm->vm_map;
vm->vm_refcnt = 1;
return (vm);
}
@@ -242,10 +243,6 @@ vmspace_free(vm)
if (--vm->vm_refcnt == 0) {
int s, i;
-/*
- pmap_remove(&vm->vm_pmap, (vm_offset_t) kstack, (vm_offset_t) kstack+UPAGES*PAGE_SIZE);
-*/
-
/*
* Lock the map, to wait out all other references to it.
* Delete all of the mappings and pages they hold, then call
@@ -254,7 +251,6 @@ vmspace_free(vm)
vm_map_lock(&vm->vm_map);
(void) vm_map_delete(&vm->vm_map, vm->vm_map.min_offset,
vm->vm_map.max_offset);
- vm_object_deallocate(vm->vm_upages_obj);
vm_map_unlock(&vm->vm_map);
while( vm->vm_map.ref_count != 1)
tsleep(&vm->vm_map.ref_count, PVM, "vmsfre", 0);
OpenPOWER on IntegriCloud