author    dyson <dyson@FreeBSD.org>  1996-05-18 03:38:05 +0000
committer dyson <dyson@FreeBSD.org>  1996-05-18 03:38:05 +0000
commit    242e10df110e682d1d2f90d9734a4fd3436f24c1 (patch)
tree      fd8af1ab66bdbd2395179d258c5c00c4a695208d /sys/kern
parent    3c6405efe575ceb7213f2bf914099da9523800cd (diff)
This set of commits to the VM system does the following, and contains
contributions or ideas from Stephen McKay <syssgm@devetir.qld.gov.au>,
Alan Cox <alc@cs.rice.edu>, David Greenman <davidg@freebsd.org> and me:

More usage of the TAILQ macros.  Additional minor fix to queue.h.

Performance enhancements to the pageout daemon:
	Addition of a wait in the case that the pageout daemon
	has to run immediately.
	Slightly modified pageout algorithm.

Significant revamp of the pmap/fork code:
	1) PTEs and UPAGES are NO LONGER in the process's map.
	2) PTEs and UPAGES reside in their own objects.
	3) TOTAL elimination of recursive page table page faults.
	4) The page directory now resides in the PTE object.
	5) Implemented pmap_copy, thereby speeding up fork time.
	6) Changed the pv entries so that the head is a pointer
	   and not an entire entry.
	7) Significant cleanup of pmap_protect and pmap_remove.
	8) Removed significant amounts of machine-dependent fork code
	   from vm_glue; pushed much of that code into the
	   machine-dependent pmap module.
	9) More complete support for reusing already-zeroed pages
	   (page table pages and page directories).

Performance and code cleanups in vm_map:
	1) Improved and simplified allocation of map entries.
	2) Improved vm_map_copy code.
	3) Corrected some minor problems in the simplify code.

Implemented splvm (a combination of splbio and splimp); the VM code now
seldom uses splhigh.

Improved the speed of and simplified kmem_malloc.

Minor modification to vm_fault to avoid using pre-zeroed pages for
objects with backing objects, along with the already existing condition
of having a vnode.  (If there is a backing object, there will likely be
a COW; with a COW, it isn't necessary to start with a pre-zeroed page.)

Minor reorganization of the source to perhaps improve locality of
reference.
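Editor's note: the vfs_bio.c hunks below replace direct accesses to the
tqh_first/tqe_next fields with the TAILQ_FIRST and TAILQ_NEXT macros from
<sys/queue.h>, for example in count_lock_queue().  The following is a
minimal userland sketch of that idiom only; the buf_like structure and
queue names are illustrative stand-ins, not the kernel's buf/bufqueues.

	#include <sys/queue.h>
	#include <stdio.h>
	#include <stdlib.h>

	/* Illustrative stand-in for a buffer on a free list. */
	struct buf_like {
		int			b_id;
		TAILQ_ENTRY(buf_like)	b_freelist;	/* linkage, like b_freelist in struct buf */
	};

	TAILQ_HEAD(bufq, buf_like);

	int
	main(void)
	{
		struct bufq q = TAILQ_HEAD_INITIALIZER(q);
		struct buf_like *bp;
		int i, count;

		for (i = 0; i < 3; i++) {
			bp = malloc(sizeof(*bp));
			bp->b_id = i;
			TAILQ_INSERT_TAIL(&q, bp, b_freelist);
		}

		/*
		 * Walk the queue through the macros instead of touching
		 * tqh_first/tqe_next directly, as count_lock_queue() now does.
		 */
		count = 0;
		for (bp = TAILQ_FIRST(&q); bp != NULL;
		    bp = TAILQ_NEXT(bp, b_freelist))
			count++;

		printf("%d buffers queued\n", count);
		return (0);
	}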
Diffstat (limited to 'sys/kern')
-rw-r--r--	sys/kern/kern_exec.c	10
-rw-r--r--	sys/kern/subr_trap.c	16
-rw-r--r--	sys/kern/vfs_bio.c	21
3 files changed, 15 insertions, 32 deletions
diff --git a/sys/kern/kern_exec.c b/sys/kern/kern_exec.c
index 50a5388..12dd958 100644
--- a/sys/kern/kern_exec.c
+++ b/sys/kern/kern_exec.c
@@ -23,7 +23,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: kern_exec.c,v 1.39 1996/04/29 15:07:59 smpatel Exp $
+ * $Id: kern_exec.c,v 1.40 1996/05/01 02:42:47 bde Exp $
*/
#include <sys/param.h>
@@ -171,7 +171,7 @@ interpret:
* Map the image header (first page) of the file into
* kernel address space
*/
- error = vm_mmap(kernel_map, /* map */
+ error = vm_mmap(exech_map, /* map */
(vm_offset_t *)&imgp->image_header, /* address */
PAGE_SIZE, /* size */
VM_PROT_READ, /* protection */
@@ -206,7 +206,7 @@ interpret:
/* free old vnode and name buffer */
vrele(ndp->ni_vp);
FREE(ndp->ni_cnd.cn_pnbuf, M_NAMEI);
- if (vm_map_remove(kernel_map, (vm_offset_t)imgp->image_header,
+ if (vm_map_remove(exech_map, (vm_offset_t)imgp->image_header,
(vm_offset_t)imgp->image_header + PAGE_SIZE))
panic("execve: header dealloc failed (1)");
@@ -319,7 +319,7 @@ interpret:
* free various allocated resources
*/
kmem_free(exec_map, (vm_offset_t)imgp->stringbase, ARG_MAX);
- if (vm_map_remove(kernel_map, (vm_offset_t)imgp->image_header,
+ if (vm_map_remove(exech_map, (vm_offset_t)imgp->image_header,
(vm_offset_t)imgp->image_header + PAGE_SIZE))
panic("execve: header dealloc failed (2)");
vrele(ndp->ni_vp);
@@ -331,7 +331,7 @@ exec_fail_dealloc:
if (imgp->stringbase != NULL)
kmem_free(exec_map, (vm_offset_t)imgp->stringbase, ARG_MAX);
if (imgp->image_header && imgp->image_header != (char *)-1)
- if (vm_map_remove(kernel_map, (vm_offset_t)imgp->image_header,
+ if (vm_map_remove(exech_map, (vm_offset_t)imgp->image_header,
(vm_offset_t)imgp->image_header + PAGE_SIZE))
panic("execve: header dealloc failed (3)");
if (ndp->ni_vp)
diff --git a/sys/kern/subr_trap.c b/sys/kern/subr_trap.c
index c97e50a..b81cfc1 100644
--- a/sys/kern/subr_trap.c
+++ b/sys/kern/subr_trap.c
@@ -35,7 +35,7 @@
* SUCH DAMAGE.
*
* from: @(#)trap.c 7.4 (Berkeley) 5/13/91
- * $Id: trap.c,v 1.74 1996/03/27 17:33:39 bde Exp $
+ * $Id: trap.c,v 1.75 1996/03/28 05:40:57 dyson Exp $
*/
/*
@@ -806,24 +806,10 @@ int trapwrite(addr)
v = trunc_page(vtopte(va));
/*
- * wire the pte page
- */
- if (va < USRSTACK) {
- vm_map_pageable(&vm->vm_map, v, round_page(v+1), FALSE);
- }
-
- /*
* fault the data page
*/
rv = vm_fault(&vm->vm_map, va, VM_PROT_READ|VM_PROT_WRITE, FALSE);
- /*
- * unwire the pte page
- */
- if (va < USRSTACK) {
- vm_map_pageable(&vm->vm_map, v, round_page(v+1), TRUE);
- }
-
--p->p_lock;
if (rv != KERN_SUCCESS)
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index d449b94..cb76f05 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -18,7 +18,7 @@
* 5. Modifications may be freely made to this file if the above conditions
* are met.
*
- * $Id: vfs_bio.c,v 1.88 1996/03/09 06:46:51 dyson Exp $
+ * $Id: vfs_bio.c,v 1.89 1996/05/03 21:01:26 phk Exp $
*/
/*
@@ -509,7 +509,7 @@ brelse(struct buf * bp)
/* buffers with no memory */
if (bp->b_bufsize == 0) {
bp->b_qindex = QUEUE_EMPTY;
- TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
+ TAILQ_INSERT_HEAD(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
LIST_REMOVE(bp, b_hash);
LIST_INSERT_HEAD(&invalhash, bp, b_hash);
bp->b_dev = NODEV;
@@ -742,7 +742,7 @@ start:
goto trytofreespace;
/* can we constitute a new buffer? */
- if ((bp = bufqueues[QUEUE_EMPTY].tqh_first)) {
+ if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY]))) {
if (bp->b_qindex != QUEUE_EMPTY)
panic("getnewbuf: inconsistent EMPTY queue, qindex=%d",
bp->b_qindex);
@@ -756,11 +756,11 @@ trytofreespace:
* This is desirable because file data is cached in the
* VM/Buffer cache even if a buffer is freed.
*/
- if ((bp = bufqueues[QUEUE_AGE].tqh_first)) {
+ if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_AGE]))) {
if (bp->b_qindex != QUEUE_AGE)
panic("getnewbuf: inconsistent AGE queue, qindex=%d",
bp->b_qindex);
- } else if ((bp = bufqueues[QUEUE_LRU].tqh_first)) {
+ } else if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_LRU]))) {
if (bp->b_qindex != QUEUE_LRU)
panic("getnewbuf: inconsistent LRU queue, qindex=%d",
bp->b_qindex);
@@ -783,7 +783,7 @@ trytofreespace:
(vmiospace < maxvmiobufspace)) {
--bp->b_usecount;
TAILQ_REMOVE(&bufqueues[QUEUE_LRU], bp, b_freelist);
- if (bufqueues[QUEUE_LRU].tqh_first != NULL) {
+ if (TAILQ_FIRST(&bufqueues[QUEUE_LRU]) != NULL) {
TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
goto start;
}
@@ -1498,9 +1498,9 @@ count_lock_queue()
struct buf *bp;
count = 0;
- for (bp = bufqueues[QUEUE_LOCKED].tqh_first;
+ for (bp = TAILQ_FIRST(&bufqueues[QUEUE_LOCKED]);
bp != NULL;
- bp = bp->b_freelist.tqe_next)
+ bp = TAILQ_NEXT(bp, b_freelist))
count++;
return (count);
}
@@ -1663,7 +1663,6 @@ vfs_clean_pages(struct buf * bp)
void
vfs_bio_clrbuf(struct buf *bp) {
int i;
- int remapbuffer = 0;
if( bp->b_flags & B_VMIO) {
if( (bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE)) {
int mask;
@@ -1691,14 +1690,12 @@ vfs_bio_clrbuf(struct buf *bp) {
bzero(bp->b_data + (i << PAGE_SHIFT) + j * DEV_BSIZE, DEV_BSIZE);
}
}
- bp->b_pages[i]->valid = VM_PAGE_BITS_ALL;
+ /* bp->b_pages[i]->valid = VM_PAGE_BITS_ALL; */
}
bp->b_resid = 0;
} else {
clrbuf(bp);
}
- if (remapbuffer)
- pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
}
/*