author    dyson <dyson@FreeBSD.org>    1998-01-22 17:30:44 +0000
committer dyson <dyson@FreeBSD.org>    1998-01-22 17:30:44 +0000
commit    197bd655c435302ddb4156bc654705dfef1d9143 (patch)
tree      a91dcc7eb7507f4d0088eb5e1b2e6872c51fd3ea /sys/vm
parent    f95fe9806cd36e8b1fa77d590b5d1c2d48482618 (diff)
VM level code cleanups.
1) Start using TSM. Struct procs continue to point to upages structure, after being freed. Struct vmspace continues to point to pte object and kva space for kstack. u_map is now superfluous.
2) vm_map's don't need to be reference counted. They always exist either in the kernel or in a vmspace. The vmspaces are managed by reference counts.
3) Remove the "wired" vm_map nonsense.
4) No need to keep a cache of kernel stack kva's.
5) Get rid of strange looking ++var, and change to var++.
6) Change more data structures to use our "zone" allocator. Added struct proc, struct vmspace and struct vnode. This saves a significant amount of kva space and physical memory. Additionally, this enables TSM for the zone managed memory.
7) Keep ioopt disabled for now.
8) Remove the now bogus "single use" map concept.
9) Use generation counts or id's for data structures residing in TSM, where it allows us to avoid unneeded restart overhead during traversals, where blocking might occur.
10) Account better for memory deficits, so the pageout daemon will be able to make enough memory available (experimental).
11) Fix some vnode locking problems. (From Tor, I think.)
12) Add a check in ufs_lookup, to avoid lots of unneeded calls to bcmp (experimental).
13) Significantly shrink, clean up, and make slightly faster the vm_fault.c code. Use generation counts, get rid of unneeded collapse operations, and clean up the cluster code.
14) Make vm_zone more suitable for TSM.

This commit is partially a result of discussions and contributions from other people, including DG, Tor Egge, PHK, and probably others that I have forgotten to attribute (so let me know, if I forgot).

This is not the infamous, final cleanup of the vnode stuff, but a necessary step. Vnode mgmt should be correct, but things might still change, and there is still some missing stuff (like ioopt, physical backing of non-merged cache files, and debugging of layering concepts).
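The generation-count idiom from items 9) and 13), which shows up below as map_generation/map->timestamp in vm_fault.c and curgeneration in vm_object_page_clean(), can be summarized with a minimal userland sketch. All names here are hypothetical, not the kernel code: the point is only that a counter is bumped on every structural change, snapshotted before a scan that may block, and compared afterwards so the scan restarts only when something actually changed.

    #include <stddef.h>

    /* Hypothetical structures standing in for a VM object and its pages. */
    struct page {
            struct page *next;
            int dirty;
    };

    struct object {
            int generation;         /* bumped on every page insert/remove */
            struct page *pages;
    };

    /* Stand-in for an operation that may sleep and let other code run. */
    static void
    maybe_block(struct object *obj)
    {
            (void)obj;
    }

    static int
    clean_pages(struct object *obj)
    {
            struct page *p;
            int curgeneration, cleaned = 0;

    rescan:
            curgeneration = obj->generation;
            for (p = obj->pages; p != NULL; p = p->next) {
                    if (p->dirty) {
                            maybe_block(obj);       /* e.g. wait on a busy page */
                            /* Restart only if the list changed while blocked. */
                            if (obj->generation != curgeneration)
                                    goto rescan;
                            p->dirty = 0;
                            cleaned++;
                    }
            }
            return (cleaned);
    }

Compared with the old code, which restarted unconditionally after every potential sleep, the unconditional "goto rescan" becomes conditional on the generation check, which is the restart-overhead saving item 9) describes.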
Diffstat (limited to 'sys/vm')
-rw-r--r--  sys/vm/pmap.h         5
-rw-r--r--  sys/vm/swap_pager.c  19
-rw-r--r--  sys/vm/vm_extern.h    6
-rw-r--r--  sys/vm/vm_fault.c   259
-rw-r--r--  sys/vm/vm_glue.c      5
-rw-r--r--  sys/vm/vm_kern.c      9
-rw-r--r--  sys/vm/vm_map.c     283
-rw-r--r--  sys/vm/vm_map.h      11
-rw-r--r--  sys/vm/vm_object.c   34
-rw-r--r--  sys/vm/vm_object.h    4
-rw-r--r--  sys/vm/vm_page.c     22
-rw-r--r--  sys/vm/vm_pageout.c  16
-rw-r--r--  sys/vm/vm_pageout.h   3
-rw-r--r--  sys/vm/vm_zone.c     18
14 files changed, 284 insertions, 410 deletions
diff --git a/sys/vm/pmap.h b/sys/vm/pmap.h
index 291cb01..f38c45b 100644
--- a/sys/vm/pmap.h
+++ b/sys/vm/pmap.h
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: pmap.h,v 1.24 1997/08/05 23:03:24 dyson Exp $
+ * $Id: pmap.h,v 1.25 1997/12/14 02:10:30 dyson Exp $
*/
/*
@@ -123,8 +123,7 @@ void pmap_release __P((pmap_t));
void pmap_remove __P((pmap_t, vm_offset_t, vm_offset_t));
void pmap_remove_pages __P((pmap_t, vm_offset_t, vm_offset_t));
void pmap_zero_page __P((vm_offset_t));
-void pmap_prefault __P((pmap_t pmap, vm_offset_t addra,
- vm_map_entry_t entry, vm_object_t object));
+void pmap_prefault __P((pmap_t, vm_offset_t, vm_map_entry_t));
int pmap_mincore __P((pmap_t pmap, vm_offset_t addr));
void pmap_new_proc __P((struct proc *p));
void pmap_dispose_proc __P((struct proc *p));
diff --git a/sys/vm/swap_pager.c b/sys/vm/swap_pager.c
index 5ba0224..f5fd447 100644
--- a/sys/vm/swap_pager.c
+++ b/sys/vm/swap_pager.c
@@ -39,7 +39,7 @@
* from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
*
* @(#)swap_pager.c 8.9 (Berkeley) 3/21/94
- * $Id: swap_pager.c,v 1.80 1997/12/24 15:05:21 dyson Exp $
+ * $Id: swap_pager.c,v 1.81 1998/01/17 09:16:47 dyson Exp $
*/
/*
@@ -477,30 +477,31 @@ swap_pager_free_swap(object)
/*
* Free left over swap blocks
*/
- s = splvm();
swb = object->un_pager.swp.swp_blocks;
- if (!swb)
+ if (swb == NULL) {
return;
+ }
+ s = splvm();
for (i = 0; i < object->un_pager.swp.swp_nblocks; i++, swb++) {
for (j = 0; j < SWB_NPAGES; j++) {
if (swb->swb_block[j] != SWB_EMPTY) {
/*
- * initially the length of the run is zero
- */
+ * initially the length of the run is zero
+ */
if (block_count == 0) {
first_block = swb->swb_block[j];
block_count = btodb(PAGE_SIZE);
swb->swb_block[j] = SWB_EMPTY;
/*
- * if the new block can be included into the current run
- */
+ * if the new block can be included into the current run
+ */
} else if (swb->swb_block[j] == first_block + block_count) {
block_count += btodb(PAGE_SIZE);
swb->swb_block[j] = SWB_EMPTY;
/*
- * terminate the previous run, and start a new one
- */
+ * terminate the previous run, and start a new one
+ */
} else {
swap_pager_freeswapspace(object, first_block,
(unsigned) first_block + block_count - 1);
diff --git a/sys/vm/vm_extern.h b/sys/vm/vm_extern.h
index f8780d4..a7d2dfe 100644
--- a/sys/vm/vm_extern.h
+++ b/sys/vm/vm_extern.h
@@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)vm_extern.h 8.2 (Berkeley) 1/12/94
- * $Id: vm_extern.h,v 1.35 1997/12/06 02:23:27 dyson Exp $
+ * $Id: vm_extern.h,v 1.36 1997/12/31 02:35:29 alex Exp $
*/
#ifndef _VM_EXTERN_H_
@@ -70,7 +70,7 @@ void kmem_free __P((vm_map_t, vm_offset_t, vm_size_t));
void kmem_free_wakeup __P((vm_map_t, vm_offset_t, vm_size_t));
void kmem_init __P((vm_offset_t, vm_offset_t));
vm_offset_t kmem_malloc __P((vm_map_t, vm_size_t, boolean_t));
-vm_map_t kmem_suballoc __P((vm_map_t, vm_offset_t *, vm_offset_t *, vm_size_t, boolean_t));
+vm_map_t kmem_suballoc __P((vm_map_t, vm_offset_t *, vm_offset_t *, vm_size_t));
void munmapfd __P((struct proc *, int));
int pager_cache __P((vm_object_t, boolean_t));
int swaponvp __P((struct proc *, struct vnode *, dev_t , u_long));
@@ -86,7 +86,7 @@ int vm_mmap __P((vm_map_t, vm_offset_t *, vm_size_t, vm_prot_t, vm_prot_t, int,
vm_offset_t vm_page_alloc_contig __P((vm_offset_t, vm_offset_t, vm_offset_t, vm_offset_t));
void vm_set_page_size __P((void));
void vmmeter __P((void));
-struct vmspace *vmspace_alloc __P((vm_offset_t, vm_offset_t, int));
+struct vmspace *vmspace_alloc __P((vm_offset_t, vm_offset_t));
struct vmspace *vmspace_fork __P((struct vmspace *));
void vmspace_exec __P((struct proc *));
void vmspace_unshare __P((struct proc *));
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index 65dcaa5..62cda5a 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -66,7 +66,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_fault.c,v 1.74 1998/01/12 01:44:25 dyson Exp $
+ * $Id: vm_fault.c,v 1.75 1998/01/17 09:16:49 dyson Exp $
*/
/*
@@ -131,12 +131,13 @@ vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type, int fault_flags)
vm_prot_t prot;
int result;
boolean_t wired;
- boolean_t su;
boolean_t lookup_still_valid;
+ int map_generation;
vm_page_t old_m;
vm_object_t next_object;
vm_page_t marray[VM_FAULT_READ];
int hardfault = 0;
+ int faultcount;
struct vnode *vp = NULL;
struct proc *p = curproc; /* XXX */
@@ -184,6 +185,7 @@ vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type, int fault_flags)
RetryFault:;
+ faultcount = 0;
/*
* Find the backing store object and offset into it to begin the
@@ -191,7 +193,7 @@ RetryFault:;
*/
if ((result = vm_map_lookup(&map, vaddr,
fault_type, &entry, &first_object,
- &first_pindex, &prot, &wired, &su)) != KERN_SUCCESS) {
+ &first_pindex, &prot, &wired)) != KERN_SUCCESS) {
if ((result != KERN_PROTECTION_FAILURE) ||
((fault_flags & VM_FAULT_WIRE_MASK) != VM_FAULT_USER_WIRE)) {
return result;
@@ -206,7 +208,7 @@ RetryFault:;
*/
result = vm_map_lookup(&map, vaddr,
VM_PROT_READ|VM_PROT_WRITE|VM_PROT_OVERRIDE_WRITE,
- &entry, &first_object, &first_pindex, &prot, &wired, &su);
+ &entry, &first_object, &first_pindex, &prot, &wired);
if (result != KERN_SUCCESS) {
return result;
}
@@ -220,6 +222,8 @@ RetryFault:;
entry->max_protection &= ~VM_PROT_WRITE;
}
+ map_generation = map->timestamp;
+
if (entry->eflags & MAP_ENTRY_NOFAULT) {
panic("vm_fault: fault on nofault entry, addr: %lx",
vaddr);
@@ -363,15 +367,20 @@ readrest:
if (object->type != OBJT_DEFAULT &&
(((fault_flags & VM_FAULT_WIRE_MASK) == 0) || wired)) {
int rv;
- int faultcount;
int reqpage;
int ahead, behind;
- ahead = VM_FAULT_READ_AHEAD;
- behind = VM_FAULT_READ_BEHIND;
if (first_object->behavior == OBJ_RANDOM) {
ahead = 0;
behind = 0;
+ } else {
+ behind = (vaddr - entry->start) >> PAGE_SHIFT;
+ if (behind > VM_FAULT_READ_BEHIND)
+ behind = VM_FAULT_READ_BEHIND;
+
+ ahead = ((entry->end - vaddr) >> PAGE_SHIFT) - 1;
+ if (ahead > VM_FAULT_READ_AHEAD)
+ ahead = VM_FAULT_READ_AHEAD;
}
if ((first_object->type != OBJT_DEVICE) &&
@@ -568,7 +577,7 @@ readrest:
* first object. Note that we must mark the page dirty in the
* first object so that it will go out to swap when needed.
*/
- if (lookup_still_valid &&
+ if (map_generation == map->timestamp &&
/*
* Only one shadow object
*/
@@ -589,9 +598,18 @@ readrest:
/*
* We don't chase down the shadow chain
*/
- (object == first_object->backing_object)) {
+ (object == first_object->backing_object) &&
/*
+ * grab the lock if we need to
+ */
+ (lookup_still_valid ||
+ (((entry->eflags & MAP_ENTRY_IS_A_MAP) == 0) &&
+ lockmgr(&map->lock,
+ LK_EXCLUSIVE|LK_NOWAIT, (void *)0, curproc) == 0))) {
+
+ lookup_still_valid = 1;
+ /*
* get rid of the unnecessary page
*/
vm_page_protect(first_m, VM_PROT_NONE);
@@ -611,91 +629,12 @@ readrest:
vm_page_copy(m, first_m);
}
- /*
- * This code handles the case where there are two references to the
- * backing object, and one reference is getting a copy of the
- * page. If the other reference is the only other object that
- * points to the backing object, then perform a virtual copy
- * from the backing object to the other object after the
- * page is copied to the current first_object. If the other
- * object already has the page, we destroy it in the backing object
- * performing an optimized collapse-type operation. We don't
- * bother removing the page from the backing object's swap space.
- */
- if (lookup_still_valid &&
- /*
- * make sure that we have two shadow objs
- */
- (object->shadow_count == 2) &&
- /*
- * And no COW refs -- note that there are sometimes
- * temp refs to objs, but ignore that case -- we just
- * punt.
- */
- (object->ref_count == 2) &&
- /*
- * Noone else can look us up
- */
- (object->handle == NULL) &&
- /*
- * Not something that can be referenced elsewhere
- */
- ((object->type == OBJT_DEFAULT) ||
- (object->type == OBJT_SWAP)) &&
- /*
- * We don't bother chasing down object chain
- */
- (object == first_object->backing_object)) {
-
- vm_object_t other_object;
- vm_pindex_t other_pindex, other_pindex_offset;
- vm_page_t tm;
-
- other_object = TAILQ_FIRST(&object->shadow_head);
- if (other_object == first_object)
- other_object = TAILQ_NEXT(other_object, shadow_list);
- if (!other_object)
- panic("vm_fault: other object missing");
- if (other_object &&
- (other_object->type == OBJT_DEFAULT) &&
- (other_object->paging_in_progress == 0)) {
- other_pindex_offset =
- OFF_TO_IDX(other_object->backing_object_offset);
- if (pindex >= other_pindex_offset) {
- other_pindex = pindex - other_pindex_offset;
- /*
- * If the other object has the page, just free it.
- */
- if ((tm = vm_page_lookup(other_object, other_pindex))) {
- if ((tm->flags & PG_BUSY) == 0 &&
- tm->busy == 0 &&
- tm->valid == VM_PAGE_BITS_ALL) {
- /*
- * get rid of the unnecessary page
- */
- vm_page_protect(m, VM_PROT_NONE);
- PAGE_WAKEUP(m);
- vm_page_free(m);
- m = NULL;
- tm->dirty = VM_PAGE_BITS_ALL;
- first_m->dirty = VM_PAGE_BITS_ALL;
- }
- } else {
- /*
- * If the other object doesn't have the page,
- * then we move it there.
- */
- vm_page_rename(m, other_object, other_pindex);
- m->dirty = VM_PAGE_BITS_ALL;
- m->valid = VM_PAGE_BITS_ALL;
- }
- }
- }
- }
-
if (m) {
- if (m->queue != PQ_ACTIVE)
+ if (m->queue != PQ_ACTIVE) {
vm_page_activate(m);
+ m->act_count = 0;
+ }
+
/*
* We no longer need the old page or object.
*/
@@ -712,16 +651,6 @@ readrest:
object = first_object;
pindex = first_pindex;
- /*
- * Now that we've gotten the copy out of the way,
- * let's try to collapse the top object.
- *
- * But we have to play ugly games with
- * paging_in_progress to do that...
- */
- vm_object_pip_wakeup(object);
- vm_object_collapse(object);
- object->paging_in_progress++;
} else {
prot &= ~VM_PROT_WRITE;
}
@@ -732,7 +661,8 @@ readrest:
* lookup.
*/
- if (!lookup_still_valid) {
+ if (!lookup_still_valid &&
+ (map->timestamp != map_generation)) {
vm_object_t retry_object;
vm_pindex_t retry_pindex;
vm_prot_t retry_prot;
@@ -751,7 +681,8 @@ readrest:
* and will merely take another fault.
*/
result = vm_map_lookup(&map, vaddr, fault_type & ~VM_PROT_WRITE,
- &entry, &retry_object, &retry_pindex, &retry_prot, &wired, &su);
+ &entry, &retry_object, &retry_pindex, &retry_prot, &wired);
+ map_generation = map->timestamp;
/*
* If we don't need the page any longer, put it on the active
@@ -808,8 +739,9 @@ readrest:
m->flags &= ~PG_ZERO;
pmap_enter(map->pmap, vaddr, VM_PAGE_TO_PHYS(m), prot, wired);
- if (((fault_flags & VM_FAULT_WIRE_MASK) == 0) && (wired == 0))
- pmap_prefault(map->pmap, vaddr, entry, first_object);
+ if (((fault_flags & VM_FAULT_WIRE_MASK) == 0) && (wired == 0)) {
+ pmap_prefault(map->pmap, vaddr, entry);
+ }
m->flags |= PG_MAPPED|PG_REFERENCED;
if (fault_flags & VM_FAULT_HOLD)
@@ -912,6 +844,7 @@ vm_fault_user_wire(map, start, end)
* Inform the physical mapping system that the range of addresses may
* not fault, so that page tables and such can be locked down as well.
*/
+
pmap_pageable(pmap, start, end, FALSE);
/*
@@ -1087,12 +1020,10 @@ vm_fault_additional_pages(m, rbehind, rahead, marray, reqpage)
vm_page_t *marray;
int *reqpage;
{
- int i;
+ int i,j;
vm_object_t object;
vm_pindex_t pindex, startpindex, endpindex, tpindex;
- vm_offset_t size;
vm_page_t rtm;
- int treqpage;
int cbehind, cahead;
object = m->object;
@@ -1112,8 +1043,9 @@ vm_fault_additional_pages(m, rbehind, rahead, marray, reqpage)
*/
if (!vm_pager_has_page(object,
- OFF_TO_IDX(object->paging_offset) + pindex, &cbehind, &cahead))
+ OFF_TO_IDX(object->paging_offset) + pindex, &cbehind, &cahead)) {
return 0;
+ }
if ((cbehind == 0) && (cahead == 0)) {
*reqpage = 0;
@@ -1135,91 +1067,78 @@ vm_fault_additional_pages(m, rbehind, rahead, marray, reqpage)
if ((rahead + rbehind) >
((cnt.v_free_count + cnt.v_cache_count) - cnt.v_free_reserved)) {
pagedaemon_wakeup();
- *reqpage = 0;
marray[0] = m;
+ *reqpage = 0;
return 1;
}
/*
- * scan backward for the read behind pages -- in memory or on disk not
- * in same object
+ * scan backward for the read behind pages -- in memory
*/
- tpindex = pindex - 1;
- if (tpindex < pindex) {
- if (rbehind > pindex)
+ if (pindex > 0) {
+ if (rbehind > pindex) {
rbehind = pindex;
- startpindex = pindex - rbehind;
- while (tpindex >= startpindex) {
+ startpindex = 0;
+ } else {
+ startpindex = pindex - rbehind;
+ }
+
+ for ( tpindex = pindex - 1; tpindex >= startpindex; tpindex -= 1) {
if (vm_page_lookup( object, tpindex)) {
startpindex = tpindex + 1;
break;
}
if (tpindex == 0)
break;
- tpindex -= 1;
+ }
+
+ for(i = 0, tpindex = startpindex; tpindex < pindex; i++, tpindex++) {
+
+ rtm = vm_page_alloc(object, tpindex, VM_ALLOC_NORMAL);
+ if (rtm == NULL) {
+ for (j = 0; j < i; j++) {
+ FREE_PAGE(marray[j]);
+ }
+ marray[0] = m;
+ *reqpage = 0;
+ return 1;
+ }
+
+ marray[i] = rtm;
}
} else {
- startpindex = pindex;
+ startpindex = 0;
+ i = 0;
}
+ marray[i] = m;
+ /* page offset of the required page */
+ *reqpage = i;
+
+ tpindex = pindex + 1;
+ i++;
+
/*
- * scan forward for the read ahead pages -- in memory or on disk not
- * in same object
+ * scan forward for the read ahead pages
*/
- tpindex = pindex + 1;
- endpindex = pindex + (rahead + 1);
+ endpindex = tpindex + rahead;
if (endpindex > object->size)
endpindex = object->size;
- while (tpindex < endpindex) {
- if ( vm_page_lookup(object, tpindex)) {
- break;
- }
- tpindex += 1;
- }
- endpindex = tpindex;
- /* calculate number of bytes of pages */
- size = endpindex - startpindex;
+ for( ; tpindex < endpindex; i++, tpindex++) {
- /* calculate the page offset of the required page */
- treqpage = pindex - startpindex;
+ if (vm_page_lookup(object, tpindex)) {
+ break;
+ }
- /* see if we have space (again) */
- if ((cnt.v_free_count + cnt.v_cache_count) >
- (cnt.v_free_reserved + size)) {
- /*
- * get our pages and don't block for them
- */
- for (i = 0; i < size; i++) {
- if (i != treqpage) {
- rtm = vm_page_alloc(object,
- startpindex + i,
- VM_ALLOC_NORMAL);
- if (rtm == NULL) {
- if (i < treqpage) {
- int j;
- for (j = 0; j < i; j++) {
- FREE_PAGE(marray[j]);
- }
- *reqpage = 0;
- marray[0] = m;
- return 1;
- } else {
- size = i;
- *reqpage = treqpage;
- return size;
- }
- }
- marray[i] = rtm;
- } else {
- marray[i] = m;
- }
+ rtm = vm_page_alloc(object, tpindex, VM_ALLOC_NORMAL);
+ if (rtm == NULL) {
+ break;
}
- *reqpage = treqpage;
- return size;
+ marray[i] = rtm;
}
- *reqpage = 0;
- marray[0] = m;
- return 1;
+
+ /* return number of bytes of pages */
+ return i;
}
diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c
index 9654d6f..1c0c05a 100644
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -59,7 +59,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_glue.c,v 1.68 1997/12/06 02:23:30 dyson Exp $
+ * $Id: vm_glue.c,v 1.69 1997/12/11 02:10:55 dyson Exp $
*/
#include "opt_rlimit.h"
@@ -448,7 +448,6 @@ retry:
continue;
++vm->vm_refcnt;
- vm_map_reference(&vm->vm_map);
/*
* do not swapout a process that is waiting for VM
* data structures there is a possible deadlock.
@@ -456,7 +455,6 @@ retry:
if (lockmgr(&vm->vm_map.lock,
LK_EXCLUSIVE | LK_NOWAIT,
(void *)0, curproc)) {
- vm_map_deallocate(&vm->vm_map);
vmspace_free(vm);
continue;
}
@@ -469,7 +467,6 @@ retry:
((action & VM_SWAP_IDLE) &&
(p->p_slptime > swap_idle_threshold2))) {
swapout(p);
- vm_map_deallocate(&vm->vm_map);
vmspace_free(vm);
didswap++;
goto retry;
diff --git a/sys/vm/vm_kern.c b/sys/vm/vm_kern.c
index 9b3bbbc..a20cda4 100644
--- a/sys/vm/vm_kern.c
+++ b/sys/vm/vm_kern.c
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_kern.c,v 1.38 1997/08/02 14:33:26 bde Exp $
+ * $Id: vm_kern.c,v 1.39 1997/08/05 00:01:52 dyson Exp $
*/
/*
@@ -227,11 +227,10 @@ kmem_free(map, addr, size)
* pageable Can the region be paged
*/
vm_map_t
-kmem_suballoc(parent, min, max, size, pageable)
+kmem_suballoc(parent, min, max, size)
register vm_map_t parent;
vm_offset_t *min, *max;
register vm_size_t size;
- boolean_t pageable;
{
register int ret;
vm_map_t result;
@@ -247,7 +246,7 @@ kmem_suballoc(parent, min, max, size, pageable)
}
*max = *min + size;
pmap_reference(vm_map_pmap(parent));
- result = vm_map_create(vm_map_pmap(parent), *min, *max, pageable);
+ result = vm_map_create(vm_map_pmap(parent), *min, *max);
if (result == NULL)
panic("kmem_suballoc: cannot create submap");
if ((ret = vm_map_submap(parent, *min, *max, result)) != KERN_SUCCESS)
@@ -439,7 +438,7 @@ kmem_init(start, end)
{
register vm_map_t m;
- m = vm_map_create(kernel_pmap, VM_MIN_KERNEL_ADDRESS, end, FALSE);
+ m = vm_map_create(kernel_pmap, VM_MIN_KERNEL_ADDRESS, end);
vm_map_lock(m);
/* N.B.: cannot use kgdb to debug, starting with this assignment ... */
kernel_map = m;
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index e6534b4..7afc84c 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_map.c,v 1.106 1998/01/17 09:16:51 dyson Exp $
+ * $Id: vm_map.c,v 1.107 1998/01/21 12:18:00 dyson Exp $
*/
/*
@@ -158,7 +158,7 @@ extern char kstack[];
extern int inmprotect;
static struct vm_zone kmapentzone_store, mapentzone_store, mapzone_store;
-static vm_zone_t mapentzone, kmapentzone, mapzone;
+static vm_zone_t mapentzone, kmapentzone, mapzone, vmspace_zone;
static struct vm_object kmapentobj, mapentobj, mapobj;
#define MAP_ENTRY_INIT 128
struct vm_map_entry map_entry_init[MAX_MAPENT];
@@ -195,18 +195,18 @@ vm_map_startup()
* The remaining fields must be initialized by the caller.
*/
struct vmspace *
-vmspace_alloc(min, max, pageable)
+vmspace_alloc(min, max)
vm_offset_t min, max;
- int pageable;
{
register struct vmspace *vm;
- MALLOC(vm, struct vmspace *, sizeof(struct vmspace), M_VMMAP, M_WAITOK);
- bzero(vm, (caddr_t) &vm->vm_startcopy - (caddr_t) vm);
- vm_map_init(&vm->vm_map, min, max, pageable);
+ vm = zalloc(vmspace_zone);
+ bzero(&vm->vm_map, sizeof vm->vm_map);
+ vm_map_init(&vm->vm_map, min, max);
pmap_pinit(&vm->vm_pmap);
vm->vm_map.pmap = &vm->vm_pmap; /* XXX */
vm->vm_refcnt = 1;
+ vm->vm_shm = NULL;
return (vm);
}
@@ -218,6 +218,7 @@ vm_init2(void) {
NULL, 0, 0, 0, 1);
zinitna(mapzone, &mapobj,
NULL, 0, 0, 0, 1);
+ vmspace_zone = zinit("VMSPACE", sizeof (struct vmspace), 0, 0, 3);
pmap_init2();
vm_object_init2();
}
@@ -242,13 +243,8 @@ vmspace_free(vm)
vm->vm_map.max_offset);
vm_map_unlock(&vm->vm_map);
- while( vm->vm_map.ref_count != 1)
- tsleep(&vm->vm_map.ref_count, PVM, "vmsfre", 0);
- --vm->vm_map.ref_count;
pmap_release(&vm->vm_pmap);
- FREE(vm, M_VMMAP);
- } else {
- wakeup(&vm->vm_map.ref_count);
+ zfree(vmspace_zone, vm);
}
}
@@ -260,15 +256,14 @@ vmspace_free(vm)
* the given lower and upper address bounds.
*/
vm_map_t
-vm_map_create(pmap, min, max, pageable)
+vm_map_create(pmap, min, max)
pmap_t pmap;
vm_offset_t min, max;
- boolean_t pageable;
{
register vm_map_t result;
result = zalloc(mapzone);
- vm_map_init(result, min, max, pageable);
+ vm_map_init(result, min, max);
result->pmap = pmap;
return (result);
}
@@ -279,25 +274,21 @@ vm_map_create(pmap, min, max, pageable)
* The pmap is set elsewhere.
*/
void
-vm_map_init(map, min, max, pageable)
+vm_map_init(map, min, max)
register struct vm_map *map;
vm_offset_t min, max;
- boolean_t pageable;
{
map->header.next = map->header.prev = &map->header;
map->nentries = 0;
map->size = 0;
- map->ref_count = 1;
map->is_main_map = TRUE;
map->system_map = 0;
map->min_offset = min;
map->max_offset = max;
- map->entries_pageable = pageable;
map->first_free = &map->header;
map->hint = &map->header;
map->timestamp = 0;
lockinit(&map->lock, PVM, "thrd_sleep", 0, 0);
- simple_lock_init(&map->ref_lock);
}
/*
@@ -349,67 +340,6 @@ vm_map_entry_create(map)
}
/*
- * vm_map_reference:
- *
- * Creates another valid reference to the given map.
- *
- */
-void
-vm_map_reference(map)
- register vm_map_t map;
-{
- if (map == NULL)
- return;
-
- map->ref_count++;
-}
-
-/*
- * vm_map_deallocate:
- *
- * Removes a reference from the specified map,
- * destroying it if no references remain.
- * The map should not be locked.
- */
-void
-vm_map_deallocate(map)
- register vm_map_t map;
-{
- register int c;
-
- if (map == NULL)
- return;
-
- c = map->ref_count;
-
- if (c == 0)
- panic("vm_map_deallocate: deallocating already freed map");
-
- if (c != 1) {
- --map->ref_count;
- wakeup(&map->ref_count);
- return;
- }
- /*
- * Lock the map, to wait out all other references to it.
- */
-
- vm_map_lock_drain_interlock(map);
- (void) vm_map_delete(map, map->min_offset, map->max_offset);
- --map->ref_count;
- if( map->ref_count != 0) {
- vm_map_unlock(map);
- return;
- }
-
- pmap_destroy(map->pmap);
-
- vm_map_unlock(map);
-
- zfree(mapzone, map);
-}
-
-/*
* SAVE_HINT:
*
* Saves the specified entry as the hint for
@@ -870,9 +800,7 @@ _vm_map_clip_start(map, entry, start)
vm_map_entry_link(map, entry->prev, new_entry);
- if (entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP))
- vm_map_reference(new_entry->object.share_map);
- else
+ if ((entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) == 0)
vm_object_reference(new_entry->object.vm_object);
}
@@ -931,9 +859,7 @@ _vm_map_clip_end(map, entry, end)
vm_map_entry_link(map, entry, new_entry);
- if (entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP))
- vm_map_reference(new_entry->object.share_map);
- else
+ if ((entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) == 0)
vm_object_reference(new_entry->object.vm_object);
}
@@ -995,8 +921,8 @@ vm_map_submap(map, start, end, submap)
if ((entry->start == start) && (entry->end == end) &&
((entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_COW)) == 0) &&
(entry->object.vm_object == NULL)) {
+ entry->object.sub_map = submap;
entry->eflags |= MAP_ENTRY_IS_SUB_MAP;
- vm_map_reference(entry->object.sub_map = submap);
result = KERN_SUCCESS;
}
vm_map_unlock(map);
@@ -1117,6 +1043,7 @@ vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
current = current->next;
}
+ map->timestamp++;
vm_map_unlock(map);
return (KERN_SUCCESS);
}
@@ -1792,9 +1719,7 @@ vm_map_entry_delete(map, entry)
vm_map_entry_unlink(map, entry);
map->size -= entry->end - entry->start;
- if (entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) {
- vm_map_deallocate(entry->object.share_map);
- } else {
+ if ((entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) == 0) {
vm_object_deallocate(entry->object.vm_object);
}
@@ -1997,27 +1922,10 @@ vm_map_copy_entry(src_map, dst_map, src_entry, dst_entry)
* write-protected.
*/
if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) {
-
- boolean_t su;
-
- /*
- * If the source entry has only one mapping, we can
- * just protect the virtual address range.
- */
- if (!(su = src_map->is_main_map)) {
- su = (src_map->ref_count == 1);
- }
- if (su) {
- pmap_protect(src_map->pmap,
- src_entry->start,
- src_entry->end,
- src_entry->protection & ~VM_PROT_WRITE);
- } else {
- vm_object_pmap_copy(src_entry->object.vm_object,
- OFF_TO_IDX(src_entry->offset),
- OFF_TO_IDX(src_entry->offset + (src_entry->end
- - src_entry->start)));
- }
+ pmap_protect(src_map->pmap,
+ src_entry->start,
+ src_entry->end,
+ src_entry->protection & ~VM_PROT_WRITE);
}
/*
@@ -2074,8 +1982,7 @@ vmspace_fork(vm1)
vm_map_lock(old_map);
- vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset,
- old_map->entries_pageable);
+ vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset);
bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy,
(caddr_t) (vm1 + 1) - (caddr_t) &vm1->vm_startcopy);
new_pmap = &vm2->vm_pmap; /* XXX */
@@ -2171,8 +2078,7 @@ vmspace_exec(struct proc *p) {
struct vmspace *newvmspace;
vm_map_t map = &p->p_vmspace->vm_map;
- newvmspace = vmspace_alloc(map->min_offset, map->max_offset,
- map->entries_pageable);
+ newvmspace = vmspace_alloc(map->min_offset, map->max_offset);
bcopy(&oldvmspace->vm_startcopy, &newvmspace->vm_startcopy,
(caddr_t) (newvmspace + 1) - (caddr_t) &newvmspace->vm_startcopy);
/*
@@ -2182,12 +2088,10 @@ vmspace_exec(struct proc *p) {
* run it down. Even though there is little or no chance of blocking
* here, it is a good idea to keep this form for future mods.
*/
- vm_map_reference(&oldvmspace->vm_map);
vmspace_free(oldvmspace);
p->p_vmspace = newvmspace;
if (p == curproc)
pmap_activate(p);
- vm_map_deallocate(&oldvmspace->vm_map);
}
/*
@@ -2203,12 +2107,10 @@ vmspace_unshare(struct proc *p) {
if (oldvmspace->vm_refcnt == 1)
return;
newvmspace = vmspace_fork(oldvmspace);
- vm_map_reference(&oldvmspace->vm_map);
vmspace_free(oldvmspace);
p->p_vmspace = newvmspace;
if (p == curproc)
pmap_activate(p);
- vm_map_deallocate(&oldvmspace->vm_map);
}
@@ -2242,8 +2144,7 @@ vm_map_lookup(vm_map_t *var_map, /* IN/OUT */
vm_object_t *object, /* OUT */
vm_pindex_t *pindex, /* OUT */
vm_prot_t *out_prot, /* OUT */
- boolean_t *wired, /* OUT */
- boolean_t *single_use) /* OUT */
+ boolean_t *wired) /* OUT */
{
vm_map_t share_map;
vm_offset_t share_offset;
@@ -2407,9 +2308,10 @@ RetryLookup:;
* don't allow writes.
*/
- prot &= (~VM_PROT_WRITE);
+ prot &= ~VM_PROT_WRITE;
}
}
+
/*
* Create an object if necessary.
*/
@@ -2440,12 +2342,7 @@ RetryLookup:;
* Return whether this is the only map sharing this data.
*/
- if (!su) {
- su = (share_map->ref_count == 1);
- }
*out_prot = prot;
- *single_use = su;
-
return (KERN_SUCCESS);
#undef RETURN
@@ -2493,43 +2390,43 @@ vm_uiomove(mapa, srcobject, cp, cnta, uaddra, npages)
{
vm_map_t map;
vm_object_t first_object, oldobject, object;
- vm_map_entry_t first_entry, entry;
+ vm_map_entry_t entry;
vm_prot_t prot;
- boolean_t wired, su;
+ boolean_t wired;
int tcnt, rv;
- vm_offset_t uaddr, start, end;
+ vm_offset_t uaddr, start, end, tend;
vm_pindex_t first_pindex, osize, oindex;
off_t ooffset;
- int skipinit, allremoved;
int cnt;
if (npages)
*npages = 0;
- allremoved = 0;
-
cnt = cnta;
+ uaddr = uaddra;
+
while (cnt > 0) {
map = mapa;
- uaddr = uaddra;
- skipinit = 0;
if ((vm_map_lookup(&map, uaddr,
- VM_PROT_READ, &first_entry, &first_object,
- &first_pindex, &prot, &wired, &su)) != KERN_SUCCESS) {
+ VM_PROT_READ, &entry, &first_object,
+ &first_pindex, &prot, &wired)) != KERN_SUCCESS) {
return EFAULT;
}
- vm_map_clip_start(map, first_entry, uaddr);
+ vm_map_clip_start(map, entry, uaddr);
tcnt = cnt;
- if ((uaddr + tcnt) > first_entry->end)
- tcnt = first_entry->end - uaddr;
+ tend = uaddr + tcnt;
+ if (tend > entry->end) {
+ tcnt = entry->end - uaddr;
+ tend = entry->end;
+ }
- vm_map_clip_end(map, first_entry, uaddr + tcnt);
+ vm_map_clip_end(map, entry, tend);
- start = first_entry->start;
- end = first_entry->end;
+ start = entry->start;
+ end = entry->end;
osize = atop(tcnt);
@@ -2539,12 +2436,12 @@ vm_uiomove(mapa, srcobject, cp, cnta, uaddra, npages)
for (idx = 0; idx < osize; idx++) {
vm_page_t m;
if ((m = vm_page_lookup(srcobject, oindex + idx)) == NULL) {
- vm_map_lookup_done(map, first_entry);
+ vm_map_lookup_done(map, entry);
return 0;
}
if ((m->flags & PG_BUSY) ||
((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL)) {
- vm_map_lookup_done(map, first_entry);
+ vm_map_lookup_done(map, entry);
return 0;
}
}
@@ -2554,7 +2451,44 @@ vm_uiomove(mapa, srcobject, cp, cnta, uaddra, npages)
* If we are changing an existing map entry, just redirect
* the object, and change mappings.
*/
- if ((first_object->ref_count == 1) &&
+ if ((first_object->type == OBJT_VNODE) &&
+ ((oldobject = entry->object.vm_object) == first_object)) {
+
+ if ((entry->offset != cp) || (oldobject != srcobject)) {
+ /*
+ * Remove old window into the file
+ */
+ pmap_remove (map->pmap, uaddr, tend);
+
+ /*
+ * Force copy on write for mmaped regions
+ */
+ vm_object_pmap_copy_1 (srcobject, oindex, oindex + osize);
+
+ /*
+ * Point the object appropriately
+ */
+ if (oldobject != srcobject) {
+
+ /*
+ * Set the object optimization hint flag
+ */
+ srcobject->flags |= OBJ_OPT;
+ vm_object_reference(srcobject);
+ entry->object.vm_object = srcobject;
+
+ if (oldobject) {
+ vm_object_deallocate(oldobject);
+ }
+ }
+
+ entry->offset = cp;
+ map->timestamp++;
+ } else {
+ pmap_remove (map->pmap, uaddr, tend);
+ }
+
+ } else if ((first_object->ref_count == 1) &&
(first_object->size == osize) &&
((first_object->type == OBJT_DEFAULT) ||
(first_object->type == OBJT_SWAP)) ) {
@@ -2566,10 +2500,7 @@ vm_uiomove(mapa, srcobject, cp, cnta, uaddra, npages)
/*
* Remove old window into the file
*/
- if (!allremoved) {
- pmap_remove (map->pmap, uaddra, uaddra + cnt);
- allremoved = 1;
- }
+ pmap_remove (map->pmap, uaddr, tend);
/*
* Remove unneeded old pages
@@ -2607,22 +2538,19 @@ vm_uiomove(mapa, srcobject, cp, cnta, uaddra, npages)
TAILQ_REMOVE(&oldobject->shadow_head,
first_object, shadow_list);
oldobject->shadow_count--;
- if (oldobject->shadow_count == 0)
- oldobject->flags &= ~OBJ_OPT;
vm_object_deallocate(oldobject);
}
TAILQ_INSERT_TAIL(&srcobject->shadow_head,
first_object, shadow_list);
srcobject->shadow_count++;
- srcobject->flags |= OBJ_OPT;
first_object->backing_object = srcobject;
}
-
first_object->backing_object_offset = cp;
+ map->timestamp++;
} else {
- skipinit = 1;
+ pmap_remove (map->pmap, uaddr, tend);
}
/*
* Otherwise, we have to do a logical mmap.
@@ -2632,29 +2560,28 @@ vm_uiomove(mapa, srcobject, cp, cnta, uaddra, npages)
srcobject->flags |= OBJ_OPT;
vm_object_reference(srcobject);
- object = srcobject;
- ooffset = cp;
- vm_object_shadow(&object, &ooffset, osize);
-
- if (!allremoved) {
- pmap_remove (map->pmap, uaddra, uaddra + cnt);
- allremoved = 1;
- }
+ pmap_remove (map->pmap, uaddr, tend);
vm_object_pmap_copy_1 (srcobject, oindex, oindex + osize);
vm_map_lock_upgrade(map);
- if (first_entry == &map->header) {
+ if (entry == &map->header) {
map->first_free = &map->header;
} else if (map->first_free->start >= start) {
- map->first_free = first_entry->prev;
+ map->first_free = entry->prev;
}
- SAVE_HINT(map, first_entry->prev);
- vm_map_entry_delete(map, first_entry);
+ SAVE_HINT(map, entry->prev);
+ vm_map_entry_delete(map, entry);
+
+ object = srcobject;
+ ooffset = cp;
+#if 0
+ vm_object_shadow(&object, &ooffset, osize);
+#endif
- rv = vm_map_insert(map, object, ooffset, start, end,
- VM_PROT_ALL, VM_PROT_ALL, MAP_COPY_ON_WRITE);
+ rv = vm_map_insert(map, object, ooffset, start, tend,
+ VM_PROT_ALL, VM_PROT_ALL, MAP_COPY_ON_WRITE|MAP_COPY_NEEDED);
if (rv != KERN_SUCCESS)
panic("vm_uiomove: could not insert new entry: %d", rv);
@@ -2663,15 +2590,14 @@ vm_uiomove(mapa, srcobject, cp, cnta, uaddra, npages)
/*
* Map the window directly, if it is already in memory
*/
- if (!skipinit)
- pmap_object_init_pt(map->pmap, uaddra,
- srcobject, (vm_pindex_t) OFF_TO_IDX(cp), tcnt, 0);
+ pmap_object_init_pt(map->pmap, uaddr,
+ srcobject, oindex, tcnt, 0);
map->timestamp++;
vm_map_unlock(map);
cnt -= tcnt;
- uaddra += tcnt;
+ uaddr += tcnt;
cp += tcnt;
if (npages)
*npages += osize;
@@ -2714,8 +2640,7 @@ vm_freeze_copyopts(object, froma, toa)
vm_object_t robject, robjectn;
vm_pindex_t idx, from, to;
- if ((vfs_ioopt == 0) ||
- (object == NULL) ||
+ if ((object == NULL) ||
((object->flags & OBJ_OPT) == 0))
return;
@@ -2836,9 +2761,9 @@ DB_SHOW_COMMAND(map, vm_map_print)
register vm_map_entry_t entry;
- db_iprintf("%s map 0x%x: pmap=0x%x, ref=%d, nentries=%d, version=%d\n",
+ db_iprintf("%s map 0x%x: pmap=0x%x, nentries=%d, version=%d\n",
(map->is_main_map ? "Task" : "Share"),
- (int) map, (int) (map->pmap), map->ref_count, map->nentries,
+ (int) map, (int) (map->pmap), map->nentries,
map->timestamp);
nlines++;
diff --git a/sys/vm/vm_map.h b/sys/vm/vm_map.h
index 1568814..b7c6cd5 100644
--- a/sys/vm/vm_map.h
+++ b/sys/vm/vm_map.h
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_map.h,v 1.30 1998/01/06 05:26:00 dyson Exp $
+ * $Id: vm_map.h,v 1.31 1998/01/17 09:16:52 dyson Exp $
*/
/*
@@ -132,12 +132,9 @@ struct vm_map {
vm_size_t size; /* virtual size */
unsigned char is_main_map; /* Am I a main map? */
unsigned char system_map; /* Am I a system map? */
- int ref_count; /* Reference count */
- struct simplelock ref_lock; /* Lock for ref_count field */
vm_map_entry_t hint; /* hint for quick lookups */
unsigned int timestamp; /* Version number */
vm_map_entry_t first_free; /* First free space hint */
- boolean_t entries_pageable; /* map entries pageable?? */
struct pmap *pmap; /* Physical map */
#define min_offset header.start
#define max_offset header.end
@@ -312,16 +309,16 @@ extern vm_size_t kentry_data_size;
boolean_t vm_map_check_protection __P((vm_map_t, vm_offset_t, vm_offset_t, vm_prot_t));
int vm_map_copy __P((vm_map_t, vm_map_t, vm_offset_t, vm_size_t, vm_offset_t, boolean_t, boolean_t));
struct pmap;
-vm_map_t vm_map_create __P((struct pmap *, vm_offset_t, vm_offset_t, boolean_t));
+vm_map_t vm_map_create __P((struct pmap *, vm_offset_t, vm_offset_t));
void vm_map_deallocate __P((vm_map_t));
int vm_map_delete __P((vm_map_t, vm_offset_t, vm_offset_t));
int vm_map_find __P((vm_map_t, vm_object_t, vm_ooffset_t, vm_offset_t *, vm_size_t, boolean_t, vm_prot_t, vm_prot_t, int));
int vm_map_findspace __P((vm_map_t, vm_offset_t, vm_size_t, vm_offset_t *));
int vm_map_inherit __P((vm_map_t, vm_offset_t, vm_offset_t, vm_inherit_t));
-void vm_map_init __P((struct vm_map *, vm_offset_t, vm_offset_t, boolean_t));
+void vm_map_init __P((struct vm_map *, vm_offset_t, vm_offset_t));
int vm_map_insert __P((vm_map_t, vm_object_t, vm_ooffset_t, vm_offset_t, vm_offset_t, vm_prot_t, vm_prot_t, int));
int vm_map_lookup __P((vm_map_t *, vm_offset_t, vm_prot_t, vm_map_entry_t *, vm_object_t *,
- vm_pindex_t *, vm_prot_t *, boolean_t *, boolean_t *));
+ vm_pindex_t *, vm_prot_t *, boolean_t *));
void vm_map_lookup_done __P((vm_map_t, vm_map_entry_t));
boolean_t vm_map_lookup_entry __P((vm_map_t, vm_offset_t, vm_map_entry_t *));
int vm_map_pageable __P((vm_map_t, vm_offset_t, vm_offset_t, boolean_t));
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index 43bf48b..a833fab 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_object.c,v 1.106 1998/01/12 01:44:38 dyson Exp $
+ * $Id: vm_object.c,v 1.107 1998/01/17 09:16:55 dyson Exp $
*/
/*
@@ -169,6 +169,7 @@ _vm_object_allocate(type, size, object)
object->page_hint = NULL;
object->last_read = 0;
+ object->generation++;
TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
vm_object_count++;
@@ -268,6 +269,7 @@ vm_object_vndeallocate(object)
object->ref_count--;
if (object->ref_count == 0) {
vp->v_flag &= ~VTEXT;
+ object->flags &= ~OBJ_OPT;
}
vrele(vp);
}
@@ -372,7 +374,7 @@ doterm:
if (temp) {
TAILQ_REMOVE(&temp->shadow_head, object, shadow_list);
temp->shadow_count--;
- if (temp->shadow_count == 0)
+ if (temp->ref_count == 0)
temp->flags &= ~OBJ_OPT;
}
vm_object_terminate(object);
@@ -455,6 +457,19 @@ vm_object_terminate(object)
vm_pager_deallocate(object);
if (object->ref_count == 0) {
+ vm_object_dispose(object);
+ }
+}
+
+/*
+ * vm_object_dispose
+ *
+ * Dispose the object.
+ */
+void
+vm_object_dispose(object)
+ vm_object_t object;
+{
simple_lock(&vm_object_list_lock);
TAILQ_REMOVE(&vm_object_list, object, object_list);
vm_object_count--;
@@ -464,7 +479,6 @@ vm_object_terminate(object)
*/
zfree(obj_zone, object);
wakeup(object);
- }
}
/*
@@ -498,6 +512,7 @@ vm_object_page_clean(object, start, end, syncio)
vm_page_t maf[vm_pageout_page_count];
vm_page_t mab[vm_pageout_page_count];
vm_page_t ma[vm_pageout_page_count];
+ int curgeneration;
struct proc *pproc = curproc; /* XXX */
if (object->type != OBJT_VNODE ||
@@ -521,6 +536,8 @@ vm_object_page_clean(object, start, end, syncio)
p->flags |= PG_CLEANCHK;
rescan:
+ curgeneration = object->generation;
+
for(p = TAILQ_FIRST(&object->memq); p; p = np) {
np = TAILQ_NEXT(p, listq);
@@ -540,11 +557,13 @@ rescan:
}
s = splvm();
- if ((p->flags & PG_BUSY) || p->busy) {
+ while ((p->flags & PG_BUSY) || p->busy) {
p->flags |= PG_WANTED|PG_REFERENCED;
tsleep(p, PVM, "vpcwai", 0);
- splx(s);
- goto rescan;
+ if (object->generation != curgeneration) {
+ splx(s);
+ goto rescan;
+ }
}
splx(s);
@@ -617,7 +636,8 @@ rescan:
runlen = maxb + maxf + 1;
splx(s);
vm_pageout_flush(ma, runlen, 0);
- goto rescan;
+ if (object->generation != curgeneration)
+ goto rescan;
}
VOP_FSYNC(vp, NULL, syncio, curproc);
diff --git a/sys/vm/vm_object.h b/sys/vm/vm_object.h
index c72acd9..36bdb87 100644
--- a/sys/vm/vm_object.h
+++ b/sys/vm/vm_object.h
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_object.h,v 1.41 1998/01/06 05:26:07 dyson Exp $
+ * $Id: vm_object.h,v 1.42 1998/01/17 09:16:56 dyson Exp $
*/
/*
@@ -87,6 +87,7 @@ struct vm_object {
TAILQ_HEAD(, vm_object) shadow_head; /* objects that this is a shadow for */
TAILQ_ENTRY(vm_object) shadow_list; /* chain of shadow objects */
TAILQ_HEAD(, vm_page) memq; /* list of resident pages */
+ int generation; /* generation ID */
objtype_t type; /* type of pager */
vm_size_t size; /* Object size */
int ref_count; /* How many refs?? */
@@ -168,6 +169,7 @@ void vm_object_collapse __P((vm_object_t));
void vm_object_copy __P((vm_object_t, vm_pindex_t, vm_object_t *, vm_pindex_t *, boolean_t *));
void vm_object_deallocate __P((vm_object_t));
void vm_object_terminate __P((vm_object_t));
+void vm_object_dispose __P((vm_object_t));
void vm_object_vndeallocate __P((vm_object_t));
void vm_object_init __P((void));
void vm_object_page_clean __P((vm_object_t, vm_pindex_t, vm_pindex_t, boolean_t));
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 884d0fb..2072bf3 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91
- * $Id: vm_page.c,v 1.85 1998/01/12 01:44:41 dyson Exp $
+ * $Id: vm_page.c,v 1.86 1998/01/17 09:16:59 dyson Exp $
*/
/*
@@ -94,6 +94,7 @@ static vm_page_t vm_page_select_free __P((vm_object_t object,
* page structure.
*/
+static int vm_page_bucket_generation; /* generation id for buckets */
static struct pglist *vm_page_buckets; /* Array of buckets */
static int vm_page_bucket_count; /* How big is array? */
static int vm_page_hash_mask; /* Mask for hash function */
@@ -404,6 +405,7 @@ vm_page_insert(m, object, pindex)
bucket = &vm_page_buckets[vm_page_hash(object, pindex)];
TAILQ_INSERT_TAIL(bucket, m, hashq);
+ vm_page_bucket_generation++;
/*
* Now link into the object's list of backed pages.
@@ -412,6 +414,7 @@ vm_page_insert(m, object, pindex)
TAILQ_INSERT_TAIL(&object->memq, m, listq);
m->flags |= PG_TABLED;
m->object->page_hint = m;
+ m->object->generation++;
/*
* And show that the object has one more resident page.
@@ -448,6 +451,7 @@ vm_page_remove(m)
bucket = &vm_page_buckets[vm_page_hash(m->object, m->pindex)];
TAILQ_REMOVE(bucket, m, hashq);
+ vm_page_bucket_generation++;
/*
* Now remove from the object's list of backed pages.
@@ -460,6 +464,7 @@ vm_page_remove(m)
*/
m->object->resident_page_count--;
+ m->object->generation++;
m->flags &= ~PG_TABLED;
}
@@ -480,6 +485,7 @@ vm_page_lookup(object, pindex)
{
register vm_page_t m;
register struct pglist *bucket;
+ int curgeneration;
int s;
/*
@@ -488,15 +494,16 @@ vm_page_lookup(object, pindex)
bucket = &vm_page_buckets[vm_page_hash(object, pindex)];
- s = splvm();
+restart:
+ curgeneration = vm_page_bucket_generation;
for (m = TAILQ_FIRST(bucket); m != NULL; m = TAILQ_NEXT(m,hashq)) {
+ if (curgeneration != vm_page_bucket_generation)
+ goto restart;
if ((m->object == object) && (m->pindex == pindex)) {
- splx(s);
m->object->page_hint = m;
return (m);
}
}
- splx(s);
return (NULL);
}
@@ -786,6 +793,7 @@ vm_page_alloc(object, pindex, page_req)
if (cnt.v_cache_count > 0)
printf("vm_page_alloc(NORMAL): missing pages on cache queue: %d\n", cnt.v_cache_count);
#endif
+ vm_pageout_deficit++;
pagedaemon_wakeup();
return (NULL);
}
@@ -807,6 +815,7 @@ vm_page_alloc(object, pindex, page_req)
if (cnt.v_cache_count > 0)
printf("vm_page_alloc(ZERO): missing pages on cache queue: %d\n", cnt.v_cache_count);
#endif
+ vm_pageout_deficit++;
pagedaemon_wakeup();
return (NULL);
}
@@ -830,6 +839,7 @@ vm_page_alloc(object, pindex, page_req)
if (cnt.v_cache_count > 0)
printf("vm_page_alloc(SYSTEM): missing pages on cache queue: %d\n", cnt.v_cache_count);
#endif
+ vm_pageout_deficit++;
pagedaemon_wakeup();
return (NULL);
}
@@ -845,6 +855,7 @@ vm_page_alloc(object, pindex, page_req)
#endif
} else {
splx(s);
+ vm_pageout_deficit++;
pagedaemon_wakeup();
return (NULL);
}
@@ -883,8 +894,6 @@ vm_page_alloc(object, pindex, page_req)
/* XXX before splx until vm_page_insert is safe */
vm_page_insert(m, object, pindex);
- splx(s);
-
/*
* Don't wakeup too often - wakeup the pageout daemon when
* we would be nearly out of memory.
@@ -894,7 +903,6 @@ vm_page_alloc(object, pindex, page_req)
(cnt.v_free_count < cnt.v_pageout_free_min))
pagedaemon_wakeup();
- s = splvm();
if ((qtype == PQ_CACHE) &&
((page_req == VM_ALLOC_NORMAL) || (page_req == VM_ALLOC_ZERO)) &&
oldobject && (oldobject->type == OBJT_VNODE) &&
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index 391b41c..8a6f97f 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -65,7 +65,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_pageout.c,v 1.107 1998/01/12 01:44:44 dyson Exp $
+ * $Id: vm_pageout.c,v 1.108 1998/01/17 09:17:01 dyson Exp $
*/
/*
@@ -126,9 +126,9 @@ SYSINIT_KT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp)
#endif
-int vm_pages_needed; /* Event on which pageout daemon sleeps */
-
-int vm_pageout_pages_needed; /* flag saying that the pageout daemon needs pages */
+int vm_pages_needed=0; /* Event on which pageout daemon sleeps */
+int vm_pageout_deficit=0; /* Estimated number of pages deficit */
+int vm_pageout_pages_needed=0; /* flag saying that the pageout daemon needs pages */
extern int npendingio;
#if !defined(NO_SWAPPING)
@@ -535,9 +535,7 @@ vm_pageout_map_deactivate_pages(map, desired)
vm_map_entry_t tmpe;
vm_object_t obj, bigobj;
- vm_map_reference(map);
if (lockmgr(&map->lock, LK_EXCLUSIVE | LK_NOWAIT, (void *)0, curproc)) {
- vm_map_deallocate(map);
return;
}
@@ -587,7 +585,6 @@ vm_pageout_map_deactivate_pages(map, desired)
pmap_remove(vm_map_pmap(map),
VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
vm_map_unlock(map);
- vm_map_deallocate(map);
return;
}
#endif
@@ -645,7 +642,7 @@ vm_pageout_scan()
*/
pages_freed = 0;
- addl_page_shortage = 0;
+ addl_page_shortage = vm_pageout_deficit;
if (max_page_launder == 0)
max_page_launder = 1;
@@ -1166,7 +1163,7 @@ vm_size_t count;
cnt.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE +
cnt.v_interrupt_free_min;
cnt.v_free_reserved = vm_pageout_page_count +
- cnt.v_pageout_free_min + (count / 768) + PQ_L2_SIZE;
+ cnt.v_pageout_free_min + (count / 2048) + PQ_L2_SIZE;
cnt.v_free_min += cnt.v_free_reserved;
return 1;
}
@@ -1259,6 +1256,7 @@ vm_pageout()
splx(s);
vm_pager_sync();
vm_pageout_scan();
+ vm_pageout_deficit = 0;
vm_pager_sync();
wakeup(&cnt.v_free_count);
}
diff --git a/sys/vm/vm_pageout.h b/sys/vm/vm_pageout.h
index 4a61d12..a864896 100644
--- a/sys/vm/vm_pageout.h
+++ b/sys/vm/vm_pageout.h
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_pageout.h,v 1.21 1997/12/06 02:23:36 dyson Exp $
+ * $Id: vm_pageout.h,v 1.22 1998/01/12 01:44:46 dyson Exp $
*/
#ifndef _VM_VM_PAGEOUT_H_
@@ -78,6 +78,7 @@
extern int vm_page_max_wired;
extern int vm_pages_needed; /* should be some "event" structure */
extern int vm_pageout_pages_needed;
+extern int vm_pageout_deficit;
#define VM_PAGEOUT_ASYNC 0
#define VM_PAGEOUT_SYNC 1
diff --git a/sys/vm/vm_zone.c b/sys/vm/vm_zone.c
index 1ebcdd3..0a79e69 100644
--- a/sys/vm/vm_zone.c
+++ b/sys/vm/vm_zone.c
@@ -11,7 +11,7 @@
* 2. Absolutely no warranty of function or purpose is made by the author
* John S. Dyson.
*
- * $Id: vm_zone.c,v 1.13 1997/12/15 05:16:09 dyson Exp $
+ * $Id: vm_zone.c,v 1.14 1997/12/22 11:48:13 dyson Exp $
*/
#include <sys/param.h>
@@ -39,6 +39,11 @@ static MALLOC_DEFINE(M_ZONE, "ZONE", "Zone header");
* Note that the initial implementation of this had coloring, and
* absolutely no improvement (actually perf degradation) occurred.
*
+ * Note also that the zones are type stable. The only restriction is
+ * that the first two longwords of a data structure can be changed
+ * between allocations. Any data that must be stable between allocations
+ * must reside in areas after the first two longwords.
+ *
* zinitna, zinit, zbootinit are the initialization routines.
* zalloc, zfree, are the interrupt/lock unsafe allocation/free routines.
* zalloci, zfreei, are the interrupt/lock safe allocation/free routines.
@@ -183,6 +188,7 @@ zbootinit(vm_zone_t z, char *name, int size, void *item, int nitems)
z->znalloc = 0;
simple_lock_init(&z->zlock);
+ bzero(item, nitems * z->zsize);
z->zitems = NULL;
for (i = 0; i < nitems; i++) {
((void **) item)[0] = z->zitems;
@@ -285,14 +291,16 @@ _zget(vm_zone_t z)
item = (char *) z->zkva + z->zpagecount * PAGE_SIZE;
for (i = 0; ((i < z->zalloc) && (z->zpagecount < z->zpagemax));
i++) {
+ vm_offset_t zkva;
m = vm_page_alloc(z->zobj, z->zpagecount,
z->zallocflag);
if (m == NULL)
break;
- pmap_kenter(z->zkva + z->zpagecount * PAGE_SIZE,
- VM_PAGE_TO_PHYS(m));
+ zkva = z->zkva + z->zpagecount * PAGE_SIZE;
+ pmap_kenter(zkva, VM_PAGE_TO_PHYS(m));
+ bzero((caddr_t) zkva, PAGE_SIZE);
z->zpagecount++;
}
nitems = (i * PAGE_SIZE) / z->zsize;
@@ -314,13 +322,13 @@ _zget(vm_zone_t z)
*/
if (lockstatus(&kernel_map->lock)) {
int s;
- s = splhigh();
+ s = splvm();
item = (void *) kmem_malloc(kmem_map, nbytes, M_WAITOK);
splx(s);
} else {
item = (void *) kmem_alloc(kernel_map, nbytes);
}
-
+ bzero(item, nbytes);
nitems = nbytes / z->zsize;
}
z->ztotal += nitems;
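The type-stability note added to vm_zone.c above (items 1) and 14) of the commit message) can be illustrated with a small hedged sketch. Freed items are threaded onto the zone's free list through their leading pointer-sized word, just as in the "((void **) item)[0] = z->zitems;" lines of zbootinit, so anything stored past the first couple of longwords keeps its last contents across free/alloc cycles. Names below are hypothetical userland-style C, not the kernel allocator:

    #include <stdlib.h>

    /* Hypothetical zone with an embedded, type-stable free list. */
    struct zone {
            void   *zitems;         /* head of the embedded free list */
            size_t  zsize;          /* item size in bytes */
    };

    static void *
    zone_alloc(struct zone *z)
    {
            void *item = z->zitems;

            if (item != NULL)
                    z->zitems = ((void **)item)[0]; /* pop; touches word 0 only */
            else
                    item = malloc(z->zsize);        /* grow (the kernel uses kmem) */
            return (item);
    }

    static void
    zone_free(struct zone *z, void *item)
    {
            ((void **)item)[0] = z->zitems;         /* push; touches word 0 only */
            z->zitems = item;
    }

Keeping the free-list linkage inside the items themselves, and never returning the backing pages to a general-purpose allocator, is what makes the memory type stable: a stale pointer still references an object of the correct type, which the caller can then validate with a generation count or id, per item 9).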