path: root/sys/vm/vm_pageout.c
author     dyson <dyson@FreeBSD.org>  1998-01-22 17:30:44 +0000
committer  dyson <dyson@FreeBSD.org>  1998-01-22 17:30:44 +0000
commit     197bd655c435302ddb4156bc654705dfef1d9143 (patch)
tree       a91dcc7eb7507f4d0088eb5e1b2e6872c51fd3ea /sys/vm/vm_pageout.c
parent     f95fe9806cd36e8b1fa77d590b5d1c2d48482618 (diff)
VM level code cleanups.
1) Start using TSM.  Struct procs continue to point to the upages structure
   after being freed.  Struct vmspace continues to point to the pte object
   and kva space for the kstack.  u_map is now superfluous.
2) vm_maps don't need to be reference counted.  They always exist either
   in the kernel or in a vmspace.  The vmspaces are managed by reference
   counts.
3) Remove the "wired" vm_map nonsense.
4) No need to keep a cache of kernel stack kva's.
5) Get rid of strange looking ++var, and change to var++.
6) Change more data structures to use our "zone" allocator.  Added
   struct proc, struct vmspace and struct vnode.  This saves a significant
   amount of kva space and physical memory.  Additionally, this enables
   TSM for the zone managed memory.
7) Keep ioopt disabled for now.
8) Remove the now bogus "single use" map concept.
9) Use generation counts or id's for data structures residing in TSM,
   where it allows us to avoid unneeded restart overhead during
   traversals where blocking might occur (a sketch of this pattern
   follows the message below).
10) Account better for memory deficits, so the pageout daemon will be
    able to make enough memory available (experimental; a sketch of this
    bookkeeping follows the diff below).
11) Fix some vnode locking problems.  (From Tor, I think.)
12) Add a check in ufs_lookup, to avoid lots of unneeded calls to bcmp
    (experimental).
13) Significantly shrink, clean up, and make slightly faster the
    vm_fault.c code.  Use generation counts, get rid of unneeded collapse
    operations, and clean up the cluster code.
14) Make vm_zone more suitable for TSM.

This commit is partially a result of discussions and contributions from
other people, including DG, Tor Egge, PHK, and probably others that I have
forgotten to attribute (so let me know if I forgot).

This is not the infamous, final cleanup of the vnode stuff, but a necessary
step.  Vnode mgmt should be correct, but things might still change, and
there is still some missing stuff (like ioopt, physical backing of
non-merged cache files, and debugging of layering concepts).
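
Item 9 rests on a simple observation: because type-stable memory keeps a
structure's storage valid even after it is freed, a traversal that may block
can record the container's generation count before sleeping and restart only
if that count actually changed, rather than restarting unconditionally.  The
userland sketch below is purely illustrative of that pattern; glist, gnode
and maybe_block() are hypothetical names and this is not the kernel code.

#include <stdio.h>

struct gnode {
	int value;
	struct gnode *next;
};

struct glist {
	struct gnode *head;
	unsigned int gen;	/* bumped on every insert/remove */
};

/* Stand-in for an operation that may sleep; returns nonzero if it did. */
static int
maybe_block(struct gnode *n)
{
	(void)n;
	return 0;		/* never blocks in this toy model */
}

static void
scan(struct glist *l)
{
	struct gnode *n;
	unsigned int gen;

restart:
	gen = l->gen;
	for (n = l->head; n != NULL; n = n->next) {
		/* Restart only if the list changed while we slept. */
		if (maybe_block(n) && gen != l->gen)
			goto restart;
		printf("%d\n", n->value);
	}
}

int
main(void)
{
	struct gnode b = { 2, NULL };
	struct gnode a = { 1, &b };
	struct glist l = { &a, 0 };

	scan(&l);
	return 0;
}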
Diffstat (limited to 'sys/vm/vm_pageout.c')
-rw-r--r--  sys/vm/vm_pageout.c | 16
1 file changed, 7 insertions(+), 9 deletions(-)
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index 391b41c..8a6f97f 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -65,7 +65,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_pageout.c,v 1.107 1998/01/12 01:44:44 dyson Exp $
+ * $Id: vm_pageout.c,v 1.108 1998/01/17 09:17:01 dyson Exp $
*/
/*
@@ -126,9 +126,9 @@ SYSINIT_KT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp)
#endif
-int vm_pages_needed; /* Event on which pageout daemon sleeps */
-
-int vm_pageout_pages_needed; /* flag saying that the pageout daemon needs pages */
+int vm_pages_needed=0; /* Event on which pageout daemon sleeps */
+int vm_pageout_deficit=0; /* Estimated number of pages deficit */
+int vm_pageout_pages_needed=0; /* flag saying that the pageout daemon needs pages */
extern int npendingio;
#if !defined(NO_SWAPPING)
@@ -535,9 +535,7 @@ vm_pageout_map_deactivate_pages(map, desired)
vm_map_entry_t tmpe;
vm_object_t obj, bigobj;
- vm_map_reference(map);
if (lockmgr(&map->lock, LK_EXCLUSIVE | LK_NOWAIT, (void *)0, curproc)) {
- vm_map_deallocate(map);
return;
}
@@ -587,7 +585,6 @@ vm_pageout_map_deactivate_pages(map, desired)
pmap_remove(vm_map_pmap(map),
VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
vm_map_unlock(map);
- vm_map_deallocate(map);
return;
}
#endif
@@ -645,7 +642,7 @@ vm_pageout_scan()
*/
pages_freed = 0;
- addl_page_shortage = 0;
+ addl_page_shortage = vm_pageout_deficit;
if (max_page_launder == 0)
max_page_launder = 1;
@@ -1166,7 +1163,7 @@ vm_size_t count;
cnt.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE +
cnt.v_interrupt_free_min;
cnt.v_free_reserved = vm_pageout_page_count +
- cnt.v_pageout_free_min + (count / 768) + PQ_L2_SIZE;
+ cnt.v_pageout_free_min + (count / 2048) + PQ_L2_SIZE;
cnt.v_free_min += cnt.v_free_reserved;
return 1;
}
@@ -1259,6 +1256,7 @@ vm_pageout()
splx(s);
vm_pager_sync();
vm_pageout_scan();
+ vm_pageout_deficit = 0;
vm_pager_sync();
wakeup(&cnt.v_free_count);
}
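
The hunks above show the mechanical side of item 10: code that cannot get
the pages it wants records the shortfall in vm_pageout_deficit,
vm_pageout_scan() folds that deficit into addl_page_shortage, and the
pageout daemon zeroes the counter once a pass has run.  The toy userland
model below only sketches that bookkeeping under made-up names and numbers;
it is not the kernel implementation.

#include <stdio.h>

static int free_pages = 10;
static int deficit;		/* plays the role of vm_pageout_deficit */

/* Take up to 'n' pages; record anything we could not get as deficit. */
static int
alloc_pages(int n)
{
	int got = (n <= free_pages) ? n : free_pages;

	free_pages -= got;
	if (got < n)
		deficit += n - got;	/* remember how far short we fell */
	return got;
}

/* One pageout pass: work off the recorded deficit, then clear it. */
static void
pageout_scan(void)
{
	int shortage = deficit;	/* like addl_page_shortage = vm_pageout_deficit */

	free_pages += shortage;	/* pretend we laundered that many pages */
	deficit = 0;		/* like vm_pageout_deficit = 0 after the scan */
}

int
main(void)
{
	alloc_pages(25);	/* asks for more than is free */
	printf("deficit before scan: %d\n", deficit);
	pageout_scan();
	printf("deficit after scan: %d, free pages: %d\n", deficit, free_pages);
	return 0;
}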