path: root/sys/vm/vm_kern.c
author    dg <dg@FreeBSD.org>  1995-01-09 16:06:02 +0000
committer dg <dg@FreeBSD.org>  1995-01-09 16:06:02 +0000
commit    1707d41102ca7d645ec2db9e442a27857337d7e9 (patch)
tree      afd2f1c13b93c3fff08b58c44153df0d13479824  /sys/vm/vm_kern.c
parent    08ff911ba0fc8f893be057ca25d8b941f6d0f276 (diff)
These changes embody the support of the fully coherent merged VM buffer cache,
much higher filesystem I/O performance, and much better paging performance. It
represents the culmination of over 6 months of R&D.

The majority of the merged VM/cache work is by John Dyson.

The following highlights the most significant changes. Additionally, there are
(mostly minor) changes to the various filesystem modules (nfs, msdosfs, etc)
to support the new VM/buffer scheme.

vfs_bio.c:
	Significant rewrite of most of vfs_bio to support the merged VM
	buffer cache scheme. The scheme is almost fully compatible with
	the old filesystem interface. Significant improvement in the
	number of opportunities for write clustering.

vfs_cluster.c, vfs_subr.c:
	Upgrade and performance enhancements in vfs layer code to support
	merged VM/buffer cache. Fixup of vfs_cluster to eliminate the
	bogus pagemove stuff.

vm_object.c:
	Yet more improvements in the collapse code. Elimination of some
	windows that can cause list corruption.

vm_pageout.c:
	Fixed it, it really works better now. Somehow in 2.0, some
	"enhancements" broke the code. This code has been reworked from
	the ground-up.

vm_fault.c, vm_page.c, pmap.c, vm_object.c:
	Support for small-block filesystems with merged VM/buffer cache
	scheme.

pmap.c, vm_map.c:
	Dynamic kernel VM size, now we don't have to pre-allocate
	excessive numbers of kernel PTs.

vm_glue.c:
	Much simpler and more effective swapping code. No more gratuitous
	swapping.

proc.h:
	Fixed the problem that the p_lock flag was not being cleared on
	a fork.

swap_pager.c, vnode_pager.c:
	Removal of old vfs_bio cruft to support the past pseudo-coherency.
	Now the code doesn't need it anymore.

machdep.c:
	Changes to better support the parameter values for the merged
	VM/buffer cache scheme.

machdep.c, kern_exec.c, vm_glue.c:
	Implemented a separate submap for temporary exec string space and
	another one to contain process upages. This eliminates all map
	fragmentation problems that previously existed.

ffs_inode.c, ufs_inode.c, ufs_readwrite.c:
	Changes for merged VM/buffer cache. Add "bypass" support for
	sneaking in on busy buffers.

Submitted by:	John Dyson and David Greenman
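The separate submaps mentioned above correspond to the new exec_map and u_map
declared in the diff below; they are carved out of kernel_map with
kmem_suballoc(), whose signature appears later in this file. A minimal sketch
of that setup — the sizes, and the NCARGS/UPAGES/maxproc parameters, are
illustrative assumptions, not the exact machdep.c code (which this page does
not show):

    /*
     * Sketch: creating dedicated submaps with kmem_suballoc(), as this
     * commit does for exec argument strings and process upages.
     * Sizes below are assumptions for illustration only.
     */
    vm_offset_t minaddr, maxaddr;

    /* Pageable submap for temporary exec string space. */
    exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
        16 * NCARGS, TRUE);

    /* Non-pageable submap holding the wired upages of all processes. */
    u_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
        maxproc * UPAGES * PAGE_SIZE, FALSE);

Keeping these allocations in their own submaps means their transient entries
can no longer fragment the parent kernel_map.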
Diffstat (limited to 'sys/vm/vm_kern.c')
-rw-r--r--  sys/vm/vm_kern.c | 247
1 file changed, 124 insertions(+), 123 deletions(-)
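A recurring semantic change in the diff below: vm_page_alloc() now takes a
third argument, and newly allocated kernel pages are marked fully valid before
being wired. Condensed from the kmem_alloc() hunk, the allocation loop blocks
via VM_WAIT until the pageout daemon frees memory:

    /* Condensed from the kmem_alloc() hunk below: retry allocation
     * under memory pressure, zero-fill, and mark the page fully valid. */
    while ((mem = vm_page_alloc(kernel_object, offset + i, 0)) == NULL) {
            vm_object_unlock(kernel_object);
            VM_WAIT;                /* sleep until memory is freed */
            vm_object_lock(kernel_object);
    }
    vm_page_zero_fill(mem);
    mem->flags &= ~PG_BUSY;
    mem->valid |= VM_PAGE_BITS_ALL;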
diff --git a/sys/vm/vm_kern.c b/sys/vm/vm_kern.c
index 072c906..d59bbb8 100644
--- a/sys/vm/vm_kern.c
+++ b/sys/vm/vm_kern.c
@@ -1,4 +1,4 @@
-/*
+/*
* Copyright (c) 1991, 1993
* The Regents of the University of California. All rights reserved.
*
@@ -40,17 +40,17 @@
* All rights reserved.
*
* Authors: Avadis Tevanian, Jr., Michael Wayne Young
- *
+ *
* Permission to use, copy, modify and distribute this software and
* its documentation is hereby granted, provided that both the copyright
* notice and this permission notice appear in all copies of the
* software, derivative works or modified versions, and any portions
* thereof, and that both notices appear in supporting documentation.
- *
- * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
- * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
* FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
- *
+ *
* Carnegie Mellon requests users of this software to return to
*
* Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_kern.c,v 1.6 1994/08/07 14:53:26 davidg Exp $
+ * $Id: vm_kern.c,v 1.7 1994/08/18 22:36:02 wollman Exp $
*/
/*
@@ -78,14 +78,16 @@
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
-vm_map_t buffer_map;
-vm_map_t kernel_map;
-vm_map_t kmem_map;
-vm_map_t mb_map;
-vm_map_t io_map;
-vm_map_t clean_map;
-vm_map_t pager_map;
-vm_map_t phys_map;
+vm_map_t buffer_map;
+vm_map_t kernel_map;
+vm_map_t kmem_map;
+vm_map_t mb_map;
+vm_map_t io_map;
+vm_map_t clean_map;
+vm_map_t pager_map;
+vm_map_t phys_map;
+vm_map_t exec_map;
+vm_map_t u_map;
/*
* kmem_alloc_pageable:
@@ -94,12 +96,13 @@ vm_map_t phys_map;
* map must be "kernel_map" below.
*/
-vm_offset_t kmem_alloc_pageable(map, size)
- vm_map_t map;
- register vm_size_t size;
+vm_offset_t
+kmem_alloc_pageable(map, size)
+ vm_map_t map;
+ register vm_size_t size;
{
- vm_offset_t addr;
- register int result;
+ vm_offset_t addr;
+ register int result;
#if 0
if (map != kernel_map)
@@ -110,38 +113,37 @@ vm_offset_t kmem_alloc_pageable(map, size)
addr = vm_map_min(map);
result = vm_map_find(map, NULL, (vm_offset_t) 0,
- &addr, size, TRUE);
+ &addr, size, TRUE);
if (result != KERN_SUCCESS) {
- return(0);
+ return (0);
}
-
- return(addr);
+ return (addr);
}
/*
* Allocate wired-down memory in the kernel's address map
* or a submap.
*/
-vm_offset_t kmem_alloc(map, size)
- register vm_map_t map;
- register vm_size_t size;
+vm_offset_t
+kmem_alloc(map, size)
+ register vm_map_t map;
+ register vm_size_t size;
{
- vm_offset_t addr;
- register vm_offset_t offset;
- vm_offset_t i;
+ vm_offset_t addr;
+ register vm_offset_t offset;
+ vm_offset_t i;
size = round_page(size);
/*
- * Use the kernel object for wired-down kernel pages.
- * Assume that no region of the kernel object is
- * referenced more than once.
+ * Use the kernel object for wired-down kernel pages. Assume that no
+ * region of the kernel object is referenced more than once.
*/
/*
- * Locate sufficient space in the map. This will give us the
- * final virtual address for the new memory, and thus will tell
- * us the offset within the kernel map.
+ * Locate sufficient space in the map. This will give us the final
+ * virtual address for the new memory, and thus will tell us the
+ * offset within the kernel map.
*/
vm_map_lock(map);
if (vm_map_findspace(map, 0, size, &addr)) {
@@ -154,56 +156,50 @@ vm_offset_t kmem_alloc(map, size)
vm_map_unlock(map);
/*
- * Guarantee that there are pages already in this object
- * before calling vm_map_pageable. This is to prevent the
- * following scenario:
- *
- * 1) Threads have swapped out, so that there is a
- * pager for the kernel_object.
- * 2) The kmsg zone is empty, and so we are kmem_allocing
- * a new page for it.
- * 3) vm_map_pageable calls vm_fault; there is no page,
- * but there is a pager, so we call
- * pager_data_request. But the kmsg zone is empty,
- * so we must kmem_alloc.
- * 4) goto 1
- * 5) Even if the kmsg zone is not empty: when we get
- * the data back from the pager, it will be (very
- * stale) non-zero data. kmem_alloc is defined to
- * return zero-filled memory.
- *
- * We're intentionally not activating the pages we allocate
- * to prevent a race with page-out. vm_map_pageable will wire
- * the pages.
+ * Guarantee that there are pages already in this object before
+ * calling vm_map_pageable. This is to prevent the following
+ * scenario:
+ *
+ * 1) Threads have swapped out, so that there is a pager for the
+ * kernel_object. 2) The kmsg zone is empty, and so we are
+ * kmem_allocing a new page for it. 3) vm_map_pageable calls vm_fault;
+ * there is no page, but there is a pager, so we call
+ * pager_data_request. But the kmsg zone is empty, so we must
+ * kmem_alloc. 4) goto 1 5) Even if the kmsg zone is not empty: when
+ * we get the data back from the pager, it will be (very stale)
+ * non-zero data. kmem_alloc is defined to return zero-filled memory.
+ *
+ * We're intentionally not activating the pages we allocate to prevent a
+ * race with page-out. vm_map_pageable will wire the pages.
*/
vm_object_lock(kernel_object);
- for (i = 0 ; i < size; i+= PAGE_SIZE) {
- vm_page_t mem;
+ for (i = 0; i < size; i += PAGE_SIZE) {
+ vm_page_t mem;
- while ((mem = vm_page_alloc(kernel_object, offset+i)) == NULL) {
+ while ((mem = vm_page_alloc(kernel_object, offset + i, 0)) == NULL) {
vm_object_unlock(kernel_object);
VM_WAIT;
vm_object_lock(kernel_object);
}
vm_page_zero_fill(mem);
mem->flags &= ~PG_BUSY;
+ mem->valid |= VM_PAGE_BITS_ALL;
}
vm_object_unlock(kernel_object);
-
+
/*
- * And finally, mark the data as non-pageable.
+ * And finally, mark the data as non-pageable.
*/
(void) vm_map_pageable(map, (vm_offset_t) addr, addr + size, FALSE);
/*
- * Try to coalesce the map
+ * Try to coalesce the map
*/
-
vm_map_simplify(map, addr);
- return(addr);
+ return (addr);
}
/*
@@ -213,10 +209,11 @@ vm_offset_t kmem_alloc(map, size)
* with kmem_alloc, and return the physical pages
* associated with that region.
*/
-void kmem_free(map, addr, size)
- vm_map_t map;
- register vm_offset_t addr;
- vm_size_t size;
+void
+kmem_free(map, addr, size)
+ vm_map_t map;
+ register vm_offset_t addr;
+ vm_size_t size;
{
(void) vm_map_remove(map, trunc_page(addr), round_page(addr + size));
}
@@ -234,20 +231,21 @@ void kmem_free(map, addr, size)
* min, max Returned endpoints of map
* pageable Can the region be paged
*/
-vm_map_t kmem_suballoc(parent, min, max, size, pageable)
- register vm_map_t parent;
- vm_offset_t *min, *max;
- register vm_size_t size;
- boolean_t pageable;
+vm_map_t
+kmem_suballoc(parent, min, max, size, pageable)
+ register vm_map_t parent;
+ vm_offset_t *min, *max;
+ register vm_size_t size;
+ boolean_t pageable;
{
- register int ret;
- vm_map_t result;
+ register int ret;
+ vm_map_t result;
size = round_page(size);
*min = (vm_offset_t) vm_map_min(parent);
ret = vm_map_find(parent, NULL, (vm_offset_t) 0,
- min, size, TRUE);
+ min, size, TRUE);
if (ret != KERN_SUCCESS) {
printf("kmem_suballoc: bad status return of %d.\n", ret);
panic("kmem_suballoc");
@@ -259,7 +257,7 @@ vm_map_t kmem_suballoc(parent, min, max, size, pageable)
panic("kmem_suballoc: cannot create submap");
if ((ret = vm_map_submap(parent, *min, *max, result)) != KERN_SUCCESS)
panic("kmem_suballoc: unable to change range to submap");
- return(result);
+ return (result);
}
/*
@@ -280,14 +278,14 @@ vm_map_t kmem_suballoc(parent, min, max, size, pageable)
*/
vm_offset_t
kmem_malloc(map, size, canwait)
- register vm_map_t map;
- register vm_size_t size;
- boolean_t canwait;
+ register vm_map_t map;
+ register vm_size_t size;
+ boolean_t canwait;
{
- register vm_offset_t offset, i;
- vm_map_entry_t entry;
- vm_offset_t addr;
- vm_page_t m;
+ register vm_offset_t offset, i;
+ vm_map_entry_t entry;
+ vm_offset_t addr;
+ vm_page_t m;
if (map != kmem_map && map != mb_map)
panic("kern_malloc_alloc: map != {kmem,mb}_map");
@@ -296,15 +294,15 @@ kmem_malloc(map, size, canwait)
addr = vm_map_min(map);
/*
- * Locate sufficient space in the map. This will give us the
- * final virtual address for the new memory, and thus will tell
- * us the offset within the kernel map.
+ * Locate sufficient space in the map. This will give us the final
+ * virtual address for the new memory, and thus will tell us the
+ * offset within the kernel map.
*/
vm_map_lock(map);
if (vm_map_findspace(map, 0, size, &addr)) {
vm_map_unlock(map);
#if 0
- if (canwait) /* XXX should wait */
+ if (canwait) /* XXX should wait */
panic("kmem_malloc: %s too small",
map == kmem_map ? "kmem_map" : "mb_map");
#endif
@@ -317,29 +315,28 @@ kmem_malloc(map, size, canwait)
vm_map_insert(map, kmem_object, offset, addr, addr + size);
/*
- * If we can wait, just mark the range as wired
- * (will fault pages as necessary).
+ * If we can wait, just mark the range as wired (will fault pages as
+ * necessary).
*/
if (canwait) {
vm_map_unlock(map);
(void) vm_map_pageable(map, (vm_offset_t) addr, addr + size,
- FALSE);
+ FALSE);
vm_map_simplify(map, addr);
- return(addr);
+ return (addr);
}
-
/*
* If we cannot wait then we must allocate all memory up front,
* pulling it off the active queue to prevent pageout.
*/
vm_object_lock(kmem_object);
for (i = 0; i < size; i += PAGE_SIZE) {
- m = vm_page_alloc(kmem_object, offset + i);
+ m = vm_page_alloc(kmem_object, offset + i, 1);
/*
- * Ran out of space, free everything up and return.
- * Don't need to lock page queues here as we know
- * that the pages we got aren't on any queues.
+ * Ran out of space, free everything up and return. Don't need
+ * to lock page queues here as we know that the pages we got
+ * aren't on any queues.
*/
if (m == NULL) {
while (i != 0) {
@@ -350,20 +347,21 @@ kmem_malloc(map, size, canwait)
vm_object_unlock(kmem_object);
vm_map_delete(map, addr, addr + size);
vm_map_unlock(map);
- return(0);
+ return (0);
}
#if 0
vm_page_zero_fill(m);
#endif
m->flags &= ~PG_BUSY;
+ m->valid |= VM_PAGE_BITS_ALL;
}
vm_object_unlock(kmem_object);
/*
- * Mark map entry as non-pageable.
- * Assert: vm_map_insert() will never be able to extend the previous
- * entry so there will be a new entry exactly corresponding to this
- * address range and it will have wired_count == 0.
+ * Mark map entry as non-pageable. Assert: vm_map_insert() will never
+ * be able to extend the previous entry so there will be a new entry
+ * exactly corresponding to this address range and it will have
+ * wired_count == 0.
*/
if (!vm_map_lookup_entry(map, addr, &entry) ||
entry->start != addr || entry->end != addr + size ||
@@ -372,20 +370,20 @@ kmem_malloc(map, size, canwait)
entry->wired_count++;
/*
- * Loop thru pages, entering them in the pmap.
- * (We cannot add them to the wired count without
- * wrapping the vm_page_queue_lock in splimp...)
+ * Loop thru pages, entering them in the pmap. (We cannot add them to
+ * the wired count without wrapping the vm_page_queue_lock in
+ * splimp...)
*/
for (i = 0; i < size; i += PAGE_SIZE) {
vm_object_lock(kmem_object);
m = vm_page_lookup(kmem_object, offset + i);
vm_object_unlock(kmem_object);
- pmap_kenter( addr + i, VM_PAGE_TO_PHYS(m));
+ pmap_kenter(addr + i, VM_PAGE_TO_PHYS(m));
}
vm_map_unlock(map);
vm_map_simplify(map, addr);
- return(addr);
+ return (addr);
}
/*
@@ -395,18 +393,19 @@ kmem_malloc(map, size, canwait)
* has no room, the caller sleeps waiting for more memory in the submap.
*
*/
-vm_offset_t kmem_alloc_wait(map, size)
- vm_map_t map;
- vm_size_t size;
+vm_offset_t
+kmem_alloc_wait(map, size)
+ vm_map_t map;
+ vm_size_t size;
{
- vm_offset_t addr;
+ vm_offset_t addr;
size = round_page(size);
for (;;) {
/*
- * To make this work for more than one map,
- * use the map's lock to lock out sleepers/wakers.
+ * To make this work for more than one map, use the map's lock
+ * to lock out sleepers/wakers.
*/
vm_map_lock(map);
if (vm_map_findspace(map, 0, size, &addr) == 0)
@@ -416,11 +415,11 @@ vm_offset_t kmem_alloc_wait(map, size)
vm_map_unlock(map);
return (0);
}
- assert_wait((int)map, TRUE);
+ assert_wait((int) map, TRUE);
vm_map_unlock(map);
thread_block("kmaw");
}
- vm_map_insert(map, NULL, (vm_offset_t)0, addr, addr + size);
+ vm_map_insert(map, NULL, (vm_offset_t) 0, addr, addr + size);
vm_map_unlock(map);
return (addr);
}
@@ -431,14 +430,15 @@ vm_offset_t kmem_alloc_wait(map, size)
* Returns memory to a submap of the kernel, and wakes up any threads
* waiting for memory in that map.
*/
-void kmem_free_wakeup(map, addr, size)
- vm_map_t map;
- vm_offset_t addr;
- vm_size_t size;
+void
+kmem_free_wakeup(map, addr, size)
+ vm_map_t map;
+ vm_offset_t addr;
+ vm_size_t size;
{
vm_map_lock(map);
(void) vm_map_delete(map, trunc_page(addr), round_page(addr + size));
- thread_wakeup((int)map);
+ thread_wakeup((int) map);
vm_map_unlock(map);
}
@@ -448,7 +448,8 @@ void kmem_free_wakeup(map, addr, size)
* map the range between VM_MIN_KERNEL_ADDRESS and `start' as allocated, and
* the range between `start' and `end' as free.
*/
-void kmem_init(start, end)
+void
+kmem_init(start, end)
vm_offset_t start, end;
{
register vm_map_t m;
@@ -457,7 +458,7 @@ void kmem_init(start, end)
vm_map_lock(m);
/* N.B.: cannot use kgdb to debug, starting with this assignment ... */
kernel_map = m;
- (void) vm_map_insert(m, NULL, (vm_offset_t)0,
+ (void) vm_map_insert(m, NULL, (vm_offset_t) 0,
VM_MIN_KERNEL_ADDRESS, start);
/* ... and ending with the completion of the above `insert' */
vm_map_unlock(m);
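For context on how the kmem_alloc_wait()/kmem_free_wakeup() pair above is
consumed: callers allocate transient kernel VA from a fixed-size submap and
may sleep until another thread returns space. A hypothetical caller — the
choice of pager_map and the npages variable are illustrative assumptions:

    /* Hypothetical consumer of a fixed-size submap such as pager_map. */
    vm_offset_t kva;
    vm_size_t len = npages * PAGE_SIZE;     /* npages is assumed */

    kva = kmem_alloc_wait(pager_map, len);  /* sleeps if the submap is full */
    /* ... map pages and perform I/O through the allocated KVA ... */
    kmem_free_wakeup(pager_map, kva, len);  /* frees range, wakes waiters */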