Diffstat (limited to 'sys/vm')
-rw-r--r--  sys/vm/swap_pager.c    29
-rw-r--r--  sys/vm/vm_fault.c      61
-rw-r--r--  sys/vm/vm_map.c       145
-rw-r--r--  sys/vm/vm_map.h         3
-rw-r--r--  sys/vm/vm_mmap.c      204
-rw-r--r--  sys/vm/vm_object.c      7
-rw-r--r--  sys/vm/vm_page.c       49
-rw-r--r--  sys/vm/vm_page.h       10
-rw-r--r--  sys/vm/vm_pageout.c    58
-rw-r--r--  sys/vm/vnode_pager.c    8
10 files changed, 365 insertions, 209 deletions
diff --git a/sys/vm/swap_pager.c b/sys/vm/swap_pager.c
index ec37003..b68195c 100644
--- a/sys/vm/swap_pager.c
+++ b/sys/vm/swap_pager.c
@@ -39,7 +39,7 @@
* from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
*
* @(#)swap_pager.c 8.9 (Berkeley) 3/21/94
- * $Id: swap_pager.c,v 1.67 1996/05/23 00:45:50 dyson Exp $
+ * $Id: swap_pager.c,v 1.68 1996/06/10 04:58:48 dyson Exp $
*/
/*
@@ -1078,7 +1078,7 @@ swap_pager_getpages(object, m, count, reqpage)
pagedaemon_wakeup();
swap_pager_needflags &= ~(SWAP_FREE_NEEDED|SWAP_FREE_NEEDED_BY_PAGEOUT);
if (rv == VM_PAGER_OK) {
- pmap_clear_modify(VM_PAGE_TO_PHYS(m[reqpage]));
+ pmap_tc_modified(m[reqpage]);
m[reqpage]->valid = VM_PAGE_BITS_ALL;
m[reqpage]->dirty = 0;
}
@@ -1092,7 +1092,7 @@ swap_pager_getpages(object, m, count, reqpage)
*/
if (rv == VM_PAGER_OK) {
for (i = 0; i < count; i++) {
- pmap_clear_modify(VM_PAGE_TO_PHYS(m[i]));
+ pmap_tc_modified(m[i]);
m[i]->dirty = 0;
m[i]->flags &= ~PG_ZERO;
if (i != reqpage) {
@@ -1469,7 +1469,7 @@ retryfree:
if (rv == VM_PAGER_OK) {
for (i = 0; i < count; i++) {
if (rtvals[i] == VM_PAGER_OK) {
- pmap_clear_modify(VM_PAGE_TO_PHYS(m[i]));
+ pmap_tc_modified(m[i]);
m[i]->dirty = 0;
/*
* optimization, if a page has been read
@@ -1477,7 +1477,7 @@ retryfree:
*/
if ((m[i]->queue != PQ_ACTIVE) &&
((m[i]->flags & (PG_WANTED|PG_REFERENCED)) ||
- pmap_is_referenced(VM_PAGE_TO_PHYS(m[i])))) {
+ pmap_tc_referenced(VM_PAGE_TO_PHYS(m[i])))) {
vm_page_activate(m[i]);
}
}
@@ -1580,12 +1580,21 @@ swap_pager_finish(spc)
(u_long) VM_PAGE_TO_PHYS(spc->spc_m[i]));
}
} else {
+ int pagewanted = 0;
for (i = 0; i < spc->spc_count; i++) {
- pmap_clear_modify(VM_PAGE_TO_PHYS(spc->spc_m[i]));
+ if (spc->spc_m[i]->flags & (PG_WANTED | PG_REFERENCED)) {
+ pagewanted = 1;
+ break;
+ }
+ }
+ for (i = 0; i < spc->spc_count; i++) {
+ pmap_tc_modified(spc->spc_m[i]);
spc->spc_m[i]->dirty = 0;
- if ((spc->spc_m[i]->queue != PQ_ACTIVE) &&
- ((spc->spc_m[i]->flags & PG_WANTED) || pmap_is_referenced(VM_PAGE_TO_PHYS(spc->spc_m[i]))))
- vm_page_activate(spc->spc_m[i]);
+ if (pagewanted) {
+ if (spc->spc_m[i]->queue != PQ_ACTIVE)
+ vm_page_activate(spc->spc_m[i]);
+ spc->spc_m[i]->flags |= PG_REFERENCED;
+ }
}
}
@@ -1625,9 +1634,7 @@ swap_pager_iodone(bp)
if (bp->b_vp)
pbrelvp(bp);
-/*
if (bp->b_flags & B_WANTED)
-*/
wakeup(bp);
if (bp->b_rcred != NOCRED)
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index c3424b9..df2f0aa 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -66,7 +66,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_fault.c,v 1.52 1996/06/16 20:37:26 dyson Exp $
+ * $Id: vm_fault.c,v 1.53 1996/07/02 02:07:59 dyson Exp $
*/
/*
@@ -103,10 +103,6 @@ int vm_fault_additional_pages __P((vm_page_t, int, int, vm_page_t *, int *));
#define VM_FAULT_READ_BEHIND 3
#define VM_FAULT_READ (VM_FAULT_READ_AHEAD+VM_FAULT_READ_BEHIND+1)
-int vm_fault_free_1;
-int vm_fault_copy_save_1;
-int vm_fault_copy_save_2;
-
/*
* vm_fault:
*
@@ -282,7 +278,7 @@ RetryFault:;
}
queue = m->queue;
- vm_page_unqueue_nowakeup(m);
+ vm_page_unqueue(m,0);
/*
* Mark page busy for other processes, and the pagedaemon.
@@ -297,8 +293,7 @@ RetryFault:;
m->flags |= PG_BUSY;
- if (m->valid &&
- ((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) &&
+ if (((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) &&
m->object != kernel_object && m->object != kmem_object) {
goto readrest;
}
@@ -401,19 +396,17 @@ readrest:
if (rv == VM_PAGER_OK) {
/*
- * Found the page. Leave it busy while we play
- * with it.
- */
-
- /*
* Relookup in case pager changed page. Pager
* is responsible for disposition of old page
* if moved.
*/
- m = vm_page_lookup(object, pindex);
- if( !m) {
- UNLOCK_AND_DEALLOCATE;
- goto RetryFault;
+ if ((m->object != object) || (m->pindex != pindex) ||
+ (m->flags & PG_TABLED) == 0) {
+ m = vm_page_lookup(object, pindex);
+ if( !m) {
+ UNLOCK_AND_DEALLOCATE;
+ goto RetryFault;
+ }
}
hardfault++;
@@ -485,9 +478,26 @@ readrest:
}
first_m = NULL;
- if ((m->flags & PG_ZERO) == 0)
- vm_page_zero_fill(m);
- cnt.v_zfod++;
+ if ((m->flags & PG_ZERO) == 0) {
+ if (vm_page_zero_count) {
+ vm_page_protect(m, VM_PROT_NONE);
+ PAGE_WAKEUP(m);
+ vm_page_free(m);
+ m = vm_page_alloc(object, pindex, VM_ALLOC_ZERO);
+ if (!m)
+ panic("vm_fault: missing zero page");
+ /*
+ * This should not be true, but just in case...
+ */
+ if ((m->flags & PG_ZERO) == 0) {
+ vm_page_zero_fill(m);
+ cnt.v_zfod++;
+ }
+ } else {
+ vm_page_zero_fill(m);
+ cnt.v_zfod++;
+ }
+ }
break;
} else {
if (object != first_object) {
@@ -565,7 +575,6 @@ readrest:
first_m = m;
m->dirty = VM_PAGE_BITS_ALL;
m = NULL;
- ++vm_fault_copy_save_1;
} else {
/*
* Oh, well, lets copy it.
@@ -639,7 +648,6 @@ readrest:
PAGE_WAKEUP(m);
vm_page_free(m);
m = NULL;
- ++vm_fault_free_1;
tm->dirty = VM_PAGE_BITS_ALL;
first_m->dirty = VM_PAGE_BITS_ALL;
}
@@ -651,7 +659,6 @@ readrest:
vm_page_rename(m, other_object, other_pindex);
m->dirty = VM_PAGE_BITS_ALL;
m->valid = VM_PAGE_BITS_ALL;
- ++vm_fault_copy_save_2;
}
}
}
@@ -660,9 +667,9 @@ readrest:
if (m) {
if (m->queue != PQ_ACTIVE)
vm_page_activate(m);
- /*
- * We no longer need the old page or object.
- */
+ /*
+ * We no longer need the old page or object.
+ */
PAGE_WAKEUP(m);
}
@@ -1091,7 +1098,7 @@ vm_fault_additional_pages(m, rbehind, rahead, marray, reqpage)
endpindex = pindex + (rahead + 1);
if (endpindex > object->size)
endpindex = object->size;
- while (tpindex < endpindex) {
+ while (tpindex < endpindex) {
if ( vm_page_lookup(object, tpindex)) {
break;
}
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index 48de311..31455b4 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_map.c,v 1.51 1996/06/16 20:37:29 dyson Exp $
+ * $Id: vm_map.c,v 1.52 1996/07/07 03:27:41 davidg Exp $
*/
/*
@@ -172,6 +172,8 @@ static void vm_map_entry_unwire __P((vm_map_t, vm_map_entry_t));
static void vm_map_copy_entry __P((vm_map_t, vm_map_t, vm_map_entry_t,
vm_map_entry_t));
static void vm_map_simplify_entry __P((vm_map_t, vm_map_entry_t));
+static __pure int vm_map_simplify_okay __P((vm_map_entry_t entry1,
+ vm_map_entry_t entry2));
void
vm_map_startup()
@@ -230,7 +232,6 @@ vmspace_alloc(min, max, pageable)
vm_map_init(&vm->vm_map, min, max, pageable);
pmap_pinit(&vm->vm_pmap);
vm->vm_map.pmap = &vm->vm_pmap; /* XXX */
- vm->vm_pmap.pm_map = &vm->vm_map;
vm->vm_refcnt = 1;
return (vm);
}
@@ -634,8 +635,8 @@ vm_map_insert(map, object, offset, start, end, prot, max, cow)
return (KERN_NO_SPACE);
if ((prev_entry != &map->header) &&
+ (object == NULL) &&
(prev_entry->end == start) &&
- ((object == NULL) || (prev_entry->object.vm_object == object)) &&
(prev_entry->is_a_map == FALSE) &&
(prev_entry->is_sub_map == FALSE) &&
(prev_entry->inheritance == VM_INHERIT_DEFAULT) &&
@@ -648,24 +649,22 @@ vm_map_insert(map, object, offset, start, end, prot, max, cow)
* See if we can avoid creating a new entry by extending one of our
* neighbors.
*/
- if (object == NULL) {
- if (vm_object_coalesce(prev_entry->object.vm_object,
- OFF_TO_IDX(prev_entry->offset),
- (vm_size_t) (prev_entry->end
- - prev_entry->start),
- (vm_size_t) (end - prev_entry->end))) {
+ if (vm_object_coalesce(prev_entry->object.vm_object,
+ OFF_TO_IDX(prev_entry->offset),
+ (vm_size_t) (prev_entry->end
+ - prev_entry->start),
+ (vm_size_t) (end - prev_entry->end))) {
- /*
- * Coalesced the two objects - can extend the
- * previous map entry to include the new
- * range.
- */
- map->size += (end - prev_entry->end);
- prev_entry->end = end;
- prev_object = prev_entry->object.vm_object;
- default_pager_convert_to_swapq(prev_object);
- return (KERN_SUCCESS);
- }
+ /*
+ * Coalesced the two objects - can extend the
+ * previous map entry to include the new
+ * range.
+ */
+ map->size += (end - prev_entry->end);
+ prev_entry->end = end;
+ prev_object = prev_entry->object.vm_object;
+ default_pager_convert_to_swapq(prev_object);
+ return (KERN_SUCCESS);
}
}
/*
@@ -707,9 +706,10 @@ vm_map_insert(map, object, offset, start, end, prot, max, cow)
/*
* Update the free space hint
*/
- if ((map->first_free == prev_entry) &&
- (prev_entry->end >= new_entry->start))
- map->first_free = new_entry;
+ if (map->first_free == prev_entry) {
+ if (prev_entry->end == new_entry->start)
+ map->first_free = new_entry;
+ }
default_pager_convert_to_swapq(object);
return (KERN_SUCCESS);
@@ -739,8 +739,9 @@ vm_map_findspace(map, start, length, addr)
* at this address, we have to start after it.
*/
if (start == map->min_offset) {
- if ((entry = map->first_free) != &map->header)
+ if ((entry = map->first_free) != &map->header) {
start = entry->end;
+ }
} else {
vm_map_entry_t tmp;
@@ -821,12 +822,39 @@ vm_map_find(map, object, offset, addr, length, find_space, prot, max, cow)
return (result);
}
+static __pure int
+vm_map_simplify_okay(entry1, entry2)
+ vm_map_entry_t entry1, entry2;
+{
+ if ((entry1->end != entry2->start) ||
+ (entry1->object.vm_object != entry2->object.vm_object))
+ return 0;
+ if (entry1->object.vm_object) {
+ if (entry1->object.vm_object->behavior !=
+ entry2->object.vm_object->behavior)
+ return 0;
+ if (entry1->offset + (entry1->end - entry1->start) !=
+ entry2->offset)
+ return 0;
+ }
+ if ((entry1->needs_copy != entry2->needs_copy) ||
+ (entry1->copy_on_write != entry2->copy_on_write) ||
+ (entry1->protection != entry2->protection) ||
+ (entry1->max_protection != entry2->max_protection) ||
+ (entry1->inheritance != entry2->inheritance) ||
+ (entry1->is_sub_map != FALSE) ||
+ (entry1->is_a_map != FALSE) ||
+ (entry1->wired_count != 0) ||
+ (entry2->is_sub_map != FALSE) ||
+ (entry2->is_a_map != FALSE) ||
+ (entry2->wired_count != 0))
+ return 0;
+
+ return 1;
+}
+
/*
* vm_map_simplify_entry: [ internal use only ]
- *
- * Simplify the given map entry by:
- * removing extra sharing maps
- * [XXX maybe later] merging with a neighbor
*/
static void
vm_map_simplify_entry(map, entry)
@@ -834,34 +862,13 @@ vm_map_simplify_entry(map, entry)
vm_map_entry_t entry;
{
vm_map_entry_t next, prev;
- vm_size_t nextsize, prevsize, esize;
- /*
- * If this entry corresponds to a sharing map, then see if we can
- * remove the level of indirection. If it's not a sharing map, then it
- * points to a VM object, so see if we can merge with either of our
- * neighbors.
- */
-
- if (entry->is_sub_map || entry->is_a_map || entry->wired_count)
+ if (entry->is_a_map || entry->is_sub_map || entry->wired_count)
return;
prev = entry->prev;
if (prev != &map->header) {
- prevsize = prev->end - prev->start;
- if ( (prev->end == entry->start) &&
- (prev->object.vm_object == entry->object.vm_object) &&
- (!prev->object.vm_object || (prev->object.vm_object->behavior == entry->object.vm_object->behavior)) &&
- (!prev->object.vm_object ||
- (prev->offset + prevsize == entry->offset)) &&
- (prev->needs_copy == entry->needs_copy) &&
- (prev->copy_on_write == entry->copy_on_write) &&
- (prev->protection == entry->protection) &&
- (prev->max_protection == entry->max_protection) &&
- (prev->inheritance == entry->inheritance) &&
- (prev->is_a_map == FALSE) &&
- (prev->is_sub_map == FALSE) &&
- (prev->wired_count == 0)) {
+ if ( vm_map_simplify_okay(prev, entry)) {
if (map->first_free == prev)
map->first_free = entry;
if (map->hint == prev)
@@ -877,21 +884,7 @@ vm_map_simplify_entry(map, entry)
next = entry->next;
if (next != &map->header) {
- nextsize = next->end - next->start;
- esize = entry->end - entry->start;
- if ((entry->end == next->start) &&
- (next->object.vm_object == entry->object.vm_object) &&
- (!next->object.vm_object || (next->object.vm_object->behavior == entry->object.vm_object->behavior)) &&
- (!entry->object.vm_object ||
- (entry->offset + esize == next->offset)) &&
- (next->needs_copy == entry->needs_copy) &&
- (next->copy_on_write == entry->copy_on_write) &&
- (next->protection == entry->protection) &&
- (next->max_protection == entry->max_protection) &&
- (next->inheritance == entry->inheritance) &&
- (next->is_a_map == FALSE) &&
- (next->is_sub_map == FALSE) &&
- (next->wired_count == 0)) {
+ if ( vm_map_simplify_okay(entry, next)) {
if (map->first_free == next)
map->first_free = entry;
if (map->hint == next)
@@ -904,6 +897,7 @@ vm_map_simplify_entry(map, entry)
}
}
}
+
/*
* vm_map_clip_start: [ internal use only ]
*
@@ -1841,6 +1835,21 @@ vm_map_remove(map, start, end)
}
/*
+ * vm_map_remove_userspace:
+ * Removes the user portion of the address space.
+ */
+void
+vm_map_remove_userspace(map)
+ register vm_map_t map;
+{
+ vm_map_lock(map);
+ pmap_remove_pages(map->pmap, VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
+ vm_map_delete(map, VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
+ vm_map_unlock(map);
+ return;
+}
+
+/*
* vm_map_check_protection:
*
* Assert that the target map allows the specified
@@ -2257,8 +2266,8 @@ RetryLookup:;
lock_write_to_read(&share_map->lock);
}
- if (entry->object.vm_object != NULL)
- default_pager_convert_to_swapq(entry->object.vm_object);
+ default_pager_convert_to_swapq(entry->object.vm_object);
+
/*
* Return the object/offset from this entry. If the entry was
* copy-on-write or empty, it has been fixed up.
diff --git a/sys/vm/vm_map.h b/sys/vm/vm_map.h
index 3ba8375..a50fa62 100644
--- a/sys/vm/vm_map.h
+++ b/sys/vm/vm_map.h
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_map.h,v 1.12 1996/01/30 23:02:35 mpp Exp $
+ * $Id: vm_map.h,v 1.13 1996/05/19 07:36:48 dyson Exp $
*/
/*
@@ -233,6 +233,7 @@ int vm_map_clean __P((vm_map_t, vm_offset_t, vm_offset_t, boolean_t, boolean_t))
int vm_map_protect __P((vm_map_t, vm_offset_t, vm_offset_t, vm_prot_t, boolean_t));
void vm_map_reference __P((vm_map_t));
int vm_map_remove __P((vm_map_t, vm_offset_t, vm_offset_t));
+void vm_map_remove_userspace __P((vm_map_t));
void vm_map_simplify __P((vm_map_t, vm_offset_t));
void vm_map_startup __P((void));
int vm_map_submap __P((vm_map_t, vm_offset_t, vm_offset_t, vm_map_t));
diff --git a/sys/vm/vm_mmap.c b/sys/vm/vm_mmap.c
index 697baf6..ff0e79c 100644
--- a/sys/vm/vm_mmap.c
+++ b/sys/vm/vm_mmap.c
@@ -38,7 +38,7 @@
* from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
*
* @(#)vm_mmap.c 8.4 (Berkeley) 1/12/94
- * $Id: vm_mmap.c,v 1.43 1996/05/19 07:36:49 dyson Exp $
+ * $Id: vm_mmap.c,v 1.44 1996/05/31 00:38:00 dyson Exp $
*/
/*
@@ -72,6 +72,7 @@
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
+#include <vm/loadaout.h>
#ifndef _SYS_SYSPROTO_H_
struct sbrk_args {
@@ -689,10 +690,10 @@ mincore(p, uap, retval)
if (m) {
mincoreinfo = MINCORE_INCORE;
if (m->dirty ||
- pmap_is_modified(VM_PAGE_TO_PHYS(m)))
+ pmap_tc_modified(m))
mincoreinfo |= MINCORE_MODIFIED_OTHER;
if ((m->flags & PG_REFERENCED) ||
- pmap_is_referenced(VM_PAGE_TO_PHYS(m)))
+ pmap_tc_referenced(VM_PAGE_TO_PHYS(m)))
mincoreinfo |= MINCORE_REFERENCED_OTHER;
}
}
@@ -844,7 +845,7 @@ vm_mmap(map, addr, size, prot, maxprot, flags, handle, foff)
vm_ooffset_t foff;
{
boolean_t fitit;
- vm_object_t object, object2;
+ vm_object_t object;
struct vnode *vp = NULL;
objtype_t type;
int rv = KERN_SUCCESS;
@@ -916,29 +917,13 @@ vm_mmap(map, addr, size, prot, maxprot, flags, handle, foff)
flags |= MAP_SHARED;
}
- object2 = NULL;
docow = 0;
if ((flags & (MAP_ANON|MAP_SHARED)) == 0) {
- docow = MAP_COPY_ON_WRITE;
- if (objsize < size) {
- object2 = vm_object_allocate( OBJT_DEFAULT,
- OFF_TO_IDX(size - (foff & ~PAGE_MASK)));
- object2->backing_object = object;
- object2->backing_object_offset = foff;
- TAILQ_INSERT_TAIL(&object->shadow_head,
- object2, shadow_list);
- ++object->shadow_count;
- } else {
- docow |= MAP_COPY_NEEDED;
- }
+ docow = MAP_COPY_ON_WRITE|MAP_COPY_NEEDED;
}
- if (object2)
- rv = vm_map_find(map, object2, 0, addr, size, fitit,
- prot, maxprot, docow);
- else
- rv = vm_map_find(map, object, foff, addr, size, fitit,
- prot, maxprot, docow);
+ rv = vm_map_find(map, object, foff, addr, size, fitit,
+ prot, maxprot, docow);
if (rv != KERN_SUCCESS) {
@@ -947,10 +932,7 @@ vm_mmap(map, addr, size, prot, maxprot, flags, handle, foff)
* object if it's an unnamed anonymous mapping
* or named anonymous without other references.
*/
- if (object2)
- vm_object_deallocate(object2);
- else
- vm_object_deallocate(object);
+ vm_object_deallocate(object);
goto out;
}
@@ -985,3 +967,171 @@ out:
return (EINVAL);
}
}
+
+#ifdef notyet
+/*
+ * Efficient mapping of a .text+.data+.bss object
+ */
+int
+vm_mapaout(map, baseaddr, vp, foff, textsize, datasize, bsssize, addr)
+ vm_map_t map;
+ vm_offset_t baseaddr;
+ struct vnode *vp;
+ vm_ooffset_t foff;
+ register vm_size_t textsize, datasize, bsssize;
+ vm_offset_t *addr;
+{
+ vm_object_t object;
+ int rv;
+ vm_pindex_t objpsize;
+ struct proc *p = curproc;
+
+ vm_size_t totalsize;
+ vm_size_t textend;
+ struct vattr vat;
+ int error;
+
+ textsize = round_page(textsize);
+ datasize = round_page(datasize);
+ bsssize = round_page(bsssize);
+ totalsize = textsize + datasize + bsssize;
+
+ vm_map_lock(map);
+ /*
+ * If baseaddr == -1, then we need to search for space. Otherwise,
+ * we need to be loaded into a certain spot.
+ */
+ if (baseaddr != (vm_offset_t) -1) {
+ if (vm_map_findspace(map, baseaddr, totalsize, addr)) {
+ goto outnomem;
+ }
+
+ if(*addr != baseaddr) {
+ goto outnomem;
+ }
+ } else {
+ baseaddr = round_page(p->p_vmspace->vm_daddr + MAXDSIZ);
+ if (vm_map_findspace(map, baseaddr, totalsize, addr)) {
+ goto outnomem;
+ }
+ }
+
+ if (foff & PAGE_MASK) {
+ vm_map_unlock(map);
+ return EINVAL;
+ }
+
+ /*
+ * get the object size to allocate
+ */
+ error = VOP_GETATTR(vp, &vat, p->p_ucred, p);
+ if (error) {
+ vm_map_unlock(map);
+ return error;
+ }
+ objpsize = OFF_TO_IDX(round_page(vat.va_size));
+
+ /*
+ * Alloc/reference the object
+ */
+ object = vm_pager_allocate(OBJT_VNODE, vp,
+ objpsize, VM_PROT_ALL, foff);
+ if (object == NULL) {
+ goto outnomem;
+ }
+
+ /*
+ * Insert .text into the map
+ */
+ textend = *addr + textsize;
+ rv = vm_map_insert(map, object, foff,
+ *addr, textend,
+ VM_PROT_READ|VM_PROT_EXECUTE, VM_PROT_ALL,
+ MAP_COPY_ON_WRITE|MAP_COPY_NEEDED);
+ if (rv != KERN_SUCCESS) {
+ vm_object_deallocate(object);
+ goto out;
+ }
+
+ /*
+ * Insert .data into the map, if there is any to map.
+ */
+ if (datasize != 0) {
+ object->ref_count++;
+ rv = vm_map_insert(map, object, foff + textsize,
+ textend, textend + datasize,
+ VM_PROT_ALL, VM_PROT_ALL,
+ MAP_COPY_ON_WRITE|MAP_COPY_NEEDED);
+ if (rv != KERN_SUCCESS) {
+ --object->ref_count;
+ vm_map_delete(map, *addr, textend);
+ goto out;
+ }
+ }
+
+ /*
+ * Preload the page tables
+ */
+ pmap_object_init_pt(map->pmap, *addr,
+ object, (vm_pindex_t) OFF_TO_IDX(foff),
+ textsize + datasize, 1);
+
+ /*
+ * Get the space for bss.
+ */
+ if (bsssize != 0) {
+ rv = vm_map_insert(map, NULL, 0,
+ textend + datasize,
+ *addr + totalsize,
+ VM_PROT_ALL, VM_PROT_ALL, 0);
+ }
+ if (rv != KERN_SUCCESS) {
+ vm_map_delete(map, *addr, textend + datasize + bsssize);
+ }
+
+out:
+ vm_map_unlock(map);
+ switch (rv) {
+ case KERN_SUCCESS:
+ return 0;
+ case KERN_INVALID_ADDRESS:
+ case KERN_NO_SPACE:
+ return ENOMEM;
+ case KERN_PROTECTION_FAILURE:
+ return EACCES;
+ default:
+ return EINVAL;
+ }
+outnomem:
+ vm_map_unlock(map);
+ return ENOMEM;
+}
+
+
+int
+mapaout(struct proc *p, struct mapaout_args *uap, int *retval)
+{
+
+ register struct filedesc *fdp = p->p_fd;
+ struct file *fp;
+ struct vnode *vp;
+ int rtval;
+
+ if (((unsigned) uap->fd) >= fdp->fd_nfiles ||
+ (fp = fdp->fd_ofiles[uap->fd]) == NULL)
+ return (EBADF);
+ if (fp->f_type != DTYPE_VNODE)
+ return (EINVAL);
+
+ vp = (struct vnode *) fp->f_data;
+ if ((vp->v_type != VREG) && (vp->v_type != VCHR))
+ return (EINVAL);
+
+ rtval = vm_mapaout( &p->p_vmspace->vm_map,
+ uap->addr, vp, uap->offset,
+ uap->textsize, uap->datasize, uap->bsssize,
+ (vm_offset_t *)retval);
+
+ return rtval;
+}
+#endif
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index 473e2397..39c7ee0 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_object.c,v 1.75 1996/05/31 00:38:02 dyson Exp $
+ * $Id: vm_object.c,v 1.76 1996/06/16 20:37:30 dyson Exp $
*/
/*
@@ -219,7 +219,6 @@ vm_object_allocate(type, size)
result = (vm_object_t)
malloc((u_long) sizeof *result, M_VMOBJ, M_WAITOK);
-
_vm_object_allocate(type, size, result);
return (result);
@@ -231,7 +230,7 @@ vm_object_allocate(type, size)
*
* Gets another reference to the given object.
*/
-inline void
+void
vm_object_reference(object)
register vm_object_t object;
{
@@ -403,8 +402,10 @@ vm_object_terminate(object)
* from paging queues.
*/
while ((p = TAILQ_FIRST(&object->memq)) != NULL) {
+#if defined(DIAGNOSTIC)
if (p->flags & PG_BUSY)
printf("vm_object_terminate: freeing busy page\n");
+#endif
PAGE_WAKEUP(p);
vm_page_free(p);
cnt.v_pfree++;
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 4a95e6e..79dd930 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91
- * $Id: vm_page.c,v 1.59 1996/06/21 05:39:22 dyson Exp $
+ * $Id: vm_page.c,v 1.60 1996/06/26 05:39:25 dyson Exp $
*/
/*
@@ -385,7 +385,7 @@ vm_page_hash(object, pindex)
* The object and page must be locked, and must be splhigh.
*/
-__inline void
+void
vm_page_insert(m, object, pindex)
register vm_page_t m;
register vm_object_t object;
@@ -434,7 +434,7 @@ vm_page_insert(m, object, pindex)
* The object and page must be locked, and at splhigh.
*/
-__inline void
+void
vm_page_remove(m)
register vm_page_t m;
{
@@ -523,34 +523,19 @@ vm_page_rename(m, new_object, new_pindex)
}
/*
- * vm_page_unqueue without any wakeup
- */
-__inline void
-vm_page_unqueue_nowakeup(m)
- vm_page_t m;
-{
- int queue = m->queue;
- if (queue != PQ_NONE) {
- m->queue = PQ_NONE;
- TAILQ_REMOVE(vm_page_queues[queue].pl, m, pageq);
- --(*vm_page_queues[queue].cnt);
- }
-}
-
-
-/*
* vm_page_unqueue must be called at splhigh();
*/
__inline void
-vm_page_unqueue(m)
+vm_page_unqueue(m, wakeup)
vm_page_t m;
+ int wakeup;
{
int queue = m->queue;
if (queue != PQ_NONE) {
m->queue = PQ_NONE;
TAILQ_REMOVE(vm_page_queues[queue].pl, m, pageq);
--(*vm_page_queues[queue].cnt);
- if (queue == PQ_CACHE) {
+ if ((queue == PQ_CACHE) && wakeup) {
if ((cnt.v_cache_count + cnt.v_free_count) <
(cnt.v_free_reserved + cnt.v_cache_min))
pagedaemon_wakeup();
@@ -736,7 +721,7 @@ vm_page_activate(m)
if (m->queue == PQ_CACHE)
cnt.v_reactivated++;
- vm_page_unqueue(m);
+ vm_page_unqueue(m, 1);
if (m->wire_count == 0) {
TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
@@ -751,7 +736,7 @@ vm_page_activate(m)
/*
* helper routine for vm_page_free and vm_page_free_zero
*/
-static int
+__inline static int
vm_page_freechk_and_unqueue(m)
vm_page_t m;
{
@@ -769,7 +754,7 @@ vm_page_freechk_and_unqueue(m)
}
vm_page_remove(m);
- vm_page_unqueue_nowakeup(m);
+ vm_page_unqueue(m,0);
if ((m->flags & PG_FICTITIOUS) != 0) {
return 0;
}
@@ -788,7 +773,7 @@ vm_page_freechk_and_unqueue(m)
/*
* helper routine for vm_page_free and vm_page_free_zero
*/
-static __inline void
+__inline static void
vm_page_free_wakeup()
{
@@ -895,7 +880,7 @@ vm_page_wire(m)
if (m->wire_count == 0) {
s = splvm();
- vm_page_unqueue(m);
+ vm_page_unqueue(m,1);
splx(s);
cnt.v_wire_count++;
}
@@ -961,7 +946,7 @@ vm_page_deactivate(m)
if (m->wire_count == 0 && m->hold_count == 0) {
if (m->queue == PQ_CACHE)
cnt.v_reactivated++;
- vm_page_unqueue(m);
+ vm_page_unqueue(m,1);
TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
m->queue = PQ_INACTIVE;
cnt.v_inactive_count++;
@@ -992,7 +977,7 @@ vm_page_cache(m)
panic("vm_page_cache: caching a dirty page, pindex: %d", m->pindex);
}
s = splvm();
- vm_page_unqueue_nowakeup(m);
+ vm_page_unqueue(m,0);
TAILQ_INSERT_TAIL(&vm_page_queue_cache, m, pageq);
m->queue = PQ_CACHE;
cnt.v_cache_count++;
@@ -1031,7 +1016,7 @@ vm_page_set_validclean(m, base, size)
m->valid |= pagebits;
m->dirty &= ~pagebits;
if( base == 0 && size == PAGE_SIZE)
- pmap_clear_modify(VM_PAGE_TO_PHYS(m));
+ pmap_tc_modified(m);
}
/*
@@ -1071,10 +1056,8 @@ void
vm_page_test_dirty(m)
vm_page_t m;
{
- if ((m->dirty != VM_PAGE_BITS_ALL) &&
- pmap_is_modified(VM_PAGE_TO_PHYS(m))) {
- m->dirty = VM_PAGE_BITS_ALL;
- }
+ if (m->dirty != VM_PAGE_BITS_ALL)
+ pmap_tc_modified(m);
}
/*
diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h
index c246deb..1680d4d 100644
--- a/sys/vm/vm_page.h
+++ b/sys/vm/vm_page.h
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_page.h,v 1.28 1996/06/08 06:48:35 dyson Exp $
+ * $Id: vm_page.h,v 1.29 1996/06/26 05:39:25 dyson Exp $
*/
/*
@@ -220,6 +220,7 @@ extern vm_offset_t last_phys_addr; /* physical address for last_page */
(m)->flags &= ~PG_BUSY; \
if ((m)->flags & PG_WANTED) { \
(m)->flags &= ~PG_WANTED; \
+ (m)->flags |= PG_REFERENCED; \
wakeup((caddr_t) (m)); \
} \
}
@@ -251,8 +252,7 @@ void vm_page_rename __P((vm_page_t, vm_object_t, vm_pindex_t));
vm_offset_t vm_page_startup __P((vm_offset_t, vm_offset_t, vm_offset_t));
void vm_page_unwire __P((vm_page_t));
void vm_page_wire __P((vm_page_t));
-void vm_page_unqueue __P((vm_page_t));
-void vm_page_unqueue_nowakeup __P((vm_page_t));
+void vm_page_unqueue __P((vm_page_t, int));
void vm_page_set_validclean __P((vm_page_t, int, int));
void vm_page_set_invalid __P((vm_page_t, int, int));
static __inline boolean_t vm_page_zero_fill __P((vm_page_t));
@@ -292,11 +292,11 @@ vm_page_protect(vm_page_t mem, int prot)
{
if (prot == VM_PROT_NONE) {
if (mem->flags & (PG_WRITEABLE|PG_MAPPED)) {
- pmap_page_protect(VM_PAGE_TO_PHYS(mem), prot);
+ pmap_page_protect(mem, prot);
mem->flags &= ~(PG_WRITEABLE|PG_MAPPED);
}
} else if ((prot == VM_PROT_READ) && (mem->flags & PG_WRITEABLE)) {
- pmap_page_protect(VM_PAGE_TO_PHYS(mem), prot);
+ pmap_page_protect(mem, prot);
mem->flags &= ~PG_WRITEABLE;
}
}
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index 98ad6ef..26df38c 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -314,12 +314,9 @@ do_backward:
}
}
- /*
- * we allow reads during pageouts...
- */
for (i = page_base; i < (page_base + pageout_count); i++) {
mc[i]->flags |= PG_BUSY;
- vm_page_protect(mc[i], VM_PROT_READ);
+ vm_page_protect(mc[i], VM_PROT_NONE);
}
return vm_pageout_flush(&mc[page_base], pageout_count, sync);
@@ -359,7 +356,7 @@ vm_pageout_flush(mc, count, sync)
* essentially lose the changes by pretending it
* worked.
*/
- pmap_clear_modify(VM_PAGE_TO_PHYS(mt));
+ pmap_tc_modified(mt);
mt->dirty = 0;
break;
case VM_PAGER_ERROR:
@@ -446,7 +443,7 @@ vm_pageout_object_deactivate_pages(map, object, desired, map_remove_only)
continue;
}
- refcount = pmap_ts_referenced(VM_PAGE_TO_PHYS(p));
+ refcount = pmap_tc_referenced(VM_PAGE_TO_PHYS(p));
if (refcount) {
p->flags |= PG_REFERENCED;
} else if (p->flags & PG_REFERENCED) {
@@ -586,7 +583,7 @@ vm_pageout_scan()
maxlaunder = (cnt.v_inactive_target > MAXLAUNDER) ?
MAXLAUNDER : cnt.v_inactive_target;
-rescan0:
+
maxscan = cnt.v_inactive_count;
for( m = TAILQ_FIRST(&vm_page_queue_inactive);
@@ -599,7 +596,7 @@ rescan0:
cnt.v_pdpages++;
if (m->queue != PQ_INACTIVE) {
- goto rescan0;
+ break;
}
next = TAILQ_NEXT(m, pageq);
@@ -621,32 +618,33 @@ rescan0:
continue;
}
- if (m->object->ref_count == 0) {
- m->flags &= ~PG_REFERENCED;
- pmap_clear_reference(VM_PAGE_TO_PHYS(m));
- } else if (((m->flags & PG_REFERENCED) == 0) &&
- pmap_ts_referenced(VM_PAGE_TO_PHYS(m))) {
- vm_page_activate(m);
- continue;
- }
-
- if ((m->flags & PG_REFERENCED) != 0) {
- m->flags &= ~PG_REFERENCED;
- pmap_clear_reference(VM_PAGE_TO_PHYS(m));
- vm_page_activate(m);
- continue;
- }
+ if (m->valid != 0) {
+ if (m->object->ref_count == 0) {
+ m->flags &= ~PG_REFERENCED;
+ pmap_tc_referenced(VM_PAGE_TO_PHYS(m));
+ } else if (((m->flags & PG_REFERENCED) == 0) &&
+ pmap_tc_referenced(VM_PAGE_TO_PHYS(m))) {
+ vm_page_activate(m);
+ continue;
+ }
- if (m->dirty == 0) {
- vm_page_test_dirty(m);
- } else if (m->dirty != 0) {
- m->dirty = VM_PAGE_BITS_ALL;
- }
+ if ((m->flags & PG_REFERENCED) != 0) {
+ m->flags &= ~PG_REFERENCED;
+ pmap_tc_referenced(VM_PAGE_TO_PHYS(m));
+ vm_page_activate(m);
+ continue;
+ }
+ if (m->dirty == 0) {
+ vm_page_test_dirty(m);
+ } else if (m->dirty != 0) {
+ m->dirty = VM_PAGE_BITS_ALL;
+ }
+ }
if (m->valid == 0) {
vm_page_protect(m, VM_PROT_NONE);
vm_page_free(m);
- cnt.v_dfree++;
+ ++cnt.v_dfree;
++pages_freed;
} else if (m->dirty == 0) {
vm_page_cache(m);
@@ -788,7 +786,7 @@ rescan0:
if (m->flags & PG_REFERENCED) {
refcount += 1;
}
- refcount += pmap_ts_referenced(VM_PAGE_TO_PHYS(m));
+ refcount += pmap_tc_referenced(VM_PAGE_TO_PHYS(m));
if (refcount) {
m->act_count += ACT_ADVANCE + refcount;
if (m->act_count > ACT_MAX)
diff --git a/sys/vm/vnode_pager.c b/sys/vm/vnode_pager.c
index 4cef671..3fe0ae3 100644
--- a/sys/vm/vnode_pager.c
+++ b/sys/vm/vnode_pager.c
@@ -38,7 +38,7 @@
* SUCH DAMAGE.
*
* from: @(#)vnode_pager.c 7.5 (Berkeley) 4/20/91
- * $Id: vnode_pager.c,v 1.59 1996/03/19 05:13:22 dyson Exp $
+ * $Id: vnode_pager.c,v 1.60 1996/05/03 21:01:54 phk Exp $
*/
/*
@@ -525,7 +525,7 @@ vnode_pager_input_smlfs(object, m)
}
}
vm_pager_unmap_page(kva);
- pmap_clear_modify(VM_PAGE_TO_PHYS(m));
+ pmap_tc_modified(m);
m->flags &= ~PG_ZERO;
if (error) {
return VM_PAGER_ERROR;
@@ -588,7 +588,7 @@ vnode_pager_input_old(object, m)
}
vm_pager_unmap_page(kva);
}
- pmap_clear_modify(VM_PAGE_TO_PHYS(m));
+ pmap_tc_modified(m);
m->dirty = 0;
m->flags &= ~PG_ZERO;
return error ? VM_PAGER_ERROR : VM_PAGER_OK;
@@ -808,7 +808,7 @@ vnode_pager_leaf_getpages(object, m, count, reqpage)
relpbuf(bp);
for (i = 0; i < count; i++) {
- pmap_clear_modify(VM_PAGE_TO_PHYS(m[i]));
+ pmap_tc_modified(m[i]);
m[i]->dirty = 0;
m[i]->valid = VM_PAGE_BITS_ALL;
m[i]->flags &= ~PG_ZERO;