author    dg <dg@FreeBSD.org>  1995-01-24 10:14:09 +0000
committer dg <dg@FreeBSD.org>  1995-01-24 10:14:09 +0000
commit    a9e08ab1e382c62f084401bff2ad5528a3b71e3e (patch)
tree      a53b9363f7e1cd0fe0867d24dd5952593915453e /sys
parent    cba22c2c7d66e22200f82b02a38eb994e4eb64b3 (diff)
download  FreeBSD-src-a9e08ab1e382c62f084401bff2ad5528a3b71e3e.zip
          FreeBSD-src-a9e08ab1e382c62f084401bff2ad5528a3b71e3e.tar.gz
Added ability to detect sequential faults and DTRT. (swap_pager.c)
Added hook for pmap_prefault() and use symbolic constant for new third
argument to vm_page_alloc(). (vm_fault.c, various)
Changed the way that upages and page tables are held. (vm_glue.c)
Fixed architectural flaw in allocating pages at interrupt time that was
introduced with the merged cache changes. (vm_page.c, various)
Adjusted some algorithms to achieve better paging performance and to
accommodate the fix for the architectural flaw mentioned above. (vm_pageout.c)
Fixed pbuf handling problem, changed policy on handling read-behind page.
(vnode_pager.c)

Submitted by: John Dyson
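
The sequential-fault detection works by remembering, per VM object, the
offset of the last page read in (the new last_read field) and comparing each
new fault offset against it. Below is a minimal user-space model of the
heuristic, not the kernel code itself; the demo_* names are hypothetical
stand-ins for the kernel structures:

/*
 * Illustrative model of the sequential-fault heuristic added to
 * swap_pager_input().  demo_object and is_sequential() are made up
 * for this sketch.
 */
#include <stdio.h>

#define PAGE_SIZE 4096UL

struct demo_object {
        unsigned long last_read;        /* offset of the last page read in */
};

/* A fault is "sequential" when it lands one page past the last read. */
static int
is_sequential(struct demo_object *obj, unsigned long offset)
{
        return offset == obj->last_read + PAGE_SIZE;
}

int
main(void)
{
        struct demo_object obj = { 0 };
        unsigned long offsets[] = { PAGE_SIZE, 2 * PAGE_SIZE, 5 * PAGE_SIZE };

        for (int i = 0; i < 3; i++) {
                printf("fault at %lu: %s\n", offsets[i],
                    is_sequential(&obj, offsets[i]) ? "sequential" : "random");
                obj.last_read = offsets[i];     /* as the pager does after I/O */
        }
        return 0;
}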
Diffstat (limited to 'sys')
-rw-r--r--  sys/vm/swap_pager.c    14
-rw-r--r--  sys/vm/vm_fault.c      17
-rw-r--r--  sys/vm/vm_glue.c       15
-rw-r--r--  sys/vm/vm_kern.c        6
-rw-r--r--  sys/vm/vm_map.c         5
-rw-r--r--  sys/vm/vm_object.c      3
-rw-r--r--  sys/vm/vm_object.h      3
-rw-r--r--  sys/vm/vm_page.c       51
-rw-r--r--  sys/vm/vm_page.h        5
-rw-r--r--  sys/vm/vm_pageout.c    43
-rw-r--r--  sys/vm/vnode_pager.c   19
11 files changed, 104 insertions, 77 deletions
diff --git a/sys/vm/swap_pager.c b/sys/vm/swap_pager.c
index 63d49f0..da751f3 100644
--- a/sys/vm/swap_pager.c
+++ b/sys/vm/swap_pager.c
@@ -39,7 +39,7 @@
* from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
*
* @(#)swap_pager.c 8.9 (Berkeley) 3/21/94
- * $Id: swap_pager.c,v 1.22 1995/01/09 16:05:33 davidg Exp $
+ * $Id: swap_pager.c,v 1.23 1995/01/10 07:32:43 davidg Exp $
*/
/*
@@ -894,6 +894,7 @@ swap_pager_input(swp, m, count, reqpage)
vm_offset_t paging_offset;
vm_object_t object;
int reqaddr[count];
+ int sequential;
int first, last;
int failed;
@@ -901,6 +902,7 @@ swap_pager_input(swp, m, count, reqpage)
object = m[reqpage]->object;
paging_offset = object->paging_offset;
+ sequential = (m[reqpage]->offset == (object->last_read + PAGE_SIZE));
/*
* First determine if the page exists in the pager if this is a sync
* read. This quickly handles cases where we are following shadow
@@ -947,7 +949,7 @@ swap_pager_input(swp, m, count, reqpage)
failed = 0;
first = 0;
for (i = reqpage - 1; i >= 0; --i) {
- if (failed || (reqaddr[i] == SWB_EMPTY) ||
+ if (sequential || failed || (reqaddr[i] == SWB_EMPTY) ||
(swb[i]->swb_valid & (1 << off[i])) == 0 ||
(reqaddr[i] != (reqaddr[reqpage] + (i - reqpage) * btodb(PAGE_SIZE))) ||
((reqaddr[i] / dmmax) != reqdskregion)) {
@@ -1105,6 +1107,7 @@ swap_pager_input(swp, m, count, reqpage)
pmap_qremove(kva, count);
if (spc) {
+ m[reqpage]->object->last_read = m[reqpage]->offset;
if (bp->b_flags & B_WANTED)
wakeup((caddr_t) bp);
/*
@@ -1141,9 +1144,11 @@ swap_pager_input(swp, m, count, reqpage)
* results, it is best to deactivate
* the readahead pages.
*/
- if ((i == reqpage - 1) || (i == reqpage + 1))
+/*
+ if (sequential || (i == reqpage - 1) || (i == reqpage + 1))
vm_page_activate(m[i]);
else
+*/
vm_page_deactivate(m[i]);
/*
@@ -1155,6 +1160,9 @@ swap_pager_input(swp, m, count, reqpage)
PAGE_WAKEUP(m[i]);
}
}
+
+ m[reqpage]->object->last_read = m[count-1]->offset;
+
/*
* If we're out of swap space, then attempt to free
* some whenever pages are brought in. We must clear
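
When a fault is flagged sequential, the backward (read-behind) scan in
swap_pager_input() gives up immediately, so the paging cluster extends only
forward. A simplified stand-alone model of that scan follows; the made-up
contiguous[] array replaces the swap-block contiguity tests the real code
performs:

/*
 * Minimal sketch (not the kernel code) of how the "sequential" flag
 * trims the read-behind side of a paging cluster.
 */
#include <stdio.h>

int
main(void)
{
        int contiguous[8] = { 1, 1, 1, 1, 1, 1, 0, 0 };
        int reqpage = 3, count = 8;
        int sequential = 1;             /* as detected from last_read */
        int i, first = 0, last = count, failed = 0;

        /* backward scan: abandoned at once when sequential */
        for (i = reqpage - 1; i >= 0; --i) {
                if (sequential || failed || !contiguous[i]) {
                        failed = 1;                /* never resume once broken */
                        if (i == reqpage - 1)
                                first = reqpage;   /* no read-behind at all */
                } else {
                        first = i;
                }
        }
        /* forward scan: read-ahead proceeds as before */
        for (failed = 0, i = reqpage + 1; i < count; i++) {
                if (failed || !contiguous[i]) {
                        last = i;
                        break;
                }
        }
        printf("cluster pages [%d, %d) around request %d\n", first, last, reqpage);
        return 0;
}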
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index de9bfab..d7b4105 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -66,7 +66,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_fault.c,v 1.14 1995/01/09 16:05:39 davidg Exp $
+ * $Id: vm_fault.c,v 1.15 1995/01/10 07:32:45 davidg Exp $
*/
/*
@@ -320,7 +320,7 @@ RetryFault:;
* Allocate a new page for this object/offset pair.
*/
- m = vm_page_alloc(object, offset, 0);
+ m = vm_page_alloc(object, offset, VM_ALLOC_NORMAL);
if (m == NULL) {
UNLOCK_AND_DEALLOCATE;
@@ -655,7 +655,7 @@ RetryCopy:
* that the copy_object's pager doesn't have
* the page...
*/
- copy_m = vm_page_alloc(copy_object, copy_offset, 0);
+ copy_m = vm_page_alloc(copy_object, copy_offset, VM_ALLOC_NORMAL);
if (copy_m == NULL) {
/*
* Wait for a page, then retry.
@@ -839,6 +839,8 @@ RetryCopy:
*/
pmap_enter(map->pmap, vaddr, VM_PAGE_TO_PHYS(m), prot, wired);
+ if( ((prot & VM_PROT_WRITE) == 0) && change_wiring == 0 && wired == 0)
+ pmap_prefault(map->pmap, vaddr, entry, first_object);
/*
* If the page is not wired down, then put it where the pageout daemon
@@ -905,6 +907,11 @@ vm_fault_wire(map, start, end)
*/
for (va = start; va < end; va += PAGE_SIZE) {
+
+ if( curproc != pageproc &&
+ (cnt.v_free_count <= cnt.v_pageout_free_min))
+ VM_WAIT;
+
rv = vm_fault(map, va, VM_PROT_NONE, TRUE);
if (rv) {
if (va != start)
@@ -1019,7 +1026,7 @@ vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry)
*/
vm_object_lock(dst_object);
do {
- dst_m = vm_page_alloc(dst_object, dst_offset, 0);
+ dst_m = vm_page_alloc(dst_object, dst_offset, VM_ALLOC_NORMAL);
if (dst_m == NULL) {
vm_object_unlock(dst_object);
VM_WAIT;
@@ -1215,7 +1222,7 @@ vm_fault_additional_pages(first_object, first_offset, m, rbehind, raheada, marra
*/
for (i = 0; i < size; i++) {
if (i != treqpage)
- rtm = vm_page_alloc(object, startoffset + i * NBPG, 0);
+ rtm = vm_page_alloc(object, startoffset + i * NBPG, VM_ALLOC_NORMAL);
else
rtm = m;
marray[i] = rtm;
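
The new prefault hook fires only on read faults that are neither wiring
operations nor already-wired mappings, since prefaulting is opportunistic and
the write and wire paths need exact behavior. A hedged sketch of the gating
logic; demo_prefault() is a hypothetical stand-in for pmap_prefault(), which
cheaply maps already-resident neighboring pages to avoid future faults:

#include <stdio.h>

#define VM_PROT_WRITE 0x2

static void
demo_prefault(unsigned long vaddr)
{
        printf("prefaulting neighbors of 0x%lx\n", vaddr);
}

static void
demo_fault(unsigned long vaddr, int prot, int change_wiring, int wired)
{
        /* ... page located and entered into the pmap here ... */
        if ((prot & VM_PROT_WRITE) == 0 && change_wiring == 0 && wired == 0)
                demo_prefault(vaddr);
}

int
main(void)
{
        demo_fault(0x1000, 0, 0, 0);             /* read fault: prefaults */
        demo_fault(0x2000, VM_PROT_WRITE, 0, 0); /* write fault: skipped */
        return 0;
}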
diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c
index 0767ae1..6ec7a1b 100644
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -59,7 +59,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_glue.c,v 1.11 1995/01/09 16:05:40 davidg Exp $
+ * $Id: vm_glue.c,v 1.12 1995/01/10 07:32:45 davidg Exp $
*/
#include <sys/param.h>
@@ -236,12 +236,6 @@ vm_fork(p1, p2, isvfork)
pmap_extract(vp->pmap, addr + NBPG * i),
VM_PROT_READ | VM_PROT_WRITE, 1);
- /*
- * and allow the UPAGES page table entry to be paged (at the vm system
- * level)
- */
- vm_map_pageable(vp, ptaddr, ptaddr + NBPG, TRUE);
-
p2->p_addr = up;
/*
@@ -339,9 +333,6 @@ faultin(p)
pa, VM_PROT_READ | VM_PROT_WRITE, 1);
}
- /* and let the page table pages go (at least above pmap level) */
- vm_map_pageable(map, ptaddr, ptaddr + NBPG, TRUE);
-
s = splhigh();
if (p->p_stat == SRUN)
@@ -491,6 +482,7 @@ swapout(p)
register struct proc *p;
{
vm_map_t map = &p->p_vmspace->vm_map;
+ vm_offset_t ptaddr;
++p->p_stats->p_ru.ru_nswap;
/*
@@ -515,6 +507,9 @@ swapout(p)
vm_map_pageable(map, (vm_offset_t) kstack,
(vm_offset_t) kstack + UPAGES * NBPG, TRUE);
+ ptaddr = trunc_page((u_int) vtopte(kstack));
+ vm_map_pageable(map, ptaddr, ptaddr + NBPG, TRUE);
+
p->p_flag &= ~P_SWAPPING;
p->p_swtime = 0;
}
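
The policy change in vm_glue.c moves the unwiring of the kernel-stack
page-table page from fork and fault-in time to swapout time, so the page
table stays held while the process is resident. A toy model of computing the
page-table address that gets unwired; the recursive-map base, the PTE size,
and the UPAGES value of 2 are assumptions for illustration only:

#include <stdio.h>

#define NBPG 4096UL
#define TRUNC_PAGE(x)   ((x) & ~(NBPG - 1))

/* Hypothetical stand-in for vtopte(): virtual address of the PTE that
 * maps va, assuming a flat recursive page-table window and 4-byte PTEs. */
static unsigned long
demo_vtopte(unsigned long va, unsigned long pt_window)
{
        return pt_window + (va / NBPG) * 4;
}

int
main(void)
{
        unsigned long kstack = 0xefbfc000UL;      /* assumed kstack address */
        unsigned long pt_window = 0xfdc00000UL;   /* assumed recursive base */
        unsigned long ptaddr = TRUNC_PAGE(demo_vtopte(kstack, pt_window));

        /* swapout(): make both the kstack and its page-table page pageable */
        printf("unwire kstack  [0x%lx, 0x%lx)\n", kstack, kstack + 2 * NBPG);
        printf("unwire pt page [0x%lx, 0x%lx)\n", ptaddr, ptaddr + NBPG);
        return 0;
}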
diff --git a/sys/vm/vm_kern.c b/sys/vm/vm_kern.c
index d59bbb8..a1ef102 100644
--- a/sys/vm/vm_kern.c
+++ b/sys/vm/vm_kern.c
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_kern.c,v 1.7 1994/08/18 22:36:02 wollman Exp $
+ * $Id: vm_kern.c,v 1.8 1995/01/09 16:05:43 davidg Exp $
*/
/*
@@ -177,7 +177,7 @@ kmem_alloc(map, size)
for (i = 0; i < size; i += PAGE_SIZE) {
vm_page_t mem;
- while ((mem = vm_page_alloc(kernel_object, offset + i, 0)) == NULL) {
+ while ((mem = vm_page_alloc(kernel_object, offset + i, VM_ALLOC_NORMAL)) == NULL) {
vm_object_unlock(kernel_object);
VM_WAIT;
vm_object_lock(kernel_object);
@@ -331,7 +331,7 @@ kmem_malloc(map, size, canwait)
*/
vm_object_lock(kmem_object);
for (i = 0; i < size; i += PAGE_SIZE) {
- m = vm_page_alloc(kmem_object, offset + i, 1);
+ m = vm_page_alloc(kmem_object, offset + i, VM_ALLOC_INTERRUPT);
/*
* Ran out of space, free everything up and return. Don't need
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index ec7bb5a..5ba5895 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_map.c,v 1.10 1995/01/09 16:05:45 davidg Exp $
+ * $Id: vm_map.c,v 1.11 1995/01/10 07:32:46 davidg Exp $
*/
/*
@@ -313,7 +313,8 @@ vm_map_entry_create(map)
vm_page_t m;
m = vm_page_alloc(kmem_object,
- mapvm - vm_map_min(kmem_map), 0);
+ mapvm - vm_map_min(kmem_map),
+ (map == kmem_map) ? VM_ALLOC_INTERRUPT : VM_ALLOC_NORMAL);
if (m) {
int newentries;
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index 007742d..e5424cb 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_object.c,v 1.17 1995/01/11 20:19:20 davidg Exp $
+ * $Id: vm_object.c,v 1.18 1995/01/13 13:30:24 davidg Exp $
*/
/*
@@ -140,6 +140,7 @@ _vm_object_allocate(size, object)
object->flags = OBJ_INTERNAL; /* vm_allocate_with_pager will reset */
object->paging_in_progress = 0;
object->copy = NULL;
+ object->last_read = 0;
/*
* Object starts out read-write, with no pager.
diff --git a/sys/vm/vm_object.h b/sys/vm/vm_object.h
index 03661df..71bdd76 100644
--- a/sys/vm/vm_object.h
+++ b/sys/vm/vm_object.h
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_object.h,v 1.3 1994/11/06 05:07:52 davidg Exp $
+ * $Id: vm_object.h,v 1.4 1995/01/09 16:05:50 davidg Exp $
*/
/*
@@ -98,6 +98,7 @@ struct vm_object {
vm_offset_t paging_offset; /* Offset into paging space */
struct vm_object *shadow; /* My shadow */
vm_offset_t shadow_offset; /* Offset in shadow */
+ vm_offset_t last_read; /* last read in object -- detect seq behavior */
TAILQ_ENTRY(vm_object) cached_list; /* for persistence */
TAILQ_ENTRY(vm_object) reverse_shadow_list; /* chain of objects that are shadowed */
TAILQ_HEAD(rslist, vm_object) reverse_shadow_head; /* objects that this is a shadow for */
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 45852fd..73906dd 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91
- * $Id: vm_page.c,v 1.15 1995/01/10 09:19:46 davidg Exp $
+ * $Id: vm_page.c,v 1.16 1995/01/15 07:31:34 davidg Exp $
*/
/*
@@ -601,16 +601,22 @@ vm_page_requeue(vm_page_t mem, int flags)
* Allocate and return a memory cell associated
* with this VM object/offset pair.
*
+ * page_req -- 0 normal process request VM_ALLOC_NORMAL
+ * page_req -- 1 interrupt time request VM_ALLOC_INTERRUPT
+ * page_req -- 2 system *really* needs a page VM_ALLOC_SYSTEM
+ * but *cannot* be at interrupt time
+ *
* Object must be locked.
*/
vm_page_t
-vm_page_alloc(object, offset, inttime)
+vm_page_alloc(object, offset, page_req)
vm_object_t object;
vm_offset_t offset;
- int inttime;
+ int page_req;
{
register vm_page_t mem;
int s;
+ int msgflg;
simple_lock(&vm_page_queue_free_lock);
@@ -625,22 +631,20 @@ vm_page_alloc(object, offset, inttime)
splx(s);
return (NULL);
}
- if (inttime) {
+ if (page_req == VM_ALLOC_INTERRUPT) {
if ((mem = vm_page_queue_free.tqh_first) == 0) {
- for (mem = vm_page_queue_cache.tqh_first; mem; mem = mem->pageq.tqe_next) {
- if ((mem->object->flags & OBJ_ILOCKED) == 0) {
- TAILQ_REMOVE(&vm_page_queue_cache, mem, pageq);
- vm_page_remove(mem);
- cnt.v_cache_count--;
- goto gotpage;
- }
- }
simple_unlock(&vm_page_queue_free_lock);
splx(s);
+ /*
+ * need to wakeup at interrupt time -- it doesn't do VM_WAIT
+ */
+ wakeup((caddr_t) &vm_pages_needed);
return NULL;
}
+ if( cnt.v_free_count < cnt.v_pageout_free_min)
+ wakeup((caddr_t) &vm_pages_needed);
} else {
- if ((cnt.v_free_count < 3) ||
+ if ((cnt.v_free_count < cnt.v_pageout_free_min) ||
(mem = vm_page_queue_free.tqh_first) == 0) {
mem = vm_page_queue_cache.tqh_first;
if (mem) {
@@ -649,9 +653,15 @@ vm_page_alloc(object, offset, inttime)
cnt.v_cache_count--;
goto gotpage;
}
- simple_unlock(&vm_page_queue_free_lock);
- splx(s);
- return (NULL);
+ if( page_req == VM_ALLOC_SYSTEM) {
+ mem = vm_page_queue_free.tqh_first;
+ if( !mem) {
+ simple_unlock(&vm_page_queue_free_lock);
+ splx(s);
+ wakeup((caddr_t) &vm_pages_needed);
+ return (NULL);
+ }
+ }
}
}
@@ -661,7 +671,7 @@ vm_page_alloc(object, offset, inttime)
gotpage:
simple_unlock(&vm_page_queue_free_lock);
- mem->flags = PG_BUSY | PG_CLEAN;
+ mem->flags = PG_BUSY;
mem->wire_count = 0;
mem->hold_count = 0;
mem->act_count = 0;
@@ -680,7 +690,8 @@ gotpage:
* we would be nearly out of memory.
*/
if (curproc != pageproc &&
- ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min))
+ ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min) ||
+ (cnt.v_free_count < cnt.v_pageout_free_min))
wakeup((caddr_t) &vm_pages_needed);
return (mem);
@@ -1109,8 +1120,8 @@ void
vm_page_test_dirty(m)
vm_page_t m;
{
- if ((!m->dirty || (m->dirty != vm_page_bits(0, PAGE_SIZE))) &&
- pmap_is_modified(VM_PAGE_TO_PHYS(m))) {
+ if ((m->dirty != VM_PAGE_BITS_ALL) &&
+ pmap_is_modified(VM_PAGE_TO_PHYS(m))) {
pmap_clear_modify(VM_PAGE_TO_PHYS(m));
m->dirty = VM_PAGE_BITS_ALL;
}
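
Taken together, the vm_page.c changes give vm_page_alloc() three request
classes with different rights to the page reserve: normal requests must leave
v_pageout_free_min pages untouched, system requests may dip into the reserve
when the cache queue is empty, and interrupt requests may drain the free
queue but never touch the cache queue. A stand-alone model of that policy;
the demo_* names are stand-ins for the kernel's counters and queues:

#include <stdio.h>

#define VM_ALLOC_NORMAL    0
#define VM_ALLOC_INTERRUPT 1
#define VM_ALLOC_SYSTEM    2

struct demo_counts {
        int free_count;         /* pages on the free queue */
        int cache_count;        /* clean, reclaimable cached pages */
        int pageout_free_min;   /* reserve for the pageout path */
};

/* Returns 1 if the request may take a page, 0 if it must fail or wait. */
static int
demo_can_alloc(struct demo_counts *c, int page_req)
{
        switch (page_req) {
        case VM_ALLOC_INTERRUPT:
                /* may consume the reserve, but only from the free queue */
                return c->free_count > 0;
        case VM_ALLOC_SYSTEM:
                /* prefers the cache, may dip into the free reserve */
                return c->cache_count > 0 || c->free_count > 0;
        default: /* VM_ALLOC_NORMAL */
                /* must leave the reserve intact; falls back to the cache */
                return (c->free_count >= c->pageout_free_min &&
                    c->free_count > 0) || c->cache_count > 0;
        }
}

int
main(void)
{
        struct demo_counts c = { 3, 0, 5 };   /* free pages below the reserve */

        printf("normal:    %d\n", demo_can_alloc(&c, VM_ALLOC_NORMAL));
        printf("system:    %d\n", demo_can_alloc(&c, VM_ALLOC_SYSTEM));
        printf("interrupt: %d\n", demo_can_alloc(&c, VM_ALLOC_INTERRUPT));
        return 0;
}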
diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h
index 3be697c..8c133d2 100644
--- a/sys/vm/vm_page.h
+++ b/sys/vm/vm_page.h
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_page.h,v 1.11 1995/01/09 16:05:52 davidg Exp $
+ * $Id: vm_page.h,v 1.12 1995/01/10 09:19:52 davidg Exp $
*/
/*
@@ -220,6 +220,9 @@ extern simple_lock_data_t vm_page_queue_free_lock; /* lock on free page queue */
#define VM_PAGE_BITS_ALL 0xffff
#endif
+#define VM_ALLOC_NORMAL 0
+#define VM_ALLOC_INTERRUPT 1
+#define VM_ALLOC_SYSTEM 2
void vm_page_activate __P((vm_page_t));
vm_page_t vm_page_alloc __P((vm_object_t, vm_offset_t, int));
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index d3b883d..5f8cd32 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -65,7 +65,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_pageout.c,v 1.29 1995/01/09 16:05:53 davidg Exp $
+ * $Id: vm_pageout.c,v 1.30 1995/01/10 07:32:49 davidg Exp $
*/
/*
@@ -341,7 +341,7 @@ vm_pageout_object_deactivate_pages(map, object, count, map_remove_only)
if (object->shadow->ref_count == 1)
dcount += vm_pageout_object_deactivate_pages(map, object->shadow, count / 2 + 1, map_remove_only);
else
- dcount += vm_pageout_object_deactivate_pages(map, object->shadow, count / 2 + 1, 1);
+ vm_pageout_object_deactivate_pages(map, object->shadow, count, 1);
}
if (object->paging_in_progress || !vm_object_lock_try(object))
return dcount;
@@ -518,8 +518,8 @@ vm_pageout_inactive_stats(int maxiscan)
* heuristic alert -- if a page is being re-activated,
* it probably will be used one more time...
*/
- ++m->act_count;
- ++m->act_count;
+ if (m->act_count < ACT_MAX)
+ m->act_count += ACT_ADVANCE;
}
m = next;
}
@@ -574,6 +574,7 @@ vm_pageout_scan()
*/
+rescan0:
vm_pageout_inactive_stats(MAXISCAN);
maxlaunder = (cnt.v_inactive_target > MAXLAUNDER) ?
MAXLAUNDER : cnt.v_inactive_target;
@@ -620,17 +621,21 @@ rescan1:
m->flags &= ~PG_REFERENCED;
pmap_clear_reference(VM_PAGE_TO_PHYS(m));
vm_page_activate(m);
- ++m->act_count;
- ++m->act_count;
+ if (m->act_count < ACT_MAX)
+ m->act_count += ACT_ADVANCE;
m = next;
continue;
}
vm_page_test_dirty(m);
if ((m->dirty & m->valid) == 0) {
- if (((cnt.v_free_count + cnt.v_cache_count) < desired_free) ||
- (cnt.v_cache_count < cnt.v_cache_min))
+ if (m->valid == 0) {
+ pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_NONE);
+ vm_page_free(m);
+ } else if (((cnt.v_free_count + cnt.v_cache_count) < desired_free) ||
+ (cnt.v_cache_count < cnt.v_cache_min)) {
vm_page_cache(m);
+ }
} else if (maxlaunder > 0) {
int written;
@@ -684,6 +689,8 @@ rescan1:
desired_free - (cnt.v_free_count + cnt.v_cache_count);
}
}
+ if( (page_shortage <= 0) && (cnt.v_free_count < cnt.v_free_min))
+ page_shortage = 1;
}
maxscan = cnt.v_active_count;
minscan = cnt.v_active_count;
@@ -706,6 +713,8 @@ rescan1:
(m->flags & PG_BUSY) ||
(m->hold_count != 0) ||
(m->bmapped != 0)) {
+ TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
+ TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
m = next;
continue;
}
@@ -725,6 +734,8 @@ rescan1:
TAILQ_INSERT_TAIL(&m->object->memq, m, listq);
splx(s);
} else {
+ m->flags &= ~PG_REFERENCED;
+ pmap_clear_reference(VM_PAGE_TO_PHYS(m));
m->act_count -= min(m->act_count, ACT_DECLINE);
/*
@@ -733,10 +744,6 @@ rescan1:
if (!m->act_count && (page_shortage > 0)) {
if (m->object->ref_count == 0) {
vm_page_test_dirty(m);
-
- m->flags &= ~PG_REFERENCED;
- pmap_clear_reference(VM_PAGE_TO_PHYS(m));
-
--page_shortage;
if ((m->dirty & m->valid) == 0) {
m->act_count = 0;
@@ -745,14 +752,10 @@ rescan1:
vm_page_deactivate(m);
}
} else {
-
- m->flags &= ~PG_REFERENCED;
- pmap_clear_reference(VM_PAGE_TO_PHYS(m));
-
vm_page_deactivate(m);
--page_shortage;
}
- } else {
+ } else if (m->act_count) {
TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
}
@@ -764,7 +767,7 @@ rescan1:
* We try to maintain some *really* free pages, this allows interrupt
* code to be guaranteed space.
*/
- while (cnt.v_free_count < MINFREE) {
+ while (cnt.v_free_count < cnt.v_free_min) {
m = vm_page_queue_cache.tqh_first;
if (!m)
break;
@@ -840,7 +843,7 @@ vm_pageout()
* free_reserved needs to include enough for the largest swap pager
* structures plus enough for any pv_entry structs when paging.
*/
- cnt.v_pageout_free_min = 5 + cnt.v_page_count / 1024;
+ cnt.v_pageout_free_min = 6 + cnt.v_page_count / 1024;
cnt.v_free_reserved = cnt.v_pageout_free_min + 2;
cnt.v_free_target = 3 * cnt.v_free_min + cnt.v_free_reserved;
cnt.v_inactive_target = cnt.v_free_count / 4;
@@ -926,8 +929,6 @@ vm_daemon()
size = p->p_vmspace->vm_pmap.pm_stats.resident_count * NBPG;
if (limit >= 0 && size >= limit) {
overage = (size - limit) / NBPG;
- if (limit == 0)
- overage += 20;
vm_pageout_map_deactivate_pages(&p->p_vmspace->vm_map,
(vm_map_entry_t) 0, &overage, vm_pageout_object_deactivate_pages);
}
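
The pageout changes replace the ad-hoc double increment of act_count with a
clamped advance/decline scheme: references advance the count by ACT_ADVANCE
up to ACT_MAX, and idle scans decay it by ACT_DECLINE toward zero. A minimal
sketch of the aging arithmetic; the constant values here are assumptions for
illustration, the kernel defines its own:

#include <stdio.h>

#define ACT_ADVANCE 3
#define ACT_DECLINE 1
#define ACT_MAX     100

static int
age(int act_count, int referenced)
{
        if (referenced) {
                if (act_count < ACT_MAX)
                        act_count += ACT_ADVANCE;
        } else {
                /* act_count -= min(act_count, ACT_DECLINE), as in the diff */
                act_count -= (act_count < ACT_DECLINE) ? act_count : ACT_DECLINE;
        }
        return act_count;
}

int
main(void)
{
        int a = 0;
        a = age(a, 1);          /* referenced: climbs */
        a = age(a, 1);
        a = age(a, 0);          /* idle: decays */
        printf("act_count = %d\n", a);
        return 0;
}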
diff --git a/sys/vm/vnode_pager.c b/sys/vm/vnode_pager.c
index 0693518..6df0f79 100644
--- a/sys/vm/vnode_pager.c
+++ b/sys/vm/vnode_pager.c
@@ -37,7 +37,7 @@
* SUCH DAMAGE.
*
* from: @(#)vnode_pager.c 7.5 (Berkeley) 4/20/91
- * $Id: vnode_pager.c,v 1.19 1995/01/09 16:06:01 davidg Exp $
+ * $Id: vnode_pager.c,v 1.20 1995/01/11 20:00:10 davidg Exp $
*/
/*
@@ -881,13 +881,15 @@ vnode_pager_input(vnp, m, count, reqpage)
counta = (count - reqpage) - 1;
bpa = 0;
sizea = 0;
+ bp = getpbuf();
if (counta) {
- bpa = getpbuf();
- count -= counta;
- sizea = size - count * PAGE_SIZE;
- size = count * PAGE_SIZE;
+ bpa = (struct buf *) trypbuf();
+ if (bpa) {
+ count -= counta;
+ sizea = size - count * PAGE_SIZE;
+ size = count * PAGE_SIZE;
+ }
}
- bp = getpbuf();
kva = (vm_offset_t) bp->b_data;
/*
@@ -981,10 +983,7 @@ finishup:
* now tell them that it is ok to use
*/
if (!error) {
- if (i != reqpage - 1)
- vm_page_deactivate(m[i]);
- else
- vm_page_activate(m[i]);
+ vm_page_deactivate(m[i]);
PAGE_WAKEUP(m[i]);
} else {
vnode_pager_freepage(m[i]);
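
The pbuf fix makes the read-ahead buffer strictly optional: the buffer for
the requested pages is still taken with blocking getpbuf(), but the extra
buffer now comes from a non-blocking trypbuf(), and the read-ahead is simply
dropped when the pool is empty instead of stalling the pager. A toy model of
the fallback, with a made-up one-slot pool standing in for the real pbuf
free list:

#include <stdio.h>
#include <stddef.h>

static int pool = 1;            /* pretend only one pbuf is left */

static int *
demo_getpbuf(void)              /* the real one sleeps until a buffer frees */
{
        static int buf;
        pool--;
        return &buf;
}

static int *
demo_trypbuf(void)              /* never sleeps; may return NULL */
{
        static int bufa;
        if (pool <= 0)
                return NULL;
        pool--;
        return &bufa;
}

int
main(void)
{
        int *bp = demo_getpbuf();       /* main request: always acquired */
        int *bpa = demo_trypbuf();      /* read-ahead: best effort only */

        printf("request buf %p, read-ahead buf %p%s\n",
            (void *)bp, (void *)bpa,
            bpa == NULL ? " (read-ahead skipped)" : "");
        return 0;
}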