author    dyson <dyson@FreeBSD.org>  1996-01-19 04:00:31 +0000
committer dyson <dyson@FreeBSD.org>  1996-01-19 04:00:31 +0000
commit    8fc8a772af22f6e03233d248fa2dbd9b5c2bdd7d
tree      3c31fd95ea745005a9cd6733db5a16f31bd828a6 /sys/vm
parent    6755beedbf0ddaa9e66e91c8e74f620ede6bfad5
Eliminated many redundant vm_map_lookup operations for vm_mmap.
Speed up for vfs_bio -- addition of a routine bqrelse to greatly diminish
overhead for merged cache.
Efficiency improvement for vfs_cluster. It used to do a lot of redundant
calls to cluster_rbuild.
Correct the ordering for vrele of .text and release of credentials.
Use the selective TLB update for 486/586/P6.
Numerous fixes to the size of objects allocated for files. Additionally,
fixes in the various pagers.
Fixes for proper positioning of vnode_pager_setsize in msdosfs and ext2fs.
Fixes in the swap pager for exhausted resources. The pageout code
will not as readily thrash.
Change the page queue flags (PG_ACTIVE, PG_INACTIVE, PG_FREE, PG_CACHE) into
page queue indices (PQ_ACTIVE, PQ_INACTIVE, PQ_FREE, PQ_CACHE),
thereby improving efficiency of several routines.
Eliminate even more unnecessary vm_page_protect operations.
Significantly speed up process forks.
Make vm_object_page_clean more efficient, thereby eliminating the pause
that happens every 30 seconds.
Make sequential clustered writes B_ASYNC instead of B_DELWRI even in the
case of filesystems mounted async.
Fix a panic with busy pages when write clustering is done for non-VMIO buffers.
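
(Background sketch: the flag-to-index conversion above is the core of this change. The
minimal user-space C program below is a simplified illustration of that technique --
hypothetical types and names, not the code in this commit -- showing how a per-page
queue index plus a small queue/counter table replaces testing and clearing the
PG_ACTIVE/PG_INACTIVE/PG_CACHE flag bits.)

#include <stdio.h>
#include <sys/queue.h>

enum { PQ_NONE, PQ_FREE, PQ_ZERO, PQ_INACTIVE, PQ_ACTIVE, PQ_CACHE, PQ_COUNT };

struct page {
	int queue;			/* index of the queue the page is on */
	TAILQ_ENTRY(page) pageq;	/* linkage on that queue */
};

TAILQ_HEAD(pglist, page);

static struct pglist queues[PQ_COUNT];	/* one list head per queue index */
static int counts[PQ_COUNT];		/* per-queue page counters */

/*
 * Remove a page from whatever queue it is on: one table lookup,
 * instead of an if/else chain over per-queue flag bits.
 */
static void
page_unqueue(struct page *m)
{
	int q = m->queue;

	if (q == PQ_NONE)
		return;
	m->queue = PQ_NONE;
	TAILQ_REMOVE(&queues[q], m, pageq);
	counts[q]--;
}

static void
page_enqueue(struct page *m, int q)
{
	m->queue = q;
	TAILQ_INSERT_TAIL(&queues[q], m, pageq);
	counts[q]++;
}

int
main(void)
{
	struct page p = { .queue = PQ_NONE };
	int i;

	for (i = 0; i < PQ_COUNT; i++)
		TAILQ_INIT(&queues[i]);

	page_enqueue(&p, PQ_ACTIVE);
	printf("active count: %d\n", counts[PQ_ACTIVE]);	/* prints 1 */
	page_unqueue(&p);
	printf("active count: %d\n", counts[PQ_ACTIVE]);	/* prints 0 */
	return 0;
}

The payoff, visible in vm_page_unqueue() and vm_page_alloc() in the diff below, is that
dequeueing and queue accounting no longer need a per-queue branch for each flag.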
Diffstat (limited to 'sys/vm')
-rw-r--r--  sys/vm/default_pager.c    4
-rw-r--r--  sys/vm/device_pager.c    12
-rw-r--r--  sys/vm/swap_pager.c      66
-rw-r--r--  sys/vm/vm_fault.c        20
-rw-r--r--  sys/vm/vm_glue.c        153
-rw-r--r--  sys/vm/vm_kern.c         20
-rw-r--r--  sys/vm/vm_map.c          62
-rw-r--r--  sys/vm/vm_map.h          13
-rw-r--r--  sys/vm/vm_mmap.c         79
-rw-r--r--  sys/vm/vm_object.c      166
-rw-r--r--  sys/vm/vm_page.c        351
-rw-r--r--  sys/vm/vm_page.h         47
-rw-r--r--  sys/vm/vm_pageout.c      81
-rw-r--r--  sys/vm/vm_unix.c          6
-rw-r--r--  sys/vm/vnode_pager.c      5
15 files changed, 599 insertions, 486 deletions
diff --git a/sys/vm/default_pager.c b/sys/vm/default_pager.c
index f61439b..dae1837 100644
--- a/sys/vm/default_pager.c
+++ b/sys/vm/default_pager.c
@@ -28,7 +28,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: default_pager.c,v 1.4 1995/12/11 04:57:56 dyson Exp $
+ * $Id: default_pager.c,v 1.5 1995/12/14 09:54:46 phk Exp $
*/
#include <sys/param.h>
@@ -80,7 +80,7 @@ default_pager_alloc(handle, size, prot, offset)
if (handle != NULL)
panic("default_pager_alloc: handle specified");
- return vm_object_allocate(OBJT_DEFAULT, offset + size);
+ return vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(offset) + size);
}
static void
diff --git a/sys/vm/device_pager.c b/sys/vm/device_pager.c
index 63201de..942af2d 100644
--- a/sys/vm/device_pager.c
+++ b/sys/vm/device_pager.c
@@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)device_pager.c 8.1 (Berkeley) 6/11/93
- * $Id: device_pager.c,v 1.18 1995/12/13 15:13:54 julian Exp $
+ * $Id: device_pager.c,v 1.19 1995/12/14 09:54:49 phk Exp $
*/
#include <sys/param.h>
@@ -128,7 +128,7 @@ dev_pager_alloc(handle, size, prot, foff)
*
* XXX assumes VM_PROT_* == PROT_*
*/
- npages = atop(round_page(size));
+ npages = size;
for (off = foff; npages--; off += PAGE_SIZE)
if ((*mapfunc) (dev, off, (int) prot) == -1)
return (NULL);
@@ -152,7 +152,7 @@ dev_pager_alloc(handle, size, prot, foff)
* Allocate object and associate it with the pager.
*/
object = vm_object_allocate(OBJT_DEVICE,
- OFF_TO_IDX(foff + size));
+ OFF_TO_IDX(foff) + size);
object->handle = handle;
TAILQ_INIT(&object->un_pager.devp.devp_pglist);
TAILQ_INSERT_TAIL(&dev_pager_object_list, object, pager_object_list);
@@ -161,8 +161,8 @@ dev_pager_alloc(handle, size, prot, foff)
* Gain a reference to the object.
*/
vm_object_reference(object);
- if (OFF_TO_IDX(foff + size) > object->size)
- object->size = OFF_TO_IDX(foff + size);
+ if (OFF_TO_IDX(foff) + size > object->size)
+ object->size = OFF_TO_IDX(foff) + size;
}
dev_pager_alloc_lock = 0;
@@ -279,7 +279,7 @@ dev_pager_getfake(paddr)
m->valid = VM_PAGE_BITS_ALL;
m->dirty = 0;
m->busy = 0;
- m->bmapped = 0;
+ m->queue = PQ_NONE;
m->wire_count = 1;
m->phys_addr = paddr;
diff --git a/sys/vm/swap_pager.c b/sys/vm/swap_pager.c
index 1fc7f85..25b608d 100644
--- a/sys/vm/swap_pager.c
+++ b/sys/vm/swap_pager.c
@@ -39,7 +39,7 @@
* from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
*
* @(#)swap_pager.c 8.9 (Berkeley) 3/21/94
- * $Id: swap_pager.c,v 1.57 1995/12/14 09:54:52 phk Exp $
+ * $Id: swap_pager.c,v 1.58 1995/12/17 07:19:55 bde Exp $
*/
/*
@@ -302,13 +302,13 @@ swap_pager_alloc(handle, size, prot, offset)
* rip support of "named anonymous regions" out altogether.
*/
object = vm_object_allocate(OBJT_SWAP,
- OFF_TO_IDX(offset+ PAGE_SIZE - 1 + size));
+ OFF_TO_IDX(offset + PAGE_SIZE - 1) + size);
object->handle = handle;
(void) swap_pager_swp_alloc(object, M_WAITOK);
}
} else {
object = vm_object_allocate(OBJT_SWAP,
- OFF_TO_IDX(offset + PAGE_SIZE - 1 + size));
+ OFF_TO_IDX(offset + PAGE_SIZE - 1) + size);
(void) swap_pager_swp_alloc(object, M_WAITOK);
}
@@ -1266,6 +1266,7 @@ swap_pager_putpages(object, m, count, sync, rtvals)
swap_pager_free.tqh_first->spc_list.tqe_next->spc_list.tqe_next == NULL) {
s = splbio();
if (curproc == pageproc) {
+retryfree:
/*
* pageout daemon needs a swap control block
*/
@@ -1273,33 +1274,42 @@ swap_pager_putpages(object, m, count, sync, rtvals)
/*
* if it does not get one within a short time, then
* there is a potential deadlock, so we go-on trying
- * to free pages.
+ * to free pages. It is important to block here as opposed
+ * to returning, thereby allowing the pageout daemon to continue.
+ * It is likely that pageout daemon will start suboptimally
+ * reclaiming vnode backed pages if we don't block. Since the
+ * I/O subsystem is probably already fully utilized, might as
+ * well wait.
*/
- tsleep(&swap_pager_free, PVM, "swpfre", hz/10);
- swap_pager_sync();
- if (swap_pager_free.tqh_first == NULL ||
- swap_pager_free.tqh_first->spc_list.tqe_next == NULL ||
- swap_pager_free.tqh_first->spc_list.tqe_next->spc_list.tqe_next == NULL) {
- splx(s);
- return VM_PAGER_AGAIN;
+ if (tsleep(&swap_pager_free, PVM, "swpfre", hz/5)) {
+ swap_pager_sync();
+ if (swap_pager_free.tqh_first == NULL ||
+ swap_pager_free.tqh_first->spc_list.tqe_next == NULL ||
+ swap_pager_free.tqh_first->spc_list.tqe_next->spc_list.tqe_next == NULL) {
+ splx(s);
+ return VM_PAGER_AGAIN;
+ }
+ } else {
+ /*
+ * we make sure that pageouts aren't taking up all of
+ * the free swap control blocks.
+ */
+ swap_pager_sync();
+ if (swap_pager_free.tqh_first == NULL ||
+ swap_pager_free.tqh_first->spc_list.tqe_next == NULL ||
+ swap_pager_free.tqh_first->spc_list.tqe_next->spc_list.tqe_next == NULL) {
+ goto retryfree;
+ }
}
- } else
+ } else {
pagedaemon_wakeup();
- while (swap_pager_free.tqh_first == NULL ||
- swap_pager_free.tqh_first->spc_list.tqe_next == NULL ||
- swap_pager_free.tqh_first->spc_list.tqe_next->spc_list.tqe_next == NULL) {
- if (curproc == pageproc) {
- swap_pager_needflags |= SWAP_FREE_NEEDED_BY_PAGEOUT;
- if((cnt.v_free_count + cnt.v_cache_count) > cnt.v_free_reserved)
- wakeup(&cnt.v_free_count);
- }
-
- swap_pager_needflags |= SWAP_FREE_NEEDED;
- tsleep(&swap_pager_free, PVM, "swpfre", 0);
- if (curproc == pageproc)
- swap_pager_sync();
- else
+ while (swap_pager_free.tqh_first == NULL ||
+ swap_pager_free.tqh_first->spc_list.tqe_next == NULL ||
+ swap_pager_free.tqh_first->spc_list.tqe_next->spc_list.tqe_next == NULL) {
+ swap_pager_needflags |= SWAP_FREE_NEEDED;
+ tsleep(&swap_pager_free, PVM, "swpfre", 0);
pagedaemon_wakeup();
+ }
}
splx(s);
}
@@ -1436,7 +1446,7 @@ swap_pager_putpages(object, m, count, sync, rtvals)
* optimization, if a page has been read
* during the pageout process, we activate it.
*/
- if ((m[i]->flags & PG_ACTIVE) == 0 &&
+ if ((m[i]->queue != PQ_ACTIVE) &&
((m[i]->flags & (PG_WANTED|PG_REFERENCED)) ||
pmap_is_referenced(VM_PAGE_TO_PHYS(m[i])))) {
vm_page_activate(m[i]);
@@ -1542,7 +1552,7 @@ swap_pager_finish(spc)
for (i = 0; i < spc->spc_count; i++) {
pmap_clear_modify(VM_PAGE_TO_PHYS(spc->spc_m[i]));
spc->spc_m[i]->dirty = 0;
- if ((spc->spc_m[i]->flags & PG_ACTIVE) == 0 &&
+ if ((spc->spc_m[i]->queue != PQ_ACTIVE) &&
((spc->spc_m[i]->flags & PG_WANTED) || pmap_is_referenced(VM_PAGE_TO_PHYS(spc->spc_m[i]))))
vm_page_activate(spc->spc_m[i]);
}
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index 87090a4..771da03 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -66,7 +66,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_fault.c,v 1.38 1995/12/07 12:48:10 davidg Exp $
+ * $Id: vm_fault.c,v 1.39 1995/12/11 04:58:06 dyson Exp $
*/
/*
@@ -157,7 +157,7 @@ vm_fault(map, vaddr, fault_type, change_wiring)
#define RELEASE_PAGE(m) { \
PAGE_WAKEUP(m); \
- if ((m->flags & PG_ACTIVE) == 0) vm_page_activate(m); \
+ if (m->queue != PQ_ACTIVE) vm_page_activate(m); \
}
#define UNLOCK_MAP { \
@@ -280,7 +280,7 @@ RetryFault:;
* Mark page busy for other processes, and the pagedaemon.
*/
m->flags |= PG_BUSY;
- if ((m->flags & PG_CACHE) &&
+ if ((m->queue == PQ_CACHE) &&
(cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_reserved) {
UNLOCK_AND_DEALLOCATE;
VM_WAIT;
@@ -288,8 +288,9 @@ RetryFault:;
goto RetryFault;
}
- if (m->valid && ((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) &&
- m->object != kernel_object && m->object != kmem_object) {
+ if (m->valid &&
+ ((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) &&
+ m->object != kernel_object && m->object != kmem_object) {
goto readrest;
}
break;
@@ -306,7 +307,7 @@ RetryFault:;
* Allocate a new page for this object/offset pair.
*/
m = vm_page_alloc(object, pindex,
- vp?VM_ALLOC_NORMAL:(VM_ALLOC_NORMAL|VM_ALLOC_ZERO));
+ vp?VM_ALLOC_NORMAL:VM_ALLOC_ZERO);
if (m == NULL) {
UNLOCK_AND_DEALLOCATE;
@@ -504,9 +505,8 @@ readrest:
* call.
*/
- if ((m->flags & PG_ACTIVE) == 0)
+ if (m->queue != PQ_ACTIVE)
vm_page_activate(m);
- vm_page_protect(m, VM_PROT_NONE);
/*
* We no longer need the old page or object.
@@ -642,7 +642,7 @@ readrest:
else
vm_page_unwire(m);
} else {
- if ((m->flags & PG_ACTIVE) == 0)
+ if (m->queue != PQ_ACTIVE)
vm_page_activate(m);
}
@@ -654,8 +654,6 @@ readrest:
}
}
- if ((m->flags & PG_BUSY) == 0)
- printf("page not busy: %d\n", m->pindex);
/*
* Unlock everything, and return
*/
diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c
index c6ff0e0..77d82a6 100644
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -59,7 +59,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_glue.c,v 1.33 1995/12/14 09:54:57 phk Exp $
+ * $Id: vm_glue.c,v 1.35 1996/01/04 21:13:14 wollman Exp $
*/
#include "opt_sysvipc.h"
@@ -87,6 +87,8 @@
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
+#include <vm/vm_object.h>
+#include <vm/vm_pager.h>
#include <sys/user.h>
@@ -213,9 +215,11 @@ vm_fork(p1, p2, isvfork)
int isvfork;
{
register struct user *up;
- vm_offset_t addr, ptaddr;
+ vm_offset_t addr, ptaddr, ptpa;
int error, i;
- struct vm_map *vp;
+ vm_map_t vp;
+ pmap_t pvp;
+ vm_page_t stkm;
while ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min) {
VM_WAIT;
@@ -243,30 +247,48 @@ vm_fork(p1, p2, isvfork)
addr = (vm_offset_t) kstack;
vp = &p2->p_vmspace->vm_map;
+ pvp = &p2->p_vmspace->vm_pmap;
/* get new pagetables and kernel stack */
- (void) vm_map_find(vp, NULL, 0, &addr, UPT_MAX_ADDRESS - addr, FALSE);
-
- /* force in the page table encompassing the UPAGES */
- ptaddr = trunc_page((u_int) vtopte(addr));
- error = vm_map_pageable(vp, ptaddr, ptaddr + PAGE_SIZE, FALSE);
- if (error)
- panic("vm_fork: wire of PT failed. error=%d", error);
-
- /* and force in (demand-zero) the UPAGES */
- error = vm_map_pageable(vp, addr, addr + UPAGES * PAGE_SIZE, FALSE);
- if (error)
- panic("vm_fork: wire of UPAGES failed. error=%d", error);
+ (void) vm_map_find(vp, NULL, 0, &addr, UPT_MAX_ADDRESS - addr, FALSE,
+ VM_PROT_ALL, VM_PROT_ALL, 0);
/* get a kernel virtual address for the UPAGES for this proc */
up = (struct user *) kmem_alloc_pageable(u_map, UPAGES * PAGE_SIZE);
if (up == NULL)
panic("vm_fork: u_map allocation failed");
- /* and force-map the upages into the kernel pmap */
- for (i = 0; i < UPAGES; i++)
- pmap_kenter(((vm_offset_t) up) + PAGE_SIZE * i,
- pmap_extract(vp->pmap, addr + PAGE_SIZE * i));
+ p2->p_vmspace->vm_upages_obj = vm_object_allocate( OBJT_DEFAULT,
+ UPAGES);
+
+ ptaddr = trunc_page((u_int) vtopte(kstack));
+ (void) vm_fault(vp, ptaddr, VM_PROT_READ|VM_PROT_WRITE, FALSE);
+ ptpa = pmap_extract(pvp, ptaddr);
+ if (ptpa == 0) {
+ panic("vm_fork: no pte for UPAGES");
+ }
+ stkm = PHYS_TO_VM_PAGE(ptpa);
+ vm_page_hold(stkm);
+
+ for(i=0;i<UPAGES;i++) {
+ vm_page_t m;
+
+ while ((m = vm_page_alloc(p2->p_vmspace->vm_upages_obj, i, VM_ALLOC_ZERO)) == NULL) {
+ VM_WAIT;
+ }
+
+ vm_page_wire(m);
+ m->flags &= ~PG_BUSY;
+ pmap_enter( pvp, (vm_offset_t) kstack + i * PAGE_SIZE,
+ VM_PAGE_TO_PHYS(m), VM_PROT_READ|VM_PROT_WRITE, 1);
+ pmap_kenter(((vm_offset_t) up) + i * PAGE_SIZE,
+ VM_PAGE_TO_PHYS(m));
+ if ((m->flags & PG_ZERO) == 0)
+ bzero(((caddr_t) up) + i * PAGE_SIZE, PAGE_SIZE);
+ m->flags &= ~PG_ZERO;
+ m->valid = VM_PAGE_BITS_ALL;
+ }
+ vm_page_unhold(stkm);
p2->p_addr = up;
@@ -334,37 +356,62 @@ faultin(p)
int s;
if ((p->p_flag & P_INMEM) == 0) {
- vm_map_t map;
+ vm_map_t map = &p->p_vmspace->vm_map;
+ pmap_t pmap = &p->p_vmspace->vm_pmap;
+ vm_page_t stkm, m;
+ vm_offset_t ptpa;
int error;
++p->p_lock;
- map = &p->p_vmspace->vm_map;
- /* force the page table encompassing the kernel stack (upages) */
ptaddr = trunc_page((u_int) vtopte(kstack));
- error = vm_map_pageable(map, ptaddr, ptaddr + PAGE_SIZE, FALSE);
- if (error)
- panic("faultin: wire of PT failed. error=%d", error);
-
- /* wire in the UPAGES */
- error = vm_map_pageable(map, (vm_offset_t) kstack,
- (vm_offset_t) kstack + UPAGES * PAGE_SIZE, FALSE);
- if (error)
- panic("faultin: wire of UPAGES failed. error=%d", error);
-
- /* and map them nicely into the kernel pmap */
- for (i = 0; i < UPAGES; i++) {
- vm_offset_t off = i * PAGE_SIZE;
- vm_offset_t pa = (vm_offset_t)
- pmap_extract(&p->p_vmspace->vm_pmap,
- (vm_offset_t) kstack + off);
-
- if (pa == 0)
- panic("faultin: missing page for UPAGES\n");
-
- pmap_kenter(((vm_offset_t) p->p_addr) + off, pa);
+ (void) vm_fault(map, ptaddr, VM_PROT_READ|VM_PROT_WRITE, FALSE);
+ ptpa = pmap_extract(&p->p_vmspace->vm_pmap, ptaddr);
+ if (ptpa == 0) {
+ panic("vm_fork: no pte for UPAGES");
}
+ stkm = PHYS_TO_VM_PAGE(ptpa);
+ vm_page_hold(stkm);
+ for(i=0;i<UPAGES;i++) {
+ int s;
+ s = splhigh();
+
+retry:
+ if ((m = vm_page_lookup(p->p_vmspace->vm_upages_obj, i)) == NULL) {
+ if ((m = vm_page_alloc(p->p_vmspace->vm_upages_obj, i, VM_ALLOC_NORMAL)) == NULL) {
+ VM_WAIT;
+ goto retry;
+ }
+ } else {
+ if ((m->flags & PG_BUSY) || m->busy) {
+ m->flags |= PG_WANTED;
+ tsleep(m, PVM, "swinuw",0);
+ goto retry;
+ }
+ }
+ vm_page_wire(m);
+ if (m->valid == VM_PAGE_BITS_ALL)
+ m->flags &= ~PG_BUSY;
+ splx(s);
+
+ pmap_enter( pmap, (vm_offset_t) kstack + i * PAGE_SIZE,
+ VM_PAGE_TO_PHYS(m), VM_PROT_READ|VM_PROT_WRITE, TRUE);
+ pmap_kenter(((vm_offset_t) p->p_addr) + i * PAGE_SIZE,
+ VM_PAGE_TO_PHYS(m));
+ if (m->valid != VM_PAGE_BITS_ALL) {
+ int rv;
+ rv = vm_pager_get_pages(p->p_vmspace->vm_upages_obj,
+ &m, 1, 0);
+ if (rv != VM_PAGER_OK)
+ panic("faultin: cannot get upages for proc: %d\n", p->p_pid);
+ m->valid = VM_PAGE_BITS_ALL;
+ m->flags &= ~PG_BUSY;
+ }
+ }
+ vm_page_unhold(stkm);
+
+
s = splhigh();
if (p->p_stat == SRUN)
@@ -402,7 +449,8 @@ loop:
pp = NULL;
ppri = INT_MIN;
for (p = (struct proc *) allproc; p != NULL; p = p->p_next) {
- if (p->p_stat == SRUN && (p->p_flag & (P_INMEM | P_SWAPPING)) == 0) {
+ if (p->p_stat == SRUN &&
+ (p->p_flag & (P_INMEM | P_SWAPPING)) == 0) {
int mempri;
pri = p->p_swtime + p->p_slptime - p->p_nice * 8;
@@ -515,6 +563,7 @@ swapout(p)
register struct proc *p;
{
vm_map_t map = &p->p_vmspace->vm_map;
+ pmap_t pmap = &p->p_vmspace->vm_pmap;
vm_offset_t ptaddr;
int i;
@@ -535,14 +584,16 @@ swapout(p)
/*
* let the upages be paged
*/
- for(i=0;i<UPAGES;i++)
+ for(i=0;i<UPAGES;i++) {
+ vm_page_t m;
+ if ((m = vm_page_lookup(p->p_vmspace->vm_upages_obj, i)) == NULL)
+ panic("swapout: upage already missing???");
+ m->dirty = VM_PAGE_BITS_ALL;
+ vm_page_unwire(m);
pmap_kremove( (vm_offset_t) p->p_addr + PAGE_SIZE * i);
-
- vm_map_pageable(map, (vm_offset_t) kstack,
- (vm_offset_t) kstack + UPAGES * PAGE_SIZE, TRUE);
-
- ptaddr = trunc_page((u_int) vtopte(kstack));
- vm_map_pageable(map, ptaddr, ptaddr + PAGE_SIZE, TRUE);
+ }
+ pmap_remove(pmap, (vm_offset_t) kstack,
+ (vm_offset_t) kstack + PAGE_SIZE * UPAGES);
p->p_flag &= ~P_SWAPPING;
p->p_swtime = 0;
diff --git a/sys/vm/vm_kern.c b/sys/vm/vm_kern.c
index 6b3b006..9569a39 100644
--- a/sys/vm/vm_kern.c
+++ b/sys/vm/vm_kern.c
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_kern.c,v 1.19 1995/12/10 14:52:09 bde Exp $
+ * $Id: vm_kern.c,v 1.20 1995/12/11 04:58:09 dyson Exp $
*/
/*
@@ -118,7 +118,7 @@ kmem_alloc_pageable(map, size)
size = round_page(size);
addr = vm_map_min(map);
result = vm_map_find(map, NULL, (vm_offset_t) 0,
- &addr, size, TRUE);
+ &addr, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, 0);
if (result != KERN_SUCCESS) {
return (0);
}
@@ -157,7 +157,8 @@ kmem_alloc(map, size)
}
offset = addr - VM_MIN_KERNEL_ADDRESS;
vm_object_reference(kernel_object);
- vm_map_insert(map, kernel_object, offset, addr, addr + size);
+ vm_map_insert(map, kernel_object, offset, addr, addr + size,
+ VM_PROT_ALL, VM_PROT_ALL, 0);
vm_map_unlock(map);
/*
@@ -182,8 +183,7 @@ kmem_alloc(map, size)
vm_page_t mem;
while ((mem = vm_page_alloc(kernel_object,
- OFF_TO_IDX(offset + i),
- (VM_ALLOC_NORMAL|VM_ALLOC_ZERO))) == NULL) {
+ OFF_TO_IDX(offset + i), VM_ALLOC_ZERO)) == NULL) {
VM_WAIT;
}
if ((mem->flags & PG_ZERO) == 0)
@@ -249,7 +249,7 @@ kmem_suballoc(parent, min, max, size, pageable)
*min = (vm_offset_t) vm_map_min(parent);
ret = vm_map_find(parent, NULL, (vm_offset_t) 0,
- min, size, TRUE);
+ min, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, 0);
if (ret != KERN_SUCCESS) {
printf("kmem_suballoc: bad status return of %d.\n", ret);
panic("kmem_suballoc");
@@ -316,7 +316,8 @@ kmem_malloc(map, size, waitflag)
}
offset = addr - vm_map_min(kmem_map);
vm_object_reference(kmem_object);
- vm_map_insert(map, kmem_object, offset, addr, addr + size);
+ vm_map_insert(map, kmem_object, offset, addr, addr + size,
+ VM_PROT_ALL, VM_PROT_ALL, 0);
/*
* If we can wait, just mark the range as wired (will fault pages as
@@ -376,6 +377,7 @@ kmem_malloc(map, size, waitflag)
*/
for (i = 0; i < size; i += PAGE_SIZE) {
m = vm_page_lookup(kmem_object, OFF_TO_IDX(offset + i));
+ vm_page_wire(m);
pmap_kenter(addr + i, VM_PAGE_TO_PHYS(m));
}
vm_map_unlock(map);
@@ -416,7 +418,7 @@ kmem_alloc_wait(map, size)
vm_map_unlock(map);
tsleep(map, PVM, "kmaw", 0);
}
- vm_map_insert(map, NULL, (vm_offset_t) 0, addr, addr + size);
+ vm_map_insert(map, NULL, (vm_offset_t) 0, addr, addr + size, VM_PROT_ALL, VM_PROT_ALL, 0);
vm_map_unlock(map);
return (addr);
}
@@ -456,7 +458,7 @@ kmem_init(start, end)
/* N.B.: cannot use kgdb to debug, starting with this assignment ... */
kernel_map = m;
(void) vm_map_insert(m, NULL, (vm_offset_t) 0,
- VM_MIN_KERNEL_ADDRESS, start);
+ VM_MIN_KERNEL_ADDRESS, start, VM_PROT_ALL, VM_PROT_ALL, 0);
/* ... and ending with the completion of the above `insert' */
vm_map_unlock(m);
}
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index 3bfb8ad..e1d9330 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_map.c,v 1.30 1995/12/14 09:54:59 phk Exp $
+ * $Id: vm_map.c,v 1.31 1996/01/04 21:13:17 wollman Exp $
*/
/*
@@ -151,6 +151,7 @@ vm_offset_t kentry_data;
vm_size_t kentry_data_size;
static vm_map_entry_t kentry_free;
static vm_map_t kmap_free;
+extern char kstack[];
static int kentry_count;
static vm_offset_t mapvm_start, mapvm, mapvmmax;
@@ -241,12 +242,17 @@ vmspace_free(vm)
panic("vmspace_free: attempt to free already freed vmspace");
if (--vm->vm_refcnt == 0) {
+ int s, i;
+
+ pmap_remove(&vm->vm_pmap, (vm_offset_t) kstack, (vm_offset_t) kstack+UPAGES*PAGE_SIZE);
+
/*
* Lock the map, to wait out all other references to it.
* Delete all of the mappings and pages they hold, then call
* the pmap module to reclaim anything left.
*/
vm_map_lock(&vm->vm_map);
+ vm_object_deallocate(vm->vm_upages_obj);
(void) vm_map_delete(&vm->vm_map, vm->vm_map.min_offset,
vm->vm_map.max_offset);
vm_map_unlock(&vm->vm_map);
@@ -509,12 +515,14 @@ vm_map_deallocate(map)
* Requires that the map be locked, and leaves it so.
*/
int
-vm_map_insert(map, object, offset, start, end)
+vm_map_insert(map, object, offset, start, end, prot, max, cow)
vm_map_t map;
vm_object_t object;
vm_ooffset_t offset;
vm_offset_t start;
vm_offset_t end;
+ vm_prot_t prot, max;
+ int cow;
{
register vm_map_entry_t new_entry;
register vm_map_entry_t prev_entry;
@@ -558,8 +566,8 @@ vm_map_insert(map, object, offset, start, end)
(prev_entry->is_a_map == FALSE) &&
(prev_entry->is_sub_map == FALSE) &&
(prev_entry->inheritance == VM_INHERIT_DEFAULT) &&
- (prev_entry->protection == VM_PROT_DEFAULT) &&
- (prev_entry->max_protection == VM_PROT_DEFAULT) &&
+ (prev_entry->protection == prot) &&
+ (prev_entry->max_protection == max) &&
(prev_entry->wired_count == 0)) {
if (vm_object_coalesce(prev_entry->object.vm_object,
@@ -591,13 +599,20 @@ vm_map_insert(map, object, offset, start, end)
new_entry->object.vm_object = object;
new_entry->offset = offset;
- new_entry->copy_on_write = FALSE;
- new_entry->needs_copy = FALSE;
+ if (cow & MAP_COPY_NEEDED)
+ new_entry->needs_copy = TRUE;
+ else
+ new_entry->needs_copy = FALSE;
+
+ if (cow & MAP_COPY_ON_WRITE)
+ new_entry->copy_on_write = TRUE;
+ else
+ new_entry->copy_on_write = FALSE;
if (map->is_main_map) {
new_entry->inheritance = VM_INHERIT_DEFAULT;
- new_entry->protection = VM_PROT_DEFAULT;
- new_entry->max_protection = VM_PROT_DEFAULT;
+ new_entry->protection = prot;
+ new_entry->max_protection = max;
new_entry->wired_count = 0;
}
/*
@@ -611,7 +626,8 @@ vm_map_insert(map, object, offset, start, end)
* Update the free space hint
*/
- if ((map->first_free == prev_entry) && (prev_entry->end >= new_entry->start))
+ if ((map->first_free == prev_entry) &&
+ (prev_entry->end >= new_entry->start))
map->first_free = new_entry;
return (KERN_SUCCESS);
@@ -770,13 +786,15 @@ vm_map_findspace(map, start, length, addr)
*
*/
int
-vm_map_find(map, object, offset, addr, length, find_space)
+vm_map_find(map, object, offset, addr, length, find_space, prot, max, cow)
vm_map_t map;
vm_object_t object;
vm_ooffset_t offset;
vm_offset_t *addr; /* IN/OUT */
vm_size_t length;
boolean_t find_space;
+ vm_prot_t prot, max;
+ int cow;
{
register vm_offset_t start;
int result, s = 0;
@@ -796,7 +814,8 @@ vm_map_find(map, object, offset, addr, length, find_space)
}
start = *addr;
}
- result = vm_map_insert(map, object, offset, start, start + length);
+ result = vm_map_insert(map, object, offset,
+ start, start + length, prot, max, cow);
vm_map_unlock(map);
if (map == kmem_map)
@@ -1767,20 +1786,6 @@ vm_map_copy_entry(src_map, dst_map, src_entry, dst_entry)
if (dst_entry->wired_count != 0)
vm_map_entry_unwire(dst_map, dst_entry);
- /*
- * If we're dealing with a sharing map, we must remove the destination
- * pages from all maps (since we cannot know which maps this sharing
- * map belongs in).
- */
-
- if (dst_map->is_main_map)
- pmap_remove(dst_map->pmap, dst_entry->start, dst_entry->end);
- else
- vm_object_pmap_remove(dst_entry->object.vm_object,
- OFF_TO_IDX(dst_entry->offset),
- OFF_TO_IDX(dst_entry->offset +
- (dst_entry->end - dst_entry->start)));
-
if (src_entry->wired_count == 0) {
boolean_t src_needs_copy;
@@ -1800,17 +1805,21 @@ vm_map_copy_entry(src_map, dst_map, src_entry, dst_entry)
if (!(su = src_map->is_main_map)) {
su = (src_map->ref_count == 1);
}
+#ifdef VM_MAP_OLD
if (su) {
pmap_protect(src_map->pmap,
src_entry->start,
src_entry->end,
src_entry->protection & ~VM_PROT_WRITE);
} else {
+#endif
vm_object_pmap_copy(src_entry->object.vm_object,
OFF_TO_IDX(src_entry->offset),
OFF_TO_IDX(src_entry->offset + (src_entry->end
- src_entry->start)));
+#ifdef VM_MAP_OLD
}
+#endif
}
/*
* Make a copy of the object.
@@ -1932,7 +1941,8 @@ vmspace_fork(vm1)
new_entry->is_a_map = FALSE;
vm_map_entry_link(new_map, new_map->header.prev,
new_entry);
- vm_map_copy_entry(old_map, new_map, old_entry, new_entry);
+ vm_map_copy_entry(old_map, new_map, old_entry,
+ new_entry);
break;
}
old_entry = old_entry->next;
diff --git a/sys/vm/vm_map.h b/sys/vm/vm_map.h
index 93cb5ec..e24eace 100644
--- a/sys/vm/vm_map.h
+++ b/sys/vm/vm_map.h
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_map.h,v 1.9 1995/12/11 04:58:14 dyson Exp $
+ * $Id: vm_map.h,v 1.10 1995/12/14 09:55:00 phk Exp $
*/
/*
@@ -148,6 +148,7 @@ struct vmspace {
struct pmap vm_pmap; /* private physical map */
int vm_refcnt; /* number of references */
caddr_t vm_shm; /* SYS5 shared memory private data XXX */
+ vm_object_t vm_upages_obj; /* UPAGES object */
/* we copy from vm_startcopy to the end of the structure on fork */
#define vm_startcopy vm_rssize
segsz_t vm_rssize; /* current resident set size in pages */
@@ -202,6 +203,12 @@ typedef struct {
#define MAX_KMAP 10
#define MAX_KMAPENT 128
+/*
+ * Copy-on-write flags for vm_map operations
+ */
+#define MAP_COPY_NEEDED 0x1
+#define MAP_COPY_ON_WRITE 0x2
+
#ifdef KERNEL
extern vm_offset_t kentry_data;
extern vm_size_t kentry_data_size;
@@ -212,11 +219,11 @@ struct pmap;
vm_map_t vm_map_create __P((struct pmap *, vm_offset_t, vm_offset_t, boolean_t));
void vm_map_deallocate __P((vm_map_t));
int vm_map_delete __P((vm_map_t, vm_offset_t, vm_offset_t));
-int vm_map_find __P((vm_map_t, vm_object_t, vm_ooffset_t, vm_offset_t *, vm_size_t, boolean_t));
+int vm_map_find __P((vm_map_t, vm_object_t, vm_ooffset_t, vm_offset_t *, vm_size_t, boolean_t, vm_prot_t, vm_prot_t, int));
int vm_map_findspace __P((vm_map_t, vm_offset_t, vm_size_t, vm_offset_t *));
int vm_map_inherit __P((vm_map_t, vm_offset_t, vm_offset_t, vm_inherit_t));
void vm_map_init __P((struct vm_map *, vm_offset_t, vm_offset_t, boolean_t));
-int vm_map_insert __P((vm_map_t, vm_object_t, vm_ooffset_t, vm_offset_t, vm_offset_t));
+int vm_map_insert __P((vm_map_t, vm_object_t, vm_ooffset_t, vm_offset_t, vm_offset_t, vm_prot_t, vm_prot_t, int));
int vm_map_lookup __P((vm_map_t *, vm_offset_t, vm_prot_t, vm_map_entry_t *, vm_object_t *,
vm_pindex_t *, vm_prot_t *, boolean_t *, boolean_t *));
void vm_map_lookup_done __P((vm_map_t, vm_map_entry_t));
diff --git a/sys/vm/vm_mmap.c b/sys/vm/vm_mmap.c
index 6579961..c68f5f4 100644
--- a/sys/vm/vm_mmap.c
+++ b/sys/vm/vm_mmap.c
@@ -38,7 +38,7 @@
* from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
*
* @(#)vm_mmap.c 8.4 (Berkeley) 1/12/94
- * $Id: vm_mmap.c,v 1.33 1995/12/13 12:28:39 dyson Exp $
+ * $Id: vm_mmap.c,v 1.34 1995/12/17 07:19:57 bde Exp $
*/
/*
@@ -70,6 +70,7 @@
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
+#include <vm/vm_kern.h>
#ifndef _SYS_SYSPROTO_H_
struct sbrk_args {
@@ -604,11 +605,12 @@ vm_mmap(map, addr, size, prot, maxprot, flags, handle, foff)
vm_ooffset_t foff;
{
boolean_t fitit;
- vm_object_t object;
+ vm_object_t object, object2;
struct vnode *vp = NULL;
objtype_t type;
int rv = KERN_SUCCESS;
- vm_size_t objsize;
+ vm_ooffset_t objsize;
+ int docow;
struct proc *p = curproc;
if (size == 0)
@@ -659,69 +661,60 @@ vm_mmap(map, addr, size, prot, maxprot, flags, handle, foff)
error = VOP_GETATTR(vp, &vat, p->p_ucred, p);
if (error)
return (error);
- objsize = vat.va_size;
+ objsize = round_page(vat.va_size);
type = OBJT_VNODE;
}
}
- object = vm_pager_allocate(type, handle, objsize, prot, foff);
+ object = vm_pager_allocate(type, handle, OFF_TO_IDX(objsize), prot, foff);
if (object == NULL)
return (type == OBJT_DEVICE ? EINVAL : ENOMEM);
- rv = vm_map_find(map, object, foff, addr, size, fitit);
+ object2 = NULL;
+ docow = 0;
+ if ((flags & (MAP_ANON|MAP_SHARED)) == 0 && (type != OBJT_DEVICE)) {
+ docow = MAP_COPY_ON_WRITE;
+ if (objsize < size) {
+ object2 = vm_object_allocate( OBJT_DEFAULT,
+ OFF_TO_IDX(size - (foff & ~(PAGE_SIZE - 1))));
+ object2->backing_object = object;
+ object2->backing_object_offset = foff;
+ TAILQ_INSERT_TAIL(&object->shadow_head,
+ object2, shadow_list);
+ } else {
+ docow |= MAP_COPY_NEEDED;
+ }
+ }
+ if (object2)
+ rv = vm_map_find(map, object2, 0, addr, size, fitit,
+ prot, maxprot, docow);
+ else
+ rv = vm_map_find(map, object, foff, addr, size, fitit,
+ prot, maxprot, docow);
+
+
if (rv != KERN_SUCCESS) {
/*
* Lose the object reference. Will destroy the
* object if it's an unnamed anonymous mapping
* or named anonymous without other references.
*/
- vm_object_deallocate(object);
+ if (object2)
+ vm_object_deallocate(object2);
+ else
+ vm_object_deallocate(object);
goto out;
}
/*
- * mmap a COW regular file
- */
- if ((flags & (MAP_ANON|MAP_SHARED)) == 0 && (type != OBJT_DEVICE)) {
- vm_map_entry_t entry;
- if (!vm_map_lookup_entry(map, *addr, &entry)) {
- panic("vm_mmap: missing map entry!!!");
- }
- entry->copy_on_write = TRUE;
- /*
- * This will create the processes private object on
- * an as needed basis.
- */
- entry->needs_copy = TRUE;
-
- /*
- * set pages COW and protect for read access only
- */
- vm_object_pmap_copy(object, foff, foff + size);
-
- }
-
- /*
* "Pre-fault" resident pages.
*/
- if ((type == OBJT_VNODE) && (map->pmap != NULL)) {
+ if ((map != kernel_map) &&
+ (type == OBJT_VNODE) && (map->pmap != NULL)) {
pmap_object_init_pt(map->pmap, *addr,
object, (vm_pindex_t) OFF_TO_IDX(foff), size);
}
/*
- * Correct protection (default is VM_PROT_ALL). If maxprot is
- * different than prot, we must set both explicitly.
- */
- rv = KERN_SUCCESS;
- if (maxprot != VM_PROT_ALL)
- rv = vm_map_protect(map, *addr, *addr + size, maxprot, TRUE);
- if (rv == KERN_SUCCESS && prot != maxprot)
- rv = vm_map_protect(map, *addr, *addr + size, prot, FALSE);
- if (rv != KERN_SUCCESS) {
- (void) vm_map_remove(map, *addr, *addr + size);
- goto out;
- }
- /*
* Shared memory is also shared with children.
*/
if (flags & MAP_SHARED) {
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index 7ba53e8..088d8b6 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_object.c,v 1.61 1996/01/04 18:32:31 davidg Exp $
+ * $Id: vm_object.c,v 1.62 1996/01/04 21:13:20 wollman Exp $
*/
/*
@@ -442,11 +442,18 @@ vm_object_page_clean(object, start, end, syncio, lockflag)
boolean_t syncio;
boolean_t lockflag;
{
- register vm_page_t p;
+ register vm_page_t p, np, tp;
register vm_offset_t tstart, tend;
+ vm_pindex_t pi;
int s;
struct vnode *vp;
int runlen;
+ int maxf;
+ int chkb;
+ int maxb;
+ int i;
+ vm_page_t maf[vm_pageout_page_count];
+ vm_page_t mab[vm_pageout_page_count];
vm_page_t ma[vm_pageout_page_count];
if (object->type != OBJT_VNODE ||
@@ -468,62 +475,99 @@ vm_object_page_clean(object, start, end, syncio, lockflag)
if ((tstart == 0) && (tend == object->size)) {
object->flags &= ~(OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY);
}
-
- runlen = 0;
- for(;tstart < tend; tstart += 1) {
-relookup:
- p = vm_page_lookup(object, tstart);
- if (!p) {
- if (runlen > 0) {
- vm_pageout_flush(ma, runlen, syncio);
- runlen = 0;
- }
+ for(p = object->memq.tqh_first; p; p = p->listq.tqe_next)
+ p->flags |= PG_CLEANCHK;
+
+rescan:
+ for(p = object->memq.tqh_first; p; p = np) {
+ np = p->listq.tqe_next;
+
+ pi = p->pindex;
+ if (((p->flags & PG_CLEANCHK) == 0) ||
+ (pi < tstart) || (pi >= tend) ||
+ (p->valid == 0) || (p->queue == PQ_CACHE)) {
+ p->flags &= ~PG_CLEANCHK;
continue;
}
- if ((p->valid == 0) || (p->flags & PG_CACHE)) {
- if (runlen > 0) {
- vm_pageout_flush(ma, runlen, syncio);
- runlen = 0;
- }
+
+ vm_page_test_dirty(p);
+ if ((p->dirty & p->valid) == 0) {
+ p->flags &= ~PG_CLEANCHK;
continue;
}
- vm_page_protect(p, VM_PROT_READ);
-
s = splhigh();
- while ((p->flags & PG_BUSY) || p->busy) {
- if (runlen > 0) {
- splx(s);
- vm_pageout_flush(ma, runlen, syncio);
- runlen = 0;
- goto relookup;
- }
+ if ((p->flags & PG_BUSY) || p->busy) {
p->flags |= PG_WANTED|PG_REFERENCED;
tsleep(p, PVM, "vpcwai", 0);
splx(s);
- goto relookup;
+ goto rescan;
}
splx(s);
+
+ maxf = 0;
+ for(i=1;i<vm_pageout_page_count;i++) {
+ if (tp = vm_page_lookup(object, pi + i)) {
+ if ((tp->flags & PG_BUSY) ||
+ (tp->flags & PG_CLEANCHK) == 0)
+ break;
+ vm_page_test_dirty(tp);
+ if ((tp->dirty & tp->valid) == 0) {
+ tp->flags &= ~PG_CLEANCHK;
+ break;
+ }
+ maf[ i - 1 ] = tp;
+ maxf++;
+ continue;
+ }
+ break;
+ }
- if (p->dirty == 0)
- vm_page_test_dirty(p);
-
- if ((p->valid & p->dirty) != 0) {
- ma[runlen] = p;
- p->flags |= PG_BUSY;
- runlen++;
- if (runlen >= vm_pageout_page_count) {
- vm_pageout_flush(ma, runlen, syncio);
- runlen = 0;
+ maxb = 0;
+ chkb = vm_pageout_page_count - maxf;
+ if (chkb) {
+ for(i = 1; i < chkb;i++) {
+ if (tp = vm_page_lookup(object, pi - i)) {
+ if ((tp->flags & PG_BUSY) ||
+ (tp->flags & PG_CLEANCHK) == 0)
+ break;
+ vm_page_test_dirty(tp);
+ if ((tp->dirty & tp->valid) == 0) {
+ tp->flags &= ~PG_CLEANCHK;
+ break;
+ }
+ mab[ i - 1 ] = tp;
+ maxb++;
+ continue;
+ }
+ break;
}
- } else if (runlen > 0) {
- vm_pageout_flush(ma, runlen, syncio);
- runlen = 0;
}
-
- }
- if (runlen > 0) {
- vm_pageout_flush(ma, runlen, syncio);
+
+ for(i=0;i<maxb;i++) {
+ int index = (maxb - i) - 1;
+ ma[index] = mab[i];
+ ma[index]->flags |= PG_BUSY;
+ ma[index]->flags &= ~PG_CLEANCHK;
+ vm_page_protect(ma[index], VM_PROT_READ);
+ }
+ vm_page_protect(p, VM_PROT_READ);
+ p->flags |= PG_BUSY;
+ p->flags &= ~PG_CLEANCHK;
+ ma[maxb] = p;
+ for(i=0;i<maxf;i++) {
+ int index = (maxb + i) + 1;
+ ma[index] = maf[i];
+ ma[index]->flags |= PG_BUSY;
+ ma[index]->flags &= ~PG_CLEANCHK;
+ vm_page_protect(ma[index], VM_PROT_READ);
+ }
+ runlen = maxb + maxf + 1;
+/*
+ printf("maxb: %d, maxf: %d, runlen: %d, offset: %d\n", maxb, maxf, runlen, ma[0]->pindex);
+*/
+ vm_pageout_flush(ma, runlen, 0);
+ goto rescan;
}
VOP_FSYNC(vp, NULL, syncio, curproc);
@@ -619,7 +663,8 @@ vm_object_pmap_remove(object, start, end)
if (object == NULL)
return;
for (p = object->memq.tqh_first; p != NULL; p = p->listq.tqe_next) {
- vm_page_protect(p, VM_PROT_NONE);
+ if (p->pindex >= start && p->pindex < end)
+ vm_page_protect(p, VM_PROT_NONE);
}
}
@@ -763,8 +808,8 @@ vm_object_qcollapse(object)
vm_page_t next;
next = p->listq.tqe_next;
- if ((p->flags & (PG_BUSY | PG_FICTITIOUS | PG_CACHE)) ||
- !p->valid || p->hold_count || p->wire_count || p->busy) {
+ if ((p->flags & (PG_BUSY | PG_FICTITIOUS)) ||
+ (p->queue == PQ_CACHE) || !p->valid || p->hold_count || p->wire_count || p->busy) {
p = next;
continue;
}
@@ -1104,12 +1149,13 @@ again:
if (size > 4 || size >= object->size / 4) {
for (p = object->memq.tqh_first; p != NULL; p = next) {
next = p->listq.tqe_next;
+ if (p->wire_count != 0) {
+ vm_page_protect(p, VM_PROT_NONE);
+ p->valid = 0;
+ continue;
+ }
if ((start <= p->pindex) && (p->pindex < end)) {
s = splhigh();
- if (p->bmapped) {
- splx(s);
- continue;
- }
if ((p->flags & PG_BUSY) || p->busy) {
p->flags |= PG_WANTED;
tsleep(p, PVM, "vmopar", 0);
@@ -1129,12 +1175,15 @@ again:
}
} else {
while (size > 0) {
- while ((p = vm_page_lookup(object, start)) != 0) {
- s = splhigh();
- if (p->bmapped) {
- splx(s);
- break;
+ if ((p = vm_page_lookup(object, start)) != 0) {
+ if (p->wire_count != 0) {
+ p->valid = 0;
+ vm_page_protect(p, VM_PROT_NONE);
+ start += 1;
+ size -= 1;
+ continue;
}
+ s = splhigh();
if ((p->flags & PG_BUSY) || p->busy) {
p->flags |= PG_WANTED;
tsleep(p, PVM, "vmopar", 0);
@@ -1144,8 +1193,11 @@ again:
splx(s);
if (clean_only) {
vm_page_test_dirty(p);
- if (p->valid & p->dirty)
+ if (p->valid & p->dirty) {
+ start += 1;
+ size -= 1;
continue;
+ }
}
vm_page_protect(p, VM_PROT_NONE);
PAGE_WAKEUP(p);
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 325b5d5..288f140 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91
- * $Id: vm_page.c,v 1.44 1995/12/17 07:19:58 bde Exp $
+ * $Id: vm_page.c,v 1.45 1996/01/04 21:13:23 wollman Exp $
*/
/*
@@ -105,6 +105,20 @@ struct pglist vm_page_queue_active;
struct pglist vm_page_queue_inactive;
struct pglist vm_page_queue_cache;
+int no_queue;
+
+struct {
+ struct pglist *pl;
+ int *cnt;
+} vm_page_queues[PQ_CACHE+1] = {
+ {NULL, &no_queue},
+ { &vm_page_queue_free, &cnt.v_free_count},
+ { &vm_page_queue_zero, &cnt.v_free_count},
+ { &vm_page_queue_inactive, &cnt.v_inactive_count},
+ { &vm_page_queue_active, &cnt.v_active_count},
+ { &vm_page_queue_cache, &cnt.v_cache_count}
+};
+
vm_page_t vm_page_array;
static int vm_page_array_size;
long first_page;
@@ -229,7 +243,7 @@ vm_page_startup(starta, enda, vaddr)
vm_page_buckets = (struct pglist *) vaddr;
bucket = vm_page_buckets;
if (vm_page_bucket_count == 0) {
- vm_page_bucket_count = 1;
+ vm_page_bucket_count = 2;
while (vm_page_bucket_count < atop(total))
vm_page_bucket_count <<= 1;
}
@@ -333,7 +347,8 @@ vm_page_startup(starta, enda, vaddr)
++cnt.v_page_count;
++cnt.v_free_count;
m = PHYS_TO_VM_PAGE(pa);
- m->flags = PG_FREE;
+ m->queue = PQ_FREE;
+ m->flags = 0;
m->phys_addr = pa;
TAILQ_INSERT_TAIL(&vm_page_queue_free, m, pageq);
pa += PAGE_SIZE;
@@ -368,36 +383,36 @@ vm_page_hash(object, pindex)
*/
inline void
-vm_page_insert(mem, object, pindex)
- register vm_page_t mem;
+vm_page_insert(m, object, pindex)
+ register vm_page_t m;
register vm_object_t object;
register vm_pindex_t pindex;
{
register struct pglist *bucket;
- if (mem->flags & PG_TABLED)
+ if (m->flags & PG_TABLED)
panic("vm_page_insert: already inserted");
/*
* Record the object/offset pair in this page
*/
- mem->object = object;
- mem->pindex = pindex;
+ m->object = object;
+ m->pindex = pindex;
/*
* Insert it into the object_object/offset hash table
*/
bucket = &vm_page_buckets[vm_page_hash(object, pindex)];
- TAILQ_INSERT_TAIL(bucket, mem, hashq);
+ TAILQ_INSERT_TAIL(bucket, m, hashq);
/*
* Now link into the object's list of backed pages.
*/
- TAILQ_INSERT_TAIL(&object->memq, mem, listq);
- mem->flags |= PG_TABLED;
+ TAILQ_INSERT_TAIL(&object->memq, m, listq);
+ m->flags |= PG_TABLED;
/*
* And show that the object has one more resident page.
@@ -417,34 +432,34 @@ vm_page_insert(mem, object, pindex)
*/
inline void
-vm_page_remove(mem)
- register vm_page_t mem;
+vm_page_remove(m)
+ register vm_page_t m;
{
register struct pglist *bucket;
- if (!(mem->flags & PG_TABLED))
+ if (!(m->flags & PG_TABLED))
return;
/*
* Remove from the object_object/offset hash table
*/
- bucket = &vm_page_buckets[vm_page_hash(mem->object, mem->pindex)];
- TAILQ_REMOVE(bucket, mem, hashq);
+ bucket = &vm_page_buckets[vm_page_hash(m->object, m->pindex)];
+ TAILQ_REMOVE(bucket, m, hashq);
/*
* Now remove from the object's list of backed pages.
*/
- TAILQ_REMOVE(&mem->object->memq, mem, listq);
+ TAILQ_REMOVE(&m->object->memq, m, listq);
/*
* And show that the object has one fewer resident page.
*/
- mem->object->resident_page_count--;
+ m->object->resident_page_count--;
- mem->flags &= ~PG_TABLED;
+ m->flags &= ~PG_TABLED;
}
/*
@@ -461,7 +476,7 @@ vm_page_lookup(object, pindex)
register vm_object_t object;
register vm_pindex_t pindex;
{
- register vm_page_t mem;
+ register vm_page_t m;
register struct pglist *bucket;
int s;
@@ -472,10 +487,10 @@ vm_page_lookup(object, pindex)
bucket = &vm_page_buckets[vm_page_hash(object, pindex)];
s = splhigh();
- for (mem = bucket->tqh_first; mem != NULL; mem = mem->hashq.tqe_next) {
- if ((mem->object == object) && (mem->pindex == pindex)) {
+ for (m = bucket->tqh_first; m != NULL; m = m->hashq.tqe_next) {
+ if ((m->object == object) && (m->pindex == pindex)) {
splx(s);
- return (mem);
+ return (m);
}
}
@@ -492,16 +507,16 @@ vm_page_lookup(object, pindex)
* The object must be locked.
*/
void
-vm_page_rename(mem, new_object, new_pindex)
- register vm_page_t mem;
+vm_page_rename(m, new_object, new_pindex)
+ register vm_page_t m;
register vm_object_t new_object;
vm_pindex_t new_pindex;
{
int s;
s = splhigh();
- vm_page_remove(mem);
- vm_page_insert(mem, new_object, new_pindex);
+ vm_page_remove(m);
+ vm_page_insert(m, new_object, new_pindex);
splx(s);
}
@@ -509,28 +524,17 @@ vm_page_rename(mem, new_object, new_pindex)
* vm_page_unqueue must be called at splhigh();
*/
static inline void
-vm_page_unqueue(vm_page_t mem)
+vm_page_unqueue(vm_page_t m)
{
- int origflags;
-
- origflags = mem->flags;
-
- if ((origflags & (PG_ACTIVE|PG_INACTIVE|PG_CACHE)) == 0)
+ int queue = m->queue;
+ if (queue == PQ_NONE)
return;
-
- if (origflags & PG_ACTIVE) {
- TAILQ_REMOVE(&vm_page_queue_active, mem, pageq);
- cnt.v_active_count--;
- mem->flags &= ~PG_ACTIVE;
- } else if (origflags & PG_INACTIVE) {
- TAILQ_REMOVE(&vm_page_queue_inactive, mem, pageq);
- cnt.v_inactive_count--;
- mem->flags &= ~PG_INACTIVE;
- } else if (origflags & PG_CACHE) {
- TAILQ_REMOVE(&vm_page_queue_cache, mem, pageq);
- cnt.v_cache_count--;
- mem->flags &= ~PG_CACHE;
- if (cnt.v_cache_count + cnt.v_free_count < cnt.v_free_reserved)
+ m->queue = PQ_NONE;
+ TAILQ_REMOVE(vm_page_queues[queue].pl, m, pageq);
+ --(*vm_page_queues[queue].cnt);
+ if (queue == PQ_CACHE) {
+ if ((cnt.v_cache_count + cnt.v_free_count) <
+ (cnt.v_free_min + cnt.v_cache_min))
pagedaemon_wakeup();
}
return;
@@ -546,7 +550,6 @@ vm_page_unqueue(vm_page_t mem)
* VM_ALLOC_NORMAL normal process request
* VM_ALLOC_SYSTEM system *really* needs a page
* VM_ALLOC_INTERRUPT interrupt time request
- * or in:
* VM_ALLOC_ZERO zero page
*
* Object must be locked.
@@ -557,12 +560,13 @@ vm_page_alloc(object, pindex, page_req)
vm_pindex_t pindex;
int page_req;
{
- register vm_page_t mem;
+ register vm_page_t m;
+ int queue;
int s;
#ifdef DIAGNOSTIC
- mem = vm_page_lookup(object, pindex);
- if (mem)
+ m = vm_page_lookup(object, pindex);
+ if (m)
panic("vm_page_alloc: page already allocated");
#endif
@@ -572,41 +576,36 @@ vm_page_alloc(object, pindex, page_req)
s = splhigh();
- switch ((page_req & ~(VM_ALLOC_ZERO))) {
+ switch (page_req) {
+
case VM_ALLOC_NORMAL:
if (cnt.v_free_count >= cnt.v_free_reserved) {
- if (page_req & VM_ALLOC_ZERO) {
- mem = vm_page_queue_zero.tqh_first;
- if (mem) {
- --vm_page_zero_count;
- TAILQ_REMOVE(&vm_page_queue_zero, mem, pageq);
- mem->flags = PG_BUSY|PG_ZERO;
- } else {
- mem = vm_page_queue_free.tqh_first;
- TAILQ_REMOVE(&vm_page_queue_free, mem, pageq);
- mem->flags = PG_BUSY;
- }
- } else {
- mem = vm_page_queue_free.tqh_first;
- if (mem) {
- TAILQ_REMOVE(&vm_page_queue_free, mem, pageq);
- mem->flags = PG_BUSY;
- } else {
- --vm_page_zero_count;
- mem = vm_page_queue_zero.tqh_first;
- TAILQ_REMOVE(&vm_page_queue_zero, mem, pageq);
- mem->flags = PG_BUSY|PG_ZERO;
- }
+ m = vm_page_queue_free.tqh_first;
+ if (m == NULL) {
+ --vm_page_zero_count;
+ m = vm_page_queue_zero.tqh_first;
}
- cnt.v_free_count--;
} else {
- mem = vm_page_queue_cache.tqh_first;
- if (mem != NULL) {
- TAILQ_REMOVE(&vm_page_queue_cache, mem, pageq);
- vm_page_remove(mem);
- mem->flags = PG_BUSY;
- cnt.v_cache_count--;
+ m = vm_page_queue_cache.tqh_first;
+ if (m == NULL) {
+ splx(s);
+ pagedaemon_wakeup();
+ return (NULL);
+ }
+ }
+ break;
+
+ case VM_ALLOC_ZERO:
+ if (cnt.v_free_count >= cnt.v_free_reserved) {
+ m = vm_page_queue_zero.tqh_first;
+ if (m) {
+ --vm_page_zero_count;
} else {
+ m = vm_page_queue_free.tqh_first;
+ }
+ } else {
+ m = vm_page_queue_cache.tqh_first;
+ if (m == NULL) {
splx(s);
pagedaemon_wakeup();
return (NULL);
@@ -618,38 +617,14 @@ vm_page_alloc(object, pindex, page_req)
if ((cnt.v_free_count >= cnt.v_free_reserved) ||
((cnt.v_cache_count == 0) &&
(cnt.v_free_count >= cnt.v_interrupt_free_min))) {
- if (page_req & VM_ALLOC_ZERO) {
- mem = vm_page_queue_zero.tqh_first;
- if (mem) {
+ m = vm_page_queue_free.tqh_first;
+ if (m == NULL) {
--vm_page_zero_count;
- TAILQ_REMOVE(&vm_page_queue_zero, mem, pageq);
- mem->flags = PG_BUSY|PG_ZERO;
- } else {
- mem = vm_page_queue_free.tqh_first;
- TAILQ_REMOVE(&vm_page_queue_free, mem, pageq);
- mem->flags = PG_BUSY;
+ m = vm_page_queue_zero.tqh_first;
}
- } else {
- mem = vm_page_queue_free.tqh_first;
- if (mem) {
- TAILQ_REMOVE(&vm_page_queue_free, mem, pageq);
- mem->flags = PG_BUSY;
- } else {
- --vm_page_zero_count;
- mem = vm_page_queue_zero.tqh_first;
- TAILQ_REMOVE(&vm_page_queue_zero, mem, pageq);
- mem->flags = PG_BUSY|PG_ZERO;
- }
- }
- cnt.v_free_count--;
} else {
- mem = vm_page_queue_cache.tqh_first;
- if (mem != NULL) {
- TAILQ_REMOVE(&vm_page_queue_cache, mem, pageq);
- vm_page_remove(mem);
- mem->flags = PG_BUSY;
- cnt.v_cache_count--;
- } else {
+ m = vm_page_queue_cache.tqh_first;
+ if (m == NULL) {
splx(s);
pagedaemon_wakeup();
return (NULL);
@@ -659,21 +634,15 @@ vm_page_alloc(object, pindex, page_req)
case VM_ALLOC_INTERRUPT:
if (cnt.v_free_count > 0) {
- mem = vm_page_queue_free.tqh_first;
- if (mem) {
- TAILQ_REMOVE(&vm_page_queue_free, mem, pageq);
- mem->flags = PG_BUSY;
- } else {
+ m = vm_page_queue_free.tqh_first;
+ if (m == NULL) {
--vm_page_zero_count;
- mem = vm_page_queue_zero.tqh_first;
- TAILQ_REMOVE(&vm_page_queue_zero, mem, pageq);
- mem->flags = PG_BUSY|PG_ZERO;
+ m = vm_page_queue_zero.tqh_first;
}
- cnt.v_free_count--;
} else {
splx(s);
pagedaemon_wakeup();
- return NULL;
+ return (NULL);
}
break;
@@ -681,16 +650,27 @@ vm_page_alloc(object, pindex, page_req)
panic("vm_page_alloc: invalid allocation class");
}
- mem->wire_count = 0;
- mem->hold_count = 0;
- mem->act_count = 0;
- mem->busy = 0;
- mem->valid = 0;
- mem->dirty = 0;
- mem->bmapped = 0;
+ queue = m->queue;
+ TAILQ_REMOVE(vm_page_queues[queue].pl, m, pageq);
+ --(*vm_page_queues[queue].cnt);
+ if (queue == PQ_ZERO) {
+ m->flags = PG_ZERO|PG_BUSY;
+ } else if (queue == PQ_CACHE) {
+ vm_page_remove(m);
+ m->flags = PG_BUSY;
+ } else {
+ m->flags = PG_BUSY;
+ }
+ m->wire_count = 0;
+ m->hold_count = 0;
+ m->act_count = 0;
+ m->busy = 0;
+ m->valid = 0;
+ m->dirty = 0;
+ m->queue = PQ_NONE;
/* XXX before splx until vm_page_insert is safe */
- vm_page_insert(mem, object, pindex);
+ vm_page_insert(m, object, pindex);
splx(s);
@@ -698,11 +678,12 @@ vm_page_alloc(object, pindex, page_req)
* Don't wakeup too often - wakeup the pageout daemon when
* we would be nearly out of memory.
*/
- if (((cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min) ||
- (cnt.v_free_count < cnt.v_pageout_free_min))
+ if (((cnt.v_free_count + cnt.v_cache_count) <
+ (cnt.v_free_min + cnt.v_cache_min)) ||
+ (cnt.v_free_count < cnt.v_pageout_free_min))
pagedaemon_wakeup();
- return (mem);
+ return (m);
}
vm_offset_t
@@ -727,7 +708,7 @@ again:
*/
for (i = start; i < cnt.v_page_count; i++) {
phys = VM_PAGE_TO_PHYS(&pga[i]);
- if (((pga[i].flags & PG_FREE) == PG_FREE) &&
+ if ((pga[i].queue == PQ_FREE) &&
(phys >= low) && (phys < high) &&
((phys & (alignment - 1)) == 0))
break;
@@ -736,7 +717,8 @@ again:
/*
* If the above failed or we will exceed the upper bound, fail.
*/
- if ((i == cnt.v_page_count) || ((VM_PAGE_TO_PHYS(&pga[i]) + size) > high)) {
+ if ((i == cnt.v_page_count) ||
+ ((VM_PAGE_TO_PHYS(&pga[i]) + size) > high)) {
splx(s);
return (NULL);
}
@@ -747,8 +729,8 @@ again:
*/
for (i = start + 1; i < (start + size / PAGE_SIZE); i++) {
if ((VM_PAGE_TO_PHYS(&pga[i]) !=
- (VM_PAGE_TO_PHYS(&pga[i - 1]) + PAGE_SIZE)) ||
- ((pga[i].flags & PG_FREE) != PG_FREE)) {
+ (VM_PAGE_TO_PHYS(&pga[i - 1]) + PAGE_SIZE)) ||
+ (pga[i].queue != PQ_FREE)) {
start++;
goto again;
}
@@ -771,8 +753,8 @@ again:
m->dirty = 0;
m->wire_count = 0;
m->act_count = 0;
- m->bmapped = 0;
m->busy = 0;
+ m->queue = PQ_NONE;
vm_page_insert(m, kernel_object,
OFF_TO_IDX(tmp_addr - VM_MIN_KERNEL_ADDRESS));
vm_page_wire(m);
@@ -793,38 +775,40 @@ again:
* Object and page must be locked prior to entry.
*/
void
-vm_page_free(mem)
- register vm_page_t mem;
+vm_page_free(m)
+ register vm_page_t m;
{
int s;
- int flags;
+ int flags = m->flags;
s = splhigh();
- vm_page_remove(mem);
- vm_page_unqueue(mem);
-
- flags = mem->flags;
- if (mem->bmapped || mem->busy || flags & (PG_BUSY|PG_FREE)) {
- if (flags & PG_FREE)
+ if (m->busy || (flags & PG_BUSY) || (m->queue == PQ_FREE)) {
+ printf("vm_page_free: pindex(%ld), busy(%d), PG_BUSY(%d)\n",
+ m->pindex, m->busy, (flags & PG_BUSY) ? 1 : 0);
+ if (m->queue == PQ_FREE)
panic("vm_page_free: freeing free page");
- printf("vm_page_free: pindex(%ld), bmapped(%d), busy(%d), PG_BUSY(%d)\n",
- mem->pindex, mem->bmapped, mem->busy, (flags & PG_BUSY) ? 1 : 0);
- panic("vm_page_free: freeing busy page");
+ else
+ panic("vm_page_free: freeing busy page");
}
+ vm_page_remove(m);
+ vm_page_unqueue(m);
+
+/*
if ((flags & PG_WANTED) != 0)
- wakeup(mem);
+ wakeup(m);
+*/
if ((flags & PG_FICTITIOUS) == 0) {
- if (mem->wire_count) {
- if (mem->wire_count > 1) {
- printf("vm_page_free: wire count > 1 (%d)", mem->wire_count);
+ if (m->wire_count) {
+ if (m->wire_count > 1) {
+ printf("vm_page_free: wire count > 1 (%d)", m->wire_count);
panic("vm_page_free: invalid wire count");
}
cnt.v_wire_count--;
- mem->wire_count = 0;
+ m->wire_count = 0;
}
- mem->flags |= PG_FREE;
- TAILQ_INSERT_TAIL(&vm_page_queue_free, mem, pageq);
+ m->queue = PQ_FREE;
+ TAILQ_INSERT_TAIL(&vm_page_queue_free, m, pageq);
splx(s);
/*
* if pageout daemon needs pages, then tell it that there are
@@ -862,19 +846,19 @@ vm_page_free(mem)
* The page queues must be locked.
*/
void
-vm_page_wire(mem)
- register vm_page_t mem;
+vm_page_wire(m)
+ register vm_page_t m;
{
int s;
- if (mem->wire_count == 0) {
+ if (m->wire_count == 0) {
s = splhigh();
- vm_page_unqueue(mem);
+ vm_page_unqueue(m);
splx(s);
cnt.v_wire_count++;
}
- mem->flags |= PG_WRITEABLE|PG_MAPPED;
- mem->wire_count++;
+ m->wire_count++;
+ m->flags |= PG_MAPPED;
}
/*
@@ -886,20 +870,23 @@ vm_page_wire(mem)
* The page queues must be locked.
*/
void
-vm_page_unwire(mem)
- register vm_page_t mem;
+vm_page_unwire(m)
+ register vm_page_t m;
{
int s;
s = splhigh();
- if (mem->wire_count)
- mem->wire_count--;
- if (mem->wire_count == 0) {
- TAILQ_INSERT_TAIL(&vm_page_queue_active, mem, pageq);
- cnt.v_active_count++;
- mem->flags |= PG_ACTIVE;
+ if (m->wire_count > 0)
+ m->wire_count--;
+
+ if (m->wire_count == 0) {
cnt.v_wire_count--;
+ TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
+ m->queue = PQ_ACTIVE;
+ if( m->act_count < ACT_MAX)
+ m->act_count += 1;
+ cnt.v_active_count++;
}
splx(s);
}
@@ -918,17 +905,17 @@ vm_page_activate(m)
int s;
s = splhigh();
- if (m->flags & PG_ACTIVE)
+ if (m->queue == PQ_ACTIVE)
panic("vm_page_activate: already active");
- if (m->flags & PG_CACHE)
+ if (m->queue == PQ_CACHE)
cnt.v_reactivated++;
vm_page_unqueue(m);
if (m->wire_count == 0) {
TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
- m->flags |= PG_ACTIVE;
+ m->queue = PQ_ACTIVE;
if (m->act_count < 5)
m->act_count = 5;
else if( m->act_count < ACT_MAX)
@@ -960,15 +947,16 @@ vm_page_deactivate(m)
* we need to put them on the inactive queue also, otherwise we lose
* track of them. Paul Mackerras (paulus@cs.anu.edu.au) 9-Jan-93.
*/
+ if (m->queue == PQ_INACTIVE)
+ return;
spl = splhigh();
- if (!(m->flags & PG_INACTIVE) && m->wire_count == 0 &&
- m->hold_count == 0) {
- if (m->flags & PG_CACHE)
+ if (m->wire_count == 0 && m->hold_count == 0) {
+ if (m->queue == PQ_CACHE)
cnt.v_reactivated++;
vm_page_unqueue(m);
TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
- m->flags |= PG_INACTIVE;
+ m->queue = PQ_INACTIVE;
cnt.v_inactive_count++;
m->act_count = 0;
}
@@ -986,16 +974,16 @@ vm_page_cache(m)
{
int s;
- if ((m->flags & (PG_CACHE | PG_BUSY)) || m->busy || m->wire_count ||
- m->bmapped)
+ if ((m->flags & PG_BUSY) || m->busy || m->wire_count)
+ return;
+ if (m->queue == PQ_CACHE)
return;
+ vm_page_protect(m, VM_PROT_NONE);
s = splhigh();
vm_page_unqueue(m);
- vm_page_protect(m, VM_PROT_NONE);
-
TAILQ_INSERT_TAIL(&vm_page_queue_cache, m, pageq);
- m->flags |= PG_CACHE;
+ m->queue = PQ_CACHE;
cnt.v_cache_count++;
if ((cnt.v_free_count + cnt.v_cache_count) == cnt.v_free_min) {
wakeup(&cnt.v_free_count);
@@ -1005,7 +993,6 @@ vm_page_cache(m)
wakeup(&vm_pageout_pages_needed);
vm_pageout_pages_needed = 0;
}
-
splx(s);
}
diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h
index 031bf3b..b67c9a5 100644
--- a/sys/vm/vm_page.h
+++ b/sys/vm/vm_page.h
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_page.h,v 1.22 1995/11/20 12:19:32 phk Exp $
+ * $Id: vm_page.h,v 1.23 1995/12/11 04:58:26 dyson Exp $
*/
/*
@@ -107,34 +107,39 @@ struct vm_page {
vm_object_t object; /* which object am I in (O,P) */
vm_pindex_t pindex; /* offset into object (O,P) */
vm_offset_t phys_addr; /* physical address of page */
-
+ u_short queue:4, /* page queue index */
+ flags:12; /* see below */
u_short wire_count; /* wired down maps refs (P) */
- u_short flags; /* see below */
short hold_count; /* page hold count */
- u_short act_count; /* page usage count */
- u_short bmapped; /* number of buffers mapped */
- u_short busy; /* page busy count */
- u_short valid; /* map of valid DEV_BSIZE chunks */
- u_short dirty; /* map of dirty DEV_BSIZE chunks */
+ u_char act_count; /* page usage count */
+ u_char busy; /* page busy count */
+ /* NOTE that these must support one bit per DEV_BSIZE in a page!!! */
+ /* so, on normal X86 kernels, they must be at least 8 bits wide */
+ u_char valid; /* map of valid DEV_BSIZE chunks */
+ u_char dirty; /* map of dirty DEV_BSIZE chunks */
};
+#define PQ_NONE 0
+#define PQ_FREE 1
+#define PQ_ZERO 2
+#define PQ_INACTIVE 3
+#define PQ_ACTIVE 4
+#define PQ_CACHE 5
+
/*
* These are the flags defined for vm_page.
*
* Note: PG_FILLED and PG_DIRTY are added for the filesystems.
*/
-#define PG_INACTIVE 0x0001 /* page is in inactive list (P) */
-#define PG_ACTIVE 0x0002 /* page is in active list (P) */
-#define PG_BUSY 0x0010 /* page is in transit (O) */
-#define PG_WANTED 0x0020 /* someone is waiting for page (O) */
-#define PG_TABLED 0x0040 /* page is in VP table (O) */
-#define PG_FICTITIOUS 0x0100 /* physical page doesn't exist (O) */
-#define PG_WRITEABLE 0x0200 /* page is mapped writeable */
-#define PG_MAPPED 0x0400 /* page is mapped */
-#define PG_ZERO 0x0800 /* page is zeroed */
-#define PG_REFERENCED 0x1000 /* page has been referenced */
-#define PG_CACHE 0x4000 /* On VMIO cache */
-#define PG_FREE 0x8000 /* page is in free list */
+#define PG_BUSY 0x01 /* page is in transit (O) */
+#define PG_WANTED 0x02 /* someone is waiting for page (O) */
+#define PG_TABLED 0x04 /* page is in VP table (O) */
+#define PG_FICTITIOUS 0x08 /* physical page doesn't exist (O) */
+#define PG_WRITEABLE 0x10 /* page is mapped writeable */
+#define PG_MAPPED 0x20 /* page is mapped */
+#define PG_ZERO 0x40 /* page is zeroed */
+#define PG_REFERENCED 0x80 /* page has been referenced */
+#define PG_CLEANCHK 0x100 /* page has been checked for cleaning */
/*
* Misc constants.
@@ -229,7 +234,7 @@ extern vm_offset_t last_phys_addr; /* physical address for last_page */
#define VM_ALLOC_NORMAL 0
#define VM_ALLOC_INTERRUPT 1
#define VM_ALLOC_SYSTEM 2
-#define VM_ALLOC_ZERO 0x80
+#define VM_ALLOC_ZERO 3
void vm_page_activate __P((vm_page_t));
vm_page_t vm_page_alloc __P((vm_object_t, vm_pindex_t, int));
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index 7946335..ef7dbe9 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -65,7 +65,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_pageout.c,v 1.62 1995/12/11 04:58:28 dyson Exp $
+ * $Id: vm_pageout.c,v 1.63 1995/12/14 09:55:09 phk Exp $
*/
/*
@@ -234,13 +234,13 @@ vm_pageout_clean(m, sync)
}
p = vm_page_lookup(object, pindex + i);
if (p) {
- if ((p->flags & (PG_BUSY|PG_CACHE)) || p->busy) {
+ if ((p->queue == PQ_CACHE) || (p->flags & PG_BUSY) || p->busy) {
forward_okay = FALSE;
goto do_backward;
}
vm_page_test_dirty(p);
if ((p->dirty & p->valid) != 0 &&
- ((p->flags & PG_INACTIVE) ||
+ ((p->queue == PQ_INACTIVE) ||
(sync == VM_PAGEOUT_FORCE)) &&
(p->wire_count == 0) &&
(p->hold_count == 0)) {
@@ -268,13 +268,13 @@ do_backward:
}
p = vm_page_lookup(object, pindex - i);
if (p) {
- if ((p->flags & (PG_BUSY|PG_CACHE)) || p->busy) {
+ if ((p->queue == PQ_CACHE) || (p->flags & PG_BUSY) || p->busy) {
backward_okay = FALSE;
continue;
}
vm_page_test_dirty(p);
if ((p->dirty & p->valid) != 0 &&
- ((p->flags & PG_INACTIVE) ||
+ ((p->queue == PQ_INACTIVE) ||
(sync == VM_PAGEOUT_FORCE)) &&
(p->wire_count == 0) &&
(p->hold_count == 0)) {
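
Both the forward and backward cluster scans in vm_pageout_clean now apply the same eligibility test, phrased against the queue index instead of flag bits. Pulled out as a helper purely for readability (the kernel keeps this logic inline in both loops):

	/*
	 * Illustrative only: a neighboring page may join the pageout cluster
	 * if it is not busy, not on the cache queue, dirty within its valid
	 * range, unwired, unheld, and either already inactive or part of a
	 * forced synchronous pageout.
	 */
	static int
	cluster_ok(vm_page_t p, int sync)
	{
		if ((p->queue == PQ_CACHE) || (p->flags & PG_BUSY) || p->busy)
			return (0);
		vm_page_test_dirty(p);
		return ((p->dirty & p->valid) != 0 &&
		    (p->queue == PQ_INACTIVE || sync == VM_PAGEOUT_FORCE) &&
		    p->wire_count == 0 && p->hold_count == 0);
	}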
@@ -348,7 +348,7 @@ vm_pageout_flush(mc, count, sync)
* page so it doesn't clog the inactive list. (We
* will try paging out it again later).
*/
- if (mt->flags & PG_INACTIVE)
+ if (mt->queue == PQ_INACTIVE)
vm_page_activate(mt);
break;
case VM_PAGER_AGAIN:
@@ -364,13 +364,6 @@ vm_pageout_flush(mc, count, sync)
*/
if (pageout_status[i] != VM_PAGER_PEND) {
vm_object_pip_wakeup(object);
- if ((mt->flags & (PG_REFERENCED|PG_WANTED)) ||
- pmap_is_referenced(VM_PAGE_TO_PHYS(mt))) {
- pmap_clear_reference(VM_PAGE_TO_PHYS(mt));
- mt->flags &= ~PG_REFERENCED;
- if (mt->flags & PG_INACTIVE)
- vm_page_activate(mt);
- }
PAGE_WAKEUP(mt);
}
}
@@ -427,6 +420,7 @@ vm_pageout_object_deactivate_pages(map, object, count, map_remove_only)
if (p->wire_count != 0 ||
p->hold_count != 0 ||
p->busy != 0 ||
+ (p->flags & PG_BUSY) ||
!pmap_page_exists(vm_map_pmap(map), VM_PAGE_TO_PHYS(p))) {
p = next;
continue;
@@ -435,9 +429,9 @@ vm_pageout_object_deactivate_pages(map, object, count, map_remove_only)
* if a page is active, not wired and is in the processes
* pmap, then deactivate the page.
*/
- if ((p->flags & (PG_ACTIVE | PG_BUSY)) == PG_ACTIVE) {
+ if (p->queue == PQ_ACTIVE) {
if (!pmap_is_referenced(VM_PAGE_TO_PHYS(p)) &&
- (p->flags & (PG_REFERENCED|PG_WANTED)) == 0) {
+ (p->flags & PG_REFERENCED) == 0) {
p->act_count -= min(p->act_count, ACT_DECLINE);
/*
* if the page act_count is zero -- then we
@@ -461,7 +455,7 @@ vm_pageout_object_deactivate_pages(map, object, count, map_remove_only)
/*
* see if we are done yet
*/
- if (p->flags & PG_INACTIVE) {
+ if (p->queue == PQ_INACTIVE) {
--count;
++dcount;
if (count <= 0 &&
@@ -481,7 +475,7 @@ vm_pageout_object_deactivate_pages(map, object, count, map_remove_only)
TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
TAILQ_INSERT_TAIL(&vm_page_queue_active, p, pageq);
}
- } else if ((p->flags & (PG_INACTIVE | PG_BUSY)) == PG_INACTIVE) {
+ } else if (p->queue == PQ_INACTIVE) {
vm_page_protect(p, VM_PROT_NONE);
}
p = next;
@@ -489,7 +483,6 @@ vm_pageout_object_deactivate_pages(map, object, count, map_remove_only)
return dcount;
}
-
/*
* deactivate some number of pages in a map, try to do it fairly, but
* that is really hard to do.
@@ -584,7 +577,7 @@ rescan1:
next = m->pageq.tqe_next;
#if defined(VM_DIAGNOSE)
- if ((m->flags & PG_INACTIVE) == 0) {
+ if (m->queue != PQ_INACTIVE) {
printf("vm_pageout_scan: page not inactive?\n");
break;
}
@@ -593,12 +586,17 @@ rescan1:
/*
* dont mess with busy pages
*/
- if (m->hold_count || m->busy || (m->flags & PG_BUSY)) {
+ if (m->busy || (m->flags & PG_BUSY)) {
+ m = next;
+ continue;
+ }
+ if (m->hold_count) {
TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
m = next;
continue;
}
+
if (((m->flags & PG_REFERENCED) == 0) &&
pmap_is_referenced(VM_PAGE_TO_PHYS(m))) {
m->flags |= PG_REFERENCED;
@@ -607,7 +605,7 @@ rescan1:
m->flags &= ~PG_REFERENCED;
pmap_clear_reference(VM_PAGE_TO_PHYS(m));
}
- if ((m->flags & (PG_REFERENCED|PG_WANTED)) != 0) {
+ if ((m->flags & PG_REFERENCED) != 0) {
m->flags &= ~PG_REFERENCED;
pmap_clear_reference(VM_PAGE_TO_PHYS(m));
vm_page_activate(m);
@@ -617,21 +615,18 @@ rescan1:
continue;
}
- vm_page_test_dirty(m);
if (m->dirty == 0) {
- if (m->bmapped == 0) {
- if (m->valid == 0) {
- pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_NONE);
- vm_page_free(m);
- cnt.v_dfree++;
- } else {
- vm_page_cache(m);
- }
- ++pages_freed;
- } else {
- m = next;
- continue;
- }
+ vm_page_test_dirty(m);
+ } else if (m->dirty != 0)
+ m->dirty = VM_PAGE_BITS_ALL;
+ if (m->valid == 0) {
+ vm_page_protect(m, VM_PROT_NONE);
+ vm_page_free(m);
+ cnt.v_dfree++;
+ ++pages_freed;
+ } else if (m->dirty == 0) {
+ vm_page_cache(m);
+ ++pages_freed;
} else if (maxlaunder > 0) {
int written;
struct vnode *vp = NULL;
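
The rewritten inactive-scan ladder first settles the page's dirty state and only then decides its fate: pages with no valid data are freed outright, clean valid pages are parked on the cache queue, and dirty pages fall through to laundering. A compact restatement of that control flow, as an illustrative helper rather than kernel code:

	/* Restatement of the new disposition logic above; the PO_* names are made up. */
	enum pageout_action { PO_FREE, PO_CACHE, PO_LAUNDER };

	static enum pageout_action
	inactive_disposition(vm_page_t m)
	{
		if (m->dirty == 0)
			vm_page_test_dirty(m);		/* pick up modified pmap mappings */
		else
			m->dirty = VM_PAGE_BITS_ALL;	/* partially dirty: treat as fully dirty */

		if (m->valid == 0)
			return (PO_FREE);		/* nothing worth keeping */
		if (m->dirty == 0)
			return (PO_CACHE);		/* clean: move to the cache queue */
		return (PO_LAUNDER);			/* dirty: candidate for a pageout write */
	}

Note that the old bmapped test is gone along with the struct field itself: buffer-mapped pages no longer block freeing or caching here.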
@@ -671,7 +666,7 @@ rescan1:
* if the next page has been re-activated, start
* scanning again
*/
- if ((next->flags & PG_INACTIVE) == 0) {
+ if (next->queue != PQ_INACTIVE) {
vm_pager_sync();
goto rescan1;
}
@@ -697,7 +692,8 @@ rescan1:
maxscan = MAXSCAN;
pcount = cnt.v_active_count;
m = vm_page_queue_active.tqh_first;
- while ((m != NULL) && (maxscan > 0) && (pcount-- > 0) && (page_shortage > 0)) {
+ while ((m != NULL) && (maxscan > 0) &&
+ (pcount-- > 0) && (page_shortage > 0)) {
cnt.v_pdpages++;
next = m->pageq.tqe_next;
@@ -711,13 +707,11 @@ rescan1:
TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
m = next;
- /* printf("busy: s: %d, f: 0x%x, h: %d\n",
- m->busy, m->flags, m->hold_count); */
continue;
}
if (m->object->ref_count &&
- ((m->flags & (PG_REFERENCED|PG_WANTED)) ||
- pmap_is_referenced(VM_PAGE_TO_PHYS(m)))) {
+ ((m->flags & PG_REFERENCED) ||
+ pmap_is_referenced(VM_PAGE_TO_PHYS(m))) ) {
pmap_clear_reference(VM_PAGE_TO_PHYS(m));
m->flags &= ~PG_REFERENCED;
if (m->act_count < ACT_MAX) {
@@ -737,7 +731,7 @@ rescan1:
if (m->object->ref_count == 0) {
--page_shortage;
vm_page_test_dirty(m);
- if ((m->bmapped == 0) && (m->dirty == 0) ) {
+ if (m->dirty == 0) {
m->act_count = 0;
vm_page_cache(m);
} else {
@@ -773,7 +767,8 @@ rescan1:
* in a writeable object, wakeup the sync daemon. And kick swapout
* if we did not get enough free pages.
*/
- if ((cnt.v_cache_count + cnt.v_free_count) < cnt.v_free_target) {
+ if ((cnt.v_cache_count + cnt.v_free_count) <
+ (cnt.v_free_target + cnt.v_cache_min) ) {
if (vnodes_skipped &&
(cnt.v_cache_count + cnt.v_free_count) < cnt.v_free_min) {
if (!vfs_update_wakeup) {
diff --git a/sys/vm/vm_unix.c b/sys/vm/vm_unix.c
index 883e36d..46531e8 100644
--- a/sys/vm/vm_unix.c
+++ b/sys/vm/vm_unix.c
@@ -38,7 +38,7 @@
* from: Utah $Hdr: vm_unix.c 1.1 89/11/07$
*
* @(#)vm_unix.c 8.1 (Berkeley) 6/11/93
- * $Id: vm_unix.c,v 1.8 1995/11/12 06:43:28 bde Exp $
+ * $Id: vm_unix.c,v 1.9 1995/12/07 12:48:29 davidg Exp $
*/
/*
@@ -56,6 +56,7 @@
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/swap_pager.h>
+#include <vm/vm_prot.h>
#ifndef _SYS_SYSPROTO_H_
struct obreak_args {
@@ -85,7 +86,8 @@ obreak(p, uap, retval)
if (swap_pager_full) {
return (ENOMEM);
}
- rv = vm_map_find(&vm->vm_map, NULL, 0, &old, diff, FALSE);
+ rv = vm_map_find(&vm->vm_map, NULL, 0, &old, diff, FALSE,
+ VM_PROT_ALL, VM_PROT_ALL, 0);
if (rv != KERN_SUCCESS) {
return (ENOMEM);
}
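
vm_map_find grows three arguments here: the initial protection, the maximum protection, and a trailing flags word, so obreak() now states VM_PROT_ALL explicitly. A sketch of the call shape as used above; the per-argument annotations are my reading of the interface, not taken from the header:

	rv = vm_map_find(&vm->vm_map,	/* map to insert into */
	    NULL,			/* backing object: anonymous memory */
	    0,				/* offset within that object */
	    &old,			/* in/out: start address of the new entry */
	    diff,			/* length of the growth */
	    FALSE,			/* do not search for free space; use old */
	    VM_PROT_ALL,		/* initial protection */
	    VM_PROT_ALL,		/* maximum protection */
	    0);				/* copy-on-write / map flags */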
diff --git a/sys/vm/vnode_pager.c b/sys/vm/vnode_pager.c
index 873b20ce..4edbd35 100644
--- a/sys/vm/vnode_pager.c
+++ b/sys/vm/vnode_pager.c
@@ -38,7 +38,7 @@
* SUCH DAMAGE.
*
* from: @(#)vnode_pager.c 7.5 (Berkeley) 4/20/91
- * $Id: vnode_pager.c,v 1.56 1995/12/14 09:55:14 phk Exp $
+ * $Id: vnode_pager.c,v 1.57 1995/12/17 23:29:56 dyson Exp $
*/
/*
@@ -133,7 +133,8 @@ vnode_pager_alloc(handle, size, prot, offset)
* If the object is being terminated, wait for it to
* go away.
*/
- while (((object = vp->v_object) != NULL) && (object->flags & OBJ_DEAD)) {
+ while (((object = vp->v_object) != NULL) &&
+ (object->flags & OBJ_DEAD)) {
tsleep(object, PVM, "vadead", 0);
}