author    dyson <dyson@FreeBSD.org>    1995-11-05 20:46:03 +0000
committer dyson <dyson@FreeBSD.org>    1995-11-05 20:46:03 +0000
commit    1b8e5404ee43d51a47f50d452f1f170f3ca57b22 (patch)
tree      4b5a3508979357f31fb77292bfa5c2930e8ba243
parent    d461b5899dc0ba4686df948588f1fb15f2133fb7 (diff)
Greatly simplify the msync code. Eliminate complications in vm_pageout
for msyncing. Fix a bug that manifests itself primarily on NFS (the dirty
range on the buffers is not set on msync).
-rw-r--r--sys/kern/vfs_bio.c4
-rw-r--r--sys/vm/vm_fault.c4
-rw-r--r--sys/vm/vm_glue.c17
-rw-r--r--sys/vm/vm_object.c171
-rw-r--r--sys/vm/vm_object.h4
-rw-r--r--sys/vm/vm_pageout.c30
-rw-r--r--sys/vm/vm_pageout.h3
7 files changed, 100 insertions, 133 deletions
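
The heart of this change is the rewritten vm_object_page_clean() in the vm_object.c hunk below: instead of cleaning one page at a time through vm_pageout_clean(), it gathers dirty, valid, non-busy pages at consecutive offsets into a small array and hands each run to the new vm_pageout_flush() in a single pager call. The standalone sketch below models only that run-batching pattern; the page struct, flush_run() and RUN_MAX are illustrative stand-ins for struct vm_page, vm_pageout_flush() and vm_pageout_page_count, and the real code's splhigh() sections, busy-page waits, PG_BUSY marking and relookup handling are omitted.

#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE   4096
#define RUN_MAX     8            /* stand-in for vm_pageout_page_count */

struct page {                    /* illustrative stand-in for struct vm_page */
	size_t offset;
	int    dirty;
	int    valid;
};

/* Stand-in for vm_pageout_flush(): write one contiguous run with one call. */
static void
flush_run(struct page **run, int runlen)
{
	printf("flush %d page(s) starting at offset %zu\n",
	    runlen, run[0]->offset);
	for (int i = 0; i < runlen; i++)
		run[i]->dirty = 0;
}

/*
 * Model of the new vm_object_page_clean() inner loop: walk the range by
 * offset, queue dirty pages into `run`, and flush whenever the run fills
 * up or is broken by a missing or clean page.
 */
static void
object_page_clean(struct page *pages, size_t npages, size_t start, size_t end)
{
	struct page *run[RUN_MAX];
	int runlen = 0;

	for (size_t off = start; off < end; off += PAGE_SIZE) {
		size_t idx = off / PAGE_SIZE;
		struct page *p = (idx < npages) ? &pages[idx] : NULL;

		if (p == NULL || !p->valid || !p->dirty) {
			if (runlen > 0) {	/* run broken: flush what we have */
				flush_run(run, runlen);
				runlen = 0;
			}
			continue;
		}
		run[runlen++] = p;		/* extend the current run */
		if (runlen >= RUN_MAX) {
			flush_run(run, runlen);
			runlen = 0;
		}
	}
	if (runlen > 0)				/* trailing partial run */
		flush_run(run, runlen);
}

int
main(void)
{
	struct page pages[6] = {
		{ 0 * PAGE_SIZE, 1, 1 }, { 1 * PAGE_SIZE, 1, 1 },
		{ 2 * PAGE_SIZE, 0, 1 }, { 3 * PAGE_SIZE, 1, 1 },
		{ 4 * PAGE_SIZE, 1, 1 }, { 5 * PAGE_SIZE, 1, 1 },
	};
	object_page_clean(pages, 6, 0, 6 * PAGE_SIZE);
	return 0;
}

Each run results in one pager call instead of one call per page, which is the "greatly simplify" part of the log message; the busy-page and flag handling in the actual hunk deal with pages that are in flight while the scan runs.
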
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index 1e45a1e..4793bcf 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -18,7 +18,7 @@
* 5. Modifications may be freely made to this file if the above conditions
* are met.
*
- * $Id: vfs_bio.c,v 1.67 1995/10/19 23:48:25 dyson Exp $
+ * $Id: vfs_bio.c,v 1.68 1995/10/29 15:31:13 phk Exp $
*/
/*
@@ -818,7 +818,7 @@ vfs_setdirty(struct buf *bp) {
* is not cleared simply by protecting pages off.
*/
if ((bp->b_flags & B_VMIO) &&
- ((object = bp->b_pages[0]->object)->flags & OBJ_WRITEABLE)) {
+ ((object = bp->b_pages[0]->object)->flags & (OBJ_WRITEABLE|OBJ_CLEANING))) {
/*
* test the pages to see if they have been modified directly
* by users through the VM system.
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index ea62347..88c81b1 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -66,7 +66,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_fault.c,v 1.34 1995/10/23 03:49:28 dyson Exp $
+ * $Id: vm_fault.c,v 1.35 1995/11/02 06:42:47 davidg Exp $
*/
/*
@@ -606,7 +606,7 @@ readrest:
if (prot & VM_PROT_WRITE) {
m->flags |= PG_WRITEABLE;
- m->object->flags |= OBJ_WRITEABLE;
+ m->object->flags |= OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY;
/*
* If the fault is a write, we know that this page is being
* written NOW. This will save on the pmap_is_modified() calls
diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c
index e82c4f5..85e1d00 100644
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -59,7 +59,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_glue.c,v 1.28 1995/10/16 05:45:49 dyson Exp $
+ * $Id: vm_glue.c,v 1.29 1995/10/23 05:35:42 dyson Exp $
*/
#include <sys/param.h>
@@ -251,10 +251,8 @@ vm_fork(p1, p2, isvfork)
/* and force-map the upages into the kernel pmap */
for (i = 0; i < UPAGES; i++)
- pmap_enter(vm_map_pmap(u_map),
- ((vm_offset_t) up) + PAGE_SIZE * i,
- pmap_extract(vp->pmap, addr + PAGE_SIZE * i),
- VM_PROT_READ | VM_PROT_WRITE, 1);
+ pmap_kenter(((vm_offset_t) up) + PAGE_SIZE * i,
+ pmap_extract(vp->pmap, addr + PAGE_SIZE * i));
p2->p_addr = up;
@@ -350,9 +348,7 @@ faultin(p)
if (pa == 0)
panic("faultin: missing page for UPAGES\n");
- pmap_enter(vm_map_pmap(u_map),
- ((vm_offset_t) p->p_addr) + off,
- pa, VM_PROT_READ | VM_PROT_WRITE, 1);
+ pmap_kenter(((vm_offset_t) p->p_addr) + off, pa);
}
s = splhigh();
@@ -506,6 +502,7 @@ swapout(p)
{
vm_map_t map = &p->p_vmspace->vm_map;
vm_offset_t ptaddr;
+ int i;
++p->p_stats->p_ru.ru_nswap;
/*
@@ -524,8 +521,8 @@ swapout(p)
/*
* let the upages be paged
*/
- pmap_remove(vm_map_pmap(u_map),
- (vm_offset_t) p->p_addr, ((vm_offset_t) p->p_addr) + UPAGES * PAGE_SIZE);
+ for(i=0;i<UPAGES;i++)
+ pmap_kremove( (vm_offset_t) p->p_addr + PAGE_SIZE * i);
vm_map_pageable(map, (vm_offset_t) kstack,
(vm_offset_t) kstack + UPAGES * PAGE_SIZE, TRUE);
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index b6f3954..2a01901 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_object.c,v 1.53 1995/08/26 23:19:48 bde Exp $
+ * $Id: vm_object.c,v 1.54 1995/10/23 03:49:43 dyson Exp $
*/
/*
@@ -121,6 +121,7 @@ vm_object_t kernel_object;
vm_object_t kmem_object;
struct vm_object kernel_object_store;
struct vm_object kmem_object_store;
+extern int vm_pageout_page_count;
long object_collapses;
long object_bypasses;
@@ -423,26 +424,26 @@ vm_object_page_clean(object, start, end, syncio, lockflag)
{
register vm_page_t p;
register vm_offset_t tstart, tend;
- int pass;
- int pgcount, s;
- int allclean;
- int entireobj;
+ int s;
struct vnode *vp;
+ int runlen;
+ vm_page_t ma[vm_pageout_page_count];
- if (object->type != OBJT_VNODE || (object->flags & OBJ_WRITEABLE) == 0)
+ if (object->type != OBJT_VNODE ||
+ (object->flags & OBJ_MIGHTBEDIRTY) == 0)
return;
vp = object->handle;
if (lockflag)
VOP_LOCK(vp);
+ object->flags |= OBJ_CLEANING;
if (start != end) {
start = trunc_page(start);
end = round_page(end);
}
- pass = 0;
startover:
tstart = start;
if (end == 0) {
@@ -450,108 +451,72 @@ startover:
} else {
tend = end;
}
- entireobj = 0;
if (tstart == 0 && tend == object->size) {
- object->flags &= ~OBJ_WRITEABLE;
- entireobj = 1;
+ object->flags &= ~(OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY);
}
- pgcount = object->resident_page_count;
-
- if (pass == 0 &&
- (pgcount < 128 || pgcount > (object->size / (8 * PAGE_SIZE)))) {
- allclean = 1;
- for(; pgcount && (tstart < tend); tstart += PAGE_SIZE) {
- p = vm_page_lookup(object, tstart);
- if (!p)
- continue;
- --pgcount;
- s = splhigh();
- TAILQ_REMOVE(&object->memq, p, listq);
- TAILQ_INSERT_TAIL(&object->memq, p, listq);
- splx(s);
- if (entireobj)
- vm_page_protect(p, VM_PROT_READ);
- if ((p->flags & (PG_BUSY|PG_CACHE)) || p->busy ||
- p->valid == 0) {
- continue;
- }
- vm_page_test_dirty(p);
- if ((p->valid & p->dirty) != 0) {
- vm_offset_t tincr;
- tincr = vm_pageout_clean(p, VM_PAGEOUT_FORCE);
- if( tincr) {
- pgcount -= (tincr - 1);
- tincr *= PAGE_SIZE;
- tstart += tincr - PAGE_SIZE;
- }
- allclean = 0;
+ runlen = 0;
+ for(;tstart < tend; tstart += PAGE_SIZE) {
+relookup:
+ p = vm_page_lookup(object, tstart);
+ if (!p) {
+ if (runlen > 0) {
+ vm_pageout_flush(ma, runlen, syncio);
+ runlen = 0;
}
+ continue;
}
- if (!allclean) {
- pass = 1;
- goto startover;
- }
- if (lockflag)
- VOP_UNLOCK(vp);
- return;
- }
-
- allclean = 1;
- while ((p = object->memq.tqh_first) != NULL && pgcount > 0) {
-
- if (p->flags & PG_CACHE) {
- goto donext;
+ if (p->valid == 0 || (p->flags & PG_CACHE)) {
+ if (runlen > 0) {
+ vm_pageout_flush(ma, runlen, syncio);
+ runlen = 0;
+ }
+ continue;
}
- if (entireobj || (p->offset >= tstart && p->offset < tend)) {
- if (entireobj)
- vm_page_protect(p, VM_PROT_READ);
-
- if (p->valid == 0) {
- goto donext;
- }
+ vm_page_protect(p, VM_PROT_READ);
- s = splhigh();
- if ((p->flags & PG_BUSY) || p->busy) {
- allclean = 0;
- if (pass > 0) {
- p->flags |= PG_WANTED;
- tsleep(p, PVM, "objpcn", 0);
- splx(s);
- continue;
- } else {
- splx(s);
- goto donext;
- }
+ s = splhigh();
+ while ((p->flags & PG_BUSY) || p->busy) {
+ if (runlen > 0) {
+ splx(s);
+ vm_pageout_flush(ma, runlen, syncio);
+ runlen = 0;
+ goto relookup;
}
-
- TAILQ_REMOVE(&object->memq, p, listq);
- TAILQ_INSERT_TAIL(&object->memq, p, listq);
+ p->flags |= PG_WANTED|PG_REFERENCED;
+ tsleep(p, PVM, "vpcwai", 0);
splx(s);
+ goto relookup;
+ }
+ splx(s);
- pgcount--;
+ if (p->dirty == 0)
vm_page_test_dirty(p);
- if ((p->valid & p->dirty) != 0) {
- vm_pageout_clean(p, VM_PAGEOUT_FORCE);
- allclean = 0;
+
+ if ((p->valid & p->dirty) != 0) {
+ ma[runlen] = p;
+ p->flags |= PG_BUSY;
+ runlen++;
+ if (runlen >= vm_pageout_page_count) {
+ vm_pageout_flush(ma, runlen, syncio);
+ runlen = 0;
}
- continue;
+ } else if (runlen > 0) {
+ vm_pageout_flush(ma, runlen, syncio);
+ runlen = 0;
}
- donext:
- TAILQ_REMOVE(&object->memq, p, listq);
- TAILQ_INSERT_TAIL(&object->memq, p, listq);
- pgcount--;
+
}
- if ((!allclean && (pass == 0)) ||
- (entireobj && (object->flags & OBJ_WRITEABLE))) {
- pass = 1;
- if (entireobj)
- object->flags &= ~OBJ_WRITEABLE;
- goto startover;
+ if (runlen > 0) {
+ vm_pageout_flush(ma, runlen, syncio);
}
+
+ VOP_FSYNC(vp, NULL, syncio, curproc);
+
if (lockflag)
VOP_UNLOCK(vp);
+ object->flags &= ~OBJ_CLEANING;
return;
}
@@ -609,14 +574,14 @@ vm_object_pmap_copy(object, start, end)
{
register vm_page_t p;
- if (object == NULL)
+ if (object == NULL || (object->flags & OBJ_WRITEABLE) == 0)
return;
for (p = object->memq.tqh_first; p != NULL; p = p->listq.tqe_next) {
- if ((start <= p->offset) && (p->offset < end)) {
- vm_page_protect(p, VM_PROT_READ);
- }
+ vm_page_protect(p, VM_PROT_READ);
}
+
+ object->flags &= ~OBJ_WRITEABLE;
}
/*
@@ -638,23 +603,9 @@ vm_object_pmap_remove(object, start, end)
if (object == NULL)
return;
- ++object->paging_in_progress;
-
-again:
for (p = object->memq.tqh_first; p != NULL; p = p->listq.tqe_next) {
- if ((start <= p->offset) && (p->offset < end)) {
- s = splhigh();
- if ((p->flags & PG_BUSY) || p->busy) {
- p->flags |= PG_WANTED;
- tsleep(p, PVM, "vmopmr", 0);
- splx(s);
- goto again;
- }
- splx(s);
- vm_page_protect(p, VM_PROT_NONE);
- }
+ vm_page_protect(p, VM_PROT_NONE);
}
- vm_object_pip_wakeup(object);
}
/*
diff --git a/sys/vm/vm_object.h b/sys/vm/vm_object.h
index 0528831..8b02609 100644
--- a/sys/vm/vm_object.h
+++ b/sys/vm/vm_object.h
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_object.h,v 1.21 1995/07/29 11:44:28 bde Exp $
+ * $Id: vm_object.h,v 1.22 1995/08/26 23:19:49 bde Exp $
*/
/*
@@ -126,6 +126,8 @@ struct vm_object {
#define OBJ_DEAD 0x0008 /* dead objects (during rundown) */
#define OBJ_PIPWNT 0x0040 /* paging in progress wanted */
#define OBJ_WRITEABLE 0x0080 /* object has been made writable */
+#define OBJ_MIGHTBEDIRTY 0x0100 /* object might be dirty */
+#define OBJ_CLEANING 0x0200
#ifdef KERNEL
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index 38de743..638913d 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -65,7 +65,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_pageout.c,v 1.57 1995/10/07 19:02:55 davidg Exp $
+ * $Id: vm_pageout.c,v 1.58 1995/10/23 05:35:48 dyson Exp $
*/
/*
@@ -164,7 +164,6 @@ vm_pageout_clean(m, sync)
int pageout_status[VM_PAGEOUT_PAGE_COUNT];
vm_page_t mc[2*VM_PAGEOUT_PAGE_COUNT];
int pageout_count;
- int anyok = 0;
int i, forward_okay, backward_okay, page_base;
vm_offset_t offset = m->offset;
@@ -292,14 +291,31 @@ do_backward:
mc[i]->flags |= PG_BUSY;
vm_page_protect(mc[i], VM_PROT_READ);
}
- object->paging_in_progress += pageout_count;
- vm_pager_put_pages(object, &mc[page_base], pageout_count,
+ return vm_pageout_flush(&mc[page_base], pageout_count, sync);
+}
+
+int
+vm_pageout_flush(mc, count, sync)
+ vm_page_t *mc;
+ int count;
+ int sync;
+{
+ register vm_object_t object;
+ int pageout_status[count];
+ int anyok = 0;
+ int i;
+
+ object = mc[0]->object;
+ object->paging_in_progress += count;
+
+ vm_pager_put_pages(object, mc, count,
((sync || (object == kernel_object)) ? TRUE : FALSE),
pageout_status);
- for (i = 0; i < pageout_count; i++) {
- vm_page_t mt = mc[page_base + i];
+
+ for (i = 0; i < count; i++) {
+ vm_page_t mt = mc[i];
switch (pageout_status[i]) {
case VM_PAGER_OK:
@@ -621,7 +637,7 @@ rescan1:
if (object->type == OBJT_VNODE) {
vp = object->handle;
if (VOP_ISLOCKED(vp) || vget(vp, 1)) {
- if (object->flags & OBJ_WRITEABLE)
+ if (object->flags & OBJ_MIGHTBEDIRTY)
++vnodes_skipped;
m = next;
continue;
diff --git a/sys/vm/vm_pageout.h b/sys/vm/vm_pageout.h
index b1ef37c..df05f9b 100644
--- a/sys/vm/vm_pageout.h
+++ b/sys/vm/vm_pageout.h
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_pageout.h,v 1.13 1995/07/13 08:48:41 davidg Exp $
+ * $Id: vm_pageout.h,v 1.14 1995/08/28 09:19:25 julian Exp $
*/
#ifndef _VM_VM_PAGEOUT_H_
@@ -127,6 +127,7 @@ int vm_pageout_scan __P((void));
void vm_pageout_page __P((vm_page_t, vm_object_t));
void vm_pageout_cluster __P((vm_page_t, vm_object_t));
int vm_pageout_clean __P((vm_page_t, int));
+int vm_pageout_flush __P((vm_page_t *, int, int));
#endif
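
As a quick mental model of the flag plumbing added across the hunks above (a minimal userland sketch, not kernel code; the object struct and helper functions are illustrative): a write fault sets OBJ_WRITEABLE and OBJ_MIGHTBEDIRTY, vm_object_page_clean() only processes vnode objects carrying OBJ_MIGHTBEDIRTY and holds OBJ_CLEANING for the duration of the clean, and vfs_setdirty() now also scans when OBJ_CLEANING is set, so the buffer dirty range is computed while an msync-driven clean is in flight, which is the NFS problem noted in the commit message.

#include <stdio.h>

/* Flag values copied from the vm_object.h hunk above. */
#define OBJ_WRITEABLE    0x0080
#define OBJ_MIGHTBEDIRTY 0x0100
#define OBJ_CLEANING     0x0200

struct object { int flags; };		/* illustrative stand-in */

static void
write_fault(struct object *obj)
{
	/* vm_fault(): a write fault marks the object possibly dirty. */
	obj->flags |= OBJ_WRITEABLE | OBJ_MIGHTBEDIRTY;
}

static int
setdirty_should_scan(const struct object *obj)
{
	/* vfs_setdirty(): scan pages if writeable OR a clean is in progress. */
	return (obj->flags & (OBJ_WRITEABLE | OBJ_CLEANING)) != 0;
}

static void
page_clean(struct object *obj, int entire_object)
{
	if ((obj->flags & OBJ_MIGHTBEDIRTY) == 0)
		return;				/* nothing to clean */
	obj->flags |= OBJ_CLEANING;
	if (entire_object)
		obj->flags &= ~(OBJ_WRITEABLE | OBJ_MIGHTBEDIRTY);
	/* ... flush dirty runs here; vfs_setdirty() still scans meanwhile ... */
	printf("during clean: scan=%d\n", setdirty_should_scan(obj));
	obj->flags &= ~OBJ_CLEANING;
}

int
main(void)
{
	struct object obj = { 0 };

	write_fault(&obj);
	printf("after fault:  scan=%d\n", setdirty_should_scan(&obj));
	page_clean(&obj, 1);
	printf("after clean:  scan=%d\n", setdirty_should_scan(&obj));
	return 0;
}
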