summaryrefslogtreecommitdiffstats
path: root/sys/vm
diff options
context:
space:
mode:
authordyson <dyson@FreeBSD.org>1998-03-07 21:37:31 +0000
committerdyson <dyson@FreeBSD.org>1998-03-07 21:37:31 +0000
commit8ceb6160f494e2331b3f2e241e09d92673e397af (patch)
tree5030aec9050b0e765c5aea8634ba2de5ad3306e2 /sys/vm
parentab602aeb2614330963a1cee7162c9b6b22b9b9d9 (diff)
downloadFreeBSD-src-8ceb6160f494e2331b3f2e241e09d92673e397af.zip
FreeBSD-src-8ceb6160f494e2331b3f2e241e09d92673e397af.tar.gz
This mega-commit is meant to fix numerous interrelated problems. There
has been some bitrot and incorrect assumptions in the vfs_bio code. These problems have manifested themselves worse on NFS type filesystems, but can still affect local filesystems under certain circumstances. Most of the problems have involved mmap consistency, and as a side-effect broke the vfs.ioopt code. This code might have been committed separately, but almost everything is interrelated. 1) Allow (pmap_object_init_pt) prefaulting of buffer-busy pages that are fully valid. 2) Rather than deactivating erroneously read initial (header) pages in kern_exec, we now free them. 3) Fix the rundown of non-VMIO buffers that are in an inconsistent (missing vp) state. 4) Fix the disassociation of pages from buffers in brelse. The previous code had rotted and was faulty in a couple of important circumstances. 5) Remove a gratuitous buffer wakeup in vfs_vmio_release. 6) Remove a crufty and currently unused cluster mechanism for VBLK files in vfs_bio_awrite. When the code is functional, I'll add back a cleaner version. 7) The page busy count wakeups associated with the buffer cache usage were incorrectly cleaned up in a previous commit by me. Revert to the original, correct version, but with a cleaner implementation. 8) The cluster read code now tries to keep data associated with buffers more aggressively (without breaking the heuristics) when it is presumed that the read data (buffers) will be soon needed. 9) Change to filesystem lockmgr locks so that they use LK_NOPAUSE. The delay loop waiting is not useful for filesystem locks, due to the length of the time intervals. 10) Correct and clean-up spec_getpages. 11) Implement a fully functional nfs_getpages, nfs_putpages. 12) Fix nfs_write so that modifications are coherent with the NFS data on the server disk (at least as well as NFS seems to allow.) 13) Properly support MS_INVALIDATE on NFS. 14) Properly pass down MS_INVALIDATE to lower levels of the VM code from vm_map_clean. 
15) Better support the notion of pages being busy but valid, so that fewer in-transit waits occur. (use p->busy more for pageouts instead of PG_BUSY.) Since the page is fully valid, it is still usable for reads. 16) It is possible (in error) for cached pages to be busy. Make the page allocation code handle that case correctly. (It should probably be a printf or panic, but I want the system to handle coding errors robustly. I'll probably add a printf.) 17) Correct the design and usage of vm_page_sleep. It didn't handle consistency problems very well, so make the design a little less lofty. After vm_page_sleep, if it ever blocked, it is still important to relookup the page (if the object generation count changed), and verify its status (always.) 18) In vm_pageout.c, vm_pageout_clean had rotted, so clean that up. 19) Push the page busy for writes and VM_PROT_READ into vm_pageout_flush. 20) Fix vm_pager_put_pages and its descendants to support an int flag instead of a boolean, so that we can pass down the invalidate bit.
Diffstat (limited to 'sys/vm')
-rw-r--r--sys/vm/swap_pager.c10
-rw-r--r--sys/vm/vm_map.c9
-rw-r--r--sys/vm/vm_mmap.c9
-rw-r--r--sys/vm/vm_object.c28
-rw-r--r--sys/vm/vm_object.h5
-rw-r--r--sys/vm/vm_page.c55
-rw-r--r--sys/vm/vm_page.h16
-rw-r--r--sys/vm/vm_pageout.c44
-rw-r--r--sys/vm/vm_pager.c8
-rw-r--r--sys/vm/vm_pager.h7
-rw-r--r--sys/vm/vnode_pager.c60
11 files changed, 141 insertions, 110 deletions
diff --git a/sys/vm/swap_pager.c b/sys/vm/swap_pager.c
index 828eab9..53e0318 100644
--- a/sys/vm/swap_pager.c
+++ b/sys/vm/swap_pager.c
@@ -39,7 +39,7 @@
* from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
*
* @(#)swap_pager.c 8.9 (Berkeley) 3/21/94
- * $Id: swap_pager.c,v 1.90 1998/02/25 03:55:47 dyson Exp $
+ * $Id: swap_pager.c,v 1.91 1998/03/01 04:18:14 dyson Exp $
*/
/*
@@ -1578,9 +1578,7 @@ swap_pager_finish(spc)
printf("swap_pager_finish: I/O error, clean of page %lx failed\n",
(u_long) VM_PAGE_TO_PHYS(ma[i]));
ma[i]->dirty = VM_PAGE_BITS_ALL;
- ma[i]->flags |= PG_BUSY;
- ma[i]->busy--;
- PAGE_WAKEUP(ma[i]);
+ PAGE_BWAKEUP(ma[i]);
}
object->paging_in_progress -= spc->spc_count;
@@ -1651,9 +1649,7 @@ swap_pager_iodone(bp)
/*
* we wakeup any processes that are waiting on these pages.
*/
- ma[i]->flags |= PG_BUSY;
- ma[i]->busy--;
- PAGE_WAKEUP(ma[i]);
+ PAGE_BWAKEUP(ma[i]);
}
}
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index 930d687..7036e5f 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_map.c,v 1.116 1998/02/23 08:22:33 dyson Exp $
+ * $Id: vm_map.c,v 1.117 1998/02/25 03:55:49 dyson Exp $
*/
/*
@@ -286,7 +286,7 @@ vm_map_init(map, min, max)
map->first_free = &map->header;
map->hint = &map->header;
map->timestamp = 0;
- lockinit(&map->lock, PVM, "thrd_sleep", 0, 0);
+ lockinit(&map->lock, PVM, "thrd_sleep", 0, LK_NOPAUSE);
}
/*
@@ -1665,12 +1665,15 @@ vm_map_clean(map, start, end, syncio, invalidate)
* idea.
*/
if (current->protection & VM_PROT_WRITE) {
+ int flags;
if (object->type == OBJT_VNODE)
vn_lock(object->handle, LK_EXCLUSIVE | LK_RETRY, curproc);
+ flags = (syncio || invalidate) ? OBJPC_SYNC : 0;
+ flags |= invalidate ? OBJPC_INVAL : 0;
vm_object_page_clean(object,
OFF_TO_IDX(offset),
OFF_TO_IDX(offset + size + PAGE_MASK),
- (syncio||invalidate)?1:0);
+ flags);
if (invalidate)
vm_object_page_remove(object,
OFF_TO_IDX(offset),
diff --git a/sys/vm/vm_mmap.c b/sys/vm/vm_mmap.c
index 3184054..dc41acb 100644
--- a/sys/vm/vm_mmap.c
+++ b/sys/vm/vm_mmap.c
@@ -38,7 +38,7 @@
* from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
*
* @(#)vm_mmap.c 8.4 (Berkeley) 1/12/94
- * $Id: vm_mmap.c,v 1.72 1998/02/04 22:33:48 eivind Exp $
+ * $Id: vm_mmap.c,v 1.73 1998/02/06 12:14:25 eivind Exp $
*/
/*
@@ -642,13 +642,6 @@ mincore(p, uap)
vm_map_lock(map);
- /*
- * Not needed here
- */
-#if 0
- VM_MAP_RANGE_CHECK(map, addr, end);
-#endif
-
if (!vm_map_lookup_entry(map, addr, &entry))
entry = entry->next;
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index fd63b8d..bb2549c 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_object.c,v 1.114 1998/02/25 03:55:50 dyson Exp $
+ * $Id: vm_object.c,v 1.115 1998/03/01 04:18:22 dyson Exp $
*/
/*
@@ -431,7 +431,7 @@ vm_object_terminate(object)
/*
* Clean pages and flush buffers.
*/
- vm_object_page_clean(object, 0, 0, TRUE);
+ vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
vp = (struct vnode *) object->handle;
vinvalbuf(vp, V_SAVE, NOCRED, NULL, 0, 0);
@@ -499,11 +499,11 @@ vm_object_dispose(object)
*/
void
-vm_object_page_clean(object, start, end, syncio)
+vm_object_page_clean(object, start, end, flags)
vm_object_t object;
vm_pindex_t start;
vm_pindex_t end;
- boolean_t syncio;
+ int flags;
{
register vm_page_t p, np, tp;
register vm_offset_t tstart, tend;
@@ -515,6 +515,7 @@ vm_object_page_clean(object, start, end, syncio)
int chkb;
int maxb;
int i;
+ int pagerflags;
vm_page_t maf[vm_pageout_page_count];
vm_page_t mab[vm_pageout_page_count];
vm_page_t ma[vm_pageout_page_count];
@@ -525,6 +526,9 @@ vm_object_page_clean(object, start, end, syncio)
(object->flags & OBJ_MIGHTBEDIRTY) == 0)
return;
+ pagerflags = (flags & (OBJPC_SYNC | OBJPC_INVAL)) ? VM_PAGER_PUT_SYNC : 0;
+ pagerflags |= (flags & OBJPC_INVAL) ? VM_PAGER_PUT_INVAL : 0;
+
vp = object->handle;
object->flags |= OBJ_CLEANING;
@@ -628,29 +632,23 @@ rescan:
for(i=0;i<maxb;i++) {
int index = (maxb - i) - 1;
ma[index] = mab[i];
- ma[index]->flags |= PG_BUSY;
ma[index]->flags &= ~PG_CLEANCHK;
- vm_page_protect(ma[index], VM_PROT_READ);
}
- vm_page_protect(p, VM_PROT_READ);
- p->flags |= PG_BUSY;
p->flags &= ~PG_CLEANCHK;
ma[maxb] = p;
for(i=0;i<maxf;i++) {
int index = (maxb + i) + 1;
ma[index] = maf[i];
- ma[index]->flags |= PG_BUSY;
ma[index]->flags &= ~PG_CLEANCHK;
- vm_page_protect(ma[index], VM_PROT_READ);
}
runlen = maxb + maxf + 1;
splx(s);
- vm_pageout_flush(ma, runlen, 0);
+ vm_pageout_flush(ma, runlen, pagerflags);
if (object->generation != curgeneration)
goto rescan;
}
- VOP_FSYNC(vp, NULL, syncio, curproc);
+ VOP_FSYNC(vp, NULL, (pagerflags & VM_PAGER_PUT_SYNC)?1:0, curproc);
object->flags &= ~OBJ_CLEANING;
return;
@@ -1314,7 +1312,7 @@ again:
if (vm_page_sleep(p, "vmopar", &p->busy))
goto again;
- if (clean_only) {
+ if (clean_only && p->valid) {
vm_page_test_dirty(p);
if (p->valid & p->dirty)
continue;
@@ -1342,9 +1340,9 @@ again:
* interrupt -- minimize the spl transitions
*/
if (vm_page_sleep(p, "vmopar", &p->busy))
- goto again;
+ goto again;
- if (clean_only) {
+ if (clean_only && p->valid) {
vm_page_test_dirty(p);
if (p->valid & p->dirty) {
start += 1;
diff --git a/sys/vm/vm_object.h b/sys/vm/vm_object.h
index 76cb4d2..5d83269 100644
--- a/sys/vm/vm_object.h
+++ b/sys/vm/vm_object.h
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_object.h,v 1.45 1998/02/05 03:32:45 dyson Exp $
+ * $Id: vm_object.h,v 1.46 1998/02/25 03:55:52 dyson Exp $
*/
/*
@@ -142,6 +142,9 @@ struct vm_object {
#ifdef KERNEL
+#define OBJPC_SYNC 0x1 /* sync I/O */
+#define OBJPC_INVAL 0x2 /* invalidate */
+
TAILQ_HEAD(object_q, vm_object);
extern struct object_q vm_object_list; /* list of allocated objects */
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 439fc3d..f896666 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91
- * $Id: vm_page.c,v 1.93 1998/02/09 06:11:32 eivind Exp $
+ * $Id: vm_page.c,v 1.94 1998/03/01 04:18:24 dyson Exp $
*/
/*
@@ -88,6 +88,7 @@
static void vm_page_queue_init __P((void));
static vm_page_t vm_page_select_free __P((vm_object_t object,
vm_pindex_t pindex, int prefqueue));
+static vm_page_t vm_page_select_cache __P((vm_object_t, vm_pindex_t));
/*
* Associated with page of user-allocatable memory is a
@@ -685,6 +686,36 @@ vm_page_select(object, pindex, basequeue)
}
/*
+ * Find a page on the cache queue with color optimization. As pages
+ * might be found, but not applicable, they are deactivated. This
+ * keeps us from using potentially busy cached pages.
+ */
+vm_page_t
+vm_page_select_cache(object, pindex)
+ vm_object_t object;
+ vm_pindex_t pindex;
+{
+ vm_page_t m;
+
+ while (TRUE) {
+#if PQ_L2_SIZE > 1
+ int index;
+ index = (pindex + object->pg_color) & PQ_L2_MASK;
+ m = vm_page_list_find(PQ_CACHE, index);
+
+#else
+ m = TAILQ_FIRST(vm_page_queues[PQ_CACHE].pl);
+#endif
+ if (m && ((m->flags & PG_BUSY) || m->busy ||
+ m->hold_count || m->wire_count)) {
+ vm_page_deactivate(m);
+ continue;
+ }
+ return m;
+ }
+}
+
+/*
* Find a free or zero page, with specified preference.
*/
static vm_page_t
@@ -825,7 +856,7 @@ vm_page_alloc(object, pindex, page_req)
panic("vm_page_alloc(NORMAL): missing page on free queue\n");
#endif
} else {
- m = vm_page_select(object, pindex, PQ_CACHE);
+ m = vm_page_select_cache(object, pindex);
if (m == NULL) {
splx(s);
#if defined(DIAGNOSTIC)
@@ -847,7 +878,7 @@ vm_page_alloc(object, pindex, page_req)
panic("vm_page_alloc(ZERO): missing page on free queue\n");
#endif
} else {
- m = vm_page_select(object, pindex, PQ_CACHE);
+ m = vm_page_select_cache(object, pindex);
if (m == NULL) {
splx(s);
#if defined(DIAGNOSTIC)
@@ -871,7 +902,7 @@ vm_page_alloc(object, pindex, page_req)
panic("vm_page_alloc(SYSTEM): missing page on free queue\n");
#endif
} else {
- m = vm_page_select(object, pindex, PQ_CACHE);
+ m = vm_page_select_cache(object, pindex);
if (m == NULL) {
splx(s);
#if defined(DIAGNOSTIC)
@@ -986,18 +1017,18 @@ vm_wait()
int
vm_page_sleep(vm_page_t m, char *msg, char *busy) {
vm_object_t object = m->object;
- int generation = object->generation;
+ int slept = 0;
if ((busy && *busy) || (m->flags & PG_BUSY)) {
int s;
s = splvm();
if ((busy && *busy) || (m->flags & PG_BUSY)) {
m->flags |= PG_WANTED;
- tsleep(m, PVM, msg, 800);
+ tsleep(m, PVM, msg, 0);
+ slept = 1;
}
splx(s);
}
- return ((generation != object->generation) || (busy && *busy) ||
- (m->flags & PG_BUSY));
+ return slept;
}
/*
@@ -1540,13 +1571,11 @@ again1:
if (m->dirty) {
if (m->object->type == OBJT_VNODE) {
vn_lock(m->object->handle, LK_EXCLUSIVE | LK_RETRY, curproc);
- vm_object_page_clean(m->object, 0, 0, TRUE);
+ vm_object_page_clean(m->object, 0, 0, OBJPC_SYNC);
VOP_UNLOCK(m->object->handle, 0, curproc);
goto again1;
} else if (m->object->type == OBJT_SWAP ||
m->object->type == OBJT_DEFAULT) {
- m->flags |= PG_BUSY;
- vm_page_protect(m, VM_PROT_NONE);
vm_pageout_flush(&m, 1, 0);
goto again1;
}
@@ -1570,13 +1599,11 @@ again1:
if (m->dirty) {
if (m->object->type == OBJT_VNODE) {
vn_lock(m->object->handle, LK_EXCLUSIVE | LK_RETRY, curproc);
- vm_object_page_clean(m->object, 0, 0, TRUE);
+ vm_object_page_clean(m->object, 0, 0, OBJPC_SYNC);
VOP_UNLOCK(m->object->handle, 0, curproc);
goto again1;
} else if (m->object->type == OBJT_SWAP ||
m->object->type == OBJT_DEFAULT) {
- m->flags |= PG_BUSY;
- vm_page_protect(m, VM_PROT_NONE);
vm_pageout_flush(&m, 1, 0);
goto again1;
}
diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h
index b9241dc..c80bfdd 100644
--- a/sys/vm/vm_page.h
+++ b/sys/vm/vm_page.h
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_page.h,v 1.36 1998/02/05 03:32:47 dyson Exp $
+ * $Id: vm_page.h,v 1.37 1998/03/01 04:18:26 dyson Exp $
*/
/*
@@ -276,6 +276,16 @@ extern vm_offset_t last_phys_addr; /* physical address for last_page */
} \
}
+#define PAGE_BWAKEUP(m) { \
+ (m)->busy--; \
+ if ((((m)->flags & (PG_WANTED | PG_BUSY)) == PG_WANTED) && \
+ ((m)->busy == 0)) { \
+ (m)->flags &= ~PG_WANTED; \
+ wakeup((m)); \
+ } \
+}
+
+
#if PAGE_SIZE == 4096
#define VM_PAGE_BITS_ALL 0xff
#endif
@@ -350,11 +360,11 @@ vm_page_protect(vm_page_t mem, int prot)
{
if (prot == VM_PROT_NONE) {
if (mem->flags & (PG_WRITEABLE|PG_MAPPED)) {
- pmap_page_protect(VM_PAGE_TO_PHYS(mem), prot);
+ pmap_page_protect(VM_PAGE_TO_PHYS(mem), VM_PROT_NONE);
mem->flags &= ~(PG_WRITEABLE|PG_MAPPED);
}
} else if ((prot == VM_PROT_READ) && (mem->flags & PG_WRITEABLE)) {
- pmap_page_protect(VM_PAGE_TO_PHYS(mem), prot);
+ pmap_page_protect(VM_PAGE_TO_PHYS(mem), VM_PROT_READ);
mem->flags &= ~PG_WRITEABLE;
}
}
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index b614c3d..3eff5ed 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -65,7 +65,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_pageout.c,v 1.116 1998/02/24 10:16:23 dyson Exp $
+ * $Id: vm_pageout.c,v 1.117 1998/03/01 04:18:28 dyson Exp $
*/
/*
@@ -100,7 +100,7 @@
/* the kernel process "vm_pageout"*/
static void vm_pageout __P((void));
-static int vm_pageout_clean __P((vm_page_t, int));
+static int vm_pageout_clean __P((vm_page_t));
static int vm_pageout_scan __P((void));
static int vm_pageout_free_page_calc __P((vm_size_t count));
struct proc *pageproc;
@@ -218,9 +218,8 @@ void pmap_collect(void);
* move!)
*/
static int
-vm_pageout_clean(m, sync)
+vm_pageout_clean(m)
vm_page_t m;
- int sync;
{
register vm_object_t object;
vm_page_t mc[2*vm_pageout_page_count];
@@ -234,22 +233,21 @@ vm_pageout_clean(m, sync)
* If not OBJT_SWAP, additional memory may be needed to do the pageout.
* Try to avoid the deadlock.
*/
- if ((sync != VM_PAGEOUT_FORCE) &&
- (object->type == OBJT_DEFAULT) &&
+ if ((object->type == OBJT_DEFAULT) &&
((cnt.v_free_count + cnt.v_cache_count) < cnt.v_pageout_free_min))
return 0;
/*
* Don't mess with the page if it's busy.
*/
- if ((!sync && m->hold_count != 0) ||
+ if ((m->hold_count != 0) ||
((m->busy != 0) || (m->flags & PG_BUSY)))
return 0;
/*
* Try collapsing before it's too late.
*/
- if (!sync && object->backing_object) {
+ if (object->backing_object) {
vm_object_collapse(object);
}
@@ -295,8 +293,7 @@ vm_pageout_clean(m, sync)
}
vm_page_test_dirty(p);
if ((p->dirty & p->valid) != 0 &&
- ((p->queue == PQ_INACTIVE) ||
- (sync == VM_PAGEOUT_FORCE)) &&
+ (p->queue == PQ_INACTIVE) &&
(p->wire_count == 0) &&
(p->hold_count == 0)) {
mc[vm_pageout_page_count + i] = p;
@@ -330,8 +327,7 @@ do_backward:
}
vm_page_test_dirty(p);
if ((p->dirty & p->valid) != 0 &&
- ((p->queue == PQ_INACTIVE) ||
- (sync == VM_PAGEOUT_FORCE)) &&
+ (p->queue == PQ_INACTIVE) &&
(p->wire_count == 0) &&
(p->hold_count == 0)) {
mc[vm_pageout_page_count - i] = p;
@@ -351,30 +347,30 @@ do_backward:
/*
* we allow reads during pageouts...
*/
- for (i = page_base; i < (page_base + pageout_count); i++) {
- mc[i]->busy++;
- vm_page_protect(mc[i], VM_PROT_READ);
- }
-
- return vm_pageout_flush(&mc[page_base], pageout_count, sync);
+ return vm_pageout_flush(&mc[page_base], pageout_count, 0);
}
int
-vm_pageout_flush(mc, count, sync)
+vm_pageout_flush(mc, count, flags)
vm_page_t *mc;
int count;
- int sync;
+ int flags;
{
register vm_object_t object;
int pageout_status[count];
int numpagedout = 0;
int i;
+ for (i = 0; i < count; i++) {
+ mc[i]->busy++;
+ vm_page_protect(mc[i], VM_PROT_READ);
+ }
+
object = mc[0]->object;
object->paging_in_progress += count;
vm_pager_put_pages(object, mc, count,
- ((sync || (object == kernel_object)) ? TRUE : FALSE),
+ (flags | ((object == kernel_object) ? OBJPC_SYNC : 0)),
pageout_status);
for (i = 0; i < count; i++) {
@@ -417,9 +413,7 @@ vm_pageout_flush(mc, count, sync)
*/
if (pageout_status[i] != VM_PAGER_PEND) {
vm_object_pip_wakeup(object);
- mt->flags |= PG_BUSY;
- mt->busy--;
- PAGE_WAKEUP(mt);
+ PAGE_BWAKEUP(mt);
}
}
return numpagedout;
@@ -840,7 +834,7 @@ rescan0:
* laundry. If it is still in the laundry, then we
* start the cleaning operation.
*/
- written = vm_pageout_clean(m, 0);
+ written = vm_pageout_clean(m);
if (vp)
vput(vp);
diff --git a/sys/vm/vm_pager.c b/sys/vm/vm_pager.c
index bdb3dae..0f11408 100644
--- a/sys/vm/vm_pager.c
+++ b/sys/vm/vm_pager.c
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_pager.c,v 1.34 1998/02/06 12:14:29 eivind Exp $
+ * $Id: vm_pager.c,v 1.35 1998/02/23 08:22:40 dyson Exp $
*/
/*
@@ -183,14 +183,14 @@ vm_pager_get_pages(object, m, count, reqpage)
}
int
-vm_pager_put_pages(object, m, count, sync, rtvals)
+vm_pager_put_pages(object, m, count, flags, rtvals)
vm_object_t object;
vm_page_t *m;
int count;
- boolean_t sync;
+ int flags;
int *rtvals;
{
- return ((*pagertab[object->type]->pgo_putpages)(object, m, count, sync, rtvals));
+ return ((*pagertab[object->type]->pgo_putpages)(object, m, count, flags, rtvals));
}
boolean_t
diff --git a/sys/vm/vm_pager.h b/sys/vm/vm_pager.h
index ec21453..fb385fa 100644
--- a/sys/vm/vm_pager.h
+++ b/sys/vm/vm_pager.h
@@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)vm_pager.h 8.4 (Berkeley) 1/12/94
- * $Id: vm_pager.h,v 1.14 1997/10/12 20:26:32 phk Exp $
+ * $Id: vm_pager.h,v 1.15 1998/02/03 22:19:35 bde Exp $
*/
/*
@@ -55,7 +55,7 @@ struct pagerops {
vm_object_t (*pgo_alloc) __P((void *, vm_size_t, vm_prot_t, vm_ooffset_t)); /* Allocate pager. */
void (*pgo_dealloc) __P((vm_object_t)); /* Disassociate. */
int (*pgo_getpages) __P((vm_object_t, vm_page_t *, int, int)); /* Get (read) page. */
- int (*pgo_putpages) __P((vm_object_t, vm_page_t *, int, boolean_t, int *)); /* Put (write) page. */
+ int (*pgo_putpages) __P((vm_object_t, vm_page_t *, int, int, int *)); /* Put (write) page. */
boolean_t (*pgo_haspage) __P((vm_object_t, vm_pindex_t, int *, int *)); /* Does pager have page? */
void (*pgo_sync) __P((void));
};
@@ -76,6 +76,9 @@ struct pagerops {
#define VM_PAGER_ERROR 4
#define VM_PAGER_AGAIN 5
+#define VM_PAGER_PUT_SYNC 0x1
+#define VM_PAGER_PUT_INVAL 0x2
+
#ifdef KERNEL
#ifdef MALLOC_DECLARE
diff --git a/sys/vm/vnode_pager.c b/sys/vm/vnode_pager.c
index 33c69a3..bedffdc 100644
--- a/sys/vm/vnode_pager.c
+++ b/sys/vm/vnode_pager.c
@@ -38,7 +38,7 @@
* SUCH DAMAGE.
*
* from: @(#)vnode_pager.c 7.5 (Berkeley) 4/20/91
- * $Id: vnode_pager.c,v 1.87 1998/02/26 06:39:58 msmith Exp $
+ * $Id: vnode_pager.c,v 1.88 1998/03/01 04:18:31 dyson Exp $
*/
/*
@@ -557,7 +557,7 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
{
vm_object_t object;
vm_offset_t kva;
- off_t foff;
+ off_t foff, tfoff, nextoff;
int i, size, bsize, first, firstaddr;
struct vnode *dp;
int runpg;
@@ -749,11 +749,22 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
*/
relpbuf(bp);
- for (i = 0; i < count; i++) {
- pmap_clear_modify(VM_PAGE_TO_PHYS(m[i]));
- m[i]->dirty = 0;
- m[i]->valid = VM_PAGE_BITS_ALL;
- m[i]->flags &= ~PG_ZERO;
+ for (i = 0, tfoff = foff; i < count; i++, tfoff = nextoff) {
+ vm_page_t mt;
+
+ nextoff = tfoff + PAGE_SIZE;
+ mt = m[i];
+
+ if (nextoff <= size) {
+ mt->valid = VM_PAGE_BITS_ALL;
+ mt->dirty = 0;
+ pmap_clear_modify(VM_PAGE_TO_PHYS(mt));
+ } else {
+ int nvalid = ((size + DEV_BSIZE - 1) - tfoff) & ~(DEV_BSIZE - 1);
+ vm_page_set_validclean(mt, 0, nvalid);
+ }
+
+ mt->flags &= ~PG_ZERO;
if (i != reqpage) {
/*
@@ -769,13 +780,13 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
* now tell them that it is ok to use
*/
if (!error) {
- if (m[i]->flags & PG_WANTED)
- vm_page_activate(m[i]);
+ if (mt->flags & PG_WANTED)
+ vm_page_activate(mt);
else
- vm_page_deactivate(m[i]);
- PAGE_WAKEUP(m[i]);
+ vm_page_deactivate(mt);
+ PAGE_WAKEUP(mt);
} else {
- vnode_pager_freepage(m[i]);
+ vnode_pager_freepage(mt);
}
}
}
@@ -814,11 +825,11 @@ vnode_pager_putpages(object, m, count, sync, rtvals)
* own vnodes if they fail to implement VOP_GETPAGES.
*/
int
-vnode_pager_generic_putpages(vp, m, bytecount, sync, rtvals)
+vnode_pager_generic_putpages(vp, m, bytecount, flags, rtvals)
struct vnode *vp;
vm_page_t *m;
int bytecount;
- boolean_t sync;
+ int flags;
int *rtvals;
{
int i;
@@ -830,6 +841,7 @@ vnode_pager_generic_putpages(vp, m, bytecount, sync, rtvals)
struct uio auio;
struct iovec aiov;
int error;
+ int ioflags;
object = vp->v_object;
count = bytecount / PAGE_SIZE;
@@ -838,7 +850,8 @@ vnode_pager_generic_putpages(vp, m, bytecount, sync, rtvals)
rtvals[i] = VM_PAGER_AGAIN;
if ((int) m[0]->pindex < 0) {
- printf("vnode_pager_putpages: attempt to write meta-data!!! -- 0x%x(%x)\n", m[0]->pindex, m[0]->dirty);
+ printf("vnode_pager_putpages: attempt to write meta-data!!! -- 0x%x(%x)\n",
+ m[0]->pindex, m[0]->dirty);
rtvals[0] = VM_PAGER_BAD;
return VM_PAGER_BAD;
}
@@ -857,21 +870,12 @@ vnode_pager_generic_putpages(vp, m, bytecount, sync, rtvals)
for (i = ncount; i < count; i++) {
rtvals[i] = VM_PAGER_BAD;
}
-#ifdef BOGUS
- if (ncount == 0) {
- printf("vnode_pager_putpages: write past end of file: %d, %lu\n",
- poffset,
- (unsigned long) object->un_pager.vnp.vnp_size);
- return rtvals[0];
- }
-#endif
}
}
- for (i = 0; i < count; i++) {
- m[i]->busy++;
- m[i]->flags &= ~PG_BUSY;
- }
+ ioflags = IO_VMIO;
+ ioflags |= (flags & (VM_PAGER_PUT_SYNC | VM_PAGER_PUT_INVAL)) ? IO_SYNC: 0;
+ ioflags |= (flags & VM_PAGER_PUT_INVAL) ? IO_INVAL: 0;
aiov.iov_base = (caddr_t) 0;
aiov.iov_len = maxsize;
@@ -882,7 +886,7 @@ vnode_pager_generic_putpages(vp, m, bytecount, sync, rtvals)
auio.uio_rw = UIO_WRITE;
auio.uio_resid = maxsize;
auio.uio_procp = (struct proc *) 0;
- error = VOP_WRITE(vp, &auio, IO_VMIO|(sync?IO_SYNC:0), curproc->p_ucred);
+ error = VOP_WRITE(vp, &auio, ioflags, curproc->p_ucred);
cnt.v_vnodeout++;
cnt.v_vnodepgsout += ncount;
OpenPOWER on IntegriCloud