-rw-r--r--  sys/kern/vfs_bio.c      | 137
-rw-r--r--  sys/kern/vfs_export.c   |  30
-rw-r--r--  sys/kern/vfs_extattr.c  |   7
-rw-r--r--  sys/kern/vfs_subr.c     |  30
-rw-r--r--  sys/kern/vfs_syscalls.c |   7
-rw-r--r--  sys/nfs/nfs_bio.c       |  98
-rw-r--r--  sys/nfsclient/nfs_bio.c |  98
-rw-r--r--  sys/sys/mount.h         |   7
-rw-r--r--  sys/ufs/ffs/ffs_vnops.c |  10
-rw-r--r--  sys/vm/vm_object.c      |   3
-rw-r--r--  sys/vm/vm_pageout.c     |   4
11 files changed, 325 insertions(+), 106 deletions(-)
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index 731c9ed..f3f6528 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -18,7 +18,7 @@
* 5. Modifications may be freely made to this file if the above conditions
* are met.
*
- * $Id: vfs_bio.c,v 1.43 1995/04/30 05:09:13 davidg Exp $
+ * $Id: vfs_bio.c,v 1.44 1995/05/11 19:26:29 rgrimes Exp $
*/
/*
@@ -59,6 +59,7 @@ struct swqueue bswlist;
void vm_hold_free_pages(struct buf * bp, vm_offset_t from, vm_offset_t to);
void vm_hold_load_pages(struct buf * bp, vm_offset_t from, vm_offset_t to);
void vfs_clean_pages(struct buf * bp);
+static void vfs_setdirty(struct buf *bp);
int needsbuffer;
@@ -127,7 +128,10 @@ bufinit()
LIST_INSERT_HEAD(&invalhash, bp, b_hash);
}
/*
- * this will change later!!!
+ * maxbufspace is currently calculated assuming all filesystem blocks
+ * are 8K. If you happen to use a 16K filesystem, the size of the buffer
+ * cache is still the same as it would be for 8K filesystems. This
+ * keeps the size of the buffer cache "in check" for big block filesystems.
*/
minbuf = nbuf / 3;
maxbufspace = 2 * (nbuf + 8) * PAGE_SIZE;
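
As a rough check on the sizing above, here is a standalone sketch with hypothetical values (nbuf is set at boot and varies with installed memory; a 4K page size is assumed):

/* Standalone sketch, not part of the commit: bufinit() sizing for a
 * hypothetical configuration of nbuf = 256 buffers and 4K pages. */
#include <stdio.h>

#define PAGE_SIZE 4096			/* assumed page size */

int
main(void)
{
	int nbuf = 256;			/* hypothetical buffer count */
	int minbuf = nbuf / 3;
	long maxbufspace = 2L * (nbuf + 8) * PAGE_SIZE;

	/* prints: minbuf=85 maxbufspace=2162688 bytes (264 8K blocks) */
	printf("minbuf=%d maxbufspace=%ld bytes (%ld 8K blocks)\n",
	    minbuf, maxbufspace, maxbufspace / 8192);
	return (0);
}

A 16K-block filesystem gets the same byte budget, i.e. half as many resident blocks, which is the "in check" behavior the comment describes.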
@@ -314,9 +318,32 @@ bdwrite(struct buf * bp)
bp->b_flags |= B_DONE | B_DELWRI;
reassignbuf(bp, bp->b_vp);
}
+
+ /*
+ * Doing the bmap now keeps the system from needing to do it later,
+ * perhaps when the system is attempting to do a sync. Since the
+ * indirect block -- or whatever other data structure the filesystem
+ * needs -- is likely still in memory now, this is a good time to do
+ * it. Note also that if the pageout daemon is requesting the sync,
+ * there might not be enough memory to do the bmap then, so doing it
+ * now is important.
+ */
if( bp->b_lblkno == bp->b_blkno) {
VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL);
}
+
+ /*
+ * Set the *dirty* buffer range based upon the VM system dirty pages.
+ */
+ vfs_setdirty(bp);
+
+ /*
+ * We need to do this here to satisfy the vnode_pager and the
+ * pageout daemon, so that they consider the pages "cleaned".
+ * Note that since the pages are in a delayed write buffer, the
+ * VFS layer will see that they get written out on the next sync,
+ * or perhaps the cluster will be completed.
+ */
vfs_clean_pages(bp);
brelse(bp);
return;
@@ -413,31 +440,39 @@ brelse(struct buf * bp)
if (resid > iototal)
resid = iototal;
if (resid > 0) {
- if (bp->b_flags & (B_ERROR | B_NOCACHE)) {
- vm_page_set_invalid(m, foff, resid);
- if (m->valid == 0)
- vm_page_protect(m, VM_PROT_NONE);
- }
+ /*
+ * Don't invalidate the page if the local machine has already
+ * modified it. This is the lesser of two evils, and should
+ * be fixed.
+ */
+ if (bp->b_flags & (B_NOCACHE | B_ERROR)) {
+ vm_page_test_dirty(m);
+ if (m->dirty == 0) {
+ vm_page_set_invalid(m, foff, resid);
+ if (m->valid == 0)
+ vm_page_protect(m, VM_PROT_NONE);
+ }
+ }
}
foff += resid;
iototal -= resid;
}
- if ((bp->b_flags & B_INVAL) || (bp->b_flags & B_RELBUF)) {
+ if (bp->b_flags & (B_INVAL | B_RELBUF)) {
for(i=0;i<bp->b_npages;i++) {
m = bp->b_pages[i];
--m->bmapped;
if (m->bmapped == 0) {
- vm_page_test_dirty(m);
- if(m->flags & PG_WANTED) {
+ if (m->flags & PG_WANTED) {
wakeup((caddr_t) m);
m->flags &= ~PG_WANTED;
}
+ vm_page_test_dirty(m);
if ((m->dirty & m->valid) == 0 &&
(m->flags & PG_REFERENCED) == 0 &&
- !pmap_is_referenced(VM_PAGE_TO_PHYS(m)))
+ !pmap_is_referenced(VM_PAGE_TO_PHYS(m))) {
vm_page_cache(m);
- else if ((m->flags & PG_ACTIVE) == 0) {
+ } else if ((m->flags & PG_ACTIVE) == 0) {
vm_page_activate(m);
m->act_count = 0;
}
@@ -661,8 +696,8 @@ incore(struct vnode * vp, daddr_t blkno)
/* Search hash chain */
while (bp) {
/* hit */
- if (bp->b_lblkno == blkno && bp->b_vp == vp
- && (bp->b_flags & B_INVAL) == 0) {
+ if (bp->b_lblkno == blkno && bp->b_vp == vp &&
+ (bp->b_flags & B_INVAL) == 0) {
splx(s);
return (bp);
}
@@ -712,6 +747,66 @@ inmem(struct vnode * vp, daddr_t blkno)
}
/*
+ * Set the dirty range for the buffer. For NFS, if the file is mapped
+ * and pages have been written to, let it know. We want the entire
+ * range of the buffer to be marked dirty if any of the pages have
+ * been written to, for consistency with the b_validoff and b_validend
+ * set in the NFS write code and used by the NFS read code.
+ */
+static void
+vfs_setdirty(struct buf *bp) {
+ int i;
+ vm_object_t object;
+ vm_offset_t boffset, offset;
+ /*
+ * We qualify the scan for modified pages on whether the
+ * object has been flushed yet. The OBJ_WRITEABLE flag
+ * is not cleared simply by protecting pages off.
+ */
+ if ((bp->b_flags & B_VMIO) &&
+ ((object = bp->b_pages[0]->object)->flags & OBJ_WRITEABLE)) {
+ /*
+ * test the pages to see if they have been modified directly
+ * by users through the VM system.
+ */
+ for (i = 0; i < bp->b_npages; i++)
+ vm_page_test_dirty(bp->b_pages[i]);
+
+ /*
+ * scan forwards for the first page modified
+ */
+ for (i = 0; i < bp->b_npages; i++) {
+ if (bp->b_pages[i]->dirty) {
+ break;
+ }
+ }
+ boffset = i * PAGE_SIZE;
+ if (boffset < bp->b_dirtyoff) {
+ bp->b_dirtyoff = boffset;
+ }
+
+ /*
+ * scan backwards for the last page modified
+ */
+ for (i = bp->b_npages - 1; i >= 0; --i) {
+ if (bp->b_pages[i]->dirty) {
+ break;
+ }
+ }
+ boffset = (i + 1) * PAGE_SIZE;
+ offset = boffset + bp->b_pages[0]->offset;
+ if (offset >= object->size) {
+ boffset = object->size - bp->b_pages[0]->offset;
+ }
+ if (bp->b_dirtyend < boffset) {
+ bp->b_dirtyend = boffset;
+ }
+ }
+}
+
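
The forward and backward scans above reduce to page-index arithmetic; this standalone sketch (made-up dirty map, not from the commit) shows the range they produce:

/* Standalone sketch, not part of the commit: vfs_setdirty()'s range
 * computation for a made-up 4-page buffer where only pages 1 and 2
 * were modified through the VM system. */
#include <stdio.h>

#define PAGE_SIZE 4096

int
main(void)
{
	int dirty[4] = { 0, 1, 1, 0 };	/* hypothetical per-page dirty bits */
	int npages = 4, i;
	long dirtyoff, dirtyend;

	for (i = 0; i < npages && dirty[i] == 0; i++)	/* first dirty page */
		;
	dirtyoff = (long)i * PAGE_SIZE;
	for (i = npages - 1; i >= 0 && dirty[i] == 0; i--)	/* last dirty page */
		;
	dirtyend = (long)(i + 1) * PAGE_SIZE;

	/* prints: dirty range [4096, 12288) */
	printf("dirty range [%ld, %ld)\n", dirtyoff, dirtyend);
	return (0);
}

Because b_dirtyoff is only ever lowered and b_dirtyend only raised, a fully clean buffer (the forward scan runs off the end and the backward scan yields -1) leaves the existing dirty bounds untouched.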
+/*
* Get a block given a specified block and offset into a file/device.
*/
struct buf *
@@ -1121,7 +1216,12 @@ biodone(register struct buf * bp)
resid = (m->offset + PAGE_SIZE) - foff;
if (resid > iosize)
resid = iosize;
- if (!bogusflag && resid > 0) {
+ /*
+ * In the write case, the valid and clean bits are
+ * already changed correctly, so we only need to do this
+ * here in the read case.
+ */
+ if ((bp->b_flags & B_READ) && !bogusflag && resid > 0) {
vm_page_set_valid(m, foff & (PAGE_SIZE-1), resid);
vm_page_set_clean(m, foff & (PAGE_SIZE-1), resid);
}
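
The foff/resid bookkeeping above (also used by vfs_busy_pages() below) walks the transfer one page at a time; a standalone sketch with hypothetical numbers:

/* Standalone sketch, not part of the commit: how biodone() splits an
 * 8K transfer at file offset 16384 across two 4K pages. */
#include <stdio.h>

#define PAGE_SIZE 4096

int
main(void)
{
	long foff = 16384;	/* hypothetical file offset of the buffer */
	long iosize = 8192;	/* bytes transferred */
	long m_offset, resid;

	for (m_offset = foff; iosize > 0; m_offset += PAGE_SIZE) {
		/* portion of the remaining I/O that lands in this page */
		resid = (m_offset + PAGE_SIZE) - foff;
		if (resid > iosize)
			resid = iosize;
		/* prints: page@16384 resid=4096, then page@20480 resid=4096 */
		printf("page@%ld resid=%ld\n", m_offset, resid);
		foff += resid;
		iosize -= resid;
	}
	return (0);
}

When foff is not page aligned, the first resid covers only the tail of that page, which is why vm_page_set_valid() and vm_page_set_clean() are given the in-page offset foff & (PAGE_SIZE-1).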
@@ -1258,6 +1358,7 @@ vfs_busy_pages(struct buf * bp, int clear_modify)
vm_offset_t foff = bp->b_vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
int iocount = bp->b_bufsize;
+ vfs_setdirty(bp);
for (i = 0; i < bp->b_npages; i++) {
vm_page_t m = bp->b_pages[i];
int resid = (m->offset + PAGE_SIZE) - foff;
@@ -1268,8 +1369,6 @@ vfs_busy_pages(struct buf * bp, int clear_modify)
m->busy++;
if (clear_modify) {
vm_page_protect(m, VM_PROT_READ);
- pmap_clear_reference(VM_PAGE_TO_PHYS(m));
- m->flags &= ~PG_REFERENCED;
vm_page_set_valid(m,
foff & (PAGE_SIZE-1), resid);
vm_page_set_clean(m,
@@ -1288,8 +1387,8 @@ vfs_busy_pages(struct buf * bp, int clear_modify)
/*
* Tell the VM system that the pages associated with this buffer
- * are dirty. This is in case of the unlikely circumstance that
- * a buffer has to be destroyed before it is flushed.
+ * are clean. This is used for delayed writes where the data is
+ * going to go to disk eventually without additional VM intervention.
*/
void
vfs_clean_pages(struct buf * bp)
diff --git a/sys/kern/vfs_export.c b/sys/kern/vfs_export.c
index 0998a81..710b3cb 100644
--- a/sys/kern/vfs_export.c
+++ b/sys/kern/vfs_export.c
@@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)vfs_subr.c 8.13 (Berkeley) 4/18/94
- * $Id: vfs_subr.c,v 1.28 1995/04/16 11:33:33 davidg Exp $
+ * $Id: vfs_subr.c,v 1.29 1995/05/12 04:24:53 davidg Exp $
*/
/*
@@ -1513,3 +1513,31 @@ vfs_export_lookup(mp, nep, nam)
}
return (np);
}
+
+
+/*
+ * Perform an msync on all vnodes under a mount point.
+ * The mount point must be locked.
+ */
+void
+vfs_msync(struct mount *mp, int flags) {
+ struct vnode *vp;
+loop:
+ for (vp = mp->mnt_vnodelist.lh_first;
+ vp != NULL;
+ vp = vp->v_mntvnodes.le_next) {
+
+ if (vp->v_mount != mp)
+ goto loop;
+ if (VOP_ISLOCKED(vp) && (flags != MNT_WAIT))
+ continue;
+ if (vp->v_vmdata &&
+ (((vm_object_t) vp->v_vmdata)->flags & OBJ_WRITEABLE)) {
+ if (vget(vp, 1))
+ goto loop;
+ _vm_object_page_clean( (vm_object_t) vp->v_vmdata,
+ 0, 0, TRUE);
+ vput(vp);
+ }
+ }
+}
diff --git a/sys/kern/vfs_extattr.c b/sys/kern/vfs_extattr.c
index 1d50f6f..7b4c9fe 100644
--- a/sys/kern/vfs_extattr.c
+++ b/sys/kern/vfs_extattr.c
@@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)vfs_syscalls.c 8.13 (Berkeley) 4/15/94
- * $Id: vfs_syscalls.c,v 1.23 1995/05/02 09:06:04 davidg Exp $
+ * $Id: vfs_syscalls.c,v 1.24 1995/05/15 08:39:31 davidg Exp $
*/
#include <sys/param.h>
@@ -282,6 +282,7 @@ dounmount(mp, flags, p)
return (error);
mp->mnt_flag &=~ MNT_ASYNC;
+ vfs_msync(mp, MNT_NOWAIT);
vnode_pager_umount(mp); /* release cached vnodes */
cache_purgevfs(mp); /* remove cache entries for this file sys */
if ((error = VFS_SYNC(mp, MNT_WAIT, p->p_ucred, p)) == 0 ||
@@ -331,6 +332,7 @@ sync(p, uap, retval)
!vfs_busy(mp)) {
asyncflag = mp->mnt_flag & MNT_ASYNC;
mp->mnt_flag &= ~MNT_ASYNC;
+ vfs_msync(mp, MNT_NOWAIT);
VFS_SYNC(mp, MNT_NOWAIT, p != NULL ? p->p_ucred : NOCRED, p);
if (asyncflag)
mp->mnt_flag |= MNT_ASYNC;
@@ -1782,6 +1784,9 @@ fsync(p, uap, retval)
return (error);
vp = (struct vnode *)fp->f_data;
VOP_LOCK(vp);
+ if (vp->v_vmdata) {
+ _vm_object_page_clean((vm_object_t) vp->v_vmdata, 0, 0, 0);
+ }
error = VOP_FSYNC(vp, fp->f_cred, MNT_WAIT, p);
VOP_UNLOCK(vp);
return (error);
diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c
index 0998a81..710b3cb 100644
--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)vfs_subr.c 8.13 (Berkeley) 4/18/94
- * $Id: vfs_subr.c,v 1.28 1995/04/16 11:33:33 davidg Exp $
+ * $Id: vfs_subr.c,v 1.29 1995/05/12 04:24:53 davidg Exp $
*/
/*
@@ -1513,3 +1513,31 @@ vfs_export_lookup(mp, nep, nam)
}
return (np);
}
+
+
+/*
+ * Perform an msync on all vnodes under a mount point.
+ * The mount point must be locked.
+ */
+void
+vfs_msync(struct mount *mp, int flags) {
+ struct vnode *vp;
+loop:
+ for (vp = mp->mnt_vnodelist.lh_first;
+ vp != NULL;
+ vp = vp->v_mntvnodes.le_next) {
+
+ if (vp->v_mount != mp)
+ goto loop;
+ if (VOP_ISLOCKED(vp) && (flags != MNT_WAIT))
+ continue;
+ if (vp->v_vmdata &&
+ (((vm_object_t) vp->v_vmdata)->flags & OBJ_WRITEABLE)) {
+ if (vget(vp, 1))
+ goto loop;
+ _vm_object_page_clean( (vm_object_t) vp->v_vmdata,
+ 0, 0, TRUE);
+ vput(vp);
+ }
+ }
+}
diff --git a/sys/kern/vfs_syscalls.c b/sys/kern/vfs_syscalls.c
index 1d50f6f..7b4c9fe 100644
--- a/sys/kern/vfs_syscalls.c
+++ b/sys/kern/vfs_syscalls.c
@@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)vfs_syscalls.c 8.13 (Berkeley) 4/15/94
- * $Id: vfs_syscalls.c,v 1.23 1995/05/02 09:06:04 davidg Exp $
+ * $Id: vfs_syscalls.c,v 1.24 1995/05/15 08:39:31 davidg Exp $
*/
#include <sys/param.h>
@@ -282,6 +282,7 @@ dounmount(mp, flags, p)
return (error);
mp->mnt_flag &=~ MNT_ASYNC;
+ vfs_msync(mp, MNT_NOWAIT);
vnode_pager_umount(mp); /* release cached vnodes */
cache_purgevfs(mp); /* remove cache entries for this file sys */
if ((error = VFS_SYNC(mp, MNT_WAIT, p->p_ucred, p)) == 0 ||
@@ -331,6 +332,7 @@ sync(p, uap, retval)
!vfs_busy(mp)) {
asyncflag = mp->mnt_flag & MNT_ASYNC;
mp->mnt_flag &= ~MNT_ASYNC;
+ vfs_msync(mp, MNT_NOWAIT);
VFS_SYNC(mp, MNT_NOWAIT, p != NULL ? p->p_ucred : NOCRED, p);
if (asyncflag)
mp->mnt_flag |= MNT_ASYNC;
@@ -1782,6 +1784,9 @@ fsync(p, uap, retval)
return (error);
vp = (struct vnode *)fp->f_data;
VOP_LOCK(vp);
+ if (vp->v_vmdata) {
+ _vm_object_page_clean((vm_object_t) vp->v_vmdata, 0, 0, 0);
+ }
error = VOP_FSYNC(vp, fp->f_cred, MNT_WAIT, p);
VOP_UNLOCK(vp);
return (error);
diff --git a/sys/nfs/nfs_bio.c b/sys/nfs/nfs_bio.c
index ebbab31..057ffb6 100644
--- a/sys/nfs/nfs_bio.c
+++ b/sys/nfs/nfs_bio.c
@@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* @(#)nfs_bio.c 8.5 (Berkeley) 1/4/94
- * $Id: nfs_bio.c,v 1.11 1995/03/04 03:24:34 davidg Exp $
+ * $Id: nfs_bio.c,v 1.12 1995/04/16 05:05:25 davidg Exp $
*/
#include <sys/param.h>
@@ -78,6 +78,7 @@ nfs_bioread(vp, uio, ioflag, cred)
struct proc *p;
struct nfsmount *nmp;
daddr_t lbn, rabn;
+ int bufsize;
int nra, error = 0, n = 0, on = 0, not_readin;
#ifdef lint
@@ -209,7 +210,7 @@ nfs_bioread(vp, uio, ioflag, cred)
rabp = nfs_getcacheblk(vp, rabn, biosize, p);
if (!rabp)
return (EINTR);
- if ((rabp->b_flags & B_DELWRI) == 0) {
+ if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
rabp->b_flags |= (B_READ | B_ASYNC);
vfs_busy_pages(rabp, 0);
if (nfs_asyncio(rabp, cred)) {
@@ -231,7 +232,12 @@ nfs_bioread(vp, uio, ioflag, cred)
* as required.
*/
again:
- bp = nfs_getcacheblk(vp, lbn, biosize, p);
+ bufsize = biosize;
+ if ((lbn + 1) * biosize > np->n_size) {
+ bufsize = np->n_size - lbn * biosize;
+ bufsize = (bufsize + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
+ }
+ bp = nfs_getcacheblk(vp, lbn, bufsize, p);
if (!bp)
return (EINTR);
if ((bp->b_flags & B_CACHE) == 0) {
@@ -244,7 +250,11 @@ again:
return (error);
}
}
- n = min((unsigned)(biosize - on), uio->uio_resid);
+ if (bufsize > on) {
+ n = min((unsigned)(bufsize - on), uio->uio_resid);
+ } else {
+ n = 0;
+ }
diff = np->n_size - uio->uio_offset;
if (diff < n)
n = diff;
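
The bufsize computation above sizes the last buffer of a file to the remaining bytes, rounded up to a DEV_BSIZE boundary; a standalone sketch with hypothetical numbers:

/* Standalone sketch, not part of the commit: the partial-last-block
 * sizing above for a hypothetical 10000-byte file with 8K NFS blocks. */
#include <stdio.h>

#define DEV_BSIZE 512

int
main(void)
{
	long n_size = 10000;	/* hypothetical file size */
	long biosize = 8192;	/* hypothetical NFS block size */
	long lbn = 1;		/* last block covers bytes 8192..9999 */
	long bufsize = biosize;

	if ((lbn + 1) * biosize > n_size) {
		bufsize = n_size - lbn * biosize;	/* 1808 bytes */
		bufsize = (bufsize + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
	}
	/* prints: bufsize=2048 (1808 rounded up to a DEV_BSIZE multiple) */
	printf("bufsize=%ld\n", bufsize);
	return (0);
}

This short last buffer is also why the read path above now checks bufsize > on before computing n: with a truncated buffer, the requested offset can land at or past the buffer's end, in which case nothing is copied.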
@@ -313,7 +323,7 @@ again:
!incore(vp, rabn)) {
rabp = nfs_getcacheblk(vp, rabn, NFS_DIRBLKSIZ, p);
if (rabp) {
- if ((rabp->b_flags & B_CACHE) == 0) {
+ if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
rabp->b_flags |= (B_READ | B_ASYNC);
vfs_busy_pages(rabp, 0);
if (nfs_asyncio(rabp, cred)) {
@@ -378,6 +388,7 @@ nfs_write(ap)
struct vattr vattr;
struct nfsmount *nmp;
daddr_t lbn;
+ int bufsize;
int n, on, error = 0;
#ifdef DIAGNOSTIC
@@ -458,7 +469,16 @@ nfs_write(ap)
on = uio->uio_offset & (biosize-1);
n = min((unsigned)(biosize - on), uio->uio_resid);
again:
- bp = nfs_getcacheblk(vp, lbn, biosize, p);
+ if (uio->uio_offset + n > np->n_size) {
+ np->n_size = uio->uio_offset + n;
+ vnode_pager_setsize(vp, (u_long)np->n_size);
+ }
+ bufsize = biosize;
+ if ((lbn + 1) * biosize > np->n_size) {
+ bufsize = np->n_size - lbn * biosize;
+ bufsize = (bufsize + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
+ }
+ bp = nfs_getcacheblk(vp, lbn, bufsize, p);
if (!bp)
return (EINTR);
if (bp->b_wcred == NOCRED) {
@@ -466,9 +486,9 @@ again:
bp->b_wcred = cred;
}
np->n_flag |= NMODIFIED;
- if (uio->uio_offset + n > np->n_size) {
- np->n_size = uio->uio_offset + n;
- vnode_pager_setsize(vp, (u_long)np->n_size);
+
+ if ((bp->b_blkno * DEV_BSIZE) + bp->b_dirtyend > np->n_size) {
+ bp->b_dirtyend = np->n_size - (bp->b_blkno * DEV_BSIZE);
}
/*
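
The clamp above keeps b_dirtyend from extending past end of file; a standalone sketch with hypothetical numbers (b_blkno counts DEV_BSIZE units, so blkno * DEV_BSIZE is the buffer's byte offset in the file):

/* Standalone sketch, not part of the commit: clamping the dirty region
 * of a buffer at byte 8192 against a hypothetical 9000-byte file. */
#include <stdio.h>

#define DEV_BSIZE 512

int
main(void)
{
	long blkno = 16;		/* buffer starts at byte 8192 */
	long n_size = 9000;		/* hypothetical file size */
	long dirtyoff = 0, dirtyend = 4096;

	if (blkno * DEV_BSIZE + dirtyend > n_size)
		dirtyend = n_size - blkno * DEV_BSIZE;	/* clamp to EOF */

	/* prints: dirty [0, 808) -- only bytes inside the file are sent */
	printf("dirty [%ld, %ld)\n", dirtyoff, dirtyend);
	return (0);
}

If the file has shrunk so far that the whole buffer lies at or beyond EOF, the clamp drives b_dirtyend to or below b_dirtyoff; the nfs_doio() hunk below now detects that empty range and completes the buffer without issuing a write RPC.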
@@ -801,18 +821,23 @@ nfs_doio(bp, cr, p)
bp->b_error = error;
}
} else {
- io.iov_len = uiop->uio_resid = bp->b_dirtyend
- - bp->b_dirtyoff;
- uiop->uio_offset = (bp->b_blkno * DEV_BSIZE)
- + bp->b_dirtyoff;
- io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
- uiop->uio_rw = UIO_WRITE;
- nfsstats.write_bios++;
- if (bp->b_flags & B_APPENDWRITE)
- error = nfs_writerpc(vp, uiop, cr, IO_APPEND);
- else
- error = nfs_writerpc(vp, uiop, cr, 0);
- bp->b_flags &= ~(B_WRITEINPROG | B_APPENDWRITE);
+
+ if (((bp->b_blkno * DEV_BSIZE) + bp->b_dirtyend) > np->n_size)
+ bp->b_dirtyend = np->n_size - (bp->b_blkno * DEV_BSIZE);
+
+ if (bp->b_dirtyend > bp->b_dirtyoff) {
+ io.iov_len = uiop->uio_resid = bp->b_dirtyend
+ - bp->b_dirtyoff;
+ uiop->uio_offset = (bp->b_blkno * DEV_BSIZE)
+ + bp->b_dirtyoff;
+ io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
+ uiop->uio_rw = UIO_WRITE;
+ nfsstats.write_bios++;
+ if (bp->b_flags & B_APPENDWRITE)
+ error = nfs_writerpc(vp, uiop, cr, IO_APPEND);
+ else
+ error = nfs_writerpc(vp, uiop, cr, 0);
+ bp->b_flags &= ~(B_WRITEINPROG | B_APPENDWRITE);
/*
* For an interrupted write, the buffer is still valid and the
@@ -821,26 +846,31 @@ nfs_doio(bp, cr, p)
* the B_ASYNC case, B_EINTR is not relevant, so the rpc attempt
* is essentially a noop.
*/
- if (error == EINTR) {
- bp->b_flags &= ~B_INVAL;
- bp->b_flags |= B_DELWRI;
+ if (error == EINTR) {
+ bp->b_flags &= ~(B_INVAL|B_NOCACHE);
+ bp->b_flags |= B_DELWRI;
/*
* Since for the B_ASYNC case, nfs_bwrite() has reassigned the
* buffer to the clean list, we have to reassign it back to the
* dirty one. Ugh.
*/
- if (bp->b_flags & B_ASYNC)
- reassignbuf(bp, vp);
- else
- bp->b_flags |= B_EINTR;
- } else {
- if (error) {
- bp->b_flags |= B_ERROR;
- bp->b_error = np->n_error = error;
- np->n_flag |= NWRITEERR;
+ if (bp->b_flags & B_ASYNC)
+ reassignbuf(bp, vp);
+ else
+ bp->b_flags |= B_EINTR;
+ } else {
+ if (error) {
+ bp->b_flags |= B_ERROR;
+ bp->b_error = np->n_error = error;
+ np->n_flag |= NWRITEERR;
+ }
+ bp->b_dirtyoff = bp->b_dirtyend = 0;
}
- bp->b_dirtyoff = bp->b_dirtyend = 0;
+ } else {
+ bp->b_resid = 0;
+ biodone(bp);
+ return (0);
}
}
bp->b_resid = uiop->uio_resid;
diff --git a/sys/nfsclient/nfs_bio.c b/sys/nfsclient/nfs_bio.c
index ebbab31..057ffb6 100644
--- a/sys/nfsclient/nfs_bio.c
+++ b/sys/nfsclient/nfs_bio.c
@@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* @(#)nfs_bio.c 8.5 (Berkeley) 1/4/94
- * $Id: nfs_bio.c,v 1.11 1995/03/04 03:24:34 davidg Exp $
+ * $Id: nfs_bio.c,v 1.12 1995/04/16 05:05:25 davidg Exp $
*/
#include <sys/param.h>
@@ -78,6 +78,7 @@ nfs_bioread(vp, uio, ioflag, cred)
struct proc *p;
struct nfsmount *nmp;
daddr_t lbn, rabn;
+ int bufsize;
int nra, error = 0, n = 0, on = 0, not_readin;
#ifdef lint
@@ -209,7 +210,7 @@ nfs_bioread(vp, uio, ioflag, cred)
rabp = nfs_getcacheblk(vp, rabn, biosize, p);
if (!rabp)
return (EINTR);
- if ((rabp->b_flags & B_DELWRI) == 0) {
+ if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
rabp->b_flags |= (B_READ | B_ASYNC);
vfs_busy_pages(rabp, 0);
if (nfs_asyncio(rabp, cred)) {
@@ -231,7 +232,12 @@ nfs_bioread(vp, uio, ioflag, cred)
* as required.
*/
again:
- bp = nfs_getcacheblk(vp, lbn, biosize, p);
+ bufsize = biosize;
+ if ((lbn + 1) * biosize > np->n_size) {
+ bufsize = np->n_size - lbn * biosize;
+ bufsize = (bufsize + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
+ }
+ bp = nfs_getcacheblk(vp, lbn, bufsize, p);
if (!bp)
return (EINTR);
if ((bp->b_flags & B_CACHE) == 0) {
@@ -244,7 +250,11 @@ again:
return (error);
}
}
- n = min((unsigned)(biosize - on), uio->uio_resid);
+ if (bufsize > on) {
+ n = min((unsigned)(bufsize - on), uio->uio_resid);
+ } else {
+ n = 0;
+ }
diff = np->n_size - uio->uio_offset;
if (diff < n)
n = diff;
@@ -313,7 +323,7 @@ again:
!incore(vp, rabn)) {
rabp = nfs_getcacheblk(vp, rabn, NFS_DIRBLKSIZ, p);
if (rabp) {
- if ((rabp->b_flags & B_CACHE) == 0) {
+ if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
rabp->b_flags |= (B_READ | B_ASYNC);
vfs_busy_pages(rabp, 0);
if (nfs_asyncio(rabp, cred)) {
@@ -378,6 +388,7 @@ nfs_write(ap)
struct vattr vattr;
struct nfsmount *nmp;
daddr_t lbn;
+ int bufsize;
int n, on, error = 0;
#ifdef DIAGNOSTIC
@@ -458,7 +469,16 @@ nfs_write(ap)
on = uio->uio_offset & (biosize-1);
n = min((unsigned)(biosize - on), uio->uio_resid);
again:
- bp = nfs_getcacheblk(vp, lbn, biosize, p);
+ if (uio->uio_offset + n > np->n_size) {
+ np->n_size = uio->uio_offset + n;
+ vnode_pager_setsize(vp, (u_long)np->n_size);
+ }
+ bufsize = biosize;
+ if ((lbn + 1) * biosize > np->n_size) {
+ bufsize = np->n_size - lbn * biosize;
+ bufsize = (bufsize + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
+ }
+ bp = nfs_getcacheblk(vp, lbn, bufsize, p);
if (!bp)
return (EINTR);
if (bp->b_wcred == NOCRED) {
@@ -466,9 +486,9 @@ again:
bp->b_wcred = cred;
}
np->n_flag |= NMODIFIED;
- if (uio->uio_offset + n > np->n_size) {
- np->n_size = uio->uio_offset + n;
- vnode_pager_setsize(vp, (u_long)np->n_size);
+
+ if ((bp->b_blkno * DEV_BSIZE) + bp->b_dirtyend > np->n_size) {
+ bp->b_dirtyend = np->n_size - (bp->b_blkno * DEV_BSIZE);
}
/*
@@ -801,18 +821,23 @@ nfs_doio(bp, cr, p)
bp->b_error = error;
}
} else {
- io.iov_len = uiop->uio_resid = bp->b_dirtyend
- - bp->b_dirtyoff;
- uiop->uio_offset = (bp->b_blkno * DEV_BSIZE)
- + bp->b_dirtyoff;
- io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
- uiop->uio_rw = UIO_WRITE;
- nfsstats.write_bios++;
- if (bp->b_flags & B_APPENDWRITE)
- error = nfs_writerpc(vp, uiop, cr, IO_APPEND);
- else
- error = nfs_writerpc(vp, uiop, cr, 0);
- bp->b_flags &= ~(B_WRITEINPROG | B_APPENDWRITE);
+
+ if (((bp->b_blkno * DEV_BSIZE) + bp->b_dirtyend) > np->n_size)
+ bp->b_dirtyend = np->n_size - (bp->b_blkno * DEV_BSIZE);
+
+ if (bp->b_dirtyend > bp->b_dirtyoff) {
+ io.iov_len = uiop->uio_resid = bp->b_dirtyend
+ - bp->b_dirtyoff;
+ uiop->uio_offset = (bp->b_blkno * DEV_BSIZE)
+ + bp->b_dirtyoff;
+ io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
+ uiop->uio_rw = UIO_WRITE;
+ nfsstats.write_bios++;
+ if (bp->b_flags & B_APPENDWRITE)
+ error = nfs_writerpc(vp, uiop, cr, IO_APPEND);
+ else
+ error = nfs_writerpc(vp, uiop, cr, 0);
+ bp->b_flags &= ~(B_WRITEINPROG | B_APPENDWRITE);
/*
* For an interrupted write, the buffer is still valid and the
@@ -821,26 +846,31 @@ nfs_doio(bp, cr, p)
* the B_ASYNC case, B_EINTR is not relevant, so the rpc attempt
* is essentially a noop.
*/
- if (error == EINTR) {
- bp->b_flags &= ~B_INVAL;
- bp->b_flags |= B_DELWRI;
+ if (error == EINTR) {
+ bp->b_flags &= ~(B_INVAL|B_NOCACHE);
+ bp->b_flags |= B_DELWRI;
/*
* Since for the B_ASYNC case, nfs_bwrite() has reassigned the
* buffer to the clean list, we have to reassign it back to the
* dirty one. Ugh.
*/
- if (bp->b_flags & B_ASYNC)
- reassignbuf(bp, vp);
- else
- bp->b_flags |= B_EINTR;
- } else {
- if (error) {
- bp->b_flags |= B_ERROR;
- bp->b_error = np->n_error = error;
- np->n_flag |= NWRITEERR;
+ if (bp->b_flags & B_ASYNC)
+ reassignbuf(bp, vp);
+ else
+ bp->b_flags |= B_EINTR;
+ } else {
+ if (error) {
+ bp->b_flags |= B_ERROR;
+ bp->b_error = np->n_error = error;
+ np->n_flag |= NWRITEERR;
+ }
+ bp->b_dirtyoff = bp->b_dirtyend = 0;
}
- bp->b_dirtyoff = bp->b_dirtyend = 0;
+ } else {
+ bp->b_resid = 0;
+ biodone(bp);
+ return (0);
}
}
bp->b_resid = uiop->uio_resid;
diff --git a/sys/sys/mount.h b/sys/sys/mount.h
index 6e68401..d102a1e 100644
--- a/sys/sys/mount.h
+++ b/sys/sys/mount.h
@@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)mount.h 8.13 (Berkeley) 3/27/94
- * $Id: mount.h,v 1.16 1995/04/10 18:52:40 wollman Exp $
+ * $Id: mount.h,v 1.17 1995/04/20 03:18:17 julian Exp $
*/
#ifndef _SYS_MOUNT_H_
@@ -476,14 +476,15 @@ extern int (*mountroot) __P((void));
*/
int dounmount __P((struct mount *, int, struct proc *));
struct mount *getvfs __P((fsid_t *)); /* return vfs given fsid */
-void getnewfsid __P((struct mount *mp, int mtype));
-int vflush __P((struct mount *mp, struct vnode *skipvp, int flags));
+void getnewfsid __P((struct mount *, int));
+int vflush __P((struct mount *, struct vnode *, int));
int vfs_export /* process mount export info */
__P((struct mount *, struct netexport *, struct export_args *));
struct netcred *vfs_export_lookup /* lookup host in fs export list */
__P((struct mount *, struct netexport *, struct mbuf *));
int vfs_lock __P((struct mount *)); /* lock a vfs */
int vfs_mountedon __P((struct vnode *)); /* is a vfs mounted on vp */
+void vfs_msync __P((struct mount *, int));
void vfs_unlock __P((struct mount *)); /* unlock a vfs */
void vfs_unmountall __P((void));
int vfs_busy __P((struct mount *)); /* mark a vfs busy */
diff --git a/sys/ufs/ffs/ffs_vnops.c b/sys/ufs/ffs/ffs_vnops.c
index ef08ebd..e2abd2b 100644
--- a/sys/ufs/ffs/ffs_vnops.c
+++ b/sys/ufs/ffs/ffs_vnops.c
@@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)ffs_vnops.c 8.7 (Berkeley) 2/3/94
- * $Id: ffs_vnops.c,v 1.8 1995/01/09 16:05:19 davidg Exp $
+ * $Id: ffs_vnops.c,v 1.9 1995/04/09 06:03:40 davidg Exp $
*/
#include <sys/param.h>
@@ -253,14 +253,6 @@ ffs_fsync(ap)
int pass;
int s;
- /*
- * If the vnode has an object, then flush all of the dirty pages
- * into the buffer cache.
- */
-
- if (vp->v_vmdata)
- _vm_object_page_clean((vm_object_t)vp->v_vmdata, 0, 0, 0);
-
pass = 0;
/*
* Flush all dirty buffers associated with a vnode.
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index e3b47eb..1d5ee25 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_object.c,v 1.44 1995/04/21 02:48:40 dyson Exp $
+ * $Id: vm_object.c,v 1.45 1995/05/02 05:57:10 davidg Exp $
*/
/*
@@ -328,6 +328,7 @@ vm_object_deallocate(object)
*/
if (object->flags & OBJ_CANPERSIST) {
if (object->resident_page_count != 0) {
+ vm_object_page_clean(object, 0, 0, TRUE);
TAILQ_INSERT_TAIL(&vm_object_cached_list, object,
cached_list);
vm_object_cached++;
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index 3b54c9e..79d28c9 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -65,7 +65,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_pageout.c,v 1.48 1995/04/17 10:00:55 davidg Exp $
+ * $Id: vm_pageout.c,v 1.49 1995/05/10 18:56:06 davidg Exp $
*/
/*
@@ -834,7 +834,7 @@ vm_pageout()
*/
cnt.v_pageout_free_min = 6 + cnt.v_page_count / 1024 +
cnt.v_interrupt_free_min;
- cnt.v_free_reserved = cnt.v_pageout_free_min + 2;
+ cnt.v_free_reserved = cnt.v_pageout_free_min + 6;
cnt.v_free_target = 3 * cnt.v_free_min + cnt.v_free_reserved;
cnt.v_free_min += cnt.v_free_reserved;
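
For scale, a standalone sketch of the threshold arithmetic with hypothetical values (a 16 MB machine with 4K pages; v_interrupt_free_min and v_free_min are assumed here, not taken from the commit):

/* Standalone sketch, not part of the commit: the vm_pageout() threshold
 * arithmetic for a hypothetical 4096-page machine. */
#include <stdio.h>

int
main(void)
{
	int page_count = 4096;		/* hypothetical: 16 MB of 4K pages */
	int interrupt_free_min = 2;	/* assumed default */
	int free_min = 16;		/* assumed starting value */
	int pageout_free_min, free_reserved, free_target;

	pageout_free_min = 6 + page_count / 1024 + interrupt_free_min;
	free_reserved = pageout_free_min + 6;	/* was "+ 2" before this commit */
	free_target = 3 * free_min + free_reserved;
	free_min += free_reserved;

	/* prints: reserved=18 target=66 min=34 */
	printf("reserved=%d target=%d min=%d\n",
	    free_reserved, free_target, free_min);
	return (0);
}

The change raises the reserve by four pages, giving the pageout path itself a little more free-memory headroom.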