path: root/sys/nfsclient/nfs_bio.c
author	dyson <dyson@FreeBSD.org>	1998-03-07 21:37:31 +0000
committer	dyson <dyson@FreeBSD.org>	1998-03-07 21:37:31 +0000
commit	8ceb6160f494e2331b3f2e241e09d92673e397af (patch)
tree	5030aec9050b0e765c5aea8634ba2de5ad3306e2 /sys/nfsclient/nfs_bio.c
parent	ab602aeb2614330963a1cee7162c9b6b22b9b9d9 (diff)
This mega-commit is meant to fix numerous interrelated problems. There
has been some bitrot and there were incorrect assumptions in the vfs_bio code. These problems have manifested themselves worst on NFS-type filesystems, but can still affect local filesystems under certain circumstances. Most of the problems have involved mmap consistency, and as a side effect broke the vfs.ioopt code. This code might have been committed separately, but almost everything is interrelated.

1) Allow (pmap_object_init_pt) prefaulting of buffer-busy pages that are fully valid.
2) Rather than deactivating erroneously read initial (header) pages in kern_exec, we now free them.
3) Fix the rundown of non-VMIO buffers that are in an inconsistent (missing vp) state.
4) Fix the disassociation of pages from buffers in brelse. The previous code had rotted and was faulty in a couple of important circumstances.
5) Remove a gratuitous buffer wakeup in vfs_vmio_release.
6) Remove a crufty and currently unused cluster mechanism for VBLK files in vfs_bio_awrite. When the code is functional, I'll add back a cleaner version.
7) The page-busy-count wakeups associated with buffer cache usage were incorrectly cleaned up in a previous commit by me. Revert to the original, correct version, but with a cleaner implementation.
8) The cluster read code now tries to keep data associated with buffers more aggressively (without breaking the heuristics) when it is presumed that the read data (buffers) will soon be needed.
9) Change the filesystem lockmgr locks so that they use LK_NOPAUSE. The delay-loop waiting is not useful for filesystem locks, due to the length of the time intervals.
10) Correct and clean up spec_getpages.
11) Implement a fully functional nfs_getpages and nfs_putpages.
12) Fix nfs_write so that modifications are coherent with the NFS data on the server disk (at least as well as NFS seems to allow).
13) Properly support MS_INVALIDATE on NFS (illustrated by the userland sketch after this list).
14) Properly pass MS_INVALIDATE down to lower levels of the VM code from vm_map_clean.
15) Better support the notion of pages being busy but valid, so that fewer in-transit waits occur (use p->busy more for pageouts instead of PG_BUSY). Since the page is fully valid, it is still usable for reads.
16) It is possible (in error) for cached pages to be busy. Make the page allocation code handle that case correctly. (It should probably be a printf or panic, but I want the system to handle coding errors robustly. I'll probably add a printf.)
17) Correct the design and usage of vm_page_sleep. It didn't handle consistency problems very well, so make the design a little less lofty. After vm_page_sleep, if it ever blocked, it is still important to relookup the page (if the object generation count changed) and verify its status (always).
18) In vm_pageout.c, vm_pageout_clean had rotted, so clean that up.
19) Push the page busy for writes and VM_PROT_READ into vm_pageout_flush.
20) Fix vm_pager_put_pages and its descendants to support an int flag instead of a boolean, so that we can pass down the invalidate bit.
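Items 12-14 above are visible from userland through msync(2): after this commit, MS_INVALIDATE on an NFS-backed mapping actually discards the cached pages so that the mapping is re-read from the server. Below is a minimal userland sketch of that call sequence; the path /mnt/nfs/scratch and the 4096-byte (one page) mapping length are illustrative assumptions, and the program exercises only the interface, not this commit's kernel internals.

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int
main(void)
{
	/* Hypothetical file on an NFS mount; 4096 assumes one 4K page. */
	int fd = open("/mnt/nfs/scratch", O_RDWR);
	char *map;

	if (fd < 0) {
		perror("open");
		return (1);
	}
	map = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (map == MAP_FAILED) {
		perror("mmap");
		return (1);
	}
	/* Modify the file through the descriptor, not the mapping. */
	if (lseek(fd, 0, SEEK_SET) == -1 || write(fd, "hello", 5) != 5) {
		perror("write");
		return (1);
	}
	/*
	 * MS_INVALIDATE asks the kernel to discard cached copies of the
	 * mapped pages; with this commit the flag is passed down from
	 * vm_map_clean to the pager instead of being ignored on NFS.
	 */
	if (msync(map, 4096, MS_INVALIDATE) != 0) {
		perror("msync");
		return (1);
	}
	printf("mapping now reads: %.5s\n", map);
	munmap(map, 4096);
	close(fd);
	return (0);
}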
Diffstat (limited to 'sys/nfsclient/nfs_bio.c')
-rw-r--r--	sys/nfsclient/nfs_bio.c	167
1 file changed, 145 insertions, 22 deletions
diff --git a/sys/nfsclient/nfs_bio.c b/sys/nfsclient/nfs_bio.c
index a03b91b..be2b423 100644
--- a/sys/nfsclient/nfs_bio.c
+++ b/sys/nfsclient/nfs_bio.c
@@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* @(#)nfs_bio.c 8.9 (Berkeley) 3/30/95
- * $Id: nfs_bio.c,v 1.50 1998/02/06 12:13:55 eivind Exp $
+ * $Id: nfs_bio.c,v 1.51 1998/03/06 09:46:43 msmith Exp $
*/
@@ -65,6 +65,7 @@
static struct buf *nfs_getcacheblk __P((struct vnode *vp, daddr_t bn, int size,
struct proc *p));
+static void nfs_prot_buf __P((struct buf *bp, int off, int n));
extern int nfs_numasync;
extern struct nfsstats nfsstats;
@@ -76,58 +77,153 @@ int
nfs_getpages(ap)
struct vop_getpages_args *ap;
{
- int i, pcount, error;
+ int i, error, nextoff, size, toff, npages;
struct uio uio;
struct iovec iov;
vm_page_t m;
vm_offset_t kva;
+ struct buf *bp;
if ((ap->a_vp->v_object) == NULL) {
printf("nfs_getpages: called with non-merged cache vnode??\n");
return EOPNOTSUPP;
}
- m = ap->a_m[ap->a_reqpage];
- kva = vm_pager_map_page(m);
+ /*
+ * We use only the kva address for the buffer, but this is extremely
+ * convenient and fast.
+ */
+ bp = getpbuf();
+
+ npages = btoc(ap->a_count);
+ kva = (vm_offset_t) bp->b_data;
+ pmap_qenter(kva, ap->a_m, npages);
iov.iov_base = (caddr_t) kva;
- iov.iov_len = PAGE_SIZE;
+ iov.iov_len = ap->a_count;
uio.uio_iov = &iov;
uio.uio_iovcnt = 1;
- uio.uio_offset = IDX_TO_OFF(m->pindex);
- uio.uio_resid = PAGE_SIZE;
+ uio.uio_offset = IDX_TO_OFF(ap->a_m[0]->pindex);
+ uio.uio_resid = ap->a_count;
uio.uio_segflg = UIO_SYSSPACE;
uio.uio_rw = UIO_READ;
uio.uio_procp = curproc;
error = nfs_readrpc(ap->a_vp, &uio, curproc->p_ucred);
- vm_pager_unmap_page(kva);
+ pmap_qremove(kva, npages);
+
+ relpbuf(bp);
+
+ if (error && (uio.uio_resid == ap->a_count))
+ return VM_PAGER_ERROR;
+
+ size = ap->a_count - uio.uio_resid;
- pcount = round_page(ap->a_count) / PAGE_SIZE;
- for (i = 0; i < pcount; i++) {
+ for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
+ vm_page_t m;
+ nextoff = toff + PAGE_SIZE;
+ m = ap->a_m[i];
+
+ m->flags &= ~PG_ZERO;
+
+ if (nextoff <= size) {
+ m->valid = VM_PAGE_BITS_ALL;
+ m->dirty = 0;
+ } else {
+ int nvalid = ((size + DEV_BSIZE - 1) - toff) & ~(DEV_BSIZE - 1);
+ vm_page_set_validclean(m, 0, nvalid);
+ }
+
if (i != ap->a_reqpage) {
- vnode_pager_freepage(ap->a_m[i]);
+ /*
+ * Whether or not to leave the page activated is up in
+ * the air, but we should put the page on a page queue
+ * somewhere (it already is in the object). Result:
+ * It appears that empirical results show that
+ * deactivating pages is best.
+ */
+
+ /*
+ * Just in case someone was asking for this page, we
+ * now tell them that it is OK to use.
+ */
+ if (!error) {
+ if (m->flags & PG_WANTED)
+ vm_page_activate(m);
+ else
+ vm_page_deactivate(m);
+ PAGE_WAKEUP(m);
+ } else {
+ vnode_pager_freepage(m);
+ }
}
}
-
- if (error && (uio.uio_resid == PAGE_SIZE))
- return VM_PAGER_ERROR;
return 0;
}
-
/*
- * put page routine
- *
- * XXX By default, wimp out... note that a_offset is ignored (and always
- * XXX has been).
+ * Vnode op for VM putpages.
*/
int
nfs_putpages(ap)
struct vop_putpages_args *ap;
{
- return vnode_pager_generic_putpages(ap->a_vp, ap->a_m, ap->a_count,
- ap->a_sync, ap->a_rtvals);
+ struct uio uio;
+ struct iovec iov;
+ vm_page_t m;
+ vm_offset_t kva;
+ struct buf *bp;
+ int iomode, must_commit, i, error, npages;
+ int *rtvals;
+
+ rtvals = ap->a_rtvals;
+
+ npages = btoc(ap->a_count);
+
+ for (i = 0; i < npages; i++) {
+ rtvals[i] = VM_PAGER_AGAIN;
+ }
+
+ /*
+ * We use only the kva address for the buffer, but this is extremely
+ * convenient and fast.
+ */
+ bp = getpbuf();
+
+ kva = (vm_offset_t) bp->b_data;
+ pmap_qenter(kva, ap->a_m, npages);
+
+ iov.iov_base = (caddr_t) kva;
+ iov.iov_len = ap->a_count;
+ uio.uio_iov = &iov;
+ uio.uio_iovcnt = 1;
+ uio.uio_offset = IDX_TO_OFF(ap->a_m[0]->pindex);
+ uio.uio_resid = ap->a_count;
+ uio.uio_segflg = UIO_SYSSPACE;
+ uio.uio_rw = UIO_WRITE;
+ uio.uio_procp = curproc;
+
+ if ((ap->a_sync & VM_PAGER_PUT_SYNC) == 0)
+ iomode = NFSV3WRITE_UNSTABLE;
+ else
+ iomode = NFSV3WRITE_FILESYNC;
+
+ error = nfs_writerpc(ap->a_vp, &uio,
+ curproc->p_ucred, &iomode, &must_commit);
+
+ pmap_qremove(kva, npages);
+ relpbuf(bp);
+
+ if (!error) {
+ int nwritten = round_page(ap->a_count - uio.uio_resid) / PAGE_SIZE;
+ for (i = 0; i < nwritten; i++) {
+ rtvals[i] = VM_PAGER_OK;
+ ap->a_m[i]->dirty = 0;
+ }
+ if (must_commit)
+ nfs_clearcommit(ap->a_vp->v_mount);
+ }
+ return ap->a_rtvals[0];
}
/*
@@ -464,7 +560,7 @@ again:
};
if (n > 0) {
- error = uiomove(bp->b_data + on, (int)n, uio);
+ error = uiomove(bp->b_data + on, (int)n, uio);
}
switch (vp->v_type) {
case VREG:
@@ -484,6 +580,24 @@ again:
return (error);
}
+static void
+nfs_prot_buf(bp, off, n)
+ struct buf *bp;
+ int off;
+ int n;
+{
+ int pindex, boff, end;
+
+ if ((bp->b_flags & B_VMIO) == 0)
+ return;
+
+ end = round_page(off + n);
+ for (boff = trunc_page(off); boff < end; boff += PAGE_SIZE) {
+ pindex = boff >> PAGE_SHIFT;
+ vm_page_protect(bp->b_pages[pindex], VM_PROT_NONE);
+ }
+}
+
/*
* Vnode op for write using bio
*/
@@ -648,12 +762,19 @@ again:
goto again;
}
}
+
error = uiomove((char *)bp->b_data + on, n, uio);
if (error) {
bp->b_flags |= B_ERROR;
brelse(bp);
return (error);
}
+
+ /*
+ * This will keep the buffer and mmapped regions more coherent.
+ */
+ nfs_prot_buf(bp, on, n);
+
if (bp->b_dirtyend > 0) {
bp->b_dirtyoff = min(on, bp->b_dirtyoff);
bp->b_dirtyend = max((on + n), bp->b_dirtyend);
@@ -681,6 +802,8 @@ again:
*/
if ((np->n_flag & NQNFSNONCACHE) || (ioflag & IO_SYNC)) {
bp->b_proc = p;
+ if (ioflag & IO_INVAL)
+ bp->b_flags |= B_INVAL;
error = VOP_BWRITE(bp);
if (error)
return (error);
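For reference, the nvalid computation in the new nfs_getpages above rounds the tail of a short read up to a DEV_BSIZE boundary before marking bytes of a partially filled page valid. The following standalone sketch mirrors that arithmetic; the 4096/512 values for PAGE_SIZE and DEV_BSIZE are assumed for illustration (they match the common i386 configuration).

#include <stdio.h>

#define PAGE_SIZE	4096	/* assumed */
#define DEV_BSIZE	512	/* assumed */

/*
 * Bytes of the page at offset toff to mark valid when only size bytes
 * of the whole getpages request were actually read: the remainder is
 * rounded up to a DEV_BSIZE boundary, mirroring the nvalid expression
 * in nfs_getpages().
 */
static int
valid_bytes(int size, int toff)
{
	if (size <= toff)
		return (0);
	return (((size + DEV_BSIZE - 1) - toff) & ~(DEV_BSIZE - 1));
}

int
main(void)
{
	/*
	 * A 5000-byte read spanning two pages: page 0 is fully valid
	 * (the nextoff <= size branch), while page 1 received 904
	 * bytes, which rounds up to 1024 valid bytes.
	 */
	printf("page 1 valid bytes: %d\n", valid_bytes(5000, PAGE_SIZE));
	return (0);
}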