diff options
author | dillon <dillon@FreeBSD.org> | 2000-01-05 05:11:37 +0000 |
---|---|---|
committer | dillon <dillon@FreeBSD.org> | 2000-01-05 05:11:37 +0000 |
commit | c6689c797daca2e96b0ccaf727e86ebf09e00e37 (patch) | |
tree | 5cdfd2f2b906cb0407fd5a329f091623735a9701 /sys/nfs/nfs_bio.c | |
parent | e112622878175672c0eaa5a87ccdf0e882ccc784 (diff) | |
download | FreeBSD-src-c6689c797daca2e96b0ccaf727e86ebf09e00e37.zip FreeBSD-src-c6689c797daca2e96b0ccaf727e86ebf09e00e37.tar.gz |
Enhance reassignbuf(). When a buffer cannot be time-optimally inserted
into vnode dirtyblkhd we append it to the list instead of prepend it to
the list in order to maintain a 'forward' locality of reference, which
is arguably better than 'reverse'. The original algorithm did things this
way too, but at a huge time cost.
Enhance the append interlock for NFS writes to handle intr/soft mounts
better.
Fix the hysteresis for NFS async daemon I/O requests to reduce the
number of unnecessary context switches.
Modify handling of NFS mount options. Any given user option that is
too high now defaults to the kernel maximum for that option rather than
the kernel default for that option.
Reviewed by: Alfred Perlstein <bright@wintelcom.net>
Diffstat (limited to 'sys/nfs/nfs_bio.c')
-rw-r--r-- | sys/nfs/nfs_bio.c | 48 |
1 files changed, 43 insertions, 5 deletions
diff --git a/sys/nfs/nfs_bio.c b/sys/nfs/nfs_bio.c index 8e99d98..4b9dcec 100644 --- a/sys/nfs/nfs_bio.c +++ b/sys/nfs/nfs_bio.c @@ -471,9 +471,11 @@ nfs_bioread(vp, uio, ioflag, cred) rabp->b_flags |= B_INVAL|B_ERROR; vfs_unbusy_pages(rabp); brelse(rabp); + break; } - } else + } else { brelse(rabp); + } } } } @@ -497,8 +499,19 @@ again: } else if ((off_t)(lbn + 1) * biosize > np->n_size) { bcount = np->n_size - (off_t)lbn * biosize; } - if (bcount != biosize && nfs_rslock(np, p) == ENOLCK) - goto again; + if (bcount != biosize) { + switch(nfs_rslock(np, p)) { + case ENOLCK: + goto again; + /* not reached */ + case EINTR: + case ERESTART: + return(EINTR); + /* not reached */ + default: + break; + } + } bp = nfs_getcacheblk(vp, lbn, bcount, p); @@ -785,8 +798,17 @@ restart: */ if ((ioflag & IO_APPEND) || uio->uio_offset + uio->uio_resid > np->n_size) { - if (nfs_rslock(np, p) == ENOLCK) + switch(nfs_rslock(np, p)) { + case ENOLCK: goto restart; + /* not reached */ + case EINTR: + case ERESTART: + return(EINTR); + /* not reached */ + default: + break; + } haverslock = 1; } @@ -1196,10 +1218,25 @@ nfs_asyncio(bp, cred, procp) int slptimeo = 0; int error; + /* + * If no async daemons then return EIO to force caller to run the rpc + * synchronously. + */ if (nfs_numasync == 0) return (EIO); nmp = VFSTONFS(bp->b_vp->v_mount); + + /* + * Commits are usually short and sweet so lets save some cpu and + * leave the async daemons for more important rpc's (such as reads + * and writes). + */ + if ((bp->b_flags & (B_READ|B_NEEDCOMMIT)) == B_NEEDCOMMIT && + (nmp->nm_bufqiods > nfs_numasync / 2)) { + return(EIO); + } + again: if (nmp->nm_flag & NFSMNT_INT) slpflag = PCATCH; @@ -1244,7 +1281,8 @@ again: */ if (gotiod) { /* - * Ensure that the queue never grows too large. + * Ensure that the queue never grows too large. We still want + * to asynchronize so we block rather then return EIO. */ while (nmp->nm_bufqlen >= 2*nfs_numasync) { NFS_DPF(ASYNCIO, |