diff options
author:    dillon <dillon@FreeBSD.org>  2001-10-26 00:08:05 +0000
committer: dillon <dillon@FreeBSD.org>  2001-10-26 00:08:05 +0000
commit:    f883ef447af57985b21cde8cd13232ca845190a4 (patch)
tree:      d2fcc74cff2c1c1ff478c189a5bc5cafc79d4509  /sys/ufs/ffs
parent:    9e3e7670a88869e7b562b5608cd584c56c3a6517 (diff)
download:  FreeBSD-src-f883ef447af57985b21cde8cd13232ca845190a4.zip
           FreeBSD-src-f883ef447af57985b21cde8cd13232ca845190a4.tar.gz
Implement kern.maxvnodes. adjusting kern.maxvnodes now actually has a
real effect.
Optimize vfs_msync(). Avoid having to continually drop and re-obtain
mutexes when scanning the vnode list. Improves looping case by 500%.
Optimize ffs_sync(). Avoid having to continually drop and re-obtain
mutexes when scanning the vnode list. This makes a couple of assumptions,
which I believe are ok, in regards to vnode stability when the mount list
mutex is held. Improves looping case by 500%.
(more optimization work is needed on top of these fixes)
MFC after: 1 week
Diffstat (limited to 'sys/ufs/ffs')
-rw-r--r--  sys/ufs/ffs/ffs_vfsops.c | 38
1 file changed, 22 insertions(+), 16 deletions(-)
diff --git a/sys/ufs/ffs/ffs_vfsops.c b/sys/ufs/ffs/ffs_vfsops.c
index ad4c24d..d080fbb 100644
--- a/sys/ufs/ffs/ffs_vfsops.c
+++ b/sys/ufs/ffs/ffs_vfsops.c
@@ -1001,10 +1001,10 @@ ffs_sync(mp, waitfor, cred, td)
 	 * Write back each (modified) inode.
 	 */
 	wait = 0;
-	lockreq = LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK;
+	lockreq = LK_EXCLUSIVE | LK_NOWAIT;
 	if (waitfor == MNT_WAIT) {
 		wait = 1;
-		lockreq = LK_EXCLUSIVE | LK_INTERLOCK;
+		lockreq = LK_EXCLUSIVE;
 	}
 	mtx_lock(&mntvnode_mtx);
 loop:
@@ -1015,34 +1015,40 @@ loop:
 		 */
 		if (vp->v_mount != mp)
			goto loop;
-		nvp = TAILQ_NEXT(vp, v_nmntvnodes);
-		mtx_unlock(&mntvnode_mtx);
-		mtx_lock(&vp->v_interlock);
+		/*
+		 * Depend on the mntvnode_slock to keep things stable enough
+		 * for a quick test.  Since there might be hundreds of
+		 * thousands of vnodes, we cannot afford even a subroutine
+		 * call unless there's a good chance that we have work to do.
+		 */
+		nvp = TAILQ_NEXT(vp, v_nmntvnodes);
 		ip = VTOI(vp);
 		if (vp->v_type == VNON || ((ip->i_flag &
-		    (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0 &&
-		    TAILQ_EMPTY(&vp->v_dirtyblkhd))) {
-			mtx_unlock(&vp->v_interlock);
-			mtx_lock(&mntvnode_mtx);
+		    (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0 &&
+		    TAILQ_EMPTY(&vp->v_dirtyblkhd))) {
 			continue;
 		}
 		if (vp->v_type != VCHR) {
+			mtx_unlock(&mntvnode_mtx);
 			if ((error = vget(vp, lockreq, td)) != 0) {
 				mtx_lock(&mntvnode_mtx);
 				if (error == ENOENT)
 					goto loop;
-				continue;
+			} else {
+				if ((error = VOP_FSYNC(vp, cred, waitfor, td)) != 0)
+					allerror = error;
+				VOP_UNLOCK(vp, 0, td);
+				vrele(vp);
+				mtx_lock(&mntvnode_mtx);
 			}
-			if ((error = VOP_FSYNC(vp, cred, waitfor, td)) != 0)
-				allerror = error;
-			VOP_UNLOCK(vp, 0, td);
-			vrele(vp);
 		} else {
-			mtx_unlock(&vp->v_interlock);
+			mtx_unlock(&mntvnode_mtx);
 			UFS_UPDATE(vp, wait);
+			mtx_lock(&mntvnode_mtx);
 		}
-		mtx_lock(&mntvnode_mtx);
+		if (TAILQ_NEXT(vp, v_nmntvnodes) != nvp)
+			goto loop;
 	}
 	mtx_unlock(&mntvnode_mtx);
 	/*