diff options
| author | jeff <jeff@FreeBSD.org> | 2008-03-22 09:15:16 +0000 |
| --- | --- | --- |
| committer | jeff <jeff@FreeBSD.org> | 2008-03-22 09:15:16 +0000 |
| commit | a9d123c3ab34baa9fe2c8c25bd9acfbfb31b381e (patch) | |
| tree | 5fedc50643363d96cefce7e3cd6edbdbf2d7fb2b /sys/kern/vfs_default.c | |
| parent | b283b3e59a3e18ec4e7cf225a3a9922139733a73 (diff) | |
| download | FreeBSD-src-a9d123c3ab34baa9fe2c8c25bd9acfbfb31b381e.zip FreeBSD-src-a9d123c3ab34baa9fe2c8c25bd9acfbfb31b381e.tar.gz | |
- Complete part of the unfinished bufobj work by consistently using
BO_LOCK/UNLOCK/MTX when manipulating the bufobj.
- Create a new lock in the bufobj to lock bufobj fields independently.
This leaves the vnode interlock as an 'identity' lock while the bufobj
is an io lock. The bufobj lock is ordered before the vnode interlock
and also before the mnt ilock.
- Exploit this new lock order to simplify softdep_check_suspend().
- A few sync related functions are marked with a new XXX to note that
we may not properly interlock against a non-zero bv_cnt when
attempting to sync all vnodes on a mountlist. I do not believe this
race is important. If I'm wrong this will make these locations easier
to find.
Reviewed by: kib (earlier diff)
Tested by: kris, pho (earlier diff)
Diffstat (limited to 'sys/kern/vfs_default.c')
-rw-r--r-- | sys/kern/vfs_default.c | 27 |
1 files changed, 12 insertions, 15 deletions
diff --git a/sys/kern/vfs_default.c b/sys/kern/vfs_default.c
index 5422530..8b4170f 100644
--- a/sys/kern/vfs_default.c
+++ b/sys/kern/vfs_default.c
@@ -405,12 +405,13 @@ vop_stdfsync(ap)
 	int error = 0;
 	int maxretry = 1000;	/* large, arbitrarily chosen */

-	VI_LOCK(vp);
+	bo = &vp->v_bufobj;
+	BO_LOCK(bo);
 loop1:
 	/*
 	 * MARK/SCAN initialization to avoid infinite loops.
 	 */
-	TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd, b_bobufs) {
+	TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs) {
 		bp->b_vflags &= ~BV_SCANNED;
 		bp->b_error = 0;
 	}
@@ -419,16 +420,16 @@ loop1:
 	 * Flush all dirty buffers associated with a vnode.
 	 */
 loop2:
-	TAILQ_FOREACH_SAFE(bp, &vp->v_bufobj.bo_dirty.bv_hd, b_bobufs, nbp) {
+	TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
 		if ((bp->b_vflags & BV_SCANNED) != 0)
 			continue;
 		bp->b_vflags |= BV_SCANNED;
 		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL))
 			continue;
-		VI_UNLOCK(vp);
-		KASSERT(bp->b_bufobj == &vp->v_bufobj,
+		BO_UNLOCK(bo);
+		KASSERT(bp->b_bufobj == bo,
 		    ("bp %p wrong b_bufobj %p should be %p",
-		    bp, bp->b_bufobj, &vp->v_bufobj));
+		    bp, bp->b_bufobj, bo));
 		if ((bp->b_flags & B_DELWRI) == 0)
 			panic("fsync: not dirty");
 		if ((vp->v_object != NULL) && (bp->b_flags & B_CLUSTEROK)) {
@@ -437,7 +438,7 @@ loop2:
 			bremfree(bp);
 			bawrite(bp);
 		}
-		VI_LOCK(vp);
+		BO_LOCK(bo);
 		goto loop2;
 	}

@@ -448,7 +449,6 @@ loop2:
 	 * retry if dirty blocks still exist.
 	 */
 	if (ap->a_waitfor == MNT_WAIT) {
-		bo = &vp->v_bufobj;
 		bufobj_wwait(bo, 0, 0);
 		if (bo->bo_dirty.bv_cnt > 0) {
 			/*
@@ -464,7 +464,7 @@ loop2:
 			error = EAGAIN;
 		}
 	}
-	VI_UNLOCK(vp);
+	BO_UNLOCK(bo);
 	if (error == EAGAIN)
 		vprint("fsync: giving up on dirty", vp);

@@ -571,14 +571,11 @@ vfs_stdsync(mp, waitfor, td)
 	MNT_ILOCK(mp);
 loop:
 	MNT_VNODE_FOREACH(vp, mp, mvp) {
-
-		VI_LOCK(vp);
-		if (vp->v_bufobj.bo_dirty.bv_cnt == 0) {
-			VI_UNLOCK(vp);
+		/* bv_cnt is an acceptable race here. */
+		if (vp->v_bufobj.bo_dirty.bv_cnt == 0)
 			continue;
-		}
+		VI_LOCK(vp);
 		MNT_IUNLOCK(mp);
-
 		if ((error = vget(vp, lockreq, td)) != 0) {
 			MNT_ILOCK(mp);
 			if (error == ENOENT) {