author     jeff <jeff@FreeBSD.org>    2005-06-14 20:31:53 +0000
committer  jeff <jeff@FreeBSD.org>    2005-06-14 20:31:53 +0000
commit     909b5b7c58acbee4d16abcaa2d0b153bbee659c0 (patch)
tree       7971aa0ba90acac4de3db4e195f438f5c8dfdf30 /sys/kern/vfs_subr.c
parent     63219d9727ad7b591409cd3663a47ed7c8e34171 (diff)
- In reassignbuf() add many asserts to validate the head and tail pointers
  of the clean and dirty lists.  This is in an attempt to catch the wrong
  bufobj problem sooner.
- In vgonel() don't acquire an extra reference in the active case, the
  vnode lock and VI_DOOMED protect us from recursively cleaning.
- Also in vgonel() clean up some stale comments.

Sponsored by:	Isilon Systems, Inc.
Approved by:	re (blanket vfs)
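For readers unfamiliar with the pattern, the new asserts simply check that the
first and last elements of each buffer list still point back at the owning
bufobj. Below is a minimal userland sketch of the same head/tail validation,
using the sys/queue.h TAILQ macros; struct item, struct owner, and
owner_check() are hypothetical stand-ins for the kernel's struct buf, struct
bufobj, and the INVARIANTS block, not actual kernel types.

#include <assert.h>
#include <stddef.h>
#include <sys/queue.h>

struct owner;

struct item {
	TAILQ_ENTRY(item) link;
	struct owner *owner;		/* plays the role of bp->b_bufobj */
};

TAILQ_HEAD(itemlist, item);

struct owner {
	struct itemlist clean;		/* plays the role of bo->bo_clean */
	struct itemlist dirty;		/* plays the role of bo->bo_dirty */
};

/*
 * Check that the head and tail of both lists point back at the owner,
 * mirroring what the commit adds to reassignbuf() under INVARIANTS.
 */
static void
owner_check(struct owner *o)
{
	struct item *it;

	it = TAILQ_FIRST(&o->clean);
	assert(it == NULL || it->owner == o);
	it = TAILQ_LAST(&o->clean, itemlist);
	assert(it == NULL || it->owner == o);
	it = TAILQ_FIRST(&o->dirty);
	assert(it == NULL || it->owner == o);
	it = TAILQ_LAST(&o->dirty, itemlist);
	assert(it == NULL || it->owner == o);
}

Checking only the two endpoints keeps the cost low enough to run on every
reassignbuf() call while still catching a list whose head or tail has been
spliced onto the wrong bufobj.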
Diffstat (limited to 'sys/kern/vfs_subr.c')
-rw-r--r--  sys/kern/vfs_subr.c | 47 +++++++++++++++++++++++++++++------------------
1 file changed, 29 insertions(+), 18 deletions(-)
diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c
index dd80289..5171633 100644
--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -1071,7 +1071,8 @@ flushbuflist(bufv, flags, bo, slpflag, slptimeo)
 			return (error != ENOLCK ? error : EAGAIN);
 		}
 		KASSERT(bp->b_bufobj == bo,
-		    ("wrong b_bufobj %p should be %p", bp->b_bufobj, bo));
+		    ("bp %p wrong b_bufobj %p should be %p",
+		    bp, bp->b_bufobj, bo));
 		if (bp->b_bufobj != bo) {	/* XXX: necessary ? */
 			BUF_UNLOCK(bp);
 			BO_LOCK(bo);
@@ -1723,6 +1724,9 @@ reassignbuf(struct buf *bp)
 	struct vnode *vp;
 	struct bufobj *bo;
 	int delay;
+#ifdef INVARIANTS
+	struct bufv *bv;
+#endif
 
 	vp = bp->b_vp;
 	bo = bp->b_bufobj;
@@ -1776,6 +1780,22 @@ reassignbuf(struct buf *bp)
 		}
 	}
 	VI_UNLOCK(vp);
+#ifdef INVARIANTS
+	bv = &bo->bo_clean;
+	bp = TAILQ_FIRST(&bv->bv_hd);
+	KASSERT(bp == NULL || bp->b_bufobj == bo,
+	    ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
+	bp = TAILQ_LAST(&bv->bv_hd, buflists);
+	KASSERT(bp == NULL || bp->b_bufobj == bo,
+	    ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
+	bv = &bo->bo_dirty;
+	bp = TAILQ_FIRST(&bv->bv_hd);
+	KASSERT(bp == NULL || bp->b_bufobj == bo,
+	    ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
+	bp = TAILQ_LAST(&bv->bv_hd, buflists);
+	KASSERT(bp == NULL || bp->b_bufobj == bo,
+	    ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
+#endif
 }
 
 static void
@@ -2271,6 +2291,7 @@ vgonel(struct vnode *vp, int shouldfree)
 	CTR1(KTR_VFS, "vgonel: vp %p", vp);
 	ASSERT_VOP_LOCKED(vp, "vgonel");
 	ASSERT_VI_LOCKED(vp, "vgonel");
+	td = curthread;
 
 	/*
 	 * Don't vgonel if we're already doomed.
@@ -2279,18 +2300,14 @@ vgonel(struct vnode *vp, int shouldfree)
VI_UNLOCK(vp);
return;
}
+ vp->v_iflag |= VI_DOOMED;
/*
- * Check to see if the vnode is in use. If so we have to reference it
- * before we clean it out so that its count cannot fall to zero and
- * generate a race against ourselves to recycle it.
+ * Check to see if the vnode is in use. If so, we have to call
+ * VOP_CLOSE() and VOP_INACTIVE().
*/
- if ((active = vp->v_usecount))
- v_incr_usecount(vp, 1);
- vp->v_iflag |= VI_DOOMED;
+ active = vp->v_usecount;
oweinact = (vp->v_iflag & VI_OWEINACT);
- td = curthread;
VI_UNLOCK(vp);
-
/*
* Clean out any buffers associated with the vnode.
* If the flush fails, just toss the buffers.
@@ -2317,24 +2334,18 @@ vgonel(struct vnode *vp, int shouldfree)
 	 */
 	if (VOP_RECLAIM(vp, td))
 		panic("vgone: cannot reclaim");
-
 	VNASSERT(vp->v_object == NULL, vp,
 	    ("vop_reclaim left v_object vp=%p, tag=%s", vp, vp->v_tag));
-
 	/*
 	 * Delete from old mount point vnode list.
 	 */
 	delmntque(vp);
 	cache_purge(vp);
-	VI_LOCK(vp);
-	if (active) {
-		v_incr_usecount(vp, -1);
-		VNASSERT(vp->v_usecount >= 0, vp, ("vgone: bad ref count"));
-	}
 	/*
-	 * Done with purge, reset to the standard lock and
-	 * notify sleepers of the grim news.
+	 * Done with purge, reset to the standard lock and invalidate
+	 * the vnode.
 	 */
+	VI_LOCK(vp);
 	vp->v_vnlock = &vp->v_lock;
 	vp->v_op = &dead_vnodeops;
 	vp->v_tag = "none";
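The vgonel() side of the change leans on VI_DOOMED instead of an extra use
count: the flag is now set while the vnode interlock is still held, so any
re-entry during the teardown observes it at the "already doomed" check and
returns early. A hedged sketch of that guard shape follows; struct obj,
obj_destroy(), and OBJ_DOOMED are hypothetical names, and a pthread mutex
stands in for the vnode interlock (in the kernel the exclusively held vnode
lock does additional work that this sketch does not model).

#include <pthread.h>

#define OBJ_DOOMED	0x0001

struct obj {
	pthread_mutex_t lock;	/* stands in for the vnode interlock */
	int flags;
};

/*
 * Once OBJ_DOOMED is set under the lock, a re-entrant caller observes
 * it and returns, so the teardown runs exactly once and no extra
 * reference is needed to keep it single-entry.
 */
static void
obj_destroy(struct obj *o)
{
	pthread_mutex_lock(&o->lock);
	if (o->flags & OBJ_DOOMED) {	/* already being cleaned */
		pthread_mutex_unlock(&o->lock);
		return;
	}
	o->flags |= OBJ_DOOMED;		/* claim the teardown */
	pthread_mutex_unlock(&o->lock);

	/* cleanup work here may call back into obj_destroy() safely */
}

Claiming the teardown with a flag rather than a reference count avoids the
old dance of bumping and dropping v_usecount around the purge just to keep
the vnode from being recycled underneath the cleaner.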