author    phk <phk@FreeBSD.org>    1997-08-31 07:32:39 +0000
committer phk <phk@FreeBSD.org>    1997-08-31 07:32:39 +0000
commit    0b3a12b83eb2feac881bc220304736c7fc61556a (patch)
tree      cdd844c554de3bb51aaf1b188cf99f92ac8467ee
parent    2fd4aa4ddff6800e1a692d84c9a3fbf1ef3df97a (diff)
Change the 0xdeadb hack to a flag called VDOOMED.
Introduce VFREE which indicates that the vnode is on the freelist.
Rename vholdrele() to vdrop().
Create vfree() and vbusy() to add/delete a vnode from the freelist.
Add vfree()/vbusy() calls to keep (v_holdcnt != 0 || v_usecount != 0) vnodes off the freelist.
Generalize vhold()/v_holdcnt to mean "do not recycle".
Fix reassignbuf()'s lack of use of vhold().
Use vhold() instead of checking the v_cache_src list.
Remove vtouch(); the vnodes are always vget'ed soon enough afterwards for it to have any measurable effect.
Add sysctl debug.freevnodes to keep track of things.
Move cache_purge() up in getnewvnode() to avoid a race.
Decrement v_usecount after VOP_INACTIVE(); put a vhold() on the vnode during VOP_INACTIVE().
Unmacroize vhold()/vdrop().
Print out the VDOOMED and VFREE flags (XXX: should use %b).

Reviewed by: dyson
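
For illustration, below is a minimal user-space sketch of the freelist discipline this commit introduces. It is not the kernel code: the names (v_usecount, v_holdcnt, VFREE, VDOOMED, VSHOULDFREE/VSHOULDBUSY, vhold/vdrop, vfree/vbusy) mirror the commit, but the freelist here is a plain singly linked list, and the simplelock/interlock, VAGE and VOP_INACTIVE handling are omitted.

/*
 * Illustrative sketch only; mirrors the names used in the commit but is
 * not the kernel implementation (no TAILQ, no simplelocks, no VAGE,
 * no VOP_INACTIVE).
 */
#include <assert.h>
#include <stdio.h>

#define VFREE   0x1             /* vnode is on the freelist */
#define VDOOMED 0x2             /* vnode is being recycled */

struct vnode {
        int              v_flag;
        int              v_usecount;    /* active references */
        int              v_holdcnt;     /* "do not recycle" references */
        struct vnode    *v_next;        /* freelist linkage */
};

static struct vnode *freelist;          /* head of the model freelist */
static int freevnodes;                  /* counterpart of debug.freevnodes */

/* Should this vnode be put on the freelist? */
#define VSHOULDFREE(vp) \
        (!((vp)->v_flag & (VFREE | VDOOMED)) && \
         (vp)->v_holdcnt == 0 && (vp)->v_usecount == 0)

/* Should this vnode be taken off the freelist? */
#define VSHOULDBUSY(vp) \
        (((vp)->v_flag & VFREE) && \
         ((vp)->v_holdcnt != 0 || (vp)->v_usecount != 0))

static void
vfree(struct vnode *vp)
{
        /* Push onto the freelist and mark it free. */
        vp->v_next = freelist;
        freelist = vp;
        freevnodes++;
        vp->v_flag |= VFREE;
}

static void
vbusy(struct vnode *vp)
{
        struct vnode **vpp;

        /* Unlink from the freelist; the vnode must be on it. */
        for (vpp = &freelist; *vpp != vp; vpp = &(*vpp)->v_next)
                assert(*vpp != NULL);
        *vpp = vp->v_next;
        freevnodes--;
        vp->v_flag &= ~VFREE;
}

static void
vhold(struct vnode *vp)
{
        /* One more party that does not want the vnode recycled. */
        vp->v_holdcnt++;
        if (VSHOULDBUSY(vp))
                vbusy(vp);
}

static void
vdrop(struct vnode *vp)
{
        /* One less party that cares about this vnode. */
        assert(vp->v_holdcnt > 0);
        vp->v_holdcnt--;
        if (VSHOULDFREE(vp))
                vfree(vp);
}

int
main(void)
{
        struct vnode vn = { 0, 0, 0, NULL };

        vfree(&vn);                     /* unreferenced: lands on freelist */
        vhold(&vn);                     /* held: must leave the freelist */
        assert(!(vn.v_flag & VFREE) && freevnodes == 0);
        vdrop(&vn);                     /* last hold gone: back on freelist */
        assert((vn.v_flag & VFREE) && freevnodes == 1);
        printf("freevnodes = %d\n", freevnodes);
        return (0);
}

The invariant being modelled is the one stated in the commit message: a vnode with v_holdcnt != 0 or v_usecount != 0 is never on the freelist. The new counter added by the commit can be read at run time with "sysctl debug.freevnodes" (read-only, per the CTLFLAG_RD declaration in the diff below).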
-rw-r--r--  sys/kern/vfs_cache.c        15
-rw-r--r--  sys/kern/vfs_export.c      121
-rw-r--r--  sys/kern/vfs_subr.c        121
-rw-r--r--  sys/sys/vnode.h             46
-rw-r--r--  sys/ufs/lfs/lfs_segment.c    4
-rw-r--r--  sys/vm/vm_swap.c             4
6 files changed, 163 insertions, 148 deletions
diff --git a/sys/kern/vfs_cache.c b/sys/kern/vfs_cache.c
index a1f94f4..f4f52bb 100644
--- a/sys/kern/vfs_cache.c
+++ b/sys/kern/vfs_cache.c
@@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* @(#)vfs_cache.c 8.5 (Berkeley) 3/22/95
- * $Id: vfs_cache.c,v 1.26 1997/08/04 07:31:36 phk Exp $
+ * $Id: vfs_cache.c,v 1.27 1997/08/26 07:32:34 phk Exp $
*/
#include <sys/param.h>
@@ -103,6 +103,8 @@ cache_zap(ncp)
{
LIST_REMOVE(ncp, nc_hash);
LIST_REMOVE(ncp, nc_src);
+ if (LIST_EMPTY(&ncp->nc_dvp->v_cache_src))
+ vdrop(ncp->nc_dvp);
if (ncp->nc_vp) {
TAILQ_REMOVE(&ncp->nc_vp->v_cache_dst, ncp, nc_dst);
} else {
@@ -180,7 +182,6 @@ cache_lookup(dvp, vpp, cnp)
/* We found a "positive" match, return the vnode */
if (ncp->nc_vp) {
nchstats.ncs_goodhits++;
- vtouch(ncp->nc_vp);
*vpp = ncp->nc_vp;
return (-1);
}
@@ -239,8 +240,10 @@ cache_enter(dvp, vp, cnp)
malloc(sizeof *ncp + cnp->cn_namelen, M_CACHE, M_WAITOK);
bzero((char *)ncp, sizeof *ncp);
numcache++;
- if (!vp)
+ if (!vp) {
numneg++;
+ ncp->nc_flag = cnp->cn_flags & ISWHITEOUT ? NCF_WHITE : 0;
+ }
/*
* Fill in cache info, if vp is NULL this is a "negative" cache entry.
@@ -249,15 +252,13 @@ cache_enter(dvp, vp, cnp)
* otherwise unused.
*/
ncp->nc_vp = vp;
- if (vp)
- vtouch(vp);
- else
- ncp->nc_flag = cnp->cn_flags & ISWHITEOUT ? NCF_WHITE : 0;
ncp->nc_dvp = dvp;
ncp->nc_nlen = cnp->cn_namelen;
bcopy(cnp->cn_nameptr, ncp->nc_name, (unsigned)ncp->nc_nlen);
ncpp = NCHHASH(dvp, cnp);
LIST_INSERT_HEAD(ncpp, ncp, nc_hash);
+ if (LIST_EMPTY(&dvp->v_cache_src))
+ vhold(dvp);
LIST_INSERT_HEAD(&dvp->v_cache_src, ncp, nc_src);
if (vp) {
TAILQ_INSERT_HEAD(&vp->v_cache_dst, ncp, nc_dst);
diff --git a/sys/kern/vfs_export.c b/sys/kern/vfs_export.c
index 143521c..6358781 100644
--- a/sys/kern/vfs_export.c
+++ b/sys/kern/vfs_export.c
@@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)vfs_subr.c 8.31 (Berkeley) 5/26/95
- * $Id: vfs_subr.c,v 1.94 1997/08/26 04:36:17 dyson Exp $
+ * $Id: vfs_subr.c,v 1.95 1997/08/26 11:59:20 bde Exp $
*/
/*
@@ -103,6 +103,7 @@ int vttoif_tab[9] = {
}
TAILQ_HEAD(freelst, vnode) vnode_free_list; /* vnode free list */
static u_long freevnodes = 0;
+SYSCTL_INT(_debug, OID_AUTO, freevnodes, CTLFLAG_RD, &freevnodes, 0, "");
struct mntlist mountlist; /* mounted filesystem list */
struct simplelock mountlist_slock;
@@ -380,11 +381,11 @@ getnewvnode(tag, mp, vops, vpp)
}
if (vp) {
+ vp->v_flag |= VDOOMED;
TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
freevnodes--;
- /* see comment on why 0xdeadb is set at end of vgone (below) */
- vp->v_freelist.tqe_prev = (struct vnode **) 0xdeadb;
simple_unlock(&vnode_free_list_slock);
+ cache_purge(vp);
vp->v_lease = NULL;
if (vp->v_type != VBAD)
vgonel(vp, p);
@@ -418,13 +419,13 @@ getnewvnode(tag, mp, vops, vpp)
M_VNODE, M_WAITOK);
bzero((char *) vp, sizeof *vp);
vp->v_dd = vp;
+ cache_purge(vp);
LIST_INIT(&vp->v_cache_src);
TAILQ_INIT(&vp->v_cache_dst);
numvnodes++;
}
vp->v_type = VNON;
- cache_purge(vp);
vp->v_tag = tag;
vp->v_op = vops;
insmntque(vp, mp);
@@ -582,7 +583,7 @@ bgetvp(vp, bp)
if (bp->b_vp)
panic("bgetvp: not free");
- VHOLD(vp);
+ vhold(vp);
bp->b_vp = vp;
if (vp->v_type == VBLK || vp->v_type == VCHR)
bp->b_dev = vp->v_rdev;
@@ -618,7 +619,7 @@ brelvp(bp)
vp = bp->b_vp;
bp->b_vp = (struct vnode *) 0;
- HOLDRELE(vp);
+ vdrop(vp);
}
/*
@@ -678,8 +679,10 @@ reassignbuf(bp, newvp)
/*
* Delete from old vnode list, if on one.
*/
- if (bp->b_vnbufs.le_next != NOLIST)
+ if (bp->b_vnbufs.le_next != NOLIST) {
bufremvn(bp);
+ vdrop(bp->b_vp);
+ }
/*
* If dirty, put on list of dirty buffers; otherwise insert onto list
* of clean buffers.
@@ -700,6 +703,8 @@ reassignbuf(bp, newvp)
} else {
bufinsvn(bp, &newvp->v_cleanblkhd);
}
+ bp->b_vp = newvp;
+ vhold(bp->b_vp);
splx(s);
}
@@ -836,13 +841,9 @@ vget(vp, flags, p)
tsleep((caddr_t)vp, PINOD, "vget", 0);
return (ENOENT);
}
- if (vp->v_usecount == 0) {
- simple_lock(&vnode_free_list_slock);
- TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
- simple_unlock(&vnode_free_list_slock);
- freevnodes--;
- }
vp->v_usecount++;
+ if (VSHOULDBUSY(vp))
+ vbusy(vp);
/*
* Create the VM object, if needed
*/
@@ -1089,11 +1090,11 @@ vputrele(vp, put)
panic("vputrele: null vp");
#endif
simple_lock(&vp->v_interlock);
- vp->v_usecount--;
- if ((vp->v_usecount == 1) &&
+ if ((vp->v_usecount == 2) &&
vp->v_object &&
(vp->v_object->flags & OBJ_VFS_REF)) {
+ vp->v_usecount--;
vp->v_object->flags &= ~OBJ_VFS_REF;
if (put) {
VOP_UNLOCK(vp, LK_INTERLOCK, p);
@@ -1104,7 +1105,8 @@ vputrele(vp, put)
return;
}
- if (vp->v_usecount > 0) {
+ if (vp->v_usecount > 1) {
+ vp->v_usecount--;
if (put) {
VOP_UNLOCK(vp, LK_INTERLOCK, p);
} else {
@@ -1113,23 +1115,14 @@ vputrele(vp, put)
return;
}
- if (vp->v_usecount < 0) {
+ if (vp->v_usecount < 1) {
#ifdef DIAGNOSTIC
vprint("vputrele: negative ref count", vp);
#endif
panic("vputrele: negative ref cnt");
}
- simple_lock(&vnode_free_list_slock);
- if (vp->v_flag & VAGE) {
- vp->v_flag &= ~VAGE;
- if(vp->v_tag != VT_TFS)
- TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
- } else {
- if(vp->v_tag != VT_TFS)
- TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
- }
- freevnodes++;
- simple_unlock(&vnode_free_list_slock);
+
+ vp->v_holdcnt++; /* Make sure vnode isn't recycled */
/*
* If we are doing a vput, the node is already locked, and we must
@@ -1139,8 +1132,18 @@ vputrele(vp, put)
if (put) {
simple_unlock(&vp->v_interlock);
VOP_INACTIVE(vp, p);
+ simple_lock(&vp->v_interlock);
+ vp->v_usecount--;
+ vp->v_holdcnt--;
+ if (VSHOULDFREE(vp))
+ vfree(vp);
+ simple_unlock(&vp->v_interlock);
} else if (vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK, p) == 0) {
VOP_INACTIVE(vp, p);
+ vp->v_usecount--;
+ vp->v_holdcnt--;
+ if (VSHOULDFREE(vp))
+ vfree(vp);
}
}
@@ -1161,9 +1164,8 @@ vrele(vp)
vputrele(vp, 0);
}
-#ifdef DIAGNOSTIC
/*
- * Page or buffer structure gets a reference.
+ * Somebody doesn't want the vnode recycled.
*/
void
vhold(vp)
@@ -1172,14 +1174,16 @@ vhold(vp)
simple_lock(&vp->v_interlock);
vp->v_holdcnt++;
+ if (VSHOULDBUSY(vp))
+ vbusy(vp);
simple_unlock(&vp->v_interlock);
}
/*
- * Page or buffer structure frees a reference.
+ * One less who cares about this vnode.
*/
void
-holdrele(vp)
+vdrop(vp)
register struct vnode *vp;
{
@@ -1187,9 +1191,10 @@ holdrele(vp)
if (vp->v_holdcnt <= 0)
panic("holdrele: holdcnt");
vp->v_holdcnt--;
+ if (VSHOULDFREE(vp))
+ vfree(vp);
simple_unlock(&vp->v_interlock);
}
-#endif /* DIAGNOSTIC */
/*
* Remove any vnodes in the vnode table belonging to mount point mp.
@@ -1572,17 +1577,11 @@ vgonel(vp, p)
* after calling vgone. If the reference count were
* incremented first, vgone would (incorrectly) try to
* close the previous instance of the underlying object.
- * So, the back pointer is explicitly set to `0xdeadb' in
- * getnewvnode after removing it from the freelist to ensure
- * that we do not try to move it here.
*/
- if (vp->v_usecount == 0) {
+ if (vp->v_usecount == 0 && !(vp->v_flag & VDOOMED)) {
simple_lock(&vnode_free_list_slock);
- if ((vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb) &&
- vnode_free_list.tqh_first != vp) {
- TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
- TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
- }
+ TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
+ TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
simple_unlock(&vnode_free_list_slock);
}
@@ -1680,6 +1679,10 @@ vprint(label, vp)
strcat(buf, "|VBWAIT");
if (vp->v_flag & VALIASED)
strcat(buf, "|VALIASED");
+ if (vp->v_flag & VDOOMED)
+ strcat(buf, "|VDOOMED");
+ if (vp->v_flag & VFREE)
+ strcat(buf, "|VFREE");
if (buf[0] != '\0')
printf(" flags (%s)", &buf[1]);
if (vp->v_data == NULL) {
@@ -2255,20 +2258,28 @@ retn:
}
void
-vtouch(vp)
+vfree(vp)
struct vnode *vp;
{
- simple_lock(&vp->v_interlock);
- if (vp->v_usecount) {
- simple_unlock(&vp->v_interlock);
- return;
- }
- if (simple_lock_try(&vnode_free_list_slock)) {
- if (vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb) {
- TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
- TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
- }
- simple_unlock(&vnode_free_list_slock);
+ simple_lock(&vnode_free_list_slock);
+ if (vp->v_flag & VAGE) {
+ TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
+ } else {
+ TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
}
- simple_unlock(&vp->v_interlock);
+ freevnodes++;
+ simple_unlock(&vnode_free_list_slock);
+ vp->v_flag &= ~VAGE;
+ vp->v_flag |= VFREE;
+}
+
+void
+vbusy(vp)
+ struct vnode *vp;
+{
+ simple_lock(&vnode_free_list_slock);
+ TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
+ freevnodes--;
+ simple_unlock(&vnode_free_list_slock);
+ vp->v_flag &= ~VFREE;
}
diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c
index 143521c..6358781 100644
--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)vfs_subr.c 8.31 (Berkeley) 5/26/95
- * $Id: vfs_subr.c,v 1.94 1997/08/26 04:36:17 dyson Exp $
+ * $Id: vfs_subr.c,v 1.95 1997/08/26 11:59:20 bde Exp $
*/
/*
@@ -103,6 +103,7 @@ int vttoif_tab[9] = {
}
TAILQ_HEAD(freelst, vnode) vnode_free_list; /* vnode free list */
static u_long freevnodes = 0;
+SYSCTL_INT(_debug, OID_AUTO, freevnodes, CTLFLAG_RD, &freevnodes, 0, "");
struct mntlist mountlist; /* mounted filesystem list */
struct simplelock mountlist_slock;
@@ -380,11 +381,11 @@ getnewvnode(tag, mp, vops, vpp)
}
if (vp) {
+ vp->v_flag |= VDOOMED;
TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
freevnodes--;
- /* see comment on why 0xdeadb is set at end of vgone (below) */
- vp->v_freelist.tqe_prev = (struct vnode **) 0xdeadb;
simple_unlock(&vnode_free_list_slock);
+ cache_purge(vp);
vp->v_lease = NULL;
if (vp->v_type != VBAD)
vgonel(vp, p);
@@ -418,13 +419,13 @@ getnewvnode(tag, mp, vops, vpp)
M_VNODE, M_WAITOK);
bzero((char *) vp, sizeof *vp);
vp->v_dd = vp;
+ cache_purge(vp);
LIST_INIT(&vp->v_cache_src);
TAILQ_INIT(&vp->v_cache_dst);
numvnodes++;
}
vp->v_type = VNON;
- cache_purge(vp);
vp->v_tag = tag;
vp->v_op = vops;
insmntque(vp, mp);
@@ -582,7 +583,7 @@ bgetvp(vp, bp)
if (bp->b_vp)
panic("bgetvp: not free");
- VHOLD(vp);
+ vhold(vp);
bp->b_vp = vp;
if (vp->v_type == VBLK || vp->v_type == VCHR)
bp->b_dev = vp->v_rdev;
@@ -618,7 +619,7 @@ brelvp(bp)
vp = bp->b_vp;
bp->b_vp = (struct vnode *) 0;
- HOLDRELE(vp);
+ vdrop(vp);
}
/*
@@ -678,8 +679,10 @@ reassignbuf(bp, newvp)
/*
* Delete from old vnode list, if on one.
*/
- if (bp->b_vnbufs.le_next != NOLIST)
+ if (bp->b_vnbufs.le_next != NOLIST) {
bufremvn(bp);
+ vdrop(bp->b_vp);
+ }
/*
* If dirty, put on list of dirty buffers; otherwise insert onto list
* of clean buffers.
@@ -700,6 +703,8 @@ reassignbuf(bp, newvp)
} else {
bufinsvn(bp, &newvp->v_cleanblkhd);
}
+ bp->b_vp = newvp;
+ vhold(bp->b_vp);
splx(s);
}
@@ -836,13 +841,9 @@ vget(vp, flags, p)
tsleep((caddr_t)vp, PINOD, "vget", 0);
return (ENOENT);
}
- if (vp->v_usecount == 0) {
- simple_lock(&vnode_free_list_slock);
- TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
- simple_unlock(&vnode_free_list_slock);
- freevnodes--;
- }
vp->v_usecount++;
+ if (VSHOULDBUSY(vp))
+ vbusy(vp);
/*
* Create the VM object, if needed
*/
@@ -1089,11 +1090,11 @@ vputrele(vp, put)
panic("vputrele: null vp");
#endif
simple_lock(&vp->v_interlock);
- vp->v_usecount--;
- if ((vp->v_usecount == 1) &&
+ if ((vp->v_usecount == 2) &&
vp->v_object &&
(vp->v_object->flags & OBJ_VFS_REF)) {
+ vp->v_usecount--;
vp->v_object->flags &= ~OBJ_VFS_REF;
if (put) {
VOP_UNLOCK(vp, LK_INTERLOCK, p);
@@ -1104,7 +1105,8 @@ vputrele(vp, put)
return;
}
- if (vp->v_usecount > 0) {
+ if (vp->v_usecount > 1) {
+ vp->v_usecount--;
if (put) {
VOP_UNLOCK(vp, LK_INTERLOCK, p);
} else {
@@ -1113,23 +1115,14 @@ vputrele(vp, put)
return;
}
- if (vp->v_usecount < 0) {
+ if (vp->v_usecount < 1) {
#ifdef DIAGNOSTIC
vprint("vputrele: negative ref count", vp);
#endif
panic("vputrele: negative ref cnt");
}
- simple_lock(&vnode_free_list_slock);
- if (vp->v_flag & VAGE) {
- vp->v_flag &= ~VAGE;
- if(vp->v_tag != VT_TFS)
- TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
- } else {
- if(vp->v_tag != VT_TFS)
- TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
- }
- freevnodes++;
- simple_unlock(&vnode_free_list_slock);
+
+ vp->v_holdcnt++; /* Make sure vnode isn't recycled */
/*
* If we are doing a vput, the node is already locked, and we must
@@ -1139,8 +1132,18 @@ vputrele(vp, put)
if (put) {
simple_unlock(&vp->v_interlock);
VOP_INACTIVE(vp, p);
+ simple_lock(&vp->v_interlock);
+ vp->v_usecount--;
+ vp->v_holdcnt--;
+ if (VSHOULDFREE(vp))
+ vfree(vp);
+ simple_unlock(&vp->v_interlock);
} else if (vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK, p) == 0) {
VOP_INACTIVE(vp, p);
+ vp->v_usecount--;
+ vp->v_holdcnt--;
+ if (VSHOULDFREE(vp))
+ vfree(vp);
}
}
@@ -1161,9 +1164,8 @@ vrele(vp)
vputrele(vp, 0);
}
-#ifdef DIAGNOSTIC
/*
- * Page or buffer structure gets a reference.
+ * Somebody doesn't want the vnode recycled.
*/
void
vhold(vp)
@@ -1172,14 +1174,16 @@ vhold(vp)
simple_lock(&vp->v_interlock);
vp->v_holdcnt++;
+ if (VSHOULDBUSY(vp))
+ vbusy(vp);
simple_unlock(&vp->v_interlock);
}
/*
- * Page or buffer structure frees a reference.
+ * One less who cares about this vnode.
*/
void
-holdrele(vp)
+vdrop(vp)
register struct vnode *vp;
{
@@ -1187,9 +1191,10 @@ holdrele(vp)
if (vp->v_holdcnt <= 0)
panic("holdrele: holdcnt");
vp->v_holdcnt--;
+ if (VSHOULDFREE(vp))
+ vfree(vp);
simple_unlock(&vp->v_interlock);
}
-#endif /* DIAGNOSTIC */
/*
* Remove any vnodes in the vnode table belonging to mount point mp.
@@ -1572,17 +1577,11 @@ vgonel(vp, p)
* after calling vgone. If the reference count were
* incremented first, vgone would (incorrectly) try to
* close the previous instance of the underlying object.
- * So, the back pointer is explicitly set to `0xdeadb' in
- * getnewvnode after removing it from the freelist to ensure
- * that we do not try to move it here.
*/
- if (vp->v_usecount == 0) {
+ if (vp->v_usecount == 0 && !(vp->v_flag & VDOOMED)) {
simple_lock(&vnode_free_list_slock);
- if ((vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb) &&
- vnode_free_list.tqh_first != vp) {
- TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
- TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
- }
+ TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
+ TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
simple_unlock(&vnode_free_list_slock);
}
@@ -1680,6 +1679,10 @@ vprint(label, vp)
strcat(buf, "|VBWAIT");
if (vp->v_flag & VALIASED)
strcat(buf, "|VALIASED");
+ if (vp->v_flag & VDOOMED)
+ strcat(buf, "|VDOOMED");
+ if (vp->v_flag & VFREE)
+ strcat(buf, "|VFREE");
if (buf[0] != '\0')
printf(" flags (%s)", &buf[1]);
if (vp->v_data == NULL) {
@@ -2255,20 +2258,28 @@ retn:
}
void
-vtouch(vp)
+vfree(vp)
struct vnode *vp;
{
- simple_lock(&vp->v_interlock);
- if (vp->v_usecount) {
- simple_unlock(&vp->v_interlock);
- return;
- }
- if (simple_lock_try(&vnode_free_list_slock)) {
- if (vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb) {
- TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
- TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
- }
- simple_unlock(&vnode_free_list_slock);
+ simple_lock(&vnode_free_list_slock);
+ if (vp->v_flag & VAGE) {
+ TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
+ } else {
+ TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
}
- simple_unlock(&vp->v_interlock);
+ freevnodes++;
+ simple_unlock(&vnode_free_list_slock);
+ vp->v_flag &= ~VAGE;
+ vp->v_flag |= VFREE;
+}
+
+void
+vbusy(vp)
+ struct vnode *vp;
+{
+ simple_lock(&vnode_free_list_slock);
+ TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
+ freevnodes--;
+ simple_unlock(&vnode_free_list_slock);
+ vp->v_flag &= ~VFREE;
}
diff --git a/sys/sys/vnode.h b/sys/sys/vnode.h
index 4c5225d..fac5979 100644
--- a/sys/sys/vnode.h
+++ b/sys/sys/vnode.h
@@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)vnode.h 8.7 (Berkeley) 2/4/94
- * $Id: vnode.h,v 1.45 1997/08/18 03:29:08 fsmp Exp $
+ * $Id: vnode.h,v 1.46 1997/08/26 07:32:46 phk Exp $
*/
#ifndef _SYS_VNODE_H_
@@ -141,6 +141,8 @@ struct vnode {
#define VAGE 0x08000 /* Insert vnode at head of free list */
#define VOLOCK 0x10000 /* vnode is locked waiting for an object */
#define VOWANT 0x20000 /* a process is waiting for VOLOCK */
+#define VDOOMED 0x40000 /* This vnode is being recycled */
+#define VFREE 0x80000 /* This vnode is on the freelist */
/*
* Vnode attributes. A field value of VNOVAL represents a field whose value
@@ -221,35 +223,12 @@ extern int vttoif_tab[];
#define V_SAVEMETA 0x0002 /* vinvalbuf: leave indirect blocks */
#define REVOKEALL 0x0001 /* vop_revoke: revoke all aliases */
-#ifdef DIAGNOSTIC
-#define HOLDRELE(vp) holdrele(vp)
-#define VATTR_NULL(vap) vattr_null(vap)
-#define VHOLD(vp) vhold(vp)
#define VREF(vp) vref(vp)
-void holdrele __P((struct vnode *));
-void vhold __P((struct vnode *));
-void vref __P((struct vnode *vp));
+#ifdef DIAGNOSTIC
+#define VATTR_NULL(vap) vattr_null(vap)
#else
#define VATTR_NULL(vap) (*(vap) = va_null) /* initialize a vattr */
-#define HOLDRELE(vp) holdrele(vp) /* decrease buf or page ref */
-static __inline void
-holdrele(struct vnode *vp)
-{
- simple_lock(&vp->v_interlock);
- vp->v_holdcnt--;
- simple_unlock(&vp->v_interlock);
-}
-#define VHOLD(vp) vhold(vp) /* increase buf or page ref */
-static __inline void
-vhold(struct vnode *vp)
-{
- simple_lock(&vp->v_interlock);
- vp->v_holdcnt++;
- simple_unlock(&vp->v_interlock);
-}
-#define VREF(vp) vref(vp) /* increase reference */
-void vref __P((struct vnode *vp));
#endif /* DIAGNOSTIC */
#define NULLVP ((struct vnode *)NULL)
@@ -288,6 +267,15 @@ extern void (*lease_updatetime) __P((int deltat));
do { if(lease_updatetime) lease_updatetime(dt); } while(0)
#endif /* NFS */
+#define VSHOULDFREE(vp) \
+ (!((vp)->v_flag & (VFREE|VDOOMED)) && \
+ !(vp)->v_holdcnt && !(vp)->v_usecount)
+
+#define VSHOULDBUSY(vp) \
+ (((vp)->v_flag & VFREE) && \
+ ((vp)->v_holdcnt || (vp)->v_usecount))
+
+
#endif /* KERNEL */
@@ -482,12 +470,16 @@ int getnewvnode __P((enum vtagtype tag,
void insmntque __P((struct vnode *vp, struct mount *mp));
int lease_check __P((struct vop_lease_args *ap));
void vattr_null __P((struct vattr *vap));
+void vbusy __P((struct vnode *));
int vcount __P((struct vnode *vp));
+void vdrop __P((struct vnode *));
int vfinddev __P((dev_t dev, enum vtype type, struct vnode **vpp));
+void vfree __P((struct vnode *));
void vfs_opv_init __P((struct vnodeopv_desc **them));
int vflush __P((struct mount *mp, struct vnode *skipvp, int flags));
int vget __P((struct vnode *vp, int lockflag, struct proc *p));
void vgone __P((struct vnode *vp));
+void vhold __P((struct vnode *));
int vinvalbuf __P((struct vnode *vp, int save, struct ucred *cred,
struct proc *p, int slpflag, int slptimeo));
void vprint __P((char *label, struct vnode *vp));
@@ -514,8 +506,8 @@ int vop_revoke __P((struct vop_revoke_args *));
struct vnode *
checkalias __P((struct vnode *vp, dev_t nvp_rdev, struct mount *mp));
void vput __P((struct vnode *vp));
+void vref __P((struct vnode *vp));
void vrele __P((struct vnode *vp));
-void vtouch __P((struct vnode *vp));
#endif /* KERNEL */
#endif /* !_SYS_VNODE_H_ */
diff --git a/sys/ufs/lfs/lfs_segment.c b/sys/ufs/lfs/lfs_segment.c
index d09a185..59206c4 100644
--- a/sys/ufs/lfs/lfs_segment.c
+++ b/sys/ufs/lfs/lfs_segment.c
@@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)lfs_segment.c 8.10 (Berkeley) 6/10/95
- * $Id: lfs_segment.c,v 1.22 1997/06/15 17:56:46 dyson Exp $
+ * $Id: lfs_segment.c,v 1.23 1997/08/02 14:33:20 bde Exp $
*/
#include <sys/param.h>
@@ -1222,7 +1222,7 @@ lfs_vref(vp)
if ((vp->v_flag & VXLOCK) || /* XXX */
(vp->v_usecount == 0 &&
- vp->v_freelist.tqe_prev == (struct vnode **)0xdeadb))
+ vp->v_flag & VDOOMED))
return(1);
return (vget(vp, 0, p));
}
diff --git a/sys/vm/vm_swap.c b/sys/vm/vm_swap.c
index 6082278..9fc4935 100644
--- a/sys/vm/vm_swap.c
+++ b/sys/vm/vm_swap.c
@@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)vm_swap.c 8.5 (Berkeley) 2/17/94
- * $Id: vm_swap.c,v 1.42 1997/02/22 09:48:40 peter Exp $
+ * $Id: vm_swap.c,v 1.43 1997/03/23 03:37:54 bde Exp $
*/
#include <sys/param.h>
@@ -136,7 +136,7 @@ swstrategy(bp)
biodone(bp);
return;
}
- VHOLD(sp->sw_vp);
+ vhold(sp->sw_vp);
if ((bp->b_flags & B_READ) == 0) {
vp = bp->b_vp;
if (vp) {