summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--sys/kern/vfs_cache.c212
-rw-r--r--sys/kern/vfs_export.c96
-rw-r--r--sys/kern/vfs_subr.c96
-rw-r--r--sys/nfs/nfs_vnops.c5
-rw-r--r--sys/nfsclient/nfs_vnops.c5
-rw-r--r--sys/sys/namei.h24
-rw-r--r--sys/sys/vnode.h9
7 files changed, 226 insertions, 221 deletions
diff --git a/sys/kern/vfs_cache.c b/sys/kern/vfs_cache.c
index ef0f222..43725a5 100644
--- a/sys/kern/vfs_cache.c
+++ b/sys/kern/vfs_cache.c
@@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* @(#)vfs_cache.c 8.5 (Berkeley) 3/22/95
- * $Id: vfs_cache.c,v 1.23 1997/02/22 09:39:31 peter Exp $
+ * $Id: vfs_cache.c,v 1.24 1997/03/08 15:22:14 bde Exp $
*/
#include <sys/param.h>
@@ -62,10 +62,6 @@
* If it is a "negative" entry, (i.e. for a name that is known NOT to
* exist) the vnode pointer will be NULL.
*
- * For simplicity (and economy of storage), names longer than
- * a maximum length of NCHNAMLEN are not cached; they occur
- * infrequently in any case, and are almost never of interest.
- *
* Upon reaching the last segment of a path, if the reference
* is for DELETE, or NOCACHE is set (rewrite), and the
* name is located in the cache, it will be dropped.
@@ -77,44 +73,45 @@
#define NCHHASH(dvp, cnp) \
(&nchashtbl[((dvp)->v_id + (cnp)->cn_hash) % nchash])
static LIST_HEAD(nchashhead, namecache) *nchashtbl; /* Hash Table */
+static TAILQ_HEAD(, namecache) ncneg;	/* LRU list of negative entries */
static u_long nchash; /* size of hash table */
+static u_long ncnegfactor = 16; /* ratio of negative entries */
+SYSCTL_INT(_debug, OID_AUTO, ncnegfactor, CTLFLAG_RW, &ncnegfactor, 0, "");
+static u_long numneg;		/* number of negative cache entries */
+SYSCTL_INT(_debug, OID_AUTO, numneg, CTLFLAG_RD, &numneg, 0, "");
static u_long numcache; /* number of cache entries allocated */
-static TAILQ_HEAD(, namecache) nclruhead; /* LRU chain */
+SYSCTL_INT(_debug, OID_AUTO, numcache, CTLFLAG_RD, &numcache, 0, "");
struct nchstats nchstats; /* cache effectiveness statistics */
static int doingcache = 1; /* 1 => enable the cache */
SYSCTL_INT(_debug, OID_AUTO, vfscache, CTLFLAG_RW, &doingcache, 0, "");
+SYSCTL_INT(_debug, OID_AUTO, vnsize, CTLFLAG_RD, 0, sizeof(struct vnode), "");
+SYSCTL_INT(_debug, OID_AUTO, ncsize, CTLFLAG_RD, 0, sizeof(struct namecache), "");
-#ifdef NCH_STATISTICS
-u_long nchnbr;
-#define NCHNBR(ncp) (ncp)->nc_nbr = ++nchnbr;
-#define NCHHIT(ncp) (ncp)->nc_hits++
-#else
-#define NCHNBR(ncp)
-#define NCHHIT(ncp)
-#endif
+static void cache_zap __P((struct namecache *ncp));
/*
- * Delete an entry from its hash list and move it to the front
- * of the LRU list for immediate reuse.
+ * Flags in namecache.nc_flag
*/
-#define PURGE(ncp) { \
- LIST_REMOVE(ncp, nc_hash); \
- ncp->nc_hash.le_prev = 0; \
- TAILQ_REMOVE(&nclruhead, ncp, nc_lru); \
- TAILQ_INSERT_HEAD(&nclruhead, ncp, nc_lru); \
-}
-
+#define NCF_WHITE 1
/*
- * Move an entry that has been used to the tail of the LRU list
- * so that it will be preserved for future use.
+ * Delete an entry from its hash chain and its source/destination
+ * lists, update the entry counts, and free it.
*/
-#define TOUCH(ncp) { \
- if (ncp->nc_lru.tqe_next != 0) { \
- TAILQ_REMOVE(&nclruhead, ncp, nc_lru); \
- TAILQ_INSERT_TAIL(&nclruhead, ncp, nc_lru); \
- NCHNBR(ncp); \
- } \
+static void
+cache_zap(ncp)
+ struct namecache *ncp;
+{
+ LIST_REMOVE(ncp, nc_hash);
+ LIST_REMOVE(ncp, nc_src);
+ if (ncp->nc_vp) {
+ TAILQ_REMOVE(&ncp->nc_vp->v_cache_dst, ncp, nc_dst);
+ } else {
+ TAILQ_REMOVE(&ncneg, ncp, nc_dst);
+ numneg--;
+ }
+ numcache--;
+ free(ncp, M_CACHE);
}
/*
@@ -145,25 +142,25 @@ cache_lookup(dvp, vpp, cnp)
cnp->cn_flags &= ~MAKEENTRY;
return (0);
}
- if (cnp->cn_namelen > NCHNAMLEN) {
- nchstats.ncs_long++;
- cnp->cn_flags &= ~MAKEENTRY;
- return (0);
- }
- ncpp = NCHHASH(dvp, cnp);
- for (ncp = ncpp->lh_first; ncp != 0; ncp = nnp) {
- nnp = ncp->nc_hash.le_next;
- /* If one of the vp's went stale, don't bother anymore. */
- if ((ncp->nc_dvpid != ncp->nc_dvp->v_id) ||
- (ncp->nc_vp && ncp->nc_vpid != ncp->nc_vp->v_id)) {
- nchstats.ncs_falsehits++;
- PURGE(ncp);
- continue;
+ if (cnp->cn_nameptr[0] == '.') {
+ if (cnp->cn_namelen == 1) {
+ *vpp = dvp;
+ return (-1);
}
- /* Now that we know the vp's to be valid, is it ours ? */
- if (ncp->nc_dvp == dvp &&
- ncp->nc_nlen == cnp->cn_namelen &&
+ if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') {
+ if (dvp->v_dd->v_id != dvp->v_ddid ||
+ (cnp->cn_flags & MAKEENTRY) == 0) {
+ dvp->v_ddid = 0;
+ return (0);
+ }
+ *vpp = dvp->v_dd;
+ return (-1);
+ }
+ }
+
+ LIST_FOREACH(ncp, (NCHHASH(dvp, cnp)), nc_hash) {
+ if (ncp->nc_dvp == dvp && ncp->nc_nlen == cnp->cn_namelen &&
!bcmp(ncp->nc_name, cnp->cn_nameptr, (u_int)ncp->nc_nlen))
break;
}
@@ -174,29 +171,25 @@ cache_lookup(dvp, vpp, cnp)
return (0);
}
- NCHHIT(ncp);
-
/* We don't want to have an entry, so dump it */
if ((cnp->cn_flags & MAKEENTRY) == 0) {
nchstats.ncs_badhits++;
- PURGE(ncp);
+ cache_zap(ncp);
return (0);
}
/* We found a "positive" match, return the vnode */
if (ncp->nc_vp) {
nchstats.ncs_goodhits++;
- TOUCH(ncp);
+ vtouch(ncp->nc_vp);
*vpp = ncp->nc_vp;
- if ((*vpp)->v_usage < MAXVNODEUSE)
- (*vpp)->v_usage++;
return (-1);
}
/* We found a negative match, and want to create it, so purge */
if (cnp->cn_nameiop == CREATE) {
nchstats.ncs_badhits++;
- PURGE(ncp);
+ cache_zap(ncp);
return (0);
}
@@ -204,9 +197,11 @@ cache_lookup(dvp, vpp, cnp)
* We found a "negative" match, ENOENT notifies client of this match.
* The nc_vpid field records whether this is a whiteout.
*/
+ TAILQ_REMOVE(&ncneg, ncp, nc_dst);
+ TAILQ_INSERT_TAIL(&ncneg, ncp, nc_dst);
nchstats.ncs_neghits++;
- TOUCH(ncp);
- cnp->cn_flags |= ncp->nc_vpid;
+ if (ncp->nc_flag & NCF_WHITE)
+ cnp->cn_flags |= ISWHITEOUT;
return (ENOENT);
}
@@ -225,35 +220,28 @@ cache_enter(dvp, vp, cnp)
if (!doingcache)
return;
- if (cnp->cn_namelen > NCHNAMLEN) {
- printf("cache_enter: name too long");
- return;
- }
-
- /*
- * We allocate a new entry if we are less than the maximum
- * allowed and the one at the front of the LRU list is in use.
- * Otherwise we use the one at the front of the LRU list.
- */
- if (numcache < desiredvnodes &&
- ((ncp = nclruhead.tqh_first) == NULL ||
- ncp->nc_hash.le_prev != 0)) {
- /* Add one more entry */
- ncp = (struct namecache *)
- malloc((u_long)sizeof *ncp, M_CACHE, M_WAITOK);
- bzero((char *)ncp, sizeof *ncp);
- numcache++;
- } else if (ncp = nclruhead.tqh_first) {
- /* reuse an old entry */
- TAILQ_REMOVE(&nclruhead, ncp, nc_lru);
- if (ncp->nc_hash.le_prev != 0) {
- LIST_REMOVE(ncp, nc_hash);
- ncp->nc_hash.le_prev = 0;
+ if (cnp->cn_nameptr[0] == '.') {
+ if (cnp->cn_namelen == 1) {
+ return;
+ }
+ if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') {
+ if (vp) {
+ dvp->v_dd = vp;
+ dvp->v_ddid = vp->v_id;
+ } else {
+ dvp->v_dd = dvp;
+ dvp->v_ddid = 0;
+ }
+ return;
}
- } else {
- /* give up */
- return;
}
+
+ ncp = (struct namecache *)
+ malloc(sizeof *ncp + cnp->cn_namelen, M_CACHE, M_WAITOK);
+ bzero((char *)ncp, sizeof *ncp);
+ numcache++;
+ if (!vp)
+ numneg++;
/*
* Fill in cache info, if vp is NULL this is a "negative" cache entry.
@@ -262,19 +250,25 @@ cache_enter(dvp, vp, cnp)
* otherwise unused.
*/
ncp->nc_vp = vp;
- if (vp) {
- ncp->nc_vpid = vp->v_id;
- if (vp->v_usage < MAXVNODEUSE)
- ++vp->v_usage;
- } else
- ncp->nc_vpid = cnp->cn_flags & ISWHITEOUT;
+ if (vp)
+ vtouch(vp);
+ else
+ ncp->nc_flag = cnp->cn_flags & ISWHITEOUT ? NCF_WHITE : 0;
ncp->nc_dvp = dvp;
- ncp->nc_dvpid = dvp->v_id;
ncp->nc_nlen = cnp->cn_namelen;
bcopy(cnp->cn_nameptr, ncp->nc_name, (unsigned)ncp->nc_nlen);
- TAILQ_INSERT_TAIL(&nclruhead, ncp, nc_lru);
ncpp = NCHHASH(dvp, cnp);
LIST_INSERT_HEAD(ncpp, ncp, nc_hash);
+ LIST_INSERT_HEAD(&dvp->v_cache_src, ncp, nc_src);
+ if (vp) {
+ TAILQ_INSERT_HEAD(&vp->v_cache_dst, ncp, nc_dst);
+ } else {
+ TAILQ_INSERT_TAIL(&ncneg, ncp, nc_dst);
+ }
+ if (numneg*ncnegfactor > numcache) {
+ ncp = TAILQ_FIRST(&ncneg);
+ cache_zap(ncp);
+ }
}
/*
@@ -284,7 +278,7 @@ void
nchinit()
{
- TAILQ_INIT(&nclruhead);
+ TAILQ_INIT(&ncneg);
nchashtbl = phashinit(desiredvnodes, M_CACHE, &nchash);
}
@@ -304,14 +298,20 @@ cache_purge(vp)
struct nchashhead *ncpp;
static u_long nextvnodeid;
- vp->v_id = ++nextvnodeid;
- if (nextvnodeid != 0)
- return;
- for (ncpp = &nchashtbl[nchash - 1]; ncpp >= nchashtbl; ncpp--) {
- while (ncp = ncpp->lh_first)
- PURGE(ncp);
- }
- vp->v_id = ++nextvnodeid;
+ while (!LIST_EMPTY(&vp->v_cache_src))
+ cache_zap(LIST_FIRST(&vp->v_cache_src));
+ while (!TAILQ_EMPTY(&vp->v_cache_dst))
+ cache_zap(TAILQ_FIRST(&vp->v_cache_dst));
+
+ /* Never assign the same v_id, and never assign zero as v_id */
+ do {
+ if (++nextvnodeid == vp->v_id)
+ ++nextvnodeid;
+ } while (!nextvnodeid);
+
+ vp->v_id = nextvnodeid;
+ vp->v_dd = vp;
+ vp->v_ddid = 0;
}
/*
@@ -329,12 +329,10 @@ cache_purgevfs(mp)
/* Scan hash tables for applicable entries */
for (ncpp = &nchashtbl[nchash - 1]; ncpp >= nchashtbl; ncpp--) {
- for (ncp = ncpp->lh_first; ncp != 0; ncp = nnp) {
- nnp = ncp->nc_hash.le_next;
- if (ncp->nc_dvpid != ncp->nc_dvp->v_id ||
- (ncp->nc_vp && ncp->nc_vpid != ncp->nc_vp->v_id) ||
- ncp->nc_dvp->v_mount == mp) {
- PURGE(ncp);
+ for (ncp = LIST_FIRST(ncpp); ncp != 0; ncp = nnp) {
+ nnp = LIST_NEXT(ncp, nc_hash);
+ if (ncp->nc_dvp->v_mount == mp) {
+ cache_zap(ncp);
}
}
}
diff --git a/sys/kern/vfs_export.c b/sys/kern/vfs_export.c
index 61a4d14..295c233 100644
--- a/sys/kern/vfs_export.c
+++ b/sys/kern/vfs_export.c
@@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)vfs_subr.c 8.31 (Berkeley) 5/26/95
- * $Id: vfs_subr.c,v 1.83 1997/04/25 06:47:12 peter Exp $
+ * $Id: vfs_subr.c,v 1.84 1997/04/30 03:09:15 dyson Exp $
*/
/*
@@ -78,6 +78,7 @@ extern void printlockedvnodes __P((void));
static void vclean __P((struct vnode *vp, int flags, struct proc *p));
static void vgonel __P((struct vnode *vp, struct proc *p));
unsigned long numvnodes;
+SYSCTL_INT(_debug, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0, "");
static void vputrele __P((struct vnode *vp, int put));
enum vtype iftovt_tab[16] = {
@@ -342,54 +343,36 @@ getnewvnode(tag, mp, vops, vpp)
struct proc *p = curproc; /* XXX */
struct vnode *vp;
- simple_lock(&vnode_free_list_slock);
-retry:
/*
- * we allocate a new vnode if
- * 1. we don't have any free
- * Pretty obvious, we actually used to panic, but that
- * is a silly thing to do.
- * 2. we havn't filled our pool yet
- * We don't want to trash the incore (VM-)vnodecache.
- * 3. if less that 1/4th of our vnodes are free.
- * We don't want to trash the namei cache either.
+ * We take the least recently used vnode from the freelist
+ * if we can get it and it has no cached pages, and no
+ * namecache entries are relative to it.
+ * Otherwise we allocate a new vnode
*/
- if (freevnodes < (numvnodes >> 2) ||
- numvnodes < desiredvnodes ||
- vnode_free_list.tqh_first == NULL) {
- simple_unlock(&vnode_free_list_slock);
- vp = (struct vnode *) malloc((u_long) sizeof *vp,
- M_VNODE, M_WAITOK);
- bzero((char *) vp, sizeof *vp);
- numvnodes++;
- } else {
- for (vp = vnode_free_list.tqh_first;
- vp != NULLVP; vp = vp->v_freelist.tqe_next) {
- if (simple_lock_try(&vp->v_interlock))
- break;
- }
- /*
- * Unless this is a bad time of the month, at most
- * the first NCPUS items on the free list are
- * locked, so this is close enough to being empty.
- */
- if (vp == NULLVP) {
- simple_unlock(&vnode_free_list_slock);
- tablefull("vnode");
- *vpp = 0;
- return (ENFILE);
- }
+
+ simple_lock(&vnode_free_list_slock);
+
+ TAILQ_FOREACH(vp, &vnode_free_list, v_freelist) {
+ if (!simple_lock_try(&vp->v_interlock))
+ continue;
if (vp->v_usecount)
panic("free vnode isn't");
- TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
- if (vp->v_usage > 0) {
+
+ if (vp->v_object && vp->v_object->resident_page_count) {
+ /* Don't recycle if it's caching some pages */
simple_unlock(&vp->v_interlock);
- --vp->v_usage;
- TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
- goto retry;
+ continue;
+ } else if (LIST_FIRST(&vp->v_cache_src)) {
+ /* Don't recycle if active in the namecache */
+ simple_unlock(&vp->v_interlock);
+ continue;
+ } else {
+ break;
}
+ }
+ if (vp) {
+ TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
freevnodes--;
-
/* see comment on why 0xdeadb is set at end of vgone (below) */
vp->v_freelist.tqe_prev = (struct vnode **) 0xdeadb;
simple_unlock(&vnode_free_list_slock);
@@ -420,8 +403,17 @@ retry:
vp->v_clen = 0;
vp->v_socket = 0;
vp->v_writecount = 0; /* XXX */
- vp->v_usage = 0;
+ } else {
+ simple_unlock(&vnode_free_list_slock);
+ vp = (struct vnode *) malloc((u_long) sizeof *vp,
+ M_VNODE, M_WAITOK);
+ bzero((char *) vp, sizeof *vp);
+ vp->v_dd = vp;
+ LIST_INIT(&vp->v_cache_src);
+ TAILQ_INIT(&vp->v_cache_dst);
+ numvnodes++;
}
+
vp->v_type = VNON;
cache_purge(vp);
vp->v_tag = tag;
@@ -1119,7 +1111,6 @@ vputrele(vp, put)
simple_lock(&vnode_free_list_slock);
if (vp->v_flag & VAGE) {
vp->v_flag &= ~VAGE;
- vp->v_usage = 0;
if(vp->v_tag != VT_TFS)
TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
} else {
@@ -2147,3 +2138,20 @@ retry:
retn:
return error;
}
+
+void
+vtouch(vp)
+ struct vnode *vp;
+{
+ simple_lock(&vp->v_interlock);
+ if (vp->v_usecount) {
+ simple_unlock(&vp->v_interlock);
+ return;
+ }
+ if (simple_lock_try(&vnode_free_list_slock)) {
+ TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
+ TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
+ simple_unlock(&vnode_free_list_slock);
+ }
+ simple_unlock(&vp->v_interlock);
+}
diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c
index 61a4d14..295c233 100644
--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)vfs_subr.c 8.31 (Berkeley) 5/26/95
- * $Id: vfs_subr.c,v 1.83 1997/04/25 06:47:12 peter Exp $
+ * $Id: vfs_subr.c,v 1.84 1997/04/30 03:09:15 dyson Exp $
*/
/*
@@ -78,6 +78,7 @@ extern void printlockedvnodes __P((void));
static void vclean __P((struct vnode *vp, int flags, struct proc *p));
static void vgonel __P((struct vnode *vp, struct proc *p));
unsigned long numvnodes;
+SYSCTL_INT(_debug, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0, "");
static void vputrele __P((struct vnode *vp, int put));
enum vtype iftovt_tab[16] = {
@@ -342,54 +343,36 @@ getnewvnode(tag, mp, vops, vpp)
struct proc *p = curproc; /* XXX */
struct vnode *vp;
- simple_lock(&vnode_free_list_slock);
-retry:
/*
- * we allocate a new vnode if
- * 1. we don't have any free
- * Pretty obvious, we actually used to panic, but that
- * is a silly thing to do.
- * 2. we havn't filled our pool yet
- * We don't want to trash the incore (VM-)vnodecache.
- * 3. if less that 1/4th of our vnodes are free.
- * We don't want to trash the namei cache either.
+ * We take the least recently used vnode from the freelist
+ * if we can get it and it has no cached pages, and no
+ * namecache entries are relative to it.
+ * Otherwise we allocate a new vnode
*/
- if (freevnodes < (numvnodes >> 2) ||
- numvnodes < desiredvnodes ||
- vnode_free_list.tqh_first == NULL) {
- simple_unlock(&vnode_free_list_slock);
- vp = (struct vnode *) malloc((u_long) sizeof *vp,
- M_VNODE, M_WAITOK);
- bzero((char *) vp, sizeof *vp);
- numvnodes++;
- } else {
- for (vp = vnode_free_list.tqh_first;
- vp != NULLVP; vp = vp->v_freelist.tqe_next) {
- if (simple_lock_try(&vp->v_interlock))
- break;
- }
- /*
- * Unless this is a bad time of the month, at most
- * the first NCPUS items on the free list are
- * locked, so this is close enough to being empty.
- */
- if (vp == NULLVP) {
- simple_unlock(&vnode_free_list_slock);
- tablefull("vnode");
- *vpp = 0;
- return (ENFILE);
- }
+
+ simple_lock(&vnode_free_list_slock);
+
+ TAILQ_FOREACH(vp, &vnode_free_list, v_freelist) {
+ if (!simple_lock_try(&vp->v_interlock))
+ continue;
if (vp->v_usecount)
panic("free vnode isn't");
- TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
- if (vp->v_usage > 0) {
+
+ if (vp->v_object && vp->v_object->resident_page_count) {
+ /* Don't recycle if it's caching some pages */
simple_unlock(&vp->v_interlock);
- --vp->v_usage;
- TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
- goto retry;
+ continue;
+ } else if (LIST_FIRST(&vp->v_cache_src)) {
+ /* Don't recycle if active in the namecache */
+ simple_unlock(&vp->v_interlock);
+ continue;
+ } else {
+ break;
}
+ }
+ if (vp) {
+ TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
freevnodes--;
-
/* see comment on why 0xdeadb is set at end of vgone (below) */
vp->v_freelist.tqe_prev = (struct vnode **) 0xdeadb;
simple_unlock(&vnode_free_list_slock);
@@ -420,8 +403,17 @@ retry:
vp->v_clen = 0;
vp->v_socket = 0;
vp->v_writecount = 0; /* XXX */
- vp->v_usage = 0;
+ } else {
+ simple_unlock(&vnode_free_list_slock);
+ vp = (struct vnode *) malloc((u_long) sizeof *vp,
+ M_VNODE, M_WAITOK);
+ bzero((char *) vp, sizeof *vp);
+ vp->v_dd = vp;
+ LIST_INIT(&vp->v_cache_src);
+ TAILQ_INIT(&vp->v_cache_dst);
+ numvnodes++;
}
+
vp->v_type = VNON;
cache_purge(vp);
vp->v_tag = tag;
@@ -1119,7 +1111,6 @@ vputrele(vp, put)
simple_lock(&vnode_free_list_slock);
if (vp->v_flag & VAGE) {
vp->v_flag &= ~VAGE;
- vp->v_usage = 0;
if(vp->v_tag != VT_TFS)
TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
} else {
@@ -2147,3 +2138,20 @@ retry:
retn:
return error;
}
+
+void
+vtouch(vp)
+ struct vnode *vp;
+{
+ simple_lock(&vp->v_interlock);
+ if (vp->v_usecount) {
+ simple_unlock(&vp->v_interlock);
+ return;
+ }
+ if (simple_lock_try(&vnode_free_list_slock)) {
+ TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
+ TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
+ simple_unlock(&vnode_free_list_slock);
+ }
+ simple_unlock(&vp->v_interlock);
+}
diff --git a/sys/nfs/nfs_vnops.c b/sys/nfs/nfs_vnops.c
index 0d90627..72a7a04 100644
--- a/sys/nfs/nfs_vnops.c
+++ b/sys/nfs/nfs_vnops.c
@@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* @(#)nfs_vnops.c 8.16 (Berkeley) 5/27/95
- * $Id: nfs_vnops.c,v 1.45 1997/03/29 12:40:20 bde Exp $
+ * $Id: nfs_vnops.c,v 1.46 1997/04/04 17:49:33 dfr Exp $
*/
@@ -2382,8 +2382,7 @@ nfs_readdirplusrpc(vp, uiop, cred)
for (cp = cnp->cn_nameptr, i = 1; i <= len;
i++, cp++)
cnp->cn_hash += (unsigned char)*cp * i;
- if (cnp->cn_namelen <= NCHNAMLEN)
- cache_enter(ndp->ni_dvp, ndp->ni_vp, cnp);
+ cache_enter(ndp->ni_dvp, ndp->ni_vp, cnp);
}
} else {
/* Just skip over the file handle */
diff --git a/sys/nfsclient/nfs_vnops.c b/sys/nfsclient/nfs_vnops.c
index 0d90627..72a7a04 100644
--- a/sys/nfsclient/nfs_vnops.c
+++ b/sys/nfsclient/nfs_vnops.c
@@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* @(#)nfs_vnops.c 8.16 (Berkeley) 5/27/95
- * $Id: nfs_vnops.c,v 1.45 1997/03/29 12:40:20 bde Exp $
+ * $Id: nfs_vnops.c,v 1.46 1997/04/04 17:49:33 dfr Exp $
*/
@@ -2382,8 +2382,7 @@ nfs_readdirplusrpc(vp, uiop, cred)
for (cp = cnp->cn_nameptr, i = 1; i <= len;
i++, cp++)
cnp->cn_hash += (unsigned char)*cp * i;
- if (cnp->cn_namelen <= NCHNAMLEN)
- cache_enter(ndp->ni_dvp, ndp->ni_vp, cnp);
+ cache_enter(ndp->ni_dvp, ndp->ni_vp, cnp);
}
} else {
/* Just skip over the file handle */
diff --git a/sys/sys/namei.h b/sys/sys/namei.h
index 5b7aacd..be8a7c4 100644
--- a/sys/sys/namei.h
+++ b/sys/sys/namei.h
@@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)namei.h 8.5 (Berkeley) 1/9/95
- * $Id$
+ * $Id: namei.h,v 1.13 1997/02/22 09:45:38 peter Exp $
*/
#ifndef _SYS_NAMEI_H_
@@ -153,30 +153,18 @@ struct nameidata {
/*
* This structure describes the elements in the cache of recent
- * names looked up by namei. NCHNAMLEN is sized to make structure
- * size a power of two to optimize malloc's. Minimum reasonable
- * size is 15.
+ * names looked up by namei.
*/
-#ifdef NCH_STATISTICS
-#define NCHNAMLEN 23 /* maximum name segment length we bother with */
-#else
-#define NCHNAMLEN 31 /* maximum name segment length we bother with */
-#endif
-
struct namecache {
LIST_ENTRY(namecache) nc_hash; /* hash chain */
- TAILQ_ENTRY(namecache) nc_lru; /* LRU chain */
+ LIST_ENTRY(namecache) nc_src; /* source vnode list */
+ TAILQ_ENTRY(namecache) nc_dst; /* destination vnode list */
struct vnode *nc_dvp; /* vnode of parent of name */
- u_long nc_dvpid; /* capability number of nc_dvp */
struct vnode *nc_vp; /* vnode the name refers to */
- u_long nc_vpid; /* capability number of nc_vp */
-#ifdef NCH_STATISTICS
- u_long nc_nbr; /* a serial number */
- u_long nc_hits; /* how many times we got hit */
-#endif
+ char nc_flag; /* flag bits */
char nc_nlen; /* length of name */
- char nc_name[NCHNAMLEN]; /* segment name */
+ char nc_name[0]; /* segment name */
};
#ifdef KERNEL
diff --git a/sys/sys/vnode.h b/sys/sys/vnode.h
index 1d5ea6e..e308f8f 100644
--- a/sys/sys/vnode.h
+++ b/sys/sys/vnode.h
@@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)vnode.h 8.7 (Berkeley) 2/4/94
- * $Id: vnode.h,v 1.42 1997/02/22 09:46:29 peter Exp $
+ * $Id: vnode.h,v 1.43 1997/04/04 17:43:32 dfr Exp $
*/
#ifndef _SYS_VNODE_H_
@@ -70,6 +70,7 @@ LIST_HEAD(buflists, buf);
typedef int vop_t __P((void *));
struct vm_object;
+struct namecache;
/*
* Reading or writing any of these items requires holding the appropriate lock.
@@ -104,12 +105,15 @@ struct vnode {
daddr_t v_cstart; /* start block of cluster */
daddr_t v_lasta; /* last allocation */
int v_clen; /* length of current cluster */
- int v_usage; /* Vnode usage counter */
struct vm_object *v_object; /* Place to store VM object */
struct simplelock v_interlock; /* lock on usecount and flag */
struct lock *v_vnlock; /* used for non-locking fs's */
enum vtagtype v_tag; /* type of underlying data */
void *v_data; /* private data for fs */
+ LIST_HEAD(, namecache) v_cache_src; /* Cache entries from us */
+ TAILQ_HEAD(, namecache) v_cache_dst; /* Cache entries to us */
+ struct vnode *v_dd; /* .. vnode */
+ u_long v_ddid; /* .. capability identifier */
};
#define v_mountedhere v_un.vu_mountedhere
#define v_socket v_un.vu_socket
@@ -506,6 +510,7 @@ struct vnode *
checkalias __P((struct vnode *vp, dev_t nvp_rdev, struct mount *mp));
void vput __P((struct vnode *vp));
void vrele __P((struct vnode *vp));
+void vtouch __P((struct vnode *vp));
#endif /* KERNEL */
#endif /* !_SYS_VNODE_H_ */
OpenPOWER on IntegriCloud