author      mjg <mjg@FreeBSD.org>  2016-01-21 01:04:03 +0000
committer   mjg <mjg@FreeBSD.org>  2016-01-21 01:04:03 +0000
commit      5ca67cac1ebb3eb0efcc941134f08024d73c6cbe (patch)
tree        aa80b5f06981d08ce428629bc46eba4c007a3e41 /sys/kern
parent      1e7830f85d11e00a3d2c11a761b140c5c265aad4 (diff)
cache: use counter(9) API to maintain statistics
Previously the code would just increment statistics while only holding a shared lock, in effect losing updates.

Separate tracking for nchstats is removed as the values can be obtained from the existing counters.

Note that some fields are updated by external consumers and are left unfixed. This should not be a serious issue as this structure looks quite obsolete.

No strong objections: kib
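For context, counter(9) provides per-CPU 64-bit counters: an update only touches the current CPU's slot, so increments are not lost even when the caller holds just a shared lock (or no lock at all), and a consistent total is obtained by summing the slots on read. Below is a minimal, self-contained sketch of the allocate/add/fetch pattern this patch adopts; the example_ names are hypothetical and not part of the patch.

    #include <sys/param.h>
    #include <sys/systm.h>
    #include <sys/counter.h>
    #include <sys/kernel.h>
    #include <sys/malloc.h>
    #include <sys/sysctl.h>

    /* Hypothetical counter, exported read-only under the vfs sysctl tree. */
    static counter_u64_t example_hits;
    SYSCTL_COUNTER_U64(_vfs, OID_AUTO, example_hits, CTLFLAG_RD,
        &example_hits, "Example per-CPU counter exported via sysctl");

    static void
    example_init(void *dummy __unused)
    {

    	/* Allocate one slot per CPU; M_WAITOK may sleep. */
    	example_hits = counter_u64_alloc(M_WAITOK);
    }
    SYSINIT(example_counter, SI_SUB_VFS, SI_ORDER_ANY, example_init, NULL);

    /* Hot path: lossless even without an exclusive lock, since only the
     * current CPU's slot is modified. (Callers omitted in this sketch.) */
    static void
    example_hot_path(void)
    {

    	counter_u64_add(example_hits, 1);
    }

    /* Reader: sums all per-CPU slots into a single value. */
    static uint64_t
    example_read(void)
    {

    	return (counter_u64_fetch(example_hits));
    }

counter_u64_fetch() is also what the new sysctl_nchstats() handler in the diff below uses, so the legacy struct nchstats layout remains populated for old consumers while the hot paths stop writing to it.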
Diffstat (limited to 'sys/kern')
-rw-r--r--  sys/kern/vfs_cache.c | 141
1 file changed, 80 insertions(+), 61 deletions(-)
diff --git a/sys/kern/vfs_cache.c b/sys/kern/vfs_cache.c
index f1bd821..58a06a7 100644
--- a/sys/kern/vfs_cache.c
+++ b/sys/kern/vfs_cache.c
@@ -39,6 +39,7 @@ __FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
+#include <sys/counter.h>
#include <sys/filedesc.h>
#include <sys/fnv_hash.h>
#include <sys/kernel.h>
@@ -272,37 +273,35 @@ SYSCTL_INT(_debug_sizeof, OID_AUTO, namecache, CTLFLAG_RD, SYSCTL_NULL_INT_PTR,
*/
static SYSCTL_NODE(_vfs, OID_AUTO, cache, CTLFLAG_RW, 0,
"Name cache statistics");
-#define STATNODE(mode, name, var, descr) \
- SYSCTL_ULONG(_vfs_cache, OID_AUTO, name, mode, var, 0, descr);
-STATNODE(CTLFLAG_RD, numneg, &numneg, "Number of negative cache entries");
-STATNODE(CTLFLAG_RD, numcache, &numcache, "Number of cache entries");
-static u_long numcalls; STATNODE(CTLFLAG_RD, numcalls, &numcalls,
- "Number of cache lookups");
-static u_long dothits; STATNODE(CTLFLAG_RD, dothits, &dothits,
- "Number of '.' hits");
-static u_long dotdothits; STATNODE(CTLFLAG_RD, dotdothits, &dotdothits,
- "Number of '..' hits");
-static u_long numchecks; STATNODE(CTLFLAG_RD, numchecks, &numchecks,
- "Number of checks in lookup");
-static u_long nummiss; STATNODE(CTLFLAG_RD, nummiss, &nummiss,
- "Number of cache misses");
-static u_long nummisszap; STATNODE(CTLFLAG_RD, nummisszap, &nummisszap,
- "Number of cache misses we do not want to cache");
-static u_long numposzaps; STATNODE(CTLFLAG_RD, numposzaps, &numposzaps,
+#define STATNODE_ULONG(name, descr) \
+ SYSCTL_ULONG(_vfs_cache, OID_AUTO, name, CTLFLAG_RD, &name, 0, descr);
+#define STATNODE_COUNTER(name, descr) \
+ static counter_u64_t name; \
+ SYSCTL_COUNTER_U64(_vfs_cache, OID_AUTO, name, CTLFLAG_RD, &name, descr);
+STATNODE_ULONG(numneg, "Number of negative cache entries");
+STATNODE_ULONG(numcache, "Number of cache entries");
+STATNODE_COUNTER(numcalls, "Number of cache lookups");
+STATNODE_COUNTER(dothits, "Number of '.' hits");
+STATNODE_COUNTER(dotdothits, "Number of '..' hits");
+STATNODE_COUNTER(numchecks, "Number of checks in lookup");
+STATNODE_COUNTER(nummiss, "Number of cache misses");
+STATNODE_COUNTER(nummisszap, "Number of cache misses we do not want to cache");
+STATNODE_COUNTER(numposzaps,
"Number of cache hits (positive) we do not want to cache");
-static u_long numposhits; STATNODE(CTLFLAG_RD, numposhits, &numposhits,
- "Number of cache hits (positive)");
-static u_long numnegzaps; STATNODE(CTLFLAG_RD, numnegzaps, &numnegzaps,
+STATNODE_COUNTER(numposhits, "Number of cache hits (positive)");
+STATNODE_COUNTER(numnegzaps,
"Number of cache hits (negative) we do not want to cache");
-static u_long numneghits; STATNODE(CTLFLAG_RD, numneghits, &numneghits,
- "Number of cache hits (negative)");
-static u_long numupgrades; STATNODE(CTLFLAG_RD, numupgrades, &numupgrades,
+STATNODE_COUNTER(numneghits, "Number of cache hits (negative)");
+/* These count for kern___getcwd(), too. */
+STATNODE_COUNTER(numfullpathcalls, "Number of fullpath search calls");
+STATNODE_COUNTER(numfullpathfail1, "Number of fullpath search errors (ENOTDIR)");
+STATNODE_COUNTER(numfullpathfail2,
+ "Number of fullpath search errors (VOP_VPTOCNP failures)");
+STATNODE_COUNTER(numfullpathfail4, "Number of fullpath search errors (ENOMEM)");
+STATNODE_COUNTER(numfullpathfound, "Number of successful fullpath calls");
+static u_long numupgrades; STATNODE_ULONG(numupgrades,
"Number of updates of the cache after lookup (write lock + retry)");
-SYSCTL_OPAQUE(_vfs_cache, OID_AUTO, nchstats, CTLFLAG_RD | CTLFLAG_MPSAFE,
- &nchstats, sizeof(nchstats), "LU",
- "VFS cache effectiveness statistics");
-
static void cache_zap(struct namecache *ncp);
static int vn_vptocnp_locked(struct vnode **vp, struct ucred *cred, char *buf,
u_int *buflen);
@@ -311,6 +310,28 @@ static int vn_fullpath1(struct thread *td, struct vnode *vp, struct vnode *rdir,
static MALLOC_DEFINE(M_VFSCACHE, "vfscache", "VFS name cache entries");
+static int
+sysctl_nchstats(SYSCTL_HANDLER_ARGS)
+{
+ struct nchstats snap;
+
+ if (req->oldptr == NULL)
+ return (SYSCTL_OUT(req, 0, sizeof(snap)));
+
+ snap = nchstats;
+ snap.ncs_goodhits = counter_u64_fetch(numposhits);
+ snap.ncs_neghits = counter_u64_fetch(numneghits);
+ snap.ncs_badhits = counter_u64_fetch(numposzaps) +
+ counter_u64_fetch(numnegzaps);
+ snap.ncs_miss = counter_u64_fetch(nummisszap) +
+ counter_u64_fetch(nummiss);
+
+ return (SYSCTL_OUT(req, &snap, sizeof(snap)));
+}
+SYSCTL_PROC(_vfs_cache, OID_AUTO, nchstats, CTLTYPE_OPAQUE | CTLFLAG_RD |
+ CTLFLAG_MPSAFE, 0, 0, sysctl_nchstats, "LU",
+ "VFS cache effectiveness statistics");
+
#ifdef DIAGNOSTIC
/*
* Grab an atomic snapshot of the name cache hash chain lengths
@@ -479,7 +500,7 @@ cache_lookup(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
retry:
CACHE_RLOCK();
wlocked = 0;
- numcalls++;
+ counter_u64_add(numcalls, 1);
error = 0;
retry_wlocked:
@@ -488,7 +509,7 @@ retry_wlocked:
*vpp = dvp;
CTR2(KTR_VFS, "cache_lookup(%p, %s) found via .",
dvp, cnp->cn_nameptr);
- dothits++;
+ counter_u64_add(dothits, 1);
SDT_PROBE3(vfs, namecache, lookup, hit, dvp, ".", *vpp);
if (tsp != NULL)
timespecclear(tsp);
@@ -497,7 +518,7 @@ retry_wlocked:
goto success;
}
if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') {
- dotdothits++;
+ counter_u64_add(dotdothits, 1);
if (dvp->v_cache_dd == NULL) {
SDT_PROBE3(vfs, namecache, lookup, miss, dvp,
"..", NULL);
@@ -536,7 +557,7 @@ retry_wlocked:
hash = fnv_32_buf(cnp->cn_nameptr, cnp->cn_namelen, FNV1_32_INIT);
hash = fnv_32_buf(&dvp, sizeof(dvp), hash);
LIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) {
- numchecks++;
+ counter_u64_add(numchecks, 1);
if (ncp->nc_dvp == dvp && ncp->nc_nlen == cnp->cn_namelen &&
!bcmp(nc_get_name(ncp), cnp->cn_nameptr, ncp->nc_nlen))
break;
@@ -547,18 +568,16 @@ retry_wlocked:
SDT_PROBE3(vfs, namecache, lookup, miss, dvp, cnp->cn_nameptr,
NULL);
if ((cnp->cn_flags & MAKEENTRY) == 0) {
- nummisszap++;
+ counter_u64_add(nummisszap, 1);
} else {
- nummiss++;
+ counter_u64_add(nummiss, 1);
}
- nchstats.ncs_miss++;
goto unlock;
}
/* We don't want to have an entry, so dump it */
if ((cnp->cn_flags & MAKEENTRY) == 0) {
- numposzaps++;
- nchstats.ncs_badhits++;
+ counter_u64_add(numposzaps, 1);
if (!wlocked && !CACHE_UPGRADE_LOCK())
goto wlock;
cache_zap(ncp);
@@ -568,8 +587,7 @@ retry_wlocked:
/* We found a "positive" match, return the vnode */
if (ncp->nc_vp) {
- numposhits++;
- nchstats.ncs_goodhits++;
+ counter_u64_add(numposhits, 1);
*vpp = ncp->nc_vp;
CTR4(KTR_VFS, "cache_lookup(%p, %s) found %p via ncp %p",
dvp, cnp->cn_nameptr, *vpp, ncp);
@@ -582,8 +600,7 @@ retry_wlocked:
negative_success:
/* We found a negative match, and want to create it, so purge */
if (cnp->cn_nameiop == CREATE) {
- numnegzaps++;
- nchstats.ncs_badhits++;
+ counter_u64_add(numnegzaps, 1);
if (!wlocked && !CACHE_UPGRADE_LOCK())
goto wlock;
cache_zap(ncp);
@@ -593,7 +610,7 @@ negative_success:
if (!wlocked && !CACHE_UPGRADE_LOCK())
goto wlock;
- numneghits++;
+ counter_u64_add(numneghits, 1);
/*
* We found a "negative" match, so we shift it to the end of
* the "negative" cache entries queue to satisfy LRU. Also,
@@ -602,7 +619,6 @@ negative_success:
*/
TAILQ_REMOVE(&ncneg, ncp, nc_dst);
TAILQ_INSERT_TAIL(&ncneg, ncp, nc_dst);
- nchstats.ncs_neghits++;
if (ncp->nc_flag & NCF_WHITE)
cnp->cn_flags |= ISWHITEOUT;
SDT_PROBE2(vfs, namecache, lookup, hit__negative, dvp,
@@ -918,6 +934,22 @@ nchinit(void *dummy __unused)
NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
nchashtbl = hashinit(desiredvnodes * 2, M_VFSCACHE, &nchash);
+
+ numcalls = counter_u64_alloc(M_WAITOK);
+ dothits = counter_u64_alloc(M_WAITOK);
+ dotdothits = counter_u64_alloc(M_WAITOK);
+ numchecks = counter_u64_alloc(M_WAITOK);
+ nummiss = counter_u64_alloc(M_WAITOK);
+ nummisszap = counter_u64_alloc(M_WAITOK);
+ numposzaps = counter_u64_alloc(M_WAITOK);
+ numposhits = counter_u64_alloc(M_WAITOK);
+ numnegzaps = counter_u64_alloc(M_WAITOK);
+ numneghits = counter_u64_alloc(M_WAITOK);
+ numfullpathcalls = counter_u64_alloc(M_WAITOK);
+ numfullpathfail1 = counter_u64_alloc(M_WAITOK);
+ numfullpathfail2 = counter_u64_alloc(M_WAITOK);
+ numfullpathfail4 = counter_u64_alloc(M_WAITOK);
+ numfullpathfound = counter_u64_alloc(M_WAITOK);
}
SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_SECOND, nchinit, NULL);
@@ -1122,23 +1154,10 @@ kern___getcwd(struct thread *td, char *buf, enum uio_seg bufseg, u_int buflen,
* Thus begins the fullpath magic.
*/
-#undef STATNODE
-#define STATNODE(name, descr) \
- static u_int name; \
- SYSCTL_UINT(_vfs_cache, OID_AUTO, name, CTLFLAG_RD, &name, 0, descr)
-
static int disablefullpath;
SYSCTL_INT(_debug, OID_AUTO, disablefullpath, CTLFLAG_RW, &disablefullpath, 0,
"Disable the vn_fullpath function");
-/* These count for kern___getcwd(), too. */
-STATNODE(numfullpathcalls, "Number of fullpath search calls");
-STATNODE(numfullpathfail1, "Number of fullpath search errors (ENOTDIR)");
-STATNODE(numfullpathfail2,
- "Number of fullpath search errors (VOP_VPTOCNP failures)");
-STATNODE(numfullpathfail4, "Number of fullpath search errors (ENOMEM)");
-STATNODE(numfullpathfound, "Number of successful fullpath calls");
-
/*
* Retrieve the full filesystem path that correspond to a vnode from the name
* cache (if available)
@@ -1226,7 +1245,7 @@ vn_vptocnp_locked(struct vnode **vp, struct ucred *cred, char *buf,
if (*buflen < ncp->nc_nlen) {
CACHE_RUNLOCK();
vrele(*vp);
- numfullpathfail4++;
+ counter_u64_add(numfullpathfail4, 1);
error = ENOMEM;
SDT_PROBE3(vfs, namecache, fullpath, return, error,
vp, NULL);
@@ -1251,7 +1270,7 @@ vn_vptocnp_locked(struct vnode **vp, struct ucred *cred, char *buf,
error = VOP_VPTOCNP(*vp, &dvp, cred, buf, buflen);
vput(*vp);
if (error) {
- numfullpathfail2++;
+ counter_u64_add(numfullpathfail2, 1);
SDT_PROBE3(vfs, namecache, fullpath, return, error, vp, NULL);
return (error);
}
@@ -1292,7 +1311,7 @@ vn_fullpath1(struct thread *td, struct vnode *vp, struct vnode *rdir,
slash_prefixed = 0;
SDT_PROBE1(vfs, namecache, fullpath, entry, vp);
- numfullpathcalls++;
+ counter_u64_add(numfullpathcalls, 1);
vref(vp);
CACHE_RLOCK();
if (vp->v_type != VDIR) {
@@ -1328,7 +1347,7 @@ vn_fullpath1(struct thread *td, struct vnode *vp, struct vnode *rdir,
if (vp->v_type != VDIR) {
CACHE_RUNLOCK();
vrele(vp);
- numfullpathfail1++;
+ counter_u64_add(numfullpathfail1, 1);
error = ENOTDIR;
SDT_PROBE3(vfs, namecache, fullpath, return,
error, vp, NULL);
@@ -1354,14 +1373,14 @@ vn_fullpath1(struct thread *td, struct vnode *vp, struct vnode *rdir,
if (buflen == 0) {
CACHE_RUNLOCK();
vrele(vp);
- numfullpathfail4++;
+ counter_u64_add(numfullpathfail4, 1);
SDT_PROBE3(vfs, namecache, fullpath, return, ENOMEM,
startvp, NULL);
return (ENOMEM);
}
buf[--buflen] = '/';
}
- numfullpathfound++;
+ counter_u64_add(numfullpathfound, 1);
CACHE_RUNLOCK();
vrele(vp);
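After this change the individual statistics remain visible from userland under the vfs.cache sysctl tree (numcalls, dothits, nummiss, and so on, as registered by the STATNODE_COUNTER/STATNODE_ULONG macros above), and vfs.cache.nchstats still returns the aggregate struct for existing consumers, now assembled on demand by sysctl_nchstats(). Listing the subtree with something like `sysctl vfs.cache` should show the counters as regular read-only OIDs.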