summary | refs | log | tree | commit | diff | stats
path: root/sys/kern/kern_mutex.c
diff options
context:
space:
mode:
author	des <des@FreeBSD.org>	2002-04-02 14:42:01 +0000
committer	des <des@FreeBSD.org>	2002-04-02 14:42:01 +0000
commit	cbcf839df44a6aa4619246bf27b742ddf62dad25 (patch)
tree	545848546500c62463b615e478304b01029aa22b /sys/kern/kern_mutex.c
parent	f47a79c6730e82bef42ff6477a29612f18447d8c (diff)
download	FreeBSD-src-cbcf839df44a6aa4619246bf27b742ddf62dad25.zip
	FreeBSD-src-cbcf839df44a6aa4619246bf27b742ddf62dad25.tar.gz
Instead of get_cyclecount(9), use nanotime(9) to record acquisition and
release times. Measurements are made and stored in nanoseconds but presented in microseconds, which should be sufficient for the locks for which we actually want this (those that are held long and / or often). Also, rename some variables and structure members to unit-agnostic names.
Diffstat (limited to 'sys/kern/kern_mutex.c')
-rw-r--r--	sys/kern/kern_mutex.c	47
1 file changed, 28 insertions, 19 deletions
diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c
index e2f5f30..a79398b 100644
--- a/sys/kern/kern_mutex.c
+++ b/sys/kern/kern_mutex.c
@@ -212,7 +212,7 @@ struct mutex_prof {
#define MPROF_TOT 1
#define MPROF_CNT 2
#define MPROF_AVG 3
- u_int64_t cycles[4];
+ u_int64_t counter[4];
};
/*
@@ -258,6 +258,15 @@ mprof_init(void *arg __unused)
}
SYSINIT(mprofinit, SI_SUB_LOCK, SI_ORDER_ANY, mprof_init, NULL);
+static u_int64_t
+nanoseconds(void)
+{
+ struct timespec tv;
+
+ nanotime(&tv);
+ return (tv.tv_sec * (u_int64_t)1000000000 + tv.tv_nsec);
+}
+
static int
dump_mutex_prof_stats(SYSCTL_HANDLER_ARGS)
{
@@ -274,10 +283,10 @@ dump_mutex_prof_stats(SYSCTL_HANDLER_ARGS)
mtx_lock_spin(&mprof_mtx);
for (i = 0; i < first_free_mprof_buf; ++i)
sbuf_printf(sb, "%12llu %12llu %12llu %12llu %s:%d (%s)\n",
- mprof_buf[i].cycles[MPROF_MAX],
- mprof_buf[i].cycles[MPROF_TOT],
- mprof_buf[i].cycles[MPROF_CNT],
- mprof_buf[i].cycles[MPROF_AVG],
+ mprof_buf[i].counter[MPROF_MAX] / 1000,
+ mprof_buf[i].counter[MPROF_TOT] / 1000,
+ mprof_buf[i].counter[MPROF_CNT],
+ mprof_buf[i].counter[MPROF_AVG] / 1000,
mprof_buf[i].file, mprof_buf[i].line, mprof_buf[i].name);
mtx_unlock_spin(&mprof_mtx);
sbuf_finish(sb);
@@ -304,10 +313,10 @@ _mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
#ifdef MUTEX_PROFILING
/* don't reset the timer when/if recursing */
- if (m->cycles == 0) {
+ if (m->acqtime == 0) {
m->file = file;
m->line = line;
- m->cycles = mutex_prof_enable ? get_cyclecount() : 0;
+ m->acqtime = mutex_prof_enable ? nanoseconds() : 0;
++mutex_prof_acquisitions;
}
#endif
@@ -320,17 +329,17 @@ _mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
MPASS(curthread != NULL);
mtx_assert(m, MA_OWNED);
#ifdef MUTEX_PROFILING
- if (m->cycles != 0) {
+ if (m->acqtime != 0) {
static const char *unknown = "(unknown)";
struct mutex_prof *mpp;
- u_int64_t cycles, mcycles;
+ u_int64_t acqtime, now;
const char *p, *q;
volatile u_int hash, n;
- cycles = get_cyclecount();
- mcycles = m->cycles;
- m->cycles = 0;
- if (cycles <= mcycles)
+ now = nanoseconds();
+ acqtime = m->acqtime;
+ m->acqtime = 0;
+ if (now <= acqtime)
goto out;
for (p = file; strncmp(p, "../", 3) == 0; p += 3)
/* nothing */ ;
@@ -363,12 +372,12 @@ _mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
* Record if the mutex has been held longer now than ever
* before
*/
- if ((cycles - mcycles) > mpp->cycles[MPROF_MAX])
- mpp->cycles[MPROF_MAX] = cycles - mcycles;
- mpp->cycles[MPROF_TOT] += cycles - mcycles;
- mpp->cycles[MPROF_CNT] += 1;
- mpp->cycles[MPROF_AVG] =
- mpp->cycles[MPROF_TOT] / mpp->cycles[MPROF_CNT];
+ if ((now - acqtime) > mpp->counter[MPROF_MAX])
+ mpp->counter[MPROF_MAX] = now - acqtime;
+ mpp->counter[MPROF_TOT] += now - acqtime;
+ mpp->counter[MPROF_CNT] += 1;
+ mpp->counter[MPROF_AVG] =
+ mpp->counter[MPROF_TOT] / mpp->counter[MPROF_CNT];
unlock:
mtx_unlock_spin(&mprof_mtx);
}
OpenPOWER on IntegriCloud