summaryrefslogtreecommitdiffstats
path: root/sys/kern/kern_mutex.c
diff options
context:
space:
mode:
author	rwatson <rwatson@FreeBSD.org>	2004-01-25 01:59:27 +0000
committer	rwatson <rwatson@FreeBSD.org>	2004-01-25 01:59:27 +0000
commit	ca8a19313e2f06d57456af75aaa70204aa2efede (patch)
tree	c3fe9c836ece7d77c67ab56d4a2e4c5c8dbb0add	/sys/kern/kern_mutex.c
parent	983ec1a4d4e7bac61755ac3385faa3d186f57b78 (diff)
download	FreeBSD-src-ca8a19313e2f06d57456af75aaa70204aa2efede.zip
	FreeBSD-src-ca8a19313e2f06d57456af75aaa70204aa2efede.tar.gz
Add some basic support for measuring sleep mutex contention to the
mutex profiling code. As with existing mutex profiling, measurement
is done with respect to mtx_lock() instances in the code, as opposed
to specific mutexes. In particular, measure two things:

(1) Lock contention. How often did this mtx_lock() call get made and
    have to sleep (or almost sleep) waiting for the lock. This helps
    identify the "victims" of contention.

(2) Hold contention. How often, while the lock was held by a thread
    as a result of this mtx_lock(), did another thread try to acquire
    the same mutex. This helps identify the causes of contention.

I'm currently exploring adding measurement of "time waited for the
lock", but the current implementation has proven useful to me so far
so I figured I'd commit it so others could try it out. Note that this
increases the size of mutexes when MUTEX_PROFILING is enabled, so you
might find you need to further bump UMA_BOOT_PAGES. Fixes welcome.

The once over: des, others
Diffstat (limited to 'sys/kern/kern_mutex.c')
-rw-r--r--	sys/kern/kern_mutex.c	38
1 files changed, 33 insertions, 5 deletions
diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c
index 3ceaca7..ca45e67 100644
--- a/sys/kern/kern_mutex.c
+++ b/sys/kern/kern_mutex.c
@@ -105,6 +105,8 @@ struct mutex_prof {
uintmax_t cnt_max;
uintmax_t cnt_tot;
uintmax_t cnt_cur;
+ uintmax_t cnt_contest_holding;
+ uintmax_t cnt_contest_locking;
struct mutex_prof *next;
};
@@ -169,8 +171,8 @@ dump_mutex_prof_stats(SYSCTL_HANDLER_ARGS)
retry_sbufops:
sb = sbuf_new(NULL, NULL, MPROF_SBUF_SIZE * multiplier, SBUF_FIXEDLEN);
- sbuf_printf(sb, "%6s %12s %11s %5s %s\n",
- "max", "total", "count", "avg", "name");
+ sbuf_printf(sb, "%6s %12s %11s %5s %12s %12s %s\n",
+ "max", "total", "count", "avg", "cnt_hold", "cnt_lock", "name");
/*
* XXX this spinlock seems to be by far the largest perpetrator
* of spinlock latency (1.6 msec on an Athlon1600 was recorded
@@ -179,12 +181,14 @@ retry_sbufops:
*/
mtx_lock_spin(&mprof_mtx);
for (i = 0; i < first_free_mprof_buf; ++i) {
- sbuf_printf(sb, "%6ju %12ju %11ju %5ju %s:%d (%s)\n",
+ sbuf_printf(sb, "%6ju %12ju %11ju %5ju %12ju %12ju %s:%d (%s)\n",
mprof_buf[i].cnt_max / 1000,
mprof_buf[i].cnt_tot / 1000,
mprof_buf[i].cnt_cur,
mprof_buf[i].cnt_cur == 0 ? (uintmax_t)0 :
mprof_buf[i].cnt_tot / (mprof_buf[i].cnt_cur * 1000),
+ mprof_buf[i].cnt_contest_holding,
+ mprof_buf[i].cnt_contest_locking,
mprof_buf[i].file, mprof_buf[i].line, mprof_buf[i].name);
if (sbuf_overflowed(sb)) {
mtx_unlock_spin(&mprof_mtx);
@@ -291,6 +295,16 @@ _mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
mpp->cnt_max = now - acqtime;
mpp->cnt_tot += now - acqtime;
mpp->cnt_cur++;
+ /*
+ * There's a small race, really we should cmpxchg
+ * 0 with the current value, but that would bill
+ * the contention to the wrong lock instance if
+ * it followed this also.
+ */
+ mpp->cnt_contest_holding += m->mtx_contest_holding;
+ m->mtx_contest_holding = 0;
+ mpp->cnt_contest_locking += m->mtx_contest_locking;
+ m->mtx_contest_locking = 0;
unlock:
mtx_unlock_spin(&mprof_mtx);
}
@@ -381,6 +395,9 @@ _mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line)
#ifdef KTR
int cont_logged = 0;
#endif
+#ifdef MUTEX_PROFILING
+ int contested;
+#endif
if (mtx_owned(m)) {
KASSERT((m->mtx_object.lo_flags & LO_RECURSABLE) != 0,
@@ -398,8 +415,14 @@ _mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line)
"_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
m->mtx_object.lo_name, (void *)m->mtx_lock, file, line);
+#ifdef MUTEX_PROFILING
+ contested = 0;
+#endif
while (!_obtain_lock(m, td)) {
-
+#ifdef MUTEX_PROFILING
+ contested = 1;
+ atomic_add_int(&m->mtx_contest_holding, 1);
+#endif
ts = turnstile_lookup(&m->mtx_object);
v = m->mtx_lock;
@@ -425,7 +448,7 @@ _mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line)
MPASS(ts != NULL);
m->mtx_lock = (uintptr_t)td | MTX_CONTESTED;
turnstile_claim(ts);
- return;
+ break;
}
/*
@@ -489,6 +512,11 @@ _mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line)
m->mtx_object.lo_name, td, file, line);
}
#endif
+#ifdef MUTEX_PROFILING
+ if (contested)
+ m->mtx_contest_locking++;
+ m->mtx_contest_holding = 0;
+#endif
return;
}
OpenPOWER on IntegriCloud