From 2d008d4579eea1dabbd9175371c3b0c3d9437bf8 Mon Sep 17 00:00:00 2001
From: avg
Date: Fri, 12 Jun 2015 10:01:24 +0000
Subject: several lockstat improvements

0. For spin events report the time spent spinning, not a loop count.
While a loop count is much easier and cheaper to obtain, it is hard to
reason about the reported numbers, especially for adaptive locks where
both spinning and sleeping can happen.  So, it's better to compare
apples to apples.

1. Teach lockstat about FreeBSD rw locks.
This is done in part by changing the corresponding probes and in part
by changing what probes lockstat should expect.

2. Teach lockstat that rw locks are adaptive and can spin on FreeBSD.

3. Report lock acquisition events for successful rw try-lock operations.

4. Teach lockstat about FreeBSD sx locks.
Reporting of events for those locks completely mirrors rw locks.

5. Report spin and block events before the acquisition event.
This is the behavior documented for the upstream, so it makes sense to
stick to it.  Note that, because of the FreeBSD adaptive lock
implementations, both the spin and block events may be reported for the
same acquisition, while the upstream reports only one of them.

Differential Revision:  https://reviews.freebsd.org/D2727
Reviewed by:    markj
MFC after:      17 days
Relnotes:       yes
Sponsored by:   ClusterHQ
---
 sys/kern/kern_mutex.c | 34 +++++++++++++++++++++++++---------
 1 file changed, 25 insertions(+), 9 deletions(-)

(limited to 'sys/kern/kern_mutex.c')

diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c
index ea6f7a6..72fc434 100644
--- a/sys/kern/kern_mutex.c
+++ b/sys/kern/kern_mutex.c
@@ -384,6 +384,7 @@ __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t tid, int opts,
         uint64_t spin_cnt = 0;
         uint64_t sleep_cnt = 0;
         int64_t sleep_time = 0;
+        int64_t all_time = 0;
 #endif
 
         if (SCHEDULER_STOPPED())
@@ -414,6 +415,9 @@ __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t tid, int opts,
                 CTR4(KTR_LOCK,
                     "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
                     m->lock_object.lo_name, (void *)m->mtx_lock, file, line);
+#ifdef KDTRACE_HOOKS
+        all_time -= lockstat_nsecs();
+#endif
 
         while (!_mtx_obtain_lock(m, tid)) {
 #ifdef KDTRACE_HOOKS
@@ -517,6 +521,9 @@ __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t tid, int opts,
                 sleep_cnt++;
 #endif
         }
+#ifdef KDTRACE_HOOKS
+        all_time += lockstat_nsecs();
+#endif
 #ifdef KTR
         if (cont_logged) {
                 CTR4(KTR_CONTENTION,
@@ -534,7 +541,7 @@ __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t tid, int opts,
          * Only record the loops spinning and not sleeping.
          */
         if (spin_cnt > sleep_cnt)
-                LOCKSTAT_RECORD1(LS_MTX_LOCK_SPIN, m, (spin_cnt - sleep_cnt));
+                LOCKSTAT_RECORD1(LS_MTX_LOCK_SPIN, m, (all_time - sleep_time));
 #endif
 }
 
@@ -574,6 +581,9 @@ _mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t tid, int opts,
         int contested = 0;
         uint64_t waittime = 0;
 #endif
+#ifdef KDTRACE_HOOKS
+        int64_t spin_time = 0;
+#endif
 
         if (SCHEDULER_STOPPED())
                 return;
@@ -589,6 +599,9 @@ _mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t tid, int opts,
         PMC_SOFT_CALL( , , lock, failed);
 #endif
         lock_profile_obtain_lock_failed(&m->lock_object, &contested, &waittime);
+#ifdef KDTRACE_HOOKS
+        spin_time -= lockstat_nsecs();
+#endif
         while (!_mtx_obtain_lock(m, tid)) {
 
                 /* Give interrupts a chance while we spin. */
@@ -606,6 +619,9 @@ _mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t tid, int opts,
                 }
                 spinlock_enter();
         }
+#ifdef KDTRACE_HOOKS
+        spin_time += lockstat_nsecs();
+#endif
 
         if (LOCK_LOG_TEST(&m->lock_object, opts))
                 CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);
@@ -614,7 +630,7 @@ _mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t tid, int opts,
 
         LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_SPIN_LOCK_ACQUIRE, m,
             contested, waittime, (file), (line));
-        LOCKSTAT_RECORD1(LS_MTX_SPIN_LOCK_SPIN, m, i);
+        LOCKSTAT_RECORD1(LS_MTX_SPIN_LOCK_SPIN, m, spin_time);
 }
 #endif /* SMP */
 
@@ -629,7 +645,7 @@ thread_lock_flags_(struct thread *td, int opts, const char *file, int line)
         uint64_t waittime = 0;
 #endif
 #ifdef KDTRACE_HOOKS
-        uint64_t spin_cnt = 0;
+        int64_t spin_time = 0;
 #endif
 
         i = 0;
@@ -638,6 +654,9 @@ thread_lock_flags_(struct thread *td, int opts, const char *file, int line)
         if (SCHEDULER_STOPPED())
                 return;
 
+#ifdef KDTRACE_HOOKS
+        spin_time -= lockstat_nsecs();
+#endif
         for (;;) {
retry:
                 spinlock_enter();
@@ -654,9 +673,6 @@ retry:
                         WITNESS_CHECKORDER(&m->lock_object,
                             opts | LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);
                 while (!_mtx_obtain_lock(m, tid)) {
-#ifdef KDTRACE_HOOKS
-                        spin_cnt++;
-#endif
                         if (m->mtx_lock == tid) {
                                 m->mtx_recurse++;
                                 break;
@@ -685,17 +701,17 @@ retry:
                 if (m == td->td_lock)
                         break;
                 __mtx_unlock_spin(m);   /* does spinlock_exit() */
+        }
 #ifdef KDTRACE_HOOKS
-                spin_cnt++;
+        spin_time += lockstat_nsecs();
 #endif
-        }
         if (m->mtx_recurse == 0)
                 LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_SPIN_LOCK_ACQUIRE,
                     m, contested, waittime, (file), (line));
         LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
             line);
         WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
-        LOCKSTAT_RECORD1(LS_THREAD_LOCK_SPIN, m, spin_cnt);
+        LOCKSTAT_RECORD1(LS_THREAD_LOCK_SPIN, m, spin_time);
 }
 
 struct mtx *
--
cgit v1.1
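
Note on the measurement idiom (not part of the patch itself): each converted
site primes a signed accumulator with a negative timestamp before the
contention loop and adds the final timestamp after it, so the variable ends up
holding the elapsed nanoseconds; for the adaptive mutex path the pure spin
time is then all_time - sleep_time, since sleep time is accumulated
separately.  Below is a minimal userland sketch of that idiom, not FreeBSD
code: lockstat_nsecs() is emulated here with clock_gettime(), and the busy
loop stands in for a contended _mtx_obtain_lock() retry loop.

        /*
         * Sketch of the bracketing pattern used in this patch.  All names in
         * main() are placeholders; only the accumulate-elapsed-time idiom is
         * meant to mirror the kernel code.
         */
        #include <stdint.h>
        #include <stdio.h>
        #include <time.h>

        static int64_t
        lockstat_nsecs(void)
        {
                struct timespec ts;

                /* Userland stand-in for the kernel's lockstat_nsecs(). */
                clock_gettime(CLOCK_MONOTONIC, &ts);
                return ((int64_t)ts.tv_sec * 1000000000 + ts.tv_nsec);
        }

        int
        main(void)
        {
                int64_t all_time = 0;
                volatile long busy = 10000000;  /* stand-in for a contended lock */

                all_time -= lockstat_nsecs();   /* start of contention window */
                while (busy != 0)               /* stand-in for !_mtx_obtain_lock() */
                        busy--;
                all_time += lockstat_nsecs();   /* end of contention window */

                /* This is the value the patch feeds to LOCKSTAT_RECORD1(). */
                printf("spun for %jd ns\n", (intmax_t)all_time);
                return (0);
        }

The trade-off item 0 describes is visible here: bracketing the whole loop
costs two timestamp reads per contended acquisition (with KDTRACE_HOOKS
compiled in) instead of one cheap counter increment per iteration, but the
result is a duration that can be compared directly with sleep times.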