-rw-r--r--  sys/kern/kern_lock.c       | 20
-rw-r--r--  sys/kern/kern_mutex.c      | 11
-rw-r--r--  sys/kern/kern_rwlock.c     | 21
-rw-r--r--  sys/kern/kern_sx.c         | 17
-rw-r--r--  sys/kern/kern_timeout.c    | 14
-rw-r--r--  tools/sched/schedgraph.py  |  6
-rwxr-xr-x  usr.bin/man/man.sh         | 48
7 files changed, 125 insertions, 12 deletions
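
The pattern repeated throughout the lock diffs below is a matched pair of KTR(4) state events around every adaptive-spin loop: KTR_STATE1() marks the thread "spinning" and records the lock name, and KTR_STATE0() returns it to "running" once the spin ends, so schedgraph.py can draw the waits. A minimal sketch of the pattern follows; foo_owner() and foo_adaptive_spin() are hypothetical stand-ins for the per-primitive code, everything else is the real kernel API:

    #include <sys/param.h>
    #include <sys/ktr.h>
    #include <sys/lock.h>
    #include <sys/proc.h>
    #include <sys/sched.h>
    #include <machine/cpu.h>

    /* Hypothetical helper: returns the thread currently owning the lock. */
    static struct thread *foo_owner(struct lock_object *lo);

    static void
    foo_adaptive_spin(struct lock_object *lo, struct thread *owner)
    {
    	/* Mark this thread "spinning" and name the contested lock. */
    	KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
    	    "spinning", "lockname:\"%s\"", lo->lo_name);
    	/* Busy-wait while the owner is still running on another CPU. */
    	while (foo_owner(lo) == owner && TD_IS_RUNNING(owner))
    		cpu_spinwait();
    	/* Pair every "spinning" event with a "running" one. */
    	KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread), "running");
    }
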
diff --git a/sys/kern/kern_lock.c b/sys/kern/kern_lock.c
index 3655389..ca2a359 100644
--- a/sys/kern/kern_lock.c
+++ b/sys/kern/kern_lock.c
@@ -583,6 +583,9 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
 				CTR3(KTR_LOCK,
 				    "%s: spinning on %p held by %p",
 				    __func__, lk, owner);
+				KTR_STATE1(KTR_SCHED, "thread",
+				    sched_tdname(td), "spinning",
+				    "lockname:\"%s\"", lk->lock_object.lo_name);
 
 				/*
 				 * If we are holding also an interlock drop it
@@ -598,11 +601,16 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
 				while (LK_HOLDER(lk->lk_lock) ==
 				    (uintptr_t)owner && TD_IS_RUNNING(owner))
 					cpu_spinwait();
+				KTR_STATE0(KTR_SCHED, "thread",
+				    sched_tdname(td), "running");
 				GIANT_RESTORE();
 				continue;
 			} else if (LK_CAN_ADAPT(lk, flags) &&
 			    (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
 			    spintries < alk_retries) {
+				KTR_STATE1(KTR_SCHED, "thread",
+				    sched_tdname(td), "spinning",
+				    "lockname:\"%s\"", lk->lock_object.lo_name);
 				if (flags & LK_INTERLOCK) {
 					class->lc_unlock(ilk);
 					flags &= ~LK_INTERLOCK;
@@ -620,6 +628,8 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
 						break;
 					cpu_spinwait();
 				}
+				KTR_STATE0(KTR_SCHED, "thread",
+				    sched_tdname(td), "running");
 				GIANT_RESTORE();
 				if (i != alk_loops)
 					continue;
@@ -815,6 +825,9 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
 				CTR3(KTR_LOCK,
 				    "%s: spinning on %p held by %p",
 				    __func__, lk, owner);
+				KTR_STATE1(KTR_SCHED, "thread",
+				    sched_tdname(td), "spinning",
+				    "lockname:\"%s\"", lk->lock_object.lo_name);
 
 				/*
 				 * If we are holding also an interlock drop it
@@ -830,6 +843,8 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
 				while (LK_HOLDER(lk->lk_lock) ==
 				    (uintptr_t)owner && TD_IS_RUNNING(owner))
 					cpu_spinwait();
+				KTR_STATE0(KTR_SCHED, "thread",
+				    sched_tdname(td), "running");
 				GIANT_RESTORE();
 				continue;
 			} else if (LK_CAN_ADAPT(lk, flags) &&
@@ -839,6 +854,9 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
 				    !atomic_cmpset_ptr(&lk->lk_lock, x,
 				    x | LK_EXCLUSIVE_SPINNERS))
 					continue;
+				KTR_STATE1(KTR_SCHED, "thread",
+				    sched_tdname(td), "spinning",
+				    "lockname:\"%s\"", lk->lock_object.lo_name);
 				if (flags & LK_INTERLOCK) {
 					class->lc_unlock(ilk);
 					flags &= ~LK_INTERLOCK;
@@ -855,6 +873,8 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
 						break;
 					cpu_spinwait();
 				}
+				KTR_STATE0(KTR_SCHED, "thread",
+				    sched_tdname(td), "running");
 				GIANT_RESTORE();
 				if (i != alk_loops)
 					continue;
diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c
index e61a187..2d9a48d5 100644
--- a/sys/kern/kern_mutex.c
+++ b/sys/kern/kern_mutex.c
@@ -436,6 +436,10 @@ __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t tid, int opts,
 					CTR3(KTR_LOCK,
 					    "%s: spinning on %p held by %p",
 					    __func__, m, owner);
+				KTR_STATE1(KTR_SCHED, "thread",
+				    sched_tdname((struct thread *)tid),
+				    "spinning", "lockname:\"%s\"",
+				    m->lock_object.lo_name);
 				while (mtx_owner(m) == owner &&
 				    TD_IS_RUNNING(owner)) {
 					cpu_spinwait();
@@ -443,6 +447,9 @@ __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t tid, int opts,
 					spin_cnt++;
 #endif
 				}
+				KTR_STATE0(KTR_SCHED, "thread",
+				    sched_tdname((struct thread *)tid),
+				    "running");
 				continue;
 			}
 		}
@@ -579,6 +586,8 @@ _mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t tid, int opts,
 	if (LOCK_LOG_TEST(&m->lock_object, opts))
 		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);
+	KTR_STATE1(KTR_SCHED, "thread", sched_tdname((struct thread *)tid),
+	    "spinning", "lockname:\"%s\"", m->lock_object.lo_name);
 
 #ifdef HWPMC_HOOKS
 	PMC_SOFT_CALL( , , lock, failed);
@@ -604,6 +613,8 @@ _mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t tid, int opts,
 	if (LOCK_LOG_TEST(&m->lock_object, opts))
 		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);
+	KTR_STATE0(KTR_SCHED, "thread", sched_tdname((struct thread *)tid),
+	    "running");
 
 	LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_SPIN_LOCK_ACQUIRE, m,
 	    contested, waittime, (file), (line));
diff --git a/sys/kern/kern_rwlock.c b/sys/kern/kern_rwlock.c
index 5b356b2..2d73220 100644
--- a/sys/kern/kern_rwlock.c
+++ b/sys/kern/kern_rwlock.c
@@ -44,6 +44,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/mutex.h>
 #include <sys/proc.h>
 #include <sys/rwlock.h>
+#include <sys/sched.h>
 #include <sys/sysctl.h>
 #include <sys/systm.h>
 #include <sys/turnstile.h>
@@ -423,6 +424,9 @@ __rw_rlock(volatile uintptr_t *c, const char *file, int line)
 				CTR3(KTR_LOCK,
 				    "%s: spinning on %p held by %p",
 				    __func__, rw, owner);
+				KTR_STATE1(KTR_SCHED, "thread",
+				    sched_tdname(curthread), "spinning",
+				    "lockname:\"%s\"", rw->lock_object.lo_name);
 				while ((struct thread*)RW_OWNER(rw->rw_lock) ==
 				    owner && TD_IS_RUNNING(owner)) {
 					cpu_spinwait();
@@ -430,16 +434,23 @@ __rw_rlock(volatile uintptr_t *c, const char *file, int line)
 					spin_cnt++;
 #endif
 				}
+				KTR_STATE0(KTR_SCHED, "thread",
+				    sched_tdname(curthread), "running");
 				continue;
 			}
 		} else if (spintries < rowner_retries) {
 			spintries++;
+			KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
+			    "spinning", "lockname:\"%s\"",
+			    rw->lock_object.lo_name);
 			for (i = 0; i < rowner_loops; i++) {
 				v = rw->rw_lock;
 				if ((v & RW_LOCK_READ) == 0 || RW_CAN_READ(v))
 					break;
 				cpu_spinwait();
 			}
+			KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
+			    "running");
 			if (i != rowner_loops)
 				continue;
 		}
@@ -759,6 +770,9 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t tid, const char *file,
 			if (LOCK_LOG_TEST(&rw->lock_object, 0))
 				CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
 				    __func__, rw, owner);
+			KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
+			    "spinning", "lockname:\"%s\"",
+			    rw->lock_object.lo_name);
 			while ((struct thread*)RW_OWNER(rw->rw_lock) == owner &&
 			    TD_IS_RUNNING(owner)) {
 				cpu_spinwait();
@@ -766,6 +780,8 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t tid, const char *file,
 				spin_cnt++;
 #endif
 			}
+			KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
+			    "running");
 			continue;
 		}
 		if ((v & RW_LOCK_READ) && RW_READERS(v) &&
@@ -777,11 +793,16 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t tid, const char *file,
 				}
 			}
 			spintries++;
+			KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
+			    "spinning", "lockname:\"%s\"",
+			    rw->lock_object.lo_name);
 			for (i = 0; i < rowner_loops; i++) {
 				if ((rw->rw_lock & RW_LOCK_WRITE_SPINNER) == 0)
 					break;
 				cpu_spinwait();
 			}
+			KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
+			    "running");
 #ifdef KDTRACE_HOOKS
 			spin_cnt += rowner_loops - i;
 #endif
diff --git a/sys/kern/kern_sx.c b/sys/kern/kern_sx.c
index a1fc36a..e588450 100644
--- a/sys/kern/kern_sx.c
+++ b/sys/kern/kern_sx.c
@@ -51,6 +51,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/lock.h>
 #include <sys/mutex.h>
 #include <sys/proc.h>
+#include <sys/sched.h>
 #include <sys/sleepqueue.h>
 #include <sys/sx.h>
 #include <sys/sysctl.h>
@@ -560,6 +561,10 @@ _sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
 					CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
 					    __func__, sx, owner);
+				KTR_STATE1(KTR_SCHED, "thread",
+				    sched_tdname(curthread), "spinning",
+				    "lockname:\"%s\"",
+				    sx->lock_object.lo_name);
 				GIANT_SAVE();
 				while (SX_OWNER(sx->sx_lock) == x &&
 				    TD_IS_RUNNING(owner)) {
@@ -568,9 +573,14 @@ _sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
 					spin_cnt++;
 #endif
 				}
+				KTR_STATE0(KTR_SCHED, "thread",
+				    sched_tdname(curthread), "running");
 				continue;
 			}
 		} else if (SX_SHARERS(x) && spintries < asx_retries) {
+			KTR_STATE1(KTR_SCHED, "thread",
+			    sched_tdname(curthread), "spinning",
+			    "lockname:\"%s\"", sx->lock_object.lo_name);
 			GIANT_SAVE();
 			spintries++;
 			for (i = 0; i < asx_loops; i++) {
@@ -587,6 +597,8 @@ _sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
 				spin_cnt++;
 #endif
 			}
+			KTR_STATE0(KTR_SCHED, "thread",
+			    sched_tdname(curthread), "running");
 			if (i != asx_loops)
 				continue;
 		}
@@ -849,6 +861,9 @@ _sx_slock_hard(struct sx *sx, int opts, const char *file, int line)
 				CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
 				    __func__, sx, owner);
+				KTR_STATE1(KTR_SCHED, "thread",
+				    sched_tdname(curthread), "spinning",
+				    "lockname:\"%s\"", sx->lock_object.lo_name);
 				GIANT_SAVE();
 				while (SX_OWNER(sx->sx_lock) == x &&
 				    TD_IS_RUNNING(owner)) {
@@ -857,6 +872,8 @@ _sx_slock_hard(struct sx *sx, int opts, const char *file, int line)
 #endif
 					cpu_spinwait();
 				}
+				KTR_STATE0(KTR_SCHED, "thread",
+				    sched_tdname(curthread), "running");
 				continue;
 			}
 		}
diff --git a/sys/kern/kern_timeout.c b/sys/kern/kern_timeout.c
index 2961d73..af8ff47 100644
--- a/sys/kern/kern_timeout.c
+++ b/sys/kern/kern_timeout.c
@@ -150,6 +150,7 @@ struct callout_cpu {
 	sbintime_t		cc_lastscan;
 	void			*cc_cookie;
 	u_int			cc_bucket;
+	char			cc_ktr_event_name[20];
 };
 
 #define	cc_exec_curr		cc_exec_entity[0].cc_curr
@@ -188,7 +189,7 @@ struct callout_cpu cc_cpu;
 
 static int timeout_cpu;
 
-static void	callout_cpu_init(struct callout_cpu *cc);
+static void	callout_cpu_init(struct callout_cpu *cc, int cpu);
 static void	softclock_call_cc(struct callout *c, struct callout_cpu *cc,
 #ifdef CALLOUT_PROFILING
 		    int *mpcalls, int *lockcalls, int *gcalls,
@@ -283,7 +284,7 @@ callout_callwheel_init(void *dummy)
 	cc = CC_CPU(timeout_cpu);
 	cc->cc_callout = malloc(ncallout * sizeof(struct callout),
 	    M_CALLOUT, M_WAITOK);
-	callout_cpu_init(cc);
+	callout_cpu_init(cc, timeout_cpu);
 }
 SYSINIT(callwheel_init, SI_SUB_CPU, SI_ORDER_ANY, callout_callwheel_init, NULL);
 
@@ -291,7 +292,7 @@ SYSINIT(callwheel_init, SI_SUB_CPU, SI_ORDER_ANY, callout_callwheel_init, NULL);
  * Initialize the per-cpu callout structures.
  */
 static void
-callout_cpu_init(struct callout_cpu *cc)
+callout_cpu_init(struct callout_cpu *cc, int cpu)
 {
 	struct callout *c;
 	int i;
@@ -306,6 +307,8 @@ callout_cpu_init(struct callout_cpu *cc)
 	cc->cc_firstevent = INT64_MAX;
 	for (i = 0; i < 2; i++)
 		cc_cce_cleanup(cc, i);
+	snprintf(cc->cc_ktr_event_name, sizeof(cc->cc_ktr_event_name),
+	    "callwheel cpu %d", cpu);
 	if (cc->cc_callout == NULL)	/* Only cpu0 handles timeout(9) */
 		return;
 	for (i = 0; i < ncallout; i++) {
@@ -367,7 +370,7 @@ start_softclock(void *dummy)
 			continue;
 		cc = CC_CPU(cpu);
 		cc->cc_callout = NULL;	/* Only cpu0 handles timeout(9). */
-		callout_cpu_init(cc);
+		callout_cpu_init(cc, cpu);
 		if (swi_add(NULL, "clock", softclock, cc, SWI_CLOCK,
 		    INTR_MPSAFE, &cc->cc_cookie))
 			panic("died while creating standard software ithreads");
@@ -674,6 +677,8 @@ softclock_call_cc(struct callout *c, struct callout_cpu *cc,
 		CTR3(KTR_CALLOUT, "callout %p func %p arg %p",
 		    c, c_func, c_arg);
 	}
+	KTR_STATE3(KTR_SCHED, "callout", cc->cc_ktr_event_name, "running",
+	    "func:%p", c_func, "arg:%p", c_arg, "direct:%d", direct);
 #if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING)
 	sbt1 = sbinuptime();
 #endif
@@ -696,6 +701,7 @@ softclock_call_cc(struct callout *c, struct callout_cpu *cc,
 		lastfunc = c_func;
 	}
 #endif
+	KTR_STATE0(KTR_SCHED, "callout", cc->cc_ktr_event_name, "idle");
 	CTR1(KTR_CALLOUT, "callout %p finished", c);
 	if ((c_flags & CALLOUT_RETURNUNLOCKED) == 0)
 		class->lc_unlock(c_lock);
diff --git a/tools/sched/schedgraph.py b/tools/sched/schedgraph.py
index 4335574..48adc08 100644
--- a/tools/sched/schedgraph.py
+++ b/tools/sched/schedgraph.py
@@ -70,6 +70,7 @@ eventcolors = [
 	("count", "red"),
 	("running", "green"),
 	("idle", "grey"),
+	("spinning", "red"),
 	("yielding", "yellow"),
 	("swapped", "violet"),
 	("suspended", "purple"),
@@ -80,8 +81,6 @@ eventcolors = [
 	("runq rem", "yellow"),
 	("thread exit", "grey"),
 	("proc exit", "grey"),
-	("callwheel idle", "grey"),
-	("callout running", "green"),
 	("lock acquire", "blue"),
 	("lock contest", "purple"),
 	("failed lock try", "red"),
@@ -856,7 +855,7 @@ class EventSource:
 		return (Y_EVENTSOURCE)
 
 	def eventat(self, i):
-		if (i >= len(self.events)):
+		if (i >= len(self.events) or i < 0):
 			return (None)
 		event = self.events[i]
 		return (event)
@@ -903,7 +902,6 @@ class KTRFile:
 		self.timestamp_f = None
 		self.timestamp_l = None
 		self.locks = {}
-		self.callwheels = {}
 		self.ticks = {}
 		self.load = {}
 		self.crit = {}
diff --git a/usr.bin/man/man.sh b/usr.bin/man/man.sh
index 0f6249a..ac66eec 100755
--- a/usr.bin/man/man.sh
+++ b/usr.bin/man/man.sh
@@ -276,11 +276,8 @@ man_check_for_so() {
 	return 0
 }
 
-# Usage: man_display_page
-# Display either the manpage or catpage depending on the use_cat variable
 man_display_page() {
-	local EQN NROFF PIC TBL TROFF REFER VGRIND
-	local IFS l nroff_dev pipeline preproc_arg tool
+	local IFS pipeline preconv_enc testline
 
 	# We are called with IFS set to colon. This causes really weird
 	# things to happen for the variables that have spaces in them.
@@ -312,6 +309,49 @@ man_display_page() {
 		return
 	fi
 
+	case "${manpage}" in
+	*.${man_charset}/*)
+		case "$man_charset" in
+		ISO8859-1)	preconv_enc="latin-1" ;;
+		ISO8859-15)	preconv_enc="latin-1" ;;
+		UTF-8)		preconv_enc="utf-8" ;;
+		esac
+		;;
+	esac
+
+	if [ -n "$preconv_enc" ]; then
+		pipeline="preconv -e $preconv_enc |"
+	fi
+	testline="$pipeline mandoc -Tlint -Werror 2>/dev/null"
+	pipeline="$pipeline mandoc -Tlocale | $MANPAGER"
+
+	if ! eval "$cattool $manpage | $testline" ;then
+		if which -s groff; then
+			man_display_page_groff
+		else
+			echo "This manpage needs groff(1) to be rendered" >&2
+			echo "First install groff(1): " >&2
+			echo "pkg install groff " >&2
+			ret=1
+		fi
+		return
+	fi
+
+	if [ $debug -gt 0 ]; then
+		decho "Command: $cattool $manpage | $pipeline"
+		ret=0
+	else
+		eval "$cattool $manpage | $pipeline"
+		ret=$?
+	fi
+}
+
+# Usage: man_display_page
+# Display either the manpage or catpage depending on the use_cat variable
+man_display_page_groff() {
+	local EQN NROFF PIC TBL TROFF REFER VGRIND
+	local IFS l nroff_dev pipeline preproc_arg tool
+
 	# So, we really do need to parse the manpage.  First, figure out the
 	# device flag (-T) we have to pass to eqn(1) and groff(1).  Then,
 	# setup the pipeline of commands based on the user's request.
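
The kern_timeout.c hunks apply the same idea to the callwheel: each callout_cpu gets a stable event-source name ("callwheel cpu %d"; the 20-byte buffer leaves room for five-digit CPU IDs plus the terminating NUL), and softclock_call_cc() brackets each handler with "running"/"idle" states. A rough sketch of that pairing, using a hypothetical my_cc structure and run_callout() wrapper rather than the real softclock path:

    #include <sys/param.h>
    #include <sys/ktr.h>

    struct my_cc {			/* stand-in for struct callout_cpu */
    	char	cc_ktr_event_name[20];
    };

    static void
    run_callout(struct my_cc *cc, void (*c_func)(void *), void *c_arg,
        int direct)
    {
    	/* One event source per CPU; schedgraph draws a row for each. */
    	KTR_STATE3(KTR_SCHED, "callout", cc->cc_ktr_event_name, "running",
    	    "func:%p", c_func, "arg:%p", c_arg, "direct:%d", direct);
    	c_func(c_arg);		/* the callout handler itself */
    	KTR_STATE0(KTR_SCHED, "callout", cc->cc_ktr_event_name, "idle");
    }

These states only appear in a kernel built with KTR support and with KTR_SCHED unmasked; the resulting trace is typically captured with ktrdump(8) and fed to tools/sched/schedgraph.py, which with this change draws "spinning" intervals in red.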