author | sbruno <sbruno@FreeBSD.org> | 2015-02-13 00:29:57 +0000
committer | sbruno <sbruno@FreeBSD.org> | 2015-02-13 00:29:57 +0000
commit | 538802789a529fb5850e4d2625c62f36c1b0e2f1
tree | d0eec84147dc302e2a17fd024c99ad62a4bd538b /sys/kern/kern_lock.c
parent | 914dd97ca51bfe6301ae8bd78c4d92aec1e8aa48
MFC 272315 272757 274091 274902
r272315
Explicitly return None for negative event indices. Previously,
eventat(-1) returned the next-to-last event (Python's negative
indexing wraps around to the end of a sequence), so the back button
cycled back to the end of an event source instead of stopping at the
start.
r272757
Add schedgraph traces for callout handlers. Specifically, a callwheel logs
a running event each time it executes a callout function. The event
includes the function pointer, argument, and whether or not it was run from
hardware interrupt context. The callwheel is marked idle when each handler
completes. This effectively logs the duration of each callout routine in
the graph.
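
For reference, the bracketing these traces rely on in softclock_call_cc() looks roughly like the sketch below; the helper, its arguments, and the wheel-name parameter are illustrative stand-ins rather than code lifted from the commit (the real code keeps the per-CPU event name in struct callout_cpu):

```c
#include <sys/param.h>
#include <sys/ktr.h>

/*
 * Hypothetical helper showing the r272757 pattern: bracket each callout
 * handler with a "running"/"idle" state pair so schedgraph can draw the
 * handler's duration.  wheel_name stands in for the per-CPU event name
 * the real code stores in struct callout_cpu.
 */
static void
run_callout_traced(const char *wheel_name, void (*c_func)(void *),
    void *c_arg, int direct)
{
	/* Log "running" with the function pointer, argument, and context. */
	KTR_STATE3(KTR_SCHED, "callout", wheel_name, "running",
	    "func:%p", c_func, "arg:%p", c_arg, "direct:%d", direct);

	c_func(c_arg);		/* execute the callout handler */

	/* Mark the callwheel idle again once the handler returns. */
	KTR_STATE0(KTR_SCHED, "callout", wheel_name, "idle");
}
```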
r274091
Bind Ctrl-Q as a global hotkey to exit. Bind Ctrl-W as a hotkey to close
dialogs.
r274902
Add a new thread state "spinning" to schedgraph and add tracepoints at the
start and stop of spinning waits in lock primitives.
Reviewed by: jhb
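
In code, r274902's new state brackets each adaptive spin. A distilled sketch of the pattern the diff below applies inside lockmgr follows; the helper and its predicate parameter are hypothetical, since the real hunks open-code the spin loops:

```c
#include <sys/param.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <machine/cpu.h>

/*
 * Hypothetical helper distilling the lockmgr hunks below: emit
 * "spinning" before the busy-wait and "running" after it, so the
 * spin shows up as a distinct span on the thread's schedgraph row.
 */
static void
spin_traced(struct thread *td, struct lock_object *lo,
    int (*keep_spinning)(struct lock_object *))
{
	KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "spinning",
	    "lockname:\"%s\"", lo->lo_name);
	while (keep_spinning(lo))	/* stand-in for the owner check */
		cpu_spinwait();
	KTR_STATE0(KTR_SCHED, "thread", sched_tdname(td), "running");
}
```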
Diffstat (limited to 'sys/kern/kern_lock.c')
-rw-r--r-- | sys/kern/kern_lock.c | 20
1 file changed, 20 insertions(+), 0 deletions(-)
```diff
diff --git a/sys/kern/kern_lock.c b/sys/kern/kern_lock.c
index 3655389..ca2a359 100644
--- a/sys/kern/kern_lock.c
+++ b/sys/kern/kern_lock.c
@@ -583,6 +583,9 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
 			CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
 			    __func__, lk, owner);
+			KTR_STATE1(KTR_SCHED, "thread",
+			    sched_tdname(td), "spinning",
+			    "lockname:\"%s\"", lk->lock_object.lo_name);
 
 			/*
 			 * If we are holding also an interlock drop it
@@ -598,11 +601,16 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
 			while (LK_HOLDER(lk->lk_lock) == (uintptr_t)owner &&
 			    TD_IS_RUNNING(owner))
 				cpu_spinwait();
+			KTR_STATE0(KTR_SCHED, "thread",
+			    sched_tdname(td), "running");
 			GIANT_RESTORE();
 			continue;
 		} else if (LK_CAN_ADAPT(lk, flags) &&
 		    (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
 		    spintries < alk_retries) {
+			KTR_STATE1(KTR_SCHED, "thread",
+			    sched_tdname(td), "spinning",
+			    "lockname:\"%s\"", lk->lock_object.lo_name);
 			if (flags & LK_INTERLOCK) {
 				class->lc_unlock(ilk);
 				flags &= ~LK_INTERLOCK;
@@ -620,6 +628,8 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
 					break;
 				cpu_spinwait();
 			}
+			KTR_STATE0(KTR_SCHED, "thread",
+			    sched_tdname(td), "running");
 			GIANT_RESTORE();
 			if (i != alk_loops)
 				continue;
@@ -815,6 +825,9 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
 			CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
 			    __func__, lk, owner);
+			KTR_STATE1(KTR_SCHED, "thread",
+			    sched_tdname(td), "spinning",
+			    "lockname:\"%s\"", lk->lock_object.lo_name);
 
 			/*
 			 * If we are holding also an interlock drop it
@@ -830,6 +843,8 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
 			while (LK_HOLDER(lk->lk_lock) == (uintptr_t)owner &&
 			    TD_IS_RUNNING(owner))
 				cpu_spinwait();
+			KTR_STATE0(KTR_SCHED, "thread",
+			    sched_tdname(td), "running");
 			GIANT_RESTORE();
 			continue;
 		} else if (LK_CAN_ADAPT(lk, flags) &&
@@ -839,6 +854,9 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
 			    !atomic_cmpset_ptr(&lk->lk_lock, x,
 			    x | LK_EXCLUSIVE_SPINNERS))
 				continue;
+			KTR_STATE1(KTR_SCHED, "thread",
+			    sched_tdname(td), "spinning",
+			    "lockname:\"%s\"", lk->lock_object.lo_name);
 			if (flags & LK_INTERLOCK) {
 				class->lc_unlock(ilk);
 				flags &= ~LK_INTERLOCK;
@@ -855,6 +873,8 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
 					break;
 				cpu_spinwait();
 			}
+			KTR_STATE0(KTR_SCHED, "thread",
+			    sched_tdname(td), "running");
 			GIANT_RESTORE();
 			if (i != alk_loops)
 				continue;
```
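
To see these states in practice, the kernel needs KTR compiled in; the usual schedgraph workflow (option and flag spellings here are from the tool's documented usage, not part of this commit, so check the script header for your branch) is to build with `options KTR`, `options KTR_ENTRIES=32768`, `options KTR_COMPILE=(KTR_SCHED)` and `options KTR_MASK=(KTR_SCHED)`, capture a trace with `ktrdump -ct > ktr.out`, and feed it to tools/sched/schedgraph.py, where the spinning and callout spans from the revisions above appear as per-thread and per-callwheel rows.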