 sys/kern/kern_lock.c      | 20
 sys/kern/kern_mutex.c     | 11
 sys/kern/kern_rwlock.c    | 21
 sys/kern/kern_sx.c        | 17
 sys/kern/kern_timeout.c   | 14
 tools/sched/schedgraph.py | 23
 6 files changed, 98 insertions(+), 8 deletions(-)
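Every kernel hunk below applies the same bracketing pattern: emit a KTR_SCHED
"spinning" state event before an adaptive-spin loop and a matching "running"
event once the spin ends, so schedgraph.py can draw the interval. A minimal
sketch of the shape, assuming a kernel built with options KTR and a
KTR_COMPILE/KTR_MASK that includes KTR_SCHED (the helper itself is
illustrative, not part of the commit):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <machine/cpu.h>

/* Illustrative only: busy-wait while the lock owner stays on CPU. */
static void
spin_with_trace(struct lock_object *lo, struct thread *owner)
{

	/*
	 * Open the "spinning" interval; the lockname attribute lets
	 * schedgraph.py label it with the contended lock's name.
	 */
	KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
	    "spinning", "lockname:\"%s\"", lo->lo_name);
	while (TD_IS_RUNNING(owner))
		cpu_spinwait();
	/* Close the interval: the thread is "running" again. */
	KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread), "running");
}

With such a kernel the trace buffer can be dumped with ktrdump(8) and loaded
into schedgraph.py, which the tools/sched hunks at the bottom teach about the
new "spinning" state.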
diff --git a/sys/kern/kern_lock.c b/sys/kern/kern_lock.c
index 3655389..ca2a359 100644
--- a/sys/kern/kern_lock.c
+++ b/sys/kern/kern_lock.c
@@ -583,6 +583,9 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
CTR3(KTR_LOCK,
"%s: spinning on %p held by %p",
__func__, lk, owner);
+ KTR_STATE1(KTR_SCHED, "thread",
+ sched_tdname(td), "spinning",
+ "lockname:\"%s\"", lk->lock_object.lo_name);
/*
* If we are holding also an interlock drop it
@@ -598,11 +601,16 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
while (LK_HOLDER(lk->lk_lock) ==
(uintptr_t)owner && TD_IS_RUNNING(owner))
cpu_spinwait();
+ KTR_STATE0(KTR_SCHED, "thread",
+ sched_tdname(td), "running");
GIANT_RESTORE();
continue;
} else if (LK_CAN_ADAPT(lk, flags) &&
(x & LK_SHARE) != 0 && LK_SHARERS(x) &&
spintries < alk_retries) {
+ KTR_STATE1(KTR_SCHED, "thread",
+ sched_tdname(td), "spinning",
+ "lockname:\"%s\"", lk->lock_object.lo_name);
if (flags & LK_INTERLOCK) {
class->lc_unlock(ilk);
flags &= ~LK_INTERLOCK;
@@ -620,6 +628,8 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
break;
cpu_spinwait();
}
+ KTR_STATE0(KTR_SCHED, "thread",
+ sched_tdname(td), "running");
GIANT_RESTORE();
if (i != alk_loops)
continue;
@@ -815,6 +825,9 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
CTR3(KTR_LOCK,
"%s: spinning on %p held by %p",
__func__, lk, owner);
+ KTR_STATE1(KTR_SCHED, "thread",
+ sched_tdname(td), "spinning",
+ "lockname:\"%s\"", lk->lock_object.lo_name);
/*
* If we are holding also an interlock drop it
@@ -830,6 +843,8 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
while (LK_HOLDER(lk->lk_lock) ==
(uintptr_t)owner && TD_IS_RUNNING(owner))
cpu_spinwait();
+ KTR_STATE0(KTR_SCHED, "thread",
+ sched_tdname(td), "running");
GIANT_RESTORE();
continue;
} else if (LK_CAN_ADAPT(lk, flags) &&
@@ -839,6 +854,9 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
!atomic_cmpset_ptr(&lk->lk_lock, x,
x | LK_EXCLUSIVE_SPINNERS))
continue;
+ KTR_STATE1(KTR_SCHED, "thread",
+ sched_tdname(td), "spinning",
+ "lockname:\"%s\"", lk->lock_object.lo_name);
if (flags & LK_INTERLOCK) {
class->lc_unlock(ilk);
flags &= ~LK_INTERLOCK;
@@ -855,6 +873,8 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
break;
cpu_spinwait();
}
+ KTR_STATE0(KTR_SCHED, "thread",
+ sched_tdname(td), "running");
GIANT_RESTORE();
if (i != alk_loops)
continue;
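Two distinct shapes appear in the lockmgr hunks above: the owner-spin path
brackets an open-ended wait for a running owner, while the adaptive path
brackets one bounded retry round. A condensed sketch of the bounded round,
reusing kern_lock.c's spintries/alk_retries/alk_loops names (the exit test
is simplified here):

	/*
	 * One adaptive round: spin at most alk_loops iterations, tracing
	 * the whole round as a single "spinning" interval.
	 */
	spintries++;
	KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "spinning",
	    "lockname:\"%s\"", lk->lock_object.lo_name);
	for (i = 0; i < alk_loops; i++) {
		if ((lk->lk_lock & LK_SHARE) == 0)	/* simplified */
			break;
		cpu_spinwait();
	}
	KTR_STATE0(KTR_SCHED, "thread", sched_tdname(td), "running");
	if (i != alk_loops)	/* the lock changed state: retry acquire */
		continue;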
diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c
index e61a187..2d9a48d5 100644
--- a/sys/kern/kern_mutex.c
+++ b/sys/kern/kern_mutex.c
@@ -436,6 +436,10 @@ __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t tid, int opts,
CTR3(KTR_LOCK,
"%s: spinning on %p held by %p",
__func__, m, owner);
+ KTR_STATE1(KTR_SCHED, "thread",
+ sched_tdname((struct thread *)tid),
+ "spinning", "lockname:\"%s\"",
+ m->lock_object.lo_name);
while (mtx_owner(m) == owner &&
TD_IS_RUNNING(owner)) {
cpu_spinwait();
@@ -443,6 +447,9 @@ __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t tid, int opts,
spin_cnt++;
#endif
}
+ KTR_STATE0(KTR_SCHED, "thread",
+ sched_tdname((struct thread *)tid),
+ "running");
continue;
}
}
@@ -579,6 +586,8 @@ _mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t tid, int opts,
if (LOCK_LOG_TEST(&m->lock_object, opts))
CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);
+ KTR_STATE1(KTR_SCHED, "thread", sched_tdname((struct thread *)tid),
+ "spinning", "lockname:\"%s\"", m->lock_object.lo_name);
#ifdef HWPMC_HOOKS
PMC_SOFT_CALL( , , lock, failed);
@@ -604,6 +613,8 @@ _mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t tid, int opts,
if (LOCK_LOG_TEST(&m->lock_object, opts))
CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);
+ KTR_STATE0(KTR_SCHED, "thread", sched_tdname((struct thread *)tid),
+ "running");
LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_SPIN_LOCK_ACQUIRE, m,
contested, waittime, (file), (line));
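The mutex hunks pass sched_tdname() a cast of tid rather than curthread:
__mtx_lock_sleep() and _mtx_lock_spin_cookie() are entered with
tid == (uintptr_t)curthread, because an owned mutex stores its owner's
thread pointer in the lock word, so the cast simply recovers the acquiring
thread. Sketched as a hypothetical helper (not from the commit):

static void
mtx_spin_trace_enter(struct mtx *m, uintptr_t tid)
{
	/* tid is (uintptr_t)curthread in the mtx(9) slow paths. */
	struct thread *td = (struct thread *)tid;

	KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "spinning",
	    "lockname:\"%s\"", m->lock_object.lo_name);
}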
diff --git a/sys/kern/kern_rwlock.c b/sys/kern/kern_rwlock.c
index 5b356b2..2d73220 100644
--- a/sys/kern/kern_rwlock.c
+++ b/sys/kern/kern_rwlock.c
@@ -44,6 +44,7 @@ __FBSDID("$FreeBSD$");
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
+#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/turnstile.h>
@@ -423,6 +424,9 @@ __rw_rlock(volatile uintptr_t *c, const char *file, int line)
CTR3(KTR_LOCK,
"%s: spinning on %p held by %p",
__func__, rw, owner);
+ KTR_STATE1(KTR_SCHED, "thread",
+ sched_tdname(curthread), "spinning",
+ "lockname:\"%s\"", rw->lock_object.lo_name);
while ((struct thread*)RW_OWNER(rw->rw_lock) ==
owner && TD_IS_RUNNING(owner)) {
cpu_spinwait();
@@ -430,16 +434,23 @@ __rw_rlock(volatile uintptr_t *c, const char *file, int line)
spin_cnt++;
#endif
}
+ KTR_STATE0(KTR_SCHED, "thread",
+ sched_tdname(curthread), "running");
continue;
}
} else if (spintries < rowner_retries) {
spintries++;
+ KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
+ "spinning", "lockname:\"%s\"",
+ rw->lock_object.lo_name);
for (i = 0; i < rowner_loops; i++) {
v = rw->rw_lock;
if ((v & RW_LOCK_READ) == 0 || RW_CAN_READ(v))
break;
cpu_spinwait();
}
+ KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
+ "running");
if (i != rowner_loops)
continue;
}
@@ -759,6 +770,9 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t tid, const char *file,
if (LOCK_LOG_TEST(&rw->lock_object, 0))
CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
__func__, rw, owner);
+ KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
+ "spinning", "lockname:\"%s\"",
+ rw->lock_object.lo_name);
while ((struct thread*)RW_OWNER(rw->rw_lock) == owner &&
TD_IS_RUNNING(owner)) {
cpu_spinwait();
@@ -766,6 +780,8 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t tid, const char *file,
spin_cnt++;
#endif
}
+ KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
+ "running");
continue;
}
if ((v & RW_LOCK_READ) && RW_READERS(v) &&
@@ -777,11 +793,16 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t tid, const char *file,
}
}
spintries++;
+ KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
+ "spinning", "lockname:\"%s\"",
+ rw->lock_object.lo_name);
for (i = 0; i < rowner_loops; i++) {
if ((rw->rw_lock & RW_LOCK_WRITE_SPINNER) == 0)
break;
cpu_spinwait();
}
+ KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
+ "running");
#ifdef KDTRACE_HOOKS
spin_cnt += rowner_loops - i;
#endif
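The new #include <sys/sched.h> at the top of this file (and of kern_sx.c
below) is what declares sched_tdname(); kern_lock.c and kern_mutex.c already
pull it in, directly or transitively. For reference, the declaration as it
appears in sys/sched.h:

char	*sched_tdname(struct thread *td);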
diff --git a/sys/kern/kern_sx.c b/sys/kern/kern_sx.c
index a1fc36a..e588450 100644
--- a/sys/kern/kern_sx.c
+++ b/sys/kern/kern_sx.c
@@ -51,6 +51,7 @@ __FBSDID("$FreeBSD$");
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
+#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
@@ -560,6 +561,10 @@ _sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
CTR3(KTR_LOCK,
"%s: spinning on %p held by %p",
__func__, sx, owner);
+ KTR_STATE1(KTR_SCHED, "thread",
+ sched_tdname(curthread), "spinning",
+ "lockname:\"%s\"",
+ sx->lock_object.lo_name);
GIANT_SAVE();
while (SX_OWNER(sx->sx_lock) == x &&
TD_IS_RUNNING(owner)) {
@@ -568,9 +573,14 @@ _sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
spin_cnt++;
#endif
}
+ KTR_STATE0(KTR_SCHED, "thread",
+ sched_tdname(curthread), "running");
continue;
}
} else if (SX_SHARERS(x) && spintries < asx_retries) {
+ KTR_STATE1(KTR_SCHED, "thread",
+ sched_tdname(curthread), "spinning",
+ "lockname:\"%s\"", sx->lock_object.lo_name);
GIANT_SAVE();
spintries++;
for (i = 0; i < asx_loops; i++) {
@@ -587,6 +597,8 @@ _sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
spin_cnt++;
#endif
}
+ KTR_STATE0(KTR_SCHED, "thread",
+ sched_tdname(curthread), "running");
if (i != asx_loops)
continue;
}
@@ -849,6 +861,9 @@ _sx_slock_hard(struct sx *sx, int opts, const char *file, int line)
CTR3(KTR_LOCK,
"%s: spinning on %p held by %p",
__func__, sx, owner);
+ KTR_STATE1(KTR_SCHED, "thread",
+ sched_tdname(curthread), "spinning",
+ "lockname:\"%s\"", sx->lock_object.lo_name);
GIANT_SAVE();
while (SX_OWNER(sx->sx_lock) == x &&
TD_IS_RUNNING(owner)) {
@@ -857,6 +872,8 @@ _sx_slock_hard(struct sx *sx, int opts, const char *file, int line)
#endif
cpu_spinwait();
}
+ KTR_STATE0(KTR_SCHED, "thread",
+ sched_tdname(curthread), "running");
continue;
}
}
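One ordering detail in the sx hunks: the "spinning" event is logged before
GIANT_SAVE(), so any time spent dropping Giant ahead of the busy-wait is
charged to the spin interval rather than to "running". Condensed, with the
same internals as the hunk above:

	KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
	    "spinning", "lockname:\"%s\"", sx->lock_object.lo_name);
	GIANT_SAVE();		/* release Giant before busy-waiting */
	while (SX_OWNER(sx->sx_lock) == x && TD_IS_RUNNING(owner))
		cpu_spinwait();
	KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread), "running");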
diff --git a/sys/kern/kern_timeout.c b/sys/kern/kern_timeout.c
index 2961d73..af8ff47 100644
--- a/sys/kern/kern_timeout.c
+++ b/sys/kern/kern_timeout.c
@@ -150,6 +150,7 @@ struct callout_cpu {
sbintime_t cc_lastscan;
void *cc_cookie;
u_int cc_bucket;
+ char cc_ktr_event_name[20];
};
#define cc_exec_curr cc_exec_entity[0].cc_curr
@@ -188,7 +189,7 @@ struct callout_cpu cc_cpu;
static int timeout_cpu;
-static void callout_cpu_init(struct callout_cpu *cc);
+static void callout_cpu_init(struct callout_cpu *cc, int cpu);
static void softclock_call_cc(struct callout *c, struct callout_cpu *cc,
#ifdef CALLOUT_PROFILING
int *mpcalls, int *lockcalls, int *gcalls,
@@ -283,7 +284,7 @@ callout_callwheel_init(void *dummy)
cc = CC_CPU(timeout_cpu);
cc->cc_callout = malloc(ncallout * sizeof(struct callout),
M_CALLOUT, M_WAITOK);
- callout_cpu_init(cc);
+ callout_cpu_init(cc, timeout_cpu);
}
SYSINIT(callwheel_init, SI_SUB_CPU, SI_ORDER_ANY, callout_callwheel_init, NULL);
@@ -291,7 +292,7 @@ SYSINIT(callwheel_init, SI_SUB_CPU, SI_ORDER_ANY, callout_callwheel_init, NULL);
* Initialize the per-cpu callout structures.
*/
static void
-callout_cpu_init(struct callout_cpu *cc)
+callout_cpu_init(struct callout_cpu *cc, int cpu)
{
struct callout *c;
int i;
@@ -306,6 +307,8 @@ callout_cpu_init(struct callout_cpu *cc)
cc->cc_firstevent = INT64_MAX;
for (i = 0; i < 2; i++)
cc_cce_cleanup(cc, i);
+ snprintf(cc->cc_ktr_event_name, sizeof(cc->cc_ktr_event_name),
+ "callwheel cpu %d", cpu);
if (cc->cc_callout == NULL) /* Only cpu0 handles timeout(9) */
return;
for (i = 0; i < ncallout; i++) {
@@ -367,7 +370,7 @@ start_softclock(void *dummy)
continue;
cc = CC_CPU(cpu);
cc->cc_callout = NULL; /* Only cpu0 handles timeout(9). */
- callout_cpu_init(cc);
+ callout_cpu_init(cc, cpu);
if (swi_add(NULL, "clock", softclock, cc, SWI_CLOCK,
INTR_MPSAFE, &cc->cc_cookie))
panic("died while creating standard software ithreads");
@@ -674,6 +677,8 @@ softclock_call_cc(struct callout *c, struct callout_cpu *cc,
CTR3(KTR_CALLOUT, "callout %p func %p arg %p",
c, c_func, c_arg);
}
+ KTR_STATE3(KTR_SCHED, "callout", cc->cc_ktr_event_name, "running",
+ "func:%p", c_func, "arg:%p", c_arg, "direct:%d", direct);
#if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING)
sbt1 = sbinuptime();
#endif
@@ -696,6 +701,7 @@ softclock_call_cc(struct callout *c, struct callout_cpu *cc,
lastfunc = c_func;
}
#endif
+ KTR_STATE0(KTR_SCHED, "callout", cc->cc_ktr_event_name, "idle");
CTR1(KTR_CALLOUT, "callout %p finished", c);
if ((c_flags & CALLOUT_RETURNUNLOCKED) == 0)
class->lc_unlock(c_lock);
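The callout side creates one KTR event source per CPU instead of per thread:
callout_cpu_init() formats a stable name into cc_ktr_event_name once
("callwheel cpu %d" fits the 20-byte buffer with room for a five-digit CPU
number), and softclock_call_cc() toggles that source between "running" and
"idle" around every handler. Condensed from the hunks above:

	/* At init time (callout_cpu_init()): name the per-CPU source. */
	snprintf(cc->cc_ktr_event_name, sizeof(cc->cc_ktr_event_name),
	    "callwheel cpu %d", cpu);

	/* Around each handler invocation (softclock_call_cc()). */
	KTR_STATE3(KTR_SCHED, "callout", cc->cc_ktr_event_name, "running",
	    "func:%p", c_func, "arg:%p", c_arg, "direct:%d", direct);
	c_func(c_arg);
	KTR_STATE0(KTR_SCHED, "callout", cc->cc_ktr_event_name, "idle");

This is also why the schedgraph.py hunks below drop the special
"callwheel idle" and "callout running" colors: the per-CPU callwheel sources
now reuse the generic "running"/"idle" states.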
diff --git a/tools/sched/schedgraph.py b/tools/sched/schedgraph.py
index 4335574..f0552ca 100644
--- a/tools/sched/schedgraph.py
+++ b/tools/sched/schedgraph.py
@@ -70,6 +70,7 @@ eventcolors = [
("count", "red"),
("running", "green"),
("idle", "grey"),
+ ("spinning", "red"),
("yielding", "yellow"),
("swapped", "violet"),
("suspended", "purple"),
@@ -80,8 +81,6 @@ eventcolors = [
("runq rem", "yellow"),
("thread exit", "grey"),
("proc exit", "grey"),
- ("callwheel idle", "grey"),
- ("callout running", "green"),
("lock acquire", "blue"),
("lock contest", "purple"),
("failed lock try", "red"),
@@ -288,6 +287,10 @@ class ColorConfigure(Toplevel):
color = graph.getcolor(type[0])
if (color != ""):
self.additem(type[0], color)
+ self.bind("<Control-w>", self.destroycb)
+
+ def destroycb(self, event):
+ self.destroy()
def additem(self, name, color):
item = ColorConf(self.items, name, color)
@@ -373,6 +376,10 @@ class SourceConfigure(Toplevel):
self.addsource(source)
self.drawbuttons()
self.buttons.grid(row=1, column=0, sticky=W)
+ self.bind("<Control-w>", self.destroycb)
+
+ def destroycb(self, event):
+ self.destroy()
def addsource(self, source):
if (self.irow > 30):
@@ -487,6 +494,10 @@ class SourceStats(Toplevel):
bd=1, relief=SUNKEN, width=10).grid(
row=ypos, column=3, sticky=W+E)
ypos += 1
+ self.bind("<Control-w>", self.destroycb)
+
+ def destroycb(self, event):
+ self.destroy()
class SourceContext(Menu):
@@ -529,6 +540,7 @@ class EventView(Toplevel):
self.drawbuttons()
event.displayref(canvas)
self.bind("<Destroy>", self.destroycb)
+ self.bind("<Control-w>", self.destroycb)
def destroycb(self, event):
self.unbind("<Destroy>")
@@ -856,7 +868,7 @@ class EventSource:
return (Y_EVENTSOURCE)
def eventat(self, i):
- if (i >= len(self.events)):
+ if (i >= len(self.events) or i < 0):
return (None)
event = self.events[i]
return (event)
@@ -903,7 +915,6 @@ class KTRFile:
self.timestamp_f = None
self.timestamp_l = None
self.locks = {}
- self.callwheels = {}
self.ticks = {}
self.load = {}
self.crit = {}
@@ -1310,6 +1321,10 @@ class SchedGraph(Frame):
self.pack(expand=1, fill="both")
self.buildwidgets()
self.layout()
+ self.bind_all("<Control-q>", self.quitcb)
+
+ def quitcb(self, event):
+ self.quit()
def buildwidgets(self):
global status
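For completeness, the capture workflow these hooks feed (paraphrasing the
requirements documented in schedgraph.py's own header; see ktr(4) for the
option spellings, and treat the entries count as an example value):

options 	KTR
options 	KTR_ENTRIES=32768
options 	KTR_COMPILE=(KTR_SCHED)
options 	KTR_MASK=(KTR_SCHED)

Then, after running the workload:

	ktrdump -ct > ktr.out
	schedgraph.py ktr.out

On the viewer side, the Control-w bindings added above close the individual
popup windows and Control-q quits the application, while the extra check in
eventat() keeps a negative index from wrapping around to the end of the
Python event list.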