Diffstat (limited to 'sys/kern')
-rw-r--r--  sys/kern/kern_clock.c    4
-rw-r--r--  sys/kern/kern_synch.c   27
-rw-r--r--  sys/kern/sched_4bsd.c   72
-rw-r--r--  sys/kern/sched_ule.c    77
-rw-r--r--  sys/kern/subr_pcpu.c     3
5 files changed, 132 insertions, 51 deletions
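The patch converts the scheduler's free-form CTR*() trace strings under KTR_SCHED into the structured KTR_STATE*/KTR_POINT*/KTR_COUNTER* event macros, keyed by a stable per-thread identifier from the new sched_tdname() helper, so that schedgraph-style trace post-processors can parse the records mechanically. A minimal before/after sketch, lifted from the mi_switch() hunk below (not additional code in the patch):

    /* Old: ad-hoc format string carrying raw pointers and names. */
    CTR3(KTR_SCHED, "mi_switch: running %p(%s) prio %d",
        td, td->td_name, td->td_priority);

    /* New: structured state event keyed by a cached "name tid N" string. */
    KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "running",
        "prio:%d", td->td_priority);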
diff --git a/sys/kern/kern_clock.c b/sys/kern/kern_clock.c
index 1c8e7bd..b311d30 100644
--- a/sys/kern/kern_clock.c
+++ b/sys/kern/kern_clock.c
@@ -498,8 +498,8 @@ statclock(int usermode)
rss = pgtok(vmspace_resident_count(vm));
if (ru->ru_maxrss < rss)
ru->ru_maxrss = rss;
- CTR4(KTR_SCHED, "statclock: %p(%s) prio %d stathz %d",
- td, td->td_name, td->td_priority, (stathz)?stathz:hz);
+ KTR_POINT2(KTR_SCHED, "thread", sched_tdname(td), "statclock",
+ "prio:%d", td->td_priority, "stathz:%d", (stathz)?stathz:hz);
thread_lock_flags(td, MTX_QUIET);
sched_clock(td);
thread_unlock(td);
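The KTR_POINT2() call above shows the argument layout the new macros share: the trace class, an event group ("thread"), an identifier within that group (the cached thread name), the point or state name, and then attribute/value pairs whose first member is a "key:%fmt" string. The shape below is inferred from the call sites in this patch rather than quoted from sys/sys/ktr.h:

    /* class, group, group member, event name, then "attr:%fmt"/value pairs */
    KTR_POINT2(KTR_SCHED, "thread", sched_tdname(td), "statclock",
        "prio:%d", td->td_priority, "stathz:%d", stathz ? stathz : hz);
    /* KTR_STATEn() uses the same layout but marks a state the thread entered;
       KTR_COUNTER0() records the current value of a named counter. */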
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index 53e3ced..bc539af 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -71,6 +71,13 @@ __FBSDID("$FreeBSD$");
#include <vm/pmap.h>
#endif
+#define KTDSTATE(td) \
+ (((td)->td_inhibitors & TDI_SLEEPING) != 0 ? "sleep" : \
+ ((td)->td_inhibitors & TDI_SUSPENDED) != 0 ? "suspended" : \
+ ((td)->td_inhibitors & TDI_SWAPPED) != 0 ? "swapped" : \
+ ((td)->td_inhibitors & TDI_LOCK) != 0 ? "blocked" : \
+ ((td)->td_inhibitors & TDI_IWAIT) != 0 ? "iwait" : "yielding")
+
static void synch_setup(void *dummy);
SYSINIT(synch_setup, SI_SUB_KICK_SCHEDULER, SI_ORDER_FIRST, synch_setup,
NULL);
@@ -425,25 +432,19 @@ mi_switch(int flags, struct thread *newtd)
td->td_tid, td->td_sched, p->p_pid, td->td_name);
#if (KTR_COMPILE & KTR_SCHED) != 0
if (TD_IS_IDLETHREAD(td))
- CTR3(KTR_SCHED, "mi_switch: %p(%s) prio %d idle",
- td, td->td_name, td->td_priority);
- else if (newtd != NULL)
- CTR5(KTR_SCHED,
- "mi_switch: %p(%s) prio %d preempted by %p(%s)",
- td, td->td_name, td->td_priority, newtd,
- newtd->td_name);
+ KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "idle",
+ "prio:%d", td->td_priority);
else
- CTR6(KTR_SCHED,
- "mi_switch: %p(%s) prio %d inhibit %d wmesg %s lock %s",
- td, td->td_name, td->td_priority,
- td->td_inhibitors, td->td_wmesg, td->td_lockname);
+ KTR_STATE3(KTR_SCHED, "thread", sched_tdname(td), KTDSTATE(td),
+ "prio:%d", td->td_priority, "wmesg:\"%s\"", td->td_wmesg,
+ "lockname:\"%s\"", td->td_lockname);
#endif
#ifdef XEN
PT_UPDATES_FLUSH();
#endif
sched_switch(td, newtd, flags);
- CTR3(KTR_SCHED, "mi_switch: running %p(%s) prio %d",
- td, td->td_name, td->td_priority);
+ KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "running",
+ "prio:%d", td->td_priority);
CTR4(KTR_PROC, "mi_switch: new thread %ld (td_sched %p, pid %ld, %s)",
td->td_tid, td->td_sched, p->p_pid, td->td_name);
diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index 88a5494..cf669c9 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -82,6 +82,8 @@ dtrace_vtime_switch_func_t dtrace_vtime_switch_func;
#endif
#define NICE_WEIGHT 1 /* Priorities per nice level. */
+#define TS_NAME_LEN (MAXCOMLEN + sizeof(" td ") + sizeof(__STRING(UINT_MAX)))
+
/*
* The schedulable entity that runs a context.
* This is an extension to the thread structure and is tailored to
@@ -93,6 +95,9 @@ struct td_sched {
int ts_slptime; /* (j) Seconds !RUNNING. */
int ts_flags;
struct runq *ts_runq; /* runq the thread is currently on */
+#ifdef KTR
+ char ts_name[TS_NAME_LEN];
+#endif
};
/* flags kept in td_flags */
@@ -243,15 +248,17 @@ SYSCTL_INT(_kern_sched, OID_AUTO, followon, CTLFLAG_RW,
static __inline void
sched_load_add(void)
{
+
sched_tdcnt++;
- CTR1(KTR_SCHED, "global load: %d", sched_tdcnt);
+ KTR_COUNTER0(KTR_SCHED, "load", "global load", sched_tdcnt);
}
static __inline void
sched_load_rem(void)
{
+
sched_tdcnt--;
- CTR1(KTR_SCHED, "global load: %d", sched_tdcnt);
+ KTR_COUNTER0(KTR_SCHED, "load", "global load", sched_tdcnt);
}
/*
* Arrange to reschedule if necessary, taking the priorities and
@@ -705,8 +712,9 @@ void
sched_exit(struct proc *p, struct thread *td)
{
- CTR3(KTR_SCHED, "sched_exit: %p(%s) prio %d",
- td, td->td_name, td->td_priority);
+ KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "proc exit",
+ "prio:td", td->td_priority);
+
PROC_LOCK_ASSERT(p, MA_OWNED);
sched_exit_thread(FIRST_THREAD_IN_PROC(p), td);
}
@@ -715,8 +723,8 @@ void
sched_exit_thread(struct thread *td, struct thread *child)
{
- CTR3(KTR_SCHED, "sched_exit_thread: %p(%s) prio %d",
- child, child->td_name, child->td_priority);
+ KTR_STATE1(KTR_SCHED, "thread", sched_tdname(child), "exit",
+ "prio:td", child->td_priority);
thread_lock(td);
td->td_estcpu = ESTCPULIM(td->td_estcpu + child->td_estcpu);
thread_unlock(td);
@@ -773,10 +781,16 @@ sched_class(struct thread *td, int class)
static void
sched_priority(struct thread *td, u_char prio)
{
- CTR6(KTR_SCHED, "sched_prio: %p(%s) prio %d newprio %d by %p(%s)",
- td, td->td_name, td->td_priority, prio, curthread,
- curthread->td_name);
+
+ KTR_POINT3(KTR_SCHED, "thread", sched_tdname(td), "priority change",
+ "prio:%d", td->td_priority, "new prio:%d", prio, KTR_ATTR_LINKED,
+ sched_tdname(curthread));
+ if (td != curthread && prio > td->td_priority) {
+ KTR_POINT3(KTR_SCHED, "thread", sched_tdname(curthread),
+ "lend prio", "prio:%d", td->td_priority, "new prio:%d",
+ prio, KTR_ATTR_LINKED, sched_tdname(td));
+ }
THREAD_LOCK_ASSERT(td, MA_OWNED);
if (td->td_priority == prio)
return;
@@ -1205,9 +1219,13 @@ sched_add(struct thread *td, int flags)
("sched_add: bad thread state"));
KASSERT(td->td_flags & TDF_INMEM,
("sched_add: thread swapped out"));
- CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)",
- td, td->td_name, td->td_priority, curthread,
- curthread->td_name);
+
+ KTR_STATE2(KTR_SCHED, "thread", sched_tdname(td), "runq add",
+ "prio:%d", td->td_priority, KTR_ATTR_LINKED,
+ sched_tdname(curthread));
+ KTR_POINT1(KTR_SCHED, "thread", sched_tdname(curthread), "wokeup",
+ KTR_ATTR_LINKED, sched_tdname(td));
+
/*
* Now that the thread is moving to the run-queue, set the lock
@@ -1289,9 +1307,11 @@ sched_add(struct thread *td, int flags)
("sched_add: bad thread state"));
KASSERT(td->td_flags & TDF_INMEM,
("sched_add: thread swapped out"));
- CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)",
- td, td->td_name, td->td_priority, curthread,
- curthread->td_name);
+ KTR_STATE2(KTR_SCHED, "thread", sched_tdname(td), "runq add",
+ "prio:%d", td->td_priority, KTR_ATTR_LINKED,
+ sched_tdname(curthread));
+ KTR_POINT1(KTR_SCHED, "thread", sched_tdname(curthread), "wokeup",
+ KTR_ATTR_LINKED, sched_tdname(td));
/*
* Now that the thread is moving to the run-queue, set the lock
@@ -1336,9 +1356,9 @@ sched_rem(struct thread *td)
KASSERT(TD_ON_RUNQ(td),
("sched_rem: thread not on run queue"));
mtx_assert(&sched_lock, MA_OWNED);
- CTR5(KTR_SCHED, "sched_rem: %p(%s) prio %d by %p(%s)",
- td, td->td_name, td->td_priority, curthread,
- curthread->td_name);
+ KTR_STATE2(KTR_SCHED, "thread", sched_tdname(td), "runq rem",
+ "prio:%d", td->td_priority, KTR_ATTR_LINKED,
+ sched_tdname(curthread));
if ((td->td_proc->p_flag & P_NOLOAD) == 0)
sched_load_rem();
@@ -1570,6 +1590,22 @@ sched_fork_exit(struct thread *td)
THREAD_LOCK_ASSERT(td, MA_OWNED | MA_NOTRECURSED);
}
+char *
+sched_tdname(struct thread *td)
+{
+#ifdef KTR
+ struct td_sched *ts;
+
+ ts = td->td_sched;
+ if (ts->ts_name[0] == '\0')
+ snprintf(ts->ts_name, sizeof(ts->ts_name),
+ "%s tid %d", td->td_name, td->td_tid);
+ return (ts->ts_name);
+#else
+ return (td->td_name);
+#endif
+}
+
void
sched_affinity(struct thread *td)
{
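Both schedulers gain an identical sched_tdname() helper: with KTR compiled in it formats the identifier once per thread ("%s tid %d") and caches it in td_sched, so later trace points reuse one stable string; without KTR it simply returns td->td_name. A call-site sketch taken from the sched_add() hunk above, where KTR_ATTR_LINKED ties the event to a related thread (here the one doing the wakeup):

    KTR_STATE2(KTR_SCHED, "thread", sched_tdname(td), "runq add",
        "prio:%d", td->td_priority, KTR_ATTR_LINKED,
        sched_tdname(curthread));
    KTR_POINT1(KTR_SCHED, "thread", sched_tdname(curthread), "wokeup",
        KTR_ATTR_LINKED, sched_tdname(td));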
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index 2c2d906..3a06223 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -86,6 +86,10 @@ dtrace_vtime_switch_func_t dtrace_vtime_switch_func;
#define KTR_ULE 0
+#define TS_NAME_LEN (MAXCOMLEN + sizeof(" td ") + sizeof(__STRING(UINT_MAX)))
+#define TDQ_NAME_LEN (sizeof("sched lock ") + sizeof(__STRING(MAXCPU)))
+#define TDQ_LOADNAME_LEN (PCPU_NAME_LEN + sizeof(" load"))
+
/*
* Thread scheduler specific section. All fields are protected
* by the thread lock.
@@ -101,6 +105,9 @@ struct td_sched {
int ts_ltick; /* Last tick that we were running on */
int ts_ftick; /* First tick that we were running on */
int ts_ticks; /* Tick count */
+#ifdef KTR
+ char ts_name[TS_NAME_LEN];
+#endif
};
/* flags kept in ts_flags */
#define TSF_BOUND 0x0001 /* Thread can not migrate. */
@@ -216,7 +223,10 @@ struct tdq {
struct runq tdq_realtime; /* real-time run queue. */
struct runq tdq_timeshare; /* timeshare run queue. */
struct runq tdq_idle; /* Queue of IDLE threads. */
- char tdq_name[sizeof("sched lock") + 6];
+ char tdq_name[TDQ_NAME_LEN];
+#ifdef KTR
+ char tdq_loadname[TDQ_LOADNAME_LEN];
+#endif
} __aligned(64);
/* Idle thread states and config. */
@@ -489,7 +499,7 @@ tdq_load_add(struct tdq *tdq, struct thread *td)
tdq->tdq_load++;
if ((td->td_proc->p_flag & P_NOLOAD) == 0)
tdq->tdq_sysload++;
- CTR2(KTR_SCHED, "cpu %d load: %d", TDQ_ID(tdq), tdq->tdq_load);
+ KTR_COUNTER0(KTR_SCHED, "load", tdq->tdq_loadname, tdq->tdq_load);
}
/*
@@ -508,7 +518,7 @@ tdq_load_rem(struct tdq *tdq, struct thread *td)
tdq->tdq_load--;
if ((td->td_proc->p_flag & P_NOLOAD) == 0)
tdq->tdq_sysload--;
- CTR1(KTR_SCHED, "load: %d", tdq->tdq_load);
+ KTR_COUNTER0(KTR_SCHED, "load", tdq->tdq_loadname, tdq->tdq_load);
}
/*
@@ -1237,6 +1247,10 @@ tdq_setup(struct tdq *tdq)
"sched lock %d", (int)TDQ_ID(tdq));
mtx_init(&tdq->tdq_lock, tdq->tdq_name, "sched lock",
MTX_SPIN | MTX_RECURSE);
+#ifdef KTR
+ snprintf(tdq->tdq_loadname, sizeof(tdq->tdq_loadname),
+ "CPU %d load", (int)TDQ_ID(tdq));
+#endif
}
#ifdef SMP
@@ -1559,9 +1573,14 @@ sched_thread_priority(struct thread *td, u_char prio)
struct tdq *tdq;
int oldpri;
- CTR6(KTR_SCHED, "sched_prio: %p(%s) prio %d newprio %d by %p(%s)",
- td, td->td_name, td->td_priority, prio, curthread,
- curthread->td_name);
+ KTR_POINT3(KTR_SCHED, "thread", sched_tdname(td), "prio",
+ "prio:%d", td->td_priority, "new prio:%d", prio,
+ KTR_ATTR_LINKED, sched_tdname(curthread));
+ if (td != curthread && prio > td->td_priority) {
+ KTR_POINT3(KTR_SCHED, "thread", sched_tdname(curthread),
+ "lend prio", "prio:%d", td->td_priority, "new prio:%d",
+ prio, KTR_ATTR_LINKED, sched_tdname(td));
+ }
ts = td->td_sched;
THREAD_LOCK_ASSERT(td, MA_OWNED);
if (td->td_priority == prio)
@@ -1990,6 +2009,9 @@ sched_fork_thread(struct thread *td, struct thread *child)
ts2->ts_slptime = ts->ts_slptime;
ts2->ts_runtime = ts->ts_runtime;
ts2->ts_slice = 1; /* Attempt to quickly learn interactivity. */
+#ifdef KTR
+ bzero(ts2->ts_name, sizeof(ts2->ts_name));
+#endif
}
/*
@@ -2012,10 +2034,9 @@ void
sched_exit(struct proc *p, struct thread *child)
{
struct thread *td;
-
- CTR3(KTR_SCHED, "sched_exit: %p(%s) prio %d",
- child, child->td_name, child->td_priority);
+ KTR_STATE1(KTR_SCHED, "thread", sched_tdname(child), "proc exit",
+ "prio:td", child->td_priority);
PROC_LOCK_ASSERT(p, MA_OWNED);
td = FIRST_THREAD_IN_PROC(p);
sched_exit_thread(td, child);
@@ -2031,9 +2052,8 @@ void
sched_exit_thread(struct thread *td, struct thread *child)
{
- CTR3(KTR_SCHED, "sched_exit_thread: %p(%s) prio %d",
- child, child->td_name, child->td_priority);
-
+ KTR_STATE1(KTR_SCHED, "thread", sched_tdname(child), "thread exit",
+ "prio:td", child->td_priority);
/*
* Give the child's runtime to the parent without returning the
* sleep time as a penalty to the parent. This causes shells that
@@ -2291,9 +2311,12 @@ sched_add(struct thread *td, int flags)
#ifdef SMP
int cpu;
#endif
- CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)",
- td, td->td_name, td->td_priority, curthread,
- curthread->td_name);
+
+ KTR_STATE2(KTR_SCHED, "thread", sched_tdname(td), "runq add",
+ "prio:%d", td->td_priority, KTR_ATTR_LINKED,
+ sched_tdname(curthread));
+ KTR_POINT1(KTR_SCHED, "thread", sched_tdname(curthread), "wokeup",
+ KTR_ATTR_LINKED, sched_tdname(td));
THREAD_LOCK_ASSERT(td, MA_OWNED);
/*
* Recalculate the priority before we select the target cpu or
@@ -2337,9 +2360,8 @@ sched_rem(struct thread *td)
{
struct tdq *tdq;
- CTR5(KTR_SCHED, "sched_rem: %p(%s) prio %d by %p(%s)",
- td, td->td_name, td->td_priority, curthread,
- curthread->td_name);
+ KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "runq rem",
+ "prio:%d", td->td_priority);
tdq = TDQ_CPU(td->td_sched->ts_cpu);
TDQ_LOCK_ASSERT(tdq, MA_OWNED);
MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
@@ -2605,6 +2627,25 @@ sched_fork_exit(struct thread *td)
&TDQ_LOCKPTR(tdq)->lock_object, 0, 0, __FILE__, __LINE__);
}
+/*
+ * Create on first use to catch odd startup conditions.
+ */
+char *
+sched_tdname(struct thread *td)
+{
+#ifdef KTR
+ struct td_sched *ts;
+
+ ts = td->td_sched;
+ if (ts->ts_name[0] == '\0')
+ snprintf(ts->ts_name, sizeof(ts->ts_name),
+ "%s tid %d", td->td_name, td->td_tid);
+ return (ts->ts_name);
+#else
+ return (td->td_name);
+#endif
+}
+
#ifdef SMP
/*
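ULE's per-CPU run queues now report their load under a per-CPU counter name ("CPU %d load", built in tdq_setup() and sized by TDQ_LOADNAME_LEN), while 4BSD keeps its single "global load" counter; both go through KTR_COUNTER0(), as at the tdq_load_add()/tdq_load_rem() and sched_load_add()/sched_load_rem() call sites above:

    /* Per-CPU run-queue load; the name is formatted once in tdq_setup(). */
    KTR_COUNTER0(KTR_SCHED, "load", tdq->tdq_loadname, tdq->tdq_load);
    /* 4BSD's single global equivalent. */
    KTR_COUNTER0(KTR_SCHED, "load", "global load", sched_tdcnt);

PCPU_NAME_LEN, used to size tdq_loadname, corresponds to the pc_name buffer that the subr_pcpu.c hunk below fills in with "CPU %d"; its declaration is outside this diff, which is limited to sys/kern.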
diff --git a/sys/kern/subr_pcpu.c b/sys/kern/subr_pcpu.c
index e9fb8d0d..ea25aa7 100644
--- a/sys/kern/subr_pcpu.c
+++ b/sys/kern/subr_pcpu.c
@@ -76,6 +76,9 @@ pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
cpu_pcpu_init(pcpu, cpuid, size);
pcpu->pc_rm_queue.rmq_next = &pcpu->pc_rm_queue;
pcpu->pc_rm_queue.rmq_prev = &pcpu->pc_rm_queue;
+#ifdef KTR
+ snprintf(pcpu->pc_name, sizeof(pcpu->pc_name), "CPU %d", cpuid);
+#endif
}