commit    61fada7840e0031c1ea485ad598d1368b9052ee9 (patch)
author    julian <julian@FreeBSD.org>    2004-08-09 18:21:12 +0000
committer julian <julian@FreeBSD.org>    2004-08-09 18:21:12 +0000
tree      e484c85519f8541c69c627ca0f305f7b5dcd0f51
parent    197114e321511b6704f9f173a2c769ffafd81610 (diff)
Increase the amount of data exported by KTR in the KTR_RUNQ setting.
This extra data is needed to really follow what is going on in the threaded case.
-rw-r--r--  sys/kern/kern_fork.c     4
-rw-r--r--  sys/kern/kern_switch.c  21
-rw-r--r--  sys/kern/kern_synch.c    8
-rw-r--r--  sys/kern/kern_thread.c   5
-rw-r--r--  sys/kern/sched_4bsd.c   11
5 files changed, 28 insertions, 21 deletions
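For context, the CTRn() macros that this commit adds or extends log a printf-style event into the kernel's KTR trace buffer whenever the given trace class (KTR_RUNQ, KTR_PROC) is compiled in and enabled. Below is a minimal sketch, not part of the commit, of what such a trace point looks like; trace_runq_event() is a hypothetical helper, and the kernel is assumed to be built with "options KTR" and with KTR_RUNQ included in KTR_COMPILE and the runtime mask.

#include <sys/param.h>
#include <sys/ktr.h>
#include <sys/proc.h>

/*
 * Hypothetical example: record a thread, its KSE, its ksegrp and its pid
 * in the KTR_RUNQ trace class, mirroring the style of the entries added
 * in this diff.
 */
static void
trace_runq_event(struct thread *td)
{
	/* CTR4 takes a format string plus exactly four arguments. */
	CTR4(KTR_RUNQ, "example: td:%p ke:%p kg:%p pid:%d",
	    td, td->td_kse, td->td_ksegrp, td->td_proc->p_pid);
}

The recorded entries can later be inspected with ktrdump(8) or, from the in-kernel debugger, with the ddb "show ktr" command.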
diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c
index 149b6ba..78c6d8b 100644
--- a/sys/kern/kern_fork.c
+++ b/sys/kern/kern_fork.c
@@ -794,8 +794,8 @@ fork_exit(callout, arg, frame)
sched_lock.mtx_lock = (uintptr_t)td;
mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
cpu_critical_fork_exit();
- CTR3(KTR_PROC, "fork_exit: new thread %p (pid %d, %s)", td, p->p_pid,
- p->p_comm);
+ CTR4(KTR_PROC, "fork_exit: new thread %p (kse %p, pid %d, %s)",
+ td, td->td_kse, p->p_pid, p->p_comm);
/*
* Processes normally resume in mi_switch() after being
diff --git a/sys/kern/kern_switch.c b/sys/kern/kern_switch.c
index 23fbbe1..5020af3 100644
--- a/sys/kern/kern_switch.c
+++ b/sys/kern/kern_switch.c
@@ -213,8 +213,8 @@ kse_reassign(struct kse *ke)
kg->kg_last_assigned = td;
td->td_kse = ke;
ke->ke_thread = td;
- sched_add(td);
CTR2(KTR_RUNQ, "kse_reassign: ke%p -> td%p", ke, td);
+ sched_add(td);
return;
}
@@ -327,7 +327,8 @@ setrunqueue(struct thread *td)
struct thread *td2;
struct thread *tda;
- CTR1(KTR_RUNQ, "setrunqueue: td%p", td);
+ CTR4(KTR_RUNQ, "setrunqueue: td:%p ke:%p kg:%p pid:%d",
+ td, td->td_kse, td->td_ksegrp, td->td_proc->p_pid);
mtx_assert(&sched_lock, MA_OWNED);
KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
("setrunqueue: bad thread state"));
@@ -351,6 +352,8 @@ setrunqueue(struct thread *td)
* There is a free one so it's ours for the asking..
*/
ke = TAILQ_FIRST(&kg->kg_iq);
+ CTR2(KTR_RUNQ, "setrunqueue: kg:%p: Use free ke:%p",
+ kg, ke);
TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
ke->ke_state = KES_THREAD;
kg->kg_idle_kses--;
@@ -359,6 +362,9 @@ setrunqueue(struct thread *td)
* None free, but there is one we can commandeer.
*/
ke = tda->td_kse;
+ CTR3(KTR_RUNQ,
+ "setrunqueue: kg:%p: take ke:%p from td: %p",
+ kg, ke, tda);
sched_rem(tda);
tda->td_kse = NULL;
ke->ke_thread = NULL;
@@ -423,6 +429,9 @@ setrunqueue(struct thread *td)
ke->ke_thread = td2;
}
sched_add(ke->ke_thread);
+ } else {
+ CTR3(KTR_RUNQ, "setrunqueue: held: td%p kg%p pid%d",
+ td, td->td_ksegrp, td->td_proc->p_pid);
}
}
@@ -639,8 +648,8 @@ runq_add(struct runq *rq, struct kse *ke)
ke->ke_rqindex = pri;
runq_setbit(rq, pri);
rqh = &rq->rq_queues[pri];
- CTR4(KTR_RUNQ, "runq_add: p=%p pri=%d %d rqh=%p",
- ke->ke_proc, ke->ke_thread->td_priority, pri, rqh);
+ CTR5(KTR_RUNQ, "runq_add: td=%p ke=%p pri=%d %d rqh=%p",
+ ke->ke_thread, ke, ke->ke_thread->td_priority, pri, rqh);
TAILQ_INSERT_TAIL(rqh, ke, ke_procq);
}
@@ -706,8 +715,8 @@ runq_remove(struct runq *rq, struct kse *ke)
("runq_remove: process swapped out"));
pri = ke->ke_rqindex;
rqh = &rq->rq_queues[pri];
- CTR4(KTR_RUNQ, "runq_remove: p=%p pri=%d %d rqh=%p",
- ke, ke->ke_thread->td_priority, pri, rqh);
+ CTR5(KTR_RUNQ, "runq_remove: td=%p, ke=%p pri=%d %d rqh=%p",
+ ke->ke_thread, ke, ke->ke_thread->td_priority, pri, rqh);
KASSERT(ke != NULL, ("runq_remove: no proc on busy queue"));
TAILQ_REMOVE(rqh, ke, ke_procq);
if (TAILQ_EMPTY(rqh)) {
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index 2e48ef1..7830521 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -345,14 +345,14 @@ mi_switch(int flags, struct thread *newtd)
cnt.v_swtch++;
PCPU_SET(switchtime, new_switchtime);
PCPU_SET(switchticks, ticks);
- CTR3(KTR_PROC, "mi_switch: old thread %p (pid %ld, %s)",
- (void *)td, (long)p->p_pid, p->p_comm);
+ CTR4(KTR_PROC, "mi_switch: old thread %p (kse %p, pid %ld, %s)",
+ (void *)td, td->td_kse, (long)p->p_pid, p->p_comm);
if (td->td_proc->p_flag & P_SA)
thread_switchout(td);
sched_switch(td, newtd);
- CTR3(KTR_PROC, "mi_switch: new thread %p (pid %ld, %s)",
- (void *)td, (long)p->p_pid, p->p_comm);
+ CTR4(KTR_PROC, "mi_switch: new thread %p (kse %p, pid %ld, %s)",
+ (void *)td, td->td_kse, (long)p->p_pid, p->p_comm);
/*
* If the last thread was exiting, finish cleaning it up.
diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c
index 77df263..64a3419 100644
--- a/sys/kern/kern_thread.c
+++ b/sys/kern/kern_thread.c
@@ -682,9 +682,8 @@ thread_exit(void)
} else {
PROC_UNLOCK(p);
}
- td->td_state = TDS_INACTIVE;
- /* XXX Shouldn't cpu_throw() here. */
- mtx_assert(&sched_lock, MA_OWNED);
+ td->td_state = TDS_INACTIVE;
+ CTR1(KTR_PROC, "thread_exit: cpu_throw() thread %p", td);
cpu_throw(td, choosethread());
panic("I'm a teapot!");
/* NOTREACHED */
diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index c0f6a32..2f6d0ad 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -49,8 +49,6 @@ __FBSDID("$FreeBSD$");
#include <sys/sysctl.h>
#include <sys/sx.h>
-#define KTR_4BSD 0x0
-
/*
* INVERSE_ESTCPU_WEIGHT is only suitable for statclock() frequencies in
* the range 100-256 Hz (approximately).
@@ -725,14 +723,15 @@ sched_add(struct thread *td)
#ifdef SMP
if (KSE_CAN_MIGRATE(ke)) {
- CTR1(KTR_4BSD, "adding kse:%p to gbl runq", ke);
+ CTR2(KTR_RUNQ, "sched_add: adding kse:%p (td:%p) to gbl runq", ke, td);
ke->ke_runq = &runq;
} else {
- CTR1(KTR_4BSD, "adding kse:%p to pcpu runq", ke);
+ CTR2(KTR_RUNQ, "sched_add: adding kse:%p (td:%p)to pcpu runq", ke, td);
if (!SKE_RUNQ_PCPU(ke))
ke->ke_runq = &runq_pcpu[PCPU_GET(cpuid)];
}
#else
+ CTR2(KTR_RUNQ, "sched_add: adding kse:%p (td:%p) to runq", ke, td);
ke->ke_runq = &runq;
#endif
if ((td->td_proc->p_flag & P_NOLOAD) == 0)
@@ -777,12 +776,12 @@ sched_choose(void)
if (ke == NULL ||
(kecpu != NULL &&
kecpu->ke_thread->td_priority < ke->ke_thread->td_priority)) {
- CTR2(KTR_4BSD, "choosing kse %p from pcpu runq %d", kecpu,
+ CTR2(KTR_RUNQ, "choosing kse %p from pcpu runq %d", kecpu,
PCPU_GET(cpuid));
ke = kecpu;
rq = &runq_pcpu[PCPU_GET(cpuid)];
} else {
- CTR1(KTR_4BSD, "choosing kse %p from main runq", ke);
+ CTR1(KTR_RUNQ, "choosing kse %p from main runq", ke);
}
#else