author		julian <julian@FreeBSD.org>	2004-10-05 21:10:44 +0000
committer	julian <julian@FreeBSD.org>	2004-10-05 21:10:44 +0000
commit		7b170fd9fa203dd45f0e476e3c479181f9ceb2f7 (patch)
tree		68aa2dbaf19404b35d832a0466bffd8081c3d081
parent		8587c9806d6a780c141ff151e28c2fff6bb3160f (diff)
Use some macros to track available scheduler slots to allow
easier debugging.

MFC after:	4 days
-rw-r--r--	sys/kern/kern_switch.c	11
-rw-r--r--	sys/kern/sched_4bsd.c	30
-rw-r--r--	sys/kern/sched_ule.c	46
3 files changed, 64 insertions(+), 23 deletions(-)
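
Note: the CTR1/CTR3/CTR4 records added below go to the KTR_RUNQ trace class,
so they are only visible on kernels built with ktr(4) support. A plausible
kernel config fragment for capturing them (the entry count and masks here are
illustrative, not part of this change):

options 	KTR
options 	KTR_ENTRIES=8192
options 	KTR_COMPILE=(KTR_RUNQ)
options 	KTR_MASK=(KTR_RUNQ)

The accumulated trace can then be inspected on a live system with ktrdump(8),
or from ddb(4) with "show ktr".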
diff --git a/sys/kern/kern_switch.c b/sys/kern/kern_switch.c
index 787ec45..2851ab8 100644
--- a/sys/kern/kern_switch.c
+++ b/sys/kern/kern_switch.c
@@ -372,7 +372,7 @@ setrunqueue(struct thread *td, int flags)
sched_rem(tda);
tda = kg->kg_last_assigned =
TAILQ_PREV(tda, threadqueue, td_runq);
- kg->kg_avail_opennings++;
+ SLOT_RELEASE(kg);
}
/*
@@ -820,6 +820,7 @@ void
sched_init_concurrency(struct ksegrp *kg)
{
+ CTR1(KTR_RUNQ,"kg %p init slots and concurrency to 1", kg);
kg->kg_concurrency = 1;
kg->kg_avail_opennings = 1;
}
@@ -836,7 +837,11 @@ void
sched_set_concurrency(struct ksegrp *kg, int concurrency)
{
- /* Handle the case for a declining concurrency */
+ CTR4(KTR_RUNQ,"kg %p set concurrency to %d, slots %d -> %d",
+ kg,
+ concurrency,
+ kg->kg_avail_opennings,
+ kg->kg_avail_opennings + (concurrency - kg->kg_concurrency));
kg->kg_avail_opennings += (concurrency - kg->kg_concurrency);
kg->kg_concurrency = concurrency;
}
@@ -854,7 +859,7 @@ void
sched_thread_exit(struct thread *td)
{
- td->td_ksegrp->kg_avail_opennings++;
+ SLOT_RELEASE(td->td_ksegrp);
slot_fill(td->td_ksegrp);
}
diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index a88e266..eecae95 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -119,6 +119,28 @@ struct kg_sched {
#define kg_concurrency kg_sched->skg_concurrency
#define kg_runq_kses kg_sched->skg_runq_kses
+#define SLOT_RELEASE(kg) \
+do { \
+ kg->kg_avail_opennings++; \
+ CTR3(KTR_RUNQ, "kg %p(%d) Slot released (->%d)", \
+ kg, \
+ kg->kg_concurrency, \
+ kg->kg_avail_opennings); \
+/* KASSERT((kg->kg_avail_opennings <= kg->kg_concurrency), \
+ ("slots out of whack"));*/ \
+} while (0)
+
+#define SLOT_USE(kg) \
+do { \
+ kg->kg_avail_opennings--; \
+ CTR3(KTR_RUNQ, "kg %p(%d) Slot used (->%d)", \
+ kg, \
+ kg->kg_concurrency, \
+ kg->kg_avail_opennings); \
+/* KASSERT((kg->kg_avail_opennings >= 0), \
+ ("slots out of whack"));*/ \
+} while (0)
+
/*
* KSE_CAN_MIGRATE macro returns true if the kse can migrate between
* cpus.
@@ -802,7 +824,7 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
if (newtd) {
KASSERT((newtd->td_inhibitors == 0),
("trying to run inhibitted thread"));
- newtd->td_ksegrp->kg_avail_opennings--;
+ SLOT_USE(newtd->td_ksegrp);
newtd->td_kse->ke_flags |= KEF_DIDRUN;
TD_SET_RUNNING(newtd);
if ((newtd->td_proc->p_flag & P_NOLOAD) == 0)
@@ -822,7 +844,7 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
if (td == PCPU_GET(idlethread))
TD_SET_CAN_RUN(td);
else {
- td->td_ksegrp->kg_avail_opennings++;
+ SLOT_RELEASE(td->td_ksegrp);
if (TD_IS_RUNNING(td)) {
/* Put us back on the run queue (kse and all). */
setrunqueue(td, SRQ_OURSELF|SRQ_YIELDING);
@@ -1030,7 +1052,7 @@ sched_add(struct thread *td, int flags)
}
if ((td->td_proc->p_flag & P_NOLOAD) == 0)
sched_tdcnt++;
- td->td_ksegrp->kg_avail_opennings--;
+ SLOT_USE(td->td_ksegrp);
runq_add(ke->ke_runq, ke);
ke->ke_ksegrp->kg_runq_kses++;
ke->ke_state = KES_ONRUNQ;
@@ -1051,7 +1073,7 @@ sched_rem(struct thread *td)
if ((td->td_proc->p_flag & P_NOLOAD) == 0)
sched_tdcnt--;
- td->td_ksegrp->kg_avail_opennings++;
+ SLOT_RELEASE(td->td_ksegrp);
runq_remove(ke->ke_runq, ke);
ke->ke_state = KES_THREAD;
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index e248d6d..68b96ad 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -164,6 +164,28 @@ struct kg_sched {
#define kg_runtime kg_sched->skg_runtime
#define kg_slptime kg_sched->skg_slptime
+#define SLOT_RELEASE(kg) \
+do { \
+ kg->kg_avail_opennings++; \
+ CTR3(KTR_RUNQ, "kg %p(%d) Slot released (->%d)", \
+ kg, \
+ kg->kg_concurrency, \
+ kg->kg_avail_opennings); \
+ /*KASSERT((kg->kg_avail_opennings <= kg->kg_concurrency), \
+ ("slots out of whack")); */ \
+} while (0)
+
+#define SLOT_USE(kg) \
+do { \
+ kg->kg_avail_opennings--; \
+ CTR3(KTR_RUNQ, "kg %p(%d) Slot used (->%d)", \
+ kg, \
+ kg->kg_concurrency, \
+ kg->kg_avail_opennings); \
+ /*KASSERT((kg->kg_avail_opennings >= 0), \
+ ("slots out of whack"));*/ \
+} while (0)
+
static struct kse kse0;
static struct kg_sched kg_sched0;
@@ -1149,9 +1171,9 @@ schedinit(void)
/*
* Set up the scheduler specific parts of proc0.
*/
- ksegrp0.kg_sched = &kg_sched0;
proc0.p_sched = NULL; /* XXX */
- thread0.td_kse = &kse0;
+ ksegrp0.kg_sched = &kg_sched0;
+ thread0.td_sched = &kse0;
kse0.ke_thread = &thread0;
kse0.ke_oncpu = NOCPU; /* wrong.. can we use PCPU(cpuid) yet? */
kse0.ke_state = KES_THREAD;
@@ -1238,15 +1260,6 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
td->td_pflags &= ~TDP_OWEPREEMPT;
/*
- * If we bring in a thread,
- * then account for it as if it had been added to the run queue and then chosen.
- */
- if (newtd) {
- newtd->td_ksegrp->kg_avail_opennings--;
- newtd->td_kse->ke_flags |= KEF_DIDRUN;
- TD_SET_RUNNING(newtd);
- }
- /*
* If the KSE has been assigned it may be in the process of switching
* to the new cpu. This is the case in sched_bind().
*/
@@ -1255,7 +1268,7 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
TD_SET_CAN_RUN(td);
} else {
/* We are ending our run so make our slot available again */
- td->td_ksegrp->kg_avail_opennings++;
+ SLOT_RELEASE(td->td_ksegrp);
if (TD_IS_RUNNING(td)) {
kseq_load_rem(KSEQ_CPU(ke->ke_cpu), ke);
/*
@@ -1278,9 +1291,10 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
}
}
}
- if (newtd != NULL)
+ if (newtd != NULL) {
+ SLOT_USE(newtd->td_ksegrp);
kseq_load_add(KSEQ_SELF(), newtd->td_kse);
- else
+ } else
newtd = choosethread();
if (td != newtd)
cpu_switch(td, newtd);
@@ -1773,7 +1787,7 @@ sched_add_internal(struct thread *td, int preemptive)
curthread->td_flags |= TDF_NEEDRESCHED;
if (preemptive && maybe_preempt(td))
return;
- td->td_ksegrp->kg_avail_opennings--;
+ SLOT_USE(td->td_ksegrp);
ke->ke_ksegrp->kg_runq_threads++;
ke->ke_state = KES_ONRUNQ;
@@ -1801,7 +1815,7 @@ sched_rem(struct thread *td)
("sched_rem: KSE not on run queue"));
ke->ke_state = KES_THREAD;
- td->td_ksegrp->kg_avail_opennings++;
+ SLOT_RELEASE(td->td_ksegrp);
ke->ke_ksegrp->kg_runq_threads--;
kseq = KSEQ_CPU(ke->ke_cpu);
kseq_runq_rem(kseq, ke);
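
As a standalone illustration of the pattern this commit introduces, here is a
minimal userland C sketch: routing every counter update through SLOT_USE()/
SLOT_RELEASE() gives a single place to log each transition and to assert the
invariant 0 <= kg_avail_opennings <= kg_concurrency (the KASSERTs the commit
leaves commented out). The struct below borrows the field names from the diff
but is otherwise hypothetical, not kernel code.

#include <assert.h>
#include <stdio.h>

struct ksegrp {
	int kg_concurrency;	/* how many threads may run at once */
	int kg_avail_opennings;	/* unused run-queue slots remaining */
};

/* Release a slot; log and check the upper bound in one place. */
#define SLOT_RELEASE(kg)						\
do {									\
	(kg)->kg_avail_opennings++;					\
	printf("kg %p(%d) Slot released (->%d)\n", (void *)(kg),	\
	    (kg)->kg_concurrency, (kg)->kg_avail_opennings);		\
	assert((kg)->kg_avail_opennings <= (kg)->kg_concurrency);	\
} while (0)

/* Consume a slot; log and check the lower bound in one place. */
#define SLOT_USE(kg)							\
do {									\
	(kg)->kg_avail_opennings--;					\
	printf("kg %p(%d) Slot used (->%d)\n", (void *)(kg),		\
	    (kg)->kg_concurrency, (kg)->kg_avail_opennings);		\
	assert((kg)->kg_avail_opennings >= 0);				\
} while (0)

int
main(void)
{
	struct ksegrp kg = { .kg_concurrency = 2, .kg_avail_opennings = 2 };

	SLOT_USE(&kg);		/* first thread chosen to run */
	SLOT_USE(&kg);		/* second thread chosen to run */
	SLOT_RELEASE(&kg);	/* one thread switches out */
	SLOT_RELEASE(&kg);	/* the other switches out */
	return (0);
}

With the asserts enabled, a "slots out of whack" accounting bug fails fast at
the exact increment or decrement that broke the invariant, instead of silently
corrupting the count as a scattered set of raw ++/-- operations would.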