path: root/sys/kern/kern_switch.c
author     julian <julian@FreeBSD.org>    2004-09-16 07:12:59 +0000
committer  julian <julian@FreeBSD.org>    2004-09-16 07:12:59 +0000
commit     6461286b213d045e058cd62bba53b08477d34e1f (patch)
tree       d07e704218909383fd325c02883dbc86dfaeb565 /sys/kern/kern_switch.c
parent     3d3172e3cda61cc88a5040098d4bf5cae6f93d77 (diff)
clean up thread runq accounting a bit.
MFC after: 3 days
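
The cleanup below removes the by-hand kg_avail_opennings adjustments that kern_switch.c made around its sched_add()/sched_rem() calls, the idea being that the slot accounting should live in one place inside the scheduler entry points instead. A minimal sketch of that idea follows, assuming the scheduler charges and releases the slot itself; the struct stubs and the SLOT_USE()/SLOT_RELEASE() helper names are illustrative and not taken from this commit:

/* Illustrative stand-ins for the kernel structures involved. */
struct ksegrp {
	int		kg_avail_opennings;	/* free system run-queue slots */
};
struct thread {
	struct ksegrp	*td_ksegrp;
};

/* Assumed helpers: the slot is charged and released in exactly one place. */
#define	SLOT_USE(kg)		((kg)->kg_avail_opennings--)
#define	SLOT_RELEASE(kg)	((kg)->kg_avail_opennings++)

static void
sched_add_sketch(struct thread *td)
{
	SLOT_USE(td->td_ksegrp);	/* previously each caller decremented this */
	/* ... put the thread's KSE on the system run queue ... */
}

static void
sched_rem_sketch(struct thread *td)
{
	SLOT_RELEASE(td->td_ksegrp);	/* previously each caller incremented this */
	/* ... take the thread's KSE off the system run queue ... */
}

With the accounting centralized like this, the per-call-site increments and decrements deleted in the diff below become redundant.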
Diffstat (limited to 'sys/kern/kern_switch.c')
-rw-r--r--    sys/kern/kern_switch.c    40
1 file changed, 20 insertions(+), 20 deletions(-)
diff --git a/sys/kern/kern_switch.c b/sys/kern/kern_switch.c
index 7d3a44f..ee04bf3 100644
--- a/sys/kern/kern_switch.c
+++ b/sys/kern/kern_switch.c
@@ -192,7 +192,7 @@ retry:
* sched_thread_exit() (local)
* sched_switch() (local)
* sched_thread_exit() (local)
- * remrunqueue() (local)
+ * remrunqueue() (local) (not at the moment)
*/
static void
slot_fill(struct ksegrp *kg)
@@ -214,7 +214,6 @@ slot_fill(struct ksegrp *kg)
*/
if (td) {
kg->kg_last_assigned = td;
- kg->kg_avail_opennings--;
sched_add(td, SRQ_BORING);
CTR2(KTR_RUNQ, "slot_fill: td%p -> kg%p", td, kg);
} else {
@@ -250,7 +249,6 @@ remrunqueue(struct thread *td)
if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
/* remove from sys run queue and free up a slot */
sched_rem(td);
- kg->kg_avail_opennings++;
ke->ke_state = KES_THREAD;
return;
}
@@ -265,7 +263,6 @@ remrunqueue(struct thread *td)
* see if we need to move the KSE in the run queues.
*/
sched_rem(td);
- kg->kg_avail_opennings++;
ke->ke_state = KES_THREAD;
td2 = kg->kg_last_assigned;
KASSERT((td2 != NULL), ("last assigned has wrong value"));
@@ -305,17 +302,16 @@ adjustrunqueue( struct thread *td, int newpri)
/* It is a threaded process */
kg = td->td_ksegrp;
- TD_SET_CAN_RUN(td);
if (ke->ke_state == KES_ONRUNQ) {
if (kg->kg_last_assigned == td) {
kg->kg_last_assigned =
TAILQ_PREV(td, threadqueue, td_runq);
}
sched_rem(td);
- kg->kg_avail_opennings++;
}
TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
kg->kg_runnable--;
+ TD_SET_CAN_RUN(td);
td->td_priority = newpri;
setrunqueue(td, SRQ_BORING);
}
@@ -326,7 +322,6 @@ setrunqueue(struct thread *td, int flags)
struct ksegrp *kg;
struct thread *td2;
struct thread *tda;
- int count;
CTR3(KTR_RUNQ, "setrunqueue: td:%p kg:%p pid:%d",
td, td->td_ksegrp, td->td_proc->p_pid);
@@ -352,14 +347,23 @@ setrunqueue(struct thread *td, int flags)
}
kg->kg_avail_opennings = 1;
}
- kg->kg_avail_opennings--;
sched_add(td, flags);
return;
}
+ /*
+ * If the concurrency has been reduced and we would land in the
+ * assigned section, then keep removing entries from the
+ * system run queue until we are no longer in that section
+ * or there is room for us to be put in that section.
+ * What we MUST avoid is the case where threads of lower
+ * priority than the new one are scheduled, but the new one
+ * cannot be scheduled itself. That would lead to a non-contiguous
+ * set of scheduled threads, and everything would break.
+ */
tda = kg->kg_last_assigned;
- if ((kg->kg_avail_opennings <= 0) &&
- (tda && (tda->td_priority > td->td_priority))) {
+ while ((kg->kg_avail_opennings <= 0) &&
+ (tda && (tda->td_priority > td->td_priority))) {
/*
* None free, but there is one we can commandeer.
*/
@@ -375,18 +379,12 @@ setrunqueue(struct thread *td, int flags)
* Add the thread to the ksegrp's run queue at
* the appropriate place.
*/
- count = 0;
TAILQ_FOREACH(td2, &kg->kg_runq, td_runq) {
if (td2->td_priority > td->td_priority) {
kg->kg_runnable++;
TAILQ_INSERT_BEFORE(td2, td, td_runq);
break;
}
- /* XXX Debugging hack */
- if (++count > 10000) {
- printf("setrunqueue(): corrupt kq_runq, td= %p\n", td);
- panic("deadlock in setrunqueue");
- }
}
if (td2 == NULL) {
/* We ran off the end of the TAILQ or it was empty. */
@@ -397,12 +395,15 @@ setrunqueue(struct thread *td, int flags)
/*
* If we have a slot to use, then put the thread on the system
* run queue and if needed, readjust the last_assigned pointer.
+ * It may be that we need to schedule something anyhow,
+ * even if the available slot count is negative, so that
+ * all the items before last_assigned are scheduled.
*/
if (kg->kg_avail_opennings > 0) {
if (tda == NULL) {
/*
* No pre-existing last assigned so whoever is first
- * gets the KSE we brought in.. (maybe us)
+ * gets the slot.. (maybe us)
*/
td2 = TAILQ_FIRST(&kg->kg_runq);
kg->kg_last_assigned = td2;
@@ -411,13 +412,12 @@ setrunqueue(struct thread *td, int flags)
} else {
/*
* We are past last_assigned, so
- * gave the next slot to whatever is next,
+ * give the next slot to whatever is next,
* which may or may not be us.
*/
td2 = TAILQ_NEXT(tda, td_runq);
kg->kg_last_assigned = td2;
}
- kg->kg_avail_opennings--;
sched_add(td2, flags);
} else {
CTR3(KTR_RUNQ, "setrunqueue: held: td%p kg%p pid%d",
@@ -544,7 +544,7 @@ maybe_preempt(struct thread *td)
kg = td->td_ksegrp;
if (kg->kg_last_assigned == td)
kg->kg_last_assigned =
- TAILQ_PREV(td, threadqueue, td_runq);
+ TAILQ_PREV(td, threadqueue, td_runq);
TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
}
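
The new comment in setrunqueue() insists that the threads holding system run-queue slots always form a contiguous, highest-priority prefix of the ksegrp run queue, ending at kg_last_assigned. Below is a small user-space sketch of that invariant and of the slot-commandeering while loop; the data layout and names are illustrative, not kernel code:

#include <stdio.h>

/*
 * Threads queued on a ksegrp, sorted by priority (lower value means
 * higher priority).  "assigned" marks the ones currently holding a
 * system run-queue slot.
 */
struct qthread {
	int	prio;
	int	assigned;
};

int
main(void)
{
	struct qthread q[] = { { 30, 1 }, { 40, 1 }, { 50, 0 } };
	int avail = -1;		/* kg_avail_opennings analogue: concurrency shrank */
	int last = 1;		/* index of "last assigned" (the prio-40 thread) */
	int newprio = 15;	/* incoming, higher-priority thread */

	/*
	 * Mirror of the new while loop: as long as no slot is free and the
	 * last assigned thread has worse priority than the newcomer, take
	 * its slot away (the sched_rem() analogue releases a slot).
	 */
	while (avail <= 0 && last >= 0 && q[last].prio > newprio) {
		printf("commandeer slot from thread with prio %d\n",
		    q[last].prio);
		q[last].assigned = 0;
		last--;
		avail++;
	}
	printf("free slots now %d; thread with prio %d can be scheduled\n",
	    avail, newprio);
	/*
	 * Once the new thread takes a freed slot, the slot holders are
	 * again the highest-priority threads, so the scheduled set stays a
	 * contiguous prefix of the queue, as the comment requires.
	 */
	return (0);
}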