Diffstat (limited to 'sys')
-rw-r--r--  sys/kern/kern_switch.c  40
-rw-r--r--  sys/kern/kern_synch.c    2
-rw-r--r--  sys/kern/sched_4bsd.c   14
-rw-r--r--  sys/kern/sched_ule.c     2
4 files changed, 36 insertions, 22 deletions
diff --git a/sys/kern/kern_switch.c b/sys/kern/kern_switch.c
index 7d3a44f..ee04bf3 100644
--- a/sys/kern/kern_switch.c
+++ b/sys/kern/kern_switch.c
@@ -192,7 +192,7 @@ retry:
* sched_thread_exit() (local)
* sched_switch() (local)
* sched_thread_exit() (local)
- * remrunqueue() (local)
+ * remrunqueue() (local) (not at the moment)
*/
static void
slot_fill(struct ksegrp *kg)
@@ -214,7 +214,6 @@ slot_fill(struct ksegrp *kg)
*/
if (td) {
kg->kg_last_assigned = td;
- kg->kg_avail_opennings--;
sched_add(td, SRQ_BORING);
CTR2(KTR_RUNQ, "slot_fill: td%p -> kg%p", td, kg);
} else {
@@ -250,7 +249,6 @@ remrunqueue(struct thread *td)
if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
/* remove from sys run queue and free up a slot */
sched_rem(td);
- kg->kg_avail_opennings++;
ke->ke_state = KES_THREAD;
return;
}
@@ -265,7 +263,6 @@ remrunqueue(struct thread *td)
* see if we need to move the KSE in the run queues.
*/
sched_rem(td);
- kg->kg_avail_opennings++;
ke->ke_state = KES_THREAD;
td2 = kg->kg_last_assigned;
KASSERT((td2 != NULL), ("last assigned has wrong value"));
@@ -305,17 +302,16 @@ adjustrunqueue( struct thread *td, int newpri)
/* It is a threaded process */
kg = td->td_ksegrp;
- TD_SET_CAN_RUN(td);
if (ke->ke_state == KES_ONRUNQ) {
if (kg->kg_last_assigned == td) {
kg->kg_last_assigned =
TAILQ_PREV(td, threadqueue, td_runq);
}
sched_rem(td);
- kg->kg_avail_opennings++;
}
TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
kg->kg_runnable--;
+ TD_SET_CAN_RUN(td);
td->td_priority = newpri;
setrunqueue(td, SRQ_BORING);
}
@@ -326,7 +322,6 @@ setrunqueue(struct thread *td, int flags)
struct ksegrp *kg;
struct thread *td2;
struct thread *tda;
- int count;
CTR3(KTR_RUNQ, "setrunqueue: td:%p kg:%p pid:%d",
td, td->td_ksegrp, td->td_proc->p_pid);
@@ -352,14 +347,23 @@ setrunqueue(struct thread *td, int flags)
}
kg->kg_avail_opennings = 1;
}
- kg->kg_avail_opennings--;
sched_add(td, flags);
return;
}
+ /*
+ * If the concurrency has been reduced and the new thread would
+ * fall within the assigned section, keep removing entries from
+ * the system run queue until either we are no longer in that
+ * section or there is room for us to be placed in it.
+ * What we MUST avoid is the case where threads of lower priority
+ * than the new one are scheduled while the new one can not be
+ * scheduled itself.  That would leave a non-contiguous set of
+ * scheduled threads, and everything would break.
+ */
tda = kg->kg_last_assigned;
- if ((kg->kg_avail_opennings <= 0) &&
- (tda && (tda->td_priority > td->td_priority))) {
+ while ((kg->kg_avail_opennings <= 0) &&
+ (tda && (tda->td_priority > td->td_priority))) {
/*
* None free, but there is one we can commandeer.
*/
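
The comment in the hunk above describes the invariant informally. Below is a minimal sketch of the reclaim loop, assuming simplified stand-in types and names (toy_kg, avail, assigned, make_room); it is not kernel code, only a model of why the if had to become a while once kg_avail_opennings may go negative after a concurrency reduction.

#include <stdio.h>

struct toy_kg {
	int avail;	/* free slots; may be negative after a concurrency drop */
	int assigned;	/* threads currently handed to the system run queue */
};

/*
 * A new runnable thread would sit at position 'pos' (0 = best priority)
 * in the group run queue.  Keep taking the slot back from the last
 * assigned thread until a slot is free or the new thread no longer
 * falls inside the assigned prefix.  A single 'if' reclaims at most one
 * slot, which is not enough when concurrency drops by more than one.
 */
static void
make_room(struct toy_kg *kg, int pos)
{
	while (kg->avail <= 0 && pos < kg->assigned) {
		kg->assigned--;		/* sched_rem() the last assigned thread */
		kg->avail++;		/* ...its slot is free again */
	}
}

int
main(void)
{
	/* Concurrency dropped from 4 to 2: two slots over-committed. */
	struct toy_kg kg = { -2, 4 };

	make_room(&kg, 1);	/* a higher-priority thread arrives */
	printf("avail=%d assigned=%d\n", kg.avail, kg.assigned);
	return (0);
}

With two slots over-committed, the loop keeps handing slots back until one is free; the old single if would have stopped after the first reclaim, leaving lower-priority threads scheduled while the new one is held.
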
@@ -375,18 +379,12 @@ setrunqueue(struct thread *td, int flags)
* Add the thread to the ksegrp's run queue at
* the appropriate place.
*/
- count = 0;
TAILQ_FOREACH(td2, &kg->kg_runq, td_runq) {
if (td2->td_priority > td->td_priority) {
kg->kg_runnable++;
TAILQ_INSERT_BEFORE(td2, td, td_runq);
break;
}
- /* XXX Debugging hack */
- if (++count > 10000) {
- printf("setrunqueue(): corrupt kq_runq, td= %p\n", td);
- panic("deadlock in setrunqueue");
- }
}
if (td2 == NULL) {
/* We ran off the end of the TAILQ or it was empty. */
@@ -397,12 +395,15 @@ setrunqueue(struct thread *td, int flags)
/*
* If we have a slot to use, then put the thread on the system
* run queue and if needed, readjust the last_assigned pointer.
+ * It may be that we need to schedule something anyhow,
+ * even if the available slot count is negative, so that
+ * all the items before last_assigned are scheduled.
*/
if (kg->kg_avail_opennings > 0) {
if (tda == NULL) {
/*
* No pre-existing last assigned so whoever is first
- * gets the KSE we brought in.. (maybe us)
+ * gets the slot.. (maybe us)
*/
td2 = TAILQ_FIRST(&kg->kg_runq);
kg->kg_last_assigned = td2;
@@ -411,13 +412,12 @@ setrunqueue(struct thread *td, int flags)
} else {
/*
* We are past last_assigned, so
- * gave the next slot to whatever is next,
+ * give the next slot to whatever is next,
* which may or may not be us.
*/
td2 = TAILQ_NEXT(tda, td_runq);
kg->kg_last_assigned = td2;
}
- kg->kg_avail_opennings--;
sched_add(td2, flags);
} else {
CTR3(KTR_RUNQ, "setrunqueue: held: td%p kg%p pid%d",
@@ -544,7 +544,7 @@ maybe_preempt(struct thread *td)
kg = td->td_ksegrp;
if (kg->kg_last_assigned == td)
kg->kg_last_assigned =
- TAILQ_PREV(td, threadqueue, td_runq);
+ TAILQ_PREV(td, threadqueue, td_runq);
TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
}
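
One subtlety in the hunks above: when a slot is available, setrunqueue() does not necessarily hand it to the thread that was just enqueued, but to the first thread past kg_last_assigned, or to the head of the group run queue if nothing is assigned yet. The following is a hedged sketch of just that selection, using an index-based stand-in for the TAILQ walk; the names are illustrative, not the kernel API.

#include <stdio.h>

/*
 * Return the index of the thread that should receive the next free
 * slot.  'last' is the index of the current last-assigned thread
 * (-1 if none); 'nqueued' is the number of threads on the group
 * run queue, ordered by priority.
 */
static int
next_to_assign(int last, int nqueued)
{
	if (nqueued == 0)
		return (-1);		/* nothing runnable */
	if (last < 0)
		return (0);		/* no last assigned: head of the queue */
	if (last + 1 < nqueued)
		return (last + 1);	/* whatever follows last_assigned */
	return (-1);			/* everything queued is already assigned */
}

int
main(void)
{
	printf("no last assigned, 3 queued -> index %d\n", next_to_assign(-1, 3));
	printf("last assigned is 1, 3 queued -> index %d\n", next_to_assign(1, 3));
	return (0);
}

Index 0 corresponds to TAILQ_FIRST(&kg->kg_runq) and last + 1 to TAILQ_NEXT(tda, td_runq) in the real code, which is why the slot may or may not go to the thread that triggered the call ("maybe us").
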
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index 7534e21..13749e4 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -335,7 +335,7 @@ mi_switch(int flags, struct thread *newtd)
PCPU_SET(switchticks, ticks);
CTR4(KTR_PROC, "mi_switch: old thread %p (kse %p, pid %ld, %s)",
(void *)td, td->td_sched, (long)p->p_pid, p->p_comm);
- if (td->td_proc->p_flag & P_SA)
+ if ((flags & SW_VOL) && (td->td_proc->p_flag & P_SA))
newtd = thread_switchout(td, flags, newtd);
sched_switch(td, newtd, flags);
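
The kern_synch.c change gates the scheduler-activations switchout on the switch being voluntary. A minimal sketch of the condition only, assuming placeholder flag values; the real SW_VOL and P_SA definitions live in the kernel headers.

#include <stdio.h>

#define SW_VOL	0x1	/* placeholder: voluntary context switch */
#define P_SA	0x2	/* placeholder: process uses scheduler activations */

static int
should_switchout(int flags, int p_flag)
{
	/* Old test: (p_flag & P_SA).  New test also requires SW_VOL. */
	return ((flags & SW_VOL) && (p_flag & P_SA));
}

int
main(void)
{
	printf("voluntary switch, SA process : %d\n", should_switchout(SW_VOL, P_SA));
	printf("preemption,       SA process : %d\n", should_switchout(0, P_SA));
	return (0);
}

With the old test an involuntary preemption of a P_SA process would also call thread_switchout(); the added SW_VOL check restricts that path to voluntary switches.
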
diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index 02c8f3c..a88e266 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -792,6 +792,12 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
/*
* The thread we are about to run needs to be counted as if it had been
* added to the run queue and selected.
+ * It could have come from:
+ *  a preemption
+ *  an upcall
+ *  a follow-on
+ * Do this before saving curthread so that the slot count
+ * doesn't give an overly optimistic view when that happens.
*/
if (newtd) {
KASSERT((newtd->td_inhibitors == 0),
@@ -1024,6 +1030,7 @@ sched_add(struct thread *td, int flags)
}
if ((td->td_proc->p_flag & P_NOLOAD) == 0)
sched_tdcnt++;
+ td->td_ksegrp->kg_avail_opennings--;
runq_add(ke->ke_runq, ke);
ke->ke_ksegrp->kg_runq_kses++;
ke->ke_state = KES_ONRUNQ;
@@ -1044,12 +1051,17 @@ sched_rem(struct thread *td)
if ((td->td_proc->p_flag & P_NOLOAD) == 0)
sched_tdcnt--;
+ td->td_ksegrp->kg_avail_opennings++;
runq_remove(ke->ke_runq, ke);
ke->ke_state = KES_THREAD;
- ke->ke_ksegrp->kg_runq_kses--;
+ td->td_ksegrp->kg_runq_kses--;
}
+/*
+ * Select threads to run.
+ * Notice that the running threads still consume a slot.
+ */
struct kse *
sched_choose(void)
{
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index bab2bde..e248d6d 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -1773,6 +1773,7 @@ sched_add_internal(struct thread *td, int preemptive)
curthread->td_flags |= TDF_NEEDRESCHED;
if (preemptive && maybe_preempt(td))
return;
+ td->td_ksegrp->kg_avail_opennings--;
ke->ke_ksegrp->kg_runq_threads++;
ke->ke_state = KES_ONRUNQ;
@@ -1800,6 +1801,7 @@ sched_rem(struct thread *td)
("sched_rem: KSE not on run queue"));
ke->ke_state = KES_THREAD;
+ td->td_ksegrp->kg_avail_opennings++;
ke->ke_ksegrp->kg_runq_threads--;
kseq = KSEQ_CPU(ke->ke_cpu);
kseq_runq_rem(kseq, ke);
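
Taken together, the sched_4bsd.c and sched_ule.c hunks move the kg_avail_opennings adjustments out of the kern_switch.c callers and into sched_add()/sched_rem(), so the counter changes exactly when a thread enters or leaves the system run queue (and, as the new sched_choose() comment notes, running threads keep their slot). The following is a hedged model of that pairing with stand-in names, not the kernel API.

#include <assert.h>

struct toy_kg {
	int avail_openings;	/* analogue of kg_avail_opennings */
	int on_sysrunq;		/* threads this group has on the system queue */
};

static void
toy_sched_add(struct toy_kg *kg)
{
	kg->avail_openings--;	/* a slot is consumed exactly here */
	kg->on_sysrunq++;
}

static void
toy_sched_rem(struct toy_kg *kg)
{
	kg->avail_openings++;	/* ...and released exactly here */
	kg->on_sysrunq--;
}

int
main(void)
{
	struct toy_kg kg = { 2, 0 };

	toy_sched_add(&kg);
	toy_sched_add(&kg);
	toy_sched_rem(&kg);
	/* The two counters move in lock step. */
	assert(kg.avail_openings + kg.on_sysrunq == 2);
	return (0);
}

Keeping both adjustments inside the add/remove primitives is what lets setrunqueue(), remrunqueue() and adjustrunqueue() drop their explicit kg_avail_opennings lines in kern_switch.c.
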