author     jhb <jhb@FreeBSD.org>    2004-07-02 19:09:50 +0000
committer  jhb <jhb@FreeBSD.org>    2004-07-02 19:09:50 +0000
commit     1b16b181d12075ffc084f2a593e16fe3dd8f6f6d
tree       e89f55b0c72410220a98331e61987c812e0316dd /sys/kern/sched_4bsd.c
parent     4b39413aeb9e3e3ff1341b98477edddee8807db5
- Change mi_switch() and sched_switch() to accept an optional thread to
  switch to.  If a non-NULL thread pointer is passed in, the CPU will switch
  to that thread directly rather than calling choosethread() to pick one.
- Make sched_switch() aware of idle threads: it now does TD_SET_CAN_RUN()
  on them instead of putting them on the run queue, so callers of mi_switch()
  that may run from an idle thread no longer have to handle this themselves.
  (Both behaviors are illustrated by the sketch below.)
- Move the constants for the arguments to mi_switch() and thread_single()
  out of the middle of the function prototypes and up into their own section.
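
The following stand-alone C sketch is not part of the commit; model_switch(),
pick_next(), and struct model_thread are hypothetical stand-ins for the kernel
primitives.  It only models the two rules described above: a non-NULL thread
argument bypasses the chooser, and an idle thread is marked runnable instead
of being placed on the run queue.

/*
 * Hypothetical user-space model of the new sched_switch() contract; the
 * struct and function names are illustrative stand-ins, not kernel code.
 */
#include <stddef.h>
#include <stdio.h>

struct model_thread {
	const char *name;
	int is_idle;	/* stands in for td == PCPU_GET(idlethread) */
	int can_run;	/* stands in for the TD_SET_CAN_RUN() state */
	int on_runq;	/* stands in for setrunqueue() */
};

/* Stand-in for choosethread(): pretend "fallback" is the best candidate. */
static struct model_thread *
pick_next(struct model_thread *fallback)
{
	return (fallback);
}

static void
model_switch(struct model_thread *td, struct model_thread *newtd,
    struct model_thread *fallback)
{
	/* Idle threads are never placed on the run queue. */
	if (td->is_idle)
		td->can_run = 1;
	else
		td->on_runq = 1;

	/* Only fall back to the chooser when no thread was handed in. */
	if (newtd == NULL)
		newtd = pick_next(fallback);

	printf("switch %s -> %s%s\n", td->name, newtd->name,
	    td->is_idle ? " (idle: CAN_RUN, not queued)" : "");
}

int
main(void)
{
	struct model_thread idle = { "idle", 1, 0, 0 };
	struct model_thread a = { "A", 0, 0, 0 };
	struct model_thread b = { "B", 0, 0, 0 };

	model_switch(&a, NULL, &b);	/* old behavior: the chooser picks B */
	model_switch(&a, &b, NULL);	/* new behavior: direct handoff to B */
	model_switch(&idle, NULL, &a);	/* idle thread: marked CAN_RUN only */
	return (0);
}
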
Diffstat (limited to 'sys/kern/sched_4bsd.c')
-rw-r--r--  sys/kern/sched_4bsd.c | 17 +++++++++++------
1 file changed, 11 insertions(+), 6 deletions(-)
diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index ae8492f..5d8961e 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -637,9 +637,8 @@ sched_sleep(struct thread *td)
 }
 
 void
-sched_switch(struct thread *td)
+sched_switch(struct thread *td, struct thread *newtd)
 {
-	struct thread *newtd;
 	struct kse *ke;
 	struct proc *p;
 
@@ -651,6 +650,8 @@ sched_switch(struct thread *td)
 
 	if ((p->p_flag & P_NOLOAD) == 0)
 		sched_tdcnt--;
+	if (newtd != NULL && (newtd->td_proc->p_flag & P_NOLOAD) == 0)
+		sched_tdcnt++;
 	td->td_lastcpu = td->td_oncpu;
 	td->td_last_kse = ke;
 	td->td_flags &= ~TDF_NEEDRESCHED;
@@ -658,9 +659,12 @@ sched_switch(struct thread *td)
 	/*
 	 * At the last moment, if this thread is still marked RUNNING,
 	 * then put it back on the run queue as it has not been suspended
-	 * or stopped or any thing else similar.
+	 * or stopped or any thing else similar.  We never put the idle
+	 * threads on the run queue, however.
 	 */
-	if (TD_IS_RUNNING(td)) {
+	if (td == PCPU_GET(idlethread))
+		TD_SET_CAN_RUN(td);
+	else if (TD_IS_RUNNING(td)) {
 		/* Put us back on the run queue (kse and all). */
 		setrunqueue(td);
 	} else if (p->p_flag & P_SA) {
@@ -671,7 +675,8 @@ sched_switch(struct thread *td)
 		 */
 		kse_reassign(ke);
 	}
-	newtd = choosethread();
+	if (newtd == NULL)
+		newtd = choosethread();
 	if (td != newtd)
 		cpu_switch(td, newtd);
 	sched_lock.mtx_lock = (uintptr_t)td;
@@ -830,7 +835,7 @@ sched_bind(struct thread *td, int cpu)
 	ke->ke_state = KES_THREAD;
-	mi_switch(SW_VOL);
+	mi_switch(SW_VOL, NULL);
 #endif
 }
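
For call sites, the visible change is the extra argument: passing NULL keeps
the old pick-a-thread behavior, while a caller that already knows the next
thread can hand it over directly.  A hypothetical, non-compilable caller
sketch follows; example_handoff() and "picked" are made-up names, and only
mi_switch(), SW_VOL, and the sched_lock usage come from the kernel of this era.

/*
 * Hypothetical caller sketch, not from this commit: code that already knows
 * which thread should run next can pass it to mi_switch() instead of letting
 * choosethread() rediscover it.
 */
static void
example_handoff(struct thread *picked)
{
	mtx_lock_spin(&sched_lock);
	if (picked != NULL)
		mi_switch(SW_VOL, picked);	/* switch straight to "picked" */
	else
		mi_switch(SW_VOL, NULL);	/* let the scheduler choose */
	mtx_unlock_spin(&sched_lock);
}
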