author | jeff <jeff@FreeBSD.org> | 2007-01-23 08:46:51 +0000 |
---|---|---|
committer | jeff <jeff@FreeBSD.org> | 2007-01-23 08:46:51 +0000 |
commit | 474b917526db60cd113b34f9bbb30e8d252bae24 | |
tree | b133e2bceeb7a9d12a55f7f5eda206c4edcf51e2 /sys/kern/sched_4bsd.c | |
parent | f53a7830f79b8d9247e5d2ae879f0a43c42b49fa | |
- Remove setrunqueue() and replace it with direct calls to sched_add().
setrunqueue() was mostly empty; the few asserts and the thread-state
setting moved into the individual schedulers. sched_add() was chosen
to displace it for naming-consistency reasons.
- Remove adjustrunqueue(); it was four lines of code that were ifdef'd to
differ on all three schedulers, and each scheduler called it from only one
place. The requeue-on-priority-change check that replaces it is modeled in
the first sketch below.
- Remove the remrunqueue code that had long been ifdef'd out.
- Remove the now-redundant ts_state; inspect the thread state directly
(see the second sketch below).
- Don't set TSF_* flags from kern_switch.c; we were only doing this to
support a feature in one scheduler.
- Change sched_choose() to return a thread rather than a td_sched, and
rely on the schedulers to return the idlethread. This simplifies the
logic in choosethread(); see the fallback sketch below. Aside from the
run-queue links, kern_switch.c mostly does not care about the contents
of td_sched.
Discussed with: julian
- Move the idle thread loop into the per-scheduler area. ULE wants to
do something different from the other schedulers.
Suggested by: jhb
Tested on: x86/amd64 sched_{4BSD, ULE, CORE}.
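
The replacement for adjustrunqueue() can be seen in the sched_priority() hunk of the diff below: the thread is removed and re-added only when the new priority lands in a different run-queue bucket (RQ_PPQ priorities share one queue). A minimal userspace model of that check; the function name is hypothetical, and RQ_PPQ is 4 as in FreeBSD's <sys/runq.h>:

```c
#include <stdbool.h>
#include <stdio.h>

#define	RQ_PPQ	4		/* priorities per run queue */

/*
 * Model of the new sched_priority() test: a requeue is needed only
 * when the priority change crosses a run-queue bucket boundary.
 */
static bool
needs_requeue(unsigned char oldprio, unsigned char newprio)
{
	return (oldprio / RQ_PPQ != newprio / RQ_PPQ);
}

int
main(void)
{
	/* 100 -> 102 stays in bucket 25: leave the thread where it is. */
	printf("100 -> 102: %s\n", needs_requeue(100, 102) ? "requeue" : "keep");
	/* 100 -> 96 moves to bucket 24: sched_rem() + sched_add(). */
	printf("100 -> 96:  %s\n", needs_requeue(100, 96) ? "requeue" : "keep");
	return (0);
}
```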
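Second sketch: why dropping ts_state is safe. The field mirrored state that struct thread already tracks, and a mirror can silently drift from the original. A self-contained C model of the single-source-of-truth check that a macro like TD_ON_RUNQ() performs; the types here are toy stand-ins, not the kernel's:

```c
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

enum td_state { TDS_CAN_RUN, TDS_RUNQ, TDS_RUNNING };

struct thread {
	enum td_state td_state;		/* the one canonical state */
};

struct td_sched {
	struct thread *ts_thread;
	/* Old design kept a TSS_THREAD/TSS_ONRUNQ mirror here. */
};

/* What TD_ON_RUNQ() reduces to: read the thread, not a shadow copy. */
static bool
td_on_runq(const struct thread *td)
{
	return (td->td_state == TDS_RUNQ);
}

int
main(void)
{
	struct thread td = { .td_state = TDS_RUNQ };
	struct td_sched ts = { .ts_thread = &td };

	assert(td_on_runq(ts.ts_thread));
	printf("on runq: %d\n", td_on_runq(ts.ts_thread));
	return (0);
}
```

With no second copy of the state, the old TSS_ONRUNQ assertions become direct TD_ON_RUNQ()/TD_CAN_RUN() checks, as the diff below shows.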
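Fallback sketch for the sched_choose() change: the scheduler now returns a struct thread * and hands back the idle thread itself when its run queues are empty, so choosethread() in kern_switch.c no longer needs a NULL case or any knowledge of td_sched. A toy userspace rendering of that contract (the types and list-based queue are stand-ins):

```c
#include <stddef.h>
#include <stdio.h>

struct thread {
	const char	*td_name;
	struct thread	*td_next;	/* toy run-queue link */
};

static struct thread *runq_head;	/* toy run queue */
static struct thread idlethread = { "idle", NULL };

/* New contract: always returns a runnable thread, never NULL. */
static struct thread *
sched_choose(void)
{
	struct thread *td;

	if ((td = runq_head) != NULL) {
		runq_head = td->td_next;	/* runq_remove() */
		return (td);
	}
	return (&idlethread);		/* scheduler-owned fallback */
}

int
main(void)
{
	struct thread worker = { "worker", NULL };

	runq_head = &worker;
	printf("%s\n", sched_choose()->td_name);	/* "worker" */
	printf("%s\n", sched_choose()->td_name);	/* "idle" */
	return (0);
}
```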
Diffstat (limited to 'sys/kern/sched_4bsd.c')
-rw-r--r-- | sys/kern/sched_4bsd.c | 99 |
1 file changed, 65 insertions, 34 deletions
```diff
diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index 4a52a71..3d795ea 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -83,10 +83,6 @@ struct td_sched {
 	struct thread	*ts_thread;	/* (*) Active associated thread. */
 	fixpt_t		ts_pctcpu;	/* (j) %cpu during p_swtime. */
 	u_char		ts_rqindex;	/* (j) Run queue index. */
-	enum {
-		TSS_THREAD = 0x0,	/* slaved to thread state */
-		TSS_ONRUNQ
-	} ts_state;			/* (j) TD_STAT in scheduler status. */
 	int		ts_cpticks;	/* (j) Ticks of cpu time. */
 	struct runq	*ts_runq;	/* runq the thread is currently on */
 };
@@ -112,8 +108,6 @@ static int	sched_quantum;	/* Roundrobin scheduling quantum in ticks. */
 
 static struct callout roundrobin_callout;
 
-static struct td_sched *sched_choose(void);
-
 static void	setup_runqs(void);
 static void	roundrobin(void *arg);
 static void	schedcpu(void);
@@ -404,11 +398,10 @@ schedcpu(void)
 			 * because the thread may not HAVE everything in
 			 * memory? XXX I think this is out of date.
 			 */
-			if (ts->ts_state == TSS_ONRUNQ) {
+			if (TD_ON_RUNQ(td)) {
 				awake = 1;
 				ts->ts_flags &= ~TSF_DIDRUN;
-			} else if ((ts->ts_state == TSS_THREAD) &&
-			    (TD_IS_RUNNING(td))) {
+			} else if (TD_IS_RUNNING(td)) {
 				awake = 1;
 				/* Do not clear TSF_DIDRUN */
 			} else if (ts->ts_flags & TSF_DIDRUN) {
@@ -584,7 +577,6 @@ schedinit(void)
 	proc0.p_sched = NULL; /* XXX */
 	thread0.td_sched = &td_sched0;
 	td_sched0.ts_thread = &thread0;
-	td_sched0.ts_state = TSS_THREAD;
 }
 
 int
@@ -709,10 +701,11 @@ sched_priority(struct thread *td, u_char prio)
 	mtx_assert(&sched_lock, MA_OWNED);
 	if (td->td_priority == prio)
 		return;
-	if (TD_ON_RUNQ(td)) {
-		adjustrunqueue(td, prio);
-	} else {
-		td->td_priority = prio;
+	td->td_priority = prio;
+	if (TD_ON_RUNQ(td) &&
+	    td->td_sched->ts_rqindex != (prio / RQ_PPQ)) {
+		sched_rem(td);
+		sched_add(td, SRQ_BORING);
 	}
 }
@@ -878,7 +871,7 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
 	else {
 		if (TD_IS_RUNNING(td)) {
 			/* Put us back on the run queue. */
-			setrunqueue(td, (flags & SW_PREEMPT) ?
+			sched_add(td, (flags & SW_PREEMPT) ?
 			    SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED :
 			    SRQ_OURSELF|SRQ_YIELDING);
 		}
@@ -928,7 +921,7 @@ sched_wakeup(struct thread *td)
 		resetpriority(td);
 	}
 	td->td_slptime = 0;
-	setrunqueue(td, SRQ_BORING);
+	sched_add(td, SRQ_BORING);
 }
 
 #ifdef SMP
@@ -1065,15 +1058,16 @@ sched_add(struct thread *td, int flags)
 	ts = td->td_sched;
 	mtx_assert(&sched_lock, MA_OWNED);
-	KASSERT(ts->ts_state != TSS_ONRUNQ,
-	    ("sched_add: td_sched %p (%s) already in run queue", ts,
-	    td->td_proc->p_comm));
+	KASSERT((td->td_inhibitors == 0),
+	    ("sched_add: trying to run inhibited thread"));
+	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
+	    ("sched_add: bad thread state"));
 	KASSERT(td->td_proc->p_sflag & PS_INMEM,
 	    ("sched_add: process swapped out"));
 	CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)",
 	    td, td->td_proc->p_comm, td->td_priority, curthread,
 	    curthread->td_proc->p_comm);
-
+	TD_SET_RUNQ(td);
 	if (td->td_pinned != 0) {
 		cpu = td->td_lastcpu;
@@ -1119,21 +1113,22 @@ sched_add(struct thread *td, int flags)
 	if ((td->td_proc->p_flag & P_NOLOAD) == 0)
 		sched_load_add();
 	runq_add(ts->ts_runq, ts, flags);
-	ts->ts_state = TSS_ONRUNQ;
 }
 #else /* SMP */
 {
 	struct td_sched *ts;
 	ts = td->td_sched;
 	mtx_assert(&sched_lock, MA_OWNED);
-	KASSERT(ts->ts_state != TSS_ONRUNQ,
-	    ("sched_add: td_sched %p (%s) already in run queue", ts,
-	    td->td_proc->p_comm));
+	KASSERT((td->td_inhibitors == 0),
+	    ("sched_add: trying to run inhibited thread"));
+	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
+	    ("sched_add: bad thread state"));
 	KASSERT(td->td_proc->p_sflag & PS_INMEM,
 	    ("sched_add: process swapped out"));
 	CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)",
 	    td, td->td_proc->p_comm, td->td_priority, curthread,
 	    curthread->td_proc->p_comm);
+	TD_SET_RUNQ(td);
 	CTR2(KTR_RUNQ, "sched_add: adding td_sched:%p (td:%p) to runq",
 	    ts, td);
 	ts->ts_runq = &runq;
@@ -1155,7 +1150,6 @@ sched_add(struct thread *td, int flags)
 	if ((td->td_proc->p_flag & P_NOLOAD) == 0)
 		sched_load_add();
 	runq_add(ts->ts_runq, ts, flags);
-	ts->ts_state = TSS_ONRUNQ;
 	maybe_resched(td);
 }
 #endif /* SMP */
@@ -1168,7 +1162,7 @@ sched_rem(struct thread *td)
 	ts = td->td_sched;
 	KASSERT(td->td_proc->p_sflag & PS_INMEM,
 	    ("sched_rem: process swapped out"));
-	KASSERT((ts->ts_state == TSS_ONRUNQ),
+	KASSERT(TD_ON_RUNQ(td),
 	    ("sched_rem: thread not on run queue"));
 	mtx_assert(&sched_lock, MA_OWNED);
 	CTR5(KTR_SCHED, "sched_rem: %p(%s) prio %d by %p(%s)",
@@ -1178,15 +1172,14 @@ sched_rem(struct thread *td)
 	if ((td->td_proc->p_flag & P_NOLOAD) == 0)
 		sched_load_rem();
 	runq_remove(ts->ts_runq, ts);
-
-	ts->ts_state = TSS_THREAD;
+	TD_SET_CAN_RUN(td);
 }
 
 /*
  * Select threads to run.
  * Notice that the running threads still consume a slot.
  */
-struct td_sched *
+struct thread *
 sched_choose(void)
 {
 	struct td_sched *ts;
@@ -1217,12 +1210,13 @@ sched_choose(void)
 
 	if (ts) {
 		runq_remove(rq, ts);
-		ts->ts_state = TSS_THREAD;
+		ts->ts_flags |= TSF_DIDRUN;
 		KASSERT(ts->ts_thread->td_proc->p_sflag & PS_INMEM,
 		    ("sched_choose: process swapped out"));
-	}
-	return (ts);
+		return (ts->ts_thread);
+	}
+	return (PCPU_GET(idlethread));
 }
 
 void
@@ -1264,8 +1258,6 @@ sched_bind(struct thread *td, int cpu)
 	if (PCPU_GET(cpuid) == cpu)
 		return;
 
-	ts->ts_state = TSS_THREAD;
-
 	mi_switch(SW_VOL, NULL);
 #endif
 }
@@ -1325,5 +1317,44 @@ void
 sched_tick(void)
 {
 }
+
+/*
+ * The actual idle process.
+ */
+void
+sched_idletd(void *dummy)
+{
+	struct proc *p;
+	struct thread *td;
+#ifdef SMP
+	cpumask_t mycpu;
+#endif
+
+	td = curthread;
+	p = td->td_proc;
+#ifdef SMP
+	mycpu = PCPU_GET(cpumask);
+	mtx_lock_spin(&sched_lock);
+	idle_cpus_mask |= mycpu;
+	mtx_unlock_spin(&sched_lock);
+#endif
+	for (;;) {
+		mtx_assert(&Giant, MA_NOTOWNED);
+
+		while (sched_runnable() == 0)
+			cpu_idle();
+
+		mtx_lock_spin(&sched_lock);
+#ifdef SMP
+		idle_cpus_mask &= ~mycpu;
+#endif
+		mi_switch(SW_VOL, NULL);
+#ifdef SMP
+		idle_cpus_mask |= mycpu;
+#endif
+		mtx_unlock_spin(&sched_lock);
+	}
+}
+
 #define	KERN_SWITCH_INCLUDE 1
 #include "kern/kern_switch.c"
```