Diffstat (limited to 'sys/kern/sched_4bsd.c')
-rw-r--r--	sys/kern/sched_4bsd.c	764
1 file changed, 135 insertions, 629 deletions
diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index dc9d288..3fe2682 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -37,8 +37,6 @@ __FBSDID("$FreeBSD$");
 
 #include "opt_hwpmc_hooks.h"
 
-#define kse td_sched
-
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/kernel.h>
@@ -75,99 +73,38 @@ __FBSDID("$FreeBSD$");
 #endif
 #define	NICE_WEIGHT	1	/* Priorities per nice level. */
 
-#ifdef KSE
-/*
- * The schedulable entity that can be given a context to run.
- * A process may have several of these. Probably one per processor
- * but possibly a few more. In this universe they are grouped
- * with a KSEG that contains the priority and niceness
- * for the group.
- */
-#else
 /*
  * The schedulable entity that runs a context.
- * A process may have several of these. Probably one per processor
- * but posibly a few more.
+ * This is an extension to the thread structure and is tailored to
+ * the requirements of this scheduler
  */
-#endif
-struct kse {
-	TAILQ_ENTRY(kse) ke_procq;	/* (j/z) Run queue. */
-	struct thread	*ke_thread;	/* (*) Active associated thread. */
-	fixpt_t		ke_pctcpu;	/* (j) %cpu during p_swtime. */
-	u_char		ke_rqindex;	/* (j) Run queue index. */
+struct td_sched {
+	TAILQ_ENTRY(td_sched) ts_procq;	/* (j/z) Run queue. */
+	struct thread	*ts_thread;	/* (*) Active associated thread. */
+	fixpt_t		ts_pctcpu;	/* (j) %cpu during p_swtime. */
+	u_char		ts_rqindex;	/* (j) Run queue index. */
 	enum {
-		KES_THREAD = 0x0,	/* slaved to thread state */
-		KES_ONRUNQ
-	} ke_state;			/* (j) KSE status. */
-	int		ke_cpticks;	/* (j) Ticks of cpu time. */
-	struct runq	*ke_runq;	/* runq the kse is currently on */
+		TSS_THREAD = 0x0,	/* slaved to thread state */
+		TSS_ONRUNQ
+	} ts_state;			/* (j) TD_STAT in scheduler status. */
+	int		ts_cpticks;	/* (j) Ticks of cpu time. */
+	struct runq	*ts_runq;	/* runq the thread is currently on */
 };
 
-#ifdef KSE
-#define ke_proc		ke_thread->td_proc
-#define ke_ksegrp	ke_thread->td_ksegrp
-#endif
-
-#define td_kse td_sched
-
 /* flags kept in td_flags */
-#define TDF_DIDRUN	TDF_SCHED0	/* KSE actually ran. */
-#define TDF_EXIT	TDF_SCHED1	/* KSE is being killed. */
+#define TDF_DIDRUN	TDF_SCHED0	/* thread actually ran. */
+#define TDF_EXIT	TDF_SCHED1	/* thread is being killed. */
 #define TDF_BOUND	TDF_SCHED2
 
-#define ke_flags	ke_thread->td_flags
-#define KEF_DIDRUN	TDF_DIDRUN	/* KSE actually ran. */
-#define KEF_EXIT	TDF_EXIT	/* KSE is being killed. */
-#define KEF_BOUND	TDF_BOUND	/* stuck to one CPU */
-
-#define SKE_RUNQ_PCPU(ke)						\
-    ((ke)->ke_runq != 0 && (ke)->ke_runq != &runq)
-
-#ifdef KSE
-struct kg_sched {
-	struct thread	*skg_last_assigned; /* (j) Last thread assigned to */
-					    /* the system scheduler. */
-	int	skg_avail_opennings;	/* (j) Num KSEs requested in group. */
-	int	skg_concurrency;	/* (j) Num KSEs requested in group. */
-};
-#define kg_last_assigned	kg_sched->skg_last_assigned
-#define kg_avail_opennings	kg_sched->skg_avail_opennings
-#define kg_concurrency		kg_sched->skg_concurrency
-
-#define SLOT_RELEASE(kg)						\
-do {									\
-	kg->kg_avail_opennings++;					\
-	CTR3(KTR_RUNQ, "kg %p(%d) Slot released (->%d)",		\
-	kg,								\
-	kg->kg_concurrency,						\
-	kg->kg_avail_opennings);					\
-/*	KASSERT((kg->kg_avail_opennings <= kg->kg_concurrency),	\
-	    ("slots out of whack"));*/					\
-} while (0)
-
-#define SLOT_USE(kg)							\
-do {									\
-	kg->kg_avail_opennings--;					\
-	CTR3(KTR_RUNQ, "kg %p(%d) Slot used (->%d)",			\
-	kg,								\
-	kg->kg_concurrency,						\
-	kg->kg_avail_opennings);					\
-/*	KASSERT((kg->kg_avail_opennings >= 0),				\
-	    ("slots out of whack"));*/					\
-} while (0)
-#endif
+#define ts_flags	ts_thread->td_flags
+#define TSF_DIDRUN	TDF_DIDRUN	/* thread actually ran. */
+#define TSF_EXIT	TDF_EXIT	/* thread is being killed. */
+#define TSF_BOUND	TDF_BOUND	/* stuck to one CPU */
 
-/*
- * KSE_CAN_MIGRATE macro returns true if the kse can migrate between
- * cpus.
- */
-#define KSE_CAN_MIGRATE(ke)						\
-    ((ke)->ke_thread->td_pinned == 0 && ((ke)->ke_flags & KEF_BOUND) == 0)
+#define SKE_RUNQ_PCPU(ts)						\
+    ((ts)->ts_runq != 0 && (ts)->ts_runq != &runq)
 
-static struct kse kse0;
-#ifdef KSE
-static struct kg_sched kg_sched0;
-#endif
+static struct td_sched td_sched0;
 
 static int	sched_tdcnt;	/* Total runnable threads in the system. */
 static int	sched_quantum;	/* Roundrobin scheduling quantum in ticks. */
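
The struct td_sched introduced above is the whole of the per-thread state this scheduler keeps, and it pairs one-to-one with its thread through the ts_thread/td_sched back-pointers (schedinit() wires up thread0 and td_sched0 the same way later in this diff). A minimal stand-alone model of that pairing — the field set is trimmed and fixpt_t is a plain typedef here, so treat it as an illustration, not the kernel definition:

	#include <stdio.h>

	typedef unsigned int fixpt_t;	/* stand-in for the kernel's fixed-point type */
	struct runq;			/* opaque in this sketch */

	struct thread;

	struct td_sched {		/* scheduler-private extension of a thread */
		struct thread	*ts_thread;	/* back-pointer to owning thread */
		fixpt_t		ts_pctcpu;	/* decayed %cpu estimate */
		int		ts_cpticks;	/* stat-clock ticks this second */
		struct runq	*ts_runq;	/* run queue the thread is on */
	};

	struct thread {
		int		td_priority;
		struct td_sched	*td_sched;	/* the 1:1 companion structure */
	};

	int main(void)
	{
		struct td_sched ts = { 0 };
		struct thread td = { 180, &ts };

		ts.ts_thread = &td;	/* mirror of schedinit()'s wiring */
		printf("prio %d cpticks %d\n", td.td_priority,
		    td.td_sched->ts_cpticks);
		return (0);
	}
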
@@ -175,12 +112,7 @@ static int	sched_quantum;	/* Roundrobin scheduling quantum in ticks. */
 
 static struct callout roundrobin_callout;
 
-#ifdef KSE
-static void	slot_fill(struct ksegrp *kg);
-static struct kse *sched_choose(void);	/* XXX Should be thread * */
-#else
-static struct thread *sched_choose(void);
-#endif
+static struct td_sched *sched_choose(void);
 
 static void	setup_runqs(void);
 static void	roundrobin(void *arg);
@@ -189,15 +121,9 @@ static void	schedcpu_thread(void);
 static void	sched_priority(struct thread *td, u_char prio);
 static void	sched_setup(void *dummy);
 static void	maybe_resched(struct thread *td);
-#ifdef KSE
-static void	updatepri(struct ksegrp *kg);
-static void	resetpriority(struct ksegrp *kg);
-static void	resetpriority_thread(struct thread *td, struct ksegrp *kg);
-#else
 static void	updatepri(struct thread *td);
 static void	resetpriority(struct thread *td);
 static void	resetpriority_thread(struct thread *td);
-#endif
 #ifdef SMP
 static int	forward_wakeup(int cpunum);
 #endif
@@ -300,21 +226,11 @@ SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, htt2, CTLFLAG_RW,
 	   "account for htt");
 #endif
 
-#ifdef KSE
+#if 0
 static int sched_followon = 0;
 SYSCTL_INT(_kern_sched, OID_AUTO, followon, CTLFLAG_RW,
 	   &sched_followon, 0,
 	   "allow threads to share a quantum");
-
-static int sched_pfollowons = 0;
-SYSCTL_INT(_kern_sched, OID_AUTO, pfollowons, CTLFLAG_RD,
-	   &sched_pfollowons, 0,
-	   "number of followons done to a different ksegrp");
-
-static int sched_kgfollowons = 0;
-SYSCTL_INT(_kern_sched, OID_AUTO, kgfollowons, CTLFLAG_RD,
-	   &sched_kgfollowons, 0,
-	   "number of followons done in a ksegrp");
 #endif
 
 static __inline void
@@ -366,40 +282,20 @@ roundrobin(void *arg)
 
 /*
  * Constants for digital decay and forget:
- * ifdef KSE
- *	90% of (kg_estcpu) usage in 5 * loadav time
- * else
  *	90% of (td_estcpu) usage in 5 * loadav time
- * endif
- *	95% of (ke_pctcpu) usage in 60 seconds (load insensitive)
+ *	95% of (ts_pctcpu) usage in 60 seconds (load insensitive)
  * Note that, as ps(1) mentions, this can let percentages
  * total over 100% (I've seen 137.9% for 3 processes).
 *
- * ifdef KSE
- * Note that schedclock() updates kg_estcpu and p_cpticks asynchronously.
- * else
  * Note that schedclock() updates td_estcpu and p_cpticks asynchronously.
- * endif
 *
- * ifdef KSE
- * We wish to decay away 90% of kg_estcpu in (5 * loadavg) seconds.
- * else
  * We wish to decay away 90% of td_estcpu in (5 * loadavg) seconds.
- * endif
  * That is, the system wants to compute a value of decay such
  * that the following for loop:
  * 	for (i = 0; i < (5 * loadavg); i++)
- * ifdef KSE
- *		kg_estcpu *= decay;
- * else
 *		td_estcpu *= decay;
- * endif
  * will compute
- * ifdef KSE
- *	kg_estcpu *= 0.1;
- * else
 *	td_estcpu *= 0.1;
- * endif
  * for all values of loadavg:
 *
  * Mathematically this loop can be expressed by saying:
@@ -452,7 +348,7 @@
 #define	loadfactor(loadav)	(2 * (loadav))
 #define	decay_cpu(loadfac, cpu)	(((loadfac) * (cpu)) / ((loadfac) + FSCALE))
 
-/* decay 95% of `ke_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
+/* decay 95% of `ts_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
 static fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;	/* exp(-1/20) */
 SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");
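
The decay factor those comments derive is decay = (2*loadavg)/(2*loadavg + 1), and ccpu is exp(-1/20) so that one decay per second leaves roughly 5% after 60 seconds. A quick stand-alone check of both claims, using ordinary doubles rather than the kernel's fixed-point representation:

	#include <math.h>
	#include <stdio.h>

	int main(void)
	{
		/* decay**(5*loadavg) should be ~0.1 for all loadavg values */
		for (int loadavg = 1; loadavg <= 4; loadavg++) {
			double decay = (2.0 * loadavg) / (2.0 * loadavg + 1.0);
			double v = 1.0;
			for (int i = 0; i < 5 * loadavg; i++)	/* the loop in the comment */
				v *= decay;
			printf("loadavg %d: %.3f of estcpu left (target ~0.1)\n",
			    loadavg, v);
		}
		/* ccpu = exp(-1/20), applied once a second, leaves ~5% after 60s */
		printf("after 60s: %.4f (target ~0.05)\n",
		    pow(exp(-1.0 / 20.0), 60.0));
		return (0);
	}
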
@@ -481,10 +377,7 @@ schedcpu(void)
 	register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
 	struct thread *td;
 	struct proc *p;
-	struct kse *ke;
-#ifdef KSE
-	struct ksegrp *kg;
-#endif
+	struct td_sched *ts;
 	int awake, realstathz;
 
 	realstathz = stathz ? stathz : hz;
@@ -499,126 +392,63 @@ schedcpu(void)
 		 * 16-bit int's (remember them?) overflow takes 45 days.
 		 */
 		p->p_swtime++;
-#ifdef KSE
-		FOREACH_KSEGRP_IN_PROC(p, kg) {
-#else
 		FOREACH_THREAD_IN_PROC(p, td) {
-#endif
 			awake = 0;
-#ifdef KSE
-			FOREACH_THREAD_IN_GROUP(kg, td) {
-				ke = td->td_kse;
-				/*
-				 * Increment sleep time (if sleeping). We
-				 * ignore overflow, as above.
-				 */
-				/*
-				 * The kse slptimes are not touched in wakeup
-				 * because the thread may not HAVE a KSE.
-				 */
-				if (ke->ke_state == KES_ONRUNQ) {
-					awake = 1;
-					ke->ke_flags &= ~KEF_DIDRUN;
-				} else if ((ke->ke_state == KES_THREAD) &&
-				    (TD_IS_RUNNING(td))) {
-					awake = 1;
-					/* Do not clear KEF_DIDRUN */
-				} else if (ke->ke_flags & KEF_DIDRUN) {
-					awake = 1;
-					ke->ke_flags &= ~KEF_DIDRUN;
-				}
-
-				/*
-				 * ke_pctcpu is only for ps and ttyinfo().
-				 * Do it per kse, and add them up at the end?
-				 * XXXKSE
-				 */
-				ke->ke_pctcpu = (ke->ke_pctcpu * ccpu) >>
-				    FSHIFT;
-				/*
-				 * If the kse has been idle the entire second,
-				 * stop recalculating its priority until
-				 * it wakes up.
-				 */
-				if (ke->ke_cpticks == 0)
-					continue;
-#if	(FSHIFT >= CCPU_SHIFT)
-				ke->ke_pctcpu += (realstathz == 100)
-				    ? ((fixpt_t) ke->ke_cpticks) <<
-				    (FSHIFT - CCPU_SHIFT) :
-				    100 * (((fixpt_t) ke->ke_cpticks)
-				    << (FSHIFT - CCPU_SHIFT)) / realstathz;
-#else
-				ke->ke_pctcpu += ((FSCALE - ccpu) *
-				    (ke->ke_cpticks *
-				    FSCALE / realstathz)) >> FSHIFT;
-#endif
-				ke->ke_cpticks = 0;
-			} /* end of kse loop */
-#else
-			ke = td->td_kse;
+			ts = td->td_sched;
 			/*
 			 * Increment sleep time (if sleeping). We
 			 * ignore overflow, as above.
 			 */
 			/*
-			 * The kse slptimes are not touched in wakeup
-			 * because the thread may not HAVE a KSE.
+			 * The td_sched slptimes are not touched in wakeup
+			 * because the thread may not HAVE everything in
+			 * memory? XXX I think this is out of date.
 			 */
-			if (ke->ke_state == KES_ONRUNQ) {
+			if (ts->ts_state == TSS_ONRUNQ) {
 				awake = 1;
-				ke->ke_flags &= ~KEF_DIDRUN;
-			} else if ((ke->ke_state == KES_THREAD) &&
+				ts->ts_flags &= ~TSF_DIDRUN;
+			} else if ((ts->ts_state == TSS_THREAD) &&
 			    (TD_IS_RUNNING(td))) {
 				awake = 1;
-				/* Do not clear KEF_DIDRUN */
-			} else if (ke->ke_flags & KEF_DIDRUN) {
+				/* Do not clear TSF_DIDRUN */
+			} else if (ts->ts_flags & TSF_DIDRUN) {
 				awake = 1;
-				ke->ke_flags &= ~KEF_DIDRUN;
+				ts->ts_flags &= ~TSF_DIDRUN;
 			}
 
 			/*
-			 * ke_pctcpu is only for ps and ttyinfo().
-			 * Do it per kse, and add them up at the end?
+			 * ts_pctcpu is only for ps and ttyinfo().
+			 * Do it per td_sched, and add them up at the end?
 			 * XXXKSE
 			 */
-			ke->ke_pctcpu = (ke->ke_pctcpu * ccpu) >>
-			    FSHIFT;
+			ts->ts_pctcpu = (ts->ts_pctcpu * ccpu) >> FSHIFT;
 			/*
-			 * If the kse has been idle the entire second,
+			 * If the td_sched has been idle the entire second,
 			 * stop recalculating its priority until
 			 * it wakes up.
 			 */
-			if (ke->ke_cpticks != 0) {
+			if (ts->ts_cpticks != 0) {
 #if	(FSHIFT >= CCPU_SHIFT)
-				ke->ke_pctcpu += (realstathz == 100)
-				    ? ((fixpt_t) ke->ke_cpticks) <<
-				    (FSHIFT - CCPU_SHIFT) :
-				    100 * (((fixpt_t) ke->ke_cpticks)
-				    << (FSHIFT - CCPU_SHIFT)) / realstathz;
+				ts->ts_pctcpu += (realstathz == 100)
+				    ? ((fixpt_t) ts->ts_cpticks) <<
+				    (FSHIFT - CCPU_SHIFT) :
+				    100 * (((fixpt_t) ts->ts_cpticks)
+				    << (FSHIFT - CCPU_SHIFT)) / realstathz;
 #else
-				ke->ke_pctcpu += ((FSCALE - ccpu) *
-				    (ke->ke_cpticks *
-				    FSCALE / realstathz)) >> FSHIFT;
+				ts->ts_pctcpu += ((FSCALE - ccpu) *
+				    (ts->ts_cpticks *
+				    FSCALE / realstathz)) >> FSHIFT;
 #endif
-				ke->ke_cpticks = 0;
+				ts->ts_cpticks = 0;
 			}
-#endif
 			/*
-			 * ifdef KSE
-			 * If there are ANY running threads in this KSEGRP,
-			 * else
 			 * If there are ANY running threads in this process,
-			 * endif
 			 * then don't count it as sleeping.
+XXX this is broken
+
 			 */
 			if (awake) {
-#ifdef KSE
-				if (kg->kg_slptime > 1) {
-#else
-				if (td->td_slptime > 1) {
-#endif
+				if (p->p_slptime > 1) {
 					/*
 					 * In an ideal world, this should not
 					 * happen, because whoever woke us
 					 * up from the long sleep should have
 					 * immediately calculated the new
 					 * priority. Should KASSERT at some
 					 * point when all the cases are fixed.
 					 */
-#ifdef KSE
-					updatepri(kg);
-				}
-				kg->kg_slptime = 0;
-			} else
-				kg->kg_slptime++;
-			if (kg->kg_slptime > 1)
-				continue;
-			kg->kg_estcpu = decay_cpu(loadfac, kg->kg_estcpu);
-			resetpriority(kg);
-			FOREACH_THREAD_IN_GROUP(kg, td) {
-				resetpriority_thread(td, kg);
-			}
-		} /* end of ksegrp loop */
-#else
 					updatepri(td);
 				}
 				td->td_slptime = 0;
@@ -654,7 +469,6 @@
 			resetpriority(td);
 			resetpriority_thread(td);
 		} /* end of thread loop */
-#endif
 		mtx_unlock_spin(&sched_lock);
 	} /* end of process loop */
 	sx_sunlock(&allproc_lock);
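
Everything schedcpu() does to ts_pctcpu above is integer fixed point: multiply by ccpu and shift right by FSHIFT to decay, then add this second's stat-clock ticks rescaled by (FSCALE - ccpu). A stand-alone model using the generic (#else) formula — FSHIFT = 11 and stathz = 128 are assumptions here, not values taken from this file:

	#include <stdio.h>

	#define FSHIFT	11		/* assumed; bits right of the binary point */
	#define FSCALE	(1 << FSHIFT)

	typedef unsigned int fixpt_t;

	static const fixpt_t ccpu = (fixpt_t)(0.95122942450071400909 * FSCALE);

	int main(void)
	{
		fixpt_t pctcpu = 0;
		int realstathz = 128;	/* assumed stat clock rate */

		/* Two minutes at 50% busy: 64 of 128 stat ticks charged per second. */
		for (int sec = 0; sec < 120; sec++) {
			int cpticks = 64;
			pctcpu = (pctcpu * ccpu) >> FSHIFT;		/* decay */
			pctcpu += ((FSCALE - ccpu) *
			    (cpticks * FSCALE / realstathz)) >> FSHIFT;	/* charge */
		}
		/* converges toward 0.5 * FSCALE, i.e. 50% */
		printf("pctcpu = %u/%d = %.1f%%\n", pctcpu, FSCALE,
		    100.0 * pctcpu / FSCALE);
		return (0);
	}
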
@@ -676,48 +490,24 @@ schedcpu_thread(void)
 
 /*
  * Recalculate the priority of a process after it has slept for a while.
- * ifdef KSE
- * For all load averages >= 1 and max kg_estcpu of 255, sleeping for at
- * least six times the loadfactor will decay kg_estcpu to zero.
- * else
  * For all load averages >= 1 and max td_estcpu of 255, sleeping for at
  * least six times the loadfactor will decay td_estcpu to zero.
- * endif
  */
 static void
-#ifdef KSE
-updatepri(struct ksegrp *kg)
-#else
 updatepri(struct thread *td)
-#endif
 {
 	register fixpt_t loadfac;
 	register unsigned int newcpu;
 
 	loadfac = loadfactor(averunnable.ldavg[0]);
-#ifdef KSE
-	if (kg->kg_slptime > 5 * loadfac)
-		kg->kg_estcpu = 0;
-#else
 	if (td->td_slptime > 5 * loadfac)
 		td->td_estcpu = 0;
-#endif
 	else {
-#ifdef KSE
-		newcpu = kg->kg_estcpu;
-		kg->kg_slptime--;	/* was incremented in schedcpu() */
-		while (newcpu && --kg->kg_slptime)
-#else
 		newcpu = td->td_estcpu;
 		td->td_slptime--;	/* was incremented in schedcpu() */
 		while (newcpu && --td->td_slptime)
-#endif
 			newcpu = decay_cpu(loadfac, newcpu);
-#ifdef KSE
-		kg->kg_estcpu = newcpu;
-#else
 		td->td_estcpu = newcpu;
-#endif
 	}
 }
 
@@ -727,43 +517,25 @@ updatepri(struct thread *td)
 * than that of the current process.
 */
 static void
-#ifdef KSE
-resetpriority(struct ksegrp *kg)
-#else
 resetpriority(struct thread *td)
-#endif
 {
 	register unsigned int newpriority;
 
-#ifdef KSE
-	if (kg->kg_pri_class == PRI_TIMESHARE) {
-		newpriority = PUSER + kg->kg_estcpu / INVERSE_ESTCPU_WEIGHT +
-		    NICE_WEIGHT * (kg->kg_proc->p_nice - PRIO_MIN);
-#else
 	if (td->td_pri_class == PRI_TIMESHARE) {
 		newpriority = PUSER + td->td_estcpu / INVERSE_ESTCPU_WEIGHT +
 		    NICE_WEIGHT * (td->td_proc->p_nice - PRIO_MIN);
-#endif
 		newpriority = min(max(newpriority, PRI_MIN_TIMESHARE),
 		    PRI_MAX_TIMESHARE);
-#ifdef KSE
-		sched_user_prio(kg, newpriority);
-#else
 		sched_user_prio(td, newpriority);
-#endif
 	}
 }
 
 /*
- * Update the thread's priority when the associated ksegroup's user
+ * Update the thread's priority when the associated process's user
 * priority changes.
 */
 static void
-#ifdef KSE
-resetpriority_thread(struct thread *td, struct ksegrp *kg)
-#else
 resetpriority_thread(struct thread *td)
-#endif
 {
 
 	/* Only change threads with a time sharing user priority. */
@@ -774,11 +546,7 @@ resetpriority_thread(struct thread *td)
 
 	/* XXX the whole needresched thing is broken, but not silly. */
 	maybe_resched(td);
 
-#ifdef KSE
-	sched_prio(td, kg->kg_user_pri);
-#else
 	sched_prio(td, td->td_user_pri);
-#endif
 }
 
 /* ARGSUSED */
 static void
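
resetpriority() above is the entire nice/estcpu-to-priority mapping for timeshare threads: PUSER + td_estcpu / INVERSE_ESTCPU_WEIGHT + NICE_WEIGHT * (p_nice - PRIO_MIN), clamped into the timeshare band. A worked version with the era's conventional constants — the PUSER, PRI_*_TIMESHARE, and INVERSE_ESTCPU_WEIGHT values below are assumptions quoted from memory, not taken from this diff:

	#include <stdio.h>

	#define PUSER			160	/* assumed */
	#define PRI_MIN_TIMESHARE	160	/* assumed */
	#define PRI_MAX_TIMESHARE	223	/* assumed */
	#define INVERSE_ESTCPU_WEIGHT	8	/* assumed */
	#define NICE_WEIGHT		1	/* as defined earlier in this file */
	#define PRIO_MIN		(-20)

	int main(void)
	{
		int cases[][2] = { {0, 0}, {100, 0}, {255, 0}, {0, 20}, {100, -20} };

		for (int i = 0; i < 5; i++) {
			int estcpu = cases[i][0], nice = cases[i][1];
			unsigned int pri = PUSER + estcpu / INVERSE_ESTCPU_WEIGHT +
			    NICE_WEIGHT * (nice - PRIO_MIN);
			if (pri < PRI_MIN_TIMESHARE)	/* same clamp as resetpriority() */
				pri = PRI_MIN_TIMESHARE;
			if (pri > PRI_MAX_TIMESHARE)
				pri = PRI_MAX_TIMESHARE;
			printf("estcpu %3d nice %3d -> priority %u\n",
			    estcpu, nice, pri);
		}
		return (0);
	}
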
The cpu usage estimator ramps up * quite quickly when the process is running (linearly), and decays @@ -866,163 +622,83 @@ sched_rr_interval(void) void sched_clock(struct thread *td) { -#ifdef KSE - struct ksegrp *kg; -#endif - struct kse *ke; + struct td_sched *ts; mtx_assert(&sched_lock, MA_OWNED); -#ifdef KSE - kg = td->td_ksegrp; -#endif - ke = td->td_kse; - - ke->ke_cpticks++; -#ifdef KSE - kg->kg_estcpu = ESTCPULIM(kg->kg_estcpu + 1); - if ((kg->kg_estcpu % INVERSE_ESTCPU_WEIGHT) == 0) { - resetpriority(kg); - resetpriority_thread(td, kg); -#else + ts = td->td_sched; + + ts->ts_cpticks++; td->td_estcpu = ESTCPULIM(td->td_estcpu + 1); if ((td->td_estcpu % INVERSE_ESTCPU_WEIGHT) == 0) { resetpriority(td); resetpriority_thread(td); -#endif } } -#ifdef KSE -/* - * charge childs scheduling cpu usage to parent. - * - * XXXKSE assume only one thread & kse & ksegrp keep estcpu in each ksegrp. - * Charge it to the ksegrp that did the wait since process estcpu is sum of - * all ksegrps, this is strictly as expected. Assume that the child process - * aggregated all the estcpu into the 'built-in' ksegrp. - */ -#else /* * charge childs scheduling cpu usage to parent. */ -#endif void sched_exit(struct proc *p, struct thread *td) { -#ifdef KSE - sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), td); - sched_exit_thread(FIRST_THREAD_IN_PROC(p), td); -#else - struct thread *parent = FIRST_THREAD_IN_PROC(p); CTR3(KTR_SCHED, "sched_exit: %p(%s) prio %d", td, td->td_proc->p_comm, td->td_priority); - parent->td_estcpu = ESTCPULIM(parent->td_estcpu + td->td_estcpu); - if ((td->td_proc->p_flag & P_NOLOAD) == 0) - sched_load_rem(); -#endif -} - -#ifdef KSE -void -sched_exit_ksegrp(struct ksegrp *kg, struct thread *childtd) -{ - - mtx_assert(&sched_lock, MA_OWNED); - kg->kg_estcpu = ESTCPULIM(kg->kg_estcpu + childtd->td_ksegrp->kg_estcpu); + sched_exit_thread(FIRST_THREAD_IN_PROC(p), td); } void sched_exit_thread(struct thread *td, struct thread *child) { + struct proc *childproc = child->td_proc; + CTR3(KTR_SCHED, "sched_exit_thread: %p(%s) prio %d", - child, child->td_proc->p_comm, child->td_priority); + child, childproc->p_comm, child->td_priority); + td->td_estcpu = ESTCPULIM(td->td_estcpu + child->td_estcpu); + childproc->p_estcpu = ESTCPULIM(childproc->p_estcpu + + child->td_estcpu); if ((child->td_proc->p_flag & P_NOLOAD) == 0) sched_load_rem(); } -#endif void sched_fork(struct thread *td, struct thread *childtd) { -#ifdef KSE - sched_fork_ksegrp(td, childtd->td_ksegrp); sched_fork_thread(td, childtd); -#else - childtd->td_estcpu = td->td_estcpu; - sched_newthread(childtd); -#endif -} - -#ifdef KSE -void -sched_fork_ksegrp(struct thread *td, struct ksegrp *child) -{ - mtx_assert(&sched_lock, MA_OWNED); - child->kg_estcpu = td->td_ksegrp->kg_estcpu; } void sched_fork_thread(struct thread *td, struct thread *childtd) { + childtd->td_estcpu = td->td_estcpu; sched_newthread(childtd); } -#endif void sched_nice(struct proc *p, int nice) { -#ifdef KSE - struct ksegrp *kg; -#endif struct thread *td; PROC_LOCK_ASSERT(p, MA_OWNED); mtx_assert(&sched_lock, MA_OWNED); p->p_nice = nice; -#ifdef KSE - FOREACH_KSEGRP_IN_PROC(p, kg) { - resetpriority(kg); - FOREACH_THREAD_IN_GROUP(kg, td) { - resetpriority_thread(td, kg); - } - } -#else FOREACH_THREAD_IN_PROC(p, td) { resetpriority(td); resetpriority_thread(td); } -#endif } void -#ifdef KSE -sched_class(struct ksegrp *kg, int class) -#else sched_class(struct thread *td, int class) -#endif { mtx_assert(&sched_lock, MA_OWNED); -#ifdef KSE - kg->kg_pri_class = class; -#else 
 
 void
-#ifdef KSE
-sched_class(struct ksegrp *kg, int class)
-#else
 sched_class(struct thread *td, int class)
-#endif
 {
 	mtx_assert(&sched_lock, MA_OWNED);
-#ifdef KSE
-	kg->kg_pri_class = class;
-#else
 	td->td_pri_class = class;
-#endif
 }
 
-#ifdef KSE
-/*
- * Adjust the priority of a thread.
- * This may include moving the thread within the KSEGRP,
- * changing the assignment of a kse to the thread,
- * and moving a KSE in the system run queue.
- */
-#else
 /*
 * Adjust the priority of a thread.
 */
-#endif
 static void
 sched_priority(struct thread *td, u_char prio)
 {
@@ -1067,11 +743,7 @@ sched_unlend_prio(struct thread *td, u_char prio)
 
 	if (td->td_base_pri >= PRI_MIN_TIMESHARE &&
 	    td->td_base_pri <= PRI_MAX_TIMESHARE)
-#ifdef KSE
-		base_pri = td->td_ksegrp->kg_user_pri;
-#else
 		base_pri = td->td_user_pri;
-#endif
 	else
 		base_pri = td->td_base_pri;
 	if (prio >= base_pri) {
@@ -1109,40 +781,15 @@ sched_prio(struct thread *td, u_char prio)
 }
 
 void
-#ifdef KSE
-sched_user_prio(struct ksegrp *kg, u_char prio)
-#else
 sched_user_prio(struct thread *td, u_char prio)
-#endif
 {
-#ifdef KSE
-	struct thread *td;
-#endif
 	u_char oldprio;
 
-#ifdef KSE
-	kg->kg_base_user_pri = prio;
-
-	/* XXXKSE only for 1:1 */
-
-	td = TAILQ_FIRST(&kg->kg_threads);
-	if (td == NULL) {
-		kg->kg_user_pri = prio;
-		return;
-	}
-
-	if (td->td_flags & TDF_UBORROWING && kg->kg_user_pri <= prio)
-		return;
-
-	oldprio = kg->kg_user_pri;
-	kg->kg_user_pri = prio;
-#else
 	td->td_base_user_pri = prio;
 
 	if (td->td_flags & TDF_UBORROWING && td->td_user_pri <= prio)
 		return;
 
 	oldprio = td->td_user_pri;
 	td->td_user_pri = prio;
-#endif
 
 	if (TD_ON_UPILOCK(td) && oldprio != prio)
 		umtx_pi_adjust(td, oldprio);
@@ -1155,13 +802,8 @@ sched_lend_user_prio(struct thread *td, u_char prio)
 
 	td->td_flags |= TDF_UBORROWING;
 
-#ifdef KSE
-	oldprio = td->td_ksegrp->kg_user_pri;
-	td->td_ksegrp->kg_user_pri = prio;
-#else
 	oldprio = td->td_user_pri;
 	td->td_user_pri = prio;
-#endif
 
 	if (TD_ON_UPILOCK(td) && oldprio != prio)
 		umtx_pi_adjust(td, oldprio);
@@ -1170,23 +812,12 @@ sched_lend_user_prio(struct thread *td, u_char prio)
 void
 sched_unlend_user_prio(struct thread *td, u_char prio)
 {
-#ifdef KSE
-	struct ksegrp *kg = td->td_ksegrp;
-#endif
 	u_char base_pri;
 
-#ifdef KSE
-	base_pri = kg->kg_base_user_pri;
-#else
 	base_pri = td->td_base_user_pri;
-#endif
 	if (prio >= base_pri) {
 		td->td_flags &= ~TDF_UBORROWING;
-#ifdef KSE
-		sched_user_prio(kg, base_pri);
-#else
 		sched_user_prio(td, base_pri);
-#endif
 	} else
 		sched_lend_user_prio(td, prio);
 }
@@ -1196,59 +827,37 @@ sched_sleep(struct thread *td)
 {
 
 	mtx_assert(&sched_lock, MA_OWNED);
-#ifdef KSE
-	td->td_ksegrp->kg_slptime = 0;
-#else
 	td->td_slptime = 0;
-#endif
 }
 
-#ifdef KSE
-static void remrunqueue(struct thread *td);
-#endif
-
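
The user-priority lend/unlend pair above is the hook umtx priority inheritance uses: lending installs a better effective priority and marks the thread TDF_UBORROWING; unlending restores the base priority only once the priority being handed back is no better than the base, otherwise the thread keeps borrowing at the new level. A compact model, with the flag bit and field widths simplified:

	#include <stdio.h>

	#define TDF_UBORROWING	0x1	/* simplified flag bit */

	struct toy_thread {
		int		td_flags;
		unsigned char	td_user_pri;		/* effective */
		unsigned char	td_base_user_pri;	/* restore target */
	};

	static void
	lend_user_prio(struct toy_thread *td, unsigned char prio)
	{
		td->td_flags |= TDF_UBORROWING;
		td->td_user_pri = prio;
	}

	static void
	unlend_user_prio(struct toy_thread *td, unsigned char prio)
	{
		if (prio >= td->td_base_user_pri) {	/* lower value = better */
			td->td_flags &= ~TDF_UBORROWING;
			td->td_user_pri = td->td_base_user_pri;
		} else
			lend_user_prio(td, prio);	/* still boosted */
	}

	int main(void)
	{
		struct toy_thread td = { 0, 180, 180 };

		lend_user_prio(&td, 140);	/* boosted via a PI umtx */
		printf("lent: pri %u\n", td.td_user_pri);
		unlend_user_prio(&td, 200);	/* boost released */
		printf("unlent: pri %u\n", td.td_user_pri);
		return (0);
	}
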
 void
 sched_switch(struct thread *td, struct thread *newtd, int flags)
 {
-	struct kse *ke;
-#ifdef KSE
-	struct ksegrp *kg;
-#endif
+	struct td_sched *ts;
 	struct proc *p;
 
-	ke = td->td_kse;
+	ts = td->td_sched;
 	p = td->td_proc;
 
 	mtx_assert(&sched_lock, MA_OWNED);
 
 	if ((p->p_flag & P_NOLOAD) == 0)
 		sched_load_rem();
-#ifdef KSE
+#if 0
 	/*
 	 * We are volunteering to switch out so we get to nominate
 	 * a successor for the rest of our quantum
-	 * First try another thread in our ksegrp, and then look for
-	 * other ksegrps in our process.
+	 * First try another thread in our process
+	 *
+	 * this is too expensive to do without per process run queues
+	 * so skip it for now.
+	 * XXX keep this comment as a marker.
 	 */
 	if (sched_followon &&
 	    (p->p_flag & P_HADTHREADS) &&
 	    (flags & SW_VOL) &&
-	    newtd == NULL) {
-		/* lets schedule another thread from this process */
-		kg = td->td_ksegrp;
-		if ((newtd = TAILQ_FIRST(&kg->kg_runq))) {
-			remrunqueue(newtd);
-			sched_kgfollowons++;
-		} else {
-			FOREACH_KSEGRP_IN_PROC(p, kg) {
-				if ((newtd = TAILQ_FIRST(&kg->kg_runq))) {
-					sched_pfollowons++;
-					remrunqueue(newtd);
-					break;
-				}
-			}
-		}
-	}
+	    newtd == NULL)
+		newtd = mumble();
 #endif
 
 	if (newtd)
@@ -1267,25 +876,11 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
 	if (td == PCPU_GET(idlethread))
 		TD_SET_CAN_RUN(td);
 	else {
-#ifdef KSE
-		SLOT_RELEASE(td->td_ksegrp);
-#endif
 		if (TD_IS_RUNNING(td)) {
-			/* Put us back on the run queue (kse and all). */
+			/* Put us back on the run queue. */
 			setrunqueue(td, (flags & SW_PREEMPT) ?
 			    SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED :
 			    SRQ_OURSELF|SRQ_YIELDING);
-#ifdef KSE
-		} else if (p->p_flag & P_HADTHREADS) {
-			/*
-			 * We will not be on the run queue. So we must be
-			 * sleeping or similar. As it's available,
-			 * someone else can use the KSE if they need it.
-			 * It's NOT available if we are about to need it
-			 */
-			if (newtd == NULL || newtd->td_ksegrp != td->td_ksegrp)
-				slot_fill(td->td_ksegrp);
-#endif
 		}
 	}
 	if (newtd) {
@@ -1294,17 +889,12 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
 		 * as if it had been added to the run queue and selected.
 		 * It came from:
 		 * * A preemption
-		 * ifdef KSE
 		 * * An upcall
-		 * endif
 		 * * A followon
 		 */
 		KASSERT((newtd->td_inhibitors == 0),
 			("trying to run inhibitted thread"));
-#ifdef KSE
-		SLOT_USE(newtd->td_ksegrp);
-#endif
-		newtd->td_kse->ke_flags |= KEF_DIDRUN;
+		newtd->td_sched->ts_flags |= TSF_DIDRUN;
 		TD_SET_RUNNING(newtd);
 		if ((newtd->td_proc->p_flag & P_NOLOAD) == 0)
 			sched_load_add();
@@ -1332,25 +922,12 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
 void
 sched_wakeup(struct thread *td)
 {
-#ifdef KSE
-	struct ksegrp *kg;
-#endif
-
 	mtx_assert(&sched_lock, MA_OWNED);
-#ifdef KSE
-	kg = td->td_ksegrp;
-	if (kg->kg_slptime > 1) {
-		updatepri(kg);
-		resetpriority(kg);
-	}
-	kg->kg_slptime = 0;
-#else
 	if (td->td_slptime > 1) {
 		updatepri(td);
 		resetpriority(td);
 	}
 	td->td_slptime = 0;
-#endif
 	setrunqueue(td, SRQ_BORING);
 }
@@ -1481,22 +1058,17 @@ void
 sched_add(struct thread *td, int flags)
 #ifdef SMP
 {
-	struct kse *ke;
+	struct td_sched *ts;
 	int forwarded = 0;
 	int cpu;
 	int single_cpu = 0;
 
-	ke = td->td_kse;
+	ts = td->td_sched;
 	mtx_assert(&sched_lock, MA_OWNED);
-	KASSERT(ke->ke_state != KES_ONRUNQ,
-	    ("sched_add: kse %p (%s) already in run queue", ke,
-#ifdef KSE
-	    ke->ke_proc->p_comm));
-	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
-#else
+	KASSERT(ts->ts_state != TSS_ONRUNQ,
+	    ("sched_add: td_sched %p (%s) already in run queue", ts,
 	    td->td_proc->p_comm));
 	KASSERT(td->td_proc->p_sflag & PS_INMEM,
-#endif
 	    ("sched_add: process swapped out"));
 	CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)",
 	    td, td->td_proc->p_comm, td->td_priority, curthread,
@@ -1505,22 +1077,22 @@ sched_add(struct thread *td, int flags)
 
 	if (td->td_pinned != 0) {
 		cpu = td->td_lastcpu;
-		ke->ke_runq = &runq_pcpu[cpu];
+		ts->ts_runq = &runq_pcpu[cpu];
 		single_cpu = 1;
 		CTR3(KTR_RUNQ,
-		    "sched_add: Put kse:%p(td:%p) on cpu%d runq", ke, td, cpu);
-	} else if ((ke)->ke_flags & KEF_BOUND) {
+		    "sched_add: Put td_sched:%p(td:%p) on cpu%d runq", ts, td, cpu);
+	} else if ((ts)->ts_flags & TSF_BOUND) {
 		/* Find CPU from bound runq */
-		KASSERT(SKE_RUNQ_PCPU(ke),("sched_add: bound kse not on cpu runq"));
-		cpu = ke->ke_runq - &runq_pcpu[0];
+		KASSERT(SKE_RUNQ_PCPU(ts),("sched_add: bound td_sched not on cpu runq"));
+		cpu = ts->ts_runq - &runq_pcpu[0];
 		single_cpu = 1;
 		CTR3(KTR_RUNQ,
-		    "sched_add: Put kse:%p(td:%p) on cpu%d runq", ke, td, cpu);
+		    "sched_add: Put td_sched:%p(td:%p) on cpu%d runq", ts, td, cpu);
 	} else {
 		CTR2(KTR_RUNQ,
-		    "sched_add: adding kse:%p (td:%p) to gbl runq", ke, td);
+		    "sched_add: adding td_sched:%p (td:%p) to gbl runq", ts, td);
 		cpu = NOCPU;
-		ke->ke_runq = &runq;
+		ts->ts_runq = &runq;
 	}
 
 	if (single_cpu && (cpu != PCPU_GET(cpuid))) {
@@ -1546,32 +1118,24 @@ sched_add(struct thread *td, int flags)
 
 	if ((td->td_proc->p_flag & P_NOLOAD) == 0)
 		sched_load_add();
-#ifdef KSE
-	SLOT_USE(td->td_ksegrp);
-#endif
-	runq_add(ke->ke_runq, ke, flags);
-	ke->ke_state = KES_ONRUNQ;
+	runq_add(ts->ts_runq, ts, flags);
+	ts->ts_state = TSS_ONRUNQ;
 }
 #else /* SMP */
 {
-	struct kse *ke;
-	ke = td->td_kse;
+	struct td_sched *ts;
+	ts = td->td_sched;
 	mtx_assert(&sched_lock, MA_OWNED);
-	KASSERT(ke->ke_state != KES_ONRUNQ,
-	    ("sched_add: kse %p (%s) already in run queue", ke,
-#ifdef KSE
-	    ke->ke_proc->p_comm));
-	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
-#else
+	KASSERT(ts->ts_state != TSS_ONRUNQ,
+	    ("sched_add: td_sched %p (%s) already in run queue", ts,
 	    td->td_proc->p_comm));
 	KASSERT(td->td_proc->p_sflag & PS_INMEM,
-#endif
 	    ("sched_add: process swapped out"));
 	CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)",
 	    td, td->td_proc->p_comm, td->td_priority, curthread,
 	    curthread->td_proc->p_comm);
-	CTR2(KTR_RUNQ, "sched_add: adding kse:%p (td:%p) to runq", ke, td);
-	ke->ke_runq = &runq;
+	CTR2(KTR_RUNQ, "sched_add: adding td_sched:%p (td:%p) to runq", ts, td);
+	ts->ts_runq = &runq;
 
 	/*
 	 * If we are yielding (on the way out anyhow)
@@ -1590,11 +1154,8 @@ sched_add(struct thread *td, int flags)
 	}
 	if ((td->td_proc->p_flag & P_NOLOAD) == 0)
 		sched_load_add();
-#ifdef KSE
-	SLOT_USE(td->td_ksegrp);
-#endif
-	runq_add(ke->ke_runq, ke, flags);
-	ke->ke_state = KES_ONRUNQ;
+	runq_add(ts->ts_runq, ts, flags);
+	ts->ts_state = TSS_ONRUNQ;
 	maybe_resched(td);
 }
 #endif /* SMP */
 
@@ -1602,17 +1163,13 @@ void
 sched_rem(struct thread *td)
 {
-	struct kse *ke;
+	struct td_sched *ts;
 
-	ke = td->td_kse;
-#ifdef KSE
-	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
-#else
+	ts = td->td_sched;
 	KASSERT(td->td_proc->p_sflag & PS_INMEM,
-#endif
 	    ("sched_rem: process swapped out"));
-	KASSERT((ke->ke_state == KES_ONRUNQ),
-	    ("sched_rem: KSE not on run queue"));
+	KASSERT((ts->ts_state == TSS_ONRUNQ),
+	    ("sched_rem: thread not on run queue"));
 	mtx_assert(&sched_lock, MA_OWNED);
 	CTR5(KTR_SCHED, "sched_rem: %p(%s) prio %d by %p(%s)",
 	    td, td->td_proc->p_comm, td->td_priority, curthread,
@@ -1620,81 +1177,57 @@ sched_rem(struct thread *td)
 
 	if ((td->td_proc->p_flag & P_NOLOAD) == 0)
 		sched_load_rem();
-#ifdef KSE
-	SLOT_RELEASE(td->td_ksegrp);
-#endif
-	runq_remove(ke->ke_runq, ke);
+	runq_remove(ts->ts_runq, ts);
 
-	ke->ke_state = KES_THREAD;
+	ts->ts_state = TSS_THREAD;
 }
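
sched_add()'s queue choice reduces to three cases: a pinned thread must run where it last ran, a bound thread goes to the per-CPU queue recorded at sched_bind() time, and anything else lands on the shared global queue. The same decision in isolation — queue names, CPU count, and the flag encoding are placeholders:

	#include <stdio.h>

	#define MAXCPU		4
	#define TSF_BOUND	0x4	/* simplified flag bit */

	struct runq { const char *rq_name; };

	static struct runq runq = { "global" };
	static struct runq runq_pcpu[MAXCPU] = {
		{ "cpu0" }, { "cpu1" }, { "cpu2" }, { "cpu3" }
	};

	struct toy_thread {
		int		td_pinned;	/* sched_pin() nesting depth */
		int		td_lastcpu;
		int		ts_flags;
		struct runq	*ts_runq;	/* set at bind time */
	};

	static struct runq *
	pick_runq(struct toy_thread *td)
	{
		if (td->td_pinned != 0)
			return (&runq_pcpu[td->td_lastcpu]);	/* pinned: stay put */
		if (td->ts_flags & TSF_BOUND)
			return (td->ts_runq);		/* bound: bind-time queue */
		return (&runq);				/* migratable: global */
	}

	int main(void)
	{
		struct toy_thread a = { 1, 2, 0, NULL };
		struct toy_thread b = { 0, 1, TSF_BOUND, &runq_pcpu[1] };
		struct toy_thread c = { 0, 3, 0, NULL };

		printf("pinned->%s bound->%s free->%s\n", pick_runq(&a)->rq_name,
		    pick_runq(&b)->rq_name, pick_runq(&c)->rq_name);
		return (0);
	}
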
 
 /*
 * Select threads to run.
 * Notice that the running threads still consume a slot.
 */
-#ifdef KSE
-struct kse *
-#else
-struct thread *
-#endif
+struct td_sched *
 sched_choose(void)
 {
-	struct kse *ke;
+	struct td_sched *ts;
 	struct runq *rq;
 
 #ifdef SMP
-	struct kse *kecpu;
+	struct td_sched *kecpu;
 
 	rq = &runq;
-	ke = runq_choose(&runq);
+	ts = runq_choose(&runq);
 	kecpu = runq_choose(&runq_pcpu[PCPU_GET(cpuid)]);
 
-	if (ke == NULL ||
+	if (ts == NULL ||
 	    (kecpu != NULL &&
-	     kecpu->ke_thread->td_priority < ke->ke_thread->td_priority)) {
-		CTR2(KTR_RUNQ, "choosing kse %p from pcpu runq %d", kecpu,
+	     kecpu->ts_thread->td_priority < ts->ts_thread->td_priority)) {
+		CTR2(KTR_RUNQ, "choosing td_sched %p from pcpu runq %d", kecpu,
 		     PCPU_GET(cpuid));
-		ke = kecpu;
+		ts = kecpu;
 		rq = &runq_pcpu[PCPU_GET(cpuid)];
 	} else {
-		CTR1(KTR_RUNQ, "choosing kse %p from main runq", ke);
+		CTR1(KTR_RUNQ, "choosing td_sched %p from main runq", ts);
 	}
 
 #else
 	rq = &runq;
-	ke = runq_choose(&runq);
+	ts = runq_choose(&runq);
 #endif
 
-#ifdef KSE
-	if (ke != NULL) {
-#else
-	if (ke) {
-#endif
-		runq_remove(rq, ke);
-		ke->ke_state = KES_THREAD;
+	if (ts) {
+		runq_remove(rq, ts);
+		ts->ts_state = TSS_THREAD;
 
-#ifdef KSE
-		KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
+		KASSERT(ts->ts_thread->td_proc->p_sflag & PS_INMEM,
 		    ("sched_choose: process swapped out"));
-#else
-		KASSERT(ke->ke_thread->td_proc->p_sflag & PS_INMEM,
-		    ("sched_choose: process swapped out"));
-		return (ke->ke_thread);
-#endif
 	}
-#ifdef KSE
-	return (ke);
-#else
-	return (NULL);
-#endif
+	return (ts);
 }
 
 void
 sched_userret(struct thread *td)
 {
-#ifdef KSE
-	struct ksegrp *kg;
-#endif
 	/*
 	 * XXX we cheat slightly on the locking here to avoid locking in
 	 * the usual case. Setting td_priority here is essentially an
@@ -1706,42 +1239,32 @@ sched_userret(struct thread *td)
 	 */
 	KASSERT((td->td_flags & TDF_BORROWING) == 0,
 	    ("thread with borrowed priority returning to userland"));
-#ifdef KSE
-	kg = td->td_ksegrp;
-	if (td->td_priority != kg->kg_user_pri) {
-		mtx_lock_spin(&sched_lock);
-		td->td_priority = kg->kg_user_pri;
-		td->td_base_pri = kg->kg_user_pri;
-		mtx_unlock_spin(&sched_lock);
-	}
-#else
 	if (td->td_priority != td->td_user_pri) {
 		mtx_lock_spin(&sched_lock);
 		td->td_priority = td->td_user_pri;
 		td->td_base_pri = td->td_user_pri;
 		mtx_unlock_spin(&sched_lock);
 	}
-#endif
 }
 
 void
 sched_bind(struct thread *td, int cpu)
 {
-	struct kse *ke;
+	struct td_sched *ts;
 
 	mtx_assert(&sched_lock, MA_OWNED);
 	KASSERT(TD_IS_RUNNING(td),
 	    ("sched_bind: cannot bind non-running thread"));
 
-	ke = td->td_kse;
+	ts = td->td_sched;
 
-	ke->ke_flags |= KEF_BOUND;
+	ts->ts_flags |= TSF_BOUND;
 #ifdef SMP
-	ke->ke_runq = &runq_pcpu[cpu];
+	ts->ts_runq = &runq_pcpu[cpu];
 	if (PCPU_GET(cpuid) == cpu)
 		return;
 
-	ke->ke_state = KES_THREAD;
+	ts->ts_state = TSS_THREAD;
 
 	mi_switch(SW_VOL, NULL);
 #endif
@@ -1751,30 +1274,21 @@ void
 sched_unbind(struct thread* td)
 {
 	mtx_assert(&sched_lock, MA_OWNED);
-	td->td_kse->ke_flags &= ~KEF_BOUND;
+	td->td_sched->ts_flags &= ~TSF_BOUND;
 }
 
 int
 sched_is_bound(struct thread *td)
 {
 	mtx_assert(&sched_lock, MA_OWNED);
-	return (td->td_kse->ke_flags & KEF_BOUND);
+	return (td->td_sched->ts_flags & TSF_BOUND);
 }
 
 void
 sched_relinquish(struct thread *td)
 {
-#ifdef KSE
-	struct ksegrp *kg;
-
-	kg = td->td_ksegrp;
-#endif
 	mtx_lock_spin(&sched_lock);
-#ifdef KSE
-	if (kg->kg_pri_class == PRI_TIMESHARE)
-#else
 	if (td->td_pri_class == PRI_TIMESHARE)
-#endif
 		sched_prio(td, PRI_MAX_TIMESHARE);
 	mi_switch(SW_VOL, NULL);
 	mtx_unlock_spin(&sched_lock);
 }
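
On SMP, sched_choose() compares the heads of the global queue and this CPU's private queue and takes the per-CPU head only when it strictly beats the global one (lower numeric priority wins; ties go to the global queue). That selection rule in isolation:

	#include <stdio.h>

	struct toy_td { int td_priority; const char *name; };

	/* mirrors the SMP branch of sched_choose(); NULL means an empty queue */
	static struct toy_td *
	choose(struct toy_td *ts, struct toy_td *tscpu)
	{
		if (ts == NULL ||
		    (tscpu != NULL && tscpu->td_priority < ts->td_priority))
			return (tscpu);
		return (ts);
	}

	int main(void)
	{
		struct toy_td g = { 180, "global" }, p = { 170, "pcpu" };

		printf("winner: %s\n", choose(&g, &p)->name);	/* pcpu, 170 < 180 */
		p.td_priority = 180;
		printf("tie: %s\n", choose(&g, &p)->name);	/* global wins ties */
		printf("empty global: %s\n", choose(NULL, &p)->name);
		return (0);
	}
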
@@ -1786,14 +1300,6 @@ sched_load(void)
 	return (sched_tdcnt);
 }
 
-#ifdef KSE
-int
-sched_sizeof_ksegrp(void)
-{
-	return (sizeof(struct ksegrp) + sizeof(struct kg_sched));
-}
-#endif
-
 int
 sched_sizeof_proc(void)
 {
@@ -1803,16 +1309,16 @@ sched_sizeof_proc(void)
 
 int
 sched_sizeof_thread(void)
 {
-	return (sizeof(struct thread) + sizeof(struct kse));
+	return (sizeof(struct thread) + sizeof(struct td_sched));
 }
 
 fixpt_t
 sched_pctcpu(struct thread *td)
 {
-	struct kse *ke;
+	struct td_sched *ts;
 
-	ke = td->td_kse;
-	return (ke->ke_pctcpu);
+	ts = td->td_sched;
+	return (ts->ts_pctcpu);
 }
 
 void
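
sched_pctcpu() hands back the raw fixed-point estimate maintained by schedcpu(); turning it into the percentage shown by ps(1) or ttyinfo() is the caller's job, conventionally by scaling against FSCALE. A sketch of that conversion — FSHIFT = 11 is the same assumption as above, and the consumer-side arithmetic is illustrative rather than copied from any particular tool:

	#include <stdio.h>

	#define FSHIFT	11	/* assumed */
	#define FSCALE	(1 << FSHIFT)

	typedef unsigned int fixpt_t;

	static double
	pctcpu_to_percent(fixpt_t pctcpu)
	{
		return (100.0 * pctcpu / FSCALE);
	}

	int main(void)
	{
		fixpt_t samples[] = { 0, 512, 1024, 2048 };

		for (int i = 0; i < 4; i++)
			printf("fixpt %4u -> %5.1f%%\n", samples[i],
			    pctcpu_to_percent(samples[i]));
		return (0);
	}
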