| author | jeff <jeff@FreeBSD.org> | 2002-10-12 05:32:24 +0000 |
|---|---|---|
| committer | jeff <jeff@FreeBSD.org> | 2002-10-12 05:32:24 +0000 |
| commit | ef4d4e378e012b3efd909e2abc5c1ddcf38faee7 | |
| tree | 69991942d3c51153d9210031e7380779edf05aaf | |
| parent | cf318b70e5aa88b25cdf3d47eacce75c5aa889db | |
- Create a new scheduler API that is defined in sys/sched.h
- Begin moving scheduler-specific functionality into sched_4bsd.c
- Replace direct manipulation of scheduler data with hooks provided by the
  new API.
- Remove KSE-specific state modifications and single-runq assumptions from
  kern_switch.c
Reviewed by: -arch
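For orientation before the per-file hunks, here is an annotated condensation of the interface the commit introduces. The prototypes are copied from the sys/sys/sched.h hunk near the end of the diff; the trailing comments are editorial glosses mapping each hook to the code it replaces, and are not part of the committed header.

```c
/* Condensed from the new sys/sys/sched.h; glosses are editorial. */
struct thread;
struct ksegrp;
struct kse;

/* General scheduling info. */
int	sched_rr_interval(void);	/* was roundrobin_interval() */
int	sched_runnable(void);		/* was kserunnable() */

/* KSE-group (priority/estcpu) bookkeeping. */
void	sched_exit(struct ksegrp *kg, struct ksegrp *child);
					/* was the estcpu merge in kern_exit.c */
void	sched_fork(struct ksegrp *kg, struct ksegrp *child);
					/* was the estcpu copy in kern_fork.c */
void	sched_nice(struct ksegrp *kg, int nice);
					/* was kg_nice store + resetpriority() */
void	sched_prio(struct thread *td, u_char prio);
					/* was remrunqueue/store/setrunqueue */
void	sched_userret(struct thread *td);
					/* was the inline user-pri restore in userret() */

/* Per-thread scheduling events. */
void	sched_clock(struct thread *td);	/* was schedclock() */
void	sched_sleep(struct thread *td, u_char prio);
void	sched_switchin(struct thread *td);
void	sched_switchout(struct thread *td);
void	sched_wakeup(struct thread *td);

/* Run-queue hooks, replacing direct runq_*() calls on the global runq. */
void	sched_add(struct kse *ke);
void	sched_rem(struct kse *ke);
struct kse *sched_choose(void);
```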
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | sys/amd64/amd64/machdep.c | 5 |
| -rw-r--r-- | sys/conf/files | 1 |
| -rw-r--r-- | sys/i386/i386/machdep.c | 5 |
| -rw-r--r-- | sys/kern/kern_clock.c | 3 |
| -rw-r--r-- | sys/kern/kern_exit.c | 17 |
| -rw-r--r-- | sys/kern/kern_fork.c | 13 |
| -rw-r--r-- | sys/kern/kern_idle.c | 5 |
| -rw-r--r-- | sys/kern/kern_mutex.c | 6 |
| -rw-r--r-- | sys/kern/kern_resource.c | 4 |
| -rw-r--r-- | sys/kern/kern_subr.c | 3 |
| -rw-r--r-- | sys/kern/kern_switch.c | 60 |
| -rw-r--r-- | sys/kern/kern_synch.c | 436 |
| -rw-r--r-- | sys/kern/ksched.c | 3 |
| -rw-r--r-- | sys/kern/sched_4bsd.c | 635 |
| -rw-r--r-- | sys/kern/subr_trap.c | 18 |
| -rw-r--r-- | sys/kern/subr_turnstile.c | 6 |
| -rw-r--r-- | sys/posix4/ksched.c | 3 |
| -rw-r--r-- | sys/sys/proc.h | 6 |
| -rw-r--r-- | sys/sys/sched.h | 65 |
| -rw-r--r-- | sys/vm/vm_pageout.c | 5 |
| -rw-r--r-- | sys/vm/vm_zeroidle.c | 3 |
21 files changed, 774 insertions, 528 deletions
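The new sched_4bsd.c carries the classic 4BSD digital-decay algorithm over intact, including its long comment block. As a worked check of that comment's claim that 90% of p_estcpu is forgotten in 5*loadav seconds, here is a small standalone harness built around the loadfactor() and decay_cpu() macros from the diff; the FSHIFT/FSCALE constants mirror param.h defaults, and the main() scaffolding is illustrative, not part of the commit.

```c
#include <stdio.h>
#include <stdint.h>

typedef uint32_t fixpt_t;
#define FSHIFT	11			/* param.h default */
#define FSCALE	(1 << FSHIFT)

/* Copied from the schedcpu() section of the diff. */
#define loadfactor(loadav)	(2 * (loadav))
#define decay_cpu(loadfac, cpu)	(((loadfac) * (cpu)) / ((loadfac) + FSCALE))

int
main(void)
{
	fixpt_t loadav = 2 * FSCALE;		/* steady load average of 2.0 */
	fixpt_t loadfac = loadfactor(loadav);	/* decay factor 4/5 = 0.8 */
	unsigned int estcpu = 255;		/* ESTCPULIM maximum */
	int sec;

	/* schedcpu() applies one decay per second of wall time. */
	for (sec = 1; sec <= 10; sec++) {
		estcpu = decay_cpu(loadfac, estcpu);
		printf("after %2d sec: estcpu = %u\n", sec, estcpu);
	}
	/*
	 * 0.8**10 is roughly 0.11, so after 5 * loadav = 10 iterations
	 * about 90% of the initial estcpu has decayed away, matching
	 * the comment block in the diff.
	 */
	return (0);
}
```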
diff --git a/sys/amd64/amd64/machdep.c b/sys/amd64/amd64/machdep.c index b414eaf..380ddee 100644 --- a/sys/amd64/amd64/machdep.c +++ b/sys/amd64/amd64/machdep.c @@ -69,6 +69,7 @@ #include <sys/reboot.h> #include <sys/callout.h> #include <sys/msgbuf.h> +#include <sys/sched.h> #include <sys/sysent.h> #include <sys/sysctl.h> #include <sys/ucontext.h> @@ -818,7 +819,7 @@ SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_hlt, CTLFLAG_RW, /* * Note that we have to be careful here to avoid a race between checking - * kserunnable() and actually halting. If we don't do this, we may waste + * sched_runnable() and actually halting. If we don't do this, we may waste * the time between calling hlt and the next interrupt even though there * is a runnable process. */ @@ -827,7 +828,7 @@ cpu_idle(void) { if (cpu_idle_hlt) { disable_intr(); - if (kserunnable()) { + if (sched_runnable()) { enable_intr(); } else { /* diff --git a/sys/conf/files b/sys/conf/files index 2c19bbd..cfc6427 100644 --- a/sys/conf/files +++ b/sys/conf/files @@ -946,6 +946,7 @@ kern/kern_uuid.c standard kern/kern_xxx.c standard kern/link_elf.c standard kern/md5c.c standard +kern/sched_4bsd.c standard kern/subr_autoconf.c standard kern/subr_blist.c standard kern/subr_bus.c standard diff --git a/sys/i386/i386/machdep.c b/sys/i386/i386/machdep.c index b414eaf..380ddee 100644 --- a/sys/i386/i386/machdep.c +++ b/sys/i386/i386/machdep.c @@ -69,6 +69,7 @@ #include <sys/reboot.h> #include <sys/callout.h> #include <sys/msgbuf.h> +#include <sys/sched.h> #include <sys/sysent.h> #include <sys/sysctl.h> #include <sys/ucontext.h> @@ -818,7 +819,7 @@ SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_hlt, CTLFLAG_RW, /* * Note that we have to be careful here to avoid a race between checking - * kserunnable() and actually halting. If we don't do this, we may waste + * sched_runnable() and actually halting. If we don't do this, we may waste * the time between calling hlt and the next interrupt even though there * is a runnable process. */ @@ -827,7 +828,7 @@ cpu_idle(void) { if (cpu_idle_hlt) { disable_intr(); - if (kserunnable()) { + if (sched_runnable()) { enable_intr(); } else { /* diff --git a/sys/kern/kern_clock.c b/sys/kern/kern_clock.c index e50b731..d918728 100644 --- a/sys/kern/kern_clock.c +++ b/sys/kern/kern_clock.c @@ -51,6 +51,7 @@ #include <sys/mutex.h> #include <sys/proc.h> #include <sys/resourcevar.h> +#include <sys/sched.h> #include <sys/signalvar.h> #include <sys/smp.h> #include <vm/vm.h> @@ -437,7 +438,7 @@ statclock_process(ke, pc, user) } } - schedclock(ke->ke_thread); + sched_clock(ke->ke_thread); /* Update resource usage integrals and maximums. */ if ((pstats = p->p_stats) != NULL && diff --git a/sys/kern/kern_exit.c b/sys/kern/kern_exit.c index a586bef..6c83432 100644 --- a/sys/kern/kern_exit.c +++ b/sys/kern/kern_exit.c @@ -57,6 +57,7 @@ #include <sys/vnode.h> #include <sys/resourcevar.h> #include <sys/signalvar.h> +#include <sys/sched.h> #include <sys/sx.h> #include <sys/ptrace.h> #include <sys/acct.h> /* for acct_process() function prototype */ @@ -605,21 +606,13 @@ loop: nfound++; if (p->p_state == PRS_ZOMBIE) { /* - * charge childs scheduling cpu usage to parent - * XXXKSE assume only one thread & kse & ksegrp - * keep estcpu in each ksegrp - * so charge it to the ksegrp that did the wait - * since process estcpu is sum of all ksegrps, - * this is strictly as expected. - * Assume that the child process aggregated all - * tke estcpu into the 'build-in' ksegrp. 
- * XXXKSE + * Allow the scheduler to adjust the priority of the + * parent when a kseg is exiting. */ if (curthread->td_proc->p_pid != 1) { mtx_lock_spin(&sched_lock); - curthread->td_ksegrp->kg_estcpu = - ESTCPULIM(curthread->td_ksegrp->kg_estcpu + - FIRST_KSEGRP_IN_PROC(p)->kg_estcpu); + sched_exit(curthread->td_ksegrp, + FIRST_KSEGRP_IN_PROC(p)); mtx_unlock_spin(&sched_lock); } diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c index 0af883b..9c2169a 100644 --- a/sys/kern/kern_fork.c +++ b/sys/kern/kern_fork.c @@ -53,6 +53,7 @@ #include <sys/proc.h> #include <sys/pioctl.h> #include <sys/resourcevar.h> +#include <sys/sched.h> #include <sys/syscall.h> #include <sys/vnode.h> #include <sys/acct.h> @@ -515,6 +516,12 @@ again: p2->p_sflag = PS_INMEM; if (p1->p_sflag & PS_PROFIL) startprofclock(p2); + /* + * Allow the scheduler to adjust the priority of the child and + * parent while we hold the sched_lock. + */ + sched_fork(td->td_ksegrp, kg2); + mtx_unlock_spin(&sched_lock); p2->p_ucred = crhold(td->td_ucred); td2->td_ucred = crhold(p2->p_ucred); /* XXXKSE */ @@ -635,12 +642,6 @@ again: } /* - * set priority of child to be that of parent. - * XXXKSE this needs redefining.. - */ - kg2->kg_estcpu = td->td_ksegrp->kg_estcpu; - - /* * This begins the section where we must prevent the parent * from being swapped. */ diff --git a/sys/kern/kern_idle.c b/sys/kern/kern_idle.c index bf8e922..4d57749 100644 --- a/sys/kern/kern_idle.c +++ b/sys/kern/kern_idle.c @@ -16,6 +16,7 @@ #include <sys/pcpu.h> #include <sys/proc.h> #include <sys/resourcevar.h> +#include <sys/sched.h> #include <sys/smp.h> #include <sys/unistd.h> #ifdef KTRACE @@ -90,9 +91,9 @@ idle_proc(void *dummy) #ifdef DIAGNOSTIC count = 0; - while (count >= 0 && kserunnable() == 0) { + while (count >= 0 && sched_runnable() == 0) { #else - while (kserunnable() == 0) { + while (sched_runnable() == 0) { #endif /* * This is a good place to put things to be done in diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c index e60d805..16f598a 100644 --- a/sys/kern/kern_mutex.c +++ b/sys/kern/kern_mutex.c @@ -47,6 +47,7 @@ #include <sys/mutex.h> #include <sys/proc.h> #include <sys/resourcevar.h> +#include <sys/sched.h> #include <sys/sbuf.h> #include <sys/stdint.h> #include <sys/sysctl.h> @@ -146,13 +147,10 @@ propagate_priority(struct thread *td) * If on run queue move to new run queue, and quit. * XXXKSE this gets a lot more complicated under threads * but try anyhow. - * We should have a special call to do this more efficiently. 
*/ if (TD_ON_RUNQ(td)) { MPASS(td->td_blocked == NULL); - remrunqueue(td); - td->td_priority = pri; - setrunqueue(td); + sched_prio(td, pri); return; } /* diff --git a/sys/kern/kern_resource.c b/sys/kern/kern_resource.c index 668a8a2..a0f263d 100644 --- a/sys/kern/kern_resource.c +++ b/sys/kern/kern_resource.c @@ -51,6 +51,7 @@ #include <sys/mutex.h> #include <sys/proc.h> #include <sys/resourcevar.h> +#include <sys/sched.h> #include <sys/sx.h> #include <sys/sysent.h> #include <sys/time.h> @@ -295,8 +296,7 @@ donice(struct thread *td, struct proc *p, int n) if (n < low && suser(td)) return (EACCES); FOREACH_KSEGRP_IN_PROC(p, kg) { - kg->kg_nice = n; - (void)resetpriority(kg); + sched_nice(kg, n); } return (0); } diff --git a/sys/kern/kern_subr.c b/sys/kern/kern_subr.c index 1a44b85..0656598 100644 --- a/sys/kern/kern_subr.c +++ b/sys/kern/kern_subr.c @@ -50,6 +50,7 @@ #include <sys/proc.h> #include <sys/malloc.h> #include <sys/resourcevar.h> +#include <sys/sched.h> #include <sys/sysctl.h> #include <sys/vnode.h> @@ -554,7 +555,7 @@ uio_yield() td = curthread; mtx_lock_spin(&sched_lock); DROP_GIANT(); - td->td_priority = td->td_ksegrp->kg_user_pri; /* XXXKSE */ + sched_prio(td, td->td_ksegrp->kg_user_pri); /* XXXKSE */ td->td_proc->p_stats->p_ru.ru_nivcsw++; mi_switch(); mtx_unlock_spin(&sched_lock); diff --git a/sys/kern/kern_switch.c b/sys/kern/kern_switch.c index 37500a1..14d6b2f 100644 --- a/sys/kern/kern_switch.c +++ b/sys/kern/kern_switch.c @@ -97,16 +97,11 @@ reassigned to keep this true. #include <sys/mutex.h> #include <sys/proc.h> #include <sys/queue.h> +#include <sys/sched.h> #include <machine/critical.h> CTASSERT((RQB_BPW * RQB_LEN) == RQ_NQS); -/* - * Global run queue. - */ -static struct runq runq; -SYSINIT(runq, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, runq_init, &runq) - void panc(char *string1, char *string2); #if 0 @@ -129,7 +124,7 @@ choosethread(void) struct ksegrp *kg; retry: - if ((ke = runq_choose(&runq))) { + if ((ke = sched_choose())) { td = ke->ke_thread; KASSERT((td->td_kse == ke), ("kse/thread mismatch")); kg = ke->ke_ksegrp; @@ -228,7 +223,7 @@ kse_reassign(struct kse *ke) kg->kg_last_assigned = td; td->td_kse = ke; ke->ke_thread = td; - runq_add(&runq, ke); + sched_add(ke); /* * if we have already borrowed this, * just pass it to the new thread, @@ -282,12 +277,6 @@ kse_reassign(struct kse *ke) CTR1(KTR_RUNQ, "kse_reassign: ke%p idled", ke); } -int -kserunnable(void) -{ - return runq_check(&runq); -} - /* * Remove a thread from its KSEGRP's run queue. * This in turn may remove it from a KSE if it was already assigned @@ -314,7 +303,7 @@ remrunqueue(struct thread *td) TD_SET_CAN_RUN(td); if ((td->td_flags & TDF_UNBOUND) == 0) { /* Bring its kse with it, leave the thread attached */ - runq_remove(&runq, ke); + sched_rem(ke); ke->ke_state = KES_THREAD; return; } @@ -358,7 +347,7 @@ setrunqueue(struct thread *td) * and the KSE is always already attached. * Totally ignore the ksegrp run queue. 
*/ - runq_add(&runq, td->td_kse); + sched_add(td->td_kse); return; } if ((td->td_flags & TDF_UNBOUND) == 0) { @@ -371,7 +360,7 @@ setrunqueue(struct thread *td) TAILQ_REMOVE(&kg->kg_lq, ke, ke_kgrlist); kg->kg_loan_kses--; } - runq_add(&runq, td->td_kse); + sched_add(td->td_kse); return; } @@ -416,7 +405,7 @@ setrunqueue(struct thread *td) ke->ke_thread = NULL; tda = kg->kg_last_assigned = TAILQ_PREV(tda, threadqueue, td_runq); - runq_remove(&runq, ke); + sched_rem(ke); } } else { /* @@ -475,7 +464,7 @@ setrunqueue(struct thread *td) td2->td_kse = ke; ke->ke_thread = td2; } - runq_add(&runq, ke); + sched_add(ke); } } @@ -592,15 +581,6 @@ runq_add(struct runq *rq, struct kse *ke) struct rqhead *rqh; int pri; - mtx_assert(&sched_lock, MA_OWNED); - KASSERT((ke->ke_thread != NULL), ("runq_add: No thread on KSE")); - KASSERT((ke->ke_thread->td_kse != NULL), - ("runq_add: No KSE on thread")); - KASSERT(ke->ke_state != KES_ONRUNQ, - ("runq_add: kse %p (%s) already in run queue", ke, - ke->ke_proc->p_comm)); - KASSERT(ke->ke_proc->p_sflag & PS_INMEM, - ("runq_add: process swapped out")); pri = ke->ke_thread->td_priority / RQ_PPQ; ke->ke_rqindex = pri; runq_setbit(rq, pri); @@ -608,8 +588,6 @@ runq_add(struct runq *rq, struct kse *ke) CTR4(KTR_RUNQ, "runq_add: p=%p pri=%d %d rqh=%p", ke->ke_proc, ke->ke_thread->td_priority, pri, rqh); TAILQ_INSERT_TAIL(rqh, ke, ke_procq); - ke->ke_ksegrp->kg_runq_kses++; - ke->ke_state = KES_ONRUNQ; } /* @@ -636,9 +614,7 @@ runq_check(struct runq *rq) } /* - * Find and remove the highest priority process from the run queue. - * If there are no runnable processes, the per-cpu idle process is - * returned. Will not return NULL under any circumstances. + * Find the highest priority process on the run queue. */ struct kse * runq_choose(struct runq *rq) @@ -654,20 +630,6 @@ runq_choose(struct runq *rq) KASSERT(ke != NULL, ("runq_choose: no proc on busy queue")); CTR3(KTR_RUNQ, "runq_choose: pri=%d kse=%p rqh=%p", pri, ke, rqh); - TAILQ_REMOVE(rqh, ke, ke_procq); - ke->ke_ksegrp->kg_runq_kses--; - if (TAILQ_EMPTY(rqh)) { - CTR0(KTR_RUNQ, "runq_choose: empty"); - runq_clrbit(rq, pri); - } - - ke->ke_state = KES_THREAD; - KASSERT((ke->ke_thread != NULL), - ("runq_choose: No thread on KSE")); - KASSERT((ke->ke_thread->td_kse != NULL), - ("runq_choose: No KSE on thread")); - KASSERT(ke->ke_proc->p_sflag & PS_INMEM, - ("runq_choose: process swapped out")); return (ke); } CTR1(KTR_RUNQ, "runq_choose: idleproc pri=%d", pri); @@ -686,8 +648,6 @@ runq_remove(struct runq *rq, struct kse *ke) struct rqhead *rqh; int pri; - KASSERT((ke->ke_state == KES_ONRUNQ), ("KSE not on run queue")); - mtx_assert(&sched_lock, MA_OWNED); KASSERT(ke->ke_proc->p_sflag & PS_INMEM, ("runq_remove: process swapped out")); pri = ke->ke_rqindex; @@ -700,8 +660,6 @@ runq_remove(struct runq *rq, struct kse *ke) CTR0(KTR_RUNQ, "runq_remove: empty"); runq_clrbit(rq, pri); } - ke->ke_state = KES_THREAD; - ke->ke_ksegrp->kg_runq_kses--; } #if 0 diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c index 29c3838..b758c96 100644 --- a/sys/kern/kern_synch.c +++ b/sys/kern/kern_synch.c @@ -51,6 +51,7 @@ #include <sys/mutex.h> #include <sys/proc.h> #include <sys/resourcevar.h> +#include <sys/sched.h> #include <sys/signalvar.h> #include <sys/smp.h> #include <sys/sx.h> @@ -72,11 +73,8 @@ SYSINIT(sched_setup, SI_SUB_KICK_SCHEDULER, SI_ORDER_FIRST, sched_setup, NULL) int hogticks; int lbolt; -int sched_quantum; /* Roundrobin scheduling quantum in ticks. 
*/ static struct callout loadav_callout; -static struct callout schedcpu_callout; -static struct callout roundrobin_callout; struct loadavg averunnable = { {0, 0, 0}, FSCALE }; /* load average, of runnable procs */ @@ -92,316 +90,6 @@ static fixpt_t cexp[3] = { static void endtsleep(void *); static void loadav(void *arg); -static void roundrobin(void *arg); -static void schedcpu(void *arg); - -static int -sysctl_kern_quantum(SYSCTL_HANDLER_ARGS) -{ - int error, new_val; - - new_val = sched_quantum * tick; - error = sysctl_handle_int(oidp, &new_val, 0, req); - if (error != 0 || req->newptr == NULL) - return (error); - if (new_val < tick) - return (EINVAL); - sched_quantum = new_val / tick; - hogticks = 2 * sched_quantum; - return (0); -} - -SYSCTL_PROC(_kern, OID_AUTO, quantum, CTLTYPE_INT|CTLFLAG_RW, - 0, sizeof sched_quantum, sysctl_kern_quantum, "I", - "Roundrobin scheduling quantum in microseconds"); - -/* - * Arrange to reschedule if necessary, taking the priorities and - * schedulers into account. - */ -void -maybe_resched(struct thread *td) -{ - - mtx_assert(&sched_lock, MA_OWNED); - if (td->td_priority < curthread->td_priority) - curthread->td_kse->ke_flags |= KEF_NEEDRESCHED; -} - -int -roundrobin_interval(void) -{ - return (sched_quantum); -} - -/* - * Force switch among equal priority processes every 100ms. - * We don't actually need to force a context switch of the current process. - * The act of firing the event triggers a context switch to softclock() and - * then switching back out again which is equivalent to a preemption, thus - * no further work is needed on the local CPU. - */ -/* ARGSUSED */ -static void -roundrobin(arg) - void *arg; -{ - -#ifdef SMP - mtx_lock_spin(&sched_lock); - forward_roundrobin(); - mtx_unlock_spin(&sched_lock); -#endif - - callout_reset(&roundrobin_callout, sched_quantum, roundrobin, NULL); -} - -/* - * Constants for digital decay and forget: - * 90% of (p_estcpu) usage in 5 * loadav time - * 95% of (p_pctcpu) usage in 60 seconds (load insensitive) - * Note that, as ps(1) mentions, this can let percentages - * total over 100% (I've seen 137.9% for 3 processes). - * - * Note that schedclock() updates p_estcpu and p_cpticks asynchronously. - * - * We wish to decay away 90% of p_estcpu in (5 * loadavg) seconds. - * That is, the system wants to compute a value of decay such - * that the following for loop: - * for (i = 0; i < (5 * loadavg); i++) - * p_estcpu *= decay; - * will compute - * p_estcpu *= 0.1; - * for all values of loadavg: - * - * Mathematically this loop can be expressed by saying: - * decay ** (5 * loadavg) ~= .1 - * - * The system computes decay as: - * decay = (2 * loadavg) / (2 * loadavg + 1) - * - * We wish to prove that the system's computation of decay - * will always fulfill the equation: - * decay ** (5 * loadavg) ~= .1 - * - * If we compute b as: - * b = 2 * loadavg - * then - * decay = b / (b + 1) - * - * We now need to prove two things: - * 1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1) - * 2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg) - * - * Facts: - * For x close to zero, exp(x) =~ 1 + x, since - * exp(x) = 0! + x**1/1! + x**2/2! + ... . - * therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b. - * For x close to zero, ln(1+x) =~ x, since - * ln(1+x) = x - x**2/2 + x**3/3 - ... -1 < x < 1 - * therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1). 
- * ln(.1) =~ -2.30 - * - * Proof of (1): - * Solve (factor)**(power) =~ .1 given power (5*loadav): - * solving for factor, - * ln(factor) =~ (-2.30/5*loadav), or - * factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) = - * exp(-1/b) =~ (b-1)/b =~ b/(b+1). QED - * - * Proof of (2): - * Solve (factor)**(power) =~ .1 given factor == (b/(b+1)): - * solving for power, - * power*ln(b/(b+1)) =~ -2.30, or - * power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav. QED - * - * Actual power values for the implemented algorithm are as follows: - * loadav: 1 2 3 4 - * power: 5.68 10.32 14.94 19.55 - */ - -/* calculations for digital decay to forget 90% of usage in 5*loadav sec */ -#define loadfactor(loadav) (2 * (loadav)) -#define decay_cpu(loadfac, cpu) (((loadfac) * (cpu)) / ((loadfac) + FSCALE)) - -/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */ -static fixpt_t ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */ -SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, ""); - -/* kernel uses `FSCALE', userland (SHOULD) use kern.fscale */ -static int fscale __unused = FSCALE; -SYSCTL_INT(_kern, OID_AUTO, fscale, CTLFLAG_RD, 0, FSCALE, ""); - -/* - * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the - * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below - * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT). - * - * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used: - * 1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits). - * - * If you don't want to bother with the faster/more-accurate formula, you - * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate - * (more general) method of calculating the %age of CPU used by a process. - */ -#define CCPU_SHIFT 11 - -/* - * Recompute process priorities, every hz ticks. - * MP-safe, called without the Giant mutex. - */ -/* ARGSUSED */ -static void -schedcpu(arg) - void *arg; -{ - register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]); - struct thread *td; - struct proc *p; - struct kse *ke; - struct ksegrp *kg; - int realstathz; - int awake; - - realstathz = stathz ? stathz : hz; - sx_slock(&allproc_lock); - FOREACH_PROC_IN_SYSTEM(p) { - mtx_lock_spin(&sched_lock); - p->p_swtime++; - FOREACH_KSEGRP_IN_PROC(p, kg) { - awake = 0; - FOREACH_KSE_IN_GROUP(kg, ke) { - /* - * Increment time in/out of memory and sleep - * time (if sleeping). We ignore overflow; - * with 16-bit int's (remember them?) - * overflow takes 45 days. - */ - /* - * The kse slptimes are not touched in wakeup - * because the thread may not HAVE a KSE. - */ - if (ke->ke_state == KES_ONRUNQ) { - awake = 1; - ke->ke_flags &= ~KEF_DIDRUN; - } else if ((ke->ke_state == KES_THREAD) && - (TD_IS_RUNNING(ke->ke_thread))) { - awake = 1; - /* Do not clear KEF_DIDRUN */ - } else if (ke->ke_flags & KEF_DIDRUN) { - awake = 1; - ke->ke_flags &= ~KEF_DIDRUN; - } - - /* - * pctcpu is only for ps? - * Do it per kse.. and add them up at the end? - * XXXKSE - */ - ke->ke_pctcpu - = (ke->ke_pctcpu * ccpu) >> FSHIFT; - /* - * If the kse has been idle the entire second, - * stop recalculating its priority until - * it wakes up. - */ - if (ke->ke_cpticks == 0) - continue; -#if (FSHIFT >= CCPU_SHIFT) - ke->ke_pctcpu += (realstathz == 100) ? 
- ((fixpt_t) ke->ke_cpticks) << - (FSHIFT - CCPU_SHIFT) : - 100 * (((fixpt_t) ke->ke_cpticks) << - (FSHIFT - CCPU_SHIFT)) / realstathz; -#else - ke->ke_pctcpu += ((FSCALE - ccpu) * - (ke->ke_cpticks * FSCALE / realstathz)) >> - FSHIFT; -#endif - ke->ke_cpticks = 0; - } /* end of kse loop */ - /* - * If there are ANY running threads in this KSEGRP, - * then don't count it as sleeping. - */ - if (awake) { - if (kg->kg_slptime > 1) { - /* - * In an ideal world, this should not - * happen, because whoever woke us - * up from the long sleep should have - * unwound the slptime and reset our - * priority before we run at the stale - * priority. Should KASSERT at some - * point when all the cases are fixed. - */ - updatepri(kg); - } - kg->kg_slptime = 0; - } else { - kg->kg_slptime++; - } - if (kg->kg_slptime > 1) - continue; - kg->kg_estcpu = decay_cpu(loadfac, kg->kg_estcpu); - resetpriority(kg); - FOREACH_THREAD_IN_GROUP(kg, td) { - int changedqueue; - if (td->td_priority >= PUSER) { - /* - * Only change the priority - * of threads that are still at their - * user priority. - * XXXKSE This is problematic - * as we may need to re-order - * the threads on the KSEG list. - */ - changedqueue = - ((td->td_priority / RQ_PPQ) != - (kg->kg_user_pri / RQ_PPQ)); - - td->td_priority = kg->kg_user_pri; - if (changedqueue && TD_ON_RUNQ(td)) { - /* this could be optimised */ - remrunqueue(td); - td->td_priority = - kg->kg_user_pri; - setrunqueue(td); - } else { - td->td_priority = kg->kg_user_pri; - } - } - } - } /* end of ksegrp loop */ - mtx_unlock_spin(&sched_lock); - } /* end of process loop */ - sx_sunlock(&allproc_lock); - wakeup(&lbolt); - callout_reset(&schedcpu_callout, hz, schedcpu, NULL); -} - -/* - * Recalculate the priority of a process after it has slept for a while. - * For all load averages >= 1 and max p_estcpu of 255, sleeping for at - * least six times the loadfactor will decay p_estcpu to zero. - */ -void -updatepri(struct ksegrp *kg) -{ - register unsigned int newcpu; - register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]); - - newcpu = kg->kg_estcpu; - if (kg->kg_slptime > 5 * loadfac) - kg->kg_estcpu = 0; - else { - kg->kg_slptime--; /* the first time was done in schedcpu */ - while (newcpu && --kg->kg_slptime) - newcpu = decay_cpu(loadfac, newcpu); - kg->kg_estcpu = newcpu; - } - resetpriority(kg); -} /* * We're only looking at 7 bits of the address; everything is @@ -417,8 +105,7 @@ sleepinit(void) { int i; - sched_quantum = hz/10; - hogticks = 2 * sched_quantum; + hogticks = (hz / 10) * 2; /* Default only. */ for (i = 0; i < TABLESIZE; i++) TAILQ_INIT(&slpque[i]); } @@ -519,8 +206,6 @@ msleep(ident, mtx, priority, wmesg, timo) td->td_wchan = ident; td->td_wmesg = wmesg; - td->td_ksegrp->kg_slptime = 0; - td->td_priority = priority & PRIMASK; TAILQ_INSERT_TAIL(&slpque[LOOKUP(ident)], td, td_slpq); TD_SET_ON_SLEEPQ(td); if (timo) @@ -551,11 +236,20 @@ msleep(ident, mtx, priority, wmesg, timo) catch = 0; } else sig = 0; + + /* + * Let the scheduler know we're about to voluntarily go to sleep. + */ + sched_sleep(td, priority & PRIMASK); + if (TD_ON_SLEEPQ(td)) { p->p_stats->p_ru.ru_nvcsw++; TD_SET_SLEEPING(td); mi_switch(); } + /* + * We're awake from voluntary sleep. 
+ */ CTR3(KTR_PROC, "msleep resume: thread %p (pid %d, %s)", td, p->p_pid, p->p_comm); KASSERT(TD_IS_RUNNING(td), ("running but not TDS_RUNNING")); @@ -754,7 +448,7 @@ mi_switch(void) u_int sched_nest; mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED); - KASSERT((ke->ke_state == KES_THREAD), ("mi_switch: kse state?")); + KASSERT(!TD_ON_RUNQ(td), ("mi_switch: called by old code")); #ifdef INVARIANTS if (!TD_ON_LOCK(td) && @@ -800,38 +494,21 @@ mi_switch(void) PCPU_SET(switchtime, new_switchtime); CTR3(KTR_PROC, "mi_switch: old thread %p (pid %d, %s)", td, p->p_pid, p->p_comm); + sched_nest = sched_lock.mtx_recurse; - td->td_lastcpu = ke->ke_oncpu; - ke->ke_oncpu = NOCPU; - ke->ke_flags &= ~KEF_NEEDRESCHED; - /* - * At the last moment, if this thread is still marked RUNNING, - * then put it back on the run queue as it has not been suspended - * or stopped or any thing else similar. - */ - if (TD_IS_RUNNING(td)) { - /* Put us back on the run queue (kse and all). */ - setrunqueue(td); - } else if (p->p_flag & P_KSES) { - /* - * We will not be on the run queue. So we must be - * sleeping or similar. As it's available, - * someone else can use the KSE if they need it. - * (If bound LOANING can still occur). - */ - kse_reassign(ke); - } + sched_switchout(td); cpu_switch(); /* SHAZAM!!*/ + sched_lock.mtx_recurse = sched_nest; + sched_lock.mtx_lock = (uintptr_t)td; + sched_switchin(td); + /* * Start setting up stats etc. for the incoming thread. * Similar code in fork_exit() is returned to by cpu_switch() * in the case of a new thread/process. */ - td->td_kse->ke_oncpu = PCPU_GET(cpuid); - sched_lock.mtx_recurse = sched_nest; - sched_lock.mtx_lock = (uintptr_t)td; CTR3(KTR_PROC, "mi_switch: new thread %p (pid %d, %s)", td, p->p_pid, p->p_comm); if (PCPU_GET(switchtime.sec) == 0) @@ -855,7 +532,6 @@ void setrunnable(struct thread *td) { struct proc *p = td->td_proc; - struct ksegrp *kg; mtx_assert(&sched_lock, MA_OWNED); switch (p->p_state) { @@ -886,40 +562,8 @@ setrunnable(struct thread *td) p->p_sflag |= PS_SWAPINREQ; wakeup(&proc0); } - } else { - kg = td->td_ksegrp; - if (kg->kg_slptime > 1) - updatepri(kg); - kg->kg_slptime = 0; - setrunqueue(td); - maybe_resched(td); - } -} - -/* - * Compute the priority of a process when running in user mode. - * Arrange to reschedule if the resulting priority is better - * than that of the current process. - */ -void -resetpriority(kg) - register struct ksegrp *kg; -{ - register unsigned int newpriority; - struct thread *td; - - mtx_lock_spin(&sched_lock); - if (kg->kg_pri_class == PRI_TIMESHARE) { - newpriority = PUSER + kg->kg_estcpu / INVERSE_ESTCPU_WEIGHT + - NICE_WEIGHT * (kg->kg_nice - PRIO_MIN); - newpriority = min(max(newpriority, PRI_MIN_TIMESHARE), - PRI_MAX_TIMESHARE); - kg->kg_user_pri = newpriority; - } - FOREACH_THREAD_IN_GROUP(kg, td) { - maybe_resched(td); /* XXXKSE silly */ - } - mtx_unlock_spin(&sched_lock); + } else + sched_wakeup(td); } /* @@ -973,51 +617,13 @@ static void sched_setup(dummy) void *dummy; { - - callout_init(&schedcpu_callout, 1); - callout_init(&roundrobin_callout, 0); callout_init(&loadav_callout, 0); /* Kick off timeout driven events by calling first time. */ - roundrobin(NULL); - schedcpu(NULL); loadav(NULL); } /* - * We adjust the priority of the current process. The priority of - * a process gets worse as it accumulates CPU time. The cpu usage - * estimator (p_estcpu) is increased here. 
resetpriority() will - * compute a different priority each time p_estcpu increases by - * INVERSE_ESTCPU_WEIGHT - * (until MAXPRI is reached). The cpu usage estimator ramps up - * quite quickly when the process is running (linearly), and decays - * away exponentially, at a rate which is proportionally slower when - * the system is busy. The basic principle is that the system will - * 90% forget that the process used a lot of CPU time in 5 * loadav - * seconds. This causes the system to favor processes which haven't - * run much recently, and to round-robin among other processes. - */ -void -schedclock(td) - struct thread *td; -{ - struct kse *ke; - struct ksegrp *kg; - - KASSERT((td != NULL), ("schedclock: null thread pointer")); - ke = td->td_kse; - kg = td->td_ksegrp; - ke->ke_cpticks++; - kg->kg_estcpu = ESTCPULIM(kg->kg_estcpu + 1); - if ((kg->kg_estcpu % INVERSE_ESTCPU_WEIGHT) == 0) { - resetpriority(kg); - if (td->td_priority >= PUSER) - td->td_priority = kg->kg_user_pri; - } -} - -/* * General purpose yield system call */ int @@ -1027,8 +633,8 @@ yield(struct thread *td, struct yield_args *uap) mtx_assert(&Giant, MA_NOTOWNED); mtx_lock_spin(&sched_lock); - td->td_priority = PRI_MAX_TIMESHARE; kg->kg_proc->p_stats->p_ru.ru_nvcsw++; + sched_prio(td, PRI_MAX_TIMESHARE); mi_switch(); mtx_unlock_spin(&sched_lock); td->td_retval[0] = 0; diff --git a/sys/kern/ksched.c b/sys/kern/ksched.c index 881d4a3..62ab684 100644 --- a/sys/kern/ksched.c +++ b/sys/kern/ksched.c @@ -41,6 +41,7 @@ #include <sys/mutex.h> #include <sys/proc.h> #include <sys/resource.h> +#include <sys/sched.h> #include <posix4/posix4.h> @@ -56,7 +57,7 @@ int ksched_attach(struct ksched **p) struct ksched *ksched= p31b_malloc(sizeof(*ksched)); ksched->rr_interval.tv_sec = 0; - ksched->rr_interval.tv_nsec = 1000000000L / roundrobin_interval(); + ksched->rr_interval.tv_nsec = 1000000000L / sched_rr_interval(); *p = ksched; return 0; diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c new file mode 100644 index 0000000..99d23aa --- /dev/null +++ b/sys/kern/sched_4bsd.c @@ -0,0 +1,635 @@ +/*- + * Copyright (c) 1982, 1986, 1990, 1991, 1993 + * The Regents of the University of California. All rights reserved. + * (c) UNIX System Laboratories, Inc. + * All or some portions of this file are derived from material licensed + * to the University of California by American Telephone and Telegraph + * Co. or Unix System Laboratories, Inc. and are reproduced herein with + * the permission of UNIX System Laboratories, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * $FreeBSD$ + */ + +#include <sys/param.h> +#include <sys/systm.h> +#include <sys/kernel.h> +#include <sys/ktr.h> +#include <sys/lock.h> +#include <sys/mutex.h> +#include <sys/proc.h> +#include <sys/resourcevar.h> +#include <sys/sched.h> +#include <sys/smp.h> +#include <sys/sysctl.h> +#include <sys/sx.h> + + +static int sched_quantum; /* Roundrobin scheduling quantum in ticks. */ +#define SCHED_QUANTUM (hz / 10); /* Default sched quantum */ + +static struct callout schedcpu_callout; +static struct callout roundrobin_callout; + +static void roundrobin(void *arg); +static void schedcpu(void *arg); +static void sched_setup(void *dummy); +static void maybe_resched(struct thread *td); +static void updatepri(struct ksegrp *kg); +static void resetpriority(struct ksegrp *kg); + +SYSINIT(sched_setup, SI_SUB_KICK_SCHEDULER, SI_ORDER_FIRST, sched_setup, NULL) + +/* + * Global run queue. + */ +static struct runq runq; +SYSINIT(runq, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, runq_init, &runq) + +static int +sysctl_kern_quantum(SYSCTL_HANDLER_ARGS) +{ + int error, new_val; + + new_val = sched_quantum * tick; + error = sysctl_handle_int(oidp, &new_val, 0, req); + if (error != 0 || req->newptr == NULL) + return (error); + if (new_val < tick) + return (EINVAL); + sched_quantum = new_val / tick; + hogticks = 2 * sched_quantum; + return (0); +} + +SYSCTL_PROC(_kern, OID_AUTO, quantum, CTLTYPE_INT|CTLFLAG_RW, + 0, sizeof sched_quantum, sysctl_kern_quantum, "I", + "Roundrobin scheduling quantum in microseconds"); + +/* + * Arrange to reschedule if necessary, taking the priorities and + * schedulers into account. + */ +static void +maybe_resched(struct thread *td) +{ + + mtx_assert(&sched_lock, MA_OWNED); + if (td->td_priority < curthread->td_priority) + curthread->td_kse->ke_flags |= KEF_NEEDRESCHED; +} + +/* + * Force switch among equal priority processes every 100ms. + * We don't actually need to force a context switch of the current process. + * The act of firing the event triggers a context switch to softclock() and + * then switching back out again which is equivalent to a preemption, thus + * no further work is needed on the local CPU. + */ +/* ARGSUSED */ +static void +roundrobin(void *arg) +{ + +#ifdef SMP + mtx_lock_spin(&sched_lock); + forward_roundrobin(); + mtx_unlock_spin(&sched_lock); +#endif + + callout_reset(&roundrobin_callout, sched_quantum, roundrobin, NULL); +} + +/* + * Constants for digital decay and forget: + * 90% of (p_estcpu) usage in 5 * loadav time + * 95% of (p_pctcpu) usage in 60 seconds (load insensitive) + * Note that, as ps(1) mentions, this can let percentages + * total over 100% (I've seen 137.9% for 3 processes). + * + * Note that schedclock() updates p_estcpu and p_cpticks asynchronously. 
+ * + * We wish to decay away 90% of p_estcpu in (5 * loadavg) seconds. + * That is, the system wants to compute a value of decay such + * that the following for loop: + * for (i = 0; i < (5 * loadavg); i++) + * p_estcpu *= decay; + * will compute + * p_estcpu *= 0.1; + * for all values of loadavg: + * + * Mathematically this loop can be expressed by saying: + * decay ** (5 * loadavg) ~= .1 + * + * The system computes decay as: + * decay = (2 * loadavg) / (2 * loadavg + 1) + * + * We wish to prove that the system's computation of decay + * will always fulfill the equation: + * decay ** (5 * loadavg) ~= .1 + * + * If we compute b as: + * b = 2 * loadavg + * then + * decay = b / (b + 1) + * + * We now need to prove two things: + * 1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1) + * 2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg) + * + * Facts: + * For x close to zero, exp(x) =~ 1 + x, since + * exp(x) = 0! + x**1/1! + x**2/2! + ... . + * therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b. + * For x close to zero, ln(1+x) =~ x, since + * ln(1+x) = x - x**2/2 + x**3/3 - ... -1 < x < 1 + * therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1). + * ln(.1) =~ -2.30 + * + * Proof of (1): + * Solve (factor)**(power) =~ .1 given power (5*loadav): + * solving for factor, + * ln(factor) =~ (-2.30/5*loadav), or + * factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) = + * exp(-1/b) =~ (b-1)/b =~ b/(b+1). QED + * + * Proof of (2): + * Solve (factor)**(power) =~ .1 given factor == (b/(b+1)): + * solving for power, + * power*ln(b/(b+1)) =~ -2.30, or + * power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav. QED + * + * Actual power values for the implemented algorithm are as follows: + * loadav: 1 2 3 4 + * power: 5.68 10.32 14.94 19.55 + */ + +/* calculations for digital decay to forget 90% of usage in 5*loadav sec */ +#define loadfactor(loadav) (2 * (loadav)) +#define decay_cpu(loadfac, cpu) (((loadfac) * (cpu)) / ((loadfac) + FSCALE)) + +/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */ +static fixpt_t ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */ +SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, ""); + +/* kernel uses `FSCALE', userland (SHOULD) use kern.fscale */ +static int fscale __unused = FSCALE; +SYSCTL_INT(_kern, OID_AUTO, fscale, CTLFLAG_RD, 0, FSCALE, ""); + +/* + * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the + * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below + * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT). + * + * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used: + * 1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits). + * + * If you don't want to bother with the faster/more-accurate formula, you + * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate + * (more general) method of calculating the %age of CPU used by a process. + */ +#define CCPU_SHIFT 11 + +/* + * Recompute process priorities, every hz ticks. + * MP-safe, called without the Giant mutex. + */ +/* ARGSUSED */ +static void +schedcpu(void *arg) +{ + register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]); + struct thread *td; + struct proc *p; + struct kse *ke; + struct ksegrp *kg; + int realstathz; + int awake; + + realstathz = stathz ? 
stathz : hz; + sx_slock(&allproc_lock); + FOREACH_PROC_IN_SYSTEM(p) { + mtx_lock_spin(&sched_lock); + p->p_swtime++; + FOREACH_KSEGRP_IN_PROC(p, kg) { + awake = 0; + FOREACH_KSE_IN_GROUP(kg, ke) { + /* + * Increment time in/out of memory and sleep + * time (if sleeping). We ignore overflow; + * with 16-bit int's (remember them?) + * overflow takes 45 days. + */ + /* + * The kse slptimes are not touched in wakeup + * because the thread may not HAVE a KSE. + */ + if (ke->ke_state == KES_ONRUNQ) { + awake = 1; + ke->ke_flags &= ~KEF_DIDRUN; + } else if ((ke->ke_state == KES_THREAD) && + (TD_IS_RUNNING(ke->ke_thread))) { + awake = 1; + /* Do not clear KEF_DIDRUN */ + } else if (ke->ke_flags & KEF_DIDRUN) { + awake = 1; + ke->ke_flags &= ~KEF_DIDRUN; + } + + /* + * pctcpu is only for ps? + * Do it per kse.. and add them up at the end? + * XXXKSE + */ + ke->ke_pctcpu + = (ke->ke_pctcpu * ccpu) >> FSHIFT; + /* + * If the kse has been idle the entire second, + * stop recalculating its priority until + * it wakes up. + */ + if (ke->ke_cpticks == 0) + continue; +#if (FSHIFT >= CCPU_SHIFT) + ke->ke_pctcpu += (realstathz == 100) ? + ((fixpt_t) ke->ke_cpticks) << + (FSHIFT - CCPU_SHIFT) : + 100 * (((fixpt_t) ke->ke_cpticks) << + (FSHIFT - CCPU_SHIFT)) / realstathz; +#else + ke->ke_pctcpu += ((FSCALE - ccpu) * + (ke->ke_cpticks * FSCALE / realstathz)) >> + FSHIFT; +#endif + ke->ke_cpticks = 0; + } /* end of kse loop */ + /* + * If there are ANY running threads in this KSEGRP, + * then don't count it as sleeping. + */ + if (awake) { + if (kg->kg_slptime > 1) { + /* + * In an ideal world, this should not + * happen, because whoever woke us + * up from the long sleep should have + * unwound the slptime and reset our + * priority before we run at the stale + * priority. Should KASSERT at some + * point when all the cases are fixed. + */ + updatepri(kg); + } + kg->kg_slptime = 0; + } else { + kg->kg_slptime++; + } + if (kg->kg_slptime > 1) + continue; + kg->kg_estcpu = decay_cpu(loadfac, kg->kg_estcpu); + resetpriority(kg); + FOREACH_THREAD_IN_GROUP(kg, td) { + int changedqueue; + if (td->td_priority >= PUSER) { + /* + * Only change the priority + * of threads that are still at their + * user priority. + * XXXKSE This is problematic + * as we may need to re-order + * the threads on the KSEG list. + */ + changedqueue = + ((td->td_priority / RQ_PPQ) != + (kg->kg_user_pri / RQ_PPQ)); + + td->td_priority = kg->kg_user_pri; + if (changedqueue && TD_ON_RUNQ(td)) { + /* this could be optimised */ + remrunqueue(td); + td->td_priority = + kg->kg_user_pri; + setrunqueue(td); + } else { + td->td_priority = kg->kg_user_pri; + } + } + } + } /* end of ksegrp loop */ + mtx_unlock_spin(&sched_lock); + } /* end of process loop */ + sx_sunlock(&allproc_lock); + wakeup(&lbolt); + callout_reset(&schedcpu_callout, hz, schedcpu, NULL); +} + +/* + * Recalculate the priority of a process after it has slept for a while. + * For all load averages >= 1 and max p_estcpu of 255, sleeping for at + * least six times the loadfactor will decay p_estcpu to zero. 
+ */ +static void +updatepri(struct ksegrp *kg) +{ + register unsigned int newcpu; + register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]); + + newcpu = kg->kg_estcpu; + if (kg->kg_slptime > 5 * loadfac) + kg->kg_estcpu = 0; + else { + kg->kg_slptime--; /* the first time was done in schedcpu */ + while (newcpu && --kg->kg_slptime) + newcpu = decay_cpu(loadfac, newcpu); + kg->kg_estcpu = newcpu; + } + resetpriority(kg); +} + +/* + * Compute the priority of a process when running in user mode. + * Arrange to reschedule if the resulting priority is better + * than that of the current process. + */ +static void +resetpriority(struct ksegrp *kg) +{ + register unsigned int newpriority; + struct thread *td; + + mtx_lock_spin(&sched_lock); + if (kg->kg_pri_class == PRI_TIMESHARE) { + newpriority = PUSER + kg->kg_estcpu / INVERSE_ESTCPU_WEIGHT + + NICE_WEIGHT * (kg->kg_nice - PRIO_MIN); + newpriority = min(max(newpriority, PRI_MIN_TIMESHARE), + PRI_MAX_TIMESHARE); + kg->kg_user_pri = newpriority; + } + FOREACH_THREAD_IN_GROUP(kg, td) { + maybe_resched(td); /* XXXKSE silly */ + } + mtx_unlock_spin(&sched_lock); +} + +/* ARGSUSED */ +static void +sched_setup(void *dummy) +{ + if (sched_quantum == 0) + sched_quantum = SCHED_QUANTUM; + hogticks = 2 * sched_quantum; + + callout_init(&schedcpu_callout, 1); + callout_init(&roundrobin_callout, 0); + + /* Kick off timeout driven events by calling first time. */ + roundrobin(NULL); + schedcpu(NULL); +} + +/* External interfaces start here */ +int +sched_runnable(void) +{ + return runq_check(&runq); +} + +int +sched_rr_interval(void) +{ + if (sched_quantum == 0) + sched_quantum = SCHED_QUANTUM; + return (sched_quantum); +} + +/* + * We adjust the priority of the current process. The priority of + * a process gets worse as it accumulates CPU time. The cpu usage + * estimator (p_estcpu) is increased here. resetpriority() will + * compute a different priority each time p_estcpu increases by + * INVERSE_ESTCPU_WEIGHT + * (until MAXPRI is reached). The cpu usage estimator ramps up + * quite quickly when the process is running (linearly), and decays + * away exponentially, at a rate which is proportionally slower when + * the system is busy. The basic principle is that the system will + * 90% forget that the process used a lot of CPU time in 5 * loadav + * seconds. This causes the system to favor processes which haven't + * run much recently, and to round-robin among other processes. + */ +void +sched_clock(struct thread *td) +{ + struct kse *ke; + struct ksegrp *kg; + + KASSERT((td != NULL), ("schedclock: null thread pointer")); + ke = td->td_kse; + kg = td->td_ksegrp; + ke->ke_cpticks++; + kg->kg_estcpu = ESTCPULIM(kg->kg_estcpu + 1); + if ((kg->kg_estcpu % INVERSE_ESTCPU_WEIGHT) == 0) { + resetpriority(kg); + if (td->td_priority >= PUSER) + td->td_priority = kg->kg_user_pri; + } +} +/* + * charge childs scheduling cpu usage to parent. + * + * XXXKSE assume only one thread & kse & ksegrp keep estcpu in each ksegrp. + * Charge it to the ksegrp that did the wait since process estcpu is sum of + * all ksegrps, this is strictly as expected. Assume that the child process + * aggregated all the estcpu into the 'built-in' ksegrp. + */ +void +sched_exit(struct ksegrp *kg, struct ksegrp *child) +{ + kg->kg_estcpu = ESTCPULIM(kg->kg_estcpu + child->kg_estcpu); +} + +void +sched_fork(struct ksegrp *kg, struct ksegrp *child) +{ + /* + * set priority of child to be that of parent. + * XXXKSE this needs redefining.. 
+ */ + child->kg_estcpu = kg->kg_estcpu; +} + +void +sched_nice(struct ksegrp *kg, int nice) +{ + kg->kg_nice = nice; + resetpriority(kg); +} + +void +sched_prio(struct thread *td, u_char prio) +{ + td->td_priority = prio; + + if (TD_ON_RUNQ(td)) { + remrunqueue(td); + setrunqueue(td); + } +} + +void +sched_sleep(struct thread *td, u_char prio) +{ + td->td_ksegrp->kg_slptime = 0; + td->td_priority = prio; +} + +void +sched_switchin(struct thread *td) +{ + td->td_kse->ke_oncpu = PCPU_GET(cpuid); +} + +void +sched_switchout(struct thread *td) +{ + struct kse *ke; + struct proc *p; + + ke = td->td_kse; + p = td->td_proc; + + KASSERT((ke->ke_state == KES_THREAD), ("mi_switch: kse state?")); + + td->td_lastcpu = ke->ke_oncpu; + ke->ke_oncpu = NOCPU; + ke->ke_flags &= ~KEF_NEEDRESCHED; + /* + * At the last moment, if this thread is still marked RUNNING, + * then put it back on the run queue as it has not been suspended + * or stopped or any thing else similar. + */ + if (TD_IS_RUNNING(td)) { + /* Put us back on the run queue (kse and all). */ + setrunqueue(td); + } else if (p->p_flag & P_KSES) { + /* + * We will not be on the run queue. So we must be + * sleeping or similar. As it's available, + * someone else can use the KSE if they need it. + * (If bound LOANING can still occur). + */ + kse_reassign(ke); + } +} + +void +sched_wakeup(struct thread *td) +{ + struct ksegrp *kg; + + kg = td->td_ksegrp; + if (kg->kg_slptime > 1) + updatepri(kg); + kg->kg_slptime = 0; + setrunqueue(td); + maybe_resched(td); +} + +void +sched_add(struct kse *ke) +{ + mtx_assert(&sched_lock, MA_OWNED); + KASSERT((ke->ke_thread != NULL), ("runq_add: No thread on KSE")); + KASSERT((ke->ke_thread->td_kse != NULL), + ("runq_add: No KSE on thread")); + KASSERT(ke->ke_state != KES_ONRUNQ, + ("runq_add: kse %p (%s) already in run queue", ke, + ke->ke_proc->p_comm)); + KASSERT(ke->ke_proc->p_sflag & PS_INMEM, + ("runq_add: process swapped out")); + ke->ke_ksegrp->kg_runq_kses++; + ke->ke_state = KES_ONRUNQ; + + runq_add(&runq, ke); +} + +void +sched_rem(struct kse *ke) +{ + KASSERT(ke->ke_proc->p_sflag & PS_INMEM, + ("runq_remove: process swapped out")); + KASSERT((ke->ke_state == KES_ONRUNQ), ("KSE not on run queue")); + mtx_assert(&sched_lock, MA_OWNED); + + runq_remove(&runq, ke); + ke->ke_state = KES_THREAD; + ke->ke_ksegrp->kg_runq_kses--; +} + +struct kse * +sched_choose(void) +{ + struct kse *ke; + + ke = runq_choose(&runq); + + if (ke != NULL) { + runq_remove(&runq, ke); + ke->ke_state = KES_THREAD; + + KASSERT((ke->ke_thread != NULL), + ("runq_choose: No thread on KSE")); + KASSERT((ke->ke_thread->td_kse != NULL), + ("runq_choose: No KSE on thread")); + KASSERT(ke->ke_proc->p_sflag & PS_INMEM, + ("runq_choose: process swapped out")); + } + return (ke); +} + +void +sched_userret(struct thread *td) +{ + struct ksegrp *kg; + /* + * XXX we cheat slightly on the locking here to avoid locking in + * the usual case. Setting td_priority here is essentially an + * incomplete workaround for not setting it properly elsewhere. + * Now that some interrupt handlers are threads, not setting it + * properly elsewhere can clobber it in the window between setting + * it here and returning to user mode, so don't waste time setting + * it perfectly here. 
+ */ + kg = td->td_ksegrp; + if (td->td_priority != kg->kg_user_pri) { + mtx_lock_spin(&sched_lock); + td->td_priority = kg->kg_user_pri; + mtx_unlock_spin(&sched_lock); + } +} diff --git a/sys/kern/subr_trap.c b/sys/kern/subr_trap.c index c53edc3..9f8bed0 100644 --- a/sys/kern/subr_trap.c +++ b/sys/kern/subr_trap.c @@ -53,6 +53,7 @@ #include <sys/kse.h> #include <sys/ktr.h> #include <sys/resourcevar.h> +#include <sys/sched.h> #include <sys/signalvar.h> #include <sys/systm.h> #include <sys/vmmeter.h> @@ -73,7 +74,6 @@ userret(td, frame, oticks) { struct proc *p = td->td_proc; struct kse *ke = td->td_kse; - struct ksegrp *kg = td->td_ksegrp; CTR3(KTR_SYSC, "userret: thread %p (pid %d, %s)", td, p->p_pid, p->p_comm); @@ -95,19 +95,9 @@ userret(td, frame, oticks) #endif /* - * XXX we cheat slightly on the locking here to avoid locking in - * the usual case. Setting td_priority here is essentially an - * incomplete workaround for not setting it properly elsewhere. - * Now that some interrupt handlers are threads, not setting it - * properly elsewhere can clobber it in the window between setting - * it here and returning to user mode, so don't waste time setting - * it perfectly here. + * Let the scheduler adjust our priority etc. */ - if (td->td_priority != kg->kg_user_pri) { - mtx_lock_spin(&sched_lock); - td->td_priority = kg->kg_user_pri; - mtx_unlock_spin(&sched_lock); - } + sched_userret(td); /* * We need to check to see if we have to exit or wait due to a @@ -250,7 +240,7 @@ ast(struct trapframe *framep) } if (flags & KEF_NEEDRESCHED) { mtx_lock_spin(&sched_lock); - td->td_priority = kg->kg_user_pri; + sched_prio(td, kg->kg_user_pri); p->p_stats->p_ru.ru_nivcsw++; mi_switch(); mtx_unlock_spin(&sched_lock); diff --git a/sys/kern/subr_turnstile.c b/sys/kern/subr_turnstile.c index e60d805..16f598a 100644 --- a/sys/kern/subr_turnstile.c +++ b/sys/kern/subr_turnstile.c @@ -47,6 +47,7 @@ #include <sys/mutex.h> #include <sys/proc.h> #include <sys/resourcevar.h> +#include <sys/sched.h> #include <sys/sbuf.h> #include <sys/stdint.h> #include <sys/sysctl.h> @@ -146,13 +147,10 @@ propagate_priority(struct thread *td) * If on run queue move to new run queue, and quit. * XXXKSE this gets a lot more complicated under threads * but try anyhow. - * We should have a special call to do this more efficiently. 
*/ if (TD_ON_RUNQ(td)) { MPASS(td->td_blocked == NULL); - remrunqueue(td); - td->td_priority = pri; - setrunqueue(td); + sched_prio(td, pri); return; } /* diff --git a/sys/posix4/ksched.c b/sys/posix4/ksched.c index 881d4a3..62ab684 100644 --- a/sys/posix4/ksched.c +++ b/sys/posix4/ksched.c @@ -41,6 +41,7 @@ #include <sys/mutex.h> #include <sys/proc.h> #include <sys/resource.h> +#include <sys/sched.h> #include <posix4/posix4.h> @@ -56,7 +57,7 @@ int ksched_attach(struct ksched **p) struct ksched *ksched= p31b_malloc(sizeof(*ksched)); ksched->rr_interval.tv_sec = 0; - ksched->rr_interval.tv_nsec = 1000000000L / roundrobin_interval(); + ksched->rr_interval.tv_nsec = 1000000000L / sched_rr_interval(); *p = ksched; return 0; diff --git a/sys/sys/proc.h b/sys/sys/proc.h index 2c5c437..a10ac1c 100644 --- a/sys/sys/proc.h +++ b/sys/sys/proc.h @@ -872,9 +872,6 @@ void proc_linkup(struct proc *p, struct ksegrp *kg, struct kse *ke, struct thread *td); void proc_reparent(struct proc *child, struct proc *newparent); void remrunqueue(struct thread *); -void resetpriority(struct ksegrp *); -int roundrobin_interval(void); -void schedclock(struct thread *); int securelevel_ge(struct ucred *cr, int level); int securelevel_gt(struct ucred *cr, int level); void setrunnable(struct thread *); @@ -886,9 +883,7 @@ void cpu_idle(void); void cpu_switch(void); void cpu_throw(void) __dead2; void unsleep(struct thread *); -void updatepri(struct ksegrp *); void userret(struct thread *, struct trapframe *, u_int); -void maybe_resched(struct thread *); void cpu_exit(struct thread *); void cpu_sched_exit(struct thread *); @@ -911,7 +906,6 @@ void cpu_thread_setup(struct thread *td); void kse_reassign(struct kse *ke); void kse_link(struct kse *ke, struct ksegrp *kg); void ksegrp_link(struct ksegrp *kg, struct proc *p); -int kserunnable(void); void make_kse_runnable(struct kse *ke); struct thread *signal_upcall(struct proc *p, int sig); void thread_exit(void) __dead2; diff --git a/sys/sys/sched.h b/sys/sys/sched.h new file mode 100644 index 0000000..753378a --- /dev/null +++ b/sys/sys/sched.h @@ -0,0 +1,65 @@ +/*- + * Copyright (c) 2002, Jeffrey Roberson <jeff@freebsd.org> + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice unmodified, this list of conditions, and the following + * disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * $FreeBSD$ + */ + +#ifndef _SYS_SCHED_H_ +#define _SYS_SCHED_H_ + +/* + * General scheduling info. + */ +int sched_rr_interval(void); +int sched_runnable(void); + +/* + * KSE Groups contain scheduling priority information. They record the + * behavior of groups of KSEs and threads. + */ +void sched_exit(struct ksegrp *kg, struct ksegrp *child); +void sched_fork(struct ksegrp *kg, struct ksegrp *child); +void sched_nice(struct ksegrp *kg, int nice); +void sched_prio(struct thread *td, u_char prio); +void sched_userret(struct thread *td); + +/* + * Threads are switched in and out, block on resources, and have temporary + * priorities inherited from their ksegs. + */ +void sched_clock(struct thread *td); +void sched_sleep(struct thread *td, u_char prio); +void sched_switchin(struct thread *td); +void sched_switchout(struct thread *td); +void sched_wakeup(struct thread *td); + +/* + * KSEs are moved on and off of run queues. + */ +void sched_add(struct kse *ke); +void sched_rem(struct kse *ke); +struct kse *sched_choose(void); + +#endif /* !_SYS_SCHED_H_ */ diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c index ee40de4..14d058e 100644 --- a/sys/vm/vm_pageout.c +++ b/sys/vm/vm_pageout.c @@ -82,6 +82,7 @@ #include <sys/kthread.h> #include <sys/ktr.h> #include <sys/resourcevar.h> +#include <sys/sched.h> #include <sys/signalvar.h> #include <sys/vnode.h> #include <sys/vmmeter.h> @@ -1191,9 +1192,7 @@ rescan0: killproc(bigproc, "out of swap space"); mtx_lock_spin(&sched_lock); FOREACH_KSEGRP_IN_PROC(bigproc, kg) { - kg->kg_estcpu = 0; - kg->kg_nice = PRIO_MIN; /* XXXKSE ??? */ - resetpriority(kg); + sched_nice(kg, PRIO_MIN); /* XXXKSE ??? */ } mtx_unlock_spin(&sched_lock); PROC_UNLOCK(bigproc); diff --git a/sys/vm/vm_zeroidle.c b/sys/vm/vm_zeroidle.c index a239cfa..af5b7b2 100644 --- a/sys/vm/vm_zeroidle.c +++ b/sys/vm/vm_zeroidle.c @@ -18,6 +18,7 @@ #include <sys/vmmeter.h> #include <sys/lock.h> #include <sys/mutex.h> +#include <sys/sched.h> #include <sys/sysctl.h> #include <sys/kthread.h> @@ -128,7 +129,7 @@ vm_pagezero(void) for (;;) { if (vm_page_zero_check()) { pages += vm_page_zero_idle(); - if (pages > idlezero_maxrun || kserunnable()) { + if (pages > idlezero_maxrun || sched_runnable()) { mtx_lock_spin(&sched_lock); td->td_proc->p_stats->p_ru.ru_nvcsw++; mi_switch(); |