summaryrefslogtreecommitdiffstats
path: root/sys/kern/sched_4bsd.c
diff options
context:
space:
mode:
author    jeff <jeff@FreeBSD.org>  2007-09-21 04:10:23 +0000
committer jeff <jeff@FreeBSD.org>  2007-09-21 04:10:23 +0000
commitbc0eadb21d99ae42fb629bab36adfe6ac36c8ec2 (patch)
treeb0aa705537c625191bcfc8f15329374acc84160b /sys/kern/sched_4bsd.c
parent13d3160ef56965cf078529f5abb63d74e18d015a (diff)
downloadFreeBSD-src-bc0eadb21d99ae42fb629bab36adfe6ac36c8ec2.zip
FreeBSD-src-bc0eadb21d99ae42fb629bab36adfe6ac36c8ec2.tar.gz
- Redefine p_swtime and td_slptime as p_swtick and td_slptick.  This
  changes the units from seconds to the value of 'ticks' when swapped
  in/out.  ULE does not have a periodic timer that scans all threads in the
  system and as such maintaining a per-second counter is difficult.
- Change computations requiring the unit in seconds to subtract ticks
  and divide by hz.  This does make the wraparound condition hz times
  more frequent but this is still in the range of several months to
  years and the adverse effects are minimal.

Approved by:	re
Diffstat (limited to 'sys/kern/sched_4bsd.c')
-rw-r--r--	sys/kern/sched_4bsd.c	37
1 file changed, 20 insertions(+), 17 deletions(-)
diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index 3692f0f..0573be0 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -84,6 +84,7 @@ struct td_sched {
fixpt_t ts_pctcpu; /* (j) %cpu during p_swtime. */
u_char ts_rqindex; /* (j) Run queue index. */
int ts_cpticks; /* (j) Ticks of cpu time. */
+ int ts_slptime; /* (j) Seconds !RUNNING. */
struct runq *ts_runq; /* runq the thread is currently on */
};
@@ -379,11 +380,6 @@ schedcpu(void)
sx_slock(&allproc_lock);
FOREACH_PROC_IN_SYSTEM(p) {
PROC_SLOCK(p);
- /*
- * Increment time in/out of memory. We ignore overflow; with
- * 16-bit int's (remember them?) overflow takes 45 days.
- */
- p->p_swtime++;
FOREACH_THREAD_IN_PROC(p, td) {
awake = 0;
thread_lock(td);
@@ -440,7 +436,7 @@ XXX this is broken
*/
if (awake) {
- if (td->td_slptime > 1) {
+ if (ts->ts_slptime > 1) {
/*
* In an ideal world, this should not
* happen, because whoever woke us
@@ -452,10 +448,10 @@ XXX this is broken
*/
updatepri(td);
}
- td->td_slptime = 0;
+ ts->ts_slptime = 0;
} else
- td->td_slptime++;
- if (td->td_slptime > 1) {
+ ts->ts_slptime++;
+ if (ts->ts_slptime > 1) {
thread_unlock(td);
continue;
}
@@ -490,16 +486,18 @@ schedcpu_thread(void)
static void
updatepri(struct thread *td)
{
- register fixpt_t loadfac;
- register unsigned int newcpu;
+ struct td_sched *ts;
+ fixpt_t loadfac;
+ unsigned int newcpu;
+ ts = td->td_sched;
loadfac = loadfactor(averunnable.ldavg[0]);
- if (td->td_slptime > 5 * loadfac)
+ if (ts->ts_slptime > 5 * loadfac)
td->td_estcpu = 0;
else {
newcpu = td->td_estcpu;
- td->td_slptime--; /* was incremented in schedcpu() */
- while (newcpu && --td->td_slptime)
+ ts->ts_slptime--; /* was incremented in schedcpu() */
+ while (newcpu && --ts->ts_slptime)
newcpu = decay_cpu(loadfac, newcpu);
td->td_estcpu = newcpu;
}
@@ -827,7 +825,8 @@ sched_sleep(struct thread *td)
{
THREAD_LOCK_ASSERT(td, MA_OWNED);
- td->td_slptime = 0;
+ td->td_slptick = ticks;
+ td->td_sched->ts_slptime = 0;
}
void
@@ -939,12 +938,16 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
void
sched_wakeup(struct thread *td)
{
+ struct td_sched *ts;
+
THREAD_LOCK_ASSERT(td, MA_OWNED);
- if (td->td_slptime > 1) {
+ ts = td->td_sched;
+ if (ts->ts_slptime > 1) {
updatepri(td);
resetpriority(td);
}
- td->td_slptime = 0;
+ td->td_slptick = ticks;
+ ts->ts_slptime = 0;
sched_add(td, SRQ_BORING);
}
OpenPOWER on IntegriCloud