Diffstat (limited to 'sys/kern/sched_ule.c')
-rw-r--r--	sys/kern/sched_ule.c	68
1 file changed, 22 insertions, 46 deletions
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index 5fb15e35..f930afc 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -99,7 +99,6 @@ struct td_sched {
	u_int		ts_slptime;	/* Number of ticks we vol. slept */
	u_int		ts_runtime;	/* Number of ticks we were running */
	int		ts_ltick;	/* Last tick that we were running on */
-	int		ts_incrtick;	/* Last tick that we incremented on */
	int		ts_ftick;	/* First tick that we were running on */
	int		ts_ticks;	/* Tick count */
 #ifdef KTR
@@ -291,7 +290,7 @@ static void sched_thread_priority(struct thread *, u_char);
 static int sched_interact_score(struct thread *);
 static void sched_interact_update(struct thread *);
 static void sched_interact_fork(struct thread *);
-static void sched_pctcpu_update(struct td_sched *);
+static void sched_pctcpu_update(struct td_sched *, int);
 
 /* Operations on per processor queues */
 static struct thread *tdq_choose(struct tdq *);
@@ -671,7 +670,7 @@ cpu_search(const struct cpu_group *cg, struct cpu_search *low,
			}
		}
		if (match & CPU_SEARCH_HIGHEST)
-			if (hgroup.cs_load != -1 &&
+			if (hgroup.cs_load >= 0 &&
			    (load > hload ||
			    (load == hload && hgroup.cs_load > high->cs_load))) {
				hload = load;
@@ -1590,24 +1589,21 @@ sched_rr_interval(void)
  * mechanism since it happens with less regular and frequent events.
  */
 static void
-sched_pctcpu_update(struct td_sched *ts)
+sched_pctcpu_update(struct td_sched *ts, int run)
 {
+	int t = ticks;
 
-	if (ts->ts_ticks == 0)
-		return;
-	if (ticks - (hz / 10) < ts->ts_ltick &&
-	    SCHED_TICK_TOTAL(ts) < SCHED_TICK_MAX)
-		return;
-	/*
-	 * Adjust counters and watermark for pctcpu calc.
-	 */
-	if (ts->ts_ltick > ticks - SCHED_TICK_TARG)
-		ts->ts_ticks = (ts->ts_ticks / (ticks - ts->ts_ftick)) *
-		    SCHED_TICK_TARG;
-	else
+	if (t - ts->ts_ltick >= SCHED_TICK_TARG) {
		ts->ts_ticks = 0;
-	ts->ts_ltick = ticks;
-	ts->ts_ftick = ts->ts_ltick - SCHED_TICK_TARG;
+		ts->ts_ftick = t - SCHED_TICK_TARG;
+	} else if (t - ts->ts_ftick >= SCHED_TICK_MAX) {
+		ts->ts_ticks = (ts->ts_ticks / (ts->ts_ltick - ts->ts_ftick)) *
+		    (ts->ts_ltick - (t - SCHED_TICK_TARG));
+		ts->ts_ftick = t - SCHED_TICK_TARG;
+	}
+	if (run)
+		ts->ts_ticks += (t - ts->ts_ltick) << SCHED_TICK_SHIFT;
+	ts->ts_ltick = t;
 }
 
 /*
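
[Annotation] Taken together, the rewritten sched_pctcpu_update() keeps ts_ticks over a sliding window of SCHED_TICK_TARG ticks: a thread off CPU for longer than the whole window loses its history, a window that has grown past SCHED_TICK_MAX is scaled back to the fraction still inside the target range, and the ticks elapsed since ts_ltick are credited as run time only when the new "run" argument is nonzero. The standalone sketch below replays that arithmetic in userland; sim_sched, sim_pctcpu_update, and the constants (hz = 1000 and a 10-second window are assumptions here) are illustrative stand-ins, not the kernel's definitions.

#include <stdio.h>

#define HZ		1000			/* assumed tick rate */
#define TICK_SHIFT	10			/* stand-in for SCHED_TICK_SHIFT */
#define TICK_TARG	(10 * HZ)		/* stand-in for SCHED_TICK_TARG */
#define TICK_MAX	(TICK_TARG + HZ)	/* stand-in for SCHED_TICK_MAX */

struct sim_sched {
	int ticks;	/* scaled run-tick count (ts_ticks) */
	int ltick;	/* last tick we were running on (ts_ltick) */
	int ftick;	/* first tick of the window (ts_ftick) */
};

static void
sim_pctcpu_update(struct sim_sched *ts, int now, int run)
{
	if (now - ts->ltick >= TICK_TARG) {
		/* Off CPU longer than the whole window: drop history. */
		ts->ticks = 0;
		ts->ftick = now - TICK_TARG;
	} else if (now - ts->ftick >= TICK_MAX) {
		/* Window overgrown: keep only the part of the count that
		 * still falls inside the target window. */
		ts->ticks = (ts->ticks / (ts->ltick - ts->ftick)) *
		    (ts->ltick - (now - TICK_TARG));
		ts->ftick = now - TICK_TARG;
	}
	/* Credit elapsed ticks as run time only if we were running. */
	if (run)
		ts->ticks += (now - ts->ltick) << TICK_SHIFT;
	ts->ltick = now;
}

int
main(void)
{
	struct sim_sched ts = { 0, 0, -TICK_TARG };
	int now;

	/* Run 5 s, sleep 5 s, run 5 s; sample once per second. */
	for (now = HZ; now <= 15 * HZ; now += HZ) {
		int running = (now <= 5 * HZ || now > 10 * HZ);

		sim_pctcpu_update(&ts, now, running);
		printf("t=%2ds %s ticks=%8d window=[%6d,%6d]\n", now / HZ,
		    running ? "run  " : "sleep", ts.ticks, ts.ftick, ts.ltick);
	}
	return (0);
}

Sampling once per hz here is only for the printout; in the kernel the update now runs from the call sites added below (context switch, clock tick, wakeup, and pctcpu queries), which is what lets the per-tick ts_incrtick bookkeeping go away.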
@@ -1826,6 +1822,7 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
	tdq = TDQ_CPU(cpuid);
	ts = td->td_sched;
	mtx = td->td_lock;
+	sched_pctcpu_update(ts, 1);
	ts->ts_rltick = ticks;
	td->td_lastcpu = td->td_oncpu;
	td->td_oncpu = NOCPU;
@@ -1880,6 +1877,7 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
 #endif
	lock_profile_release_lock(&TDQ_LOCKPTR(tdq)->lock_object);
	TDQ_LOCKPTR(tdq)->mtx_lock = (uintptr_t)newtd;
+	sched_pctcpu_update(newtd->td_sched, 0);
 
 #ifdef KDTRACE_HOOKS
	/*
@@ -1974,12 +1972,9 @@ sched_wakeup(struct thread *td)
	slptick = td->td_slptick;
	td->td_slptick = 0;
	if (slptick && slptick != ticks) {
-		u_int hzticks;
-
-		hzticks = (ticks - slptick) << SCHED_TICK_SHIFT;
-		ts->ts_slptime += hzticks;
+		ts->ts_slptime += (ticks - slptick) << SCHED_TICK_SHIFT;
		sched_interact_update(td);
-		sched_pctcpu_update(ts);
+		sched_pctcpu_update(ts, 0);
	}
	/* Reset the slice value after we sleep. */
	ts->ts_slice = sched_slice;
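
[Annotation] The wakeup path above folds the old hzticks temporary into a single expression: the number of ticks spent asleep is shifted by SCHED_TICK_SHIFT so that ts_slptime stays in the same fixed-point units as the run-time history used by the interactivity score. A minimal worked example, assuming hz = 1000 and the stock SCHED_TICK_SHIFT of 10:

#include <stdio.h>

#define SCHED_TICK_SHIFT	10	/* assumed to match sched_ule.c */

int
main(void)
{
	int slptick = 4000;	/* tick count recorded when the thread slept */
	int ticks = 6500;	/* tick count at wakeup (hz = 1000 assumed) */

	/* 2500 ticks (2.5 s) asleep, scaled into the same fixed-point
	 * units as the run-time history: 2500 << 10 == 2560000. */
	printf("ts_slptime += %d\n", (ticks - slptick) << SCHED_TICK_SHIFT);
	return (0);
}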
@@ -1994,6 +1989,7 @@ void
 sched_fork(struct thread *td, struct thread *child)
 {
	THREAD_LOCK_ASSERT(td, MA_OWNED);
+	sched_pctcpu_update(td->td_sched, 1);
	sched_fork_thread(td, child);
	/*
	 * Penalize the parent and child for forking.
@@ -2029,7 +2025,6 @@ sched_fork_thread(struct thread *td, struct thread *child)
	 */
	ts2->ts_ticks = ts->ts_ticks;
	ts2->ts_ltick = ts->ts_ltick;
-	ts2->ts_incrtick = ts->ts_incrtick;
	ts2->ts_ftick = ts->ts_ftick;
	/*
	 * Do not inherit any borrowed priority from the parent.
@@ -2186,6 +2181,7 @@ sched_clock(struct thread *td)
		tdq->tdq_ridx = tdq->tdq_idx;
	}
	ts = td->td_sched;
+	sched_pctcpu_update(ts, 1);
	if (td->td_pri_class & PRI_FIFO_BIT)
		return;
	if (PRI_BASE(td->td_pri_class) == PRI_TIMESHARE) {
@@ -2210,31 +2206,12 @@ sched_clock(struct thread *td)
 }
 
 /*
- * Called once per hz tick. Used for cpu utilization information. This
- * is easier than trying to scale based on stathz.
+ * Called once per hz tick.
  */
 void
 sched_tick(int cnt)
 {
-	struct td_sched *ts;
 
-	ts = curthread->td_sched;
-	/*
-	 * Ticks is updated asynchronously on a single cpu. Check here to
-	 * avoid incrementing ts_ticks multiple times in a single tick.
-	 */
-	if (ts->ts_incrtick == ticks)
-		return;
-	/* Adjust ticks for pctcpu */
-	ts->ts_ticks += cnt << SCHED_TICK_SHIFT;
-	ts->ts_ltick = ticks;
-	ts->ts_incrtick = ticks;
-	/*
-	 * Update if we've exceeded our desired tick threshold by over one
-	 * second.
-	 */
-	if (ts->ts_ftick + SCHED_TICK_MAX < ts->ts_ltick)
-		sched_pctcpu_update(ts);
 }
 
 /*
@@ -2276,7 +2253,6 @@ sched_choose(void)
	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
	td = tdq_choose(tdq);
	if (td) {
-		td->td_sched->ts_ltick = ticks;
		tdq_runq_rem(tdq, td);
		tdq->tdq_lowpri = td->td_priority;
		return (td);
@@ -2422,10 +2398,10 @@ sched_pctcpu(struct thread *td)
		return (0);
 
	THREAD_LOCK_ASSERT(td, MA_OWNED);
+	sched_pctcpu_update(ts, TD_IS_RUNNING(td));
	if (ts->ts_ticks) {
		int rtick;
 
-		sched_pctcpu_update(ts);
		/* How many rtick per second ? */
		rtick = min(SCHED_TICK_HZ(ts) / SCHED_TICK_SECS, hz);
		pctcpu = (FSCALE * ((FSCALE * rtick)/hz)) >> FSHIFT;
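
[Annotation] The tail of sched_pctcpu() converts run ticks per second into the traditional fixed-point fraction that ps(1) and top(1) report: with FSHIFT = 11 and FSCALE = 1 << FSHIFT from <sys/param.h>, a fully busy thread maps to pctcpu == FSCALE. A small userland sketch of the same arithmetic (hz = 1000 is an assumption here):

#include <stdio.h>

#define FSHIFT	11		/* fixed-point shift, as in <sys/param.h> */
#define FSCALE	(1 << FSHIFT)

int
main(void)
{
	int hz = 1000;		/* assumed kernel tick rate */
	int rtick;

	/* Map run ticks per second onto the 0..FSCALE pctcpu scale the
	 * same way the function above does. */
	for (rtick = 0; rtick <= hz; rtick += 250) {
		int pctcpu = (FSCALE * ((FSCALE * rtick) / hz)) >> FSHIFT;

		printf("rtick=%4d  pctcpu=%4d  (%3d%%)\n",
		    rtick, pctcpu, 100 * pctcpu / FSCALE);
	}
	return (0);
}

Because the hunk above moves sched_pctcpu_update() ahead of the ts_ticks test and passes TD_IS_RUNNING(td), a query now decays or credits the window at the moment of the query instead of relying on a stale per-tick update.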