author    jeff <jeff@FreeBSD.org>    2008-01-23 03:10:18 +0000
committer jeff <jeff@FreeBSD.org>    2008-01-23 03:10:18 +0000
commit    be58be75dda104890b17bc4c8cd863f9fac52e60 (patch)
tree      508028a42b3e4c31a90bbb0c736101c4c68c4226 /sys
parent    061b80383063221dd24d9c9e2d9d6131b6cf12bd (diff)
- sched_prio() should only adjust tdq_lowpri if the thread is running or on
  a run-queue.  If the priority is numerically raised, only change lowpri
  when we're certain it will be correct.  Some slop is allowed; however,
  previously we could erroneously raise lowpri for an idle cpu that a
  thread had recently run on, which led to errors in load-balancing
  decisions.
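As an illustration of the rule above, the following is a minimal user-space
sketch of the tdq_lowpri update condition this commit introduces.  It is not
the kernel code: fake_tdq and lowpri_update are invented names, and the
priority values are arbitrary.  A numerically lower (better) priority always
improves lowpri; a numerically raised priority is only propagated when the
thread itself held lowpri and the queue load shows no other thread could
still own the old value.

	/*
	 * Illustrative user-space model of the lowpri update rule; not
	 * sched_ule.c itself.  fake_tdq and lowpri_update are invented here.
	 */
	#include <stdio.h>

	struct fake_tdq {
		int	tdq_lowpri;	/* best (numerically lowest) priority present */
		int	tdq_load;	/* number of runnable threads on the queue */
	};

	/*
	 * A running thread's priority changes from old_prio to new_prio.
	 * Lower values are better, so an improvement is always safe to
	 * record.  A numerically raised priority is recorded only if this
	 * thread held lowpri and is effectively alone (load <= 1); with
	 * more load, another thread may still hold the old, better priority.
	 */
	static void
	lowpri_update(struct fake_tdq *tdq, int old_prio, int new_prio)
	{

		if (new_prio < tdq->tdq_lowpri ||
		    (old_prio == tdq->tdq_lowpri && tdq->tdq_load <= 1))
			tdq->tdq_lowpri = new_prio;
	}

	int
	main(void)
	{
		struct fake_tdq tdq = { .tdq_lowpri = 100, .tdq_load = 1 };

		lowpri_update(&tdq, 100, 120);	/* raise while alone: accepted */
		printf("%d\n", tdq.tdq_lowpri);	/* 120 */
		tdq.tdq_load = 3;
		lowpri_update(&tdq, 120, 140);	/* raise under load: rejected */
		printf("%d\n", tdq.tdq_lowpri);	/* 120 */
		lowpri_update(&tdq, 140, 90);	/* improvement: always taken */
		printf("%d\n", tdq.tdq_lowpri);	/* 90 */
		return (0);
	}

Run in user space this prints 120, 120 and 90: the raise is accepted only
while the thread is alone on the queue.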
Diffstat (limited to 'sys')
-rw-r--r--  sys/kern/sched_ule.c | 16
1 file changed, 9 insertions(+), 7 deletions(-)
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index 9957a25..706adf9 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -1617,16 +1617,18 @@ sched_thread_priority(struct thread *td, u_char prio)
 		sched_rem(td);
 		td->td_priority = prio;
 		sched_add(td, SRQ_BORROWING);
-	} else {
 #ifdef SMP
+	} else if (TD_IS_RUNNING(td)) {
 		struct tdq *tdq;
 
 		tdq = TDQ_CPU(ts->ts_cpu);
-		if (prio < tdq->tdq_lowpri)
+		if (prio < tdq->tdq_lowpri ||
+		    (td->td_priority == tdq->tdq_lowpri && tdq->tdq_load <= 1))
 			tdq->tdq_lowpri = prio;
+		td->td_priority = prio;
 #endif
+	} else
 		td->td_priority = prio;
-	}
 }
 
 /*
@@ -1843,8 +1845,6 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
 	mtx = td->td_lock;
 #ifdef SMP
 	ts->ts_rltick = ticks;
-	if (newtd && newtd->td_priority < tdq->tdq_lowpri)
-		tdq->tdq_lowpri = newtd->td_priority;
 #endif
 	td->td_lastcpu = td->td_oncpu;
 	td->td_oncpu = NOCPU;
@@ -2265,6 +2265,7 @@ sched_choose(void)
 	struct tdq_group *tdg;
 #endif
 	struct td_sched *ts;
+	struct thread *td;
 	struct tdq *tdq;
 
 	tdq = TDQ_SELF();
@@ -2274,6 +2275,7 @@ sched_choose(void)
 		tdq_runq_rem(tdq, ts);
 		return (ts->ts_thread);
 	}
+	td = PCPU_GET(idlethread);
 #ifdef SMP
 	/*
 	 * We only set the idled bit when all of the cpus in the group are
@@ -2284,9 +2286,9 @@ sched_choose(void)
 	tdg->tdg_idlemask |= PCPU_GET(cpumask);
 	if (tdg->tdg_idlemask == tdg->tdg_cpumask)
 		atomic_set_int(&tdq_idle, tdg->tdg_mask);
-	tdq->tdq_lowpri = PRI_MAX_IDLE;
+	tdq->tdq_lowpri = td->td_priority;
 #endif
-	return (PCPU_GET(idlethread));
+	return (td);
 }
 
 /*
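Taken together, the hunks above also move tdq_lowpri maintenance out of
sched_switch() and make an idle cpu advertise its idle thread's real
priority instead of the PRI_MAX_IDLE constant.  The sketch below is an
invented user-space model (pick_cpu, fake_tdq and the priority values are
not from ULE) of why a stale, too-good lowpri on an idle cpu skews
balancing: a balancer that targets the cpu running the least important
work would pass over that idle cpu.

	/*
	 * Invented user-space model of a lowpri-driven cpu pick; not the
	 * real ULE balancer.  A numerically higher tdq_lowpri means less
	 * important work is running there.
	 */
	#include <stdio.h>

	#define	NCPU	4

	struct fake_tdq {
		int	tdq_lowpri;
	};

	/* Prefer the cpu whose best thread is least important. */
	static int
	pick_cpu(struct fake_tdq tdqs[NCPU])
	{
		int i, best;

		best = 0;
		for (i = 1; i < NCPU; i++)
			if (tdqs[i].tdq_lowpri > tdqs[best].tdq_lowpri)
				best = i;
		return (best);
	}

	int
	main(void)
	{
		struct fake_tdq tdqs[NCPU] = {
			{ 100 }, { 120 }, { 224 }, { 90 }
		};

		/* cpu 2 is idle and advertises its idle thread's priority. */
		printf("target: cpu %d\n", pick_cpu(tdqs));	/* cpu 2 */

		/* A stale, too-good lowpri left on the idle cpu hides it. */
		tdqs[2].tdq_lowpri = 80;
		printf("target: cpu %d\n", pick_cpu(tdqs));	/* cpu 1, wrongly */
		return (0);
	}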