path: root/sys/kern/sched_ule.c
author	jeff <jeff@FreeBSD.org>	2008-03-10 22:48:27 +0000
committer	jeff <jeff@FreeBSD.org>	2008-03-10 22:48:27 +0000
commit	540fa064d9cfc1fff7b06504893684dc0d934999 (patch)
tree	475f07b01383353a7b25f7102447d1a5c5d3debb	/sys/kern/sched_ule.c
parent	18300325cbb0d05941fff043af793858b4aef3dd (diff)
- Fix the invalid priority panics people are seeing by forcing
  tdq_runq_add to select the runq rather than hoping we set it properly
  when we adjusted the priority.  This involves the same number of
  branches as before so should perform identically without the extra
  fragility.

Tested by:	bz
Reviewed by:	bz
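The change makes tdq_runq_add() derive the run queue from the thread's
current priority at enqueue time instead of trusting a runq pointer cached
earlier by tdq_runq_pick().  A minimal standalone sketch of that selection
follows; the DEMO_* names and cutoff values are illustrative stand-ins, not
the kernel's actual definitions from <sys/priority.h>.

#include <stdio.h>

/* Illustrative cutoffs only; the real values live in <sys/priority.h>. */
#define	DEMO_PRI_MAX_REALTIME	95
#define	DEMO_PRI_MAX_TIMESHARE	223

enum demo_runq { DEMO_RQ_REALTIME, DEMO_RQ_TIMESHARE, DEMO_RQ_IDLE };

/*
 * Same shape as the selection now done inside tdq_runq_add(): the queue is
 * chosen from the priority at the moment the thread is enqueued, so a stale
 * cached choice can no longer trigger the invalid-priority panic.
 */
static enum demo_runq
demo_runq_for_priority(unsigned char pri)
{

	if (pri <= DEMO_PRI_MAX_REALTIME)
		return (DEMO_RQ_REALTIME);
	else if (pri <= DEMO_PRI_MAX_TIMESHARE)
		return (DEMO_RQ_TIMESHARE);
	return (DEMO_RQ_IDLE);
}

int
main(void)
{
	static const char *names[] = { "realtime", "timeshare", "idle" };
	const unsigned char samples[] = { 0, 95, 96, 150, 223, 224, 255 };
	size_t i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("pri %3u -> %s\n", samples[i],
		    names[demo_runq_for_priority(samples[i])]);
	return (0);
}

The kernel version additionally remaps timeshare priorities onto the
circular timeshare queue via runq_add_pri(), which the sketch omits.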
Diffstat (limited to 'sys/kern/sched_ule.c')
-rw-r--r--	sys/kern/sched_ule.c	35
1 files changed, 10 insertions, 25 deletions
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index a4117cf..c89e45b 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -383,6 +383,8 @@ sched_shouldpreempt(int pri, int cpri, int remote)
 static __inline void
 tdq_runq_add(struct tdq *tdq, struct td_sched *ts, int flags)
 {
+	u_char pri;
+
 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
 	THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED);
@@ -391,10 +393,11 @@ tdq_runq_add(struct tdq *tdq, struct td_sched *ts, int flags)
 		tdq->tdq_transferable++;
 		ts->ts_flags |= TSF_XFERABLE;
 	}
-	if (ts->ts_runq == &tdq->tdq_timeshare) {
-		u_char pri;
-
-		pri = ts->ts_thread->td_priority;
+	pri = ts->ts_thread->td_priority;
+	if (pri <= PRI_MAX_REALTIME) {
+		ts->ts_runq = &tdq->tdq_realtime;
+	} else if (pri <= PRI_MAX_TIMESHARE) {
+		ts->ts_runq = &tdq->tdq_timeshare;
 		KASSERT(pri <= PRI_MAX_TIMESHARE && pri >= PRI_MIN_TIMESHARE,
 		    ("Invalid priority %d on timeshare runq", pri));
 		/*
@@ -415,25 +418,10 @@ tdq_runq_add(struct tdq *tdq, struct td_sched *ts, int flags)
 		} else
 			pri = tdq->tdq_ridx;
 		runq_add_pri(ts->ts_runq, ts, pri, flags);
+		return;
 	} else
-		runq_add(ts->ts_runq, ts, flags);
-}
-
-/*
- * Pick the run queue based on priority.
- */
-static __inline void
-tdq_runq_pick(struct tdq *tdq, struct td_sched *ts)
-{
-	int pri;
-
-	pri = ts->ts_thread->td_priority;
-	if (pri <= PRI_MAX_REALTIME)
-		ts->ts_runq = &tdq->tdq_realtime;
-	else if (pri <= PRI_MAX_TIMESHARE)
-		ts->ts_runq = &tdq->tdq_timeshare;
-	else
 		ts->ts_runq = &tdq->tdq_idle;
+	runq_add(ts->ts_runq, ts, flags);
 }
 
 /*
@@ -456,7 +444,6 @@ tdq_runq_rem(struct tdq *tdq, struct td_sched *ts)
 			runq_remove_idx(ts->ts_runq, ts, &tdq->tdq_ridx);
 		else
 			runq_remove_idx(ts->ts_runq, ts, NULL);
-		ts->ts_ltick = ticks;
 	} else
 		runq_remove(ts->ts_runq, ts);
 }
@@ -1250,7 +1237,6 @@ sched_setup(void *dummy)
 	/* Add thread0's load since it's running. */
 	TDQ_LOCK(tdq);
 	thread0.td_lock = TDQ_LOCKPTR(TDQ_SELF());
-	tdq_runq_pick(tdq, &td_sched0);
 	tdq_load_add(tdq, &td_sched0);
 	tdq->tdq_lowpri = thread0.td_priority;
 	TDQ_UNLOCK(tdq);
@@ -1547,7 +1533,6 @@ sched_thread_priority(struct thread *td, u_char prio)
 	tdq = TDQ_CPU(ts->ts_cpu);
 	oldpri = td->td_priority;
 	td->td_priority = prio;
-	tdq_runq_pick(tdq, ts);
 	if (TD_IS_RUNNING(td)) {
 		if (prio < tdq->tdq_lowpri)
 			tdq->tdq_lowpri = prio;
@@ -2202,6 +2187,7 @@ sched_choose(void)
 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
 	ts = tdq_choose(tdq);
 	if (ts) {
+		ts->ts_ltick = ticks;
 		tdq_runq_rem(tdq, ts);
 		return (ts->ts_thread);
 	}
@@ -2254,7 +2240,6 @@ tdq_add(struct tdq *tdq, struct thread *td, int flags)
 	ts = td->td_sched;
 	if (td->td_priority < tdq->tdq_lowpri)
 		tdq->tdq_lowpri = td->td_priority;
-	tdq_runq_pick(tdq, ts);
 	tdq_runq_add(tdq, ts, flags);
 	tdq_load_add(tdq, ts);
 }