path: root/sys/kern/sched_ule.c
author     jeff <jeff@FreeBSD.org>    2003-03-03 04:28:07 +0000
committer  jeff <jeff@FreeBSD.org>    2003-03-03 04:28:07 +0000
commit     2d1c8006af34da5cb4a92951bf42e868823f6e04 (patch)
tree       bce426a6dbb674d53ff6480937215e0874053bd1 /sys/kern/sched_ule.c
parent     f0f35853d03f434ca1f8a49c3ca4b0fbf8aac94a (diff)
- In sched_add() special case PRI_TIMESHARE and PRI_ITHD|PRI_REALTIME.  We
  always place ITHD & REALTIME threads on the current queue of the current
  cpu.  Prior to this change an interrupt thread would only ever run on one
  cpu.
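
The placement rule described in this log message can be sketched on its own.
The fragment below is only an illustration under assumed names: thread_class,
runq_choice, and pick_runq() are hypothetical stand-ins, while the real code
operates on struct kse / struct kseq and the ksq_curr / ksq_next queues shown
in the diff further down.

/*
 * Illustrative sketch of the queue-selection rule added to sched_add().
 * All names here (thread_class, runq_choice, pick_runq) are hypothetical
 * stand-ins; only the decision structure mirrors the change.
 */
enum thread_class { CLASS_TIMESHARE, CLASS_REALTIME, CLASS_ITHD };

enum runq_choice {
        BOUND_CPU_CURR,         /* ksq_curr of the cpu the kse is bound to */
        BOUND_CPU_NEXT,         /* ksq_next of that same bound cpu */
        SELF_CPU_CURR           /* ksq_curr of the cpu running sched_add() */
};

static enum runq_choice
pick_runq(enum thread_class class, int interactive)
{
        /*
         * Timeshare threads stay on their bound cpu; interactivity picks
         * the current or the next queue.
         */
        if (class == CLASS_TIMESHARE)
                return (interactive ? BOUND_CPU_CURR : BOUND_CPU_NEXT);
        /*
         * Real-time and interrupt threads always land on the current
         * queue of the current cpu for the lowest latency response.
         */
        return (SELF_CPU_CURR);
}

Before this change the non-timeshare case also went through
KSEQ_CPU(ke->ke_cpu), which is why, per the log message, an interrupt thread
would only ever run on the one cpu it was bound to.
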
Diffstat (limited to 'sys/kern/sched_ule.c')
-rw-r--r--  sys/kern/sched_ule.c  |  31
1 file changed, 22 insertions, 9 deletions
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index 8e3af08..7de1c2e 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -176,8 +176,7 @@ sched_slp_ratio(int b, int s)
  * XXX nice value should effect how interactive a kg is.
  */
 #define SCHED_CURR(kg) (((kg)->kg_slptime > (kg)->kg_runtime && \
-        sched_slp_ratio((kg)->kg_slptime, (kg)->kg_runtime) > 4) || \
-        (kg)->kg_pri_class != PRI_TIMESHARE)
+        sched_slp_ratio((kg)->kg_slptime, (kg)->kg_runtime) > 4))
 
 /*
  * Cpu percentage computation macros and defines.
@@ -808,13 +807,27 @@ sched_add(struct kse *ke)
         KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
             ("sched_add: process swapped out"));
 
-        kseq = KSEQ_CPU(ke->ke_cpu);
-
-        if (ke->ke_runq == NULL) {
-                if (SCHED_CURR(ke->ke_ksegrp))
-                        ke->ke_runq = kseq->ksq_curr;
-                else
-                        ke->ke_runq = kseq->ksq_next;
+        /*
+         * Timeshare threads get placed on the appropriate queue on their
+         * bound cpu.
+         */
+        if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE) {
+                kseq = KSEQ_CPU(ke->ke_cpu);
+
+                if (ke->ke_runq == NULL) {
+                        if (SCHED_CURR(ke->ke_ksegrp))
+                                ke->ke_runq = kseq->ksq_curr;
+                        else
+                                ke->ke_runq = kseq->ksq_next;
+                }
+        /*
+         * If we're a real-time or interrupt thread place us on the curr
+         * queue for the current processor.  Hopefully this will yield the
+         * lowest latency response.
+         */
+        } else {
+                kseq = KSEQ_SELF();
+                ke->ke_runq = kseq->ksq_curr;
         }
         ke->ke_ksegrp->kg_runq_kses++;
         ke->ke_state = KES_ONRUNQ;
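
The first hunk is the other half of the same change: with the class check moved
into sched_add(), SCHED_CURR() now keys purely off the sleep/run history.
A rough restatement follows, where is_curr() and slp_ratio() are hypothetical
stand-ins (the real sched_slp_ratio() is not shown in this diff) and the
threshold of 4 is taken from the macro above.

/*
 * Hypothetical stand-in for sched_slp_ratio(); its real definition is not
 * part of this diff, so this is only an assumed integer sleep/run ratio.
 */
static int
slp_ratio(int slptime, int runtime)
{
        return (runtime == 0 ? slptime : slptime / runtime);
}

/*
 * Rough restatement of the trimmed SCHED_CURR() test: only the sleep/run
 * history decides now.  The old escape hatch for classes other than
 * PRI_TIMESHARE is gone because sched_add() places those threads before
 * this test is ever consulted.
 */
static int
is_curr(int slptime, int runtime)
{
        return (slptime > runtime && slp_ratio(slptime, runtime) > 4);
}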