author    jeff <jeff@FreeBSD.org>    2007-01-20 09:03:43 +0000
committer jeff <jeff@FreeBSD.org>    2007-01-20 09:03:43 +0000
commit    3f693f3417ce89f53cf3562bd678d867c6c1ad1e (patch)
tree      36413104185bbba005bd45345a0e6309448d53fa /sys
parent    14e97fe311a138dc0acd53104973ecfa05ed3604 (diff)
- In tdq_transfer() always set NEEDRESCHED when necessary regardless of
  the ipi settings.  If NEEDRESCHED is set and an ipi is later delivered
  it will clear it rather than cause extra context switches.  However,
  if we miss setting it we can have terrible latency.
- In sched_bind() correctly implement bind.  Also be slightly more
  tolerant of code which calls bind multiple times.  However, we don't
  change binding if another call is made with a different cpu.  This
  does not presently work with hwpmc which I believe should be changed.
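
For readers skimming the diff, here is a condensed sketch of the notify path as it
looks after this change.  It merely restates the sched_ule.c hunk below in one place;
tdq_notify_sketch() is an illustrative name, and ipi_thresh, ipi_ast, ipi_preempt and
the prio argument stand in for the file-local variables visible in the diff, so this
is not a drop-in replacement for the real function.

    static void
    tdq_notify_sketch(struct td_sched *ts, int prio)
    {
            struct pcpu *pcpu = pcpu_find(ts->ts_cpu);
            struct thread *td = pcpu->pc_curthread;

            /* If our priority is not better, there is nothing to do. */
            if (prio > td->td_priority)
                    return;
            /* Always mark the remote thread, even when no IPI is sent. */
            td->td_flags |= TDF_NEEDRESCHED;
            /* Skip the IPI unless we beat the threshold or the target is idle-class. */
            if (prio > ipi_thresh && td->td_priority < PRI_MIN_IDLE)
                    return;
            /* The idlethread finds new work via sched_runnable(); no IPI needed. */
            if (td == pcpu->pc_idlethread)
                    return;
            if (ipi_ast)
                    ipi_selected(1 << ts->ts_cpu, IPI_AST);
            else if (ipi_preempt)
                    ipi_selected(1 << ts->ts_cpu, IPI_PREEMPT);
    }

The behavioral difference from the old code is that TDF_NEEDRESCHED is now set before
any of the IPI short-circuits, which is the "always set NEEDRESCHED" point from the
log message above.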
Diffstat (limited to 'sys')
-rw-r--r--  sys/kern/sched_ule.c  40
1 file changed, 25 insertions, 15 deletions
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index 363ba41..cbec5c5 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -61,6 +61,12 @@ __FBSDID("$FreeBSD$");
#include <machine/smp.h>
/*
+ * TODO:
+ * Pick idle from affinity group or self group first.
+ * Implement pick_score.
+ */
+
+/*
* Thread scheduler specific section.
*/
struct td_sched {
@@ -703,6 +709,15 @@ tdq_notify(struct td_sched *ts)
cpu = ts->ts_cpu;
pcpu = pcpu_find(cpu);
td = pcpu->pc_curthread;
+
+ /*
+ * If our priority is not better than the current priority there is
+ * nothing to do.
+ */
+ if (prio > td->td_priority)
+ return;
+ /* Always set NEEDRESCHED. */
+ td->td_flags |= TDF_NEEDRESCHED;
/*
* IPI if we exceed the threshold or if the target cpu is running an
* idle thread.
@@ -710,18 +725,14 @@ tdq_notify(struct td_sched *ts)
if (prio > ipi_thresh && td->td_priority < PRI_MIN_IDLE)
return;
/*
- * IPI only if our priority is better than the running thread and
- * the running thread is not the per cpu idle thread. The
- * idlethread finds new work via sched_runnable().
+ * The idlethread finds new work via sched_runnable(), don't IPI
+ * here.
*/
if (td == pcpu->pc_idlethread)
return;
- if (prio > td->td_priority)
- return;
- if (ipi_ast) {
- td->td_flags |= TDF_NEEDRESCHED;
+ if (ipi_ast)
ipi_selected(1 << cpu, IPI_AST);
- } else if (ipi_preempt)
+ else if (ipi_preempt)
ipi_selected(1 << cpu, IPI_PREEMPT);
}
@@ -1913,17 +1924,17 @@ sched_bind(struct thread *td, int cpu)
mtx_assert(&sched_lock, MA_OWNED);
ts = td->td_sched;
- KASSERT((ts->ts_flags & TSF_BOUND) == 0,
- ("sched_bind: thread %p already bound.", td));
+ if (ts->ts_flags & TSF_BOUND)
+ return;
ts->ts_flags |= TSF_BOUND;
#ifdef SMP
+ sched_pin();
if (PCPU_GET(cpuid) == cpu)
return;
- /* sched_rem without the runq_remove */
+ ts->ts_cpu = cpu;
ts->ts_state = TSS_THREAD;
/* When we return from mi_switch we'll be on the correct cpu. */
mi_switch(SW_VOL, NULL);
- sched_pin();
#endif
}
@@ -1934,9 +1945,8 @@ sched_unbind(struct thread *td)
mtx_assert(&sched_lock, MA_OWNED);
ts = td->td_sched;
- KASSERT(ts->ts_flags & TSF_BOUND,
- ("sched_unbind: thread %p not bound.", td));
- mtx_assert(&sched_lock, MA_OWNED);
+ if ((ts->ts_flags & TSF_BOUND) == 0)
+ return;
ts->ts_flags &= ~TSF_BOUND;
#ifdef SMP
sched_unpin();
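
As a rough illustration of the more tolerant sched_bind() semantics introduced here,
the fragment below is a hypothetical caller sketch, not part of this commit:
run_on_cpu() is an invented name, and the sched_lock locking simply follows the
convention asserted in the hunks above.  A repeated bind to the same cpu now returns
early instead of tripping a KASSERT, while a bind request for a different cpu is
ignored until the thread is unbound.

    #include <sys/param.h>
    #include <sys/lock.h>
    #include <sys/mutex.h>
    #include <sys/proc.h>
    #include <sys/sched.h>

    static void
    run_on_cpu(int cpu)
    {
            struct thread *td = curthread;

            mtx_lock_spin(&sched_lock);
            sched_bind(td, cpu);    /* may mi_switch() to migrate onto 'cpu' */
            sched_bind(td, cpu);    /* second call: TSF_BOUND already set, returns */
            mtx_unlock_spin(&sched_lock);

            /* ... work that must stay on 'cpu' ... */

            mtx_lock_spin(&sched_lock);
            sched_unbind(td);       /* clears TSF_BOUND and sched_unpin()s on SMP */
            mtx_unlock_spin(&sched_lock);
    }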