summary refs log tree commit diff stats
path: root/sys/kern/sched_ule.c
diff options
context:
space:
mode:
author    jeff <jeff@FreeBSD.org>  2008-04-25 05:18:50 +0000
committer jeff <jeff@FreeBSD.org>  2008-04-25 05:18:50 +0000
commit 14b586bf964970197e12228d127a713b7db7abcb (patch)
tree   ae275695b8c2f48f9a96c11fe4405ef85fe1bc79 /sys/kern/sched_ule.c
parent 3cd35a205135ce8a15349a035286650c3452cafd (diff)
download FreeBSD-src-14b586bf964970197e12228d127a713b7db7abcb.zip
download FreeBSD-src-14b586bf964970197e12228d127a713b7db7abcb.tar.gz
- Add an integer argument to idle to indicate how likely we are to wake
  from idle over the next tick.
- Add a new MD routine, cpu_wake_idle() to wakeup idle threads who are
  suspended in cpu specific states.  This function can fail and cause the
  scheduler to fall back to another mechanism (ipi).
- Implement support for mwait in cpu_idle() on i386/amd64 machines that
  support it.  mwait is a higher performance way to synchronize cpus as
  compared to hlt & ipis.
- Allow selecting the idle routine by name via sysctl machdep.idle.  This
  replaces machdep.cpu_idle_hlt.  Only idle routines supported by the
  current machine are permitted.

Sponsored by: Nokia
Diffstat (limited to 'sys/kern/sched_ule.c')
-rw-r--r--  sys/kern/sched_ule.c | 14
1 file changed, 9 insertions(+), 5 deletions(-)
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index 7f5b597..7fe80af 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -954,6 +954,12 @@ tdq_notify(struct tdq *tdq, struct thread *td)
*/
if (tdq->tdq_idlestate == TDQ_RUNNING)
return;
+ /*
+ * If the MD code has an idle wakeup routine try that before
+ * falling back to IPI.
+ */
+ if (cpu_idle_wakeup(cpu))
+ return;
}
tdq->tdq_ipipending = 1;
ipi_selected(1 << cpu, IPI_PREEMPT);
@@ -2095,10 +2101,7 @@ sched_clock(struct thread *td)
* If there is some activity seed it to reflect that.
*/
tdq->tdq_oldswitchcnt = tdq->tdq_switchcnt;
- if (tdq->tdq_load)
- tdq->tdq_switchcnt = 2;
- else
- tdq->tdq_switchcnt = 0;
+ tdq->tdq_switchcnt = tdq->tdq_load;
/*
* Advance the insert index once for each tick to ensure that all
* threads get a chance to run.
@@ -2507,9 +2510,10 @@ sched_idletd(void *dummy)
* tdq_notify().
*/
if (tdq->tdq_load == 0) {
+ switchcnt = tdq->tdq_switchcnt + tdq->tdq_oldswitchcnt;
tdq->tdq_idlestate = TDQ_IDLE;
if (tdq->tdq_load == 0)
- cpu_idle();
+ cpu_idle(switchcnt > 1);
}
if (tdq->tdq_load) {
thread_lock(td);
OpenPOWER on IntegriCloud