path: root/sys/amd64/amd64/machdep.c
author     attilio <attilio@FreeBSD.org>    2011-10-03 14:23:00 +0000
committer  attilio <attilio@FreeBSD.org>    2011-10-03 14:23:00 +0000
commit     1d578b7f371a13305217ae300f26fd6f9336b7f7 (patch)
tree       ad9930853c232ca1450453ae1493f38eb3489f8c /sys/amd64/amd64/machdep.c
parent     cb6db8fe7cc49d51852eaee9b88009b7118261f6 (diff)
Add some improvements to the idle table callbacks:

- Replace instances of the manual "hlt" assembly instruction with calls to the halt() function.
- In cpu_idle_mwait(), avoid races in the sched_runnable() check by using the same pattern cpu_idle_hlt() uses around the 'hlt' instruction (a sketch of this pattern follows below).
- Add comments explaining the logic behind the pattern used in cpu_idle_hlt() and the other idle callbacks.

In collaboration with: jhb, mav
Reviewed by: adri, kib
MFC after: 3 weeks
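The pattern the second item refers to can be illustrated with a simplified sketch. This is not the actual machdep.c code: idle_runnable() and idle_state are hypothetical stand-ins for sched_runnable() and the per-CPU monitorbuf, and the raw cli/sti asm stands in for disable_intr()/enable_intr().

/* Hypothetical stand-in for sched_runnable(). */
extern int	idle_runnable(void);

static volatile int idle_state;		/* 1 = sleeping, 0 = running */

static void
idle_hlt_sketch(void)
{
	idle_state = 1;
	__asm __volatile("cli" : : : "memory");		/* disable_intr() */
	if (idle_runnable()) {
		/* Work appeared; do not halt. */
		__asm __volatile("sti" : : : "memory");	/* enable_intr() */
	} else {
		/*
		 * "sti; hlt" back to back: sti defers interrupt delivery
		 * until after the next instruction, so an interrupt raised
		 * after the cli above cannot be taken between the check and
		 * the halt; it wakes the CPU out of hlt instead.
		 */
		__asm __volatile("sti; hlt");
	}
	idle_state = 0;
}

Checking sched_runnable() only with interrupts enabled would leave a window in which a thread made runnable by an interrupt could sit unnoticed until the next interrupt; the diff below closes that window in cpu_idle_mwait() in the same way.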
Diffstat (limited to 'sys/amd64/amd64/machdep.c')
-rw-r--r--  sys/amd64/amd64/machdep.c  43
1 file changed, 36 insertions, 7 deletions
diff --git a/sys/amd64/amd64/machdep.c b/sys/amd64/amd64/machdep.c
index 40effb4..0818800 100644
--- a/sys/amd64/amd64/machdep.c
+++ b/sys/amd64/amd64/machdep.c
@@ -609,7 +609,7 @@ void
cpu_halt(void)
{
for (;;)
- __asm__ ("hlt");
+ halt();
}
void (*cpu_idle_hook)(void) = NULL; /* ACPI idle hook. */
@@ -630,6 +630,8 @@ cpu_idle_acpi(int busy)
state = (int *)PCPU_PTR(monitorbuf);
*state = STATE_SLEEPING;
+
+ /* See comments in cpu_idle_hlt(). */
disable_intr();
if (sched_runnable())
enable_intr();
@@ -647,9 +649,22 @@ cpu_idle_hlt(int busy)
state = (int *)PCPU_PTR(monitorbuf);
*state = STATE_SLEEPING;
+
/*
- * We must absolutely guarentee that hlt is the next instruction
- * after sti or we introduce a timing window.
+ * Since we may be in a critical section from cpu_idle(), if
+ * an interrupt fires during that critical section we may have
+ * a pending preemption. If the CPU halts, then that thread
+ * may not execute until a later interrupt awakens the CPU.
+ * To handle this race, check for a runnable thread after
+ * disabling interrupts and immediately return if one is
+ * found. Also, we must absolutely guarantee that hlt is
+ * the next instruction after sti. This ensures that any
+ * interrupt that fires after the call to disable_intr() will
+ * immediately awaken the CPU from hlt. Finally, note that
+ * this works on x86 because interrupt delivery is deferred
+ * until after the instruction following sti, while IF is set
+ * to 1 immediately, so the hlt instruction still acknowledges
+ * any interrupt raised after the call to disable_intr().
*/
disable_intr();
if (sched_runnable())
@@ -675,11 +690,19 @@ cpu_idle_mwait(int busy)
state = (int *)PCPU_PTR(monitorbuf);
*state = STATE_MWAIT;
- if (!sched_runnable()) {
- cpu_monitor(state, 0, 0);
- if (*state == STATE_MWAIT)
- cpu_mwait(0, MWAIT_C1);
+
+ /* See comments in cpu_idle_hlt(). */
+ disable_intr();
+ if (sched_runnable()) {
+ enable_intr();
+ *state = STATE_RUNNING;
+ return;
}
+ cpu_monitor(state, 0, 0);
+ if (*state == STATE_MWAIT)
+ __asm __volatile("sti; mwait" : : "a" (MWAIT_C1), "c" (0));
+ else
+ enable_intr();
*state = STATE_RUNNING;
}
@@ -691,6 +714,12 @@ cpu_idle_spin(int busy)
state = (int *)PCPU_PTR(monitorbuf);
*state = STATE_RUNNING;
+
+ /*
+ * The sched_runnable() call is racy but, since we are in a
+ * loop, missing it once has little impact if any (and it is
+ * much better than not checking at all).
+ */
for (i = 0; i < 1000; i++) {
if (sched_runnable())
return;
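The cpu_idle_mwait() hunk above follows the same ordering: mwait, like hlt, must be the instruction immediately after sti, which is why it is issued as a single "sti; mwait" asm statement rather than enable_intr() followed by cpu_mwait(). The cpu_monitor() call arms the monitor on the per-CPU state word, so the sleeping CPU can also be woken by a plain store to that word. Below is a rough sketch of both sides, with hypothetical names (idle_runnable(), mwait_state, wake_idle_cpu_sketch()) standing in for sched_runnable(), the per-CPU monitorbuf and the scheduler's wakeup path, and with raw monitor/mwait asm (hint 0 assumed for C1) in place of cpu_monitor() and MWAIT_C1.

/* Hypothetical stand-in for sched_runnable(), as in the earlier sketch. */
extern int	idle_runnable(void);

static volatile int mwait_state;	/* 1 = mwait, 0 = running */

static void
idle_mwait_sketch(void)
{
	mwait_state = 1;
	__asm __volatile("cli" : : : "memory");		/* disable_intr() */
	if (idle_runnable()) {
		__asm __volatile("sti" : : : "memory");	/* enable_intr() */
		mwait_state = 0;
		return;
	}
	/* Arm the monitor on the state word (cpu_monitor() in the diff). */
	__asm __volatile("monitor" : : "a" (&mwait_state), "c" (0), "d" (0));
	/*
	 * Re-check the word: a wakeup may already have overwritten it.
	 * Even if the wakeup store lands between this check and mwait,
	 * the monitor is already armed, so mwait returns immediately.
	 * As with hlt, keep mwait right after sti.
	 */
	if (mwait_state == 1)
		__asm __volatile("sti; mwait" : : "a" (0), "c" (0));
	else
		__asm __volatile("sti" : : : "memory");
	mwait_state = 0;
}

static void
wake_idle_cpu_sketch(void)
{
	/* A store to the monitored word ends the sleeping CPU's mwait. */
	mwait_state = 0;
}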