author     nyan <nyan@FreeBSD.org>  2011-10-04 13:24:22 +0000
committer  nyan <nyan@FreeBSD.org>  2011-10-04 13:24:22 +0000
commit     2d080ae31f8dcded3dfc26f912370acee51b3ed0 (patch)
tree       8db138d032c27d31fb621c674bba1511443f018e /sys/pc98
parent     9fc0ff740a4428feca44ea19d7ba81648b0eda19 (diff)
MFi386: revision 225936
Add some improvements in the idle table callbacks:
- Replace instances of the manually coded "hlt" assembly instruction
  with calls to the halt() function.
- In cpu_idle_mwait(), avoid races in the sched_runnable() check by
  using the same pattern used in cpu_idle_hlt() with the 'hlt'
  instruction.
- Add comments explaining the logic behind the pattern used in
  cpu_idle_hlt() and the other idle callbacks.
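All three items hinge on the same race-avoidance idea, sketched below in
illustrative form. This is not the committed code (the real hunks follow
in the diff); idle_hlt_sketch is a hypothetical name, while disable_intr(),
enable_intr(), and sched_runnable() are the kernel primitives visible in
the diff itself:

	static void
	idle_hlt_sketch(void)	/* hypothetical name, illustration only */
	{
		disable_intr();		/* close the race window */
		if (sched_runnable()) {
			enable_intr();	/* work pending: do not halt */
			return;
		}
		/*
		 * sti delays interrupt recognition until after the next
		 * instruction, so no wakeup can be lost between the
		 * check above and the hlt below.
		 */
		__asm __volatile("sti; hlt");
	}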
Diffstat (limited to 'sys/pc98')
-rw-r--r--  sys/pc98/pc98/machdep.c  41
1 file changed, 34 insertions, 7 deletions
diff --git a/sys/pc98/pc98/machdep.c b/sys/pc98/pc98/machdep.c
index de37dcb..c9e1a91 100644
--- a/sys/pc98/pc98/machdep.c
+++ b/sys/pc98/pc98/machdep.c
@@ -1117,7 +1117,7 @@ void
 cpu_halt(void)
 {
 	for (;;)
-		__asm__ ("hlt");
+		halt();
 }
 
 static int idle_mwait = 1;	/* Use MONITOR/MWAIT for short idle. */
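For reference, halt() is the machine-dependent inline wrapper for the hlt
instruction; its definition is not part of this diff. Paraphrased from
<machine/cpufunc.h> (an approximation, not quoted from this commit):

	static __inline void
	halt(void)
	{
		__asm __volatile("hlt");
	}

Calling the wrapper instead of open-coding the assembly keeps the idle
callbacks uniform and leaves a single definition to change if the halting
primitive ever needs adjusting.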
@@ -1136,9 +1136,22 @@ cpu_idle_hlt(int busy)
 
 	state = (int *)PCPU_PTR(monitorbuf);
 	*state = STATE_SLEEPING;
+
 	/*
-	 * We must absolutely guarentee that hlt is the next instruction
-	 * after sti or we introduce a timing window.
+	 * Since we may be in a critical section from cpu_idle(), if
+	 * an interrupt fires during that critical section we may have
+	 * a pending preemption.  If the CPU halts, then that thread
+	 * may not execute until a later interrupt awakens the CPU.
+	 * To handle this race, check for a runnable thread after
+	 * disabling interrupts and return immediately if one is
+	 * found.  Also, we must absolutely guarantee that hlt is
+	 * the next instruction after sti.  This ensures that any
+	 * interrupt that fires after the call to disable_intr() will
+	 * immediately awaken the CPU from hlt.  Finally, note that
+	 * this works on x86 because interrupts are recognized only
+	 * after the instruction following sti executes, while IF is
+	 * set to 1 immediately, allowing the hlt instruction to
+	 * acknowledge the interrupt.
 	 */
 	disable_intr();
 	if (sched_runnable())
@@ -1164,11 +1177,19 @@ cpu_idle_mwait(int busy)
 
 	state = (int *)PCPU_PTR(monitorbuf);
 	*state = STATE_MWAIT;
-	if (!sched_runnable()) {
-		cpu_monitor(state, 0, 0);
-		if (*state == STATE_MWAIT)
-			cpu_mwait(0, MWAIT_C1);
+
+	/* See comments in cpu_idle_hlt(). */
+	disable_intr();
+	if (sched_runnable()) {
+		enable_intr();
+		*state = STATE_RUNNING;
+		return;
 	}
+	cpu_monitor(state, 0, 0);
+	if (*state == STATE_MWAIT)
+		__asm __volatile("sti; mwait" : : "a" (MWAIT_C1), "c" (0));
+	else
+		enable_intr();
 	*state = STATE_RUNNING;
 }
 
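cpu_monitor() and cpu_mwait(), used above, are thin inline wrappers around
the MONITOR and MWAIT instructions. They are not part of this diff;
paraphrased from <machine/cpufunc.h> (an approximation), they look roughly
like:

	static __inline void
	cpu_monitor(const void *addr, u_long extensions, u_int hints)
	{
		__asm __volatile("monitor"
		    : : "a" (addr), "c" (extensions), "d" (hints));
	}

	static __inline void
	cpu_mwait(u_long extensions, u_int hints)
	{
		__asm __volatile("mwait" : : "a" (hints), "c" (extensions));
	}

Note why the new code open-codes "sti; mwait" instead of calling
enable_intr() followed by cpu_mwait(): just as with "sti; hlt", sti must
immediately precede mwait so that the one-instruction interrupt shadow
guarantees the wait begins before any interrupt is recognized, making a
lost wakeup impossible.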
@@ -1180,6 +1201,12 @@ cpu_idle_spin(int busy)
 
 	state = (int *)PCPU_PTR(monitorbuf);
 	*state = STATE_RUNNING;
+
+	/*
+	 * The sched_runnable() call is racy, but since it sits in a
+	 * loop, missing it once has little impact, if any (and it is
+	 * much better than not checking at all).
+	 */
 	for (i = 0; i < 1000; i++) {
 		if (sched_runnable())
 			return;
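The hunk is truncated here by the diff context. A plausible reconstruction
of the remainder of the loop (an assumption, not verbatim from this tree)
would use cpu_spinwait(), the inline wrapper for the PAUSE spin-loop hint:

	for (i = 0; i < 1000; i++) {
		if (sched_runnable())
			return;
		/* PAUSE hint: reconstruction, not verbatim from the tree. */
		cpu_spinwait();
	}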