author      msmith <msmith@FreeBSD.org>    2000-09-22 03:18:20 +0000
committer   msmith <msmith@FreeBSD.org>    2000-09-22 03:18:20 +0000
commit      5fd4296a8cbaf918bb6da046a9d5e38cc65bae23 (patch)
tree        b8e71b4830ef9ef0ad6f5709e84a14f1ca001b5b
parent      a4bf8740bb46d337eb18ae335c2f3b023098c63d (diff)
download    FreeBSD-src-5fd4296a8cbaf918bb6da046a9d5e38cc65bae23.zip
            FreeBSD-src-5fd4296a8cbaf918bb6da046a9d5e38cc65bae23.tar.gz
Implement halt-on-idle in the !SMP case, which should significantly
reduce power consumption on most systems.
-rw-r--r--    sys/amd64/amd64/cpu_switch.S    10
-rw-r--r--    sys/amd64/amd64/machdep.c       37
-rw-r--r--    sys/amd64/amd64/swtch.s         10
-rw-r--r--    sys/i386/i386/machdep.c         37
-rw-r--r--    sys/i386/i386/swtch.s           10
5 files changed, 74 insertions(+), 30 deletions(-)
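The core of the change is the new cpu_idle() handler added to machdep.c below: the run queue is checked with interrupts disabled, so a wakeup cannot slip in between the check and the decision to halt. What follows is a minimal, userland-compilable sketch of that check-then-halt pattern; disable_intr(), enable_intr(), procrunnable(), and halt() are stand-in stubs here (in the kernel they are cli, sti, the run-queue check, and the privileged x86 hlt instruction).

/*
 * Minimal sketch of the check-then-halt pattern cpu_idle() uses.
 * The kernel primitives are stubbed so this compiles and runs as a
 * normal userland program.
 */
#include <stdio.h>

static int runnable;                    /* stand-in for the run queue */
static int cpu_idle_hlt = 1;            /* mirrors machdep.cpu_idle_hlt */

static void disable_intr(void) { }      /* cli on real hardware */
static void enable_intr(void) { }       /* sti on real hardware */
static int  procrunnable(void) { return (runnable); }
static void halt(void) { printf("hlt: sleeping until next interrupt\n"); }

static void
cpu_idle(void)
{
	if (!cpu_idle_hlt)
		return;
	disable_intr();                 /* no wakeups while we look */
	if (procrunnable()) {
		enable_intr();          /* work is queued; don't halt */
	} else {
		enable_intr();
		halt();                 /* sleep until an interrupt */
	}
}

int
main(void)
{
	cpu_idle();                     /* nothing runnable: halts */
	runnable = 1;
	cpu_idle();                     /* runnable: returns at once */
	return (0);
}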
diff --git a/sys/amd64/amd64/cpu_switch.S b/sys/amd64/amd64/cpu_switch.S
index db56a1b..d7bc755 100644
--- a/sys/amd64/amd64/cpu_switch.S
+++ b/sys/amd64/amd64/cpu_switch.S
@@ -60,9 +60,6 @@
.data
- .globl _hlt_vector
-_hlt_vector: .long _default_halt /* pointer to halt routine */
-
.globl _panic
#if defined(SWTCH_OPTIM_STATS)
@@ -73,13 +70,6 @@ _tlb_flush_count: .long 0
.text
-ENTRY(default_halt)
- sti
-#ifndef SMP
- hlt /* XXX: until a wakeup IPI */
-#endif
- ret
-
/*
* cpu_throw()
*/
diff --git a/sys/amd64/amd64/machdep.c b/sys/amd64/amd64/machdep.c
index 8b1b08b..a3c09f4 100644
--- a/sys/amd64/amd64/machdep.c
+++ b/sys/amd64/amd64/machdep.c
@@ -72,6 +72,7 @@
#include <sys/sysctl.h>
#include <sys/vmmeter.h>
#include <sys/bus.h>
+#include <sys/eventhandler.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
@@ -1005,6 +1006,42 @@ cpu_halt(void)
}
/*
+ * Hook to idle the CPU when possible. This currently only works in
+ * the !SMP case, as there is no clean way to ensure that a CPU will be
+ * woken when there is work available for it.
+ */
+#ifndef SMP
+static int cpu_idle_hlt = 1;
+SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_hlt, CTLFLAG_RW, &cpu_idle_hlt, 0, "Idle loop HLT enable");
+
+/*
+ * Note that we have to be careful here to avoid a race between checking
+ * procrunnable() and actually halting. If we don't do this, we may waste
+ * the time between calling hlt and the next interrupt even though there
+ * is a runnable process.
+ */
+static void
+cpu_idle(void *junk, int count)
+{
+ if (cpu_idle_hlt){
+ disable_intr();
+ if (procrunnable()) {
+ enable_intr();
+ } else {
+ enable_intr();
+ __asm__ ("hlt");
+ }
+ }
+}
+
+static void cpu_idle_register(void *junk)
+{
+ EVENTHANDLER_FAST_REGISTER(idle_event, cpu_idle, NULL, IDLE_PRI_LAST);
+}
+SYSINIT(cpu_idle_register, SI_SUB_SCHED_IDLE, SI_ORDER_SECOND, cpu_idle_register, NULL)
+#endif /* !SMP */
+
+/*
* Clear registers on exec
*/
void
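The comment in the hunk above describes the race this ordering narrows: if procrunnable() were checked with interrupts enabled, a wakeup could arrive after the check and before the hlt, leaving the CPU halted until the next timer interrupt even though a process had become runnable. x86 additionally guarantees that sti defers interrupt recognition until after the following instruction, so pairing the two in a single asm statement would close even the remaining window between enable_intr() and the separate hlt. A hypothetical kernel-context variant (not part of this commit) might look like:

static void
idle_halt(void)
{
	/*
	 * sti's one-instruction "interrupt shadow" means no interrupt
	 * can be taken between re-enabling interrupts and halting.
	 * Ring 0 only: sti and hlt are privileged instructions.
	 */
	__asm__ __volatile__("sti; hlt");
}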
diff --git a/sys/amd64/amd64/swtch.s b/sys/amd64/amd64/swtch.s
index db56a1b..d7bc755 100644
--- a/sys/amd64/amd64/swtch.s
+++ b/sys/amd64/amd64/swtch.s
@@ -60,9 +60,6 @@
.data
- .globl _hlt_vector
-_hlt_vector: .long _default_halt /* pointer to halt routine */
-
.globl _panic
#if defined(SWTCH_OPTIM_STATS)
@@ -73,13 +70,6 @@ _tlb_flush_count: .long 0
.text
-ENTRY(default_halt)
- sti
-#ifndef SMP
- hlt /* XXX: until a wakeup IPI */
-#endif
- ret
-
/*
* cpu_throw()
*/
diff --git a/sys/i386/i386/machdep.c b/sys/i386/i386/machdep.c
index 8b1b08b..a3c09f4 100644
--- a/sys/i386/i386/machdep.c
+++ b/sys/i386/i386/machdep.c
@@ -72,6 +72,7 @@
#include <sys/sysctl.h>
#include <sys/vmmeter.h>
#include <sys/bus.h>
+#include <sys/eventhandler.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
@@ -1005,6 +1006,42 @@ cpu_halt(void)
}
/*
+ * Hook to idle the CPU when possible. This currently only works in
+ * the !SMP case, as there is no clean way to ensure that a CPU will be
+ * woken when there is work available for it.
+ */
+#ifndef SMP
+static int cpu_idle_hlt = 1;
+SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_hlt, CTLFLAG_RW, &cpu_idle_hlt, 0, "Idle loop HLT enable");
+
+/*
+ * Note that we have to be careful here to avoid a race between checking
+ * procrunnable() and actually halting. If we don't do this, we may waste
+ * the time between calling hlt and the next interrupt even though there
+ * is a runnable process.
+ */
+static void
+cpu_idle(void *junk, int count)
+{
+ if (cpu_idle_hlt){
+ disable_intr();
+ if (procrunnable()) {
+ enable_intr();
+ } else {
+ enable_intr();
+ __asm__ ("hlt");
+ }
+ }
+}
+
+static void cpu_idle_register(void *junk)
+{
+ EVENTHANDLER_FAST_REGISTER(idle_event, cpu_idle, NULL, IDLE_PRI_LAST);
+}
+SYSINIT(cpu_idle_register, SI_SUB_SCHED_IDLE, SI_ORDER_SECOND, cpu_idle_register, NULL)
+#endif /* !SMP */
+
+/*
* Clear registers on exec
*/
void
diff --git a/sys/i386/i386/swtch.s b/sys/i386/i386/swtch.s
index db56a1b..d7bc755 100644
--- a/sys/i386/i386/swtch.s
+++ b/sys/i386/i386/swtch.s
@@ -60,9 +60,6 @@
.data
- .globl _hlt_vector
-_hlt_vector: .long _default_halt /* pointer to halt routine */
-
.globl _panic
#if defined(SWTCH_OPTIM_STATS)
@@ -73,13 +70,6 @@ _tlb_flush_count: .long 0
.text
-ENTRY(default_halt)
- sti
-#ifndef SMP
- hlt /* XXX: until a wakeup IPI */
-#endif
- ret
-
/*
* cpu_throw()
*/
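Registration replaces the _hlt_vector indirection removed from swtch.s above: at SYSINIT time, cpu_idle() is hooked onto the idle_event eventhandler list, which the scheduler's idle loop invokes when it has nothing to run. The sketch below shows only the general shape of such a handler list in plain C; it is not the FreeBSD eventhandler(9) implementation, and idle_event_register()/idle_event_invoke() are names invented for illustration.

#include <stdio.h>

#define MAX_IDLE_HANDLERS 8

typedef void (*idle_fn)(void *, int);

static idle_fn idle_handlers[MAX_IDLE_HANDLERS];
static void   *idle_args[MAX_IDLE_HANDLERS];
static int     nidle;

static void
idle_event_register(idle_fn fn, void *arg)
{
	if (nidle < MAX_IDLE_HANDLERS) {
		idle_handlers[nidle] = fn;
		idle_args[nidle] = arg;
		nidle++;
	}
}

/* What the idle loop does once no process is runnable. */
static void
idle_event_invoke(int count)
{
	int i;

	for (i = 0; i < nidle; i++)
		(*idle_handlers[i])(idle_args[i], count);
}

static void
cpu_idle(void *junk, int count)
{
	(void)junk;
	printf("cpu_idle invoked, pass %d\n", count);
}

int
main(void)
{
	idle_event_register(cpu_idle, NULL);    /* done via SYSINIT in the commit */
	idle_event_invoke(1);                   /* one idle-loop iteration */
	return (0);
}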