author     jhb <jhb@FreeBSD.org>  2000-10-19 07:47:16 +0000
committer  jhb <jhb@FreeBSD.org>  2000-10-19 07:47:16 +0000
commit     be2e1033657d185aaa5ec90961b64fef7dc02618 (patch)
tree       6cadb359071cf30e979c15e9fcda598255b7239c
parent     fbf77bccd2e2e9e6ac48b8c447b0e570d1a71d7a (diff)
Axe the idle_event eventhandler, and add an MD cpu_idle function used
for things such as halting CPUs, idling CPUs, etc.

Discussed with:	msmith
-rw-r--r--  sys/amd64/amd64/machdep.c   15
-rw-r--r--  sys/i386/i386/machdep.c     15
-rw-r--r--  sys/kern/kern_idle.c         7
-rw-r--r--  sys/sys/proc.h               1
4 files changed, 12 insertions, 26 deletions
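
In rough terms, the change replaces the registered idle_event handler with a
machine-dependent hook that the MI idle loop calls directly. A minimal sketch
of the resulting interface, pieced together from the hunks below (simplified;
the real idle_proc also does page zeroing and takes the scheduler lock):

/* sys/sys/proc.h: new machine-dependent hook, one per platform */
void	cpu_idle __P((void));

/* sys/kern/kern_idle.c: the MI idle loop calls the hook directly */
static void
idle_proc(void *dummy)
{
	for (;;) {
		/* ... try useful idle work (e.g. page zeroing) first ... */
#ifdef __i386__
		cpu_idle();	/* platform decides whether to hlt, spin, etc. */
#endif
	}
}
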
diff --git a/sys/amd64/amd64/machdep.c b/sys/amd64/amd64/machdep.c
index b334617..6634aba 100644
--- a/sys/amd64/amd64/machdep.c
+++ b/sys/amd64/amd64/machdep.c
@@ -994,7 +994,6 @@ cpu_halt(void)
* the !SMP case, as there is no clean way to ensure that a CPU will be
* woken when there is work available for it.
*/
-#ifndef SMP
static int cpu_idle_hlt = 1;
SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_hlt, CTLFLAG_RW,
&cpu_idle_hlt, 0, "Idle loop HLT enable");
@@ -1005,9 +1004,10 @@ SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_hlt, CTLFLAG_RW,
* the time between calling hlt and the next interrupt even though there
* is a runnable process.
*/
-static void
-cpu_idle(void *junk, int count)
+void
+cpu_idle(void)
{
+#ifndef SMP
if (cpu_idle_hlt) {
disable_intr();
if (procrunnable())
@@ -1017,16 +1017,9 @@ cpu_idle(void *junk, int count)
__asm __volatile("hlt");
}
}
+#endif
}
-static void cpu_idle_register(void *junk)
-{
- EVENTHANDLER_FAST_REGISTER(idle_event, cpu_idle, NULL, IDLE_PRI_LAST);
-}
-SYSINIT(cpu_idle_register, SI_SUB_SCHED_IDLE, SI_ORDER_SECOND,
- cpu_idle_register, NULL)
-#endif /* !SMP */
-
/*
* Clear registers on exec
*/
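
The two hunks above show only fragments of the new function body; the code
between them (the branch taken when procrunnable() finds work) is not part of
this excerpt. A consolidated sketch of the check-then-halt pattern, with that
elided branch approximated rather than quoted:

void
cpu_idle(void)
{
#ifndef SMP
	if (cpu_idle_hlt) {
		/*
		 * Check for runnable work with interrupts disabled, per the
		 * comment above, so the decision to halt is not made on a
		 * stale view of the run queue.
		 */
		disable_intr();
		if (procrunnable())
			enable_intr();		/* work arrived; skip the halt */
		else {
			/* approximation of the elided lines */
			enable_intr();
			__asm __volatile("hlt");
		}
	}
#endif
}
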
diff --git a/sys/i386/i386/machdep.c b/sys/i386/i386/machdep.c
index b334617..6634aba 100644
--- a/sys/i386/i386/machdep.c
+++ b/sys/i386/i386/machdep.c
@@ -994,7 +994,6 @@ cpu_halt(void)
* the !SMP case, as there is no clean way to ensure that a CPU will be
* woken when there is work available for it.
*/
-#ifndef SMP
static int cpu_idle_hlt = 1;
SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_hlt, CTLFLAG_RW,
&cpu_idle_hlt, 0, "Idle loop HLT enable");
@@ -1005,9 +1004,10 @@ SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_hlt, CTLFLAG_RW,
* the time between calling hlt and the next interrupt even though there
* is a runnable process.
*/
-static void
-cpu_idle(void *junk, int count)
+void
+cpu_idle(void)
{
+#ifndef SMP
if (cpu_idle_hlt) {
disable_intr();
if (procrunnable())
@@ -1017,16 +1017,9 @@ cpu_idle(void *junk, int count)
__asm __volatile("hlt");
}
}
+#endif
}
-static void cpu_idle_register(void *junk)
-{
- EVENTHANDLER_FAST_REGISTER(idle_event, cpu_idle, NULL, IDLE_PRI_LAST);
-}
-SYSINIT(cpu_idle_register, SI_SUB_SCHED_IDLE, SI_ORDER_SECOND,
- cpu_idle_register, NULL)
-#endif /* !SMP */
-
/*
* Clear registers on exec
*/
diff --git a/sys/kern/kern_idle.c b/sys/kern/kern_idle.c
index b545d0e..e77fed9 100644
--- a/sys/kern/kern_idle.c
+++ b/sys/kern/kern_idle.c
@@ -40,8 +40,6 @@ SYSINIT(idle_setup, SI_SUB_SCHED_IDLE, SI_ORDER_FIRST, idle_setup, NULL)
static void idle_proc(void *dummy);
-EVENTHANDLER_FAST_DEFINE(idle_event, idle_eventhandler_t);
-
/*
* setup per-cpu idle process contexts
*/
@@ -102,8 +100,9 @@ idle_proc(void *dummy)
if (vm_page_zero_idle() != 0)
continue;
- /* call out to any cpu-becoming-idle events */
- EVENTHANDLER_FAST_INVOKE(idle_event, 0);
+#ifdef __i386__
+ cpu_idle();
+#endif
}
mtx_enter(&sched_lock, MTX_SPIN);
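
The call is guarded by #ifdef __i386__ because only the x86 machdep.c files
define cpu_idle in this commit; other platforms simply continue around the
idle loop until they gain an implementation of their own. A hypothetical
no-op version for such a platform (illustrative only, not part of this
change) would look like:

void
cpu_idle(void)
{
	/* Nothing better to do than spin in idle_proc on this platform. */
}
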
diff --git a/sys/sys/proc.h b/sys/sys/proc.h
index 1390d40..554f066 100644
--- a/sys/sys/proc.h
+++ b/sys/sys/proc.h
@@ -517,6 +517,7 @@ int suser_xxx __P((const struct ucred *cred, const struct proc *proc,
void remrunqueue __P((struct proc *));
void cpu_switch __P((void));
void cpu_throw __P((void)) __dead2;
+void cpu_idle __P((void));
void unsleep __P((struct proc *));
void cpu_exit __P((struct proc *)) __dead2;