author     Andi Kleen <ak@suse.de>                  2006-01-11 22:44:36 +0100
committer  Linus Torvalds <torvalds@g5.osdl.org>   2006-01-11 19:04:55 -0800
commit     95833c83f3b812c78e48db4eaa19f6c74958470b (patch)
tree       8ad17da708141d66cf53c2113b4fe1710af929ec
parent     6b050f8075823b0d9ec4fad38f4f552b74e5c5af (diff)
[PATCH] x86_64: Add idle notifiers
This adds a new notifier chain that is called with IDLE_START when a CPU goes idle and IDLE_END when it goes out of idle. The context can be the idle thread or interrupt context.

Since we cannot rely on MONITOR/MWAIT existing, the idle-end check currently has to be done in all interrupt handlers.

The notifiers were originally inspired by the similar s390 implementation.

They have a variety of applications:
- They will be needed for CONFIG_NO_IDLE_HZ.
- They can be used by oprofile to fix up the missing time in idle when performance counters don't tick.
- They can be used for better C-state management in ACPI.
- They could be used for microstate accounting.

This is just infrastructure so far; there are no users yet.

Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
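[Editor's illustration, not part of the patch, which deliberately adds no users: a minimal consumer might look like the sketch below. Only idle_notifier_register(), idle_notifier_unregister(), IDLE_START and IDLE_END come from this patch; the callback and notifier_block names are hypothetical, and the return value follows the notifier-chain conventions of this kernel generation.]

    #include <linux/notifier.h>
    #include <asm/idle.h>

    /* Hypothetical callback: invoked on the CPU whose idle state changed,
     * from the idle thread (IDLE_START) or from the first interrupt taken
     * while idle (IDLE_END). */
    static int my_idle_notify(struct notifier_block *nb,
                              unsigned long action, void *data)
    {
            switch (action) {
            case IDLE_START:
                    /* CPU is about to enter its idle routine. */
                    break;
            case IDLE_END:
                    /* CPU has left idle; safe to resume per-CPU work. */
                    break;
            }
            return NOTIFY_DONE;
    }

    static struct notifier_block my_idle_nb = {
            .notifier_call = my_idle_notify,
    };

    /* From init code: idle_notifier_register(&my_idle_nb); */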
-rw-r--r--  arch/x86_64/kernel/apic.c       |  4
-rw-r--r--  arch/x86_64/kernel/irq.c        |  2
-rw-r--r--  arch/x86_64/kernel/mce_amd.c    |  2
-rw-r--r--  arch/x86_64/kernel/mce_intel.c  |  2
-rw-r--r--  arch/x86_64/kernel/process.c    | 48
-rw-r--r--  arch/x86_64/kernel/smp.c        |  2
-rw-r--r--  include/asm-x86_64/idle.h       | 14
7 files changed, 74 insertions(+), 0 deletions(-)
diff --git a/arch/x86_64/kernel/apic.c b/arch/x86_64/kernel/apic.c
index 40cbd60..628aebf 100644
--- a/arch/x86_64/kernel/apic.c
+++ b/arch/x86_64/kernel/apic.c
@@ -34,6 +34,7 @@
#include <asm/pgalloc.h>
#include <asm/mach_apic.h>
#include <asm/nmi.h>
+#include <asm/idle.h>
int apic_verbosity;
@@ -922,6 +923,7 @@ void smp_apic_timer_interrupt(struct pt_regs *regs)
* Besides, if we don't timer interrupts ignore the global
* interrupt lock, which is the WrongThing (tm) to do.
*/
+ exit_idle();
irq_enter();
smp_local_timer_interrupt(regs);
irq_exit();
@@ -981,6 +983,7 @@ __init int oem_force_hpet_timer(void)
asmlinkage void smp_spurious_interrupt(void)
{
unsigned int v;
+ exit_idle();
irq_enter();
/*
* Check if this really is a spurious interrupt and ACK it
@@ -1016,6 +1019,7 @@ asmlinkage void smp_error_interrupt(void)
{
unsigned int v, v1;
+ exit_idle();
irq_enter();
/* First tickle the hardware, only then report what went on. -- REW */
v = apic_read(APIC_ESR);
diff --git a/arch/x86_64/kernel/irq.c b/arch/x86_64/kernel/irq.c
index d6a04a8..8bf57f7 100644
--- a/arch/x86_64/kernel/irq.c
+++ b/arch/x86_64/kernel/irq.c
@@ -17,6 +17,7 @@
#include <linux/delay.h>
#include <asm/uaccess.h>
#include <asm/io_apic.h>
+#include <asm/idle.h>
atomic_t irq_err_count;
#ifdef CONFIG_X86_IO_APIC
@@ -98,6 +99,7 @@ asmlinkage unsigned int do_IRQ(struct pt_regs *regs)
/* high bits used in ret_from_ code */
unsigned irq = regs->orig_rax & 0xff;
+ exit_idle();
irq_enter();
__do_IRQ(irq, regs);
diff --git a/arch/x86_64/kernel/mce_amd.c b/arch/x86_64/kernel/mce_amd.c
index 1f76175..0b62a44 100644
--- a/arch/x86_64/kernel/mce_amd.c
+++ b/arch/x86_64/kernel/mce_amd.c
@@ -27,6 +27,7 @@
#include <asm/mce.h>
#include <asm/msr.h>
#include <asm/percpu.h>
+#include <asm/idle.h>
#define PFX "mce_threshold: "
#define VERSION "version 1.00.9"
@@ -140,6 +141,7 @@ asmlinkage void mce_threshold_interrupt(void)
struct mce m;
ack_APIC_irq();
+ exit_idle();
irq_enter();
memset(&m, 0, sizeof(m));
diff --git a/arch/x86_64/kernel/mce_intel.c b/arch/x86_64/kernel/mce_intel.c
index 0be0a79..3ef845e 100644
--- a/arch/x86_64/kernel/mce_intel.c
+++ b/arch/x86_64/kernel/mce_intel.c
@@ -10,6 +10,7 @@
#include <asm/msr.h>
#include <asm/mce.h>
#include <asm/hw_irq.h>
+#include <asm/idle.h>
static DEFINE_PER_CPU(unsigned long, next_check);
@@ -19,6 +20,7 @@ asmlinkage void smp_thermal_interrupt(void)
ack_APIC_irq();
+ exit_idle();
irq_enter();
if (time_before(jiffies, __get_cpu_var(next_check)))
goto done;
diff --git a/arch/x86_64/kernel/process.c b/arch/x86_64/kernel/process.c
index c9df991..669cf0e 100644
--- a/arch/x86_64/kernel/process.c
+++ b/arch/x86_64/kernel/process.c
@@ -36,6 +36,7 @@
#include <linux/utsname.h>
#include <linux/random.h>
#include <linux/kprobes.h>
+#include <linux/notifier.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
@@ -50,6 +51,7 @@
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/ia32.h>
+#include <asm/idle.h>
asmlinkage extern void ret_from_fork(void);
@@ -64,6 +66,50 @@ EXPORT_SYMBOL(boot_option_idle_override);
void (*pm_idle)(void);
static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
+static struct notifier_block *idle_notifier;
+static DEFINE_SPINLOCK(idle_notifier_lock);
+
+void idle_notifier_register(struct notifier_block *n)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&idle_notifier_lock, flags);
+ notifier_chain_register(&idle_notifier, n);
+ spin_unlock_irqrestore(&idle_notifier_lock, flags);
+}
+EXPORT_SYMBOL_GPL(idle_notifier_register);
+
+void idle_notifier_unregister(struct notifier_block *n)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&idle_notifier_lock, flags);
+ notifier_chain_unregister(&idle_notifier, n);
+ spin_unlock_irqrestore(&idle_notifier_lock, flags);
+}
+EXPORT_SYMBOL(idle_notifier_unregister);
+
+enum idle_state { CPU_IDLE, CPU_NOT_IDLE };
+static DEFINE_PER_CPU(enum idle_state, idle_state) = CPU_NOT_IDLE;
+
+void enter_idle(void)
+{
+ __get_cpu_var(idle_state) = CPU_IDLE;
+ notifier_call_chain(&idle_notifier, IDLE_START, NULL);
+}
+
+static void __exit_idle(void)
+{
+ __get_cpu_var(idle_state) = CPU_NOT_IDLE;
+ notifier_call_chain(&idle_notifier, IDLE_END, NULL);
+}
+
+/* Called from interrupts to signify idle end */
+void exit_idle(void)
+{
+ if (current->pid | read_pda(irqcount))
+ return;
+ __exit_idle();
+}
+
/*
* We use this if we don't have any better
* idle routine..
@@ -180,7 +226,9 @@ void cpu_idle (void)
idle = default_idle;
if (cpu_is_offline(smp_processor_id()))
play_dead();
+ enter_idle();
idle();
+ __exit_idle();
}
preempt_enable_no_resched();
diff --git a/arch/x86_64/kernel/smp.c b/arch/x86_64/kernel/smp.c
index cfc3d9d..6a666d2 100644
--- a/arch/x86_64/kernel/smp.c
+++ b/arch/x86_64/kernel/smp.c
@@ -27,6 +27,7 @@
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/apicdef.h>
+#include <asm/idle.h>
/*
* Smarter SMP flushing macros.
@@ -512,6 +513,7 @@ asmlinkage void smp_call_function_interrupt(void)
/*
* At this point the info structure may be out of scope unless wait==1
*/
+ exit_idle();
irq_enter();
(*func)(info);
irq_exit();
diff --git a/include/asm-x86_64/idle.h b/include/asm-x86_64/idle.h
new file mode 100644
index 0000000..6bd47dc
--- /dev/null
+++ b/include/asm-x86_64/idle.h
@@ -0,0 +1,14 @@
+#ifndef _ASM_X86_64_IDLE_H
+#define _ASM_X86_64_IDLE_H 1
+
+#define IDLE_START 1
+#define IDLE_END 2
+
+struct notifier_block;
+void idle_notifier_register(struct notifier_block *n);
+void idle_notifier_unregister(struct notifier_block *n);
+
+void enter_idle(void);
+void exit_idle(void);
+
+#endif
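[Editor's illustration: to connect the header above to the oprofile-style use case from the changelog, here is a hedged sketch of a hypothetical GPL module that accumulates per-CPU idle time. Everything except the <asm/idle.h> API is invented; jiffies resolution is used only to keep the sketch short. The callbacks run on the affected CPU with preemption off, so __get_cpu_var() is safe.]

    #include <linux/module.h>
    #include <linux/notifier.h>
    #include <linux/percpu.h>
    #include <linux/jiffies.h>
    #include <asm/idle.h>

    static DEFINE_PER_CPU(unsigned long, idle_enter_stamp);
    static DEFINE_PER_CPU(unsigned long, idle_jiffies_total);

    /* Hypothetical callback: stamp idle entry, accumulate on idle exit. */
    static int idle_time_notify(struct notifier_block *nb,
                                unsigned long action, void *data)
    {
            switch (action) {
            case IDLE_START:
                    __get_cpu_var(idle_enter_stamp) = jiffies;
                    break;
            case IDLE_END:
                    __get_cpu_var(idle_jiffies_total) +=
                            jiffies - __get_cpu_var(idle_enter_stamp);
                    break;
            }
            return NOTIFY_DONE;
    }

    static struct notifier_block idle_time_nb = {
            .notifier_call = idle_time_notify,
    };

    static int __init idle_time_init(void)
    {
            idle_notifier_register(&idle_time_nb);
            return 0;
    }

    static void __exit idle_time_exit(void)
    {
            idle_notifier_unregister(&idle_time_nb);
    }

    module_init(idle_time_init);
    module_exit(idle_time_exit);
    MODULE_LICENSE("GPL");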