author     Andi Kleen <andi@firstfloor.org>   2009-02-12 13:49:32 +0100
committer  H. Peter Anvin <hpa@zytor.com>     2009-02-24 13:24:42 -0800
commit     f9695df42cdbca78530b4458c38ecfdd0bb90079 (patch)
tree       308cf4c9eeaae84663559a47f5797072fd0bce65
parent     b276268631af3a1b0df871e10d19d492f0513d4b (diff)
x86, mce, cmci: avoid potential reentry of threshold interrupt
Impact: minor bugfix

The threshold handler on AMD (and soon on Intel) could theoretically be
reentered by the hardware. This could lead to corrupted events because
the machine check poll code assumes it is not reentered.

Move the APIC ACK to the end of the interrupt handler to let the
hardware avoid that.

Signed-off-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
-rw-r--r--  arch/x86/kernel/cpu/mcheck/threshold.c | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/arch/x86/kernel/cpu/mcheck/threshold.c b/arch/x86/kernel/cpu/mcheck/threshold.c
index 4319142..e4b8a38 100644
--- a/arch/x86/kernel/cpu/mcheck/threshold.c
+++ b/arch/x86/kernel/cpu/mcheck/threshold.c
@@ -15,10 +15,11 @@ void (*mce_threshold_vector)(void) = default_threshold_interrupt;
 
 asmlinkage void mce_threshold_interrupt(void)
 {
-	ack_APIC_irq();
 	exit_idle();
 	irq_enter();
 	inc_irq_stat(irq_threshold_count);
 	mce_threshold_vector();
 	irq_exit();
+	/* Ack only at the end to avoid potential reentry */
+	ack_APIC_irq();
 }
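
For reference, a reconstruction of the handler as it reads after this patch,
assembled from the context and added lines of the hunk above (the file's
includes and the default_threshold_interrupt fallback are not shown here):

	asmlinkage void mce_threshold_interrupt(void)
	{
		exit_idle();
		irq_enter();
		inc_irq_stat(irq_threshold_count);
		mce_threshold_vector();
		irq_exit();
		/* Ack only at the end to avoid potential reentry */
		ack_APIC_irq();
	}

With the ACK issued only after irq_exit(), the local APIC does not consider
the interrupt serviced until the vector handler and the poll code have
finished, which is how the patch closes the reentry window described in the
commit message.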