author	Ben Hutchings <ben@decadent.org.uk>	2009-09-10 02:53:50 +0100
committer	H. Peter Anvin <hpa@zytor.com>	2009-09-10 16:50:19 -0700
commit	5367b6887e7d8c870a5da7d9b8c6e9c207684e43 (patch)
tree	eab362078a7964850b00f8d781aa9c9f5ff01df7 /arch
parent	b19ae3999891cad21a3995c34d313dda5df014e2 (diff)
x86: Fix code patching for paravirt-alternatives on 486
As reported in <http://bugs.debian.org/511703> and <http://bugs.debian.org/515982>, kernels with paravirt-alternatives enabled crash in text_poke_early() on at least some 486-class processors.

The problem is that text_poke_early() itself uses inline functions affected by paravirt-alternatives and so will modify instructions that have already been prefetched. Pentium and later processors will invalidate the prefetched instructions in this case, but 486-class processors do not.

Change sync_core() to limit prefetching on 486-class (and 386-class) processors, and move the call to sync_core() above the call to the modifiable local_irq_restore().

Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
LKML-Reference: <1252547631.3423.134.camel@localhost>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
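For illustration only (not part of the commit), here is a minimal sketch of the two barrier idioms the patch relies on. The helper names prefetch_barrier_486() and speculation_barrier_cpuid() are hypothetical and exist only in this sketch:

/* Hypothetical helpers, sketched from the patch below for illustration. */

static inline void prefetch_barrier_486(void)
{
        /* 386/486-class CPUs do not invalidate already-prefetched
         * instructions when the underlying memory is written, but any
         * near jump empties the prefetch queue, so code patched just
         * before this point is re-fetched from memory. */
        asm volatile("jmp 1f\n1:\n" ::: "memory");
}

static inline void speculation_barrier_cpuid(void)
{
        int tmp;

        /* cpuid serializes execution on Pentium and later; those CPUs
         * also invalidate prefetched instructions automatically when
         * the memory behind them is modified. */
        asm volatile("cpuid" : "=a" (tmp) : "0" (1)
                     : "ebx", "ecx", "edx", "memory");
}

The patched sync_core() below chooses between these two idioms at run time based on boot_cpu_data.x86, and only compiles the jump branch in when CONFIG_M386 or CONFIG_M486 is set, so the fast path on newer CPUs is unchanged.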
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/include/asm/processor.h	16
-rw-r--r--	arch/x86/kernel/alternative.c	2
2 files changed, 14 insertions, 4 deletions
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index c776826..2db56c5 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -703,13 +703,23 @@ static inline void cpu_relax(void)
 	rep_nop();
 }
 
-/* Stop speculative execution: */
+/* Stop speculative execution and prefetching of modified code. */
 static inline void sync_core(void)
 {
 	int tmp;
 
-	asm volatile("cpuid" : "=a" (tmp) : "0" (1)
-		     : "ebx", "ecx", "edx", "memory");
+#if defined(CONFIG_M386) || defined(CONFIG_M486)
+	if (boot_cpu_data.x86 < 5)
+		/* There is no speculative execution.
+		 * jmp is a barrier to prefetching. */
+		asm volatile("jmp 1f\n1:\n" ::: "memory");
+	else
+#endif
+		/* cpuid is a barrier to speculative execution.
+		 * Prefetched instructions are automatically
+		 * invalidated when modified. */
+		asm volatile("cpuid" : "=a" (tmp) : "0" (1)
+			     : "ebx", "ecx", "edx", "memory");
 }
 
 static inline void __monitor(const void *eax, unsigned long ecx,
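Reassembled from the hunk above for readability (indentation approximated), the patched sync_core() reads roughly as follows:

/* Stop speculative execution and prefetching of modified code. */
static inline void sync_core(void)
{
        int tmp;

#if defined(CONFIG_M386) || defined(CONFIG_M486)
        if (boot_cpu_data.x86 < 5)
                /* No speculative execution on these CPUs;
                 * jmp is a barrier to prefetching. */
                asm volatile("jmp 1f\n1:\n" ::: "memory");
        else
#endif
                /* cpuid is a barrier to speculative execution;
                 * prefetched instructions are automatically
                 * invalidated when modified. */
                asm volatile("cpuid" : "=a" (tmp) : "0" (1)
                             : "ebx", "ecx", "edx", "memory");
}

On kernels built without CONFIG_M386 or CONFIG_M486 this collapses to the unconditional cpuid path, so only 386/486 builds pay for the run-time check.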
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index f576587..b8ebd0b 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -490,8 +490,8 @@ void *text_poke_early(void *addr, const void *opcode, size_t len)
 	unsigned long flags;
 	local_irq_save(flags);
 	memcpy(addr, opcode, len);
-	local_irq_restore(flags);
 	sync_core();
+	local_irq_restore(flags);
 	/* Could also do a CLFLUSH here to speed up CPU recovery; but
 	   that causes hangs on some VIA CPUs. */
 	return addr;
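For reference, a sketch of text_poke_early() after this hunk is applied, reassembled from the context above (the closing brace falls outside the hunk):

void *text_poke_early(void *addr, const void *opcode, size_t len)
{
        unsigned long flags;
        local_irq_save(flags);
        memcpy(addr, opcode, len);
        sync_core();
        local_irq_restore(flags);
        /* Could also do a CLFLUSH here to speed up CPU recovery; but
           that causes hangs on some VIA CPUs. */
        return addr;
}

With this ordering, the jump or cpuid inside sync_core() discards any stale prefetched bytes before the paravirt-patchable local_irq_restore() executes, which is exactly the hazard described in the commit message.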