Diffstat (limited to 'arch/x86/kernel')
 arch/x86/kernel/cpu/intel.c           |   7
 arch/x86/kernel/cpu/microcode/core.c  | 158
 arch/x86/kernel/cpu/microcode/intel.c |  48
 arch/x86/kernel/ioport.c              |   2
 arch/x86/kernel/kprobes/core.c        |  10
 arch/x86/kernel/vmlinux.lds.S         |   2
 6 files changed, 178 insertions(+), 49 deletions(-)
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index d19e903..4aa9fd3 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -144,6 +144,13 @@ static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
{
int i;
+ /*
+ * We know that the hypervisor lies to us about the microcode version, so
+ * we may as well hope that it is running the correct version.
+ */
+ if (cpu_has(c, X86_FEATURE_HYPERVISOR))
+ return false;
+
for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
if (c->x86_model == spectre_bad_microcodes[i].model &&
c->x86_stepping == spectre_bad_microcodes[i].stepping)
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index aa1b9a4..70ecbc8 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -22,13 +22,16 @@
#define pr_fmt(fmt) "microcode: " fmt
#include <linux/platform_device.h>
+#include <linux/stop_machine.h>
#include <linux/syscore_ops.h>
#include <linux/miscdevice.h>
#include <linux/capability.h>
#include <linux/firmware.h>
#include <linux/kernel.h>
+#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/cpu.h>
+#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/mm.h>
@@ -64,6 +67,11 @@ LIST_HEAD(microcode_cache);
*/
static DEFINE_MUTEX(microcode_mutex);
+/*
+ * Serialize late loading so that CPUs get updated one-by-one.
+ */
+static DEFINE_SPINLOCK(update_lock);
+
struct ucode_cpu_info ucode_cpu_info[NR_CPUS];
struct cpu_info_ctx {
@@ -373,26 +381,23 @@ static int collect_cpu_info(int cpu)
return ret;
}
-struct apply_microcode_ctx {
- enum ucode_state err;
-};
-
static void apply_microcode_local(void *arg)
{
- struct apply_microcode_ctx *ctx = arg;
+ enum ucode_state *err = arg;
- ctx->err = microcode_ops->apply_microcode(smp_processor_id());
+ *err = microcode_ops->apply_microcode(smp_processor_id());
}
static int apply_microcode_on_target(int cpu)
{
- struct apply_microcode_ctx ctx = { .err = 0 };
+ enum ucode_state err;
int ret;
- ret = smp_call_function_single(cpu, apply_microcode_local, &ctx, 1);
- if (!ret)
- ret = ctx.err;
-
+ ret = smp_call_function_single(cpu, apply_microcode_local, &err, 1);
+ if (!ret) {
+ if (err == UCODE_ERROR)
+ ret = 1;
+ }
return ret;
}
@@ -489,19 +494,100 @@ static void __exit microcode_dev_exit(void)
/* fake device for request_firmware */
static struct platform_device *microcode_pdev;
-static enum ucode_state reload_for_cpu(int cpu)
+/*
+ * Late loading dance. Why the heavy-handed stomp_machine effort?
+ *
+ * - HT siblings must be idle and not execute other code while the other sibling
+ * is loading microcode in order to avoid any negative interactions caused by
+ * the loading.
+ *
+ * - In addition, microcode update on the cores must be serialized until this
+ * requirement can be relaxed in the future. Right now, this is conservative
+ * and good.
+ */
+#define SPINUNIT 100 /* 100 nsec */
+
+static int check_online_cpus(void)
{
- struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
- enum ucode_state ustate;
+ if (num_online_cpus() == num_present_cpus())
+ return 0;
- if (!uci->valid)
- return UCODE_OK;
+ pr_err("Not all CPUs online, aborting microcode update.\n");
+
+ return -EINVAL;
+}
+
+static atomic_t late_cpus;
+
+/*
+ * Returns:
+ * < 0 - on error
+ * 0 - no update done
+ * 1 - microcode was updated
+ */
+static int __reload_late(void *info)
+{
+ unsigned int timeout = NSEC_PER_SEC;
+ int all_cpus = num_online_cpus();
+ int cpu = smp_processor_id();
+ enum ucode_state err;
+ int ret = 0;
+
+ atomic_dec(&late_cpus);
+
+ /*
+ * Wait for all CPUs to arrive. A load will not be attempted unless all
+ * CPUs show up.
+ */
+ while (atomic_read(&late_cpus)) {
+ if (timeout < SPINUNIT) {
+ pr_err("Timeout while waiting for CPUs rendezvous, remaining: %d\n",
+ atomic_read(&late_cpus));
+ return -1;
+ }
- ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev, true);
- if (ustate != UCODE_OK)
- return ustate;
+ ndelay(SPINUNIT);
+ timeout -= SPINUNIT;
- return apply_microcode_on_target(cpu);
+ touch_nmi_watchdog();
+ }
+
+ spin_lock(&update_lock);
+ apply_microcode_local(&err);
+ spin_unlock(&update_lock);
+
+ if (err > UCODE_NFOUND) {
+ pr_warn("Error reloading microcode on CPU %d\n", cpu);
+ ret = -1;
+ } else if (err == UCODE_UPDATED) {
+ ret = 1;
+ }
+
+ atomic_inc(&late_cpus);
+
+ while (atomic_read(&late_cpus) != all_cpus)
+ cpu_relax();
+
+ return ret;
+}
+
+/*
+ * Reload microcode late on all CPUs. Wait for a sec until they
+ * all gather together.
+ */
+static int microcode_reload_late(void)
+{
+ int ret;
+
+ atomic_set(&late_cpus, num_online_cpus());
+
+ ret = stop_machine_cpuslocked(__reload_late, NULL, cpu_online_mask);
+ if (ret < 0)
+ return ret;
+ else if (ret > 0)
+ microcode_check();
+
+ return ret;
}
static ssize_t reload_store(struct device *dev,
@@ -509,10 +595,9 @@ static ssize_t reload_store(struct device *dev,
const char *buf, size_t size)
{
enum ucode_state tmp_ret = UCODE_OK;
- bool do_callback = false;
+ int bsp = boot_cpu_data.cpu_index;
unsigned long val;
ssize_t ret = 0;
- int cpu;
ret = kstrtoul(buf, 0, &val);
if (ret)
@@ -521,29 +606,24 @@ static ssize_t reload_store(struct device *dev,
if (val != 1)
return size;
- get_online_cpus();
- mutex_lock(&microcode_mutex);
- for_each_online_cpu(cpu) {
- tmp_ret = reload_for_cpu(cpu);
- if (tmp_ret > UCODE_NFOUND) {
- pr_warn("Error reloading microcode on CPU %d\n", cpu);
-
- /* set retval for the first encountered reload error */
- if (!ret)
- ret = -EINVAL;
- }
+ tmp_ret = microcode_ops->request_microcode_fw(bsp, &microcode_pdev->dev, true);
+ if (tmp_ret != UCODE_OK)
+ return size;
- if (tmp_ret == UCODE_UPDATED)
- do_callback = true;
- }
+ get_online_cpus();
- if (!ret && do_callback)
- microcode_check();
+ ret = check_online_cpus();
+ if (ret)
+ goto put;
+ mutex_lock(&microcode_mutex);
+ ret = microcode_reload_late();
mutex_unlock(&microcode_mutex);
+
+put:
put_online_cpus();
- if (!ret)
+ if (ret >= 0)
ret = size;
return ret;
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index 923054a..2aded9d 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -589,6 +589,23 @@ static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
if (!mc)
return 0;
+ /*
+ * Save us the MSR write below - which is a particular expensive
+ * operation - when the other hyperthread has updated the microcode
+ * already.
+ */
+ rev = intel_get_microcode_revision();
+ if (rev >= mc->hdr.rev) {
+ uci->cpu_sig.rev = rev;
+ return UCODE_OK;
+ }
+
+ /*
+ * Writeback and invalidate caches before updating microcode to avoid
+ * internal issues depending on what the microcode is updating.
+ */
+ native_wbinvd();
+
/* write microcode via MSR 0x79 */
native_wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
@@ -774,9 +791,9 @@ static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
static enum ucode_state apply_microcode_intel(int cpu)
{
+ struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
+ struct cpuinfo_x86 *c = &cpu_data(cpu);
struct microcode_intel *mc;
- struct ucode_cpu_info *uci;
- struct cpuinfo_x86 *c;
static int prev_rev;
u32 rev;
@@ -784,15 +801,32 @@ static enum ucode_state apply_microcode_intel(int cpu)
if (WARN_ON(raw_smp_processor_id() != cpu))
return UCODE_ERROR;
- uci = ucode_cpu_info + cpu;
- mc = uci->mc;
+ /* Look for a newer patch in our cache: */
+ mc = find_patch(uci);
if (!mc) {
- /* Look for a newer patch in our cache: */
- mc = find_patch(uci);
+ mc = uci->mc;
if (!mc)
return UCODE_NFOUND;
}
+ /*
+ * Save us the MSR write below - which is a particularly expensive
+ * operation - when the other hyperthread has updated the microcode
+ * already.
+ */
+ rev = intel_get_microcode_revision();
+ if (rev >= mc->hdr.rev) {
+ uci->cpu_sig.rev = rev;
+ c->microcode = rev;
+ return UCODE_OK;
+ }
+
+ /*
+ * Writeback and invalidate caches before updating microcode to avoid
+ * internal issues depending on what the microcode is updating.
+ */
+ native_wbinvd();
+
/* write microcode via MSR 0x79 */
wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
@@ -813,8 +847,6 @@ static enum ucode_state apply_microcode_intel(int cpu)
prev_rev = rev;
}
- c = &cpu_data(cpu);
-
uci->cpu_sig.rev = rev;
c->microcode = rev;
diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
index 2f72330..38deafe 100644
--- a/arch/x86/kernel/ioport.c
+++ b/arch/x86/kernel/ioport.c
@@ -23,7 +23,7 @@
/*
* this changes the io permissions bitmap in the current task.
*/
-asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
+SYSCALL_DEFINE3(ioperm, unsigned long, from, unsigned long, num, int, turn_on)
{
struct thread_struct *t = &current->thread;
struct tss_struct *tss;
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index bd36f3c..0715f82 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -1168,10 +1168,18 @@ NOKPROBE_SYMBOL(longjmp_break_handler);
bool arch_within_kprobe_blacklist(unsigned long addr)
{
+ bool is_in_entry_trampoline_section = false;
+
+#ifdef CONFIG_X86_64
+ is_in_entry_trampoline_section =
+ (addr >= (unsigned long)__entry_trampoline_start &&
+ addr < (unsigned long)__entry_trampoline_end);
+#endif
return (addr >= (unsigned long)__kprobes_text_start &&
addr < (unsigned long)__kprobes_text_end) ||
(addr >= (unsigned long)__entry_text_start &&
- addr < (unsigned long)__entry_text_end);
+ addr < (unsigned long)__entry_text_end) ||
+ is_in_entry_trampoline_section;
}
int __init arch_init_kprobes(void)
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 9b138a0..b854ebf 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -118,9 +118,11 @@ SECTIONS
#ifdef CONFIG_X86_64
. = ALIGN(PAGE_SIZE);
+ VMLINUX_SYMBOL(__entry_trampoline_start) = .;
_entry_trampoline = .;
*(.entry_trampoline)
. = ALIGN(PAGE_SIZE);
+ VMLINUX_SYMBOL(__entry_trampoline_end) = .;
ASSERT(. - _entry_trampoline == PAGE_SIZE, "entry trampoline is too big");
#endif