author     grehan <grehan@FreeBSD.org>    2013-08-01 01:18:51 +0000
committer  grehan <grehan@FreeBSD.org>    2013-08-01 01:18:51 +0000
commit     045ef38328f828a12a923239928c0e5c78ec93d1 (patch)
tree       a5e5a8d53cbe23485767c249ff9de1b71063398f /sys/amd64/vmm
parent     24c5871f5aaf01f006fc71eb30589d12f6f2ecb2 (diff)
Correctly maintain the CR0/CR4 shadow registers.
This was exposed with AP spinup of Linux, and booting OpenBSD, where the
CR0 register is unconditionally written to prior to the longjump to enter
protected mode. The CR-vmexit handling was not updating CPU state, which
resulted in a vmentry failure with invalid guest state. A follow-on commit
will fix the CPU state issue, but this fix prevents the CR-vmexit prior to
entering protected mode by properly initializing and maintaining CR* state.

Reviewed by:	neel
Reported by:	Gopakumar.T @ netapp
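For context, the core of the change in vmx_emulate_cr_access() is to store the
guest's unmodified value in the CR read shadow before applying the host-mandated
ones/zeros masks to the real guest CR0/CR4, so the guest reads back exactly what
it wrote. The sketch below models that split in isolation; struct cr_shadow_state,
emulate_cr0_write(), and the mask values are hypothetical stand-ins for this
illustration only (the driver itself writes VMCS_CR0_SHADOW and VMCS_GUEST_CR0
via vmwrite()/vmcs_setreg()).

/*
 * Minimal sketch, not code from this commit: how a trapped guest write to
 * CR0 is split between the CR0 read shadow (what the guest reads back) and
 * the real guest CR0 (what the CPU uses), mirroring the cr0_ones_mask /
 * cr0_zeros_mask handling in vmx.c.  Mask values are hypothetical examples.
 */
#include <stdint.h>

static const uint64_t cr0_ones_mask  = 0x80000021;	/* e.g. PG, NE, PE forced on */
static const uint64_t cr0_zeros_mask = 0x60000000;	/* e.g. CD, NW forced off */

struct cr_shadow_state {
	uint64_t guest_cr0;	/* stands in for VMCS_GUEST_CR0 */
	uint64_t cr0_shadow;	/* stands in for VMCS_CR0_SHADOW */
};

/* Emulate a guest "mov %reg, %cr0" that caused a CR-access exit. */
static void
emulate_cr0_write(struct cr_shadow_state *st, uint64_t regval)
{
	/* The read shadow keeps the unmodified value so CR0 reads see it. */
	st->cr0_shadow = regval;

	/* The CR0 actually used by the CPU gets the host's bits applied. */
	regval |= cr0_ones_mask;
	regval &= ~cr0_zeros_mask;
	st->guest_cr0 = regval;
}

With the power-on defaults of CR0 = 0x60000010 and CR4 = 0 loaded into the
shadows at vminit time, as the diff below does, a guest that reads CR0 before
its first write sees the reset value rather than the VMX-mandated bits.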
Diffstat (limited to 'sys/amd64/vmm')
-rw-r--r--   sys/amd64/vmm/intel/vmx.c   65
1 file changed, 54 insertions, 11 deletions
diff --git a/sys/amd64/vmm/intel/vmx.c b/sys/amd64/vmm/intel/vmx.c
index 79c5538..cc18abe 100644
--- a/sys/amd64/vmm/intel/vmx.c
+++ b/sys/amd64/vmm/intel/vmx.c
@@ -647,10 +647,10 @@ vmx_vpid(void)
 }
 static int
-vmx_setup_cr_shadow(int which, struct vmcs *vmcs)
+vmx_setup_cr_shadow(int which, struct vmcs *vmcs, uint32_t initial)
 {
 	int error, mask_ident, shadow_ident;
-	uint64_t mask_value, shadow_value;
+	uint64_t mask_value;
 	if (which != 0 && which != 4)
 		panic("vmx_setup_cr_shadow: unknown cr%d", which);
@@ -659,26 +659,24 @@ vmx_setup_cr_shadow(int which, struct vmcs *vmcs)
 		mask_ident = VMCS_CR0_MASK;
 		mask_value = cr0_ones_mask | cr0_zeros_mask;
 		shadow_ident = VMCS_CR0_SHADOW;
-		shadow_value = cr0_ones_mask;
 	} else {
 		mask_ident = VMCS_CR4_MASK;
 		mask_value = cr4_ones_mask | cr4_zeros_mask;
 		shadow_ident = VMCS_CR4_SHADOW;
-		shadow_value = cr4_ones_mask;
 	}
 	error = vmcs_setreg(vmcs, 0, VMCS_IDENT(mask_ident), mask_value);
 	if (error)
 		return (error);
-	error = vmcs_setreg(vmcs, 0, VMCS_IDENT(shadow_ident), shadow_value);
+	error = vmcs_setreg(vmcs, 0, VMCS_IDENT(shadow_ident), initial);
 	if (error)
 		return (error);
 	return (0);
 }
-#define	vmx_setup_cr0_shadow(vmcs)	vmx_setup_cr_shadow(0, (vmcs))
-#define	vmx_setup_cr4_shadow(vmcs)	vmx_setup_cr_shadow(4, (vmcs))
+#define	vmx_setup_cr0_shadow(vmcs,init)	vmx_setup_cr_shadow(0, (vmcs), (init))
+#define	vmx_setup_cr4_shadow(vmcs,init)	vmx_setup_cr_shadow(4, (vmcs), (init))
 static void *
 vmx_vminit(struct vm *vm)
@@ -784,11 +782,17 @@ vmx_vminit(struct vm *vm)
 		if (error != 0)
 			panic("vmcs_set_msr_save error %d", error);
-		error = vmx_setup_cr0_shadow(&vmx->vmcs[i]);
+		/*
+		 * Set up the CR0/4 shadows, and init the read shadow
+		 * to the power-on register value from the Intel Sys Arch.
+		 *  CR0 - 0x60000010
+		 *  CR4 - 0
+		 */
+		error = vmx_setup_cr0_shadow(&vmx->vmcs[i], 0x60000010);
 		if (error != 0)
 			panic("vmx_setup_cr0_shadow %d", error);
-		error = vmx_setup_cr4_shadow(&vmx->vmcs[i]);
+		error = vmx_setup_cr4_shadow(&vmx->vmcs[i], 0);
 		if (error != 0)
 			panic("vmx_setup_cr4_shadow %d", error);
 	}
@@ -1079,7 +1083,7 @@ cantinject:
 static int
 vmx_emulate_cr_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
 {
-	int error, cr, vmcs_guest_cr;
+	int error, cr, vmcs_guest_cr, vmcs_shadow_cr;
 	uint64_t regval, ones_mask, zeros_mask;
 	const struct vmxctx *vmxctx;
@@ -1156,11 +1160,20 @@ vmx_emulate_cr_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
 		ones_mask = cr0_ones_mask;
 		zeros_mask = cr0_zeros_mask;
 		vmcs_guest_cr = VMCS_GUEST_CR0;
+		vmcs_shadow_cr = VMCS_CR0_SHADOW;
 	} else {
 		ones_mask = cr4_ones_mask;
 		zeros_mask = cr4_zeros_mask;
 		vmcs_guest_cr = VMCS_GUEST_CR4;
+		vmcs_shadow_cr = VMCS_CR4_SHADOW;
+	}
+
+	error = vmwrite(vmcs_shadow_cr, regval);
+	if (error) {
+		panic("vmx_emulate_cr_access: error %d writing cr%d shadow",
+		    error, cr);
 	}
+
 	regval |= ones_mask;
 	regval &= ~zeros_mask;
 	error = vmwrite(vmcs_guest_cr, regval);
@@ -1615,6 +1628,27 @@ vmxctx_setreg(struct vmxctx *vmxctx, int reg, uint64_t val)
 }
 static int
+vmx_shadow_reg(int reg)
+{
+	int shreg;
+
+	shreg = -1;
+
+	switch (reg) {
+	case VM_REG_GUEST_CR0:
+		shreg = VMCS_CR0_SHADOW;
+		break;
+	case VM_REG_GUEST_CR4:
+		shreg = VMCS_CR4_SHADOW;
+		break;
+	default:
+		break;
+	}
+
+	return (shreg);
+}
+
+static int
 vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval)
 {
 	int running, hostcpu;
@@ -1633,7 +1667,7 @@ vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval)
 static int
 vmx_setreg(void *arg, int vcpu, int reg, uint64_t val)
 {
-	int error, hostcpu, running;
+	int error, hostcpu, running, shadow;
 	uint64_t ctls;
 	struct vmx *vmx = arg;
@@ -1663,6 +1697,15 @@ vmx_setreg(void *arg, int vcpu, int reg, uint64_t val)
 			vmcs_setreg(&vmx->vmcs[vcpu], running,
 			    VMCS_IDENT(VMCS_ENTRY_CTLS), ctls);
 		}
+
+		shadow = vmx_shadow_reg(reg);
+		if (shadow > 0) {
+			/*
+			 * Store the unmodified value in the shadow
+			 */
+			error = vmcs_setreg(&vmx->vmcs[vcpu], running,
+			    VMCS_IDENT(shadow), val);
+		}
 	}
 	return (error);