Diffstat (limited to 'sys/amd64/vmm/intel/vmx.c')
-rw-r--r--  sys/amd64/vmm/intel/vmx.c  350
1 file changed, 266 insertions(+), 84 deletions(-)
diff --git a/sys/amd64/vmm/intel/vmx.c b/sys/amd64/vmm/intel/vmx.c
index 2cbb159..b2c5702 100644
--- a/sys/amd64/vmm/intel/vmx.c
+++ b/sys/amd64/vmm/intel/vmx.c
@@ -149,8 +149,6 @@ SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr4_ones_mask, CTLFLAG_RD,
SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr4_zeros_mask, CTLFLAG_RD,
&cr4_zeros_mask, 0, NULL);
-static int vmx_no_patmsr;
-
static int vmx_initialized;
SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, initialized, CTLFLAG_RD,
&vmx_initialized, 0, "Intel VMX initialized");
@@ -158,18 +156,38 @@ SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, initialized, CTLFLAG_RD,
/*
* Optional capabilities
*/
+static SYSCTL_NODE(_hw_vmm_vmx, OID_AUTO, cap, CTLFLAG_RW, NULL, NULL);
+
+static int vmx_patmsr;
+SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, patmsr, CTLFLAG_RD, &vmx_patmsr, 0,
+ "PAT MSR saved and restored in VCMS");
+
static int cap_halt_exit;
+SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, halt_exit, CTLFLAG_RD, &cap_halt_exit, 0,
+ "HLT triggers a VM-exit");
+
static int cap_pause_exit;
+SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, pause_exit, CTLFLAG_RD, &cap_pause_exit,
+ 0, "PAUSE triggers a VM-exit");
+
static int cap_unrestricted_guest;
+SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, unrestricted_guest, CTLFLAG_RD,
+ &cap_unrestricted_guest, 0, "Unrestricted guests");
+
static int cap_monitor_trap;
+SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, monitor_trap, CTLFLAG_RD,
+ &cap_monitor_trap, 0, "Monitor trap flag");
+
static int cap_invpcid;
+SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, invpcid, CTLFLAG_RD, &cap_invpcid,
+ 0, "Guests are allowed to use INVPCID");
static int virtual_interrupt_delivery;
-SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, virtual_interrupt_delivery, CTLFLAG_RD,
+SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, virtual_interrupt_delivery, CTLFLAG_RD,
&virtual_interrupt_delivery, 0, "APICv virtual interrupt delivery support");
static int posted_interrupts;
-SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, posted_interrupts, CTLFLAG_RD,
+SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, posted_interrupts, CTLFLAG_RD,
&posted_interrupts, 0, "APICv posted interrupt support");
static int pirvec;
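For reference, a minimal userland sketch of how the new read-only capability sysctls grouped under hw.vmm.vmx.cap could be queried with FreeBSD's standard sysctlbyname(3); the OID name is taken from the diff above, and the program assumes a host with the vmm module loaded:

/*
 * Illustrative only: query one of the hw.vmm.vmx.cap flags added above.
 * Everything except the OID string is plain libc.
 */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	int val;
	size_t len = sizeof(val);

	if (sysctlbyname("hw.vmm.vmx.cap.unrestricted_guest",
	    &val, &len, NULL, 0) == -1) {
		perror("sysctlbyname");
		return (1);
	}
	printf("unrestricted guest support: %d\n", val);
	return (0);
}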
@@ -618,6 +636,7 @@ vmx_init(int ipinum)
}
/* Check support for VM-exit controls */
+ vmx_patmsr = 1;
error = vmx_set_ctlreg(MSR_VMX_EXIT_CTLS, MSR_VMX_TRUE_EXIT_CTLS,
VM_EXIT_CTLS_ONE_SETTING,
VM_EXIT_CTLS_ZERO_SETTING,
@@ -637,12 +656,12 @@ vmx_init(int ipinum)
if (bootverbose)
printf("vmm: PAT MSR access not supported\n");
guest_msr_valid(MSR_PAT);
- vmx_no_patmsr = 1;
+ vmx_patmsr = 0;
}
}
/* Check support for VM-entry controls */
- if (!vmx_no_patmsr) {
+ if (vmx_patmsr) {
error = vmx_set_ctlreg(MSR_VMX_ENTRY_CTLS,
MSR_VMX_TRUE_ENTRY_CTLS,
VM_ENTRY_CTLS_ONE_SETTING,
@@ -918,7 +937,7 @@ vmx_vminit(struct vm *vm, pmap_t pmap)
* MSR_PAT save/restore support, leave access disabled so accesses
* will be trapped.
*/
- if (!vmx_no_patmsr && guest_msr_rw(vmx, MSR_PAT))
+ if (vmx_patmsr && guest_msr_rw(vmx, MSR_PAT))
panic("vmx_vminit: error setting guest pat msr access");
vpid_alloc(vpid, VM_MAXCPU);
@@ -974,7 +993,7 @@ vmx_vminit(struct vm *vm, pmap_t pmap)
vmx->cap[i].proc_ctls = procbased_ctls;
vmx->cap[i].proc_ctls2 = procbased_ctls2;
- vmx->state[i].lastcpu = -1;
+ vmx->state[i].lastcpu = NOCPU;
vmx->state[i].vpid = vpid[i];
msr_save_area_init(vmx->guest_msrs[i], &guest_msr_count);
@@ -1047,27 +1066,37 @@ vmx_astpending_trace(struct vmx *vmx, int vcpu, uint64_t rip)
}
static VMM_STAT_INTEL(VCPU_INVVPID_SAVED, "Number of vpid invalidations saved");
+static VMM_STAT_INTEL(VCPU_INVVPID_DONE, "Number of vpid invalidations done");
-static void
-vmx_set_pcpu_defaults(struct vmx *vmx, int vcpu, pmap_t pmap)
+/*
+ * Invalidate guest mappings identified by its vpid from the TLB.
+ */
+static __inline void
+vmx_invvpid(struct vmx *vmx, int vcpu, pmap_t pmap, int running)
{
struct vmxstate *vmxstate;
struct invvpid_desc invvpid_desc;
vmxstate = &vmx->state[vcpu];
- if (vmxstate->lastcpu == curcpu)
+ if (vmxstate->vpid == 0)
return;
- vmxstate->lastcpu = curcpu;
-
- vmm_stat_incr(vmx->vm, vcpu, VCPU_MIGRATIONS, 1);
+ if (!running) {
+ /*
+ * Set the 'lastcpu' to an invalid host cpu.
+ *
+ * This will invalidate TLB entries tagged with the vcpu's
+ * vpid the next time it runs via vmx_set_pcpu_defaults().
+ */
+ vmxstate->lastcpu = NOCPU;
+ return;
+ }
- vmcs_write(VMCS_HOST_TR_BASE, vmm_get_host_trbase());
- vmcs_write(VMCS_HOST_GDTR_BASE, vmm_get_host_gdtrbase());
- vmcs_write(VMCS_HOST_GS_BASE, vmm_get_host_gsbase());
+ KASSERT(curthread->td_critnest > 0, ("%s: vcpu %d running outside "
+ "critical section", __func__, vcpu));
/*
- * If we are using VPIDs then invalidate all mappings tagged with 'vpid'
+ * Invalidate all mappings tagged with 'vpid'
*
* We do this because this vcpu was executing on a different host
* cpu when it last ran. We do not track whether it invalidated
@@ -1081,25 +1110,43 @@ vmx_set_pcpu_defaults(struct vmx *vmx, int vcpu, pmap_t pmap)
* Note also that this will invalidate mappings tagged with 'vpid'
* for "all" EP4TAs.
*/
- if (vmxstate->vpid != 0) {
- if (pmap->pm_eptgen == vmx->eptgen[curcpu]) {
- invvpid_desc._res1 = 0;
- invvpid_desc._res2 = 0;
- invvpid_desc.vpid = vmxstate->vpid;
- invvpid_desc.linear_addr = 0;
- invvpid(INVVPID_TYPE_SINGLE_CONTEXT, invvpid_desc);
- } else {
- /*
- * The invvpid can be skipped if an invept is going to
- * be performed before entering the guest. The invept
- * will invalidate combined mappings tagged with
- * 'vmx->eptp' for all vpids.
- */
- vmm_stat_incr(vmx->vm, vcpu, VCPU_INVVPID_SAVED, 1);
- }
+ if (pmap->pm_eptgen == vmx->eptgen[curcpu]) {
+ invvpid_desc._res1 = 0;
+ invvpid_desc._res2 = 0;
+ invvpid_desc.vpid = vmxstate->vpid;
+ invvpid_desc.linear_addr = 0;
+ invvpid(INVVPID_TYPE_SINGLE_CONTEXT, invvpid_desc);
+ vmm_stat_incr(vmx->vm, vcpu, VCPU_INVVPID_DONE, 1);
+ } else {
+ /*
+ * The invvpid can be skipped if an invept is going to
+ * be performed before entering the guest. The invept
+ * will invalidate combined mappings tagged with
+ * 'vmx->eptp' for all vpids.
+ */
+ vmm_stat_incr(vmx->vm, vcpu, VCPU_INVVPID_SAVED, 1);
}
}
+static void
+vmx_set_pcpu_defaults(struct vmx *vmx, int vcpu, pmap_t pmap)
+{
+ struct vmxstate *vmxstate;
+
+ vmxstate = &vmx->state[vcpu];
+ if (vmxstate->lastcpu == curcpu)
+ return;
+
+ vmxstate->lastcpu = curcpu;
+
+ vmm_stat_incr(vmx->vm, vcpu, VCPU_MIGRATIONS, 1);
+
+ vmcs_write(VMCS_HOST_TR_BASE, vmm_get_host_trbase());
+ vmcs_write(VMCS_HOST_GDTR_BASE, vmm_get_host_gdtrbase());
+ vmcs_write(VMCS_HOST_GS_BASE, vmm_get_host_gsbase());
+ vmx_invvpid(vmx, vcpu, pmap, 1);
+}
+
/*
* We depend on 'procbased_ctls' to have the Interrupt Window Exiting bit set.
*/
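The split of the old vmx_set_pcpu_defaults() into vmx_invvpid() plus a thin wrapper boils down to three cases: VPIDs disabled, vcpu not currently running (defer by poisoning lastcpu with NOCPU), and vcpu running (do the INVVPID unless a pending INVEPT already covers it, detected via the EPT generation counters). A standalone sketch of just that decision logic, with local stand-in types rather than the real vmm structures:

/*
 * Local stand-ins only (NOT the kernel's types); the point is the
 * three-way decision made by the new vmx_invvpid() above.
 */
#include <stdbool.h>
#include <stdio.h>

#define	NOCPU	(-1)

struct fake_state {
	int	vpid;		/* 0 means VPIDs are not in use */
	int	lastcpu;	/* host cpu the vcpu last ran on */
};

enum action { DO_NOTHING, DEFER_TO_NEXT_RUN, DO_INVVPID, SKIP_INVEPT_PENDING };

static enum action
invvpid_decision(struct fake_state *st, bool running,
    long pmap_eptgen, long pcpu_eptgen)
{
	if (st->vpid == 0)
		return (DO_NOTHING);

	if (!running) {
		/* Next vmx_set_pcpu_defaults() will see lastcpu != curcpu. */
		st->lastcpu = NOCPU;
		return (DEFER_TO_NEXT_RUN);
	}

	/*
	 * If the generation numbers differ, an INVEPT will be done before
	 * VM entry anyway and already invalidates this vpid's mappings.
	 */
	if (pmap_eptgen == pcpu_eptgen)
		return (DO_INVVPID);
	return (SKIP_INVEPT_PENDING);
}

int
main(void)
{
	struct fake_state st = { .vpid = 12, .lastcpu = 3 };

	printf("not running -> %d (lastcpu now %d)\n",
	    invvpid_decision(&st, false, 5, 5), st.lastcpu);
	printf("running, eptgen match -> %d\n",
	    invvpid_decision(&st, true, 5, 5));
	printf("running, invept pending -> %d\n",
	    invvpid_decision(&st, true, 6, 5));
	return (0);
}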
@@ -1183,24 +1230,32 @@ vmx_inject_nmi(struct vmx *vmx, int vcpu)
static void
vmx_inject_interrupts(struct vmx *vmx, int vcpu, struct vlapic *vlapic)
{
- struct vm_exception exc;
int vector, need_nmi_exiting, extint_pending;
- uint64_t rflags;
+ uint64_t rflags, entryinfo;
uint32_t gi, info;
- if (vm_exception_pending(vmx->vm, vcpu, &exc)) {
- KASSERT(exc.vector >= 0 && exc.vector < 32,
- ("%s: invalid exception vector %d", __func__, exc.vector));
+ if (vm_entry_intinfo(vmx->vm, vcpu, &entryinfo)) {
+ KASSERT((entryinfo & VMCS_INTR_VALID) != 0, ("%s: entry "
+ "intinfo is not valid: %#lx", __func__, entryinfo));
info = vmcs_read(VMCS_ENTRY_INTR_INFO);
KASSERT((info & VMCS_INTR_VALID) == 0, ("%s: cannot inject "
- "pending exception %d: %#x", __func__, exc.vector, info));
+ "pending exception: %#lx/%#x", __func__, entryinfo, info));
- info = exc.vector | VMCS_INTR_T_HWEXCEPTION | VMCS_INTR_VALID;
- if (exc.error_code_valid) {
- info |= VMCS_INTR_DEL_ERRCODE;
- vmcs_write(VMCS_ENTRY_EXCEPTION_ERROR, exc.error_code);
+ info = entryinfo;
+ vector = info & 0xff;
+ if (vector == IDT_BP || vector == IDT_OF) {
+ /*
+ * VT-x requires #BP and #OF to be injected as software
+ * exceptions.
+ */
+ info &= ~VMCS_INTR_T_MASK;
+ info |= VMCS_INTR_T_SWEXCEPTION;
}
+
+ if (info & VMCS_INTR_DEL_ERRCODE)
+ vmcs_write(VMCS_ENTRY_EXCEPTION_ERROR, entryinfo >> 32);
+
vmcs_write(VMCS_ENTRY_INTR_INFO, info);
}
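The rewritten injection path now takes a pre-packed 64-bit entry-intinfo value and only has to fix up the interruption type for #BP and #OF, which VT-x requires to be injected as software exceptions. A self-contained sketch of that fix-up, with the VM-entry interruption-information layout written out per the Intel SDM (bits 7:0 vector, 10:8 type, bit 11 error-code valid, bit 31 valid); the macro names are local stand-ins, not the vmm headers:

#include <stdint.h>
#include <stdio.h>

#define	INTR_VALID		(1u << 31)
#define	INTR_T_MASK		(0x7u << 8)
#define	INTR_T_HWEXCEPTION	(3u << 8)
#define	INTR_T_SWEXCEPTION	(6u << 8)
#define	IDT_BP			3	/* #BP, breakpoint */
#define	IDT_OF			4	/* #OF, overflow */

static uint32_t
fixup_entry_intinfo(uint32_t info)
{
	uint32_t vector = info & 0xff;

	/* VT-x requires #BP and #OF to use the software exception type. */
	if (vector == IDT_BP || vector == IDT_OF) {
		info &= ~INTR_T_MASK;
		info |= INTR_T_SWEXCEPTION;
	}
	return (info);
}

int
main(void)
{
	uint32_t bp = INTR_VALID | INTR_T_HWEXCEPTION | IDT_BP;

	printf("#BP before: %#x after: %#x\n", bp, fixup_entry_intinfo(bp));
	return (0);
}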
@@ -1379,6 +1434,16 @@ vmx_clear_nmi_blocking(struct vmx *vmx, int vcpuid)
vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
}
+static void
+vmx_assert_nmi_blocking(struct vmx *vmx, int vcpuid)
+{
+ uint32_t gi;
+
+ gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
+ KASSERT(gi & VMCS_INTERRUPTIBILITY_NMI_BLOCKING,
+ ("NMI blocking is not in effect %#x", gi));
+}
+
static int
vmx_emulate_xsetbv(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
{
@@ -1659,11 +1724,19 @@ vmx_cpl(void)
static enum vm_cpu_mode
vmx_cpu_mode(void)
{
+ uint32_t csar;
- if (vmcs_read(VMCS_GUEST_IA32_EFER) & EFER_LMA)
- return (CPU_MODE_64BIT);
- else
- return (CPU_MODE_COMPATIBILITY);
+ if (vmcs_read(VMCS_GUEST_IA32_EFER) & EFER_LMA) {
+ csar = vmcs_read(VMCS_GUEST_CS_ACCESS_RIGHTS);
+ if (csar & 0x2000)
+ return (CPU_MODE_64BIT); /* CS.L = 1 */
+ else
+ return (CPU_MODE_COMPATIBILITY);
+ } else if (vmcs_read(VMCS_GUEST_CR0) & CR0_PE) {
+ return (CPU_MODE_PROTECTED);
+ } else {
+ return (CPU_MODE_REAL);
+ }
}
static enum vm_paging_mode
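The expanded vmx_cpu_mode() distinguishes four modes instead of two: once EFER.LMA is set, CS.L (bit 13 of the VMCS code-segment access rights, hence the 0x2000 mask) separates 64-bit from compatibility mode, and otherwise CR0.PE separates protected from real mode. A standalone sketch of that decode, with local stand-in constants:

#include <stdint.h>
#include <stdio.h>

#define	EFER_LMA	(1u << 10)	/* long mode active */
#define	CR0_PE		(1u << 0)	/* protection enable */
#define	CS_AR_L		(1u << 13)	/* "L" bit in VMCS access rights */

enum cpu_mode { MODE_REAL, MODE_PROTECTED, MODE_COMPAT, MODE_64BIT };

static enum cpu_mode
decode_cpu_mode(uint64_t efer, uint64_t cr0, uint32_t cs_access_rights)
{
	if (efer & EFER_LMA)
		return ((cs_access_rights & CS_AR_L) ? MODE_64BIT : MODE_COMPAT);
	if (cr0 & CR0_PE)
		return (MODE_PROTECTED);
	return (MODE_REAL);
}

int
main(void)
{
	printf("%d %d %d %d\n",
	    decode_cpu_mode(EFER_LMA, CR0_PE, CS_AR_L),	/* 64-bit */
	    decode_cpu_mode(EFER_LMA, CR0_PE, 0),	/* compatibility */
	    decode_cpu_mode(0, CR0_PE, 0),		/* protected */
	    decode_cpu_mode(0, 0, 0));			/* real */
	return (0);
}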
@@ -1757,10 +1830,25 @@ vmx_paging_info(struct vm_guest_paging *paging)
static void
vmexit_inst_emul(struct vm_exit *vmexit, uint64_t gpa, uint64_t gla)
{
+ struct vm_guest_paging *paging;
+ uint32_t csar;
+
+ paging = &vmexit->u.inst_emul.paging;
+
vmexit->exitcode = VM_EXITCODE_INST_EMUL;
vmexit->u.inst_emul.gpa = gpa;
vmexit->u.inst_emul.gla = gla;
- vmx_paging_info(&vmexit->u.inst_emul.paging);
+ vmx_paging_info(paging);
+ switch (paging->cpu_mode) {
+ case CPU_MODE_PROTECTED:
+ case CPU_MODE_COMPATIBILITY:
+ csar = vmcs_read(VMCS_GUEST_CS_ACCESS_RIGHTS);
+ vmexit->u.inst_emul.cs_d = SEG_DESC_DEF32(csar);
+ break;
+ default:
+ vmexit->u.inst_emul.cs_d = 0;
+ break;
+ }
}
static int
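vmexit_inst_emul() now also records cs_d, the code segment's default operand size, which the instruction emulator needs in protected and compatibility mode; it comes from the D/B bit of the code segment's VMCS access rights (SEG_DESC_DEF32() in the real code) and is forced to 0 in the other modes. A standalone sketch, assuming the usual bit-14 position of D/B and using local macros in place of the vmm ones:

#include <stdint.h>
#include <stdio.h>

#define	CS_AR_L		(1u << 13)	/* 64-bit code segment */
#define	CS_AR_DB	(1u << 14)	/* default operand size (D/B) */

static int
cs_default_32(uint32_t csar, int long_mode_active)
{
	if (long_mode_active && (csar & CS_AR_L))
		return (0);		/* 64-bit mode: cs_d is not used */
	return ((csar & CS_AR_DB) ? 1 : 0);
}

int
main(void)
{
	printf("%d %d\n",
	    cs_default_32(CS_AR_DB, 0),		/* 32-bit protected mode */
	    cs_default_32(CS_AR_L, 1));		/* 64-bit mode */
	return (0);
}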
@@ -1969,6 +2057,26 @@ vmx_handle_apic_access(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit)
return (UNHANDLED);
}
+static enum task_switch_reason
+vmx_task_switch_reason(uint64_t qual)
+{
+ int reason;
+
+ reason = (qual >> 30) & 0x3;
+ switch (reason) {
+ case 0:
+ return (TSR_CALL);
+ case 1:
+ return (TSR_IRET);
+ case 2:
+ return (TSR_JMP);
+ case 3:
+ return (TSR_IDT_GATE);
+ default:
+ panic("%s: invalid reason %d", __func__, reason);
+ }
+}
+
static int
vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
{
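vmx_task_switch_reason() simply decodes bits 31:30 of the task-switch exit qualification; the handler added further down also pulls the new TSS selector out of bits 15:0. A standalone sketch of both decodes, with a local enum standing in for the TSR_* constants:

#include <stdint.h>
#include <stdio.h>

enum ts_reason { TS_CALL, TS_IRET, TS_JMP, TS_IDT_GATE };

static void
decode_task_switch_qual(uint64_t qual, enum ts_reason *reason,
    uint16_t *tss_sel)
{
	*tss_sel = qual & 0xffff;		/* new TSS selector */
	*reason = (enum ts_reason)((qual >> 30) & 0x3);
}

int
main(void)
{
	enum ts_reason r;
	uint16_t sel;

	/* Reason 3 (IDT gate), TSS selector 0x28. */
	decode_task_switch_qual((3ULL << 30) | 0x28, &r, &sel);
	printf("reason %d, tss selector %#x\n", r, (unsigned)sel);
	return (0);
}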
@@ -1976,9 +2084,10 @@ vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
struct vmxctx *vmxctx;
struct vlapic *vlapic;
struct vm_inout_str *vis;
+ struct vm_task_switch *ts;
uint32_t eax, ecx, edx, idtvec_info, idtvec_err, intr_info, inst_info;
- uint32_t reason;
- uint64_t qual, gpa;
+ uint32_t intr_type, reason;
+ uint64_t exitintinfo, qual, gpa;
bool retu;
CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_VIRTUAL_NMI) != 0);
@@ -1994,46 +2103,99 @@ vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
vmm_stat_incr(vmx->vm, vcpu, VMEXIT_COUNT, 1);
/*
- * VM exits that could be triggered during event injection on the
- * previous VM entry need to be handled specially by re-injecting
- * the event.
+ * VM exits that can be triggered during event delivery need to
+ * be handled specially by re-injecting the event if the IDT
+ * vectoring information field's valid bit is set.
*
* See "Information for VM Exits During Event Delivery" in Intel SDM
* for details.
*/
- switch (reason) {
- case EXIT_REASON_EPT_FAULT:
- case EXIT_REASON_EPT_MISCONFIG:
- case EXIT_REASON_APIC_ACCESS:
- case EXIT_REASON_TASK_SWITCH:
- case EXIT_REASON_EXCEPTION:
- idtvec_info = vmcs_idt_vectoring_info();
- if (idtvec_info & VMCS_IDT_VEC_VALID) {
- idtvec_info &= ~(1 << 12); /* clear undefined bit */
- vmcs_write(VMCS_ENTRY_INTR_INFO, idtvec_info);
- if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) {
- idtvec_err = vmcs_idt_vectoring_err();
- vmcs_write(VMCS_ENTRY_EXCEPTION_ERROR,
- idtvec_err);
- }
- /*
- * If 'virtual NMIs' are being used and the VM-exit
- * happened while injecting an NMI during the previous
- * VM-entry, then clear "blocking by NMI" in the Guest
- * Interruptibility-state.
- */
- if ((idtvec_info & VMCS_INTR_T_MASK) ==
- VMCS_INTR_T_NMI) {
- vmx_clear_nmi_blocking(vmx, vcpu);
- }
+ idtvec_info = vmcs_idt_vectoring_info();
+ if (idtvec_info & VMCS_IDT_VEC_VALID) {
+ idtvec_info &= ~(1 << 12); /* clear undefined bit */
+ exitintinfo = idtvec_info;
+ if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) {
+ idtvec_err = vmcs_idt_vectoring_err();
+ exitintinfo |= (uint64_t)idtvec_err << 32;
+ }
+ error = vm_exit_intinfo(vmx->vm, vcpu, exitintinfo);
+ KASSERT(error == 0, ("%s: vm_set_intinfo error %d",
+ __func__, error));
+
+ /*
+ * If 'virtual NMIs' are being used and the VM-exit
+ * happened while injecting an NMI during the previous
+ * VM-entry, then clear "blocking by NMI" in the
+ * Guest Interruptibility-State so the NMI can be
+ * reinjected on the subsequent VM-entry.
+ *
+ * However, if the NMI was being delivered through a task
+ * gate, then the new task must start execution with NMIs
+ * blocked so don't clear NMI blocking in this case.
+ */
+ intr_type = idtvec_info & VMCS_INTR_T_MASK;
+ if (intr_type == VMCS_INTR_T_NMI) {
+ if (reason != EXIT_REASON_TASK_SWITCH)
+ vmx_clear_nmi_blocking(vmx, vcpu);
+ else
+ vmx_assert_nmi_blocking(vmx, vcpu);
+ }
+
+ /*
+ * Update VM-entry instruction length if the event being
+ * delivered was a software interrupt or software exception.
+ */
+ if (intr_type == VMCS_INTR_T_SWINTR ||
+ intr_type == VMCS_INTR_T_PRIV_SWEXCEPTION ||
+ intr_type == VMCS_INTR_T_SWEXCEPTION) {
vmcs_write(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length);
}
- default:
- idtvec_info = 0;
- break;
}
switch (reason) {
+ case EXIT_REASON_TASK_SWITCH:
+ ts = &vmexit->u.task_switch;
+ ts->tsssel = qual & 0xffff;
+ ts->reason = vmx_task_switch_reason(qual);
+ ts->ext = 0;
+ ts->errcode_valid = 0;
+ vmx_paging_info(&ts->paging);
+ /*
+ * If the task switch was due to a CALL, JMP, IRET, software
+ * interrupt (INT n) or software exception (INT3, INTO),
+ * then the saved %rip references the instruction that caused
+ * the task switch. The instruction length field in the VMCS
+ * is valid in this case.
+ *
+ * In all other cases (e.g., NMI, hardware exception) the
+ * saved %rip is one that would have been saved in the old TSS
+ * had the task switch completed normally so the instruction
+ * length field is not needed in this case and is explicitly
+ * set to 0.
+ */
+ if (ts->reason == TSR_IDT_GATE) {
+ KASSERT(idtvec_info & VMCS_IDT_VEC_VALID,
+ ("invalid idtvec_info %#x for IDT task switch",
+ idtvec_info));
+ intr_type = idtvec_info & VMCS_INTR_T_MASK;
+ if (intr_type != VMCS_INTR_T_SWINTR &&
+ intr_type != VMCS_INTR_T_SWEXCEPTION &&
+ intr_type != VMCS_INTR_T_PRIV_SWEXCEPTION) {
+ /* Task switch triggered by external event */
+ ts->ext = 1;
+ vmexit->inst_length = 0;
+ if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) {
+ ts->errcode_valid = 1;
+ ts->errcode = vmcs_idt_vectoring_err();
+ }
+ }
+ }
+ vmexit->exitcode = VM_EXITCODE_TASK_SWITCH;
+ VCPU_CTR4(vmx->vm, vcpu, "task switch reason %d, tss 0x%04x, "
+ "%s errcode 0x%016lx", ts->reason, ts->tsssel,
+ ts->ext ? "external" : "internal",
+ ((uint64_t)ts->errcode << 32) | ts->errcode_valid);
+ break;
case EXIT_REASON_CR_ACCESS:
vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CR_ACCESS, 1);
switch (qual & 0xf) {
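The reworked pre-switch block above replaces direct re-injection with vm_exit_intinfo(), handing it a single 64-bit value that carries the IDT-vectoring information in the low word and, when valid, the error code in the high word. A standalone sketch of that packing (masks are local stand-ins; bit 12 is cleared because it is undefined, as the comment in the diff notes):

#include <stdint.h>
#include <stdio.h>

#define	IDT_VEC_VALID		(1u << 31)
#define	IDT_VEC_ERRCODE_VALID	(1u << 11)

static uint64_t
pack_exitintinfo(uint32_t idtvec_info, uint32_t idtvec_err)
{
	uint64_t exitintinfo;

	if ((idtvec_info & IDT_VEC_VALID) == 0)
		return (0);

	idtvec_info &= ~(1u << 12);	/* clear undefined bit */
	exitintinfo = idtvec_info;
	if (idtvec_info & IDT_VEC_ERRCODE_VALID)
		exitintinfo |= (uint64_t)idtvec_err << 32;
	return (exitintinfo);
}

int
main(void)
{
	/* #GP (vector 13), hardware exception type, with an error code. */
	uint32_t info = IDT_VEC_VALID | IDT_VEC_ERRCODE_VALID | (3u << 8) | 13;

	printf("exitintinfo: %#llx\n",
	    (unsigned long long)pack_exitintinfo(info, 0x10));
	return (0);
}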
@@ -2179,6 +2341,7 @@ vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
* the guest.
*
* See "Resuming Guest Software after Handling an Exception".
+ * See "Information for VM Exits Due to Vectored Events".
*/
if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 &&
(intr_info & 0xff) != IDT_DF &&
@@ -2396,6 +2559,13 @@ vmx_run(void *arg, int vcpu, register_t startrip, pmap_t pmap,
* pmap_invalidate_ept().
*/
disable_intr();
+ vmx_inject_interrupts(vmx, vcpu, vlapic);
+
+ /*
+ * Check for vcpu suspension after injecting events because
+ * vmx_inject_interrupts() can suspend the vcpu due to a
+ * triple fault.
+ */
if (vcpu_suspended(suspend_cookie)) {
enable_intr();
vm_exit_suspended(vmx->vm, vcpu, vmcs_guest_rip());
@@ -2408,7 +2578,7 @@ vmx_run(void *arg, int vcpu, register_t startrip, pmap_t pmap,
break;
}
- if (curthread->td_flags & (TDF_ASTPENDING | TDF_NEEDRESCHED)) {
+ if (vcpu_should_yield(vm, vcpu)) {
enable_intr();
vm_exit_astpending(vmx->vm, vcpu, vmcs_guest_rip());
vmx_astpending_trace(vmx, vcpu, vmexit->rip);
@@ -2416,7 +2586,6 @@ vmx_run(void *arg, int vcpu, register_t startrip, pmap_t pmap,
break;
}
- vmx_inject_interrupts(vmx, vcpu, vlapic);
vmx_run_trace(vmx, vcpu);
rc = vmx_enter_guest(vmxctx, vmx, launched);
@@ -2584,6 +2753,7 @@ vmx_setreg(void *arg, int vcpu, int reg, uint64_t val)
{
int error, hostcpu, running, shadow;
uint64_t ctls;
+ pmap_t pmap;
struct vmx *vmx = arg;
running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
@@ -2621,6 +2791,18 @@ vmx_setreg(void *arg, int vcpu, int reg, uint64_t val)
error = vmcs_setreg(&vmx->vmcs[vcpu], running,
VMCS_IDENT(shadow), val);
}
+
+ if (reg == VM_REG_GUEST_CR3) {
+ /*
+ * Invalidate the guest vcpu's TLB mappings to emulate
+ * the behavior of updating %cr3.
+ *
+ * XXX the processor retains global mappings when %cr3
+ * is updated but vmx_invvpid() does not.
+ */
+ pmap = vmx->ctx[vcpu].pmap;
+ vmx_invvpid(vmx, vcpu, pmap, running);
+ }
}
return (error);