author	neel <neel@FreeBSD.org>	2015-01-06 19:04:02 +0000
committer	neel <neel@FreeBSD.org>	2015-01-06 19:04:02 +0000
commit	e72e75f02d5f7f68423a86185d451eadc25b38b0 (patch)
tree	876a608472cf41b64aa948e1dadca2a58b3b691e /sys/amd64/vmm/intel
parent	0ef5ebd1f79ac2ea8a513919098fce6d5c6fda18 (diff)
Clear blocking due to STI or MOV SS in the hypervisor when an instruction is
emulated or when the vcpu incurs an exception. This matches the CPU behavior.

Remove special case code in HLT processing that was clearing the interrupt
shadow. This is now redundant because the interrupt shadow is always cleared
when the vcpu is resumed after an instruction is emulated.

Reported by:	David Reed (david.reed@tidalscale.com)
MFC after:	2 weeks
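
The mechanism is easiest to see in isolation: the vcpu remembers the rip it
expects the guest to resume at ('nextrip'); if the hypervisor hands back a
different rip (because an instruction was emulated or an exception was
injected), any STI/MOV SS interrupt shadow recorded in the VMCS is stale and
gets dropped, just as the hardware would drop it after executing one more
instruction. Below is a minimal, self-contained C sketch of that logic,
mirroring the vmx_inject_interrupts() hunk in the diff; the VMCS accessors
and bit values here are illustrative stand-ins, not the kernel's definitions.

/*
 * Minimal sketch of the interrupt-shadow logic this commit adds.  The
 * VMCS accessors and the bit values are stand-ins for illustration.
 */
#include <stdint.h>
#include <stdio.h>

#define	VMCS_GUEST_INTERRUPTIBILITY	0x4824	/* VMCS field encoding */

/* STI and MOV SS blocking bits of the guest interruptibility state. */
#define	STI_BLOCKING	0x1
#define	MOVSS_BLOCKING	0x2
#define	HWINTR_BLOCKING	(STI_BLOCKING | MOVSS_BLOCKING)

static uint32_t interruptibility = STI_BLOCKING;	/* stand-in VMCS */

static uint32_t
vmcs_read(uint32_t field)
{
	(void)field;
	return (interruptibility);
}

static void
vmcs_write(uint32_t field, uint32_t val)
{
	(void)field;
	interruptibility = val;
}

struct vmxstate {
	uint64_t nextrip;	/* rip the guest would execute next */
};

/*
 * Called with the rip the vcpu is about to resume at.  A mismatch with
 * 'nextrip' (saved at the last VM-exit) means the hypervisor emulated an
 * instruction or injected an exception in between, so any STI/MOV SS
 * shadow recorded in the VMCS is stale and must be cleared, matching
 * what the CPU itself would have done.
 */
static void
clear_stale_intr_shadow(struct vmxstate *st, uint64_t guestrip)
{
	uint32_t gi;

	if (st->nextrip != guestrip) {
		gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
		if (gi & HWINTR_BLOCKING) {
			printf("shadow cleared on rip change: %#lx/%#lx\n",
			    (unsigned long)st->nextrip,
			    (unsigned long)guestrip);
			vmcs_write(VMCS_GUEST_INTERRUPTIBILITY,
			    gi & ~HWINTR_BLOCKING);
		}
	}
}

int
main(void)
{
	struct vmxstate st = { .nextrip = ~(uint64_t)0 }; /* as in vmx_vminit() */

	/* rip diverged from nextrip: the stale shadow is dropped. */
	clear_stale_intr_shadow(&st, 0x1000);
	return (0);
}

Initializing nextrip to ~0 in vmx_vminit() guarantees a mismatch on the very
first VM entry, so a shadow can never be inherited from uninitialized state.
In the real vmx_run() loop, 'rip' holds the value saved by hardware at each
VM-exit; 'nextrip' is refreshed from it after every exit, and the next
iteration re-enters with whatever rip the exit handlers settled on
(vmexit->rip), so the comparison fires exactly when emulation moved the guest.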
Diffstat (limited to 'sys/amd64/vmm/intel')
 sys/amd64/vmm/intel/vmx.c | 38 ++++++++++++++++++++++++++++++++------------
 sys/amd64/vmm/intel/vmx.h |  1 +
 2 files changed, 29 insertions(+), 10 deletions(-)
diff --git a/sys/amd64/vmm/intel/vmx.c b/sys/amd64/vmm/intel/vmx.c
index c3dd04e..a3fc16a 100644
--- a/sys/amd64/vmm/intel/vmx.c
+++ b/sys/amd64/vmm/intel/vmx.c
@@ -941,6 +941,7 @@ vmx_vminit(struct vm *vm, pmap_t pmap)
vmx->cap[i].proc_ctls = procbased_ctls;
vmx->cap[i].proc_ctls2 = procbased_ctls2;
+ vmx->state[i].nextrip = ~0;
vmx->state[i].lastcpu = NOCPU;
vmx->state[i].vpid = vpid[i];
@@ -1169,12 +1170,24 @@ vmx_inject_nmi(struct vmx *vmx, int vcpu)
}
static void
-vmx_inject_interrupts(struct vmx *vmx, int vcpu, struct vlapic *vlapic)
+vmx_inject_interrupts(struct vmx *vmx, int vcpu, struct vlapic *vlapic,
+ uint64_t guestrip)
{
int vector, need_nmi_exiting, extint_pending;
uint64_t rflags, entryinfo;
uint32_t gi, info;
+ if (vmx->state[vcpu].nextrip != guestrip) {
+ gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
+ if (gi & HWINTR_BLOCKING) {
+ VCPU_CTR2(vmx->vm, vcpu, "Guest interrupt blocking "
+ "cleared due to rip change: %#lx/%#lx",
+ vmx->state[vcpu].nextrip, guestrip);
+ gi &= ~HWINTR_BLOCKING;
+ vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
+ }
+ }
+
if (vm_entry_intinfo(vmx->vm, vcpu, &entryinfo)) {
KASSERT((entryinfo & VMCS_INTR_VALID) != 0, ("%s: entry "
"intinfo is not valid: %#lx", __func__, entryinfo));
@@ -2540,7 +2553,7 @@ vmx_exit_handle_nmi(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit)
}
static int
-vmx_run(void *arg, int vcpu, register_t startrip, pmap_t pmap,
+vmx_run(void *arg, int vcpu, register_t rip, pmap_t pmap,
void *rendezvous_cookie, void *suspend_cookie)
{
int rc, handled, launched;
@@ -2550,7 +2563,6 @@ vmx_run(void *arg, int vcpu, register_t startrip, pmap_t pmap,
struct vmcs *vmcs;
struct vm_exit *vmexit;
struct vlapic *vlapic;
- uint64_t rip;
uint32_t exit_reason;
vmx = arg;
@@ -2578,11 +2590,13 @@ vmx_run(void *arg, int vcpu, register_t startrip, pmap_t pmap,
*/
vmcs_write(VMCS_HOST_CR3, rcr3());
- vmcs_write(VMCS_GUEST_RIP, startrip);
+ vmcs_write(VMCS_GUEST_RIP, rip);
vmx_set_pcpu_defaults(vmx, vcpu, pmap);
do {
- handled = UNHANDLED;
+ KASSERT(vmcs_guest_rip() == rip, ("%s: vmcs guest rip mismatch "
+ "%#lx/%#lx", __func__, vmcs_guest_rip(), rip));
+	handled = UNHANDLED;
/*
* Interrupts are disabled from this point on until the
* guest starts executing. This is done for the following
@@ -2602,7 +2616,7 @@ vmx_run(void *arg, int vcpu, register_t startrip, pmap_t pmap,
* pmap_invalidate_ept().
*/
disable_intr();
- vmx_inject_interrupts(vmx, vcpu, vlapic);
+ vmx_inject_interrupts(vmx, vcpu, vlapic, rip);
/*
* Check for vcpu suspension after injecting events because
@@ -2611,20 +2625,20 @@ vmx_run(void *arg, int vcpu, register_t startrip, pmap_t pmap,
*/
if (vcpu_suspended(suspend_cookie)) {
enable_intr();
- vm_exit_suspended(vmx->vm, vcpu, vmcs_guest_rip());
+ vm_exit_suspended(vmx->vm, vcpu, rip);
break;
}
if (vcpu_rendezvous_pending(rendezvous_cookie)) {
enable_intr();
- vm_exit_rendezvous(vmx->vm, vcpu, vmcs_guest_rip());
+ vm_exit_rendezvous(vmx->vm, vcpu, rip);
break;
}
if (vcpu_should_yield(vm, vcpu)) {
enable_intr();
- vm_exit_astpending(vmx->vm, vcpu, vmcs_guest_rip());
- vmx_astpending_trace(vmx, vcpu, vmexit->rip);
+ vm_exit_astpending(vmx->vm, vcpu, rip);
+ vmx_astpending_trace(vmx, vcpu, rip);
handled = HANDLED;
break;
}
@@ -2638,6 +2652,9 @@ vmx_run(void *arg, int vcpu, register_t startrip, pmap_t pmap,
vmexit->u.vmx.exit_reason = exit_reason = vmcs_exit_reason();
vmexit->u.vmx.exit_qualification = vmcs_exit_qualification();
+ /* Update 'nextrip' */
+ vmx->state[vcpu].nextrip = rip;
+
if (rc == VMX_GUEST_VMEXIT) {
vmx_exit_handle_nmi(vmx, vcpu, vmexit);
enable_intr();
@@ -2648,6 +2665,7 @@ vmx_run(void *arg, int vcpu, register_t startrip, pmap_t pmap,
}
launched = 1;
vmx_exit_trace(vmx, vcpu, rip, exit_reason, handled);
+ rip = vmexit->rip;
} while (handled);
/*
diff --git a/sys/amd64/vmm/intel/vmx.h b/sys/amd64/vmm/intel/vmx.h
index 2124554..cfd2599 100644
--- a/sys/amd64/vmm/intel/vmx.h
+++ b/sys/amd64/vmm/intel/vmx.h
@@ -78,6 +78,7 @@ struct vmxcap {
};
struct vmxstate {
+ uint64_t nextrip; /* next instruction to be executed by guest */
int lastcpu; /* host cpu that this 'vcpu' last ran on */
uint16_t vpid;
};
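
For reference, the HWINTR_BLOCKING mask used in the vmx.c hunk covers the STI
and MOV SS bits of the VMCS guest interruptibility state. The bit layout
below follows the Intel SDM (Vol. 3, guest non-register state); the macro
names are assumed to match FreeBSD's sys/amd64/vmm/intel/vmcs.h of the period
and should be checked against the header itself rather than taken from here.

/*
 * Guest interruptibility-state bits per the Intel SDM.  Macro names
 * assumed from vmcs.h.
 */
#define	VMCS_INTERRUPTIBILITY_STI_BLOCKING	(1 << 0) /* after STI */
#define	VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING	(1 << 1) /* after MOV/POP SS */
#define	VMCS_INTERRUPTIBILITY_SMI_BLOCKING	(1 << 2)
#define	VMCS_INTERRUPTIBILITY_NMI_BLOCKING	(1 << 3)

/*
 * Only the single-instruction STI/MOV SS shadows are cleared by this
 * change; NMI blocking is tracked separately and is not touched.
 */
#define	HWINTR_BLOCKING	(VMCS_INTERRUPTIBILITY_STI_BLOCKING |	\
			 VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)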