-rw-r--r--   sys/amd64/vmm/amd/svm.c         13
-rw-r--r--   sys/amd64/vmm/amd/svm_softc.h    1
-rw-r--r--   sys/amd64/vmm/intel/vmx.c       38
-rw-r--r--   sys/amd64/vmm/intel/vmx.h        1
-rw-r--r--   sys/amd64/vmm/vmm.c             29
5 files changed, 55 insertions, 27 deletions
diff --git a/sys/amd64/vmm/amd/svm.c b/sys/amd64/vmm/amd/svm.c
index f1cb7d6..3a96e56 100644
--- a/sys/amd64/vmm/amd/svm.c
+++ b/sys/amd64/vmm/amd/svm.c
@@ -554,6 +554,7 @@ svm_vminit(struct vm *vm, pmap_t pmap)
pml4_pa = svm_sc->nptp;
for (i = 0; i < VM_MAXCPU; i++) {
vcpu = svm_get_vcpu(svm_sc, i);
+ vcpu->nextrip = ~0;
vcpu->lastcpu = NOCPU;
vcpu->vmcb_pa = vtophys(&vcpu->vmcb);
vmcb_init(svm_sc, i, iopm_pa, msrpm_pa, pml4_pa);
@@ -1479,15 +1480,24 @@ svm_inj_interrupts(struct svm_softc *sc, int vcpu, struct vlapic *vlapic)
{
struct vmcb_ctrl *ctrl;
struct vmcb_state *state;
+ struct svm_vcpu *vcpustate;
uint8_t v_tpr;
int vector, need_intr_window, pending_apic_vector;
state = svm_get_vmcb_state(sc, vcpu);
ctrl = svm_get_vmcb_ctrl(sc, vcpu);
+ vcpustate = svm_get_vcpu(sc, vcpu);
need_intr_window = 0;
pending_apic_vector = 0;
+ if (vcpustate->nextrip != state->rip) {
+ ctrl->intr_shadow = 0;
+ VCPU_CTR2(sc->vm, vcpu, "Guest interrupt blocking "
+ "cleared due to rip change: %#lx/%#lx",
+ vcpustate->nextrip, state->rip);
+ }
+
/*
* Inject pending events or exceptions for this vcpu.
*
@@ -1958,6 +1968,9 @@ svm_vmrun(void *arg, int vcpu, register_t rip, pmap_t pmap,
/* #VMEXIT disables interrupts so re-enable them here. */
enable_gintr();
+ /* Update 'nextrip' */
+ vcpustate->nextrip = state->rip;
+
/* Handle #VMEXIT and if required return to user space. */
handled = svm_vmexit(svm_sc, vcpu, vmexit);
} while (handled);
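
Taken together, the svm.c hunks above add one piece of bookkeeping: 'nextrip' records, at every #VMEXIT, the RIP the guest would execute next, and svm_inj_interrupts() compares that against the RIP about to be entered. A mismatch means the instruction pointer was moved by something other than normal guest execution (for example, a register write from userspace), so any interrupt shadow latched in the VMCB is stale and is dropped. The following standalone C sketch models that logic; toy_vcpu and toy_vm_entry are invented names, not vmm code:

#include <stdint.h>
#include <stdio.h>

struct toy_vcpu {
	uint64_t nextrip;	/* RIP recorded at the last #VMEXIT */
	int	 intr_shadow;	/* interrupt blocking by STI/MOV SS */
};

static void
toy_vm_entry(struct toy_vcpu *vcpu, uint64_t rip)
{
	/*
	 * If the entry RIP differs from the RIP saved at the last exit,
	 * the instruction pointer was rewritten behind the guest's back,
	 * so a recorded interrupt shadow no longer applies.
	 */
	if (vcpu->nextrip != rip && vcpu->intr_shadow) {
		printf("shadow cleared: %#jx/%#jx\n",
		    (uintmax_t)vcpu->nextrip, (uintmax_t)rip);
		vcpu->intr_shadow = 0;
	}

	/* "Run" one guest instruction and record where it stopped. */
	vcpu->nextrip = rip + 1;
}

int
main(void)
{
	struct toy_vcpu vcpu = { .nextrip = ~0, .intr_shadow = 0 };

	toy_vm_entry(&vcpu, 0x1000);	/* first entry: ~0 matches nothing */
	vcpu.intr_shadow = 1;		/* the guest executed "sti" */
	toy_vm_entry(&vcpu, 0x1001);	/* RIP as expected: shadow kept */
	toy_vm_entry(&vcpu, 0x2000);	/* RIP rewritten: shadow cleared */
	return (0);
}

Initializing nextrip to ~0 in svm_vminit() makes the first comparison always mismatch, since ~0 is not a valid guest RIP.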
diff --git a/sys/amd64/vmm/amd/svm_softc.h b/sys/amd64/vmm/amd/svm_softc.h
index a5bb57c..de0c3f7 100644
--- a/sys/amd64/vmm/amd/svm_softc.h
+++ b/sys/amd64/vmm/amd/svm_softc.h
@@ -45,6 +45,7 @@ struct svm_vcpu {
struct vmcb vmcb; /* hardware saved vcpu context */
struct svm_regctx swctx; /* software saved vcpu context */
uint64_t vmcb_pa; /* VMCB physical address */
+ uint64_t nextrip; /* next instruction to be executed by guest */
int lastcpu; /* host cpu that the vcpu last ran on */
uint32_t dirty; /* state cache bits that must be cleared */
long eptgen; /* pmap->pm_eptgen when the vcpu last ran */
diff --git a/sys/amd64/vmm/intel/vmx.c b/sys/amd64/vmm/intel/vmx.c
index c3dd04e..a3fc16a 100644
--- a/sys/amd64/vmm/intel/vmx.c
+++ b/sys/amd64/vmm/intel/vmx.c
@@ -941,6 +941,7 @@ vmx_vminit(struct vm *vm, pmap_t pmap)
vmx->cap[i].proc_ctls = procbased_ctls;
vmx->cap[i].proc_ctls2 = procbased_ctls2;
+ vmx->state[i].nextrip = ~0;
vmx->state[i].lastcpu = NOCPU;
vmx->state[i].vpid = vpid[i];
@@ -1169,12 +1170,24 @@ vmx_inject_nmi(struct vmx *vmx, int vcpu)
}
static void
-vmx_inject_interrupts(struct vmx *vmx, int vcpu, struct vlapic *vlapic)
+vmx_inject_interrupts(struct vmx *vmx, int vcpu, struct vlapic *vlapic,
+ uint64_t guestrip)
{
int vector, need_nmi_exiting, extint_pending;
uint64_t rflags, entryinfo;
uint32_t gi, info;
+ if (vmx->state[vcpu].nextrip != guestrip) {
+ gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
+ if (gi & HWINTR_BLOCKING) {
+ VCPU_CTR2(vmx->vm, vcpu, "Guest interrupt blocking "
+ "cleared due to rip change: %#lx/%#lx",
+ vmx->state[vcpu].nextrip, guestrip);
+ gi &= ~HWINTR_BLOCKING;
+ vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
+ }
+ }
+
if (vm_entry_intinfo(vmx->vm, vcpu, &entryinfo)) {
KASSERT((entryinfo & VMCS_INTR_VALID) != 0, ("%s: entry "
"intinfo is not valid: %#lx", __func__, entryinfo));
@@ -2540,7 +2553,7 @@ vmx_exit_handle_nmi(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit)
}
static int
-vmx_run(void *arg, int vcpu, register_t startrip, pmap_t pmap,
+vmx_run(void *arg, int vcpu, register_t rip, pmap_t pmap,
void *rendezvous_cookie, void *suspend_cookie)
{
int rc, handled, launched;
@@ -2550,7 +2563,6 @@ vmx_run(void *arg, int vcpu, register_t startrip, pmap_t pmap,
struct vmcs *vmcs;
struct vm_exit *vmexit;
struct vlapic *vlapic;
- uint64_t rip;
uint32_t exit_reason;
vmx = arg;
@@ -2578,11 +2590,13 @@ vmx_run(void *arg, int vcpu, register_t startrip, pmap_t pmap,
*/
vmcs_write(VMCS_HOST_CR3, rcr3());
- vmcs_write(VMCS_GUEST_RIP, startrip);
+ vmcs_write(VMCS_GUEST_RIP, rip);
vmx_set_pcpu_defaults(vmx, vcpu, pmap);
do {
- handled = UNHANDLED;
+ KASSERT(vmcs_guest_rip() == rip, ("%s: vmcs guest rip mismatch "
+ "%#lx/%#lx", __func__, vmcs_guest_rip(), rip));
+ handled = UNHANDLED;
/*
* Interrupts are disabled from this point on until the
* guest starts executing. This is done for the following
@@ -2602,7 +2616,7 @@ vmx_run(void *arg, int vcpu, register_t startrip, pmap_t pmap,
* pmap_invalidate_ept().
*/
disable_intr();
- vmx_inject_interrupts(vmx, vcpu, vlapic);
+ vmx_inject_interrupts(vmx, vcpu, vlapic, rip);
/*
* Check for vcpu suspension after injecting events because
@@ -2611,20 +2625,20 @@ vmx_run(void *arg, int vcpu, register_t startrip, pmap_t pmap,
*/
if (vcpu_suspended(suspend_cookie)) {
enable_intr();
- vm_exit_suspended(vmx->vm, vcpu, vmcs_guest_rip());
+ vm_exit_suspended(vmx->vm, vcpu, rip);
break;
}
if (vcpu_rendezvous_pending(rendezvous_cookie)) {
enable_intr();
- vm_exit_rendezvous(vmx->vm, vcpu, vmcs_guest_rip());
+ vm_exit_rendezvous(vmx->vm, vcpu, rip);
break;
}
if (vcpu_should_yield(vm, vcpu)) {
enable_intr();
- vm_exit_astpending(vmx->vm, vcpu, vmcs_guest_rip());
- vmx_astpending_trace(vmx, vcpu, vmexit->rip);
+ vm_exit_astpending(vmx->vm, vcpu, rip);
+ vmx_astpending_trace(vmx, vcpu, rip);
handled = HANDLED;
break;
}
@@ -2638,6 +2652,9 @@ vmx_run(void *arg, int vcpu, register_t startrip, pmap_t pmap,
vmexit->u.vmx.exit_reason = exit_reason = vmcs_exit_reason();
vmexit->u.vmx.exit_qualification = vmcs_exit_qualification();
+ /* Update 'nextrip' */
+ vmx->state[vcpu].nextrip = rip;
+
if (rc == VMX_GUEST_VMEXIT) {
vmx_exit_handle_nmi(vmx, vcpu, vmexit);
enable_intr();
@@ -2648,6 +2665,7 @@ vmx_run(void *arg, int vcpu, register_t startrip, pmap_t pmap,
}
launched = 1;
vmx_exit_trace(vmx, vcpu, rip, exit_reason, handled);
+ rip = vmexit->rip;
} while (handled);
/*
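
The vmx.c hunks apply the same test on the Intel side, where the shadow lives in the VMCS guest interruptibility-state field and is cleared by masking off HWINTR_BLOCKING. The reworked loop also carries the guest RIP in the local 'rip' (refreshed from vmexit->rip after each exit), so the check and the nextrip update need no extra VMCS reads, and the new KASSERT pins down the invariant that the cached value matches VMCS_GUEST_RIP at the top of each iteration. A rough standalone model follows; the bit values mirror the SDM's layout of the interruptibility-state field (blocking-by-STI is bit 0, blocking-by-MOV-SS is bit 1), and fixup_interruptibility is an invented name:

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative bits following the VMCS guest interruptibility-state
 * layout described in the Intel SDM.
 */
#define	STI_BLOCKING	0x1u	/* blocking by STI */
#define	MOVSS_BLOCKING	0x2u	/* blocking by MOV SS */
#define	HWINTR_BLOCKING	(STI_BLOCKING | MOVSS_BLOCKING)

/*
 * Stand-in for the check at the top of vmx_inject_interrupts(): drop
 * stale STI/MOV SS blocking when the RIP about to be entered is not
 * the one the guest was going to execute next.
 */
static uint32_t
fixup_interruptibility(uint32_t gi, uint64_t nextrip, uint64_t guestrip)
{
	if (nextrip != guestrip && (gi & HWINTR_BLOCKING) != 0) {
		printf("blocking cleared: %#jx/%#jx\n",
		    (uintmax_t)nextrip, (uintmax_t)guestrip);
		gi &= ~HWINTR_BLOCKING;
	}
	return (gi);
}

int
main(void)
{
	uint32_t gi = STI_BLOCKING;

	/* RIP unchanged: the shadow survives into the next entry. */
	gi = fixup_interruptibility(gi, 0x1001, 0x1001);
	/* RIP rewritten: the shadow is dropped. */
	gi = fixup_interruptibility(gi, 0x1001, 0x2000);
	printf("gi = %#x\n", gi);
	return (0);
}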
diff --git a/sys/amd64/vmm/intel/vmx.h b/sys/amd64/vmm/intel/vmx.h
index 2124554..cfd2599 100644
--- a/sys/amd64/vmm/intel/vmx.h
+++ b/sys/amd64/vmm/intel/vmx.h
@@ -78,6 +78,7 @@ struct vmxcap {
};
struct vmxstate {
+ uint64_t nextrip; /* next instruction to be executed by guest */
int lastcpu; /* host cpu that this 'vcpu' last ran on */
uint16_t vpid;
};
diff --git a/sys/amd64/vmm/vmm.c b/sys/amd64/vmm/vmm.c
index 6c55271..6fd3d46 100644
--- a/sys/amd64/vmm/vmm.c
+++ b/sys/amd64/vmm/vmm.c
@@ -1109,7 +1109,7 @@ vm_handle_hlt(struct vm *vm, int vcpuid, bool intr_disabled, bool *retu)
{
struct vcpu *vcpu;
const char *wmesg;
- int error, t, vcpu_halted, vm_halted;
+ int t, vcpu_halted, vm_halted;
KASSERT(!CPU_ISSET(vcpuid, &vm->halted_cpus), ("vcpu already halted"));
@@ -1117,22 +1117,6 @@ vm_handle_hlt(struct vm *vm, int vcpuid, bool intr_disabled, bool *retu)
vcpu_halted = 0;
vm_halted = 0;
- /*
- * The typical way to halt a cpu is to execute: "sti; hlt"
- *
- * STI sets RFLAGS.IF to enable interrupts. However, the processor
- * remains in an "interrupt shadow" for an additional instruction
- * following the STI. This guarantees that "sti; hlt" sequence is
- * atomic and a pending interrupt will be recognized after the HLT.
- *
- * After the HLT emulation is done the vcpu is no longer in an
- * interrupt shadow and a pending interrupt can be injected on
- * the next entry into the guest.
- */
- error = vm_set_register(vm, vcpuid, VM_REG_GUEST_INTR_SHADOW, 0);
- KASSERT(error == 0, ("%s: error %d clearing interrupt shadow",
- __func__, error));
-
vcpu_lock(vcpu);
while (1) {
/*
@@ -1741,6 +1725,7 @@ int
vm_inject_exception(struct vm *vm, int vcpuid, struct vm_exception *exception)
{
struct vcpu *vcpu;
+ int error;
if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
return (EINVAL);
@@ -1765,6 +1750,16 @@ vm_inject_exception(struct vm *vm, int vcpuid, struct vm_exception *exception)
return (EBUSY);
}
+ /*
+ * From section 26.6.1 "Interruptibility State" in Intel SDM:
+ *
+ * Event blocking by "STI" or "MOV SS" is cleared after guest executes
+ * one instruction or incurs an exception.
+ */
+ error = vm_set_register(vm, vcpuid, VM_REG_GUEST_INTR_SHADOW, 0);
+ KASSERT(error == 0, ("%s: error %d clearing interrupt shadow",
+ __func__, error));
+
vcpu->exception_pending = 1;
vcpu->exception = *exception;
VCPU_CTR1(vm, vcpuid, "Exception %d pending", exception->vector);
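
The vmm.c hunks relocate the shadow clearing rather than add any: vm_handle_hlt() no longer special-cases it, and vm_inject_exception() clears it for every injected exception, matching the SDM rule quoted in the new comment. The deleted comment's "sti; hlt" idiom remains the motivating case; a toy model of the ordering, with invented names:

#include <stdint.h>
#include <stdio.h>

struct toy_vcpu {
	uint64_t rip;
	int	 intr_shadow;	/* blocking by STI/MOV SS */
	int	 pending_vector;
};

/*
 * Emulating "sti" raises the shadow: the following instruction runs
 * with interrupts held off, which is what makes "sti; hlt" atomic
 * with respect to a pending interrupt.
 */
static void
emulate_sti(struct toy_vcpu *v)
{
	v->intr_shadow = 1;
	v->rip += 1;		/* STI is a one-byte instruction */
}

/*
 * Blocking by STI/MOV SS ends once the guest executes one instruction
 * or incurs an exception, so exception injection is the one place that
 * must drop the shadow, the job vm_inject_exception() takes over from
 * vm_handle_hlt() in this change.
 */
static void
inject_exception(struct toy_vcpu *v, int vector)
{
	v->intr_shadow = 0;
	v->pending_vector = vector;
}

int
main(void)
{
	struct toy_vcpu v = { .rip = 0x1000 };

	emulate_sti(&v);
	printf("after sti: shadow=%d\n", v.intr_shadow);	/* 1 */
	inject_exception(&v, 13);				/* e.g. #GP */
	printf("after #GP: shadow=%d\n", v.intr_shadow);	/* 0 */
	return (0);
}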