author		neel <neel@FreeBSD.org>	2014-09-12 06:15:20 +0000
committer	neel <neel@FreeBSD.org>	2014-09-12 06:15:20 +0000
commit		b2ca87a5d0d3d89fbe72128a3e8f2dcfe15cbaaf (patch)
tree		1bd8111e0925664f03e1eea50076c8625f732f3f /sys/amd64/vmm
parent		cc95d45388990b1b28acfc90e86a926760f01f16 (diff)
Optimize the common case of injecting an interrupt into a vcpu after a HLT
by explicitly moving it out of the interrupt shadow.

The hypervisor is done "executing" the HLT and by definition this moves the
vcpu out of the 1-instruction interrupt shadow.

Prior to this change the interrupt would be held pending because the VMCS
guest-interruptibility-state would indicate that "blocking by STI" was in
effect. This resulted in an unnecessary round trip into the guest before
the pending interrupt could be injected.

Reviewed by:	grehan
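For background, the shadow being cleared comes from the guest's own idle
sequence. A minimal sketch of that guest-side idiom (illustrative only, not
part of this commit; the function name is hypothetical):

/*
 * Typical guest idle loop. STI sets RFLAGS.IF, but the CPU remains in a
 * 1-instruction interrupt shadow until the next instruction (HLT here)
 * completes, so "sti; hlt" is effectively atomic: an interrupt arriving
 * between the two instructions still wakes the halted CPU.
 */
static void
guest_idle(void)
{

	__asm __volatile("sti; hlt");
}

When the hypervisor finishes emulating that HLT, the guest has by definition
completed the shadowed instruction, which is why the change below can clear
the shadow unconditionally.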
Diffstat (limited to 'sys/amd64/vmm')
-rw-r--r--	sys/amd64/vmm/intel/vmx.c	46
-rw-r--r--	sys/amd64/vmm/vmm.c	18
2 files changed, 63 insertions(+), 1 deletions(-)
diff --git a/sys/amd64/vmm/intel/vmx.c b/sys/amd64/vmm/intel/vmx.c
index b2c5702..b7ecf2b 100644
--- a/sys/amd64/vmm/intel/vmx.c
+++ b/sys/amd64/vmm/intel/vmx.c
@@ -2712,6 +2712,46 @@ vmxctx_setreg(struct vmxctx *vmxctx, int reg, uint64_t val)
 }
 
 static int
+vmx_get_intr_shadow(struct vmx *vmx, int vcpu, int running, uint64_t *retval)
+{
+	uint64_t gi;
+	int error;
+
+	error = vmcs_getreg(&vmx->vmcs[vcpu], running,
+	    VMCS_IDENT(VMCS_GUEST_INTERRUPTIBILITY), &gi);
+	*retval = (gi & HWINTR_BLOCKING) ? 1 : 0;
+	return (error);
+}
+
+static int
+vmx_modify_intr_shadow(struct vmx *vmx, int vcpu, int running, uint64_t val)
+{
+	struct vmcs *vmcs;
+	uint64_t gi;
+	int error, ident;
+
+	/*
+	 * Forcing the vcpu into an interrupt shadow is not supported.
+	 */
+	if (val) {
+		error = EINVAL;
+		goto done;
+	}
+
+	vmcs = &vmx->vmcs[vcpu];
+	ident = VMCS_IDENT(VMCS_GUEST_INTERRUPTIBILITY);
+	error = vmcs_getreg(vmcs, running, ident, &gi);
+	if (error == 0) {
+		gi &= ~HWINTR_BLOCKING;
+		error = vmcs_setreg(vmcs, running, ident, gi);
+	}
+done:
+	VCPU_CTR2(vmx->vm, vcpu, "Setting intr_shadow to %#lx %s", val,
+	    error ? "failed" : "succeeded");
+	return (error);
+}
+
+static int
 vmx_shadow_reg(int reg)
 {
 	int shreg;
@@ -2742,6 +2782,9 @@ vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval)
 	if (running && hostcpu != curcpu)
 		panic("vmx_getreg: %s%d is running", vm_name(vmx->vm), vcpu);
 
+	if (reg == VM_REG_GUEST_INTR_SHADOW)
+		return (vmx_get_intr_shadow(vmx, vcpu, running, retval));
+
 	if (vmxctx_getreg(&vmx->ctx[vcpu], reg, retval) == 0)
 		return (0);
@@ -2760,6 +2803,9 @@ vmx_setreg(void *arg, int vcpu, int reg, uint64_t val)
 	if (running && hostcpu != curcpu)
 		panic("vmx_setreg: %s%d is running", vm_name(vmx->vm), vcpu);
 
+	if (reg == VM_REG_GUEST_INTR_SHADOW)
+		return (vmx_modify_intr_shadow(vmx, vcpu, running, val));
+
 	if (vmxctx_setreg(&vmx->ctx[vcpu], reg, val) == 0)
 		return (0);
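With the two hunks above, the interrupt shadow is exposed as the
pseudo-register VM_REG_GUEST_INTR_SHADOW, intercepted ahead of the ordinary
register lookup. A caller outside the VMX backend would reach it through
vm_get_register()/vm_set_register(), roughly as in this sketch (the helper
clear_intr_shadow() is hypothetical; the register APIs are the ones used in
the vmm.c hunk below):

/*
 * Hypothetical caller: check whether a vcpu sits in an interrupt shadow
 * and, if so, move it out. Per vmx_get_intr_shadow() above, the register
 * reads back as 1 when HWINTR_BLOCKING is set, and only 0 may be written.
 */
static int
clear_intr_shadow(struct vm *vm, int vcpuid)
{
	uint64_t shadow;
	int error;

	error = vm_get_register(vm, vcpuid, VM_REG_GUEST_INTR_SHADOW, &shadow);
	if (error == 0 && shadow != 0)
		error = vm_set_register(vm, vcpuid, VM_REG_GUEST_INTR_SHADOW, 0);
	return (error);
}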
diff --git a/sys/amd64/vmm/vmm.c b/sys/amd64/vmm/vmm.c
index ae67c58..f7c9ce4 100644
--- a/sys/amd64/vmm/vmm.c
+++ b/sys/amd64/vmm/vmm.c
@@ -1090,7 +1090,7 @@ vm_handle_hlt(struct vm *vm, int vcpuid, bool intr_disabled, bool *retu)
 {
 	struct vcpu *vcpu;
 	const char *wmesg;
-	int t, vcpu_halted, vm_halted;
+	int error, t, vcpu_halted, vm_halted;
 
 	KASSERT(!CPU_ISSET(vcpuid, &vm->halted_cpus), ("vcpu already halted"));
@@ -1098,6 +1098,22 @@ vm_handle_hlt(struct vm *vm, int vcpuid, bool intr_disabled, bool *retu)
 	vcpu_halted = 0;
 	vm_halted = 0;
 
+	/*
+	 * The typical way to halt a cpu is to execute: "sti; hlt"
+	 *
+	 * STI sets RFLAGS.IF to enable interrupts. However, the processor
+	 * remains in an "interrupt shadow" for an additional instruction
+	 * following the STI. This guarantees that "sti; hlt" sequence is
+	 * atomic and a pending interrupt will be recognized after the HLT.
+	 *
+	 * After the HLT emulation is done the vcpu is no longer in an
+	 * interrupt shadow and a pending interrupt can be injected on
+	 * the next entry into the guest.
+	 */
+	error = vm_set_register(vm, vcpuid, VM_REG_GUEST_INTR_SHADOW, 0);
+	KASSERT(error == 0, ("%s: error %d clearing interrupt shadow",
+	    __func__, error));
+
 	vcpu_lock(vcpu);
 	while (1) {
 		/*
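For reference, HWINTR_BLOCKING masks the guest-interruptibility-state bits
that can block a hardware interrupt. The bit positions follow the Intel SDM;
treating these as the vmcs.h definitions behind the macros used above is an
assumption:

/* Guest-interruptibility-state bits (Intel SDM, Vol. 3C). */
#define	VMCS_INTERRUPTIBILITY_STI_BLOCKING	(1 << 0) /* blocking by STI */
#define	VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING	(1 << 1) /* blocking by MOV SS */

/* Assumed composite: the bits vmx_modify_intr_shadow() clears. */
#define	HWINTR_BLOCKING		(VMCS_INTERRUPTIBILITY_STI_BLOCKING |	\
				 VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)

Clearing both bits after HLT emulation is consistent with the commit message:
neither an STI shadow nor a MOV-SS shadow can extend past the instruction
that has just been emulated.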