author     neel <neel@FreeBSD.org>  2014-09-16 03:31:40 +0000
committer  neel <neel@FreeBSD.org>  2014-09-16 03:31:40 +0000
commit     97bffd44f67b710ce5f834be181e1edf14fcd978 (patch)
tree       4f797e62b09e86bca90ef9678e28a5629dd5475a /sys/amd64/vmm
parent     cbc92dc7092c681df37cd3a57e9c5379d05abb4f (diff)
Use V_IRQ, V_INTR_VECTOR and V_TPR to offload APIC interrupt delivery to the
processor. Briefly, the hypervisor sets V_INTR_VECTOR to the APIC vector and
sets V_IRQ to 1 to indicate a pending interrupt. The hardware then takes care
of injecting this vector when the guest is able to receive it.

Legacy PIC interrupts are still delivered via the event injection mechanism,
because the vector injected by the PIC must reflect the state of its pins at
the time the CPU is ready to accept the interrupt.

Accesses to the TPR via %CR8 are handled entirely in hardware, so the
emulated TPR must be synced to V_TPR after a #VMEXIT. The guest can also
modify the TPR via the memory-mapped APIC; those writes are emulated by the
hypervisor, so V_TPR must be synced with the emulated TPR before a VMRUN.

Reviewed by:	Anish Gupta (akgupt3@gmail.com)
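The flow described above can be illustrated with a short sketch. This is not
the committed code (that follows in the diff below); the struct is a
simplified stand-in for the VMCB control area in sys/amd64/vmm/amd/vmcb.h,
with field names mirroring the ones used in the patch:

	#include <stdint.h>
	#include <stdbool.h>

	/* Simplified stand-in for the VMCB control-area fields. */
	struct vmcb_ctrl_sketch {
		uint8_t	v_tpr;		/* task priority, mirrors vlapic %CR8 */
		uint8_t	v_irq;		/* 1 = virtual interrupt pending */
		uint8_t	v_intr_prio;	/* priority class of pending vector */
		uint8_t	v_ign_tpr;	/* if set, deliver regardless of V_TPR */
		uint8_t	v_intr_vector;	/* vector for the hardware to inject */
	};

	/* Before VMRUN: hand an APIC vector to the hardware for delivery. */
	static void
	offload_apic_vector(struct vmcb_ctrl_sketch *ctrl, int vector)
	{
		ctrl->v_irq = 1;
		ctrl->v_ign_tpr = 0;			/* honor the guest TPR */
		ctrl->v_intr_vector = (uint8_t)vector;
		ctrl->v_intr_prio = vector >> 4;	/* APIC priority class */
	}

	/*
	 * After #VMEXIT: V_IRQ cleared while a vector is programmed means
	 * the hardware delivered it, so the vlapic ISR can be updated.
	 */
	static bool
	vector_was_delivered(const struct vmcb_ctrl_sketch *ctrl)
	{
		return (ctrl->v_intr_vector != 0 && ctrl->v_irq == 0);
	}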
Diffstat (limited to 'sys/amd64/vmm')
 -rw-r--r--  sys/amd64/vmm/amd/svm.c    | 193
 -rw-r--r--  sys/amd64/vmm/io/vlapic.c  |   8
 2 files changed, 155 insertions(+), 46 deletions(-)
diff --git a/sys/amd64/vmm/amd/svm.c b/sys/amd64/vmm/amd/svm.c
index f370003..2de59ac 100644
--- a/sys/amd64/vmm/amd/svm.c
+++ b/sys/amd64/vmm/amd/svm.c
@@ -948,6 +948,37 @@ svm_eventinject(struct svm_softc *sc, int vcpu, int intr_type, int vector,
}
static void
+svm_update_virqinfo(struct svm_softc *sc, int vcpu)
+{
+ struct vm *vm;
+ struct vlapic *vlapic;
+ struct vmcb_ctrl *ctrl;
+ int pending;
+
+ vm = sc->vm;
+ vlapic = vm_lapic(vm, vcpu);
+ ctrl = svm_get_vmcb_ctrl(sc, vcpu);
+
+ /* Update %cr8 in the emulated vlapic */
+ vlapic_set_cr8(vlapic, ctrl->v_tpr);
+
+ /*
+ * If V_IRQ indicates that the interrupt injection attempted on the
+ * last VMRUN was successful then update the vlapic accordingly.
+ */
+ if (ctrl->v_intr_vector != 0) {
+ pending = ctrl->v_irq;
+ KASSERT(ctrl->v_intr_vector >= 16, ("%s: invalid "
+ "v_intr_vector %d", __func__, ctrl->v_intr_vector));
+ KASSERT(!ctrl->v_ign_tpr, ("%s: invalid v_ign_tpr", __func__));
+ VCPU_CTR2(vm, vcpu, "v_intr_vector %d %s", ctrl->v_intr_vector,
+ pending ? "pending" : "accepted");
+ if (!pending)
+ vlapic_intr_accepted(vlapic, ctrl->v_intr_vector);
+ }
+}
+
+static void
svm_save_intinfo(struct svm_softc *svm_sc, int vcpu)
{
struct vmcb_ctrl *ctrl;
@@ -970,6 +1001,14 @@ svm_save_intinfo(struct svm_softc *svm_sc, int vcpu)
vm_exit_intinfo(svm_sc->vm, vcpu, intinfo);
}
+static __inline int
+vintr_intercept_enabled(struct svm_softc *sc, int vcpu)
+{
+
+ return (svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
+ VMCB_INTCPT_VINTR));
+}
+
static __inline void
enable_intr_window_exiting(struct svm_softc *sc, int vcpu)
{
@@ -977,14 +1016,19 @@ enable_intr_window_exiting(struct svm_softc *sc, int vcpu)
ctrl = svm_get_vmcb_ctrl(sc, vcpu);
- if (ctrl->v_irq == 0) {
- VCPU_CTR0(sc->vm, vcpu, "Enable intr window exiting");
- ctrl->v_irq = 1;
- ctrl->v_ign_tpr = 1;
- vcpu_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
- svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
- VMCB_INTCPT_VINTR);
+ if (ctrl->v_irq && ctrl->v_intr_vector == 0) {
+ KASSERT(ctrl->v_ign_tpr, ("%s: invalid v_ign_tpr", __func__));
+ KASSERT(vintr_intercept_enabled(sc, vcpu),
+ ("%s: vintr intercept should be enabled", __func__));
+ return;
}
+
+ VCPU_CTR0(sc->vm, vcpu, "Enable intr window exiting");
+ ctrl->v_irq = 1;
+ ctrl->v_ign_tpr = 1;
+ ctrl->v_intr_vector = 0;
+ vcpu_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
+ svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR);
}
static __inline void
@@ -994,13 +1038,22 @@ disable_intr_window_exiting(struct svm_softc *sc, int vcpu)
ctrl = svm_get_vmcb_ctrl(sc, vcpu);
- if (ctrl->v_irq) {
- VCPU_CTR0(sc->vm, vcpu, "Disable intr window exiting");
- ctrl->v_irq = 0;
- vcpu_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
- svm_disable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
- VMCB_INTCPT_VINTR);
+ if (!ctrl->v_irq && ctrl->v_intr_vector == 0) {
+ KASSERT(!vintr_intercept_enabled(sc, vcpu),
+ ("%s: vintr intercept should be disabled", __func__));
+ return;
}
+
+#ifdef KTR
+ if (ctrl->v_intr_vector == 0)
+ VCPU_CTR0(sc->vm, vcpu, "Disable intr window exiting");
+ else
+ VCPU_CTR0(sc->vm, vcpu, "Clearing V_IRQ interrupt injection");
+#endif
+ ctrl->v_irq = 0;
+ ctrl->v_intr_vector = 0;
+ vcpu_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
+ svm_disable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR);
}
static int
@@ -1146,6 +1199,7 @@ svm_vmexit(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
("invalid inst_length %d: code (%#lx), info1 (%#lx), info2 (%#lx)",
vmexit->inst_length, code, info1, info2));
+ svm_update_virqinfo(svm_sc, vcpu);
svm_save_intinfo(svm_sc, vcpu);
switch (code) {
@@ -1317,13 +1371,14 @@ svm_inj_interrupts(struct svm_softc *sc, int vcpu, struct vlapic *vlapic)
{
struct vmcb_ctrl *ctrl;
struct vmcb_state *state;
- int extint_pending;
- int vector, need_intr_window;
+ uint8_t v_tpr;
+ int vector, need_intr_window, pending_apic_vector;
state = svm_get_vmcb_state(sc, vcpu);
ctrl = svm_get_vmcb_ctrl(sc, vcpu);
need_intr_window = 0;
+ pending_apic_vector = 0;
/*
* Inject pending events or exceptions for this vcpu.
@@ -1378,22 +1433,40 @@ svm_inj_interrupts(struct svm_softc *sc, int vcpu, struct vlapic *vlapic)
}
}
- extint_pending = vm_extint_pending(sc->vm, vcpu);
-
- if (!extint_pending) {
- /* Ask the local apic for a vector to inject */
- if (!vlapic_pending_intr(vlapic, &vector)) {
- goto done; /* nothing to inject */
+ if (!vm_extint_pending(sc->vm, vcpu)) {
+ /*
+ * APIC interrupts are delivered using the V_IRQ offload.
+ *
+ * The primary benefit is that the hypervisor doesn't need to
+ * deal with the various conditions that inhibit interrupts.
+ * It also means that TPR changes via CR8 will be handled
+ * without any hypervisor involvement.
+ *
+ * Note that the APIC vector must remain pending in the vIRR
+ * until it is confirmed that it was delivered to the guest.
+ * This can be confirmed based on the value of V_IRQ at the
+ * next #VMEXIT (1 = pending, 0 = delivered).
+ *
+ * Also note that it is possible that another higher priority
+ * vector can become pending before this vector is delivered
+ * to the guest. This is alright because vcpu_notify_event()
+ * will send an IPI and force the vcpu to trap back into the
+ * hypervisor. The higher priority vector will be injected on
+ * the next VMRUN.
+ */
+ if (vlapic_pending_intr(vlapic, &vector)) {
+ KASSERT(vector >= 16 && vector <= 255,
+ ("invalid vector %d from local APIC", vector));
+ pending_apic_vector = vector;
}
- KASSERT(vector >= 16 && vector <= 255,
- ("invalid vector %d from local APIC", vector));
- } else {
- /* Ask the legacy pic for a vector to inject */
- vatpic_pending_intr(sc->vm, &vector);
- KASSERT(vector >= 0 && vector <= 255,
- ("invalid vector %d from local APIC", vector));
+ goto done;
}
+ /* Ask the legacy pic for a vector to inject */
+ vatpic_pending_intr(sc->vm, &vector);
+ KASSERT(vector >= 0 && vector <= 255, ("invalid vector %d from INTR",
+ vector));
+
/*
* If the guest has disabled interrupts or is in an interrupt shadow
* then we cannot inject the pending interrupt.
@@ -1419,26 +1492,58 @@ svm_inj_interrupts(struct svm_softc *sc, int vcpu, struct vlapic *vlapic)
goto done;
}
+ /*
+ * Legacy PIC interrupts are delivered via the event injection
+ * mechanism.
+ */
svm_eventinject(sc, vcpu, VMCB_EVENTINJ_TYPE_INTR, vector, 0, false);
- if (!extint_pending) {
- /* Update the Local APIC ISR */
- vlapic_intr_accepted(vlapic, vector);
- } else {
- vm_extint_clear(sc->vm, vcpu);
- vatpic_intr_accepted(sc->vm, vector);
+ vm_extint_clear(sc->vm, vcpu);
+ vatpic_intr_accepted(sc->vm, vector);
+
+ /*
+ * Force a VM-exit as soon as the vcpu is ready to accept another
+ * interrupt. This is done because the PIC might have another vector
+ * that it wants to inject. Also, if the APIC has a pending interrupt
+ * that was preempted by the ExtInt then it allows us to inject the
+ * APIC vector as soon as possible.
+ */
+ need_intr_window = 1;
+done:
+ /*
+ * The guest can modify the TPR by writing to %CR8. In guest mode
+ * the processor reflects this write to V_TPR without hypervisor
+ * intervention.
+ *
+ * The guest can also modify the TPR by writing to it via the memory
+ * mapped APIC page. In this case, the write will be emulated by the
+ * hypervisor. For this reason V_TPR must be updated before every
+ * VMRUN.
+ */
+ v_tpr = vlapic_get_cr8(vlapic);
+ KASSERT(v_tpr >= 0 && v_tpr <= 15, ("invalid v_tpr %#x", v_tpr));
+ if (ctrl->v_tpr != v_tpr) {
+ VCPU_CTR2(sc->vm, vcpu, "VMCB V_TPR changed from %#x to %#x",
+ ctrl->v_tpr, v_tpr);
+ ctrl->v_tpr = v_tpr;
+ vcpu_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
+ }
+
+ if (pending_apic_vector) {
/*
- * Force a VM-exit as soon as the vcpu is ready to accept
- * another interrupt. This is done because the PIC might
- * have another vector that it wants to inject. Also, if
- * the vlapic has a pending interrupt that was preempted
- * by the ExtInt then it allows us to inject the APIC
- * vector as soon as possible.
+ * If an APIC vector is being injected then interrupt window
+ * exiting is not possible on this VMRUN.
*/
- need_intr_window = 1;
- }
-done:
- if (need_intr_window) {
+ KASSERT(!need_intr_window, ("intr_window exiting impossible"));
+ VCPU_CTR1(sc->vm, vcpu, "Injecting vector %d using V_IRQ",
+ pending_apic_vector);
+
+ ctrl->v_irq = 1;
+ ctrl->v_ign_tpr = 0;
+ ctrl->v_intr_vector = pending_apic_vector;
+ ctrl->v_intr_prio = pending_apic_vector >> 4;
+ vcpu_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
+ } else if (need_intr_window) {
/*
* We use V_IRQ in conjunction with the VINTR intercept to
* trap into the hypervisor as soon as a virtual interrupt
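Two details of the svm.c changes above are worth making concrete. First, the
value written to V_INTR_PRIO is simply the vector's APIC priority class (its
high nibble), and the hardware injects the vector only while that class
exceeds V_TPR (unless V_IGN_TPR is set). A worked example of the arithmetic,
assuming the standard APIC priority encoding:

	#include <assert.h>

	int
	main(void)
	{
		int vector = 0x41;		/* example APIC vector 65 */
		int prio_class = vector >> 4;	/* priority class 4 */
		int v_tpr = 0x3;		/* guest TPR class 3 */

		/* Class 4 > TPR 3, so the hardware may inject this vector. */
		assert(prio_class > v_tpr);
		return (0);
	}

Second, note that disable_intr_window_exiting() now does double duty: with
V_INTR_VECTOR zero it retires an interrupt-window request, and with a vector
programmed it cancels a V_IRQ injection that is no longer wanted, as the two
KTR messages distinguish.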
diff --git a/sys/amd64/vmm/io/vlapic.c b/sys/amd64/vmm/io/vlapic.c
index 3c93463..c6336bc 100644
--- a/sys/amd64/vmm/io/vlapic.c
+++ b/sys/amd64/vmm/io/vlapic.c
@@ -911,8 +911,12 @@ vlapic_set_tpr(struct vlapic *vlapic, uint8_t val)
{
struct LAPIC *lapic = vlapic->apic_page;
- lapic->tpr = val;
- vlapic_update_ppr(vlapic);
+ if (lapic->tpr != val) {
+ VCPU_CTR2(vlapic->vm, vlapic->vcpuid, "vlapic TPR changed "
+ "from %#x to %#x", lapic->tpr, val);
+ lapic->tpr = val;
+ vlapic_update_ppr(vlapic);
+ }
}
static uint8_t
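Putting the svm.c and vlapic.c halves together: the TPR must be synchronized
in both directions around every guest entry, since %CR8 accesses are handled
in hardware while memory-mapped APIC writes are emulated. A minimal sketch of
that round trip, with hypothetical helper names standing in for the vmm
internals (the real vlapic_get_cr8/vlapic_set_cr8 live in vlapic.c):

	#include <stdint.h>

	/* Hypothetical stand-ins; the real types live in the vmm sources. */
	struct vlapic_sketch { uint8_t tpr; };
	struct vmcb_ctrl_tpr { uint8_t v_tpr; };

	/* %CR8 holds the upper nibble of the 8-bit APIC TPR. */
	static uint8_t
	vlapic_read_cr8(const struct vlapic_sketch *vl)
	{
		return (vl->tpr >> 4);
	}

	static void
	vlapic_write_cr8(struct vlapic_sketch *vl, uint8_t cr8)
	{
		vl->tpr = cr8 << 4;
	}

	static void
	tpr_sync_roundtrip(struct vmcb_ctrl_tpr *ctrl, struct vlapic_sketch *vl)
	{
		/*
		 * Memory-mapped TPR writes were emulated into the vlapic,
		 * so push the emulated value into V_TPR before VMRUN.
		 */
		ctrl->v_tpr = vlapic_read_cr8(vl);

		/* VMRUN: guest %CR8 accesses hit V_TPR without an exit. */

		/* After #VMEXIT, pull %CR8 writes back into the vlapic. */
		vlapic_write_cr8(vl, ctrl->v_tpr);
	}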