summaryrefslogtreecommitdiffstats
path: root/sys/amd64/vmm/intel/vmx.c
diff options
context:
space:
mode:
authortychon <tychon@FreeBSD.org>2014-06-06 18:23:49 +0000
committertychon <tychon@FreeBSD.org>2014-06-06 18:23:49 +0000
commitc04c953593c844c91eafa9a73ec29f44bf76e07a (patch)
tree99c2251212d2e58ddf69adae65fb234bd876432f /sys/amd64/vmm/intel/vmx.c
parentc55588c12ba6dc6be00a46af8226b72a2609ddc0 (diff)
downloadFreeBSD-src-c04c953593c844c91eafa9a73ec29f44bf76e07a.zip
FreeBSD-src-c04c953593c844c91eafa9a73ec29f44bf76e07a.tar.gz
Support guest accesses to %cr8.
Reviewed by: neel
Diffstat (limited to 'sys/amd64/vmm/intel/vmx.c')
-rw-r--r--  sys/amd64/vmm/intel/vmx.c | 194
 1 file changed, 144 insertions(+), 50 deletions(-)
diff --git a/sys/amd64/vmm/intel/vmx.c b/sys/amd64/vmm/intel/vmx.c
index 85e176a..5c74a6b 100644
--- a/sys/amd64/vmm/intel/vmx.c
+++ b/sys/amd64/vmm/intel/vmx.c
@@ -83,7 +83,9 @@ __FBSDID("$FreeBSD$");
(PROCBASED_SECONDARY_CONTROLS | \
PROCBASED_IO_EXITING | \
PROCBASED_MSR_BITMAPS | \
- PROCBASED_CTLS_WINDOW_SETTING)
+ PROCBASED_CTLS_WINDOW_SETTING | \
+ PROCBASED_CR8_LOAD_EXITING | \
+ PROCBASED_CR8_STORE_EXITING)
#define PROCBASED_CTLS_ZERO_SETTING \
(PROCBASED_CR3_LOAD_EXITING | \
PROCBASED_CR3_STORE_EXITING | \
@@ -714,6 +716,13 @@ vmx_init(int ipinum)
procbased_ctls2 &= ~PROCBASED2_VIRTUALIZE_X2APIC_MODE;
/*
+ * No need to emulate accesses to %CR8 if virtual
+ * interrupt delivery is enabled.
+ */
+ procbased_ctls &= ~PROCBASED_CR8_LOAD_EXITING;
+ procbased_ctls &= ~PROCBASED_CR8_STORE_EXITING;
+
+ /*
* Check for Posted Interrupts only if Virtual Interrupt
* Delivery is enabled.
*/
@@ -1426,97 +1435,130 @@ vmx_emulate_xsetbv(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
return (HANDLED);
}
-static int
-vmx_emulate_cr_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
+static uint64_t
+vmx_get_guest_reg(struct vmx *vmx, int vcpu, int ident)
{
- int cr, vmcs_guest_cr, vmcs_shadow_cr;
- uint64_t crval, regval, ones_mask, zeros_mask;
const struct vmxctx *vmxctx;
- /* We only handle mov to %cr0 or %cr4 at this time */
- if ((exitqual & 0xf0) != 0x00)
- return (UNHANDLED);
+ vmxctx = &vmx->ctx[vcpu];
- cr = exitqual & 0xf;
- if (cr != 0 && cr != 4)
- return (UNHANDLED);
+ switch (ident) {
+ case 0:
+ return (vmxctx->guest_rax);
+ case 1:
+ return (vmxctx->guest_rcx);
+ case 2:
+ return (vmxctx->guest_rdx);
+ case 3:
+ return (vmxctx->guest_rbx);
+ case 4:
+ return (vmcs_read(VMCS_GUEST_RSP));
+ case 5:
+ return (vmxctx->guest_rbp);
+ case 6:
+ return (vmxctx->guest_rsi);
+ case 7:
+ return (vmxctx->guest_rdi);
+ case 8:
+ return (vmxctx->guest_r8);
+ case 9:
+ return (vmxctx->guest_r9);
+ case 10:
+ return (vmxctx->guest_r10);
+ case 11:
+ return (vmxctx->guest_r11);
+ case 12:
+ return (vmxctx->guest_r12);
+ case 13:
+ return (vmxctx->guest_r13);
+ case 14:
+ return (vmxctx->guest_r14);
+ case 15:
+ return (vmxctx->guest_r15);
+ default:
+ panic("invalid vmx register %d", ident);
+ }
+}
+
+static void
+vmx_set_guest_reg(struct vmx *vmx, int vcpu, int ident, uint64_t regval)
+{
+ struct vmxctx *vmxctx;
- regval = 0; /* silence gcc */
vmxctx = &vmx->ctx[vcpu];
- /*
- * We must use vmcs_write() directly here because vmcs_setreg() will
- * call vmclear(vmcs) as a side-effect which we certainly don't want.
- */
- switch ((exitqual >> 8) & 0xf) {
+ switch (ident) {
case 0:
- regval = vmxctx->guest_rax;
+ vmxctx->guest_rax = regval;
break;
case 1:
- regval = vmxctx->guest_rcx;
+ vmxctx->guest_rcx = regval;
break;
case 2:
- regval = vmxctx->guest_rdx;
+ vmxctx->guest_rdx = regval;
break;
case 3:
- regval = vmxctx->guest_rbx;
+ vmxctx->guest_rbx = regval;
break;
case 4:
- regval = vmcs_read(VMCS_GUEST_RSP);
+ vmcs_write(VMCS_GUEST_RSP, regval);
break;
case 5:
- regval = vmxctx->guest_rbp;
+ vmxctx->guest_rbp = regval;
break;
case 6:
- regval = vmxctx->guest_rsi;
+ vmxctx->guest_rsi = regval;
break;
case 7:
- regval = vmxctx->guest_rdi;
+ vmxctx->guest_rdi = regval;
break;
case 8:
- regval = vmxctx->guest_r8;
+ vmxctx->guest_r8 = regval;
break;
case 9:
- regval = vmxctx->guest_r9;
+ vmxctx->guest_r9 = regval;
break;
case 10:
- regval = vmxctx->guest_r10;
+ vmxctx->guest_r10 = regval;
break;
case 11:
- regval = vmxctx->guest_r11;
+ vmxctx->guest_r11 = regval;
break;
case 12:
- regval = vmxctx->guest_r12;
+ vmxctx->guest_r12 = regval;
break;
case 13:
- regval = vmxctx->guest_r13;
+ vmxctx->guest_r13 = regval;
break;
case 14:
- regval = vmxctx->guest_r14;
+ vmxctx->guest_r14 = regval;
break;
case 15:
- regval = vmxctx->guest_r15;
+ vmxctx->guest_r15 = regval;
break;
+ default:
+ panic("invalid vmx register %d", ident);
}
+}
- if (cr == 0) {
- ones_mask = cr0_ones_mask;
- zeros_mask = cr0_zeros_mask;
- vmcs_guest_cr = VMCS_GUEST_CR0;
- vmcs_shadow_cr = VMCS_CR0_SHADOW;
- } else {
- ones_mask = cr4_ones_mask;
- zeros_mask = cr4_zeros_mask;
- vmcs_guest_cr = VMCS_GUEST_CR4;
- vmcs_shadow_cr = VMCS_CR4_SHADOW;
- }
- vmcs_write(vmcs_shadow_cr, regval);
+static int
+vmx_emulate_cr0_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
+{
+ uint64_t crval, regval;
- crval = regval | ones_mask;
- crval &= ~zeros_mask;
- vmcs_write(vmcs_guest_cr, crval);
+ /* We only handle mov to %cr0 at this time */
+ if ((exitqual & 0xf0) != 0x00)
+ return (UNHANDLED);
- if (cr == 0 && regval & CR0_PG) {
+ regval = vmx_get_guest_reg(vmx, vcpu, (exitqual >> 8) & 0xf);
+
+ vmcs_write(VMCS_CR0_SHADOW, regval);
+
+ crval = regval | cr0_ones_mask;
+ crval &= ~cr0_zeros_mask;
+ vmcs_write(VMCS_GUEST_CR0, crval);
+
+ if (regval & CR0_PG) {
uint64_t efer, entry_ctls;
/*
@@ -1537,6 +1579,48 @@ vmx_emulate_cr_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
return (HANDLED);
}
+static int
+vmx_emulate_cr4_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
+{
+ uint64_t crval, regval;
+
+ /* We only handle mov to %cr4 at this time */
+ if ((exitqual & 0xf0) != 0x00)
+ return (UNHANDLED);
+
+ regval = vmx_get_guest_reg(vmx, vcpu, (exitqual >> 8) & 0xf);
+
+ vmcs_write(VMCS_CR4_SHADOW, regval);
+
+ crval = regval | cr4_ones_mask;
+ crval &= ~cr4_zeros_mask;
+ vmcs_write(VMCS_GUEST_CR4, crval);
+
+ return (HANDLED);
+}
+
+static int
+vmx_emulate_cr8_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
+{
+ uint64_t regval;
+
+ /* We only handle mov %cr8 to/from a register at this time. */
+ if ((exitqual & 0xe0) != 0x00) {
+ return (UNHANDLED);
+ }
+
+ if (exitqual & 0x10) {
+ regval = vlapic_get_tpr(vm_lapic(vmx->vm, vcpu));
+ vmx_set_guest_reg(vmx, vcpu, (exitqual >> 8) & 0xf,
+ regval >> 4);
+ } else {
+ regval = vmx_get_guest_reg(vmx, vcpu, (exitqual >> 8) & 0xf);
+ vlapic_set_tpr(vm_lapic(vmx->vm, vcpu), regval << 4);
+ }
+
+ return (HANDLED);
+}
+
/*
* From section "Guest Register State" in the Intel SDM: CPL = SS.DPL
*/
@@ -1929,7 +2013,17 @@ vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
switch (reason) {
case EXIT_REASON_CR_ACCESS:
vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CR_ACCESS, 1);
- handled = vmx_emulate_cr_access(vmx, vcpu, qual);
+ switch (qual & 0xf) {
+ case 0:
+ handled = vmx_emulate_cr0_access(vmx, vcpu, qual);
+ break;
+ case 4:
+ handled = vmx_emulate_cr4_access(vmx, vcpu, qual);
+ break;
+ case 8:
+ handled = vmx_emulate_cr8_access(vmx, vcpu, qual);
+ break;
+ }
break;
case EXIT_REASON_RDMSR:
vmm_stat_incr(vmx->vm, vcpu, VMEXIT_RDMSR, 1);
OpenPOWER on IntegriCloud