summaryrefslogtreecommitdiffstats
path: root/sys/amd64
diff options
context:
space:
mode:
authorgrehan <grehan@FreeBSD.org>2014-08-17 01:16:40 +0000
committergrehan <grehan@FreeBSD.org>2014-08-17 01:16:40 +0000
commitc00f011a6ecb134c74a4cf17b0cae12586601eb4 (patch)
treed4c9a7e8d1a7db1964cf30c7d89e7bd7f3d21b6f /sys/amd64
parent799b3dd5bf5db05a79bfdd83ba81e4fe8b6dfc02 (diff)
downloadFreeBSD-src-c00f011a6ecb134c74a4cf17b0cae12586601eb4.zip
FreeBSD-src-c00f011a6ecb134c74a4cf17b0cae12586601eb4.tar.gz
MFC r267178, r267300
Support guest accesses to %cr8. Add reserved bit checking when doing %CR8 emulation and inject #GP if required.
Diffstat (limited to 'sys/amd64')
-rw-r--r--sys/amd64/vmm/intel/vmx.c197
-rw-r--r--sys/amd64/vmm/io/vlapic.c45
-rw-r--r--sys/amd64/vmm/io/vlapic.h3
3 files changed, 192 insertions, 53 deletions
diff --git a/sys/amd64/vmm/intel/vmx.c b/sys/amd64/vmm/intel/vmx.c
index 94b7417..7390ccc 100644
--- a/sys/amd64/vmm/intel/vmx.c
+++ b/sys/amd64/vmm/intel/vmx.c
@@ -83,7 +83,9 @@ __FBSDID("$FreeBSD$");
(PROCBASED_SECONDARY_CONTROLS | \
PROCBASED_IO_EXITING | \
PROCBASED_MSR_BITMAPS | \
- PROCBASED_CTLS_WINDOW_SETTING)
+ PROCBASED_CTLS_WINDOW_SETTING | \
+ PROCBASED_CR8_LOAD_EXITING | \
+ PROCBASED_CR8_STORE_EXITING)
#define PROCBASED_CTLS_ZERO_SETTING \
(PROCBASED_CR3_LOAD_EXITING | \
PROCBASED_CR3_STORE_EXITING | \
@@ -714,6 +716,13 @@ vmx_init(int ipinum)
procbased_ctls2 &= ~PROCBASED2_VIRTUALIZE_X2APIC_MODE;
/*
+ * No need to emulate accesses to %CR8 if virtual
+ * interrupt delivery is enabled.
+ */
+ procbased_ctls &= ~PROCBASED_CR8_LOAD_EXITING;
+ procbased_ctls &= ~PROCBASED_CR8_STORE_EXITING;
+
+ /*
* Check for Posted Interrupts only if Virtual Interrupt
* Delivery is enabled.
*/
@@ -1442,97 +1451,130 @@ vmx_emulate_xsetbv(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
return (HANDLED);
}
-static int
-vmx_emulate_cr_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
+static uint64_t
+vmx_get_guest_reg(struct vmx *vmx, int vcpu, int ident)
{
- int cr, vmcs_guest_cr, vmcs_shadow_cr;
- uint64_t crval, regval, ones_mask, zeros_mask;
const struct vmxctx *vmxctx;
- /* We only handle mov to %cr0 or %cr4 at this time */
- if ((exitqual & 0xf0) != 0x00)
- return (UNHANDLED);
+ vmxctx = &vmx->ctx[vcpu];
- cr = exitqual & 0xf;
- if (cr != 0 && cr != 4)
- return (UNHANDLED);
+ switch (ident) {
+ case 0:
+ return (vmxctx->guest_rax);
+ case 1:
+ return (vmxctx->guest_rcx);
+ case 2:
+ return (vmxctx->guest_rdx);
+ case 3:
+ return (vmxctx->guest_rbx);
+ case 4:
+ return (vmcs_read(VMCS_GUEST_RSP));
+ case 5:
+ return (vmxctx->guest_rbp);
+ case 6:
+ return (vmxctx->guest_rsi);
+ case 7:
+ return (vmxctx->guest_rdi);
+ case 8:
+ return (vmxctx->guest_r8);
+ case 9:
+ return (vmxctx->guest_r9);
+ case 10:
+ return (vmxctx->guest_r10);
+ case 11:
+ return (vmxctx->guest_r11);
+ case 12:
+ return (vmxctx->guest_r12);
+ case 13:
+ return (vmxctx->guest_r13);
+ case 14:
+ return (vmxctx->guest_r14);
+ case 15:
+ return (vmxctx->guest_r15);
+ default:
+ panic("invalid vmx register %d", ident);
+ }
+}
+
+static void
+vmx_set_guest_reg(struct vmx *vmx, int vcpu, int ident, uint64_t regval)
+{
+ struct vmxctx *vmxctx;
- regval = 0; /* silence gcc */
vmxctx = &vmx->ctx[vcpu];
- /*
- * We must use vmcs_write() directly here because vmcs_setreg() will
- * call vmclear(vmcs) as a side-effect which we certainly don't want.
- */
- switch ((exitqual >> 8) & 0xf) {
+ switch (ident) {
case 0:
- regval = vmxctx->guest_rax;
+ vmxctx->guest_rax = regval;
break;
case 1:
- regval = vmxctx->guest_rcx;
+ vmxctx->guest_rcx = regval;
break;
case 2:
- regval = vmxctx->guest_rdx;
+ vmxctx->guest_rdx = regval;
break;
case 3:
- regval = vmxctx->guest_rbx;
+ vmxctx->guest_rbx = regval;
break;
case 4:
- regval = vmcs_read(VMCS_GUEST_RSP);
+ vmcs_write(VMCS_GUEST_RSP, regval);
break;
case 5:
- regval = vmxctx->guest_rbp;
+ vmxctx->guest_rbp = regval;
break;
case 6:
- regval = vmxctx->guest_rsi;
+ vmxctx->guest_rsi = regval;
break;
case 7:
- regval = vmxctx->guest_rdi;
+ vmxctx->guest_rdi = regval;
break;
case 8:
- regval = vmxctx->guest_r8;
+ vmxctx->guest_r8 = regval;
break;
case 9:
- regval = vmxctx->guest_r9;
+ vmxctx->guest_r9 = regval;
break;
case 10:
- regval = vmxctx->guest_r10;
+ vmxctx->guest_r10 = regval;
break;
case 11:
- regval = vmxctx->guest_r11;
+ vmxctx->guest_r11 = regval;
break;
case 12:
- regval = vmxctx->guest_r12;
+ vmxctx->guest_r12 = regval;
break;
case 13:
- regval = vmxctx->guest_r13;
+ vmxctx->guest_r13 = regval;
break;
case 14:
- regval = vmxctx->guest_r14;
+ vmxctx->guest_r14 = regval;
break;
case 15:
- regval = vmxctx->guest_r15;
+ vmxctx->guest_r15 = regval;
break;
+ default:
+ panic("invalid vmx register %d", ident);
}
+}
- if (cr == 0) {
- ones_mask = cr0_ones_mask;
- zeros_mask = cr0_zeros_mask;
- vmcs_guest_cr = VMCS_GUEST_CR0;
- vmcs_shadow_cr = VMCS_CR0_SHADOW;
- } else {
- ones_mask = cr4_ones_mask;
- zeros_mask = cr4_zeros_mask;
- vmcs_guest_cr = VMCS_GUEST_CR4;
- vmcs_shadow_cr = VMCS_CR4_SHADOW;
- }
- vmcs_write(vmcs_shadow_cr, regval);
+static int
+vmx_emulate_cr0_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
+{
+ uint64_t crval, regval;
+
+ /* We only handle mov to %cr0 at this time */
+ if ((exitqual & 0xf0) != 0x00)
+ return (UNHANDLED);
+
+ regval = vmx_get_guest_reg(vmx, vcpu, (exitqual >> 8) & 0xf);
+
+ vmcs_write(VMCS_CR0_SHADOW, regval);
- crval = regval | ones_mask;
- crval &= ~zeros_mask;
- vmcs_write(vmcs_guest_cr, crval);
+ crval = regval | cr0_ones_mask;
+ crval &= ~cr0_zeros_mask;
+ vmcs_write(VMCS_GUEST_CR0, crval);
- if (cr == 0 && regval & CR0_PG) {
+ if (regval & CR0_PG) {
uint64_t efer, entry_ctls;
/*
@@ -1553,6 +1595,51 @@ vmx_emulate_cr_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
return (HANDLED);
}
+static int
+vmx_emulate_cr4_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
+{
+ uint64_t crval, regval;
+
+ /* We only handle mov to %cr4 at this time */
+ if ((exitqual & 0xf0) != 0x00)
+ return (UNHANDLED);
+
+ regval = vmx_get_guest_reg(vmx, vcpu, (exitqual >> 8) & 0xf);
+
+ vmcs_write(VMCS_CR4_SHADOW, regval);
+
+ crval = regval | cr4_ones_mask;
+ crval &= ~cr4_zeros_mask;
+ vmcs_write(VMCS_GUEST_CR4, crval);
+
+ return (HANDLED);
+}
+
+static int
+vmx_emulate_cr8_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
+{
+ struct vlapic *vlapic;
+ uint64_t cr8;
+ int regnum;
+
+ /* We only handle mov %cr8 to/from a register at this time. */
+ if ((exitqual & 0xe0) != 0x00) {
+ return (UNHANDLED);
+ }
+
+ vlapic = vm_lapic(vmx->vm, vcpu);
+ regnum = (exitqual >> 8) & 0xf;
+ if (exitqual & 0x10) {
+ cr8 = vlapic_get_cr8(vlapic);
+ vmx_set_guest_reg(vmx, vcpu, regnum, cr8);
+ } else {
+ cr8 = vmx_get_guest_reg(vmx, vcpu, regnum);
+ vlapic_set_cr8(vlapic, cr8);
+ }
+
+ return (HANDLED);
+}
+
/*
* From section "Guest Register State" in the Intel SDM: CPL = SS.DPL
*/
@@ -1945,7 +2032,17 @@ vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
switch (reason) {
case EXIT_REASON_CR_ACCESS:
vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CR_ACCESS, 1);
- handled = vmx_emulate_cr_access(vmx, vcpu, qual);
+ switch (qual & 0xf) {
+ case 0:
+ handled = vmx_emulate_cr0_access(vmx, vcpu, qual);
+ break;
+ case 4:
+ handled = vmx_emulate_cr4_access(vmx, vcpu, qual);
+ break;
+ case 8:
+ handled = vmx_emulate_cr8_access(vmx, vcpu, qual);
+ break;
+ }
break;
case EXIT_REASON_RDMSR:
vmm_stat_incr(vmx->vm, vcpu, VMEXIT_RDMSR, 1);
diff --git a/sys/amd64/vmm/io/vlapic.c b/sys/amd64/vmm/io/vlapic.c
index 4034d34..3c93463 100644
--- a/sys/amd64/vmm/io/vlapic.c
+++ b/sys/amd64/vmm/io/vlapic.c
@@ -906,6 +906,46 @@ vlapic_calcdest(struct vm *vm, cpuset_t *dmask, uint32_t dest, bool phys,
static VMM_STAT_ARRAY(IPIS_SENT, VM_MAXCPU, "ipis sent to vcpu");
+static void
+vlapic_set_tpr(struct vlapic *vlapic, uint8_t val)
+{
+ struct LAPIC *lapic = vlapic->apic_page;
+
+ lapic->tpr = val;
+ vlapic_update_ppr(vlapic);
+}
+
+static uint8_t
+vlapic_get_tpr(struct vlapic *vlapic)
+{
+ struct LAPIC *lapic = vlapic->apic_page;
+
+ return (lapic->tpr);
+}
+
+void
+vlapic_set_cr8(struct vlapic *vlapic, uint64_t val)
+{
+ uint8_t tpr;
+
+ if (val & ~0xf) {
+ vm_inject_gp(vlapic->vm, vlapic->vcpuid);
+ return;
+ }
+
+ tpr = val << 4;
+ vlapic_set_tpr(vlapic, tpr);
+}
+
+uint64_t
+vlapic_get_cr8(struct vlapic *vlapic)
+{
+ uint8_t tpr;
+
+ tpr = vlapic_get_tpr(vlapic);
+ return (tpr >> 4);
+}
+
int
vlapic_icrlo_write_handler(struct vlapic *vlapic, bool *retu)
{
@@ -1184,7 +1224,7 @@ vlapic_read(struct vlapic *vlapic, int mmio_access, uint64_t offset,
*data = lapic->version;
break;
case APIC_OFFSET_TPR:
- *data = lapic->tpr;
+ *data = vlapic_get_tpr(vlapic);
break;
case APIC_OFFSET_APR:
*data = lapic->apr;
@@ -1305,8 +1345,7 @@ vlapic_write(struct vlapic *vlapic, int mmio_access, uint64_t offset,
vlapic_id_write_handler(vlapic);
break;
case APIC_OFFSET_TPR:
- lapic->tpr = data & 0xff;
- vlapic_update_ppr(vlapic);
+ vlapic_set_tpr(vlapic, data & 0xff);
break;
case APIC_OFFSET_EOI:
vlapic_process_eoi(vlapic);
diff --git a/sys/amd64/vmm/io/vlapic.h b/sys/amd64/vmm/io/vlapic.h
index 3195cc6..0e68b2f 100644
--- a/sys/amd64/vmm/io/vlapic.h
+++ b/sys/amd64/vmm/io/vlapic.h
@@ -92,6 +92,9 @@ void vlapic_reset_tmr(struct vlapic *vlapic);
void vlapic_set_tmr_level(struct vlapic *vlapic, uint32_t dest, bool phys,
int delmode, int vector);
+void vlapic_set_cr8(struct vlapic *vlapic, uint64_t val);
+uint64_t vlapic_get_cr8(struct vlapic *vlapic);
+
/* APIC write handlers */
void vlapic_id_write_handler(struct vlapic *vlapic);
void vlapic_ldr_write_handler(struct vlapic *vlapic);
OpenPOWER on IntegriCloud