Diffstat (limited to 'sys/amd64')
-rw-r--r--  sys/amd64/vmm/amd/svm.c                98
-rw-r--r--  sys/amd64/vmm/amd/svm_msr.c             7
-rw-r--r--  sys/amd64/vmm/intel/vmx_msr.c           7
-rw-r--r--  sys/amd64/vmm/vmm_instruction_emul.c   16
-rw-r--r--  sys/amd64/vmm/x86.c                    51
-rw-r--r--  sys/amd64/vmm/x86.h                    13
6 files changed, 168 insertions, 24 deletions
diff --git a/sys/amd64/vmm/amd/svm.c b/sys/amd64/vmm/amd/svm.c
index 7cc13ca..20e8f76 100644
--- a/sys/amd64/vmm/amd/svm.c
+++ b/sys/amd64/vmm/amd/svm.c
@@ -564,6 +564,19 @@ svm_vminit(struct vm *vm, pmap_t pmap)
return (svm_sc);
}
+/*
+ * Collateral for a generic SVM VM-exit.
+ */
+static void
+vm_exit_svm(struct vm_exit *vme, uint64_t code, uint64_t info1, uint64_t info2)
+{
+
+ vme->exitcode = VM_EXITCODE_SVM;
+ vme->u.svm.exitcode = code;
+ vme->u.svm.exitinfo1 = info1;
+ vme->u.svm.exitinfo2 = info2;
+}
+
static int
svm_cpl(struct vmcb_state *state)
{
@@ -1080,6 +1093,76 @@ clear_nmi_blocking(struct svm_softc *sc, int vcpu)
KASSERT(!error, ("%s: error %d setting intr_shadow", __func__, error));
}
+#define EFER_MBZ_BITS 0xFFFFFFFFFFFF0200UL
+
+static int
+svm_write_efer(struct svm_softc *sc, int vcpu, uint64_t newval, bool *retu)
+{
+ struct vm_exit *vme;
+ struct vmcb_state *state;
+ uint64_t changed, lma, oldval;
+ int error;
+
+ state = svm_get_vmcb_state(sc, vcpu);
+
+ oldval = state->efer;
+ VCPU_CTR2(sc->vm, vcpu, "wrmsr(efer) %#lx/%#lx", oldval, newval);
+
+ newval &= ~0xFE; /* clear the Read-As-Zero (RAZ) bits */
+ changed = oldval ^ newval;
+
+ if (newval & EFER_MBZ_BITS)
+ goto gpf;
+
+ /* APMv2 Table 14-5 "Long-Mode Consistency Checks" */
+ if (changed & EFER_LME) {
+ if (state->cr0 & CR0_PG)
+ goto gpf;
+ }
+
+ /* EFER.LMA = EFER.LME & CR0.PG */
+ if ((newval & EFER_LME) != 0 && (state->cr0 & CR0_PG) != 0)
+ lma = EFER_LMA;
+ else
+ lma = 0;
+
+ if ((newval & EFER_LMA) != lma)
+ goto gpf;
+
+ if (newval & EFER_NXE) {
+ if (!vm_cpuid_capability(sc->vm, vcpu, VCC_NO_EXECUTE))
+ goto gpf;
+ }
+
+ /*
+ * XXX bhyve does not enforce segment limits in 64-bit mode. Until
+ * this is fixed, flag a guest attempt to set EFER_LMSLE as an error.
+ */
+ if (newval & EFER_LMSLE) {
+ vme = vm_exitinfo(sc->vm, vcpu);
+ vm_exit_svm(vme, VMCB_EXIT_MSR, 1, 0);
+ *retu = true;
+ return (0);
+ }
+
+ if (newval & EFER_FFXSR) {
+ if (!vm_cpuid_capability(sc->vm, vcpu, VCC_FFXSR))
+ goto gpf;
+ }
+
+ if (newval & EFER_TCE) {
+ if (!vm_cpuid_capability(sc->vm, vcpu, VCC_TCE))
+ goto gpf;
+ }
+
+ error = svm_setreg(sc, vcpu, VM_REG_GUEST_EFER, newval);
+ KASSERT(error == 0, ("%s: error %d updating efer", __func__, error));
+ return (0);
+gpf:
+ vm_inject_gp(sc->vm, vcpu);
+ return (0);
+}
+
static int
emulate_wrmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t val,
bool *retu)
@@ -1089,7 +1172,7 @@ emulate_wrmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t val,
if (lapic_msr(num))
error = lapic_wrmsr(sc->vm, vcpu, num, val, retu);
else if (num == MSR_EFER)
- error = svm_setreg(sc, vcpu, VM_REG_GUEST_EFER, val);
+ error = svm_write_efer(sc, vcpu, val, retu);
else
error = svm_wrmsr(sc, vcpu, num, val, retu);
@@ -1189,19 +1272,6 @@ nrip_valid(uint64_t exitcode)
}
}
-/*
- * Collateral for a generic SVM VM-exit.
- */
-static void
-vm_exit_svm(struct vm_exit *vme, uint64_t code, uint64_t info1, uint64_t info2)
-{
-
- vme->exitcode = VM_EXITCODE_SVM;
- vme->u.svm.exitcode = code;
- vme->u.svm.exitinfo1 = info1;
- vme->u.svm.exitinfo2 = info2;
-}
-
static int
svm_vmexit(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
{
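
The core of the svm.c change is svm_write_efer(): instead of blindly writing the guest's WRMSR value into the VMCB, it applies AMD's long-mode consistency checks (APMv2 Table 14-5) and only lets architecturally valid EFER values through. Below is a minimal, self-contained sketch of those consistency checks as a plain predicate; the constants mirror the kernel's specialreg.h definitions but are restated locally and should be read as assumptions, not as the committed code.

/*
 * Hedged sketch (not part of the commit): EFER write validation against the
 * current CR0, restating the consistency checks performed by svm_write_efer().
 */
#include <stdbool.h>
#include <stdint.h>

#define EFER_LME	0x0000000000000100UL	/* Long Mode Enable (bit 8) */
#define EFER_LMA	0x0000000000000400UL	/* Long Mode Active (bit 10) */
#define CR0_PG		0x0000000080000000UL	/* Paging enable (bit 31) */
#define EFER_MBZ	0xFFFFFFFFFFFF0200UL	/* must-be-zero bits, as in the diff */

static bool
efer_write_is_valid(uint64_t oldval, uint64_t newval, uint64_t cr0)
{
	uint64_t changed, lma;

	newval &= ~0xFEUL;		/* RAZ bits are ignored on writes */
	changed = oldval ^ newval;

	if (newval & EFER_MBZ)
		return (false);

	/* LME may not be toggled while paging is enabled. */
	if ((changed & EFER_LME) != 0 && (cr0 & CR0_PG) != 0)
		return (false);

	/* LMA must equal LME & CR0.PG; the guest cannot set it directly. */
	lma = ((newval & EFER_LME) != 0 && (cr0 & CR0_PG) != 0) ? EFER_LMA : 0;
	return ((newval & EFER_LMA) == lma);
}

A caller would compute the candidate value, call efer_write_is_valid(state->efer, newval, state->cr0), and inject #GP on failure, mirroring the gpf path in the real function (which additionally gates EFER.NXE, FFXSR and TCE on vm_cpuid_capability()).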
diff --git a/sys/amd64/vmm/amd/svm_msr.c b/sys/amd64/vmm/amd/svm_msr.c
index d3a6fe8..088751a 100644
--- a/sys/amd64/vmm/amd/svm_msr.c
+++ b/sys/amd64/vmm/amd/svm_msr.c
@@ -110,6 +110,10 @@ svm_rdmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t *result,
int error = 0;
switch (num) {
+ case MSR_MCG_CAP:
+ case MSR_MCG_STATUS:
+ *result = 0;
+ break;
case MSR_MTRRcap:
case MSR_MTRRdefType:
case MSR_MTRR4kBase ... MSR_MTRR4kBase + 8:
@@ -135,6 +139,9 @@ svm_wrmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t val, bool *retu)
int error = 0;
switch (num) {
+ case MSR_MCG_CAP:
+ case MSR_MCG_STATUS:
+ break; /* ignore writes */
case MSR_MTRRcap:
vm_inject_gp(sc->vm, vcpu);
break;
diff --git a/sys/amd64/vmm/intel/vmx_msr.c b/sys/amd64/vmm/intel/vmx_msr.c
index 526b0d1..3091f68 100644
--- a/sys/amd64/vmm/intel/vmx_msr.c
+++ b/sys/amd64/vmm/intel/vmx_msr.c
@@ -395,6 +395,10 @@ vmx_rdmsr(struct vmx *vmx, int vcpuid, u_int num, uint64_t *val, bool *retu)
error = 0;
switch (num) {
+ case MSR_MCG_CAP:
+ case MSR_MCG_STATUS:
+ *val = 0;
+ break;
case MSR_MTRRcap:
case MSR_MTRRdefType:
case MSR_MTRR4kBase ... MSR_MTRR4kBase + 8:
@@ -433,6 +437,9 @@ vmx_wrmsr(struct vmx *vmx, int vcpuid, u_int num, uint64_t val, bool *retu)
error = 0;
switch (num) {
+ case MSR_MCG_CAP:
+ case MSR_MCG_STATUS:
+ break; /* ignore writes */
case MSR_MTRRcap:
vm_inject_gp(vmx->vm, vcpuid);
break;
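
Both the AMD and Intel MSR backends get the same treatment for the machine-check global MSRs: reads of MSR_MCG_CAP and MSR_MCG_STATUS return zero (no MC banks, no machine check in progress) and writes are silently dropped. This pairs with the x86.c change further down, which advertises CPUID_MCA/CPUID_MCE so that guests such as Windows will boot; once the bits are advertised, guests will probe these MSRs. A hedged sketch of the read-as-zero / write-ignore pattern, using hypothetical function names:

/*
 * Illustrative sketch only; msr_mcg_rdmsr/msr_mcg_wrmsr are made-up names.
 */
#include <stdint.h>

#define MSR_MCG_CAP	0x179
#define MSR_MCG_STATUS	0x17a

static int
msr_mcg_rdmsr(uint32_t num, uint64_t *val)
{
	switch (num) {
	case MSR_MCG_CAP:
	case MSR_MCG_STATUS:
		*val = 0;	/* zero MCA banks, no machine check pending */
		return (0);
	default:
		return (-1);	/* not handled here */
	}
}

static int
msr_mcg_wrmsr(uint32_t num, uint64_t val)
{
	(void)val;
	switch (num) {
	case MSR_MCG_CAP:
	case MSR_MCG_STATUS:
		return (0);	/* ignore the write */
	default:
		return (-1);
	}
}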
diff --git a/sys/amd64/vmm/vmm_instruction_emul.c b/sys/amd64/vmm/vmm_instruction_emul.c
index 7172365..c83f3e0 100644
--- a/sys/amd64/vmm/vmm_instruction_emul.c
+++ b/sys/amd64/vmm/vmm_instruction_emul.c
@@ -178,14 +178,20 @@ static const struct vie_op one_byte_opcodes[256] = {
.op_byte = 0x23,
.op_type = VIE_OP_TYPE_AND,
},
+ [0x80] = {
+ /* Group 1 extended opcode */
+ .op_byte = 0x80,
+ .op_type = VIE_OP_TYPE_GROUP1,
+ .op_flags = VIE_OP_F_IMM8,
+ },
[0x81] = {
- /* XXX Group 1 extended opcode */
+ /* Group 1 extended opcode */
.op_byte = 0x81,
.op_type = VIE_OP_TYPE_GROUP1,
.op_flags = VIE_OP_F_IMM,
},
[0x83] = {
- /* XXX Group 1 extended opcode */
+ /* Group 1 extended opcode */
.op_byte = 0x83,
.op_type = VIE_OP_TYPE_GROUP1,
.op_flags = VIE_OP_F_IMM8,
@@ -1066,9 +1072,13 @@ emulate_cmp(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
rflags2 = getcc(size, op1, op2);
break;
+ case 0x80:
case 0x81:
case 0x83:
/*
+ * 80 /7 cmp r/m8, imm8
+ * REX + 80 /7 cmp r/m8, imm8
+ *
* 81 /7 cmp r/m16, imm16
* 81 /7 cmp r/m32, imm32
* REX.W + 81 /7 cmp r/m64, imm32 sign-extended to 64
@@ -1084,6 +1094,8 @@ emulate_cmp(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
* the status flags.
*
*/
+ if (vie->op.op_byte == 0x80)
+ size = 1;
/* get the first operand */
error = memread(vm, vcpuid, gpa, &op1, size, arg);
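
The instruction-emulation change teaches emulate_cmp() about opcode 0x80, the byte-sized group-1 form, by forcing the operand size to 1 when that opcode is decoded. The in-kernel code derives the resulting RFLAGS by executing a real CMP through its getcc() helpers; purely as an illustration of the semantics for the new size = 1 case, the sketch below computes the same status flags in plain C (parity and adjust flags omitted for brevity). The PSL_* values are my restatement of the usual x86 flag bit positions, so treat them as assumptions.

/*
 * Hedged sketch (not from the commit): arithmetic flags for an 8-bit CMP,
 * i.e. opcode 80 /7 with an imm8 operand.
 */
#include <stdint.h>

#define PSL_C	0x001	/* carry */
#define PSL_Z	0x040	/* zero */
#define PSL_N	0x080	/* sign */
#define PSL_V	0x800	/* overflow */

static uint64_t
cmp8_rflags(uint8_t op1, uint8_t op2)
{
	uint8_t res = op1 - op2;
	uint64_t rflags = 0;

	if (op1 < op2)
		rflags |= PSL_C;	/* borrow out of bit 7 */
	if (res == 0)
		rflags |= PSL_Z;
	if (res & 0x80)
		rflags |= PSL_N;
	if (((op1 ^ op2) & (op1 ^ res)) & 0x80)
		rflags |= PSL_V;	/* signed overflow on subtraction */
	return (rflags);
}

For example, cmp8_rflags(0x80, 0x01) yields only PSL_V set, since -128 - 1 overflows signed 8-bit arithmetic while producing no borrow, zero or negative result.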
diff --git a/sys/amd64/vmm/x86.c b/sys/amd64/vmm/x86.c
index 45e08b5..525e1d9 100644
--- a/sys/amd64/vmm/x86.c
+++ b/sys/amd64/vmm/x86.c
@@ -230,10 +230,11 @@ x86_emulate_cpuid(struct vm *vm, int vcpu_id,
regs[1] |= (vcpu_id << CPUID_0000_0001_APICID_SHIFT);
/*
- * Don't expose VMX, SpeedStep or TME capability.
+ * Don't expose VMX, SpeedStep, TME or SMX capability.
* Advertise x2APIC capability and Hypervisor guest.
*/
regs[2] &= ~(CPUID2_VMX | CPUID2_EST | CPUID2_TM2);
+ regs[2] &= ~(CPUID2_SMX);
regs[2] |= CPUID2_HV;
@@ -285,17 +286,20 @@ x86_emulate_cpuid(struct vm *vm, int vcpu_id,
* Hide thermal monitoring
*/
regs[3] &= ~(CPUID_ACPI | CPUID_TM);
-
+
/*
- * Machine check handling is done in the host.
+ * Hide the debug store capability.
*/
- regs[3] &= ~(CPUID_MCA | CPUID_MCE);
-
- /*
- * Hide the debug store capability.
- */
regs[3] &= ~CPUID_DS;
+ /*
+ * Advertise the Machine Check and MTRR capability.
+ *
+ * Some guest OSes (e.g. Windows) will not boot if
+ * these features are absent.
+ */
+ regs[3] |= (CPUID_MCA | CPUID_MCE | CPUID_MTRR);
+
logical_cpus = threads_per_core * cores_per_package;
regs[1] &= ~CPUID_HTT_CORES;
regs[1] |= (logical_cpus & 0xff) << 16;
@@ -484,3 +488,34 @@ x86_emulate_cpuid(struct vm *vm, int vcpu_id,
return (1);
}
+
+bool
+vm_cpuid_capability(struct vm *vm, int vcpuid, enum vm_cpuid_capability cap)
+{
+ bool rv;
+
+ KASSERT(cap > 0 && cap < VCC_LAST, ("%s: invalid vm_cpu_capability %d",
+ __func__, cap));
+
+ /*
+ * Simply pass through the capabilities of the host cpu for now.
+ */
+ rv = false;
+ switch (cap) {
+ case VCC_NO_EXECUTE:
+ if (amd_feature & AMDID_NX)
+ rv = true;
+ break;
+ case VCC_FFXSR:
+ if (amd_feature & AMDID_FFXSR)
+ rv = true;
+ break;
+ case VCC_TCE:
+ if (amd_feature2 & AMDID2_TCE)
+ rv = true;
+ break;
+ default:
+ panic("%s: unknown vm_cpu_capability %d", __func__, cap);
+ }
+ return (rv);
+}
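
vm_cpuid_capability() is for now a straight pass-through of the host's cached AMD feature words (amd_feature / amd_feature2). As a hedged illustration of where those bits come from, the userland sketch below queries CPUID leaf 0x80000001 directly with the compiler's <cpuid.h> helper; the bit positions (NX: EDX[20], FFXSR: EDX[25], TCE: ECX[17]) are my reading of the AMD CPUID documentation, not something taken from this commit.

/*
 * Illustrative userland analogue of vm_cpuid_capability(); names and bit
 * positions are assumptions, not part of the committed change.
 */
#include <cpuid.h>
#include <stdbool.h>

enum host_cap { HC_NO_EXECUTE, HC_FFXSR, HC_TCE };

static bool
host_cpuid_capability(enum host_cap cap)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(0x80000001, &eax, &ebx, &ecx, &edx))
		return (false);

	switch (cap) {
	case HC_NO_EXECUTE:
		return ((edx & (1u << 20)) != 0);	/* NX */
	case HC_FFXSR:
		return ((edx & (1u << 25)) != 0);	/* fast FXSAVE/FXRSTOR */
	case HC_TCE:
		return ((ecx & (1u << 17)) != 0);	/* translation cache ext. */
	}
	return (false);
}

The "for now" in the real function's comment signals that this host pass-through is a placeholder policy rather than a per-VM capability mask.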
diff --git a/sys/amd64/vmm/x86.h b/sys/amd64/vmm/x86.h
index 8401c15..6f99d52 100644
--- a/sys/amd64/vmm/x86.h
+++ b/sys/amd64/vmm/x86.h
@@ -62,4 +62,17 @@
int x86_emulate_cpuid(struct vm *vm, int vcpu_id, uint32_t *eax, uint32_t *ebx,
uint32_t *ecx, uint32_t *edx);
+enum vm_cpuid_capability {
+ VCC_NONE,
+ VCC_NO_EXECUTE,
+ VCC_FFXSR,
+ VCC_TCE,
+ VCC_LAST
+};
+
+/*
+ * Return 'true' if the capability 'cap' is enabled in this virtual cpu
+ * and 'false' otherwise.
+ */
+bool vm_cpuid_capability(struct vm *vm, int vcpuid, enum vm_cpuid_capability);
#endif