author     neel <neel@FreeBSD.org>  2014-09-20 21:46:31 +0000
committer  neel <neel@FreeBSD.org>  2014-09-20 21:46:31 +0000
commit     ef294abb97223f3be985144e9f4cea60c6b7b7bc (patch)
tree       27e6c53b5655b0a434f5189d5c812403c5faa71c /sys
parent     77d107b3b9c3f3d4518f2e83cf622ae5dc91d6d6 (diff)
IFC r271888.
Restructure MSR emulation so it is all done in processor-specific code.
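In outline, the commit deletes the generic vmm_msr.c table and gives each backend (SVM, VMX) its own msr_init/guest_init/guest_enter/guest_exit hooks plus rdmsr/wrmsr handlers, dispatched only after the local APIC range has been checked. A toy model of the new write dispatch order, using stand-in functions rather than the vmm code (the lapic range below is a simplified x2APIC window):

    #include <errno.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MSR_EFER	0xc0000080

    /* Stand-ins: lapic first, then the EFER special case, then backend. */
    static bool lapic_msr(unsigned int num) { return (num >= 0x800 && num <= 0x8ff); }
    static int  lapic_wrmsr(unsigned int num, uint64_t val) { (void)num; (void)val; return (0); }
    static int  svm_write_efer(uint64_t val) { (void)val; return (0); }
    static int  svm_wrmsr_stub(unsigned int num, uint64_t val) { (void)num; (void)val; return (EINVAL); }

    static int
    emulate_wrmsr_model(unsigned int num, uint64_t val)
    {
    	if (lapic_msr(num))
    		return (lapic_wrmsr(num, val));
    	else if (num == MSR_EFER)
    		return (svm_write_efer(val));
    	else
    		return (svm_wrmsr_stub(num, val));
    }

    int
    main(void)
    {
    	printf("x2apic=%d efer=%d unknown=%d\n",
    	    emulate_wrmsr_model(0x830, 0),	/* 0 */
    	    emulate_wrmsr_model(MSR_EFER, 0),	/* 0 */
    	    emulate_wrmsr_model(0x12345, 0));	/* EINVAL */
    	return (0);
    }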
Diffstat (limited to 'sys')
-rw-r--r--  sys/amd64/include/vmm.h        |   1
-rw-r--r--  sys/amd64/vmm/amd/svm.c        |  77
-rw-r--r--  sys/amd64/vmm/amd/svm_msr.c    | 136
-rw-r--r--  sys/amd64/vmm/amd/svm_msr.h    |  44
-rw-r--r--  sys/amd64/vmm/intel/ept.c      |   1
-rw-r--r--  sys/amd64/vmm/intel/vmcs.h     |   5
-rw-r--r--  sys/amd64/vmm/intel/vmx.c      |  91
-rw-r--r--  sys/amd64/vmm/intel/vmx.h      |  15
-rw-r--r--  sys/amd64/vmm/intel/vmx_msr.c  | 115
-rw-r--r--  sys/amd64/vmm/intel/vmx_msr.h  |  15
-rw-r--r--  sys/amd64/vmm/vmm.c            |  13
-rw-r--r--  sys/amd64/vmm/vmm_msr.c        | 281
-rw-r--r--  sys/amd64/vmm/vmm_msr.h        |  44
-rw-r--r--  sys/modules/vmm/Makefile       |   4
-rw-r--r--  sys/x86/include/specialreg.h   |   4
15 files changed, 439 insertions(+), 407 deletions(-)
diff --git a/sys/amd64/include/vmm.h b/sys/amd64/include/vmm.h
index ee5ed83..44fd2ea 100644
--- a/sys/amd64/include/vmm.h
+++ b/sys/amd64/include/vmm.h
@@ -195,7 +195,6 @@ void vm_nmi_clear(struct vm *vm, int vcpuid);
int vm_inject_extint(struct vm *vm, int vcpu);
int vm_extint_pending(struct vm *vm, int vcpuid);
void vm_extint_clear(struct vm *vm, int vcpuid);
-uint64_t *vm_guest_msrs(struct vm *vm, int cpu);
struct vlapic *vm_lapic(struct vm *vm, int cpu);
struct vioapic *vm_ioapic(struct vm *vm);
struct vhpet *vm_hpet(struct vm *vm);
diff --git a/sys/amd64/vmm/amd/svm.c b/sys/amd64/vmm/amd/svm.c
index 32ac257..1a3b96a 100644
--- a/sys/amd64/vmm/amd/svm.c
+++ b/sys/amd64/vmm/amd/svm.c
@@ -54,7 +54,6 @@ __FBSDID("$FreeBSD$");
#include <x86/apicreg.h>
#include "vmm_lapic.h"
-#include "vmm_msr.h"
#include "vmm_stat.h"
#include "vmm_ktr.h"
#include "vmm_ioport.h"
@@ -66,6 +65,7 @@ __FBSDID("$FreeBSD$");
#include "vmcb.h"
#include "svm.h"
#include "svm_softc.h"
+#include "svm_msr.h"
#include "npt.h"
SYSCTL_DECL(_hw_vmm);
@@ -303,6 +303,7 @@ svm_init(int ipinum)
asid[cpu].num = nasid - 1;
}
+ svm_msr_init();
svm_npt_init(ipinum);
/* Start SVM on all CPUs */
@@ -606,6 +607,7 @@ svm_vminit(struct vm *vm, pmap_t pmap)
vcpu->lastcpu = NOCPU;
vcpu->vmcb_pa = vtophys(&vcpu->vmcb);
vmcb_init(svm_sc, i, iopm_pa, msrpm_pa, pml4_pa);
+ svm_msr_guest_init(svm_sc, i);
}
return (svm_sc);
}
@@ -867,8 +869,8 @@ svm_handle_inst_emul(struct vmcb *vmcb, uint64_t gpa, struct vm_exit *vmexit)
* Intercept access to MSR_EFER to prevent the guest from clearing the
* SVM enable bit.
*/
-static void
-svm_write_efer(struct svm_softc *sc, int vcpu, uint32_t edx, uint32_t eax)
+static int
+svm_write_efer(struct svm_softc *sc, int vcpu, uint64_t val)
{
struct vmcb_state *state;
uint64_t oldval;
@@ -876,12 +878,13 @@ svm_write_efer(struct svm_softc *sc, int vcpu, uint32_t edx, uint32_t eax)
state = svm_get_vmcb_state(sc, vcpu);
oldval = state->efer;
- state->efer = (uint64_t)edx << 32 | eax | EFER_SVM;
+ state->efer = val | EFER_SVM;
if (state->efer != oldval) {
VCPU_CTR2(sc->vm, vcpu, "Guest EFER changed from %#lx to %#lx",
oldval, state->efer);
vcpu_set_dirty(sc, vcpu, VMCB_CACHE_CR);
}
+ return (0);
}
#ifdef KTR
@@ -1132,6 +1135,45 @@ clear_nmi_blocking(struct svm_softc *sc, int vcpu)
KASSERT(!error, ("%s: error %d setting intr_shadow", __func__, error));
}
+static int
+emulate_wrmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t val,
+ bool *retu)
+{
+ int error;
+
+ if (lapic_msr(num))
+ error = lapic_wrmsr(sc->vm, vcpu, num, val, retu);
+ else if (num == MSR_EFER)
+ error = svm_write_efer(sc, vcpu, val);
+ else
+ error = svm_wrmsr(sc, vcpu, num, val, retu);
+
+ return (error);
+}
+
+static int
+emulate_rdmsr(struct svm_softc *sc, int vcpu, u_int num, bool *retu)
+{
+ struct vmcb_state *state;
+ struct svm_regctx *ctx;
+ uint64_t result;
+ int error;
+
+ if (lapic_msr(num))
+ error = lapic_rdmsr(sc->vm, vcpu, num, &result, retu);
+ else
+ error = svm_rdmsr(sc, vcpu, num, &result, retu);
+
+ if (error == 0) {
+ state = svm_get_vmcb_state(sc, vcpu);
+ ctx = svm_get_guest_regctx(sc, vcpu);
+ state->rax = result & 0xffffffff;
+ ctx->e.g.sctx_rdx = result >> 32;
+ }
+
+ return (error);
+}
+
#ifdef KTR
static const char *
exit_reason_to_str(uint64_t reason)
@@ -1288,31 +1330,12 @@ svm_vmexit(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
edx = ctx->e.g.sctx_rdx;
retu = false;
- if (ecx == MSR_EFER) {
- KASSERT(info1 != 0, ("rdmsr(MSR_EFER) is not emulated: "
- "info1(%#lx) info2(%#lx)", info1, info2));
- svm_write_efer(svm_sc, vcpu, edx, eax);
- handled = 1;
- break;
- }
-
-#define MSR_AMDK8_IPM 0xc0010055
- /*
- * Ignore access to the "Interrupt Pending Message" MSR.
- */
- if (ecx == MSR_AMDK8_IPM) {
- if (!info1)
- state->rax = ctx->e.g.sctx_rdx = 0;
- handled = 1;
- break;
- }
-
if (info1) {
vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_WRMSR, 1);
val = (uint64_t)edx << 32 | eax;
VCPU_CTR2(svm_sc->vm, vcpu, "wrmsr %#x val %#lx",
ecx, val);
- if (emulate_wrmsr(svm_sc->vm, vcpu, ecx, val, &retu)) {
+ if (emulate_wrmsr(svm_sc, vcpu, ecx, val, &retu)) {
vmexit->exitcode = VM_EXITCODE_WRMSR;
vmexit->u.msr.code = ecx;
vmexit->u.msr.wval = val;
@@ -1325,7 +1348,7 @@ svm_vmexit(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
} else {
VCPU_CTR1(svm_sc->vm, vcpu, "rdmsr %#x", ecx);
vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_RDMSR, 1);
- if (emulate_rdmsr(svm_sc->vm, vcpu, ecx, &retu)) {
+ if (emulate_rdmsr(svm_sc, vcpu, ecx, &retu)) {
vmexit->exitcode = VM_EXITCODE_RDMSR;
vmexit->u.msr.code = ecx;
} else if (!retu) {
@@ -1823,6 +1846,8 @@ svm_vmrun(void *arg, int vcpu, register_t rip, pmap_t pmap,
vmm_stat_incr(vm, vcpu, VCPU_MIGRATIONS, 1);
}
+ svm_msr_guest_enter(svm_sc, vcpu);
+
/* Update Guest RIP */
state->rip = rip;
@@ -1904,6 +1929,8 @@ svm_vmrun(void *arg, int vcpu, register_t rip, pmap_t pmap,
handled = svm_vmexit(svm_sc, vcpu, vmexit);
} while (handled);
+ svm_msr_guest_exit(svm_sc, vcpu);
+
return (0);
}
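On the read side, emulate_rdmsr() above splits the 64-bit result across EDX:EAX, matching the hardware RDMSR convention; the WRMSR path reassembles val the same way. A standalone check of that arithmetic (the MSR value is made up):

    #include <assert.h>
    #include <stdint.h>

    int
    main(void)
    {
    	uint64_t result = 0x1122334455667788ULL;	/* hypothetical MSR value */
    	uint32_t eax = result & 0xffffffff;		/* low half -> RAX */
    	uint32_t edx = result >> 32;			/* high half -> RDX */

    	assert(eax == 0x55667788 && edx == 0x11223344);
    	/* reassembly, as done on the wrmsr side */
    	assert(((uint64_t)edx << 32 | eax) == result);
    	return (0);
    }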
diff --git a/sys/amd64/vmm/amd/svm_msr.c b/sys/amd64/vmm/amd/svm_msr.c
new file mode 100644
index 0000000..100af4b
--- /dev/null
+++ b/sys/amd64/vmm/amd/svm_msr.c
@@ -0,0 +1,136 @@
+/*-
+ * Copyright (c) 2014, Neel Natu (neel@freebsd.org)
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/types.h>
+#include <sys/errno.h>
+
+#include <machine/cpufunc.h>
+#include <machine/specialreg.h>
+
+#include "svm_msr.h"
+
+#ifndef MSR_AMDK8_IPM
+#define MSR_AMDK8_IPM 0xc0010055
+#endif
+
+enum {
+ IDX_MSR_LSTAR,
+ IDX_MSR_CSTAR,
+ IDX_MSR_STAR,
+ IDX_MSR_SF_MASK,
+ HOST_MSR_NUM /* must be the last enumeration */
+};
+
+static uint64_t host_msrs[HOST_MSR_NUM];
+
+void
+svm_msr_init(void)
+{
+ /*
+ * It is safe to cache the values of the following MSRs because they
+ * don't change based on curcpu, curproc or curthread.
+ */
+ host_msrs[IDX_MSR_LSTAR] = rdmsr(MSR_LSTAR);
+ host_msrs[IDX_MSR_CSTAR] = rdmsr(MSR_CSTAR);
+ host_msrs[IDX_MSR_STAR] = rdmsr(MSR_STAR);
+ host_msrs[IDX_MSR_SF_MASK] = rdmsr(MSR_SF_MASK);
+}
+
+void
+svm_msr_guest_init(struct svm_softc *sc, int vcpu)
+{
+ /*
+ * All the MSRs accessible to the guest are either saved/restored by
+ * hardware on every #VMEXIT/VMRUN (e.g., G_PAT) or are saved/restored
+ * by VMSAVE/VMLOAD (e.g., MSR_GSBASE).
+ *
+ * There are no guest MSRs that are saved/restored "by hand" so nothing
+ * more to do here.
+ */
+ return;
+}
+
+void
+svm_msr_guest_enter(struct svm_softc *sc, int vcpu)
+{
+ /*
+ * Save host MSRs (if any) and restore guest MSRs (if any).
+ */
+}
+
+void
+svm_msr_guest_exit(struct svm_softc *sc, int vcpu)
+{
+ /*
+ * Save guest MSRs (if any) and restore host MSRs.
+ */
+ wrmsr(MSR_LSTAR, host_msrs[IDX_MSR_LSTAR]);
+ wrmsr(MSR_CSTAR, host_msrs[IDX_MSR_CSTAR]);
+ wrmsr(MSR_STAR, host_msrs[IDX_MSR_STAR]);
+ wrmsr(MSR_SF_MASK, host_msrs[IDX_MSR_SF_MASK]);
+
+ /* MSR_KGSBASE will be restored on the way back to userspace */
+}
+
+int
+svm_rdmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t *result,
+ bool *retu)
+{
+ int error = 0;
+
+ switch (num) {
+ case MSR_AMDK8_IPM:
+ *result = 0;
+ break;
+ default:
+ error = EINVAL;
+ break;
+ }
+
+ return (error);
+}
+
+int
+svm_wrmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t val, bool *retu)
+{
+ int error = 0;
+
+ switch (num) {
+ case MSR_AMDK8_IPM:
+ /*
+ * Ignore writes to the "Interrupt Pending Message" MSR.
+ */
+ break;
+ default:
+ error = EINVAL;
+ break;
+ }
+
+ return (error);
+}
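Both svm_rdmsr() and svm_wrmsr() return EINVAL for MSRs they do not recognize, which svm_vmexit() turns into VM_EXITCODE_RDMSR/VM_EXITCODE_WRMSR so userspace gets a chance to emulate the access. Extending the switch follows the same shape; a sketch with a made-up MSR number, standalone rather than kernel code:

    #include <errno.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    #define MSR_AMDK8_IPM		0xc0010055
    #define MSR_HYPOTHETICAL_PM	0xc0010056	/* made-up number */

    struct svm_softc;				/* opaque here */

    static int
    svm_wrmsr_sketch(struct svm_softc *sc, int vcpu, unsigned int num,
        uint64_t val, bool *retu)
    {
    	int error = 0;

    	switch (num) {
    	case MSR_AMDK8_IPM:
    	case MSR_HYPOTHETICAL_PM:	/* silently discard the write */
    		break;
    	default:
    		error = EINVAL;		/* caller exits to userspace */
    		break;
    	}
    	return (error);
    }

    int
    main(void)
    {
    	bool retu = false;

    	/* discarded write succeeds; an unknown MSR is rejected */
    	return (svm_wrmsr_sketch(NULL, 0, MSR_AMDK8_IPM, 0, &retu) == 0 &&
    	    svm_wrmsr_sketch(NULL, 0, 0xdeadc0de, 0, &retu) == EINVAL ? 0 : 1);
    }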
diff --git a/sys/amd64/vmm/amd/svm_msr.h b/sys/amd64/vmm/amd/svm_msr.h
new file mode 100644
index 0000000..07716c8
--- /dev/null
+++ b/sys/amd64/vmm/amd/svm_msr.h
@@ -0,0 +1,44 @@
+/*-
+ * Copyright (c) 2014 Neel Natu (neel@freebsd.org)
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SVM_MSR_H_
+#define _SVM_MSR_H_
+
+struct svm_softc;
+
+void svm_msr_init(void);
+void svm_msr_guest_init(struct svm_softc *sc, int vcpu);
+void svm_msr_guest_enter(struct svm_softc *sc, int vcpu);
+void svm_msr_guest_exit(struct svm_softc *sc, int vcpu);
+
+int svm_wrmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t val,
+ bool *retu);
+int svm_rdmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t *result,
+ bool *retu);
+
+#endif /* _SVM_MSR_H_ */
diff --git a/sys/amd64/vmm/intel/ept.c b/sys/amd64/vmm/intel/ept.c
index 5f6c4d0..13c9788 100644
--- a/sys/amd64/vmm/intel/ept.c
+++ b/sys/amd64/vmm/intel/ept.c
@@ -44,7 +44,6 @@ __FBSDID("$FreeBSD$");
#include "vmx_cpufunc.h"
#include "vmm_ipi.h"
-#include "vmx_msr.h"
#include "ept.h"
#define EPT_SUPPORTS_EXEC_ONLY(cap) ((cap) & (1UL << 0))
diff --git a/sys/amd64/vmm/intel/vmcs.h b/sys/amd64/vmm/intel/vmcs.h
index 4e9557c..6122de5 100644
--- a/sys/amd64/vmm/intel/vmcs.h
+++ b/sys/amd64/vmm/intel/vmcs.h
@@ -54,6 +54,10 @@ int vmcs_getdesc(struct vmcs *vmcs, int running, int ident,
int vmcs_setdesc(struct vmcs *vmcs, int running, int ident,
struct seg_desc *desc);
+/*
+ * Avoid header pollution caused by inline use of 'vtophys()' in vmx_cpufunc.h
+ */
+#ifdef _VMX_CPUFUNC_H_
static __inline uint64_t
vmcs_read(uint32_t encoding)
{
@@ -73,6 +77,7 @@ vmcs_write(uint32_t encoding, uint64_t val)
error = vmwrite(encoding, val);
KASSERT(error == 0, ("vmcs_write(%u) error %d", encoding, error));
}
+#endif /* _VMX_CPUFUNC_H_ */
#define vmexit_instruction_length() vmcs_read(VMCS_EXIT_INSTRUCTION_LENGTH)
#define vmcs_guest_rip() vmcs_read(VMCS_GUEST_RIP)
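The new #ifdef _VMX_CPUFUNC_H_ guard exposes the inline vmcs_read()/vmcs_write() only to files that already include vmx_cpufunc.h, so other consumers of vmcs.h do not pick up its vtophys() dependency. The same idiom in miniature, with made-up names and both headers shown as one concatenated snippet:

    /* --- foo.h --- */
    #ifndef _FOO_H_
    #define _FOO_H_
    static inline int foo_prim(int x) { return (x + 1); }
    #endif

    /* --- bar.h --- */
    int bar_slow(int x);		/* always declared */
    #ifdef _FOO_H_			/* inline fast path only for foo.h users */
    static inline int bar_fast(int x) { return (foo_prim(x) * 2); }
    #endif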
diff --git a/sys/amd64/vmm/intel/vmx.c b/sys/amd64/vmm/intel/vmx.c
index 5e42679..64d9fff 100644
--- a/sys/amd64/vmm/intel/vmx.c
+++ b/sys/amd64/vmm/intel/vmx.c
@@ -52,20 +52,20 @@ __FBSDID("$FreeBSD$");
#include <machine/vmm.h>
#include <machine/vmm_dev.h>
#include <machine/vmm_instruction_emul.h>
+#include "vmm_lapic.h"
#include "vmm_host.h"
#include "vmm_ioport.h"
#include "vmm_ipi.h"
-#include "vmm_msr.h"
#include "vmm_ktr.h"
#include "vmm_stat.h"
#include "vatpic.h"
#include "vlapic.h"
#include "vlapic_priv.h"
-#include "vmx_msr.h"
#include "ept.h"
#include "vmx_cpufunc.h"
#include "vmx.h"
+#include "vmx_msr.h"
#include "x86.h"
#include "vmx_controls.h"
@@ -116,12 +116,6 @@ __FBSDID("$FreeBSD$");
VM_ENTRY_INTO_SMM | \
VM_ENTRY_DEACTIVATE_DUAL_MONITOR)
-#define guest_msr_rw(vmx, msr) \
- msr_bitmap_change_access((vmx)->msr_bitmap, (msr), MSR_BITMAP_ACCESS_RW)
-
-#define guest_msr_ro(vmx, msr) \
- msr_bitmap_change_access((vmx)->msr_bitmap, (msr), MSR_BITMAP_ACCESS_READ)
-
#define HANDLED 1
#define UNHANDLED 0
@@ -208,6 +202,7 @@ SYSCTL_UINT(_hw_vmm_vmx, OID_AUTO, vpid_alloc_failed, CTLFLAG_RD,
static int vmx_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc);
static int vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval);
+static int vmxctx_setreg(struct vmxctx *vmxctx, int reg, uint64_t val);
static void vmx_inject_pir(struct vlapic *vlapic);
#ifdef KTR
@@ -475,22 +470,6 @@ vpid_init(void)
}
static void
-msr_save_area_init(struct msr_entry *g_area, int *g_count)
-{
- int cnt;
-
- static struct msr_entry guest_msrs[] = {
- { MSR_KGSBASE, 0, 0 },
- };
-
- cnt = sizeof(guest_msrs) / sizeof(guest_msrs[0]);
- if (cnt > GUEST_MSR_MAX_ENTRIES)
- panic("guest msr save area overrun");
- bcopy(guest_msrs, g_area, sizeof(guest_msrs));
- *g_count = cnt;
-}
-
-static void
vmx_disable(void *arg __unused)
{
struct invvpid_desc invvpid_desc = { 0 };
@@ -655,7 +634,6 @@ vmx_init(int ipinum)
} else {
if (bootverbose)
printf("vmm: PAT MSR access not supported\n");
- guest_msr_valid(MSR_PAT);
vmx_patmsr = 0;
}
}
@@ -800,6 +778,8 @@ vmx_init(int ipinum)
vpid_init();
+ vmx_msr_init();
+
/* enable VMX operation */
smp_rendezvous(NULL, vmx_enable, NULL, NULL);
@@ -869,7 +849,7 @@ static void *
vmx_vminit(struct vm *vm, pmap_t pmap)
{
uint16_t vpid[VM_MAXCPU];
- int i, error, guest_msr_count;
+ int i, error;
struct vmx *vmx;
struct vmcs *vmcs;
@@ -958,6 +938,8 @@ vmx_vminit(struct vm *vm, pmap_t pmap)
error, i);
}
+ vmx_msr_guest_init(vmx, i);
+
error = vmcs_init(vmcs);
KASSERT(error == 0, ("vmcs_init error %d", error));
@@ -996,13 +978,6 @@ vmx_vminit(struct vm *vm, pmap_t pmap)
vmx->state[i].lastcpu = NOCPU;
vmx->state[i].vpid = vpid[i];
- msr_save_area_init(vmx->guest_msrs[i], &guest_msr_count);
-
- error = vmcs_set_msr_save(vmcs, vtophys(vmx->guest_msrs[i]),
- guest_msr_count);
- if (error != 0)
- panic("vmcs_set_msr_save error %d", error);
-
/*
* Set up the CR0/4 shadows, and init the read shadow
* to the power-on register value from the Intel Sys Arch.
@@ -2077,6 +2052,46 @@ vmx_task_switch_reason(uint64_t qual)
}
static int
+emulate_wrmsr(struct vmx *vmx, int vcpuid, u_int num, uint64_t val, bool *retu)
+{
+ int error;
+
+ if (lapic_msr(num))
+ error = lapic_wrmsr(vmx->vm, vcpuid, num, val, retu);
+ else
+ error = vmx_wrmsr(vmx, vcpuid, num, val, retu);
+
+ return (error);
+}
+
+static int
+emulate_rdmsr(struct vmx *vmx, int vcpuid, u_int num, bool *retu)
+{
+ struct vmxctx *vmxctx;
+ uint64_t result;
+ uint32_t eax, edx;
+ int error;
+
+ if (lapic_msr(num))
+ error = lapic_rdmsr(vmx->vm, vcpuid, num, &result, retu);
+ else
+ error = vmx_rdmsr(vmx, vcpuid, num, &result, retu);
+
+ if (error == 0) {
+ eax = result;
+ vmxctx = &vmx->ctx[vcpuid];
+ error = vmxctx_setreg(vmxctx, VM_REG_GUEST_RAX, eax);
+ KASSERT(error == 0, ("vmxctx_setreg(rax) error %d", error));
+
+ edx = result >> 32;
+ error = vmxctx_setreg(vmxctx, VM_REG_GUEST_RDX, edx);
+ KASSERT(error == 0, ("vmxctx_setreg(rdx) error %d", error));
+ }
+
+ return (error);
+}
+
+static int
vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
{
int error, handled, in;
@@ -2214,7 +2229,7 @@ vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
retu = false;
ecx = vmxctx->guest_rcx;
VCPU_CTR1(vmx->vm, vcpu, "rdmsr 0x%08x", ecx);
- error = emulate_rdmsr(vmx->vm, vcpu, ecx, &retu);
+ error = emulate_rdmsr(vmx, vcpu, ecx, &retu);
if (error) {
vmexit->exitcode = VM_EXITCODE_RDMSR;
vmexit->u.msr.code = ecx;
@@ -2223,7 +2238,7 @@ vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
} else {
/* Return to userspace with a valid exitcode */
KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
- ("emulate_wrmsr retu with bogus exitcode"));
+ ("emulate_rdmsr retu with bogus exitcode"));
}
break;
case EXIT_REASON_WRMSR:
@@ -2234,7 +2249,7 @@ vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
edx = vmxctx->guest_rdx;
VCPU_CTR2(vmx->vm, vcpu, "wrmsr 0x%08x value 0x%016lx",
ecx, (uint64_t)edx << 32 | eax);
- error = emulate_wrmsr(vmx->vm, vcpu, ecx,
+ error = emulate_wrmsr(vmx, vcpu, ecx,
(uint64_t)edx << 32 | eax, &retu);
if (error) {
vmexit->exitcode = VM_EXITCODE_WRMSR;
@@ -2522,6 +2537,8 @@ vmx_run(void *arg, int vcpu, register_t startrip, pmap_t pmap,
KASSERT(vmxctx->pmap == pmap,
("pmap %p different than ctx pmap %p", pmap, vmxctx->pmap));
+ vmx_msr_guest_enter(vmx, vcpu);
+
VMPTRLD(vmcs);
/*
@@ -2623,6 +2640,8 @@ vmx_run(void *arg, int vcpu, register_t startrip, pmap_t pmap,
vmexit->exitcode);
VMCLEAR(vmcs);
+ vmx_msr_guest_exit(vmx, vcpu);
+
return (0);
}
diff --git a/sys/amd64/vmm/intel/vmx.h b/sys/amd64/vmm/intel/vmx.h
index 208fcee..2124554 100644
--- a/sys/amd64/vmm/intel/vmx.h
+++ b/sys/amd64/vmm/intel/vmx.h
@@ -33,8 +33,6 @@
struct pmap;
-#define GUEST_MSR_MAX_ENTRIES 64 /* arbitrary */
-
struct vmxctx {
register_t guest_rdi; /* Guest state */
register_t guest_rsi;
@@ -97,13 +95,23 @@ struct pir_desc {
} __aligned(64);
CTASSERT(sizeof(struct pir_desc) == 64);
+/* Index into the 'guest_msrs[]' array */
+enum {
+ IDX_MSR_LSTAR,
+ IDX_MSR_CSTAR,
+ IDX_MSR_STAR,
+ IDX_MSR_SF_MASK,
+ IDX_MSR_KGSBASE,
+ GUEST_MSR_NUM /* must be the last enumeration */
+};
+
/* virtual machine softc */
struct vmx {
struct vmcs vmcs[VM_MAXCPU]; /* one vmcs per virtual cpu */
struct apic_page apic_page[VM_MAXCPU]; /* one apic page per vcpu */
char msr_bitmap[PAGE_SIZE];
struct pir_desc pir_desc[VM_MAXCPU];
- struct msr_entry guest_msrs[VM_MAXCPU][GUEST_MSR_MAX_ENTRIES];
+ uint64_t guest_msrs[VM_MAXCPU][GUEST_MSR_NUM];
struct vmxctx ctx[VM_MAXCPU];
struct vmxcap cap[VM_MAXCPU];
struct vmxstate state[VM_MAXCPU];
@@ -113,7 +121,6 @@ struct vmx {
};
CTASSERT((offsetof(struct vmx, vmcs) & PAGE_MASK) == 0);
CTASSERT((offsetof(struct vmx, msr_bitmap) & PAGE_MASK) == 0);
-CTASSERT((offsetof(struct vmx, guest_msrs) & 15) == 0);
CTASSERT((offsetof(struct vmx, pir_desc[0]) & 63) == 0);
#define VMX_GUEST_VMEXIT 0
diff --git a/sys/amd64/vmm/intel/vmx_msr.c b/sys/amd64/vmm/intel/vmx_msr.c
index a3428db..94d52e3 100644
--- a/sys/amd64/vmm/intel/vmx_msr.c
+++ b/sys/amd64/vmm/intel/vmx_msr.c
@@ -31,10 +31,13 @@ __FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
+#include <sys/cpuset.h>
#include <machine/cpufunc.h>
#include <machine/specialreg.h>
+#include <machine/vmm.h>
+#include "vmx.h"
#include "vmx_msr.h"
static boolean_t
@@ -171,3 +174,115 @@ msr_bitmap_change_access(char *bitmap, u_int msr, int access)
return (0);
}
+
+static uint64_t misc_enable;
+static uint64_t host_msrs[GUEST_MSR_NUM];
+
+void
+vmx_msr_init(void)
+{
+ /*
+ * It is safe to cache the values of the following MSRs because
+ * they don't change based on curcpu, curproc or curthread.
+ */
+ host_msrs[IDX_MSR_LSTAR] = rdmsr(MSR_LSTAR);
+ host_msrs[IDX_MSR_CSTAR] = rdmsr(MSR_CSTAR);
+ host_msrs[IDX_MSR_STAR] = rdmsr(MSR_STAR);
+ host_msrs[IDX_MSR_SF_MASK] = rdmsr(MSR_SF_MASK);
+
+ /*
+ * Initialize emulated MSRs
+ */
+ misc_enable = rdmsr(MSR_IA32_MISC_ENABLE);
+ /*
+ * Set mandatory bits
+ * 11: branch trace disabled
+ * 12: PEBS unavailable
+ * Clear unsupported features
+ * 16: SpeedStep enable
+ * 18: enable MONITOR FSM
+ */
+ misc_enable |= (1 << 12) | (1 << 11);
+ misc_enable &= ~((1 << 18) | (1 << 16));
+}
+
+void
+vmx_msr_guest_init(struct vmx *vmx, int vcpuid)
+{
+ /*
+ * The permissions bitmap is shared between all vcpus so initialize it
+ * once when initializing the vBSP.
+ */
+ if (vcpuid == 0) {
+ guest_msr_rw(vmx, MSR_LSTAR);
+ guest_msr_rw(vmx, MSR_CSTAR);
+ guest_msr_rw(vmx, MSR_STAR);
+ guest_msr_rw(vmx, MSR_SF_MASK);
+ guest_msr_rw(vmx, MSR_KGSBASE);
+ }
+ return;
+}
+
+void
+vmx_msr_guest_enter(struct vmx *vmx, int vcpuid)
+{
+ uint64_t *guest_msrs = vmx->guest_msrs[vcpuid];
+
+ /* Save host MSRs (if any) and restore guest MSRs */
+ wrmsr(MSR_LSTAR, guest_msrs[IDX_MSR_LSTAR]);
+ wrmsr(MSR_CSTAR, guest_msrs[IDX_MSR_CSTAR]);
+ wrmsr(MSR_STAR, guest_msrs[IDX_MSR_STAR]);
+ wrmsr(MSR_SF_MASK, guest_msrs[IDX_MSR_SF_MASK]);
+ wrmsr(MSR_KGSBASE, guest_msrs[IDX_MSR_KGSBASE]);
+}
+
+void
+vmx_msr_guest_exit(struct vmx *vmx, int vcpuid)
+{
+ uint64_t *guest_msrs = vmx->guest_msrs[vcpuid];
+
+ /* Save guest MSRs */
+ guest_msrs[IDX_MSR_LSTAR] = rdmsr(MSR_LSTAR);
+ guest_msrs[IDX_MSR_CSTAR] = rdmsr(MSR_CSTAR);
+ guest_msrs[IDX_MSR_STAR] = rdmsr(MSR_STAR);
+ guest_msrs[IDX_MSR_SF_MASK] = rdmsr(MSR_SF_MASK);
+ guest_msrs[IDX_MSR_KGSBASE] = rdmsr(MSR_KGSBASE);
+
+ /* Restore host MSRs */
+ wrmsr(MSR_LSTAR, host_msrs[IDX_MSR_LSTAR]);
+ wrmsr(MSR_CSTAR, host_msrs[IDX_MSR_CSTAR]);
+ wrmsr(MSR_STAR, host_msrs[IDX_MSR_STAR]);
+ wrmsr(MSR_SF_MASK, host_msrs[IDX_MSR_SF_MASK]);
+
+ /* MSR_KGSBASE will be restored on the way back to userspace */
+}
+
+int
+vmx_rdmsr(struct vmx *vmx, int vcpuid, u_int num, uint64_t *val, bool *retu)
+{
+ int error = 0;
+
+ switch (num) {
+ case MSR_IA32_MISC_ENABLE:
+ *val = misc_enable;
+ break;
+ default:
+ error = EINVAL;
+ break;
+ }
+ return (error);
+}
+
+int
+vmx_wrmsr(struct vmx *vmx, int vcpuid, u_int num, uint64_t val, bool *retu)
+{
+ int error = 0;
+
+ switch (num) {
+ default:
+ error = EINVAL;
+ break;
+ }
+
+ return (error);
+}
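The MSR_IA32_MISC_ENABLE fixup forces bits 11 and 12 on (branch trace and PEBS unavailable) and clears bits 16 and 18 (SpeedStep and MONITOR FSM). A standalone check of that bit arithmetic, starting from a hypothetical host value:

    #include <assert.h>
    #include <stdint.h>

    int
    main(void)
    {
    	uint64_t misc = 0x850089ULL;		/* hypothetical host value */

    	misc |= (1 << 12) | (1 << 11);		/* BTS, PEBS unavailable */
    	misc &= ~((1 << 18) | (1 << 16));	/* hide MONITOR FSM, SpeedStep */

    	assert(misc & (1 << 11));
    	assert(misc & (1 << 12));
    	assert((misc & (1 << 16)) == 0);
    	assert((misc & (1 << 18)) == 0);
    	return (0);
    }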
diff --git a/sys/amd64/vmm/intel/vmx_msr.h b/sys/amd64/vmm/intel/vmx_msr.h
index 340b0f7..e77881c 100644
--- a/sys/amd64/vmm/intel/vmx_msr.h
+++ b/sys/amd64/vmm/intel/vmx_msr.h
@@ -29,6 +29,15 @@
#ifndef _VMX_MSR_H_
#define _VMX_MSR_H_
+struct vmx;
+
+void vmx_msr_init(void);
+void vmx_msr_guest_init(struct vmx *vmx, int vcpuid);
+void vmx_msr_guest_enter(struct vmx *vmx, int vcpuid);
+void vmx_msr_guest_exit(struct vmx *vmx, int vcpuid);
+int vmx_rdmsr(struct vmx *, int vcpuid, u_int num, uint64_t *val, bool *retu);
+int vmx_wrmsr(struct vmx *, int vcpuid, u_int num, uint64_t val, bool *retu);
+
uint32_t vmx_revision(void);
int vmx_set_ctlreg(int ctl_reg, int true_ctl_reg, uint32_t ones_mask,
@@ -52,4 +61,10 @@ int vmx_set_ctlreg(int ctl_reg, int true_ctl_reg, uint32_t ones_mask,
void msr_bitmap_initialize(char *bitmap);
int msr_bitmap_change_access(char *bitmap, u_int msr, int access);
+#define guest_msr_rw(vmx, msr) \
+ msr_bitmap_change_access((vmx)->msr_bitmap, (msr), MSR_BITMAP_ACCESS_RW)
+
+#define guest_msr_ro(vmx, msr) \
+ msr_bitmap_change_access((vmx)->msr_bitmap, (msr), MSR_BITMAP_ACCESS_READ)
+
#endif
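guest_msr_rw() and guest_msr_ro(), now exported here, flip per-MSR bits in the shared permission bitmap so guest accesses bypass the intercept entirely; vmx_msr_guest_init() uses the read-write form for the syscall MSRs it context-switches by hand. A hedged usage sketch in kernel context (the MSR choice is illustrative, not part of this commit):

    /*
     * Sketch: grant the guest direct access to one more MSR during vBSP
     * init. A non-zero return means the MSR falls outside the ranges the
     * permission bitmap can express.
     */
    if (guest_msr_rw(vmx, MSR_FSBASE) != 0)
    	panic("MSR_FSBASE not mappable in the MSR permission bitmap");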
diff --git a/sys/amd64/vmm/vmm.c b/sys/amd64/vmm/vmm.c
index 7a3ccc8..26f7c59 100644
--- a/sys/amd64/vmm/vmm.c
+++ b/sys/amd64/vmm/vmm.c
@@ -74,7 +74,6 @@ __FBSDID("$FreeBSD$");
#include "vhpet.h"
#include "vioapic.h"
#include "vlapic.h"
-#include "vmm_msr.h"
#include "vmm_ipi.h"
#include "vmm_stat.h"
#include "vmm_lapic.h"
@@ -105,7 +104,6 @@ struct vcpu {
struct savefpu *guestfpu; /* (a,i) guest fpu state */
uint64_t guest_xcr0; /* (i) guest %xcr0 register */
void *stats; /* (a,i) statistics */
- uint64_t guest_msrs[VMM_MSR_NUM]; /* (i) emulated MSRs */
struct vm_exit exitinfo; /* (x) exit reason and collateral */
};
@@ -188,7 +186,6 @@ static struct vmm_ops *ops;
#define fpu_stop_emulating() clts()
static MALLOC_DEFINE(M_VM, "vm", "vm");
-CTASSERT(VMM_MSR_NUM <= 64); /* msr_mask can keep track of up to 64 msrs */
/* statistics */
static VMM_STAT(VCPU_TOTAL_RUNTIME, "vcpu total runtime");
@@ -249,7 +246,6 @@ vcpu_init(struct vm *vm, int vcpu_id, bool create)
vcpu->guest_xcr0 = XFEATURE_ENABLED_X87;
fpu_save_area_reset(vcpu->guestfpu);
vmm_stat_init(vcpu->stats);
- guest_msrs_init(vm, vcpu_id);
}
struct vm_exit *
@@ -293,7 +289,6 @@ vmm_init(void)
else
return (ENXIO);
- vmm_msr_init();
vmm_resume_p = vmm_resume;
return (VMM_INIT(vmm_ipinum));
@@ -1456,7 +1451,6 @@ restart:
pcb = PCPU_GET(curpcb);
set_pcb_flags(pcb, PCB_FULL_IRET);
- restore_guest_msrs(vm, vcpuid);
restore_guest_fpustate(vcpu);
vcpu_require_state(vm, vcpuid, VCPU_RUNNING);
@@ -1464,7 +1458,6 @@ restart:
vcpu_require_state(vm, vcpuid, VCPU_FROZEN);
save_guest_fpustate(vcpu);
- restore_host_msrs(vm, vcpuid);
vmm_stat_incr(vm, vcpuid, VCPU_TOTAL_RUNTIME, rdtsc() - tscval);
@@ -1906,12 +1899,6 @@ vm_set_capability(struct vm *vm, int vcpu, int type, int val)
return (VMSETCAP(vm->cookie, vcpu, type, val));
}
-uint64_t *
-vm_guest_msrs(struct vm *vm, int cpu)
-{
- return (vm->vcpu[cpu].guest_msrs);
-}
-
struct vlapic *
vm_lapic(struct vm *vm, int cpu)
{
diff --git a/sys/amd64/vmm/vmm_msr.c b/sys/amd64/vmm/vmm_msr.c
deleted file mode 100644
index b4abc90..0000000
--- a/sys/amd64/vmm/vmm_msr.c
+++ /dev/null
@@ -1,281 +0,0 @@
-/*-
- * Copyright (c) 2011 NetApp, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * $FreeBSD$
- */
-
-#include <sys/cdefs.h>
-__FBSDID("$FreeBSD$");
-
-#include <sys/param.h>
-#include <sys/systm.h>
-#include <sys/smp.h>
-
-#include <machine/specialreg.h>
-
-#include <machine/vmm.h>
-#include "vmm_lapic.h"
-#include "vmm_msr.h"
-
-#define VMM_MSR_F_EMULATE 0x01
-#define VMM_MSR_F_READONLY 0x02
-#define VMM_MSR_F_INVALID 0x04 /* guest_msr_valid() can override this */
-
-struct vmm_msr {
- int num;
- int flags;
- uint64_t hostval;
-};
-
-static struct vmm_msr vmm_msr[] = {
- { MSR_LSTAR, 0 },
- { MSR_CSTAR, 0 },
- { MSR_STAR, 0 },
- { MSR_SF_MASK, 0 },
- { MSR_PAT, VMM_MSR_F_EMULATE | VMM_MSR_F_INVALID },
- { MSR_BIOS_SIGN,VMM_MSR_F_EMULATE },
- { MSR_MCG_CAP, VMM_MSR_F_EMULATE | VMM_MSR_F_READONLY },
-#if 0
- { MSR_IA32_PLATFORM_ID, VMM_MSR_F_EMULATE | VMM_MSR_F_READONLY },
- { MSR_IA32_MISC_ENABLE, VMM_MSR_F_EMULATE | VMM_MSR_F_READONLY },
-#endif
-};
-
-#define vmm_msr_num (sizeof(vmm_msr) / sizeof(vmm_msr[0]))
-CTASSERT(VMM_MSR_NUM >= vmm_msr_num);
-
-#define readonly_msr(idx) \
- ((vmm_msr[(idx)].flags & VMM_MSR_F_READONLY) != 0)
-
-#define emulated_msr(idx) \
- ((vmm_msr[(idx)].flags & VMM_MSR_F_EMULATE) != 0)
-
-#define invalid_msr(idx) \
- ((vmm_msr[(idx)].flags & VMM_MSR_F_INVALID) != 0)
-
-void
-vmm_msr_init(void)
-{
- int i;
-
- for (i = 0; i < vmm_msr_num; i++) {
- if (emulated_msr(i))
- continue;
- /*
- * XXX this assumes that the value of the host msr does not
- * change after we have cached it.
- */
- vmm_msr[i].hostval = rdmsr(vmm_msr[i].num);
- }
-}
-
-void
-guest_msrs_init(struct vm *vm, int cpu)
-{
- int i;
-#if 0
- uint64_t *guest_msrs, misc;
-#else
- uint64_t *guest_msrs;
-#endif
-
- guest_msrs = vm_guest_msrs(vm, cpu);
-
- for (i = 0; i < vmm_msr_num; i++) {
- switch (vmm_msr[i].num) {
- case MSR_LSTAR:
- case MSR_CSTAR:
- case MSR_STAR:
- case MSR_SF_MASK:
- case MSR_BIOS_SIGN:
- case MSR_MCG_CAP:
- guest_msrs[i] = 0;
- break;
- case MSR_PAT:
- guest_msrs[i] = PAT_VALUE(0, PAT_WRITE_BACK) |
- PAT_VALUE(1, PAT_WRITE_THROUGH) |
- PAT_VALUE(2, PAT_UNCACHED) |
- PAT_VALUE(3, PAT_UNCACHEABLE) |
- PAT_VALUE(4, PAT_WRITE_BACK) |
- PAT_VALUE(5, PAT_WRITE_THROUGH) |
- PAT_VALUE(6, PAT_UNCACHED) |
- PAT_VALUE(7, PAT_UNCACHEABLE);
- break;
-#if 0 /* XXX unsupported on AMD */
- case MSR_IA32_MISC_ENABLE:
- misc = rdmsr(MSR_IA32_MISC_ENABLE);
- /*
- * Set mandatory bits
- * 11: branch trace disabled
- * 12: PEBS unavailable
- * Clear unsupported features
- * 16: SpeedStep enable
- * 18: enable MONITOR FSM
- */
- misc |= (1 << 12) | (1 << 11);
- misc &= ~((1 << 18) | (1 << 16));
- guest_msrs[i] = misc;
- break;
- case MSR_IA32_PLATFORM_ID:
- guest_msrs[i] = 0;
- break;
-#endif
- default:
- panic("guest_msrs_init: missing initialization for msr "
- "0x%0x", vmm_msr[i].num);
- }
- }
-}
-
-static int
-msr_num_to_idx(u_int num)
-{
- int i;
-
- for (i = 0; i < vmm_msr_num; i++)
- if (vmm_msr[i].num == num)
- return (i);
-
- return (-1);
-}
-
-int
-emulate_wrmsr(struct vm *vm, int cpu, u_int num, uint64_t val, bool *retu)
-{
- int idx;
- uint64_t *guest_msrs;
-
- if (lapic_msr(num))
- return (lapic_wrmsr(vm, cpu, num, val, retu));
-
- idx = msr_num_to_idx(num);
- if (idx < 0 || invalid_msr(idx))
- return (EINVAL);
-
- if (!readonly_msr(idx)) {
- guest_msrs = vm_guest_msrs(vm, cpu);
-
- /* Stash the value */
- guest_msrs[idx] = val;
-
- /* Update processor state for non-emulated MSRs */
- if (!emulated_msr(idx))
- wrmsr(vmm_msr[idx].num, val);
- }
-
- return (0);
-}
-
-int
-emulate_rdmsr(struct vm *vm, int cpu, u_int num, bool *retu)
-{
- int error, idx;
- uint32_t eax, edx;
- uint64_t result, *guest_msrs;
-
- if (lapic_msr(num)) {
- error = lapic_rdmsr(vm, cpu, num, &result, retu);
- goto done;
- }
-
- idx = msr_num_to_idx(num);
- if (idx < 0 || invalid_msr(idx)) {
- error = EINVAL;
- goto done;
- }
-
- guest_msrs = vm_guest_msrs(vm, cpu);
- result = guest_msrs[idx];
-
- /*
- * If this is not an emulated msr register make sure that the processor
- * state matches our cached state.
- */
- if (!emulated_msr(idx) && (rdmsr(num) != result)) {
- panic("emulate_rdmsr: msr 0x%0x has inconsistent cached "
- "(0x%016lx) and actual (0x%016lx) values", num,
- result, rdmsr(num));
- }
-
- error = 0;
-
-done:
- if (error == 0) {
- eax = result;
- edx = result >> 32;
- error = vm_set_register(vm, cpu, VM_REG_GUEST_RAX, eax);
- if (error)
- panic("vm_set_register(rax) error %d", error);
- error = vm_set_register(vm, cpu, VM_REG_GUEST_RDX, edx);
- if (error)
- panic("vm_set_register(rdx) error %d", error);
- }
- return (error);
-}
-
-void
-restore_guest_msrs(struct vm *vm, int cpu)
-{
- int i;
- uint64_t *guest_msrs;
-
- guest_msrs = vm_guest_msrs(vm, cpu);
-
- for (i = 0; i < vmm_msr_num; i++) {
- if (emulated_msr(i))
- continue;
- else
- wrmsr(vmm_msr[i].num, guest_msrs[i]);
- }
-}
-
-void
-restore_host_msrs(struct vm *vm, int cpu)
-{
- int i;
-
- for (i = 0; i < vmm_msr_num; i++) {
- if (emulated_msr(i))
- continue;
- else
- wrmsr(vmm_msr[i].num, vmm_msr[i].hostval);
- }
-}
-
-/*
- * Must be called by the CPU-specific code before any guests are
- * created
- */
-void
-guest_msr_valid(int msr)
-{
- int i;
-
- for (i = 0; i < vmm_msr_num; i++) {
- if (vmm_msr[i].num == msr && invalid_msr(i)) {
- vmm_msr[i].flags &= ~VMM_MSR_F_INVALID;
- }
- }
-}
diff --git a/sys/amd64/vmm/vmm_msr.h b/sys/amd64/vmm/vmm_msr.h
deleted file mode 100644
index e070037..0000000
--- a/sys/amd64/vmm/vmm_msr.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/*-
- * Copyright (c) 2011 NetApp, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * $FreeBSD$
- */
-
-#ifndef _VMM_MSR_H_
-#define _VMM_MSR_H_
-
-#define VMM_MSR_NUM 16
-struct vm;
-
-void vmm_msr_init(void);
-int emulate_wrmsr(struct vm *vm, int vcpu, u_int msr, uint64_t val,
- bool *retu);
-int emulate_rdmsr(struct vm *vm, int vcpu, u_int msr, bool *retu);
-void guest_msrs_init(struct vm *vm, int cpu);
-void guest_msr_valid(int msr);
-void restore_host_msrs(struct vm *vm, int cpu);
-void restore_guest_msrs(struct vm *vm, int cpu);
-
-#endif
diff --git a/sys/modules/vmm/Makefile b/sys/modules/vmm/Makefile
index 7ee3942..5bb31f5 100644
--- a/sys/modules/vmm/Makefile
+++ b/sys/modules/vmm/Makefile
@@ -20,7 +20,6 @@ SRCS+= vmm.c \
vmm_ipi.c \
vmm_lapic.c \
vmm_mem.c \
- vmm_msr.c \
vmm_stat.c \
vmm_util.c \
x86.c \
@@ -48,7 +47,8 @@ SRCS+= ept.c \
SRCS+= vmcb.c \
svm.c \
npt.c \
- amdv.c
+ amdv.c \
+ svm_msr.c
OBJS= vmx_support.o svm_support.o
diff --git a/sys/x86/include/specialreg.h b/sys/x86/include/specialreg.h
index 8e2808b..0e7bb44 100644
--- a/sys/x86/include/specialreg.h
+++ b/sys/x86/include/specialreg.h
@@ -438,6 +438,10 @@
#define MSR_MC4_STATUS 0x411
#define MSR_MC4_ADDR 0x412
#define MSR_MC4_MISC 0x413
+#define MSR_PKG_ENERGY_STATUS 0x611
+#define MSR_DRAM_ENERGY_STATUS 0x619
+#define MSR_PP0_ENERGY_STATUS 0x639
+#define MSR_PP1_ENERGY_STATUS 0x641
/*
* VMX MSRs