summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorneel <neel@FreeBSD.org>2015-02-24 05:35:15 +0000
committerneel <neel@FreeBSD.org>2015-02-24 05:35:15 +0000
commitf458d8769b166545af7d9b45e377be8f224f26dc (patch)
tree836e306eea311a27ee3f92ec921f42937f046c77
parent16ee311597eea5a4e6c57c4fcab81bbce813ce82 (diff)
downloadFreeBSD-src-f458d8769b166545af7d9b45e377be8f224f26dc.zip
FreeBSD-src-f458d8769b166545af7d9b45e377be8f224f26dc.tar.gz
Always emulate MSR_PAT on Intel processors and don't rely on PAT save/restore
capability of VT-x. This lets bhyve run nested in older VMware versions that
don't support the PAT save/restore capability.

Note that the actual value programmed by the guest in MSR_PAT is irrelevant
because bhyve sets the 'Ignore PAT' bit in the nested PTE.

Reported by:	marcel
Tested by:	Leon Dang (ldang@nahannisys.com)
Sponsored by:	Nahanni Systems
MFC after:	2 weeks
-rw-r--r--sys/amd64/vmm/intel/vmcs.c12
-rw-r--r--sys/amd64/vmm/intel/vmx.c11
-rw-r--r--sys/amd64/vmm/intel/vmx.h1
-rw-r--r--sys/amd64/vmm/intel/vmx_msr.c54
4 files changed, 56 insertions(+), 22 deletions(-)
diff --git a/sys/amd64/vmm/intel/vmcs.c b/sys/amd64/vmm/intel/vmcs.c
index ae4d9db..5962526 100644
--- a/sys/amd64/vmm/intel/vmcs.c
+++ b/sys/amd64/vmm/intel/vmcs.c
@@ -342,18 +342,6 @@ vmcs_init(struct vmcs *vmcs)
*/
VMPTRLD(vmcs);
- /* Initialize guest IA32_PAT MSR with the default value */
- pat = PAT_VALUE(0, PAT_WRITE_BACK) |
- PAT_VALUE(1, PAT_WRITE_THROUGH) |
- PAT_VALUE(2, PAT_UNCACHED) |
- PAT_VALUE(3, PAT_UNCACHEABLE) |
- PAT_VALUE(4, PAT_WRITE_BACK) |
- PAT_VALUE(5, PAT_WRITE_THROUGH) |
- PAT_VALUE(6, PAT_UNCACHED) |
- PAT_VALUE(7, PAT_UNCACHEABLE);
- if ((error = vmwrite(VMCS_GUEST_IA32_PAT, pat)) != 0)
- goto done;
-
/* Host state */
/* Initialize host IA32_PAT MSR */
diff --git a/sys/amd64/vmm/intel/vmx.c b/sys/amd64/vmm/intel/vmx.c
index c83a8b2..b81e48b 100644
--- a/sys/amd64/vmm/intel/vmx.c
+++ b/sys/amd64/vmm/intel/vmx.c
@@ -100,13 +100,11 @@ __FBSDID("$FreeBSD$");
(VM_EXIT_HOST_LMA | \
VM_EXIT_SAVE_EFER | \
VM_EXIT_LOAD_EFER | \
- VM_EXIT_ACKNOWLEDGE_INTERRUPT | \
- VM_EXIT_SAVE_PAT | \
- VM_EXIT_LOAD_PAT)
+ VM_EXIT_ACKNOWLEDGE_INTERRUPT)
#define VM_EXIT_CTLS_ZERO_SETTING VM_EXIT_SAVE_DEBUG_CONTROLS
-#define VM_ENTRY_CTLS_ONE_SETTING (VM_ENTRY_LOAD_EFER | VM_ENTRY_LOAD_PAT)
+#define VM_ENTRY_CTLS_ONE_SETTING (VM_ENTRY_LOAD_EFER)
#define VM_ENTRY_CTLS_ZERO_SETTING \
(VM_ENTRY_LOAD_DEBUG_CONTROLS | \
@@ -859,10 +857,6 @@ vmx_vminit(struct vm *vm, pmap_t pmap)
* VM exit and entry respectively. It is also restored from the
* host VMCS area on a VM exit.
*
- * MSR_PAT is saved and restored in the guest VMCS are on a VM exit
- * and entry respectively. It is also restored from the host VMCS
- * area on a VM exit.
- *
* The TSC MSR is exposed read-only. Writes are disallowed as that
* will impact the host TSC.
* XXX Writes would be implemented with a wrmsr trap, and
@@ -874,7 +868,6 @@ vmx_vminit(struct vm *vm, pmap_t pmap)
guest_msr_rw(vmx, MSR_SYSENTER_ESP_MSR) ||
guest_msr_rw(vmx, MSR_SYSENTER_EIP_MSR) ||
guest_msr_rw(vmx, MSR_EFER) ||
- guest_msr_rw(vmx, MSR_PAT) ||
guest_msr_ro(vmx, MSR_TSC))
panic("vmx_vminit: error setting guest msr access");
diff --git a/sys/amd64/vmm/intel/vmx.h b/sys/amd64/vmm/intel/vmx.h
index cfd2599..bc48861 100644
--- a/sys/amd64/vmm/intel/vmx.h
+++ b/sys/amd64/vmm/intel/vmx.h
@@ -103,6 +103,7 @@ enum {
IDX_MSR_STAR,
IDX_MSR_SF_MASK,
IDX_MSR_KGSBASE,
+ IDX_MSR_PAT,
GUEST_MSR_NUM /* must be the last enumeration */
};
diff --git a/sys/amd64/vmm/intel/vmx_msr.c b/sys/amd64/vmm/intel/vmx_msr.c
index f6bbf2a..e517778 100644
--- a/sys/amd64/vmm/intel/vmx_msr.c
+++ b/sys/amd64/vmm/intel/vmx_msr.c
@@ -230,6 +230,25 @@ westmere_cpu(void)
return (false);
}
+static bool
+pat_valid(uint64_t val)
+{
+ int i, pa;
+
+ /*
+ * From Intel SDM: Table "Memory Types That Can Be Encoded With PAT"
+ *
+ * Extract PA0 through PA7 and validate that each one encodes a
+ * valid memory type.
+ */
+ for (i = 0; i < 8; i++) {
+ pa = (val >> (i * 8)) & 0xff;
+ if (pa == 2 || pa == 3 || pa >= 8)
+ return (false);
+ }
+ return (true);
+}
+
void
vmx_msr_init(void)
{
@@ -302,6 +321,10 @@ vmx_msr_init(void)
void
vmx_msr_guest_init(struct vmx *vmx, int vcpuid)
{
+ uint64_t *guest_msrs;
+
+ guest_msrs = vmx->guest_msrs[vcpuid];
+
/*
* The permissions bitmap is shared between all vcpus so initialize it
* once when initializing the vBSP.
@@ -313,6 +336,19 @@ vmx_msr_guest_init(struct vmx *vmx, int vcpuid)
guest_msr_rw(vmx, MSR_SF_MASK);
guest_msr_rw(vmx, MSR_KGSBASE);
}
+
+ /*
+ * Initialize guest IA32_PAT MSR with default value after reset.
+ */
+ guest_msrs[IDX_MSR_PAT] = PAT_VALUE(0, PAT_WRITE_BACK) |
+ PAT_VALUE(1, PAT_WRITE_THROUGH) |
+ PAT_VALUE(2, PAT_UNCACHED) |
+ PAT_VALUE(3, PAT_UNCACHEABLE) |
+ PAT_VALUE(4, PAT_WRITE_BACK) |
+ PAT_VALUE(5, PAT_WRITE_THROUGH) |
+ PAT_VALUE(6, PAT_UNCACHED) |
+ PAT_VALUE(7, PAT_UNCACHEABLE);
+
return;
}
@@ -353,7 +389,11 @@ vmx_msr_guest_exit(struct vmx *vmx, int vcpuid)
int
vmx_rdmsr(struct vmx *vmx, int vcpuid, u_int num, uint64_t *val, bool *retu)
{
- int error = 0;
+ const uint64_t *guest_msrs;
+ int error;
+
+ guest_msrs = vmx->guest_msrs[vcpuid];
+ error = 0;
switch (num) {
case MSR_IA32_MISC_ENABLE:
@@ -366,6 +406,9 @@ vmx_rdmsr(struct vmx *vmx, int vcpuid, u_int num, uint64_t *val, bool *retu)
case MSR_TURBO_RATIO_LIMIT1:
*val = turbo_ratio_limit;
break;
+ case MSR_PAT:
+ *val = guest_msrs[IDX_MSR_PAT];
+ break;
default:
error = EINVAL;
break;
@@ -376,10 +419,13 @@ vmx_rdmsr(struct vmx *vmx, int vcpuid, u_int num, uint64_t *val, bool *retu)
int
vmx_wrmsr(struct vmx *vmx, int vcpuid, u_int num, uint64_t val, bool *retu)
{
+ uint64_t *guest_msrs;
uint64_t changed;
int error;
+ guest_msrs = vmx->guest_msrs[vcpuid];
error = 0;
+
switch (num) {
case MSR_IA32_MISC_ENABLE:
changed = val ^ misc_enable;
@@ -401,6 +447,12 @@ vmx_wrmsr(struct vmx *vmx, int vcpuid, u_int num, uint64_t val, bool *retu)
error = EINVAL;
break;
+ case MSR_PAT:
+ if (pat_valid(val))
+ guest_msrs[IDX_MSR_PAT] = val;
+ else
+ vm_inject_gp(vmx->vm, vcpuid);
+ break;
default:
error = EINVAL;
break;
OpenPOWER on IntegriCloud