commit    3b591af2d9206cc8238757dc71fb88cbc8126e54
tree      4a70e257b106bcb834d9b41a09e1f5116dbfb134
parent    80b8a49df45350ef1df84cb477766defc817ae56
author    neel <neel@FreeBSD.org>  2014-12-30 00:00:42 +0000
committer neel <neel@FreeBSD.org>  2014-12-30 00:00:42 +0000
MFC r261321
Rename the AMD MSR_PERFCTR[0-3] so the Pentium Pro MSR_PERFCTR[0-1] aren't
redefined.

MFC r273214
Fix build to not bogusly always rebuild vmm.ko.

MFC r273338
Add support for AMD's nested page tables in pmap.c:
- Provide the correct bit mask for various bit fields in a PTE (e.g. valid
  bit) for a pmap of type PT_RVI.
- Add a function 'pmap_type_guest(pmap)' that returns TRUE if the pmap is of
  type PT_EPT or PT_RVI.

Add CPU_SET_ATOMIC_ACQ(num, cpuset):
This is used when activating a vcpu in the nested pmap. Using the 'acquire'
variant guarantees that the load of the 'pm_eptgen' will happen only after
the vcpu is activated in 'pm_active'.

Add defines for various AMD-specific MSRs.

Discussed with:	kib (r261321)
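A note on why the 'acquire' variant matters: as the diff below shows, the
invalidation paths bump 'pm_eptgen' for PT_RVI/PT_EPT pmaps and only reach
vcpus already visible in 'pm_active', so the generation load must not be
reordered before the activation. The following is a minimal user-space
sketch of that ordering using C11 atomics; 'pm_active' and 'pm_eptgen' are
illustrative stand-ins for the struct pmap fields, not the kernel's actual
CPU_SET_ATOMIC_ACQ implementation.

/*
 * Sketch only: models CPU_SET_ATOMIC_ACQ's acquire semantics with
 * atomic_fetch_or_explicit(..., memory_order_acquire).
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t pm_active;	/* bitmask of vcpus active in the pmap */
static _Atomic uint64_t pm_eptgen;	/* nested-TLB generation counter */

static uint64_t
activate_vcpu(int cpuid)
{
	/*
	 * Set our bit with acquire semantics: the pm_eptgen load below
	 * cannot be reordered before the pm_active update, so we never
	 * observe a generation older than our activation.
	 */
	atomic_fetch_or_explicit(&pm_active, 1ULL << cpuid,
	    memory_order_acquire);
	return (atomic_load_explicit(&pm_eptgen, memory_order_relaxed));
}

int
main(void)
{
	printf("eptgen at activation: %lu\n",
	    (unsigned long)activate_vcpu(0));
	return (0);
}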
Diffstat (limited to 'sys/amd64')
-rw-r--r--	sys/amd64/amd64/pmap.c			29
-rw-r--r--	sys/amd64/vmm/intel/vmx_support.S	 2
2 files changed, 25 insertions(+), 6 deletions(-)
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index f77edcb..6d894f3 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -146,6 +146,13 @@ __FBSDID("$FreeBSD$");
#endif
static __inline boolean_t
+pmap_type_guest(pmap_t pmap)
+{
+
+ return ((pmap->pm_type == PT_EPT) || (pmap->pm_type == PT_RVI));
+}
+
+static __inline boolean_t
pmap_emulate_ad_bits(pmap_t pmap)
{
@@ -159,6 +166,7 @@ pmap_valid_bit(pmap_t pmap)
switch (pmap->pm_type) {
case PT_X86:
+ case PT_RVI:
mask = X86_PG_V;
break;
case PT_EPT:
@@ -181,6 +189,7 @@ pmap_rw_bit(pmap_t pmap)
switch (pmap->pm_type) {
case PT_X86:
+ case PT_RVI:
mask = X86_PG_RW;
break;
case PT_EPT:
@@ -205,6 +214,7 @@ pmap_global_bit(pmap_t pmap)
case PT_X86:
mask = X86_PG_G;
break;
+ case PT_RVI:
case PT_EPT:
mask = 0;
break;
@@ -222,6 +232,7 @@ pmap_accessed_bit(pmap_t pmap)
switch (pmap->pm_type) {
case PT_X86:
+ case PT_RVI:
mask = X86_PG_A;
break;
case PT_EPT:
@@ -244,6 +255,7 @@ pmap_modified_bit(pmap_t pmap)
switch (pmap->pm_type) {
case PT_X86:
+ case PT_RVI:
mask = X86_PG_M;
break;
case PT_EPT:
@@ -1103,6 +1115,7 @@ pmap_swap_pat(pmap_t pmap, pt_entry_t entry)
switch (pmap->pm_type) {
case PT_X86:
+ case PT_RVI:
/* Verify that both PAT bits are not set at the same time */
KASSERT((entry & x86_pat_bits) != x86_pat_bits,
("Invalid PAT bits in entry %#lx", entry));
@@ -1138,6 +1151,7 @@ pmap_cache_bits(pmap_t pmap, int mode, boolean_t is_pde)
switch (pmap->pm_type) {
case PT_X86:
+ case PT_RVI:
/* The PAT bit is different for PTE's and PDE's. */
pat_flag = is_pde ? X86_PG_PDE_PAT : X86_PG_PTE_PAT;
@@ -1172,6 +1186,7 @@ pmap_cache_mask(pmap_t pmap, boolean_t is_pde)
switch (pmap->pm_type) {
case PT_X86:
+ case PT_RVI:
mask = is_pde ? X86_PG_PDE_CACHE : X86_PG_PTE_CACHE;
break;
case PT_EPT:
@@ -1198,6 +1213,7 @@ pmap_update_pde_store(pmap_t pmap, pd_entry_t *pde, pd_entry_t newpde)
switch (pmap->pm_type) {
case PT_X86:
break;
+ case PT_RVI:
case PT_EPT:
/*
* XXX
@@ -1233,7 +1249,7 @@ pmap_update_pde_invalidate(pmap_t pmap, vm_offset_t va, pd_entry_t newpde)
{
pt_entry_t PG_G;
- if (pmap->pm_type == PT_EPT)
+ if (pmap_type_guest(pmap))
return;
KASSERT(pmap->pm_type == PT_X86,
@@ -1347,7 +1363,7 @@ pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
cpuset_t other_cpus;
u_int cpuid;
- if (pmap->pm_type == PT_EPT) {
+ if (pmap_type_guest(pmap)) {
pmap_invalidate_ept(pmap);
return;
}
@@ -1425,7 +1441,7 @@ pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
vm_offset_t addr;
u_int cpuid;
- if (pmap->pm_type == PT_EPT) {
+ if (pmap_type_guest(pmap)) {
pmap_invalidate_ept(pmap);
return;
}
@@ -1484,7 +1500,7 @@ pmap_invalidate_all(pmap_t pmap)
uint64_t cr3;
u_int cpuid;
- if (pmap->pm_type == PT_EPT) {
+ if (pmap_type_guest(pmap)) {
pmap_invalidate_ept(pmap);
return;
}
@@ -1606,7 +1622,7 @@ pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde)
cpuid = PCPU_GET(cpuid);
other_cpus = all_cpus;
CPU_CLR(cpuid, &other_cpus);
- if (pmap == kernel_pmap || pmap->pm_type == PT_EPT)
+ if (pmap == kernel_pmap || pmap_type_guest(pmap))
active = all_cpus;
else {
active = pmap->pm_active;
@@ -1644,6 +1660,7 @@ pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active))
invlpg(va);
break;
+ case PT_RVI:
case PT_EPT:
pmap->pm_eptgen++;
break;
@@ -1663,6 +1680,7 @@ pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
for (addr = sva; addr < eva; addr += PAGE_SIZE)
invlpg(addr);
break;
+ case PT_RVI:
case PT_EPT:
pmap->pm_eptgen++;
break;
@@ -1680,6 +1698,7 @@ pmap_invalidate_all(pmap_t pmap)
if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active))
invltlb();
break;
+ case PT_RVI:
case PT_EPT:
pmap->pm_eptgen++;
break;
diff --git a/sys/amd64/vmm/intel/vmx_support.S b/sys/amd64/vmm/intel/vmx_support.S
index 840b7e0..84fb5b0 100644
--- a/sys/amd64/vmm/intel/vmx_support.S
+++ b/sys/amd64/vmm/intel/vmx_support.S
@@ -29,7 +29,7 @@
#include <machine/asmacros.h>
-#include "vmx_assym.s"
+#include "vmx_assym.h"
#ifdef SMP
#define LK lock ;