author     neel <neel@FreeBSD.org>    2014-12-30 00:00:42 +0000
committer  neel <neel@FreeBSD.org>    2014-12-30 00:00:42 +0000
commit     3b591af2d9206cc8238757dc71fb88cbc8126e54
tree       4a70e257b106bcb834d9b41a09e1f5116dbfb134
parent     80b8a49df45350ef1df84cb477766defc817ae56
MFC r261321
Rename the AMD MSR_PERFCTR[0-3] so the Pentium Pro MSR_PERFCTR[0-1] aren't
redefined.
MFC r273214
Fix the build so that vmm.ko is not bogusly rebuilt every time.
MFC r273338
Add support for AMD's nested page tables in pmap.c:
- Provide the correct bit mask for various bit fields in a PTE (e.g. valid bit)
for a pmap of type PT_RVI.
- Add a function 'pmap_type_guest(pmap)' that returns TRUE if the pmap is of
type PT_EPT or PT_RVI (a caller-side sketch of how these helpers are used
precedes the pmap.c diff below).
Add CPU_SET_ATOMIC_ACQ(num, cpuset):
This is used when activating a vcpu in the nested pmap. Using the 'acquire'
variant guarantees that the load of the 'pm_eptgen' will happen only after
the vcpu is activated in 'pm_active' (a short usage sketch follows this message).
Add defines for various AMD-specific MSRs (their intended use is sketched briefly
after the specialreg.h diff at the end).
Discussed with: kib (r261321)
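
The ordering argument above is easiest to see from the consumer's side. Below is
a minimal sketch of the activation path the new macro is meant for, assuming the
usual kernel headers and only the names that appear in this diff
(CPU_SET_ATOMIC_ACQ, pm_active, pm_eptgen); pmap_activate_guest_cpu() is a
hypothetical name, not a function introduced by this commit:

    /*
     * Hypothetical sketch, not part of this commit.  The acquire variant keeps
     * the load of pm_eptgen below from being reordered before the store that
     * makes this cpu visible in pm_active, so an invalidation that bumped
     * pm_eptgen just before we activated cannot be missed.
     */
    static long
    pmap_activate_guest_cpu(pmap_t pmap)
    {
            long gen;

            /* Become visible to pmap_invalidate_*() first ... */
            CPU_SET_ATOMIC_ACQ(PCPU_GET(cpuid), &pmap->pm_active);

            /* ... and only then sample the invalidation generation. */
            gen = pmap->pm_eptgen;
            return (gen);
    }

With the plain CPU_SET_ATOMIC() those two steps could be observed in the
opposite order, and a concurrent pmap_invalidate_*() that incremented
pm_eptgen could slip between them unnoticed.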
-rw-r--r--   sys/amd64/amd64/pmap.c             | 29
-rw-r--r--   sys/amd64/vmm/intel/vmx_support.S  |  2
-rw-r--r--   sys/modules/vmm/Makefile           | 15
-rw-r--r--   sys/sys/bitset.h                   |  4
-rw-r--r--   sys/sys/cpuset.h                   |  1
-rw-r--r--   sys/x86/include/specialreg.h       | 24
6 files changed, 55 insertions, 20 deletions
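
A note before the pmap.c hunks: the per-type helpers (pmap_valid_bit() and
friends) together with pmap_type_guest() are what let the rest of pmap.c stay
page-table-format agnostic, since PT_RVI uses the native x86 PTE layout while
PT_EPT does not. A minimal caller-side sketch of that pattern, assuming the
helpers added below; pte_is_valid() is a hypothetical illustration, not a
function from this commit:

    /*
     * Hypothetical sketch, not part of this commit.  pmap.c callers sample the
     * per-pmap bit mask once and then test PTEs with it, so the same code
     * serves PT_X86 and PT_RVI (both return X86_PG_V) as well as PT_EPT
     * (which returns an EPT-specific bit).
     */
    static __inline boolean_t
    pte_is_valid(pmap_t pmap, pt_entry_t pte)
    {
            pt_entry_t PG_V;

            PG_V = pmap_valid_bit(pmap);
            return ((pte & PG_V) != 0);
    }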
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index f77edcb..6d894f3 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -146,6 +146,13 @@ __FBSDID("$FreeBSD$");
 #endif
 
 static __inline boolean_t
+pmap_type_guest(pmap_t pmap)
+{
+
+	return ((pmap->pm_type == PT_EPT) || (pmap->pm_type == PT_RVI));
+}
+
+static __inline boolean_t
 pmap_emulate_ad_bits(pmap_t pmap)
 {
 
@@ -159,6 +166,7 @@ pmap_valid_bit(pmap_t pmap)
 
 	switch (pmap->pm_type) {
 	case PT_X86:
+	case PT_RVI:
 		mask = X86_PG_V;
 		break;
 	case PT_EPT:
@@ -181,6 +189,7 @@ pmap_rw_bit(pmap_t pmap)
 
 	switch (pmap->pm_type) {
 	case PT_X86:
+	case PT_RVI:
 		mask = X86_PG_RW;
 		break;
 	case PT_EPT:
@@ -205,6 +214,7 @@ pmap_global_bit(pmap_t pmap)
 	case PT_X86:
 		mask = X86_PG_G;
 		break;
+	case PT_RVI:
 	case PT_EPT:
 		mask = 0;
 		break;
@@ -222,6 +232,7 @@ pmap_accessed_bit(pmap_t pmap)
 
 	switch (pmap->pm_type) {
 	case PT_X86:
+	case PT_RVI:
 		mask = X86_PG_A;
 		break;
 	case PT_EPT:
@@ -244,6 +255,7 @@ pmap_modified_bit(pmap_t pmap)
 
 	switch (pmap->pm_type) {
 	case PT_X86:
+	case PT_RVI:
 		mask = X86_PG_M;
 		break;
 	case PT_EPT:
@@ -1103,6 +1115,7 @@ pmap_swap_pat(pmap_t pmap, pt_entry_t entry)
 
 	switch (pmap->pm_type) {
 	case PT_X86:
+	case PT_RVI:
 		/* Verify that both PAT bits are not set at the same time */
 		KASSERT((entry & x86_pat_bits) != x86_pat_bits,
 		    ("Invalid PAT bits in entry %#lx", entry));
@@ -1138,6 +1151,7 @@ pmap_cache_bits(pmap_t pmap, int mode, boolean_t is_pde)
 
 	switch (pmap->pm_type) {
 	case PT_X86:
+	case PT_RVI:
 		/* The PAT bit is different for PTE's and PDE's. */
 		pat_flag = is_pde ? X86_PG_PDE_PAT : X86_PG_PTE_PAT;
 
@@ -1172,6 +1186,7 @@ pmap_cache_mask(pmap_t pmap, boolean_t is_pde)
 
 	switch (pmap->pm_type) {
 	case PT_X86:
+	case PT_RVI:
 		mask = is_pde ? X86_PG_PDE_CACHE : X86_PG_PTE_CACHE;
 		break;
 	case PT_EPT:
@@ -1198,6 +1213,7 @@ pmap_update_pde_store(pmap_t pmap, pd_entry_t *pde, pd_entry_t newpde)
 	switch (pmap->pm_type) {
 	case PT_X86:
 		break;
+	case PT_RVI:
 	case PT_EPT:
 		/*
 		 * XXX
@@ -1233,7 +1249,7 @@ pmap_update_pde_invalidate(pmap_t pmap, vm_offset_t va, pd_entry_t newpde)
 {
 	pt_entry_t PG_G;
 
-	if (pmap->pm_type == PT_EPT)
+	if (pmap_type_guest(pmap))
 		return;
 
 	KASSERT(pmap->pm_type == PT_X86,
@@ -1347,7 +1363,7 @@ pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
 	cpuset_t other_cpus;
 	u_int cpuid;
 
-	if (pmap->pm_type == PT_EPT) {
+	if (pmap_type_guest(pmap)) {
 		pmap_invalidate_ept(pmap);
 		return;
 	}
@@ -1425,7 +1441,7 @@ pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
 	vm_offset_t addr;
 	u_int cpuid;
 
-	if (pmap->pm_type == PT_EPT) {
+	if (pmap_type_guest(pmap)) {
 		pmap_invalidate_ept(pmap);
 		return;
 	}
@@ -1484,7 +1500,7 @@ pmap_invalidate_all(pmap_t pmap)
 	uint64_t cr3;
 	u_int cpuid;
 
-	if (pmap->pm_type == PT_EPT) {
+	if (pmap_type_guest(pmap)) {
 		pmap_invalidate_ept(pmap);
 		return;
 	}
@@ -1606,7 +1622,7 @@ pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde)
 	cpuid = PCPU_GET(cpuid);
 	other_cpus = all_cpus;
 	CPU_CLR(cpuid, &other_cpus);
-	if (pmap == kernel_pmap || pmap->pm_type == PT_EPT)
+	if (pmap == kernel_pmap || pmap_type_guest(pmap))
 		active = all_cpus;
 	else {
 		active = pmap->pm_active;
@@ -1644,6 +1660,7 @@ pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
 		if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active))
 			invlpg(va);
 		break;
+	case PT_RVI:
 	case PT_EPT:
 		pmap->pm_eptgen++;
 		break;
@@ -1663,6 +1680,7 @@ pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
 		for (addr = sva; addr < eva; addr += PAGE_SIZE)
 			invlpg(addr);
 		break;
+	case PT_RVI:
 	case PT_EPT:
 		pmap->pm_eptgen++;
 		break;
@@ -1680,6 +1698,7 @@ pmap_invalidate_all(pmap_t pmap)
 		if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active))
 			invltlb();
 		break;
+	case PT_RVI:
 	case PT_EPT:
 		pmap->pm_eptgen++;
 		break;
diff --git a/sys/amd64/vmm/intel/vmx_support.S b/sys/amd64/vmm/intel/vmx_support.S
index 840b7e0..84fb5b0 100644
--- a/sys/amd64/vmm/intel/vmx_support.S
+++ b/sys/amd64/vmm/intel/vmx_support.S
@@ -29,7 +29,7 @@
 
 #include <machine/asmacros.h>
 
-#include "vmx_assym.s"
+#include "vmx_assym.h"
 
 #ifdef SMP
 #define	LK	lock ;
diff --git a/sys/modules/vmm/Makefile b/sys/modules/vmm/Makefile
index 702587b..81ff005 100644
--- a/sys/modules/vmm/Makefile
+++ b/sys/modules/vmm/Makefile
@@ -2,7 +2,7 @@
 
 KMOD=	vmm
 
-SRCS=	opt_acpi.h opt_ddb.h device_if.h bus_if.h pci_if.h
+SRCS=	opt_acpi.h opt_ddb.h device_if.h bus_if.h pci_if.h vmx_assym.h
 
 CFLAGS+= -DVMM_KEEP_STATS -DSMP
 CFLAGS+= -I${.CURDIR}/../../amd64/vmm
@@ -38,6 +38,7 @@ SRCS+=	iommu.c \
 SRCS+=	ept.c		\
 	vmcs.c		\
 	vmx_msr.c	\
+	vmx_support.S	\
 	vmx.c		\
 	vtd.c
 
@@ -45,21 +46,19 @@ SRCS+=	ept.c		\
 .PATH: ${.CURDIR}/../../amd64/vmm/amd
 SRCS+=	amdv.c
 
-OBJS=	vmx_support.o
+CLEANFILES=	vmx_assym.h vmx_genassym.o
 
-CLEANFILES=	vmx_assym.s vmx_genassym.o
-
-vmx_assym.s:	vmx_genassym.o
+vmx_assym.h:	vmx_genassym.o
 .if exists(@)
-vmx_assym.s:	@/kern/genassym.sh
+vmx_assym.h:	@/kern/genassym.sh
 .endif
 	sh @/kern/genassym.sh vmx_genassym.o > ${.TARGET}
 
-vmx_support.o: vmx_support.S vmx_assym.s
+vmx_support.o:
 	${CC} -c -x assembler-with-cpp -DLOCORE ${CFLAGS} \
 	    ${.IMPSRC} -o ${.TARGET}
 
-vmx_genassym.o: vmx_genassym.c @ machine x86
+vmx_genassym.o:
 	${CC} -c ${CFLAGS:N-fno-common} ${.IMPSRC}
 
 .include <bsd.kmod.mk>
diff --git a/sys/sys/bitset.h b/sys/sys/bitset.h
index 7c24ecd..e6c4dc3 100644
--- a/sys/sys/bitset.h
+++ b/sys/sys/bitset.h
@@ -135,6 +135,10 @@
 	atomic_set_long(&(p)->__bits[__bitset_word(_s, n)],		\
 	    __bitset_mask((_s), n))
 
+#define	BIT_SET_ATOMIC_ACQ(_s, n, p)					\
+	atomic_set_acq_long(&(p)->__bits[__bitset_word(_s, n)],	\
+	    __bitset_mask((_s), n))
+
 /* Convenience functions catering special cases. */
 #define	BIT_AND_ATOMIC(_s, d, s) do {					\
 	__size_t __i;							\
diff --git a/sys/sys/cpuset.h b/sys/sys/cpuset.h
index e1ee37d..f3ec3da 100644
--- a/sys/sys/cpuset.h
+++ b/sys/sys/cpuset.h
@@ -55,6 +55,7 @@
 #define	CPU_NAND(d, s)			BIT_NAND(CPU_SETSIZE, d, s)
 #define	CPU_CLR_ATOMIC(n, p)		BIT_CLR_ATOMIC(CPU_SETSIZE, n, p)
 #define	CPU_SET_ATOMIC(n, p)		BIT_SET_ATOMIC(CPU_SETSIZE, n, p)
+#define	CPU_SET_ATOMIC_ACQ(n, p)	BIT_SET_ATOMIC_ACQ(CPU_SETSIZE, n, p)
 #define	CPU_AND_ATOMIC(n, p)		BIT_AND_ATOMIC(CPU_SETSIZE, n, p)
 #define	CPU_OR_ATOMIC(d, s)		BIT_OR_ATOMIC(CPU_SETSIZE, d, s)
 #define	CPU_COPY_STORE_REL(f, t)	BIT_COPY_STORE_REL(CPU_SETSIZE, f, t)
diff --git a/sys/x86/include/specialreg.h b/sys/x86/include/specialreg.h
index 7298a2e..e6079e7 100644
--- a/sys/x86/include/specialreg.h
+++ b/sys/x86/include/specialreg.h
@@ -81,6 +81,7 @@
 #define	EFER_LME	0x000000100	/* Long mode enable (R/W) */
 #define	EFER_LMA	0x000000400	/* Long mode active (R) */
 #define	EFER_NXE	0x000000800	/* PTE No-Execute bit enable (R/W) */
+#define	EFER_SVM	0x000001000	/* SVM enable bit for AMD, reserved for Intel */
 
 /*
  * Intel Extended Features registers
@@ -771,12 +772,10 @@
 #define	MSR_PERFEVSEL1	0xc0010001
 #define	MSR_PERFEVSEL2	0xc0010002
 #define	MSR_PERFEVSEL3	0xc0010003
-#undef	MSR_PERFCTR0
-#undef	MSR_PERFCTR1
-#define	MSR_PERFCTR0	0xc0010004
-#define	MSR_PERFCTR1	0xc0010005
-#define	MSR_PERFCTR2	0xc0010006
-#define	MSR_PERFCTR3	0xc0010007
+#define	MSR_K7_PERFCTR0	0xc0010004
+#define	MSR_K7_PERFCTR1	0xc0010005
+#define	MSR_K7_PERFCTR2	0xc0010006
+#define	MSR_K7_PERFCTR3	0xc0010007
 #define	MSR_SYSCFG	0xc0010010
 #define	MSR_HWCR	0xc0010015
 #define	MSR_IORRBASE0	0xc0010016
@@ -785,8 +784,21 @@
 #define	MSR_IORRMASK1	0xc0010019
 #define	MSR_TOP_MEM	0xc001001a	/* boundary for ram below 4G */
 #define	MSR_TOP_MEM2	0xc001001d	/* boundary for ram above 4G */
+#define	MSR_NB_CFG1	0xc001001f	/* NB configuration 1 */
+#define	MSR_P_STATE_LIMIT 0xc0010061	/* P-state Current Limit Register */
+#define	MSR_P_STATE_CONTROL 0xc0010062	/* P-state Control Register */
+#define	MSR_P_STATE_STATUS 0xc0010063	/* P-state Status Register */
+#define	MSR_P_STATE_CONFIG(n) (0xc0010064 + (n)) /* P-state Config */
+#define	MSR_SMM_ADDR	0xc0010112	/* SMM TSEG base address */
+#define	MSR_SMM_MASK	0xc0010113	/* SMM TSEG address mask */
+#define	MSR_IC_CFG	0xc0011021	/* Instruction Cache Configuration */
 #define	MSR_K8_UCODE_UPDATE	0xc0010020	/* update microcode */
 #define	MSR_MC0_CTL_MASK	0xc0010044
+#define	MSR_VM_CR		0xc0010114	/* SVM: feature control */
+#define	MSR_VM_HSAVE_PA		0xc0010117	/* SVM: host save area address */
+
+/* MSR_VM_CR related */
+#define	VM_CR_SVMDIS		0x10	/* SVM: disabled by BIOS */
 
 /* VIA ACE crypto featureset: for via_feature_rng */
 #define	VIA_HAS_RNG	1	/* cpu has RNG */
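
The MSR_VM_CR, MSR_VM_HSAVE_PA and EFER_SVM definitions added above are
groundwork: nothing in this commit consumes them yet. A minimal, hypothetical
sketch of the enable sequence they describe, using only rdmsr()/wrmsr() from
<machine/cpufunc.h> and the constants defined above; svm_try_enable() is an
illustrative name, and real code would first probe CPUID for SVM support and
allocate a page-aligned host save area:

    /*
     * Hypothetical sketch, not part of this commit.
     */
    static int
    svm_try_enable(vm_paddr_t hsave_pa)
    {

            /* SVM may be locked off by the BIOS via VM_CR.SVMDIS. */
            if (rdmsr(MSR_VM_CR) & VM_CR_SVMDIS)
                    return (ENXIO);

            /* Turn on SVM in EFER, then point the CPU at the host save area. */
            wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_SVM);
            wrmsr(MSR_VM_HSAVE_PA, hsave_pa);
            return (0);
    }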