diff options
-rw-r--r--  sys/amd64/vmm/intel/vmcs.c    |  30
-rw-r--r--  sys/amd64/vmm/intel/vmx.c     |  11
-rw-r--r--  sys/amd64/vmm/vmm.c           |   2
-rw-r--r--  sys/amd64/vmm/vmm_host.c      | 124
-rw-r--r--  sys/amd64/vmm/vmm_host.h      |  75
-rw-r--r--  sys/modules/vmm/Makefile      |   1
-rw-r--r--  sys/x86/include/specialreg.h  |   1
7 files changed, 220 insertions, 24 deletions
diff --git a/sys/amd64/vmm/intel/vmcs.c b/sys/amd64/vmm/intel/vmcs.c index 26ac5f8..a5784dd 100644 --- a/sys/amd64/vmm/intel/vmcs.c +++ b/sys/amd64/vmm/intel/vmcs.c @@ -42,6 +42,7 @@ __FBSDID("$FreeBSD$"); #include <machine/pmap.h> #include <machine/vmm.h> +#include "vmm_host.h" #include "vmcs.h" #include "vmx_cpufunc.h" #include "ept.h" @@ -314,12 +315,12 @@ vmcs_set_defaults(struct vmcs *vmcs, { int error, codesel, datasel, tsssel; u_long cr0, cr4, efer; - uint64_t eptp, pat; + uint64_t eptp, pat, fsbase, idtrbase; uint32_t exc_bitmap; - codesel = GSEL(GCODE_SEL, SEL_KPL); - datasel = GSEL(GDATA_SEL, SEL_KPL); - tsssel = GSEL(GPROC0_SEL, SEL_KPL); + codesel = vmm_get_host_codesel(); + datasel = vmm_get_host_datasel(); + tsssel = vmm_get_host_tsssel(); /* * Make sure we have a "current" VMCS to work with. @@ -357,29 +358,22 @@ vmcs_set_defaults(struct vmcs *vmcs, /* Host state */ /* Initialize host IA32_PAT MSR */ - pat = rdmsr(MSR_PAT); + pat = vmm_get_host_pat(); if ((error = vmwrite(VMCS_HOST_IA32_PAT, pat)) != 0) goto done; /* Load the IA32_EFER MSR */ - efer = rdmsr(MSR_EFER); + efer = vmm_get_host_efer(); if ((error = vmwrite(VMCS_HOST_IA32_EFER, efer)) != 0) goto done; /* Load the control registers */ - /* - * We always want CR0.TS to be set when the processor does a VM exit. - * - * With emulation turned on unconditionally after a VM exit, we are - * able to trap inadvertent use of the FPU until the guest FPU state - * has been safely squirreled away. - */ - cr0 = rcr0() | CR0_TS; + cr0 = vmm_get_host_cr0(); if ((error = vmwrite(VMCS_HOST_CR0, cr0)) != 0) goto done; - cr4 = rcr4(); + cr4 = vmm_get_host_cr4() | CR4_VMXE; if ((error = vmwrite(VMCS_HOST_CR4, cr4)) != 0) goto done; @@ -411,10 +405,12 @@ vmcs_set_defaults(struct vmcs *vmcs, * Note that we exclude %gs, tss and gdtr here because their base * address is pcpu specific. 
*/ - if ((error = vmwrite(VMCS_HOST_FS_BASE, 0)) != 0) + fsbase = vmm_get_host_fsbase(); + if ((error = vmwrite(VMCS_HOST_FS_BASE, fsbase)) != 0) goto done; - if ((error = vmwrite(VMCS_HOST_IDTR_BASE, r_idt.rd_base)) != 0) + idtrbase = vmm_get_host_idtrbase(); + if ((error = vmwrite(VMCS_HOST_IDTR_BASE, idtrbase)) != 0) goto done; /* instruction pointer */ diff --git a/sys/amd64/vmm/intel/vmx.c b/sys/amd64/vmm/intel/vmx.c index 2052dc9..ace2683 100644 --- a/sys/amd64/vmm/intel/vmx.c +++ b/sys/amd64/vmm/intel/vmx.c @@ -51,6 +51,7 @@ __FBSDID("$FreeBSD$"); #include <x86/apicreg.h> #include <machine/vmm.h> +#include "vmm_host.h" #include "vmm_lapic.h" #include "vmm_msr.h" #include "vmm_ktr.h" @@ -64,8 +65,6 @@ __FBSDID("$FreeBSD$"); #include "vmx_controls.h" #include "vmm_instruction_emul.h" -#define CR4_VMXE (1UL << 13) - #define PINBASED_CTLS_ONE_SETTING \ (PINBASED_EXTINT_EXITING | \ PINBASED_NMI_EXITING | \ @@ -118,8 +117,6 @@ __FBSDID("$FreeBSD$"); MALLOC_DEFINE(M_VMX, "vmx", "vmx"); -extern struct pcpu __pcpu[]; - int vmxon_enabled[MAXCPU]; static char vmxon_region[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE); @@ -836,15 +833,15 @@ vmx_set_pcpu_defaults(struct vmx *vmx, int vcpu) vmm_stat_incr(vmx->vm, vcpu, VCPU_MIGRATIONS, 1); - error = vmwrite(VMCS_HOST_TR_BASE, (u_long)PCPU_GET(tssp)); + error = vmwrite(VMCS_HOST_TR_BASE, vmm_get_host_trbase()); if (error != 0) goto done; - error = vmwrite(VMCS_HOST_GDTR_BASE, (u_long)&gdt[NGDT * curcpu]); + error = vmwrite(VMCS_HOST_GDTR_BASE, vmm_get_host_gdtrbase()); if (error != 0) goto done; - error = vmwrite(VMCS_HOST_GS_BASE, (u_long)&__pcpu[curcpu]); + error = vmwrite(VMCS_HOST_GS_BASE, vmm_get_host_gsbase()); if (error != 0) goto done; diff --git a/sys/amd64/vmm/vmm.c b/sys/amd64/vmm/vmm.c index 6efc01f..eae9ccc 100644 --- a/sys/amd64/vmm/vmm.c +++ b/sys/amd64/vmm/vmm.c @@ -51,6 +51,7 @@ __FBSDID("$FreeBSD$"); #include <x86/apicreg.h> #include <machine/vmm.h> +#include "vmm_host.h" #include "vmm_mem.h" #include 
"vmm_util.h" #include <machine/vmm_dev.h> @@ -196,6 +197,7 @@ vmm_init(void) { int error; + vmm_host_state_init(); vmm_ipi_init(); error = vmm_mem_init(); diff --git a/sys/amd64/vmm/vmm_host.c b/sys/amd64/vmm/vmm_host.c new file mode 100644 index 0000000..8dfef73 --- /dev/null +++ b/sys/amd64/vmm/vmm_host.c @@ -0,0 +1,124 @@ +/*- + * Copyright (c) 2012 NetApp, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * $FreeBSD$ + */ + +#include <sys/cdefs.h> +__FBSDID("$FreeBSD$"); + +#include <sys/param.h> +#include <sys/pcpu.h> + +#include <machine/cpufunc.h> +#include <machine/segments.h> +#include <machine/specialreg.h> + +#include "vmm_host.h" + +static uint64_t vmm_host_efer, vmm_host_pat, vmm_host_cr0, vmm_host_cr4; + +void +vmm_host_state_init(void) +{ + + vmm_host_efer = rdmsr(MSR_EFER); + vmm_host_pat = rdmsr(MSR_PAT); + + /* + * We always want CR0.TS to be set when the processor does a VM exit. + * + * With emulation turned on unconditionally after a VM exit, we are + * able to trap inadvertent use of the FPU until the guest FPU state + * has been safely squirreled away. + */ + vmm_host_cr0 = rcr0() | CR0_TS; + + vmm_host_cr4 = rcr4(); +} + +uint64_t +vmm_get_host_pat(void) +{ + + return (vmm_host_pat); +} + +uint64_t +vmm_get_host_efer(void) +{ + + return (vmm_host_efer); +} + +uint64_t +vmm_get_host_cr0(void) +{ + + return (vmm_host_cr0); +} + +uint64_t +vmm_get_host_cr4(void) +{ + + return (vmm_host_cr4); +} + +uint64_t +vmm_get_host_datasel(void) +{ + + return (GSEL(GDATA_SEL, SEL_KPL)); + +} + +uint64_t +vmm_get_host_codesel(void) +{ + + return (GSEL(GCODE_SEL, SEL_KPL)); +} + +uint64_t +vmm_get_host_tsssel(void) +{ + + return (GSEL(GPROC0_SEL, SEL_KPL)); +} + +uint64_t +vmm_get_host_fsbase(void) +{ + + return (0); +} + +uint64_t +vmm_get_host_idtrbase(void) +{ + + return (r_idt.rd_base); +} diff --git a/sys/amd64/vmm/vmm_host.h b/sys/amd64/vmm/vmm_host.h new file mode 100644 index 0000000..839f54a --- /dev/null +++ b/sys/amd64/vmm/vmm_host.h @@ -0,0 +1,75 @@ +/*- + * Copyright (c) 2012 NetApp, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * $FreeBSD$ + */ + +#ifndef _VMM_HOST_H_ +#define _VMM_HOST_H_ + +#ifndef _KERNEL +#error "no user-servicable parts inside" +#endif + +void vmm_host_state_init(void); + +uint64_t vmm_get_host_pat(void); +uint64_t vmm_get_host_efer(void); +uint64_t vmm_get_host_cr0(void); +uint64_t vmm_get_host_cr4(void); +uint64_t vmm_get_host_datasel(void); +uint64_t vmm_get_host_codesel(void); +uint64_t vmm_get_host_tsssel(void); +uint64_t vmm_get_host_fsbase(void); +uint64_t vmm_get_host_idtrbase(void); + +/* + * Inline access to host state that is used on every VM entry + */ +static __inline uint64_t +vmm_get_host_trbase(void) +{ + + return ((uint64_t)PCPU_GET(tssp)); +} + +static __inline uint64_t +vmm_get_host_gdtrbase(void) +{ + + return ((uint64_t)&gdt[NGDT * curcpu]); +} + +struct pcpu; +extern struct pcpu __pcpu[]; + +static __inline uint64_t +vmm_get_host_gsbase(void) +{ + + return ((uint64_t)&__pcpu[curcpu]); +} + +#endif diff --git a/sys/modules/vmm/Makefile b/sys/modules/vmm/Makefile index 
af93567..8b565da 100644 --- a/sys/modules/vmm/Makefile +++ b/sys/modules/vmm/Makefile @@ -13,6 +13,7 @@ CFLAGS+= -I${.CURDIR}/../../amd64/vmm/intel .PATH: ${.CURDIR}/../../amd64/vmm SRCS+= vmm.c \ vmm_dev.c \ + vmm_host.c \ vmm_instruction_emul.c \ vmm_ipi.c \ vmm_lapic.c \ diff --git a/sys/x86/include/specialreg.h b/sys/x86/include/specialreg.h index 9f83f14..3ca9f17 100644 --- a/sys/x86/include/specialreg.h +++ b/sys/x86/include/specialreg.h @@ -66,6 +66,7 @@ #define CR4_PCE 0x00000100 /* Performance monitoring counter enable */ #define CR4_FXSR 0x00000200 /* Fast FPU save/restore used by OS */ #define CR4_XMM 0x00000400 /* enable SIMD/MMX2 to use except 16 */ +#define CR4_VMXE 0x00002000 /* enable VMX operation (Intel-specific) */ #define CR4_XSAVE 0x00040000 /* XSETBV/XGETBV */ /* |