author     neel <neel@FreeBSD.org>    2015-07-01 19:46:57 +0000
committer  neel <neel@FreeBSD.org>    2015-07-01 19:46:57 +0000
commit     b3962232547830a914bb0eb30f16da744b17ad73 (patch)
tree       d20ca30cf84541db0c4c2216e1ddb17e9febd91f /sys/amd64
parent     517a5ffe96e74555736a6a797879390fe59f18ba (diff)
MFC r284712:
Restore the host's GS.base before returning from 'svm_launch()' so the DTrace FBT provider works with vmm.ko on AMD.
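
Background for the change: on amd64 every per-CPU access (curcpu, curthread, PCPU_GET/PCPU_SET) goes through the %gs segment, and, as the comment removed below notes, the per-CPU data area is not reachable again until MSR_GSBASE is restored after the #VMEXIT. A DTrace FBT return probe on 'svm_launch()' fires at the function's return, before any of the caller's C code runs, so the restore has to happen inside the assembly itself. A minimal sketch of such a %gs-relative access, assuming a hypothetical gs_relative_read() helper rather than the kernel's real PCPU machinery:

#include <stdint.h>

/*
 * Minimal sketch, not the kernel's PCPU_GET implementation: on amd64
 * the per-CPU area is addressed through %gs, so this load only sees
 * host data once MSR_GSBASE again points at the host's struct pcpu.
 */
static inline uint64_t
gs_relative_read(uint64_t offset)
{
        uint64_t val;

        __asm__ volatile("movq %%gs:(%1), %0" : "=r" (val) : "r" (offset));
        return (val);
}

An FBT return probe ends up performing loads like this with whatever GS.base the #VMEXIT left behind, which is exactly the case this commit fixes.
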
Diffstat (limited to 'sys/amd64')
-rw-r--r--    sys/amd64/vmm/amd/svm.c              38
-rw-r--r--    sys/amd64/vmm/amd/svm.h               4
-rw-r--r--    sys/amd64/vmm/amd/svm_genassym.c      2
-rw-r--r--    sys/amd64/vmm/amd/svm_support.S      13
4 files changed, 24 insertions, 33 deletions
diff --git a/sys/amd64/vmm/amd/svm.c b/sys/amd64/vmm/amd/svm.c
index b25d69d..abca613 100644
--- a/sys/amd64/vmm/amd/svm.c
+++ b/sys/amd64/vmm/amd/svm.c
@@ -1916,7 +1916,6 @@ svm_vmrun(void *arg, int vcpu, register_t rip, pmap_t pmap,
struct vlapic *vlapic;
struct vm *vm;
uint64_t vmcb_pa;
- u_int thiscpu;
int handled;
svm_sc = arg;
@@ -1928,19 +1927,10 @@ svm_vmrun(void *arg, int vcpu, register_t rip, pmap_t pmap,
vmexit = vm_exitinfo(vm, vcpu);
vlapic = vm_lapic(vm, vcpu);
- /*
- * Stash 'curcpu' on the stack as 'thiscpu'.
- *
- * The per-cpu data area is not accessible until MSR_GSBASE is restored
- * after the #VMEXIT. Since VMRUN is executed inside a critical section
- * 'curcpu' and 'thiscpu' are guaranteed to identical.
- */
- thiscpu = curcpu;
-
gctx = svm_get_guest_regctx(svm_sc, vcpu);
vmcb_pa = svm_sc->vcpu[vcpu].vmcb_pa;
- if (vcpustate->lastcpu != thiscpu) {
+ if (vcpustate->lastcpu != curcpu) {
/*
* Force new ASID allocation by invalidating the generation.
*/
@@ -1961,7 +1951,7 @@ svm_vmrun(void *arg, int vcpu, register_t rip, pmap_t pmap,
* This works for now but any new side-effects of vcpu
* migration should take this case into account.
*/
- vcpustate->lastcpu = thiscpu;
+ vcpustate->lastcpu = curcpu;
vmm_stat_incr(vm, vcpu, VCPU_MIGRATIONS, 1);
}
@@ -2007,14 +1997,14 @@ svm_vmrun(void *arg, int vcpu, register_t rip, pmap_t pmap,
svm_inj_interrupts(svm_sc, vcpu, vlapic);
- /* Activate the nested pmap on 'thiscpu' */
- CPU_SET_ATOMIC_ACQ(thiscpu, &pmap->pm_active);
+ /* Activate the nested pmap on 'curcpu' */
+ CPU_SET_ATOMIC_ACQ(curcpu, &pmap->pm_active);
/*
* Check the pmap generation and the ASID generation to
* ensure that the vcpu does not use stale TLB mappings.
*/
- check_asid(svm_sc, vcpu, pmap, thiscpu);
+ check_asid(svm_sc, vcpu, pmap, curcpu);
ctrl->vmcb_clean = vmcb_clean & ~vcpustate->dirty;
vcpustate->dirty = 0;
@@ -2022,23 +2012,9 @@ svm_vmrun(void *arg, int vcpu, register_t rip, pmap_t pmap,
/* Launch Virtual Machine. */
VCPU_CTR1(vm, vcpu, "Resume execution at %#lx", state->rip);
- svm_launch(vmcb_pa, gctx);
-
- CPU_CLR_ATOMIC(thiscpu, &pmap->pm_active);
+ svm_launch(vmcb_pa, gctx, &__pcpu[curcpu]);
- /*
- * Restore MSR_GSBASE to point to the pcpu data area.
- *
- * Note that accesses done via PCPU_GET/PCPU_SET will work
- * only after MSR_GSBASE is restored.
- *
- * Also note that we don't bother restoring MSR_KGSBASE
- * since it is not used in the kernel and will be restored
- * when the VMRUN ioctl returns to userspace.
- */
- wrmsr(MSR_GSBASE, (uint64_t)&__pcpu[thiscpu]);
- KASSERT(curcpu == thiscpu, ("thiscpu/curcpu (%u/%u) mismatch",
- thiscpu, curcpu));
+ CPU_CLR_ATOMIC(curcpu, &pmap->pm_active);
/*
* The host GDTR and IDTR is saved by VMRUN and restored
diff --git a/sys/amd64/vmm/amd/svm.h b/sys/amd64/vmm/amd/svm.h
index 86bd638..4a931ae 100644
--- a/sys/amd64/vmm/amd/svm.h
+++ b/sys/amd64/vmm/amd/svm.h
@@ -29,6 +29,8 @@
#ifndef _SVM_H_
#define _SVM_H_
+struct pcpu;
+
/*
* Guest register state that is saved outside the VMCB.
*/
@@ -49,6 +51,6 @@ struct svm_regctx {
register_t sctx_r15;
};
-void svm_launch(uint64_t pa, struct svm_regctx *);
+void svm_launch(uint64_t pa, struct svm_regctx *gctx, struct pcpu *pcpu);
#endif /* _SVM_H_ */
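
The 'struct pcpu;' line added above is a forward declaration: svm.h only needs to pass a pointer to the type, so an incomplete declaration is enough and the header does not have to pull in the full pcpu definition. The same pattern with hypothetical names, as a minimal sketch:

/*
 * Pointers to incomplete types are fine; only code that dereferences
 * them or takes their size needs the full definition.
 */
struct widget;                          /* hypothetical incomplete type */

void    frob_widget(struct widget *w);  /* declaration compiles as-is */
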
diff --git a/sys/amd64/vmm/amd/svm_genassym.c b/sys/amd64/vmm/amd/svm_genassym.c
index b7831eb..7f3b460 100644
--- a/sys/amd64/vmm/amd/svm_genassym.c
+++ b/sys/amd64/vmm/amd/svm_genassym.c
@@ -29,6 +29,7 @@ __FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/assym.h>
+#include <x86/specialreg.h>
#include "svm.h"
@@ -46,3 +47,4 @@ ASSYM(SCTX_R12, offsetof(struct svm_regctx, sctx_r12));
ASSYM(SCTX_R13, offsetof(struct svm_regctx, sctx_r13));
ASSYM(SCTX_R14, offsetof(struct svm_regctx, sctx_r14));
ASSYM(SCTX_R15, offsetof(struct svm_regctx, sctx_r15));
+ASSYM(MSR_GSBASE, MSR_GSBASE);
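
svm_genassym.c exists only to export C-side constants and structure offsets to the assembler; including <x86/specialreg.h> and adding ASSYM(MSR_GSBASE, MSR_GSBASE) is what lets svm_support.S write 'mov $MSR_GSBASE, %ecx'. A simplified stand-in for the idea (FreeBSD's real genassym extracts the values from a compiled object file rather than running a program; 'regctx_sketch' is a hypothetical struct):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical stand-in for struct svm_regctx: the assembly only cares
 * about the numeric offsets, not the C field names.
 */
struct regctx_sketch {
        uint64_t sctx_r12;
        uint64_t sctx_r13;
};

int
main(void)
{
        /* Emit assembler-visible definitions, in the spirit of the generated assym output. */
        printf("#define\tSCTX_R12\t%zu\n", offsetof(struct regctx_sketch, sctx_r12));
        printf("#define\tMSR_GSBASE\t0x%x\n", 0xc0000101u);     /* IA32_GS_BASE */
        return (0);
}
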
diff --git a/sys/amd64/vmm/amd/svm_support.S b/sys/amd64/vmm/amd/svm_support.S
index b363101..e9493bd 100644
--- a/sys/amd64/vmm/amd/svm_support.S
+++ b/sys/amd64/vmm/amd/svm_support.S
@@ -42,13 +42,17 @@
#define VMSAVE .byte 0x0f, 0x01, 0xdb
/*
- * svm_launch(uint64_t vmcb, struct svm_regctx *gctx)
+ * svm_launch(uint64_t vmcb, struct svm_regctx *gctx, struct pcpu *pcpu)
* %rdi: physical address of VMCB
* %rsi: pointer to guest context
+ * %rdx: pointer to the pcpu data
*/
ENTRY(svm_launch)
VENTER
+ /* save pointer to the pcpu data */
+ push %rdx
+
/*
* Host register state saved across a VMRUN.
*
@@ -116,6 +120,13 @@ ENTRY(svm_launch)
pop %r12
pop %rbx
+ /* Restore %GS.base to point to the host's pcpu data */
+ pop %rdx
+ mov %edx, %eax
+ shr $32, %rdx
+ mov $MSR_GSBASE, %ecx
+ wrmsr
+
VLEAVE
ret
END(svm_launch)
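
The added instructions are a plain WRMSR of the saved pcpu pointer: WRMSR takes the MSR index in %ecx and the 64-bit value split across %edx:%eax, which is why the code copies the low half with 'mov %edx, %eax' and shifts the high half down with 'shr $32, %rdx' before the write. The same operation in C, modeled on the usual x86 wrmsr() pattern ('wrmsr_sketch' is a hypothetical name; WRMSR is privileged and only works at CPL 0):

#include <stdint.h>

static inline void
wrmsr_sketch(uint32_t msr, uint64_t val)
{
        uint32_t lo = (uint32_t)val;            /* low 32 bits  -> %eax */
        uint32_t hi = (uint32_t)(val >> 32);    /* high 32 bits -> %edx */

        __asm__ volatile("wrmsr" : : "c" (msr), "a" (lo), "d" (hi));
}

/* Equivalent to the assembly above: wrmsr_sketch(MSR_GSBASE, (uint64_t)pcpu); */
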