summaryrefslogtreecommitdiffstats
path: root/sys/amd64
diff options
context:
space:
mode:
author    grehan <grehan@FreeBSD.org>  2014-05-18 03:50:17 +0000
committer grehan <grehan@FreeBSD.org>  2014-05-18 03:50:17 +0000
commit    9fa48763c09d7405752b9281c8b1f453c9375865 (patch)
tree      5876567a6761924e1d9790a83a51a7e7580282b1 /sys/amd64
parent    1c24706a8072cf54a9cbf87879c337dc31982a99 (diff)
download  FreeBSD-src-9fa48763c09d7405752b9281c8b1f453c9375865.zip
download  FreeBSD-src-9fa48763c09d7405752b9281c8b1f453c9375865.tar.gz
Make the vmx asm code dtrace-fbt-friendly by:
- inserting frame enter/leave sequences
- restructuring the vmx_enter_guest routine so that it subsumes the
  vmx_exit_guest block, which was the #vmexit RIP and not a callable routine.

Reviewed by: neel
MFC after:   3 weeks
Diffstat (limited to 'sys/amd64')
-rw-r--r--  sys/amd64/vmm/intel/vmx.h           5
-rw-r--r--  sys/amd64/vmm/intel/vmx_support.S  21
2 files changed, 19 insertions, 7 deletions
diff --git a/sys/amd64/vmm/intel/vmx.h b/sys/amd64/vmm/intel/vmx.h
index c21c979..208fcee 100644
--- a/sys/amd64/vmm/intel/vmx.h
+++ b/sys/amd64/vmm/intel/vmx.h
@@ -67,7 +67,7 @@ struct vmxctx {
int inst_fail_status;
/*
- * The pmap needs to be deactivated in vmx_exit_guest()
+ * The pmap needs to be deactivated in vmx_enter_guest()
* so keep a copy of the 'pmap' in each vmxctx.
*/
struct pmap *pmap;
@@ -121,10 +121,11 @@ CTASSERT((offsetof(struct vmx, pir_desc[0]) & 63) == 0);
#define VMX_VMLAUNCH_ERROR 2
#define VMX_INVEPT_ERROR 3
int vmx_enter_guest(struct vmxctx *ctx, struct vmx *vmx, int launched);
-void vmx_exit_guest(void);
void vmx_call_isr(uintptr_t entry);
u_long vmx_fix_cr0(u_long cr0);
u_long vmx_fix_cr4(u_long cr4);
+extern char vmx_exit_guest[];
+
#endif
diff --git a/sys/amd64/vmm/intel/vmx_support.S b/sys/amd64/vmm/intel/vmx_support.S
index 1d78021..840b7e0 100644
--- a/sys/amd64/vmm/intel/vmx_support.S
+++ b/sys/amd64/vmm/intel/vmx_support.S
@@ -37,6 +37,10 @@
#define LK
#endif
+/* Be friendly to DTrace FBT's prologue/epilogue pattern matching */
+#define VENTER push %rbp ; mov %rsp,%rbp
+#define VLEAVE pop %rbp
+
/*
* Assumes that %rdi holds a pointer to the 'vmxctx'.
*
@@ -98,6 +102,7 @@
* Interrupts must be disabled on entry.
*/
ENTRY(vmx_enter_guest)
+ VENTER
/*
* Save host state before doing anything else.
*/
@@ -183,14 +188,17 @@ inst_error:
LK btrl %r10d, PM_ACTIVE(%r11)
VMX_HOST_RESTORE
+ VLEAVE
ret
-END(vmx_enter_guest)
/*
- * void vmx_exit_guest(void)
- * %rsp points to the struct vmxctx
+ * Non-error VM-exit from the guest. Make this a label so it can
+ * be used by C code when setting up the VMCS.
+ * The VMCS-restored %rsp points to the struct vmxctx
*/
-ENTRY(vmx_exit_guest)
+ ALIGN_TEXT
+ .globl vmx_exit_guest
+vmx_exit_guest:
/*
* Save guest state that is not automatically saved in the vmcs.
*/
@@ -229,8 +237,9 @@ ENTRY(vmx_exit_guest)
* value of VMX_GUEST_VMEXIT.
*/
movl $VMX_GUEST_VMEXIT, %eax
+ VLEAVE
ret
-END(vmx_exit_guest)
+END(vmx_enter_guest)
/*
* %rdi = interrupt handler entry point
@@ -239,6 +248,7 @@ END(vmx_exit_guest)
* instruction in Intel SDM, Vol 2.
*/
ENTRY(vmx_call_isr)
+ VENTER
mov %rsp, %r11 /* save %rsp */
and $~0xf, %rsp /* align on 16-byte boundary */
pushq $KERNEL_SS /* %ss */
@@ -247,5 +257,6 @@ ENTRY(vmx_call_isr)
pushq $KERNEL_CS /* %cs */
cli /* disable interrupts */
callq *%rdi /* push %rip and call isr */
+ VLEAVE
ret
END(vmx_call_isr)
OpenPOWER on IntegriCloud