author     neel <neel@FreeBSD.org>    2013-10-05 21:22:35 +0000
committer  neel <neel@FreeBSD.org>    2013-10-05 21:22:35 +0000
commit     aed205d5cd3901acd2a0cf583e44ec166d99191a
tree       7bf8725b3e8dc06e35d8c372329cbea900a7100c
parent     716c2031c71f1b1fbd78da6494f5c5e2a5115e9b
Merge projects/bhyve_npt_pmap into head.
Make the amd64/pmap code aware of nested page table mappings used by bhyve
guests. This allows bhyve to associate each guest with its own vmspace and
deal with nested page faults in the context of that vmspace. This also enables
features like accessed/dirty bit tracking, swapping to disk and transparent
superpage promotions of guest memory.

Guest vmspace:
Each bhyve guest has a unique vmspace to represent the physical memory
allocated to the guest. Each memory segment allocated by the guest is mapped
into the guest's address space via the 'vmspace->vm_map' and is backed by an
object of type OBJT_DEFAULT.

pmap types:
The amd64/pmap now understands two types of pmaps: PT_X86 and PT_EPT. The
PT_X86 pmap type is used by the vmspace associated with the host kernel as
well as user processes executing on the host. The PT_EPT pmap is used by the
vmspace associated with a bhyve guest.

Page Table Entries:
The EPT page table entries are mostly similar in functionality to regular page
table entries although there are some differences in terms of what bits are
used to express that functionality. For example, the dirty bit is represented
by bit 9 in the nested PTE as opposed to bit 6 in the regular x86 PTE.
Therefore the bitmask representing the dirty bit is now computed at runtime
based on the type of the pmap. Thus PG_M, which was previously a macro, now
becomes a local variable that is initialized at runtime using
'pmap_modified_bit(pmap)'.

An additional wrinkle associated with EPT mappings is that older Intel
processors don't have hardware support for tracking accessed/dirty bits in the
PTE. This means that the amd64/pmap code needs to emulate these bits to
provide proper accounting to the VM subsystem. This is achieved by using the
following mapping for EPT entries that need emulation of A/D bits:

         Bit Position    Interpreted By
  PG_V   52              software (accessed bit emulation handler)
  PG_RW  53              software (dirty bit emulation handler)
  PG_A   0               hardware (aka EPT_PG_RD)
  PG_M   1               hardware (aka EPT_PG_WR)

The idea to use the mapping listed above for A/D bit emulation came from Alan
Cox (alc@).

The final difference with respect to x86 PTEs is that some EPT implementations
do not support superpage mappings. This is recorded in the 'pm_flags' field of
the pmap.

TLB invalidation:
The amd64/pmap code has a number of ways to invalidate mappings that may be
cached in the TLB: a single page, multiple pages in a range, or the entire
TLB. All of these funnel into a single EPT invalidation routine called
'pmap_invalidate_ept()'. This routine bumps up the EPT generation number and
sends an IPI to the host cpus that are executing the guest's vcpus. On a
subsequent entry into the guest it will detect that the EPT has changed and
invalidate the mappings from the TLB.

Guest memory access:
Since the guest memory is no longer wired we need to hold the host physical
page that backs the guest physical page before we can access it. The helper
functions 'vm_gpa_hold()/vm_gpa_release()' are available for this purpose.

PCI passthru:
Guests with PCI passthru devices will wire the entire guest physical address
space. The MMIO BAR associated with the passthru device is backed by a
vm_object of type OBJT_SG. An IOMMU domain is created only for guests that
have one or more PCI passthru devices attached to them.

Limitations:
There isn't a way to map a guest physical page without execute permissions.
This is because the amd64/pmap code interprets the guest physical mappings as
user mappings since they are numerically below VM_MAXUSER_ADDRESS. Since PG_U
shares the same bit position as EPT_PG_EXECUTE all guest mappings become
automatically executable.

Thanks to Alan Cox and Konstantin Belousov for their rigorous code reviews as
well as their support and encouragement. Thanks to John Baldwin for reviewing
the use of OBJT_SG as the backing object for PCI passthru MMIO regions.
Special thanks to Peter Holm for testing the patch on short notice.

Approved by:	re
Discussed with:	grehan
Reviewed by:	alc, kib
Tested by:	pho
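A minimal sketch of the runtime dirty-bit selection described above follows. It is not the code from sys/amd64/amd64/pmap.c; it only assumes that 'struct pmap' carries a 'pm_type' field of 'enum pmap_type' alongside the 'pm_flags' field added by this commit, and that the usual pmap.c headers are already in scope.

/*
 * Illustrative sketch only: pick the "modified" (dirty) bitmask at runtime
 * based on the pmap type, as described in the commit message.
 */
static __inline pt_entry_t
pmap_modified_bit(pmap_t pmap)
{
	pt_entry_t mask;

	switch (pmap->pm_type) {
	case PT_X86:
		mask = X86_PG_M;		/* bit 6 in a regular x86 PTE */
		break;
	case PT_EPT:
		if (pmap->pm_flags & PMAP_EMULATE_AD_BITS)
			mask = EPT_PG_EMUL_RW;	/* bit 53, emulated in software */
		else
			mask = EPT_PG_M;	/* bit 9 in a nested PTE */
		break;
	default:
		panic("pmap_modified_bit: invalid pm_type");
	}
	return (mask);
}

A pmap function compiled with AMD64_NPT_AWARE would then start with something like 'pt_entry_t PG_M = pmap_modified_bit(pmap);', so the rest of its body can keep using the familiar PG_M name even though the macro has been #undef'd.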
Diffstat (limited to 'sys/amd64/include')
-rw-r--r--  sys/amd64/include/pmap.h                    115
-rw-r--r--  sys/amd64/include/vmm.h                      43
-rw-r--r--  sys/amd64/include/vmm_dev.h                  12
-rw-r--r--  sys/amd64/include/vmm_instruction_emul.h      4
4 files changed, 134 insertions, 40 deletions
diff --git a/sys/amd64/include/pmap.h b/sys/amd64/include/pmap.h
index 11e1cf2..01de629 100644
--- a/sys/amd64/include/pmap.h
+++ b/sys/amd64/include/pmap.h
@@ -50,41 +50,74 @@
* of the fields not present here and there, depending on a lot of things.
*/
/* ---- Intel Nomenclature ---- */
-#define PG_V 0x001 /* P Valid */
-#define PG_RW 0x002 /* R/W Read/Write */
-#define PG_U 0x004 /* U/S User/Supervisor */
-#define PG_NC_PWT 0x008 /* PWT Write through */
-#define PG_NC_PCD 0x010 /* PCD Cache disable */
-#define PG_A 0x020 /* A Accessed */
-#define PG_M 0x040 /* D Dirty */
-#define PG_PS 0x080 /* PS Page size (0=4k,1=2M) */
-#define PG_PTE_PAT 0x080 /* PAT PAT index */
-#define PG_G 0x100 /* G Global */
-#define PG_AVAIL1 0x200 /* / Available for system */
-#define PG_AVAIL2 0x400 /* < programmers use */
-#define PG_AVAIL3 0x800 /* \ */
-#define PG_PDE_PAT 0x1000 /* PAT PAT index */
-#define PG_NX (1ul<<63) /* No-execute */
+#define X86_PG_V 0x001 /* P Valid */
+#define X86_PG_RW 0x002 /* R/W Read/Write */
+#define X86_PG_U 0x004 /* U/S User/Supervisor */
+#define X86_PG_NC_PWT 0x008 /* PWT Write through */
+#define X86_PG_NC_PCD 0x010 /* PCD Cache disable */
+#define X86_PG_A 0x020 /* A Accessed */
+#define X86_PG_M 0x040 /* D Dirty */
+#define X86_PG_PS 0x080 /* PS Page size (0=4k,1=2M) */
+#define X86_PG_PTE_PAT 0x080 /* PAT PAT index */
+#define X86_PG_G 0x100 /* G Global */
+#define X86_PG_AVAIL1 0x200 /* / Available for system */
+#define X86_PG_AVAIL2 0x400 /* < programmers use */
+#define X86_PG_AVAIL3 0x800 /* \ */
+#define X86_PG_PDE_PAT 0x1000 /* PAT PAT index */
+#define X86_PG_NX (1ul<<63) /* No-execute */
+#define X86_PG_AVAIL(x) (1ul << (x))
+/* Page level cache control fields used to determine the PAT type */
+#define X86_PG_PDE_CACHE (X86_PG_PDE_PAT | X86_PG_NC_PWT | X86_PG_NC_PCD)
+#define X86_PG_PTE_CACHE (X86_PG_PTE_PAT | X86_PG_NC_PWT | X86_PG_NC_PCD)
+
+/*
+ * Intel extended page table (EPT) bit definitions.
+ */
+#define EPT_PG_READ 0x001 /* R Read */
+#define EPT_PG_WRITE 0x002 /* W Write */
+#define EPT_PG_EXECUTE 0x004 /* X Execute */
+#define EPT_PG_IGNORE_PAT 0x040 /* IPAT Ignore PAT */
+#define EPT_PG_PS 0x080 /* PS Page size */
+#define EPT_PG_A 0x100 /* A Accessed */
+#define EPT_PG_M 0x200 /* D Dirty */
+#define EPT_PG_MEMORY_TYPE(x) ((x) << 3) /* MT Memory Type */
+
+/*
+ * Define the PG_xx macros in terms of the bits on x86 PTEs.
+ */
+#define PG_V X86_PG_V
+#define PG_RW X86_PG_RW
+#define PG_U X86_PG_U
+#define PG_NC_PWT X86_PG_NC_PWT
+#define PG_NC_PCD X86_PG_NC_PCD
+#define PG_A X86_PG_A
+#define PG_M X86_PG_M
+#define PG_PS X86_PG_PS
+#define PG_PTE_PAT X86_PG_PTE_PAT
+#define PG_G X86_PG_G
+#define PG_AVAIL1 X86_PG_AVAIL1
+#define PG_AVAIL2 X86_PG_AVAIL2
+#define PG_AVAIL3 X86_PG_AVAIL3
+#define PG_PDE_PAT X86_PG_PDE_PAT
+#define PG_NX X86_PG_NX
+#define PG_PDE_CACHE X86_PG_PDE_CACHE
+#define PG_PTE_CACHE X86_PG_PTE_CACHE
/* Our various interpretations of the above */
-#define PG_W PG_AVAIL1 /* "Wired" pseudoflag */
-#define PG_MANAGED PG_AVAIL2
+#define PG_W X86_PG_AVAIL3 /* "Wired" pseudoflag */
+#define PG_MANAGED X86_PG_AVAIL2
+#define EPT_PG_EMUL_V X86_PG_AVAIL(52)
+#define EPT_PG_EMUL_RW X86_PG_AVAIL(53)
#define PG_FRAME (0x000ffffffffff000ul)
#define PG_PS_FRAME (0x000fffffffe00000ul)
-#define PG_PROT (PG_RW|PG_U) /* all protection bits . */
-#define PG_N (PG_NC_PWT|PG_NC_PCD) /* Non-cacheable */
-
-/* Page level cache control fields used to determine the PAT type */
-#define PG_PDE_CACHE (PG_PDE_PAT | PG_NC_PWT | PG_NC_PCD)
-#define PG_PTE_CACHE (PG_PTE_PAT | PG_NC_PWT | PG_NC_PCD)
/*
* Promotion to a 2MB (PDE) page mapping requires that the corresponding 4KB
* (PTE) page mappings have identical settings for the following fields:
*/
-#define PG_PTE_PROMOTE (PG_NX | PG_MANAGED | PG_W | PG_G | PG_PTE_PAT | \
- PG_M | PG_A | PG_NC_PCD | PG_NC_PWT | PG_U | PG_RW | PG_V)
+#define PG_PTE_PROMOTE (PG_NX | PG_MANAGED | PG_W | PG_G | PG_PTE_CACHE | \
+ PG_M | PG_A | PG_U | PG_RW | PG_V)
/*
* Page Protection Exception bits
@@ -96,6 +129,28 @@
#define PGEX_RSV 0x08 /* reserved PTE field is non-zero */
#define PGEX_I 0x10 /* during an instruction fetch */
+/*
+ * undef the PG_xx macros that define bits in the regular x86 PTEs that
+ * have a different position in nested PTEs. This is done when compiling
+ * code that needs to be aware of the differences between regular x86 and
+ * nested PTEs.
+ *
+ * The appropriate bitmask will be calculated at runtime based on the pmap
+ * type.
+ */
+#ifdef AMD64_NPT_AWARE
+#undef PG_AVAIL1 /* X86_PG_AVAIL1 aliases with EPT_PG_M */
+#undef PG_G
+#undef PG_A
+#undef PG_M
+#undef PG_PDE_PAT
+#undef PG_PDE_CACHE
+#undef PG_PTE_PAT
+#undef PG_PTE_CACHE
+#undef PG_RW
+#undef PG_V
+#endif
+
/*
* Pte related macros. This is complicated by having to deal with
* the sign extension of the 48th bit.
@@ -256,6 +311,11 @@ struct pmap {
int pm_flags;
};
+/* flags */
+#define PMAP_PDE_SUPERPAGE (1 << 0) /* supports 2MB superpages */
+#define PMAP_EMULATE_AD_BITS (1 << 1) /* needs A/D bits emulation */
+#define PMAP_SUPPORTS_EXEC_ONLY (1 << 2) /* execute only mappings ok */
+
typedef struct pmap *pmap_t;
#ifdef _KERNEL
@@ -272,6 +332,9 @@ extern struct pmap kernel_pmap_store;
#define PMAP_MTX(pmap) (&(pmap)->pm_mtx)
#define PMAP_TRYLOCK(pmap) mtx_trylock(&(pmap)->pm_mtx)
#define PMAP_UNLOCK(pmap) mtx_unlock(&(pmap)->pm_mtx)
+
+int pmap_pinit_type(pmap_t pmap, enum pmap_type pm_type, int flags);
+int pmap_emulate_accessed_dirty(pmap_t pmap, vm_offset_t va, int ftype);
#endif
/*
@@ -330,7 +393,7 @@ void pmap_invalidate_all(pmap_t);
void pmap_invalidate_cache(void);
void pmap_invalidate_cache_pages(vm_page_t *pages, int count);
void pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva);
-
+void pmap_get_mapping(pmap_t pmap, vm_offset_t va, uint64_t *ptr, int *num);
#endif /* _KERNEL */
#endif /* !LOCORE */
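The EPT_PG_EMUL_V/EPT_PG_EMUL_RW definitions and the PMAP_EMULATE_AD_BITS flag added above implement the A/D-bit emulation scheme from the commit message. The helper below is purely illustrative (no such function exists in pmap.c); it shows how a nested PTE could be encoded so that the first access and the first write each fault into the emulation handler.

/*
 * Illustrative only: encode a nested PTE for a pmap that emulates the
 * accessed/dirty bits.  "Valid" and "writable" are recorded in the software
 * bits 52/53; the hardware read/write permissions double as the accessed and
 * dirty bits and start out clear, so the first access and the first write
 * each trap into the emulation handler, which then sets EPT_PG_READ (PG_A)
 * and EPT_PG_WRITE (PG_M) respectively.
 */
static __inline pt_entry_t
ept_emul_pte(vm_paddr_t pa, int writable)
{
	pt_entry_t pte;

	pte = (pa & PG_FRAME) | EPT_PG_EXECUTE;	/* exec always granted, see Limitations */
	pte |= EPT_PG_EMUL_V;			/* software "valid" bit (52) */
	if (writable)
		pte |= EPT_PG_EMUL_RW;		/* software "read/write" bit (53) */
	return (pte);
}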
diff --git a/sys/amd64/include/vmm.h b/sys/amd64/include/vmm.h
index b741247..64dbc58 100644
--- a/sys/amd64/include/vmm.h
+++ b/sys/amd64/include/vmm.h
@@ -39,19 +39,18 @@ struct seg_desc;
struct vm_exit;
struct vm_run;
struct vlapic;
+struct vmspace;
+struct vm_object;
+struct pmap;
enum x2apic_state;
typedef int (*vmm_init_func_t)(void);
typedef int (*vmm_cleanup_func_t)(void);
-typedef void * (*vmi_init_func_t)(struct vm *vm); /* instance specific apis */
-typedef int (*vmi_run_func_t)(void *vmi, int vcpu, register_t rip);
+typedef void * (*vmi_init_func_t)(struct vm *vm, struct pmap *pmap);
+typedef int (*vmi_run_func_t)(void *vmi, int vcpu, register_t rip,
+ struct pmap *pmap);
typedef void (*vmi_cleanup_func_t)(void *vmi);
-typedef int (*vmi_mmap_set_func_t)(void *vmi, vm_paddr_t gpa,
- vm_paddr_t hpa, size_t length,
- vm_memattr_t attr, int prot,
- boolean_t superpages_ok);
-typedef vm_paddr_t (*vmi_mmap_get_func_t)(void *vmi, vm_paddr_t gpa);
typedef int (*vmi_get_register_t)(void *vmi, int vcpu, int num,
uint64_t *retval);
typedef int (*vmi_set_register_t)(void *vmi, int vcpu, int num,
@@ -65,6 +64,8 @@ typedef int (*vmi_inject_event_t)(void *vmi, int vcpu,
uint32_t code, int code_valid);
typedef int (*vmi_get_cap_t)(void *vmi, int vcpu, int num, int *retval);
typedef int (*vmi_set_cap_t)(void *vmi, int vcpu, int num, int val);
+typedef struct vmspace * (*vmi_vmspace_alloc)(vm_offset_t min, vm_offset_t max);
+typedef void (*vmi_vmspace_free)(struct vmspace *vmspace);
struct vmm_ops {
vmm_init_func_t init; /* module wide initialization */
@@ -73,8 +74,6 @@ struct vmm_ops {
vmi_init_func_t vminit; /* vm-specific initialization */
vmi_run_func_t vmrun;
vmi_cleanup_func_t vmcleanup;
- vmi_mmap_set_func_t vmmmap_set;
- vmi_mmap_get_func_t vmmmap_get;
vmi_get_register_t vmgetreg;
vmi_set_register_t vmsetreg;
vmi_get_desc_t vmgetdesc;
@@ -82,6 +81,8 @@ struct vmm_ops {
vmi_inject_event_t vminject;
vmi_get_cap_t vmgetcap;
vmi_set_cap_t vmsetcap;
+ vmi_vmspace_alloc vmspace_alloc;
+ vmi_vmspace_free vmspace_free;
};
extern struct vmm_ops vmm_ops_intel;
@@ -93,9 +94,14 @@ const char *vm_name(struct vm *vm);
int vm_malloc(struct vm *vm, vm_paddr_t gpa, size_t len);
int vm_map_mmio(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa);
int vm_unmap_mmio(struct vm *vm, vm_paddr_t gpa, size_t len);
-vm_paddr_t vm_gpa2hpa(struct vm *vm, vm_paddr_t gpa, size_t size);
+void *vm_gpa_hold(struct vm *, vm_paddr_t gpa, size_t len, int prot,
+ void **cookie);
+void vm_gpa_release(void *cookie);
int vm_gpabase2memseg(struct vm *vm, vm_paddr_t gpabase,
struct vm_memory_segment *seg);
+int vm_get_memobj(struct vm *vm, vm_paddr_t gpa, size_t len,
+ vm_offset_t *offset, struct vm_object **object);
+boolean_t vm_mem_allocated(struct vm *vm, vm_paddr_t gpa);
int vm_get_register(struct vm *vm, int vcpu, int reg, uint64_t *retval);
int vm_set_register(struct vm *vm, int vcpu, int reg, uint64_t val);
int vm_get_seg_desc(struct vm *vm, int vcpu, int reg,
@@ -130,8 +136,9 @@ void *vm_iommu_domain(struct vm *vm);
enum vcpu_state {
VCPU_IDLE,
+ VCPU_FROZEN,
VCPU_RUNNING,
- VCPU_CANNOT_RUN,
+ VCPU_SLEEPING,
};
int vcpu_set_state(struct vm *vm, int vcpu, enum vcpu_state state);
@@ -145,7 +152,9 @@ vcpu_is_running(struct vm *vm, int vcpu, int *hostcpu)
void *vcpu_stats(struct vm *vm, int vcpu);
void vm_interrupt_hostcpu(struct vm *vm, int vcpu);
-
+struct vmspace *vm_get_vmspace(struct vm *vm);
+int vm_assign_pptdev(struct vm *vm, int bus, int slot, int func);
+int vm_unassign_pptdev(struct vm *vm, int bus, int slot, int func);
#endif /* KERNEL */
#include <machine/vmm_instruction_emul.h>
@@ -247,6 +256,7 @@ enum vm_exitcode {
VM_EXITCODE_MTRAP,
VM_EXITCODE_PAUSE,
VM_EXITCODE_PAGING,
+ VM_EXITCODE_INST_EMUL,
VM_EXITCODE_SPINUP_AP,
VM_EXITCODE_MAX
};
@@ -266,8 +276,15 @@ struct vm_exit {
} inout;
struct {
uint64_t gpa;
- struct vie vie;
+ int fault_type;
+ int protection;
} paging;
+ struct {
+ uint64_t gpa;
+ uint64_t gla;
+ uint64_t cr3;
+ struct vie vie;
+ } inst_emul;
/*
* VMX specific payload. Used when there is no "better"
* exitcode to represent the VM-exit.
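The vm_gpa_hold()/vm_gpa_release() pair declared above replaces the removed vm_gpa2hpa() translation now that guest memory is pageable. A hedged sketch of the access pattern follows; 'copyout_to_guest' is a hypothetical caller and the snippet assumes the range does not cross a guest page boundary.

/*
 * Illustrative only: hold the host page backing a guest physical address,
 * access it, then release the hold so the page becomes pageable again.
 */
static int
copyout_to_guest(struct vm *vm, vm_paddr_t gpa, const void *src, size_t len)
{
	void *cookie, *hva;

	hva = vm_gpa_hold(vm, gpa, len, VM_PROT_WRITE, &cookie);
	if (hva == NULL)
		return (EFAULT);	/* gpa is not mapped writable */

	bcopy(src, hva, len);		/* safe: the backing page is held */

	vm_gpa_release(cookie);
	return (0);
}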
diff --git a/sys/amd64/include/vmm_dev.h b/sys/amd64/include/vmm_dev.h
index c3b47c2..bf014cc 100644
--- a/sys/amd64/include/vmm_dev.h
+++ b/sys/amd64/include/vmm_dev.h
@@ -36,7 +36,8 @@ int vmmdev_cleanup(void);
struct vm_memory_segment {
vm_paddr_t gpa; /* in */
- size_t len; /* in */
+ size_t len;
+ int wired;
};
struct vm_register {
@@ -135,6 +136,12 @@ struct vm_x2apic {
enum x2apic_state state;
};
+struct vm_gpa_pte {
+ uint64_t gpa; /* in */
+ uint64_t pte[4]; /* out */
+ int ptenum;
+};
+
enum {
/* general routines */
IOCNUM_ABIVERS = 0,
@@ -145,6 +152,7 @@ enum {
/* memory apis */
IOCNUM_MAP_MEMORY = 10,
IOCNUM_GET_MEMORY_SEG = 11,
+ IOCNUM_GET_GPA_PMAP = 12,
/* register/state accessors */
IOCNUM_SET_REGISTER = 20,
@@ -215,4 +223,6 @@ enum {
_IOW('v', IOCNUM_SET_X2APIC_STATE, struct vm_x2apic)
#define VM_GET_X2APIC_STATE \
_IOWR('v', IOCNUM_GET_X2APIC_STATE, struct vm_x2apic)
+#define VM_GET_GPA_PMAP \
+ _IOWR('v', IOCNUM_GET_GPA_PMAP, struct vm_gpa_pte)
#endif
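The new vm_gpa_pte structure and VM_GET_GPA_PMAP ioctl expose the nested page table walk for a guest physical address to userland. A hedged userland sketch follows; the device path and the exact set of headers needed are assumptions, not taken from this commit.

/* Illustrative only: dump the nested PTEs that map a guest physical address. */
#include <sys/types.h>
#include <sys/ioctl.h>

#include <machine/vmm.h>
#include <machine/vmm_dev.h>

#include <err.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static void
dump_gpa_ptes(const char *vmmdev, uint64_t gpa)
{
	struct vm_gpa_pte gp;
	int fd, i;

	fd = open(vmmdev, O_RDONLY);	/* e.g. "/dev/vmm/<vmname>" */
	if (fd < 0)
		err(1, "open %s", vmmdev);

	gp.gpa = gpa;			/* in: guest physical address */
	if (ioctl(fd, VM_GET_GPA_PMAP, &gp) < 0)
		err(1, "VM_GET_GPA_PMAP");

	for (i = 0; i < gp.ptenum; i++)	/* out: one entry per level walked */
		printf("level %d pte: 0x%016jx\n", i, (uintmax_t)gp.pte[i]);

	close(fd);
}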
diff --git a/sys/amd64/include/vmm_instruction_emul.h b/sys/amd64/include/vmm_instruction_emul.h
index a812a73..a7480e7 100644
--- a/sys/amd64/include/vmm_instruction_emul.h
+++ b/sys/amd64/include/vmm_instruction_emul.h
@@ -102,11 +102,15 @@ int vmm_emulate_instruction(void *vm, int cpuid, uint64_t gpa, struct vie *vie,
#ifdef _KERNEL
/*
* APIs to fetch and decode the instruction from nested page fault handler.
+ *
+ * 'vie' must be initialized before calling 'vmm_fetch_instruction()'
*/
int vmm_fetch_instruction(struct vm *vm, int cpuid,
uint64_t rip, int inst_length, uint64_t cr3,
struct vie *vie);
+void vie_init(struct vie *vie);
+
/*
* Decode the instruction fetched into 'vie' so it can be emulated.
*
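The comment added above makes vie_init() a hard prerequisite of vmm_fetch_instruction(). A minimal hedged sketch of that ordering follows; the wrapper itself is hypothetical and simply reflects the declared signatures.

/*
 * Illustrative only: initialize the instruction-emulation context and fetch
 * the instruction that caused the nested page fault.
 */
static int
fetch_guest_instruction(struct vm *vm, int vcpuid, uint64_t rip,
    int inst_length, uint64_t cr3, struct vie *vie)
{

	vie_init(vie);		/* required before vmm_fetch_instruction() */
	return (vmm_fetch_instruction(vm, vcpuid, rip, inst_length, cr3, vie));
}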