author     Renato Botelho <renato@netgate.com>    2016-05-09 17:39:43 -0300
committer  Renato Botelho <renato@netgate.com>    2016-05-09 17:39:43 -0300
commit     20ab79d9560e0036f834689a67c3035c18e3eb43 (patch)
tree       ee1296285669938e8ca9f3215719c5e461aab23b /sys/amd64
parent     394097451eda898069e18b8dc01759b5ec2d19ff (diff)
parent     751248c9f444f5d00818a67a793dc0319749f236 (diff)
Merge remote-tracking branch 'origin/master' into devel-11
Diffstat (limited to 'sys/amd64')
-rw-r--r--  sys/amd64/acpica/acpi_wakecode.S      |  4
-rw-r--r--  sys/amd64/amd64/atomic.c              |  2
-rw-r--r--  sys/amd64/amd64/exception.S           |  2
-rw-r--r--  sys/amd64/amd64/mpboot.S              |  4
-rw-r--r--  sys/amd64/amd64/pmap.c                | 22
-rw-r--r--  sys/amd64/conf/GENERIC                |  3
-rw-r--r--  sys/amd64/conf/NOTES                  |  2
-rw-r--r--  sys/amd64/include/vmm.h               |  2
-rw-r--r--  sys/amd64/linux/linux_locore.s        | 32
-rw-r--r--  sys/amd64/linux/linux_machdep.c       |  2
-rw-r--r--  sys/amd64/linux32/linux32_machdep.c   |  2
-rw-r--r--  sys/amd64/vmm/amd/vmcb.h              |  2
-rw-r--r--  sys/amd64/vmm/intel/vtd.c             |  2
-rw-r--r--  sys/amd64/vmm/io/vlapic.c             |  2
-rw-r--r--  sys/amd64/vmm/vmm_instruction_emul.c  |  2
-rw-r--r--  sys/amd64/vmm/vmm_stat.c              |  2
16 files changed, 70 insertions, 17 deletions
diff --git a/sys/amd64/acpica/acpi_wakecode.S b/sys/amd64/acpica/acpi_wakecode.S
index a4c559f..6b36d55 100644
--- a/sys/amd64/acpica/acpi_wakecode.S
+++ b/sys/amd64/acpica/acpi_wakecode.S
@@ -179,7 +179,7 @@ wakeup_32:
* Finally, switch to long bit mode by enabling paging. We have
* to be very careful here because all the segmentation disappears
* out from underneath us. The spec says we can depend on the
- * subsequent pipelined branch to execute, but *only if* everthing
+ * subsequent pipelined branch to execute, but *only if* everything
* is still identity mapped. If any mappings change, the pipeline
* will flush.
*/
@@ -188,7 +188,7 @@ wakeup_32:
mov %eax, %cr0
/*
- * At this point paging is enabled, and we are in "compatability" mode.
+ * At this point paging is enabled, and we are in "compatibility" mode.
* We do another far jump to reload %cs with the 64 bit selector.
* %cr3 points to a 4-level page table page.
* We cannot yet jump all the way to the kernel because we can only
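The comment in this hunk (repeated verbatim in the mpboot.S hunk below) describes the 32-bit to long-mode hand-off: paging is enabled while the code is still identity mapped, the CPU lands in compatibility mode, and a later far jump loads a 64-bit %cs. As a reference, here is a minimal C sketch of the architectural bits involved; the values come from the AMD64/Intel manuals and the macro names are illustrative, not the kernel's own definitions.

/*
 * Reference sketch only: register bits behind the transition described
 * in the comment above (architectural values; illustrative names).
 */
#define	CR0_PE		0x00000001u	/* protected mode enable */
#define	CR0_PG		0x80000000u	/* paging enable; with EFER.LME set,
					   this activates long mode */
#define	CR4_PAE		0x00000020u	/* PAE paging, required for long mode */
#define	MSR_EFER	0xc0000080u	/* extended feature enable register */
#define	EFER_LME	0x00000100u	/* long mode enable (LMA is set by the
					   CPU once paging is turned on) */

Execution stays in compatibility mode until the subsequent far jump reloads %cs with a 64-bit code-segment selector, which is why the identity mapping must survive until that jump.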
diff --git a/sys/amd64/amd64/atomic.c b/sys/amd64/amd64/atomic.c
index 063201f..9dc3e8d 100644
--- a/sys/amd64/amd64/atomic.c
+++ b/sys/amd64/amd64/atomic.c
@@ -27,7 +27,7 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
-/* This file creates publically callable functions to perform various
+/* This file creates publicly callable functions to perform various
* simple arithmetic on memory which is atomic in the presence of
* interrupts and multiple processors.
*/
diff --git a/sys/amd64/amd64/exception.S b/sys/amd64/amd64/exception.S
index fd8cdac..2c2b99b 100644
--- a/sys/amd64/amd64/exception.S
+++ b/sys/amd64/amd64/exception.S
@@ -427,7 +427,7 @@ IDTVEC(fast_syscall)
/*
* Here for CYA insurance, in case a "syscall" instruction gets
- * issued from 32 bit compatability mode. MSR_CSTAR has to point
+ * issued from 32 bit compatibility mode. MSR_CSTAR has to point
* to *something* if EFER_SCE is enabled.
*/
IDTVEC(fast_syscall32)
diff --git a/sys/amd64/amd64/mpboot.S b/sys/amd64/amd64/mpboot.S
index c4b6537..5576aff 100644
--- a/sys/amd64/amd64/mpboot.S
+++ b/sys/amd64/amd64/mpboot.S
@@ -121,7 +121,7 @@ protmode:
* Finally, switch to long bit mode by enabling paging. We have
* to be very careful here because all the segmentation disappears
* out from underneath us. The spec says we can depend on the
- * subsequent pipelined branch to execute, but *only if* everthing
+ * subsequent pipelined branch to execute, but *only if* everything
* is still identity mapped. If any mappings change, the pipeline
* will flush.
*/
@@ -130,7 +130,7 @@ protmode:
mov %eax, %cr0
/*
- * At this point paging is enabled, and we are in "compatability" mode.
+ * At this point paging is enabled, and we are in "compatibility" mode.
* We do another far jump to reload %cs with the 64 bit selector.
* %cr3 points to a 4-level page table page.
* We cannot yet jump all the way to the kernel because we can only
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 22a6c31..a7c879c 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -4407,6 +4407,12 @@ pmap_enter_pde(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
lockp)) {
SLIST_INIT(&free);
if (pmap_unwire_ptp(pmap, va, mpde, &free)) {
+ /*
+ * Although "va" is not mapped, paging-
+ * structure caches could nonetheless have
+ * entries that refer to the freed page table
+ * pages. Invalidate those entries.
+ */
pmap_invalidate_page(pmap, va);
pmap_free_zero_pages(&free);
}
@@ -4584,6 +4590,12 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
if (mpte != NULL) {
SLIST_INIT(&free);
if (pmap_unwire_ptp(pmap, va, mpte, &free)) {
+ /*
+ * Although "va" is not mapped, paging-
+ * structure caches could nonetheless have
+ * entries that refer to the freed page table
+ * pages. Invalidate those entries.
+ */
pmap_invalidate_page(pmap, va);
pmap_free_zero_pages(&free);
}
@@ -4967,6 +4979,14 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
SLIST_INIT(&free);
if (pmap_unwire_ptp(dst_pmap, addr,
dstmpte, &free)) {
+ /*
+ * Although "addr" is not
+ * mapped, paging-structure
+ * caches could nonetheless
+ * have entries that refer to
+ * the freed page table pages.
+ * Invalidate those entries.
+ */
pmap_invalidate_page(dst_pmap,
addr);
pmap_free_zero_pages(&free);
@@ -5219,7 +5239,7 @@ pmap_page_is_mapped(vm_page_t m)
* Destroy all managed, non-wired mappings in the given user-space
* pmap. This pmap cannot be active on any processor besides the
* caller.
- *
+ *
* This function cannot be applied to the kernel pmap. Moreover, it
* is not intended for general use. It is only to be used during
* process termination. Consequently, it can be implemented in ways
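The three comments added above all document the same subtlety: pmap_invalidate_page() is issued even though "va" has no mapping, because the paging-structure caches may still hold entries that point at the page-table pages just unwired. Condensed into one place, the pattern looks roughly like the following kernel fragment (shaped after the hunks above; not compilable on its own):

	SLIST_INIT(&free);
	if (pmap_unwire_ptp(pmap, va, mpte, &free)) {
		/*
		 * "va" itself is unmapped, but cached upper-level entries
		 * may still reference the freed page-table pages, so flush
		 * them before the pages are recycled.
		 */
		pmap_invalidate_page(pmap, va);
		pmap_free_zero_pages(&free);
	}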
diff --git a/sys/amd64/conf/GENERIC b/sys/amd64/conf/GENERIC
index 272da43..2859de5 100644
--- a/sys/amd64/conf/GENERIC
+++ b/sys/amd64/conf/GENERIC
@@ -102,6 +102,7 @@ device cpufreq
device acpi
options ACPI_DMAR
device pci
+options PCI_HP # PCI-Express native HotPlug
options PCI_IOV # PCI SR-IOV support
# Floppy drives
@@ -349,7 +350,7 @@ device virtio_blk # VirtIO Block device
device virtio_scsi # VirtIO SCSI device
device virtio_balloon # VirtIO Memory Balloon device
-# HyperV drivers and enchancement support
+# HyperV drivers and enhancement support
device hyperv # HyperV drivers
# Xen HVM Guest Optimizations
diff --git a/sys/amd64/conf/NOTES b/sys/amd64/conf/NOTES
index 604a1a3..4107ca6 100644
--- a/sys/amd64/conf/NOTES
+++ b/sys/amd64/conf/NOTES
@@ -509,7 +509,7 @@ device virtio_balloon # VirtIO Memory Balloon device
device virtio_random # VirtIO Entropy device
device virtio_console # VirtIO Console device
-# Microsoft Hyper-V enchancement support
+# Microsoft Hyper-V enhancement support
device hyperv # HyperV drivers
# Xen HVM Guest Optimizations
diff --git a/sys/amd64/include/vmm.h b/sys/amd64/include/vmm.h
index f2de960..bdfff1f 100644
--- a/sys/amd64/include/vmm.h
+++ b/sys/amd64/include/vmm.h
@@ -377,7 +377,7 @@ struct vm_copyinfo {
* at 'gla' and 'len' bytes long. The 'prot' should be set to PROT_READ for
* a copyin or PROT_WRITE for a copyout.
*
- * retval is_fault Intepretation
+ * retval is_fault Interpretation
* 0 0 Success
* 0 1 An exception was injected into the guest
* EFAULT N/A Unrecoverable error
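The corrected table spells out the three outcomes of vm_copy_setup(). A caller typically handles them as in the sketch below; this is an illustrative fragment that assumes the copy helpers declared in this header (vm_copy_setup(), vm_copyin(), vm_copy_teardown()), with the exact prototypes taken from vmm.h rather than restated here.

	error = vm_copy_setup(vm, vcpuid, &paging, gla, len, PROT_READ,
	    copyinfo, nitems(copyinfo), &fault);
	if (error)
		return (error);		/* EFAULT: unrecoverable error */
	if (fault)
		return (0);		/* exception already injected into guest */
	vm_copyin(vm, vcpuid, copyinfo, buf, len);	/* success: do the copy */
	vm_copy_teardown(vm, vcpuid, copyinfo, nitems(copyinfo));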
diff --git a/sys/amd64/linux/linux_locore.s b/sys/amd64/linux/linux_locore.s
index 5dcc09a..1bcf05b 100644
--- a/sys/amd64/linux/linux_locore.s
+++ b/sys/amd64/linux/linux_locore.s
@@ -29,6 +29,7 @@ NON_GPROF_ENTRY(linux_rt_sigcode)
movq $LINUX_SYS_linux_rt_sigreturn,%rax /* linux_rt_sigreturn() */
syscall /* enter kernel with args */
hlt
+.endrtsigcode:
0: jmp 0b
NON_GPROF_ENTRY(__vdso_clock_gettime)
@@ -74,3 +75,34 @@ NON_GPROF_ENTRY(__vdso_getcpu)
.balign 4
.previous
#endif
+
+ .section .eh_frame,"a",@progbits
+.LSTARTFRAMEDLSI0:
+ .long .LENDCIEDLSI0-.LSTARTCIEDLSI0
+.LSTARTCIEDLSI0:
+ .long 0 /* CIE ID */
+ .byte 1 /* Version number */
+ .string "zR" /* NULL-terminated
+ * augmentation string
+ */
+ .uleb128 1 /* Code alignment factor */
+ .sleb128 -4 /* Data alignment factor */
+ .byte 8 /* Return address register column */
+ .uleb128 1 /* Augmentation value length */
+ .byte 0x1b /* DW_EH_PE_pcrel|DW_EH_PE_sdata4. */
+ .byte 0x0c /* DW_CFA_def_cfa */
+ .uleb128 4
+ .uleb128 4
+ .byte 0x88 /* DW_CFA_offset, column 0x8 */
+ .uleb128 1
+ .align 4
+.LENDCIEDLSI0:
+ .long .LENDFDEDLSI0-.LSTARTFDEDLSI0 /* Length FDE */
+.LSTARTFDEDLSI0:
+ .long .LSTARTFDEDLSI0-.LSTARTFRAMEDLSI0 /* CIE pointer */
+ .long .startrtsigcode-. /* PC-relative start address */
+ .long .endrtsigcode-.startrtsigcode
+ .uleb128 0
+ .align 4
+.LENDFDEDLSI0:
+ .previous
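The new .eh_frame data hand-encodes a CIE/FDE pair so that unwinders can step through the Linux signal trampoline; the length fields, alignment factors, and augmentation data use DWARF's LEB128 variable-length integers, emitted here by the .uleb128/.sleb128 directives. A small self-contained sketch of unsigned LEB128 decoding (illustrative helper, not code from the tree):

#include <stddef.h>
#include <stdint.h>

/*
 * Decode one unsigned LEB128 value as used by the .uleb128 fields in
 * the CIE/FDE above: 7 data bits per byte, least-significant group
 * first, high bit set while more bytes follow.
 */
static uint64_t
uleb128_decode(const uint8_t *p, size_t *consumed)
{
	uint64_t value = 0;
	unsigned int shift = 0;
	size_t i = 0;

	do {
		value |= (uint64_t)(p[i] & 0x7f) << shift;
		shift += 7;
	} while (p[i++] & 0x80);

	*consumed = i;
	return (value);
}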
diff --git a/sys/amd64/linux/linux_machdep.c b/sys/amd64/linux/linux_machdep.c
index 376cce7..0459ba7 100644
--- a/sys/amd64/linux/linux_machdep.c
+++ b/sys/amd64/linux/linux_machdep.c
@@ -212,7 +212,7 @@ linux_mmap2(struct thread *td, struct linux_mmap2_args *args)
/*
* The Linux MAP_GROWSDOWN option does not limit auto
* growth of the region. Linux mmap with this option
- * takes as addr the inital BOS, and as len, the initial
+ * takes as addr the initial BOS, and as len, the initial
* region size. It can then grow down from addr without
* limit. However, Linux threads has an implicit internal
* limit to stack size of STACK_SIZE. Its just not
diff --git a/sys/amd64/linux32/linux32_machdep.c b/sys/amd64/linux32/linux32_machdep.c
index 8f4e350..25f023c 100644
--- a/sys/amd64/linux32/linux32_machdep.c
+++ b/sys/amd64/linux32/linux32_machdep.c
@@ -576,7 +576,7 @@ linux_mmap_common(struct thread *td, l_uintptr_t addr, l_size_t len, l_int prot,
/*
* The Linux MAP_GROWSDOWN option does not limit auto
* growth of the region. Linux mmap with this option
- * takes as addr the inital BOS, and as len, the initial
+ * takes as addr the initial BOS, and as len, the initial
* region size. It can then grow down from addr without
* limit. However, Linux threads has an implicit internal
* limit to stack size of STACK_SIZE. Its just not
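The corrected comment (identical in the native and 32-bit Linux mmap paths above) summarizes Linux's MAP_GROWSDOWN contract: the caller supplies the initial bottom-of-stack address and the initial region size, and the region may later grow downward. A minimal userland illustration of requesting such a mapping on Linux follows; it is a sketch that assumes the MAP_GROWSDOWN and MAP_STACK flags from Linux's <sys/mman.h>.

#define _GNU_SOURCE
#include <sys/mman.h>
#include <stdio.h>

int
main(void)
{
	size_t init_len = 64 * 1024;	/* initial region size ("len") */
	void *bos;

	/* addr == NULL lets the kernel choose the initial BOS; faults just
	 * below the region may later extend it downward. */
	bos = mmap(NULL, init_len, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANONYMOUS | MAP_GROWSDOWN | MAP_STACK, -1, 0);
	if (bos == MAP_FAILED) {
		perror("mmap");
		return (1);
	}
	printf("grow-down region starts at %p\n", bos);
	return (0);
}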
diff --git a/sys/amd64/vmm/amd/vmcb.h b/sys/amd64/vmm/amd/vmcb.h
index 496f880..9c4f582 100644
--- a/sys/amd64/vmm/amd/vmcb.h
+++ b/sys/amd64/vmm/amd/vmcb.h
@@ -313,7 +313,7 @@ struct vmcb_state {
uint64_t br_to;
uint64_t int_from;
uint64_t int_to;
- uint8_t pad7[0x968]; /* Reserved upto end of VMCB */
+ uint8_t pad7[0x968]; /* Reserved up to end of VMCB */
} __attribute__ ((__packed__));
CTASSERT(sizeof(struct vmcb_state) == 0xC00);
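The pad7 field reserves the remainder of the hardware-defined VMCB state area, and the CTASSERT() that follows pins struct vmcb_state at exactly 0xC00 bytes. The same idiom, reduced to a self-contained sketch with illustrative names (CTASSERT approximated with C11 _Static_assert):

#include <stdint.h>

/*
 * Hardware-defined block: architected fields followed by reserved
 * padding up to a fixed total size, enforced at compile time.
 * 0x298 + 0x968 == 0xC00, matching the hunk above.
 */
struct hw_state_block {
	uint64_t	fields[0x298 / sizeof(uint64_t)];	/* architected part */
	uint8_t		reserved[0x968];			/* reserved up to end */
};

_Static_assert(sizeof(struct hw_state_block) == 0xC00,
    "hw_state_block must match the architected size");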
diff --git a/sys/amd64/vmm/intel/vtd.c b/sys/amd64/vmm/intel/vtd.c
index be57aff..f3b7a98 100644
--- a/sys/amd64/vmm/intel/vtd.c
+++ b/sys/amd64/vmm/intel/vtd.c
@@ -463,7 +463,7 @@ vtd_update_mapping(void *arg, vm_paddr_t gpa, vm_paddr_t hpa, uint64_t len,
panic("vtd_create_mapping: unaligned len 0x%0lx", len);
/*
- * Compute the size of the mapping that we can accomodate.
+ * Compute the size of the mapping that we can accommodate.
*
* This is based on three factors:
* - supported super page size
diff --git a/sys/amd64/vmm/io/vlapic.c b/sys/amd64/vmm/io/vlapic.c
index 3451e1e..cdf411b 100644
--- a/sys/amd64/vmm/io/vlapic.c
+++ b/sys/amd64/vmm/io/vlapic.c
@@ -841,7 +841,7 @@ vlapic_calcdest(struct vm *vm, cpuset_t *dmask, uint32_t dest, bool phys,
} else {
/*
* In the "Flat Model" the MDA is interpreted as an 8-bit wide
- * bitmask. This model is only avilable in the xAPIC mode.
+ * bitmask. This model is only available in the xAPIC mode.
*/
mda_flat_ldest = dest & 0xff;
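The corrected comment refers to the xAPIC "Flat Model", in which the low 8 bits of the message destination address (MDA) form a bitmask that is ANDed against each local APIC's 8-bit logical ID (LDR bits 31:24). A self-contained sketch of that match, following the architectural behavior with illustrative names:

#include <stdbool.h>
#include <stdint.h>

/*
 * Flat-model logical destination check (xAPIC only): a CPU is targeted
 * when its logical APIC ID intersects the 8-bit MDA bitmask.
 */
static bool
xapic_flat_match(uint32_t mda, uint32_t ldr)
{
	uint8_t mda_flat_ldest = mda & 0xff;
	uint8_t logical_id = (ldr >> 24) & 0xff;

	return ((mda_flat_ldest & logical_id) != 0);
}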
diff --git a/sys/amd64/vmm/vmm_instruction_emul.c b/sys/amd64/vmm/vmm_instruction_emul.c
index ae5330f..852f1d7 100644
--- a/sys/amd64/vmm/vmm_instruction_emul.c
+++ b/sys/amd64/vmm/vmm_instruction_emul.c
@@ -1232,7 +1232,7 @@ emulate_stack_op(void *vm, int vcpuid, uint64_t mmio_gpa, struct vie *vie,
size = vie->opsize_override ? 2 : 8;
} else {
/*
- * In protected or compability mode the 'B' flag in the
+ * In protected or compatibility mode the 'B' flag in the
* stack-segment descriptor determines the size of the
* stack pointer.
*/
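Around this hunk the emulator picks the stack-pointer width for PUSH/POP: 8 bytes in 64-bit mode (2 with an operand-size override), otherwise the stack-segment descriptor's 'B' flag selects a 32-bit or 16-bit stack pointer. Condensed into a self-contained sketch with illustrative names:

#include <stdbool.h>

/*
 * Stack operand size for PUSH/POP emulation: 64-bit mode uses an
 * 8-byte stack pointer (2 with a 0x66 override); in protected or
 * compatibility mode the SS descriptor's 'B' flag picks 4 vs. 2.
 */
static int
stack_op_size(bool long_mode, bool opsize_override, bool ss_big)
{
	if (long_mode)
		return (opsize_override ? 2 : 8);
	return (ss_big ? 4 : 2);
}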
diff --git a/sys/amd64/vmm/vmm_stat.c b/sys/amd64/vmm/vmm_stat.c
index 7e2f64d..62002d6 100644
--- a/sys/amd64/vmm/vmm_stat.c
+++ b/sys/amd64/vmm/vmm_stat.c
@@ -69,7 +69,7 @@ vmm_stat_register(void *arg)
return;
if (vst_num_elems + vst->nelems >= MAX_VMM_STAT_ELEMS) {
- printf("Cannot accomodate vmm stat type \"%s\"!\n", vst->desc);
+ printf("Cannot accommodate vmm stat type \"%s\"!\n", vst->desc);
return;
}
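vmm_stat_register() refuses a new stat type once the fixed array of MAX_VMM_STAT_ELEMS counters is exhausted. The same capacity-checked registration pattern, reduced to a self-contained sketch with illustrative names and limits:

#include <stdio.h>

#define	MAX_ELEMS	64	/* stand-in for MAX_VMM_STAT_ELEMS */

static int num_elems;		/* slots handed out so far */

/*
 * Reserve "nelems" contiguous counter slots, mirroring the bounds check
 * in vmm_stat_register() above.  Returns the first reserved index, or
 * -1 when the fixed array cannot accommodate the request.
 */
static int
stat_register_sketch(const char *desc, int nelems)
{
	if (num_elems + nelems >= MAX_ELEMS) {
		printf("Cannot accommodate stat type \"%s\"!\n", desc);
		return (-1);
	}
	num_elems += nelems;
	return (num_elems - nelems);
}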