Diffstat (limited to 'sys')
-rw-r--r--  sys/amd64/amd64/pmap.c  45
-rw-r--r--  sys/i386/i386/pmap.c    50
2 files changed, 75 insertions(+), 20 deletions(-)
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 49ceca9..2d88398 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -1041,7 +1041,12 @@ pmap_bootstrap(vm_paddr_t *firstaddr)
virtual_avail = va;
- /* Initialize the PAT MSR. */
+ /*
+ * Initialize the PAT MSR.
+ * pmap_init_pat() clears and sets CR4_PGE, which, as a
+ * side-effect, invalidates stale PG_G TLB entries that might
+ * have been created in our pre-boot environment.
+ */
pmap_init_pat();
/* Initialize TLB Context Id. */
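The comment added above leans on a side effect of pmap_init_pat(): reprogramming the PAT MSR requires temporarily clearing CR4.PGE, and clearing PGE flushes every TLB entry, including global (PG_G) ones left behind by the loader. A minimal sketch of that side effect, assuming the usual FreeBSD rcr4()/load_cr4() accessors and the CR4_PGE constant (illustrative only, not the committed code):

static void
flush_stale_global_mappings(void)
{
        u_long cr4;

        cr4 = rcr4();
        if ((cr4 & CR4_PGE) != 0) {
                /* Clearing PGE invalidates all TLB entries, PG_G included. */
                load_cr4(cr4 & ~CR4_PGE);
                /* Restoring PGE leaves no stale global entries behind. */
                load_cr4(cr4);
        }
}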
@@ -3441,6 +3446,7 @@ pmap_demote_pde_locked(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
vm_paddr_t mptepa;
vm_page_t mpte;
struct spglist free;
+ vm_offset_t sva;
int PG_PTE_CACHE;
PG_G = pmap_global_bit(pmap);
@@ -3479,9 +3485,9 @@ pmap_demote_pde_locked(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
DMAP_MAX_ADDRESS ? VM_ALLOC_INTERRUPT : VM_ALLOC_NORMAL) |
VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
SLIST_INIT(&free);
- pmap_remove_pde(pmap, pde, trunc_2mpage(va), &free,
- lockp);
- pmap_invalidate_page(pmap, trunc_2mpage(va));
+ sva = trunc_2mpage(va);
+ pmap_remove_pde(pmap, pde, sva, &free, lockp);
+ pmap_invalidate_range(pmap, sva, sva + NBPDR - 1);
pmap_free_zero_pages(&free);
CTR2(KTR_PMAP, "pmap_demote_pde: failure for va %#lx"
" in pmap %p", va, pmap);
@@ -3624,11 +3630,23 @@ pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
pmap->pm_stats.wired_count -= NBPDR / PAGE_SIZE;
/*
- * Machines that don't support invlpg, also don't support
- * PG_G.
+ * When workaround_erratum383 is false, a promotion to a 2M
+ * page mapping does not invalidate the 512 4K page mappings
+ * from the TLB. Consequently, at this point, the TLB may
+ * hold both 4K and 2M page mappings. Therefore, the entire
+ * range of addresses must be invalidated here. In contrast,
+ * when workaround_erratum383 is true, a promotion does
+ * invalidate the 512 4K page mappings, and so a single INVLPG
+ * suffices to invalidate the 2M page mapping.
*/
- if (oldpde & PG_G)
- pmap_invalidate_page(kernel_pmap, sva);
+ if ((oldpde & PG_G) != 0) {
+ if (workaround_erratum383)
+ pmap_invalidate_page(kernel_pmap, sva);
+ else
+ pmap_invalidate_range(kernel_pmap, sva,
+ sva + NBPDR - 1);
+ }
+
pmap_resident_count_dec(pmap, NBPDR / PAGE_SIZE);
if (oldpde & PG_MANAGED) {
CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, oldpde & PG_PS_FRAME);
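The same rule reappears in the pmap_protect() superpage path in the next hunk ("See pmap_remove_pde() for explanation."). Written out as a hypothetical helper, purely to summarize the policy the new comment describes (no such helper exists in the tree):

static void
invalidate_global_superpage(vm_offset_t sva)
{
        if (workaround_erratum383) {
                /* Promotion already flushed the 512 constituent 4KB entries. */
                pmap_invalidate_page(kernel_pmap, sva);
        } else {
                /* 4KB and 2MB entries may coexist; flush the whole range. */
                pmap_invalidate_range(kernel_pmap, sva, sva + NBPDR - 1);
        }
}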
@@ -4011,9 +4029,14 @@ retry:
if (newpde != oldpde) {
if (!atomic_cmpset_long(pde, oldpde, newpde))
goto retry;
- if (oldpde & PG_G)
- pmap_invalidate_page(pmap, sva);
- else
+ if (oldpde & PG_G) {
+ /* See pmap_remove_pde() for explanation. */
+ if (workaround_erratum383)
+ pmap_invalidate_page(kernel_pmap, sva);
+ else
+ pmap_invalidate_range(kernel_pmap, sva,
+ sva + NBPDR - 1);
+ } else
anychanged = TRUE;
}
return (anychanged);
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index 60cfe47..bdc310c 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -508,7 +508,14 @@ pmap_bootstrap(vm_paddr_t firstaddr)
for (i = 1; i < NKPT; i++)
PTD[i] = 0;
- /* Initialize the PAT MSR if present. */
+ /*
+ * Initialize the PAT MSR if present.
+ * pmap_init_pat() clears and sets CR4_PGE, which, as a
+ * side-effect, invalidates stale PG_G TLB entries that might
+ * have been created in our pre-boot environment. We assume
+ * that PAT support implies PGE and, conversely, that PGE support
+ * implies PAT; both features were introduced with the Pentium Pro.
+ */
pmap_init_pat();
/* Turn on PG_G on kernel page(s) */
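The i386 version of the comment adds an assumption: PAT and PGE support coincide, both having been introduced with the Pentium Pro. This matters because pmap_init_pat() returns early on CPUs without PAT and therefore never toggles CR4_PGE; by the assumption, such CPUs also lack PGE, so no stale PG_G entries can exist in the first place. A hypothetical sanity check using the existing cpu_feature bits (not part of this commit) might read:

        KASSERT(((cpu_feature & CPUID_PAT) != 0) ==
            ((cpu_feature & CPUID_PGE) != 0),
            ("PAT and PGE support expected to coincide"));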
@@ -565,7 +572,10 @@ pmap_init_pat(void)
pat_table[PAT_WRITE_PROTECTED] = 3;
pat_table[PAT_UNCACHED] = 3;
- /* Bail if this CPU doesn't implement PAT. */
+ /*
+ * Bail if this CPU doesn't implement PAT.
+ * We assume that PAT support implies PGE.
+ */
if ((cpu_feature & CPUID_PAT) == 0) {
for (i = 0; i < PAT_INDEX_SIZE; i++)
pat_index[i] = pat_table[i];
@@ -2633,6 +2643,7 @@ pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
vm_paddr_t mptepa;
vm_page_t mpte;
struct spglist free;
+ vm_offset_t sva;
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
oldpde = *pde;
@@ -2655,8 +2666,9 @@ pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
va >> PDRSHIFT, VM_ALLOC_NOOBJ | VM_ALLOC_NORMAL |
VM_ALLOC_WIRED)) == NULL) {
SLIST_INIT(&free);
- pmap_remove_pde(pmap, pde, trunc_4mpage(va), &free);
- pmap_invalidate_page(pmap, trunc_4mpage(va));
+ sva = trunc_4mpage(va);
+ pmap_remove_pde(pmap, pde, sva, &free);
+ pmap_invalidate_range(pmap, sva, sva + NBPDR - 1);
pmap_free_zero_pages(&free);
CTR2(KTR_PMAP, "pmap_demote_pde: failure for va %#x"
" in pmap %p", va, pmap);
@@ -2827,9 +2839,24 @@ pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
/*
* Machines that don't support invlpg, also don't support
* PG_G.
+ *
+ * When workaround_erratum383 is false, a promotion to a 2M/4M
+ * page mapping does not invalidate the 512/1024 4K page mappings
+ * from the TLB. Consequently, at this point, the TLB may
+ * hold both 4K and 2M/4M page mappings. Therefore, the entire
+ * range of addresses must be invalidated here. In contrast,
+ * when workaround_erratum383 is true, a promotion does
+ * invalidate the 512/1024 4K page mappings, and so a single INVLPG
+ * suffices to invalidate the 2M/4M page mapping.
*/
- if (oldpde & PG_G)
- pmap_invalidate_page(kernel_pmap, sva);
+ if ((oldpde & PG_G) != 0) {
+ if (workaround_erratum383)
+ pmap_invalidate_page(kernel_pmap, sva);
+ else
+ pmap_invalidate_range(kernel_pmap, sva,
+ sva + NBPDR - 1);
+ }
+
pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
if (oldpde & PG_MANAGED) {
pvh = pa_to_pvh(oldpde & PG_PS_FRAME);
@@ -3139,9 +3166,14 @@ retry:
if (newpde != oldpde) {
if (!pde_cmpset(pde, oldpde, newpde))
goto retry;
- if (oldpde & PG_G)
- pmap_invalidate_page(pmap, sva);
- else
+ if (oldpde & PG_G) {
+ /* See pmap_remove_pde() for explanation. */
+ if (workaround_erratum383)
+ pmap_invalidate_page(kernel_pmap, sva);
+ else
+ pmap_invalidate_range(kernel_pmap, sva,
+ sva + NBPDR - 1);
+ } else
anychanged = TRUE;
}
return (anychanged);