author:    alc <alc@FreeBSD.org>  2017-06-28 04:01:29 +0000
committer: alc <alc@FreeBSD.org>  2017-06-28 04:01:29 +0000
commit:    b497b3ce303caf124e91812fdd0aae50dad2f89b (patch)
tree:      546481b4cfad14a9d947dd01efa64aa2a87a374e /sys/amd64/amd64/pmap.c
parent:    022ed953b94b818bfab23e8eb02a79f8e8824cc6 (diff)
MFC r314310

Refine the fix from r312954. Specifically, add a new PDE-only flag,
PG_PROMOTED, that indicates whether lingering 4KB page mappings might
need to be flushed on a PDE change that restricts or destroys a 2MB
page mapping. This flag allows the pmap to avoid range invalidations
that are both unnecessary and costly.

Approved by:	re (kib)
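At its core, the change gates the invalidation granularity on a single
PDE bit. Below is a minimal, self-contained sketch of that decision
logic; flush_page() and flush_range() are hypothetical stand-ins for
pmap_invalidate_page() and pmap_invalidate_range(), and the PG_PROMOTED
bit value shown is illustrative only (the real definition lives in
sys/amd64/include/pmap.h):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096UL			/* 4KB base page */
#define NBPDR		(2UL * 1024 * 1024)	/* bytes mapped by one PDE (2MB) */
#define PG_PROMOTED	(1ULL << 54)		/* bit value illustrative only */

/* Hypothetical stand-in for pmap_invalidate_page(). */
static void
flush_page(uintptr_t va)
{

	printf("INVLPG %#lx (1 TLB entry)\n", va);
}

/* Hypothetical stand-in for pmap_invalidate_range(). */
static void
flush_range(uintptr_t sva, uintptr_t eva)
{

	printf("flush [%#lx, %#lx] (%lu TLB entries)\n", sva, eva,
	    (eva - sva + 1) / PAGE_SIZE);
}

/*
 * The same decision pmap_invalidate_pde_page() makes: a PDE tagged
 * PG_PROMOTED may be shadowed by up to 512 stale 4KB TLB entries, so
 * the whole 2MB range is flushed; otherwise one INVLPG suffices.
 */
static void
invalidate_pde_page(uintptr_t va, uint64_t pde)
{

	if ((pde & PG_PROMOTED) != 0)
		flush_range(va, va + NBPDR - 1);
	else
		flush_page(va);
}

int
main(void)
{
	uintptr_t va = 2UL * 1024 * 1024;	/* a 2MB-aligned address */

	invalidate_pde_page(va, PG_PROMOTED);	/* promoted mapping */
	invalidate_pde_page(va, 0);		/* directly created 2MB mapping */
	return (0);
}

Since NBPDR / PAGE_SIZE = 512, the promoted case pays for up to 512
entry invalidations (or a range shootdown on SMP) that the clear-flag
case reduces to a single INVLPG.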
Diffstat (limited to 'sys/amd64/amd64/pmap.c')
-rw-r--r--	sys/amd64/amd64/pmap.c	| 70
1 file changed, 39 insertions(+), 31 deletions(-)
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 551413f..a7ce847 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -613,6 +613,8 @@ static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
vm_page_t m, vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp);
static void pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte);
static int pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte);
+static void pmap_invalidate_pde_page(pmap_t pmap, vm_offset_t va,
+ pd_entry_t pde);
static void pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode);
static void pmap_pde_attr(pd_entry_t *pde, int cache_bits, int mask);
static void pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
@@ -1838,6 +1840,27 @@ pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde)
}
#endif /* !SMP */
+static void
+pmap_invalidate_pde_page(pmap_t pmap, vm_offset_t va, pd_entry_t pde)
+{
+
+ /*
+ * When the PDE has PG_PROMOTED set, the 2MB page mapping was created
+ * by a promotion that did not invalidate the 512 4KB page mappings
+ * that might exist in the TLB. Consequently, at this point, the TLB
+ * may hold both 4KB and 2MB page mappings for the address range [va,
+ * va + NBPDR). Therefore, the entire range must be invalidated here.
+ * In contrast, when PG_PROMOTED is clear, the TLB will not hold any
+ * 4KB page mappings for the address range [va, va + NBPDR), and so a
+ * single INVLPG suffices to invalidate the 2MB page mapping from the
+ * TLB.
+ */
+ if ((pde & PG_PROMOTED) != 0)
+ pmap_invalidate_range(pmap, va, va + NBPDR - 1);
+ else
+ pmap_invalidate_page(pmap, va);
+}
+
#define PMAP_CLFLUSH_THRESHOLD (2 * 1024 * 1024)
void
@@ -3472,7 +3495,8 @@ pmap_demote_pde_locked(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
SLIST_INIT(&free);
sva = trunc_2mpage(va);
pmap_remove_pde(pmap, pde, sva, &free, lockp);
- pmap_invalidate_range(pmap, sva, sva + NBPDR - 1);
+ if ((oldpde & PG_G) == 0)
+ pmap_invalidate_pde_page(pmap, sva, oldpde);
pmap_free_zero_pages(&free);
CTR2(KTR_PMAP, "pmap_demote_pde: failure for va %#lx"
" in pmap %p", va, pmap);
@@ -3612,25 +3636,8 @@ pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
oldpde = pte_load_clear(pdq);
if (oldpde & PG_W)
pmap->pm_stats.wired_count -= NBPDR / PAGE_SIZE;
-
- /*
- * When workaround_erratum383 is false, a promotion to a 2M
- * page mapping does not invalidate the 512 4K page mappings
- * from the TLB. Consequently, at this point, the TLB may
- * hold both 4K and 2M page mappings. Therefore, the entire
- * range of addresses must be invalidated here. In contrast,
- * when workaround_erratum383 is true, a promotion does
- * invalidate the 512 4K page mappings, and so a single INVLPG
- * suffices to invalidate the 2M page mapping.
- */
- if ((oldpde & PG_G) != 0) {
- if (workaround_erratum383)
- pmap_invalidate_page(kernel_pmap, sva);
- else
- pmap_invalidate_range(kernel_pmap, sva,
- sva + NBPDR - 1);
- }
-
+ if ((oldpde & PG_G) != 0)
+ pmap_invalidate_pde_page(kernel_pmap, sva, oldpde);
pmap_resident_count_dec(pmap, NBPDR / PAGE_SIZE);
if (oldpde & PG_MANAGED) {
CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, oldpde & PG_PS_FRAME);
@@ -4010,16 +4017,16 @@ retry:
if ((prot & VM_PROT_EXECUTE) == 0)
newpde |= pg_nx;
if (newpde != oldpde) {
- if (!atomic_cmpset_long(pde, oldpde, newpde))
+ /*
+ * As an optimization to future operations on this PDE, clear
+ * PG_PROMOTED. The impending invalidation will remove any
+ * lingering 4KB page mappings from the TLB.
+ */
+ if (!atomic_cmpset_long(pde, oldpde, newpde & ~PG_PROMOTED))
goto retry;
- if (oldpde & PG_G) {
- /* See pmap_remove_pde() for explanation. */
- if (workaround_erratum383)
- pmap_invalidate_page(kernel_pmap, sva);
- else
- pmap_invalidate_range(kernel_pmap, sva,
- sva + NBPDR - 1);
- } else
+ if ((oldpde & PG_G) != 0)
+ pmap_invalidate_pde_page(kernel_pmap, sva, oldpde);
+ else
anychanged = TRUE;
}
return (anychanged);
@@ -4272,7 +4279,7 @@ setpte:
if (workaround_erratum383)
pmap_update_pde(pmap, va, pde, PG_PS | newpde);
else
- pde_store(pde, PG_PS | newpde);
+ pde_store(pde, PG_PROMOTED | PG_PS | newpde);
atomic_add_long(&pmap_pde_promotions, 1);
CTR2(KTR_PMAP, "pmap_promote_pde: success for va %#lx"
@@ -4585,7 +4592,8 @@ pmap_enter_pde(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
pmap_resident_count_inc(pmap, NBPDR / PAGE_SIZE);
/*
- * Map the superpage.
+ * Map the superpage. (This is not a promoted mapping; there will not
+ * be any lingering 4KB page mappings in the TLB.)
*/
pde_store(pde, newpde);
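Note that the promotion path in pmap_promote_pde tags the PDE with
PG_PROMOTED only when workaround_erratum383 is false: as the comment
removed from pmap_remove_pde explained, the erratum-383 path's
pmap_update_pde() already invalidates the 512 4KB mappings at promotion
time, so nothing can linger. Likewise, pmap_enter_pde() creates the 2MB
mapping directly and leaves the flag clear.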