author:    alc <alc@FreeBSD.org>  2012-07-13 04:10:41 +0000
committer: alc <alc@FreeBSD.org>  2012-07-13 04:10:41 +0000
commit:    c99370c950d35082bbe5b6a2a64a36af4c002996
tree:      d57454ddc0158ce5b40601cd9a08e827c19c690b
parent:    dd55b30cac15d4f8854b73e850cfe54398b9404e
Wring a few cycles out of pmap_enter(). In particular, on a user-space
pmap, avoid walking the page table twice.
Diffstat (limited to 'sys/amd64')
-rw-r--r--  sys/amd64/amd64/pmap.c  161
1 file changed, 86 insertions(+), 75 deletions(-)
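
The heart of the change is in the first hunk below: the old code unconditionally called pmap_allocpte(), which walks the page table itself, and then walked the page table a second time via pmap_pde(). The new code walks once and falls back to allocating the page table page, followed by a retry of the same walk, only when that page is absent. A minimal sketch of the new pattern (condensed from the diff below; locking, the 2MB-page demotion case, and the kernel-pmap panic path are omitted):

	retry:
		pde = pmap_pde(pmap, va);	/* the single page-table walk */
		if (pde != NULL && (*pde & PG_V) != 0) {
			/*
			 * The page table page is resident: derive the PTE
			 * from the walk just performed and take a reference
			 * on the page table page.
			 */
			pte = pmap_pde_to_pte(pde, va);
			if (va < VM_MAXUSER_ADDRESS && mpte == NULL) {
				mpte = PHYS_TO_VM_PAGE(*pde & PG_FRAME);
				mpte->wire_count++;
			}
		} else if (va < VM_MAXUSER_ADDRESS) {
			/* PT page absent: allocate it, then redo the walk. */
			mpte = _pmap_allocpte(pmap, pmap_pde_pindex(va), &lock);
			goto retry;
		}

In the common case the page table page already exists, so pmap_enter() now performs exactly one walk; the retry path, which effectively walks twice, runs only when a page table page must be created.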
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 9e1ae3d..aee73a3 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -3454,8 +3454,6 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
("pmap_enter: page %p is not busy", m));
pa = VM_PAGE_TO_PHYS(m);
newpte = (pt_entry_t)(pa | pmap_cache_bits(m->md.pat_mode, 0) | PG_V);
- if ((m->oflags & VPO_UNMANAGED) == 0)
- newpte |= PG_MANAGED;
if ((prot & VM_PROT_WRITE) != 0)
newpte |= PG_RW;
if ((prot & VM_PROT_EXECUTE) == 0)
@@ -3477,14 +3475,22 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
* In the case that a page table page is not
* resident, we are creating it here.
*/
- if (va < VM_MAXUSER_ADDRESS)
- mpte = pmap_allocpte(pmap, va, &lock);
-
+retry:
pde = pmap_pde(pmap, va);
- if (pde != NULL && (*pde & PG_V) != 0) {
- if ((*pde & PG_PS) != 0)
- panic("pmap_enter: attempted pmap_enter on 2MB page");
+ if (pde != NULL && (*pde & PG_V) != 0 && ((*pde & PG_PS) == 0 ||
+ pmap_demote_pde_locked(pmap, pde, va, &lock))) {
pte = pmap_pde_to_pte(pde, va);
+ if (va < VM_MAXUSER_ADDRESS && mpte == NULL) {
+ mpte = PHYS_TO_VM_PAGE(*pde & PG_FRAME);
+ mpte->wire_count++;
+ }
+ } else if (va < VM_MAXUSER_ADDRESS) {
+ /*
+ * Here if the pte page isn't mapped, or if it has been
+ * deallocated.
+ */
+ mpte = _pmap_allocpte(pmap, pmap_pde_pindex(va), &lock);
+ goto retry;
} else
panic("pmap_enter: invalid page directory va=%#lx", va);
@@ -3492,59 +3498,65 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
opa = origpte & PG_FRAME;
/*
- * Mapping has not changed, must be protection or wiring change.
+ * Is the specified virtual address already mapped?
*/
- if (origpte && (opa == pa)) {
+ if ((origpte & PG_V) != 0) {
/*
* Wiring change, just update stats. We don't worry about
* wiring PT pages as they remain resident as long as there
* are valid mappings in them. Hence, if a user page is wired,
* the PT page will be also.
*/
- if (wired && ((origpte & PG_W) == 0))
+ if (wired && (origpte & PG_W) == 0)
pmap->pm_stats.wired_count++;
else if (!wired && (origpte & PG_W))
pmap->pm_stats.wired_count--;
/*
- * Remove extra pte reference
+ * Remove the extra PT page reference.
*/
- if (mpte)
- mpte->wire_count--;
-
- if ((origpte & PG_MANAGED) != 0)
- om = m;
- goto validate;
- }
-
- /*
- * Mapping has changed, invalidate old range and fall through to
- * handle validating new mapping.
- */
- if (opa) {
- if (origpte & PG_W)
- pmap->pm_stats.wired_count--;
- if ((origpte & PG_MANAGED) != 0)
- om = PHYS_TO_VM_PAGE(opa);
if (mpte != NULL) {
mpte->wire_count--;
KASSERT(mpte->wire_count > 0,
("pmap_enter: missing reference to page table page,"
" va: 0x%lx", va));
}
- } else
- pmap_resident_count_inc(pmap, 1);
- /*
- * Increment the counters.
- */
- if (wired)
- pmap->pm_stats.wired_count++;
+ /*
+ * Has the mapping changed?
+ */
+ if (opa == pa) {
+ /*
+ * No, might be a protection or wiring change.
+ */
+ if ((origpte & PG_MANAGED) != 0) {
+ newpte |= PG_MANAGED;
+ om = m;
+ }
+ if ((origpte & ~(PG_M | PG_A)) == newpte)
+ goto unchanged;
+ goto validate;
+ } else {
+ /*
+ * Yes, fall through to validate the new mapping.
+ */
+ if ((origpte & PG_MANAGED) != 0)
+ om = PHYS_TO_VM_PAGE(opa);
+ }
+ } else {
+ /*
+ * Increment the counters.
+ */
+ if (wired)
+ pmap->pm_stats.wired_count++;
+ pmap_resident_count_inc(pmap, 1);
+ }
/*
* Enter on the PV list if part of our managed memory.
*/
- if ((newpte & PG_MANAGED) != 0) {
+ if ((m->oflags & VPO_UNMANAGED) == 0) {
+ newpte |= PG_MANAGED;
pv = get_pv_entry(pmap, &lock);
pv->pv_va = va;
CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, pa);
@@ -3554,45 +3566,44 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
validate:
/*
- * Update the PTE only if the mapping or protection/wiring bits are
- * different.
+ * Update the PTE.
*/
- if ((origpte & ~(PG_M | PG_A)) != newpte) {
- newpte |= PG_A;
- if ((access & VM_PROT_WRITE) != 0)
- newpte |= PG_M;
- if ((newpte & (PG_MANAGED | PG_RW)) == (PG_MANAGED | PG_RW))
- vm_page_aflag_set(m, PGA_WRITEABLE);
- if (origpte & PG_V) {
- invlva = FALSE;
- origpte = pte_load_store(pte, newpte);
- if (origpte & PG_A) {
- if (origpte & PG_MANAGED)
- vm_page_aflag_set(om, PGA_REFERENCED);
- if (opa != pa || ((origpte & PG_NX) == 0 &&
- (newpte & PG_NX) != 0))
- invlva = TRUE;
- }
- if ((origpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
- if ((origpte & PG_MANAGED) != 0)
- vm_page_dirty(om);
- if ((newpte & PG_RW) == 0)
- invlva = TRUE;
- }
- if (opa != pa && (origpte & PG_MANAGED) != 0) {
- CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, opa);
- pmap_pvh_free(&om->md, pmap, va);
- if ((om->aflags & PGA_WRITEABLE) != 0 &&
- TAILQ_EMPTY(&om->md.pv_list) &&
- ((om->flags & PG_FICTITIOUS) != 0 ||
- TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list)))
- vm_page_aflag_clear(om, PGA_WRITEABLE);
- }
- if (invlva)
- pmap_invalidate_page(pmap, va);
- } else
- pte_store(pte, newpte);
- }
+ newpte |= PG_A;
+ if ((access & VM_PROT_WRITE) != 0)
+ newpte |= PG_M;
+ if ((newpte & (PG_MANAGED | PG_RW)) == (PG_MANAGED | PG_RW))
+ vm_page_aflag_set(m, PGA_WRITEABLE);
+ if ((origpte & PG_V) != 0) {
+ invlva = FALSE;
+ origpte = pte_load_store(pte, newpte);
+ if ((origpte & PG_A) != 0) {
+ if ((origpte & PG_MANAGED) != 0)
+ vm_page_aflag_set(om, PGA_REFERENCED);
+ if (opa != pa || ((origpte & PG_NX) == 0 &&
+ (newpte & PG_NX) != 0))
+ invlva = TRUE;
+ }
+ if ((origpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
+ if ((origpte & PG_MANAGED) != 0)
+ vm_page_dirty(om);
+ if ((newpte & PG_RW) == 0)
+ invlva = TRUE;
+ }
+ if (opa != pa && (origpte & PG_MANAGED) != 0) {
+ CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, opa);
+ pmap_pvh_free(&om->md, pmap, va);
+ if ((om->aflags & PGA_WRITEABLE) != 0 &&
+ TAILQ_EMPTY(&om->md.pv_list) &&
+ ((om->flags & PG_FICTITIOUS) != 0 ||
+ TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list)))
+ vm_page_aflag_clear(om, PGA_WRITEABLE);
+ }
+ if (invlva)
+ pmap_invalidate_page(pmap, va);
+ } else
+ pte_store(pte, newpte);
+
+unchanged:
/*
* If both the page table page and the reservation are fully