From dcd4c15b163e4cd614852d584071ed807d5f5bc9 Mon Sep 17 00:00:00 2001
From: alc
Date: Sun, 29 Jul 2012 18:20:49 +0000
Subject: Shave off a few more cycles from pmap_enter()'s critical section.

In particular, do a little less work with the PV list lock held.
---
 sys/amd64/amd64/pmap.c | 31 +++++++++++++++++--------------
 1 file changed, 17 insertions(+), 14 deletions(-)

(limited to 'sys/amd64')

diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index fe45a67..c29e7c9 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -3446,6 +3446,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
 	KASSERT(va < UPT_MIN_ADDRESS || va >= UPT_MAX_ADDRESS,
 	    ("pmap_enter: invalid to pmap_enter page table pages (va: 0x%lx)",
 	    va));
+	KASSERT((prot & access) == access, ("pmap_enter: access not in prot"));
 	KASSERT((m->oflags & VPO_UNMANAGED) != 0 || va < kmi.clean_sva ||
 	    va >= kmi.clean_eva,
 	    ("pmap_enter: managed mapping within the clean submap"));
@@ -3453,7 +3454,9 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
 	    VM_OBJECT_LOCKED(m->object), ("pmap_enter: page %p is not busy", m));
 	pa = VM_PAGE_TO_PHYS(m);
-	newpte = (pt_entry_t)(pa | pmap_cache_bits(m->md.pat_mode, 0) | PG_V);
+	newpte = (pt_entry_t)(pa | PG_A | PG_V);
+	if ((access & VM_PROT_WRITE) != 0)
+		newpte |= PG_M;
 	if ((prot & VM_PROT_WRITE) != 0)
 		newpte |= PG_RW;
 	if ((prot & VM_PROT_EXECUTE) == 0)
 		newpte |= PG_NX;
@@ -3464,6 +3467,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
 		newpte |= PG_U;
 	if (pmap == kernel_pmap)
 		newpte |= PG_G;
+	newpte |= pmap_cache_bits(m->md.pat_mode, 0);
 
 	mpte = om = NULL;
 
@@ -3495,7 +3499,6 @@ retry:
 		panic("pmap_enter: invalid page directory va=%#lx", va);
 
 	origpte = *pte;
-	opa = origpte & PG_FRAME;
 
 	/*
 	 * Is the specified virtual address already mapped?
@@ -3507,9 +3510,9 @@ retry:
 		 * are valid mappings in them. Hence, if a user page is wired,
 		 * the PT page will be also.
 		 */
-		if (wired && (origpte & PG_W) == 0)
+		if ((newpte & PG_W) != 0 && (origpte & PG_W) == 0)
 			pmap->pm_stats.wired_count++;
-		else if (!wired && (origpte & PG_W))
+		else if ((newpte & PG_W) == 0 && (origpte & PG_W) != 0)
 			pmap->pm_stats.wired_count--;
 
 		/*
@@ -3523,17 +3526,20 @@ retry:
 		}
 
 		/*
-		 * Has the mapping changed?
+		 * Has the physical page changed?
 		 */
+		opa = origpte & PG_FRAME;
 		if (opa == pa) {
 			/*
 			 * No, might be a protection or wiring change.
 			 */
 			if ((origpte & PG_MANAGED) != 0) {
 				newpte |= PG_MANAGED;
+				if ((newpte & PG_RW) != 0)
+					vm_page_aflag_set(m, PGA_WRITEABLE);
 				om = m;
 			}
-			if ((origpte & ~(PG_M | PG_A)) == newpte)
+			if (((origpte ^ newpte) & ~(PG_M | PG_A)) == 0)
 				goto unchanged;
 			goto validate;
 		} else {
@@ -3547,7 +3553,7 @@ retry:
 		/*
 		 * Increment the counters.
 		 */
-		if (wired)
+		if ((newpte & PG_W) != 0)
 			pmap->pm_stats.wired_count++;
 		pmap_resident_count_inc(pmap, 1);
 	}
@@ -3561,21 +3567,18 @@ retry:
 		pv->pv_va = va;
 		CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, pa);
 		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
+		if ((newpte & PG_RW) != 0)
+			vm_page_aflag_set(m, PGA_WRITEABLE);
 	}
 
-validate:
-
 	/*
 	 * Update the PTE.
 	 */
-	newpte |= PG_A;
-	if ((access & VM_PROT_WRITE) != 0)
-		newpte |= PG_M;
-	if ((newpte & (PG_MANAGED | PG_RW)) == (PG_MANAGED | PG_RW))
-		vm_page_aflag_set(m, PGA_WRITEABLE);
 	if ((origpte & PG_V) != 0) {
+validate:
 		invlva = FALSE;
 		origpte = pte_load_store(pte, newpte);
+		opa = origpte & PG_FRAME;
 		if ((origpte & PG_A) != 0 && (opa != pa ||
 		    ((origpte & PG_NX) == 0 && (newpte & PG_NX) != 0)))
 			invlva = TRUE;
--
cgit v1.1
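
The individual changes above are mechanical, but they all serve one pattern: everything that can be computed from the function's arguments alone (PG_A, PG_M, the protection bits, the PAT cache bits) is folded into newpte before the PV list lock is taken, so the locked region is reduced to the pv-entry bookkeeping and the store that publishes the entry. A minimal standalone sketch of that pattern follows; it is not FreeBSD code, and the XPG_* values, pv_list_lock, and enter_mapping below are invented for illustration and do not match amd64's real PTE layout.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative flag values only; not the real amd64 PTE bits. */
#define	XPG_V	0x001	/* valid */
#define	XPG_RW	0x002	/* writable */
#define	XPG_A	0x020	/* accessed */
#define	XPG_M	0x040	/* modified */

static pthread_mutex_t pv_list_lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t pte;	/* stands in for the hardware page table entry */

/*
 * Compute every bit of the new PTE with no locks held, so the
 * critical section shrinks to the pv bookkeeping and the single
 * store that publishes the mapping.
 */
static void
enter_mapping(uint64_t pa, int writable, int will_write)
{
	uint64_t newpte;

	/* All flag computation happens before the lock is taken. */
	newpte = pa | XPG_A | XPG_V;
	if (writable)
		newpte |= XPG_RW;
	if (will_write)
		newpte |= XPG_M;	/* pre-dirty, as the diff does with PG_M */

	pthread_mutex_lock(&pv_list_lock);
	/* ... link the pv entry for this mapping here ... */
	pte = newpte;		/* publish the finished entry */
	pthread_mutex_unlock(&pv_list_lock);
}

int
main(void)
{
	enter_mapping(0x200000, 1, 1);
	printf("pte = %#jx\n", (uintmax_t)pte);
	return (0);
}

The same motion is visible in the diff itself: PG_A and PG_M are now set at the top of pmap_enter() instead of under the old validate: label, and vm_page_aflag_set(m, PGA_WRITEABLE) runs where the pv entry is created, or where PG_MANAGED is inherited, rather than as a separate test just before the PTE store.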