summaryrefslogtreecommitdiffstats
path: root/sys/amd64
diff options
context:
space:
mode:
authoralc <alc@FreeBSD.org>2012-07-29 18:20:49 +0000
committeralc <alc@FreeBSD.org>2012-07-29 18:20:49 +0000
commitdcd4c15b163e4cd614852d584071ed807d5f5bc9 (patch)
treeb99b45fc46d10c8668236dc2e497b594ab2cd727 /sys/amd64
parent0df7adbcbe3c3afbf692f3f73ca539615d63b08b (diff)
downloadFreeBSD-src-dcd4c15b163e4cd614852d584071ed807d5f5bc9.zip
FreeBSD-src-dcd4c15b163e4cd614852d584071ed807d5f5bc9.tar.gz
Shave off a few more cycles from pmap_enter()'s critical section. In
particular, do a little less work with the PV list lock held.
Diffstat (limited to 'sys/amd64')
-rw-r--r--sys/amd64/amd64/pmap.c31
1 file changed, 17 insertions, 14 deletions
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index fe45a67..c29e7c9 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -3446,6 +3446,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
KASSERT(va < UPT_MIN_ADDRESS || va >= UPT_MAX_ADDRESS,
("pmap_enter: invalid to pmap_enter page table pages (va: 0x%lx)",
va));
+ KASSERT((prot & access) == access, ("pmap_enter: access not in prot"));
KASSERT((m->oflags & VPO_UNMANAGED) != 0 || va < kmi.clean_sva ||
va >= kmi.clean_eva,
("pmap_enter: managed mapping within the clean submap"));
@@ -3453,7 +3454,9 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
VM_OBJECT_LOCKED(m->object),
("pmap_enter: page %p is not busy", m));
pa = VM_PAGE_TO_PHYS(m);
- newpte = (pt_entry_t)(pa | pmap_cache_bits(m->md.pat_mode, 0) | PG_V);
+ newpte = (pt_entry_t)(pa | PG_A | PG_V);
+ if ((access & VM_PROT_WRITE) != 0)
+ newpte |= PG_M;
if ((prot & VM_PROT_WRITE) != 0)
newpte |= PG_RW;
if ((prot & VM_PROT_EXECUTE) == 0)
@@ -3464,6 +3467,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
newpte |= PG_U;
if (pmap == kernel_pmap)
newpte |= PG_G;
+ newpte |= pmap_cache_bits(m->md.pat_mode, 0);
mpte = om = NULL;
@@ -3495,7 +3499,6 @@ retry:
panic("pmap_enter: invalid page directory va=%#lx", va);
origpte = *pte;
- opa = origpte & PG_FRAME;
/*
* Is the specified virtual address already mapped?
@@ -3507,9 +3510,9 @@ retry:
* are valid mappings in them. Hence, if a user page is wired,
* the PT page will be also.
*/
- if (wired && (origpte & PG_W) == 0)
+ if ((newpte & PG_W) != 0 && (origpte & PG_W) == 0)
pmap->pm_stats.wired_count++;
- else if (!wired && (origpte & PG_W))
+ else if ((newpte & PG_W) == 0 && (origpte & PG_W) != 0)
pmap->pm_stats.wired_count--;
/*
@@ -3523,17 +3526,20 @@ retry:
}
/*
- * Has the mapping changed?
+ * Has the physical page changed?
*/
+ opa = origpte & PG_FRAME;
if (opa == pa) {
/*
* No, might be a protection or wiring change.
*/
if ((origpte & PG_MANAGED) != 0) {
newpte |= PG_MANAGED;
+ if ((newpte & PG_RW) != 0)
+ vm_page_aflag_set(m, PGA_WRITEABLE);
om = m;
}
- if ((origpte & ~(PG_M | PG_A)) == newpte)
+ if (((origpte ^ newpte) & ~(PG_M | PG_A)) == 0)
goto unchanged;
goto validate;
} else {
@@ -3547,7 +3553,7 @@ retry:
/*
* Increment the counters.
*/
- if (wired)
+ if ((newpte & PG_W) != 0)
pmap->pm_stats.wired_count++;
pmap_resident_count_inc(pmap, 1);
}
@@ -3561,21 +3567,18 @@ retry:
pv->pv_va = va;
CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, pa);
TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
+ if ((newpte & PG_RW) != 0)
+ vm_page_aflag_set(m, PGA_WRITEABLE);
}
-validate:
-
/*
* Update the PTE.
*/
- newpte |= PG_A;
- if ((access & VM_PROT_WRITE) != 0)
- newpte |= PG_M;
- if ((newpte & (PG_MANAGED | PG_RW)) == (PG_MANAGED | PG_RW))
- vm_page_aflag_set(m, PGA_WRITEABLE);
if ((origpte & PG_V) != 0) {
+validate:
invlva = FALSE;
origpte = pte_load_store(pte, newpte);
+ opa = origpte & PG_FRAME;
if ((origpte & PG_A) != 0 && (opa != pa ||
((origpte & PG_NX) == 0 && (newpte & PG_NX) != 0)))
invlva = TRUE;
OpenPOWER on IntegriCloud