author		alc <alc@FreeBSD.org>	2012-08-01 16:04:13 +0000
committer	alc <alc@FreeBSD.org>	2012-08-01 16:04:13 +0000
commit		9c4b62fad8853cdccbe2f3a1900de7ec19bb52ca (patch)
tree		ca90665994a83282a7e75e13ed388776aca81e44
parent		e5c6ca6783da2418a0046f43f47977f4534925d3 (diff)
Revise pmap_enter()'s handling of mapping updates that change the
PTE's PG_M and PG_RW bits but not the physical page frame.

First, only perform vm_page_dirty() on a managed vm_page when the
PG_M bit is being cleared.  If the updated PTE continues to have
PG_M set, then there is no requirement to perform vm_page_dirty().

Second, flush the mapping from the TLB when PG_M alone is cleared,
not just when PG_M and PG_RW are cleared.  Otherwise, a stale TLB
entry may stop PG_M from being set again on the next store to the
virtual page.  However, since the vm_page's dirty field already
shows the physical page as being dirty, no actual harm comes from
the PG_M bit not being set.  Nonetheless, it is potentially
confusing to someone expecting to see the PTE change after a store
to the virtual page.
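To make the two rules above concrete, here is a minimal, self-contained
C sketch; it is not code from the tree.  The PG_* constants mirror the
amd64 PTE bit layout, and must_dirty_page()/needs_tlb_flush() are
hypothetical helpers expressing when the vm_page must be dirtied and
when the TLB entry must be flushed.

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative stand-ins for the amd64 PTE bits. */
	#define	PG_RW		((uint64_t)1 << 1)	/* writable */
	#define	PG_A		((uint64_t)1 << 5)	/* accessed */
	#define	PG_M		((uint64_t)1 << 6)	/* modified (dirty) */
	#define	PG_NX		((uint64_t)1 << 63)	/* no-execute */
	#define	PG_FRAME	0x000ffffffffff000ULL	/* physical frame */

	/*
	 * Rule 1: the old PTE's dirty state must be transferred to the
	 * vm_page only when PG_M is being cleared; if the new PTE keeps
	 * PG_M set, the dirty state remains visible in the PTE itself.
	 */
	static bool
	must_dirty_page(uint64_t origpte, uint64_t newpte)
	{

		return ((origpte & (PG_M | PG_RW)) == (PG_M | PG_RW) &&
		    (newpte & PG_M) == 0);
	}

	/*
	 * Rule 2: a TLB flush is needed only if the old PTE could be
	 * cached (PG_A set) and the update changes the physical frame,
	 * clears PG_M, or revokes execute permission.  Clearing PG_M
	 * alone suffices: a stale TLB entry would otherwise stop PG_M
	 * from being set again on the next store to the virtual page.
	 */
	static bool
	needs_tlb_flush(uint64_t origpte, uint64_t newpte)
	{

		if ((origpte & PG_A) == 0)
			return (false);
		return ((origpte & PG_FRAME) != (newpte & PG_FRAME) ||
		    must_dirty_page(origpte, newpte) ||
		    ((origpte & PG_NX) == 0 && (newpte & PG_NX) != 0));
	}

	int
	main(void)
	{
		/* A dirty, writable mapping downgraded to clean. */
		uint64_t origpte = 0x1000 | PG_RW | PG_A | PG_M;
		uint64_t newpte = 0x1000 | PG_RW;

		printf("dirty page: %d, flush TLB: %d\n",
		    must_dirty_page(origpte, newpte),
		    needs_tlb_flush(origpte, newpte));	/* prints 1, 1 */
		return (0);
	}

Under these rules, clearing PG_M on a dirty, writable mapping both
dirties the vm_page and forces a flush, while an update that only adds
permissions leaves the TLB alone.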
-rw-r--r--	sys/amd64/amd64/pmap.c	52
1 file changed, 30 insertions(+), 22 deletions(-)
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index c29e7c9..3ae1230 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -3439,7 +3439,6 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
 	pv_entry_t pv;
 	vm_paddr_t opa, pa;
 	vm_page_t mpte, om;
-	boolean_t invlva;
 
 	va = trunc_page(va);
 	KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
@@ -3537,7 +3536,6 @@ retry:
 		newpte |= PG_MANAGED;
 		if ((newpte & PG_RW) != 0)
 			vm_page_aflag_set(m, PGA_WRITEABLE);
-		om = m;
 	}
 	if (((origpte ^ newpte) & ~(PG_M | PG_A)) == 0)
 		goto unchanged;
@@ -3576,30 +3574,40 @@ retry:
 	 */
 	if ((origpte & PG_V) != 0) {
 validate:
-		invlva = FALSE;
 		origpte = pte_load_store(pte, newpte);
 		opa = origpte & PG_FRAME;
-		if ((origpte & PG_A) != 0 && (opa != pa ||
-		    ((origpte & PG_NX) == 0 && (newpte & PG_NX) != 0)))
-			invlva = TRUE;
-		if ((origpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
+		if (opa != pa) {
+			if ((origpte & PG_MANAGED) != 0) {
+				if ((origpte & (PG_M | PG_RW)) == (PG_M |
+				    PG_RW))
+					vm_page_dirty(om);
+				if ((origpte & PG_A) != 0)
+					vm_page_aflag_set(om, PGA_REFERENCED);
+				CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, opa);
+				pmap_pvh_free(&om->md, pmap, va);
+				if ((om->aflags & PGA_WRITEABLE) != 0 &&
+				    TAILQ_EMPTY(&om->md.pv_list) &&
+				    ((om->flags & PG_FICTITIOUS) != 0 ||
+				    TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list)))
+					vm_page_aflag_clear(om, PGA_WRITEABLE);
+			}
+		} else if ((newpte & PG_M) == 0 && (origpte & (PG_M |
+		    PG_RW)) == (PG_M | PG_RW)) {
 			if ((origpte & PG_MANAGED) != 0)
-				vm_page_dirty(om);
-			if ((newpte & PG_RW) == 0)
-				invlva = TRUE;
-		}
-		if (opa != pa && (origpte & PG_MANAGED) != 0) {
-			if ((origpte & PG_A) != 0)
-				vm_page_aflag_set(om, PGA_REFERENCED);
-			CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, opa);
-			pmap_pvh_free(&om->md, pmap, va);
-			if ((om->aflags & PGA_WRITEABLE) != 0 &&
-			    TAILQ_EMPTY(&om->md.pv_list) &&
-			    ((om->flags & PG_FICTITIOUS) != 0 ||
-			    TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list)))
-				vm_page_aflag_clear(om, PGA_WRITEABLE);
+				vm_page_dirty(m);
+
+			/*
+			 * Although the PTE may still have PG_RW set, TLB
+			 * invalidation may nonetheless be required because
+			 * the PTE no longer has PG_M set.
+			 */
+		} else if ((origpte & PG_NX) != 0 || (newpte & PG_NX) == 0) {
+			/*
+			 * This PTE change does not require TLB invalidation.
+			 */
+			goto unchanged;
 		}
-		if (invlva)
+		if ((origpte & PG_A) != 0)
 			pmap_invalidate_page(pmap, va);
 	} else
 		pte_store(pte, newpte);