-rw-r--r--  sys/amd64/amd64/pmap.c | 17
-rw-r--r--  sys/i386/i386/pmap.c   | 14
2 files changed, 15 insertions(+), 16 deletions(-)
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index e45f256..d9ead4c 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -1820,7 +1820,6 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
vm_paddr_t pa;
register pt_entry_t *pte;
vm_paddr_t opa;
- pd_entry_t ptepde;
pt_entry_t origpte, newpte;
vm_page_t mpte, om;
@@ -1855,7 +1854,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
}
#endif
- pte = pmap_pte_pde(pmap, va, &ptepde);
+ pte = pmap_pte(pmap, va);
/*
* Page Directory table entry not valid, we need a new PT page
@@ -1915,23 +1914,23 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
* handle validating new mapping.
*/
if (opa) {
- int err;
if (origpte & PG_W)
pmap->pm_stats.wired_count--;
if (origpte & PG_MANAGED) {
om = PHYS_TO_VM_PAGE(opa);
pmap_remove_entry(pmap, om, va);
}
- err = pmap_unuse_pt(pmap, va, ptepde);
- if (err)
- panic("pmap_enter: pte vanished, va: 0x%lx", va);
+ if (mpte != NULL) {
+ mpte->wire_count--;
+ KASSERT(mpte->wire_count > 0,
+ ("pmap_enter: missing reference to page table page,"
+ " va: 0x%lx", va));
+ }
} else
pmap->pm_stats.resident_count++;
/*
- * Enter on the PV list if part of our managed memory. Note that we
- * raise IPL while manipulating pv_table since pmap_enter can be
- * called at interrupt time.
+ * Enter on the PV list if part of our managed memory.
*/
if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
pmap_insert_entry(pmap, va, m);
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index e5ad5fa..84e5bd5 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -1954,23 +1954,23 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
* handle validating new mapping.
*/
if (opa) {
- int err;
if (origpte & PG_W)
pmap->pm_stats.wired_count--;
if (origpte & PG_MANAGED) {
om = PHYS_TO_VM_PAGE(opa);
pmap_remove_entry(pmap, om, va);
}
- err = pmap_unuse_pt(pmap, va);
- if (err)
- panic("pmap_enter: pte vanished, va: 0x%x", va);
+ if (mpte != NULL) {
+ mpte->wire_count--;
+ KASSERT(mpte->wire_count > 0,
+ ("pmap_enter: missing reference to page table page,"
+ " va: 0x%x", va));
+ }
} else
pmap->pm_stats.resident_count++;
/*
- * Enter on the PV list if part of our managed memory. Note that we
- * raise IPL while manipulating pv_table since pmap_enter can be
- * called at interrupt time.
+ * Enter on the PV list if part of our managed memory.
*/
if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
pmap_insert_entry(pmap, va, m);
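
For readers skimming the hunks above: in both the amd64 and i386 versions of pmap_enter(), the case where an existing mapping at the same virtual address is being replaced no longer calls pmap_unuse_pt() (which would panic if the PTE had vanished). Instead, the extra reference that pmap_enter() took on the page table page (mpte) is dropped directly, and a KASSERT checks that the page table page is still referenced by the mapping being replaced. The following standalone sketch models that bookkeeping in plain userspace C; vm_page_sketch, replace_mapping(), and the initial wire_count value are illustrative stand-ins, not kernel API.

/*
 * Minimal userspace sketch (not kernel code) of the reference-counting
 * change: when a mapping at the same virtual address is replaced, the
 * extra wire_count reference taken on the page table page earlier in
 * pmap_enter() is dropped directly instead of going through
 * pmap_unuse_pt().  The assertion mirrors the KASSERT: the page table
 * page must still be referenced by the old (now replaced) PTE.
 */
#include <assert.h>
#include <stdio.h>

struct vm_page_sketch {		/* stand-in for the kernel's vm_page */
	int wire_count;		/* references held on a page table page */
};

static void
replace_mapping(struct vm_page_sketch *mpte)
{
	if (mpte != NULL) {
		mpte->wire_count--;	/* drop pmap_enter()'s extra ref */
		assert(mpte->wire_count > 0 &&
		    "missing reference to page table page");
	}
}

int
main(void)
{
	/* One ref for the old mapping plus the extra ref from pmap_enter(). */
	struct vm_page_sketch ptpage = { .wire_count = 2 };

	replace_mapping(&ptpage);
	printf("wire_count after replace: %d\n", ptpage.wire_count);
	return (0);
}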