summaryrefslogtreecommitdiffstats
path: root/sys/ia64
diff options
context:
space:
mode:
authoralc <alc@FreeBSD.org>2006-04-12 04:22:52 +0000
committeralc <alc@FreeBSD.org>2006-04-12 04:22:52 +0000
commita7e3d6f83b2e60bf4f2a0ca1d7f58c6b2d883f14 (patch)
treeeeb6525f020e547b58a26d7c6e3b96038c99d223 /sys/ia64
parentde19fa170105efedc0a0218b0511595eb73cc209 (diff)
downloadFreeBSD-src-a7e3d6f83b2e60bf4f2a0ca1d7f58c6b2d883f14.zip
FreeBSD-src-a7e3d6f83b2e60bf4f2a0ca1d7f58c6b2d883f14.tar.gz
Retire pmap_track_modified(). We no longer need it because we do not
create managed mappings within the clean submap. To prevent regressions, add assertions blocking the creation of managed mappings within the clean submap. Reviewed by: tegge
Diffstat (limited to 'sys/ia64')
-rw-r--r--sys/ia64/ia64/pmap.c27
1 file changed, 8 insertions, 19 deletions
diff --git a/sys/ia64/ia64/pmap.c b/sys/ia64/ia64/pmap.c
index a00bbcf..06d07f5 100644
--- a/sys/ia64/ia64/pmap.c
+++ b/sys/ia64/ia64/pmap.c
@@ -691,19 +691,6 @@ pmap_free_rid(uint32_t rid)
mtx_unlock(&pmap_ridmutex);
}
-/*
- * this routine defines the region(s) of memory that should
- * not be tested for the modified bit.
- */
-static PMAP_INLINE int
-pmap_track_modified(vm_offset_t va)
-{
- if ((va < kmi.clean_sva) || (va >= kmi.clean_eva))
- return 1;
- else
- return 0;
-}
-
/***************************************************
* Page table page management routines.....
***************************************************/
@@ -1199,8 +1186,7 @@ pmap_remove_pte(pmap_t pmap, struct ia64_lpte *pte, vm_offset_t va,
if (pmap_managed(pte)) {
m = PHYS_TO_VM_PAGE(pmap_ppn(pte));
if (pmap_dirty(pte))
- if (pmap_track_modified(va))
- vm_page_dirty(m);
+ vm_page_dirty(m);
if (pmap_accessed(pte))
vm_page_flag_set(m, PG_REFERENCED);
@@ -1498,8 +1484,7 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
vm_offset_t pa = pmap_ppn(pte);
vm_page_t m = PHYS_TO_VM_PAGE(pa);
if (pmap_dirty(pte)) {
- if (pmap_track_modified(sva))
- vm_page_dirty(m);
+ vm_page_dirty(m);
pmap_clear_dirty(pte);
}
if (pmap_accessed(pte)) {
@@ -1593,8 +1578,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
* We might be turning off write access to the page,
* so we go ahead and sense modify status.
*/
- if (managed && pmap_dirty(&origpte) &&
- pmap_track_modified(va))
+ if (managed && pmap_dirty(&origpte))
vm_page_dirty(m);
pmap_invalidate_page(pmap, va);
@@ -1614,6 +1598,8 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
* Enter on the PV list if part of our managed memory.
*/
if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
+ KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva,
+ ("pmap_enter: managed mapping within the clean submap"));
pmap_insert_entry(pmap, va, m);
managed = TRUE;
}
@@ -1656,6 +1642,9 @@ pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
pmap_t oldpmap;
boolean_t managed;
+ KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
+ (m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0,
+ ("pmap_enter_quick: managed mapping within the clean submap"));
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
PMAP_LOCK(pmap);
OpenPOWER on IntegriCloud