author		alc <alc@FreeBSD.org>	2006-04-12 04:22:52 +0000
committer	alc <alc@FreeBSD.org>	2006-04-12 04:22:52 +0000
commit		a7e3d6f83b2e60bf4f2a0ca1d7f58c6b2d883f14 (patch)
tree		eeb6525f020e547b58a26d7c6e3b96038c99d223 /sys/amd64
parent		de19fa170105efedc0a0218b0511595eb73cc209 (diff)
Retire pmap_track_modified(). We no longer need it because we do not
create managed mappings within the clean submap. To prevent regressions,
add assertions blocking the creation of managed mappings within the
clean submap.

Reviewed by:	tegge
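The gist of the change, shown below in the diff, is that the retired helper was only a range
test against the clean submap, and the new KASSERTs enforce that same condition once, at the
point where a managed mapping is created, so the later dirty-bit updates can call
vm_page_dirty() unconditionally. A minimal standalone sketch of that logic follows; it is not
FreeBSD source, and clean_sva/clean_eva here are made-up stand-ins for kmi.clean_sva and
kmi.clean_eva.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t vm_offset_t;

/* Made-up clean submap bounds standing in for kmi.clean_sva / kmi.clean_eva. */
static const vm_offset_t clean_sva = 0x1000;
static const vm_offset_t clean_eva = 0x5000;

/* Logic of the retired helper: track the modified bit only outside the submap. */
static bool
track_modified(vm_offset_t va)
{
	return (va < clean_sva || va >= clean_eva);
}

int
main(void)
{
	/* Old style: every dirty-bit update was guarded by the range test. */
	printf("va 0x0800 tracked: %d\n", track_modified(0x0800));	/* 1: outside submap */
	printf("va 0x2000 tracked: %d\n", track_modified(0x2000));	/* 0: inside submap  */

	/* New style: assert the invariant once, where the mapping is created. */
	vm_offset_t va = 0x0800;
	assert(va < clean_sva || va >= clean_eva);
	return (0);
}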
Diffstat (limited to 'sys/amd64')
-rw-r--r--	sys/amd64/amd64/pmap.c	52
1 file changed, 10 insertions(+), 42 deletions(-)
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index c5e27e9..be3daad 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -619,20 +619,6 @@ SYSCTL_PROC(_vm_pmap, OID_AUTO, shpgperproc, CTLTYPE_INT|CTLFLAG_RW,
* Low level helper routines.....
***************************************************/
-
-/*
- * this routine defines the region(s) of memory that should
- * not be tested for the modified bit.
- */
-static PMAP_INLINE int
-pmap_track_modified(vm_offset_t va)
-{
- if ((va < kmi.clean_sva) || (va >= kmi.clean_eva))
- return 1;
- else
- return 0;
-}
-
#ifdef SMP
/*
* For SMP, these functions have to use the IPI mechanism for coherence.
@@ -1532,8 +1518,7 @@ pmap_collect(pmap_t locked_pmap, struct vpgqueues *vpq)
KASSERT((tpte & PG_RW),
("pmap_collect: modified page not writable: va: %#lx, pte: %#lx",
va, tpte));
- if (pmap_track_modified(va))
- vm_page_dirty(m);
+ vm_page_dirty(m);
}
pmap_invalidate_page(pmap, va);
TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
@@ -1751,8 +1736,7 @@ pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va, pd_entry_t ptepde)
KASSERT((oldpte & PG_RW),
("pmap_remove_pte: modified page not writable: va: %#lx, pte: %#lx",
va, oldpte));
- if (pmap_track_modified(va))
- vm_page_dirty(m);
+ vm_page_dirty(m);
}
if (oldpte & PG_A)
vm_page_flag_set(m, PG_REFERENCED);
@@ -1940,8 +1924,7 @@ pmap_remove_all(vm_page_t m)
KASSERT((tpte & PG_RW),
("pmap_remove_all: modified page not writable: va: %#lx, pte: %#lx",
pv->pv_va, tpte));
- if (pmap_track_modified(pv->pv_va))
- vm_page_dirty(m);
+ vm_page_dirty(m);
}
pmap_invalidate_page(pmap, pv->pv_va);
TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
@@ -2030,8 +2013,7 @@ retry:
vm_page_flag_set(m, PG_REFERENCED);
pbits &= ~PG_A;
}
- if ((pbits & PG_M) != 0 &&
- pmap_track_modified(sva)) {
+ if ((pbits & PG_M) != 0) {
if (m == NULL)
m = PHYS_TO_VM_PAGE(pbits &
PG_FRAME);
@@ -2182,6 +2164,8 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
* Enter on the PV list if part of our managed memory.
*/
if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
+ KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva,
+ ("pmap_enter: managed mapping within the clean submap"));
pmap_insert_entry(pmap, va, m);
pa |= PG_MANAGED;
}
@@ -2227,8 +2211,7 @@ validate:
KASSERT((origpte & PG_RW),
("pmap_enter: modified page not writable: va: %#lx, pte: %#lx",
va, origpte));
- if ((origpte & PG_MANAGED) &&
- pmap_track_modified(va))
+ if ((origpte & PG_MANAGED) != 0)
vm_page_dirty(om);
if ((newpte & PG_RW) == 0)
invlva = TRUE;
@@ -2258,6 +2241,9 @@ pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
pt_entry_t *pte;
vm_paddr_t pa;
+ KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
+ (m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0,
+ ("pmap_enter_quick: managed mapping within the clean submap"));
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
PMAP_LOCK(pmap);
@@ -2836,13 +2822,6 @@ pmap_is_modified(vm_page_t m)
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
- /*
- * if the bit being tested is the modified bit, then
- * mark clean_map and ptes as never
- * modified.
- */
- if (!pmap_track_modified(pv->pv_va))
- continue;
pmap = PV_PMAP(pv);
PMAP_LOCK(pmap);
pte = pmap_pte(pmap, pv->pv_va);
@@ -2898,14 +2877,6 @@ pmap_clear_ptes(vm_page_t m, long bit)
* setting RO do we need to clear the VAC?
*/
TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
- /*
- * don't write protect pager mappings
- */
- if (bit == PG_RW) {
- if (!pmap_track_modified(pv->pv_va))
- continue;
- }
-
pmap = PV_PMAP(pv);
PMAP_LOCK(pmap);
pte = pmap_pte(pmap, pv->pv_va);
@@ -2983,9 +2954,6 @@ pmap_ts_referenced(vm_page_t m)
TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
- if (!pmap_track_modified(pv->pv_va))
- continue;
-
pmap = PV_PMAP(pv);
PMAP_LOCK(pmap);
pte = pmap_pte(pmap, pv->pv_va);