summary | refs | log | tree | commit | diff | stats
path: root/sys
diff options
context:
space:
mode:
author: alc <alc@FreeBSD.org> 2005-10-31 21:25:33 +0000
committer: alc <alc@FreeBSD.org> 2005-10-31 21:25:33 +0000
commit: 086563b4955f4b19bd7df9737a138ac62fbaa99a (patch)
tree: 5e091e5287a354f9e08d9be8c219f2449cc43e0a /sys
parent: bd0529b5a0b46f48edc03276b4f87871d130ec17 (diff)
downloadFreeBSD-src-086563b4955f4b19bd7df9737a138ac62fbaa99a.zip
FreeBSD-src-086563b4955f4b19bd7df9737a138ac62fbaa99a.tar.gz
Instead of a panic()ing in pmap_insert_entry() if get_pv_entry()
fails, reclaim a pv entry by destroying a mapping to an inactive page. Change the format strings in many of the assertions that were recently converted from PMAP_DIAGNOSTIC printf()s so that they are compatible with PAE. Avoid unnecessary differences between the amd64 and i386 format strings.
Diffstat (limited to 'sys')
-rw-r--r--  sys/amd64/amd64/pmap.c | 64
-rw-r--r--  sys/i386/i386/pmap.c   | 73
2 files changed, 124 insertions(+), 13 deletions(-)
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 58db969..7fa7d8e 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -203,6 +203,7 @@ static caddr_t crashdumpmap;
static PMAP_INLINE void free_pv_entry(pv_entry_t pv);
static pv_entry_t get_pv_entry(void);
+static pv_entry_t pv_entry_reclaim(pmap_t locked_pmap);
static void pmap_clear_ptes(vm_page_t m, long bit);
static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq,
@@ -1450,6 +1451,57 @@ get_pv_entry(void)
return uma_zalloc(pvzone, M_NOWAIT);
}
+/*
+ * Reclaim a pv entry by removing a mapping to an inactive page.
+ */
+static pv_entry_t
+pv_entry_reclaim(pmap_t locked_pmap)
+{
+ pd_entry_t ptepde;
+ pmap_t pmap;
+ pt_entry_t *pte, tpte;
+ pv_entry_t pv;
+ vm_offset_t va;
+ vm_page_t m;
+
+ PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED);
+ mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+ TAILQ_FOREACH(m, &vm_page_queues[PQ_INACTIVE].pl, pageq) {
+ if (m->hold_count || m->busy || (m->flags & PG_BUSY))
+ continue;
+ TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
+ va = pv->pv_va;
+ pmap = pv->pv_pmap;
+ if (pmap != locked_pmap && !PMAP_TRYLOCK(pmap))
+ continue;
+ pmap->pm_stats.resident_count--;
+ pte = pmap_pte_pde(pmap, va, &ptepde);
+ tpte = pte_load_clear(pte);
+ KASSERT((tpte & PG_W) == 0,
+ ("pv_entry_reclaim: wired pte %#lx", tpte));
+ if (tpte & PG_A)
+ vm_page_flag_set(m, PG_REFERENCED);
+ if (tpte & PG_M) {
+ KASSERT((tpte & PG_RW),
+ ("pv_entry_reclaim: modified page not writable: va: %#lx, pte: %#lx",
+ va, tpte));
+ if (pmap_track_modified(va))
+ vm_page_dirty(m);
+ }
+ pmap_invalidate_page(pmap, va);
+ TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
+ TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
+ if (TAILQ_EMPTY(&m->md.pv_list))
+ vm_page_flag_clear(m, PG_WRITEABLE);
+ m->md.pv_list_count--;
+ pmap_unuse_pt(pmap, va, ptepde);
+ if (pmap != locked_pmap)
+ PMAP_UNLOCK(pmap);
+ return (pv);
+ }
+ }
+ panic("pv_entry_reclaim: increase vm.pmap.shpgperproc");
+}
static void
pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va)
@@ -1488,8 +1540,10 @@ pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m)
pv_entry_t pv;
pv = get_pv_entry();
- if (pv == NULL)
- panic("no pv entries: increase vm.pmap.shpgperproc");
+ if (pv == NULL) {
+ pv_entry_count--;
+ pv = pv_entry_reclaim(pmap);
+ }
pv->pv_va = va;
pv->pv_pmap = pmap;
@@ -1524,7 +1578,7 @@ pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va, pd_entry_t ptepde)
m = PHYS_TO_VM_PAGE(oldpte & PG_FRAME);
if (oldpte & PG_M) {
KASSERT((oldpte & PG_RW),
- ("pmap_remove_pte: modified page not writable: va: 0x%lx, pte: 0x%lx",
+ ("pmap_remove_pte: modified page not writable: va: %#lx, pte: %#lx",
va, oldpte));
if (pmap_track_modified(va))
vm_page_dirty(m);
@@ -1705,7 +1759,7 @@ pmap_remove_all(vm_page_t m)
*/
if (tpte & PG_M) {
KASSERT((tpte & PG_RW),
- ("pmap_remove_all: modified page not writable: va: 0x%lx, pte: 0x%lx",
+ ("pmap_remove_all: modified page not writable: va: %#lx, pte: %#lx",
pv->pv_va, tpte));
if (pmap_track_modified(pv->pv_va))
vm_page_dirty(m);
@@ -1993,7 +2047,7 @@ validate:
}
if (origpte & PG_M) {
KASSERT((origpte & PG_RW),
- ("pmap_enter: modified page not writable: va: 0x%lx, pte: 0x%lx",
+ ("pmap_enter: modified page not writable: va: %#lx, pte: %#lx",
va, origpte));
if ((origpte & PG_MANAGED) &&
pmap_track_modified(va))
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index fdc51d7..b9f852f 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -253,6 +253,7 @@ static struct mtx PMAP2mutex;
static PMAP_INLINE void free_pv_entry(pv_entry_t pv);
static pv_entry_t get_pv_entry(void);
+static pv_entry_t pv_entry_reclaim(pmap_t locked_pmap);
static void pmap_clear_ptes(vm_page_t m, int bit);
static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva);
@@ -1452,6 +1453,60 @@ get_pv_entry(void)
return uma_zalloc(pvzone, M_NOWAIT);
}
+/*
+ * Reclaim a pv entry by removing a mapping to an inactive page.
+ */
+static pv_entry_t
+pv_entry_reclaim(pmap_t locked_pmap)
+{
+ pmap_t pmap;
+ pt_entry_t *pte, tpte;
+ pv_entry_t pv;
+ vm_offset_t va;
+ vm_page_t m;
+
+ PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED);
+ mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+ sched_pin();
+ TAILQ_FOREACH(m, &vm_page_queues[PQ_INACTIVE].pl, pageq) {
+ if (m->hold_count || m->busy || (m->flags & PG_BUSY))
+ continue;
+ TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
+ va = pv->pv_va;
+ pmap = pv->pv_pmap;
+ if (pmap != locked_pmap && !PMAP_TRYLOCK(pmap))
+ continue;
+ pmap->pm_stats.resident_count--;
+ pte = pmap_pte_quick(pmap, va);
+ tpte = pte_load_clear(pte);
+ KASSERT((tpte & PG_W) == 0,
+ ("pv_entry_reclaim: wired pte %#jx",
+ (uintmax_t)tpte));
+ if (tpte & PG_A)
+ vm_page_flag_set(m, PG_REFERENCED);
+ if (tpte & PG_M) {
+ KASSERT((tpte & PG_RW),
+ ("pv_entry_reclaim: modified page not writable: va: %#x, pte: %#jx",
+ va, (uintmax_t)tpte));
+ if (pmap_track_modified(va))
+ vm_page_dirty(m);
+ }
+ pmap_invalidate_page(pmap, va);
+ TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
+ TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
+ if (TAILQ_EMPTY(&m->md.pv_list))
+ vm_page_flag_clear(m, PG_WRITEABLE);
+ m->md.pv_list_count--;
+ pmap_unuse_pt(pmap, va);
+ if (pmap != locked_pmap)
+ PMAP_UNLOCK(pmap);
+ sched_unpin();
+ return (pv);
+ }
+ }
+ sched_unpin();
+ panic("pv_entry_reclaim: increase vm.pmap.shpgperproc");
+}
static void
pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va)
@@ -1490,8 +1545,10 @@ pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m)
pv_entry_t pv;
pv = get_pv_entry();
- if (pv == NULL)
- panic("no pv entries: increase vm.pmap.shpgperproc");
+ if (pv == NULL) {
+ pv_entry_count--;
+ pv = pv_entry_reclaim(pmap);
+ }
pv->pv_va = va;
pv->pv_pmap = pmap;
@@ -1527,8 +1584,8 @@ pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va)
m = PHYS_TO_VM_PAGE(oldpte);
if (oldpte & PG_M) {
KASSERT((oldpte & PG_RW),
- ("pmap_remove_pte: modified page not writable: va: 0x%x, pte: 0x%x",
- va, oldpte));
+ ("pmap_remove_pte: modified page not writable: va: %#x, pte: %#jx",
+ va, (uintmax_t)oldpte));
if (pmap_track_modified(va))
vm_page_dirty(m);
}
@@ -1693,8 +1750,8 @@ pmap_remove_all(vm_page_t m)
*/
if (tpte & PG_M) {
KASSERT((tpte & PG_RW),
- ("pmap_remove_all: modified page not writable: va: 0x%x, pte: 0x%x",
- pv->pv_va, tpte));
+ ("pmap_remove_all: modified page not writable: va: %#x, pte: %#jx",
+ pv->pv_va, (uintmax_t)tpte));
if (pmap_track_modified(pv->pv_va))
vm_page_dirty(m);
}
@@ -1983,8 +2040,8 @@ validate:
}
if (origpte & PG_M) {
KASSERT((origpte & PG_RW),
- ("pmap_enter: modified page not writable: va: 0x%x, pte: 0x%x",
- va, origpte));
+ ("pmap_enter: modified page not writable: va: %#x, pte: %#jx",
+ va, (uintmax_t)origpte));
if ((origpte & PG_MANAGED) &&
pmap_track_modified(va))
vm_page_dirty(om);
OpenPOWER on IntegriCloud