author     alc <alc@FreeBSD.org>  2005-11-09 08:19:21 +0000
committer  alc <alc@FreeBSD.org>  2005-11-09 08:19:21 +0000
commit     8852c8f9e25ac6d124a51b1e330993a5ba1bcafa (patch)
tree       62e83f535ea20fe619830cadd0ca11c77e61b37f /sys/amd64
parent     f9da852761f4d4f96009496cf662d42d998c42a6 (diff)
Reimplement the reclamation of PV entries. Specifically, perform
reclamation synchronously from get_pv_entry() instead of asynchronously
as part of the page daemon. Additionally, limit the reclamation to
inactive pages unless allocation from the PV entry zone or reclamation
from the inactive queue fails. Previously, reclamation destroyed
mappings to both inactive and active pages. get_pv_entry() still,
however, wakes up the page daemon when reclamation occurs, because the
page daemon may move some pages from the active queue to the inactive
queue, making new pages available to future reclamations.

Print the "reclaiming PV entries" message at most once per minute, but
don't stop printing it after the fifth time. This way, we do not give
the impression that the problem has gone away.

Reviewed by:	tegge
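In short, get_pv_entry() now tries a cheap zone allocation first, wakes the
page daemon once the high-water mark is crossed, and only then reclaims
synchronously, scanning the inactive queue before falling back to the active
queue. Below is a minimal, self-contained userland C model of that
allocate-then-reclaim, two-pass pattern; the pool, queue, and entry types are
invented stand-ins for illustration (they are not the kernel's pvzone,
vm_page_queues, or pv_entry structures), and the authoritative code is in the
pmap.c diff that follows.

	/*
	 * Userland model of the allocate-then-reclaim pattern described above.
	 * All names here are illustrative stand-ins, not kernel interfaces.
	 */
	#include <stdio.h>
	#include <stdlib.h>

	struct entry { int in_use; };

	#define NENTRIES	4
	#define QUEUE_LEN	8

	static struct entry pool[NENTRIES];		/* stands in for the PV entry zone */
	static struct entry *inactive[QUEUE_LEN];	/* stands in for PQ_INACTIVE */
	static struct entry *active[QUEUE_LEN];		/* stands in for PQ_ACTIVE */

	/* Fast path: hand out an unused pool slot (models the M_NOWAIT zone allocation). */
	static struct entry *
	zone_alloc(void)
	{
		for (int i = 0; i < NENTRIES; i++)
			if (!pool[i].in_use) {
				pool[i].in_use = 1;
				return (&pool[i]);
			}
		return (NULL);
	}

	/* "Destroy a mapping" on the given queue and recycle its entry directly. */
	static struct entry *
	reclaim_from(struct entry **queue)
	{
		for (int i = 0; i < QUEUE_LEN; i++)
			if (queue[i] != NULL) {
				struct entry *e = queue[i];
				queue[i] = NULL;
				return (e);
			}
		return (NULL);
	}

	/* Try the zone, then the inactive queue, then the active queue. */
	static struct entry *
	get_entry(void)
	{
		struct entry *e, **queue;

		if ((e = zone_alloc()) != NULL)
			return (e);
		queue = inactive;
	retry:
		if ((e = reclaim_from(queue)) != NULL)
			return (e);
		if (queue == inactive) {
			queue = active;		/* second pass, like the kernel's retry label */
			goto retry;
		}
		fprintf(stderr, "get_entry: nothing left to reclaim\n");
		exit(1);
	}

	int
	main(void)
	{
		/* Exhaust the pool, then force the two reclamation passes. */
		for (int i = 0; i < NENTRIES + 2; i++) {
			struct entry *e = get_entry();
			for (int j = 0; j < QUEUE_LEN; j++)	/* model creating a mapping */
				if (active[j] == NULL) {
					active[j] = e;
					break;
				}
			printf("allocation %d uses pool slot %td\n", i, e - pool);
		}
		return (0);
	}

Reclaiming synchronously in the allocating thread confines a shortage to the
caller that actually needs the entry, while the page daemon wakeup on the fast
path keeps moving pages toward the inactive queue so that later reclamations
stay cheap.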
Diffstat (limited to 'sys/amd64')
-rw-r--r--  sys/amd64/amd64/pmap.c  75
1 file changed, 42 insertions, 33 deletions
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index d144849..6c7da48 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -185,7 +185,6 @@ static u_int64_t DMPDPphys; /* phys addr of direct mapped level 3 */
static uma_zone_t pvzone;
static struct vm_object pvzone_obj;
static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
-int pmap_pagedaemon_waken;
/*
* All those kernel PT submaps that BSD is so fond of
@@ -200,8 +199,7 @@ struct msgbuf *msgbufp = 0;
static caddr_t crashdumpmap;
static PMAP_INLINE void free_pv_entry(pv_entry_t pv);
-static pv_entry_t get_pv_entry(void);
-static pv_entry_t pv_entry_reclaim(pmap_t locked_pmap);
+static pv_entry_t get_pv_entry(pmap_t locked_pmap);
static void pmap_clear_ptes(vm_page_t m, long bit);
static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq,
@@ -1428,40 +1426,45 @@ free_pv_entry(pv_entry_t pv)
/*
* get a new pv_entry, allocating a block from the system
* when needed.
- * the memory allocation is performed bypassing the malloc code
- * because of the possibility of allocations at interrupt time.
*/
static pv_entry_t
-get_pv_entry(void)
-{
- pv_entry_count++;
- if ((pv_entry_count > pv_entry_high_water) &&
- (pmap_pagedaemon_waken == 0)) {
- pmap_pagedaemon_waken = 1;
- wakeup (&vm_pages_needed);
- }
- return uma_zalloc(pvzone, M_NOWAIT);
-}
-
-/*
- * Reclaim a pv entry by removing a mapping to an inactive page.
- */
-static pv_entry_t
-pv_entry_reclaim(pmap_t locked_pmap)
+get_pv_entry(pmap_t locked_pmap)
{
+ static const struct timeval printinterval = { 60, 0 };
+ static struct timeval lastprint;
+ struct vpgqueues *vpq;
pd_entry_t ptepde;
pmap_t pmap;
pt_entry_t *pte, tpte;
- pv_entry_t pv;
+ pv_entry_t allocated_pv, next_pv, pv;
vm_offset_t va;
vm_page_t m;
PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED);
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
- TAILQ_FOREACH(m, &vm_page_queues[PQ_INACTIVE].pl, pageq) {
+ allocated_pv = uma_zalloc(pvzone, M_NOWAIT);
+ if (allocated_pv != NULL) {
+ pv_entry_count++;
+ if (pv_entry_count > pv_entry_high_water)
+ pagedaemon_wakeup();
+ else
+ return (allocated_pv);
+ }
+
+ /*
+ * Reclaim pv entries: At first, destroy mappings to inactive
+ * pages. After that, if a pv entry is still needed, destroy
+ * mappings to active pages.
+ */
+ if (ratecheck(&lastprint, &printinterval))
+ printf("Approaching the limit on PV entries, "
+ "increase the vm.pmap.shpgperproc tunable.\n");
+ vpq = &vm_page_queues[PQ_INACTIVE];
+retry:
+ TAILQ_FOREACH(m, &vpq->pl, pageq) {
if (m->hold_count || m->busy || (m->flags & PG_BUSY))
continue;
- TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
+ TAILQ_FOREACH_SAFE(pv, &m->md.pv_list, pv_list, next_pv) {
va = pv->pv_va;
pmap = pv->pv_pmap;
if (pmap != locked_pmap && !PMAP_TRYLOCK(pmap))
@@ -1470,12 +1473,12 @@ pv_entry_reclaim(pmap_t locked_pmap)
pte = pmap_pte_pde(pmap, va, &ptepde);
tpte = pte_load_clear(pte);
KASSERT((tpte & PG_W) == 0,
- ("pv_entry_reclaim: wired pte %#lx", tpte));
+ ("get_pv_entry: wired pte %#lx", tpte));
if (tpte & PG_A)
vm_page_flag_set(m, PG_REFERENCED);
if (tpte & PG_M) {
KASSERT((tpte & PG_RW),
- ("pv_entry_reclaim: modified page not writable: va: %#lx, pte: %#lx",
+ ("get_pv_entry: modified page not writable: va: %#lx, pte: %#lx",
va, tpte));
if (pmap_track_modified(va))
vm_page_dirty(m);
@@ -1489,10 +1492,20 @@ pv_entry_reclaim(pmap_t locked_pmap)
pmap_unuse_pt(pmap, va, ptepde);
if (pmap != locked_pmap)
PMAP_UNLOCK(pmap);
- return (pv);
+ if (allocated_pv == NULL)
+ allocated_pv = pv;
+ else
+ free_pv_entry(pv);
}
}
- panic("pv_entry_reclaim: increase vm.pmap.shpgperproc");
+ if (allocated_pv == NULL) {
+ if (vpq == &vm_page_queues[PQ_INACTIVE]) {
+ vpq = &vm_page_queues[PQ_ACTIVE];
+ goto retry;
+ }
+ panic("get_pv_entry: increase the vm.pmap.shpgperproc tunable");
+ }
+ return (allocated_pv);
}
static void
@@ -1531,11 +1544,7 @@ pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m)
{
pv_entry_t pv;
- pv = get_pv_entry();
- if (pv == NULL) {
- pv_entry_count--;
- pv = pv_entry_reclaim(pmap);
- }
+ pv = get_pv_entry(pmap);
pv->pv_va = va;
pv->pv_pmap = pmap;