author     alc <alc@FreeBSD.org>  2005-11-09 08:19:21 +0000
committer  alc <alc@FreeBSD.org>  2005-11-09 08:19:21 +0000
commit     8852c8f9e25ac6d124a51b1e330993a5ba1bcafa (patch)
tree       62e83f535ea20fe619830cadd0ca11c77e61b37f /sys/alpha
parent     f9da852761f4d4f96009496cf662d42d998c42a6 (diff)
Reimplement the reclamation of PV entries. Specifically, perform
reclamation synchronously from get_pv_entry() instead of
asynchronously as part of the page daemon. Additionally, limit the
reclamation to inactive pages unless allocation from the PV entry zone
or reclamation from the inactive queue fails. Previously, reclamation
destroyed mappings to both inactive and active pages. get_pv_entry()
still wakes up the page daemon when reclamation occurs, however,
because the page daemon may move some pages from the active queue to
the inactive queue, making new pages available to future
reclamations.
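
In outline, the reworked get_pv_entry() behaves as sketched below (a
condensed, non-authoritative rendering of the function in the diff
that follows; the body of the reclamation loop is elided):

static pv_entry_t
get_pv_entry(pmap_t locked_pmap)
{
    pv_entry_t allocated_pv;
    struct vpgqueues *vpq;

    /* Fast path: allocate from the PV entry zone. */
    allocated_pv = uma_zalloc(pvzone, M_NOWAIT);
    if (allocated_pv != NULL) {
        pv_entry_count++;
        if (pv_entry_count > pv_entry_high_water)
            pagedaemon_wakeup();    /* may refill the inactive queue */
        else
            return (allocated_pv);
    }

    /* Slow path: reclaim synchronously, inactive pages first. */
    vpq = &vm_page_queues[PQ_INACTIVE];
retry:
    /*
     * ... walk the pages in vpq, destroying reclaimable mappings;
     * the first PV entry freed becomes allocated_pv ...
     */
    if (allocated_pv == NULL) {
        if (vpq == &vm_page_queues[PQ_INACTIVE]) {
            vpq = &vm_page_queues[PQ_ACTIVE];
            goto retry;    /* fall back to active pages */
        }
        panic("get_pv_entry: increase the vm.pmap.shpgperproc tunable");
    }
    return (allocated_pv);
}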
Print the "reclaiming PV entries" message at most once per minute,
rather than stopping permanently after the fifth occurrence as the
old code did. This way, we do not give the impression that the
problem has gone away.
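
The once-per-minute limit is implemented with the kernel's
ratecheck(9) helper, which returns nonzero only when at least the
given interval has elapsed since the stored timestamp (and updates
the timestamp when it does). The relevant lines from the diff below:

static const struct timeval printinterval = { 60, 0 };  /* 60 seconds */
static struct timeval lastprint;
...
/* Fires at most once per printinterval, but never stops firing. */
if (ratecheck(&lastprint, &printinterval))
    printf("Approaching the limit on PV entries, "
        "increase the vm.pmap.shpgperproc tunable.\n");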
Reviewed by: tegge
Diffstat (limited to 'sys/alpha')
-rw-r--r--  sys/alpha/alpha/pmap.c  71
1 file changed, 41 insertions, 30 deletions
diff --git a/sys/alpha/alpha/pmap.c b/sys/alpha/alpha/pmap.c
index 39cd851..9ce77cd 100644
--- a/sys/alpha/alpha/pmap.c
+++ b/sys/alpha/alpha/pmap.c
@@ -317,11 +317,9 @@ static struct mtx allpmaps_lock;
  */
 static uma_zone_t pvzone;
 static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
-int pmap_pagedaemon_waken;
 
 static PMAP_INLINE void free_pv_entry(pv_entry_t pv);
-static pv_entry_t get_pv_entry(void);
-static pv_entry_t pv_entry_reclaim(pmap_t locked_pmap);
+static pv_entry_t get_pv_entry(pmap_t locked_pmap);
 
 static void alpha_protection_init(void);
 static void pmap_changebit(vm_page_t m, int bit, boolean_t setem);
@@ -1300,39 +1298,44 @@ free_pv_entry(pv_entry_t pv)
 /*
  * get a new pv_entry, allocating a block from the system
  * when needed.
- * the memory allocation is performed bypassing the malloc code
- * because of the possibility of allocations at interrupt time.
  */
 static pv_entry_t
-get_pv_entry(void)
-{
-    pv_entry_count++;
-    if ((pv_entry_count > pv_entry_high_water) &&
-        (pmap_pagedaemon_waken == 0)) {
-        pmap_pagedaemon_waken = 1;
-        wakeup (&vm_pages_needed);
-    }
-    return uma_zalloc(pvzone, M_NOWAIT);
-}
-
-/*
- * Reclaim a pv entry by removing a mapping to an inactive page.
- */
-static pv_entry_t
-pv_entry_reclaim(pmap_t locked_pmap)
+get_pv_entry(pmap_t locked_pmap)
 {
+    static const struct timeval printinterval = { 60, 0 };
+    static struct timeval lastprint;
+    struct vpgqueues *vpq;
     pmap_t pmap;
     pt_entry_t *pte, tpte;
-    pv_entry_t pv;
+    pv_entry_t allocated_pv, next_pv, pv;
     vm_offset_t va;
     vm_page_t m;
 
     PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED);
     mtx_assert(&vm_page_queue_mtx, MA_OWNED);
-    TAILQ_FOREACH(m, &vm_page_queues[PQ_INACTIVE].pl, pageq) {
+    allocated_pv = uma_zalloc(pvzone, M_NOWAIT);
+    if (allocated_pv != NULL) {
+        pv_entry_count++;
+        if (pv_entry_count > pv_entry_high_water)
+            pagedaemon_wakeup();
+        else
+            return (allocated_pv);
+    }
+
+    /*
+     * Reclaim pv entries: At first, destroy mappings to inactive
+     * pages.  After that, if a pv entry is still needed, destroy
+     * mappings to active pages.
+     */
+    if (ratecheck(&lastprint, &printinterval))
+        printf("Approaching the limit on PV entries, "
+            "increase the vm.pmap.shpgperproc tunable.\n");
+    vpq = &vm_page_queues[PQ_INACTIVE];
+retry:
+    TAILQ_FOREACH(m, &vpq->pl, pageq) {
         if (m->hold_count || m->busy || (m->flags & PG_BUSY))
             continue;
-        TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
+        TAILQ_FOREACH_SAFE(pv, &m->md.pv_list, pv_list, next_pv) {
             va = pv->pv_va;
             pmap = pv->pv_pmap;
             if (pmap != locked_pmap && !PMAP_TRYLOCK(pmap))
@@ -1342,7 +1345,7 @@ pv_entry_reclaim(pmap_t locked_pmap)
             tpte = *pte;
             *pte = 0;
             KASSERT((tpte & PG_W) == 0,
-                ("pv_entry_reclaim: wired pte %#lx", tpte));
+                ("get_pv_entry: wired pte %#lx", tpte));
             if ((tpte & PG_FOR) == 0)
                 vm_page_flag_set(m, PG_REFERENCED);
             if ((tpte & PG_FOW) == 0) {
@@ -1358,10 +1361,20 @@ pv_entry_reclaim(pmap_t locked_pmap)
             pmap_unuse_pt(pmap, va, pv->pv_ptem);
             if (pmap != locked_pmap)
                 PMAP_UNLOCK(pmap);
-            return (pv);
+            if (allocated_pv == NULL)
+                allocated_pv = pv;
+            else
+                free_pv_entry(pv);
+        }
+    }
+    if (allocated_pv == NULL) {
+        if (vpq == &vm_page_queues[PQ_INACTIVE]) {
+            vpq = &vm_page_queues[PQ_ACTIVE];
+            goto retry;
         }
+        panic("get_pv_entry: increase the vm.pmap.shpgperproc tunable");
     }
-    panic("pv_entry_reclaim: increase vm.pmap.shpgperproc");
+    return (allocated_pv);
 }
 
 static int
@@ -1408,9 +1421,7 @@ pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t mpte, vm_page_t m)
 {
     pv_entry_t pv;
 
-    pv = get_pv_entry();
-    if (pv == NULL)
-        pv = pv_entry_reclaim(pmap);
+    pv = get_pv_entry(pmap);
     pv->pv_va = va;
     pv->pv_pmap = pmap;
     pv->pv_ptem = mpte;