author     alc <alc@FreeBSD.org>   2013-09-11 17:23:42 +0000
committer  alc <alc@FreeBSD.org>   2013-09-11 17:23:42 +0000
commit     11b50ccc0ed63bbd99489f7131d095e1783c0e2e (patch)
tree       d79384abb53a6c7ff5c8c8388d4700c2c211d27f
parent     aa68cbca5903957047a27c2717c554a984fe847d (diff)
Prior to r254304, we only began scanning the active page queue when the
amount of free memory was close to the point at which we would begin
reclaiming pages.  Now, we continuously scan the active page queue,
regardless of the amount of free memory.  Consequently, we are
continuously calling pmap_ts_referenced() on active pages.

Prior to this change, pmap_ts_referenced() would always demote superpage
mappings in order to obtain finer-grained reference information.  This
made sense because we were coming under memory pressure and would soon
have to begin reclaiming pages.  Now, however, with continuous scanning
of the active page queue, these demotions are taking a toll on
performance.  To address this problem, I have replaced the demotion with
a heuristic for periodically clearing the reference flag on superpage
mappings.

Approved by:	re (kib)
Sponsored by:	EMC / Isilon Storage Division
-rw-r--r--	sys/i386/i386/pmap.c	121
1 file changed, 69 insertions(+), 52 deletions(-)
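The replacement code in the diff below selects, via an XOR of the physical
page number, the virtual superpage number, and the pmap address, one 4KB
page out of the 1024 (or 512 with PAE) covered by a superpage mapping, and
clears PG_A only when the page being tested happens to be that one and the
mapping is not wired.  As a rough standalone illustration of that selection
test, here is a minimal sketch; the SKETCH_* constants use the non-PAE i386
values and the would_clear_superpage_ref() helper is invented for this
example, so neither appears in the FreeBSD sources:

/*
 * Standalone sketch of the selection heuristic described above; for
 * illustration only.
 */
#include <stdbool.h>
#include <stdint.h>

#define SKETCH_PAGE_SHIFT	12	/* 4KB base pages */
#define SKETCH_PDRSHIFT		22	/* 4MB superpages (non-PAE i386) */
#define SKETCH_NPTEPG		1024	/* 4KB pages per superpage */

/*
 * Return true when testing this 4KB page's reference bit should also clear
 * the superpage mapping's shared PG_A bit.  XORing the physical page
 * number, the virtual superpage number, and the pmap address spreads the
 * "one page in 1024" choice across superpages, so the same offset is not
 * selected for every mapping.  Wired mappings always keep the bit set.
 */
static bool
would_clear_superpage_ref(uint32_t pa, uint32_t va, uintptr_t pmap_addr,
    bool wired)
{
	uint32_t hash;

	hash = (pa >> SKETCH_PAGE_SHIFT) ^ (va >> SKETCH_PDRSHIFT) ^
	    (uint32_t)pmap_addr;
	return ((hash & (SKETCH_NPTEPG - 1)) == 0 && !wired);
}

Clearing the shared bit on only about one call in SKETCH_NPTEPG lets the
reference information for a superpage decay gradually instead of being
discarded on every scan, which is the point of the heuristic.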
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index f09abee..a3b93a7 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -2639,8 +2639,8 @@ pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
oldpde = *pde;
KASSERT((oldpde & (PG_PS | PG_V)) == (PG_PS | PG_V),
("pmap_demote_pde: oldpde is missing PG_PS and/or PG_V"));
- mpte = pmap_lookup_pt_page(pmap, va);
- if (mpte != NULL)
+ if ((oldpde & PG_A) != 0 && (mpte = pmap_lookup_pt_page(pmap, va)) !=
+ NULL)
pmap_remove_pt_page(pmap, mpte);
else {
KASSERT((oldpde & PG_W) == 0,
@@ -4744,6 +4744,8 @@ retry:
rw_wunlock(&pvh_global_lock);
}
+#define PMAP_TS_REFERENCED_MAX 5
+
/*
* pmap_ts_referenced:
*
@@ -4760,73 +4762,88 @@ int
pmap_ts_referenced(vm_page_t m)
{
struct md_page *pvh;
- pv_entry_t pv, pvf, pvn;
+ pv_entry_t pv, pvf;
pmap_t pmap;
- pd_entry_t oldpde, *pde;
+ pd_entry_t *pde;
pt_entry_t *pte;
- vm_offset_t va;
+ vm_paddr_t pa;
int rtval = 0;
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("pmap_ts_referenced: page %p is not managed", m));
- pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
+ pa = VM_PAGE_TO_PHYS(m);
+ pvh = pa_to_pvh(pa);
rw_wlock(&pvh_global_lock);
sched_pin();
- if ((m->flags & PG_FICTITIOUS) != 0)
+ if ((m->flags & PG_FICTITIOUS) != 0 ||
+ (pvf = TAILQ_FIRST(&pvh->pv_list)) == NULL)
goto small_mappings;
- TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, pvn) {
- va = pv->pv_va;
+ pv = pvf;
+ do {
pmap = PV_PMAP(pv);
PMAP_LOCK(pmap);
- pde = pmap_pde(pmap, va);
- oldpde = *pde;
- if ((oldpde & PG_A) != 0) {
- if (pmap_demote_pde(pmap, pde, va)) {
- if ((oldpde & PG_W) == 0) {
- /*
- * Remove the mapping to a single page
- * so that a subsequent access may
- * repromote. Since the underlying
- * page table page is fully populated,
- * this removal never frees a page
- * table page.
- */
- va += VM_PAGE_TO_PHYS(m) - (oldpde &
- PG_PS_FRAME);
- pmap_remove_page(pmap, va, NULL);
- rtval++;
- if (rtval > 4) {
- PMAP_UNLOCK(pmap);
- goto out;
- }
- }
+ pde = pmap_pde(pmap, pv->pv_va);
+ if ((*pde & PG_A) != 0) {
+ /*
+ * Since this reference bit is shared by either 1024
+ * or 512 4KB pages, it should not be cleared every
+ * time it is tested. Apply a simple "hash" function
+ * on the physical page number, the virtual superpage
+ * number, and the pmap address to select one 4KB page
+ * out of the 1024 or 512 on which testing the
+ * reference bit will result in clearing that bit.
+ * This function is designed to avoid the selection of
+ * the same 4KB page for every 2- or 4MB page mapping.
+ *
+ * On demotion, a mapping that hasn't been referenced
+ * is simply destroyed. To avoid the possibility of a
+ * subsequent page fault on a demoted wired mapping,
+ * always leave its reference bit set. Moreover,
+ * since the superpage is wired, the current state of
+ * its reference bit won't affect page replacement.
+ */
+ if ((((pa >> PAGE_SHIFT) ^ (pv->pv_va >> PDRSHIFT) ^
+ (uintptr_t)pmap) & (NPTEPG - 1)) == 0 &&
+ (*pde & PG_W) == 0) {
+ atomic_clear_int((u_int *)pde, PG_A);
+ pmap_invalidate_page(pmap, pv->pv_va);
}
+ rtval++;
}
PMAP_UNLOCK(pmap);
- }
+ /* Rotate the PV list if it has more than one entry. */
+ if (TAILQ_NEXT(pv, pv_next) != NULL) {
+ TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
+ TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
+ }
+ if (rtval >= PMAP_TS_REFERENCED_MAX)
+ goto out;
+ } while ((pv = TAILQ_FIRST(&pvh->pv_list)) != pvf);
small_mappings:
- if ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
- pvf = pv;
- do {
- pvn = TAILQ_NEXT(pv, pv_next);
+ if ((pvf = TAILQ_FIRST(&m->md.pv_list)) == NULL)
+ goto out;
+ pv = pvf;
+ do {
+ pmap = PV_PMAP(pv);
+ PMAP_LOCK(pmap);
+ pde = pmap_pde(pmap, pv->pv_va);
+ KASSERT((*pde & PG_PS) == 0,
+ ("pmap_ts_referenced: found a 4mpage in page %p's pv list",
+ m));
+ pte = pmap_pte_quick(pmap, pv->pv_va);
+ if ((*pte & PG_A) != 0) {
+ atomic_clear_int((u_int *)pte, PG_A);
+ pmap_invalidate_page(pmap, pv->pv_va);
+ rtval++;
+ }
+ PMAP_UNLOCK(pmap);
+ /* Rotate the PV list if it has more than one entry. */
+ if (TAILQ_NEXT(pv, pv_next) != NULL) {
TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
- pmap = PV_PMAP(pv);
- PMAP_LOCK(pmap);
- pde = pmap_pde(pmap, pv->pv_va);
- KASSERT((*pde & PG_PS) == 0, ("pmap_ts_referenced:"
- " found a 4mpage in page %p's pv list", m));
- pte = pmap_pte_quick(pmap, pv->pv_va);
- if ((*pte & PG_A) != 0) {
- atomic_clear_int((u_int *)pte, PG_A);
- pmap_invalidate_page(pmap, pv->pv_va);
- rtval++;
- if (rtval > 4)
- pvn = NULL;
- }
- PMAP_UNLOCK(pmap);
- } while ((pv = pvn) != NULL && pv != pvf);
- }
+ }
+ } while ((pv = TAILQ_FIRST(&m->md.pv_list)) != pvf && rtval <
+ PMAP_TS_REFERENCED_MAX);
out:
sched_unpin();
rw_wunlock(&pvh_global_lock);
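Both loops in the new pmap_ts_referenced() visit PV entries round-robin:
after each entry is examined it is rotated to the tail of its PV list, and
the walk stops once PMAP_TS_REFERENCED_MAX references have been counted or
it wraps around to the first entry visited.  The following minimal
userspace sketch of that rotate-and-cap pattern is illustrative only; the
entry type, the ref_test() stand-in, and MAX_REFS are invented, and only
the <sys/queue.h> TAILQ usage mirrors the kernel code:

#include <sys/queue.h>
#include <stdbool.h>

#define MAX_REFS 5			/* stands in for PMAP_TS_REFERENCED_MAX */

struct entry {
	TAILQ_ENTRY(entry) link;
	bool referenced;		/* stands in for the PG_A test */
};
TAILQ_HEAD(entry_list, entry);

/* Hypothetical stand-in for testing (and clearing) a mapping's PG_A bit. */
static bool
ref_test(struct entry *e)
{
	bool was_set = e->referenced;

	e->referenced = false;
	return (was_set);
}

static int
count_references(struct entry_list *list)
{
	struct entry *pv, *pvf;
	int rtval = 0;

	if ((pvf = TAILQ_FIRST(list)) == NULL)
		return (0);
	pv = pvf;
	do {
		if (ref_test(pv))
			rtval++;
		/* Rotate the list so the next walk starts after this entry. */
		if (TAILQ_NEXT(pv, link) != NULL) {
			TAILQ_REMOVE(list, pv, link);
			TAILQ_INSERT_TAIL(list, pv, link);
		}
		if (rtval >= MAX_REFS)
			break;
	} while ((pv = TAILQ_FIRST(list)) != pvf);
	return (rtval);
}

Because the cap can stop the walk early, the rotation ensures that
successive calls examine different mappings instead of repeatedly
re-testing the same leading entries of a long PV list.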