author     alc <alc@FreeBSD.org>	2017-10-07 21:13:54 +0000
committer  alc <alc@FreeBSD.org>	2017-10-07 21:13:54 +0000
commit     7970befebf456f9da8258eeab8e96dacc479e4f9 (patch)
tree       be9fcf61f8815d4fc5cce64df611dd1cb889a43e /sys/powerpc
parent     9ee4f002d358264a9dc87e8ce77801d708bd63cb (diff)
MFC r305685
Various changes to pmap_ts_referenced()

Move PMAP_TS_REFERENCED_MAX out of the various pmap implementations and into
vm/pmap.h, and describe what its purpose is. Eliminate the archaic "XXX"
comment about its value. I don't believe that its exact value, e.g., 5 versus
6, matters.

Update the arm64 and riscv pmap implementations of pmap_ts_referenced() to
opportunistically update the page's dirty field.

On amd64, use the PDE value already cached in a local variable rather than
dereferencing a pointer again and again.
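For reference, the limit that this change centralizes lives in sys/vm/pmap.h. The lines below are a sketch of roughly what that shared definition looks like; the comment wording and the exact value shown here are illustrative rather than a verbatim copy of the header:

/*
 * Approximate sketch of the shared definition in sys/vm/pmap.h: the
 * maximum number of reference bits that a single call to
 * pmap_ts_referenced() will find and clear.  Capping the count bounds
 * the cost of scanning widely shared pages.
 */
#define	PMAP_TS_REFERENCED_MAX	5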
Diffstat (limited to 'sys/powerpc')
-rw-r--r--  sys/powerpc/booke/pmap.c | 14
1 file changed, 10 insertions, 4 deletions
diff --git a/sys/powerpc/booke/pmap.c b/sys/powerpc/booke/pmap.c
index b7ffdd6..0448129 100644
--- a/sys/powerpc/booke/pmap.c
+++ b/sys/powerpc/booke/pmap.c
@@ -2527,9 +2527,13 @@ mmu_booke_clear_modify(mmu_t mmu, vm_page_t m)
  * is necessary that 0 only be returned when there are truly no
  * reference bits set.
  *
- * XXX: The exact number of bits to check and clear is a matter that
- * should be tested and standardized at some point in the future for
- * optimal aging of shared pages.
+ * As an optimization, update the page's dirty field if a modified bit is
+ * found while counting reference bits. This opportunistic update can be
+ * performed at low cost and can eliminate the need for some future calls
+ * to pmap_is_modified(). However, since this function stops after
+ * finding PMAP_TS_REFERENCED_MAX reference bits, it may not detect some
+ * dirty pages. Those dirty pages will only be detected by a future call
+ * to pmap_is_modified().
  */
 static int
 mmu_booke_ts_referenced(mmu_t mmu, vm_page_t m)
@@ -2546,6 +2550,8 @@ mmu_booke_ts_referenced(mmu_t mmu, vm_page_t m)
 		PMAP_LOCK(pv->pv_pmap);
 		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
 		    PTE_ISVALID(pte)) {
+			if (PTE_ISMODIFIED(pte))
+				vm_page_dirty(m);
 			if (PTE_ISREFERENCED(pte)) {
 				mtx_lock_spin(&tlbivax_mutex);
 				tlb_miss_lock();
@@ -2556,7 +2562,7 @@ mmu_booke_ts_referenced(mmu_t mmu, vm_page_t m)
 				tlb_miss_unlock();
 				mtx_unlock_spin(&tlbivax_mutex);
 
-				if (++count > 4) {
+				if (++count >= PMAP_TS_REFERENCED_MAX) {
 					PMAP_UNLOCK(pv->pv_pmap);
 					break;
 				}
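
To make the interaction between the opportunistic dirty update and the PMAP_TS_REFERENCED_MAX cutoff easy to experiment with, here is a minimal, self-contained userspace model of the scan loop. Everything in it (fake_page, model_ts_referenced, the PTE bit values) is hypothetical and exists only for illustration; it is not the booke pmap code, only a sketch of the same counting logic:

#include <stdbool.h>
#include <stdio.h>

#define PMAP_TS_REFERENCED_MAX	5	/* plays the role of the kernel constant */

/* Hypothetical PTE bits, for this model only. */
#define PTE_REFERENCED	0x1
#define PTE_MODIFIED	0x2

struct fake_page {
	bool dirty;			/* stands in for the page's dirty field */
};

static int
model_ts_referenced(unsigned *ptes, int nptes, struct fake_page *pg)
{
	int count = 0;

	for (int i = 0; i < nptes; i++) {
		/* Opportunistic dirty update: one extra test per mapping. */
		if (ptes[i] & PTE_MODIFIED)
			pg->dirty = true;
		if (ptes[i] & PTE_REFERENCED) {
			ptes[i] &= ~PTE_REFERENCED;	/* clear the reference bit */
			if (++count >= PMAP_TS_REFERENCED_MAX)
				break;	/* remaining mappings are not examined */
		}
	}
	return (count);
}

int
main(void)
{
	/* Seven referenced mappings; only the last one is also modified. */
	unsigned ptes[] = { 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x3 };
	struct fake_page pg = { .dirty = false };

	int n = model_ts_referenced(ptes, 7, &pg);
	printf("cleared %d reference bits, dirty=%d\n", n, pg.dirty);
	return (0);
}

Compiled with any C99 compiler, this prints "cleared 5 reference bits, dirty=0": the loop stops after clearing five reference bits, so the modified bit on the seventh mapping is never seen, which is precisely the case the new comment in the patch defers to a later pmap_is_modified() call.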