author    alc <alc@FreeBSD.org>  2016-10-01 19:30:28 +0000
committer alc <alc@FreeBSD.org>  2016-10-01 19:30:28 +0000
commit    ef0b439c822bffc80175b9dfb78957bb807d576d (patch)
tree      0160d2729d42e24c2952c09ab03d4166a8895bd1 /sys/amd64/amd64/pmap.c
parent    c83481c3142b3f842f75e35f0359ab1e4a918a1c (diff)
MFC r305213,305319,305398
As an optimization to the machine-independent layer, change the
machine-dependent pmap_ts_referenced() so that it updates the page's
dirty field if a modified bit is found while counting reference bits.
This opportunistic update can be performed at low cost and can
eliminate the need for some future calls to pmap_is_modified() by the
machine-independent layer.

Replace the number 4 in sparc64's pmap_ts_referenced() by
PMAP_TS_REFERENCED_MAX, like we've done elsewhere, e.g., amd64.
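To make the test this change relies on concrete, here is a minimal,
self-contained C model (illustrative userland code, not part of the
patch; the PG_* bit positions follow the x86 PTE layout): a mapping is
treated as dirty only when both the modified (PG_M) and writable
(PG_RW) bits are set, since a modified bit is only meaningful on a
mapping that is currently writable.

/*
 * Self-contained model of the dirty test used by the patch.
 * Illustrative only; real PTEs live in the kernel's page tables.
 */
#include <stdio.h>
#include <stdint.h>

#define PG_RW	(1u << 1)	/* mapping is writable */
#define PG_A	(1u << 5)	/* accessed (referenced) */
#define PG_M	(1u << 6)	/* modified (dirty) */

static int
pte_is_dirty(uint64_t pte)
{
	/* Both bits must be set; PG_M alone is not trusted. */
	return ((pte & (PG_M | PG_RW)) == (PG_M | PG_RW));
}

int
main(void)
{
	printf("%d\n", pte_is_dirty(PG_M | PG_RW));	/* 1: dirty */
	printf("%d\n", pte_is_dirty(PG_M));		/* 0: not writable */
	printf("%d\n", pte_is_dirty(PG_RW | PG_A));	/* 0: referenced, clean */
	return (0);
}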
Diffstat (limited to 'sys/amd64/amd64/pmap.c')
-rw-r--r--  sys/amd64/amd64/pmap.c | 24
1 file changed, 23 insertions(+), 1 deletion(-)
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 81ce359..7063827 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -5831,6 +5831,14 @@ safe_to_clear_referenced(pmap_t pmap, pt_entry_t pte)
* should be tested and standardized at some point in the future for
* optimal aging of shared pages.
*
+ * As an optimization, update the page's dirty field if a modified bit is
+ * found while counting reference bits. This opportunistic update can be
+ * performed at low cost and can eliminate the need for some future calls
+ * to pmap_is_modified(). However, since this function stops after
+ * finding PMAP_TS_REFERENCED_MAX reference bits, it may not detect some
+ * dirty pages. Those dirty pages will only be detected by a future call
+ * to pmap_is_modified().
+ *
* A DI block is not needed within this function, because
* invalidations are performed before the PV list lock is
* released.
@@ -5843,7 +5851,7 @@ pmap_ts_referenced(vm_page_t m)
pmap_t pmap;
struct rwlock *lock;
pd_entry_t oldpde, *pde;
- pt_entry_t *pte, PG_A;
+ pt_entry_t *pte, PG_A, PG_M, PG_RW;
vm_offset_t va;
vm_paddr_t pa;
int cleared, md_gen, not_cleared, pvh_gen;
@@ -5878,9 +5886,19 @@ retry:
}
}
PG_A = pmap_accessed_bit(pmap);
+ PG_M = pmap_modified_bit(pmap);
+ PG_RW = pmap_rw_bit(pmap);
va = pv->pv_va;
pde = pmap_pde(pmap, pv->pv_va);
oldpde = *pde;
+ if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
+ /*
+ * Although "oldpde" is mapping a 2MB page, because
+ * this function is called at a 4KB page granularity,
+ * we only update the 4KB page under test.
+ */
+ vm_page_dirty(m);
+ }
if ((*pde & PG_A) != 0) {
/*
* Since this reference bit is shared by 512 4KB
@@ -5974,11 +5992,15 @@ small_mappings:
}
}
PG_A = pmap_accessed_bit(pmap);
+ PG_M = pmap_modified_bit(pmap);
+ PG_RW = pmap_rw_bit(pmap);
pde = pmap_pde(pmap, pv->pv_va);
KASSERT((*pde & PG_PS) == 0,
("pmap_ts_referenced: found a 2mpage in page %p's pv list",
m));
pte = pmap_pde_to_pte(pde, pv->pv_va);
+ if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW))
+ vm_page_dirty(m);
if ((*pte & PG_A) != 0) {
if (safe_to_clear_referenced(pmap, *pte)) {
atomic_clear_long(pte, PG_A);
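The comment added at the head of pmap_ts_referenced() warns that the
scan stops after finding PMAP_TS_REFERENCED_MAX reference bits, so
dirty bits on mappings examined after that point can be missed. A
small self-contained model of that early exit (illustrative only;
PMAP_TS_REFERENCED_MAX is assumed to be 5 as in amd64, pv_list stands
in for the page's PV list, and the real loop also counts reference
bits it declines to clear):

#include <stdio.h>
#include <stdint.h>

#define PG_RW	(1u << 1)
#define PG_A	(1u << 5)
#define PG_M	(1u << 6)
#define PMAP_TS_REFERENCED_MAX	5	/* assumed amd64 value */

int
main(void)
{
	/* Eight mappings of one page; only the last one is dirty. */
	uint64_t pv_list[8] = {
		PG_A, PG_A, PG_A, PG_A, PG_A, PG_A, PG_A, PG_M | PG_RW
	};
	int cleared = 0, dirty_seen = 0;

	for (int i = 0; i < 8 && cleared < PMAP_TS_REFERENCED_MAX; i++) {
		if ((pv_list[i] & (PG_M | PG_RW)) == (PG_M | PG_RW))
			dirty_seen = 1;		/* opportunistic update */
		if (pv_list[i] & PG_A)
			cleared++;		/* model of clearing PG_A */
	}
	printf("cleared %d, dirty_seen %d\n", cleared, dirty_seen);
	return (0);
}

This prints "cleared 5, dirty_seen 0": the dirty mapping at the tail
of the list was never visited, and only a later pmap_is_modified()
call would detect it, exactly the caveat the new comment spells out.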