author     alc <alc@FreeBSD.org>    2010-04-24 17:32:52 +0000
committer  alc <alc@FreeBSD.org>    2010-04-24 17:32:52 +0000
commit     0a905b1db997b1f231d5d442e158b5228d69937a (patch)
tree       7bce54bbe68a24f0e3ffcf944e58cf38a95b99e3 /sys
parent     49089e216a08f4cd346d73ab4334e75650d4aa16 (diff)
Resurrect pmap_is_referenced() and use it in mincore().

Essentially, pmap_ts_referenced() is not always appropriate for checking
whether or not pages have been referenced because it clears any reference
bits that it encounters. For example, in mincore(), clearing the reference
bits has two negative consequences.

First, it throws off the activity count calculations performed by the page
daemon. Specifically, a page on which mincore() has called
pmap_ts_referenced() looks less active to the page daemon than it should.
Consequently, the page could be deactivated prematurely by the page daemon.
Arguably, this problem could be fixed by having mincore() duplicate the
activity count calculation on the page.

However, there is a second problem for which that is not a solution. In
order to clear a reference on a 4KB page, it may be necessary to demote a
2/4MB page mapping. Thus, a mincore() by one process can have the side
effect of demoting a superpage mapping within another process!
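For illustration only: a condensed sketch of the sys/vm/vm_mmap.c hunk
below, showing how the reference check becomes a read-only query. The
identifiers come from the diff itself; the surrounding declarations and
loop context are elided.

	/*
	 * Before: pmap_ts_referenced(m) tests and *clears* the reference
	 * bits, so the caller re-sets PG_REFERENCED to compensate -- and
	 * the clear may force demotion of a 2/4MB superpage mapping.
	 */
	vm_page_lock_queues();
	if ((m->flags & PG_REFERENCED) || pmap_ts_referenced(m)) {
		vm_page_flag_set(m, PG_REFERENCED);
		mincoreinfo |= MINCORE_REFERENCED_OTHER;
	}
	vm_page_unlock_queues();

	/*
	 * After: pmap_is_referenced(m) merely reports whether any mapping
	 * of the page has its reference bit set; nothing is cleared, so
	 * no compensation is needed and no demotion can occur.
	 */
	vm_page_lock_queues();
	if ((m->flags & PG_REFERENCED) || pmap_is_referenced(m))
		mincoreinfo |= MINCORE_REFERENCED_OTHER;
	vm_page_unlock_queues();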
Diffstat (limited to 'sys')
-rw-r--r--  sys/amd64/amd64/pmap.c              | 48
-rw-r--r--  sys/arm/arm/pmap.c                  | 14
-rw-r--r--  sys/i386/i386/pmap.c                | 50
-rw-r--r--  sys/i386/xen/pmap.c                 | 32
-rw-r--r--  sys/ia64/ia64/pmap.c                | 35
-rw-r--r--  sys/mips/mips/pmap.c                | 18
-rw-r--r--  sys/powerpc/aim/mmu_oea.c           | 11
-rw-r--r--  sys/powerpc/aim/mmu_oea64.c         | 11
-rw-r--r--  sys/powerpc/booke/pmap.c            | 29
-rw-r--r--  sys/powerpc/powerpc/mmu_if.m        | 14
-rw-r--r--  sys/powerpc/powerpc/pmap_dispatch.c |  8
-rw-r--r--  sys/sparc64/sparc64/pmap.c          | 21
-rw-r--r--  sys/sun4v/sun4v/pmap.c              | 11
-rw-r--r--  sys/vm/pmap.h                       |  1
-rw-r--r--  sys/vm/vm_mmap.c                    |  4
15 files changed, 289 insertions(+), 18 deletions(-)
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 4b3f041..4d1c9a9 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -236,6 +236,7 @@ static void pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte);
static void pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte);
static void pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva);
static boolean_t pmap_is_modified_pvh(struct md_page *pvh);
+static boolean_t pmap_is_referenced_pvh(struct md_page *pvh);
static void pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode);
static vm_page_t pmap_lookup_pt_page(pmap_t pmap, vm_offset_t va);
static void pmap_pde_attr(pd_entry_t *pde, int cache_bits);
@@ -4178,6 +4179,49 @@ pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
}
/*
+ * pmap_is_referenced:
+ *
+ * Return whether or not the specified physical page was referenced
+ * in any physical maps.
+ */
+boolean_t
+pmap_is_referenced(vm_page_t m)
+{
+
+ if (m->flags & PG_FICTITIOUS)
+ return (FALSE);
+ if (pmap_is_referenced_pvh(&m->md))
+ return (TRUE);
+ return (pmap_is_referenced_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m))));
+}
+
+/*
+ * Returns TRUE if any of the given mappings were referenced and FALSE
+ * otherwise. Both page and 2mpage mappings are supported.
+ */
+static boolean_t
+pmap_is_referenced_pvh(struct md_page *pvh)
+{
+ pv_entry_t pv;
+ pt_entry_t *pte;
+ pmap_t pmap;
+ boolean_t rv;
+
+ mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+ rv = FALSE;
+ TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
+ pmap = PV_PMAP(pv);
+ PMAP_LOCK(pmap);
+ pte = pmap_pte(pmap, pv->pv_va);
+ rv = (*pte & (PG_A | PG_V)) == (PG_A | PG_V);
+ PMAP_UNLOCK(pmap);
+ if (rv)
+ break;
+ }
+ return (rv);
+}
+
+/*
* Clear the write and modified bits in each of the given page's mappings.
*/
void
@@ -4893,10 +4937,8 @@ pmap_mincore(pmap_t pmap, vm_offset_t addr)
*/
vm_page_lock_queues();
if ((m->flags & PG_REFERENCED) ||
- pmap_ts_referenced(m)) {
+ pmap_is_referenced(m))
val |= MINCORE_REFERENCED_OTHER;
- vm_page_flag_set(m, PG_REFERENCED);
- }
vm_page_unlock_queues();
}
}
diff --git a/sys/arm/arm/pmap.c b/sys/arm/arm/pmap.c
index 19b85d8..366b43f 100644
--- a/sys/arm/arm/pmap.c
+++ b/sys/arm/arm/pmap.c
@@ -4493,6 +4493,20 @@ pmap_clear_modify(vm_page_t m)
/*
+ * pmap_is_referenced:
+ *
+ * Return whether or not the specified physical page was referenced
+ * in any physical maps.
+ */
+boolean_t
+pmap_is_referenced(vm_page_t m)
+{
+
+ return ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0 &&
+ (m->md.pvh_attrs & PVF_REF) != 0);
+}
+
+/*
* pmap_clear_reference:
*
* Clear the reference bit on the specified physical page.
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index 7c74f00..f316657 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -296,6 +296,7 @@ static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
static void pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte);
static void pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte);
static boolean_t pmap_is_modified_pvh(struct md_page *pvh);
+static boolean_t pmap_is_referenced_pvh(struct md_page *pvh);
static void pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode);
static void pmap_kenter_pde(vm_offset_t va, pd_entry_t newpde);
static vm_page_t pmap_lookup_pt_page(pmap_t pmap, vm_offset_t va);
@@ -4356,6 +4357,51 @@ pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
}
/*
+ * pmap_is_referenced:
+ *
+ * Return whether or not the specified physical page was referenced
+ * in any physical maps.
+ */
+boolean_t
+pmap_is_referenced(vm_page_t m)
+{
+
+ if (m->flags & PG_FICTITIOUS)
+ return (FALSE);
+ if (pmap_is_referenced_pvh(&m->md))
+ return (TRUE);
+ return (pmap_is_referenced_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m))));
+}
+
+/*
+ * Returns TRUE if any of the given mappings were referenced and FALSE
+ * otherwise. Both page and 4mpage mappings are supported.
+ */
+static boolean_t
+pmap_is_referenced_pvh(struct md_page *pvh)
+{
+ pv_entry_t pv;
+ pt_entry_t *pte;
+ pmap_t pmap;
+ boolean_t rv;
+
+ mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+ rv = FALSE;
+ sched_pin();
+ TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
+ pmap = PV_PMAP(pv);
+ PMAP_LOCK(pmap);
+ pte = pmap_pte_quick(pmap, pv->pv_va);
+ rv = (*pte & (PG_A | PG_V)) == (PG_A | PG_V);
+ PMAP_UNLOCK(pmap);
+ if (rv)
+ break;
+ }
+ sched_unpin();
+ return (rv);
+}
+
+/*
* Clear the write and modified bits in each of the given page's mappings.
*/
void
@@ -4961,10 +5007,8 @@ pmap_mincore(pmap_t pmap, vm_offset_t addr)
*/
vm_page_lock_queues();
if ((m->flags & PG_REFERENCED) ||
- pmap_ts_referenced(m)) {
+ pmap_is_referenced(m))
val |= MINCORE_REFERENCED_OTHER;
- vm_page_flag_set(m, PG_REFERENCED);
- }
vm_page_unlock_queues();
}
}
diff --git a/sys/i386/xen/pmap.c b/sys/i386/xen/pmap.c
index ae4d4aa..40e36be 100644
--- a/sys/i386/xen/pmap.c
+++ b/sys/i386/xen/pmap.c
@@ -3718,6 +3718,34 @@ pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
return (rv);
}
+boolean_t
+pmap_is_referenced(vm_page_t m)
+{
+ pv_entry_t pv;
+ pt_entry_t *pte;
+ pmap_t pmap;
+ boolean_t rv;
+
+ rv = FALSE;
+ if (m->flags & PG_FICTITIOUS)
+ return (rv);
+ sched_pin();
+ mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+ TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
+ pmap = PV_PMAP(pv);
+ PMAP_LOCK(pmap);
+ pte = pmap_pte_quick(pmap, pv->pv_va);
+ rv = (*pte & (PG_A | PG_V)) == (PG_A | PG_V);
+ PMAP_UNLOCK(pmap);
+ if (rv)
+ break;
+ }
+ if (*PMAP1)
+ PT_SET_MA(PADDR1, 0);
+ sched_unpin();
+ return (rv);
+}
+
void
pmap_map_readonly(pmap_t pmap, vm_offset_t va, int len)
{
@@ -4145,10 +4173,8 @@ pmap_mincore(pmap_t pmap, vm_offset_t addr)
*/
vm_page_lock_queues();
if ((m->flags & PG_REFERENCED) ||
- pmap_ts_referenced(m)) {
+ pmap_is_referenced(m))
val |= MINCORE_REFERENCED_OTHER;
- vm_page_flag_set(m, PG_REFERENCED);
- }
vm_page_unlock_queues();
}
}
diff --git a/sys/ia64/ia64/pmap.c b/sys/ia64/ia64/pmap.c
index 6fe4cdf..a7c47ef 100644
--- a/sys/ia64/ia64/pmap.c
+++ b/sys/ia64/ia64/pmap.c
@@ -2023,6 +2023,37 @@ pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
}
/*
+ * pmap_is_referenced:
+ *
+ * Return whether or not the specified physical page was referenced
+ * in any physical maps.
+ */
+boolean_t
+pmap_is_referenced(vm_page_t m)
+{
+ struct ia64_lpte *pte;
+ pmap_t oldpmap;
+ pv_entry_t pv;
+ boolean_t rv;
+
+ rv = FALSE;
+ if (m->flags & PG_FICTITIOUS)
+ return (rv);
+ TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
+ PMAP_LOCK(pv->pv_pmap);
+ oldpmap = pmap_switch(pv->pv_pmap);
+ pte = pmap_find_vhpt(pv->pv_va);
+ pmap_switch(oldpmap);
+ KASSERT(pte != NULL, ("pte"));
+ rv = pmap_accessed(pte) ? TRUE : FALSE;
+ PMAP_UNLOCK(pv->pv_pmap);
+ if (rv)
+ break;
+ }
+ return (rv);
+}
+
+/*
* Clear the modify bits on the specified physical page.
*/
void
@@ -2197,10 +2228,8 @@ pmap_mincore(pmap_t pmap, vm_offset_t addr)
* Referenced by someone
*/
vm_page_lock_queues();
- if (pmap_ts_referenced(m)) {
+ if (pmap_is_referenced(m))
val |= MINCORE_REFERENCED_OTHER;
- vm_page_flag_set(m, PG_REFERENCED);
- }
vm_page_unlock_queues();
}
}
diff --git a/sys/mips/mips/pmap.c b/sys/mips/mips/pmap.c
index 9c28f23..3634a27 100644
--- a/sys/mips/mips/pmap.c
+++ b/sys/mips/mips/pmap.c
@@ -2626,6 +2626,20 @@ pmap_clear_modify(vm_page_t m)
}
/*
+ * pmap_is_referenced:
+ *
+ * Return whether or not the specified physical page was referenced
+ * in any physical maps.
+ */
+boolean_t
+pmap_is_referenced(vm_page_t m)
+{
+
+ return ((m->flags & PG_FICTITIOUS) == 0 &&
+ (m->md.pv_flags & PV_TABLE_REF) != 0);
+}
+
+/*
* pmap_clear_reference:
*
* Clear the reference bit on the specified physical page.
@@ -2750,10 +2764,8 @@ pmap_mincore(pmap_t pmap, vm_offset_t addr)
* Referenced by us or someone
*/
vm_page_lock_queues();
- if ((m->flags & PG_REFERENCED) || pmap_ts_referenced(m)) {
+ if ((m->flags & PG_REFERENCED) || pmap_is_referenced(m))
val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
- vm_page_flag_set(m, PG_REFERENCED);
- }
vm_page_unlock_queues();
}
return val;
diff --git a/sys/powerpc/aim/mmu_oea.c b/sys/powerpc/aim/mmu_oea.c
index 8357929..24e7b4e 100644
--- a/sys/powerpc/aim/mmu_oea.c
+++ b/sys/powerpc/aim/mmu_oea.c
@@ -305,6 +305,7 @@ vm_paddr_t moea_extract(mmu_t, pmap_t, vm_offset_t);
vm_page_t moea_extract_and_hold(mmu_t, pmap_t, vm_offset_t, vm_prot_t);
void moea_init(mmu_t);
boolean_t moea_is_modified(mmu_t, vm_page_t);
+boolean_t moea_is_referenced(mmu_t, vm_page_t);
boolean_t moea_ts_referenced(mmu_t, vm_page_t);
vm_offset_t moea_map(mmu_t, vm_offset_t *, vm_offset_t, vm_offset_t, int);
boolean_t moea_page_exists_quick(mmu_t, pmap_t, vm_page_t);
@@ -344,6 +345,7 @@ static mmu_method_t moea_methods[] = {
MMUMETHOD(mmu_extract_and_hold, moea_extract_and_hold),
MMUMETHOD(mmu_init, moea_init),
MMUMETHOD(mmu_is_modified, moea_is_modified),
+ MMUMETHOD(mmu_is_referenced, moea_is_referenced),
MMUMETHOD(mmu_ts_referenced, moea_ts_referenced),
MMUMETHOD(mmu_map, moea_map),
MMUMETHOD(mmu_page_exists_quick,moea_page_exists_quick),
@@ -1269,6 +1271,15 @@ moea_init(mmu_t mmu)
}
boolean_t
+moea_is_referenced(mmu_t mmu, vm_page_t m)
+{
+
+ if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
+ return (FALSE);
+ return (moea_query_bit(m, PTE_REF));
+}
+
+boolean_t
moea_is_modified(mmu_t mmu, vm_page_t m)
{
diff --git a/sys/powerpc/aim/mmu_oea64.c b/sys/powerpc/aim/mmu_oea64.c
index 97fff63..2571587 100644
--- a/sys/powerpc/aim/mmu_oea64.c
+++ b/sys/powerpc/aim/mmu_oea64.c
@@ -379,6 +379,7 @@ vm_paddr_t moea64_extract(mmu_t, pmap_t, vm_offset_t);
vm_page_t moea64_extract_and_hold(mmu_t, pmap_t, vm_offset_t, vm_prot_t);
void moea64_init(mmu_t);
boolean_t moea64_is_modified(mmu_t, vm_page_t);
+boolean_t moea64_is_referenced(mmu_t, vm_page_t);
boolean_t moea64_ts_referenced(mmu_t, vm_page_t);
vm_offset_t moea64_map(mmu_t, vm_offset_t *, vm_offset_t, vm_offset_t, int);
boolean_t moea64_page_exists_quick(mmu_t, pmap_t, vm_page_t);
@@ -416,6 +417,7 @@ static mmu_method_t moea64_bridge_methods[] = {
MMUMETHOD(mmu_extract_and_hold, moea64_extract_and_hold),
MMUMETHOD(mmu_init, moea64_init),
MMUMETHOD(mmu_is_modified, moea64_is_modified),
+ MMUMETHOD(mmu_is_referenced, moea64_is_referenced),
MMUMETHOD(mmu_ts_referenced, moea64_ts_referenced),
MMUMETHOD(mmu_map, moea64_map),
MMUMETHOD(mmu_page_exists_quick,moea64_page_exists_quick),
@@ -1463,6 +1465,15 @@ moea64_init(mmu_t mmu)
}
boolean_t
+moea64_is_referenced(mmu_t mmu, vm_page_t m)
+{
+
+ if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
+ return (FALSE);
+ return (moea64_query_bit(m, PTE_REF));
+}
+
+boolean_t
moea64_is_modified(mmu_t mmu, vm_page_t m)
{
diff --git a/sys/powerpc/booke/pmap.c b/sys/powerpc/booke/pmap.c
index 13e637c..549eaaa 100644
--- a/sys/powerpc/booke/pmap.c
+++ b/sys/powerpc/booke/pmap.c
@@ -288,6 +288,7 @@ static vm_page_t mmu_booke_extract_and_hold(mmu_t, pmap_t, vm_offset_t,
static void mmu_booke_init(mmu_t);
static boolean_t mmu_booke_is_modified(mmu_t, vm_page_t);
static boolean_t mmu_booke_is_prefaultable(mmu_t, pmap_t, vm_offset_t);
+static boolean_t mmu_booke_is_referenced(mmu_t, vm_page_t);
static boolean_t mmu_booke_ts_referenced(mmu_t, vm_page_t);
static vm_offset_t mmu_booke_map(mmu_t, vm_offset_t *, vm_offset_t, vm_offset_t,
int);
@@ -342,6 +343,7 @@ static mmu_method_t mmu_booke_methods[] = {
MMUMETHOD(mmu_init, mmu_booke_init),
MMUMETHOD(mmu_is_modified, mmu_booke_is_modified),
MMUMETHOD(mmu_is_prefaultable, mmu_booke_is_prefaultable),
+ MMUMETHOD(mmu_is_referenced, mmu_booke_is_referenced),
MMUMETHOD(mmu_ts_referenced, mmu_booke_ts_referenced),
MMUMETHOD(mmu_map, mmu_booke_map),
MMUMETHOD(mmu_mincore, mmu_booke_mincore),
@@ -2181,6 +2183,33 @@ mmu_booke_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t addr)
}
/*
+ * Return whether or not the specified physical page was referenced
+ * in any physical maps.
+ */
+static boolean_t
+mmu_booke_is_referenced(mmu_t mmu, vm_page_t m)
+{
+ pte_t *pte;
+ pv_entry_t pv;
+ boolean_t rv;
+
+ mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+ rv = FALSE;
+ if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
+ return (rv);
+ TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
+ PMAP_LOCK(pv->pv_pmap);
+ if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
+ PTE_ISVALID(pte))
+ rv = PTE_ISREFERENCED(pte) ? TRUE : FALSE;
+ PMAP_UNLOCK(pv->pv_pmap);
+ if (rv)
+ break;
+ }
+ return (rv);
+}
+
+/*
* Clear the modify bits on the specified physical page.
*/
static void
diff --git a/sys/powerpc/powerpc/mmu_if.m b/sys/powerpc/powerpc/mmu_if.m
index 5b8ba14..a87e5d8 100644
--- a/sys/powerpc/powerpc/mmu_if.m
+++ b/sys/powerpc/powerpc/mmu_if.m
@@ -346,6 +346,20 @@ METHOD boolean_t is_prefaultable {
/**
+ * @brief Return whether or not the specified physical page was referenced
+ * in any physical maps.
+ *
+ * @params _pg physical page
+ *
+ * @retval boolean TRUE if page has been referenced
+ */
+METHOD boolean_t is_referenced {
+ mmu_t _mmu;
+ vm_page_t _pg;
+};
+
+
+/**
* @brief Return a count of referenced bits for a page, clearing those bits.
* Not all referenced bits need to be cleared, but it is necessary that 0
* only be returned when there are none set.
diff --git a/sys/powerpc/powerpc/pmap_dispatch.c b/sys/powerpc/powerpc/pmap_dispatch.c
index 2b45e17..c16360f 100644
--- a/sys/powerpc/powerpc/pmap_dispatch.c
+++ b/sys/powerpc/powerpc/pmap_dispatch.c
@@ -195,6 +195,14 @@ pmap_is_prefaultable(pmap_t pmap, vm_offset_t va)
}
boolean_t
+pmap_is_referenced(vm_page_t m)
+{
+
+ CTR2(KTR_PMAP, "%s(%p)", __func__, m);
+ return (MMU_IS_REFERENCED(mmu_obj, m));
+}
+
+boolean_t
pmap_ts_referenced(vm_page_t m)
{
diff --git a/sys/sparc64/sparc64/pmap.c b/sys/sparc64/sparc64/pmap.c
index e464559..d97900c 100644
--- a/sys/sparc64/sparc64/pmap.c
+++ b/sys/sparc64/sparc64/pmap.c
@@ -1917,6 +1917,27 @@ pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
return (FALSE);
}
+/*
+ * Return whether or not the specified physical page was referenced
+ * in any physical maps.
+ */
+boolean_t
+pmap_is_referenced(vm_page_t m)
+{
+ struct tte *tp;
+
+ mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+ if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
+ return (FALSE);
+ TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
+ if ((tp->tte_data & TD_PV) == 0)
+ continue;
+ if ((tp->tte_data & TD_REF) != 0)
+ return (TRUE);
+ }
+ return (FALSE);
+}
+
void
pmap_clear_modify(vm_page_t m)
{
diff --git a/sys/sun4v/sun4v/pmap.c b/sys/sun4v/sun4v/pmap.c
index d3b8c79..2633b8e 100644
--- a/sys/sun4v/sun4v/pmap.c
+++ b/sys/sun4v/sun4v/pmap.c
@@ -1592,6 +1592,17 @@ pmap_is_prefaultable(pmap_t pmap, vm_offset_t va)
}
/*
+ * Return whether or not the specified physical page was referenced
+ * in any physical maps.
+ */
+boolean_t
+pmap_is_referenced(vm_page_t m)
+{
+
+ return (tte_get_phys_bit(m, VTD_REF));
+}
+
+/*
* Extract the physical page address associated with the given kernel virtual
* address.
*/
diff --git a/sys/vm/pmap.h b/sys/vm/pmap.h
index 51a3bbe..e4d8e81 100644
--- a/sys/vm/pmap.h
+++ b/sys/vm/pmap.h
@@ -119,6 +119,7 @@ void pmap_growkernel(vm_offset_t);
void pmap_init(void);
boolean_t pmap_is_modified(vm_page_t m);
boolean_t pmap_is_prefaultable(pmap_t pmap, vm_offset_t va);
+boolean_t pmap_is_referenced(vm_page_t m);
boolean_t pmap_ts_referenced(vm_page_t m);
vm_offset_t pmap_map(vm_offset_t *, vm_paddr_t, vm_paddr_t, int);
void pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
diff --git a/sys/vm/vm_mmap.c b/sys/vm/vm_mmap.c
index a47cd6a..88ed3d5 100644
--- a/sys/vm/vm_mmap.c
+++ b/sys/vm/vm_mmap.c
@@ -871,10 +871,8 @@ RestartScan:
pmap_is_modified(m))
mincoreinfo |= MINCORE_MODIFIED_OTHER;
if ((m->flags & PG_REFERENCED) ||
- pmap_ts_referenced(m)) {
- vm_page_flag_set(m, PG_REFERENCED);
+ pmap_is_referenced(m))
mincoreinfo |= MINCORE_REFERENCED_OTHER;
- }
vm_page_unlock_queues();
}
VM_OBJECT_UNLOCK(current->object.vm_object);