author		alc <alc@FreeBSD.org>	2010-05-24 14:26:57 +0000
committer	alc <alc@FreeBSD.org>	2010-05-24 14:26:57 +0000
commit		32b13ee95703577767e8794ce5e896ad8fdbdee7 (patch)
tree		9d773a8e937b9a4faf763308a1843594ec04df60	/sys/i386/xen/pmap.c
parent		95cb40b038035cd0d6e2b2b015f41a7d4dec0fcb (diff)
Roughly half of a typical pmap_mincore() implementation is machine-independent code. Move this code into mincore(), and eliminate the page queues lock from pmap_mincore().

Push down the page queues lock into pmap_clear_modify(), pmap_clear_reference(), and pmap_is_modified(). Assert that these functions are never passed an unmanaged page.

Eliminate an inaccurate comment from powerpc/powerpc/mmu_if.m: Contrary to what the comment says, pmap_mincore() is not simply an optimization. Without a complete pmap_mincore() implementation, mincore() cannot return either MINCORE_MODIFIED or MINCORE_REFERENCED because only the pmap can provide this information.

Eliminate the page queues lock from vfs_setdirty_locked_object(), vm_pageout_clean(), vm_object_page_collect_flush(), and vm_object_page_clean(). Generally speaking, these are all accesses to the page's dirty field, which are synchronized by the containing vm object's lock.

Reduce the scope of the page queues lock in vm_object_madvise() and vm_page_dontneed().

Reviewed by:	kib (an earlier version)
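Under the new contract (see the pmap_mincore() hunk at the bottom of this diff), the pmap layer returns whatever MINCORE_* bits it can derive from the PTE alone; when the mapping is managed and the "modified/referenced by others" bits are still unknown, it returns with the page's physical-address lock held and *locked_pa set, so the machine-independent caller can examine the vm_page. The following is only a rough sketch of such a caller, not the actual rewritten mincore() in sys/vm/vm_mmap.c: mincore_one_page() is a hypothetical helper, the real code additionally takes the page's object lock (and can then use pmap_is_modified()/pmap_is_referenced()) and iterates over a whole address range, and PA_UNLOCK is assumed to be the unconditional counterpart of the PA_UNLOCK_COND seen in the diff.

#include <sys/param.h>
#include <sys/mman.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_page.h>

static int
mincore_one_page(pmap_t pmap, vm_offset_t addr)
{
	vm_paddr_t locked_pa = 0;
	vm_page_t m;
	int val;

	/* MD layer: report what can be proven from this pmap's PTE. */
	val = pmap_mincore(pmap, addr, &locked_pa);
	if (locked_pa != 0) {
		/*
		 * The mapping is managed, but the pmap could not prove
		 * both "modified" and "referenced" by itself, so consult
		 * the vm_page.  (The real mincore() does this under the
		 * page's object lock and also re-checks other mappings
		 * via pmap_is_modified()/pmap_is_referenced().)
		 */
		m = PHYS_TO_VM_PAGE(locked_pa);
		if (m->dirty != 0)
			val |= MINCORE_MODIFIED_OTHER;
		if ((m->flags & PG_REFERENCED) != 0)
			val |= MINCORE_REFERENCED_OTHER;
		PA_UNLOCK(locked_pa);
	}
	return (val);
}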
Diffstat (limited to 'sys/i386/xen/pmap.c')
-rw-r--r--	sys/i386/xen/pmap.c	107
1 file changed, 53 insertions(+), 54 deletions(-)
diff --git a/sys/i386/xen/pmap.c b/sys/i386/xen/pmap.c
index ac1a17d..5e04680 100644
--- a/sys/i386/xen/pmap.c
+++ b/sys/i386/xen/pmap.c
@@ -3663,12 +3663,21 @@ pmap_is_modified(vm_page_t m)
pmap_t pmap;
boolean_t rv;
+ KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+ ("pmap_is_modified: page %p is not managed", m));
rv = FALSE;
- if (m->flags & PG_FICTITIOUS)
- return (rv);
+ /*
+ * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be
+ * concurrently set while the object is locked. Thus, if PG_WRITEABLE
+ * is clear, no PTEs can have PG_M set.
+ */
+ VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ if ((m->oflags & VPO_BUSY) == 0 &&
+ (m->flags & PG_WRITEABLE) == 0)
+ return (rv);
+ vm_page_lock_queues();
sched_pin();
- mtx_assert(&vm_page_queue_mtx, MA_OWNED);
TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
pmap = PV_PMAP(pv);
PMAP_LOCK(pmap);
@@ -3681,6 +3690,7 @@ pmap_is_modified(vm_page_t m)
if (*PMAP1)
PT_SET_MA(PADDR1, 0);
sched_unpin();
+ vm_page_unlock_queues();
return (rv);
}
@@ -3887,9 +3897,20 @@ pmap_clear_modify(vm_page_t m)
pmap_t pmap;
pt_entry_t *pte;
- mtx_assert(&vm_page_queue_mtx, MA_OWNED);
- if ((m->flags & PG_FICTITIOUS) != 0)
+ KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+ ("pmap_clear_modify: page %p is not managed", m));
+ VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ KASSERT((m->oflags & VPO_BUSY) == 0,
+ ("pmap_clear_modify: page %p is busy", m));
+
+ /*
+ * If the page is not PG_WRITEABLE, then no PTEs can have PG_M set.
+ * If the object containing the page is locked and the page is not
+ * VPO_BUSY, then PG_WRITEABLE cannot be concurrently set.
+ */
+ if ((m->flags & PG_WRITEABLE) == 0)
return;
+ vm_page_lock_queues();
sched_pin();
TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
pmap = PV_PMAP(pv);
@@ -3907,6 +3928,7 @@ pmap_clear_modify(vm_page_t m)
PMAP_UNLOCK(pmap);
}
sched_unpin();
+ vm_page_unlock_queues();
}
/*
@@ -3921,9 +3943,9 @@ pmap_clear_reference(vm_page_t m)
pmap_t pmap;
pt_entry_t *pte;
- mtx_assert(&vm_page_queue_mtx, MA_OWNED);
- if ((m->flags & PG_FICTITIOUS) != 0)
- return;
+ KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+ ("pmap_clear_reference: page %p is not managed", m));
+ vm_page_lock_queues();
sched_pin();
TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
pmap = PV_PMAP(pv);
@@ -3941,6 +3963,7 @@ pmap_clear_reference(vm_page_t m)
PMAP_UNLOCK(pmap);
}
sched_unpin();
+ vm_page_unlock_queues();
}
/*
@@ -4133,60 +4156,36 @@ pmap_change_attr(va, size, mode)
* perform the pmap work for mincore
*/
int
-pmap_mincore(pmap_t pmap, vm_offset_t addr)
+pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
{
pt_entry_t *ptep, pte;
- vm_page_t m;
- int val = 0;
+ vm_paddr_t pa;
+ int val;
PMAP_LOCK(pmap);
+retry:
ptep = pmap_pte(pmap, addr);
pte = (ptep != NULL) ? PT_GET(ptep) : 0;
pmap_pte_release(ptep);
- PMAP_UNLOCK(pmap);
-
- if (pte != 0) {
- vm_paddr_t pa;
-
- val = MINCORE_INCORE;
- if ((pte & PG_MANAGED) == 0)
- return val;
-
+ val = 0;
+ if ((pte & PG_V) != 0) {
+ val |= MINCORE_INCORE;
+ if ((pte & (PG_M | PG_RW)) == (PG_M | PG_RW))
+ val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
+ if ((pte & PG_A) != 0)
+ val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
+ }
+ if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
+ (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) &&
+ (pte & (PG_MANAGED | PG_V)) == (PG_MANAGED | PG_V)) {
pa = pte & PG_FRAME;
-
- m = PHYS_TO_VM_PAGE(pa);
-
- /*
- * Modified by us
- */
- if (pte & PG_M)
- val |= MINCORE_MODIFIED|MINCORE_MODIFIED_OTHER;
- else {
- /*
- * Modified by someone else
- */
- vm_page_lock_queues();
- if (m->dirty || pmap_is_modified(m))
- val |= MINCORE_MODIFIED_OTHER;
- vm_page_unlock_queues();
- }
- /*
- * Referenced by us
- */
- if (pte & PG_A)
- val |= MINCORE_REFERENCED|MINCORE_REFERENCED_OTHER;
- else {
- /*
- * Referenced by someone else
- */
- vm_page_lock_queues();
- if ((m->flags & PG_REFERENCED) ||
- pmap_is_referenced(m))
- val |= MINCORE_REFERENCED_OTHER;
- vm_page_unlock_queues();
- }
- }
- return val;
+ /* Ensure that "PHYS_TO_VM_PAGE(pa)->object" doesn't change. */
+ if (vm_page_pa_tryrelock(pmap, pa, locked_pa))
+ goto retry;
+ } else
+ PA_UNLOCK_COND(*locked_pa);
+ PMAP_UNLOCK(pmap);
+ return (val);
}
void