author     alc <alc@FreeBSD.org>    2010-05-24 14:26:57 +0000
committer  alc <alc@FreeBSD.org>    2010-05-24 14:26:57 +0000
commit     32b13ee95703577767e8794ce5e896ad8fdbdee7 (patch)
tree       9d773a8e937b9a4faf763308a1843594ec04df60 /sys/i386
parent     95cb40b038035cd0d6e2b2b015f41a7d4dec0fcb (diff)
Roughly half of a typical pmap_mincore() implementation is machine-
independent code. Move this code into mincore(), and eliminate the
page queues lock from pmap_mincore().
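
In sketch form, the machine-independent half is the fallback that consults
the vm_page itself when the pmap's PTE bits alone could not prove
modification or reference. The fragment below is illustrative, not a
verbatim quote of sys/vm/vm_mmap.c; the variable name "mincoreinfo" is
assumed:

    /*
     * Now done once in mincore(), under the object lock, instead of
     * being duplicated in every pmap_mincore() (see the copies removed
     * in the diff below):
     */
    if ((mincoreinfo & MINCORE_MODIFIED_OTHER) == 0 &&
        (m->dirty != 0 || pmap_is_modified(m)))
        mincoreinfo |= MINCORE_MODIFIED_OTHER;
    if ((mincoreinfo & MINCORE_REFERENCED_OTHER) == 0 &&
        ((m->flags & PG_REFERENCED) != 0 || pmap_is_referenced(m)))
        mincoreinfo |= MINCORE_REFERENCED_OTHER;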
Push down the page queues lock into pmap_clear_modify(),
pmap_clear_reference(), and pmap_is_modified(). Assert that these
functions are never passed an unmanaged page.
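
The push-down follows the same pattern in all three functions; condensed
from the diff below, the caller-held lock becomes an internal
acquire/release, and the old silent early return for fictitious pages
becomes an assertion:

    /*
     * Before: mtx_assert(&vm_page_queue_mtx, MA_OWNED), i.e. the lock
     * was the caller's responsibility.  After:
     */
    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
        ("pmap_clear_reference: page %p is not managed", m));
    vm_page_lock_queues();
    sched_pin();
    /* ... walk the page's pv list, clearing PG_A in each mapping ... */
    sched_unpin();
    vm_page_unlock_queues();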
Eliminate an inaccurate comment from powerpc/powerpc/mmu_if.m:
Contrary to what the comment says, pmap_mincore() is not simply an
optimization. Without a complete pmap_mincore() implementation,
mincore() cannot return either MINCORE_MODIFIED or MINCORE_REFERENCED
because only the pmap can provide this information.
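
The dependence on the pmap is visible in the PTE tests below (quoted from
the i386 version in this diff); PG_M and PG_A live in the hardware page
table entry, which only machine-dependent code can read:

    if ((pte & (PG_M | PG_RW)) == (PG_M | PG_RW))
        val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
    if ((pte & PG_A) != 0)
        val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;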
Eliminate the page queues lock from vfs_setdirty_locked_object(),
vm_pageout_clean(), vm_object_page_collect_flush(), and
vm_object_page_clean(). Generally speaking, these are all accesses
to the page's dirty field, which are synchronized by the containing
vm object's lock.
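
As an illustrative sketch (not a verbatim excerpt from those functions) of
the rule being relied upon, holding the object's lock alone is sufficient
to examine a page's dirty field:

    VM_OBJECT_LOCK(object);
    TAILQ_FOREACH(m, &object->memq, listq) {
        /*
         * m->dirty is synchronized by the object lock; no
         * vm_page_lock_queues() is needed around this test.
         */
        if (m->dirty != 0) {
            /* ... include the page in the cleaning pass ... */
        }
    }
    VM_OBJECT_UNLOCK(object);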
Reduce the scope of the page queues lock in vm_object_madvise() and
vm_page_dontneed().
Reviewed by: kib (an earlier version)
Diffstat (limited to 'sys/i386')
-rw-r--r--   sys/i386/i386/pmap.c   107
-rw-r--r--   sys/i386/xen/pmap.c    107

2 files changed, 109 insertions, 105 deletions
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index 591aed8..d20c15b 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -4294,12 +4294,25 @@ pmap_remove_pages(pmap_t pmap)
 boolean_t
 pmap_is_modified(vm_page_t m)
 {
+    boolean_t rv;

-    if (m->flags & PG_FICTITIOUS)
+    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+        ("pmap_is_modified: page %p is not managed", m));
+
+    /*
+     * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be
+     * concurrently set while the object is locked.  Thus, if PG_WRITEABLE
+     * is clear, no PTEs can have PG_M set.
+     */
+    VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+    if ((m->oflags & VPO_BUSY) == 0 &&
+        (m->flags & PG_WRITEABLE) == 0)
         return (FALSE);
-    if (pmap_is_modified_pvh(&m->md))
-        return (TRUE);
-    return (pmap_is_modified_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m))));
+    vm_page_lock_queues();
+    rv = pmap_is_modified_pvh(&m->md) ||
+        pmap_is_modified_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m)));
+    vm_page_unlock_queues();
+    return (rv);
 }

 /*
@@ -4563,9 +4576,20 @@ pmap_clear_modify(vm_page_t m)
     pt_entry_t oldpte, *pte;
     vm_offset_t va;

-    mtx_assert(&vm_page_queue_mtx, MA_OWNED);
-    if ((m->flags & PG_FICTITIOUS) != 0)
+    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+        ("pmap_clear_modify: page %p is not managed", m));
+    VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+    KASSERT((m->oflags & VPO_BUSY) == 0,
+        ("pmap_clear_modify: page %p is busy", m));
+
+    /*
+     * If the page is not PG_WRITEABLE, then no PTEs can have PG_M set.
+     * If the object containing the page is locked and the page is not
+     * VPO_BUSY, then PG_WRITEABLE cannot be concurrently set.
+     */
+    if ((m->flags & PG_WRITEABLE) == 0)
         return;
+    vm_page_lock_queues();
     sched_pin();
     pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
     TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
@@ -4623,6 +4647,7 @@ pmap_clear_modify(vm_page_t m)
         PMAP_UNLOCK(pmap);
     }
     sched_unpin();
+    vm_page_unlock_queues();
 }

 /*
@@ -4640,9 +4665,9 @@ pmap_clear_reference(vm_page_t m)
     pt_entry_t *pte;
     vm_offset_t va;

-    mtx_assert(&vm_page_queue_mtx, MA_OWNED);
-    if ((m->flags & PG_FICTITIOUS) != 0)
-        return;
+    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+        ("pmap_clear_reference: page %p is not managed", m));
+    vm_page_lock_queues();
     sched_pin();
     pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
     TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
@@ -4686,6 +4711,7 @@
         PMAP_UNLOCK(pmap);
     }
     sched_unpin();
+    vm_page_unlock_queues();
 }

 /*
@@ -4955,72 +4981,51 @@ pmap_change_attr(vm_offset_t va, vm_size_t size, int mode)
  * perform the pmap work for mincore
  */
 int
-pmap_mincore(pmap_t pmap, vm_offset_t addr)
+pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
 {
     pd_entry_t *pdep;
     pt_entry_t *ptep, pte;
     vm_paddr_t pa;
-    vm_page_t m;
-    int val = 0;
-
+    int val;
+
     PMAP_LOCK(pmap);
+retry:
     pdep = pmap_pde(pmap, addr);
     if (*pdep != 0) {
         if (*pdep & PG_PS) {
             pte = *pdep;
-            val = MINCORE_SUPER;
             /* Compute the physical address of the 4KB page. */
             pa = ((*pdep & PG_PS_FRAME) | (addr & PDRMASK)) &
                 PG_FRAME;
+            val = MINCORE_SUPER;
         } else {
             ptep = pmap_pte(pmap, addr);
             pte = *ptep;
             pmap_pte_release(ptep);
             pa = pte & PG_FRAME;
+            val = 0;
         }
     } else {
         pte = 0;
         pa = 0;
+        val = 0;
     }
-    PMAP_UNLOCK(pmap);
-
-    if (pte != 0) {
+    if ((pte & PG_V) != 0) {
         val |= MINCORE_INCORE;
-        if ((pte & PG_MANAGED) == 0)
-            return (val);
-
-        m = PHYS_TO_VM_PAGE(pa);
-
-        /*
-         * Modified by us
-         */
         if ((pte & (PG_M | PG_RW)) == (PG_M | PG_RW))
-            val |= MINCORE_MODIFIED|MINCORE_MODIFIED_OTHER;
-        else {
-            /*
-             * Modified by someone else
-             */
-            vm_page_lock_queues();
-            if (m->dirty || pmap_is_modified(m))
-                val |= MINCORE_MODIFIED_OTHER;
-            vm_page_unlock_queues();
-        }
-        /*
-         * Referenced by us
-         */
-        if (pte & PG_A)
-            val |= MINCORE_REFERENCED|MINCORE_REFERENCED_OTHER;
-        else {
-            /*
-             * Referenced by someone else
-             */
-            vm_page_lock_queues();
-            if ((m->flags & PG_REFERENCED) ||
-                pmap_is_referenced(m))
-                val |= MINCORE_REFERENCED_OTHER;
-            vm_page_unlock_queues();
-        }
-    }
+            val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
+        if ((pte & PG_A) != 0)
+            val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
+    }
+    if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
+        (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) &&
+        (pte & (PG_MANAGED | PG_V)) == (PG_MANAGED | PG_V)) {
+        /* Ensure that "PHYS_TO_VM_PAGE(pa)->object" doesn't change. */
+        if (vm_page_pa_tryrelock(pmap, pa, locked_pa))
+            goto retry;
+    } else
+        PA_UNLOCK_COND(*locked_pa);
+    PMAP_UNLOCK(pmap);
     return (val);
 }

diff --git a/sys/i386/xen/pmap.c b/sys/i386/xen/pmap.c
index ac1a17d..5e04680 100644
--- a/sys/i386/xen/pmap.c
+++ b/sys/i386/xen/pmap.c
@@ -3663,12 +3663,21 @@ pmap_is_modified(vm_page_t m)
     pmap_t pmap;
     boolean_t rv;

+    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+        ("pmap_is_modified: page %p is not managed", m));
     rv = FALSE;
-    if (m->flags & PG_FICTITIOUS)
-        return (rv);
+
+    /*
+     * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be
+     * concurrently set while the object is locked.  Thus, if PG_WRITEABLE
+     * is clear, no PTEs can have PG_M set.
+     */
+    VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+    if ((m->oflags & VPO_BUSY) == 0 &&
+        (m->flags & PG_WRITEABLE) == 0)
+        return (rv);
+    vm_page_lock_queues();
     sched_pin();
-    mtx_assert(&vm_page_queue_mtx, MA_OWNED);
     TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
         pmap = PV_PMAP(pv);
         PMAP_LOCK(pmap);
@@ -3681,6 +3690,7 @@ pmap_is_modified(vm_page_t m)
     if (*PMAP1)
         PT_SET_MA(PADDR1, 0);
     sched_unpin();
+    vm_page_unlock_queues();
     return (rv);
 }

@@ -3887,9 +3897,20 @@ pmap_clear_modify(vm_page_t m)
     pmap_t pmap;
     pt_entry_t *pte;

-    mtx_assert(&vm_page_queue_mtx, MA_OWNED);
-    if ((m->flags & PG_FICTITIOUS) != 0)
+    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+        ("pmap_clear_modify: page %p is not managed", m));
+    VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+    KASSERT((m->oflags & VPO_BUSY) == 0,
+        ("pmap_clear_modify: page %p is busy", m));
+
+    /*
+     * If the page is not PG_WRITEABLE, then no PTEs can have PG_M set.
+     * If the object containing the page is locked and the page is not
+     * VPO_BUSY, then PG_WRITEABLE cannot be concurrently set.
+     */
+    if ((m->flags & PG_WRITEABLE) == 0)
         return;
+    vm_page_lock_queues();
     sched_pin();
     TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
         pmap = PV_PMAP(pv);
@@ -3907,6 +3928,7 @@
         PMAP_UNLOCK(pmap);
     }
     sched_unpin();
+    vm_page_unlock_queues();
 }

 /*
@@ -3921,9 +3943,9 @@ pmap_clear_reference(vm_page_t m)
     pmap_t pmap;
     pt_entry_t *pte;

-    mtx_assert(&vm_page_queue_mtx, MA_OWNED);
-    if ((m->flags & PG_FICTITIOUS) != 0)
-        return;
+    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+        ("pmap_clear_reference: page %p is not managed", m));
+    vm_page_lock_queues();
     sched_pin();
     TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
         pmap = PV_PMAP(pv);
@@ -3941,6 +3963,7 @@
         PMAP_UNLOCK(pmap);
     }
     sched_unpin();
+    vm_page_unlock_queues();
 }

 /*
@@ -4133,60 +4156,36 @@ pmap_change_attr(va, size, mode)
  * perform the pmap work for mincore
  */
 int
-pmap_mincore(pmap_t pmap, vm_offset_t addr)
+pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
 {
     pt_entry_t *ptep, pte;
-    vm_page_t m;
-    int val = 0;
+    vm_paddr_t pa;
+    int val;

     PMAP_LOCK(pmap);
+retry:
     ptep = pmap_pte(pmap, addr);
     pte = (ptep != NULL) ? PT_GET(ptep) : 0;
     pmap_pte_release(ptep);
-    PMAP_UNLOCK(pmap);
-
-    if (pte != 0) {
-        vm_paddr_t pa;
-
-        val = MINCORE_INCORE;
-        if ((pte & PG_MANAGED) == 0)
-            return val;
-
+    val = 0;
+    if ((pte & PG_V) != 0) {
+        val |= MINCORE_INCORE;
+        if ((pte & (PG_M | PG_RW)) == (PG_M | PG_RW))
+            val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
+        if ((pte & PG_A) != 0)
+            val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
+    }
+    if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
+        (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) &&
+        (pte & (PG_MANAGED | PG_V)) == (PG_MANAGED | PG_V)) {
         pa = pte & PG_FRAME;
-
-        m = PHYS_TO_VM_PAGE(pa);
-
-        /*
-         * Modified by us
-         */
-        if (pte & PG_M)
-            val |= MINCORE_MODIFIED|MINCORE_MODIFIED_OTHER;
-        else {
-            /*
-             * Modified by someone else
-             */
-            vm_page_lock_queues();
-            if (m->dirty || pmap_is_modified(m))
-                val |= MINCORE_MODIFIED_OTHER;
-            vm_page_unlock_queues();
-        }
-        /*
-         * Referenced by us
-         */
-        if (pte & PG_A)
-            val |= MINCORE_REFERENCED|MINCORE_REFERENCED_OTHER;
-        else {
-            /*
-             * Referenced by someone else
-             */
-            vm_page_lock_queues();
-            if ((m->flags & PG_REFERENCED) ||
-                pmap_is_referenced(m))
-                val |= MINCORE_REFERENCED_OTHER;
-            vm_page_unlock_queues();
-        }
-    }
-    return val;
+        /* Ensure that "PHYS_TO_VM_PAGE(pa)->object" doesn't change. */
+        if (vm_page_pa_tryrelock(pmap, pa, locked_pa))
+            goto retry;
+    } else
+        PA_UNLOCK_COND(*locked_pa);
+    PMAP_UNLOCK(pmap);
+    return (val);
 }

 void