author     alc <alc@FreeBSD.org>    2010-05-24 14:26:57 +0000
committer  alc <alc@FreeBSD.org>    2010-05-24 14:26:57 +0000
commit     32b13ee95703577767e8794ce5e896ad8fdbdee7 (patch)
tree       9d773a8e937b9a4faf763308a1843594ec04df60 /sys/powerpc
parent     95cb40b038035cd0d6e2b2b015f41a7d4dec0fcb (diff)
Roughly half of a typical pmap_mincore() implementation is machine-
independent code. Move this code into mincore(), and eliminate the
page queues lock from pmap_mincore().
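In outline, the new division of labor looks something like the sketch below. It is illustrative only: `pte_lookup()`, `pt_entry_t`, `PTE_VALID`, `PTE_M`, and `PTE_R` are hypothetical placeholders for a port's own PTE accessors, and the handling of `*locked_pa` is omitted; only the `pmap_mincore()` signature and the MINCORE_* composition follow the committed interface.

```c
/*
 * Hypothetical sketch of a machine-dependent pmap_mincore() under the
 * new interface.  pte_lookup(), pt_entry_t, PTE_VALID, PTE_M, and
 * PTE_R are placeholders, not real FreeBSD symbols.  The real
 * implementation can also hand a physical address back to mincore()
 * through *locked_pa; that detail is omitted here.
 */
static int
sketch_pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
{
        pt_entry_t pte;
        int val;

        val = 0;
        PMAP_LOCK(pmap);
        pte = pte_lookup(pmap, addr);   /* placeholder PTE walk */
        if (pte & PTE_VALID) {
                val = MINCORE_INCORE;
                if (pte & PTE_M)        /* hardware "changed" bit */
                        val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
                if (pte & PTE_R)        /* hardware "referenced" bit */
                        val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
        }
        PMAP_UNLOCK(pmap);
        return (val);
}
```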
Push down the page queues lock into pmap_clear_modify(),
pmap_clear_reference(), and pmap_is_modified(). Assert that these
functions are never passed an unmanaged page.
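The practical effect on callers can be sketched as follows. This is a hypothetical helper, not committed code: the caller holds the VM object lock, while the page queues lock is now acquired inside the pmap routine itself.

```c
/*
 * Hypothetical caller under the new locking convention.  The VM
 * object lock is held across the dirty-state work; the page queues
 * lock is no longer the caller's responsibility because
 * pmap_clear_modify() takes and releases it internally, and asserts
 * that the page is managed.
 */
static void
sketch_undirty_page(vm_page_t m)
{

        VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
        pmap_clear_modify(m);   /* locks the page queues itself */
        vm_page_undirty(m);     /* dirty field: object lock suffices */
}
```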
Eliminate an inaccurate comment from powerpc/powerpc/mmu_if.m:
Contrary to what the comment says, pmap_mincore() is not simply an
optimization. Without a complete pmap_mincore() implementation,
mincore() cannot return either MINCORE_MODIFIED or MINCORE_REFERENCED
because only the pmap can provide this information.
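A userland illustration of what is at stake: the minimal FreeBSD program below dirties an anonymous page and asks mincore(2) about it. Whether MINCORE_MODIFIED and MINCORE_REFERENCED come back set depends on the architecture's pmap_mincore() support, which is exactly the point above; the Book-E implementation in this diff, for instance, is still a TODO stub.

```c
/* Minimal sketch: dirty one page, then query it with mincore(2). */
#include <sys/types.h>
#include <sys/mman.h>

#include <err.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
        size_t pagesz;
        char *p, vec;

        pagesz = (size_t)sysconf(_SC_PAGESIZE);
        p = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
            MAP_ANON | MAP_PRIVATE, -1, 0);
        if (p == MAP_FAILED)
                err(1, "mmap");
        p[0] = 1;                       /* fault in and dirty the page */
        if (mincore(p, pagesz, &vec) == -1)
                err(1, "mincore");
        printf("incore=%d modified=%d referenced=%d\n",
            (vec & MINCORE_INCORE) != 0,
            (vec & MINCORE_MODIFIED) != 0,
            (vec & MINCORE_REFERENCED) != 0);
        return (0);
}
```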
Eliminate the page queues lock from vfs_setdirty_locked_object(),
vm_pageout_clean(), vm_object_page_collect_flush(), and
vm_object_page_clean(). Generally speaking, these are all accesses
to the page's dirty field, which are synchronized by the containing
vm object's lock.
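The synchronization rule being relied on can be stated as a sketch (illustrative, not committed code): with the object lock held, both the dirty field and the pmap's modified bit can be examined without the page queues lock.

```c
/*
 * Illustrative only: a predicate over a page's dirty state under the
 * new rules.  The containing object's lock synchronizes m->dirty, and
 * after this change pmap_is_modified() likewise asserts the object
 * lock and handles the page queues lock internally.
 */
static boolean_t
sketch_page_is_clean(vm_page_t m)
{

        VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
        return (m->dirty == 0 && !pmap_is_modified(m));
}
```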
Reduce the scope of the page queues lock in vm_object_madvise() and
vm_page_dontneed().
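The scope reduction follows the same pattern, roughly as in the hypothetical fragment below (in the spirit of vm_page_dontneed(), not its actual body): only the queue manipulation itself still runs under the page queues lock.

```c
/*
 * Hypothetical fragment: the reference-bit work no longer needs the
 * caller to hold the page queues lock, so the lock's scope shrinks
 * to the queue operation.
 */
static void
sketch_advise_dontneed(vm_page_t m)
{

        VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
        pmap_clear_reference(m);        /* locks the page queues internally */
        vm_page_lock_queues();
        vm_page_deactivate(m);          /* queue move still needs the lock */
        vm_page_unlock_queues();
}
```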
Reviewed by: kib (an earlier version)
Diffstat (limited to 'sys/powerpc')
 sys/powerpc/aim/mmu_oea.c           | 40
 sys/powerpc/aim/mmu_oea64.c         | 40
 sys/powerpc/booke/pmap.c            | 80
 sys/powerpc/powerpc/mmu_if.m        |  9
 sys/powerpc/powerpc/pmap_dispatch.c |  4

5 files changed, 123 insertions(+), 50 deletions(-)
diff --git a/sys/powerpc/aim/mmu_oea.c b/sys/powerpc/aim/mmu_oea.c
index a2f120b..b5d0ae7 100644
--- a/sys/powerpc/aim/mmu_oea.c
+++ b/sys/powerpc/aim/mmu_oea.c
@@ -1290,29 +1290,57 @@ moea_is_referenced(mmu_t mmu, vm_page_t m)
 boolean_t
 moea_is_modified(mmu_t mmu, vm_page_t m)
 {
+        boolean_t rv;

-        if ((m->flags & (PG_FICTITIOUS |PG_UNMANAGED)) != 0)
-                return (FALSE);
+        KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+            ("moea_is_modified: page %p is not managed", m));

-        return (moea_query_bit(m, PTE_CHG));
+        /*
+         * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be
+         * concurrently set while the object is locked.  Thus, if PG_WRITEABLE
+         * is clear, no PTEs can have PTE_CHG set.
+         */
+        VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+        if ((m->oflags & VPO_BUSY) == 0 &&
+            (m->flags & PG_WRITEABLE) == 0)
+                return (FALSE);
+        vm_page_lock_queues();
+        rv = moea_query_bit(m, PTE_CHG);
+        vm_page_unlock_queues();
+        return (rv);
 }

 void
 moea_clear_reference(mmu_t mmu, vm_page_t m)
 {

-        if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
-                return;
+        KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+            ("moea_clear_reference: page %p is not managed", m));
+        vm_page_lock_queues();
         moea_clear_bit(m, PTE_REF, NULL);
+        vm_page_unlock_queues();
 }

 void
 moea_clear_modify(mmu_t mmu, vm_page_t m)
 {

-        if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
+        KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+            ("moea_clear_modify: page %p is not managed", m));
+        VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+        KASSERT((m->oflags & VPO_BUSY) == 0,
+            ("moea_clear_modify: page %p is busy", m));
+
+        /*
+         * If the page is not PG_WRITEABLE, then no PTEs can have PTE_CHG
+         * set.  If the object containing the page is locked and the page is
+         * not VPO_BUSY, then PG_WRITEABLE cannot be concurrently set.
+         */
+        if ((m->flags & PG_WRITEABLE) == 0)
                 return;
+        vm_page_lock_queues();
         moea_clear_bit(m, PTE_CHG, NULL);
+        vm_page_unlock_queues();
 }

 /*
diff --git a/sys/powerpc/aim/mmu_oea64.c b/sys/powerpc/aim/mmu_oea64.c
index 8b6453a..140e949 100644
--- a/sys/powerpc/aim/mmu_oea64.c
+++ b/sys/powerpc/aim/mmu_oea64.c
@@ -1485,29 +1485,57 @@ moea64_is_referenced(mmu_t mmu, vm_page_t m)
 boolean_t
 moea64_is_modified(mmu_t mmu, vm_page_t m)
 {
+        boolean_t rv;

-        if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
-                return (FALSE);
+        KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+            ("moea64_is_modified: page %p is not managed", m));

-        return (moea64_query_bit(m, LPTE_CHG));
+        /*
+         * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be
+         * concurrently set while the object is locked.  Thus, if PG_WRITEABLE
+         * is clear, no PTEs can have LPTE_CHG set.
+         */
+        VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+        if ((m->oflags & VPO_BUSY) == 0 &&
+            (m->flags & PG_WRITEABLE) == 0)
+                return (FALSE);
+        vm_page_lock_queues();
+        rv = moea64_query_bit(m, LPTE_CHG);
+        vm_page_unlock_queues();
+        return (rv);
 }

 void
 moea64_clear_reference(mmu_t mmu, vm_page_t m)
 {

-        if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
-                return;
+        KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+            ("moea64_clear_reference: page %p is not managed", m));
+        vm_page_lock_queues();
         moea64_clear_bit(m, LPTE_REF, NULL);
+        vm_page_unlock_queues();
 }

 void
 moea64_clear_modify(mmu_t mmu, vm_page_t m)
 {

-        if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
+        KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+            ("moea64_clear_modify: page %p is not managed", m));
+        VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+        KASSERT((m->oflags & VPO_BUSY) == 0,
+            ("moea64_clear_modify: page %p is busy", m));
+
+        /*
+         * If the page is not PG_WRITEABLE, then no PTEs can have LPTE_CHG
+         * set.  If the object containing the page is locked and the page is
+         * not VPO_BUSY, then PG_WRITEABLE cannot be concurrently set.
+         */
+        if ((m->flags & PG_WRITEABLE) == 0)
                 return;
+        vm_page_lock_queues();
         moea64_clear_bit(m, LPTE_CHG, NULL);
+        vm_page_unlock_queues();
 }

 /*
diff --git a/sys/powerpc/booke/pmap.c b/sys/powerpc/booke/pmap.c
index 81a337f..cd29a71 100644
--- a/sys/powerpc/booke/pmap.c
+++ b/sys/powerpc/booke/pmap.c
@@ -292,7 +292,8 @@ static boolean_t mmu_booke_is_referenced(mmu_t, vm_page_t);
 static boolean_t mmu_booke_ts_referenced(mmu_t, vm_page_t);
 static vm_offset_t mmu_booke_map(mmu_t, vm_offset_t *, vm_offset_t, vm_offset_t,
     int);
-static int mmu_booke_mincore(mmu_t, pmap_t, vm_offset_t);
+static int mmu_booke_mincore(mmu_t, pmap_t, vm_offset_t,
+    vm_paddr_t *);
 static void mmu_booke_object_init_pt(mmu_t, pmap_t, vm_offset_t, vm_object_t,
     vm_pindex_t, vm_size_t);
 static boolean_t mmu_booke_page_exists_quick(mmu_t, pmap_t, vm_page_t);
@@ -2155,26 +2156,35 @@ mmu_booke_is_modified(mmu_t mmu, vm_page_t m)
 {
         pte_t *pte;
         pv_entry_t pv;
+        boolean_t rv;

-        mtx_assert(&vm_page_queue_mtx, MA_OWNED);
-        if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
-                return (FALSE);
+        KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+            ("mmu_booke_is_modified: page %p is not managed", m));
+        rv = FALSE;

+        /*
+         * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be
+         * concurrently set while the object is locked.  Thus, if PG_WRITEABLE
+         * is clear, no PTEs can be modified.
+         */
+        VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+        if ((m->oflags & VPO_BUSY) == 0 &&
+            (m->flags & PG_WRITEABLE) == 0)
+                return (rv);
+        vm_page_lock_queues();
         TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
                 PMAP_LOCK(pv->pv_pmap);
-                if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
-                        if (!PTE_ISVALID(pte))
-                                goto make_sure_to_unlock;
-
-                        if (PTE_ISMODIFIED(pte)) {
-                                PMAP_UNLOCK(pv->pv_pmap);
-                                return (TRUE);
-                        }
+                if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
+                    PTE_ISVALID(pte)) {
+                        if (PTE_ISMODIFIED(pte))
+                                rv = TRUE;
                 }
-make_sure_to_unlock:
                 PMAP_UNLOCK(pv->pv_pmap);
+                if (rv)
+                        break;
         }
-        return (FALSE);
+        vm_page_unlock_queues();
+        return (rv);
 }

 /*
@@ -2224,16 +2234,24 @@ mmu_booke_clear_modify(mmu_t mmu, vm_page_t m)
         pte_t *pte;
         pv_entry_t pv;

-        mtx_assert(&vm_page_queue_mtx, MA_OWNED);
-        if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
-                return;
+        KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+            ("mmu_booke_clear_modify: page %p is not managed", m));
+        VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+        KASSERT((m->oflags & VPO_BUSY) == 0,
+            ("mmu_booke_clear_modify: page %p is busy", m));

+        /*
+         * If the page is not PG_WRITEABLE, then no PTEs can be modified.
+         * If the object containing the page is locked and the page is not
+         * VPO_BUSY, then PG_WRITEABLE cannot be concurrently set.
+         */
+        if ((m->flags & PG_WRITEABLE) == 0)
+                return;
+        vm_page_lock_queues();
         TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
                 PMAP_LOCK(pv->pv_pmap);
-                if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
-                        if (!PTE_ISVALID(pte))
-                                goto make_sure_to_unlock;
-
+                if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
+                    PTE_ISVALID(pte)) {
                         mtx_lock_spin(&tlbivax_mutex);
                         tlb_miss_lock();

@@ -2246,9 +2264,9 @@ mmu_booke_clear_modify(mmu_t mmu, vm_page_t m)
                         tlb_miss_unlock();
                         mtx_unlock_spin(&tlbivax_mutex);
                 }
-make_sure_to_unlock:
                 PMAP_UNLOCK(pv->pv_pmap);
         }
+        vm_page_unlock_queues();
 }

 /*
@@ -2310,16 +2328,13 @@ mmu_booke_clear_reference(mmu_t mmu, vm_page_t m)
         pte_t *pte;
         pv_entry_t pv;

-        mtx_assert(&vm_page_queue_mtx, MA_OWNED);
-        if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
-                return;
-
+        KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+            ("mmu_booke_clear_reference: page %p is not managed", m));
+        vm_page_lock_queues();
         TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
                 PMAP_LOCK(pv->pv_pmap);
-                if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
-                        if (!PTE_ISVALID(pte))
-                                goto make_sure_to_unlock;
-
+                if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
+                    PTE_ISVALID(pte)) {
                         if (PTE_ISREFERENCED(pte)) {
                                 mtx_lock_spin(&tlbivax_mutex);
                                 tlb_miss_lock();
@@ -2331,9 +2346,9 @@ mmu_booke_clear_reference(mmu_t mmu, vm_page_t m)
                                 mtx_unlock_spin(&tlbivax_mutex);
                         }
                 }
-make_sure_to_unlock:
                 PMAP_UNLOCK(pv->pv_pmap);
         }
+        vm_page_unlock_queues();
 }

 /*
@@ -2632,7 +2647,8 @@ mmu_booke_object_init_pt(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
  * Perform the pmap work for mincore.
  */
 static int
-mmu_booke_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr)
+mmu_booke_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
+    vm_paddr_t *locked_pa)
 {

         TODO;
diff --git a/sys/powerpc/powerpc/mmu_if.m b/sys/powerpc/powerpc/mmu_if.m
index a87e5d8..d31e541 100644
--- a/sys/powerpc/powerpc/mmu_if.m
+++ b/sys/powerpc/powerpc/mmu_if.m
@@ -90,7 +90,8 @@ CODE {
                 return;
         }

-        static int mmu_null_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr)
+        static int mmu_null_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
+            vm_paddr_t *locked_pa)
         {
                 return (0);
         }
@@ -633,12 +634,11 @@ METHOD void zero_page_idle {

 /**
- * @brief Extract mincore(2) information from a mapping. This routine is
- * optional and is an optimisation: the mincore code will call is_modified
- * and ts_referenced if no result is returned.
+ * @brief Extract mincore(2) information from a mapping.
  *
  * @param _pmap         physical map
  * @param _addr         page virtual address
+ * @param _locked_pa    page physical address
  *
  * @retval 0            no result
  * @retval non-zero     mincore(2) flag values
@@ -647,6 +647,7 @@ METHOD int mincore {
         mmu_t           _mmu;
         pmap_t          _pmap;
         vm_offset_t     _addr;
+        vm_paddr_t      *_locked_pa;
 } DEFAULT mmu_null_mincore;
diff --git a/sys/powerpc/powerpc/pmap_dispatch.c b/sys/powerpc/powerpc/pmap_dispatch.c
index c16360f..dd94685 100644
--- a/sys/powerpc/powerpc/pmap_dispatch.c
+++ b/sys/powerpc/powerpc/pmap_dispatch.c
@@ -360,11 +360,11 @@ pmap_zero_page_idle(vm_page_t m)
 }

 int
-pmap_mincore(pmap_t pmap, vm_offset_t addr)
+pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
 {
         CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, pmap, addr);

-        return (MMU_MINCORE(mmu_obj, pmap, addr));
+        return (MMU_MINCORE(mmu_obj, pmap, addr, locked_pa));
 }

 void