author    | alc <alc@FreeBSD.org> | 2010-05-24 14:26:57 +0000
committer | alc <alc@FreeBSD.org> | 2010-05-24 14:26:57 +0000
commit    | 32b13ee95703577767e8794ce5e896ad8fdbdee7 (patch)
tree      | 9d773a8e937b9a4faf763308a1843594ec04df60 /sys/sparc64
parent    | 95cb40b038035cd0d6e2b2b015f41a7d4dec0fcb (diff)
Roughly half of a typical pmap_mincore() implementation is machine-
independent code. Move this code into mincore(), and eliminate the
page queues lock from pmap_mincore().
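
The new contract can be pictured from the machine-independent side. The fragment below is a hypothetical, simplified sketch (the helper name mincore_page_status() is invented; the committed machine-independent half lives inline in mincore() in vm/vm_mmap.c and additionally handles lock ordering, retries, and unmapped addresses). It only illustrates how a caller might consume the MINCORE_* bits and the locked physical address that the reworked pmap_mincore() hands back for a managed mapping.

/*
 * Illustrative kernel fragment only; usual VM headers assumed.
 */
static int
mincore_page_status(pmap_t pmap, vm_offset_t addr)
{
	vm_paddr_t locked_pa;
	vm_page_t m;
	int mincoreinfo;

	locked_pa = 0;
	/*
	 * The MD half reports only what the page tables know:
	 * MINCORE_INCORE, plus MINCORE_MODIFIED/MINCORE_REFERENCED for
	 * this pmap's own mapping.  For a managed mapping it also
	 * returns the physical address with the page lock held.
	 */
	mincoreinfo = pmap_mincore(pmap, addr, &locked_pa);
	if (locked_pa != 0) {
		m = PHYS_TO_VM_PAGE(locked_pa);
		vm_page_unlock(m);
		/*
		 * MI half: state the current pmap cannot see, such as the
		 * page's dirty field or mappings by other processes, is
		 * derived here (simplified; lock-ordering details elided).
		 */
		VM_OBJECT_LOCK(m->object);
		if (m->dirty != 0 || pmap_is_modified(m))
			mincoreinfo |= MINCORE_MODIFIED_OTHER;
		if ((m->flags & PG_REFERENCED) != 0)
			mincoreinfo |= MINCORE_REFERENCED_OTHER;
		VM_OBJECT_UNLOCK(m->object);
	}
	return (mincoreinfo);
}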
Push down the page queues lock into pmap_clear_modify(),
pmap_clear_reference(), and pmap_is_modified(). Assert that these
functions are never passed an unmanaged page.
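
For callers, "pushing down" the lock means the page queues lock disappears from machine-independent call sites: the pmap functions now take it internally and instead demand, via the new KASSERTs, a managed page whose object is locked. A minimal before/after sketch of a hypothetical call site (m is an arbitrary managed page):

/*
 * Before this change: the MI caller had to provide the page queues lock.
 */
vm_page_lock_queues();
pmap_clear_modify(m);
vm_page_unlock_queues();

/*
 * After: the caller holds the page's object lock and guarantees a
 * managed, non-busy page (now enforced by KASSERTs in the pmap); the
 * pmap takes the page queues lock itself, and only when the page is
 * actually writeable.
 */
VM_OBJECT_LOCK(m->object);
pmap_clear_modify(m);
VM_OBJECT_UNLOCK(m->object);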
Eliminate an inaccurate comment from powerpc/powerpc/mmu_if.m:
Contrary to what the comment says, pmap_mincore() is not simply an
optimization. Without a complete pmap_mincore() implementation,
mincore() cannot return either MINCORE_MODIFIED or MINCORE_REFERENCED
because only the pmap can provide this information.
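
The reason is the split that sys/mman.h makes between the calling process's own mapping and everything else (constants paraphrased here for orientation): the "by us" answers can only come from the pmap's page-table entries, so a stubbed pmap_mincore() loses information rather than merely costing speed.

/* Paraphrased from sys/mman.h. */
#define	MINCORE_INCORE		 0x1	/* page is resident */
#define	MINCORE_REFERENCED	 0x2	/* referenced by the calling process: pmap only */
#define	MINCORE_MODIFIED	 0x4	/* modified by the calling process: pmap only */
#define	MINCORE_REFERENCED_OTHER 0x8	/* referenced at all: MI layer can tell */
#define	MINCORE_MODIFIED_OTHER	0x10	/* modified at all: MI layer can tell */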
Eliminate the page queues lock from vfs_setdirty_locked_object(),
vm_pageout_clean(), vm_object_page_collect_flush(), and
vm_object_page_clean(). Generally speaking, these are all accesses
to the page's dirty field, which are synchronized by the containing
vm object's lock.
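
Concretely, the pattern those functions converge on looks roughly like the following sketch (hypothetical fragment, not the committed code): dirty state is pulled out of the pmap and examined while only the object lock is held, and no page queues lock is involved.

vm_page_t m;

VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
TAILQ_FOREACH(m, &object->memq, listq) {
	/*
	 * Fold the pmap's modified bit into m->dirty; after this change
	 * that requires only the object lock.
	 */
	vm_page_test_dirty(m);
	if (m->dirty == 0)
		continue;		/* clean page: skip, no queues lock */
	/* ... gather the dirty page into a write-behind cluster ... */
}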
Reduce the scope of the page queues lock in vm_object_madvise() and
vm_page_dontneed().
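
"Reducing the scope" means the lock now covers only the work that still needs it, namely the queue manipulation itself. A schematic before/after, as a hypothetical fragment rather than the committed code (the dirty check stands in for whatever examination those functions perform):

/* Before: the page queues lock covered the examination of the page too. */
vm_page_lock_queues();
if (m->dirty == 0)			/* hypothetical check */
	vm_page_dontneed(m);
vm_page_unlock_queues();

/*
 * After: examine the page under the object lock; take the page queues
 * lock only around the queue operation itself.
 */
if (m->dirty == 0) {			/* hypothetical check */
	vm_page_lock_queues();
	vm_page_dontneed(m);
	vm_page_unlock_queues();
}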
Reviewed by: kib (an earlier version)
Diffstat (limited to 'sys/sparc64')
-rw-r--r-- | sys/sparc64/sparc64/pmap.c | 52
1 file changed, 40 insertions, 12 deletions
diff --git a/sys/sparc64/sparc64/pmap.c b/sys/sparc64/sparc64/pmap.c
index 1524a1e..6b4ae41 100644
--- a/sys/sparc64/sparc64/pmap.c
+++ b/sys/sparc64/sparc64/pmap.c
@@ -1898,17 +1898,32 @@ boolean_t
 pmap_is_modified(vm_page_t m)
 {
 	struct tte *tp;
+	boolean_t rv;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
-	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
-		return (FALSE);
+	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	    ("pmap_is_modified: page %p is not managed", m));
+	rv = FALSE;
+
+	/*
+	 * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be
+	 * concurrently set while the object is locked.  Thus, if PG_WRITEABLE
+	 * is clear, no TTEs can have TD_W set.
+	 */
+	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+	if ((m->oflags & VPO_BUSY) == 0 &&
+	    (m->flags & PG_WRITEABLE) == 0)
+		return (rv);
+	vm_page_lock_queues();
 	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
 		if ((tp->tte_data & TD_PV) == 0)
 			continue;
-		if ((tp->tte_data & TD_W) != 0)
-			return (TRUE);
+		if ((tp->tte_data & TD_W) != 0) {
+			rv = TRUE;
+			break;
+		}
 	}
-	return (FALSE);
+	vm_page_unlock_queues();
+	return (rv);
 }
 
 /*
@@ -1951,9 +1966,20 @@ pmap_clear_modify(vm_page_t m)
 	struct tte *tp;
 	u_long data;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
-	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
+	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	    ("pmap_clear_modify: page %p is not managed", m));
+	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+	KASSERT((m->oflags & VPO_BUSY) == 0,
+	    ("pmap_clear_modify: page %p is busy", m));
+
+	/*
+	 * If the page is not PG_WRITEABLE, then no TTEs can have TD_W set.
+	 * If the object containing the page is locked and the page is not
+	 * VPO_BUSY, then PG_WRITEABLE cannot be concurrently set.
+	 */
+	if ((m->flags & PG_WRITEABLE) == 0)
 		return;
+	vm_page_lock_queues();
 	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
 		if ((tp->tte_data & TD_PV) == 0)
 			continue;
@@ -1961,6 +1987,7 @@ pmap_clear_modify(vm_page_t m)
 		if ((data & TD_W) != 0)
 			tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp));
 	}
+	vm_page_unlock_queues();
 }
 
 void
@@ -1969,9 +1996,9 @@ pmap_clear_reference(vm_page_t m)
 	struct tte *tp;
 	u_long data;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
-	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
-		return;
+	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	    ("pmap_clear_reference: page %p is not managed", m));
+	vm_page_lock_queues();
 	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
 		if ((tp->tte_data & TD_PV) == 0)
 			continue;
@@ -1979,6 +2006,7 @@ pmap_clear_reference(vm_page_t m)
 		if ((data & TD_REF) != 0)
 			tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp));
 	}
+	vm_page_unlock_queues();
 }
 
 void
@@ -2014,7 +2042,7 @@ pmap_remove_write(vm_page_t m)
 }
 
 int
-pmap_mincore(pmap_t pm, vm_offset_t addr)
+pmap_mincore(pmap_t pm, vm_offset_t addr, vm_paddr_t *locked_pa)
 {
 
 	/* TODO; */