path: root/sys/sun4v
author     alc <alc@FreeBSD.org>  2010-05-24 14:26:57 +0000
committer  alc <alc@FreeBSD.org>  2010-05-24 14:26:57 +0000
commit     32b13ee95703577767e8794ce5e896ad8fdbdee7 (patch)
tree       9d773a8e937b9a4faf763308a1843594ec04df60 /sys/sun4v
parent     95cb40b038035cd0d6e2b2b015f41a7d4dec0fcb (diff)
Roughly half of a typical pmap_mincore() implementation is
machine-independent code. Move this code into mincore(), and eliminate
the page queues lock from pmap_mincore().

Push down the page queues lock into pmap_clear_modify(),
pmap_clear_reference(), and pmap_is_modified(). Assert that these
functions are never passed an unmanaged page.

Eliminate an inaccurate comment from powerpc/powerpc/mmu_if.m: Contrary
to what the comment says, pmap_mincore() is not simply an optimization.
Without a complete pmap_mincore() implementation, mincore() cannot
return either MINCORE_MODIFIED or MINCORE_REFERENCED because only the
pmap can provide this information.

Eliminate the page queues lock from vfs_setdirty_locked_object(),
vm_pageout_clean(), vm_object_page_collect_flush(), and
vm_object_page_clean(). Generally speaking, these are all accesses to
the page's dirty field, which are synchronized by the containing vm
object's lock.

Reduce the scope of the page queues lock in vm_object_madvise() and
vm_page_dontneed().

Reviewed by:	kib (an earlier version)
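
As a rough illustration of the split described above: the machine-dependent
pmap_mincore() answers only what the pmap itself can observe (residency and
the MINCORE_MODIFIED/MINCORE_REFERENCED bits derived from the hardware-
maintained mapping bits), while the machine-independent caller handles the
rest. The sketch below is hypothetical; the helper name and the omitted
fallback bookkeeping are illustrative, not the actual FreeBSD mincore() code.
Only the pmap_mincore() signature is taken from the diff that follows.

/* Kernel-context sketch; headers as used by the sun4v pmap itself. */
#include <sys/param.h>
#include <sys/systm.h>
#include <vm/vm.h>
#include <vm/pmap.h>

/*
 * Hypothetical helper showing the MI/MD division of labor described in
 * the commit message.  Not the real mincore() implementation.
 */
static int
mincore_query_page(pmap_t pmap, vm_offset_t addr)
{
	vm_paddr_t locked_pa = 0;
	int mincoreinfo;

	/*
	 * Machine-dependent part: the pmap reports residency and, when it
	 * has a complete implementation, MINCORE_MODIFIED and
	 * MINCORE_REFERENCED for this particular mapping.
	 */
	mincoreinfo = pmap_mincore(pmap, addr, &locked_pa);

	/*
	 * Machine-independent part (moved into mincore() by this change) is
	 * omitted from this sketch: the caller would look up the vm_page
	 * behind locked_pa and, with the backing object locked, fold in
	 * MINCORE_MODIFIED_OTHER / MINCORE_REFERENCED_OTHER from the page's
	 * own bookkeeping.
	 */
	return (mincoreinfo);
}
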
Diffstat (limited to 'sys/sun4v')
-rw-r--r--  sys/sun4v/sun4v/pmap.c  |  39
1 files changed, 37 insertions, 2 deletions
diff --git a/sys/sun4v/sun4v/pmap.c b/sys/sun4v/sun4v/pmap.c
index 6117275..57fe7c6 100644
--- a/sys/sun4v/sun4v/pmap.c
+++ b/sys/sun4v/sun4v/pmap.c
@@ -966,14 +966,33 @@ void
 pmap_clear_modify(vm_page_t m)
 {
 	KDPRINTF("pmap_clear_modify(0x%lx)\n", VM_PAGE_TO_PHYS(m));
+	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	    ("pmap_clear_modify: page %p is not managed", m));
+	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+	KASSERT((m->oflags & VPO_BUSY) == 0,
+	    ("pmap_clear_modify: page %p is busy", m));
+
+	/*
+	 * If the page is not PG_WRITEABLE, then no TTEs can have VTD_W set.
+	 * If the object containing the page is locked and the page is not
+	 * VPO_BUSY, then PG_WRITEABLE cannot be concurrently set.
+	 */
+	if ((m->flags & PG_WRITEABLE) == 0)
+		return;
+	vm_page_lock_queues();
 	tte_clear_phys_bit(m, VTD_W);
+	vm_page_unlock_queues();
 }
 
 void
 pmap_clear_reference(vm_page_t m)
 {
 	KDPRINTF("pmap_clear_reference(0x%lx)\n", VM_PAGE_TO_PHYS(m));
+	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	    ("pmap_clear_reference: page %p is not managed", m));
+	vm_page_lock_queues();
 	tte_clear_phys_bit(m, VTD_REF);
+	vm_page_unlock_queues();
 }
 
 void
@@ -1589,8 +1608,24 @@ pmap_invalidate_all(pmap_t pmap)
 boolean_t
 pmap_is_modified(vm_page_t m)
 {
+	boolean_t rv;
 
-	return (tte_get_phys_bit(m, VTD_W));
+	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	    ("pmap_is_modified: page %p is not managed", m));
+
+	/*
+	 * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be
+	 * concurrently set while the object is locked.  Thus, if PG_WRITEABLE
+	 * is clear, no TTEs can have VTD_W set.
+	 */
+	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+	if ((m->oflags & VPO_BUSY) == 0 &&
+	    (m->flags & PG_WRITEABLE) == 0)
+		return (FALSE);
+	vm_page_lock_queues();
+	rv = tte_get_phys_bit(m, VTD_W);
+	vm_page_unlock_queues();
+	return (rv);
 }
@@ -1652,7 +1687,7 @@ pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
 }
 
 int
-pmap_mincore(pmap_t pmap, vm_offset_t addr)
+pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
 {
 	return (0);
 }