diff options
author | alc <alc@FreeBSD.org> | 2010-05-24 14:26:57 +0000 |
---|---|---|
committer | alc <alc@FreeBSD.org> | 2010-05-24 14:26:57 +0000 |
commit | 32b13ee95703577767e8794ce5e896ad8fdbdee7 (patch) | |
tree | 9d773a8e937b9a4faf763308a1843594ec04df60 /sys/vm/vm_mmap.c | |
parent | 95cb40b038035cd0d6e2b2b015f41a7d4dec0fcb (diff) | |
download | FreeBSD-src-32b13ee95703577767e8794ce5e896ad8fdbdee7.zip FreeBSD-src-32b13ee95703577767e8794ce5e896ad8fdbdee7.tar.gz |
Roughly half of a typical pmap_mincore() implementation is machine-
independent code. Move this code into mincore(), and eliminate the
page queues lock from pmap_mincore().
Push down the page queues lock into pmap_clear_modify(),
pmap_clear_reference(), and pmap_is_modified(). Assert that these
functions are never passed an unmanaged page.
Eliminate an inaccurate comment from powerpc/powerpc/mmu_if.m:
Contrary to what the comment says, pmap_mincore() is not simply an
optimization. Without a complete pmap_mincore() implementation,
mincore() cannot return either MINCORE_MODIFIED or MINCORE_REFERENCED
because only the pmap can provide this information.
Eliminate the page queues lock from vfs_setdirty_locked_object(),
vm_pageout_clean(), vm_object_page_collect_flush(), and
vm_object_page_clean(). Generally speaking, these are all accesses
to the page's dirty field, which are synchronized by the containing
vm object's lock.
Reduce the scope of the page queues lock in vm_object_madvise() and
vm_page_dontneed().
Reviewed by: kib (an earlier version)
Diffstat (limited to 'sys/vm/vm_mmap.c')
-rw-r--r-- | sys/vm/vm_mmap.c | 93 |
1 file changed, 67 insertions, 26 deletions
diff --git a/sys/vm/vm_mmap.c b/sys/vm/vm_mmap.c index f9b3db3..99d5059 100644 --- a/sys/vm/vm_mmap.c +++ b/sys/vm/vm_mmap.c @@ -772,8 +772,13 @@ mincore(td, uap) int vecindex, lastvecindex; vm_map_entry_t current; vm_map_entry_t entry; + vm_object_t object; + vm_paddr_t locked_pa; + vm_page_t m; + vm_pindex_t pindex; int mincoreinfo; unsigned int timestamp; + boolean_t locked; /* * Make sure that the addresses presented are valid for user @@ -847,38 +852,74 @@ RestartScan: * it can provide info as to whether we are the * one referencing or modifying the page. */ - mincoreinfo = pmap_mincore(pmap, addr); - if (!mincoreinfo) { - vm_pindex_t pindex; - vm_ooffset_t offset; - vm_page_t m; + object = NULL; + locked_pa = 0; + retry: + m = NULL; + mincoreinfo = pmap_mincore(pmap, addr, &locked_pa); + if (locked_pa != 0) { /* - * calculate the page index into the object + * The page is mapped by this process but not + * both accessed and modified. It is also + * managed. Acquire the object lock so that + * other mappings might be examined. */ - offset = current->offset + (addr - current->start); - pindex = OFF_TO_IDX(offset); - VM_OBJECT_LOCK(current->object.vm_object); - m = vm_page_lookup(current->object.vm_object, - pindex); + m = PHYS_TO_VM_PAGE(locked_pa); + if (m->object != object) { + if (object != NULL) + VM_OBJECT_UNLOCK(object); + object = m->object; + locked = VM_OBJECT_TRYLOCK(object); + vm_page_unlock(m); + if (!locked) { + VM_OBJECT_LOCK(object); + vm_page_lock(m); + goto retry; + } + } else + vm_page_unlock(m); + KASSERT(m->valid == VM_PAGE_BITS_ALL, + ("mincore: page %p is mapped but invalid", + m)); + } else if (mincoreinfo == 0) { /* - * if the page is resident, then gather information about - * it. + * The page is not mapped by this process. If + * the object implements managed pages, then + * determine if the page is resident so that + * the mappings might be examined. 
*/ - if (m != NULL && m->valid != 0) { - mincoreinfo = MINCORE_INCORE; - vm_page_lock(m); - vm_page_lock_queues(); - if (m->dirty || - pmap_is_modified(m)) - mincoreinfo |= MINCORE_MODIFIED_OTHER; - if ((m->flags & PG_REFERENCED) || - pmap_is_referenced(m)) - mincoreinfo |= MINCORE_REFERENCED_OTHER; - vm_page_unlock_queues(); - vm_page_unlock(m); + if (current->object.vm_object != object) { + if (object != NULL) + VM_OBJECT_UNLOCK(object); + object = current->object.vm_object; + VM_OBJECT_LOCK(object); } - VM_OBJECT_UNLOCK(current->object.vm_object); + if (object->type == OBJT_DEFAULT || + object->type == OBJT_SWAP || + object->type == OBJT_VNODE) { + pindex = OFF_TO_IDX(current->offset + + (addr - current->start)); + m = vm_page_lookup(object, pindex); + if (m != NULL && m->valid == 0) + m = NULL; + if (m != NULL) + mincoreinfo = MINCORE_INCORE; + } + } + if (m != NULL) { + /* Examine other mappings to the page. */ + if (m->dirty == 0 && pmap_is_modified(m)) + vm_page_dirty(m); + if (m->dirty != 0) + mincoreinfo |= MINCORE_MODIFIED_OTHER; + vm_page_lock_queues(); + if ((m->flags & PG_REFERENCED) != 0 || + pmap_is_referenced(m)) + mincoreinfo |= MINCORE_REFERENCED_OTHER; + vm_page_unlock_queues(); } + if (object != NULL) + VM_OBJECT_UNLOCK(object); /* * subyte may page fault. In case it needs to modify |