summary refs log tree commit diff stats
path: root/sys/vm/vm_object.c
diff options
context:
space:
mode:
author: alc <alc@FreeBSD.org> 2010-05-24 14:26:57 +0000
committer: alc <alc@FreeBSD.org> 2010-05-24 14:26:57 +0000
commit: 32b13ee95703577767e8794ce5e896ad8fdbdee7 (patch)
tree: 9d773a8e937b9a4faf763308a1843594ec04df60 /sys/vm/vm_object.c
parent: 95cb40b038035cd0d6e2b2b015f41a7d4dec0fcb (diff)
download: FreeBSD-src-32b13ee95703577767e8794ce5e896ad8fdbdee7.zip
download: FreeBSD-src-32b13ee95703577767e8794ce5e896ad8fdbdee7.tar.gz
Roughly half of a typical pmap_mincore() implementation is machine-
independent code. Move this code into mincore(), and eliminate the page queues lock from pmap_mincore(). Push down the page queues lock into pmap_clear_modify(), pmap_clear_reference(), and pmap_is_modified(). Assert that these functions are never passed an unmanaged page. Eliminate an inaccurate comment from powerpc/powerpc/mmu_if.m: Contrary to what the comment says, pmap_mincore() is not simply an optimization. Without a complete pmap_mincore() implementation, mincore() cannot return either MINCORE_MODIFIED or MINCORE_REFERENCED because only the pmap can provide this information. Eliminate the page queues lock from vfs_setdirty_locked_object(), vm_pageout_clean(), vm_object_page_collect_flush(), and vm_object_page_clean(). Generally speaking, these are all accesses to the page's dirty field, which are synchronized by the containing vm object's lock. Reduce the scope of the page queues lock in vm_object_madvise() and vm_page_dontneed(). Reviewed by: kib (an earlier version)
Diffstat (limited to 'sys/vm/vm_object.c')
-rw-r--r--  sys/vm/vm_object.c | 35
1 file changed, 6 insertions(+), 29 deletions(-)
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index 96ceb88..ae7af15 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -817,19 +817,13 @@ vm_object_page_clean(vm_object_t object, vm_pindex_t start, vm_pindex_t end, int
++tscan;
continue;
}
- vm_page_lock(p);
- vm_page_lock_queues();
vm_page_test_dirty(p);
if (p->dirty == 0) {
- vm_page_unlock_queues();
- vm_page_unlock(p);
if (--scanlimit == 0)
break;
++tscan;
continue;
}
- vm_page_unlock_queues();
- vm_page_unlock(p);
/*
* If we have been asked to skip nosync pages and
* this is a nosync page, we can't continue.
@@ -900,17 +894,11 @@ again:
continue;
}
- vm_page_lock(p);
- vm_page_lock_queues();
vm_page_test_dirty(p);
if (p->dirty == 0) {
- vm_page_unlock_queues();
- vm_page_unlock(p);
p->oflags &= ~VPO_CLEANCHK;
continue;
}
- vm_page_unlock_queues();
- vm_page_unlock(p);
/*
* If we have been asked to skip nosync pages and this is a
* nosync page, skip it. Note that the object flags were
@@ -977,17 +965,11 @@ vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int curgeneration,
(tp->oflags & VPO_CLEANCHK) == 0) ||
(tp->busy != 0))
break;
- vm_page_lock(tp);
- vm_page_lock_queues();
vm_page_test_dirty(tp);
if (tp->dirty == 0) {
- vm_page_unlock(tp);
- vm_page_unlock_queues();
tp->oflags &= ~VPO_CLEANCHK;
break;
}
- vm_page_unlock(tp);
- vm_page_unlock_queues();
maf[ i - 1 ] = tp;
maxf++;
continue;
@@ -1007,17 +989,11 @@ vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int curgeneration,
(tp->oflags & VPO_CLEANCHK) == 0) ||
(tp->busy != 0))
break;
- vm_page_lock(tp);
- vm_page_lock_queues();
vm_page_test_dirty(tp);
if (tp->dirty == 0) {
- vm_page_unlock_queues();
- vm_page_unlock(tp);
tp->oflags &= ~VPO_CLEANCHK;
break;
}
- vm_page_unlock_queues();
- vm_page_unlock(tp);
mab[ i - 1 ] = tp;
maxb++;
continue;
@@ -1217,21 +1193,23 @@ shadowlookup:
* If the page is not in a normal state, skip it.
*/
vm_page_lock(m);
- vm_page_lock_queues();
if (m->hold_count != 0 || m->wire_count != 0) {
- vm_page_unlock_queues();
vm_page_unlock(m);
goto unlock_tobject;
}
+ KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+ ("vm_object_madvise: page %p is not managed", m));
if ((m->oflags & VPO_BUSY) || m->busy) {
- if (advise == MADV_WILLNEED)
+ if (advise == MADV_WILLNEED) {
/*
* Reference the page before unlocking and
* sleeping so that the page daemon is less
* likely to reclaim it.
*/
+ vm_page_lock_queues();
vm_page_flag_set(m, PG_REFERENCED);
- vm_page_unlock_queues();
+ vm_page_unlock_queues();
+ }
vm_page_unlock(m);
if (object != tobject)
VM_OBJECT_UNLOCK(object);
@@ -1266,7 +1244,6 @@ shadowlookup:
m->act_count = 0;
vm_page_dontneed(m);
}
- vm_page_unlock_queues();
vm_page_unlock(m);
if (advise == MADV_FREE && tobject->type == OBJT_SWAP)
swap_pager_freespace(tobject, tpindex, 1);
OpenPOWER on IntegriCloud