summaryrefslogtreecommitdiffstats
path: root/sys/vm/vm_pageout.c
diff options
context:
space:
mode:
authoralc <alc@FreeBSD.org>2010-06-10 16:56:35 +0000
committeralc <alc@FreeBSD.org>2010-06-10 16:56:35 +0000
commit7c212e010d5269026628a1e2e686c89679c23af8 (patch)
tree2e32fd87e6b7bde5898f2e0cb503e19a50ea5f7b /sys/vm/vm_pageout.c
parentc2cccc78fe9cc4c3e971806f8c5ce77d16a4bd39 (diff)
downloadFreeBSD-src-7c212e010d5269026628a1e2e686c89679c23af8.zip
FreeBSD-src-7c212e010d5269026628a1e2e686c89679c23af8.tar.gz
Reduce the scope of the page queues lock and the number of
PG_REFERENCED changes in vm_pageout_object_deactivate_pages(). Simplify this function's inner loop using TAILQ_FOREACH(), and shorten some of its overly long lines. Update a stale comment. Assert that PG_REFERENCED may be cleared only if the object containing the page is locked. Add a comment documenting this. Assert that a caller to vm_page_requeue() holds the page queues lock, and assert that the page is on a page queue. Push down the page queues lock into pmap_ts_referenced() and pmap_page_exists_quick(). (As of now, there are no longer any pmap functions that expect to be called with the page queues lock held.) Neither pmap_ts_referenced() nor pmap_page_exists_quick() should ever be passed an unmanaged page. Assert this rather than returning "0" and "FALSE" respectively. ARM: Simplify pmap_page_exists_quick() by switching to TAILQ_FOREACH(). Push down the page queues lock inside of pmap_clearbit(), simplifying pmap_clear_modify(), pmap_clear_reference(), and pmap_remove_write(). Additionally, this allows for avoiding the acquisition of the page queues lock in some cases. PowerPC/AIM: moea*_page_exists_quick() and moea*_page_wired_mappings() will never be called before pmap initialization is complete. Therefore, the check for moea_initialized can be eliminated. Push down the page queues lock inside of moea*_clear_bit(), simplifying moea*_clear_modify() and moea*_clear_reference(). The last parameter to moea*_clear_bit() is never used. Eliminate it. PowerPC/BookE: Simplify mmu_booke_page_exists_quick()'s control flow. Reviewed by: kib@
Diffstat (limited to 'sys/vm/vm_pageout.c')
-rw-r--r--sys/vm/vm_pageout.c68
1 file changed, 30 insertions, 38 deletions
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index 3158a7d..bff803d 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -547,21 +547,17 @@ vm_pageout_flush(vm_page_t *mc, int count, int flags)
/*
* vm_pageout_object_deactivate_pages
*
- * deactivate enough pages to satisfy the inactive target
- * requirements or if vm_page_proc_limit is set, then
- * deactivate all of the pages in the object and its
- * backing_objects.
+ * Deactivate enough pages to satisfy the inactive target
+ * requirements.
*
* The object and map must be locked.
*/
static void
-vm_pageout_object_deactivate_pages(pmap, first_object, desired)
- pmap_t pmap;
- vm_object_t first_object;
- long desired;
+vm_pageout_object_deactivate_pages(pmap_t pmap, vm_object_t first_object,
+ long desired)
{
vm_object_t backing_object, object;
- vm_page_t p, next;
+ vm_page_t p;
int actcount, remove_mode;
VM_OBJECT_LOCK_ASSERT(first_object, MA_OWNED);
@@ -579,61 +575,57 @@ vm_pageout_object_deactivate_pages(pmap, first_object, desired)
if (object->shadow_count > 1)
remove_mode = 1;
/*
- * scan the objects entire memory queue
+ * Scan the object's entire memory queue.
*/
- p = TAILQ_FIRST(&object->memq);
- while (p != NULL) {
+ TAILQ_FOREACH(p, &object->memq, listq) {
if (pmap_resident_count(pmap) <= desired)
goto unlock_return;
- next = TAILQ_NEXT(p, listq);
- if ((p->oflags & VPO_BUSY) != 0 || p->busy != 0) {
- p = next;
+ if ((p->oflags & VPO_BUSY) != 0 || p->busy != 0)
continue;
- }
+ PCPU_INC(cnt.v_pdpages);
vm_page_lock(p);
- vm_page_lock_queues();
- cnt.v_pdpages++;
- if (p->wire_count != 0 ||
- p->hold_count != 0 ||
+ if (p->wire_count != 0 || p->hold_count != 0 ||
!pmap_page_exists_quick(pmap, p)) {
- vm_page_unlock_queues();
vm_page_unlock(p);
- p = next;
continue;
}
actcount = pmap_ts_referenced(p);
- if (actcount) {
- vm_page_flag_set(p, PG_REFERENCED);
- } else if (p->flags & PG_REFERENCED) {
- actcount = 1;
+ if ((p->flags & PG_REFERENCED) != 0) {
+ if (actcount == 0)
+ actcount = 1;
+ vm_page_lock_queues();
+ vm_page_flag_clear(p, PG_REFERENCED);
+ vm_page_unlock_queues();
}
- if ((p->queue != PQ_ACTIVE) &&
- (p->flags & PG_REFERENCED)) {
+ if (p->queue != PQ_ACTIVE && actcount != 0) {
vm_page_activate(p);
p->act_count += actcount;
- vm_page_flag_clear(p, PG_REFERENCED);
} else if (p->queue == PQ_ACTIVE) {
- if ((p->flags & PG_REFERENCED) == 0) {
- p->act_count -= min(p->act_count, ACT_DECLINE);
- if (!remove_mode && (vm_pageout_algorithm || (p->act_count == 0))) {
+ if (actcount == 0) {
+ p->act_count -= min(p->act_count,
+ ACT_DECLINE);
+ if (!remove_mode &&
+ (vm_pageout_algorithm ||
+ p->act_count == 0)) {
pmap_remove_all(p);
vm_page_deactivate(p);
} else {
+ vm_page_lock_queues();
vm_page_requeue(p);
+ vm_page_unlock_queues();
}
} else {
vm_page_activate(p);
- vm_page_flag_clear(p, PG_REFERENCED);
- if (p->act_count < (ACT_MAX - ACT_ADVANCE))
+ if (p->act_count < ACT_MAX -
+ ACT_ADVANCE)
p->act_count += ACT_ADVANCE;
+ vm_page_lock_queues();
vm_page_requeue(p);
+ vm_page_unlock_queues();
}
- } else if (p->queue == PQ_INACTIVE) {
+ } else if (p->queue == PQ_INACTIVE)
pmap_remove_all(p);
- }
- vm_page_unlock_queues();
vm_page_unlock(p);
- p = next;
}
if ((backing_object = object->backing_object) == NULL)
goto unlock_return;
OpenPOWER on IntegriCloud