author     tegge <tegge@FreeBSD.org>  2006-02-17 21:02:39 +0000
committer  tegge <tegge@FreeBSD.org>  2006-02-17 21:02:39 +0000
commit     d0ed011fe8d1a5e4bb7e83fe89968dbc94ede197
tree       53801b4ed76e6a0eb496edb4cfcc838478a3a57f
parent     9f719475dfcefd4b9cac65bef1d42093dcfbac13
Expand scope of marker to reduce the number of page queue scan restarts.
-rw-r--r--  sys/vm/vm_pageout.c | 31 +++++++++++++++++++------------
 1 file changed, 19 insertions(+), 12 deletions(-)
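The marker is a dummy page-queue entry: once it is inserted after the page
being processed, the scan can drop vm_page_queue_mtx, perform blocking work,
and later resume from the marker instead of restarting the whole queue scan.
The standalone sketch below illustrates that pattern with the sys/queue.h
TAILQ macros; it is not the kernel code, and names such as struct page,
is_marker, and the simulated work are illustrative only.

#include <stdio.h>
#include <sys/queue.h>

struct page {
	int		  id;		/* payload; ignored on the marker */
	int		  is_marker;	/* 1 for the dummy marker entry */
	TAILQ_ENTRY(page) pageq;
};

TAILQ_HEAD(pagelist, page);

int
main(void)
{
	struct pagelist queue = TAILQ_HEAD_INITIALIZER(queue);
	struct page pages[4], marker = { .is_marker = 1 };
	struct page *m, *next;
	int i;

	for (i = 0; i < 4; i++) {
		pages[i].id = i;
		pages[i].is_marker = 0;
		TAILQ_INSERT_TAIL(&queue, &pages[i], pageq);
	}

	for (m = TAILQ_FIRST(&queue); m != NULL; m = next) {
		next = TAILQ_NEXT(m, pageq);
		if (m->is_marker)
			continue;	/* someone else's marker */

		/*
		 * Park the marker right after 'm' before doing work
		 * that may drop the queue lock; 'next' may be stale
		 * afterwards, but the marker keeps our place.
		 */
		TAILQ_INSERT_AFTER(&queue, m, &marker, pageq);

		printf("processing page %d\n", m->id);
		/* ...lock dropped and reacquired here in the kernel... */

		/* Resume from the marker, then take it back out. */
		next = TAILQ_NEXT(&marker, pageq);
		TAILQ_REMOVE(&queue, &marker, pageq);
	}
	return (0);
}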
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index b5abceb..e7611d2 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -906,6 +906,16 @@ rescan0:
 			}
 
 			/*
+			 * Following operations may unlock
+			 * vm_page_queue_mtx, invalidating the 'next'
+			 * pointer.  To prevent an inordinate number
+			 * of restarts we use our marker to remember
+			 * our place.
+			 *
+			 */
+			TAILQ_INSERT_AFTER(&vm_page_queues[PQ_INACTIVE].pl,
+			    m, &marker, pageq);
+			/*
 			 * The object is already known NOT to be dead.  It
 			 * is possible for the vget() to block the whole
 			 * pageout daemon, but the new low-memory handling
@@ -936,8 +946,8 @@ rescan0:
 					++pageout_lock_miss;
 					if (object->flags & OBJ_MIGHTBEDIRTY)
 						vnodes_skipped++;
-					VM_OBJECT_UNLOCK(object);
-					continue;
+					vp = NULL;
+					goto unlock_and_continue;
 				}
 				vm_page_unlock_queues();
 				VI_LOCK(vp);
@@ -950,8 +960,8 @@ rescan0:
 					vn_finished_write(mp);
 					if (object->flags & OBJ_MIGHTBEDIRTY)
 						vnodes_skipped++;
-					VM_OBJECT_UNLOCK(object);
-					continue;
+					vp = NULL;
+					goto unlock_and_continue;
 				}
 				VM_OBJECT_LOCK(object);
 				vm_page_lock_queues();
@@ -964,7 +974,8 @@ rescan0:
 				 */
 				if (VM_PAGE_GETQUEUE(m) != PQ_INACTIVE ||
 				    m->object != object ||
-				    object->handle != vp) {
+				    object->handle != vp ||
+				    TAILQ_NEXT(m, pageq) != &marker) {
 					if (object->flags & OBJ_MIGHTBEDIRTY)
 						vnodes_skipped++;
 					goto unlock_and_continue;
@@ -998,21 +1009,14 @@ rescan0:
 			 * laundry.  If it is still in the laundry, then we
 			 * start the cleaning operation.
 			 *
-			 * This operation may cluster, invalidating the 'next'
-			 * pointer.  To prevent an inordinate number of
-			 * restarts we use our marker to remember our place.
-			 *
 			 * decrement page_shortage on success to account for
 			 * the (future) cleaned page.  Otherwise we could wind
 			 * up laundering or cleaning too many pages.
 			 */
-			TAILQ_INSERT_AFTER(&vm_page_queues[PQ_INACTIVE].pl, m, &marker, pageq);
 			if (vm_pageout_clean(m) != 0) {
 				--page_shortage;
 				--maxlaunder;
 			}
-			next = TAILQ_NEXT(&marker, pageq);
-			TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, &marker, pageq);
 unlock_and_continue:
 			VM_OBJECT_UNLOCK(object);
 			if (vp) {
@@ -1021,6 +1025,9 @@ unlock_and_continue:
 				vn_finished_write(mp);
 				vm_page_lock_queues();
 			}
+			next = TAILQ_NEXT(&marker, pageq);
+			TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl,
+			    &marker, pageq);
 			continue;
 		}
 		VM_OBJECT_UNLOCK(object);
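Two details of the change are worth noting. First, because the marker now
sits in the queue across the vget() window, the revalidation after relocking
can also demand that the page still be immediately followed by the marker;
a concurrent requeue or free/reuse of the page breaks that adjacency, and
the scan safely skips the page. Second, the marker is removed at
unlock_and_continue, after every path that can drop the queue lock, so
'next' is sampled only once all blocking is done. A small userland sketch of
the adjacency check follows; the names and the simulated interference are
illustrative, not the kernel code.

#include <stdio.h>
#include <sys/queue.h>

struct page {
	TAILQ_ENTRY(page) pageq;
};

TAILQ_HEAD(pagelist, page);

int
main(void)
{
	struct pagelist queue = TAILQ_HEAD_INITIALIZER(queue);
	struct page m, marker, intruder;

	TAILQ_INSERT_TAIL(&queue, &m, pageq);
	TAILQ_INSERT_AFTER(&queue, &m, &marker, pageq);

	/* Adjacency holds: the page may still be processed. */
	printf("before: %s\n", TAILQ_NEXT(&m, pageq) == &marker ?
	    "page still ours" : "page moved, skip it");

	/* Simulate another thread disturbing the queue at 'm'. */
	TAILQ_INSERT_AFTER(&queue, &m, &intruder, pageq);
	printf("after:  %s\n", TAILQ_NEXT(&m, pageq) == &marker ?
	    "page still ours" : "page moved, skip it");
	return (0);
}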