summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authoralc <alc@FreeBSD.org>2007-07-02 06:56:37 +0000
committeralc <alc@FreeBSD.org>2007-07-02 06:56:37 +0000
commitab67a078687c55227d438cdeab0c7fd9f800f520 (patch)
tree52ca362de6cfc797c4996e2d77be3e452205e14e
parent03be3cbe9a4999d68c76e2d5336983a576cc33a0 (diff)
downloadFreeBSD-src-ab67a078687c55227d438cdeab0c7fd9f800f520.zip
FreeBSD-src-ab67a078687c55227d438cdeab0c7fd9f800f520.tar.gz
In the previous revision, when I replaced the unconditional acquisition
of Giant in vm_pageout_scan() with VFS_LOCK_GIANT(), I had to eliminate the acquisition of the vnode interlock before releasing the vm object's lock because the vnode interlock cannot be held when VFS_LOCK_GIANT() is performed. Unfortunately, this allows the vnode to be recycled between the release of the vm object's lock and the vget() on the vnode. In this revision, I prevent the vnode from being recycled by acquiring another reference to the vm object and underlying vnode before releasing the vm object's lock. This change also addresses another preexisting but trivial problem. By acquiring another reference to the vm object, I also prevent the vm object from being recycled. Previously, the "vnodes skipped" counter could be wrong because it examined a recycled vm object. Reported by: kib Reviewed by: kib Approved by: re (kensmith) MFC after: 3 weeks
-rw-r--r--sys/vm/vm_pageout.c21
1 file changed, 10 insertions, 11 deletions
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index c4d5ad7..c46bfe3 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -887,7 +887,7 @@ rescan0:
*/
int swap_pageouts_ok, vfslocked = 0;
struct vnode *vp = NULL;
- struct mount *mp;
+ struct mount *mp = NULL;
if ((object->type != OBJT_SWAP) && (object->type != OBJT_DEFAULT)) {
swap_pageouts_ok = 1;
@@ -943,25 +943,24 @@ rescan0:
*/
if (object->type == OBJT_VNODE) {
vp = object->handle;
- mp = NULL;
if (vp->v_type == VREG &&
vn_start_write(vp, &mp, V_NOWAIT) != 0) {
+ KASSERT(mp == NULL,
+ ("vm_pageout_scan: mp != NULL"));
++pageout_lock_miss;
if (object->flags & OBJ_MIGHTBEDIRTY)
vnodes_skipped++;
- vp = NULL;
goto unlock_and_continue;
}
vm_page_unlock_queues();
+ vm_object_reference_locked(object);
VM_OBJECT_UNLOCK(object);
vfslocked = VFS_LOCK_GIANT(vp->v_mount);
if (vget(vp, LK_EXCLUSIVE | LK_TIMELOCK,
curthread)) {
- VFS_UNLOCK_GIANT(vfslocked);
VM_OBJECT_LOCK(object);
vm_page_lock_queues();
++pageout_lock_miss;
- vn_finished_write(mp);
if (object->flags & OBJ_MIGHTBEDIRTY)
vnodes_skipped++;
vp = NULL;
@@ -973,12 +972,10 @@ rescan0:
* The page might have been moved to another
* queue during potential blocking in vget()
* above. The page might have been freed and
- * reused for another vnode. The object might
- * have been reused for another vnode.
+ * reused for another vnode.
*/
if (VM_PAGE_GETQUEUE(m) != PQ_INACTIVE ||
m->object != object ||
- object->handle != vp ||
TAILQ_NEXT(m, pageq) != &marker) {
if (object->flags & OBJ_MIGHTBEDIRTY)
vnodes_skipped++;
@@ -987,7 +984,7 @@ rescan0:
/*
* The page may have been busied during the
- * blocking in vput(); We don't move the
+ * blocking in vget(). We don't move the
* page back onto the end of the queue so that
* statistics are more correct if we don't.
*/
@@ -1023,10 +1020,12 @@ rescan0:
}
unlock_and_continue:
VM_OBJECT_UNLOCK(object);
- if (vp) {
+ if (mp != NULL) {
vm_page_unlock_queues();
- vput(vp);
+ if (vp != NULL)
+ vput(vp);
VFS_UNLOCK_GIANT(vfslocked);
+ vm_object_deallocate(object);
vn_finished_write(mp);
vm_page_lock_queues();
}
OpenPOWER on IntegriCloud