author     kib <kib@FreeBSD.org>   2012-12-03 22:15:16 +0000
committer  kib <kib@FreeBSD.org>   2012-12-03 22:15:16 +0000
commit     69dfcf0272e9843f5132e35241d7124bbae253ba (patch)
tree       d0f98ecb0caa60b2d54680222378d7fe086fb13d
parent     20b909215d96066e1480c2630adb246a1e4a3342 (diff)
The vnode_free_list_mtx is required unconditionally when iterating
over the active list.  The mount interlock is not enough to guarantee
the validity of the tailq link pointers.  The __mnt_vnode_next_active()
and __mnt_vnode_first_active() active list iterator helpers did not
provide the necessary stability for the list, allowing the iterators
to pick up garbage.

This was uncovered after r243599 made the active list iterators non-nops.

Since the vnode interlock is ordered before the vnode_free_list_mtx,
obtain the vnode interlock in a non-blocking manner when under the
vnode_free_list_mtx, and restart the iteration after a yield if the
lock attempt failed.

Assert that a vnode found on the list is active, and assert that the
helpers return the vnode with its interlock owned.

Reported and tested by:	pho
MFC after:	1 week
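The core of the fix is a trylock-and-restart pattern: because the vnode
interlock is ordered before vnode_free_list_mtx, the iterator may only
attempt the interlock non-blockingly while the list mutex is held, and
on failure it must drop the mutex, yield, and rescan.  Below is a
minimal userspace sketch of that pattern, assuming pthreads rather than
the kernel primitives; the names (struct node, list_lock, first_locked())
are hypothetical stand-ins for the vnode, vnode_free_list_mtx, and the
iterator helpers, and the skipping of marker/doomed vnodes is omitted.

#include <pthread.h>
#include <sched.h>
#include <stddef.h>
#include <stdio.h>

struct node {
	pthread_mutex_t lock;	/* per-node lock, ordered before list_lock */
	struct node *next;	/* list linkage, protected by list_lock */
	int id;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *list_head;

/*
 * Return the first list node with its per-node lock held, or NULL if
 * the list is empty.  The node lock is only try-locked while list_lock
 * is held; blocking there would invert the lock order.
 */
static struct node *
first_locked(void)
{
	struct node *n;

restart:
	pthread_mutex_lock(&list_lock);
	n = list_head;
	if (n != NULL && pthread_mutex_trylock(&n->lock) != 0) {
		/*
		 * Contended: drop the list lock, give the holder a
		 * chance to run, and rescan.  Link pointers observed so
		 * far are stale once list_lock is released.
		 */
		pthread_mutex_unlock(&list_lock);
		sched_yield();
		goto restart;
	}
	pthread_mutex_unlock(&list_lock);
	return (n);		/* locked, or NULL */
}

int
main(void)
{
	static struct node a = { PTHREAD_MUTEX_INITIALIZER, NULL, 1 };
	struct node *n;

	list_head = &a;
	n = first_locked();
	if (n != NULL) {
		printf("node %d returned with its lock held\n", n->id);
		pthread_mutex_unlock(&n->lock);
	}
	return (0);
}

Restarting from scratch is what makes dropping the list mutex safe:
once it is released, the link pointers seen so far can no longer be
trusted, which is exactly the instability the commit message describes.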
-rw-r--r--	sys/kern/vfs_subr.c	32
1 file changed, 28 insertions, 4 deletions
diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c
index c2f5f93..2c470df 100644
--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -4718,10 +4718,20 @@ __mnt_vnode_next_active(struct vnode **mvp, struct mount *mp)
if (should_yield())
kern_yield(PRI_UNCHANGED);
MNT_ILOCK(mp);
+restart:
+ mtx_lock(&vnode_free_list_mtx);
KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));
vp = TAILQ_NEXT(*mvp, v_actfreelist);
while (vp != NULL) {
- VI_LOCK(vp);
+ if (vp->v_type == VMARKER) {
+ vp = TAILQ_NEXT(vp, v_actfreelist);
+ continue;
+ }
+ if (!VI_TRYLOCK(vp)) {
+ mtx_unlock(&vnode_free_list_mtx);
+ kern_yield(PRI_UNCHANGED);
+ goto restart;
+ }
if (vp->v_mount == mp && vp->v_type != VMARKER &&
(vp->v_iflag & VI_DOOMED) == 0)
break;
@@ -4732,16 +4742,18 @@ __mnt_vnode_next_active(struct vnode **mvp, struct mount *mp)
/* Check if we are done */
if (vp == NULL) {
+ mtx_unlock(&vnode_free_list_mtx);
__mnt_vnode_markerfree_active(mvp, mp);
/* MNT_IUNLOCK(mp); -- done in above function */
mtx_assert(MNT_MTX(mp), MA_NOTOWNED);
return (NULL);
}
- mtx_lock(&vnode_free_list_mtx);
TAILQ_REMOVE(&mp->mnt_activevnodelist, *mvp, v_actfreelist);
TAILQ_INSERT_AFTER(&mp->mnt_activevnodelist, vp, *mvp, v_actfreelist);
mtx_unlock(&vnode_free_list_mtx);
MNT_IUNLOCK(mp);
+ ASSERT_VI_LOCKED(vp, "active iter");
+ KASSERT((vp->v_iflag & VI_ACTIVE) != 0, ("Non-active vp %p", vp));
return (vp);
}
@@ -4755,9 +4767,19 @@ __mnt_vnode_first_active(struct vnode **mvp, struct mount *mp)
MNT_REF(mp);
(*mvp)->v_type = VMARKER;
+restart:
+ mtx_lock(&vnode_free_list_mtx);
vp = TAILQ_FIRST(&mp->mnt_activevnodelist);
while (vp != NULL) {
- VI_LOCK(vp);
+ if (vp->v_type == VMARKER) {
+ vp = TAILQ_NEXT(vp, v_actfreelist);
+ continue;
+ }
+ if (!VI_TRYLOCK(vp)) {
+ mtx_unlock(&vnode_free_list_mtx);
+ kern_yield(PRI_UNCHANGED);
+ goto restart;
+ }
if (vp->v_mount == mp && vp->v_type != VMARKER &&
(vp->v_iflag & VI_DOOMED) == 0)
break;
@@ -4768,6 +4790,7 @@ __mnt_vnode_first_active(struct vnode **mvp, struct mount *mp)
/* Check if we are done */
if (vp == NULL) {
+ mtx_unlock(&vnode_free_list_mtx);
MNT_REL(mp);
MNT_IUNLOCK(mp);
free(*mvp, M_VNODE_MARKER);
@@ -4775,10 +4798,11 @@ __mnt_vnode_first_active(struct vnode **mvp, struct mount *mp)
return (NULL);
}
(*mvp)->v_mount = mp;
- mtx_lock(&vnode_free_list_mtx);
TAILQ_INSERT_AFTER(&mp->mnt_activevnodelist, vp, *mvp, v_actfreelist);
mtx_unlock(&vnode_free_list_mtx);
MNT_IUNLOCK(mp);
+ ASSERT_VI_LOCKED(vp, "active iter first");
+ KASSERT((vp->v_iflag & VI_ACTIVE) != 0, ("Non-active vp %p", vp));
return (vp);
}