author      kib <kib@FreeBSD.org>  2010-09-11 13:06:06 +0000
committer   kib <kib@FreeBSD.org>  2010-09-11 13:06:06 +0000
commit      107ea66c07f4e9862fd2961da07c078eab84d05f (patch)
tree        fd0a7ea41044fca75b3c668faa23382cf15de3fb
parent      fe4368ea53d115c6ccdebffbc023513a6424ec3a (diff)
download    FreeBSD-src-107ea66c07f4e9862fd2961da07c078eab84d05f.zip
            FreeBSD-src-107ea66c07f4e9862fd2961da07c078eab84d05f.tar.gz
Protect mnt_syncer with the sync_mtx. This prevents a (rare) vnode leak
when mount and update are executed in parallel.

Encapsulate syncer vnode deallocation into the helper function
vfs_deallocate_syncvnode(), to not externalize sync_mtx from vfs_subr.c.

Found and reviewed by:  jh (previous version of the patch)
Tested by:              pho
MFC after:              3 weeks
-rw-r--r--  sys/kern/vfs_mount.c  17
-rw-r--r--  sys/kern/vfs_subr.c   31
-rw-r--r--  sys/sys/mount.h        1
3 files changed, 34 insertions(+), 15 deletions(-)
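For reference, a minimal sketch of the allocation-side pattern this patch adopts, simplified from the vfs_subr.c hunk below (the helper name claim_syncer() is hypothetical, and the buffer-object locking done by the real code is omitted): the check-and-claim of mp->mnt_syncer happens under sync_mtx, and the loser of a concurrent mount/update race disposes of its freshly created syncer vnode instead of leaking it.

/*
 * Illustrative sketch only; the authoritative code is in the
 * vfs_allocate_syncvnode() hunk below.  Claim the per-mount syncer
 * slot under sync_mtx; if another thread already installed a syncer
 * vnode, destroy the one we just created rather than leak it.
 */
static void
claim_syncer(struct mount *mp, struct vnode *vp)
{
        mtx_lock(&sync_mtx);
        if (mp->mnt_syncer == NULL) {
                mp->mnt_syncer = vp;    /* won the race, keep vp */
                vp = NULL;
        }
        mtx_unlock(&sync_mtx);
        if (vp != NULL) {               /* lost the race, discard vp */
                vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
                vgone(vp);
                vput(vp);
        }
}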
diff --git a/sys/kern/vfs_mount.c b/sys/kern/vfs_mount.c
index 3254266..7bd10b4 100644
--- a/sys/kern/vfs_mount.c
+++ b/sys/kern/vfs_mount.c
@@ -1031,14 +1031,10 @@ vfs_domount_update(
*/
mp->mnt_optnew = NULL;
- if ((mp->mnt_flag & MNT_RDONLY) == 0) {
- if (mp->mnt_syncer == NULL)
- vfs_allocate_syncvnode(mp);
- } else {
- if (mp->mnt_syncer != NULL)
- vrele(mp->mnt_syncer);
- mp->mnt_syncer = NULL;
- }
+ if ((mp->mnt_flag & MNT_RDONLY) == 0)
+ vfs_allocate_syncvnode(mp);
+ else
+ vfs_deallocate_syncvnode(mp);
end:
vfs_unbusy(mp);
VI_LOCK(vp);
@@ -1318,8 +1314,7 @@ dounmount(mp, flags, td)
mp->mnt_kern_flag &= ~MNTK_ASYNC;
MNT_IUNLOCK(mp);
cache_purgevfs(mp); /* remove cache entries for this file sys */
- if (mp->mnt_syncer != NULL)
- vrele(mp->mnt_syncer);
+ vfs_deallocate_syncvnode(mp);
/*
* For forced unmounts, move process cdir/rdir refs on the fs root
* vnode to the covered vnode. For non-forced unmounts we want
@@ -1358,7 +1353,7 @@ dounmount(mp, flags, td)
}
MNT_ILOCK(mp);
mp->mnt_kern_flag &= ~MNTK_NOINSMNTQ;
- if ((mp->mnt_flag & MNT_RDONLY) == 0 && mp->mnt_syncer == NULL) {
+ if ((mp->mnt_flag & MNT_RDONLY) == 0) {
MNT_IUNLOCK(mp);
vfs_allocate_syncvnode(mp);
MNT_ILOCK(mp);
diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c
index d770b98..6b90888 100644
--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -3405,9 +3405,31 @@ vfs_allocate_syncvnode(struct mount *mp)
/* XXX - vn_syncer_add_to_worklist() also grabs and drops sync_mtx. */
mtx_lock(&sync_mtx);
sync_vnode_count++;
+ if (mp->mnt_syncer == NULL) {
+ mp->mnt_syncer = vp;
+ vp = NULL;
+ }
mtx_unlock(&sync_mtx);
BO_UNLOCK(bo);
- mp->mnt_syncer = vp;
+ if (vp != NULL) {
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
+ vgone(vp);
+ vput(vp);
+ }
+}
+
+void
+vfs_deallocate_syncvnode(struct mount *mp)
+{
+ struct vnode *vp;
+
+ mtx_lock(&sync_mtx);
+ vp = mp->mnt_syncer;
+ if (vp != NULL)
+ mp->mnt_syncer = NULL;
+ mtx_unlock(&sync_mtx);
+ if (vp != NULL)
+ vrele(vp);
}
/*
@@ -3488,15 +3510,16 @@ sync_reclaim(struct vop_reclaim_args *ap)
bo = &vp->v_bufobj;
BO_LOCK(bo);
- vp->v_mount->mnt_syncer = NULL;
+ mtx_lock(&sync_mtx);
+ if (vp->v_mount->mnt_syncer == vp)
+ vp->v_mount->mnt_syncer = NULL;
if (bo->bo_flag & BO_ONWORKLST) {
- mtx_lock(&sync_mtx);
LIST_REMOVE(bo, bo_synclist);
syncer_worklist_len--;
sync_vnode_count--;
- mtx_unlock(&sync_mtx);
bo->bo_flag &= ~BO_ONWORKLST;
}
+ mtx_unlock(&sync_mtx);
BO_UNLOCK(bo);
return (0);
diff --git a/sys/sys/mount.h b/sys/sys/mount.h
index 85f16ff..f68e95d 100644
--- a/sys/sys/mount.h
+++ b/sys/sys/mount.h
@@ -731,6 +731,7 @@ int vfs_busy(struct mount *, int);
int vfs_export /* process mount export info */
(struct mount *, struct export_args *);
void vfs_allocate_syncvnode(struct mount *);
+void vfs_deallocate_syncvnode(struct mount *);
int vfs_donmount(struct thread *td, int fsflags, struct uio *fsoptions);
void vfs_getnewfsid(struct mount *);
struct cdev *vfs_getrootfsid(struct mount *);
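As an illustrative usage note (not part of the commit): after this change callers no longer test mp->mnt_syncer themselves. vfs_allocate_syncvnode() tolerates an already-installed syncer and vfs_deallocate_syncvnode() is NULL-safe, so sync_mtx never has to leave vfs_subr.c. A sketch of a caller, with a hypothetical function name, mirroring the vfs_domount_update() hunk above:

/* Hypothetical caller sketch; the real callers are in vfs_mount.c. */
static void
example_update_syncer(struct mount *mp)
{
        if ((mp->mnt_flag & MNT_RDONLY) == 0)
                vfs_allocate_syncvnode(mp);     /* re-checks mnt_syncer under sync_mtx */
        else
                vfs_deallocate_syncvnode(mp);   /* clears mnt_syncer under sync_mtx */
}

Note that vfs_deallocate_syncvnode() only detaches the vnode from mp->mnt_syncer while holding sync_mtx and calls vrele() after dropping the mutex, presumably so that the vnode release work does not run under sync_mtx.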