commit    36d60f3bb735f38bbec69f4cc40ef27a24629c54
tree      d6d549e791dd7c4a627af5beb25ec44a2fcdd02d
parent    932794c27cf2ec2241eadf3b4891463fa9eb2432
author    kan <kan@FreeBSD.org>  2003-11-05 04:30:08 +0000
committer kan <kan@FreeBSD.org>  2003-11-05 04:30:08 +0000
Remove mntvnode_mtx and replace it with a per-mountpoint mutex.

Introduce two new macros, MNT_ILOCK(mp)/MNT_IUNLOCK(mp), to operate on this
mutex transparently. Eventually the new mutex will protect more fields in
struct mount, not only the vnode list.

Discussed with:	jeff
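The macro definitions themselves land in sys/sys/mount.h, outside the
sys/kern subtree this page is limited to; the mtx_init() calls in the diff
confirm the new field is named mnt_mtx. A minimal sketch of what the macros
presumably expand to:

	/* sys/sys/mount.h (not part of this diff) -- sketch of the new
	 * per-mount interlock macros, assuming they are thin wrappers
	 * around the mnt_mtx field initialized below. */
	#define	MNT_ILOCK(mp)	mtx_lock(&(mp)->mnt_mtx)
	#define	MNT_IUNLOCK(mp)	mtx_unlock(&(mp)->mnt_mtx)

Hiding the lock behind macros keeps every call site unchanged if the
interlock's implementation is revised again later.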
Diffstat (limited to 'sys/kern')
-rw-r--r--  sys/kern/vfs_default.c  | 10
-rw-r--r--  sys/kern/vfs_mount.c    |  7
-rw-r--r--  sys/kern/vfs_subr.c     | 51
3 files changed, 35 insertions(+), 33 deletions(-)
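Every converted traversal below follows the same idiom: hold the per-mount
interlock while walking mnt_nvnodelist, drop it across any call that may
sleep (vget(), vn_lock(), vgonel()), and reacquire it before advancing,
restarting the scan when the list may have changed underneath. Distilled
from the vfs_stdsync() hunk, roughly:

	MNT_ILOCK(mp);
	loop:
	for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp != NULL; vp = nvp) {
		nvp = TAILQ_NEXT(vp, v_nmntvnodes);
		VI_LOCK(vp);
		MNT_IUNLOCK(mp);		/* drop: vget() may sleep */
		if ((error = vget(vp, lockreq, td)) != 0) {
			MNT_ILOCK(mp);
			if (error == ENOENT)	/* vnode was recycled */
				goto loop;	/* restart the scan */
			continue;
		}
		/* ... operate on the locked vnode ... */
		vput(vp);
		MNT_ILOCK(mp);			/* reacquire before advancing */
	}
	MNT_IUNLOCK(mp);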
diff --git a/sys/kern/vfs_default.c b/sys/kern/vfs_default.c
index ffce8ff..c3b7a19 100644
--- a/sys/kern/vfs_default.c
+++ b/sys/kern/vfs_default.c
@@ -879,7 +879,7 @@ vfs_stdsync(mp, waitfor, cred, td)
/*
* Force stale buffer cache information to be flushed.
*/
- mtx_lock(&mntvnode_mtx);
+ MNT_ILOCK(mp);
loop:
for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp != NULL; vp = nvp) {
/*
@@ -896,10 +896,10 @@ loop:
VI_UNLOCK(vp);
continue;
}
- mtx_unlock(&mntvnode_mtx);
+ MNT_IUNLOCK(mp);
if ((error = vget(vp, lockreq, td)) != 0) {
- mtx_lock(&mntvnode_mtx);
+ MNT_ILOCK(mp);
if (error == ENOENT)
goto loop;
continue;
@@ -910,9 +910,9 @@ loop:
VOP_UNLOCK(vp, 0, td);
vrele(vp);
- mtx_lock(&mntvnode_mtx);
+ MNT_ILOCK(mp);
}
- mtx_unlock(&mntvnode_mtx);
+ MNT_IUNLOCK(mp);
return (allerror);
}
diff --git a/sys/kern/vfs_mount.c b/sys/kern/vfs_mount.c
index a962bcc..cea2d41 100644
--- a/sys/kern/vfs_mount.c
+++ b/sys/kern/vfs_mount.c
@@ -116,9 +116,6 @@ struct mntlist mountlist = TAILQ_HEAD_INITIALIZER(mountlist);
/* For any iteration/modification of mountlist */
struct mtx mountlist_mtx;
-/* For any iteration/modification of mnt_vnodelist */
-struct mtx mntvnode_mtx;
-
/*
* The vnode of the system's root (/ in the filesystem, without chroot
* active.)
@@ -662,6 +659,7 @@ vfs_nmount(td, fsflags, fsoptions)
TAILQ_INIT(&mp->mnt_nvnodelist);
TAILQ_INIT(&mp->mnt_reservedvnlist);
mp->mnt_nvnodelistsize = 0;
+ mtx_init(&mp->mnt_mtx, "struct mount mtx", NULL, MTX_DEF);
lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, LK_NOPAUSE);
(void)vfs_busy(mp, LK_NOWAIT, 0, td);
mp->mnt_op = vfsp->vfc_vfsops;
@@ -1029,6 +1027,7 @@ vfs_mount(td, fstype, fspath, fsflags, fsdata)
TAILQ_INIT(&mp->mnt_nvnodelist);
TAILQ_INIT(&mp->mnt_reservedvnlist);
mp->mnt_nvnodelistsize = 0;
+ mtx_init(&mp->mnt_mtx, "struct mount mtx", NULL, MTX_DEF);
lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, LK_NOPAUSE);
(void)vfs_busy(mp, LK_NOWAIT, 0, td);
mp->mnt_op = vfsp->vfc_vfsops;
@@ -1371,6 +1370,7 @@ dounmount(mp, flags, td)
panic("unmount: dangling vnode");
lockmgr(&mp->mnt_lock, LK_RELEASE | LK_INTERLOCK, &mountlist_mtx, td);
lockdestroy(&mp->mnt_lock);
+ mtx_destroy(&mp->mnt_mtx);
if (coveredvp != NULL)
vrele(coveredvp);
if (mp->mnt_kern_flag & MNTK_MWAIT)
@@ -1408,6 +1408,7 @@ vfs_rootmountalloc(fstypename, devname, mpp)
if (vfsp == NULL)
return (ENODEV);
mp = malloc((u_long)sizeof(struct mount), M_MOUNT, M_WAITOK | M_ZERO);
+ mtx_init(&mp->mnt_mtx, "struct mount mtx", NULL, MTX_DEF);
lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, LK_NOPAUSE);
(void)vfs_busy(mp, LK_NOWAIT, 0, td);
TAILQ_INIT(&mp->mnt_nvnodelist);
diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c
index 1b48f08..dc9934b 100644
--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -476,7 +476,6 @@ vntblinit(void *dummy __unused)
(5 * (sizeof(struct vm_object) + sizeof(struct vnode))));
minvnodes = desiredvnodes / 4;
mtx_init(&mountlist_mtx, "mountlist", NULL, MTX_DEF);
- mtx_init(&mntvnode_mtx, "mntvnode", NULL, MTX_DEF);
mtx_init(&mntid_mtx, "mntid", NULL, MTX_DEF);
mtx_init(&spechash_mtx, "spechash", NULL, MTX_DEF);
TAILQ_INIT(&vnode_free_list);
@@ -710,7 +709,7 @@ vlrureclaim(struct mount *mp)
trigger = cnt.v_page_count * 2 / usevnodes;
done = 0;
- mtx_lock(&mntvnode_mtx);
+ MNT_ILOCK(mp);
count = mp->mnt_nvnodelistsize / 10 + 1;
while (count && (vp = TAILQ_FIRST(&mp->mnt_nvnodelist)) != NULL) {
TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
@@ -722,16 +721,16 @@ vlrureclaim(struct mount *mp)
if (VMIGHTFREE(vp) && /* critical path opt */
(vp->v_object == NULL ||
vp->v_object->resident_page_count < trigger)) {
- mtx_unlock(&mntvnode_mtx);
+ MNT_IUNLOCK(mp);
vgonel(vp, curthread);
done++;
- mtx_lock(&mntvnode_mtx);
+ MNT_ILOCK(mp);
} else
VI_UNLOCK(vp);
}
--count;
}
- mtx_unlock(&mntvnode_mtx);
+ MNT_IUNLOCK(mp);
return done;
}
@@ -1051,24 +1050,26 @@ insmntque(vp, mp)
register struct mount *mp;
{
- mtx_lock(&mntvnode_mtx);
/*
* Delete from old mount point vnode list, if on one.
*/
if (vp->v_mount != NULL) {
+ MNT_ILOCK(vp->v_mount);
KASSERT(vp->v_mount->mnt_nvnodelistsize > 0,
("bad mount point vnode list size"));
TAILQ_REMOVE(&vp->v_mount->mnt_nvnodelist, vp, v_nmntvnodes);
vp->v_mount->mnt_nvnodelistsize--;
+ MNT_IUNLOCK(vp->v_mount);
}
/*
* Insert into list of vnodes for the new mount point, if available.
*/
if ((vp->v_mount = mp) != NULL) {
+ MNT_ILOCK(vp->v_mount);
TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
mp->mnt_nvnodelistsize++;
+ MNT_IUNLOCK(vp->v_mount);
}
- mtx_unlock(&mntvnode_mtx);
}
/*
@@ -2368,7 +2369,7 @@ vflush(mp, rootrefs, flags)
vput(rootvp);
}
- mtx_lock(&mntvnode_mtx);
+ MNT_ILOCK(mp);
loop:
for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp; vp = nvp) {
/*
@@ -2380,10 +2381,10 @@ loop:
nvp = TAILQ_NEXT(vp, v_nmntvnodes);
VI_LOCK(vp);
- mtx_unlock(&mntvnode_mtx);
+ MNT_IUNLOCK(mp);
error = vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE, td);
if (error) {
- mtx_lock(&mntvnode_mtx);
+ MNT_ILOCK(mp);
goto loop;
}
/*
@@ -2391,7 +2392,7 @@ loop:
*/
if ((flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM)) {
VOP_UNLOCK(vp, 0, td);
- mtx_lock(&mntvnode_mtx);
+ MNT_ILOCK(mp);
continue;
}
/*
@@ -2407,7 +2408,7 @@ loop:
(error == 0 && vattr.va_nlink > 0)) &&
(vp->v_writecount == 0 || vp->v_type != VREG)) {
VOP_UNLOCK(vp, LK_INTERLOCK, td);
- mtx_lock(&mntvnode_mtx);
+ MNT_ILOCK(mp);
continue;
}
} else
@@ -2421,7 +2422,7 @@ loop:
*/
if (vp->v_usecount == 0) {
vgonel(vp, td);
- mtx_lock(&mntvnode_mtx);
+ MNT_ILOCK(mp);
continue;
}
@@ -2435,7 +2436,7 @@ loop:
vgonel(vp, td);
else
vgonechrl(vp, td);
- mtx_lock(&mntvnode_mtx);
+ MNT_ILOCK(mp);
continue;
}
#ifdef DIAGNOSTIC
@@ -2443,10 +2444,10 @@ loop:
vprint("vflush: busy vnode", vp);
#endif
VI_UNLOCK(vp);
- mtx_lock(&mntvnode_mtx);
+ MNT_ILOCK(mp);
busy++;
}
- mtx_unlock(&mntvnode_mtx);
+ MNT_IUNLOCK(mp);
if (rootrefs > 0 && (flags & FORCECLOSE) == 0) {
/*
* If just the root vnode is busy, and if its refcount
@@ -2481,10 +2482,10 @@ vlruvp(struct vnode *vp)
struct mount *mp;
if ((mp = vp->v_mount) != NULL) {
- mtx_lock(&mntvnode_mtx);
+ MNT_ILOCK(mp);
TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
- mtx_unlock(&mntvnode_mtx);
+ MNT_IUNLOCK(mp);
}
#endif
}
@@ -3073,7 +3074,7 @@ sysctl_vnode(SYSCTL_HANDLER_ARGS)
TAILQ_FOREACH(mp, &mountlist, mnt_list) {
if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td))
continue;
- mtx_lock(&mntvnode_mtx);
+ MNT_ILOCK(mp);
TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
if (n == len)
break;
@@ -3122,7 +3123,7 @@ sysctl_vnode(SYSCTL_HANDLER_ARGS)
vrele(vp);
++n;
}
- mtx_unlock(&mntvnode_mtx);
+ MNT_IUNLOCK(mp);
mtx_lock(&mountlist_mtx);
vfs_unbusy(mp, td);
if (n == len)
@@ -3201,7 +3202,7 @@ vfs_msync(struct mount *mp, int flags)
GIANT_REQUIRED;
tries = 5;
- mtx_lock(&mntvnode_mtx);
+ MNT_ILOCK(mp);
loop:
for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp != NULL; vp = nvp) {
if (vp->v_mount != mp) {
@@ -3219,13 +3220,13 @@ loop:
if ((vp->v_iflag & VI_OBJDIRTY) &&
(flags == MNT_WAIT || VOP_ISLOCKED(vp, NULL) == 0)) {
- mtx_unlock(&mntvnode_mtx);
+ MNT_IUNLOCK(mp);
if (!vget(vp,
LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK,
curthread)) {
if (vp->v_vflag & VV_NOSYNC) { /* unlinked */
vput(vp);
- mtx_lock(&mntvnode_mtx);
+ MNT_ILOCK(mp);
continue;
}
@@ -3238,7 +3239,7 @@ loop:
}
vput(vp);
}
- mtx_lock(&mntvnode_mtx);
+ MNT_ILOCK(mp);
if (TAILQ_NEXT(vp, v_nmntvnodes) != nvp) {
if (--tries > 0)
goto loop;
@@ -3247,7 +3248,7 @@ loop:
} else
VI_UNLOCK(vp);
}
- mtx_unlock(&mntvnode_mtx);
+ MNT_IUNLOCK(mp);
}