-rw-r--r--   sys/fs/nullfs/null.h          1
-rw-r--r--   sys/fs/nullfs/null_subr.c    29
-rw-r--r--   sys/fs/nullfs/null_vfsops.c  60
-rw-r--r--   sys/fs/nullfs/null_vnops.c   23
4 files changed, 67 insertions(+), 46 deletions(-)
diff --git a/sys/fs/nullfs/null.h b/sys/fs/nullfs/null.h
index c8fc5de..0878e55 100644
--- a/sys/fs/nullfs/null.h
+++ b/sys/fs/nullfs/null.h
@@ -56,6 +56,7 @@ struct null_node {
int nullfs_init(struct vfsconf *vfsp);
int nullfs_uninit(struct vfsconf *vfsp);
int null_nodeget(struct mount *mp, struct vnode *target, struct vnode **vpp);
+struct vnode *null_hashget(struct mount *mp, struct vnode *lowervp);
void null_hashrem(struct null_node *xp);
int null_bypass(struct vop_generic_args *ap);
diff --git a/sys/fs/nullfs/null_subr.c b/sys/fs/nullfs/null_subr.c
index 7e3571d..5f926a6 100644
--- a/sys/fs/nullfs/null_subr.c
+++ b/sys/fs/nullfs/null_subr.c
@@ -67,7 +67,6 @@ struct mtx null_hashmtx;
static MALLOC_DEFINE(M_NULLFSHASH, "nullfs_hash", "NULLFS hash table");
MALLOC_DEFINE(M_NULLFSNODE, "nullfs_node", "NULLFS vnode private part");
-static struct vnode * null_hashget(struct mount *, struct vnode *);
static struct vnode * null_hashins(struct mount *, struct null_node *);
/*
@@ -98,7 +97,7 @@ nullfs_uninit(vfsp)
* Return a VREF'ed alias for lower vnode if already exists, else 0.
* Lower vnode should be locked on entry and will be left locked on exit.
*/
-static struct vnode *
+struct vnode *
null_hashget(mp, lowervp)
struct mount *mp;
struct vnode *lowervp;
@@ -209,14 +208,10 @@ null_nodeget(mp, lowervp, vpp)
struct vnode *vp;
int error;
- /*
- * The insmntque1() call below requires the exclusive lock on
- * the nullfs vnode.
- */
- ASSERT_VOP_ELOCKED(lowervp, "lowervp");
- KASSERT(lowervp->v_usecount >= 1, ("Unreferenced vnode %p\n", lowervp));
+ ASSERT_VOP_LOCKED(lowervp, "lowervp");
+ KASSERT(lowervp->v_usecount >= 1, ("Unreferenced vnode %p", lowervp));
- /* Lookup the hash firstly */
+ /* Lookup the hash firstly. */
*vpp = null_hashget(mp, lowervp);
if (*vpp != NULL) {
vrele(lowervp);
@@ -224,6 +219,19 @@ null_nodeget(mp, lowervp, vpp)
}
/*
+ * The insmntque1() call below requires the exclusive lock on
+ * the nullfs vnode. Upgrade the lock now if hash failed to
+ * provide ready to use vnode.
+ */
+ if (VOP_ISLOCKED(lowervp) != LK_EXCLUSIVE) {
+ vn_lock(lowervp, LK_UPGRADE | LK_RETRY);
+ if ((lowervp->v_iflag & VI_DOOMED) != 0) {
+ vput(lowervp);
+ return (ENOENT);
+ }
+ }
+
+ /*
* We do not serialize vnode creation, instead we will check for
* duplicates later, when adding new vnode to hash.
* Note that duplicate can only appear in hash if the lowervp is
@@ -233,8 +241,7 @@ null_nodeget(mp, lowervp, vpp)
* might cause a bogus v_data pointer to get dereferenced
* elsewhere if MALLOC should block.
*/
- xp = malloc(sizeof(struct null_node),
- M_NULLFSNODE, M_WAITOK);
+ xp = malloc(sizeof(struct null_node), M_NULLFSNODE, M_WAITOK);
error = getnewvnode("null", mp, &null_vnodeops, &vp);
if (error) {
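
The null_nodeget() change above lets callers pass in a lower vnode that is only shared-locked and defers the exclusive lock to the slow path, after the hash lookup misses. A minimal sketch of that upgrade step, assuming the usual kernel headers and using only the primitives visible in the hunk; the helper name is hypothetical and not part of the patch:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/lockmgr.h>
#include <sys/mount.h>
#include <sys/vnode.h>

/*
 * Sketch of the lock-upgrade step added to null_nodeget(): the caller
 * may hold lowervp with only a shared lock, but insmntque1() on the new
 * nullfs vnode (which will share lowervp's lock) needs it exclusive.
 * The helper name is hypothetical.
 */
static int
null_upgrade_lowervp(struct vnode *lowervp)
{

	if (VOP_ISLOCKED(lowervp) != LK_EXCLUSIVE) {
		/* LK_UPGRADE may drop the lock; LK_RETRY re-acquires it. */
		vn_lock(lowervp, LK_UPGRADE | LK_RETRY);
		if ((lowervp->v_iflag & VI_DOOMED) != 0) {
			/* The vnode was reclaimed while unlocked. */
			vput(lowervp);
			return (ENOENT);
		}
	}
	return (0);
}
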
diff --git a/sys/fs/nullfs/null_vfsops.c b/sys/fs/nullfs/null_vfsops.c
index 08f722a..9c2851b 100644
--- a/sys/fs/nullfs/null_vfsops.c
+++ b/sys/fs/nullfs/null_vfsops.c
@@ -65,6 +65,7 @@ static vfs_statfs_t nullfs_statfs;
static vfs_unmount_t nullfs_unmount;
static vfs_vget_t nullfs_vget;
static vfs_extattrctl_t nullfs_extattrctl;
+static vfs_reclaim_lowervp_t nullfs_reclaim_lowervp;
/*
* Mount null layer
@@ -121,8 +122,10 @@ nullfs_mount(struct mount *mp)
*/
NDINIT(ndp, LOOKUP, FOLLOW|LOCKLEAF, UIO_SYSSPACE, target, curthread);
error = namei(ndp);
+
/*
* Re-lock vnode.
+ * XXXKIB This is deadlock-prone as well.
*/
if (isvnunlocked)
vn_lock(mp->mnt_vnodecovered, LK_EXCLUSIVE | LK_RETRY);
@@ -146,7 +149,7 @@ nullfs_mount(struct mount *mp)
}
xmp = (struct null_mount *) malloc(sizeof(struct null_mount),
- M_NULLFSMNT, M_WAITOK); /* XXX */
+ M_NULLFSMNT, M_WAITOK);
/*
* Save reference to underlying FS
@@ -186,10 +189,15 @@ nullfs_mount(struct mount *mp)
}
MNT_ILOCK(mp);
mp->mnt_kern_flag |= lowerrootvp->v_mount->mnt_kern_flag &
- (MNTK_MPSAFE | MNTK_SHARED_WRITES);
+ (MNTK_MPSAFE | MNTK_SHARED_WRITES | MNTK_LOOKUP_SHARED |
+ MNTK_EXTENDED_SHARED);
+ mp->mnt_kern_flag |= MNTK_LOOKUP_EXCL_DOTDOT;
MNT_IUNLOCK(mp);
mp->mnt_data = xmp;
vfs_getnewfsid(mp);
+ MNT_ILOCK(xmp->nullm_vfs);
+ TAILQ_INSERT_TAIL(&xmp->nullm_vfs->mnt_uppers, mp, mnt_upper_link);
+ MNT_IUNLOCK(xmp->nullm_vfs);
vfs_mountedfrom(mp, target);
@@ -206,14 +214,16 @@ nullfs_unmount(mp, mntflags)
struct mount *mp;
int mntflags;
{
- void *mntdata;
- int error;
- int flags = 0;
+ struct null_mount *mntdata;
+ struct mount *ump;
+ int error, flags;
NULLFSDEBUG("nullfs_unmount: mp = %p\n", (void *)mp);
if (mntflags & MNT_FORCE)
- flags |= FORCECLOSE;
+ flags = FORCECLOSE;
+ else
+ flags = 0;
/* There is 1 extra root vnode reference (nullm_rootvp). */
error = vflush(mp, 1, flags, curthread);
@@ -224,9 +234,17 @@ nullfs_unmount(mp, mntflags)
* Finally, throw away the null_mount structure
*/
mntdata = mp->mnt_data;
+ ump = mntdata->nullm_vfs;
+ MNT_ILOCK(ump);
+ while ((ump->mnt_kern_flag & MNTK_VGONE_UPPER) != 0) {
+ ump->mnt_kern_flag |= MNTK_VGONE_WAITER;
+ msleep(&ump->mnt_uppers, &ump->mnt_mtx, 0, "vgnupw", 0);
+ }
+ TAILQ_REMOVE(&ump->mnt_uppers, mp, mnt_upper_link);
+ MNT_IUNLOCK(ump);
mp->mnt_data = NULL;
free(mntdata, M_NULLFSMNT);
- return 0;
+ return (0);
}
static int
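
The nullfs_unmount() change above is the counterpart of the mount-time TAILQ_INSERT_TAIL onto the lower mount's mnt_uppers list: before removing itself from that list, the nullfs mount waits out any lower-vnode reclamation notification still in flight, signalled by MNTK_VGONE_UPPER and acknowledged with a wakeup on &ump->mnt_uppers. A sketch of that drain as a stand-alone helper, assuming the usual mount headers; the function name is illustrative only:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mount.h>
#include <sys/queue.h>

/*
 * Sketch of the MNTK_VGONE_UPPER drain from nullfs_unmount().  "ump" is
 * the lower mount, "mp" the nullfs mount being unmounted.  The notifier
 * side, which sets MNTK_VGONE_UPPER around its calls into
 * vfs_reclaim_lowervp and then wakes &ump->mnt_uppers, is in the
 * generic VFS code and is not part of this diff.
 */
static void
null_drain_uppers(struct mount *ump, struct mount *mp)
{

	MNT_ILOCK(ump);
	while ((ump->mnt_kern_flag & MNTK_VGONE_UPPER) != 0) {
		ump->mnt_kern_flag |= MNTK_VGONE_WAITER;
		msleep(&ump->mnt_uppers, &ump->mnt_mtx, 0, "vgnupw", 0);
	}
	TAILQ_REMOVE(&ump->mnt_uppers, mp, mnt_upper_link);
	MNT_IUNLOCK(ump);
}
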
@@ -316,13 +334,10 @@ nullfs_vget(mp, ino, flags, vpp)
KASSERT((flags & LK_TYPE_MASK) != 0,
("nullfs_vget: no lock requested"));
- flags &= ~LK_TYPE_MASK;
- flags |= LK_EXCLUSIVE;
error = VFS_VGET(MOUNTTONULLMOUNT(mp)->nullm_vfs, ino, flags, vpp);
- if (error)
+ if (error != 0)
return (error);
-
return (null_nodeget(mp, *vpp, vpp));
}
@@ -334,11 +349,11 @@ nullfs_fhtovp(mp, fidp, flags, vpp)
struct vnode **vpp;
{
int error;
- error = VFS_FHTOVP(MOUNTTONULLMOUNT(mp)->nullm_vfs, fidp, LK_EXCLUSIVE,
+
+ error = VFS_FHTOVP(MOUNTTONULLMOUNT(mp)->nullm_vfs, fidp, flags,
vpp);
- if (error)
+ if (error != 0)
return (error);
-
return (null_nodeget(mp, *vpp, vpp));
}
@@ -350,10 +365,22 @@ nullfs_extattrctl(mp, cmd, filename_vp, namespace, attrname)
int namespace;
const char *attrname;
{
- return VFS_EXTATTRCTL(MOUNTTONULLMOUNT(mp)->nullm_vfs, cmd, filename_vp,
- namespace, attrname);
+
+ return (VFS_EXTATTRCTL(MOUNTTONULLMOUNT(mp)->nullm_vfs, cmd,
+ filename_vp, namespace, attrname));
}
+static void
+nullfs_reclaim_lowervp(struct mount *mp, struct vnode *lowervp)
+{
+ struct vnode *vp;
+
+ vp = null_hashget(mp, lowervp);
+ if (vp == NULL)
+ return;
+ vgone(vp);
+ vn_lock(lowervp, LK_EXCLUSIVE | LK_RETRY);
+}
static struct vfsops null_vfsops = {
.vfs_extattrctl = nullfs_extattrctl,
@@ -367,6 +394,7 @@ static struct vfsops null_vfsops = {
.vfs_uninit = nullfs_uninit,
.vfs_unmount = nullfs_unmount,
.vfs_vget = nullfs_vget,
+ .vfs_reclaim_lowervp = nullfs_reclaim_lowervp,
};
VFS_SET(null_vfsops, nullfs, VFCF_LOOPBACK | VFCF_JAIL);
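
The new vfs_reclaim_lowervp slot above gives the lower filesystem a way to tell nullfs that one of its vnodes is being reclaimed, so the cached nullfs alias can be destroyed instead of pointing at a dead lower vnode. The caller of this hook lives in the generic VFS code and is not shown in this diff; the loop below is only a guess at its shape, built from the mnt_uppers list and the vfsops slot above, and it deliberately glosses over the MNTK_VGONE_UPPER bookkeeping and the locking needed to walk the list safely:

#include <sys/param.h>
#include <sys/mount.h>
#include <sys/queue.h>
#include <sys/vnode.h>

/*
 * Hypothetical caller of the new hook (not in this diff): when a lower
 * vnode is reclaimed, walk the upper mounts registered on the lower
 * mount and let each drop its alias.  A real implementation must also
 * set MNTK_VGONE_UPPER, tolerate the list changing while the mount
 * interlock is dropped around the calls, and wake MNTK_VGONE_WAITER
 * waiters when done.
 */
static void
vgone_notify_uppers_sketch(struct vnode *lowervp)
{
	struct mount *mp, *ump;

	ump = lowervp->v_mount;
	TAILQ_FOREACH(mp, &ump->mnt_uppers, mnt_upper_link)
		mp->mnt_op->vfs_reclaim_lowervp(mp, lowervp);
}
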
diff --git a/sys/fs/nullfs/null_vnops.c b/sys/fs/nullfs/null_vnops.c
index 6f40233..5fb020b 100644
--- a/sys/fs/nullfs/null_vnops.c
+++ b/sys/fs/nullfs/null_vnops.c
@@ -665,33 +665,18 @@ null_unlock(struct vop_unlock_args *ap)
}
/*
- * There is no way to tell that someone issued remove/rmdir operation
- * on the underlying filesystem. For now we just have to release lowervp
- * as soon as possible.
- *
- * Note, we can't release any resources nor remove vnode from hash before
- * appropriate VXLOCK stuff is done because other process can find this
- * vnode in hash during inactivation and may be sitting in vget() and waiting
- * for null_inactive to unlock vnode. Thus we will do all those in VOP_RECLAIM.
+ * XXXKIB
*/
static int
-null_inactive(struct vop_inactive_args *ap)
+null_inactive(struct vop_inactive_args *ap __unused)
{
- struct vnode *vp = ap->a_vp;
-
- vp->v_object = NULL;
-
- /*
- * If this is the last reference, then free up the vnode
- * so as not to tie up the lower vnodes.
- */
- vrecycle(vp);
return (0);
}
/*
- * Now, the VXLOCK is in force and we're free to destroy the null vnode.
+ * Now, the nullfs vnode and, due to the sharing lock, the lower
+ * vnode, are exclusively locked, and we shall destroy the null vnode.
*/
static int
null_reclaim(struct vop_reclaim_args *ap)
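
The rewritten comment above leans on the way nullfs shares locks: the upper vnode has no private lock, null_nodeget() points its v_vnlock at the lower vnode's lock, so locking the nullfs vnode exclusively also locks the lower vnode exclusively. A minimal sketch of that arrangement, assuming <sys/vnode.h>; the helper name is illustrative and the assignment, while it matches long-standing nullfs practice, is not part of this diff:

#include <sys/param.h>
#include <sys/vnode.h>

/*
 * Sketch of the lock sharing the comment above depends on: the nullfs
 * vnode and its lower vnode use the same lockmgr lock, so holding one
 * exclusively holds the other.  The helper name is illustrative only.
 */
static void
null_share_lock(struct vnode *vp, struct vnode *lowervp)
{

	vp->v_vnlock = lowervp->v_vnlock;
}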