author     dfr <dfr@FreeBSD.org>  1997-04-04 17:46:21 +0000
committer  dfr <dfr@FreeBSD.org>  1997-04-04 17:46:21 +0000
commit     60008c7902ffea9987742942288eb83c2dbdd15e (patch)
tree       590bf5529562a70c9080bde604b60fd21002126b /sys/kern
parent     1428ad29019cc6f5eab10624e7ef3316fce39757 (diff)
Add a function vop_sharedlock, which is a copy of vop_nolock without the
implementation #ifdef'ed out. NFS can use it for now; as soon as the other
filesystems' locking is fixed, it can go away.

Print the vnode address in vprint() for easier debugging.
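
For illustration: a filesystem opts in by pointing the lock entry of its
vnodeop vector at vop_sharedlock. A minimal sketch, assuming the
vnodeopv_entry_desc conventions of this vintage; the vector name and the
companion entries are hypothetical, not part of this commit:

/*
 * Hypothetical example: route the lock op through vop_sharedlock and
 * pair it with the existing vop_nounlock/vop_noislocked stubs, which
 * release and query the same auxiliary lock hung off v_vnlock.
 */
static struct vnodeopv_entry_desc sample_vnodeop_entries[] = {
	{ &vop_default_desc,	(vop_t *) vn_default_error },
	{ &vop_lock_desc,	(vop_t *) vop_sharedlock },
	{ &vop_unlock_desc,	(vop_t *) vop_nounlock },
	{ &vop_islocked_desc,	(vop_t *) vop_noislocked },
	{ NULL, NULL }
};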
Diffstat (limited to 'sys/kern')
-rw-r--r--  sys/kern/vfs_export.c    75
-rw-r--r--  sys/kern/vfs_extattr.c   20
-rw-r--r--  sys/kern/vfs_lookup.c     8
-rw-r--r--  sys/kern/vfs_subr.c      75
-rw-r--r--  sys/kern/vfs_syscalls.c  20
-rw-r--r--  sys/kern/vfs_vnops.c      4
6 files changed, 194 insertions, 8 deletions
diff --git a/sys/kern/vfs_export.c b/sys/kern/vfs_export.c
index 4f3bc04..92a9edf 100644
--- a/sys/kern/vfs_export.c
+++ b/sys/kern/vfs_export.c
@@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)vfs_subr.c 8.31 (Berkeley) 5/26/95
- * $Id: vfs_subr.c,v 1.80 1997/03/05 04:54:54 davidg Exp $
+ * $Id: vfs_subr.c,v 1.81 1997/04/01 13:05:34 bde Exp $
*/
/*
@@ -871,6 +871,73 @@ vget(vp, flags, p)
* count is maintained in an auxiliary vnode lock structure.
*/
int
+vop_sharedlock(ap)
+ struct vop_lock_args /* {
+ struct vnode *a_vp;
+ int a_flags;
+ struct proc *a_p;
+ } */ *ap;
+{
+ /*
+ * This code cannot be used until all the non-locking filesystems
+ * (notably NFS) are converted to properly lock and release nodes.
+ * Also, certain vnode operations change the locking state within
+ * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
+ * and symlink). Ideally these operations should not change the
+ * lock state, but should be changed to let the caller of the
+ * function unlock them. Otherwise all intermediate vnode layers
+ * (such as union, umapfs, etc) must catch these functions to do
+ * the necessary locking at their layer. Note that the inactive
+ * and lookup operations also change their lock state, but this
+ * cannot be avoided, so these two operations will always need
+ * to be handled in intermediate layers.
+ */
+ struct vnode *vp = ap->a_vp;
+ int vnflags, flags = ap->a_flags;
+
+ if (vp->v_vnlock == NULL) {
+ if ((flags & LK_TYPE_MASK) == LK_DRAIN)
+ return (0);
+ MALLOC(vp->v_vnlock, struct lock *, sizeof(struct lock),
+ M_VNODE, M_WAITOK);
+ lockinit(vp->v_vnlock, PVFS, "vnlock", 0, 0);
+ }
+ switch (flags & LK_TYPE_MASK) {
+ case LK_DRAIN:
+ vnflags = LK_DRAIN;
+ break;
+ case LK_EXCLUSIVE:
+#ifdef DEBUG_VFS_LOCKS
+ /*
+ * Normally, we use shared locks here, but that confuses
+ * the locking assertions.
+ */
+ vnflags = LK_EXCLUSIVE;
+ break;
+#endif
+ case LK_SHARED:
+ vnflags = LK_SHARED;
+ break;
+ case LK_UPGRADE:
+ case LK_EXCLUPGRADE:
+ case LK_DOWNGRADE:
+ return (0);
+ case LK_RELEASE:
+ default:
+ panic("vop_nolock: bad operation %d", flags & LK_TYPE_MASK);
+ }
+ if (flags & LK_INTERLOCK)
+ vnflags |= LK_INTERLOCK;
+ return(lockmgr(vp->v_vnlock, vnflags, &vp->v_interlock, ap->a_p));
+}
+
+/*
+ * Stubs to use when there is no locking to be done on the underlying object.
+ * A minimal shared lock is necessary to ensure that the underlying object
+ * is not revoked while an operation is in progress. So, an active shared
+ * count is maintained in an auxiliary vnode lock structure.
+ */
+int
vop_nolock(ap)
struct vop_lock_args /* {
struct vnode *a_vp;
@@ -1291,8 +1358,10 @@ vclean(struct vnode *vp, int flags, struct proc *p)
vrele(vp);
cache_purge(vp);
if (vp->v_vnlock) {
+#ifdef DIAGNOSTIC
if ((vp->v_vnlock->lk_flags & LK_DRAINED) == 0)
vprint("vclean: lock not drained", vp);
+#endif
FREE(vp->v_vnlock, M_VNODE);
vp->v_vnlock = NULL;
}
@@ -1581,7 +1650,9 @@ vprint(label, vp)
char buf[64];
if (label != NULL)
- printf("%s: ", label);
+ printf("%s: %x: ", label, vp);
+ else
+ printf("%x: ", vp);
printf("type %s, usecount %d, writecount %d, refcount %ld,",
typename[vp->v_type], vp->v_usecount, vp->v_writecount,
vp->v_holdcnt);
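
A note on what callers see: with vop_sharedlock wired in, the ordinary
vn_lock()/VOP_UNLOCK() sequence takes and drops a real lockmgr lock on
v_vnlock, and exclusive requests are downgraded to shared except under
DEBUG_VFS_LOCKS. A hedged usage sketch; the function and its error
handling are illustrative only:

/*
 * Illustrative caller: vop_sharedlock is not called directly, it is
 * reached through the vnodeop vector when vn_lock() issues VOP_LOCK().
 */
int
sample_vnode_op(struct vnode *vp, struct proc *p)
{
	int error;

	error = vn_lock(vp, LK_SHARED | LK_RETRY, p);
	if (error)
		return (error);
	/* The vnode cannot be revoked while the shared count is held. */
	VOP_UNLOCK(vp, 0, p);
	return (0);
}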
diff --git a/sys/kern/vfs_extattr.c b/sys/kern/vfs_extattr.c
index d97f1ca..6c27097 100644
--- a/sys/kern/vfs_extattr.c
+++ b/sys/kern/vfs_extattr.c
@@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)vfs_syscalls.c 8.13 (Berkeley) 4/15/94
- * $Id: vfs_syscalls.c,v 1.62 1997/03/31 12:02:42 peter Exp $
+ * $Id: vfs_syscalls.c,v 1.63 1997/03/31 12:21:37 peter Exp $
*/
/*
@@ -1028,6 +1028,8 @@ mknod(p, uap, retval)
if (vp)
vrele(vp);
}
+ ASSERT_VOP_UNLOCKED(nd.ni_dvp, "mknod");
+ ASSERT_VOP_UNLOCKED(nd.ni_vp, "mknod");
return (error);
}
@@ -1124,6 +1126,8 @@ link(p, uap, retval)
}
}
vrele(vp);
+ ASSERT_VOP_UNLOCKED(nd.ni_dvp, "link");
+ ASSERT_VOP_UNLOCKED(nd.ni_vp, "link");
return (error);
}
@@ -1171,6 +1175,8 @@ symlink(p, uap, retval)
vattr.va_mode = ACCESSPERMS &~ p->p_fd->fd_cmask;
VOP_LEASE(nd.ni_dvp, p, p->p_ucred, LEASE_WRITE);
error = VOP_SYMLINK(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, &vattr, path);
+ ASSERT_VOP_UNLOCKED(nd.ni_dvp, "symlink");
+ ASSERT_VOP_UNLOCKED(nd.ni_vp, "symlink");
out:
FREE(path, M_NAMEI);
return (error);
@@ -1212,6 +1218,8 @@ undelete(p, uap, retval)
if (error = VOP_WHITEOUT(nd.ni_dvp, &nd.ni_cnd, DELETE))
VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
vput(nd.ni_dvp);
+ ASSERT_VOP_UNLOCKED(nd.ni_dvp, "undelete");
+ ASSERT_VOP_UNLOCKED(nd.ni_vp, "undelete");
return (error);
}
@@ -1269,6 +1277,8 @@ unlink(p, uap, retval)
if (vp != NULLVP)
vput(vp);
}
+ ASSERT_VOP_UNLOCKED(nd.ni_dvp, "unlink");
+ ASSERT_VOP_UNLOCKED(nd.ni_vp, "unlink");
return (error);
}
@@ -2272,6 +2282,10 @@ out:
vrele(fvp);
}
vrele(tond.ni_startdir);
+ ASSERT_VOP_UNLOCKED(fromnd.ni_dvp, "rename");
+ ASSERT_VOP_UNLOCKED(fromnd.ni_vp, "rename");
+ ASSERT_VOP_UNLOCKED(tond.ni_dvp, "rename");
+ ASSERT_VOP_UNLOCKED(tond.ni_vp, "rename");
FREE(tond.ni_cnd.cn_pnbuf, M_NAMEI);
out1:
if (fromnd.ni_startdir)
@@ -2327,6 +2341,8 @@ mkdir(p, uap, retval)
error = VOP_MKDIR(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, &vattr);
if (!error)
vput(nd.ni_vp);
+ ASSERT_VOP_UNLOCKED(nd.ni_dvp, "mkdir");
+ ASSERT_VOP_UNLOCKED(nd.ni_vp, "mkdir");
return (error);
}
@@ -2385,6 +2401,8 @@ out:
vput(nd.ni_dvp);
vput(vp);
}
+ ASSERT_VOP_UNLOCKED(nd.ni_dvp, "rmdir");
+ ASSERT_VOP_UNLOCKED(nd.ni_vp, "rmdir");
return (error);
}
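
The ASSERT_VOP_LOCKED/ASSERT_VOP_UNLOCKED calls added above are the
DEBUG_VFS_LOCKS consistency checks this commit leans on. A hedged sketch
of the idea only; the real definitions live in sys/vnode.h and may differ
in detail:

#ifdef DEBUG_VFS_LOCKS
/* Complain when a vnode's lock state disagrees with the caller's claim. */
#define ASSERT_VOP_LOCKED(vp, str) do {					\
	if ((vp) != NULL && !VOP_ISLOCKED(vp))				\
		panic("%s: vnode not locked", (str));			\
} while (0)
#define ASSERT_VOP_UNLOCKED(vp, str) do {				\
	if ((vp) != NULL && VOP_ISLOCKED(vp))				\
		panic("%s: vnode locked", (str));			\
} while (0)
#else
#define ASSERT_VOP_LOCKED(vp, str)
#define ASSERT_VOP_UNLOCKED(vp, str)
#endif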
diff --git a/sys/kern/vfs_lookup.c b/sys/kern/vfs_lookup.c
index 0c04b01..72da093 100644
--- a/sys/kern/vfs_lookup.c
+++ b/sys/kern/vfs_lookup.c
@@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)vfs_lookup.c 8.4 (Berkeley) 2/16/94
- * $Id$
+ * $Id: vfs_lookup.c,v 1.16 1997/02/22 09:39:33 peter Exp $
*/
#include "opt_ktrace.h"
@@ -409,6 +409,7 @@ dirloop:
unionlookup:
ndp->ni_dvp = dp;
ndp->ni_vp = NULL;
+ ASSERT_VOP_LOCKED(dp, "lookup");
if (error = VOP_LOOKUP(dp, &ndp->ni_vp, cnp)) {
#ifdef DIAGNOSTIC
if (ndp->ni_vp != NULL)
@@ -458,6 +459,8 @@ unionlookup:
printf("found\n");
#endif
+ ASSERT_VOP_LOCKED(ndp->ni_vp, "lookup");
+
/*
* Take into account any additional components consumed by
* the underlying filesystem.
@@ -516,6 +519,9 @@ nextname:
cnp->cn_nameptr++;
ndp->ni_pathlen--;
}
+ if (ndp->ni_dvp != ndp->ni_vp) {
+ ASSERT_VOP_UNLOCKED(ndp->ni_dvp, "lookup");
+ }
vrele(ndp->ni_dvp);
goto dirloop;
}
diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c
index 4f3bc04..92a9edf 100644
--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)vfs_subr.c 8.31 (Berkeley) 5/26/95
- * $Id: vfs_subr.c,v 1.80 1997/03/05 04:54:54 davidg Exp $
+ * $Id: vfs_subr.c,v 1.81 1997/04/01 13:05:34 bde Exp $
*/
/*
@@ -871,6 +871,73 @@ vget(vp, flags, p)
* count is maintained in an auxiliary vnode lock structure.
*/
int
+vop_sharedlock(ap)
+ struct vop_lock_args /* {
+ struct vnode *a_vp;
+ int a_flags;
+ struct proc *a_p;
+ } */ *ap;
+{
+ /*
+ * This code cannot be used until all the non-locking filesystems
+ * (notably NFS) are converted to properly lock and release nodes.
+ * Also, certain vnode operations change the locking state within
+ * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
+ * and symlink). Ideally these operations should not change the
+ * lock state, but should be changed to let the caller of the
+ * function unlock them. Otherwise all intermediate vnode layers
+ * (such as union, umapfs, etc) must catch these functions to do
+ * the necessary locking at their layer. Note that the inactive
+ * and lookup operations also change their lock state, but this
+ * cannot be avoided, so these two operations will always need
+ * to be handled in intermediate layers.
+ */
+ struct vnode *vp = ap->a_vp;
+ int vnflags, flags = ap->a_flags;
+
+ if (vp->v_vnlock == NULL) {
+ if ((flags & LK_TYPE_MASK) == LK_DRAIN)
+ return (0);
+ MALLOC(vp->v_vnlock, struct lock *, sizeof(struct lock),
+ M_VNODE, M_WAITOK);
+ lockinit(vp->v_vnlock, PVFS, "vnlock", 0, 0);
+ }
+ switch (flags & LK_TYPE_MASK) {
+ case LK_DRAIN:
+ vnflags = LK_DRAIN;
+ break;
+ case LK_EXCLUSIVE:
+#ifdef DEBUG_VFS_LOCKS
+ /*
+ * Normally, we use shared locks here, but that confuses
+ * the locking assertions.
+ */
+ vnflags = LK_EXCLUSIVE;
+ break;
+#endif
+ case LK_SHARED:
+ vnflags = LK_SHARED;
+ break;
+ case LK_UPGRADE:
+ case LK_EXCLUPGRADE:
+ case LK_DOWNGRADE:
+ return (0);
+ case LK_RELEASE:
+ default:
+ panic("vop_nolock: bad operation %d", flags & LK_TYPE_MASK);
+ }
+ if (flags & LK_INTERLOCK)
+ vnflags |= LK_INTERLOCK;
+ return(lockmgr(vp->v_vnlock, vnflags, &vp->v_interlock, ap->a_p));
+}
+
+/*
+ * Stubs to use when there is no locking to be done on the underlying object.
+ * A minimal shared lock is necessary to ensure that the underlying object
+ * is not revoked while an operation is in progress. So, an active shared
+ * count is maintained in an auxiliary vnode lock structure.
+ */
+int
vop_nolock(ap)
struct vop_lock_args /* {
struct vnode *a_vp;
@@ -1291,8 +1358,10 @@ vclean(struct vnode *vp, int flags, struct proc *p)
vrele(vp);
cache_purge(vp);
if (vp->v_vnlock) {
+#ifdef DIAGNOSTIC
if ((vp->v_vnlock->lk_flags & LK_DRAINED) == 0)
vprint("vclean: lock not drained", vp);
+#endif
FREE(vp->v_vnlock, M_VNODE);
vp->v_vnlock = NULL;
}
@@ -1581,7 +1650,9 @@ vprint(label, vp)
char buf[64];
if (label != NULL)
- printf("%s: ", label);
+ printf("%s: %x: ", label, vp);
+ else
+ printf("%x: ", vp);
printf("type %s, usecount %d, writecount %d, refcount %ld,",
typename[vp->v_type], vp->v_usecount, vp->v_writecount,
vp->v_holdcnt);
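
For reference, the vprint() change above makes every diagnostic identify
the vnode by address. Hypothetical output; the address and counts are
invented for illustration:

vclean: lock not drained: f0a1b2c4: type VREG, usecount 1, writecount 0, refcount 2, ...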
diff --git a/sys/kern/vfs_syscalls.c b/sys/kern/vfs_syscalls.c
index d97f1ca..6c27097 100644
--- a/sys/kern/vfs_syscalls.c
+++ b/sys/kern/vfs_syscalls.c
@@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)vfs_syscalls.c 8.13 (Berkeley) 4/15/94
- * $Id: vfs_syscalls.c,v 1.62 1997/03/31 12:02:42 peter Exp $
+ * $Id: vfs_syscalls.c,v 1.63 1997/03/31 12:21:37 peter Exp $
*/
/*
@@ -1028,6 +1028,8 @@ mknod(p, uap, retval)
if (vp)
vrele(vp);
}
+ ASSERT_VOP_UNLOCKED(nd.ni_dvp, "mknod");
+ ASSERT_VOP_UNLOCKED(nd.ni_vp, "mknod");
return (error);
}
@@ -1124,6 +1126,8 @@ link(p, uap, retval)
}
}
vrele(vp);
+ ASSERT_VOP_UNLOCKED(nd.ni_dvp, "link");
+ ASSERT_VOP_UNLOCKED(nd.ni_vp, "link");
return (error);
}
@@ -1171,6 +1175,8 @@ symlink(p, uap, retval)
vattr.va_mode = ACCESSPERMS &~ p->p_fd->fd_cmask;
VOP_LEASE(nd.ni_dvp, p, p->p_ucred, LEASE_WRITE);
error = VOP_SYMLINK(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, &vattr, path);
+ ASSERT_VOP_UNLOCKED(nd.ni_dvp, "symlink");
+ ASSERT_VOP_UNLOCKED(nd.ni_vp, "symlink");
out:
FREE(path, M_NAMEI);
return (error);
@@ -1212,6 +1218,8 @@ undelete(p, uap, retval)
if (error = VOP_WHITEOUT(nd.ni_dvp, &nd.ni_cnd, DELETE))
VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
vput(nd.ni_dvp);
+ ASSERT_VOP_UNLOCKED(nd.ni_dvp, "undelete");
+ ASSERT_VOP_UNLOCKED(nd.ni_vp, "undelete");
return (error);
}
@@ -1269,6 +1277,8 @@ unlink(p, uap, retval)
if (vp != NULLVP)
vput(vp);
}
+ ASSERT_VOP_UNLOCKED(nd.ni_dvp, "unlink");
+ ASSERT_VOP_UNLOCKED(nd.ni_vp, "unlink");
return (error);
}
@@ -2272,6 +2282,10 @@ out:
vrele(fvp);
}
vrele(tond.ni_startdir);
+ ASSERT_VOP_UNLOCKED(fromnd.ni_dvp, "rename");
+ ASSERT_VOP_UNLOCKED(fromnd.ni_vp, "rename");
+ ASSERT_VOP_UNLOCKED(tond.ni_dvp, "rename");
+ ASSERT_VOP_UNLOCKED(tond.ni_vp, "rename");
FREE(tond.ni_cnd.cn_pnbuf, M_NAMEI);
out1:
if (fromnd.ni_startdir)
@@ -2327,6 +2341,8 @@ mkdir(p, uap, retval)
error = VOP_MKDIR(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, &vattr);
if (!error)
vput(nd.ni_vp);
+ ASSERT_VOP_UNLOCKED(nd.ni_dvp, "mkdir");
+ ASSERT_VOP_UNLOCKED(nd.ni_vp, "mkdir");
return (error);
}
@@ -2385,6 +2401,8 @@ out:
vput(nd.ni_dvp);
vput(vp);
}
+ ASSERT_VOP_UNLOCKED(nd.ni_dvp, "rmdir");
+ ASSERT_VOP_UNLOCKED(nd.ni_vp, "rmdir");
return (error);
}
diff --git a/sys/kern/vfs_vnops.c b/sys/kern/vfs_vnops.c
index cb6c932..6a3fca6 100644
--- a/sys/kern/vfs_vnops.c
+++ b/sys/kern/vfs_vnops.c
@@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)vfs_vnops.c 8.2 (Berkeley) 1/21/94
- * $Id: vfs_vnops.c,v 1.33 1997/03/23 03:36:38 bde Exp $
+ * $Id: vfs_vnops.c,v 1.34 1997/03/24 11:52:27 bde Exp $
*/
#include <sys/param.h>
@@ -104,6 +104,8 @@ vn_open(ndp, fmode, cmode)
if (error = VOP_CREATE(ndp->ni_dvp, &ndp->ni_vp,
&ndp->ni_cnd, vap))
return (error);
+ ASSERT_VOP_UNLOCKED(ndp->ni_dvp, "create");
+ ASSERT_VOP_LOCKED(ndp->ni_vp, "create");
fmode &= ~O_TRUNC;
vp = ndp->ni_vp;
} else {