author     peter <peter@FreeBSD.org>	2004-03-29 22:41:21 +0000
committer  peter <peter@FreeBSD.org>	2004-03-29 22:41:21 +0000
commit     1f224a3d83fa0f8d991bac65f30316ddd5becc12 (patch)
tree       9ae8206254c3dd899dbfd13744073cb3aef5f1aa /sys/kern/vfs_default.c
parent     8e41c82b83a283bd515274944d549ab23a03dc94 (diff)
Clean up the stub fake vnode locking implementations.  The main reason this
stuff was here (NFS) was fixed by Alfred in November.  The only remaining
consumer of the stub functions was umapfs, which is horribly, horribly broken.
It has missed out on about the last 5 years' worth of maintenance that was
done on nullfs (from which umapfs is derived) and needs major work to bring
it up to date with the vnode locking protocol.  umapfs really needs to find
a caretaker to bring it into the 21st century.

Functions GC'ed: vop_noislocked, vop_nolock, vop_nounlock, vop_sharedlock.
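
For context, a minimal sketch of the locking path that remains once the no-op
stubs are gone: a filesystem that wants real locking defers to lockmgr(9) on
the vnode's v_vnlock, passing the caller's flags (including LK_INTERLOCK)
through, much like the lockmgr call inside the removed vop_sharedlock below.
The function name example_stdlock and the exact flag handling here are
illustrative assumptions, not code from this commit:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/lockmgr.h>
#include <sys/mutex.h>
#include <sys/vnode.h>

/*
 * Illustrative sketch only: take the requested lock on the real vnode
 * lock rather than faking it.  The argument layout follows the 5.x-era
 * vop_lock_args shown in the diff below.
 */
int
example_stdlock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	/* Hand the caller's flags (and interlock, if any) to lockmgr(9). */
	return (lockmgr(vp->v_vnlock, ap->a_flags, VI_MTX(vp), ap->a_td));
}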
Diffstat (limited to 'sys/kern/vfs_default.c')
-rw-r--r--	sys/kern/vfs_default.c	| 160
 1 file changed, 0 insertions(+), 160 deletions(-)
diff --git a/sys/kern/vfs_default.c b/sys/kern/vfs_default.c
index c3b7a19..d227bf6 100644
--- a/sys/kern/vfs_default.c
+++ b/sys/kern/vfs_default.c
@@ -369,166 +369,6 @@ vop_stdpoll(ap)
}
/*
- * Stubs to use when there is no locking to be done on the underlying object.
- * A minimal shared lock is necessary to ensure that the underlying object
- * is not revoked while an operation is in progress. So, an active shared
- * count is maintained in an auxillary vnode lock structure.
- */
-int
-vop_sharedlock(ap)
- struct vop_lock_args /* {
- struct vnode *a_vp;
- int a_flags;
- struct thread *a_td;
- } */ *ap;
-{
- /*
- * This code cannot be used until all the non-locking filesystems
- * (notably NFS) are converted to properly lock and release nodes.
- * Also, certain vnode operations change the locking state within
- * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
- * and symlink). Ideally these operations should not change the
- * lock state, but should be changed to let the caller of the
- * function unlock them. Otherwise all intermediate vnode layers
- * (such as union, umapfs, etc) must catch these functions to do
- * the necessary locking at their layer. Note that the inactive
- * and lookup operations also change their lock state, but this
- * cannot be avoided, so these two operations will always need
- * to be handled in intermediate layers.
- */
- struct vnode *vp = ap->a_vp;
- int vnflags, flags = ap->a_flags;
-
- switch (flags & LK_TYPE_MASK) {
- case LK_DRAIN:
- vnflags = LK_DRAIN;
- break;
- case LK_EXCLUSIVE:
-#ifdef DEBUG_VFS_LOCKS
- /*
- * Normally, we use shared locks here, but that confuses
- * the locking assertions.
- */
- vnflags = LK_EXCLUSIVE;
- break;
-#endif
- case LK_SHARED:
- vnflags = LK_SHARED;
- break;
- case LK_UPGRADE:
- case LK_EXCLUPGRADE:
- case LK_DOWNGRADE:
- return (0);
- case LK_RELEASE:
- default:
- panic("vop_sharedlock: bad operation %d", flags & LK_TYPE_MASK);
- }
- vnflags |= flags & (LK_INTERLOCK | LK_EXTFLG_MASK);
-#ifndef DEBUG_LOCKS
- return (lockmgr(vp->v_vnlock, vnflags, VI_MTX(vp), ap->a_td));
-#else
- return (debuglockmgr(vp->v_vnlock, vnflags, VI_MTX(vp), ap->a_td,
- "vop_sharedlock", vp->filename, vp->line));
-#endif
-}
-
-/*
- * Stubs to use when there is no locking to be done on the underlying object.
- * A minimal shared lock is necessary to ensure that the underlying object
- * is not revoked while an operation is in progress. So, an active shared
- * count is maintained in an auxillary vnode lock structure.
- */
-int
-vop_nolock(ap)
- struct vop_lock_args /* {
- struct vnode *a_vp;
- int a_flags;
- struct thread *a_td;
- } */ *ap;
-{
-#ifdef notyet
- /*
- * This code cannot be used until all the non-locking filesystems
- * (notably NFS) are converted to properly lock and release nodes.
- * Also, certain vnode operations change the locking state within
- * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
- * and symlink). Ideally these operations should not change the
- * lock state, but should be changed to let the caller of the
- * function unlock them. Otherwise all intermediate vnode layers
- * (such as union, umapfs, etc) must catch these functions to do
- * the necessary locking at their layer. Note that the inactive
- * and lookup operations also change their lock state, but this
- * cannot be avoided, so these two operations will always need
- * to be handled in intermediate layers.
- */
- struct vnode *vp = ap->a_vp;
- int vnflags, flags = ap->a_flags;
-
- switch (flags & LK_TYPE_MASK) {
- case LK_DRAIN:
- vnflags = LK_DRAIN;
- break;
- case LK_EXCLUSIVE:
- case LK_SHARED:
- vnflags = LK_SHARED;
- break;
- case LK_UPGRADE:
- case LK_EXCLUPGRADE:
- case LK_DOWNGRADE:
- return (0);
- case LK_RELEASE:
- default:
- panic("vop_nolock: bad operation %d", flags & LK_TYPE_MASK);
- }
- vnflags |= flags & (LK_INTERLOCK | LK_EXTFLG_MASK);
- return(lockmgr(vp->v_vnlock, vnflags, VI_MTX(vp), ap->a_td));
-#else /* for now */
- /*
- * Since we are not using the lock manager, we must clear
- * the interlock here.
- */
- if (ap->a_flags & LK_INTERLOCK)
- VI_UNLOCK(ap->a_vp);
- return (0);
-#endif
-}
-
-/*
- * Do the inverse of vop_nolock, handling the interlock in a compatible way.
- */
-int
-vop_nounlock(ap)
- struct vop_unlock_args /* {
- struct vnode *a_vp;
- int a_flags;
- struct thread *a_td;
- } */ *ap;
-{
-
- /*
- * Since we are not using the lock manager, we must clear
- * the interlock here.
- */
- if (ap->a_flags & LK_INTERLOCK)
- VI_UNLOCK(ap->a_vp);
- return (0);
-}
-
-/*
- * Return whether or not the node is in use.
- */
-int
-vop_noislocked(ap)
- struct vop_islocked_args /* {
- struct vnode *a_vp;
- struct thread *a_td;
- } */ *ap;
-{
-
- return (0);
-}
-
-/*
* Return our mount point, as we will take charge of the writes.
*/
int
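
One convention worth noting from the stubs removed above: when a caller
passes LK_INTERLOCK it hands the vnode interlock to the vnode op, which must
drop it before returning even when no real lock is taken.  A hypothetical
helper (the name example_drop_interlock is not part of the tree) illustrating
the rule the old vop_nolock/vop_nounlock followed:

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/vnode.h>

/*
 * Hypothetical illustration of the LK_INTERLOCK rule: the caller handed
 * us the vnode interlock, so release it before returning success.
 */
static int
example_drop_interlock(struct vop_unlock_args *ap)
{

	if (ap->a_flags & LK_INTERLOCK)
		VI_UNLOCK(ap->a_vp);
	return (0);
}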