-rw-r--r--  sys/conf/NOTES               3
-rw-r--r--  sys/fs/umapfs/umap_vnops.c   9
-rw-r--r--  sys/kern/vfs_default.c     160
-rw-r--r--  sys/modules/Makefile         1
-rw-r--r--  sys/sys/vnode.h              4
5 files changed, 11 insertions, 166 deletions
diff --git a/sys/conf/NOTES b/sys/conf/NOTES
index 55ecdd8..0e9698e 100644
--- a/sys/conf/NOTES
+++ b/sys/conf/NOTES
@@ -701,7 +701,8 @@ options PROCFS #Process filesystem (requires PSEUDOFS)
options PSEUDOFS #Pseudo-filesystem framework
options SMBFS #SMB/CIFS filesystem
options UDF #Universal Disk Format
-options UMAPFS #UID map filesystem
+#umapfs is seriously (functionally) broken at this point.
+#options UMAPFS #UID map filesystem
options UNIONFS #Union filesystem
# The xFS_ROOT options REQUIRE the associated ``options xFS''
options NFS_ROOT #NFS usable as root device
diff --git a/sys/fs/umapfs/umap_vnops.c b/sys/fs/umapfs/umap_vnops.c
index 6a6dc08..c57147a 100644
--- a/sys/fs/umapfs/umap_vnops.c
+++ b/sys/fs/umapfs/umap_vnops.c
@@ -363,6 +363,15 @@ umap_lock(ap)
} */ *ap;
{
+ /*
+ * vop_nolock no longer exists. I could have pasted the code
+ * in so that it compiles, but that would be doing our users a
+ * great disservice. umapfs is about 5 years behind the nullfs
+ * code that it is derived from. The stub locking here guarantees
+ * a deadlock the moment a VOP_INACTIVE arrives. There is no point
+ * pasting the code that makes it compile either, because that just
+ * makes it Even More Wrong.
+ */
vop_nolock(ap);
if ((ap->a_flags & LK_TYPE_MASK) == LK_DRAIN)
return (0);
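For reference, the nullfs code that this layer was derived from forwards locking to the lower vnode instead of stubbing it out. A minimal sketch of that pass-through style follows; it assumes the lower vnode is reachable through the umap node (the VTOUMAP()/um_lowervp accessors from umap.h) and is illustrative only, not part of this change.

/*
 * Illustrative sketch only (not part of this commit): a nullfs-style
 * lock pass-through.  Rather than pretending to lock, the layered
 * filesystem forwards the request to the vnode it is stacked on, so
 * a later VOP_INACTIVE sees a real, consistent lock state.
 */
static int
umap_lock_sketch(struct vop_lock_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vnode *lowervp = VTOUMAP(vp)->um_lowervp; /* assumed accessors */

	/* Hand the whole lock request down to the lower layer. */
	return (VOP_LOCK(lowervp, ap->a_flags, ap->a_td));
}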
diff --git a/sys/kern/vfs_default.c b/sys/kern/vfs_default.c
index c3b7a19..d227bf6 100644
--- a/sys/kern/vfs_default.c
+++ b/sys/kern/vfs_default.c
@@ -369,166 +369,6 @@ vop_stdpoll(ap)
}
/*
- * Stubs to use when there is no locking to be done on the underlying object.
- * A minimal shared lock is necessary to ensure that the underlying object
- * is not revoked while an operation is in progress. So, an active shared
- * count is maintained in an auxillary vnode lock structure.
- */
-int
-vop_sharedlock(ap)
- struct vop_lock_args /* {
- struct vnode *a_vp;
- int a_flags;
- struct thread *a_td;
- } */ *ap;
-{
- /*
- * This code cannot be used until all the non-locking filesystems
- * (notably NFS) are converted to properly lock and release nodes.
- * Also, certain vnode operations change the locking state within
- * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
- * and symlink). Ideally these operations should not change the
- * lock state, but should be changed to let the caller of the
- * function unlock them. Otherwise all intermediate vnode layers
- * (such as union, umapfs, etc) must catch these functions to do
- * the necessary locking at their layer. Note that the inactive
- * and lookup operations also change their lock state, but this
- * cannot be avoided, so these two operations will always need
- * to be handled in intermediate layers.
- */
- struct vnode *vp = ap->a_vp;
- int vnflags, flags = ap->a_flags;
-
- switch (flags & LK_TYPE_MASK) {
- case LK_DRAIN:
- vnflags = LK_DRAIN;
- break;
- case LK_EXCLUSIVE:
-#ifdef DEBUG_VFS_LOCKS
- /*
- * Normally, we use shared locks here, but that confuses
- * the locking assertions.
- */
- vnflags = LK_EXCLUSIVE;
- break;
-#endif
- case LK_SHARED:
- vnflags = LK_SHARED;
- break;
- case LK_UPGRADE:
- case LK_EXCLUPGRADE:
- case LK_DOWNGRADE:
- return (0);
- case LK_RELEASE:
- default:
- panic("vop_sharedlock: bad operation %d", flags & LK_TYPE_MASK);
- }
- vnflags |= flags & (LK_INTERLOCK | LK_EXTFLG_MASK);
-#ifndef DEBUG_LOCKS
- return (lockmgr(vp->v_vnlock, vnflags, VI_MTX(vp), ap->a_td));
-#else
- return (debuglockmgr(vp->v_vnlock, vnflags, VI_MTX(vp), ap->a_td,
- "vop_sharedlock", vp->filename, vp->line));
-#endif
-}
-
-/*
- * Stubs to use when there is no locking to be done on the underlying object.
- * A minimal shared lock is necessary to ensure that the underlying object
- * is not revoked while an operation is in progress. So, an active shared
- * count is maintained in an auxillary vnode lock structure.
- */
-int
-vop_nolock(ap)
- struct vop_lock_args /* {
- struct vnode *a_vp;
- int a_flags;
- struct thread *a_td;
- } */ *ap;
-{
-#ifdef notyet
- /*
- * This code cannot be used until all the non-locking filesystems
- * (notably NFS) are converted to properly lock and release nodes.
- * Also, certain vnode operations change the locking state within
- * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
- * and symlink). Ideally these operations should not change the
- * lock state, but should be changed to let the caller of the
- * function unlock them. Otherwise all intermediate vnode layers
- * (such as union, umapfs, etc) must catch these functions to do
- * the necessary locking at their layer. Note that the inactive
- * and lookup operations also change their lock state, but this
- * cannot be avoided, so these two operations will always need
- * to be handled in intermediate layers.
- */
- struct vnode *vp = ap->a_vp;
- int vnflags, flags = ap->a_flags;
-
- switch (flags & LK_TYPE_MASK) {
- case LK_DRAIN:
- vnflags = LK_DRAIN;
- break;
- case LK_EXCLUSIVE:
- case LK_SHARED:
- vnflags = LK_SHARED;
- break;
- case LK_UPGRADE:
- case LK_EXCLUPGRADE:
- case LK_DOWNGRADE:
- return (0);
- case LK_RELEASE:
- default:
- panic("vop_nolock: bad operation %d", flags & LK_TYPE_MASK);
- }
- vnflags |= flags & (LK_INTERLOCK | LK_EXTFLG_MASK);
- return(lockmgr(vp->v_vnlock, vnflags, VI_MTX(vp), ap->a_td));
-#else /* for now */
- /*
- * Since we are not using the lock manager, we must clear
- * the interlock here.
- */
- if (ap->a_flags & LK_INTERLOCK)
- VI_UNLOCK(ap->a_vp);
- return (0);
-#endif
-}
-
-/*
- * Do the inverse of vop_nolock, handling the interlock in a compatible way.
- */
-int
-vop_nounlock(ap)
- struct vop_unlock_args /* {
- struct vnode *a_vp;
- int a_flags;
- struct thread *a_td;
- } */ *ap;
-{
-
- /*
- * Since we are not using the lock manager, we must clear
- * the interlock here.
- */
- if (ap->a_flags & LK_INTERLOCK)
- VI_UNLOCK(ap->a_vp);
- return (0);
-}
-
-/*
- * Return whether or not the node is in use.
- */
-int
-vop_noislocked(ap)
- struct vop_islocked_args /* {
- struct vnode *a_vp;
- struct thread *a_td;
- } */ *ap;
-{
-
- return (0);
-}
-
-/*
* Return our mount point, as we will take charge of the writes.
*/
int
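With the no-op stubs gone, filesystems that want default locking are expected to use the remaining vop_std*() entry points, which drive the real lock manager on v_vnlock. A rough sketch of that shape, based on the lockmgr() call visible in the deleted vop_sharedlock() above (the exact vop_stdlock() body may differ):

/*
 * Rough sketch of the standard lock entry point (for contrast with the
 * deleted no-op stubs): it takes the vnode's lockmgr lock for real,
 * passing the interlock so lockmgr can release it atomically.
 */
int
vop_stdlock_sketch(struct vop_lock_args *ap)
{
	struct vnode *vp = ap->a_vp;

	return (lockmgr(vp->v_vnlock, ap->a_flags, VI_MTX(vp), ap->a_td));
}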
diff --git a/sys/modules/Makefile b/sys/modules/Makefile
index 6a73399..749f5f0 100644
--- a/sys/modules/Makefile
+++ b/sys/modules/Makefile
@@ -222,7 +222,6 @@ SUBDIR= ${_3dfx} \
uhid \
ukbd \
ulpt \
- umapfs \
umass \
umct \
umodem \
diff --git a/sys/sys/vnode.h b/sys/sys/vnode.h
index 4079adf..fef70ec 100644
--- a/sys/sys/vnode.h
+++ b/sys/sys/vnode.h
@@ -697,14 +697,10 @@ int vop_stdislocked(struct vop_islocked_args *);
int vop_stdlock(struct vop_lock_args *);
int vop_stdputpages(struct vop_putpages_args *);
int vop_stdunlock(struct vop_unlock_args *);
-int vop_noislocked(struct vop_islocked_args *);
-int vop_nolock(struct vop_lock_args *);
int vop_nopoll(struct vop_poll_args *);
-int vop_nounlock(struct vop_unlock_args *);
int vop_stdpathconf(struct vop_pathconf_args *);
int vop_stdpoll(struct vop_poll_args *);
int vop_revoke(struct vop_revoke_args *);
-int vop_sharedlock(struct vop_lock_args *);
int vop_eopnotsupp(struct vop_generic_args *ap);
int vop_ebadf(struct vop_generic_args *ap);
int vop_einval(struct vop_generic_args *ap);