author     phk <phk@FreeBSD.org>   1997-10-26 20:55:39 +0000
committer  phk <phk@FreeBSD.org>   1997-10-26 20:55:39 +0000
commit     e3cdaf12b2b1039b21a615be67756c1d2e49a342 (patch)
tree       938be7b5e64d05ad10e17ada8dfae77d07754414 /sys/kern/vfs_default.c
parent     d9574c43edc723778db9f6eefab8d2a1695c279b (diff)
VFS interior redecoration.
Rename vn_default_error to vop_defaultop all over the place.
Move vn_bwrite from vfs_bio.c to vfs_default.c and call it vop_stdbwrite.
Use vop_null instead of nullop.
Move vop_nopoll from vfs_subr.c to vfs_default.c.
Move vop_sharedlock from vfs_subr.c to vfs_default.c.
Move vop_nolock from vfs_subr.c to vfs_default.c.
Move vop_nounlock from vfs_subr.c to vfs_default.c.
Move vop_noislocked from vfs_subr.c to vfs_default.c.
Use vop_ebadf instead of *_ebadf.
Add vop_defaultop for getpages on the master vnode in MFS.
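To make the renames concrete, here is a minimal sketch of how a filesystem would register the new defaults in its vnodeop table. The "myfs" names are hypothetical; the entry format mirrors default_vnodeop_entries in the diff below.

	/*
	 * Hypothetical "myfs" vnodeop table.  Operations not listed fall
	 * through to vop_defaultop (formerly vn_default_error); bwrite
	 * picks up the shared vop_stdbwrite that replaces vn_bwrite from
	 * vfs_bio.c, and vop_null stands in for the retired nullop.
	 */
	vop_t **myfs_vnodeop_p;
	static struct vnodeopv_entry_desc myfs_vnodeop_entries[] = {
		{ &vop_default_desc,	(vop_t *) vop_defaultop },
		{ &vop_bwrite_desc,	(vop_t *) vop_stdbwrite },
		{ &vop_abortop_desc,	(vop_t *) vop_null },
		{ NULL, NULL }
	};
	static struct vnodeopv_desc myfs_vnodeop_opv_desc =
		{ &myfs_vnodeop_p, myfs_vnodeop_entries };
	VNODEOP_SET(myfs_vnodeop_opv_desc);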
Diffstat (limited to 'sys/kern/vfs_default.c')
-rw-r--r--  sys/kern/vfs_default.c  207
1 file changed, 205 insertions(+), 2 deletions(-)
diff --git a/sys/kern/vfs_default.c b/sys/kern/vfs_default.c
index f7aaf9b..a8f685e 100644
--- a/sys/kern/vfs_default.c
+++ b/sys/kern/vfs_default.c
@@ -40,9 +40,11 @@
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
+#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
+#include <sys/poll.h>
static int vop_nostrategy __P((struct vop_strategy_args *));
@@ -57,9 +59,9 @@ static int vop_nostrategy __P((struct vop_strategy_args *));
vop_t **default_vnodeop_p;
static struct vnodeopv_entry_desc default_vnodeop_entries[] = {
{ &vop_default_desc, (vop_t *) vop_eopnotsupp },
- { &vop_abortop_desc, (vop_t *) nullop },
+ { &vop_abortop_desc, (vop_t *) vop_null },
{ &vop_advlock_desc, (vop_t *) vop_einval },
- { &vop_bwrite_desc, (vop_t *) vn_bwrite },
+ { &vop_bwrite_desc, (vop_t *) vop_stdbwrite },
{ &vop_close_desc, (vop_t *) vop_null },
{ &vop_fsync_desc, (vop_t *) vop_null },
{ &vop_ioctl_desc, (vop_t *) vop_enotty },
@@ -217,3 +219,204 @@ vop_stdislocked(ap)
return (lockstatus(l));
}
+/*
+ * Return true for select/poll: report the requested read/write
+ * events as always ready.
+ */
+int
+vop_nopoll(ap)
+ struct vop_poll_args /* {
+ struct vnode *a_vp;
+ int a_events;
+ struct ucred *a_cred;
+ struct proc *a_p;
+ } */ *ap;
+{
+
+ /*
+ * Just return what we were asked for.
+ */
+ return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
+}
+
+int
+vop_stdbwrite(ap)
+ struct vop_bwrite_args *ap;
+{
+ return (bwrite(ap->a_bp));
+}
+
+/*
+ * Stubs to use when there is no locking to be done on the underlying object.
+ * A minimal shared lock is necessary to ensure that the underlying object
+ * is not revoked while an operation is in progress. So, an active shared
+ * count is maintained in an auxiliary vnode lock structure.
+ */
+int
+vop_sharedlock(ap)
+ struct vop_lock_args /* {
+ struct vnode *a_vp;
+ int a_flags;
+ struct proc *a_p;
+ } */ *ap;
+{
+ /*
+ * This code cannot be used until all the non-locking filesystems
+ * (notably NFS) are converted to properly lock and release nodes.
+ * Also, certain vnode operations change the locking state within
+ * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
+ * and symlink). Ideally these operations should not change the
+ * lock state, but should be changed to let the caller of the
+ * function unlock them. Otherwise all intermediate vnode layers
+ * (such as union, umapfs, etc) must catch these functions to do
+ * the necessary locking at their layer. Note that the inactive
+ * and lookup operations also change their lock state, but this
+ * cannot be avoided, so these two operations will always need
+ * to be handled in intermediate layers.
+ */
+ struct vnode *vp = ap->a_vp;
+ int vnflags, flags = ap->a_flags;
+
+ if (vp->v_vnlock == NULL) {
+ if ((flags & LK_TYPE_MASK) == LK_DRAIN)
+ return (0);
+ MALLOC(vp->v_vnlock, struct lock *, sizeof(struct lock),
+ M_VNODE, M_WAITOK);
+ lockinit(vp->v_vnlock, PVFS, "vnlock", 0, 0);
+ }
+ switch (flags & LK_TYPE_MASK) {
+ case LK_DRAIN:
+ vnflags = LK_DRAIN;
+ break;
+ case LK_EXCLUSIVE:
+#ifdef DEBUG_VFS_LOCKS
+ /*
+ * Normally, we use shared locks here, but that confuses
+ * the locking assertions.
+ */
+ vnflags = LK_EXCLUSIVE;
+ break;
+#endif
+ case LK_SHARED:
+ vnflags = LK_SHARED;
+ break;
+ case LK_UPGRADE:
+ case LK_EXCLUPGRADE:
+ case LK_DOWNGRADE:
+ return (0);
+ case LK_RELEASE:
+ default:
+ panic("vop_sharedlock: bad operation %d", flags & LK_TYPE_MASK);
+ }
+ if (flags & LK_INTERLOCK)
+ vnflags |= LK_INTERLOCK;
+	return (lockmgr(vp->v_vnlock, vnflags, &vp->v_interlock, ap->a_p));
+}
+
+/*
+ * Stubs to use when there is no locking to be done on the underlying object.
+ * A minimal shared lock is necessary to ensure that the underlying object
+ * is not revoked while an operation is in progress. So, an active shared
+ * count is maintained in an auxiliary vnode lock structure.
+ */
+int
+vop_nolock(ap)
+ struct vop_lock_args /* {
+ struct vnode *a_vp;
+ int a_flags;
+ struct proc *a_p;
+ } */ *ap;
+{
+#ifdef notyet
+ /*
+ * This code cannot be used until all the non-locking filesystems
+ * (notably NFS) are converted to properly lock and release nodes.
+ * Also, certain vnode operations change the locking state within
+ * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
+ * and symlink). Ideally these operations should not change the
+ * lock state, but should be changed to let the caller of the
+ * function unlock them. Otherwise all intermediate vnode layers
+ * (such as union, umapfs, etc) must catch these functions to do
+ * the necessary locking at their layer. Note that the inactive
+ * and lookup operations also change their lock state, but this
+ * cannot be avoided, so these two operations will always need
+ * to be handled in intermediate layers.
+ */
+ struct vnode *vp = ap->a_vp;
+ int vnflags, flags = ap->a_flags;
+
+ if (vp->v_vnlock == NULL) {
+ if ((flags & LK_TYPE_MASK) == LK_DRAIN)
+ return (0);
+ MALLOC(vp->v_vnlock, struct lock *, sizeof(struct lock),
+ M_VNODE, M_WAITOK);
+ lockinit(vp->v_vnlock, PVFS, "vnlock", 0, 0);
+ }
+ switch (flags & LK_TYPE_MASK) {
+ case LK_DRAIN:
+ vnflags = LK_DRAIN;
+ break;
+ case LK_EXCLUSIVE:
+ case LK_SHARED:
+ vnflags = LK_SHARED;
+ break;
+ case LK_UPGRADE:
+ case LK_EXCLUPGRADE:
+ case LK_DOWNGRADE:
+ return (0);
+ case LK_RELEASE:
+ default:
+ panic("vop_nolock: bad operation %d", flags & LK_TYPE_MASK);
+ }
+ if (flags & LK_INTERLOCK)
+ vnflags |= LK_INTERLOCK;
+	return (lockmgr(vp->v_vnlock, vnflags, &vp->v_interlock, ap->a_p));
+#else /* for now */
+ /*
+ * Since we are not using the lock manager, we must clear
+ * the interlock here.
+ */
+ if (ap->a_flags & LK_INTERLOCK) {
+ simple_unlock(&ap->a_vp->v_interlock);
+ }
+ return (0);
+#endif
+}
+
+/*
+ * Do the inverse of vop_nolock, handling the interlock in a compatible way.
+ */
+int
+vop_nounlock(ap)
+ struct vop_unlock_args /* {
+ struct vnode *a_vp;
+ int a_flags;
+ struct proc *a_p;
+ } */ *ap;
+{
+ struct vnode *vp = ap->a_vp;
+
+ if (vp->v_vnlock == NULL) {
+ if (ap->a_flags & LK_INTERLOCK)
+ simple_unlock(&ap->a_vp->v_interlock);
+ return (0);
+ }
+ return (lockmgr(vp->v_vnlock, LK_RELEASE | ap->a_flags,
+ &ap->a_vp->v_interlock, ap->a_p));
+}
+
+/*
+ * Return whether or not the node is locked.
+ */
+int
+vop_noislocked(ap)
+ struct vop_islocked_args /* {
+ struct vnode *a_vp;
+ } */ *ap;
+{
+ struct vnode *vp = ap->a_vp;
+
+ if (vp->v_vnlock == NULL)
+ return (0);
+ return (lockstatus(vp->v_vnlock));
+}
+
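For context on the LK_INTERLOCK handling above: a caller may enter VOP_LOCK() holding the vnode's simple-lock interlock, and the lock routine must release it, which the "for now" branch of vop_nolock() does explicitly. Below is a minimal sketch of such a caller, assuming the three-argument VOP_LOCK/VOP_UNLOCK interface used elsewhere in this file; myfs_use_vnode is a hypothetical name.

	static int
	myfs_use_vnode(vp, p)
		struct vnode *vp;
		struct proc *p;
	{
		int error;

		/*
		 * Take the interlock, then hand it off to the lock routine;
		 * with vop_nolock() in place this simply drops the interlock
		 * and returns success.
		 */
		simple_lock(&vp->v_interlock);
		error = VOP_LOCK(vp, LK_SHARED | LK_INTERLOCK, p);
		if (error)
			return (error);
		/* ... operate on the locked vnode ... */
		VOP_UNLOCK(vp, 0, p);
		return (0);
	}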