author:    dyson <dyson@FreeBSD.org>    1997-02-10 02:22:35 +0000
committer: dyson <dyson@FreeBSD.org>    1997-02-10 02:22:35 +0000
commit:    10f666af84d48e89e4e2960415c9b616fce4077f (patch)
tree:      88a944de263165091f0a18abeedbaaccec532407 /sys/fs/nullfs
parent:    0960d7e91af3428ffba89b42228d82d8afaa0389 (diff)
download:  FreeBSD-src-10f666af84d48e89e4e2960415c9b616fce4077f.zip
           FreeBSD-src-10f666af84d48e89e4e2960415c9b616fce4077f.tar.gz
This is the kernel Lite/2 commit. There are some requisite userland
changes, so don't expect to be able to run the kernel as-is (very well)
without the appropriate Lite/2 userland changes.

The system boots and can mount UFS filesystems.

Untested: ext2fs, msdosfs, NFS

Known problems: Incorrect Berkeley ID strings in some files.
                Mount_std mounts will not work until the getfsent
                library routine is changed.

Reviewed by:    various people
Submitted by:   Jeffery Hsu <hsu@freebsd.org>
Diffstat (limited to 'sys/fs/nullfs')

-rw-r--r--  sys/fs/nullfs/null.h        |   5
-rw-r--r--  sys/fs/nullfs/null_subr.c   |  61
-rw-r--r--  sys/fs/nullfs/null_vfsops.c |  19
-rw-r--r--  sys/fs/nullfs/null_vnops.c  | 223

4 files changed, 236 insertions, 72 deletions
diff --git a/sys/fs/nullfs/null.h b/sys/fs/nullfs/null.h
index beadb42..70a81b2 100644
--- a/sys/fs/nullfs/null.h
+++ b/sys/fs/nullfs/null.h
@@ -33,7 +33,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * @(#)null.h 8.2 (Berkeley) 1/21/94
+ * @(#)null.h 8.3 (Berkeley) 8/20/94
*
* $FreeBSD$
*/
@@ -52,8 +52,7 @@ struct null_mount {
* A cache of vnode references
*/
struct null_node {
- struct null_node *null_forw; /* Hash chain */
- struct null_node *null_back;
+ LIST_ENTRY(null_node) null_hash; /* Hash list */
struct vnode *null_lowervp; /* VREFed once */
struct vnode *null_vnode; /* Back pointer */
};
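
[Editor's note] The null.h hunk above replaces the hand-rolled null_forw/null_back hash pointers with the 4.4BSD queue(3) LIST macros. As a reading aid, here is a minimal userland sketch of the same pattern; the struct, field, and function names below are illustrative stand-ins and are not part of the commit:

    #include <sys/queue.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* A node carries its own linkage via LIST_ENTRY, like null_hash above. */
    struct demo_node {
            LIST_ENTRY(demo_node) hash;     /* hash-chain linkage */
            int key;
    };

    /* LIST_HEAD generates the bucket-head type. */
    LIST_HEAD(demo_head, demo_node);

    int
    main(void)
    {
            struct demo_head head = LIST_HEAD_INITIALIZER(head);
            struct demo_node *np;
            int i;

            for (i = 0; i < 3; i++) {
                    np = malloc(sizeof(*np));
                    np->key = i;
                    LIST_INSERT_HEAD(&head, np, hash);
            }
            /* Walk the chain the same way null_node_find does. */
            for (np = head.lh_first; np != NULL; np = np->hash.le_next)
                    printf("key %d\n", np->key);
            /* LIST_REMOVE unlinks a node, as null_reclaim now does. */
            while ((np = head.lh_first) != NULL) {
                    LIST_REMOVE(np, hash);
                    free(np);
            }
            return (0);
    }

The same three macros (LIST_ENTRY for the in-node linkage, LIST_INSERT_HEAD, LIST_REMOVE) are exactly what the null_subr.c and null_vnops.c hunks below switch to.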
diff --git a/sys/fs/nullfs/null_subr.c b/sys/fs/nullfs/null_subr.c
index a14a7e4..4418631 100644
--- a/sys/fs/nullfs/null_subr.c
+++ b/sys/fs/nullfs/null_subr.c
@@ -33,13 +33,14 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * @(#)null_subr.c 8.4 (Berkeley) 1/21/94
+ * @(#)null_subr.c 8.7 (Berkeley) 5/14/95
*
* $FreeBSD$
*/
#include <sys/param.h>
#include <sys/systm.h>
+#include <sys/proc.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/vnode.h>
@@ -48,11 +49,8 @@
#include <sys/malloc.h>
#include <miscfs/nullfs/null.h>
-extern int nullfs_init __P((void));
-
#define LOG2_SIZEVNODE 7 /* log2(sizeof struct vnode) */
#define NNULLNODECACHE 16
-#define NULL_NHASH(vp) ((((u_long)vp)>>LOG2_SIZEVNODE) & (NNULLNODECACHE-1))
/*
* Null layer cache:
@@ -62,51 +60,32 @@ extern int nullfs_init __P((void));
* alias is removed the lower vnode is vrele'd.
*/
-/*
- * Cache head
- */
-struct null_node_cache {
- struct null_node *ac_forw;
- struct null_node *ac_back;
-};
-
-static struct null_node_cache null_node_cache[NNULLNODECACHE];
+#define NULL_NHASH(vp) \
+ (&null_node_hashtbl[(((u_long)vp)>>LOG2_SIZEVNODE) & null_node_hash])
+LIST_HEAD(null_node_hashhead, null_node) *null_node_hashtbl;
+u_long null_node_hash;
static int null_node_alloc __P((struct mount *mp, struct vnode *lowervp,
struct vnode **vpp));
static struct vnode *
null_node_find __P((struct mount *mp, struct vnode *lowervp));
-static struct null_node_cache *
- null_node_hash __P((struct vnode *lowervp));
/*
* Initialise cache headers
*/
int
-nullfs_init()
+nullfs_init(vfsp)
+ struct vfsconf *vfsp;
{
- struct null_node_cache *ac;
+
#ifdef NULLFS_DIAGNOSTIC
printf("nullfs_init\n"); /* printed during system boot */
#endif
-
- for (ac = null_node_cache; ac < null_node_cache + NNULLNODECACHE; ac++)
- ac->ac_forw = ac->ac_back = (struct null_node *) ac;
+ null_node_hashtbl = hashinit(NNULLNODECACHE, M_CACHE, &null_node_hash);
return (0);
}
/*
- * Compute hash list for given lower vnode
- */
-static struct null_node_cache *
-null_node_hash(lowervp)
-struct vnode *lowervp;
-{
-
- return (&null_node_cache[NULL_NHASH(lowervp)]);
-}
-
-/*
* Return a VREF'ed alias for lower vnode if already exists, else 0.
*/
static struct vnode *
@@ -114,7 +93,8 @@ null_node_find(mp, lowervp)
struct mount *mp;
struct vnode *lowervp;
{
- struct null_node_cache *hd;
+ struct proc *p = curproc; /* XXX */
+ struct null_node_hashhead *hd;
struct null_node *a;
struct vnode *vp;
@@ -124,9 +104,9 @@ null_node_find(mp, lowervp)
* the lower vnode. If found, the increment the null_node
* reference count (but NOT the lower vnode's VREF counter).
*/
- hd = null_node_hash(lowervp);
+ hd = NULL_NHASH(lowervp);
loop:
- for (a = hd->ac_forw; a != (struct null_node *) hd; a = a->null_forw) {
+ for (a = hd->lh_first; a != 0; a = a->null_hash.le_next) {
if (a->null_lowervp == lowervp && NULLTOV(a)->v_mount == mp) {
vp = NULLTOV(a);
/*
@@ -134,7 +114,7 @@ loop:
* stuff, but we don't want to lock
* the lower node.
*/
- if (vget(vp, 0)) {
+ if (vget(vp, 0, p)) {
printf ("null_node_find: vget failed.\n");
goto loop;
};
@@ -157,7 +137,7 @@ null_node_alloc(mp, lowervp, vpp)
struct vnode *lowervp;
struct vnode **vpp;
{
- struct null_node_cache *hd;
+ struct null_node_hashhead *hd;
struct null_node *xp;
struct vnode *othervp, *vp;
int error;
@@ -194,8 +174,8 @@ null_node_alloc(mp, lowervp, vpp)
return 0;
};
VREF(lowervp); /* Extra VREF will be vrele'd in null_node_create */
- hd = null_node_hash(lowervp);
- insque(xp, hd);
+ hd = NULL_NHASH(lowervp);
+ LIST_INSERT_HEAD(hd, xp, null_hash);
return 0;
}
@@ -250,9 +230,8 @@ null_node_create(mp, lowervp, newvpp)
#ifdef DIAGNOSTIC
if (lowervp->v_usecount < 1) {
/* Should never happen... */
- vprint ("null_node_create: alias ",aliasvp);
- vprint ("null_node_create: lower ",lowervp);
- printf ("null_node_create: lower has 0 usecount.\n");
+ vprint ("null_node_create: alias ", aliasvp);
+ vprint ("null_node_create: lower ", lowervp);
panic ("null_node_create: lower has 0 usecount.");
};
#endif
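
[Editor's note] The null_subr.c hunks above retire the fixed null_node_cache array in favour of a table allocated with hashinit() and indexed by the new NULL_NHASH macro, which shifts the lower vnode pointer and masks it with null_node_hash. hashinit() is a kernel routine, so the sketch below uses an illustrative stand-in (demo_hashinit) purely to show the power-of-two sizing and the shift-and-mask lookup; all names are invented for the example:

    #include <stdio.h>
    #include <stdlib.h>

    #define LOG2_SIZEVNODE  7       /* log2(sizeof struct vnode), as in the diff */

    /* Simplified stand-in for the kernel's hashinit(). */
    static void **
    demo_hashinit(int elements, unsigned long *hashmask)
    {
            unsigned long hashsize;
            void **tbl;

            /* Largest power of two not greater than 'elements'. */
            for (hashsize = 1; hashsize <= (unsigned long)elements; hashsize <<= 1)
                    continue;
            hashsize >>= 1;
            tbl = calloc(hashsize, sizeof(*tbl));   /* buckets start out empty */
            *hashmask = hashsize - 1;
            return (tbl);
    }

    /* Same shape as NULL_NHASH: shift the pointer, then mask into the table. */
    #define DEMO_NHASH(tbl, p, mask) \
            (&(tbl)[(((unsigned long)(p)) >> LOG2_SIZEVNODE) & (mask)])

    int
    main(void)
    {
            unsigned long mask;
            void **tbl = demo_hashinit(16, &mask);
            int dummy;

            printf("mask = %lu, bucket = %p\n", mask,
                (void *)DEMO_NHASH(tbl, &dummy, mask));
            free(tbl);
            return (0);
    }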
diff --git a/sys/fs/nullfs/null_vfsops.c b/sys/fs/nullfs/null_vfsops.c
index e2aeeba..339b7c1 100644
--- a/sys/fs/nullfs/null_vfsops.c
+++ b/sys/fs/nullfs/null_vfsops.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1992, 1993
+ * Copyright (c) 1992, 1993, 1995
* The Regents of the University of California. All rights reserved.
*
* This code is derived from software donated to Berkeley by
@@ -46,7 +46,7 @@
#include <sys/param.h>
#include <sys/systm.h>
-#include <sys/kernel.h>
+#include <sys/proc.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/vnode.h>
@@ -55,7 +55,7 @@
#include <sys/malloc.h>
#include <miscfs/nullfs/null.h>
-extern int nullfs_init __P((void));
+extern int nullfs_init __P((struct vfsconf *));
static int nullfs_fhtovp __P((struct mount *mp, struct fid *fidp,
struct mbuf *nam, struct vnode **vpp,
@@ -146,7 +146,7 @@ nullfs_mount(mp, path, data, ndp, p)
/*
* Unlock the node (either the lower or the alias)
*/
- VOP_UNLOCK(vp);
+ VOP_UNLOCK(vp, 0, p);
/*
* Make sure the node alias worked
*/
@@ -166,7 +166,7 @@ nullfs_mount(mp, path, data, ndp, p)
if (NULLVPTOLOWERVP(nullm_rootvp)->v_mount->mnt_flag & MNT_LOCAL)
mp->mnt_flag |= MNT_LOCAL;
mp->mnt_data = (qaddr_t) xmp;
- getnewfsid(mp, MOUNT_LOFS);
+ vfs_getnewfsid(mp);
(void) copyinstr(path, mp->mnt_stat.f_mntonname, MNAMELEN - 1, &size);
bzero(mp->mnt_stat.f_mntonname + size, MNAMELEN - size);
@@ -212,12 +212,8 @@ nullfs_unmount(mp, mntflags, p)
printf("nullfs_unmount(mp = %x)\n", mp);
#endif
- if (mntflags & MNT_FORCE) {
- /* lofs can never be rootfs so don't check for it */
- if (!doforce)
- return (EINVAL);
+ if (mntflags & MNT_FORCE)
flags |= FORCECLOSE;
- }
/*
* Clear out buffer cache. I don't think we
@@ -259,6 +255,7 @@ nullfs_root(mp, vpp)
struct mount *mp;
struct vnode **vpp;
{
+ struct proc *p = curproc; /* XXX */
struct vnode *vp;
#ifdef NULLFS_DIAGNOSTIC
@@ -273,7 +270,7 @@ nullfs_root(mp, vpp)
*/
vp = MOUNTTONULLMOUNT(mp)->nullm_rootvp;
VREF(vp);
- VOP_LOCK(vp);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
*vpp = vp;
return 0;
}
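
[Editor's note] The null_vfsops.c hunks above adopt the Lite/2 locking interface: VOP_LOCK(vp) becomes vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p), VOP_UNLOCK() now takes flags and a process pointer, and getnewfsid(mp, MOUNT_LOFS) is replaced by vfs_getnewfsid(mp). The null_vnops.c diff that follows adds null_lock/null_unlock, whose job is to lock the null layer's own node and then pass the request to the vnode beneath it. The toy model below illustrates only that "lock this layer, then forward down the stack" shape; it uses pthread mutexes as a stand-in for the kernel lock manager and is not kernel code:

    #include <pthread.h>
    #include <stdio.h>

    /* One node per layer; lower == NULL at the bottom of the stack. */
    struct layer_node {
            pthread_mutex_t lock;
            struct layer_node *lower;
    };

    /* Lock this layer's node, then forward the request downward. */
    static void
    layer_lock(struct layer_node *np)
    {
            pthread_mutex_lock(&np->lock);
            if (np->lower != NULL)
                    layer_lock(np->lower);
    }

    /* Unlock in the opposite order: release the lower layers first. */
    static void
    layer_unlock(struct layer_node *np)
    {
            if (np->lower != NULL)
                    layer_unlock(np->lower);
            pthread_mutex_unlock(&np->lock);
    }

    int
    main(void)
    {
            struct layer_node lower = { PTHREAD_MUTEX_INITIALIZER, NULL };
            struct layer_node upper = { PTHREAD_MUTEX_INITIALIZER, &lower };

            layer_lock(&upper);
            printf("both layers locked\n");
            layer_unlock(&upper);
            return (0);
    }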
diff --git a/sys/fs/nullfs/null_vnops.c b/sys/fs/nullfs/null_vnops.c
index ec279bc..c1af96f 100644
--- a/sys/fs/nullfs/null_vnops.c
+++ b/sys/fs/nullfs/null_vnops.c
@@ -33,7 +33,13 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * @(#)null_vnops.c 8.1 (Berkeley) 6/10/93
+ * @(#)null_vnops.c 8.6 (Berkeley) 5/27/95
+ *
+ * Ancestors:
+ * @(#)lofs_vnops.c 1.2 (Berkeley) 6/18/92
+ * $Id: null_vnops.c,v 1.11.2000.1 1996/09/17 14:32:31 peter Exp $
+ * ...and...
+ * @(#)null_vnodeops.c 1.20 92/07/07 UCLA Ficus project
*
* $FreeBSD$
*/
@@ -88,13 +94,21 @@
* in the arguments and, if a vnode is return by the operation,
* stacks a null-node on top of the returned vnode.
*
- * Although bypass handles most operations,
- * vop_getattr, _inactive, _reclaim, and _print are not bypassed.
- * Vop_getattr must change the fsid being returned.
+ * Although bypass handles most operations, vop_getattr, vop_lock,
+ * vop_unlock, vop_inactive, vop_reclaim, and vop_print are not
+ * bypassed. Vop_getattr must change the fsid being returned.
+ * Vop_lock and vop_unlock must handle any locking for the
+ * current vnode as well as pass the lock request down.
* Vop_inactive and vop_reclaim are not bypassed so that
- * they can handle freeing null-layer specific data.
- * Vop_print is not bypassed to avoid excessive debugging
- * information.
+ * they can handle freeing null-layer specific data. Vop_print
+ * is not bypassed to avoid excessive debugging information.
+ * Also, certain vnode operations change the locking state within
+ * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
+ * and symlink). Ideally these operations should not change the
+ * lock state, but should be changed to let the caller of the
+ * function unlock them. Otherwise all intermediate vnode layers
+ * (such as union, umapfs, etc) must catch these functions to do
+ * the necessary locking at their layer.
*
*
* INSTANTIATING VNODE STACKS
@@ -178,7 +192,7 @@ static int null_bug_bypass = 0; /* for debugging: enables bypass printf'ing */
SYSCTL_INT(_debug, OID_AUTO, nullfs_bug_bypass, CTLFLAG_RW,
&null_bug_bypass, 0, "");
-static int null_bypass __P((struct vop_generic_args *ap));
+int null_bypass __P((struct vop_generic_args *ap));
static int null_bwrite __P((struct vop_bwrite_args *ap));
static int null_getattr __P((struct vop_getattr_args *ap));
static int null_inactive __P((struct vop_inactive_args *ap));
@@ -211,7 +225,7 @@ static int null_strategy __P((struct vop_strategy_args *ap));
* - all mapped vnodes are of our vnode-type (NEEDSWORK:
* problems on rmdir'ing mount points and renaming?)
*/
-static int
+int
null_bypass(ap)
struct vop_generic_args /* {
struct vnodeop_desc *a_desc;
@@ -254,7 +268,8 @@ null_bypass(ap)
* are of our type. Check for and don't map any
* that aren't. (We must always map first vp or vclean fails.)
*/
- if (i && (*this_vp_p)->v_op != null_vnodeop_p) {
+ if (i && (*this_vp_p == NULL ||
+ (*this_vp_p)->v_op != null_vnodeop_p)) {
old_vps[i] = NULL;
} else {
old_vps[i] = *this_vp_p;
@@ -317,6 +332,105 @@ null_bypass(ap)
return (error);
}
+/*
+ * We have to carry on the locking protocol on the null layer vnodes
+ * as we progress through the tree. We also have to enforce read-only
+ * if this layer is mounted read-only.
+ */
+static int
+null_lookup(ap)
+ struct vop_lookup_args /* {
+ struct vnode * a_dvp;
+ struct vnode ** a_vpp;
+ struct componentname * a_cnp;
+ } */ *ap;
+{
+ struct componentname *cnp = ap->a_cnp;
+ struct proc *p = cnp->cn_proc;
+ int flags = cnp->cn_flags;
+ struct vop_lock_args lockargs;
+ struct vop_unlock_args unlockargs;
+ struct vnode *dvp, *vp;
+ int error;
+
+ if ((flags & ISLASTCN) && (ap->a_dvp->v_mount->mnt_flag & MNT_RDONLY) &&
+ (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
+ return (EROFS);
+ error = null_bypass(ap);
+ if (error == EJUSTRETURN && (flags & ISLASTCN) &&
+ (ap->a_dvp->v_mount->mnt_flag & MNT_RDONLY) &&
+ (cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME))
+ error = EROFS;
+ /*
+ * We must do the same locking and unlocking at this layer as
+ * is done in the layers below us. We could figure this out
+ * based on the error return and the LASTCN, LOCKPARENT, and
+ * LOCKLEAF flags. However, it is more expidient to just find
+ * out the state of the lower level vnodes and set ours to the
+ * same state.
+ */
+ dvp = ap->a_dvp;
+ vp = *ap->a_vpp;
+ if (dvp == vp)
+ return (error);
+ if (!VOP_ISLOCKED(dvp)) {
+ unlockargs.a_vp = dvp;
+ unlockargs.a_flags = 0;
+ unlockargs.a_p = p;
+ vop_nounlock(&unlockargs);
+ }
+ if (vp != NULL && VOP_ISLOCKED(vp)) {
+ lockargs.a_vp = vp;
+ lockargs.a_flags = LK_SHARED;
+ lockargs.a_p = p;
+ vop_nolock(&lockargs);
+ }
+ return (error);
+}
+
+/*
+ * Setattr call. Disallow write attempts if the layer is mounted read-only.
+ */
+int
+null_setattr(ap)
+ struct vop_setattr_args /* {
+ struct vnodeop_desc *a_desc;
+ struct vnode *a_vp;
+ struct vattr *a_vap;
+ struct ucred *a_cred;
+ struct proc *a_p;
+ } */ *ap;
+{
+ struct vnode *vp = ap->a_vp;
+ struct vattr *vap = ap->a_vap;
+
+ if ((vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL ||
+ vap->va_gid != (gid_t)VNOVAL || vap->va_atime.ts_sec != VNOVAL ||
+ vap->va_mtime.ts_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) &&
+ (vp->v_mount->mnt_flag & MNT_RDONLY))
+ return (EROFS);
+ if (vap->va_size != VNOVAL) {
+ switch (vp->v_type) {
+ case VDIR:
+ return (EISDIR);
+ case VCHR:
+ case VBLK:
+ case VSOCK:
+ case VFIFO:
+ return (0);
+ case VREG:
+ case VLNK:
+ default:
+ /*
+ * Disallow write attempts if the filesystem is
+ * mounted read-only.
+ */
+ if (vp->v_mount->mnt_flag & MNT_RDONLY)
+ return (EROFS);
+ }
+ }
+ return (null_bypass(ap));
+}
/*
* We handle getattr only to change the fsid.
@@ -331,19 +445,90 @@ null_getattr(ap)
} */ *ap;
{
int error;
- error = null_bypass(ap);
- if (error)
+
+ if (error = null_bypass(ap))
return (error);
/* Requires that arguments be restored. */
ap->a_vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];
return (0);
}
-
static int
+null_access(ap)
+ struct vop_access_args /* {
+ struct vnode *a_vp;
+ int a_mode;
+ struct ucred *a_cred;
+ struct proc *a_p;
+ } */ *ap;
+{
+ struct vnode *vp = ap->a_vp;
+ mode_t mode = ap->a_mode;
+
+ /*
+ * Disallow write attempts on read-only layers;
+ * unless the file is a socket, fifo, or a block or
+ * character device resident on the file system.
+ */
+ if (mode & VWRITE) {
+ switch (vp->v_type) {
+ case VDIR:
+ case VLNK:
+ case VREG:
+ if (vp->v_mount->mnt_flag & MNT_RDONLY)
+ return (EROFS);
+ break;
+ }
+ }
+ return (null_bypass(ap));
+}
+
+/*
+ * We need to process our own vnode lock and then clear the
+ * interlock flag as it applies only to our vnode, not the
+ * vnodes below us on the stack.
+ */
+int
+null_lock(ap)
+ struct vop_lock_args /* {
+ struct vnode *a_vp;
+ int a_flags;
+ struct proc *a_p;
+ } */ *ap;
+{
+
+ vop_nolock(ap);
+ if ((ap->a_flags & LK_TYPE_MASK) == LK_DRAIN)
+ return (0);
+ ap->a_flags &= ~LK_INTERLOCK;
+ return (null_bypass(ap));
+}
+
+/*
+ * We need to process our own vnode unlock and then clear the
+ * interlock flag as it applies only to our vnode, not the
+ * vnodes below us on the stack.
+ */
+int
+null_unlock(ap)
+ struct vop_unlock_args /* {
+ struct vnode *a_vp;
+ int a_flags;
+ struct proc *a_p;
+ } */ *ap;
+{
+ struct vnode *vp = ap->a_vp;
+
+ vop_nounlock(ap);
+ ap->a_flags &= ~LK_INTERLOCK;
+ return (null_bypass(ap));
+}
+
+int
null_inactive(ap)
struct vop_inactive_args /* {
struct vnode *a_vp;
+ struct proc *a_p;
} */ *ap;
{
/*
@@ -358,6 +543,7 @@ null_inactive(ap)
* like they do in the name lookup cache code.
* That's too much work for now.
*/
+ VOP_UNLOCK(ap->a_vp, 0, ap->a_p);
return (0);
}
@@ -365,6 +551,7 @@ static int
null_reclaim(ap)
struct vop_reclaim_args /* {
struct vnode *a_vp;
+ struct proc *a_p;
} */ *ap;
{
struct vnode *vp = ap->a_vp;
@@ -377,14 +564,13 @@ null_reclaim(ap)
*/
/* After this assignment, this node will not be re-used. */
xp->null_lowervp = NULL;
- remque(xp);
+ LIST_REMOVE(xp, null_hash);
FREE(vp->v_data, M_TEMP);
vp->v_data = NULL;
vrele (lowervp);
return (0);
}
-
static int
null_print(ap)
struct vop_print_args /* {
@@ -396,7 +582,6 @@ null_print(ap)
return (0);
}
-
/*
* XXX - vop_strategy must be hand coded because it has no
* vnode in its arguments.
@@ -422,7 +607,6 @@ null_strategy(ap)
return (error);
}
-
/*
* XXX - like vop_strategy, vop_bwrite must be hand coded because it has no
* vnode in its arguments.
@@ -455,7 +639,12 @@ vop_t **null_vnodeop_p;
static struct vnodeopv_entry_desc null_vnodeop_entries[] = {
{ &vop_default_desc, (vop_t *)null_bypass },
+ { &vop_lookup_desc, (vop_t *)null_lookup },
+ { &vop_setattr_desc, (vop_t *)null_setattr },
{ &vop_getattr_desc, (vop_t *)null_getattr },
+ { &vop_access_desc, (vop_t *)null_access },
+ { &vop_lock_desc, (vop_t *)null_lock },
+ { &vop_unlock_desc, (vop_t *)null_unlock },
{ &vop_inactive_desc, (vop_t *)null_inactive },
{ &vop_reclaim_desc, (vop_t *)null_reclaim },
{ &vop_print_desc, (vop_t *)null_print },
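
[Editor's note] The operations table above adds explicit entries for lookup, setattr, getattr, access, lock, and unlock, while every other operation still falls through to null_bypass via vop_default_desc. A compact userland sketch of that "explicit entries with a default fallback" dispatch pattern follows; the operation names and handlers are invented for illustration:

    #include <stdio.h>
    #include <string.h>

    typedef int (*op_fn)(const char *name);

    /* The catch-all, analogous to null_bypass behind vop_default_desc. */
    static int
    default_op(const char *name)
    {
            printf("bypass: %s\n", name);
            return (0);
    }

    static int
    getattr_op(const char *name)
    {
            printf("special-cased: %s\n", name);
            return (0);
    }

    /* Explicit entries; anything not listed is handled by the default. */
    static const struct {
            const char *name;
            op_fn fn;
    } op_entries[] = {
            { "getattr", getattr_op },
            { "lock", getattr_op },         /* stand-in handler */
    };

    static int
    dispatch(const char *name)
    {
            size_t i;

            for (i = 0; i < sizeof(op_entries) / sizeof(op_entries[0]); i++)
                    if (strcmp(op_entries[i].name, name) == 0)
                            return (op_entries[i].fn(name));
            return (default_op(name));
    }

    int
    main(void)
    {
            dispatch("getattr");    /* hits the explicit entry */
            dispatch("read");       /* falls through to the default */
            return (0);
    }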