summaryrefslogtreecommitdiffstats
path: root/sys/fs/umapfs
diff options
context:
space:
mode:
Diffstat (limited to 'sys/fs/umapfs')
-rw-r--r--sys/fs/umapfs/umap.h92
-rw-r--r--sys/fs/umapfs/umap_subr.c397
-rw-r--r--sys/fs/umapfs/umap_vfsops.c407
-rw-r--r--sys/fs/umapfs/umap_vnops.c488
4 files changed, 1384 insertions, 0 deletions
diff --git a/sys/fs/umapfs/umap.h b/sys/fs/umapfs/umap.h
new file mode 100644
index 0000000..9f4d1e7
--- /dev/null
+++ b/sys/fs/umapfs/umap.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software donated to Berkeley by
+ * the UCLA Ficus project.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)umap.h 8.3 (Berkeley) 1/21/94
+ *
+ * @(#)null_vnops.c 1.5 (Berkeley) 7/10/92
+ */
+
+/*
+ * Compile-time limits on the translation tables, and the fallback
+ * identities used when a credential has no mapping entry.
+ */
+#define MAPFILEENTRIES 64	/* max entries in the uid mapping table */
+#define GMAPFILEENTRIES 16	/* max entries in the gid mapping table */
+#define NOBODY 32767		/* uid assigned when no uid mapping matches */
+#define NULLGROUP 65534		/* gid assigned when no gid mapping matches */
+
+/*
+ * Mount arguments passed in from mount_umap(8) via mount(2).
+ * Each map entry is a (from, to) id pair.
+ */
+struct umap_args {
+	char *target; /* Target of loopback */
+	int nentries; /* # of entries in user map array */
+	int gnentries; /* # of entries in group map array */
+	u_long (*mapdata)[2]; /* pointer to array of user mappings */
+	u_long (*gmapdata)[2]; /* pointer to array of group mappings */
+};
+
+/*
+ * Per-mount state, hung off mp->mnt_data (see MOUNTTOUMAPMOUNT below).
+ * The mapping tables are copied in at mount time.
+ */
+struct umap_mount {
+	struct mount *umapm_vfs;
+	struct vnode *umapm_rootvp; /* Reference to root umap_node */
+	int info_nentries; /* number of uid mappings */
+	int info_gnentries; /* number of gid mappings */
+	u_long info_mapdata[MAPFILEENTRIES][2]; /* mapping data for
+	    user mapping in ficus */
+	u_long info_gmapdata[GMAPFILEENTRIES][2]; /* mapping data for
+	    group mapping in ficus */
+};
+
+#ifdef KERNEL
+/*
+ * A cache of vnode references.
+ * One umap_node links an alias (upper) vnode to its lower vnode.
+ */
+struct umap_node {
+	struct umap_node *umap_forw; /* Hash chain */
+	struct umap_node *umap_back;
+	struct vnode *umap_lowervp; /* Aliased vnode - VREFed once */
+	struct vnode *umap_vnode; /* Back pointer to vnode/umap_node */
+};
+
+extern int umap_node_create __P((struct mount *mp, struct vnode *target, struct vnode **vpp));
+extern u_long umap_reverse_findid __P((u_long id, u_long map[][2], int nentries));
+extern void umap_mapids __P((struct mount *v_mount, struct ucred *credp));
+
+/* Conversion macros between the mount/vnode/umap_node layers. */
+#define MOUNTTOUMAPMOUNT(mp) ((struct umap_mount *)((mp)->mnt_data))
+#define VTOUMAP(vp) ((struct umap_node *)(vp)->v_data)
+#define UMAPTOV(xp) ((xp)->umap_vnode)
+#ifdef UMAPFS_DIAGNOSTIC
+extern struct vnode *umap_checkvp __P((struct vnode *vp, char *fil, int lno));
+#define UMAPVPTOLOWERVP(vp) umap_checkvp((vp), __FILE__, __LINE__)
+#else
+#define UMAPVPTOLOWERVP(vp) (VTOUMAP(vp)->umap_lowervp)
+#endif
+
+extern int (**umap_vnodeop_p)();
+extern struct vfsops umap_vfsops;
+#endif /* KERNEL */
diff --git a/sys/fs/umapfs/umap_subr.c b/sys/fs/umapfs/umap_subr.c
new file mode 100644
index 0000000..6f1f077
--- /dev/null
+++ b/sys/fs/umapfs/umap_subr.c
@@ -0,0 +1,397 @@
+/*
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software donated to Berkeley by
+ * Jan-Simon Pendry.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)umap_subr.c 8.6 (Berkeley) 1/26/94
+ *
+ * $Id: lofs_subr.c, v 1.11 1992/05/30 10:05:43 jsp Exp jsp $
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <sys/vnode.h>
+#include <sys/mount.h>
+#include <sys/namei.h>
+#include <sys/malloc.h>
+#include <miscfs/umapfs/umap.h>
+
+#define LOG2_SIZEVNODE 7 /* log2(sizeof struct vnode) */
+#define NUMAPNODECACHE 16
+#define UMAP_NHASH(vp) ((((u_long) vp)>>LOG2_SIZEVNODE) & (NUMAPNODECACHE-1))
+
+/*
+ * Null layer cache:
+ * Each cache entry holds a reference to the target vnode
+ * along with a pointer to the alias vnode. When an
+ * entry is added the target vnode is VREF'd. When the
+ * alias is removed the target vnode is vrele'd.
+ */
+
+/*
+ * Cache head: each bucket is the head of a doubly-linked circular
+ * list of umap_node structures that hash to it (see UMAP_NHASH).
+ * An empty bucket points at itself.
+ */
+struct umap_node_cache {
+	struct umap_node *ac_forw;
+	struct umap_node *ac_back;
+};
+
+static struct umap_node_cache umap_node_cache[NUMAPNODECACHE];
+
+/*
+ * Initialise cache headers: turn every hash bucket into an
+ * empty circular list by pointing it at itself.
+ */
+umapfs_init()
+{
+	int i;
+#ifdef UMAPFS_DIAGNOSTIC
+	printf("umapfs_init\n");		/* printed during system boot */
+#endif
+
+	for (i = 0; i < NUMAPNODECACHE; i++) {
+		struct umap_node_cache *hd = &umap_node_cache[i];
+
+		hd->ac_forw = hd->ac_back = (struct umap_node *) hd;
+	}
+}
+
+/*
+ * Compute hash list (bucket head) for given target vnode.
+ */
+static struct umap_node_cache *
+umap_node_hash(targetvp)
+	struct vnode *targetvp;
+{
+	struct umap_node_cache *hd = &umap_node_cache[UMAP_NHASH(targetvp)];
+
+	return (hd);
+}
+
+/*
+ * umap_findid is called by various routines in umap_vnodeops.c to
+ * find a user or group id in a map: return map[i][1] for the first
+ * entry whose map[i][0] equals id, or -1 if there is no match.
+ */
+static u_long
+umap_findid(id, map, nentries)
+	u_long id;
+	u_long map[][2];
+	int nentries;
+{
+	int i;
+
+	for (i = 0; i < nentries; i++) {
+		if (map[i][0] == id)
+			return (map[i][1]);
+	}
+	return (-1);
+}
+
+/*
+ * umap_reverse_findid is called by umap_getattr() in umap_vnodeops.c to
+ * find a user or group id in a map, in reverse: return map[i][0] for
+ * the first entry whose map[i][1] equals id, or -1 if there is no match.
+ */
+u_long
+umap_reverse_findid(id, map, nentries)
+	u_long id;
+	u_long map[][2];
+	int nentries;
+{
+	int i;
+
+	for (i = 0; i < nentries; i++) {
+		if (map[i][1] == id)
+			return (map[i][0]);
+	}
+	return (-1);
+}
+
+/*
+ * Return alias for target vnode if already exists, else 0.
+ * On success the alias is returned holding the extra reference
+ * taken by vget(); the lower vnode's use count is NOT changed.
+ */
+static struct vnode *
+umap_node_find(mp, targetvp)
+	struct mount *mp;
+	struct vnode *targetvp;
+{
+	struct umap_node_cache *hd;
+	struct umap_node *a;
+	struct vnode *vp;
+
+#ifdef UMAPFS_DIAGNOSTIC
+	printf("umap_node_find(mp = %x, target = %x)\n", mp, targetvp);
+#endif
+
+	/*
+	 * Find hash base, and then search the (two-way) linked
+	 * list looking for a umap_node structure which is referencing
+	 * the target vnode. If found, then increment the umap_node
+	 * reference count (but NOT the target vnode's VREF counter).
+	 */
+	hd = umap_node_hash(targetvp);
+
+	loop:
+	for (a = hd->ac_forw; a != (struct umap_node *) hd; a = a->umap_forw) {
+		if (a->umap_lowervp == targetvp &&
+		    a->umap_vnode->v_mount == mp) {
+			vp = UMAPTOV(a);
+			/*
+			 * We need vget for the VXLOCK
+			 * stuff, but we don't want to lock
+			 * the lower node.
+			 */
+			if (vget(vp, 0)) {
+#ifdef UMAPFS_DIAGNOSTIC
+				printf ("umap_node_find: vget failed.\n");
+#endif
+				/* vnode was doomed while we slept; rescan. */
+				goto loop;
+			}
+			return (vp);
+		}
+	}
+
+#ifdef UMAPFS_DIAGNOSTIC
+	printf("umap_node_find(%x, %x): NOT found\n", mp, targetvp);
+#endif
+
+	return (0);
+}
+
+/*
+ * Make a new umap_node node.
+ * Vp is the alias vnode, lofsvp is the target vnode.
+ * Maintain a reference to (targetvp).
+ * Returns 0 with *vpp set (possibly to a pre-existing alias found
+ * after the MALLOC sleep), or the error from getnewvnode().
+ */
+static int
+umap_node_alloc(mp, lowervp, vpp)
+	struct mount *mp;
+	struct vnode *lowervp;
+	struct vnode **vpp;
+{
+	struct umap_node_cache *hd;
+	struct umap_node *xp;
+	struct vnode *othervp, *vp;
+	int error;
+
+	if (error = getnewvnode(VT_UMAP, mp, umap_vnodeop_p, vpp))
+		return (error);
+	vp = *vpp;
+
+	MALLOC(xp, struct umap_node *, sizeof(struct umap_node),
+	    M_TEMP, M_WAITOK);
+	vp->v_type = lowervp->v_type;
+	xp->umap_vnode = vp;
+	vp->v_data = xp;
+	xp->umap_lowervp = lowervp;
+	/*
+	 * Before we insert our new node onto the hash chains,
+	 * check to see if someone else has beaten us to it.
+	 * (We could have slept in MALLOC.)
+	 * N.B. umap_node_find() takes (mp, targetvp); the original
+	 * call here passed only lowervp, dropping the mount argument.
+	 */
+	if (othervp = umap_node_find(mp, lowervp)) {
+		FREE(xp, M_TEMP);
+		vp->v_type = VBAD;	/* node is discarded */
+		vp->v_usecount = 0;	/* XXX */
+		*vpp = othervp;
+		return (0);
+	}
+	VREF(lowervp);   /* Extra VREF will be vrele'd in umap_node_create */
+	hd = umap_node_hash(lowervp);
+	insque(xp, hd);
+	return (0);
+}
+
+
+/*
+ * Try to find an existing umap_node vnode refering
+ * to it, otherwise make a new umap_node vnode which
+ * contains a reference to the target vnode.
+ * Consumes the caller's reference on targetvp.
+ */
+int
+umap_node_create(mp, targetvp, newvpp)
+	struct mount *mp;
+	struct vnode *targetvp;
+	struct vnode **newvpp;
+{
+	struct vnode *aliasvp;
+
+	if (aliasvp = umap_node_find(mp, targetvp)) {
+		/*
+		 * Take another reference to the alias vnode
+		 */
+#ifdef UMAPFS_DIAGNOSTIC
+		/* was "ap->umap_vnode": `ap' is not declared in this scope */
+		vprint("umap_node_create: exists", aliasvp);
+#endif
+		/* VREF(aliasvp); */
+	} else {
+		int error;
+
+		/*
+		 * Get new vnode.
+		 */
+#ifdef UMAPFS_DIAGNOSTIC
+		printf("umap_node_create: create new alias vnode\n");
+#endif
+		/*
+		 * Make new vnode reference the umap_node.
+		 */
+		if (error = umap_node_alloc(mp, targetvp, &aliasvp))
+			return (error);
+
+		/*
+		 * aliasvp is already VREF'd by getnewvnode()
+		 */
+	}
+
+	vrele(targetvp);	/* drop the reference the caller handed us */
+
+#ifdef UMAPFS_DIAGNOSTIC
+	vprint("umap_node_create: alias", aliasvp);
+	/*
+	 * NOTE(review): targetvp was just vrele'd above; this diagnostic
+	 * vprint may inspect a vnode with zero references.
+	 */
+	vprint("umap_node_create: target", targetvp);
+#endif
+
+	*newvpp = aliasvp;
+	return (0);
+}
+
+#ifdef UMAPFS_DIAGNOSTIC
+/*
+ * Sanity-check an alias vnode before handing back its lower vnode;
+ * this is the UMAPFS_DIAGNOSTIC body of the UMAPVPTOLOWERVP() macro.
+ * On failure it spins on umap_checkvp_barrier so a debugger can
+ * attach; clear the barrier to fall through to panic().
+ */
+int umap_checkvp_barrier = 1;
+struct vnode *
+umap_checkvp(vp, fil, lno)
+	struct vnode *vp;
+	char *fil;	/* caller's source file, for diagnostics */
+	int lno;	/* caller's source line, for diagnostics */
+{
+	struct umap_node *a = VTOUMAP(vp);
+#if 0
+	/*
+	 * Can't do this check because vop_reclaim runs
+	 * with funny vop vector.
+	 */
+	if (vp->v_op != umap_vnodeop_p) {
+		printf ("umap_checkvp: on non-umap-node\n");
+		while (umap_checkvp_barrier) /*WAIT*/ ;
+		panic("umap_checkvp");
+	}
+#endif
+	if (a->umap_lowervp == NULL) {
+		/* Should never happen */
+		int i; u_long *p;
+		printf("vp = %x, ZERO ptr\n", vp);
+		/* dump the first 8 words of the umap_node for post-mortem */
+		for (p = (u_long *) a, i = 0; i < 8; i++)
+			printf(" %x", p[i]);
+		printf("\n");
+		/* wait for debugger */
+		while (umap_checkvp_barrier) /*WAIT*/ ;
+		panic("umap_checkvp");
+	}
+	if (a->umap_lowervp->v_usecount < 1) {
+		int i; u_long *p;
+		printf("vp = %x, unref'ed lowervp\n", vp);
+		/* dump the first 8 words of the umap_node for post-mortem */
+		for (p = (u_long *) a, i = 0; i < 8; i++)
+			printf(" %x", p[i]);
+		printf("\n");
+		/* wait for debugger */
+		while (umap_checkvp_barrier) /*WAIT*/ ;
+		panic ("umap with unref'ed lowervp");
+	}
+#if 0
+	printf("umap %x/%d -> %x/%d [%s, %d]\n",
+	    a->umap_vnode, a->umap_vnode->v_usecount,
+	    a->umap_lowervp, a->umap_lowervp->v_usecount,
+	    fil, lno);
+#endif
+	return (a->umap_lowervp);
+}
+#endif
+
+/*
+ * umap_mapids maps all of the ids in a credential, both user and group,
+ * through this mount's translation tables.  An unmatched uid becomes
+ * NOBODY; an unmatched gid becomes NULLGROUP.
+ */
+void
+umap_mapids(v_mount, credp)
+	struct mount *v_mount;
+	struct ucred *credp;
+{
+	int i, unentries, gnentries;
+	u_long *groupmap, *usermap;
+	uid_t uid;
+	gid_t gid;
+
+	unentries = MOUNTTOUMAPMOUNT(v_mount)->info_nentries;
+	usermap = &(MOUNTTOUMAPMOUNT(v_mount)->info_mapdata[0][0]);
+	gnentries = MOUNTTOUMAPMOUNT(v_mount)->info_gnentries;
+	groupmap = &(MOUNTTOUMAPMOUNT(v_mount)->info_gmapdata[0][0]);
+
+	/* Find uid entry in map */
+
+	uid = (uid_t) umap_findid(credp->cr_uid, usermap, unentries);
+
+	if (uid != -1)
+		credp->cr_uid = uid;
+	else
+		credp->cr_uid = (uid_t) NOBODY;
+
+#ifdef notdef
+	/* cr_gid is the same as cr_groups[0] in 4BSD */
+
+	/* Find gid entry in map */
+
+	gid = (gid_t) umap_findid(credp->cr_gid, groupmap, gnentries);
+
+	if (gid != -1)
+		credp->cr_gid = gid;
+	else
+		credp->cr_gid = NULLGROUP;
+#endif
+
+	/*
+	 * Now map each of the groups in the cr_groups array.  Iterate
+	 * over cr_ngroups rather than stopping at the first gid of 0:
+	 * the array is not 0-terminated, so the old
+	 * `while (cr_groups[i] != 0)' loop could run past the end of
+	 * the array, and it also stopped early whenever gid 0 (wheel)
+	 * appeared in the middle of the group list.
+	 */
+	for (i = 0; i < credp->cr_ngroups; i++) {
+		gid = (gid_t) umap_findid(credp->cr_groups[i],
+		    groupmap, gnentries);
+
+		if (gid != -1)
+			credp->cr_groups[i] = gid;
+		else
+			credp->cr_groups[i] = NULLGROUP;
+	}
+}
diff --git a/sys/fs/umapfs/umap_vfsops.c b/sys/fs/umapfs/umap_vfsops.c
new file mode 100644
index 0000000..2480a85
--- /dev/null
+++ b/sys/fs/umapfs/umap_vfsops.c
@@ -0,0 +1,407 @@
+/*
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software donated to Berkeley by
+ * the UCLA Ficus project.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)umap_vfsops.c 8.3 (Berkeley) 1/21/94
+ *
+ * @(#)null_vfsops.c 1.5 (Berkeley) 7/10/92
+ */
+
+/*
+ * Umap Layer
+ * (See mount_umap(8) for a description of this layer.)
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <sys/vnode.h>
+#include <sys/mount.h>
+#include <sys/namei.h>
+#include <sys/malloc.h>
+#include <miscfs/umapfs/umap.h>
+
+/*
+ * Mount umap layer.
+ * Copies in the umap_args, validates the mapping table sizes, looks
+ * up the lower directory, and builds the umap_mount with an aliased
+ * root vnode.  The root reference is released in umapfs_unmount.
+ */
+int
+umapfs_mount(mp, path, data, ndp, p)
+	struct mount *mp;
+	char *path;
+	caddr_t data;
+	struct nameidata *ndp;
+	struct proc *p;
+{
+	struct umap_args args;
+	struct vnode *lowerrootvp, *vp;
+	struct vnode *umapm_rootvp;
+	struct umap_mount *amp;
+	u_int size;
+	int error;
+
+#ifdef UMAPFS_DIAGNOSTIC
+	printf("umapfs_mount(mp = %x)\n", mp);
+#endif
+
+	/*
+	 * Update is a no-op
+	 */
+	if (mp->mnt_flag & MNT_UPDATE) {
+		return (EOPNOTSUPP);
+		/* return (VFS_MOUNT(MOUNTTOUMAPMOUNT(mp)->umapm_vfs, path, data, ndp, p));*/
+	}
+
+	/*
+	 * Get argument
+	 */
+	if (error = copyin(data, (caddr_t)&args, sizeof(struct umap_args)))
+		return (error);
+
+	/*
+	 * Sanity-check the mapping table sizes before they are used as
+	 * copyin lengths below: a negative or oversized count supplied
+	 * from userland would otherwise overrun the fixed-size
+	 * info_mapdata/info_gmapdata arrays in struct umap_mount.
+	 */
+	if (args.nentries < 0 || args.nentries > MAPFILEENTRIES ||
+	    args.gnentries < 0 || args.gnentries > GMAPFILEENTRIES)
+		return (EINVAL);
+
+	/*
+	 * Find lower node
+	 */
+	NDINIT(ndp, LOOKUP, FOLLOW|WANTPARENT|LOCKLEAF,
+	    UIO_USERSPACE, args.target, p);
+	if (error = namei(ndp))
+		return (error);
+
+	/*
+	 * Sanity check on lower vnode
+	 */
+	lowerrootvp = ndp->ni_vp;
+#ifdef UMAPFS_DIAGNOSTIC
+	printf("vp = %x, check for VDIR...\n", lowerrootvp);
+#endif
+	vrele(ndp->ni_dvp);
+	ndp->ni_dvp = 0;
+
+	if (lowerrootvp->v_type != VDIR) {
+		vput(lowerrootvp);
+		return (EINVAL);
+	}
+
+#ifdef UMAPFS_DIAGNOSTIC
+	printf("mp = %x\n", mp);
+#endif
+
+	amp = (struct umap_mount *) malloc(sizeof(struct umap_mount),
+	    M_UFSMNT, M_WAITOK);	/* XXX */
+
+	/*
+	 * Save reference to underlying FS
+	 */
+	amp->umapm_vfs = lowerrootvp->v_mount;
+
+	/*
+	 * Now copy in the number of entries and maps for umap mapping.
+	 */
+	amp->info_nentries = args.nentries;
+	amp->info_gnentries = args.gnentries;
+	error = copyin(args.mapdata, (caddr_t)amp->info_mapdata,
+	    2*sizeof(u_long)*args.nentries);
+	if (error)
+		goto bad;
+
+#ifdef UMAP_DIAGNOSTIC
+	printf("umap_mount:nentries %d\n",args.nentries);
+	for (i = 0; i < args.nentries; i++)
+		printf("	%d maps to %d\n", amp->info_mapdata[i][0],
+		    amp->info_mapdata[i][1]);
+#endif
+
+	/*
+	 * The group map has its own entry count; the original code
+	 * copied 2*sizeof(u_long)*args.nentries bytes here, which
+	 * truncated or over-read the user's group table.
+	 */
+	error = copyin(args.gmapdata, (caddr_t)amp->info_gmapdata,
+	    2*sizeof(u_long)*args.gnentries);
+	if (error)
+		goto bad;
+
+#ifdef UMAP_DIAGNOSTIC
+	printf("umap_mount:gnentries %d\n",args.gnentries);
+	for (i = 0; i < args.gnentries; i++)
+		printf("	group %d maps to %d\n",
+		    amp->info_gmapdata[i][0],
+		    amp->info_gmapdata[i][1]);
+#endif
+
+
+	/*
+	 * Save reference. Each mount also holds
+	 * a reference on the root vnode.
+	 */
+	error = umap_node_create(mp, lowerrootvp, &vp);
+	/*
+	 * Make sure the node alias worked before touching vp:
+	 * on failure vp is never initialized, so the old code's
+	 * unconditional VOP_UNLOCK(vp) dereferenced garbage.
+	 */
+	if (error) {
+		vrele(lowerrootvp);
+		free(amp, M_UFSMNT);	/* XXX */
+		return (error);
+	}
+	/*
+	 * Unlock the node (either the lower or the alias)
+	 */
+	VOP_UNLOCK(vp);
+
+	/*
+	 * Keep a held reference to the root vnode.
+	 * It is vrele'd in umapfs_unmount.
+	 */
+	umapm_rootvp = vp;
+	umapm_rootvp->v_flag |= VROOT;
+	amp->umapm_rootvp = umapm_rootvp;
+	if (UMAPVPTOLOWERVP(umapm_rootvp)->v_mount->mnt_flag & MNT_LOCAL)
+		mp->mnt_flag |= MNT_LOCAL;
+	mp->mnt_data = (qaddr_t) amp;
+	getnewfsid(mp, MOUNT_LOFS);
+
+	(void) copyinstr(path, mp->mnt_stat.f_mntonname, MNAMELEN - 1, &size);
+	bzero(mp->mnt_stat.f_mntonname + size, MNAMELEN - size);
+	(void) copyinstr(args.target, mp->mnt_stat.f_mntfromname, MNAMELEN - 1,
+	    &size);
+	bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);
+#ifdef UMAPFS_DIAGNOSTIC
+	printf("umapfs_mount: lower %s, alias at %s\n",
+	    mp->mnt_stat.f_mntfromname, mp->mnt_stat.f_mntonname);
+#endif
+	return (0);
+
+bad:
+	/*
+	 * Undo the namei reference (lowerrootvp is still locked from
+	 * LOCKLEAF) and release the half-built mount structure; the
+	 * original code leaked both on copyin failure.
+	 */
+	vput(lowerrootvp);
+	free(amp, M_UFSMNT);	/* XXX */
+	return (error);
+}
+
+/*
+ * VFS start. Nothing needed here - the start routine
+ * on the underlying filesystem will have been called
+ * when that filesystem was mounted.
+ */
+int
+umapfs_start(mp, flags, p)
+	struct mount *mp;
+	int flags;
+	struct proc *p;
+{
+	return (0);	/* deliberately a no-op; see comment above */
+	/* return (VFS_START(MOUNTTOUMAPMOUNT(mp)->umapm_vfs, flags, p)); */
+}
+
+/*
+ * Free reference to umap layer.
+ * Refuses if any umap vnode other than the root is still in use;
+ * otherwise drops the mount's root reference and destroys the
+ * umap_mount structure.
+ */
+int
+umapfs_unmount(mp, mntflags, p)
+	struct mount *mp;
+	int mntflags;
+	struct proc *p;
+{
+	struct vnode *umapm_rootvp = MOUNTTOUMAPMOUNT(mp)->umapm_rootvp;
+	int error;
+	int flags = 0;
+	extern int doforce;	/* global "allow forced unmounts" switch */
+
+#ifdef UMAPFS_DIAGNOSTIC
+	printf("umapfs_unmount(mp = %x)\n", mp);
+#endif
+
+	if (mntflags & MNT_FORCE) {
+		/* lofs can never be rootfs so don't check for it */
+		if (!doforce)
+			return (EINVAL);
+		flags |= FORCECLOSE;
+	}
+
+	/*
+	 * Clear out buffer cache. I don't think we
+	 * ever get anything cached at this level at the
+	 * moment, but who knows...
+	 */
+#ifdef notyet
+	mntflushbuf(mp, 0);
+	if (mntinvalbuf(mp, 1))
+		return (EBUSY);
+#endif
+	/* The mount itself holds one root reference; more means busy. */
+	if (umapm_rootvp->v_usecount > 1)
+		return (EBUSY);
+	/* Flush every other umap vnode referencing this mount. */
+	if (error = vflush(mp, umapm_rootvp, flags))
+		return (error);
+
+#ifdef UMAPFS_DIAGNOSTIC
+	vprint("alias root of lower", umapm_rootvp);
+#endif
+	/*
+	 * Release reference on underlying root vnode
+	 */
+	vrele(umapm_rootvp);
+	/*
+	 * And blow it away for future re-use
+	 */
+	vgone(umapm_rootvp);
+	/*
+	 * Finally, throw away the umap_mount structure
+	 */
+	free(mp->mnt_data, M_UFSMNT);	/* XXX */
+	mp->mnt_data = 0;
+	return (0);
+}
+
+/*
+ * Return a locked, referenced vnode for the root of the umap mount.
+ */
+int
+umapfs_root(mp, vpp)
+	struct mount *mp;
+	struct vnode **vpp;
+{
+	struct vnode *rootvp;
+
+#ifdef UMAPFS_DIAGNOSTIC
+	printf("umapfs_root(mp = %x, vp = %x->%x)\n", mp,
+	    MOUNTTOUMAPMOUNT(mp)->umapm_rootvp,
+	    UMAPVPTOLOWERVP(MOUNTTOUMAPMOUNT(mp)->umapm_rootvp)
+	    );
+#endif
+
+	/*
+	 * Return locked reference to root.
+	 */
+	rootvp = MOUNTTOUMAPMOUNT(mp)->umapm_rootvp;
+	VREF(rootvp);
+	VOP_LOCK(rootvp);
+	*vpp = rootvp;
+	return (0);
+}
+
+/*
+ * Pass quota control requests straight through to the underlying
+ * filesystem.
+ */
+int
+umapfs_quotactl(mp, cmd, uid, arg, p)
+	struct mount *mp;
+	int cmd;
+	uid_t uid;
+	caddr_t arg;
+	struct proc *p;
+{
+	struct umap_mount *amp = MOUNTTOUMAPMOUNT(mp);
+
+	return (VFS_QUOTACTL(amp->umapm_vfs, cmd, uid, arg, p));
+}
+
+/*
+ * Report statistics from the underlying filesystem, substituting this
+ * mount's own fsid and mount-point names when the caller's buffer is
+ * not the mount's private statfs structure.
+ */
+int
+umapfs_statfs(mp, sbp, p)
+	struct mount *mp;
+	struct statfs *sbp;
+	struct proc *p;
+{
+	struct statfs lowerstat;
+	int error;
+
+#ifdef UMAPFS_DIAGNOSTIC
+	printf("umapfs_statfs(mp = %x, vp = %x->%x)\n", mp,
+	    MOUNTTOUMAPMOUNT(mp)->umapm_rootvp,
+	    UMAPVPTOLOWERVP(MOUNTTOUMAPMOUNT(mp)->umapm_rootvp)
+	    );
+#endif
+
+	bzero(&lowerstat, sizeof(lowerstat));
+
+	error = VFS_STATFS(MOUNTTOUMAPMOUNT(mp)->umapm_vfs, &lowerstat, p);
+	if (error)
+		return (error);
+
+	/* now copy across the "interesting" information and fake the rest */
+	sbp->f_type = lowerstat.f_type;
+	sbp->f_flags = lowerstat.f_flags;
+	sbp->f_bsize = lowerstat.f_bsize;
+	sbp->f_iosize = lowerstat.f_iosize;
+	sbp->f_blocks = lowerstat.f_blocks;
+	sbp->f_bfree = lowerstat.f_bfree;
+	sbp->f_bavail = lowerstat.f_bavail;
+	sbp->f_files = lowerstat.f_files;
+	sbp->f_ffree = lowerstat.f_ffree;
+	if (sbp != &mp->mnt_stat) {
+		bcopy(&mp->mnt_stat.f_fsid, &sbp->f_fsid, sizeof(sbp->f_fsid));
+		bcopy(mp->mnt_stat.f_mntonname, sbp->f_mntonname, MNAMELEN);
+		bcopy(mp->mnt_stat.f_mntfromname, sbp->f_mntfromname, MNAMELEN);
+	}
+	return (0);
+}
+
+/*
+ * Sync is a no-op: the umap layer caches no data of its own, so there
+ * is nothing to push down to the lower filesystem here.
+ */
+int
+umapfs_sync(mp, waitfor, cred, p)
+	struct mount *mp;
+	int waitfor;
+	struct ucred *cred;
+	struct proc *p;
+{
+	/*
+	 * XXX - Assumes no data cached at umap layer.
+	 */
+	return (0);
+}
+
+/*
+ * Inode-number lookups are delegated to the underlying filesystem.
+ */
+int
+umapfs_vget(mp, ino, vpp)
+	struct mount *mp;
+	ino_t ino;
+	struct vnode **vpp;
+{
+	struct umap_mount *amp = MOUNTTOUMAPMOUNT(mp);
+
+	return (VFS_VGET(amp->umapm_vfs, ino, vpp));
+}
+
+/*
+ * NFS file-handle translation is delegated to the underlying
+ * filesystem.
+ */
+int
+umapfs_fhtovp(mp, fidp, nam, vpp, exflagsp, credanonp)
+	struct mount *mp;
+	struct fid *fidp;
+	struct mbuf *nam;
+	struct vnode **vpp;
+	int *exflagsp;
+	struct ucred **credanonp;
+{
+	struct umap_mount *amp = MOUNTTOUMAPMOUNT(mp);
+
+	return (VFS_FHTOVP(amp->umapm_vfs, fidp, nam, vpp, exflagsp,
+	    credanonp));
+}
+
+/*
+ * Build a file handle for the lower vnode behind this alias.
+ */
+int
+umapfs_vptofh(vp, fhp)
+	struct vnode *vp;
+	struct fid *fhp;
+{
+	struct vnode *lowervp = UMAPVPTOLOWERVP(vp);
+
+	return (VFS_VPTOFH(lowervp, fhp));
+}
+
+int umapfs_init __P((void));
+
+/*
+ * VFS operations vector for the umap layer; slot order must match
+ * struct vfsops (mount, start, unmount, root, quotactl, statfs,
+ * sync, vget, fhtovp, vptofh, init).
+ */
+struct vfsops umap_vfsops = {
+	umapfs_mount,
+	umapfs_start,
+	umapfs_unmount,
+	umapfs_root,
+	umapfs_quotactl,
+	umapfs_statfs,
+	umapfs_sync,
+	umapfs_vget,
+	umapfs_fhtovp,
+	umapfs_vptofh,
+	umapfs_init,
+};
diff --git a/sys/fs/umapfs/umap_vnops.c b/sys/fs/umapfs/umap_vnops.c
new file mode 100644
index 0000000..287804e
--- /dev/null
+++ b/sys/fs/umapfs/umap_vnops.c
@@ -0,0 +1,488 @@
+/*
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software donated to Berkeley by
+ * the UCLA Ficus project.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)umap_vnops.c 8.3 (Berkeley) 1/5/94
+ */
+
+/*
+ * Umap Layer
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <sys/vnode.h>
+#include <sys/mount.h>
+#include <sys/namei.h>
+#include <sys/malloc.h>
+#include <sys/buf.h>
+#include <miscfs/umapfs/umap.h>
+
+
+int umap_bug_bypass = 0;	/* for debugging: nonzero enables printf tracing in umap_bypass and related vnode ops */
+
+/*
+ * umap_bypass:
+ *	Generic pass-through for vnode operations the umap layer does
+ *	not handle specially (see null_vnops.c:null_bypass for the
+ *	general design).  It maps every umap vnode in the argument
+ *	structure to its underlying vnode, remaps the ids in any
+ *	credential argument (and in any componentname credential),
+ *	invokes the same operation on the lower layer, and finally
+ *	restores the arguments so the caller sees them unchanged.
+ *
+ *	Fix relative to the original: in the cleanup section the
+ *	componentname credential was printed *after* being crfree'd
+ *	(use after free) and the debug guard tested the wrong pointer
+ *	(credpp); the restored savecompcredp is reported instead.
+ */
+int
+umap_bypass(ap)
+	struct vop_generic_args /* {
+		struct vnodeop_desc *a_desc;
+		<other random data follows, presumably>
+	} */ *ap;
+{
+	extern int (**umap_vnodeop_p)();	/* not extern, really "forward" */
+	struct ucred **credpp = 0, *credp = 0;
+	struct ucred *savecredp = 0, *savecompcredp = 0;
+	struct ucred *compcredp = 0;
+	struct vnode **this_vp_p;
+	int error;
+	struct vnode *old_vps[VDESC_MAX_VPS];
+	struct vnode *vp1 = 0;
+	struct vnode **vps_p[VDESC_MAX_VPS];
+	struct vnode ***vppp;
+	struct vnodeop_desc *descp = ap->a_desc;
+	int reles, i;
+	struct componentname **compnamepp = 0;
+
+	if (umap_bug_bypass)
+		printf ("umap_bypass: %s\n", descp->vdesc_name);
+
+#ifdef SAFETY
+	/*
+	 * We require at least one vp.
+	 */
+	if (descp->vdesc_vp_offsets == NULL ||
+	    descp->vdesc_vp_offsets[0] == VDESC_NO_OFFSET)
+		panic ("umap_bypass: no vp's in map.\n");
+#endif
+
+	/*
+	 * Map the vnodes going in.
+	 * Later, we'll invoke the operation based on
+	 * the first mapped vnode's operation vector.
+	 */
+	reles = descp->vdesc_flags;
+	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
+		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
+			break;   /* bail out at end of list */
+		vps_p[i] = this_vp_p =
+			VOPARG_OFFSETTO(struct vnode**, descp->vdesc_vp_offsets[i], ap);
+
+		/* Remember the first vnode; its mount supplies the id maps. */
+		if (i == 0) {
+			vp1 = *vps_p[0];
+		}
+
+		/*
+		 * We're not guaranteed that any but the first vnode
+		 * are of our type.  Check for and don't map any
+		 * that aren't.  (Must map first vp or vclean fails.)
+		 */
+		if (i && (*this_vp_p)->v_op != umap_vnodeop_p) {
+			old_vps[i] = NULL;
+		} else {
+			old_vps[i] = *this_vp_p;
+			*(vps_p[i]) = UMAPVPTOLOWERVP(*this_vp_p);
+			/*
+			 * The lower op will vrele this argument
+			 * (WILLRELE flag), so take an extra reference
+			 * on the lower vnode to balance it.
+			 */
+			if (reles & 1)
+				VREF(*this_vp_p);
+		}
+	}
+
+	/*
+	 * Fix the credentials.  (That's the purpose of this layer.)
+	 * Work on a crdup'ed copy so the caller's credential is
+	 * never modified.
+	 */
+	if (descp->vdesc_cred_offset != VDESC_NO_OFFSET) {
+
+		credpp = VOPARG_OFFSETTO(struct ucred**,
+		    descp->vdesc_cred_offset, ap);
+
+		/* Save old values */
+		savecredp = (*credpp);
+		(*credpp) = crdup(savecredp);
+		credp = *credpp;
+
+		if (umap_bug_bypass && credp->cr_uid != 0)
+			printf("umap_bypass: user was %d, group %d\n",
+			    credp->cr_uid, credp->cr_gid);
+
+		/* Map all ids in the credential structure. */
+		umap_mapids(vp1->v_mount, credp);
+
+		if (umap_bug_bypass && credp->cr_uid != 0)
+			printf("umap_bypass: user now %d, group %d\n",
+			    credp->cr_uid, credp->cr_gid);
+	}
+
+	/*
+	 * BSD often keeps a credential in the componentname structure
+	 * for speed.  If there is one, it better get mapped, too.
+	 */
+	if (descp->vdesc_componentname_offset != VDESC_NO_OFFSET) {
+
+		compnamepp = VOPARG_OFFSETTO(struct componentname**,
+		    descp->vdesc_componentname_offset, ap);
+
+		compcredp = (*compnamepp)->cn_cred;
+		savecompcredp = compcredp;
+		compcredp = (*compnamepp)->cn_cred = crdup(savecompcredp);
+
+		if (umap_bug_bypass && compcredp->cr_uid != 0)
+			printf("umap_bypass: component credit user was %d, group %d\n",
+			    compcredp->cr_uid, compcredp->cr_gid);
+
+		/* Map all ids in the credential structure. */
+		umap_mapids(vp1->v_mount, compcredp);
+
+		if (umap_bug_bypass && compcredp->cr_uid != 0)
+			printf("umap_bypass: component credit user now %d, group %d\n",
+			    compcredp->cr_uid, compcredp->cr_gid);
+	}
+
+	/*
+	 * Call the operation on the lower layer
+	 * with the modified argument structure.
+	 */
+	error = VCALL(*(vps_p[0]), descp->vdesc_offset, ap);
+
+	/*
+	 * Maintain the illusion of call-by-value
+	 * by restoring vnodes in the argument structure
+	 * to their original value.
+	 */
+	reles = descp->vdesc_flags;
+	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
+		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
+			break;   /* bail out at end of list */
+		if (old_vps[i]) {
+			*(vps_p[i]) = old_vps[i];
+			/* Drop the extra reference taken above. */
+			if (reles & 1)
+				vrele(*(vps_p[i]));
+		}
+	}
+
+	/*
+	 * Map the possible out-going vpp
+	 * (Assumes that the lower layer always returns
+	 * a VREF'ed vpp unless it gets an error.)
+	 */
+	if (descp->vdesc_vpp_offset != VDESC_NO_OFFSET &&
+	    !(descp->vdesc_flags & VDESC_NOMAP_VPP) &&
+	    !error) {
+		if (descp->vdesc_flags & VDESC_VPP_WILLRELE)
+			goto out;
+		vppp = VOPARG_OFFSETTO(struct vnode***,
+		    descp->vdesc_vpp_offset, ap);
+		error = umap_node_create(old_vps[0]->v_mount, **vppp, *vppp);
+	}
+
+ out:
+	/*
+	 * Free duplicate cred structure and restore old one.
+	 */
+	if (descp->vdesc_cred_offset != VDESC_NO_OFFSET) {
+		if (umap_bug_bypass && credp && credp->cr_uid != 0)
+			printf("umap_bypass: returning-user was %d\n",
+			    credp->cr_uid);
+
+		crfree(credp);
+		(*credpp) = savecredp;
+		if (umap_bug_bypass && credpp && (*credpp)->cr_uid != 0)
+			printf("umap_bypass: returning-user now %d\n\n",
+			    (*credpp)->cr_uid);
+	}
+
+	if (descp->vdesc_componentname_offset != VDESC_NO_OFFSET) {
+		if (umap_bug_bypass && compcredp && compcredp->cr_uid != 0)
+			printf("umap_bypass: returning-component-user was %d\n",
+			    compcredp->cr_uid);
+
+		crfree(compcredp);
+		(*compnamepp)->cn_cred = savecompcredp;
+		/*
+		 * Report the restored credential: compcredp was just
+		 * crfree'd, so it must not be dereferenced here.
+		 */
+		if (umap_bug_bypass && savecompcredp &&
+		    savecompcredp->cr_uid != 0)
+			printf("umap_bypass: returning-component-user now %d\n",
+			    savecompcredp->cr_uid);
+	}
+
+	return (error);
+}
+
+
+/*
+ * umap_getattr:
+ *	Run getattr on the lower layer via umap_bypass(), then fix up
+ *	the result: report this mount's fsid rather than the lower
+ *	one, and reverse-map the returned uid/gid back into the local
+ *	id space.  Ids with no reverse mapping are reported as
+ *	NOBODY / NULLGROUP.
+ */
+int
+umap_getattr(ap)
+	struct vop_getattr_args /* {
+		struct vnode *a_vp;
+		struct vattr *a_vap;
+		struct ucred *a_cred;
+		struct proc *a_p;
+	} */ *ap;
+{
+	short uid, gid;
+	int error, tmpid, nentries, gnentries;
+	u_long (*mapdata)[2], (*gmapdata)[2];
+	struct vnode **vp1p;
+	struct vnodeop_desc *descp = ap->a_desc;
+
+	/* Assignment in condition is intentional: do the lower getattr first. */
+	if (error = umap_bypass(ap))
+		return (error);
+	/* Requires that arguments be restored. */
+	ap->a_vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];
+
+	/*
+	 * Umap needs to map the uid and gid returned by a stat
+	 * into the proper values for this site.  This involves
+	 * finding the returned uid in the mapping information,
+	 * translating it into the uid on the other end,
+	 * and filling in the proper field in the vattr
+	 * structure pointed to by ap->a_vap.  Ids that have no
+	 * reverse mapping fall back to NOBODY / NULLGROUP below.
+	 */
+
+	/* Find entry in map */
+
+	uid = ap->a_vap->va_uid;
+	gid = ap->a_vap->va_gid;
+	if (umap_bug_bypass)
+		printf("umap_getattr: mapped uid = %d, mapped gid = %d\n", uid,
+		    gid);
+
+	/* Fetch the forward uid/gid map tables from this vnode's mount. */
+	vp1p = VOPARG_OFFSETTO(struct vnode**, descp->vdesc_vp_offsets[0], ap);
+	nentries = MOUNTTOUMAPMOUNT((*vp1p)->v_mount)->info_nentries;
+	mapdata = (MOUNTTOUMAPMOUNT((*vp1p)->v_mount)->info_mapdata);
+	gnentries = MOUNTTOUMAPMOUNT((*vp1p)->v_mount)->info_gnentries;
+	gmapdata = (MOUNTTOUMAPMOUNT((*vp1p)->v_mount)->info_gmapdata);
+
+	/*
+	 * Reverse map the uid for the vnode.  Since it's a reverse
+	 * map, we can't use umap_mapids() to do it.
+	 */
+	tmpid = umap_reverse_findid(uid, mapdata, nentries);
+
+	if (tmpid != -1) {
+
+		ap->a_vap->va_uid = (uid_t) tmpid;
+		if (umap_bug_bypass)
+			printf("umap_getattr: original uid = %d\n", uid);
+	} else
+		ap->a_vap->va_uid = (uid_t) NOBODY;
+
+	/* Reverse map the gid for the vnode. */
+
+	tmpid = umap_reverse_findid(gid, gmapdata, gnentries);
+
+	if (tmpid != -1) {
+
+		ap->a_vap->va_gid = (gid_t) tmpid;
+		if (umap_bug_bypass)
+			printf("umap_getattr: original gid = %d\n", gid);
+	} else
+		ap->a_vap->va_gid = (gid_t) NULLGROUP;
+
+	return (0);
+}
+
+/*
+ * umap_inactive:
+ *	Called when the vnode's use count drops to zero.
+ */
+int
+umap_inactive(ap)
+	struct vop_inactive_args /* {
+		struct vnode *a_vp;
+	} */ *ap;
+{
+	/*
+	 * Do nothing (and _don't_ bypass).
+	 * Wait to vrele lowervp until reclaim,
+	 * so that until then our umap_node is in the
+	 * cache and reusable.
+	 */
+	return (0);
+}
+
+/*
+ * umap_reclaim:
+ *	Tear down the umap node and release the lower vnode reference
+ *	that has been held since node creation (see umap_inactive).
+ */
+int
+umap_reclaim(ap)
+	struct vop_reclaim_args /* {
+		struct vnode *a_vp;
+	} */ *ap;
+{
+	struct vnode *vp = ap->a_vp;
+	struct umap_node *xp = VTOUMAP(vp);
+	struct vnode *lowervp = xp->umap_lowervp;
+
+	/* After this assignment, this node will not be re-used. */
+	xp->umap_lowervp = NULL;
+	/* Unlink from the node list (presumably the cache chain in umap_subr.c). */
+	remque(xp);
+	FREE(vp->v_data, M_TEMP);
+	vp->v_data = NULL;
+	vrele(lowervp);
+	return (0);
+}
+
+/*
+ * umap_strategy:
+ *	Hand-coded (see XXX below): point the buffer at the
+ *	underlying vnode for the duration of the I/O, then restore
+ *	the umap vnode so the caller sees the buffer unchanged.
+ */
+int
+umap_strategy(ap)
+	struct vop_strategy_args /* {
+		struct buf *a_bp;
+	} */ *ap;
+{
+	struct buf *bp = ap->a_bp;
+	struct vnode *origvp = bp->b_vp;
+	int error;
+
+	bp->b_vp = UMAPVPTOLOWERVP(origvp);
+	error = VOP_STRATEGY(bp);
+	bp->b_vp = origvp;
+
+	return (error);
+}
+
+/*
+ * umap_bwrite:
+ *	Hand-coded like umap_strategy: swap in the underlying vnode
+ *	for the write, then put the umap vnode back so the buffer is
+ *	returned unchanged.
+ */
+int
+umap_bwrite(ap)
+	struct vop_bwrite_args /* {
+		struct buf *a_bp;
+	} */ *ap;
+{
+	struct buf *bp = ap->a_bp;
+	struct vnode *origvp = bp->b_vp;
+	int error;
+
+	bp->b_vp = UMAPVPTOLOWERVP(origvp);
+	error = VOP_BWRITE(bp);
+	bp->b_vp = origvp;
+
+	return (error);
+}
+
+
+/*
+ * umap_print:
+ *	Print vnode identification for debugging (ddb `show vnode').
+ */
+int
+umap_print(ap)
+	struct vop_print_args /* {
+		struct vnode *a_vp;
+	} */ *ap;
+{
+	struct vnode *vp = ap->a_vp;
+
+	/*
+	 * %x with a pointer argument truncates on LP64 machines and
+	 * mismatches the format specifier; print as u_long instead.
+	 */
+	printf("\ttag VT_UMAPFS, vp=%lx, lowervp=%lx\n",
+	    (u_long) vp, (u_long) UMAPVPTOLOWERVP(vp));
+	return (0);
+}
+
+/*
+ * umap_rename:
+ *	Rename needs special handling because it carries two
+ *	componentname structures; umap_bypass() only maps the one the
+ *	operation descriptor points at, so the other (a_tcnp) is
+ *	duplicated, mapped, and restored here.
+ */
+int
+umap_rename(ap)
+	struct vop_rename_args /* {
+		struct vnode *a_fdvp;
+		struct vnode *a_fvp;
+		struct componentname *a_fcnp;
+		struct vnode *a_tdvp;
+		struct vnode *a_tvp;
+		struct componentname *a_tcnp;
+	} */ *ap;
+{
+	int error;
+	struct componentname *compnamep;
+	struct ucred *compcredp, *savecompcredp;
+	struct vnode *vp;
+
+	/*
+	 * Rename is irregular, having two componentname structures.
+	 * We need to map the cred in the second structure,
+	 * and then bypass takes care of the rest.
+	 */
+
+	vp = ap->a_fdvp;
+	compnamep = ap->a_tcnp;
+	compcredp = compnamep->cn_cred;
+
+	/* Work on a crdup'ed copy so the caller's credential is untouched. */
+	savecompcredp = compcredp;
+	compcredp = compnamep->cn_cred = crdup(savecompcredp);
+
+	if (umap_bug_bypass && compcredp->cr_uid != 0)
+		printf("umap_rename: rename component credit user was %d, group %d\n",
+		    compcredp->cr_uid, compcredp->cr_gid);
+
+	/* Map all ids in the credential structure. */
+
+	umap_mapids(vp->v_mount, compcredp);
+
+	if (umap_bug_bypass && compcredp->cr_uid != 0)
+		printf("umap_rename: rename component credit user now %d, group %d\n",
+		    compcredp->cr_uid, compcredp->cr_gid);
+
+	error = umap_bypass(ap);
+
+	/* Restore the additional mapped componentname cred structure. */
+
+	crfree(compcredp);
+	compnamep->cn_cred = savecompcredp;
+
+	return error;
+}
+
+/*
+ * Global vfs data structures
+ */
+/*
+ * XXX - strategy, bwrite are hand coded currently.  They should
+ * go away with a merged buffer/block cache.
+ */
+int (**umap_vnodeop_p)();
+/*
+ * Vnode operation dispatch table: everything defaults to
+ * umap_bypass(); only the entries listed explicitly below need
+ * umap-specific handling.
+ */
+struct vnodeopv_entry_desc umap_vnodeop_entries[] = {
+	{ &vop_default_desc, umap_bypass },
+
+	{ &vop_getattr_desc, umap_getattr },
+	{ &vop_inactive_desc, umap_inactive },
+	{ &vop_reclaim_desc, umap_reclaim },
+	{ &vop_print_desc, umap_print },
+	{ &vop_rename_desc, umap_rename },
+
+	{ &vop_strategy_desc, umap_strategy },
+	{ &vop_bwrite_desc, umap_bwrite },
+
+	{ (struct vnodeop_desc*) NULL, (int(*)()) NULL }
+};
+/* Pairs the op-vector pointer with its entry table. */
+struct vnodeopv_desc umap_vnodeop_opv_desc =
+	{ &umap_vnodeop_p, umap_vnodeop_entries };
OpenPOWER on IntegriCloud