path: root/sys/fs/udf/udf_vnops.c
author	scottl <scottl@FreeBSD.org>	2002-04-14 16:52:14 +0000
committer	scottl <scottl@FreeBSD.org>	2002-04-14 16:52:14 +0000
commit	1fd1b83baf376aa6cdbc4fee6c646c2e732250cd (patch)
tree	cb837dcaf5010099c0b2e9ce021a70b5e6f736ef /sys/fs/udf/udf_vnops.c
parent	0559ebe0fd6ed993e11e306ef21ed5a9cb6da7f9 (diff)
download	FreeBSD-src-1fd1b83baf376aa6cdbc4fee6c646c2e732250cd.zip
	FreeBSD-src-1fd1b83baf376aa6cdbc4fee6c646c2e732250cd.tar.gz
Actually add the UDF files!
Diffstat (limited to 'sys/fs/udf/udf_vnops.c')
-rw-r--r--	sys/fs/udf/udf_vnops.c	1232
1 file changed, 1232 insertions, 0 deletions
diff --git a/sys/fs/udf/udf_vnops.c b/sys/fs/udf/udf_vnops.c
new file mode 100644
index 0000000..bb0d1aa
--- /dev/null
+++ b/sys/fs/udf/udf_vnops.c
@@ -0,0 +1,1232 @@
+/*-
+ * Copyright (c) 2001, 2002 Scott Long <scottl@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/* udf_vnops.c */
+/* Take care of the vnode side of things */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/namei.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/stat.h>
+#include <sys/bio.h>
+#include <sys/buf.h>
+#include <sys/mount.h>
+#include <sys/vnode.h>
+#include <sys/dirent.h>
+#include <sys/queue.h>
+#include <sys/unistd.h>
+
+#include <vm/uma.h>
+
+#include <fs/udf/ecma167-udf.h>
+#include <fs/udf/osta.h>
+#include <fs/udf/udf.h>
+
+static int udf_access(struct vop_access_args *);
+static int udf_getattr(struct vop_getattr_args *);
+static int udf_ioctl(struct vop_ioctl_args *);
+static int udf_pathconf(struct vop_pathconf_args *);
+static int udf_read(struct vop_read_args *);
+static int udf_readdir(struct vop_readdir_args *);
+static int udf_readlink(struct vop_readlink_args *ap);
+static int udf_strategy(struct vop_strategy_args *);
+static int udf_print(struct vop_print_args *);
+static int udf_bmap(struct vop_bmap_args *);
+static int udf_lookup(struct vop_cachedlookup_args *);
+static int udf_reclaim(struct vop_reclaim_args *);
+static void udf_dumpblock(void *, int) __unused;
+static int udf_readatoffset(struct udf_node *, int *, int, struct buf **, u_int8_t **);
+static int udf_bmap_internal(struct udf_node *, u_int32_t, daddr64_t *, u_int32_t *);
+
+vop_t **udf_vnodeop_p;
+static struct vnodeopv_entry_desc udf_vnodeop_entries[] = {
+ { &vop_default_desc, (vop_t *) vop_defaultop },
+ { &vop_access_desc, (vop_t *) udf_access },
+ { &vop_bmap_desc, (vop_t *) udf_bmap },
+ { &vop_cachedlookup_desc, (vop_t *) udf_lookup },
+ { &vop_getattr_desc, (vop_t *) udf_getattr },
+ { &vop_inactive_desc, (vop_t *) vop_stdinactive },
+ { &vop_ioctl_desc, (vop_t *) udf_ioctl },
+ { &vop_islocked_desc, (vop_t *) vop_stdislocked },
+ { &vop_lock_desc, (vop_t *) vop_stdlock },
+ { &vop_lookup_desc, (vop_t *) vfs_cache_lookup },
+ { &vop_pathconf_desc, (vop_t *) udf_pathconf },
+ { &vop_print_desc, (vop_t *) udf_print },
+ { &vop_read_desc, (vop_t *) udf_read },
+ { &vop_readdir_desc, (vop_t *) udf_readdir },
+ { &vop_readlink_desc, (vop_t *) udf_readlink },
+ { &vop_reclaim_desc, (vop_t *) udf_reclaim },
+ { &vop_strategy_desc, (vop_t *) udf_strategy },
+ { &vop_unlock_desc, (vop_t *) vop_stdunlock },
+ { &vop_getpages_desc, (vop_t *) vop_stdgetpages },
+ { &vop_putpages_desc, (vop_t *) vop_stdputpages },
+ { NULL, NULL }
+};
+static struct vnodeopv_desc udf_vnodeop_opv_desc =
+ { &udf_vnodeop_p, udf_vnodeop_entries };
+VNODEOP_SET(udf_vnodeop_opv_desc);
+
+MALLOC_DEFINE(M_UDFFID, "UDF FID", "UDF FileId structure");
+
+/* Look up a udf_node based on the ino_t passed in and return its vnode */
+int
+udf_hashlookup(struct udf_mnt *udfmp, ino_t id, int flags, struct vnode **vpp)
+{
+ struct udf_node *node;
+ int error;
+
+ *vpp = NULL;
+
+loop:
+ mtx_lock(&udfmp->hash_mtx);
+ TAILQ_FOREACH(node, &udfmp->udf_tqh, tq) {
+ if (node->hash_id == id) {
+ VI_LOCK(node->i_vnode);
+ mtx_unlock(&udfmp->hash_mtx);
+ error = vget(node->i_vnode, flags | LK_INTERLOCK,
+ curthread);
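+			/* The vnode was recycled while we slept in vget; rescan the hash */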
+ if (error == ENOENT)
+ goto loop;
+ if (error)
+ return (error);
+ *vpp = node->i_vnode;
+ return (0);
+ }
+ }
+
+ mtx_unlock(&udfmp->hash_mtx);
+ return (0);
+}
+
+int
+udf_hashins(struct udf_node *node)
+{
+ struct udf_mnt *udfmp;
+
+ udfmp = node->udfmp;
+
+ mtx_lock(&udfmp->hash_mtx);
+ TAILQ_INSERT_TAIL(&udfmp->udf_tqh, node, tq);
+ mtx_unlock(&udfmp->hash_mtx);
+ lockmgr(&node->i_vnode->v_lock, LK_EXCLUSIVE, (struct mtx *)0,
+ curthread);
+
+ return (0);
+}
+
+int
+udf_hashrem(struct udf_node *node)
+{
+ struct udf_mnt *udfmp;
+
+ udfmp = node->udfmp;
+
+ mtx_lock(&udfmp->hash_mtx);
+ TAILQ_REMOVE(&udfmp->udf_tqh, node, tq);
+ mtx_unlock(&udfmp->hash_mtx);
+
+ return (0);
+}
+
+int
+udf_allocv(struct mount *mp, struct vnode **vpp, struct thread *td)
+{
+ int error;
+ struct vnode *vp;
+
+ error = getnewvnode(VT_UDF, mp, udf_vnodeop_p, &vp);
+ if (error) {
+ printf("udf_allocv: failed to allocate new vnode\n");
+ return (error);
+ }
+
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
+ *vpp = vp;
+ return (0);
+}
+
+/* Convert file entry permission (5 bits per owner/group/user) to a mode_t */
+static mode_t
+udf_permtomode(struct udf_node *node)
+{
+ u_int32_t perm;
+ u_int32_t flags;
+ mode_t mode;
+
+ perm = node->fentry->perm;
+ flags = node->fentry->icbtag.flags;
+
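+	/* Collapse each 5-bit UDF permission class down to the rwx bits of a mode_t */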
+ mode = perm & UDF_FENTRY_PERM_USER_MASK;
+ mode |= ((perm & UDF_FENTRY_PERM_GRP_MASK) >> 2);
+ mode |= ((perm & UDF_FENTRY_PERM_OWNER_MASK) >> 4);
+ mode |= ((flags & UDF_ICB_TAG_FLAGS_STICKY) << 4);
+ mode |= ((flags & UDF_ICB_TAG_FLAGS_SETGID) << 6);
+ mode |= ((flags & UDF_ICB_TAG_FLAGS_SETUID) << 8);
+
+ return (mode);
+}
+
+static int
+udf_access(struct vop_access_args *a)
+{
+ struct vnode *vp;
+ struct udf_node *node;
+ mode_t a_mode, mode;
+
+ vp = a->a_vp;
+ node = VTON(vp);
+ a_mode = a->a_mode;
+
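+	/* This is a read-only filesystem, so refuse write access to files, dirs, and links */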
+ if (a_mode & VWRITE) {
+ switch (vp->v_type) {
+ case VDIR:
+ case VLNK:
+ case VREG:
+ return (EROFS);
+ /* NOT REACHED */
+ default:
+ break;
+ }
+ }
+
+ mode = udf_permtomode(node);
+
+ return (vaccess(vp->v_type, mode, node->fentry->uid, node->fentry->gid,
+ a_mode, a->a_cred, NULL));
+}
+
+static int mon_lens[2][12] = {
+ {31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31},
+ {31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}
+};
+
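+/* A year is a leap year if divisible by 4, except centuries not divisible by 400 */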
+static int
+udf_isaleapyear(int year)
+{
+ int i;
+
+ i = (year % 4) ? 0 : 1;
+ i &= (year % 100) ? 1 : 0;
+ i |= (year % 400) ? 0 : 1;
+
+ return i;
+}
+
+/*
+ * XXX This is just a rough hack. Daylight savings isn't calculated and tv_nsec
+ * is ignored.
+ * Timezone calculation compliments of Julian Elischer <julian@elischer.org>.
+ */
+static void
+udf_timetotimespec(struct timestamp *time, struct timespec *t)
+{
+ int i, lpyear, daysinyear;
+ union {
+ u_int16_t u_tz_offset;
+ int16_t s_tz_offset;
+ } tz;
+
+ t->tv_nsec = 0;
+
+ /* DirectCD seems to like using bogus year values */
+ if (time->year < 1970) {
+ t->tv_sec = 0;
+ return;
+ }
+
+ /* Calculate the time and day */
+ t->tv_sec = time->second;
+ t->tv_sec += time->minute * 60;
+ t->tv_sec += time->hour * 3600;
+ t->tv_sec += time->day * 3600 * 24;
+
+	/* Calculate the month */
+ lpyear = udf_isaleapyear(time->year);
+ for (i = 1; i < time->month; i++)
+ t->tv_sec += mon_lens[lpyear][i] * 3600 * 24;
+
+ /* Speed up the calculation */
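+	/* Each constant below is the number of seconds in one full decade (two or three leap years) */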
+ if (time->year > 1979)
+ t->tv_sec += 315532800;
+ if (time->year > 1989)
+ t->tv_sec += 315619200;
+ if (time->year > 1999)
+ t->tv_sec += 315532800;
+ for (i = 2000; i < time->year; i++) {
+		daysinyear = udf_isaleapyear(i) + 365;
+ t->tv_sec += daysinyear * 3600 * 24;
+ }
+
+ /*
+	 * Calculate the time zone.  The timezone is a 12-bit signed 2's
+	 * complement value, so we have to do some extra magic to handle it right.
+ */
+ tz.u_tz_offset = time->type_tz;
+ tz.u_tz_offset &= 0x0fff;
+ if (tz.u_tz_offset & 0x0800)
+ tz.u_tz_offset |= 0xf000; /* extend the sign to 16 bits */
+ if ((time->type_tz & 0x1000) && (tz.s_tz_offset != -2047))
+ t->tv_sec -= tz.s_tz_offset * 60;
+
+ return;
+}
+
+static int
+udf_getattr(struct vop_getattr_args *a)
+{
+ struct vnode *vp;
+ struct udf_node *node;
+ struct vattr *vap;
+ struct file_entry *fentry;
+ struct timespec ts;
+
+ ts.tv_sec = 0;
+
+ vp = a->a_vp;
+ vap = a->a_vap;
+ node = VTON(vp);
+ fentry = node->fentry;
+
+ vap->va_fsid = dev2udev(node->i_dev);
+ vap->va_fileid = node->hash_id;
+ vap->va_mode = udf_permtomode(node);
+ vap->va_nlink = fentry->link_cnt;
+ /*
+ * XXX The spec says that -1 is valid for uid/gid and indicates an
+ * invalid uid/gid. How should this be represented?
+ */
+ vap->va_uid = (fentry->uid == -1) ? 0 : fentry->uid;
+ vap->va_gid = (fentry->gid == -1) ? 0 : fentry->gid;
+ udf_timetotimespec(&fentry->atime, &vap->va_atime);
+ udf_timetotimespec(&fentry->mtime, &vap->va_mtime);
+ vap->va_ctime = vap->va_mtime; /* XXX Stored as an Extended Attribute */
+ vap->va_rdev = 0; /* XXX */
+ if (vp->v_type & VDIR) {
+ /*
+ * Directories that are recorded within their ICB will show
+ * as having 0 blocks recorded. Since tradition dictates
+ * that directories consume at least one logical block,
+ * make it appear so.
+ */
+ if (fentry->logblks_rec != 0) {
+ vap->va_size = fentry->logblks_rec * node->udfmp->bsize;
+ } else {
+ vap->va_size = node->udfmp->bsize;
+ }
+ } else {
+ vap->va_size = fentry->inf_len;
+ }
+ vap->va_flags = 0;
+ vap->va_gen = 1;
+ vap->va_blocksize = node->udfmp->bsize;
+ vap->va_bytes = fentry->inf_len;
+ vap->va_type = vp->v_type;
+ vap->va_filerev = 0; /* XXX */
+ return (0);
+}
+
+/*
+ * File specific ioctls. DeCSS candidate?
+ */
+static int
+udf_ioctl(struct vop_ioctl_args *a)
+{
+ printf("%s called\n", __FUNCTION__);
+ return (EOPNOTSUPP);
+}
+
+/*
+ * I'm not sure that this has much value in a read-only filesystem, but
+ * cd9660 has it too.
+ */
+static int
+udf_pathconf(struct vop_pathconf_args *a)
+{
+
+ switch (a->a_name) {
+ case _PC_LINK_MAX:
+ *a->a_retval = 65535;
+ return (0);
+ case _PC_NAME_MAX:
+ *a->a_retval = NAME_MAX;
+ return (0);
+ case _PC_PATH_MAX:
+ *a->a_retval = PATH_MAX;
+ return (0);
+ case _PC_NO_TRUNC:
+ *a->a_retval = 1;
+ return (0);
+ default:
+ return (EINVAL);
+ }
+}
+
+static int
+udf_read(struct vop_read_args *a)
+{
+ struct vnode *vp = a->a_vp;
+ struct uio *uio = a->a_uio;
+ struct udf_node *node = VTON(vp);
+ struct buf *bp;
+ u_int8_t *data;
+ int error = 0;
+ int size, n, fsize, offset;
+
+ if (uio->uio_offset < 0)
+ return (EINVAL);
+
+ fsize = node->fentry->inf_len;
+ size = 0;
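+	/* Copy the file out one extent (at most one logical block) at a time */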
+ while (uio->uio_offset < fsize && uio->uio_resid > 0) {
+ offset = uio->uio_offset;
+ error = udf_readatoffset(node, &size, offset, &bp, &data);
+ if (error)
+ return (error);
+		/* bp is NULL when the data is embedded in the file entry */
+		n = min(size, size - (bp != NULL ? bp->b_resid : 0));
+ error = uiomove((caddr_t)data, n, uio);
+ if (bp != NULL)
+ brelse(bp);
+ size -= n;
+ if (error)
+ break;
+	}
+
+ return (error);
+}
+
+/* Convenience routine to dump a block in hex */
+static void
+udf_dumpblock(void *data, int len)
+{
+ int i, j;
+
+ for (i = 0; i < len; i++) {
+ printf("\noffset= %d: ", i);
+ for (j = 0; j < 8; j++) {
+ if (i + j == len)
+ break;
+ printf("0x%02x ", (u_int8_t)((u_int8_t*)(data))[i + j]);
+ }
+ i += j - 1;
+ }
+ printf("\n");
+}
+
+/*
+ * Call the OSTA routines to translate the name from a CS0 dstring to a
+ * 16-bit Unicode String. Hooks need to be placed in here to translate from
+ * Unicode to the encoding that the kernel/user expects. For now, compact
+ * the encoding to 8 bits if possible. Return the length of the translated
+ * string.
+ * XXX This horribly pessimizes the 8bit case
+ */
+static int
+udf_transname(char *cs0string, char *destname, int len)
+{
+ unicode_t *transname;
+ int i, unilen = 0;
+
+ /* allocate a buffer big enough to hold an 8->16 bit expansion */
+ transname = uma_zalloc(udf_zone_trans, M_WAITOK);
+ if (transname == NULL) {
+ printf("udf: out of memory?\n");
+ return 0;
+ }
+
+ if ((unilen = udf_UncompressUnicode(len, cs0string, transname)) == -1) {
+ printf("udf: Unicode translation failed\n");
+ uma_zfree(udf_zone_trans, transname);
+ return 0;
+ }
+
+ /* At this point, the name is in 16-bit Unicode. Compact it down
+ * to 8-bit
+ */
+ for (i = 0; i < unilen ; i++) {
+ if (transname[i] & 0xff00) {
+ destname[i] = '.'; /* Fudge the 16bit chars */
+ } else {
+ destname[i] = transname[i] & 0xff;
+ }
+ }
+
+ destname[unilen] = 0;
+ uma_zfree(udf_zone_trans, transname);
+
+ return unilen;
+}
+
+/*
+ * Compare a CS0 dstring with a name passed in from the VFS layer. Return
+ * 0 on a successful match, nonzero otherwise. Unicode work may need to be done
+ * here also.
+ */
+static int
+udf_cmpname(char *cs0string, char *cmpname, int cs0len, int cmplen)
+{
+ char transname[MAXNAMLEN+1]; /* XXX stack */
+
+ if ((cs0len = udf_transname(cs0string, &transname[0], cs0len)) == 0)
+ return -1;
+
+ /* Easy check. If they aren't the same length, they aren't equal */
+ if (cs0len != cmplen)
+ return -1;
+
+ return (bcmp(transname, cmpname, cmplen));
+}
+
+struct udf_uiodir {
+ struct dirent *dirent;
+ u_long *cookies;
+ int ncookies;
+ int acookies;
+ int eofflag;
+};
+
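+/*
+ * Copy a directory entry into the uio and record its cookie.  Returns -1
+ * when the cookie array or the uio buffer is exhausted so that readdir
+ * knows to stop and flag that more entries remain.
+ */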
+static int
+udf_uiodir(struct udf_uiodir *uiodir, int de_size, struct uio *uio, long cookie)
+{
+ if (uiodir->cookies != NULL) {
+ if (++uiodir->acookies > uiodir->ncookies) {
+ uiodir->eofflag = 0;
+ return (-1);
+ }
+ *uiodir->cookies++ = cookie;
+ }
+
+ if (uio->uio_resid < de_size) {
+ uiodir->eofflag = 0;
+ return (-1);
+ }
+
+ return (uiomove((caddr_t)uiodir->dirent, de_size, uio));
+}
+
+/* Prebuild the . and .. dirents. d_fileno will need to be filled in */
+static struct dirent udf_de_dot =
+ { 0, sizeof(struct dirent), DT_DIR, 1, "." };
+static struct dirent udf_de_dotdot =
+ { 0, sizeof(struct dirent), DT_DIR, 2, ".." };
+
+static int
+udf_readdir(struct vop_readdir_args *a)
+{
+ struct vnode *vp;
+ struct buf *bp;
+ struct uio *uio;
+ struct dirent dir;
+ struct udf_node *node;
+ struct udf_mnt *udfmp;
+ struct fileid_desc *fid;
+ struct udf_uiodir uiodir;
+ u_long *cookies = NULL;
+ u_int8_t *data;
+ int ncookies;
+ int error = 0, offset, off, size, de_size, fid_size, fsize;
+ int total_fid_size = 0, frag_size = 0, fid_fragment = 0;
+
+ vp = a->a_vp;
+ uio = a->a_uio;
+ node = VTON(vp);
+ udfmp = node->udfmp;
+ de_size = sizeof(struct dirent);
+ fid_size = UDF_FID_SIZE;
+ fsize = node->fentry->inf_len;
+ uiodir.eofflag = 1;
+
+ if (a->a_ncookies != NULL) {
+ /*
+ * Guess how many entries are needed. If we run out, this
+		 * function will be called again and things will pick up where
+ * it left off.
+ */
+ ncookies = uio->uio_resid / 8;
+ MALLOC(cookies, u_long *, sizeof(u_long) * ncookies,
+ M_TEMP, M_WAITOK);
+ if (cookies == NULL)
+ return (ENOMEM);
+ uiodir.ncookies = ncookies;
+ uiodir.cookies = cookies;
+ uiodir.acookies = 0;
+ } else {
+ uiodir.cookies = NULL;
+ }
+
+ /*
+ * offset is the absolute offset into the file data. off is the offset
+ * into the data, minus the blocks that weren't read because they fell
+ * before offset.
+ */
+ offset = uio->uio_offset;
+ off = 0;
+
+ /*
+ * Iterate through the file id descriptors. Give the parent dir
+ * entry special attention. size will be the size of the extent
+ * returned in data. If there is more than one extent, things get
+ * ugly.
+ */
+ size = 0;
+ error = udf_readatoffset(node, &size, offset, &bp, &data);
+ if (error) {
+ if (a->a_ncookies != NULL)
+ FREE(cookies, M_TEMP);
+ return (error);
+ }
+
+ while (offset + off < fsize) {
+
+ fid = (struct fileid_desc*)&data[off];
+
+ /* Check to see if the fid is fragmented */
+ if (off >= size || off + fid_size > size ||
+ off + fid->l_iu + fid->l_fi + fid_size > size) {
+ struct fileid_desc *fid_buf;
+ u_int8_t *buf;
+
+ /* Copy what we have of the fid into a buffer */
+ frag_size = size - off;
+ MALLOC(buf, u_int8_t*, max(frag_size, fid_size),
+ M_UDFFID, M_NOWAIT | M_ZERO);
+ if (buf == NULL)
+ panic("No memory?");
+ bcopy(fid, buf, frag_size);
+
+ /* Reduce all of the casting magic */
+ fid_buf = (struct fileid_desc*)buf;
+
+ if (bp != NULL)
+ brelse(bp);
+
+ /* Fetch the next allocation */
+ offset += size;
+ size = 0;
+ error = udf_readatoffset(node, &size, offset, &bp,
+ &data);
+ if (error)
+ break;
+
+ /*
+ * If the fragment was so small that we didn't get
+ * the l_iu and l_fi fields, copy those in.
+ */
+ if (fid_size > frag_size)
+ bcopy(data, &buf[frag_size],
+ fid_size - frag_size);
+
+ /*
+ * Now that we have enough of the fid to work with,
+ * allocate a new fid, copy the fragment into it,
+ * and copy the rest of the fid from the new
+ * allocation.
+ */
+ total_fid_size = fid_size + fid_buf->l_iu +
+ fid_buf->l_fi;
+ MALLOC(fid, struct fileid_desc *, total_fid_size,
+ M_UDFFID, M_NOWAIT | M_ZERO);
+ if (fid == NULL) {
+ brelse(bp);
+ error = ENOMEM;
+ break;
+ }
+ bcopy(fid_buf, fid, frag_size);
+ bcopy(data, &((u_int8_t*)(fid))[frag_size],
+ total_fid_size - frag_size);
+
+ fid_fragment = 1;
+ FREE(buf, M_UDFFID);
+ } else {
+ total_fid_size = fid->l_iu + fid->l_fi + fid_size;
+ }
+
+ /* XXX Should we return an error on a bad fid? */
+ if (udf_checktag(&fid->tag, TAGID_FID)) {
+ printf("Invalid FID tag\n");
+ break;
+ }
+
+ /* Is this a deleted file? */
+ if (fid->file_char & 0x4)
+ goto update_offset;
+
+ if (fid->l_iu != 0) {
+ printf("Possibly invalid fid found.\n");
+ goto update_offset;
+ }
+
+ if ((fid->l_fi == 0) && (fid->file_char & 0x08)) {
+ /* Do up the '.' and '..' entries. Dummy values are
+ * used for the cookies since the offset here is
+ * usually zero, and NFS doesn't like that value
+ * XXX Should the magic dirents be locked?
+ */
+ udf_de_dot.d_fileno = node->hash_id;
+ uiodir.dirent = &udf_de_dot;
+ error = udf_uiodir(&uiodir, de_size, uio, 1);
+ if (error)
+ break;
+
+ udf_de_dotdot.d_fileno = udf_getid(&fid->icb);
+ uiodir.dirent = &udf_de_dotdot;
+ error = udf_uiodir(&uiodir, de_size, uio, 2);
+ } else {
+ dir.d_namlen = udf_transname(&fid->data[fid->l_iu],
+ &dir.d_name[0], fid->l_fi);
+ dir.d_fileno = udf_getid(&fid->icb);
+ dir.d_type = (fid->file_char & 0x02) ? DT_DIR :
+ DT_UNKNOWN;
+ dir.d_reclen = GENERIC_DIRSIZ(&dir);
+ uiodir.dirent = &dir;
+ error = udf_uiodir(&uiodir, dir.d_reclen, uio, off);
+ }
+ if (error) {
+ printf("uiomove returned %d\n", error);
+ break;
+ }
+
+update_offset: /*
+ * Update the offset. Align on a 4 byte boundary because the
+ * UDF spec says so. If it was a fragmented entry, clean up.
+ */
+ if (fid_fragment) {
+ off = (total_fid_size - frag_size + 3) & ~0x03;
+ FREE(fid, M_UDFFID);
+ fid_fragment = 0;
+ } else {
+ off += (total_fid_size + 3) & ~0x03;
+ }
+ }
+
+ /* tell the calling layer whether we need to be called again */
+ *a->a_eofflag = uiodir.eofflag;
+ uio->uio_offset = offset + off;
+
+ if (bp != NULL)
+ brelse(bp);
+
+ if (a->a_ncookies != NULL) {
+ if (error)
+ free(cookies, M_TEMP);
+ else {
+ *a->a_ncookies = uiodir.acookies;
+ *a->a_cookies = cookies;
+ }
+ }
+
+ return (error);
+}
+
+/* Are there any implementations out there that do soft-links? */
+static int
+udf_readlink(struct vop_readlink_args *ap)
+{
+ printf("%s called\n", __FUNCTION__);
+ return (EOPNOTSUPP);
+}
+
+static int
+udf_strategy(struct vop_strategy_args *a)
+{
+ struct buf *bp;
+ struct vnode *vp;
+ struct udf_node *node;
+ int maxsize;
+
+ bp = a->a_bp;
+ vp = bp->b_vp;
+ node = VTON(vp);
+
+ /* cd9660 has this test reversed, but it seems more logical this way */
+ if (bp->b_blkno != bp->b_lblkno) {
+ /*
+ * Files that are embedded in the fentry don't translate well
+ * to a block number. Reject.
+ */
+ if (udf_bmap_internal(node, bp->b_lblkno * node->udfmp->bsize,
+ &bp->b_lblkno, &maxsize)) {
+ clrbuf(bp);
+ bp->b_blkno = -1;
+ }
+ }
+ if ((long)bp->b_blkno == -1) {
+ bufdone(bp);
+ return (0);
+ }
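+	/* Hand the translated request off to the underlying device */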
+ vp = node->i_devvp;
+ bp->b_dev = vp->v_rdev;
+ VOP_STRATEGY(vp, bp);
+ return (0);
+}
+
+static int
+udf_print(struct vop_print_args *a)
+{
+ printf("%s called\n", __FUNCTION__);
+ return (EOPNOTSUPP);
+}
+
+static int
+udf_bmap(struct vop_bmap_args *a)
+{
+ struct udf_node *node;
+ u_int32_t max_size;
+ int error;
+
+ node = VTON(a->a_vp);
+
+ if (a->a_vpp != NULL)
+ *a->a_vpp = node->i_devvp;
+ if (a->a_bnp == NULL)
+ return (0);
+ if (a->a_runb)
+ *a->a_runb = 0;
+
+ error = udf_bmap_internal(node, a->a_bn * node->udfmp->bsize, a->a_bnp,
+ &max_size);
+ if (error > 0)
+ return (error);
+
+ /* Punt on read-ahead for now */
+ if (a->a_runp)
+ *a->a_runp = 0;
+
+ return (0);
+}
+
+/*
+ * The all powerful VOP_LOOKUP().
+ */
+static int
+udf_lookup(struct vop_cachedlookup_args *a)
+{
+ struct vnode *dvp;
+ struct vnode *tdp = NULL;
+ struct vnode **vpp = a->a_vpp;
+ struct buf *bp = NULL;
+ struct udf_node *node;
+ struct udf_mnt *udfmp;
+ struct fileid_desc *fid = NULL;
+ struct thread *td;
+ u_long nameiop;
+ u_long flags;
+ char *nameptr;
+ long namelen;
+ ino_t id = 0;
+ u_int8_t *data;
+ int offset, off, error, size;
+ int numdirpasses, fid_size, fsize, icb_len;
+ int total_fid_size = 0, fid_fragment = 0;
+
+ dvp = a->a_dvp;
+ node = VTON(dvp);
+ udfmp = node->udfmp;
+ nameiop = a->a_cnp->cn_nameiop;
+ flags = a->a_cnp->cn_flags;
+ nameptr = a->a_cnp->cn_nameptr;
+ namelen = a->a_cnp->cn_namelen;
+ fid_size = UDF_FID_SIZE;
+ fsize = node->fentry->inf_len;
+ icb_len = sizeof(struct long_ad);
+ td = a->a_cnp->cn_thread;
+
+ /*
+ * If this is a LOOKUP and we've already partially searched through
+ * the directory, pick up where we left off and flag that the
+ * directory may need to be searched twice. For a full description,
+ * see /sys/isofs/cd9660/cd9660_lookup.c:cd9660_lookup()
+ */
+	if (nameiop != LOOKUP || node->diroff == 0 || node->diroff > fsize) {
+ offset = 0;
+ numdirpasses = 1;
+ } else {
+ offset = node->diroff;
+ numdirpasses = 2;
+ nchstats.ncs_2passes++;
+ }
+
+ /*
+ * The name lookup algorithm is quite similar to what is in readdir.
+ * Can this be broken out and shared?
+ */
+lookloop:
+ size = 0;
+ off = 0;
+ error = udf_readatoffset(node, &size, offset, &bp, &data);
+ if (error)
+ return (error);
+
+ while (offset + off < fsize) {
+ fid = (struct fileid_desc*)&data[off];
+
+ /* Check to see if the fid is fragmented */
+ if (off >= size || off + fid_size > size ||
+ off + fid_size + fid->l_iu + fid->l_fi > size) {
+ struct fileid_desc *fid_buf;
+ u_int8_t *buf;
+ int frag_size = 0;
+
+ /* Copy what we have of the fid into a buffer */
+ frag_size = size - off;
+ MALLOC(buf, u_int8_t*, max(frag_size, fid_size),
+ M_UDFFID, M_NOWAIT | M_ZERO);
+ if (buf == NULL)
+ panic("No memory?");
+ bcopy(fid, buf, frag_size);
+
+ /* Reduce all of the casting magic */
+ fid_buf = (struct fileid_desc*)buf;
+
+ if (bp != NULL)
+ brelse(bp);
+
+ /* Fetch the next allocation */
+ offset += size;
+ size = 0;
+ error = udf_readatoffset(node, &size, offset, &bp,
+ &data);
+ if (error)
+ return (error);
+
+ /*
+ * If the fragment was so small that we didn't get
+ * the l_iu and l_fi fields, copy those in.
+ */
+ if (fid_size > frag_size)
+ bcopy(data, &buf[frag_size],
+ fid_size - frag_size);
+
+ /*
+ * Now that we have enough of the fid to work with,
+ * allocate a new fid, copy the fragment into it,
+ * and copy the rest of the fid from the new
+ * allocation.
+ */
+ total_fid_size = fid_size + fid_buf->l_iu +
+ fid_buf->l_fi;
+ MALLOC(fid, struct fileid_desc *, total_fid_size,
+ M_UDFFID, M_NOWAIT | M_ZERO);
+ if (fid == NULL) {
+ brelse(bp);
+ return (ENOMEM);
+ }
+ bcopy(fid_buf, fid, frag_size);
+ bcopy(data, &((u_int8_t*)(fid))[frag_size],
+ total_fid_size - frag_size);
+
+ off = (total_fid_size - frag_size + 3) & ~0x03;
+ fid_fragment = 1;
+ FREE(buf, M_UDFFID);
+ } else {
+ /*
+ * Update the offset here to avoid looking at this fid
+ * again on a subsequent lookup.
+ */
+ total_fid_size = fid->l_iu + fid->l_fi + fid_size;
+ off += (total_fid_size + 3) & ~0x03;
+ }
+
+ /* XXX Should we return an error on a bad fid? */
+ if (udf_checktag(&fid->tag, TAGID_FID))
+ break;
+
+ if ((fid->l_fi == 0) && (fid->file_char & 0x08)) {
+ if (flags & ISDOTDOT) {
+ id = udf_getid(&fid->icb);
+ break;
+ }
+ } else {
+ if (!(udf_cmpname(&fid->data[fid->l_iu],
+ nameptr, fid->l_fi, namelen))) {
+ id = udf_getid(&fid->icb);
+ break;
+ }
+ }
+
+ /*
+ * If we got this far then this fid isn't what we were
+ * looking for. It's therefore safe to clean up from a
+ * fragmented fid.
+ */
+ if (fid_fragment) {
+ FREE(fid, M_UDFFID);
+ fid_fragment = 0;
+ }
+ }
+
+ /* Did we have a match? */
+ if (id) {
+ error = udf_vget(udfmp->im_mountp, id, LK_EXCLUSIVE, &tdp);
+ if (bp != NULL)
+ brelse(bp);
+ if (error)
+ return (error);
+
+ /* Remember where this entry was if it's the final component */
+ if ((flags & ISLASTCN) && nameiop == LOOKUP)
+ node->diroff = offset + off;
+ if (numdirpasses == 2)
+ nchstats.ncs_pass2++;
+ if (!(flags & LOCKPARENT) || !(flags & ISLASTCN)) {
+ a->a_cnp->cn_flags |= PDIRUNLOCK;
+ VOP_UNLOCK(dvp, 0, td);
+ }
+
+ *vpp = tdp;
+
+ /* Put this entry in the cache */
+ if (flags & MAKEENTRY)
+ cache_enter(dvp, *vpp, a->a_cnp);
+
+ if (fid_fragment)
+ FREE(fid, M_UDFFID);
+
+ return (0);
+ }
+
+ /* Name wasn't found on this pass. Do another pass? */
+ if (numdirpasses == 2) {
+ numdirpasses--;
+ offset = 0;
+ goto lookloop;
+ }
+
+ if (bp != NULL)
+ brelse(bp);
+
+	/* Enter name into cache as non-existent */
+ if (flags & MAKEENTRY)
+ cache_enter(dvp, *vpp, a->a_cnp);
+
+ /* Why wait to the very end to decide that this is a read-only fs? */
+ if (nameiop == CREATE || nameiop == RENAME)
+ return (EROFS);
+ return (ENOENT);
+
+}
+
+static int
+udf_reclaim(struct vop_reclaim_args *a)
+{
+ struct vnode *vp;
+ struct udf_node *unode;
+
+ vp = a->a_vp;
+ unode = VTON(vp);
+
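+	/* Purge the name cache and tear down the in-core node */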
+ cache_purge(vp);
+ if (unode != NULL) {
+ udf_hashrem(unode);
+ if (unode->i_devvp) {
+ vrele(unode->i_devvp);
+ unode->i_devvp = 0;
+ }
+
+ if (unode->fentry != NULL)
+ FREE(unode->fentry, M_UDFFENTRY);
+ lockdestroy(&unode->i_vnode->v_lock);
+ uma_zfree(udf_zone_node, unode);
+ vp->v_data = NULL;
+ }
+
+ return (0);
+}
+
+/*
+ * Read the block and then set the data pointer to correspond with the
+ * offset passed in. Only read in at most 'size' bytes, and then set 'size'
+ * to the number of bytes pointed to. If 'size' is zero, try to read in a
+ * whole extent.
+ * XXX 'size' is limited to the logical block size for now due to problems
+ * with udf_read()
+ */
+static int
+udf_readatoffset(struct udf_node *node, int *size, int offset, struct buf **bp, u_int8_t **data)
+{
+ struct udf_mnt *udfmp;
+ struct file_entry *fentry = NULL;
+ struct buf *bp1;
+ u_int32_t max_size;
+ daddr64_t sector;
+ int error;
+
+ udfmp = node->udfmp;
+
+ error = udf_bmap_internal(node, offset, &sector, &max_size);
+ if (error == -1) {
+ /*
+ * This error means that the file *data* is stored in the
+ * allocation descriptor field of the file entry.
+ */
+ fentry = node->fentry;
+ *data = &fentry->data[fentry->l_ea];
+ *size = fentry->l_ad;
+ *bp = NULL;
+ return (0);
+ } else if (error != 0) {
+ return (error);
+ }
+
+ if (*size == 0 || *size > max_size)
+ *size = max_size;
+
+ /* XXX Read only one block at a time? Could read-ahead help? */
+ *size = min(*size, udfmp->bsize);
+ if (*size == 0)
+ return (EIO);
+
+ if ((error = udf_readlblks(udfmp, sector, *size, bp))) {
+ printf("udf_readlblks returned %d\n", error);
+ return (error);
+ }
+
+ bp1 = *bp;
+ *data = (u_int8_t *)&bp1->b_data[offset % udfmp->bsize];
+ return (0);
+}
+
+/*
+ * Translate a file offset into a logical block and then into a physical
+ * block.
+ */
+static int
+udf_bmap_internal(struct udf_node *node, u_int32_t offset, daddr64_t *sector, u_int32_t *max_size)
+{
+ struct udf_mnt *udfmp;
+ struct file_entry *fentry;
+ void *icb;
+ struct icb_tag *tag;
+ u_int32_t icblen = 0;
+ daddr64_t lsector;
+ int ad_offset, ad_num = 0;
+ int i, p_offset;
+
+ udfmp = node->udfmp;
+ fentry = node->fentry;
+ tag = &fentry->icbtag;
+
+ switch (tag->strat_type) {
+ case 4:
+ break;
+
+ case 4096:
+ printf("Cannot deal with strategy4096 yet!\n");
+ return (ENODEV);
+
+ default:
+ printf("Unknown strategy type %d\n", tag->strat_type);
+ return (ENODEV);
+ }
+
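+	/* The low three bits of the ICB tag flags select the allocation descriptor format */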
+ switch (tag->flags & 0x7) {
+ case 0:
+ /*
+ * The allocation descriptor field is filled with short_ad's.
+ * If the offset is beyond the current extent, look for the
+ * next extent.
+ */
+ do {
+ offset -= icblen;
+ ad_offset = sizeof(struct short_ad) * ad_num;
+ if (ad_offset > fentry->l_ad) {
+ printf("File offset out of bounds\n");
+ return (EINVAL);
+ }
+ icb = GETICB(long_ad, fentry, fentry->l_ea + ad_offset);
+ icblen = GETICBLEN(short_ad, icb);
+ ad_num++;
+ } while(offset >= icblen);
+
+ lsector = (offset >> udfmp->bshift) +
+ ((struct short_ad *)(icb))->pos;
+
+ *max_size = GETICBLEN(short_ad, icb) - offset;
+
+ break;
+ case 1:
+ /*
+ * The allocation descriptor field is filled with long_ad's
+ * If the offset is beyond the current extent, look for the
+ * next extent.
+ */
+ do {
+ offset -= icblen;
+ ad_offset = sizeof(struct long_ad) * ad_num;
+ if (ad_offset > fentry->l_ad) {
+ printf("File offset out of bounds\n");
+ return (EINVAL);
+ }
+ icb = GETICB(long_ad, fentry, fentry->l_ea + ad_offset);
+ icblen = GETICBLEN(long_ad, icb);
+ ad_num++;
+ } while(offset >= icblen);
+
+ lsector = (offset >> udfmp->bshift) +
+ ((struct long_ad *)(icb))->loc.lb_num;
+
+ *max_size = GETICBLEN(long_ad, icb) - offset;
+
+ break;
+ case 3:
+ /*
+ * This type means that the file *data* is stored in the
+ * allocation descriptor field of the file entry.
+ */
+ *max_size = 0;
+ *sector = node->hash_id + udfmp->bsize;
+
+ return (-1);
+ case 2:
+ /* DirectCD does not use extended_ad's */
+ default:
+ printf("Unsupported allocation descriptor %d\n",
+ tag->flags & 0x7);
+ return (ENODEV);
+ }
+
+ *sector = lsector + udfmp->part_start;
+
+ /*
+ * Check the sparing table. Each entry represents the beginning of
+ * a packet.
+ */
+ if (udfmp->s_table != NULL) {
+		for (i = 0; i < udfmp->s_table_entries; i++) {
+ p_offset = lsector - udfmp->s_table->entries[i].org;
+ if ((p_offset < udfmp->p_sectors) && (p_offset >= 0)) {
+ *sector = udfmp->s_table->entries[i].map +
+ p_offset;
+ break;
+ }
+ }
+ }
+
+ return (0);
+}