author    | rmacklem <rmacklem@FreeBSD.org> | 2009-05-04 15:23:58 +0000
committer | rmacklem <rmacklem@FreeBSD.org> | 2009-05-04 15:23:58 +0000
commit    | e3d34903b6fb9cb09f7e616bde59d97341958fa2 (patch)
tree      | 0246ff14527b554e60f1c9212be00ee8c1128197 /sys/fs/nfsclient
parent    | fb2908c8ff440e0985013b83071bd8dfecb11371 (diff)
download  | FreeBSD-src-e3d34903b6fb9cb09f7e616bde59d97341958fa2.zip, FreeBSD-src-e3d34903b6fb9cb09f7e616bde59d97341958fa2.tar.gz
Add the experimental nfs subtree to the kernel; it includes
support for NFSv4 as well as NFSv2 and NFSv3.
It lives in 3 subdirs under sys/fs:
nfs - functions that are common to the client and server
nfsclient - a mutation of sys/nfsclient that calls generic functions
	to do RPCs and handle state. As such, it retains, for the most
	part, the buffer cache handling characteristics and vnode
	semantics found in sys/nfsclient.
nfsserver - the server. It includes a DRC (duplicate request cache)
	designed specifically for NFSv4, which is used instead of the
	generic DRC in sys/rpc.
The build glue will be checked in later, so at this point the subtree
consists of 3 new subdirs that should not affect kernel building.
Approved by: kib (mentor)
Diffstat (limited to 'sys/fs/nfsclient')
-rw-r--r-- | sys/fs/nfsclient/nfs.h | 95
-rw-r--r-- | sys/fs/nfsclient/nfs_clbio.c | 1934
-rw-r--r-- | sys/fs/nfsclient/nfs_clcomsubs.c | 521
-rw-r--r-- | sys/fs/nfsclient/nfs_clkrpc.c | 297
-rw-r--r-- | sys/fs/nfsclient/nfs_cllock.c | 396
-rw-r--r-- | sys/fs/nfsclient/nfs_clnfsiod.c | 308
-rw-r--r-- | sys/fs/nfsclient/nfs_clnode.c | 283
-rw-r--r-- | sys/fs/nfsclient/nfs_clport.c | 1271
-rw-r--r-- | sys/fs/nfsclient/nfs_clrpcops.c | 4173
-rw-r--r-- | sys/fs/nfsclient/nfs_clstate.c | 4133
-rw-r--r-- | sys/fs/nfsclient/nfs_clsubs.c | 402
-rw-r--r-- | sys/fs/nfsclient/nfs_clvfsops.c | 1257
-rw-r--r-- | sys/fs/nfsclient/nfs_clvnops.c | 3131
-rw-r--r-- | sys/fs/nfsclient/nfs_lock.h | 89
-rw-r--r-- | sys/fs/nfsclient/nfsargs.h | 104
-rw-r--r-- | sys/fs/nfsclient/nfsdiskless.h | 108
-rw-r--r-- | sys/fs/nfsclient/nfsmount.h | 106
-rw-r--r-- | sys/fs/nfsclient/nfsnode.h | 201
-rw-r--r-- | sys/fs/nfsclient/nlminfo.h | 41
19 files changed, 18850 insertions, 0 deletions
diff --git a/sys/fs/nfsclient/nfs.h b/sys/fs/nfsclient/nfs.h new file mode 100644 index 0000000..d011ffb --- /dev/null +++ b/sys/fs/nfsclient/nfs.h @@ -0,0 +1,95 @@ +/*- + * Copyright (c) 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Rick Macklem at The University of Guelph. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * $FreeBSD$ + */ + +#ifndef _NFSCLIENT_NFS_H_ +#define _NFSCLIENT_NFS_H_ + +#if defined(_KERNEL) + +#ifndef NFS_TPRINTF_INITIAL_DELAY +#define NFS_TPRINTF_INITIAL_DELAY 12 +#endif + +#ifndef NFS_TPRINTF_DELAY +#define NFS_TPRINTF_DELAY 30 +#endif + +/* + * Nfs version macros. + */ +#define NFS_ISV3(v) \ + (VFSTONFS((v)->v_mount)->nm_flag & NFSMNT_NFSV3) +#define NFS_ISV4(v) \ + (VFSTONFS((v)->v_mount)->nm_flag & NFSMNT_NFSV4) +#define NFS_ISV34(v) \ + (VFSTONFS((v)->v_mount)->nm_flag & (NFSMNT_NFSV3 | NFSMNT_NFSV4)) + +/* + * Function prototypes. 
+ */ +int ncl_meta_setsize(struct vnode *, struct ucred *, struct thread *, + u_quad_t); +void ncl_doio_directwrite(struct buf *); +int ncl_bioread(struct vnode *, struct uio *, int, struct ucred *); +int ncl_biowrite(struct vnode *, struct uio *, int, struct ucred *); +int ncl_vinvalbuf(struct vnode *, int, struct thread *, int); +int ncl_asyncio(struct nfsmount *, struct buf *, struct ucred *, + struct thread *); +int ncl_doio(struct vnode *, struct buf *, struct ucred *, struct thread *); +int ncl_msleep(struct thread *, void *, struct mtx *, int, char *, int); +void ncl_nhinit(void); +void ncl_nhuninit(void); +void ncl_nodelock(struct nfsnode *); +void ncl_nodeunlock(struct nfsnode *); +int ncl_getattrcache(struct vnode *, struct vattr *); +int ncl_readrpc(struct vnode *, struct uio *, struct ucred *); +int ncl_writerpc(struct vnode *, struct uio *, struct ucred *, int *, int *); +int ncl_readlinkrpc(struct vnode *, struct uio *, struct ucred *); +int ncl_readdirrpc(struct vnode *, struct uio *, struct ucred *, + struct thread *); +int ncl_readdirplusrpc(struct vnode *, struct uio *, struct ucred *, + struct thread *); +int ncl_writebp(struct buf *, int, struct thread *); +int ncl_commit(struct vnode *, u_quad_t, int, struct ucred *, struct thread *); +void ncl_clearcommit(struct mount *); +int ncl_fsinfo(struct nfsmount *, struct vnode *, struct ucred *, + struct thread *); +int ncl_init(struct vfsconf *); +int ncl_uninit(struct vfsconf *); +int ncl_mountroot(struct mount *, struct thread *); +int ncl_nfsiodnew(void); + +#endif /* _KERNEL */ + +#endif /* _NFSCLIENT_NFS_H_ */ diff --git a/sys/fs/nfsclient/nfs_clbio.c b/sys/fs/nfsclient/nfs_clbio.c new file mode 100644 index 0000000..bae44ed --- /dev/null +++ b/sys/fs/nfsclient/nfs_clbio.c @@ -0,0 +1,1934 @@ +/*- + * Copyright (c) 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Rick Macklem at The University of Guelph. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)nfs_bio.c 8.9 (Berkeley) 3/30/95 + */ + +#include <sys/cdefs.h> +__FBSDID("$FreeBSD$"); + +#include <sys/param.h> +#include <sys/systm.h> +#include <sys/bio.h> +#include <sys/buf.h> +#include <sys/kernel.h> +#include <sys/mount.h> +#include <sys/proc.h> +#include <sys/resourcevar.h> +#include <sys/signalvar.h> +#include <sys/vmmeter.h> +#include <sys/vnode.h> + +#include <vm/vm.h> +#include <vm/vm_extern.h> +#include <vm/vm_page.h> +#include <vm/vm_object.h> +#include <vm/vm_pager.h> +#include <vm/vnode_pager.h> + +#include <fs/nfs/nfsport.h> +#include <fs/nfsclient/nfsmount.h> +#include <fs/nfsclient/nfs.h> +#include <fs/nfsclient/nfsnode.h> + +extern int newnfs_directio_allow_mmap; +extern struct nfsstats newnfsstats; +extern struct mtx ncl_iod_mutex; +extern int ncl_numasync; +extern struct proc *ncl_iodwant[NFS_MAXRAHEAD]; +extern struct nfsmount *ncl_iodmount[NFS_MAXRAHEAD]; +extern int newnfs_directio_enable; + +int ncl_pbuf_freecnt = -1; /* start out unlimited */ + +static struct buf *nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size, + struct thread *td); +static int nfs_directio_write(struct vnode *vp, struct uio *uiop, + struct ucred *cred, int ioflag); + +/* + * Any signal that can interrupt an NFS operation in an intr mount + * should be added to this set. SIGSTOP and SIGKILL cannot be masked. + */ +static int nfs_sig_set[] = { + SIGINT, + SIGTERM, + SIGHUP, + SIGKILL, + SIGSTOP, + SIGQUIT +}; + +#ifdef notnow +/* + * Check to see if one of the signals in our subset is pending on + * the process (in an intr mount). + */ +int +ncl_sig_pending(sigset_t set) +{ + int i; + + for (i = 0 ; i < sizeof(nfs_sig_set)/sizeof(int) ; i++) + if (SIGISMEMBER(set, nfs_sig_set[i])) + return (1); + return (0); +} +#endif + +/* + * The set/restore sigmask functions are used to (temporarily) overwrite + * the process p_sigmask during an RPC call (for example). These are also + * used in other places in the NFS client that might tsleep(). + */ +static void +ncl_set_sigmask(struct thread *td, sigset_t *oldset) +{ + sigset_t newset; + int i; + struct proc *p; + + SIGFILLSET(newset); + if (td == NULL) + td = curthread; /* XXX */ + p = td->td_proc; + /* Remove the NFS set of signals from newset */ + PROC_LOCK(p); + mtx_lock(&p->p_sigacts->ps_mtx); + for (i = 0 ; i < sizeof(nfs_sig_set)/sizeof(int) ; i++) { + /* + * But make sure we leave the ones already masked + * by the process, ie. remove the signal from the + * temporary signalmask only if it wasn't already + * in p_sigmask. + */ + if (!SIGISMEMBER(td->td_sigmask, nfs_sig_set[i]) && + !SIGISMEMBER(p->p_sigacts->ps_sigignore, nfs_sig_set[i])) + SIGDELSET(newset, nfs_sig_set[i]); + } + mtx_unlock(&p->p_sigacts->ps_mtx); + PROC_UNLOCK(p); + kern_sigprocmask(td, SIG_SETMASK, &newset, oldset, 0); +} + +static void +ncl_restore_sigmask(struct thread *td, sigset_t *set) +{ + if (td == NULL) + td = curthread; /* XXX */ + kern_sigprocmask(td, SIG_SETMASK, set, NULL, 0); +} + +/* + * NFS wrapper to msleep(), that shoves a new p_sigmask and restores the + * old one after msleep() returns. 
+ */ +int +ncl_msleep(struct thread *td, void *ident, struct mtx *mtx, int priority, char *wmesg, int timo) +{ + sigset_t oldset; + int error; + struct proc *p; + + if ((priority & PCATCH) == 0) + return msleep(ident, mtx, priority, wmesg, timo); + if (td == NULL) + td = curthread; /* XXX */ + ncl_set_sigmask(td, &oldset); + error = msleep(ident, mtx, priority, wmesg, timo); + ncl_restore_sigmask(td, &oldset); + p = td->td_proc; + return (error); +} + +/* + * Vnode op for VM getpages. + */ +int +ncl_getpages(struct vop_getpages_args *ap) +{ + int i, error, nextoff, size, toff, count, npages; + struct uio uio; + struct iovec iov; + vm_offset_t kva; + struct buf *bp; + struct vnode *vp; + struct thread *td; + struct ucred *cred; + struct nfsmount *nmp; + vm_object_t object; + vm_page_t *pages; + struct nfsnode *np; + + vp = ap->a_vp; + np = VTONFS(vp); + td = curthread; /* XXX */ + cred = curthread->td_ucred; /* XXX */ + nmp = VFSTONFS(vp->v_mount); + pages = ap->a_m; + count = ap->a_count; + + if ((object = vp->v_object) == NULL) { + ncl_printf("nfs_getpages: called with non-merged cache vnode??\n"); + return VM_PAGER_ERROR; + } + + if (newnfs_directio_enable && !newnfs_directio_allow_mmap) { + mtx_lock(&np->n_mtx); + if ((np->n_flag & NNONCACHE) && (vp->v_type == VREG)) { + mtx_unlock(&np->n_mtx); + ncl_printf("nfs_getpages: called on non-cacheable vnode??\n"); + return VM_PAGER_ERROR; + } else + mtx_unlock(&np->n_mtx); + } + + mtx_lock(&nmp->nm_mtx); + if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 && + (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) { + mtx_unlock(&nmp->nm_mtx); + /* We'll never get here for v4, because we always have fsinfo */ + (void)ncl_fsinfo(nmp, vp, cred, td); + } else + mtx_unlock(&nmp->nm_mtx); + + npages = btoc(count); + + /* + * If the requested page is partially valid, just return it and + * allow the pager to zero-out the blanks. Partially valid pages + * can only occur at the file EOF. + */ + + { + vm_page_t m = pages[ap->a_reqpage]; + + VM_OBJECT_LOCK(object); + vm_page_lock_queues(); + if (m->valid != 0) { + /* handled by vm_fault now */ + /* vm_page_zero_invalid(m, TRUE); */ + for (i = 0; i < npages; ++i) { + if (i != ap->a_reqpage) + vm_page_free(pages[i]); + } + vm_page_unlock_queues(); + VM_OBJECT_UNLOCK(object); + return(0); + } + vm_page_unlock_queues(); + VM_OBJECT_UNLOCK(object); + } + + /* + * We use only the kva address for the buffer, but this is extremely + * convienient and fast. + */ + bp = getpbuf(&ncl_pbuf_freecnt); + + kva = (vm_offset_t) bp->b_data; + pmap_qenter(kva, pages, npages); + PCPU_INC(cnt.v_vnodein); + PCPU_ADD(cnt.v_vnodepgsin, npages); + + iov.iov_base = (caddr_t) kva; + iov.iov_len = count; + uio.uio_iov = &iov; + uio.uio_iovcnt = 1; + uio.uio_offset = IDX_TO_OFF(pages[0]->pindex); + uio.uio_resid = count; + uio.uio_segflg = UIO_SYSSPACE; + uio.uio_rw = UIO_READ; + uio.uio_td = td; + + error = ncl_readrpc(vp, &uio, cred); + pmap_qremove(kva, npages); + + relpbuf(bp, &ncl_pbuf_freecnt); + + if (error && (uio.uio_resid == count)) { + ncl_printf("nfs_getpages: error %d\n", error); + VM_OBJECT_LOCK(object); + vm_page_lock_queues(); + for (i = 0; i < npages; ++i) { + if (i != ap->a_reqpage) + vm_page_free(pages[i]); + } + vm_page_unlock_queues(); + VM_OBJECT_UNLOCK(object); + return VM_PAGER_ERROR; + } + + /* + * Calculate the number of bytes read and validate only that number + * of bytes. Note that due to pending writes, size may be 0. This + * does not mean that the remaining data is invalid! 
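The ncl_set_sigmask()/ncl_restore_sigmask() pair and the ncl_msleep() wrapper above block every signal except the interruptible-mount set for the duration of a sleep, then put the caller's mask back. A minimal userspace sketch of that save/restore pattern, assuming only POSIX signal calls (the kernel version additionally leaves untouched any signal the process already masks or ignores):

```c
/*
 * Illustrative sketch, not from the commit: a userspace analogue of
 * ncl_set_sigmask()/ncl_restore_sigmask(). Everything outside the
 * "interruptible" set stays blocked while the sleep runs.
 */
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static const int nfs_sig_set[] = { SIGINT, SIGTERM, SIGHUP, SIGQUIT };

static void
set_nfs_sigmask(sigset_t *oldset)
{
	sigset_t newset;
	size_t i;

	sigfillset(&newset);			/* block everything... */
	for (i = 0; i < sizeof(nfs_sig_set) / sizeof(nfs_sig_set[0]); i++)
		sigdelset(&newset, nfs_sig_set[i]); /* ...except these */
	sigprocmask(SIG_SETMASK, &newset, oldset);
}

int
main(void)
{
	sigset_t oldset;

	set_nfs_sigmask(&oldset);		/* ncl_set_sigmask() */
	sleep(2);				/* stands in for msleep() */
	sigprocmask(SIG_SETMASK, &oldset, NULL); /* ncl_restore_sigmask() */
	printf("mask restored\n");
	return (0);
}
```

Starting from a full set and deleting members guarantees that nothing outside nfs_sig_set can ever interrupt the sleep.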
+ */ + + size = count - uio.uio_resid; + VM_OBJECT_LOCK(object); + vm_page_lock_queues(); + for (i = 0, toff = 0; i < npages; i++, toff = nextoff) { + vm_page_t m; + nextoff = toff + PAGE_SIZE; + m = pages[i]; + + if (nextoff <= size) { + /* + * Read operation filled an entire page + */ + m->valid = VM_PAGE_BITS_ALL; + vm_page_undirty(m); + } else if (size > toff) { + /* + * Read operation filled a partial page. + */ + m->valid = 0; + vm_page_set_validclean(m, 0, size - toff); + /* handled by vm_fault now */ + /* vm_page_zero_invalid(m, TRUE); */ + } else { + /* + * Read operation was short. If no error occured + * we may have hit a zero-fill section. We simply + * leave valid set to 0. + */ + ; + } + if (i != ap->a_reqpage) { + /* + * Whether or not to leave the page activated is up in + * the air, but we should put the page on a page queue + * somewhere (it already is in the object). Result: + * It appears that emperical results show that + * deactivating pages is best. + */ + + /* + * Just in case someone was asking for this page we + * now tell them that it is ok to use. + */ + if (!error) { + if (m->oflags & VPO_WANTED) + vm_page_activate(m); + else + vm_page_deactivate(m); + vm_page_wakeup(m); + } else { + vm_page_free(m); + } + } + } + vm_page_unlock_queues(); + VM_OBJECT_UNLOCK(object); + return 0; +} + +/* + * Vnode op for VM putpages. + */ +int +ncl_putpages(struct vop_putpages_args *ap) +{ + struct uio uio; + struct iovec iov; + vm_offset_t kva; + struct buf *bp; + int iomode, must_commit, i, error, npages, count; + off_t offset; + int *rtvals; + struct vnode *vp; + struct thread *td; + struct ucred *cred; + struct nfsmount *nmp; + struct nfsnode *np; + vm_page_t *pages; + + vp = ap->a_vp; + np = VTONFS(vp); + td = curthread; /* XXX */ + cred = curthread->td_ucred; /* XXX */ + nmp = VFSTONFS(vp->v_mount); + pages = ap->a_m; + count = ap->a_count; + rtvals = ap->a_rtvals; + npages = btoc(count); + offset = IDX_TO_OFF(pages[0]->pindex); + + mtx_lock(&nmp->nm_mtx); + if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 && + (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) { + mtx_unlock(&nmp->nm_mtx); + (void)ncl_fsinfo(nmp, vp, cred, td); + } else + mtx_unlock(&nmp->nm_mtx); + + mtx_lock(&np->n_mtx); + if (newnfs_directio_enable && !newnfs_directio_allow_mmap && + (np->n_flag & NNONCACHE) && (vp->v_type == VREG)) { + mtx_unlock(&np->n_mtx); + ncl_printf("ncl_putpages: called on noncache-able vnode??\n"); + mtx_lock(&np->n_mtx); + } + + for (i = 0; i < npages; i++) + rtvals[i] = VM_PAGER_AGAIN; + + /* + * When putting pages, do not extend file past EOF. + */ + if (offset + count > np->n_size) { + count = np->n_size - offset; + if (count < 0) + count = 0; + } + mtx_unlock(&np->n_mtx); + + /* + * We use only the kva address for the buffer, but this is extremely + * convienient and fast. 
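The loop just shown validates only the bytes a short read actually returned: whole pages before the cutoff become fully valid, the page containing the cutoff is partially validated, and later pages stay invalid (a possible hole or EOF). The same arithmetic as a standalone sketch, with made-up sizes:

```c
/*
 * Illustrative sketch: ncl_getpages()'s per-page validation after a
 * short read. Only bytes actually read are marked valid.
 */
#include <stdio.h>

#define	PAGE_SIZE	4096

int
main(void)
{
	int count = 4 * PAGE_SIZE;	/* bytes requested */
	int resid = 5000;		/* bytes the read RPC left over */
	int size = count - resid;	/* bytes actually read */
	int i, toff, nextoff;

	for (i = 0, toff = 0; i < count / PAGE_SIZE; i++, toff = nextoff) {
		nextoff = toff + PAGE_SIZE;
		if (nextoff <= size)
			printf("page %d: fully valid\n", i);
		else if (size > toff)
			printf("page %d: %d bytes valid\n", i, size - toff);
		else
			printf("page %d: left invalid (hole or EOF)\n", i);
	}
	return (0);
}
```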
+ */ + bp = getpbuf(&ncl_pbuf_freecnt); + + kva = (vm_offset_t) bp->b_data; + pmap_qenter(kva, pages, npages); + PCPU_INC(cnt.v_vnodeout); + PCPU_ADD(cnt.v_vnodepgsout, count); + + iov.iov_base = (caddr_t) kva; + iov.iov_len = count; + uio.uio_iov = &iov; + uio.uio_iovcnt = 1; + uio.uio_offset = offset; + uio.uio_resid = count; + uio.uio_segflg = UIO_SYSSPACE; + uio.uio_rw = UIO_WRITE; + uio.uio_td = td; + + if ((ap->a_sync & VM_PAGER_PUT_SYNC) == 0) + iomode = NFSWRITE_UNSTABLE; + else + iomode = NFSWRITE_FILESYNC; + + error = ncl_writerpc(vp, &uio, cred, &iomode, &must_commit); + + pmap_qremove(kva, npages); + relpbuf(bp, &ncl_pbuf_freecnt); + + if (!error) { + int nwritten = round_page(count - uio.uio_resid) / PAGE_SIZE; + for (i = 0; i < nwritten; i++) { + rtvals[i] = VM_PAGER_OK; + vm_page_undirty(pages[i]); + } + if (must_commit) { + ncl_clearcommit(vp->v_mount); + } + } + return rtvals[0]; +} + +/* + * For nfs, cache consistency can only be maintained approximately. + * Although RFC1094 does not specify the criteria, the following is + * believed to be compatible with the reference port. + * For nfs: + * If the file's modify time on the server has changed since the + * last read rpc or you have written to the file, + * you may have lost data cache consistency with the + * server, so flush all of the file's data out of the cache. + * Then force a getattr rpc to ensure that you have up to date + * attributes. + * NB: This implies that cache data can be read when up to + * NFS_ATTRTIMEO seconds out of date. If you find that you need current + * attributes this could be forced by setting n_attrstamp to 0 before + * the VOP_GETATTR() call. + */ +static inline int +nfs_bioread_check_cons(struct vnode *vp, struct thread *td, struct ucred *cred) +{ + int error = 0; + struct vattr vattr; + struct nfsnode *np = VTONFS(vp); + int old_lock; + + /* + * Grab the exclusive lock before checking whether the cache is + * consistent. + * XXX - We can make this cheaper later (by acquiring cheaper locks). + * But for now, this suffices. 
+ */ + old_lock = ncl_upgrade_vnlock(vp); + mtx_lock(&np->n_mtx); + if (np->n_flag & NMODIFIED) { + mtx_unlock(&np->n_mtx); + if (vp->v_type != VREG) { + if (vp->v_type != VDIR) + panic("nfs: bioread, not dir"); + ncl_invaldir(vp); + error = ncl_vinvalbuf(vp, V_SAVE, td, 1); + if (error) + goto out; + } + np->n_attrstamp = 0; + error = VOP_GETATTR(vp, &vattr, cred); + if (error) + goto out; + mtx_lock(&np->n_mtx); + np->n_mtime = vattr.va_mtime; + mtx_unlock(&np->n_mtx); + } else { + mtx_unlock(&np->n_mtx); + error = VOP_GETATTR(vp, &vattr, cred); + if (error) + return (error); + mtx_lock(&np->n_mtx); + if ((np->n_flag & NSIZECHANGED) + || (NFS_TIMESPEC_COMPARE(&np->n_mtime, &vattr.va_mtime))) { + mtx_unlock(&np->n_mtx); + if (vp->v_type == VDIR) + ncl_invaldir(vp); + error = ncl_vinvalbuf(vp, V_SAVE, td, 1); + if (error) + goto out; + mtx_lock(&np->n_mtx); + np->n_mtime = vattr.va_mtime; + np->n_flag &= ~NSIZECHANGED; + } + mtx_unlock(&np->n_mtx); + } +out: + ncl_downgrade_vnlock(vp, old_lock); + return error; +} + +/* + * Vnode op for read using bio + */ +int +ncl_bioread(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *cred) +{ + struct nfsnode *np = VTONFS(vp); + int biosize, i; + struct buf *bp, *rabp; + struct thread *td; + struct nfsmount *nmp = VFSTONFS(vp->v_mount); + daddr_t lbn, rabn; + int bcount; + int seqcount; + int nra, error = 0, n = 0, on = 0; + +#ifdef DIAGNOSTIC + if (uio->uio_rw != UIO_READ) + panic("ncl_read mode"); +#endif + if (uio->uio_resid == 0) + return (0); + if (uio->uio_offset < 0) /* XXX VDIR cookies can be negative */ + return (EINVAL); + td = uio->uio_td; + + mtx_lock(&nmp->nm_mtx); + if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 && + (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) { + mtx_unlock(&nmp->nm_mtx); + (void)ncl_fsinfo(nmp, vp, cred, td); + mtx_lock(&nmp->nm_mtx); + } + if (nmp->nm_rsize == 0 || nmp->nm_readdirsize == 0) + (void) newnfs_iosize(nmp); + mtx_unlock(&nmp->nm_mtx); + + if (vp->v_type != VDIR && + (uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize) + return (EFBIG); + + if (newnfs_directio_enable && (ioflag & IO_DIRECT) && (vp->v_type == VREG)) + /* No caching/ no readaheads. Just read data into the user buffer */ + return ncl_readrpc(vp, uio, cred); + + biosize = vp->v_mount->mnt_stat.f_iosize; + seqcount = (int)((off_t)(ioflag >> IO_SEQSHIFT) * biosize / BKVASIZE); + + error = nfs_bioread_check_cons(vp, td, cred); + if (error) + return error; + + do { + u_quad_t nsize; + + mtx_lock(&np->n_mtx); + nsize = np->n_size; + mtx_unlock(&np->n_mtx); + + switch (vp->v_type) { + case VREG: + NFSINCRGLOBAL(newnfsstats.biocache_reads); + lbn = uio->uio_offset / biosize; + on = uio->uio_offset & (biosize - 1); + + /* + * Start the read ahead(s), as required. + */ + if (nmp->nm_readahead > 0) { + for (nra = 0; nra < nmp->nm_readahead && nra < seqcount && + (off_t)(lbn + 1 + nra) * biosize < nsize; nra++) { + rabn = lbn + 1 + nra; + if (incore(&vp->v_bufobj, rabn) == NULL) { + rabp = nfs_getcacheblk(vp, rabn, biosize, td); + if (!rabp) { + error = newnfs_sigintr(nmp, td); + if (error) + return (error); + else + break; + } + if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) { + rabp->b_flags |= B_ASYNC; + rabp->b_iocmd = BIO_READ; + vfs_busy_pages(rabp, 0); + if (ncl_asyncio(nmp, rabp, cred, td)) { + rabp->b_flags |= B_INVAL; + rabp->b_ioflags |= BIO_ERROR; + vfs_unbusy_pages(rabp); + brelse(rabp); + break; + } + } else { + brelse(rabp); + } + } + } + } + + /* Note that bcount is *not* DEV_BSIZE aligned. 
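nfs_bioread_check_cons(), earlier in this block, implements the consistency rule from its comment in two cases: a locally modified node is flushed and its attributes refetched, while a server-side mtime or size change invalidates the cached data. A reduced sketch of that decision; struct cached_node and must_invalidate() are illustrative names, not kernel APIs, and the VREG/VDIR special cases and locking are omitted:

```c
/*
 * Illustrative sketch, not a kernel API: the two-case consistency
 * decision of nfs_bioread_check_cons().
 */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

struct cached_node {			/* stand-in for struct nfsnode */
	bool	modified;		/* NMODIFIED */
	bool	size_changed;		/* NSIZECHANGED */
	struct timespec mtime;		/* server mtime at last check */
};

/* Returns true when the cached file data must be flushed/invalidated. */
static bool
must_invalidate(struct cached_node *np, struct timespec srv_mtime)
{
	if (np->modified) {
		/* Dirty locally: flush, refetch attributes, record mtime. */
		np->mtime = srv_mtime;
		return (true);
	}
	if (np->size_changed || np->mtime.tv_sec != srv_mtime.tv_sec ||
	    np->mtime.tv_nsec != srv_mtime.tv_nsec) {
		/* Changed on the server: cached data may be stale. */
		np->mtime = srv_mtime;
		np->size_changed = false;
		return (true);
	}
	return (false);
}

int
main(void)
{
	struct cached_node np = { false, false, { 100, 0 } };
	struct timespec srv = { 101, 0 };

	printf("invalidate: %s\n", must_invalidate(&np, srv) ? "yes" : "no");
	return (0);
}
```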
*/ + bcount = biosize; + if ((off_t)lbn * biosize >= nsize) { + bcount = 0; + } else if ((off_t)(lbn + 1) * biosize > nsize) { + bcount = nsize - (off_t)lbn * biosize; + } + bp = nfs_getcacheblk(vp, lbn, bcount, td); + + if (!bp) { + error = newnfs_sigintr(nmp, td); + return (error ? error : EINTR); + } + + /* + * If B_CACHE is not set, we must issue the read. If this + * fails, we return an error. + */ + + if ((bp->b_flags & B_CACHE) == 0) { + bp->b_iocmd = BIO_READ; + vfs_busy_pages(bp, 0); + error = ncl_doio(vp, bp, cred, td); + if (error) { + brelse(bp); + return (error); + } + } + + /* + * on is the offset into the current bp. Figure out how many + * bytes we can copy out of the bp. Note that bcount is + * NOT DEV_BSIZE aligned. + * + * Then figure out how many bytes we can copy into the uio. + */ + + n = 0; + if (on < bcount) + n = min((unsigned)(bcount - on), uio->uio_resid); + break; + case VLNK: + NFSINCRGLOBAL(newnfsstats.biocache_readlinks); + bp = nfs_getcacheblk(vp, (daddr_t)0, NFS_MAXPATHLEN, td); + if (!bp) { + error = newnfs_sigintr(nmp, td); + return (error ? error : EINTR); + } + if ((bp->b_flags & B_CACHE) == 0) { + bp->b_iocmd = BIO_READ; + vfs_busy_pages(bp, 0); + error = ncl_doio(vp, bp, cred, td); + if (error) { + bp->b_ioflags |= BIO_ERROR; + brelse(bp); + return (error); + } + } + n = min(uio->uio_resid, NFS_MAXPATHLEN - bp->b_resid); + on = 0; + break; + case VDIR: + NFSINCRGLOBAL(newnfsstats.biocache_readdirs); + if (np->n_direofoffset + && uio->uio_offset >= np->n_direofoffset) { + return (0); + } + lbn = (uoff_t)uio->uio_offset / NFS_DIRBLKSIZ; + on = uio->uio_offset & (NFS_DIRBLKSIZ - 1); + bp = nfs_getcacheblk(vp, lbn, NFS_DIRBLKSIZ, td); + if (!bp) { + error = newnfs_sigintr(nmp, td); + return (error ? error : EINTR); + } + if ((bp->b_flags & B_CACHE) == 0) { + bp->b_iocmd = BIO_READ; + vfs_busy_pages(bp, 0); + error = ncl_doio(vp, bp, cred, td); + if (error) { + brelse(bp); + } + while (error == NFSERR_BAD_COOKIE) { + ncl_invaldir(vp); + error = ncl_vinvalbuf(vp, 0, td, 1); + /* + * Yuck! The directory has been modified on the + * server. The only way to get the block is by + * reading from the beginning to get all the + * offset cookies. + * + * Leave the last bp intact unless there is an error. + * Loop back up to the while if the error is another + * NFSERR_BAD_COOKIE (double yuch!). + */ + for (i = 0; i <= lbn && !error; i++) { + if (np->n_direofoffset + && (i * NFS_DIRBLKSIZ) >= np->n_direofoffset) + return (0); + bp = nfs_getcacheblk(vp, i, NFS_DIRBLKSIZ, td); + if (!bp) { + error = newnfs_sigintr(nmp, td); + return (error ? error : EINTR); + } + if ((bp->b_flags & B_CACHE) == 0) { + bp->b_iocmd = BIO_READ; + vfs_busy_pages(bp, 0); + error = ncl_doio(vp, bp, cred, td); + /* + * no error + B_INVAL == directory EOF, + * use the block. + */ + if (error == 0 && (bp->b_flags & B_INVAL)) + break; + } + /* + * An error will throw away the block and the + * for loop will break out. If no error and this + * is not the block we want, we throw away the + * block and go for the next one via the for loop. + */ + if (error || i < lbn) + brelse(bp); + } + } + /* + * The above while is repeated if we hit another cookie + * error. If we hit an error and it wasn't a cookie error, + * we give up. + */ + if (error) + return (error); + } + + /* + * If not eof and read aheads are enabled, start one. + * (You need the current block first, so that you have the + * directory offset cookie of the next block.) 
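The "bcount is *not* DEV_BSIZE aligned" note refers to the sizing at the top of this chunk: the buffer for the logical block containing EOF is trimmed to the bytes that actually exist, and blocks wholly past EOF get a zero count. The same sizing in isolation (file size and biosize are arbitrary demo values):

```c
/*
 * Illustrative sketch: ncl_bioread()'s EOF chop for the buffer size
 * of logical block lbn (bcount is in bytes, not DEV_BSIZE units).
 */
#include <stdio.h>

static long long
block_bcount(long lbn, long biosize, long long nsize)
{
	long long bcount = biosize;

	if ((long long)lbn * biosize >= nsize)
		bcount = 0;				/* wholly past EOF */
	else if ((long long)(lbn + 1) * biosize > nsize)
		bcount = nsize - (long long)lbn * biosize; /* straddles EOF */
	return (bcount);
}

int
main(void)
{
	long long nsize = 10000;	/* demo file size */
	long biosize = 4096;		/* demo mount I/O size */
	long lbn;

	for (lbn = 0; lbn < 4; lbn++)
		printf("lbn %ld: bcount %lld\n", lbn,
		    block_bcount(lbn, biosize, nsize));
	return (0);
}
```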
+ */ + if (nmp->nm_readahead > 0 && + (bp->b_flags & B_INVAL) == 0 && + (np->n_direofoffset == 0 || + (lbn + 1) * NFS_DIRBLKSIZ < np->n_direofoffset) && + incore(&vp->v_bufobj, lbn + 1) == NULL) { + rabp = nfs_getcacheblk(vp, lbn + 1, NFS_DIRBLKSIZ, td); + if (rabp) { + if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) { + rabp->b_flags |= B_ASYNC; + rabp->b_iocmd = BIO_READ; + vfs_busy_pages(rabp, 0); + if (ncl_asyncio(nmp, rabp, cred, td)) { + rabp->b_flags |= B_INVAL; + rabp->b_ioflags |= BIO_ERROR; + vfs_unbusy_pages(rabp); + brelse(rabp); + } + } else { + brelse(rabp); + } + } + } + /* + * Unlike VREG files, whos buffer size ( bp->b_bcount ) is + * chopped for the EOF condition, we cannot tell how large + * NFS directories are going to be until we hit EOF. So + * an NFS directory buffer is *not* chopped to its EOF. Now, + * it just so happens that b_resid will effectively chop it + * to EOF. *BUT* this information is lost if the buffer goes + * away and is reconstituted into a B_CACHE state ( due to + * being VMIO ) later. So we keep track of the directory eof + * in np->n_direofoffset and chop it off as an extra step + * right here. + */ + n = lmin(uio->uio_resid, NFS_DIRBLKSIZ - bp->b_resid - on); + if (np->n_direofoffset && n > np->n_direofoffset - uio->uio_offset) + n = np->n_direofoffset - uio->uio_offset; + break; + default: + ncl_printf(" ncl_bioread: type %x unexpected\n", vp->v_type); + bp = NULL; + break; + }; + + if (n > 0) { + error = uiomove(bp->b_data + on, (int)n, uio); + } + if (vp->v_type == VLNK) + n = 0; + if (bp != NULL) + brelse(bp); + } while (error == 0 && uio->uio_resid > 0 && n > 0); + return (error); +} + +/* + * The NFS write path cannot handle iovecs with len > 1. So we need to + * break up iovecs accordingly (restricting them to wsize). + * For the SYNC case, we can do this with 1 copy (user buffer -> mbuf). + * For the ASYNC case, 2 copies are needed. The first a copy from the + * user buffer to a staging buffer and then a second copy from the staging + * buffer to mbufs. This can be optimized by copying from the user buffer + * directly into mbufs and passing the chain down, but that requires a + * fair amount of re-working of the relevant codepaths (and can be done + * later). 
+ */ +static int +nfs_directio_write(vp, uiop, cred, ioflag) + struct vnode *vp; + struct uio *uiop; + struct ucred *cred; + int ioflag; +{ + int error; + struct nfsmount *nmp = VFSTONFS(vp->v_mount); + struct thread *td = uiop->uio_td; + int size; + int wsize; + + mtx_lock(&nmp->nm_mtx); + wsize = nmp->nm_wsize; + mtx_unlock(&nmp->nm_mtx); + if (ioflag & IO_SYNC) { + int iomode, must_commit; + struct uio uio; + struct iovec iov; +do_sync: + while (uiop->uio_resid > 0) { + size = min(uiop->uio_resid, wsize); + size = min(uiop->uio_iov->iov_len, size); + iov.iov_base = uiop->uio_iov->iov_base; + iov.iov_len = size; + uio.uio_iov = &iov; + uio.uio_iovcnt = 1; + uio.uio_offset = uiop->uio_offset; + uio.uio_resid = size; + uio.uio_segflg = UIO_USERSPACE; + uio.uio_rw = UIO_WRITE; + uio.uio_td = td; + iomode = NFSWRITE_FILESYNC; + error = ncl_writerpc(vp, &uio, cred, &iomode, + &must_commit); + KASSERT((must_commit == 0), + ("ncl_directio_write: Did not commit write")); + if (error) + return (error); + uiop->uio_offset += size; + uiop->uio_resid -= size; + if (uiop->uio_iov->iov_len <= size) { + uiop->uio_iovcnt--; + uiop->uio_iov++; + } else { + uiop->uio_iov->iov_base = + (char *)uiop->uio_iov->iov_base + size; + uiop->uio_iov->iov_len -= size; + } + } + } else { + struct uio *t_uio; + struct iovec *t_iov; + struct buf *bp; + + /* + * Break up the write into blocksize chunks and hand these + * over to nfsiod's for write back. + * Unfortunately, this incurs a copy of the data. Since + * the user could modify the buffer before the write is + * initiated. + * + * The obvious optimization here is that one of the 2 copies + * in the async write path can be eliminated by copying the + * data here directly into mbufs and passing the mbuf chain + * down. But that will require a fair amount of re-working + * of the code and can be done if there's enough interest + * in NFS directio access. 
+ */ + while (uiop->uio_resid > 0) { + size = min(uiop->uio_resid, wsize); + size = min(uiop->uio_iov->iov_len, size); + bp = getpbuf(&ncl_pbuf_freecnt); + t_uio = malloc(sizeof(struct uio), M_NFSDIRECTIO, M_WAITOK); + t_iov = malloc(sizeof(struct iovec), M_NFSDIRECTIO, M_WAITOK); + t_iov->iov_base = malloc(size, M_NFSDIRECTIO, M_WAITOK); + t_iov->iov_len = size; + t_uio->uio_iov = t_iov; + t_uio->uio_iovcnt = 1; + t_uio->uio_offset = uiop->uio_offset; + t_uio->uio_resid = size; + t_uio->uio_segflg = UIO_SYSSPACE; + t_uio->uio_rw = UIO_WRITE; + t_uio->uio_td = td; + bcopy(uiop->uio_iov->iov_base, t_iov->iov_base, size); + bp->b_flags |= B_DIRECT; + bp->b_iocmd = BIO_WRITE; + if (cred != NOCRED) { + crhold(cred); + bp->b_wcred = cred; + } else + bp->b_wcred = NOCRED; + bp->b_caller1 = (void *)t_uio; + bp->b_vp = vp; + error = ncl_asyncio(nmp, bp, NOCRED, td); + if (error) { + free(t_iov->iov_base, M_NFSDIRECTIO); + free(t_iov, M_NFSDIRECTIO); + free(t_uio, M_NFSDIRECTIO); + bp->b_vp = NULL; + relpbuf(bp, &ncl_pbuf_freecnt); + if (error == EINTR) + return (error); + goto do_sync; + } + uiop->uio_offset += size; + uiop->uio_resid -= size; + if (uiop->uio_iov->iov_len <= size) { + uiop->uio_iovcnt--; + uiop->uio_iov++; + } else { + uiop->uio_iov->iov_base = + (char *)uiop->uio_iov->iov_base + size; + uiop->uio_iov->iov_len -= size; + } + } + } + return (0); +} + +/* + * Vnode op for write using bio + */ +int +ncl_write(struct vop_write_args *ap) +{ + int biosize; + struct uio *uio = ap->a_uio; + struct thread *td = uio->uio_td; + struct vnode *vp = ap->a_vp; + struct nfsnode *np = VTONFS(vp); + struct ucred *cred = ap->a_cred; + int ioflag = ap->a_ioflag; + struct buf *bp; + struct vattr vattr; + struct nfsmount *nmp = VFSTONFS(vp->v_mount); + daddr_t lbn; + int bcount; + int n, on, error = 0; + struct proc *p = td?td->td_proc:NULL; + +#ifdef DIAGNOSTIC + if (uio->uio_rw != UIO_WRITE) + panic("ncl_write mode"); + if (uio->uio_segflg == UIO_USERSPACE && uio->uio_td != curthread) + panic("ncl_write proc"); +#endif + if (vp->v_type != VREG) + return (EIO); + mtx_lock(&np->n_mtx); + if (np->n_flag & NWRITEERR) { + np->n_flag &= ~NWRITEERR; + mtx_unlock(&np->n_mtx); + return (np->n_error); + } else + mtx_unlock(&np->n_mtx); + mtx_lock(&nmp->nm_mtx); + if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 && + (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) { + mtx_unlock(&nmp->nm_mtx); + (void)ncl_fsinfo(nmp, vp, cred, td); + mtx_lock(&nmp->nm_mtx); + } + if (nmp->nm_wsize == 0) + (void) newnfs_iosize(nmp); + mtx_unlock(&nmp->nm_mtx); + + /* + * Synchronously flush pending buffers if we are in synchronous + * mode or if we are appending. + */ + if (ioflag & (IO_APPEND | IO_SYNC)) { + mtx_lock(&np->n_mtx); + if (np->n_flag & NMODIFIED) { + mtx_unlock(&np->n_mtx); +#ifdef notyet /* Needs matching nonblock semantics elsewhere, too. */ + /* + * Require non-blocking, synchronous writes to + * dirty files to inform the program it needs + * to fsync(2) explicitly. + */ + if (ioflag & IO_NDELAY) + return (EAGAIN); +#endif +flush_and_restart: + np->n_attrstamp = 0; + error = ncl_vinvalbuf(vp, V_SAVE, td, 1); + if (error) + return (error); + } else + mtx_unlock(&np->n_mtx); + } + + /* + * If IO_APPEND then load uio_offset. We restart here if we cannot + * get the append lock. 
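nfs_directio_write() caps every write RPC at wsize bytes and a single iovec, advancing the offset and iovec pointers as it goes; the async path additionally stages each chunk through a private copy before handing it to an nfsiod. A userspace analogue of the chunk loop, where nfs_write_rpc() is a hypothetical stand-in for ncl_writerpc():

```c
/*
 * Illustrative sketch: the wsize/iovec chunk loop of
 * nfs_directio_write()'s synchronous path.
 */
#include <stdio.h>
#include <sys/types.h>
#include <sys/uio.h>

#define	MIN(a, b)	((a) < (b) ? (a) : (b))

static void
nfs_write_rpc(const void *base, size_t len, off_t off)
{
	printf("WRITE %zu bytes at offset %lld\n", len, (long long)off);
}

static void
directio_write(struct iovec *iov, int iovcnt, off_t off, size_t wsize)
{
	while (iovcnt > 0) {
		/* Never more than wsize bytes, never more than one iovec. */
		size_t size = MIN(iov->iov_len, wsize);

		nfs_write_rpc(iov->iov_base, size, off);
		off += size;
		if (iov->iov_len <= size) {
			iov++;		/* iovec consumed, move on */
			iovcnt--;
		} else {
			iov->iov_base = (char *)iov->iov_base + size;
			iov->iov_len -= size;
		}
	}
}

int
main(void)
{
	static char a[10000], b[3000];
	struct iovec iov[2] = { { a, sizeof(a) }, { b, sizeof(b) } };

	directio_write(iov, 2, 0, 8192);
	return (0);
}
```

With iovecs of 10000 and 3000 bytes and wsize 8192, this issues writes of 8192, 1808, and 3000 bytes, mirroring the per-RPC limits of the sync path.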
+ */ + if (ioflag & IO_APPEND) { + np->n_attrstamp = 0; + error = VOP_GETATTR(vp, &vattr, cred); + if (error) + return (error); + mtx_lock(&np->n_mtx); + uio->uio_offset = np->n_size; + mtx_unlock(&np->n_mtx); + } + + if (uio->uio_offset < 0) + return (EINVAL); + if ((uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize) + return (EFBIG); + if (uio->uio_resid == 0) + return (0); + + if (newnfs_directio_enable && (ioflag & IO_DIRECT) && vp->v_type == VREG) + return nfs_directio_write(vp, uio, cred, ioflag); + + /* + * Maybe this should be above the vnode op call, but so long as + * file servers have no limits, i don't think it matters + */ + if (p != NULL) { + PROC_LOCK(p); + if (uio->uio_offset + uio->uio_resid > + lim_cur(p, RLIMIT_FSIZE)) { + psignal(p, SIGXFSZ); + PROC_UNLOCK(p); + return (EFBIG); + } + PROC_UNLOCK(p); + } + + biosize = vp->v_mount->mnt_stat.f_iosize; + /* + * Find all of this file's B_NEEDCOMMIT buffers. If our writes + * would exceed the local maximum per-file write commit size when + * combined with those, we must decide whether to flush, + * go synchronous, or return error. We don't bother checking + * IO_UNIT -- we just make all writes atomic anyway, as there's + * no point optimizing for something that really won't ever happen. + */ + if (!(ioflag & IO_SYNC)) { + int nflag; + + mtx_lock(&np->n_mtx); + nflag = np->n_flag; + mtx_unlock(&np->n_mtx); + int needrestart = 0; + if (nmp->nm_wcommitsize < uio->uio_resid) { + /* + * If this request could not possibly be completed + * without exceeding the maximum outstanding write + * commit size, see if we can convert it into a + * synchronous write operation. + */ + if (ioflag & IO_NDELAY) + return (EAGAIN); + ioflag |= IO_SYNC; + if (nflag & NMODIFIED) + needrestart = 1; + } else if (nflag & NMODIFIED) { + int wouldcommit = 0; + BO_LOCK(&vp->v_bufobj); + if (vp->v_bufobj.bo_dirty.bv_cnt != 0) { + TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd, + b_bobufs) { + if (bp->b_flags & B_NEEDCOMMIT) + wouldcommit += bp->b_bcount; + } + } + BO_UNLOCK(&vp->v_bufobj); + /* + * Since we're not operating synchronously and + * bypassing the buffer cache, we are in a commit + * and holding all of these buffers whether + * transmitted or not. If not limited, this + * will lead to the buffer cache deadlocking, + * as no one else can flush our uncommitted buffers. + */ + wouldcommit += uio->uio_resid; + /* + * If we would initially exceed the maximum + * outstanding write commit size, flush and restart. + */ + if (wouldcommit > nmp->nm_wcommitsize) + needrestart = 1; + } + if (needrestart) + goto flush_and_restart; + } + + do { + NFSINCRGLOBAL(newnfsstats.biocache_writes); + lbn = uio->uio_offset / biosize; + on = uio->uio_offset & (biosize-1); + n = min((unsigned)(biosize - on), uio->uio_resid); +again: + /* + * Handle direct append and file extension cases, calculate + * unaligned buffer size. + */ + mtx_lock(&np->n_mtx); + if (uio->uio_offset == np->n_size && n) { + mtx_unlock(&np->n_mtx); + /* + * Get the buffer (in its pre-append state to maintain + * B_CACHE if it was previously set). Resize the + * nfsnode after we have locked the buffer to prevent + * readers from reading garbage. 
+ */ + bcount = on; + bp = nfs_getcacheblk(vp, lbn, bcount, td); + + if (bp != NULL) { + long save; + + mtx_lock(&np->n_mtx); + np->n_size = uio->uio_offset + n; + np->n_flag |= NMODIFIED; + vnode_pager_setsize(vp, np->n_size); + mtx_unlock(&np->n_mtx); + + save = bp->b_flags & B_CACHE; + bcount += n; + allocbuf(bp, bcount); + bp->b_flags |= save; + } + } else { + /* + * Obtain the locked cache block first, and then + * adjust the file's size as appropriate. + */ + bcount = on + n; + if ((off_t)lbn * biosize + bcount < np->n_size) { + if ((off_t)(lbn + 1) * biosize < np->n_size) + bcount = biosize; + else + bcount = np->n_size - (off_t)lbn * biosize; + } + mtx_unlock(&np->n_mtx); + bp = nfs_getcacheblk(vp, lbn, bcount, td); + mtx_lock(&np->n_mtx); + if (uio->uio_offset + n > np->n_size) { + np->n_size = uio->uio_offset + n; + np->n_flag |= NMODIFIED; + vnode_pager_setsize(vp, np->n_size); + } + mtx_unlock(&np->n_mtx); + } + + if (!bp) { + error = newnfs_sigintr(nmp, td); + if (!error) + error = EINTR; + break; + } + + /* + * Issue a READ if B_CACHE is not set. In special-append + * mode, B_CACHE is based on the buffer prior to the write + * op and is typically set, avoiding the read. If a read + * is required in special append mode, the server will + * probably send us a short-read since we extended the file + * on our end, resulting in b_resid == 0 and, thusly, + * B_CACHE getting set. + * + * We can also avoid issuing the read if the write covers + * the entire buffer. We have to make sure the buffer state + * is reasonable in this case since we will not be initiating + * I/O. See the comments in kern/vfs_bio.c's getblk() for + * more information. + * + * B_CACHE may also be set due to the buffer being cached + * normally. + */ + + if (on == 0 && n == bcount) { + bp->b_flags |= B_CACHE; + bp->b_flags &= ~B_INVAL; + bp->b_ioflags &= ~BIO_ERROR; + } + + if ((bp->b_flags & B_CACHE) == 0) { + bp->b_iocmd = BIO_READ; + vfs_busy_pages(bp, 0); + error = ncl_doio(vp, bp, cred, td); + if (error) { + brelse(bp); + break; + } + } + if (bp->b_wcred == NOCRED) + bp->b_wcred = crhold(cred); + mtx_lock(&np->n_mtx); + np->n_flag |= NMODIFIED; + mtx_unlock(&np->n_mtx); + + /* + * If dirtyend exceeds file size, chop it down. This should + * not normally occur but there is an append race where it + * might occur XXX, so we log it. + * + * If the chopping creates a reverse-indexed or degenerate + * situation with dirtyoff/end, we 0 both of them. + */ + + if (bp->b_dirtyend > bcount) { + ncl_printf("NFS append race @%lx:%d\n", + (long)bp->b_blkno * DEV_BSIZE, + bp->b_dirtyend - bcount); + bp->b_dirtyend = bcount; + } + + if (bp->b_dirtyoff >= bp->b_dirtyend) + bp->b_dirtyoff = bp->b_dirtyend = 0; + + /* + * If the new write will leave a contiguous dirty + * area, just update the b_dirtyoff and b_dirtyend, + * otherwise force a write rpc of the old dirty area. + * + * While it is possible to merge discontiguous writes due to + * our having a B_CACHE buffer ( and thus valid read data + * for the hole), we don't because it could lead to + * significant cache coherency problems with multiple clients, + * especially if locking is implemented later on. + * + * as an optimization we could theoretically maintain + * a linked list of discontinuous areas, but we would still + * have to commit them separately so there isn't much + * advantage to it except perhaps a bit of asynchronization. 
+ */ + + if (bp->b_dirtyend > 0 && + (on > bp->b_dirtyend || (on + n) < bp->b_dirtyoff)) { + if (bwrite(bp) == EINTR) { + error = EINTR; + break; + } + goto again; + } + + error = uiomove((char *)bp->b_data + on, n, uio); + + /* + * Since this block is being modified, it must be written + * again and not just committed. Since write clustering does + * not work for the stage 1 data write, only the stage 2 + * commit rpc, we have to clear B_CLUSTEROK as well. + */ + bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK); + + if (error) { + bp->b_ioflags |= BIO_ERROR; + brelse(bp); + break; + } + + /* + * Only update dirtyoff/dirtyend if not a degenerate + * condition. + */ + if (n) { + if (bp->b_dirtyend > 0) { + bp->b_dirtyoff = min(on, bp->b_dirtyoff); + bp->b_dirtyend = max((on + n), bp->b_dirtyend); + } else { + bp->b_dirtyoff = on; + bp->b_dirtyend = on + n; + } + vfs_bio_set_validclean(bp, on, n); + } + + /* + * If IO_SYNC do bwrite(). + * + * IO_INVAL appears to be unused. The idea appears to be + * to turn off caching in this case. Very odd. XXX + */ + if ((ioflag & IO_SYNC)) { + if (ioflag & IO_INVAL) + bp->b_flags |= B_NOCACHE; + error = bwrite(bp); + if (error) + break; + } else if ((n + on) == biosize) { + bp->b_flags |= B_ASYNC; + (void) ncl_writebp(bp, 0, NULL); + } else { + bdwrite(bp); + } + } while (uio->uio_resid > 0 && n > 0); + + return (error); +} + +/* + * Get an nfs cache block. + * + * Allocate a new one if the block isn't currently in the cache + * and return the block marked busy. If the calling process is + * interrupted by a signal for an interruptible mount point, return + * NULL. + * + * The caller must carefully deal with the possible B_INVAL state of + * the buffer. ncl_doio() clears B_INVAL (and ncl_asyncio() clears it + * indirectly), so synchronous reads can be issued without worrying about + * the B_INVAL state. We have to be a little more careful when dealing + * with writes (see comments in nfs_write()) when extending a file past + * its EOF. + */ +static struct buf * +nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size, struct thread *td) +{ + struct buf *bp; + struct mount *mp; + struct nfsmount *nmp; + + mp = vp->v_mount; + nmp = VFSTONFS(mp); + + if (nmp->nm_flag & NFSMNT_INT) { + sigset_t oldset; + + ncl_set_sigmask(td, &oldset); + bp = getblk(vp, bn, size, PCATCH, 0, 0); + ncl_restore_sigmask(td, &oldset); + while (bp == NULL) { + if (newnfs_sigintr(nmp, td)) + return (NULL); + bp = getblk(vp, bn, size, 0, 2 * hz, 0); + } + } else { + bp = getblk(vp, bn, size, 0, 0, 0); + } + + if (vp->v_type == VREG) { + int biosize; + + biosize = mp->mnt_stat.f_iosize; + bp->b_blkno = bn * (biosize / DEV_BSIZE); + } + return (bp); +} + +/* + * Flush and invalidate all dirty buffers. If another process is already + * doing the flush, just wait for completion. + */ +int +ncl_vinvalbuf(struct vnode *vp, int flags, struct thread *td, int intrflg) +{ + struct nfsnode *np = VTONFS(vp); + struct nfsmount *nmp = VFSTONFS(vp->v_mount); + int error = 0, slpflag, slptimeo; + int old_lock = 0; + + ASSERT_VOP_LOCKED(vp, "ncl_vinvalbuf"); + + /* + * XXX This check stops us from needlessly doing a vinvalbuf when + * being called through vclean(). It is not clear that this is + * unsafe. 
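The test at the top of this chunk keeps a buffer's dirty region contiguous: a new write [on, on+n) either extends (b_dirtyoff, b_dirtyend) or forces the old range to the server first. The same bookkeeping as a runnable sketch (merge_dirty() is an invented name):

```c
/*
 * Illustrative sketch: ncl_write()'s dirty-range bookkeeping; (off,
 * end) mirror b_dirtyoff/b_dirtyend within one buffer.
 */
#include <stdio.h>

#define	MIN(a, b)	((a) < (b) ? (a) : (b))
#define	MAX(a, b)	((a) > (b) ? (a) : (b))

struct dirty {
	int	off;
	int	end;	/* end == 0 means no dirty bytes */
};

/* Returns 0 when the caller must bwrite() the old range first. */
static int
merge_dirty(struct dirty *d, int on, int n)
{
	if (d->end > 0 && (on > d->end || on + n < d->off))
		return (0);		/* would leave a hole: flush first */
	if (d->end > 0) {
		d->off = MIN(on, d->off);
		d->end = MAX(on + n, d->end);
	} else {
		d->off = on;
		d->end = on + n;
	}
	return (1);
}

int
main(void)
{
	struct dirty d = { 0, 0 };

	printf("[100,200): %d -> [%d,%d)\n", merge_dirty(&d, 100, 100),
	    d.off, d.end);
	printf("[200,300): %d -> [%d,%d)\n", merge_dirty(&d, 200, 100),
	    d.off, d.end);
	printf("[500,600): %d -> [%d,%d)\n", merge_dirty(&d, 500, 100),
	    d.off, d.end);
	return (0);
}
```

The third call fails because on (500) lies past the dirty end (300), exactly the case where the original forces a write RPC and retries.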
+ */ + if (vp->v_iflag & VI_DOOMED) + return (0); + + if ((nmp->nm_flag & NFSMNT_INT) == 0) + intrflg = 0; + if ((nmp->nm_mountp->mnt_kern_flag & MNTK_UNMOUNTF)) + intrflg = 1; + if (intrflg) { + slpflag = PCATCH; + slptimeo = 2 * hz; + } else { + slpflag = 0; + slptimeo = 0; + } + + old_lock = ncl_upgrade_vnlock(vp); + /* + * Now, flush as required. + */ + if ((flags & V_SAVE) && (vp->v_bufobj.bo_object != NULL)) { + VM_OBJECT_LOCK(vp->v_bufobj.bo_object); + vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC); + VM_OBJECT_UNLOCK(vp->v_bufobj.bo_object); + /* + * If the page clean was interrupted, fail the invalidation. + * Not doing so, we run the risk of losing dirty pages in the + * vinvalbuf() call below. + */ + if (intrflg && (error = newnfs_sigintr(nmp, td))) + goto out; + } + + error = vinvalbuf(vp, flags, slpflag, 0); + while (error) { + if (intrflg && (error = newnfs_sigintr(nmp, td))) + goto out; + error = vinvalbuf(vp, flags, 0, slptimeo); + } + mtx_lock(&np->n_mtx); + if (np->n_directio_asyncwr == 0) + np->n_flag &= ~NMODIFIED; + mtx_unlock(&np->n_mtx); +out: + ncl_downgrade_vnlock(vp, old_lock); + return error; +} + +/* + * Initiate asynchronous I/O. Return an error if no nfsiods are available. + * This is mainly to avoid queueing async I/O requests when the nfsiods + * are all hung on a dead server. + * + * Note: ncl_asyncio() does not clear (BIO_ERROR|B_INVAL) but when the bp + * is eventually dequeued by the async daemon, ncl_doio() *will*. + */ +int +ncl_asyncio(struct nfsmount *nmp, struct buf *bp, struct ucred *cred, struct thread *td) +{ + int iod; + int gotiod; + int slpflag = 0; + int slptimeo = 0; + int error, error2; + + /* + * Unless iothreadcnt is set > 0, don't bother with async I/O + * threads. For LAN environments, they don't buy any significant + * performance improvement that you can't get with large block + * sizes. + */ + if (nmp->nm_readahead == 0) + return (EPERM); + + /* + * Commits are usually short and sweet so lets save some cpu and + * leave the async daemons for more important rpc's (such as reads + * and writes). + */ + mtx_lock(&ncl_iod_mutex); + if (bp->b_iocmd == BIO_WRITE && (bp->b_flags & B_NEEDCOMMIT) && + (nmp->nm_bufqiods > ncl_numasync / 2)) { + mtx_unlock(&ncl_iod_mutex); + return(EIO); + } +again: + if (nmp->nm_flag & NFSMNT_INT) + slpflag = PCATCH; + gotiod = FALSE; + + /* + * Find a free iod to process this request. + */ + for (iod = 0; iod < ncl_numasync; iod++) + if (ncl_iodwant[iod]) { + gotiod = TRUE; + break; + } + + /* + * Try to create one if none are free. + */ + if (!gotiod) { + iod = ncl_nfsiodnew(); + if (iod != -1) + gotiod = TRUE; + } + + if (gotiod) { + /* + * Found one, so wake it up and tell it which + * mount to process. + */ + NFS_DPF(ASYNCIO, ("ncl_asyncio: waking iod %d for mount %p\n", + iod, nmp)); + ncl_iodwant[iod] = NULL; + ncl_iodmount[iod] = nmp; + nmp->nm_bufqiods++; + wakeup(&ncl_iodwant[iod]); + } + + /* + * If none are free, we may already have an iod working on this mount + * point. If so, it will process our request. + */ + if (!gotiod) { + if (nmp->nm_bufqiods > 0) { + NFS_DPF(ASYNCIO, + ("ncl_asyncio: %d iods are already processing mount %p\n", + nmp->nm_bufqiods, nmp)); + gotiod = TRUE; + } + } + + /* + * If we have an iod which can process the request, then queue + * the buffer. + */ + if (gotiod) { + /* + * Ensure that the queue never grows too large. We still want + * to asynchronize so we block rather then return EIO. 
+ */ + while (nmp->nm_bufqlen >= 2*ncl_numasync) { + NFS_DPF(ASYNCIO, + ("ncl_asyncio: waiting for mount %p queue to drain\n", nmp)); + nmp->nm_bufqwant = TRUE; + error = ncl_msleep(td, &nmp->nm_bufq, &ncl_iod_mutex, + slpflag | PRIBIO, + "nfsaio", slptimeo); + if (error) { + error2 = newnfs_sigintr(nmp, td); + if (error2) { + mtx_unlock(&ncl_iod_mutex); + return (error2); + } + if (slpflag == PCATCH) { + slpflag = 0; + slptimeo = 2 * hz; + } + } + /* + * We might have lost our iod while sleeping, + * so check and loop if nescessary. + */ + if (nmp->nm_bufqiods == 0) { + NFS_DPF(ASYNCIO, + ("ncl_asyncio: no iods after mount %p queue was drained, looping\n", nmp)); + goto again; + } + } + + /* We might have lost our nfsiod */ + if (nmp->nm_bufqiods == 0) { + NFS_DPF(ASYNCIO, + ("ncl_asyncio: no iods after mount %p queue was drained, looping\n", nmp)); + goto again; + } + + if (bp->b_iocmd == BIO_READ) { + if (bp->b_rcred == NOCRED && cred != NOCRED) + bp->b_rcred = crhold(cred); + } else { + if (bp->b_wcred == NOCRED && cred != NOCRED) + bp->b_wcred = crhold(cred); + } + + if (bp->b_flags & B_REMFREE) + bremfreef(bp); + BUF_KERNPROC(bp); + TAILQ_INSERT_TAIL(&nmp->nm_bufq, bp, b_freelist); + nmp->nm_bufqlen++; + if ((bp->b_flags & B_DIRECT) && bp->b_iocmd == BIO_WRITE) { + mtx_lock(&(VTONFS(bp->b_vp))->n_mtx); + VTONFS(bp->b_vp)->n_flag |= NMODIFIED; + VTONFS(bp->b_vp)->n_directio_asyncwr++; + mtx_unlock(&(VTONFS(bp->b_vp))->n_mtx); + } + mtx_unlock(&ncl_iod_mutex); + return (0); + } + + mtx_unlock(&ncl_iod_mutex); + + /* + * All the iods are busy on other mounts, so return EIO to + * force the caller to process the i/o synchronously. + */ + NFS_DPF(ASYNCIO, ("ncl_asyncio: no iods available, i/o is synchronous\n")); + return (EIO); +} + +void +ncl_doio_directwrite(struct buf *bp) +{ + int iomode, must_commit; + struct uio *uiop = (struct uio *)bp->b_caller1; + char *iov_base = uiop->uio_iov->iov_base; + + iomode = NFSWRITE_FILESYNC; + uiop->uio_td = NULL; /* NULL since we're in nfsiod */ + ncl_writerpc(bp->b_vp, uiop, bp->b_wcred, &iomode, &must_commit); + KASSERT((must_commit == 0), ("ncl_doio_directwrite: Did not commit write")); + free(iov_base, M_NFSDIRECTIO); + free(uiop->uio_iov, M_NFSDIRECTIO); + free(uiop, M_NFSDIRECTIO); + if ((bp->b_flags & B_DIRECT) && bp->b_iocmd == BIO_WRITE) { + struct nfsnode *np = VTONFS(bp->b_vp); + mtx_lock(&np->n_mtx); + np->n_directio_asyncwr--; + if (np->n_directio_asyncwr == 0) { + np->n_flag &= ~NMODIFIED; + if ((np->n_flag & NFSYNCWAIT)) { + np->n_flag &= ~NFSYNCWAIT; + wakeup((caddr_t)&np->n_directio_asyncwr); + } + } + mtx_unlock(&np->n_mtx); + } + bp->b_vp = NULL; + relpbuf(bp, &ncl_pbuf_freecnt); +} + +/* + * Do an I/O operation to/from a cache block. This may be called + * synchronously or from an nfsiod. + */ +int +ncl_doio(struct vnode *vp, struct buf *bp, struct ucred *cr, struct thread *td) +{ + struct uio *uiop; + struct nfsnode *np; + struct nfsmount *nmp; + int error = 0, iomode, must_commit = 0; + struct uio uio; + struct iovec io; + struct proc *p = td ? td->td_proc : NULL; + uint8_t iocmd; + + np = VTONFS(vp); + nmp = VFSTONFS(vp->v_mount); + uiop = &uio; + uiop->uio_iov = &io; + uiop->uio_iovcnt = 1; + uiop->uio_segflg = UIO_SYSSPACE; + uiop->uio_td = td; + + /* + * clear BIO_ERROR and B_INVAL state prior to initiating the I/O. We + * do this here so we do not have to do it in all the code that + * calls us. 
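ncl_asyncio() above picks a free nfsiod, tries to create one if none are idle, falls back to an iod already serving this mount, and bounds the per-mount queue at twice the iod count. A toy version of just that dispatch decision; the kernel instead sleeps on a full queue and can fork new iods, and struct mnt and asyncio_dispatch() are invented names:

```c
/*
 * Illustrative sketch of ncl_asyncio()'s dispatch decision only; all
 * locking, sleeping on a full queue, and iod creation are left out.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define	NIOD	4			/* running nfsiods (ncl_numasync) */

struct mnt {
	int	bufqiods;		/* iods working this mount */
	int	bufqlen;		/* queued buffers */
};

static bool iod_free[NIOD] = { false, false, true, false };

/* Returns 0 if queued, or EIO so the caller falls back to sync I/O. */
static int
asyncio_dispatch(struct mnt *m)
{
	int iod;

	for (iod = 0; iod < NIOD; iod++) {	/* 1: grab a free iod */
		if (iod_free[iod]) {
			iod_free[iod] = false;
			m->bufqiods++;
			break;
		}
	}
	/* 2: (the kernel also tries to create a new iod here) */
	if (iod == NIOD && m->bufqiods == 0)
		return (EIO);	/* nobody will ever service this queue */
	if (m->bufqlen >= 2 * NIOD)
		return (EIO);	/* queue full (the kernel sleeps instead) */
	m->bufqlen++;
	return (0);
}

int
main(void)
{
	struct mnt m = { 0, 0 };

	printf("dispatch: %d, bufqlen %d\n", asyncio_dispatch(&m), m.bufqlen);
	return (0);
}
```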
+ */ + bp->b_flags &= ~B_INVAL; + bp->b_ioflags &= ~BIO_ERROR; + + KASSERT(!(bp->b_flags & B_DONE), ("ncl_doio: bp %p already marked done", bp)); + iocmd = bp->b_iocmd; + if (iocmd == BIO_READ) { + io.iov_len = uiop->uio_resid = bp->b_bcount; + io.iov_base = bp->b_data; + uiop->uio_rw = UIO_READ; + + switch (vp->v_type) { + case VREG: + uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE; + NFSINCRGLOBAL(newnfsstats.read_bios); + error = ncl_readrpc(vp, uiop, cr); + + if (!error) { + if (uiop->uio_resid) { + /* + * If we had a short read with no error, we must have + * hit a file hole. We should zero-fill the remainder. + * This can also occur if the server hits the file EOF. + * + * Holes used to be able to occur due to pending + * writes, but that is not possible any longer. + */ + int nread = bp->b_bcount - uiop->uio_resid; + int left = uiop->uio_resid; + + if (left > 0) + bzero((char *)bp->b_data + nread, left); + uiop->uio_resid = 0; + } + } + /* ASSERT_VOP_LOCKED(vp, "ncl_doio"); */ + if (p && (vp->v_vflag & VV_TEXT)) { + mtx_lock(&np->n_mtx); + if (NFS_TIMESPEC_COMPARE(&np->n_mtime, &np->n_vattr.na_mtime)) { + mtx_unlock(&np->n_mtx); + PROC_LOCK(p); + killproc(p, "text file modification"); + PROC_UNLOCK(p); + } else + mtx_unlock(&np->n_mtx); + } + break; + case VLNK: + uiop->uio_offset = (off_t)0; + NFSINCRGLOBAL(newnfsstats.readlink_bios); + error = ncl_readlinkrpc(vp, uiop, cr); + break; + case VDIR: + NFSINCRGLOBAL(newnfsstats.readdir_bios); + uiop->uio_offset = ((u_quad_t)bp->b_lblkno) * NFS_DIRBLKSIZ; + if ((nmp->nm_flag & NFSMNT_RDIRPLUS) != 0) { + error = ncl_readdirplusrpc(vp, uiop, cr, td); + if (error == NFSERR_NOTSUPP) + nmp->nm_flag &= ~NFSMNT_RDIRPLUS; + } + if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0) + error = ncl_readdirrpc(vp, uiop, cr, td); + /* + * end-of-directory sets B_INVAL but does not generate an + * error. + */ + if (error == 0 && uiop->uio_resid == bp->b_bcount) + bp->b_flags |= B_INVAL; + break; + default: + ncl_printf("ncl_doio: type %x unexpected\n", vp->v_type); + break; + }; + if (error) { + bp->b_ioflags |= BIO_ERROR; + bp->b_error = error; + } + } else { + /* + * If we only need to commit, try to commit + */ + if (bp->b_flags & B_NEEDCOMMIT) { + int retv; + off_t off; + + off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE + bp->b_dirtyoff; + retv = ncl_commit(vp, off, bp->b_dirtyend-bp->b_dirtyoff, + bp->b_wcred, td); + if (retv == 0) { + bp->b_dirtyoff = bp->b_dirtyend = 0; + bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK); + bp->b_resid = 0; + bufdone(bp); + return (0); + } + if (retv == NFSERR_STALEWRITEVERF) { + ncl_clearcommit(vp->v_mount); + } + } + + /* + * Setup for actual write + */ + mtx_lock(&np->n_mtx); + if ((off_t)bp->b_blkno * DEV_BSIZE + bp->b_dirtyend > np->n_size) + bp->b_dirtyend = np->n_size - (off_t)bp->b_blkno * DEV_BSIZE; + mtx_unlock(&np->n_mtx); + + if (bp->b_dirtyend > bp->b_dirtyoff) { + io.iov_len = uiop->uio_resid = bp->b_dirtyend + - bp->b_dirtyoff; + uiop->uio_offset = (off_t)bp->b_blkno * DEV_BSIZE + + bp->b_dirtyoff; + io.iov_base = (char *)bp->b_data + bp->b_dirtyoff; + uiop->uio_rw = UIO_WRITE; + NFSINCRGLOBAL(newnfsstats.write_bios); + + if ((bp->b_flags & (B_ASYNC | B_NEEDCOMMIT | B_NOCACHE | B_CLUSTER)) == B_ASYNC) + iomode = NFSWRITE_UNSTABLE; + else + iomode = NFSWRITE_FILESYNC; + + error = ncl_writerpc(vp, uiop, cr, &iomode, &must_commit); + + /* + * When setting B_NEEDCOMMIT also set B_CLUSTEROK to try + * to cluster the buffers needing commit. 
This will allow + * the system to submit a single commit rpc for the whole + * cluster. We can do this even if the buffer is not 100% + * dirty (relative to the NFS blocksize), so we optimize the + * append-to-file-case. + * + * (when clearing B_NEEDCOMMIT, B_CLUSTEROK must also be + * cleared because write clustering only works for commit + * rpc's, not for the data portion of the write). + */ + + if (!error && iomode == NFSWRITE_UNSTABLE) { + bp->b_flags |= B_NEEDCOMMIT; + if (bp->b_dirtyoff == 0 + && bp->b_dirtyend == bp->b_bcount) + bp->b_flags |= B_CLUSTEROK; + } else { + bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK); + } + + /* + * For an interrupted write, the buffer is still valid + * and the write hasn't been pushed to the server yet, + * so we can't set BIO_ERROR and report the interruption + * by setting B_EINTR. For the B_ASYNC case, B_EINTR + * is not relevant, so the rpc attempt is essentially + * a noop. For the case of a V3 write rpc not being + * committed to stable storage, the block is still + * dirty and requires either a commit rpc or another + * write rpc with iomode == NFSV3WRITE_FILESYNC before + * the block is reused. This is indicated by setting + * the B_DELWRI and B_NEEDCOMMIT flags. + * + * If the buffer is marked B_PAGING, it does not reside on + * the vp's paging queues so we cannot call bdirty(). The + * bp in this case is not an NFS cache block so we should + * be safe. XXX + * + * The logic below breaks up errors into recoverable and + * unrecoverable. For the former, we clear B_INVAL|B_NOCACHE + * and keep the buffer around for potential write retries. + * For the latter (eg ESTALE), we toss the buffer away (B_INVAL) + * and save the error in the nfsnode. This is less than ideal + * but necessary. Keeping such buffers around could potentially + * cause buffer exhaustion eventually (they can never be written + * out, so will get constantly be re-dirtied). It also causes + * all sorts of vfs panics. For non-recoverable write errors, + * also invalidate the attrcache, so we'll be forced to go over + * the wire for this object, returning an error to user on next + * call (most of the time). + */ + if (error == EINTR || error == EIO || error == ETIMEDOUT + || (!error && (bp->b_flags & B_NEEDCOMMIT))) { + int s; + + s = splbio(); + bp->b_flags &= ~(B_INVAL|B_NOCACHE); + if ((bp->b_flags & B_PAGING) == 0) { + bdirty(bp); + bp->b_flags &= ~B_DONE; + } + if (error && (bp->b_flags & B_ASYNC) == 0) + bp->b_flags |= B_EINTR; + splx(s); + } else { + if (error) { + bp->b_ioflags |= BIO_ERROR; + bp->b_flags |= B_INVAL; + bp->b_error = np->n_error = error; + mtx_lock(&np->n_mtx); + np->n_flag |= NWRITEERR; + np->n_attrstamp = 0; + mtx_unlock(&np->n_mtx); + } + bp->b_dirtyoff = bp->b_dirtyend = 0; + } + } else { + bp->b_resid = 0; + bufdone(bp); + return (0); + } + } + bp->b_resid = uiop->uio_resid; + if (must_commit) + ncl_clearcommit(vp->v_mount); + bufdone(bp); + return (error); +} + +/* + * Used to aid in handling ftruncate() operations on the NFS client side. + * Truncation creates a number of special problems for NFS. We have to + * throw away VM pages and buffer cache buffers that are beyond EOF, and + * we have to properly handle VM pages or (potentially dirty) buffers + * that straddle the truncation point. 
+ */ + +int +ncl_meta_setsize(struct vnode *vp, struct ucred *cred, struct thread *td, u_quad_t nsize) +{ + struct nfsnode *np = VTONFS(vp); + u_quad_t tsize; + int biosize = vp->v_mount->mnt_stat.f_iosize; + int error = 0; + + mtx_lock(&np->n_mtx); + tsize = np->n_size; + np->n_size = nsize; + mtx_unlock(&np->n_mtx); + + if (nsize < tsize) { + struct buf *bp; + daddr_t lbn; + int bufsize; + + /* + * vtruncbuf() doesn't get the buffer overlapping the + * truncation point. We may have a B_DELWRI and/or B_CACHE + * buffer that now needs to be truncated. + */ + error = vtruncbuf(vp, cred, td, nsize, biosize); + lbn = nsize / biosize; + bufsize = nsize & (biosize - 1); + bp = nfs_getcacheblk(vp, lbn, bufsize, td); + if (!bp) + return EINTR; + if (bp->b_dirtyoff > bp->b_bcount) + bp->b_dirtyoff = bp->b_bcount; + if (bp->b_dirtyend > bp->b_bcount) + bp->b_dirtyend = bp->b_bcount; + bp->b_flags |= B_RELBUF; /* don't leave garbage around */ + brelse(bp); + } else { + vnode_pager_setsize(vp, nsize); + } + return(error); +} + diff --git a/sys/fs/nfsclient/nfs_clcomsubs.c b/sys/fs/nfsclient/nfs_clcomsubs.c new file mode 100644 index 0000000..1c03b5c --- /dev/null +++ b/sys/fs/nfsclient/nfs_clcomsubs.c @@ -0,0 +1,521 @@ +/*- + * Copyright (c) 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Rick Macklem at The University of Guelph. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ + +#include <sys/cdefs.h> +__FBSDID("$FreeBSD$"); + +/* + * These functions support the macros and help fiddle mbuf chains for + * the nfs op functions. They do things like create the rpc header and + * copy data between mbuf chains and uio lists. 
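+ * For NFSv4 requests, nfscl_reqstart() below also builds the Compound
+ * header (tag, minor version and operation count), driven by the
+ * nfsv4_opmap table.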
+ */ +#ifndef APPLEKEXT +#include <fs/nfs/nfsport.h> + +extern struct nfsstats newnfsstats; +extern struct nfsv4_opflag nfsv4_opflag[NFSV4OP_NOPS]; +extern int ncl_mbuf_mlen; +extern enum vtype newnv2tov_type[8]; +extern enum vtype nv34tov_type[8]; +NFSCLSTATEMUTEX; +#endif /* !APPLEKEXT */ + +static nfsuint64 nfs_nullcookie = {{ 0, 0 }}; +static struct { + int op; + int opcnt; + const u_char *tag; + int taglen; +} nfsv4_opmap[NFS_NPROCS] = { + { 0, 1, "Null", 4 }, + { NFSV4OP_GETATTR, 1, "Getattr", 7, }, + { NFSV4OP_SETATTR, 2, "Setattr", 7, }, + { NFSV4OP_LOOKUP, 3, "Lookup", 6, }, + { NFSV4OP_ACCESS, 2, "Access", 6, }, + { NFSV4OP_READLINK, 2, "Readlink", 8, }, + { NFSV4OP_READ, 1, "Read", 4, }, + { NFSV4OP_WRITE, 2, "Write", 5, }, + { NFSV4OP_OPEN, 3, "Open", 4, }, + { NFSV4OP_CREATE, 3, "Create", 6, }, + { NFSV4OP_CREATE, 1, "Create", 6, }, + { NFSV4OP_CREATE, 3, "Create", 6, }, + { NFSV4OP_REMOVE, 1, "Remove", 6, }, + { NFSV4OP_REMOVE, 1, "Remove", 6, }, + { NFSV4OP_SAVEFH, 5, "Rename", 6, }, + { NFSV4OP_SAVEFH, 4, "Link", 4, }, + { NFSV4OP_READDIR, 2, "Readdir", 7, }, + { NFSV4OP_READDIR, 2, "Readdir", 7, }, + { NFSV4OP_GETATTR, 1, "Getattr", 7, }, + { NFSV4OP_GETATTR, 1, "Getattr", 7, }, + { NFSV4OP_GETATTR, 1, "Getattr", 7, }, + { NFSV4OP_COMMIT, 2, "Commit", 6, }, + { NFSV4OP_LOOKUPP, 3, "Lookupp", 7, }, + { NFSV4OP_SETCLIENTID, 1, "SetClientID", 11, }, + { NFSV4OP_SETCLIENTIDCFRM, 1, "SetClientIDConfirm", 18, }, + { NFSV4OP_LOCK, 1, "Lock", 4, }, + { NFSV4OP_LOCKU, 1, "LockU", 5, }, + { NFSV4OP_OPEN, 2, "Open", 4, }, + { NFSV4OP_CLOSE, 1, "Close", 5, }, + { NFSV4OP_OPENCONFIRM, 1, "Openconfirm", 11, }, + { NFSV4OP_LOCKT, 1, "LockT", 5, }, + { NFSV4OP_OPENDOWNGRADE, 1, "Opendowngrade", 13, }, + { NFSV4OP_RENEW, 1, "Renew", 5, }, + { NFSV4OP_PUTROOTFH, 1, "Dirpath", 7, }, + { NFSV4OP_RELEASELCKOWN, 1, "Rellckown", 9, }, + { NFSV4OP_DELEGRETURN, 1, "Delegret", 8, }, + { NFSV4OP_DELEGRETURN, 3, "DelegRemove", 11, }, + { NFSV4OP_DELEGRETURN, 7, "DelegRename1", 12, }, + { NFSV4OP_DELEGRETURN, 9, "DelegRename2", 12, }, + { NFSV4OP_GETATTR, 1, "Getacl", 6, }, + { NFSV4OP_SETATTR, 1, "Setacl", 6, }, +}; + + +/* + * NFS RPCS that have large request message size. + */ +static int nfs_bigrequest[NFS_NPROCS] = { + 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +}; + +/* + * Start building a request. Mostly just put the first file handle in + * place. + */ +APPLESTATIC void +nfscl_reqstart(struct nfsrv_descript *nd, int procnum, struct nfsmount *nmp, + u_int8_t *nfhp, int fhlen, u_int32_t **opcntpp) +{ + struct mbuf *mb; + u_int32_t *tl; + int opcnt; + nfsattrbit_t attrbits; + + /* + * First, fill in some of the fields of nd. + */ + if (NFSHASNFSV4(nmp)) + nd->nd_flag = ND_NFSV4; + else if (NFSHASNFSV3(nmp)) + nd->nd_flag = ND_NFSV3; + else + nd->nd_flag = ND_NFSV2; + nd->nd_procnum = procnum; + nd->nd_repstat = 0; + + /* + * Get the first mbuf for the request. + */ + if (nfs_bigrequest[procnum]) + NFSMCLGET(mb, M_WAIT); + else + NFSMGET(mb); + mbuf_setlen(mb, 0); + nd->nd_mreq = nd->nd_mb = mb; + nd->nd_bpos = NFSMTOD(mb, caddr_t); + + /* + * And fill the first file handle into the request. + */ + if (nd->nd_flag & ND_NFSV4) { + opcnt = nfsv4_opmap[procnum].opcnt + + nfsv4_opflag[nfsv4_opmap[procnum].op].needscfh; + /* + * What should the tag really be? 
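+ * For now, the operation name from nfsv4_opmap is used, which at
+ * least makes the compounds easy to identify in packet traces.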
+ */
+ (void) nfsm_strtom(nd, nfsv4_opmap[procnum].tag,
+ nfsv4_opmap[procnum].taglen);
+ NFSM_BUILD(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
+ *tl++ = txdr_unsigned(NFSV4_MINORVERSION);
+ if (opcntpp != NULL)
+ *opcntpp = tl;
+ *tl++ = txdr_unsigned(opcnt);
+ if (nfsv4_opflag[nfsv4_opmap[procnum].op].needscfh > 0) {
+ *tl = txdr_unsigned(NFSV4OP_PUTFH);
+ (void) nfsm_fhtom(nd, nfhp, fhlen, 0);
+ if (nfsv4_opflag[nfsv4_opmap[procnum].op].needscfh==2){
+ NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+ *tl = txdr_unsigned(NFSV4OP_GETATTR);
+ NFSWCCATTR_ATTRBIT(&attrbits);
+ (void) nfsrv_putattrbit(nd, &attrbits);
+ nd->nd_flag |= ND_V4WCCATTR;
+ }
+ NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+ }
+ *tl = txdr_unsigned(nfsv4_opmap[procnum].op);
+ } else {
+ (void) nfsm_fhtom(nd, nfhp, fhlen, 0);
+ }
+ NFSINCRGLOBAL(newnfsstats.rpccnt[procnum]);
+}
+
+#ifndef APPLE
+/*
+ * Copies a uio scatter/gather list to an mbuf chain.
+ * NOTE: can only handle iovcnt == 1
+ */
+APPLESTATIC void
+nfsm_uiombuf(struct nfsrv_descript *nd, struct uio *uiop, int siz)
+{
+ char *uiocp;
+ struct mbuf *mp, *mp2;
+ int xfer, left, mlen;
+ int uiosiz, clflg, rem;
+ char *cp, *tcp;
+
+#ifdef DIAGNOSTIC
+ if (uiop->uio_iovcnt != 1)
+ panic("nfsm_uiombuf: iovcnt != 1");
+#endif
+
+ if (siz > ncl_mbuf_mlen) /* or should it >= MCLBYTES ?? */
+ clflg = 1;
+ else
+ clflg = 0;
+ rem = NFSM_RNDUP(siz) - siz;
+ mp = mp2 = nd->nd_mb;
+ while (siz > 0) {
+ left = uiop->uio_iov->iov_len;
+ uiocp = uiop->uio_iov->iov_base;
+ if (left > siz)
+ left = siz;
+ uiosiz = left;
+ while (left > 0) {
+ mlen = M_TRAILINGSPACE(mp);
+ if (mlen == 0) {
+ if (clflg)
+ NFSMCLGET(mp, M_WAIT);
+ else
+ NFSMGET(mp);
+ mbuf_setlen(mp, 0);
+ mbuf_setnext(mp2, mp);
+ mp2 = mp;
+ mlen = M_TRAILINGSPACE(mp);
+ }
+ xfer = (left > mlen) ? mlen : left;
+#ifdef notdef
+ /* Not Yet.. */
+ if (uiop->uio_iov->iov_op != NULL)
+ (*(uiop->uio_iov->iov_op))
+ (uiocp, NFSMTOD(mp, caddr_t) + mbuf_len(mp),
+ xfer);
+ else
+#endif
+ if (uiop->uio_segflg == UIO_SYSSPACE)
+ NFSBCOPY(uiocp, NFSMTOD(mp, caddr_t) + mbuf_len(mp),
+ xfer);
+ else
+ copyin(CAST_USER_ADDR_T(uiocp), NFSMTOD(mp, caddr_t) +
+ mbuf_len(mp), xfer);
+ mbuf_setlen(mp, mbuf_len(mp) + xfer);
+ left -= xfer;
+ uiocp += xfer;
+ uiop->uio_offset += xfer;
+ uiop->uio_resid -= xfer;
+ }
+ tcp = (char *)uiop->uio_iov->iov_base;
+ tcp += uiosiz;
+ uiop->uio_iov->iov_base = (void *)tcp;
+ uiop->uio_iov->iov_len -= uiosiz;
+ siz -= uiosiz;
+ }
+ if (rem > 0) {
+ if (rem > M_TRAILINGSPACE(mp)) {
+ NFSMGET(mp);
+ mbuf_setlen(mp, 0);
+ mbuf_setnext(mp2, mp);
+ }
+ cp = NFSMTOD(mp, caddr_t) + mbuf_len(mp);
+ for (left = 0; left < rem; left++)
+ *cp++ = '\0';
+ mbuf_setlen(mp, mbuf_len(mp) + rem);
+ nd->nd_bpos = cp;
+ } else
+ nd->nd_bpos = NFSMTOD(mp, caddr_t) + mbuf_len(mp);
+ nd->nd_mb = mp;
+}
+#endif /* !APPLE */
+
+/*
+ * Load vnode attributes from the xdr file attributes.
+ * Returns EBADRPC if they can't be parsed, 0 otherwise.
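+ * (The NFSM_DISSECT() macro branches to the nfsmout label when the
+ * reply runs short, which is where the EBADRPC comes from.)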
+ */ +APPLESTATIC int +nfsm_loadattr(struct nfsrv_descript *nd, struct nfsvattr *nap) +{ + struct nfs_fattr *fp; + int error = 0; + + if (nd->nd_flag & ND_NFSV4) { + error = nfsv4_loadattr(nd, NULL, nap, NULL, NULL, 0, NULL, + NULL, NULL, NULL, NULL, 0, NULL, NULL, NULL, NULL, NULL); + } else if (nd->nd_flag & ND_NFSV3) { + NFSM_DISSECT(fp, struct nfs_fattr *, NFSX_V3FATTR); + nap->na_type = nfsv34tov_type(fp->fa_type); + nap->na_mode = fxdr_unsigned(u_short, fp->fa_mode); + nap->na_rdev = makedev(fxdr_unsigned(u_char, fp->fa3_rdev.specdata1), + fxdr_unsigned(u_char, fp->fa3_rdev.specdata2)); + nap->na_nlink = fxdr_unsigned(u_short, fp->fa_nlink); + nap->na_uid = fxdr_unsigned(uid_t, fp->fa_uid); + nap->na_gid = fxdr_unsigned(gid_t, fp->fa_gid); + nap->na_size = fxdr_hyper(&fp->fa3_size); + nap->na_blocksize = NFS_FABLKSIZE; + nap->na_bytes = fxdr_hyper(&fp->fa3_used); + nap->na_fileid = fxdr_unsigned(int32_t, + fp->fa3_fileid.nfsuquad[1]); + fxdr_nfsv3time(&fp->fa3_atime, &nap->na_atime); + fxdr_nfsv3time(&fp->fa3_ctime, &nap->na_ctime); + fxdr_nfsv3time(&fp->fa3_mtime, &nap->na_mtime); + nap->na_flags = 0; + nap->na_filerev = 0; + } else { + NFSM_DISSECT(fp, struct nfs_fattr *, NFSX_V2FATTR); + nap->na_type = nfsv2tov_type(fp->fa_type); + nap->na_mode = fxdr_unsigned(u_short, fp->fa_mode); + if (nap->na_type == VNON || nap->na_type == VREG) + nap->na_type = IFTOVT(nap->na_mode); + nap->na_rdev = fxdr_unsigned(dev_t, fp->fa2_rdev); + + /* + * Really ugly NFSv2 kludge. + */ + if (nap->na_type == VCHR && nap->na_rdev == ((dev_t)-1)) + nap->na_type = VFIFO; + nap->na_nlink = fxdr_unsigned(u_short, fp->fa_nlink); + nap->na_uid = fxdr_unsigned(uid_t, fp->fa_uid); + nap->na_gid = fxdr_unsigned(gid_t, fp->fa_gid); + nap->na_size = fxdr_unsigned(u_int32_t, fp->fa2_size); + nap->na_blocksize = fxdr_unsigned(int32_t, fp->fa2_blocksize); + nap->na_bytes = + (u_quad_t)fxdr_unsigned(int32_t, fp->fa2_blocks) * + NFS_FABLKSIZE; + nap->na_fileid = fxdr_unsigned(int32_t, fp->fa2_fileid); + fxdr_nfsv2time(&fp->fa2_atime, &nap->na_atime); + fxdr_nfsv2time(&fp->fa2_mtime, &nap->na_mtime); + nap->na_flags = 0; + nap->na_ctime.tv_sec = fxdr_unsigned(u_int32_t, + fp->fa2_ctime.nfsv2_sec); + nap->na_ctime.tv_nsec = 0; + nap->na_gen = fxdr_unsigned(u_int32_t,fp->fa2_ctime.nfsv2_usec); + nap->na_filerev = 0; + } +nfsmout: + return (error); +} + +/* + * This function finds the directory cookie that corresponds to the + * logical byte offset given. 
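+ * Cookies are stored NFSNUMCOOKIES per nfsdmap element in a list
+ * hanging off the nfsnode, so the search steps through the list in
+ * NFS_DIRBLKSIZ units. For example, an offset of 3 * NFS_DIRBLKSIZ
+ * maps to logical block 3, whose cookie is entry 2 (0 based) of the
+ * first nfsdmap element. If "add" is set, missing list elements are
+ * allocated as needed.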
+ */ +APPLESTATIC nfsuint64 * +nfscl_getcookie(struct nfsnode *np, off_t off, int add) +{ + struct nfsdmap *dp, *dp2; + int pos; + + pos = off / NFS_DIRBLKSIZ; + if (pos == 0) { +#ifdef DIAGNOSTIC + if (add) + panic("nfs getcookie add at 0"); +#endif + return (&nfs_nullcookie); + } + pos--; + dp = LIST_FIRST(&np->n_cookies); + if (!dp) { + if (add) { + MALLOC(dp, struct nfsdmap *, sizeof (struct nfsdmap), + M_NFSDIROFF, M_WAITOK); + dp->ndm_eocookie = 0; + LIST_INSERT_HEAD(&np->n_cookies, dp, ndm_list); + } else + return (NULL); + } + while (pos >= NFSNUMCOOKIES) { + pos -= NFSNUMCOOKIES; + if (LIST_NEXT(dp, ndm_list) != NULL) { + if (!add && dp->ndm_eocookie < NFSNUMCOOKIES && + pos >= dp->ndm_eocookie) + return (NULL); + dp = LIST_NEXT(dp, ndm_list); + } else if (add) { + MALLOC(dp2, struct nfsdmap *, sizeof (struct nfsdmap), + M_NFSDIROFF, M_WAITOK); + dp2->ndm_eocookie = 0; + LIST_INSERT_AFTER(dp, dp2, ndm_list); + dp = dp2; + } else + return (NULL); + } + if (pos >= dp->ndm_eocookie) { + if (add) + dp->ndm_eocookie = pos + 1; + else + return (NULL); + } + return (&dp->ndm_cookies[pos]); +} + +/* + * Gets a file handle out of an nfs reply sent to the client and returns + * the file handle and the file's attributes. + * For V4, it assumes that Getfh and Getattr Op's results are here. + */ +APPLESTATIC int +nfscl_mtofh(struct nfsrv_descript *nd, struct nfsfh **nfhpp, + struct nfsvattr *nap, int *attrflagp) +{ + u_int32_t *tl; + int error = 0, flag = 1; + + *nfhpp = NULL; + *attrflagp = 0; + /* + * First get the file handle and vnode. + */ + if (nd->nd_flag & ND_NFSV3) { + NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED); + flag = fxdr_unsigned(int, *tl); + } else if (nd->nd_flag & ND_NFSV4) { + NFSM_DISSECT(tl, u_int32_t *, 2 * NFSX_UNSIGNED); + } + if (flag) { + error = nfsm_getfh(nd, nfhpp); + if (error) + return (error); + } + + /* + * Now, get the attributes. + */ + if (nd->nd_flag & ND_NFSV4) { + NFSM_DISSECT(tl, u_int32_t *, 2 * NFSX_UNSIGNED); + } else if (nd->nd_flag & ND_NFSV3) { + NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED); + if (flag) { + flag = fxdr_unsigned(int, *tl); + } else if (fxdr_unsigned(int, *tl)) { + error = nfsm_advance(nd, NFSX_V3FATTR, -1); + if (error) + return (error); + } + } + if (flag) { + error = nfsm_loadattr(nd, nap); + if (!error) + *attrflagp = 1; + } +nfsmout: + return (error); +} + +/* + * Put a state Id in the mbuf list. + */ +APPLESTATIC void +nfsm_stateidtom(struct nfsrv_descript *nd, nfsv4stateid_t *stateidp, int flag) +{ + nfsv4stateid_t *st; + + NFSM_BUILD(st, nfsv4stateid_t *, NFSX_STATEID); + if (flag == NFSSTATEID_PUTALLZERO) { + st->seqid = 0; + st->other[0] = 0; + st->other[1] = 0; + st->other[2] = 0; + } else if (flag == NFSSTATEID_PUTALLONE) { + st->seqid = 0xffffffff; + st->other[0] = 0xffffffff; + st->other[1] = 0xffffffff; + st->other[2] = 0xffffffff; + } else { + st->seqid = stateidp->seqid; + st->other[0] = stateidp->other[0]; + st->other[1] = stateidp->other[1]; + st->other[2] = stateidp->other[2]; + } +} + +/* + * Initialize the owner/delegation sleep lock. + */ +APPLESTATIC void +nfscl_lockinit(struct nfsv4lock *lckp) +{ + + lckp->nfslock_usecnt = 0; + lckp->nfslock_lock = 0; +} + +/* + * Get an exclusive lock. (Not needed for OpenBSD4, since there is only one + * thread for each posix process in the kernel.) + */ +APPLESTATIC void +nfscl_lockexcl(struct nfsv4lock *lckp, void *mutex) +{ + int igotlock; + + do { + igotlock = nfsv4_lock(lckp, 1, NULL, mutex); + } while (!igotlock); +} + +/* + * Release an exclusive lock. 
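+ * (The lock's use count is not adjusted here; dropping use count
+ * references is what nfscl_lockderef() below is for.)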
+ */
+APPLESTATIC void
+nfscl_lockunlock(struct nfsv4lock *lckp)
+{
+
+ nfsv4_unlock(lckp, 0);
+}
+
+/*
+ * Called to dereference a lock on a stateid (delegation or open owner).
+ */
+APPLESTATIC void
+nfscl_lockderef(struct nfsv4lock *lckp)
+{
+
+ NFSLOCKCLSTATE();
+ lckp->nfslock_usecnt--;
+ if (lckp->nfslock_usecnt == 0 && (lckp->nfslock_lock & NFSV4LOCK_WANTED)) {
+ lckp->nfslock_lock &= ~NFSV4LOCK_WANTED;
+ wakeup((caddr_t)lckp);
+ }
+ NFSUNLOCKCLSTATE();
+}
+
diff --git a/sys/fs/nfsclient/nfs_clkrpc.c b/sys/fs/nfsclient/nfs_clkrpc.c
new file mode 100644
index 0000000..7cc2a60
--- /dev/null
+++ b/sys/fs/nfsclient/nfs_clkrpc.c
@@ -0,0 +1,297 @@
+/*-
+ * Copyright (c) 1989, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Rick Macklem at The University of Guelph.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_inet6.h"
+#include "opt_kgssapi.h"
+
+#include <fs/nfs/nfsport.h>
+
+#include <rpc/rpc.h>
+#include <rpc/rpcsec_gss.h>
+#include <rpc/replay.h>
+
+
+NFSDLOCKMUTEX;
+
+SYSCTL_DECL(_vfs_newnfs);
+
+SVCPOOL *nfscbd_pool;
+
+static int nfs_cbproc(struct nfsrv_descript *, u_int32_t);
+
+extern u_long sb_max_adj;
+extern int nfs_numnfscbd;
+
+/*
+ * NFS client system calls for handling callbacks.
+ */
+
+/*
+ * Handles server-to-client callbacks.
+ */
+static void
+nfscb_program(struct svc_req *rqst, SVCXPRT *xprt)
+{
+ struct nfsrv_descript nd;
+ int cacherep;
+
+ memset(&nd, 0, sizeof(nd));
+ if (rqst->rq_proc != NFSPROC_NULL &&
+ rqst->rq_proc != NFSV4PROC_CBCOMPOUND) {
+ svcerr_noproc(rqst);
+ svc_freereq(rqst);
+ return;
+ }
+ nd.nd_procnum = rqst->rq_proc;
+ nd.nd_flag = (ND_NFSCB | ND_NFSV4);
+
+ /*
+ * Note: we want rq_addr, not svc_getrpccaller for nd_nam2 -
+ * NFS_SRVMAXDATA uses a NULL value for nd_nam2 to detect TCP
+ * mounts.
+ */ + nd.nd_mrep = rqst->rq_args; + rqst->rq_args = NULL; + newnfs_realign(&nd.nd_mrep); + nd.nd_md = nd.nd_mrep; + nd.nd_dpos = mtod(nd.nd_md, caddr_t); + nd.nd_nam = svc_getrpccaller(rqst); + nd.nd_nam2 = rqst->rq_addr; + nd.nd_mreq = NULL; + nd.nd_cred = NULL; + + if (nd.nd_procnum != NFSPROC_NULL) { + if (!svc_getcred(rqst, &nd.nd_cred, &nd.nd_credflavor)) { + svcerr_weakauth(rqst); + svc_freereq(rqst); + m_freem(nd.nd_mrep); + return; + } +#ifdef notyet +#ifdef MAC + mac_cred_associate_nfsd(nd.nd_cred); +#endif +#endif + cacherep = nfs_cbproc(&nd, rqst->rq_xid); + } else { + NFSMGET(nd.nd_mreq); + nd.nd_mreq->m_len = 0; + cacherep = RC_REPLY; + } + if (nd.nd_mrep != NULL) + m_freem(nd.nd_mrep); + + if (nd.nd_cred != NULL) + crfree(nd.nd_cred); + + if (cacherep == RC_DROPIT) { + if (nd.nd_mreq != NULL) + m_freem(nd.nd_mreq); + svc_freereq(rqst); + return; + } + + if (nd.nd_mreq == NULL) { + svcerr_decode(rqst); + svc_freereq(rqst); + return; + } + + if (nd.nd_repstat & NFSERR_AUTHERR) { + svcerr_auth(rqst, nd.nd_repstat & ~NFSERR_AUTHERR); + if (nd.nd_mreq != NULL) + m_freem(nd.nd_mreq); + } else if (!svc_sendreply_mbuf(rqst, nd.nd_mreq)) { + svcerr_systemerr(rqst); + } + svc_freereq(rqst); +} + +/* + * Check the cache and, optionally, do the RPC. + * Return the appropriate cache response. + */ +static int +nfs_cbproc(struct nfsrv_descript *nd, u_int32_t xid) +{ + struct thread *td = curthread; + int cacherep; + + if (nd->nd_nam2 == NULL) + nd->nd_flag |= ND_STREAMSOCK; + + nfscl_docb(nd, td); + if (nd->nd_repstat == NFSERR_DONTREPLY) + cacherep = RC_DROPIT; + else + cacherep = RC_REPLY; + return (cacherep); +} + +/* + * Adds a socket to the list for servicing by nfscbds. + */ +int +nfscbd_addsock(struct file *fp) +{ + int siz; + struct socket *so; + int error; + SVCXPRT *xprt; + + so = fp->f_data; + + siz = sb_max_adj; + error = soreserve(so, siz, siz); + if (error) + return (error); + + /* + * Steal the socket from userland so that it doesn't close + * unexpectedly. + */ + if (so->so_type == SOCK_DGRAM) + xprt = svc_dg_create(nfscbd_pool, so, 0, 0); + else + xprt = svc_vc_create(nfscbd_pool, so, 0, 0); + if (xprt) { + fp->f_ops = &badfileops; + fp->f_data = NULL; + svc_reg(xprt, NFS_CALLBCKPROG, NFSV4_CBVERS, nfscb_program, + NULL); + } + + return (0); +} + +/* + * Called by nfssvc() for nfscbds. Just loops around servicing rpc requests + * until it is killed by a signal. + * + * For now, only support callbacks via RPCSEC_GSS if there is a KerberosV + * keytab entry with a host based entry in it on the client. (I'm not even + * sure that getting Acceptor credentials for a user principal with a + * credentials cache is possible, but even if it is, major changes to the + * kgssapi would be required.) + * I don't believe that this is a serious limitation since, as of 2009, most + * NFSv4 servers supporting callbacks are using AUTH_SYS for callbacks even + * when the client is using RPCSEC_GSS. (This BSD server uses AUTH_SYS + * for callbacks unless nfsrv_gsscallbackson is set non-zero.) + */ +int +nfscbd_nfsd(struct thread *td, struct nfsd_nfscbd_args *args) +{ +#ifdef KGSSAPI + char principal[128]; + int error; +#endif + +#ifdef KGSSAPI + if (args != NULL) { + error = copyinstr(args->principal, principal, + sizeof(principal), NULL); + if (error) + return (error); + } else { + principal[0] = '\0'; + } +#endif + + /* + * Only the first nfsd actually does any work. The RPC code + * adds threads to it as needed. Any extra processes offered + * by nfsd just exit. 
If nfsd is new enough, it will call us + * once with a structure that specifies how many threads to + * use. + */ + NFSD_LOCK(); + if (nfs_numnfscbd == 0) { + nfs_numnfscbd++; + + NFSD_UNLOCK(); + +#ifdef KGSSAPI + if (principal[0] != '\0') + rpc_gss_set_svc_name(principal, "kerberosv5", + GSS_C_INDEFINITE, NFS_CALLBCKPROG, NFSV4_CBVERS); +#endif + + nfscbd_pool->sp_minthreads = 4; + nfscbd_pool->sp_maxthreads = 4; + + svc_run(nfscbd_pool); + +#ifdef KGSSAPI + rpc_gss_clear_svc_name(NFS_CALLBCKPROG, NFSV4_CBVERS); +#endif + + NFSD_LOCK(); + nfs_numnfscbd--; + nfsrvd_cbinit(1); + } + NFSD_UNLOCK(); + + return (0); +} + +/* + * Initialize the data structures for the server. + * Handshake with any new nfsds starting up to avoid any chance of + * corruption. + */ +void +nfsrvd_cbinit(int terminating) +{ + + NFSD_LOCK_ASSERT(); + + if (terminating) { + NFSD_UNLOCK(); + svcpool_destroy(nfscbd_pool); + nfscbd_pool = NULL; + NFSD_LOCK(); + } + + NFSD_UNLOCK(); + + nfscbd_pool = svcpool_create("nfscbd", SYSCTL_STATIC_CHILDREN(_vfs_newnfs)); + nfscbd_pool->sp_rcache = NULL; + nfscbd_pool->sp_assign = NULL; + nfscbd_pool->sp_done = NULL; + + NFSD_LOCK(); +} + diff --git a/sys/fs/nfsclient/nfs_cllock.c b/sys/fs/nfsclient/nfs_cllock.c new file mode 100644 index 0000000..a0b4e7c --- /dev/null +++ b/sys/fs/nfsclient/nfs_cllock.c @@ -0,0 +1,396 @@ +/*- + * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Berkeley Software Design Inc's name may not be used to endorse or + * promote products derived from this software without specific prior + * written permission. + * + * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * from BSDI nfs_lock.c,v 2.4 1998/12/14 23:49:56 jch Exp + */ + +#include <sys/cdefs.h> +__FBSDID("$FreeBSD$"); + +#include <sys/param.h> +#include <sys/systm.h> +#include <sys/conf.h> +#include <sys/fcntl.h> +#include <sys/kernel.h> /* for hz */ +#include <sys/limits.h> +#include <sys/lock.h> +#include <sys/malloc.h> +#include <sys/lockf.h> /* for hz */ /* Must come after sys/malloc.h */ +#include <sys/mbuf.h> +#include <sys/mount.h> +#include <sys/namei.h> +#include <sys/priv.h> +#include <sys/proc.h> +#include <sys/resourcevar.h> +#include <sys/socket.h> +#include <sys/socket.h> +#include <sys/unistd.h> +#include <sys/vnode.h> + +#include <net/if.h> + +#include <fs/nfs/nfsport.h> +#include <fs/nfsclient/nfsmount.h> +#include <fs/nfsclient/nfs.h> +#include <fs/nfsclient/nfsnode.h> +#include <fs/nfsclient/nfs_lock.h> +#include <fs/nfsclient/nlminfo.h> + +extern void (*nlminfo_release_p)(struct proc *p); + +MALLOC_DEFINE(M_NEWNFSLOCK, "newnfsclient_lock", "NEWNFS lock request"); +MALLOC_DEFINE(M_NEWNLMINFO, "newnfsclient_nlminfo", "NEWNFS lock process structure"); + +static int nfslockdans(struct thread *td, struct lockd_ans *ansp); +static void nlminfo_release(struct proc *p); +/* + * -------------------------------------------------------------------- + * A miniature device driver which the userland uses to talk to us. + * + */ + +static struct cdev *nfslock_dev; +static struct mtx nfslock_mtx; +static int nfslock_isopen; +static TAILQ_HEAD(,__lock_msg) nfslock_list; + +static int +nfslock_open(struct cdev *dev, int oflags, int devtype, struct thread *td) +{ + int error; + + error = priv_check(td, PRIV_NFS_LOCKD); + if (error) + return (error); + + mtx_lock(&nfslock_mtx); + if (!nfslock_isopen) { + error = 0; + nfslock_isopen = 1; + } else { + error = EOPNOTSUPP; + } + mtx_unlock(&nfslock_mtx); + + return (error); +} + +static int +nfslock_close(struct cdev *dev, int fflag, int devtype, struct thread *td) +{ + struct __lock_msg *lm; + + mtx_lock(&nfslock_mtx); + nfslock_isopen = 0; + while (!TAILQ_EMPTY(&nfslock_list)) { + lm = TAILQ_FIRST(&nfslock_list); + /* XXX: answer request */ + TAILQ_REMOVE(&nfslock_list, lm, lm_link); + free(lm, M_NEWNFSLOCK); + } + mtx_unlock(&nfslock_mtx); + return (0); +} + +static int +nfslock_read(struct cdev *dev, struct uio *uio, int ioflag) +{ + int error; + struct __lock_msg *lm; + + if (uio->uio_resid != sizeof *lm) + return (EOPNOTSUPP); + lm = NULL; + error = 0; + mtx_lock(&nfslock_mtx); + while (TAILQ_EMPTY(&nfslock_list)) { + error = msleep(&nfslock_list, &nfslock_mtx, PSOCK | PCATCH, + "nfslockd", 0); + if (error) + break; + } + if (!error) { + lm = TAILQ_FIRST(&nfslock_list); + TAILQ_REMOVE(&nfslock_list, lm, lm_link); + } + mtx_unlock(&nfslock_mtx); + if (!error) { + error = uiomove(lm, sizeof *lm, uio); + free(lm, M_NEWNFSLOCK); + } + return (error); +} + +static int +nfslock_write(struct cdev *dev, struct uio *uio, int ioflag) +{ + struct lockd_ans la; + int error; + + if (uio->uio_resid != sizeof la) + return (EOPNOTSUPP); + error = uiomove(&la, sizeof la, uio); + if (!error) + error = nfslockdans(curthread, &la); + return (error); +} + +static int +nfslock_send(struct __lock_msg *lm) +{ + struct __lock_msg *lm2; + int error; + + error = 0; + lm2 = malloc(sizeof *lm2, M_NEWNFSLOCK, M_WAITOK); + mtx_lock(&nfslock_mtx); + if (nfslock_isopen) { + memcpy(lm2, lm, sizeof *lm2); + TAILQ_INSERT_TAIL(&nfslock_list, lm2, lm_link); + wakeup(&nfslock_list); + } else { + error = EOPNOTSUPP; + } + mtx_unlock(&nfslock_mtx); + if (error) + free(lm2, 
M_NEWNFSLOCK);
+ return (error);
+}
+
+static struct cdevsw nfslock_cdevsw = {
+ .d_version = D_VERSION,
+ .d_open = nfslock_open,
+ .d_close = nfslock_close,
+ .d_read = nfslock_read,
+ .d_write = nfslock_write,
+ .d_name = "nfslock"
+};
+
+static int
+newnfslock_modevent(module_t mod __unused, int type, void *data __unused)
+{
+
+ switch (type) {
+ case MOD_LOAD:
+ if (bootverbose)
+ printf("nfslock: pseudo-device\n");
+ mtx_init(&nfslock_mtx, "nfslock", NULL, MTX_DEF);
+ TAILQ_INIT(&nfslock_list);
+ nlminfo_release_p = nlminfo_release;
+ nfslock_dev = make_dev(&nfslock_cdevsw, 0,
+ UID_ROOT, GID_KMEM, 0600, _PATH_NFSLCKDEV);
+ return (0);
+ default:
+ return (EOPNOTSUPP);
+ }
+}
+
+DEV_MODULE(newnfslock, newnfslock_modevent, NULL);
+MODULE_VERSION(newnfslock, 1);
+
+
+/*
+ * XXX
+ * We have to let the process know if the call succeeded. I'm using an extra
+ * field in the p_nlminfo field in the proc structure, as it is already used
+ * for lockd stuff.
+ */
+
+/*
+ * nfs_advlock --
+ * NFS advisory byte-level locks.
+ *
+ * The vnode shall be (shared) locked on entry; it is
+ * unconditionally unlocked afterwards.
+ */
+int
+ncl_dolock(struct vop_advlock_args *ap)
+{
+ LOCKD_MSG msg;
+ struct thread *td;
+ struct vnode *vp;
+ int error;
+ struct flock *fl;
+ struct proc *p;
+
+ td = curthread;
+ p = td->td_proc;
+
+ vp = ap->a_vp;
+ fl = ap->a_fl;
+
+ ASSERT_VOP_LOCKED(vp, "nfs_dolock");
+
+ bcopy(VFSTONFS(vp->v_mount)->nm_nam, &msg.lm_addr,
+ min(sizeof msg.lm_addr, VFSTONFS(vp->v_mount)->nm_nam->sa_len));
+ msg.lm_fh_len = NFS_ISV3(vp) ? VTONFS(vp)->n_fhsize : NFSX_V2FH;
+ bcopy(VTONFS(vp)->n_fhp->nfh_fh, msg.lm_fh, msg.lm_fh_len);
+ msg.lm_nfsv3 = NFS_ISV3(vp);
+ VOP_UNLOCK(vp, 0);
+
+ /*
+ * The NLM protocol doesn't allow the server to return an error
+ * on ranges, so we validate them here.
+ */
+ if (fl->l_whence != SEEK_END) {
+ if ((fl->l_whence != SEEK_CUR && fl->l_whence != SEEK_SET) ||
+ fl->l_start < 0 ||
+ (fl->l_len < 0 &&
+ (fl->l_start == 0 || fl->l_start + fl->l_len < 0)))
+ return (EINVAL);
+ if (fl->l_len > 0 &&
+ (fl->l_len - 1 > OFF_MAX - fl->l_start))
+ return (EOVERFLOW);
+ }
+
+ /*
+ * Fill in the information structure.
+ */
+ msg.lm_version = LOCKD_MSG_VERSION;
+ msg.lm_msg_ident.pid = p->p_pid;
+
+ mtx_lock(&Giant);
+ /*
+ * if there is no nfsowner table yet, allocate one.
+ */
+ if (p->p_nlminfo == NULL) {
+ p->p_nlminfo = malloc(sizeof(struct nlminfo),
+ M_NEWNLMINFO, M_WAITOK | M_ZERO);
+ p->p_nlminfo->pid_start = p->p_stats->p_start;
+ timevaladd(&p->p_nlminfo->pid_start, &boottime);
+ }
+ msg.lm_msg_ident.pid_start = p->p_nlminfo->pid_start;
+ msg.lm_msg_ident.msg_seq = ++(p->p_nlminfo->msg_seq);
+
+ msg.lm_fl = *fl;
+ msg.lm_wait = ap->a_flags & F_WAIT;
+ msg.lm_getlk = ap->a_op == F_GETLK;
+ cru2x(td->td_ucred, &msg.lm_cred);
+
+ for (;;) {
+ error = nfslock_send(&msg);
+ if (error)
+ goto out;
+
+ /* Unlocks succeed immediately. */
+ if (fl->l_type == F_UNLCK)
+ goto out;
+
+ /*
+ * Retry after 20 seconds if we haven't gotten a response yet.
+ * This number was picked out of thin air... but is longer
+ * than even a reasonably loaded system should take (at least
+ * on a local network). XXX Probably should use a back-off
+ * scheme.
+ *
+ * XXX: No PCATCH here since we currently have no useful
+ * way to signal to the userland rpc.lockd that the request
+ * has been aborted. Once the rpc.lockd implementation
+ * can handle aborts, and we report them properly,
+ * PCATCH can be put back.
In the mean time, if we did + * permit aborting, the lock attempt would "get lost" + * and the lock would get stuck in the locked state. + */ + error = tsleep(p->p_nlminfo, PUSER, "lockd", 20*hz); + if (error != 0) { + if (error == EWOULDBLOCK) { + /* + * We timed out, so we rewrite the request + * to the fifo. + */ + continue; + } + + break; + } + + if (msg.lm_getlk && p->p_nlminfo->retcode == 0) { + if (p->p_nlminfo->set_getlk_pid) { + fl->l_sysid = 0; /* XXX */ + fl->l_pid = p->p_nlminfo->getlk_pid; + } else { + fl->l_type = F_UNLCK; + } + } + error = p->p_nlminfo->retcode; + break; + } + out: + mtx_unlock(&Giant); + return (error); +} + +/* + * nfslockdans -- + * NFS advisory byte-level locks answer from the lock daemon. + */ +static int +nfslockdans(struct thread *td, struct lockd_ans *ansp) +{ + struct proc *targetp; + + /* the version should match, or we're out of sync */ + if (ansp->la_vers != LOCKD_ANS_VERSION) + return (EINVAL); + + /* Find the process, set its return errno and wake it up. */ + if ((targetp = pfind(ansp->la_msg_ident.pid)) == NULL) + return (ESRCH); + + /* verify the pid hasn't been reused (if we can), and it isn't waiting + * for an answer from a more recent request. We return an EPIPE if + * the match fails, because we've already used ESRCH above, and this + * is sort of like writing on a pipe after the reader has closed it. + */ + if (targetp->p_nlminfo == NULL || + ((ansp->la_msg_ident.msg_seq != -1) && + (timevalcmp(&targetp->p_nlminfo->pid_start, + &ansp->la_msg_ident.pid_start, !=) || + targetp->p_nlminfo->msg_seq != ansp->la_msg_ident.msg_seq))) { + PROC_UNLOCK(targetp); + return (EPIPE); + } + + targetp->p_nlminfo->retcode = ansp->la_errno; + targetp->p_nlminfo->set_getlk_pid = ansp->la_set_getlk_pid; + targetp->p_nlminfo->getlk_pid = ansp->la_getlk_pid; + + wakeup(targetp->p_nlminfo); + + PROC_UNLOCK(targetp); + return (0); +} + +/* + * Free nlminfo attached to process. + */ +static void +nlminfo_release(struct proc *p) +{ + free(p->p_nlminfo, M_NEWNLMINFO); + p->p_nlminfo = NULL; +} diff --git a/sys/fs/nfsclient/nfs_clnfsiod.c b/sys/fs/nfsclient/nfs_clnfsiod.c new file mode 100644 index 0000000..f38aed8 --- /dev/null +++ b/sys/fs/nfsclient/nfs_clnfsiod.c @@ -0,0 +1,308 @@ +/*- + * Copyright (c) 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Rick Macklem at The University of Guelph. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * from nfs_syscalls.c 8.5 (Berkeley) 3/30/95 + */ + +#include <sys/cdefs.h> +__FBSDID("$FreeBSD$"); + +#include <sys/param.h> +#include <sys/systm.h> +#include <sys/sysproto.h> +#include <sys/kernel.h> +#include <sys/sysctl.h> +#include <sys/file.h> +#include <sys/filedesc.h> +#include <sys/vnode.h> +#include <sys/malloc.h> +#include <sys/mount.h> +#include <sys/proc.h> +#include <sys/bio.h> +#include <sys/buf.h> +#include <sys/mbuf.h> +#include <sys/socket.h> +#include <sys/socketvar.h> +#include <sys/domain.h> +#include <sys/protosw.h> +#include <sys/namei.h> +#include <sys/unistd.h> +#include <sys/kthread.h> +#include <sys/fcntl.h> +#include <sys/lockf.h> +#include <sys/mutex.h> + +#include <netinet/in.h> +#include <netinet/tcp.h> + +#include <fs/nfs/nfsport.h> +#include <fs/nfsclient/nfsmount.h> +#include <fs/nfsclient/nfs.h> +#include <fs/nfsclient/nfsnode.h> +#include <fs/nfsclient/nfs_lock.h> + +extern struct mtx ncl_iod_mutex; + +int ncl_numasync; +struct proc *ncl_iodwant[NFS_MAXRAHEAD]; +struct nfsmount *ncl_iodmount[NFS_MAXRAHEAD]; + +static void nfssvc_iod(void *); + +static int nfs_asyncdaemon[NFS_MAXRAHEAD]; + +SYSCTL_DECL(_vfs_newnfs); + +/* Maximum number of seconds a nfsiod kthread will sleep before exiting */ +static unsigned int ncl_iodmaxidle = 120; +SYSCTL_UINT(_vfs_newnfs, OID_AUTO, iodmaxidle, CTLFLAG_RW, &ncl_iodmaxidle, 0, ""); + +/* Maximum number of nfsiod kthreads */ +unsigned int ncl_iodmax = NFS_MAXRAHEAD; + +/* Minimum number of nfsiod kthreads to keep as spares */ +static unsigned int nfs_iodmin = 0; + +static int +sysctl_iodmin(SYSCTL_HANDLER_ARGS) +{ + int error, i; + int newmin; + + newmin = nfs_iodmin; + error = sysctl_handle_int(oidp, &newmin, 0, req); + if (error || (req->newptr == NULL)) + return (error); + mtx_lock(&ncl_iod_mutex); + if (newmin > ncl_iodmax) { + error = EINVAL; + goto out; + } + nfs_iodmin = newmin; + if (ncl_numasync >= nfs_iodmin) + goto out; + /* + * If the current number of nfsiod is lower + * than the new minimum, create some more. + */ + for (i = nfs_iodmin - ncl_numasync; i > 0; i--) + ncl_nfsiodnew(); +out: + mtx_unlock(&ncl_iod_mutex); + return (0); +} +SYSCTL_PROC(_vfs_newnfs, OID_AUTO, iodmin, CTLTYPE_UINT | CTLFLAG_RW, 0, + sizeof (nfs_iodmin), sysctl_iodmin, "IU", ""); + + +static int +sysctl_iodmax(SYSCTL_HANDLER_ARGS) +{ + int error, i; + int iod, newmax; + + newmax = ncl_iodmax; + error = sysctl_handle_int(oidp, &newmax, 0, req); + if (error || (req->newptr == NULL)) + return (error); + if (newmax > NFS_MAXRAHEAD) + return (EINVAL); + mtx_lock(&ncl_iod_mutex); + ncl_iodmax = newmax; + if (ncl_numasync <= ncl_iodmax) + goto out; + /* + * If there are some asleep nfsiods that should + * exit, wakeup() them so that they check ncl_iodmax + * and exit. Those who are active will exit as + * soon as they finish I/O. 
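+ * The highest numbered nfsiods are awakened first, since nfssvc_iod()
+ * compares its own index against ncl_iodmax to decide whether to
+ * exit.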
+ */ + iod = ncl_numasync - 1; + for (i = 0; i < ncl_numasync - ncl_iodmax; i++) { + if (ncl_iodwant[iod]) + wakeup(&ncl_iodwant[iod]); + iod--; + } +out: + mtx_unlock(&ncl_iod_mutex); + return (0); +} +SYSCTL_PROC(_vfs_newnfs, OID_AUTO, iodmax, CTLTYPE_UINT | CTLFLAG_RW, 0, + sizeof (ncl_iodmax), sysctl_iodmax, "IU", ""); + +int +ncl_nfsiodnew(void) +{ + int error, i; + int newiod; + + if (ncl_numasync >= ncl_iodmax) + return (-1); + newiod = -1; + for (i = 0; i < ncl_iodmax; i++) + if (nfs_asyncdaemon[i] == 0) { + nfs_asyncdaemon[i]++; + newiod = i; + break; + } + if (newiod == -1) + return (-1); + mtx_unlock(&ncl_iod_mutex); + error = kproc_create(nfssvc_iod, nfs_asyncdaemon + i, NULL, RFHIGHPID, + 0, "nfsiod %d", newiod); + mtx_lock(&ncl_iod_mutex); + if (error) + return (-1); + ncl_numasync++; + return (newiod); +} + +static void +nfsiod_setup(void *dummy) +{ + int i; + int error; + + TUNABLE_INT_FETCH("vfs.newnfs.iodmin", &nfs_iodmin); + nfscl_init(); + mtx_lock(&ncl_iod_mutex); + /* Silently limit the start number of nfsiod's */ + if (nfs_iodmin > NFS_MAXRAHEAD) + nfs_iodmin = NFS_MAXRAHEAD; + + for (i = 0; i < nfs_iodmin; i++) { + error = ncl_nfsiodnew(); + if (error == -1) + panic("newnfsiod_setup: ncl_nfsiodnew failed"); + } + mtx_unlock(&ncl_iod_mutex); +} +SYSINIT(newnfsiod, SI_SUB_KTHREAD_IDLE, SI_ORDER_ANY, nfsiod_setup, NULL); + +static int nfs_defect = 0; +SYSCTL_INT(_vfs_newnfs, OID_AUTO, defect, CTLFLAG_RW, &nfs_defect, 0, ""); + +/* + * Asynchronous I/O daemons for client nfs. + * They do read-ahead and write-behind operations on the block I/O cache. + * Returns if we hit the timeout defined by the iodmaxidle sysctl. + */ +static void +nfssvc_iod(void *instance) +{ + struct buf *bp; + struct nfsmount *nmp; + int myiod, timo; + int error = 0; + + mtx_lock(&ncl_iod_mutex); + myiod = (int *)instance - nfs_asyncdaemon; + /* + * Main loop + */ + for (;;) { + while (((nmp = ncl_iodmount[myiod]) == NULL) + || !TAILQ_FIRST(&nmp->nm_bufq)) { + if (myiod >= ncl_iodmax) + goto finish; + if (nmp) + nmp->nm_bufqiods--; + ncl_iodwant[myiod] = curthread->td_proc; + ncl_iodmount[myiod] = NULL; + /* + * Always keep at least nfs_iodmin kthreads. + */ + timo = (myiod < nfs_iodmin) ? 
0 : ncl_iodmaxidle * hz;
+ error = msleep(&ncl_iodwant[myiod], &ncl_iod_mutex, PWAIT | PCATCH,
+ "-", timo);
+ if (error) {
+ nmp = ncl_iodmount[myiod];
+ /*
+ * Rechecking the nm_bufq closes a rare race where the
+ * nfsiod is woken up at the exact time the idle timeout
+ * fires.
+ */
+ if (nmp && TAILQ_FIRST(&nmp->nm_bufq))
+ error = 0;
+ break;
+ }
+ }
+ if (error)
+ break;
+ while ((bp = TAILQ_FIRST(&nmp->nm_bufq)) != NULL) {
+
+ /* Take one off the front of the list */
+ TAILQ_REMOVE(&nmp->nm_bufq, bp, b_freelist);
+ nmp->nm_bufqlen--;
+ if (nmp->nm_bufqwant && nmp->nm_bufqlen <= ncl_numasync) {
+ nmp->nm_bufqwant = 0;
+ wakeup(&nmp->nm_bufq);
+ }
+ mtx_unlock(&ncl_iod_mutex);
+ if (bp->b_flags & B_DIRECT) {
+ KASSERT((bp->b_iocmd == BIO_WRITE), ("nfssvc_iod: BIO_WRITE not set"));
+ (void)ncl_doio_directwrite(bp);
+ } else {
+ if (bp->b_iocmd == BIO_READ)
+ (void) ncl_doio(bp->b_vp, bp, bp->b_rcred, NULL);
+ else
+ (void) ncl_doio(bp->b_vp, bp, bp->b_wcred, NULL);
+ }
+ mtx_lock(&ncl_iod_mutex);
+ /*
+ * If there is more than one iod on this mount, then defect
+ * so that the iods can be shared out fairly between the mounts.
+ */
+ if (nfs_defect && nmp->nm_bufqiods > 1) {
+ NFS_DPF(ASYNCIO,
+ ("nfssvc_iod: iod %d defecting from mount %p\n",
+ myiod, nmp));
+ ncl_iodmount[myiod] = NULL;
+ nmp->nm_bufqiods--;
+ break;
+ }
+ }
+ }
+finish:
+ nfs_asyncdaemon[myiod] = 0;
+ if (nmp)
+ nmp->nm_bufqiods--;
+ ncl_iodwant[myiod] = NULL;
+ ncl_iodmount[myiod] = NULL;
+ /* Someone may be waiting for the last nfsiod to terminate. */
+ if (--ncl_numasync == 0)
+ wakeup(&ncl_numasync);
+ mtx_unlock(&ncl_iod_mutex);
+ if ((error == 0) || (error == EWOULDBLOCK))
+ kproc_exit(0);
+ /* Abnormal termination */
+ kproc_exit(1);
+}
diff --git a/sys/fs/nfsclient/nfs_clnode.c b/sys/fs/nfsclient/nfs_clnode.c
new file mode 100644
index 0000000..188b72b
--- /dev/null
+++ b/sys/fs/nfsclient/nfs_clnode.c
@@ -0,0 +1,283 @@
+/*-
+ * Copyright (c) 1989, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Rick Macklem at The University of Guelph.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * from nfs_node.c 8.6 (Berkeley) 5/22/95 + */ + +#include <sys/cdefs.h> +__FBSDID("$FreeBSD$"); + +#include <sys/param.h> +#include <sys/systm.h> +#include <sys/lock.h> +#include <sys/malloc.h> +#include <sys/mount.h> +#include <sys/namei.h> +#include <sys/proc.h> +#include <sys/socket.h> +#include <sys/sysctl.h> +#include <sys/vnode.h> + +#include <vm/uma.h> + +#include <fs/nfs/nfsport.h> +#include <fs/nfsclient/nfsnode.h> +#include <fs/nfsclient/nfsmount.h> +#include <fs/nfsclient/nfs.h> + +extern struct vop_vector newnfs_vnodeops; +extern struct buf_ops buf_ops_newnfs; +MALLOC_DECLARE(M_NEWNFSREQ); + +uma_zone_t newnfsnode_zone; +vop_reclaim_t *ncl_reclaim_p = NULL; + +void +ncl_nhinit(void) +{ + + newnfsnode_zone = uma_zcreate("NCLNODE", sizeof(struct nfsnode), NULL, + NULL, NULL, NULL, UMA_ALIGN_PTR, 0); +} + +void +ncl_nhuninit(void) +{ + uma_zdestroy(newnfsnode_zone); +} + +/* + * ONLY USED FOR THE ROOT DIRECTORY. nfscl_nget() does the rest. If this + * function is going to be used to get Regular Files, code must be added + * to fill in the "struct nfsv4node". + * Look up a vnode/nfsnode by file handle. + * Callers must check for mount points!! + * In all cases, a pointer to a + * nfsnode structure is returned. + */ +int +ncl_nget(struct mount *mntp, u_int8_t *fhp, int fhsize, struct nfsnode **npp) +{ + struct thread *td = curthread; /* XXX */ + struct nfsnode *np; + struct vnode *vp; + struct vnode *nvp; + int error; + u_int hash; + struct nfsmount *nmp; + struct nfsfh *nfhp; + + nmp = VFSTONFS(mntp); + *npp = NULL; + + hash = fnv_32_buf(fhp, fhsize, FNV1_32_INIT); + + MALLOC(nfhp, struct nfsfh *, sizeof (struct nfsfh) + fhsize, + M_NFSFH, M_WAITOK); + bcopy(fhp, &nfhp->nfh_fh[0], fhsize); + nfhp->nfh_len = fhsize; + error = vfs_hash_get(mntp, hash, LK_EXCLUSIVE, + td, &nvp, newnfs_vncmpf, nfhp); + FREE(nfhp, M_NFSFH); + if (error) + return (error); + if (nvp != NULL) { + *npp = VTONFS(nvp); + return (0); + } + + /* + * Allocate before getnewvnode since doing so afterward + * might cause a bogus v_data pointer to get dereferenced + * elsewhere if zalloc should block. + */ + np = uma_zalloc(newnfsnode_zone, M_WAITOK | M_ZERO); + + error = getnewvnode("newnfs", mntp, &newnfs_vnodeops, &nvp); + if (error) { + uma_zfree(newnfsnode_zone, np); + return (error); + } + vp = nvp; + vp->v_bufobj.bo_ops = &buf_ops_newnfs; + vp->v_data = np; + np->n_vnode = vp; + /* + * Initialize the mutex even if the vnode is going to be a loser. + * This simplifies the logic in reclaim, which can then unconditionally + * destroy the mutex (in the case of the loser, or if hash_insert + * happened to return an error no special casing is needed). + */ + mtx_init(&np->n_mtx, "NEWNFSnode lock", NULL, MTX_DEF | MTX_DUPOK); + /* + * NFS supports recursive and shared locking. + */ + VN_LOCK_AREC(vp); + VN_LOCK_ASHARE(vp); + /* + * Are we getting the root? 
If so, make sure the vnode flags + * are correct + */ + if ((fhsize == nmp->nm_fhsize) && + !bcmp(fhp, nmp->nm_fh, fhsize)) { + if (vp->v_type == VNON) + vp->v_type = VDIR; + vp->v_vflag |= VV_ROOT; + } + + MALLOC(np->n_fhp, struct nfsfh *, sizeof (struct nfsfh) + fhsize, + M_NFSFH, M_WAITOK); + bcopy(fhp, np->n_fhp->nfh_fh, fhsize); + np->n_fhp->nfh_len = fhsize; + lockmgr(vp->v_vnlock, LK_EXCLUSIVE, NULL); + error = insmntque(vp, mntp); + if (error != 0) { + *npp = NULL; + FREE((caddr_t)np->n_fhp, M_NFSFH); + mtx_destroy(&np->n_mtx); + uma_zfree(newnfsnode_zone, np); + return (error); + } + error = vfs_hash_insert(vp, hash, LK_EXCLUSIVE, + td, &nvp, newnfs_vncmpf, np->n_fhp); + if (error) + return (error); + if (nvp != NULL) { + *npp = VTONFS(nvp); + /* vfs_hash_insert() vput()'s the losing vnode */ + return (0); + } + *npp = np; + + return (0); +} + +int +ncl_inactive(struct vop_inactive_args *ap) +{ + struct nfsnode *np; + struct sillyrename *sp; + struct thread *td = curthread; /* XXX */ + + np = VTONFS(ap->a_vp); + if (prtactive && vrefcnt(ap->a_vp) != 0) + vprint("ncl_inactive: pushing active", ap->a_vp); + if (ap->a_vp->v_type != VDIR) { + sp = np->n_sillyrename; + np->n_sillyrename = NULL; + } else + sp = NULL; + if (sp) { + (void)ncl_vinvalbuf(ap->a_vp, 0, td, 1); + /* + * Remove the silly file that was rename'd earlier + */ + ncl_removeit(sp, ap->a_vp); + crfree(sp->s_cred); + vrele(sp->s_dvp); + FREE((caddr_t)sp, M_NEWNFSREQ); + } + np->n_flag &= NMODIFIED; + return (0); +} + +/* + * Reclaim an nfsnode so that it can be used for other purposes. + */ +int +ncl_reclaim(struct vop_reclaim_args *ap) +{ + struct vnode *vp = ap->a_vp; + struct nfsnode *np = VTONFS(vp); + struct nfsdmap *dp, *dp2; + + if (prtactive && vrefcnt(vp) != 0) + vprint("ncl_reclaim: pushing active", vp); + + /* + * If the NLM is running, give it a chance to abort pending + * locks. + */ + if (ncl_reclaim_p) + ncl_reclaim_p(ap); + + /* + * Destroy the vm object and flush associated pages. + */ + vnode_destroy_vobject(vp); + + vfs_hash_remove(vp); + + /* + * Call nfscl_reclaimnode() to save attributes in the delegation, + * as required. + */ + if (vp->v_type == VREG) + nfscl_reclaimnode(vp); + + /* + * Free up any directory cookie structures and + * large file handle structures that might be associated with + * this nfs node. + */ + if (vp->v_type == VDIR) { + dp = LIST_FIRST(&np->n_cookies); + while (dp) { + dp2 = dp; + dp = LIST_NEXT(dp, ndm_list); + FREE((caddr_t)dp2, M_NFSDIROFF); + } + } + FREE((caddr_t)np->n_fhp, M_NFSFH); + if (np->n_v4 != NULL) + FREE((caddr_t)np->n_v4, M_NFSV4NODE); + mtx_destroy(&np->n_mtx); + uma_zfree(newnfsnode_zone, vp->v_data); + vp->v_data = NULL; + return (0); +} + +/* + * Invalidate both the access and attribute caches for this vnode. + */ +void +ncl_invalcaches(struct vnode *vp) +{ + struct nfsnode *np = VTONFS(vp); + int i; + + mtx_lock(&np->n_mtx); + for (i = 0; i < NFS_ACCESSCACHESIZE; i++) + np->n_accesscache[i].stamp = 0; + np->n_attrstamp = 0; + mtx_unlock(&np->n_mtx); +} + diff --git a/sys/fs/nfsclient/nfs_clport.c b/sys/fs/nfsclient/nfs_clport.c new file mode 100644 index 0000000..bb18fcb --- /dev/null +++ b/sys/fs/nfsclient/nfs_clport.c @@ -0,0 +1,1271 @@ +/*- + * Copyright (c) 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Rick Macklem at The University of Guelph. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ + +#include <sys/cdefs.h> +__FBSDID("$FreeBSD$"); + +/* + * generally, I don't like #includes inside .h files, but it seems to + * be the easiest way to handle the port. + */ +#include <fs/nfs/nfsport.h> +#include <netinet/if_ether.h> +#include <net/if_types.h> + +extern u_int32_t newnfs_true, newnfs_false, newnfs_xdrneg1; +extern struct vop_vector newnfs_vnodeops; +extern struct vop_vector newnfs_fifoops; +extern uma_zone_t newnfsnode_zone; +extern struct buf_ops buf_ops_newnfs; +extern int ncl_pbuf_freecnt; +extern short nfsv4_cbport; +extern int nfscl_enablecallb; +extern int nfs_numnfscbd; +extern int nfscl_inited; +struct mtx nfs_clstate_mutex; +struct mtx ncl_iod_mutex; +NFSDLOCKMUTEX; + +extern void (*ncl_call_invalcaches)(struct vnode *); + +/* + * Comparison function for vfs_hash functions. + */ +int +newnfs_vncmpf(struct vnode *vp, void *arg) +{ + struct nfsfh *nfhp = (struct nfsfh *)arg; + struct nfsnode *np = VTONFS(vp); + + if (np->n_fhp->nfh_len != nfhp->nfh_len || + NFSBCMP(np->n_fhp->nfh_fh, nfhp->nfh_fh, nfhp->nfh_len)) + return (1); + return (0); +} + +/* + * Look up a vnode/nfsnode by file handle. + * Callers must check for mount points!! + * In all cases, a pointer to a + * nfsnode structure is returned. + * This variant takes a "struct nfsfh *" as second argument and uses + * that structure up, either by hanging off the nfsnode or FREEing it. 
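+ * Consequently, the caller must not reference the nfsfh structure
+ * after this call, whether or not it succeeds.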
+ */ +int +nfscl_nget(struct mount *mntp, struct vnode *dvp, struct nfsfh *nfhp, + struct componentname *cnp, struct thread *td, struct nfsnode **npp, + void *stuff) +{ + struct nfsnode *np, *dnp; + struct vnode *vp, *nvp; + struct nfsv4node *newd, *oldd; + int error; + u_int hash; + struct nfsmount *nmp; + + nmp = VFSTONFS(mntp); + dnp = VTONFS(dvp); + *npp = NULL; + + hash = fnv_32_buf(nfhp->nfh_fh, nfhp->nfh_len, FNV1_32_INIT); + + error = vfs_hash_get(mntp, hash, LK_EXCLUSIVE, + td, &nvp, newnfs_vncmpf, nfhp); + if (error == 0 && nvp != NULL) { + /* + * I believe there is a slight chance that vgonel() could + * get called on this vnode between when vn_lock() drops + * the VI_LOCK() and vget() acquires it again, so that it + * hasn't yet had v_usecount incremented. If this were to + * happen, the VI_DOOMED flag would be set, so check for + * that here. Since we now have the v_usecount incremented, + * we should be ok until we vrele() it, if the VI_DOOMED + * flag isn't set now. + */ + VI_LOCK(nvp); + if ((nvp->v_iflag & VI_DOOMED)) { + VI_UNLOCK(nvp); + vrele(nvp); + error = ENOENT; + } else { + VI_UNLOCK(nvp); + } + } + if (error) { + FREE((caddr_t)nfhp, M_NFSFH); + return (error); + } + if (nvp != NULL) { + np = VTONFS(nvp); + /* + * For NFSv4, check to see if it is the same name and + * replace the name, if it is different. + */ + oldd = newd = NULL; + if ((nmp->nm_flag & NFSMNT_NFSV4) && np->n_v4 != NULL && + nvp->v_type == VREG && + (np->n_v4->n4_namelen != cnp->cn_namelen || + NFSBCMP(cnp->cn_nameptr, NFS4NODENAME(np->n_v4), + cnp->cn_namelen) || + dnp->n_fhp->nfh_len != np->n_v4->n4_fhlen || + NFSBCMP(dnp->n_fhp->nfh_fh, np->n_v4->n4_data, + dnp->n_fhp->nfh_len))) { + MALLOC(newd, struct nfsv4node *, + sizeof (struct nfsv4node) + dnp->n_fhp->nfh_len + + + cnp->cn_namelen - 1, M_NFSV4NODE, M_WAITOK); + NFSLOCKNODE(np); + if (newd != NULL && np->n_v4 != NULL && nvp->v_type == VREG + && (np->n_v4->n4_namelen != cnp->cn_namelen || + NFSBCMP(cnp->cn_nameptr, NFS4NODENAME(np->n_v4), + cnp->cn_namelen) || + dnp->n_fhp->nfh_len != np->n_v4->n4_fhlen || + NFSBCMP(dnp->n_fhp->nfh_fh, np->n_v4->n4_data, + dnp->n_fhp->nfh_len))) { + oldd = np->n_v4; + np->n_v4 = newd; + newd = NULL; + np->n_v4->n4_fhlen = dnp->n_fhp->nfh_len; + np->n_v4->n4_namelen = cnp->cn_namelen; + NFSBCOPY(dnp->n_fhp->nfh_fh, np->n_v4->n4_data, + dnp->n_fhp->nfh_len); + NFSBCOPY(cnp->cn_nameptr, NFS4NODENAME(np->n_v4), + cnp->cn_namelen); + } + NFSUNLOCKNODE(np); + } + if (newd != NULL) + FREE((caddr_t)newd, M_NFSV4NODE); + if (oldd != NULL) + FREE((caddr_t)oldd, M_NFSV4NODE); + *npp = np; + FREE((caddr_t)nfhp, M_NFSFH); + return (0); + } + + /* + * Allocate before getnewvnode since doing so afterward + * might cause a bogus v_data pointer to get dereferenced + * elsewhere if zalloc should block. + */ + np = uma_zalloc(newnfsnode_zone, M_WAITOK | M_ZERO); + + error = getnewvnode("newnfs", mntp, &newnfs_vnodeops, &nvp); + if (error) { + uma_zfree(newnfsnode_zone, np); + FREE((caddr_t)nfhp, M_NFSFH); + return (error); + } + vp = nvp; + vp->v_bufobj.bo_ops = &buf_ops_newnfs; + vp->v_data = np; + np->n_vnode = vp; + /* + * Initialize the mutex even if the vnode is going to be a loser. + * This simplifies the logic in reclaim, which can then unconditionally + * destroy the mutex (in the case of the loser, or if hash_insert + * happened to return an error no special casing is needed). + */ + mtx_init(&np->n_mtx, "NEWNFSnode lock", NULL, MTX_DEF | MTX_DUPOK); + + /* + * Are we getting the root? 
If so, make sure the vnode flags
+ * are correct
+ */
+ if ((nfhp->nfh_len == nmp->nm_fhsize) &&
+ !bcmp(nfhp->nfh_fh, nmp->nm_fh, nfhp->nfh_len)) {
+ if (vp->v_type == VNON)
+ vp->v_type = VDIR;
+ vp->v_vflag |= VV_ROOT;
+ }
+
+ np->n_fhp = nfhp;
+ /*
+ * For NFSv4, we have to attach the directory file handle and
+ * file name, so that Open Ops can be done later.
+ */
+ if (nmp->nm_flag & NFSMNT_NFSV4) {
+ MALLOC(np->n_v4, struct nfsv4node *, sizeof (struct nfsv4node)
+ + dnp->n_fhp->nfh_len + cnp->cn_namelen - 1, M_NFSV4NODE,
+ M_WAITOK);
+ np->n_v4->n4_fhlen = dnp->n_fhp->nfh_len;
+ np->n_v4->n4_namelen = cnp->cn_namelen;
+ NFSBCOPY(dnp->n_fhp->nfh_fh, np->n_v4->n4_data,
+ dnp->n_fhp->nfh_len);
+ NFSBCOPY(cnp->cn_nameptr, NFS4NODENAME(np->n_v4),
+ cnp->cn_namelen);
+ } else {
+ np->n_v4 = NULL;
+ }
+
+ /*
+ * NFS supports recursive and shared locking.
+ */
+ VN_LOCK_AREC(vp);
+ VN_LOCK_ASHARE(vp);
+ lockmgr(vp->v_vnlock, LK_EXCLUSIVE, NULL);
+ error = insmntque(vp, mntp);
+ if (error != 0) {
+ *npp = NULL;
+ mtx_destroy(&np->n_mtx);
+ FREE((caddr_t)nfhp, M_NFSFH);
+ if (np->n_v4 != NULL)
+ FREE((caddr_t)np->n_v4, M_NFSV4NODE);
+ uma_zfree(newnfsnode_zone, np);
+ return (error);
+ }
+ error = vfs_hash_insert(vp, hash, LK_EXCLUSIVE,
+ td, &nvp, newnfs_vncmpf, nfhp);
+ if (error)
+ return (error);
+ if (nvp != NULL) {
+ *npp = VTONFS(nvp);
+ /* vfs_hash_insert() vput()'s the losing vnode */
+ return (0);
+ }
+ *npp = np;
+
+ return (0);
+}
+
+/*
+ * Another variant of nfs_nget(). This one is only used by reopen. It
+ * takes almost the same args as nfs_nget(), but only succeeds if an entry
+ * exists in the cache. (Since files should already be "open" with a
+ * vnode ref cnt on the node when reopen calls this, it should always
+ * succeed.)
+ * Also, don't get a vnode lock, since it may already be locked by some
+ * other process that is handling it. This is ok, since all other threads
+ * on the client are blocked by the nfsc_lock being exclusively held by the
+ * caller of this function.
+ */
+int
+nfscl_ngetreopen(struct mount *mntp, u_int8_t *fhp, int fhsize,
+ struct thread *td, struct nfsnode **npp)
+{
+ struct vnode *nvp;
+ u_int hash;
+ struct nfsfh *nfhp;
+ int error;
+
+ *npp = NULL;
+ /* For forced dismounts, just return error. */
+ if ((mntp->mnt_kern_flag & MNTK_UNMOUNTF))
+ return (EINTR);
+ MALLOC(nfhp, struct nfsfh *, sizeof (struct nfsfh) + fhsize,
+ M_NFSFH, M_WAITOK);
+ bcopy(fhp, &nfhp->nfh_fh[0], fhsize);
+ nfhp->nfh_len = fhsize;
+
+ hash = fnv_32_buf(fhp, fhsize, FNV1_32_INIT);
+
+ /*
+ * First, try to get the vnode locked, but don't block for the lock.
+ */
+ error = vfs_hash_get(mntp, hash, (LK_EXCLUSIVE | LK_NOWAIT), td, &nvp,
+ newnfs_vncmpf, nfhp);
+ if (error == 0 && nvp != NULL) {
+ VOP_UNLOCK(nvp, 0);
+ } else if (error == EBUSY) {
+ /*
+ * The LK_EXCLOTHER lock type tells nfs_lock1() to not try
+ * and lock the vnode, but just get a v_usecount on it.
+ * LK_NOWAIT is set so that when vget() returns ENOENT,
+ * vfs_hash_get() fails instead of looping.
+ * If this succeeds, it is safe so long as a vflush() with
+ * FORCECLOSE has not been done. Since the Renew thread is
+ * stopped and the MNTK_UNMOUNTF flag is set before doing
+ * a vflush() with FORCECLOSE, we should be ok here.
+ */
+ if ((mntp->mnt_kern_flag & MNTK_UNMOUNTF))
+ error = EINTR;
+ else
+ error = vfs_hash_get(mntp, hash,
+ (LK_EXCLOTHER | LK_NOWAIT), td, &nvp,
+ newnfs_vncmpf, nfhp);
+ }
+ FREE(nfhp, M_NFSFH);
+ if (error)
+ return (error);
+ if (nvp != NULL) {
+ *npp = VTONFS(nvp);
+ return (0);
+ }
+ return (EINVAL);
+}
+
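nfscl_loadattrcache() below stamps the nfsnode's attribute cache with time_second on every load, and zeroes n_attrstamp whenever the cached attributes must be treated as stale (the dontshrink path and the mtime-regression check both do this). A minimal standalone sketch of that timestamp scheme follows; the struct, the helper names, and the 60 second timeout are illustrative assumptions, not the client's actual timeout policy.

/*
 * Sketch of the n_attrstamp scheme used by nfscl_loadattrcache():
 * a load stamps the cache with the current time; setting the stamp
 * to 0 marks it stale so the next lookup goes back to the server.
 * All names and the 60s timeout are illustrative assumptions.
 */
#include <stdio.h>
#include <time.h>

struct attrcache {
    time_t stamp;       /* 0 means "invalid, refetch" */
    long size;
};

static int
cache_valid(const struct attrcache *ac, time_t now, time_t timeo)
{
    return (ac->stamp != 0 && now < ac->stamp + timeo);
}

int
main(void)
{
    struct attrcache ac = { .stamp = time(NULL), .size = 1024 };

    printf("fresh: %d\n", cache_valid(&ac, time(NULL), 60));
    ac.stamp = 0;   /* e.g. mtime went backwards; force revalidation */
    printf("invalidated: %d\n", cache_valid(&ac, time(NULL), 60));
    return (0);
}

Keeping invalidation as a simple stamp reset means any later consumer falls through to a fresh GETATTR without extra bookkeeping.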
+/*
+ * Load the attribute cache (that lives in the nfsnode entry) with
+ * the attributes of the second argument and, iff vaper is not NULL,
+ * copy those attributes to *vaper as well.
+ * Similar to nfs_loadattrcache(), except the attributes are passed in
+ * instead of being parsed out of the mbuf list.
+ */
+int
+nfscl_loadattrcache(struct vnode **vpp, struct nfsvattr *nap, void *nvaper,
+ void *stuff, int writeattr, int dontshrink)
+{
+ struct vnode *vp = *vpp;
+ struct vattr *vap, *nvap = &nap->na_vattr, *vaper = nvaper;
+ struct nfsnode *np;
+ struct nfsmount *nmp;
+ struct timespec mtime_save;
+ struct thread *td = curthread;
+
+ /*
+ * If v_type == VNON it is a new node, so fill in the v_type and
+ * n_mtime fields. Check to see if it represents a special
+ * device, and if so, check for a possible alias. Once the
+ * correct vnode has been obtained, fill in the rest of the
+ * information.
+ */
+ np = VTONFS(vp);
+ NFSLOCKNODE(np);
+ if (vp->v_type != nvap->va_type) {
+ vp->v_type = nvap->va_type;
+ if (vp->v_type == VFIFO)
+ vp->v_op = &newnfs_fifoops;
+ np->n_mtime = nvap->va_mtime;
+ }
+ nmp = VFSTONFS(vp->v_mount);
+ vap = &np->n_vattr.na_vattr;
+ mtime_save = vap->va_mtime;
+ if (writeattr) {
+ np->n_vattr.na_filerev = nap->na_filerev;
+ np->n_vattr.na_size = nap->na_size;
+ np->n_vattr.na_mtime = nap->na_mtime;
+ np->n_vattr.na_ctime = nap->na_ctime;
+ np->n_vattr.na_fsid = nap->na_fsid;
+ } else {
+ NFSBCOPY((caddr_t)nap, (caddr_t)&np->n_vattr,
+ sizeof (struct nfsvattr));
+ }
+
+ /*
+ * For NFSv4, if the node's fsid is not equal to the mount point's
+ * fsid, return the low order 32bits of the node's fsid. This
+ * allows getcwd(3) to work. There is a chance that the fsid might
+ * be the same as a local fs, but since this is in an NFS mount
+ * point, I don't think that will cause any problems?
+ */
+ if ((nmp->nm_flag & (NFSMNT_NFSV4 | NFSMNT_HASSETFSID)) ==
+ (NFSMNT_NFSV4 | NFSMNT_HASSETFSID) &&
+ (nmp->nm_fsid[0] != np->n_vattr.na_filesid[0] ||
+ nmp->nm_fsid[1] != np->n_vattr.na_filesid[1]))
+ vap->va_fsid = np->n_vattr.na_filesid[0];
+ else
+ vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0];
+ np->n_attrstamp = time_second;
+ /* Timestamp the NFS otw getattr fetch */
+ if (td->td_proc) {
+ np->n_ac_ts_tid = td->td_tid;
+ np->n_ac_ts_pid = td->td_proc->p_pid;
+ np->n_ac_ts_syscalls = td->td_syscalls;
+ } else
+ bzero(&np->n_ac_ts, sizeof(struct nfs_attrcache_timestamp));
+
+ if (vap->va_size != np->n_size) {
+ if (vap->va_type == VREG) {
+ if (dontshrink && vap->va_size < np->n_size) {
+ /*
+ * We've been told not to shrink the file;
+ * zero np->n_attrstamp to indicate that
+ * the attributes are stale.
+ */
+ vap->va_size = np->n_size;
+ np->n_attrstamp = 0;
+ } else if (np->n_flag & NMODIFIED) {
+ /*
+ * We've modified the file: Use the larger
+ * of our size, and the server's size.
+ */
+ if (vap->va_size < np->n_size) {
+ vap->va_size = np->n_size;
+ } else {
+ np->n_size = vap->va_size;
+ np->n_flag |= NSIZECHANGED;
+ }
+ } else {
+ np->n_size = vap->va_size;
+ np->n_flag |= NSIZECHANGED;
+ }
+ vnode_pager_setsize(vp, np->n_size);
+ } else {
+ np->n_size = vap->va_size;
+ }
+ }
+ /*
+ * The following checks are added to prevent a race between (say)
+ * a READDIR+ and a WRITE.
+ * READDIR+, WRITE requests sent out.
+ * READDIR+ resp, WRITE resp received on client.
+ * However, the WRITE resp was handled before the READDIR+ resp
+ * causing the post op attrs from the write to be loaded first
+ * and the attrs from the READDIR+ to be loaded later. If this
+ * happens, we have stale attrs loaded into the attrcache.
+ * We detect this by checking for the mtime moving backwards, and
+ * invalidate the attrcache when it happens.
+ */
+ if (timespeccmp(&mtime_save, &vap->va_mtime, >))
+ /* Size changed or mtime went backwards */
+ np->n_attrstamp = 0;
+ if (vaper != NULL) {
+ NFSBCOPY((caddr_t)vap, (caddr_t)vaper, sizeof(*vap));
+ if (np->n_flag & NCHG) {
+ if (np->n_flag & NACC)
+ vaper->va_atime = np->n_atim;
+ if (np->n_flag & NUPD)
+ vaper->va_mtime = np->n_mtim;
+ }
+ }
+ NFSUNLOCKNODE(np);
+ return (0);
+}
+
+/*
+ * Fill in the client id name. For these bytes:
+ * 1 - they must be unique
+ * 2 - they should be persistent across client reboots
+ * 1 is more critical than 2
+ * Use the mount point's unique id plus either the uuid or, if that
+ * isn't set, random junk.
+ */
+void
+nfscl_fillclid(u_int64_t clval, char *uuid, u_int8_t *cp, u_int16_t idlen)
+{
+ int uuidlen;
+
+ /*
+ * First, put in the 64bit mount point identifier.
+ */
+ if (idlen >= sizeof (u_int64_t)) {
+ NFSBCOPY((caddr_t)&clval, cp, sizeof (u_int64_t));
+ cp += sizeof (u_int64_t);
+ idlen -= sizeof (u_int64_t);
+ }
+
+ /*
+ * If uuid is non-zero length, use it.
+ */
+ uuidlen = strlen(uuid);
+ if (uuidlen > 0 && idlen >= uuidlen) {
+ NFSBCOPY(uuid, cp, uuidlen);
+ cp += uuidlen;
+ idlen -= uuidlen;
+ }
+
+ /*
+ * This only normally happens if the uuid isn't set.
+ */
+ while (idlen > 0) {
+ *cp++ = (u_int8_t)(arc4random() % 256);
+ idlen--;
+ }
+}
+
+/*
+ * Fill in a lock owner name. For now, pid + the process's creation time.
+ */
+void
+nfscl_filllockowner(struct thread *td, u_int8_t *cp)
+{
+ union {
+ u_int32_t lval;
+ u_int8_t cval[4];
+ } tl;
+ struct proc *p;
+
+ if (td == NULL) {
+ printf("NULL td\n");
+ bzero(cp, 12);
+ return;
+ }
+ p = td->td_proc;
+ if (p == NULL) {
+ printf("NULL pid\n");
+ bzero(cp, 12);
+ return;
+ }
+ tl.lval = p->p_pid;
+ *cp++ = tl.cval[0];
+ *cp++ = tl.cval[1];
+ *cp++ = tl.cval[2];
+ *cp++ = tl.cval[3];
+ if (p->p_stats == NULL) {
+ printf("pstats null\n");
+ bzero(cp, 8);
+ return;
+ }
+ tl.lval = p->p_stats->p_start.tv_sec;
+ *cp++ = tl.cval[0];
+ *cp++ = tl.cval[1];
+ *cp++ = tl.cval[2];
+ *cp++ = tl.cval[3];
+ tl.lval = p->p_stats->p_start.tv_usec;
+ *cp++ = tl.cval[0];
+ *cp++ = tl.cval[1];
+ *cp++ = tl.cval[2];
+ *cp = tl.cval[3];
+}
+
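nfscl_filllockowner() above packs a fixed 12-byte owner name: four bytes of pid followed by four bytes each for the seconds and microseconds of the process start time, copied a byte at a time through a union. The start time lets nfscl_procdoesntexist(), later in this file, distinguish a recycled pid from the original owner. Below is a minimal userland sketch of the same layout; the names and the use of memcpy() are assumptions for the example.

/*
 * Standalone sketch of the 12-byte lock owner encoding used by
 * nfscl_filllockowner() above.  Names and memcpy() are illustrative
 * assumptions; the kernel copies byte-by-byte through a union.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void
fill_lockowner(uint32_t pid, uint32_t start_sec, uint32_t start_usec,
    uint8_t out[12])
{
    memcpy(out + 0, &pid, 4);       /* which process */
    memcpy(out + 4, &start_sec, 4); /* disambiguates pid reuse */
    memcpy(out + 8, &start_usec, 4);
}

int
main(void)
{
    uint8_t owner[12];
    uint32_t pid;

    fill_lockowner(1234, 1700000000, 42, owner);
    memcpy(&pid, owner, 4);         /* a consumer decodes the same way */
    printf("decoded pid %u\n", pid);
    return (0);
}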
+/*
+ * Find the parent process for the thread passed in as an argument.
+ * If none exists, return NULL, otherwise return a thread for the parent.
+ * (Can be any of the threads, since it is only used for td->td_proc.)
+ */
+NFSPROC_T *
+nfscl_getparent(struct thread *td)
+{
+ struct proc *p;
+ struct thread *ptd;
+
+ if (td == NULL)
+ return (NULL);
+ p = td->td_proc;
+ if (p->p_pid == 0)
+ return (NULL);
+ p = p->p_pptr;
+ if (p == NULL)
+ return (NULL);
+ ptd = TAILQ_FIRST(&p->p_threads);
+ return (ptd);
+}
+
+/*
+ * Start up the renew kernel thread.
+ */
+static void
+start_nfscl(void *arg)
+{
+ struct nfsclclient *clp;
+ struct thread *td;
+
+ clp = (struct nfsclclient *)arg;
+ td = TAILQ_FIRST(&clp->nfsc_renewthread->p_threads);
+ nfscl_renewthread(clp, td);
+ kproc_exit(0);
+}
+
+void
+nfscl_start_renewthread(struct nfsclclient *clp)
+{
+
+ kproc_create(start_nfscl, (void *)clp, &clp->nfsc_renewthread, 0, 0,
+ "nfscl");
+}
+
+/*
+ * Handle wcc_data.
+ * For NFSv4, it assumes that nfsv4_wccattr() was used to set up the getattr
+ * as the first Op after PutFH.
+ * (For NFSv4, the postop attributes are after the Op, so they can't be
+ * parsed here. A separate call to nfscl_postop_attr() is required.)
+ */
+int
+nfscl_wcc_data(struct nfsrv_descript *nd, struct vnode *vp,
+ struct nfsvattr *nap, int *flagp, int *wccflagp, void *stuff)
+{
+ u_int32_t *tl;
+ struct nfsnode *np = VTONFS(vp);
+ struct nfsvattr nfsva;
+ int error = 0;
+
+ if (wccflagp != NULL)
+ *wccflagp = 0;
+ if (nd->nd_flag & ND_NFSV3) {
+ *flagp = 0;
+ NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
+ if (*tl == newnfs_true) {
+ NFSM_DISSECT(tl, u_int32_t *, 6 * NFSX_UNSIGNED);
+ if (wccflagp != NULL) {
+ mtx_lock(&np->n_mtx);
+ *wccflagp = (np->n_mtime.tv_sec ==
+ fxdr_unsigned(u_int32_t, *(tl + 2)) &&
+ np->n_mtime.tv_nsec ==
+ fxdr_unsigned(u_int32_t, *(tl + 3)));
+ mtx_unlock(&np->n_mtx);
+ }
+ }
+ error = nfscl_postop_attr(nd, nap, flagp, stuff);
+ } else if ((nd->nd_flag & (ND_NOMOREDATA | ND_NFSV4 | ND_V4WCCATTR))
+ == (ND_NFSV4 | ND_V4WCCATTR)) {
+ error = nfsv4_loadattr(nd, NULL, &nfsva, NULL,
+ NULL, 0, NULL, NULL, NULL, NULL, NULL, 0,
+ NULL, NULL, NULL, NULL, NULL);
+ if (error)
+ return (error);
+ /*
+ * Get rid of Op# and status for next op.
+ */
+ NFSM_DISSECT(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
+ if (*++tl)
+ nd->nd_flag |= ND_NOMOREDATA;
+ if (wccflagp != NULL &&
+ nfsva.na_vattr.va_mtime.tv_sec != 0) {
+ mtx_lock(&np->n_mtx);
+ *wccflagp = (np->n_mtime.tv_sec ==
+ nfsva.na_vattr.va_mtime.tv_sec &&
+ np->n_mtime.tv_nsec ==
+ nfsva.na_vattr.va_mtime.tv_nsec);
+ mtx_unlock(&np->n_mtx);
+ }
+ }
+nfsmout:
+ return (error);
+}
+
+/*
+ * Get postop attributes.
+ */
+int
+nfscl_postop_attr(struct nfsrv_descript *nd, struct nfsvattr *nap, int *retp,
+ void *stuff)
+{
+ u_int32_t *tl;
+ int error = 0;
+
+ *retp = 0;
+ if (nd->nd_flag & ND_NOMOREDATA)
+ return (error);
+ if (nd->nd_flag & ND_NFSV3) {
+ NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
+ *retp = fxdr_unsigned(int, *tl);
+ } else if (nd->nd_flag & ND_NFSV4) {
+ /*
+ * For NFSv4, the postop attr are at the end, so no point
+ * in looking if nd_repstat != 0.
+ */
+ if (!nd->nd_repstat) {
+ NFSM_DISSECT(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
+ if (*(tl + 1))
+ /* should never happen, since nd_repstat == 0 here */
+ nd->nd_flag |= ND_NOMOREDATA;
+ else
+ *retp = 1;
+ }
+ } else if (!nd->nd_repstat) {
+ /* For NFSv2, the attributes are here iff nd_repstat == 0 */
+ *retp = 1;
+ }
+ if (*retp) {
+ error = nfsm_loadattr(nd, nap);
+ if (error)
+ *retp = 0;
+ }
+nfsmout:
+ return (error);
+}
+
+/*
+ * Fill in the settable attributes. The full argument indicates whether
+ * to fill in all of them or just mode and time.
+ */ +void +nfscl_fillsattr(struct nfsrv_descript *nd, struct vattr *vap, + struct vnode *vp, int flags, u_int32_t rdev) +{ + u_int32_t *tl; + struct nfsv2_sattr *sp; + nfsattrbit_t attrbits; + struct timeval curtime; + + switch (nd->nd_flag & (ND_NFSV2 | ND_NFSV3 | ND_NFSV4)) { + case ND_NFSV2: + NFSM_BUILD(sp, struct nfsv2_sattr *, NFSX_V2SATTR); + if (vap->va_mode == (mode_t)VNOVAL) + sp->sa_mode = newnfs_xdrneg1; + else + sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode); + if (vap->va_uid == (uid_t)VNOVAL) + sp->sa_uid = newnfs_xdrneg1; + else + sp->sa_uid = txdr_unsigned(vap->va_uid); + if (vap->va_gid == (gid_t)VNOVAL) + sp->sa_gid = newnfs_xdrneg1; + else + sp->sa_gid = txdr_unsigned(vap->va_gid); + if (flags & NFSSATTR_SIZE0) + sp->sa_size = 0; + else if (flags & NFSSATTR_SIZENEG1) + sp->sa_size = newnfs_xdrneg1; + else if (flags & NFSSATTR_SIZERDEV) + sp->sa_size = txdr_unsigned(rdev); + else + sp->sa_size = txdr_unsigned(vap->va_size); + txdr_nfsv2time(&vap->va_atime, &sp->sa_atime); + txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime); + break; + case ND_NFSV3: + getmicrotime(&curtime); + if (vap->va_mode != (mode_t)VNOVAL) { + NFSM_BUILD(tl, u_int32_t *, 2 * NFSX_UNSIGNED); + *tl++ = newnfs_true; + *tl = txdr_unsigned(vap->va_mode); + } else { + NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED); + *tl = newnfs_false; + } + if ((flags & NFSSATTR_FULL) && vap->va_uid != (uid_t)VNOVAL) { + NFSM_BUILD(tl, u_int32_t *, 2 * NFSX_UNSIGNED); + *tl++ = newnfs_true; + *tl = txdr_unsigned(vap->va_uid); + } else { + NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED); + *tl = newnfs_false; + } + if ((flags & NFSSATTR_FULL) && vap->va_gid != (gid_t)VNOVAL) { + NFSM_BUILD(tl, u_int32_t *, 2 * NFSX_UNSIGNED); + *tl++ = newnfs_true; + *tl = txdr_unsigned(vap->va_gid); + } else { + NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED); + *tl = newnfs_false; + } + if ((flags & NFSSATTR_FULL) && vap->va_size != VNOVAL) { + NFSM_BUILD(tl, u_int32_t *, 3 * NFSX_UNSIGNED); + *tl++ = newnfs_true; + txdr_hyper(vap->va_size, tl); + } else { + NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED); + *tl = newnfs_false; + } + if (vap->va_atime.tv_sec != VNOVAL) { + if (vap->va_atime.tv_sec != curtime.tv_sec) { + NFSM_BUILD(tl, u_int32_t *, 3 * NFSX_UNSIGNED); + *tl++ = txdr_unsigned(NFSV3SATTRTIME_TOCLIENT); + txdr_nfsv3time(&vap->va_atime, tl); + } else { + NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED); + *tl = txdr_unsigned(NFSV3SATTRTIME_TOSERVER); + } + } else { + NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED); + *tl = txdr_unsigned(NFSV3SATTRTIME_DONTCHANGE); + } + if (vap->va_mtime.tv_sec != VNOVAL) { + if (vap->va_mtime.tv_sec != curtime.tv_sec) { + NFSM_BUILD(tl, u_int32_t *, 3 * NFSX_UNSIGNED); + *tl++ = txdr_unsigned(NFSV3SATTRTIME_TOCLIENT); + txdr_nfsv3time(&vap->va_mtime, tl); + } else { + NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED); + *tl = txdr_unsigned(NFSV3SATTRTIME_TOSERVER); + } + } else { + NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED); + *tl = txdr_unsigned(NFSV3SATTRTIME_DONTCHANGE); + } + break; + case ND_NFSV4: + NFSZERO_ATTRBIT(&attrbits); + if (vap->va_mode != (mode_t)VNOVAL) + NFSSETBIT_ATTRBIT(&attrbits, NFSATTRBIT_MODE); + if ((flags & NFSSATTR_FULL) && vap->va_uid != (uid_t)VNOVAL) + NFSSETBIT_ATTRBIT(&attrbits, NFSATTRBIT_OWNER); + if ((flags & NFSSATTR_FULL) && vap->va_gid != (gid_t)VNOVAL) + NFSSETBIT_ATTRBIT(&attrbits, NFSATTRBIT_OWNERGROUP); + if ((flags & NFSSATTR_FULL) && vap->va_size != VNOVAL) + NFSSETBIT_ATTRBIT(&attrbits, NFSATTRBIT_SIZE); + if (vap->va_atime.tv_sec != VNOVAL) + NFSSETBIT_ATTRBIT(&attrbits, 
NFSATTRBIT_TIMEACCESSSET);
+ if (vap->va_mtime.tv_sec != VNOVAL)
+ NFSSETBIT_ATTRBIT(&attrbits, NFSATTRBIT_TIMEMODIFYSET);
+ (void) nfsv4_fillattr(nd, vp, NULL, vap, NULL, 0, &attrbits,
+ NULL, NULL, 0, 0);
+ break;
+ }
+}
+
+/*
+ * nfscl_request() - mostly a wrapper for newnfs_request().
+ */
+int
+nfscl_request(struct nfsrv_descript *nd, struct vnode *vp, NFSPROC_T *p,
+ struct ucred *cred, void *stuff)
+{
+ int ret, vers;
+ struct nfsmount *nmp;
+
+ nmp = VFSTONFS(vp->v_mount);
+ if (nd->nd_flag & ND_NFSV4)
+ vers = NFS_VER4;
+ else if (nd->nd_flag & ND_NFSV3)
+ vers = NFS_VER3;
+ else
+ vers = NFS_VER2;
+ ret = newnfs_request(nd, nmp, NULL, &nmp->nm_sockreq, vp, p, cred,
+ NFS_PROG, vers, NULL, 1, NULL);
+ return (ret);
+}
+
+/*
+ * Fill in this BSD's variant of statfs using nfsstatfs.
+ */
+void
+nfscl_loadsbinfo(struct nfsmount *nmp, struct nfsstatfs *sfp, void *statfs)
+{
+ struct statfs *sbp = (struct statfs *)statfs;
+ nfsquad_t tquad;
+
+ if (nmp->nm_flag & (NFSMNT_NFSV3 | NFSMNT_NFSV4)) {
+ sbp->f_bsize = NFS_FABLKSIZE;
+ tquad.qval = sfp->sf_tbytes;
+ sbp->f_blocks = (long)(tquad.qval / ((u_quad_t)NFS_FABLKSIZE));
+ tquad.qval = sfp->sf_fbytes;
+ sbp->f_bfree = (long)(tquad.qval / ((u_quad_t)NFS_FABLKSIZE));
+ tquad.qval = sfp->sf_abytes;
+ sbp->f_bavail = (long)(tquad.qval / ((u_quad_t)NFS_FABLKSIZE));
+ tquad.qval = sfp->sf_tfiles;
+ sbp->f_files = (tquad.lval[0] & 0x7fffffff);
+ tquad.qval = sfp->sf_ffiles;
+ sbp->f_ffree = (tquad.lval[0] & 0x7fffffff);
+ } else if ((nmp->nm_flag & NFSMNT_NFSV4) == 0) {
+ sbp->f_bsize = (int32_t)sfp->sf_bsize;
+ sbp->f_blocks = (int32_t)sfp->sf_blocks;
+ sbp->f_bfree = (int32_t)sfp->sf_bfree;
+ sbp->f_bavail = (int32_t)sfp->sf_bavail;
+ sbp->f_files = 0;
+ sbp->f_ffree = 0;
+ }
+}
+
+/*
+ * Use the fsinfo stuff to update the mount point.
+ */
+void
+nfscl_loadfsinfo(struct nfsmount *nmp, struct nfsfsinfo *fsp)
+{
+
+ if ((nmp->nm_wsize == 0 || fsp->fs_wtpref < nmp->nm_wsize) &&
+ fsp->fs_wtpref >= NFS_FABLKSIZE)
+ nmp->nm_wsize = (fsp->fs_wtpref + NFS_FABLKSIZE - 1) &
+ ~(NFS_FABLKSIZE - 1);
+ if (fsp->fs_wtmax < nmp->nm_wsize && fsp->fs_wtmax > 0) {
+ nmp->nm_wsize = fsp->fs_wtmax & ~(NFS_FABLKSIZE - 1);
+ if (nmp->nm_wsize == 0)
+ nmp->nm_wsize = fsp->fs_wtmax;
+ }
+ if (nmp->nm_wsize < NFS_FABLKSIZE)
+ nmp->nm_wsize = NFS_FABLKSIZE;
+ if ((nmp->nm_rsize == 0 || fsp->fs_rtpref < nmp->nm_rsize) &&
+ fsp->fs_rtpref >= NFS_FABLKSIZE)
+ nmp->nm_rsize = (fsp->fs_rtpref + NFS_FABLKSIZE - 1) &
+ ~(NFS_FABLKSIZE - 1);
+ if (fsp->fs_rtmax < nmp->nm_rsize && fsp->fs_rtmax > 0) {
+ nmp->nm_rsize = fsp->fs_rtmax & ~(NFS_FABLKSIZE - 1);
+ if (nmp->nm_rsize == 0)
+ nmp->nm_rsize = fsp->fs_rtmax;
+ }
+ if (nmp->nm_rsize < NFS_FABLKSIZE)
+ nmp->nm_rsize = NFS_FABLKSIZE;
+ if ((nmp->nm_readdirsize == 0 || fsp->fs_dtpref < nmp->nm_readdirsize)
+ && fsp->fs_dtpref >= NFS_DIRBLKSIZ)
+ nmp->nm_readdirsize = (fsp->fs_dtpref + NFS_DIRBLKSIZ - 1) &
+ ~(NFS_DIRBLKSIZ - 1);
+ if (fsp->fs_rtmax < nmp->nm_readdirsize && fsp->fs_rtmax > 0) {
+ nmp->nm_readdirsize = fsp->fs_rtmax & ~(NFS_DIRBLKSIZ - 1);
+ if (nmp->nm_readdirsize == 0)
+ nmp->nm_readdirsize = fsp->fs_rtmax;
+ }
+ if (nmp->nm_readdirsize < NFS_DIRBLKSIZ)
+ nmp->nm_readdirsize = NFS_DIRBLKSIZ;
+ if (fsp->fs_maxfilesize > 0 &&
+ fsp->fs_maxfilesize < nmp->nm_maxfilesize)
+ nmp->nm_maxfilesize = fsp->fs_maxfilesize;
+ nmp->nm_mountp->mnt_stat.f_iosize = newnfs_iosize(nmp);
+ nmp->nm_state |= NFSSTA_GOTFSINFO;
+}
+
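nfscl_loadfsinfo() above clamps the mount's transfer sizes with two power-of-two rounding idioms: (x + B - 1) & ~(B - 1) rounds x up to a multiple of B, and x & ~(B - 1) rounds down. A standalone demonstration, using B = 512 to match the traditional NFS_FABLKSIZE (an assumed value for the example):

/*
 * Demonstrates the block-size rounding used by nfscl_loadfsinfo().
 * B must be a power of two; 512 mirrors the traditional NFS_FABLKSIZE.
 */
#include <stdio.h>

#define B 512

int
main(void)
{
    unsigned pref = 8000;   /* server's preferred write size (fs_wtpref) */
    unsigned max = 7000;    /* server's hard maximum (fs_wtmax) */
    unsigned wsize;

    wsize = (pref + B - 1) & ~(B - 1);      /* round up: 8192 */
    if (max < wsize)
        wsize = max & ~(B - 1);             /* round down: 6656 */
    if (wsize < B)
        wsize = B;                          /* never below one block */
    printf("wsize = %u\n", wsize);
    return (0);
}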
+/*
+ * Get a pointer to my IP address and return it.
+ * Return NULL if you can't find one.
+ */
+u_int8_t *
+nfscl_getmyip(struct nfsmount *nmp, int *isinet6p)
+{
+ struct sockaddr_in sad, *sin;
+ struct rtentry *rt;
+ u_int8_t *retp = NULL;
+ static struct in_addr laddr;
+
+ *isinet6p = 0;
+ /*
+ * Look up a route for the destination address.
+ */
+ if (nmp->nm_nam->sa_family == AF_INET) {
+ bzero(&sad, sizeof (sad));
+ sin = (struct sockaddr_in *)nmp->nm_nam;
+ sad.sin_family = AF_INET;
+ sad.sin_len = sizeof (struct sockaddr_in);
+ sad.sin_addr.s_addr = sin->sin_addr.s_addr;
+ rt = rtalloc1((struct sockaddr *)&sad, 0, 0UL);
+ if (rt != NULL) {
+ if (rt->rt_ifp != NULL &&
+ rt->rt_ifa != NULL &&
+ ((rt->rt_ifp->if_flags & IFF_LOOPBACK) == 0) &&
+ rt->rt_ifa->ifa_addr->sa_family == AF_INET) {
+ sin = (struct sockaddr_in *)
+ rt->rt_ifa->ifa_addr;
+ laddr.s_addr = sin->sin_addr.s_addr;
+ retp = (u_int8_t *)&laddr;
+ }
+ RTFREE_LOCKED(rt);
+ }
+#ifdef INET6
+ } else if (nmp->nm_nam->sa_family == AF_INET6) {
+ struct sockaddr_in6 sad6, *sin6;
+ static struct in6_addr laddr6;
+
+ bzero(&sad6, sizeof (sad6));
+ sin6 = (struct sockaddr_in6 *)nmp->nm_nam;
+ sad6.sin6_family = AF_INET6;
+ sad6.sin6_len = sizeof (struct sockaddr_in6);
+ sad6.sin6_addr = sin6->sin6_addr;
+ rt = rtalloc1((struct sockaddr *)&sad6, 0, 0UL);
+ if (rt != NULL) {
+ if (rt->rt_ifp != NULL &&
+ rt->rt_ifa != NULL &&
+ ((rt->rt_ifp->if_flags & IFF_LOOPBACK) == 0) &&
+ rt->rt_ifa->ifa_addr->sa_family == AF_INET6) {
+ sin6 = (struct sockaddr_in6 *)
+ rt->rt_ifa->ifa_addr;
+ laddr6 = sin6->sin6_addr;
+ retp = (u_int8_t *)&laddr6;
+ *isinet6p = 1;
+ }
+ RTFREE_LOCKED(rt);
+ }
+#endif
+ }
+ return (retp);
+}
+
+/*
+ * Copy NFS uid, gids from the cred structure.
+ */
+void
+newnfs_copyincred(struct ucred *cr, struct nfscred *nfscr)
+{
+ int ngroups, i;
+
+ nfscr->nfsc_uid = cr->cr_uid;
+ ngroups = (cr->cr_ngroups > NGROUPS) ? NGROUPS :
+ cr->cr_ngroups;
+ for (i = 0; i < ngroups; i++)
+ nfscr->nfsc_groups[i] = cr->cr_groups[i];
+ nfscr->nfsc_ngroups = ngroups;
+}
+
+/*
+ * Do any client specific initialization.
+ */
+void
+nfscl_init(void)
+{
+ static int inited = 0;
+
+ if (inited)
+ return;
+ inited = 1;
+ nfscl_inited = 1;
+ ncl_pbuf_freecnt = nswbuf / 2 + 1;
+}
+
+/*
+ * Check each of the attributes to be set, to ensure they aren't already
+ * the correct value. Disable setting ones already correct.
+ */
+int
+nfscl_checksattr(struct vattr *vap, struct nfsvattr *nvap)
+{
+
+ if (vap->va_mode != (mode_t)VNOVAL) {
+ if (vap->va_mode == nvap->na_mode)
+ vap->va_mode = (mode_t)VNOVAL;
+ }
+ if (vap->va_uid != (uid_t)VNOVAL) {
+ if (vap->va_uid == nvap->na_uid)
+ vap->va_uid = (uid_t)VNOVAL;
+ }
+ if (vap->va_gid != (gid_t)VNOVAL) {
+ if (vap->va_gid == nvap->na_gid)
+ vap->va_gid = (gid_t)VNOVAL;
+ }
+ if (vap->va_size != VNOVAL) {
+ if (vap->va_size == nvap->na_size)
+ vap->va_size = VNOVAL;
+ }
+
+ /*
+ * We are normally called with only a partially initialized
+ * VAP. Since the NFSv3 spec says that a server may use the
+ * file attributes to store the verifier, the spec requires
+ * us to do a SETATTR RPC. FreeBSD servers store the verifier
+ * in atime, but we can't really assume that all servers will,
+ * so we ensure that our SETATTR sets both atime and mtime.
+ */
+ if (vap->va_mtime.tv_sec == VNOVAL)
+ vfs_timestamp(&vap->va_mtime);
+ if (vap->va_atime.tv_sec == VNOVAL)
+ vap->va_atime = vap->va_mtime;
+ return (1);
+}
+
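nfscl_checksattr() above filters a partially filled vattr so that a SETATTR only carries real changes, with the VNOVAL sentinel meaning "do not set". Below is a minimal userland sketch of the same filtering pattern; the struct, the NOVAL sentinel, and the field subset are illustrative assumptions.

/*
 * Sketch of the "disable already-correct attributes" filter above.
 * NOVAL stands in for the kernel's VNOVAL sentinel.
 */
#include <stdio.h>

#define NOVAL (-1)

struct attrs {
    long mode, uid, size;   /* illustrative subset of struct vattr */
};

static void
drop_noops(struct attrs *want, const struct attrs *cached)
{
    /* Anything already equal to the cached value need not be sent. */
    if (want->mode != NOVAL && want->mode == cached->mode)
        want->mode = NOVAL;
    if (want->uid != NOVAL && want->uid == cached->uid)
        want->uid = NOVAL;
    if (want->size != NOVAL && want->size == cached->size)
        want->size = NOVAL;
}

int
main(void)
{
    struct attrs want = { 0644, NOVAL, 1024 };
    struct attrs cached = { 0644, 0, 512 };

    drop_noops(&want, &cached);
    /* mode matched the cache, so only size survives. */
    printf("mode=%ld size=%ld\n", want.mode, want.size);
    return (0);
}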
+/*
+ * Map nfsv4 errors to errno.h errors.
+ * The uid and gid arguments are only used for NFSERR_BADOWNER and that
+ * error should only be returned for the Open, Create and Setattr Ops.
+ * As such, most calls can just pass in 0 for those arguments.
+ */
+APPLESTATIC int
+nfscl_maperr(struct thread *td, int error, uid_t uid, gid_t gid)
+{
+ struct proc *p;
+
+ if (error < 10000)
+ return (error);
+ if (td != NULL)
+ p = td->td_proc;
+ else
+ p = NULL;
+ switch (error) {
+ case NFSERR_BADOWNER:
+ tprintf(p, LOG_INFO,
+ "No name and/or group mapping for uid,gid:(%d,%d)\n",
+ uid, gid);
+ return (EPERM);
+ case NFSERR_STALECLIENTID:
+ case NFSERR_STALESTATEID:
+ case NFSERR_EXPIRED:
+ case NFSERR_BADSTATEID:
+ printf("nfsv4 recover err returned %d\n", error);
+ return (EIO);
+ case NFSERR_BADHANDLE:
+ case NFSERR_SERVERFAULT:
+ case NFSERR_BADTYPE:
+ case NFSERR_FHEXPIRED:
+ case NFSERR_RESOURCE:
+ case NFSERR_MOVED:
+ case NFSERR_NOFILEHANDLE:
+ case NFSERR_MINORVERMISMATCH:
+ case NFSERR_OLDSTATEID:
+ case NFSERR_BADSEQID:
+ case NFSERR_LEASEMOVED:
+ case NFSERR_RECLAIMBAD:
+ case NFSERR_BADXDR:
+ case NFSERR_BADCHAR:
+ case NFSERR_BADNAME:
+ case NFSERR_OPILLEGAL:
+ printf("nfsv4 client/server protocol prob err=%d\n",
+ error);
+ return (EIO);
+ default:
+ tprintf(p, LOG_INFO, "nfsv4 err=%d\n", error);
+ return (EIO);
+ }
+}
+
+/*
+ * Locate a process by number; return only "live" processes -- i.e., neither
+ * zombies nor newly born but incompletely initialized processes. By not
+ * returning processes in the PRS_NEW state, we allow callers to avoid
+ * testing for that condition to avoid dereferencing p_ucred, et al.
+ * Identical to pfind() in kern_proc.c, except it assumes the list is
+ * already locked.
+ */
+static struct proc *
+pfind_locked(pid_t pid)
+{
+ struct proc *p;
+
+ LIST_FOREACH(p, PIDHASH(pid), p_hash)
+ if (p->p_pid == pid) {
+ if (p->p_state == PRS_NEW) {
+ p = NULL;
+ break;
+ }
+ PROC_LOCK(p);
+ break;
+ }
+ return (p);
+}
+
+/*
+ * Check to see if the process for this owner exists. Return 1 if it doesn't
+ * and 0 otherwise.
+ */ +int +nfscl_procdoesntexist(u_int8_t *own) +{ + union { + u_int32_t lval; + u_int8_t cval[4]; + } tl; + struct proc *p; + pid_t pid; + int ret = 0; + + tl.cval[0] = *own++; + tl.cval[1] = *own++; + tl.cval[2] = *own++; + tl.cval[3] = *own++; + pid = tl.lval; + p = pfind_locked(pid); + if (p == NULL) + return (1); + if (p->p_stats == NULL) { + PROC_UNLOCK(p); + return (0); + } + tl.cval[0] = *own++; + tl.cval[1] = *own++; + tl.cval[2] = *own++; + tl.cval[3] = *own++; + if (tl.lval != p->p_stats->p_start.tv_sec) { + ret = 1; + } else { + tl.cval[0] = *own++; + tl.cval[1] = *own++; + tl.cval[2] = *own++; + tl.cval[3] = *own; + if (tl.lval != p->p_stats->p_start.tv_usec) + ret = 1; + } + PROC_UNLOCK(p); + return (ret); +} + +/* + * - nfs pseudo system call for the client + */ +/* + * MPSAFE + */ +static int +nfssvc_nfscl(struct thread *td, struct nfssvc_args *uap) +{ + struct file *fp; + struct nfscbd_args nfscbdarg; + struct nfsd_nfscbd_args nfscbdarg2; + int error; + + if (uap->flag & NFSSVC_CBADDSOCK) { + error = copyin(uap->argp, (caddr_t)&nfscbdarg, sizeof(nfscbdarg)); + if (error) + return (error); + if ((error = fget(td, nfscbdarg.sock, &fp)) != 0) { + return (error); + } + if (fp->f_type != DTYPE_SOCKET) { + fdrop(fp, td); + return (EPERM); + } + error = nfscbd_addsock(fp); + fdrop(fp, td); + if (!error && nfscl_enablecallb == 0) { + nfsv4_cbport = nfscbdarg.port; + nfscl_enablecallb = 1; + } + } else if (uap->flag & NFSSVC_NFSCBD) { + if (uap->argp == NULL) + return (EINVAL); + error = copyin(uap->argp, (caddr_t)&nfscbdarg2, + sizeof(nfscbdarg2)); + if (error) + return (error); + error = nfscbd_nfsd(td, &nfscbdarg2); + } else { + error = EINVAL; + } + return (error); +} + +extern int (*nfsd_call_nfscl)(struct thread *, struct nfssvc_args *); + +/* + * Called once to initialize data structures... + */ +static int +nfscl_modevent(module_t mod, int type, void *data) +{ + int error = 0; + static int loaded = 0; + + switch (type) { + case MOD_LOAD: + if (loaded) + return (0); + newnfs_portinit(); + mtx_init(&nfs_clstate_mutex, "nfs_clstate_mutex", NULL, + MTX_DEF); + mtx_init(&ncl_iod_mutex, "ncl_iod_mutex", NULL, MTX_DEF); + nfscl_init(); + NFSD_LOCK(); + nfsrvd_cbinit(0); + NFSD_UNLOCK(); + ncl_call_invalcaches = ncl_invalcaches; + nfsd_call_nfscl = nfssvc_nfscl; + loaded = 1; + break; + + case MOD_UNLOAD: + if (nfs_numnfscbd != 0) { + error = EBUSY; + break; + } + + ncl_call_invalcaches = NULL; + nfsd_call_nfscl = NULL; + /* and get rid of the mutexes */ + mtx_destroy(&nfs_clstate_mutex); + mtx_destroy(&ncl_iod_mutex); + loaded = 0; + break; + default: + error = EOPNOTSUPP; + break; + } + return error; +} +static moduledata_t nfscl_mod = { + "nfscl", + nfscl_modevent, + NULL, +}; +DECLARE_MODULE(nfscl, nfscl_mod, SI_SUB_VFS, SI_ORDER_ANY); + +/* So that loader and kldload(2) can find us, wherever we are.. */ +MODULE_VERSION(nfscl, 1); +MODULE_DEPEND(nfscl, newnfsd, 1, 1, 1); + diff --git a/sys/fs/nfsclient/nfs_clrpcops.c b/sys/fs/nfsclient/nfs_clrpcops.c new file mode 100644 index 0000000..9287d70 --- /dev/null +++ b/sys/fs/nfsclient/nfs_clrpcops.c @@ -0,0 +1,4173 @@ +/*- + * Copyright (c) 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Rick Macklem at The University of Guelph. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ + +#include <sys/cdefs.h> +__FBSDID("$FreeBSD$"); + +/* + * Rpc op calls, generally called from the vnode op calls or through the + * buffer cache, for NFS v2, 3 and 4. + * These do not normally make any changes to vnode arguments or use + * structures that might change between the VFS variants. The returned + * arguments are all at the end, after the NFSPROC_T *p one. + */ + +#ifndef APPLEKEXT +#include <fs/nfs/nfsport.h> + +/* + * Global variables + */ +extern int nfs_numnfscbd; +extern struct timeval nfsboottime; +extern u_int32_t newnfs_false, newnfs_true; +extern nfstype nfsv34_type[9]; +extern int nfsrv_useacl; +extern char nfsv4_callbackaddr[INET6_ADDRSTRLEN]; +NFSCLSTATEMUTEX; +int nfstest_outofseq = 0; +int nfscl_assumeposixlocks = 1; +int nfscl_enablecallb = 0; +short nfsv4_cbport = NFSV4_CBPORT; +int nfstest_openallsetattr = 0; +#endif /* !APPLEKEXT */ + +#define DIRHDSIZ (sizeof (struct dirent) - (MAXNAMLEN + 1)) + +static int nfsrpc_setattrrpc(vnode_t , struct vattr *, nfsv4stateid_t *, + struct ucred *, NFSPROC_T *, struct nfsvattr *, int *, void *); +static int nfsrpc_readrpc(vnode_t , struct uio *, struct ucred *, + nfsv4stateid_t *, NFSPROC_T *, struct nfsvattr *, int *, void *); +static int nfsrpc_writerpc(vnode_t , struct uio *, int *, u_char *, + struct ucred *, nfsv4stateid_t *, NFSPROC_T *, struct nfsvattr *, int *, + void *); +static int nfsrpc_createv23(vnode_t , char *, int, struct vattr *, + nfsquad_t, int, struct ucred *, NFSPROC_T *, struct nfsvattr *, + struct nfsvattr *, struct nfsfh **, int *, int *, void *); +static int nfsrpc_createv4(vnode_t , char *, int, struct vattr *, + nfsquad_t, int, struct nfsclowner *, struct nfscldeleg **, struct ucred *, + NFSPROC_T *, struct nfsvattr *, struct nfsvattr *, struct nfsfh **, int *, + int *, void *, int *); +static int nfsrpc_locku(struct nfsrv_descript *, struct nfsmount *, + struct nfscllockowner *, u_int64_t, u_int64_t, + u_int32_t, struct ucred *, NFSPROC_T *, int); +static void nfsrpc_doclose(struct nfsmount *, struct nfsclopenhead *, + NFSPROC_T *); +#ifdef NFS4_ACL_EXTATTR_NAME +static int nfsrpc_setaclrpc(vnode_t, struct ucred *, NFSPROC_T *, + struct acl *, nfsv4stateid_t *, void *); +#endif + 
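The RPC builders and parsers below move every 32-bit word through txdr_unsigned() and fxdr_unsigned(), thin macros over htonl() and ntohl() that convert between host order and XDR's big-endian wire order. A userland round-trip of the same idea:

/*
 * Round-trip a word through XDR byte order, mirroring the
 * txdr_unsigned()/fxdr_unsigned() macros used throughout this file
 * (on FreeBSD they wrap htonl()/ntohl()).
 */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
    uint32_t host = 0x12345678;
    uint32_t wire = htonl(host);    /* what txdr_unsigned() emits */
    uint32_t back = ntohl(wire);    /* what fxdr_unsigned() parses */

    printf("host %#x -> wire %#x -> host %#x\n", host, wire, back);
    return (0);
}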
+/* + * nfs null call from vfs. + */ +APPLESTATIC int +nfsrpc_null(vnode_t vp, struct ucred *cred, NFSPROC_T *p) +{ + int error; + struct nfsrv_descript nfsd, *nd = &nfsd; + + NFSCL_REQSTART(nd, NFSPROC_NULL, vp); + error = nfscl_request(nd, vp, p, cred, NULL); + if (nd->nd_repstat && !error) + error = nd->nd_repstat; + mbuf_freem(nd->nd_mrep); + return (error); +} + +/* + * nfs access rpc op. + * For nfs version 3 and 4, use the access rpc to check accessibility. If file + * modes are changed on the server, accesses might still fail later. + */ +APPLESTATIC int +nfsrpc_access(vnode_t vp, int acmode, struct ucred *cred, + NFSPROC_T *p, struct nfsvattr *nap, int *attrflagp) +{ + int error; + u_int32_t mode, rmode; + + if (acmode & VREAD) + mode = NFSACCESS_READ; + else + mode = 0; + if (vnode_vtype(vp) == VDIR) { + if (acmode & VWRITE) + mode |= (NFSACCESS_MODIFY | NFSACCESS_EXTEND | + NFSACCESS_DELETE); + if (acmode & VEXEC) + mode |= NFSACCESS_LOOKUP; + } else { + if (acmode & VWRITE) + mode |= (NFSACCESS_MODIFY | NFSACCESS_EXTEND); + if (acmode & VEXEC) + mode |= NFSACCESS_EXECUTE; + } + + /* + * Now, just call nfsrpc_accessrpc() to do the actual RPC. + */ + error = nfsrpc_accessrpc(vp, mode, cred, p, nap, attrflagp, &rmode, + NULL); + + /* + * The NFS V3 spec does not clarify whether or not + * the returned access bits can be a superset of + * the ones requested, so... + */ + if (!error && (rmode & mode) != mode) + error = EACCES; + return (error); +} + +/* + * The actual rpc, separated out for Darwin. + */ +APPLESTATIC int +nfsrpc_accessrpc(vnode_t vp, u_int32_t mode, struct ucred *cred, + NFSPROC_T *p, struct nfsvattr *nap, int *attrflagp, u_int32_t *rmodep, + void *stuff) +{ + u_int32_t *tl; + u_int32_t supported, rmode; + int error; + struct nfsrv_descript nfsd, *nd = &nfsd; + nfsattrbit_t attrbits; + + *attrflagp = 0; + supported = mode; + NFSCL_REQSTART(nd, NFSPROC_ACCESS, vp); + NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED); + *tl = txdr_unsigned(mode); + if (nd->nd_flag & ND_NFSV4) { + /* + * And do a Getattr op. + */ + NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED); + *tl = txdr_unsigned(NFSV4OP_GETATTR); + NFSGETATTR_ATTRBIT(&attrbits); + (void) nfsrv_putattrbit(nd, &attrbits); + } + error = nfscl_request(nd, vp, p, cred, stuff); + if (error) + return (error); + if (nd->nd_flag & ND_NFSV3) { + error = nfscl_postop_attr(nd, nap, attrflagp, stuff); + if (error) + goto nfsmout; + } + if (!nd->nd_repstat) { + if (nd->nd_flag & ND_NFSV4) { + NFSM_DISSECT(tl, u_int32_t *, 2 * NFSX_UNSIGNED); + supported = fxdr_unsigned(u_int32_t, *tl++); + } else { + NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED); + } + rmode = fxdr_unsigned(u_int32_t, *tl); + if (nd->nd_flag & ND_NFSV4) + error = nfscl_postop_attr(nd, nap, attrflagp, stuff); + + /* + * It's not obvious what should be done about + * unsupported access modes. For now, be paranoid + * and clear the unsupported ones. + */ + rmode &= supported; + *rmodep = rmode; + } else + error = nd->nd_repstat; +nfsmout: + mbuf_freem(nd->nd_mrep); + return (error); +} + +/* + * nfs open rpc + */ +APPLESTATIC int +nfsrpc_open(vnode_t vp, int amode, struct ucred *cred, NFSPROC_T *p) +{ + struct nfsclopen *op; + struct nfscldeleg *dp; + struct nfsfh *nfhp; + struct nfsnode *np = VTONFS(vp); + struct nfsmount *nmp = VFSTONFS(vnode_mount(vp)); + u_int32_t mode, clidrev; + int ret, newone, error, expireret = 0, retrycnt; + + /* + * For NFSv4, Open Ops are only done on Regular Files. 
+ */ + if (vnode_vtype(vp) != VREG) + return (0); + mode = 0; + if (amode & FREAD) + mode |= NFSV4OPEN_ACCESSREAD; + if (amode & FWRITE) + mode |= NFSV4OPEN_ACCESSWRITE; + nfhp = np->n_fhp; + + retrycnt = 0; +#ifdef notdef +{ char name[100]; int namel; +namel = (np->n_v4->n4_namelen < 100) ? np->n_v4->n4_namelen : 99; +bcopy(NFS4NODENAME(np->n_v4), name, namel); +name[namel] = '\0'; +printf("rpcopen p=0x%x name=%s",p->p_pid,name); +if (nfhp->nfh_len > 0) printf(" fh=0x%x\n",nfhp->nfh_fh[12]); +else printf(" fhl=0\n"); +} +#endif + do { + dp = NULL; + error = nfscl_open(vp, nfhp->nfh_fh, nfhp->nfh_len, mode, 1, + cred, p, NULL, &op, &newone, &ret, 1); + if (error) { + return (error); + } + if (nmp->nm_clp != NULL) + clidrev = nmp->nm_clp->nfsc_clientidrev; + else + clidrev = 0; + if (ret == NFSCLOPEN_DOOPEN) { + if (np->n_v4 != NULL) { + error = nfsrpc_openrpc(nmp, vp, np->n_v4->n4_data, + np->n_v4->n4_fhlen, np->n_fhp->nfh_fh, + np->n_fhp->nfh_len, mode, op, + NFS4NODENAME(np->n_v4), np->n_v4->n4_namelen, &dp, + 0, 0x0, cred, p, 0, 0); + if (dp != NULL) { +#ifdef APPLE + OSBitAndAtomic((int32_t)~NDELEGMOD, (UInt32 *)&np->n_flag); +#else + NFSLOCKNODE(np); + np->n_flag &= ~NDELEGMOD; + NFSUNLOCKNODE(np); +#endif + (void) nfscl_deleg(nmp->nm_mountp, + op->nfso_own->nfsow_clp, + nfhp->nfh_fh, nfhp->nfh_len, cred, p, &dp); + } + } else { + error = EIO; + } + newnfs_copyincred(cred, &op->nfso_cred); + } + + /* + * nfso_opencnt is the count of how many VOP_OPEN()s have + * been done on this Open successfully and a VOP_CLOSE() + * is expected for each of these. + * If error is non-zero, don't increment it, since the Open + * hasn't succeeded yet. + */ + if (!error) + op->nfso_opencnt++; + nfscl_openrelease(op, error, newone); + if (error == NFSERR_GRACE || error == NFSERR_STALECLIENTID || + error == NFSERR_STALEDONTRECOVER || error == NFSERR_DELAY) { + (void) nfs_catnap(PZERO, "nfs_open"); + } else if ((error == NFSERR_EXPIRED || error == NFSERR_BADSTATEID) + && clidrev != 0) { + expireret = nfscl_hasexpired(nmp->nm_clp, clidrev, p); + retrycnt++; + } + } while (error == NFSERR_GRACE || error == NFSERR_STALECLIENTID || + error == NFSERR_STALEDONTRECOVER || error == NFSERR_DELAY || + ((error == NFSERR_EXPIRED || error == NFSERR_BADSTATEID) && + expireret == 0 && clidrev != 0 && retrycnt < 4)); + if (error && retrycnt >= 4) + error = EIO; + return (error); +} + +/* + * the actual open rpc + */ +APPLESTATIC int +nfsrpc_openrpc(struct nfsmount *nmp, vnode_t vp, u_int8_t *nfhp, int fhlen, + u_int8_t *newfhp, int newfhlen, u_int32_t mode, struct nfsclopen *op, + u_int8_t *name, int namelen, struct nfscldeleg **dpp, + int reclaim, u_int32_t delegtype, struct ucred *cred, NFSPROC_T *p, + int syscred, int recursed) +{ + u_int32_t *tl; + struct nfsrv_descript nfsd, *nd = &nfsd; + struct nfscldeleg *dp, *ndp = NULL; + struct nfsvattr nfsva; + u_int32_t rflags, deleg; + nfsattrbit_t attrbits; + int error, ret, acesize, limitby; + + dp = *dpp; + *dpp = NULL; + nfscl_reqstart(nd, NFSPROC_OPEN, nmp, nfhp, fhlen, NULL); + NFSM_BUILD(tl, u_int32_t *, 5 * NFSX_UNSIGNED); + *tl++ = txdr_unsigned(op->nfso_own->nfsow_seqid); + *tl++ = txdr_unsigned(mode & NFSV4OPEN_ACCESSBOTH); + *tl++ = txdr_unsigned((mode >> NFSLCK_SHIFT) & NFSV4OPEN_DENYBOTH); + *tl++ = op->nfso_own->nfsow_clp->nfsc_clientid.lval[0]; + *tl = op->nfso_own->nfsow_clp->nfsc_clientid.lval[1]; + (void) nfsm_strtom(nd, op->nfso_own->nfsow_owner, NFSV4CL_LOCKNAMELEN); + NFSM_BUILD(tl, u_int32_t *, 2 * NFSX_UNSIGNED); + *tl++ = 
txdr_unsigned(NFSV4OPEN_NOCREATE); + if (reclaim) { + *tl = txdr_unsigned(NFSV4OPEN_CLAIMPREVIOUS); + NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED); + *tl = txdr_unsigned(delegtype); + } else { + if (dp != NULL) { + *tl = txdr_unsigned(NFSV4OPEN_CLAIMDELEGATECUR); + NFSM_BUILD(tl, u_int32_t *, NFSX_STATEID); + *tl++ = dp->nfsdl_stateid.seqid; + *tl++ = dp->nfsdl_stateid.other[0]; + *tl++ = dp->nfsdl_stateid.other[1]; + *tl = dp->nfsdl_stateid.other[2]; + } else { + *tl = txdr_unsigned(NFSV4OPEN_CLAIMNULL); + } + (void) nfsm_strtom(nd, name, namelen); + } + NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED); + *tl = txdr_unsigned(NFSV4OP_GETATTR); + NFSZERO_ATTRBIT(&attrbits); + NFSSETBIT_ATTRBIT(&attrbits, NFSATTRBIT_CHANGE); + NFSSETBIT_ATTRBIT(&attrbits, NFSATTRBIT_TIMEMODIFY); + (void) nfsrv_putattrbit(nd, &attrbits); + if (syscred) + nd->nd_flag |= ND_USEGSSNAME; + error = newnfs_request(nd, nmp, NULL, &nmp->nm_sockreq, vp, p, cred, + NFS_PROG, NFS_VER4, NULL, 1, NULL); + if (error) + return (error); + NFSCL_INCRSEQID(op->nfso_own->nfsow_seqid, nd); + if (!nd->nd_repstat) { + NFSM_DISSECT(tl, u_int32_t *, NFSX_STATEID + + 6 * NFSX_UNSIGNED); + op->nfso_stateid.seqid = *tl++; + op->nfso_stateid.other[0] = *tl++; + op->nfso_stateid.other[1] = *tl++; + op->nfso_stateid.other[2] = *tl; + rflags = fxdr_unsigned(u_int32_t, *(tl + 6)); + error = nfsrv_getattrbits(nd, &attrbits, NULL, NULL); + if (error) + goto nfsmout; + NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED); + deleg = fxdr_unsigned(u_int32_t, *tl); + if (deleg == NFSV4OPEN_DELEGATEREAD || + deleg == NFSV4OPEN_DELEGATEWRITE) { + if (!(op->nfso_own->nfsow_clp->nfsc_flags & + NFSCLFLAGS_FIRSTDELEG)) + op->nfso_own->nfsow_clp->nfsc_flags |= + (NFSCLFLAGS_FIRSTDELEG | NFSCLFLAGS_GOTDELEG); + MALLOC(ndp, struct nfscldeleg *, + sizeof (struct nfscldeleg) + newfhlen, + M_NFSCLDELEG, M_WAITOK); + LIST_INIT(&ndp->nfsdl_owner); + LIST_INIT(&ndp->nfsdl_lock); + ndp->nfsdl_clp = op->nfso_own->nfsow_clp; + ndp->nfsdl_fhlen = newfhlen; + NFSBCOPY(newfhp, ndp->nfsdl_fh, newfhlen); + newnfs_copyincred(cred, &ndp->nfsdl_cred); + nfscl_lockinit(&ndp->nfsdl_rwlock); + NFSM_DISSECT(tl, u_int32_t *, NFSX_STATEID + + NFSX_UNSIGNED); + ndp->nfsdl_stateid.seqid = *tl++; + ndp->nfsdl_stateid.other[0] = *tl++; + ndp->nfsdl_stateid.other[1] = *tl++; + ndp->nfsdl_stateid.other[2] = *tl++; + ret = fxdr_unsigned(int, *tl); + if (deleg == NFSV4OPEN_DELEGATEWRITE) { + ndp->nfsdl_flags = NFSCLDL_WRITE; + /* + * Indicates how much the file can grow. 
+ */ + NFSM_DISSECT(tl, u_int32_t *, + 3 * NFSX_UNSIGNED); + limitby = fxdr_unsigned(int, *tl++); + switch (limitby) { + case NFSV4OPEN_LIMITSIZE: + ndp->nfsdl_sizelimit = fxdr_hyper(tl); + break; + case NFSV4OPEN_LIMITBLOCKS: + ndp->nfsdl_sizelimit = + fxdr_unsigned(u_int64_t, *tl++); + ndp->nfsdl_sizelimit *= + fxdr_unsigned(u_int64_t, *tl); + break; + default: + error = NFSERR_BADXDR; + goto nfsmout; + }; + } else { + ndp->nfsdl_flags = NFSCLDL_READ; + } + if (ret) + ndp->nfsdl_flags |= NFSCLDL_RECALL; + error = nfsrv_dissectace(nd, &ndp->nfsdl_ace, &ret, + &acesize, p); + if (error) + goto nfsmout; + } else if (deleg != NFSV4OPEN_DELEGATENONE) { + error = NFSERR_BADXDR; + goto nfsmout; + } + NFSM_DISSECT(tl, u_int32_t *, 2 * NFSX_UNSIGNED); + error = nfsv4_loadattr(nd, NULL, &nfsva, NULL, + NULL, 0, NULL, NULL, NULL, NULL, NULL, 0, + NULL, NULL, NULL, p, cred); + if (error) + goto nfsmout; + if (ndp != NULL) { + ndp->nfsdl_change = nfsva.na_filerev; + ndp->nfsdl_modtime = nfsva.na_mtime; + ndp->nfsdl_flags |= NFSCLDL_MODTIMESET; + } + if (!reclaim && (rflags & NFSV4OPEN_RESULTCONFIRM)) { + do { + ret = nfsrpc_openconfirm(vp, newfhp, newfhlen, op, + cred, p); + if (ret == NFSERR_DELAY) + (void) nfs_catnap(PZERO, "nfs_open"); + } while (ret == NFSERR_DELAY); + error = ret; + } + if ((rflags & NFSV4OPEN_LOCKTYPEPOSIX) || + nfscl_assumeposixlocks) + op->nfso_posixlock = 1; + else + op->nfso_posixlock = 0; + + /* + * If the server is handing out delegations, but we didn't + * get one because an OpenConfirm was required, try the + * Open again, to get a delegation. This is a harmless no-op, + * from a server's point of view. + */ + if (!reclaim && (rflags & NFSV4OPEN_RESULTCONFIRM) && + (op->nfso_own->nfsow_clp->nfsc_flags & NFSCLFLAGS_GOTDELEG) + && !error && dp == NULL && ndp == NULL && !recursed) { + do { + ret = nfsrpc_openrpc(nmp, vp, nfhp, fhlen, newfhp, + newfhlen, mode, op, name, namelen, &ndp, 0, 0x0, + cred, p, syscred, 1); + if (ret == NFSERR_DELAY) + (void) nfs_catnap(PZERO, "nfs_open2"); + } while (ret == NFSERR_DELAY); + if (ret) { + if (ndp != NULL) + FREE((caddr_t)ndp, M_NFSCLDELEG); + if (ret == NFSERR_STALECLIENTID || + ret == NFSERR_STALEDONTRECOVER) + error = ret; + } + } + } + if (nd->nd_repstat != 0 && error == 0) + error = nd->nd_repstat; + if (error == NFSERR_STALECLIENTID) + nfscl_initiate_recovery(op->nfso_own->nfsow_clp); +nfsmout: + if (!error) + *dpp = ndp; + else if (ndp != NULL) + FREE((caddr_t)ndp, M_NFSCLDELEG); + mbuf_freem(nd->nd_mrep); + return (error); +} + +/* + * open downgrade rpc + */ +APPLESTATIC int +nfsrpc_opendowngrade(vnode_t vp, u_int32_t mode, struct nfsclopen *op, + struct ucred *cred, NFSPROC_T *p) +{ + u_int32_t *tl; + struct nfsrv_descript nfsd, *nd = &nfsd; + int error; + + NFSCL_REQSTART(nd, NFSPROC_OPENDOWNGRADE, vp); + NFSM_BUILD(tl, u_int32_t *, NFSX_STATEID + 3 * NFSX_UNSIGNED); + *tl++ = op->nfso_stateid.seqid; + *tl++ = op->nfso_stateid.other[0]; + *tl++ = op->nfso_stateid.other[1]; + *tl++ = op->nfso_stateid.other[2]; + *tl++ = txdr_unsigned(op->nfso_own->nfsow_seqid); + *tl++ = txdr_unsigned(mode & NFSV4OPEN_ACCESSBOTH); + *tl = txdr_unsigned((mode >> NFSLCK_SHIFT) & NFSV4OPEN_DENYBOTH); + error = nfscl_request(nd, vp, p, cred, NULL); + if (error) + return (error); + NFSCL_INCRSEQID(op->nfso_own->nfsow_seqid, nd); + if (!nd->nd_repstat) { + NFSM_DISSECT(tl, u_int32_t *, NFSX_STATEID); + op->nfso_stateid.seqid = *tl++; + op->nfso_stateid.other[0] = *tl++; + op->nfso_stateid.other[1] = *tl++; + op->nfso_stateid.other[2] = *tl; + } + 
if (nd->nd_repstat && error == 0) + error = nd->nd_repstat; + if (error == NFSERR_STALESTATEID) + nfscl_initiate_recovery(op->nfso_own->nfsow_clp); +nfsmout: + mbuf_freem(nd->nd_mrep); + return (error); +} + +/* + * V4 Close operation. + */ +APPLESTATIC int +nfsrpc_close(vnode_t vp, struct ucred *cred, NFSPROC_T *p) +{ + struct nfsclclient *clp; + struct nfsclopenhead oh; + int error; + + if (vnode_vtype(vp) != VREG) + return (0); + error = nfscl_getclose(vp, cred, p, &clp, &oh); + if (error) + return (error); + + if (!LIST_EMPTY(&oh)) + nfsrpc_doclose(VFSTONFS(vnode_mount(vp)), &oh, p); + nfscl_clientrelease(clp); + return (0); +} + +/* + * Close/free all the opens in the list. + */ +static void +nfsrpc_doclose(struct nfsmount *nmp, struct nfsclopenhead *ohp, NFSPROC_T *p) +{ + struct nfsrv_descript nfsd, *nd = &nfsd; + struct nfsclopen *op, *nop; + struct nfscllockowner *lp; + struct nfscllock *lop, *nlop; + struct ucred *tcred; + u_int64_t off = 0, len = 0; + u_int32_t type = NFSV4LOCKT_READ; + int error; + + tcred = newnfs_getcred(); + op = LIST_FIRST(ohp); + while (op != NULL) { + nop = LIST_NEXT(op, nfso_list); + newnfs_copycred(&op->nfso_cred, tcred); + /* + * (Theoretically this could be done in the same + * compound as the close, but having multiple + * sequenced Ops in the same compound might be + * too scary for some servers.) + */ + if (op->nfso_posixlock) { + off = 0; + len = NFS64BITSSET; + type = NFSV4LOCKT_READ; + } + LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) { + lop = LIST_FIRST(&lp->nfsl_lock); + while (lop != NULL) { + nlop = LIST_NEXT(lop, nfslo_list); + if (op->nfso_posixlock == 0) { + off = lop->nfslo_first; + len = lop->nfslo_end - lop->nfslo_first; + if (lop->nfslo_type == F_WRLCK) + type = NFSV4LOCKT_WRITE; + else + type = NFSV4LOCKT_READ; + } + if (lop == LIST_FIRST(&lp->nfsl_lock) || + op->nfso_posixlock == 0) { + NFSLOCKCLSTATE(); + nfscl_lockexcl(&lp->nfsl_rwlock, + NFSCLSTATEMUTEXPTR); + NFSUNLOCKCLSTATE(); + do { + error = nfsrpc_locku(nd, nmp, lp, off, len, + type, tcred, p, 0); + if ((nd->nd_repstat == NFSERR_GRACE || + nd->nd_repstat == NFSERR_DELAY) && + error == 0) + (void) nfs_catnap(PZERO, "nfs_close"); + } while ((nd->nd_repstat == NFSERR_GRACE || + nd->nd_repstat == NFSERR_DELAY) && error == 0); + NFSLOCKCLSTATE(); + nfscl_lockunlock(&lp->nfsl_rwlock); + NFSUNLOCKCLSTATE(); + } + nfscl_freelock(lop, 0); + lop = nlop; + } + } + NFSLOCKCLSTATE(); + nfscl_lockexcl(&op->nfso_own->nfsow_rwlock, NFSCLSTATEMUTEXPTR); + NFSUNLOCKCLSTATE(); + do { + error = nfscl_tryclose(op, tcred, nmp, p); + if (error == NFSERR_GRACE) + (void) nfs_catnap(PZERO, "nfs_close"); + } while (error == NFSERR_GRACE); + NFSLOCKCLSTATE(); + nfscl_lockunlock(&op->nfso_own->nfsow_rwlock); + NFSUNLOCKCLSTATE(); + + /* + * Move the lockowner to nfsc_defunctlockowner, + * so the Renew thread will do the ReleaseLockOwner + * Op on it later. There might still be other + * opens using the same lockowner name. + */ + lp = LIST_FIRST(&op->nfso_lock); + if (lp != NULL) { + while (LIST_NEXT(lp, nfsl_list) != NULL) + lp = LIST_NEXT(lp, nfsl_list); + LIST_PREPEND(&nmp->nm_clp->nfsc_defunctlockowner, + &op->nfso_lock, lp, nfsl_list); + LIST_INIT(&op->nfso_lock); + } + nfscl_freeopen(op, 0); + op = nop; + } + NFSFREECRED(tcred); +} + +/* + * The actual Close RPC. 
+ */ +APPLESTATIC int +nfsrpc_closerpc(struct nfsrv_descript *nd, struct nfsmount *nmp, + struct nfsclopen *op, struct ucred *cred, NFSPROC_T *p, + int syscred) +{ + u_int32_t *tl; + int error; + + nfscl_reqstart(nd, NFSPROC_CLOSE, nmp, op->nfso_fh, + op->nfso_fhlen, NULL); + NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED + NFSX_STATEID); + *tl++ = txdr_unsigned(op->nfso_own->nfsow_seqid); + *tl++ = op->nfso_stateid.seqid; + *tl++ = op->nfso_stateid.other[0]; + *tl++ = op->nfso_stateid.other[1]; + *tl = op->nfso_stateid.other[2]; + if (syscred) + nd->nd_flag |= ND_USEGSSNAME; + error = newnfs_request(nd, nmp, NULL, &nmp->nm_sockreq, NULL, p, cred, + NFS_PROG, NFS_VER4, NULL, 1, NULL); + if (error) + return (error); + NFSCL_INCRSEQID(op->nfso_own->nfsow_seqid, nd); + if (nd->nd_repstat == 0) + NFSM_DISSECT(tl, u_int32_t *, NFSX_STATEID); + error = nd->nd_repstat; + if (error == NFSERR_STALESTATEID) + nfscl_initiate_recovery(op->nfso_own->nfsow_clp); +nfsmout: + mbuf_freem(nd->nd_mrep); + return (error); +} + +/* + * V4 Open Confirm RPC. + */ +APPLESTATIC int +nfsrpc_openconfirm(vnode_t vp, u_int8_t *nfhp, int fhlen, + struct nfsclopen *op, struct ucred *cred, NFSPROC_T *p) +{ + u_int32_t *tl; + struct nfsrv_descript nfsd, *nd = &nfsd; + int error; + + nfscl_reqstart(nd, NFSPROC_OPENCONFIRM, VFSTONFS(vnode_mount(vp)), + nfhp, fhlen, NULL); + NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED + NFSX_STATEID); + *tl++ = op->nfso_stateid.seqid; + *tl++ = op->nfso_stateid.other[0]; + *tl++ = op->nfso_stateid.other[1]; + *tl++ = op->nfso_stateid.other[2]; + *tl = txdr_unsigned(op->nfso_own->nfsow_seqid); + error = nfscl_request(nd, vp, p, cred, NULL); + if (error) + return (error); + NFSCL_INCRSEQID(op->nfso_own->nfsow_seqid, nd); + if (!nd->nd_repstat) { + NFSM_DISSECT(tl, u_int32_t *, NFSX_STATEID); + op->nfso_stateid.seqid = *tl++; + op->nfso_stateid.other[0] = *tl++; + op->nfso_stateid.other[1] = *tl++; + op->nfso_stateid.other[2] = *tl; + } + error = nd->nd_repstat; + if (error == NFSERR_STALESTATEID) + nfscl_initiate_recovery(op->nfso_own->nfsow_clp); +nfsmout: + mbuf_freem(nd->nd_mrep); + return (error); +} + +/* + * Do the setclientid and setclientid confirm RPCs. Called from nfs_statfs() + * when a mount has just occurred and when the server replies NFSERR_EXPIRED. 
+ */ +APPLESTATIC int +nfsrpc_setclient(struct nfsmount *nmp, struct nfsclclient *clp, + struct ucred *cred, NFSPROC_T *p) +{ + u_int32_t *tl; + struct nfsrv_descript nfsd; + struct nfsrv_descript *nd = &nfsd; + nfsattrbit_t attrbits; + u_int8_t *cp = NULL, *cp2, addr[INET6_ADDRSTRLEN + 9]; + u_short port; + int error, isinet6, callblen; + nfsquad_t confirm; + u_int32_t lease; + static u_int32_t rev = 0; + + if (nfsboottime.tv_sec == 0) + NFSSETBOOTTIME(nfsboottime); + nfscl_reqstart(nd, NFSPROC_SETCLIENTID, nmp, NULL, 0, NULL); + NFSM_BUILD(tl, u_int32_t *, 2 * NFSX_UNSIGNED); + *tl++ = txdr_unsigned(nfsboottime.tv_sec); + *tl = txdr_unsigned(rev++); + (void) nfsm_strtom(nd, clp->nfsc_id, clp->nfsc_idlen); + + /* + * set up the callback address + */ + NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED); + *tl = txdr_unsigned(NFS_CALLBCKPROG); + callblen = strlen(nfsv4_callbackaddr); + if (callblen == 0) + cp = nfscl_getmyip(nmp, &isinet6); + if (nfscl_enablecallb && nfs_numnfscbd > 0 && + (callblen > 0 || cp != NULL)) { + port = htons(nfsv4_cbport); + cp2 = (u_int8_t *)&port; +#ifdef INET6 + if ((callblen > 0 && + strchr(nfsv4_callbackaddr, ':')) || isinet6) { + char ip6buf[INET6_ADDRSTRLEN], *ip6add; + + (void) nfsm_strtom(nd, "tcp6", 4); + if (callblen == 0) { + ip6_sprintf(ip6buf, (struct in6_addr *)cp); + ip6add = ip6buf; + } else { + ip6add = nfsv4_callbackaddr; + } + snprintf(addr, INET6_ADDRSTRLEN + 9, "%s.%d.%d", + ip6add, cp2[0], cp2[1]); + } else +#endif + { + (void) nfsm_strtom(nd, "tcp", 3); + if (callblen == 0) + snprintf(addr, INET6_ADDRSTRLEN + 9, + "%d.%d.%d.%d.%d.%d", cp[0], cp[1], + cp[2], cp[3], cp2[0], cp2[1]); + else + snprintf(addr, INET6_ADDRSTRLEN + 9, + "%s.%d.%d", nfsv4_callbackaddr, + cp2[0], cp2[1]); + } + (void) nfsm_strtom(nd, addr, strlen(addr)); + } else { + (void) nfsm_strtom(nd, "tcp", 3); + (void) nfsm_strtom(nd, "0.0.0.0.0.0", 11); + } + NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED); + *tl = txdr_unsigned(clp->nfsc_cbident); + nd->nd_flag |= ND_USEGSSNAME; + error = newnfs_request(nd, nmp, NULL, &nmp->nm_sockreq, NULL, p, cred, + NFS_PROG, NFS_VER4, NULL, 1, NULL); + if (error) + return (error); + if (nd->nd_repstat == 0) { + NFSM_DISSECT(tl, u_int32_t *, 4 * NFSX_UNSIGNED); + clp->nfsc_clientid.lval[0] = *tl++; + clp->nfsc_clientid.lval[1] = *tl++; + confirm.lval[0] = *tl++; + confirm.lval[1] = *tl; + mbuf_freem(nd->nd_mrep); + nd->nd_mrep = NULL; + + /* + * and confirm it. 
+ */
+ nfscl_reqstart(nd, NFSPROC_SETCLIENTIDCFRM, nmp, NULL, 0, NULL);
+ NFSM_BUILD(tl, u_int32_t *, 4 * NFSX_UNSIGNED);
+ *tl++ = clp->nfsc_clientid.lval[0];
+ *tl++ = clp->nfsc_clientid.lval[1];
+ *tl++ = confirm.lval[0];
+ *tl = confirm.lval[1];
+ nd->nd_flag |= ND_USEGSSNAME;
+ error = newnfs_request(nd, nmp, NULL, &nmp->nm_sockreq, NULL, p,
+ cred, NFS_PROG, NFS_VER4, NULL, 1, NULL);
+ if (error)
+ return (error);
+ mbuf_freem(nd->nd_mrep);
+ nd->nd_mrep = NULL;
+ if (nd->nd_repstat == 0) {
+ nfscl_reqstart(nd, NFSPROC_GETATTR, nmp, nmp->nm_fh,
+ nmp->nm_fhsize, NULL);
+ NFSZERO_ATTRBIT(&attrbits);
+ NFSSETBIT_ATTRBIT(&attrbits, NFSATTRBIT_LEASETIME);
+ (void) nfsrv_putattrbit(nd, &attrbits);
+ nd->nd_flag |= ND_USEGSSNAME;
+ error = newnfs_request(nd, nmp, NULL, &nmp->nm_sockreq, NULL, p,
+ cred, NFS_PROG, NFS_VER4, NULL, 1, NULL);
+ if (error)
+ return (error);
+ if (nd->nd_repstat == 0) {
+ error = nfsv4_loadattr(nd, NULL, NULL, NULL, NULL, 0, NULL,
+ NULL, NULL, NULL, NULL, 0, NULL, &lease, NULL, p, cred);
+ if (error)
+ goto nfsmout;
+ clp->nfsc_renew = NFSCL_RENEW(lease);
+ clp->nfsc_expire = NFSD_MONOSEC + clp->nfsc_renew;
+ clp->nfsc_clientidrev++;
+ if (clp->nfsc_clientidrev == 0)
+ clp->nfsc_clientidrev++;
+ }
+ }
+ }
+ error = nd->nd_repstat;
+nfsmout:
+ mbuf_freem(nd->nd_mrep);
+ return (error);
+}
+
+/*
+ * nfs getattr call.
+ */
+APPLESTATIC int
+nfsrpc_getattr(vnode_t vp, struct ucred *cred, NFSPROC_T *p,
+ struct nfsvattr *nap, void *stuff)
+{
+ struct nfsrv_descript nfsd, *nd = &nfsd;
+ int error;
+ nfsattrbit_t attrbits;
+
+ NFSCL_REQSTART(nd, NFSPROC_GETATTR, vp);
+ if (nd->nd_flag & ND_NFSV4) {
+ NFSGETATTR_ATTRBIT(&attrbits);
+ (void) nfsrv_putattrbit(nd, &attrbits);
+ }
+ error = nfscl_request(nd, vp, p, cred, stuff);
+ if (error)
+ return (error);
+ if (!nd->nd_repstat)
+ error = nfsm_loadattr(nd, nap);
+ else
+ error = nd->nd_repstat;
+ mbuf_freem(nd->nd_mrep);
+ return (error);
+}
+
+/*
+ * nfs getattr call with non-vnode arguments.
+ */
+APPLESTATIC int
+nfsrpc_getattrnovp(struct nfsmount *nmp, u_int8_t *fhp, int fhlen, int syscred,
+ struct ucred *cred, NFSPROC_T *p, struct nfsvattr *nap, u_int64_t *xidp)
+{
+ struct nfsrv_descript nfsd, *nd = &nfsd;
+ int error, vers = NFS_VER2;
+ nfsattrbit_t attrbits;
+
+ nfscl_reqstart(nd, NFSPROC_GETATTR, nmp, fhp, fhlen, NULL);
+ if (nd->nd_flag & ND_NFSV4) {
+ vers = NFS_VER4;
+ NFSGETATTR_ATTRBIT(&attrbits);
+ (void) nfsrv_putattrbit(nd, &attrbits);
+ } else if (nd->nd_flag & ND_NFSV3) {
+ vers = NFS_VER3;
+ }
+ if (syscred)
+ nd->nd_flag |= ND_USEGSSNAME;
+ error = newnfs_request(nd, nmp, NULL, &nmp->nm_sockreq, NULL, p, cred,
+ NFS_PROG, vers, NULL, 1, xidp);
+ if (error)
+ return (error);
+ if (!nd->nd_repstat)
+ error = nfsm_loadattr(nd, nap);
+ else
+ error = nd->nd_repstat;
+ mbuf_freem(nd->nd_mrep);
+ return (error);
+}
+
+/*
+ * Do an nfs setattr operation.
+ */ +APPLESTATIC int +nfsrpc_setattr(vnode_t vp, struct vattr *vap, NFSACL_T *aclp, + struct ucred *cred, NFSPROC_T *p, struct nfsvattr *rnap, int *attrflagp, + void *stuff) +{ + int error, expireret = 0, openerr, retrycnt; + u_int32_t clidrev = 0, mode; + struct nfsmount *nmp = VFSTONFS(vnode_mount(vp)); + struct nfsfh *nfhp; + nfsv4stateid_t stateid; + void *lckp; + + if (nmp->nm_clp != NULL) + clidrev = nmp->nm_clp->nfsc_clientidrev; + if (vap != NULL && NFSATTRISSET(u_quad_t, vap, va_size)) + mode = NFSV4OPEN_ACCESSWRITE; + else + mode = NFSV4OPEN_ACCESSREAD; + retrycnt = 0; + do { + lckp = NULL; + openerr = 1; + if (NFSHASNFSV4(nmp)) { + nfhp = VTONFS(vp)->n_fhp; + error = nfscl_getstateid(vp, nfhp->nfh_fh, + nfhp->nfh_len, mode, cred, p, &stateid, &lckp); + if (error && vnode_vtype(vp) == VREG && + (mode == NFSV4OPEN_ACCESSWRITE || + nfstest_openallsetattr)) { + /* + * No Open stateid, so try and open the file + * now. + */ + if (mode == NFSV4OPEN_ACCESSWRITE) + openerr = nfsrpc_open(vp, FWRITE, cred, + p); + else + openerr = nfsrpc_open(vp, FREAD, cred, + p); + if (!openerr) + (void) nfscl_getstateid(vp, + nfhp->nfh_fh, nfhp->nfh_len, + mode, cred, p, &stateid, &lckp); + } + } + if (vap != NULL) + error = nfsrpc_setattrrpc(vp, vap, &stateid, cred, p, + rnap, attrflagp, stuff); +#ifdef NFS4_ACL_EXTATTR_NAME + else + error = nfsrpc_setaclrpc(vp, cred, p, aclp, &stateid, + stuff); +#else + else + error = EOPNOTSUPP; +#endif + if (error == NFSERR_STALESTATEID) + nfscl_initiate_recovery(nmp->nm_clp); + if (lckp != NULL) + nfscl_lockderef(lckp); + if (!openerr) + (void) nfsrpc_close(vp, cred, p); + if (error == NFSERR_GRACE || error == NFSERR_STALESTATEID || + error == NFSERR_STALEDONTRECOVER || error == NFSERR_DELAY || + error == NFSERR_OLDSTATEID) { + (void) nfs_catnap(PZERO, "nfs_setattr"); + } else if ((error == NFSERR_EXPIRED || + error == NFSERR_BADSTATEID) && clidrev != 0) { + expireret = nfscl_hasexpired(nmp->nm_clp, clidrev, p); + } + retrycnt++; + } while (error == NFSERR_GRACE || error == NFSERR_STALESTATEID || + error == NFSERR_STALEDONTRECOVER || error == NFSERR_DELAY || + (error == NFSERR_OLDSTATEID && retrycnt < 20) || + ((error == NFSERR_EXPIRED || error == NFSERR_BADSTATEID) && + expireret == 0 && clidrev != 0 && retrycnt < 4)); + if (error && retrycnt >= 4) + error = EIO; + return (error); +} + +static int +nfsrpc_setattrrpc(vnode_t vp, struct vattr *vap, + nfsv4stateid_t *stateidp, struct ucred *cred, NFSPROC_T *p, + struct nfsvattr *rnap, int *attrflagp, void *stuff) +{ + u_int32_t *tl; + struct nfsrv_descript nfsd, *nd = &nfsd; + int error; + nfsattrbit_t attrbits; + + *attrflagp = 0; + NFSCL_REQSTART(nd, NFSPROC_SETATTR, vp); + if (nd->nd_flag & ND_NFSV4) + nfsm_stateidtom(nd, stateidp, NFSSTATEID_PUTSTATEID); + vap->va_type = vnode_vtype(vp); + nfscl_fillsattr(nd, vap, vp, NFSSATTR_FULL, 0); + if (nd->nd_flag & ND_NFSV3) { + NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED); + *tl = newnfs_false; + } else if (nd->nd_flag & ND_NFSV4) { + NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED); + *tl = txdr_unsigned(NFSV4OP_GETATTR); + NFSGETATTR_ATTRBIT(&attrbits); + (void) nfsrv_putattrbit(nd, &attrbits); + } + error = nfscl_request(nd, vp, p, cred, stuff); + if (error) + return (error); + if (nd->nd_flag & (ND_NFSV3 | ND_NFSV4)) + error = nfscl_wcc_data(nd, vp, rnap, attrflagp, NULL, stuff); + if ((nd->nd_flag & ND_NFSV4) && !error) + error = nfsrv_getattrbits(nd, &attrbits, NULL, NULL); + if (!(nd->nd_flag & ND_NFSV3) && !nd->nd_repstat && !error) + error = nfscl_postop_attr(nd, 
rnap, attrflagp, stuff); + mbuf_freem(nd->nd_mrep); + if (nd->nd_repstat && !error) + error = nd->nd_repstat; + return (error); +} + +/* + * nfs lookup rpc + */ +APPLESTATIC int +nfsrpc_lookup(vnode_t dvp, char *name, int len, struct ucred *cred, + NFSPROC_T *p, struct nfsvattr *dnap, struct nfsvattr *nap, + struct nfsfh **nfhpp, int *attrflagp, int *dattrflagp, void *stuff) +{ + u_int32_t *tl; + struct nfsrv_descript nfsd, *nd = &nfsd; + struct nfsmount *nmp; + struct nfsnode *np; + struct nfsfh *nfhp; + nfsattrbit_t attrbits; + int error = 0, lookupp = 0; + + *attrflagp = 0; + *dattrflagp = 0; + if (vnode_vtype(dvp) != VDIR) + return (ENOTDIR); + nmp = VFSTONFS(vnode_mount(dvp)); + if (len > NFS_MAXNAMLEN) + return (ENAMETOOLONG); + if (NFSHASNFSV4(nmp) && len == 1 && + name[0] == '.') { + /* + * Just return the current dir's fh. + */ + np = VTONFS(dvp); + MALLOC(nfhp, struct nfsfh *, sizeof (struct nfsfh) + + np->n_fhp->nfh_len, M_NFSFH, M_WAITOK); + nfhp->nfh_len = np->n_fhp->nfh_len; + NFSBCOPY(np->n_fhp->nfh_fh, nfhp->nfh_fh, nfhp->nfh_len); + *nfhpp = nfhp; + return (0); + } + if (NFSHASNFSV4(nmp) && len == 2 && + name[0] == '.' && name[1] == '.') { + lookupp = 1; + NFSCL_REQSTART(nd, NFSPROC_LOOKUPP, dvp); + } else { + NFSCL_REQSTART(nd, NFSPROC_LOOKUP, dvp); + (void) nfsm_strtom(nd, name, len); + } + if (nd->nd_flag & ND_NFSV4) { + NFSGETATTR_ATTRBIT(&attrbits); + NFSM_BUILD(tl, u_int32_t *, 2 * NFSX_UNSIGNED); + *tl++ = txdr_unsigned(NFSV4OP_GETFH); + *tl = txdr_unsigned(NFSV4OP_GETATTR); + (void) nfsrv_putattrbit(nd, &attrbits); + } + error = nfscl_request(nd, dvp, p, cred, stuff); + if (error) + return (error); + if (nd->nd_repstat) { + /* + * When an NFSv4 Lookupp returns ENOENT, it means that + * the lookup is at the root of an fs, so return this dir. + */ + if (nd->nd_repstat == NFSERR_NOENT && lookupp) { + np = VTONFS(dvp); + MALLOC(nfhp, struct nfsfh *, sizeof (struct nfsfh) + + np->n_fhp->nfh_len, M_NFSFH, M_WAITOK); + nfhp->nfh_len = np->n_fhp->nfh_len; + NFSBCOPY(np->n_fhp->nfh_fh, nfhp->nfh_fh, nfhp->nfh_len); + *nfhpp = nfhp; + mbuf_freem(nd->nd_mrep); + return (0); + } + if (nd->nd_flag & ND_NFSV3) + error = nfscl_postop_attr(nd, dnap, dattrflagp, stuff); + goto nfsmout; + } + if ((nd->nd_flag & (ND_NFSV4 | ND_NOMOREDATA)) == ND_NFSV4) { + NFSM_DISSECT(tl, u_int32_t *, 2 * NFSX_UNSIGNED); + if (*(tl + 1)) { + nd->nd_flag |= ND_NOMOREDATA; + goto nfsmout; + } + } + error = nfsm_getfh(nd, nfhpp); + if (error) + goto nfsmout; + + error = nfscl_postop_attr(nd, nap, attrflagp, stuff); + if ((nd->nd_flag & ND_NFSV3) && !error) + error = nfscl_postop_attr(nd, dnap, dattrflagp, stuff); +nfsmout: + mbuf_freem(nd->nd_mrep); + if (!error && nd->nd_repstat) + error = nd->nd_repstat; + return (error); +} + +/* + * Do a readlink rpc. + */ +APPLESTATIC int +nfsrpc_readlink(vnode_t vp, struct uio *uiop, struct ucred *cred, + NFSPROC_T *p, struct nfsvattr *nap, int *attrflagp, void *stuff) +{ + u_int32_t *tl; + struct nfsrv_descript nfsd, *nd = &nfsd; + struct nfsnode *np = VTONFS(vp); + nfsattrbit_t attrbits; + int error, len, cangetattr = 1; + + *attrflagp = 0; + NFSCL_REQSTART(nd, NFSPROC_READLINK, vp); + if (nd->nd_flag & ND_NFSV4) { + /* + * And do a Getattr op. 
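+ * (Every NFSv4 request is a compound, so the wire request built here
+ * is roughly PutFH, Readlink, Getattr(attrbits); tacking the Getattr
+ * on refreshes the cached attributes in the same round trip.)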
+ */ + NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED); + *tl = txdr_unsigned(NFSV4OP_GETATTR); + NFSGETATTR_ATTRBIT(&attrbits); + (void) nfsrv_putattrbit(nd, &attrbits); + } + error = nfscl_request(nd, vp, p, cred, stuff); + if (error) + return (error); + if (nd->nd_flag & ND_NFSV3) + error = nfscl_postop_attr(nd, nap, attrflagp, stuff); + if (!nd->nd_repstat && !error) { + NFSM_STRSIZ(len, NFS_MAXPATHLEN); + /* + * This seems weird to me, but must have been added to + * FreeBSD for some reason. The only thing I can think of + * is that there was/is some server that replies with + * more link data than it should? + */ + if (len == NFS_MAXPATHLEN) { + NFSLOCKNODE(np); + if (np->n_size > 0 && np->n_size < NFS_MAXPATHLEN) { + len = np->n_size; + cangetattr = 0; + } + NFSUNLOCKNODE(np); + } + error = nfsm_mbufuio(nd, uiop, len); + if ((nd->nd_flag & ND_NFSV4) && !error && cangetattr) + error = nfscl_postop_attr(nd, nap, attrflagp, stuff); + } + if (nd->nd_repstat && !error) + error = nd->nd_repstat; +nfsmout: + mbuf_freem(nd->nd_mrep); + return (error); +} + +/* + * Read operation. + */ +APPLESTATIC int +nfsrpc_read(vnode_t vp, struct uio *uiop, struct ucred *cred, + NFSPROC_T *p, struct nfsvattr *nap, int *attrflagp, void *stuff) +{ + int error, expireret = 0, retrycnt; + u_int32_t clidrev = 0; + struct nfsmount *nmp = VFSTONFS(vnode_mount(vp)); + struct nfsnode *np = VTONFS(vp); + struct ucred *newcred; + struct nfsfh *nfhp = NULL; + nfsv4stateid_t stateid; + void *lckp; + + if (nmp->nm_clp != NULL) + clidrev = nmp->nm_clp->nfsc_clientidrev; + newcred = cred; + if (NFSHASNFSV4(nmp)) { + nfhp = np->n_fhp; + if (p == NULL) + newcred = NFSNEWCRED(cred); + } + retrycnt = 0; + do { + lckp = NULL; + if (NFSHASNFSV4(nmp)) + (void)nfscl_getstateid(vp, nfhp->nfh_fh, nfhp->nfh_len, + NFSV4OPEN_ACCESSREAD, newcred, p, &stateid, &lckp); + error = nfsrpc_readrpc(vp, uiop, newcred, &stateid, p, nap, + attrflagp, stuff); + if (error == NFSERR_STALESTATEID) + nfscl_initiate_recovery(nmp->nm_clp); + if (lckp != NULL) + nfscl_lockderef(lckp); + if (error == NFSERR_GRACE || error == NFSERR_STALESTATEID || + error == NFSERR_STALEDONTRECOVER || error == NFSERR_DELAY || + error == NFSERR_OLDSTATEID) { + (void) nfs_catnap(PZERO, "nfs_read"); + } else if ((error == NFSERR_EXPIRED || + error == NFSERR_BADSTATEID) && clidrev != 0) { + expireret = nfscl_hasexpired(nmp->nm_clp, clidrev, p); + } + retrycnt++; + } while (error == NFSERR_GRACE || error == NFSERR_STALESTATEID || + error == NFSERR_STALEDONTRECOVER || error == NFSERR_DELAY || + (error == NFSERR_OLDSTATEID && retrycnt < 20) || + ((error == NFSERR_EXPIRED || error == NFSERR_BADSTATEID) && + expireret == 0 && clidrev != 0 && retrycnt < 4)); + if (error && retrycnt >= 4) + error = EIO; + if (NFSHASNFSV4(nmp) && p == NULL) + NFSFREECRED(newcred); + return (error); +} + +/* + * The actual read RPC. + */ +static int +nfsrpc_readrpc(vnode_t vp, struct uio *uiop, struct ucred *cred, + nfsv4stateid_t *stateidp, NFSPROC_T *p, struct nfsvattr *nap, + int *attrflagp, void *stuff) +{ + u_int32_t *tl; + int error = 0, len, retlen, tsiz, eof = 0; + struct nfsrv_descript nfsd; + struct nfsmount *nmp = VFSTONFS(vnode_mount(vp)); + struct nfsrv_descript *nd = &nfsd; + + *attrflagp = 0; + tsiz = uio_uio_resid(uiop); + if (uiop->uio_offset + tsiz > 0xffffffff && + !NFSHASNFSV3OR4(nmp)) + return (EFBIG); + nd->nd_mrep = NULL; + while (tsiz > 0) { + *attrflagp = 0; + len = (tsiz > nmp->nm_rsize) ? 
nmp->nm_rsize : tsiz; + NFSCL_REQSTART(nd, NFSPROC_READ, vp); + if (nd->nd_flag & ND_NFSV4) + nfsm_stateidtom(nd, stateidp, NFSSTATEID_PUTSTATEID); + NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED * 3); + if (nd->nd_flag & ND_NFSV2) { + *tl++ = txdr_unsigned(uiop->uio_offset); + *tl++ = txdr_unsigned(len); + *tl = 0; + } else { + txdr_hyper(uiop->uio_offset, tl); + *(tl + 2) = txdr_unsigned(len); + } + /* + * Since I can't do a Getattr for NFSv4 for Write, there + * doesn't seem any point in doing one here, either. + * (See the comment in nfsrpc_writerpc() for more info.) + */ + error = nfscl_request(nd, vp, p, cred, stuff); + if (error) + return (error); + if (nd->nd_flag & ND_NFSV3) { + error = nfscl_postop_attr(nd, nap, attrflagp, stuff); + } else if (!nd->nd_repstat && (nd->nd_flag & ND_NFSV2)) { + error = nfsm_loadattr(nd, nap); + if (!error) + *attrflagp = 1; + } + if (nd->nd_repstat || error) { + if (!error) + error = nd->nd_repstat; + goto nfsmout; + } + if (nd->nd_flag & ND_NFSV3) { + NFSM_DISSECT(tl, u_int32_t *, 2 * NFSX_UNSIGNED); + eof = fxdr_unsigned(int, *(tl + 1)); + } else if (nd->nd_flag & ND_NFSV4) { + NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED); + eof = fxdr_unsigned(int, *tl); + } + NFSM_STRSIZ(retlen, nmp->nm_rsize); + error = nfsm_mbufuio(nd, uiop, retlen); + if (error) + goto nfsmout; + mbuf_freem(nd->nd_mrep); + nd->nd_mrep = NULL; + tsiz -= retlen; + if (!(nd->nd_flag & ND_NFSV2)) { + if (eof || retlen == 0) + tsiz = 0; + } else if (retlen < len) + tsiz = 0; + } + return (0); +nfsmout: + if (nd->nd_mrep != NULL) + mbuf_freem(nd->nd_mrep); + return (error); +} + +/* + * nfs write operation + */ +APPLESTATIC int +nfsrpc_write(vnode_t vp, struct uio *uiop, int *iomode, u_char *verfp, + struct ucred *cred, NFSPROC_T *p, struct nfsvattr *nap, int *attrflagp, + void *stuff) +{ + int error, expireret = 0, retrycnt, nostateid; + u_int32_t clidrev = 0; + struct nfsmount *nmp = VFSTONFS(vnode_mount(vp)); + struct nfsnode *np = VTONFS(vp); + struct ucred *newcred; + struct nfsfh *nfhp = NULL; + nfsv4stateid_t stateid; + void *lckp; + + if (nmp->nm_clp != NULL) + clidrev = nmp->nm_clp->nfsc_clientidrev; + newcred = cred; + if (NFSHASNFSV4(nmp)) { + if (p == NULL) + newcred = NFSNEWCRED(cred); + nfhp = np->n_fhp; + } + retrycnt = 0; + do { + lckp = NULL; + nostateid = 0; + if (NFSHASNFSV4(nmp)) { + (void)nfscl_getstateid(vp, nfhp->nfh_fh, nfhp->nfh_len, + NFSV4OPEN_ACCESSWRITE, newcred, p, &stateid, &lckp); + if (stateid.other[0] == 0 && stateid.other[1] == 0 && + stateid.other[2] == 0) { + nostateid = 1; + printf("stateid0 in write\n"); + } + } + + /* + * If there is no stateid for NFSv4, it means this is an + * extraneous write after close. Basically a poorly + * implemented buffer cache. Just don't do the write. 
+ */ + if (nostateid) + error = 0; + else + error = nfsrpc_writerpc(vp, uiop, iomode, verfp, + newcred, &stateid, p, nap, attrflagp, stuff); +if (error == NFSERR_BADSTATEID) { +printf("st=0x%x 0x%x 0x%x\n",stateid.other[0],stateid.other[1],stateid.other[2]); +nfscl_dumpstate(nmp, 1, 1, 0, 0); +} + if (error == NFSERR_STALESTATEID) + nfscl_initiate_recovery(nmp->nm_clp); + if (lckp != NULL) + nfscl_lockderef(lckp); + if (error == NFSERR_GRACE || error == NFSERR_STALESTATEID || + error == NFSERR_STALEDONTRECOVER || error == NFSERR_DELAY || + error == NFSERR_OLDSTATEID) { + (void) nfs_catnap(PZERO, "nfs_write"); + } else if ((error == NFSERR_EXPIRED || + error == NFSERR_BADSTATEID) && clidrev != 0) { + expireret = nfscl_hasexpired(nmp->nm_clp, clidrev, p); + } + retrycnt++; + } while (error == NFSERR_GRACE || error == NFSERR_STALESTATEID || + error == NFSERR_STALEDONTRECOVER || error == NFSERR_DELAY || + (error == NFSERR_OLDSTATEID && retrycnt < 20) || + ((error == NFSERR_EXPIRED || error == NFSERR_BADSTATEID) && + expireret == 0 && clidrev != 0 && retrycnt < 4)); + if (error && retrycnt >= 4) + error = EIO; + if (NFSHASNFSV4(nmp) && p == NULL) + NFSFREECRED(newcred); + return (error); +} + +/* + * The actual write RPC. + */ +static int +nfsrpc_writerpc(vnode_t vp, struct uio *uiop, int *iomode, + u_char *verfp, struct ucred *cred, nfsv4stateid_t *stateidp, + NFSPROC_T *p, struct nfsvattr *nap, int *attrflagp, void *stuff) +{ + u_int32_t *tl; + struct nfsmount *nmp = VFSTONFS(vnode_mount(vp)); + struct nfsnode *np = VTONFS(vp); + int error = 0, len, tsiz, rlen, commit, committed = NFSWRITE_FILESYNC; + int wccflag = 0, wsize; + int32_t backup; + struct nfsrv_descript nfsd; + struct nfsrv_descript *nd = &nfsd; + nfsattrbit_t attrbits; + +#ifdef DIAGNOSTIC + if (uiop->uio_iovcnt != 1) + panic("nfs: writerpc iovcnt > 1"); +#endif + *attrflagp = 0; + tsiz = uio_uio_resid(uiop); + NFSLOCKMNT(nmp); + if (uiop->uio_offset + tsiz > 0xffffffff && + !NFSHASNFSV3OR4(nmp)) { + NFSUNLOCKMNT(nmp); + return (EFBIG); + } + wsize = nmp->nm_wsize; + NFSUNLOCKMNT(nmp); + nd->nd_mrep = NULL; /* NFSv2 sometimes does a write with */ + nd->nd_repstat = 0; /* uio_resid == 0, so the while is not done */ + while (tsiz > 0) { + nmp = VFSTONFS(vnode_mount(vp)); + if (nmp == NULL) { + error = ENXIO; + goto nfsmout; + } + *attrflagp = 0; + len = (tsiz > wsize) ? wsize : tsiz; + NFSCL_REQSTART(nd, NFSPROC_WRITE, vp); + if (nd->nd_flag & ND_NFSV4) { + nfsm_stateidtom(nd, stateidp, NFSSTATEID_PUTSTATEID); + NFSM_BUILD(tl, u_int32_t *, NFSX_HYPER+2*NFSX_UNSIGNED); + txdr_hyper(uiop->uio_offset, tl); + tl += 2; + *tl++ = txdr_unsigned(*iomode); + *tl = txdr_unsigned(len); + } else if (nd->nd_flag & ND_NFSV3) { + NFSM_BUILD(tl, u_int32_t *, NFSX_HYPER+3*NFSX_UNSIGNED); + txdr_hyper(uiop->uio_offset, tl); + tl += 2; + *tl++ = txdr_unsigned(len); + *tl++ = txdr_unsigned(*iomode); + *tl = txdr_unsigned(len); + } else { + u_int32_t x; + + NFSM_BUILD(tl, u_int32_t *, 4 * NFSX_UNSIGNED); + /* + * Not sure why someone changed this, since the + * RFC clearly states that "beginoffset" and + * "totalcount" are ignored, but it wouldn't + * surprise me if there's a busted server out there. + */ + /* Set both "begin" and "current" to non-garbage. 
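+ * A sketch of the v2 writeargs being built (RFC 1094); the last word
+ * is, as far as I can tell, the XDR length of the data that
+ * nfsm_uiombuf() appends:
+ *	beginoffset, offset, totalcount, len, <len bytes of data>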
*/
+ x = txdr_unsigned((u_int32_t)uiop->uio_offset);
+ *tl++ = x; /* "begin offset" */
+ *tl++ = x; /* "current offset" */
+ x = txdr_unsigned(len);
+ *tl++ = x; /* total to this offset */
+ *tl = x; /* size of this write */
+
+ }
+ nfsm_uiombuf(nd, uiop, len);
+ /*
+ * Although it is tempting to do a normal Getattr Op in the
+ * NFSv4 compound, the result can be a nearly hung client
+ * system if the Getattr asks for Owner and/or OwnerGroup.
+ * It occurs when the client can't map either the Owner or
+ * Owner_group name in the Getattr reply to a uid/gid. When
+ * there is a cache miss, the kernel does an upcall to the
+ * nfsuserd. Then, it can try and read the local /etc/passwd
+ * or /etc/group file. It can then block in getnewbuf(),
+ * waiting for dirty writes to be pushed to the NFS server.
+ * The only reason this doesn't result in a complete
+ * deadlock, is that the upcall times out and allows
+ * the write to complete. However, progress is so slow
+ * that it might just as well be deadlocked.
+ * So, we just get the attributes that change with each
+ * write Op.
+ * nb: nfscl_loadattrcache() needs to be told that these
+ * partial attributes from a write rpc are being
+ * passed in, via an argument flag.
+ */
+ if (nd->nd_flag & ND_NFSV4) {
+ NFSWRITEGETATTR_ATTRBIT(&attrbits);
+ NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+ *tl = txdr_unsigned(NFSV4OP_GETATTR);
+ (void) nfsrv_putattrbit(nd, &attrbits);
+ }
+ error = nfscl_request(nd, vp, p, cred, stuff);
+ if (error)
+ return (error);
+ if (nd->nd_repstat) {
+ /*
+ * In case the rpc gets retried, roll back
+ * the uio fields changed by nfsm_uiombuf().
+ */
+ uiop->uio_offset -= len;
+ uio_uio_resid_add(uiop, len);
+ uio_iov_base_add(uiop, -len);
+ uio_iov_len_add(uiop, len);
+ }
+ if (nd->nd_flag & (ND_NFSV3 | ND_NFSV4)) {
+ error = nfscl_wcc_data(nd, vp, nap, attrflagp,
+ &wccflag, stuff);
+ if (error)
+ goto nfsmout;
+ }
+ if (!nd->nd_repstat) {
+ if (nd->nd_flag & (ND_NFSV3 | ND_NFSV4)) {
+ NFSM_DISSECT(tl, u_int32_t *, 2 * NFSX_UNSIGNED
+ + NFSX_VERF);
+ rlen = fxdr_unsigned(int, *tl++);
+ if (rlen == 0) {
+ error = NFSERR_IO;
+ goto nfsmout;
+ } else if (rlen < len) {
+ backup = len - rlen;
+ uio_iov_base_add(uiop, -(backup));
+ uio_iov_len_add(uiop, backup);
+ uiop->uio_offset -= backup;
+ uio_uio_resid_add(uiop, backup);
+ len = rlen;
+ }
+ commit = fxdr_unsigned(int, *tl++);
+
+ /*
+ * Return the lowest commitment level
+ * obtained by any of the RPCs.
+ */
+ if (committed == NFSWRITE_FILESYNC)
+ committed = commit;
+ else if (committed == NFSWRITE_DATASYNC &&
+ commit == NFSWRITE_UNSTABLE)
+ committed = commit;
+ if (verfp != NULL)
+ NFSBCOPY((caddr_t)tl, verfp, NFSX_VERF);
+ NFSLOCKMNT(nmp);
+ if (!NFSHASWRITEVERF(nmp)) {
+ NFSBCOPY((caddr_t)tl,
+ (caddr_t)&nmp->nm_verf[0],
+ NFSX_VERF);
+ NFSSETWRITEVERF(nmp);
+ }
+ NFSUNLOCKMNT(nmp);
+ }
+ if (nd->nd_flag & ND_NFSV4)
+ NFSM_DISSECT(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
+ if (nd->nd_flag & (ND_NFSV2 | ND_NFSV4)) {
+ error = nfsm_loadattr(nd, nap);
+ if (!error)
+ *attrflagp = NFS_LATTR_NOSHRINK;
+ }
+ } else {
+ error = nd->nd_repstat;
+ }
+ if (error)
+ goto nfsmout;
+ NFSWRITERPC_SETTIME(wccflag, np, (nd->nd_flag & ND_NFSV4));
+ mbuf_freem(nd->nd_mrep);
+ nd->nd_mrep = NULL;
+ tsiz -= len;
+ }
+nfsmout:
+ if (nd->nd_mrep != NULL)
+ mbuf_freem(nd->nd_mrep);
+ *iomode = committed;
+ if (nd->nd_repstat && !error)
+ error = nd->nd_repstat;
+ return (error);
+}
+
+/*
+ * nfs mknod rpc
+ * For NFS v2 this is a kludge.
Use a create rpc but with the IFMT bits of the
+ * mode set to specify the file type and the size field for rdev.
+ */
+APPLESTATIC int
+nfsrpc_mknod(vnode_t dvp, char *name, int namelen, struct vattr *vap,
+ u_int32_t rdev, enum vtype vtyp, struct ucred *cred, NFSPROC_T *p,
+ struct nfsvattr *dnap, struct nfsvattr *nnap, struct nfsfh **nfhpp,
+ int *attrflagp, int *dattrflagp, void *dstuff)
+{
+ u_int32_t *tl;
+ int error = 0;
+ struct nfsrv_descript nfsd, *nd = &nfsd;
+ nfsattrbit_t attrbits;
+
+ *nfhpp = NULL;
+ *attrflagp = 0;
+ *dattrflagp = 0;
+ if (namelen > NFS_MAXNAMLEN)
+ return (ENAMETOOLONG);
+ NFSCL_REQSTART(nd, NFSPROC_MKNOD, dvp);
+ if (nd->nd_flag & ND_NFSV4) {
+ NFSM_BUILD(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
+ *tl++ = vtonfsv34_type(vtyp);
+ *tl++ = txdr_unsigned(NFSMAJOR(rdev));
+ *tl = txdr_unsigned(NFSMINOR(rdev));
+ }
+ (void) nfsm_strtom(nd, name, namelen);
+ if (nd->nd_flag & ND_NFSV3) {
+ NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+ *tl = vtonfsv34_type(vtyp);
+ }
+ if (nd->nd_flag & (ND_NFSV3 | ND_NFSV4))
+ nfscl_fillsattr(nd, vap, dvp, 0, 0);
+ if ((nd->nd_flag & ND_NFSV3) &&
+ (vtyp == VCHR || vtyp == VBLK)) {
+ NFSM_BUILD(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
+ *tl++ = txdr_unsigned(NFSMAJOR(rdev));
+ *tl = txdr_unsigned(NFSMINOR(rdev));
+ }
+ if (nd->nd_flag & ND_NFSV4) {
+ NFSGETATTR_ATTRBIT(&attrbits);
+ NFSM_BUILD(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
+ *tl++ = txdr_unsigned(NFSV4OP_GETFH);
+ *tl = txdr_unsigned(NFSV4OP_GETATTR);
+ (void) nfsrv_putattrbit(nd, &attrbits);
+ }
+ if (nd->nd_flag & ND_NFSV2)
+ nfscl_fillsattr(nd, vap, dvp, NFSSATTR_SIZERDEV, rdev);
+ error = nfscl_request(nd, dvp, p, cred, dstuff);
+ if (error)
+ return (error);
+ if (nd->nd_flag & ND_NFSV4)
+ error = nfscl_wcc_data(nd, dvp, dnap, dattrflagp, NULL, dstuff);
+ if (!nd->nd_repstat) {
+ if (nd->nd_flag & ND_NFSV4) {
+ NFSM_DISSECT(tl, u_int32_t *, 5 * NFSX_UNSIGNED);
+ error = nfsrv_getattrbits(nd, &attrbits, NULL, NULL);
+ if (error)
+ goto nfsmout;
+ }
+ error = nfscl_mtofh(nd, nfhpp, nnap, attrflagp);
+ if (error)
+ goto nfsmout;
+ }
+ if (nd->nd_flag & ND_NFSV3)
+ error = nfscl_wcc_data(nd, dvp, dnap, dattrflagp, NULL, dstuff);
+ if (!error && nd->nd_repstat)
+ error = nd->nd_repstat;
+nfsmout:
+ mbuf_freem(nd->nd_mrep);
+ return (error);
+}
+
+/*
+ * nfs file create call
+ * Mostly just call the appropriate routine. (I separated out v4, so that
+ * error recovery wouldn't be as difficult.)
+ */ +APPLESTATIC int +nfsrpc_create(vnode_t dvp, char *name, int namelen, struct vattr *vap, + nfsquad_t cverf, int fmode, struct ucred *cred, NFSPROC_T *p, + struct nfsvattr *dnap, struct nfsvattr *nnap, struct nfsfh **nfhpp, + int *attrflagp, int *dattrflagp, void *dstuff) +{ + int error = 0, newone, expireret = 0, retrycnt, unlocked; + struct nfsclowner *owp; + struct nfscldeleg *dp; + struct nfsmount *nmp = VFSTONFS(vnode_mount(dvp)); + u_int32_t clidrev; + + if (NFSHASNFSV4(nmp)) { + retrycnt = 0; + do { + dp = NULL; + error = nfscl_open(dvp, NULL, 0, (NFSV4OPEN_ACCESSWRITE | + NFSV4OPEN_ACCESSREAD), 0, cred, p, &owp, NULL, &newone, + NULL, 1); + if (error) + return (error); + if (nmp->nm_clp != NULL) + clidrev = nmp->nm_clp->nfsc_clientidrev; + else + clidrev = 0; + error = nfsrpc_createv4(dvp, name, namelen, vap, cverf, fmode, + owp, &dp, cred, p, dnap, nnap, nfhpp, attrflagp, dattrflagp, + dstuff, &unlocked); + if (dp != NULL) + (void) nfscl_deleg(nmp->nm_mountp, owp->nfsow_clp, + (*nfhpp)->nfh_fh, (*nfhpp)->nfh_len, cred, p, &dp); + nfscl_ownerrelease(owp, error, newone, unlocked); + if (error == NFSERR_GRACE || error == NFSERR_STALECLIENTID || + error == NFSERR_STALEDONTRECOVER || error == NFSERR_DELAY) { + (void) nfs_catnap(PZERO, "nfs_open"); + } else if ((error == NFSERR_EXPIRED || + error == NFSERR_BADSTATEID) && clidrev != 0) { + expireret = nfscl_hasexpired(nmp->nm_clp, clidrev, p); + retrycnt++; + } + } while (error == NFSERR_GRACE || error == NFSERR_STALECLIENTID || + error == NFSERR_STALEDONTRECOVER || error == NFSERR_DELAY || + ((error == NFSERR_EXPIRED || error == NFSERR_BADSTATEID) && + expireret == 0 && clidrev != 0 && retrycnt < 4)); + if (error && retrycnt >= 4) + error = EIO; + } else { + error = nfsrpc_createv23(dvp, name, namelen, vap, cverf, + fmode, cred, p, dnap, nnap, nfhpp, attrflagp, dattrflagp, + dstuff); + } + return (error); +} + +/* + * The create rpc for v2 and 3. 
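+ * With O_EXCL, NFSv3 does an exclusive create: the 8 byte verifier
+ * (cverf) is sent in place of initial attributes, so the server can
+ * tell a retransmission of this create apart from a new one.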
+ */ +static int +nfsrpc_createv23(vnode_t dvp, char *name, int namelen, struct vattr *vap, + nfsquad_t cverf, int fmode, struct ucred *cred, NFSPROC_T *p, + struct nfsvattr *dnap, struct nfsvattr *nnap, struct nfsfh **nfhpp, + int *attrflagp, int *dattrflagp, void *dstuff) +{ + u_int32_t *tl; + int error = 0; + struct nfsrv_descript nfsd, *nd = &nfsd; + + *nfhpp = NULL; + *attrflagp = 0; + *dattrflagp = 0; + if (namelen > NFS_MAXNAMLEN) + return (ENAMETOOLONG); + NFSCL_REQSTART(nd, NFSPROC_CREATE, dvp); + (void) nfsm_strtom(nd, name, namelen); + if (nd->nd_flag & ND_NFSV3) { + NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED); + if (fmode & O_EXCL) { + *tl = txdr_unsigned(NFSCREATE_EXCLUSIVE); + NFSM_BUILD(tl, u_int32_t *, NFSX_VERF); + *tl++ = cverf.lval[0]; + *tl = cverf.lval[1]; + } else { + *tl = txdr_unsigned(NFSCREATE_UNCHECKED); + nfscl_fillsattr(nd, vap, dvp, 0, 0); + } + } else { + nfscl_fillsattr(nd, vap, dvp, NFSSATTR_SIZE0, 0); + } + error = nfscl_request(nd, dvp, p, cred, dstuff); + if (error) + return (error); + if (nd->nd_repstat == 0) { + error = nfscl_mtofh(nd, nfhpp, nnap, attrflagp); + if (error) + goto nfsmout; + } + if (nd->nd_flag & ND_NFSV3) + error = nfscl_wcc_data(nd, dvp, dnap, dattrflagp, NULL, dstuff); + if (nd->nd_repstat != 0 && error == 0) + error = nd->nd_repstat; +nfsmout: + mbuf_freem(nd->nd_mrep); + return (error); +} + +static int +nfsrpc_createv4(vnode_t dvp, char *name, int namelen, struct vattr *vap, + nfsquad_t cverf, int fmode, struct nfsclowner *owp, struct nfscldeleg **dpp, + struct ucred *cred, NFSPROC_T *p, struct nfsvattr *dnap, + struct nfsvattr *nnap, struct nfsfh **nfhpp, int *attrflagp, + int *dattrflagp, void *dstuff, int *unlockedp) +{ + u_int32_t *tl; + int error = 0, deleg, newone, ret, acesize, limitby; + struct nfsrv_descript nfsd, *nd = &nfsd; + struct nfsclopen *op; + struct nfscldeleg *dp = NULL; + struct nfsnode *np; + struct nfsfh *nfhp; + nfsattrbit_t attrbits; + nfsv4stateid_t stateid; + u_int32_t rflags; + + *unlockedp = 0; + *nfhpp = NULL; + *dpp = NULL; + *attrflagp = 0; + *dattrflagp = 0; + if (namelen > NFS_MAXNAMLEN) + return (ENAMETOOLONG); + NFSCL_REQSTART(nd, NFSPROC_CREATE, dvp); + /* + * For V4, this is actually an Open op. 
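+ * A rough outline of the compound argument stream built below (see
+ * the code for the exact XDR):
+ *	Open: seqid, share_access(READ|WRITE), share_deny(NONE),
+ *	    clientid, owner, openhow = CREATE
+ *	    (EXCLUSIVE createverf or UNCHECKED attributes),
+ *	    claim = CLAIM_NULL, name
+ *	GetFH
+ *	Getattr(attrbits)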
+ */ + NFSM_BUILD(tl, u_int32_t *, 5 * NFSX_UNSIGNED); + *tl++ = txdr_unsigned(owp->nfsow_seqid); + *tl++ = txdr_unsigned(NFSV4OPEN_ACCESSWRITE | + NFSV4OPEN_ACCESSREAD); + *tl++ = txdr_unsigned(NFSV4OPEN_DENYNONE); + *tl++ = owp->nfsow_clp->nfsc_clientid.lval[0]; + *tl = owp->nfsow_clp->nfsc_clientid.lval[1]; + (void) nfsm_strtom(nd, owp->nfsow_owner, NFSV4CL_LOCKNAMELEN); + NFSM_BUILD(tl, u_int32_t *, 2 * NFSX_UNSIGNED); + *tl++ = txdr_unsigned(NFSV4OPEN_CREATE); + if (fmode & O_EXCL) { + *tl = txdr_unsigned(NFSCREATE_EXCLUSIVE); + NFSM_BUILD(tl, u_int32_t *, NFSX_VERF); + *tl++ = cverf.lval[0]; + *tl = cverf.lval[1]; + } else { + *tl = txdr_unsigned(NFSCREATE_UNCHECKED); + nfscl_fillsattr(nd, vap, dvp, 0, 0); + } + NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED); + *tl = txdr_unsigned(NFSV4OPEN_CLAIMNULL); + (void) nfsm_strtom(nd, name, namelen); + NFSM_BUILD(tl, u_int32_t *, 2 * NFSX_UNSIGNED); + *tl++ = txdr_unsigned(NFSV4OP_GETFH); + *tl = txdr_unsigned(NFSV4OP_GETATTR); + NFSGETATTR_ATTRBIT(&attrbits); + (void) nfsrv_putattrbit(nd, &attrbits); + error = nfscl_request(nd, dvp, p, cred, dstuff); + if (error) + return (error); + error = nfscl_wcc_data(nd, dvp, dnap, dattrflagp, NULL, dstuff); + if (error) + goto nfsmout; + NFSCL_INCRSEQID(owp->nfsow_seqid, nd); + if (nd->nd_repstat == 0) { + NFSM_DISSECT(tl, u_int32_t *, NFSX_STATEID + + 6 * NFSX_UNSIGNED); + stateid.seqid = *tl++; + stateid.other[0] = *tl++; + stateid.other[1] = *tl++; + stateid.other[2] = *tl; + rflags = fxdr_unsigned(u_int32_t, *(tl + 6)); + (void) nfsrv_getattrbits(nd, &attrbits, NULL, NULL); + NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED); + deleg = fxdr_unsigned(int, *tl); + if (deleg == NFSV4OPEN_DELEGATEREAD || + deleg == NFSV4OPEN_DELEGATEWRITE) { + if (!(owp->nfsow_clp->nfsc_flags & + NFSCLFLAGS_FIRSTDELEG)) + owp->nfsow_clp->nfsc_flags |= + (NFSCLFLAGS_FIRSTDELEG | NFSCLFLAGS_GOTDELEG); + MALLOC(dp, struct nfscldeleg *, + sizeof (struct nfscldeleg) + NFSX_V4FHMAX, + M_NFSCLDELEG, M_WAITOK); + LIST_INIT(&dp->nfsdl_owner); + LIST_INIT(&dp->nfsdl_lock); + dp->nfsdl_clp = owp->nfsow_clp; + newnfs_copyincred(cred, &dp->nfsdl_cred); + nfscl_lockinit(&dp->nfsdl_rwlock); + NFSM_DISSECT(tl, u_int32_t *, NFSX_STATEID + + NFSX_UNSIGNED); + dp->nfsdl_stateid.seqid = *tl++; + dp->nfsdl_stateid.other[0] = *tl++; + dp->nfsdl_stateid.other[1] = *tl++; + dp->nfsdl_stateid.other[2] = *tl++; + ret = fxdr_unsigned(int, *tl); + if (deleg == NFSV4OPEN_DELEGATEWRITE) { + dp->nfsdl_flags = NFSCLDL_WRITE; + /* + * Indicates how much the file can grow. + */ + NFSM_DISSECT(tl, u_int32_t *, + 3 * NFSX_UNSIGNED); + limitby = fxdr_unsigned(int, *tl++); + switch (limitby) { + case NFSV4OPEN_LIMITSIZE: + dp->nfsdl_sizelimit = fxdr_hyper(tl); + break; + case NFSV4OPEN_LIMITBLOCKS: + dp->nfsdl_sizelimit = + fxdr_unsigned(u_int64_t, *tl++); + dp->nfsdl_sizelimit *= + fxdr_unsigned(u_int64_t, *tl); + break; + default: + error = NFSERR_BADXDR; + goto nfsmout; + }; + } else { + dp->nfsdl_flags = NFSCLDL_READ; + } + if (ret) + dp->nfsdl_flags |= NFSCLDL_RECALL; + error = nfsrv_dissectace(nd, &dp->nfsdl_ace, &ret, + &acesize, p); + if (error) + goto nfsmout; + } else if (deleg != NFSV4OPEN_DELEGATENONE) { + error = NFSERR_BADXDR; + goto nfsmout; + } + error = nfscl_mtofh(nd, nfhpp, nnap, attrflagp); + if (error) + goto nfsmout; + if (dp != NULL && *attrflagp) { + dp->nfsdl_change = nnap->na_filerev; + dp->nfsdl_modtime = nnap->na_mtime; + dp->nfsdl_flags |= NFSCLDL_MODTIMESET; + } + /* + * We can now complete the Open state. 
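+ * (The stateid dissected from the reply is copied into a freshly
+ * acquired nfsclopen; if the server set NFSV4OPEN_RESULTCONFIRM, the
+ * open cannot be used until the OpenConfirm below also succeeds.)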
+ */ + nfhp = *nfhpp; + if (dp != NULL) { + dp->nfsdl_fhlen = nfhp->nfh_len; + NFSBCOPY(nfhp->nfh_fh, dp->nfsdl_fh, nfhp->nfh_len); + } + /* + * Get an Open structure that will be + * attached to the OpenOwner, acquired already. + */ + error = nfscl_open(dvp, nfhp->nfh_fh, nfhp->nfh_len, + (NFSV4OPEN_ACCESSWRITE | NFSV4OPEN_ACCESSREAD), 0, + cred, p, NULL, &op, &newone, NULL, 0); + if (error) + goto nfsmout; + op->nfso_stateid = stateid; + newnfs_copyincred(cred, &op->nfso_cred); + if ((rflags & NFSV4OPEN_RESULTCONFIRM)) { + do { + ret = nfsrpc_openconfirm(dvp, nfhp->nfh_fh, + nfhp->nfh_len, op, cred, p); + if (ret == NFSERR_DELAY) + (void) nfs_catnap(PZERO, "nfs_create"); + } while (ret == NFSERR_DELAY); + error = ret; + } + + /* + * If the server is handing out delegations, but we didn't + * get one because an OpenConfirm was required, try the + * Open again, to get a delegation. This is a harmless no-op, + * from a server's point of view. + */ + if ((rflags & NFSV4OPEN_RESULTCONFIRM) && + (owp->nfsow_clp->nfsc_flags & NFSCLFLAGS_GOTDELEG) && + !error && dp == NULL) { + np = VTONFS(dvp); + do { + ret = nfsrpc_openrpc(VFSTONFS(vnode_mount(dvp)), dvp, + np->n_fhp->nfh_fh, np->n_fhp->nfh_len, + nfhp->nfh_fh, nfhp->nfh_len, + (NFSV4OPEN_ACCESSWRITE | NFSV4OPEN_ACCESSREAD), op, + name, namelen, &dp, 0, 0x0, cred, p, 0, 1); + if (ret == NFSERR_DELAY) + (void) nfs_catnap(PZERO, "nfs_crt2"); + } while (ret == NFSERR_DELAY); + if (ret) { + if (dp != NULL) + FREE((caddr_t)dp, M_NFSCLDELEG); + if (ret == NFSERR_STALECLIENTID || + ret == NFSERR_STALEDONTRECOVER) + error = ret; + } + } + nfscl_openrelease(op, error, newone); + *unlockedp = 1; + } + if (nd->nd_repstat != 0 && error == 0) + error = nd->nd_repstat; + if (error == NFSERR_STALECLIENTID) + nfscl_initiate_recovery(owp->nfsow_clp); +nfsmout: + if (!error) + *dpp = dp; + else if (dp != NULL) + FREE((caddr_t)dp, M_NFSCLDELEG); + mbuf_freem(nd->nd_mrep); + return (error); +} + +/* + * Nfs remove rpc + */ +APPLESTATIC int +nfsrpc_remove(vnode_t dvp, char *name, int namelen, vnode_t vp, + struct ucred *cred, NFSPROC_T *p, struct nfsvattr *dnap, int *dattrflagp, + void *dstuff) +{ + u_int32_t *tl; + struct nfsrv_descript nfsd, *nd = &nfsd; + struct nfsnode *np; + struct nfsmount *nmp; + nfsv4stateid_t dstateid; + int error, ret = 0, i; + + *dattrflagp = 0; + if (namelen > NFS_MAXNAMLEN) + return (ENAMETOOLONG); + nmp = VFSTONFS(vnode_mount(dvp)); +tryagain: + if (NFSHASNFSV4(nmp) && ret == 0) { + ret = nfscl_removedeleg(vp, p, &dstateid); + if (ret == 1) { + NFSCL_REQSTART(nd, NFSPROC_RETDELEGREMOVE, vp); + NFSM_BUILD(tl, u_int32_t *, NFSX_STATEID + + NFSX_UNSIGNED); + *tl++ = dstateid.seqid; + *tl++ = dstateid.other[0]; + *tl++ = dstateid.other[1]; + *tl++ = dstateid.other[2]; + *tl = txdr_unsigned(NFSV4OP_PUTFH); + np = VTONFS(dvp); + (void) nfsm_fhtom(nd, np->n_fhp->nfh_fh, + np->n_fhp->nfh_len, 0); + NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED); + *tl = txdr_unsigned(NFSV4OP_REMOVE); + } + } else { + ret = 0; + } + if (ret == 0) + NFSCL_REQSTART(nd, NFSPROC_REMOVE, dvp); + (void) nfsm_strtom(nd, name, namelen); + error = nfscl_request(nd, dvp, p, cred, dstuff); + if (error) + return (error); + if (nd->nd_flag & (ND_NFSV3 | ND_NFSV4)) { + /* For NFSv4, parse out any Delereturn replies. */ + if (ret > 0 && nd->nd_repstat != 0 && + (nd->nd_flag & ND_NOMOREDATA)) { + /* + * If the Delegreturn failed, try again without + * it. The server will Recall, as required. 
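+ * When a delegation is being returned here, the compound built above
+ * is roughly PutFH(file), DelegReturn(stateid), PutFH(dir),
+ * Remove(name), so the delegation travels back in the same RPC as
+ * the remove.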
+ */ + mbuf_freem(nd->nd_mrep); + goto tryagain; + } + for (i = 0; i < (ret * 2); i++) { + if ((nd->nd_flag & (ND_NFSV4 | ND_NOMOREDATA)) == + ND_NFSV4) { + NFSM_DISSECT(tl, u_int32_t *, 2 * NFSX_UNSIGNED); + if (*(tl + 1)) + nd->nd_flag |= ND_NOMOREDATA; + } + } + error = nfscl_wcc_data(nd, dvp, dnap, dattrflagp, NULL, dstuff); + } + if (nd->nd_repstat && !error) + error = nd->nd_repstat; +nfsmout: + mbuf_freem(nd->nd_mrep); + return (error); +} + +/* + * Do an nfs rename rpc. + */ +APPLESTATIC int +nfsrpc_rename(vnode_t fdvp, vnode_t fvp, char *fnameptr, int fnamelen, + vnode_t tdvp, vnode_t tvp, char *tnameptr, int tnamelen, struct ucred *cred, + NFSPROC_T *p, struct nfsvattr *fnap, struct nfsvattr *tnap, + int *fattrflagp, int *tattrflagp, void *fstuff, void *tstuff) +{ + u_int32_t *tl; + struct nfsrv_descript nfsd, *nd = &nfsd; + struct nfsmount *nmp; + struct nfsnode *np; + nfsattrbit_t attrbits; + nfsv4stateid_t fdstateid, tdstateid; + int error = 0, ret = 0, gottd = 0, gotfd = 0, i; + + *fattrflagp = 0; + *tattrflagp = 0; + nmp = VFSTONFS(vnode_mount(fdvp)); + if (fnamelen > NFS_MAXNAMLEN || tnamelen > NFS_MAXNAMLEN) + return (ENAMETOOLONG); +tryagain: + if (NFSHASNFSV4(nmp) && ret == 0) { + ret = nfscl_renamedeleg(fvp, &fdstateid, &gotfd, tvp, + &tdstateid, &gottd, p); + if (gotfd && gottd) { + NFSCL_REQSTART(nd, NFSPROC_RETDELEGRENAME2, fvp); + } else if (gotfd) { + NFSCL_REQSTART(nd, NFSPROC_RETDELEGRENAME1, fvp); + } else if (gottd) { + NFSCL_REQSTART(nd, NFSPROC_RETDELEGRENAME1, tvp); + } + if (gotfd) { + NFSM_BUILD(tl, u_int32_t *, NFSX_STATEID); + *tl++ = fdstateid.seqid; + *tl++ = fdstateid.other[0]; + *tl++ = fdstateid.other[1]; + *tl = fdstateid.other[2]; + if (gottd) { + NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED); + *tl = txdr_unsigned(NFSV4OP_PUTFH); + np = VTONFS(tvp); + (void) nfsm_fhtom(nd, np->n_fhp->nfh_fh, + np->n_fhp->nfh_len, 0); + NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED); + *tl = txdr_unsigned(NFSV4OP_DELEGRETURN); + } + } + if (gottd) { + NFSM_BUILD(tl, u_int32_t *, NFSX_STATEID); + *tl++ = tdstateid.seqid; + *tl++ = tdstateid.other[0]; + *tl++ = tdstateid.other[1]; + *tl = tdstateid.other[2]; + } + if (ret > 0) { + NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED); + *tl = txdr_unsigned(NFSV4OP_PUTFH); + np = VTONFS(fdvp); + (void) nfsm_fhtom(nd, np->n_fhp->nfh_fh, + np->n_fhp->nfh_len, 0); + NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED); + *tl = txdr_unsigned(NFSV4OP_SAVEFH); + } + } else { + ret = 0; + } + if (ret == 0) + NFSCL_REQSTART(nd, NFSPROC_RENAME, fdvp); + if (nd->nd_flag & ND_NFSV4) { + NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED); + *tl = txdr_unsigned(NFSV4OP_GETATTR); + NFSWCCATTR_ATTRBIT(&attrbits); + (void) nfsrv_putattrbit(nd, &attrbits); + NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED); + *tl = txdr_unsigned(NFSV4OP_PUTFH); + (void) nfsm_fhtom(nd, VTONFS(tdvp)->n_fhp->nfh_fh, + VTONFS(tdvp)->n_fhp->nfh_len, 0); + NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED); + *tl = txdr_unsigned(NFSV4OP_GETATTR); + (void) nfsrv_putattrbit(nd, &attrbits); + nd->nd_flag |= ND_V4WCCATTR; + NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED); + *tl = txdr_unsigned(NFSV4OP_RENAME); + } + (void) nfsm_strtom(nd, fnameptr, fnamelen); + if (!(nd->nd_flag & ND_NFSV4)) + (void) nfsm_fhtom(nd, VTONFS(tdvp)->n_fhp->nfh_fh, + VTONFS(tdvp)->n_fhp->nfh_len, 0); + (void) nfsm_strtom(nd, tnameptr, tnamelen); + error = nfscl_request(nd, fdvp, p, cred, fstuff); + if (error) + return (error); + if (nd->nd_flag & (ND_NFSV3 | ND_NFSV4)) { + /* For NFSv4, parse out any Delereturn replies. 
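+ * Each returned delegation added a PutFH/DelegReturn pair to the
+ * compound, so the loop below consumes ret * 2 op status headers
+ * before the wcc attributes are parsed.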
*/ + if (ret > 0 && nd->nd_repstat != 0 && + (nd->nd_flag & ND_NOMOREDATA)) { + /* + * If the Delegreturn failed, try again without + * it. The server will Recall, as required. + */ + mbuf_freem(nd->nd_mrep); + goto tryagain; + } + for (i = 0; i < (ret * 2); i++) { + if ((nd->nd_flag & (ND_NFSV4 | ND_NOMOREDATA)) == + ND_NFSV4) { + NFSM_DISSECT(tl, u_int32_t *, 2 * NFSX_UNSIGNED); + if (*(tl + 1)) { + if (i == 0 && ret > 1) { + /* + * If the Delegreturn failed, try again + * without it. The server will Recall, as + * required. + * If ret > 1, the first iteration of this + * loop is the second DelegReturn result. + */ + mbuf_freem(nd->nd_mrep); + goto tryagain; + } else { + nd->nd_flag |= ND_NOMOREDATA; + } + } + } + } + /* Now, the first wcc attribute reply. */ + if ((nd->nd_flag & (ND_NFSV4 | ND_NOMOREDATA)) == ND_NFSV4) { + NFSM_DISSECT(tl, u_int32_t *, 2 * NFSX_UNSIGNED); + if (*(tl + 1)) + nd->nd_flag |= ND_NOMOREDATA; + } + error = nfscl_wcc_data(nd, fdvp, fnap, fattrflagp, NULL, + fstuff); + /* and the second wcc attribute reply. */ + if ((nd->nd_flag & (ND_NFSV4 | ND_NOMOREDATA)) == ND_NFSV4 && + !error) { + NFSM_DISSECT(tl, u_int32_t *, 2 * NFSX_UNSIGNED); + if (*(tl + 1)) + nd->nd_flag |= ND_NOMOREDATA; + } + if (!error) + error = nfscl_wcc_data(nd, tdvp, tnap, tattrflagp, + NULL, tstuff); + } + if (nd->nd_repstat && !error) + error = nd->nd_repstat; +nfsmout: + mbuf_freem(nd->nd_mrep); + return (error); +} + +/* + * nfs hard link create rpc + */ +APPLESTATIC int +nfsrpc_link(vnode_t dvp, vnode_t vp, char *name, int namelen, + struct ucred *cred, NFSPROC_T *p, struct nfsvattr *dnap, + struct nfsvattr *nap, int *attrflagp, int *dattrflagp, void *dstuff) +{ + u_int32_t *tl; + struct nfsrv_descript nfsd, *nd = &nfsd; + nfsattrbit_t attrbits; + int error = 0; + + *attrflagp = 0; + *dattrflagp = 0; + if (namelen > NFS_MAXNAMLEN) + return (ENAMETOOLONG); + NFSCL_REQSTART(nd, NFSPROC_LINK, vp); + if (nd->nd_flag & ND_NFSV4) { + NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED); + *tl = txdr_unsigned(NFSV4OP_PUTFH); + } + (void) nfsm_fhtom(nd, VTONFS(dvp)->n_fhp->nfh_fh, + VTONFS(dvp)->n_fhp->nfh_len, 0); + if (nd->nd_flag & ND_NFSV4) { + NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED); + *tl = txdr_unsigned(NFSV4OP_GETATTR); + NFSWCCATTR_ATTRBIT(&attrbits); + (void) nfsrv_putattrbit(nd, &attrbits); + nd->nd_flag |= ND_V4WCCATTR; + NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED); + *tl = txdr_unsigned(NFSV4OP_LINK); + } + (void) nfsm_strtom(nd, name, namelen); + error = nfscl_request(nd, vp, p, cred, dstuff); + if (error) + return (error); + if (nd->nd_flag & ND_NFSV3) { + error = nfscl_postop_attr(nd, nap, attrflagp, dstuff); + if (!error) + error = nfscl_wcc_data(nd, dvp, dnap, dattrflagp, + NULL, dstuff); + } else if ((nd->nd_flag & (ND_NFSV4 | ND_NOMOREDATA)) == ND_NFSV4) { + /* + * First, parse out the PutFH and Getattr result. + */ + NFSM_DISSECT(tl, u_int32_t *, 2 * NFSX_UNSIGNED); + if (!(*(tl + 1))) + NFSM_DISSECT(tl, u_int32_t *, 2 * NFSX_UNSIGNED); + if (*(tl + 1)) + nd->nd_flag |= ND_NOMOREDATA; + /* + * Get the pre-op attributes. 
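+ * (v4 has no wcc data as such; the Getattr inserted before the Link
+ * op, with ND_V4WCCATTR set, stands in for the pre-op directory
+ * attributes used in the cache consistency check.)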
+ */ + error = nfscl_wcc_data(nd, dvp, dnap, dattrflagp, NULL, dstuff); + } + if (nd->nd_repstat && !error) + error = nd->nd_repstat; +nfsmout: + mbuf_freem(nd->nd_mrep); + return (error); +} + +/* + * nfs symbolic link create rpc + */ +APPLESTATIC int +nfsrpc_symlink(vnode_t dvp, char *name, int namelen, char *target, + struct vattr *vap, struct ucred *cred, NFSPROC_T *p, struct nfsvattr *dnap, + struct nfsvattr *nnap, struct nfsfh **nfhpp, int *attrflagp, + int *dattrflagp, void *dstuff) +{ + u_int32_t *tl; + struct nfsrv_descript nfsd, *nd = &nfsd; + struct nfsmount *nmp; + int slen, error = 0; + + *nfhpp = NULL; + *attrflagp = 0; + *dattrflagp = 0; + nmp = VFSTONFS(vnode_mount(dvp)); + slen = strlen(target); + if (slen > NFS_MAXPATHLEN || namelen > NFS_MAXNAMLEN) + return (ENAMETOOLONG); + NFSCL_REQSTART(nd, NFSPROC_SYMLINK, dvp); + if (nd->nd_flag & ND_NFSV4) { + NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED); + *tl = txdr_unsigned(NFLNK); + (void) nfsm_strtom(nd, target, slen); + } + (void) nfsm_strtom(nd, name, namelen); + if (nd->nd_flag & (ND_NFSV3 | ND_NFSV4)) + nfscl_fillsattr(nd, vap, dvp, 0, 0); + if (!(nd->nd_flag & ND_NFSV4)) + (void) nfsm_strtom(nd, target, slen); + if (nd->nd_flag & ND_NFSV2) + nfscl_fillsattr(nd, vap, dvp, NFSSATTR_SIZENEG1, 0); + error = nfscl_request(nd, dvp, p, cred, dstuff); + if (error) + return (error); + if (nd->nd_flag & ND_NFSV4) + error = nfscl_wcc_data(nd, dvp, dnap, dattrflagp, NULL, dstuff); + if ((nd->nd_flag & ND_NFSV3) && !error) { + if (!nd->nd_repstat) + error = nfscl_mtofh(nd, nfhpp, nnap, attrflagp); + if (!error) + error = nfscl_wcc_data(nd, dvp, dnap, dattrflagp, + NULL, dstuff); + } + if (nd->nd_repstat && !error) + error = nd->nd_repstat; + mbuf_freem(nd->nd_mrep); + /* + * Kludge: Map EEXIST => 0 assuming that it is a reply to a retry. 
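+ * (Symlink is not idempotent, so when a reply is lost and the request
+ * is retransmitted, the second reply can be EEXIST even though the
+ * symlink was created; treating it as success papers over that case.)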
+ */
+ if (error == EEXIST)
+ error = 0;
+ return (error);
+}
+
+/*
+ * nfs make dir rpc
+ */
+APPLESTATIC int
+nfsrpc_mkdir(vnode_t dvp, char *name, int namelen, struct vattr *vap,
+ struct ucred *cred, NFSPROC_T *p, struct nfsvattr *dnap,
+ struct nfsvattr *nnap, struct nfsfh **nfhpp, int *attrflagp,
+ int *dattrflagp, void *dstuff)
+{
+ u_int32_t *tl;
+ struct nfsrv_descript nfsd, *nd = &nfsd;
+ nfsattrbit_t attrbits;
+ int error = 0;
+
+ *nfhpp = NULL;
+ *attrflagp = 0;
+ *dattrflagp = 0;
+ if (namelen > NFS_MAXNAMLEN)
+ return (ENAMETOOLONG);
+ NFSCL_REQSTART(nd, NFSPROC_MKDIR, dvp);
+ if (nd->nd_flag & ND_NFSV4) {
+ NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
+ *tl = txdr_unsigned(NFDIR);
+ }
+ (void) nfsm_strtom(nd, name, namelen);
+ nfscl_fillsattr(nd, vap, dvp, NFSSATTR_SIZENEG1, 0);
+ if (nd->nd_flag & ND_NFSV4) {
+ NFSGETATTR_ATTRBIT(&attrbits);
+ NFSM_BUILD(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
+ *tl++ = txdr_unsigned(NFSV4OP_GETFH);
+ *tl = txdr_unsigned(NFSV4OP_GETATTR);
+ (void) nfsrv_putattrbit(nd, &attrbits);
+ }
+ error = nfscl_request(nd, dvp, p, cred, dstuff);
+ if (error)
+ return (error);
+ if (nd->nd_flag & ND_NFSV4)
+ error = nfscl_wcc_data(nd, dvp, dnap, dattrflagp, NULL, dstuff);
+ if (!nd->nd_repstat && !error) {
+ if (nd->nd_flag & ND_NFSV4) {
+ NFSM_DISSECT(tl, u_int32_t *, 5 * NFSX_UNSIGNED);
+ error = nfsrv_getattrbits(nd, &attrbits, NULL, NULL);
+ }
+ if (!error)
+ error = nfscl_mtofh(nd, nfhpp, nnap, attrflagp);
+ }
+ if ((nd->nd_flag & ND_NFSV3) && !error)
+ error = nfscl_wcc_data(nd, dvp, dnap, dattrflagp, NULL, dstuff);
+ if (nd->nd_repstat && !error)
+ error = nd->nd_repstat;
+nfsmout:
+ mbuf_freem(nd->nd_mrep);
+ /*
+ * Kludge: Map EEXIST => 0 assuming that it is a reply to a retry.
+ */
+ if (error == EEXIST)
+ error = 0;
+ return (error);
+}
+
+/*
+ * nfs remove directory call
+ */
+APPLESTATIC int
+nfsrpc_rmdir(vnode_t dvp, char *name, int namelen, struct ucred *cred,
+ NFSPROC_T *p, struct nfsvattr *dnap, int *dattrflagp, void *dstuff)
+{
+ struct nfsrv_descript nfsd, *nd = &nfsd;
+ int error = 0;
+
+ *dattrflagp = 0;
+ if (namelen > NFS_MAXNAMLEN)
+ return (ENAMETOOLONG);
+ NFSCL_REQSTART(nd, NFSPROC_RMDIR, dvp);
+ (void) nfsm_strtom(nd, name, namelen);
+ error = nfscl_request(nd, dvp, p, cred, dstuff);
+ if (error)
+ return (error);
+ if (nd->nd_flag & (ND_NFSV3 | ND_NFSV4))
+ error = nfscl_wcc_data(nd, dvp, dnap, dattrflagp, NULL, dstuff);
+ if (nd->nd_repstat && !error)
+ error = nd->nd_repstat;
+ mbuf_freem(nd->nd_mrep);
+ /*
+ * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry.
+ */
+ if (error == ENOENT)
+ error = 0;
+ return (error);
+}
+
+/*
+ * Readdir rpc.
+ * Always returns with either uio_resid unchanged, if you are at the
+ * end of the directory, or uio_resid == 0, with all DIRBLKSIZ chunks
+ * filled in.
+ * I felt this would allow caching of directory blocks more easily
+ * than returning a partially filled block.
+ * Directory offset cookies:
+ * Oh my, what to do with them...
+ * I can think of three ways to deal with them:
+ * 1 - have the layer above these RPCs maintain a map between logical
+ * directory byte offsets and the NFS directory offset cookies
+ * 2 - pass the opaque directory offset cookies up into userland
+ * and let the libc functions deal with them, via the system call
+ * 3 - return them to userland in the "struct dirent", so future versions
+ * of libc can use them and do whatever is necessary to make things work
+ * above these rpc calls, in the meantime
+ * For now, I do #3 by "hiding" the directory offset cookies after the
+ * d_name field in struct dirent. This is space inside d_reclen that
+ * will be ignored by anything that doesn't know about them.
+ * The directory offset cookies are filled in as the last 8 bytes of
+ * each directory entry, after d_name. Someday, the userland libc
+ * functions may be able to use these. In the meantime, it satisfies
+ * OpenBSD's requirements for cookies being returned.
+ * It expects the directory offset cookie for the read to be in uio_offset
+ * and returns the one for the next entry after this directory block in
+ * there, as well.
+ */
+APPLESTATIC int
+nfsrpc_readdir(vnode_t vp, struct uio *uiop, nfsuint64 *cookiep,
+ struct ucred *cred, NFSPROC_T *p, struct nfsvattr *nap, int *attrflagp,
+ int *eofp, void *stuff)
+{
+ int len, left;
+ struct dirent *dp = NULL;
+ u_int32_t *tl;
+ nfsquad_t cookie, ncookie;
+ struct nfsmount *nmp = VFSTONFS(vnode_mount(vp));
+ struct nfsnode *dnp = VTONFS(vp);
+ struct nfsvattr nfsva;
+ struct nfsrv_descript nfsd, *nd = &nfsd;
+ int error = 0, tlen, more_dirs = 1, blksiz = 0, bigenough = 1;
+ int reqsize, tryformoredirs = 1, readsize, eof = 0, gotmnton = 0;
+ long dotfileid, dotdotfileid = 0;
+ u_int32_t fakefileno = 0xffffffff, rderr;
+ char *cp;
+ nfsattrbit_t attrbits, dattrbits;
+ u_int32_t *tl2 = NULL;
+ size_t tresid;
+
+#ifdef DIAGNOSTIC
+ if (uiop->uio_iovcnt != 1 || (uio_uio_resid(uiop) & (DIRBLKSIZ - 1)))
+ panic("nfs readdirrpc bad uio");
+#endif
+
+ /*
+ * There is no point in reading a lot more than uio_resid, however
+ * adding one additional DIRBLKSIZ makes sense. Since uio_resid
+ * and nm_readdirsize are both exact multiples of DIRBLKSIZ, this
+ * will never make readsize > nm_readdirsize.
+ */
+ readsize = nmp->nm_readdirsize;
+ if (readsize > uio_uio_resid(uiop))
+ readsize = uio_uio_resid(uiop) + DIRBLKSIZ;
+
+ *attrflagp = 0;
+ if (eofp)
+ *eofp = 0;
+ tresid = uio_uio_resid(uiop);
+ cookie.lval[0] = cookiep->nfsuquad[0];
+ cookie.lval[1] = cookiep->nfsuquad[1];
+ nd->nd_mrep = NULL;
+
+ /*
+ * For NFSv4, first create the "." and ".." entries.
+ */
+ if (NFSHASNFSV4(nmp)) {
+ reqsize = 6 * NFSX_UNSIGNED;
+ NFSGETATTR_ATTRBIT(&dattrbits);
+ NFSZERO_ATTRBIT(&attrbits);
+ NFSSETBIT_ATTRBIT(&attrbits, NFSATTRBIT_FILEID);
+ NFSSETBIT_ATTRBIT(&attrbits, NFSATTRBIT_TYPE);
+ if (NFSISSET_ATTRBIT(&dnp->n_vattr.na_suppattr,
+ NFSATTRBIT_MOUNTEDONFILEID)) {
+ NFSSETBIT_ATTRBIT(&attrbits,
+ NFSATTRBIT_MOUNTEDONFILEID);
+ gotmnton = 1;
+ } else {
+ /*
+ * Must fake it. Use the fileno, except when the
+ * fsid is != to that of the directory. For that
+ * case, generate a fake fileno that is not the same.
+ */
+ NFSSETBIT_ATTRBIT(&attrbits, NFSATTRBIT_FSID);
+ gotmnton = 0;
+ }
+
+ /*
+ * Joy, oh joy. For V4 we get to hand craft '.' and '..'.
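+ * These two entries, like every entry returned by this RPC, carry
+ * their directory offset cookie hidden after d_name inside d_reclen
+ * (a sketch; DIRHDSIZ is the fixed dirent header and tlen the
+ * NUL-padded name length, so for a regular entry
+ * d_reclen = DIRHDSIZ + tlen + NFSX_HYPER):
+ *
+ *	| header   | d_name + NUL + pad | cookie     |
+ *	| DIRHDSIZ | tlen bytes         | NFSX_HYPER |
+ *
+ * "." and ".." simply get a cookie of 0.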
+ */ + if (uiop->uio_offset == 0) { +#if defined(__FreeBSD_version) && __FreeBSD_version >= 800000 + error = VOP_GETATTR(vp, &nfsva.na_vattr, cred); +#else + error = VOP_GETATTR(vp, &nfsva.na_vattr, cred, p); +#endif + if (error) + return (error); + dotfileid = nfsva.na_fileid; + NFSCL_REQSTART(nd, NFSPROC_LOOKUPP, vp); + NFSM_BUILD(tl, u_int32_t *, 2 * NFSX_UNSIGNED); + *tl++ = txdr_unsigned(NFSV4OP_GETFH); + *tl = txdr_unsigned(NFSV4OP_GETATTR); + (void) nfsrv_putattrbit(nd, &attrbits); + error = nfscl_request(nd, vp, p, cred, stuff); + if (error) + return (error); + if (nd->nd_repstat == 0) { + NFSM_DISSECT(tl, u_int32_t *, 3*NFSX_UNSIGNED); + len = fxdr_unsigned(int, *(tl + 2)); + if (len > 0 && len <= NFSX_V4FHMAX) + error = nfsm_advance(nd, NFSM_RNDUP(len), -1); + else + error = EPERM; + if (!error) { + NFSM_DISSECT(tl, u_int32_t *, 2*NFSX_UNSIGNED); + nfsva.na_mntonfileno = 0xffffffff; + error = nfsv4_loadattr(nd, NULL, &nfsva, NULL, + NULL, 0, NULL, NULL, NULL, NULL, NULL, 0, + NULL, NULL, NULL, p, cred); + if (error) { + dotdotfileid = dotfileid; + } else if (gotmnton) { + if (nfsva.na_mntonfileno != 0xffffffff) + dotdotfileid = nfsva.na_mntonfileno; + else + dotdotfileid = nfsva.na_fileid; + } else if (nfsva.na_filesid[0] == + dnp->n_vattr.na_filesid[0] && + nfsva.na_filesid[1] == + dnp->n_vattr.na_filesid[1]) { + dotdotfileid = nfsva.na_fileid; + } else { + do { + fakefileno--; + } while (fakefileno == + nfsva.na_fileid); + dotdotfileid = fakefileno; + } + } + } else if (nd->nd_repstat == NFSERR_NOENT) { + /* + * Lookupp returns NFSERR_NOENT when we are + * at the root, so just use the current dir. + */ + nd->nd_repstat = 0; + dotdotfileid = dotfileid; + } else { + error = nd->nd_repstat; + } + mbuf_freem(nd->nd_mrep); + if (error) + return (error); + nd->nd_mrep = NULL; + dp = (struct dirent *) CAST_DOWN(caddr_t, uio_iov_base(uiop)); + dp->d_type = DT_DIR; + dp->d_fileno = dotfileid; + dp->d_namlen = 1; + dp->d_name[0] = '.'; + dp->d_name[1] = '\0'; + dp->d_reclen = DIRENT_SIZE(dp) + NFSX_HYPER; + /* + * Just make these offset cookie 0. + */ + tl = (u_int32_t *)&dp->d_name[4]; + *tl++ = 0; + *tl = 0; + blksiz += dp->d_reclen; + uio_uio_resid_add(uiop, -(dp->d_reclen)); + uiop->uio_offset += dp->d_reclen; + uio_iov_base_add(uiop, dp->d_reclen); + uio_iov_len_add(uiop, -(dp->d_reclen)); + dp = (struct dirent *) CAST_DOWN(caddr_t, uio_iov_base(uiop)); + dp->d_type = DT_DIR; + dp->d_fileno = dotdotfileid; + dp->d_namlen = 2; + dp->d_name[0] = '.'; + dp->d_name[1] = '.'; + dp->d_name[2] = '\0'; + dp->d_reclen = DIRENT_SIZE(dp) + NFSX_HYPER; + /* + * Just make these offset cookie 0. + */ + tl = (u_int32_t *)&dp->d_name[4]; + *tl++ = 0; + *tl = 0; + blksiz += dp->d_reclen; + uio_uio_resid_add(uiop, -(dp->d_reclen)); + uiop->uio_offset += dp->d_reclen; + uio_iov_base_add(uiop, dp->d_reclen); + uio_iov_len_add(uiop, -(dp->d_reclen)); + } + NFSSETBIT_ATTRBIT(&attrbits, NFSATTRBIT_RDATTRERROR); + } else { + reqsize = 5 * NFSX_UNSIGNED; + } + + + /* + * Loop around doing readdir rpc's of size readsize. + * The stopping criteria is EOF or buffer full. 
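+ * Entries never straddle a DIRBLKSIZ boundary: when the next entry
+ * (DIRHDSIZ + tlen + NFSX_HYPER bytes) does not fit in what remains
+ * of the current chunk, the previous entry's d_reclen is grown by
+ * the leftover so each chunk stays exactly DIRBLKSIZ long.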
+ */ + while (more_dirs && bigenough) { + *attrflagp = 0; + NFSCL_REQSTART(nd, NFSPROC_READDIR, vp); + if (nd->nd_flag & ND_NFSV2) { + NFSM_BUILD(tl, u_int32_t *, 2 * NFSX_UNSIGNED); + *tl++ = cookie.lval[1]; + *tl = txdr_unsigned(readsize); + } else { + NFSM_BUILD(tl, u_int32_t *, reqsize); + *tl++ = cookie.lval[0]; + *tl++ = cookie.lval[1]; + if (cookie.qval == 0) { + *tl++ = 0; + *tl++ = 0; + } else { + NFSLOCKNODE(dnp); + *tl++ = dnp->n_cookieverf.nfsuquad[0]; + *tl++ = dnp->n_cookieverf.nfsuquad[1]; + NFSUNLOCKNODE(dnp); + } + if (nd->nd_flag & ND_NFSV4) { + *tl++ = txdr_unsigned(readsize); + *tl = txdr_unsigned(readsize); + (void) nfsrv_putattrbit(nd, &attrbits); + NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED); + *tl = txdr_unsigned(NFSV4OP_GETATTR); + (void) nfsrv_putattrbit(nd, &dattrbits); + } else { + *tl = txdr_unsigned(readsize); + } + } + error = nfscl_request(nd, vp, p, cred, stuff); + if (error) + return (error); + if (!(nd->nd_flag & ND_NFSV2)) { + if (nd->nd_flag & ND_NFSV3) + error = nfscl_postop_attr(nd, nap, attrflagp, + stuff); + if (!nd->nd_repstat && !error) { + NFSM_DISSECT(tl, u_int32_t *, NFSX_HYPER); + NFSLOCKNODE(dnp); + dnp->n_cookieverf.nfsuquad[0] = *tl++; + dnp->n_cookieverf.nfsuquad[1] = *tl; + NFSUNLOCKNODE(dnp); + } + } + if (nd->nd_repstat || error) { + if (!error) + error = nd->nd_repstat; + goto nfsmout; + } + NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED); + more_dirs = fxdr_unsigned(int, *tl); + if (!more_dirs) + tryformoredirs = 0; + + /* loop thru the dir entries, doctoring them to 4bsd form */ + while (more_dirs && bigenough) { + if (nd->nd_flag & ND_NFSV4) { + NFSM_DISSECT(tl, u_int32_t *, 3*NFSX_UNSIGNED); + ncookie.lval[0] = *tl++; + ncookie.lval[1] = *tl++; + len = fxdr_unsigned(int, *tl); + } else if (nd->nd_flag & ND_NFSV3) { + NFSM_DISSECT(tl, u_int32_t *, 3*NFSX_UNSIGNED); + nfsva.na_fileid = + fxdr_unsigned(long, *++tl); + len = fxdr_unsigned(int, *++tl); + } else { + NFSM_DISSECT(tl, u_int32_t *, 2*NFSX_UNSIGNED); + nfsva.na_fileid = + fxdr_unsigned(long, *tl++); + len = fxdr_unsigned(int, *tl); + } + if (len <= 0 || len > NFS_MAXNAMLEN) { + error = EBADRPC; + goto nfsmout; + } + tlen = NFSM_RNDUP(len); + if (tlen == len) + tlen += 4; /* To ensure null termination */ + left = DIRBLKSIZ - blksiz; + if ((int)(tlen + DIRHDSIZ + NFSX_HYPER) > left) { + dp->d_reclen += left; + uio_iov_base_add(uiop, left); + uio_iov_len_add(uiop, -(left)); + uio_uio_resid_add(uiop, -(left)); + uiop->uio_offset += left; + blksiz = 0; + } + if ((int)(tlen + DIRHDSIZ + NFSX_HYPER) > uio_uio_resid(uiop)) + bigenough = 0; + if (bigenough) { + dp = (struct dirent *) CAST_DOWN(caddr_t, uio_iov_base(uiop)); + dp->d_namlen = len; + dp->d_reclen = tlen + DIRHDSIZ + NFSX_HYPER; + dp->d_type = DT_UNKNOWN; + blksiz += dp->d_reclen; + if (blksiz == DIRBLKSIZ) + blksiz = 0; + uio_uio_resid_add(uiop, -(DIRHDSIZ)); + uiop->uio_offset += DIRHDSIZ; + uio_iov_base_add(uiop, DIRHDSIZ); + uio_iov_len_add(uiop, -(DIRHDSIZ)); + error = nfsm_mbufuio(nd, uiop, len); + if (error) + goto nfsmout; + cp = CAST_DOWN(caddr_t, uio_iov_base(uiop)); + tlen -= len; + *cp = '\0'; /* null terminate */ + cp += tlen; /* points to cookie storage */ + tl2 = (u_int32_t *)cp; + uio_iov_base_add(uiop, (tlen + NFSX_HYPER)); + uio_iov_len_add(uiop, -(tlen + NFSX_HYPER)); + uio_uio_resid_add(uiop, -(tlen + NFSX_HYPER)); + uiop->uio_offset += (tlen + NFSX_HYPER); + } else { + error = nfsm_advance(nd, NFSM_RNDUP(len), -1); + if (error) + goto nfsmout; + } + if (nd->nd_flag & ND_NFSV4) { + rderr = 0; + 
nfsva.na_mntonfileno = 0xffffffff; + error = nfsv4_loadattr(nd, NULL, &nfsva, NULL, + NULL, 0, NULL, NULL, NULL, NULL, NULL, 0, + NULL, NULL, &rderr, p, cred); + if (error) + goto nfsmout; + NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED); + } else if (nd->nd_flag & ND_NFSV3) { + NFSM_DISSECT(tl, u_int32_t *, 3*NFSX_UNSIGNED); + ncookie.lval[0] = *tl++; + ncookie.lval[1] = *tl++; + } else { + NFSM_DISSECT(tl, u_int32_t *, 2*NFSX_UNSIGNED); + ncookie.lval[0] = 0; + ncookie.lval[1] = *tl++; + } + if (bigenough) { + if (nd->nd_flag & ND_NFSV4) { + if (rderr) { + dp->d_fileno = 0; + } else { + if (gotmnton) { + if (nfsva.na_mntonfileno != 0xffffffff) + dp->d_fileno = nfsva.na_mntonfileno; + else + dp->d_fileno = nfsva.na_fileid; + } else if (nfsva.na_filesid[0] == + dnp->n_vattr.na_filesid[0] && + nfsva.na_filesid[1] == + dnp->n_vattr.na_filesid[1]) { + dp->d_fileno = nfsva.na_fileid; + } else { + do { + fakefileno--; + } while (fakefileno == + nfsva.na_fileid); + dp->d_fileno = fakefileno; + } + dp->d_type = vtonfs_dtype(nfsva.na_type); + } + } else { + dp->d_fileno = nfsva.na_fileid; + } + *tl2++ = cookiep->nfsuquad[0] = cookie.lval[0] = + ncookie.lval[0]; + *tl2 = cookiep->nfsuquad[1] = cookie.lval[1] = + ncookie.lval[1]; + } + more_dirs = fxdr_unsigned(int, *tl); + } + /* + * If at end of rpc data, get the eof boolean + */ + if (!more_dirs) { + NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED); + eof = fxdr_unsigned(int, *tl); + if (tryformoredirs) + more_dirs = !eof; + if (nd->nd_flag & ND_NFSV4) { + error = nfscl_postop_attr(nd, nap, attrflagp, + stuff); + if (error) + goto nfsmout; + } + } + mbuf_freem(nd->nd_mrep); + nd->nd_mrep = NULL; + } + /* + * Fill last record, iff any, out to a multiple of DIRBLKSIZ + * by increasing d_reclen for the last record. + */ + if (blksiz > 0) { + left = DIRBLKSIZ - blksiz; + dp->d_reclen += left; + uio_iov_base_add(uiop, left); + uio_iov_len_add(uiop, -(left)); + uio_uio_resid_add(uiop, -(left)); + uiop->uio_offset += left; + } + + /* + * If returning no data, assume end of file. + * If not bigenough, return not end of file, since you aren't + * returning all the data + * Otherwise, return the eof flag from the server. + */ + if (eofp) { + if (tresid == ((size_t)(uio_uio_resid(uiop)))) + *eofp = 1; + else if (!bigenough) + *eofp = 0; + else + *eofp = eof; + } + + /* + * Add extra empty records to any remaining DIRBLKSIZ chunks. + */ + while (uio_uio_resid(uiop) > 0 && ((size_t)(uio_uio_resid(uiop))) != tresid) { + dp = (struct dirent *) CAST_DOWN(caddr_t, uio_iov_base(uiop)); + dp->d_type = DT_UNKNOWN; + dp->d_fileno = 0; + dp->d_namlen = 0; + dp->d_name[0] = '\0'; + tl = (u_int32_t *)&dp->d_name[4]; + *tl++ = cookie.lval[0]; + *tl = cookie.lval[1]; + dp->d_reclen = DIRBLKSIZ; + uio_iov_base_add(uiop, DIRBLKSIZ); + uio_iov_len_add(uiop, -(DIRBLKSIZ)); + uio_uio_resid_add(uiop, -(DIRBLKSIZ)); + uiop->uio_offset += DIRBLKSIZ; + } + +nfsmout: + if (nd->nd_mrep != NULL) + mbuf_freem(nd->nd_mrep); + return (error); +} + +#ifndef APPLE +/* + * NFS V3 readdir plus RPC. Used in place of nfsrpc_readdir(). + * (Also used for NFS V4 when mount flag set.) + * (ditto above w.r.t. multiple of DIRBLKSIZ, etc.) 
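+ * The plus variant also returns a filehandle and attributes for each
+ * entry, which the loop below uses to instantiate an nfsnode/vnode
+ * and prime the name cache instead of waiting for a later Lookup.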
+ */ +APPLESTATIC int +nfsrpc_readdirplus(vnode_t vp, struct uio *uiop, nfsuint64 *cookiep, + struct ucred *cred, NFSPROC_T *p, struct nfsvattr *nap, int *attrflagp, + int *eofp, void *stuff) +{ + int len, left; + struct dirent *dp = NULL; + u_int32_t *tl; + vnode_t newvp = NULLVP; + struct nfsrv_descript nfsd, *nd = &nfsd; + struct nameidata nami, *ndp = &nami; + struct componentname *cnp = &ndp->ni_cnd; + struct nfsmount *nmp = VFSTONFS(vnode_mount(vp)); + struct nfsnode *dnp = VTONFS(vp), *np; + struct nfsvattr nfsva; + struct nfsfh *nfhp; + nfsquad_t cookie, ncookie; + int error = 0, tlen, more_dirs = 1, blksiz = 0, bigenough = 1; + int attrflag, tryformoredirs = 1, eof = 0, gotmnton = 0; + int unlocknewvp = 0; + long dotfileid, dotdotfileid = 0, fileno = 0; + char *cp; + nfsattrbit_t attrbits, dattrbits; + size_t tresid; + u_int32_t *tl2 = NULL, fakefileno = 0xffffffff, rderr; + +#ifdef DIAGNOSTIC + if (uiop->uio_iovcnt != 1 || (uio_uio_resid(uiop) & (DIRBLKSIZ - 1))) + panic("nfs readdirplusrpc bad uio"); +#endif + *attrflagp = 0; + if (eofp != NULL) + *eofp = 0; + ndp->ni_dvp = vp; + nd->nd_mrep = NULL; + cookie.lval[0] = cookiep->nfsuquad[0]; + cookie.lval[1] = cookiep->nfsuquad[1]; + tresid = uio_uio_resid(uiop); + + /* + * For NFSv4, first create the "." and ".." entries. + */ + if (NFSHASNFSV4(nmp)) { + NFSGETATTR_ATTRBIT(&dattrbits); + NFSZERO_ATTRBIT(&attrbits); + NFSSETBIT_ATTRBIT(&attrbits, NFSATTRBIT_FILEID); + if (NFSISSET_ATTRBIT(&dnp->n_vattr.na_suppattr, + NFSATTRBIT_MOUNTEDONFILEID)) { + NFSSETBIT_ATTRBIT(&attrbits, + NFSATTRBIT_MOUNTEDONFILEID); + gotmnton = 1; + } else { + /* + * Must fake it. Use the fileno, except when the + * fsid is != to that of the directory. For that + * case, generate a fake fileno that is not the same. + */ + NFSSETBIT_ATTRBIT(&attrbits, NFSATTRBIT_FSID); + gotmnton = 0; + } + + /* + * Joy, oh joy. For V4 we get to hand craft '.' and '..'. 
+ */ + if (uiop->uio_offset == 0) { +#if defined(__FreeBSD_version) && __FreeBSD_version >= 800000 + error = VOP_GETATTR(vp, &nfsva.na_vattr, cred); +#else + error = VOP_GETATTR(vp, &nfsva.na_vattr, cred, p); +#endif + if (error) + return (error); + dotfileid = nfsva.na_fileid; + NFSCL_REQSTART(nd, NFSPROC_LOOKUPP, vp); + NFSM_BUILD(tl, u_int32_t *, 2 * NFSX_UNSIGNED); + *tl++ = txdr_unsigned(NFSV4OP_GETFH); + *tl = txdr_unsigned(NFSV4OP_GETATTR); + (void) nfsrv_putattrbit(nd, &attrbits); + error = nfscl_request(nd, vp, p, cred, stuff); + if (error) + return (error); + if (nd->nd_repstat == 0) { + NFSM_DISSECT(tl, u_int32_t *, 3*NFSX_UNSIGNED); + len = fxdr_unsigned(int, *(tl + 2)); + if (len > 0 && len <= NFSX_V4FHMAX) + error = nfsm_advance(nd, NFSM_RNDUP(len), -1); + else + error = EPERM; + if (!error) { + NFSM_DISSECT(tl, u_int32_t *, 2*NFSX_UNSIGNED); + nfsva.na_mntonfileno = 0xffffffff; + error = nfsv4_loadattr(nd, NULL, &nfsva, NULL, + NULL, 0, NULL, NULL, NULL, NULL, NULL, 0, + NULL, NULL, NULL, p, cred); + if (error) { + dotdotfileid = dotfileid; + } else if (gotmnton) { + if (nfsva.na_mntonfileno != 0xffffffff) + dotdotfileid = nfsva.na_mntonfileno; + else + dotdotfileid = nfsva.na_fileid; + } else if (nfsva.na_filesid[0] == + dnp->n_vattr.na_filesid[0] && + nfsva.na_filesid[1] == + dnp->n_vattr.na_filesid[1]) { + dotdotfileid = nfsva.na_fileid; + } else { + do { + fakefileno--; + } while (fakefileno == + nfsva.na_fileid); + dotdotfileid = fakefileno; + } + } + } else if (nd->nd_repstat == NFSERR_NOENT) { + /* + * Lookupp returns NFSERR_NOENT when we are + * at the root, so just use the current dir. + */ + nd->nd_repstat = 0; + dotdotfileid = dotfileid; + } else { + error = nd->nd_repstat; + } + mbuf_freem(nd->nd_mrep); + if (error) + return (error); + nd->nd_mrep = NULL; + dp = (struct dirent *)uio_iov_base(uiop); + dp->d_type = DT_DIR; + dp->d_fileno = dotfileid; + dp->d_namlen = 1; + dp->d_name[0] = '.'; + dp->d_name[1] = '\0'; + dp->d_reclen = DIRENT_SIZE(dp) + NFSX_HYPER; + /* + * Just make these offset cookie 0. + */ + tl = (u_int32_t *)&dp->d_name[4]; + *tl++ = 0; + *tl = 0; + blksiz += dp->d_reclen; + uio_uio_resid_add(uiop, -(dp->d_reclen)); + uiop->uio_offset += dp->d_reclen; + uio_iov_base_add(uiop, dp->d_reclen); + uio_iov_len_add(uiop, -(dp->d_reclen)); + dp = (struct dirent *)uio_iov_base(uiop); + dp->d_type = DT_DIR; + dp->d_fileno = dotdotfileid; + dp->d_namlen = 2; + dp->d_name[0] = '.'; + dp->d_name[1] = '.'; + dp->d_name[2] = '\0'; + dp->d_reclen = DIRENT_SIZE(dp) + NFSX_HYPER; + /* + * Just make these offset cookie 0. + */ + tl = (u_int32_t *)&dp->d_name[4]; + *tl++ = 0; + *tl = 0; + blksiz += dp->d_reclen; + uio_uio_resid_add(uiop, -(dp->d_reclen)); + uiop->uio_offset += dp->d_reclen; + uio_iov_base_add(uiop, dp->d_reclen); + uio_iov_len_add(uiop, -(dp->d_reclen)); + } + NFSREADDIRPLUS_ATTRBIT(&attrbits); + if (gotmnton) + NFSSETBIT_ATTRBIT(&attrbits, + NFSATTRBIT_MOUNTEDONFILEID); + } + + /* + * Loop around doing readdir rpc's of size nm_readdirsize. + * The stopping criteria is EOF or buffer full. 
+ */ + while (more_dirs && bigenough) { + *attrflagp = 0; + NFSCL_REQSTART(nd, NFSPROC_READDIRPLUS, vp); + NFSM_BUILD(tl, u_int32_t *, 6 * NFSX_UNSIGNED); + *tl++ = cookie.lval[0]; + *tl++ = cookie.lval[1]; + if (cookie.qval == 0) { + *tl++ = 0; + *tl++ = 0; + } else { + NFSLOCKNODE(dnp); + *tl++ = dnp->n_cookieverf.nfsuquad[0]; + *tl++ = dnp->n_cookieverf.nfsuquad[1]; + NFSUNLOCKNODE(dnp); + } + *tl++ = txdr_unsigned(nmp->nm_readdirsize); + *tl = txdr_unsigned(nmp->nm_readdirsize); + if (nd->nd_flag & ND_NFSV4) { + (void) nfsrv_putattrbit(nd, &attrbits); + NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED); + *tl = txdr_unsigned(NFSV4OP_GETATTR); + (void) nfsrv_putattrbit(nd, &dattrbits); + } + error = nfscl_request(nd, vp, p, cred, stuff); + if (error) + return (error); + if (nd->nd_flag & ND_NFSV3) + error = nfscl_postop_attr(nd, nap, attrflagp, stuff); + if (nd->nd_repstat || error) { + if (!error) + error = nd->nd_repstat; + goto nfsmout; + } + NFSM_DISSECT(tl, u_int32_t *, 3 * NFSX_UNSIGNED); + NFSLOCKNODE(dnp); + dnp->n_cookieverf.nfsuquad[0] = *tl++; + dnp->n_cookieverf.nfsuquad[1] = *tl++; + NFSUNLOCKNODE(dnp); + more_dirs = fxdr_unsigned(int, *tl); + if (!more_dirs) + tryformoredirs = 0; + + /* loop thru the dir entries, doctoring them to 4bsd form */ + while (more_dirs && bigenough) { + NFSM_DISSECT(tl, u_int32_t *, 3 * NFSX_UNSIGNED); + if (nd->nd_flag & ND_NFSV4) { + ncookie.lval[0] = *tl++; + ncookie.lval[1] = *tl++; + } else { + fileno = fxdr_unsigned(long, *++tl); + tl++; + } + len = fxdr_unsigned(int, *tl); + if (len <= 0 || len > NFS_MAXNAMLEN) { + error = EBADRPC; + goto nfsmout; + } + tlen = NFSM_RNDUP(len); + if (tlen == len) + tlen += 4; /* To ensure null termination */ + left = DIRBLKSIZ - blksiz; + if ((tlen + DIRHDSIZ + NFSX_HYPER) > left) { + dp->d_reclen += left; + uio_iov_base_add(uiop, left); + uio_iov_len_add(uiop, -(left)); + uio_uio_resid_add(uiop, -(left)); + uiop->uio_offset += left; + blksiz = 0; + } + if ((tlen + DIRHDSIZ + NFSX_HYPER) > uio_uio_resid(uiop)) + bigenough = 0; + if (bigenough) { + dp = (struct dirent *)uio_iov_base(uiop); + dp->d_namlen = len; + dp->d_reclen = tlen + DIRHDSIZ + NFSX_HYPER; + dp->d_type = DT_UNKNOWN; + blksiz += dp->d_reclen; + if (blksiz == DIRBLKSIZ) + blksiz = 0; + uio_uio_resid_add(uiop, -(DIRHDSIZ)); + uiop->uio_offset += DIRHDSIZ; + uio_iov_base_add(uiop, DIRHDSIZ); + uio_iov_len_add(uiop, -(DIRHDSIZ)); + cnp->cn_nameptr = uio_iov_base(uiop); + cnp->cn_namelen = len; + NFSCNHASHZERO(cnp); + error = nfsm_mbufuio(nd, uiop, len); + if (error) + goto nfsmout; + cp = uio_iov_base(uiop); + tlen -= len; + *cp = '\0'; + cp += tlen; /* points to cookie storage */ + tl2 = (u_int32_t *)cp; + uio_iov_base_add(uiop, (tlen + NFSX_HYPER)); + uio_iov_len_add(uiop, -(tlen + NFSX_HYPER)); + uio_uio_resid_add(uiop, -(tlen + NFSX_HYPER)); + uiop->uio_offset += (tlen + NFSX_HYPER); + } else { + error = nfsm_advance(nd, NFSM_RNDUP(len), -1); + if (error) + goto nfsmout; + } + nfhp = NULL; + if (nd->nd_flag & ND_NFSV3) { + NFSM_DISSECT(tl, u_int32_t *, 3*NFSX_UNSIGNED); + ncookie.lval[0] = *tl++; + ncookie.lval[1] = *tl++; + attrflag = fxdr_unsigned(int, *tl); + if (attrflag) { + error = nfsm_loadattr(nd, &nfsva); + if (error) + goto nfsmout; + } + NFSM_DISSECT(tl,u_int32_t *,NFSX_UNSIGNED); + if (*tl) { + error = nfsm_getfh(nd, &nfhp); + if (error) + goto nfsmout; + } + if (!attrflag && nfhp != NULL) { + FREE((caddr_t)nfhp, M_NFSFH); + nfhp = NULL; + } + } else { + rderr = 0; + nfsva.na_mntonfileno = 0xffffffff; + error = nfsv4_loadattr(nd, NULL, 
&nfsva, &nfhp, + NULL, 0, NULL, NULL, NULL, NULL, NULL, 0, + NULL, NULL, &rderr, p, cred); + if (error) + goto nfsmout; + } + + if (bigenough) { + if (nd->nd_flag & ND_NFSV4) { + if (rderr) { + dp->d_fileno = 0; + } else if (gotmnton) { + if (nfsva.na_mntonfileno != 0xffffffff) + dp->d_fileno = nfsva.na_mntonfileno; + else + dp->d_fileno = nfsva.na_fileid; + } else if (nfsva.na_filesid[0] == + dnp->n_vattr.na_filesid[0] && + nfsva.na_filesid[1] == + dnp->n_vattr.na_filesid[1]) { + dp->d_fileno = nfsva.na_fileid; + } else { + do { + fakefileno--; + } while (fakefileno == + nfsva.na_fileid); + dp->d_fileno = fakefileno; + } + } else { + dp->d_fileno = fileno; + } + *tl2++ = cookiep->nfsuquad[0] = cookie.lval[0] = + ncookie.lval[0]; + *tl2 = cookiep->nfsuquad[1] = cookie.lval[1] = + ncookie.lval[1]; + + if (nfhp != NULL) { + if (NFSRV_CMPFH(nfhp->nfh_fh, nfhp->nfh_len, + dnp->n_fhp->nfh_fh, dnp->n_fhp->nfh_len)) { + VREF(vp); + newvp = vp; + unlocknewvp = 0; + FREE((caddr_t)nfhp, M_NFSFH); + np = dnp; + } else { + error = nfscl_nget(vnode_mount(vp), vp, + nfhp, cnp, p, &np, NULL); + if (!error) { + newvp = NFSTOV(np); + unlocknewvp = 1; + } + } + nfhp = NULL; + if (newvp != NULLVP) { + error = nfscl_loadattrcache(&newvp, + &nfsva, NULL, NULL, 0, 0); + if (error) { + if (unlocknewvp) + vput(newvp); + else + vrele(newvp); + goto nfsmout; + } + dp->d_type = + vtonfs_dtype(np->n_vattr.na_type); + ndp->ni_vp = newvp; + NFSCNHASH(cnp, HASHINIT); + if (cnp->cn_namelen <= NCHNAMLEN) { + np->n_ctime = + np->n_vattr.na_ctime.tv_sec; + cache_enter(ndp->ni_dvp,ndp->ni_vp,cnp); + } + if (unlocknewvp) + vput(newvp); + else + vrele(newvp); + newvp = NULLVP; + } + } + } else if (nfhp != NULL) { + FREE((caddr_t)nfhp, M_NFSFH); + } + NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED); + more_dirs = fxdr_unsigned(int, *tl); + } + /* + * If at end of rpc data, get the eof boolean + */ + if (!more_dirs) { + NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED); + eof = fxdr_unsigned(int, *tl); + if (tryformoredirs) + more_dirs = !eof; + if (nd->nd_flag & ND_NFSV4) { + error = nfscl_postop_attr(nd, nap, attrflagp, + stuff); + if (error) + goto nfsmout; + } + } + mbuf_freem(nd->nd_mrep); + nd->nd_mrep = NULL; + } + /* + * Fill last record, iff any, out to a multiple of DIRBLKSIZ + * by increasing d_reclen for the last record. + */ + if (blksiz > 0) { + left = DIRBLKSIZ - blksiz; + dp->d_reclen += left; + uio_iov_base_add(uiop, left); + uio_iov_len_add(uiop, -(left)); + uio_uio_resid_add(uiop, -(left)); + uiop->uio_offset += left; + } + + /* + * If returning no data, assume end of file. + * If not bigenough, return not end of file, since you aren't + * returning all the data + * Otherwise, return the eof flag from the server. + */ + if (eofp != NULL) { + if (tresid == uio_uio_resid(uiop)) + *eofp = 1; + else if (!bigenough) + *eofp = 0; + else + *eofp = eof; + } + + /* + * Add extra empty records to any remaining DIRBLKSIZ chunks. 
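The padding loop that follows writes one empty record per leftover DIRBLKSIZ chunk, carrying the current cookie so a reader that seeks by cookie still lands on a valid offset. A compact sketch of such a pad record (the struct and block size are illustrative, not the kernel's; the buffer is assumed to be at least BLK bytes):

#include <stdint.h>
#include <string.h>

#define BLK 512				/* stand-in for DIRBLKSIZ */

struct pad_dirent {			/* hypothetical, as in the sketch above */
	uint64_t d_fileno;
	uint16_t d_reclen;
	uint8_t	 d_type;
	uint8_t	 d_namlen;
	char	 d_name[12];		/* nul + pad + 8-byte cookie */
};

/* One empty record claims the whole leftover block. */
static void
emit_pad_record(void *chunk, const uint32_t cookie[2])
{
	struct pad_dirent *dp = chunk;

	memset(chunk, 0, BLK);
	dp->d_fileno = 0;
	dp->d_namlen = 0;
	dp->d_name[0] = '\0';
	memcpy(&dp->d_name[4], cookie, 2 * sizeof(uint32_t));
	dp->d_reclen = BLK;
}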
+ */ + while (uio_uio_resid(uiop) > 0 && uio_uio_resid(uiop) != tresid) { + dp = (struct dirent *)uio_iov_base(uiop); + dp->d_type = DT_UNKNOWN; + dp->d_fileno = 0; + dp->d_namlen = 0; + dp->d_name[0] = '\0'; + tl = (u_int32_t *)&dp->d_name[4]; + *tl++ = cookie.lval[0]; + *tl = cookie.lval[1]; + dp->d_reclen = DIRBLKSIZ; + uio_iov_base_add(uiop, DIRBLKSIZ); + uio_iov_len_add(uiop, -(DIRBLKSIZ)); + uio_uio_resid_add(uiop, -(DIRBLKSIZ)); + uiop->uio_offset += DIRBLKSIZ; + } + +nfsmout: + if (nd->nd_mrep != NULL) + mbuf_freem(nd->nd_mrep); + return (error); +} +#endif /* !APPLE */ + +/* + * Nfs commit rpc + */ +APPLESTATIC int +nfsrpc_commit(vnode_t vp, u_quad_t offset, int cnt, struct ucred *cred, + NFSPROC_T *p, u_char *verfp, struct nfsvattr *nap, int *attrflagp, + void *stuff) +{ + u_int32_t *tl; + struct nfsrv_descript nfsd, *nd = &nfsd; + nfsattrbit_t attrbits; + int error; + + *attrflagp = 0; + NFSCL_REQSTART(nd, NFSPROC_COMMIT, vp); + NFSM_BUILD(tl, u_int32_t *, 3 * NFSX_UNSIGNED); + txdr_hyper(offset, tl); + tl += 2; + *tl = txdr_unsigned(cnt); + if (nd->nd_flag & ND_NFSV4) { + /* + * And do a Getattr op. + */ + NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED); + *tl = txdr_unsigned(NFSV4OP_GETATTR); + NFSGETATTR_ATTRBIT(&attrbits); + (void) nfsrv_putattrbit(nd, &attrbits); + } + error = nfscl_request(nd, vp, p, cred, stuff); + if (error) + return (error); + error = nfscl_wcc_data(nd, vp, nap, attrflagp, NULL, stuff); + if (!error && !nd->nd_repstat) { + NFSM_DISSECT(tl, u_int32_t *, NFSX_VERF); + NFSBCOPY((caddr_t)tl, verfp, NFSX_VERF); + if (nd->nd_flag & ND_NFSV4) + error = nfscl_postop_attr(nd, nap, attrflagp, stuff); + } +nfsmout: + if (!error && nd->nd_repstat) + error = nd->nd_repstat; + mbuf_freem(nd->nd_mrep); + return (error); +} + +/* + * NFS byte range lock rpc. + * (Mostly just calls one of the three lower level RPC routines.) + */ +APPLESTATIC int +nfsrpc_advlock(vnode_t vp, off_t size, int op, struct flock *fl, + int reclaim, struct ucred *cred, NFSPROC_T *p) +{ + struct nfscllockowner *lp; + struct nfsclclient *clp; + struct nfsfh *nfhp; + struct nfsrv_descript nfsd, *nd = &nfsd; + struct nfsmount *nmp = VFSTONFS(vnode_mount(vp)); + u_int64_t off, len; + off_t start, end; + u_int32_t clidrev = 0; + int error = 0, newone = 0, expireret = 0, retrycnt, donelocally; + int callcnt, dorpc; + + /* + * Convert the flock structure into a start and end and do POSIX + * bounds checking. + */ + switch (fl->l_whence) { + case SEEK_SET: + case SEEK_CUR: + /* + * Caller is responsible for adding any necessary offset + * when SEEK_CUR is used. + */ + start = fl->l_start; + off = fl->l_start; + break; + case SEEK_END: + start = size + fl->l_start; + off = size + fl->l_start; + break; + default: + return (EINVAL); + }; + if (start < 0) + return (EINVAL); + if (fl->l_len != 0) { + end = start + fl->l_len - 1; + if (end < start) + return (EINVAL); + } + + len = fl->l_len; + if (len == 0) + len = NFS64BITSSET; + retrycnt = 0; + do { + nd->nd_repstat = 0; + if (op == F_GETLK) { + error = nfscl_getcl(vp, cred, p, &clp); + if (error) + return (error); + error = nfscl_lockt(vp, clp, off, len, fl, p); + if (!error) { + clidrev = clp->nfsc_clientidrev; + error = nfsrpc_lockt(nd, vp, clp, off, len, fl, cred, + p); + } else if (error == -1) { + error = 0; + } + nfscl_clientrelease(clp); + } else if (op == F_UNLCK && fl->l_type == F_UNLCK) { + /* + * We must loop around for all lockowner cases. 
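Stepping back to the flock conversion at the top of nfsrpc_advlock(): a self-contained sketch of the same POSIX bounds checking (the helper name and the to_eof flag are illustrative; on the wire "to end of file" is encoded as the all-ones length NFS64BITSSET):

#include <errno.h>
#include <fcntl.h>
#include <sys/types.h>

static int
flock_to_range(const struct flock *fl, off_t size, off_t *startp,
    off_t *endp, int *to_eofp)
{
	off_t start;

	switch (fl->l_whence) {
	case SEEK_SET:
	case SEEK_CUR:		/* caller already folded in the file offset */
		start = fl->l_start;
		break;
	case SEEK_END:
		start = size + fl->l_start;
		break;
	default:
		return (EINVAL);
	}
	if (start < 0)
		return (EINVAL);
	*to_eofp = (fl->l_len == 0);
	if (!*to_eofp) {
		*endp = start + fl->l_len - 1;
		if (*endp < start)	/* wrapped: reject, as above */
			return (EINVAL);
	}
	*startp = start;
	return (0);
}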
+ */ + callcnt = 0; + error = nfscl_getcl(vp, cred, p, &clp); + if (error) + return (error); + do { + error = nfscl_relbytelock(vp, off, len, cred, p, callcnt, + clp, &lp, &dorpc); + /* + * If it returns a NULL lp, we're done. + */ + if (lp == NULL) { + if (callcnt == 0) + nfscl_clientrelease(clp); + else + nfscl_releasealllocks(clp, vp, p); + return (error); + } + if (nmp->nm_clp != NULL) + clidrev = nmp->nm_clp->nfsc_clientidrev; + else + clidrev = 0; + /* + * If the server doesn't support Posix lock semantics, + * only allow locks on the entire file, since it won't + * handle overlapping byte ranges. + * There might still be a problem when a lock + * upgrade/downgrade (read<->write) occurs, since the + * server "might" expect an unlock first? + */ + if (dorpc && (lp->nfsl_open->nfso_posixlock || + (off == 0 && len == NFS64BITSSET))) { + /* + * Since the lock records will go away, we must + * wait for grace and delay here. + */ + do { + error = nfsrpc_locku(nd, nmp, lp, off, len, + NFSV4LOCKT_READ, cred, p, 0); + if ((nd->nd_repstat == NFSERR_GRACE || + nd->nd_repstat == NFSERR_DELAY) && + error == 0) + (void) nfs_catnap(PZERO, "nfs_advlock"); + } while ((nd->nd_repstat == NFSERR_GRACE || + nd->nd_repstat == NFSERR_DELAY) && error == 0); + } + callcnt++; + } while (error == 0 && nd->nd_repstat == 0); + nfscl_releasealllocks(clp, vp, p); + } else if (op == F_SETLK) { + error = nfscl_getbytelock(vp, off, len, fl->l_type, cred, p, + NULL, 0, NULL, NULL, &lp, &newone, &donelocally); + if (error || donelocally) { + return (error); + } + if (nmp->nm_clp != NULL) + clidrev = nmp->nm_clp->nfsc_clientidrev; + else + clidrev = 0; + nfhp = VTONFS(vp)->n_fhp; + if (!lp->nfsl_open->nfso_posixlock && + (off != 0 || len != NFS64BITSSET)) { + error = EINVAL; + } else { + error = nfsrpc_lock(nd, nmp, vp, nfhp->nfh_fh, + nfhp->nfh_len, lp, newone, reclaim, off, + len, fl->l_type, cred, p, 0); + } + if (!error) + error = nd->nd_repstat; + nfscl_lockrelease(lp, error, newone); + } else { + error = EINVAL; + } + if (!error) + error = nd->nd_repstat; + if (error == NFSERR_GRACE || error == NFSERR_STALESTATEID || + error == NFSERR_STALEDONTRECOVER || + error == NFSERR_STALECLIENTID || error == NFSERR_DELAY) { + (void) nfs_catnap(PZERO, "nfs_advlock"); + } else if ((error == NFSERR_EXPIRED || error == NFSERR_BADSTATEID) + && clidrev != 0) { + expireret = nfscl_hasexpired(nmp->nm_clp, clidrev, p); + retrycnt++; + } + } while (error == NFSERR_GRACE || + error == NFSERR_STALECLIENTID || error == NFSERR_DELAY || + error == NFSERR_STALEDONTRECOVER || error == NFSERR_STALESTATEID || + ((error == NFSERR_EXPIRED || error == NFSERR_BADSTATEID) && + expireret == 0 && clidrev != 0 && retrycnt < 4)); + if (error && retrycnt >= 4) + error = EIO; + return (error); +} + +/* + * The lower level routine for the LockT case. 
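The retry discipline above (nap on NFSERR_GRACE/NFSERR_DELAY, a bounded number of retries after expiry, EIO as the final fallback) can be summarized in a small user-space sketch. The rpc callback, the error constants, and the sleep are stand-ins for nfscl_request()/newnfs_request(), the NFSERR_* values, and nfs_catnap(); the real loop also distinguishes the stale-clientid cases:

#include <errno.h>
#include <unistd.h>

enum { E_OK, E_GRACE, E_DELAY, E_EXPIRED };	/* stand-in error codes */

static int
retry_locked_rpc(int (*rpc)(void))
{
	int error, retrycnt = 0;

	do {
		error = rpc();
		if (error == E_GRACE || error == E_DELAY)
			usleep(100000);		/* back off, like nfs_catnap() */
		else if (error == E_EXPIRED)
			retrycnt++;		/* lease died: bounded retries */
	} while (error == E_GRACE || error == E_DELAY ||
	    (error == E_EXPIRED && retrycnt < 4));
	return (error == E_EXPIRED ? EIO : error);
}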
+ */ +APPLESTATIC int +nfsrpc_lockt(struct nfsrv_descript *nd, vnode_t vp, + struct nfsclclient *clp, u_int64_t off, u_int64_t len, struct flock *fl, + struct ucred *cred, NFSPROC_T *p) +{ + u_int32_t *tl; + int error, type, size; + u_int8_t own[NFSV4CL_LOCKNAMELEN]; + + NFSCL_REQSTART(nd, NFSPROC_LOCKT, vp); + NFSM_BUILD(tl, u_int32_t *, 7 * NFSX_UNSIGNED); + if (fl->l_type == F_RDLCK) + *tl++ = txdr_unsigned(NFSV4LOCKT_READ); + else + *tl++ = txdr_unsigned(NFSV4LOCKT_WRITE); + txdr_hyper(off, tl); + tl += 2; + txdr_hyper(len, tl); + tl += 2; + *tl++ = clp->nfsc_clientid.lval[0]; + *tl = clp->nfsc_clientid.lval[1]; + nfscl_filllockowner(p, own); + (void) nfsm_strtom(nd, own, NFSV4CL_LOCKNAMELEN); + error = nfscl_request(nd, vp, p, cred, NULL); + if (error) + return (error); + if (nd->nd_repstat == 0) { + fl->l_type = F_UNLCK; + } else if (nd->nd_repstat == NFSERR_DENIED) { + nd->nd_repstat = 0; + fl->l_whence = SEEK_SET; + NFSM_DISSECT(tl, u_int32_t *, 8 * NFSX_UNSIGNED); + fl->l_start = fxdr_hyper(tl); + tl += 2; + len = fxdr_hyper(tl); + tl += 2; + if (len == NFS64BITSSET) + fl->l_len = 0; + else + fl->l_len = len; + type = fxdr_unsigned(int, *tl++); + if (type == NFSV4LOCKT_WRITE) + fl->l_type = F_WRLCK; + else + fl->l_type = F_RDLCK; + /* + * XXX For now, I have no idea what to do with the + * conflicting lock_owner, so I'll just set the pid == 0 + * and skip over the lock_owner. + */ + fl->l_pid = (pid_t)0; + tl += 2; + size = fxdr_unsigned(int, *tl); + if (size < 0 || size > NFSV4_OPAQUELIMIT) + error = EBADRPC; + if (!error) + error = nfsm_advance(nd, NFSM_RNDUP(size), -1); + } else if (nd->nd_repstat == NFSERR_STALECLIENTID) + nfscl_initiate_recovery(clp); +nfsmout: + mbuf_freem(nd->nd_mrep); + return (error); +} + +/* + * Lower level function that performs the LockU RPC. + */ +static int +nfsrpc_locku(struct nfsrv_descript *nd, struct nfsmount *nmp, + struct nfscllockowner *lp, u_int64_t off, u_int64_t len, + u_int32_t type, struct ucred *cred, NFSPROC_T *p, int syscred) +{ + u_int32_t *tl; + int error; + + nfscl_reqstart(nd, NFSPROC_LOCKU, nmp, lp->nfsl_open->nfso_fh, + lp->nfsl_open->nfso_fhlen, NULL); + NFSM_BUILD(tl, u_int32_t *, NFSX_STATEID + 6 * NFSX_UNSIGNED); + *tl++ = txdr_unsigned(type); + *tl = txdr_unsigned(lp->nfsl_seqid); + if (nfstest_outofseq && + (arc4random() % nfstest_outofseq) == 0) + *tl = txdr_unsigned(lp->nfsl_seqid + 1); + tl++; + *tl++ = lp->nfsl_stateid.seqid; + *tl++ = lp->nfsl_stateid.other[0]; + *tl++ = lp->nfsl_stateid.other[1]; + *tl++ = lp->nfsl_stateid.other[2]; + txdr_hyper(off, tl); + tl += 2; + txdr_hyper(len, tl); + if (syscred) + nd->nd_flag |= ND_USEGSSNAME; + error = newnfs_request(nd, nmp, NULL, &nmp->nm_sockreq, NULL, p, cred, + NFS_PROG, NFS_VER4, NULL, 1, NULL); + NFSCL_INCRSEQID(lp->nfsl_seqid, nd); + if (error) + return (error); + if (nd->nd_repstat == 0) { + NFSM_DISSECT(tl, u_int32_t *, NFSX_STATEID); + lp->nfsl_stateid.seqid = *tl++; + lp->nfsl_stateid.other[0] = *tl++; + lp->nfsl_stateid.other[1] = *tl++; + lp->nfsl_stateid.other[2] = *tl; + } else if (nd->nd_repstat == NFSERR_STALESTATEID) + nfscl_initiate_recovery(lp->nfsl_open->nfso_own->nfsow_clp); +nfsmout: + mbuf_freem(nd->nd_mrep); + return (error); +} + +/* + * The actual Lock RPC. 
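Both lock routines above marshal 64-bit offsets and lengths with txdr_hyper()/fxdr_hyper(). Functionally those amount to the following (a sketch of the macros' effect, not their actual definitions): a 64-bit quantity travels as two network-order 32-bit words, most significant word first.

#include <arpa/inet.h>
#include <stdint.h>

static void
put_hyper(uint32_t *tl, uint64_t v)
{
	tl[0] = htonl((uint32_t)(v >> 32));	/* high word first */
	tl[1] = htonl((uint32_t)v);
}

static uint64_t
get_hyper(const uint32_t *tl)
{
	return (((uint64_t)ntohl(tl[0]) << 32) | ntohl(tl[1]));
}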
+ */ +APPLESTATIC int +nfsrpc_lock(struct nfsrv_descript *nd, struct nfsmount *nmp, vnode_t vp, + u_int8_t *nfhp, int fhlen, struct nfscllockowner *lp, int newone, + int reclaim, u_int64_t off, u_int64_t len, short type, struct ucred *cred, + NFSPROC_T *p, int syscred) +{ + u_int32_t *tl; + int error, size; + + nfscl_reqstart(nd, NFSPROC_LOCK, nmp, nfhp, fhlen, NULL); + NFSM_BUILD(tl, u_int32_t *, 7 * NFSX_UNSIGNED); + if (type == F_RDLCK) + *tl++ = txdr_unsigned(NFSV4LOCKT_READ); + else + *tl++ = txdr_unsigned(NFSV4LOCKT_WRITE); + *tl++ = txdr_unsigned(reclaim); + txdr_hyper(off, tl); + tl += 2; + txdr_hyper(len, tl); + tl += 2; + if (newone) { + *tl = newnfs_true; + NFSM_BUILD(tl, u_int32_t *, NFSX_STATEID + + 2 * NFSX_UNSIGNED + NFSX_HYPER); + *tl++ = txdr_unsigned(lp->nfsl_open->nfso_own->nfsow_seqid); + *tl++ = lp->nfsl_open->nfso_stateid.seqid; + *tl++ = lp->nfsl_open->nfso_stateid.other[0]; + *tl++ = lp->nfsl_open->nfso_stateid.other[1]; + *tl++ = lp->nfsl_open->nfso_stateid.other[2]; + *tl++ = txdr_unsigned(lp->nfsl_seqid); + *tl++ = lp->nfsl_open->nfso_own->nfsow_clp->nfsc_clientid.lval[0]; + *tl = lp->nfsl_open->nfso_own->nfsow_clp->nfsc_clientid.lval[1]; + (void) nfsm_strtom(nd, lp->nfsl_owner, NFSV4CL_LOCKNAMELEN); + } else { + *tl = newnfs_false; + NFSM_BUILD(tl, u_int32_t *, NFSX_STATEID + NFSX_UNSIGNED); + *tl++ = lp->nfsl_stateid.seqid; + *tl++ = lp->nfsl_stateid.other[0]; + *tl++ = lp->nfsl_stateid.other[1]; + *tl++ = lp->nfsl_stateid.other[2]; + *tl = txdr_unsigned(lp->nfsl_seqid); + if (nfstest_outofseq && + (arc4random() % nfstest_outofseq) == 0) + *tl = txdr_unsigned(lp->nfsl_seqid + 1); + } + if (syscred) + nd->nd_flag |= ND_USEGSSNAME; + error = newnfs_request(nd, nmp, NULL, &nmp->nm_sockreq, vp, p, cred, + NFS_PROG, NFS_VER4, NULL, 1, NULL); + if (error) + return (error); + if (newone) + NFSCL_INCRSEQID(lp->nfsl_open->nfso_own->nfsow_seqid, nd); + NFSCL_INCRSEQID(lp->nfsl_seqid, nd); + if (nd->nd_repstat == 0) { + NFSM_DISSECT(tl, u_int32_t *, NFSX_STATEID); + lp->nfsl_stateid.seqid = *tl++; + lp->nfsl_stateid.other[0] = *tl++; + lp->nfsl_stateid.other[1] = *tl++; + lp->nfsl_stateid.other[2] = *tl; + } else if (nd->nd_repstat == NFSERR_DENIED) { + NFSM_DISSECT(tl, u_int32_t *, 8 * NFSX_UNSIGNED); + size = fxdr_unsigned(int, *(tl + 7)); + if (size < 0 || size > NFSV4_OPAQUELIMIT) + error = EBADRPC; + if (!error) + error = nfsm_advance(nd, NFSM_RNDUP(size), -1); + } else if (nd->nd_repstat == NFSERR_STALESTATEID) + nfscl_initiate_recovery(lp->nfsl_open->nfso_own->nfsow_clp); +nfsmout: + mbuf_freem(nd->nd_mrep); + return (error); +} + +/* + * nfs statfs rpc + * (always called with the vp for the mount point) + */ +APPLESTATIC int +nfsrpc_statfs(vnode_t vp, struct nfsstatfs *sbp, struct nfsfsinfo *fsp, + struct ucred *cred, NFSPROC_T *p, struct nfsvattr *nap, int *attrflagp, + void *stuff) +{ + u_int32_t *tl = NULL; + struct nfsrv_descript nfsd, *nd = &nfsd; + struct nfsmount *nmp; + nfsattrbit_t attrbits; + int error; + + *attrflagp = 0; + nmp = VFSTONFS(vnode_mount(vp)); + if (NFSHASNFSV4(nmp)) { + /* + * For V4, you actually do a getattr. 
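Since NFSv4 has no separate FSSTAT procedure, the statfs numbers are requested as GETATTR attributes selected through a word-array bitmap (the NFSZERO_ATTRBIT/NFSSETBIT_ATTRBIT family used throughout this file). A sketch of how such bitmaps behave; the type name, word count, and bit numbers here are illustrative, not the RFC 3530 values:

#include <stdint.h>

typedef struct { uint32_t bits[2]; } attrbits_t;

static void
setbit(attrbits_t *b, int attr)
{
	b->bits[attr / 32] |= 1u << (attr % 32);
}

static int
isset(const attrbits_t *b, int attr)
{
	return ((b->bits[attr / 32] & (1u << (attr % 32))) != 0);
}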
+ */ + NFSCL_REQSTART(nd, NFSPROC_GETATTR, vp); + NFSSTATFS_GETATTRBIT(&attrbits); + (void) nfsrv_putattrbit(nd, &attrbits); + nd->nd_flag |= ND_USEGSSNAME; + error = nfscl_request(nd, vp, p, cred, stuff); + if (error) + return (error); + if (nd->nd_repstat == 0) { + error = nfsv4_loadattr(nd, NULL, nap, NULL, NULL, 0, + NULL, NULL, sbp, fsp, NULL, 0, NULL, NULL, NULL, p, + cred); + if (!error) { + nmp->nm_fsid[0] = nap->na_filesid[0]; + nmp->nm_fsid[1] = nap->na_filesid[1]; + NFSSETHASSETFSID(nmp); + *attrflagp = 1; + } + } else { + error = nd->nd_repstat; + } + if (error) + goto nfsmout; + } else { + NFSCL_REQSTART(nd, NFSPROC_FSSTAT, vp); + error = nfscl_request(nd, vp, p, cred, stuff); + if (error) + return (error); + if (nd->nd_flag & ND_NFSV3) { + error = nfscl_postop_attr(nd, nap, attrflagp, stuff); + if (error) + goto nfsmout; + } + if (nd->nd_repstat) { + error = nd->nd_repstat; + goto nfsmout; + } + NFSM_DISSECT(tl, u_int32_t *, + NFSX_STATFS(nd->nd_flag & ND_NFSV3)); + } + if (NFSHASNFSV3(nmp)) { + sbp->sf_tbytes = fxdr_hyper(tl); tl += 2; + sbp->sf_fbytes = fxdr_hyper(tl); tl += 2; + sbp->sf_abytes = fxdr_hyper(tl); tl += 2; + sbp->sf_tfiles = fxdr_hyper(tl); tl += 2; + sbp->sf_ffiles = fxdr_hyper(tl); tl += 2; + sbp->sf_afiles = fxdr_hyper(tl); tl += 2; + sbp->sf_invarsec = fxdr_unsigned(u_int32_t, *tl); + } else if (NFSHASNFSV4(nmp) == 0) { + sbp->sf_tsize = fxdr_unsigned(u_int32_t, *tl++); + sbp->sf_bsize = fxdr_unsigned(u_int32_t, *tl++); + sbp->sf_blocks = fxdr_unsigned(u_int32_t, *tl++); + sbp->sf_bfree = fxdr_unsigned(u_int32_t, *tl++); + sbp->sf_bavail = fxdr_unsigned(u_int32_t, *tl); + } +nfsmout: + mbuf_freem(nd->nd_mrep); + return (error); +} + +/* + * nfs pathconf rpc + */ +APPLESTATIC int +nfsrpc_pathconf(vnode_t vp, struct nfsv3_pathconf *pc, + struct ucred *cred, NFSPROC_T *p, struct nfsvattr *nap, int *attrflagp, + void *stuff) +{ + struct nfsrv_descript nfsd, *nd = &nfsd; + struct nfsmount *nmp; + u_int32_t *tl; + nfsattrbit_t attrbits; + int error; + + *attrflagp = 0; + nmp = VFSTONFS(vnode_mount(vp)); + if (NFSHASNFSV4(nmp)) { + /* + * For V4, you actually do a getattr. 
+ */ + NFSCL_REQSTART(nd, NFSPROC_GETATTR, vp); + NFSPATHCONF_GETATTRBIT(&attrbits); + (void) nfsrv_putattrbit(nd, &attrbits); + nd->nd_flag |= ND_USEGSSNAME; + error = nfscl_request(nd, vp, p, cred, stuff); + if (error) + return (error); + if (nd->nd_repstat == 0) { + error = nfsv4_loadattr(nd, NULL, nap, NULL, NULL, 0, + pc, NULL, NULL, NULL, NULL, 0, NULL, NULL, NULL, p, + cred); + if (!error) + *attrflagp = 1; + } else { + error = nd->nd_repstat; + } + } else { + NFSCL_REQSTART(nd, NFSPROC_PATHCONF, vp); + error = nfscl_request(nd, vp, p, cred, stuff); + if (error) + return (error); + error = nfscl_postop_attr(nd, nap, attrflagp, stuff); + if (nd->nd_repstat && !error) + error = nd->nd_repstat; + if (!error) { + NFSM_DISSECT(tl, u_int32_t *, NFSX_V3PATHCONF); + pc->pc_linkmax = fxdr_unsigned(u_int32_t, *tl++); + pc->pc_namemax = fxdr_unsigned(u_int32_t, *tl++); + pc->pc_notrunc = fxdr_unsigned(u_int32_t, *tl++); + pc->pc_chownrestricted = + fxdr_unsigned(u_int32_t, *tl++); + pc->pc_caseinsensitive = + fxdr_unsigned(u_int32_t, *tl++); + pc->pc_casepreserving = fxdr_unsigned(u_int32_t, *tl); + } + } +nfsmout: + mbuf_freem(nd->nd_mrep); + return (error); +} + +/* + * nfs version 3 fsinfo rpc call + */ +APPLESTATIC int +nfsrpc_fsinfo(vnode_t vp, struct nfsfsinfo *fsp, struct ucred *cred, + NFSPROC_T *p, struct nfsvattr *nap, int *attrflagp, void *stuff) +{ + u_int32_t *tl; + struct nfsrv_descript nfsd, *nd = &nfsd; + int error; + + *attrflagp = 0; + NFSCL_REQSTART(nd, NFSPROC_FSINFO, vp); + error = nfscl_request(nd, vp, p, cred, stuff); + if (error) + return (error); + error = nfscl_postop_attr(nd, nap, attrflagp, stuff); + if (nd->nd_repstat && !error) + error = nd->nd_repstat; + if (!error) { + NFSM_DISSECT(tl, u_int32_t *, NFSX_V3FSINFO); + fsp->fs_rtmax = fxdr_unsigned(u_int32_t, *tl++); + fsp->fs_rtpref = fxdr_unsigned(u_int32_t, *tl++); + fsp->fs_rtmult = fxdr_unsigned(u_int32_t, *tl++); + fsp->fs_wtmax = fxdr_unsigned(u_int32_t, *tl++); + fsp->fs_wtpref = fxdr_unsigned(u_int32_t, *tl++); + fsp->fs_wtmult = fxdr_unsigned(u_int32_t, *tl++); + fsp->fs_dtpref = fxdr_unsigned(u_int32_t, *tl++); + fsp->fs_maxfilesize = fxdr_hyper(tl); + tl += 2; + fxdr_nfsv3time(tl, &fsp->fs_timedelta); + tl += 2; + fsp->fs_properties = fxdr_unsigned(u_int32_t, *tl); + } +nfsmout: + mbuf_freem(nd->nd_mrep); + return (error); +} + +/* + * This function performs the Renew RPC. + */ +APPLESTATIC int +nfsrpc_renew(struct nfsclclient *clp, struct ucred *cred, NFSPROC_T *p) +{ + u_int32_t *tl; + struct nfsrv_descript nfsd; + struct nfsrv_descript *nd = &nfsd; + struct nfsmount *nmp; + int error; + + nmp = clp->nfsc_nmp; + if (nmp == NULL) + return (0); + nfscl_reqstart(nd, NFSPROC_RENEW, nmp, NULL, 0, NULL); + NFSM_BUILD(tl, u_int32_t *, 2 * NFSX_UNSIGNED); + *tl++ = clp->nfsc_clientid.lval[0]; + *tl = clp->nfsc_clientid.lval[1]; + nd->nd_flag |= ND_USEGSSNAME; + error = newnfs_request(nd, nmp, NULL, &nmp->nm_sockreq, NULL, p, cred, + NFS_PROG, NFS_VER4, NULL, 1, NULL); + if (error) + return (error); + error = nd->nd_repstat; + mbuf_freem(nd->nd_mrep); + return (error); +} + +/* + * This function performs the Releaselockowner RPC. 
+ */ +APPLESTATIC int +nfsrpc_rellockown(struct nfsmount *nmp, struct nfscllockowner *lp, + struct ucred *cred, NFSPROC_T *p) +{ + struct nfsrv_descript nfsd, *nd = &nfsd; + u_int32_t *tl; + int error; + + nfscl_reqstart(nd, NFSPROC_RELEASELCKOWN, nmp, NULL, 0, NULL); + NFSM_BUILD(tl, u_int32_t *, 2 * NFSX_UNSIGNED); + *tl++ = nmp->nm_clp->nfsc_clientid.lval[0]; + *tl = nmp->nm_clp->nfsc_clientid.lval[1]; + (void) nfsm_strtom(nd, lp->nfsl_owner, NFSV4CL_LOCKNAMELEN); + nd->nd_flag |= ND_USEGSSNAME; + error = newnfs_request(nd, nmp, NULL, &nmp->nm_sockreq, NULL, p, cred, + NFS_PROG, NFS_VER4, NULL, 1, NULL); + if (error) + return (error); + error = nd->nd_repstat; + mbuf_freem(nd->nd_mrep); + return (error); +} + +/* + * This function performs the Compound to get the mount pt FH. + */ +APPLESTATIC int +nfsrpc_getdirpath(struct nfsmount *nmp, u_char *dirpath, struct ucred *cred, + NFSPROC_T *p) +{ + u_int32_t *tl; + struct nfsrv_descript nfsd; + struct nfsrv_descript *nd = &nfsd; + u_char *cp, *cp2; + int error, cnt, len, setnil; + u_int32_t *opcntp; + + nfscl_reqstart(nd, NFSPROC_PUTROOTFH, nmp, NULL, 0, &opcntp); + cp = dirpath; + cnt = 0; + do { + setnil = 0; + while (*cp == '/') + cp++; + cp2 = cp; + while (*cp2 != '\0' && *cp2 != '/') + cp2++; + if (*cp2 == '/') { + setnil = 1; + *cp2 = '\0'; + } + if (cp2 != cp) { + NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED); + *tl = txdr_unsigned(NFSV4OP_LOOKUP); + nfsm_strtom(nd, cp, strlen(cp)); + cnt++; + } + if (setnil) + *cp2++ = '/'; + cp = cp2; + } while (*cp != '\0'); + *opcntp = txdr_unsigned(2 + cnt); + NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED); + *tl = txdr_unsigned(NFSV4OP_GETFH); + nd->nd_flag |= ND_USEGSSNAME; + error = newnfs_request(nd, nmp, NULL, &nmp->nm_sockreq, NULL, p, cred, + NFS_PROG, NFS_VER4, NULL, 1, NULL); + if (error) + return (error); + if (nd->nd_repstat == 0) { + NFSM_DISSECT(tl, u_int32_t *, (3 + 2 * cnt) * NFSX_UNSIGNED); + tl += (2 + 2 * cnt); + if ((len = fxdr_unsigned(int, *tl)) <= 0 || + len > NFSX_FHMAX) { + nd->nd_repstat = NFSERR_BADXDR; + } else { + nd->nd_repstat = nfsrv_mtostr(nd, nmp->nm_fh, len); + if (nd->nd_repstat == 0) + nmp->nm_fhsize = len; + } + } + error = nd->nd_repstat; +nfsmout: + mbuf_freem(nd->nd_mrep); + return (error); +} + +/* + * This function performs the Delegreturn RPC. + */ +APPLESTATIC int +nfsrpc_delegreturn(struct nfscldeleg *dp, struct ucred *cred, + struct nfsmount *nmp, NFSPROC_T *p, int syscred) +{ + u_int32_t *tl; + struct nfsrv_descript nfsd; + struct nfsrv_descript *nd = &nfsd; + int error; + + nfscl_reqstart(nd, NFSPROC_DELEGRETURN, nmp, dp->nfsdl_fh, + dp->nfsdl_fhlen, NULL); + NFSM_BUILD(tl, u_int32_t *, NFSX_STATEID); + *tl++ = dp->nfsdl_stateid.seqid; + *tl++ = dp->nfsdl_stateid.other[0]; + *tl++ = dp->nfsdl_stateid.other[1]; + *tl = dp->nfsdl_stateid.other[2]; + if (syscred) + nd->nd_flag |= ND_USEGSSNAME; + error = newnfs_request(nd, nmp, NULL, &nmp->nm_sockreq, NULL, p, cred, + NFS_PROG, NFS_VER4, NULL, 1, NULL); + if (error) + return (error); + error = nd->nd_repstat; + mbuf_freem(nd->nd_mrep); + return (error); +} + +#ifdef NFS4_ACL_EXTATTR_NAME +/* + * nfs getacl call. 
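Back in nfsrpc_getdirpath() above, the dirpath walk emits one LOOKUP per '/'-separated component and then patches the compound's op count through opcntp. A read-only sketch of that component walk (the kernel version nul-terminates each component in place instead of just counting):

#include <string.h>

static int
count_lookup_ops(const char *dirpath)
{
	const char *cp = dirpath;
	int cnt = 0;

	while (*cp != '\0') {
		while (*cp == '/')		/* skip slash runs */
			cp++;
		if (*cp == '\0')
			break;
		cnt++;				/* one LOOKUP per component */
		while (*cp != '\0' && *cp != '/')
			cp++;
	}
	return (cnt);	/* total ops = PUTROOTFH + cnt LOOKUPs + GETFH */
}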
+ */ +APPLESTATIC int +nfsrpc_getacl(vnode_t vp, struct ucred *cred, NFSPROC_T *p, + struct acl *aclp, void *stuff) +{ + struct nfsrv_descript nfsd, *nd = &nfsd; + int error; + nfsattrbit_t attrbits; + struct nfsmount *nmp = VFSTONFS(vnode_mount(vp)); + + if (nfsrv_useacl == 0 || !NFSHASNFSV4(nmp)) + return (EOPNOTSUPP); + NFSCL_REQSTART(nd, NFSPROC_GETACL, vp); + NFSZERO_ATTRBIT(&attrbits); + NFSSETBIT_ATTRBIT(&attrbits, NFSATTRBIT_ACL); + (void) nfsrv_putattrbit(nd, &attrbits); + error = nfscl_request(nd, vp, p, cred, stuff); + if (error) + return (error); + if (!nd->nd_repstat) + error = nfsv4_loadattr(nd, vp, NULL, NULL, NULL, 0, NULL, + NULL, NULL, NULL, aclp, 0, NULL, NULL, NULL, p, cred); + else + error = nd->nd_repstat; + mbuf_freem(nd->nd_mrep); + return (error); +} + +/* + * nfs setacl call. + */ +APPLESTATIC int +nfsrpc_setacl(vnode_t vp, struct ucred *cred, NFSPROC_T *p, + struct acl *aclp, void *stuff) +{ + int error; + struct nfsmount *nmp = VFSTONFS(vnode_mount(vp)); + + if (nfsrv_useacl == 0 || !NFSHASNFSV4(nmp)) + return (EOPNOTSUPP); + error = nfsrpc_setattr(vp, NULL, aclp, cred, p, NULL, NULL, stuff); + return (error); +} + +/* + * nfs setacl call. + */ +static int +nfsrpc_setaclrpc(vnode_t vp, struct ucred *cred, NFSPROC_T *p, + struct acl *aclp, nfsv4stateid_t *stateidp, void *stuff) +{ + struct nfsrv_descript nfsd, *nd = &nfsd; + int error; + nfsattrbit_t attrbits; + struct nfsmount *nmp = VFSTONFS(vnode_mount(vp)); + + if (!NFSHASNFSV4(nmp)) + return (EOPNOTSUPP); + NFSCL_REQSTART(nd, NFSPROC_SETACL, vp); + nfsm_stateidtom(nd, stateidp, NFSSTATEID_PUTSTATEID); + NFSZERO_ATTRBIT(&attrbits); + NFSSETBIT_ATTRBIT(&attrbits, NFSATTRBIT_ACL); + (void) nfsv4_fillattr(nd, vp, aclp, NULL, NULL, 0, &attrbits, + NULL, NULL, 0, 0); + error = nfscl_request(nd, vp, p, cred, stuff); + if (error) + return (error); + /* Don't care about the pre/postop attributes */ + mbuf_freem(nd->nd_mrep); + return (nd->nd_repstat); +} + +#endif /* NFS4_ACL_EXTATTR_NAME */ diff --git a/sys/fs/nfsclient/nfs_clstate.c b/sys/fs/nfsclient/nfs_clstate.c new file mode 100644 index 0000000..501fab0 --- /dev/null +++ b/sys/fs/nfsclient/nfs_clstate.c @@ -0,0 +1,4133 @@ +/*- + * Copyright (c) 2009 Rick Macklem, University of Guelph + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * These functions implement the client side state handling for NFSv4.
+ * NFSv4 state handling:
+ *  - A lockowner is used to determine lock contention, so it
+ *    corresponds directly to a Posix pid. (1 to 1 mapping)
+ *  - The correct granularity of an OpenOwner is not nearly so
+ *    obvious. An OpenOwner does the following:
+ *    - provides a serial sequencing of Open/Close/Lock-with-new-lockowner
+ *    - is used to check for Open/Share contention (not applicable to
+ *      this client, since all Opens are Deny_None)
+ *    As such, I considered both extrema.
+ *    1 OpenOwner per ClientID - Simple to manage, but fully serializes
+ *    all Open, Close and Lock (with a new lockowner) Ops.
+ *    1 OpenOwner for each Open - This one results in an OpenConfirm for
+ *    every Open, for most servers.
+ *    So, I chose to use the same mapping as I did for LockOwners.
+ *    The main concern here is that you can end up with multiple Opens
+ *    for the same File Handle, but on different OpenOwners (opens
+ *    inherited from parents, grandparents...) and you do not know
+ *    which of these the vnodeop close applies to. This is handled by
+ *    delaying the Close Op(s) until all of the Opens have been closed.
+ *    (It is not yet obvious if this is the correct granularity.)
+ *  - How the code handles serialization:
+ *    - For the ClientId, it uses an exclusive lock while getting its
+ *      SetClientId and during recovery. Otherwise, it uses a shared
+ *      lock via a reference count.
+ *    - For the rest of the data structures, it uses an SMP mutex
+ *      (once the nfs client is SMP safe) and doesn't sleep while
+ *      manipulating the linked lists.
+ *    - The serialization of Open/Close/Lock/LockU falls out in the
+ *      "wash", since OpenOwners and LockOwners are both mapped from
+ *      Posix pid. In other words, there is only one Posix pid using
+ *      any given owner, so that owner is serialized. (If you change
+ *      the granularity of the OpenOwner, then code must be added to
+ *      serialize Ops on the OpenOwner.)
+ *  - When to get rid of OpenOwners and LockOwners.
+ *    - When a process exits, it calls nfscl_cleanup(), which goes
+ *      through the client list looking for all Open and Lock Owners.
+ *      When one is found, it is marked "defunct" or in the case of
+ *      an OpenOwner without any Opens, freed.
+ *      The renew thread scans for defunct Owners and gets rid of them,
+ *      if it can. The LockOwners will also be deleted when the
+ *      associated Open is closed.
+ *    - If the LockU or Close Op(s) fail during close in a way
+ *      that could be recovered upon retry, they are relinked to the
+ *      ClientId's defunct open list and retried by the renew thread
+ *      until they succeed or an unmount/recovery occurs.
+ *      (Since we are done with them, they do not need to be recovered.)
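To make the 1-to-1 owner/pid mapping described above concrete: a sketch of how an owner name can be derived from a pid so that ownership tests reduce to a fixed-length byte compare. The length and encoding here are illustrative only; the real work is done by nfscl_filllockowner() with NFSV4CL_LOCKNAMELEN-sized names.

#include <string.h>
#include <sys/types.h>
#include <stdint.h>

#define OWNER_LEN 12			/* stand-in for NFSV4CL_LOCKNAMELEN */

/* Embed the pid in a zero-padded, fixed-size owner name (hypothetical
 * encoding); two owners then match iff their pids match. */
static void
fill_lockowner(pid_t pid, uint8_t own[OWNER_LEN])
{
	memset(own, 0, OWNER_LEN);
	memcpy(own, &pid, sizeof(pid));
}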
+ */ + +#ifndef APPLEKEXT +#include <fs/nfs/nfsport.h> + +/* + * Global variables + */ +extern struct nfsstats newnfsstats; +extern struct nfsreqhead nfsd_reqq; +NFSREQSPINLOCK; +NFSCLSTATEMUTEX; +int nfscl_inited = 0; +struct nfsclhead nfsclhead; /* Head of clientid list */ +int nfscl_deleghighwater = NFSCLDELEGHIGHWATER; +#endif /* !APPLEKEXT */ + +static int nfscl_delegcnt = 0; +static int nfscl_getopen(struct nfsclownerhead *, u_int8_t *, int, u_int8_t *, + NFSPROC_T *, u_int32_t, struct nfsclowner **, struct nfsclopen **); +static void nfscl_clrelease(struct nfsclclient *); +static void nfscl_cleanclient(struct nfsclclient *); +static void nfscl_expireclient(struct nfsclclient *, struct nfsmount *, + struct ucred *, NFSPROC_T *); +static int nfscl_expireopen(struct nfsclclient *, struct nfsclopen *, + struct nfsmount *, struct ucred *, NFSPROC_T *); +static void nfscl_recover(struct nfsclclient *, struct ucred *, NFSPROC_T *); +static void nfscl_insertlock(struct nfscllockowner *, struct nfscllock *, + struct nfscllock *, int); +static int nfscl_updatelock(struct nfscllockowner *, struct nfscllock **, + struct nfscllock **, int); +static void nfscl_delegreturnall(struct nfsclclient *, NFSPROC_T *); +static u_int32_t nfscl_nextcbident(void); +static mount_t nfscl_getmnt(u_int32_t); +static struct nfscldeleg *nfscl_finddeleg(struct nfsclclient *, u_int8_t *, + int); +static int nfscl_checkconflict(struct nfscllockownerhead *, struct nfscllock *, + u_int8_t *, struct nfscllock **); +static void nfscl_freelockowner(struct nfscllockowner *, int); +static void nfscl_freealllocks(struct nfscllockownerhead *, int); +static int nfscl_localconflict(struct nfsclclient *, struct nfscllock *, + u_int8_t *, struct nfscldeleg *, struct nfscllock **); +static void nfscl_newopen(struct nfsclclient *, struct nfscldeleg *, + struct nfsclowner **, struct nfsclowner **, struct nfsclopen **, + struct nfsclopen **, u_int8_t *, u_int8_t *, int, int *); +static int nfscl_moveopen(vnode_t , struct nfsclclient *, + struct nfsmount *, struct nfsclopen *, struct nfsclowner *, + struct nfscldeleg *, struct ucred *, NFSPROC_T *); +static void nfscl_totalrecall(struct nfsclclient *); +static int nfscl_relock(vnode_t , struct nfsclclient *, struct nfsmount *, + struct nfscllockowner *, struct nfscllock *, struct ucred *, NFSPROC_T *); +static int nfscl_tryopen(struct nfsmount *, vnode_t , u_int8_t *, int, + u_int8_t *, int, u_int32_t, struct nfsclopen *, u_int8_t *, int, + struct nfscldeleg **, int, u_int32_t, struct ucred *, NFSPROC_T *); +static int nfscl_trylock(struct nfsmount *, vnode_t , u_int8_t *, + int, struct nfscllockowner *, int, int, u_int64_t, u_int64_t, short, + struct ucred *, NFSPROC_T *); +static int nfsrpc_reopen(struct nfsmount *, u_int8_t *, int, u_int32_t, + struct nfsclopen *, struct nfscldeleg **, struct ucred *, NFSPROC_T *); +static void nfscl_freedeleg(struct nfscldeleghead *, struct nfscldeleg *); +static int nfscl_errmap(struct nfsrv_descript *); +static void nfscl_cleanup_common(struct nfsclclient *, u_int8_t *); +static int nfscl_recalldeleg(struct nfsclclient *, struct nfsmount *, + struct nfscldeleg *, vnode_t, struct ucred *, NFSPROC_T *); +static void nfscl_freeopenowner(struct nfsclowner *, int); +static void nfscl_cleandeleg(struct nfscldeleg *); +static int nfscl_trydelegreturn(struct nfscldeleg *, struct ucred *, + struct nfsmount *, NFSPROC_T *); + +static short nfscberr_null[] = { + 0, + 0, +}; + +static short nfscberr_getattr[] = { + NFSERR_RESOURCE, + NFSERR_BADHANDLE, + 
NFSERR_BADXDR, + NFSERR_RESOURCE, + NFSERR_SERVERFAULT, + 0, +}; + +static short nfscberr_recall[] = { + NFSERR_RESOURCE, + NFSERR_BADHANDLE, + NFSERR_BADSTATEID, + NFSERR_BADXDR, + NFSERR_RESOURCE, + NFSERR_SERVERFAULT, + 0, +}; + +static short *nfscl_cberrmap[] = { + nfscberr_null, + nfscberr_null, + nfscberr_null, + nfscberr_getattr, + nfscberr_recall +}; + +#define NETFAMILY(clp) \ + (((clp)->nfsc_flags & NFSCLFLAGS_AFINET6) ? AF_INET6 : AF_INET) + +/* + * Called for an open operation. + * If the nfhp argument is NULL, just get an openowner. + */ +APPLESTATIC int +nfscl_open(vnode_t vp, u_int8_t *nfhp, int fhlen, u_int32_t amode, int usedeleg, + struct ucred *cred, NFSPROC_T *p, struct nfsclowner **owpp, + struct nfsclopen **opp, int *newonep, int *retp, int lockit) +{ + struct nfsclclient *clp; + struct nfsclowner *owp, *nowp; + struct nfsclopen *op = NULL, *nop = NULL; + struct nfscldeleg *dp; + struct nfsclownerhead *ohp; + u_int8_t own[NFSV4CL_LOCKNAMELEN]; + int ret; + + if (newonep != NULL) + *newonep = 0; + if (opp != NULL) + *opp = NULL; + if (owpp != NULL) + *owpp = NULL; + + /* + * Might need one or both of these, so MALLOC them now, to + * avoid a tsleep() in MALLOC later. + */ + MALLOC(nowp, struct nfsclowner *, sizeof (struct nfsclowner), + M_NFSCLOWNER, M_WAITOK); + if (nfhp != NULL) + MALLOC(nop, struct nfsclopen *, sizeof (struct nfsclopen) + + fhlen - 1, M_NFSCLOPEN, M_WAITOK); + ret = nfscl_getcl(vp, cred, p, &clp); + if (ret != 0) { + FREE((caddr_t)nowp, M_NFSCLOWNER); + if (nop != NULL) + FREE((caddr_t)nop, M_NFSCLOPEN); + return (ret); + } + + /* + * Get the Open iff it already exists. + * If none found, add the new one or return error, depending upon + * "create". + */ + nfscl_filllockowner(p, own); + NFSLOCKCLSTATE(); + dp = NULL; + /* First check the delegation list */ + if (nfhp != NULL && usedeleg) { + LIST_FOREACH(dp, NFSCLDELEGHASH(clp, nfhp, fhlen), nfsdl_hash) { + if (dp->nfsdl_fhlen == fhlen && + !NFSBCMP(nfhp, dp->nfsdl_fh, fhlen)) { + if (!(amode & NFSV4OPEN_ACCESSWRITE) || + (dp->nfsdl_flags & NFSCLDL_WRITE)) + break; + dp = NULL; + break; + } + } + } + + if (dp != NULL) + ohp = &dp->nfsdl_owner; + else + ohp = &clp->nfsc_owner; + /* Now, search for an openowner */ + LIST_FOREACH(owp, ohp, nfsow_list) { + if (!NFSBCMP(owp->nfsow_owner, own, NFSV4CL_LOCKNAMELEN)) + break; + } + + /* + * Create a new open, as required. + */ + nfscl_newopen(clp, dp, &owp, &nowp, &op, &nop, own, nfhp, fhlen, + newonep); + + /* + * Serialize modifications to the open owner for multiple threads + * within the same process using a read/write sleep lock. + */ + if (lockit) + nfscl_lockexcl(&owp->nfsow_rwlock, NFSCLSTATEMUTEXPTR); + NFSUNLOCKCLSTATE(); + if (nowp != NULL) + FREE((caddr_t)nowp, M_NFSCLOWNER); + if (nop != NULL) + FREE((caddr_t)nop, M_NFSCLOPEN); + if (owpp != NULL) + *owpp = owp; + if (opp != NULL) + *opp = op; + if (retp != NULL) + *retp = NFSCLOPEN_OK; + + /* + * Now, check the mode on the open and return the appropriate + * value. + */ + if (op != NULL && (amode & ~(op->nfso_mode))) { + op->nfso_mode |= amode; + if (retp != NULL && dp == NULL) + *retp = NFSCLOPEN_DOOPEN; + } + return (0); +} + +/* + * Create a new open, as required. 
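The MALLOC-before-NFSLOCKCLSTATE() dance in nfscl_open() above is a classic preallocation pattern: allocate while it is still safe to sleep, then discard the spare if the lookup under the lock finds an existing entry. A user-space sketch of the shape of it, using pthreads (allocation-failure handling omitted for brevity):

#include <pthread.h>
#include <stdlib.h>

struct entry { int dummy; };		/* stand-in for nfsclowner et al. */
static pthread_mutex_t state_mtx = PTHREAD_MUTEX_INITIALIZER;

static struct entry *
get_or_add(struct entry **slot)
{
	/* Allocate before locking; malloc may block. */
	struct entry *nep = malloc(sizeof(*nep));
	struct entry *ep;

	pthread_mutex_lock(&state_mtx);
	if (*slot == NULL) {
		*slot = nep;		/* preallocation consumed */
		nep = NULL;
	}
	ep = *slot;
	pthread_mutex_unlock(&state_mtx);
	free(nep);			/* frees the unused spare, if any */
	return (ep);
}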
+ */ +static void +nfscl_newopen(struct nfsclclient *clp, struct nfscldeleg *dp, + struct nfsclowner **owpp, struct nfsclowner **nowpp, struct nfsclopen **opp, + struct nfsclopen **nopp, u_int8_t *own, u_int8_t *fhp, int fhlen, + int *newonep) +{ + struct nfsclowner *owp = *owpp, *nowp; + struct nfsclopen *op, *nop; + + if (nowpp != NULL) + nowp = *nowpp; + else + nowp = NULL; + if (nopp != NULL) + nop = *nopp; + else + nop = NULL; + if (owp == NULL && nowp != NULL) { + NFSBCOPY(own, nowp->nfsow_owner, NFSV4CL_LOCKNAMELEN); + LIST_INIT(&nowp->nfsow_open); + nowp->nfsow_clp = clp; + nowp->nfsow_seqid = 0; + nowp->nfsow_defunct = 0; + nfscl_lockinit(&nowp->nfsow_rwlock); + if (dp != NULL) { + newnfsstats.cllocalopenowners++; + LIST_INSERT_HEAD(&dp->nfsdl_owner, nowp, nfsow_list); + } else { + newnfsstats.clopenowners++; + LIST_INSERT_HEAD(&clp->nfsc_owner, nowp, nfsow_list); + } + owp = *owpp = nowp; + *nowpp = NULL; + if (newonep != NULL) + *newonep = 1; + } + + /* If an fhp has been specified, create an Open as well. */ + if (fhp != NULL) { + /* and look for the correct open, based upon FH */ + LIST_FOREACH(op, &owp->nfsow_open, nfso_list) { + if (op->nfso_fhlen == fhlen && + !NFSBCMP(op->nfso_fh, fhp, fhlen)) + break; + } + if (op == NULL && nop != NULL) { + nop->nfso_own = owp; + nop->nfso_mode = 0; + nop->nfso_opencnt = 0; + nop->nfso_posixlock = 1; + nop->nfso_fhlen = fhlen; + NFSBCOPY(fhp, nop->nfso_fh, fhlen); + LIST_INIT(&nop->nfso_lock); + nop->nfso_stateid.seqid = 0; + nop->nfso_stateid.other[0] = 0; + nop->nfso_stateid.other[1] = 0; + nop->nfso_stateid.other[2] = 0; + if (dp != NULL) { + TAILQ_REMOVE(&clp->nfsc_deleg, dp, nfsdl_list); + TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp, + nfsdl_list); + dp->nfsdl_timestamp = NFSD_MONOSEC + 120; + newnfsstats.cllocalopens++; + } else { + newnfsstats.clopens++; + } + LIST_INSERT_HEAD(&owp->nfsow_open, nop, nfso_list); + *opp = nop; + *nopp = NULL; + if (newonep != NULL) + *newonep = 1; + } else { + *opp = op; + } + } +} + +/* + * Called to find/add a delegation to a client. + */ +APPLESTATIC int +nfscl_deleg(mount_t mp, struct nfsclclient *clp, u_int8_t *nfhp, + int fhlen, struct ucred *cred, NFSPROC_T *p, struct nfscldeleg **dpp) +{ + struct nfscldeleg *dp = *dpp, *tdp; + + /* + * First, if we have received a Read delegation for a file on a + * read/write file system, just return it, because they aren't + * useful, imho. + */ + if (mp != NULL && dp != NULL && !NFSMNT_RDONLY(mp) && + (dp->nfsdl_flags & NFSCLDL_READ)) { + (void) nfscl_trydelegreturn(dp, cred, VFSTONFS(mp), p); + FREE((caddr_t)dp, M_NFSCLDELEG); + *dpp = NULL; + return (0); + } + + /* Look for the correct deleg, based upon FH */ + NFSLOCKCLSTATE(); + tdp = nfscl_finddeleg(clp, nfhp, fhlen); + if (tdp == NULL) { + if (dp == NULL) { + NFSUNLOCKCLSTATE(); + return (NFSERR_BADSTATEID); + } + *dpp = NULL; + TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp, nfsdl_list); + LIST_INSERT_HEAD(NFSCLDELEGHASH(clp, nfhp, fhlen), dp, + nfsdl_hash); + dp->nfsdl_timestamp = NFSD_MONOSEC + 120; + newnfsstats.cldelegates++; + nfscl_delegcnt++; + } else { + /* + * Delegation already exists, what do we do if a new one?? + */ + if (dp != NULL) { + printf("Deleg already exists!\n"); + FREE((caddr_t)dp, M_NFSCLDELEG); + *dpp = NULL; + } else { + *dpp = tdp; + } + } + NFSUNLOCKCLSTATE(); + return (0); +} + +/* + * Find a delegation for this file handle. Return NULL upon failure. 
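nfscl_finddeleg() below walks one NFSCLDELEGHASH() bucket and confirms candidates with an exact length-and-bytes compare; the hash only narrows the search. A sketch of that bucketing idea (the hash function and table size are illustrative, not the kernel's):

#include <stddef.h>
#include <stdint.h>

#define NBUCKETS 256			/* stand-in for NFSCLDELEGHASHSIZE */

/* Cheap byte hash over the opaque file handle; collisions are fine
 * because the caller re-checks fhlen and the full handle bytes. */
static unsigned
fh_bucket(const uint8_t *fh, size_t fhlen)
{
	unsigned h = 0;

	while (fhlen-- > 0)
		h = h * 31 + *fh++;
	return (h % NBUCKETS);
}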
+ */ +static struct nfscldeleg * +nfscl_finddeleg(struct nfsclclient *clp, u_int8_t *fhp, int fhlen) +{ + struct nfscldeleg *dp; + + LIST_FOREACH(dp, NFSCLDELEGHASH(clp, fhp, fhlen), nfsdl_hash) { + if (dp->nfsdl_fhlen == fhlen && + !NFSBCMP(dp->nfsdl_fh, fhp, fhlen)) + break; + } + return (dp); +} + +/* + * Get a stateid for an I/O operation. First, look for an open and iff + * found, return either a lockowner stateid or the open stateid. + * If no Open is found, just return error and the special stateid of all zeros. + */ +APPLESTATIC int +nfscl_getstateid(vnode_t vp, u_int8_t *nfhp, int fhlen, u_int32_t mode, + struct ucred *cred, NFSPROC_T *p, nfsv4stateid_t *stateidp, + void **lckpp) +{ + struct nfsclclient *clp; + struct nfsclowner *owp; + struct nfsclopen *op; + struct nfscllockowner *lp; + struct nfscldeleg *dp; + struct nfsnode *np; + u_int8_t own[NFSV4CL_LOCKNAMELEN]; + int error, done; + + *lckpp = NULL; + /* + * Initially, just set the special stateid of all zeros. + */ + stateidp->seqid = 0; + stateidp->other[0] = 0; + stateidp->other[1] = 0; + stateidp->other[2] = 0; + if (vnode_vtype(vp) != VREG) + return (EISDIR); + np = VTONFS(vp); + NFSLOCKCLSTATE(); + clp = nfscl_findcl(VFSTONFS(vnode_mount(vp))); + if (clp == NULL) { + NFSUNLOCKCLSTATE(); + return (EACCES); + } + + /* + * First, look for a delegation. + */ + LIST_FOREACH(dp, NFSCLDELEGHASH(clp, nfhp, fhlen), nfsdl_hash) { + if (dp->nfsdl_fhlen == fhlen && + !NFSBCMP(nfhp, dp->nfsdl_fh, fhlen)) { + if (!(mode & NFSV4OPEN_ACCESSWRITE) || + (dp->nfsdl_flags & NFSCLDL_WRITE)) { + stateidp->seqid = dp->nfsdl_stateid.seqid; + stateidp->other[0] = dp->nfsdl_stateid.other[0]; + stateidp->other[1] = dp->nfsdl_stateid.other[1]; + stateidp->other[2] = dp->nfsdl_stateid.other[2]; + if (!(np->n_flag & NDELEGRECALL)) { + TAILQ_REMOVE(&clp->nfsc_deleg, dp, + nfsdl_list); + TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp, + nfsdl_list); + dp->nfsdl_timestamp = NFSD_MONOSEC + + 120; + dp->nfsdl_rwlock.nfslock_usecnt++; + *lckpp = (void *)&dp->nfsdl_rwlock; + } + NFSUNLOCKCLSTATE(); + return (0); + } + break; + } + } + + if (p != NULL) { + /* + * If p != NULL, we want to search the parentage tree + * for a matching OpenOwner and use that. + */ + nfscl_filllockowner(p, own); + error = nfscl_getopen(&clp->nfsc_owner, nfhp, fhlen, NULL, p, + mode, NULL, &op); + if (error) { + NFSUNLOCKCLSTATE(); + return (error); + } + + /* now look for a lockowner */ + LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) { + if (!NFSBCMP(lp->nfsl_owner, own, NFSV4CL_LOCKNAMELEN)) { + stateidp->seqid = lp->nfsl_stateid.seqid; + stateidp->other[0] = lp->nfsl_stateid.other[0]; + stateidp->other[1] = lp->nfsl_stateid.other[1]; + stateidp->other[2] = lp->nfsl_stateid.other[2]; + NFSUNLOCKCLSTATE(); + return (0); + } + } + } else { + /* + * If p == NULL, it is a read ahead or write behind, + * so just look for any OpenOwner that will work. + */ + done = 0; + owp = LIST_FIRST(&clp->nfsc_owner); + while (!done && owp != NULL) { + LIST_FOREACH(op, &owp->nfsow_open, nfso_list) { + if (op->nfso_fhlen == fhlen && + !NFSBCMP(op->nfso_fh, nfhp, fhlen) && + (mode & op->nfso_mode) == mode) { + done = 1; + break; + } + } + if (!done) + owp = LIST_NEXT(owp, nfsow_list); + } + if (!done) { + NFSUNLOCKCLSTATE(); + return (ENOENT); + } + /* for read aheads or write behinds, use the open cred */ + newnfs_copycred(&op->nfso_cred, cred); + } + + /* + * No lock stateid, so return the open stateid. 
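Before that fall-through happens, it is worth summarizing the preference order nfscl_getstateid() implements above, most specific first: delegation stateid, then lockowner stateid, then open stateid, and finally the special all-zeros stateid on the error path. As a tiny decision function (names illustrative):

enum stateid_kind { SID_DELEG, SID_LOCK, SID_OPEN, SID_SPECIAL };

static enum stateid_kind
pick_stateid(int have_deleg, int have_lock, int have_open)
{
	if (have_deleg)
		return (SID_DELEG);	/* usable delegation held */
	if (have_lock)
		return (SID_LOCK);	/* byte-range locks held */
	if (have_open)
		return (SID_OPEN);	/* fall back to the open */
	return (SID_SPECIAL);		/* all-zeros stateid */
}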
+ */
+	stateidp->seqid = op->nfso_stateid.seqid;
+	stateidp->other[0] = op->nfso_stateid.other[0];
+	stateidp->other[1] = op->nfso_stateid.other[1];
+	stateidp->other[2] = op->nfso_stateid.other[2];
+	NFSUNLOCKCLSTATE();
+	return (0);
+}
+
+/*
+ * Get an existing open. Search up the parentage tree for a match and
+ * return with the first one found.
+ */
+static int
+nfscl_getopen(struct nfsclownerhead *ohp, u_int8_t *nfhp, int fhlen,
+    u_int8_t *rown, NFSPROC_T *p, u_int32_t mode, struct nfsclowner **owpp,
+    struct nfsclopen **opp)
+{
+	struct nfsclowner *owp = NULL;
+	struct nfsclopen *op;
+	NFSPROC_T *nproc;
+	u_int8_t own[NFSV4CL_LOCKNAMELEN], *ownp;
+
+	nproc = p;
+	op = NULL;
+	while (op == NULL && (nproc != NULL || rown != NULL)) {
+		if (nproc != NULL) {
+			nfscl_filllockowner(nproc, own);
+			ownp = own;
+		} else {
+			ownp = rown;
+		}
+		/* Search the client list */
+		LIST_FOREACH(owp, ohp, nfsow_list) {
+			if (!NFSBCMP(owp->nfsow_owner, ownp,
+			    NFSV4CL_LOCKNAMELEN))
+				break;
+		}
+		if (owp != NULL) {
+			/* and look for the correct open */
+			LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
+				if (op->nfso_fhlen == fhlen &&
+				    !NFSBCMP(op->nfso_fh, nfhp, fhlen)
+				    && (op->nfso_mode & mode) == mode) {
+					break;
+				}
+			}
+		}
+		if (rown != NULL)
+			break;
+		if (op == NULL)
+			nproc = nfscl_getparent(nproc);
+	}
+	if (op == NULL) {
+		return (EBADF);
+	}
+	if (owpp)
+		*owpp = owp;
+	*opp = op;
+	return (0);
+}
+
+/*
+ * Release use of an open owner. Called when open operations are done
+ * with the open owner.
+ */
+APPLESTATIC void
+nfscl_ownerrelease(struct nfsclowner *owp, __unused int error,
+    __unused int candelete, int unlocked)
+{
+
+	if (owp == NULL)
+		return;
+	NFSLOCKCLSTATE();
+	if (!unlocked)
+		nfscl_lockunlock(&owp->nfsow_rwlock);
+	nfscl_clrelease(owp->nfsow_clp);
+	NFSUNLOCKCLSTATE();
+}
+
+/*
+ * Release use of an open structure under an open owner.
+ */
+APPLESTATIC void
+nfscl_openrelease(struct nfsclopen *op, int error, int candelete)
+{
+	struct nfsclclient *clp;
+	struct nfsclowner *owp;
+
+	if (op == NULL)
+		return;
+	NFSLOCKCLSTATE();
+	owp = op->nfso_own;
+	nfscl_lockunlock(&owp->nfsow_rwlock);
+	clp = owp->nfsow_clp;
+	if (error && candelete && op->nfso_opencnt == 0)
+		nfscl_freeopen(op, 0);
+	nfscl_clrelease(clp);
+	NFSUNLOCKCLSTATE();
+}
+
+/*
+ * Called to get a clientid structure. It will optionally lock the
+ * client data structures to do the SetClientId/SetClientId_confirm,
+ * but will release that lock and return the clientid with a reference
+ * count on it.
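A rough user-space analogue of that exclusive-then-shared discipline, using a pthread rwlock in place of nfsv4_lock()/nfsv4_getref() (simplified: the kernel version interlocks with the state mutex and handles wakeups, and the validity checks here are deliberately naive):

#include <pthread.h>

static pthread_rwlock_t clid_lock = PTHREAD_RWLOCK_INITIALIZER;
static int clid_valid;

static void
use_clientid(void (*establish)(void), void (*use)(void))
{
	if (!clid_valid) {
		/* Exclusive while establishing/recovering the clientid. */
		pthread_rwlock_wrlock(&clid_lock);
		if (!clid_valid) {
			establish();		/* SetClientID/Confirm analogue */
			clid_valid = 1;
		}
		pthread_rwlock_unlock(&clid_lock);
	}
	/* Shared (reference-counted) for ordinary RPC use. */
	pthread_rwlock_rdlock(&clid_lock);
	use();
	pthread_rwlock_unlock(&clid_lock);
}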
+ */ +APPLESTATIC int +nfscl_getcl(vnode_t vp, struct ucred *cred, NFSPROC_T *p, + struct nfsclclient **clpp) +{ + struct nfsclclient *clp; + struct nfsclclient *newclp; + struct nfscllockowner *lp, *nlp; + struct nfsmount *nmp = VFSTONFS(vnode_mount(vp)); + int igotlock = 0, error, trystalecnt, clidinusedelay, i; + u_int16_t idlen; + + idlen = strlen(hostuuid); + if (idlen > 0) + idlen += sizeof (u_int64_t); + else + idlen += sizeof (u_int64_t) + 16; /* 16 random bytes */ + MALLOC(newclp, struct nfsclclient *, sizeof (struct nfsclclient) + + idlen - 1, M_NFSCLCLIENT, M_WAITOK); + NFSLOCKCLSTATE(); + clp = nmp->nm_clp; + if (clp == NULL) { + clp = newclp; + NFSBZERO((caddr_t)clp, sizeof(struct nfsclclient) + idlen - 1); + clp->nfsc_idlen = idlen; + LIST_INIT(&clp->nfsc_owner); + TAILQ_INIT(&clp->nfsc_deleg); + for (i = 0; i < NFSCLDELEGHASHSIZE; i++) + LIST_INIT(&clp->nfsc_deleghash[i]); + LIST_INIT(&clp->nfsc_defunctlockowner); + clp->nfsc_flags = NFSCLFLAGS_INITED; + clp->nfsc_clientidrev = 1; + clp->nfsc_cbident = nfscl_nextcbident(); + nfscl_fillclid(nmp->nm_clval, hostuuid, clp->nfsc_id, + clp->nfsc_idlen); + LIST_INSERT_HEAD(&nfsclhead, clp, nfsc_list); + nmp->nm_clp = clp; + clp->nfsc_nmp = nmp; + NFSUNLOCKCLSTATE(); + nfscl_start_renewthread(clp); + } else { + NFSUNLOCKCLSTATE(); + FREE((caddr_t)newclp, M_NFSCLCLIENT); + } + NFSLOCKCLSTATE(); + while ((clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID) == 0 && !igotlock) + igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL, + NFSCLSTATEMUTEXPTR); + if (!igotlock) + nfsv4_getref(&clp->nfsc_lock, NULL, NFSCLSTATEMUTEXPTR); + NFSUNLOCKCLSTATE(); + + /* + * If it needs a clientid, do the setclientid now. + */ + if ((clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID) == 0) { + if (!igotlock) + panic("nfscl_clget"); + if (p == NULL) { + NFSLOCKCLSTATE(); + nfsv4_unlock(&clp->nfsc_lock, 0); + NFSUNLOCKCLSTATE(); + return (EACCES); + } + /* get rid of defunct lockowners */ + LIST_FOREACH_SAFE(lp, &clp->nfsc_defunctlockowner, nfsl_list, + nlp) { + nfscl_freelockowner(lp, 0); + } + /* + * If RFC3530 Sec. 14.2.33 is taken literally, + * NFSERR_CLIDINUSE will be returned persistently for the + * case where a new mount of the same file system is using + * a different principal. In practice, NFSERR_CLIDINUSE is + * only returned when there is outstanding unexpired state + * on the clientid. As such, try for twice the lease + * interval, if we know what that is. Otherwise, make a + * wild ass guess. + * The case of returning NFSERR_STALECLIENTID is far less + * likely, but might occur if there is a significant delay + * between doing the SetClientID and SetClientIDConfirm Ops, + * such that the server throws away the clientid before + * receiving the SetClientIDConfirm. 
+ */ + if (clp->nfsc_renew > 0) + clidinusedelay = NFSCL_LEASE(clp->nfsc_renew) * 2; + else + clidinusedelay = 120; + trystalecnt = 3; + do { + error = nfsrpc_setclient(VFSTONFS(vnode_mount(vp)), clp, + cred, p); + if (error == NFSERR_STALECLIENTID || + error == NFSERR_STALEDONTRECOVER || + error == NFSERR_CLIDINUSE) { + (void) nfs_catnap(PZERO, "nfs_setcl"); + } + } while (((error == NFSERR_STALECLIENTID || + error == NFSERR_STALEDONTRECOVER) && --trystalecnt > 0) || + (error == NFSERR_CLIDINUSE && --clidinusedelay > 0)); + if (error) { + NFSLOCKCLSTATE(); + nfsv4_unlock(&clp->nfsc_lock, 0); + NFSUNLOCKCLSTATE(); + return (error); + } + clp->nfsc_flags |= NFSCLFLAGS_HASCLIENTID; + } + if (igotlock) { + NFSLOCKCLSTATE(); + nfsv4_unlock(&clp->nfsc_lock, 1); + NFSUNLOCKCLSTATE(); + } + + *clpp = clp; + return (0); +} + +/* + * Get a reference to a clientid and return it, if valid. + */ +APPLESTATIC struct nfsclclient * +nfscl_findcl(struct nfsmount *nmp) +{ + struct nfsclclient *clp; + + clp = nmp->nm_clp; + if (clp == NULL || !(clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID)) + return (NULL); + return (clp); +} + +/* + * Release the clientid structure. It may be locked or reference counted. + */ +static void +nfscl_clrelease(struct nfsclclient *clp) +{ + + if (clp->nfsc_lock.nfslock_lock & NFSV4LOCK_LOCK) + nfsv4_unlock(&clp->nfsc_lock, 0); + else + nfsv4_relref(&clp->nfsc_lock); +} + +/* + * External call for nfscl_clrelease. + */ +APPLESTATIC void +nfscl_clientrelease(struct nfsclclient *clp) +{ + + NFSLOCKCLSTATE(); + if (clp->nfsc_lock.nfslock_lock & NFSV4LOCK_LOCK) + nfsv4_unlock(&clp->nfsc_lock, 0); + else + nfsv4_relref(&clp->nfsc_lock); + NFSUNLOCKCLSTATE(); +} + +/* + * Called when wanting to lock a byte region. + */ +APPLESTATIC int +nfscl_getbytelock(vnode_t vp, u_int64_t off, u_int64_t len, + short type, struct ucred *cred, NFSPROC_T *p, struct nfsclclient *rclp, + int recovery, u_int8_t *rownp, u_int8_t *ropenownp, + struct nfscllockowner **lpp, int *newonep, int *donelocallyp) +{ + struct nfscllockowner *lp; + struct nfsclopen *op; + struct nfsclclient *clp; + struct nfscllockowner *nlp; + struct nfscllock *nlop, *otherlop; + struct nfscldeleg *dp = NULL, *ldp = NULL; + struct nfscllockownerhead *lhp = NULL; + struct nfsnode *np; + u_int8_t own[NFSV4CL_LOCKNAMELEN], *ownp; + int error = 0, ret, donelocally = 0; + u_int32_t mode; + + if (type == F_WRLCK) + mode = NFSV4OPEN_ACCESSWRITE; + else + mode = NFSV4OPEN_ACCESSREAD; + np = VTONFS(vp); + *lpp = NULL; + *newonep = 0; + *donelocallyp = 0; + + /* + * Might need these, so MALLOC them now, to + * avoid a tsleep() in MALLOC later. 
+ */ + MALLOC(nlp, struct nfscllockowner *, + sizeof (struct nfscllockowner), M_NFSCLLOCKOWNER, M_WAITOK); + MALLOC(otherlop, struct nfscllock *, + sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK); + MALLOC(nlop, struct nfscllock *, + sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK); + nlop->nfslo_type = type; + nlop->nfslo_first = off; + if (len == NFS64BITSSET) { + nlop->nfslo_end = NFS64BITSSET; + } else { + nlop->nfslo_end = off + len; + if (nlop->nfslo_end <= nlop->nfslo_first) + error = NFSERR_INVAL; + } + + if (!error) { + if (recovery) + clp = rclp; + else + error = nfscl_getcl(vp, cred, p, &clp); + } + if (error) { + FREE((caddr_t)nlp, M_NFSCLLOCKOWNER); + FREE((caddr_t)otherlop, M_NFSCLLOCK); + FREE((caddr_t)nlop, M_NFSCLLOCK); + return (error); + } + + op = NULL; + if (recovery) { + ownp = rownp; + } else { + nfscl_filllockowner(p, own); + ownp = own; + } + if (!recovery) { + NFSLOCKCLSTATE(); + /* + * First, search for a delegation. If one exists for this file, + * the lock can be done locally against it, so long as there + * isn't a local lock conflict. + */ + ldp = dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, + np->n_fhp->nfh_len); + /* Just sanity check for correct type of delegation */ + if (dp != NULL && ((dp->nfsdl_flags & NFSCLDL_RECALL) || + (type == F_WRLCK && !(dp->nfsdl_flags & NFSCLDL_WRITE)))) + dp = NULL; + } + if (dp != NULL) { + /* Now, find the associated open to get the correct openowner */ + ret = nfscl_getopen(&dp->nfsdl_owner, np->n_fhp->nfh_fh, + np->n_fhp->nfh_len, NULL, p, mode, NULL, &op); + if (ret) + ret = nfscl_getopen(&clp->nfsc_owner, + np->n_fhp->nfh_fh, np->n_fhp->nfh_len, NULL, p, + mode, NULL, &op); + if (!ret) { + lhp = &dp->nfsdl_lock; + TAILQ_REMOVE(&clp->nfsc_deleg, dp, nfsdl_list); + TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp, nfsdl_list); + dp->nfsdl_timestamp = NFSD_MONOSEC + 120; + donelocally = 1; + } else { + dp = NULL; + } + } + if (!donelocally) { + /* + * Get the related Open. + */ + if (recovery) + error = nfscl_getopen(&clp->nfsc_owner, + np->n_fhp->nfh_fh, np->n_fhp->nfh_len, ropenownp, + NULL, mode, NULL, &op); + else + error = nfscl_getopen(&clp->nfsc_owner, + np->n_fhp->nfh_fh, np->n_fhp->nfh_len, NULL, p, + mode, NULL, &op); + if (!error) + lhp = &op->nfso_lock; + } + if (!error && !recovery) + error = nfscl_localconflict(clp, nlop, ownp, ldp, NULL); + if (error) { + if (!recovery) { + nfscl_clrelease(clp); + NFSUNLOCKCLSTATE(); + } + FREE((caddr_t)nlp, M_NFSCLLOCKOWNER); + FREE((caddr_t)otherlop, M_NFSCLLOCK); + FREE((caddr_t)nlop, M_NFSCLLOCK); + return (error); + } + + /* + * Ok, see if a lockowner exists and create one, as required. + */ + LIST_FOREACH(lp, lhp, nfsl_list) { + if (!NFSBCMP(lp->nfsl_owner, ownp, NFSV4CL_LOCKNAMELEN)) + break; + } + if (lp == NULL) { + NFSBCOPY(ownp, nlp->nfsl_owner, NFSV4CL_LOCKNAMELEN); + if (recovery) + NFSBCOPY(ropenownp, nlp->nfsl_openowner, + NFSV4CL_LOCKNAMELEN); + else + NFSBCOPY(op->nfso_own->nfsow_owner, nlp->nfsl_openowner, + NFSV4CL_LOCKNAMELEN); + nlp->nfsl_seqid = 0; + nlp->nfsl_defunct = 0; + nlp->nfsl_inprog = NULL; + nfscl_lockinit(&nlp->nfsl_rwlock); + LIST_INIT(&nlp->nfsl_lock); + if (donelocally) { + nlp->nfsl_open = NULL; + newnfsstats.cllocallockowners++; + } else { + nlp->nfsl_open = op; + newnfsstats.cllockowners++; + } + LIST_INSERT_HEAD(lhp, nlp, nfsl_list); + lp = nlp; + nlp = NULL; + *newonep = 1; + } + + /* + * Now, update the byte ranges for locks. 
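The list surgery done by nfscl_updatelock() below is all half-open interval arithmetic over [nfslo_first, nfslo_end), with NFS64BITSSET meaning "through end of file". The core conflict test reduces to the following (a sketch; the constant name is a stand-in):

#include <stdint.h>

#define RANGE_EOF UINT64_MAX		/* stand-in for NFS64BITSSET */

/* Half-open ranges [f1, e1) and [f2, e2) intersect iff each starts
 * before the other ends; an end of RANGE_EOF extends to EOF and so
 * overlaps anything that starts at or after its first byte. */
static int
ranges_overlap(uint64_t f1, uint64_t e1, uint64_t f2, uint64_t e2)
{
	return (f1 < e2 && f2 < e1);
}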
+ */ + ret = nfscl_updatelock(lp, &nlop, &otherlop, donelocally); + if (!ret) + donelocally = 1; + if (donelocally) { + *donelocallyp = 1; + if (!recovery) + nfscl_clrelease(clp); + } else { + /* + * Serial modifications on the lock owner for multiple threads + * for the same process using a read/write lock. + */ + if (!recovery) + nfscl_lockexcl(&lp->nfsl_rwlock, NFSCLSTATEMUTEXPTR); + } + if (!recovery) + NFSUNLOCKCLSTATE(); + + if (nlp) + FREE((caddr_t)nlp, M_NFSCLLOCKOWNER); + if (nlop) + FREE((caddr_t)nlop, M_NFSCLLOCK); + if (otherlop) + FREE((caddr_t)otherlop, M_NFSCLLOCK); + + *lpp = lp; + return (0); +} + +/* + * Called to unlock a byte range, for LockU. + */ +APPLESTATIC int +nfscl_relbytelock(vnode_t vp, u_int64_t off, u_int64_t len, + __unused struct ucred *cred, NFSPROC_T *p, int callcnt, + struct nfsclclient *clp, struct nfscllockowner **lpp, int *dorpcp) +{ + struct nfscllockowner *lp; + struct nfsclowner *owp; + struct nfsclopen *op; + struct nfscllock *nlop, *other_lop = NULL; + struct nfscldeleg *dp; + struct nfsnode *np; + u_int8_t own[NFSV4CL_LOCKNAMELEN]; + int ret = 0, fnd, error; + + np = VTONFS(vp); + *lpp = NULL; + *dorpcp = 0; + + /* + * Might need these, so MALLOC them now, to + * avoid a tsleep() in MALLOC later. + */ + MALLOC(nlop, struct nfscllock *, + sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK); + nlop->nfslo_type = F_UNLCK; + nlop->nfslo_first = off; + if (len == NFS64BITSSET) { + nlop->nfslo_end = NFS64BITSSET; + } else { + nlop->nfslo_end = off + len; + if (nlop->nfslo_end <= nlop->nfslo_first) { + FREE((caddr_t)nlop, M_NFSCLLOCK); + return (NFSERR_INVAL); + } + } + if (callcnt == 0) { + MALLOC(other_lop, struct nfscllock *, + sizeof (struct nfscllock), M_NFSCLLOCK, M_WAITOK); + *other_lop = *nlop; + } + nfscl_filllockowner(p, own); + dp = NULL; + NFSLOCKCLSTATE(); + if (callcnt == 0) + dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, + np->n_fhp->nfh_len); + + /* Search for a local conflict. */ + error = nfscl_localconflict(clp, nlop, own, dp, NULL); + if (error) { + NFSUNLOCKCLSTATE(); + FREE((caddr_t)nlop, M_NFSCLLOCK); + if (other_lop != NULL) + FREE((caddr_t)other_lop, M_NFSCLLOCK); + return (error); + } + + /* + * First, unlock any local regions on a delegation. + */ + if (dp != NULL) { + /* Look for this lockowner. */ + LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) { + if (!NFSBCMP(lp->nfsl_owner, own, + NFSV4CL_LOCKNAMELEN)) + break; + } + if (lp != NULL) + /* Use other_lop, so nlop is still available */ + (void)nfscl_updatelock(lp, &other_lop, NULL, 1); + } + + /* + * Now, find a matching open/lockowner that hasn't already been done, + * as marked by nfsl_inprog. + */ + lp = NULL; + fnd = 0; + LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) { + LIST_FOREACH(op, &owp->nfsow_open, nfso_list) { + if (op->nfso_fhlen == np->n_fhp->nfh_len && + !NFSBCMP(op->nfso_fh, np->n_fhp->nfh_fh, op->nfso_fhlen)) { + LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) { + if (lp->nfsl_inprog == NULL && + !NFSBCMP(lp->nfsl_owner, own, + NFSV4CL_LOCKNAMELEN)) { + fnd = 1; + break; + } + } + if (fnd) + break; + } + } + if (fnd) + break; + } + + if (lp != NULL) { + ret = nfscl_updatelock(lp, &nlop, NULL, 0); + if (ret) + *dorpcp = 1; + /* + * Serial modifications on the lock owner for multiple + * threads for the same process using a read/write lock. 
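+ * (nfsl_inprog is set to the calling process just below, so that
+ * nfscl_releasealllocks() can later find exactly the lockowners this
+ * call marked and unlock them again.)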
+ */
+ lp->nfsl_inprog = p;
+ nfscl_lockexcl(&lp->nfsl_rwlock, NFSCLSTATEMUTEXPTR);
+ *lpp = lp;
+ }
+ NFSUNLOCKCLSTATE();
+ if (nlop)
+ FREE((caddr_t)nlop, M_NFSCLLOCK);
+ if (other_lop)
+ FREE((caddr_t)other_lop, M_NFSCLLOCK);
+ return (0);
+}
+
+/*
+ * Release all lockowners marked in progress for this process and file.
+ */
+APPLESTATIC void
+nfscl_releasealllocks(struct nfsclclient *clp, vnode_t vp, NFSPROC_T *p)
+{
+ struct nfsclowner *owp;
+ struct nfsclopen *op;
+ struct nfscllockowner *lp;
+ struct nfsnode *np;
+ u_int8_t own[NFSV4CL_LOCKNAMELEN];
+
+ np = VTONFS(vp);
+ nfscl_filllockowner(p, own);
+ NFSLOCKCLSTATE();
+ LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
+ LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
+ if (op->nfso_fhlen == np->n_fhp->nfh_len &&
+ !NFSBCMP(op->nfso_fh, np->n_fhp->nfh_fh, op->nfso_fhlen)) {
+ LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
+ if (lp->nfsl_inprog == p &&
+ !NFSBCMP(lp->nfsl_owner, own,
+ NFSV4CL_LOCKNAMELEN)) {
+ lp->nfsl_inprog = NULL;
+ nfscl_lockunlock(&lp->nfsl_rwlock);
+ }
+ }
+ }
+ }
+ }
+ nfscl_clrelease(clp);
+ NFSUNLOCKCLSTATE();
+}
+
+/*
+ * Called to find out if any bytes within the byte range specified are
+ * write locked by the calling process. Used to determine if flushing
+ * is required before a LockU.
+ * If in doubt, return 1, so the flush will occur.
+ */
+APPLESTATIC int
+nfscl_checkwritelocked(vnode_t vp, struct flock *fl,
+ struct ucred *cred, NFSPROC_T *p)
+{
+ struct nfsclowner *owp;
+ struct nfscllockowner *lp;
+ struct nfsclopen *op;
+ struct nfsclclient *clp;
+ struct nfscllock *lop;
+ struct nfscldeleg *dp;
+ struct nfsnode *np;
+ u_int64_t off, end;
+ u_int8_t own[NFSV4CL_LOCKNAMELEN];
+ int error = 0;
+
+ np = VTONFS(vp);
+ switch (fl->l_whence) {
+ case SEEK_SET:
+ case SEEK_CUR:
+ /*
+ * Caller is responsible for adding any necessary offset
+ * when SEEK_CUR is used.
+ */
+ off = fl->l_start;
+ break;
+ case SEEK_END:
+ off = np->n_size + fl->l_start;
+ break;
+ default:
+ return (1);
+ }
+ if (fl->l_len != 0) {
+ end = off + fl->l_len;
+ if (end < off)
+ return (1);
+ } else {
+ end = NFS64BITSSET;
+ }
+
+ error = nfscl_getcl(vp, cred, p, &clp);
+ if (error)
+ return (1);
+ nfscl_filllockowner(p, own);
+ NFSLOCKCLSTATE();
+
+ /*
+ * First check the delegation locks.
+ */
+ dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len);
+ if (dp != NULL) {
+ LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
+ if (!NFSBCMP(lp->nfsl_owner, own,
+ NFSV4CL_LOCKNAMELEN))
+ break;
+ }
+ if (lp != NULL) {
+ LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) {
+ if (lop->nfslo_first >= end)
+ break;
+ if (lop->nfslo_end <= off)
+ continue;
+ if (lop->nfslo_type == F_WRLCK) {
+ nfscl_clrelease(clp);
+ NFSUNLOCKCLSTATE();
+ return (1);
+ }
+ }
+ }
+ }
+
+ /*
+ * Now, check state against the server.
+ */
+ LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
+ LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
+ if (op->nfso_fhlen == np->n_fhp->nfh_len &&
+ !NFSBCMP(op->nfso_fh, np->n_fhp->nfh_fh, op->nfso_fhlen)) {
+ LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) {
+ if (!NFSBCMP(lp->nfsl_owner, own,
+ NFSV4CL_LOCKNAMELEN))
+ break;
+ }
+ if (lp != NULL) {
+ LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) {
+ if (lop->nfslo_first >= end)
+ break;
+ if (lop->nfslo_end <= off)
+ continue;
+ if (lop->nfslo_type == F_WRLCK) {
+ nfscl_clrelease(clp);
+ NFSUNLOCKCLSTATE();
+ return (1);
+ }
+ }
+ }
+ }
+ }
+ }
+ nfscl_clrelease(clp);
+ NFSUNLOCKCLSTATE();
+ return (0);
+}
+
+/*
+ * Release a byte range lock owner structure.
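+ * If the lock RPC failed and the owner can be discarded (candelete set
+ * and no other thread waiting on it), the lockowner is freed outright;
+ * otherwise just the exclusive lock taken in nfscl_getbytelock() is
+ * dropped.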
+ */ +APPLESTATIC void +nfscl_lockrelease(struct nfscllockowner *lp, int error, int candelete) +{ + struct nfsclclient *clp; + + if (lp == NULL) + return; + NFSLOCKCLSTATE(); + clp = lp->nfsl_open->nfso_own->nfsow_clp; + if (error != 0 && candelete && + (lp->nfsl_rwlock.nfslock_lock & NFSV4LOCK_WANTED) == 0) + nfscl_freelockowner(lp, 0); + else + nfscl_lockunlock(&lp->nfsl_rwlock); + nfscl_clrelease(clp); + NFSUNLOCKCLSTATE(); +} + +/* + * Free up an open structure and any associated byte range lock structures. + */ +APPLESTATIC void +nfscl_freeopen(struct nfsclopen *op, int local) +{ + + LIST_REMOVE(op, nfso_list); + nfscl_freealllocks(&op->nfso_lock, local); + FREE((caddr_t)op, M_NFSCLOPEN); + if (local) + newnfsstats.cllocalopens--; + else + newnfsstats.clopens--; +} + +/* + * Free up all lock owners and associated locks. + */ +static void +nfscl_freealllocks(struct nfscllockownerhead *lhp, int local) +{ + struct nfscllockowner *lp, *nlp; + + LIST_FOREACH_SAFE(lp, lhp, nfsl_list, nlp) { + if ((lp->nfsl_rwlock.nfslock_lock & NFSV4LOCK_WANTED)) + panic("nfscllckw"); + nfscl_freelockowner(lp, local); + } +} + +/* + * Called for an Open when NFSERR_EXPIRED is received from the server. + * If there are no byte range locks nor a Share Deny lost, try to do a + * fresh Open. Otherwise, free the open. + */ +static int +nfscl_expireopen(struct nfsclclient *clp, struct nfsclopen *op, + struct nfsmount *nmp, struct ucred *cred, NFSPROC_T *p) +{ + struct nfscllockowner *lp; + struct nfscldeleg *dp; + int mustdelete = 0, error; + + /* + * Look for any byte range lock(s). + */ + LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) { + if (!LIST_EMPTY(&lp->nfsl_lock)) { + mustdelete = 1; + break; + } + } + + /* + * If no byte range lock(s) nor a Share deny, try to re-open. + */ + if (!mustdelete && (op->nfso_mode & NFSLCK_DENYBITS) == 0) { + newnfs_copycred(&op->nfso_cred, cred); + dp = NULL; + error = nfsrpc_reopen(nmp, op->nfso_fh, + op->nfso_fhlen, op->nfso_mode, op, &dp, cred, p); + if (error) { + mustdelete = 1; + if (dp != NULL) { + FREE((caddr_t)dp, M_NFSCLDELEG); + dp = NULL; + } + } + if (dp != NULL) + nfscl_deleg(nmp->nm_mountp, clp, op->nfso_fh, + op->nfso_fhlen, cred, p, &dp); + } + + /* + * If a byte range lock or Share deny or couldn't re-open, free it. + */ + if (mustdelete) + nfscl_freeopen(op, 0); + return (mustdelete); +} + +/* + * Free up an open owner structure. + */ +static void +nfscl_freeopenowner(struct nfsclowner *owp, int local) +{ + + LIST_REMOVE(owp, nfsow_list); + FREE((caddr_t)owp, M_NFSCLOWNER); + if (local) + newnfsstats.cllocalopenowners--; + else + newnfsstats.clopenowners--; +} + +/* + * Free up a byte range lock owner structure. + */ +static void +nfscl_freelockowner(struct nfscllockowner *lp, int local) +{ + struct nfscllock *lop, *nlop; + + LIST_REMOVE(lp, nfsl_list); + LIST_FOREACH_SAFE(lop, &lp->nfsl_lock, nfslo_list, nlop) { + nfscl_freelock(lop, local); + } + FREE((caddr_t)lp, M_NFSCLLOCKOWNER); + if (local) + newnfsstats.cllocallockowners--; + else + newnfsstats.cllockowners--; +} + +/* + * Free up a byte range lock structure. + */ +APPLESTATIC void +nfscl_freelock(struct nfscllock *lop, int local) +{ + + LIST_REMOVE(lop, nfslo_list); + FREE((caddr_t)lop, M_NFSCLLOCK); + if (local) + newnfsstats.cllocallocks--; + else + newnfsstats.cllocks--; +} + +/* + * Clean out the state related to a delegation. 
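+ * A delegation carries at most one locally issued open per openowner
+ * (hence the panic below if more are found); those local opens, their
+ * openowners and any locally issued byte range locks are all freed
+ * here.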
+ */ +static void +nfscl_cleandeleg(struct nfscldeleg *dp) +{ + struct nfsclowner *owp, *nowp; + struct nfsclopen *op; + + LIST_FOREACH_SAFE(owp, &dp->nfsdl_owner, nfsow_list, nowp) { + op = LIST_FIRST(&owp->nfsow_open); + if (op != NULL) { + if (LIST_NEXT(op, nfso_list) != NULL) + panic("nfscleandel"); + nfscl_freeopen(op, 1); + } + nfscl_freeopenowner(owp, 1); + } + nfscl_freealllocks(&dp->nfsdl_lock, 1); +} + +/* + * Free a delegation. + */ +static void +nfscl_freedeleg(struct nfscldeleghead *hdp, struct nfscldeleg *dp) +{ + + TAILQ_REMOVE(hdp, dp, nfsdl_list); + LIST_REMOVE(dp, nfsdl_hash); + FREE((caddr_t)dp, M_NFSCLDELEG); + newnfsstats.cldelegates--; + nfscl_delegcnt--; +} + +/* + * Free up all state related to this client structure. + */ +static void +nfscl_cleanclient(struct nfsclclient *clp) +{ + struct nfsclowner *owp, *nowp; + struct nfsclopen *op, *nop; + struct nfscllockowner *lp, *nlp; + + + /* get rid of defunct lockowners */ + LIST_FOREACH_SAFE(lp, &clp->nfsc_defunctlockowner, nfsl_list, nlp) { + nfscl_freelockowner(lp, 0); + } + + /* Now, all the OpenOwners, etc. */ + LIST_FOREACH_SAFE(owp, &clp->nfsc_owner, nfsow_list, nowp) { + LIST_FOREACH_SAFE(op, &owp->nfsow_open, nfso_list, nop) { + nfscl_freeopen(op, 0); + } + nfscl_freeopenowner(owp, 0); + } +} + +/* + * Called when an NFSERR_EXPIRED is received from the server. + */ +static void +nfscl_expireclient(struct nfsclclient *clp, struct nfsmount *nmp, + struct ucred *cred, NFSPROC_T *p) +{ + struct nfsclowner *owp, *nowp, *towp; + struct nfsclopen *op, *nop, *top; + struct nfscldeleg *dp, *ndp; + int ret, printed = 0; + + /* + * First, merge locally issued Opens into the list for the server. + */ + dp = TAILQ_FIRST(&clp->nfsc_deleg); + while (dp != NULL) { + ndp = TAILQ_NEXT(dp, nfsdl_list); + owp = LIST_FIRST(&dp->nfsdl_owner); + while (owp != NULL) { + nowp = LIST_NEXT(owp, nfsow_list); + op = LIST_FIRST(&owp->nfsow_open); + if (op != NULL) { + if (LIST_NEXT(op, nfso_list) != NULL) + panic("nfsclexp"); + LIST_FOREACH(towp, &clp->nfsc_owner, nfsow_list) { + if (!NFSBCMP(towp->nfsow_owner, owp->nfsow_owner, + NFSV4CL_LOCKNAMELEN)) + break; + } + if (towp != NULL) { + /* Merge opens in */ + LIST_FOREACH(top, &towp->nfsow_open, nfso_list) { + if (top->nfso_fhlen == op->nfso_fhlen && + !NFSBCMP(top->nfso_fh, op->nfso_fh, + op->nfso_fhlen)) { + top->nfso_mode |= op->nfso_mode; + top->nfso_opencnt += op->nfso_opencnt; + break; + } + } + if (top == NULL) { + /* Just add the open to the owner list */ + LIST_REMOVE(op, nfso_list); + op->nfso_own = towp; + LIST_INSERT_HEAD(&towp->nfsow_open, op, nfso_list); + newnfsstats.cllocalopens--; + newnfsstats.clopens++; + } + } else { + /* Just add the openowner to the client list */ + LIST_REMOVE(owp, nfsow_list); + owp->nfsow_clp = clp; + LIST_INSERT_HEAD(&clp->nfsc_owner, owp, nfsow_list); + newnfsstats.cllocalopenowners--; + newnfsstats.clopenowners++; + newnfsstats.cllocalopens--; + newnfsstats.clopens++; + } + } + owp = nowp; + } + if (!printed && !LIST_EMPTY(&dp->nfsdl_lock)) { + printed = 1; + printf("nfsv4 expired locks lost\n"); + } + nfscl_cleandeleg(dp); + nfscl_freedeleg(&clp->nfsc_deleg, dp); + dp = ndp; + } + if (!TAILQ_EMPTY(&clp->nfsc_deleg)) + panic("nfsclexp"); + + /* + * Now, try and reopen against the server. 
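+ * (By this point every locally issued open has been merged into the
+ * client's own list, so nfscl_expireopen() is attempted for each one
+ * and any openowner left with no opens is freed.)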
+ */ + LIST_FOREACH_SAFE(owp, &clp->nfsc_owner, nfsow_list, nowp) { + owp->nfsow_seqid = 0; + LIST_FOREACH_SAFE(op, &owp->nfsow_open, nfso_list, nop) { + ret = nfscl_expireopen(clp, op, nmp, cred, p); + if (ret && !printed) { + printed = 1; + printf("nfsv4 expired locks lost\n"); + } + } + if (LIST_EMPTY(&owp->nfsow_open)) + nfscl_freeopenowner(owp, 0); + } +} + +#ifndef __FreeBSD__ +/* + * Called from exit() upon process termination. + */ +APPLESTATIC void +nfscl_cleanup(NFSPROC_T *p) +{ + struct nfsclclient *clp; + u_int8_t own[NFSV4CL_LOCKNAMELEN]; + + if (!nfscl_inited) + return; + nfscl_filllockowner(p, own); + + NFSLOCKCLSTATE(); + /* + * Loop through all the clientids, looking for the OpenOwners. + */ + LIST_FOREACH(clp, &nfsclhead, nfsc_list) + nfscl_cleanup_common(clp, own); + NFSUNLOCKCLSTATE(); +} +#endif /* !__FreeBSD__ */ + +/* + * Common code used by nfscl_cleanup() and nfscl_cleanupkext(). + * Must be called with CLSTATE lock held. + */ +static void +nfscl_cleanup_common(struct nfsclclient *clp, u_int8_t *own) +{ + struct nfsclowner *owp, *nowp; + struct nfsclopen *op; + struct nfscllockowner *lp, *nlp; + struct nfscldeleg *dp; + + /* First, get rid of local locks on delegations. */ + TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) { + LIST_FOREACH_SAFE(lp, &dp->nfsdl_lock, nfsl_list, nlp) { + if (!NFSBCMP(lp->nfsl_owner, own, NFSV4CL_LOCKNAMELEN)) { + if ((lp->nfsl_rwlock.nfslock_lock & NFSV4LOCK_WANTED)) + panic("nfscllckw"); + nfscl_freelockowner(lp, 1); + } + } + } + owp = LIST_FIRST(&clp->nfsc_owner); + while (owp != NULL) { + nowp = LIST_NEXT(owp, nfsow_list); + if (!NFSBCMP(owp->nfsow_owner, own, + NFSV4CL_LOCKNAMELEN)) { + /* + * If there are children that haven't closed the + * file descriptors yet, the opens will still be + * here. For that case, let the renew thread clear + * out the OpenOwner later. + */ + if (LIST_EMPTY(&owp->nfsow_open)) + nfscl_freeopenowner(owp, 0); + else + owp->nfsow_defunct = 1; + } else { + /* look for lockowners on other opens */ + LIST_FOREACH(op, &owp->nfsow_open, nfso_list) { + LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) { + if (!NFSBCMP(lp->nfsl_owner, own, + NFSV4CL_LOCKNAMELEN)) + lp->nfsl_defunct = 1; + } + } + } + owp = nowp; + } + + /* and check the defunct list */ + LIST_FOREACH(lp, &clp->nfsc_defunctlockowner, nfsl_list) { + if (!NFSBCMP(lp->nfsl_owner, own, NFSV4CL_LOCKNAMELEN)) + lp->nfsl_defunct = 1; + } +} + +#if defined(APPLEKEXT) || defined(__FreeBSD__) +/* + * Simulate the call nfscl_cleanup() by looking for open owners associated + * with processes that no longer exist, since a call to nfscl_cleanup() + * can't be patched into exit(). + */ +static void +nfscl_cleanupkext(struct nfsclclient *clp) +{ + struct nfsclowner *owp, *nowp; + struct nfscllockowner *lp; + + NFSPROCLISTLOCK(); + NFSLOCKCLSTATE(); + LIST_FOREACH_SAFE(owp, &clp->nfsc_owner, nfsow_list, nowp) { + if (nfscl_procdoesntexist(owp->nfsow_owner)) + nfscl_cleanup_common(clp, owp->nfsow_owner); + } + + /* and check the defunct list */ + LIST_FOREACH(lp, &clp->nfsc_defunctlockowner, nfsl_list) { + if (nfscl_procdoesntexist(lp->nfsl_owner)) + lp->nfsl_defunct = 1; + } + NFSUNLOCKCLSTATE(); + NFSPROCLISTUNLOCK(); +} +#endif /* APPLEKEXT || __FreeBSD__ */ + +/* + * Called from nfs umount to free up the clientid. 
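+ * The renew thread is told to exit first (NFSCLFLAGS_UMOUNT) and waited
+ * for; the remaining state is then torn down under an exclusive client
+ * lock, with a final SetClientId done so the server can discard its
+ * side of the state.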
+ */ +APPLESTATIC void +nfscl_umount(struct nfsmount *nmp, NFSPROC_T *p) +{ + struct nfsclclient *clp; + struct ucred *cred; + int igotlock; + + clp = nmp->nm_clp; + if (clp != NULL) { + if ((clp->nfsc_flags & NFSCLFLAGS_INITED) == 0) + panic("nfscl umount"); + + /* + * First, handshake with the nfscl renew thread, to terminate + * it. + */ + clp->nfsc_flags |= NFSCLFLAGS_UMOUNT; + while (clp->nfsc_flags & NFSCLFLAGS_HASTHREAD) + (void) tsleep((caddr_t)clp, PWAIT, "nfsclumnt", hz); + + NFSLOCKCLSTATE(); + do { + igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL, + NFSCLSTATEMUTEXPTR); + } while (!igotlock); + NFSUNLOCKCLSTATE(); + + /* + * Free up all the state. It will expire on the server, but + * maybe we should do a SetClientId/SetClientIdConfirm so + * the server throws it away? + */ + LIST_REMOVE(clp, nfsc_list); + nfscl_delegreturnall(clp, p); + cred = newnfs_getcred(); + (void) nfsrpc_setclient(nmp, clp, cred, p); + nfscl_cleanclient(clp); + nmp->nm_clp = NULL; + NFSFREECRED(cred); + FREE((caddr_t)clp, M_NFSCLCLIENT); + } + +} + +/* + * This function is called when a server replies with NFSERR_STALECLIENTID + * or NFSERR_STALESTATEID. It traverses the clientid lists, doing Opens + * and Locks with reclaim. If these fail, it deletes the corresponding state. + */ +static void +nfscl_recover(struct nfsclclient *clp, struct ucred *cred, NFSPROC_T *p) +{ + struct nfsclowner *owp, *nowp; + struct nfsclopen *op, *nop; + struct nfscllockowner *lp, *nlp; + struct nfscllock *lop, *nlop; + struct nfscldeleg *dp, *ndp, *tdp; + struct nfsmount *nmp; + struct ucred *tcred; + struct nfsclopenhead extra_open; + struct nfscldeleghead extra_deleg; + struct nfsreq *rep; + u_int64_t len; + u_int32_t delegtype = NFSV4OPEN_DELEGATEWRITE, mode; + int igotlock = 0, error, trycnt, firstlock, s; + + /* + * First, lock the client structure, so everyone else will + * block when trying to use state. + */ + NFSLOCKCLSTATE(); + do { + igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL, + NFSCLSTATEMUTEXPTR); + } while (!igotlock); + NFSUNLOCKCLSTATE(); + + nmp = clp->nfsc_nmp; + if (nmp == NULL) + panic("nfscl recover"); + trycnt = 5; + do { + error = nfsrpc_setclient(nmp, clp, cred, p); + } while ((error == NFSERR_STALECLIENTID || + error == NFSERR_STALEDONTRECOVER) && --trycnt > 0); + if (error) { + nfscl_cleanclient(clp); + clp->nfsc_flags &= ~(NFSCLFLAGS_HASCLIENTID | + NFSCLFLAGS_RECOVER); + NFSLOCKCLSTATE(); + nfsv4_unlock(&clp->nfsc_lock, 0); + NFSUNLOCKCLSTATE(); + return; + } + clp->nfsc_flags |= NFSCLFLAGS_HASCLIENTID; + clp->nfsc_flags &= ~NFSCLFLAGS_RECOVER; + + /* + * Mark requests already queued on the server, so that they don't + * initiate another recovery cycle. Any requests already in the + * queue that handle state information will have the old stale + * clientid/stateid and will get a NFSERR_STALESTATEID or + * NFSERR_STALECLIENTID reply from the server. This will be + * translated to NFSERR_STALEDONTRECOVER when R_DONTRECOVER is set. + */ + s = splsoftclock(); + NFSLOCKREQ(); + TAILQ_FOREACH(rep, &nfsd_reqq, r_chain) { + if (rep->r_nmp == nmp) + rep->r_flags |= R_DONTRECOVER; + } + NFSUNLOCKREQ(); + splx(s); + + /* get rid of defunct lockowners */ + LIST_FOREACH_SAFE(lp, &clp->nfsc_defunctlockowner, nfsl_list, nlp) { + nfscl_freelockowner(lp, 0); + } + + /* + * Now, mark all delegations "need reclaim". 
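+ * Each one is then either reclaimed together with a matching open in
+ * the loop below, or reclaimed at the end with a synthetic
+ * "RECLAIMDELEG" open; any delegation that still cannot be reclaimed
+ * is discarded.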
+ */
+ TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list)
+ dp->nfsdl_flags |= NFSCLDL_NEEDRECLAIM;
+
+ TAILQ_INIT(&extra_deleg);
+ LIST_INIT(&extra_open);
+ /*
+ * Now traverse the state lists, doing Open and Lock Reclaims.
+ */
+ tcred = newnfs_getcred();
+ owp = LIST_FIRST(&clp->nfsc_owner);
+ while (owp != NULL) {
+ nowp = LIST_NEXT(owp, nfsow_list);
+ owp->nfsow_seqid = 0;
+ op = LIST_FIRST(&owp->nfsow_open);
+ while (op != NULL) {
+ nop = LIST_NEXT(op, nfso_list);
+ if (error != NFSERR_NOGRACE) {
+ /* Search for a delegation to reclaim with the open */
+ TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
+ if (!(dp->nfsdl_flags & NFSCLDL_NEEDRECLAIM))
+ continue;
+ if ((dp->nfsdl_flags & NFSCLDL_WRITE)) {
+ mode = NFSV4OPEN_ACCESSWRITE;
+ delegtype = NFSV4OPEN_DELEGATEWRITE;
+ } else {
+ mode = NFSV4OPEN_ACCESSREAD;
+ delegtype = NFSV4OPEN_DELEGATEREAD;
+ }
+ if ((op->nfso_mode & mode) == mode &&
+ op->nfso_fhlen == dp->nfsdl_fhlen &&
+ !NFSBCMP(op->nfso_fh, dp->nfsdl_fh, op->nfso_fhlen))
+ break;
+ }
+ ndp = dp;
+ if (dp == NULL)
+ delegtype = NFSV4OPEN_DELEGATENONE;
+ newnfs_copycred(&op->nfso_cred, tcred);
+ error = nfscl_tryopen(nmp, NULL, op->nfso_fh,
+ op->nfso_fhlen, op->nfso_fh, op->nfso_fhlen,
+ op->nfso_mode, op, NULL, 0, &ndp, 1, delegtype,
+ tcred, p);
+ if (!error) {
+ /* Handle any replied delegation */
+ if (ndp != NULL && ((ndp->nfsdl_flags & NFSCLDL_WRITE)
+ || NFSMNT_RDONLY(nmp->nm_mountp))) {
+ if ((ndp->nfsdl_flags & NFSCLDL_WRITE))
+ mode = NFSV4OPEN_ACCESSWRITE;
+ else
+ mode = NFSV4OPEN_ACCESSREAD;
+ TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
+ if (!(dp->nfsdl_flags & NFSCLDL_NEEDRECLAIM))
+ continue;
+ if ((op->nfso_mode & mode) == mode &&
+ op->nfso_fhlen == dp->nfsdl_fhlen &&
+ !NFSBCMP(op->nfso_fh, dp->nfsdl_fh,
+ op->nfso_fhlen)) {
+ dp->nfsdl_stateid = ndp->nfsdl_stateid;
+ dp->nfsdl_sizelimit = ndp->nfsdl_sizelimit;
+ dp->nfsdl_ace = ndp->nfsdl_ace;
+ dp->nfsdl_change = ndp->nfsdl_change;
+ dp->nfsdl_flags &= ~NFSCLDL_NEEDRECLAIM;
+ if ((ndp->nfsdl_flags & NFSCLDL_RECALL))
+ dp->nfsdl_flags |= NFSCLDL_RECALL;
+ FREE((caddr_t)ndp, M_NFSCLDELEG);
+ ndp = NULL;
+ break;
+ }
+ }
+ }
+ if (ndp != NULL)
+ TAILQ_INSERT_HEAD(&extra_deleg, ndp, nfsdl_list);
+
+ /* and reclaim all byte range locks */
+ lp = LIST_FIRST(&op->nfso_lock);
+ while (lp != NULL) {
+ nlp = LIST_NEXT(lp, nfsl_list);
+ lp->nfsl_seqid = 0;
+ firstlock = 1;
+ lop = LIST_FIRST(&lp->nfsl_lock);
+ while (lop != NULL) {
+ nlop = LIST_NEXT(lop, nfslo_list);
+ if (lop->nfslo_end == NFS64BITSSET)
+ len = NFS64BITSSET;
+ else
+ len = lop->nfslo_end - lop->nfslo_first;
+ if (error != NFSERR_NOGRACE)
+ error = nfscl_trylock(nmp, NULL,
+ op->nfso_fh, op->nfso_fhlen, lp,
+ firstlock, 1, lop->nfslo_first, len,
+ lop->nfslo_type, tcred, p);
+ if (error != 0)
+ nfscl_freelock(lop, 0);
+ else
+ firstlock = 0;
+ lop = nlop;
+ }
+ /* If no locks, but a lockowner, just delete it. */
+ if (LIST_EMPTY(&lp->nfsl_lock))
+ nfscl_freelockowner(lp, 0);
+ lp = nlp;
+ }
+ } else {
+ nfscl_freeopen(op, 0);
+ }
+ }
+ op = nop;
+ }
+ owp = nowp;
+ }
+
+ /*
+ * Now, try and get any delegations not yet reclaimed by cobbling
+ * together an appropriate open.
+ */
+ nowp = NULL;
+ dp = TAILQ_FIRST(&clp->nfsc_deleg);
+ while (dp != NULL) {
+ ndp = TAILQ_NEXT(dp, nfsdl_list);
+ if ((dp->nfsdl_flags & NFSCLDL_NEEDRECLAIM)) {
+ if (nowp == NULL) {
+ MALLOC(nowp, struct nfsclowner *,
+ sizeof (struct nfsclowner), M_NFSCLOWNER, M_WAITOK);
+ /*
+ * Name must be as long as the largest possible
+ * NFSV4CL_LOCKNAMELEN. 12 for now.
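+ * ("RECLAIMDELEG" is exactly 12 characters, so the NFSBCOPY below
+ * fills the owner name completely.)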
+ */ + NFSBCOPY("RECLAIMDELEG", nowp->nfsow_owner, + NFSV4CL_LOCKNAMELEN); + LIST_INIT(&nowp->nfsow_open); + nowp->nfsow_clp = clp; + nowp->nfsow_seqid = 0; + nowp->nfsow_defunct = 0; + nfscl_lockinit(&nowp->nfsow_rwlock); + } + nop = NULL; + if (error != NFSERR_NOGRACE) { + MALLOC(nop, struct nfsclopen *, sizeof (struct nfsclopen) + + dp->nfsdl_fhlen - 1, M_NFSCLOPEN, M_WAITOK); + nop->nfso_own = nowp; + if ((dp->nfsdl_flags & NFSCLDL_WRITE)) { + nop->nfso_mode = NFSV4OPEN_ACCESSWRITE; + delegtype = NFSV4OPEN_DELEGATEWRITE; + } else { + nop->nfso_mode = NFSV4OPEN_ACCESSREAD; + delegtype = NFSV4OPEN_DELEGATEREAD; + } + nop->nfso_opencnt = 0; + nop->nfso_posixlock = 1; + nop->nfso_fhlen = dp->nfsdl_fhlen; + NFSBCOPY(dp->nfsdl_fh, nop->nfso_fh, dp->nfsdl_fhlen); + LIST_INIT(&nop->nfso_lock); + nop->nfso_stateid.seqid = 0; + nop->nfso_stateid.other[0] = 0; + nop->nfso_stateid.other[1] = 0; + nop->nfso_stateid.other[2] = 0; + newnfs_copycred(&dp->nfsdl_cred, tcred); + newnfs_copyincred(tcred, &nop->nfso_cred); + tdp = NULL; + error = nfscl_tryopen(nmp, NULL, nop->nfso_fh, + nop->nfso_fhlen, nop->nfso_fh, nop->nfso_fhlen, + nop->nfso_mode, nop, NULL, 0, &tdp, 1, + delegtype, tcred, p); + if (tdp != NULL) { + if ((tdp->nfsdl_flags & NFSCLDL_WRITE)) + mode = NFSV4OPEN_ACCESSWRITE; + else + mode = NFSV4OPEN_ACCESSREAD; + if ((nop->nfso_mode & mode) == mode && + nop->nfso_fhlen == tdp->nfsdl_fhlen && + !NFSBCMP(nop->nfso_fh, tdp->nfsdl_fh, + nop->nfso_fhlen)) { + dp->nfsdl_stateid = tdp->nfsdl_stateid; + dp->nfsdl_sizelimit = tdp->nfsdl_sizelimit; + dp->nfsdl_ace = tdp->nfsdl_ace; + dp->nfsdl_change = tdp->nfsdl_change; + dp->nfsdl_flags &= ~NFSCLDL_NEEDRECLAIM; + if ((tdp->nfsdl_flags & NFSCLDL_RECALL)) + dp->nfsdl_flags |= NFSCLDL_RECALL; + FREE((caddr_t)tdp, M_NFSCLDELEG); + } else { + TAILQ_INSERT_HEAD(&extra_deleg, tdp, nfsdl_list); + } + } + } + if (error) { + if (nop != NULL) + FREE((caddr_t)nop, M_NFSCLOPEN); + /* + * Couldn't reclaim it, so throw the state + * away. Ouch!! + */ + nfscl_cleandeleg(dp); + nfscl_freedeleg(&clp->nfsc_deleg, dp); + } else { + LIST_INSERT_HEAD(&extra_open, nop, nfso_list); + } + } + dp = ndp; + } + + /* + * Now, get rid of extra Opens and Delegations. + */ + LIST_FOREACH_SAFE(op, &extra_open, nfso_list, nop) { + do { + newnfs_copycred(&op->nfso_cred, tcred); + error = nfscl_tryclose(op, tcred, nmp, p); + if (error == NFSERR_GRACE) + (void) nfs_catnap(PZERO, "nfsexcls"); + } while (error == NFSERR_GRACE); + LIST_REMOVE(op, nfso_list); + FREE((caddr_t)op, M_NFSCLOPEN); + } + if (nowp != NULL) + FREE((caddr_t)nowp, M_NFSCLOWNER); + + TAILQ_FOREACH_SAFE(dp, &extra_deleg, nfsdl_list, ndp) { + do { + newnfs_copycred(&dp->nfsdl_cred, tcred); + error = nfscl_trydelegreturn(dp, tcred, nmp, p); + if (error == NFSERR_GRACE) + (void) nfs_catnap(PZERO, "nfsexdlg"); + } while (error == NFSERR_GRACE); + TAILQ_REMOVE(&extra_deleg, dp, nfsdl_list); + FREE((caddr_t)dp, M_NFSCLDELEG); + } + + NFSLOCKCLSTATE(); + nfsv4_unlock(&clp->nfsc_lock, 0); + NFSUNLOCKCLSTATE(); + NFSFREECRED(tcred); +} + +/* + * This function is called when a server replies with NFSERR_EXPIRED. + * It deletes all state for the client and does a fresh SetClientId/confirm. + * XXX Someday it should post a signal to the process(es) that hold the + * state, so they know that lock state has been lost. 
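+ * Only one thread does the rebuild per expiry: NFSCLFLAGS_EXPIREIT is
+ * set while a thread is working on it, and racing callers simply wait
+ * for the flag to be cleared and then return 0 without redoing the
+ * work.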
+ */ +APPLESTATIC int +nfscl_hasexpired(struct nfsclclient *clp, u_int32_t clidrev, NFSPROC_T *p) +{ + struct nfscllockowner *lp, *nlp; + struct nfsmount *nmp; + struct ucred *cred; + int igotlock = 0, error, trycnt; + + /* + * If the clientid has gone away or a new SetClientid has already + * been done, just return ok. + */ + if (clp == NULL || clidrev != clp->nfsc_clientidrev) + return (0); + + /* + * First, lock the client structure, so everyone else will + * block when trying to use state. Also, use NFSCLFLAGS_EXPIREIT so + * that only one thread does the work. + */ + NFSLOCKCLSTATE(); + clp->nfsc_flags |= NFSCLFLAGS_EXPIREIT; + do { + igotlock = nfsv4_lock(&clp->nfsc_lock, 1, NULL, + NFSCLSTATEMUTEXPTR); + } while (!igotlock && (clp->nfsc_flags & NFSCLFLAGS_EXPIREIT)); + if ((clp->nfsc_flags & NFSCLFLAGS_EXPIREIT) == 0) { + if (igotlock) + nfsv4_unlock(&clp->nfsc_lock, 0); + NFSUNLOCKCLSTATE(); + return (0); + } + NFSUNLOCKCLSTATE(); + + nmp = clp->nfsc_nmp; + if (nmp == NULL) + panic("nfscl expired"); + cred = newnfs_getcred(); + trycnt = 5; + do { + error = nfsrpc_setclient(nmp, clp, cred, p); + } while ((error == NFSERR_STALECLIENTID || + error == NFSERR_STALEDONTRECOVER) && --trycnt > 0); + if (error) { + /* + * Clear out any state. + */ + nfscl_cleanclient(clp); + clp->nfsc_flags &= ~(NFSCLFLAGS_HASCLIENTID | + NFSCLFLAGS_RECOVER); + } else { + /* get rid of defunct lockowners */ + LIST_FOREACH_SAFE(lp, &clp->nfsc_defunctlockowner, nfsl_list, + nlp) { + nfscl_freelockowner(lp, 0); + } + + /* + * Expire the state for the client. + */ + nfscl_expireclient(clp, nmp, cred, p); + clp->nfsc_flags |= NFSCLFLAGS_HASCLIENTID; + clp->nfsc_flags &= ~NFSCLFLAGS_RECOVER; + } + NFSFREECRED(cred); + clp->nfsc_flags &= ~NFSCLFLAGS_EXPIREIT; + NFSLOCKCLSTATE(); + nfsv4_unlock(&clp->nfsc_lock, 0); + NFSUNLOCKCLSTATE(); + return (error); +} + +/* + * This function inserts a lock in the list after insert_lop. + */ +static void +nfscl_insertlock(struct nfscllockowner *lp, struct nfscllock *new_lop, + struct nfscllock *insert_lop, int local) +{ + + if ((struct nfscllockowner *)insert_lop == lp) + LIST_INSERT_HEAD(&lp->nfsl_lock, new_lop, nfslo_list); + else + LIST_INSERT_AFTER(insert_lop, new_lop, nfslo_list); + if (local) + newnfsstats.cllocallocks++; + else + newnfsstats.cllocks++; +} + +/* + * This function updates the locking for a lock owner and given file. It + * maintains a list of lock ranges ordered on increasing file offset that + * are NFSCLLOCK_READ or NFSCLLOCK_WRITE and non-overlapping (aka POSIX style). + * It always adds new_lop to the list and sometimes uses the one pointed + * at by other_lopp. + * Returns 1 if the locks were modified, 0 otherwise. + */ +static int +nfscl_updatelock(struct nfscllockowner *lp, struct nfscllock **new_lopp, + struct nfscllock **other_lopp, int local) +{ + struct nfscllock *new_lop = *new_lopp; + struct nfscllock *lop, *tlop, *ilop; + struct nfscllock *other_lop; + int unlock = 0, modified = 0; + u_int64_t tmp; + + /* + * Work down the list until the lock is merged. + */ + if (new_lop->nfslo_type == F_UNLCK) + unlock = 1; + ilop = (struct nfscllock *)lp; + lop = LIST_FIRST(&lp->nfsl_lock); + while (lop != NULL) { + /* + * Only check locks for this file that aren't before the start of + * new lock's range. + */ + if (lop->nfslo_end >= new_lop->nfslo_first) { + if (new_lop->nfslo_end < lop->nfslo_first) { + /* + * If the new lock ends before the start of the + * current lock's range, no merge, just insert + * the new lock. 
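+ * (The list is ordered on increasing nfslo_first, so no
+ * later entry can overlap the new range either.)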
+ */ + break; + } + if (new_lop->nfslo_type == lop->nfslo_type || + (new_lop->nfslo_first <= lop->nfslo_first && + new_lop->nfslo_end >= lop->nfslo_end)) { + /* + * This lock can be absorbed by the new lock/unlock. + * This happens when it covers the entire range + * of the old lock or is contiguous + * with the old lock and is of the same type or an + * unlock. + */ + if (new_lop->nfslo_type != lop->nfslo_type || + new_lop->nfslo_first != lop->nfslo_first || + new_lop->nfslo_end != lop->nfslo_end) + modified = 1; + if (lop->nfslo_first < new_lop->nfslo_first) + new_lop->nfslo_first = lop->nfslo_first; + if (lop->nfslo_end > new_lop->nfslo_end) + new_lop->nfslo_end = lop->nfslo_end; + tlop = lop; + lop = LIST_NEXT(lop, nfslo_list); + nfscl_freelock(tlop, local); + continue; + } + + /* + * All these cases are for contiguous locks that are not the + * same type, so they can't be merged. + */ + if (new_lop->nfslo_first <= lop->nfslo_first) { + /* + * This case is where the new lock overlaps with the + * first part of the old lock. Move the start of the + * old lock to just past the end of the new lock. The + * new lock will be inserted in front of the old, since + * ilop hasn't been updated. (We are done now.) + */ + if (lop->nfslo_first != new_lop->nfslo_end) { + lop->nfslo_first = new_lop->nfslo_end; + modified = 1; + } + break; + } + if (new_lop->nfslo_end >= lop->nfslo_end) { + /* + * This case is where the new lock overlaps with the + * end of the old lock's range. Move the old lock's + * end to just before the new lock's first and insert + * the new lock after the old lock. + * Might not be done yet, since the new lock could + * overlap further locks with higher ranges. + */ + if (lop->nfslo_end != new_lop->nfslo_first) { + lop->nfslo_end = new_lop->nfslo_first; + modified = 1; + } + ilop = lop; + lop = LIST_NEXT(lop, nfslo_list); + continue; + } + /* + * The final case is where the new lock's range is in the + * middle of the current lock's and splits the current lock + * up. Use *other_lopp to handle the second part of the + * split old lock range. (We are done now.) + * For unlock, we use new_lop as other_lop and tmp, since + * other_lop and new_lop are the same for this case. + * We noted the unlock case above, so we don't need + * new_lop->nfslo_type any longer. + */ + tmp = new_lop->nfslo_first; + if (unlock) { + other_lop = new_lop; + *new_lopp = NULL; + } else { + other_lop = *other_lopp; + *other_lopp = NULL; + } + other_lop->nfslo_first = new_lop->nfslo_end; + other_lop->nfslo_end = lop->nfslo_end; + other_lop->nfslo_type = lop->nfslo_type; + lop->nfslo_end = tmp; + nfscl_insertlock(lp, other_lop, lop, local); + ilop = lop; + modified = 1; + break; + } + ilop = lop; + lop = LIST_NEXT(lop, nfslo_list); + if (lop == NULL) + break; + } + + /* + * Insert the new lock in the list at the appropriate place. + */ + if (!unlock) { + nfscl_insertlock(lp, new_lop, ilop, local); + *new_lopp = NULL; + modified = 1; + } + return (modified); +} + +/* + * This function must be run as a kernel thread. + * It does Renew Ops and recovery, when required. 
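+ * It wakes up about once a second, renews the lease when nfsc_expire
+ * comes due, services delegation recalls and defunct owners, and exits
+ * when NFSCLFLAGS_UMOUNT is set at umount time.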
+ */ +APPLESTATIC void +nfscl_renewthread(struct nfsclclient *clp, NFSPROC_T *p) +{ + struct nfsclowner *owp, *nowp; + struct nfsclopen *op; + struct nfscllockowner *lp, *nlp, *olp; + struct nfscldeleghead dh; + struct nfscllockownerhead lh; + struct nfscldeleg *dp, *ndp; + struct ucred *cred; + u_int32_t clidrev; + int error, cbpathdown, islept, igotlock, ret, clearok; + + cred = newnfs_getcred(); + clp->nfsc_flags |= NFSCLFLAGS_HASTHREAD; + for(;;) { + newnfs_setroot(cred); + cbpathdown = 0; + if (clp->nfsc_flags & NFSCLFLAGS_RECOVER) + nfscl_recover(clp, cred, p); + if (clp->nfsc_expire <= NFSD_MONOSEC && + (clp->nfsc_flags & NFSCLFLAGS_HASCLIENTID)) { + clp->nfsc_expire = NFSD_MONOSEC + clp->nfsc_renew; + clidrev = clp->nfsc_clientidrev; + error = nfsrpc_renew(clp, cred, p); + if (error == NFSERR_CBPATHDOWN) + cbpathdown = 1; + else if (error == NFSERR_STALECLIENTID) + clp->nfsc_flags |= NFSCLFLAGS_RECOVER; + else if (error == NFSERR_EXPIRED) + (void) nfscl_hasexpired(clp, clidrev, p); + } + + LIST_INIT(&lh); + TAILQ_INIT(&dh); + NFSLOCKCLSTATE(); + if (cbpathdown) + /* It's a Total Recall! */ + nfscl_totalrecall(clp); + + /* + * Now, handle defunct owners. + */ + owp = LIST_FIRST(&clp->nfsc_owner); + while (owp != NULL) { + nowp = LIST_NEXT(owp, nfsow_list); + if (LIST_EMPTY(&owp->nfsow_open)) { + if (owp->nfsow_defunct) + nfscl_freeopenowner(owp, 0); + } else { + LIST_FOREACH(op, &owp->nfsow_open, nfso_list) { + lp = LIST_FIRST(&op->nfso_lock); + while (lp != NULL) { + nlp = LIST_NEXT(lp, nfsl_list); + if (lp->nfsl_defunct && + LIST_EMPTY(&lp->nfsl_lock)) { + LIST_FOREACH(olp, &lh, nfsl_list) { + if (!NFSBCMP(olp->nfsl_owner, + lp->nfsl_owner,NFSV4CL_LOCKNAMELEN)) + break; + } + if (olp == NULL) { + LIST_REMOVE(lp, nfsl_list); + LIST_INSERT_HEAD(&lh, lp, nfsl_list); + } else { + nfscl_freelockowner(lp, 0); + } + } + lp = nlp; + } + } + } + owp = nowp; + } + + /* also search the defunct list */ + lp = LIST_FIRST(&clp->nfsc_defunctlockowner); + while (lp != NULL) { + nlp = LIST_NEXT(lp, nfsl_list); + if (lp->nfsl_defunct) { + LIST_FOREACH(olp, &lh, nfsl_list) { + if (!NFSBCMP(olp->nfsl_owner, lp->nfsl_owner, + NFSV4CL_LOCKNAMELEN)) + break; + } + if (olp == NULL) { + LIST_REMOVE(lp, nfsl_list); + LIST_INSERT_HEAD(&lh, lp, nfsl_list); + } else { + nfscl_freelockowner(lp, 0); + } + } + lp = nlp; + } + /* and release defunct lock owners */ + LIST_FOREACH_SAFE(lp, &lh, nfsl_list, nlp) { + nfscl_freelockowner(lp, 0); + } + + /* + * Do the recall on any delegations. To avoid trouble, always + * come back up here after having slept. + */ + igotlock = 0; +tryagain: + dp = TAILQ_FIRST(&clp->nfsc_deleg); + while (dp != NULL) { + ndp = TAILQ_NEXT(dp, nfsdl_list); + if ((dp->nfsdl_flags & NFSCLDL_RECALL)) { + /* + * Wait for outstanding I/O ops to be done. 
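+ * (The exclusive client lock is dropped first so the I/O
+ * can complete, and the scan restarts at tryagain
+ * afterwards, since the lists may have changed while
+ * this thread slept.)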
+ */ + if (dp->nfsdl_rwlock.nfslock_usecnt > 0) { + if (igotlock) { + nfsv4_unlock(&clp->nfsc_lock, 0); + igotlock = 0; + } + dp->nfsdl_rwlock.nfslock_lock |= + NFSV4LOCK_WANTED; + (void) nfsmsleep(&dp->nfsdl_rwlock, + NFSCLSTATEMUTEXPTR, PZERO, "nfscld", + NULL); + goto tryagain; + } + while (!igotlock) { + igotlock = nfsv4_lock(&clp->nfsc_lock, 1, + &islept, NFSCLSTATEMUTEXPTR); + if (islept) + goto tryagain; + } + NFSUNLOCKCLSTATE(); + newnfs_copycred(&dp->nfsdl_cred, cred); + ret = nfscl_recalldeleg(clp, clp->nfsc_nmp, dp, + NULL, cred, p); + if (!ret) { + nfscl_cleandeleg(dp); + TAILQ_REMOVE(&clp->nfsc_deleg, dp, + nfsdl_list); + LIST_REMOVE(dp, nfsdl_hash); + TAILQ_INSERT_HEAD(&dh, dp, nfsdl_list); + nfscl_delegcnt--; + newnfsstats.cldelegates--; + } + NFSLOCKCLSTATE(); + } + dp = ndp; + } + + /* + * Clear out old delegations, if we are above the high water + * mark. Only clear out ones with no state related to them. + * The tailq list is in LRU order. + */ + dp = TAILQ_LAST(&clp->nfsc_deleg, nfscldeleghead); + while (nfscl_delegcnt > nfscl_deleghighwater && dp != NULL) { + ndp = TAILQ_PREV(dp, nfscldeleghead, nfsdl_list); + if (dp->nfsdl_rwlock.nfslock_usecnt == 0 && + dp->nfsdl_rwlock.nfslock_lock == 0 && + dp->nfsdl_timestamp < NFSD_MONOSEC && + !(dp->nfsdl_flags & (NFSCLDL_RECALL | NFSCLDL_ZAPPED | + NFSCLDL_NEEDRECLAIM))) { + clearok = 1; + LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) { + op = LIST_FIRST(&owp->nfsow_open); + if (op != NULL && op->nfso_opencnt > 0) { + clearok = 0; + break; + } + } + if (clearok) { + LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) { + if (!LIST_EMPTY(&lp->nfsl_lock)) { + clearok = 0; + break; + } + } + } + if (clearok) { + TAILQ_REMOVE(&clp->nfsc_deleg, dp, nfsdl_list); + LIST_REMOVE(dp, nfsdl_hash); + TAILQ_INSERT_HEAD(&dh, dp, nfsdl_list); + nfscl_delegcnt--; + newnfsstats.cldelegates--; + } + } + dp = ndp; + } + if (igotlock) + nfsv4_unlock(&clp->nfsc_lock, 0); + NFSUNLOCKCLSTATE(); + + /* + * Delegreturn any delegations cleaned out or recalled. + */ + TAILQ_FOREACH_SAFE(dp, &dh, nfsdl_list, ndp) { + newnfs_copycred(&dp->nfsdl_cred, cred); + (void) nfscl_trydelegreturn(dp, cred, clp->nfsc_nmp, p); + TAILQ_REMOVE(&dh, dp, nfsdl_list); + FREE((caddr_t)dp, M_NFSCLDELEG); + } + +#if defined(APPLEKEXT) || defined(__FreeBSD__) + /* + * Simulate the calls to nfscl_cleanup() when a process + * exits, since the call can't be patched into exit(). + */ + { + struct timespec mytime; + static time_t prevsec = 0; + + NFSGETNANOTIME(&mytime); + if (prevsec != mytime.tv_sec) { + prevsec = mytime.tv_sec; + nfscl_cleanupkext(clp); + } + } +#endif /* APPLEKEXT || __FreeBSD__ */ + + if ((clp->nfsc_flags & NFSCLFLAGS_RECOVER) == 0) + (void) tsleep((caddr_t)clp, PWAIT, "nfscl", hz); + if (clp->nfsc_flags & NFSCLFLAGS_UMOUNT) { + NFSFREECRED(cred); + clp->nfsc_flags &= ~NFSCLFLAGS_HASTHREAD; + wakeup((caddr_t)clp); + return; + } + } +} + +/* + * Initiate state recovery. Called when NFSERR_STALECLIENTID or + * NFSERR_STALESTATEID is received. + */ +APPLESTATIC void +nfscl_initiate_recovery(struct nfsclclient *clp) +{ + + if (clp == NULL) + return; + NFSLOCKCLSTATE(); + clp->nfsc_flags |= NFSCLFLAGS_RECOVER; + NFSUNLOCKCLSTATE(); + wakeup((caddr_t)clp); +} + +/* + * Dump out the state stuff for debugging. 
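+ * The flag arguments select which classes of state are printed:
+ * openowner, opens, lockowner and locks each enable one of them.
+ */
+#if 0	/* Hypothetical usage sketch (not compiled): dump owners and opens only. */
+	nfscl_dumpstate(nmp, 1, 1, 0, 0);
+#endif
+/*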
+ */ +APPLESTATIC void +nfscl_dumpstate(struct nfsmount *nmp, int openowner, int opens, + int lockowner, int locks) +{ + struct nfsclclient *clp; + struct nfsclowner *owp; + struct nfsclopen *op; + struct nfscllockowner *lp; + struct nfscllock *lop; + struct nfscldeleg *dp; + + clp = nmp->nm_clp; + if (clp == NULL) { + printf("nfscl dumpstate NULL clp\n"); + return; + } + NFSLOCKCLSTATE(); + TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) { + LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) { + if (openowner && !LIST_EMPTY(&owp->nfsow_open)) + printf("owner=0x%x 0x%x 0x%x 0x%x seqid=%d\n", + owp->nfsow_owner[0], owp->nfsow_owner[1], + owp->nfsow_owner[2], owp->nfsow_owner[3], + owp->nfsow_seqid); + LIST_FOREACH(op, &owp->nfsow_open, nfso_list) { + if (opens) + printf("open st=0x%x 0x%x 0x%x cnt=%d fh12=0x%x\n", + op->nfso_stateid.other[0], op->nfso_stateid.other[1], + op->nfso_stateid.other[2], op->nfso_opencnt, + op->nfso_fh[12]); + LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) { + if (lockowner) + printf("lckown=0x%x 0x%x 0x%x 0x%x seqid=%d st=0x%x 0x%x 0x%x\n", + lp->nfsl_owner[0], lp->nfsl_owner[1], + lp->nfsl_owner[2], lp->nfsl_owner[3], + lp->nfsl_seqid, + lp->nfsl_stateid.other[0], lp->nfsl_stateid.other[1], + lp->nfsl_stateid.other[2]); + LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) { + if (locks) +#ifdef __FreeBSD__ + printf("lck typ=%d fst=%ju end=%ju\n", + lop->nfslo_type, (intmax_t)lop->nfslo_first, + (intmax_t)lop->nfslo_end); +#else + printf("lck typ=%d fst=%qd end=%qd\n", + lop->nfslo_type, lop->nfslo_first, + lop->nfslo_end); +#endif + } + } + } + } + } + LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) { + if (openowner && !LIST_EMPTY(&owp->nfsow_open)) + printf("owner=0x%x 0x%x 0x%x 0x%x seqid=%d\n", + owp->nfsow_owner[0], owp->nfsow_owner[1], + owp->nfsow_owner[2], owp->nfsow_owner[3], + owp->nfsow_seqid); + LIST_FOREACH(op, &owp->nfsow_open, nfso_list) { + if (opens) + printf("open st=0x%x 0x%x 0x%x cnt=%d fh12=0x%x\n", + op->nfso_stateid.other[0], op->nfso_stateid.other[1], + op->nfso_stateid.other[2], op->nfso_opencnt, + op->nfso_fh[12]); + LIST_FOREACH(lp, &op->nfso_lock, nfsl_list) { + if (lockowner) + printf("lckown=0x%x 0x%x 0x%x 0x%x seqid=%d st=0x%x 0x%x 0x%x\n", + lp->nfsl_owner[0], lp->nfsl_owner[1], + lp->nfsl_owner[2], lp->nfsl_owner[3], + lp->nfsl_seqid, + lp->nfsl_stateid.other[0], lp->nfsl_stateid.other[1], + lp->nfsl_stateid.other[2]); + LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) { + if (locks) +#ifdef __FreeBSD__ + printf("lck typ=%d fst=%ju end=%ju\n", + lop->nfslo_type, (intmax_t)lop->nfslo_first, + (intmax_t)lop->nfslo_end); +#else + printf("lck typ=%d fst=%qd end=%qd\n", + lop->nfslo_type, lop->nfslo_first, + lop->nfslo_end); +#endif + } + } + } + } + NFSUNLOCKCLSTATE(); +} + +/* + * Check for duplicate open owners and opens. + * (Only used as a diagnostic aid.) + */ +APPLESTATIC void +nfscl_dupopen(vnode_t vp, int dupopens) +{ + struct nfsclclient *clp; + struct nfsclowner *owp, *owp2; + struct nfsclopen *op, *op2; + struct nfsfh *nfhp; + + clp = VFSTONFS(vnode_mount(vp))->nm_clp; + if (clp == NULL) { + printf("nfscl dupopen NULL clp\n"); + return; + } + nfhp = VTONFS(vp)->n_fhp; + NFSLOCKCLSTATE(); + + /* + * First, search for duplicate owners. + * These should never happen! 
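+ * (All three scans below are brute force O(n^2) passes over the
+ * owner/open lists, which is acceptable for an occasional diagnostic.)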
+ */ + LIST_FOREACH(owp2, &clp->nfsc_owner, nfsow_list) { + LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) { + if (owp != owp2 && + !NFSBCMP(owp->nfsow_owner, owp2->nfsow_owner, + NFSV4CL_LOCKNAMELEN)) { + NFSUNLOCKCLSTATE(); + printf("DUP OWNER\n"); + nfscl_dumpstate(VFSTONFS(vnode_mount(vp)), 1, 1, 0, 0); + return; + } + } + } + + /* + * Now, search for duplicate stateids. + * These shouldn't happen, either. + */ + LIST_FOREACH(owp2, &clp->nfsc_owner, nfsow_list) { + LIST_FOREACH(op2, &owp2->nfsow_open, nfso_list) { + LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) { + LIST_FOREACH(op, &owp->nfsow_open, nfso_list) { + if (op != op2 && + (op->nfso_stateid.other[0] != 0 || + op->nfso_stateid.other[1] != 0 || + op->nfso_stateid.other[2] != 0) && + op->nfso_stateid.other[0] == op2->nfso_stateid.other[0] && + op->nfso_stateid.other[1] == op2->nfso_stateid.other[1] && + op->nfso_stateid.other[2] == op2->nfso_stateid.other[2]) { + NFSUNLOCKCLSTATE(); + printf("DUP STATEID\n"); + nfscl_dumpstate(VFSTONFS(vnode_mount(vp)), 1, 1, 0, + 0); + return; + } + } + } + } + } + + /* + * Now search for duplicate opens. + * Duplicate opens for the same owner + * should never occur. Other duplicates are + * possible and are checked for if "dupopens" + * is true. + */ + LIST_FOREACH(owp2, &clp->nfsc_owner, nfsow_list) { + LIST_FOREACH(op2, &owp2->nfsow_open, nfso_list) { + if (nfhp->nfh_len == op2->nfso_fhlen && + !NFSBCMP(nfhp->nfh_fh, op2->nfso_fh, nfhp->nfh_len)) { + LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) { + LIST_FOREACH(op, &owp->nfsow_open, nfso_list) { + if (op != op2 && nfhp->nfh_len == op->nfso_fhlen && + !NFSBCMP(nfhp->nfh_fh, op->nfso_fh, nfhp->nfh_len) && + (!NFSBCMP(op->nfso_own->nfsow_owner, + op2->nfso_own->nfsow_owner, NFSV4CL_LOCKNAMELEN) || + dupopens)) { + if (!NFSBCMP(op->nfso_own->nfsow_owner, + op2->nfso_own->nfsow_owner, NFSV4CL_LOCKNAMELEN)) { + NFSUNLOCKCLSTATE(); + printf("BADDUP OPEN\n"); + } else { + NFSUNLOCKCLSTATE(); + printf("DUP OPEN\n"); + } + nfscl_dumpstate(VFSTONFS(vnode_mount(vp)), 1, 1, + 0, 0); + return; + } + } + } + } + } + } + NFSUNLOCKCLSTATE(); +} + +/* + * During close, find an open that needs to be dereferenced and + * dereference it. If there are no more opens for this file, + * return the list of opens, so they can be closed on the + * server. As such, opens aren't closed on the server until + * all the opens for the file are closed off. + * This is the safe way, since it is difficult to identify + * which open the close is for. + * If it returns 0 for success, there will be a referenced + * clp returned via clpp and a list of opens to close/free + * on ohp. + */ +APPLESTATIC int +nfscl_getclose(vnode_t vp, struct ucred *cred, NFSPROC_T *p, + struct nfsclclient **clpp, struct nfsclopenhead *ohp) +{ + struct nfsclclient *clp; + struct nfsclowner *owp, *nowp; + struct nfsclopen *op, *nop; + struct nfscldeleg *dp; + struct nfsfh *nfhp; + int error, notdecr, candelete; + + error = nfscl_getcl(vp, cred, p, &clp); + if (error) + return (error); + *clpp = clp; + + LIST_INIT(ohp); + nfhp = VTONFS(vp)->n_fhp; + notdecr = 1; + NFSLOCKCLSTATE(); + /* + * First, look for one under a delegation that was locally issued + * and just decrement the opencnt for it. Since all my Opens against + * the server are DENY_NONE, I don't see a problem with hanging + * onto them. (It is much easier to use one of the extant Opens + * that I already have on the server when a Delegation is recalled + * than to do fresh Opens.) Someday, I might need to rethink this, but.. 
+ */ + dp = nfscl_finddeleg(clp, nfhp->nfh_fh, nfhp->nfh_len); + if (dp != NULL) { + LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) { + op = LIST_FIRST(&owp->nfsow_open); + if (op != NULL) { + /* + * Since a delegation is for a file, there + * should never be more than one open for + * each openowner. + */ + if (LIST_NEXT(op, nfso_list) != NULL) + panic("nfscdeleg opens"); + if (notdecr && op->nfso_opencnt > 0) { + notdecr = 0; + op->nfso_opencnt--; + break; + } + } + } + } + + /* Now process the opens against the server. */ + LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) { + op = LIST_FIRST(&owp->nfsow_open); + while (op != NULL) { + nop = LIST_NEXT(op, nfso_list); + if (op->nfso_fhlen == nfhp->nfh_len && + !NFSBCMP(op->nfso_fh, nfhp->nfh_fh, nfhp->nfh_len)) { + /* Found an open, decrement cnt if possible */ + if (notdecr && op->nfso_opencnt > 0) { + notdecr = 0; + op->nfso_opencnt--; + } + /* + * There are more opens, so just return after + * putting any opens already found back in the + * state list. + */ + if (op->nfso_opencnt > 0) { + /* reuse op, since we're returning */ + op = LIST_FIRST(ohp); + while (op != NULL) { + nop = LIST_NEXT(op, nfso_list); + LIST_REMOVE(op, nfso_list); + LIST_INSERT_HEAD(&op->nfso_own->nfsow_open, + op, nfso_list); + op = nop; + } + NFSUNLOCKCLSTATE(); + LIST_INIT(ohp); + return (0); + } + + /* + * Move this entry to the list of opens to be returned. + * (If we find other open(s) still in use, it will be + * put back in the state list in the code just above.) + */ + LIST_REMOVE(op, nfso_list); + LIST_INSERT_HEAD(ohp, op, nfso_list); + } + op = nop; + } + } + + if (dp != NULL) { + /* + * If we are flushing all writes against the server for this + * file upon close, we do not need to keep the local opens + * (against the delegation) if they all have an opencnt == 0, + * since there are now no opens on the file and no dirty blocks. + * If the writes aren't being flushed upon close, + * a test for "no dirty blocks to write back" would have to + * be added to this code. + */ + candelete = 1; + LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) { + op = LIST_FIRST(&owp->nfsow_open); + if (op != NULL && op->nfso_opencnt > 0) { + candelete = 0; + break; + } + } + if (candelete) { + LIST_FOREACH_SAFE(owp, &dp->nfsdl_owner, nfsow_list, + nowp) { + op = LIST_FIRST(&owp->nfsow_open); + if (op != NULL) + nfscl_freeopen(op, 1); + nfscl_freeopenowner(owp, 1); + } + } + } + NFSUNLOCKCLSTATE(); + if (notdecr) + printf("nfscl: never fnd open\n"); + return (0); +} + +/* + * Return all delegations on this client. + * (Must be called with client sleep lock.) + */ +static void +nfscl_delegreturnall(struct nfsclclient *clp, NFSPROC_T *p) +{ + struct nfscldeleg *dp, *ndp; + struct ucred *cred; + + cred = newnfs_getcred(); + TAILQ_FOREACH_SAFE(dp, &clp->nfsc_deleg, nfsdl_list, ndp) { + nfscl_cleandeleg(dp); + (void) nfscl_trydelegreturn(dp, cred, clp->nfsc_nmp, p); + nfscl_freedeleg(&clp->nfsc_deleg, dp); + } + NFSFREECRED(cred); +} + +/* + * Do a callback RPC. 
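+ * Only CB_GETATTR and CB_RECALL are handled here; any other operation
+ * in the compound gets NFSERR_OPILLEGAL in the reply.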
+ */ +APPLESTATIC void +nfscl_docb(struct nfsrv_descript *nd, NFSPROC_T *p) +{ + int i, op; + u_int32_t *tl; + struct nfsclclient *clp; + struct nfscldeleg *dp = NULL; + int numops, taglen = -1, error = 0, trunc, ret = 0; + u_int32_t minorvers, retops = 0, *retopsp = NULL, *repp, cbident; + u_char tag[NFSV4_SMALLSTR + 1], *tagstr; + vnode_t vp = NULL; + struct nfsnode *np; + struct vattr va; + struct nfsfh *nfhp; + mount_t mp; + nfsattrbit_t attrbits, rattrbits; + nfsv4stateid_t stateid; + + nfsrvd_rephead(nd); + NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED); + taglen = fxdr_unsigned(int, *tl); + if (taglen < 0) { + error = EBADRPC; + goto nfsmout; + } + if (taglen <= NFSV4_SMALLSTR) + tagstr = tag; + else + tagstr = malloc(taglen + 1, M_TEMP, M_WAITOK); + error = nfsrv_mtostr(nd, tagstr, taglen); + if (error) { + if (taglen > NFSV4_SMALLSTR) + free(tagstr, M_TEMP); + taglen = -1; + goto nfsmout; + } + (void) nfsm_strtom(nd, tag, taglen); + if (taglen > NFSV4_SMALLSTR) { + free(tagstr, M_TEMP); + } + NFSM_BUILD(retopsp, u_int32_t *, NFSX_UNSIGNED); + NFSM_DISSECT(tl, u_int32_t *, 3 * NFSX_UNSIGNED); + minorvers = fxdr_unsigned(u_int32_t, *tl++); + if (minorvers != NFSV4_MINORVERSION) + nd->nd_repstat = NFSERR_MINORVERMISMATCH; + cbident = fxdr_unsigned(u_int32_t, *tl++); + if (nd->nd_repstat) + numops = 0; + else + numops = fxdr_unsigned(int, *tl); + /* + * Loop around doing the sub ops. + */ + for (i = 0; i < numops; i++) { + NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED); + NFSM_BUILD(repp, u_int32_t *, 2 * NFSX_UNSIGNED); + *repp++ = *tl; + op = fxdr_unsigned(int, *tl); + if (op < NFSV4OP_CBGETATTR || op > NFSV4OP_CBRECALL) { + nd->nd_repstat = NFSERR_OPILLEGAL; + *repp = nfscl_errmap(nd); + retops++; + break; + } + nd->nd_procnum = op; + newnfsstats.cbrpccnt[nd->nd_procnum]++; + switch (op) { + case NFSV4OP_CBGETATTR: + clp = NULL; + error = nfsm_getfh(nd, &nfhp); + if (!error) + error = nfsrv_getattrbits(nd, &attrbits, + NULL, NULL); + if (!error) { + mp = nfscl_getmnt(cbident); + if (mp == NULL) + error = NFSERR_SERVERFAULT; + } + if (!error) { + dp = NULL; + NFSLOCKCLSTATE(); + clp = nfscl_findcl(VFSTONFS(mp)); + if (clp != NULL) + dp = nfscl_finddeleg(clp, nfhp->nfh_fh, + nfhp->nfh_len); + NFSUNLOCKCLSTATE(); + if (dp == NULL) + error = NFSERR_SERVERFAULT; + } + if (!error) { + ret = nfscl_ngetreopen(mp, nfhp->nfh_fh, + nfhp->nfh_len, p, &np); + if (!ret) + vp = NFSTOV(np); + } + if (nfhp != NULL) + FREE((caddr_t)nfhp, M_NFSFH); + if (!error) { + NFSZERO_ATTRBIT(&rattrbits); + if (NFSISSET_ATTRBIT(&attrbits, + NFSATTRBIT_SIZE)) { + if (!ret) + va.va_size = np->n_size; + else + va.va_size = dp->nfsdl_size; + NFSSETBIT_ATTRBIT(&rattrbits, + NFSATTRBIT_SIZE); + } + if (NFSISSET_ATTRBIT(&attrbits, + NFSATTRBIT_CHANGE)) { + va.va_filerev = dp->nfsdl_change; + if (ret || (np->n_flag & NDELEGMOD)) + va.va_filerev++; + NFSSETBIT_ATTRBIT(&rattrbits, + NFSATTRBIT_CHANGE); + } + (void) nfsv4_fillattr(nd, NULL, NULL, &va, + NULL, 0, &rattrbits, NULL, NULL, 0, 0); + if (!ret) + vrele(vp); + } + break; + case NFSV4OP_CBRECALL: + clp = NULL; + NFSM_DISSECT(tl, u_int32_t *, NFSX_STATEID + + NFSX_UNSIGNED); + stateid.seqid = *tl++; + NFSBCOPY((caddr_t)tl, (caddr_t)stateid.other, + NFSX_STATEIDOTHER); + tl += (NFSX_STATEIDOTHER / NFSX_UNSIGNED); + trunc = fxdr_unsigned(int, *tl); + error = nfsm_getfh(nd, &nfhp); + if (!error) { + mp = nfscl_getmnt(cbident); + if (mp == NULL) + error = NFSERR_SERVERFAULT; + } + if (!error) { + NFSLOCKCLSTATE(); + clp = nfscl_findcl(VFSTONFS(mp)); + if (clp != NULL) { + 
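+ /*
+ * Just flag the delegation for recall; the renew
+ * thread notices NFSCLDL_RECALL and does the
+ * actual DelegReturn.
+ */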
dp = nfscl_finddeleg(clp, nfhp->nfh_fh, + nfhp->nfh_len); + if (dp != NULL) { + dp->nfsdl_flags |= + NFSCLDL_RECALL; + wakeup((caddr_t)clp); + } + } else { + error = NFSERR_SERVERFAULT; + } + NFSUNLOCKCLSTATE(); + } + if (nfhp != NULL) + FREE((caddr_t)nfhp, M_NFSFH); + break; + }; + if (error) { + if (error == EBADRPC || error == NFSERR_BADXDR) { + nd->nd_repstat = NFSERR_BADXDR; + } else { + nd->nd_repstat = error; + } + error = 0; + } + retops++; + if (nd->nd_repstat) { + *repp = nfscl_errmap(nd); + break; + } else + *repp = 0; /* NFS4_OK */ + } +nfsmout: + if (error) { + if (error == EBADRPC || error == NFSERR_BADXDR) + nd->nd_repstat = NFSERR_BADXDR; + else + printf("nfsv4 comperr1=%d\n", error); + } + if (taglen == -1) { + NFSM_BUILD(tl, u_int32_t *, 2 * NFSX_UNSIGNED); + *tl++ = 0; + *tl = 0; + } else { + *retopsp = txdr_unsigned(retops); + } + *nd->nd_errp = nfscl_errmap(nd); +} + +/* + * Generate the next cbident value. Basically just increment a static value + * and then check that it isn't already in the list, if it has wrapped around. + */ +static u_int32_t +nfscl_nextcbident(void) +{ + struct nfsclclient *clp; + int matched; + static u_int32_t nextcbident = 0; + static int haswrapped = 0; + + nextcbident++; + if (nextcbident == 0) + haswrapped = 1; + if (haswrapped) { + /* + * Search the clientid list for one already using this cbident. + */ + do { + matched = 0; + NFSLOCKCLSTATE(); + LIST_FOREACH(clp, &nfsclhead, nfsc_list) { + if (clp->nfsc_cbident == nextcbident) { + matched = 1; + break; + } + } + NFSUNLOCKCLSTATE(); + if (matched == 1) + nextcbident++; + } while (matched); + } + return (nextcbident); +} + +/* + * Get the mount point related to a given cbident. + */ +static mount_t +nfscl_getmnt(u_int32_t cbident) +{ + struct nfsclclient *clp; + struct nfsmount *nmp; + + NFSLOCKCLSTATE(); + LIST_FOREACH(clp, &nfsclhead, nfsc_list) { + if (clp->nfsc_cbident == cbident) + break; + } + if (clp == NULL) { + NFSUNLOCKCLSTATE(); + return (NULL); + } + nmp = clp->nfsc_nmp; + NFSUNLOCKCLSTATE(); + return (nmp->nm_mountp); +} + +/* + * Search for a lock conflict locally on the client. A conflict occurs if + * - not same owner and overlapping byte range and at least one of them is + * a write lock or this is an unlock. + */ +static int +nfscl_localconflict(struct nfsclclient *clp, struct nfscllock *nlop, + u_int8_t *own, struct nfscldeleg *dp, struct nfscllock **lopp) +{ + struct nfsclowner *owp; + struct nfsclopen *op; + int ret; + + if (dp != NULL) { + ret = nfscl_checkconflict(&dp->nfsdl_lock, nlop, own, lopp); + if (ret) + return (ret); + } + LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) { + LIST_FOREACH(op, &owp->nfsow_open, nfso_list) { + ret = nfscl_checkconflict(&op->nfso_lock, nlop, own, + lopp); + if (ret) + return (ret); + } + } + return (0); +} + +static int +nfscl_checkconflict(struct nfscllockownerhead *lhp, struct nfscllock *nlop, + u_int8_t *own, struct nfscllock **lopp) +{ + struct nfscllockowner *lp; + struct nfscllock *lop; + + LIST_FOREACH(lp, lhp, nfsl_list) { + if (NFSBCMP(lp->nfsl_owner, own, NFSV4CL_LOCKNAMELEN)) { + LIST_FOREACH(lop, &lp->nfsl_lock, nfslo_list) { + if (lop->nfslo_first >= nlop->nfslo_end) + break; + if (lop->nfslo_end <= nlop->nfslo_first) + continue; + if (lop->nfslo_type == F_WRLCK || + nlop->nfslo_type == F_WRLCK || + nlop->nfslo_type == F_UNLCK) { + if (lopp != NULL) + *lopp = lop; + return (NFSERR_DENIED); + } + } + } + } + return (0); +} + +/* + * Check for a local conflicting lock. 
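+ * Returns 0 when no local conflict is found (the caller must still ask
+ * the server), EACCES when a conflicting local lock is found and *fl
+ * has been filled in to describe it, NFSERR_INVAL for a bad range, or
+ * -1 when a delegation guarantees that no conflicting lock can exist
+ * on the server, so no RPC is needed at all.
+ */
+#if 0	/* Hypothetical F_GETLK sketch (not compiled): how a caller might use it. */
+	error = nfscl_lockt(vp, clp, off, len, fl, p);
+	if (error == -1)
+		error = 0;	/* fl->l_type is already F_UNLCK; skip the LockT RPC */
+#endif
+/*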
+ */ +APPLESTATIC int +nfscl_lockt(vnode_t vp, struct nfsclclient *clp, u_int64_t off, + u_int64_t len, struct flock *fl, NFSPROC_T *p) +{ + struct nfscllock *lop, nlck; + struct nfscldeleg *dp; + struct nfsnode *np; + u_int8_t own[NFSV4CL_LOCKNAMELEN]; + int error; + + nlck.nfslo_type = fl->l_type; + nlck.nfslo_first = off; + if (len == NFS64BITSSET) { + nlck.nfslo_end = NFS64BITSSET; + } else { + nlck.nfslo_end = off + len; + if (nlck.nfslo_end <= nlck.nfslo_first) + return (NFSERR_INVAL); + } + np = VTONFS(vp); + nfscl_filllockowner(p, own); + NFSLOCKCLSTATE(); + dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len); + error = nfscl_localconflict(clp, &nlck, own, dp, &lop); + if (error == NFSERR_DENIED) + error = EACCES; + if (error) { + fl->l_whence = SEEK_SET; + fl->l_start = lop->nfslo_first; + if (lop->nfslo_end == NFS64BITSSET) + fl->l_len = 0; + else + fl->l_len = lop->nfslo_end - lop->nfslo_first; + fl->l_pid = (pid_t)0; + fl->l_type = lop->nfslo_type; + } else if (dp != NULL && ((dp->nfsdl_flags & NFSCLDL_WRITE) || + fl->l_type == F_RDLCK)) { + /* + * The delegation ensures that there isn't a conflicting + * lock on the server, so return -1 to indicate an RPC + * isn't required. + */ + fl->l_type = F_UNLCK; + error = -1; + } + NFSUNLOCKCLSTATE(); + return (error); +} + +/* + * Handle Recall of a delegation. + * The clp must be exclusive locked when this is called. + */ +static int +nfscl_recalldeleg(struct nfsclclient *clp, struct nfsmount *nmp, + struct nfscldeleg *dp, vnode_t vp, struct ucred *cred, NFSPROC_T *p) +{ + struct nfsclowner *owp, *lowp, *nowp; + struct nfsclopen *op, *lop; + struct nfscllockowner *lp; + struct nfscllock *lckp; + struct nfsnode *np; + int error = 0, ret, gotvp = 0; + + if (vp == NULL) { + /* + * First, get a vnode for the file. This is needed to do RPCs. + */ + ret = nfscl_ngetreopen(nmp->nm_mountp, dp->nfsdl_fh, + dp->nfsdl_fhlen, p, &np); + if (ret) { + /* + * File isn't open, so nothing to move over to the + * server. + */ + return (0); + } + vp = NFSTOV(np); + gotvp = 1; + } else { + np = VTONFS(vp); + } + dp->nfsdl_flags &= ~NFSCLDL_MODTIMESET; + NFSINVALATTRCACHE(np); + + /* + * Ok, if it's a write delegation, flush data to the server, so + * that close/open consistency is retained. + */ + NFSLOCKNODE(np); + if ((dp->nfsdl_flags & NFSCLDL_WRITE) && (np->n_flag & NMODIFIED)) { +#ifdef APPLE + OSBitOrAtomic((u_int32_t)NDELEGRECALL, (UInt32 *)&np->n_flag); +#else + np->n_flag |= NDELEGRECALL; +#endif + NFSUNLOCKNODE(np); + (void) ncl_flush(vp, MNT_WAIT, cred, p, 1); + NFSLOCKNODE(np); +#ifdef APPLE + OSBitAndAtomic((int32_t)~(NMODIFIED | NDELEGRECALL), (UInt32 *)&np->n_flag); +#else + np->n_flag &= ~(NMODIFIED | NDELEGRECALL); +#endif + } + NFSUNLOCKNODE(np); + + /* + * Now, for each openowner with opens issued locally, move them + * over to state against the server. + */ + LIST_FOREACH(lowp, &dp->nfsdl_owner, nfsow_list) { + lop = LIST_FIRST(&lowp->nfsow_open); + if (lop != NULL) { + if (LIST_NEXT(lop, nfso_list) != NULL) + panic("nfsdlg mult opens"); + /* + * Look for the same openowner against the server. 
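+ * (If one exists, the locally issued open is folded into it with
+ * nfscl_moveopen(); otherwise a new openowner is created further down
+ * and the open is moved over to that.)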
+ */ + LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) { + if (!NFSBCMP(lowp->nfsow_owner, + owp->nfsow_owner, NFSV4CL_LOCKNAMELEN)) { + newnfs_copycred(&dp->nfsdl_cred, cred); + ret = nfscl_moveopen(vp, clp, nmp, lop, + owp, dp, cred, p); + if (ret == NFSERR_STALECLIENTID || + ret == NFSERR_STALEDONTRECOVER) { + if (gotvp) + vrele(vp); + return (ret); + } + if (ret) { + nfscl_freeopen(lop, 1); + if (!error) + error = ret; + } + break; + } + } + + /* + * If no openowner found, create one and get an open + * for it. + */ + if (owp == NULL) { + MALLOC(nowp, struct nfsclowner *, + sizeof (struct nfsclowner), M_NFSCLOWNER, + M_WAITOK); + nfscl_newopen(clp, NULL, &owp, &nowp, &op, + NULL, lowp->nfsow_owner, dp->nfsdl_fh, + dp->nfsdl_fhlen, NULL); + newnfs_copycred(&dp->nfsdl_cred, cred); + ret = nfscl_moveopen(vp, clp, nmp, lop, + owp, dp, cred, p); + if (ret) { + nfscl_freeopenowner(owp, 0); + if (ret == NFSERR_STALECLIENTID || + ret == NFSERR_STALEDONTRECOVER) { + if (gotvp) + vrele(vp); + return (ret); + } + if (ret) { + nfscl_freeopen(lop, 1); + if (!error) + error = ret; + } + } + } + } + } + + /* + * Now, get byte range locks for any locks done locally. + */ + LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) { + LIST_FOREACH(lckp, &lp->nfsl_lock, nfslo_list) { + newnfs_copycred(&dp->nfsdl_cred, cred); + ret = nfscl_relock(vp, clp, nmp, lp, lckp, cred, p); + if (ret == NFSERR_STALESTATEID || + ret == NFSERR_STALEDONTRECOVER || + ret == NFSERR_STALECLIENTID) { + if (gotvp) + vrele(vp); + return (ret); + } + if (ret && !error) + error = ret; + } + } + if (gotvp) + vrele(vp); + return (error); +} + +/* + * Move a locally issued open over to an owner on the state list. + * SIDE EFFECT: If it needs to sleep (do an rpc), it unlocks clstate and + * returns with it unlocked. + */ +static int +nfscl_moveopen(vnode_t vp, struct nfsclclient *clp, struct nfsmount *nmp, + struct nfsclopen *lop, struct nfsclowner *owp, struct nfscldeleg *dp, + struct ucred *cred, NFSPROC_T *p) +{ + struct nfsclopen *op, *nop; + struct nfscldeleg *ndp; + struct nfsnode *np; + int error = 0, newone; + + /* + * First, look for an appropriate open. If found, just increment the + * opencnt in it. + */ + LIST_FOREACH(op, &owp->nfsow_open, nfso_list) { + if ((op->nfso_mode & lop->nfso_mode) == lop->nfso_mode && + op->nfso_fhlen == lop->nfso_fhlen && + !NFSBCMP(op->nfso_fh, lop->nfso_fh, op->nfso_fhlen)) { + op->nfso_opencnt += lop->nfso_opencnt; + nfscl_freeopen(lop, 1); + return (0); + } + } + + /* No appropriate open, so we have to do one against the server. */ + np = VTONFS(vp); + MALLOC(nop, struct nfsclopen *, sizeof (struct nfsclopen) + + lop->nfso_fhlen - 1, M_NFSCLOPEN, M_WAITOK); + newone = 0; + nfscl_newopen(clp, NULL, &owp, NULL, &op, &nop, owp->nfsow_owner, + lop->nfso_fh, lop->nfso_fhlen, &newone); + ndp = dp; + error = nfscl_tryopen(nmp, vp, np->n_v4->n4_data, np->n_v4->n4_fhlen, + lop->nfso_fh, lop->nfso_fhlen, lop->nfso_mode, op, + NFS4NODENAME(np->n_v4), np->n_v4->n4_namelen, &ndp, 0, 0, cred, p); + if (error) { + if (newone) + nfscl_freeopen(op, 0); + } else { + if (newone) + newnfs_copyincred(cred, &op->nfso_cred); + op->nfso_mode |= lop->nfso_mode; + op->nfso_opencnt += lop->nfso_opencnt; + nfscl_freeopen(lop, 1); + } + if (nop != NULL) + FREE((caddr_t)nop, M_NFSCLOPEN); + if (ndp != NULL) { + /* + * What should I do with the returned delegation, since the + * delegation is being recalled? For now, just printf and + * throw it away. 
+ */ + printf("Moveopen returned deleg\n"); + FREE((caddr_t)ndp, M_NFSCLDELEG); + } + return (error); +} + +/* + * Recall all delegations on this client. + */ +static void +nfscl_totalrecall(struct nfsclclient *clp) +{ + struct nfscldeleg *dp; + + TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) + dp->nfsdl_flags |= NFSCLDL_RECALL; +} + +/* + * Relock byte ranges. Called for delegation recall and state expiry. + */ +static int +nfscl_relock(vnode_t vp, struct nfsclclient *clp, struct nfsmount *nmp, + struct nfscllockowner *lp, struct nfscllock *lop, struct ucred *cred, + NFSPROC_T *p) +{ + struct nfscllockowner *nlp; + struct nfsfh *nfhp; + u_int64_t off, len; + u_int32_t clidrev = 0; + int error, newone, donelocally; + + off = lop->nfslo_first; + len = lop->nfslo_end - lop->nfslo_first; + error = nfscl_getbytelock(vp, off, len, lop->nfslo_type, cred, p, + clp, 1, lp->nfsl_owner, lp->nfsl_openowner, &nlp, &newone, + &donelocally); + if (error || donelocally) + return (error); + if (nmp->nm_clp != NULL) + clidrev = nmp->nm_clp->nfsc_clientidrev; + else + clidrev = 0; + nfhp = VTONFS(vp)->n_fhp; + error = nfscl_trylock(nmp, vp, nfhp->nfh_fh, + nfhp->nfh_len, nlp, newone, 0, off, + len, lop->nfslo_type, cred, p); + if (error) + nfscl_freelockowner(nlp, 0); + return (error); +} + +/* + * Called to re-open a file. Basically get a vnode for the file handle + * and then call nfsrpc_openrpc() to do the rest. + */ +static int +nfsrpc_reopen(struct nfsmount *nmp, u_int8_t *fhp, int fhlen, + u_int32_t mode, struct nfsclopen *op, struct nfscldeleg **dpp, + struct ucred *cred, NFSPROC_T *p) +{ + struct nfsnode *np; + vnode_t vp; + int error; + + error = nfscl_ngetreopen(nmp->nm_mountp, fhp, fhlen, p, &np); + if (error) + return (error); + vp = NFSTOV(np); + if (np->n_v4 != NULL) { + error = nfscl_tryopen(nmp, vp, np->n_v4->n4_data, + np->n_v4->n4_fhlen, fhp, fhlen, mode, op, + NFS4NODENAME(np->n_v4), np->n_v4->n4_namelen, dpp, 0, 0, + cred, p); + } else { + error = EINVAL; + } + vrele(vp); + return (error); +} + +/* + * Try an open against the server. Just call nfsrpc_openrpc(), retrying while + * NFSERR_DELAY. Also, try system credentials, if the passed in credentials + * fail. + */ +static int +nfscl_tryopen(struct nfsmount *nmp, vnode_t vp, u_int8_t *fhp, int fhlen, + u_int8_t *newfhp, int newfhlen, u_int32_t mode, struct nfsclopen *op, + u_int8_t *name, int namelen, struct nfscldeleg **ndpp, + int reclaim, u_int32_t delegtype, struct ucred *cred, NFSPROC_T *p) +{ + int error; + + do { + error = nfsrpc_openrpc(nmp, vp, fhp, fhlen, newfhp, newfhlen, + mode, op, name, namelen, ndpp, reclaim, delegtype, cred, p, + 0, 0); + if (error == NFSERR_DELAY) + (void) nfs_catnap(PZERO, "nfstryop"); + } while (error == NFSERR_DELAY); + if (error == EAUTH || error == EACCES) { + /* Try again using system credentials */ + newnfs_setroot(cred); + do { + error = nfsrpc_openrpc(nmp, vp, fhp, fhlen, newfhp, + newfhlen, mode, op, name, namelen, ndpp, reclaim, + delegtype, cred, p, 1, 0); + if (error == NFSERR_DELAY) + (void) nfs_catnap(PZERO, "nfstryop"); + } while (error == NFSERR_DELAY); + } + return (error); +} + +/* + * Try a byte range lock. Just loop on nfsrpc_lock() while it returns + * NFSERR_DELAY. Also, retry with system credentials, if the provided + * cred don't work. 
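nfscl_tryopen() above is one instance of the client's standard resilience pattern: loop on NFSERR_DELAY with a short nap between attempts, then repeat the whole exchange once with system credentials if the caller's credentials are rejected. A hedged userspace rendering of that shape; ERR_DELAY, do_rpc() and catnap() are stand-ins, not kernel interfaces:

#include <errno.h>
#include <stdio.h>
#include <time.h>

#define ERR_DELAY 10008         /* stand-in for NFSERR_DELAY */

/* Hypothetical RPC: reports "busy" twice, then succeeds. */
static int
do_rpc(int use_syscred)
{
    static int calls;

    (void)use_syscred;
    return (++calls < 3) ? ERR_DELAY : 0;
}

static void
catnap(void)
{
    struct timespec ts = { 0, 100 * 1000 * 1000 };  /* ~100 ms */

    (void)nanosleep(&ts, NULL);
}

static int
try_rpc(void)
{
    int error;

    /* Spin politely while the server says "try again later". */
    do {
        error = do_rpc(0);
        if (error == ERR_DELAY)
            catnap();
    } while (error == ERR_DELAY);

    /* One more full attempt with privileged credentials. */
    if (error == EACCES) {
        do {
            error = do_rpc(1);
            if (error == ERR_DELAY)
                catnap();
        } while (error == ERR_DELAY);
    }
    return error;
}

int
main(void)
{
    printf("rpc result: %d\n", try_rpc());  /* 0, after two naps */
    return 0;
}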
+ */ +static int +nfscl_trylock(struct nfsmount *nmp, vnode_t vp, u_int8_t *fhp, + int fhlen, struct nfscllockowner *nlp, int newone, int reclaim, + u_int64_t off, u_int64_t len, short type, struct ucred *cred, NFSPROC_T *p) +{ + struct nfsrv_descript nfsd, *nd = &nfsd; + int error; + + do { + error = nfsrpc_lock(nd, nmp, vp, fhp, fhlen, nlp, newone, + reclaim, off, len, type, cred, p, 0); + if (!error && nd->nd_repstat == NFSERR_DELAY) + (void) nfs_catnap(PZERO, "nfstrylck"); + } while (!error && nd->nd_repstat == NFSERR_DELAY); + if (!error) + error = nd->nd_repstat; + if (error == EAUTH || error == EACCES) { + /* Try again using root credentials */ + newnfs_setroot(cred); + do { + error = nfsrpc_lock(nd, nmp, vp, fhp, fhlen, nlp, + newone, reclaim, off, len, type, cred, p, 1); + if (!error && nd->nd_repstat == NFSERR_DELAY) + (void) nfs_catnap(PZERO, "nfstrylck"); + } while (!error && nd->nd_repstat == NFSERR_DELAY); + if (!error) + error = nd->nd_repstat; + } + return (error); +} + +/* + * Try a delegreturn against the server. Just call nfsrpc_delegreturn(), + * retrying while NFSERR_DELAY. Also, try system credentials, if the passed in + * credentials fail. + */ +static int +nfscl_trydelegreturn(struct nfscldeleg *dp, struct ucred *cred, + struct nfsmount *nmp, NFSPROC_T *p) +{ + int error; + + do { + error = nfsrpc_delegreturn(dp, cred, nmp, p, 0); + if (error == NFSERR_DELAY) + (void) nfs_catnap(PZERO, "nfstrydp"); + } while (error == NFSERR_DELAY); + if (error == EAUTH || error == EACCES) { + /* Try again using system credentials */ + newnfs_setroot(cred); + do { + error = nfsrpc_delegreturn(dp, cred, nmp, p, 1); + if (error == NFSERR_DELAY) + (void) nfs_catnap(PZERO, "nfstrydp"); + } while (error == NFSERR_DELAY); + } + return (error); +} + +/* + * Try a close against the server. Just call nfsrpc_closerpc(), + * retrying while NFSERR_DELAY. Also, try system credentials, if the passed in + * credentials fail. + */ +APPLESTATIC int +nfscl_tryclose(struct nfsclopen *op, struct ucred *cred, + struct nfsmount *nmp, NFSPROC_T *p) +{ + struct nfsrv_descript nfsd, *nd = &nfsd; + int error; + + do { + error = nfsrpc_closerpc(nd, nmp, op, cred, p, 0); + if (error == NFSERR_DELAY) + (void) nfs_catnap(PZERO, "nfstrycl"); + } while (error == NFSERR_DELAY); + if (error == EAUTH || error == EACCES) { + /* Try again using system credentials */ + newnfs_setroot(cred); + do { + error = nfsrpc_closerpc(nd, nmp, op, cred, p, 1); + if (error == NFSERR_DELAY) + (void) nfs_catnap(PZERO, "nfstrycl"); + } while (error == NFSERR_DELAY); + } + return (error); +} + +/* + * Decide if a delegation on a file permits close without flushing writes + * to the server. This might be a big performance win in some environments. + * (Not useful until the client does caching on local stable storage.) + */ +APPLESTATIC int +nfscl_mustflush(vnode_t vp) +{ + struct nfsclclient *clp; + struct nfscldeleg *dp; + struct nfsnode *np; + struct nfsmount *nmp; + + np = VTONFS(vp); + nmp = VFSTONFS(vnode_mount(vp)); + if (!NFSHASNFSV4(nmp)) + return (1); + NFSLOCKCLSTATE(); + clp = nfscl_findcl(nmp); + if (clp == NULL) { + NFSUNLOCKCLSTATE(); + return (1); + } + dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len); + if (dp != NULL && (dp->nfsdl_flags & (NFSCLDL_WRITE | NFSCLDL_RECALL)) + == NFSCLDL_WRITE && + (dp->nfsdl_sizelimit >= np->n_size || + !NFSHASSTRICT3530(nmp))) { + NFSUNLOCKCLSTATE(); + return (0); + } + NFSUNLOCKCLSTATE(); + return (1); +} + +/* + * See if a (write) delegation exists for this file. 
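nfscl_mustflush() above reduces to a single predicate: a close may skip flushing only when the client holds a write delegation that is not being recalled and, when strict RFC 3530 size checking applies, the delegated size limit still covers the file. Restated as a plain boolean; the flag names mirror the kernel's but the types are illustrative:

#include <stdbool.h>
#include <stdint.h>

#define DL_WRITE   0x01
#define DL_RECALL  0x02

struct deleg {
    uint32_t flags;
    uint64_t sizelimit;
};

/* true => writes must be flushed to the server on close */
static bool
must_flush(const struct deleg *dp, uint64_t fsize, bool strict3530)
{
    if (dp == NULL)
        return true;                         /* no delegation at all */
    if ((dp->flags & (DL_WRITE | DL_RECALL)) != DL_WRITE)
        return true;                         /* read deleg, or recall */
    return strict3530 && dp->sizelimit < fsize;
}

int
main(void)
{
    struct deleg d = { DL_WRITE, 1 << 20 };

    return must_flush(&d, 4096, true);       /* exits 0: no flush needed */
}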
+ */ +APPLESTATIC int +nfscl_nodeleg(vnode_t vp, int writedeleg) +{ + struct nfsclclient *clp; + struct nfscldeleg *dp; + struct nfsnode *np; + struct nfsmount *nmp; + + np = VTONFS(vp); + nmp = VFSTONFS(vnode_mount(vp)); + if (!NFSHASNFSV4(nmp)) + return (1); + NFSLOCKCLSTATE(); + clp = nfscl_findcl(nmp); + if (clp == NULL) { + NFSUNLOCKCLSTATE(); + return (1); + } + dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len); + if (dp != NULL && (dp->nfsdl_flags & NFSCLDL_RECALL) == 0 && + (writedeleg == 0 || (dp->nfsdl_flags & NFSCLDL_WRITE) + == NFSCLDL_WRITE)) { + NFSUNLOCKCLSTATE(); + return (0); + } + NFSUNLOCKCLSTATE(); + return (1); +} + +/* + * Look for an associated delegation that should be DelegReturned. + */ +APPLESTATIC int +nfscl_removedeleg(vnode_t vp, NFSPROC_T *p, nfsv4stateid_t *stp) +{ + struct nfsclclient *clp; + struct nfscldeleg *dp; + struct nfsclowner *owp; + struct nfscllockowner *lp; + struct nfsmount *nmp; + struct ucred *cred; + struct nfsnode *np; + int igotlock = 0, triedrecall = 0, needsrecall, retcnt = 0, islept; + + nmp = VFSTONFS(vnode_mount(vp)); + np = VTONFS(vp); + NFSLOCKCLSTATE(); + /* + * Loop around waiting for: + * - outstanding I/O operations on delegations to complete + * - for a delegation on vp that has state, lock the client and + * do a recall + * - return delegation with no state + */ + while (1) { + clp = nfscl_findcl(nmp); + if (clp == NULL) { + NFSUNLOCKCLSTATE(); + return (retcnt); + } + dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, + np->n_fhp->nfh_len); + if (dp != NULL) { + /* + * Wait for outstanding I/O ops to be done. + */ + if (dp->nfsdl_rwlock.nfslock_usecnt > 0) { + if (igotlock) { + nfsv4_unlock(&clp->nfsc_lock, 0); + igotlock = 0; + } + dp->nfsdl_rwlock.nfslock_lock |= NFSV4LOCK_WANTED; + (void) nfsmsleep(&dp->nfsdl_rwlock, + NFSCLSTATEMUTEXPTR, PZERO, "nfscld", NULL); + continue; + } + needsrecall = 0; + LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) { + if (!LIST_EMPTY(&owp->nfsow_open)) { + needsrecall = 1; + break; + } + } + if (!needsrecall) { + LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) { + if (!LIST_EMPTY(&lp->nfsl_lock)) { + needsrecall = 1; + break; + } + } + } + if (needsrecall && !triedrecall) { + islept = 0; + while (!igotlock) { + igotlock = nfsv4_lock(&clp->nfsc_lock, 1, + &islept, NFSCLSTATEMUTEXPTR); + if (islept) + break; + } + if (islept) + continue; + NFSUNLOCKCLSTATE(); + cred = newnfs_getcred(); + newnfs_copycred(&dp->nfsdl_cred, cred); + (void) nfscl_recalldeleg(clp, nmp, dp, vp, cred, p); + NFSFREECRED(cred); + triedrecall = 1; + NFSLOCKCLSTATE(); + nfsv4_unlock(&clp->nfsc_lock, 0); + igotlock = 0; + continue; + } + *stp = dp->nfsdl_stateid; + retcnt = 1; + nfscl_cleandeleg(dp); + nfscl_freedeleg(&clp->nfsc_deleg, dp); + } + if (igotlock) + nfsv4_unlock(&clp->nfsc_lock, 0); + NFSUNLOCKCLSTATE(); + return (retcnt); + } +} + +/* + * Look for associated delegation(s) that should be DelegReturned. 
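The wait loop in nfscl_removedeleg() above sleeps until the in-flight I/O holding the delegation's rwlock use count drains, then re-evaluates from the top. The same wait-until-idle shape in portable threads; this models the idea only, not the kernel's nfsv4_lock/nfsmsleep primitives:

#include <pthread.h>

struct drain {
    pthread_mutex_t mtx;
    pthread_cond_t  cv;
    int             usecnt;     /* in-flight users, like nfslock_usecnt */
};

/* Block until no users remain, then perform the action while idle. */
static void
wait_idle_then(struct drain *d, void (*action)(void *), void *arg)
{
    pthread_mutex_lock(&d->mtx);
    while (d->usecnt > 0)
        pthread_cond_wait(&d->cv, &d->mtx);  /* nfsmsleep() analogue */
    action(arg);
    pthread_mutex_unlock(&d->mtx);
}

/* Users call this when done; the last one out wakes the waiter. */
static void
drain_release(struct drain *d)
{
    pthread_mutex_lock(&d->mtx);
    if (--d->usecnt == 0)
        pthread_cond_broadcast(&d->cv);      /* wakeup() analogue */
    pthread_mutex_unlock(&d->mtx);
}

static void noop(void *arg) { (void)arg; }

int
main(void)
{
    struct drain d = { PTHREAD_MUTEX_INITIALIZER,
        PTHREAD_COND_INITIALIZER, 1 };

    drain_release(&d);               /* last user leaves... */
    wait_idle_then(&d, noop, NULL);  /* ...so this returns at once */
    return 0;
}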
+ */ +APPLESTATIC int +nfscl_renamedeleg(vnode_t fvp, nfsv4stateid_t *fstp, int *gotfdp, vnode_t tvp, + nfsv4stateid_t *tstp, int *gottdp, NFSPROC_T *p) +{ + struct nfsclclient *clp; + struct nfscldeleg *dp; + struct nfsclowner *owp; + struct nfscllockowner *lp; + struct nfsmount *nmp; + struct ucred *cred; + struct nfsnode *np; + int igotlock = 0, triedrecall = 0, needsrecall, retcnt = 0, islept; + + nmp = VFSTONFS(vnode_mount(fvp)); + *gotfdp = 0; + *gottdp = 0; + NFSLOCKCLSTATE(); + /* + * Loop around waiting for: + * - outstanding I/O operations on delegations to complete + * - for a delegation on fvp that has state, lock the client and + * do a recall + * - return delegation(s) with no state. + */ + while (1) { + clp = nfscl_findcl(nmp); + if (clp == NULL) { + NFSUNLOCKCLSTATE(); + return (retcnt); + } + np = VTONFS(fvp); + dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, + np->n_fhp->nfh_len); + if (dp != NULL && *gotfdp == 0) { + /* + * Wait for outstanding I/O ops to be done. + */ + if (dp->nfsdl_rwlock.nfslock_usecnt > 0) { + if (igotlock) { + nfsv4_unlock(&clp->nfsc_lock, 0); + igotlock = 0; + } + dp->nfsdl_rwlock.nfslock_lock |= NFSV4LOCK_WANTED; + (void) nfsmsleep(&dp->nfsdl_rwlock, + NFSCLSTATEMUTEXPTR, PZERO, "nfscld", NULL); + continue; + } + needsrecall = 0; + LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) { + if (!LIST_EMPTY(&owp->nfsow_open)) { + needsrecall = 1; + break; + } + } + if (!needsrecall) { + LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) { + if (!LIST_EMPTY(&lp->nfsl_lock)) { + needsrecall = 1; + break; + } + } + } + if (needsrecall && !triedrecall) { + islept = 0; + while (!igotlock) { + igotlock = nfsv4_lock(&clp->nfsc_lock, 1, + &islept, NFSCLSTATEMUTEXPTR); + if (islept) + break; + } + if (islept) + continue; + NFSUNLOCKCLSTATE(); + cred = newnfs_getcred(); + newnfs_copycred(&dp->nfsdl_cred, cred); + (void) nfscl_recalldeleg(clp, nmp, dp, fvp, cred, p); + NFSFREECRED(cred); + triedrecall = 1; + NFSLOCKCLSTATE(); + nfsv4_unlock(&clp->nfsc_lock, 0); + igotlock = 0; + continue; + } + *fstp = dp->nfsdl_stateid; + retcnt++; + *gotfdp = 1; + nfscl_cleandeleg(dp); + nfscl_freedeleg(&clp->nfsc_deleg, dp); + } + if (igotlock) { + nfsv4_unlock(&clp->nfsc_lock, 0); + igotlock = 0; + } + if (tvp != NULL) { + np = VTONFS(tvp); + dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, + np->n_fhp->nfh_len); + if (dp != NULL && *gottdp == 0) { + /* + * Wait for outstanding I/O ops to be done. + */ + if (dp->nfsdl_rwlock.nfslock_usecnt > 0) { + dp->nfsdl_rwlock.nfslock_lock |= NFSV4LOCK_WANTED; + (void) nfsmsleep(&dp->nfsdl_rwlock, + NFSCLSTATEMUTEXPTR, PZERO, "nfscld", NULL); + continue; + } + LIST_FOREACH(owp, &dp->nfsdl_owner, nfsow_list) { + if (!LIST_EMPTY(&owp->nfsow_open)) { + NFSUNLOCKCLSTATE(); + return (retcnt); + } + } + LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) { + if (!LIST_EMPTY(&lp->nfsl_lock)) { + NFSUNLOCKCLSTATE(); + return (retcnt); + } + } + *tstp = dp->nfsdl_stateid; + retcnt++; + *gottdp = 1; + nfscl_cleandeleg(dp); + nfscl_freedeleg(&clp->nfsc_deleg, dp); + } + } + NFSUNLOCKCLSTATE(); + return (retcnt); + } +} + +/* + * Get a reference on the clientid associated with the mount point. + * Return 1 if success, 0 otherwise. 
+ */ +APPLESTATIC int +nfscl_getref(struct nfsmount *nmp) +{ + struct nfsclclient *clp; + + NFSLOCKCLSTATE(); + clp = nfscl_findcl(nmp); + if (clp == NULL) { + NFSUNLOCKCLSTATE(); + return (0); + } + nfsv4_getref(&clp->nfsc_lock, NULL, NFSCLSTATEMUTEXPTR); + NFSUNLOCKCLSTATE(); + return (1); +} + +/* + * Release a reference on a clientid acquired with the above call. + */ +APPLESTATIC void +nfscl_relref(struct nfsmount *nmp) +{ + struct nfsclclient *clp; + + NFSLOCKCLSTATE(); + clp = nfscl_findcl(nmp); + if (clp == NULL) { + NFSUNLOCKCLSTATE(); + return; + } + nfsv4_relref(&clp->nfsc_lock); + NFSUNLOCKCLSTATE(); +} + +/* + * Save the size attribute in the delegation, since the nfsnode + * is going away. + */ +APPLESTATIC void +nfscl_reclaimnode(vnode_t vp) +{ + struct nfsclclient *clp; + struct nfscldeleg *dp; + struct nfsnode *np = VTONFS(vp); + struct nfsmount *nmp; + + nmp = VFSTONFS(vnode_mount(vp)); + if (!NFSHASNFSV4(nmp)) + return; + NFSLOCKCLSTATE(); + clp = nfscl_findcl(nmp); + if (clp == NULL) { + NFSUNLOCKCLSTATE(); + return; + } + dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len); + if (dp != NULL && (dp->nfsdl_flags & NFSCLDL_WRITE)) + dp->nfsdl_size = np->n_size; + NFSUNLOCKCLSTATE(); +} + +/* + * Get the saved size attribute in the delegation, since it is a + * newly allocated nfsnode. + */ +APPLESTATIC void +nfscl_newnode(vnode_t vp) +{ + struct nfsclclient *clp; + struct nfscldeleg *dp; + struct nfsnode *np = VTONFS(vp); + struct nfsmount *nmp; + + nmp = VFSTONFS(vnode_mount(vp)); + if (!NFSHASNFSV4(nmp)) + return; + NFSLOCKCLSTATE(); + clp = nfscl_findcl(nmp); + if (clp == NULL) { + NFSUNLOCKCLSTATE(); + return; + } + dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len); + if (dp != NULL && (dp->nfsdl_flags & NFSCLDL_WRITE)) + np->n_size = dp->nfsdl_size; + NFSUNLOCKCLSTATE(); +} + +/* + * If there is a valid write delegation for this file, set the modtime + * to the local clock time. + */ +APPLESTATIC void +nfscl_delegmodtime(vnode_t vp) +{ + struct nfsclclient *clp; + struct nfscldeleg *dp; + struct nfsnode *np = VTONFS(vp); + struct nfsmount *nmp; + + nmp = VFSTONFS(vnode_mount(vp)); + if (!NFSHASNFSV4(nmp)) + return; + NFSLOCKCLSTATE(); + clp = nfscl_findcl(nmp); + if (clp == NULL) { + NFSUNLOCKCLSTATE(); + return; + } + dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len); + if (dp != NULL && (dp->nfsdl_flags & NFSCLDL_WRITE)) { + NFSGETNANOTIME(&dp->nfsdl_modtime); + dp->nfsdl_flags |= NFSCLDL_MODTIMESET; + } + NFSUNLOCKCLSTATE(); +} + +/* + * If there is a valid write delegation for this file with a modtime set, + * put that modtime in mtime. 
+ */ +APPLESTATIC void +nfscl_deleggetmodtime(vnode_t vp, struct timespec *mtime) +{ + struct nfsclclient *clp; + struct nfscldeleg *dp; + struct nfsnode *np = VTONFS(vp); + struct nfsmount *nmp; + + nmp = VFSTONFS(vnode_mount(vp)); + if (!NFSHASNFSV4(nmp)) + return; + NFSLOCKCLSTATE(); + clp = nfscl_findcl(nmp); + if (clp == NULL) { + NFSUNLOCKCLSTATE(); + return; + } + dp = nfscl_finddeleg(clp, np->n_fhp->nfh_fh, np->n_fhp->nfh_len); + if (dp != NULL && + (dp->nfsdl_flags & (NFSCLDL_WRITE | NFSCLDL_MODTIMESET)) == + (NFSCLDL_WRITE | NFSCLDL_MODTIMESET)) + *mtime = dp->nfsdl_modtime; + NFSUNLOCKCLSTATE(); +} + +static int +nfscl_errmap(struct nfsrv_descript *nd) +{ + short *defaulterrp, *errp; + + if (!nd->nd_repstat) + return (0); + if (nd->nd_procnum == NFSPROC_NOOP) + return (txdr_unsigned(nd->nd_repstat & 0xffff)); + if (nd->nd_repstat == EBADRPC) + return (txdr_unsigned(NFSERR_BADXDR)); + if (nd->nd_repstat == NFSERR_MINORVERMISMATCH || + nd->nd_repstat == NFSERR_OPILLEGAL) + return (txdr_unsigned(nd->nd_repstat)); + errp = defaulterrp = nfscl_cberrmap[nd->nd_procnum]; + while (*++errp) + if (*errp == (short)nd->nd_repstat) + return (txdr_unsigned(nd->nd_repstat)); + return (txdr_unsigned(*defaulterrp)); +} + diff --git a/sys/fs/nfsclient/nfs_clsubs.c b/sys/fs/nfsclient/nfs_clsubs.c new file mode 100644 index 0000000..7ae2860 --- /dev/null +++ b/sys/fs/nfsclient/nfs_clsubs.c @@ -0,0 +1,402 @@ +/*- + * Copyright (c) 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Rick Macklem at The University of Guelph. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * from nfs_subs.c 8.8 (Berkeley) 5/22/95 + */ + +#include <sys/cdefs.h> +__FBSDID("$FreeBSD$"); + +/* + * These functions support the macros and help fiddle mbuf chains for + * the nfs op functions. They do things like create the rpc header and + * copy data between mbuf chains and uio lists. 
+ */ + +#include <sys/param.h> +#include <sys/systm.h> +#include <sys/kernel.h> +#include <sys/bio.h> +#include <sys/buf.h> +#include <sys/proc.h> +#include <sys/mount.h> +#include <sys/vnode.h> +#include <sys/namei.h> +#include <sys/mbuf.h> +#include <sys/socket.h> +#include <sys/stat.h> +#include <sys/malloc.h> +#include <sys/sysent.h> +#include <sys/syscall.h> +#include <sys/sysproto.h> + +#include <vm/vm.h> +#include <vm/vm_object.h> +#include <vm/vm_extern.h> +#include <vm/uma.h> + +#include <fs/nfs/nfsport.h> +#include <fs/nfsclient/nfsnode.h> +#include <fs/nfsclient/nfsmount.h> +#include <fs/nfsclient/nfs.h> +#include <fs/nfsclient/nfs_lock.h> + +#include <netinet/in.h> + +/* + * Note that stdarg.h and the ANSI style va_start macro is used for both + * ANSI and traditional C compilers. + */ +#include <machine/stdarg.h> + +extern struct mtx ncl_iod_mutex; +extern struct proc *ncl_iodwant[NFS_MAXRAHEAD]; +extern struct nfsmount *ncl_iodmount[NFS_MAXRAHEAD]; +extern int ncl_numasync; +extern unsigned int ncl_iodmax; +extern struct nfsstats newnfsstats; + +int +ncl_uninit(struct vfsconf *vfsp) +{ + int i; + + /* + * Tell all nfsiod processes to exit. Clear ncl_iodmax, and wakeup + * any sleeping nfsiods so they check ncl_iodmax and exit. + */ + mtx_lock(&ncl_iod_mutex); + ncl_iodmax = 0; + for (i = 0; i < ncl_numasync; i++) + if (ncl_iodwant[i]) + wakeup(&ncl_iodwant[i]); + /* The last nfsiod to exit will wake us up when ncl_numasync hits 0 */ + while (ncl_numasync) + msleep(&ncl_numasync, &ncl_iod_mutex, PWAIT, "ioddie", 0); + mtx_unlock(&ncl_iod_mutex); + ncl_nhuninit(); + return (0); +} + +void +ncl_dircookie_lock(struct nfsnode *np) +{ + mtx_lock(&np->n_mtx); + while (np->n_flag & NDIRCOOKIELK) + (void) msleep(&np->n_flag, &np->n_mtx, PZERO, "nfsdirlk", 0); + np->n_flag |= NDIRCOOKIELK; + mtx_unlock(&np->n_mtx); +} + +void +ncl_dircookie_unlock(struct nfsnode *np) +{ + mtx_lock(&np->n_mtx); + np->n_flag &= ~NDIRCOOKIELK; + wakeup(&np->n_flag); + mtx_unlock(&np->n_mtx); +} + +int +ncl_upgrade_vnlock(struct vnode *vp) +{ + int old_lock; + + if ((old_lock = VOP_ISLOCKED(vp)) != LK_EXCLUSIVE) { + if (old_lock == LK_SHARED) { + /* Upgrade to exclusive lock, this might block */ + vn_lock(vp, LK_UPGRADE | LK_RETRY); + } else { + vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); + } + } + return old_lock; +} + +void +ncl_downgrade_vnlock(struct vnode *vp, int old_lock) +{ + if (old_lock != LK_EXCLUSIVE) { + if (old_lock == LK_SHARED) { + /* Downgrade from exclusive lock, this might block */ + vn_lock(vp, LK_DOWNGRADE); + } else { + VOP_UNLOCK(vp, 0); + } + } +} + +void +ncl_printf(const char *fmt, ...) 
+{ + va_list ap; + + mtx_lock(&Giant); + va_start(ap, fmt); + vprintf(fmt, ap); /* vprintf, not printf: the arguments arrive as a va_list */ + va_end(ap); + mtx_unlock(&Giant); +} + +#ifdef NFS_ACDEBUG +#include <sys/sysctl.h> +SYSCTL_DECL(_vfs_newnfs); +static int nfs_acdebug; +SYSCTL_INT(_vfs_newnfs, OID_AUTO, acdebug, CTLFLAG_RW, &nfs_acdebug, 0, ""); +#endif + +/* + * Check the time stamp. + * If the cache is valid, copy contents to *vap and return 0; + * otherwise, return an error. + */ +int +ncl_getattrcache(struct vnode *vp, struct vattr *vaper) +{ + struct nfsnode *np; + struct vattr *vap; + struct nfsmount *nmp; + int timeo; + + np = VTONFS(vp); + vap = &np->n_vattr.na_vattr; + nmp = VFSTONFS(vp->v_mount); +#ifdef NFS_ACDEBUG + mtx_lock(&Giant); /* ncl_printf() */ +#endif + mtx_lock(&np->n_mtx); + /* XXX n_mtime doesn't seem to be updated on a miss-and-reload */ + timeo = (time_second - np->n_mtime.tv_sec) / 10; + +#ifdef NFS_ACDEBUG + if (nfs_acdebug > 1) + ncl_printf("nfs_getattrcache: initial timeo = %d\n", timeo); +#endif + + if (vap->va_type == VDIR) { + if ((np->n_flag & NMODIFIED) || timeo < nmp->nm_acdirmin) + timeo = nmp->nm_acdirmin; + else if (timeo > nmp->nm_acdirmax) + timeo = nmp->nm_acdirmax; + } else { + if ((np->n_flag & NMODIFIED) || timeo < nmp->nm_acregmin) + timeo = nmp->nm_acregmin; + else if (timeo > nmp->nm_acregmax) + timeo = nmp->nm_acregmax; + } + +#ifdef NFS_ACDEBUG + if (nfs_acdebug > 2) + ncl_printf("acregmin %d; acregmax %d; acdirmin %d; acdirmax %d\n", + nmp->nm_acregmin, nmp->nm_acregmax, + nmp->nm_acdirmin, nmp->nm_acdirmax); + + if (nfs_acdebug) + ncl_printf("nfs_getattrcache: age = %d; final timeo = %d\n", + (time_second - np->n_attrstamp), timeo); +#endif + + if ((time_second - np->n_attrstamp) >= timeo) { + newnfsstats.attrcache_misses++; + mtx_unlock(&np->n_mtx); + return (ENOENT); + } + newnfsstats.attrcache_hits++; + if (vap->va_size != np->n_size) { + if (vap->va_type == VREG) { + if (np->n_flag & NMODIFIED) { + if (vap->va_size < np->n_size) + vap->va_size = np->n_size; + else + np->n_size = vap->va_size; + } else { + np->n_size = vap->va_size; + } + vnode_pager_setsize(vp, np->n_size); + } else { + np->n_size = vap->va_size; + } + } + bcopy((caddr_t)vap, (caddr_t)vaper, sizeof(struct vattr)); + if (np->n_flag & NCHG) { + if (np->n_flag & NACC) + vaper->va_atime = np->n_atim; + if (np->n_flag & NUPD) + vaper->va_mtime = np->n_mtim; + } + mtx_unlock(&np->n_mtx); +#ifdef NFS_ACDEBUG + mtx_unlock(&Giant); /* ncl_printf() */ +#endif + return (0); +} + +static nfsuint64 nfs_nullcookie = { { 0, 0 } }; +/* + * This function finds the directory cookie that corresponds to the + * logical byte offset given. 
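Stepping back to ncl_getattrcache() above: the cache lifetime scales with the file's quiescence, one tenth of the time since last modification, clamped between the mount's per-type minimum and maximum, and the cached attributes are a hit only while their age stays below that lifetime. The arithmetic in isolation, with a hypothetical signature:

#include <stdio.h>
#include <time.h>

/* Attribute-cache lifetime: age/10, clamped to [tmin, tmax]. */
static long
attrcache_timeo(time_t now, time_t mtime, int modified,
    long tmin, long tmax)
{
    long timeo = (long)(now - mtime) / 10;

    if (modified || timeo < tmin)
        timeo = tmin;
    else if (timeo > tmax)
        timeo = tmax;
    return timeo;
}

/* A cached copy is a hit iff (now - attrstamp) < attrcache_timeo(...). */

int
main(void)
{
    time_t now = time(NULL);

    /* untouched for 600 s with bounds 3..60 s: cache for 60 s */
    printf("%ld\n", attrcache_timeo(now, now - 600, 0, 3, 60));
    return 0;
}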
+ */ +nfsuint64 * +ncl_getcookie(struct nfsnode *np, off_t off, int add) +{ + struct nfsdmap *dp, *dp2; + int pos; + nfsuint64 *retval = NULL; + + pos = (uoff_t)off / NFS_DIRBLKSIZ; + if (pos == 0 || off < 0) { +#ifdef DIAGNOSTIC + if (add) + panic("nfs getcookie add at <= 0"); +#endif + return (&nfs_nullcookie); + } + pos--; + dp = LIST_FIRST(&np->n_cookies); + if (!dp) { + if (add) { + MALLOC(dp, struct nfsdmap *, sizeof (struct nfsdmap), + M_NFSDIROFF, M_WAITOK); + dp->ndm_eocookie = 0; + LIST_INSERT_HEAD(&np->n_cookies, dp, ndm_list); + } else + goto out; + } + while (pos >= NFSNUMCOOKIES) { + pos -= NFSNUMCOOKIES; + if (LIST_NEXT(dp, ndm_list)) { + if (!add && dp->ndm_eocookie < NFSNUMCOOKIES && + pos >= dp->ndm_eocookie) + goto out; + dp = LIST_NEXT(dp, ndm_list); + } else if (add) { + MALLOC(dp2, struct nfsdmap *, sizeof (struct nfsdmap), + M_NFSDIROFF, M_WAITOK); + dp2->ndm_eocookie = 0; + LIST_INSERT_AFTER(dp, dp2, ndm_list); + dp = dp2; + } else + goto out; + } + if (pos >= dp->ndm_eocookie) { + if (add) + dp->ndm_eocookie = pos + 1; + else + goto out; + } + retval = &dp->ndm_cookies[pos]; +out: + return (retval); +} + +/* + * Invalidate cached directory information, except for the actual directory + * blocks (which are invalidated separately). + * Done mainly to avoid the use of stale offset cookies. + */ +void +ncl_invaldir(struct vnode *vp) +{ + struct nfsnode *np = VTONFS(vp); + +#ifdef DIAGNOSTIC + if (vp->v_type != VDIR) + panic("nfs: invaldir not dir"); +#endif + ncl_dircookie_lock(np); + np->n_direofoffset = 0; + np->n_cookieverf.nfsuquad[0] = 0; + np->n_cookieverf.nfsuquad[1] = 0; + if (LIST_FIRST(&np->n_cookies)) + LIST_FIRST(&np->n_cookies)->ndm_eocookie = 0; + ncl_dircookie_unlock(np); +} + +/* + * The write verifier has changed (probably due to a server reboot), so all + * B_NEEDCOMMIT blocks will have to be written again. Since they are on the + * dirty block list as B_DELWRI, all this takes is clearing the B_NEEDCOMMIT + * and B_CLUSTEROK flags. Once done the new write verifier can be set for the + * mount point. + * + * B_CLUSTEROK must be cleared along with B_NEEDCOMMIT because stage 1 data + * writes are not clusterable. + */ +void +ncl_clearcommit(struct mount *mp) +{ + struct vnode *vp, *nvp; + struct buf *bp, *nbp; + struct bufobj *bo; + + MNT_ILOCK(mp); + MNT_VNODE_FOREACH(vp, mp, nvp) { + bo = &vp->v_bufobj; + VI_LOCK(vp); + if (vp->v_iflag & VI_DOOMED) { + VI_UNLOCK(vp); + continue; + } + vholdl(vp); + VI_UNLOCK(vp); + MNT_IUNLOCK(mp); + BO_LOCK(bo); + TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { + if (!BUF_ISLOCKED(bp) && + (bp->b_flags & (B_DELWRI | B_NEEDCOMMIT)) + == (B_DELWRI | B_NEEDCOMMIT)) + bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK); + } + BO_UNLOCK(bo); + vdrop(vp); + MNT_ILOCK(mp); + } + MNT_IUNLOCK(mp); +} + +/* + * Called once to initialize data structures... + */ +int +ncl_init(struct vfsconf *vfsp) +{ + int i; + + /* Ensure async daemons disabled */ + for (i = 0; i < NFS_MAXRAHEAD; i++) { + ncl_iodwant[i] = NULL; + ncl_iodmount[i] = NULL; + } + ncl_nhinit(); /* Init the nfsnode table */ + + return (0); +} + diff --git a/sys/fs/nfsclient/nfs_clvfsops.c b/sys/fs/nfsclient/nfs_clvfsops.c new file mode 100644 index 0000000..1ceba85 --- /dev/null +++ b/sys/fs/nfsclient/nfs_clvfsops.c @@ -0,0 +1,1257 @@ +/*- + * Copyright (c) 1989, 1993, 1995 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Rick Macklem at The University of Guelph. 
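ncl_getcookie() above stores a fixed number of cookies per list element, so a directory byte offset maps to an (element, slot) pair: divide by the directory block size, subtract one for the implicit zero cookie, then split by the per-element capacity. The index math alone, with placeholder constants:

#include <stdio.h>

#define DIRBLKSIZ  8192      /* placeholder for NFS_DIRBLKSIZ */
#define NUMCOOKIES 31        /* placeholder for NFSNUMCOOKIES */

/*
 * Map a directory offset to a (chain element, slot) pair; slot -1
 * means "use the null cookie" (offset inside the first block).
 */
static void
cookie_index(long long off, int *elem, int *slot)
{
    long long pos = off / DIRBLKSIZ;

    if (pos <= 0) {
        *elem = -1;
        *slot = -1;          /* null cookie */
        return;
    }
    pos--;                   /* block 1 uses slot 0 */
    *elem = (int)(pos / NUMCOOKIES);
    *slot = (int)(pos % NUMCOOKIES);
}

int
main(void)
{
    int e, s;

    cookie_index(3 * DIRBLKSIZ, &e, &s);
    printf("elem %d slot %d\n", e, s);   /* elem 0 slot 2 */
    return 0;
}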
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * from nfs_vfsops.c 8.12 (Berkeley) 5/20/95 + */ + +#include <sys/cdefs.h> +__FBSDID("$FreeBSD$"); + + +#include "opt_bootp.h" +#include "opt_nfsroot.h" + +#include <sys/param.h> +#include <sys/systm.h> +#include <sys/kernel.h> +#include <sys/bio.h> +#include <sys/buf.h> +#include <sys/clock.h> +#include <sys/lock.h> +#include <sys/malloc.h> +#include <sys/mbuf.h> +#include <sys/module.h> +#include <sys/mount.h> +#include <sys/proc.h> +#include <sys/socket.h> +#include <sys/socketvar.h> +#include <sys/sockio.h> +#include <sys/sysctl.h> +#include <sys/vnode.h> +#include <sys/signalvar.h> + +#include <vm/vm.h> +#include <vm/vm_extern.h> +#include <vm/uma.h> + +#include <net/if.h> +#include <net/route.h> +#include <netinet/in.h> + +#include <fs/nfs/nfsport.h> +#include <fs/nfsclient/nfsnode.h> +#include <fs/nfsclient/nfsmount.h> +#include <fs/nfsclient/nfs.h> +#include <fs/nfsclient/nfsdiskless.h> + +extern int nfscl_ticks; +extern struct timeval nfsboottime; +extern struct nfsstats newnfsstats; + +MALLOC_DEFINE(M_NEWNFSREQ, "newnfsclient_req", "New NFS request header"); +MALLOC_DEFINE(M_NEWNFSMNT, "newnfsmnt", "New NFS mount struct"); + +SYSCTL_DECL(_vfs_newnfs); +SYSCTL_STRUCT(_vfs_newnfs, NFS_NFSSTATS, nfsstats, CTLFLAG_RW, + &newnfsstats, nfsstats, "S,nfsstats"); +static int nfs_ip_paranoia = 1; +SYSCTL_INT(_vfs_newnfs, OID_AUTO, nfs_ip_paranoia, CTLFLAG_RW, + &nfs_ip_paranoia, 0, ""); +static int nfs_tprintf_initial_delay = NFS_TPRINTF_INITIAL_DELAY; +SYSCTL_INT(_vfs_newnfs, NFS_TPRINTF_INITIAL_DELAY, + downdelayinitial, CTLFLAG_RW, &nfs_tprintf_initial_delay, 0, ""); +/* how long between console messages "nfs server foo not responding" */ +static int nfs_tprintf_delay = NFS_TPRINTF_DELAY; +SYSCTL_INT(_vfs_newnfs, NFS_TPRINTF_DELAY, + downdelayinterval, CTLFLAG_RW, &nfs_tprintf_delay, 0, ""); + +static void nfs_decode_args(struct mount *mp, struct nfsmount *nmp, + struct nfs_args *argp, struct ucred *, struct thread *); +static int mountnfs(struct nfs_args *, struct mount *, + struct sockaddr *, char *, u_char *, 
u_char *, u_char *, + struct vnode **, struct ucred *, struct thread *); +static vfs_mount_t nfs_mount; +static vfs_cmount_t nfs_cmount; +static vfs_unmount_t nfs_unmount; +static vfs_root_t nfs_root; +static vfs_statfs_t nfs_statfs; +static vfs_sync_t nfs_sync; +static vfs_sysctl_t nfs_sysctl; + +/* + * nfs vfs operations. + */ +static struct vfsops nfs_vfsops = { + .vfs_init = ncl_init, + .vfs_mount = nfs_mount, + .vfs_cmount = nfs_cmount, + .vfs_root = nfs_root, + .vfs_statfs = nfs_statfs, + .vfs_sync = nfs_sync, + .vfs_uninit = ncl_uninit, + .vfs_unmount = nfs_unmount, + .vfs_sysctl = nfs_sysctl, +}; +VFS_SET(nfs_vfsops, newnfs, VFCF_NETWORK); + +/* So that loader and kldload(2) can find us, wherever we are.. */ +MODULE_VERSION(newnfs, 1); + +/* + * This structure must be filled in by a primary bootstrap or bootstrap + * server for a diskless/dataless machine. It is initialized below just + * to ensure that it is allocated to initialized data (.data not .bss). + */ +struct nfs_diskless newnfs_diskless = { { { 0 } } }; +struct nfsv3_diskless newnfsv3_diskless = { { { 0 } } }; +int newnfs_diskless_valid = 0; + +SYSCTL_INT(_vfs_newnfs, OID_AUTO, diskless_valid, CTLFLAG_RD, + &newnfs_diskless_valid, 0, ""); + +SYSCTL_STRING(_vfs_newnfs, OID_AUTO, diskless_rootpath, CTLFLAG_RD, + newnfsv3_diskless.root_hostnam, 0, ""); + +SYSCTL_OPAQUE(_vfs_newnfs, OID_AUTO, diskless_rootaddr, CTLFLAG_RD, + &newnfsv3_diskless.root_saddr, sizeof newnfsv3_diskless.root_saddr, + "%Ssockaddr_in", ""); + + +void newnfsargs_ntoh(struct nfs_args *); +static int nfs_mountdiskless(char *, + struct sockaddr_in *, struct nfs_args *, + struct thread *, struct vnode **, struct mount *); +static void nfs_convert_diskless(void); +static void nfs_convert_oargs(struct nfs_args *args, + struct onfs_args *oargs); + +int +newnfs_iosize(struct nfsmount *nmp) +{ + int iosize, maxio; + + /* First, set the upper limit for iosize */ + if (nmp->nm_flag & NFSMNT_NFSV4) { + maxio = NFS_MAXBSIZE; + } else if (nmp->nm_flag & NFSMNT_NFSV3) { + if (nmp->nm_sotype == SOCK_DGRAM) + maxio = NFS_MAXDGRAMDATA; + else + maxio = NFS_MAXBSIZE; + } else { + maxio = NFS_V2MAXDATA; + } + if (nmp->nm_rsize > maxio || nmp->nm_rsize == 0) + nmp->nm_rsize = maxio; + if (nmp->nm_rsize > MAXBSIZE) + nmp->nm_rsize = MAXBSIZE; + if (nmp->nm_readdirsize > maxio || nmp->nm_readdirsize == 0) + nmp->nm_readdirsize = maxio; + if (nmp->nm_readdirsize > nmp->nm_rsize) + nmp->nm_readdirsize = nmp->nm_rsize; + if (nmp->nm_wsize > maxio || nmp->nm_wsize == 0) + nmp->nm_wsize = maxio; + if (nmp->nm_wsize > MAXBSIZE) + nmp->nm_wsize = MAXBSIZE; + + /* + * Calculate the size used for io buffers. Use the larger + * of the two sizes to minimise nfs requests but make sure + * that it is at least one VM page to avoid wasting buffer + * space. 
+ */ + iosize = imax(nmp->nm_rsize, nmp->nm_wsize); + iosize = imax(iosize, PAGE_SIZE); + nmp->nm_mountp->mnt_stat.f_iosize = iosize; + return (iosize); +} + +static void +nfs_convert_oargs(struct nfs_args *args, struct onfs_args *oargs) +{ + + args->version = NFS_ARGSVERSION; + args->addr = oargs->addr; + args->addrlen = oargs->addrlen; + args->sotype = oargs->sotype; + args->proto = oargs->proto; + args->fh = oargs->fh; + args->fhsize = oargs->fhsize; + args->flags = oargs->flags; + args->wsize = oargs->wsize; + args->rsize = oargs->rsize; + args->readdirsize = oargs->readdirsize; + args->timeo = oargs->timeo; + args->retrans = oargs->retrans; + args->readahead = oargs->readahead; + args->hostname = oargs->hostname; +} + +static void +nfs_convert_diskless(void) +{ + + bcopy(&newnfs_diskless.myif, &newnfsv3_diskless.myif, + sizeof(struct ifaliasreq)); + bcopy(&newnfs_diskless.mygateway, &newnfsv3_diskless.mygateway, + sizeof(struct sockaddr_in)); + nfs_convert_oargs(&newnfsv3_diskless.root_args,&newnfs_diskless.root_args); + if (newnfsv3_diskless.root_args.flags & NFSMNT_NFSV3) { + newnfsv3_diskless.root_fhsize = NFSX_MYFH; + bcopy(newnfs_diskless.root_fh, newnfsv3_diskless.root_fh, NFSX_MYFH); + } else { + newnfsv3_diskless.root_fhsize = NFSX_V2FH; + bcopy(newnfs_diskless.root_fh, newnfsv3_diskless.root_fh, NFSX_V2FH); + } + bcopy(&newnfs_diskless.root_saddr,&newnfsv3_diskless.root_saddr, + sizeof(struct sockaddr_in)); + bcopy(newnfs_diskless.root_hostnam, newnfsv3_diskless.root_hostnam, MNAMELEN); + newnfsv3_diskless.root_time = newnfs_diskless.root_time; + bcopy(newnfs_diskless.my_hostnam, newnfsv3_diskless.my_hostnam, + MAXHOSTNAMELEN); + newnfs_diskless_valid = 3; +} + +/* + * nfs statfs call + */ +static int +nfs_statfs(struct mount *mp, struct statfs *sbp, struct thread *td) +{ + struct vnode *vp; + struct nfsmount *nmp = VFSTONFS(mp); + struct nfsvattr nfsva; + struct nfsfsinfo fs; + struct nfsstatfs sb; + int error = 0, attrflag, gotfsinfo = 0, ret; + struct nfsnode *np; + + error = vfs_busy(mp, MBF_NOWAIT); + if (error) + return (error); + error = ncl_nget(mp, nmp->nm_fh, nmp->nm_fhsize, &np); + if (error) { + vfs_unbusy(mp); + return (error); + } + vp = NFSTOV(np); + mtx_lock(&nmp->nm_mtx); + if (NFSHASNFSV3(nmp) && !NFSHASGOTFSINFO(nmp)) { + mtx_unlock(&nmp->nm_mtx); + error = nfsrpc_fsinfo(vp, &fs, td->td_ucred, td, &nfsva, + &attrflag, NULL); + if (!error) + gotfsinfo = 1; + } else + mtx_unlock(&nmp->nm_mtx); + if (!error) + error = nfsrpc_statfs(vp, &sb, &fs, td->td_ucred, td, &nfsva, + &attrflag, NULL); + if (attrflag == 0) { + ret = nfsrpc_getattrnovp(nmp, nmp->nm_fh, nmp->nm_fhsize, 1, + td->td_ucred, td, &nfsva, NULL); + if (ret) { + /* + * Just set default values to get things going. 
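Returning to newnfs_iosize() above: once rsize and wsize have been clamped, the buffer size is simply the larger of the two, floored at one VM page so buffer space is not wasted on tiny requests. As a standalone sketch (PAGE_SZ stands in for the kernel's PAGE_SIZE):

#include <stdio.h>

#define PAGE_SZ 4096         /* stand-in for PAGE_SIZE */

static int
imax2(int a, int b)
{
    return a > b ? a : b;
}

/* I/O buffer size: max(rsize, wsize), but never below one page. */
static int
io_size(int rsize, int wsize)
{
    return imax2(imax2(rsize, wsize), PAGE_SZ);
}

int
main(void)
{
    printf("%d %d\n", io_size(8192, 32768), io_size(1024, 2048));
    /* prints: 32768 4096 */
    return 0;
}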
+ */ + NFSBZERO((caddr_t)&nfsva, sizeof (struct nfsvattr)); + nfsva.na_vattr.va_type = VDIR; + nfsva.na_vattr.va_mode = 0777; + nfsva.na_vattr.va_nlink = 100; + nfsva.na_vattr.va_uid = (uid_t)0; + nfsva.na_vattr.va_gid = (gid_t)0; + nfsva.na_vattr.va_fileid = 2; + nfsva.na_vattr.va_gen = 1; + nfsva.na_vattr.va_blocksize = NFS_FABLKSIZE; + nfsva.na_vattr.va_size = 512 * 1024; + } + } + (void) nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 1); + if (!error) { + mtx_lock(&nmp->nm_mtx); + if (gotfsinfo || (nmp->nm_flag & NFSMNT_NFSV4)) + nfscl_loadfsinfo(nmp, &fs); + nfscl_loadsbinfo(nmp, &sb, sbp); + sbp->f_flags = nmp->nm_flag; + sbp->f_iosize = newnfs_iosize(nmp); + mtx_unlock(&nmp->nm_mtx); + if (sbp != &mp->mnt_stat) { + bcopy(mp->mnt_stat.f_mntonname, sbp->f_mntonname, MNAMELEN); + bcopy(mp->mnt_stat.f_mntfromname, sbp->f_mntfromname, MNAMELEN); + } + strncpy(&sbp->f_fstypename[0], mp->mnt_vfc->vfc_name, MFSNAMELEN); + } else if (NFS_ISV4(vp)) { + error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0); + } + vput(vp); + vfs_unbusy(mp); + return (error); +} + +/* + * nfs version 3 fsinfo rpc call + */ +int +ncl_fsinfo(struct nfsmount *nmp, struct vnode *vp, struct ucred *cred, + struct thread *td) +{ + struct nfsfsinfo fs; + struct nfsvattr nfsva; + int error, attrflag; + + error = nfsrpc_fsinfo(vp, &fs, cred, td, &nfsva, &attrflag, NULL); + if (!error) { + if (attrflag) + (void) nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, + 1); + mtx_lock(&nmp->nm_mtx); + nfscl_loadfsinfo(nmp, &fs); + mtx_unlock(&nmp->nm_mtx); + } + return (error); +} + +/* + * Mount a remote root fs via. nfs. This depends on the info in the + * newnfs_diskless structure that has been filled in properly by some primary + * bootstrap. + * It goes something like this: + * - do enough of "ifconfig" by calling ifioctl() so that the system + * can talk to the server + * - If newnfs_diskless.mygateway is filled in, use that address as + * a default gateway. + * - build the rootfs mount point and call mountnfs() to do the rest. + * + * It is assumed to be safe to read, modify, and write the nfsv3_diskless + * structure, as well as other global NFS client variables here, as + * ncl_mountroot() will be called once in the boot before any other NFS + * client activity occurs. + */ +int +ncl_mountroot(struct mount *mp, struct thread *td) +{ + struct nfsv3_diskless *nd = &newnfsv3_diskless; + struct socket *so; + struct vnode *vp; + struct ifreq ir; + int error, i; + u_long l; + char buf[128]; + char *cp; + +#if defined(BOOTP_NFSROOT) && defined(BOOTP) + bootpc_init(); /* use bootp to get newnfs_diskless filled in */ +#elif defined(NFS_ROOT) + nfs_setup_diskless(); +#endif + + nfscl_init(); + + if (newnfs_diskless_valid == 0) + return (-1); + if (newnfs_diskless_valid == 1) + nfs_convert_diskless(); + + /* + * XXX splnet, so networks will receive... + */ + splnet(); + + /* + * Do enough of ifconfig(8) so that the critical net interface can + * talk to the server. + */ + error = socreate(nd->myif.ifra_addr.sa_family, &so, nd->root_args.sotype, 0, + td->td_ucred, td); + if (error) + panic("ncl_mountroot: socreate(%04x): %d", + nd->myif.ifra_addr.sa_family, error); + +#if 0 /* XXX Bad idea */ + /* + * We might not have been told the right interface, so we pass + * over the first ten interfaces of the same kind, until we get + * one of them configured. 
+ */ + + for (i = strlen(nd->myif.ifra_name) - 1; + nd->myif.ifra_name[i] >= '0' && + nd->myif.ifra_name[i] <= '9'; + nd->myif.ifra_name[i] ++) { + error = ifioctl(so, SIOCAIFADDR, (caddr_t)&nd->myif, td); + if(!error) + break; + } +#endif + error = ifioctl(so, SIOCAIFADDR, (caddr_t)&nd->myif, td); + if (error) + panic("ncl_mountroot: SIOCAIFADDR: %d", error); + if ((cp = getenv("boot.netif.mtu")) != NULL) { + ir.ifr_mtu = strtol(cp, NULL, 10); + bcopy(nd->myif.ifra_name, ir.ifr_name, IFNAMSIZ); + freeenv(cp); + error = ifioctl(so, SIOCSIFMTU, (caddr_t)&ir, td); + if (error) + printf("ncl_mountroot: SIOCSIFMTU: %d", error); + } + soclose(so); + + /* + * If the gateway field is filled in, set it as the default route. + * Note that pxeboot will set a default route of 0 if the route + * is not set by the DHCP server. Check also for a value of 0 + * to avoid panicking inappropriately in that situation. + */ + if (nd->mygateway.sin_len != 0 && + nd->mygateway.sin_addr.s_addr != 0) { + struct sockaddr_in mask, sin; + + bzero((caddr_t)&mask, sizeof(mask)); + sin = mask; + sin.sin_family = AF_INET; + sin.sin_len = sizeof(sin); + error = rtrequest(RTM_ADD, (struct sockaddr *)&sin, + (struct sockaddr *)&nd->mygateway, + (struct sockaddr *)&mask, + RTF_UP | RTF_GATEWAY, NULL); + if (error) + panic("ncl_mountroot: RTM_ADD: %d", error); + } + + /* + * Create the rootfs mount point. + */ + nd->root_args.fh = nd->root_fh; + nd->root_args.fhsize = nd->root_fhsize; + l = ntohl(nd->root_saddr.sin_addr.s_addr); + snprintf(buf, sizeof(buf), "%ld.%ld.%ld.%ld:%s", + (l >> 24) & 0xff, (l >> 16) & 0xff, + (l >> 8) & 0xff, (l >> 0) & 0xff, nd->root_hostnam); + printf("NFS ROOT: %s\n", buf); + if ((error = nfs_mountdiskless(buf, + &nd->root_saddr, &nd->root_args, td, &vp, mp)) != 0) { + return (error); + } + + /* + * This is not really an nfs issue, but it is much easier to + * set hostname here and then let the "/etc/rc.xxx" files + * mount the right /var based upon its preset value. + */ + bcopy(nd->my_hostnam, hostname, MAXHOSTNAMELEN); + hostname[MAXHOSTNAMELEN - 1] = '\0'; + for (i = 0; i < MAXHOSTNAMELEN; i++) + if (hostname[i] == '\0') + break; + inittodr(ntohl(nd->root_time)); + return (0); +} + +/* + * Internal version of mount system call for diskless setup. + */ +static int +nfs_mountdiskless(char *path, + struct sockaddr_in *sin, struct nfs_args *args, struct thread *td, + struct vnode **vpp, struct mount *mp) +{ + struct sockaddr *nam; + int error; + + nam = sodupsockaddr((struct sockaddr *)sin, M_WAITOK); + if ((error = mountnfs(args, mp, nam, path, NULL, NULL, NULL, vpp, + td->td_ucred, td)) != 0) { + printf("ncl_mountroot: mount %s on /: %d\n", path, error); + return (error); + } + return (0); +} + +static void +nfs_decode_args(struct mount *mp, struct nfsmount *nmp, struct nfs_args *argp, + struct ucred *cred, struct thread *td) +{ + int s; + int adjsock; + + s = splnet(); + + /* + * Set read-only flag if requested; otherwise, clear it if this is + * an update. If this is not an update, then either the read-only + * flag is already clear, or this is a root mount and it was set + * intentionally at some previous point. + */ + if (vfs_getopt(mp->mnt_optnew, "ro", NULL, NULL) == 0) { + MNT_ILOCK(mp); + mp->mnt_flag |= MNT_RDONLY; + MNT_IUNLOCK(mp); + } else if (mp->mnt_flag & MNT_UPDATE) { + MNT_ILOCK(mp); + mp->mnt_flag &= ~MNT_RDONLY; + MNT_IUNLOCK(mp); + } + + /* + * Silently clear NFSMNT_NOCONN if it's a TCP mount, it makes + * no sense in that context. 
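The "NFS ROOT" line that ncl_mountroot() prints above is built by shifting the host-order address out one octet at a time. The same arithmetic, runnable as-is with an illustrative address and export path:

#include <stdio.h>

int
main(void)
{
    unsigned long l = (127UL << 24) | 1;    /* 127.0.0.1, host order */
    char buf[128];

    /* each shift-and-mask peels off one octet, high byte first */
    snprintf(buf, sizeof(buf), "%lu.%lu.%lu.%lu:%s",
        (l >> 24) & 0xff, (l >> 16) & 0xff,
        (l >> 8) & 0xff, l & 0xff, "/exports/root");
    printf("NFS ROOT: %s\n", buf);
    return 0;
}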
Also, set up appropriate retransmit + * and soft timeout behavior. + */ + if (argp->sotype == SOCK_STREAM) { + nmp->nm_flag &= ~NFSMNT_NOCONN; + nmp->nm_timeo = NFS_MAXTIMEO; + } + + /* Also clear RDIRPLUS if not NFSv3, it crashes some servers */ + if ((argp->flags & NFSMNT_NFSV3) == 0) + nmp->nm_flag &= ~NFSMNT_RDIRPLUS; + + /* Also re-bind if we're switching to/from a connected UDP socket */ + adjsock = ((nmp->nm_flag & NFSMNT_NOCONN) != + (argp->flags & NFSMNT_NOCONN)); + + /* Update flags atomically. Don't change the lock bits. */ + nmp->nm_flag = argp->flags | nmp->nm_flag; + splx(s); + + if ((argp->flags & NFSMNT_TIMEO) && argp->timeo > 0) { + nmp->nm_timeo = (argp->timeo * NFS_HZ + 5) / 10; + if (nmp->nm_timeo < NFS_MINTIMEO) + nmp->nm_timeo = NFS_MINTIMEO; + else if (nmp->nm_timeo > NFS_MAXTIMEO) + nmp->nm_timeo = NFS_MAXTIMEO; + } + + if ((argp->flags & NFSMNT_RETRANS) && argp->retrans > 1) { + nmp->nm_retry = argp->retrans; + if (nmp->nm_retry > NFS_MAXREXMIT) + nmp->nm_retry = NFS_MAXREXMIT; + } + + if ((argp->flags & NFSMNT_WSIZE) && argp->wsize > 0) { + nmp->nm_wsize = argp->wsize; + /* Round down to multiple of blocksize */ + nmp->nm_wsize &= ~(NFS_FABLKSIZE - 1); + if (nmp->nm_wsize <= 0) + nmp->nm_wsize = NFS_FABLKSIZE; + } + + if ((argp->flags & NFSMNT_RSIZE) && argp->rsize > 0) { + nmp->nm_rsize = argp->rsize; + /* Round down to multiple of blocksize */ + nmp->nm_rsize &= ~(NFS_FABLKSIZE - 1); + if (nmp->nm_rsize <= 0) + nmp->nm_rsize = NFS_FABLKSIZE; + } + + if ((argp->flags & NFSMNT_READDIRSIZE) && argp->readdirsize > 0) { + nmp->nm_readdirsize = argp->readdirsize; + } + + if ((argp->flags & NFSMNT_ACREGMIN) && argp->acregmin >= 0) + nmp->nm_acregmin = argp->acregmin; + else + nmp->nm_acregmin = NFS_MINATTRTIMO; + if ((argp->flags & NFSMNT_ACREGMAX) && argp->acregmax >= 0) + nmp->nm_acregmax = argp->acregmax; + else + nmp->nm_acregmax = NFS_MAXATTRTIMO; + if ((argp->flags & NFSMNT_ACDIRMIN) && argp->acdirmin >= 0) + nmp->nm_acdirmin = argp->acdirmin; + else + nmp->nm_acdirmin = NFS_MINDIRATTRTIMO; + if ((argp->flags & NFSMNT_ACDIRMAX) && argp->acdirmax >= 0) + nmp->nm_acdirmax = argp->acdirmax; + else + nmp->nm_acdirmax = NFS_MAXDIRATTRTIMO; + if (nmp->nm_acdirmin > nmp->nm_acdirmax) + nmp->nm_acdirmin = nmp->nm_acdirmax; + if (nmp->nm_acregmin > nmp->nm_acregmax) + nmp->nm_acregmin = nmp->nm_acregmax; + + if ((argp->flags & NFSMNT_READAHEAD) && argp->readahead >= 0) { + if (argp->readahead <= NFS_MAXRAHEAD) + nmp->nm_readahead = argp->readahead; + else + nmp->nm_readahead = NFS_MAXRAHEAD; + } + if ((argp->flags & NFSMNT_WCOMMITSIZE) && argp->wcommitsize >= 0) { + if (argp->wcommitsize < nmp->nm_wsize) + nmp->nm_wcommitsize = nmp->nm_wsize; + else + nmp->nm_wcommitsize = argp->wcommitsize; + } + + adjsock |= ((nmp->nm_sotype != argp->sotype) || + (nmp->nm_soproto != argp->proto)); + + if (nmp->nm_client != NULL && adjsock) { + int haslock = 0, error = 0; + + if (nmp->nm_sotype == SOCK_STREAM) { + error = newnfs_sndlock(&nmp->nm_sockreq.nr_lock); + if (!error) + haslock = 1; + } + if (!error) { + newnfs_disconnect(&nmp->nm_sockreq); + if (haslock) + newnfs_sndunlock(&nmp->nm_sockreq.nr_lock); + nmp->nm_sotype = argp->sotype; + nmp->nm_soproto = argp->proto; + if (nmp->nm_sotype == SOCK_DGRAM) + while (newnfs_connect(nmp, &nmp->nm_sockreq, + cred, td, 0)) { + printf("newnfs_args: retrying connect\n"); + (void) nfs_catnap(PSOCK, "newnfscon"); + } + } + } else { + nmp->nm_sotype = argp->sotype; + nmp->nm_soproto = argp->proto; + } +} + +static const char *nfs_opts[] = { 
"from", "nfs_args", + "noatime", "noexec", "suiddir", "nosuid", "nosymfollow", "union", + "noclusterr", "noclusterw", "multilabel", "acls", "force", "update", + "async", "dumbtimer", "noconn", "nolockd", "intr", "rdirplus", "resvport", + "readdirsize", "soft", "hard", "mntudp", "tcp", "wsize", "rsize", + "retrans", "acregmin", "acregmax", "acdirmin", "acdirmax", + NULL }; + +/* + * VFS Operations. + * + * mount system call + * It seems a bit dumb to copyinstr() the host and path here and then + * bcopy() them in mountnfs(), but I wanted to detect errors before + * doing the sockargs() call because sockargs() allocates an mbuf and + * an error after that means that I have to release the mbuf. + */ +/* ARGSUSED */ +static int +nfs_mount(struct mount *mp, struct thread *td) +{ + struct nfs_args args = { + .version = NFS_ARGSVERSION, + .addr = NULL, + .addrlen = sizeof (struct sockaddr_in), + .sotype = SOCK_STREAM, + .proto = 0, + .fh = NULL, + .fhsize = 0, + .flags = 0, + .wsize = NFS_WSIZE, + .rsize = NFS_RSIZE, + .readdirsize = NFS_READDIRSIZE, + .timeo = 10, + .retrans = NFS_RETRANS, + .readahead = NFS_DEFRAHEAD, + .wcommitsize = 0, /* was: NQ_DEFLEASE */ + .hostname = NULL, + /* args version 4 */ + .acregmin = NFS_MINATTRTIMO, + .acregmax = NFS_MAXATTRTIMO, + .acdirmin = NFS_MINDIRATTRTIMO, + .acdirmax = NFS_MAXDIRATTRTIMO, + .dirlen = 0, + .krbnamelen = 0, + }; + int error; + struct sockaddr *nam; + struct vnode *vp; + char hst[MNAMELEN]; + size_t len; + u_char nfh[NFSX_FHMAX], krbname[100], dirpath[100], srvkrbname[100]; + + if (vfs_filteropt(mp->mnt_optnew, nfs_opts)) { + error = EINVAL; + goto out; + } + + if ((mp->mnt_flag & (MNT_ROOTFS | MNT_UPDATE)) == MNT_ROOTFS) { + error = ncl_mountroot(mp, td); + goto out; + } + + error = vfs_copyopt(mp->mnt_optnew, "nfs_args", &args, sizeof args); + if (error) + goto out; + + if (args.version != NFS_ARGSVERSION) { + error = EPROGMISMATCH; + goto out; + } + + nfscl_init(); + + if (mp->mnt_flag & MNT_UPDATE) { + struct nfsmount *nmp = VFSTONFS(mp); + + if (nmp == NULL) { + error = EIO; + goto out; + } + /* + * When doing an update, we can't change version, + * security, switch lockd strategies or change cookie + * translation + */ + args.flags = (args.flags & + ~(NFSMNT_NFSV3 | + NFSMNT_NFSV4 | + NFSMNT_KERB | + NFSMNT_INTEGRITY | + NFSMNT_PRIVACY | + NFSMNT_NOLOCKD /*|NFSMNT_XLATECOOKIE*/)) | + (nmp->nm_flag & + (NFSMNT_NFSV3 | + NFSMNT_NFSV4 | + NFSMNT_KERB | + NFSMNT_INTEGRITY | + NFSMNT_PRIVACY | + NFSMNT_NOLOCKD /*|NFSMNT_XLATECOOKIE*/)); + nfs_decode_args(mp, nmp, &args, td->td_ucred, td); + goto out; + } + + /* + * Make the nfs_ip_paranoia sysctl serve as the default connection + * or no-connection mode for those protocols that support + * no-connection mode (the flag will be cleared later for protocols + * that do not support no-connection mode). This will allow a client + * to receive replies from a different IP then the request was + * sent to. Note: default value for nfs_ip_paranoia is 1 (paranoid), + * not 0. 
+ */ + if (nfs_ip_paranoia == 0) + args.flags |= NFSMNT_NOCONN; + if (args.fhsize < 0 || args.fhsize > NFSX_FHMAX) { + error = EINVAL; + goto out; + } + if (args.fhsize > 0) { + error = copyin((caddr_t)args.fh, (caddr_t)nfh, args.fhsize); + if (error) + goto out; + } + error = copyinstr(args.hostname, hst, MNAMELEN-1, &len); + if (error) + goto out; + bzero(&hst[len], MNAMELEN - len); + if (args.krbnamelen > 0) { + if (args.krbnamelen >= 100) { + error = EINVAL; + goto out; + } + error = copyin(args.krbname, krbname, args.krbnamelen); + if (error) + goto out; + krbname[args.krbnamelen] = '\0'; + } else { + krbname[0] = '\0'; + args.krbnamelen = 0; + } + if (args.dirlen > 0) { + if (args.dirlen >= 100) { + error = EINVAL; + goto out; + } + error = copyin(args.dirpath, dirpath, args.dirlen); + if (error) + goto out; + dirpath[args.dirlen] = '\0'; + } else { + dirpath[0] = '\0'; + args.dirlen = 0; + } + if (args.srvkrbnamelen > 0) { + if (args.srvkrbnamelen >= 100) { + error = EINVAL; + goto out; + } + error = copyin(args.srvkrbname, srvkrbname, args.srvkrbnamelen); + if (error) + goto out; + srvkrbname[args.srvkrbnamelen] = '\0'; + } else { + srvkrbname[0] = '\0'; + args.srvkrbnamelen = 0; + } + /* sockargs() call must be after above copyin() calls */ + error = getsockaddr(&nam, (caddr_t)args.addr, args.addrlen); + if (error) + goto out; + args.fh = nfh; + error = mountnfs(&args, mp, nam, hst, krbname, dirpath, srvkrbname, + &vp, td->td_ucred, td); +out: + if (!error) { + MNT_ILOCK(mp); + mp->mnt_kern_flag |= (MNTK_MPSAFE|MNTK_LOOKUP_SHARED); + MNT_IUNLOCK(mp); + } + return (error); +} + + +/* + * VFS Operations. + * + * mount system call + * It seems a bit dumb to copyinstr() the host and path here and then + * bcopy() them in mountnfs(), but I wanted to detect errors before + * doing the sockargs() call because sockargs() allocates an mbuf and + * an error after that means that I have to release the mbuf. + */ +/* ARGSUSED */ +static int +nfs_cmount(struct mntarg *ma, void *data, int flags, struct thread *td) +{ + int error; + struct nfs_args args; + + error = copyin(data, &args, sizeof (struct nfs_args)); + if (error) + return error; + + ma = mount_arg(ma, "nfs_args", &args, sizeof args); + + error = kernel_mount(ma, flags); + return (error); +} + +/* + * Common code for mount and mountroot + */ +static int +mountnfs(struct nfs_args *argp, struct mount *mp, struct sockaddr *nam, + char *hst, u_char *krbname, u_char *dirpath, u_char *srvkrbname, + struct vnode **vpp, struct ucred *cred, struct thread *td) +{ + struct nfsmount *nmp; + struct nfsnode *np; + int error, trycnt, ret, clearintr; + struct nfsvattr nfsva; + static u_int64_t clval = 0; + + if (mp->mnt_flag & MNT_UPDATE) { + nmp = VFSTONFS(mp); + printf("%s: MNT_UPDATE is no longer handled here\n", __func__); + FREE(nam, M_SONAME); + return (0); + } else { + MALLOC(nmp, struct nfsmount *, sizeof (struct nfsmount) + + argp->krbnamelen + argp->dirlen + argp->srvkrbnamelen + 2, + M_NEWNFSMNT, M_WAITOK); + bzero((caddr_t)nmp, sizeof (struct nfsmount) + + argp->krbnamelen + argp->dirlen + argp->srvkrbnamelen + 2); + TAILQ_INIT(&nmp->nm_bufq); + if (clval == 0) + clval = (u_int64_t)nfsboottime.tv_sec; + nmp->nm_clval = clval++; + nmp->nm_krbnamelen = argp->krbnamelen; + nmp->nm_dirpathlen = argp->dirlen; + nmp->nm_srvkrbnamelen = argp->srvkrbnamelen; + if (nmp->nm_dirpathlen > 0) { + /* + * Since we will be doing dirpath as root, + * set nm_uid to the real uid doing the mount, + * since that is normally the user with a valid TGT. 
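nfs_mount() above guards each user-supplied name the same way: reject lengths outside the buffer (leaving room for the terminator), copy exactly that many bytes, and NUL-terminate by hand because the source need not be terminated. The pattern in miniature, with memcpy() standing in for copyin():

#include <errno.h>
#include <string.h>

#define NAMEMAX 100

/* Copy a length-counted, possibly unterminated name into buf. */
static int
fetch_name(char *buf, const char *src, int len)
{
    if (len < 0 || len >= NAMEMAX)
        return EINVAL;              /* leave room for the NUL */
    memcpy(buf, src, (size_t)len);  /* copyin() analogue */
    buf[len] = '\0';
    return 0;
}

int
main(void)
{
    char name[NAMEMAX];

    return fetch_name(name, "krb5.example", 12);  /* exits 0 on success */
}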
+ */ + nmp->nm_uid = td->td_ucred->cr_ruid; + } else { + /* + * Just set to -1, so the first Op + * will set it later, to the uid of + * the process doing that (usually + * from a first open in the mount + * point). + */ + nmp->nm_uid = (uid_t)-1; + } + + /* Copy and null terminate all the names */ + if (nmp->nm_krbnamelen > 0) { + bcopy(krbname, nmp->nm_krbname, nmp->nm_krbnamelen); + nmp->nm_name[nmp->nm_krbnamelen] = '\0'; + } + if (nmp->nm_dirpathlen > 0) { + bcopy(dirpath, NFSMNT_DIRPATH(nmp), + nmp->nm_dirpathlen); + nmp->nm_name[nmp->nm_krbnamelen + nmp->nm_dirpathlen + + 1] = '\0'; + } + if (nmp->nm_srvkrbnamelen > 0) { + bcopy(srvkrbname, NFSMNT_SRVKRBNAME(nmp), + nmp->nm_srvkrbnamelen); + nmp->nm_name[nmp->nm_krbnamelen + nmp->nm_dirpathlen + + nmp->nm_srvkrbnamelen + 2] = '\0'; + } + nmp->nm_sockreq.nr_cred = crhold(cred); + mtx_init(&nmp->nm_sockreq.nr_mtx, "nfssock", NULL, MTX_DEF); + mp->mnt_data = nmp; + } + vfs_getnewfsid(mp); + nmp->nm_mountp = mp; + mtx_init(&nmp->nm_mtx, "NFSmount lock", NULL, MTX_DEF | MTX_DUPOK); + + /* + * V2 can only handle 32 bit filesizes. A 4GB-1 limit may be too + * high, depending on whether we end up with negative offsets in + * the client or server somewhere. 2GB-1 may be safer. + * + * For V3, ncl_fsinfo will adjust this as necessary. Assume maximum + * that we can handle until we find out otherwise. + * XXX Our "safe" limit on the client is what we can store in our + * buffer cache using signed(!) block numbers. + */ + if ((argp->flags & (NFSMNT_NFSV3 | NFSMNT_NFSV4)) == 0) + nmp->nm_maxfilesize = 0xffffffffLL; + else + nmp->nm_maxfilesize = (u_int64_t)0x80000000 * DEV_BSIZE - 1; + + nmp->nm_timeo = NFS_TIMEO; + nmp->nm_retry = NFS_RETRANS; + if ((argp->flags & (NFSMNT_NFSV3 | NFSMNT_NFSV4)) == 0) { + nmp->nm_wsize = NFS_WSIZE; + nmp->nm_rsize = NFS_RSIZE; + nmp->nm_readdirsize = NFS_READDIRSIZE; + } + nmp->nm_wcommitsize = hibufspace / (desiredvnodes / 1000); + nmp->nm_numgrps = NFS_MAXGRPS; + nmp->nm_readahead = NFS_DEFRAHEAD; + nmp->nm_tprintf_delay = nfs_tprintf_delay; + if (nmp->nm_tprintf_delay < 0) + nmp->nm_tprintf_delay = 0; + nmp->nm_tprintf_initial_delay = nfs_tprintf_initial_delay; + if (nmp->nm_tprintf_initial_delay < 0) + nmp->nm_tprintf_initial_delay = 0; + nmp->nm_fhsize = argp->fhsize; + if (nmp->nm_fhsize > 0) + bcopy((caddr_t)argp->fh, (caddr_t)nmp->nm_fh, argp->fhsize); + bcopy(hst, mp->mnt_stat.f_mntfromname, MNAMELEN); + nmp->nm_nam = nam; + /* Set up the sockets and per-host congestion */ + nmp->nm_sotype = argp->sotype; + nmp->nm_soproto = argp->proto; + nmp->nm_sockreq.nr_prog = NFS_PROG; + if ((argp->flags & NFSMNT_NFSV4)) + nmp->nm_sockreq.nr_vers = NFS_VER4; + else if ((argp->flags & NFSMNT_NFSV3)) + nmp->nm_sockreq.nr_vers = NFS_VER3; + else + nmp->nm_sockreq.nr_vers = NFS_VER2; + + nfs_decode_args(mp, nmp, argp, cred, td); + + /* + * For Connection based sockets (TCP,...) do the connect here, + * but make it interruptible, even for non-interuptible mounts. + */ + if ((nmp->nm_flag & NFSMNT_INT) == 0) { + nmp->nm_flag |= NFSMNT_INT; + clearintr = 1; + } else { + clearintr = 0; + } + if ((error = newnfs_connect(nmp, &nmp->nm_sockreq, cred, td, 0))) + goto bad; + if (clearintr) + nmp->nm_flag &= ~NFSMNT_INT; + + /* + * A reference count is needed on the nfsnode representing the + * remote root. If this object is not persistent, then backward + * traversals of the mount point (i.e. "..") will not work if + * the nfsnode gets flushed out of the cache. 
Ufs does not have + * this problem, because one can identify root inodes by their + * number == ROOTINO (2). + */ + if (nmp->nm_fhsize == 0 && (nmp->nm_flag & NFSMNT_NFSV4) && + nmp->nm_dirpathlen > 0) { + /* + * If the fhsize on the mount point == 0 for V4, the mount + * path needs to be looked up. + */ + trycnt = 3; + do { + error = nfsrpc_getdirpath(nmp, NFSMNT_DIRPATH(nmp), + cred, td); + if (error) + (void) nfs_catnap(PZERO, "nfsgetdirp"); + } while (error && --trycnt > 0); + if (error) { + error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0); + goto bad; + } + } + if (nmp->nm_fhsize > 0) { + error = ncl_nget(mp, nmp->nm_fh, nmp->nm_fhsize, &np); + if (error) + goto bad; + *vpp = NFSTOV(np); + + /* + * Get file attributes and transfer parameters for the + * mountpoint. This has the side effect of filling in + * (*vpp)->v_type with the correct value. + */ + ret = nfsrpc_getattrnovp(nmp, nmp->nm_fh, nmp->nm_fhsize, 1, + cred, td, &nfsva, NULL); + if (ret) { + /* + * Just set default values to get things going. + */ + NFSBZERO((caddr_t)&nfsva, sizeof (struct nfsvattr)); + nfsva.na_vattr.va_type = VDIR; + nfsva.na_vattr.va_mode = 0777; + nfsva.na_vattr.va_nlink = 100; + nfsva.na_vattr.va_uid = (uid_t)0; + nfsva.na_vattr.va_gid = (gid_t)0; + nfsva.na_vattr.va_fileid = 2; + nfsva.na_vattr.va_gen = 1; + nfsva.na_vattr.va_blocksize = NFS_FABLKSIZE; + nfsva.na_vattr.va_size = 512 * 1024; + } + (void) nfscl_loadattrcache(vpp, &nfsva, NULL, NULL, 0, 1); + if (argp->flags & NFSMNT_NFSV3) + ncl_fsinfo(nmp, *vpp, cred, td); + + /* + * Lose the lock but keep the ref. + */ + VOP_UNLOCK(*vpp, 0); + return (0); + } + error = EIO; + +bad: + newnfs_disconnect(&nmp->nm_sockreq); + crfree(nmp->nm_sockreq.nr_cred); + mtx_destroy(&nmp->nm_sockreq.nr_mtx); + mtx_destroy(&nmp->nm_mtx); + FREE(nmp, M_NEWNFSMNT); + FREE(nam, M_SONAME); + return (error); +} + +/* + * unmount system call + */ +static int +nfs_unmount(struct mount *mp, int mntflags, struct thread *td) +{ + struct nfsmount *nmp; + int error, flags = 0, trycnt = 0; + + if (mntflags & MNT_FORCE) + flags |= FORCECLOSE; + nmp = VFSTONFS(mp); + /* + * Goes something like this.. + * - Call vflush() to clear out vnodes for this filesystem + * - Close the socket + * - Free up the data structures + */ + /* In the forced case, cancel any outstanding requests. */ + if (mntflags & MNT_FORCE) { + error = newnfs_nmcancelreqs(nmp); + if (error) + goto out; + /* For a forced close, get rid of the renew thread now */ + nfscl_umount(nmp, td); + } + /* We hold 1 extra ref on the root vnode; see comment in mountnfs(). */ + do { + error = vflush(mp, 1, flags, td); + if ((mntflags & MNT_FORCE) && error != 0 && ++trycnt < 30) + (void) nfs_catnap(PSOCK, "newndm"); + } while ((mntflags & MNT_FORCE) && error != 0 && trycnt < 30); + if (error) + goto out; + + /* + * We are now committed to the unmount. 
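
Both the nfsrpc_getdirpath() loop and the forced-unmount vflush() loop above retry a failing operation a bounded number of times with a short nap between attempts. The pattern in isolation, as a runnable userland sketch in which the failing operation and the nap interval are simulated:

#include <stdio.h>
#include <unistd.h>

static int
flaky_op(void *arg)
{
	static int calls;

	(void)arg;
	return (++calls < 3 ? -1 : 0);	/* fail twice, then succeed */
}

static int
retry_op(int (*op)(void *), void *arg, int tries, unsigned nap_us)
{
	int error;

	do {
		error = op(arg);
		if (error != 0)
			usleep(nap_us);		/* cf. nfs_catnap() */
	} while (error != 0 && --tries > 0);
	return (error);
}

int
main(void)
{
	printf("result after retries: %d\n",
	    retry_op(flaky_op, NULL, 3, 1000));
	return (0);
}

The nap matters: a transient failure (server still booting, vnodes still busy) is given time to clear instead of being hammered in a tight loop.
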
+ */ + if ((mntflags & MNT_FORCE) == 0) + nfscl_umount(nmp, td); + newnfs_disconnect(&nmp->nm_sockreq); + crfree(nmp->nm_sockreq.nr_cred); + FREE(nmp->nm_nam, M_SONAME); + + mtx_destroy(&nmp->nm_sockreq.nr_mtx); + mtx_destroy(&nmp->nm_mtx); + FREE(nmp, M_NEWNFSMNT); +out: + return (error); +} + +/* + * Return root of a filesystem + */ +static int +nfs_root(struct mount *mp, int flags, struct vnode **vpp, struct thread *td) +{ + struct vnode *vp; + struct nfsmount *nmp; + struct nfsnode *np; + int error; + + nmp = VFSTONFS(mp); + error = ncl_nget(mp, nmp->nm_fh, nmp->nm_fhsize, &np); + if (error) + return error; + vp = NFSTOV(np); + /* + * Get transfer parameters and attributes for root vnode once. + */ + mtx_lock(&nmp->nm_mtx); + if (NFSHASNFSV3(nmp) && !NFSHASGOTFSINFO(nmp)) { + mtx_unlock(&nmp->nm_mtx); + ncl_fsinfo(nmp, vp, curthread->td_ucred, curthread); + } else + mtx_unlock(&nmp->nm_mtx); + if (vp->v_type == VNON) + vp->v_type = VDIR; + vp->v_vflag |= VV_ROOT; + *vpp = vp; + return (0); +} + +/* + * Flush out the buffer cache + */ +/* ARGSUSED */ +static int +nfs_sync(struct mount *mp, int waitfor, struct thread *td) +{ + struct vnode *vp, *mvp; + int error, allerror = 0; + + /* + * Force stale buffer cache information to be flushed. + */ + MNT_ILOCK(mp); +loop: + MNT_VNODE_FOREACH(vp, mp, mvp) { + VI_LOCK(vp); + MNT_IUNLOCK(mp); + /* XXX Racy bv_cnt check. */ + if (VOP_ISLOCKED(vp) || vp->v_bufobj.bo_dirty.bv_cnt == 0 || + waitfor == MNT_LAZY) { + VI_UNLOCK(vp); + MNT_ILOCK(mp); + continue; + } + if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td)) { + MNT_ILOCK(mp); + MNT_VNODE_FOREACH_ABORT_ILOCKED(mp, mvp); + goto loop; + } + error = VOP_FSYNC(vp, waitfor, td); + if (error) + allerror = error; + VOP_UNLOCK(vp, 0); + vrele(vp); + + MNT_ILOCK(mp); + } + MNT_IUNLOCK(mp); + return (allerror); +} + +static int +nfs_sysctl(struct mount *mp, fsctlop_t op, struct sysctl_req *req) +{ + struct nfsmount *nmp = VFSTONFS(mp); + struct vfsquery vq; + int error; + + bzero(&vq, sizeof(vq)); + switch (op) { +#if 0 + case VFS_CTL_NOLOCKS: + val = (nmp->nm_flag & NFSMNT_NOLOCKS) ? 
1 : 0; + if (req->oldptr != NULL) { + error = SYSCTL_OUT(req, &val, sizeof(val)); + if (error) + return (error); + } + if (req->newptr != NULL) { + error = SYSCTL_IN(req, &val, sizeof(val)); + if (error) + return (error); + if (val) + nmp->nm_flag |= NFSMNT_NOLOCKS; + else + nmp->nm_flag &= ~NFSMNT_NOLOCKS; + } + break; +#endif + case VFS_CTL_QUERY: + mtx_lock(&nmp->nm_mtx); + if (nmp->nm_state & NFSSTA_TIMEO) + vq.vq_flags |= VQ_NOTRESP; + mtx_unlock(&nmp->nm_mtx); +#if 0 + if (!(nmp->nm_flag & NFSMNT_NOLOCKS) && + (nmp->nm_state & NFSSTA_LOCKTIMEO)) + vq.vq_flags |= VQ_NOTRESPLOCK; +#endif + error = SYSCTL_OUT(req, &vq, sizeof(vq)); + break; + case VFS_CTL_TIMEO: + if (req->oldptr != NULL) { + error = SYSCTL_OUT(req, &nmp->nm_tprintf_initial_delay, + sizeof(nmp->nm_tprintf_initial_delay)); + if (error) + return (error); + } + if (req->newptr != NULL) { + error = vfs_suser(mp, req->td); + if (error) + return (error); + error = SYSCTL_IN(req, &nmp->nm_tprintf_initial_delay, + sizeof(nmp->nm_tprintf_initial_delay)); + if (error) + return (error); + if (nmp->nm_tprintf_initial_delay < 0) + nmp->nm_tprintf_initial_delay = 0; + } + break; + default: + return (ENOTSUP); + } + return (0); +} + diff --git a/sys/fs/nfsclient/nfs_clvnops.c b/sys/fs/nfsclient/nfs_clvnops.c new file mode 100644 index 0000000..62808a4 --- /dev/null +++ b/sys/fs/nfsclient/nfs_clvnops.c @@ -0,0 +1,3131 @@ +/*- + * Copyright (c) 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Rick Macklem at The University of Guelph. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * from nfs_vnops.c 8.16 (Berkeley) 5/27/95 + */ + +#include <sys/cdefs.h> +__FBSDID("$FreeBSD$"); + +/* + * vnode op calls for Sun NFS version 2, 3 and 4 + */ + +#include "opt_inet.h" + +#include <sys/param.h> +#include <sys/kernel.h> +#include <sys/systm.h> +#include <sys/resourcevar.h> +#include <sys/proc.h> +#include <sys/mount.h> +#include <sys/bio.h> +#include <sys/buf.h> +#include <sys/malloc.h> +#include <sys/mbuf.h> +#include <sys/namei.h> +#include <sys/socket.h> +#include <sys/vnode.h> +#include <sys/dirent.h> +#include <sys/fcntl.h> +#include <sys/lockf.h> +#include <sys/stat.h> +#include <sys/sysctl.h> +#include <sys/signalvar.h> + +#include <vm/vm.h> +#include <vm/vm_object.h> +#include <vm/vm_extern.h> +#include <vm/vm_object.h> + + +#include <fs/nfs/nfsport.h> +#include <fs/nfsclient/nfsnode.h> +#include <fs/nfsclient/nfsmount.h> +#include <fs/nfsclient/nfs.h> +#include <fs/nfsclient/nfs_lock.h> + +#include <net/if.h> +#include <netinet/vinet.h> +#include <netinet/in.h> +#include <netinet/in_var.h> + +/* Defs */ +#define TRUE 1 +#define FALSE 0 + +extern struct nfsstats newnfsstats; +MALLOC_DECLARE(M_NEWNFSREQ); +vop_advlock_t *ncl_advlock_p = ncl_dolock; + +/* + * Ifdef for FreeBSD-current merged buffer cache. It is unfortunate that these + * calls are not in getblk() and brelse() so that they would not be necessary + * here. + */ +#ifndef B_VMIO +#define vfs_busy_pages(bp, f) +#endif + +static vop_read_t nfsfifo_read; +static vop_write_t nfsfifo_write; +static vop_close_t nfsfifo_close; +static int nfs_setattrrpc(struct vnode *, struct vattr *, struct ucred *, + struct thread *); +static vop_lookup_t nfs_lookup; +static vop_create_t nfs_create; +static vop_mknod_t nfs_mknod; +static vop_open_t nfs_open; +static vop_close_t nfs_close; +static vop_access_t nfs_access; +static vop_getattr_t nfs_getattr; +static vop_setattr_t nfs_setattr; +static vop_read_t nfs_read; +static vop_fsync_t nfs_fsync; +static vop_remove_t nfs_remove; +static vop_link_t nfs_link; +static vop_rename_t nfs_rename; +static vop_mkdir_t nfs_mkdir; +static vop_rmdir_t nfs_rmdir; +static vop_symlink_t nfs_symlink; +static vop_readdir_t nfs_readdir; +static vop_strategy_t nfs_strategy; +static vop_lock1_t nfs_lock1; +static int nfs_lookitup(struct vnode *, char *, int, + struct ucred *, struct thread *, struct nfsnode **); +static int nfs_sillyrename(struct vnode *, struct vnode *, + struct componentname *); +static vop_access_t nfsspec_access; +static vop_readlink_t nfs_readlink; +static vop_print_t nfs_print; +static vop_advlock_t nfs_advlock; +static vop_advlockasync_t nfs_advlockasync; +#ifdef NFS4_ACL_EXTATTR_NAME +static vop_getacl_t nfs_getacl; +static vop_setacl_t nfs_setacl; +#endif + +/* + * Global vfs data structures for nfs + */ +struct vop_vector newnfs_vnodeops = { + .vop_default = &default_vnodeops, + .vop_access = nfs_access, + .vop_advlock = nfs_advlock, + .vop_advlockasync = nfs_advlockasync, + .vop_close = nfs_close, + .vop_create = nfs_create, + .vop_fsync = nfs_fsync, + .vop_getattr = nfs_getattr, + .vop_getpages = ncl_getpages, + .vop_putpages = ncl_putpages, + .vop_inactive = ncl_inactive, + .vop_link = nfs_link, + .vop_lock1 = nfs_lock1, + .vop_lookup = nfs_lookup, + .vop_mkdir = nfs_mkdir, + .vop_mknod = nfs_mknod, + .vop_open = nfs_open, + .vop_print = nfs_print, + .vop_read = nfs_read, + .vop_readdir = nfs_readdir, + .vop_readlink = nfs_readlink, + .vop_reclaim = ncl_reclaim, + .vop_remove = nfs_remove, + .vop_rename = nfs_rename, + .vop_rmdir = nfs_rmdir, + .vop_setattr = 
nfs_setattr, + .vop_strategy = nfs_strategy, + .vop_symlink = nfs_symlink, + .vop_write = ncl_write, +#ifdef NFS4_ACL_EXTATTR_NAME + .vop_getacl = nfs_getacl, + .vop_setacl = nfs_setacl, +#endif +}; + +struct vop_vector newnfs_fifoops = { + .vop_default = &fifo_specops, + .vop_access = nfsspec_access, + .vop_close = nfsfifo_close, + .vop_fsync = nfs_fsync, + .vop_getattr = nfs_getattr, + .vop_inactive = ncl_inactive, + .vop_print = nfs_print, + .vop_read = nfsfifo_read, + .vop_reclaim = ncl_reclaim, + .vop_setattr = nfs_setattr, + .vop_write = nfsfifo_write, +}; + +static int nfs_mknodrpc(struct vnode *dvp, struct vnode **vpp, + struct componentname *cnp, struct vattr *vap); +static int nfs_removerpc(struct vnode *dvp, struct vnode *vp, char *name, + int namelen, struct ucred *cred, struct thread *td); +static int nfs_renamerpc(struct vnode *fdvp, struct vnode *fvp, + char *fnameptr, int fnamelen, struct vnode *tdvp, struct vnode *tvp, + char *tnameptr, int tnamelen, struct ucred *cred, struct thread *td); +static int nfs_renameit(struct vnode *sdvp, struct vnode *svp, + struct componentname *scnp, struct sillyrename *sp); + +/* + * Global variables + */ +#define DIRHDSIZ (sizeof (struct dirent) - (MAXNAMLEN + 1)) + +SYSCTL_DECL(_vfs_newnfs); + +static int nfsaccess_cache_timeout = NFS_MAXATTRTIMO; +SYSCTL_INT(_vfs_newnfs, OID_AUTO, access_cache_timeout, CTLFLAG_RW, + &nfsaccess_cache_timeout, 0, "NFS ACCESS cache timeout"); + +static int nfs_prime_access_cache = 0; +SYSCTL_INT(_vfs_newnfs, OID_AUTO, prime_access_cache, CTLFLAG_RW, + &nfs_prime_access_cache, 0, + "Prime NFS ACCESS cache when fetching attributes"); + +static int newnfs_commit_on_close = 0; +SYSCTL_INT(_vfs_newnfs, OID_AUTO, commit_on_close, CTLFLAG_RW, + &newnfs_commit_on_close, 0, "write+commit on close, else only write"); + +static int nfs_clean_pages_on_close = 1; +SYSCTL_INT(_vfs_newnfs, OID_AUTO, clean_pages_on_close, CTLFLAG_RW, + &nfs_clean_pages_on_close, 0, "NFS clean dirty pages on close"); + +int newnfs_directio_enable = 0; +SYSCTL_INT(_vfs_newnfs, OID_AUTO, directio_enable, CTLFLAG_RW, + &newnfs_directio_enable, 0, "Enable NFS directio"); + +static int newnfs_neglookup_enable = 1; +SYSCTL_INT(_vfs_newnfs, OID_AUTO, neglookup_enable, CTLFLAG_RW, + &newnfs_neglookup_enable, 0, "Enable NFS negative lookup caching"); + +/* + * This sysctl allows other processes to mmap a file that has been opened + * O_DIRECT by a process. In general, having processes mmap the file while + * Direct IO is in progress can lead to Data Inconsistencies. But, we allow + * this by default to prevent DoS attacks - to prevent a malicious user from + * opening up files O_DIRECT preventing other users from mmap'ing these + * files. "Protected" environments where stricter consistency guarantees are + * required can disable this knob. The process that opened the file O_DIRECT + * cannot mmap() the file, because mmap'ed IO on an O_DIRECT open() is not + * meaningful. 
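
newnfs_vnodeops and newnfs_fifoops above are operation vectors in which any entry left unset falls through to the default vector (default_vnodeops or fifo_specops). A standalone sketch of that dispatch shape, with invented operation and table names:

#include <stdio.h>

struct ops {
	const struct ops *o_default;
	int (*o_read)(void);
	int (*o_write)(void);
};

static int std_read(void)  { printf("default read\n");  return (0); }
static int std_write(void) { printf("default write\n"); return (0); }
static int my_read(void)   { printf("specialized read\n"); return (0); }

static const struct ops default_ops = {
	.o_read = std_read,
	.o_write = std_write,
};

/* Only read is overridden; write falls through to the default. */
static const struct ops my_ops = {
	.o_default = &default_ops,
	.o_read = my_read,
};

static int
call_write(const struct ops *o)
{
	while (o->o_write == NULL)
		o = o->o_default;	/* walk to the default vector */
	return (o->o_write());
}

int
main(void)
{
	my_ops.o_read();	/* prints "specialized read" */
	call_write(&my_ops);	/* prints "default write" */
	return (0);
}

This is why newnfs_fifoops can stay short: everything it does not override is inherited from the generic fifo vector.
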
+ */ +int newnfs_directio_allow_mmap = 1; +SYSCTL_INT(_vfs_newnfs, OID_AUTO, directio_allow_mmap, CTLFLAG_RW, + &newnfs_directio_allow_mmap, 0, "Enable mmaped IO on file with O_DIRECT opens"); + +#if 0 +SYSCTL_INT(_vfs_newnfs, OID_AUTO, access_cache_hits, CTLFLAG_RD, + &newnfsstats.accesscache_hits, 0, "NFS ACCESS cache hit count"); + +SYSCTL_INT(_vfs_newnfs, OID_AUTO, access_cache_misses, CTLFLAG_RD, + &newnfsstats.accesscache_misses, 0, "NFS ACCESS cache miss count"); +#endif + +#define NFSACCESS_ALL (NFSACCESS_READ | NFSACCESS_MODIFY \ + | NFSACCESS_EXTEND | NFSACCESS_EXECUTE \ + | NFSACCESS_DELETE | NFSACCESS_LOOKUP) + +/* + * SMP Locking Note : + * The list of locks after the description of the lock is the ordering + * of other locks acquired with the lock held. + * np->n_mtx : Protects the fields in the nfsnode. + VM Object Lock + VI_MTX (acquired indirectly) + * nmp->nm_mtx : Protects the fields in the nfsmount. + rep->r_mtx + * ncl_iod_mutex : Global lock, protects shared nfsiod state. + * nfs_reqq_mtx : Global lock, protects the nfs_reqq list. + nmp->nm_mtx + rep->r_mtx + * rep->r_mtx : Protects the fields in an nfsreq. + */ + +static int +nfs34_access_otw(struct vnode *vp, int wmode, struct thread *td, + struct ucred *cred, u_int32_t *retmode) +{ + int error = 0, attrflag, i, lrupos; + u_int32_t rmode; + struct nfsnode *np = VTONFS(vp); + struct nfsvattr nfsva; + + error = nfsrpc_accessrpc(vp, wmode, cred, td, &nfsva, &attrflag, + &rmode, NULL); + if (attrflag) + (void) nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 1); + if (!error) { + lrupos = 0; + mtx_lock(&np->n_mtx); + for (i = 0; i < NFS_ACCESSCACHESIZE; i++) { + if (np->n_accesscache[i].uid == cred->cr_uid) { + np->n_accesscache[i].mode = rmode; + np->n_accesscache[i].stamp = time_second; + break; + } + if (i > 0 && np->n_accesscache[i].stamp < + np->n_accesscache[lrupos].stamp) + lrupos = i; + } + if (i == NFS_ACCESSCACHESIZE) { + np->n_accesscache[lrupos].uid = cred->cr_uid; + np->n_accesscache[lrupos].mode = rmode; + np->n_accesscache[lrupos].stamp = time_second; + } + mtx_unlock(&np->n_mtx); + if (retmode != NULL) + *retmode = rmode; + } else if (NFS_ISV4(vp)) { + error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0); + } + return (error); +} + +/* + * nfs access vnode op. + * For nfs version 2, just return ok. File accesses may fail later. + * For nfs version 3, use the access rpc to check accessibility. If file modes + * are changed on the server, accesses might still fail later. + */ +static int +nfs_access(struct vop_access_args *ap) +{ + struct vnode *vp = ap->a_vp; + int error = 0, i, gotahit; + u_int32_t mode, wmode, rmode; + int v34 = NFS_ISV34(vp); + struct nfsnode *np = VTONFS(vp); + + /* + * Disallow write attempts on filesystems mounted read-only; + * unless the file is a socket, fifo, or a block or character + * device resident on the filesystem. + */ + if ((ap->a_accmode & (VWRITE | VAPPEND +#ifdef NFS4_ACL_EXTATTR_NAME + | VWRITE_NAMED_ATTRS | VDELETE_CHILD | VWRITE_ATTRIBUTES | + VDELETE | VWRITE_ACL | VWRITE_OWNER +#endif + )) != 0 && (vp->v_mount->mnt_flag & MNT_RDONLY) != 0) { + switch (vp->v_type) { + case VREG: + case VDIR: + case VLNK: + return (EROFS); + default: + break; + } + } + /* + * For nfs v3 or v4, check to see if we have done this recently, and if + * so return our cached result instead of making an ACCESS call. + * If not, do an access rpc, otherwise you are stuck emulating + * ufs_access() locally using the vattr. 
This may not be correct, + * since the server may apply other access criteria such as + * client uid-->server uid mapping that we do not know about. + */ + if (v34) { + if (ap->a_accmode & VREAD) + mode = NFSACCESS_READ; + else + mode = 0; + if (vp->v_type != VDIR) { + if (ap->a_accmode & VWRITE) + mode |= (NFSACCESS_MODIFY | NFSACCESS_EXTEND); + if (ap->a_accmode & VAPPEND) + mode |= NFSACCESS_EXTEND; + if (ap->a_accmode & VEXEC) + mode |= NFSACCESS_EXECUTE; +#ifdef NFS4_ACL_EXTATTR_NAME + if (ap->a_accmode & VDELETE) + mode |= NFSACCESS_DELETE; +#endif + } else { + if (ap->a_accmode & VWRITE) + mode |= (NFSACCESS_MODIFY | NFSACCESS_EXTEND); + if (ap->a_accmode & VAPPEND) + mode |= NFSACCESS_EXTEND; + if (ap->a_accmode & VEXEC) + mode |= NFSACCESS_LOOKUP; +#ifdef NFS4_ACL_EXTATTR_NAME + if (ap->a_accmode & VDELETE) + mode |= NFSACCESS_DELETE; + if (ap->a_accmode & VDELETE_CHILD) + mode |= NFSACCESS_MODIFY; +#endif + } + /* XXX safety belt, only make blanket request if caching */ + if (nfsaccess_cache_timeout > 0) { + wmode = NFSACCESS_READ | NFSACCESS_MODIFY | + NFSACCESS_EXTEND | NFSACCESS_EXECUTE | + NFSACCESS_DELETE | NFSACCESS_LOOKUP; + } else { + wmode = mode; + } + + /* + * Does our cached result allow us to give a definite yes to + * this request? + */ + gotahit = 0; + mtx_lock(&np->n_mtx); + for (i = 0; i < NFS_ACCESSCACHESIZE; i++) { + if (ap->a_cred->cr_uid == np->n_accesscache[i].uid) { + if (time_second < (np->n_accesscache[i].stamp + + nfsaccess_cache_timeout) && + (np->n_accesscache[i].mode & mode) == mode) { + NFSINCRGLOBAL(newnfsstats.accesscache_hits); + gotahit = 1; + } + break; + } + } + mtx_unlock(&np->n_mtx); + if (gotahit == 0) { + /* + * Either a no, or a don't know. Go to the wire. + */ + NFSINCRGLOBAL(newnfsstats.accesscache_misses); + error = nfs34_access_otw(vp, wmode, ap->a_td, + ap->a_cred, &rmode); + if (!error && + (rmode & mode) != mode) + error = EACCES; + } + return (error); + } else { + if ((error = nfsspec_access(ap)) != 0) { + return (error); + } + /* + * Attempt to prevent a mapped root from accessing a file + * which it shouldn't. We try to read a byte from the file + * if the user is root and the file is not zero length. + * After calling nfsspec_access, we should have the correct + * file size cached. + */ + mtx_lock(&np->n_mtx); + if (ap->a_cred->cr_uid == 0 && (ap->a_accmode & VREAD) + && VTONFS(vp)->n_size > 0) { + struct iovec aiov; + struct uio auio; + char buf[1]; + + mtx_unlock(&np->n_mtx); + aiov.iov_base = buf; + aiov.iov_len = 1; + auio.uio_iov = &aiov; + auio.uio_iovcnt = 1; + auio.uio_offset = 0; + auio.uio_resid = 1; + auio.uio_segflg = UIO_SYSSPACE; + auio.uio_rw = UIO_READ; + auio.uio_td = ap->a_td; + + if (vp->v_type == VREG) + error = ncl_readrpc(vp, &auio, ap->a_cred); + else if (vp->v_type == VDIR) { + char* bp; + bp = malloc(NFS_DIRBLKSIZ, M_TEMP, M_WAITOK); + aiov.iov_base = bp; + aiov.iov_len = auio.uio_resid = NFS_DIRBLKSIZ; + error = ncl_readdirrpc(vp, &auio, ap->a_cred, + ap->a_td); + free(bp, M_TEMP); + } else if (vp->v_type == VLNK) + error = ncl_readlinkrpc(vp, &auio, ap->a_cred); + else + error = EACCES; + } else + mtx_unlock(&np->n_mtx); + return (error); + } +} + + +/* + * nfs open vnode op + * Check to see if the type is ok + * and that deletion is not in progress. + * For paged in text files, you will need to flush the page cache + * if consistency is lost. 
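
nfs34_access_otw() above refreshes a small fixed-size per-uid cache of ACCESS results, evicting the slot with the oldest stamp when none matches. A runnable sketch of that update rule, with sizes and types simplified:

#include <stdio.h>
#include <time.h>

#define CACHESZ	4	/* cf. NFS_ACCESSCACHESIZE */

struct acc {
	unsigned	uid;
	unsigned	mode;
	time_t		stamp;	/* 0 means the slot is empty */
};

static void
acc_update(struct acc *c, unsigned uid, unsigned mode)
{
	int i, lru = 0;

	for (i = 0; i < CACHESZ; i++) {
		if (c[i].stamp != 0 && c[i].uid == uid) {
			c[i].mode = mode;	/* refresh the matching slot */
			c[i].stamp = time(NULL);
			return;
		}
		if (i > 0 && c[i].stamp < c[lru].stamp)
			lru = i;		/* track the oldest slot */
	}
	c[lru].uid = uid;			/* no match: evict the oldest */
	c[lru].mode = mode;
	c[lru].stamp = time(NULL);
}

int
main(void)
{
	struct acc cache[CACHESZ] = {{0, 0, 0}};
	int i;

	acc_update(cache, 1001, 0x3f);
	acc_update(cache, 1002, 0x0f);
	acc_update(cache, 1001, 0x07);	/* refreshes, does not evict */
	for (i = 0; i < CACHESZ; i++)
		printf("slot %d: uid %u mode 0x%x\n", i, cache[i].uid,
		    cache[i].mode);
	return (0);
}
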
+ */ +/* ARGSUSED */ +static int +nfs_open(struct vop_open_args *ap) +{ + struct vnode *vp = ap->a_vp; + struct nfsnode *np = VTONFS(vp); + struct vattr vattr; + int error; + int fmode = ap->a_mode; + + if (vp->v_type != VREG && vp->v_type != VDIR && vp->v_type != VLNK) + return (EOPNOTSUPP); + + /* + * For NFSv4, we need to do the Open Op before cache validation, + * so that we conform to RFC3530 Sec. 9.3.1. + */ + if (NFS_ISV4(vp)) { + error = nfsrpc_open(vp, fmode, ap->a_cred, ap->a_td); + if (error) { + error = nfscl_maperr(ap->a_td, error, (uid_t)0, + (gid_t)0); + return (error); + } + } + + /* + * Now, if this Open will be doing reading, re-validate/flush the + * cache, so that Close/Open coherency is maintained. + */ + if ((fmode & FREAD) && (!NFS_ISV4(vp) || nfscl_mustflush(vp))) { + mtx_lock(&np->n_mtx); + if (np->n_flag & NMODIFIED) { + mtx_unlock(&np->n_mtx); + error = ncl_vinvalbuf(vp, V_SAVE, ap->a_td, 1); + if (error == EINTR || error == EIO) { + if (NFS_ISV4(vp)) + (void) nfsrpc_close(vp, ap->a_cred, + ap->a_td); + return (error); + } + np->n_attrstamp = 0; + if (vp->v_type == VDIR) + np->n_direofoffset = 0; + error = VOP_GETATTR(vp, &vattr, ap->a_cred); + if (error) { + if (NFS_ISV4(vp)) + (void) nfsrpc_close(vp, ap->a_cred, + ap->a_td); + return (error); + } + mtx_lock(&np->n_mtx); + np->n_mtime = vattr.va_mtime; + if (NFS_ISV4(vp)) + np->n_change = vattr.va_filerev; + mtx_unlock(&np->n_mtx); + } else { + struct thread *td = curthread; + + if (np->n_ac_ts_syscalls != td->td_syscalls || + np->n_ac_ts_tid != td->td_tid || + td->td_proc == NULL || + np->n_ac_ts_pid != td->td_proc->p_pid) { + np->n_attrstamp = 0; + } + mtx_unlock(&np->n_mtx); + error = VOP_GETATTR(vp, &vattr, ap->a_cred); + if (error) { + if (NFS_ISV4(vp)) + (void) nfsrpc_close(vp, ap->a_cred, + ap->a_td); + return (error); + } + mtx_lock(&np->n_mtx); + if ((NFS_ISV4(vp) && np->n_change != vattr.va_filerev) || + NFS_TIMESPEC_COMPARE(&np->n_mtime, &vattr.va_mtime)) { + if (vp->v_type == VDIR) + np->n_direofoffset = 0; + mtx_unlock(&np->n_mtx); + error = ncl_vinvalbuf(vp, V_SAVE, ap->a_td, 1); + if (error == EINTR || error == EIO) { + if (NFS_ISV4(vp)) + (void) nfsrpc_close(vp, + ap->a_cred, ap->a_td); + return (error); + } + mtx_lock(&np->n_mtx); + np->n_mtime = vattr.va_mtime; + if (NFS_ISV4(vp)) + np->n_change = vattr.va_filerev; + } + mtx_unlock(&np->n_mtx); + } + } + + /* + * If the object has >= 1 O_DIRECT active opens, we disable caching. + */ + if (newnfs_directio_enable && (fmode & O_DIRECT) && (vp->v_type == VREG)) { + if (np->n_directio_opens == 0) { + error = ncl_vinvalbuf(vp, V_SAVE, ap->a_td, 1); + if (error) { + if (NFS_ISV4(vp)) + (void) nfsrpc_close(vp, ap->a_cred, + ap->a_td); + return (error); + } + mtx_lock(&np->n_mtx); + np->n_flag |= NNONCACHE; + } else { + mtx_lock(&np->n_mtx); + } + np->n_directio_opens++; + mtx_unlock(&np->n_mtx); + } + vnode_create_vobject(vp, vattr.va_size, ap->a_td); + return (0); +} + +/* + * nfs close vnode op + * What an NFS client should do upon close after writing is a debatable issue. + * Most NFS clients push delayed writes to the server upon close, basically for + * two reasons: + * 1 - So that any write errors may be reported back to the client process + * doing the close system call. By far the two most likely errors are + * NFSERR_NOSPC and NFSERR_DQUOT to indicate space allocation failure. + * 2 - To put a worst case upper bound on cache inconsistency between + * multiple clients for the file. 
+ * There is also a consistency problem for Version 2 of the protocol w.r.t. + * not being able to tell if other clients are writing a file concurrently, + * since there is no way of knowing if the changed modify time in the reply + * is only due to the write for this client. + * (NFS Version 3 provides weak cache consistency data in the reply that + * should be sufficient to detect and handle this case.) + * + * The current code does the following: + * for NFS Version 2 - play it safe and flush/invalidate all dirty buffers + * for NFS Version 3 - flush dirty buffers to the server but don't invalidate + * or commit them (this satisfies 1 and 2 except for the + * case where the server crashes after this close but + * before the commit RPC, which is felt to be "good + * enough". Changing the last argument to ncl_flush() to + * a 1 would force a commit operation, if it is felt a + * commit is necessary now. + * for NFS Version 4 - flush the dirty buffers and commit them, if + * nfscl_mustflush() says this is necessary. + * It is necessary if there is no write delegation held, + * in order to satisfy open/close coherency. + * If the file isn't cached on local stable storage, + * it may be necessary in order to detect "out of space" + * errors from the server, if the write delegation + * issued by the server doesn't allow the file to grow. + */ +/* ARGSUSED */ +static int +nfs_close(struct vop_close_args *ap) +{ + struct vnode *vp = ap->a_vp; + struct nfsnode *np = VTONFS(vp); + struct nfsvattr nfsva; + struct ucred *cred; + int error = 0, ret, localcred = 0; + int fmode = ap->a_fflag; + + if ((vp->v_mount->mnt_kern_flag & MNTK_UNMOUNTF)) + return (0); + /* + * During shutdown, a_cred isn't valid, so just use root. + */ + if (ap->a_cred == NOCRED) { + cred = newnfs_getcred(); + localcred = 1; + } else { + cred = ap->a_cred; + } + if (vp->v_type == VREG) { + /* + * Examine and clean dirty pages, regardless of NMODIFIED. + * This closes a major hole in close-to-open consistency. + * We want to push out all dirty pages (and buffers) on + * close, regardless of whether they were dirtied by + * mmap'ed writes or via write(). + */ + if (nfs_clean_pages_on_close && vp->v_object) { + VM_OBJECT_LOCK(vp->v_object); + vm_object_page_clean(vp->v_object, 0, 0, 0); + VM_OBJECT_UNLOCK(vp->v_object); + } + mtx_lock(&np->n_mtx); + if (np->n_flag & NMODIFIED) { + mtx_unlock(&np->n_mtx); + if (NFS_ISV3(vp)) { + /* + * Under NFSv3 we have dirty buffers to dispose of. We + * must flush them to the NFS server. We have the option + * of waiting all the way through the commit rpc or just + * waiting for the initial write. The default is to only + * wait through the initial write so the data is in the + * server's cache, which is roughly similar to the state + * a standard disk subsystem leaves the file in on close(). + * + * We cannot clear the NMODIFIED bit in np->n_flag due to + * potential races with other processes, and certainly + * cannot clear it if we don't commit. + * These races occur when there is no longer the old + * traditional vnode locking implemented for Vnode Ops. + */ + int cm = newnfs_commit_on_close ? 1 : 0; + error = ncl_flush(vp, MNT_WAIT, cred, ap->a_td, cm); + /* np->n_flag &= ~NMODIFIED; */ + } else if (NFS_ISV4(vp)) { + int cm; + if (newnfs_commit_on_close != 0) + cm = 1; + else + cm = nfscl_mustflush(vp); + error = ncl_flush(vp, MNT_WAIT, cred, ap->a_td, cm); + /* as above w.r.t. 
races when clearing NMODIFIED */ + /* np->n_flag &= ~NMODIFIED; */ + } else + error = ncl_vinvalbuf(vp, V_SAVE, ap->a_td, 1); + mtx_lock(&np->n_mtx); + } + /* + * Invalidate the attribute cache in all cases. + * An open is going to fetch fresh attrs any way, other procs + * on this node that have file open will be forced to do an + * otw attr fetch, but this is safe. + * --> A user found that their RPC count dropped by 20% when + * this was commented out and I can't see any requirement + * for it, so I've disabled it when negative lookups are + * enabled. (What does this have to do with negative lookup + * caching? Well nothing, except it was reported by the + * same user that needed negative lookup caching and I wanted + * there to be a way to disable it via sysctl to see if it + * is the cause of some caching/coherency issue that might + * crop up.) + */ + if (newnfs_neglookup_enable == 0) + np->n_attrstamp = 0; + if (np->n_flag & NWRITEERR) { + np->n_flag &= ~NWRITEERR; + error = np->n_error; + } + mtx_unlock(&np->n_mtx); + } + + if (NFS_ISV4(vp)) { + /* + * Get attributes so "change" is up to date. + */ + if (!error) { + ret = nfsrpc_getattr(vp, cred, ap->a_td, &nfsva, + NULL); + if (!ret) { + np->n_change = nfsva.na_filerev; + (void) nfscl_loadattrcache(&vp, &nfsva, NULL, + NULL, 0, 0); + } + } + + /* + * and do the close. + */ + ret = nfsrpc_close(vp, cred, ap->a_td); + if (!error && ret) + error = ret; + if (error) + error = nfscl_maperr(ap->a_td, error, (uid_t)0, + (gid_t)0); + } + if (newnfs_directio_enable) + KASSERT((np->n_directio_asyncwr == 0), + ("nfs_close: dirty unflushed (%d) directio buffers\n", + np->n_directio_asyncwr)); + if (newnfs_directio_enable && (fmode & O_DIRECT) && (vp->v_type == VREG)) { + mtx_lock(&np->n_mtx); + KASSERT((np->n_directio_opens > 0), + ("nfs_close: unexpectedly value (0) of n_directio_opens\n")); + np->n_directio_opens--; + if (np->n_directio_opens == 0) + np->n_flag &= ~NNONCACHE; + mtx_unlock(&np->n_mtx); + } + if (localcred) + NFSFREECRED(cred); + return (error); +} + +/* + * nfs getattr call from vfs. + */ +static int +nfs_getattr(struct vop_getattr_args *ap) +{ + struct vnode *vp = ap->a_vp; + struct thread *td = curthread; /* XXX */ + struct nfsnode *np = VTONFS(vp); + int error = 0; + struct nfsvattr nfsva; + struct vattr *vap = ap->a_vap; + struct vattr vattr; + + /* + * Update local times for special files. + */ + mtx_lock(&np->n_mtx); + if (np->n_flag & (NACC | NUPD)) + np->n_flag |= NCHG; + mtx_unlock(&np->n_mtx); + /* + * First look in the cache. + */ + if (ncl_getattrcache(vp, &vattr) == 0) { + vap->va_type = vattr.va_type; + vap->va_mode = vattr.va_mode; + vap->va_nlink = vattr.va_nlink; + vap->va_uid = vattr.va_uid; + vap->va_gid = vattr.va_gid; + vap->va_fsid = vattr.va_fsid; + vap->va_fileid = vattr.va_fileid; + vap->va_size = vattr.va_size; + vap->va_blocksize = vattr.va_blocksize; + vap->va_atime = vattr.va_atime; + vap->va_mtime = vattr.va_mtime; + vap->va_ctime = vattr.va_ctime; + vap->va_gen = vattr.va_gen; + vap->va_flags = vattr.va_flags; + vap->va_rdev = vattr.va_rdev; + vap->va_bytes = vattr.va_bytes; + vap->va_filerev = vattr.va_filerev; + /* + * Get the local modify time for the case of a write + * delegation. 
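
nfs_getattr() above answers from the timestamped attribute cache when it can, and only on a miss goes over the wire and refills the cache. The lookup-else-fetch-and-fill shape reduced to a userland sketch; the RPC here is simulated:

#include <stdio.h>
#include <time.h>

struct attrs {
	long	size;
	time_t	fetched;	/* 0 means nothing cached yet */
};

static int
fetch_attrs_rpc(struct attrs *a)	/* pretend over-the-wire GETATTR */
{
	a->size = 42;
	a->fetched = time(NULL);
	return (0);
}

static int
getattr_cached(struct attrs *cache, struct attrs *out, int timeout)
{
	if (cache->fetched != 0 && time(NULL) < cache->fetched + timeout) {
		*out = *cache;			/* cache hit */
		return (0);
	}
	if (fetch_attrs_rpc(cache) != 0)	/* miss: go to the wire */
		return (-1);
	*out = *cache;
	return (0);
}

int
main(void)
{
	struct attrs cache = {0, 0}, a;

	getattr_cached(&cache, &a, 30);	/* miss: fetches and fills cache */
	getattr_cached(&cache, &a, 30);	/* served from the cache */
	printf("size %ld\n", a.size);
	return (0);
}
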
+ */ + nfscl_deleggetmodtime(vp, &vap->va_mtime); + return (0); + } + + if (NFS_ISV34(vp) && nfs_prime_access_cache && + nfsaccess_cache_timeout > 0) { + NFSINCRGLOBAL(newnfsstats.accesscache_misses); + nfs34_access_otw(vp, NFSACCESS_ALL, td, ap->a_cred, NULL); + if (ncl_getattrcache(vp, ap->a_vap) == 0) { + nfscl_deleggetmodtime(vp, &ap->a_vap->va_mtime); + return (0); + } + } + error = nfsrpc_getattr(vp, ap->a_cred, td, &nfsva, NULL); + if (!error) + error = nfscl_loadattrcache(&vp, &nfsva, vap, NULL, 0, 0); + if (!error) { + /* + * Get the local modify time for the case of a write + * delegation. + */ + nfscl_deleggetmodtime(vp, &vap->va_mtime); + } else if (NFS_ISV4(vp)) { + error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0); + } + return (error); +} + +/* + * nfs setattr call. + */ +static int +nfs_setattr(struct vop_setattr_args *ap) +{ + struct vnode *vp = ap->a_vp; + struct nfsnode *np = VTONFS(vp); + struct thread *td = curthread; /* XXX */ + struct vattr *vap = ap->a_vap; + int error = 0; + u_quad_t tsize; + +#ifndef nolint + tsize = (u_quad_t)0; +#endif + + /* + * Setting of flags and marking of atimes are not supported. + */ + if (vap->va_flags != VNOVAL) + return (EOPNOTSUPP); + + /* + * Disallow write attempts if the filesystem is mounted read-only. + */ + if ((vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL || + vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL || + vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) && + (vp->v_mount->mnt_flag & MNT_RDONLY)) + return (EROFS); + if (vap->va_size != VNOVAL) { + switch (vp->v_type) { + case VDIR: + return (EISDIR); + case VCHR: + case VBLK: + case VSOCK: + case VFIFO: + if (vap->va_mtime.tv_sec == VNOVAL && + vap->va_atime.tv_sec == VNOVAL && + vap->va_mode == (mode_t)VNOVAL && + vap->va_uid == (uid_t)VNOVAL && + vap->va_gid == (gid_t)VNOVAL) + return (0); + vap->va_size = VNOVAL; + break; + default: + /* + * Disallow write attempts if the filesystem is + * mounted read-only. + */ + if (vp->v_mount->mnt_flag & MNT_RDONLY) + return (EROFS); + /* + * We run vnode_pager_setsize() early (why?), + * we must set np->n_size now to avoid vinvalbuf + * V_SAVE races that might setsize a lower + * value. + */ + mtx_lock(&np->n_mtx); + tsize = np->n_size; + mtx_unlock(&np->n_mtx); + error = ncl_meta_setsize(vp, ap->a_cred, td, + vap->va_size); + mtx_lock(&np->n_mtx); + if (np->n_flag & NMODIFIED) { + tsize = np->n_size; + mtx_unlock(&np->n_mtx); + if (vap->va_size == 0) + error = ncl_vinvalbuf(vp, 0, td, 1); + else + error = ncl_vinvalbuf(vp, V_SAVE, td, 1); + if (error) { + vnode_pager_setsize(vp, tsize); + return (error); + } + /* + * Call nfscl_delegmodtime() to set the modify time + * locally, as required. + */ + nfscl_delegmodtime(vp); + } else + mtx_unlock(&np->n_mtx); + /* + * np->n_size has already been set to vap->va_size + * in ncl_meta_setsize(). We must set it again since + * nfs_loadattrcache() could be called through + * ncl_meta_setsize() and could modify np->n_size. 
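
The truncation path in nfs_setattr() remembers the old size before resizing locally, and the error path below restores it if the RPC fails. The save/apply/rollback shape as a tiny runnable sketch, with the failing RPC simulated:

#include <stdio.h>

static long n_size = 8192;	/* stand-in for np->n_size */

static int
setattr_rpc(long newsize)
{
	(void)newsize;
	return (-1);		/* simulate a server-side failure */
}

static int
truncate_file(long newsize)
{
	long tsize = n_size;	/* remember the old size */
	int error;

	n_size = newsize;	/* apply locally first, cf. ncl_meta_setsize() */
	error = setattr_rpc(newsize);
	if (error != 0)
		n_size = tsize;	/* roll the local state back */
	return (error);
}

int
main(void)
{
	/* Prints "truncate: -1, size now 8192": the rollback ran. */
	printf("truncate: %d, size now %ld\n", truncate_file(0), n_size);
	return (0);
}
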
+ */ + mtx_lock(&np->n_mtx); + np->n_vattr.na_size = np->n_size = vap->va_size; + mtx_unlock(&np->n_mtx); + }; + } else { + mtx_lock(&np->n_mtx); + if ((vap->va_mtime.tv_sec != VNOVAL || vap->va_atime.tv_sec != VNOVAL) && + (np->n_flag & NMODIFIED) && vp->v_type == VREG) { + mtx_unlock(&np->n_mtx); + if ((error = ncl_vinvalbuf(vp, V_SAVE, td, 1)) != 0 && + (error == EINTR || error == EIO)) + return (error); + } else + mtx_unlock(&np->n_mtx); + } + error = nfs_setattrrpc(vp, vap, ap->a_cred, td); + if (error && vap->va_size != VNOVAL) { + mtx_lock(&np->n_mtx); + np->n_size = np->n_vattr.na_size = tsize; + vnode_pager_setsize(vp, tsize); + mtx_unlock(&np->n_mtx); + } + return (error); +} + +/* + * Do an nfs setattr rpc. + */ +static int +nfs_setattrrpc(struct vnode *vp, struct vattr *vap, struct ucred *cred, + struct thread *td) +{ + struct nfsnode *np = VTONFS(vp); + int error, ret, attrflag, i; + struct nfsvattr nfsva; + + if (NFS_ISV34(vp)) { + mtx_lock(&np->n_mtx); + for (i = 0; i < NFS_ACCESSCACHESIZE; i++) + np->n_accesscache[i].stamp = 0; + np->n_flag |= NDELEGMOD; + mtx_unlock(&np->n_mtx); + } + error = nfsrpc_setattr(vp, vap, NULL, cred, td, &nfsva, &attrflag, + NULL); + if (attrflag) { + ret = nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 1); + if (ret && !error) + error = ret; + } + if (error && NFS_ISV4(vp)) + error = nfscl_maperr(td, error, vap->va_uid, vap->va_gid); + return (error); +} + +/* + * nfs lookup call, one step at a time... + * First look in cache + * If not found, unlock the directory nfsnode and do the rpc + */ +static int +nfs_lookup(struct vop_lookup_args *ap) +{ + struct componentname *cnp = ap->a_cnp; + struct vnode *dvp = ap->a_dvp; + struct vnode **vpp = ap->a_vpp; + int flags = cnp->cn_flags; + struct vnode *newvp; + struct nfsmount *nmp; + struct nfsnode *np; + int error = 0, attrflag, dattrflag; + struct thread *td = cnp->cn_thread; + struct nfsfh *nfhp; + struct nfsvattr dnfsva, nfsva; + + *vpp = NULLVP; + if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) && + (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME)) + return (EROFS); + if (dvp->v_type != VDIR) + return (ENOTDIR); + nmp = VFSTONFS(dvp->v_mount); + np = VTONFS(dvp); + + /* For NFSv4, wait until any remove is done. 
*/ + mtx_lock(&np->n_mtx); + while (NFSHASNFSV4(nmp) && (np->n_flag & NREMOVEINPROG)) { + np->n_flag |= NREMOVEWANT; + (void) msleep((caddr_t)np, &np->n_mtx, PZERO, "nfslkup", 0); + } + mtx_unlock(&np->n_mtx); + + if ((error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred, td)) != 0) + return (error); + if ((error = cache_lookup(dvp, vpp, cnp)) && + (error != ENOENT || newnfs_neglookup_enable != 0)) { + struct vattr vattr; + + if (error == ENOENT) { + if (!VOP_GETATTR(dvp, &vattr, cnp->cn_cred) && + vattr.va_mtime.tv_sec == np->n_dmtime) { + NFSINCRGLOBAL(newnfsstats.lookupcache_hits); + return (ENOENT); + } + cache_purge_negative(dvp); + np->n_dmtime = 0; + } else { + newvp = *vpp; + if (nfscl_nodeleg(newvp, 0) == 0 || + (!VOP_GETATTR(newvp, &vattr, cnp->cn_cred) && + vattr.va_ctime.tv_sec==VTONFS(newvp)->n_ctime)) { + NFSINCRGLOBAL(newnfsstats.lookupcache_hits); + if (cnp->cn_nameiop != LOOKUP && + (flags & ISLASTCN)) + cnp->cn_flags |= SAVENAME; + return (0); + } + cache_purge(newvp); + if (dvp != newvp) + vput(newvp); + else + vrele(newvp); + *vpp = NULLVP; + } + } + error = 0; + newvp = NULLVP; + NFSINCRGLOBAL(newnfsstats.lookupcache_misses); + error = nfsrpc_lookup(dvp, cnp->cn_nameptr, cnp->cn_namelen, + cnp->cn_cred, td, &dnfsva, &nfsva, &nfhp, &attrflag, &dattrflag, + NULL); + if (dattrflag) + (void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1); + if (error) { + if (newnfs_neglookup_enable != 0 && + error == ENOENT && (cnp->cn_flags & MAKEENTRY) && + cnp->cn_nameiop != CREATE) { + if (np->n_dmtime == 0) + np->n_dmtime = np->n_vattr.na_mtime.tv_sec; + cache_enter(dvp, NULL, cnp); + } + if (newvp != NULLVP) { + vput(newvp); + *vpp = NULLVP; + } + if ((cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME) && + (flags & ISLASTCN) && error == ENOENT) { + if (dvp->v_mount->mnt_flag & MNT_RDONLY) + error = EROFS; + else + error = EJUSTRETURN; + } + if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN)) + cnp->cn_flags |= SAVENAME; + if (NFS_ISV4(dvp)) + error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0); + return (error); + } + + /* + * Handle RENAME case... 
+ */ + if (cnp->cn_nameiop == RENAME && (flags & ISLASTCN)) { + if (NFS_CMPFH(np, nfhp->nfh_fh, nfhp->nfh_len)) { + FREE((caddr_t)nfhp, M_NFSFH); + return (EISDIR); + } + error = nfscl_nget(dvp->v_mount, dvp, nfhp, cnp, td, &np, + NULL); + if (error) + return (error); + newvp = NFSTOV(np); + if (attrflag) + (void) nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL, + 0, 1); + *vpp = newvp; + cnp->cn_flags |= SAVENAME; + return (0); + } + + if ((flags & ISDOTDOT)) { + VOP_UNLOCK(dvp, 0); + error = nfscl_nget(dvp->v_mount, dvp, nfhp, cnp, td, &np, + NULL); + vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY); + if (error) + return (error); + newvp = NFSTOV(np); + if (attrflag) + (void) nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL, + 0, 1); + } else if (NFS_CMPFH(np, nfhp->nfh_fh, nfhp->nfh_len)) { + FREE((caddr_t)nfhp, M_NFSFH); + VREF(dvp); + newvp = dvp; + if (attrflag) + (void) nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL, + 0, 1); + } else { + error = nfscl_nget(dvp->v_mount, dvp, nfhp, cnp, td, &np, + NULL); + if (error) + return (error); + newvp = NFSTOV(np); + if (attrflag) + (void) nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL, + 0, 1); + } + if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN)) + cnp->cn_flags |= SAVENAME; + if ((cnp->cn_flags & MAKEENTRY) && + (cnp->cn_nameiop != DELETE || !(flags & ISLASTCN))) { + np->n_ctime = np->n_vattr.na_vattr.va_ctime.tv_sec; + cache_enter(dvp, newvp, cnp); + } + *vpp = newvp; + return (0); +} + +/* + * nfs read call. + * Just call ncl_bioread() to do the work. + */ +static int +nfs_read(struct vop_read_args *ap) +{ + struct vnode *vp = ap->a_vp; + + switch (vp->v_type) { + case VREG: + return (ncl_bioread(vp, ap->a_uio, ap->a_ioflag, ap->a_cred)); + case VDIR: + return (EISDIR); + default: + return (EOPNOTSUPP); + } +} + +/* + * nfs readlink call + */ +static int +nfs_readlink(struct vop_readlink_args *ap) +{ + struct vnode *vp = ap->a_vp; + + if (vp->v_type != VLNK) + return (EINVAL); + return (ncl_bioread(vp, ap->a_uio, 0, ap->a_cred)); +} + +/* + * Do a readlink rpc. + * Called by ncl_doio() from below the buffer cache. 
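
The ISDOTDOT branch of nfs_lookup() above unlocks the directory before locking its parent and then relocks it, so locks are always taken parent first and the lock-order deadlock is avoided. The same dance in userland, with pthread mutexes standing in for the vnode locks (link with -lpthread):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t parent_lk = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t child_lk = PTHREAD_MUTEX_INITIALIZER;

/* Entered with child_lk held; needs the parent's lock as well. */
static void
lookup_dotdot(void)
{
	pthread_mutex_unlock(&child_lk);	/* cf. VOP_UNLOCK(dvp, 0) */
	pthread_mutex_lock(&parent_lk);		/* lock ".." first */
	pthread_mutex_lock(&child_lk);		/* cf. vn_lock(dvp, ...) */
	printf("parent and child held, parent first\n");
	pthread_mutex_unlock(&parent_lk);
}

int
main(void)
{
	pthread_mutex_lock(&child_lk);
	lookup_dotdot();			/* returns with child_lk held */
	pthread_mutex_unlock(&child_lk);
	return (0);
}
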
+ */ +int +ncl_readlinkrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred) +{ + int error, ret, attrflag; + struct nfsvattr nfsva; + + error = nfsrpc_readlink(vp, uiop, cred, uiop->uio_td, &nfsva, + &attrflag, NULL); + if (attrflag) { + ret = nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 1); + if (ret && !error) + error = ret; + } + if (error && NFS_ISV4(vp)) + error = nfscl_maperr(uiop->uio_td, error, (uid_t)0, (gid_t)0); + return (error); +} + +/* + * nfs read rpc call + * Ditto above + */ +int +ncl_readrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred) +{ + int error, ret, attrflag; + struct nfsvattr nfsva; + + error = nfsrpc_read(vp, uiop, cred, uiop->uio_td, &nfsva, &attrflag, + NULL); + if (attrflag) { + ret = nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 1); + if (ret && !error) + error = ret; + } + if (error && NFS_ISV4(vp)) + error = nfscl_maperr(uiop->uio_td, error, (uid_t)0, (gid_t)0); + return (error); +} + +/* + * nfs write call + */ +int +ncl_writerpc(struct vnode *vp, struct uio *uiop, struct ucred *cred, + int *iomode, int *must_commit) +{ + struct nfsvattr nfsva; + int error = 0, attrflag, ret; + u_char verf[NFSX_VERF]; + struct nfsmount *nmp = VFSTONFS(vp->v_mount); + + *must_commit = 0; + error = nfsrpc_write(vp, uiop, iomode, verf, cred, + uiop->uio_td, &nfsva, &attrflag, NULL); + NFSLOCKMNT(nmp); + if (!error && NFSHASWRITEVERF(nmp) && + NFSBCMP(verf, nmp->nm_verf, NFSX_VERF)) { + *must_commit = 1; + NFSBCOPY(verf, nmp->nm_verf, NFSX_VERF); + } + NFSUNLOCKMNT(nmp); + if (attrflag) { + if (VTONFS(vp)->n_flag & ND_NFSV4) + ret = nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 1, + 1); + else + ret = nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, + 1); + if (ret && !error) + error = ret; + } + if (vp->v_mount->mnt_kern_flag & MNTK_ASYNC) + *iomode = NFSWRITE_FILESYNC; + if (error && NFS_ISV4(vp)) + error = nfscl_maperr(uiop->uio_td, error, (uid_t)0, (gid_t)0); + return (error); +} + +/* + * nfs mknod rpc + * For NFS v2 this is a kludge. Use a create rpc but with the IFMT bits of the + * mode set to specify the file type and the size field for rdev. 
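
ncl_writerpc() above compares the verifier returned by each WRITE against the copy cached in the mount; a change means the server rebooted and may have lost uncommitted data, so must_commit is raised. A simplified sketch of the check; unlike the code above, this version refreshes the cached copy on every call:

#include <stdio.h>
#include <string.h>

#define VERFSZ 8	/* cf. NFSX_VERF */

static unsigned char mnt_verf[VERFSZ];	/* cached per-mount verifier */
static int have_verf;			/* cf. NFSHASWRITEVERF() */

/* Returns 1 if previously written data must be re-sent/committed. */
static int
check_write_verf(const unsigned char *verf)
{
	int must_commit = 0;

	if (have_verf && memcmp(verf, mnt_verf, VERFSZ) != 0)
		must_commit = 1;	/* server rebooted under us */
	memcpy(mnt_verf, verf, VERFSZ);
	have_verf = 1;
	return (must_commit);
}

int
main(void)
{
	unsigned char v1[VERFSZ] = "boot-1", v2[VERFSZ] = "boot-2";
	int a, b, c;

	a = check_write_verf(v1);
	b = check_write_verf(v1);
	c = check_write_verf(v2);
	printf("%d %d %d\n", a, b, c);	/* prints "0 0 1" */
	return (0);
}
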
+ */ +static int +nfs_mknodrpc(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp, + struct vattr *vap) +{ + struct nfsvattr nfsva, dnfsva; + struct vnode *newvp = NULL; + struct nfsnode *np = NULL, *dnp; + struct nfsfh *nfhp; + struct vattr vattr; + int error = 0, attrflag, dattrflag; + u_int32_t rdev; + + if (vap->va_type == VCHR || vap->va_type == VBLK) + rdev = vap->va_rdev; + else if (vap->va_type == VFIFO || vap->va_type == VSOCK) + rdev = 0xffffffff; + else + return (EOPNOTSUPP); + if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred))) + return (error); + error = nfsrpc_mknod(dvp, cnp->cn_nameptr, cnp->cn_namelen, vap, + rdev, vap->va_type, cnp->cn_cred, cnp->cn_thread, &dnfsva, + &nfsva, &nfhp, &attrflag, &dattrflag, NULL); + if (!error) { + if (!nfhp) + (void) nfsrpc_lookup(dvp, cnp->cn_nameptr, + cnp->cn_namelen, cnp->cn_cred, cnp->cn_thread, + &dnfsva, &nfsva, &nfhp, &attrflag, &dattrflag, + NULL); + if (nfhp) + error = nfscl_nget(dvp->v_mount, dvp, nfhp, cnp, + cnp->cn_thread, &np, NULL); + } + if (dattrflag) + (void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1); + if (!error) { + newvp = NFSTOV(np); + if (attrflag) + error = nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL, + 0, 1); + } + if (!error) { + if ((cnp->cn_flags & MAKEENTRY)) + cache_enter(dvp, newvp, cnp); + *vpp = newvp; + } else if (NFS_ISV4(dvp)) { + error = nfscl_maperr(cnp->cn_thread, error, vap->va_uid, + vap->va_gid); + } + dnp = VTONFS(dvp); + mtx_lock(&dnp->n_mtx); + dnp->n_flag |= NMODIFIED; + if (!dattrflag) + dnp->n_attrstamp = 0; + mtx_unlock(&dnp->n_mtx); + return (error); +} + +/* + * nfs mknod vop + * just call nfs_mknodrpc() to do the work. + */ +/* ARGSUSED */ +static int +nfs_mknod(struct vop_mknod_args *ap) +{ + return (nfs_mknodrpc(ap->a_dvp, ap->a_vpp, ap->a_cnp, ap->a_vap)); +} + +static u_long create_verf; +/* + * nfs file create call + */ +static int +nfs_create(struct vop_create_args *ap) +{ + struct vnode *dvp = ap->a_dvp; + struct vattr *vap = ap->a_vap; + struct componentname *cnp = ap->a_cnp; + struct nfsnode *np = NULL, *dnp; + struct vnode *newvp = NULL; + struct nfsmount *nmp; + struct nfsvattr dnfsva, nfsva; + struct nfsfh *nfhp; + nfsquad_t cverf; + int error = 0, attrflag, dattrflag, fmode = 0; + struct vattr vattr; + + /* + * Oops, not for me.. + */ + if (vap->va_type == VSOCK) + return (nfs_mknodrpc(dvp, ap->a_vpp, cnp, vap)); + + if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred))) + return (error); + if (vap->va_vaflags & VA_EXCLUSIVE) + fmode |= O_EXCL; + dnp = VTONFS(dvp); + nmp = VFSTONFS(vnode_mount(dvp)); +again: + /* For NFSv4, wait until any remove is done. 
*/ + mtx_lock(&dnp->n_mtx); + while (NFSHASNFSV4(nmp) && (dnp->n_flag & NREMOVEINPROG)) { + dnp->n_flag |= NREMOVEWANT; + (void) msleep((caddr_t)dnp, &dnp->n_mtx, PZERO, "nfscrt", 0); + } + mtx_unlock(&dnp->n_mtx); + + CURVNET_SET(nmp->nm_sockreq.nr_so->so_vnet); +#ifdef INET + INIT_VNET_INET(curvnet); + if (!TAILQ_EMPTY(&V_in_ifaddrhead)) + cverf.lval[0] = IA_SIN(TAILQ_FIRST(&V_in_ifaddrhead))->sin_addr.s_addr; + else +#endif + cverf.lval[0] = create_verf; + cverf.lval[1] = ++create_verf; + CURVNET_RESTORE(); + error = nfsrpc_create(dvp, cnp->cn_nameptr, cnp->cn_namelen, + vap, cverf, fmode, cnp->cn_cred, cnp->cn_thread, &dnfsva, &nfsva, + &nfhp, &attrflag, &dattrflag, NULL); + if (!error) { + if (nfhp == NULL) + (void) nfsrpc_lookup(dvp, cnp->cn_nameptr, + cnp->cn_namelen, cnp->cn_cred, cnp->cn_thread, + &dnfsva, &nfsva, &nfhp, &attrflag, &dattrflag, + NULL); + if (nfhp != NULL) + error = nfscl_nget(dvp->v_mount, dvp, nfhp, cnp, + cnp->cn_thread, &np, NULL); + } + if (dattrflag) + (void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1); + if (!error) { + newvp = NFSTOV(np); + if (attrflag) + error = nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL, + 0, 1); + } + if (error) { + if (newvp != NULL) { + vrele(newvp); + newvp = NULL; + } + if (NFS_ISV34(dvp) && (fmode & O_EXCL) && + error == NFSERR_NOTSUPP) { + fmode &= ~O_EXCL; + goto again; + } + } else if (NFS_ISV34(dvp) && (fmode & O_EXCL)) { + if (nfscl_checksattr(vap, &nfsva)) { + error = nfsrpc_setattr(newvp, vap, NULL, cnp->cn_cred, + cnp->cn_thread, &nfsva, &attrflag, NULL); + if (error && (vap->va_uid != (uid_t)VNOVAL || + vap->va_gid != (gid_t)VNOVAL)) { + /* try again without setting uid/gid */ + vap->va_uid = (uid_t)VNOVAL; + vap->va_gid = (uid_t)VNOVAL; + error = nfsrpc_setattr(newvp, vap, NULL, + cnp->cn_cred, cnp->cn_thread, &nfsva, + &attrflag, NULL); + } + if (attrflag) + (void) nfscl_loadattrcache(&newvp, &nfsva, NULL, + NULL, 0, 1); + } + } + if (!error) { + if (cnp->cn_flags & MAKEENTRY) + cache_enter(dvp, newvp, cnp); + *ap->a_vpp = newvp; + } else if (NFS_ISV4(dvp)) { + error = nfscl_maperr(cnp->cn_thread, error, vap->va_uid, + vap->va_gid); + } + mtx_lock(&dnp->n_mtx); + dnp->n_flag |= NMODIFIED; + if (!dattrflag) + dnp->n_attrstamp = 0; + mtx_unlock(&dnp->n_mtx); + return (error); +} + +/* + * nfs file remove call + * To try and make nfs semantics closer to ufs semantics, a file that has + * other processes using the vnode is renamed instead of removed and then + * removed later on the last close. + * - If v_usecount > 1 + * If a rename is not already in the works + * call nfs_sillyrename() to set it up + * else + * do the remove rpc + */ +static int +nfs_remove(struct vop_remove_args *ap) +{ + struct vnode *vp = ap->a_vp; + struct vnode *dvp = ap->a_dvp; + struct componentname *cnp = ap->a_cnp; + struct nfsnode *np = VTONFS(vp); + int error = 0; + struct vattr vattr; + +#ifndef DIAGNOSTIC + if ((cnp->cn_flags & HASBUF) == 0) + panic("nfs_remove: no name"); + if (vrefcnt(vp) < 1) + panic("nfs_remove: bad v_usecount"); +#endif + if (vp->v_type == VDIR) + error = EPERM; + else if (vrefcnt(vp) == 1 || (np->n_sillyrename && + VOP_GETATTR(vp, &vattr, cnp->cn_cred) == 0 && + vattr.va_nlink > 1)) { + /* + * Purge the name cache so that the chance of a lookup for + * the name succeeding while the remove is in progress is + * minimized. Without node locking it can still happen, such + * that an I/O op returns ESTALE, but since you get this if + * another host removes the file.. 
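
nfs_create() above builds the exclusive-create verifier from the host's primary IP address plus an incrementing static counter, so a retransmitted CREATE can be recognized as a duplicate of the original rather than a new request. Roughly, in userland form; the hostid argument is an invented stand-in for the in_ifaddrhead lookup:

#include <stdio.h>

struct verf {
	unsigned int v[2];
};

static unsigned int create_verf;	/* cf. the static create_verf above */

static struct verf
next_create_verf(unsigned int hostid)
{
	struct verf cv;

	/* First word: an address unique to this client, if one exists. */
	cv.v[0] = (hostid != 0) ? hostid : create_verf;
	/* Second word: never repeats for the life of this boot. */
	cv.v[1] = ++create_verf;
	return (cv);
}

int
main(void)
{
	struct verf a = next_create_verf(0x0a000001);	/* e.g. 10.0.0.1 */
	struct verf b = next_create_verf(0x0a000001);

	printf("a=%08x:%08x b=%08x:%08x\n", a.v[0], a.v[1], b.v[0], b.v[1]);
	return (0);
}
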
+ */ + cache_purge(vp); + /* + * throw away biocache buffers, mainly to avoid + * unnecessary delayed writes later. + */ + error = ncl_vinvalbuf(vp, 0, cnp->cn_thread, 1); + /* Do the rpc */ + if (error != EINTR && error != EIO) + error = nfs_removerpc(dvp, vp, cnp->cn_nameptr, + cnp->cn_namelen, cnp->cn_cred, cnp->cn_thread); + /* + * Kludge City: If the first reply to the remove rpc is lost.. + * the reply to the retransmitted request will be ENOENT + * since the file was in fact removed + * Therefore, we cheat and return success. + */ + if (error == ENOENT) + error = 0; + } else if (!np->n_sillyrename) + error = nfs_sillyrename(dvp, vp, cnp); + np->n_attrstamp = 0; + return (error); +} + +/* + * nfs file remove rpc called from nfs_inactive + */ +int +ncl_removeit(struct sillyrename *sp, struct vnode *vp) +{ + /* + * Make sure that the directory vnode is still valid. + * XXX we should lock sp->s_dvp here. + */ + if (sp->s_dvp->v_type == VBAD) + return (0); + return (nfs_removerpc(sp->s_dvp, vp, sp->s_name, sp->s_namlen, + sp->s_cred, NULL)); +} + +/* + * Nfs remove rpc, called from nfs_remove() and ncl_removeit(). + */ +static int +nfs_removerpc(struct vnode *dvp, struct vnode *vp, char *name, + int namelen, struct ucred *cred, struct thread *td) +{ + struct nfsvattr dnfsva; + struct nfsnode *dnp = VTONFS(dvp); + int error = 0, dattrflag; + + mtx_lock(&dnp->n_mtx); + dnp->n_flag |= NREMOVEINPROG; + mtx_unlock(&dnp->n_mtx); + error = nfsrpc_remove(dvp, name, namelen, vp, cred, td, &dnfsva, + &dattrflag, NULL); + mtx_lock(&dnp->n_mtx); + if ((dnp->n_flag & NREMOVEWANT)) { + dnp->n_flag &= ~(NREMOVEWANT | NREMOVEINPROG); + mtx_unlock(&dnp->n_mtx); + wakeup((caddr_t)dnp); + } else { + dnp->n_flag &= ~NREMOVEINPROG; + mtx_unlock(&dnp->n_mtx); + } + if (dattrflag) + (void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1); + mtx_lock(&dnp->n_mtx); + dnp->n_flag |= NMODIFIED; + if (!dattrflag) + dnp->n_attrstamp = 0; + mtx_unlock(&dnp->n_mtx); + if (error && NFS_ISV4(dvp)) + error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0); + return (error); +} + +/* + * nfs file rename call + */ +static int +nfs_rename(struct vop_rename_args *ap) +{ + struct vnode *fvp = ap->a_fvp; + struct vnode *tvp = ap->a_tvp; + struct vnode *fdvp = ap->a_fdvp; + struct vnode *tdvp = ap->a_tdvp; + struct componentname *tcnp = ap->a_tcnp; + struct componentname *fcnp = ap->a_fcnp; + struct nfsnode *fnp = VTONFS(ap->a_fvp); + struct nfsnode *tdnp = VTONFS(ap->a_tdvp); + struct nfsv4node *newv4 = NULL; + int error; + +#ifndef DIAGNOSTIC + if ((tcnp->cn_flags & HASBUF) == 0 || + (fcnp->cn_flags & HASBUF) == 0) + panic("nfs_rename: no name"); +#endif + /* Check for cross-device rename */ + if ((fvp->v_mount != tdvp->v_mount) || + (tvp && (fvp->v_mount != tvp->v_mount))) { + error = EXDEV; + goto out; + } + + if (fvp == tvp) { + ncl_printf("nfs_rename: fvp == tvp (can't happen)\n"); + error = 0; + goto out; + } + if ((error = vn_lock(fvp, LK_EXCLUSIVE))) + goto out; + + /* + * We have to flush B_DELWRI data prior to renaming + * the file. If we don't, the delayed-write buffers + * can be flushed out later after the file has gone stale + * under NFSV3. NFSV2 does not have this problem because + * ( as far as I can tell ) it flushes dirty buffers more + * often. + * + * Skip the rename operation if the fsync fails, this can happen + * due to the server's volume being full, when we pushed out data + * that was written back to our cache earlier. Not checking for + * this condition can result in potential (silent) data loss. 
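
nfs_removerpc() above brackets the RPC with NREMOVEINPROG and wakes any lookup that set NREMOVEWANT while waiting. The same gate in userland, with a pthread condition variable playing the part of msleep()/wakeup() (link with -lpthread):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
static int remove_inprog;

static void *
lookup_thread(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&mtx);
	while (remove_inprog)		/* cf. the NREMOVEINPROG loop */
		pthread_cond_wait(&cv, &mtx);
	pthread_mutex_unlock(&mtx);
	printf("lookup proceeds after remove\n");
	return (NULL);
}

int
main(void)
{
	pthread_t t;

	remove_inprog = 1;
	pthread_create(&t, NULL, lookup_thread, NULL);

	/* ... the remove RPC would run here ... */

	pthread_mutex_lock(&mtx);
	remove_inprog = 0;
	pthread_cond_broadcast(&cv);	/* cf. wakeup(dnp) */
	pthread_mutex_unlock(&mtx);
	pthread_join(t, NULL);
	return (0);
}
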
+ */ + error = VOP_FSYNC(fvp, MNT_WAIT, fcnp->cn_thread); + VOP_UNLOCK(fvp, 0); + if (!error && tvp) + error = VOP_FSYNC(tvp, MNT_WAIT, tcnp->cn_thread); + if (error) + goto out; + + /* + * If the tvp exists and is in use, sillyrename it before doing the + * rename of the new file over it. + * XXX Can't sillyrename a directory. + */ + if (tvp && vrefcnt(tvp) > 1 && !VTONFS(tvp)->n_sillyrename && + tvp->v_type != VDIR && !nfs_sillyrename(tdvp, tvp, tcnp)) { + vput(tvp); + tvp = NULL; + } + + error = nfs_renamerpc(fdvp, fvp, fcnp->cn_nameptr, fcnp->cn_namelen, + tdvp, tvp, tcnp->cn_nameptr, tcnp->cn_namelen, tcnp->cn_cred, + tcnp->cn_thread); + + if (!error) { + /* + * For NFSv4, check to see if it is the same name and + * replace the name, if it is different. + */ + MALLOC(newv4, struct nfsv4node *, + sizeof (struct nfsv4node) + + tdnp->n_fhp->nfh_len + tcnp->cn_namelen - 1, + M_NFSV4NODE, M_WAITOK); + mtx_lock(&tdnp->n_mtx); + mtx_lock(&fnp->n_mtx); + if (fnp->n_v4 != NULL && fvp->v_type == VREG && + (fnp->n_v4->n4_namelen != tcnp->cn_namelen || + NFSBCMP(tcnp->cn_nameptr, NFS4NODENAME(fnp->n_v4), + tcnp->cn_namelen) || + tdnp->n_fhp->nfh_len != fnp->n_v4->n4_fhlen || + NFSBCMP(tdnp->n_fhp->nfh_fh, fnp->n_v4->n4_data, + tdnp->n_fhp->nfh_len))) { +#ifdef notdef +{ char nnn[100]; int nnnl; +nnnl = (tcnp->cn_namelen < 100) ? tcnp->cn_namelen : 99; +bcopy(tcnp->cn_nameptr, nnn, nnnl); +nnn[nnnl] = '\0'; +printf("ren replace=%s\n",nnn); +} +#endif + FREE((caddr_t)fnp->n_v4, M_NFSV4NODE); + fnp->n_v4 = newv4; + newv4 = NULL; + fnp->n_v4->n4_fhlen = tdnp->n_fhp->nfh_len; + fnp->n_v4->n4_namelen = tcnp->cn_namelen; + NFSBCOPY(tdnp->n_fhp->nfh_fh, fnp->n_v4->n4_data, + tdnp->n_fhp->nfh_len); + NFSBCOPY(tcnp->cn_nameptr, + NFS4NODENAME(fnp->n_v4), tcnp->cn_namelen); + } + mtx_unlock(&tdnp->n_mtx); + mtx_unlock(&fnp->n_mtx); + if (newv4 != NULL) + FREE((caddr_t)newv4, M_NFSV4NODE); + } + + if (fvp->v_type == VDIR) { + if (tvp != NULL && tvp->v_type == VDIR) + cache_purge(tdvp); + cache_purge(fdvp); + } + +out: + if (tdvp == tvp) + vrele(tdvp); + else + vput(tdvp); + if (tvp) + vput(tvp); + vrele(fdvp); + vrele(fvp); + /* + * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry. + */ + if (error == ENOENT) + error = 0; + return (error); +} + +/* + * nfs file rename rpc called from nfs_remove() above + */ +static int +nfs_renameit(struct vnode *sdvp, struct vnode *svp, struct componentname *scnp, + struct sillyrename *sp) +{ + + return (nfs_renamerpc(sdvp, svp, scnp->cn_nameptr, scnp->cn_namelen, + sdvp, NULL, sp->s_name, sp->s_namlen, scnp->cn_cred, + scnp->cn_thread)); +} + +/* + * Do an nfs rename rpc. Called from nfs_rename() and nfs_renameit(). 
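
nfs_sillyrename(), relied upon by the remove and rename paths above, keeps a still-open file alive by renaming it to a client-unique temporary name that is unlinked on last close. A sketch of generating such a name; the ".nfs." format shown is illustrative, not the kernel's exact scheme:

#include <stdio.h>
#include <unistd.h>

static void
silly_name(char *buf, size_t len, unsigned long fileid)
{
	/* pid plus fileid makes collisions across clients unlikely */
	snprintf(buf, len, ".nfs.%lx.%x", fileid, (unsigned)getpid());
}

int
main(void)
{
	char name[64];

	silly_name(name, sizeof(name), 0x4e2a);
	printf("would rename victim to %s\n", name);
	return (0);
}
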
+ */ +static int +nfs_renamerpc(struct vnode *fdvp, struct vnode *fvp, char *fnameptr, + int fnamelen, struct vnode *tdvp, struct vnode *tvp, char *tnameptr, + int tnamelen, struct ucred *cred, struct thread *td) +{ + struct nfsvattr fnfsva, tnfsva; + struct nfsnode *fdnp = VTONFS(fdvp); + struct nfsnode *tdnp = VTONFS(tdvp); + int error = 0, fattrflag, tattrflag; + + error = nfsrpc_rename(fdvp, fvp, fnameptr, fnamelen, tdvp, tvp, + tnameptr, tnamelen, cred, td, &fnfsva, &tnfsva, &fattrflag, + &tattrflag, NULL, NULL); + mtx_lock(&fdnp->n_mtx); + fdnp->n_flag |= NMODIFIED; + mtx_unlock(&fdnp->n_mtx); + mtx_lock(&tdnp->n_mtx); + tdnp->n_flag |= NMODIFIED; + mtx_unlock(&tdnp->n_mtx); + if (fattrflag) + (void) nfscl_loadattrcache(&fdvp, &fnfsva, NULL, NULL, 0, 1); + else + fdnp->n_attrstamp = 0; + if (tattrflag) + (void) nfscl_loadattrcache(&tdvp, &tnfsva, NULL, NULL, 0, 1); + else + tdnp->n_attrstamp = 0; + if (error && NFS_ISV4(fdvp)) + error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0); + return (error); +} + +/* + * nfs hard link create call + */ +static int +nfs_link(struct vop_link_args *ap) +{ + struct vnode *vp = ap->a_vp; + struct vnode *tdvp = ap->a_tdvp; + struct componentname *cnp = ap->a_cnp; + struct nfsnode *tdnp; + struct nfsvattr nfsva, dnfsva; + int error = 0, attrflag, dattrflag; + + if (vp->v_mount != tdvp->v_mount) { + return (EXDEV); + } + + /* + * Push all writes to the server, so that the attribute cache + * doesn't get "out of sync" with the server. + * XXX There should be a better way! + */ + VOP_FSYNC(vp, MNT_WAIT, cnp->cn_thread); + + error = nfsrpc_link(tdvp, vp, cnp->cn_nameptr, cnp->cn_namelen, + cnp->cn_cred, cnp->cn_thread, &dnfsva, &nfsva, &attrflag, + &dattrflag, NULL); + tdnp = VTONFS(tdvp); + mtx_lock(&tdnp->n_mtx); + tdnp->n_flag |= NMODIFIED; + mtx_unlock(&tdnp->n_mtx); + if (attrflag) + (void) nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 1); + else + VTONFS(vp)->n_attrstamp = 0; + if (dattrflag) + (void) nfscl_loadattrcache(&tdvp, &dnfsva, NULL, NULL, 0, 1); + else + tdnp->n_attrstamp = 0; + /* + * If negative lookup caching is enabled, I might as well + * add an entry for this node. Not necessary for correctness, + * but if negative caching is enabled, then the system + * must care about lookup caching hit rate, so... 
+ */ + if (newnfs_neglookup_enable != 0 && + (cnp->cn_flags & MAKEENTRY)) + cache_enter(tdvp, vp, cnp); + if (error && NFS_ISV4(vp)) + error = nfscl_maperr(cnp->cn_thread, error, (uid_t)0, + (gid_t)0); + return (error); +} + +/* + * nfs symbolic link create call + */ +static int +nfs_symlink(struct vop_symlink_args *ap) +{ + struct vnode *dvp = ap->a_dvp; + struct vattr *vap = ap->a_vap; + struct componentname *cnp = ap->a_cnp; + struct nfsvattr nfsva, dnfsva; + struct nfsfh *nfhp; + struct nfsnode *np = NULL, *dnp; + struct vnode *newvp = NULL; + int error = 0, attrflag, dattrflag, ret; + + vap->va_type = VLNK; + error = nfsrpc_symlink(dvp, cnp->cn_nameptr, cnp->cn_namelen, + ap->a_target, vap, cnp->cn_cred, cnp->cn_thread, &dnfsva, + &nfsva, &nfhp, &attrflag, &dattrflag, NULL); + if (nfhp) { + ret = nfscl_nget(dvp->v_mount, dvp, nfhp, cnp, cnp->cn_thread, + &np, NULL); + if (!ret) + newvp = NFSTOV(np); + else if (!error) + error = ret; + } + if (newvp != NULL) { + if (attrflag) + (void) nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL, + 0, 1); + } else if (!error) { + /* + * If we do not have an error and we could not extract the + * newvp from the response due to the request being NFSv2, we + * have to do a lookup in order to obtain a newvp to return. + */ + error = nfs_lookitup(dvp, cnp->cn_nameptr, cnp->cn_namelen, + cnp->cn_cred, cnp->cn_thread, &np); + if (!error) + newvp = NFSTOV(np); + } + if (error) { + if (newvp) + vput(newvp); + if (NFS_ISV4(dvp)) + error = nfscl_maperr(cnp->cn_thread, error, + vap->va_uid, vap->va_gid); + } else { + /* + * If negative lookup caching is enabled, I might as well + * add an entry for this node. Not necessary for correctness, + * but if negative caching is enabled, then the system + * must care about lookup caching hit rate, so... 
+ */ + if (newnfs_neglookup_enable != 0 && + (cnp->cn_flags & MAKEENTRY)) + cache_enter(dvp, newvp, cnp); + *ap->a_vpp = newvp; + } + + dnp = VTONFS(dvp); + mtx_lock(&dnp->n_mtx); + dnp->n_flag |= NMODIFIED; + mtx_unlock(&dnp->n_mtx); + if (dattrflag) + (void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1); + else + dnp->n_attrstamp = 0; + return (error); +} + +/* + * nfs make dir call + */ +static int +nfs_mkdir(struct vop_mkdir_args *ap) +{ + struct vnode *dvp = ap->a_dvp; + struct vattr *vap = ap->a_vap; + struct componentname *cnp = ap->a_cnp; + struct nfsnode *np = NULL, *dnp; + struct vnode *newvp = NULL; + struct vattr vattr; + struct nfsfh *nfhp; + struct nfsvattr nfsva, dnfsva; + int error = 0, attrflag, dattrflag, ret; + + if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred))) + return (error); + vap->va_type = VDIR; + error = nfsrpc_mkdir(dvp, cnp->cn_nameptr, cnp->cn_namelen, + vap, cnp->cn_cred, cnp->cn_thread, &dnfsva, &nfsva, &nfhp, + &attrflag, &dattrflag, NULL); + dnp = VTONFS(dvp); + mtx_lock(&dnp->n_mtx); + dnp->n_flag |= NMODIFIED; + mtx_unlock(&dnp->n_mtx); + if (dattrflag) + (void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1); + else + dnp->n_attrstamp = 0; + if (nfhp) { + ret = nfscl_nget(dvp->v_mount, dvp, nfhp, cnp, cnp->cn_thread, + &np, NULL); + if (!ret) { + newvp = NFSTOV(np); + if (attrflag) + (void) nfscl_loadattrcache(&newvp, &nfsva, NULL, + NULL, 0, 1); + } else if (!error) + error = ret; + } + if (!error && newvp == NULL) { + error = nfs_lookitup(dvp, cnp->cn_nameptr, cnp->cn_namelen, + cnp->cn_cred, cnp->cn_thread, &np); + if (!error) { + newvp = NFSTOV(np); + if (newvp->v_type != VDIR) + error = EEXIST; + } + } + if (error) { + if (newvp) + vput(newvp); + if (NFS_ISV4(dvp)) + error = nfscl_maperr(cnp->cn_thread, error, + vap->va_uid, vap->va_gid); + } else { + /* + * If negative lookup caching is enabled, I might as well + * add an entry for this node. Not necessary for correctness, + * but if negative caching is enabled, then the system + * must care about lookup caching hit rate, so... + */ + if (newnfs_neglookup_enable != 0 && + (cnp->cn_flags & MAKEENTRY)) + cache_enter(dvp, newvp, cnp); + *ap->a_vpp = newvp; + } + return (error); +} + +/* + * nfs remove directory call + */ +static int +nfs_rmdir(struct vop_rmdir_args *ap) +{ + struct vnode *vp = ap->a_vp; + struct vnode *dvp = ap->a_dvp; + struct componentname *cnp = ap->a_cnp; + struct nfsnode *dnp; + struct nfsvattr dnfsva; + int error, dattrflag; + + if (dvp == vp) + return (EINVAL); + error = nfsrpc_rmdir(dvp, cnp->cn_nameptr, cnp->cn_namelen, + cnp->cn_cred, cnp->cn_thread, &dnfsva, &dattrflag, NULL); + dnp = VTONFS(dvp); + mtx_lock(&dnp->n_mtx); + dnp->n_flag |= NMODIFIED; + mtx_unlock(&dnp->n_mtx); + if (dattrflag) + (void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1); + else + dnp->n_attrstamp = 0; + + cache_purge(dvp); + cache_purge(vp); + if (error && NFS_ISV4(dvp)) + error = nfscl_maperr(cnp->cn_thread, error, (uid_t)0, + (gid_t)0); + /* + * Kludge: Map ENOENT => 0 assuming that you have a reply to a retry. 
+ */ + if (error == ENOENT) + error = 0; + return (error); +} + +/* + * nfs readdir call + */ +static int +nfs_readdir(struct vop_readdir_args *ap) +{ + struct vnode *vp = ap->a_vp; + struct nfsnode *np = VTONFS(vp); + struct uio *uio = ap->a_uio; + int tresid, error = 0; + struct vattr vattr; + + if (vp->v_type != VDIR) + return(EPERM); + + /* + * First, check for hit on the EOF offset cache + */ + if (np->n_direofoffset > 0 && uio->uio_offset >= np->n_direofoffset && + (np->n_flag & NMODIFIED) == 0) { + if (VOP_GETATTR(vp, &vattr, ap->a_cred) == 0) { + mtx_lock(&np->n_mtx); + if ((NFS_ISV4(vp) && np->n_change == vattr.va_filerev) || + !NFS_TIMESPEC_COMPARE(&np->n_mtime, &vattr.va_mtime)) { + mtx_unlock(&np->n_mtx); + NFSINCRGLOBAL(newnfsstats.direofcache_hits); + return (0); + } else + mtx_unlock(&np->n_mtx); + } + } + + /* + * Call ncl_bioread() to do the real work. + */ + tresid = uio->uio_resid; + error = ncl_bioread(vp, uio, 0, ap->a_cred); + + if (!error && uio->uio_resid == tresid) + NFSINCRGLOBAL(newnfsstats.direofcache_misses); + return (error); +} + +/* + * Readdir rpc call. + * Called from below the buffer cache by ncl_doio(). + */ +int +ncl_readdirrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred, + struct thread *td) +{ + struct nfsvattr nfsva; + nfsuint64 *cookiep, cookie; + struct nfsnode *dnp = VTONFS(vp); + struct nfsmount *nmp = VFSTONFS(vp->v_mount); + int error = 0, eof, attrflag; + +#ifndef DIAGNOSTIC + if (uiop->uio_iovcnt != 1 || (uiop->uio_offset & (DIRBLKSIZ - 1)) || + (uiop->uio_resid & (DIRBLKSIZ - 1))) + panic("nfs readdirrpc bad uio"); +#endif + + /* + * If there is no cookie, assume directory was stale. + */ + ncl_dircookie_lock(dnp); + cookiep = ncl_getcookie(dnp, uiop->uio_offset, 0); + if (cookiep) { + cookie = *cookiep; + ncl_dircookie_unlock(dnp); + } else { + ncl_dircookie_unlock(dnp); + return (NFSERR_BAD_COOKIE); + } + + if (NFSHASNFSV3(nmp) && !NFSHASGOTFSINFO(nmp)) + (void)ncl_fsinfo(nmp, vp, cred, td); + + error = nfsrpc_readdir(vp, uiop, &cookie, cred, td, &nfsva, + &attrflag, &eof, NULL); + if (attrflag) + (void) nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 1); + + if (!error) { + /* + * We are now either at the end of the directory or have filled + * the block. + */ + if (eof) + dnp->n_direofoffset = uiop->uio_offset; + else { + if (uiop->uio_resid > 0) + ncl_printf("EEK! readdirrpc resid > 0\n"); + ncl_dircookie_lock(dnp); + cookiep = ncl_getcookie(dnp, uiop->uio_offset, 1); + *cookiep = cookie; + ncl_dircookie_unlock(dnp); + } + } else if (NFS_ISV4(vp)) { + error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0); + } + return (error); +} + +/* + * NFS V3 readdir plus RPC. Used in place of ncl_readdirrpc(). + */ +int +ncl_readdirplusrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred, + struct thread *td) +{ + struct nfsvattr nfsva; + nfsuint64 *cookiep, cookie; + struct nfsnode *dnp = VTONFS(vp); + struct nfsmount *nmp = VFSTONFS(vp->v_mount); + int error = 0, attrflag, eof; + +#ifndef DIAGNOSTIC + if (uiop->uio_iovcnt != 1 || (uiop->uio_offset & (DIRBLKSIZ - 1)) || + (uiop->uio_resid & (DIRBLKSIZ - 1))) + panic("nfs readdirplusrpc bad uio"); +#endif + + /* + * If there is no cookie, assume directory was stale. 
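+	 * (ncl_getcookie() with its last argument 0 only looks up an
+	 * existing mapping and returns NULL, rather than allocating a
+	 * new one, when none is found.)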
+ */
+	ncl_dircookie_lock(dnp);
+	cookiep = ncl_getcookie(dnp, uiop->uio_offset, 0);
+	if (cookiep) {
+		cookie = *cookiep;
+		ncl_dircookie_unlock(dnp);
+	} else {
+		ncl_dircookie_unlock(dnp);
+		return (NFSERR_BAD_COOKIE);
+	}
+
+	if (NFSHASNFSV3(nmp) && !NFSHASGOTFSINFO(nmp))
+		(void)ncl_fsinfo(nmp, vp, cred, td);
+	error = nfsrpc_readdirplus(vp, uiop, &cookie, cred, td, &nfsva,
+	    &attrflag, &eof, NULL);
+	if (attrflag)
+		(void) nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 1);
+
+	if (!error) {
+		/*
+		 * We are now either at the end of the directory or have
+		 * filled the block.
+		 */
+		if (eof)
+			dnp->n_direofoffset = uiop->uio_offset;
+		else {
+			if (uiop->uio_resid > 0)
+				ncl_printf("EEK! readdirplusrpc resid > 0\n");
+			ncl_dircookie_lock(dnp);
+			cookiep = ncl_getcookie(dnp, uiop->uio_offset, 1);
+			*cookiep = cookie;
+			ncl_dircookie_unlock(dnp);
+		}
+	} else if (NFS_ISV4(vp)) {
+		error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0);
+	}
+	return (error);
+}
+
+/*
+ * Silly rename.  To make the stateless NFS filesystem look a little more
+ * like "ufs", a remove of an active vnode is translated into a rename to
+ * a funny looking filename that is removed later by nfs_inactive() on the
+ * nfsnode.  There is the potential for another process on a different
+ * client to create the same funny name between the time the
+ * nfs_lookitup() check fails and the nfs_rename() completes, but...
+ */
+static int
+nfs_sillyrename(struct vnode *dvp, struct vnode *vp, struct componentname *cnp)
+{
+	struct sillyrename *sp;
+	struct nfsnode *np;
+	int error;
+	short pid;
+	unsigned int lticks;
+
+	cache_purge(dvp);
+	np = VTONFS(vp);
+#ifndef DIAGNOSTIC
+	if (vp->v_type == VDIR)
+		panic("nfs: sillyrename dir");
+#endif
+	MALLOC(sp, struct sillyrename *, sizeof (struct sillyrename),
+	    M_NEWNFSREQ, M_WAITOK);
+	sp->s_cred = crhold(cnp->cn_cred);
+	sp->s_dvp = dvp;
+	VREF(dvp);
+
+	/*
+	 * Fudge together a funny name.
+	 * The format of the funny name accommodates more sillynames
+	 * per directory: .nfs.<ticks>.<pid>4.4, where ticks is CPU
+	 * ticks since boot, e.g. ".nfs.0004a2f1.0c1e4.4".
+	 */
+	pid = cnp->cn_thread->td_proc->p_pid;
+	lticks = (unsigned int)ticks;
+	for ( ; ; ) {
+		sp->s_namlen = sprintf(sp->s_name,
+		    ".nfs.%08x.%04x4.4", lticks,
+		    pid);
+		if (nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
+		    cnp->cn_thread, NULL))
+			break;
+		lticks++;
+	}
+	error = nfs_renameit(dvp, vp, cnp, sp);
+	if (error)
+		goto bad;
+	error = nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
+	    cnp->cn_thread, &np);
+	np->n_sillyrename = sp;
+	return (0);
+bad:
+	vrele(sp->s_dvp);
+	crfree(sp->s_cred);
+	free((caddr_t)sp, M_NEWNFSREQ);
+	return (error);
+}
+
+/*
+ * Look up a file name and optionally either update the file handle or
+ * allocate an nfsnode, depending on the value of npp.
+ * npp == NULL --> just do the lookup + * *npp == NULL --> allocate a new nfsnode and make sure attributes are + * handled too + * *npp != NULL --> update the file handle in the vnode + */ +static int +nfs_lookitup(struct vnode *dvp, char *name, int len, struct ucred *cred, + struct thread *td, struct nfsnode **npp) +{ + struct vnode *newvp = NULL, *vp; + struct nfsnode *np, *dnp = VTONFS(dvp); + struct nfsfh *nfhp, *onfhp; + struct nfsvattr nfsva, dnfsva; + struct componentname cn; + int error = 0, attrflag, dattrflag; + u_int hash; + + error = nfsrpc_lookup(dvp, name, len, cred, td, &dnfsva, &nfsva, + &nfhp, &attrflag, &dattrflag, NULL); + if (dattrflag) + (void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1); + if (npp && !error) { + if (*npp != NULL) { + np = *npp; + vp = NFSTOV(np); + /* + * For NFSv4, check to see if it is the same name and + * replace the name, if it is different. + */ + if (np->n_v4 != NULL && nfsva.na_type == VREG && + (np->n_v4->n4_namelen != len || + NFSBCMP(name, NFS4NODENAME(np->n_v4), len) || + dnp->n_fhp->nfh_len != np->n_v4->n4_fhlen || + NFSBCMP(dnp->n_fhp->nfh_fh, np->n_v4->n4_data, + dnp->n_fhp->nfh_len))) { +#ifdef notdef +{ char nnn[100]; int nnnl; +nnnl = (len < 100) ? len : 99; +bcopy(name, nnn, nnnl); +nnn[nnnl] = '\0'; +printf("replace=%s\n",nnn); +} +#endif + FREE((caddr_t)np->n_v4, M_NFSV4NODE); + MALLOC(np->n_v4, struct nfsv4node *, + sizeof (struct nfsv4node) + + dnp->n_fhp->nfh_len + len - 1, + M_NFSV4NODE, M_WAITOK); + np->n_v4->n4_fhlen = dnp->n_fhp->nfh_len; + np->n_v4->n4_namelen = len; + NFSBCOPY(dnp->n_fhp->nfh_fh, np->n_v4->n4_data, + dnp->n_fhp->nfh_len); + NFSBCOPY(name, NFS4NODENAME(np->n_v4), len); + } + hash = fnv_32_buf(nfhp->nfh_fh, nfhp->nfh_len, + FNV1_32_INIT); + onfhp = np->n_fhp; + /* + * Rehash node for new file handle. 
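+			 * The handle is the key that vfs_hash knows the
+			 * vnode by, so the node must be rehashed under
+			 * the new handle before the old one is freed.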
+ */ + vfs_hash_rehash(vp, hash); + np->n_fhp = nfhp; + if (onfhp != NULL) + FREE((caddr_t)onfhp, M_NFSFH); + newvp = NFSTOV(np); + } else if (NFS_CMPFH(dnp, nfhp->nfh_fh, nfhp->nfh_len)) { + FREE((caddr_t)nfhp, M_NFSFH); + VREF(dvp); + newvp = dvp; + } else { + cn.cn_nameptr = name; + cn.cn_namelen = len; + error = nfscl_nget(dvp->v_mount, dvp, nfhp, &cn, td, + &np, NULL); + if (error) + return (error); + newvp = NFSTOV(np); + } + if (!attrflag && *npp == NULL) { + vrele(newvp); + return (ENOENT); + } + if (attrflag) + (void) nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL, + 0, 1); + } + if (npp && *npp == NULL) { + if (error) { + if (newvp) { + if (newvp == dvp) + vrele(newvp); + else + vput(newvp); + } + } else + *npp = np; + } + if (error && NFS_ISV4(dvp)) + error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0); + return (error); +} + +/* + * Nfs Version 3 and 4 commit rpc + */ +int +ncl_commit(struct vnode *vp, u_quad_t offset, int cnt, struct ucred *cred, + struct thread *td) +{ + struct nfsvattr nfsva; + struct nfsmount *nmp = VFSTONFS(vp->v_mount); + int error, attrflag; + u_char verf[NFSX_VERF]; + + mtx_lock(&nmp->nm_mtx); + if ((nmp->nm_state & NFSSTA_HASWRITEVERF) == 0) { + mtx_unlock(&nmp->nm_mtx); + return (0); + } + mtx_unlock(&nmp->nm_mtx); + error = nfsrpc_commit(vp, offset, cnt, cred, td, verf, &nfsva, + &attrflag, NULL); + if (!error) { + if (NFSBCMP((caddr_t)nmp->nm_verf, verf, NFSX_VERF)) { + NFSBCOPY(verf, (caddr_t)nmp->nm_verf, NFSX_VERF); + error = NFSERR_STALEWRITEVERF; + } + if (!error && attrflag) + (void) nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, + 0, 1); + } else if (NFS_ISV4(vp)) { + error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0); + } + return (error); +} + +/* + * Strategy routine. + * For async requests when nfsiod(s) are running, queue the request by + * calling ncl_asyncio(), otherwise just all ncl_doio() to do the + * request. + */ +static int +nfs_strategy(struct vop_strategy_args *ap) +{ + struct buf *bp = ap->a_bp; + struct ucred *cr; + + KASSERT(!(bp->b_flags & B_DONE), + ("nfs_strategy: buffer %p unexpectedly marked B_DONE", bp)); + BUF_ASSERT_HELD(bp); + + if (bp->b_iocmd == BIO_READ) + cr = bp->b_rcred; + else + cr = bp->b_wcred; + + /* + * If the op is asynchronous and an i/o daemon is waiting + * queue the request, wake it up and wait for completion + * otherwise just do it ourselves. + */ + if ((bp->b_flags & B_ASYNC) == 0 || + ncl_asyncio(VFSTONFS(ap->a_vp->v_mount), bp, NOCRED, curthread)) + (void)ncl_doio(ap->a_vp, bp, cr, curthread); + return (0); +} + +/* + * fsync vnode op. Just call ncl_flush() with commit == 1. + */ +/* ARGSUSED */ +static int +nfs_fsync(struct vop_fsync_args *ap) +{ + return (ncl_flush(ap->a_vp, ap->a_waitfor, NULL, ap->a_td, 1)); +} + +/* + * Flush all the blocks associated with a vnode. + * Walk through the buffer pool and push any dirty pages + * associated with the vnode. 
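+ *
+ * In outline (a summary of the code below for the commit != 0 case,
+ * not a separate algorithm):
+ *
+ *	Pass one: collect the (B_DELWRI | B_NEEDCOMMIT) buffers -- data
+ *	the server has received but not yet committed to stable storage
+ *	-- work out the byte range they cover, and do the commit rpc(s)
+ *	for that range.  Buffers whose commit succeeded are marked
+ *	clean; on a commit error or stale write verifier they stay
+ *	B_DELWRI, so they are written again.
+ *
+ *	Pass two: bwrite() any remaining dirty buffers and, for
+ *	MNT_WAIT, sleep until all output and directio async writes
+ *	have drained.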
+ */ +int +ncl_flush(struct vnode *vp, int waitfor, struct ucred *cred, struct thread *td, + int commit) +{ + struct nfsnode *np = VTONFS(vp); + struct buf *bp; + int i; + struct buf *nbp; + struct nfsmount *nmp = VFSTONFS(vp->v_mount); + int error = 0, slptimeo = 0, slpflag = 0, retv, bvecpos; + int passone = 1, trycnt = 0; + u_quad_t off, endoff, toff; + struct ucred* wcred = NULL; + struct buf **bvec = NULL; + struct bufobj *bo; +#ifndef NFS_COMMITBVECSIZ +#define NFS_COMMITBVECSIZ 20 +#endif + struct buf *bvec_on_stack[NFS_COMMITBVECSIZ]; + int bvecsize = 0, bveccount; + + if (nmp->nm_flag & NFSMNT_INT) + slpflag = PCATCH; + if (!commit) + passone = 0; + bo = &vp->v_bufobj; + /* + * A b_flags == (B_DELWRI | B_NEEDCOMMIT) block has been written to the + * server, but has not been committed to stable storage on the server + * yet. On the first pass, the byte range is worked out and the commit + * rpc is done. On the second pass, ncl_writebp() is called to do the + * job. + */ +again: + off = (u_quad_t)-1; + endoff = 0; + bvecpos = 0; + if (NFS_ISV34(vp) && commit) { + if (bvec != NULL && bvec != bvec_on_stack) + free(bvec, M_TEMP); + /* + * Count up how many buffers waiting for a commit. + */ + bveccount = 0; + BO_LOCK(bo); + TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { + if (!BUF_ISLOCKED(bp) && + (bp->b_flags & (B_DELWRI | B_NEEDCOMMIT)) + == (B_DELWRI | B_NEEDCOMMIT)) + bveccount++; + } + /* + * Allocate space to remember the list of bufs to commit. It is + * important to use M_NOWAIT here to avoid a race with nfs_write. + * If we can't get memory (for whatever reason), we will end up + * committing the buffers one-by-one in the loop below. + */ + if (bveccount > NFS_COMMITBVECSIZ) { + /* + * Release the vnode interlock to avoid a lock + * order reversal. + */ + BO_UNLOCK(bo); + bvec = (struct buf **) + malloc(bveccount * sizeof(struct buf *), + M_TEMP, M_NOWAIT); + BO_LOCK(bo); + if (bvec == NULL) { + bvec = bvec_on_stack; + bvecsize = NFS_COMMITBVECSIZ; + } else + bvecsize = bveccount; + } else { + bvec = bvec_on_stack; + bvecsize = NFS_COMMITBVECSIZ; + } + TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { + if (bvecpos >= bvecsize) + break; + if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL)) { + nbp = TAILQ_NEXT(bp, b_bobufs); + continue; + } + if ((bp->b_flags & (B_DELWRI | B_NEEDCOMMIT)) != + (B_DELWRI | B_NEEDCOMMIT)) { + BUF_UNLOCK(bp); + nbp = TAILQ_NEXT(bp, b_bobufs); + continue; + } + BO_UNLOCK(bo); + bremfree(bp); + /* + * Work out if all buffers are using the same cred + * so we can deal with them all with one commit. + * + * NOTE: we are not clearing B_DONE here, so we have + * to do it later on in this routine if we intend to + * initiate I/O on the bp. + * + * Note: to avoid loopback deadlocks, we do not + * assign b_runningbufspace. + */ + if (wcred == NULL) + wcred = bp->b_wcred; + else if (wcred != bp->b_wcred) + wcred = NOCRED; + vfs_busy_pages(bp, 1); + + BO_LOCK(bo); + /* + * bp is protected by being locked, but nbp is not + * and vfs_busy_pages() may sleep. We have to + * recalculate nbp. + */ + nbp = TAILQ_NEXT(bp, b_bobufs); + + /* + * A list of these buffers is kept so that the + * second loop knows which buffers have actually + * been committed. This is necessary, since there + * may be a race between the commit rpc and new + * uncommitted writes on the file. 
+ */ + bvec[bvecpos++] = bp; + toff = ((u_quad_t)bp->b_blkno) * DEV_BSIZE + + bp->b_dirtyoff; + if (toff < off) + off = toff; + toff += (u_quad_t)(bp->b_dirtyend - bp->b_dirtyoff); + if (toff > endoff) + endoff = toff; + } + BO_UNLOCK(bo); + } + if (bvecpos > 0) { + /* + * Commit data on the server, as required. + * If all bufs are using the same wcred, then use that with + * one call for all of them, otherwise commit each one + * separately. + */ + if (wcred != NOCRED) + retv = ncl_commit(vp, off, (int)(endoff - off), + wcred, td); + else { + retv = 0; + for (i = 0; i < bvecpos; i++) { + off_t off, size; + bp = bvec[i]; + off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE + + bp->b_dirtyoff; + size = (u_quad_t)(bp->b_dirtyend + - bp->b_dirtyoff); + retv = ncl_commit(vp, off, (int)size, + bp->b_wcred, td); + if (retv) break; + } + } + + if (retv == NFSERR_STALEWRITEVERF) + ncl_clearcommit(vp->v_mount); + + /* + * Now, either mark the blocks I/O done or mark the + * blocks dirty, depending on whether the commit + * succeeded. + */ + for (i = 0; i < bvecpos; i++) { + bp = bvec[i]; + bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK); + if (retv) { + /* + * Error, leave B_DELWRI intact + */ + vfs_unbusy_pages(bp); + brelse(bp); + } else { + /* + * Success, remove B_DELWRI ( bundirty() ). + * + * b_dirtyoff/b_dirtyend seem to be NFS + * specific. We should probably move that + * into bundirty(). XXX + */ + bufobj_wref(bo); + bp->b_flags |= B_ASYNC; + bundirty(bp); + bp->b_flags &= ~B_DONE; + bp->b_ioflags &= ~BIO_ERROR; + bp->b_dirtyoff = bp->b_dirtyend = 0; + bufdone(bp); + } + } + } + + /* + * Start/do any write(s) that are required. + */ +loop: + BO_LOCK(bo); + TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { + if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL)) { + if (waitfor != MNT_WAIT || passone) + continue; + + error = BUF_TIMELOCK(bp, + LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, + BO_MTX(bo), "nfsfsync", slpflag, slptimeo); + if (error == 0) { + BUF_UNLOCK(bp); + goto loop; + } + if (error == ENOLCK) { + error = 0; + goto loop; + } + if (newnfs_sigintr(nmp, td)) { + error = EINTR; + goto done; + } + if (slpflag == PCATCH) { + slpflag = 0; + slptimeo = 2 * hz; + } + goto loop; + } + if ((bp->b_flags & B_DELWRI) == 0) + panic("nfs_fsync: not dirty"); + if ((passone || !commit) && (bp->b_flags & B_NEEDCOMMIT)) { + BUF_UNLOCK(bp); + continue; + } + BO_UNLOCK(bo); + bremfree(bp); + if (passone || !commit) + bp->b_flags |= B_ASYNC; + else + bp->b_flags |= B_ASYNC; + bwrite(bp); + if (newnfs_sigintr(nmp, td)) { + error = EINTR; + goto done; + } + goto loop; + } + if (passone) { + passone = 0; + BO_UNLOCK(bo); + goto again; + } + if (waitfor == MNT_WAIT) { + while (bo->bo_numoutput) { + error = bufobj_wwait(bo, slpflag, slptimeo); + if (error) { + BO_UNLOCK(bo); + error = newnfs_sigintr(nmp, td); + if (error) + goto done; + if (slpflag == PCATCH) { + slpflag = 0; + slptimeo = 2 * hz; + } + BO_LOCK(bo); + } + } + if (bo->bo_dirty.bv_cnt != 0 && commit) { + BO_UNLOCK(bo); + goto loop; + } + /* + * Wait for all the async IO requests to drain + */ + BO_UNLOCK(bo); + mtx_lock(&np->n_mtx); + while (np->n_directio_asyncwr > 0) { + np->n_flag |= NFSYNCWAIT; + error = ncl_msleep(td, (caddr_t)&np->n_directio_asyncwr, + &np->n_mtx, slpflag | (PRIBIO + 1), + "nfsfsync", 0); + if (error) { + if (newnfs_sigintr(nmp, td)) { + mtx_unlock(&np->n_mtx); + error = EINTR; + goto done; + } + } + } + mtx_unlock(&np->n_mtx); + } else + BO_UNLOCK(bo); + mtx_lock(&np->n_mtx); + if (np->n_flag & NWRITEERR) { + error = 
np->n_error; + np->n_flag &= ~NWRITEERR; + } + if (commit && bo->bo_dirty.bv_cnt == 0 && + bo->bo_numoutput == 0 && np->n_directio_asyncwr == 0) + np->n_flag &= ~NMODIFIED; + mtx_unlock(&np->n_mtx); +done: + if (bvec != NULL && bvec != bvec_on_stack) + free(bvec, M_TEMP); + if (error == 0 && commit != 0 && waitfor == MNT_WAIT && + (bo->bo_dirty.bv_cnt != 0 || bo->bo_numoutput != 0 || + np->n_directio_asyncwr != 0) && trycnt++ < 5) { + /* try, try again... */ + passone = 1; + wcred = NULL; + bvec = NULL; + bvecsize = 0; +printf("try%d\n", trycnt); + goto again; + } + return (error); +} + +/* + * NFS advisory byte-level locks. + */ +static int +nfs_advlock(struct vop_advlock_args *ap) +{ + struct vnode *vp = ap->a_vp; + struct ucred *cred; + struct nfsnode *np = VTONFS(ap->a_vp); + struct proc *p = (struct proc *)ap->a_id; + struct thread *td = curthread; /* XXX */ + struct vattr va; + int ret, error = EOPNOTSUPP, vlret; + u_quad_t size; + + if (NFS_ISV4(vp) && (ap->a_flags & F_POSIX)) { + cred = p->p_ucred; + vlret = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); + if (vlret) + return (vlret); + + /* + * If this is unlocking a write locked region, flush and + * commit them before unlocking. This is required by + * RFC3530 Sec. 9.3.2. + */ + if (ap->a_op == F_UNLCK && + nfscl_checkwritelocked(vp, ap->a_fl, cred, td)) + (void) ncl_flush(vp, MNT_WAIT, cred, td, 1); + + /* + * Loop around doing the lock op, while a blocking lock + * must wait for the lock op to succeed. + */ + do { + ret = nfsrpc_advlock(vp, np->n_size, ap->a_op, + ap->a_fl, 0, cred, td); + if (ret == NFSERR_DENIED && (ap->a_flags & F_WAIT) && + ap->a_op == F_SETLK) { + VOP_UNLOCK(vp, 0); + error = nfs_catnap(PZERO | PCATCH, "ncladvl"); + if (error) + return (EINTR); + vlret = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); + if (vlret) + return (vlret); + } + } while (ret == NFSERR_DENIED && (ap->a_flags & F_WAIT) && + ap->a_op == F_SETLK); + if (ret == NFSERR_DENIED) { + VOP_UNLOCK(vp, 0); + return (EAGAIN); + } else if (ret == EINVAL || ret == EBADF || ret == EINTR) { + VOP_UNLOCK(vp, 0); + return (ret); + } else if (ret != 0) { + VOP_UNLOCK(vp, 0); + return (EACCES); + } + + /* + * Now, if we just got a lock, invalidate data in the buffer + * cache, as required, so that the coherency conforms with + * RFC3530 Sec. 9.3.2. + */ + if (ap->a_op == F_SETLK) { + if ((np->n_flag & NMODIFIED) == 0) { + np->n_attrstamp = 0; + ret = VOP_GETATTR(vp, &va, cred); + } + if ((np->n_flag & NMODIFIED) || ret || + np->n_change != va.va_filerev) { + (void) ncl_vinvalbuf(vp, V_SAVE, td, 1); + np->n_attrstamp = 0; + ret = VOP_GETATTR(vp, &va, cred); + if (!ret) { + np->n_mtime = va.va_mtime; + np->n_change = va.va_filerev; + } + } + } + VOP_UNLOCK(vp, 0); + return (0); + } else if (!NFS_ISV4(vp)) { + error = vn_lock(vp, LK_SHARED); + if (error) + return (error); + if ((VFSTONFS(vp->v_mount)->nm_flag & NFSMNT_NOLOCKD) != 0) { + size = VTONFS(vp)->n_size; + VOP_UNLOCK(vp, 0); + error = lf_advlock(ap, &(vp->v_lockf), size); + } else { + if (ncl_advlock_p) + error = ncl_advlock_p(ap); + else + error = ENOLCK; + } + } + return (error); +} + +/* + * NFS advisory byte-level locks. 
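+ */
+
+/*
+ * A minimal userland sketch of the fcntl(2) request that the NFSv4
+ * byte-range locking above services.  The mount path is hypothetical
+ * and the block is illustrative only, so it is not compiled in.
+ */
+#if 0
+#include <fcntl.h>
+#include <string.h>
+#include <unistd.h>
+
+int
+lock_whole_file(const char *path)	/* e.g. "/mnt/nfs4/file" */
+{
+	struct flock fl;
+	int fd;
+
+	if ((fd = open(path, O_RDWR)) == -1)
+		return (-1);
+	memset(&fl, 0, sizeof(fl));
+	fl.l_type = F_WRLCK;		/* write lock ... */
+	fl.l_whence = SEEK_SET;
+	fl.l_start = 0;
+	fl.l_len = 0;			/* ... over the whole file */
+	if (fcntl(fd, F_SETLKW, &fl) == -1) {	/* blocks: the F_WAIT case */
+		(void)close(fd);
+		return (-1);
+	}
+	/* ... do I/O under the lock ... */
+	fl.l_type = F_UNLCK;		/* unlock flushes/commits, see above */
+	(void)fcntl(fd, F_SETLK, &fl);
+	return (close(fd));
+}
+#endif
+
+/*
+ * The asynchronous variant of the above; not supported for NFSv4.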
+ */ +static int +nfs_advlockasync(struct vop_advlockasync_args *ap) +{ + struct vnode *vp = ap->a_vp; + u_quad_t size; + int error; + + if (NFS_ISV4(vp)) + return (EOPNOTSUPP); + error = vn_lock(vp, LK_SHARED); + if (error) + return (error); + if ((VFSTONFS(vp->v_mount)->nm_flag & NFSMNT_NOLOCKD) != 0) { + size = VTONFS(vp)->n_size; + VOP_UNLOCK(vp, 0); + error = lf_advlockasync(ap, &(vp->v_lockf), size); + } else { + VOP_UNLOCK(vp, 0); + error = EOPNOTSUPP; + } + return (error); +} + +/* + * Print out the contents of an nfsnode. + */ +static int +nfs_print(struct vop_print_args *ap) +{ + struct vnode *vp = ap->a_vp; + struct nfsnode *np = VTONFS(vp); + + ncl_printf("\tfileid %ld fsid 0x%x", + np->n_vattr.na_fileid, np->n_vattr.na_fsid); + if (vp->v_type == VFIFO) + fifo_printinfo(vp); + printf("\n"); + return (0); +} + +/* + * This is the "real" nfs::bwrite(struct buf*). + * We set B_CACHE if this is a VMIO buffer. + */ +int +ncl_writebp(struct buf *bp, int force __unused, struct thread *td) +{ + int s; + int oldflags = bp->b_flags; +#if 0 + int retv = 1; + off_t off; +#endif + + BUF_ASSERT_HELD(bp); + + if (bp->b_flags & B_INVAL) { + brelse(bp); + return(0); + } + + bp->b_flags |= B_CACHE; + + /* + * Undirty the bp. We will redirty it later if the I/O fails. + */ + + s = splbio(); + bundirty(bp); + bp->b_flags &= ~B_DONE; + bp->b_ioflags &= ~BIO_ERROR; + bp->b_iocmd = BIO_WRITE; + + bufobj_wref(bp->b_bufobj); + curthread->td_ru.ru_oublock++; + splx(s); + + /* + * Note: to avoid loopback deadlocks, we do not + * assign b_runningbufspace. + */ + vfs_busy_pages(bp, 1); + + BUF_KERNPROC(bp); + bp->b_iooffset = dbtob(bp->b_blkno); + bstrategy(bp); + + if( (oldflags & B_ASYNC) == 0) { + int rtval = bufwait(bp); + + if (oldflags & B_DELWRI) { + s = splbio(); + reassignbuf(bp); + splx(s); + } + brelse(bp); + return (rtval); + } + + return (0); +} + +/* + * nfs special file access vnode op. + * Essentially just get vattr and then imitate iaccess() since the device is + * local to the client. + */ +static int +nfsspec_access(struct vop_access_args *ap) +{ + struct vattr *vap; + struct ucred *cred = ap->a_cred; + struct vnode *vp = ap->a_vp; + accmode_t accmode = ap->a_accmode; + struct vattr vattr; + int error; + + /* + * Disallow write attempts on filesystems mounted read-only; + * unless the file is a socket, fifo, or a block or character + * device resident on the filesystem. + */ + if ((accmode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) { + switch (vp->v_type) { + case VREG: + case VDIR: + case VLNK: + return (EROFS); + default: + break; + } + } + vap = &vattr; + error = VOP_GETATTR(vp, vap, cred); + if (error) + goto out; + error = vaccess(vp->v_type, vap->va_mode, vap->va_uid, vap->va_gid, + accmode, cred, NULL); +out: + return error; +} + +/* + * Read wrapper for fifos. + */ +static int +nfsfifo_read(struct vop_read_args *ap) +{ + struct nfsnode *np = VTONFS(ap->a_vp); + int error; + + /* + * Set access flag. + */ + mtx_lock(&np->n_mtx); + np->n_flag |= NACC; + getnanotime(&np->n_atim); + mtx_unlock(&np->n_mtx); + error = fifo_specops.vop_read(ap); + return error; +} + +/* + * Write wrapper for fifos. + */ +static int +nfsfifo_write(struct vop_write_args *ap) +{ + struct nfsnode *np = VTONFS(ap->a_vp); + + /* + * Set update flag. + */ + mtx_lock(&np->n_mtx); + np->n_flag |= NUPD; + getnanotime(&np->n_mtim); + mtx_unlock(&np->n_mtx); + return(fifo_specops.vop_write(ap)); +} + +/* + * Close wrapper for fifos. + * + * Update the times on the nfsnode then do fifo close. 
+ */ +static int +nfsfifo_close(struct vop_close_args *ap) +{ + struct vnode *vp = ap->a_vp; + struct nfsnode *np = VTONFS(vp); + struct vattr vattr; + struct timespec ts; + + mtx_lock(&np->n_mtx); + if (np->n_flag & (NACC | NUPD)) { + getnanotime(&ts); + if (np->n_flag & NACC) + np->n_atim = ts; + if (np->n_flag & NUPD) + np->n_mtim = ts; + np->n_flag |= NCHG; + if (vrefcnt(vp) == 1 && + (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) { + VATTR_NULL(&vattr); + if (np->n_flag & NACC) + vattr.va_atime = np->n_atim; + if (np->n_flag & NUPD) + vattr.va_mtime = np->n_mtim; + mtx_unlock(&np->n_mtx); + (void)VOP_SETATTR(vp, &vattr, ap->a_cred); + goto out; + } + } + mtx_unlock(&np->n_mtx); +out: + return (fifo_specops.vop_close(ap)); +} + +/* + * Just call ncl_writebp() with the force argument set to 1. + * + * NOTE: B_DONE may or may not be set in a_bp on call. + */ +static int +nfs_bwrite(struct buf *bp) +{ + + return (ncl_writebp(bp, 1, curthread)); +} + +struct buf_ops buf_ops_newnfs = { + .bop_name = "buf_ops_nfs", + .bop_write = nfs_bwrite, + .bop_strategy = bufstrategy, + .bop_sync = bufsync, + .bop_bdflush = bufbdflush, +}; + +/* + * Cloned from vop_stdlock(), and then the ugly hack added. + */ +static int +nfs_lock1(struct vop_lock1_args *ap) +{ + struct vnode *vp = ap->a_vp; + int error = 0; + + /* + * Since vfs_hash_get() calls vget() and it will no longer work + * for FreeBSD8 with flags == 0, I can only think of this horrible + * hack to work around it. I call vfs_hash_get() with LK_EXCLOTHER + * and then handle it here. All I want for this case is a v_usecount + * on the vnode to use for recovery, while another thread might + * hold a lock on the vnode. I have the other threads blocked, so + * there isn't any race problem. + */ + if ((ap->a_flags & LK_TYPE_MASK) == LK_EXCLOTHER) { + if ((ap->a_flags & LK_INTERLOCK) == 0) + panic("ncllock1"); + if ((vp->v_iflag & VI_DOOMED)) + error = ENOENT; + VI_UNLOCK(vp); + return (error); + } + return (_lockmgr_args(vp->v_vnlock, ap->a_flags, VI_MTX(vp), + LK_WMESG_DEFAULT, LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, ap->a_file, + ap->a_line)); +} + +#ifdef NFS4_ACL_EXTATTR_NAME +static int +nfs_getacl(struct vop_getacl_args *ap) +{ + int error; + + if (ap->a_type != ACL_TYPE_NFS4) + return (EOPNOTSUPP); + error = nfsrpc_getacl(ap->a_vp, ap->a_cred, ap->a_td, ap->a_aclp, + NULL); + if (error > NFSERR_STALE) { + (void) nfscl_maperr(ap->a_td, error, (uid_t)0, (gid_t)0); + error = EPERM; + } + return (error); +} + +static int +nfs_setacl(struct vop_setacl_args *ap) +{ + int error; + + if (ap->a_type != ACL_TYPE_NFS4) + return (EOPNOTSUPP); + error = nfsrpc_setacl(ap->a_vp, ap->a_cred, ap->a_td, ap->a_aclp, + NULL); + if (error > NFSERR_STALE) { + (void) nfscl_maperr(ap->a_td, error, (uid_t)0, (gid_t)0); + error = EPERM; + } + return (error); +} + +#endif /* NFS4_ACL_EXTATTR_NAME */ diff --git a/sys/fs/nfsclient/nfs_lock.h b/sys/fs/nfsclient/nfs_lock.h new file mode 100644 index 0000000..58fc949 --- /dev/null +++ b/sys/fs/nfsclient/nfs_lock.h @@ -0,0 +1,89 @@ +/*- + * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Berkeley Software Design Inc's name may not be used to endorse or + * promote products derived from this software without specific prior + * written permission. + * + * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * $FreeBSD$ + */ + +/* + * lockd uses the nfssvc system call to get the unique kernel services it needs. + * It passes in a request structure with a version number at the start. + * This prevents libc from needing to change if the information passed + * between lockd and the kernel needs to change. + * + * If a structure changes, you must bump the version number. + */ + +/* + * The fifo where the kernel writes requests for locks on remote NFS files, + * and where lockd reads these requests. + * + */ +#define _PATH_NFSLCKDEV "nfslock" + +/* + * This structure is used to uniquely identify the process which originated + * a particular message to lockd. A sequence number is used to differentiate + * multiple messages from the same process. A process start time is used to + * detect the unlikely, but possible, event of the recycling of a pid. + */ +struct lockd_msg_ident { + pid_t pid; /* The process ID. */ + struct timeval pid_start; /* Start time of process id */ + int msg_seq; /* Sequence number of message */ +}; + +#define LOCKD_MSG_VERSION 3 + +/* + * The structure that the kernel hands us for each lock request. + */ +typedef struct __lock_msg { + TAILQ_ENTRY(__lock_msg) lm_link; /* internal linkage */ + int lm_version; /* which version is this */ + struct lockd_msg_ident lm_msg_ident; /* originator of the message */ + struct flock lm_fl; /* The lock request. */ + int lm_wait; /* The F_WAIT flag. */ + int lm_getlk; /* is this a F_GETLK request */ + struct sockaddr_storage lm_addr; /* The address. */ + int lm_nfsv3; /* If NFS version 3. */ + size_t lm_fh_len; /* The file handle length. */ + struct xucred lm_cred; /* user cred for lock req */ + u_int8_t lm_fh[NFSX_V3FHMAX];/* The file handle. */ +} LOCKD_MSG; + +#define LOCKD_ANS_VERSION 1 + +struct lockd_ans { + int la_vers; + struct lockd_msg_ident la_msg_ident; /* originator of the message */ + int la_errno; + int la_set_getlk_pid; /* use returned pid */ + int la_getlk_pid; /* returned pid for F_GETLK */ +}; + +#ifdef _KERNEL +int ncl_dolock(struct vop_advlock_args *ap); +#endif diff --git a/sys/fs/nfsclient/nfsargs.h b/sys/fs/nfsclient/nfsargs.h new file mode 100644 index 0000000..4fce292 --- /dev/null +++ b/sys/fs/nfsclient/nfsargs.h @@ -0,0 +1,104 @@ +/*- + * Copyright (c) 1989, 1993, 1995 + * The Regents of the University of California. All rights reserved. 
+ * + * This code is derived from software contributed to Berkeley by + * Rick Macklem at The University of Guelph. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * $FreeBSD$ + */ + +#ifndef _NFSCLIENT_NFSARGS_H_ +#define _NFSCLIENT_NFSARGS_H_ + +/* + * Arguments to mount NFS + */ +#define NFS_ARGSVERSION 4 /* change when nfs_args changes */ +struct nfs_args { + int version; /* args structure version number */ + struct sockaddr *addr; /* file server address */ + int addrlen; /* length of address */ + int sotype; /* Socket type */ + int proto; /* and Protocol */ + u_char *fh; /* File handle to be mounted */ + int fhsize; /* Size, in bytes, of fh */ + int flags; /* flags */ + int wsize; /* write size in bytes */ + int rsize; /* read size in bytes */ + int readdirsize; /* readdir size in bytes */ + int timeo; /* initial timeout in .1 secs */ + int retrans; /* times to retry send */ + int readahead; /* # of blocks to readahead */ + int iothreadcnt; /* and count of assoc threads */ + int wcommitsize; /* Max. 
write commit size in bytes */ + char *hostname; /* server's name */ + int acregmin; /* cache attrs for reg files min time */ + int acregmax; /* cache attrs for reg files max time */ + int acdirmin; /* cache attrs for dirs min time */ + int acdirmax; /* cache attrs for dirs max time */ + int krbnamelen; /* KerberosV principal name "-P" */ + char *krbname; + int dirlen; /* Mount pt path for NFSv4 */ + char *dirpath; + int srvkrbnamelen; /* Server kerberos target principal */ + char *srvkrbname; /* and the name */ +}; + +/* + * NFS mount option flags + */ +#define NFSMNT_SOFT 0x00000001 /* soft mount (hard is default) */ +#define NFSMNT_WSIZE 0x00000002 /* set write size */ +#define NFSMNT_RSIZE 0x00000004 /* set read size */ +#define NFSMNT_TIMEO 0x00000008 /* set initial timeout */ +#define NFSMNT_RETRANS 0x00000010 /* set number of request retries */ +#define NFSMNT_DIRECTIO 0x00000020 /* set maximum grouplist size */ +#define NFSMNT_INT 0x00000040 /* allow interrupts on hard mount */ +#define NFSMNT_NOCONN 0x00000080 /* Don't Connect the socket */ +#define NFSMNT_NFSV4 0x00000100 /* Use NFSv4 */ +#define NFSMNT_NFSV3 0x00000200 /* Use NFS Version 3 protocol */ +#define NFSMNT_KERB 0x00000400 /* Use Kerberos authentication */ +#define NFSMNT_STRICT3530 0x00000800 /* Follow RFC3530 strictly */ +#define NFSMNT_WCOMMITSIZE 0x00001000 /* set max write commit size */ +#define NFSMNT_READAHEAD 0x00002000 /* set read ahead */ +#define NFSMNT_INTEGRITY 0x00004000 /* Use Integrity cksum - krb5i */ +#define NFSMNT_PRIVACY 0x00008000 /* Encrypt RPCs - krb5p */ +#define NFSMNT_RDIRPLUS 0x00010000 /* Use Readdirplus for V3 */ +#define NFSMNT_READDIRSIZE 0x00020000 /* Set readdir size */ +#define NFSMNT_ACREGMIN 0x00040000 +#define NFSMNT_ACREGMAX 0x00080000 +#define NFSMNT_ACDIRMIN 0x00100000 +#define NFSMNT_ACDIRMAX 0x00200000 +#define NFSMNT_NOLOCKD 0x00400000 /* Locks are local */ +#define NFSMNT_ALLGSSNAME 0x00800000 /* All RPCs use host principal */ +#define NFSMNT_HASWRITEVERF 0x01000000 /* NFSv4 Write verifier */ +#define NFSMNT_HASSETFSID 0x02000000 /* Has set FSID */ +#define NFSMNT_RESVPORT 0x04000000 /* Use a reserved port (Bunk!!) */ +#define NFSMNT_AUTOM 0x08000000 /* Done by autofs */ + +#endif /* _NFSCLIENT_NFSARGS_H_ */ diff --git a/sys/fs/nfsclient/nfsdiskless.h b/sys/fs/nfsclient/nfsdiskless.h new file mode 100644 index 0000000..fc67a04 --- /dev/null +++ b/sys/fs/nfsclient/nfsdiskless.h @@ -0,0 +1,108 @@ +/*- + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Rick Macklem at The University of Guelph. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * $FreeBSD$ + */ + +#ifndef _NFSCLIENT_NFSDISKLESS_H_ +#define _NFSCLIENT_NFSDISKLESS_H_ + +/* + * Structure that must be initialized for a diskless nfs client. + * This structure is used by nfs_mountroot() to set up the root vnode, + * and to do a partial ifconfig(8) and route(8) so that the critical net + * interface can communicate with the server. + * The primary bootstrap is expected to fill in the appropriate fields before + * starting the kernel. + * Currently only works for AF_INET protocols. + * NB: All fields are stored in net byte order to avoid hassles with + * client/server byte ordering differences. + */ + +/* + * I have defined a new structure that can handle an NFS Version 3 file handle + * but the kernel still expects the old Version 2 one to be provided. The + * changes required in nfs_vfsops.c for using the new are documented there in + * comments. (I felt that breaking network booting code by changing this + * structure would not be prudent at this time, since almost all servers are + * still Version 2 anyhow.) 
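+ *
+ * For illustration only, with hypothetical values, a bootstrap would
+ * fill in at least the root file handle and server address (all in
+ * net byte order, as noted above) before starting the kernel:
+ *
+ *	nd.root_fhsize = 28;			(length of the v3 handle)
+ *	bcopy(fh, nd.root_fh, nd.root_fhsize);	(handle of the root dir)
+ *	nd.root_saddr.sin_addr.s_addr = inet_addr("192.0.2.1");
+ *	nd.root_args.sotype = SOCK_DGRAM;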
+ */ +struct nfsv3_diskless { + struct ifaliasreq myif; /* Default interface */ + struct sockaddr_in mygateway; /* Default gateway */ + struct nfs_args root_args; /* Mount args for root fs */ + int root_fhsize; /* Size of root file handle */ + u_char root_fh[NFSX_FHMAX]; /* File handle of root dir */ + struct sockaddr_in root_saddr; /* Address of root server */ + char root_hostnam[MNAMELEN]; /* Host name for mount pt */ + long root_time; /* Timestamp of root fs */ + char my_hostnam[MAXHOSTNAMELEN]; /* Client host name */ +}; + +/* + * Old arguments to mount NFS + */ +struct onfs_args { + struct sockaddr *addr; /* file server address */ + int addrlen; /* length of address */ + int sotype; /* Socket type */ + int proto; /* and Protocol */ + u_char *fh; /* File handle to be mounted */ + int fhsize; /* Size, in bytes, of fh */ + int flags; /* flags */ + int wsize; /* write size in bytes */ + int rsize; /* read size in bytes */ + int readdirsize; /* readdir size in bytes */ + int timeo; /* initial timeout in .1 secs */ + int retrans; /* times to retry send */ + int readahead; /* # of blocks to readahead */ + int leaseterm; /* Term (sec) of lease */ + char *hostname; /* server's name */ +}; + +struct nfs_diskless { + struct ifaliasreq myif; /* Default interface */ + struct sockaddr_in mygateway; /* Default gateway */ + struct onfs_args root_args; /* Mount args for root fs */ + u_char root_fh[NFSX_V2FH]; /* File handle of root dir */ + struct sockaddr_in root_saddr; /* Address of root server */ + char root_hostnam[MNAMELEN]; /* Host name for mount pt */ + long root_time; /* Timestamp of root fs */ + char my_hostnam[MAXHOSTNAMELEN]; /* Client host name */ +}; + +#ifdef _KERNEL +void bootpc_init(void); +void nfs_setup_diskless(void); +void nfs_parse_options(const char *, struct nfs_args *); +#endif + +#endif /* _NFSCLIENT_NFSDISKLESS_H_ */ diff --git a/sys/fs/nfsclient/nfsmount.h b/sys/fs/nfsclient/nfsmount.h new file mode 100644 index 0000000..acf438e --- /dev/null +++ b/sys/fs/nfsclient/nfsmount.h @@ -0,0 +1,106 @@ +/*- + * Copyright (c) 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Rick Macklem at The University of Guelph. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * $FreeBSD$ + */ + +#ifndef _NFSCLIENT_NFSMOUNT_H_ +#define _NFSCLIENT_NFSMOUNT_H_ + +/* + * Mount structure. + * One allocated on every NFS mount. + * Holds NFS specific information for mount. + */ +struct nfsmount { + struct mtx nm_mtx; + int nm_flag; /* Flags for soft/hard... */ + int nm_state; /* Internal state flags */ + struct mount *nm_mountp; /* Vfs structure for this filesystem */ + int nm_numgrps; /* Max. size of groupslist */ + u_char nm_fh[NFSX_FHMAX]; /* File handle of root dir */ + int nm_fhsize; /* Size of root file handle */ + struct nfssockreq nm_sockreq; /* Socket Info */ + int nm_timeo; /* Init timer for NFSMNT_DUMBTIMR */ + int nm_retry; /* Max retries */ + int nm_timeouts; /* Request timeouts */ + int nm_rsize; /* Max size of read rpc */ + int nm_wsize; /* Max size of write rpc */ + int nm_readdirsize; /* Size of a readdir rpc */ + int nm_readahead; /* Num. of blocks to readahead */ + int nm_wcommitsize; /* Max size of commit for write */ + int nm_acdirmin; /* Directory attr cache min lifetime */ + int nm_acdirmax; /* Directory attr cache max lifetime */ + int nm_acregmin; /* Reg file attr cache min lifetime */ + int nm_acregmax; /* Reg file attr cache max lifetime */ + u_char nm_verf[NFSX_VERF]; /* write verifier */ + TAILQ_HEAD(, buf) nm_bufq; /* async io buffer queue */ + short nm_bufqlen; /* number of buffers in queue */ + short nm_bufqwant; /* process wants to add to the queue */ + int nm_bufqiods; /* number of iods processing queue */ + u_int64_t nm_maxfilesize; /* maximum file size */ + int nm_tprintf_initial_delay; /* initial delay */ + int nm_tprintf_delay; /* interval for messages */ + + /* Newnfs additions */ + int nm_iothreadcnt; + struct proc *nm_iodwant[NFS_MAXRAHEAD]; + struct nfsclclient *nm_clp; + uid_t nm_uid; /* Uid for SetClientID etc. */ + u_int64_t nm_clval; /* identifies which clientid */ + u_int64_t nm_fsid[2]; /* NFSv4 fsid */ + u_int16_t nm_krbnamelen; /* Krb5 host principal, if any */ + u_int16_t nm_dirpathlen; /* and mount dirpath, for V4 */ + u_int16_t nm_srvkrbnamelen; /* and the server's target name */ + u_char nm_name[1]; /* malloc'd actual len of krbname + dirpath */ +}; + +#define nm_nam nm_sockreq.nr_nam +#define nm_sotype nm_sockreq.nr_sotype +#define nm_so nm_sockreq.nr_so +#define nm_soflags nm_sockreq.nr_soflags +#define nm_soproto nm_sockreq.nr_soproto +#define nm_client nm_sockreq.nr_client +#define nm_krbname nm_name + +#define NFSMNT_DIRPATH(m) (&((m)->nm_name[(m)->nm_krbnamelen + 1])) +#define NFSMNT_SRVKRBNAME(m) \ + (&((m)->nm_name[(m)->nm_krbnamelen + (m)->nm_dirpathlen + 2])) + +#if defined(_KERNEL) +/* + * Convert mount ptr to nfsmount ptr. 
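+ * Typical use, as seen throughout the client code above:
+ *
+ *	struct nfsmount *nmp = VFSTONFS(vp->v_mount);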
+ */ +#define VFSTONFS(mp) ((struct nfsmount *)((mp)->mnt_data)) + +#endif /* _KERNEL */ + +#endif /* _NFSCLIENT_NFSMOUNT_H_ */ diff --git a/sys/fs/nfsclient/nfsnode.h b/sys/fs/nfsclient/nfsnode.h new file mode 100644 index 0000000..c0610c9 --- /dev/null +++ b/sys/fs/nfsclient/nfsnode.h @@ -0,0 +1,201 @@ +/*- + * Copyright (c) 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Rick Macklem at The University of Guelph. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * $FreeBSD$ + */ + +#ifndef _NFSCLIENT_NFSNODE_H_ +#define _NFSCLIENT_NFSNODE_H_ + +/* + * Silly rename structure that hangs off the nfsnode until the name + * can be removed by nfs_inactive() + */ +struct sillyrename { + struct ucred *s_cred; + struct vnode *s_dvp; + long s_namlen; + char s_name[32]; +}; + +/* + * This structure is used to save the logical directory offset to + * NFS cookie mappings. + * The mappings are stored in a list headed + * by n_cookies, as required. + * There is one mapping for each NFS_DIRBLKSIZ bytes of directory information + * stored in increasing logical offset byte order. + */ +#define NFSNUMCOOKIES 31 + +struct nfsdmap { + LIST_ENTRY(nfsdmap) ndm_list; + int ndm_eocookie; + union { + nfsuint64 ndmu3_cookies[NFSNUMCOOKIES]; + uint64_t ndmu4_cookies[NFSNUMCOOKIES]; + } ndm_un1; +}; + +#define ndm_cookies ndm_un1.ndmu3_cookies +#define ndm4_cookies ndm_un1.ndmu4_cookies + +#define n_ac_ts_tid n_ac_ts.nfs_ac_ts_tid +#define n_ac_ts_pid n_ac_ts.nfs_ac_ts_pid +#define n_ac_ts_syscalls n_ac_ts.nfs_ac_ts_syscalls + +struct nfs_attrcache_timestamp { + lwpid_t nfs_ac_ts_tid; + pid_t nfs_ac_ts_pid; + unsigned long nfs_ac_ts_syscalls; +}; + +struct nfs_accesscache { + u_int32_t mode; /* ACCESS mode cache */ + uid_t uid; /* credentials having mode */ + time_t stamp; /* mode cache timestamp */ +}; + +/* + * The nfsnode is the nfs equivalent to ufs's inode. Any similarity + * is purely coincidental. 
+ * There is a unique nfsnode allocated for each active file, + * each current directory, each mounted-on file, text file, and the root. + * An nfsnode is 'named' by its file handle. (nget/nfs_node.c) + * If this structure exceeds 256 bytes (it is currently 256 using 4.4BSD-Lite + * type definitions), file handles of > 32 bytes should probably be split out + * into a separate MALLOC()'d data structure. (Reduce the size of nfsfh_t by + * changing the definition in nfsproto.h of NFS_SMALLFH.) + * NB: Hopefully the current order of the fields is such that everything will + * be well aligned and, therefore, tightly packed. + */ +struct nfsnode { + struct mtx n_mtx; /* Protects all of these members */ + u_quad_t n_size; /* Current size of file */ + u_quad_t n_brev; /* Modify rev when cached */ + u_quad_t n_lrev; /* Modify rev for lease */ + struct nfsvattr n_vattr; /* Vnode attribute cache */ + time_t n_attrstamp; /* Attr. cache timestamp */ + struct nfs_accesscache n_accesscache[NFS_ACCESSCACHESIZE]; + struct timespec n_mtime; /* Prev modify time. */ + time_t n_ctime; /* Prev create time. */ + time_t n_dmtime; /* Prev dir modify time. */ + time_t n_expiry; /* Lease expiry time */ + struct nfsfh *n_fhp; /* NFS File Handle */ + struct vnode *n_vnode; /* associated vnode */ + struct vnode *n_dvp; /* parent vnode */ + struct lockf *n_lockf; /* Locking record of file */ + int n_error; /* Save write error value */ + union { + struct timespec nf_atim; /* Special file times */ + nfsuint64 nd_cookieverf; /* Cookie verifier (dir only) */ + u_char nd4_cookieverf[NFSX_VERF]; + } n_un1; + union { + struct timespec nf_mtim; + off_t nd_direof; /* Dir. EOF offset cache */ + } n_un2; + union { + struct sillyrename *nf_silly; /* Ptr to silly rename struct */ + LIST_HEAD(, nfsdmap) nd_cook; /* cookies */ + } n_un3; + short n_fhsize; /* size in bytes, of fh */ + u_int32_t n_flag; /* Flag for locking.. 
+/*
+ * Flags for n_flag
+ */
+#define	NDIRCOOKIELK	0x00000001  /* Lock to serialize access to directory cookies */
+#define	NFSYNCWAIT	0x00000002  /* fsync waiting for all directio async
+					writes to drain */
+#define	NMODIFIED	0x00000004  /* Might have a modified buffer in bio */
+#define	NWRITEERR	0x00000008  /* Flag write errors so close will know */
+#define	NCREATED	0x00000010  /* Opened by nfs_create() */
+#define	NTRUNCATE	0x00000020  /* Opened by nfs_setattr() */
+#define	NSIZECHANGED	0x00000040  /* File size has changed: need cache inval */
+#define	NNONCACHE	0x00000080  /* Node marked as noncacheable */
+#define	NACC		0x00000100  /* Special file accessed */
+#define	NUPD		0x00000200  /* Special file updated */
+#define	NCHG		0x00000400  /* Special file times changed */
+#define	NDELEGMOD	0x00000800  /* Modified delegation */
+#define	NDELEGRECALL	0x00001000  /* Recall in progress */
+#define	NREMOVEINPROG	0x00002000  /* Remove in progress */
+#define	NREMOVEWANT	0x00004000  /* Want notification that remove is done */
+#define	NLOCK		0x00008000  /* Sleep lock the node */
+#define	NLOCKWANT	0x00010000  /* Want the sleep lock */
+
+/*
+ * Convert between nfsnode pointers and vnode pointers
+ */
+#define	VTONFS(vp)	((struct nfsnode *)(vp)->v_data)
+#define	NFSTOV(np)	((struct vnode *)(np)->n_vnode)
+
+#define	NFS_TIMESPEC_COMPARE(T1, T2)	(((T1)->tv_sec != (T2)->tv_sec) || ((T1)->tv_nsec != (T2)->tv_nsec))
+
+#if defined(_KERNEL)
+
+/*
+ * Prototypes for NFS vnode operations
+ */
+int	ncl_getpages(struct vop_getpages_args *);
+int	ncl_putpages(struct vop_putpages_args *);
+int	ncl_write(struct vop_write_args *);
+int	ncl_inactive(struct vop_inactive_args *);
+int	ncl_reclaim(struct vop_reclaim_args *);
+
+/* Other NFS client helper routines. */
+int	ncl_removeit(struct sillyrename *, struct vnode *);
+int	ncl_nget(struct mount *, u_int8_t *, int, struct nfsnode **);
+nfsuint64 *ncl_getcookie(struct nfsnode *, off_t, int);
+void	ncl_invaldir(struct vnode *);
+int	ncl_upgrade_vnlock(struct vnode *);
+void	ncl_downgrade_vnlock(struct vnode *, int);
+void	ncl_printf(const char *, ...);
+void	ncl_dircookie_lock(struct nfsnode *);
+void	ncl_dircookie_unlock(struct nfsnode *);
+
+#endif	/* _KERNEL */
+
+#endif	/* _NFSCLIENT_NFSNODE_H_ */
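To make the nfsdmap layout concrete: with one cookie per NFS_DIRBLKSIZ directory block and NFSNUMCOOKIES cookies per list element, a logical offset decomposes into a list element and a slot within it. The sketch below (nfs_dmap_lookup() is hypothetical) is a simplified version of what ncl_getcookie() does for an NFSv3 mount; the real routine also allocates elements on demand, honors ndm_eocookie, and expects the caller to hold the NDIRCOOKIELK lock via ncl_dircookie_lock().

static nfsuint64 *
nfs_dmap_lookup(struct nfsnode *np, off_t off)
{
	struct nfsdmap *dp;
	int blk, ent, slot;

	blk = off / NFS_DIRBLKSIZ;	/* directory block index */
	ent = blk / NFSNUMCOOKIES;	/* which nfsdmap in the n_cookies list */
	slot = blk % NFSNUMCOOKIES;	/* cookie slot within that nfsdmap */
	LIST_FOREACH(dp, &np->n_cookies, ndm_list)
		if (ent-- == 0)
			return (&dp->ndm_cookies[slot]);
	return (NULL);			/* mapping not cached */
}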
diff --git a/sys/fs/nfsclient/nlminfo.h b/sys/fs/nfsclient/nlminfo.h
new file mode 100644
index 0000000..5439a10
--- /dev/null
+++ b/sys/fs/nfsclient/nlminfo.h
@@ -0,0 +1,41 @@
+/*-
+ * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Berkeley Software Design Inc's name may not be used to endorse or
+ *    promote products derived from this software without specific prior
+ *    written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * Misc NLM information, some needed for the master lockd process, and some
+ * needed by every process doing NLM-based locking.
+ */
+struct nlminfo {
+	/* These are used by any process doing NLM locking. */
+	int	msg_seq;	/* sequence counter for lock requests */
+	int	retcode;	/* return code for lock requests */
+	int	set_getlk_pid;	/* nonzero when getlk_pid holds a valid answer */
+	int	getlk_pid;	/* pid of conflicting lock holder (F_GETLK) */
+	struct timeval pid_start;	/* process starting time */
+};
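The two getlk fields carry the answer to an NLM test (F_GETLK) request back from lockd to the process that issued it. The sketch below is a hedged illustration of how such an answer might be consumed; nlm_copy_getlk() is a hypothetical helper, not code from this tree.

static void
nlm_copy_getlk(struct nlminfo *nip, struct flock *fl)
{
	/* lockd sets set_getlk_pid once getlk_pid is valid */
	if (nip->set_getlk_pid) {
		fl->l_pid = nip->getlk_pid;	/* conflicting lock holder */
		nip->set_getlk_pid = 0;		/* consume the answer */
	}
}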