| author | rmacklem <rmacklem@FreeBSD.org> | 2009-05-04 15:23:58 +0000 |
| committer | rmacklem <rmacklem@FreeBSD.org> | 2009-05-04 15:23:58 +0000 |
| commit | e3d34903b6fb9cb09f7e616bde59d97341958fa2 (patch) |
| tree | 0246ff14527b554e60f1c9212be00ee8c1128197 /sys/fs/nfsserver |
| parent | fb2908c8ff440e0985013b83071bd8dfecb11371 (diff) |
| download | FreeBSD-src-e3d34903b6fb9cb09f7e616bde59d97341958fa2.zip, FreeBSD-src-e3d34903b6fb9cb09f7e616bde59d97341958fa2.tar.gz |
Add the experimental nfs subtree to the kernel; it includes
support for NFSv4 as well as NFSv2 and 3.
It lives in 3 subdirs under sys/fs:
nfs - functions that are common to the client and server
nfsclient - a mutation of sys/nfsclient that calls generic functions
            to do RPCs and handle state. As such, it retains, for the
            most part, the buffer cache handling characteristics and
            vnode semantics found in sys/nfsclient.
nfsserver - the server. It includes a DRC designed specifically for
            NFSv4, which is used instead of the generic DRC in sys/rpc
            (a simplified sketch of its keying scheme follows this message).
The build glue will be checked in later, so at this point, it
consists of 3 new subdirs that should not affect kernel building.
Approved by: kib (mentor)
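The DRC mentioned above keys its TCP entries on <xid, NFS version> and confirms a probable retry by also comparing the request length and a checksum over the first bytes of the XDR request; the full design notes are in the comment block at the top of nfs_nfsdcache.c below. The following is a minimal user-space sketch of that keying idea only: it is not the code added by this commit, and every name in it is hypothetical (the kernel code uses NFSRCHASH() and in_cksum() on the mbuf chain instead).

```c
#include <stdint.h>
#include <stdio.h>

#define CHECKLEN  100               /* like NFSRVCACHE_CHECKLEN */
#define HASHSIZE  256               /* illustrative table size */

struct drc_entry {
	uint32_t xid;               /* RPC transaction id */
	int	 vers;              /* NFS version (2, 3 or 4) */
	uint16_t cksum;             /* checksum of first CHECKLEN bytes */
	size_t	 reqlen;            /* full XDR request length */
	struct drc_entry *next;     /* hash chain */
};

static struct drc_entry *drc_hash[HASHSIZE];

/* Spread the xid the way NFSRCHASH() does: mix in the high byte. */
static unsigned
drc_slot(uint32_t xid)
{
	return (xid + (xid >> 24)) % HASHSIZE;
}

/* Trivial 16-bit sum standing in for in_cksum() over the mbuf chain. */
static uint16_t
drc_cksum(const uint8_t *buf, size_t len)
{
	size_t cklen = len > CHECKLEN ? CHECKLEN : len;
	uint32_t sum = 0;

	for (size_t i = 0; i < cklen; i++)
		sum += buf[i];
	return ((uint16_t)(sum ^ (sum >> 16)));
}

/* A hit requires xid, version, request length and checksum to all match. */
static struct drc_entry *
drc_lookup(uint32_t xid, int vers, const uint8_t *req, size_t reqlen)
{
	uint16_t ck = drc_cksum(req, reqlen);
	struct drc_entry *e;

	for (e = drc_hash[drc_slot(xid)]; e != NULL; e = e->next)
		if (e->xid == xid && e->vers == vers &&
		    e->reqlen == reqlen && e->cksum == ck)
			return (e);
	return (NULL);
}

int
main(void)
{
	static struct drc_entry e = { .xid = 0x1234, .vers = 4 };
	static const uint8_t req[] = "COMPOUND request bytes";

	/* Cache an entry for this request... */
	e.reqlen = sizeof(req);
	e.cksum = drc_cksum(req, sizeof(req));
	e.next = drc_hash[drc_slot(e.xid)];
	drc_hash[drc_slot(e.xid)] = &e;

	/* ...then a retransmission of the same bytes is recognized as a hit. */
	printf("retry detected: %s\n",
	    drc_lookup(0x1234, 4, req, sizeof(req)) ? "yes" : "no");
	return (0);
}
```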
Diffstat (limited to 'sys/fs/nfsserver')
| -rw-r--r-- | sys/fs/nfsserver/nfs_nfsdcache.c | 867 |
| -rw-r--r-- | sys/fs/nfsserver/nfs_nfsdkrpc.c | 455 |
| -rw-r--r-- | sys/fs/nfsserver/nfs_nfsdport.c | 3060 |
| -rw-r--r-- | sys/fs/nfsserver/nfs_nfsdserv.c | 3367 |
| -rw-r--r-- | sys/fs/nfsserver/nfs_nfsdsocket.c | 979 |
| -rw-r--r-- | sys/fs/nfsserver/nfs_nfsdstate.c | 4891 |
| -rw-r--r-- | sys/fs/nfsserver/nfs_nfsdsubs.c | 2021 |
7 files changed, 15640 insertions, 0 deletions
diff --git a/sys/fs/nfsserver/nfs_nfsdcache.c b/sys/fs/nfsserver/nfs_nfsdcache.c new file mode 100644 index 0000000..a83510e --- /dev/null +++ b/sys/fs/nfsserver/nfs_nfsdcache.c @@ -0,0 +1,867 @@ +/*- + * Copyright (c) 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Rick Macklem at The University of Guelph. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ + +#include <sys/cdefs.h> +__FBSDID("$FreeBSD$"); + +/* + * Here is the basic algorithm: + * First, some design criteria I used: + * - I think a false hit is more serious than a false miss + * - A false hit for an RPC that has Op(s) that order via seqid# must be + * avoided at all cost + * - A valid hit will probably happen a long time after the original reply + * and the TCP socket that the original request was received on will no + * longer be active + * (The long time delay implies to me that LRU is not appropriate.) + * - The mechanism will satisfy the requirements of ordering Ops with seqid#s + * in them as well as minimizing the risk of redoing retried non-idempotent + * Ops. + * Because it is biased towards avoiding false hits, multiple entries with + * the same xid are to be expected, especially for the case of the entry + * in the cache being related to a seqid# sequenced Op. 
+ * + * The basic algorithm I'm about to code up: + * - Null RPCs bypass the cache and are just done + * For TCP + * - key on <xid, NFS version> (as noted above, there can be several + * entries with the same key) + * When a request arrives: + * For all that match key + * - if RPC# != OR request_size != + * - not a match with this one + * - if NFSv4 and received on same TCP socket OR + * received on a TCP connection created before the + * entry was cached + * - not a match with this one + * (V2,3 clients might retry on same TCP socket) + * - calculate checksum on first N bytes of NFS XDR + * - if checksum != + * - not a match for this one + * If any of the remaining ones that match has a + * seqid_refcnt > 0 + * - not a match (go do RPC, using new cache entry) + * If one match left + * - a hit (reply from cache) + * else + * - miss (go do RPC, using new cache entry) + * + * During processing of NFSv4 request: + * - set a flag when a non-idempotent Op is processed + * - when an Op that uses a seqid# (Open,...) is processed + * - if same seqid# as referenced entry in cache + * - free new cache entry + * - reply from referenced cache entry + * else if next seqid# in order + * - free referenced cache entry + * - increment seqid_refcnt on new cache entry + * - set pointer from Openowner/Lockowner to + * new cache entry (aka reference it) + * else if first seqid# in sequence + * - increment seqid_refcnt on new cache entry + * - set pointer from Openowner/Lockowner to + * new cache entry (aka reference it) + * + * At end of RPC processing: + * - if seqid_refcnt > 0 OR flagged non-idempotent on new + * cache entry + * - save reply in cache entry + * - calculate checksum on first N bytes of NFS XDR + * request + * - note op and length of XDR request (in bytes) + * - timestamp it + * else + * - free new cache entry + * - Send reply (noting info for socket activity check, below) + * + * For cache entries saved above: + * - if saved since seqid_refcnt was > 0 + * - free when seqid_refcnt decrements to 0 + * (when next one in sequence is processed above, or + * when Openowner/Lockowner is discarded) + * else { non-idempotent Op(s) } + * - free when + * - some further activity observed on same + * socket + * (I'm not yet sure how I'm going to do + * this. Maybe look at the TCP connection + * to see if the send_tcp_sequence# is well + * past sent reply OR K additional RPCs + * replied on same socket OR?) + * OR + * - when very old (hours, days, weeks?) + * + * For UDP (v2, 3 only), pretty much the old way: + * - key on <xid, NFS version, RPC#, Client host ip#> + * (at most one entry for each key) + * + * When a Request arrives: + * - if a match with entry via key + * - if RPC marked In_progress + * - discard request (don't send reply) + * else + * - reply from cache + * - timestamp cache entry + * else + * - add entry to cache, marked In_progress + * - do RPC + * - when RPC done + * - if RPC# non-idempotent + * - mark entry Done (not In_progress) + * - save reply + * - timestamp cache entry + * else + * - free cache entry + * - send reply + * + * Later, entries with saved replies are free'd a short time (few minutes) + * after reply sent (timestamp). + * Reference: Chet Juszczak, "Improving the Performance and Correctness + * of an NFS Server", in Proc. Winter 1989 USENIX Conference, + * pages 53-63. San Diego, February 1989. + * for the UDP case. + * nfsrc_floodlevel is set to the allowable upper limit for saved replies + * for TCP. For V3, a reply won't be saved when the flood level is + * hit. 
For V4, the non-idempotent Op will return NFSERR_RESOURCE in + * that case. This level should be set high enough that this almost + * never happens. + */ +#ifndef APPLEKEXT +#include <fs/nfs/nfsport.h> + +extern struct nfsstats newnfsstats; +NFSCACHEMUTEX; +int nfsrc_floodlevel = NFSRVCACHE_FLOODLEVEL, nfsrc_tcpsavedreplies = 0; +#endif /* !APPLEKEXT */ + +static int nfsrc_tcpnonidempotent = 1; +static int nfsrc_udphighwater = NFSRVCACHE_UDPHIGHWATER, nfsrc_udpcachesize = 0; +static TAILQ_HEAD(, nfsrvcache) nfsrvudplru; +static struct nfsrvhashhead nfsrvhashtbl[NFSRVCACHE_HASHSIZE], + nfsrvudphashtbl[NFSRVCACHE_HASHSIZE]; +/* + * and the reverse mapping from generic to Version 2 procedure numbers + */ +static int newnfsv2_procid[NFS_V3NPROCS] = { + NFSV2PROC_NULL, + NFSV2PROC_GETATTR, + NFSV2PROC_SETATTR, + NFSV2PROC_LOOKUP, + NFSV2PROC_NOOP, + NFSV2PROC_READLINK, + NFSV2PROC_READ, + NFSV2PROC_WRITE, + NFSV2PROC_CREATE, + NFSV2PROC_MKDIR, + NFSV2PROC_SYMLINK, + NFSV2PROC_CREATE, + NFSV2PROC_REMOVE, + NFSV2PROC_RMDIR, + NFSV2PROC_RENAME, + NFSV2PROC_LINK, + NFSV2PROC_READDIR, + NFSV2PROC_NOOP, + NFSV2PROC_STATFS, + NFSV2PROC_NOOP, + NFSV2PROC_NOOP, + NFSV2PROC_NOOP, +}; + +#define NFSRCUDPHASH(xid) \ + (&nfsrvudphashtbl[((xid) + ((xid) >> 24)) % NFSRVCACHE_HASHSIZE]) +#define NFSRCHASH(xid) \ + (&nfsrvhashtbl[((xid) + ((xid) >> 24)) % NFSRVCACHE_HASHSIZE]) +#define TRUE 1 +#define FALSE 0 +#define NFSRVCACHE_CHECKLEN 100 + +/* True iff the rpc reply is an nfs status ONLY! */ +static int nfsv2_repstat[NFS_V3NPROCS] = { + FALSE, + FALSE, + FALSE, + FALSE, + FALSE, + FALSE, + FALSE, + FALSE, + FALSE, + FALSE, + TRUE, + TRUE, + TRUE, + TRUE, + FALSE, + TRUE, + FALSE, + FALSE, + FALSE, + FALSE, + FALSE, + FALSE, +}; + +/* + * Will NFS want to work over IPv6 someday? + */ +#define NETFAMILY(rp) \ + (((rp)->rc_flag & RC_INETIPV6) ? AF_INET6 : AF_INET) + +/* local functions */ +static int nfsrc_getudp(struct nfsrv_descript *nd, struct nfsrvcache *newrp); +static int nfsrc_gettcp(struct nfsrv_descript *nd, struct nfsrvcache *newrp); +static void nfsrc_lock(struct nfsrvcache *rp); +static void nfsrc_unlock(struct nfsrvcache *rp); +static void nfsrc_wanted(struct nfsrvcache *rp); +static void nfsrc_freecache(struct nfsrvcache *rp); +static void nfsrc_trimcache(u_int64_t, struct socket *); +static int nfsrc_activesocket(struct nfsrvcache *rp, u_int64_t, + struct socket *); +static int nfsrc_getlenandcksum(mbuf_t m1, u_int16_t *cksum); +static void nfsrc_marksametcpconn(u_int64_t); + +/* + * Initialize the server request cache list + */ +APPLESTATIC void +nfsrvd_initcache(void) +{ + int i; + static int inited = 0; + + if (inited) + return; + inited = 1; + for (i = 0; i < NFSRVCACHE_HASHSIZE; i++) { + LIST_INIT(&nfsrvudphashtbl[i]); + LIST_INIT(&nfsrvhashtbl[i]); + } + TAILQ_INIT(&nfsrvudplru); + nfsrc_tcpsavedreplies = 0; + nfsrc_udpcachesize = 0; + newnfsstats.srvcache_tcppeak = 0; + newnfsstats.srvcache_size = 0; +} + +/* + * Get a cache entry for this request. Basically just malloc a new one + * and then call nfsrc_getudp() or nfsrc_gettcp() to do the rest. + * Call nfsrc_trimcache() to clean up the cache before returning. 
+ */ +APPLESTATIC int +nfsrvd_getcache(struct nfsrv_descript *nd, struct socket *so) +{ + struct nfsrvcache *newrp; + int ret; + + if (nd->nd_procnum == NFSPROC_NULL) + panic("nfsd cache null"); + MALLOC(newrp, struct nfsrvcache *, sizeof (struct nfsrvcache), + M_NFSRVCACHE, M_WAITOK); + NFSBZERO((caddr_t)newrp, sizeof (struct nfsrvcache)); + if (nd->nd_flag & ND_NFSV4) + newrp->rc_flag = RC_NFSV4; + else if (nd->nd_flag & ND_NFSV3) + newrp->rc_flag = RC_NFSV3; + else + newrp->rc_flag = RC_NFSV2; + newrp->rc_xid = nd->nd_retxid; + newrp->rc_proc = nd->nd_procnum; + newrp->rc_sockref = nd->nd_sockref; + newrp->rc_cachetime = nd->nd_tcpconntime; + if (nd->nd_flag & ND_SAMETCPCONN) + newrp->rc_flag |= RC_SAMETCPCONN; + if (nd->nd_nam2 != NULL) { + newrp->rc_flag |= RC_UDP; + ret = nfsrc_getudp(nd, newrp); + } else { + ret = nfsrc_gettcp(nd, newrp); + } + nfsrc_trimcache(nd->nd_sockref, so); + return (ret); +} + +/* + * For UDP (v2, v3): + * - key on <xid, NFS version, RPC#, Client host ip#> + * (at most one entry for each key) + */ +static int +nfsrc_getudp(struct nfsrv_descript *nd, struct nfsrvcache *newrp) +{ + struct nfsrvcache *rp; + struct sockaddr_in *saddr; + struct sockaddr_in6 *saddr6; + struct nfsrvhashhead *hp; + int ret = 0; + + hp = NFSRCUDPHASH(newrp->rc_xid); +loop: + NFSLOCKCACHE(); + LIST_FOREACH(rp, hp, rc_hash) { + if (newrp->rc_xid == rp->rc_xid && + newrp->rc_proc == rp->rc_proc && + (newrp->rc_flag & rp->rc_flag & RC_NFSVERS) && + nfsaddr_match(NETFAMILY(rp), &rp->rc_haddr, nd->nd_nam)) { + if ((rp->rc_flag & RC_LOCKED) != 0) { + rp->rc_flag |= RC_WANTED; + NFSUNLOCKCACHE(); + (void) tsleep((caddr_t)rp, PZERO - 1, + "nfsrc", 10 * hz); + goto loop; + } + if (rp->rc_flag == 0) + panic("nfs udp cache0"); + rp->rc_flag |= RC_LOCKED; + TAILQ_REMOVE(&nfsrvudplru, rp, rc_lru); + TAILQ_INSERT_TAIL(&nfsrvudplru, rp, rc_lru); + if (rp->rc_flag & RC_INPROG) { + newnfsstats.srvcache_inproghits++; + NFSUNLOCKCACHE(); + ret = RC_DROPIT; + } else if (rp->rc_flag & RC_REPSTATUS) { + /* + * V2 only. 
+ */ + newnfsstats.srvcache_nonidemdonehits++; + NFSUNLOCKCACHE(); + nfsrvd_rephead(nd); + *(nd->nd_errp) = rp->rc_status; + ret = RC_REPLY; + rp->rc_timestamp = NFSD_MONOSEC + + NFSRVCACHE_UDPTIMEOUT; + } else if (rp->rc_flag & RC_REPMBUF) { + newnfsstats.srvcache_nonidemdonehits++; + NFSUNLOCKCACHE(); + nd->nd_mreq = m_copym(rp->rc_reply, 0, + M_COPYALL, M_WAIT); + ret = RC_REPLY; + rp->rc_timestamp = NFSD_MONOSEC + + NFSRVCACHE_UDPTIMEOUT; + } else { + panic("nfs udp cache1"); + } + nfsrc_unlock(rp); + free((caddr_t)newrp, M_NFSRVCACHE); + return (ret); + } + } + newnfsstats.srvcache_misses++; + newnfsstats.srvcache_size++; + nfsrc_udpcachesize++; + + newrp->rc_flag |= RC_INPROG; + saddr = NFSSOCKADDR(nd->nd_nam, struct sockaddr_in *); + if (saddr->sin_family == AF_INET) + newrp->rc_inet = saddr->sin_addr.s_addr; + else if (saddr->sin_family == AF_INET6) { + saddr6 = (struct sockaddr_in6 *)saddr; + NFSBCOPY((caddr_t)&saddr6->sin6_addr,(caddr_t)&newrp->rc_inet6, + sizeof (struct in6_addr)); + rp->rc_flag |= RC_INETIPV6; + } + LIST_INSERT_HEAD(hp, newrp, rc_hash); + TAILQ_INSERT_TAIL(&nfsrvudplru, newrp, rc_lru); + NFSUNLOCKCACHE(); + nd->nd_rp = newrp; + return (RC_DOIT); +} + +/* + * Update a request cache entry after the rpc has been done + */ +APPLESTATIC struct nfsrvcache * +nfsrvd_updatecache(struct nfsrv_descript *nd, struct socket *so) +{ + struct nfsrvcache *rp; + struct nfsrvcache *retrp = NULL; + + rp = nd->nd_rp; + if (!rp) + panic("nfsrvd_updatecache null rp"); + nd->nd_rp = NULL; + NFSLOCKCACHE(); + nfsrc_lock(rp); + if (!(rp->rc_flag & RC_INPROG)) + panic("nfsrvd_updatecache not inprog"); + rp->rc_flag &= ~RC_INPROG; + if (rp->rc_flag & RC_UDP) { + TAILQ_REMOVE(&nfsrvudplru, rp, rc_lru); + TAILQ_INSERT_TAIL(&nfsrvudplru, rp, rc_lru); + } + + /* + * Reply from cache is a special case returned by nfsrv_checkseqid(). 
+ */ + if (nd->nd_repstat == NFSERR_REPLYFROMCACHE) { + newnfsstats.srvcache_nonidemdonehits++; + NFSUNLOCKCACHE(); + nd->nd_repstat = 0; + if (nd->nd_mreq) + mbuf_freem(nd->nd_mreq); + if (!(rp->rc_flag & RC_REPMBUF)) + panic("reply from cache"); + nd->nd_mreq = m_copym(rp->rc_reply, 0, + M_COPYALL, M_WAIT); + rp->rc_timestamp = NFSD_MONOSEC + NFSRVCACHE_TCPTIMEOUT; + nfsrc_unlock(rp); + nfsrc_trimcache(nd->nd_sockref, so); + return (retrp); + } + + /* + * If rc_refcnt > 0, save it + * For UDP, save it if ND_SAVEREPLY is set + * For TCP, save it if ND_SAVEREPLY and nfsrc_tcpnonidempotent is set + */ + if (nd->nd_repstat != NFSERR_DONTREPLY && + (rp->rc_refcnt > 0 || + ((nd->nd_flag & ND_SAVEREPLY) && (rp->rc_flag & RC_UDP)) || + ((nd->nd_flag & ND_SAVEREPLY) && !(rp->rc_flag & RC_UDP) && + nfsrc_tcpsavedreplies <= nfsrc_floodlevel && + nfsrc_tcpnonidempotent))) { + if (rp->rc_refcnt > 0) { + if (!(rp->rc_flag & RC_NFSV4)) + panic("update_cache refcnt"); + rp->rc_flag |= RC_REFCNT; + } + if ((nd->nd_flag & ND_NFSV2) && + nfsv2_repstat[newnfsv2_procid[nd->nd_procnum]]) { + NFSUNLOCKCACHE(); + rp->rc_status = nd->nd_repstat; + rp->rc_flag |= RC_REPSTATUS; + } else { + if (!(rp->rc_flag & RC_UDP)) { + nfsrc_tcpsavedreplies++; + if (nfsrc_tcpsavedreplies > + newnfsstats.srvcache_tcppeak) + newnfsstats.srvcache_tcppeak = + nfsrc_tcpsavedreplies; + } + NFSUNLOCKCACHE(); + rp->rc_reply = m_copym(nd->nd_mreq, 0, M_COPYALL, + M_WAIT); + rp->rc_flag |= RC_REPMBUF; + } + if (rp->rc_flag & RC_UDP) { + rp->rc_timestamp = NFSD_MONOSEC + + NFSRVCACHE_UDPTIMEOUT; + nfsrc_unlock(rp); + } else { + rp->rc_timestamp = NFSD_MONOSEC + + NFSRVCACHE_TCPTIMEOUT; + if (rp->rc_refcnt > 0) + nfsrc_unlock(rp); + else + retrp = rp; + } + } else { + nfsrc_freecache(rp); + NFSUNLOCKCACHE(); + } + nfsrc_trimcache(nd->nd_sockref, so); + return (retrp); +} + +/* + * Invalidate and, if possible, free an in prog cache entry. + * Must not sleep. + */ +APPLESTATIC void +nfsrvd_delcache(struct nfsrvcache *rp) +{ + + if (!(rp->rc_flag & RC_INPROG)) + panic("nfsrvd_delcache not in prog"); + NFSLOCKCACHE(); + rp->rc_flag &= ~RC_INPROG; + if (rp->rc_refcnt == 0 && !(rp->rc_flag & RC_LOCKED)) + nfsrc_freecache(rp); + NFSUNLOCKCACHE(); +} + +/* + * Called after nfsrvd_updatecache() once the reply is sent, to update + * the entry for nfsrc_activesocket() and unlock it. The argument is + * the pointer returned by nfsrvd_updatecache(). + */ +APPLESTATIC void +nfsrvd_sentcache(struct nfsrvcache *rp, struct socket *so, int err) +{ + + if (!(rp->rc_flag & RC_LOCKED)) + panic("nfsrvd_sentcache not locked"); + if (!err) { + if (so->so_proto->pr_domain->dom_family != AF_INET || + so->so_proto->pr_protocol != IPPROTO_TCP) + panic("nfs sent cache"); + if (nfsrv_getsockseqnum(so, &rp->rc_tcpseq)) + rp->rc_flag |= RC_TCPSEQ; + } + nfsrc_unlock(rp); +} + +/* + * Get a cache entry for TCP + * - key on <xid, nfs version> + * (allow multiple entries for a given key) + */ +static int +nfsrc_gettcp(struct nfsrv_descript *nd, struct nfsrvcache *newrp) +{ + struct nfsrvcache *rp, *nextrp; + int i; + struct nfsrvcache *hitrp; + struct nfsrvhashhead *hp, nfsrc_templist; + int hit, ret = 0; + + hp = NFSRCHASH(newrp->rc_xid); + newrp->rc_reqlen = nfsrc_getlenandcksum(nd->nd_mrep, &newrp->rc_cksum); +tryagain: + NFSLOCKCACHE(); + hit = 1; + LIST_INIT(&nfsrc_templist); + /* + * Get all the matches and put them on the temp list. 
+ */ + rp = LIST_FIRST(hp); + while (rp != LIST_END(hp)) { + nextrp = LIST_NEXT(rp, rc_hash); + if (newrp->rc_xid == rp->rc_xid && + (!(rp->rc_flag & RC_INPROG) || + ((newrp->rc_flag & RC_SAMETCPCONN) && + newrp->rc_sockref == rp->rc_sockref)) && + (newrp->rc_flag & rp->rc_flag & RC_NFSVERS) && + newrp->rc_proc == rp->rc_proc && + ((newrp->rc_flag & RC_NFSV4) && + newrp->rc_sockref != rp->rc_sockref && + newrp->rc_cachetime >= rp->rc_cachetime) + && newrp->rc_reqlen == rp->rc_reqlen && + newrp->rc_cksum == rp->rc_cksum) { + LIST_REMOVE(rp, rc_hash); + LIST_INSERT_HEAD(&nfsrc_templist, rp, rc_hash); + } + rp = nextrp; + } + + /* + * Now, use nfsrc_templist to decide if there is a match. + */ + i = 0; + LIST_FOREACH(rp, &nfsrc_templist, rc_hash) { + i++; + if (rp->rc_refcnt > 0) { + hit = 0; + break; + } + } + /* + * Can be a hit only if one entry left. + * Note possible hit entry and put nfsrc_templist back on hash + * list. + */ + if (i != 1) + hit = 0; + hitrp = rp = LIST_FIRST(&nfsrc_templist); + while (rp != LIST_END(&nfsrc_templist)) { + nextrp = LIST_NEXT(rp, rc_hash); + LIST_REMOVE(rp, rc_hash); + LIST_INSERT_HEAD(hp, rp, rc_hash); + rp = nextrp; + } + if (LIST_FIRST(&nfsrc_templist) != LIST_END(&nfsrc_templist)) + panic("nfs gettcp cache templist"); + + if (hit) { + rp = hitrp; + if ((rp->rc_flag & RC_LOCKED) != 0) { + rp->rc_flag |= RC_WANTED; + NFSUNLOCKCACHE(); + (void) tsleep((caddr_t)rp, PZERO-1, "nfsrc", 10 * hz); + goto tryagain; + } + if (rp->rc_flag == 0) + panic("nfs tcp cache0"); + rp->rc_flag |= RC_LOCKED; + if (rp->rc_flag & RC_INPROG) { + newnfsstats.srvcache_inproghits++; + NFSUNLOCKCACHE(); + if (newrp->rc_sockref == rp->rc_sockref) + nfsrc_marksametcpconn(rp->rc_sockref); + ret = RC_DROPIT; + } else if (rp->rc_flag & RC_REPSTATUS) { + /* + * V2 only. + */ + newnfsstats.srvcache_nonidemdonehits++; + NFSUNLOCKCACHE(); + if (newrp->rc_sockref == rp->rc_sockref) + nfsrc_marksametcpconn(rp->rc_sockref); + ret = RC_REPLY; + nfsrvd_rephead(nd); + *(nd->nd_errp) = rp->rc_status; + rp->rc_timestamp = NFSD_MONOSEC + + NFSRVCACHE_TCPTIMEOUT; + } else if (rp->rc_flag & RC_REPMBUF) { + newnfsstats.srvcache_nonidemdonehits++; + NFSUNLOCKCACHE(); + if (newrp->rc_sockref == rp->rc_sockref) + nfsrc_marksametcpconn(rp->rc_sockref); + ret = RC_REPLY; + nd->nd_mreq = m_copym(rp->rc_reply, 0, + M_COPYALL, M_WAIT); + rp->rc_timestamp = NFSD_MONOSEC + + NFSRVCACHE_TCPTIMEOUT; + } else { + panic("nfs tcp cache1"); + } + nfsrc_unlock(rp); + free((caddr_t)newrp, M_NFSRVCACHE); + return (ret); + } + newnfsstats.srvcache_misses++; + newnfsstats.srvcache_size++; + + /* + * For TCP, multiple entries for a key are allowed, so don't + * chain it into the hash table until done. + */ + newrp->rc_cachetime = NFSD_MONOSEC; + newrp->rc_flag |= RC_INPROG; + LIST_INSERT_HEAD(hp, newrp, rc_hash); + NFSUNLOCKCACHE(); + nd->nd_rp = newrp; + return (RC_DOIT); +} + +/* + * Lock a cache entry. + * Also puts a mutex lock on the cache list. + */ +static void +nfsrc_lock(struct nfsrvcache *rp) +{ + NFSCACHELOCKREQUIRED(); + while ((rp->rc_flag & RC_LOCKED) != 0) { + rp->rc_flag |= RC_WANTED; + (void) nfsmsleep((caddr_t)rp, NFSCACHEMUTEXPTR, PZERO - 1, + "nfsrc", 0); + } + rp->rc_flag |= RC_LOCKED; +} + +/* + * Unlock a cache entry. + */ +static void +nfsrc_unlock(struct nfsrvcache *rp) +{ + rp->rc_flag &= ~RC_LOCKED; + nfsrc_wanted(rp); +} + +/* + * Wakeup anyone wanting entry. 
+ */ +static void +nfsrc_wanted(struct nfsrvcache *rp) +{ + if (rp->rc_flag & RC_WANTED) { + rp->rc_flag &= ~RC_WANTED; + wakeup((caddr_t)rp); + } +} + +/* + * Free up the entry. + * Must not sleep. + */ +static void +nfsrc_freecache(struct nfsrvcache *rp) +{ + + NFSCACHELOCKREQUIRED(); + LIST_REMOVE(rp, rc_hash); + if (rp->rc_flag & RC_UDP) { + TAILQ_REMOVE(&nfsrvudplru, rp, rc_lru); + nfsrc_udpcachesize--; + } + nfsrc_wanted(rp); + if (rp->rc_flag & RC_REPMBUF) { + mbuf_freem(rp->rc_reply); + if (!(rp->rc_flag & RC_UDP)) + nfsrc_tcpsavedreplies--; + } + FREE((caddr_t)rp, M_NFSRVCACHE); + newnfsstats.srvcache_size--; +} + +#ifdef notdef +/* + * Clean out the cache. Called when the last nfsd terminates. + */ +APPLESTATIC void +nfsrvd_cleancache(void) +{ + struct nfsrvcache *rp, *nextrp; + int i; + + NFSLOCKCACHE(); + for (i = 0; i < NFSRVCACHE_HASHSIZE; i++) { + LIST_FOREACH_SAFE(rp, &nfsrvhashtbl[i], rc_hash, nextrp) { + nfsrc_freecache(rp); + } + } + for (i = 0; i < NFSRVCACHE_HASHSIZE; i++) { + LIST_FOREACH_SAFE(rp, &nfsrvudphashtbl[i], rc_hash, nextrp) { + nfsrc_freecache(rp); + } + } + newnfsstats.srvcache_size = 0; + nfsrc_tcpsavedreplies = 0; + NFSUNLOCKCACHE(); +} +#endif /* notdef */ + +/* + * The basic rule is to get rid of entries that are expired. + */ +static void +nfsrc_trimcache(u_int64_t sockref, struct socket *so) +{ + struct nfsrvcache *rp, *nextrp; + int i; + + NFSLOCKCACHE(); + TAILQ_FOREACH_SAFE(rp, &nfsrvudplru, rc_lru, nextrp) { + if (!(rp->rc_flag & (RC_INPROG|RC_LOCKED|RC_WANTED)) + && rp->rc_refcnt == 0 + && ((rp->rc_flag & RC_REFCNT) || + NFSD_MONOSEC > rp->rc_timestamp || + nfsrc_udpcachesize > nfsrc_udphighwater)) + nfsrc_freecache(rp); + } + for (i = 0; i < NFSRVCACHE_HASHSIZE; i++) { + LIST_FOREACH_SAFE(rp, &nfsrvhashtbl[i], rc_hash, nextrp) { + if (!(rp->rc_flag & (RC_INPROG|RC_LOCKED|RC_WANTED)) + && rp->rc_refcnt == 0 + && ((rp->rc_flag & RC_REFCNT) || + NFSD_MONOSEC > rp->rc_timestamp || + nfsrc_activesocket(rp, sockref, so))) + nfsrc_freecache(rp); + } + } + NFSUNLOCKCACHE(); +} + +/* + * Add a seqid# reference to the cache entry. + */ +APPLESTATIC void +nfsrvd_refcache(struct nfsrvcache *rp) +{ + + NFSLOCKCACHE(); + if (rp->rc_refcnt < 0) + panic("nfs cache refcnt"); + rp->rc_refcnt++; + NFSUNLOCKCACHE(); +} + +/* + * Dereference a seqid# cache entry. + */ +APPLESTATIC void +nfsrvd_derefcache(struct nfsrvcache *rp) +{ + + NFSLOCKCACHE(); + if (rp->rc_refcnt <= 0) + panic("nfs cache derefcnt"); + rp->rc_refcnt--; + if (rp->rc_refcnt == 0 && !(rp->rc_flag & (RC_LOCKED | RC_INPROG))) + nfsrc_freecache(rp); + NFSUNLOCKCACHE(); +} + +/* + * Check to see if the socket is active. + * Return 1 if the reply has been received/acknowledged by the client, + * 0 otherwise. + * XXX - Uses tcp internals. + */ +static int +nfsrc_activesocket(struct nfsrvcache *rp, u_int64_t cur_sockref, + struct socket *cur_so) +{ + int ret = 0; + + if (!(rp->rc_flag & RC_TCPSEQ)) + return (ret); + /* + * If the sockref is the same, it is the same TCP connection. + */ + if (cur_sockref == rp->rc_sockref) + ret = nfsrv_checksockseqnum(cur_so, rp->rc_tcpseq); + return (ret); +} + +/* + * Calculate the length of the mbuf list and a checksum on the first up to + * NFSRVCACHE_CHECKLEN bytes. + */ +static int +nfsrc_getlenandcksum(mbuf_t m1, u_int16_t *cksum) +{ + int len = 0, cklen; + mbuf_t m; + + m = m1; + while (m) { + len += mbuf_len(m); + m = mbuf_next(m); + } + cklen = (len > NFSRVCACHE_CHECKLEN) ? 
NFSRVCACHE_CHECKLEN : len; + *cksum = in_cksum(m1, cklen); + return (len); +} + +/* + * Mark a TCP connection that is seeing retries. Should never happen for + * NFSv4. + */ +static void +nfsrc_marksametcpconn(u_int64_t sockref) +{ +} + diff --git a/sys/fs/nfsserver/nfs_nfsdkrpc.c b/sys/fs/nfsserver/nfs_nfsdkrpc.c new file mode 100644 index 0000000..0b8df95 --- /dev/null +++ b/sys/fs/nfsserver/nfs_nfsdkrpc.c @@ -0,0 +1,455 @@ +/*- + * Copyright (c) 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Rick Macklem at The University of Guelph. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ + +#include <sys/cdefs.h> +__FBSDID("$FreeBSD$"); + +#include "opt_inet6.h" +#include "opt_kgssapi.h" + +#include <fs/nfs/nfsport.h> + +#include <rpc/rpc.h> +#include <rpc/rpcsec_gss.h> + +NFSDLOCKMUTEX; + +/* + * Mapping of old NFS Version 2 RPC numbers to generic numbers. 
+ */ +static int newnfs_nfsv3_procid[NFS_V3NPROCS] = { + NFSPROC_NULL, + NFSPROC_GETATTR, + NFSPROC_SETATTR, + NFSPROC_NOOP, + NFSPROC_LOOKUP, + NFSPROC_READLINK, + NFSPROC_READ, + NFSPROC_NOOP, + NFSPROC_WRITE, + NFSPROC_CREATE, + NFSPROC_REMOVE, + NFSPROC_RENAME, + NFSPROC_LINK, + NFSPROC_SYMLINK, + NFSPROC_MKDIR, + NFSPROC_RMDIR, + NFSPROC_READDIR, + NFSPROC_FSSTAT, + NFSPROC_NOOP, + NFSPROC_NOOP, + NFSPROC_NOOP, + NFSPROC_NOOP, +}; + + +SYSCTL_DECL(_vfs_newnfs); + +SVCPOOL *nfsrvd_pool; + +static int nfs_privport = 0; +SYSCTL_INT(_vfs_newnfs, OID_AUTO, nfs_privport, CTLFLAG_RW, + &nfs_privport, 0, + "Only allow clients using a privileged port for NFSv2 and 3"); + +static int nfs_proc(struct nfsrv_descript *, u_int32_t, struct socket *, + u_int64_t, struct nfsrvcache **); + +extern u_long sb_max_adj; +extern int newnfs_numnfsd; + +/* + * NFS server system calls + */ + +static void +nfssvc_program(struct svc_req *rqst, SVCXPRT *xprt) +{ + struct nfsrv_descript nd; + struct nfsrvcache *rp = NULL; + int cacherep; + + memset(&nd, 0, sizeof(nd)); + if (rqst->rq_vers == NFS_VER2) { + if (rqst->rq_proc > NFSV2PROC_STATFS) { + svcerr_noproc(rqst); + svc_freereq(rqst); + return; + } + nd.nd_procnum = newnfs_nfsv3_procid[rqst->rq_proc]; + nd.nd_flag = ND_NFSV2; + } else if (rqst->rq_vers == NFS_VER3) { + if (rqst->rq_proc >= NFS_V3NPROCS) { + svcerr_noproc(rqst); + svc_freereq(rqst); + return; + } + nd.nd_procnum = rqst->rq_proc; + nd.nd_flag = ND_NFSV3; + } else { + if (rqst->rq_proc != NFSPROC_NULL && + rqst->rq_proc != NFSV4PROC_COMPOUND) { + svcerr_noproc(rqst); + svc_freereq(rqst); + return; + } + nd.nd_procnum = rqst->rq_proc; + nd.nd_flag = ND_NFSV4; + } + + /* + * Note: we want rq_addr, not svc_getrpccaller for nd_nam2 - + * NFS_SRVMAXDATA uses a NULL value for nd_nam2 to detect TCP + * mounts. + */ + nd.nd_mrep = rqst->rq_args; + rqst->rq_args = NULL; + newnfs_realign(&nd.nd_mrep); + nd.nd_md = nd.nd_mrep; + nd.nd_dpos = mtod(nd.nd_md, caddr_t); + nd.nd_nam = svc_getrpccaller(rqst); + nd.nd_nam2 = rqst->rq_addr; + nd.nd_mreq = NULL; + nd.nd_cred = NULL; + + if (nfs_privport && (nd.nd_flag & ND_NFSV4) == 0) { + /* Check if source port is privileged */ + u_short port; + struct sockaddr *nam = nd.nd_nam; + struct sockaddr_in *sin; + + sin = (struct sockaddr_in *)nam; + /* + * INET/INET6 - same code: + * sin_port and sin6_port are at same offset + */ + port = ntohs(sin->sin_port); + if (port >= IPPORT_RESERVED && + nd.nd_procnum != NFSPROC_NULL) { +#ifdef INET6 + char b6[INET6_ADDRSTRLEN]; +#if defined(KLD_MODULE) + /* Do not use ip6_sprintf: the nfs module should work without INET6. */ +#define ip6_sprintf(buf, a) \ + (sprintf((buf), "%x:%x:%x:%x:%x:%x:%x:%x", \ + (a)->s6_addr16[0], (a)->s6_addr16[1], \ + (a)->s6_addr16[2], (a)->s6_addr16[3], \ + (a)->s6_addr16[4], (a)->s6_addr16[5], \ + (a)->s6_addr16[6], (a)->s6_addr16[7]), \ + (buf)) +#endif +#endif + printf("NFS request from unprivileged port (%s:%d)\n", +#ifdef INET6 + sin->sin_family == AF_INET6 ? 
+ ip6_sprintf(b6, &satosin6(sin)->sin6_addr) : +#if defined(KLD_MODULE) +#undef ip6_sprintf +#endif +#endif + inet_ntoa(sin->sin_addr), port); + svcerr_weakauth(rqst); + svc_freereq(rqst); + m_freem(nd.nd_mrep); + return; + } + } + + if (nd.nd_procnum != NFSPROC_NULL) { + if (!svc_getcred(rqst, &nd.nd_cred, &nd.nd_credflavor)) { + svcerr_weakauth(rqst); + svc_freereq(rqst); + m_freem(nd.nd_mrep); + return; + } +#ifdef MAC + mac_cred_associate_nfsd(nd.nd_cred); +#endif + if ((nd.nd_flag & ND_NFSV4)) + nd.nd_repstat = nfsvno_v4rootexport(&nd); + + cacherep = nfs_proc(&nd, rqst->rq_xid, xprt->xp_socket, + xprt->xp_sockref, &rp); + } else { + NFSMGET(nd.nd_mreq); + nd.nd_mreq->m_len = 0; + cacherep = RC_REPLY; + } + if (nd.nd_mrep != NULL) + m_freem(nd.nd_mrep); + + if (nd.nd_cred != NULL) + crfree(nd.nd_cred); + + if (cacherep == RC_DROPIT) { + if (nd.nd_mreq != NULL) + m_freem(nd.nd_mreq); + svc_freereq(rqst); + return; + } + + if (nd.nd_mreq == NULL) { + svcerr_decode(rqst); + svc_freereq(rqst); + return; + } + + if (nd.nd_repstat & NFSERR_AUTHERR) { + svcerr_auth(rqst, nd.nd_repstat & ~NFSERR_AUTHERR); + if (nd.nd_mreq != NULL) + m_freem(nd.nd_mreq); + } else if (!svc_sendreply_mbuf(rqst, nd.nd_mreq)) { + svcerr_systemerr(rqst); + } + if (rp != NULL) + nfsrvd_sentcache(rp, xprt->xp_socket, 0); + svc_freereq(rqst); +} + +/* + * Check the cache and, optionally, do the RPC. + * Return the appropriate cache response. + */ +static int +nfs_proc(struct nfsrv_descript *nd, u_int32_t xid, struct socket *so, + u_int64_t sockref, struct nfsrvcache **rpp) +{ + struct thread *td = curthread; + int cacherep = RC_DOIT, isdgram; + + *rpp = NULL; + if (nd->nd_nam2 == NULL) { + nd->nd_flag |= ND_STREAMSOCK; + isdgram = 0; + } else { + isdgram = 1; + } + NFSGETTIME(&nd->nd_starttime); + + /* + * Several cases: + * 1 - For NFSv2 over UDP, if we are near our malloc/mget + * limit, just drop the request. There is no + * NFSERR_RESOURCE or NFSERR_DELAY for NFSv2 and the + * client will timeout/retry over UDP in a little while. + * 2 - nd_repstat set to some error, so generate the reply now. + * 3 - nd_repstat == 0 && nd_mreq == NULL, which + * means a normal nfs rpc, so check the cache + */ + if ((nd->nd_flag & ND_NFSV2) && nd->nd_nam2 != NULL && + nfsrv_mallocmget_limit()) { + cacherep = RC_DROPIT; + } else if (nd->nd_repstat) { + cacherep = RC_REPLY; + } else { + /* + * For NFSv3, play it safe and assume that the client is + * doing retries on the same TCP connection. + */ + if ((nd->nd_flag & (ND_NFSV4 | ND_STREAMSOCK)) == + ND_STREAMSOCK) + nd->nd_flag |= ND_SAMETCPCONN; + nd->nd_retxid = xid; + nd->nd_tcpconntime = NFSD_MONOSEC; + nd->nd_sockref = sockref; + cacherep = nfsrvd_getcache(nd, so); + } + + /* + * Handle the request. There are three cases. + * RC_DOIT - do the RPC + * RC_REPLY - return the reply already created + * RC_DROPIT - just throw the request away + */ + if (cacherep == RC_DOIT) { + nfsrvd_dorpc(nd, isdgram, td); + if (nd->nd_repstat == NFSERR_DONTREPLY) + cacherep = RC_DROPIT; + else + cacherep = RC_REPLY; + *rpp = nfsrvd_updatecache(nd, so); + } + return (cacherep); +} + +/* + * Adds a socket to the list for servicing by nfsds. + */ +int +nfsrvd_addsock(struct file *fp) +{ + int siz; + struct socket *so; + int error; + SVCXPRT *xprt; + static u_int64_t sockref = 0; + + so = fp->f_data; + + siz = sb_max_adj; + error = soreserve(so, siz, siz); + if (error) { + return (error); + } + + /* + * Steal the socket from userland so that it doesn't close + * unexpectedly. 
+ */ + if (so->so_type == SOCK_DGRAM) + xprt = svc_dg_create(nfsrvd_pool, so, 0, 0); + else + xprt = svc_vc_create(nfsrvd_pool, so, 0, 0); + if (xprt) { + fp->f_ops = &badfileops; + fp->f_data = NULL; + xprt->xp_sockref = ++sockref; + svc_reg(xprt, NFS_PROG, NFS_VER2, nfssvc_program, NULL); + svc_reg(xprt, NFS_PROG, NFS_VER3, nfssvc_program, NULL); + svc_reg(xprt, NFS_PROG, NFS_VER4, nfssvc_program, NULL); + } + + return (0); +} + +/* + * Called by nfssvc() for nfsds. Just loops around servicing rpc requests + * until it is killed by a signal. + */ +int +nfsrvd_nfsd(struct thread *td, struct nfsd_nfsd_args *args) +{ +#ifdef KGSSAPI + char principal[128]; + int error; + bool_t ret2, ret3, ret4; +#endif + +#ifdef KGSSAPI + if (args != NULL) { + error = copyinstr(args->principal, principal, + sizeof(principal), NULL); + if (error) + return (error); + } else { + snprintf(principal, sizeof(principal), "nfs@%s", hostname); + } +#endif + + /* + * Only the first nfsd actually does any work. The RPC code + * adds threads to it as needed. Any extra processes offered + * by nfsd just exit. If nfsd is new enough, it will call us + * once with a structure that specifies how many threads to + * use. + */ + NFSD_LOCK(); + if (newnfs_numnfsd == 0) { + newnfs_numnfsd++; + + NFSD_UNLOCK(); + +#ifdef KGSSAPI + ret2 = rpc_gss_set_svc_name(principal, "kerberosv5", + GSS_C_INDEFINITE, NFS_PROG, NFS_VER2); + ret3 = rpc_gss_set_svc_name(principal, "kerberosv5", + GSS_C_INDEFINITE, NFS_PROG, NFS_VER3); + ret4 = rpc_gss_set_svc_name(principal, "kerberosv5", + GSS_C_INDEFINITE, NFS_PROG, NFS_VER4); + + /* + * If the principal name was specified, these should have + * succeeded. + */ + if (args != NULL && principal[0] != '\0' && + (!ret2 || !ret3 || !ret4)) { + NFSD_LOCK(); + newnfs_numnfsd--; + NFSD_UNLOCK(); + return (EAUTH); + } +#endif + + if (args != NULL) { + nfsrvd_pool->sp_minthreads = args->minthreads; + nfsrvd_pool->sp_maxthreads = args->maxthreads; + } else { + nfsrvd_pool->sp_minthreads = 4; + nfsrvd_pool->sp_maxthreads = 4; + } + + svc_run(nfsrvd_pool); + +#ifdef KGSSAPI + rpc_gss_clear_svc_name(NFS_PROG, NFS_VER2); + rpc_gss_clear_svc_name(NFS_PROG, NFS_VER3); + rpc_gss_clear_svc_name(NFS_PROG, NFS_VER4); +#endif + + NFSD_LOCK(); + newnfs_numnfsd--; + nfsrvd_init(1); + } + NFSD_UNLOCK(); + + return (0); +} + +/* + * Initialize the data structures for the server. + * Handshake with any new nfsds starting up to avoid any chance of + * corruption. + */ +void +nfsrvd_init(int terminating) +{ + + NFSD_LOCK_ASSERT(); + + if (terminating) { + NFSD_UNLOCK(); + svcpool_destroy(nfsrvd_pool); + nfsrvd_pool = NULL; + NFSD_LOCK(); + } + + NFSD_UNLOCK(); + + nfsrvd_pool = svcpool_create("nfsd", SYSCTL_STATIC_CHILDREN(_vfs_newnfs)); + nfsrvd_pool->sp_rcache = NULL; + nfsrvd_pool->sp_assign = NULL; + nfsrvd_pool->sp_done = NULL; + + NFSD_LOCK(); +} + diff --git a/sys/fs/nfsserver/nfs_nfsdport.c b/sys/fs/nfsserver/nfs_nfsdport.c new file mode 100644 index 0000000..6d39173 --- /dev/null +++ b/sys/fs/nfsserver/nfs_nfsdport.c @@ -0,0 +1,3060 @@ +/*- + * Copyright (c) 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Rick Macklem at The University of Guelph. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ + +#include <sys/cdefs.h> +__FBSDID("$FreeBSD$"); + +/* + * Functions that perform the vfs operations required by the routines in + * nfsd_serv.c. It is hoped that this change will make the server more + * portable. + */ + +#include <fs/nfs/nfsport.h> +#include <sys/sysctl.h> + +extern int nfsrv_dolocallocks; +extern u_int32_t newnfs_true, newnfs_false, newnfs_xdrneg1; +extern int nfsv4root_set; +extern int nfsrv_useacl; +extern int newnfs_numnfsd; +extern struct mount nfsv4root_mnt; +extern struct nfsrv_stablefirst nfsrv_stablefirst; +extern void (*nfsd_call_servertimer)(void); +struct vfsoptlist nfsv4root_opt, nfsv4root_newopt; +NFSDLOCKMUTEX; +struct mtx nfs_cache_mutex; +struct mtx nfs_v4root_mutex; +struct nfsrvfh nfs_rootfh, nfs_pubfh; +int nfs_pubfhset = 0, nfs_rootfhset = 0; + +static int nfssvc_srvcall(struct thread *, struct nfssvc_args *, struct ucred *); + +static int enable_crossmntpt = 1; +static int nfs_commit_blks; +static int nfs_commit_miss; +extern int nfsrv_issuedelegs; +SYSCTL_DECL(_vfs_newnfs); +SYSCTL_INT(_vfs_newnfs, OID_AUTO, mirrormnt, CTLFLAG_RW, &enable_crossmntpt, 0, ""); +SYSCTL_INT(_vfs_newnfs, OID_AUTO, commit_blks, CTLFLAG_RW, &nfs_commit_blks, 0, ""); +SYSCTL_INT(_vfs_newnfs, OID_AUTO, commit_miss, CTLFLAG_RW, &nfs_commit_miss, 0, ""); +SYSCTL_INT(_vfs_newnfs, OID_AUTO, issue_delegations, CTLFLAG_RW, &nfsrv_issuedelegs, 0, ""); + +#define NUM_HEURISTIC 1017 +#define NHUSE_INIT 64 +#define NHUSE_INC 16 +#define NHUSE_MAX 2048 + +static struct nfsheur { + struct vnode *nh_vp; /* vp to match (unreferenced pointer) */ + off_t nh_nextr; /* next offset for sequential detection */ + int nh_use; /* use count for selection */ + int nh_seqcount; /* heuristic */ +} nfsheur[NUM_HEURISTIC]; + + +/* + * Get attributes into nfsvattr structure. + */ +int +nfsvno_getattr(struct vnode *vp, struct nfsvattr *nvap, struct ucred *cred, + struct thread *p) +{ + int error, lockedit = 0; + + /* Since FreeBSD insists the vnode be locked... 
*/ + if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) { + lockedit = 1; + NFSVOPLOCK(vp, LK_EXCLUSIVE | LK_RETRY, p); + } + error = VOP_GETATTR(vp, &nvap->na_vattr, cred); + if (lockedit) + NFSVOPUNLOCK(vp, 0, p); + return (error); +} + +/* + * Get a file handle for a vnode. + */ +int +nfsvno_getfh(struct vnode *vp, fhandle_t *fhp, struct thread *p) +{ + int error; + + NFSBZERO((caddr_t)fhp, sizeof(fhandle_t)); + fhp->fh_fsid = vp->v_mount->mnt_stat.f_fsid; + error = VOP_VPTOFH(vp, &fhp->fh_fid); + return (error); +} + +/* + * Perform access checking for vnodes obtained from file handles that would + * refer to files already opened by a Unix client. You cannot just use + * vn_writechk() and VOP_ACCESS() for two reasons. + * 1 - You must check for exported rdonly as well as MNT_RDONLY for the write case + * 2 - The owner is to be given access irrespective of mode bits for some + * operations, so that processes that chmod after opening a file don't + * break. + */ +int +nfsvno_accchk(struct vnode *vp, u_int32_t accessbits, struct ucred *cred, + struct nfsexstuff *exp, struct thread *p, int override, int vpislocked) +{ + struct vattr vattr; + int error = 0, getret = 0; + accmode_t accmode; + + /* + * Convert accessbits to Vxxx flags. + */ + if (accessbits & (NFSV4ACE_WRITEDATA | NFSV4ACE_APPENDDATA | + NFSV4ACE_ADDFILE | NFSV4ACE_ADDSUBDIRECTORY | + NFSV4ACE_DELETECHILD | NFSV4ACE_WRITEATTRIBUTES | + NFSV4ACE_DELETE | NFSV4ACE_WRITEACL | NFSV4ACE_WRITEOWNER)) + accmode = VWRITE; + else if (accessbits & (NFSV4ACE_EXECUTE | NFSV4ACE_SEARCH)) + accmode = VEXEC; + else + accmode = VREAD; + + if (accmode & VWRITE) { + /* Just vn_writechk() changed to check rdonly */ + /* + * Disallow write attempts on read-only file systems; + * unless the file is a socket or a block or character + * device resident on the file system. + */ + if (NFSVNO_EXRDONLY(exp) || + (vp->v_mount->mnt_flag & MNT_RDONLY)) { + switch (vp->v_type) { + case VREG: + case VDIR: + case VLNK: + return (EROFS); + default: + break; + } + } + /* + * If there's shared text associated with + * the inode, try to free it up once. If + * we fail, we can't allow writing. + */ + if (vp->v_vflag & VV_TEXT) + return (ETXTBSY); + } + if (vpislocked == 0) + NFSVOPLOCK(vp, LK_EXCLUSIVE | LK_RETRY, p); + +#ifdef NFS4_ACL_EXTATTR_NAME + /* + * Should the override still be applied when ACLs are enabled? + */ + if (nfsrv_useacl != 0 && NFSHASNFS4ACL(vp->v_mount)) + error = nfsrv_aclaccess(vp, accmode, accessbits, cred, p); + else +#endif + if (accessbits == NFSV4ACE_READATTRIBUTES) + error = 0; + else + error = VOP_ACCESS(vp, accmode, cred, p); + + /* + * Allow certain operations for the owner (reads and writes + * on files that are already open). + */ + if (override != NFSACCCHK_NOOVERRIDE && + (error == EPERM || error == EACCES)) { + if (cred->cr_uid == 0 && (override & NFSACCCHK_ALLOWROOT)) + error = 0; + else if (override & NFSACCCHK_ALLOWOWNER) { + getret = VOP_GETATTR(vp, &vattr, cred); + if (getret == 0 && cred->cr_uid == vattr.va_uid) + error = 0; + } + } + if (vpislocked == 0) + NFSVOPUNLOCK(vp, 0, p); + return (error); +} + +/* + * Set attribute(s) vnop. 
+ */ +int +nfsvno_setattr(struct vnode *vp, struct nfsvattr *nvap, struct ucred *cred, + struct thread *p, struct nfsexstuff *exp) +{ + int error; + + error = VOP_SETATTR(vp, &nvap->na_vattr, cred); + return (error); +} + +/* + * Set up nameidata for a lookup() call and do it + * For the cases where we are crossing mount points + * (looking up the public fh path or the v4 root path when + * not using a pseudo-root fs), set/release the Giant lock, + * as required. + */ +int +nfsvno_namei(struct nfsrv_descript *nd, struct nameidata *ndp, + struct vnode *dp, int islocked, struct nfsexstuff *exp, struct thread *p, + struct vnode **retdirp) +{ + struct componentname *cnp = &ndp->ni_cnd; + int i; + struct iovec aiov; + struct uio auio; + int lockleaf = (cnp->cn_flags & LOCKLEAF) != 0, linklen; + int error = 0, crossmnt; + char *cp; + + *retdirp = NULL; + cnp->cn_nameptr = cnp->cn_pnbuf; + /* + * Extract and set starting directory. + */ + if (dp->v_type != VDIR) { + if (islocked) + vput(dp); + else + vrele(dp); + nfsvno_relpathbuf(ndp); + return (ENOTDIR); + } + if (islocked) + NFSVOPUNLOCK(dp, 0, p); + VREF(dp); + *retdirp = dp; + if (NFSVNO_EXRDONLY(exp)) + cnp->cn_flags |= RDONLY; + ndp->ni_segflg = UIO_SYSSPACE; + crossmnt = 1; + + if (nd->nd_flag & ND_PUBLOOKUP) { + ndp->ni_loopcnt = 0; + if (cnp->cn_pnbuf[0] == '/') { + vrele(dp); + /* + * Check for degenerate pathnames here, since lookup() + * panics on them. + */ + for (i = 1; i < ndp->ni_pathlen; i++) + if (cnp->cn_pnbuf[i] != '/') + break; + if (i == ndp->ni_pathlen) { + error = NFSERR_ACCES; + goto out; + } + dp = rootvnode; + VREF(dp); + } + } else if ((enable_crossmntpt == 0 && NFSVNO_EXPORTED(exp)) || + (nd->nd_flag & ND_NFSV4) == 0) { + /* + * Only cross mount points for NFSv4 when doing a + * mount while traversing the file system above + * the mount point, unless enable_crossmntpt is set. + */ + cnp->cn_flags |= NOCROSSMOUNT; + crossmnt = 0; + } + + /* + * Initialize for scan, set ni_startdir and bump ref on dp again + * becuase lookup() will dereference ni_startdir. + */ + + cnp->cn_thread = p; + ndp->ni_startdir = dp; + ndp->ni_rootdir = rootvnode; + + if (!lockleaf) + cnp->cn_flags |= LOCKLEAF; + for (;;) { + cnp->cn_nameptr = cnp->cn_pnbuf; + /* + * Call lookup() to do the real work. If an error occurs, + * ndp->ni_vp and ni_dvp are left uninitialized or NULL and + * we do not have to dereference anything before returning. + * In either case ni_startdir will be dereferenced and NULLed + * out. + */ + if (exp->nes_vfslocked) + ndp->ni_cnd.cn_flags |= GIANTHELD; + error = lookup(ndp); + /* + * The Giant lock should only change when + * crossing mount points. + */ + if (crossmnt) { + exp->nes_vfslocked = + (ndp->ni_cnd.cn_flags & GIANTHELD) != 0; + ndp->ni_cnd.cn_flags &= ~GIANTHELD; + } + if (error) + break; + + /* + * Check for encountering a symbolic link. Trivial + * termination occurs if no symlink encountered. 
+ */ + if ((cnp->cn_flags & ISSYMLINK) == 0) { + if ((cnp->cn_flags & (SAVENAME | SAVESTART)) == 0) + nfsvno_relpathbuf(ndp); + if (ndp->ni_vp && !lockleaf) + NFSVOPUNLOCK(ndp->ni_vp, 0, p); + break; + } + + /* + * Validate symlink + */ + if ((cnp->cn_flags & LOCKPARENT) && ndp->ni_pathlen == 1) + NFSVOPUNLOCK(ndp->ni_dvp, 0, p); + if (!(nd->nd_flag & ND_PUBLOOKUP)) { + error = EINVAL; + goto badlink2; + } + + if (ndp->ni_loopcnt++ >= MAXSYMLINKS) { + error = ELOOP; + goto badlink2; + } + if (ndp->ni_pathlen > 1) + cp = uma_zalloc(namei_zone, M_WAITOK); + else + cp = cnp->cn_pnbuf; + aiov.iov_base = cp; + aiov.iov_len = MAXPATHLEN; + auio.uio_iov = &aiov; + auio.uio_iovcnt = 1; + auio.uio_offset = 0; + auio.uio_rw = UIO_READ; + auio.uio_segflg = UIO_SYSSPACE; + auio.uio_td = NULL; + auio.uio_resid = MAXPATHLEN; + error = VOP_READLINK(ndp->ni_vp, &auio, cnp->cn_cred); + if (error) { + badlink1: + if (ndp->ni_pathlen > 1) + uma_zfree(namei_zone, cp); + badlink2: + vrele(ndp->ni_dvp); + vput(ndp->ni_vp); + break; + } + linklen = MAXPATHLEN - auio.uio_resid; + if (linklen == 0) { + error = ENOENT; + goto badlink1; + } + if (linklen + ndp->ni_pathlen >= MAXPATHLEN) { + error = ENAMETOOLONG; + goto badlink1; + } + + /* + * Adjust or replace path + */ + if (ndp->ni_pathlen > 1) { + NFSBCOPY(ndp->ni_next, cp + linklen, ndp->ni_pathlen); + uma_zfree(namei_zone, cnp->cn_pnbuf); + cnp->cn_pnbuf = cp; + } else + cnp->cn_pnbuf[linklen] = '\0'; + ndp->ni_pathlen += linklen; + + /* + * Cleanup refs for next loop and check if root directory + * should replace current directory. Normally ni_dvp + * becomes the new base directory and is cleaned up when + * we loop. Explicitly null pointers after invalidation + * to clarify operation. + */ + vput(ndp->ni_vp); + ndp->ni_vp = NULL; + + if (cnp->cn_pnbuf[0] == '/') { + vrele(ndp->ni_dvp); + ndp->ni_dvp = ndp->ni_rootdir; + VREF(ndp->ni_dvp); + } + ndp->ni_startdir = ndp->ni_dvp; + ndp->ni_dvp = NULL; + } + if (!lockleaf) + cnp->cn_flags &= ~LOCKLEAF; + +out: + if (error) { + uma_zfree(namei_zone, cnp->cn_pnbuf); + ndp->ni_vp = NULL; + ndp->ni_dvp = NULL; + ndp->ni_startdir = NULL; + cnp->cn_flags &= ~HASBUF; + } else if ((ndp->ni_cnd.cn_flags & (WANTPARENT|LOCKPARENT)) == 0) { + ndp->ni_dvp = NULL; + } + return (error); +} + +/* + * Set up a pathname buffer and return a pointer to it and, optionally + * set a hash pointer. + */ +void +nfsvno_setpathbuf(struct nameidata *ndp, char **bufpp, u_long **hashpp) +{ + struct componentname *cnp = &ndp->ni_cnd; + + cnp->cn_flags |= (NOMACCHECK | HASBUF); + cnp->cn_pnbuf = uma_zalloc(namei_zone, M_WAITOK); + if (hashpp != NULL) + *hashpp = NULL; + *bufpp = cnp->cn_pnbuf; +} + +/* + * Release the above path buffer, if not released by nfsvno_namei(). + */ +void +nfsvno_relpathbuf(struct nameidata *ndp) +{ + + if ((ndp->ni_cnd.cn_flags & HASBUF) == 0) + panic("nfsrelpath"); + uma_zfree(namei_zone, ndp->ni_cnd.cn_pnbuf); + ndp->ni_cnd.cn_flags &= ~HASBUF; +} + +/* + * Readlink vnode op into an mbuf list. 
+ */ +int +nfsvno_readlink(struct vnode *vp, struct ucred *cred, struct thread *p, + struct mbuf **mpp, struct mbuf **mpendp, int *lenp) +{ + struct iovec iv[(NFS_MAXPATHLEN+MLEN-1)/MLEN]; + struct iovec *ivp = iv; + struct uio io, *uiop = &io; + struct mbuf *mp, *mp2 = NULL, *mp3 = NULL; + int i, len, tlen, error; + + len = 0; + i = 0; + while (len < NFS_MAXPATHLEN) { + NFSMGET(mp); + MCLGET(mp, M_WAIT); + mp->m_len = NFSMSIZ(mp); + if (len == 0) { + mp3 = mp2 = mp; + } else { + mp2->m_next = mp; + mp2 = mp; + } + if ((len + mp->m_len) > NFS_MAXPATHLEN) { + mp->m_len = NFS_MAXPATHLEN - len; + len = NFS_MAXPATHLEN; + } else { + len += mp->m_len; + } + ivp->iov_base = mtod(mp, caddr_t); + ivp->iov_len = mp->m_len; + i++; + ivp++; + } + uiop->uio_iov = iv; + uiop->uio_iovcnt = i; + uiop->uio_offset = 0; + uiop->uio_resid = len; + uiop->uio_rw = UIO_READ; + uiop->uio_segflg = UIO_SYSSPACE; + uiop->uio_td = NULL; + error = VOP_READLINK(vp, uiop, cred); + if (error) { + m_freem(mp3); + *lenp = 0; + return (error); + } + if (uiop->uio_resid > 0) { + len -= uiop->uio_resid; + tlen = NFSM_RNDUP(len); + nfsrv_adj(mp3, NFS_MAXPATHLEN - tlen, tlen - len); + } + *lenp = len; + *mpp = mp3; + *mpendp = mp; + return (0); +} + +/* + * Read vnode op call into mbuf list. + */ +int +nfsvno_read(struct vnode *vp, off_t off, int cnt, struct ucred *cred, + struct thread *p, struct mbuf **mpp, struct mbuf **mpendp) +{ + struct mbuf *m; + int i; + struct iovec *iv; + struct iovec *iv2; + int error = 0, len, left, siz, tlen, ioflag = 0, hi, try = 32; + struct mbuf *m2 = NULL, *m3; + struct uio io, *uiop = &io; + struct nfsheur *nh; + + /* + * Calculate seqcount for heuristic + */ + /* + * Locate best candidate + */ + + hi = ((int)(vm_offset_t)vp / sizeof(struct vnode)) % NUM_HEURISTIC; + nh = &nfsheur[hi]; + + while (try--) { + if (nfsheur[hi].nh_vp == vp) { + nh = &nfsheur[hi]; + break; + } + if (nfsheur[hi].nh_use > 0) + --nfsheur[hi].nh_use; + hi = (hi + 1) % NUM_HEURISTIC; + if (nfsheur[hi].nh_use < nh->nh_use) + nh = &nfsheur[hi]; + } + + if (nh->nh_vp != vp) { + nh->nh_vp = vp; + nh->nh_nextr = off; + nh->nh_use = NHUSE_INIT; + if (off == 0) + nh->nh_seqcount = 4; + else + nh->nh_seqcount = 1; + } + + /* + * Calculate heuristic + */ + + if ((off == 0 && nh->nh_seqcount > 0) || off == nh->nh_nextr) { + if (++nh->nh_seqcount > IO_SEQMAX) + nh->nh_seqcount = IO_SEQMAX; + } else if (nh->nh_seqcount > 1) { + nh->nh_seqcount = 1; + } else { + nh->nh_seqcount = 0; + } + nh->nh_use += NHUSE_INC; + if (nh->nh_use > NHUSE_MAX) + nh->nh_use = NHUSE_MAX; + ioflag |= nh->nh_seqcount << IO_SEQSHIFT; + + len = left = NFSM_RNDUP(cnt); + m3 = NULL; + /* + * Generate the mbuf list with the uio_iov ref. to it. 
+ */ + i = 0; + while (left > 0) { + NFSMGET(m); + MCLGET(m, M_WAIT); + m->m_len = 0; + siz = min(M_TRAILINGSPACE(m), left); + left -= siz; + i++; + if (m3) + m2->m_next = m; + else + m3 = m; + m2 = m; + } + MALLOC(iv, struct iovec *, i * sizeof (struct iovec), + M_TEMP, M_WAITOK); + uiop->uio_iov = iv2 = iv; + m = m3; + left = len; + i = 0; + while (left > 0) { + if (m == NULL) + panic("nfsvno_read iov"); + siz = min(M_TRAILINGSPACE(m), left); + if (siz > 0) { + iv->iov_base = mtod(m, caddr_t) + m->m_len; + iv->iov_len = siz; + m->m_len += siz; + left -= siz; + iv++; + i++; + } + m = m->m_next; + } + uiop->uio_iovcnt = i; + uiop->uio_offset = off; + uiop->uio_resid = len; + uiop->uio_rw = UIO_READ; + uiop->uio_segflg = UIO_SYSSPACE; + error = VOP_READ(vp, uiop, IO_NODELOCKED | ioflag, cred); + FREE((caddr_t)iv2, M_TEMP); + if (error) { + m_freem(m3); + *mpp = NULL; + return (error); + } + tlen = len - uiop->uio_resid; + cnt = cnt < tlen ? cnt : tlen; + tlen = NFSM_RNDUP(cnt); + if (tlen == 0) { + m_freem(m3); + m3 = NULL; + } else if (len != tlen || tlen != cnt) + nfsrv_adj(m3, len - tlen, tlen - cnt); + *mpp = m3; + *mpendp = m2; + return (0); +} + +/* + * Write vnode op from an mbuf list. + */ +int +nfsvno_write(struct vnode *vp, off_t off, int retlen, int cnt, int stable, + struct mbuf *mp, char *cp, struct ucred *cred, struct thread *p) +{ + struct iovec *ivp; + int i, len; + struct iovec *iv; + int ioflags, error; + struct uio io, *uiop = &io; + + MALLOC(ivp, struct iovec *, cnt * sizeof (struct iovec), M_TEMP, + M_WAITOK); + uiop->uio_iov = iv = ivp; + uiop->uio_iovcnt = cnt; + i = mtod(mp, caddr_t) + mp->m_len - cp; + len = retlen; + while (len > 0) { + if (mp == NULL) + panic("nfsvno_write"); + if (i > 0) { + i = min(i, len); + ivp->iov_base = cp; + ivp->iov_len = i; + ivp++; + len -= i; + } + mp = mp->m_next; + if (mp) { + i = mp->m_len; + cp = mtod(mp, caddr_t); + } + } + + if (stable == NFSWRITE_UNSTABLE) + ioflags = IO_NODELOCKED; + else + ioflags = (IO_SYNC | IO_NODELOCKED); + uiop->uio_resid = retlen; + uiop->uio_rw = UIO_WRITE; + uiop->uio_segflg = UIO_SYSSPACE; + NFSUIOPROC(uiop, p); + uiop->uio_offset = off; + error = VOP_WRITE(vp, uiop, ioflags, cred); + FREE((caddr_t)iv, M_TEMP); + return (error); +} + +/* + * Common code for creating a regular file (plus special files for V2). + */ +int +nfsvno_createsub(struct nfsrv_descript *nd, struct nameidata *ndp, + struct vnode **vpp, struct nfsvattr *nvap, int *exclusive_flagp, + u_char *cverf, NFSDEV_T rdev, struct thread *p, struct nfsexstuff *exp) +{ + u_quad_t tempsize; + int error; + + error = nd->nd_repstat; + if (!error && ndp->ni_vp == NULL) { + if (nvap->na_type == VREG || nvap->na_type == VSOCK) { + vrele(ndp->ni_startdir); + error = VOP_CREATE(ndp->ni_dvp, + &ndp->ni_vp, &ndp->ni_cnd, &nvap->na_vattr); + vput(ndp->ni_dvp); + nfsvno_relpathbuf(ndp); + if (!error) { + if (*exclusive_flagp) { + *exclusive_flagp = 0; + NFSVNO_ATTRINIT(nvap); + NFSBCOPY(cverf,(caddr_t)&nvap->na_atime, + NFSX_VERF); + error = VOP_SETATTR(ndp->ni_vp, + &nvap->na_vattr, nd->nd_cred); + } + } + /* + * NFS V2 Only. nfsrvd_mknod() does this for V3. + * (This implies, just get out on an error.) 
+ */ + } else if (nvap->na_type == VCHR || nvap->na_type == VBLK || + nvap->na_type == VFIFO) { + if (nvap->na_type == VCHR && rdev == 0xffffffff) + nvap->na_type = VFIFO; + if (nvap->na_type != VFIFO && + (error = priv_check_cred(nd->nd_cred, + PRIV_VFS_MKNOD_DEV, 0))) { + vrele(ndp->ni_startdir); + nfsvno_relpathbuf(ndp); + vput(ndp->ni_dvp); + return (error); + } + nvap->na_rdev = rdev; + error = VOP_MKNOD(ndp->ni_dvp, &ndp->ni_vp, + &ndp->ni_cnd, &nvap->na_vattr); + vput(ndp->ni_dvp); + nfsvno_relpathbuf(ndp); + if (error) { + vrele(ndp->ni_startdir); + return (error); + } + } else { + vrele(ndp->ni_startdir); + nfsvno_relpathbuf(ndp); + vput(ndp->ni_dvp); + return (ENXIO); + } + *vpp = ndp->ni_vp; + } else { + /* + * Handle cases where error is already set and/or + * the file exists. + * 1 - clean up the lookup + * 2 - iff !error and na_size set, truncate it + */ + vrele(ndp->ni_startdir); + nfsvno_relpathbuf(ndp); + *vpp = ndp->ni_vp; + if (ndp->ni_dvp == *vpp) + vrele(ndp->ni_dvp); + else + vput(ndp->ni_dvp); + if (!error && nvap->na_size != VNOVAL) { + error = nfsvno_accchk(*vpp, NFSV4ACE_ADDFILE, + nd->nd_cred, exp, p, NFSACCCHK_NOOVERRIDE, + NFSACCCHK_VPISLOCKED); + if (!error) { + tempsize = nvap->na_size; + NFSVNO_ATTRINIT(nvap); + nvap->na_size = tempsize; + error = VOP_SETATTR(*vpp, + &nvap->na_vattr, nd->nd_cred); + } + } + if (error) + vput(*vpp); + } + return (error); +} + +/* + * Do a mknod vnode op. + */ +int +nfsvno_mknod(struct nameidata *ndp, struct nfsvattr *nvap, struct ucred *cred, + struct thread *p) +{ + int error = 0; + enum vtype vtyp; + + vtyp = nvap->na_type; + /* + * Iff doesn't exist, create it. + */ + if (ndp->ni_vp) { + vrele(ndp->ni_startdir); + nfsvno_relpathbuf(ndp); + vput(ndp->ni_dvp); + vrele(ndp->ni_vp); + return (EEXIST); + } + if (vtyp != VCHR && vtyp != VBLK && vtyp != VSOCK && vtyp != VFIFO) { + vrele(ndp->ni_startdir); + nfsvno_relpathbuf(ndp); + vput(ndp->ni_dvp); + return (NFSERR_BADTYPE); + } + if (vtyp == VSOCK) { + vrele(ndp->ni_startdir); + error = VOP_CREATE(ndp->ni_dvp, &ndp->ni_vp, + &ndp->ni_cnd, &nvap->na_vattr); + vput(ndp->ni_dvp); + nfsvno_relpathbuf(ndp); + } else { + if (nvap->na_type != VFIFO && + (error = priv_check_cred(cred, PRIV_VFS_MKNOD_DEV, 0))) { + vrele(ndp->ni_startdir); + nfsvno_relpathbuf(ndp); + vput(ndp->ni_dvp); + return (error); + } + error = VOP_MKNOD(ndp->ni_dvp, &ndp->ni_vp, + &ndp->ni_cnd, &nvap->na_vattr); + vput(ndp->ni_dvp); + nfsvno_relpathbuf(ndp); + if (error) + vrele(ndp->ni_startdir); + /* + * Since VOP_MKNOD returns the ni_vp, I can't + * see any reason to do the lookup. + */ + } + return (error); +} + +/* + * Mkdir vnode op. + */ +int +nfsvno_mkdir(struct nameidata *ndp, struct nfsvattr *nvap, uid_t saved_uid, + struct ucred *cred, struct thread *p, struct nfsexstuff *exp) +{ + int error = 0; + + if (ndp->ni_vp != NULL) { + if (ndp->ni_dvp == ndp->ni_vp) + vrele(ndp->ni_dvp); + else + vput(ndp->ni_dvp); + vrele(ndp->ni_vp); + return (EEXIST); + } + error = VOP_MKDIR(ndp->ni_dvp, &ndp->ni_vp, &ndp->ni_cnd, + &nvap->na_vattr); + vput(ndp->ni_dvp); + nfsvno_relpathbuf(ndp); + return (error); +} + +/* + * symlink vnode op. 
+ */ +int +nfsvno_symlink(struct nameidata *ndp, struct nfsvattr *nvap, char *pathcp, + int pathlen, int not_v2, uid_t saved_uid, struct ucred *cred, struct thread *p, + struct nfsexstuff *exp) +{ + int error = 0; + + if (ndp->ni_vp) { + vrele(ndp->ni_startdir); + nfsvno_relpathbuf(ndp); + if (ndp->ni_dvp == ndp->ni_vp) + vrele(ndp->ni_dvp); + else + vput(ndp->ni_dvp); + vrele(ndp->ni_vp); + return (EEXIST); + } + + error = VOP_SYMLINK(ndp->ni_dvp, &ndp->ni_vp, &ndp->ni_cnd, + &nvap->na_vattr, pathcp); + vput(ndp->ni_dvp); + vrele(ndp->ni_startdir); + nfsvno_relpathbuf(ndp); + /* + * Although FreeBSD still had the lookup code in + * it for 7/current, there doesn't seem to be any + * point, since VOP_SYMLINK() returns the ni_vp. + * Just vput it for v2. + */ + if (!not_v2 && !error) + vput(ndp->ni_vp); + return (error); +} + +/* + * Parse symbolic link arguments. + * This function has an ugly side effect. It will MALLOC() an area for + * the symlink and set iov_base to point to it, only if it succeeds. + * So, if it returns with uiop->uio_iov->iov_base != NULL, that must + * be FREE'd later. + */ +int +nfsvno_getsymlink(struct nfsrv_descript *nd, struct nfsvattr *nvap, + struct thread *p, char **pathcpp, int *lenp) +{ + u_int32_t *tl; + char *pathcp = NULL; + int error = 0, len; + struct nfsv2_sattr *sp; + + *pathcpp = NULL; + *lenp = 0; + if ((nd->nd_flag & ND_NFSV3) && + (error = nfsrv_sattr(nd, nvap, NULL, NULL, p))) + goto nfsmout; + NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED); + len = fxdr_unsigned(int, *tl); + if (len > NFS_MAXPATHLEN || len <= 0) { + error = EBADRPC; + goto nfsmout; + } + MALLOC(pathcp, caddr_t, len + 1, M_TEMP, M_WAITOK); + error = nfsrv_mtostr(nd, pathcp, len); + if (error) + goto nfsmout; + if (nd->nd_flag & ND_NFSV2) { + NFSM_DISSECT(sp, struct nfsv2_sattr *, NFSX_V2SATTR); + nvap->na_mode = fxdr_unsigned(u_int16_t, sp->sa_mode); + } + *pathcpp = pathcp; + *lenp = len; + return (0); +nfsmout: + if (pathcp) + free(pathcp, M_TEMP); + return (error); +} + +/* + * Remove a non-directory object. + */ +int +nfsvno_removesub(struct nameidata *ndp, int is_v4, struct ucred *cred, + struct thread *p, struct nfsexstuff *exp) +{ + struct vnode *vp; + int error = 0; + + vp = ndp->ni_vp; + if (vp->v_type == VDIR) + error = NFSERR_ISDIR; + else if (is_v4) + error = nfsrv_checkremove(vp, 1, p); + if (!error) + error = VOP_REMOVE(ndp->ni_dvp, vp, &ndp->ni_cnd); + if (ndp->ni_dvp == vp) + vrele(ndp->ni_dvp); + else + vput(ndp->ni_dvp); + vput(vp); + return (error); +} + +/* + * Remove a directory. + */ +int +nfsvno_rmdirsub(struct nameidata *ndp, int is_v4, struct ucred *cred, + struct thread *p, struct nfsexstuff *exp) +{ + struct vnode *vp; + int error = 0; + + vp = ndp->ni_vp; + if (vp->v_type != VDIR) { + error = ENOTDIR; + goto out; + } + /* + * No rmdir "." please. + */ + if (ndp->ni_dvp == vp) { + error = EINVAL; + goto out; + } + /* + * The root of a mounted filesystem cannot be deleted. + */ + if (vp->v_vflag & VV_ROOT) + error = EBUSY; +out: + if (!error) + error = VOP_RMDIR(ndp->ni_dvp, vp, &ndp->ni_cnd); + if (ndp->ni_dvp == vp) + vrele(ndp->ni_dvp); + else + vput(ndp->ni_dvp); + vput(vp); + return (error); +} + +/* + * Rename vnode op. 
+ */ +int +nfsvno_rename(struct nameidata *fromndp, struct nameidata *tondp, + u_int32_t ndstat, u_int32_t ndflag, struct ucred *cred, struct thread *p) +{ + struct vnode *fvp, *tvp, *tdvp; + int error = 0; + + fvp = fromndp->ni_vp; + if (ndstat) { + vrele(fromndp->ni_dvp); + vrele(fvp); + error = ndstat; + goto out1; + } + tdvp = tondp->ni_dvp; + tvp = tondp->ni_vp; + if (tvp != NULL) { + if (fvp->v_type == VDIR && tvp->v_type != VDIR) { + error = (ndflag & ND_NFSV2) ? EISDIR : EEXIST; + goto out; + } else if (fvp->v_type != VDIR && tvp->v_type == VDIR) { + error = (ndflag & ND_NFSV2) ? ENOTDIR : EEXIST; + goto out; + } + if (tvp->v_type == VDIR && tvp->v_mountedhere) { + error = (ndflag & ND_NFSV2) ? ENOTEMPTY : EXDEV; + goto out; + } + + /* + * A rename to '.' or '..' results in a prematurely + * unlocked vnode on FreeBSD5, so I'm just going to fail that + * here. + */ + if ((tondp->ni_cnd.cn_namelen == 1 && + tondp->ni_cnd.cn_nameptr[0] == '.') || + (tondp->ni_cnd.cn_namelen == 2 && + tondp->ni_cnd.cn_nameptr[0] == '.' && + tondp->ni_cnd.cn_nameptr[1] == '.')) { + error = EINVAL; + goto out; + } + } + if (fvp->v_type == VDIR && fvp->v_mountedhere) { + error = (ndflag & ND_NFSV2) ? ENOTEMPTY : EXDEV; + goto out; + } + if (fvp->v_mount != tdvp->v_mount) { + error = (ndflag & ND_NFSV2) ? ENOTEMPTY : EXDEV; + goto out; + } + if (fvp == tdvp) { + error = (ndflag & ND_NFSV2) ? ENOTEMPTY : EINVAL; + goto out; + } + if (fvp == tvp) { + /* + * If source and destination are the same, there is nothing to + * do. Set error to -1 to indicate this. + */ + error = -1; + goto out; + } + if (ndflag & ND_NFSV4) { + NFSVOPLOCK(fvp, LK_EXCLUSIVE | LK_RETRY, p); + error = nfsrv_checkremove(fvp, 0, p); + NFSVOPUNLOCK(fvp, 0, p); + if (tvp && !error) + error = nfsrv_checkremove(tvp, 1, p); + } else { + /* + * For NFSv2 and NFSv3, try to get rid of the delegation, so + * that the NFSv4 client won't be confused by the rename. + * Since nfsd_recalldelegation() can only be called on an + * unlocked vnode at this point and fvp is the file that will + * still exist after the rename, just do fvp. + */ + nfsd_recalldelegation(fvp, p); + } +out: + if (!error) { + error = VOP_RENAME(fromndp->ni_dvp, fromndp->ni_vp, + &fromndp->ni_cnd, tondp->ni_dvp, tondp->ni_vp, + &tondp->ni_cnd); + } else { + if (tdvp == tvp) + vrele(tdvp); + else + vput(tdvp); + if (tvp) + vput(tvp); + vrele(fromndp->ni_dvp); + vrele(fvp); + if (error == -1) + error = 0; + } + vrele(tondp->ni_startdir); + nfsvno_relpathbuf(tondp); +out1: + vrele(fromndp->ni_startdir); + nfsvno_relpathbuf(fromndp); + return (error); +} + +/* + * Link vnode op. + */ +int +nfsvno_link(struct nameidata *ndp, struct vnode *vp, struct ucred *cred, + struct thread *p, struct nfsexstuff *exp) +{ + struct vnode *xp; + int error = 0; + + xp = ndp->ni_vp; + if (xp != NULL) { + error = EEXIST; + } else { + xp = ndp->ni_dvp; + if (vp->v_mount != xp->v_mount) + error = EXDEV; + } + if (!error) { + NFSVOPLOCK(vp, LK_EXCLUSIVE | LK_RETRY, p); + error = VOP_LINK(ndp->ni_dvp, vp, &ndp->ni_cnd); + if (ndp->ni_dvp == vp) + vrele(ndp->ni_dvp); + else + vput(ndp->ni_dvp); + NFSVOPUNLOCK(vp, 0, p); + } else { + if (ndp->ni_dvp == ndp->ni_vp) + vrele(ndp->ni_dvp); + else + vput(ndp->ni_dvp); + if (ndp->ni_vp) + vrele(ndp->ni_vp); + } + nfsvno_relpathbuf(ndp); + return (error); +} + +/* + * Do the fsync() appropriate for the commit. 
+ */ +int +nfsvno_fsync(struct vnode *vp, u_int64_t off, int cnt, struct ucred *cred, + struct thread *td) +{ + int error = 0; + + if (cnt > MAX_COMMIT_COUNT) { + /* + * Give up and do the whole thing + */ + if (vp->v_object && + (vp->v_object->flags & OBJ_MIGHTBEDIRTY)) { + VM_OBJECT_LOCK(vp->v_object); + vm_object_page_clean(vp->v_object, 0, 0, OBJPC_SYNC); + VM_OBJECT_UNLOCK(vp->v_object); + } + error = VOP_FSYNC(vp, MNT_WAIT, td); + } else { + /* + * Locate and synchronously write any buffers that fall + * into the requested range. Note: we are assuming that + * f_iosize is a power of 2. + */ + int iosize = vp->v_mount->mnt_stat.f_iosize; + int iomask = iosize - 1; + struct bufobj *bo; + daddr_t lblkno; + + /* + * Align to iosize boundry, super-align to page boundry. + */ + if (off & iomask) { + cnt += off & iomask; + off &= ~(u_quad_t)iomask; + } + if (off & PAGE_MASK) { + cnt += off & PAGE_MASK; + off &= ~(u_quad_t)PAGE_MASK; + } + lblkno = off / iosize; + + if (vp->v_object && + (vp->v_object->flags & OBJ_MIGHTBEDIRTY)) { + VM_OBJECT_LOCK(vp->v_object); + vm_object_page_clean(vp->v_object, off / PAGE_SIZE, (cnt + PAGE_MASK) / PAGE_SIZE, OBJPC_SYNC); + VM_OBJECT_UNLOCK(vp->v_object); + } + + bo = &vp->v_bufobj; + BO_LOCK(bo); + while (cnt > 0) { + struct buf *bp; + + /* + * If we have a buffer and it is marked B_DELWRI we + * have to lock and write it. Otherwise the prior + * write is assumed to have already been committed. + * + * gbincore() can return invalid buffers now so we + * have to check that bit as well (though B_DELWRI + * should not be set if B_INVAL is set there could be + * a race here since we haven't locked the buffer). + */ + if ((bp = gbincore(&vp->v_bufobj, lblkno)) != NULL) { + if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL | + LK_INTERLOCK, BO_MTX(bo)) == ENOLCK) { + BO_LOCK(bo); + continue; /* retry */ + } + if ((bp->b_flags & (B_DELWRI|B_INVAL)) == + B_DELWRI) { + bremfree(bp); + bp->b_flags &= ~B_ASYNC; + bwrite(bp); + ++nfs_commit_miss; + } else + BUF_UNLOCK(bp); + BO_LOCK(bo); + } + ++nfs_commit_blks; + if (cnt < iosize) + break; + cnt -= iosize; + ++lblkno; + } + BO_UNLOCK(bo); + } + return (error); +} + +/* + * Statfs vnode op. + */ +int +nfsvno_statfs(struct vnode *vp, struct statfs *sf, struct ucred *cred, + struct thread *p) +{ + int error; + + error = VFS_STATFS(vp->v_mount, sf, p); + return (error); +} + +/* + * Do the vnode op stuff for Open. Similar to nfsvno_createsub(), but + * must handle nfsrv_opencheck() calls after any other access checks. 
+ */ +void +nfsvno_open(struct nfsrv_descript *nd, struct nameidata *ndp, + nfsquad_t clientid, nfsv4stateid_t *stateidp, struct nfsstate *stp, + int *exclusive_flagp, struct nfsvattr *nvap, u_char *cverf, int create, + NFSACL_T *aclp, nfsattrbit_t *attrbitp, struct ucred *cred, struct thread *p, + struct nfsexstuff *exp, struct vnode **vpp) +{ + struct vnode *vp = NULL; + u_quad_t tempsize; + struct nfsexstuff nes; + + if (ndp->ni_vp == NULL) + nd->nd_repstat = nfsrv_opencheck(clientid, + stateidp, stp, NULL, nd, p, nd->nd_repstat); + if (!nd->nd_repstat) { + if (ndp->ni_vp == NULL) { + vrele(ndp->ni_startdir); + nd->nd_repstat = VOP_CREATE(ndp->ni_dvp, + &ndp->ni_vp, &ndp->ni_cnd, &nvap->na_vattr); + vput(ndp->ni_dvp); + nfsvno_relpathbuf(ndp); + if (!nd->nd_repstat) { + if (*exclusive_flagp) { + *exclusive_flagp = 0; + NFSVNO_ATTRINIT(nvap); + NFSBCOPY(cverf, + (caddr_t)&nvap->na_atime, + NFSX_VERF); + nd->nd_repstat = VOP_SETATTR(ndp->ni_vp, + &nvap->na_vattr, cred); + } else { + nfsrv_fixattr(nd, ndp->ni_vp, nvap, + aclp, p, attrbitp, exp); + } + } + vp = ndp->ni_vp; + } else { + if (ndp->ni_startdir) + vrele(ndp->ni_startdir); + nfsvno_relpathbuf(ndp); + vp = ndp->ni_vp; + if (create == NFSV4OPEN_CREATE) { + if (ndp->ni_dvp == vp) + vrele(ndp->ni_dvp); + else + vput(ndp->ni_dvp); + } + if (NFSVNO_ISSETSIZE(nvap) && vp->v_type == VREG) { + if (ndp->ni_cnd.cn_flags & RDONLY) + NFSVNO_SETEXRDONLY(&nes); + else + NFSVNO_EXINIT(&nes); + nd->nd_repstat = nfsvno_accchk(vp, + NFSV4ACE_ADDFILE, cred, &nes, p, + NFSACCCHK_NOOVERRIDE,NFSACCCHK_VPISLOCKED); + nd->nd_repstat = nfsrv_opencheck(clientid, + stateidp, stp, vp, nd, p, nd->nd_repstat); + if (!nd->nd_repstat) { + tempsize = nvap->na_size; + NFSVNO_ATTRINIT(nvap); + nvap->na_size = tempsize; + nd->nd_repstat = VOP_SETATTR(vp, + &nvap->na_vattr, cred); + } + } else if (vp->v_type == VREG) { + nd->nd_repstat = nfsrv_opencheck(clientid, + stateidp, stp, vp, nd, p, nd->nd_repstat); + } + } + } else { + if (ndp->ni_cnd.cn_flags & HASBUF) + nfsvno_relpathbuf(ndp); + if (ndp->ni_startdir && create == NFSV4OPEN_CREATE) { + vrele(ndp->ni_startdir); + if (ndp->ni_dvp == ndp->ni_vp) + vrele(ndp->ni_dvp); + else + vput(ndp->ni_dvp); + if (ndp->ni_vp) + vput(ndp->ni_vp); + } + } + *vpp = vp; +} + +/* + * Updates the file rev and sets the mtime and ctime + * to the current clock time, returning the va_filerev and va_Xtime + * values. + */ +void +nfsvno_updfilerev(struct vnode *vp, struct nfsvattr *nvap, + struct ucred *cred, struct thread *p) +{ + struct vattr va; + + VATTR_NULL(&va); + getnanotime(&va.va_mtime); + (void) VOP_SETATTR(vp, &va, cred); + (void) nfsvno_getattr(vp, nvap, cred, p); +} + +/* + * Glue routine to nfsv4_fillattr(). + */ +int +nfsvno_fillattr(struct nfsrv_descript *nd, struct vnode *vp, + struct nfsvattr *nvap, fhandle_t *fhp, int rderror, nfsattrbit_t *attrbitp, + struct ucred *cred, struct thread *p, int isdgram, int reterr) +{ + int error; + + error = nfsv4_fillattr(nd, vp, NULL, &nvap->na_vattr, fhp, rderror, + attrbitp, cred, p, isdgram, reterr); + return (error); +} + +/* Since the Readdir vnode ops vary, put the entire functions in here. */ +/* + * nfs readdir service + * - mallocs what it thinks is enough to read + * count rounded up to a multiple of DIRBLKSIZ <= NFS_MAXREADDIR + * - calls nfsvno_readdir() + * - loops around building the reply + * if the output generated exceeds count break out of loop + * The NFSM_CLGET macro is used here so that the reply will be packed + * tightly in mbuf clusters. 
+ * - it only knows that it has encountered eof when the nfsvno_readdir() + * reads nothing + * - as such one readdir rpc will return eof false although you are there + * and then the next will return eof + * - it trims out records with d_fileno == 0 + * this doesn't matter for Unix clients, but they might confuse clients + * for other os'. + * - it trims out records with d_type == DT_WHT + * these cannot be seen through NFS (unless we extend the protocol) + * NB: It is tempting to set eof to true if the nfsvno_readdir() reads less + * than requested, but this may not apply to all filesystems. For + * example, client NFS does not { although it is never remote mounted + * anyhow } + * The alternate call nfsrvd_readdirplus() does lookups as well. + * PS: The NFS protocol spec. does not clarify what the "count" byte + * argument is a count of.. just name strings and file id's or the + * entire reply rpc or ... + * I tried just file name and id sizes and it confused the Sun client, + * so I am using the full rpc size now. The "paranoia.." comment refers + * to including the status longwords that are not a part of the dir. + * "entry" structures, but are in the rpc. + */ +int +nfsrvd_readdir(struct nfsrv_descript *nd, int isdgram, + struct vnode *vp, struct thread *p, struct nfsexstuff *exp) +{ + struct dirent *dp; + u_int32_t *tl; + int dirlen; + char *cpos, *cend, *rbuf; + struct nfsvattr at; + int nlen, error = 0, getret = 1; + int siz, cnt, fullsiz, eofflag, ncookies; + u_int64_t off, toff, verf; + u_long *cookies = NULL, *cookiep; + struct uio io; + struct iovec iv; + + if (nd->nd_repstat) { + nfsrv_postopattr(nd, getret, &at); + return (0); + } + if (nd->nd_flag & ND_NFSV2) { + NFSM_DISSECT(tl, u_int32_t *, 2 * NFSX_UNSIGNED); + off = fxdr_unsigned(u_quad_t, *tl++); + } else { + NFSM_DISSECT(tl, u_int32_t *, 5 * NFSX_UNSIGNED); + off = fxdr_hyper(tl); + tl += 2; + verf = fxdr_hyper(tl); + tl += 2; + } + toff = off; + cnt = fxdr_unsigned(int, *tl); + if (cnt > NFS_SRVMAXDATA(nd)) + cnt = NFS_SRVMAXDATA(nd); + siz = ((cnt + DIRBLKSIZ - 1) & ~(DIRBLKSIZ - 1)); + fullsiz = siz; + if (nd->nd_flag & ND_NFSV3) { + nd->nd_repstat = getret = nfsvno_getattr(vp, &at, nd->nd_cred, + p); +#if 0 + /* + * va_filerev is not sufficient as a cookie verifier, + * since it is not supposed to change when entries are + * removed/added unless that offset cookies returned to + * the client are no longer valid. 
+ */ + if (!nd->nd_repstat && toff && verf != at.na_filerev) + nd->nd_repstat = NFSERR_BAD_COOKIE; +#endif + } + if (!nd->nd_repstat) + nd->nd_repstat = nfsvno_accchk(vp, NFSV4ACE_SEARCH, + nd->nd_cred, exp, p, NFSACCCHK_NOOVERRIDE, + NFSACCCHK_VPISLOCKED); + if (nd->nd_repstat) { + vput(vp); + if (nd->nd_flag & ND_NFSV3) + nfsrv_postopattr(nd, getret, &at); + return (0); + } + NFSVOPUNLOCK(vp, 0, p); + MALLOC(rbuf, caddr_t, siz, M_TEMP, M_WAITOK); +again: + eofflag = 0; + if (cookies) { + free((caddr_t)cookies, M_TEMP); + cookies = NULL; + } + + iv.iov_base = rbuf; + iv.iov_len = siz; + io.uio_iov = &iv; + io.uio_iovcnt = 1; + io.uio_offset = (off_t)off; + io.uio_resid = siz; + io.uio_segflg = UIO_SYSSPACE; + io.uio_rw = UIO_READ; + io.uio_td = NULL; + NFSVOPLOCK(vp, LK_EXCLUSIVE | LK_RETRY, p); + nd->nd_repstat = VOP_READDIR(vp, &io, nd->nd_cred, &eofflag, &ncookies, + &cookies); + NFSVOPUNLOCK(vp, 0, p); + off = (u_int64_t)io.uio_offset; + if (io.uio_resid) + siz -= io.uio_resid; + + if (!cookies && !nd->nd_repstat) + nd->nd_repstat = NFSERR_PERM; + if (nd->nd_flag & ND_NFSV3) { + getret = nfsvno_getattr(vp, &at, nd->nd_cred, p); + if (!nd->nd_repstat) + nd->nd_repstat = getret; + } + + /* + * Handles the failed cases. nd->nd_repstat == 0 past here. + */ + if (nd->nd_repstat) { + vrele(vp); + free((caddr_t)rbuf, M_TEMP); + if (cookies) + free((caddr_t)cookies, M_TEMP); + if (nd->nd_flag & ND_NFSV3) + nfsrv_postopattr(nd, getret, &at); + return (0); + } + /* + * If nothing read, return eof + * rpc reply + */ + if (siz == 0) { + vrele(vp); + if (nd->nd_flag & ND_NFSV2) { + NFSM_BUILD(tl, u_int32_t *, 2 * NFSX_UNSIGNED); + } else { + nfsrv_postopattr(nd, getret, &at); + NFSM_BUILD(tl, u_int32_t *, 4 * NFSX_UNSIGNED); + txdr_hyper(at.na_filerev, tl); + tl += 2; + } + *tl++ = newnfs_false; + *tl = newnfs_true; + FREE((caddr_t)rbuf, M_TEMP); + FREE((caddr_t)cookies, M_TEMP); + return (0); + } + + /* + * Check for degenerate cases of nothing useful read. + * If so go try again + */ + cpos = rbuf; + cend = rbuf + siz; + dp = (struct dirent *)cpos; + cookiep = cookies; + + /* + * For some reason FreeBSD's ufs_readdir() chooses to back the + * directory offset up to a block boundary, so it is necessary to + * skip over the records that precede the requested offset. This + * requires the assumption that file offset cookies monotonically + * increase. + */ + while (cpos < cend && ncookies > 0 && + (dp->d_fileno == 0 || dp->d_type == DT_WHT || + ((u_quad_t)(*cookiep)) <= toff)) { + cpos += dp->d_reclen; + dp = (struct dirent *)cpos; + cookiep++; + ncookies--; + } + if (cpos >= cend || ncookies == 0) { + siz = fullsiz; + toff = off; + goto again; + } + + /* + * dirlen is the size of the reply, including all XDR and must + * not exceed cnt. For NFSv2, RFC1094 didn't clearly indicate + * if the XDR should be included in "count", but to be safe, we do. + * (Include the two booleans at the end of the reply in dirlen now.) 
+ */ + if (nd->nd_flag & ND_NFSV3) { + nfsrv_postopattr(nd, getret, &at); + NFSM_BUILD(tl, u_int32_t *, 2 * NFSX_UNSIGNED); + txdr_hyper(at.na_filerev, tl); + dirlen = NFSX_V3POSTOPATTR + NFSX_VERF + 2 * NFSX_UNSIGNED; + } else { + dirlen = 2 * NFSX_UNSIGNED; + } + + /* Loop through the records and build reply */ + while (cpos < cend && ncookies > 0) { + nlen = dp->d_namlen; + if (dp->d_fileno != 0 && dp->d_type != DT_WHT && + nlen <= NFS_MAXNAMLEN) { + if (nd->nd_flag & ND_NFSV3) + dirlen += (6*NFSX_UNSIGNED + NFSM_RNDUP(nlen)); + else + dirlen += (4*NFSX_UNSIGNED + NFSM_RNDUP(nlen)); + if (dirlen > cnt) { + eofflag = 0; + break; + } + + /* + * Build the directory record xdr from + * the dirent entry. + */ + if (nd->nd_flag & ND_NFSV3) { + NFSM_BUILD(tl, u_int32_t *, 3 * NFSX_UNSIGNED); + *tl++ = newnfs_true; + *tl++ = 0; + } else { + NFSM_BUILD(tl, u_int32_t *, 2 * NFSX_UNSIGNED); + *tl++ = newnfs_true; + } + *tl = txdr_unsigned(dp->d_fileno); + (void) nfsm_strtom(nd, dp->d_name, nlen); + if (nd->nd_flag & ND_NFSV3) { + NFSM_BUILD(tl, u_int32_t *, 2 * NFSX_UNSIGNED); + *tl++ = 0; + } else + NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED); + *tl = txdr_unsigned(*cookiep); + } + cpos += dp->d_reclen; + dp = (struct dirent *)cpos; + cookiep++; + ncookies--; + } + if (cpos < cend) + eofflag = 0; + vrele(vp); + NFSM_BUILD(tl, u_int32_t *, 2 * NFSX_UNSIGNED); + *tl++ = newnfs_false; + if (eofflag) + *tl = newnfs_true; + else + *tl = newnfs_false; + FREE((caddr_t)rbuf, M_TEMP); + FREE((caddr_t)cookies, M_TEMP); + return (0); +nfsmout: + vput(vp); + return (error); +} + +/* + * Readdirplus for V3 and Readdir for V4. + */ +int +nfsrvd_readdirplus(struct nfsrv_descript *nd, int isdgram, + struct vnode *vp, struct thread *p, struct nfsexstuff *exp) +{ + struct dirent *dp; + u_int32_t *tl; + int dirlen; + char *cpos, *cend, *rbuf; + struct vnode *nvp; + fhandle_t nfh; + struct nfsvattr nva, at, *nvap = &nva; + struct mbuf *mb0, *mb1; + struct nfsreferral *refp; + int nlen, r, error = 0, getret = 1, vgetret; + int siz, cnt, fullsiz, eofflag, ncookies, entrycnt; + caddr_t bpos0, bpos1; + u_int64_t off, toff, verf; + u_long *cookies = NULL, *cookiep; + nfsattrbit_t attrbits, rderrbits, savbits; + struct uio io; + struct iovec iv; + + if (nd->nd_repstat) { + nfsrv_postopattr(nd, getret, &at); + return (0); + } + NFSM_DISSECT(tl, u_int32_t *, 6 * NFSX_UNSIGNED); + off = fxdr_hyper(tl); + toff = off; + tl += 2; + verf = fxdr_hyper(tl); + tl += 2; + siz = fxdr_unsigned(int, *tl++); + cnt = fxdr_unsigned(int, *tl); + + /* + * Use the server's maximum data transfer size as the upper bound + * on reply datalen. + */ + if (cnt > NFS_SRVMAXDATA(nd)) + cnt = NFS_SRVMAXDATA(nd); + + /* + * siz is a "hint" of how much directory information (name, fileid, + * cookie) should be in the reply. At least one client "hints" 0, + * so I set it to cnt for that case. I also round it up to the + * next multiple of DIRBLKSIZ. 
+ */ + if (siz == 0) + siz = cnt; + siz = ((siz + DIRBLKSIZ - 1) & ~(DIRBLKSIZ - 1)); + + if (nd->nd_flag & ND_NFSV4) { + error = nfsrv_getattrbits(nd, &attrbits, NULL, NULL); + if (error) + goto nfsmout; + NFSSET_ATTRBIT(&savbits, &attrbits); + NFSCLRNOTFILLABLE_ATTRBIT(&attrbits); + NFSZERO_ATTRBIT(&rderrbits); + NFSSETBIT_ATTRBIT(&rderrbits, NFSATTRBIT_RDATTRERROR); + } else { + NFSZERO_ATTRBIT(&attrbits); + } + fullsiz = siz; + nd->nd_repstat = getret = nfsvno_getattr(vp, &at, nd->nd_cred, p); + if (!nd->nd_repstat) { + if (off && verf != at.na_filerev) { + /* + * va_filerev is not sufficient as a cookie verifier, + * since it is not supposed to change when entries are + * removed/added unless that offset cookies returned to + * the client are no longer valid. + */ +#if 0 + if (nd->nd_flag & ND_NFSV4) { + nd->nd_repstat = NFSERR_NOTSAME; + } else { + nd->nd_repstat = NFSERR_BAD_COOKIE; + } +#endif + } else if ((nd->nd_flag & ND_NFSV4) && off == 0 && verf != 0) { + nd->nd_repstat = NFSERR_BAD_COOKIE; + } + } + if (!nd->nd_repstat && vp->v_type != VDIR) + nd->nd_repstat = NFSERR_NOTDIR; + if (!nd->nd_repstat && cnt == 0) + nd->nd_repstat = NFSERR_TOOSMALL; + if (!nd->nd_repstat) + nd->nd_repstat = nfsvno_accchk(vp, NFSV4ACE_SEARCH, + nd->nd_cred, exp, p, NFSACCCHK_NOOVERRIDE, + NFSACCCHK_VPISLOCKED); + if (nd->nd_repstat) { + vput(vp); + if (nd->nd_flag & ND_NFSV3) + nfsrv_postopattr(nd, getret, &at); + return (0); + } + + NFSVOPUNLOCK(vp, 0, p); + + MALLOC(rbuf, caddr_t, siz, M_TEMP, M_WAITOK); +again: + eofflag = 0; + if (cookies) { + free((caddr_t)cookies, M_TEMP); + cookies = NULL; + } + + iv.iov_base = rbuf; + iv.iov_len = siz; + io.uio_iov = &iv; + io.uio_iovcnt = 1; + io.uio_offset = (off_t)off; + io.uio_resid = siz; + io.uio_segflg = UIO_SYSSPACE; + io.uio_rw = UIO_READ; + io.uio_td = NULL; + NFSVOPLOCK(vp, LK_EXCLUSIVE | LK_RETRY, p); + nd->nd_repstat = VOP_READDIR(vp, &io, nd->nd_cred, &eofflag, &ncookies, + &cookies); + NFSVOPUNLOCK(vp, 0, p); + off = (u_int64_t)io.uio_offset; + if (io.uio_resid) + siz -= io.uio_resid; + + getret = nfsvno_getattr(vp, &at, nd->nd_cred, p); + + if (!cookies && !nd->nd_repstat) + nd->nd_repstat = NFSERR_PERM; + if (!nd->nd_repstat) + nd->nd_repstat = getret; + if (nd->nd_repstat) { + vrele(vp); + if (cookies) + free((caddr_t)cookies, M_TEMP); + free((caddr_t)rbuf, M_TEMP); + if (nd->nd_flag & ND_NFSV3) + nfsrv_postopattr(nd, getret, &at); + return (0); + } + /* + * If nothing read, return eof + * rpc reply + */ + if (siz == 0) { + vrele(vp); + if (nd->nd_flag & ND_NFSV3) + nfsrv_postopattr(nd, getret, &at); + NFSM_BUILD(tl, u_int32_t *, 4 * NFSX_UNSIGNED); + txdr_hyper(at.na_filerev, tl); + tl += 2; + *tl++ = newnfs_false; + *tl = newnfs_true; + free((caddr_t)cookies, M_TEMP); + free((caddr_t)rbuf, M_TEMP); + return (0); + } + + /* + * Check for degenerate cases of nothing useful read. + * If so go try again + */ + cpos = rbuf; + cend = rbuf + siz; + dp = (struct dirent *)cpos; + cookiep = cookies; + + /* + * For some reason FreeBSD's ufs_readdir() chooses to back the + * directory offset up to a block boundary, so it is necessary to + * skip over the records that precede the requested offset. This + * requires the assumption that file offset cookies monotonically + * increase. + */ + while (cpos < cend && ncookies > 0 && + (dp->d_fileno == 0 || dp->d_type == DT_WHT || + ((u_quad_t)(*cookiep)) <= toff || + ((nd->nd_flag & ND_NFSV4) && + ((dp->d_namlen == 1 && dp->d_name[0] == '.') || + (dp->d_namlen==2 && dp->d_name[0]=='.' 
&& dp->d_name[1]=='.'))))) { + cpos += dp->d_reclen; + dp = (struct dirent *)cpos; + cookiep++; + ncookies--; + } + if (cpos >= cend || ncookies == 0) { + siz = fullsiz; + toff = off; + goto again; + } + + /* + * Probe one of the directory entries to see if the filesystem + * supports VGET for NFSv3. For NFSv4, it will return an + * error later, if attributes are required. + * (To be honest, most if not all NFSv4 clients will require + * attributes, but??) + */ + if ((nd->nd_flag & ND_NFSV3)) { + vgetret = VFS_VGET(vp->v_mount, dp->d_fileno, LK_EXCLUSIVE, + &nvp); + if (vgetret != 0) { + if (vgetret == EOPNOTSUPP) + nd->nd_repstat = NFSERR_NOTSUPP; + else + nd->nd_repstat = NFSERR_SERVERFAULT; + vrele(vp); + if (cookies) + free((caddr_t)cookies, M_TEMP); + free((caddr_t)rbuf, M_TEMP); + nfsrv_postopattr(nd, getret, &at); + return (0); + } + if (!vgetret) + vput(nvp); + nvp = NULL; + } + + /* + * Save this position, in case there is an error before one entry + * is created. + */ + mb0 = nd->nd_mb; + bpos0 = nd->nd_bpos; + + /* + * Fill in the first part of the reply. + * dirlen is the reply length in bytes and cannot exceed cnt. + * (Include the two booleans at the end of the reply in dirlen now, + * so we recognize when we have exceeded cnt.) + */ + if (nd->nd_flag & ND_NFSV3) { + dirlen = NFSX_V3POSTOPATTR + NFSX_VERF + 2 * NFSX_UNSIGNED; + nfsrv_postopattr(nd, getret, &at); + } else { + dirlen = NFSX_VERF + 2 * NFSX_UNSIGNED; + } + NFSM_BUILD(tl, u_int32_t *, NFSX_VERF); + txdr_hyper(at.na_filerev, tl); + + /* + * Save this position, in case there is an empty reply needed. + */ + mb1 = nd->nd_mb; + bpos1 = nd->nd_bpos; + + /* Loop through the records and build reply */ + entrycnt = 0; + while (cpos < cend && ncookies > 0 && dirlen < cnt) { + nlen = dp->d_namlen; + if (dp->d_fileno != 0 && dp->d_type != DT_WHT && + nlen <= NFS_MAXNAMLEN && + ((nd->nd_flag & ND_NFSV3) || nlen > 2 || + (nlen==2 && (dp->d_name[0]!='.' || dp->d_name[1]!='.')) + || (nlen == 1 && dp->d_name[0] != '.'))) { + /* + * Save the current position in the reply, in case + * this entry exceeds cnt. + */ + mb1 = nd->nd_mb; + bpos1 = nd->nd_bpos; + + /* + * For readdir_and_lookup get the vnode using + * the file number. 
+ */ + nvp = NULL; + refp = NULL; + r = 0; + if ((nd->nd_flag & ND_NFSV3) || + NFSNONZERO_ATTRBIT(&savbits)) { + if (nd->nd_flag & ND_NFSV4) + refp = nfsv4root_getreferral(NULL, + vp, dp->d_fileno); + if (refp == NULL) + r = VFS_VGET(vp->v_mount, dp->d_fileno, + LK_EXCLUSIVE, &nvp); + if (!r) { + if (refp == NULL && + ((nd->nd_flag & ND_NFSV3) || + NFSNONZERO_ATTRBIT(&attrbits))) { + r = nfsvno_getfh(nvp, &nfh, p); + if (!r) + r = nfsvno_getattr(nvp, nvap, + nd->nd_cred, p); + } + } else { + nvp = NULL; + } + if (r) { + if (!NFSISSET_ATTRBIT(&attrbits, + NFSATTRBIT_RDATTRERROR)) { + if (nvp != NULL) + vput(nvp); + nd->nd_repstat = r; + break; + } + } + } + + /* + * Build the directory record xdr + */ + if (nd->nd_flag & ND_NFSV3) { + NFSM_BUILD(tl, u_int32_t *, 3 * NFSX_UNSIGNED); + *tl++ = newnfs_true; + *tl++ = 0; + *tl = txdr_unsigned(dp->d_fileno); + dirlen += nfsm_strtom(nd, dp->d_name, nlen); + NFSM_BUILD(tl, u_int32_t *, 2 * NFSX_UNSIGNED); + *tl++ = 0; + *tl = txdr_unsigned(*cookiep); + nfsrv_postopattr(nd, 0, nvap); + dirlen += nfsm_fhtom(nd,(u_int8_t *)&nfh,0,1); + dirlen += (5*NFSX_UNSIGNED+NFSX_V3POSTOPATTR); + if (nvp != NULL) + vput(nvp); + } else { + NFSM_BUILD(tl, u_int32_t *, 3 * NFSX_UNSIGNED); + *tl++ = newnfs_true; + *tl++ = 0; + *tl = txdr_unsigned(*cookiep); + dirlen += nfsm_strtom(nd, dp->d_name, nlen); + if (nvp != NULL) + NFSVOPUNLOCK(nvp, 0, p); + if (refp != NULL) { + dirlen += nfsrv_putreferralattr(nd, + &savbits, refp, 0, + &nd->nd_repstat); + if (nd->nd_repstat) { + if (nvp != NULL) + vrele(nvp); + break; + } + } else if (r) { + dirlen += nfsvno_fillattr(nd, nvp, nvap, + &nfh, r, &rderrbits, nd->nd_cred, + p, isdgram, 0); + } else { + dirlen += nfsvno_fillattr(nd, nvp, nvap, + &nfh, r, &attrbits, nd->nd_cred, + p, isdgram, 0); + } + if (nvp != NULL) + vrele(nvp); + dirlen += (3 * NFSX_UNSIGNED); + } + if (dirlen <= cnt) + entrycnt++; + } + cpos += dp->d_reclen; + dp = (struct dirent *)cpos; + cookiep++; + ncookies--; + } + vrele(vp); + + /* + * If dirlen > cnt, we must strip off the last entry. If that + * results in an empty reply, report NFSERR_TOOSMALL. + */ + if (dirlen > cnt || nd->nd_repstat) { + if (!nd->nd_repstat && entrycnt == 0) + nd->nd_repstat = NFSERR_TOOSMALL; + if (nd->nd_repstat) + newnfs_trimtrailing(nd, mb0, bpos0); + else + newnfs_trimtrailing(nd, mb1, bpos1); + eofflag = 0; + } else if (cpos < cend) + eofflag = 0; + if (!nd->nd_repstat) { + NFSM_BUILD(tl, u_int32_t *, 2 * NFSX_UNSIGNED); + *tl++ = newnfs_false; + if (eofflag) + *tl = newnfs_true; + else + *tl = newnfs_false; + } + FREE((caddr_t)cookies, M_TEMP); + FREE((caddr_t)rbuf, M_TEMP); + return (0); +nfsmout: + vput(vp); + return (error); +} + +/* + * Get the settable attributes out of the mbuf list. + * (Return 0 or EBADRPC) + */ +int +nfsrv_sattr(struct nfsrv_descript *nd, struct nfsvattr *nvap, + nfsattrbit_t *attrbitp, NFSACL_T *aclp, struct thread *p) +{ + u_int32_t *tl; + struct nfsv2_sattr *sp; + struct timeval curtime; + int error = 0, toclient = 0; + + switch (nd->nd_flag & (ND_NFSV2 | ND_NFSV3 | ND_NFSV4)) { + case ND_NFSV2: + NFSM_DISSECT(sp, struct nfsv2_sattr *, NFSX_V2SATTR); + /* + * Some old clients didn't fill in the high order 16bits. 
+ * --> check the low order 2 bytes for 0xffff + */ + if ((fxdr_unsigned(int, sp->sa_mode) & 0xffff) != 0xffff) + nvap->na_mode = nfstov_mode(sp->sa_mode); + if (sp->sa_uid != newnfs_xdrneg1) + nvap->na_uid = fxdr_unsigned(uid_t, sp->sa_uid); + if (sp->sa_gid != newnfs_xdrneg1) + nvap->na_gid = fxdr_unsigned(gid_t, sp->sa_gid); + if (sp->sa_size != newnfs_xdrneg1) + nvap->na_size = fxdr_unsigned(u_quad_t, sp->sa_size); + if (sp->sa_atime.nfsv2_sec != newnfs_xdrneg1) { +#ifdef notyet + fxdr_nfsv2time(&sp->sa_atime, &nvap->na_atime); +#else + nvap->na_atime.tv_sec = + fxdr_unsigned(u_int32_t,sp->sa_atime.nfsv2_sec); + nvap->na_atime.tv_nsec = 0; +#endif + } + if (sp->sa_mtime.nfsv2_sec != newnfs_xdrneg1) + fxdr_nfsv2time(&sp->sa_mtime, &nvap->na_mtime); + break; + case ND_NFSV3: + NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED); + if (*tl == newnfs_true) { + NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED); + nvap->na_mode = nfstov_mode(*tl); + } + NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED); + if (*tl == newnfs_true) { + NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED); + nvap->na_uid = fxdr_unsigned(uid_t, *tl); + } + NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED); + if (*tl == newnfs_true) { + NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED); + nvap->na_gid = fxdr_unsigned(gid_t, *tl); + } + NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED); + if (*tl == newnfs_true) { + NFSM_DISSECT(tl, u_int32_t *, 2 * NFSX_UNSIGNED); + nvap->na_size = fxdr_hyper(tl); + } + NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED); + switch (fxdr_unsigned(int, *tl)) { + case NFSV3SATTRTIME_TOCLIENT: + NFSM_DISSECT(tl, u_int32_t *, 2 * NFSX_UNSIGNED); + fxdr_nfsv3time(tl, &nvap->na_atime); + toclient = 1; + break; + case NFSV3SATTRTIME_TOSERVER: + NFSGETTIME(&curtime); + nvap->na_atime.tv_sec = curtime.tv_sec; + nvap->na_atime.tv_nsec = curtime.tv_usec * 1000; + nvap->na_vaflags |= VA_UTIMES_NULL; + break; + }; + NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED); + switch (fxdr_unsigned(int, *tl)) { + case NFSV3SATTRTIME_TOCLIENT: + NFSM_DISSECT(tl, u_int32_t *, 2 * NFSX_UNSIGNED); + fxdr_nfsv3time(tl, &nvap->na_mtime); + nvap->na_vaflags &= ~VA_UTIMES_NULL; + break; + case NFSV3SATTRTIME_TOSERVER: + NFSGETTIME(&curtime); + nvap->na_mtime.tv_sec = curtime.tv_sec; + nvap->na_mtime.tv_nsec = curtime.tv_usec * 1000; + if (!toclient) + nvap->na_vaflags |= VA_UTIMES_NULL; + break; + }; + break; + case ND_NFSV4: + error = nfsv4_sattr(nd, nvap, attrbitp, aclp, p); + }; +nfsmout: + return (error); +} + +/* + * Handle the setable attributes for V4. + * Returns NFSERR_BADXDR if it can't be parsed, 0 otherwise. + */ +int +nfsv4_sattr(struct nfsrv_descript *nd, struct nfsvattr *nvap, + nfsattrbit_t *attrbitp, NFSACL_T *aclp, struct thread *p) +{ + u_int32_t *tl; + int attrsum = 0; + int i, j; + int error, attrsize, bitpos, aclsize, aceerr, retnotsup = 0; + int toclient = 0; + u_char *cp, namestr[NFSV4_SMALLSTR + 1]; + uid_t uid; + gid_t gid; + struct timeval curtime; + + error = nfsrv_getattrbits(nd, attrbitp, NULL, &retnotsup); + if (error) + return (error); + NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED); + attrsize = fxdr_unsigned(int, *tl); + + /* + * Loop around getting the setable attributes. If an unsupported + * one is found, set nd_repstat == NFSERR_ATTRNOTSUPP and return. 
+ */ + if (retnotsup) { + nd->nd_repstat = NFSERR_ATTRNOTSUPP; + bitpos = NFSATTRBIT_MAX; + } else { + bitpos = 0; + } + for (; bitpos < NFSATTRBIT_MAX; bitpos++) { + if (attrsum > attrsize) { + error = NFSERR_BADXDR; + goto nfsmout; + } + if (NFSISSET_ATTRBIT(attrbitp, bitpos)) + switch (bitpos) { + case NFSATTRBIT_SIZE: + NFSM_DISSECT(tl, u_int32_t *, NFSX_HYPER); + nvap->na_size = fxdr_hyper(tl); + attrsum += NFSX_HYPER; + break; + case NFSATTRBIT_ACL: + error = nfsrv_dissectacl(nd, aclp, &aceerr, &aclsize, + p); + if (error) + goto nfsmout; + if (aceerr && !nd->nd_repstat) + nd->nd_repstat = NFSERR_ATTRNOTSUPP; + attrsum += aclsize; + break; + case NFSATTRBIT_ARCHIVE: + NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED); + if (!nd->nd_repstat) + nd->nd_repstat = NFSERR_ATTRNOTSUPP; + attrsum += NFSX_UNSIGNED; + break; + case NFSATTRBIT_HIDDEN: + NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED); + if (!nd->nd_repstat) + nd->nd_repstat = NFSERR_ATTRNOTSUPP; + attrsum += NFSX_UNSIGNED; + break; + case NFSATTRBIT_MIMETYPE: + NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED); + i = fxdr_unsigned(int, *tl); + error = nfsm_advance(nd, NFSM_RNDUP(i), -1); + if (error) + goto nfsmout; + if (!nd->nd_repstat) + nd->nd_repstat = NFSERR_ATTRNOTSUPP; + attrsum += (NFSX_UNSIGNED + NFSM_RNDUP(i)); + break; + case NFSATTRBIT_MODE: + NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED); + nvap->na_mode = nfstov_mode(*tl); + attrsum += NFSX_UNSIGNED; + break; + case NFSATTRBIT_OWNER: + NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED); + j = fxdr_unsigned(int, *tl); + if (j < 0) + return (NFSERR_BADXDR); + if (j > NFSV4_SMALLSTR) + cp = malloc(j + 1, M_NFSSTRING, M_WAITOK); + else + cp = namestr; + error = nfsrv_mtostr(nd, cp, j); + if (error) { + if (j > NFSV4_SMALLSTR) + free(cp, M_NFSSTRING); + return (error); + } + if (!nd->nd_repstat) { + nd->nd_repstat = nfsv4_strtouid(cp,j,&uid,p); + if (!nd->nd_repstat) + nvap->na_uid = uid; + } + if (j > NFSV4_SMALLSTR) + free(cp, M_NFSSTRING); + attrsum += (NFSX_UNSIGNED + NFSM_RNDUP(j)); + break; + case NFSATTRBIT_OWNERGROUP: + NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED); + j = fxdr_unsigned(int, *tl); + if (j < 0) + return (NFSERR_BADXDR); + if (j > NFSV4_SMALLSTR) + cp = malloc(j + 1, M_NFSSTRING, M_WAITOK); + else + cp = namestr; + error = nfsrv_mtostr(nd, cp, j); + if (error) { + if (j > NFSV4_SMALLSTR) + free(cp, M_NFSSTRING); + return (error); + } + if (!nd->nd_repstat) { + nd->nd_repstat = nfsv4_strtogid(cp,j,&gid,p); + if (!nd->nd_repstat) + nvap->na_gid = gid; + } + if (j > NFSV4_SMALLSTR) + free(cp, M_NFSSTRING); + attrsum += (NFSX_UNSIGNED + NFSM_RNDUP(j)); + break; + case NFSATTRBIT_SYSTEM: + NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED); + if (!nd->nd_repstat) + nd->nd_repstat = NFSERR_ATTRNOTSUPP; + attrsum += NFSX_UNSIGNED; + break; + case NFSATTRBIT_TIMEACCESSSET: + NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED); + attrsum += NFSX_UNSIGNED; + if (fxdr_unsigned(int, *tl)==NFSV4SATTRTIME_TOCLIENT) { + NFSM_DISSECT(tl, u_int32_t *, NFSX_V4TIME); + fxdr_nfsv4time(tl, &nvap->na_atime); + toclient = 1; + attrsum += NFSX_V4TIME; + } else { + NFSGETTIME(&curtime); + nvap->na_atime.tv_sec = curtime.tv_sec; + nvap->na_atime.tv_nsec = curtime.tv_usec * 1000; + nvap->na_vaflags |= VA_UTIMES_NULL; + } + break; + case NFSATTRBIT_TIMEBACKUP: + NFSM_DISSECT(tl, u_int32_t *, NFSX_V4TIME); + if (!nd->nd_repstat) + nd->nd_repstat = NFSERR_ATTRNOTSUPP; + attrsum += NFSX_V4TIME; + break; + case NFSATTRBIT_TIMECREATE: + NFSM_DISSECT(tl, u_int32_t *, NFSX_V4TIME); + if (!nd->nd_repstat) + 
nd->nd_repstat = NFSERR_ATTRNOTSUPP; + attrsum += NFSX_V4TIME; + break; + case NFSATTRBIT_TIMEMODIFYSET: + NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED); + attrsum += NFSX_UNSIGNED; + if (fxdr_unsigned(int, *tl)==NFSV4SATTRTIME_TOCLIENT) { + NFSM_DISSECT(tl, u_int32_t *, NFSX_V4TIME); + fxdr_nfsv4time(tl, &nvap->na_mtime); + nvap->na_vaflags &= ~VA_UTIMES_NULL; + attrsum += NFSX_V4TIME; + } else { + NFSGETTIME(&curtime); + nvap->na_mtime.tv_sec = curtime.tv_sec; + nvap->na_mtime.tv_nsec = curtime.tv_usec * 1000; + if (!toclient) + nvap->na_vaflags |= VA_UTIMES_NULL; + } + break; + default: + nd->nd_repstat = NFSERR_ATTRNOTSUPP; + /* + * set bitpos so we drop out of the loop. + */ + bitpos = NFSATTRBIT_MAX; + break; + }; + } + + /* + * some clients pad the attrlist, so we need to skip over the + * padding. + */ + if (attrsum > attrsize) { + error = NFSERR_BADXDR; + } else { + attrsize = NFSM_RNDUP(attrsize); + if (attrsum < attrsize) + error = nfsm_advance(nd, attrsize - attrsum, -1); + } +nfsmout: + return (error); +} + +/* + * Check/setup export credentials. + */ +int +nfsd_excred(struct nfsrv_descript *nd, struct nfsexstuff *exp, + struct ucred *credanon) +{ + int i; + int error = 0; + + /* + * Check/setup credentials. + */ + if (nd->nd_flag & ND_GSS) + exp->nes_exflag &= ~(MNT_EXGSSONLY | MNT_EXPORTANON); + + /* + * For AUTH_SYS, check to see if it is allowed. + * RFC2623 suggests that the NFSv3 Fsinfo RPC be allowed to + * AUTH_NONE or AUTH_SYS for file systems requiring RPCSEC_GSS. + */ + if (NFSVNO_EXGSSONLY(exp) && + nd->nd_procnum != NFSPROC_FSINFO) { + if (nd->nd_flag & ND_NFSV4) + error = NFSERR_WRONGSEC; + else + error = (NFSERR_AUTHERR | AUTH_TOOWEAK); + return (error); + } + + /* + * Check to see if the file system is exported V4 only. + */ + if (NFSVNO_EXV4ONLY(exp) && !(nd->nd_flag & ND_NFSV4)) + return (NFSERR_PROGNOTV4); + + /* + * Now, map the user credentials. + * (Note that ND_AUTHNONE will only be set for an NFSv3 + * Fsinfo RPC. If set for anything else, this code might need + * to change.) + */ + if (NFSVNO_EXPORTED(exp) && + ((!(nd->nd_flag & ND_GSS) && nd->nd_cred->cr_uid == 0) || + NFSVNO_EXPORTANON(exp) || + (nd->nd_flag & ND_AUTHNONE))) { + nd->nd_cred->cr_uid = credanon->cr_uid; + nd->nd_cred->cr_gid = credanon->cr_gid; + for (i = 0; i < credanon->cr_ngroups && i < NGROUPS; i++) + nd->nd_cred->cr_groups[i] = credanon->cr_groups[i]; + nd->nd_cred->cr_ngroups = i; + } + return (0); +} + +/* + * Check exports. + */ +int +nfsvno_checkexp(struct mount *mp, struct sockaddr *nam, struct nfsexstuff *exp, + struct ucred **credp) +{ + int error; + int numsecflavor, *secflavors; + + error = VFS_CHECKEXP(mp, nam, &exp->nes_exflag, credp, + &numsecflavor, &secflavors); + if (error && nfs_rootfhset) { + exp->nes_exflag = 0; + error = 0; + } + return (error); +} + +/* + * Get a vnode for a file handle and export stuff. + */ +int +nfsvno_fhtovp(struct mount *mp, fhandle_t *fhp, struct sockaddr *nam, + struct vnode **vpp, struct nfsexstuff *exp, struct ucred **credp) +{ + int error; + int numsecflavor, *secflavors; + + error = VFS_FHTOVP(mp, &fhp->fh_fid, vpp); + if (nam && !error) { + error = VFS_CHECKEXP(mp, nam, &exp->nes_exflag, credp, + &numsecflavor, &secflavors); + if (error) { + if (nfs_rootfhset) { + exp->nes_exflag = 0; + error = 0; + } else { + vput(*vpp); + } + } + } + return (error); +} + +/* + * Do the pathconf vnode op. 
+ */ +int +nfsvno_pathconf(struct vnode *vp, int flag, register_t *retf, + struct ucred *cred, struct thread *p) +{ + int error; + + error = VOP_PATHCONF(vp, flag, retf); + return (error); +} + +/* + * nfsd_fhtovp() - convert a fh to a vnode ptr + * - look up fsid in mount list (if not found ret error) + * - get vp and export rights by calling nfsvno_fhtovp() + * - if cred->cr_uid == 0 or MNT_EXPORTANON set it to credanon + * for AUTH_SYS + * Also handle getting the Giant lock for the file system, + * as required: + * - if same mount point as *mpp + * do nothing + * else if *mpp == NULL + * if already locked + * leave it locked + * else + * call VFS_LOCK_GIANT() + * else + * if already locked + * unlock Giant + * call VFS_LOCK_GIANT() + */ +void +nfsd_fhtovp(struct nfsrv_descript *nd, struct nfsrvfh *nfp, + struct vnode **vpp, struct nfsexstuff *exp, + struct mount **mpp, int startwrite, struct thread *p) +{ + struct mount *mp; + struct ucred *credanon; + fhandle_t *fhp; + + fhp = (fhandle_t *)nfp->nfsrvfh_data; + /* + * Check for the special case of the nfsv4root_fh. + */ + mp = vfs_getvfs(&fhp->fh_fsid); + if (!mp) { + *vpp = NULL; + nd->nd_repstat = ESTALE; + if (*mpp && exp->nes_vfslocked) + VFS_UNLOCK_GIANT(*mpp); + *mpp = NULL; + exp->nes_vfslocked = 0; + return; + } + + /* + * Now, handle Giant for the file system. + */ + if (*mpp != NULL && *mpp != mp && exp->nes_vfslocked) { + VFS_UNLOCK_GIANT(*mpp); + exp->nes_vfslocked = 0; + } + if (!exp->nes_vfslocked && *mpp != mp) + exp->nes_vfslocked = VFS_LOCK_GIANT(mp); + + *mpp = mp; + if (startwrite) + vn_start_write(NULL, mpp, V_WAIT); + + nd->nd_repstat = nfsvno_fhtovp(mp, fhp, nd->nd_nam, vpp, exp, + &credanon); + + /* + * For NFSv4 without a pseudo root fs, unexported file handles + * can be returned, so that Lookup works everywhere. + */ + if (!nd->nd_repstat && exp->nes_exflag == 0 && + !(nd->nd_flag & ND_NFSV4)) { + vput(*vpp); + nd->nd_repstat = EACCES; + } + + /* + * Personally, I've never seen any point in requiring a + * reserved port#, since only in the rare case where the + * clients are all boxes with secure system priviledges, + * does it provide any enhanced security, but... some people + * believe it to be useful and keep putting this code back in. + * (There is also some "security checker" out there that + * complains if the nfs server doesn't enforce this.) + * However, note the following: + * RFC3530 (NFSv4) specifies that a reserved port# not be + * required. + * RFC2623 recommends that, if a reserved port# is checked for, + * that there be a way to turn that off--> ifdef'd. + */ +#ifdef NFS_REQRSVPORT + if (!nd->nd_repstat) { + struct sockaddr_in *saddr; + struct sockaddr_in6 *saddr6; + saddr = NFSSOCKADDR(nd->nd_nam, struct sockaddr_in *); + saddr6 = NFSSOCKADDR(nd->nd_nam, struct sockaddr_in6 *); + if (!(nd->nd_flag & ND_NFSV4) && + ((saddr->sin_family == AF_INET && + ntohs(saddr->sin_port) >= IPPORT_RESERVED) || + (saddr6->sin6_family == AF_INET6 && + ntohs(saddr6->sin6_port) >= IPPORT_RESERVED))) { + vput(*vpp); + nd->nd_repstat = (NFSERR_AUTHERR | AUTH_TOOWEAK); + } + } +#endif /* NFS_REQRSVPORT */ + + /* + * Check/setup credentials. 
+ */ + if (!nd->nd_repstat) { + nd->nd_saveduid = nd->nd_cred->cr_uid; + nd->nd_repstat = nfsd_excred(nd, exp, credanon); + if (nd->nd_repstat) + vput(*vpp); + } + if (nd->nd_repstat) { + if (startwrite) + vn_finished_write(mp); + if (exp->nes_vfslocked) { + VFS_UNLOCK_GIANT(mp); + exp->nes_vfslocked = 0; + } + vfs_rel(mp); + *vpp = NULL; + *mpp = NULL; + } else { + vfs_rel(mp); + } +} + +/* + * glue for fp. + */ +int +fp_getfvp(struct thread *p, int fd, struct file **fpp, struct vnode **vpp) +{ + struct filedesc *fdp; + struct file *fp; + + fdp = p->td_proc->p_fd; + if (fd >= fdp->fd_nfiles || + (fp = fdp->fd_ofiles[fd]) == NULL) + return (EBADF); + *fpp = fp; + return (0); +} + +/* + * Network export information + */ +struct netexport { + struct netcred ne_defexported; /* Default export */ + struct radix_node_head *ne_rtable[AF_MAX+1]; /* Individual exports */ +}; + +struct netexport nfsv4root_export; + +/* + * Called from newnfssvc() to update the exports list. Just call + * vfs_export(). This has to be done, since the v4 root fake fs isn't + * in the mount list. + */ +int +nfsrv_v4rootexport(void *argp, struct ucred *cred, struct thread *p) +{ + struct nfsex_args *nfsexargp = (struct nfsex_args *)argp; + int error; + struct nameidata nd; + fhandle_t fh; + + /* + * Until newmountd is using the secflavor fields, just make + * sure it's 0. + */ + nfsexargp->export.ex_numsecflavors = 0; + error = vfs_export(&nfsv4root_mnt, &nfsexargp->export); + if ((nfsexargp->export.ex_flags & MNT_DELEXPORT)) { + nfs_rootfhset = 0; + nfsv4root_set = 0; + } else if (error == 0) { + if (nfsexargp->fspec == NULL) + return (EPERM); + /* + * If fspec != NULL, this is the v4root path. + */ + NDINIT(&nd, LOOKUP, FOLLOW | MPSAFE, UIO_USERSPACE, + nfsexargp->fspec, p); + if ((error = namei(&nd)) != 0) + return (error); + error = nfsvno_getfh(nd.ni_vp, &fh, p); + vrele(nd.ni_vp); + if (!error) { + nfs_rootfh.nfsrvfh_len = NFSX_MYFH; + NFSBCOPY((caddr_t)&fh, + nfs_rootfh.nfsrvfh_data, + sizeof (fhandle_t)); + nfs_rootfhset = 1; + } + } + return (error); +} + +/* + * Get the tcp socket sequence numbers we need. + * (Maybe this should be moved to the tcp sources?) + */ +int +nfsrv_getsocksndseq(struct socket *so, tcp_seq *maxp, tcp_seq *unap) +{ + struct inpcb *inp; + struct tcpcb *tp; + int error = EPIPE; + + INP_INFO_RLOCK(&V_tcbinfo); + inp = sotoinpcb(so); + if (inp == NULL) { + INP_INFO_RUNLOCK(&V_tcbinfo); + return (error); + } + INP_RLOCK(inp); + INP_INFO_RUNLOCK(&V_tcbinfo); + tp = intotcpcb(inp); + if (tp != NULL && tp->t_state == TCPS_ESTABLISHED) { + *maxp = tp->snd_max; + *unap = tp->snd_una; + error = 0; + } + INP_RUNLOCK(inp); + return (error); +} + +/* + * This function needs to test to see if the system is near its limit + * for memory allocation via malloc() or mget() and return True iff + * either of these resources are near their limit. + * XXX (For now, this is just a stub.) + */ +int nfsrv_testmalloclimit = 0; +int +nfsrv_mallocmget_limit(void) +{ + static int printmesg = 0; + static int testval = 1; + + if (nfsrv_testmalloclimit && (testval++ % 1000) == 0) { + if ((printmesg++ % 100) == 0) + printf("nfsd: malloc/mget near limit\n"); + return (1); + } + return (0); +} + +/* + * BSD specific initialization of a mount point. 
+ */ +void +nfsd_mntinit(void) +{ + static int inited = 0; + + if (inited) + return; + inited = 1; + nfsv4root_mnt.mnt_flag = (MNT_RDONLY | MNT_EXPORTED); + TAILQ_INIT(&nfsv4root_mnt.mnt_nvnodelist); + nfsv4root_mnt.mnt_export = NULL; + TAILQ_INIT(&nfsv4root_opt); + TAILQ_INIT(&nfsv4root_newopt); + nfsv4root_mnt.mnt_opt = &nfsv4root_opt; + nfsv4root_mnt.mnt_optnew = &nfsv4root_newopt; + nfsv4root_mnt.mnt_nvnodelistsize = 0; +} + +/* + * Get a vnode for a file handle, without checking exports, etc. + */ +struct vnode * +nfsvno_getvp(fhandle_t *fhp) +{ + struct mount *mp; + struct vnode *vp; + int error; + + mp = vfs_getvfs(&fhp->fh_fsid); + if (mp == NULL) + return (NULL); + error = VFS_FHTOVP(mp, &fhp->fh_fid, &vp); + if (error) + return (NULL); + return (vp); +} + +static int id_for_advlock; +/* + * Check to see it a byte range lock held by a process running + * locally on the server conflicts with the new lock. + */ +int +nfsvno_localconflict(struct vnode *vp, int ftype, u_int64_t first, + u_int64_t end, struct nfslockconflict *cfp, struct thread *p) +{ + int error; + struct flock fl; + + if (!nfsrv_dolocallocks) + return (0); + fl.l_whence = SEEK_SET; + fl.l_type = ftype; + fl.l_start = (off_t)first; + if (end == NFS64BITSSET) + fl.l_len = 0; + else + fl.l_len = (off_t)(end - first); + /* + * FreeBSD8 doesn't like 0, so I'll use the address of id_for_advlock. + */ + NFSVOPUNLOCK(vp, 0, p); + error = VOP_ADVLOCK(vp, &id_for_advlock, F_GETLK, &fl, F_POSIX); + NFSVOPLOCK(vp, LK_EXCLUSIVE | LK_RETRY, p); + if (error) + return (error); + if (fl.l_type == F_UNLCK) + return (0); + if (cfp != NULL) { + cfp->cl_clientid.lval[0] = cfp->cl_clientid.lval[1] = 0; + cfp->cl_first = (u_int64_t)fl.l_start; + if (fl.l_len == 0) + cfp->cl_end = NFS64BITSSET; + else + cfp->cl_end = (u_int64_t) + (fl.l_start + fl.l_len); + if (fl.l_type == F_WRLCK) + cfp->cl_flags = NFSLCK_WRITE; + else + cfp->cl_flags = NFSLCK_READ; + sprintf(cfp->cl_owner, "LOCALID%d", fl.l_pid); + cfp->cl_ownerlen = strlen(cfp->cl_owner); + return (NFSERR_DENIED); + } + return (NFSERR_INVAL); +} + +/* + * Do a local VOP_ADVLOCK(). + */ +int +nfsvno_advlock(struct vnode *vp, int ftype, u_int64_t first, + u_int64_t end, struct thread *p) +{ + int error; + struct flock fl; + u_int64_t tlen; + + if (!nfsrv_dolocallocks) + return (0); + fl.l_whence = SEEK_SET; + fl.l_type = ftype; + fl.l_start = (off_t)first; + if (end == NFS64BITSSET) { + fl.l_len = 0; + } else { + tlen = end - first; + fl.l_len = (off_t)tlen; + } + /* + * FreeBSD8 doesn't like 0, so I'll use the address of id_for_advlock. + */ + NFSVOPUNLOCK(vp, 0, p); + error = VOP_ADVLOCK(vp, &id_for_advlock, F_SETLK, &fl, F_POSIX); + NFSVOPLOCK(vp, LK_EXCLUSIVE | LK_RETRY, p); + return (error); +} + +/* + * Unlock an underlying local file system. + */ +void +nfsvno_unlockvfs(struct mount *mp) +{ + + VFS_UNLOCK_GIANT(mp); +} + +/* + * Lock an underlying file system, as required, and return + * whether or not it is locked. + */ +int +nfsvno_lockvfs(struct mount *mp) +{ + int ret; + + ret = VFS_LOCK_GIANT(mp); + return (ret); +} + +/* + * Check the nfsv4 root exports. 
+ */ +int +nfsvno_v4rootexport(struct nfsrv_descript *nd) +{ + struct ucred *credanon; + int exflags, error; + + error = vfs_stdcheckexp(&nfsv4root_mnt, nd->nd_nam, &exflags, + &credanon, NULL, NULL); + if (error) + return (NFSERR_PROGUNAVAIL); + if ((exflags & MNT_EXGSSONLY)) + nd->nd_flag |= ND_EXGSSONLY; + return (0); +} + +/* + * Nfs server psuedo system call for the nfsd's + */ +/* + * MPSAFE + */ +static int +nfssvc_nfsd(struct thread *td, struct nfssvc_args *uap) +{ + struct file *fp; + struct nfsd_args nfsdarg; + int error; + + if (uap->flag & NFSSVC_NFSDADDSOCK) { + error = copyin(uap->argp, (caddr_t)&nfsdarg, sizeof(nfsdarg)); + if (error) + return (error); + if ((error = fget(td, nfsdarg.sock, &fp)) != 0) { + return (error); + } + if (fp->f_type != DTYPE_SOCKET) { + fdrop(fp, td); + return (EPERM); + } + error = nfsrvd_addsock(fp); + fdrop(fp, td); + } else if (uap->flag & NFSSVC_NFSDNFSD) { + error = nfsrvd_nfsd(td, NULL); + } else { + error = nfssvc_srvcall(td, uap, td->td_ucred); + } + return (error); +} + +static int +nfssvc_srvcall(struct thread *p, struct nfssvc_args *uap, struct ucred *cred) +{ + struct nfsex_args export; + struct file *fp = NULL; + int stablefd, len; + struct nfsd_clid adminrevoke; + struct nfsd_dumplist dumplist; + struct nfsd_dumpclients *dumpclients; + struct nfsd_dumplocklist dumplocklist; + struct nfsd_dumplocks *dumplocks; + struct nameidata nd; + vnode_t vp; + int error = EINVAL; + + if (uap->flag & NFSSVC_PUBLICFH) { + NFSBZERO((caddr_t)&nfs_pubfh.nfsrvfh_data, + sizeof (fhandle_t)); + error = copyin(uap->argp, + &nfs_pubfh.nfsrvfh_data, sizeof (fhandle_t)); + if (!error) + nfs_pubfhset = 1; + } else if (uap->flag & NFSSVC_V4ROOTEXPORT) { + error = copyin(uap->argp,(caddr_t)&export, + sizeof (struct nfsex_args)); + if (!error) + error = nfsrv_v4rootexport(&export, cred, p); + } else if (uap->flag & NFSSVC_NOPUBLICFH) { + nfs_pubfhset = 0; + error = 0; + } else if (uap->flag & NFSSVC_STABLERESTART) { + error = copyin(uap->argp, (caddr_t)&stablefd, + sizeof (int)); + if (!error) + error = fp_getfvp(p, stablefd, &fp, &vp); + if (!error && (NFSFPFLAG(fp) & (FREAD | FWRITE)) != (FREAD | FWRITE)) + error = EBADF; + if (!error && newnfs_numnfsd != 0) + error = EPERM; + if (!error) { + nfsrv_stablefirst.nsf_fp = fp; + nfsrv_setupstable(p); + } + } else if (uap->flag & NFSSVC_ADMINREVOKE) { + error = copyin(uap->argp, (caddr_t)&adminrevoke, + sizeof (struct nfsd_clid)); + if (!error) + error = nfsrv_adminrevoke(&adminrevoke, p); + } else if (uap->flag & NFSSVC_DUMPCLIENTS) { + error = copyin(uap->argp, (caddr_t)&dumplist, + sizeof (struct nfsd_dumplist)); + if (!error && (dumplist.ndl_size < 1 || + dumplist.ndl_size > NFSRV_MAXDUMPLIST)) + error = EPERM; + if (!error) { + len = sizeof (struct nfsd_dumpclients) * dumplist.ndl_size; + dumpclients = (struct nfsd_dumpclients *)malloc(len, + M_TEMP, M_WAITOK); + nfsrv_dumpclients(dumpclients, dumplist.ndl_size); + error = copyout(dumpclients, + CAST_USER_ADDR_T(dumplist.ndl_list), len); + free((caddr_t)dumpclients, M_TEMP); + } + } else if (uap->flag & NFSSVC_DUMPLOCKS) { + error = copyin(uap->argp, (caddr_t)&dumplocklist, + sizeof (struct nfsd_dumplocklist)); + if (!error && (dumplocklist.ndllck_size < 1 || + dumplocklist.ndllck_size > NFSRV_MAXDUMPLIST)) + error = EPERM; + if (!error) + error = nfsrv_lookupfilename(&nd, + dumplocklist.ndllck_fname, p); + if (!error) { + len = sizeof (struct nfsd_dumplocks) * + dumplocklist.ndllck_size; + dumplocks = (struct nfsd_dumplocks *)malloc(len, + M_TEMP, M_WAITOK); + 
nfsrv_dumplocks(nd.ni_vp, dumplocks, + dumplocklist.ndllck_size, p); + vput(nd.ni_vp); + error = copyout(dumplocks, + CAST_USER_ADDR_T(dumplocklist.ndllck_list), len); + free((caddr_t)dumplocks, M_TEMP); + } + } + return (error); +} + +extern int (*nfsd_call_nfsd)(struct thread *, struct nfssvc_args *); + +/* + * Called once to initialize data structures... + */ +static int +nfsd_modevent(module_t mod, int type, void *data) +{ + int error = 0; + static int loaded = 0; + + switch (type) { + case MOD_LOAD: + if (loaded) + return (0); + newnfs_portinit(); + mtx_init(&nfs_cache_mutex, "nfs_cache_mutex", NULL, MTX_DEF); + mtx_init(&nfs_v4root_mutex, "nfs_v4root_mutex", NULL, MTX_DEF); + mtx_init(&nfsv4root_mnt.mnt_mtx, "struct mount mtx", NULL, + MTX_DEF); + lockinit(&nfsv4root_mnt.mnt_explock, PVFS, "explock", 0, 0); + nfsrvd_initcache(); + nfsd_init(); + NFSD_LOCK(); + nfsrvd_init(0); + NFSD_UNLOCK(); + nfsd_mntinit(); +#ifdef VV_DISABLEDELEG + vn_deleg_ops.vndeleg_recall = nfsd_recalldelegation; + vn_deleg_ops.vndeleg_disable = nfsd_disabledelegation; +#endif + nfsd_call_servertimer = nfsrv_servertimer; + nfsd_call_nfsd = nfssvc_nfsd; + loaded = 1; + break; + + case MOD_UNLOAD: + if (newnfs_numnfsd != 0) { + error = EBUSY; + break; + } + +#ifdef VV_DISABLEDELEG + vn_deleg_ops.vndeleg_recall = NULL; + vn_deleg_ops.vndeleg_disable = NULL; +#endif + nfsd_call_servertimer = NULL; + nfsd_call_nfsd = NULL; + /* and get rid of the locks */ + mtx_destroy(&nfs_cache_mutex); + mtx_destroy(&nfs_v4root_mutex); + mtx_destroy(&nfsv4root_mnt.mnt_mtx); + lockdestroy(&nfsv4root_mnt.mnt_explock); + loaded = 0; + break; + default: + error = EOPNOTSUPP; + break; + } + return error; +} +static moduledata_t nfsd_mod = { + "nfsd", + nfsd_modevent, + NULL, +}; +DECLARE_MODULE(nfsd, nfsd_mod, SI_SUB_VFS, SI_ORDER_ANY); + +/* So that loader and kldload(2) can find us, wherever we are.. */ +MODULE_VERSION(nfsd, 1); +MODULE_DEPEND(nfsd, nfscommon, 1, 1, 1); + diff --git a/sys/fs/nfsserver/nfs_nfsdserv.c b/sys/fs/nfsserver/nfs_nfsdserv.c new file mode 100644 index 0000000..141a614 --- /dev/null +++ b/sys/fs/nfsserver/nfs_nfsdserv.c @@ -0,0 +1,3367 @@ +/*- + * Copyright (c) 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Rick Macklem at The University of Guelph. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ + +#include <sys/cdefs.h> +__FBSDID("$FreeBSD$"); + +/* + * nfs version 2, 3 and 4 server calls to vnode ops + * - these routines generally have 3 phases + * 1 - break down and validate rpc request in mbuf list + * 2 - do the vnode ops for the request, usually by calling a nfsvno_XXX() + * function in nfsd_port.c + * 3 - build the rpc reply in an mbuf list + * For nfsv4, these functions are called for each Op within the Compound RPC. + */ + +#ifndef APPLEKEXT +#include <fs/nfs/nfsport.h> + +/* Global vars */ +extern u_int32_t newnfs_false, newnfs_true; +extern enum vtype nv34tov_type[8]; +extern struct timeval nfsboottime; +extern int nfs_rootfhset, nfsv4root_set; +#endif /* !APPLEKEXT */ + +/* + * This list defines the GSS mechanisms supported. + * (Don't ask me how you get these strings from the RFC stuff like + * iso(1), org(3)... but someone did it, so I don't need to know.) + */ +static struct nfsgss_mechlist nfsgss_mechlist[] = { + { 9, "\052\206\110\206\367\022\001\002\002", 11 }, + { 0, "", 0 }, +}; + +/* local functions */ +static void nfsrvd_symlinksub(struct nfsrv_descript *nd, struct nameidata *ndp, + struct nfsvattr *nvap, fhandle_t *fhp, vnode_t *vpp, + vnode_t dirp, struct nfsvattr *dirforp, struct nfsvattr *diraftp, + int *diraft_retp, nfsattrbit_t *attrbitp, + NFSACL_T *aclp, NFSPROC_T *p, struct nfsexstuff *exp, char *pathcp, + int pathlen); +static void nfsrvd_mkdirsub(struct nfsrv_descript *nd, struct nameidata *ndp, + struct nfsvattr *nvap, fhandle_t *fhp, vnode_t *vpp, + vnode_t dirp, struct nfsvattr *dirforp, struct nfsvattr *diraftp, + int *diraft_retp, nfsattrbit_t *attrbitp, NFSACL_T *aclp, + NFSPROC_T *p, struct nfsexstuff *exp); + +/* + * nfs access service (not a part of NFS V2) + */ +APPLESTATIC int +nfsrvd_access(struct nfsrv_descript *nd, __unused int isdgram, + vnode_t vp, NFSPROC_T *p, struct nfsexstuff *exp) +{ + u_int32_t *tl; + int getret, error = 0; + struct nfsvattr nva; + u_int32_t testmode, nfsmode, supported = 0; + + if (nd->nd_repstat) { + nfsrv_postopattr(nd, 1, &nva); + return (0); + } + NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED); + nfsmode = fxdr_unsigned(u_int32_t, *tl); + if ((nd->nd_flag & ND_NFSV4) && + (nfsmode & ~(NFSACCESS_READ | NFSACCESS_LOOKUP | + NFSACCESS_MODIFY | NFSACCESS_EXTEND | NFSACCESS_DELETE | + NFSACCESS_EXECUTE))) { + nd->nd_repstat = NFSERR_INVAL; + vput(vp); + return (0); + } + if (nfsmode & NFSACCESS_READ) { + supported |= NFSACCESS_READ; + if (nfsvno_accchk(vp, NFSV4ACE_READDATA, nd->nd_cred, exp, p, + NFSACCCHK_NOOVERRIDE, NFSACCCHK_VPISLOCKED)) + nfsmode &= ~NFSACCESS_READ; + } + if (nfsmode & NFSACCESS_MODIFY) { + supported |= NFSACCESS_MODIFY; + if (nfsvno_accchk(vp, NFSV4ACE_WRITEDATA, nd->nd_cred, exp, p, + NFSACCCHK_NOOVERRIDE, NFSACCCHK_VPISLOCKED)) + nfsmode &= ~NFSACCESS_MODIFY; + } + if (nfsmode & NFSACCESS_EXTEND) { + supported |= NFSACCESS_EXTEND; + if (nfsvno_accchk(vp, NFSV4ACE_APPENDDATA, nd->nd_cred, exp, p, + NFSACCCHK_NOOVERRIDE, NFSACCCHK_VPISLOCKED)) + nfsmode &= 
~NFSACCESS_EXTEND; + } + if (nfsmode & NFSACCESS_DELETE) { + supported |= NFSACCESS_DELETE; + if (nfsvno_accchk(vp, NFSV4ACE_DELETE, nd->nd_cred, exp, p, + NFSACCCHK_NOOVERRIDE, NFSACCCHK_VPISLOCKED)) + nfsmode &= ~NFSACCESS_DELETE; + } + if (vnode_vtype(vp) == VDIR) + testmode = NFSACCESS_LOOKUP; + else + testmode = NFSACCESS_EXECUTE; + if (nfsmode & testmode) { + supported |= (nfsmode & testmode); + if (nfsvno_accchk(vp, NFSV4ACE_EXECUTE, nd->nd_cred, exp, p, + NFSACCCHK_NOOVERRIDE, NFSACCCHK_VPISLOCKED)) + nfsmode &= ~testmode; + } + nfsmode &= supported; + if (nd->nd_flag & ND_NFSV3) { + getret = nfsvno_getattr(vp, &nva, nd->nd_cred, p); + nfsrv_postopattr(nd, getret, &nva); + } + vput(vp); + if (nd->nd_flag & ND_NFSV4) { + NFSM_BUILD(tl, u_int32_t *, 2 * NFSX_UNSIGNED); + *tl++ = txdr_unsigned(supported); + } else + NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED); + *tl = txdr_unsigned(nfsmode); + return (0); +nfsmout: + vput(vp); + return (error); +} + +/* + * nfs getattr service + */ +APPLESTATIC int +nfsrvd_getattr(struct nfsrv_descript *nd, int isdgram, + vnode_t vp, NFSPROC_T *p, __unused struct nfsexstuff *exp) +{ + struct nfsvattr nva; + fhandle_t fh; + int error = 0; + struct nfsreferral *refp; + nfsattrbit_t attrbits; + + if (nd->nd_repstat) + return (0); + if (nd->nd_flag & ND_NFSV4) { + error = nfsrv_getattrbits(nd, &attrbits, NULL, NULL); + if (error) { + vput(vp); + return (error); + } + + /* + * Check for a referral. + */ + refp = nfsv4root_getreferral(vp, NULL, 0); + if (refp != NULL) { + (void) nfsrv_putreferralattr(nd, &attrbits, refp, 1, + &nd->nd_repstat); + vput(vp); + return (0); + } + if (!nd->nd_repstat) + nd->nd_repstat = nfsvno_accchk(vp, + NFSV4ACE_READATTRIBUTES, + nd->nd_cred, exp, p, + NFSACCCHK_NOOVERRIDE, NFSACCCHK_VPISLOCKED); + } + if (!nd->nd_repstat) + nd->nd_repstat = nfsvno_getattr(vp, &nva, nd->nd_cred, p); + if (!nd->nd_repstat) { + if (nd->nd_flag & ND_NFSV4) { + if (NFSISSET_ATTRBIT(&attrbits, NFSATTRBIT_FILEHANDLE)) + nd->nd_repstat = nfsvno_getfh(vp, &fh, p); + if (!nd->nd_repstat) + nd->nd_repstat = nfsrv_checkgetattr(nd, vp, + &nva, &attrbits, nd->nd_cred, p); + NFSVOPUNLOCK(vp, 0, p); + if (!nd->nd_repstat) + (void) nfsvno_fillattr(nd, vp, &nva, &fh, + 0, &attrbits, nd->nd_cred, p, isdgram, 1); + vrele(vp); + } else { + nfsrv_fillattr(nd, &nva); + vput(vp); + } + } else { + vput(vp); + } + return (0); +} + +/* + * nfs setattr service + */ +APPLESTATIC int +nfsrvd_setattr(struct nfsrv_descript *nd, __unused int isdgram, + vnode_t vp, NFSPROC_T *p, struct nfsexstuff *exp) +{ + struct nfsvattr nva, nva2; + u_int32_t *tl; + int preat_ret = 1, postat_ret = 1, gcheck = 0, error = 0; + struct timespec guard = { 0, 0 }; + nfsattrbit_t attrbits, retbits; + nfsv4stateid_t stateid; + NFSACL_T *aclp = NULL; + + if (nd->nd_repstat) { + nfsrv_wcc(nd, preat_ret, &nva2, postat_ret, &nva); + return (0); + } +#ifdef NFS4_ACL_EXTATTR_NAME + aclp = acl_alloc(); + aclp->acl_cnt = 0; +#endif + NFSVNO_ATTRINIT(&nva); + NFSZERO_ATTRBIT(&retbits); + if (nd->nd_flag & ND_NFSV4) { + NFSM_DISSECT(tl, u_int32_t *, NFSX_STATEID); + stateid.seqid = fxdr_unsigned(u_int32_t, *tl++); + NFSBCOPY((caddr_t)tl,(caddr_t)stateid.other,NFSX_STATEIDOTHER); + } + error = nfsrv_sattr(nd, &nva, &attrbits, aclp, p); + if (error) + goto nfsmout; + preat_ret = nfsvno_getattr(vp, &nva2, nd->nd_cred, p); + if (!nd->nd_repstat) + nd->nd_repstat = preat_ret; + if (nd->nd_flag & ND_NFSV3) { + NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED); + gcheck = fxdr_unsigned(int, *tl); + if (gcheck) { + 
NFSM_DISSECT(tl, u_int32_t *, 2 * NFSX_UNSIGNED); + fxdr_nfsv3time(tl, &guard); + } + if (!nd->nd_repstat && gcheck && + (nva2.na_ctime.tv_sec != guard.tv_sec || + nva2.na_ctime.tv_nsec != guard.tv_nsec)) + nd->nd_repstat = NFSERR_NOT_SYNC; + if (nd->nd_repstat) { + vput(vp); +#ifdef NFS4_ACL_EXTATTR_NAME + acl_free(aclp); +#endif + nfsrv_wcc(nd, preat_ret, &nva2, postat_ret, &nva); + return (0); + } + } else if (!nd->nd_repstat && (nd->nd_flag & ND_NFSV4)) + nd->nd_repstat = nfsrv_checkuidgid(nd, &nva); + + /* + * Now that we have all the fields, lets do it. + * If the size is being changed write access is required, otherwise + * just check for a read only file system. + */ + if (!nd->nd_repstat) { + if (NFSVNO_NOTSETSIZE(&nva)) { + if (NFSVNO_EXRDONLY(exp) || + (vfs_flags(vnode_mount(vp)) & MNT_RDONLY)) + nd->nd_repstat = EROFS; + } else { + if (vnode_vtype(vp) != VREG) + nd->nd_repstat = EINVAL; + else if (nva2.na_uid != nd->nd_cred->cr_uid || + NFSVNO_EXSTRICTACCESS(exp)) + nd->nd_repstat = nfsvno_accchk(vp, + NFSV4ACE_WRITEDATA, nd->nd_cred, exp, p, + NFSACCCHK_NOOVERRIDE,NFSACCCHK_VPISLOCKED); + } + } + if (!nd->nd_repstat && (nd->nd_flag & ND_NFSV4)) + nd->nd_repstat = nfsrv_checksetattr(vp, nd, &stateid, + &nva, &attrbits, exp, p); + + if (!nd->nd_repstat && (nd->nd_flag & ND_NFSV4)) { + /* + * For V4, try setting the attrbutes in sets, so that the + * reply bitmap will be correct for an error case. + */ + if (NFSISSET_ATTRBIT(&attrbits, NFSATTRBIT_OWNER) || + NFSISSET_ATTRBIT(&attrbits, NFSATTRBIT_OWNERGROUP)) { + NFSVNO_ATTRINIT(&nva2); + NFSVNO_SETATTRVAL(&nva2, uid, nva.na_uid); + NFSVNO_SETATTRVAL(&nva2, gid, nva.na_gid); + nd->nd_repstat = nfsvno_setattr(vp, &nva2, nd->nd_cred, p, + exp); + if (!nd->nd_repstat) { + if (NFSISSET_ATTRBIT(&attrbits, NFSATTRBIT_OWNER)) + NFSSETBIT_ATTRBIT(&retbits, NFSATTRBIT_OWNER); + if (NFSISSET_ATTRBIT(&attrbits, NFSATTRBIT_OWNERGROUP)) + NFSSETBIT_ATTRBIT(&retbits, NFSATTRBIT_OWNERGROUP); + } + } + if (!nd->nd_repstat && + NFSISSET_ATTRBIT(&attrbits, NFSATTRBIT_SIZE)) { + NFSVNO_ATTRINIT(&nva2); + NFSVNO_SETATTRVAL(&nva2, size, nva.na_size); + nd->nd_repstat = nfsvno_setattr(vp, &nva2, nd->nd_cred, p, + exp); + if (!nd->nd_repstat) + NFSSETBIT_ATTRBIT(&retbits, NFSATTRBIT_SIZE); + } + if (!nd->nd_repstat && + (NFSISSET_ATTRBIT(&attrbits, NFSATTRBIT_TIMEACCESSSET) || + NFSISSET_ATTRBIT(&attrbits, NFSATTRBIT_TIMEMODIFYSET))) { + NFSVNO_ATTRINIT(&nva2); + NFSVNO_SETATTRVAL(&nva2, atime, nva.na_atime); + NFSVNO_SETATTRVAL(&nva2, mtime, nva.na_mtime); + if (nva.na_vaflags & VA_UTIMES_NULL) { + nva2.na_vaflags |= VA_UTIMES_NULL; + NFSVNO_SETACTIVE(&nva2, vaflags); + } + nd->nd_repstat = nfsvno_setattr(vp, &nva2, nd->nd_cred, p, + exp); + if (!nd->nd_repstat) { + if (NFSISSET_ATTRBIT(&attrbits, NFSATTRBIT_TIMEACCESSSET)) + NFSSETBIT_ATTRBIT(&retbits, NFSATTRBIT_TIMEACCESSSET); + if (NFSISSET_ATTRBIT(&attrbits, NFSATTRBIT_TIMEMODIFYSET)) + NFSSETBIT_ATTRBIT(&retbits, NFSATTRBIT_TIMEMODIFYSET); + } + } + if (!nd->nd_repstat && + NFSISSET_ATTRBIT(&attrbits, NFSATTRBIT_MODE)) { + NFSVNO_ATTRINIT(&nva2); + NFSVNO_SETATTRVAL(&nva2, mode, nva.na_mode); + nd->nd_repstat = nfsvno_setattr(vp, &nva2, nd->nd_cred, p, + exp); + if (!nd->nd_repstat) + NFSSETBIT_ATTRBIT(&retbits, NFSATTRBIT_MODE); + } + +#ifdef NFS4_ACL_EXTATTR_NAME + if (!nd->nd_repstat && aclp->acl_cnt > 0 && + NFSISSET_ATTRBIT(&attrbits, NFSATTRBIT_ACL)) { + nd->nd_repstat = nfsrv_setacl(vp, aclp, nd->nd_cred, p); + if (!nd->nd_repstat) + NFSSETBIT_ATTRBIT(&retbits, NFSATTRBIT_ACL); + } 
+#endif + } else if (!nd->nd_repstat) { + nd->nd_repstat = nfsvno_setattr(vp, &nva, nd->nd_cred, p, + exp); + } + if (nd->nd_flag & (ND_NFSV2 | ND_NFSV3)) { + postat_ret = nfsvno_getattr(vp, &nva, nd->nd_cred, p); + if (!nd->nd_repstat) + nd->nd_repstat = postat_ret; + } + vput(vp); +#ifdef NFS4_ACL_EXTATTR_NAME + acl_free(aclp); +#endif + if (nd->nd_flag & ND_NFSV3) + nfsrv_wcc(nd, preat_ret, &nva2, postat_ret, &nva); + else if (nd->nd_flag & ND_NFSV4) + (void) nfsrv_putattrbit(nd, &retbits); + else if (!nd->nd_repstat) + nfsrv_fillattr(nd, &nva); + return (0); +nfsmout: + vput(vp); +#ifdef NFS4_ACL_EXTATTR_NAME + acl_free(aclp); +#endif + if (nd->nd_flag & ND_NFSV4) { + /* + * For all nd_repstat, the V4 reply includes a bitmap, + * even NFSERR_BADXDR, which is what this will end up + * returning. + */ + (void) nfsrv_putattrbit(nd, &retbits); + } + return (error); +} + +/* + * nfs lookup rpc + * (Also performs lookup parent for v4) + */ +APPLESTATIC int +nfsrvd_lookup(struct nfsrv_descript *nd, __unused int isdgram, + vnode_t dp, vnode_t *vpp, fhandle_t *fhp, NFSPROC_T *p, + __unused struct nfsexstuff *exp) +{ + struct nameidata named; + vnode_t vp, dirp = NULL; + int error, dattr_ret = 1; + struct nfsvattr nva, dattr; + char *bufp; + u_long *hashp; + + if (nd->nd_repstat) { + nfsrv_postopattr(nd, dattr_ret, &dattr); + return (0); + } + + /* + * For some reason, if dp is a symlink, the error + * returned is supposed to be NFSERR_SYMLINK and not NFSERR_NOTDIR. + */ + if (dp->v_type == VLNK && (nd->nd_flag & ND_NFSV4)) { + nd->nd_repstat = NFSERR_SYMLINK; + vrele(dp); + return (0); + } + + NFSNAMEICNDSET(&named.ni_cnd, nd->nd_cred, LOOKUP, + LOCKLEAF | SAVESTART); + nfsvno_setpathbuf(&named, &bufp, &hashp); + error = nfsrv_parsename(nd, bufp, hashp, &named.ni_pathlen); + if (error) { + vrele(dp); + nfsvno_relpathbuf(&named); + return (error); + } + if (!nd->nd_repstat) { + nd->nd_repstat = nfsvno_namei(nd, &named, dp, 0, exp, p, &dirp); + } else { + vrele(dp); + nfsvno_relpathbuf(&named); + } + if (nd->nd_repstat) { + if (dirp) { + if (nd->nd_flag & ND_NFSV3) + dattr_ret = nfsvno_getattr(dirp, &dattr, + nd->nd_cred, p); + vrele(dirp); + } + if (nd->nd_flag & ND_NFSV3) + nfsrv_postopattr(nd, dattr_ret, &dattr); + return (0); + } + if (named.ni_startdir) + vrele(named.ni_startdir); + nfsvno_relpathbuf(&named); + vp = named.ni_vp; + nd->nd_repstat = nfsvno_getfh(vp, fhp, p); + if (!(nd->nd_flag & ND_NFSV4) && !nd->nd_repstat) + nd->nd_repstat = nfsvno_getattr(vp, &nva, nd->nd_cred, p); + if (vpp) { + NFSVOPUNLOCK(vp, 0, p); + *vpp = vp; + } else { + vput(vp); + } + if (dirp) { + if (nd->nd_flag & ND_NFSV3) + dattr_ret = nfsvno_getattr(dirp, &dattr, nd->nd_cred, + p); + vrele(dirp); + } + if (nd->nd_repstat) { + if (nd->nd_flag & ND_NFSV3) + nfsrv_postopattr(nd, dattr_ret, &dattr); + return (0); + } + if (nd->nd_flag & ND_NFSV2) { + (void) nfsm_fhtom(nd, (u_int8_t *)fhp, 0, 0); + nfsrv_fillattr(nd, &nva); + } else if (nd->nd_flag & ND_NFSV3) { + (void) nfsm_fhtom(nd, (u_int8_t *)fhp, 0, 0); + nfsrv_postopattr(nd, 0, &nva); + nfsrv_postopattr(nd, dattr_ret, &dattr); + } + return (0); +} + +/* + * nfs readlink service + */ +APPLESTATIC int +nfsrvd_readlink(struct nfsrv_descript *nd, __unused int isdgram, + vnode_t vp, NFSPROC_T *p, __unused struct nfsexstuff *exp) +{ + u_int32_t *tl; + mbuf_t mp = NULL, mpend = NULL; + int getret = 1, len; + struct nfsvattr nva; + + if (nd->nd_repstat) { + nfsrv_postopattr(nd, getret, &nva); + return (0); + } + if (vnode_vtype(vp) != VLNK) { + if (nd->nd_flag & 
ND_NFSV2) + nd->nd_repstat = ENXIO; + else + nd->nd_repstat = EINVAL; + } + if (!nd->nd_repstat) + nd->nd_repstat = nfsvno_readlink(vp, nd->nd_cred, p, + &mp, &mpend, &len); + if (nd->nd_flag & ND_NFSV3) + getret = nfsvno_getattr(vp, &nva, nd->nd_cred, p); + vput(vp); + if (nd->nd_flag & ND_NFSV3) + nfsrv_postopattr(nd, getret, &nva); + if (nd->nd_repstat) + return (0); + NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED); + *tl = txdr_unsigned(len); + mbuf_setnext(nd->nd_mb, mp); + nd->nd_mb = mpend; + nd->nd_bpos = NFSMTOD(mpend, caddr_t) + mbuf_len(mpend); + return (0); +} + +/* + * nfs read service + */ +APPLESTATIC int +nfsrvd_read(struct nfsrv_descript *nd, __unused int isdgram, + vnode_t vp, NFSPROC_T *p, struct nfsexstuff *exp) +{ + u_int32_t *tl; + int error = 0, cnt, len, getret = 1, reqlen, eof = 0; + mbuf_t m2, m3; + struct nfsvattr nva; + off_t off = 0x0; + struct nfsstate st, *stp = &st; + struct nfslock lo, *lop = &lo; + nfsv4stateid_t stateid; + nfsquad_t clientid; + + if (nd->nd_repstat) { + nfsrv_postopattr(nd, getret, &nva); + return (0); + } + if (nd->nd_flag & ND_NFSV2) { + NFSM_DISSECT(tl, u_int32_t *, 2 * NFSX_UNSIGNED); + off = (off_t)fxdr_unsigned(u_int32_t, *tl++); + reqlen = fxdr_unsigned(int, *tl); + } else if (nd->nd_flag & ND_NFSV3) { + NFSM_DISSECT(tl, u_int32_t *, 3 * NFSX_UNSIGNED); + off = fxdr_hyper(tl); + tl += 2; + reqlen = fxdr_unsigned(int, *tl); + } else { + NFSM_DISSECT(tl, u_int32_t *, NFSX_STATEID + 3*NFSX_UNSIGNED); + reqlen = fxdr_unsigned(int, *(tl + 6)); + } + if (reqlen > NFS_SRVMAXDATA(nd)) { + reqlen = NFS_SRVMAXDATA(nd); + } else if (reqlen < 0) { + error = EBADRPC; + goto nfsmout; + } + if (nd->nd_flag & ND_NFSV4) { + stp->ls_flags = (NFSLCK_CHECK | NFSLCK_READACCESS); + lop->lo_flags = NFSLCK_READ; + stp->ls_ownerlen = 0; + stp->ls_op = NULL; + stp->ls_uid = nd->nd_cred->cr_uid; + stp->ls_stateid.seqid = fxdr_unsigned(u_int32_t, *tl++); + clientid.lval[0] = stp->ls_stateid.other[0] = *tl++; + clientid.lval[1] = stp->ls_stateid.other[1] = *tl++; + if (nd->nd_flag & ND_IMPLIEDCLID) { + if (nd->nd_clientid.qval != clientid.qval) + printf("EEK! multiple clids\n"); + } else { + nd->nd_flag |= ND_IMPLIEDCLID; + nd->nd_clientid.qval = clientid.qval; + } + stp->ls_stateid.other[2] = *tl++; + off = fxdr_hyper(tl); + lop->lo_first = off; + tl += 2; + lop->lo_end = off + reqlen; + /* + * Paranoia, just in case it wraps around. + */ + if (lop->lo_end < off) + lop->lo_end = NFS64BITSSET; + } + if (vnode_vtype(vp) != VREG) { + if (nd->nd_flag & ND_NFSV3) + nd->nd_repstat = EINVAL; + else + nd->nd_repstat = (vnode_vtype(vp) == VDIR) ? 
EISDIR : + EINVAL; + } + getret = nfsvno_getattr(vp, &nva, nd->nd_cred, p); + if (!nd->nd_repstat) + nd->nd_repstat = getret; + if (!nd->nd_repstat && + (nva.na_uid != nd->nd_cred->cr_uid || + NFSVNO_EXSTRICTACCESS(exp))) { + nd->nd_repstat = nfsvno_accchk(vp, NFSV4ACE_READDATA, + nd->nd_cred, exp, p, + NFSACCCHK_ALLOWOWNER, NFSACCCHK_VPISLOCKED); + if (nd->nd_repstat) + nd->nd_repstat = nfsvno_accchk(vp, NFSV4ACE_EXECUTE, + nd->nd_cred, exp, p, + NFSACCCHK_ALLOWOWNER, NFSACCCHK_VPISLOCKED); + } + if ((nd->nd_flag & ND_NFSV4) && !nd->nd_repstat) + nd->nd_repstat = nfsrv_lockctrl(vp, &stp, &lop, NULL, clientid, + &stateid, exp, nd, p); + if (nd->nd_repstat) { + vput(vp); + if (nd->nd_flag & ND_NFSV3) + nfsrv_postopattr(nd, getret, &nva); + return (0); + } + if (off >= nva.na_size) { + cnt = 0; + eof = 1; + } else if (reqlen == 0) + cnt = 0; + else if ((off + reqlen) > nva.na_size) + cnt = nva.na_size - off; + else + cnt = reqlen; + len = NFSM_RNDUP(cnt); + m3 = NULL; + if (cnt > 0) { + nd->nd_repstat = nfsvno_read(vp, off, cnt, nd->nd_cred, p, + &m3, &m2); + if (!(nd->nd_flag & ND_NFSV4)) { + getret = nfsvno_getattr(vp, &nva, nd->nd_cred, p); + if (!nd->nd_repstat) + nd->nd_repstat = getret; + } + if (nd->nd_repstat) { + vput(vp); + if (m3) + mbuf_freem(m3); + if (nd->nd_flag & ND_NFSV3) + nfsrv_postopattr(nd, getret, &nva); + return (0); + } + } + vput(vp); + if (nd->nd_flag & ND_NFSV2) { + nfsrv_fillattr(nd, &nva); + NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED); + } else { + if (nd->nd_flag & ND_NFSV3) { + nfsrv_postopattr(nd, getret, &nva); + NFSM_BUILD(tl, u_int32_t *, 3 * NFSX_UNSIGNED); + *tl++ = txdr_unsigned(cnt); + } else + NFSM_BUILD(tl, u_int32_t *, 2 * NFSX_UNSIGNED); + if (len < reqlen || eof) + *tl++ = newnfs_true; + else + *tl++ = newnfs_false; + } + *tl = txdr_unsigned(cnt); + if (m3) { + mbuf_setnext(nd->nd_mb, m3); + nd->nd_mb = m2; + nd->nd_bpos = NFSMTOD(m2, caddr_t) + mbuf_len(m2); + } + return (0); +nfsmout: + vput(vp); + return (error); +} + +/* + * nfs write service + */ +APPLESTATIC int +nfsrvd_write(struct nfsrv_descript *nd, __unused int isdgram, + vnode_t vp, NFSPROC_T *p, struct nfsexstuff *exp) +{ + int i, cnt; + u_int32_t *tl; + mbuf_t mp; + struct nfsvattr nva, forat; + int aftat_ret = 1, retlen, len, error = 0, forat_ret = 1; + int stable = NFSWRITE_FILESYNC; + off_t off; + struct nfsstate st, *stp = &st; + struct nfslock lo, *lop = &lo; + nfsv4stateid_t stateid; + nfsquad_t clientid; + + if (nd->nd_repstat) { + nfsrv_wcc(nd, forat_ret, &forat, aftat_ret, &nva); + return (0); + } + if (nd->nd_flag & ND_NFSV2) { + NFSM_DISSECT(tl, u_int32_t *, 4 * NFSX_UNSIGNED); + off = (off_t)fxdr_unsigned(u_int32_t, *++tl); + tl += 2; + retlen = len = fxdr_unsigned(int32_t, *tl); + } else if (nd->nd_flag & ND_NFSV3) { + NFSM_DISSECT(tl, u_int32_t *, 5 * NFSX_UNSIGNED); + off = fxdr_hyper(tl); + tl += 3; + stable = fxdr_unsigned(int, *tl++); + retlen = len = fxdr_unsigned(int32_t, *tl); + } else { + NFSM_DISSECT(tl, u_int32_t *, NFSX_STATEID + 4 * NFSX_UNSIGNED); + stp->ls_flags = (NFSLCK_CHECK | NFSLCK_WRITEACCESS); + lop->lo_flags = NFSLCK_WRITE; + stp->ls_ownerlen = 0; + stp->ls_op = NULL; + stp->ls_uid = nd->nd_cred->cr_uid; + stp->ls_stateid.seqid = fxdr_unsigned(u_int32_t, *tl++); + clientid.lval[0] = stp->ls_stateid.other[0] = *tl++; + clientid.lval[1] = stp->ls_stateid.other[1] = *tl++; + if (nd->nd_flag & ND_IMPLIEDCLID) { + if (nd->nd_clientid.qval != clientid.qval) + printf("EEK! 
multiple clids\n"); + } else { + nd->nd_flag |= ND_IMPLIEDCLID; + nd->nd_clientid.qval = clientid.qval; + } + stp->ls_stateid.other[2] = *tl++; + off = fxdr_hyper(tl); + lop->lo_first = off; + tl += 2; + stable = fxdr_unsigned(int, *tl++); + retlen = len = fxdr_unsigned(int32_t, *tl); + lop->lo_end = off + len; + /* + * Paranoia, just in case it wraps around, which shouldn't + * ever happen anyhow. + */ + if (lop->lo_end < lop->lo_first) + lop->lo_end = NFS64BITSSET; + } + + /* + * Loop through the mbuf chain, counting how many mbufs are a + * part of this write operation, so the iovec size is known. + */ + cnt = 0; + mp = nd->nd_md; + i = NFSMTOD(mp, caddr_t) + mbuf_len(mp) - nd->nd_dpos; + while (len > 0) { + if (i > 0) { + len -= i; + cnt++; + } + mp = mbuf_next(mp); + if (!mp) { + if (len > 0) { + error = EBADRPC; + goto nfsmout; + } + } else + i = mbuf_len(mp); + } + + if (retlen > NFS_MAXDATA || retlen < 0) + nd->nd_repstat = EIO; + if (vnode_vtype(vp) != VREG && !nd->nd_repstat) { + if (nd->nd_flag & ND_NFSV3) + nd->nd_repstat = EINVAL; + else + nd->nd_repstat = (vnode_vtype(vp) == VDIR) ? EISDIR : + EINVAL; + } + forat_ret = nfsvno_getattr(vp, &forat, nd->nd_cred, p); + if (!nd->nd_repstat) + nd->nd_repstat = forat_ret; + if (!nd->nd_repstat && + (forat.na_uid != nd->nd_cred->cr_uid || + NFSVNO_EXSTRICTACCESS(exp))) + nd->nd_repstat = nfsvno_accchk(vp, NFSV4ACE_WRITEDATA, + nd->nd_cred, exp, p, + NFSACCCHK_ALLOWOWNER, NFSACCCHK_VPISLOCKED); + if ((nd->nd_flag & ND_NFSV4) && !nd->nd_repstat) { + nd->nd_repstat = nfsrv_lockctrl(vp, &stp, &lop, NULL, clientid, + &stateid, exp, nd, p); + } + if (nd->nd_repstat) { + vput(vp); + if (nd->nd_flag & ND_NFSV3) + nfsrv_wcc(nd, forat_ret, &forat, aftat_ret, &nva); + return (0); + } + + /* + * For NFS Version 2, it is not obvious what a write of zero length + * should do, but I might as well be consistent with Version 3, + * which is to return ok so long as there are no permission problems. + */ + if (retlen > 0) { + nd->nd_repstat = nfsvno_write(vp, off, retlen, cnt, stable, + nd->nd_md, nd->nd_dpos, nd->nd_cred, p); + error = nfsm_advance(nd, NFSM_RNDUP(retlen), -1); + if (error) + panic("nfsrv_write mbuf"); + } + if (nd->nd_flag & ND_NFSV4) + aftat_ret = 0; + else + aftat_ret = nfsvno_getattr(vp, &nva, nd->nd_cred, p); + vput(vp); + if (!nd->nd_repstat) + nd->nd_repstat = aftat_ret; + if (nd->nd_flag & (ND_NFSV3 | ND_NFSV4)) { + if (nd->nd_flag & ND_NFSV3) + nfsrv_wcc(nd, forat_ret, &forat, aftat_ret, &nva); + if (nd->nd_repstat) + return (0); + NFSM_BUILD(tl, u_int32_t *, 4 * NFSX_UNSIGNED); + *tl++ = txdr_unsigned(retlen); + if (stable == NFSWRITE_UNSTABLE) + *tl++ = txdr_unsigned(stable); + else + *tl++ = txdr_unsigned(NFSWRITE_FILESYNC); + /* + * Actually, there is no need to txdr these fields, + * but it may make the values more human readable, + * for debugging purposes. + */ + *tl++ = txdr_unsigned(nfsboottime.tv_sec); + *tl = txdr_unsigned(nfsboottime.tv_usec); + } else if (!nd->nd_repstat) + nfsrv_fillattr(nd, &nva); + return (0); +nfsmout: + vput(vp); + return (error); +} + +/* + * nfs create service (creates regular files for V2 and V3. Spec. files for V2.) + * now does a truncate to 0 length via. setattr if it already exists + * The core creation routine has been extracted out into nfsrv_creatsub(), + * so it can also be used by nfsrv_open() for V4. 
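+ * (For an NFSv3 exclusive create, the verifier is expected to have been
+ * saved in the file's atime, so a mismatch between cverf and na_atime
+ * after the create gets reported back as EEXIST.)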
+ */ +APPLESTATIC int +nfsrvd_create(struct nfsrv_descript *nd, __unused int isdgram, + vnode_t dp, NFSPROC_T *p, struct nfsexstuff *exp) +{ + struct nfsvattr nva, dirfor, diraft; + struct nfsv2_sattr *sp; + struct nameidata named; + u_int32_t *tl; + int error = 0, tsize, dirfor_ret = 1, diraft_ret = 1; + int how = NFSCREATE_UNCHECKED, exclusive_flag = 0; + NFSDEV_T rdev = 0; + vnode_t vp = NULL, dirp = NULL; + u_char cverf[NFSX_VERF], *cp; + fhandle_t fh; + char *bufp; + u_long *hashp; + enum vtype vtyp; + + if (nd->nd_repstat) { + nfsrv_wcc(nd, dirfor_ret, &dirfor, diraft_ret, &diraft); + return (0); + } + NFSNAMEICNDSET(&named.ni_cnd, nd->nd_cred, CREATE, + LOCKPARENT | LOCKLEAF | SAVESTART); + nfsvno_setpathbuf(&named, &bufp, &hashp); + error = nfsrv_parsename(nd, bufp, hashp, &named.ni_pathlen); + if (error) { + vput(dp); + nfsvno_relpathbuf(&named); + return (error); + } + if (!nd->nd_repstat) { + NFSVNO_ATTRINIT(&nva); + if (nd->nd_flag & ND_NFSV2) { + NFSM_DISSECT(sp, struct nfsv2_sattr *, NFSX_V2SATTR); + vtyp = IFTOVT(fxdr_unsigned(u_int32_t, sp->sa_mode)); + if (vtyp == VNON) + vtyp = VREG; + NFSVNO_SETATTRVAL(&nva, type, vtyp); + NFSVNO_SETATTRVAL(&nva, mode, + nfstov_mode(sp->sa_mode)); + switch (nva.na_type) { + case VREG: + tsize = fxdr_unsigned(int32_t, sp->sa_size); + if (tsize != -1) + NFSVNO_SETATTRVAL(&nva, size, + (u_quad_t)tsize); + break; + case VCHR: + case VBLK: + case VFIFO: + rdev = fxdr_unsigned(NFSDEV_T, sp->sa_size); + break; + default: + break; + }; + } else { + NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED); + how = fxdr_unsigned(int, *tl); + switch (how) { + case NFSCREATE_GUARDED: + case NFSCREATE_UNCHECKED: + error = nfsrv_sattr(nd, &nva, NULL, NULL, p); + if (error) + goto nfsmout; + break; + case NFSCREATE_EXCLUSIVE: + NFSM_DISSECT(cp, u_char *, NFSX_VERF); + NFSBCOPY(cp, cverf, NFSX_VERF); + exclusive_flag = 1; + break; + }; + NFSVNO_SETATTRVAL(&nva, type, VREG); + } + } + if (nd->nd_repstat) { + nfsvno_relpathbuf(&named); + if (nd->nd_flag & ND_NFSV3) { + dirfor_ret = nfsvno_getattr(dp, &dirfor, nd->nd_cred, + p); + nfsrv_wcc(nd, dirfor_ret, &dirfor, diraft_ret, + &diraft); + } + vput(dp); + return (0); + } + + nd->nd_repstat = nfsvno_namei(nd, &named, dp, 1, exp, p, &dirp); + if (dirp) { + if (nd->nd_flag & ND_NFSV2) { + vrele(dirp); + dirp = NULL; + } else { + dirfor_ret = nfsvno_getattr(dirp, &dirfor, nd->nd_cred, + p); + } + } + if (nd->nd_repstat) { + if (nd->nd_flag & ND_NFSV3) + nfsrv_wcc(nd, dirfor_ret, &dirfor, diraft_ret, + &diraft); + if (dirp) + vrele(dirp); + return (0); + } + + if (!(nd->nd_flag & ND_NFSV2)) { + switch (how) { + case NFSCREATE_GUARDED: + if (named.ni_vp) + nd->nd_repstat = EEXIST; + break; + case NFSCREATE_UNCHECKED: + break; + case NFSCREATE_EXCLUSIVE: + if (named.ni_vp == NULL) + NFSVNO_SETATTRVAL(&nva, mode, 0); + break; + }; + } + + /* + * Iff doesn't exist, create it + * otherwise just truncate to 0 length + * should I set the mode too ? 
+ */ + nd->nd_repstat = nfsvno_createsub(nd, &named, &vp, &nva, + &exclusive_flag, cverf, rdev, p, exp); + + if (!nd->nd_repstat) { + nd->nd_repstat = nfsvno_getfh(vp, &fh, p); + if (!nd->nd_repstat) + nd->nd_repstat = nfsvno_getattr(vp, &nva, nd->nd_cred, + p); + vput(vp); + } + if (nd->nd_flag & ND_NFSV2) { + if (!nd->nd_repstat) { + (void) nfsm_fhtom(nd, (u_int8_t *)&fh, 0, 0); + nfsrv_fillattr(nd, &nva); + } + } else { + if (exclusive_flag && !nd->nd_repstat && + NFSBCMP(cverf, (caddr_t)&nva.na_atime, NFSX_VERF)) + nd->nd_repstat = EEXIST; + diraft_ret = nfsvno_getattr(dirp, &diraft, nd->nd_cred, p); + vrele(dirp); + if (!nd->nd_repstat) { + (void) nfsm_fhtom(nd, (u_int8_t *)&fh, 0, 1); + nfsrv_postopattr(nd, 0, &nva); + } + nfsrv_wcc(nd, dirfor_ret, &dirfor, diraft_ret, &diraft); + } + return (0); +nfsmout: + vput(dp); + nfsvno_relpathbuf(&named); + return (error); +} + +/* + * nfs v3 mknod service (and v4 create) + */ +APPLESTATIC int +nfsrvd_mknod(struct nfsrv_descript *nd, __unused int isdgram, + vnode_t dp, vnode_t *vpp, fhandle_t *fhp, NFSPROC_T *p, + struct nfsexstuff *exp) +{ + struct nfsvattr nva, dirfor, diraft; + u_int32_t *tl; + struct nameidata named; + int error = 0, dirfor_ret = 1, diraft_ret = 1, pathlen; + u_int32_t major, minor; + enum vtype vtyp = VNON; + nfstype nfs4type = NFNON; + vnode_t vp, dirp = NULL; + nfsattrbit_t attrbits; + char *bufp = NULL, *pathcp = NULL; + u_long *hashp, cnflags; + NFSACL_T *aclp = NULL; + + NFSVNO_ATTRINIT(&nva); + cnflags = (LOCKPARENT | SAVESTART); + if (nd->nd_repstat) { + nfsrv_wcc(nd, dirfor_ret, &dirfor, diraft_ret, &diraft); + return (0); + } +#ifdef NFS4_ACL_EXTATTR_NAME + aclp = acl_alloc(); + aclp->acl_cnt = 0; +#endif + + /* + * For V4, the creation stuff is here, Yuck! + */ + if (nd->nd_flag & ND_NFSV4) { + NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED); + vtyp = nfsv34tov_type(*tl); + nfs4type = fxdr_unsigned(nfstype, *tl); + switch (nfs4type) { + case NFLNK: + error = nfsvno_getsymlink(nd, &nva, p, &pathcp, + &pathlen); + if (error) { + vrele(dp); +#ifdef NFS4_ACL_EXTATTR_NAME + acl_free(aclp); +#endif + return (error); + } + break; + case NFCHR: + case NFBLK: + NFSM_DISSECT(tl, u_int32_t *, 2 * NFSX_UNSIGNED); + major = fxdr_unsigned(u_int32_t, *tl++); + minor = fxdr_unsigned(u_int32_t, *tl); + nva.na_rdev = NFSMAKEDEV(major, minor); + break; + case NFSOCK: + case NFFIFO: + break; + case NFDIR: + cnflags = LOCKPARENT; + break; + default: + nd->nd_repstat = NFSERR_BADTYPE; + vrele(dp); +#ifdef NFS4_ACL_EXTATTR_NAME + acl_free(aclp); +#endif + return (0); + }; + } + NFSNAMEICNDSET(&named.ni_cnd, nd->nd_cred, CREATE, cnflags); + nfsvno_setpathbuf(&named, &bufp, &hashp); + error = nfsrv_parsename(nd, bufp, hashp, &named.ni_pathlen); + if (error) { + vrele(dp); +#ifdef NFS4_ACL_EXTATTR_NAME + acl_free(aclp); +#endif + nfsvno_relpathbuf(&named); + if (pathcp) + FREE(pathcp, M_TEMP); + return (error); + } + if (!nd->nd_repstat) { + if (nd->nd_flag & ND_NFSV3) { + NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED); + vtyp = nfsv34tov_type(*tl); + } + error = nfsrv_sattr(nd, &nva, &attrbits, aclp, p); + if (error) { + vrele(dp); +#ifdef NFS4_ACL_EXTATTR_NAME + acl_free(aclp); +#endif + nfsvno_relpathbuf(&named); + if (pathcp) + FREE(pathcp, M_TEMP); + return (error); + } + nva.na_type = vtyp; + if (!nd->nd_repstat && (nd->nd_flag & ND_NFSV3) && + (vtyp == VCHR || vtyp == VBLK)) { + NFSM_DISSECT(tl, u_int32_t *, 2 * NFSX_UNSIGNED); + major = fxdr_unsigned(u_int32_t, *tl++); + minor = fxdr_unsigned(u_int32_t, *tl); + nva.na_rdev = 
NFSMAKEDEV(major, minor); + } + } + + dirfor_ret = nfsvno_getattr(dp, &dirfor, nd->nd_cred, p); + if (!nd->nd_repstat && (nd->nd_flag & ND_NFSV4)) { + if (!dirfor_ret && NFSVNO_ISSETGID(&nva) && + dirfor.na_gid == nva.na_gid) + NFSVNO_UNSET(&nva, gid); + nd->nd_repstat = nfsrv_checkuidgid(nd, &nva); + } + if (nd->nd_repstat) { + vrele(dp); +#ifdef NFS4_ACL_EXTATTR_NAME + acl_free(aclp); +#endif + nfsvno_relpathbuf(&named); + if (pathcp) + FREE(pathcp, M_TEMP); + if (nd->nd_flag & ND_NFSV3) + nfsrv_wcc(nd, dirfor_ret, &dirfor, diraft_ret, + &diraft); + return (0); + } + + /* + * Yuck! For V4, mkdir and link are here and some V4 clients don't fill + * in va_mode, so we'll have to set a default here. + */ + if (NFSVNO_NOTSETMODE(&nva)) { + if (vtyp == VLNK) + nva.na_mode = 0755; + else + nva.na_mode = 0400; + } + + if (vtyp == VDIR) + named.ni_cnd.cn_flags |= WILLBEDIR; + nd->nd_repstat = nfsvno_namei(nd, &named, dp, 0, exp, p, &dirp); + if (nd->nd_repstat) { + if (dirp) { + if (nd->nd_flag & ND_NFSV3) + dirfor_ret = nfsvno_getattr(dirp, &dirfor, + nd->nd_cred, p); + vrele(dirp); + } +#ifdef NFS4_ACL_EXTATTR_NAME + acl_free(aclp); +#endif + if (nd->nd_flag & ND_NFSV3) + nfsrv_wcc(nd, dirfor_ret, &dirfor, diraft_ret, + &diraft); + return (0); + } + if (dirp) + dirfor_ret = nfsvno_getattr(dirp, &dirfor, nd->nd_cred, p); + + if ((nd->nd_flag & ND_NFSV4) && (vtyp == VDIR || vtyp == VLNK)) { + if (vtyp == VDIR) { + nfsrvd_mkdirsub(nd, &named, &nva, fhp, vpp, dirp, + &dirfor, &diraft, &diraft_ret, &attrbits, aclp, p, + exp); +#ifdef NFS4_ACL_EXTATTR_NAME + acl_free(aclp); +#endif + return (0); + } else if (vtyp == VLNK) { + nfsrvd_symlinksub(nd, &named, &nva, fhp, vpp, dirp, + &dirfor, &diraft, &diraft_ret, &attrbits, + aclp, p, exp, pathcp, pathlen); +#ifdef NFS4_ACL_EXTATTR_NAME + acl_free(aclp); +#endif + FREE(pathcp, M_TEMP); + return (0); + } + } + + nd->nd_repstat = nfsvno_mknod(&named, &nva, nd->nd_cred, p); + if (!nd->nd_repstat) { + vp = named.ni_vp; + nfsrv_fixattr(nd, vp, &nva, aclp, p, &attrbits, exp); + nd->nd_repstat = nfsvno_getfh(vp, fhp, p); + if ((nd->nd_flag & ND_NFSV3) && !nd->nd_repstat) + nd->nd_repstat = nfsvno_getattr(vp, &nva, nd->nd_cred, + p); + if (vpp) { + NFSVOPUNLOCK(vp, 0, p); + *vpp = vp; + } else { + vput(vp); + } + } + + diraft_ret = nfsvno_getattr(dirp, &diraft, nd->nd_cred, p); + vrele(dirp); + if (!nd->nd_repstat) { + if (nd->nd_flag & ND_NFSV3) { + (void) nfsm_fhtom(nd, (u_int8_t *)fhp, 0, 1); + nfsrv_postopattr(nd, 0, &nva); + } else { + NFSM_BUILD(tl, u_int32_t *, 5 * NFSX_UNSIGNED); + *tl++ = newnfs_false; + txdr_hyper(dirfor.na_filerev, tl); + tl += 2; + txdr_hyper(diraft.na_filerev, tl); + (void) nfsrv_putattrbit(nd, &attrbits); + } + } + if (nd->nd_flag & ND_NFSV3) + nfsrv_wcc(nd, dirfor_ret, &dirfor, diraft_ret, &diraft); +#ifdef NFS4_ACL_EXTATTR_NAME + acl_free(aclp); +#endif + return (0); +nfsmout: + vrele(dp); +#ifdef NFS4_ACL_EXTATTR_NAME + acl_free(aclp); +#endif + if (bufp) + nfsvno_relpathbuf(&named); + if (pathcp) + FREE(pathcp, M_TEMP); + return (error); +} + +/* + * nfs remove service + */ +APPLESTATIC int +nfsrvd_remove(struct nfsrv_descript *nd, __unused int isdgram, + vnode_t dp, NFSPROC_T *p, struct nfsexstuff *exp) +{ + struct nameidata named; + u_int32_t *tl; + int error, dirfor_ret = 1, diraft_ret = 1; + vnode_t dirp = NULL; + struct nfsvattr dirfor, diraft; + char *bufp; + u_long *hashp; + + if (nd->nd_repstat) { + nfsrv_wcc(nd, dirfor_ret, &dirfor, diraft_ret, &diraft); + return (0); + } + NFSNAMEICNDSET(&named.ni_cnd, nd->nd_cred, 
DELETE, + LOCKPARENT | LOCKLEAF); + nfsvno_setpathbuf(&named, &bufp, &hashp); + error = nfsrv_parsename(nd, bufp, hashp, &named.ni_pathlen); + if (error) { + vput(dp); + nfsvno_relpathbuf(&named); + return (error); + } + if (!nd->nd_repstat) { + nd->nd_repstat = nfsvno_namei(nd, &named, dp, 1, exp, p, &dirp); + } else { + vput(dp); + nfsvno_relpathbuf(&named); + } + if (dirp) { + if (!(nd->nd_flag & ND_NFSV2)) { + dirfor_ret = nfsvno_getattr(dirp, &dirfor, + nd->nd_cred, p); + } else { + vrele(dirp); + dirp = NULL; + } + } + if (!nd->nd_repstat) { + if (nd->nd_flag & ND_NFSV4) { + if (vnode_vtype(named.ni_vp) == VDIR) + nd->nd_repstat = nfsvno_rmdirsub(&named, 1, + nd->nd_cred, p, exp); + else + nd->nd_repstat = nfsvno_removesub(&named, 1, + nd->nd_cred, p, exp); + } else if (nd->nd_procnum == NFSPROC_RMDIR) { + nd->nd_repstat = nfsvno_rmdirsub(&named, 0, + nd->nd_cred, p, exp); + } else { + nd->nd_repstat = nfsvno_removesub(&named, 0, + nd->nd_cred, p, exp); + } + } + if (!(nd->nd_flag & ND_NFSV2)) { + if (dirp) { + diraft_ret = nfsvno_getattr(dirp, &diraft, nd->nd_cred, + p); + vrele(dirp); + } + if (nd->nd_flag & ND_NFSV3) { + nfsrv_wcc(nd, dirfor_ret, &dirfor, diraft_ret, + &diraft); + } else if (!nd->nd_repstat) { + NFSM_BUILD(tl, u_int32_t *, 5 * NFSX_UNSIGNED); + *tl++ = newnfs_false; + txdr_hyper(dirfor.na_filerev, tl); + tl += 2; + txdr_hyper(diraft.na_filerev, tl); + } + } + return (0); +} + +/* + * nfs rename service + */ +APPLESTATIC int +nfsrvd_rename(struct nfsrv_descript *nd, int isdgram, + vnode_t dp, vnode_t todp, NFSPROC_T *p, struct nfsexstuff *exp, + struct nfsexstuff *toexp) +{ + u_int32_t *tl; + int error, fdirfor_ret = 1, fdiraft_ret = 1; + int tdirfor_ret = 1, tdiraft_ret = 1; + struct nameidata fromnd, tond; + vnode_t fdirp = NULL, tdirp = NULL, tdp = NULL; + struct nfsvattr fdirfor, fdiraft, tdirfor, tdiraft; + struct nfsexstuff tnes; + struct nfsrvfh tfh; + mount_t mp = NULL; + char *bufp, *tbufp = NULL; + u_long *hashp; + + if (nd->nd_repstat) { + nfsrv_wcc(nd, fdirfor_ret, &fdirfor, fdiraft_ret, &fdiraft); + nfsrv_wcc(nd, tdirfor_ret, &tdirfor, tdiraft_ret, &tdiraft); + return (0); + } + if (!(nd->nd_flag & ND_NFSV2)) + fdirfor_ret = nfsvno_getattr(dp, &fdirfor, nd->nd_cred, p); + tond.ni_cnd.cn_nameiop = 0; + tond.ni_startdir = NULL; + NFSNAMEICNDSET(&fromnd.ni_cnd, nd->nd_cred, DELETE, WANTPARENT | SAVESTART); + nfsvno_setpathbuf(&fromnd, &bufp, &hashp); + error = nfsrv_parsename(nd, bufp, hashp, &fromnd.ni_pathlen); + if (error) { + vput(dp); + if (todp) + vrele(todp); + nfsvno_relpathbuf(&fromnd); + return (error); + } + if (nd->nd_flag & ND_NFSV4) { + tdp = todp; + tnes = *toexp; + tdirfor_ret = nfsvno_getattr(tdp, &tdirfor, nd->nd_cred, p); + } else { + error = nfsrv_mtofh(nd, &tfh); + if (error) { + vput(dp); + /* todp is always NULL except NFSv4 */ + nfsvno_relpathbuf(&fromnd); + return (error); + } + nd->nd_cred->cr_uid = nd->nd_saveduid; + /* Won't lock vfs if already locked, mp == NULL */ + tnes.nes_vfslocked = exp->nes_vfslocked; + nfsd_fhtovp(nd, &tfh, &tdp, &tnes, &mp, 0, p); + if (tdp) { + tdirfor_ret = nfsvno_getattr(tdp, &tdirfor, nd->nd_cred, + p); + NFSVOPUNLOCK(tdp, 0, p); + } + } + NFSNAMEICNDSET(&tond.ni_cnd, nd->nd_cred, RENAME, LOCKPARENT | LOCKLEAF | NOCACHE | SAVESTART); + nfsvno_setpathbuf(&tond, &tbufp, &hashp); + if (!nd->nd_repstat) { + error = nfsrv_parsename(nd, tbufp, hashp, &tond.ni_pathlen); + if (error) { + if (tdp) { + if (tnes.nes_vfslocked && !exp->nes_vfslocked && + !(nd->nd_flag & ND_NFSV4)) + nfsvno_unlockvfs(mp); + 
vrele(tdp); + } + vput(dp); + nfsvno_relpathbuf(&fromnd); + nfsvno_relpathbuf(&tond); + return (error); + } + } + if (nd->nd_repstat) { + if (nd->nd_flag & ND_NFSV3) { + nfsrv_wcc(nd, fdirfor_ret, &fdirfor, fdiraft_ret, + &fdiraft); + nfsrv_wcc(nd, tdirfor_ret, &tdirfor, tdiraft_ret, + &tdiraft); + } + if (tdp) { + if (tnes.nes_vfslocked && !exp->nes_vfslocked && + !(nd->nd_flag & ND_NFSV4)) + nfsvno_unlockvfs(mp); + vrele(tdp); + } + vput(dp); + nfsvno_relpathbuf(&fromnd); + nfsvno_relpathbuf(&tond); + return (0); + } + + /* + * Done parsing, now down to business. + */ + nd->nd_repstat = nfsvno_namei(nd, &fromnd, dp, 1, exp, p, &fdirp); + if (nd->nd_repstat) { + if (nd->nd_flag & ND_NFSV3) { + nfsrv_wcc(nd, fdirfor_ret, &fdirfor, fdiraft_ret, + &fdiraft); + nfsrv_wcc(nd, tdirfor_ret, &tdirfor, tdiraft_ret, + &tdiraft); + } + if (fdirp) + vrele(fdirp); + if (tdp) { + if (tnes.nes_vfslocked && !exp->nes_vfslocked && + !(nd->nd_flag & ND_NFSV4)) + nfsvno_unlockvfs(mp); + vrele(tdp); + } + nfsvno_relpathbuf(&tond); + return (0); + } + if (vnode_vtype(fromnd.ni_vp) == VDIR) + tond.ni_cnd.cn_flags |= WILLBEDIR; + nd->nd_repstat = nfsvno_namei(nd, &tond, tdp, 0, &tnes, p, &tdirp); + nd->nd_repstat = nfsvno_rename(&fromnd, &tond, nd->nd_repstat, + nd->nd_flag, nd->nd_cred, p); + if (fdirp) + fdiraft_ret = nfsvno_getattr(fdirp, &fdiraft, nd->nd_cred, p); + if (tdirp) + tdiraft_ret = nfsvno_getattr(tdirp, &tdiraft, nd->nd_cred, p); + if (tnes.nes_vfslocked && !exp->nes_vfslocked && + !(nd->nd_flag & ND_NFSV4)) + nfsvno_unlockvfs(mp); + if (fdirp) + vrele(fdirp); + if (tdirp) + vrele(tdirp); + if (nd->nd_flag & ND_NFSV3) { + nfsrv_wcc(nd, fdirfor_ret, &fdirfor, fdiraft_ret, &fdiraft); + nfsrv_wcc(nd, tdirfor_ret, &tdirfor, tdiraft_ret, &tdiraft); + } else if ((nd->nd_flag & ND_NFSV4) && !nd->nd_repstat) { + NFSM_BUILD(tl, u_int32_t *, 10 * NFSX_UNSIGNED); + *tl++ = newnfs_false; + txdr_hyper(fdirfor.na_filerev, tl); + tl += 2; + txdr_hyper(fdiraft.na_filerev, tl); + tl += 2; + *tl++ = newnfs_false; + txdr_hyper(tdirfor.na_filerev, tl); + tl += 2; + txdr_hyper(tdiraft.na_filerev, tl); + } + return (0); +} + +/* + * nfs link service + */ +APPLESTATIC int +nfsrvd_link(struct nfsrv_descript *nd, int isdgram, + vnode_t vp, vnode_t tovp, NFSPROC_T *p, struct nfsexstuff *exp, + struct nfsexstuff *toexp) +{ + struct nameidata named; + u_int32_t *tl; + int error = 0, dirfor_ret = 1, diraft_ret = 1, getret = 1; + vnode_t dirp = NULL, dp = NULL; + struct nfsvattr dirfor, diraft, at; + struct nfsexstuff tnes; + struct nfsrvfh dfh; + mount_t mp = NULL; + char *bufp; + u_long *hashp; + + if (nd->nd_repstat) { + nfsrv_postopattr(nd, getret, &at); + nfsrv_wcc(nd, dirfor_ret, &dirfor, diraft_ret, &diraft); + return (0); + } + NFSVOPUNLOCK(vp, 0, p); + if (vnode_vtype(vp) == VDIR) { + if (nd->nd_flag & ND_NFSV4) + nd->nd_repstat = NFSERR_ISDIR; + else + nd->nd_repstat = NFSERR_INVAL; + if (tovp) + vrele(tovp); + } else if (vnode_vtype(vp) == VLNK) { + if (nd->nd_flag & ND_NFSV2) + nd->nd_repstat = NFSERR_INVAL; + else + nd->nd_repstat = NFSERR_NOTSUPP; + if (tovp) + vrele(tovp); + } + if (!nd->nd_repstat) { + if (nd->nd_flag & ND_NFSV4) { + dp = tovp; + tnes = *toexp; + } else { + error = nfsrv_mtofh(nd, &dfh); + if (error) { + vrele(vp); + /* tovp is always NULL unless NFSv4 */ + return (error); + } + /* Won't lock vfs if already locked, mp == NULL */ + tnes.nes_vfslocked = exp->nes_vfslocked; + nfsd_fhtovp(nd, &dfh, &dp, &tnes, &mp, 0, p); + if (dp) + NFSVOPUNLOCK(dp, 0, p); + } + } + NFSNAMEICNDSET(&named.ni_cnd, 
nd->nd_cred, CREATE, LOCKPARENT); + if (!nd->nd_repstat) { + nfsvno_setpathbuf(&named, &bufp, &hashp); + error = nfsrv_parsename(nd, bufp, hashp, &named.ni_pathlen); + if (error) { + vrele(vp); + if (dp) { + if (tnes.nes_vfslocked && !exp->nes_vfslocked && + !(nd->nd_flag & ND_NFSV4)) + nfsvno_unlockvfs(mp); + vrele(dp); + } + nfsvno_relpathbuf(&named); + return (error); + } + if (!nd->nd_repstat) { + nd->nd_repstat = nfsvno_namei(nd, &named, dp, 0, &tnes, + p, &dirp); + } else { + if (dp) + vrele(dp); + nfsvno_relpathbuf(&named); + } + } + if (dirp) { + if (nd->nd_flag & ND_NFSV2) { + vrele(dirp); + dirp = NULL; + } else { + dirfor_ret = nfsvno_getattr(dirp, &dirfor, + nd->nd_cred, p); + } + } + if (!nd->nd_repstat) + nd->nd_repstat = nfsvno_link(&named, vp, nd->nd_cred, p, exp); + if (nd->nd_flag & ND_NFSV3) + getret = nfsvno_getattr(vp, &at, nd->nd_cred, p); + if (dirp) { + diraft_ret = nfsvno_getattr(dirp, &diraft, nd->nd_cred, p); + vrele(dirp); + } + if (tnes.nes_vfslocked && !exp->nes_vfslocked && + !(nd->nd_flag & ND_NFSV4)) + nfsvno_unlockvfs(mp); + vrele(vp); + if (nd->nd_flag & ND_NFSV3) { + nfsrv_postopattr(nd, getret, &at); + nfsrv_wcc(nd, dirfor_ret, &dirfor, diraft_ret, &diraft); + } else if ((nd->nd_flag & ND_NFSV4) && !nd->nd_repstat) { + NFSM_BUILD(tl, u_int32_t *, 5 * NFSX_UNSIGNED); + *tl++ = newnfs_false; + txdr_hyper(dirfor.na_filerev, tl); + tl += 2; + txdr_hyper(diraft.na_filerev, tl); + } + return (0); +} + +/* + * nfs symbolic link service + */ +APPLESTATIC int +nfsrvd_symlink(struct nfsrv_descript *nd, __unused int isdgram, + vnode_t dp, vnode_t *vpp, fhandle_t *fhp, NFSPROC_T *p, + struct nfsexstuff *exp) +{ + struct nfsvattr nva, dirfor, diraft; + struct nameidata named; + int error, dirfor_ret = 1, diraft_ret = 1, pathlen; + vnode_t dirp = NULL; + char *bufp, *pathcp = NULL; + u_long *hashp; + + if (nd->nd_repstat) { + nfsrv_wcc(nd, dirfor_ret, &dirfor, diraft_ret, &diraft); + return (0); + } + if (vpp) + *vpp = NULL; + NFSVNO_ATTRINIT(&nva); + NFSNAMEICNDSET(&named.ni_cnd, nd->nd_cred, CREATE, + LOCKPARENT | SAVESTART); + nfsvno_setpathbuf(&named, &bufp, &hashp); + error = nfsrv_parsename(nd, bufp, hashp, &named.ni_pathlen); + if (!error && !nd->nd_repstat) + error = nfsvno_getsymlink(nd, &nva, p, &pathcp, &pathlen); + if (error) { + vrele(dp); + nfsvno_relpathbuf(&named); + return (error); + } + if (!nd->nd_repstat) { + nd->nd_repstat = nfsvno_namei(nd, &named, dp, 0, exp, p, &dirp); + } else { + vrele(dp); + nfsvno_relpathbuf(&named); + } + if (dirp != NULL && !(nd->nd_flag & ND_NFSV3)) { + vrele(dirp); + dirp = NULL; + } + + /* + * And call nfsrvd_symlinksub() to do the common code. It will + * return EBADRPC upon a parsing error, 0 otherwise. + */ + if (!nd->nd_repstat) { + if (dirp != NULL) + dirfor_ret = nfsvno_getattr(dirp, &dirfor, nd->nd_cred, + p); + nfsrvd_symlinksub(nd, &named, &nva, fhp, vpp, dirp, + &dirfor, &diraft, &diraft_ret, NULL, NULL, p, exp, + pathcp, pathlen); + } else if (dirp != NULL) { + dirfor_ret = nfsvno_getattr(dirp, &dirfor, nd->nd_cred, p); + vrele(dirp); + } + if (pathcp) + FREE(pathcp, M_TEMP); + + if (nd->nd_flag & ND_NFSV3) { + if (!nd->nd_repstat) { + (void) nfsm_fhtom(nd, (u_int8_t *)fhp, 0, 1); + nfsrv_postopattr(nd, 0, &nva); + } + nfsrv_wcc(nd, dirfor_ret, &dirfor, diraft_ret, &diraft); + } + return (0); +} + +/* + * Common code for creating a symbolic link. 
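+ * (Called from nfsrvd_symlink() for V2 and V3 and from nfsrvd_mknod()
+ * when a V4 Create specifies a symlink.)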
+ */ +static void +nfsrvd_symlinksub(struct nfsrv_descript *nd, struct nameidata *ndp, + struct nfsvattr *nvap, fhandle_t *fhp, vnode_t *vpp, + vnode_t dirp, struct nfsvattr *dirforp, struct nfsvattr *diraftp, + int *diraft_retp, nfsattrbit_t *attrbitp, + NFSACL_T *aclp, NFSPROC_T *p, struct nfsexstuff *exp, char *pathcp, + int pathlen) +{ + u_int32_t *tl; + + nd->nd_repstat = nfsvno_symlink(ndp, nvap, pathcp, pathlen, + !(nd->nd_flag & ND_NFSV2), nd->nd_saveduid, nd->nd_cred, p, exp); + if (!nd->nd_repstat && !(nd->nd_flag & ND_NFSV2)) { + nfsrv_fixattr(nd, ndp->ni_vp, nvap, aclp, p, attrbitp, exp); + if (nd->nd_flag & ND_NFSV3) { + nd->nd_repstat = nfsvno_getfh(ndp->ni_vp, fhp, p); + if (!nd->nd_repstat) + nd->nd_repstat = nfsvno_getattr(ndp->ni_vp, + nvap, nd->nd_cred, p); + } + if (vpp) { + NFSVOPUNLOCK(ndp->ni_vp, 0, p); + *vpp = ndp->ni_vp; + } else { + vput(ndp->ni_vp); + } + } + if (dirp) { + *diraft_retp = nfsvno_getattr(dirp, diraftp, nd->nd_cred, p); + vrele(dirp); + } + if ((nd->nd_flag & ND_NFSV4) && !nd->nd_repstat) { + NFSM_BUILD(tl, u_int32_t *, 5 * NFSX_UNSIGNED); + *tl++ = newnfs_false; + txdr_hyper(dirforp->na_filerev, tl); + tl += 2; + txdr_hyper(diraftp->na_filerev, tl); + (void) nfsrv_putattrbit(nd, attrbitp); + } +} + +/* + * nfs mkdir service + */ +APPLESTATIC int +nfsrvd_mkdir(struct nfsrv_descript *nd, __unused int isdgram, + vnode_t dp, vnode_t *vpp, fhandle_t *fhp, NFSPROC_T *p, + struct nfsexstuff *exp) +{ + struct nfsvattr nva, dirfor, diraft; + struct nameidata named; + u_int32_t *tl; + int error, dirfor_ret = 1, diraft_ret = 1; + vnode_t dirp = NULL; + char *bufp; + u_long *hashp; + + if (nd->nd_repstat) { + nfsrv_wcc(nd, dirfor_ret, &dirfor, diraft_ret, &diraft); + return (0); + } + NFSNAMEICNDSET(&named.ni_cnd, nd->nd_cred, CREATE, LOCKPARENT); + nfsvno_setpathbuf(&named, &bufp, &hashp); + error = nfsrv_parsename(nd, bufp, hashp, &named.ni_pathlen); + if (error) { + vrele(dp); + nfsvno_relpathbuf(&named); + return (error); + } + if (!nd->nd_repstat) { + NFSVNO_ATTRINIT(&nva); + if (nd->nd_flag & ND_NFSV3) { + error = nfsrv_sattr(nd, &nva, NULL, NULL, p); + if (error) { + vrele(dp); + nfsvno_relpathbuf(&named); + return (error); + } + } else { + NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED); + nva.na_mode = nfstov_mode(*tl++); + } + } + if (!nd->nd_repstat) { + nd->nd_repstat = nfsvno_namei(nd, &named, dp, 0, exp, p, &dirp); + } else { + vrele(dp); + nfsvno_relpathbuf(&named); + } + if (dirp != NULL && !(nd->nd_flag & ND_NFSV3)) { + vrele(dirp); + dirp = NULL; + } + if (nd->nd_repstat) { + if (dirp != NULL) { + dirfor_ret = nfsvno_getattr(dirp, &dirfor, nd->nd_cred, + p); + vrele(dirp); + } + if (nd->nd_flag & ND_NFSV3) + nfsrv_wcc(nd, dirfor_ret, &dirfor, diraft_ret, + &diraft); + return (0); + } + if (dirp != NULL) + dirfor_ret = nfsvno_getattr(dirp, &dirfor, nd->nd_cred, p); + + /* + * Call nfsrvd_mkdirsub() for the code common to V4 as well. + */ + nfsrvd_mkdirsub(nd, &named, &nva, fhp, vpp, dirp, &dirfor, &diraft, + &diraft_ret, NULL, NULL, p, exp); + + if (nd->nd_flag & ND_NFSV3) { + if (!nd->nd_repstat) { + (void) nfsm_fhtom(nd, (u_int8_t *)fhp, 0, 1); + nfsrv_postopattr(nd, 0, &nva); + } + nfsrv_wcc(nd, dirfor_ret, &dirfor, diraft_ret, &diraft); + } else if (!nd->nd_repstat) { + (void) nfsm_fhtom(nd, (u_int8_t *)fhp, 0, 0); + nfsrv_fillattr(nd, &nva); + } + return (0); +nfsmout: + vrele(dp); + nfsvno_relpathbuf(&named); + return (error); +} + +/* + * Code common to mkdir for V2,3 and 4. 
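+ * (Called from nfsrvd_mkdir() for V2 and V3 and from nfsrvd_mknod()
+ * when a V4 Create specifies a directory.)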
+ */ +static void +nfsrvd_mkdirsub(struct nfsrv_descript *nd, struct nameidata *ndp, + struct nfsvattr *nvap, fhandle_t *fhp, vnode_t *vpp, + vnode_t dirp, struct nfsvattr *dirforp, struct nfsvattr *diraftp, + int *diraft_retp, nfsattrbit_t *attrbitp, NFSACL_T *aclp, + NFSPROC_T *p, struct nfsexstuff *exp) +{ + vnode_t vp; + u_int32_t *tl; + + NFSVNO_SETATTRVAL(nvap, type, VDIR); + nd->nd_repstat = nfsvno_mkdir(ndp, nvap, nd->nd_saveduid, + nd->nd_cred, p, exp); + if (!nd->nd_repstat) { + vp = ndp->ni_vp; + nfsrv_fixattr(nd, vp, nvap, aclp, p, attrbitp, exp); + nd->nd_repstat = nfsvno_getfh(vp, fhp, p); + if (!(nd->nd_flag & ND_NFSV4) && !nd->nd_repstat) + nd->nd_repstat = nfsvno_getattr(vp, nvap, nd->nd_cred, + p); + if (vpp && !nd->nd_repstat) { + NFSVOPUNLOCK(vp, 0, p); + *vpp = vp; + } else { + vput(vp); + } + } + if (dirp) { + *diraft_retp = nfsvno_getattr(dirp, diraftp, nd->nd_cred, p); + vrele(dirp); + } + if ((nd->nd_flag & ND_NFSV4) && !nd->nd_repstat) { + NFSM_BUILD(tl, u_int32_t *, 5 * NFSX_UNSIGNED); + *tl++ = newnfs_false; + txdr_hyper(dirforp->na_filerev, tl); + tl += 2; + txdr_hyper(diraftp->na_filerev, tl); + (void) nfsrv_putattrbit(nd, attrbitp); + } +} + +/* + * nfs commit service + */ +APPLESTATIC int +nfsrvd_commit(struct nfsrv_descript *nd, __unused int isdgram, + vnode_t vp, NFSPROC_T *p, __unused struct nfsexstuff *exp) +{ + struct nfsvattr bfor, aft; + u_int32_t *tl; + int error = 0, for_ret = 1, aft_ret = 1, cnt; + u_int64_t off; + + if (nd->nd_repstat) { + nfsrv_wcc(nd, for_ret, &bfor, aft_ret, &aft); + return (0); + } + NFSM_DISSECT(tl, u_int32_t *, 3 * NFSX_UNSIGNED); + /* + * XXX At this time VOP_FSYNC() does not accept offset and byte + * count parameters, so these arguments are useless (someday maybe). + */ + off = fxdr_hyper(tl); + tl += 2; + cnt = fxdr_unsigned(int, *tl); + if (nd->nd_flag & ND_NFSV3) + for_ret = nfsvno_getattr(vp, &bfor, nd->nd_cred, p); + nd->nd_repstat = nfsvno_fsync(vp, off, cnt, nd->nd_cred, p); + if (nd->nd_flag & ND_NFSV3) { + aft_ret = nfsvno_getattr(vp, &aft, nd->nd_cred, p); + nfsrv_wcc(nd, for_ret, &bfor, aft_ret, &aft); + } + vput(vp); + if (!nd->nd_repstat) { + NFSM_BUILD(tl, u_int32_t *, NFSX_VERF); + *tl++ = txdr_unsigned(nfsboottime.tv_sec); + *tl = txdr_unsigned(nfsboottime.tv_usec); + } + return (0); +nfsmout: + vput(vp); + return (error); +} + +/* + * nfs statfs service + */ +APPLESTATIC int +nfsrvd_statfs(struct nfsrv_descript *nd, __unused int isdgram, + vnode_t vp, NFSPROC_T *p, __unused struct nfsexstuff *exp) +{ + struct statfs *sf; + u_int32_t *tl; + int getret = 1; + struct nfsvattr at; + struct statfs sfs; + u_quad_t tval; + + if (nd->nd_repstat) { + nfsrv_postopattr(nd, getret, &at); + return (0); + } + sf = &sfs; + nd->nd_repstat = nfsvno_statfs(vp, sf, nd->nd_cred, p); + getret = nfsvno_getattr(vp, &at, nd->nd_cred, p); + vput(vp); + if (nd->nd_flag & ND_NFSV3) + nfsrv_postopattr(nd, getret, &at); + if (nd->nd_repstat) + return (0); + if (nd->nd_flag & ND_NFSV2) { + NFSM_BUILD(tl, u_int32_t *, NFSX_V2STATFS); + *tl++ = txdr_unsigned(NFS_V2MAXDATA); + *tl++ = txdr_unsigned(sf->f_bsize); + *tl++ = txdr_unsigned(sf->f_blocks); + *tl++ = txdr_unsigned(sf->f_bfree); + *tl = txdr_unsigned(sf->f_bavail); + } else { + NFSM_BUILD(tl, u_int32_t *, NFSX_V3STATFS); + tval = (u_quad_t)sf->f_blocks; + tval *= (u_quad_t)sf->f_bsize; + txdr_hyper(tval, tl); tl += 2; + tval = (u_quad_t)sf->f_bfree; + tval *= (u_quad_t)sf->f_bsize; + txdr_hyper(tval, tl); tl += 2; + tval = (u_quad_t)sf->f_bavail; + tval *= (u_quad_t)sf->f_bsize; 
+ txdr_hyper(tval, tl); tl += 2; + tval = (u_quad_t)sf->f_files; + txdr_hyper(tval, tl); tl += 2; + tval = (u_quad_t)sf->f_ffree; + txdr_hyper(tval, tl); tl += 2; + tval = (u_quad_t)sf->f_ffree; + txdr_hyper(tval, tl); tl += 2; + *tl = 0; + } + return (0); +} + +/* + * nfs fsinfo service + */ +APPLESTATIC int +nfsrvd_fsinfo(struct nfsrv_descript *nd, int isdgram, + vnode_t vp, NFSPROC_T *p, __unused struct nfsexstuff *exp) +{ + u_int32_t *tl; + struct nfsfsinfo fs; + int getret = 1; + struct nfsvattr at; + + if (nd->nd_repstat) { + nfsrv_postopattr(nd, getret, &at); + return (0); + } + getret = nfsvno_getattr(vp, &at, nd->nd_cred, p); + nfsvno_getfs(&fs, isdgram); + vput(vp); + nfsrv_postopattr(nd, getret, &at); + NFSM_BUILD(tl, u_int32_t *, NFSX_V3FSINFO); + *tl++ = txdr_unsigned(fs.fs_rtmax); + *tl++ = txdr_unsigned(fs.fs_rtpref); + *tl++ = txdr_unsigned(fs.fs_rtmult); + *tl++ = txdr_unsigned(fs.fs_wtmax); + *tl++ = txdr_unsigned(fs.fs_wtpref); + *tl++ = txdr_unsigned(fs.fs_wtmult); + *tl++ = txdr_unsigned(fs.fs_dtpref); + txdr_hyper(fs.fs_maxfilesize, tl); + tl += 2; + txdr_nfsv3time(&fs.fs_timedelta, tl); + tl += 2; + *tl = txdr_unsigned(fs.fs_properties); + return (0); +} + +/* + * nfs pathconf service + */ +APPLESTATIC int +nfsrvd_pathconf(struct nfsrv_descript *nd, __unused int isdgram, + vnode_t vp, NFSPROC_T *p, __unused struct nfsexstuff *exp) +{ + struct nfsv3_pathconf *pc; + int getret = 1; + register_t linkmax, namemax, chownres, notrunc; + struct nfsvattr at; + + if (nd->nd_repstat) { + nfsrv_postopattr(nd, getret, &at); + return (0); + } + nd->nd_repstat = nfsvno_pathconf(vp, _PC_LINK_MAX, &linkmax, + nd->nd_cred, p); + if (!nd->nd_repstat) + nd->nd_repstat = nfsvno_pathconf(vp, _PC_NAME_MAX, &namemax, + nd->nd_cred, p); + if (!nd->nd_repstat) + nd->nd_repstat=nfsvno_pathconf(vp, _PC_CHOWN_RESTRICTED, + &chownres, nd->nd_cred, p); + if (!nd->nd_repstat) + nd->nd_repstat = nfsvno_pathconf(vp, _PC_NO_TRUNC, ¬runc, + nd->nd_cred, p); + getret = nfsvno_getattr(vp, &at, nd->nd_cred, p); + vput(vp); + nfsrv_postopattr(nd, getret, &at); + if (!nd->nd_repstat) { + NFSM_BUILD(pc, struct nfsv3_pathconf *, NFSX_V3PATHCONF); + pc->pc_linkmax = txdr_unsigned(linkmax); + pc->pc_namemax = txdr_unsigned(namemax); + pc->pc_notrunc = txdr_unsigned(notrunc); + pc->pc_chownrestricted = txdr_unsigned(chownres); + + /* + * These should probably be supported by VOP_PATHCONF(), but + * until msdosfs is exportable (why would you want to?), the + * Unix defaults should be ok. 
+ */ + pc->pc_caseinsensitive = newnfs_false; + pc->pc_casepreserving = newnfs_true; + } + return (0); +} + +/* + * nfsv4 lock service + */ +APPLESTATIC int +nfsrvd_lock(struct nfsrv_descript *nd, __unused int isdgram, + vnode_t vp, NFSPROC_T *p, struct nfsexstuff *exp) +{ + u_int32_t *tl; + int i; + struct nfsstate *stp = NULL; + struct nfslock *lop; + struct nfslockconflict cf; + int error = 0; + u_short flags = NFSLCK_LOCK, lflags; + u_int64_t offset, len; + nfsv4stateid_t stateid; + nfsquad_t clientid; + + NFSM_DISSECT(tl, u_int32_t *, 7 * NFSX_UNSIGNED); + i = fxdr_unsigned(int, *tl++); + switch (i) { + case NFSV4LOCKT_READW: + flags |= NFSLCK_BLOCKING; + case NFSV4LOCKT_READ: + lflags = NFSLCK_READ; + break; + case NFSV4LOCKT_WRITEW: + flags |= NFSLCK_BLOCKING; + case NFSV4LOCKT_WRITE: + lflags = NFSLCK_WRITE; + break; + default: + nd->nd_repstat = NFSERR_BADXDR; + goto nfsmout; + }; + if (*tl++ == newnfs_true) + flags |= NFSLCK_RECLAIM; + offset = fxdr_hyper(tl); + tl += 2; + len = fxdr_hyper(tl); + tl += 2; + if (*tl == newnfs_true) + flags |= NFSLCK_OPENTOLOCK; + if (flags & NFSLCK_OPENTOLOCK) { + NFSM_DISSECT(tl, u_int32_t *, 5 * NFSX_UNSIGNED + NFSX_STATEID); + i = fxdr_unsigned(int, *(tl+4+(NFSX_STATEID / NFSX_UNSIGNED))); + MALLOC(stp, struct nfsstate *, sizeof (struct nfsstate) + i, + M_NFSDSTATE, M_WAITOK); + stp->ls_ownerlen = i; + stp->ls_op = nd->nd_rp; + stp->ls_seq = fxdr_unsigned(int, *tl++); + stp->ls_stateid.seqid = fxdr_unsigned(u_int32_t, *tl++); + NFSBCOPY((caddr_t)tl, (caddr_t)stp->ls_stateid.other, + NFSX_STATEIDOTHER); + tl += (NFSX_STATEIDOTHER / NFSX_UNSIGNED); + stp->ls_opentolockseq = fxdr_unsigned(int, *tl++); + clientid.lval[0] = *tl++; + clientid.lval[1] = *tl++; + if (nd->nd_flag & ND_IMPLIEDCLID) { + if (nd->nd_clientid.qval != clientid.qval) + printf("EEK! multiple clids\n"); + } else { + nd->nd_flag |= ND_IMPLIEDCLID; + nd->nd_clientid.qval = clientid.qval; + } + error = nfsrv_mtostr(nd, stp->ls_owner, stp->ls_ownerlen); + if (error) + goto nfsmout; + } else { + NFSM_DISSECT(tl, u_int32_t *, NFSX_STATEID + NFSX_UNSIGNED); + MALLOC(stp, struct nfsstate *, sizeof (struct nfsstate), + M_NFSDSTATE, M_WAITOK); + stp->ls_ownerlen = 0; + stp->ls_op = nd->nd_rp; + stp->ls_stateid.seqid = fxdr_unsigned(u_int32_t, *tl++); + NFSBCOPY((caddr_t)tl, (caddr_t)stp->ls_stateid.other, + NFSX_STATEIDOTHER); + tl += (NFSX_STATEIDOTHER / NFSX_UNSIGNED); + stp->ls_seq = fxdr_unsigned(int, *tl); + clientid.lval[0] = stp->ls_stateid.other[0]; + clientid.lval[1] = stp->ls_stateid.other[1]; + if (nd->nd_flag & ND_IMPLIEDCLID) { + if (nd->nd_clientid.qval != clientid.qval) + printf("EEK! multiple clids\n"); + } else { + nd->nd_flag |= ND_IMPLIEDCLID; + nd->nd_clientid.qval = clientid.qval; + } + } + MALLOC(lop, struct nfslock *, sizeof (struct nfslock), + M_NFSDLOCK, M_WAITOK); + lop->lo_first = offset; + if (len == NFS64BITSSET) { + lop->lo_end = NFS64BITSSET; + } else { + lop->lo_end = offset + len; + if (lop->lo_end <= lop->lo_first) + nd->nd_repstat = NFSERR_INVAL; + } + lop->lo_flags = lflags; + stp->ls_flags = flags; + stp->ls_uid = nd->nd_cred->cr_uid; + + /* + * Do basic access checking. 
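+ * A write lock requires write access to the file, whereas a read lock
+ * is ok so long as the client has either read or execute access.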
+ */ + if (!nd->nd_repstat && vnode_vtype(vp) != VREG) { + if (vnode_vtype(vp) == VDIR) + nd->nd_repstat = NFSERR_ISDIR; + else + nd->nd_repstat = NFSERR_INVAL; + } + if (!nd->nd_repstat) { + if (lflags & NFSLCK_WRITE) { + nd->nd_repstat = nfsvno_accchk(vp, NFSV4ACE_WRITEDATA, + nd->nd_cred, exp, p, NFSACCCHK_ALLOWOWNER, + NFSACCCHK_VPISLOCKED); + } else { + nd->nd_repstat = nfsvno_accchk(vp, NFSV4ACE_READDATA, + nd->nd_cred, exp, p, NFSACCCHK_ALLOWOWNER, + NFSACCCHK_VPISLOCKED); + if (nd->nd_repstat) + nd->nd_repstat = nfsvno_accchk(vp, NFSV4ACE_EXECUTE, + nd->nd_cred, exp, p, NFSACCCHK_ALLOWOWNER, + NFSACCCHK_VPISLOCKED); + } + } + + /* + * We call nfsrv_lockctrl() even if nd_repstat set, so that the + * seqid# gets updated. nfsrv_lockctrl() will return the value + * of nd_repstat, if it gets that far. + */ + nd->nd_repstat = nfsrv_lockctrl(vp, &stp, &lop, &cf, clientid, + &stateid, exp, nd, p); + if (lop) + FREE((caddr_t)lop, M_NFSDLOCK); + if (stp) + FREE((caddr_t)stp, M_NFSDSTATE); + if (!nd->nd_repstat) { + NFSM_BUILD(tl, u_int32_t *, NFSX_STATEID); + *tl++ = txdr_unsigned(stateid.seqid); + NFSBCOPY((caddr_t)stateid.other,(caddr_t)tl,NFSX_STATEIDOTHER); + } else if (nd->nd_repstat == NFSERR_DENIED) { + NFSM_BUILD(tl, u_int32_t *, 7 * NFSX_UNSIGNED); + txdr_hyper(cf.cl_first, tl); + tl += 2; + if (cf.cl_end == NFS64BITSSET) + len = NFS64BITSSET; + else + len = cf.cl_end - cf.cl_first; + txdr_hyper(len, tl); + tl += 2; + if (cf.cl_flags == NFSLCK_WRITE) + *tl++ = txdr_unsigned(NFSV4LOCKT_WRITE); + else + *tl++ = txdr_unsigned(NFSV4LOCKT_READ); + *tl++ = stateid.other[0]; + *tl = stateid.other[1]; + (void) nfsm_strtom(nd, cf.cl_owner, cf.cl_ownerlen); + } + vput(vp); + return (0); +nfsmout: + vput(vp); + if (stp) + free((caddr_t)stp, M_NFSDSTATE); + return (error); +} + +/* + * nfsv4 lock test service + */ +APPLESTATIC int +nfsrvd_lockt(struct nfsrv_descript *nd, __unused int isdgram, + vnode_t vp, NFSPROC_T *p, struct nfsexstuff *exp) +{ + u_int32_t *tl; + int i; + struct nfsstate *stp = NULL; + struct nfslock lo, *lop = &lo; + struct nfslockconflict cf; + int error = 0; + nfsv4stateid_t stateid; + nfsquad_t clientid; + u_int64_t len; + + NFSM_DISSECT(tl, u_int32_t *, 8 * NFSX_UNSIGNED); + i = fxdr_unsigned(int, *(tl + 7)); + MALLOC(stp, struct nfsstate *, sizeof (struct nfsstate) + i, + M_NFSDSTATE, M_WAITOK); + stp->ls_ownerlen = i; + stp->ls_op = NULL; + stp->ls_flags = NFSLCK_TEST; + stp->ls_uid = nd->nd_cred->cr_uid; + i = fxdr_unsigned(int, *tl++); + switch (i) { + case NFSV4LOCKT_READW: + stp->ls_flags |= NFSLCK_BLOCKING; + case NFSV4LOCKT_READ: + lo.lo_flags = NFSLCK_READ; + break; + case NFSV4LOCKT_WRITEW: + stp->ls_flags |= NFSLCK_BLOCKING; + case NFSV4LOCKT_WRITE: + lo.lo_flags = NFSLCK_WRITE; + break; + default: + nd->nd_repstat = NFSERR_BADXDR; + goto nfsmout; + }; + lo.lo_first = fxdr_hyper(tl); + tl += 2; + len = fxdr_hyper(tl); + if (len == NFS64BITSSET) { + lo.lo_end = NFS64BITSSET; + } else { + lo.lo_end = lo.lo_first + len; + if (lo.lo_end <= lo.lo_first) + nd->nd_repstat = NFSERR_INVAL; + } + tl += 2; + clientid.lval[0] = *tl++; + clientid.lval[1] = *tl; + if (nd->nd_flag & ND_IMPLIEDCLID) { + if (nd->nd_clientid.qval != clientid.qval) + printf("EEK! 
multiple clids\n"); + } else { + nd->nd_flag |= ND_IMPLIEDCLID; + nd->nd_clientid.qval = clientid.qval; + } + error = nfsrv_mtostr(nd, stp->ls_owner, stp->ls_ownerlen); + if (error) + goto nfsmout; + if (!nd->nd_repstat && vnode_vtype(vp) != VREG) { + if (vnode_vtype(vp) == VDIR) + nd->nd_repstat = NFSERR_ISDIR; + else + nd->nd_repstat = NFSERR_INVAL; + } + if (!nd->nd_repstat) + nd->nd_repstat = nfsrv_lockctrl(vp, &stp, &lop, &cf, clientid, + &stateid, exp, nd, p); + if (stp) + FREE((caddr_t)stp, M_NFSDSTATE); + if (nd->nd_repstat) { + if (nd->nd_repstat == NFSERR_DENIED) { + NFSM_BUILD(tl, u_int32_t *, 7 * NFSX_UNSIGNED); + txdr_hyper(cf.cl_first, tl); + tl += 2; + if (cf.cl_end == NFS64BITSSET) + len = NFS64BITSSET; + else + len = cf.cl_end - cf.cl_first; + txdr_hyper(len, tl); + tl += 2; + if (cf.cl_flags == NFSLCK_WRITE) + *tl++ = txdr_unsigned(NFSV4LOCKT_WRITE); + else + *tl++ = txdr_unsigned(NFSV4LOCKT_READ); + *tl++ = stp->ls_stateid.other[0]; + *tl = stp->ls_stateid.other[1]; + (void) nfsm_strtom(nd, cf.cl_owner, cf.cl_ownerlen); + } + } + vput(vp); + return (0); +nfsmout: + vput(vp); + if (stp) + free((caddr_t)stp, M_NFSDSTATE); + return (error); +} + +/* + * nfsv4 unlock service + */ +APPLESTATIC int +nfsrvd_locku(struct nfsrv_descript *nd, __unused int isdgram, + vnode_t vp, NFSPROC_T *p, struct nfsexstuff *exp) +{ + u_int32_t *tl; + int i; + struct nfsstate *stp; + struct nfslock *lop; + int error = 0; + nfsv4stateid_t stateid; + nfsquad_t clientid; + u_int64_t len; + + NFSM_DISSECT(tl, u_int32_t *, 6 * NFSX_UNSIGNED + NFSX_STATEID); + MALLOC(stp, struct nfsstate *, sizeof (struct nfsstate), + M_NFSDSTATE, M_WAITOK); + MALLOC(lop, struct nfslock *, sizeof (struct nfslock), + M_NFSDLOCK, M_WAITOK); + stp->ls_flags = NFSLCK_UNLOCK; + lop->lo_flags = NFSLCK_UNLOCK; + stp->ls_op = nd->nd_rp; + i = fxdr_unsigned(int, *tl++); + switch (i) { + case NFSV4LOCKT_READW: + stp->ls_flags |= NFSLCK_BLOCKING; + case NFSV4LOCKT_READ: + break; + case NFSV4LOCKT_WRITEW: + stp->ls_flags |= NFSLCK_BLOCKING; + case NFSV4LOCKT_WRITE: + break; + default: + nd->nd_repstat = NFSERR_BADXDR; + goto nfsmout; + }; + stp->ls_ownerlen = 0; + stp->ls_uid = nd->nd_cred->cr_uid; + stp->ls_seq = fxdr_unsigned(int, *tl++); + stp->ls_stateid.seqid = fxdr_unsigned(u_int32_t, *tl++); + NFSBCOPY((caddr_t)tl, (caddr_t)stp->ls_stateid.other, + NFSX_STATEIDOTHER); + tl += (NFSX_STATEIDOTHER / NFSX_UNSIGNED); + lop->lo_first = fxdr_hyper(tl); + tl += 2; + len = fxdr_hyper(tl); + if (len == NFS64BITSSET) { + lop->lo_end = NFS64BITSSET; + } else { + lop->lo_end = lop->lo_first + len; + if (lop->lo_end <= lop->lo_first) + nd->nd_repstat = NFSERR_INVAL; + } + clientid.lval[0] = stp->ls_stateid.other[0]; + clientid.lval[1] = stp->ls_stateid.other[1]; + if (nd->nd_flag & ND_IMPLIEDCLID) { + if (nd->nd_clientid.qval != clientid.qval) + printf("EEK! multiple clids\n"); + } else { + nd->nd_flag |= ND_IMPLIEDCLID; + nd->nd_clientid.qval = clientid.qval; + } + if (!nd->nd_repstat && vnode_vtype(vp) != VREG) { + if (vnode_vtype(vp) == VDIR) + nd->nd_repstat = NFSERR_ISDIR; + else + nd->nd_repstat = NFSERR_INVAL; + } + /* + * Call nfsrv_lockctrl() even if nd_repstat is set, so that the + * seqid# gets incremented. nfsrv_lockctrl() will return the + * value of nd_repstat, if it gets that far. 
+ */ + nd->nd_repstat = nfsrv_lockctrl(vp, &stp, &lop, NULL, clientid, + &stateid, exp, nd, p); + if (stp) + FREE((caddr_t)stp, M_NFSDSTATE); + if (lop) + free((caddr_t)lop, M_NFSDLOCK); + if (!nd->nd_repstat) { + NFSM_BUILD(tl, u_int32_t *, NFSX_STATEID); + *tl++ = txdr_unsigned(stateid.seqid); + NFSBCOPY((caddr_t)stateid.other,(caddr_t)tl,NFSX_STATEIDOTHER); + } +nfsmout: + vput(vp); + return (error); +} + +/* + * nfsv4 open service + */ +APPLESTATIC int +nfsrvd_open(struct nfsrv_descript *nd, __unused int isdgram, + vnode_t dp, vnode_t *vpp, __unused fhandle_t *fhp, NFSPROC_T *p, + struct nfsexstuff *exp) +{ + u_int32_t *tl; + int i; + struct nfsstate *stp = NULL; + int error = 0, create, claim, exclusive_flag = 0; + u_int32_t rflags = NFSV4OPEN_LOCKTYPEPOSIX, acemask; + int how = NFSCREATE_UNCHECKED; + u_char cverf[NFSX_VERF]; + vnode_t vp = NULL, dirp = NULL; + struct nfsvattr nva, dirfor, diraft; + struct nameidata named; + nfsv4stateid_t stateid, delegstateid; + nfsattrbit_t attrbits; + nfsquad_t clientid; + char *bufp = NULL; + u_long *hashp; + NFSACL_T *aclp = NULL; + +#ifdef NFS4_ACL_EXTATTR_NAME + aclp = acl_alloc(); + aclp->acl_cnt = 0; +#endif + NFSZERO_ATTRBIT(&attrbits); + named.ni_startdir = NULL; + named.ni_cnd.cn_nameiop = 0; + NFSM_DISSECT(tl, u_int32_t *, 6 * NFSX_UNSIGNED); + i = fxdr_unsigned(int, *(tl + 5)); + MALLOC(stp, struct nfsstate *, sizeof (struct nfsstate) + i, + M_NFSDSTATE, M_WAITOK); + stp->ls_ownerlen = i; + stp->ls_op = nd->nd_rp; + stp->ls_flags = NFSLCK_OPEN; + stp->ls_uid = nd->nd_cred->cr_uid; + stp->ls_seq = fxdr_unsigned(u_int32_t, *tl++); + i = fxdr_unsigned(int, *tl++); + switch (i) { + case NFSV4OPEN_ACCESSREAD: + stp->ls_flags |= NFSLCK_READACCESS; + break; + case NFSV4OPEN_ACCESSWRITE: + stp->ls_flags |= NFSLCK_WRITEACCESS; + break; + case NFSV4OPEN_ACCESSBOTH: + stp->ls_flags |= (NFSLCK_READACCESS | NFSLCK_WRITEACCESS); + break; + default: + nd->nd_repstat = NFSERR_INVAL; + }; + i = fxdr_unsigned(int, *tl++); + switch (i) { + case NFSV4OPEN_DENYNONE: + break; + case NFSV4OPEN_DENYREAD: + stp->ls_flags |= NFSLCK_READDENY; + break; + case NFSV4OPEN_DENYWRITE: + stp->ls_flags |= NFSLCK_WRITEDENY; + break; + case NFSV4OPEN_DENYBOTH: + stp->ls_flags |= (NFSLCK_READDENY | NFSLCK_WRITEDENY); + break; + default: + nd->nd_repstat = NFSERR_INVAL; + }; + clientid.lval[0] = *tl++; + clientid.lval[1] = *tl; + if (nd->nd_flag & ND_IMPLIEDCLID) { + if (nd->nd_clientid.qval != clientid.qval) + printf("EEK! 
multiple clids\n"); + } else { + nd->nd_flag |= ND_IMPLIEDCLID; + nd->nd_clientid.qval = clientid.qval; + } + error = nfsrv_mtostr(nd, stp->ls_owner, stp->ls_ownerlen); + if (error) { + vrele(dp); +#ifdef NFS4_ACL_EXTATTR_NAME + acl_free(aclp); +#endif + FREE((caddr_t)stp, M_NFSDSTATE); + return (error); + } + NFSVNO_ATTRINIT(&nva); + NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED); + create = fxdr_unsigned(int, *tl); + if (!nd->nd_repstat) + nd->nd_repstat = nfsvno_getattr(dp, &dirfor, nd->nd_cred, p); + if (create == NFSV4OPEN_CREATE) { + nva.na_type = VREG; + nva.na_mode = 0; + NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED); + how = fxdr_unsigned(int, *tl); + switch (how) { + case NFSCREATE_UNCHECKED: + case NFSCREATE_GUARDED: + error = nfsv4_sattr(nd, &nva, &attrbits, aclp, p); + if (error) { + vrele(dp); +#ifdef NFS4_ACL_EXTATTR_NAME + acl_free(aclp); +#endif + FREE((caddr_t)stp, M_NFSDSTATE); + return (error); + } + /* + * If the na_gid being set is the same as that of + * the directory it is going in, clear it, since + * that is what will be set by default. This allows + * a user that isn't in that group to do the create. + */ + if (!nd->nd_repstat && NFSVNO_ISSETGID(&nva) && + nva.na_gid == dirfor.na_gid) + NFSVNO_UNSET(&nva, gid); + if (!nd->nd_repstat) + nd->nd_repstat = nfsrv_checkuidgid(nd, &nva); + break; + case NFSCREATE_EXCLUSIVE: + NFSM_DISSECT(tl, u_int32_t *, NFSX_VERF); + NFSBCOPY((caddr_t)tl, cverf, NFSX_VERF); + break; + default: + nd->nd_repstat = NFSERR_BADXDR; + vrele(dp); +#ifdef NFS4_ACL_EXTATTR_NAME + acl_free(aclp); +#endif + FREE((caddr_t)stp, M_NFSDSTATE); + return (0); + }; + } else if (create != NFSV4OPEN_NOCREATE) { + nd->nd_repstat = NFSERR_BADXDR; + vrele(dp); +#ifdef NFS4_ACL_EXTATTR_NAME + acl_free(aclp); +#endif + FREE((caddr_t)stp, M_NFSDSTATE); + return (0); + } + + /* + * Now, handle the claim, which usually includes looking up a + * name in the directory referenced by dp. The exception is + * NFSV4OPEN_CLAIMPREVIOUS. 
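+	 * (Roughly: NFSV4OPEN_CLAIMNULL, NFSV4OPEN_CLAIMDELEGATECUR and
+	 * NFSV4OPEN_CLAIMDELEGATEPREV carry a component name that is looked
+	 * up under dp via nfsvno_namei(), whereas NFSV4OPEN_CLAIMPREVIOUS is
+	 * a reclaim for which dp itself is the file being opened, so no
+	 * lookup is done and nfsrv_opencheck() is called on it directly.)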
+ */ + NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED); + claim = fxdr_unsigned(int, *tl); + if (claim == NFSV4OPEN_CLAIMDELEGATECUR) { + NFSM_DISSECT(tl, u_int32_t *, NFSX_STATEID); + stateid.seqid = fxdr_unsigned(u_int32_t, *tl++); + NFSBCOPY((caddr_t)tl,(caddr_t)stateid.other,NFSX_STATEIDOTHER); + stp->ls_flags |= NFSLCK_DELEGCUR; + } else if (claim == NFSV4OPEN_CLAIMDELEGATEPREV) { + stp->ls_flags |= NFSLCK_DELEGPREV; + } + if (claim == NFSV4OPEN_CLAIMNULL || claim == NFSV4OPEN_CLAIMDELEGATECUR + || claim == NFSV4OPEN_CLAIMDELEGATEPREV) { + if (!nd->nd_repstat && create == NFSV4OPEN_CREATE && + claim != NFSV4OPEN_CLAIMNULL) + nd->nd_repstat = NFSERR_INVAL; + if (nd->nd_repstat) { + nd->nd_repstat = nfsrv_opencheck(clientid, + &stateid, stp, NULL, nd, p, nd->nd_repstat); + vrele(dp); +#ifdef NFS4_ACL_EXTATTR_NAME + acl_free(aclp); +#endif + FREE((caddr_t)stp, M_NFSDSTATE); + return (0); + } + if (create == NFSV4OPEN_CREATE) + NFSNAMEICNDSET(&named.ni_cnd, nd->nd_cred, CREATE, + LOCKPARENT | LOCKLEAF | SAVESTART); + else + NFSNAMEICNDSET(&named.ni_cnd, nd->nd_cred, LOOKUP, + LOCKLEAF | SAVESTART); + nfsvno_setpathbuf(&named, &bufp, &hashp); + error = nfsrv_parsename(nd, bufp, hashp, &named.ni_pathlen); + if (error) { + vrele(dp); +#ifdef NFS4_ACL_EXTATTR_NAME + acl_free(aclp); +#endif + FREE((caddr_t)stp, M_NFSDSTATE); + nfsvno_relpathbuf(&named); + return (error); + } + if (!nd->nd_repstat) { + nd->nd_repstat = nfsvno_namei(nd, &named, dp, 0, exp, + p, &dirp); + } else { + vrele(dp); + nfsvno_relpathbuf(&named); + } + if (create == NFSV4OPEN_CREATE) { + switch (how) { + case NFSCREATE_UNCHECKED: + if (named.ni_vp) { + /* + * Clear the setable attribute bits, except + * for Size, if it is being truncated. + */ + NFSZERO_ATTRBIT(&attrbits); + if (NFSVNO_ISSETSIZE(&nva)) + NFSSETBIT_ATTRBIT(&attrbits, + NFSATTRBIT_SIZE); + } + break; + case NFSCREATE_GUARDED: + if (named.ni_vp && !nd->nd_repstat) + nd->nd_repstat = EEXIST; + break; + case NFSCREATE_EXCLUSIVE: + exclusive_flag = 1; + if (!named.ni_vp) + nva.na_mode = 0; + }; + } + nfsvno_open(nd, &named, clientid, &stateid, stp, + &exclusive_flag, &nva, cverf, create, aclp, &attrbits, + nd->nd_cred, p, exp, &vp); + } else if (claim == NFSV4OPEN_CLAIMPREVIOUS) { + NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED); + i = fxdr_unsigned(int, *tl); + switch (i) { + case NFSV4OPEN_DELEGATEREAD: + stp->ls_flags |= NFSLCK_DELEGREAD; + break; + case NFSV4OPEN_DELEGATEWRITE: + stp->ls_flags |= NFSLCK_DELEGWRITE; + case NFSV4OPEN_DELEGATENONE: + break; + default: + nd->nd_repstat = NFSERR_BADXDR; + vrele(dp); +#ifdef NFS4_ACL_EXTATTR_NAME + acl_free(aclp); +#endif + FREE((caddr_t)stp, M_NFSDSTATE); + return (0); + }; + stp->ls_flags |= NFSLCK_RECLAIM; + vp = dp; + NFSVOPLOCK(vp, LK_EXCLUSIVE | LK_RETRY, p); + nd->nd_repstat = nfsrv_opencheck(clientid, &stateid, stp, vp, + nd, p, nd->nd_repstat); + } else { + nd->nd_repstat = NFSERR_BADXDR; + vrele(dp); +#ifdef NFS4_ACL_EXTATTR_NAME + acl_free(aclp); +#endif + FREE((caddr_t)stp, M_NFSDSTATE); + return (0); + } + + /* + * Do basic access checking. 
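+	 * (Only regular files can be opened: a VDIR gets NFSERR_ISDIR and a
+	 * VLNK gets NFSERR_SYMLINK.  Write access requires
+	 * NFSV4ACE_WRITEDATA, while read access is satisfied by either
+	 * NFSV4ACE_READDATA or, failing that, NFSV4ACE_EXECUTE, presumably
+	 * so that execute-only files can still be opened for reading.)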
+ */ + if (!nd->nd_repstat && vnode_vtype(vp) != VREG) { + if (vnode_vtype(vp) == VDIR) + nd->nd_repstat = NFSERR_ISDIR; + else if (vnode_vtype(vp) == VLNK) + nd->nd_repstat = NFSERR_SYMLINK; + else + nd->nd_repstat = NFSERR_INVAL; + } + if (!nd->nd_repstat && (stp->ls_flags & NFSLCK_WRITEACCESS)) + nd->nd_repstat = nfsvno_accchk(vp, NFSV4ACE_WRITEDATA, nd->nd_cred, + exp, p, NFSACCCHK_ALLOWOWNER, NFSACCCHK_VPISLOCKED); + if (!nd->nd_repstat && (stp->ls_flags & NFSLCK_READACCESS)) { + nd->nd_repstat = nfsvno_accchk(vp, NFSV4ACE_READDATA, nd->nd_cred, + exp, p, NFSACCCHK_ALLOWOWNER, NFSACCCHK_VPISLOCKED); + if (nd->nd_repstat) + nd->nd_repstat = nfsvno_accchk(vp, NFSV4ACE_EXECUTE, + nd->nd_cred, exp, p, NFSACCCHK_ALLOWOWNER, + NFSACCCHK_VPISLOCKED); + } + + if (!nd->nd_repstat) + nd->nd_repstat = nfsvno_getattr(vp, &nva, nd->nd_cred, p); + if (!nd->nd_repstat && exclusive_flag && + NFSBCMP(cverf, (caddr_t)&nva.na_atime, NFSX_VERF)) + nd->nd_repstat = EEXIST; + /* + * Do the open locking/delegation stuff. + */ + if (!nd->nd_repstat) + nd->nd_repstat = nfsrv_openctrl(nd, vp, &stp, clientid, &stateid, + &delegstateid, &rflags, exp, p, nva.na_filerev); + + /* + * vp must be unlocked before the call to nfsvno_getattr(dirp,...) + * below, to avoid a deadlock with the lookup in nfsvno_namei() above. + * (ie: Leave the NFSVOPUNLOCK() about here.) + */ + if (vp) + NFSVOPUNLOCK(vp, 0, p); + if (stp) + FREE((caddr_t)stp, M_NFSDSTATE); + if (!nd->nd_repstat && dirp) + nd->nd_repstat = nfsvno_getattr(dirp, &diraft, nd->nd_cred, p); + if (!nd->nd_repstat) { + NFSM_BUILD(tl, u_int32_t *, NFSX_STATEID + 6 * NFSX_UNSIGNED); + *tl++ = txdr_unsigned(stateid.seqid); + NFSBCOPY((caddr_t)stateid.other,(caddr_t)tl,NFSX_STATEIDOTHER); + tl += (NFSX_STATEIDOTHER / NFSX_UNSIGNED); + if (claim == NFSV4OPEN_CLAIMPREVIOUS) { + *tl++ = newnfs_true; + *tl++ = 0; + *tl++ = 0; + *tl++ = 0; + *tl++ = 0; + } else { + *tl++ = newnfs_false; /* Since dirp is not locked */ + txdr_hyper(dirfor.na_filerev, tl); + tl += 2; + txdr_hyper(diraft.na_filerev, tl); + tl += 2; + } + *tl = txdr_unsigned(rflags & NFSV4OPEN_RFLAGS); + (void) nfsrv_putattrbit(nd, &attrbits); + NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED); + if (rflags & NFSV4OPEN_READDELEGATE) + *tl = txdr_unsigned(NFSV4OPEN_DELEGATEREAD); + else if (rflags & NFSV4OPEN_WRITEDELEGATE) + *tl = txdr_unsigned(NFSV4OPEN_DELEGATEWRITE); + else + *tl = txdr_unsigned(NFSV4OPEN_DELEGATENONE); + if (rflags & (NFSV4OPEN_READDELEGATE|NFSV4OPEN_WRITEDELEGATE)) { + NFSM_BUILD(tl, u_int32_t *, NFSX_STATEID+NFSX_UNSIGNED); + *tl++ = txdr_unsigned(delegstateid.seqid); + NFSBCOPY((caddr_t)delegstateid.other, (caddr_t)tl, + NFSX_STATEIDOTHER); + tl += (NFSX_STATEIDOTHER / NFSX_UNSIGNED); + if (rflags & NFSV4OPEN_RECALL) + *tl = newnfs_true; + else + *tl = newnfs_false; + if (rflags & NFSV4OPEN_WRITEDELEGATE) { + NFSM_BUILD(tl, u_int32_t *, 3 * NFSX_UNSIGNED); + *tl++ = txdr_unsigned(NFSV4OPEN_LIMITSIZE); + txdr_hyper(nva.na_size, tl); + } + NFSM_BUILD(tl, u_int32_t *, 3 * NFSX_UNSIGNED); + *tl++ = txdr_unsigned(NFSV4ACE_ALLOWEDTYPE); + *tl++ = txdr_unsigned(0x0); + acemask = NFSV4ACE_ALLFILESMASK; + if (nva.na_mode & S_IRUSR) + acemask |= NFSV4ACE_READMASK; + if (nva.na_mode & S_IWUSR) + acemask |= NFSV4ACE_WRITEMASK; + if (nva.na_mode & S_IXUSR) + acemask |= NFSV4ACE_EXECUTEMASK; + *tl = txdr_unsigned(acemask); + (void) nfsm_strtom(nd, "OWNER@", 6); + } + *vpp = vp; + } else if (vp) { + vrele(vp); + } + if (dirp) + vrele(dirp); +#ifdef NFS4_ACL_EXTATTR_NAME + acl_free(aclp); +#endif + return 
(0); +nfsmout: + vrele(dp); +#ifdef NFS4_ACL_EXTATTR_NAME + acl_free(aclp); +#endif + if (stp) + FREE((caddr_t)stp, M_NFSDSTATE); + return (error); +} + +/* + * nfsv4 close service + */ +APPLESTATIC int +nfsrvd_close(struct nfsrv_descript *nd, __unused int isdgram, + vnode_t vp, NFSPROC_T *p, __unused struct nfsexstuff *exp) +{ + u_int32_t *tl; + struct nfsstate st, *stp = &st; + int error = 0; + nfsv4stateid_t stateid; + nfsquad_t clientid; + + NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED + NFSX_STATEID); + stp->ls_seq = fxdr_unsigned(u_int32_t, *tl++); + stp->ls_ownerlen = 0; + stp->ls_op = nd->nd_rp; + stp->ls_uid = nd->nd_cred->cr_uid; + stp->ls_stateid.seqid = fxdr_unsigned(u_int32_t, *tl++); + NFSBCOPY((caddr_t)tl, (caddr_t)stp->ls_stateid.other, + NFSX_STATEIDOTHER); + stp->ls_flags = NFSLCK_CLOSE; + clientid.lval[0] = stp->ls_stateid.other[0]; + clientid.lval[1] = stp->ls_stateid.other[1]; + if (nd->nd_flag & ND_IMPLIEDCLID) { + if (nd->nd_clientid.qval != clientid.qval) + printf("EEK! multiple clids\n"); + } else { + nd->nd_flag |= ND_IMPLIEDCLID; + nd->nd_clientid.qval = clientid.qval; + } + nd->nd_repstat = nfsrv_openupdate(vp, stp, clientid, &stateid, nd, p); + vput(vp); + if (!nd->nd_repstat) { + NFSM_BUILD(tl, u_int32_t *, NFSX_STATEID); + *tl++ = txdr_unsigned(stateid.seqid); + NFSBCOPY((caddr_t)stateid.other,(caddr_t)tl,NFSX_STATEIDOTHER); + } + return (0); +nfsmout: + vput(vp); + return (error); +} + +/* + * nfsv4 delegpurge service + */ +APPLESTATIC int +nfsrvd_delegpurge(struct nfsrv_descript *nd, __unused int isdgram, + __unused vnode_t vp, __unused NFSPROC_T *p, __unused struct nfsexstuff *exp) +{ + u_int32_t *tl; + int error = 0; + nfsquad_t clientid; + + NFSM_DISSECT(tl, u_int32_t *, 2 * NFSX_UNSIGNED); + clientid.lval[0] = *tl++; + clientid.lval[1] = *tl; + if (nd->nd_flag & ND_IMPLIEDCLID) { + if (nd->nd_clientid.qval != clientid.qval) + printf("EEK! multiple clids\n"); + } else { + nd->nd_flag |= ND_IMPLIEDCLID; + nd->nd_clientid.qval = clientid.qval; + } + nd->nd_repstat = nfsrv_delegupdate(clientid, NULL, NULL, + NFSV4OP_DELEGPURGE, nd->nd_cred, p); +nfsmout: + return (error); +} + +/* + * nfsv4 delegreturn service + */ +APPLESTATIC int +nfsrvd_delegreturn(struct nfsrv_descript *nd, __unused int isdgram, + vnode_t vp, NFSPROC_T *p, __unused struct nfsexstuff *exp) +{ + u_int32_t *tl; + int error = 0; + nfsv4stateid_t stateid; + nfsquad_t clientid; + + NFSM_DISSECT(tl, u_int32_t *, NFSX_STATEID); + stateid.seqid = fxdr_unsigned(u_int32_t, *tl++); + NFSBCOPY((caddr_t)tl, (caddr_t)stateid.other, NFSX_STATEIDOTHER); + clientid.lval[0] = stateid.other[0]; + clientid.lval[1] = stateid.other[1]; + if (nd->nd_flag & ND_IMPLIEDCLID) { + if (nd->nd_clientid.qval != clientid.qval) + printf("EEK! 
multiple clids\n"); + } else { + nd->nd_flag |= ND_IMPLIEDCLID; + nd->nd_clientid.qval = clientid.qval; + } + nd->nd_repstat = nfsrv_delegupdate(clientid, &stateid, vp, + NFSV4OP_DELEGRETURN, nd->nd_cred, p); +nfsmout: + vput(vp); + return (error); +} + +/* + * nfsv4 get file handle service + */ +APPLESTATIC int +nfsrvd_getfh(struct nfsrv_descript *nd, __unused int isdgram, + vnode_t vp, NFSPROC_T *p, __unused struct nfsexstuff *exp) +{ + fhandle_t fh; + + nd->nd_repstat = nfsvno_getfh(vp, &fh, p); + vput(vp); + if (!nd->nd_repstat) + (void) nfsm_fhtom(nd, (u_int8_t *)&fh, 0, 0); + return (0); +} + +/* + * nfsv4 open confirm service + */ +APPLESTATIC int +nfsrvd_openconfirm(struct nfsrv_descript *nd, __unused int isdgram, + vnode_t vp, NFSPROC_T *p, __unused struct nfsexstuff *exp) +{ + u_int32_t *tl; + struct nfsstate st, *stp = &st; + int error = 0; + nfsv4stateid_t stateid; + nfsquad_t clientid; + + NFSM_DISSECT(tl, u_int32_t *, NFSX_STATEID + NFSX_UNSIGNED); + stp->ls_ownerlen = 0; + stp->ls_op = nd->nd_rp; + stp->ls_uid = nd->nd_cred->cr_uid; + stp->ls_stateid.seqid = fxdr_unsigned(u_int32_t, *tl++); + NFSBCOPY((caddr_t)tl, (caddr_t)stp->ls_stateid.other, + NFSX_STATEIDOTHER); + tl += (NFSX_STATEIDOTHER / NFSX_UNSIGNED); + stp->ls_seq = fxdr_unsigned(u_int32_t, *tl); + stp->ls_flags = NFSLCK_CONFIRM; + clientid.lval[0] = stp->ls_stateid.other[0]; + clientid.lval[1] = stp->ls_stateid.other[1]; + if (nd->nd_flag & ND_IMPLIEDCLID) { + if (nd->nd_clientid.qval != clientid.qval) + printf("EEK! multiple clids\n"); + } else { + nd->nd_flag |= ND_IMPLIEDCLID; + nd->nd_clientid.qval = clientid.qval; + } + nd->nd_repstat = nfsrv_openupdate(vp, stp, clientid, &stateid, nd, p); + if (!nd->nd_repstat) { + NFSM_BUILD(tl, u_int32_t *, NFSX_STATEID); + *tl++ = txdr_unsigned(stateid.seqid); + NFSBCOPY((caddr_t)stateid.other,(caddr_t)tl,NFSX_STATEIDOTHER); + } +nfsmout: + vput(vp); + return (error); +} + +/* + * nfsv4 open downgrade service + */ +APPLESTATIC int +nfsrvd_opendowngrade(struct nfsrv_descript *nd, __unused int isdgram, + vnode_t vp, NFSPROC_T *p, __unused struct nfsexstuff *exp) +{ + u_int32_t *tl; + int i; + struct nfsstate st, *stp = &st; + int error = 0; + nfsv4stateid_t stateid; + nfsquad_t clientid; + + NFSM_DISSECT(tl, u_int32_t *, NFSX_STATEID + 3 * NFSX_UNSIGNED); + stp->ls_ownerlen = 0; + stp->ls_op = nd->nd_rp; + stp->ls_uid = nd->nd_cred->cr_uid; + stp->ls_stateid.seqid = fxdr_unsigned(u_int32_t, *tl++); + NFSBCOPY((caddr_t)tl, (caddr_t)stp->ls_stateid.other, + NFSX_STATEIDOTHER); + tl += (NFSX_STATEIDOTHER / NFSX_UNSIGNED); + stp->ls_seq = fxdr_unsigned(u_int32_t, *tl++); + i = fxdr_unsigned(int, *tl++); + switch (i) { + case NFSV4OPEN_ACCESSREAD: + stp->ls_flags = (NFSLCK_READACCESS | NFSLCK_DOWNGRADE); + break; + case NFSV4OPEN_ACCESSWRITE: + stp->ls_flags = (NFSLCK_WRITEACCESS | NFSLCK_DOWNGRADE); + break; + case NFSV4OPEN_ACCESSBOTH: + stp->ls_flags = (NFSLCK_READACCESS | NFSLCK_WRITEACCESS | + NFSLCK_DOWNGRADE); + break; + default: + nd->nd_repstat = NFSERR_BADXDR; + }; + i = fxdr_unsigned(int, *tl); + switch (i) { + case NFSV4OPEN_DENYNONE: + break; + case NFSV4OPEN_DENYREAD: + stp->ls_flags |= NFSLCK_READDENY; + break; + case NFSV4OPEN_DENYWRITE: + stp->ls_flags |= NFSLCK_WRITEDENY; + break; + case NFSV4OPEN_DENYBOTH: + stp->ls_flags |= (NFSLCK_READDENY | NFSLCK_WRITEDENY); + break; + default: + nd->nd_repstat = NFSERR_BADXDR; + }; + + clientid.lval[0] = stp->ls_stateid.other[0]; + clientid.lval[1] = stp->ls_stateid.other[1]; + if (nd->nd_flag & ND_IMPLIEDCLID) { + if 
(nd->nd_clientid.qval != clientid.qval) + printf("EEK! multiple clids\n"); + } else { + nd->nd_flag |= ND_IMPLIEDCLID; + nd->nd_clientid.qval = clientid.qval; + } + if (!nd->nd_repstat) + nd->nd_repstat = nfsrv_openupdate(vp, stp, clientid, &stateid, + nd, p); + if (!nd->nd_repstat) { + NFSM_BUILD(tl, u_int32_t *, NFSX_STATEID); + *tl++ = txdr_unsigned(stateid.seqid); + NFSBCOPY((caddr_t)stateid.other,(caddr_t)tl,NFSX_STATEIDOTHER); + } +nfsmout: + vput(vp); + return (error); +} + +/* + * nfsv4 renew lease service + */ +APPLESTATIC int +nfsrvd_renew(struct nfsrv_descript *nd, __unused int isdgram, + __unused vnode_t vp, NFSPROC_T *p, __unused struct nfsexstuff *exp) +{ + u_int32_t *tl; + int error = 0; + nfsquad_t clientid; + + NFSM_DISSECT(tl, u_int32_t *, NFSX_HYPER); + clientid.lval[0] = *tl++; + clientid.lval[1] = *tl; + if (nd->nd_flag & ND_IMPLIEDCLID) { + if (nd->nd_clientid.qval != clientid.qval) + printf("EEK! multiple clids\n"); + } else { + nd->nd_flag |= ND_IMPLIEDCLID; + nd->nd_clientid.qval = clientid.qval; + } + nd->nd_repstat = nfsrv_getclient(clientid, (CLOPS_RENEWOP|CLOPS_RENEW), + NULL, (nfsquad_t)((u_quad_t)0), nd, p); +nfsmout: + return (error); +} + +/* + * nfsv4 security info service + */ +APPLESTATIC int +nfsrvd_secinfo(struct nfsrv_descript *nd, int isdgram, + vnode_t dp, NFSPROC_T *p, struct nfsexstuff *exp) +{ + u_int32_t *tl; + int len; + struct nameidata named; + vnode_t dirp = NULL, vp; + struct nfsrvfh fh; + struct nfsexstuff retnes; + mount_t mp; + u_int32_t *sizp; + int error, savflag, i; + char *bufp; + u_long *hashp; + + /* + * All this just to get the export flags for the name. + */ + NFSNAMEICNDSET(&named.ni_cnd, nd->nd_cred, LOOKUP, + LOCKLEAF | SAVESTART); + nfsvno_setpathbuf(&named, &bufp, &hashp); + error = nfsrv_parsename(nd, bufp, hashp, &named.ni_pathlen); + if (error) { + vput(dp); + nfsvno_relpathbuf(&named); + return (error); + } + if (!nd->nd_repstat) { + nd->nd_repstat = nfsvno_namei(nd, &named, dp, 1, exp, p, &dirp); + } else { + vput(dp); + nfsvno_relpathbuf(&named); + } + if (dirp) + vrele(dirp); + if (nd->nd_repstat) + return (0); + vrele(named.ni_startdir); + nfsvno_relpathbuf(&named); + fh.nfsrvfh_len = NFSX_MYFH; + vp = named.ni_vp; + nd->nd_repstat = nfsvno_getfh(vp, (fhandle_t *)fh.nfsrvfh_data, p); + mp = vnode_mount(vp); /* so it won't try to re-lock filesys */ + retnes.nes_vfslocked = exp->nes_vfslocked; + vput(vp); + savflag = nd->nd_flag; + nd->nd_flag |= ND_GSS; /* so nfsd_fhtovp() won't reply Wrongsec */ + if (!nd->nd_repstat) { + nfsd_fhtovp(nd, &fh, &vp, &retnes, &mp, 0, p); + if (vp) + vput(vp); + } + nd->nd_flag = savflag; + if (nd->nd_repstat) + return (0); + + /* + * Finally have the export flags for name, so we can create + * the security info. 
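+	 * (The reply built below is a counted list: one AUTH_SYS entry when
+	 * the export is not GSS-only, followed by one RPCSEC_GSS triple for
+	 * each of the none/integrity/privacy service levels, each carrying
+	 * the Kerberos V mechanism and QOP.)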
+ */ + len = 0; + NFSM_BUILD(sizp, u_int32_t *, NFSX_UNSIGNED); + if (!NFSVNO_EXGSSONLY(&retnes)) { + NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED); + *tl = txdr_unsigned(RPCAUTH_UNIX); + len++; + } + for (i = RPCAUTHGSS_SVCNONE; i <= RPCAUTHGSS_SVCPRIVACY; i++) { + NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED); + *tl++ = txdr_unsigned(RPCAUTH_GSS); + (void) nfsm_strtom(nd, nfsgss_mechlist[KERBV_MECH].str, + nfsgss_mechlist[KERBV_MECH].len); + NFSM_BUILD(tl, u_int32_t *, 2 * NFSX_UNSIGNED); + *tl++ = txdr_unsigned(GSS_KERBV_QOP); + *tl = txdr_unsigned(i); + len++; + } + *sizp = txdr_unsigned(len); + return (0); +} + +/* + * nfsv4 set client id service + */ +APPLESTATIC int +nfsrvd_setclientid(struct nfsrv_descript *nd, __unused int isdgram, + __unused vnode_t vp, NFSPROC_T *p, __unused struct nfsexstuff *exp) +{ + u_int32_t *tl; + int i; + int error = 0, idlen; + struct nfsclient *clp = NULL; + struct sockaddr_in *rad; + u_char *verf, *ucp, *ucp2, addrbuf[24]; + nfsquad_t clientid, confirm; + + if ((!nfs_rootfhset && !nfsv4root_set) || + (nd->nd_flag & (ND_GSS | ND_EXGSSONLY)) == ND_EXGSSONLY) { + nd->nd_repstat = NFSERR_WRONGSEC; + return (0); + } + NFSM_DISSECT(tl, u_int32_t *, NFSX_VERF + NFSX_UNSIGNED); + verf = (u_char *)tl; + tl += (NFSX_VERF / NFSX_UNSIGNED); + i = fxdr_unsigned(int, *tl); + if (i > NFSV4_OPAQUELIMIT || i <= 0) { + nd->nd_repstat = NFSERR_BADXDR; + return (error); + } + idlen = i; + if (nd->nd_flag & ND_GSS) + i += nd->nd_princlen; + MALLOC(clp, struct nfsclient *, sizeof (struct nfsclient) + i, + M_NFSDCLIENT, M_WAITOK); + NFSBZERO((caddr_t)clp, sizeof (struct nfsclient) + i); + NFSINITSOCKMUTEX(&clp->lc_req.nr_mtx); + NFSSOCKADDRALLOC(clp->lc_req.nr_nam); + NFSSOCKADDRSIZE(clp->lc_req.nr_nam, sizeof (struct sockaddr_in)); + clp->lc_req.nr_cred = NULL; + NFSBCOPY(verf, clp->lc_verf, NFSX_VERF); + clp->lc_idlen = idlen; + error = nfsrv_mtostr(nd, clp->lc_id, idlen); + if (error) + goto nfsmout; + if (nd->nd_flag & ND_GSS) { + clp->lc_flags = LCL_GSS; + if (nd->nd_flag & ND_GSSINTEGRITY) + clp->lc_flags |= LCL_GSSINTEGRITY; + else if (nd->nd_flag & ND_GSSPRIVACY) + clp->lc_flags |= LCL_GSSPRIVACY; + } else { + clp->lc_flags = 0; + } + if ((nd->nd_flag & ND_GSS) && nd->nd_princlen > 0) { + clp->lc_flags |= LCL_NAME; + clp->lc_namelen = nd->nd_princlen; + clp->lc_name = &clp->lc_id[idlen]; + NFSBCOPY(nd->nd_principal, clp->lc_name, clp->lc_namelen); + } else { + clp->lc_uid = nd->nd_cred->cr_uid; + clp->lc_gid = nd->nd_cred->cr_gid; + } + NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED); + clp->lc_program = fxdr_unsigned(u_int32_t, *tl); + error = nfsrv_getclientipaddr(nd, clp); + if (error) + goto nfsmout; + NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED); + clp->lc_callback = fxdr_unsigned(u_int32_t, *tl); + + /* + * nfsrv_setclient() does the actual work of adding it to the + * client list. If there is no error, the structure has been + * linked into the client list and clp should no longer be used + * here. When an error is returned, it has not been linked in, + * so it should be free'd. 
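+	 * (For the NFSERR_CLIDINUSE reply below, a callback netid string of
+	 * "tcp" or "udp" and an "h1.h2.h3.h4.p1.p2" style universal address
+	 * string are also appended before clp is released.)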
+ */ + nd->nd_repstat = nfsrv_setclient(nd, &clp, &clientid, &confirm, p); + if (nd->nd_repstat == NFSERR_CLIDINUSE) { + if (clp->lc_flags & LCL_TCPCALLBACK) + (void) nfsm_strtom(nd, "tcp", 3); + else + (void) nfsm_strtom(nd, "udp", 3); + rad = NFSSOCKADDR(clp->lc_req.nr_nam, struct sockaddr_in *); + ucp = (u_char *)&rad->sin_addr.s_addr; + ucp2 = (u_char *)&rad->sin_port; + sprintf(addrbuf, "%d.%d.%d.%d.%d.%d", ucp[0] & 0xff, + ucp[1] & 0xff, ucp[2] & 0xff, ucp[3] & 0xff, + ucp2[0] & 0xff, ucp2[1] & 0xff); + (void) nfsm_strtom(nd, addrbuf, strlen(addrbuf)); + } + if (clp) { + NFSSOCKADDRFREE(clp->lc_req.nr_nam); + NFSFREEMUTEX(&clp->lc_req.nr_mtx); + free((caddr_t)clp, M_NFSDCLIENT); + } + if (!nd->nd_repstat) { + NFSM_BUILD(tl, u_int32_t *, 2 * NFSX_HYPER); + *tl++ = clientid.lval[0]; + *tl++ = clientid.lval[1]; + *tl++ = confirm.lval[0]; + *tl = confirm.lval[1]; + } + return (0); +nfsmout: + if (clp) { + NFSSOCKADDRFREE(clp->lc_req.nr_nam); + NFSFREEMUTEX(&clp->lc_req.nr_mtx); + free((caddr_t)clp, M_NFSDCLIENT); + } + return (error); +} + +/* + * nfsv4 set client id confirm service + */ +APPLESTATIC int +nfsrvd_setclientidcfrm(struct nfsrv_descript *nd, + __unused int isdgram, __unused vnode_t vp, NFSPROC_T *p, + __unused struct nfsexstuff *exp) +{ + u_int32_t *tl; + int error = 0; + nfsquad_t clientid, confirm; + + if ((!nfs_rootfhset && !nfsv4root_set) || + (nd->nd_flag & (ND_GSS | ND_EXGSSONLY)) == ND_EXGSSONLY) { + nd->nd_repstat = NFSERR_WRONGSEC; + return (0); + } + NFSM_DISSECT(tl, u_int32_t *, 2 * NFSX_HYPER); + clientid.lval[0] = *tl++; + clientid.lval[1] = *tl++; + confirm.lval[0] = *tl++; + confirm.lval[1] = *tl; + + /* + * nfsrv_getclient() searches the client list for a match and + * returns the appropriate NFSERR status. + */ + nd->nd_repstat = nfsrv_getclient(clientid, (CLOPS_CONFIRM|CLOPS_RENEW), + NULL, confirm, nd, p); +nfsmout: + return (error); +} + +/* + * nfsv4 verify service + */ +APPLESTATIC int +nfsrvd_verify(struct nfsrv_descript *nd, int isdgram, + vnode_t vp, NFSPROC_T *p, __unused struct nfsexstuff *exp) +{ + int error = 0, ret, fhsize = NFSX_MYFH; + struct nfsvattr nva; + struct statfs sf; + struct nfsfsinfo fs; + fhandle_t fh; + + nd->nd_repstat = nfsvno_getattr(vp, &nva, nd->nd_cred, p); + if (!nd->nd_repstat) + nd->nd_repstat = nfsvno_statfs(vp, &sf, nd->nd_cred, p); + if (!nd->nd_repstat) + nd->nd_repstat = nfsvno_getfh(vp, &fh, p); + if (!nd->nd_repstat) { + nfsvno_getfs(&fs, isdgram); + error = nfsv4_loadattr(nd, vp, &nva, NULL, &fh, fhsize, NULL, + &sf, NULL, &fs, NULL, 1, &ret, NULL, NULL, p, nd->nd_cred); + if (!error) { + if (nd->nd_procnum == NFSV4OP_NVERIFY) { + if (ret == 0) + nd->nd_repstat = NFSERR_SAME; + else if (ret != NFSERR_NOTSAME) + nd->nd_repstat = ret; + } else if (ret) + nd->nd_repstat = ret; + } + } + vput(vp); + return (error); +} + +/* + * nfs openattr rpc + */ +APPLESTATIC int +nfsrvd_openattr(struct nfsrv_descript *nd, __unused int isdgram, + vnode_t dp, __unused vnode_t *vpp, __unused fhandle_t *fhp, + __unused NFSPROC_T *p, __unused struct nfsexstuff *exp) +{ + u_int32_t *tl; + int error = 0, createdir; + + NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED); + createdir = fxdr_unsigned(int, *tl); + nd->nd_repstat = NFSERR_NOTSUPP; +nfsmout: + vrele(dp); + return (error); +} + +/* + * nfsv4 release lock owner service + */ +APPLESTATIC int +nfsrvd_releaselckown(struct nfsrv_descript *nd, __unused int isdgram, + __unused vnode_t vp, NFSPROC_T *p, __unused struct nfsexstuff *exp) +{ + u_int32_t *tl; + struct nfsstate *stp = NULL; + int 
error = 0, len; + nfsquad_t clientid; + + NFSM_DISSECT(tl, u_int32_t *, 3 * NFSX_UNSIGNED); + len = fxdr_unsigned(int, *(tl + 2)); + MALLOC(stp, struct nfsstate *, sizeof (struct nfsstate) + len, + M_NFSDSTATE, M_WAITOK); + stp->ls_ownerlen = len; + stp->ls_op = NULL; + stp->ls_flags = NFSLCK_RELEASE; + stp->ls_uid = nd->nd_cred->cr_uid; + clientid.lval[0] = *tl++; + clientid.lval[1] = *tl; + if (nd->nd_flag & ND_IMPLIEDCLID) { + if (nd->nd_clientid.qval != clientid.qval) + printf("EEK! multiple clids\n"); + } else { + nd->nd_flag |= ND_IMPLIEDCLID; + nd->nd_clientid.qval = clientid.qval; + } + error = nfsrv_mtostr(nd, stp->ls_owner, len); + if (error) + goto nfsmout; + nd->nd_repstat = nfsrv_releaselckown(stp, clientid, p); + FREE((caddr_t)stp, M_NFSDSTATE); + return (0); +nfsmout: + if (stp) + free((caddr_t)stp, M_NFSDSTATE); + return (error); +} diff --git a/sys/fs/nfsserver/nfs_nfsdsocket.c b/sys/fs/nfsserver/nfs_nfsdsocket.c new file mode 100644 index 0000000..29592a9 --- /dev/null +++ b/sys/fs/nfsserver/nfs_nfsdsocket.c @@ -0,0 +1,979 @@ +/*- + * Copyright (c) 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Rick Macklem at The University of Guelph. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ + +#include <sys/cdefs.h> +__FBSDID("$FreeBSD$"); + +/* + * Socket operations for use by the nfs server. 
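+ * Below are the dispatch tables that map NFSv2/3 procedure numbers and
+ * NFSv4 operation numbers onto the nfsrvd_xxx service routines, along
+ * with nfsrvd_dorpc() and nfsrvd_compound() that drive them.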
+ */ + +#ifndef APPLEKEXT +#include <fs/nfs/nfsport.h> + +extern struct nfsstats newnfsstats; +extern struct nfsrvfh nfs_pubfh, nfs_rootfh; +extern int nfs_pubfhset, nfs_rootfhset; +extern struct nfsv4lock nfsv4rootfs_lock; +extern struct nfsrv_stablefirst nfsrv_stablefirst; +extern struct nfsclienthashhead nfsclienthash[NFSCLIENTHASHSIZE]; +extern int nfsrc_floodlevel, nfsrc_tcpsavedreplies; +NFSV4ROOTLOCKMUTEX; +NFSSTATESPINLOCK; +vnode_t nfsv4root_vp = NULL; +int nfsv4root_set = 0; + +int (*nfsrv3_procs0[NFS_V3NPROCS])(struct nfsrv_descript *, + int, vnode_t , NFSPROC_T *, struct nfsexstuff *) = { + (int (*)(struct nfsrv_descript *, int, vnode_t , NFSPROC_T *, struct nfsexstuff *))0, + nfsrvd_getattr, + nfsrvd_setattr, + (int (*)(struct nfsrv_descript *, int, vnode_t , NFSPROC_T *, struct nfsexstuff *))0, + nfsrvd_access, + nfsrvd_readlink, + nfsrvd_read, + nfsrvd_write, + nfsrvd_create, + (int (*)(struct nfsrv_descript *, int, vnode_t , NFSPROC_T *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , NFSPROC_T *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , NFSPROC_T *, struct nfsexstuff *))0, + nfsrvd_remove, + nfsrvd_remove, + (int (*)(struct nfsrv_descript *, int, vnode_t , NFSPROC_T *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , NFSPROC_T *, struct nfsexstuff *))0, + nfsrvd_readdir, + nfsrvd_readdirplus, + nfsrvd_statfs, + nfsrvd_fsinfo, + nfsrvd_pathconf, + nfsrvd_commit, +}; + +int (*nfsrv3_procs1[NFS_V3NPROCS])(struct nfsrv_descript *, + int, vnode_t , vnode_t *, fhandle_t *, + NFSPROC_T *, struct nfsexstuff *) = { + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t *, fhandle_t *, NFSPROC_T *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t *, fhandle_t *, NFSPROC_T *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t *, fhandle_t *, NFSPROC_T *, struct nfsexstuff *))0, + nfsrvd_lookup, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t *, fhandle_t *, NFSPROC_T *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t *, fhandle_t *, NFSPROC_T *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t *, fhandle_t *, NFSPROC_T *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t *, fhandle_t *, NFSPROC_T *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t *, fhandle_t *, NFSPROC_T *, struct nfsexstuff *))0, + nfsrvd_mkdir, + nfsrvd_symlink, + nfsrvd_mknod, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t *, fhandle_t *, NFSPROC_T *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t *, fhandle_t *, NFSPROC_T *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t *, fhandle_t *, NFSPROC_T *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t *, fhandle_t *, NFSPROC_T *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t *, fhandle_t *, NFSPROC_T *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t *, fhandle_t *, NFSPROC_T *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t *, fhandle_t *, NFSPROC_T *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t *, fhandle_t *, NFSPROC_T *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t 
*, fhandle_t *, NFSPROC_T *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t *, fhandle_t *, NFSPROC_T *, struct nfsexstuff *))0, +}; + +int (*nfsrv3_procs2[NFS_V3NPROCS])(struct nfsrv_descript *, + int, vnode_t , vnode_t , NFSPROC_T *, + struct nfsexstuff *, struct nfsexstuff *) = { + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t , NFSPROC_T *, struct nfsexstuff *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t , NFSPROC_T *, struct nfsexstuff *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t , NFSPROC_T *, struct nfsexstuff *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t , NFSPROC_T *, struct nfsexstuff *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t , NFSPROC_T *, struct nfsexstuff *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t , NFSPROC_T *, struct nfsexstuff *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t , NFSPROC_T *, struct nfsexstuff *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t , NFSPROC_T *, struct nfsexstuff *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t , NFSPROC_T *, struct nfsexstuff *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t , NFSPROC_T *, struct nfsexstuff *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t , NFSPROC_T *, struct nfsexstuff *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t , NFSPROC_T *, struct nfsexstuff *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t , NFSPROC_T *, struct nfsexstuff *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t , NFSPROC_T *, struct nfsexstuff *, struct nfsexstuff *))0, + nfsrvd_rename, + nfsrvd_link, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t , NFSPROC_T *, struct nfsexstuff *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t , NFSPROC_T *, struct nfsexstuff *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t , NFSPROC_T *, struct nfsexstuff *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t , NFSPROC_T *, struct nfsexstuff *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t , NFSPROC_T *, struct nfsexstuff *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t , NFSPROC_T *, struct nfsexstuff *, struct nfsexstuff *))0, +}; + +int (*nfsrv4_ops0[NFSV4OP_NOPS])(struct nfsrv_descript *, + int, vnode_t , NFSPROC_T *, struct nfsexstuff *) = { + (int (*)(struct nfsrv_descript *, int, vnode_t , NFSPROC_T *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , NFSPROC_T *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , NFSPROC_T *, struct nfsexstuff *))0, + nfsrvd_access, + nfsrvd_close, + nfsrvd_commit, + (int (*)(struct nfsrv_descript *, int, vnode_t , NFSPROC_T *, struct nfsexstuff *))0, + nfsrvd_delegpurge, + nfsrvd_delegreturn, + nfsrvd_getattr, + nfsrvd_getfh, + (int (*)(struct nfsrv_descript *, int, vnode_t , NFSPROC_T *, struct nfsexstuff *))0, + nfsrvd_lock, + nfsrvd_lockt, + nfsrvd_locku, + (int (*)(struct nfsrv_descript *, int, vnode_t , NFSPROC_T *, struct nfsexstuff *))0, 
+ (int (*)(struct nfsrv_descript *, int, vnode_t , NFSPROC_T *, struct nfsexstuff *))0, + nfsrvd_verify, + (int (*)(struct nfsrv_descript *, int, vnode_t , NFSPROC_T *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , NFSPROC_T *, struct nfsexstuff *))0, + nfsrvd_openconfirm, + nfsrvd_opendowngrade, + (int (*)(struct nfsrv_descript *, int, vnode_t , NFSPROC_T *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , NFSPROC_T *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , NFSPROC_T *, struct nfsexstuff *))0, + nfsrvd_read, + nfsrvd_readdirplus, + nfsrvd_readlink, + nfsrvd_remove, + (int (*)(struct nfsrv_descript *, int, vnode_t , NFSPROC_T *, struct nfsexstuff *))0, + nfsrvd_renew, + (int (*)(struct nfsrv_descript *, int, vnode_t , NFSPROC_T *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , NFSPROC_T *, struct nfsexstuff *))0, + nfsrvd_secinfo, + nfsrvd_setattr, + nfsrvd_setclientid, + nfsrvd_setclientidcfrm, + nfsrvd_verify, + nfsrvd_write, + nfsrvd_releaselckown, +}; + +int (*nfsrv4_ops1[NFSV4OP_NOPS])(struct nfsrv_descript *, + int, vnode_t , vnode_t *, fhandle_t *, + NFSPROC_T *, struct nfsexstuff *) = { + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t *, fhandle_t *, NFSPROC_T *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t *, fhandle_t *, NFSPROC_T *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t *, fhandle_t *, NFSPROC_T *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t *, fhandle_t *, NFSPROC_T *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t *, fhandle_t *, NFSPROC_T *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t *, fhandle_t *, NFSPROC_T *, struct nfsexstuff *))0, + nfsrvd_mknod, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t *, fhandle_t *, NFSPROC_T *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t *, fhandle_t *, NFSPROC_T *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t *, fhandle_t *, NFSPROC_T *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t *, fhandle_t *, NFSPROC_T *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t *, fhandle_t *, NFSPROC_T *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t *, fhandle_t *, NFSPROC_T *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t *, fhandle_t *, NFSPROC_T *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t *, fhandle_t *, NFSPROC_T *, struct nfsexstuff *))0, + nfsrvd_lookup, + nfsrvd_lookup, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t *, fhandle_t *, NFSPROC_T *, struct nfsexstuff *))0, + nfsrvd_open, + nfsrvd_openattr, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t *, fhandle_t *, NFSPROC_T *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t *, fhandle_t *, NFSPROC_T *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t *, fhandle_t *, NFSPROC_T *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t *, fhandle_t *, NFSPROC_T *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t *, fhandle_t *, NFSPROC_T *, struct nfsexstuff *))0, 
+ (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t *, fhandle_t *, NFSPROC_T *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t *, fhandle_t *, NFSPROC_T *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t *, fhandle_t *, NFSPROC_T *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t *, fhandle_t *, NFSPROC_T *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t *, fhandle_t *, NFSPROC_T *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t *, fhandle_t *, NFSPROC_T *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t *, fhandle_t *, NFSPROC_T *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t *, fhandle_t *, NFSPROC_T *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t *, fhandle_t *, NFSPROC_T *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t *, fhandle_t *, NFSPROC_T *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t *, fhandle_t *, NFSPROC_T *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t *, fhandle_t *, NFSPROC_T *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t *, fhandle_t *, NFSPROC_T *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t *, fhandle_t *, NFSPROC_T *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t *, fhandle_t *, NFSPROC_T *, struct nfsexstuff *))0, +}; + +int (*nfsrv4_ops2[NFSV4OP_NOPS])(struct nfsrv_descript *, + int, vnode_t , vnode_t , NFSPROC_T *, + struct nfsexstuff *, struct nfsexstuff *) = { + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t , NFSPROC_T *, struct nfsexstuff *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t , NFSPROC_T *, struct nfsexstuff *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t , NFSPROC_T *, struct nfsexstuff *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t , NFSPROC_T *, struct nfsexstuff *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t , NFSPROC_T *, struct nfsexstuff *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t , NFSPROC_T *, struct nfsexstuff *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t , NFSPROC_T *, struct nfsexstuff *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t , NFSPROC_T *, struct nfsexstuff *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t , NFSPROC_T *, struct nfsexstuff *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t , NFSPROC_T *, struct nfsexstuff *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t , NFSPROC_T *, struct nfsexstuff *, struct nfsexstuff *))0, + nfsrvd_link, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t , NFSPROC_T *, struct nfsexstuff *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t , NFSPROC_T *, struct nfsexstuff *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t , NFSPROC_T *, struct nfsexstuff *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, 
vnode_t , vnode_t , NFSPROC_T *, struct nfsexstuff *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t , NFSPROC_T *, struct nfsexstuff *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t , NFSPROC_T *, struct nfsexstuff *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t , NFSPROC_T *, struct nfsexstuff *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t , NFSPROC_T *, struct nfsexstuff *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t , NFSPROC_T *, struct nfsexstuff *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t , NFSPROC_T *, struct nfsexstuff *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t , NFSPROC_T *, struct nfsexstuff *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t , NFSPROC_T *, struct nfsexstuff *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t , NFSPROC_T *, struct nfsexstuff *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t , NFSPROC_T *, struct nfsexstuff *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t , NFSPROC_T *, struct nfsexstuff *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t , NFSPROC_T *, struct nfsexstuff *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t , NFSPROC_T *, struct nfsexstuff *, struct nfsexstuff *))0, + nfsrvd_rename, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t , NFSPROC_T *, struct nfsexstuff *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t , NFSPROC_T *, struct nfsexstuff *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t , NFSPROC_T *, struct nfsexstuff *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t , NFSPROC_T *, struct nfsexstuff *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t , NFSPROC_T *, struct nfsexstuff *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t , NFSPROC_T *, struct nfsexstuff *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t , NFSPROC_T *, struct nfsexstuff *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t , NFSPROC_T *, struct nfsexstuff *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t , NFSPROC_T *, struct nfsexstuff *, struct nfsexstuff *))0, + (int (*)(struct nfsrv_descript *, int, vnode_t , vnode_t , NFSPROC_T *, struct nfsexstuff *, struct nfsexstuff *))0, +}; +#endif /* !APPLEKEXT */ + +/* + * Static array that defines which nfs rpc's are nonidempotent + */ +static int nfsrv_nonidempotent[NFS_V3NPROCS] = { + FALSE, + FALSE, + TRUE, + FALSE, + FALSE, + FALSE, + FALSE, + TRUE, + TRUE, + TRUE, + TRUE, + TRUE, + TRUE, + TRUE, + TRUE, + TRUE, + FALSE, + FALSE, + FALSE, + FALSE, + FALSE, + FALSE, +}; + +/* + * This static array indicates whether or not the RPC modifies the + * file system. 
+ */ +static int nfs_writerpc[NFS_NPROCS] = { 0, 0, 1, 0, 0, 0, 0, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1 }; + +/* local functions */ +static void nfsrvd_compound(struct nfsrv_descript *nd, int isdgram, + NFSPROC_T *p); + + +/* + * This static array indicates which server procedures require the extra + * arguments to return the current file handle for V2, 3. + */ +static int nfs_retfh[NFS_V3NPROCS] = { 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, + 1, 0, 0, 2, 2, 0, 0, 0, 0, 0, 0 }; + +extern struct nfsv4_opflag nfsv4_opflag[NFSV4OP_NOPS]; + +static int nfsv3to4op[NFS_V3NPROCS] = { + NFSPROC_NULL, + NFSV4OP_GETATTR, + NFSV4OP_SETATTR, + NFSV4OP_LOOKUP, + NFSV4OP_ACCESS, + NFSV4OP_READLINK, + NFSV4OP_READ, + NFSV4OP_WRITE, + NFSV4OP_V3CREATE, + NFSV4OP_MKDIR, + NFSV4OP_SYMLINK, + NFSV4OP_MKNOD, + NFSV4OP_REMOVE, + NFSV4OP_RMDIR, + NFSV4OP_RENAME, + NFSV4OP_LINK, + NFSV4OP_READDIR, + NFSV4OP_READDIRPLUS, + NFSV4OP_FSSTAT, + NFSV4OP_FSINFO, + NFSV4OP_PATHCONF, + NFSV4OP_COMMIT, +}; + +/* + * Do an RPC. Basically, get the file handles translated to vnode pointers + * and then call the appropriate server routine. The server routines are + * split into groups, based on whether they use a file handle or file + * handle plus name or ... + * The NFS V4 Compound RPC is performed separately by nfsrvd_compound(). + */ +APPLESTATIC void +nfsrvd_dorpc(struct nfsrv_descript *nd, int isdgram, + NFSPROC_T *p) +{ + int error = 0; + vnode_t vp; + mount_t mp = NULL; + struct nfsrvfh fh; + struct nfsexstuff nes; + + /* + * Get a locked vnode for the first file handle + */ + if (!(nd->nd_flag & ND_NFSV4)) { +#ifdef DIAGNOSTIC + if (nd->nd_repstat) + panic("nfsrvd_dorpc"); +#endif + /* + * For NFSv3, if the malloc/mget allocation is near limits, + * return NFSERR_DELAY. + */ + if ((nd->nd_flag & ND_NFSV3) && nfsrv_mallocmget_limit()) { + nd->nd_repstat = NFSERR_DELAY; + vp = NULL; + } else { + error = nfsrv_mtofh(nd, &fh); + if (error) { + if (error != EBADRPC) + printf("nfs dorpc err1=%d\n", error); + nd->nd_repstat = NFSERR_GARBAGE; + return; + } + nes.nes_vfslocked = 0; + if (nd->nd_flag & ND_PUBLOOKUP) + nfsd_fhtovp(nd, &nfs_pubfh, &vp, &nes, + &mp, nfs_writerpc[nd->nd_procnum], p); + else + nfsd_fhtovp(nd, &fh, &vp, &nes, + &mp, nfs_writerpc[nd->nd_procnum], p); + if (nd->nd_repstat == NFSERR_PROGNOTV4) + return; + } + } + + /* + * For V2 and 3, set the ND_SAVEREPLY flag for the recent request + * cache, as required. + * For V4, nfsrvd_compound() does this. + */ + if (!(nd->nd_flag & ND_NFSV4) && nfsrv_nonidempotent[nd->nd_procnum]) + nd->nd_flag |= ND_SAVEREPLY; + + nfsrvd_rephead(nd); + /* + * If nd_repstat is non-zero, just fill in the reply status + * to complete the RPC reply for V2. Otherwise, you must do + * the RPC. + */ + if (nd->nd_repstat && (nd->nd_flag & ND_NFSV2)) { + *nd->nd_errp = nfsd_errmap(nd); + NFSINCRGLOBAL(newnfsstats.srvrpccnt[nfsv3to4op[nd->nd_procnum]]); + if (mp != NULL) { + if (nfs_writerpc[nd->nd_procnum]) + NFS_ENDWRITE(mp); + if (nes.nes_vfslocked) + nfsvno_unlockvfs(mp); + } + return; + } + + /* + * Now the procedure can be performed. For V4, nfsrvd_compound() + * works through the sub-rpcs, otherwise just call the procedure. + * The procedures are in three groups with different arguments. + * The group is indicated by the value in nfs_retfh[]. 
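+	 * For example, nfs_retfh[] is 1 for Lookup, Mkdir, Symlink and Mknod
+	 * (dispatched via nfsrv3_procs1[], which return a new file handle),
+	 * 2 for Rename and Link (nfsrv3_procs2[], which take a second vnode)
+	 * and 0 for the remaining procedures (nfsrv3_procs0[]).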
+ */ + if (nd->nd_flag & ND_NFSV4) { + nfsrvd_compound(nd, isdgram, p); + } else { + if (nfs_retfh[nd->nd_procnum] == 1) { + if (vp) + NFSVOPUNLOCK(vp, 0, p); + error = (*(nfsrv3_procs1[nd->nd_procnum]))(nd, isdgram, + vp, NULL, (fhandle_t *)fh.nfsrvfh_data, p, &nes); + } else if (nfs_retfh[nd->nd_procnum] == 2) { + error = (*(nfsrv3_procs2[nd->nd_procnum]))(nd, isdgram, + vp, NULL, p, &nes, NULL); + } else { + error = (*(nfsrv3_procs0[nd->nd_procnum]))(nd, isdgram, + vp, p, &nes); + } + if (mp) { + if (nfs_writerpc[nd->nd_procnum]) + NFS_ENDWRITE(mp); + if (nes.nes_vfslocked) + nfsvno_unlockvfs(mp); + } + NFSINCRGLOBAL(newnfsstats.srvrpccnt[nfsv3to4op[nd->nd_procnum]]); + } + if (error) { + if (error != EBADRPC) + printf("nfs dorpc err2=%d\n", error); + nd->nd_repstat = NFSERR_GARBAGE; + } + *nd->nd_errp = nfsd_errmap(nd); + + /* + * Don't cache certain reply status values. + */ + if (nd->nd_repstat && (nd->nd_flag & ND_SAVEREPLY) && + (nd->nd_repstat == NFSERR_GARBAGE || + nd->nd_repstat == NFSERR_BADXDR || + nd->nd_repstat == NFSERR_MOVED || + nd->nd_repstat == NFSERR_DELAY || + nd->nd_repstat == NFSERR_BADSEQID || + nd->nd_repstat == NFSERR_RESOURCE || + nd->nd_repstat == NFSERR_SERVERFAULT || + nd->nd_repstat == NFSERR_STALECLIENTID || + nd->nd_repstat == NFSERR_STALESTATEID || + nd->nd_repstat == NFSERR_OLDSTATEID || + nd->nd_repstat == NFSERR_BADSTATEID || + nd->nd_repstat == NFSERR_GRACE || + nd->nd_repstat == NFSERR_NOGRACE)) + nd->nd_flag &= ~ND_SAVEREPLY; +} + +/* + * Breaks down a compound RPC request and calls the server routines for + * the subprocedures. + * Some suboperations are performed directly here to simplify file handle<--> + * vnode pointer handling. + */ +static void +nfsrvd_compound(struct nfsrv_descript *nd, int isdgram, + NFSPROC_T *p) +{ + int i, op; + u_int32_t *tl; + struct nfsclient *clp, *nclp; + int numops, taglen = -1, error = 0, igotlock; + u_int32_t minorvers, retops = 0, *retopsp = NULL, *repp; + u_char tag[NFSV4_SMALLSTR + 1], *tagstr; + vnode_t vp, nvp, savevp; + struct nfsrvfh fh; + mount_t mp, savemp; + struct ucred *credanon; + struct nfsexstuff nes, vpnes, savevpnes; + static u_int64_t compref = 0; + + NFSVNO_EXINIT(&vpnes); + NFSVNO_EXINIT(&savevpnes); + /* + * Put the seq# of the current compound RPC in nfsrv_descript. + * (This is used by nfsrv_checkgetattr(), to see if the write + * delegation was created by the same compound RPC as the one + * with that Getattr in it.) + * Don't worry about the 64bit number wrapping around. It ain't + * gonna happen before this server gets shut down/rebooted. + */ + nd->nd_compref = compref++; + + /* + * Check for and optionally get a lock on the root. This lock means that + * no nfsd will be fiddling with the V4 file system and state stuff. It + * is required when the V4 root is being changed, the stable storage + * restart file is being updated, or callbacks are being done. + * When any of the nfsd are processing an NFSv4 compound RPC, they must + * either hold a reference count (nfs_usecnt) or the lock. When + * nfsrv_unlock() is called to release the lock, it can optionally + * also get a reference count, which saves the need for a call to + * nfsrv_getref() after nfsrv_unlock(). + */ + /* + * First, check to see if we need to wait for an update lock. 
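+	 * (If the exclusive lock is obtained here, this nfsd can safely
+	 * update the stable storage restart file and expire stale clients
+	 * below; otherwise it settles for a shared reference on
+	 * nfsv4rootfs_lock via nfsv4_getref().)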
+ */ + igotlock = 0; + NFSLOCKV4ROOTMUTEX(); + if (nfsrv_stablefirst.nsf_flags & NFSNSF_NEEDLOCK) + igotlock = nfsv4_lock(&nfsv4rootfs_lock, 1, NULL, + NFSV4ROOTLOCKMUTEXPTR); + else + igotlock = nfsv4_lock(&nfsv4rootfs_lock, 0, NULL, + NFSV4ROOTLOCKMUTEXPTR); + NFSUNLOCKV4ROOTMUTEX(); + if (igotlock) { + NFSLOCKSTATE(); /* to avoid a race with */ + NFSUNLOCKSTATE(); /* nfsrv_servertimer() */ + /* + * If I got the lock, I can update the stable storage file. + * Done when the grace period is over or a client has long + * since expired. + */ + nfsrv_stablefirst.nsf_flags &= ~NFSNSF_NEEDLOCK; + if ((nfsrv_stablefirst.nsf_flags & + (NFSNSF_GRACEOVER | NFSNSF_UPDATEDONE)) == NFSNSF_GRACEOVER) + nfsrv_updatestable(p); + + /* + * If at least one client has long since expired, search + * the client list for them, write a REVOKE record on the + * stable storage file and then remove them from the client + * list. + */ + if (nfsrv_stablefirst.nsf_flags & NFSNSF_EXPIREDCLIENT) { + nfsrv_stablefirst.nsf_flags &= ~NFSNSF_EXPIREDCLIENT; + for (i = 0; i < NFSCLIENTHASHSIZE; i++) { + LIST_FOREACH_SAFE(clp, &nfsclienthash[i], lc_hash, + nclp) { + if (clp->lc_flags & LCL_EXPIREIT) { + if (!LIST_EMPTY(&clp->lc_open) || + !LIST_EMPTY(&clp->lc_deleg)) + nfsrv_writestable(clp->lc_id, + clp->lc_idlen, NFSNST_REVOKE, p); + nfsrv_cleanclient(clp, p); + nfsrv_freedeleglist(&clp->lc_deleg); + nfsrv_freedeleglist(&clp->lc_olddeleg); + LIST_REMOVE(clp, lc_hash); + nfsrv_zapclient(clp, p); + } + } + } + } + NFSLOCKV4ROOTMUTEX(); + nfsv4_unlock(&nfsv4rootfs_lock, 1); + NFSUNLOCKV4ROOTMUTEX(); + } else { + /* + * If we didn't get the lock, we need to get a refcnt, + * which also checks for and waits for the lock. + */ + NFSLOCKV4ROOTMUTEX(); + nfsv4_getref(&nfsv4rootfs_lock, NULL, + NFSV4ROOTLOCKMUTEXPTR); + NFSUNLOCKV4ROOTMUTEX(); + } + + /* + * If flagged, search for open owners that haven't had any opens + * for a long time. + */ + if (nfsrv_stablefirst.nsf_flags & NFSNSF_NOOPENS) { + nfsrv_throwawayopens(p); + } + + savevp = vp = NULL; + savevpnes.nes_vfslocked = vpnes.nes_vfslocked = 0; + savemp = mp = NULL; + NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED); + taglen = fxdr_unsigned(int, *tl); + if (taglen < 0) { + error = EBADRPC; + goto nfsmout; + } + if (taglen <= NFSV4_SMALLSTR) + tagstr = tag; + else + tagstr = malloc(taglen + 1, M_TEMP, M_WAITOK); + error = nfsrv_mtostr(nd, tagstr, taglen); + if (error) { + if (taglen > NFSV4_SMALLSTR) + free(tagstr, M_TEMP); + taglen = -1; + goto nfsmout; + } + (void) nfsm_strtom(nd, tag, taglen); + if (taglen > NFSV4_SMALLSTR) { + free(tagstr, M_TEMP); + } + NFSM_BUILD(retopsp, u_int32_t *, NFSX_UNSIGNED); + NFSM_DISSECT(tl, u_int32_t *, 2 * NFSX_UNSIGNED); + minorvers = fxdr_unsigned(u_int32_t, *tl++); + if (minorvers != NFSV4_MINORVERSION) + nd->nd_repstat = NFSERR_MINORVERMISMATCH; + if (nd->nd_repstat) + numops = 0; + else + numops = fxdr_unsigned(int, *tl); + /* + * Loop around doing the sub ops. + * vp - is an unlocked vnode pointer for the CFH + * savevp - is an unlocked vnode pointer for the SAVEDFH + * (at some future date, it might turn out to be more appropriate + * to keep the file handles instead of vnode pointers?) + * savevpnes and vpnes - are the export flags for the above. 
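+	 * For instance, a Putfh/Savefh/Putfh/Rename compound leaves the
+	 * source directory in savevp and the target directory in vp, and the
+	 * two-handle Rename op is then invoked as
+	 * (*nfsrv4_ops2[op])(nd, isdgram, savevp, vp, ...).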
+ */ + for (i = 0; i < numops; i++) { + NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED); + NFSM_BUILD(repp, u_int32_t *, 2 * NFSX_UNSIGNED); + *repp++ = *tl; + op = fxdr_unsigned(int, *tl); + if (op < NFSV4OP_ACCESS || op >= NFSV4OP_NOPS) { + nd->nd_repstat = NFSERR_OPILLEGAL; + *repp = nfsd_errmap(nd); + retops++; + break; + } + + /* + * Check for a referral on the current FH and, if so, return + * NFSERR_MOVED for all ops that allow it, except Getattr. + */ + if (vp != NULL && op != NFSV4OP_GETATTR && + nfsv4root_getreferral(vp, NULL, 0) != NULL && + nfsrv_errmoved(op)) { + nd->nd_repstat = NFSERR_MOVED; + *repp = nfsd_errmap(nd); + retops++; + break; + } + + nd->nd_procnum = op; + /* + * If over flood level, reply NFSERR_RESOURCE, if at the first + * Op. (Since a client recovery from NFSERR_RESOURCE can get + * really nasty for certain Op sequences, I'll play it safe + * and only return the error at the beginning.) The cache + * will still function over flood level, but uses lots of + * mbufs.) + * If nfsrv_mallocmget_limit() returns True, the system is near + * to its limit for memory that malloc()/mget() can allocate. + */ + if (i == 0 && nd->nd_rp->rc_refcnt == 0 && + (nfsrv_mallocmget_limit() || + nfsrc_tcpsavedreplies > nfsrc_floodlevel)) { + if (nfsrc_tcpsavedreplies > nfsrc_floodlevel) { + printf("nfsd server cache flooded, try to"); + printf(" increase nfsrc_floodlevel\n"); + } + nd->nd_repstat = NFSERR_RESOURCE; + *repp = nfsd_errmap(nd); + if (op == NFSV4OP_SETATTR) { + /* + * Setattr replies require a bitmap. + * even for errors like these. + */ + NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED); + *tl = 0; + } + retops++; + break; + } + if (nfsv4_opflag[op].savereply) + nd->nd_flag |= ND_SAVEREPLY; + NFSINCRGLOBAL(newnfsstats.srvrpccnt[nd->nd_procnum]); + switch (op) { + case NFSV4OP_PUTFH: + error = nfsrv_mtofh(nd, &fh); + if (error) + goto nfsmout; + if (!nd->nd_repstat) { + nes.nes_vfslocked = vpnes.nes_vfslocked; + nfsd_fhtovp(nd, &fh, &nvp, &nes, &mp, + 0, p); + } + /* For now, allow this for non-export FHs */ + if (!nd->nd_repstat) { + if (vp) + vrele(vp); + vp = nvp; + NFSVOPUNLOCK(vp, 0, p); + vpnes = nes; + } + break; + case NFSV4OP_PUTPUBFH: + if (nfs_pubfhset) { + nes.nes_vfslocked = vpnes.nes_vfslocked; + nfsd_fhtovp(nd, &nfs_pubfh, &nvp, + &nes, &mp, 0, p); + } else { + nd->nd_repstat = NFSERR_NOFILEHANDLE; + } + if (!nd->nd_repstat) { + if (vp) + vrele(vp); + vp = nvp; + NFSVOPUNLOCK(vp, 0, p); + vpnes = nes; + } + break; + case NFSV4OP_PUTROOTFH: + if (nfs_rootfhset) { + nes.nes_vfslocked = vpnes.nes_vfslocked; + nfsd_fhtovp(nd, &nfs_rootfh, &nvp, + &nes, &mp, 0, p); + if (!nd->nd_repstat) { + if (vp) + vrele(vp); + vp = nvp; + NFSVOPUNLOCK(vp, 0, p); + vpnes = nes; + } + } else if (nfsv4root_vp && nfsv4root_set) { + if (vp) { + if (vpnes.nes_vfslocked) + nfsvno_unlockvfs(mp); + vrele(vp); + } + vp = nfsv4root_vp; + VREF(vp); + NFSVNO_SETEXRDONLY(&vpnes); + vpnes.nes_vfslocked = 0; + mp = vnode_mount(vp); + } else { + nd->nd_repstat = NFSERR_NOFILEHANDLE; + } + break; + case NFSV4OP_SAVEFH: + if (vp && NFSVNO_EXPORTED(&vpnes)) { + nd->nd_repstat = 0; + /* If vp == savevp, a no-op */ + if (vp != savevp) { + if (savevp) + vrele(savevp); + VREF(vp); + savevp = vp; + savevpnes = vpnes; + savemp = mp; + } + } else { + nd->nd_repstat = NFSERR_NOFILEHANDLE; + } + break; + case NFSV4OP_RESTOREFH: + if (savevp) { + nd->nd_repstat = 0; + /* If vp == savevp, a no-op */ + if (vp != savevp) { + VREF(savevp); + if (mp == NULL || savemp == NULL) + panic("nfscmpmp"); + if 
(!savevpnes.nes_vfslocked && + vpnes.nes_vfslocked) { + if (mp == savemp) + panic("nfscmp2"); + nfsvno_unlockvfs(mp); + } else if (savevpnes.nes_vfslocked && + !vpnes.nes_vfslocked) { + if (mp == savemp) + panic("nfscmp3"); + savevpnes.nes_vfslocked = nfsvno_lockvfs(savemp); + } + vrele(vp); + vp = savevp; + vpnes = savevpnes; + mp = savemp; + } + } else { + nd->nd_repstat = NFSERR_RESTOREFH; + } + break; + default: + /* + * Allow a Lookup, Getattr, GetFH, Secinfo on an + * non-exported directory if + * nfs_rootfhset. Do I need to allow any other Ops? + * (You can only have a non-exported vpnes if + * nfs_rootfhset is true. See nfsd_fhtovp()) + * Allow AUTH_SYS to be used for file systems + * exported GSS only for certain Ops, to allow + * clients to do mounts more easily. + */ + if (nfsv4_opflag[op].needscfh && vp) { + if (!NFSVNO_EXPORTED(&vpnes) && + op != NFSV4OP_LOOKUP && + op != NFSV4OP_GETATTR && + op != NFSV4OP_GETFH && + op != NFSV4OP_SECINFO) + nd->nd_repstat = NFSERR_NOFILEHANDLE; + else if (NFSVNO_EXGSSONLY(&vpnes) && + !(nd->nd_flag & ND_GSS) && + op != NFSV4OP_LOOKUP && + op != NFSV4OP_GETFH && + op != NFSV4OP_GETATTR && + op != NFSV4OP_SECINFO) + nd->nd_repstat = NFSERR_WRONGSEC; + if (nd->nd_repstat) { + if (op == NFSV4OP_SETATTR) { + /* + * Setattr reply requires a bitmap + * even for errors like these. + */ + NFSM_BUILD(tl, u_int32_t *, + NFSX_UNSIGNED); + *tl = 0; + } + break; + } + } + if (nfsv4_opflag[op].retfh == 1) { + if (!vp) { + nd->nd_repstat = NFSERR_NOFILEHANDLE; + break; + } + VREF(vp); + if (nfsv4_opflag[op].modifyfs) + NFS_STARTWRITE(NULL, &mp); + error = (*(nfsrv4_ops1[op]))(nd, isdgram, vp, + &nvp, (fhandle_t *)fh.nfsrvfh_data, p, &vpnes); + if (!error && !nd->nd_repstat) { + if (vfs_statfs(mp)->f_fsid.val[0] != + vfs_statfs(vnode_mount(nvp))->f_fsid.val[0] || + vfs_statfs(mp)->f_fsid.val[1] != + vfs_statfs(vnode_mount(nvp))->f_fsid.val[1]) { + if (vfs_statfs(vnode_mount(nvp))->f_fsid.val[0] == + NFSV4ROOT_FSID0 && + vfs_statfs(vnode_mount(nvp))->f_fsid.val[1] == + NFSV4ROOT_FSID1) { + if (vpnes.nes_vfslocked) { + nfsvno_unlockvfs(mp); + vpnes.nes_vfslocked = 0; + } + NFSVNO_SETEXRDONLY(&vpnes); + mp = vnode_mount(nvp); + } else { + nd->nd_repstat = nfsvno_checkexp(vnode_mount(nvp), + nd->nd_nam, &nes, &credanon); + if (!nd->nd_repstat) + nd->nd_repstat = nfsd_excred(nd, + &nes, credanon); + if (!nd->nd_repstat) { + if (vpnes.nes_vfslocked) + nfsvno_unlockvfs(mp); + mp = vnode_mount(nvp); + vpnes = nes; + vpnes.nes_vfslocked = + nfsvno_lockvfs(mp); + } + } + } + if (!nd->nd_repstat) { + vrele(vp); + vp = nvp; + } + } + if (nfsv4_opflag[op].modifyfs) + NFS_ENDWRITE(mp); + } else if (nfsv4_opflag[op].retfh == 2) { + if (vp == NULL || savevp == NULL) { + nd->nd_repstat = NFSERR_NOFILEHANDLE; + break; + } else if (mp != savemp) { + nd->nd_repstat = NFSERR_XDEV; + break; + } + VREF(vp); + VREF(savevp); + if (nfsv4_opflag[op].modifyfs) + NFS_STARTWRITE(NULL, &mp); + NFSVOPLOCK(savevp, LK_EXCLUSIVE | LK_RETRY, p); + error = (*(nfsrv4_ops2[op]))(nd, isdgram, savevp, + vp, p, &savevpnes, &vpnes); + if (nfsv4_opflag[op].modifyfs) + NFS_ENDWRITE(mp); + } else { + if (nfsv4_opflag[op].retfh != 0) + panic("nfsrvd_compound"); + if (nfsv4_opflag[op].needscfh) { + if (vp) { + VREF(vp); + if (nfsv4_opflag[op].modifyfs) + NFS_STARTWRITE(NULL, &mp); + NFSVOPLOCK(vp, LK_EXCLUSIVE | LK_RETRY, p); + } else { + nd->nd_repstat = NFSERR_NOFILEHANDLE; + if (op == NFSV4OP_SETATTR) { + /* + * Setattr reply requires a bitmap + * even for errors like these. 
+ */ + NFSM_BUILD(tl, u_int32_t *, + NFSX_UNSIGNED); + *tl = 0; + } + break; + } + error = (*(nfsrv4_ops0[op]))(nd, isdgram, vp, + p, &vpnes); + if (nfsv4_opflag[op].modifyfs) + NFS_ENDWRITE(mp); + } else { + error = (*(nfsrv4_ops0[op]))(nd, isdgram, + NULL, p, &vpnes); + } + } + }; + if (error) { + if (error == EBADRPC || error == NFSERR_BADXDR) { + nd->nd_repstat = NFSERR_BADXDR; + } else { + nd->nd_repstat = error; + printf("nfsv4 comperr0=%d\n", error); + } + error = 0; + } + retops++; + if (nd->nd_repstat) { + *repp = nfsd_errmap(nd); + break; + } else { + *repp = 0; /* NFS4_OK */ + } + } +nfsmout: + if (error) { + if (error == EBADRPC || error == NFSERR_BADXDR) + nd->nd_repstat = NFSERR_BADXDR; + else + printf("nfsv4 comperr1=%d\n", error); + } + if (taglen == -1) { + NFSM_BUILD(tl, u_int32_t *, 2 * NFSX_UNSIGNED); + *tl++ = 0; + *tl = 0; + } else { + *retopsp = txdr_unsigned(retops); + } + if (mp && vpnes.nes_vfslocked) + nfsvno_unlockvfs(mp); + if (vp) + vrele(vp); + if (savevp) + vrele(savevp); + NFSLOCKV4ROOTMUTEX(); + nfsv4_relref(&nfsv4rootfs_lock); + NFSUNLOCKV4ROOTMUTEX(); +} diff --git a/sys/fs/nfsserver/nfs_nfsdstate.c b/sys/fs/nfsserver/nfs_nfsdstate.c new file mode 100644 index 0000000..e312cb1 --- /dev/null +++ b/sys/fs/nfsserver/nfs_nfsdstate.c @@ -0,0 +1,4891 @@ +/*- + * Copyright (c) 2009 Rick Macklem, University of Guelph + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ + +#include <sys/cdefs.h> +__FBSDID("$FreeBSD$"); + +#ifndef APPLEKEXT +#include <fs/nfs/nfsport.h> + +struct nfsrv_stablefirst nfsrv_stablefirst; +int nfsrv_issuedelegs = 0; +int nfsrv_dolocallocks = 1; +struct nfsv4lock nfsv4rootfs_lock; + +extern int newnfs_numnfsd; +extern struct nfsstats newnfsstats; +extern int nfsrv_lease; +extern struct timeval nfsboottime; +extern u_int32_t newnfs_true, newnfs_false; +NFSV4ROOTLOCKMUTEX; +NFSSTATESPINLOCK; + +/* + * Hash and lru lists for nfs V4. 
+ * (Some would put them in the .h file, but I don't like declaring storage + * in a .h) + */ +struct nfsclienthashhead nfsclienthash[NFSCLIENTHASHSIZE]; +struct nfslockhashhead nfslockhash[NFSLOCKHASHSIZE]; +#endif /* !APPLEKEXT */ + +static u_int32_t nfsrv_openpluslock = 0, nfsrv_delegatecnt = 0; +static time_t nfsrvboottime; +static int nfsrv_writedelegifpos = 1; +static int nfsrv_returnoldstateid = 0, nfsrv_clients = 0; +static int nfsrv_clienthighwater = NFSRV_CLIENTHIGHWATER; +static int nfsrv_nogsscallback = 0; + +/* local functions */ +static void nfsrv_dumpaclient(struct nfsclient *clp, + struct nfsd_dumpclients *dumpp); +static void nfsrv_freeopenowner(struct nfsstate *stp, int cansleep, + NFSPROC_T *p); +static int nfsrv_freeopen(struct nfsstate *stp, int *freedlockp, + int cansleep, NFSPROC_T *p); +static int nfsrv_freelockowner(struct nfsstate *stp, int *freedlockp, + int cansleep, NFSPROC_T *p); +static int nfsrv_freeallnfslocks(struct nfsstate *stp, int *freedlockp, + int cansleep, NFSPROC_T *p); +static void nfsrv_freenfslock(struct nfslock *lop); +static void nfsrv_freenfslockfile(struct nfslockfile *lfp); +static void nfsrv_freedeleg(struct nfsstate *); +static int nfsrv_getstate(struct nfsclient *clp, nfsv4stateid_t *stateidp, + u_int32_t flags, struct nfsstate **stpp); +static void nfsrv_getowner(struct nfsstatehead *hp, struct nfsstate *new_stp, + struct nfsstate **stpp); +static int nfsrv_getlockfh(vnode_t vp, u_short flags, + struct nfslockfile **new_lfpp, fhandle_t *nfhp, NFSPROC_T *p); +static int nfsrv_getlockfile(u_short flags, + struct nfslockfile **new_lfpp, struct nfslockfile **lfpp, fhandle_t *nfhp); +static void nfsrv_insertlock(struct nfslock *new_lop, + struct nfslock *insert_lop, struct nfsstate *stp, struct nfslockfile *lfp); +static void nfsrv_updatelock(struct nfsstate *stp, struct nfslock **new_lopp, + struct nfslock **other_lopp, struct nfslockfile *lfp); +static int nfsrv_getipnumber(u_char *cp); +static int nfsrv_checkrestart(nfsquad_t clientid, u_int32_t flags, + nfsv4stateid_t *stateidp, int specialid); +static int nfsrv_checkgrace(u_int32_t flags); +static int nfsrv_docallback(struct nfsclient *clp, int procnum, + nfsv4stateid_t *stateidp, int trunc, fhandle_t *fhp, + struct nfsvattr *nap, nfsattrbit_t *attrbitp, NFSPROC_T *p); +static u_int32_t nfsrv_nextclientindex(void); +static u_int32_t nfsrv_nextstateindex(struct nfsclient *clp); +static void nfsrv_markstable(struct nfsclient *clp); +static int nfsrv_checkstable(struct nfsclient *clp); +static int nfsrv_clientconflict(struct nfsclient *clp, int *haslockp, struct + vnode *vp, NFSPROC_T *p); +static int nfsrv_delegconflict(struct nfsstate *stp, int *haslockp, + NFSPROC_T *p, vnode_t vp); +static int nfsrv_cleandeleg(vnode_t vp, struct nfslockfile *lfp, + struct nfsclient *clp, int *haslockp, NFSPROC_T *p); +static int nfsrv_notsamecredname(struct nfsrv_descript *nd, + struct nfsclient *clp); +static time_t nfsrv_leaseexpiry(void); +static void nfsrv_delaydelegtimeout(struct nfsstate *stp); +static int nfsrv_checkseqid(struct nfsrv_descript *nd, u_int32_t seqid, + struct nfsstate *stp, struct nfsrvcache *op); +static void nfsrv_locallocks(vnode_t vp, struct nfslockfile *lfp, + NFSPROC_T *p); +static int nfsrv_nootherstate(struct nfsstate *stp); + +/* + * Scan the client list for a match and either return the current one, + * create a new entry or return an error. + * If returning a non-error, the clp structure must either be linked into + * the client list or free'd. 
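nfsrv_setclient() below has to find any existing entry for the client's opaque nfs_client_id4 string, so it walks every hash chain (the table is hashed by clientid, which means a lookup by the id string scans every bucket) and matches on length plus a byte comparison. Here is a standalone sketch of that lookup, not from the committed sources; sk_client and SK_HASHSIZE are simplified stand-ins for the real structures and NFSCLIENTHASHSIZE.

#include <string.h>

#define SK_HASHSIZE 20

struct sk_client {
	struct sk_client *next;		/* hash-chain link */
	unsigned char *id;		/* opaque nfs_client_id4 bytes */
	int idlen;
};

/* Return the matching client, or NULL if the id has not been seen. */
static struct sk_client *
sk_findclient(struct sk_client *hash[SK_HASHSIZE], unsigned char *id, int idlen)
{
	struct sk_client *clp;
	int i;

	for (i = 0; i < SK_HASHSIZE; i++)
		for (clp = hash[i]; clp != NULL; clp = clp->next)
			if (clp->idlen == idlen &&
			    memcmp(clp->id, id, idlen) == 0)
				return (clp);
	return (NULL);
}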
+ */ +APPLESTATIC int +nfsrv_setclient(struct nfsrv_descript *nd, struct nfsclient **new_clpp, + nfsquad_t *clientidp, nfsquad_t *confirmp, NFSPROC_T *p) +{ + struct nfsclient *clp = NULL, *new_clp = *new_clpp; + int i; + struct nfsstate *stp, *tstp; + struct sockaddr_in *sad, *rad; + int zapit = 0, gotit, hasstate = 0, igotlock; + static u_int64_t confirm_index = 0; + + /* + * Check for state resource limit exceeded. + */ + if (nfsrv_openpluslock > NFSRV_V4STATELIMIT) + return (NFSERR_RESOURCE); + + if ((nd->nd_flag & ND_GSS) && nfsrv_nogsscallback) + /* + * Don't do callbacks for AUTH_GSS. + * (Since these aren't yet debugged, they might cause the + * server to crap out, if they get past the Init call to + * the client.) + */ + new_clp->lc_program = 0; + + /* Lock out other nfsd threads */ + NFSLOCKV4ROOTMUTEX(); + nfsv4_relref(&nfsv4rootfs_lock); + do { + igotlock = nfsv4_lock(&nfsv4rootfs_lock, 1, NULL, + NFSV4ROOTLOCKMUTEXPTR); + } while (!igotlock); + NFSUNLOCKV4ROOTMUTEX(); + NFSLOCKSTATE(); /* to avoid a race with */ + NFSUNLOCKSTATE(); /* nfsrv_servertimer() */ + + /* + * Search for a match in the client list. + */ + gotit = i = 0; + while (i < NFSCLIENTHASHSIZE && !gotit) { + LIST_FOREACH(clp, &nfsclienthash[i], lc_hash) { + if (new_clp->lc_idlen == clp->lc_idlen && + !NFSBCMP(new_clp->lc_id, clp->lc_id, clp->lc_idlen)) { + gotit = 1; + break; + } + } + i++; + } + if (!gotit || + (clp->lc_flags & (LCL_NEEDSCONFIRM | LCL_ADMINREVOKED))) { + /* + * Get rid of the old one. + */ + if (i != NFSCLIENTHASHSIZE) { + LIST_REMOVE(clp, lc_hash); + nfsrv_cleanclient(clp, p); + nfsrv_freedeleglist(&clp->lc_deleg); + nfsrv_freedeleglist(&clp->lc_olddeleg); + zapit = 1; + } + /* + * Add it after assigning a client id to it. + */ + new_clp->lc_flags |= LCL_NEEDSCONFIRM; + confirmp->qval = new_clp->lc_confirm.qval = ++confirm_index; + clientidp->lval[0] = new_clp->lc_clientid.lval[0] = + (u_int32_t)nfsrvboottime; + clientidp->lval[1] = new_clp->lc_clientid.lval[1] = + nfsrv_nextclientindex(); + new_clp->lc_stateindex = 0; + new_clp->lc_statemaxindex = 0; + new_clp->lc_cbref = 0; + new_clp->lc_expiry = nfsrv_leaseexpiry(); + LIST_INIT(&new_clp->lc_open); + LIST_INIT(&new_clp->lc_deleg); + LIST_INIT(&new_clp->lc_olddeleg); + for (i = 0; i < NFSSTATEHASHSIZE; i++) + LIST_INIT(&new_clp->lc_stateid[i]); + LIST_INSERT_HEAD(NFSCLIENTHASH(new_clp->lc_clientid), new_clp, + lc_hash); + newnfsstats.srvclients++; + nfsrv_openpluslock++; + nfsrv_clients++; + NFSLOCKV4ROOTMUTEX(); + nfsv4_unlock(&nfsv4rootfs_lock, 1); + NFSUNLOCKV4ROOTMUTEX(); + if (zapit) + nfsrv_zapclient(clp, p); + *new_clpp = NULL; + return (0); + } + + /* + * Now, handle the cases where the id is already issued. + */ + if (nfsrv_notsamecredname(nd, clp)) { + /* + * Check to see if there is expired state that should go away. + */ + if (clp->lc_expiry < NFSD_MONOSEC && + (!LIST_EMPTY(&clp->lc_open) || !LIST_EMPTY(&clp->lc_deleg))) { + nfsrv_cleanclient(clp, p); + nfsrv_freedeleglist(&clp->lc_deleg); + } + + /* + * If there is outstanding state, then reply NFSERR_CLIDINUSE per + * RFC3530 Sec. 8.1.2 last para. + */ + if (!LIST_EMPTY(&clp->lc_deleg)) { + hasstate = 1; + } else if (LIST_EMPTY(&clp->lc_open)) { + hasstate = 0; + } else { + hasstate = 0; + /* Look for an Open on the OpenOwner */ + LIST_FOREACH(stp, &clp->lc_open, ls_list) { + if (!LIST_EMPTY(&stp->ls_open)) { + hasstate = 1; + break; + } + } + } + if (hasstate) { + /* + * If the uid doesn't match, return NFSERR_CLIDINUSE after + * filling out the correct ipaddr and portnum. 
+ */ + sad = NFSSOCKADDR(new_clp->lc_req.nr_nam, struct sockaddr_in *); + rad = NFSSOCKADDR(clp->lc_req.nr_nam, struct sockaddr_in *); + sad->sin_addr.s_addr = rad->sin_addr.s_addr; + sad->sin_port = rad->sin_port; + NFSLOCKV4ROOTMUTEX(); + nfsv4_unlock(&nfsv4rootfs_lock, 1); + NFSUNLOCKV4ROOTMUTEX(); + return (NFSERR_CLIDINUSE); + } + } + + if (NFSBCMP(new_clp->lc_verf, clp->lc_verf, NFSX_VERF)) { + /* + * If the verifier has changed, the client has rebooted + * and a new client id is issued. The old state info + * can be thrown away once the SETCLIENTID_CONFIRM occurs. + */ + LIST_REMOVE(clp, lc_hash); + new_clp->lc_flags |= LCL_NEEDSCONFIRM; + confirmp->qval = new_clp->lc_confirm.qval = ++confirm_index; + clientidp->lval[0] = new_clp->lc_clientid.lval[0] = + nfsrvboottime; + clientidp->lval[1] = new_clp->lc_clientid.lval[1] = + nfsrv_nextclientindex(); + new_clp->lc_stateindex = 0; + new_clp->lc_statemaxindex = 0; + new_clp->lc_cbref = 0; + new_clp->lc_expiry = nfsrv_leaseexpiry(); + + /* + * Save the state until confirmed. + */ + LIST_NEWHEAD(&new_clp->lc_open, &clp->lc_open, ls_list); + LIST_FOREACH(tstp, &new_clp->lc_open, ls_list) + tstp->ls_clp = new_clp; + LIST_NEWHEAD(&new_clp->lc_deleg, &clp->lc_deleg, ls_list); + LIST_FOREACH(tstp, &new_clp->lc_deleg, ls_list) + tstp->ls_clp = new_clp; + LIST_NEWHEAD(&new_clp->lc_olddeleg, &clp->lc_olddeleg, + ls_list); + LIST_FOREACH(tstp, &new_clp->lc_olddeleg, ls_list) + tstp->ls_clp = new_clp; + for (i = 0; i < NFSSTATEHASHSIZE; i++) { + LIST_NEWHEAD(&new_clp->lc_stateid[i], + &clp->lc_stateid[i], ls_hash); + LIST_FOREACH(tstp, &new_clp->lc_stateid[i], ls_list) + tstp->ls_clp = new_clp; + } + LIST_INSERT_HEAD(NFSCLIENTHASH(new_clp->lc_clientid), new_clp, + lc_hash); + newnfsstats.srvclients++; + nfsrv_openpluslock++; + nfsrv_clients++; + NFSLOCKV4ROOTMUTEX(); + nfsv4_unlock(&nfsv4rootfs_lock, 1); + NFSUNLOCKV4ROOTMUTEX(); + + /* + * Must wait until any outstanding callback on the old clp + * completes. + */ + while (clp->lc_cbref) { + clp->lc_flags |= LCL_WAKEUPWANTED; + (void) tsleep((caddr_t)clp, PZERO - 1, + "nfsd clp", 10 * hz); + } + nfsrv_zapclient(clp, p); + *new_clpp = NULL; + return (0); + } + /* + * id and verifier match, so update the net address info + * and get rid of any existing callback authentication + * handle, so a new one will be acquired. 
+ */ + LIST_REMOVE(clp, lc_hash); + new_clp->lc_flags |= (LCL_NEEDSCONFIRM | LCL_DONTCLEAN); + new_clp->lc_expiry = nfsrv_leaseexpiry(); + confirmp->qval = new_clp->lc_confirm.qval = ++confirm_index; + clientidp->lval[0] = new_clp->lc_clientid.lval[0] = + clp->lc_clientid.lval[0]; + clientidp->lval[1] = new_clp->lc_clientid.lval[1] = + clp->lc_clientid.lval[1]; + new_clp->lc_delegtime = clp->lc_delegtime; + new_clp->lc_stateindex = clp->lc_stateindex; + new_clp->lc_statemaxindex = clp->lc_statemaxindex; + new_clp->lc_cbref = 0; + LIST_NEWHEAD(&new_clp->lc_open, &clp->lc_open, ls_list); + LIST_FOREACH(tstp, &new_clp->lc_open, ls_list) + tstp->ls_clp = new_clp; + LIST_NEWHEAD(&new_clp->lc_deleg, &clp->lc_deleg, ls_list); + LIST_FOREACH(tstp, &new_clp->lc_deleg, ls_list) + tstp->ls_clp = new_clp; + LIST_NEWHEAD(&new_clp->lc_olddeleg, &clp->lc_olddeleg, ls_list); + LIST_FOREACH(tstp, &new_clp->lc_olddeleg, ls_list) + tstp->ls_clp = new_clp; + for (i = 0; i < NFSSTATEHASHSIZE; i++) { + LIST_NEWHEAD(&new_clp->lc_stateid[i], &clp->lc_stateid[i], + ls_hash); + LIST_FOREACH(tstp, &new_clp->lc_stateid[i], ls_list) + tstp->ls_clp = new_clp; + } + LIST_INSERT_HEAD(NFSCLIENTHASH(new_clp->lc_clientid), new_clp, + lc_hash); + newnfsstats.srvclients++; + nfsrv_openpluslock++; + nfsrv_clients++; + NFSLOCKV4ROOTMUTEX(); + nfsv4_unlock(&nfsv4rootfs_lock, 1); + NFSUNLOCKV4ROOTMUTEX(); + + /* + * Must wait until any outstanding callback on the old clp + * completes. + */ + while (clp->lc_cbref) { + clp->lc_flags |= LCL_WAKEUPWANTED; + (void) tsleep((caddr_t)clp, PZERO - 1, "nfsd clp", 10 * hz); + } + nfsrv_zapclient(clp, p); + *new_clpp = NULL; + return (0); +} + +/* + * Check to see if the client id exists and optionally confirm it. + */ +APPLESTATIC int +nfsrv_getclient(nfsquad_t clientid, int opflags, struct nfsclient **clpp, + nfsquad_t confirm, struct nfsrv_descript *nd, NFSPROC_T *p) +{ + struct nfsclient *clp; + struct nfsstate *stp; + int i; + struct nfsclienthashhead *hp; + int error = 0, igotlock, doneok; + + if (clpp) + *clpp = NULL; + if (nfsrvboottime != clientid.lval[0]) + return (NFSERR_STALECLIENTID); + + /* + * If called with opflags == CLOPS_RENEW, the State Lock is + * already held. Otherwise, we need to get either that or, + * for the case of Confirm, lock out the nfsd threads. + */ + if (opflags & CLOPS_CONFIRM) { + NFSLOCKV4ROOTMUTEX(); + nfsv4_relref(&nfsv4rootfs_lock); + do { + igotlock = nfsv4_lock(&nfsv4rootfs_lock, 1, NULL, + NFSV4ROOTLOCKMUTEXPTR); + } while (!igotlock); + NFSUNLOCKV4ROOTMUTEX(); + NFSLOCKSTATE(); /* to avoid a race with */ + NFSUNLOCKSTATE(); /* nfsrv_servertimer() */ + } else if (opflags != CLOPS_RENEW) { + NFSLOCKSTATE(); + } + + hp = NFSCLIENTHASH(clientid); + LIST_FOREACH(clp, hp, lc_hash) { + if (clp->lc_clientid.lval[1] == clientid.lval[1]) + break; + } + if (clp == LIST_END(hp)) { + if (opflags & CLOPS_CONFIRM) + error = NFSERR_STALECLIENTID; + else + error = NFSERR_EXPIRED; + } else if (clp->lc_flags & LCL_ADMINREVOKED) { + /* + * If marked admin revoked, just return the error. + */ + error = NFSERR_ADMINREVOKED; + } + if (error) { + if (opflags & CLOPS_CONFIRM) { + NFSLOCKV4ROOTMUTEX(); + nfsv4_unlock(&nfsv4rootfs_lock, 1); + NFSUNLOCKV4ROOTMUTEX(); + } else if (opflags != CLOPS_RENEW) { + NFSUNLOCKSTATE(); + } + return (error); + } + + /* + * Perform any operations specified by the opflags. 
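The clientid handed out by nfsrv_setclient() above packs the server boot time into one 32-bit word and a per-boot counter into the other, which is what lets the check at the top of nfsrv_getclient() reject a clientid from before a reboot with a single comparison. A small sketch of that encoding follows; it is illustrative only, and sk_clientid is a simplified stand-in for the two-word nfsquad_t.

struct sk_clientid { unsigned int boottime; unsigned int index; };

/* Build a clientid the way the code above does: boot time plus next index. */
static struct sk_clientid
sk_newclientid(unsigned int boottime, unsigned int nextindex)
{
	struct sk_clientid id;

	id.boottime = boottime;
	id.index = nextindex;
	return (id);
}

/* A clientid minted before the most recent reboot is stale by definition. */
static int
sk_staleclientid(struct sk_clientid id, unsigned int curboottime)
{
	return (id.boottime != curboottime);
}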
+ */ + if (opflags & CLOPS_CONFIRM) { + if (clp->lc_confirm.qval != confirm.qval) + error = NFSERR_STALECLIENTID; + else if (nfsrv_notsamecredname(nd, clp)) + error = NFSERR_CLIDINUSE; + + if (!error) { + if ((clp->lc_flags & (LCL_NEEDSCONFIRM | LCL_DONTCLEAN)) == + LCL_NEEDSCONFIRM) { + /* + * Hang onto the delegations (as old delegations) + * for an Open with CLAIM_DELEGATE_PREV unless in + * grace, but get rid of the rest of the state. + */ + nfsrv_cleanclient(clp, p); + nfsrv_freedeleglist(&clp->lc_olddeleg); + if (nfsrv_checkgrace(0)) { + /* In grace, so just delete delegations */ + nfsrv_freedeleglist(&clp->lc_deleg); + } else { + LIST_FOREACH(stp, &clp->lc_deleg, ls_list) + stp->ls_flags |= NFSLCK_OLDDELEG; + clp->lc_delegtime = NFSD_MONOSEC + + nfsrv_lease + NFSRV_LEASEDELTA; + LIST_NEWHEAD(&clp->lc_olddeleg, &clp->lc_deleg, + ls_list); + } + } + clp->lc_flags &= ~(LCL_NEEDSCONFIRM | LCL_DONTCLEAN); + if (clp->lc_program) + clp->lc_flags |= LCL_NEEDSCBNULL; + } + } else if (clp->lc_flags & LCL_NEEDSCONFIRM) { + error = NFSERR_EXPIRED; + } + + /* + * If called by the Renew Op, we must check the principal. + */ + if (!error && (opflags & CLOPS_RENEWOP)) { + if (nfsrv_notsamecredname(nd, clp)) { + doneok = 0; + for (i = 0; i < NFSSTATEHASHSIZE && doneok == 0; i++) { + LIST_FOREACH(stp, &clp->lc_stateid[i], ls_hash) { + if ((stp->ls_flags & NFSLCK_OPEN) && + stp->ls_uid == nd->nd_cred->cr_uid) { + doneok = 1; + break; + } + } + } + if (!doneok) + error = NFSERR_ACCES; + } + if (!error && (clp->lc_flags & LCL_CBDOWN)) + error = NFSERR_CBPATHDOWN; + } + if ((!error || error == NFSERR_CBPATHDOWN) && + (opflags & CLOPS_RENEW)) { + clp->lc_expiry = nfsrv_leaseexpiry(); + } + if (opflags & CLOPS_CONFIRM) { + NFSLOCKV4ROOTMUTEX(); + nfsv4_unlock(&nfsv4rootfs_lock, 1); + NFSUNLOCKV4ROOTMUTEX(); + } else if (opflags != CLOPS_RENEW) { + NFSUNLOCKSTATE(); + } + if (clpp) + *clpp = clp; + return (error); +} + +/* + * Called from the new nfssvc syscall to admin revoke a clientid. + * Returns 0 for success, error otherwise. + */ +APPLESTATIC int +nfsrv_adminrevoke(struct nfsd_clid *revokep, NFSPROC_T *p) +{ + struct nfsclient *clp = NULL; + int i; + int gotit, igotlock; + + /* + * First, lock out the nfsd so that state won't change while the + * revocation record is being written to the stable storage restart + * file. + */ + NFSLOCKV4ROOTMUTEX(); + nfsv4_relref(&nfsv4rootfs_lock); + do { + igotlock = nfsv4_lock(&nfsv4rootfs_lock, 1, NULL, + NFSV4ROOTLOCKMUTEXPTR); + } while (!igotlock); + NFSUNLOCKV4ROOTMUTEX(); + NFSLOCKSTATE(); /* to avoid a race with */ + NFSUNLOCKSTATE(); /* nfsrv_servertimer() */ + + /* + * Search for a match in the client list. + */ + gotit = i = 0; + while (i < NFSCLIENTHASHSIZE && !gotit) { + LIST_FOREACH(clp, &nfsclienthash[i], lc_hash) { + if (revokep->nclid_idlen == clp->lc_idlen && + !NFSBCMP(revokep->nclid_id, clp->lc_id, clp->lc_idlen)) { + gotit = 1; + break; + } + } + i++; + } + if (!gotit) { + NFSLOCKV4ROOTMUTEX(); + nfsv4_unlock(&nfsv4rootfs_lock, 0); + NFSUNLOCKV4ROOTMUTEX(); + return (EPERM); + } + + /* + * Now, write out the revocation record + */ + nfsrv_writestable(clp->lc_id, clp->lc_idlen, NFSNST_REVOKE, p); + + /* + * and clear out the state, marking the clientid revoked. 
+ */
+	clp->lc_flags &= ~LCL_CALLBACKSON;
+	clp->lc_flags |= LCL_ADMINREVOKED;
+	nfsrv_cleanclient(clp, p);
+	nfsrv_freedeleglist(&clp->lc_deleg);
+	nfsrv_freedeleglist(&clp->lc_olddeleg);
+	NFSLOCKV4ROOTMUTEX();
+	nfsv4_unlock(&nfsv4rootfs_lock, 0);
+	NFSUNLOCKV4ROOTMUTEX();
+	return (0);
+}
+
+/*
+ * Dump out stats for all clients. Called from nfssvc(2), which is used to
+ * dump the client state out to userland.
+ */
+APPLESTATIC void
+nfsrv_dumpclients(struct nfsd_dumpclients *dumpp, int maxcnt)
+{
+	struct nfsclient *clp;
+	int i = 0, cnt = 0;
+
+	NFSLOCKSTATE();
+	/*
+	 * Rattle through the client lists until done.
+	 */
+	while (i < NFSCLIENTHASHSIZE && cnt < maxcnt) {
+		clp = LIST_FIRST(&nfsclienthash[i]);
+		while (clp != LIST_END(&nfsclienthash[i]) && cnt < maxcnt) {
+			nfsrv_dumpaclient(clp, &dumpp[cnt]);
+			cnt++;
+			clp = LIST_NEXT(clp, lc_hash);
+		}
+		i++;
+	}
+	if (cnt < maxcnt)
+		dumpp[cnt].ndcl_clid.nclid_idlen = 0;
+	NFSUNLOCKSTATE();
+}
+
+/*
+ * Dump stats for a client. Must be called with the NFSSTATELOCK and spl'd.
+ */
+static void
+nfsrv_dumpaclient(struct nfsclient *clp, struct nfsd_dumpclients *dumpp)
+{
+	struct nfsstate *stp, *openstp, *lckownstp;
+	struct nfslock *lop;
+	struct sockaddr *sad;
+	struct sockaddr_in *rad;
+	struct sockaddr_in6 *rad6;
+
+	dumpp->ndcl_nopenowners = dumpp->ndcl_nlockowners = 0;
+	dumpp->ndcl_nopens = dumpp->ndcl_nlocks = 0;
+	dumpp->ndcl_ndelegs = dumpp->ndcl_nolddelegs = 0;
+	dumpp->ndcl_flags = clp->lc_flags;
+	dumpp->ndcl_clid.nclid_idlen = clp->lc_idlen;
+	NFSBCOPY(clp->lc_id, dumpp->ndcl_clid.nclid_id, clp->lc_idlen);
+	sad = NFSSOCKADDR(clp->lc_req.nr_nam, struct sockaddr *);
+	dumpp->ndcl_addrfam = sad->sa_family;
+	if (sad->sa_family == AF_INET) {
+		rad = (struct sockaddr_in *)sad;
+		dumpp->ndcl_cbaddr.sin_addr = rad->sin_addr;
+	} else {
+		rad6 = (struct sockaddr_in6 *)sad;
+		dumpp->ndcl_cbaddr.sin6_addr = rad6->sin6_addr;
+	}
+
+	/*
+	 * Now, scan the state lists and total up the opens and locks.
+	 */
+	LIST_FOREACH(stp, &clp->lc_open, ls_list) {
+		dumpp->ndcl_nopenowners++;
+		LIST_FOREACH(openstp, &stp->ls_open, ls_list) {
+			dumpp->ndcl_nopens++;
+			LIST_FOREACH(lckownstp, &openstp->ls_open, ls_list) {
+				dumpp->ndcl_nlockowners++;
+				LIST_FOREACH(lop, &lckownstp->ls_lock, lo_lckowner) {
+					dumpp->ndcl_nlocks++;
+				}
+			}
+		}
+	}
+
+	/*
+	 * and the delegation lists.
+	 */
+	LIST_FOREACH(stp, &clp->lc_deleg, ls_list) {
+		dumpp->ndcl_ndelegs++;
+	}
+	LIST_FOREACH(stp, &clp->lc_olddeleg, ls_list) {
+		dumpp->ndcl_nolddelegs++;
+	}
+}
+
+/*
+ * Dump out lock stats for a file.
+ */
+APPLESTATIC void
+nfsrv_dumplocks(vnode_t vp, struct nfsd_dumplocks *ldumpp, int maxcnt,
+    NFSPROC_T *p)
+{
+	struct nfsstate *stp;
+	struct nfslock *lop;
+	int cnt = 0;
+	struct nfslockfile *lfp;
+	struct sockaddr *sad;
+	struct sockaddr_in *rad;
+	struct sockaddr_in6 *rad6;
+	int ret;
+	fhandle_t nfh;
+
+	ret = nfsrv_getlockfh(vp, 0, NULL, &nfh, p);
+	NFSLOCKSTATE();
+	if (!ret)
+		ret = nfsrv_getlockfile(0, NULL, &lfp, &nfh);
+	if (ret) {
+		ldumpp[0].ndlck_clid.nclid_idlen = 0;
+		NFSUNLOCKSTATE();
+		return;
+	}
+
+	/*
+	 * For each open share on file, dump it out.
+ */ + stp = LIST_FIRST(&lfp->lf_open); + while (stp != LIST_END(&lfp->lf_open) && cnt < maxcnt) { + ldumpp[cnt].ndlck_flags = stp->ls_flags; + ldumpp[cnt].ndlck_stateid.seqid = stp->ls_stateid.seqid; + ldumpp[cnt].ndlck_stateid.other[0] = stp->ls_stateid.other[0]; + ldumpp[cnt].ndlck_stateid.other[1] = stp->ls_stateid.other[1]; + ldumpp[cnt].ndlck_stateid.other[2] = stp->ls_stateid.other[2]; + ldumpp[cnt].ndlck_owner.nclid_idlen = + stp->ls_openowner->ls_ownerlen; + NFSBCOPY(stp->ls_openowner->ls_owner, + ldumpp[cnt].ndlck_owner.nclid_id, + stp->ls_openowner->ls_ownerlen); + ldumpp[cnt].ndlck_clid.nclid_idlen = stp->ls_clp->lc_idlen; + NFSBCOPY(stp->ls_clp->lc_id, ldumpp[cnt].ndlck_clid.nclid_id, + stp->ls_clp->lc_idlen); + sad=NFSSOCKADDR(stp->ls_clp->lc_req.nr_nam, struct sockaddr *); + ldumpp[cnt].ndlck_addrfam = sad->sa_family; + if (sad->sa_family == AF_INET) { + rad = (struct sockaddr_in *)sad; + ldumpp[cnt].ndlck_cbaddr.sin_addr = rad->sin_addr; + } else { + rad6 = (struct sockaddr_in6 *)sad; + ldumpp[cnt].ndlck_cbaddr.sin6_addr = rad6->sin6_addr; + } + stp = LIST_NEXT(stp, ls_file); + cnt++; + } + + /* + * and all locks. + */ + lop = LIST_FIRST(&lfp->lf_lock); + while (lop != LIST_END(&lfp->lf_lock) && cnt < maxcnt) { + stp = lop->lo_stp; + ldumpp[cnt].ndlck_flags = lop->lo_flags; + ldumpp[cnt].ndlck_first = lop->lo_first; + ldumpp[cnt].ndlck_end = lop->lo_end; + ldumpp[cnt].ndlck_stateid.seqid = stp->ls_stateid.seqid; + ldumpp[cnt].ndlck_stateid.other[0] = stp->ls_stateid.other[0]; + ldumpp[cnt].ndlck_stateid.other[1] = stp->ls_stateid.other[1]; + ldumpp[cnt].ndlck_stateid.other[2] = stp->ls_stateid.other[2]; + ldumpp[cnt].ndlck_owner.nclid_idlen = stp->ls_ownerlen; + NFSBCOPY(stp->ls_owner, ldumpp[cnt].ndlck_owner.nclid_id, + stp->ls_ownerlen); + ldumpp[cnt].ndlck_clid.nclid_idlen = stp->ls_clp->lc_idlen; + NFSBCOPY(stp->ls_clp->lc_id, ldumpp[cnt].ndlck_clid.nclid_id, + stp->ls_clp->lc_idlen); + sad=NFSSOCKADDR(stp->ls_clp->lc_req.nr_nam, struct sockaddr *); + ldumpp[cnt].ndlck_addrfam = sad->sa_family; + if (sad->sa_family == AF_INET) { + rad = (struct sockaddr_in *)sad; + ldumpp[cnt].ndlck_cbaddr.sin_addr = rad->sin_addr; + } else { + rad6 = (struct sockaddr_in6 *)sad; + ldumpp[cnt].ndlck_cbaddr.sin6_addr = rad6->sin6_addr; + } + lop = LIST_NEXT(lop, lo_lckfile); + cnt++; + } + + /* + * and the delegations. + */ + stp = LIST_FIRST(&lfp->lf_deleg); + while (stp != LIST_END(&lfp->lf_deleg) && cnt < maxcnt) { + ldumpp[cnt].ndlck_flags = stp->ls_flags; + ldumpp[cnt].ndlck_stateid.seqid = stp->ls_stateid.seqid; + ldumpp[cnt].ndlck_stateid.other[0] = stp->ls_stateid.other[0]; + ldumpp[cnt].ndlck_stateid.other[1] = stp->ls_stateid.other[1]; + ldumpp[cnt].ndlck_stateid.other[2] = stp->ls_stateid.other[2]; + ldumpp[cnt].ndlck_owner.nclid_idlen = 0; + ldumpp[cnt].ndlck_clid.nclid_idlen = stp->ls_clp->lc_idlen; + NFSBCOPY(stp->ls_clp->lc_id, ldumpp[cnt].ndlck_clid.nclid_id, + stp->ls_clp->lc_idlen); + sad=NFSSOCKADDR(stp->ls_clp->lc_req.nr_nam, struct sockaddr *); + ldumpp[cnt].ndlck_addrfam = sad->sa_family; + if (sad->sa_family == AF_INET) { + rad = (struct sockaddr_in *)sad; + ldumpp[cnt].ndlck_cbaddr.sin_addr = rad->sin_addr; + } else { + rad6 = (struct sockaddr_in6 *)sad; + ldumpp[cnt].ndlck_cbaddr.sin6_addr = rad6->sin6_addr; + } + stp = LIST_NEXT(stp, ls_file); + cnt++; + } + + /* + * If list isn't full, mark end of list by setting the client name + * to zero length. 
+ */ + if (cnt < maxcnt) + ldumpp[cnt].ndlck_clid.nclid_idlen = 0; + NFSUNLOCKSTATE(); +} + +/* + * Server timer routine. It can scan any linked list, so long + * as it holds the spin lock and there is no exclusive lock on + * nfsv4rootfs_lock. + * Must be called by a kernel thread and not a timer interrupt, + * so that it only runs when the nfsd threads are sleeping on a + * uniprocessor and uses the State spin lock for an SMP system. + * (For OpenBSD, a kthread is ok. For FreeBSD, I think it is ok + * to do this from a callout, since the spin locks work. For + * Darwin, I'm not sure what will work correctly yet.) + * Should be called once per second. + */ +APPLESTATIC void +nfsrv_servertimer(void) +{ + struct nfsclient *clp, *nclp; + struct nfsstate *stp, *nstp; + int i; + + /* + * Make sure nfsboottime is set. This is used by V3 as well + * as V4. Note that nfsboottime is not nfsrvboottime, which is + * only used by the V4 server for leases. + */ + if (nfsboottime.tv_sec == 0) + NFSSETBOOTTIME(nfsboottime); + + /* + * If server hasn't started yet, just return. + */ + NFSLOCKSTATE(); + if (nfsrv_stablefirst.nsf_eograce == 0) { + NFSUNLOCKSTATE(); + return; + } + if (!(nfsrv_stablefirst.nsf_flags & NFSNSF_UPDATEDONE)) { + if (!(nfsrv_stablefirst.nsf_flags & NFSNSF_GRACEOVER) && + NFSD_MONOSEC > nfsrv_stablefirst.nsf_eograce) + nfsrv_stablefirst.nsf_flags |= + (NFSNSF_GRACEOVER | NFSNSF_NEEDLOCK); + NFSUNLOCKSTATE(); + return; + } + + /* + * Return now if an nfsd thread has the exclusive lock on + * nfsv4rootfs_lock. The dirty trick here is that we have + * the spin lock already and the nfsd threads do a: + * NFSLOCKSTATE, NFSUNLOCKSTATE after getting the exclusive + * lock, so they won't race with code after this check. + */ + if (nfsv4rootfs_lock.nfslock_lock & NFSV4LOCK_LOCK) { + NFSUNLOCKSTATE(); + return; + } + + /* + * For each client... + */ + for (i = 0; i < NFSCLIENTHASHSIZE; i++) { + clp = LIST_FIRST(&nfsclienthash[i]); + while (clp != LIST_END(&nfsclienthash[i])) { + nclp = LIST_NEXT(clp, lc_hash); + if (!(clp->lc_flags & LCL_EXPIREIT)) { + if (((clp->lc_expiry + NFSRV_STALELEASE) < NFSD_MONOSEC + && ((LIST_EMPTY(&clp->lc_deleg) + && LIST_EMPTY(&clp->lc_open)) || + nfsrv_clients > nfsrv_clienthighwater)) || + (clp->lc_expiry + NFSRV_MOULDYLEASE) < NFSD_MONOSEC || + (clp->lc_expiry < NFSD_MONOSEC && + (nfsrv_openpluslock * 10 / 9) > NFSRV_V4STATELIMIT)) { + /* + * Lease has expired several nfsrv_lease times ago: + * PLUS + * - no state is associated with it + * OR + * - above high water mark for number of clients + * (nfsrv_clienthighwater should be large enough + * that this only occurs when clients fail to + * use the same nfs_client_id4.id. Maybe somewhat + * higher that the maximum number of clients that + * will mount this server?) + * OR + * Lease has expired a very long time ago + * OR + * Lease has expired PLUS the number of opens + locks + * has exceeded 90% of capacity + * + * --> Mark for expiry. The actual expiry will be done + * by an nfsd sometime soon. + */ + clp->lc_flags |= LCL_EXPIREIT; + nfsrv_stablefirst.nsf_flags |= + (NFSNSF_NEEDLOCK | NFSNSF_EXPIREDCLIENT); + } else { + /* + * If there are no opens, increment no open tick cnt + * If time exceeds NFSNOOPEN, mark it to be thrown away + * otherwise, if there is an open, reset no open time + * Hopefully, this will avoid excessive re-creation + * of open owners and subsequent open confirms. 
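The expiry policy spelled out a few lines above reduces to a three-way OR over the lease age and the amount of state held. Here it is restated as a pure predicate; this is an illustrative sketch only, and the parameters stand in for NFSD_MONOSEC, clp->lc_expiry, the NFSRV_STALELEASE/NFSRV_MOULDYLEASE intervals, nfsrv_clienthighwater and NFSRV_V4STATELIMIT.

/*
 * Return nonzero when a client whose lease ran out at "expiry" should be
 * marked for expiry at time "now".  "hasstate" means the client still
 * holds opens or delegations.
 */
static int
sk_expireclient(long long now, long long expiry, int hasstate, int clients,
    int clienthighwater, int openpluslock, int statelimit,
    long long stalelease, long long mouldylease)
{
	if (expiry + stalelease < now &&
	    (!hasstate || clients > clienthighwater))
		return (1);	/* long expired: no state, or too many clients */
	if (expiry + mouldylease < now)
		return (1);	/* expired a very long time ago */
	if (expiry < now && openpluslock * 10 / 9 > statelimit)
		return (1);	/* expired and over 90% of the state limit */
	return (0);
}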
+ */ + stp = LIST_FIRST(&clp->lc_open); + while (stp != LIST_END(&clp->lc_open)) { + nstp = LIST_NEXT(stp, ls_list); + if (LIST_EMPTY(&stp->ls_open)) { + stp->ls_noopens++; + if (stp->ls_noopens > NFSNOOPEN || + (nfsrv_openpluslock * 2) > + NFSRV_V4STATELIMIT) + nfsrv_stablefirst.nsf_flags |= + NFSNSF_NOOPENS; + } else { + stp->ls_noopens = 0; + } + stp = nstp; + } + } + } + clp = nclp; + } + } + NFSUNLOCKSTATE(); +} + +/* + * The following set of functions free up the various data structures. + */ +/* + * Clear out all open/lock state related to this nfsclient. + * Caller must hold an exclusive lock on nfsv4rootfs_lock, so that + * there are no other active nfsd threads. + */ +APPLESTATIC void +nfsrv_cleanclient(struct nfsclient *clp, NFSPROC_T *p) +{ + struct nfsstate *stp, *nstp; + + LIST_FOREACH_SAFE(stp, &clp->lc_open, ls_list, nstp) { + nfsrv_freeopenowner(stp, 1, p); + } +} + +/* + * Free a client that has been cleaned. It should also already have been + * removed from the lists. + * (Just to be safe w.r.t. newnfs_disconnect(), call this function when + * softclock interrupts are enabled.) + */ +APPLESTATIC void +nfsrv_zapclient(struct nfsclient *clp, NFSPROC_T *p) +{ + +#ifdef notyet + if ((clp->lc_flags & (LCL_GSS | LCL_CALLBACKSON)) == + (LCL_GSS | LCL_CALLBACKSON) && + (clp->lc_hand.nfsh_flag & NFSG_COMPLETE) && + clp->lc_handlelen > 0) { + clp->lc_hand.nfsh_flag &= ~NFSG_COMPLETE; + clp->lc_hand.nfsh_flag |= NFSG_DESTROYED; + (void) nfsrv_docallback(clp, NFSV4PROC_CBNULL, + NULL, 0, NULL, NULL, NULL, p); + } +#endif + newnfs_disconnect(&clp->lc_req); + NFSSOCKADDRFREE(clp->lc_req.nr_nam); + NFSFREEMUTEX(&clp->lc_req.nr_mtx); + free((caddr_t)clp, M_NFSDCLIENT); + NFSLOCKSTATE(); + newnfsstats.srvclients--; + nfsrv_openpluslock--; + nfsrv_clients--; + NFSUNLOCKSTATE(); +} + +/* + * Free a list of delegation state structures. + * (This function will also free all nfslockfile structures that no + * longer have associated state.) + */ +APPLESTATIC void +nfsrv_freedeleglist(struct nfsstatehead *sthp) +{ + struct nfsstate *stp, *nstp; + + LIST_FOREACH_SAFE(stp, sthp, ls_list, nstp) { + nfsrv_freedeleg(stp); + } + LIST_INIT(sthp); +} + +/* + * Free up a delegation. + */ +static void +nfsrv_freedeleg(struct nfsstate *stp) +{ + struct nfslockfile *lfp; + + LIST_REMOVE(stp, ls_hash); + LIST_REMOVE(stp, ls_list); + LIST_REMOVE(stp, ls_file); + lfp = stp->ls_lfp; + if (LIST_EMPTY(&lfp->lf_open) && + LIST_EMPTY(&lfp->lf_lock) && LIST_EMPTY(&lfp->lf_deleg)) + nfsrv_freenfslockfile(lfp); + FREE((caddr_t)stp, M_NFSDSTATE); + newnfsstats.srvdelegates--; + nfsrv_openpluslock--; + nfsrv_delegatecnt--; +} + +/* + * This function frees an open owner and all associated opens. + * Must be called with soft clock interrupts disabled. + */ +static void +nfsrv_freeopenowner(struct nfsstate *stp, int cansleep, NFSPROC_T *p) +{ + struct nfsstate *nstp, *tstp; + + LIST_REMOVE(stp, ls_list); + /* + * Now, free all associated opens. + */ + nstp = LIST_FIRST(&stp->ls_open); + while (nstp != LIST_END(&stp->ls_open)) { + tstp = nstp; + nstp = LIST_NEXT(nstp, ls_list); + (void) nfsrv_freeopen(tstp, NULL, cansleep, p); + } + if (stp->ls_op) + nfsrvd_derefcache(stp->ls_op); + FREE((caddr_t)stp, M_NFSDSTATE); + newnfsstats.srvopenowners--; + nfsrv_openpluslock--; +} + +/* + * This function frees an open (nfsstate open structure) with all associated + * lock_owners and locks. It also frees the nfslockfile structure iff there + * are no other opens on the file. + * Must be called with soft clock interrupts disabled. 
+ * Returns 1 if it free'd the nfslockfile, 0 otherwise.
+ */
+static int
+nfsrv_freeopen(struct nfsstate *stp, int *freedlockp, int cansleep,
+    NFSPROC_T *p)
+{
+	struct nfsstate *nstp, *tstp;
+	struct nfslockfile *lfp;
+	int ret = 0, ret2;
+
+	LIST_REMOVE(stp, ls_hash);
+	LIST_REMOVE(stp, ls_list);
+	LIST_REMOVE(stp, ls_file);
+
+	lfp = stp->ls_lfp;
+	/*
+	 * The nfslockfile is freed here if there are no locks
+	 * associated with the open.
+	 * If there are locks associated with the open, the
+	 * nfslockfile structure can be freed via nfsrv_freelockowner().
+	 * (That is why the call must be here instead of after the loop.)
+	 */
+	if (LIST_EMPTY(&lfp->lf_open) && LIST_EMPTY(&lfp->lf_lock) &&
+	    LIST_EMPTY(&lfp->lf_deleg)) {
+		nfsrv_freenfslockfile(lfp);
+		ret = 1;
+	}
+	/*
+	 * Now, free all lockowners associated with this open.
+	 */
+	nstp = LIST_FIRST(&stp->ls_open);
+	while (nstp != LIST_END(&stp->ls_open)) {
+		tstp = nstp;
+		nstp = LIST_NEXT(nstp, ls_list);
+		ret2 = nfsrv_freelockowner(tstp, freedlockp, cansleep, p);
+		if (ret == 0 && ret2 != 0)
+			ret = ret2;
+	}
+	FREE((caddr_t)stp, M_NFSDSTATE);
+	newnfsstats.srvopens--;
+	nfsrv_openpluslock--;
+	return (ret);
+}
+
+/*
+ * Frees a lockowner and all associated locks.
+ * It also frees the nfslockfile structure, if there are no more
+ * references to it.
+ * Must be called with soft clock interrupts disabled.
+ * Returns 1 if it free'd the nfslockfile structure, 0 otherwise.
+ */
+static int
+nfsrv_freelockowner(struct nfsstate *stp, int *freedlockp, int cansleep,
+    NFSPROC_T *p)
+{
+	int ret;
+
+	LIST_REMOVE(stp, ls_hash);
+	LIST_REMOVE(stp, ls_list);
+	ret = nfsrv_freeallnfslocks(stp, freedlockp, cansleep, p);
+	if (stp->ls_op)
+		nfsrvd_derefcache(stp->ls_op);
+	FREE((caddr_t)stp, M_NFSDSTATE);
+	newnfsstats.srvlockowners--;
+	nfsrv_openpluslock--;
+	return (ret);
+}
+
+/*
+ * Free all the nfs locks on a lockowner.
+ * Returns 1 if it free'd the nfslockfile structure, 0 otherwise.
+ * If any byte range lock is free'd, *freedlockp is set to 1.
+ */
+static int
+nfsrv_freeallnfslocks(struct nfsstate *stp, int *freedlockp, int cansleep,
+    NFSPROC_T *p)
+{
+	struct nfslock *lop, *nlop;
+	struct nfslockfile *lfp = NULL, *olfp = NULL;
+	int ret = 0;
+
+	lop = LIST_FIRST(&stp->ls_lock);
+	while (lop != LIST_END(&stp->ls_lock)) {
+		nlop = LIST_NEXT(lop, lo_lckowner);
+		/*
+		 * Since locks off a lockowner are ordered by
+		 * file, you should update the local locks when
+		 * you hit the next file OR the end of the lock
+		 * list. If there are no locks for other owners,
+		 * it must be done before the lockowner is discarded.
+		 * (All this only applies if cansleep == 1.)
+		 */
+		olfp = lfp;
+		lfp = lop->lo_lfp;
+		nfsrv_freenfslock(lop);
+		if (freedlockp)
+			*freedlockp = 1;
+		if (LIST_EMPTY(&lfp->lf_open) && LIST_EMPTY(&lfp->lf_lock) &&
+		    LIST_EMPTY(&lfp->lf_deleg)) {
+			if (cansleep)
+				nfsrv_locallocks(NULL, lfp, p);
+			nfsrv_freenfslockfile(lfp);
+			/*
+			 * Set the pointer(s) to this lockowner NULL,
+			 * to indicate it has been free'd and local
+			 * locks discarded already.
+			 */
+			if (olfp == lfp)
+				olfp = NULL;
+			lfp = NULL;
+			ret = 1;
+		}
+		if (cansleep && olfp != lfp && olfp != NULL)
+			nfsrv_locallocks(NULL, olfp, p);
+		lop = nlop;
+	}
+	if (cansleep && lfp != NULL)
+		nfsrv_locallocks(NULL, olfp, p);
+	return (ret);
+}
+
+/*
+ * Free an nfslock structure.
+ * Must be called with soft clock interrupts disabled.
+ */ +static void +nfsrv_freenfslock(struct nfslock *lop) +{ + + LIST_REMOVE(lop, lo_lckfile); + LIST_REMOVE(lop, lo_lckowner); + FREE((caddr_t)lop, M_NFSDLOCK); + newnfsstats.srvlocks--; + nfsrv_openpluslock--; +} + +/* + * This function frees an nfslockfile structure. + * Must be called with soft clock interrupts disabled. + */ +static void +nfsrv_freenfslockfile(struct nfslockfile *lfp) +{ + + LIST_REMOVE(lfp, lf_hash); + FREE((caddr_t)lfp, M_NFSDLOCKFILE); +} + +/* + * This function looks up an nfsstate structure via stateid. + */ +static int +nfsrv_getstate(struct nfsclient *clp, nfsv4stateid_t *stateidp, __unused u_int32_t flags, + struct nfsstate **stpp) +{ + struct nfsstate *stp; + struct nfsstatehead *hp; + + *stpp = NULL; + hp = NFSSTATEHASH(clp, *stateidp); + LIST_FOREACH(stp, hp, ls_hash) { + if (!NFSBCMP(stp->ls_stateid.other, stateidp->other, + NFSX_STATEIDOTHER)) + break; + } + + /* + * If no state id in list, return NFSERR_BADSTATEID. + */ + if (stp == LIST_END(hp)) + return (NFSERR_BADSTATEID); + *stpp = stp; + return (0); +} + +/* + * This function gets an nfsstate structure via owner string. + */ +static void +nfsrv_getowner(struct nfsstatehead *hp, struct nfsstate *new_stp, + struct nfsstate **stpp) +{ + struct nfsstate *stp; + + *stpp = NULL; + LIST_FOREACH(stp, hp, ls_list) { + if (new_stp->ls_ownerlen == stp->ls_ownerlen && + !NFSBCMP(new_stp->ls_owner,stp->ls_owner,stp->ls_ownerlen)) { + *stpp = stp; + return; + } + } +} + +/* + * Lock control function called to update lock status. + * Returns 0 upon success, -1 if there is no lock and the flags indicate + * that one isn't to be created and an NFSERR_xxx for other errors. + * The structures new_stp and new_lop are passed in as pointers that should + * be set to NULL if the structure is used and shouldn't be free'd. + * For the NFSLCK_TEST and NFSLCK_CHECK cases, the structures are + * never used and can safely be allocated on the stack. For all other + * cases, *new_stpp and *new_lopp should be malloc'd before the call, + * in case they are used. + */ +APPLESTATIC int +nfsrv_lockctrl(vnode_t vp, struct nfsstate **new_stpp, + struct nfslock **new_lopp, struct nfslockconflict *cfp, + nfsquad_t clientid, nfsv4stateid_t *stateidp, __unused struct nfsexstuff *exp, + struct nfsrv_descript *nd, NFSPROC_T *p) +{ + struct nfslock *lop; + struct nfsstate *new_stp = *new_stpp; + struct nfslock *new_lop = *new_lopp; + struct nfsstate *tstp, *mystp, *nstp; + int specialid = 0; + struct nfslockfile *lfp; + struct nfslock *other_lop = NULL; + struct nfsstate *stp, *lckstp = NULL; + struct nfsclient *clp = NULL; + u_int32_t bits; + int error = 0, haslock = 0, ret; + int getlckret, delegation = 0; + fhandle_t nfh; + + if (new_stp->ls_flags & (NFSLCK_CHECK | NFSLCK_SETATTR)) { + /* + * Note the special cases of "all 1s" or "all 0s" stateids and + * let reads with all 1s go ahead. + */ + if (new_stp->ls_stateid.seqid == 0x0 && + new_stp->ls_stateid.other[0] == 0x0 && + new_stp->ls_stateid.other[1] == 0x0 && + new_stp->ls_stateid.other[2] == 0x0) + specialid = 1; + else if (new_stp->ls_stateid.seqid == 0xffffffff && + new_stp->ls_stateid.other[0] == 0xffffffff && + new_stp->ls_stateid.other[1] == 0xffffffff && + new_stp->ls_stateid.other[2] == 0xffffffff) + specialid = 2; + } + + /* + * Check for restart conditions (client and server). + */ + error = nfsrv_checkrestart(clientid, new_stp->ls_flags, + &new_stp->ls_stateid, specialid); + if (error) + return (error); + + /* + * Check for state resource limit exceeded. 
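nfsrv_lockctrl() above begins by classifying the two reserved "special" stateids, all zero bits and all one bits, so that later code can, for example, let reads carrying the all-ones stateid bypass the normal stateid lookup. That classification as a standalone helper, shown here as an illustrative sketch; sk_stateid is a simplified stand-in for nfsv4stateid_t.

struct sk_stateid { unsigned int seqid; unsigned int other[3]; };

/* 0 = ordinary stateid, 1 = the all-zeros special id, 2 = the all-ones one. */
static int
sk_specialid(const struct sk_stateid *st)
{
	if (st->seqid == 0 && st->other[0] == 0 && st->other[1] == 0 &&
	    st->other[2] == 0)
		return (1);
	if (st->seqid == 0xffffffff && st->other[0] == 0xffffffff &&
	    st->other[1] == 0xffffffff && st->other[2] == 0xffffffff)
		return (2);
	return (0);
}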
+ */ + if ((new_stp->ls_flags & NFSLCK_LOCK) && + nfsrv_openpluslock > NFSRV_V4STATELIMIT) + return (NFSERR_RESOURCE); + + /* + * For Lock, check for a conflict with a lock held by + * a process running locally on the server now, before + * monkeying with nfsd state. Since the vp is locked, any + * other local calls are blocked during this Op. + */ + if (new_stp->ls_flags & NFSLCK_LOCK) { + if (new_lop->lo_flags & NFSLCK_WRITE) + error = nfsvno_localconflict(vp, F_WRLCK, + new_lop->lo_first, new_lop->lo_end, cfp, p); + else + error = nfsvno_localconflict(vp, F_RDLCK, + new_lop->lo_first, new_lop->lo_end, cfp, p); + if (error) + return (error); + } + + /* + * For the lock case, get another nfslock structure, + * just in case we need it. + * Malloc now, before we start sifting through the linked lists, + * in case we have to wait for memory. + */ +tryagain: + if (new_stp->ls_flags & NFSLCK_LOCK) + MALLOC(other_lop, struct nfslock *, sizeof (struct nfslock), + M_NFSDLOCK, M_WAITOK); + + /* + * Get the lockfile structure for CFH now, so we can do a sanity + * check against the stateid, before incrementing the seqid#, since + * we want to return NFSERR_BADSTATEID on failure and the seqid# + * shouldn't be incremented for this case. + * If nfsrv_getlockfile() returns -1, it means "not found", which + * will be handled later. + */ + getlckret = nfsrv_getlockfh(vp, new_stp->ls_flags, NULL, &nfh, p); + NFSLOCKSTATE(); + if (!getlckret) + getlckret = nfsrv_getlockfile(new_stp->ls_flags, NULL, + &lfp, &nfh); + if (getlckret != 0 && getlckret != -1) { + NFSUNLOCKSTATE(); + if (other_lop) + FREE((caddr_t)other_lop, M_NFSDLOCK); + if (haslock) { + NFSLOCKV4ROOTMUTEX(); + nfsv4_unlock(&nfsv4rootfs_lock, 1); + NFSUNLOCKV4ROOTMUTEX(); + } + return (getlckret); + } + + /* + * Since the code is manipulating lists that are also + * manipulated by nfsrv_servertimer(), soft clock interrupts + * must be masked off. + */ + if (specialid == 0) { + if (new_stp->ls_flags & NFSLCK_TEST) { + /* + * RFC 3530 does not list LockT as an op that renews a + * lease, but the concensus seems to be that it is ok + * for a server to do so. + */ + error = nfsrv_getclient(clientid, CLOPS_RENEW, &clp, + (nfsquad_t)((u_quad_t)0), NULL, p); + + /* + * Since NFSERR_EXPIRED, NFSERR_ADMINREVOKED are not valid + * error returns for LockT, just go ahead and test for a lock, + * since there are no locks for this client, but other locks + * can conflict. (ie. same client will always be false) + */ + if (error == NFSERR_EXPIRED || error == NFSERR_ADMINREVOKED) + error = 0; + lckstp = new_stp; + } else { + error = nfsrv_getclient(clientid, CLOPS_RENEW, &clp, + (nfsquad_t)((u_quad_t)0), NULL, p); + if (error == 0) + /* + * Look up the stateid + */ + error = nfsrv_getstate(clp, &new_stp->ls_stateid, + new_stp->ls_flags, &stp); + /* + * do some sanity checks for an unconfirmed open or a + * stateid that refers to the wrong file, for an open stateid + */ + if (error == 0 && (stp->ls_flags & NFSLCK_OPEN) && + ((stp->ls_openowner->ls_flags & NFSLCK_NEEDSCONFIRM) || + (getlckret != -1 && stp->ls_lfp != lfp))) + error = NFSERR_BADSTATEID; + if (error == 0 && + (stp->ls_flags & (NFSLCK_DELEGREAD | NFSLCK_DELEGWRITE)) && + getlckret != -1 && stp->ls_lfp != lfp) + error = NFSERR_BADSTATEID; + + /* + * If the lockowner stateid doesn't refer to the same file, + * I believe that is considered ok, since some clients will + * only create a single lockowner and use that for all locks + * on all files. 
+ * For now, log it as a diagnostic, instead of considering it + * a BadStateid. + */ + if (error == 0 && (stp->ls_flags & + (NFSLCK_OPEN | NFSLCK_DELEGREAD | NFSLCK_DELEGWRITE)) == 0 && + getlckret != -1 && stp->ls_lfp != lfp) { +#ifdef DIAGNOSTIC + printf("Got a lock statid for different file open\n"); +#endif + /* + error = NFSERR_BADSTATEID; + */ + } + + if (error == 0) { + if (new_stp->ls_flags & NFSLCK_OPENTOLOCK) { + /* + * If haslock set, we've already checked the seqid. + */ + if (!haslock) { + if (stp->ls_flags & NFSLCK_OPEN) + error = nfsrv_checkseqid(nd, new_stp->ls_seq, + stp->ls_openowner, new_stp->ls_op); + else + error = NFSERR_BADSTATEID; + } + if (!error) + nfsrv_getowner(&stp->ls_open, new_stp, &lckstp); + if (lckstp) + /* + * I believe this should be an error, but it + * isn't obvious what NFSERR_xxx would be + * appropriate, so I'll use NFSERR_INVAL for now. + */ + error = NFSERR_INVAL; + else + lckstp = new_stp; + } else if (new_stp->ls_flags&(NFSLCK_LOCK|NFSLCK_UNLOCK)) { + /* + * If haslock set, ditto above. + */ + if (!haslock) { + if (stp->ls_flags & NFSLCK_OPEN) + error = NFSERR_BADSTATEID; + else + error = nfsrv_checkseqid(nd, new_stp->ls_seq, + stp, new_stp->ls_op); + } + lckstp = stp; + } else { + lckstp = stp; + } + } + /* + * If the seqid part of the stateid isn't the same, return + * NFSERR_OLDSTATEID for cases other than I/O Ops. + * For I/O Ops, only return NFSERR_OLDSTATEID if + * nfsrv_returnoldstateid is set. (The concensus on the email + * list was that most clients would prefer to not receive + * NFSERR_OLDSTATEID for I/O Ops, but the RFC suggests that that + * is what will happen, so I use the nfsrv_returnoldstateid to + * allow for either server configuration.) + */ + if (!error && stp->ls_stateid.seqid!=new_stp->ls_stateid.seqid && + (!(new_stp->ls_flags & NFSLCK_CHECK) || + nfsrv_returnoldstateid)) + error = NFSERR_OLDSTATEID; + } + } + + /* + * Now we can check for grace. + */ + if (!error) + error = nfsrv_checkgrace(new_stp->ls_flags); + if ((new_stp->ls_flags & NFSLCK_RECLAIM) && !error && + nfsrv_checkstable(clp)) + error = NFSERR_NOGRACE; + /* + * If we successfully Reclaimed state, note that. + */ + if ((new_stp->ls_flags & NFSLCK_RECLAIM) && !error) + nfsrv_markstable(clp); + + /* + * If nd_repstat is set, we can return that now, since the + * seqid# has been incremented. + */ + if (nd->nd_repstat && !error) + error = nd->nd_repstat; + if (error) { + NFSUNLOCKSTATE(); + if (other_lop) + FREE((caddr_t)other_lop, M_NFSDLOCK); + if (haslock) { + NFSLOCKV4ROOTMUTEX(); + nfsv4_unlock(&nfsv4rootfs_lock, 1); + NFSUNLOCKV4ROOTMUTEX(); + } + return (error); + } + + /* + * Check the nfsrv_getlockfile return. + * Returned -1 if no structure found. + */ + if (getlckret == -1) { + error = NFSERR_EXPIRED; + /* + * Called from lockt, so no lock is OK. + */ + if (new_stp->ls_flags & NFSLCK_TEST) { + error = 0; + } else if (new_stp->ls_flags & + (NFSLCK_CHECK | NFSLCK_SETATTR)) { + /* + * Called to check for a lock, OK if the stateid is all + * 1s or all 0s, but there should be an nfsstate + * otherwise. + * (ie. If there is no open, I'll assume no share + * deny bits.) + */ + if (specialid) + error = 0; + else + error = NFSERR_BADSTATEID; + } + NFSUNLOCKSTATE(); + if (haslock) { + NFSLOCKV4ROOTMUTEX(); + nfsv4_unlock(&nfsv4rootfs_lock, 1); + NFSUNLOCKV4ROOTMUTEX(); + } + /* + * Called to lock or unlock, so the lock has gone away. + */ + return (error); + } + + /* + * For NFSLCK_CHECK and NFSLCK_LOCK, test for a share conflict. 
+ * For NFSLCK_CHECK, allow a read if write access is granted, + * but check for a deny. For NFSLCK_LOCK, require correct access, + * which implies a conflicting deny can't exist. + */ + if (new_stp->ls_flags & (NFSLCK_CHECK | NFSLCK_LOCK)) { + /* + * Four kinds of state id: + * - specialid (all 0s or all 1s), only for NFSLCK_CHECK + * - stateid for an open + * - stateid for a delegation + * - stateid for a lock owner + */ + if (!specialid) { + if (stp->ls_flags & (NFSLCK_DELEGREAD | NFSLCK_DELEGWRITE)) { + delegation = 1; + mystp = stp; + nfsrv_delaydelegtimeout(stp); + } else if (stp->ls_flags & NFSLCK_OPEN) { + mystp = stp; + } else { + mystp = stp->ls_openstp; + } + /* + * If locking or checking, require correct access + * bit set. + */ + if (((new_stp->ls_flags & NFSLCK_LOCK) && + !((new_lop->lo_flags >> NFSLCK_LOCKSHIFT) & + mystp->ls_flags & NFSLCK_ACCESSBITS)) || + ((new_stp->ls_flags & (NFSLCK_CHECK|NFSLCK_READACCESS)) == + (NFSLCK_CHECK | NFSLCK_READACCESS) && + !(mystp->ls_flags & NFSLCK_READACCESS)) || + ((new_stp->ls_flags & (NFSLCK_CHECK|NFSLCK_WRITEACCESS)) == + (NFSLCK_CHECK | NFSLCK_WRITEACCESS) && + !(mystp->ls_flags & NFSLCK_WRITEACCESS))) { + NFSUNLOCKSTATE(); + if (other_lop) + FREE((caddr_t)other_lop, M_NFSDLOCK); + if (haslock) { + NFSLOCKV4ROOTMUTEX(); + nfsv4_unlock(&nfsv4rootfs_lock, 1); + NFSUNLOCKV4ROOTMUTEX(); + } + return (NFSERR_OPENMODE); + } + } else + mystp = NULL; + if ((new_stp->ls_flags & NFSLCK_CHECK) && !delegation) { + /* + * Check for a conflicting deny bit. + */ + LIST_FOREACH(tstp, &lfp->lf_open, ls_file) { + if (tstp != mystp) { + bits = tstp->ls_flags; + bits >>= NFSLCK_SHIFT; + if (new_stp->ls_flags & bits & NFSLCK_ACCESSBITS) { + ret = nfsrv_clientconflict(tstp->ls_clp, &haslock, + vp, p); + if (ret) { + /* + * nfsrv_clientconflict unlocks state + * when it returns non-zero. + */ + lckstp = NULL; + goto tryagain; + } + NFSUNLOCKSTATE(); + if (haslock) { + NFSLOCKV4ROOTMUTEX(); + nfsv4_unlock(&nfsv4rootfs_lock, 1); + NFSUNLOCKV4ROOTMUTEX(); + } + return (NFSERR_OPENMODE); + } + } + } + + /* We're outta here */ + NFSUNLOCKSTATE(); + if (haslock) { + NFSLOCKV4ROOTMUTEX(); + nfsv4_unlock(&nfsv4rootfs_lock, 1); + NFSUNLOCKV4ROOTMUTEX(); + } + return (0); + } + } + + /* + * For setattr, just get rid of all the Delegations for other clients. + */ + if (new_stp->ls_flags & NFSLCK_SETATTR) { + ret = nfsrv_cleandeleg(vp, lfp, clp, &haslock, p); + if (ret) { + /* + * nfsrv_cleandeleg() unlocks state when it + * returns non-zero. + */ + if (ret == -1) { + lckstp = NULL; + goto tryagain; + } + return (ret); + } + if (!(new_stp->ls_flags & NFSLCK_CHECK) || + (LIST_EMPTY(&lfp->lf_open) && LIST_EMPTY(&lfp->lf_lock) && + LIST_EMPTY(&lfp->lf_deleg))) { + NFSUNLOCKSTATE(); + if (haslock) { + NFSLOCKV4ROOTMUTEX(); + nfsv4_unlock(&nfsv4rootfs_lock, 1); + NFSUNLOCKV4ROOTMUTEX(); + } + return (0); + } + } + + /* + * Check for a conflicting delegation. If one is found, call + * nfsrv_delegconflict() to handle it. If the v4root lock hasn't + * been set yet, it will get the lock. Otherwise, it will recall + * the delegation. Then, we try try again... 
+ * I currently believe the conflict algorithm to be: + * For Lock Ops (Lock/LockT/LockU) + * - there is a conflict iff a different client has a write delegation + * For Reading (Read Op) + * - there is a conflict iff a different client has a write delegation + * (the specialids are always a different client) + * For Writing (Write/Setattr of size) + * - there is a conflict if a different client has any delegation + * - there is a conflict if the same client has a read delegation + * (I don't understand why this isn't allowed, but that seems to be + * the current concensus?) + */ + tstp = LIST_FIRST(&lfp->lf_deleg); + while (tstp != LIST_END(&lfp->lf_deleg)) { + nstp = LIST_NEXT(tstp, ls_file); + if ((((new_stp->ls_flags&(NFSLCK_LOCK|NFSLCK_UNLOCK|NFSLCK_TEST))|| + ((new_stp->ls_flags & NFSLCK_CHECK) && + (new_lop->lo_flags & NFSLCK_READ))) && + clp != tstp->ls_clp && + (tstp->ls_flags & NFSLCK_DELEGWRITE)) || + ((new_stp->ls_flags & NFSLCK_CHECK) && + (new_lop->lo_flags & NFSLCK_WRITE) && + (clp != tstp->ls_clp || + (tstp->ls_flags & NFSLCK_DELEGREAD)))) { + ret = nfsrv_delegconflict(tstp, &haslock, p, vp); + if (ret) { + /* + * nfsrv_delegconflict unlocks state when it + * returns non-zero. + */ + if (other_lop) { + FREE((caddr_t)other_lop, M_NFSDLOCK); + other_lop = NULL; + } + if (ret == -1) { + lckstp = NULL; + goto tryagain; + } + return (ret); + } + } + tstp = nstp; + } + + /* + * Handle the unlock case by calling nfsrv_updatelock(). + * (Should I have done some access checking above for unlock? For now, + * just let it happen.) + */ + if (new_stp->ls_flags & NFSLCK_UNLOCK) { + nfsrv_updatelock(stp, new_lopp, &other_lop, lfp); + stateidp->seqid = ++(stp->ls_stateid.seqid); + stateidp->other[0] = stp->ls_stateid.other[0]; + stateidp->other[1] = stp->ls_stateid.other[1]; + stateidp->other[2] = stp->ls_stateid.other[2]; + /* + * For a non-empty flp->lf_lock list, I believe + * nfsrv_locallocks() can safely traverse the list, including + * sleeping, for two reasons: + * 1 - The Lock/LockU/Close Ops all require a locked + * vnode for the file and we currently have that. + * 2 - The only other thing that modifies a non-empty + * list is nfsrv_cleanclient() and it is always + * done with the exclusive nfsv4rootfs_lock held. + * Since this Op in progress holds either a shared or + * exclusive lock on nfsv4rootfs_lock, that can't + * happen now. + * However, the structure pointed to by lfp can go + * in many places for an empty list, so that is handled + * by passing a NULL pointer to nfsrv_locallocks(). + * Do that check now, while we are still SMP safe. + */ + if (LIST_EMPTY(&lfp->lf_lock)) + lfp = NULL; + NFSUNLOCKSTATE(); + nfsrv_locallocks(vp, lfp, p); + if (haslock) { + NFSLOCKV4ROOTMUTEX(); + nfsv4_unlock(&nfsv4rootfs_lock, 1); + NFSUNLOCKV4ROOTMUTEX(); + } + return (0); + } + + /* + * Search for a conflicting lock. 
A lock conflicts if: + * - the lock range overlaps and + * - at least one lock is a write lock and + * - it is not owned by the same lock owner + */ + if (!delegation) { + LIST_FOREACH(lop, &lfp->lf_lock, lo_lckfile) { + if (new_lop->lo_end > lop->lo_first && + new_lop->lo_first < lop->lo_end && + (new_lop->lo_flags == NFSLCK_WRITE || + lop->lo_flags == NFSLCK_WRITE) && + lckstp != lop->lo_stp && + (lckstp->ls_clp != lop->lo_stp->ls_clp || + lckstp->ls_ownerlen != lop->lo_stp->ls_ownerlen || + NFSBCMP(lckstp->ls_owner, lop->lo_stp->ls_owner, + lckstp->ls_ownerlen))) { + if (other_lop) { + FREE((caddr_t)other_lop, M_NFSDLOCK); + other_lop = NULL; + } + ret = nfsrv_clientconflict(lop->lo_stp->ls_clp,&haslock,vp,p); + if (ret) { + /* + * nfsrv_clientconflict() unlocks state when it + * returns non-zero. + */ + lckstp = NULL; + goto tryagain; + } + /* + * Found a conflicting lock, so record the conflict and + * return the error. + */ + if (cfp) { + cfp->cl_clientid.lval[0]=lop->lo_stp->ls_stateid.other[0]; + cfp->cl_clientid.lval[1]=lop->lo_stp->ls_stateid.other[1]; + cfp->cl_first = lop->lo_first; + cfp->cl_end = lop->lo_end; + cfp->cl_flags = lop->lo_flags; + cfp->cl_ownerlen = lop->lo_stp->ls_ownerlen; + NFSBCOPY(lop->lo_stp->ls_owner, cfp->cl_owner, + cfp->cl_ownerlen); + } + if (new_stp->ls_flags & NFSLCK_RECLAIM) + error = NFSERR_RECLAIMCONFLICT; + else if (new_stp->ls_flags & NFSLCK_CHECK) + error = NFSERR_LOCKED; + else + error = NFSERR_DENIED; + NFSUNLOCKSTATE(); + if (haslock) { + NFSLOCKV4ROOTMUTEX(); + nfsv4_unlock(&nfsv4rootfs_lock, 1); + NFSUNLOCKV4ROOTMUTEX(); + } + return (error); + } + } + } + + /* + * We only get here if there was no lock that conflicted. + */ + if (new_stp->ls_flags & (NFSLCK_TEST | NFSLCK_CHECK)) { + NFSUNLOCKSTATE(); + if (haslock) { + NFSLOCKV4ROOTMUTEX(); + nfsv4_unlock(&nfsv4rootfs_lock, 1); + NFSUNLOCKV4ROOTMUTEX(); + } + return (0); + } + + /* + * We only get here when we are creating or modifying a lock. + * There are two variants: + * - exist_lock_owner where lock_owner exists + * - open_to_lock_owner with new lock_owner + */ + if (!(new_stp->ls_flags & NFSLCK_OPENTOLOCK)) { + nfsrv_updatelock(lckstp, new_lopp, &other_lop, lfp); + stateidp->seqid = ++(lckstp->ls_stateid.seqid); + stateidp->other[0] = lckstp->ls_stateid.other[0]; + stateidp->other[1] = lckstp->ls_stateid.other[1]; + stateidp->other[2] = lckstp->ls_stateid.other[2]; + } else { + /* + * The new open_to_lock_owner case. + * Link the new nfsstate into the lists. + */ + new_stp->ls_seq = new_stp->ls_opentolockseq; + nfsrvd_refcache(new_stp->ls_op); + stateidp->seqid = new_stp->ls_stateid.seqid = 0; + stateidp->other[0] = new_stp->ls_stateid.other[0] = + clp->lc_clientid.lval[0]; + stateidp->other[1] = new_stp->ls_stateid.other[1] = + clp->lc_clientid.lval[1]; + stateidp->other[2] = new_stp->ls_stateid.other[2] = + nfsrv_nextstateindex(clp); + new_stp->ls_clp = clp; + LIST_INIT(&new_stp->ls_lock); + new_stp->ls_openstp = stp; + new_stp->ls_lfp = lfp; + nfsrv_insertlock(new_lop, (struct nfslock *)new_stp, new_stp, + lfp); + LIST_INSERT_HEAD(NFSSTATEHASH(clp, new_stp->ls_stateid), + new_stp, ls_hash); + LIST_INSERT_HEAD(&stp->ls_open, new_stp, ls_list); + *new_lopp = NULL; + *new_stpp = NULL; + newnfsstats.srvlockowners++; + nfsrv_openpluslock++; + } + /* See comment above, w.r.t. nfsrv_locallocks(). 
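A minimal standalone restatement of the three-part conflict test that the loop above applies; the struct and function below are illustrative stand-ins, not the kernel's nfslock/nfsstate types:

	#include <stdbool.h>
	#include <stdint.h>
	#include <string.h>

	/* Simplified view of a byte-range lock (end offset exclusive, as above). */
	struct range_lock {
		uint64_t	first;		/* start offset */
		uint64_t	end;		/* end offset, exclusive */
		bool		is_write;	/* write (exclusive) lock? */
		const void	*owner;		/* opaque lock-owner identity */
		size_t		ownerlen;
	};

	/*
	 * A new lock conflicts with an existing one iff the ranges overlap,
	 * at least one of the two is a write lock and the lock owners differ.
	 */
	static bool
	range_lock_conflicts(const struct range_lock *nl, const struct range_lock *ol)
	{

		if (nl->end <= ol->first || nl->first >= ol->end)
			return (false);		/* no overlap */
		if (!nl->is_write && !ol->is_write)
			return (false);		/* two read locks never conflict */
		if (nl->ownerlen == ol->ownerlen &&
		    memcmp(nl->owner, ol->owner, nl->ownerlen) == 0)
			return (false);		/* same lock owner */
		return (true);
	}

In the kernel loop the owner comparison additionally distinguishes owners belonging to different clients, which the opaque owner/ownerlen pair here glosses over.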
*/ + if (LIST_EMPTY(&lfp->lf_lock)) + lfp = NULL; + NFSUNLOCKSTATE(); + nfsrv_locallocks(vp, lfp, p); + if (haslock) { + NFSLOCKV4ROOTMUTEX(); + nfsv4_unlock(&nfsv4rootfs_lock, 1); + NFSUNLOCKV4ROOTMUTEX(); + } + if (other_lop) + FREE((caddr_t)other_lop, M_NFSDLOCK); + return (0); +} + +/* + * Check for state errors for Open. + * repstat is passed back out as an error if more critical errors + * are not detected. + */ +APPLESTATIC int +nfsrv_opencheck(nfsquad_t clientid, nfsv4stateid_t *stateidp, + struct nfsstate *new_stp, vnode_t vp, struct nfsrv_descript *nd, + NFSPROC_T *p, int repstat) +{ + struct nfsstate *stp, *nstp; + struct nfsclient *clp; + struct nfsstate *ownerstp; + struct nfslockfile *lfp, *new_lfp; + int error, haslock = 0, ret, readonly = 0, getfhret = 0; + + if ((new_stp->ls_flags & NFSLCK_SHAREBITS) == NFSLCK_READACCESS) + readonly = 1; + /* + * Check for restart conditions (client and server). + */ + error = nfsrv_checkrestart(clientid, new_stp->ls_flags, + &new_stp->ls_stateid, 0); + if (error) + return (error); + + /* + * Check for state resource limit exceeded. + * Technically this should be SMP protected, but the worst + * case error is "out by one or two" on the count when it + * returns NFSERR_RESOURCE and the limit is just a rather + * arbitrary high water mark, so no harm is done. + */ + if (nfsrv_openpluslock > NFSRV_V4STATELIMIT) + return (NFSERR_RESOURCE); + +tryagain: + MALLOC(new_lfp, struct nfslockfile *, sizeof (struct nfslockfile), + M_NFSDLOCKFILE, M_WAITOK); + if (vp) + getfhret = nfsrv_getlockfh(vp, new_stp->ls_flags, &new_lfp, + NULL, p); + NFSLOCKSTATE(); + /* + * Get the nfsclient structure. + * Since the code is manipulating lists that are also + * manipulated by nfsrv_servertimer(), soft clock interrupts + * must be masked off. + */ + error = nfsrv_getclient(clientid, CLOPS_RENEW, &clp, + (nfsquad_t)((u_quad_t)0), NULL, p); + + /* + * Look up the open owner. See if it needs confirmation and + * check the seq#, as required. + */ + if (!error) + nfsrv_getowner(&clp->lc_open, new_stp, &ownerstp); + + if (!error && ownerstp) { + error = nfsrv_checkseqid(nd, new_stp->ls_seq, ownerstp, + new_stp->ls_op); + /* + * If the OpenOwner hasn't been confirmed, assume the + * old one was a replay and this one is ok. + * See: RFC3530 Sec. 14.2.18. + */ + if (error == NFSERR_BADSEQID && + (ownerstp->ls_flags & NFSLCK_NEEDSCONFIRM)) + error = 0; + } + + /* + * Check for grace. + */ + if (!error) + error = nfsrv_checkgrace(new_stp->ls_flags); + if ((new_stp->ls_flags & NFSLCK_RECLAIM) && !error && + nfsrv_checkstable(clp)) + error = NFSERR_NOGRACE; + + /* + * If none of the above errors occurred, let repstat be + * returned. + */ + if (repstat && !error) + error = repstat; + if (error) { + NFSUNLOCKSTATE(); + if (haslock) { + NFSLOCKV4ROOTMUTEX(); + nfsv4_unlock(&nfsv4rootfs_lock, 1); + NFSUNLOCKV4ROOTMUTEX(); + } + free((caddr_t)new_lfp, M_NFSDLOCKFILE); + return (error); + } + + /* + * If vp == NULL, the file doesn't exist yet, so return ok. + * (This always happens on the first pass, so haslock must be 0.) + */ + if (vp == NULL) { + NFSUNLOCKSTATE(); + FREE((caddr_t)new_lfp, M_NFSDLOCKFILE); + return (0); + } + + /* + * Get the structure for the underlying file. 
+ */ + if (getfhret) + error = getfhret; + else + error = nfsrv_getlockfile(new_stp->ls_flags, &new_lfp, &lfp, + NULL); + if (new_lfp) + FREE((caddr_t)new_lfp, M_NFSDLOCKFILE); + if (error) { + NFSUNLOCKSTATE(); + if (haslock) { + NFSLOCKV4ROOTMUTEX(); + nfsv4_unlock(&nfsv4rootfs_lock, 1); + NFSUNLOCKV4ROOTMUTEX(); + } + return (error); + } + + /* + * Search for a conflicting open/share. + */ + if (new_stp->ls_flags & NFSLCK_DELEGCUR) { + /* + * For Delegate_Cur, search for the matching Delegation, + * which indicates no conflict. + * An old delegation should have been recovered by the + * client doing a Claim_DELEGATE_Prev, so I won't let + * it match and return NFSERR_EXPIRED. Should I let it + * match? + */ + LIST_FOREACH(stp, &lfp->lf_deleg, ls_file) { + if (!(stp->ls_flags & NFSLCK_OLDDELEG) && + stateidp->seqid == stp->ls_stateid.seqid && + !NFSBCMP(stateidp->other, stp->ls_stateid.other, + NFSX_STATEIDOTHER)) + break; + } + if (stp == LIST_END(&lfp->lf_deleg) || + ((new_stp->ls_flags & NFSLCK_WRITEACCESS) && + (stp->ls_flags & NFSLCK_DELEGREAD))) { + NFSUNLOCKSTATE(); + if (haslock) { + NFSLOCKV4ROOTMUTEX(); + nfsv4_unlock(&nfsv4rootfs_lock, 1); + NFSUNLOCKV4ROOTMUTEX(); + } + return (NFSERR_EXPIRED); + } + } + + /* + * Check for access/deny bit conflicts. I check for the same + * owner as well, in case the client didn't bother. + */ + LIST_FOREACH(stp, &lfp->lf_open, ls_file) { + if (!(new_stp->ls_flags & NFSLCK_DELEGCUR) && + (((new_stp->ls_flags & NFSLCK_ACCESSBITS) & + ((stp->ls_flags>>NFSLCK_SHIFT) & NFSLCK_ACCESSBITS))|| + ((stp->ls_flags & NFSLCK_ACCESSBITS) & + ((new_stp->ls_flags>>NFSLCK_SHIFT)&NFSLCK_ACCESSBITS)))){ + ret = nfsrv_clientconflict(stp->ls_clp,&haslock,vp,p); + if (ret) { + /* + * nfsrv_clientconflict() unlocks + * state when it returns non-zero. + */ + goto tryagain; + } + if (new_stp->ls_flags & NFSLCK_RECLAIM) + error = NFSERR_RECLAIMCONFLICT; + else + error = NFSERR_SHAREDENIED; + NFSUNLOCKSTATE(); + if (haslock) { + NFSLOCKV4ROOTMUTEX(); + nfsv4_unlock(&nfsv4rootfs_lock, 1); + NFSUNLOCKV4ROOTMUTEX(); + } + return (error); + } + } + + /* + * Check for a conflicting delegation. If one is found, call + * nfsrv_delegconflict() to handle it. If the v4root lock hasn't + * been set yet, it will get the lock. Otherwise, it will recall + * the delegation. Then, we try try again... + * (If NFSLCK_DELEGCUR is set, it has a delegation, so there + * isn't a conflict.) + * I currently believe the conflict algorithm to be: + * For Open with Read Access and Deny None + * - there is a conflict iff a different client has a write delegation + * For Open with other Write Access or any Deny except None + * - there is a conflict if a different client has any delegation + * - there is a conflict if the same client has a read delegation + * (The current concensus is that this last case should be + * considered a conflict since the client with a read delegation + * could have done an Open with ReadAccess and WriteDeny + * locally and then not have checked for the WriteDeny.) + * Don't check for a Reclaim, since that will be dealt with + * by nfsrv_openctrl(). 
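The conflict rule spelled out here reduces to a small predicate. A minimal sketch, using illustrative names rather than the kernel's flag bits:

	#include <stdbool.h>

	enum deleg_type { DELEG_NONE, DELEG_READ, DELEG_WRITE };

	/*
	 * Does an existing delegation conflict with a new Open?
	 * - An Open for read access with deny none conflicts only with a
	 *   write delegation held by a different client.
	 * - Any other Open conflicts with any delegation held by a different
	 *   client and with a read delegation held by the same client.
	 */
	static bool
	open_deleg_conflict(bool open_is_readonly, bool same_client,
	    enum deleg_type held)
	{

		if (held == DELEG_NONE)
			return (false);
		if (open_is_readonly)
			return (!same_client && held == DELEG_WRITE);
		return (!same_client || held == DELEG_READ);
	}

The loop that follows applies this test, with the readonly flag computed from the share bits of the new Open.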
+ */ + if (!(new_stp->ls_flags & + (NFSLCK_DELEGPREV | NFSLCK_DELEGCUR | NFSLCK_RECLAIM))) { + stp = LIST_FIRST(&lfp->lf_deleg); + while (stp != LIST_END(&lfp->lf_deleg)) { + nstp = LIST_NEXT(stp, ls_file); + if ((readonly && stp->ls_clp != clp && + (stp->ls_flags & NFSLCK_DELEGWRITE)) || + (!readonly && (stp->ls_clp != clp || + (stp->ls_flags & NFSLCK_DELEGREAD)))) { + ret = nfsrv_delegconflict(stp, &haslock, p, vp); + if (ret) { + /* + * nfsrv_delegconflict() unlocks state + * when it returns non-zero. + */ + if (ret == -1) + goto tryagain; + return (ret); + } + } + stp = nstp; + } + } + NFSUNLOCKSTATE(); + if (haslock) { + NFSLOCKV4ROOTMUTEX(); + nfsv4_unlock(&nfsv4rootfs_lock, 1); + NFSUNLOCKV4ROOTMUTEX(); + } + return (0); +} + +/* + * Open control function to create/update open state for an open. + */ +APPLESTATIC int +nfsrv_openctrl(struct nfsrv_descript *nd, vnode_t vp, + struct nfsstate **new_stpp, nfsquad_t clientid, nfsv4stateid_t *stateidp, + nfsv4stateid_t *delegstateidp, u_int32_t *rflagsp, struct nfsexstuff *exp, + NFSPROC_T *p, u_quad_t filerev) +{ + struct nfsstate *new_stp = *new_stpp; + struct nfsstate *stp, *nstp; + struct nfsstate *openstp = NULL, *new_open, *ownerstp, *new_deleg; + struct nfslockfile *lfp, *new_lfp; + struct nfsclient *clp; + int error, haslock = 0, ret, delegate = 1, writedeleg = 1; + int readonly = 0, cbret = 1, getfhret = 0; + + if ((new_stp->ls_flags & NFSLCK_SHAREBITS) == NFSLCK_READACCESS) + readonly = 1; + /* + * Check for restart conditions (client and server). + * (Paranoia, should have been detected by nfsrv_opencheck().) + * If an error does show up, return NFSERR_EXPIRED, since the + * the seqid# has already been incremented. + */ + error = nfsrv_checkrestart(clientid, new_stp->ls_flags, + &new_stp->ls_stateid, 0); + if (error) { + printf("Nfsd: openctrl unexpected restart err=%d\n", + error); + return (NFSERR_EXPIRED); + } + +tryagain: + MALLOC(new_lfp, struct nfslockfile *, sizeof (struct nfslockfile), + M_NFSDLOCKFILE, M_WAITOK); + MALLOC(new_open, struct nfsstate *, sizeof (struct nfsstate), + M_NFSDSTATE, M_WAITOK); + MALLOC(new_deleg, struct nfsstate *, sizeof (struct nfsstate), + M_NFSDSTATE, M_WAITOK); + getfhret = nfsrv_getlockfh(vp, new_stp->ls_flags, &new_lfp, + NULL, p); + NFSLOCKSTATE(); + /* + * Get the client structure. Since the linked lists could be changed + * by other nfsd processes if this process does a tsleep(), one of + * two things must be done. + * 1 - don't tsleep() + * or + * 2 - get the nfsv4_lock() { indicated by haslock == 1 } + * before using the lists, since this lock stops the other + * nfsd. This should only be used for rare cases, since it + * essentially single threads the nfsd. + * At this time, it is only done for cases where the stable + * storage file must be written prior to completion of state + * expiration. + */ + error = nfsrv_getclient(clientid, CLOPS_RENEW, &clp, + (nfsquad_t)((u_quad_t)0), NULL, p); + if (!error && (clp->lc_flags & LCL_NEEDSCBNULL) && + clp->lc_program) { + /* + * This happens on the first open for a client + * that supports callbacks. + */ + NFSUNLOCKSTATE(); + /* + * Although nfsrv_docallback() will sleep, clp won't + * go away, since they are only removed when the + * nfsv4_lock() has blocked the nfsd threads. The + * fields in clp can change, but having multiple + * threads do this Null callback RPC should be + * harmless. 
+ */ + cbret = nfsrv_docallback(clp, NFSV4PROC_CBNULL, + NULL, 0, NULL, NULL, NULL, p); + NFSLOCKSTATE(); + clp->lc_flags &= ~LCL_NEEDSCBNULL; + if (!cbret) + clp->lc_flags |= LCL_CALLBACKSON; + } + + /* + * Look up the open owner. See if it needs confirmation and + * check the seq#, as required. + */ + if (!error) + nfsrv_getowner(&clp->lc_open, new_stp, &ownerstp); + + if (error) { + NFSUNLOCKSTATE(); + printf("Nfsd: openctrl unexpected state err=%d\n", + error); + free((caddr_t)new_lfp, M_NFSDLOCKFILE); + free((caddr_t)new_open, M_NFSDSTATE); + free((caddr_t)new_deleg, M_NFSDSTATE); + if (haslock) { + NFSLOCKV4ROOTMUTEX(); + nfsv4_unlock(&nfsv4rootfs_lock, 1); + NFSUNLOCKV4ROOTMUTEX(); + } + return (NFSERR_EXPIRED); + } + + if (new_stp->ls_flags & NFSLCK_RECLAIM) + nfsrv_markstable(clp); + + /* + * Get the structure for the underlying file. + */ + if (getfhret) + error = getfhret; + else + error = nfsrv_getlockfile(new_stp->ls_flags, &new_lfp, &lfp, + NULL); + if (new_lfp) + FREE((caddr_t)new_lfp, M_NFSDLOCKFILE); + if (error) { + NFSUNLOCKSTATE(); + printf("Nfsd openctrl unexpected getlockfile err=%d\n", + error); + free((caddr_t)new_open, M_NFSDSTATE); + free((caddr_t)new_deleg, M_NFSDSTATE); + if (haslock) { + NFSLOCKV4ROOTMUTEX(); + nfsv4_unlock(&nfsv4rootfs_lock, 1); + NFSUNLOCKV4ROOTMUTEX(); + } + return (error); + } + + /* + * Search for a conflicting open/share. + */ + if (new_stp->ls_flags & NFSLCK_DELEGCUR) { + /* + * For Delegate_Cur, search for the matching Delegation, + * which indicates no conflict. + * An old delegation should have been recovered by the + * client doing a Claim_DELEGATE_Prev, so I won't let + * it match and return NFSERR_EXPIRED. Should I let it + * match? + */ + LIST_FOREACH(stp, &lfp->lf_deleg, ls_file) { + if (!(stp->ls_flags & NFSLCK_OLDDELEG) && + stateidp->seqid == stp->ls_stateid.seqid && + !NFSBCMP(stateidp->other, stp->ls_stateid.other, + NFSX_STATEIDOTHER)) + break; + } + if (stp == LIST_END(&lfp->lf_deleg) || + ((new_stp->ls_flags & NFSLCK_WRITEACCESS) && + (stp->ls_flags & NFSLCK_DELEGREAD))) { + NFSUNLOCKSTATE(); + printf("Nfsd openctrl unexpected expiry\n"); + free((caddr_t)new_open, M_NFSDSTATE); + free((caddr_t)new_deleg, M_NFSDSTATE); + if (haslock) { + NFSLOCKV4ROOTMUTEX(); + nfsv4_unlock(&nfsv4rootfs_lock, 1); + NFSUNLOCKV4ROOTMUTEX(); + } + return (NFSERR_EXPIRED); + } + + /* + * Don't issue a Delegation, since one already exists and + * delay delegation timeout, as required. + */ + delegate = 0; + nfsrv_delaydelegtimeout(stp); + } + + /* + * Check for access/deny bit conflicts. I also check for the + * same owner, since the client might not have bothered to check. + * Also, note an open for the same file and owner, if found, + * which is all we do here for Delegate_Cur, since conflict + * checking is already done. + */ + LIST_FOREACH(stp, &lfp->lf_open, ls_file) { + if (ownerstp && stp->ls_openowner == ownerstp) + openstp = stp; + if (!(new_stp->ls_flags & NFSLCK_DELEGCUR)) { + /* + * If another client has the file open, the only + * delegation that can be issued is a Read delegation + * and only if it is a Read open with Deny none. 
+ */ + if (clp != stp->ls_clp) { + if ((stp->ls_flags & NFSLCK_SHAREBITS) == + NFSLCK_READACCESS) + writedeleg = 0; + else + delegate = 0; + } + if(((new_stp->ls_flags & NFSLCK_ACCESSBITS) & + ((stp->ls_flags>>NFSLCK_SHIFT) & NFSLCK_ACCESSBITS))|| + ((stp->ls_flags & NFSLCK_ACCESSBITS) & + ((new_stp->ls_flags>>NFSLCK_SHIFT)&NFSLCK_ACCESSBITS))){ + ret = nfsrv_clientconflict(stp->ls_clp,&haslock,vp,p); + if (ret) { + /* + * nfsrv_clientconflict() unlocks state + * when it returns non-zero. + */ + free((caddr_t)new_open, M_NFSDSTATE); + free((caddr_t)new_deleg, M_NFSDSTATE); + openstp = NULL; + goto tryagain; + } + if (new_stp->ls_flags & NFSLCK_RECLAIM) + error = NFSERR_RECLAIMCONFLICT; + else + error = NFSERR_SHAREDENIED; + NFSUNLOCKSTATE(); + if (haslock) { + NFSLOCKV4ROOTMUTEX(); + nfsv4_unlock(&nfsv4rootfs_lock, 1); + NFSUNLOCKV4ROOTMUTEX(); + } + free((caddr_t)new_open, M_NFSDSTATE); + free((caddr_t)new_deleg, M_NFSDSTATE); + printf("nfsd openctrl unexpected client cnfl\n"); + return (error); + } + } + } + + /* + * Check for a conflicting delegation. If one is found, call + * nfsrv_delegconflict() to handle it. If the v4root lock hasn't + * been set yet, it will get the lock. Otherwise, it will recall + * the delegation. Then, we try try again... + * (If NFSLCK_DELEGCUR is set, it has a delegation, so there + * isn't a conflict.) + * I currently believe the conflict algorithm to be: + * For Open with Read Access and Deny None + * - there is a conflict iff a different client has a write delegation + * For Open with other Write Access or any Deny except None + * - there is a conflict if a different client has any delegation + * - there is a conflict if the same client has a read delegation + * (The current concensus is that this last case should be + * considered a conflict since the client with a read delegation + * could have done an Open with ReadAccess and WriteDeny + * locally and then not have checked for the WriteDeny.) + */ + if (!(new_stp->ls_flags & (NFSLCK_DELEGPREV | NFSLCK_DELEGCUR))) { + stp = LIST_FIRST(&lfp->lf_deleg); + while (stp != LIST_END(&lfp->lf_deleg)) { + nstp = LIST_NEXT(stp, ls_file); + if (stp->ls_clp != clp && (stp->ls_flags & NFSLCK_DELEGREAD)) + writedeleg = 0; + else + delegate = 0; + if ((readonly && stp->ls_clp != clp && + (stp->ls_flags & NFSLCK_DELEGWRITE)) || + (!readonly && (stp->ls_clp != clp || + (stp->ls_flags & NFSLCK_DELEGREAD)))) { + if (new_stp->ls_flags & NFSLCK_RECLAIM) { + delegate = 2; + } else { + ret = nfsrv_delegconflict(stp, &haslock, p, vp); + if (ret) { + /* + * nfsrv_delegconflict() unlocks state + * when it returns non-zero. + */ + printf("Nfsd openctrl unexpected deleg cnfl\n"); + free((caddr_t)new_open, M_NFSDSTATE); + free((caddr_t)new_deleg, M_NFSDSTATE); + if (ret == -1) { + openstp = NULL; + goto tryagain; + } + return (ret); + } + } + } + stp = nstp; + } + } + + /* + * We only get here if there was no open that conflicted. + * If an open for the owner exists, or in the access/deny bits. + * Otherwise it is a new open. If the open_owner hasn't been + * confirmed, replace the open with the new one needing confirmation, + * otherwise add the open. + */ + if (new_stp->ls_flags & NFSLCK_DELEGPREV) { + /* + * Handle NFSLCK_DELEGPREV by searching the old delegations for + * a match. If found, just move the old delegation to the current + * delegation list and issue open. If not found, return + * NFSERR_EXPIRED. 
+ */ + LIST_FOREACH(stp, &clp->lc_olddeleg, ls_list) { + if (stp->ls_lfp == lfp) { + /* Found it */ + if (stp->ls_clp != clp) + panic("olddeleg clp"); + LIST_REMOVE(stp, ls_list); + LIST_REMOVE(stp, ls_hash); + stp->ls_flags &= ~NFSLCK_OLDDELEG; + stp->ls_stateid.seqid = delegstateidp->seqid = 0; + stp->ls_stateid.other[0] = delegstateidp->other[0] = + clp->lc_clientid.lval[0]; + stp->ls_stateid.other[1] = delegstateidp->other[1] = + clp->lc_clientid.lval[1]; + stp->ls_stateid.other[2] = delegstateidp->other[2] = + nfsrv_nextstateindex(clp); + stp->ls_compref = nd->nd_compref; + LIST_INSERT_HEAD(&clp->lc_deleg, stp, ls_list); + LIST_INSERT_HEAD(NFSSTATEHASH(clp, + stp->ls_stateid), stp, ls_hash); + if (stp->ls_flags & NFSLCK_DELEGWRITE) + *rflagsp |= NFSV4OPEN_WRITEDELEGATE; + else + *rflagsp |= NFSV4OPEN_READDELEGATE; + clp->lc_delegtime = NFSD_MONOSEC + + nfsrv_lease + NFSRV_LEASEDELTA; + + /* + * Now, do the associated open. + */ + new_open->ls_stateid.seqid = 0; + new_open->ls_stateid.other[0] = clp->lc_clientid.lval[0]; + new_open->ls_stateid.other[1] = clp->lc_clientid.lval[1]; + new_open->ls_stateid.other[2] = nfsrv_nextstateindex(clp); + new_open->ls_flags = (new_stp->ls_flags&NFSLCK_DENYBITS)| + NFSLCK_OPEN; + if (stp->ls_flags & NFSLCK_DELEGWRITE) + new_open->ls_flags |= (NFSLCK_READACCESS | + NFSLCK_WRITEACCESS); + else + new_open->ls_flags |= NFSLCK_READACCESS; + new_open->ls_uid = new_stp->ls_uid; + new_open->ls_lfp = lfp; + new_open->ls_clp = clp; + LIST_INIT(&new_open->ls_open); + LIST_INSERT_HEAD(&lfp->lf_open, new_open, ls_file); + LIST_INSERT_HEAD(NFSSTATEHASH(clp, new_open->ls_stateid), + new_open, ls_hash); + /* + * and handle the open owner + */ + if (ownerstp) { + new_open->ls_openowner = ownerstp; + LIST_INSERT_HEAD(&ownerstp->ls_open,new_open,ls_list); + } else { + new_open->ls_openowner = new_stp; + new_stp->ls_flags = 0; + nfsrvd_refcache(new_stp->ls_op); + new_stp->ls_noopens = 0; + LIST_INIT(&new_stp->ls_open); + LIST_INSERT_HEAD(&new_stp->ls_open, new_open, ls_list); + LIST_INSERT_HEAD(&clp->lc_open, new_stp, ls_list); + *new_stpp = NULL; + newnfsstats.srvopenowners++; + nfsrv_openpluslock++; + } + openstp = new_open; + new_open = NULL; + newnfsstats.srvopens++; + nfsrv_openpluslock++; + break; + } + } + if (stp == LIST_END(&clp->lc_olddeleg)) + error = NFSERR_EXPIRED; + } else if (new_stp->ls_flags & (NFSLCK_DELEGREAD | NFSLCK_DELEGWRITE)) { + /* + * Scan to see that no delegation for this client and file + * doesn't already exist. + * There also shouldn't yet be an Open for this file and + * openowner. + */ + LIST_FOREACH(stp, &lfp->lf_deleg, ls_file) { + if (stp->ls_clp == clp) + break; + } + if (stp == LIST_END(&lfp->lf_deleg) && openstp == NULL) { + /* + * This is the Claim_Previous case with a delegation + * type != Delegate_None. + */ + /* + * First, add the delegation. (Although we must issue the + * delegation, we can also ask for an immediate return.) 
+ */ + new_deleg->ls_stateid.seqid = delegstateidp->seqid = 0; + new_deleg->ls_stateid.other[0] = delegstateidp->other[0] = + clp->lc_clientid.lval[0]; + new_deleg->ls_stateid.other[1] = delegstateidp->other[1] = + clp->lc_clientid.lval[1]; + new_deleg->ls_stateid.other[2] = delegstateidp->other[2] = + nfsrv_nextstateindex(clp); + if (new_stp->ls_flags & NFSLCK_DELEGWRITE) { + new_deleg->ls_flags = (NFSLCK_DELEGWRITE | + NFSLCK_READACCESS | NFSLCK_WRITEACCESS); + *rflagsp |= NFSV4OPEN_WRITEDELEGATE; + } else { + new_deleg->ls_flags = (NFSLCK_DELEGREAD | + NFSLCK_READACCESS); + *rflagsp |= NFSV4OPEN_READDELEGATE; + } + new_deleg->ls_uid = new_stp->ls_uid; + new_deleg->ls_lfp = lfp; + new_deleg->ls_clp = clp; + new_deleg->ls_filerev = filerev; + new_deleg->ls_compref = nd->nd_compref; + LIST_INSERT_HEAD(&lfp->lf_deleg, new_deleg, ls_file); + LIST_INSERT_HEAD(NFSSTATEHASH(clp, + new_deleg->ls_stateid), new_deleg, ls_hash); + LIST_INSERT_HEAD(&clp->lc_deleg, new_deleg, ls_list); + new_deleg = NULL; + if (delegate == 2 || nfsrv_issuedelegs == 0 || + (clp->lc_flags & (LCL_CALLBACKSON | LCL_CBDOWN)) != + LCL_CALLBACKSON || + NFSRV_V4DELEGLIMIT(nfsrv_delegatecnt) || + !NFSVNO_DELEGOK(vp)) + *rflagsp |= NFSV4OPEN_RECALL; + newnfsstats.srvdelegates++; + nfsrv_openpluslock++; + nfsrv_delegatecnt++; + + /* + * Now, do the associated open. + */ + new_open->ls_stateid.seqid = 0; + new_open->ls_stateid.other[0] = clp->lc_clientid.lval[0]; + new_open->ls_stateid.other[1] = clp->lc_clientid.lval[1]; + new_open->ls_stateid.other[2] = nfsrv_nextstateindex(clp); + new_open->ls_flags = (new_stp->ls_flags & NFSLCK_DENYBITS) | + NFSLCK_OPEN; + if (new_stp->ls_flags & NFSLCK_DELEGWRITE) + new_open->ls_flags |= (NFSLCK_READACCESS | + NFSLCK_WRITEACCESS); + else + new_open->ls_flags |= NFSLCK_READACCESS; + new_open->ls_uid = new_stp->ls_uid; + new_open->ls_lfp = lfp; + new_open->ls_clp = clp; + LIST_INIT(&new_open->ls_open); + LIST_INSERT_HEAD(&lfp->lf_open, new_open, ls_file); + LIST_INSERT_HEAD(NFSSTATEHASH(clp, new_open->ls_stateid), + new_open, ls_hash); + /* + * and handle the open owner + */ + if (ownerstp) { + new_open->ls_openowner = ownerstp; + LIST_INSERT_HEAD(&ownerstp->ls_open, new_open, ls_list); + } else { + new_open->ls_openowner = new_stp; + new_stp->ls_flags = 0; + nfsrvd_refcache(new_stp->ls_op); + new_stp->ls_noopens = 0; + LIST_INIT(&new_stp->ls_open); + LIST_INSERT_HEAD(&new_stp->ls_open, new_open, ls_list); + LIST_INSERT_HEAD(&clp->lc_open, new_stp, ls_list); + *new_stpp = NULL; + newnfsstats.srvopenowners++; + nfsrv_openpluslock++; + } + openstp = new_open; + new_open = NULL; + newnfsstats.srvopens++; + nfsrv_openpluslock++; + } else { + error = NFSERR_RECLAIMCONFLICT; + } + } else if (ownerstp) { + if (ownerstp->ls_flags & NFSLCK_NEEDSCONFIRM) { + /* Replace the open */ + if (ownerstp->ls_op) + nfsrvd_derefcache(ownerstp->ls_op); + ownerstp->ls_op = new_stp->ls_op; + nfsrvd_refcache(ownerstp->ls_op); + ownerstp->ls_seq = new_stp->ls_seq; + *rflagsp |= NFSV4OPEN_RESULTCONFIRM; + stp = LIST_FIRST(&ownerstp->ls_open); + stp->ls_flags = (new_stp->ls_flags & NFSLCK_SHAREBITS) | + NFSLCK_OPEN; + stp->ls_stateid.seqid = 0; + stp->ls_uid = new_stp->ls_uid; + if (lfp != stp->ls_lfp) { + LIST_REMOVE(stp, ls_file); + LIST_INSERT_HEAD(&lfp->lf_open, stp, ls_file); + stp->ls_lfp = lfp; + } + openstp = stp; + } else if (openstp) { + openstp->ls_flags |= (new_stp->ls_flags & NFSLCK_SHAREBITS); + openstp->ls_stateid.seqid++; + + /* + * This is where we can choose to issue a delegation. 
+ */ + if (delegate && nfsrv_issuedelegs && + writedeleg && !NFSVNO_EXRDONLY(exp) && + (nfsrv_writedelegifpos || !readonly) && + (clp->lc_flags & (LCL_CALLBACKSON | LCL_CBDOWN)) == + LCL_CALLBACKSON && + !NFSRV_V4DELEGLIMIT(nfsrv_delegatecnt) && + NFSVNO_DELEGOK(vp)) { + new_deleg->ls_stateid.seqid = delegstateidp->seqid = 0; + new_deleg->ls_stateid.other[0] = delegstateidp->other[0] + = clp->lc_clientid.lval[0]; + new_deleg->ls_stateid.other[1] = delegstateidp->other[1] + = clp->lc_clientid.lval[1]; + new_deleg->ls_stateid.other[2] = delegstateidp->other[2] + = nfsrv_nextstateindex(clp); + new_deleg->ls_flags = (NFSLCK_DELEGWRITE | + NFSLCK_READACCESS | NFSLCK_WRITEACCESS); + *rflagsp |= NFSV4OPEN_WRITEDELEGATE; + new_deleg->ls_uid = new_stp->ls_uid; + new_deleg->ls_lfp = lfp; + new_deleg->ls_clp = clp; + new_deleg->ls_filerev = filerev; + new_deleg->ls_compref = nd->nd_compref; + LIST_INSERT_HEAD(&lfp->lf_deleg, new_deleg, ls_file); + LIST_INSERT_HEAD(NFSSTATEHASH(clp, + new_deleg->ls_stateid), new_deleg, ls_hash); + LIST_INSERT_HEAD(&clp->lc_deleg, new_deleg, ls_list); + new_deleg = NULL; + newnfsstats.srvdelegates++; + nfsrv_openpluslock++; + nfsrv_delegatecnt++; + } + } else { + new_open->ls_stateid.seqid = 0; + new_open->ls_stateid.other[0] = clp->lc_clientid.lval[0]; + new_open->ls_stateid.other[1] = clp->lc_clientid.lval[1]; + new_open->ls_stateid.other[2] = nfsrv_nextstateindex(clp); + new_open->ls_flags = (new_stp->ls_flags & NFSLCK_SHAREBITS)| + NFSLCK_OPEN; + new_open->ls_uid = new_stp->ls_uid; + new_open->ls_openowner = ownerstp; + new_open->ls_lfp = lfp; + new_open->ls_clp = clp; + LIST_INIT(&new_open->ls_open); + LIST_INSERT_HEAD(&lfp->lf_open, new_open, ls_file); + LIST_INSERT_HEAD(&ownerstp->ls_open, new_open, ls_list); + LIST_INSERT_HEAD(NFSSTATEHASH(clp, new_open->ls_stateid), + new_open, ls_hash); + openstp = new_open; + new_open = NULL; + newnfsstats.srvopens++; + nfsrv_openpluslock++; + + /* + * This is where we can choose to issue a delegation. + */ + if (delegate && nfsrv_issuedelegs && + (writedeleg || readonly) && + (clp->lc_flags & (LCL_CALLBACKSON | LCL_CBDOWN)) == + LCL_CALLBACKSON && + !NFSRV_V4DELEGLIMIT(nfsrv_delegatecnt) && + NFSVNO_DELEGOK(vp)) { + new_deleg->ls_stateid.seqid = delegstateidp->seqid = 0; + new_deleg->ls_stateid.other[0] = delegstateidp->other[0] + = clp->lc_clientid.lval[0]; + new_deleg->ls_stateid.other[1] = delegstateidp->other[1] + = clp->lc_clientid.lval[1]; + new_deleg->ls_stateid.other[2] = delegstateidp->other[2] + = nfsrv_nextstateindex(clp); + if (writedeleg && !NFSVNO_EXRDONLY(exp) && + (nfsrv_writedelegifpos || !readonly)) { + new_deleg->ls_flags = (NFSLCK_DELEGWRITE | + NFSLCK_READACCESS | NFSLCK_WRITEACCESS); + *rflagsp |= NFSV4OPEN_WRITEDELEGATE; + } else { + new_deleg->ls_flags = (NFSLCK_DELEGREAD | + NFSLCK_READACCESS); + *rflagsp |= NFSV4OPEN_READDELEGATE; + } + new_deleg->ls_uid = new_stp->ls_uid; + new_deleg->ls_lfp = lfp; + new_deleg->ls_clp = clp; + new_deleg->ls_filerev = filerev; + new_deleg->ls_compref = nd->nd_compref; + LIST_INSERT_HEAD(&lfp->lf_deleg, new_deleg, ls_file); + LIST_INSERT_HEAD(NFSSTATEHASH(clp, + new_deleg->ls_stateid), new_deleg, ls_hash); + LIST_INSERT_HEAD(&clp->lc_deleg, new_deleg, ls_list); + new_deleg = NULL; + newnfsstats.srvdelegates++; + nfsrv_openpluslock++; + nfsrv_delegatecnt++; + } + } + } else { + /* + * New owner case. Start the open_owner sequence with a + * Needs confirmation (unless a reclaim) and hang the + * new open off it. 
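Both "choose to issue a delegation" branches above gate on the same server-side conditions before handing one out. A hedged sketch of that decision, modeled on the new-open branch and using stand-in booleans for the kernel globals and macros (nfsrv_issuedelegs, LCL_CALLBACKSON/LCL_CBDOWN, NFSRV_V4DELEGLIMIT(), NFSVNO_DELEGOK(), NFSVNO_EXRDONLY(), nfsrv_writedelegifpos):

	#include <stdbool.h>

	enum issue_deleg { ISSUE_NONE, ISSUE_READ, ISSUE_WRITE };

	struct deleg_policy {
		bool	server_issues_delegs;	/* global switch for delegations */
		bool	callbacks_working;	/* callbacks on and path not down */
		bool	under_deleg_limit;	/* delegation count below the limit */
		bool	vnode_ok_for_deleg;	/* file/fs allows delegations */
		bool	export_read_only;	/* export is read-only */
		bool	write_deleg_if_pos;	/* issue write delegs whenever possible */
	};

	/*
	 * no_conflicting_state: no other client holds an open or delegation
	 * that rules a delegation out (the delegate/writedeleg flags above).
	 */
	static enum issue_deleg
	choose_delegation(const struct deleg_policy *pol, bool no_conflicting_state,
	    bool write_deleg_allowed, bool open_is_readonly)
	{

		if (!no_conflicting_state || !pol->server_issues_delegs ||
		    !pol->callbacks_working || !pol->under_deleg_limit ||
		    !pol->vnode_ok_for_deleg)
			return (ISSUE_NONE);
		if (write_deleg_allowed && !pol->export_read_only &&
		    (pol->write_deleg_if_pos || !open_is_readonly))
			return (ISSUE_WRITE);
		if (open_is_readonly)
			return (ISSUE_READ);
		return (ISSUE_NONE);
	}

The earlier branch above (re-opening under an already-confirmed open owner) only ever issues the write variant, so it has no read-delegation fallback.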
+ */ + new_open->ls_stateid.seqid = 0; + new_open->ls_stateid.other[0] = clp->lc_clientid.lval[0]; + new_open->ls_stateid.other[1] = clp->lc_clientid.lval[1]; + new_open->ls_stateid.other[2] = nfsrv_nextstateindex(clp); + new_open->ls_flags = (new_stp->ls_flags & NFSLCK_SHAREBITS) | + NFSLCK_OPEN; + new_open->ls_uid = new_stp->ls_uid; + LIST_INIT(&new_open->ls_open); + new_open->ls_openowner = new_stp; + new_open->ls_lfp = lfp; + new_open->ls_clp = clp; + LIST_INSERT_HEAD(&lfp->lf_open, new_open, ls_file); + if (new_stp->ls_flags & NFSLCK_RECLAIM) { + new_stp->ls_flags = 0; + } else { + *rflagsp |= NFSV4OPEN_RESULTCONFIRM; + new_stp->ls_flags = NFSLCK_NEEDSCONFIRM; + } + nfsrvd_refcache(new_stp->ls_op); + new_stp->ls_noopens = 0; + LIST_INIT(&new_stp->ls_open); + LIST_INSERT_HEAD(&new_stp->ls_open, new_open, ls_list); + LIST_INSERT_HEAD(&clp->lc_open, new_stp, ls_list); + LIST_INSERT_HEAD(NFSSTATEHASH(clp, new_open->ls_stateid), + new_open, ls_hash); + openstp = new_open; + new_open = NULL; + *new_stpp = NULL; + newnfsstats.srvopens++; + nfsrv_openpluslock++; + newnfsstats.srvopenowners++; + nfsrv_openpluslock++; + } + if (!error) { + stateidp->seqid = openstp->ls_stateid.seqid; + stateidp->other[0] = openstp->ls_stateid.other[0]; + stateidp->other[1] = openstp->ls_stateid.other[1]; + stateidp->other[2] = openstp->ls_stateid.other[2]; + } + NFSUNLOCKSTATE(); + if (haslock) { + NFSLOCKV4ROOTMUTEX(); + nfsv4_unlock(&nfsv4rootfs_lock, 1); + NFSUNLOCKV4ROOTMUTEX(); + } + if (new_open) + FREE((caddr_t)new_open, M_NFSDSTATE); + if (new_deleg) + FREE((caddr_t)new_deleg, M_NFSDSTATE); + return (error); +} + +/* + * Open update. Does the confirm, downgrade and close. + */ +APPLESTATIC int +nfsrv_openupdate(vnode_t vp, struct nfsstate *new_stp, nfsquad_t clientid, + nfsv4stateid_t *stateidp, struct nfsrv_descript *nd, NFSPROC_T *p) +{ + struct nfsstate *stp, *ownerstp; + struct nfsclient *clp; + struct nfslockfile *lfp; + u_int32_t bits; + int error, gotstate = 0, len = 0, ret, freedlock; + u_char client[NFSV4_OPAQUELIMIT]; + + /* + * Check for restart conditions (client and server). + */ + error = nfsrv_checkrestart(clientid, new_stp->ls_flags, + &new_stp->ls_stateid, 0); + if (error) + return (error); + + NFSLOCKSTATE(); + /* + * Get the open structure via clientid and stateid. + */ + error = nfsrv_getclient(clientid, CLOPS_RENEW, &clp, + (nfsquad_t)((u_quad_t)0), NULL, p); + if (!error) + error = nfsrv_getstate(clp, &new_stp->ls_stateid, + new_stp->ls_flags, &stp); + + /* + * Sanity check the open. + */ + if (!error && (!(stp->ls_flags & NFSLCK_OPEN) || + (!(new_stp->ls_flags & NFSLCK_CONFIRM) && + (stp->ls_openowner->ls_flags & NFSLCK_NEEDSCONFIRM)) || + ((new_stp->ls_flags & NFSLCK_CONFIRM) && + (!(stp->ls_openowner->ls_flags & NFSLCK_NEEDSCONFIRM))))) + error = NFSERR_BADSTATEID; + + if (!error) + error = nfsrv_checkseqid(nd, new_stp->ls_seq, + stp->ls_openowner, new_stp->ls_op); + if (!error && stp->ls_stateid.seqid != new_stp->ls_stateid.seqid && + !(new_stp->ls_flags & NFSLCK_CONFIRM)) + error = NFSERR_OLDSTATEID; + if (!error && vnode_vtype(vp) != VREG) { + if (vnode_vtype(vp) == VDIR) + error = NFSERR_ISDIR; + else + error = NFSERR_INVAL; + } + + if (error) { + /* + * If a client tries to confirm an Open with a bad + * seqid# and there are no byte range locks or other Opens + * on the openowner, just throw it away, so the next use of the + * openowner will start a fresh seq#. 
+ */ + if (error == NFSERR_BADSEQID && + (new_stp->ls_flags & NFSLCK_CONFIRM) && + nfsrv_nootherstate(stp)) + nfsrv_freeopenowner(stp->ls_openowner, 0, p); + NFSUNLOCKSTATE(); + return (error); + } + + /* + * Set the return stateid. + */ + stateidp->seqid = stp->ls_stateid.seqid + 1; + stateidp->other[0] = stp->ls_stateid.other[0]; + stateidp->other[1] = stp->ls_stateid.other[1]; + stateidp->other[2] = stp->ls_stateid.other[2]; + /* + * Now, handle the three cases. + */ + if (new_stp->ls_flags & NFSLCK_CONFIRM) { + /* + * If the open doesn't need confirmation, it seems to me that + * there is a client error, but I'll just log it and keep going? + */ + if (!(stp->ls_openowner->ls_flags & NFSLCK_NEEDSCONFIRM)) + printf("Nfsv4d: stray open confirm\n"); + stp->ls_openowner->ls_flags = 0; + stp->ls_stateid.seqid++; + if (!(clp->lc_flags & LCL_STAMPEDSTABLE)) { + clp->lc_flags |= LCL_STAMPEDSTABLE; + len = clp->lc_idlen; + NFSBCOPY(clp->lc_id, client, len); + gotstate = 1; + } + NFSUNLOCKSTATE(); + } else if (new_stp->ls_flags & NFSLCK_CLOSE) { + ownerstp = stp->ls_openowner; + lfp = stp->ls_lfp; + freedlock = 0; + ret = nfsrv_freeopen(stp, &freedlock, 0, p); + /* See comment on nfsrv_lockctrl() w.r.t. locallocks. */ + if (ret) { + lfp = NULL; + } else { + if (LIST_EMPTY(&lfp->lf_lock)) + lfp = NULL; + } + /* + * For now, I won't do this. The openowner should be + * free'd in NFSNOOPEN seconds and it will be deref'd then. + if (LIST_EMPTY(&ownerstp->ls_open) && ownerstp->ls_op) { + nfsrvd_derefcache(ownerstp->ls_op); + ownerstp->ls_op = NULL; + } + */ + NFSUNLOCKSTATE(); + if (freedlock && lfp != NULL) + nfsrv_locallocks(vp, lfp, p); + } else { + /* + * Update the share bits, making sure that the new set are a + * subset of the old ones. + */ + bits = (new_stp->ls_flags & NFSLCK_SHAREBITS); + if (~(stp->ls_flags) & bits) { + NFSUNLOCKSTATE(); + return (NFSERR_INVAL); + } + stp->ls_flags = (bits | NFSLCK_OPEN); + stp->ls_stateid.seqid++; + NFSUNLOCKSTATE(); + } + + /* + * If the client just confirmed its first open, write a timestamp + * to the stable storage file. + */ + if (gotstate) + nfsrv_writestable(client, len, NFSNST_NEWSTATE, p); + return (error); +} + +/* + * Delegation update. Does the purge and return. + */ +APPLESTATIC int +nfsrv_delegupdate(nfsquad_t clientid, nfsv4stateid_t *stateidp, + vnode_t vp, int op, struct ucred *cred, NFSPROC_T *p) +{ + struct nfsstate *stp; + struct nfsclient *clp; + int error; + fhandle_t fh; + + /* + * Do a sanity check against the file handle for DelegReturn. + */ + if (vp) { + error = nfsvno_getfh(vp, &fh, p); + if (error) + return (error); + } + /* + * Check for restart conditions (client and server). + */ + if (op == NFSV4OP_DELEGRETURN) + error = nfsrv_checkrestart(clientid, NFSLCK_DELEGRETURN, + stateidp, 0); + else + error = nfsrv_checkrestart(clientid, NFSLCK_DELEGPURGE, + stateidp, 0); + + NFSLOCKSTATE(); + /* + * Get the open structure via clientid and stateid. + */ + if (!error) + error = nfsrv_getclient(clientid, CLOPS_RENEW, &clp, + (nfsquad_t)((u_quad_t)0), NULL, p); + if (error) { + if (error == NFSERR_CBPATHDOWN) + error = 0; + if (error == NFSERR_STALECLIENTID && op == NFSV4OP_DELEGRETURN) + error = NFSERR_STALESTATEID; + } + if (!error && op == NFSV4OP_DELEGRETURN) { + error = nfsrv_getstate(clp, stateidp, NFSLCK_DELEGRETURN, &stp); + if (!error && stp->ls_stateid.seqid != stateidp->seqid) + error = NFSERR_OLDSTATEID; + } + /* + * NFSERR_EXPIRED means that the state has gone away, + * so Delegations have been purged. Just return ok. 
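The "~(stp->ls_flags) & bits" test above is the usual subset check for OPEN_DOWNGRADE: the request may drop share bits but never add them. Written out standalone for clarity (illustrative only):

	#include <stdbool.h>
	#include <stdint.h>

	/*
	 * Every access/deny bit requested by the downgrade must already be
	 * present in the current open; otherwise the request is invalid.
	 */
	static bool
	downgrade_is_valid(uint32_t current_sharebits, uint32_t requested_sharebits)
	{

		return ((requested_sharebits & ~current_sharebits) == 0);
	}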
+ */ + if (error == NFSERR_EXPIRED && op == NFSV4OP_DELEGPURGE) { + NFSUNLOCKSTATE(); + return (0); + } + if (error) { + NFSUNLOCKSTATE(); + return (error); + } + + if (op == NFSV4OP_DELEGRETURN) { + if (NFSBCMP((caddr_t)&fh, (caddr_t)&stp->ls_lfp->lf_fh, + sizeof (fhandle_t))) { + NFSUNLOCKSTATE(); + return (NFSERR_BADSTATEID); + } + nfsrv_freedeleg(stp); + } else { + nfsrv_freedeleglist(&clp->lc_olddeleg); + } + NFSUNLOCKSTATE(); + return (0); +} + +/* + * Release lock owner. + */ +APPLESTATIC int +nfsrv_releaselckown(struct nfsstate *new_stp, nfsquad_t clientid, + NFSPROC_T *p) +{ + struct nfsstate *stp, *nstp, *openstp, *ownstp; + struct nfsclient *clp; + int error; + + /* + * Check for restart conditions (client and server). + */ + error = nfsrv_checkrestart(clientid, new_stp->ls_flags, + &new_stp->ls_stateid, 0); + if (error) + return (error); + + NFSLOCKSTATE(); + /* + * Get the lock owner by name. + */ + error = nfsrv_getclient(clientid, CLOPS_RENEW, &clp, + (nfsquad_t)((u_quad_t)0), NULL, p); + if (error) { + NFSUNLOCKSTATE(); + return (error); + } + LIST_FOREACH(ownstp, &clp->lc_open, ls_list) { + LIST_FOREACH(openstp, &ownstp->ls_open, ls_list) { + stp = LIST_FIRST(&openstp->ls_open); + while (stp != LIST_END(&openstp->ls_open)) { + nstp = LIST_NEXT(stp, ls_list); + /* + * If the owner matches, check for locks and + * then free or return an error. + */ + if (stp->ls_ownerlen == new_stp->ls_ownerlen && + !NFSBCMP(stp->ls_owner, new_stp->ls_owner, + stp->ls_ownerlen)){ + if (LIST_EMPTY(&stp->ls_lock)) { + (void) nfsrv_freelockowner(stp, NULL, 0, p); + } else { + NFSUNLOCKSTATE(); + return (NFSERR_LOCKSHELD); + } + } + stp = nstp; + } + } + } + NFSUNLOCKSTATE(); + return (0); +} + +/* + * Get the file handle for a lock structure. + */ +static int +nfsrv_getlockfh(vnode_t vp, u_short flags, + struct nfslockfile **new_lfpp, fhandle_t *nfhp, NFSPROC_T *p) +{ + fhandle_t *fhp = NULL; + struct nfslockfile *new_lfp; + int error; + + /* + * For lock, use the new nfslock structure, otherwise just + * a fhandle_t on the stack. + */ + if (flags & NFSLCK_OPEN) { + new_lfp = *new_lfpp; + fhp = &new_lfp->lf_fh; + } else if (nfhp) { + fhp = nfhp; + } else { + panic("nfsrv_getlockfh"); + } + error = nfsvno_getfh(vp, fhp, p); + return (error); +} + +/* + * Get an nfs lock structure. Allocate one, as required, and return a + * pointer to it. + * Returns an NFSERR_xxx upon failure or -1 to indicate no current lock. + */ +static int +nfsrv_getlockfile(u_short flags, struct nfslockfile **new_lfpp, + struct nfslockfile **lfpp, fhandle_t *nfhp) +{ + struct nfslockfile *lfp; + fhandle_t *fhp = NULL, *tfhp; + struct nfslockhashhead *hp; + struct nfslockfile *new_lfp = NULL; + + /* + * For lock, use the new nfslock structure, otherwise just + * a fhandle_t on the stack. + */ + if (flags & NFSLCK_OPEN) { + new_lfp = *new_lfpp; + fhp = &new_lfp->lf_fh; + } else if (nfhp) { + fhp = nfhp; + } else { + panic("nfsrv_getlockfile"); + } + + hp = NFSLOCKHASH(fhp); + LIST_FOREACH(lfp, hp, lf_hash) { + tfhp = &lfp->lf_fh; + if (NFSVNO_CMPFH(fhp, tfhp)) { + *lfpp = lfp; + return (0); + } + } + if (!(flags & NFSLCK_OPEN)) + return (-1); + + /* + * No match, so chain the new one into the list. + */ + LIST_INIT(&new_lfp->lf_open); + LIST_INIT(&new_lfp->lf_lock); + LIST_INIT(&new_lfp->lf_deleg); + LIST_INSERT_HEAD(hp, new_lfp, lf_hash); + *lfpp = new_lfp; + *new_lfpp = NULL; + return (0); +} + +/* + * This function adds a nfslock lock structure to the list for the associated + * nfsstate and nfslockfile structures. 
It will be inserted after the + * entry pointed at by insert_lop. + * Must be called with soft clock interrupts disabled. + */ +static void +nfsrv_insertlock(struct nfslock *new_lop, struct nfslock *insert_lop, + struct nfsstate *stp, struct nfslockfile *lfp) +{ + struct nfslock *lop, *nlop; + + new_lop->lo_stp = stp; + new_lop->lo_lfp = lfp; + + /* Insert in increasing lo_first order */ + lop = LIST_FIRST(&lfp->lf_lock); + if (lop == LIST_END(&lfp->lf_lock) || + new_lop->lo_first <= lop->lo_first) { + LIST_INSERT_HEAD(&lfp->lf_lock, new_lop, lo_lckfile); + } else { + nlop = LIST_NEXT(lop, lo_lckfile); + while (nlop != LIST_END(&lfp->lf_lock) && + nlop->lo_first < new_lop->lo_first) { + lop = nlop; + nlop = LIST_NEXT(lop, lo_lckfile); + } + LIST_INSERT_AFTER(lop, new_lop, lo_lckfile); + } + + /* + * Insert after insert_lop, which is overloaded as stp for + * an empty list. + */ + if ((struct nfsstate *)insert_lop == stp) + LIST_INSERT_HEAD(&stp->ls_lock, new_lop, lo_lckowner); + else + LIST_INSERT_AFTER(insert_lop, new_lop, lo_lckowner); + newnfsstats.srvlocks++; + nfsrv_openpluslock++; +} + +/* + * This function updates the locking for a lock owner and given file. It + * maintains a list of lock ranges ordered on increasing file offset that + * are NFSLCK_READ or NFSLCK_WRITE and non-overlapping (aka POSIX style). + * It always adds new_lop to the list and sometimes uses the one pointed + * at by other_lopp. + * Must be called with soft clock interrupts disabled. + */ +static void +nfsrv_updatelock(struct nfsstate *stp, struct nfslock **new_lopp, + struct nfslock **other_lopp, struct nfslockfile *lfp) +{ + struct nfslock *new_lop = *new_lopp; + struct nfslock *lop, *tlop, *ilop; + struct nfslock *other_lop = *other_lopp; + int unlock = 0, myfile = 0; + u_int64_t tmp; + + /* + * Work down the list until the lock is merged. + */ + if (new_lop->lo_flags & NFSLCK_UNLOCK) + unlock = 1; + ilop = (struct nfslock *)stp; + lop = LIST_FIRST(&stp->ls_lock); + while (lop != LIST_END(&stp->ls_lock)) { + /* + * Only check locks for this file that aren't before the start of + * new lock's range. + */ + if (lop->lo_lfp == lfp) { + myfile = 1; + if (lop->lo_end >= new_lop->lo_first) { + if (new_lop->lo_end < lop->lo_first) { + /* + * If the new lock ends before the start of the + * current lock's range, no merge, just insert + * the new lock. + */ + break; + } + if (new_lop->lo_flags == lop->lo_flags || + (new_lop->lo_first <= lop->lo_first && + new_lop->lo_end >= lop->lo_end)) { + /* + * This lock can be absorbed by the new lock/unlock. + * This happens when it covers the entire range + * of the old lock or is contiguous + * with the old lock and is of the same type or an + * unlock. + */ + if (lop->lo_first < new_lop->lo_first) + new_lop->lo_first = lop->lo_first; + if (lop->lo_end > new_lop->lo_end) + new_lop->lo_end = lop->lo_end; + tlop = lop; + lop = LIST_NEXT(lop, lo_lckowner); + nfsrv_freenfslock(tlop); + continue; + } + + /* + * All these cases are for contiguous locks that are not the + * same type, so they can't be merged. + */ + if (new_lop->lo_first <= lop->lo_first) { + /* + * This case is where the new lock overlaps with the + * first part of the old lock. Move the start of the + * old lock to just past the end of the new lock. The + * new lock will be inserted in front of the old, since + * ilop hasn't been updated. (We are done now.) 
+ */ + lop->lo_first = new_lop->lo_end; + break; + } + if (new_lop->lo_end >= lop->lo_end) { + /* + * This case is where the new lock overlaps with the + * end of the old lock's range. Move the old lock's + * end to just before the new lock's first and insert + * the new lock after the old lock. + * Might not be done yet, since the new lock could + * overlap further locks with higher ranges. + */ + lop->lo_end = new_lop->lo_first; + ilop = lop; + lop = LIST_NEXT(lop, lo_lckowner); + continue; + } + /* + * The final case is where the new lock's range is in the + * middle of the current lock's and splits the current lock + * up. Use *other_lopp to handle the second part of the + * split old lock range. (We are done now.) + * For unlock, we use new_lop as other_lop and tmp, since + * other_lop and new_lop are the same for this case. + * We noted the unlock case above, so we don't need + * new_lop->lo_flags any longer. + */ + tmp = new_lop->lo_first; + if (other_lop == NULL) { + if (!unlock) + panic("nfsd srv update unlock"); + other_lop = new_lop; + *new_lopp = NULL; + } + other_lop->lo_first = new_lop->lo_end; + other_lop->lo_end = lop->lo_end; + other_lop->lo_flags = lop->lo_flags; + other_lop->lo_stp = stp; + other_lop->lo_lfp = lfp; + lop->lo_end = tmp; + nfsrv_insertlock(other_lop, lop, stp, lfp); + *other_lopp = NULL; + ilop = lop; + break; + } + } + ilop = lop; + lop = LIST_NEXT(lop, lo_lckowner); + if (myfile && (lop == LIST_END(&stp->ls_lock) || + lop->lo_lfp != lfp)) + break; + } + + /* + * Insert the new lock in the list at the appropriate place. + */ + if (!unlock) { + nfsrv_insertlock(new_lop, ilop, stp, lfp); + *new_lopp = NULL; + } +} + +/* + * This function handles sequencing of locks, etc. + * It returns an error that indicates what the caller should do. + */ +static int +nfsrv_checkseqid(struct nfsrv_descript *nd, u_int32_t seqid, + struct nfsstate *stp, struct nfsrvcache *op) +{ + + if (op != nd->nd_rp) + panic("nfsrvstate checkseqid"); + if (!(op->rc_flag & RC_INPROG)) + panic("nfsrvstate not inprog"); + if (stp->ls_op && stp->ls_op->rc_refcnt <= 0) { + printf("refcnt=%d\n", stp->ls_op->rc_refcnt); + panic("nfsrvstate op refcnt"); + } + if ((stp->ls_seq + 1) == seqid) { + if (stp->ls_op) + nfsrvd_derefcache(stp->ls_op); + stp->ls_op = op; + nfsrvd_refcache(op); + stp->ls_seq = seqid; + return (0); + } else if (stp->ls_seq == seqid && stp->ls_op && + op->rc_xid == stp->ls_op->rc_xid && + op->rc_refcnt == 0 && + op->rc_reqlen == stp->ls_op->rc_reqlen && + op->rc_cksum == stp->ls_op->rc_cksum) { + if (stp->ls_op->rc_flag & RC_INPROG) + return (NFSERR_DONTREPLY); + nd->nd_rp = stp->ls_op; + nd->nd_rp->rc_flag |= RC_INPROG; + nfsrvd_delcache(op); + return (NFSERR_REPLYFROMCACHE); + } + return (NFSERR_BADSEQID); +} + +/* + * Get the client ip address for callbacks. If the strings can't be parsed, + * just set lc_program to 0 to indicate no callbacks are possible. + * (For cases where the address can't be parsed or is 0.0.0.0.0.0, set + * the address to the client's transport address. This won't be used + * for callbacks, but can be printed out by newnfsstats for info.) + * Return error if the xdr can't be parsed, 0 otherwise. 
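nfsrv_checkseqid() above implements the per-owner seqid rules from RFC 3530: the next seqid in sequence is a new request, an exact repeat of the previous request is answered from the reply cache, and anything else draws NFSERR_BADSEQID. A simplified sketch of that three-way decision (types and names are illustrative, not the nfsrvcache fields):

	#include <stdint.h>

	enum seqid_action {
		SEQID_NEW_REQUEST,	/* seqid advanced by one: execute it */
		SEQID_REPLAY,		/* same request again: reply from cache */
		SEQID_BAD		/* anything else: reject */
	};

	struct cached_req {
		uint32_t	xid;	/* RPC transaction id */
		uint32_t	len;	/* request length */
		uint16_t	cksum;	/* request checksum */
	};

	static enum seqid_action
	check_owner_seqid(uint32_t last_seqid, const struct cached_req *last_req,
	    uint32_t seqid, const struct cached_req *req)
	{

		if (seqid == last_seqid + 1)
			return (SEQID_NEW_REQUEST);
		if (seqid == last_seqid && last_req != NULL &&
		    req->xid == last_req->xid && req->len == last_req->len &&
		    req->cksum == last_req->cksum)
			return (SEQID_REPLAY);
		return (SEQID_BAD);
	}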
+ */ +APPLESTATIC int +nfsrv_getclientipaddr(struct nfsrv_descript *nd, struct nfsclient *clp) +{ + u_int32_t *tl; + u_char *cp, *cp2; + int i, j; + struct sockaddr_in *rad, *sad; + u_char protocol[5], addr[24]; + int error = 0, cantparse = 0; + union { + u_long ival; + u_char cval[4]; + } ip; + union { + u_short sval; + u_char cval[2]; + } port; + + rad = NFSSOCKADDR(clp->lc_req.nr_nam, struct sockaddr_in *); + rad->sin_family = AF_INET; + rad->sin_len = sizeof (struct sockaddr_in); + rad->sin_addr.s_addr = 0; + rad->sin_port = 0; + clp->lc_req.nr_client = NULL; + clp->lc_req.nr_lock = 0; + NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED); + i = fxdr_unsigned(int, *tl); + if (i >= 3 && i <= 4) { + error = nfsrv_mtostr(nd, protocol, i); + if (error) + goto nfsmout; + if (!strcmp(protocol, "tcp")) { + clp->lc_flags |= LCL_TCPCALLBACK; + clp->lc_req.nr_sotype = SOCK_STREAM; + clp->lc_req.nr_soproto = IPPROTO_TCP; + } else if (!strcmp(protocol, "udp")) { + clp->lc_req.nr_sotype = SOCK_DGRAM; + clp->lc_req.nr_soproto = IPPROTO_UDP; + } else { + cantparse = 1; + } + } else { + cantparse = 1; + if (i > 0) { + error = nfsm_advance(nd, NFSM_RNDUP(i), -1); + if (error) + goto nfsmout; + } + } + NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED); + i = fxdr_unsigned(int, *tl); + if (i < 0) { + error = NFSERR_BADXDR; + goto nfsmout; + } else if (i == 0) { + cantparse = 1; + } else if (!cantparse && i <= 23 && i >= 11) { + error = nfsrv_mtostr(nd, addr, i); + if (error) + goto nfsmout; + + /* + * Parse out the address fields. We expect 6 decimal numbers + * separated by '.'s. + */ + cp = addr; + i = 0; + while (*cp && i < 6) { + cp2 = cp; + while (*cp2 && *cp2 != '.') + cp2++; + if (*cp2) + *cp2++ = '\0'; + else if (i != 5) { + cantparse = 1; + break; + } + j = nfsrv_getipnumber(cp); + if (j >= 0) { + if (i < 4) + ip.cval[3 - i] = j; + else + port.cval[5 - i] = j; + } else { + cantparse = 1; + break; + } + cp = cp2; + i++; + } + if (!cantparse) { + if (ip.ival != 0x0) { + rad->sin_addr.s_addr = htonl(ip.ival); + rad->sin_port = htons(port.sval); + } else { + cantparse = 1; + } + } + } else { + cantparse = 1; + if (i > 0) { + error = nfsm_advance(nd, NFSM_RNDUP(i), -1); + if (error) + goto nfsmout; + } + } + if (cantparse) { + sad = NFSSOCKADDR(nd->nd_nam, struct sockaddr_in *); + rad->sin_addr.s_addr = sad->sin_addr.s_addr; + rad->sin_port = 0x0; + clp->lc_program = 0; + } +nfsmout: + return (error); +} + +/* + * Turn a string of up to three decimal digits into a number. Return -1 upon + * error. + */ +static int +nfsrv_getipnumber(u_char *cp) +{ + int i = 0, j = 0; + + while (*cp) { + if (j > 2 || *cp < '0' || *cp > '9') + return (-1); + i *= 10; + i += (*cp - '0'); + cp++; + j++; + } + if (i < 256) + return (i); + return (-1); +} + +/* + * This function checks for restart conditions. + */ +static int +nfsrv_checkrestart(nfsquad_t clientid, u_int32_t flags, + nfsv4stateid_t *stateidp, int specialid) +{ + int ret; + + /* + * First check for a server restart. Open, LockT, ReleaseLockOwner + * and DelegPurge have a clientid, the rest a stateid. + */ + if (flags & + (NFSLCK_OPEN | NFSLCK_TEST | NFSLCK_RELEASE | NFSLCK_DELEGPURGE)) { + if (clientid.lval[0] != nfsrvboottime) + return (NFSERR_STALECLIENTID); + } else if (stateidp->other[0] != nfsrvboottime && + specialid == 0) + return (NFSERR_STALESTATEID); + + /* + * Read, Write, Setattr and LockT can return NFSERR_GRACE and do + * not use a lock/open owner seqid#, so the check can be done now. + * (The others will be checked, as required, later.) 
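The callback address parsed above is the usual "h1.h2.h3.h4.p1.p2" universal-address string from SETCLIENTID. A minimal userland sketch of the same parse, with the error handling collapsed into a boolean; sscanf() stands in for the hand-rolled scanner and this is not the kernel routine:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/*
	 * Parse "h1.h2.h3.h4.p1.p2" into an IPv4 address (host byte order)
	 * and a port.  An address of 0.0.0.0 is rejected, mirroring the
	 * "cantparse" handling above.
	 */
	static bool
	parse_uaddr(const char *uaddr, uint32_t *ipp, uint16_t *portp)
	{
		unsigned int f[6];
		int i;

		if (sscanf(uaddr, "%u.%u.%u.%u.%u.%u",
		    &f[0], &f[1], &f[2], &f[3], &f[4], &f[5]) != 6)
			return (false);
		for (i = 0; i < 6; i++)
			if (f[i] > 255)
				return (false);
		*ipp = (f[0] << 24) | (f[1] << 16) | (f[2] << 8) | f[3];
		*portp = (uint16_t)((f[4] << 8) | f[5]);
		return (*ipp != 0);
	}

For example, "192.0.2.7.8.1" parses to address 192.0.2.7 and port 2049 (8 * 256 + 1).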
+ */ + if (!(flags & (NFSLCK_CHECK | NFSLCK_TEST))) + return (0); + + NFSLOCKSTATE(); + ret = nfsrv_checkgrace(flags); + NFSUNLOCKSTATE(); + return (ret); +} + +/* + * Check for grace. + */ +static int +nfsrv_checkgrace(u_int32_t flags) +{ + + if (nfsrv_stablefirst.nsf_flags & NFSNSF_GRACEOVER) { + if (flags & NFSLCK_RECLAIM) + return (NFSERR_NOGRACE); + } else { + if (!(flags & NFSLCK_RECLAIM)) + return (NFSERR_GRACE); + + /* + * If grace is almost over and we are still getting Reclaims, + * extend grace a bit. + */ + if ((NFSD_MONOSEC + NFSRV_LEASEDELTA) > + nfsrv_stablefirst.nsf_eograce) + nfsrv_stablefirst.nsf_eograce = NFSD_MONOSEC + + NFSRV_LEASEDELTA; + } + return (0); +} + +/* + * Do a server callback. + */ +static int +nfsrv_docallback(struct nfsclient *clp, int procnum, + nfsv4stateid_t *stateidp, int trunc, fhandle_t *fhp, + struct nfsvattr *nap, nfsattrbit_t *attrbitp, NFSPROC_T *p) +{ + mbuf_t m; + u_int32_t *tl; + struct nfsrv_descript nfsd, *nd = &nfsd; + struct ucred *cred; + int error = 0; + u_int32_t callback; + + cred = newnfs_getcred(); + NFSLOCKSTATE(); /* mostly for lc_cbref++ */ + if (clp->lc_flags & LCL_NEEDSCONFIRM) { + NFSUNLOCKSTATE(); + panic("docallb"); + } + clp->lc_cbref++; + /* + * First, fill in some of the fields of nd and cr. + */ + nd->nd_flag = ND_NFSV4; + if (clp->lc_flags & LCL_GSS) + nd->nd_flag |= ND_KERBV; + nd->nd_repstat = 0; + cred->cr_uid = clp->lc_uid; + cred->cr_gid = clp->lc_gid; + cred->cr_groups[0] = clp->lc_gid; + callback = clp->lc_callback; + NFSUNLOCKSTATE(); + cred->cr_ngroups = 1; + + /* + * Get the first mbuf for the request. + */ + MGET(m, M_WAIT, MT_DATA); + mbuf_setlen(m, 0); + nd->nd_mreq = nd->nd_mb = m; + nd->nd_bpos = NFSMTOD(m, caddr_t); + + /* + * and build the callback request. + */ + if (procnum == NFSV4OP_CBGETATTR) { + nd->nd_procnum = NFSV4PROC_CBCOMPOUND; + (void) nfsm_strtom(nd, "CB Getattr", 10); + NFSM_BUILD(tl, u_int32_t *, 4 * NFSX_UNSIGNED); + *tl++ = txdr_unsigned(NFSV4_MINORVERSION); + *tl++ = txdr_unsigned(callback); + *tl++ = txdr_unsigned(1); + *tl = txdr_unsigned(NFSV4OP_CBGETATTR); + (void) nfsm_fhtom(nd, (u_int8_t *)fhp, NFSX_MYFH, 0); + (void) nfsrv_putattrbit(nd, attrbitp); + } else if (procnum == NFSV4OP_CBRECALL) { + nd->nd_procnum = NFSV4PROC_CBCOMPOUND; + (void) nfsm_strtom(nd, "CB Recall", 9); + NFSM_BUILD(tl, u_int32_t *, 5 * NFSX_UNSIGNED + NFSX_STATEID); + *tl++ = txdr_unsigned(NFSV4_MINORVERSION); + *tl++ = txdr_unsigned(callback); + *tl++ = txdr_unsigned(1); + *tl++ = txdr_unsigned(NFSV4OP_CBRECALL); + *tl++ = txdr_unsigned(stateidp->seqid); + NFSBCOPY((caddr_t)stateidp->other, (caddr_t)tl, + NFSX_STATEIDOTHER); + tl += (NFSX_STATEIDOTHER / NFSX_UNSIGNED); + if (trunc) + *tl = newnfs_true; + else + *tl = newnfs_false; + (void) nfsm_fhtom(nd, (u_int8_t *)fhp, NFSX_MYFH, 0); + } else { + nd->nd_procnum = NFSV4PROC_CBNULL; + } + + /* + * Call newnfs_connect(), as required, and then newnfs_request(). + */ + (void) newnfs_sndlock(&clp->lc_req.nr_lock); + if (clp->lc_req.nr_client == NULL) { + if (nd->nd_procnum == NFSV4PROC_CBNULL) + error = newnfs_connect(NULL, &clp->lc_req, cred, + NULL, 1); + else + error = newnfs_connect(NULL, &clp->lc_req, cred, + NULL, 3); + } + newnfs_sndunlock(&clp->lc_req.nr_lock); + if (!error) { + error = newnfs_request(nd, NULL, clp, &clp->lc_req, NULL, + NULL, cred, clp->lc_program, NFSV4_CBVERS, NULL, 1, NULL); + } + NFSFREECRED(cred); + + /* + * If error is set here, the Callback path isn't working + * properly, so twiddle the appropriate LCL_ flags. 
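nfsrv_checkgrace() above encodes the grace-period rules: reclaims are only valid while grace is in effect, other state-changing operations only once it has ended, and a reclaim arriving near the end of grace pushes the end out a little. A compact restatement, with stand-in names for NFSD_MONOSEC and NFSRV_LEASEDELTA:

	#include <stdbool.h>
	#include <time.h>

	#define GRACE_EXTEND_SECS	30	/* stand-in for NFSRV_LEASEDELTA */

	enum grace_status { GRACE_OK, GRACE_STILL_IN_GRACE, GRACE_NO_GRACE };

	static enum grace_status
	check_grace(bool grace_over, bool is_reclaim, time_t now, time_t *eograce)
	{

		if (grace_over)
			return (is_reclaim ? GRACE_NO_GRACE : GRACE_OK);
		if (!is_reclaim)
			return (GRACE_STILL_IN_GRACE);
		/* Still reclaiming near the end of grace: extend it a bit. */
		if (now + GRACE_EXTEND_SECS > *eograce)
			*eograce = now + GRACE_EXTEND_SECS;
		return (GRACE_OK);
	}

GRACE_STILL_IN_GRACE corresponds to NFSERR_GRACE and GRACE_NO_GRACE to NFSERR_NOGRACE in the kernel routine.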
+ * (nd_repstat != 0 indicates the Callback path is working, + * but the callback failed on the client.) + */ + if (error) { + /* + * Mark the callback pathway down, which disabled issuing + * of delegations and gets Renew to return NFSERR_CBPATHDOWN. + */ + NFSLOCKSTATE(); + clp->lc_flags |= LCL_CBDOWN; + NFSUNLOCKSTATE(); + } else { + /* + * Callback worked. If the callback path was down, disable + * callbacks, so no more delegations will be issued. (This + * is done on the assumption that the callback pathway is + * flakey.) + */ + NFSLOCKSTATE(); + if (clp->lc_flags & LCL_CBDOWN) + clp->lc_flags &= ~(LCL_CBDOWN | LCL_CALLBACKSON); + NFSUNLOCKSTATE(); + if (nd->nd_repstat) + error = nd->nd_repstat; + else if (procnum == NFSV4OP_CBGETATTR) + error = nfsv4_loadattr(nd, NULL, nap, NULL, NULL, 0, + NULL, NULL, NULL, NULL, NULL, 0, NULL, NULL, NULL, + p, NULL); + mbuf_freem(nd->nd_mrep); + } + NFSLOCKSTATE(); + clp->lc_cbref--; + if ((clp->lc_flags & LCL_WAKEUPWANTED) && clp->lc_cbref == 0) { + clp->lc_flags &= ~LCL_WAKEUPWANTED; + NFSUNLOCKSTATE(); + wakeup((caddr_t)clp); + } else { + NFSUNLOCKSTATE(); + } + return (error); +} + +/* + * Return the next index# for a clientid. Mostly just increment and return + * the next one, but... if the 32bit unsigned does actually wrap around, + * reboot. This is here more for fun than practical purposes. At an + * average rate of one new client per second, it will wrap around in + * approximately 136 years. (I think the server will have been shut + * down or rebooted before then.) + */ +static u_int32_t +nfsrv_nextclientindex(void) +{ + static u_int32_t client_index = 0; + + client_index++; + if (client_index != 0) + return (client_index); + + /* + * In practice, we'll never get here, but the reboot is here, + * just for fun. (client_index will not wrap around on any real server) + */ + printf("you must reboot now\n"); + return (0); /* Just to shut the compiler up */ +} + +/* + * Return the next index# for a stateid. Mostly just increment and return + * the next one, but... if the 32bit unsigned does actually wrap around + * (will a BSD server stay up that long?), find + * new start and end values. + */ +static u_int32_t +nfsrv_nextstateindex(struct nfsclient *clp) +{ + struct nfsstate *stp; + int i; + u_int32_t canuse, min_index, max_index; + + if (!(clp->lc_flags & LCL_INDEXNOTOK)) { + clp->lc_stateindex++; + if (clp->lc_stateindex != clp->lc_statemaxindex) + return (clp->lc_stateindex); + } + + /* + * Yuck, we've hit the end. + * Look for a new min and max. + */ + min_index = 0; + max_index = 0xffffffff; + for (i = 0; i < NFSSTATEHASHSIZE; i++) { + LIST_FOREACH(stp, &clp->lc_stateid[i], ls_hash) { + if (stp->ls_stateid.other[2] > 0x80000000) { + if (stp->ls_stateid.other[2] < max_index) + max_index = stp->ls_stateid.other[2]; + } else { + if (stp->ls_stateid.other[2] > min_index) + min_index = stp->ls_stateid.other[2]; + } + } + } + + /* + * Yikes, highly unlikely, but I'll handle it anyhow. + */ + if (min_index == 0x80000000 && max_index == 0x80000001) { + canuse = 0; + /* + * Loop around until we find an unused entry. Return that + * and set LCL_INDEXNOTOK, so the search will continue next time. + * (This is one of those rare cases where a goto is the + * cleanest way to code the loop.) 
+ */ +tryagain: + for (i = 0; i < NFSSTATEHASHSIZE; i++) { + LIST_FOREACH(stp, &clp->lc_stateid[i], ls_hash) { + if (stp->ls_stateid.other[2] == canuse) { + canuse++; + goto tryagain; + } + } + } + clp->lc_flags |= LCL_INDEXNOTOK; + return (canuse); + } + + /* + * Ok to start again from min + 1. + */ + clp->lc_stateindex = min_index + 1; + clp->lc_statemaxindex = max_index; + clp->lc_flags &= ~LCL_INDEXNOTOK; + return (clp->lc_stateindex); +} + +/* + * The following functions handle the stable storage file that deals with + * the edge conditions described in RFC3530 Sec. 8.6.3. + * The file is as follows: + * - a single record at the beginning that has the lease time of the + * previous server instance (before the last reboot) and the nfsrvboottime + * values for the previous server boots. + * These previous boot times are used to ensure that the current + * nfsrvboottime does not, somehow, get set to a previous one. + * (This is important so that Stale ClientIDs and StateIDs can + * be recognized.) + * The number of previous nfsvrboottime values preceeds the list. + * - followed by some number of appended records with: + * - client id string + * - flag that indicates it is a record revoking state via lease + * expiration or similar + * OR has successfully acquired state. + * These structures vary in length, with the client string at the end, up + * to NFSV4_OPAQUELIMIT in size. + * + * At the end of the grace period, the file is truncated, the first + * record is rewritten with updated information and any acquired state + * records for successful reclaims of state are written. + * + * Subsequent records are appended when the first state is issued to + * a client and when state is revoked for a client. + * + * When reading the file in, state issued records that come later in + * the file override older ones, since the append log is in cronological order. + * If, for some reason, the file can't be read, the grace period is + * immediately terminated and all reclaims get NFSERR_NOGRACE. + */ + +/* + * Read in the stable storage file. Called by nfssvc() before the nfsd + * processes start servicing requests. + */ +APPLESTATIC void +nfsrv_setupstable(NFSPROC_T *p) +{ + struct nfsrv_stablefirst *sf = &nfsrv_stablefirst; + struct nfsrv_stable *sp, *nsp; + struct nfst_rec *tsp; + int error, i, tryagain; + off_t off = 0; + size_t aresid, len; + struct timeval curtime; + + /* + * If NFSNSF_UPDATEDONE is set, this is a restart of the nfsds without + * a reboot, so state has not been lost. + */ + if (sf->nsf_flags & NFSNSF_UPDATEDONE) + return; + /* + * Set Grace over just until the file reads successfully. + */ + NFSGETTIME(&curtime); + nfsrvboottime = curtime.tv_sec; + LIST_INIT(&sf->nsf_head); + sf->nsf_flags = (NFSNSF_GRACEOVER | NFSNSF_NEEDLOCK); + sf->nsf_eograce = NFSD_MONOSEC + NFSRV_LEASEDELTA; + if (sf->nsf_fp == NULL) + return; + error = NFSD_RDWR(UIO_READ, NFSFPVNODE(sf->nsf_fp), + (caddr_t)&sf->nsf_rec, sizeof (struct nfsf_rec), off, UIO_SYSSPACE, + 0, NFSFPCRED(sf->nsf_fp), &aresid, p); + if (error || aresid || sf->nsf_numboots == 0 || + sf->nsf_numboots > NFSNSF_MAXNUMBOOTS) + return; + + /* + * Now, read in the boottimes. 
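+	 * At this point the layout being read is, roughly: the
+	 * struct nfsf_rec header (previous lease time and the count
+	 * of boot times), followed by nsf_numboots time_t boot values
+	 * and then the variable length nfst_rec client records that
+	 * nfsrv_writestable() appends.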
+ */ + sf->nsf_bootvals = (time_t *)malloc((sf->nsf_numboots + 1) * + sizeof (time_t), M_TEMP, M_WAITOK); + off = sizeof (struct nfsf_rec); + error = NFSD_RDWR(UIO_READ, NFSFPVNODE(sf->nsf_fp), + (caddr_t)sf->nsf_bootvals, sf->nsf_numboots * sizeof (time_t), off, + UIO_SYSSPACE, 0, NFSFPCRED(sf->nsf_fp), &aresid, p); + if (error || aresid) { + free((caddr_t)sf->nsf_bootvals, M_TEMP); + sf->nsf_bootvals = NULL; + return; + } + + /* + * Make sure this nfsrvboottime is different from all recorded + * previous ones. + */ + do { + tryagain = 0; + for (i = 0; i < sf->nsf_numboots; i++) { + if (nfsrvboottime == sf->nsf_bootvals[i]) { + nfsrvboottime++; + tryagain = 1; + break; + } + } + } while (tryagain); + + sf->nsf_flags |= NFSNSF_OK; + off += (sf->nsf_numboots * sizeof (time_t)); + + /* + * Read through the file, building a list of records for grace + * checking. + * Each record is between sizeof (struct nfst_rec) and + * sizeof (struct nfst_rec) + NFSV4_OPAQUELIMIT - 1 + * and is actually sizeof (struct nfst_rec) + nst_len - 1. + */ + tsp = (struct nfst_rec *)malloc(sizeof (struct nfst_rec) + + NFSV4_OPAQUELIMIT - 1, M_TEMP, M_WAITOK); + do { + error = NFSD_RDWR(UIO_READ, NFSFPVNODE(sf->nsf_fp), + (caddr_t)tsp, sizeof (struct nfst_rec) + NFSV4_OPAQUELIMIT - 1, + off, UIO_SYSSPACE, 0, NFSFPCRED(sf->nsf_fp), &aresid, p); + len = (sizeof (struct nfst_rec) + NFSV4_OPAQUELIMIT - 1) - aresid; + if (error || (len > 0 && (len < sizeof (struct nfst_rec) || + len < (sizeof (struct nfst_rec) + tsp->len - 1)))) { + /* + * Yuck, the file has been corrupted, so just return + * after clearing out any restart state, so the grace period + * is over. + */ + LIST_FOREACH_SAFE(sp, &sf->nsf_head, nst_list, nsp) { + LIST_REMOVE(sp, nst_list); + free((caddr_t)sp, M_TEMP); + } + free((caddr_t)tsp, M_TEMP); + sf->nsf_flags &= ~NFSNSF_OK; + free((caddr_t)sf->nsf_bootvals, M_TEMP); + sf->nsf_bootvals = NULL; + return; + } + if (len > 0) { + off += sizeof (struct nfst_rec) + tsp->len - 1; + /* + * Search the list for a matching client. + */ + LIST_FOREACH(sp, &sf->nsf_head, nst_list) { + if (tsp->len == sp->nst_len && + !NFSBCMP(tsp->client, sp->nst_client, tsp->len)) + break; + } + if (sp == LIST_END(&sf->nsf_head)) { + sp = (struct nfsrv_stable *)malloc(tsp->len + + sizeof (struct nfsrv_stable) - 1, M_TEMP, + M_WAITOK); + NFSBCOPY((caddr_t)tsp, (caddr_t)&sp->nst_rec, + sizeof (struct nfst_rec) + tsp->len - 1); + LIST_INSERT_HEAD(&sf->nsf_head, sp, nst_list); + } else { + if (tsp->flag == NFSNST_REVOKE) + sp->nst_flag |= NFSNST_REVOKE; + else + /* + * A subsequent timestamp indicates the client + * did a setclientid/confirm and any previous + * revoke is no longer relevant. + */ + sp->nst_flag &= ~NFSNST_REVOKE; + } + } + } while (len > 0); + free((caddr_t)tsp, M_TEMP); + sf->nsf_flags = NFSNSF_OK; + sf->nsf_eograce = NFSD_MONOSEC + sf->nsf_lease + + NFSRV_LEASEDELTA; +} + +/* + * Update the stable storage file, now that the grace period is over. + */ +APPLESTATIC void +nfsrv_updatestable(NFSPROC_T *p) +{ + struct nfsrv_stablefirst *sf = &nfsrv_stablefirst; + struct nfsrv_stable *sp, *nsp; + int i; + struct nfsvattr nva; + vnode_t vp; +#if defined(__FreeBSD_version) && (__FreeBSD_version >= 500000) + mount_t mp = NULL; +#endif + int error; + + if (sf->nsf_fp == NULL || (sf->nsf_flags & NFSNSF_UPDATEDONE)) + return; + sf->nsf_flags |= NFSNSF_UPDATEDONE; + /* + * Ok, we need to rewrite the stable storage file. 
+ * - truncate to 0 length + * - write the new first structure + * - loop through the data structures, writing out any that + * have timestamps older than the old boot + */ + if (sf->nsf_bootvals) { + sf->nsf_numboots++; + for (i = sf->nsf_numboots - 2; i >= 0; i--) + sf->nsf_bootvals[i + 1] = sf->nsf_bootvals[i]; + } else { + sf->nsf_numboots = 1; + sf->nsf_bootvals = (time_t *)malloc(sizeof (time_t), + M_TEMP, M_WAITOK); + } + sf->nsf_bootvals[0] = nfsrvboottime; + sf->nsf_lease = nfsrv_lease; + NFSVNO_ATTRINIT(&nva); + NFSVNO_SETATTRVAL(&nva, size, 0); + vp = NFSFPVNODE(sf->nsf_fp); + NFS_STARTWRITE(vp, &mp); + NFSVOPLOCK(vp, LK_EXCLUSIVE | LK_RETRY, p); + error = nfsvno_setattr(vp, &nva, NFSFPCRED(sf->nsf_fp), p, NULL); + NFS_ENDWRITE(mp); + NFSVOPUNLOCK(vp, 0, p); + if (!error) + error = NFSD_RDWR(UIO_WRITE, vp, + (caddr_t)&sf->nsf_rec, sizeof (struct nfsf_rec), (off_t)0, + UIO_SYSSPACE, IO_SYNC, NFSFPCRED(sf->nsf_fp), NULL, p); + if (!error) + error = NFSD_RDWR(UIO_WRITE, vp, + (caddr_t)sf->nsf_bootvals, + sf->nsf_numboots * sizeof (time_t), + (off_t)(sizeof (struct nfsf_rec)), + UIO_SYSSPACE, IO_SYNC, NFSFPCRED(sf->nsf_fp), NULL, p); + free((caddr_t)sf->nsf_bootvals, M_TEMP); + sf->nsf_bootvals = NULL; + if (error) { + sf->nsf_flags &= ~NFSNSF_OK; + printf("EEK! Can't write NfsV4 stable storage file\n"); + return; + } + sf->nsf_flags |= NFSNSF_OK; + + /* + * Loop through the list and write out timestamp records for + * any clients that successfully reclaimed state. + */ + LIST_FOREACH_SAFE(sp, &sf->nsf_head, nst_list, nsp) { + if (sp->nst_flag & NFSNST_GOTSTATE) { + nfsrv_writestable(sp->nst_client, sp->nst_len, + NFSNST_NEWSTATE, p); + sp->nst_clp->lc_flags |= LCL_STAMPEDSTABLE; + } + LIST_REMOVE(sp, nst_list); + free((caddr_t)sp, M_TEMP); + } +} + +/* + * Append a record to the stable storage file. + */ +APPLESTATIC void +nfsrv_writestable(u_char *client, int len, int flag, NFSPROC_T *p) +{ + struct nfsrv_stablefirst *sf = &nfsrv_stablefirst; + struct nfst_rec *sp; + int error; + + if (!(sf->nsf_flags & NFSNSF_OK) || sf->nsf_fp == NULL) + return; + sp = (struct nfst_rec *)malloc(sizeof (struct nfst_rec) + + len - 1, M_TEMP, M_WAITOK); + sp->len = len; + NFSBCOPY(client, sp->client, len); + sp->flag = flag; + error = NFSD_RDWR(UIO_WRITE, NFSFPVNODE(sf->nsf_fp), + (caddr_t)sp, sizeof (struct nfst_rec) + len - 1, (off_t)0, + UIO_SYSSPACE, (IO_SYNC | IO_APPEND), NFSFPCRED(sf->nsf_fp), NULL, p); + free((caddr_t)sp, M_TEMP); + if (error) { + sf->nsf_flags &= ~NFSNSF_OK; + printf("EEK! Can't write NfsV4 stable storage file\n"); + } +} + +/* + * This function is called during the grace period to mark a client + * that successfully reclaimed state. + */ +static void +nfsrv_markstable(struct nfsclient *clp) +{ + struct nfsrv_stable *sp; + + /* + * First find the client structure. + */ + LIST_FOREACH(sp, &nfsrv_stablefirst.nsf_head, nst_list) { + if (sp->nst_len == clp->lc_idlen && + !NFSBCMP(sp->nst_client, clp->lc_id, sp->nst_len)) + break; + } + if (sp == LIST_END(&nfsrv_stablefirst.nsf_head)) + return; + + /* + * Now, just mark it and set the nfsclient back pointer. + */ + sp->nst_flag |= NFSNST_GOTSTATE; + sp->nst_clp = clp; +} + +/* + * This function is called for a reclaim, to see if it gets grace. + * It returns 0 if a reclaim is allowed, 1 otherwise. + */ +static int +nfsrv_checkstable(struct nfsclient *clp) +{ + struct nfsrv_stable *sp; + + /* + * First, find the entry for the client. 
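+	 * The match is on the client's opaque identifier string
+	 * (lc_id/lc_idlen), the same string nfsrv_writestable() records
+	 * in the nfst_rec, not on the clientid value.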
+ */ + LIST_FOREACH(sp, &nfsrv_stablefirst.nsf_head, nst_list) { + if (sp->nst_len == clp->lc_idlen && + !NFSBCMP(sp->nst_client, clp->lc_id, sp->nst_len)) + break; + } + + /* + * If not in the list, state was revoked or no state was issued + * since the previous reboot, a reclaim is denied. + */ + if (sp == LIST_END(&nfsrv_stablefirst.nsf_head) || + (sp->nst_flag & NFSNST_REVOKE) || + !(nfsrv_stablefirst.nsf_flags & NFSNSF_OK)) + return (1); + return (0); +} + +/* + * Test for and try to clear out a conflicting client. This is called by + * nfsrv_lockctrl() and nfsrv_openctrl() when conflicts with other clients + * a found. + * The trick here is that it can't revoke a conflicting client with an + * expired lease unless it holds the v4root lock, so... + * If no v4root lock, get the lock and return 1 to indicate "try again". + * Return 0 to indicate the conflict can't be revoked and 1 to indicate + * the revocation worked and the conflicting client is "bye, bye", so it + * can be tried again. + * Unlocks State before a non-zero value is returned. + */ +static int +nfsrv_clientconflict(struct nfsclient *clp, int *haslockp, __unused vnode_t vp, + NFSPROC_T *p) +{ + int gotlock; + + /* + * If lease hasn't expired, we can't fix it. + */ + if (clp->lc_expiry >= NFSD_MONOSEC || + !(nfsrv_stablefirst.nsf_flags & NFSNSF_UPDATEDONE)) + return (0); + if (*haslockp == 0) { + NFSUNLOCKSTATE(); + NFSVOPUNLOCK(vp, 0, p); + NFSLOCKV4ROOTMUTEX(); + nfsv4_relref(&nfsv4rootfs_lock); + do { + gotlock = nfsv4_lock(&nfsv4rootfs_lock, 1, NULL, + NFSV4ROOTLOCKMUTEXPTR); + } while (!gotlock); + NFSUNLOCKV4ROOTMUTEX(); + NFSLOCKSTATE(); /* to avoid a race with */ + NFSUNLOCKSTATE(); /* nfsrv_servertimer() */ + *haslockp = 1; + NFSVOPLOCK(vp, LK_EXCLUSIVE | LK_RETRY, p); + return (1); + } + NFSUNLOCKSTATE(); + + /* + * Ok, we can expire the conflicting client. + */ + nfsrv_writestable(clp->lc_id, clp->lc_idlen, NFSNST_REVOKE, p); + nfsrv_cleanclient(clp, p); + nfsrv_freedeleglist(&clp->lc_deleg); + nfsrv_freedeleglist(&clp->lc_olddeleg); + LIST_REMOVE(clp, lc_hash); + nfsrv_zapclient(clp, p); + return (1); +} + + +/* + * Resolve a delegation conflict. + * Returns 0 to indicate the conflict was resolved without sleeping. + * Return -1 to indicate that the caller should check for conflicts again. + * Return > 0 for an error that should be returned, normally NFSERR_DELAY. + * + * Also, manipulate the nfsv4root_lock, as required. It isn't changed + * for a return of 0, since there was no sleep and it could be required + * later. It is released for a return of NFSERR_DELAY, since the caller + * will return that error. It is released when a sleep was done waiting + * for the delegation to be returned or expire (so that other nfsds can + * handle ops). Then, it must be acquired for the write to stable storage. + * (This function is somewhat similar to nfsrv_clientconflict(), but + * the semantics differ in a couple of subtle ways. The return of 0 + * indicates the conflict was resolved without sleeping here, not + * that the conflict can't be resolved and the handling of nfsv4root_lock + * differs, as noted above.) + * Unlocks State before returning a non-zero value. + */ +static int +nfsrv_delegconflict(struct nfsstate *stp, int *haslockp, NFSPROC_T *p, + __unused vnode_t vp) +{ + struct nfsclient *clp = stp->ls_clp; + int gotlock, error, retrycnt, zapped_clp; + nfsv4stateid_t tstateid; + fhandle_t tfh; + + /* + * If the conflict is with an old delegation... 
+ */ + if (stp->ls_flags & NFSLCK_OLDDELEG) { + /* + * You can delete it, if it has expired. + */ + if (clp->lc_delegtime < NFSD_MONOSEC) { + nfsrv_freedeleg(stp); + NFSUNLOCKSTATE(); + return (-1); + } + NFSUNLOCKSTATE(); + /* + * During this delay, the old delegation could expire or it + * could be recovered by the client via an Open with + * CLAIM_DELEGATE_PREV. + * Release the nfsv4root_lock, if held. + */ + if (*haslockp) { + *haslockp = 0; + NFSLOCKV4ROOTMUTEX(); + nfsv4_unlock(&nfsv4rootfs_lock, 1); + NFSUNLOCKV4ROOTMUTEX(); + } + return (NFSERR_DELAY); + } + + /* + * It's a current delegation, so: + * - check to see if the delegation has expired + * - if so, get the v4root lock and then expire it + */ + if (!(stp->ls_flags & NFSLCK_DELEGRECALL)) { + /* + * - do a recall callback, since not yet done + * For now, never allow truncate to be set. To use + * truncate safely, it must be guaranteed that the + * Remove, Rename or Setattr with size of 0 will + * succeed and that would require major changes to + * the VFS/Vnode OPs. + * Set the expiry time large enough so that it won't expire + * until after the callback, then set it correctly, once + * the callback is done. (The delegation will now time + * out whether or not the Recall worked ok. The timeout + * will be extended when ops are done on the delegation + * stateid, up to the timelimit.) + */ + stp->ls_delegtime = NFSD_MONOSEC + (2 * nfsrv_lease) + + NFSRV_LEASEDELTA; + stp->ls_delegtimelimit = NFSD_MONOSEC + (6 * nfsrv_lease) + + NFSRV_LEASEDELTA; + stp->ls_flags |= NFSLCK_DELEGRECALL; + + /* + * Loop NFSRV_CBRETRYCNT times while the CBRecall replies + * NFSERR_BADSTATEID or NFSERR_BADHANDLE. This is done + * in order to try and avoid a race that could happen + * when a CBRecall request passed the Open reply with + * the delegation in it when transitting the network. + * Since nfsrv_docallback will sleep, don't use stp after + * the call. + */ + NFSBCOPY((caddr_t)&stp->ls_stateid, (caddr_t)&tstateid, + sizeof (tstateid)); + NFSBCOPY((caddr_t)&stp->ls_lfp->lf_fh, (caddr_t)&tfh, + sizeof (tfh)); + NFSUNLOCKSTATE(); + if (*haslockp) { + *haslockp = 0; + NFSLOCKV4ROOTMUTEX(); + nfsv4_unlock(&nfsv4rootfs_lock, 1); + NFSUNLOCKV4ROOTMUTEX(); + } + retrycnt = 0; + do { + error = nfsrv_docallback(clp, NFSV4OP_CBRECALL, + &tstateid, 0, &tfh, NULL, NULL, p); + retrycnt++; + } while ((error == NFSERR_BADSTATEID || + error == NFSERR_BADHANDLE) && retrycnt < NFSV4_CBRETRYCNT); + return (NFSERR_DELAY); + } + + if (clp->lc_expiry >= NFSD_MONOSEC && + stp->ls_delegtime >= NFSD_MONOSEC) { + NFSUNLOCKSTATE(); + /* + * A recall has been done, but it has not yet expired. + * So, RETURN_DELAY. + */ + if (*haslockp) { + *haslockp = 0; + NFSLOCKV4ROOTMUTEX(); + nfsv4_unlock(&nfsv4rootfs_lock, 1); + NFSUNLOCKV4ROOTMUTEX(); + } + return (NFSERR_DELAY); + } + + /* + * If we don't yet have the lock, just get it and then return, + * since we need that before deleting expired state, such as + * this delegation. + * When getting the lock, unlock the vnode, so other nfsds that + * are in progress, won't get stuck waiting for the vnode lock. 
+ */ + if (*haslockp == 0) { + NFSUNLOCKSTATE(); + NFSVOPUNLOCK(vp, 0, p); + NFSLOCKV4ROOTMUTEX(); + nfsv4_relref(&nfsv4rootfs_lock); + do { + gotlock = nfsv4_lock(&nfsv4rootfs_lock, 1, NULL, + NFSV4ROOTLOCKMUTEXPTR); + } while (!gotlock); + NFSUNLOCKV4ROOTMUTEX(); + NFSLOCKSTATE(); /* to avoid a race with */ + NFSUNLOCKSTATE(); /* nfsrv_servertimer() */ + *haslockp = 1; + NFSVOPLOCK(vp, LK_EXCLUSIVE | LK_RETRY, p); + return (-1); + } + + NFSUNLOCKSTATE(); + /* + * Ok, we can delete the expired delegation. + * First, write the Revoke record to stable storage and then + * clear out the conflict. + * Since all other nfsd threads are now blocked, we can safely + * sleep without the state changing. + */ + nfsrv_writestable(clp->lc_id, clp->lc_idlen, NFSNST_REVOKE, p); + if (clp->lc_expiry < NFSD_MONOSEC) { + nfsrv_cleanclient(clp, p); + nfsrv_freedeleglist(&clp->lc_deleg); + nfsrv_freedeleglist(&clp->lc_olddeleg); + LIST_REMOVE(clp, lc_hash); + zapped_clp = 1; + } else { + nfsrv_freedeleg(stp); + zapped_clp = 0; + } + if (zapped_clp) + nfsrv_zapclient(clp, p); + return (-1); +} + +/* + * Check for a remove allowed, if remove is set to 1 and get rid of + * delegations. + */ +APPLESTATIC int +nfsrv_checkremove(vnode_t vp, int remove, NFSPROC_T *p) +{ + struct nfsstate *stp; + struct nfslockfile *lfp; + int error, haslock = 0; + fhandle_t nfh; + + /* + * First, get the lock file structure. + * (A return of -1 means no associated state, so remove ok.) + */ + error = nfsrv_getlockfh(vp, NFSLCK_CHECK, NULL, &nfh, p); +tryagain: + NFSLOCKSTATE(); + if (!error) + error = nfsrv_getlockfile(NFSLCK_CHECK, NULL, &lfp, &nfh); + if (error) { + NFSUNLOCKSTATE(); + if (haslock) { + NFSLOCKV4ROOTMUTEX(); + nfsv4_unlock(&nfsv4rootfs_lock, 1); + NFSUNLOCKV4ROOTMUTEX(); + } + if (error == -1) + return (0); + return (error); + } + + /* + * Now, we must Recall any delegations. + */ + error = nfsrv_cleandeleg(vp, lfp, NULL, &haslock, p); + if (error) { + /* + * nfsrv_cleandeleg() unlocks state for non-zero + * return. + */ + if (error == -1) + goto tryagain; + if (haslock) { + NFSLOCKV4ROOTMUTEX(); + nfsv4_unlock(&nfsv4rootfs_lock, 1); + NFSUNLOCKV4ROOTMUTEX(); + } + return (error); + } + + /* + * Now, look for a conflicting open share. + */ + if (remove) { + LIST_FOREACH(stp, &lfp->lf_open, ls_file) { + if (stp->ls_flags & NFSLCK_WRITEDENY) { + error = NFSERR_FILEOPEN; + break; + } + } + } + + NFSUNLOCKSTATE(); + if (haslock) { + NFSLOCKV4ROOTMUTEX(); + nfsv4_unlock(&nfsv4rootfs_lock, 1); + NFSUNLOCKV4ROOTMUTEX(); + } + return (error); +} + +/* + * Clear out all delegations for the file referred to by lfp. + * May return NFSERR_DELAY, if there will be a delay waiting for + * delegations to expire. + * Returns -1 to indicate it slept while recalling a delegation. + * This function has the side effect of deleting the nfslockfile structure, + * if it no longer has associated state and didn't have to sleep. + * Unlocks State before a non-zero value is returned. + */ +static int +nfsrv_cleandeleg(vnode_t vp, struct nfslockfile *lfp, + struct nfsclient *clp, int *haslockp, NFSPROC_T *p) +{ + struct nfsstate *stp, *nstp; + int ret; + + stp = LIST_FIRST(&lfp->lf_deleg); + while (stp != LIST_END(&lfp->lf_deleg)) { + nstp = LIST_NEXT(stp, ls_file); + if (stp->ls_clp != clp) { + ret = nfsrv_delegconflict(stp, haslockp, p, vp); + if (ret) { + /* + * nfsrv_delegconflict() unlocks state + * when it returns non-zero. 
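+			 * A -1 return means this thread slept during
+			 * the recall, so callers such as
+			 * nfsrv_checkremove() restart their scan, since
+			 * the state lists may have changed while asleep.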
+ */ + return (ret); + } + } + stp = nstp; + } + return (0); +} + +/* + * There are certain operations that, when being done outside of NFSv4, + * require that any NFSv4 delegation for the file be recalled. + * This function is to be called for those cases: + * VOP_RENAME() - When a delegation is being recalled for any reason, + * the client may have to do Opens against the server, using the file's + * final component name. If the file has been renamed on the server, + * that component name will be incorrect and the Open will fail. + * VOP_REMOVE() - Theoretically, a client could Open a file after it has + * been removed on the server, if there is a delegation issued to + * that client for the file. I say "theoretically" since clients + * normally do an Access Op before the Open and that Access Op will + * fail with ESTALE. Note that NFSv2 and 3 don't even do Opens, so + * they will detect the file's removal in the same manner. (There is + * one case where RFC3530 allows a client to do an Open without first + * doing an Access Op, which is passage of a check against the ACE + * returned with a Write delegation, but current practice is to ignore + * the ACE and always do an Access Op.) + * Since the functions can only be called with an unlocked vnode, this + * can't be done at this time. + * VOP_ADVLOCK() - When a client holds a delegation, it can issue byte range + * locks locally in the client, which are not visible to the server. To + * deal with this, issuing of delegations for a vnode must be disabled + * and all delegations for the vnode recalled. This is done via the + * second function, using the VV_DISABLEDELEG vflag on the vnode. + */ +APPLESTATIC void +nfsd_recalldelegation(vnode_t vp, NFSPROC_T *p) +{ + struct timespec mytime; + int32_t starttime; + int error; + + KASSERT(!VOP_ISLOCKED(vp), ("vp %p is locked", vp)); + + /* + * First, check to see if the server is currently running and it has + * been called for a regular file when issuing delegations. + */ + if (newnfs_numnfsd == 0 || vp->v_type != VREG || + nfsrv_issuedelegs == 0) + return; + + /* + * Now, call nfsrv_checkremove() in a loop while it returns + * NFSERR_DELAY. Return upon any other error or when timed out. + */ + NFSGETNANOTIME(&mytime); + starttime = (u_int32_t)mytime.tv_sec; + do { + error = nfsrv_checkremove(vp, 0, p); + if (error == NFSERR_DELAY) { + NFSGETNANOTIME(&mytime); + if (((u_int32_t)mytime.tv_sec - starttime) > + NFS_REMOVETIMEO && + ((u_int32_t)mytime.tv_sec - starttime) < + 100000) + return; + /* Sleep for a short period of time */ + (void) nfs_catnap(PZERO, "nfsremove"); + } + } while (error == NFSERR_DELAY); +} + +APPLESTATIC void +nfsd_disabledelegation(vnode_t vp, NFSPROC_T *p) +{ + +#ifdef VV_DISABLEDELEG + /* + * First, flag issuance of delegations disabled. + */ + atomic_set_long(&vp->v_vflag, VV_DISABLEDELEG); +#endif + + /* + * Then call nfsd_recalldelegation() to get rid of all extant + * delegations. + */ + nfsd_recalldelegation(vp, p); +} + +/* + * Check for conflicting locks, etc. and then get rid of delegations. + * (At one point I thought that I should get rid of delegations for any + * Setattr, since it could potentially disallow the I/O op (read or write) + * allowed by the delegation. However, Setattr Ops that aren't changing + * the size get a stateid of all 0s, so you can't tell if it is a delegation + * for the same client or a different one, so I decided to only get rid + * of delegations for other clients when the size is being changed.) 
+ * In general, a Setattr can disable NFS I/O Ops that are outstanding, such + * as Write backs, even if there is no delegation, so it really isn't any + * different?) + */ +APPLESTATIC int +nfsrv_checksetattr(vnode_t vp, struct nfsrv_descript *nd, + nfsv4stateid_t *stateidp, struct nfsvattr *nvap, nfsattrbit_t *attrbitp, + struct nfsexstuff *exp, NFSPROC_T *p) +{ + struct nfsstate st, *stp = &st; + struct nfslock lo, *lop = &lo; + int error = 0; + nfsquad_t clientid; + + if (NFSISSET_ATTRBIT(attrbitp, NFSATTRBIT_SIZE)) { + stp->ls_flags = (NFSLCK_CHECK | NFSLCK_WRITEACCESS); + lop->lo_first = nvap->na_size; + } else { + stp->ls_flags = 0; + lop->lo_first = 0; + } + if (NFSISSET_ATTRBIT(attrbitp, NFSATTRBIT_OWNER) || + NFSISSET_ATTRBIT(attrbitp, NFSATTRBIT_OWNERGROUP) || + NFSISSET_ATTRBIT(attrbitp, NFSATTRBIT_MODE) || + NFSISSET_ATTRBIT(attrbitp, NFSATTRBIT_ACL)) + stp->ls_flags |= NFSLCK_SETATTR; + if (stp->ls_flags == 0) + return (0); + lop->lo_end = NFS64BITSSET; + lop->lo_flags = NFSLCK_WRITE; + stp->ls_ownerlen = 0; + stp->ls_op = NULL; + stp->ls_uid = nd->nd_cred->cr_uid; + stp->ls_stateid.seqid = stateidp->seqid; + clientid.lval[0] = stp->ls_stateid.other[0] = stateidp->other[0]; + clientid.lval[1] = stp->ls_stateid.other[1] = stateidp->other[1]; + stp->ls_stateid.other[2] = stateidp->other[2]; + error = nfsrv_lockctrl(vp, &stp, &lop, NULL, clientid, + stateidp, exp, nd, p); + return (error); +} + +/* + * Check for a write delegation and do a CBGETATTR if there is one, updating + * the attributes, as required. + * Should I return an error if I can't get the attributes? (For now, I'll + * just return ok. + */ +APPLESTATIC int +nfsrv_checkgetattr(struct nfsrv_descript *nd, vnode_t vp, + struct nfsvattr *nvap, nfsattrbit_t *attrbitp, struct ucred *cred, + NFSPROC_T *p) +{ + struct nfsstate *stp; + struct nfslockfile *lfp; + struct nfsclient *clp; + struct nfsvattr nva; + fhandle_t nfh; + int error; + nfsattrbit_t cbbits; + u_quad_t delegfilerev; + + NFSCBGETATTR_ATTRBIT(attrbitp, &cbbits); + if (!NFSNONZERO_ATTRBIT(&cbbits)) + return (0); + + /* + * Get the lock file structure. + * (A return of -1 means no associated state, so return ok.) + */ + error = nfsrv_getlockfh(vp, NFSLCK_CHECK, NULL, &nfh, p); + NFSLOCKSTATE(); + if (!error) + error = nfsrv_getlockfile(NFSLCK_CHECK, NULL, &lfp, &nfh); + if (error) { + NFSUNLOCKSTATE(); + if (error == -1) + return (0); + return (error); + } + + /* + * Now, look for a write delegation. + */ + LIST_FOREACH(stp, &lfp->lf_deleg, ls_file) { + if (stp->ls_flags & NFSLCK_DELEGWRITE) + break; + } + if (stp == LIST_END(&lfp->lf_deleg)) { + NFSUNLOCKSTATE(); + return (0); + } + clp = stp->ls_clp; + delegfilerev = stp->ls_filerev; + + /* + * If the Write delegation was issued as a part of this Compound RPC + * or if we have an Implied Clientid (used in a previous Op in this + * compound) and it is the client the delegation was issued to, + * just return ok. + * I also assume that it is from the same client iff the network + * host IP address is the same as the callback address. (Not + * exactly correct by the RFC, but avoids a lot of Getattr + * callbacks.) + */ + if (nd->nd_compref == stp->ls_compref || + ((nd->nd_flag & ND_IMPLIEDCLID) && + clp->lc_clientid.qval == nd->nd_clientid.qval) || + nfsaddr2_match(clp->lc_req.nr_nam, nd->nd_nam)) { + NFSUNLOCKSTATE(); + return (0); + } + + /* + * We are now done with the delegation state structure, + * so the statelock can be released and we can now tsleep(). 
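+	 * (The callback done below exists because the client holding
+	 * the write delegation may have more recent Change/Size
+	 * information than the server does.)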
+ */ + + /* + * Now, we must do the CB Getattr callback, to see if Change or Size + * has changed. + */ + if (clp->lc_expiry >= NFSD_MONOSEC) { + NFSUNLOCKSTATE(); + NFSVNO_ATTRINIT(&nva); + nva.na_filerev = NFS64BITSSET; + error = nfsrv_docallback(clp, NFSV4OP_CBGETATTR, NULL, + 0, &nfh, &nva, &cbbits, p); + if (!error) { + if ((nva.na_filerev != NFS64BITSSET && + nva.na_filerev > delegfilerev) || + (NFSVNO_ISSETSIZE(&nva) && + nva.na_size != nvap->na_size)) { + nfsvno_updfilerev(vp, nvap, cred, p); + if (NFSVNO_ISSETSIZE(&nva)) + nvap->na_size = nva.na_size; + } + } + } else { + NFSUNLOCKSTATE(); + } + return (0); +} + +/* + * This function looks for openowners that haven't had any opens for + * a while and throws them away. Called by an nfsd when NFSNSF_NOOPENS + * is set. + */ +APPLESTATIC void +nfsrv_throwawayopens(NFSPROC_T *p) +{ + struct nfsclient *clp, *nclp; + struct nfsstate *stp, *nstp; + int i; + + NFSLOCKSTATE(); + nfsrv_stablefirst.nsf_flags &= ~NFSNSF_NOOPENS; + /* + * For each client... + */ + for (i = 0; i < NFSCLIENTHASHSIZE; i++) { + LIST_FOREACH_SAFE(clp, &nfsclienthash[i], lc_hash, nclp) { + LIST_FOREACH_SAFE(stp, &clp->lc_open, ls_list, nstp) { + if (LIST_EMPTY(&stp->ls_open) && + (stp->ls_noopens > NFSNOOPEN || + (nfsrv_openpluslock * 2) > + NFSRV_V4STATELIMIT)) + nfsrv_freeopenowner(stp, 0, p); + } + } + } + NFSUNLOCKSTATE(); +} + +/* + * This function checks to see if the credentials are the same. + * Returns 1 for not same, 0 otherwise. + */ +static int +nfsrv_notsamecredname(struct nfsrv_descript *nd, struct nfsclient *clp) +{ + + if (nd->nd_flag & ND_GSS) { + if (!(clp->lc_flags & LCL_GSS)) + return (1); + if (clp->lc_flags & LCL_NAME) { + if (nd->nd_princlen != clp->lc_namelen || + NFSBCMP(nd->nd_principal, clp->lc_name, + clp->lc_namelen)) + return (1); + else + return (0); + } + if (nd->nd_cred->cr_uid == clp->lc_uid) + return (0); + else + return (1); + } else if (clp->lc_flags & LCL_GSS) + return (1); + /* + * For AUTH_SYS, allow the same uid or root. (This is underspecified + * in RFC3530, which talks about principals, but doesn't say anything + * about uids for AUTH_SYS.) + */ + if (nd->nd_cred->cr_uid == clp->lc_uid || nd->nd_cred->cr_uid == 0) + return (0); + else + return (1); +} + +/* + * Calculate the lease expiry time. + */ +static time_t +nfsrv_leaseexpiry(void) +{ + struct timeval curtime; + + NFSGETTIME(&curtime); + if (nfsrv_stablefirst.nsf_eograce > NFSD_MONOSEC) + return (NFSD_MONOSEC + 2 * (nfsrv_lease + NFSRV_LEASEDELTA)); + return (NFSD_MONOSEC + nfsrv_lease + NFSRV_LEASEDELTA); +} + +/* + * Delay the delegation timeout as far as ls_delegtimelimit, as required. + */ +static void +nfsrv_delaydelegtimeout(struct nfsstate *stp) +{ + + if ((stp->ls_flags & NFSLCK_DELEGRECALL) == 0) + return; + + if ((stp->ls_delegtime + 15) > NFSD_MONOSEC && + stp->ls_delegtime < stp->ls_delegtimelimit) { + stp->ls_delegtime += nfsrv_lease; + if (stp->ls_delegtime > stp->ls_delegtimelimit) + stp->ls_delegtime = stp->ls_delegtimelimit; + } +} + +/* + * Go through a lock list and set local locks for all ranges. + * This assumes that the lock list is sorted on increasing + * lo_first and that the list won't change, despite the possibility + * of sleeps. 
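+ * Adjacent ranges of the same lock type are collated, so, for example,
+ * a write lock whose end meets the start of the next write lock in the
+ * list is set locally as a single F_WRLCK covering the combined range.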
+ */ +static void +nfsrv_locallocks(vnode_t vp, struct nfslockfile *lfp, + NFSPROC_T *p) +{ + struct nfslock *lop, *nlop; + vnode_t tvp; + int newcollate, flags = 0; + u_int64_t first = 0x0ull, end = 0x0ull; + + if (!nfsrv_dolocallocks) + return; + /* + * If vp is NULL, a vnode must be aquired from the file + * handle. + */ + if (vp == NULL) { + if (lfp == NULL) + panic("nfsrv_locallocks"); + tvp = nfsvno_getvp(&lfp->lf_fh); + if (tvp == NULL) + return; + } else { + tvp = vp; + } + + /* + * If lfp == NULL, the lock list is empty, so just unlock + * everything. + */ + if (lfp == NULL) { + (void) nfsvno_advlock(tvp, F_UNLCK, (u_int64_t)0, + NFS64BITSSET, p); + /* vp can't be NULL */ + return; + } + + /* handle whole file case first */ + lop = LIST_FIRST(&lfp->lf_lock); + if (lop != LIST_END(&lfp->lf_lock) && + lop->lo_first == (u_int64_t)0 && + lop->lo_end == NFS64BITSSET) { + if (lop->lo_flags & NFSLCK_WRITE) + (void) nfsvno_advlock(tvp, F_WRLCK, lop->lo_first, + lop->lo_end, p); + else + (void) nfsvno_advlock(tvp, F_RDLCK, lop->lo_first, + lop->lo_end, p); + if (vp == NULL) + vput(tvp); + return; + } + + /* + * Now, handle the separate byte ranges cases. + */ + (void) nfsvno_advlock(tvp, F_UNLCK, (u_int64_t)0, + NFS64BITSSET, p); + newcollate = 1; + while (lop != LIST_END(&lfp->lf_lock)) { + nlop = LIST_NEXT(lop, lo_lckfile); + if (newcollate) { + first = lop->lo_first; + end = lop->lo_end; + flags = lop->lo_flags; + newcollate = 0; + } + if (nlop != LIST_END(&lfp->lf_lock) && + flags == nlop->lo_flags && + end >= nlop->lo_first) { + /* can collate this one */ + end = nlop->lo_end; + } else { + /* do the local lock and start again */ + if (flags & NFSLCK_WRITE) + (void) nfsvno_advlock(tvp, F_WRLCK, first, + end, p); + else + (void) nfsvno_advlock(tvp, F_RDLCK, first, + end, p); + newcollate = 1; + } + lop = nlop; + } + if (vp == NULL) + vput(tvp); +} + +/* + * This function checks to see if there is any other state associated + * with the openowner for this Open. + * It returns 1 if there is no other state, 0 otherwise. + */ +static int +nfsrv_nootherstate(struct nfsstate *stp) +{ + struct nfsstate *tstp; + + LIST_FOREACH(tstp, &stp->ls_openowner->ls_open, ls_list) { + if (tstp != stp || !LIST_EMPTY(&tstp->ls_lock)) + return (0); + } + return (1); +} + diff --git a/sys/fs/nfsserver/nfs_nfsdsubs.c b/sys/fs/nfsserver/nfs_nfsdsubs.c new file mode 100644 index 0000000..51f367a --- /dev/null +++ b/sys/fs/nfsserver/nfs_nfsdsubs.c @@ -0,0 +1,2021 @@ +/*- + * Copyright (c) 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Rick Macklem at The University of Guelph. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ + +#include <sys/cdefs.h> +__FBSDID("$FreeBSD$"); + +#ifndef APPLEKEXT +/* + * These functions support the macros and help fiddle mbuf chains for + * the nfs op functions. They do things like create the rpc header and + * copy data between mbuf chains and uio lists. + */ +#include <fs/nfs/nfsport.h> + +extern u_int32_t newnfs_true, newnfs_false; +extern int nfs_pubfhset; +extern struct nfsclienthashhead nfsclienthash[NFSCLIENTHASHSIZE]; +extern struct nfslockhashhead nfslockhash[NFSLOCKHASHSIZE]; +extern int nfsrv_useacl; +extern uid_t nfsrv_defaultuid; +extern gid_t nfsrv_defaultgid; + +char nfs_v2pubfh[NFSX_V2FH]; +static nfstype newnfsv2_type[9] = { NFNON, NFREG, NFDIR, NFBLK, NFCHR, NFLNK, + NFNON, NFCHR, NFNON }; +extern nfstype nfsv34_type[9]; +#endif /* !APPLEKEXT */ + +static char nfsrv_hexdigit(char, int *); + +/* + * Maps errno values to nfs error numbers. + * Use NFSERR_IO as the catch all for ones not specifically defined in + * RFC 1094. + */ +static u_char nfsrv_v2errmap[ELAST] = { + NFSERR_PERM, NFSERR_NOENT, NFSERR_IO, NFSERR_IO, NFSERR_IO, + NFSERR_NXIO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, + NFSERR_IO, NFSERR_IO, NFSERR_ACCES, NFSERR_IO, NFSERR_IO, + NFSERR_IO, NFSERR_EXIST, NFSERR_IO, NFSERR_NODEV, NFSERR_NOTDIR, + NFSERR_ISDIR, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, + NFSERR_IO, NFSERR_FBIG, NFSERR_NOSPC, NFSERR_IO, NFSERR_ROFS, + NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, + NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, + NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, + NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, + NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, + NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, + NFSERR_IO, NFSERR_IO, NFSERR_NAMETOL, NFSERR_IO, NFSERR_IO, + NFSERR_NOTEMPTY, NFSERR_IO, NFSERR_IO, NFSERR_DQUOT, NFSERR_STALE, + NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, + NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, + NFSERR_IO, +}; + +/* + * Maps errno values to nfs error numbers. + * Although it is not obvious whether or not NFS clients really care if + * a returned error value is in the specified list for the procedure, the + * safest thing to do is filter them appropriately. For Version 2, the + * X/Open XNFS document is the only specification that defines error values + * for each RPC (The RFC simply lists all possible error values for all RPCs), + * so I have decided to not do this for Version 2. + * The first entry is the default error return and the rest are the valid + * errors for that RPC in increasing numeric order. 
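+ * nfsd_errmap() below walks the list for the RPC being replied to; if
+ * nd_repstat appears in the list it is returned unchanged, otherwise the
+ * first (default) entry is used. For example, ENOENT from a V3 Lookup
+ * passes through as NFSERR_NOENT, while an errno that is not in
+ * nfsv3err_lookup[] is mapped to the default NFSERR_IO.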
+ */ +static short nfsv3err_null[] = { + 0, + 0, +}; + +static short nfsv3err_getattr[] = { + NFSERR_IO, + NFSERR_IO, + NFSERR_STALE, + NFSERR_BADHANDLE, + NFSERR_SERVERFAULT, + NFSERR_DELAY, + 0, +}; + +static short nfsv3err_setattr[] = { + NFSERR_IO, + NFSERR_ACCES, + NFSERR_PERM, + NFSERR_IO, + NFSERR_INVAL, + NFSERR_NOSPC, + NFSERR_ROFS, + NFSERR_DQUOT, + NFSERR_STALE, + NFSERR_BADHANDLE, + NFSERR_NOT_SYNC, + NFSERR_SERVERFAULT, + NFSERR_DELAY, + 0, +}; + +static short nfsv3err_lookup[] = { + NFSERR_IO, + NFSERR_NOENT, + NFSERR_ACCES, + NFSERR_NAMETOL, + NFSERR_IO, + NFSERR_NOTDIR, + NFSERR_STALE, + NFSERR_BADHANDLE, + NFSERR_SERVERFAULT, + NFSERR_DELAY, + 0, +}; + +static short nfsv3err_access[] = { + NFSERR_IO, + NFSERR_IO, + NFSERR_STALE, + NFSERR_BADHANDLE, + NFSERR_SERVERFAULT, + NFSERR_DELAY, + 0, +}; + +static short nfsv3err_readlink[] = { + NFSERR_IO, + NFSERR_IO, + NFSERR_ACCES, + NFSERR_INVAL, + NFSERR_STALE, + NFSERR_BADHANDLE, + NFSERR_NOTSUPP, + NFSERR_SERVERFAULT, + NFSERR_DELAY, + 0, +}; + +static short nfsv3err_read[] = { + NFSERR_IO, + NFSERR_IO, + NFSERR_NXIO, + NFSERR_ACCES, + NFSERR_INVAL, + NFSERR_STALE, + NFSERR_BADHANDLE, + NFSERR_SERVERFAULT, + NFSERR_DELAY, + 0, +}; + +static short nfsv3err_write[] = { + NFSERR_IO, + NFSERR_IO, + NFSERR_ACCES, + NFSERR_NOSPC, + NFSERR_INVAL, + NFSERR_FBIG, + NFSERR_ROFS, + NFSERR_DQUOT, + NFSERR_STALE, + NFSERR_BADHANDLE, + NFSERR_SERVERFAULT, + NFSERR_DELAY, + 0, +}; + +static short nfsv3err_create[] = { + NFSERR_IO, + NFSERR_EXIST, + NFSERR_NAMETOL, + NFSERR_ACCES, + NFSERR_IO, + NFSERR_NOTDIR, + NFSERR_NOSPC, + NFSERR_ROFS, + NFSERR_DQUOT, + NFSERR_STALE, + NFSERR_BADHANDLE, + NFSERR_NOTSUPP, + NFSERR_SERVERFAULT, + NFSERR_DELAY, + 0, +}; + +static short nfsv3err_mkdir[] = { + NFSERR_IO, + NFSERR_EXIST, + NFSERR_ACCES, + NFSERR_NAMETOL, + NFSERR_IO, + NFSERR_NOTDIR, + NFSERR_NOSPC, + NFSERR_ROFS, + NFSERR_DQUOT, + NFSERR_STALE, + NFSERR_BADHANDLE, + NFSERR_NOTSUPP, + NFSERR_SERVERFAULT, + NFSERR_DELAY, + 0, +}; + +static short nfsv3err_symlink[] = { + NFSERR_IO, + NFSERR_ACCES, + NFSERR_EXIST, + NFSERR_NAMETOL, + NFSERR_NOSPC, + NFSERR_IO, + NFSERR_NOTDIR, + NFSERR_ROFS, + NFSERR_DQUOT, + NFSERR_STALE, + NFSERR_BADHANDLE, + NFSERR_NOTSUPP, + NFSERR_SERVERFAULT, + NFSERR_DELAY, + 0, +}; + +static short nfsv3err_mknod[] = { + NFSERR_IO, + NFSERR_ACCES, + NFSERR_EXIST, + NFSERR_NAMETOL, + NFSERR_NOSPC, + NFSERR_IO, + NFSERR_NOTDIR, + NFSERR_ROFS, + NFSERR_DQUOT, + NFSERR_STALE, + NFSERR_BADHANDLE, + NFSERR_NOTSUPP, + NFSERR_SERVERFAULT, + NFSERR_DELAY, + NFSERR_BADTYPE, + 0, +}; + +static short nfsv3err_remove[] = { + NFSERR_IO, + NFSERR_NOENT, + NFSERR_ACCES, + NFSERR_NAMETOL, + NFSERR_IO, + NFSERR_NOTDIR, + NFSERR_ROFS, + NFSERR_STALE, + NFSERR_BADHANDLE, + NFSERR_SERVERFAULT, + NFSERR_DELAY, + 0, +}; + +static short nfsv3err_rmdir[] = { + NFSERR_IO, + NFSERR_NOENT, + NFSERR_ACCES, + NFSERR_NOTDIR, + NFSERR_NAMETOL, + NFSERR_IO, + NFSERR_EXIST, + NFSERR_INVAL, + NFSERR_ROFS, + NFSERR_NOTEMPTY, + NFSERR_STALE, + NFSERR_BADHANDLE, + NFSERR_NOTSUPP, + NFSERR_SERVERFAULT, + NFSERR_DELAY, + 0, +}; + +static short nfsv3err_rename[] = { + NFSERR_IO, + NFSERR_NOENT, + NFSERR_ACCES, + NFSERR_EXIST, + NFSERR_NAMETOL, + NFSERR_XDEV, + NFSERR_IO, + NFSERR_NOTDIR, + NFSERR_ISDIR, + NFSERR_INVAL, + NFSERR_NOSPC, + NFSERR_ROFS, + NFSERR_MLINK, + NFSERR_NOTEMPTY, + NFSERR_DQUOT, + NFSERR_STALE, + NFSERR_BADHANDLE, + NFSERR_NOTSUPP, + NFSERR_SERVERFAULT, + NFSERR_DELAY, + 0, +}; + +static short nfsv3err_link[] = { + NFSERR_IO, + NFSERR_ACCES, 
+ NFSERR_EXIST, + NFSERR_NAMETOL, + NFSERR_IO, + NFSERR_XDEV, + NFSERR_NOTDIR, + NFSERR_INVAL, + NFSERR_NOSPC, + NFSERR_ROFS, + NFSERR_MLINK, + NFSERR_DQUOT, + NFSERR_STALE, + NFSERR_BADHANDLE, + NFSERR_NOTSUPP, + NFSERR_SERVERFAULT, + NFSERR_DELAY, + 0, +}; + +static short nfsv3err_readdir[] = { + NFSERR_IO, + NFSERR_ACCES, + NFSERR_NOTDIR, + NFSERR_IO, + NFSERR_STALE, + NFSERR_BADHANDLE, + NFSERR_BAD_COOKIE, + NFSERR_TOOSMALL, + NFSERR_SERVERFAULT, + NFSERR_DELAY, + 0, +}; + +static short nfsv3err_readdirplus[] = { + NFSERR_IO, + NFSERR_ACCES, + NFSERR_NOTDIR, + NFSERR_IO, + NFSERR_STALE, + NFSERR_BADHANDLE, + NFSERR_BAD_COOKIE, + NFSERR_NOTSUPP, + NFSERR_TOOSMALL, + NFSERR_SERVERFAULT, + NFSERR_DELAY, + 0, +}; + +static short nfsv3err_fsstat[] = { + NFSERR_IO, + NFSERR_IO, + NFSERR_STALE, + NFSERR_BADHANDLE, + NFSERR_SERVERFAULT, + NFSERR_DELAY, + 0, +}; + +static short nfsv3err_fsinfo[] = { + NFSERR_STALE, + NFSERR_STALE, + NFSERR_BADHANDLE, + NFSERR_SERVERFAULT, + NFSERR_DELAY, + 0, +}; + +static short nfsv3err_pathconf[] = { + NFSERR_STALE, + NFSERR_STALE, + NFSERR_BADHANDLE, + NFSERR_SERVERFAULT, + NFSERR_DELAY, + 0, +}; + +static short nfsv3err_commit[] = { + NFSERR_IO, + NFSERR_IO, + NFSERR_STALE, + NFSERR_BADHANDLE, + NFSERR_SERVERFAULT, + NFSERR_DELAY, + 0, +}; + +static short *nfsrv_v3errmap[] = { + nfsv3err_null, + nfsv3err_getattr, + nfsv3err_setattr, + nfsv3err_lookup, + nfsv3err_access, + nfsv3err_readlink, + nfsv3err_read, + nfsv3err_write, + nfsv3err_create, + nfsv3err_mkdir, + nfsv3err_symlink, + nfsv3err_mknod, + nfsv3err_remove, + nfsv3err_rmdir, + nfsv3err_rename, + nfsv3err_link, + nfsv3err_readdir, + nfsv3err_readdirplus, + nfsv3err_fsstat, + nfsv3err_fsinfo, + nfsv3err_pathconf, + nfsv3err_commit, +}; + +/* + * And the same for V4. 
+ */ +static short nfsv4err_null[] = { + 0, + 0, +}; + +static short nfsv4err_access[] = { + NFSERR_IO, + NFSERR_ACCES, + NFSERR_BADHANDLE, + NFSERR_BADXDR, + NFSERR_DELAY, + NFSERR_FHEXPIRED, + NFSERR_INVAL, + NFSERR_IO, + NFSERR_MOVED, + NFSERR_NOFILEHANDLE, + NFSERR_RESOURCE, + NFSERR_SERVERFAULT, + NFSERR_STALE, + 0, +}; + +static short nfsv4err_close[] = { + NFSERR_EXPIRED, + NFSERR_ADMINREVOKED, + NFSERR_BADHANDLE, + NFSERR_BADSEQID, + NFSERR_BADSTATEID, + NFSERR_BADXDR, + NFSERR_DELAY, + NFSERR_EXPIRED, + NFSERR_FHEXPIRED, + NFSERR_INVAL, + NFSERR_ISDIR, + NFSERR_LEASEMOVED, + NFSERR_LOCKSHELD, + NFSERR_MOVED, + NFSERR_NOFILEHANDLE, + NFSERR_OLDSTATEID, + NFSERR_RESOURCE, + NFSERR_SERVERFAULT, + NFSERR_STALE, + NFSERR_STALESTATEID, + 0, +}; + +static short nfsv4err_commit[] = { + NFSERR_IO, + NFSERR_ACCES, + NFSERR_BADHANDLE, + NFSERR_BADXDR, + NFSERR_FHEXPIRED, + NFSERR_INVAL, + NFSERR_IO, + NFSERR_ISDIR, + NFSERR_MOVED, + NFSERR_NOFILEHANDLE, + NFSERR_RESOURCE, + NFSERR_ROFS, + NFSERR_SERVERFAULT, + NFSERR_STALE, + 0, +}; + +static short nfsv4err_create[] = { + NFSERR_IO, + NFSERR_ACCES, + NFSERR_ATTRNOTSUPP, + NFSERR_BADCHAR, + NFSERR_BADHANDLE, + NFSERR_BADNAME, + NFSERR_BADOWNER, + NFSERR_BADTYPE, + NFSERR_BADXDR, + NFSERR_DELAY, + NFSERR_DQUOT, + NFSERR_EXIST, + NFSERR_FHEXPIRED, + NFSERR_INVAL, + NFSERR_IO, + NFSERR_MOVED, + NFSERR_NAMETOL, + NFSERR_NOFILEHANDLE, + NFSERR_NOSPC, + NFSERR_NOTDIR, + NFSERR_PERM, + NFSERR_RESOURCE, + NFSERR_ROFS, + NFSERR_SERVERFAULT, + NFSERR_STALE, + 0, +}; + +static short nfsv4err_delegpurge[] = { + NFSERR_SERVERFAULT, + NFSERR_BADXDR, + NFSERR_NOTSUPP, + NFSERR_LEASEMOVED, + NFSERR_MOVED, + NFSERR_RESOURCE, + NFSERR_SERVERFAULT, + NFSERR_STALECLIENTID, + 0, +}; + +static short nfsv4err_delegreturn[] = { + NFSERR_SERVERFAULT, + NFSERR_ADMINREVOKED, + NFSERR_BADSTATEID, + NFSERR_BADXDR, + NFSERR_EXPIRED, + NFSERR_INVAL, + NFSERR_LEASEMOVED, + NFSERR_MOVED, + NFSERR_NOFILEHANDLE, + NFSERR_NOTSUPP, + NFSERR_OLDSTATEID, + NFSERR_RESOURCE, + NFSERR_SERVERFAULT, + NFSERR_STALE, + NFSERR_STALESTATEID, + 0, +}; + +static short nfsv4err_getattr[] = { + NFSERR_IO, + NFSERR_ACCES, + NFSERR_BADHANDLE, + NFSERR_BADXDR, + NFSERR_DELAY, + NFSERR_FHEXPIRED, + NFSERR_INVAL, + NFSERR_IO, + NFSERR_MOVED, + NFSERR_NOFILEHANDLE, + NFSERR_RESOURCE, + NFSERR_SERVERFAULT, + NFSERR_STALE, + 0, +}; + +static short nfsv4err_getfh[] = { + NFSERR_BADHANDLE, + NFSERR_BADHANDLE, + NFSERR_FHEXPIRED, + NFSERR_MOVED, + NFSERR_NOFILEHANDLE, + NFSERR_RESOURCE, + NFSERR_SERVERFAULT, + NFSERR_STALE, + 0, +}; + +static short nfsv4err_link[] = { + NFSERR_IO, + NFSERR_ACCES, + NFSERR_BADCHAR, + NFSERR_BADHANDLE, + NFSERR_BADNAME, + NFSERR_BADXDR, + NFSERR_DELAY, + NFSERR_DQUOT, + NFSERR_EXIST, + NFSERR_FHEXPIRED, + NFSERR_FILEOPEN, + NFSERR_INVAL, + NFSERR_IO, + NFSERR_ISDIR, + NFSERR_MLINK, + NFSERR_MOVED, + NFSERR_NAMETOL, + NFSERR_NOENT, + NFSERR_NOFILEHANDLE, + NFSERR_NOSPC, + NFSERR_NOTDIR, + NFSERR_NOTSUPP, + NFSERR_RESOURCE, + NFSERR_ROFS, + NFSERR_SERVERFAULT, + NFSERR_STALE, + NFSERR_WRONGSEC, + NFSERR_XDEV, + 0, +}; + +static short nfsv4err_lock[] = { + NFSERR_SERVERFAULT, + NFSERR_ACCES, + NFSERR_ADMINREVOKED, + NFSERR_BADHANDLE, + NFSERR_BADRANGE, + NFSERR_BADSEQID, + NFSERR_BADSTATEID, + NFSERR_BADXDR, + NFSERR_DEADLOCK, + NFSERR_DELAY, + NFSERR_DENIED, + NFSERR_EXPIRED, + NFSERR_FHEXPIRED, + NFSERR_GRACE, + NFSERR_INVAL, + NFSERR_ISDIR, + NFSERR_LEASEMOVED, + NFSERR_LOCKNOTSUPP, + NFSERR_LOCKRANGE, + NFSERR_MOVED, + NFSERR_NOFILEHANDLE, + NFSERR_NOGRACE, + 
NFSERR_OLDSTATEID, + NFSERR_OPENMODE, + NFSERR_RECLAIMBAD, + NFSERR_RECLAIMCONFLICT, + NFSERR_RESOURCE, + NFSERR_SERVERFAULT, + NFSERR_STALE, + NFSERR_STALECLIENTID, + NFSERR_STALESTATEID, + 0, +}; + +static short nfsv4err_lockt[] = { + NFSERR_SERVERFAULT, + NFSERR_ACCES, + NFSERR_BADHANDLE, + NFSERR_BADRANGE, + NFSERR_BADXDR, + NFSERR_DELAY, + NFSERR_DENIED, + NFSERR_FHEXPIRED, + NFSERR_GRACE, + NFSERR_INVAL, + NFSERR_ISDIR, + NFSERR_LEASEMOVED, + NFSERR_LOCKRANGE, + NFSERR_MOVED, + NFSERR_NOFILEHANDLE, + NFSERR_RESOURCE, + NFSERR_SERVERFAULT, + NFSERR_STALE, + NFSERR_STALECLIENTID, + 0, +}; + +static short nfsv4err_locku[] = { + NFSERR_SERVERFAULT, + NFSERR_ACCES, + NFSERR_ADMINREVOKED, + NFSERR_BADHANDLE, + NFSERR_BADRANGE, + NFSERR_BADSEQID, + NFSERR_BADSTATEID, + NFSERR_BADXDR, + NFSERR_EXPIRED, + NFSERR_FHEXPIRED, + NFSERR_GRACE, + NFSERR_INVAL, + NFSERR_ISDIR, + NFSERR_LEASEMOVED, + NFSERR_LOCKRANGE, + NFSERR_MOVED, + NFSERR_NOFILEHANDLE, + NFSERR_OLDSTATEID, + NFSERR_RESOURCE, + NFSERR_SERVERFAULT, + NFSERR_STALE, + NFSERR_STALESTATEID, + 0, +}; + +static short nfsv4err_lookup[] = { + NFSERR_IO, + NFSERR_ACCES, + NFSERR_BADCHAR, + NFSERR_BADHANDLE, + NFSERR_BADNAME, + NFSERR_BADXDR, + NFSERR_FHEXPIRED, + NFSERR_INVAL, + NFSERR_IO, + NFSERR_MOVED, + NFSERR_NAMETOL, + NFSERR_NOENT, + NFSERR_NOFILEHANDLE, + NFSERR_NOTDIR, + NFSERR_RESOURCE, + NFSERR_SERVERFAULT, + NFSERR_STALE, + NFSERR_SYMLINK, + NFSERR_WRONGSEC, + 0, +}; + +static short nfsv4err_lookupp[] = { + NFSERR_IO, + NFSERR_ACCES, + NFSERR_BADHANDLE, + NFSERR_FHEXPIRED, + NFSERR_IO, + NFSERR_MOVED, + NFSERR_NOENT, + NFSERR_NOFILEHANDLE, + NFSERR_NOTDIR, + NFSERR_RESOURCE, + NFSERR_SERVERFAULT, + NFSERR_STALE, + 0, +}; + +static short nfsv4err_nverify[] = { + NFSERR_IO, + NFSERR_ACCES, + NFSERR_ATTRNOTSUPP, + NFSERR_BADCHAR, + NFSERR_BADHANDLE, + NFSERR_BADXDR, + NFSERR_DELAY, + NFSERR_FHEXPIRED, + NFSERR_INVAL, + NFSERR_IO, + NFSERR_MOVED, + NFSERR_NOFILEHANDLE, + NFSERR_RESOURCE, + NFSERR_SAME, + NFSERR_SERVERFAULT, + NFSERR_STALE, + 0, +}; + +static short nfsv4err_open[] = { + NFSERR_IO, + NFSERR_ACCES, + NFSERR_ADMINREVOKED, + NFSERR_ATTRNOTSUPP, + NFSERR_BADCHAR, + NFSERR_BADHANDLE, + NFSERR_BADNAME, + NFSERR_BADOWNER, + NFSERR_BADSEQID, + NFSERR_BADXDR, + NFSERR_DELAY, + NFSERR_DQUOT, + NFSERR_EXIST, + NFSERR_EXPIRED, + NFSERR_FHEXPIRED, + NFSERR_GRACE, + NFSERR_IO, + NFSERR_INVAL, + NFSERR_ISDIR, + NFSERR_LEASEMOVED, + NFSERR_MOVED, + NFSERR_NAMETOL, + NFSERR_NOENT, + NFSERR_NOFILEHANDLE, + NFSERR_NOGRACE, + NFSERR_NOSPC, + NFSERR_NOTDIR, + NFSERR_NOTSUPP, + NFSERR_PERM, + NFSERR_RECLAIMBAD, + NFSERR_RECLAIMCONFLICT, + NFSERR_RESOURCE, + NFSERR_ROFS, + NFSERR_SERVERFAULT, + NFSERR_SHAREDENIED, + NFSERR_STALE, + NFSERR_STALECLIENTID, + NFSERR_SYMLINK, + NFSERR_WRONGSEC, + 0, +}; + +static short nfsv4err_openattr[] = { + NFSERR_IO, + NFSERR_ACCES, + NFSERR_BADHANDLE, + NFSERR_BADXDR, + NFSERR_DELAY, + NFSERR_DQUOT, + NFSERR_FHEXPIRED, + NFSERR_IO, + NFSERR_MOVED, + NFSERR_NOENT, + NFSERR_NOFILEHANDLE, + NFSERR_NOSPC, + NFSERR_NOTSUPP, + NFSERR_RESOURCE, + NFSERR_ROFS, + NFSERR_SERVERFAULT, + NFSERR_STALE, + 0, +}; + +static short nfsv4err_openconfirm[] = { + NFSERR_SERVERFAULT, + NFSERR_ADMINREVOKED, + NFSERR_BADHANDLE, + NFSERR_BADSEQID, + NFSERR_BADSTATEID, + NFSERR_BADXDR, + NFSERR_EXPIRED, + NFSERR_FHEXPIRED, + NFSERR_INVAL, + NFSERR_ISDIR, + NFSERR_MOVED, + NFSERR_NOFILEHANDLE, + NFSERR_OLDSTATEID, + NFSERR_RESOURCE, + NFSERR_SERVERFAULT, + NFSERR_STALE, + NFSERR_STALESTATEID, + 0, +}; + +static short 
nfsv4err_opendowngrade[] = { + NFSERR_SERVERFAULT, + NFSERR_ADMINREVOKED, + NFSERR_BADHANDLE, + NFSERR_BADSEQID, + NFSERR_BADSTATEID, + NFSERR_BADXDR, + NFSERR_EXPIRED, + NFSERR_FHEXPIRED, + NFSERR_INVAL, + NFSERR_MOVED, + NFSERR_NOFILEHANDLE, + NFSERR_OLDSTATEID, + NFSERR_RESOURCE, + NFSERR_SERVERFAULT, + NFSERR_STALE, + NFSERR_STALESTATEID, + 0, +}; + +static short nfsv4err_putfh[] = { + NFSERR_SERVERFAULT, + NFSERR_BADHANDLE, + NFSERR_BADXDR, + NFSERR_FHEXPIRED, + NFSERR_MOVED, + NFSERR_RESOURCE, + NFSERR_SERVERFAULT, + NFSERR_STALE, + NFSERR_WRONGSEC, + 0, +}; + +static short nfsv4err_putpubfh[] = { + NFSERR_SERVERFAULT, + NFSERR_RESOURCE, + NFSERR_SERVERFAULT, + NFSERR_WRONGSEC, + 0, +}; + +static short nfsv4err_putrootfh[] = { + NFSERR_SERVERFAULT, + NFSERR_RESOURCE, + NFSERR_SERVERFAULT, + NFSERR_WRONGSEC, + 0, +}; + +static short nfsv4err_read[] = { + NFSERR_IO, + NFSERR_ACCES, + NFSERR_ADMINREVOKED, + NFSERR_BADHANDLE, + NFSERR_BADSTATEID, + NFSERR_BADXDR, + NFSERR_DELAY, + NFSERR_EXPIRED, + NFSERR_FHEXPIRED, + NFSERR_GRACE, + NFSERR_IO, + NFSERR_INVAL, + NFSERR_ISDIR, + NFSERR_LEASEMOVED, + NFSERR_LOCKED, + NFSERR_MOVED, + NFSERR_NOFILEHANDLE, + NFSERR_NXIO, + NFSERR_OLDSTATEID, + NFSERR_OPENMODE, + NFSERR_RESOURCE, + NFSERR_SERVERFAULT, + NFSERR_STALE, + NFSERR_STALESTATEID, + 0, +}; + +static short nfsv4err_readdir[] = { + NFSERR_IO, + NFSERR_ACCES, + NFSERR_BADHANDLE, + NFSERR_BAD_COOKIE, + NFSERR_BADXDR, + NFSERR_DELAY, + NFSERR_FHEXPIRED, + NFSERR_INVAL, + NFSERR_IO, + NFSERR_MOVED, + NFSERR_NOFILEHANDLE, + NFSERR_NOTDIR, + NFSERR_NOTSAME, + NFSERR_RESOURCE, + NFSERR_SERVERFAULT, + NFSERR_STALE, + NFSERR_TOOSMALL, + 0, +}; + +static short nfsv4err_readlink[] = { + NFSERR_IO, + NFSERR_ACCES, + NFSERR_BADHANDLE, + NFSERR_DELAY, + NFSERR_FHEXPIRED, + NFSERR_INVAL, + NFSERR_IO, + NFSERR_ISDIR, + NFSERR_MOVED, + NFSERR_NOFILEHANDLE, + NFSERR_NOTSUPP, + NFSERR_RESOURCE, + NFSERR_SERVERFAULT, + NFSERR_STALE, + 0, +}; + +static short nfsv4err_remove[] = { + NFSERR_IO, + NFSERR_ACCES, + NFSERR_BADCHAR, + NFSERR_BADHANDLE, + NFSERR_BADNAME, + NFSERR_BADXDR, + NFSERR_DELAY, + NFSERR_FHEXPIRED, + NFSERR_FILEOPEN, + NFSERR_INVAL, + NFSERR_IO, + NFSERR_MOVED, + NFSERR_NAMETOL, + NFSERR_NOENT, + NFSERR_NOFILEHANDLE, + NFSERR_NOTDIR, + NFSERR_NOTEMPTY, + NFSERR_RESOURCE, + NFSERR_ROFS, + NFSERR_SERVERFAULT, + NFSERR_STALE, + 0, +}; + +static short nfsv4err_rename[] = { + NFSERR_IO, + NFSERR_ACCES, + NFSERR_BADCHAR, + NFSERR_BADHANDLE, + NFSERR_BADNAME, + NFSERR_BADXDR, + NFSERR_DELAY, + NFSERR_DQUOT, + NFSERR_EXIST, + NFSERR_FHEXPIRED, + NFSERR_FILEOPEN, + NFSERR_INVAL, + NFSERR_IO, + NFSERR_MOVED, + NFSERR_NAMETOL, + NFSERR_NOENT, + NFSERR_NOFILEHANDLE, + NFSERR_NOSPC, + NFSERR_NOTDIR, + NFSERR_NOTEMPTY, + NFSERR_RESOURCE, + NFSERR_ROFS, + NFSERR_SERVERFAULT, + NFSERR_STALE, + NFSERR_WRONGSEC, + NFSERR_XDEV, + 0, +}; + +static short nfsv4err_renew[] = { + NFSERR_SERVERFAULT, + NFSERR_ACCES, + NFSERR_ADMINREVOKED, + NFSERR_BADXDR, + NFSERR_CBPATHDOWN, + NFSERR_EXPIRED, + NFSERR_LEASEMOVED, + NFSERR_RESOURCE, + NFSERR_SERVERFAULT, + NFSERR_STALECLIENTID, + 0, +}; + +static short nfsv4err_restorefh[] = { + NFSERR_SERVERFAULT, + NFSERR_BADHANDLE, + NFSERR_FHEXPIRED, + NFSERR_MOVED, + NFSERR_RESOURCE, + NFSERR_RESTOREFH, + NFSERR_SERVERFAULT, + NFSERR_STALE, + NFSERR_WRONGSEC, + 0, +}; + +static short nfsv4err_savefh[] = { + NFSERR_SERVERFAULT, + NFSERR_BADHANDLE, + NFSERR_FHEXPIRED, + NFSERR_MOVED, + NFSERR_NOFILEHANDLE, + NFSERR_RESOURCE, + NFSERR_SERVERFAULT, + NFSERR_STALE, + 0, +}; + 
+static short nfsv4err_secinfo[] = { + NFSERR_SERVERFAULT, + NFSERR_ACCES, + NFSERR_BADCHAR, + NFSERR_BADHANDLE, + NFSERR_BADNAME, + NFSERR_BADXDR, + NFSERR_FHEXPIRED, + NFSERR_INVAL, + NFSERR_MOVED, + NFSERR_NAMETOL, + NFSERR_NOENT, + NFSERR_NOFILEHANDLE, + NFSERR_NOTDIR, + NFSERR_RESOURCE, + NFSERR_SERVERFAULT, + NFSERR_STALE, + 0, +}; + +static short nfsv4err_setattr[] = { + NFSERR_IO, + NFSERR_ACCES, + NFSERR_ADMINREVOKED, + NFSERR_ATTRNOTSUPP, + NFSERR_BADCHAR, + NFSERR_BADHANDLE, + NFSERR_BADOWNER, + NFSERR_BADSTATEID, + NFSERR_BADXDR, + NFSERR_DELAY, + NFSERR_DQUOT, + NFSERR_EXPIRED, + NFSERR_FBIG, + NFSERR_FHEXPIRED, + NFSERR_GRACE, + NFSERR_INVAL, + NFSERR_IO, + NFSERR_ISDIR, + NFSERR_LOCKED, + NFSERR_MOVED, + NFSERR_NOFILEHANDLE, + NFSERR_NOSPC, + NFSERR_OLDSTATEID, + NFSERR_OPENMODE, + NFSERR_PERM, + NFSERR_RESOURCE, + NFSERR_ROFS, + NFSERR_SERVERFAULT, + NFSERR_STALE, + NFSERR_STALESTATEID, + 0, +}; + +static short nfsv4err_setclientid[] = { + NFSERR_SERVERFAULT, + NFSERR_BADXDR, + NFSERR_CLIDINUSE, + NFSERR_INVAL, + NFSERR_RESOURCE, + NFSERR_SERVERFAULT, + 0, +}; + +static short nfsv4err_setclientidconfirm[] = { + NFSERR_SERVERFAULT, + NFSERR_BADXDR, + NFSERR_CLIDINUSE, + NFSERR_RESOURCE, + NFSERR_SERVERFAULT, + NFSERR_STALECLIENTID, + 0, +}; + +static short nfsv4err_verify[] = { + NFSERR_SERVERFAULT, + NFSERR_ACCES, + NFSERR_ATTRNOTSUPP, + NFSERR_BADCHAR, + NFSERR_BADHANDLE, + NFSERR_BADXDR, + NFSERR_DELAY, + NFSERR_FHEXPIRED, + NFSERR_INVAL, + NFSERR_MOVED, + NFSERR_NOFILEHANDLE, + NFSERR_NOTSAME, + NFSERR_RESOURCE, + NFSERR_SERVERFAULT, + NFSERR_STALE, + 0, +}; + +static short nfsv4err_write[] = { + NFSERR_IO, + NFSERR_ACCES, + NFSERR_ADMINREVOKED, + NFSERR_BADHANDLE, + NFSERR_BADSTATEID, + NFSERR_BADXDR, + NFSERR_DELAY, + NFSERR_DQUOT, + NFSERR_EXPIRED, + NFSERR_FBIG, + NFSERR_FHEXPIRED, + NFSERR_GRACE, + NFSERR_INVAL, + NFSERR_IO, + NFSERR_ISDIR, + NFSERR_LEASEMOVED, + NFSERR_LOCKED, + NFSERR_MOVED, + NFSERR_NOFILEHANDLE, + NFSERR_NOSPC, + NFSERR_NXIO, + NFSERR_OLDSTATEID, + NFSERR_OPENMODE, + NFSERR_RESOURCE, + NFSERR_ROFS, + NFSERR_SERVERFAULT, + NFSERR_STALE, + NFSERR_STALESTATEID, + 0, +}; + +static short nfsv4err_releaselockowner[] = { + NFSERR_SERVERFAULT, + NFSERR_ADMINREVOKED, + NFSERR_BADXDR, + NFSERR_EXPIRED, + NFSERR_LEASEMOVED, + NFSERR_LOCKSHELD, + NFSERR_RESOURCE, + NFSERR_SERVERFAULT, + NFSERR_STALECLIENTID, + 0, +}; + +static short *nfsrv_v4errmap[] = { + nfsv4err_null, + nfsv4err_null, + nfsv4err_null, + nfsv4err_access, + nfsv4err_close, + nfsv4err_commit, + nfsv4err_create, + nfsv4err_delegpurge, + nfsv4err_delegreturn, + nfsv4err_getattr, + nfsv4err_getfh, + nfsv4err_link, + nfsv4err_lock, + nfsv4err_lockt, + nfsv4err_locku, + nfsv4err_lookup, + nfsv4err_lookupp, + nfsv4err_nverify, + nfsv4err_open, + nfsv4err_openattr, + nfsv4err_openconfirm, + nfsv4err_opendowngrade, + nfsv4err_putfh, + nfsv4err_putpubfh, + nfsv4err_putrootfh, + nfsv4err_read, + nfsv4err_readdir, + nfsv4err_readlink, + nfsv4err_remove, + nfsv4err_rename, + nfsv4err_renew, + nfsv4err_restorefh, + nfsv4err_savefh, + nfsv4err_secinfo, + nfsv4err_setattr, + nfsv4err_setclientid, + nfsv4err_setclientidconfirm, + nfsv4err_verify, + nfsv4err_write, + nfsv4err_releaselockowner, +}; + +/* + * A fiddled version of m_adj() that ensures null fill to a long + * boundary and only trims off the back end + */ +APPLESTATIC void +nfsrv_adj(mbuf_t mp, int len, int nul) +{ + mbuf_t m; + int count, i; + char *cp; + + /* + * Trim from tail. 
Scan the mbuf chain, + * calculating its length and finding the last mbuf. + * If the adjustment only affects this mbuf, then just + * adjust and return. Otherwise, rescan and truncate + * after the remaining size. + */ + count = 0; + m = mp; + for (;;) { + count += mbuf_len(m); + if (mbuf_next(m) == NULL) + break; + m = mbuf_next(m); + } + if (mbuf_len(m) > len) { + mbuf_setlen(m, mbuf_len(m) - len); + if (nul > 0) { + cp = NFSMTOD(m, caddr_t) + mbuf_len(m) - nul; + for (i = 0; i < nul; i++) + *cp++ = '\0'; + } + return; + } + count -= len; + if (count < 0) + count = 0; + /* + * Correct length for chain is "count". + * Find the mbuf with last data, adjust its length, + * and toss data from remaining mbufs on chain. + */ + for (m = mp; m; m = mbuf_next(m)) { + if (mbuf_len(m) >= count) { + mbuf_setlen(m, count); + if (nul > 0) { + cp = NFSMTOD(m, caddr_t) + mbuf_len(m) - nul; + for (i = 0; i < nul; i++) + *cp++ = '\0'; + } + break; + } + count -= mbuf_len(m); + } + for (m = mbuf_next(m); m; m = mbuf_next(m)) + mbuf_setlen(m, 0); +} + +/* + * Make these functions instead of macros, so that the kernel text size + * doesn't get too big... + */ +APPLESTATIC void +nfsrv_wcc(struct nfsrv_descript *nd, int before_ret, + struct nfsvattr *before_nvap, int after_ret, struct nfsvattr *after_nvap) +{ + u_int32_t *tl; + + if (before_ret) { + NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED); + *tl = newnfs_false; + } else { + NFSM_BUILD(tl, u_int32_t *, 7 * NFSX_UNSIGNED); + *tl++ = newnfs_true; + txdr_hyper(before_nvap->na_size, tl); + tl += 2; + txdr_nfsv3time(&(before_nvap->na_mtime), tl); + tl += 2; + txdr_nfsv3time(&(before_nvap->na_ctime), tl); + } + nfsrv_postopattr(nd, after_ret, after_nvap); +} + +APPLESTATIC void +nfsrv_postopattr(struct nfsrv_descript *nd, int after_ret, + struct nfsvattr *after_nvap) +{ + u_int32_t *tl; + + NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED); + if (after_ret) + *tl = newnfs_false; + else { + *tl = newnfs_true; + nfsrv_fillattr(nd, after_nvap); + } +} + +/* + * Fill in file attributes for V2 and 3. For V4, call a separate + * routine that sifts through all the attribute bits. + */ +APPLESTATIC void +nfsrv_fillattr(struct nfsrv_descript *nd, struct nfsvattr *nvap) +{ + struct nfs_fattr *fp; + int fattr_size; + + /* + * Build space for the attribute structure. + */ + if (nd->nd_flag & ND_NFSV3) + fattr_size = NFSX_V3FATTR; + else + fattr_size = NFSX_V2FATTR; + NFSM_BUILD(fp, struct nfs_fattr *, fattr_size); + + /* + * Now just fill it all in. 
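+	 * For V3 the 64 bit fa3_ fields and nfsv3time encoding are used,
+	 * while for V2 the size is truncated to 32 bits and the older
+	 * fa2_ layout applies.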
+ */ + fp->fa_nlink = txdr_unsigned(nvap->na_nlink); + fp->fa_uid = txdr_unsigned(nvap->na_uid); + fp->fa_gid = txdr_unsigned(nvap->na_gid); + if (nd->nd_flag & ND_NFSV3) { + fp->fa_type = vtonfsv34_type(nvap->na_type); + fp->fa_mode = vtonfsv34_mode(nvap->na_mode); + txdr_hyper(nvap->na_size, &fp->fa3_size); + txdr_hyper(nvap->na_bytes, &fp->fa3_used); + fp->fa3_rdev.specdata1 = txdr_unsigned(NFSMAJOR(nvap->na_rdev)); + fp->fa3_rdev.specdata2 = txdr_unsigned(NFSMINOR(nvap->na_rdev)); + fp->fa3_fsid.nfsuquad[0] = 0; + fp->fa3_fsid.nfsuquad[1] = txdr_unsigned(nvap->na_fsid); + fp->fa3_fileid.nfsuquad[0] = 0; + fp->fa3_fileid.nfsuquad[1] = txdr_unsigned(nvap->na_fileid); + txdr_nfsv3time(&nvap->na_atime, &fp->fa3_atime); + txdr_nfsv3time(&nvap->na_mtime, &fp->fa3_mtime); + txdr_nfsv3time(&nvap->na_ctime, &fp->fa3_ctime); + } else { + fp->fa_type = vtonfsv2_type(nvap->na_type); + fp->fa_mode = vtonfsv2_mode(nvap->na_type, nvap->na_mode); + fp->fa2_size = txdr_unsigned(nvap->na_size); + fp->fa2_blocksize = txdr_unsigned(nvap->na_blocksize); + if (nvap->na_type == VFIFO) + fp->fa2_rdev = 0xffffffff; + else + fp->fa2_rdev = txdr_unsigned(nvap->na_rdev); + fp->fa2_blocks = txdr_unsigned(nvap->na_bytes / NFS_FABLKSIZE); + fp->fa2_fsid = txdr_unsigned(nvap->na_fsid); + fp->fa2_fileid = txdr_unsigned(nvap->na_fileid); + txdr_nfsv2time(&nvap->na_atime, &fp->fa2_atime); + txdr_nfsv2time(&nvap->na_mtime, &fp->fa2_mtime); + txdr_nfsv2time(&nvap->na_ctime, &fp->fa2_ctime); + } +} + +/* + * This function gets a file handle out of an mbuf list. + * It returns 0 for success, EBADRPC otherwise. + * If sets the third flagp argument to 1 if the file handle is + * the public file handle. + * For NFSv4, if the length is incorrect, set nd_repstat == NFSERR_BADHANDLE + */ +APPLESTATIC int +nfsrv_mtofh(struct nfsrv_descript *nd, struct nfsrvfh *fhp) +{ + u_int32_t *tl; + int error = 0, len, copylen; + + if (nd->nd_flag & (ND_NFSV3 | ND_NFSV4)) { + NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED); + len = fxdr_unsigned(int, *tl); + if (len == 0 && nfs_pubfhset && (nd->nd_flag & ND_NFSV3) && + nd->nd_procnum == NFSPROC_LOOKUP) { + nd->nd_flag |= ND_PUBLOOKUP; + return (0); + } + if (len < NFSRV_MINFH || len > NFSRV_MAXFH) { + if (nd->nd_flag & ND_NFSV4) { + if (len > 0 && len <= NFSX_V4FHMAX) { + error = nfsm_advance(nd, NFSM_RNDUP(len), -1); + if (error) + return (error); + nd->nd_repstat = NFSERR_BADHANDLE; + return (0); + } else { + return (EBADRPC); + } + } else { + return (EBADRPC); + } + } + copylen = len; + } else { + /* + * For NFSv2, the file handle is always 32 bytes on the + * wire, but this server only cares about the first + * NFSRV_MAXFH bytes. + */ + len = NFSX_V2FH; + copylen = NFSRV_MAXFH; + } + NFSM_DISSECT(tl, u_int32_t *, len); + if ((nd->nd_flag & ND_NFSV2) && nfs_pubfhset && + nd->nd_procnum == NFSPROC_LOOKUP && + !NFSBCMP((caddr_t)tl, nfs_v2pubfh, NFSX_V2FH)) { + nd->nd_flag |= ND_PUBLOOKUP; + return (0); + } + NFSBCOPY(tl, (caddr_t)fhp->nfsrvfh_data, copylen); + fhp->nfsrvfh_len = copylen; +nfsmout: + return (error); +} + +/* + * Map errnos to NFS error numbers. For Version 3 and 4 also filter out error + * numbers not specified for the associated procedure. + * NFSPROC_NOOP is a special case, where the high order bits of nd_repstat + * should be cleared. NFSPROC_NOOP is used to return errors when a valid + * RPC procedure is not involved. + * Returns the error number in XDR. 
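+ * (The value has already been put through txdr_unsigned().  For
+ * example, NFSERR_PERM from a V4 Setattr is passed through because
+ * it appears in nfsv4err_setattr[], whereas an error that is not in
+ * that table is replaced by the table's first entry, the default
+ * NFSERR_IO.)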
+ */ +APPLESTATIC int +nfsd_errmap(struct nfsrv_descript *nd) +{ + short *defaulterrp, *errp; + + if (!nd->nd_repstat) + return (0); + if (nd->nd_flag & (ND_NFSV3 | ND_NFSV4)) { + if (nd->nd_procnum == NFSPROC_NOOP) + return (txdr_unsigned(nd->nd_repstat & 0xffff)); + if (nd->nd_flag & ND_NFSV3) + errp = defaulterrp = nfsrv_v3errmap[nd->nd_procnum]; + else if (nd->nd_repstat == EBADRPC) + return (txdr_unsigned(NFSERR_BADXDR)); + else if (nd->nd_repstat == NFSERR_MINORVERMISMATCH || + nd->nd_repstat == NFSERR_OPILLEGAL) + return (txdr_unsigned(nd->nd_repstat)); + else + errp = defaulterrp = nfsrv_v4errmap[nd->nd_procnum]; + while (*++errp) + if (*errp == nd->nd_repstat) + return (txdr_unsigned(nd->nd_repstat)); + return (txdr_unsigned(*defaulterrp)); + } + if (nd->nd_repstat <= ELAST) + return (txdr_unsigned(nfsrv_v2errmap[nd->nd_repstat - 1])); + return (txdr_unsigned(NFSERR_IO)); +} + +/* + * Check to see if setting a uid/gid is permitted when creating a new + * file object. (Called when uid and/or gid is specified in the + * settable attributes for V4. + */ +APPLESTATIC int +nfsrv_checkuidgid(struct nfsrv_descript *nd, struct nfsvattr *nvap) +{ + + /* + * If not setting either uid nor gid, it's OK. + */ + if (NFSVNO_NOTSETUID(nvap) && NFSVNO_NOTSETGID(nvap)) + return (0); + if ((NFSVNO_ISSETUID(nvap) && nvap->na_uid == nfsrv_defaultuid) + || (NFSVNO_ISSETGID(nvap) && nvap->na_gid == nfsrv_defaultgid)) + return (NFSERR_BADOWNER); + if (nd->nd_cred->cr_uid == 0) + return (0); + if ((NFSVNO_ISSETUID(nvap) && nvap->na_uid != nd->nd_cred->cr_uid) || + (NFSVNO_ISSETGID(nvap) && nvap->na_gid != nd->nd_cred->cr_gid && + !groupmember(nvap->na_gid, nd->nd_cred))) + return (NFSERR_PERM); + return (0); +} + +/* + * and this routine fixes up the settable attributes for V4 if allowed + * by nfsrv_checkuidgid(). + */ +APPLESTATIC void +nfsrv_fixattr(struct nfsrv_descript *nd, vnode_t vp, + struct nfsvattr *nvap, NFSACL_T *aclp, NFSPROC_T *p, nfsattrbit_t *attrbitp, + struct nfsexstuff *exp) +{ + int change = 0; + struct nfsvattr nva; + uid_t tuid; + int error; + nfsattrbit_t nattrbits; + + /* + * Maybe this should be done for V2 and 3 but it never has been + * and nobody seems to be upset, so I think it's best not to change + * the V2 and 3 semantics. 
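+ * For V4, owner, owner_group and time attributes that this
+ * credential is permitted to set are applied here via
+ * nfsvno_setattr(), while bits for attributes it may not set are
+ * cleared from *attrbitp.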
+ */ + if ((nd->nd_flag & ND_NFSV4) == 0) + return; + NFSVNO_ATTRINIT(&nva); + NFSZERO_ATTRBIT(&nattrbits); + tuid = nd->nd_cred->cr_uid; + if (NFSISSET_ATTRBIT(attrbitp, NFSATTRBIT_OWNER) && + NFSVNO_ISSETUID(nvap) && + nvap->na_uid != nd->nd_cred->cr_uid) { + if (nd->nd_cred->cr_uid == 0) { + nva.na_uid = nvap->na_uid; + change++; + NFSSETBIT_ATTRBIT(&nattrbits, NFSATTRBIT_OWNER); + } else { + NFSCLRBIT_ATTRBIT(attrbitp, NFSATTRBIT_OWNER); + } + } + if (NFSISSET_ATTRBIT(attrbitp, NFSATTRBIT_TIMEACCESSSET) && + NFSVNO_ISSETATIME(nvap)) { + nva.na_atime = nvap->na_atime; + change++; + NFSSETBIT_ATTRBIT(&nattrbits, NFSATTRBIT_TIMEACCESSSET); + } + if (NFSISSET_ATTRBIT(attrbitp, NFSATTRBIT_TIMEMODIFYSET) && + NFSVNO_ISSETMTIME(nvap)) { + nva.na_mtime = nvap->na_mtime; + change++; + NFSSETBIT_ATTRBIT(&nattrbits, NFSATTRBIT_TIMEMODIFYSET); + } + if (NFSISSET_ATTRBIT(attrbitp, NFSATTRBIT_OWNERGROUP) && + NFSVNO_ISSETGID(nvap)) { + if (nvap->na_gid == nd->nd_cred->cr_gid || + groupmember(nvap->na_gid, nd->nd_cred)) { + nd->nd_cred->cr_uid = 0; + nva.na_gid = nvap->na_gid; + change++; + NFSSETBIT_ATTRBIT(&nattrbits, NFSATTRBIT_OWNERGROUP); + } else { + NFSCLRBIT_ATTRBIT(attrbitp, NFSATTRBIT_OWNERGROUP); + } + } + if (change) { + error = nfsvno_setattr(vp, &nva, nd->nd_cred, p, exp); + if (error) { + NFSCLRALL_ATTRBIT(attrbitp, &nattrbits); + } + } + if (NFSISSET_ATTRBIT(attrbitp, NFSATTRBIT_SIZE) && + NFSVNO_ISSETSIZE(nvap) && nvap->na_size != (u_quad_t)0) { + NFSCLRBIT_ATTRBIT(attrbitp, NFSATTRBIT_SIZE); + } +#ifdef NFS4_ACL_EXTATTR_NAME + if (NFSISSET_ATTRBIT(attrbitp, NFSATTRBIT_ACL) && + nfsrv_useacl != 0 && aclp != NULL) { + if (aclp->acl_cnt > 0) { + error = nfsrv_setacl(vp, aclp, nd->nd_cred, p); + if (error) { + NFSCLRBIT_ATTRBIT(attrbitp, NFSATTRBIT_ACL); + } + } + } else +#endif + NFSCLRBIT_ATTRBIT(attrbitp, NFSATTRBIT_ACL); + nd->nd_cred->cr_uid = tuid; +} + +/* + * Translate an ASCII hex digit to it's binary value. Return -1 if the + * char isn't a hex digit. + */ +static char +nfsrv_hexdigit(char c, int *err) +{ + + *err = 0; + if (c >= '0' && c <= '9') + return (c - '0'); + if (c >= 'a' && c <= 'f') + return (c - 'a' + ((char)10)); + if (c >= 'A' && c <= 'F') + return (c - 'A' + ((char)10)); + /* Not valid ! */ + *err = 1; + return (1); /* BOGUS */ +} + +/* + * Check to see if NFSERR_MOVED can be returned for this op. Return 1 iff + * it can be. + */ +APPLESTATIC int +nfsrv_errmoved(int op) +{ + short *errp; + + errp = nfsrv_v4errmap[op]; + while (*errp != 0) { + if (*errp == NFSERR_MOVED) + return (1); + errp++; + } + return (0); +} + +/* + * Fill in attributes for a Referral. + * (Return the number of bytes of XDR created.) + */ +APPLESTATIC int +nfsrv_putreferralattr(struct nfsrv_descript *nd, nfsattrbit_t *retbitp, + struct nfsreferral *refp, int getattr, int *reterrp) +{ + u_int32_t *tl, *retnump; + u_char *cp, *cp2; + int prefixnum, retnum = 0, i, len, bitpos, rderrbit = 0, nonrefbit = 0; + int fslocationsbit = 0; + nfsattrbit_t tmpbits, refbits; + + NFSREFERRAL_ATTRBIT(&refbits); + if (getattr) + NFSCLRBIT_ATTRBIT(&refbits, NFSATTRBIT_RDATTRERROR); + else if (NFSISSET_ATTRBIT(retbitp, NFSATTRBIT_RDATTRERROR)) + rderrbit = 1; + if (NFSISSET_ATTRBIT(retbitp, NFSATTRBIT_FSLOCATIONS)) + fslocationsbit = 1; + + /* + * Check for the case where unsupported referral attributes are + * requested. 
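+	 * If such attributes are requested but fs_locations was not, and
+	 * the problem cannot be flagged via rdattr_error, NFSERR_MOVED is
+	 * returned through *reterrp with no attributes generated.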
+ */ + NFSSET_ATTRBIT(&tmpbits, retbitp); + NFSCLRALL_ATTRBIT(&tmpbits, &refbits); + if (NFSNONZERO_ATTRBIT(&tmpbits)) + nonrefbit = 1; + + if (nonrefbit && !fslocationsbit && (getattr || !rderrbit)) { + *reterrp = NFSERR_MOVED; + return (0); + } + + /* + * Now we can fill in the attributes. + */ + NFSSET_ATTRBIT(&tmpbits, retbitp); + NFSCLRNOT_ATTRBIT(&tmpbits, &refbits); + + /* + * Put out the attribute bitmap for the ones being filled in + * and get the field for the number of attributes returned. + */ + prefixnum = nfsrv_putattrbit(nd, &tmpbits); + NFSM_BUILD(retnump, u_int32_t *, NFSX_UNSIGNED); + prefixnum += NFSX_UNSIGNED; + + /* + * Now, loop around filling in the attributes for each bit set. + */ + for (bitpos = 0; bitpos < NFSATTRBIT_MAX; bitpos++) { + if (NFSISSET_ATTRBIT(&tmpbits, bitpos)) { + switch (bitpos) { + case NFSATTRBIT_TYPE: + NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED); + *tl = txdr_unsigned(NFDIR); + retnum += NFSX_UNSIGNED; + break; + case NFSATTRBIT_FSID: + NFSM_BUILD(tl, u_int32_t *, NFSX_V4FSID); + *tl++ = 0; + *tl++ = txdr_unsigned(NFSV4ROOT_FSID0); + *tl++ = 0; + *tl = txdr_unsigned(NFSV4ROOT_REFERRAL); + retnum += NFSX_V4FSID; + break; + case NFSATTRBIT_RDATTRERROR: + NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED); + if (nonrefbit) + *tl = txdr_unsigned(NFSERR_MOVED); + else + *tl = 0; + retnum += NFSX_UNSIGNED; + break; + case NFSATTRBIT_FSLOCATIONS: + retnum += nfsm_strtom(nd, "/", 1); + NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED); + *tl = txdr_unsigned(refp->nfr_srvcnt); + retnum += NFSX_UNSIGNED; + cp = refp->nfr_srvlist; + for (i = 0; i < refp->nfr_srvcnt; i++) { + NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED); + *tl = txdr_unsigned(1); + retnum += NFSX_UNSIGNED; + cp2 = STRCHR(cp, ':'); + if (cp2 != NULL) + len = cp2 - cp; + else + len = 1; + retnum += nfsm_strtom(nd, cp, len); + if (cp2 != NULL) + cp = cp2 + 1; + cp2 = STRCHR(cp, ','); + if (cp2 != NULL) + len = cp2 - cp; + else + len = strlen(cp); + retnum += nfsm_strtom(nd, cp, len); + if (cp2 != NULL) + cp = cp2 + 1; + } + break; + case NFSATTRBIT_MOUNTEDONFILEID: + NFSM_BUILD(tl, u_int32_t *, NFSX_HYPER); + *tl++ = 0; + *tl = txdr_unsigned(refp->nfr_dfileno); + retnum += NFSX_HYPER; + break; + default: + printf("EEK! Bad V4 refattr bitpos=%d\n", bitpos); + }; + } + } + *retnump = txdr_unsigned(retnum); + return (retnum + prefixnum); +} + +/* + * Parse a file name out of a request. + */ +APPLESTATIC int +nfsrv_parsename(struct nfsrv_descript *nd, char *bufp, u_long *hashp, + NFSPATHLEN_T *outlenp) +{ + char *fromcp, *tocp, val = '\0'; + mbuf_t md; + int i; + int rem, len, error = 0, pubtype = 0, outlen = 0, percent = 0; + char digit; + u_int32_t *tl; + u_long hash = 0; + + if (hashp != NULL) + *hashp = 0; + tocp = bufp; + /* + * For V4, check for lookup parent. + * Otherwise, get the component name. + */ + if ((nd->nd_flag & ND_NFSV4) && nd->nd_procnum == NFSV4OP_LOOKUPP) { + *tocp++ = '.'; + hash += ((u_char)'.'); + *tocp++ = '.'; + hash += ((u_char)'.'); + outlen = 2; + } else { + /* + * First, get the name length. + */ + NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED); + len = fxdr_unsigned(int, *tl); + if (len > NFS_MAXNAMLEN) { + nd->nd_repstat = NFSERR_NAMETOL; + return (0); + } else if (len <= 0) { + nd->nd_repstat = NFSERR_INVAL; + return (0); + } + + /* + * Now, copy the component name into the buffer. 
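+		 * While copying, a simple additive hash of the name's bytes
+		 * is accumulated for the caller and, for public file handle
+		 * lookups of canonical paths, %xx hex escapes are decoded.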
+ */ + fromcp = nd->nd_dpos; + md = nd->nd_md; + rem = NFSMTOD(md, caddr_t) + mbuf_len(md) - fromcp; + for (i = 0; i < len; i++) { + while (rem == 0) { + md = mbuf_next(md); + if (md == NULL) + return (EBADRPC); + fromcp = NFSMTOD(md, caddr_t); + rem = mbuf_len(md); + } + if (*fromcp == '\0') { + nd->nd_repstat = EACCES; + return (0); + } + /* + * For lookups on the public filehandle, do some special + * processing on the name. (The public file handle is the + * root of the public file system for this server.) + */ + if (nd->nd_flag & ND_PUBLOOKUP) { + /* + * If the first char is ASCII, it is a canonical + * path, otherwise it is a native path. (RFC2054 + * doesn't actually state what it is if the first + * char isn't ASCII or 0x80, so I assume native.) + * pubtype == 1 -> native path + * pubtype == 2 -> canonical path + */ + if (i == 0) { + if (*fromcp & 0x80) { + /* + * Since RFC2054 doesn't indicate + * that a native path of just 0x80 + * isn't allowed, I'll replace the + * 0x80 with '/' instead of just + * throwing it away. + */ + *fromcp = '/'; + pubtype = 1; + } else { + pubtype = 2; + } + } + /* + * '/' only allowed in a native path + */ + if (*fromcp == '/' && pubtype != 1) { + nd->nd_repstat = EACCES; + return (0); + } + + /* + * For the special case of 2 hex digits after a + * '%' in an absolute path, calculate the value. + * percent == 1 -> indicates "get first hex digit" + * percent == 2 -> indicates "get second hex digit" + */ + if (percent > 0) { + digit = nfsrv_hexdigit(*fromcp, &error); + if (error) { + nd->nd_repstat = EACCES; + return (0); + } + if (percent == 1) { + val = (digit << 4); + percent = 2; + } else { + val += digit; + percent = 0; + *tocp++ = val; + hash += ((u_char)val); + outlen++; + } + } else { + if (*fromcp == '%' && pubtype == 2) { + /* + * Must be followed by 2 hex digits + */ + if ((len - i) < 3) { + nd->nd_repstat = EACCES; + return (0); + } + percent = 1; + } else { + *tocp++ = *fromcp; + hash += ((u_char)*fromcp); + outlen++; + } + } + } else { + /* + * Normal, non lookup on public, name. + */ + if (*fromcp == '/') { + if (nd->nd_flag & ND_NFSV4) + nd->nd_repstat = NFSERR_BADNAME; + else + nd->nd_repstat = EACCES; + return (0); + } + hash += ((u_char)*fromcp); + *tocp++ = *fromcp; + outlen++; + } + fromcp++; + rem--; + } + nd->nd_md = md; + nd->nd_dpos = fromcp; + i = NFSM_RNDUP(len) - len; + if (i > 0) { + if (rem >= i) { + nd->nd_dpos += i; + } else { + error = nfsm_advance(nd, i, rem); + if (error) + return (error); + } + } + + /* + * For v4, don't allow lookups of '.' or '..' and + * also check for non-utf8 strings. + */ + if (nd->nd_flag & ND_NFSV4) { + if ((outlen == 1 && bufp[0] == '.') || + (outlen == 2 && bufp[0] == '.' && + bufp[1] == '.')) { + nd->nd_repstat = NFSERR_BADNAME; + return (0); + } + if (nfsrv_checkutf8((u_int8_t *)bufp, outlen)) { + nd->nd_repstat = NFSERR_INVAL; + return (0); + } + } + } + *tocp = '\0'; + *outlenp = (size_t)outlen; + if (hashp != NULL) + *hashp = hash; +nfsmout: + return (error); +} + +/* + * Check the tcp socket sequence number has been acknowledged. + */ +int +nfsrv_checksockseqnum(struct socket *so, tcp_seq tcpseqval) +{ + tcp_seq maxseq, unaseq; + int error, ret; + + error = nfsrv_getsocksndseq(so, &maxseq, &unaseq); + if (error) + return (0); + ret = SEQ_GEQ(unaseq, tcpseqval); + return (ret); +} + +/* + * Get the tcp sequence number to be acknowledged. 
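+ * (The value is the maxseq returned by nfsrv_getsocksndseq() plus
+ * the byte count currently queued in the socket's send buffer.)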
+ */ +int +nfsrv_getsockseqnum(struct socket *so, tcp_seq *tcpseqp) +{ + tcp_seq maxseq, unaseq; + u_int sbcc; + int error; + + sbcc = so->so_snd.sb_cc; + error = nfsrv_getsocksndseq(so, &maxseq, &unaseq); + if (error) + return (0); + /* + * Set the seq# to a value that will + * be at least the end of the reply. + * When this sequence# is acknowledged + * by the client, the client has received + * the reply. + */ + *tcpseqp = sbcc + maxseq; + return (1); +} + +void +nfsd_init(void) +{ + int i; + static int inited = 0; + + if (inited) + return; + inited = 1; + + /* + * Initialize client queues. Don't free/reinitialize + * them when nfsds are restarted. + */ + for (i = 0; i < NFSCLIENTHASHSIZE; i++) + LIST_INIT(&nfsclienthash[i]); + for (i = 0; i < NFSLOCKHASHSIZE; i++) + LIST_INIT(&nfslockhash[i]); + + /* and the v2 pubfh should be all zeros */ + NFSBZERO(nfs_v2pubfh, NFSX_V2FH); +} + |