author     dfr <dfr@FreeBSD.org>  2008-03-26 15:23:12 +0000
committer  dfr <dfr@FreeBSD.org>  2008-03-26 15:23:12 +0000
commit     79d2dfdaa69db38c43daed9744a6dbd0568189b5 (patch)
tree       eaf6a0fa52bc76253126814ddab4cbf78722a8a5
parent     632e5d39f769e15274575347233e9d7aa364c0d6 (diff)
Add the new kernel-mode NFS Lock Manager. To use it instead of the
user-mode lock manager, build a kernel with the NFSLOCKD option and
add '-k' to 'rpc_lockd_flags' in rc.conf.

Highlights include:

* Thread-safe kernel RPC client - many threads can use the same RPC
  client handle safely with replies being de-multiplexed at the socket
  upcall (typically driven directly by the NIC interrupt) and handed off
  to whichever thread matches the reply. For UDP sockets, many RPC
  clients can share the same socket. This allows the use of a single
  privileged UDP port number to talk to an arbitrary number of remote
  hosts.

* Single-threaded kernel RPC server. Adding support for multi-threaded
  server would be relatively straightforward and would follow
  approximately the Solaris KPI. A single thread should be sufficient
  for the NLM since it should rarely block in normal operation.

* Kernel mode NLM server supporting cancel requests and granted
  callbacks. I've tested the NLM server reasonably extensively - it
  passes both my own tests and the NFS Connectathon locking tests
  running on Solaris, Mac OS X and Ubuntu Linux.

* Userland NLM client supported. While the NLM server doesn't have
  support for the local NFS client's locking needs, it does have to
  field async replies and granted callbacks from remote NLMs that the
  local client has contacted. We relay these replies to the userland
  rpc.lockd over a local domain RPC socket.

* Robust deadlock detection for the local lock manager. In particular
  it will detect deadlocks caused by a lock request that covers more
  than one blocking request. As required by the NLM protocol, all
  deadlock detection happens synchronously - a user is guaranteed that
  if a lock request isn't rejected immediately, the lock will eventually
  be granted. The old system allowed for a 'deferred deadlock' condition
  where a blocked lock request could wake up and find that some other
  deadlock-causing lock owner had beaten them to the lock.

* Since both local and remote locks are managed by the same kernel
  locking code, local and remote processes can safely use file locks for
  mutual exclusion. Local processes have no fairness advantage compared
  to remote processes when contending to lock a region that has just
  been unlocked - the local lock manager enforces a strict first-come
  first-served model for both local and remote lockers.

Sponsored by:   Isilon Systems
PR:             95247 107555 115524 116679
MFC after:      2 weeks
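As a rough usage sketch of the configuration described above: 'options NFSLOCKD' and the '-k' flag come from this change, while rpc_lockd_enable is the stock rc.conf knob and is assumed here rather than introduced by this commit.

    # kernel configuration (e.g. added to GENERIC)
    options NFSLOCKD            # kernel-mode NFS Lock Manager

    # /etc/rc.conf
    rpc_lockd_enable="YES"
    rpc_lockd_flags="-k"        # use the in-kernel NLM instead of the user-mode one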
-rw-r--r--  lib/libc/gen/lockf.c | 2
-rw-r--r--  lib/libc/sys/Symbol.map | 1
-rw-r--r--  lib/libc/sys/fcntl.2 | 9
-rw-r--r--  sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c | 20
-rw-r--r--  sys/compat/freebsd32/syscalls.master | 3
-rw-r--r--  sys/compat/linux/linux_file.c | 2
-rw-r--r--  sys/compat/svr4/svr4_fcntl.c | 4
-rw-r--r--  sys/conf/NOTES | 1
-rw-r--r--  sys/conf/files | 32
-rw-r--r--  sys/conf/options | 2
-rw-r--r--  sys/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c | 20
-rw-r--r--  sys/fs/msdosfs/msdosfs_vnops.c | 18
-rw-r--r--  sys/fs/tmpfs/tmpfs_vnops.c | 15
-rw-r--r--  sys/i386/ibcs2/ibcs2_fcntl.c | 3
-rw-r--r--  sys/kern/kern_descrip.c | 73
-rw-r--r--  sys/kern/kern_lockf.c | 2353
-rw-r--r--  sys/kern/syscalls.master | 3
-rw-r--r--  sys/kern/vnode_if.src | 13
-rw-r--r--  sys/nfs4client/nfs4_vnops.c | 18
-rw-r--r--  sys/nfsclient/nfs_lock.c | 1
-rw-r--r--  sys/nfsclient/nfs_vnops.c | 23
-rw-r--r--  sys/nlm/nlm.h | 119
-rw-r--r--  sys/nlm/nlm_prot.h | 448
-rw-r--r--  sys/nlm/nlm_prot_clnt.c | 372
-rw-r--r--  sys/nlm/nlm_prot_impl.c | 1783
-rw-r--r--  sys/nlm/nlm_prot_server.c | 762
-rw-r--r--  sys/nlm/nlm_prot_svc.c | 509
-rw-r--r--  sys/nlm/nlm_prot_xdr.c | 454
-rw-r--r--  sys/nlm/sm_inter.h | 112
-rw-r--r--  sys/nlm/sm_inter_xdr.c | 107
-rw-r--r--  sys/rpc/auth.h | 361
-rw-r--r--  sys/rpc/auth_none.c | 148
-rw-r--r--  sys/rpc/auth_unix.c | 299
-rw-r--r--  sys/rpc/authunix_prot.c | 122
-rw-r--r--  sys/rpc/clnt.h | 620
-rw-r--r--  sys/rpc/clnt_dg.c | 865
-rw-r--r--  sys/rpc/clnt_rc.c | 307
-rw-r--r--  sys/rpc/clnt_stat.h | 83
-rw-r--r--  sys/rpc/clnt_vc.c | 827
-rw-r--r--  sys/rpc/getnetconfig.c | 138
-rw-r--r--  sys/rpc/inet_ntop.c | 187
-rw-r--r--  sys/rpc/inet_pton.c | 224
-rw-r--r--  sys/rpc/netconfig.h | 99
-rw-r--r--  sys/rpc/nettype.h | 68
-rw-r--r--  sys/rpc/pmap_prot.h | 107
-rw-r--r--  sys/rpc/rpc.h | 125
-rw-r--r--  sys/rpc/rpc_callmsg.c | 200
-rw-r--r--  sys/rpc/rpc_com.h | 126
-rw-r--r--  sys/rpc/rpc_generic.c | 716
-rw-r--r--  sys/rpc/rpc_msg.h | 214
-rw-r--r--  sys/rpc/rpc_prot.c | 348
-rw-r--r--  sys/rpc/rpcb_clnt.c | 1382
-rw-r--r--  sys/rpc/rpcb_clnt.h | 89
-rw-r--r--  sys/rpc/rpcb_prot.c | 244
-rw-r--r--  sys/rpc/rpcb_prot.h | 579
-rw-r--r--  sys/rpc/svc.c | 574
-rw-r--r--  sys/rpc/svc.h | 614
-rw-r--r--  sys/rpc/svc_auth.c | 133
-rw-r--r--  sys/rpc/svc_auth.h | 67
-rw-r--r--  sys/rpc/svc_auth_unix.c | 144
-rw-r--r--  sys/rpc/svc_dg.c | 334
-rw-r--r--  sys/rpc/svc_generic.c | 407
-rw-r--r--  sys/rpc/svc_vc.c | 746
-rw-r--r--  sys/rpc/types.h | 24
-rw-r--r--  sys/rpc/xdr.h | 368
-rw-r--r--  sys/sys/fcntl.h | 26
-rw-r--r--  sys/sys/lockf.h | 88
-rw-r--r--  sys/ufs/ufs/ufs_vnops.c | 21
-rw-r--r--  sys/xdr/xdr.c | 816
-rw-r--r--  sys/xdr/xdr_array.c | 155
-rw-r--r--  sys/xdr/xdr_mbuf.c | 238
-rw-r--r--  sys/xdr/xdr_mem.c | 232
-rw-r--r--  sys/xdr/xdr_reference.c | 135
-rw-r--r--  sys/xdr/xdr_sizeof.c | 162
-rw-r--r--  usr.sbin/Makefile | 1
-rw-r--r--  usr.sbin/clear_locks/Makefile | 8
-rw-r--r--  usr.sbin/clear_locks/clear_locks.8 | 51
-rw-r--r--  usr.sbin/clear_locks/clear_locks.c | 70
-rw-r--r--  usr.sbin/rpc.lockd/lockd.c | 268
-rw-r--r--  usr.sbin/rpc.lockd/rpc.lockd.8 | 6
80 files changed, 20923 insertions, 525 deletions
diff --git a/lib/libc/gen/lockf.c b/lib/libc/gen/lockf.c
index c448f18..e79e162 100644
--- a/lib/libc/gen/lockf.c
+++ b/lib/libc/gen/lockf.c
@@ -74,7 +74,7 @@ lockf(filedes, function, size)
fl.l_type = F_WRLCK;
if (_fcntl(filedes, F_GETLK, &fl) == -1)
return (-1);
- if (fl.l_type == F_UNLCK || fl.l_pid == getpid())
+ if (fl.l_type == F_UNLCK || (fl.l_sysid == 0 && fl.l_pid == getpid()))
return (0);
errno = EAGAIN;
return (-1);
diff --git a/lib/libc/sys/Symbol.map b/lib/libc/sys/Symbol.map
index 6c4eb40..5697403 100644
--- a/lib/libc/sys/Symbol.map
+++ b/lib/libc/sys/Symbol.map
@@ -960,4 +960,5 @@ FBSDprivate_1.0 {
_writev;
__sys_writev;
__error_unthreaded;
+ nlm_syscall;
};
diff --git a/lib/libc/sys/fcntl.2 b/lib/libc/sys/fcntl.2
index aef53e2..a16724c 100644
--- a/lib/libc/sys/fcntl.2
+++ b/lib/libc/sys/fcntl.2
@@ -189,6 +189,7 @@ struct flock {
pid_t l_pid; /* lock owner */
short l_type; /* lock type: read/write, etc. */
short l_whence; /* type of l_start */
+ int l_sysid; /* remote system id or zero for local */
};
.Ed
The commands available for advisory record locking are as follows:
@@ -276,9 +277,13 @@ is negative,
means end edge of the region.
The
.Fa l_pid
-field is only used with
+and
+.Fa l_sysid
+fields are only used with
.Dv F_GETLK
-to return the process ID of the process holding a blocking lock.
+to return the process ID of the process holding a blocking lock and
+the system ID of the system that owns that process.
+Locks created by the local system will have a system ID of zero.
After a successful
.Dv F_GETLK
request, the value of
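A minimal userland sketch of the F_GETLK semantics documented above; the helper name is illustrative and only the flock fields shown in the structure above are relied on.

    #include <sys/types.h>
    #include <fcntl.h>
    #include <stdio.h>

    /* Report which process, local or remote, would block a write lock. */
    static void
    who_blocks(int fd)
    {
        struct flock fl = {
            .l_type = F_WRLCK,
            .l_whence = SEEK_SET,
            .l_start = 0,
            .l_len = 0,                 /* to end of file */
        };

        if (fcntl(fd, F_GETLK, &fl) == -1) {
            perror("F_GETLK");
            return;
        }
        if (fl.l_type == F_UNLCK)
            printf("no conflicting lock\n");
        else if (fl.l_sysid == 0)
            printf("blocked by local pid %d\n", (int)fl.l_pid);
        else
            printf("blocked by pid %d on remote system %d\n",
                (int)fl.l_pid, fl.l_sysid);
    }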
diff --git a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c
index 4008178..088103a 100644
--- a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c
+++ b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c
@@ -3551,6 +3551,25 @@ zfs_freebsd_advlock(ap)
return (lf_advlock(ap, &(zp->z_lockf), zp->z_phys->zp_size));
}
+/*
+ * Advisory record locking support
+ */
+static int
+zfs_freebsd_advlockasync(ap)
+ struct vop_advlockasync_args /* {
+ struct vnode *a_vp;
+ caddr_t a_id;
+ int a_op;
+ struct flock *a_fl;
+ int a_flags;
+ struct task *a_task;
+ } */ *ap;
+{
+ znode_t *zp = VTOZ(ap->a_vp);
+
+ return (lf_advlockasync(ap, &(zp->z_lockf), zp->z_phys->zp_size));
+}
+
struct vop_vector zfs_vnodeops;
struct vop_vector zfs_fifoops;
@@ -3584,6 +3603,7 @@ struct vop_vector zfs_vnodeops = {
.vop_remove = zfs_freebsd_remove,
.vop_rename = zfs_freebsd_rename,
.vop_advlock = zfs_freebsd_advlock,
+ .vop_advlockasync = zfs_freebsd_advlockasync,
.vop_pathconf = zfs_freebsd_pathconf,
.vop_bmap = VOP_EOPNOTSUPP,
.vop_fid = zfs_freebsd_fid,
diff --git a/sys/compat/freebsd32/syscalls.master b/sys/compat/freebsd32/syscalls.master
index f74afc4..889f17b 100644
--- a/sys/compat/freebsd32/syscalls.master
+++ b/sys/compat/freebsd32/syscalls.master
@@ -274,7 +274,8 @@
151 AUE_NULL UNIMPL sem_lock (BSD/OS 2.x)
152 AUE_NULL UNIMPL sem_wakeup (BSD/OS 2.x)
153 AUE_NULL UNIMPL asyncdaemon (BSD/OS 2.x)
-154 AUE_NULL UNIMPL nosys
+; 154 is initialised by the NLM code, if present.
+154 AUE_NULL UNIMPL nlm_syscall
; 155 is initialized by the NFS code, if present.
; XXX this is a problem!!!
155 AUE_NFS_SVC UNIMPL nfssvc
diff --git a/sys/compat/linux/linux_file.c b/sys/compat/linux/linux_file.c
index 2a12a67..85a0cca 100644
--- a/sys/compat/linux/linux_file.c
+++ b/sys/compat/linux/linux_file.c
@@ -1051,6 +1051,7 @@ linux_to_bsd_flock(struct l_flock *linux_flock, struct flock *bsd_flock)
bsd_flock->l_start = (off_t)linux_flock->l_start;
bsd_flock->l_len = (off_t)linux_flock->l_len;
bsd_flock->l_pid = (pid_t)linux_flock->l_pid;
+ bsd_flock->l_sysid = 0;
}
static void
@@ -1107,6 +1108,7 @@ linux_to_bsd_flock64(struct l_flock64 *linux_flock, struct flock *bsd_flock)
bsd_flock->l_start = (off_t)linux_flock->l_start;
bsd_flock->l_len = (off_t)linux_flock->l_len;
bsd_flock->l_pid = (pid_t)linux_flock->l_pid;
+ bsd_flock->l_sysid = 0;
}
static void
diff --git a/sys/compat/svr4/svr4_fcntl.c b/sys/compat/svr4/svr4_fcntl.c
index ed541bd..02032d6 100644
--- a/sys/compat/svr4/svr4_fcntl.c
+++ b/sys/compat/svr4/svr4_fcntl.c
@@ -193,7 +193,7 @@ svr4_to_bsd_flock(iflp, oflp)
oflp->l_start = (off_t) iflp->l_start;
oflp->l_len = (off_t) iflp->l_len;
oflp->l_pid = (pid_t) iflp->l_pid;
-
+ oflp->l_sysid = iflp->l_sysid;
}
static void
@@ -219,7 +219,7 @@ bsd_to_svr4_flock64(iflp, oflp)
oflp->l_whence = (short) iflp->l_whence;
oflp->l_start = (svr4_off64_t) iflp->l_start;
oflp->l_len = (svr4_off64_t) iflp->l_len;
- oflp->l_sysid = 0;
+ oflp->l_sysid = iflp->l_sysid;
oflp->l_pid = (svr4_pid_t) iflp->l_pid;
}
diff --git a/sys/conf/NOTES b/sys/conf/NOTES
index b6be119..f0e422b 100644
--- a/sys/conf/NOTES
+++ b/sys/conf/NOTES
@@ -957,6 +957,7 @@ options FDESCFS #File descriptor filesystem
options HPFS #OS/2 File system
options MSDOSFS #MS DOS File System (FAT, FAT32)
options NFSSERVER #Network File System server
+options NFSLOCKD #Network Lock Manager
options NTFS #NT File System
options NULLFS #NULL filesystem
# Broken (depends on NCP):
diff --git a/sys/conf/files b/sys/conf/files
index eac57fa..3f1c6e0 100644
--- a/sys/conf/files
+++ b/sys/conf/files
@@ -2031,6 +2031,12 @@ nfsserver/nfs_srvsock.c optional nfsserver
nfsserver/nfs_srvcache.c optional nfsserver
nfsserver/nfs_srvsubs.c optional nfsserver
nfsserver/nfs_syscalls.c optional nfsserver
+nlm/nlm_prot_clnt.c optional nfslockd
+nlm/nlm_prot_impl.c optional nfslockd
+nlm/nlm_prot_server.c optional nfslockd
+nlm/nlm_prot_svc.c optional nfslockd
+nlm/nlm_prot_xdr.c optional nfslockd
+nlm/sm_inter_xdr.c optional nfslockd
# crypto support
opencrypto/cast.c optional crypto | ipsec
opencrypto/criov.c optional crypto
@@ -2057,7 +2063,27 @@ pci/intpm.c optional intpm pci
pci/ncr.c optional ncr pci
pci/nfsmb.c optional nfsmb pci
pci/viapm.c optional viapm pci
+rpc/auth_none.c optional krpc | nfslockd
+rpc/auth_unix.c optional krpc | nfslockd
+rpc/authunix_prot.c optional krpc | nfslockd
+rpc/clnt_dg.c optional krpc | nfslockd
+rpc/clnt_rc.c optional krpc | nfslockd
+rpc/clnt_vc.c optional krpc | nfslockd
+rpc/getnetconfig.c optional krpc | nfslockd
+rpc/inet_ntop.c optional krpc | nfslockd
+rpc/inet_pton.c optional krpc | nfslockd
+rpc/rpc_callmsg.c optional krpc | nfslockd
+rpc/rpc_generic.c optional krpc | nfslockd
+rpc/rpc_prot.c optional krpc | nfslockd
+rpc/rpcb_clnt.c optional krpc | nfslockd
+rpc/rpcb_prot.c optional krpc | nfslockd
rpc/rpcclnt.c optional nfsclient
+rpc/svc.c optional krpc | nfslockd
+rpc/svc_auth.c optional krpc | nfslockd
+rpc/svc_auth_unix.c optional krpc | nfslockd
+rpc/svc_dg.c optional krpc | nfslockd
+rpc/svc_generic.c optional krpc | nfslockd
+rpc/svc_vc.c optional krpc | nfslockd
security/audit/audit.c optional audit
security/audit/audit_arg.c optional audit
security/audit/audit_bsm.c optional audit
@@ -2142,6 +2168,12 @@ vm/vm_reserv.c standard
vm/vm_unix.c standard
vm/vm_zeroidle.c standard
vm/vnode_pager.c standard
+xdr/xdr.c optional krpc | nfslockd
+xdr/xdr_array.c optional krpc | nfslockd
+xdr/xdr_mbuf.c optional krpc | nfslockd
+xdr/xdr_mem.c optional krpc | nfslockd
+xdr/xdr_reference.c optional krpc | nfslockd
+xdr/xdr_sizeof.c optional krpc | nfslockd
#
gnu/fs/xfs/xfs_alloc.c optional xfs \
compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" \
diff --git a/sys/conf/options b/sys/conf/options
index 77710be..c15a7eb 100644
--- a/sys/conf/options
+++ b/sys/conf/options
@@ -398,6 +398,8 @@ TCP_SIGNATURE opt_inet.h
DEV_VLAN opt_vlan.h
VLAN_ARRAY opt_vlan.h
XBONEHACK
+KRPC
+NFSLOCKD
#
# SCTP
diff --git a/sys/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c b/sys/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c
index 4008178..088103a 100644
--- a/sys/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c
+++ b/sys/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c
@@ -3551,6 +3551,25 @@ zfs_freebsd_advlock(ap)
return (lf_advlock(ap, &(zp->z_lockf), zp->z_phys->zp_size));
}
+/*
+ * Advisory record locking support
+ */
+static int
+zfs_freebsd_advlockasync(ap)
+ struct vop_advlockasync_args /* {
+ struct vnode *a_vp;
+ caddr_t a_id;
+ int a_op;
+ struct flock *a_fl;
+ int a_flags;
+ struct task *a_task;
+ } */ *ap;
+{
+ znode_t *zp = VTOZ(ap->a_vp);
+
+ return (lf_advlockasync(ap, &(zp->z_lockf), zp->z_phys->zp_size));
+}
+
struct vop_vector zfs_vnodeops;
struct vop_vector zfs_fifoops;
@@ -3584,6 +3603,7 @@ struct vop_vector zfs_vnodeops = {
.vop_remove = zfs_freebsd_remove,
.vop_rename = zfs_freebsd_rename,
.vop_advlock = zfs_freebsd_advlock,
+ .vop_advlockasync = zfs_freebsd_advlockasync,
.vop_pathconf = zfs_freebsd_pathconf,
.vop_bmap = VOP_EOPNOTSUPP,
.vop_fid = zfs_freebsd_fid,
diff --git a/sys/fs/msdosfs/msdosfs_vnops.c b/sys/fs/msdosfs/msdosfs_vnops.c
index d12ff51..7a89f7b 100644
--- a/sys/fs/msdosfs/msdosfs_vnops.c
+++ b/sys/fs/msdosfs/msdosfs_vnops.c
@@ -83,6 +83,7 @@
* Prototypes for MSDOSFS vnode operations
*/
static vop_advlock_t msdosfs_advlock;
+static vop_advlockasync_t msdosfs_advlockasync;
static vop_create_t msdosfs_create;
static vop_mknod_t msdosfs_mknod;
static vop_open_t msdosfs_open;
@@ -1963,6 +1964,22 @@ msdosfs_advlock(ap)
}
static int
+msdosfs_advlockasync(ap)
+ struct vop_advlockasync_args /* {
+ struct vnode *a_vp;
+ u_char a_id;
+ int a_op;
+ struct flock *a_fl;
+ int a_flags;
+ struct task *a_task;
+ } */ *ap;
+{
+ struct denode *dep = VTODE(ap->a_vp);
+
+ return (lf_advlockasync(ap, &dep->de_lockf, dep->de_FileSize));
+}
+
+static int
msdosfs_vptofh(ap)
struct vop_vptofh_args /* {
struct vnode *a_vp;
@@ -1987,6 +2004,7 @@ struct vop_vector msdosfs_vnodeops = {
.vop_access = msdosfs_access,
.vop_advlock = msdosfs_advlock,
+ .vop_advlockasync = msdosfs_advlockasync,
.vop_bmap = msdosfs_bmap,
.vop_cachedlookup = msdosfs_lookup,
.vop_open = msdosfs_open,
diff --git a/sys/fs/tmpfs/tmpfs_vnops.c b/sys/fs/tmpfs/tmpfs_vnops.c
index ff466ab..f50af94 100644
--- a/sys/fs/tmpfs/tmpfs_vnops.c
+++ b/sys/fs/tmpfs/tmpfs_vnops.c
@@ -1446,6 +1446,20 @@ tmpfs_advlock(struct vop_advlock_args *v)
/* --------------------------------------------------------------------- */
static int
+tmpfs_advlockasync(struct vop_advlockasync_args *v)
+{
+ struct vnode *vp = v->a_vp;
+
+ struct tmpfs_node *node;
+
+ node = VP_TO_TMPFS_NODE(vp);
+
+ return lf_advlockasync(v, &node->tn_lockf, node->tn_size);
+}
+
+/* --------------------------------------------------------------------- */
+
+static int
tmpfs_vptofh(struct vop_vptofh_args *ap)
{
struct tmpfs_fid *tfhp;
@@ -1493,6 +1507,7 @@ struct vop_vector tmpfs_vnodeop_entries = {
.vop_print = tmpfs_print,
.vop_pathconf = tmpfs_pathconf,
.vop_advlock = tmpfs_advlock,
+ .vop_advlockasync = tmpfs_advlockasync,
.vop_vptofh = tmpfs_vptofh,
.vop_bmap = VOP_EOPNOTSUPP,
};
diff --git a/sys/i386/ibcs2/ibcs2_fcntl.c b/sys/i386/ibcs2/ibcs2_fcntl.c
index fcdc714..6875aef 100644
--- a/sys/i386/ibcs2/ibcs2_fcntl.c
+++ b/sys/i386/ibcs2/ibcs2_fcntl.c
@@ -93,7 +93,7 @@ cvt_flock2iflock(flp, iflp)
iflp->l_whence = (short)flp->l_whence;
iflp->l_start = (ibcs2_off_t)flp->l_start;
iflp->l_len = (ibcs2_off_t)flp->l_len;
- iflp->l_sysid = 0;
+ iflp->l_sysid = flp->l_sysid;
iflp->l_pid = (ibcs2_pid_t)flp->l_pid;
}
@@ -127,6 +127,7 @@ cvt_iflock2flock(iflp, flp)
break;
}
flp->l_whence = iflp->l_whence;
+ flp->l_sysid = iflp->l_sysid;
}
/* convert iBCS2 mode into NetBSD mode */
diff --git a/sys/kern/kern_descrip.c b/sys/kern/kern_descrip.c
index 9d20ec5..791239d 100644
--- a/sys/kern/kern_descrip.c
+++ b/sys/kern/kern_descrip.c
@@ -320,28 +320,67 @@ int
fcntl(struct thread *td, struct fcntl_args *uap)
{
struct flock fl;
+ struct oflock ofl;
intptr_t arg;
int error;
+ int cmd;
error = 0;
+ cmd = uap->cmd;
switch (uap->cmd) {
- case F_GETLK:
- case F_SETLK:
- case F_SETLKW:
- error = copyin((void *)(intptr_t)uap->arg, &fl, sizeof(fl));
+ case F_OGETLK:
+ case F_OSETLK:
+ case F_OSETLKW:
+ /*
+ * Convert old flock structure to new.
+ */
+ error = copyin((void *)(intptr_t)uap->arg, &ofl, sizeof(ofl));
+ fl.l_start = ofl.l_start;
+ fl.l_len = ofl.l_len;
+ fl.l_pid = ofl.l_pid;
+ fl.l_type = ofl.l_type;
+ fl.l_whence = ofl.l_whence;
+ fl.l_sysid = 0;
+
+ switch (uap->cmd) {
+ case F_OGETLK:
+ cmd = F_GETLK;
+ break;
+ case F_OSETLK:
+ cmd = F_SETLK;
+ break;
+ case F_OSETLKW:
+ cmd = F_SETLKW;
+ break;
+ }
arg = (intptr_t)&fl;
break;
+ case F_GETLK:
+ case F_SETLK:
+ case F_SETLKW:
+ case F_SETLK_REMOTE:
+ error = copyin((void *)(intptr_t)uap->arg, &fl, sizeof(fl));
+ arg = (intptr_t)&fl;
+ break;
default:
arg = uap->arg;
break;
}
if (error)
return (error);
- error = kern_fcntl(td, uap->fd, uap->cmd, arg);
+ error = kern_fcntl(td, uap->fd, cmd, arg);
if (error)
return (error);
- if (uap->cmd == F_GETLK)
+ if (uap->cmd == F_OGETLK) {
+ ofl.l_start = fl.l_start;
+ ofl.l_len = fl.l_len;
+ ofl.l_pid = fl.l_pid;
+ ofl.l_type = fl.l_type;
+ ofl.l_whence = fl.l_whence;
+ error = copyout(&ofl, (void *)(intptr_t)uap->arg, sizeof(ofl));
+ } else if (uap->cmd == F_GETLK) {
error = copyout(&fl, (void *)(intptr_t)uap->arg, sizeof(fl));
+ }
return (error);
}
@@ -499,11 +538,19 @@ kern_fcntl(struct thread *td, int fd, int cmd, intptr_t arg)
fdrop(fp, td);
break;
+ case F_SETLK_REMOTE:
+ error = priv_check(td, PRIV_NFS_LOCKD);
+ if (error)
+ return (error);
+ flg = F_REMOTE;
+ goto do_setlk;
+
case F_SETLKW:
flg |= F_WAIT;
/* FALLTHROUGH F_SETLK */
case F_SETLK:
+ do_setlk:
FILEDESC_SLOCK(fdp);
if ((fp = fdtofp(fd, fdp)) == NULL) {
FILEDESC_SUNLOCK(fdp);
@@ -559,7 +606,19 @@ kern_fcntl(struct thread *td, int fd, int cmd, intptr_t arg)
break;
case F_UNLCK:
error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_UNLCK,
- flp, F_POSIX);
+ flp, flg);
+ break;
+ case F_UNLCKSYS:
+ /*
+ * Temporary api for testing remote lock
+ * infrastructure.
+ */
+ if (flg != F_REMOTE) {
+ error = EINVAL;
+ break;
+ }
+ error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader,
+ F_UNLCKSYS, flp, flg);
break;
default:
error = EINVAL;
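The F_SETLK_REMOTE and F_UNLCKSYS paths added above are intended for the NLM rather than ordinary applications and require the PRIV_NFS_LOCKD privilege. A hedged sketch of how a privileged caller such as rpc.lockd could exercise them; remote_pid and remote_sysid are hypothetical values supplied by the NLM protocol layer.

    #include <sys/types.h>
    #include <err.h>
    #include <fcntl.h>

    /*
     * Illustrative only: register a byte-range lock on behalf of a remote
     * NLM peer, then drop every lock held by that peer in a single call.
     */
    static void
    remote_lock_sketch(int fd, pid_t remote_pid, int remote_sysid)
    {
        struct flock fl = {
            .l_type = F_RDLCK,
            .l_whence = SEEK_SET,
            .l_start = 0,
            .l_len = 100,
            .l_pid = remote_pid,
            .l_sysid = remote_sysid,
        };

        if (fcntl(fd, F_SETLK_REMOTE, &fl) == -1)
            err(1, "F_SETLK_REMOTE");

        /* Setting l_type to F_UNLCKSYS releases all locks owned by l_sysid. */
        fl.l_type = F_UNLCKSYS;
        if (fcntl(fd, F_SETLK_REMOTE, &fl) == -1)
            err(1, "F_UNLCKSYS");
    }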
diff --git a/sys/kern/kern_lockf.c b/sys/kern/kern_lockf.c
index 95ac99d..9ccee35 100644
--- a/sys/kern/kern_lockf.c
+++ b/sys/kern/kern_lockf.c
@@ -1,4 +1,30 @@
/*-
+ * Copyright (c) 2008 Isilon Inc http://www.isilon.com/
+ * Authors: Doug Rabson <dfr@rabson.org>
+ * Developed with Red Inc: Alfred Perlstein <alfred@freebsd.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+/*-
* Copyright (c) 1982, 1986, 1989, 1993
* The Regents of the University of California. All rights reserved.
*
@@ -39,23 +65,20 @@ __FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
+#include <sys/hash.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/proc.h>
+#include <sys/sx.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>
-
-/*
- * This variable controls the maximum number of processes that will
- * be checked in doing deadlock detection.
- */
-static int maxlockdepth = MAXDEPTH;
+#include <sys/taskqueue.h>
#ifdef LOCKF_DEBUG
#include <sys/sysctl.h>
@@ -63,53 +86,344 @@ static int maxlockdepth = MAXDEPTH;
#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
-
-static int lockf_debug = 0;
+static int lockf_debug = 0; /* control debug output */
SYSCTL_INT(_debug, OID_AUTO, lockf_debug, CTLFLAG_RW, &lockf_debug, 0, "");
#endif
MALLOC_DEFINE(M_LOCKF, "lockf", "Byte-range locking structures");
-#define NOLOCKF (struct lockf *)0
+struct owner_edge;
+struct owner_vertex;
+struct owner_vertex_list;
+struct owner_graph;
+
+#define NOLOCKF (struct lockf_entry *)0
#define SELF 0x1
#define OTHERS 0x2
-static int lf_clearlock(struct lockf *, struct lockf **);
-static int lf_findoverlap(struct lockf *,
- struct lockf *, int, struct lockf ***, struct lockf **);
-static struct lockf *
- lf_getblock(struct lockf *);
-static int lf_getlock(struct lockf *, struct flock *);
-static int lf_setlock(struct lockf *, struct vnode *, struct lockf **);
-static void lf_split(struct lockf *, struct lockf *, struct lockf **);
-static void lf_wakelock(struct lockf *);
-#ifdef LOCKF_DEBUG
-static void lf_print(char *, struct lockf *);
-static void lf_printlist(char *, struct lockf *);
+static void lf_init(void *);
+static int lf_hash_owner(caddr_t, struct flock *, int);
+static int lf_owner_matches(struct lock_owner *, caddr_t, struct flock *,
+ int);
+static struct lockf_entry *
+ lf_alloc_lock(struct lock_owner *);
+static void lf_free_lock(struct lockf_entry *);
+static int lf_clearlock(struct lockf *, struct lockf_entry *);
+static int lf_overlaps(struct lockf_entry *, struct lockf_entry *);
+static int lf_blocks(struct lockf_entry *, struct lockf_entry *);
+static void lf_free_edge(struct lockf_edge *);
+static struct lockf_edge *
+ lf_alloc_edge(void);
+static void lf_alloc_vertex(struct lockf_entry *);
+static int lf_add_edge(struct lockf_entry *, struct lockf_entry *);
+static void lf_remove_edge(struct lockf_edge *);
+static void lf_remove_outgoing(struct lockf_entry *);
+static void lf_remove_incoming(struct lockf_entry *);
+static int lf_add_outgoing(struct lockf *, struct lockf_entry *);
+static int lf_add_incoming(struct lockf *, struct lockf_entry *);
+static int lf_findoverlap(struct lockf_entry **, struct lockf_entry *,
+ int);
+static struct lockf_entry *
+ lf_getblock(struct lockf *, struct lockf_entry *);
+static int lf_getlock(struct lockf *, struct lockf_entry *, struct flock *);
+static void lf_insert_lock(struct lockf *, struct lockf_entry *);
+static void lf_wakeup_lock(struct lockf *, struct lockf_entry *);
+static void lf_update_dependancies(struct lockf *, struct lockf_entry *,
+ int all, struct lockf_entry_list *);
+static void lf_set_start(struct lockf *, struct lockf_entry *, off_t,
+ struct lockf_entry_list*);
+static void lf_set_end(struct lockf *, struct lockf_entry *, off_t,
+ struct lockf_entry_list*);
+static int lf_setlock(struct lockf *, struct lockf_entry *,
+ struct vnode *, void **cookiep);
+static int lf_cancel(struct lockf *, struct lockf_entry *, void *);
+static void lf_split(struct lockf *, struct lockf_entry *,
+ struct lockf_entry *, struct lockf_entry_list *);
+#ifdef LOCKF_DEBUG
+static int graph_reaches(struct owner_vertex *x, struct owner_vertex *y,
+ struct owner_vertex_list *path);
+static void graph_check(struct owner_graph *g, int checkorder);
+static void graph_print_vertices(struct owner_vertex_list *set);
+#endif
+static int graph_delta_forward(struct owner_graph *g,
+ struct owner_vertex *x, struct owner_vertex *y,
+ struct owner_vertex_list *delta);
+static int graph_delta_backward(struct owner_graph *g,
+ struct owner_vertex *x, struct owner_vertex *y,
+ struct owner_vertex_list *delta);
+static int graph_add_indices(int *indices, int n,
+ struct owner_vertex_list *set);
+static int graph_assign_indices(struct owner_graph *g, int *indices,
+ int nextunused, struct owner_vertex_list *set);
+static int graph_add_edge(struct owner_graph *g,
+ struct owner_vertex *x, struct owner_vertex *y);
+static void graph_remove_edge(struct owner_graph *g,
+ struct owner_vertex *x, struct owner_vertex *y);
+static struct owner_vertex *graph_alloc_vertex(struct owner_graph *g,
+ struct lock_owner *lo);
+static void graph_free_vertex(struct owner_graph *g,
+ struct owner_vertex *v);
+static struct owner_graph * graph_init(struct owner_graph *g);
+#ifdef LOCKF_DEBUG
+static void lf_print(char *, struct lockf_entry *);
+static void lf_printlist(char *, struct lockf_entry *);
+static void lf_print_owner(struct lock_owner *);
+#endif
+
+/*
+ * This structure is used to keep track of both local and remote lock
+ * owners. The lf_owner field of the struct lockf_entry points back at
+ * the lock owner structure. Each possible lock owner (local proc for
+ * POSIX fcntl locks, local file for BSD flock locks or <pid,sysid>
+ * pair for remote locks) is represented by a unique instance of
+ * struct lock_owner.
+ *
+ * If a lock owner has a lock that blocks some other lock or a lock
+ * that is waiting for some other lock, it also has a vertex in the
+ * owner_graph below.
+ *
+ * Locks:
+ * (s) locked by state->ls_lock
+ * (S) locked by lf_lock_states_lock
+ * (l) locked by lf_lock_owners_lock
+ * (g) locked by lf_owner_graph_lock
+ * (c) const until freeing
+ */
+#define LOCK_OWNER_HASH_SIZE 256
+
+struct lock_owner {
+ LIST_ENTRY(lock_owner) lo_link; /* (l) hash chain */
+ int lo_refs; /* (l) Number of locks referring to this */
+ int lo_flags; /* (c) Flags passed to lf_advlock */
+ caddr_t lo_id; /* (c) Id value passed to lf_advlock */
+ pid_t lo_pid; /* (c) Process Id of the lock owner */
+ int lo_sysid; /* (c) System Id of the lock owner */
+ struct owner_vertex *lo_vertex; /* (g) entry in deadlock graph */
+};
+
+LIST_HEAD(lock_owner_list, lock_owner);
+
+static struct sx lf_lock_states_lock;
+static struct lockf_list lf_lock_states; /* (S) */
+static struct sx lf_lock_owners_lock;
+static struct lock_owner_list lf_lock_owners[LOCK_OWNER_HASH_SIZE]; /* (l) */
+
+/*
+ * Structures for deadlock detection.
+ *
+ * We have two types of directed graph, the first is the set of locks,
+ * both active and pending on a vnode. Within this graph, active locks
+ * are terminal nodes in the graph (i.e. have no out-going
+ * edges). Pending locks have out-going edges to each blocking active
+ * lock that prevents the lock from being granted and also to each
+ * older pending lock that would block them if it was active. The
+ * graph for each vnode is naturally acyclic; new edges are only ever
+ * added to or from new nodes (either new pending locks which only add
+ * out-going edges or new active locks which only add in-coming edges)
+ * therefore they cannot create loops in the lock graph.
+ *
+ * The second graph is a global graph of lock owners. Each lock owner
+ * is a vertex in that graph and an edge is added to the graph
+ * whenever an edge is added to a vnode graph, with end points
+ * corresponding to owner of the new pending lock and the owner of the
+ * lock upon which it waits. In order to prevent deadlock, we only add
+ * an edge to this graph if the new edge would not create a cycle.
+ *
+ * The lock owner graph is topologically sorted, i.e. if a node has
+ * any outgoing edges, then it has an order strictly less than any
+ * node to which it has an outgoing edge. We preserve this ordering
+ * (and detect cycles) on edge insertion using Algorithm PK from the
+ * paper "A Dynamic Topological Sort Algorithm for Directed Acyclic
+ * Graphs" (ACM Journal of Experimental Algorithms, Vol 11, Article
+ * No. 1.7)
+ */
+struct owner_vertex;
+
+struct owner_edge {
+ LIST_ENTRY(owner_edge) e_outlink; /* (g) link from's out-edge list */
+ LIST_ENTRY(owner_edge) e_inlink; /* (g) link to's in-edge list */
+ int e_refs; /* (g) number of times added */
+ struct owner_vertex *e_from; /* (c) out-going from here */
+ struct owner_vertex *e_to; /* (c) in-coming to here */
+};
+LIST_HEAD(owner_edge_list, owner_edge);
+
+struct owner_vertex {
+ TAILQ_ENTRY(owner_vertex) v_link; /* (g) workspace for edge insertion */
+ uint32_t v_gen; /* (g) workspace for edge insertion */
+ int v_order; /* (g) order of vertex in graph */
+ struct owner_edge_list v_outedges;/* (g) list of out-edges */
+ struct owner_edge_list v_inedges; /* (g) list of in-edges */
+ struct lock_owner *v_owner; /* (c) corresponding lock owner */
+};
+TAILQ_HEAD(owner_vertex_list, owner_vertex);
+
+struct owner_graph {
+ struct owner_vertex** g_vertices; /* (g) pointers to vertices */
+ int g_size; /* (g) number of vertices */
+ int g_space; /* (g) space allocated for vertices */
+ int *g_indexbuf; /* (g) workspace for loop detection */
+ uint32_t g_gen; /* (g) increment when re-ordering */
+};
+
+static struct sx lf_owner_graph_lock;
+static struct owner_graph lf_owner_graph;
+
+/*
+ * Initialise various structures and locks.
+ */
+static void
+lf_init(void *dummy)
+{
+ int i;
+
+ sx_init(&lf_lock_states_lock, "lock states lock");
+ LIST_INIT(&lf_lock_states);
+
+ sx_init(&lf_lock_owners_lock, "lock owners lock");
+ for (i = 0; i < LOCK_OWNER_HASH_SIZE; i++)
+ LIST_INIT(&lf_lock_owners[i]);
+
+ sx_init(&lf_owner_graph_lock, "owner graph lock");
+ graph_init(&lf_owner_graph);
+}
+SYSINIT(lf_init, SI_SUB_LOCK, SI_ORDER_FIRST, lf_init, NULL);
+
+/*
+ * Generate a hash value for a lock owner.
+ */
+static int
+lf_hash_owner(caddr_t id, struct flock *fl, int flags)
+{
+ uint32_t h;
+
+ if (flags & F_REMOTE) {
+ h = HASHSTEP(0, fl->l_pid);
+ h = HASHSTEP(h, fl->l_sysid);
+ } else if (flags & F_FLOCK) {
+ h = ((uintptr_t) id) >> 7;
+ } else {
+ struct proc *p = (struct proc *) id;
+ h = HASHSTEP(0, p->p_pid);
+ h = HASHSTEP(h, 0);
+ }
+
+ return (h % LOCK_OWNER_HASH_SIZE);
+}
+
+/*
+ * Return true if a lock owner matches the details passed to
+ * lf_advlock.
+ */
+static int
+lf_owner_matches(struct lock_owner *lo, caddr_t id, struct flock *fl,
+ int flags)
+{
+ if (flags & F_REMOTE) {
+ return lo->lo_pid == fl->l_pid
+ && lo->lo_sysid == fl->l_sysid;
+ } else {
+ return lo->lo_id == id;
+ }
+}
+
+static struct lockf_entry *
+lf_alloc_lock(struct lock_owner *lo)
+{
+ struct lockf_entry *lf;
+
+ lf = malloc(sizeof(struct lockf_entry), M_LOCKF, M_WAITOK|M_ZERO);
+
+#ifdef LOCKF_DEBUG
+ if (lockf_debug & 4)
+ printf("Allocated lock %p\n", lf);
+#endif
+ if (lo) {
+ sx_xlock(&lf_lock_owners_lock);
+ lo->lo_refs++;
+ sx_xunlock(&lf_lock_owners_lock);
+ lf->lf_owner = lo;
+ }
+
+ return (lf);
+}
+
+static void
+lf_free_lock(struct lockf_entry *lock)
+{
+ /*
+ * Adjust the lock_owner reference count and
+ * reclaim the entry if this is the last lock
+ * for that owner.
+ */
+ struct lock_owner *lo = lock->lf_owner;
+ if (lo) {
+ KASSERT(LIST_EMPTY(&lock->lf_outedges),
+ ("freeing lock with dependancies"));
+ KASSERT(LIST_EMPTY(&lock->lf_inedges),
+ ("freeing lock with dependants"));
+ sx_xlock(&lf_lock_owners_lock);
+ KASSERT(lo->lo_refs > 0, ("lock owner refcount"));
+ lo->lo_refs--;
+ if (lo->lo_refs == 0) {
+#ifdef LOCKF_DEBUG
+ if (lockf_debug & 1)
+ printf("lf_free_lock: freeing lock owner %p\n",
+ lo);
+#endif
+ if (lo->lo_vertex) {
+ sx_xlock(&lf_owner_graph_lock);
+ graph_free_vertex(&lf_owner_graph,
+ lo->lo_vertex);
+ sx_xunlock(&lf_owner_graph_lock);
+ }
+ LIST_REMOVE(lo, lo_link);
+ free(lo, M_LOCKF);
+#ifdef LOCKF_DEBUG
+ if (lockf_debug & 4)
+ printf("Freed lock owner %p\n", lo);
+#endif
+ }
+ sx_unlock(&lf_lock_owners_lock);
+ }
+ if ((lock->lf_flags & F_REMOTE) && lock->lf_vnode) {
+ vrele(lock->lf_vnode);
+ lock->lf_vnode = NULL;
+ }
+#ifdef LOCKF_DEBUG
+ if (lockf_debug & 4)
+ printf("Freed lock %p\n", lock);
#endif
+ free(lock, M_LOCKF);
+}
/*
* Advisory record locking support
*/
int
-lf_advlock(ap, head, size)
- struct vop_advlock_args /* {
- struct vnode *a_vp;
- caddr_t a_id;
- int a_op;
- struct flock *a_fl;
- int a_flags;
- } */ *ap;
- struct lockf **head;
- u_quad_t size;
+lf_advlockasync(struct vop_advlockasync_args *ap, struct lockf **statep,
+ u_quad_t size)
{
+ struct lockf *state, *freestate = NULL;
struct flock *fl = ap->a_fl;
- struct lockf *lock;
+ struct lockf_entry *lock;
struct vnode *vp = ap->a_vp;
+ caddr_t id = ap->a_id;
+ int flags = ap->a_flags;
+ int hash;
+ struct lock_owner *lo;
off_t start, end, oadd;
- struct lockf *clean, *n;
int error;
/*
+ * Handle the F_UNLCKSYS case first - no need to mess about
+ * creating a lock owner for this one.
+ */
+ if (ap->a_op == F_UNLCKSYS) {
+ lf_clearremotesys(fl->l_sysid);
+ return (0);
+ }
+
+ /*
* Convert the flock structure into a start and end.
*/
switch (fl->l_whence) {
@@ -142,9 +456,9 @@ lf_advlock(ap, head, size)
start += fl->l_len;
if (start < 0)
return (EINVAL);
- } else if (fl->l_len == 0)
- end = -1;
- else {
+ } else if (fl->l_len == 0) {
+ end = OFF_MAX;
+ } else {
oadd = fl->l_len - 1;
if (oadd > OFF_MAX - start)
return (EOVERFLOW);
@@ -153,27 +467,89 @@ lf_advlock(ap, head, size)
/*
* Avoid the common case of unlocking when inode has no locks.
*/
- if (*head == (struct lockf *)0) {
+ if ((*statep) == NULL || LIST_EMPTY(&(*statep)->ls_active)) {
if (ap->a_op != F_SETLK) {
fl->l_type = F_UNLCK;
return (0);
}
}
+
/*
- * Allocate a spare structure in case we have to split.
+ * Map our arguments to an existing lock owner or create one
+ * if this is the first time we have seen this owner.
*/
- clean = NULL;
- if (ap->a_op == F_SETLK || ap->a_op == F_UNLCK) {
- MALLOC(clean, struct lockf *, sizeof *lock, M_LOCKF, M_WAITOK);
- clean->lf_next = NULL;
+ hash = lf_hash_owner(id, fl, flags);
+ sx_xlock(&lf_lock_owners_lock);
+ LIST_FOREACH(lo, &lf_lock_owners[hash], lo_link)
+ if (lf_owner_matches(lo, id, fl, flags))
+ break;
+ if (!lo) {
+ /*
+ * We initialise the lock with a reference
+ * count which matches the new lockf_entry
+ * structure created below.
+ */
+ lo = malloc(sizeof(struct lock_owner), M_LOCKF,
+ M_WAITOK|M_ZERO);
+#ifdef LOCKF_DEBUG
+ if (lockf_debug & 4)
+ printf("Allocated lock owner %p\n", lo);
+#endif
+
+ lo->lo_refs = 1;
+ lo->lo_flags = flags;
+ lo->lo_id = id;
+ if (flags & F_REMOTE) {
+ lo->lo_pid = fl->l_pid;
+ lo->lo_sysid = fl->l_sysid;
+ } else if (flags & F_FLOCK) {
+ lo->lo_pid = -1;
+ lo->lo_sysid = 0;
+ } else {
+ struct proc *p = (struct proc *) id;
+ lo->lo_pid = p->p_pid;
+ lo->lo_sysid = 0;
+ }
+ lo->lo_vertex = NULL;
+
+#ifdef LOCKF_DEBUG
+ if (lockf_debug & 1) {
+ printf("lf_advlockasync: new lock owner %p ", lo);
+ lf_print_owner(lo);
+ printf("\n");
+ }
+#endif
+
+ LIST_INSERT_HEAD(&lf_lock_owners[hash], lo, lo_link);
+ } else {
+ /*
+ * We have seen this lock owner before, increase its
+ * reference count to account for the new lockf_entry
+ * structure we create below.
+ */
+ lo->lo_refs++;
}
+ sx_xunlock(&lf_lock_owners_lock);
+
/*
- * Create the lockf structure
+ * Create the lockf structure. We initialise the lf_owner
+ * field here instead of in lf_alloc_lock() to avoid paying
+ * the lf_lock_owners_lock tax twice.
*/
- MALLOC(lock, struct lockf *, sizeof *lock, M_LOCKF, M_WAITOK);
+ lock = lf_alloc_lock(NULL);
lock->lf_start = start;
lock->lf_end = end;
- lock->lf_id = ap->a_id;
+ lock->lf_owner = lo;
+ lock->lf_vnode = vp;
+ if (flags & F_REMOTE) {
+ /*
+ * For remote locks, the caller may release its ref to
+ * the vnode at any time - we have to ref it here to
+ * prevent it from being recycled unexpectedly.
+ */
+ vref(vp);
+ }
+
/*
* XXX The problem is that VTOI is ufs specific, so it will
* break LOCKF_DEBUG for all other FS's other than UFS because
@@ -182,60 +558,698 @@ lf_advlock(ap, head, size)
/* lock->lf_inode = VTOI(ap->a_vp); */
lock->lf_inode = (struct inode *)0;
lock->lf_type = fl->l_type;
- lock->lf_head = head;
- lock->lf_next = (struct lockf *)0;
- TAILQ_INIT(&lock->lf_blkhd);
+ LIST_INIT(&lock->lf_outedges);
+ LIST_INIT(&lock->lf_inedges);
+ lock->lf_async_task = ap->a_task;
lock->lf_flags = ap->a_flags;
+
/*
- * Do the requested operation.
+ * Do the requested operation. First find our state structure
+ * and create a new one if necessary - the caller's *statep
+ * variable and the state's ls_threads count is protected by
+ * the vnode interlock.
*/
VI_LOCK(vp);
+
+ /*
+ * Allocate a state structure if necessary.
+ */
+ state = *statep;
+ if (state == NULL) {
+ struct lockf *ls;
+
+ VI_UNLOCK(vp);
+
+ ls = malloc(sizeof(struct lockf), M_LOCKF, M_WAITOK|M_ZERO);
+ sx_init(&ls->ls_lock, "ls_lock");
+ LIST_INIT(&ls->ls_active);
+ LIST_INIT(&ls->ls_pending);
+
+ sx_xlock(&lf_lock_states_lock);
+ LIST_INSERT_HEAD(&lf_lock_states, ls, ls_link);
+ sx_xunlock(&lf_lock_states_lock);
+
+ /*
+ * Cope if we lost a race with some other thread while
+ * trying to allocate memory.
+ */
+ VI_LOCK(vp);
+ if ((*statep) == NULL) {
+ (*statep) = ls;
+ } else {
+ sx_xlock(&lf_lock_states_lock);
+ LIST_REMOVE(ls, ls_link);
+ sx_xunlock(&lf_lock_states_lock);
+ sx_destroy(&ls->ls_lock);
+ free(ls, M_LOCKF);
+ }
+ }
+ state = *statep;
+ state->ls_threads++;
+
+ VI_UNLOCK(vp);
+
+ sx_xlock(&state->ls_lock);
switch(ap->a_op) {
case F_SETLK:
- error = lf_setlock(lock, vp, &clean);
+ error = lf_setlock(state, lock, vp, ap->a_cookiep);
break;
case F_UNLCK:
- error = lf_clearlock(lock, &clean);
- lock->lf_next = clean;
- clean = lock;
+ error = lf_clearlock(state, lock);
+ lf_free_lock(lock);
break;
case F_GETLK:
- error = lf_getlock(lock, fl);
- lock->lf_next = clean;
- clean = lock;
+ error = lf_getlock(state, lock, fl);
+ lf_free_lock(lock);
+ break;
+
+ case F_CANCEL:
+ if (ap->a_cookiep)
+ error = lf_cancel(state, lock, *ap->a_cookiep);
+ else
+ error = EINVAL;
+ lf_free_lock(lock);
break;
default:
- lock->lf_next = clean;
- clean = lock;
+ lf_free_lock(lock);
error = EINVAL;
break;
}
+
+#ifdef INVARIANTS
+ /*
+ * Check for some can't happen stuff. In this case, the active
+ * lock list becoming disordered or containing mutually
+ * blocking locks. We also check the pending list for locks
+ * which should be active (i.e. have no out-going edges).
+ */
+ LIST_FOREACH(lock, &state->ls_active, lf_link) {
+ struct lockf_entry *lf;
+ if (LIST_NEXT(lock, lf_link))
+ KASSERT((lock->lf_start
+ <= LIST_NEXT(lock, lf_link)->lf_start),
+ ("locks disordered"));
+ LIST_FOREACH(lf, &state->ls_active, lf_link) {
+ if (lock == lf)
+ break;
+ KASSERT(!lf_blocks(lock, lf),
+ ("two conflicting active locks"));
+ if (lock->lf_owner == lf->lf_owner)
+ KASSERT(!lf_overlaps(lock, lf),
+ ("two overlapping locks from same owner"));
+ }
+ }
+ LIST_FOREACH(lock, &state->ls_pending, lf_link) {
+ KASSERT(!LIST_EMPTY(&lock->lf_outedges),
+ ("pending lock which should be active"));
+ }
+#endif
+ sx_xunlock(&state->ls_lock);
+
+ /*
+ * If we have removed the last active lock on the vnode and
+ * this is the last thread that was in-progress, we can free
+ * the state structure. We update the caller's pointer inside
+ * the vnode interlock but call free outside.
+ *
+ * XXX alternatively, keep the state structure around until
+ * the filesystem recycles - requires a callback from the
+ * filesystem.
+ */
+ VI_LOCK(vp);
+
+ state->ls_threads--;
+ if (LIST_EMPTY(&state->ls_active) && state->ls_threads == 0) {
+ KASSERT(LIST_EMPTY(&state->ls_pending),
+ ("freeing state with pending locks"));
+ freestate = state;
+ *statep = NULL;
+ }
+
VI_UNLOCK(vp);
- for (lock = clean; lock != NULL; ) {
- n = lock->lf_next;
- free(lock, M_LOCKF);
- lock = n;
+
+ if (freestate) {
+ sx_xlock(&lf_lock_states_lock);
+ LIST_REMOVE(freestate, ls_link);
+ sx_xunlock(&lf_lock_states_lock);
+ sx_destroy(&freestate->ls_lock);
+ free(freestate, M_LOCKF);
}
return (error);
}
+int
+lf_advlock(struct vop_advlock_args *ap, struct lockf **statep, u_quad_t size)
+{
+ struct vop_advlockasync_args a;
+
+ a.a_vp = ap->a_vp;
+ a.a_id = ap->a_id;
+ a.a_op = ap->a_op;
+ a.a_fl = ap->a_fl;
+ a.a_flags = ap->a_flags;
+ a.a_task = NULL;
+ a.a_cookiep = NULL;
+
+ return (lf_advlockasync(&a, statep, size));
+}
+
+/*
+ * Return non-zero if locks 'x' and 'y' overlap.
+ */
+static int
+lf_overlaps(struct lockf_entry *x, struct lockf_entry *y)
+{
+
+ return (x->lf_start <= y->lf_end && x->lf_end >= y->lf_start);
+}
+
+/*
+ * Return non-zero if lock 'x' is blocked by lock 'y' (or vice versa).
+ */
+static int
+lf_blocks(struct lockf_entry *x, struct lockf_entry *y)
+{
+
+ return x->lf_owner != y->lf_owner
+ && (x->lf_type == F_WRLCK || y->lf_type == F_WRLCK)
+ && lf_overlaps(x, y);
+}
+
+/*
+ * Allocate a lock edge from the free list
+ */
+static struct lockf_edge *
+lf_alloc_edge(void)
+{
+
+ return (malloc(sizeof(struct lockf_edge), M_LOCKF, M_WAITOK|M_ZERO));
+}
+
+/*
+ * Free a lock edge.
+ */
+static void
+lf_free_edge(struct lockf_edge *e)
+{
+
+ free(e, M_LOCKF);
+}
+
+
+/*
+ * Ensure that the lock's owner has a corresponding vertex in the
+ * owner graph.
+ */
+static void
+lf_alloc_vertex(struct lockf_entry *lock)
+{
+ struct owner_graph *g = &lf_owner_graph;
+
+ if (!lock->lf_owner->lo_vertex)
+ lock->lf_owner->lo_vertex =
+ graph_alloc_vertex(g, lock->lf_owner);
+}
+
+/*
+ * Attempt to record an edge from lock x to lock y. Return EDEADLK if
+ * the new edge would cause a cycle in the owner graph.
+ */
+static int
+lf_add_edge(struct lockf_entry *x, struct lockf_entry *y)
+{
+ struct owner_graph *g = &lf_owner_graph;
+ struct lockf_edge *e;
+ int error;
+
+#ifdef INVARIANTS
+ LIST_FOREACH(e, &x->lf_outedges, le_outlink)
+ KASSERT(e->le_to != y, ("adding lock edge twice"));
+#endif
+
+ /*
+ * Make sure the two owners have entries in the owner graph.
+ */
+ lf_alloc_vertex(x);
+ lf_alloc_vertex(y);
+
+ error = graph_add_edge(g, x->lf_owner->lo_vertex,
+ y->lf_owner->lo_vertex);
+ if (error)
+ return (error);
+
+ e = lf_alloc_edge();
+ LIST_INSERT_HEAD(&x->lf_outedges, e, le_outlink);
+ LIST_INSERT_HEAD(&y->lf_inedges, e, le_inlink);
+ e->le_from = x;
+ e->le_to = y;
+
+ return (0);
+}
+
+/*
+ * Remove an edge from the lock graph.
+ */
+static void
+lf_remove_edge(struct lockf_edge *e)
+{
+ struct owner_graph *g = &lf_owner_graph;
+ struct lockf_entry *x = e->le_from;
+ struct lockf_entry *y = e->le_to;
+
+ graph_remove_edge(g, x->lf_owner->lo_vertex, y->lf_owner->lo_vertex);
+ LIST_REMOVE(e, le_outlink);
+ LIST_REMOVE(e, le_inlink);
+ e->le_from = NULL;
+ e->le_to = NULL;
+ lf_free_edge(e);
+}
+
+/*
+ * Remove all out-going edges from lock x.
+ */
+static void
+lf_remove_outgoing(struct lockf_entry *x)
+{
+ struct lockf_edge *e;
+
+ while ((e = LIST_FIRST(&x->lf_outedges)) != NULL) {
+ lf_remove_edge(e);
+ }
+}
+
+/*
+ * Remove all in-coming edges from lock x.
+ */
+static void
+lf_remove_incoming(struct lockf_entry *x)
+{
+ struct lockf_edge *e;
+
+ while ((e = LIST_FIRST(&x->lf_inedges)) != NULL) {
+ lf_remove_edge(e);
+ }
+}
+
+/*
+ * Walk the list of locks for the file and create an out-going edge
+ * from lock to each blocking lock.
+ */
+static int
+lf_add_outgoing(struct lockf *state, struct lockf_entry *lock)
+{
+ struct lockf_entry *overlap;
+ int error;
+
+ LIST_FOREACH(overlap, &state->ls_active, lf_link) {
+ /*
+ * We may assume that the active list is sorted by
+ * lf_start.
+ */
+ if (overlap->lf_start > lock->lf_end)
+ break;
+ if (!lf_blocks(lock, overlap))
+ continue;
+
+ /*
+ * We've found a blocking lock. Add the corresponding
+ * edge to the graphs and see if it would cause a
+ * deadlock.
+ */
+ error = lf_add_edge(lock, overlap);
+
+ /*
+ * The only error that lf_add_edge returns is EDEADLK.
+ * Remove any edges we added and return the error.
+ */
+ if (error) {
+ lf_remove_outgoing(lock);
+ return (error);
+ }
+ }
+
+ /*
+ * We also need to add edges to sleeping locks that block
+ * us. This ensures that lf_wakeup_lock cannot grant two
+ * mutually blocking locks simultaneously and also enforces a
+ * 'first come, first served' fairness model. Note that this
+ * only happens if we are blocked by at least one active lock
+ * due to the call to lf_getblock in lf_setlock below.
+ */
+ LIST_FOREACH(overlap, &state->ls_pending, lf_link) {
+ if (!lf_blocks(lock, overlap))
+ continue;
+ /*
+ * We've found a blocking lock. Add the corresponding
+ * edge to the graphs and see if it would cause a
+ * deadlock.
+ */
+ error = lf_add_edge(lock, overlap);
+
+ /*
+ * The only error that lf_add_edge returns is EDEADLK.
+ * Remove any edges we added and return the error.
+ */
+ if (error) {
+ lf_remove_outgoing(lock);
+ return (error);
+ }
+ }
+
+ return (0);
+}
+
+/*
+ * Walk the list of pending locks for the file and create an in-coming
+ * edge from lock to each blocking lock.
+ */
+static int
+lf_add_incoming(struct lockf *state, struct lockf_entry *lock)
+{
+ struct lockf_entry *overlap;
+ int error;
+
+ LIST_FOREACH(overlap, &state->ls_pending, lf_link) {
+ if (!lf_blocks(lock, overlap))
+ continue;
+
+ /*
+ * We've found a blocking lock. Add the corresponding
+ * edge to the graphs and see if it would cause a
+ * deadlock.
+ */
+ error = lf_add_edge(overlap, lock);
+
+ /*
+ * The only error that lf_add_edge returns is EDEADLK.
+ * Remove any edges we added and return the error.
+ */
+ if (error) {
+ lf_remove_incoming(lock);
+ return (error);
+ }
+ }
+ return (0);
+}
+
+/*
+ * Insert lock into the active list, keeping list entries ordered by
+ * increasing values of lf_start.
+ */
+static void
+lf_insert_lock(struct lockf *state, struct lockf_entry *lock)
+{
+ struct lockf_entry *lf, *lfprev;
+
+ if (LIST_EMPTY(&state->ls_active)) {
+ LIST_INSERT_HEAD(&state->ls_active, lock, lf_link);
+ return;
+ }
+
+ lfprev = NULL;
+ LIST_FOREACH(lf, &state->ls_active, lf_link) {
+ if (lf->lf_start > lock->lf_start) {
+ LIST_INSERT_BEFORE(lf, lock, lf_link);
+ return;
+ }
+ lfprev = lf;
+ }
+ LIST_INSERT_AFTER(lfprev, lock, lf_link);
+}
+
+/*
+ * Wake up a sleeping lock and remove it from the pending list now
+ * that all its dependancies have been resolved. The caller should
+ * arrange for the lock to be added to the active list, adjusting any
+ * existing locks for the same owner as needed.
+ */
+static void
+lf_wakeup_lock(struct lockf *state, struct lockf_entry *wakelock)
+{
+
+ /*
+ * Remove from ls_pending list and wake up the caller
+ * or start the async notification, as appropriate.
+ */
+ LIST_REMOVE(wakelock, lf_link);
+#ifdef LOCKF_DEBUG
+ if (lockf_debug & 1)
+ lf_print("lf_wakeup_lock: awakening", wakelock);
+#endif /* LOCKF_DEBUG */
+ if (wakelock->lf_async_task) {
+ taskqueue_enqueue(taskqueue_thread, wakelock->lf_async_task);
+ } else {
+ wakeup(wakelock);
+ }
+}
+
+/*
+ * Re-check all dependant locks and remove edges to locks that we no
+ * longer block. If 'all' is non-zero, the lock has been removed and
+ * we must remove all the dependancies, otherwise it has simply been
+ * reduced but remains active. Any pending locks which have been
+ * unblocked are added to 'granted'
+ */
+static void
+lf_update_dependancies(struct lockf *state, struct lockf_entry *lock, int all,
+ struct lockf_entry_list *granted)
+{
+ struct lockf_edge *e, *ne;
+ struct lockf_entry *deplock;
+
+ LIST_FOREACH_SAFE(e, &lock->lf_inedges, le_inlink, ne) {
+ deplock = e->le_from;
+ if (all || !lf_blocks(lock, deplock)) {
+ sx_xlock(&lf_owner_graph_lock);
+ lf_remove_edge(e);
+ sx_xunlock(&lf_owner_graph_lock);
+ if (LIST_EMPTY(&deplock->lf_outedges)) {
+ lf_wakeup_lock(state, deplock);
+ LIST_INSERT_HEAD(granted, deplock, lf_link);
+ }
+ }
+ }
+}
+
+/*
+ * Set the start of an existing active lock, updating dependancies and
+ * adding any newly woken locks to 'granted'.
+ */
+static void
+lf_set_start(struct lockf *state, struct lockf_entry *lock, off_t new_start,
+ struct lockf_entry_list *granted)
+{
+
+ KASSERT(new_start >= lock->lf_start, ("can't increase lock"));
+ lock->lf_start = new_start;
+ LIST_REMOVE(lock, lf_link);
+ lf_insert_lock(state, lock);
+ lf_update_dependancies(state, lock, FALSE, granted);
+}
+
+/*
+ * Set the end of an existing active lock, updating dependancies and
+ * adding any newly woken locks to 'granted'.
+ */
+static void
+lf_set_end(struct lockf *state, struct lockf_entry *lock, off_t new_end,
+ struct lockf_entry_list *granted)
+{
+
+ KASSERT(new_end <= lock->lf_end, ("can't increase lock"));
+ lock->lf_end = new_end;
+ lf_update_dependancies(state, lock, FALSE, granted);
+}
+
+/*
+ * Add a lock to the active list, updating or removing any current
+ * locks owned by the same owner and processing any pending locks that
+ * become unblocked as a result. This code is also used for unlock
+ * since the logic for updating existing locks is identical.
+ *
+ * As a result of processing the new lock, we may unblock existing
+ * pending locks as a result of downgrading/unlocking. We simply
+ * activate the newly granted locks by looping.
+ *
+ * Since the new lock already has its dependancies set up, we always
+ * add it to the list (unless its an unlock request). This may
+ * fragment the lock list in some pathological cases but its probably
+ * not a real problem.
+ */
+static void
+lf_activate_lock(struct lockf *state, struct lockf_entry *lock)
+{
+ struct lockf_entry *overlap, *lf;
+ struct lockf_entry_list granted;
+ int ovcase;
+
+ LIST_INIT(&granted);
+ LIST_INSERT_HEAD(&granted, lock, lf_link);
+
+ while (!LIST_EMPTY(&granted)) {
+ lock = LIST_FIRST(&granted);
+ LIST_REMOVE(lock, lf_link);
+
+ /*
+ * Skip over locks owned by other processes. Handle
+ * any locks that overlap and are owned by ourselves.
+ */
+ overlap = LIST_FIRST(&state->ls_active);
+ for (;;) {
+ ovcase = lf_findoverlap(&overlap, lock, SELF);
+
+#ifdef LOCKF_DEBUG
+ if (ovcase && (lockf_debug & 2)) {
+ printf("lf_setlock: overlap %d", ovcase);
+ lf_print("", overlap);
+ }
+#endif
+ /*
+ * Six cases:
+ * 0) no overlap
+ * 1) overlap == lock
+ * 2) overlap contains lock
+ * 3) lock contains overlap
+ * 4) overlap starts before lock
+ * 5) overlap ends after lock
+ */
+ switch (ovcase) {
+ case 0: /* no overlap */
+ break;
+
+ case 1: /* overlap == lock */
+ /*
+ * We have already setup the
+ * dependants for the new lock, taking
+ * into account a possible downgrade
+ * or unlock. Remove the old lock.
+ */
+ LIST_REMOVE(overlap, lf_link);
+ lf_update_dependancies(state, overlap, TRUE,
+ &granted);
+ lf_free_lock(overlap);
+ break;
+
+ case 2: /* overlap contains lock */
+ /*
+ * Just split the existing lock.
+ */
+ lf_split(state, overlap, lock, &granted);
+ break;
+
+ case 3: /* lock contains overlap */
+ /*
+ * Delete the overlap and advance to
+ * the next entry in the list.
+ */
+ lf = LIST_NEXT(overlap, lf_link);
+ LIST_REMOVE(overlap, lf_link);
+ lf_update_dependancies(state, overlap, TRUE,
+ &granted);
+ lf_free_lock(overlap);
+ overlap = lf;
+ continue;
+
+ case 4: /* overlap starts before lock */
+ /*
+ * Just update the overlap end and
+ * move on.
+ */
+ lf_set_end(state, overlap, lock->lf_start - 1,
+ &granted);
+ overlap = LIST_NEXT(overlap, lf_link);
+ continue;
+
+ case 5: /* overlap ends after lock */
+ /*
+ * Change the start of overlap and
+ * re-insert.
+ */
+ lf_set_start(state, overlap, lock->lf_end + 1,
+ &granted);
+ break;
+ }
+ break;
+ }
+#ifdef LOCKF_DEBUG
+ if (lockf_debug & 1) {
+ if (lock->lf_type != F_UNLCK)
+ lf_print("lf_activate_lock: activated", lock);
+ else
+ lf_print("lf_activate_lock: unlocked", lock);
+ lf_printlist("lf_activate_lock", lock);
+ }
+#endif /* LOCKF_DEBUG */
+ if (lock->lf_type != F_UNLCK)
+ lf_insert_lock(state, lock);
+ }
+}
+
+/*
+ * Cancel a pending lock request, either as a result of a signal or a
+ * cancel request for an async lock.
+ */
+static void
+lf_cancel_lock(struct lockf *state, struct lockf_entry *lock)
+{
+ struct lockf_entry_list granted;
+
+ /*
+ * Note it is theoretically possible that cancelling this lock
+ * may allow some other pending lock to become
+ * active. Consider this case:
+ *
+ * Owner Action Result Dependancies
+ *
+ * A: lock [0..0] succeeds
+ * B: lock [2..2] succeeds
+ * C: lock [1..2] blocked C->B
+ * D: lock [0..1] blocked C->B,D->A,D->C
+ * A: unlock [0..0] C->B,D->C
+ * C: cancel [1..2]
+ */
+
+ LIST_REMOVE(lock, lf_link);
+
+ /*
+ * Removing out-going edges is simple.
+ */
+ sx_xlock(&lf_owner_graph_lock);
+ lf_remove_outgoing(lock);
+ sx_xunlock(&lf_owner_graph_lock);
+
+ /*
+ * Removing in-coming edges may allow some other lock to
+ * become active - we use lf_update_dependancies to figure
+ * this out.
+ */
+ LIST_INIT(&granted);
+ lf_update_dependancies(state, lock, TRUE, &granted);
+ lf_free_lock(lock);
+
+ /*
+ * Feed any newly active locks to lf_activate_lock.
+ */
+ while (!LIST_EMPTY(&granted)) {
+ lock = LIST_FIRST(&granted);
+ LIST_REMOVE(lock, lf_link);
+ lf_activate_lock(state, lock);
+ }
+}
+
/*
* Set a byte-range lock.
*/
static int
-lf_setlock(lock, vp, clean)
- struct lockf *lock;
- struct vnode *vp;
- struct lockf **clean;
+lf_setlock(struct lockf *state, struct lockf_entry *lock, struct vnode *vp,
+ void **cookiep)
{
- struct lockf *block;
- struct lockf **head = lock->lf_head;
- struct lockf **prev, *overlap, *ltmp;
+ struct lockf_entry *block;
static char lockstr[] = "lockf";
- int ovcase, priority, needtolink, error;
+ int priority, error;
#ifdef LOCKF_DEBUG
if (lockf_debug & 1)
@@ -252,70 +1266,36 @@ lf_setlock(lock, vp, clean)
/*
* Scan lock list for this file looking for locks that would block us.
*/
- while ((block = lf_getblock(lock))) {
+ while ((block = lf_getblock(state, lock))) {
/*
* Free the structure and return if nonblocking.
*/
- if ((lock->lf_flags & F_WAIT) == 0) {
- lock->lf_next = *clean;
- *clean = lock;
- return (EAGAIN);
+ if ((lock->lf_flags & F_WAIT) == 0
+ && lock->lf_async_task == NULL) {
+ lf_free_lock(lock);
+ error = EAGAIN;
+ goto out;
}
+
/*
- * We are blocked. Since flock style locks cover
- * the whole file, there is no chance for deadlock.
- * For byte-range locks we must check for deadlock.
- *
- * Deadlock detection is done by looking through the
- * wait channels to see if there are any cycles that
- * involve us. MAXDEPTH is set just to make sure we
- * do not go off into neverland.
+ * We are blocked. Create edges to each blocking lock,
+ * checking for deadlock using the owner graph. For
+ * simplicity, we run deadlock detection for all
+ * locks, posix and otherwise.
*/
- if ((lock->lf_flags & F_POSIX) &&
- (block->lf_flags & F_POSIX)) {
- struct proc *wproc;
- struct proc *nproc;
- struct thread *td;
- struct lockf *waitblock;
- int i = 0;
-
- /* The block is waiting on something */
- wproc = (struct proc *)block->lf_id;
-restart:
- nproc = NULL;
- PROC_LOCK(wproc);
- FOREACH_THREAD_IN_PROC(wproc, td) {
- thread_lock(td);
- for (;;) {
- if (!TD_ON_SLEEPQ(td) ||
- td->td_wmesg != lockstr)
- break;
- waitblock = (struct lockf *)td->td_wchan;
- /* Get the owner of the blocking lock */
- if (waitblock->lf_next == NULL)
- break;
- waitblock = waitblock->lf_next;
- if ((waitblock->lf_flags & F_POSIX) == 0)
- break;
- if (waitblock->lf_id == lock->lf_id) {
- thread_unlock(td);
- PROC_UNLOCK(wproc);
- lock->lf_next = *clean;
- *clean = lock;
- return (EDEADLK);
- }
- nproc = (struct proc *)waitblock->lf_id;
- break;
- }
- thread_unlock(td);
- if (nproc)
- break;
- }
- PROC_UNLOCK(wproc);
- wproc = nproc;
- if (++i < maxlockdepth && wproc)
- goto restart;
+ sx_xlock(&lf_owner_graph_lock);
+ error = lf_add_outgoing(state, lock);
+ sx_xunlock(&lf_owner_graph_lock);
+
+ if (error) {
+#ifdef LOCKF_DEBUG
+ if (lockf_debug & 1)
+ lf_print("lf_setlock: deadlock", lock);
+#endif
+ lf_free_lock(lock);
+ goto out;
}
+
/*
* For flock type locks, we must first remove
* any shared locks that we hold before we sleep
@@ -324,170 +1304,94 @@ restart:
if ((lock->lf_flags & F_FLOCK) &&
lock->lf_type == F_WRLCK) {
lock->lf_type = F_UNLCK;
- (void) lf_clearlock(lock, clean);
+ lf_activate_lock(state, lock);
lock->lf_type = F_WRLCK;
}
/*
- * Add our lock to the blocked list and sleep until we're free.
- * Remember who blocked us (for deadlock detection).
+ * We have added edges to everything that blocks
+ * us. Sleep until they all go away.
*/
- lock->lf_next = block;
- TAILQ_INSERT_TAIL(&block->lf_blkhd, lock, lf_block);
+ LIST_INSERT_HEAD(&state->ls_pending, lock, lf_link);
#ifdef LOCKF_DEBUG
if (lockf_debug & 1) {
- lf_print("lf_setlock: blocking on", block);
- lf_printlist("lf_setlock", block);
+ struct lockf_edge *e;
+ LIST_FOREACH(e, &lock->lf_outedges, le_outlink) {
+ lf_print("lf_setlock: blocking on", e->le_to);
+ lf_printlist("lf_setlock", e->le_to);
+ }
}
#endif /* LOCKF_DEBUG */
- error = msleep(lock, VI_MTX(vp), priority, lockstr, 0);
+
+ if ((lock->lf_flags & F_WAIT) == 0) {
+ /*
+ * The caller requested async notification -
+ * this callback happens when the blocking
+ * lock is released, allowing the caller to
+ * make another attempt to take the lock.
+ */
+ *cookiep = (void *) lock;
+ error = EINPROGRESS;
+ goto out;
+ }
+
+ error = sx_sleep(lock, &state->ls_lock, priority, lockstr, 0);
/*
* We may have been awakened by a signal and/or by a
- * debugger continuing us (in which cases we must remove
- * ourselves from the blocked list) and/or by another
- * process releasing a lock (in which case we have
- * already been removed from the blocked list and our
- * lf_next field set to NOLOCKF).
+ * debugger continuing us (in which cases we must
+ * remove our lock graph edges) and/or by another
+ * process releasing a lock (in which case our edges
+ * have already been removed and we have been moved to
+ * the active list).
+ *
+ * Note that it is possible to receive a signal after
+ * we were successfully woken (and moved to the active
+ * list) but before we resumed execution. In this
+ * case, our lf_outedges list will be clear. We
+ * pretend there was no error.
+ *
+ * Note also, if we have been sleeping long enough, we
+ * may now have incoming edges from some newer lock
+ * which is waiting behind us in the queue.
*/
- if (lock->lf_next) {
- TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock, lf_block);
- lock->lf_next = NOLOCKF;
+ if (LIST_EMPTY(&lock->lf_outedges)) {
+ error = 0;
+ } else {
+ lf_cancel_lock(state, lock);
+ goto out;
}
- if (error) {
- lock->lf_next = *clean;
- *clean = lock;
- return (error);
+#ifdef LOCKF_DEBUG
+ if (lockf_debug & 1) {
+ lf_print("lf_setlock: granted", lock);
}
+#endif
+ goto out;
+ }
+ /*
+ * It looks like we are going to grant the lock. First add
+ * edges from any currently pending lock that the new lock
+ * would block.
+ */
+ sx_xlock(&lf_owner_graph_lock);
+ error = lf_add_incoming(state, lock);
+ sx_xunlock(&lf_owner_graph_lock);
+ if (error) {
+#ifdef LOCKF_DEBUG
+ if (lockf_debug & 1)
+ lf_print("lf_setlock: deadlock", lock);
+#endif
+ lf_free_lock(lock);
+ goto out;
}
+
/*
* No blocks!! Add the lock. Note that we will
* downgrade or upgrade any overlapping locks this
* process already owns.
- *
- * Skip over locks owned by other processes.
- * Handle any locks that overlap and are owned by ourselves.
- */
- prev = head;
- block = *head;
- needtolink = 1;
- for (;;) {
- ovcase = lf_findoverlap(block, lock, SELF, &prev, &overlap);
- if (ovcase)
- block = overlap->lf_next;
- /*
- * Six cases:
- * 0) no overlap
- * 1) overlap == lock
- * 2) overlap contains lock
- * 3) lock contains overlap
- * 4) overlap starts before lock
- * 5) overlap ends after lock
- */
- switch (ovcase) {
- case 0: /* no overlap */
- if (needtolink) {
- *prev = lock;
- lock->lf_next = overlap;
- }
- break;
-
- case 1: /* overlap == lock */
- /*
- * If downgrading lock, others may be
- * able to acquire it.
- */
- if (lock->lf_type == F_RDLCK &&
- overlap->lf_type == F_WRLCK)
- lf_wakelock(overlap);
- overlap->lf_type = lock->lf_type;
- lock->lf_next = *clean;
- *clean = lock;
- lock = overlap; /* for debug output below */
- break;
-
- case 2: /* overlap contains lock */
- /*
- * Check for common starting point and different types.
- */
- if (overlap->lf_type == lock->lf_type) {
- lock->lf_next = *clean;
- *clean = lock;
- lock = overlap; /* for debug output below */
- break;
- }
- if (overlap->lf_start == lock->lf_start) {
- *prev = lock;
- lock->lf_next = overlap;
- overlap->lf_start = lock->lf_end + 1;
- } else
- lf_split(overlap, lock, clean);
- lf_wakelock(overlap);
- break;
-
- case 3: /* lock contains overlap */
- /*
- * If downgrading lock, others may be able to
- * acquire it, otherwise take the list.
- */
- if (lock->lf_type == F_RDLCK &&
- overlap->lf_type == F_WRLCK) {
- lf_wakelock(overlap);
- } else {
- while (!TAILQ_EMPTY(&overlap->lf_blkhd)) {
- ltmp = TAILQ_FIRST(&overlap->lf_blkhd);
- TAILQ_REMOVE(&overlap->lf_blkhd, ltmp,
- lf_block);
- TAILQ_INSERT_TAIL(&lock->lf_blkhd,
- ltmp, lf_block);
- ltmp->lf_next = lock;
- }
- }
- /*
- * Add the new lock if necessary and delete the overlap.
- */
- if (needtolink) {
- *prev = lock;
- lock->lf_next = overlap->lf_next;
- prev = &lock->lf_next;
- needtolink = 0;
- } else
- *prev = overlap->lf_next;
- overlap->lf_next = *clean;
- *clean = overlap;
- continue;
-
- case 4: /* overlap starts before lock */
- /*
- * Add lock after overlap on the list.
- */
- lock->lf_next = overlap->lf_next;
- overlap->lf_next = lock;
- overlap->lf_end = lock->lf_start - 1;
- prev = &lock->lf_next;
- lf_wakelock(overlap);
- needtolink = 0;
- continue;
-
- case 5: /* overlap ends after lock */
- /*
- * Add the new lock before overlap.
- */
- if (needtolink) {
- *prev = lock;
- lock->lf_next = overlap;
- }
- overlap->lf_start = lock->lf_end + 1;
- lf_wakelock(overlap);
- break;
- }
- break;
- }
-#ifdef LOCKF_DEBUG
- if (lockf_debug & 1) {
- lf_print("lf_setlock: got the lock", lock);
- lf_printlist("lf_setlock", lock);
- }
-#endif /* LOCKF_DEBUG */
- return (0);
+ */
+ lf_activate_lock(state, lock);
+ error = 0;
+out:
+ return (error);
}
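The blocked-request path above boils down to: record an edge to every blocker, sleep, and treat a wakeup that still finds edges as an interruption. Below is a rough userland analogue of that wait protocol using a blocker count and a condition variable; the names and the counter are illustrative only — the kernel sleeps with sx_sleep() on the per-vnode lock state and tracks real graph edges, not a count.

#include <pthread.h>
#include <stdio.h>

struct pending {
	pthread_mutex_t	mtx;
	pthread_cond_t	cv;
	int		blockers;	/* outstanding "out-edges" */
	int		interrupted;	/* e.g. a signal arrived */
};

/* Sleep until every blocker has gone away; waking with blockers still
 * present (signal, debugger) means the request must be torn down. */
static int
wait_for_grant(struct pending *p)
{
	int error = 0;

	pthread_mutex_lock(&p->mtx);
	while (p->blockers > 0 && !p->interrupted)
		pthread_cond_wait(&p->cv, &p->mtx);
	if (p->blockers > 0)
		error = -1;	/* woken early: caller drops its edges */
	pthread_mutex_unlock(&p->mtx);
	return (error);
}

/* Called by whoever releases a blocking lock. */
static void
drop_blocker(struct pending *p)
{
	pthread_mutex_lock(&p->mtx);
	if (--p->blockers == 0)
		pthread_cond_broadcast(&p->cv);
	pthread_mutex_unlock(&p->mtx);
}

int
main(void)
{
	struct pending p = {
		PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 1, 0
	};

	drop_blocker(&p);	/* the only blocker goes away */
	printf("granted: %d\n", wait_for_grant(&p) == 0);
	return (0);
}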
/*
@@ -497,16 +1401,13 @@ restart:
* and remove it (or shrink it), then wakeup anyone we can.
*/
static int
-lf_clearlock(unlock, clean)
- struct lockf *unlock;
- struct lockf **clean;
+lf_clearlock(struct lockf *state, struct lockf_entry *unlock)
{
- struct lockf **head = unlock->lf_head;
- register struct lockf *lf = *head;
- struct lockf *overlap, **prev;
- int ovcase;
+ struct lockf_entry *overlap;
+
+ overlap = LIST_FIRST(&state->ls_active);
- if (lf == NOLOCKF)
+ if (overlap == NOLOCKF)
return (0);
#ifdef LOCKF_DEBUG
if (unlock->lf_type != F_UNLCK)
@@ -514,84 +1415,36 @@ lf_clearlock(unlock, clean)
if (lockf_debug & 1)
lf_print("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
- prev = head;
- while ((ovcase = lf_findoverlap(lf, unlock, SELF, &prev, &overlap))) {
- /*
- * Wakeup the list of locks to be retried.
- */
- lf_wakelock(overlap);
-
- switch (ovcase) {
- case 1: /* overlap == lock */
- *prev = overlap->lf_next;
- overlap->lf_next = *clean;
- *clean = overlap;
- break;
+ lf_activate_lock(state, unlock);
- case 2: /* overlap contains lock: split it */
- if (overlap->lf_start == unlock->lf_start) {
- overlap->lf_start = unlock->lf_end + 1;
- break;
- }
- lf_split(overlap, unlock, clean);
- overlap->lf_next = unlock->lf_next;
- break;
-
- case 3: /* lock contains overlap */
- *prev = overlap->lf_next;
- lf = overlap->lf_next;
- overlap->lf_next = *clean;
- *clean = overlap;
- continue;
-
- case 4: /* overlap starts before lock */
- overlap->lf_end = unlock->lf_start - 1;
- prev = &overlap->lf_next;
- lf = overlap->lf_next;
- continue;
-
- case 5: /* overlap ends after lock */
- overlap->lf_start = unlock->lf_end + 1;
- break;
- }
- break;
- }
-#ifdef LOCKF_DEBUG
- if (lockf_debug & 1)
- lf_printlist("lf_clearlock", unlock);
-#endif /* LOCKF_DEBUG */
return (0);
}
/*
- * Check whether there is a blocking lock,
- * and if so return its process identifier.
+ * Check whether there is a blocking lock, and if so return its
+ * details in '*fl'.
*/
static int
-lf_getlock(lock, fl)
- register struct lockf *lock;
- register struct flock *fl;
+lf_getlock(struct lockf *state, struct lockf_entry *lock, struct flock *fl)
{
- register struct lockf *block;
+ struct lockf_entry *block;
#ifdef LOCKF_DEBUG
if (lockf_debug & 1)
lf_print("lf_getlock", lock);
#endif /* LOCKF_DEBUG */
- if ((block = lf_getblock(lock))) {
+ if ((block = lf_getblock(state, lock))) {
fl->l_type = block->lf_type;
fl->l_whence = SEEK_SET;
fl->l_start = block->lf_start;
- if (block->lf_end == -1)
+ if (block->lf_end == OFF_MAX)
fl->l_len = 0;
else
fl->l_len = block->lf_end - block->lf_start + 1;
- if (block->lf_flags & F_POSIX)
- fl->l_pid = ((struct proc *)(block->lf_id))->p_pid;
- else
- fl->l_pid = -1;
+ fl->l_pid = block->lf_owner->lo_pid;
+ fl->l_sysid = block->lf_owner->lo_sysid;
} else {
fl->l_type = F_UNLCK;
}
@@ -599,63 +1452,129 @@ lf_getlock(lock, fl)
}
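The l_len handling above follows the usual flock convention: an internal end offset of OFF_MAX means "to end of file" and is reported as l_len == 0. A small standalone sketch of that conversion; END_EOF and range_to_flock are made-up names, and plain long long stands in for off_t.

#include <stdio.h>

#define END_EOF	0x7fffffffffffffffLL	/* illustrative stand-in for OFF_MAX */

/* Convert an internal inclusive range [start..end] to the struct flock
 * convention, where l_len == 0 means the lock runs to end of file. */
static void
range_to_flock(long long start, long long end, long long *l_start, long long *l_len)
{
	*l_start = start;
	if (end == END_EOF)
		*l_len = 0;
	else
		*l_len = end - start + 1;
}

int
main(void)
{
	long long s, l;

	range_to_flock(100, 199, &s, &l);
	printf("start %lld len %lld\n", s, l);	/* start 100 len 100 */
	range_to_flock(100, END_EOF, &s, &l);
	printf("start %lld len %lld\n", s, l);	/* start 100 len 0 */
	return (0);
}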
/*
+ * Cancel an async lock request.
+ */
+static int
+lf_cancel(struct lockf *state, struct lockf_entry *lock, void *cookie)
+{
+ struct lockf_entry *reallock;
+
+ /*
+ * We need to match this request with an existing lock
+ * request.
+ */
+ LIST_FOREACH(reallock, &state->ls_pending, lf_link) {
+ if ((void *) reallock == cookie) {
+ /*
+ * Double-check that this lock looks right
+ * (maybe use a rolling ID for the cancel
+ * cookie instead?)
+ */
+ if (!(reallock->lf_vnode == lock->lf_vnode
+ && reallock->lf_start == lock->lf_start
+ && reallock->lf_end == lock->lf_end)) {
+ return (ENOENT);
+ }
+
+ /*
+ * Make sure this lock was async and then just
+ * remove it from its wait lists.
+ */
+ if (!reallock->lf_async_task) {
+ return (ENOENT);
+ }
+
+ /*
+ * Note that since any other thread must take
+ * state->ls_lock before it can possibly
+ * trigger the async callback, we are safe
+ * from a race with lf_wakeup_lock, i.e. we
+ * can free the lock (actually our caller does
+ * this).
+ */
+ lf_cancel_lock(state, reallock);
+ return (0);
+ }
+ }
+
+ /*
+ * We didn't find a matching lock - not much we can do here.
+ */
+ return (ENOENT);
+}
+
+/*
* Walk the list of locks for an inode and
* return the first blocking lock.
*/
-static struct lockf *
-lf_getblock(lock)
- register struct lockf *lock;
+static struct lockf_entry *
+lf_getblock(struct lockf *state, struct lockf_entry *lock)
{
- struct lockf **prev, *overlap, *lf = *(lock->lf_head);
- int ovcase;
+ struct lockf_entry *overlap;
- prev = lock->lf_head;
- while ((ovcase = lf_findoverlap(lf, lock, OTHERS, &prev, &overlap))) {
+ LIST_FOREACH(overlap, &state->ls_active, lf_link) {
/*
- * We've found an overlap, see if it blocks us
+ * We may assume that the active list is sorted by
+ * lf_start.
*/
- if ((lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK))
- return (overlap);
- /*
- * Nope, point to the next one on the list and
- * see if it blocks us
- */
- lf = overlap->lf_next;
+ if (overlap->lf_start > lock->lf_end)
+ break;
+ if (!lf_blocks(lock, overlap))
+ continue;
+ return (overlap);
}
return (NOLOCKF);
}
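The early break above is only valid because the active list is kept sorted by lf_start. Here is a minimal userland sketch of the same early-exit scan over a sorted array; struct range_lock, blocks() and first_blocker() are illustrative stand-ins for lockf_entry, lf_blocks() and lf_getblock(), not code from this patch.

#include <stdio.h>

/* Illustrative stand-in for struct lockf_entry: a byte range, a type, an owner. */
struct range_lock {
	long	start;
	long	end;		/* inclusive */
	int	exclusive;	/* 1 for a write lock */
	int	owner;
};

/* Roughly what lf_blocks() must decide: overlapping ranges conflict when
 * they have different owners and at least one of them is exclusive. */
static int
blocks(const struct range_lock *a, const struct range_lock *b)
{
	if (a->owner == b->owner)
		return (0);
	if (a->end < b->start || b->end < a->start)
		return (0);
	return (a->exclusive || b->exclusive);
}

/* Scan an array sorted by 'start'; stop once no later entry can overlap. */
static const struct range_lock *
first_blocker(const struct range_lock *active, int n, const struct range_lock *req)
{
	int i;

	for (i = 0; i < n; i++) {
		if (active[i].start > req->end)
			break;		/* sorted: nothing later can overlap */
		if (blocks(req, &active[i]))
			return (&active[i]);
	}
	return (NULL);
}

int
main(void)
{
	struct range_lock active[] = {
		{ 0, 9, 0, 1 }, { 10, 19, 1, 2 }, { 100, 199, 1, 3 },
	};
	struct range_lock req = { 5, 15, 0, 4 };
	const struct range_lock *b = first_blocker(active, 3, &req);

	printf("blocked by owner %d\n", b ? b->owner : -1);	/* owner 2 */
	return (0);
}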
/*
- * Walk the list of locks for an inode to
- * find an overlapping lock (if any).
+ * Walk the list of locks for an inode to find an overlapping lock (if
+ * any) and return a classification of that overlap.
+ *
+ * Arguments:
+ * *overlap The place in the lock list to start looking
+ * lock The lock which is being tested
+ * type Pass 'SELF' to test only locks with the same
+ * owner as lock, or 'OTHERS' to test only locks
+ * with a different owner
+ *
+ * Returns one of six values:
+ * 0) no overlap
+ * 1) overlap == lock
+ * 2) overlap contains lock
+ * 3) lock contains overlap
+ * 4) overlap starts before lock
+ * 5) overlap ends after lock
+ *
+ * If there is an overlapping lock, '*overlap' is set to point at the
+ * overlapping lock.
*
* NOTE: this returns only the FIRST overlapping lock. There
* may be more than one.
*/
static int
-lf_findoverlap(lf, lock, type, prev, overlap)
- register struct lockf *lf;
- struct lockf *lock;
- int type;
- struct lockf ***prev;
- struct lockf **overlap;
+lf_findoverlap(struct lockf_entry **overlap, struct lockf_entry *lock, int type)
{
+ struct lockf_entry *lf;
off_t start, end;
+ int res;
- *overlap = lf;
- if (lf == NOLOCKF)
+ if ((*overlap) == NOLOCKF) {
return (0);
+ }
#ifdef LOCKF_DEBUG
if (lockf_debug & 2)
lf_print("lf_findoverlap: looking for overlap in", lock);
#endif /* LOCKF_DEBUG */
start = lock->lf_start;
end = lock->lf_end;
- while (lf != NOLOCKF) {
- if (((type & SELF) && lf->lf_id != lock->lf_id) ||
- ((type & OTHERS) && lf->lf_id == lock->lf_id)) {
- *prev = &lf->lf_next;
- *overlap = lf = lf->lf_next;
+ res = 0;
+ while (*overlap) {
+ lf = *overlap;
+ if (lf->lf_start > end)
+ break;
+ if (((type & SELF) && lf->lf_owner != lock->lf_owner) ||
+ ((type & OTHERS) && lf->lf_owner == lock->lf_owner)) {
+ *overlap = LIST_NEXT(lf, lf_link);
continue;
}
#ifdef LOCKF_DEBUG
@@ -673,82 +1592,78 @@ lf_findoverlap(lf, lock, type, prev, overlap)
* 4) overlap starts before lock
* 5) overlap ends after lock
*/
- if ((lf->lf_end != -1 && start > lf->lf_end) ||
- (end != -1 && lf->lf_start > end)) {
+ if (start > lf->lf_end) {
/* Case 0 */
#ifdef LOCKF_DEBUG
if (lockf_debug & 2)
printf("no overlap\n");
#endif /* LOCKF_DEBUG */
- if ((type & SELF) && end != -1 && lf->lf_start > end)
- return (0);
- *prev = &lf->lf_next;
- *overlap = lf = lf->lf_next;
+ *overlap = LIST_NEXT(lf, lf_link);
continue;
}
- if ((lf->lf_start == start) && (lf->lf_end == end)) {
+ if (lf->lf_start == start && lf->lf_end == end) {
/* Case 1 */
#ifdef LOCKF_DEBUG
if (lockf_debug & 2)
printf("overlap == lock\n");
#endif /* LOCKF_DEBUG */
- return (1);
+ res = 1;
+ break;
}
- if ((lf->lf_start <= start) &&
- (end != -1) &&
- ((lf->lf_end >= end) || (lf->lf_end == -1))) {
+ if (lf->lf_start <= start && lf->lf_end >= end) {
/* Case 2 */
#ifdef LOCKF_DEBUG
if (lockf_debug & 2)
printf("overlap contains lock\n");
#endif /* LOCKF_DEBUG */
- return (2);
+ res = 2;
+ break;
}
- if (start <= lf->lf_start &&
- (end == -1 ||
- (lf->lf_end != -1 && end >= lf->lf_end))) {
+ if (start <= lf->lf_start && end >= lf->lf_end) {
/* Case 3 */
#ifdef LOCKF_DEBUG
if (lockf_debug & 2)
printf("lock contains overlap\n");
#endif /* LOCKF_DEBUG */
- return (3);
+ res = 3;
+ break;
}
- if ((lf->lf_start < start) &&
- ((lf->lf_end >= start) || (lf->lf_end == -1))) {
+ if (lf->lf_start < start && lf->lf_end >= start) {
/* Case 4 */
#ifdef LOCKF_DEBUG
if (lockf_debug & 2)
printf("overlap starts before lock\n");
#endif /* LOCKF_DEBUG */
- return (4);
+ res = 4;
+ break;
}
- if ((lf->lf_start > start) &&
- (end != -1) &&
- ((lf->lf_end > end) || (lf->lf_end == -1))) {
+ if (lf->lf_start > start && lf->lf_end > end) {
/* Case 5 */
#ifdef LOCKF_DEBUG
if (lockf_debug & 2)
printf("overlap ends after lock\n");
#endif /* LOCKF_DEBUG */
- return (5);
+ res = 5;
+ break;
}
panic("lf_findoverlap: default");
}
- return (0);
+ return (res);
}
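The six overlap cases are easier to see on bare integer ranges. The standalone sketch below classifies an existing inclusive range [es..ee] against a new range [ns..ne] using the same numbering; it deliberately ignores the owner filtering and sorted-list bookkeeping the kernel routine also performs.

#include <stdio.h>

/* Classify how an existing range [es..ee] overlaps a new range [ns..ne],
 * mirroring the six cases lf_findoverlap() distinguishes. */
static int
overlap_case(long es, long ee, long ns, long ne)
{
	if (ns > ee || es > ne)
		return (0);	/* no overlap */
	if (es == ns && ee == ne)
		return (1);	/* overlap == lock */
	if (es <= ns && ee >= ne)
		return (2);	/* overlap contains lock */
	if (ns <= es && ne >= ee)
		return (3);	/* lock contains overlap */
	if (es < ns && ee >= ns)
		return (4);	/* overlap starts before lock */
	return (5);		/* overlap ends after lock */
}

int
main(void)
{
	printf("%d\n", overlap_case(0, 9, 20, 29));	/* 0 */
	printf("%d\n", overlap_case(0, 9, 0, 9));	/* 1 */
	printf("%d\n", overlap_case(0, 9, 2, 5));	/* 2 */
	printf("%d\n", overlap_case(2, 5, 0, 9));	/* 3 */
	printf("%d\n", overlap_case(0, 5, 3, 9));	/* 4 */
	printf("%d\n", overlap_case(3, 9, 0, 5));	/* 5 */
	return (0);
}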
/*
- * Split a lock and a contained region into
- * two or three locks as necessary.
+ * Split the existing 'lock1' based on the extent of the lock
+ * described by 'lock2'. The existing lock should cover 'lock2'
+ * entirely.
+ *
+ * Any pending locks which have been unblocked are added to
+ * 'granted'.
*/
static void
-lf_split(lock1, lock2, split)
- struct lockf *lock1;
- struct lockf *lock2;
- struct lockf **split;
+lf_split(struct lockf *state, struct lockf_entry *lock1,
+ struct lockf_entry *lock2, struct lockf_entry_list *granted)
{
- struct lockf *splitlock;
+ struct lockf_entry *splitlock;
#ifdef LOCKF_DEBUG
if (lockf_debug & 2) {
@@ -757,101 +1672,616 @@ lf_split(lock1, lock2, split)
}
#endif /* LOCKF_DEBUG */
/*
- * Check to see if spliting into only two pieces.
+ * Check to see if we don't need to split at all.
*/
if (lock1->lf_start == lock2->lf_start) {
- lock1->lf_start = lock2->lf_end + 1;
- lock2->lf_next = lock1;
+ lf_set_start(state, lock1, lock2->lf_end + 1, granted);
return;
}
if (lock1->lf_end == lock2->lf_end) {
- lock1->lf_end = lock2->lf_start - 1;
- lock2->lf_next = lock1->lf_next;
- lock1->lf_next = lock2;
+ lf_set_end(state, lock1, lock2->lf_start - 1, granted);
return;
}
/*
* Make a new lock consisting of the last part of
- * the encompassing lock. We use the preallocated
- * splitlock so we don't have to block.
+ * the encompassing lock.
+ */
+ splitlock = lf_alloc_lock(lock1->lf_owner);
+ memcpy(splitlock, lock1, sizeof *splitlock);
+ if (splitlock->lf_flags & F_REMOTE)
+ vref(splitlock->lf_vnode);
+
+ /*
+ * This cannot cause a deadlock since any edges we would add
+ * to splitlock already exist in lock1. We must be sure to add
+ * necessary dependencies to splitlock before we reduce lock1;
+ * otherwise we may accidentally grant a pending lock that
+ * was blocked by the tail end of lock1.
*/
- splitlock = *split;
- KASSERT(splitlock != NULL, ("no split"));
- *split = splitlock->lf_next;
- bcopy(lock1, splitlock, sizeof *splitlock);
splitlock->lf_start = lock2->lf_end + 1;
- TAILQ_INIT(&splitlock->lf_blkhd);
- lock1->lf_end = lock2->lf_start - 1;
+ LIST_INIT(&splitlock->lf_outedges);
+ LIST_INIT(&splitlock->lf_inedges);
+ sx_xlock(&lf_owner_graph_lock);
+ lf_add_incoming(state, splitlock);
+ sx_xunlock(&lf_owner_graph_lock);
+
+ lf_set_end(state, lock1, lock2->lf_start - 1, granted);
+
/*
* OK, now link it in
*/
- splitlock->lf_next = lock1->lf_next;
- lock2->lf_next = splitlock;
- lock1->lf_next = lock2;
+ lf_insert_lock(state, splitlock);
+}
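Stripped of the list and graph bookkeeping, lf_split() is range arithmetic: trim one end of the existing lock, or carve it into two remainders. A small sketch of just that arithmetic follows, with illustrative names; it assumes the existing range covers the carved-out one, as the function's comment requires.

#include <stdio.h>

/* Given an existing inclusive range [es..ee] that covers [ns..ne], print what
 * remains once [ns..ne] is carved out: a single trimmed piece when the two
 * ranges share an endpoint, otherwise two pieces.  The kernel version must
 * also transfer dependency edges to the new tail piece before shrinking the
 * original, as noted above. */
static void
split(long es, long ee, long ns, long ne)
{
	if (es == ns)
		printf("remainder [%ld..%ld]\n", ne + 1, ee);
	else if (ee == ne)
		printf("remainder [%ld..%ld]\n", es, ns - 1);
	else
		printf("remainders [%ld..%ld] and [%ld..%ld]\n",
		    es, ns - 1, ne + 1, ee);
}

int
main(void)
{
	split(0, 99, 0, 9);	/* remainder [10..99] */
	split(0, 99, 90, 99);	/* remainder [0..89] */
	split(0, 99, 40, 59);	/* remainders [0..39] and [60..99] */
	return (0);
}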
+
+struct clearlock {
+ STAILQ_ENTRY(clearlock) link;
+ struct vnode *vp;
+ struct flock fl;
+};
+STAILQ_HEAD(clearlocklist, clearlock);
+
+void
+lf_clearremotesys(int sysid)
+{
+ struct lockf *ls;
+ struct lockf_entry *lf;
+ struct clearlock *cl;
+ struct clearlocklist locks;
+
+ KASSERT(sysid != 0, ("Can't clear local locks with F_UNLCKSYS"));
+
+ /*
+ * In order to keep the locking simple, we iterate over the
+ * active lock lists to build a list of locks that need
+ * releasing. We then call VOP_ADVLOCK for each one in turn.
+ *
+ * We take an extra reference to the vnode for the duration to
+ * make sure it doesn't go away before we are finished.
+ */
+ STAILQ_INIT(&locks);
+ sx_xlock(&lf_lock_states_lock);
+ LIST_FOREACH(ls, &lf_lock_states, ls_link) {
+ sx_xlock(&ls->ls_lock);
+ LIST_FOREACH(lf, &ls->ls_active, lf_link) {
+ if (lf->lf_owner->lo_sysid != sysid)
+ continue;
+
+ cl = malloc(sizeof(struct clearlock), M_LOCKF,
+ M_WAITOK);
+ cl->vp = lf->lf_vnode;
+ vref(cl->vp);
+ cl->fl.l_start = lf->lf_start;
+ if (lf->lf_end == OFF_MAX)
+ cl->fl.l_len = 0;
+ else
+ cl->fl.l_len =
+ lf->lf_end - lf->lf_start + 1;
+ cl->fl.l_whence = SEEK_SET;
+ cl->fl.l_type = F_UNLCK;
+ cl->fl.l_pid = lf->lf_owner->lo_pid;
+ cl->fl.l_sysid = sysid;
+ STAILQ_INSERT_TAIL(&locks, cl, link);
+ }
+ sx_xunlock(&ls->ls_lock);
+ }
+ sx_xunlock(&lf_lock_states_lock);
+
+ while ((cl = STAILQ_FIRST(&locks)) != NULL) {
+ STAILQ_REMOVE_HEAD(&locks, link);
+ VOP_ADVLOCK(cl->vp, 0, F_UNLCK, &cl->fl, F_REMOTE);
+ vrele(cl->vp);
+ free(cl, M_LOCKF);
+ }
+}
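The shape of lf_clearremotesys() — snapshot the work while the locks are held, then make the slow VOP_ADVLOCK calls with no locks held — is a general pattern. Below is a generic pthread sketch of it, with invented names and a string table standing in for the lock-state lists.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* A work item snapshotted while the lock is held. */
struct work {
	struct work *next;
	char name[32];
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static const char *table[] = { "host-a", "host-b", "host-c" };

int
main(void)
{
	struct work *head = NULL, *w;
	size_t i;

	/* Phase 1: under the lock, only copy out what needs doing. */
	pthread_mutex_lock(&table_lock);
	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		if ((w = malloc(sizeof(*w))) == NULL)
			abort();
		strncpy(w->name, table[i], sizeof(w->name) - 1);
		w->name[sizeof(w->name) - 1] = '\0';
		w->next = head;
		head = w;
	}
	pthread_mutex_unlock(&table_lock);

	/* Phase 2: do the slow, re-entrant work with no locks held. */
	while ((w = head) != NULL) {
		head = w->next;
		printf("releasing locks for %s\n", w->name);
		free(w);
	}
	return (0);
}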
+
+int
+lf_countlocks(int sysid)
+{
+ int i;
+ struct lock_owner *lo;
+ int count;
+
+ count = 0;
+ sx_xlock(&lf_lock_owners_lock);
+ for (i = 0; i < LOCK_OWNER_HASH_SIZE; i++)
+ LIST_FOREACH(lo, &lf_lock_owners[i], lo_link)
+ if (lo->lo_sysid == sysid)
+ count += lo->lo_refs;
+ sx_xunlock(&lf_lock_owners_lock);
+
+ return (count);
}
+#ifdef LOCKF_DEBUG
+
/*
- * Wakeup a blocklist
+ * Return non-zero if y is reachable from x using a brute force
+ * search. If reachable and path is non-null, return the route taken
+ * in path.
*/
+static int
+graph_reaches(struct owner_vertex *x, struct owner_vertex *y,
+ struct owner_vertex_list *path)
+{
+ struct owner_edge *e;
+
+ if (x == y) {
+ if (path)
+ TAILQ_INSERT_HEAD(path, x, v_link);
+ return 1;
+ }
+
+ LIST_FOREACH(e, &x->v_outedges, e_outlink) {
+ if (graph_reaches(e->e_to, y, path)) {
+ if (path)
+ TAILQ_INSERT_HEAD(path, x, v_link);
+ return 1;
+ }
+ }
+ return 0;
+}
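graph_reaches() is a plain depth-first reachability test; it can afford to skip a visited set because the owner graph is kept acyclic. The same idea on a tiny adjacency-matrix DAG, purely illustrative rather than the kernel's edge lists:

#include <stdio.h>

#define NV 4

/* adj[i][j] != 0 means there is a directed edge i -> j; the graph is acyclic. */
static int adj[NV][NV] = {
	{ 0, 1, 0, 0 },		/* 0 -> 1 */
	{ 0, 0, 1, 0 },		/* 1 -> 2 */
	{ 0, 0, 0, 1 },		/* 2 -> 3 */
	{ 0, 0, 0, 0 },
};

/* Brute-force DFS: can y be reached from x? */
static int
reaches(int x, int y)
{
	int i;

	if (x == y)
		return (1);
	for (i = 0; i < NV; i++)
		if (adj[x][i] && reaches(i, y))
			return (1);
	return (0);
}

int
main(void)
{
	printf("%d %d\n", reaches(0, 3), reaches(3, 0));	/* 1 0 */
	return (0);
}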
+
+/*
+ * Perform consistency checks on the graph. Make sure the values of
+ * v_order are correct. If checkorder is non-zero, check no vertex can
+ * reach any other vertex with a smaller order.
+ */
+static void
+graph_check(struct owner_graph *g, int checkorder)
+{
+ int i, j;
+
+ for (i = 0; i < g->g_size; i++) {
+ if (!g->g_vertices[i]->v_owner)
+ continue;
+ KASSERT(g->g_vertices[i]->v_order == i,
+ ("lock graph vertices disordered"));
+ if (checkorder) {
+ for (j = 0; j < i; j++) {
+ if (!g->g_vertices[j]->v_owner)
+ continue;
+ KASSERT(!graph_reaches(g->g_vertices[i],
+ g->g_vertices[j], NULL),
+ ("lock graph vertices disordered"));
+ }
+ }
+ }
+}
+
static void
-lf_wakelock(listhead)
- struct lockf *listhead;
+graph_print_vertices(struct owner_vertex_list *set)
+{
+ struct owner_vertex *v;
+
+ printf("{ ");
+ TAILQ_FOREACH(v, set, v_link) {
+ printf("%d:", v->v_order);
+ lf_print_owner(v->v_owner);
+ if (TAILQ_NEXT(v, v_link))
+ printf(", ");
+ }
+ printf(" }\n");
+}
+
+#endif
+
+/*
+ * Calculate the sub-set of vertices v from the affected region [y..x]
+ * where v is reachable from y. Return -1 if a loop was detected
+ * (i.e. x is reachable from y); otherwise return the number of
+ * vertices in this subset.
+ */
+static int
+graph_delta_forward(struct owner_graph *g, struct owner_vertex *x,
+ struct owner_vertex *y, struct owner_vertex_list *delta)
+{
+ uint32_t gen;
+ struct owner_vertex *v;
+ struct owner_edge *e;
+ int n;
+
+ /*
+ * We start with a set containing just y. Then for each vertex
+ * v in the set so far unprocessed, we add each vertex that v
+ * has an out-edge to and that is within the affected region
+ * [y..x]. If we see the vertex x on our travels, stop
+ * immediately.
+ */
+ TAILQ_INIT(delta);
+ TAILQ_INSERT_TAIL(delta, y, v_link);
+ v = y;
+ n = 1;
+ gen = g->g_gen;
+ while (v) {
+ LIST_FOREACH(e, &v->v_outedges, e_outlink) {
+ if (e->e_to == x)
+ return -1;
+ if (e->e_to->v_order < x->v_order
+ && e->e_to->v_gen != gen) {
+ e->e_to->v_gen = gen;
+ TAILQ_INSERT_TAIL(delta, e->e_to, v_link);
+ n++;
+ }
+ }
+ v = TAILQ_NEXT(v, v_link);
+ }
+
+ return (n);
+}
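Both delta computations mark vertices as visited by stamping them with the current graph generation instead of clearing a flag on every vertex before each traversal. A minimal sketch of that generation trick, including the wrap handling used above (names are illustrative):

#include <stdio.h>
#include <stdint.h>

#define NV 8

static uint32_t global_gen;	/* analogous to g_gen */
static uint32_t vertex_gen[NV];	/* analogous to v_gen */

/* Start a new traversal: instead of zeroing all per-vertex marks, just
 * advance the global generation, resetting everything only on wrap. */
static void
new_traversal(void)
{
	global_gen++;
	if (global_gen == 0) {
		int i;

		for (i = 0; i < NV; i++)
			vertex_gen[i] = 0;
		global_gen++;
	}
}

/* Return 1 the first time vertex v is seen in the current traversal. */
static int
visit(int v)
{
	if (vertex_gen[v] == global_gen)
		return (0);
	vertex_gen[v] = global_gen;
	return (1);
}

int
main(void)
{
	new_traversal();
	printf("%d %d\n", visit(3), visit(3));	/* 1 0 */
	new_traversal();
	printf("%d\n", visit(3));		/* 1 again, nothing cleared */
	return (0);
}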
+
+/*
+ * Calculate the sub-set of vertices v from the affected region [y..x]
+ * where v reaches x. Return the number of vertices in this subset.
+ */
+static int
+graph_delta_backward(struct owner_graph *g, struct owner_vertex *x,
+ struct owner_vertex *y, struct owner_vertex_list *delta)
{
- register struct lockf *wakelock;
+ uint32_t gen;
+ struct owner_vertex *v;
+ struct owner_edge *e;
+ int n;
+
+ /*
+ * We start with a set containing just x. Then for each vertex
+ * v in the set so far unprocessed, we add each vertex that v
+ * has an in-edge from and that is within the affected region
+ * [y..x].
+ */
+ TAILQ_INIT(delta);
+ TAILQ_INSERT_TAIL(delta, x, v_link);
+ v = x;
+ n = 1;
+ gen = g->g_gen;
+ while (v) {
+ LIST_FOREACH(e, &v->v_inedges, e_inlink) {
+ if (e->e_from->v_order > y->v_order
+ && e->e_from->v_gen != gen) {
+ e->e_from->v_gen = gen;
+ TAILQ_INSERT_HEAD(delta, e->e_from, v_link);
+ n++;
+ }
+ }
+ v = TAILQ_PREV(v, owner_vertex_list, v_link);
+ }
+
+ return (n);
+}
+
+static int
+graph_add_indices(int *indices, int n, struct owner_vertex_list *set)
+{
+ struct owner_vertex *v;
+ int i, j;
+
+ TAILQ_FOREACH(v, set, v_link) {
+ for (i = n;
+ i > 0 && indices[i - 1] > v->v_order; i--)
+ ;
+ for (j = n - 1; j >= i; j--)
+ indices[j + 1] = indices[j];
+ indices[i] = v->v_order;
+ n++;
+ }
+
+ return (n);
+}
+
+static int
+graph_assign_indices(struct owner_graph *g, int *indices, int nextunused,
+ struct owner_vertex_list *set)
+{
+ struct owner_vertex *v, *vlowest;
+
+ while (!TAILQ_EMPTY(set)) {
+ vlowest = NULL;
+ TAILQ_FOREACH(v, set, v_link) {
+ if (!vlowest || v->v_order < vlowest->v_order)
+ vlowest = v;
+ }
+ TAILQ_REMOVE(set, vlowest, v_link);
+ vlowest->v_order = indices[nextunused];
+ g->g_vertices[vlowest->v_order] = vlowest;
+ nextunused++;
+ }
+
+ return (nextunused);
+}
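Taken together, graph_add_indices() and graph_assign_indices() pool the order values used by the two affected sets and hand them back out, deltaB first, so everything that reaches x ends up ordered before everything reachable from y. A small numeric sketch of that redistribution with plain int arrays (values chosen arbitrarily):

#include <stdio.h>

/* Insertion-sort 'val' into 'indices[0..n-1]', returning the new count;
 * this is the role graph_add_indices() plays for each order value. */
static int
add_index(int *indices, int n, int val)
{
	int i, j;

	for (i = n; i > 0 && indices[i - 1] > val; i--)
		;
	for (j = n - 1; j >= i; j--)
		indices[j + 1] = indices[j];
	indices[i] = val;
	return (n + 1);
}

int
main(void)
{
	int deltaF[] = { 2, 5, 7 };	/* orders of vertices reachable from y */
	int deltaB[] = { 9, 3 };	/* orders of vertices that reach x */
	int pool[8];
	int i, n = 0;

	/* Pool all the order values, keeping them sorted as we go. */
	for (i = 0; i < 3; i++)
		n = add_index(pool, n, deltaF[i]);
	for (i = 0; i < 2; i++)
		n = add_index(pool, n, deltaB[i]);

	/* Hand the smallest values back to deltaB first, then deltaF, which
	 * is how graph_assign_indices() restores a valid topological order. */
	printf("deltaB gets: %d %d\n", pool[0], pool[1]);		/* 2 3 */
	printf("deltaF gets: %d %d %d\n", pool[2], pool[3], pool[4]);	/* 5 7 9 */
	return (0);
}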
+
+static int
+graph_add_edge(struct owner_graph *g, struct owner_vertex *x,
+ struct owner_vertex *y)
+{
+ struct owner_edge *e;
+ struct owner_vertex_list deltaF, deltaB;
+ int nF, nB, n, vi, i;
+ int *indices;
+
+ sx_assert(&lf_owner_graph_lock, SX_XLOCKED);
+
+ LIST_FOREACH(e, &x->v_outedges, e_outlink) {
+ if (e->e_to == y) {
+ e->e_refs++;
+ return (0);
+ }
+ }
- while (!TAILQ_EMPTY(&listhead->lf_blkhd)) {
- wakelock = TAILQ_FIRST(&listhead->lf_blkhd);
- TAILQ_REMOVE(&listhead->lf_blkhd, wakelock, lf_block);
- wakelock->lf_next = NOLOCKF;
#ifdef LOCKF_DEBUG
- if (lockf_debug & 2)
- lf_print("lf_wakelock: awakening", wakelock);
-#endif /* LOCKF_DEBUG */
- wakeup(wakelock);
+ if (lockf_debug & 8) {
+ printf("adding edge %d:", x->v_order);
+ lf_print_owner(x->v_owner);
+ printf(" -> %d:", y->v_order);
+ lf_print_owner(y->v_owner);
+ printf("\n");
}
+#endif
+ if (y->v_order < x->v_order) {
+ /*
+ * The new edge violates the order. First find the set
+ * of affected vertices reachable from y (deltaF) and
+ * the set of affected vertices that reach x
+ * (deltaB), using the graph generation number to
+ * detect whether we have visited a given vertex
+ * already. We re-order the graph so that each vertex
+ * in deltaB appears before each vertex in deltaF.
+ *
+ * If x is a member of deltaF, then the new edge would
+ * create a cycle. Otherwise, we may assume that
+ * deltaF and deltaB are disjoint.
+ */
+ g->g_gen++;
+ if (g->g_gen == 0) {
+ /*
+ * Generation wrap.
+ */
+ for (vi = 0; vi < g->g_size; vi++) {
+ g->g_vertices[vi]->v_gen = 0;
+ }
+ g->g_gen++;
+ }
+ nF = graph_delta_forward(g, x, y, &deltaF);
+ if (nF < 0) {
+#ifdef LOCKF_DEBUG
+ if (lockf_debug & 8) {
+ struct owner_vertex_list path;
+ printf("deadlock: ");
+ TAILQ_INIT(&path);
+ graph_reaches(y, x, &path);
+ graph_print_vertices(&path);
+ }
+#endif
+ return (EDEADLK);
+ }
+
+#ifdef LOCKF_DEBUG
+ if (lockf_debug & 8) {
+ printf("re-ordering graph vertices\n");
+ printf("deltaF = ");
+ graph_print_vertices(&deltaF);
+ }
+#endif
+
+ nB = graph_delta_backward(g, x, y, &deltaB);
+
+#ifdef LOCKF_DEBUG
+ if (lockf_debug & 8) {
+ printf("deltaB = ");
+ graph_print_vertices(&deltaB);
+ }
+#endif
+
+ /*
+ * We first build a set of vertex indices (vertex
+ * order values) that we may use, then we re-assign
+ * orders first to those vertices in deltaB, then to
+ * deltaF. Note that the contents of deltaF and deltaB
+ * may be partially disordered - we perform an
+ * insertion sort while building our index set.
+ */
+ indices = g->g_indexbuf;
+ n = graph_add_indices(indices, 0, &deltaF);
+ graph_add_indices(indices, n, &deltaB);
+
+ /*
+ * We must also be sure to maintain the relative
+ * ordering of deltaF and deltaB when re-assigning
+ * vertices. We do this by iteratively removing the
+ * lowest ordered element from the set and assigning
+ * it the next value from our new ordering.
+ */
+ i = graph_assign_indices(g, indices, 0, &deltaB);
+ graph_assign_indices(g, indices, i, &deltaF);
+
+#ifdef LOCKF_DEBUG
+ if (lockf_debug & 8) {
+ struct owner_vertex_list set;
+ TAILQ_INIT(&set);
+ for (i = 0; i < nB + nF; i++)
+ TAILQ_INSERT_TAIL(&set,
+ g->g_vertices[indices[i]], v_link);
+ printf("new ordering = ");
+ graph_print_vertices(&set);
+ }
+#endif
+ }
+
+ KASSERT(x->v_order < y->v_order, ("Failed to re-order graph"));
+
+#ifdef LOCKF_DEBUG
+ if (lockf_debug & 8) {
+ graph_check(g, TRUE);
+ }
+#endif
+
+ e = malloc(sizeof(struct owner_edge), M_LOCKF, M_WAITOK);
+
+ LIST_INSERT_HEAD(&x->v_outedges, e, e_outlink);
+ LIST_INSERT_HEAD(&y->v_inedges, e, e_inlink);
+ e->e_refs = 1;
+ e->e_from = x;
+ e->e_to = y;
+
+ return (0);
+}
+
+/*
+ * Remove an edge x->y from the graph.
+ */
+static void
+graph_remove_edge(struct owner_graph *g, struct owner_vertex *x,
+ struct owner_vertex *y)
+{
+ struct owner_edge *e;
+
+ sx_assert(&lf_owner_graph_lock, SX_XLOCKED);
+
+ LIST_FOREACH(e, &x->v_outedges, e_outlink) {
+ if (e->e_to == y)
+ break;
+ }
+ KASSERT(e, ("Removing non-existent edge from deadlock graph"));
+
+ e->e_refs--;
+ if (e->e_refs == 0) {
+#ifdef LOCKF_DEBUG
+ if (lockf_debug & 8) {
+ printf("removing edge %d:", x->v_order);
+ lf_print_owner(x->v_owner);
+ printf(" -> %d:", y->v_order);
+ lf_print_owner(y->v_owner);
+ printf("\n");
+ }
+#endif
+ LIST_REMOVE(e, e_outlink);
+ LIST_REMOVE(e, e_inlink);
+ free(e, M_LOCKF);
+ }
+}
+
+/*
+ * Allocate a new vertex for the given lock owner, growing the graph's
+ * vertex array if necessary.
+ */
+static struct owner_vertex *
+graph_alloc_vertex(struct owner_graph *g, struct lock_owner *lo)
+{
+ struct owner_vertex *v;
+
+ sx_assert(&lf_owner_graph_lock, SX_XLOCKED);
+
+ v = malloc(sizeof(struct owner_vertex), M_LOCKF, M_WAITOK);
+ if (g->g_size == g->g_space) {
+ g->g_vertices = realloc(g->g_vertices,
+ 2 * g->g_space * sizeof(struct owner_vertex *),
+ M_LOCKF, M_WAITOK);
+ free(g->g_indexbuf, M_LOCKF);
+ g->g_indexbuf = malloc(2 * g->g_space * sizeof(int),
+ M_LOCKF, M_WAITOK);
+ g->g_space = 2 * g->g_space;
+ }
+ v->v_order = g->g_size;
+ v->v_gen = g->g_gen;
+ g->g_vertices[g->g_size] = v;
+ g->g_size++;
+
+ LIST_INIT(&v->v_outedges);
+ LIST_INIT(&v->v_inedges);
+ v->v_owner = lo;
+
+ return (v);
+}
+
+static void
+graph_free_vertex(struct owner_graph *g, struct owner_vertex *v)
+{
+ struct owner_vertex *w;
+ int i;
+
+ sx_assert(&lf_owner_graph_lock, SX_XLOCKED);
+
+ KASSERT(LIST_EMPTY(&v->v_outedges), ("Freeing vertex with edges"));
+ KASSERT(LIST_EMPTY(&v->v_inedges), ("Freeing vertex with edges"));
+
+ /*
+ * Remove from the graph's array and close up the gap,
+ * renumbering the other vertices.
+ */
+ for (i = v->v_order + 1; i < g->g_size; i++) {
+ w = g->g_vertices[i];
+ w->v_order--;
+ g->g_vertices[i - 1] = w;
+ }
+ g->g_size--;
+
+ free(v, M_LOCKF);
+}
+
+static struct owner_graph *
+graph_init(struct owner_graph *g)
+{
+
+ g->g_vertices = malloc(10 * sizeof(struct owner_vertex *),
+ M_LOCKF, M_WAITOK);
+ g->g_size = 0;
+ g->g_space = 10;
+ g->g_indexbuf = malloc(g->g_space * sizeof(int), M_LOCKF, M_WAITOK);
+ g->g_gen = 0;
+
+ return (g);
}
#ifdef LOCKF_DEBUG
/*
+ * Print description of a lock owner
+ */
+static void
+lf_print_owner(struct lock_owner *lo)
+{
+
+ if (lo->lo_flags & F_REMOTE) {
+ printf("remote pid %d, system %d",
+ lo->lo_pid, lo->lo_sysid);
+ } else if (lo->lo_flags & F_FLOCK) {
+ printf("file %p", lo->lo_id);
+ } else {
+ printf("local pid %d", lo->lo_pid);
+ }
+}
+
+/*
* Print out a lock.
*/
static void
-lf_print(tag, lock)
- char *tag;
- register struct lockf *lock;
+lf_print(char *tag, struct lockf_entry *lock)
{
printf("%s: lock %p for ", tag, (void *)lock);
- if (lock->lf_flags & F_POSIX)
- printf("proc %ld", (long)((struct proc *)lock->lf_id)->p_pid);
- else
- printf("id %p", (void *)lock->lf_id);
+ lf_print_owner(lock->lf_owner);
if (lock->lf_inode != (struct inode *)0)
- printf(" in ino %ju on dev <%s>, %s, start %jd, end %jd",
+ printf(" in ino %ju on dev <%s>,",
(uintmax_t)lock->lf_inode->i_number,
- devtoname(lock->lf_inode->i_dev),
- lock->lf_type == F_RDLCK ? "shared" :
- lock->lf_type == F_WRLCK ? "exclusive" :
- lock->lf_type == F_UNLCK ? "unlock" : "unknown",
- (intmax_t)lock->lf_start, (intmax_t)lock->lf_end);
+ devtoname(lock->lf_inode->i_dev));
+ printf(" %s, start %jd, end ",
+ lock->lf_type == F_RDLCK ? "shared" :
+ lock->lf_type == F_WRLCK ? "exclusive" :
+ lock->lf_type == F_UNLCK ? "unlock" : "unknown",
+ (intmax_t)lock->lf_start);
+ if (lock->lf_end == OFF_MAX)
+ printf("EOF");
else
- printf(" %s, start %jd, end %jd",
- lock->lf_type == F_RDLCK ? "shared" :
- lock->lf_type == F_WRLCK ? "exclusive" :
- lock->lf_type == F_UNLCK ? "unlock" : "unknown",
- (intmax_t)lock->lf_start, (intmax_t)lock->lf_end);
- if (!TAILQ_EMPTY(&lock->lf_blkhd))
- printf(" block %p\n", (void *)TAILQ_FIRST(&lock->lf_blkhd));
+ printf("%jd", (intmax_t)lock->lf_end);
+ if (!LIST_EMPTY(&lock->lf_outedges))
+ printf(" block %p\n",
+ (void *)LIST_FIRST(&lock->lf_outedges)->le_to);
else
printf("\n");
}
static void
-lf_printlist(tag, lock)
- char *tag;
- struct lockf *lock;
+lf_printlist(char *tag, struct lockf_entry *lock)
{
- register struct lockf *lf, *blk;
+ struct lockf_entry *lf, *blk;
+ struct lockf_edge *e;
if (lock->lf_inode == (struct inode *)0)
return;
@@ -859,32 +2289,25 @@ lf_printlist(tag, lock)
printf("%s: Lock list for ino %ju on dev <%s>:\n",
tag, (uintmax_t)lock->lf_inode->i_number,
devtoname(lock->lf_inode->i_dev));
- for (lf = lock->lf_inode->i_lockf; lf; lf = lf->lf_next) {
+ LIST_FOREACH(lf, &lock->lf_inode->i_lockf->ls_active, lf_link) {
printf("\tlock %p for ",(void *)lf);
- if (lf->lf_flags & F_POSIX)
- printf("proc %ld",
- (long)((struct proc *)lf->lf_id)->p_pid);
- else
- printf("id %p", (void *)lf->lf_id);
+ lf_print_owner(lock->lf_owner);
printf(", %s, start %jd, end %jd",
lf->lf_type == F_RDLCK ? "shared" :
lf->lf_type == F_WRLCK ? "exclusive" :
lf->lf_type == F_UNLCK ? "unlock" :
"unknown", (intmax_t)lf->lf_start, (intmax_t)lf->lf_end);
- TAILQ_FOREACH(blk, &lf->lf_blkhd, lf_block) {
+ LIST_FOREACH(e, &lf->lf_outedges, le_outlink) {
+ blk = e->le_to;
printf("\n\t\tlock request %p for ", (void *)blk);
- if (blk->lf_flags & F_POSIX)
- printf("proc %ld",
- (long)((struct proc *)blk->lf_id)->p_pid);
- else
- printf("id %p", (void *)blk->lf_id);
+ lf_print_owner(blk->lf_owner);
printf(", %s, start %jd, end %jd",
blk->lf_type == F_RDLCK ? "shared" :
blk->lf_type == F_WRLCK ? "exclusive" :
blk->lf_type == F_UNLCK ? "unlock" :
"unknown", (intmax_t)blk->lf_start,
(intmax_t)blk->lf_end);
- if (!TAILQ_EMPTY(&blk->lf_blkhd))
+ if (!LIST_EMPTY(&blk->lf_inedges))
panic("lf_printlist: bad list");
}
printf("\n");
diff --git a/sys/kern/syscalls.master b/sys/kern/syscalls.master
index 508feaa..ed32611 100644
--- a/sys/kern/syscalls.master
+++ b/sys/kern/syscalls.master
@@ -297,7 +297,8 @@
151 AUE_NULL UNIMPL sem_lock (BSD/OS 2.x)
152 AUE_NULL UNIMPL sem_wakeup (BSD/OS 2.x)
153 AUE_NULL UNIMPL asyncdaemon (BSD/OS 2.x)
-154 AUE_NULL UNIMPL nosys
+; 154 is initialized by the NLM code, if present.
+154 AUE_NULL NOSTD { int nlm_syscall(int debug_level, int grace_period, int addr_count, char **addrs); }
; 155 is initialized by the NFS code, if present.
155 AUE_NFS_SVC NOSTD { int nfssvc(int flag, caddr_t argp); }
156 AUE_GETDIRENTRIES COMPAT { int getdirentries(int fd, char *buf, \
diff --git a/sys/kern/vnode_if.src b/sys/kern/vnode_if.src
index b06e64f..82156bf 100644
--- a/sys/kern/vnode_if.src
+++ b/sys/kern/vnode_if.src
@@ -437,6 +437,19 @@ vop_advlock {
};
+%% advlockasync vp U U U
+
+vop_advlockasync {
+ IN struct vnode *vp;
+ IN void *id;
+ IN int op;
+ IN struct flock *fl;
+ IN int flags;
+ IN struct task *task;
+ INOUT void **cookiep;
+};
+
+
%% reallocblks vp E E E
vop_reallocblks {
diff --git a/sys/nfs4client/nfs4_vnops.c b/sys/nfs4client/nfs4_vnops.c
index 0d6ad9a..633f1c3 100644
--- a/sys/nfs4client/nfs4_vnops.c
+++ b/sys/nfs4client/nfs4_vnops.c
@@ -157,6 +157,7 @@ static int nfs4_sillyrename(struct vnode *, struct vnode *,
static vop_readlink_t nfs4_readlink;
static vop_print_t nfs4_print;
static vop_advlock_t nfs4_advlock;
+static vop_advlockasync_t nfs4_advlockasync;
/*
* Global vfs data structures for nfs
@@ -165,6 +166,7 @@ struct vop_vector nfs4_vnodeops = {
.vop_default = &default_vnodeops,
.vop_access = nfs4_access,
.vop_advlock = nfs4_advlock,
+ .vop_advlockasync = nfs4_advlockasync,
.vop_close = nfs4_close,
.vop_create = nfs4_create,
.vop_fsync = nfs4_fsync,
@@ -2772,6 +2774,22 @@ nfs4_advlock(struct vop_advlock_args *ap)
}
/*
+ * NFS advisory byte-level locks.
+ */
+static int
+nfs4_advlockasync(struct vop_advlockasync_args *ap)
+{
+ return (EPERM);
+
+ if ((VFSTONFS(ap->a_vp->v_mount)->nm_flag & NFSMNT_NOLOCKD) != 0) {
+ struct nfsnode *np = VTONFS(ap->a_vp);
+
+ return (lf_advlockasync(ap, &(np->n_lockf), np->n_size));
+ }
+ return (EOPNOTSUPP);
+}
+
+/*
* Print out the contents of an nfsnode.
*/
static int
diff --git a/sys/nfsclient/nfs_lock.c b/sys/nfsclient/nfs_lock.c
index be334c7..bdb20d8 100644
--- a/sys/nfsclient/nfs_lock.c
+++ b/sys/nfsclient/nfs_lock.c
@@ -324,6 +324,7 @@ nfs_dolock(struct vop_advlock_args *ap)
if (msg.lm_getlk && p->p_nlminfo->retcode == 0) {
if (p->p_nlminfo->set_getlk_pid) {
+ fl->l_sysid = 0; /* XXX */
fl->l_pid = p->p_nlminfo->getlk_pid;
} else {
fl->l_type = F_UNLCK;
diff --git a/sys/nfsclient/nfs_vnops.c b/sys/nfsclient/nfs_vnops.c
index 2b90abd..14a50c1 100644
--- a/sys/nfsclient/nfs_vnops.c
+++ b/sys/nfsclient/nfs_vnops.c
@@ -129,6 +129,7 @@ static vop_access_t nfsspec_access;
static vop_readlink_t nfs_readlink;
static vop_print_t nfs_print;
static vop_advlock_t nfs_advlock;
+static vop_advlockasync_t nfs_advlockasync;
/*
* Global vfs data structures for nfs
@@ -137,6 +138,7 @@ struct vop_vector nfs_vnodeops = {
.vop_default = &default_vnodeops,
.vop_access = nfs_access,
.vop_advlock = nfs_advlock,
+ .vop_advlockasync = nfs_advlockasync,
.vop_close = nfs_close,
.vop_create = nfs_create,
.vop_fsync = nfs_fsync,
@@ -3052,6 +3054,27 @@ out:
}
/*
+ * NFS advisory byte-level locks.
+ */
+static int
+nfs_advlockasync(struct vop_advlockasync_args *ap)
+{
+ int error;
+
+ mtx_lock(&Giant);
+ if ((VFSTONFS(ap->a_vp->v_mount)->nm_flag & NFSMNT_NOLOCKD) != 0) {
+ struct nfsnode *np = VTONFS(ap->a_vp);
+
+ error = lf_advlockasync(ap, &(np->n_lockf), np->n_size);
+ goto out;
+ }
+ error = EOPNOTSUPP;
+out:
+ mtx_unlock(&Giant);
+ return (error);
+}
+
+/*
* Print out the contents of an nfsnode.
*/
static int
diff --git a/sys/nlm/nlm.h b/sys/nlm/nlm.h
new file mode 100644
index 0000000..32bb974
--- /dev/null
+++ b/sys/nlm/nlm.h
@@ -0,0 +1,119 @@
+/*-
+ * Copyright (c) 2008 Isilon Inc http://www.isilon.com/
+ * Authors: Doug Rabson <dfr@rabson.org>
+ * Developed with Red Inc: Alfred Perlstein <alfred@freebsd.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _NLM_NLM_H_
+#define _NLM_NLM_H_
+
+#ifdef _KERNEL
+
+#ifdef _SYS_MALLOC_H_
+MALLOC_DECLARE(M_NLM);
+#endif
+
+struct nlm_host;
+
+/*
+ * Copy a struct netobj.
+ */
+extern void nlm_copy_netobj(struct netobj *dst, struct netobj *src,
+ struct malloc_type *type);
+
+/*
+ * Search for an existing NLM host that matches the given name
+ * (typically the caller_name element of an nlm4_lock). If none is
+ * found, create a new host. If 'rqstp' is non-NULL, record the remote
+ * address of the host so that we can call it back for async
+ * responses.
+ */
+extern struct nlm_host *nlm_find_host_by_name(const char *name,
+ struct svc_req *rqstp);
+
+/*
+ * Search for an existing NLM host that matches the given remote
+ * address. If none is found, create a new host with the requested
+ * address and remember 'vers' as the NLM protocol version to use for
+ * that host.
+ */
+extern struct nlm_host *nlm_find_host_by_addr(const struct sockaddr *addr,
+ int vers);
+
+/*
+ * Return an RPC client handle that can be used to talk to the NLM
+ * running on the given host.
+ */
+extern CLIENT *nlm_host_get_rpc(struct nlm_host *host);
+
+/*
+ * Called when a host restarts.
+ */
+extern void nlm_sm_notify(nlm_sm_status *argp);
+
+/*
+ * Implementation for lock testing RPCs. Returns the NLM host that
+ * matches the RPC arguments.
+ */
+extern struct nlm_host *nlm_do_test(nlm4_testargs *argp,
+ nlm4_testres *result, struct svc_req *rqstp);
+
+/*
+ * Implementation for lock setting RPCs. Returns the NLM host that
+ * matches the RPC arguments. If monitor is TRUE, set up an NSM
+ * monitor for this host.
+ */
+extern struct nlm_host *nlm_do_lock(nlm4_lockargs *argp,
+ nlm4_res *result, struct svc_req *rqstp, bool_t monitor);
+
+/*
+ * Implementation for cancelling a pending lock request. Returns the
+ * NLM host that matches the RPC arguments.
+ */
+extern struct nlm_host *nlm_do_cancel(nlm4_cancargs *argp,
+ nlm4_res *result, struct svc_req *rqstp);
+
+/*
+ * Implementation for unlocking RPCs. Returns the NLM host that
+ * matches the RPC arguments.
+ */
+extern struct nlm_host *nlm_do_unlock(nlm4_unlockargs *argp,
+ nlm4_res *result, struct svc_req *rqstp);
+
+/*
+ * Free all locks associated with the hostname argp->name.
+ */
+extern void nlm_do_free_all(nlm4_notify *argp);
+
+/*
+ * Find an RPC transport that can be used to communicate with the
+ * userland part of lockd.
+ */
+extern CLIENT *nlm_user_lockd(void);
+
+#endif
+
+#endif
diff --git a/sys/nlm/nlm_prot.h b/sys/nlm/nlm_prot.h
new file mode 100644
index 0000000..6197189
--- /dev/null
+++ b/sys/nlm/nlm_prot.h
@@ -0,0 +1,448 @@
+/*
+ * Please do not edit this file.
+ * It was generated using rpcgen.
+ */
+/* $FreeBSD$ */
+
+#ifndef _NLM_PROT_H_RPCGEN
+#define _NLM_PROT_H_RPCGEN
+
+#include <rpc/rpc.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define LM_MAXSTRLEN 1024
+#define MAXNAMELEN LM_MAXSTRLEN+1
+
+enum nlm_stats {
+ nlm_granted = 0,
+ nlm_denied = 1,
+ nlm_denied_nolocks = 2,
+ nlm_blocked = 3,
+ nlm_denied_grace_period = 4,
+ nlm_deadlck = 5
+};
+typedef enum nlm_stats nlm_stats;
+
+struct nlm_holder {
+ bool_t exclusive;
+ int svid;
+ netobj oh;
+ u_int l_offset;
+ u_int l_len;
+};
+typedef struct nlm_holder nlm_holder;
+
+struct nlm_testrply {
+ nlm_stats stat;
+ union {
+ struct nlm_holder holder;
+ } nlm_testrply_u;
+};
+typedef struct nlm_testrply nlm_testrply;
+
+struct nlm_stat {
+ nlm_stats stat;
+};
+typedef struct nlm_stat nlm_stat;
+
+struct nlm_res {
+ netobj cookie;
+ nlm_stat stat;
+};
+typedef struct nlm_res nlm_res;
+
+struct nlm_testres {
+ netobj cookie;
+ nlm_testrply stat;
+};
+typedef struct nlm_testres nlm_testres;
+
+struct nlm_lock {
+ char *caller_name;
+ netobj fh;
+ netobj oh;
+ int svid;
+ u_int l_offset;
+ u_int l_len;
+};
+typedef struct nlm_lock nlm_lock;
+
+struct nlm_lockargs {
+ netobj cookie;
+ bool_t block;
+ bool_t exclusive;
+ struct nlm_lock alock;
+ bool_t reclaim;
+ int state;
+};
+typedef struct nlm_lockargs nlm_lockargs;
+
+struct nlm_cancargs {
+ netobj cookie;
+ bool_t block;
+ bool_t exclusive;
+ struct nlm_lock alock;
+};
+typedef struct nlm_cancargs nlm_cancargs;
+
+struct nlm_testargs {
+ netobj cookie;
+ bool_t exclusive;
+ struct nlm_lock alock;
+};
+typedef struct nlm_testargs nlm_testargs;
+
+struct nlm_unlockargs {
+ netobj cookie;
+ struct nlm_lock alock;
+};
+typedef struct nlm_unlockargs nlm_unlockargs;
+/*
+ * The following enums are actually bit encoded for efficient
+ * boolean algebra.... DON'T change them.....
+ */
+
+enum fsh_mode {
+ fsm_DN = 0,
+ fsm_DR = 1,
+ fsm_DW = 2,
+ fsm_DRW = 3
+};
+typedef enum fsh_mode fsh_mode;
+
+enum fsh_access {
+ fsa_NONE = 0,
+ fsa_R = 1,
+ fsa_W = 2,
+ fsa_RW = 3
+};
+typedef enum fsh_access fsh_access;
+
+struct nlm_share {
+ char *caller_name;
+ netobj fh;
+ netobj oh;
+ fsh_mode mode;
+ fsh_access access;
+};
+typedef struct nlm_share nlm_share;
+
+struct nlm_shareargs {
+ netobj cookie;
+ nlm_share share;
+ bool_t reclaim;
+};
+typedef struct nlm_shareargs nlm_shareargs;
+
+struct nlm_shareres {
+ netobj cookie;
+ nlm_stats stat;
+ int sequence;
+};
+typedef struct nlm_shareres nlm_shareres;
+
+struct nlm_notify {
+ char *name;
+ long state;
+};
+typedef struct nlm_notify nlm_notify;
+/* definitions for NLM version 4 */
+
+enum nlm4_stats {
+ nlm4_granted = 0,
+ nlm4_denied = 1,
+ nlm4_denied_nolocks = 2,
+ nlm4_blocked = 3,
+ nlm4_denied_grace_period = 4,
+ nlm4_deadlck = 5,
+ nlm4_rofs = 6,
+ nlm4_stale_fh = 7,
+ nlm4_fbig = 8,
+ nlm4_failed = 9
+};
+typedef enum nlm4_stats nlm4_stats;
+
+struct nlm4_stat {
+ nlm4_stats stat;
+};
+typedef struct nlm4_stat nlm4_stat;
+
+struct nlm4_holder {
+ bool_t exclusive;
+ u_int32_t svid;
+ netobj oh;
+ u_int64_t l_offset;
+ u_int64_t l_len;
+};
+typedef struct nlm4_holder nlm4_holder;
+
+struct nlm4_lock {
+ char *caller_name;
+ netobj fh;
+ netobj oh;
+ u_int32_t svid;
+ u_int64_t l_offset;
+ u_int64_t l_len;
+};
+typedef struct nlm4_lock nlm4_lock;
+
+struct nlm4_share {
+ char *caller_name;
+ netobj fh;
+ netobj oh;
+ fsh_mode mode;
+ fsh_access access;
+};
+typedef struct nlm4_share nlm4_share;
+
+struct nlm4_testrply {
+ nlm4_stats stat;
+ union {
+ struct nlm4_holder holder;
+ } nlm4_testrply_u;
+};
+typedef struct nlm4_testrply nlm4_testrply;
+
+struct nlm4_testres {
+ netobj cookie;
+ nlm4_testrply stat;
+};
+typedef struct nlm4_testres nlm4_testres;
+
+struct nlm4_testargs {
+ netobj cookie;
+ bool_t exclusive;
+ struct nlm4_lock alock;
+};
+typedef struct nlm4_testargs nlm4_testargs;
+
+struct nlm4_res {
+ netobj cookie;
+ nlm4_stat stat;
+};
+typedef struct nlm4_res nlm4_res;
+
+struct nlm4_lockargs {
+ netobj cookie;
+ bool_t block;
+ bool_t exclusive;
+ struct nlm4_lock alock;
+ bool_t reclaim;
+ int state;
+};
+typedef struct nlm4_lockargs nlm4_lockargs;
+
+struct nlm4_cancargs {
+ netobj cookie;
+ bool_t block;
+ bool_t exclusive;
+ struct nlm4_lock alock;
+};
+typedef struct nlm4_cancargs nlm4_cancargs;
+
+struct nlm4_unlockargs {
+ netobj cookie;
+ struct nlm4_lock alock;
+};
+typedef struct nlm4_unlockargs nlm4_unlockargs;
+
+struct nlm4_shareargs {
+ netobj cookie;
+ nlm4_share share;
+ bool_t reclaim;
+};
+typedef struct nlm4_shareargs nlm4_shareargs;
+
+struct nlm4_shareres {
+ netobj cookie;
+ nlm4_stats stat;
+ int sequence;
+};
+typedef struct nlm4_shareres nlm4_shareres;
+
+struct nlm_sm_status {
+ char *mon_name;
+ int state;
+ char priv[16];
+};
+typedef struct nlm_sm_status nlm_sm_status;
+
+struct nlm4_notify {
+ char *name;
+ int32_t state;
+};
+typedef struct nlm4_notify nlm4_notify;
+
+#define NLM_PROG ((unsigned long)(100021))
+#define NLM_SM ((unsigned long)(0))
+
+#define NLM_SM_NOTIFY ((unsigned long)(1))
+extern enum clnt_stat nlm_sm_notify_0(struct nlm_sm_status *, void *, CLIENT *);
+extern bool_t nlm_sm_notify_0_svc(struct nlm_sm_status *, void *, struct svc_req *);
+#define NLM_VERS ((unsigned long)(1))
+
+#define NLM_TEST ((unsigned long)(1))
+extern enum clnt_stat nlm_test_1(struct nlm_testargs *, nlm_testres *, CLIENT *);
+extern bool_t nlm_test_1_svc(struct nlm_testargs *, nlm_testres *, struct svc_req *);
+#define NLM_LOCK ((unsigned long)(2))
+extern enum clnt_stat nlm_lock_1(struct nlm_lockargs *, nlm_res *, CLIENT *);
+extern bool_t nlm_lock_1_svc(struct nlm_lockargs *, nlm_res *, struct svc_req *);
+#define NLM_CANCEL ((unsigned long)(3))
+extern enum clnt_stat nlm_cancel_1(struct nlm_cancargs *, nlm_res *, CLIENT *);
+extern bool_t nlm_cancel_1_svc(struct nlm_cancargs *, nlm_res *, struct svc_req *);
+#define NLM_UNLOCK ((unsigned long)(4))
+extern enum clnt_stat nlm_unlock_1(struct nlm_unlockargs *, nlm_res *, CLIENT *);
+extern bool_t nlm_unlock_1_svc(struct nlm_unlockargs *, nlm_res *, struct svc_req *);
+#define NLM_GRANTED ((unsigned long)(5))
+extern enum clnt_stat nlm_granted_1(struct nlm_testargs *, nlm_res *, CLIENT *);
+extern bool_t nlm_granted_1_svc(struct nlm_testargs *, nlm_res *, struct svc_req *);
+#define NLM_TEST_MSG ((unsigned long)(6))
+extern enum clnt_stat nlm_test_msg_1(struct nlm_testargs *, void *, CLIENT *);
+extern bool_t nlm_test_msg_1_svc(struct nlm_testargs *, void *, struct svc_req *);
+#define NLM_LOCK_MSG ((unsigned long)(7))
+extern enum clnt_stat nlm_lock_msg_1(struct nlm_lockargs *, void *, CLIENT *);
+extern bool_t nlm_lock_msg_1_svc(struct nlm_lockargs *, void *, struct svc_req *);
+#define NLM_CANCEL_MSG ((unsigned long)(8))
+extern enum clnt_stat nlm_cancel_msg_1(struct nlm_cancargs *, void *, CLIENT *);
+extern bool_t nlm_cancel_msg_1_svc(struct nlm_cancargs *, void *, struct svc_req *);
+#define NLM_UNLOCK_MSG ((unsigned long)(9))
+extern enum clnt_stat nlm_unlock_msg_1(struct nlm_unlockargs *, void *, CLIENT *);
+extern bool_t nlm_unlock_msg_1_svc(struct nlm_unlockargs *, void *, struct svc_req *);
+#define NLM_GRANTED_MSG ((unsigned long)(10))
+extern enum clnt_stat nlm_granted_msg_1(struct nlm_testargs *, void *, CLIENT *);
+extern bool_t nlm_granted_msg_1_svc(struct nlm_testargs *, void *, struct svc_req *);
+#define NLM_TEST_RES ((unsigned long)(11))
+extern enum clnt_stat nlm_test_res_1(nlm_testres *, void *, CLIENT *);
+extern bool_t nlm_test_res_1_svc(nlm_testres *, void *, struct svc_req *);
+#define NLM_LOCK_RES ((unsigned long)(12))
+extern enum clnt_stat nlm_lock_res_1(nlm_res *, void *, CLIENT *);
+extern bool_t nlm_lock_res_1_svc(nlm_res *, void *, struct svc_req *);
+#define NLM_CANCEL_RES ((unsigned long)(13))
+extern enum clnt_stat nlm_cancel_res_1(nlm_res *, void *, CLIENT *);
+extern bool_t nlm_cancel_res_1_svc(nlm_res *, void *, struct svc_req *);
+#define NLM_UNLOCK_RES ((unsigned long)(14))
+extern enum clnt_stat nlm_unlock_res_1(nlm_res *, void *, CLIENT *);
+extern bool_t nlm_unlock_res_1_svc(nlm_res *, void *, struct svc_req *);
+#define NLM_GRANTED_RES ((unsigned long)(15))
+extern enum clnt_stat nlm_granted_res_1(nlm_res *, void *, CLIENT *);
+extern bool_t nlm_granted_res_1_svc(nlm_res *, void *, struct svc_req *);
+extern int nlm_prog_1_freeresult(SVCXPRT *, xdrproc_t, caddr_t);
+#define NLM_VERSX ((unsigned long)(3))
+
+#define NLM_SHARE ((unsigned long)(20))
+extern enum clnt_stat nlm_share_3(nlm_shareargs *, nlm_shareres *, CLIENT *);
+extern bool_t nlm_share_3_svc(nlm_shareargs *, nlm_shareres *, struct svc_req *);
+#define NLM_UNSHARE ((unsigned long)(21))
+extern enum clnt_stat nlm_unshare_3(nlm_shareargs *, nlm_shareres *, CLIENT *);
+extern bool_t nlm_unshare_3_svc(nlm_shareargs *, nlm_shareres *, struct svc_req *);
+#define NLM_NM_LOCK ((unsigned long)(22))
+extern enum clnt_stat nlm_nm_lock_3(nlm_lockargs *, nlm_res *, CLIENT *);
+extern bool_t nlm_nm_lock_3_svc(nlm_lockargs *, nlm_res *, struct svc_req *);
+#define NLM_FREE_ALL ((unsigned long)(23))
+extern enum clnt_stat nlm_free_all_3(nlm_notify *, void *, CLIENT *);
+extern bool_t nlm_free_all_3_svc(nlm_notify *, void *, struct svc_req *);
+extern int nlm_prog_3_freeresult(SVCXPRT *, xdrproc_t, caddr_t);
+#define NLM_VERS4 ((unsigned long)(4))
+
+#define NLM4_TEST ((unsigned long)(1))
+extern enum clnt_stat nlm4_test_4(nlm4_testargs *, nlm4_testres *, CLIENT *);
+extern bool_t nlm4_test_4_svc(nlm4_testargs *, nlm4_testres *, struct svc_req *);
+#define NLM4_LOCK ((unsigned long)(2))
+extern enum clnt_stat nlm4_lock_4(nlm4_lockargs *, nlm4_res *, CLIENT *);
+extern bool_t nlm4_lock_4_svc(nlm4_lockargs *, nlm4_res *, struct svc_req *);
+#define NLM4_CANCEL ((unsigned long)(3))
+extern enum clnt_stat nlm4_cancel_4(nlm4_cancargs *, nlm4_res *, CLIENT *);
+extern bool_t nlm4_cancel_4_svc(nlm4_cancargs *, nlm4_res *, struct svc_req *);
+#define NLM4_UNLOCK ((unsigned long)(4))
+extern enum clnt_stat nlm4_unlock_4(nlm4_unlockargs *, nlm4_res *, CLIENT *);
+extern bool_t nlm4_unlock_4_svc(nlm4_unlockargs *, nlm4_res *, struct svc_req *);
+#define NLM4_GRANTED ((unsigned long)(5))
+extern enum clnt_stat nlm4_granted_4(nlm4_testargs *, nlm4_res *, CLIENT *);
+extern bool_t nlm4_granted_4_svc(nlm4_testargs *, nlm4_res *, struct svc_req *);
+#define NLM4_TEST_MSG ((unsigned long)(6))
+extern enum clnt_stat nlm4_test_msg_4(nlm4_testargs *, void *, CLIENT *);
+extern bool_t nlm4_test_msg_4_svc(nlm4_testargs *, void *, struct svc_req *);
+#define NLM4_LOCK_MSG ((unsigned long)(7))
+extern enum clnt_stat nlm4_lock_msg_4(nlm4_lockargs *, void *, CLIENT *);
+extern bool_t nlm4_lock_msg_4_svc(nlm4_lockargs *, void *, struct svc_req *);
+#define NLM4_CANCEL_MSG ((unsigned long)(8))
+extern enum clnt_stat nlm4_cancel_msg_4(nlm4_cancargs *, void *, CLIENT *);
+extern bool_t nlm4_cancel_msg_4_svc(nlm4_cancargs *, void *, struct svc_req *);
+#define NLM4_UNLOCK_MSG ((unsigned long)(9))
+extern enum clnt_stat nlm4_unlock_msg_4(nlm4_unlockargs *, void *, CLIENT *);
+extern bool_t nlm4_unlock_msg_4_svc(nlm4_unlockargs *, void *, struct svc_req *);
+#define NLM4_GRANTED_MSG ((unsigned long)(10))
+extern enum clnt_stat nlm4_granted_msg_4(nlm4_testargs *, void *, CLIENT *);
+extern bool_t nlm4_granted_msg_4_svc(nlm4_testargs *, void *, struct svc_req *);
+#define NLM4_TEST_RES ((unsigned long)(11))
+extern enum clnt_stat nlm4_test_res_4(nlm4_testres *, void *, CLIENT *);
+extern bool_t nlm4_test_res_4_svc(nlm4_testres *, void *, struct svc_req *);
+#define NLM4_LOCK_RES ((unsigned long)(12))
+extern enum clnt_stat nlm4_lock_res_4(nlm4_res *, void *, CLIENT *);
+extern bool_t nlm4_lock_res_4_svc(nlm4_res *, void *, struct svc_req *);
+#define NLM4_CANCEL_RES ((unsigned long)(13))
+extern enum clnt_stat nlm4_cancel_res_4(nlm4_res *, void *, CLIENT *);
+extern bool_t nlm4_cancel_res_4_svc(nlm4_res *, void *, struct svc_req *);
+#define NLM4_UNLOCK_RES ((unsigned long)(14))
+extern enum clnt_stat nlm4_unlock_res_4(nlm4_res *, void *, CLIENT *);
+extern bool_t nlm4_unlock_res_4_svc(nlm4_res *, void *, struct svc_req *);
+#define NLM4_GRANTED_RES ((unsigned long)(15))
+extern enum clnt_stat nlm4_granted_res_4(nlm4_res *, void *, CLIENT *);
+extern bool_t nlm4_granted_res_4_svc(nlm4_res *, void *, struct svc_req *);
+#define NLM4_SHARE ((unsigned long)(20))
+extern enum clnt_stat nlm4_share_4(nlm4_shareargs *, nlm4_shareres *, CLIENT *);
+extern bool_t nlm4_share_4_svc(nlm4_shareargs *, nlm4_shareres *, struct svc_req *);
+#define NLM4_UNSHARE ((unsigned long)(21))
+extern enum clnt_stat nlm4_unshare_4(nlm4_shareargs *, nlm4_shareres *, CLIENT *);
+extern bool_t nlm4_unshare_4_svc(nlm4_shareargs *, nlm4_shareres *, struct svc_req *);
+#define NLM4_NM_LOCK ((unsigned long)(22))
+extern enum clnt_stat nlm4_nm_lock_4(nlm4_lockargs *, nlm4_res *, CLIENT *);
+extern bool_t nlm4_nm_lock_4_svc(nlm4_lockargs *, nlm4_res *, struct svc_req *);
+#define NLM4_FREE_ALL ((unsigned long)(23))
+extern enum clnt_stat nlm4_free_all_4(nlm4_notify *, void *, CLIENT *);
+extern bool_t nlm4_free_all_4_svc(nlm4_notify *, void *, struct svc_req *);
+extern int nlm_prog_4_freeresult(SVCXPRT *, xdrproc_t, caddr_t);
+
+/* the xdr functions */
+extern bool_t xdr_nlm_stats(XDR *, nlm_stats*);
+extern bool_t xdr_nlm_holder(XDR *, nlm_holder*);
+extern bool_t xdr_nlm_testrply(XDR *, nlm_testrply*);
+extern bool_t xdr_nlm_stat(XDR *, nlm_stat*);
+extern bool_t xdr_nlm_res(XDR *, nlm_res*);
+extern bool_t xdr_nlm_testres(XDR *, nlm_testres*);
+extern bool_t xdr_nlm_lock(XDR *, nlm_lock*);
+extern bool_t xdr_nlm_lockargs(XDR *, nlm_lockargs*);
+extern bool_t xdr_nlm_cancargs(XDR *, nlm_cancargs*);
+extern bool_t xdr_nlm_testargs(XDR *, nlm_testargs*);
+extern bool_t xdr_nlm_unlockargs(XDR *, nlm_unlockargs*);
+extern bool_t xdr_fsh_mode(XDR *, fsh_mode*);
+extern bool_t xdr_fsh_access(XDR *, fsh_access*);
+extern bool_t xdr_nlm_share(XDR *, nlm_share*);
+extern bool_t xdr_nlm_shareargs(XDR *, nlm_shareargs*);
+extern bool_t xdr_nlm_shareres(XDR *, nlm_shareres*);
+extern bool_t xdr_nlm_notify(XDR *, nlm_notify*);
+extern bool_t xdr_nlm4_stats(XDR *, nlm4_stats*);
+extern bool_t xdr_nlm4_stat(XDR *, nlm4_stat*);
+extern bool_t xdr_nlm4_holder(XDR *, nlm4_holder*);
+extern bool_t xdr_nlm4_lock(XDR *, nlm4_lock*);
+extern bool_t xdr_nlm4_share(XDR *, nlm4_share*);
+extern bool_t xdr_nlm4_testrply(XDR *, nlm4_testrply*);
+extern bool_t xdr_nlm4_testres(XDR *, nlm4_testres*);
+extern bool_t xdr_nlm4_testargs(XDR *, nlm4_testargs*);
+extern bool_t xdr_nlm4_res(XDR *, nlm4_res*);
+extern bool_t xdr_nlm4_lockargs(XDR *, nlm4_lockargs*);
+extern bool_t xdr_nlm4_cancargs(XDR *, nlm4_cancargs*);
+extern bool_t xdr_nlm4_unlockargs(XDR *, nlm4_unlockargs*);
+extern bool_t xdr_nlm4_shareargs(XDR *, nlm4_shareargs*);
+extern bool_t xdr_nlm4_shareres(XDR *, nlm4_shareres*);
+extern bool_t xdr_nlm_sm_status(XDR *, nlm_sm_status*);
+extern bool_t xdr_nlm4_notify(XDR *, nlm4_notify*);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !_NLM_PROT_H_RPCGEN */
diff --git a/sys/nlm/nlm_prot_clnt.c b/sys/nlm/nlm_prot_clnt.c
new file mode 100644
index 0000000..b3ae5d8
--- /dev/null
+++ b/sys/nlm/nlm_prot_clnt.c
@@ -0,0 +1,372 @@
+/*
+ * Please do not edit this file.
+ * It was generated using rpcgen.
+ */
+
+#include <sys/param.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/systm.h>
+
+#include "nlm_prot.h"
+#include <sys/cdefs.h>
+#ifndef lint
+/*static char sccsid[] = "from: @(#)nlm_prot.x 1.8 87/09/21 Copyr 1987 Sun Micro";*/
+/*static char sccsid[] = "from: * @(#)nlm_prot.x 2.1 88/08/01 4.0 RPCSRC";*/
+__RCSID("$NetBSD: nlm_prot.x,v 1.6 2000/06/07 14:30:15 bouyer Exp $");
+#endif /* not lint */
+__FBSDID("$FreeBSD$");
+
+/* Default timeout can be changed using clnt_control() */
+static struct timeval TIMEOUT = { 25, 0 };
+
+enum clnt_stat
+nlm_sm_notify_0(struct nlm_sm_status *argp, void *clnt_res, CLIENT *clnt)
+{
+ return (clnt_call(clnt, NLM_SM_NOTIFY,
+ (xdrproc_t) xdr_nlm_sm_status, (caddr_t) argp,
+ (xdrproc_t) xdr_void, (caddr_t) clnt_res,
+ TIMEOUT));
+}
+
+enum clnt_stat
+nlm_test_1(struct nlm_testargs *argp, nlm_testres *clnt_res, CLIENT *clnt)
+{
+ return (clnt_call(clnt, NLM_TEST,
+ (xdrproc_t) xdr_nlm_testargs, (caddr_t) argp,
+ (xdrproc_t) xdr_nlm_testres, (caddr_t) clnt_res,
+ TIMEOUT));
+}
+
+enum clnt_stat
+nlm_lock_1(struct nlm_lockargs *argp, nlm_res *clnt_res, CLIENT *clnt)
+{
+ return (clnt_call(clnt, NLM_LOCK,
+ (xdrproc_t) xdr_nlm_lockargs, (caddr_t) argp,
+ (xdrproc_t) xdr_nlm_res, (caddr_t) clnt_res,
+ TIMEOUT));
+}
+
+enum clnt_stat
+nlm_cancel_1(struct nlm_cancargs *argp, nlm_res *clnt_res, CLIENT *clnt)
+{
+ return (clnt_call(clnt, NLM_CANCEL,
+ (xdrproc_t) xdr_nlm_cancargs, (caddr_t) argp,
+ (xdrproc_t) xdr_nlm_res, (caddr_t) clnt_res,
+ TIMEOUT));
+}
+
+enum clnt_stat
+nlm_unlock_1(struct nlm_unlockargs *argp, nlm_res *clnt_res, CLIENT *clnt)
+{
+ return (clnt_call(clnt, NLM_UNLOCK,
+ (xdrproc_t) xdr_nlm_unlockargs, (caddr_t) argp,
+ (xdrproc_t) xdr_nlm_res, (caddr_t) clnt_res,
+ TIMEOUT));
+}
+
+enum clnt_stat
+nlm_granted_1(struct nlm_testargs *argp, nlm_res *clnt_res, CLIENT *clnt)
+{
+ return (clnt_call(clnt, NLM_GRANTED,
+ (xdrproc_t) xdr_nlm_testargs, (caddr_t) argp,
+ (xdrproc_t) xdr_nlm_res, (caddr_t) clnt_res,
+ TIMEOUT));
+}
+
+enum clnt_stat
+nlm_test_msg_1(struct nlm_testargs *argp, void *clnt_res, CLIENT *clnt)
+{
+ return (clnt_call(clnt, NLM_TEST_MSG,
+ (xdrproc_t) xdr_nlm_testargs, (caddr_t) argp,
+ (xdrproc_t) xdr_void, (caddr_t) clnt_res,
+ TIMEOUT));
+}
+
+enum clnt_stat
+nlm_lock_msg_1(struct nlm_lockargs *argp, void *clnt_res, CLIENT *clnt)
+{
+ return (clnt_call(clnt, NLM_LOCK_MSG,
+ (xdrproc_t) xdr_nlm_lockargs, (caddr_t) argp,
+ (xdrproc_t) xdr_void, (caddr_t) clnt_res,
+ TIMEOUT));
+}
+
+enum clnt_stat
+nlm_cancel_msg_1(struct nlm_cancargs *argp, void *clnt_res, CLIENT *clnt)
+{
+ return (clnt_call(clnt, NLM_CANCEL_MSG,
+ (xdrproc_t) xdr_nlm_cancargs, (caddr_t) argp,
+ (xdrproc_t) xdr_void, (caddr_t) clnt_res,
+ TIMEOUT));
+}
+
+enum clnt_stat
+nlm_unlock_msg_1(struct nlm_unlockargs *argp, void *clnt_res, CLIENT *clnt)
+{
+ return (clnt_call(clnt, NLM_UNLOCK_MSG,
+ (xdrproc_t) xdr_nlm_unlockargs, (caddr_t) argp,
+ (xdrproc_t) xdr_void, (caddr_t) clnt_res,
+ TIMEOUT));
+}
+
+enum clnt_stat
+nlm_granted_msg_1(struct nlm_testargs *argp, void *clnt_res, CLIENT *clnt)
+{
+ return (clnt_call(clnt, NLM_GRANTED_MSG,
+ (xdrproc_t) xdr_nlm_testargs, (caddr_t) argp,
+ (xdrproc_t) xdr_void, (caddr_t) clnt_res,
+ TIMEOUT));
+}
+
+enum clnt_stat
+nlm_test_res_1(nlm_testres *argp, void *clnt_res, CLIENT *clnt)
+{
+ return (clnt_call(clnt, NLM_TEST_RES,
+ (xdrproc_t) xdr_nlm_testres, (caddr_t) argp,
+ (xdrproc_t) xdr_void, (caddr_t) clnt_res,
+ TIMEOUT));
+}
+
+enum clnt_stat
+nlm_lock_res_1(nlm_res *argp, void *clnt_res, CLIENT *clnt)
+{
+ return (clnt_call(clnt, NLM_LOCK_RES,
+ (xdrproc_t) xdr_nlm_res, (caddr_t) argp,
+ (xdrproc_t) xdr_void, (caddr_t) clnt_res,
+ TIMEOUT));
+}
+
+enum clnt_stat
+nlm_cancel_res_1(nlm_res *argp, void *clnt_res, CLIENT *clnt)
+{
+ return (clnt_call(clnt, NLM_CANCEL_RES,
+ (xdrproc_t) xdr_nlm_res, (caddr_t) argp,
+ (xdrproc_t) xdr_void, (caddr_t) clnt_res,
+ TIMEOUT));
+}
+
+enum clnt_stat
+nlm_unlock_res_1(nlm_res *argp, void *clnt_res, CLIENT *clnt)
+{
+ return (clnt_call(clnt, NLM_UNLOCK_RES,
+ (xdrproc_t) xdr_nlm_res, (caddr_t) argp,
+ (xdrproc_t) xdr_void, (caddr_t) clnt_res,
+ TIMEOUT));
+}
+
+enum clnt_stat
+nlm_granted_res_1(nlm_res *argp, void *clnt_res, CLIENT *clnt)
+{
+ return (clnt_call(clnt, NLM_GRANTED_RES,
+ (xdrproc_t) xdr_nlm_res, (caddr_t) argp,
+ (xdrproc_t) xdr_void, (caddr_t) clnt_res,
+ TIMEOUT));
+}
+
+enum clnt_stat
+nlm_share_3(nlm_shareargs *argp, nlm_shareres *clnt_res, CLIENT *clnt)
+{
+ return (clnt_call(clnt, NLM_SHARE,
+ (xdrproc_t) xdr_nlm_shareargs, (caddr_t) argp,
+ (xdrproc_t) xdr_nlm_shareres, (caddr_t) clnt_res,
+ TIMEOUT));
+}
+
+enum clnt_stat
+nlm_unshare_3(nlm_shareargs *argp, nlm_shareres *clnt_res, CLIENT *clnt)
+{
+ return (clnt_call(clnt, NLM_UNSHARE,
+ (xdrproc_t) xdr_nlm_shareargs, (caddr_t) argp,
+ (xdrproc_t) xdr_nlm_shareres, (caddr_t) clnt_res,
+ TIMEOUT));
+}
+
+enum clnt_stat
+nlm_nm_lock_3(nlm_lockargs *argp, nlm_res *clnt_res, CLIENT *clnt)
+{
+ return (clnt_call(clnt, NLM_NM_LOCK,
+ (xdrproc_t) xdr_nlm_lockargs, (caddr_t) argp,
+ (xdrproc_t) xdr_nlm_res, (caddr_t) clnt_res,
+ TIMEOUT));
+}
+
+enum clnt_stat
+nlm_free_all_3(nlm_notify *argp, void *clnt_res, CLIENT *clnt)
+{
+ return (clnt_call(clnt, NLM_FREE_ALL,
+ (xdrproc_t) xdr_nlm_notify, (caddr_t) argp,
+ (xdrproc_t) xdr_void, (caddr_t) clnt_res,
+ TIMEOUT));
+}
+
+enum clnt_stat
+nlm4_test_4(nlm4_testargs *argp, nlm4_testres *clnt_res, CLIENT *clnt)
+{
+ return (clnt_call(clnt, NLM4_TEST,
+ (xdrproc_t) xdr_nlm4_testargs, (caddr_t) argp,
+ (xdrproc_t) xdr_nlm4_testres, (caddr_t) clnt_res,
+ TIMEOUT));
+}
+
+enum clnt_stat
+nlm4_lock_4(nlm4_lockargs *argp, nlm4_res *clnt_res, CLIENT *clnt)
+{
+ return (clnt_call(clnt, NLM4_LOCK,
+ (xdrproc_t) xdr_nlm4_lockargs, (caddr_t) argp,
+ (xdrproc_t) xdr_nlm4_res, (caddr_t) clnt_res,
+ TIMEOUT));
+}
+
+enum clnt_stat
+nlm4_cancel_4(nlm4_cancargs *argp, nlm4_res *clnt_res, CLIENT *clnt)
+{
+ return (clnt_call(clnt, NLM4_CANCEL,
+ (xdrproc_t) xdr_nlm4_cancargs, (caddr_t) argp,
+ (xdrproc_t) xdr_nlm4_res, (caddr_t) clnt_res,
+ TIMEOUT));
+}
+
+enum clnt_stat
+nlm4_unlock_4(nlm4_unlockargs *argp, nlm4_res *clnt_res, CLIENT *clnt)
+{
+ return (clnt_call(clnt, NLM4_UNLOCK,
+ (xdrproc_t) xdr_nlm4_unlockargs, (caddr_t) argp,
+ (xdrproc_t) xdr_nlm4_res, (caddr_t) clnt_res,
+ TIMEOUT));
+}
+
+enum clnt_stat
+nlm4_granted_4(nlm4_testargs *argp, nlm4_res *clnt_res, CLIENT *clnt)
+{
+ return (clnt_call(clnt, NLM4_GRANTED,
+ (xdrproc_t) xdr_nlm4_testargs, (caddr_t) argp,
+ (xdrproc_t) xdr_nlm4_res, (caddr_t) clnt_res,
+ TIMEOUT));
+}
+
+enum clnt_stat
+nlm4_test_msg_4(nlm4_testargs *argp, void *clnt_res, CLIENT *clnt)
+{
+ return (clnt_call(clnt, NLM4_TEST_MSG,
+ (xdrproc_t) xdr_nlm4_testargs, (caddr_t) argp,
+ (xdrproc_t) xdr_void, (caddr_t) clnt_res,
+ TIMEOUT));
+}
+
+enum clnt_stat
+nlm4_lock_msg_4(nlm4_lockargs *argp, void *clnt_res, CLIENT *clnt)
+{
+ return (clnt_call(clnt, NLM4_LOCK_MSG,
+ (xdrproc_t) xdr_nlm4_lockargs, (caddr_t) argp,
+ (xdrproc_t) xdr_void, (caddr_t) clnt_res,
+ TIMEOUT));
+}
+
+enum clnt_stat
+nlm4_cancel_msg_4(nlm4_cancargs *argp, void *clnt_res, CLIENT *clnt)
+{
+ return (clnt_call(clnt, NLM4_CANCEL_MSG,
+ (xdrproc_t) xdr_nlm4_cancargs, (caddr_t) argp,
+ (xdrproc_t) xdr_void, (caddr_t) clnt_res,
+ TIMEOUT));
+}
+
+enum clnt_stat
+nlm4_unlock_msg_4(nlm4_unlockargs *argp, void *clnt_res, CLIENT *clnt)
+{
+ return (clnt_call(clnt, NLM4_UNLOCK_MSG,
+ (xdrproc_t) xdr_nlm4_unlockargs, (caddr_t) argp,
+ (xdrproc_t) xdr_void, (caddr_t) clnt_res,
+ TIMEOUT));
+}
+
+enum clnt_stat
+nlm4_granted_msg_4(nlm4_testargs *argp, void *clnt_res, CLIENT *clnt)
+{
+ return (clnt_call(clnt, NLM4_GRANTED_MSG,
+ (xdrproc_t) xdr_nlm4_testargs, (caddr_t) argp,
+ (xdrproc_t) xdr_void, (caddr_t) clnt_res,
+ TIMEOUT));
+}
+
+enum clnt_stat
+nlm4_test_res_4(nlm4_testres *argp, void *clnt_res, CLIENT *clnt)
+{
+ return (clnt_call(clnt, NLM4_TEST_RES,
+ (xdrproc_t) xdr_nlm4_testres, (caddr_t) argp,
+ (xdrproc_t) xdr_void, (caddr_t) clnt_res,
+ TIMEOUT));
+}
+
+enum clnt_stat
+nlm4_lock_res_4(nlm4_res *argp, void *clnt_res, CLIENT *clnt)
+{
+ return (clnt_call(clnt, NLM4_LOCK_RES,
+ (xdrproc_t) xdr_nlm4_res, (caddr_t) argp,
+ (xdrproc_t) xdr_void, (caddr_t) clnt_res,
+ TIMEOUT));
+}
+
+enum clnt_stat
+nlm4_cancel_res_4(nlm4_res *argp, void *clnt_res, CLIENT *clnt)
+{
+ return (clnt_call(clnt, NLM4_CANCEL_RES,
+ (xdrproc_t) xdr_nlm4_res, (caddr_t) argp,
+ (xdrproc_t) xdr_void, (caddr_t) clnt_res,
+ TIMEOUT));
+}
+
+enum clnt_stat
+nlm4_unlock_res_4(nlm4_res *argp, void *clnt_res, CLIENT *clnt)
+{
+ return (clnt_call(clnt, NLM4_UNLOCK_RES,
+ (xdrproc_t) xdr_nlm4_res, (caddr_t) argp,
+ (xdrproc_t) xdr_void, (caddr_t) clnt_res,
+ TIMEOUT));
+}
+
+enum clnt_stat
+nlm4_granted_res_4(nlm4_res *argp, void *clnt_res, CLIENT *clnt)
+{
+ return (clnt_call(clnt, NLM4_GRANTED_RES,
+ (xdrproc_t) xdr_nlm4_res, (caddr_t) argp,
+ (xdrproc_t) xdr_void, (caddr_t) clnt_res,
+ TIMEOUT));
+}
+
+enum clnt_stat
+nlm4_share_4(nlm4_shareargs *argp, nlm4_shareres *clnt_res, CLIENT *clnt)
+{
+ return (clnt_call(clnt, NLM4_SHARE,
+ (xdrproc_t) xdr_nlm4_shareargs, (caddr_t) argp,
+ (xdrproc_t) xdr_nlm4_shareres, (caddr_t) clnt_res,
+ TIMEOUT));
+}
+
+enum clnt_stat
+nlm4_unshare_4(nlm4_shareargs *argp, nlm4_shareres *clnt_res, CLIENT *clnt)
+{
+ return (clnt_call(clnt, NLM4_UNSHARE,
+ (xdrproc_t) xdr_nlm4_shareargs, (caddr_t) argp,
+ (xdrproc_t) xdr_nlm4_shareres, (caddr_t) clnt_res,
+ TIMEOUT));
+}
+
+enum clnt_stat
+nlm4_nm_lock_4(nlm4_lockargs *argp, nlm4_res *clnt_res, CLIENT *clnt)
+{
+ return (clnt_call(clnt, NLM4_NM_LOCK,
+ (xdrproc_t) xdr_nlm4_lockargs, (caddr_t) argp,
+ (xdrproc_t) xdr_nlm4_res, (caddr_t) clnt_res,
+ TIMEOUT));
+}
+
+enum clnt_stat
+nlm4_free_all_4(nlm4_notify *argp, void *clnt_res, CLIENT *clnt)
+{
+ return (clnt_call(clnt, NLM4_FREE_ALL,
+ (xdrproc_t) xdr_nlm4_notify, (caddr_t) argp,
+ (xdrproc_t) xdr_void, (caddr_t) clnt_res,
+ TIMEOUT));
+}
diff --git a/sys/nlm/nlm_prot_impl.c b/sys/nlm/nlm_prot_impl.c
new file mode 100644
index 0000000..106f4b5
--- /dev/null
+++ b/sys/nlm/nlm_prot_impl.c
@@ -0,0 +1,1783 @@
+/*-
+ * Copyright (c) 2008 Isilon Inc http://www.isilon.com/
+ * Authors: Doug Rabson <dfr@rabson.org>
+ * Developed with Red Inc: Alfred Perlstein <alfred@freebsd.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "opt_inet6.h"
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/fcntl.h>
+#include <sys/kernel.h>
+#include <sys/lockf.h>
+#include <sys/malloc.h>
+#include <sys/mount.h>
+#include <sys/priv.h>
+#include <sys/proc.h>
+#include <sys/socket.h>
+#include <sys/socketvar.h>
+#include <sys/syscall.h>
+#include <sys/sysctl.h>
+#include <sys/sysent.h>
+#include <sys/sysproto.h>
+#include <sys/systm.h>
+#include <sys/taskqueue.h>
+#include <sys/unistd.h>
+#include <sys/vnode.h>
+
+#include "nlm_prot.h"
+#include "sm_inter.h"
+#include "nlm.h"
+#include <rpc/rpc_com.h>
+#include <rpc/rpcb_prot.h>
+
+MALLOC_DEFINE(M_NLM, "NLM", "Network Lock Manager");
+
+/*
+ * If a host is inactive (and holds no locks) for this amount of
+ * seconds, we consider it idle and stop tracking it.
+ */
+#define NLM_IDLE_TIMEOUT 30
+
+/*
+ * We check the host list for idle hosts every few seconds.
+ */
+#define NLM_IDLE_PERIOD 5
+
+/*
+ * Support for sysctl vfs.nlm.sysid
+ */
+SYSCTL_NODE(_vfs, OID_AUTO, nlm, CTLFLAG_RW, NULL, "Network Lock Manager");
+SYSCTL_NODE(_vfs_nlm, OID_AUTO, sysid, CTLFLAG_RW, NULL, "");
+
+/*
+ * Syscall hooks
+ */
+static int nlm_syscall_offset = SYS_nlm_syscall;
+static struct sysent nlm_syscall_prev_sysent;
+MAKE_SYSENT(nlm_syscall);
+static bool_t nlm_syscall_registered = FALSE;
+
+/*
+ * Debug level passed in from userland. We also support a sysctl hook
+ * so that it can be changed on a live system.
+ */
+static int nlm_debug_level;
+SYSCTL_INT(_debug, OID_AUTO, nlm_debug, CTLFLAG_RW, &nlm_debug_level, 0, "");
+
+/*
+ * Grace period handling. The value of nlm_grace_threshold is the
+ * value of time_uptime after which we are serving requests normally.
+ */
+static time_t nlm_grace_threshold;
+
+/*
+ * We check for idle hosts if time_uptime is greater than
+ * nlm_next_idle_check.
+ */
+static time_t nlm_next_idle_check;
+
+/*
+ * A socket to use for RPC - shared by all IPv4 RPC clients.
+ */
+static struct socket *nlm_socket;
+
+#ifdef INET6
+
+/*
+ * A socket to use for RPC - shared by all IPv6 RPC clients.
+ */
+static struct socket *nlm_socket6;
+
+#endif
+
+/*
+ * An RPC client handle that can be used to communicate with the local
+ * NSM.
+ */
+static CLIENT *nlm_nsm;
+
+/*
+ * An RPC client handle that can be used to communicate with the
+ * userland part of lockd.
+ */
+static CLIENT *nlm_lockd;
+
+/*
+ * Locks:
+ * (l) locked by nh_lock
+ * (s) only accessed via server RPC which is single threaded
+ * (c) const until freeing
+ */
+
+/*
+ * A pending asynchronous lock request, stored on the nh_pending list
+ * of the NLM host.
+ */
+struct nlm_async_lock {
+ TAILQ_ENTRY(nlm_async_lock) af_link; /* (l) host's list of locks */
+ struct task af_task; /* (c) async callback details */
+ void *af_cookie; /* (l) lock manager cancel token */
+ struct vnode *af_vp; /* (l) vnode to lock */
+ struct flock af_fl; /* (c) lock details */
+ struct nlm_host *af_host; /* (c) host which is locking */
+ nlm4_testargs af_granted; /* (c) notification details */
+};
+TAILQ_HEAD(nlm_async_lock_list, nlm_async_lock);
+
+/*
+ * NLM host.
+ */
+struct nlm_host {
+ struct mtx nh_lock;
+ TAILQ_ENTRY(nlm_host) nh_link; /* (s) global list of hosts */
+ char *nh_caller_name; /* (c) printable name of host */
+	uint32_t nh_sysid;		     /* (c) our allocated system ID */
+ char nh_sysid_string[10]; /* (c) string rep. of sysid */
+ struct sockaddr_storage nh_addr; /* (s) remote address of host */
+ CLIENT *nh_rpc; /* (s) RPC handle to send to host */
+ rpcvers_t nh_vers; /* (s) NLM version of host */
+ int nh_state; /* (s) last seen NSM state of host */
+ bool_t nh_monitored; /* (s) TRUE if local NSM is monitoring */
+ time_t nh_idle_timeout; /* (s) Time at which host is idle */
+ struct sysctl_ctx_list nh_sysctl; /* (c) vfs.nlm.sysid nodes */
+ struct nlm_async_lock_list nh_pending; /* (l) pending async locks */
+ struct nlm_async_lock_list nh_finished; /* (l) finished async locks */
+};
+TAILQ_HEAD(nlm_host_list, nlm_host);
+
+static struct nlm_host_list nlm_hosts;
+static uint32_t nlm_next_sysid = 1;
+
+static void nlm_host_unmonitor(struct nlm_host *);
+
+/**********************************************************************/
+
+/*
+ * Initialise NLM globals.
+ */
+static void
+nlm_init(void *dummy)
+{
+ int error;
+
+ TAILQ_INIT(&nlm_hosts);
+
+ error = syscall_register(&nlm_syscall_offset, &nlm_syscall_sysent,
+ &nlm_syscall_prev_sysent);
+ if (error)
+ printf("Can't register NLM syscall\n");
+ else
+ nlm_syscall_registered = TRUE;
+}
+SYSINIT(nlm_init, SI_SUB_LOCK, SI_ORDER_FIRST, nlm_init, NULL);
+
+static void
+nlm_uninit(void *dummy)
+{
+
+ if (nlm_syscall_registered)
+ syscall_deregister(&nlm_syscall_offset,
+ &nlm_syscall_prev_sysent);
+}
+SYSUNINIT(nlm_uninit, SI_SUB_LOCK, SI_ORDER_FIRST, nlm_uninit, NULL);
+
+/*
+ * Copy a struct netobj.
+ */
+void
+nlm_copy_netobj(struct netobj *dst, struct netobj *src,
+ struct malloc_type *type)
+{
+
+ dst->n_len = src->n_len;
+ dst->n_bytes = malloc(src->n_len, type, M_WAITOK);
+ memcpy(dst->n_bytes, src->n_bytes, src->n_len);
+}
+
+/*
+ * Create an RPC client handle for the given (address,prog,vers)
+ * triple using UDP.
+ */
+static CLIENT *
+nlm_get_rpc(struct sockaddr *sa, rpcprog_t prog, rpcvers_t vers)
+{
+ const char *wchan = "nlmrcv";
+ const char* protofmly;
+ struct sockaddr_storage ss;
+ struct socket *so;
+ CLIENT *rpcb;
+ struct timeval timo;
+ RPCB parms;
+ char *uaddr;
+ enum clnt_stat stat;
+ int rpcvers;
+
+ /*
+ * First we need to contact the remote RPCBIND service to find
+ * the right port.
+ */
+ memcpy(&ss, sa, sa->sa_len);
+ switch (ss.ss_family) {
+ case AF_INET:
+ ((struct sockaddr_in *)&ss)->sin_port = htons(111);
+ protofmly = "inet";
+ so = nlm_socket;
+ break;
+
+#ifdef INET6
+ case AF_INET6:
+ ((struct sockaddr_in6 *)&ss)->sin6_port = htons(111);
+ protofmly = "inet6";
+ so = nlm_socket6;
+ break;
+#endif
+
+ default:
+ /*
+ * Unsupported address family - fail.
+ */
+ return (NULL);
+ }
+
+ rpcb = clnt_dg_create(so, (struct sockaddr *)&ss,
+ RPCBPROG, RPCBVERS4, 0, 0);
+ if (!rpcb)
+ return (NULL);
+
+ parms.r_prog = prog;
+ parms.r_vers = vers;
+ parms.r_netid = "udp";
+ parms.r_addr = "";
+ parms.r_owner = "";
+
+ /*
+ * Use the default timeout.
+ */
+ timo.tv_sec = 25;
+ timo.tv_usec = 0;
+again:
+ uaddr = NULL;
+ stat = CLNT_CALL(rpcb, (rpcprog_t) RPCBPROC_GETADDR,
+ (xdrproc_t) xdr_rpcb, &parms,
+ (xdrproc_t) xdr_wrapstring, &uaddr, timo);
+ if (stat == RPC_PROGVERSMISMATCH) {
+ /*
+ * Try RPCBIND version 3 if we haven't already.
+ *
+ * XXX fall back to portmap?
+ */
+ CLNT_CONTROL(rpcb, CLGET_VERS, &rpcvers);
+ if (rpcvers == RPCBVERS4) {
+ rpcvers = RPCBVERS;
+ CLNT_CONTROL(rpcb, CLSET_VERS, &rpcvers);
+ goto again;
+ }
+ }
+
+ if (stat == RPC_SUCCESS) {
+ /*
+ * We have a reply from the remote RPCBIND - turn it into an
+ * appropriate address and make a new client that can talk to
+ * the remote NLM.
+ *
+ * XXX fixup IPv6 scope ID.
+ */
+ struct netbuf *a;
+ a = __rpc_uaddr2taddr_af(ss.ss_family, uaddr);
+ memcpy(&ss, a->buf, a->len);
+ free(a->buf, M_RPC);
+ free(a, M_RPC);
+ xdr_free((xdrproc_t) xdr_wrapstring, &uaddr);
+ } else if (stat == RPC_PROGVERSMISMATCH) {
+ /*
+ * Try portmap.
+ */
+ struct pmap mapping;
+ u_short port;
+
+ rpcvers = PMAPVERS;
+ CLNT_CONTROL(rpcb, CLSET_VERS, &rpcvers);
+
+ mapping.pm_prog = parms.r_prog;
+ mapping.pm_vers = parms.r_vers;
+ mapping.pm_prot = IPPROTO_UDP;
+ mapping.pm_port = 0;
+
+ stat = CLNT_CALL(rpcb, (rpcprog_t) PMAPPROC_GETPORT,
+ (xdrproc_t) xdr_pmap, &mapping,
+ (xdrproc_t) xdr_u_short, &port, timo);
+
+ if (stat == RPC_SUCCESS) {
+ switch (ss.ss_family) {
+ case AF_INET:
+ ((struct sockaddr_in *)&ss)->sin_port =
+ htons(port);
+ break;
+
+#ifdef INET6
+ case AF_INET6:
+ ((struct sockaddr_in6 *)&ss)->sin6_port =
+ htons(port);
+ break;
+#endif
+ }
+ }
+ }
+ if (stat != RPC_SUCCESS) {
+ printf("NLM: failed to contact remote rpcbind, stat = %d\n",
+ (int) stat);
+ return (NULL);
+ }
+
+ /*
+ * Re-use the client we used to speak to rpcbind.
+ */
+ CLNT_CONTROL(rpcb, CLSET_SVC_ADDR, &ss);
+ CLNT_CONTROL(rpcb, CLSET_PROG, &prog);
+ CLNT_CONTROL(rpcb, CLSET_VERS, &vers);
+ CLNT_CONTROL(rpcb, CLSET_WAITCHAN, &wchan);
+ rpcb->cl_auth = authunix_create(curthread->td_ucred);
+
+ return (rpcb);
+}
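+
+/*
+ * Illustrative sketch only (mirrors how nlm_host_get_rpc below uses
+ * this function): a caller resolves the remote NLM's port via
+ * rpcbind/portmap and then reuses the returned handle for the actual
+ * NLM calls, e.g.
+ *
+ *	CLIENT *cl = nlm_get_rpc((struct sockaddr *)&host->nh_addr,
+ *	    NLM_PROG, host->nh_vers);
+ *	if (cl != NULL)
+ *		stat = nlm4_granted_msg_4(&granted, NULL, cl);
+ */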
+
+/*
+ * This async callback fires after an async lock request has been
+ * granted. We notify the host which initiated the request.
+ */
+static void
+nlm_lock_callback(void *arg, int pending)
+{
+ struct nlm_async_lock *af = (struct nlm_async_lock *) arg;
+
+ if (nlm_debug_level >= 2)
+ printf("NLM: async lock %p for %s (sysid %d) granted\n",
+ af, af->af_host->nh_caller_name,
+ af->af_host->nh_sysid);
+
+ /*
+ * Send the results back to the host.
+ *
+ * Note: there is a possible race here with nlm_host_notify
+	 * destroying the RPC client. To avoid problems, the first
+ * thing nlm_host_notify does is to cancel pending async lock
+ * requests.
+ */
+ if (af->af_host->nh_vers == NLM_VERS4) {
+ nlm4_granted_msg_4(&af->af_granted,
+ NULL, af->af_host->nh_rpc);
+ } else {
+ /*
+ * Back-convert to legacy protocol
+ */
+ nlm_testargs granted;
+ granted.cookie = af->af_granted.cookie;
+ granted.exclusive = af->af_granted.exclusive;
+ granted.alock.caller_name =
+ af->af_granted.alock.caller_name;
+ granted.alock.fh = af->af_granted.alock.fh;
+ granted.alock.oh = af->af_granted.alock.oh;
+ granted.alock.svid = af->af_granted.alock.svid;
+ granted.alock.l_offset =
+ af->af_granted.alock.l_offset;
+ granted.alock.l_len =
+ af->af_granted.alock.l_len;
+
+ nlm_granted_msg_1(&granted,
+ NULL, af->af_host->nh_rpc);
+ }
+
+ /*
+ * Move this entry to the nh_finished list. Someone else will
+	 * free it later - it's too hard to do it here safely without
+ * racing with cancel.
+ *
+ * XXX possibly we should have a third "granted sent but not
+ * ack'ed" list so that we can re-send the granted message.
+ */
+ mtx_lock(&af->af_host->nh_lock);
+ TAILQ_REMOVE(&af->af_host->nh_pending, af, af_link);
+ TAILQ_INSERT_TAIL(&af->af_host->nh_finished, af, af_link);
+ mtx_unlock(&af->af_host->nh_lock);
+}
+
+/*
+ * Free an async lock request. The request must have been removed from
+ * any list.
+ */
+static void
+nlm_free_async_lock(struct nlm_async_lock *af)
+{
+ /*
+ * Free an async lock.
+ */
+ xdr_free((xdrproc_t) xdr_nlm4_testargs, &af->af_granted);
+ if (af->af_vp)
+ vrele(af->af_vp);
+ free(af, M_NLM);
+}
+
+/*
+ * Cancel our async request - this must be called with
+ * af->af_host->nh_lock held. This is slightly complicated by a
+ * potential race with our own callback. If we fail to cancel the
+ * lock, it must already have been granted - we make sure our async
+ * task has completed by calling taskqueue_drain in this case.
+ */
+static int
+nlm_cancel_async_lock(struct nlm_async_lock *af)
+{
+ struct nlm_host *host = af->af_host;
+ int error;
+
+ mtx_assert(&host->nh_lock, MA_OWNED);
+
+ mtx_unlock(&host->nh_lock);
+
+ error = VOP_ADVLOCKASYNC(af->af_vp, NULL, F_CANCEL, &af->af_fl,
+ F_REMOTE, NULL, &af->af_cookie);
+
+ if (error) {
+ /*
+ * We failed to cancel - make sure our callback has
+ * completed before we continue.
+ */
+ taskqueue_drain(taskqueue_thread, &af->af_task);
+ }
+
+ mtx_lock(&host->nh_lock);
+
+ if (!error) {
+ if (nlm_debug_level >= 2)
+ printf("NLM: async lock %p for %s (sysid %d) "
+ "cancelled\n",
+ af, host->nh_caller_name, host->nh_sysid);
+
+ /*
+ * Remove from the nh_pending list and free now that
+ * we are safe from the callback.
+ */
+ TAILQ_REMOVE(&host->nh_pending, af, af_link);
+ mtx_unlock(&host->nh_lock);
+ nlm_free_async_lock(af);
+ mtx_lock(&host->nh_lock);
+ }
+
+ return (error);
+}
+
+static void
+nlm_free_finished_locks(struct nlm_host *host)
+{
+ struct nlm_async_lock *af;
+
+ mtx_lock(&host->nh_lock);
+ while ((af = TAILQ_FIRST(&host->nh_finished)) != NULL) {
+ TAILQ_REMOVE(&host->nh_finished, af, af_link);
+ mtx_unlock(&host->nh_lock);
+ nlm_free_async_lock(af);
+ mtx_lock(&host->nh_lock);
+ }
+ mtx_unlock(&host->nh_lock);
+}
+
+/*
+ * This is called when we receive a host state change
+ * notification. We unlock any active locks owned by the host.
+ */
+static void
+nlm_host_notify(struct nlm_host *host, int newstate, bool_t destroy)
+{
+ struct nlm_async_lock *af;
+
+ if (newstate) {
+ if (nlm_debug_level >= 1)
+ printf("NLM: host %s (sysid %d) rebooted, new "
+ "state is %d\n",
+ host->nh_caller_name, host->nh_sysid, newstate);
+ }
+
+ /*
+ * Cancel any pending async locks for this host.
+ */
+ mtx_lock(&host->nh_lock);
+ while ((af = TAILQ_FIRST(&host->nh_pending)) != NULL) {
+ /*
+ * nlm_cancel_async_lock will remove the entry from
+ * nh_pending and free it.
+ */
+ nlm_cancel_async_lock(af);
+ }
+ mtx_unlock(&host->nh_lock);
+ nlm_free_finished_locks(host);
+
+ /*
+ * The host just rebooted - trash its locks and forget any
+ * RPC client handle that we may have for it.
+ */
+ lf_clearremotesys(host->nh_sysid);
+ if (host->nh_rpc) {
+ AUTH_DESTROY(host->nh_rpc->cl_auth);
+ CLNT_DESTROY(host->nh_rpc);
+ host->nh_rpc = NULL;
+ }
+ host->nh_state = newstate;
+
+ /*
+ * Destroy the host if the caller believes that it won't be
+ * used again. This is safe enough - if we see the same name
+ * again, we will just create a new host.
+ */
+ if (destroy) {
+ TAILQ_REMOVE(&nlm_hosts, host, nh_link);
+ mtx_destroy(&host->nh_lock);
+ sysctl_ctx_free(&host->nh_sysctl);
+ free(host->nh_caller_name, M_NLM);
+ free(host, M_NLM);
+ }
+}
+
+/*
+ * Sysctl handler to count the number of locks for a sysid.
+ */
+static int
+nlm_host_lock_count_sysctl(SYSCTL_HANDLER_ARGS)
+{
+ struct nlm_host *host;
+ int count;
+
+ host = oidp->oid_arg1;
+ count = lf_countlocks(host->nh_sysid);
+ return sysctl_handle_int(oidp, &count, 0, req);
+}
+
+/*
+ * Create a new NLM host.
+ */
+static struct nlm_host *
+nlm_create_host(const char* caller_name)
+{
+ struct nlm_host *host;
+ struct sysctl_oid *oid;
+
+ if (nlm_debug_level >= 1)
+ printf("NLM: new host %s (sysid %d)\n",
+ caller_name, nlm_next_sysid);
+ host = malloc(sizeof(struct nlm_host), M_NLM, M_WAITOK|M_ZERO);
+ mtx_init(&host->nh_lock, "nh_lock", NULL, MTX_DEF);
+ host->nh_caller_name = strdup(caller_name, M_NLM);
+ host->nh_sysid = nlm_next_sysid++;
+ snprintf(host->nh_sysid_string, sizeof(host->nh_sysid_string),
+ "%d", host->nh_sysid);
+ host->nh_rpc = NULL;
+ host->nh_vers = 0;
+ host->nh_state = 0;
+ host->nh_monitored = FALSE;
+ TAILQ_INIT(&host->nh_pending);
+ TAILQ_INIT(&host->nh_finished);
+ TAILQ_INSERT_TAIL(&nlm_hosts, host, nh_link);
+
+ sysctl_ctx_init(&host->nh_sysctl);
+ oid = SYSCTL_ADD_NODE(&host->nh_sysctl,
+ SYSCTL_STATIC_CHILDREN(_vfs_nlm_sysid),
+ OID_AUTO, host->nh_sysid_string, CTLFLAG_RD, NULL, "");
+ SYSCTL_ADD_STRING(&host->nh_sysctl, SYSCTL_CHILDREN(oid), OID_AUTO,
+ "hostname", CTLFLAG_RD, host->nh_caller_name, 0, "");
+ SYSCTL_ADD_INT(&host->nh_sysctl, SYSCTL_CHILDREN(oid), OID_AUTO,
+ "version", CTLFLAG_RD, &host->nh_vers, 0, "");
+ SYSCTL_ADD_INT(&host->nh_sysctl, SYSCTL_CHILDREN(oid), OID_AUTO,
+ "monitored", CTLFLAG_RD, &host->nh_monitored, 0, "");
+ SYSCTL_ADD_PROC(&host->nh_sysctl, SYSCTL_CHILDREN(oid), OID_AUTO,
+ "lock_count", CTLTYPE_INT | CTLFLAG_RD, host, 0,
+ nlm_host_lock_count_sysctl, "I", "");
+
+ return (host);
+}
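+
+/*
+ * For reference (derived from the SYSCTL_ADD_* calls above), each host
+ * is exported to userland under vfs.nlm.sysid.<sysid> with the nodes:
+ *
+ *	hostname	(string)
+ *	version		(int)
+ *	monitored	(int)
+ *	lock_count	(int, computed by nlm_host_lock_count_sysctl)
+ */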
+
+/*
+ * Return non-zero if the address parts of the two sockaddrs are the
+ * same.
+ */
+static int
+nlm_compare_addr(const struct sockaddr *a, const struct sockaddr *b)
+{
+ const struct sockaddr_in *a4, *b4;
+#ifdef INET6
+ const struct sockaddr_in6 *a6, *b6;
+#endif
+
+ if (a->sa_family != b->sa_family)
+ return (FALSE);
+
+ switch (a->sa_family) {
+ case AF_INET:
+ a4 = (const struct sockaddr_in *) a;
+ b4 = (const struct sockaddr_in *) b;
+ return !memcmp(&a4->sin_addr, &b4->sin_addr,
+ sizeof(a4->sin_addr));
+#ifdef INET6
+ case AF_INET6:
+ a6 = (const struct sockaddr_in6 *) a;
+ b6 = (const struct sockaddr_in6 *) b;
+ return !memcmp(&a6->sin6_addr, &b6->sin6_addr,
+ sizeof(a6->sin6_addr));
+#endif
+ }
+
+ return (0);
+}
+
+/*
+ * Check for idle hosts and stop monitoring them. We could also free
+ * the host structure here, possibly after a larger timeout but that
+ * would require some care to avoid races with
+ * e.g. nlm_host_lock_count_sysctl.
+ */
+static void
+nlm_check_idle(void)
+{
+ struct nlm_host *host;
+
+ if (time_uptime <= nlm_next_idle_check)
+ return;
+
+ nlm_next_idle_check = time_uptime + NLM_IDLE_PERIOD;
+
+ TAILQ_FOREACH(host, &nlm_hosts, nh_link) {
+ if (host->nh_monitored
+ && time_uptime > host->nh_idle_timeout) {
+ if (lf_countlocks(host->nh_sysid) > 0) {
+ host->nh_idle_timeout =
+ time_uptime + NLM_IDLE_TIMEOUT;
+ continue;
+ }
+ nlm_host_unmonitor(host);
+ }
+ }
+}
+
+/*
+ * Search for an existing NLM host that matches the given name
+ * (typically the caller_name element of an nlm4_lock). If none is
+ * found, create a new host. If 'rqstp' is non-NULL, record the remote
+ * address of the host so that we can call it back for async
+ * responses.
+ */
+struct nlm_host *
+nlm_find_host_by_name(const char *name, struct svc_req *rqstp)
+{
+ struct nlm_host *host;
+
+ nlm_check_idle();
+
+ /*
+ * The remote host is determined by caller_name.
+ */
+ TAILQ_FOREACH(host, &nlm_hosts, nh_link) {
+ if (!strcmp(host->nh_caller_name, name))
+ break;
+ }
+
+ if (!host)
+ host = nlm_create_host(name);
+ host->nh_idle_timeout = time_uptime + NLM_IDLE_TIMEOUT;
+
+ /*
+ * If we have an RPC request, record the remote address so
+	 * that we can send async replies etc.
+ */
+ if (rqstp) {
+ struct netbuf *addr = &rqstp->rq_xprt->xp_rtaddr;
+
+ KASSERT(addr->len < sizeof(struct sockaddr_storage),
+ ("Strange remote transport address length"));
+
+ /*
+ * If we have seen an address before and we currently
+ * have an RPC client handle, make sure the address is
+ * the same, otherwise discard the client handle.
+ */
+ if (host->nh_addr.ss_len && host->nh_rpc) {
+ if (!nlm_compare_addr(
+ (struct sockaddr *) &host->nh_addr,
+ (struct sockaddr *) addr->buf)
+ || host->nh_vers != rqstp->rq_vers) {
+ AUTH_DESTROY(host->nh_rpc->cl_auth);
+ CLNT_DESTROY(host->nh_rpc);
+ host->nh_rpc = NULL;
+ }
+ }
+ memcpy(&host->nh_addr, addr->buf, addr->len);
+ host->nh_vers = rqstp->rq_vers;
+ }
+
+ return (host);
+}
+
+/*
+ * Search for an existing NLM host that matches the given remote
+ * address. If none is found, create a new host with the requested
+ * address and remember 'vers' as the NLM protocol version to use for
+ * that host.
+ */
+struct nlm_host *
+nlm_find_host_by_addr(const struct sockaddr *addr, int vers)
+{
+ struct nlm_host *host;
+
+ nlm_check_idle();
+
+ /*
+	 * The remote host is determined by its address.
+ */
+ TAILQ_FOREACH(host, &nlm_hosts, nh_link) {
+ if (nlm_compare_addr(addr,
+ (const struct sockaddr *) &host->nh_addr))
+ break;
+ }
+
+ if (!host) {
+ /*
+ * Fake up a name using inet_ntop. This buffer is
+ * large enough for an IPv6 address.
+ */
+ char tmp[sizeof "ffff:ffff:ffff:ffff:ffff:ffff:255.255.255.255"];
+ switch (addr->sa_family) {
+ case AF_INET:
+ __rpc_inet_ntop(AF_INET,
+ &((const struct sockaddr_in *) addr)->sin_addr,
+ tmp, sizeof tmp);
+ break;
+#ifdef INET6
+ case AF_INET6:
+ __rpc_inet_ntop(AF_INET6,
+ &((const struct sockaddr_in6 *) addr)->sin6_addr,
+ tmp, sizeof tmp);
+ break;
+#endif
+ default:
+			strcpy(tmp, "<unknown>");
+ }
+ host = nlm_create_host(tmp);
+ memcpy(&host->nh_addr, addr, addr->sa_len);
+ host->nh_vers = vers;
+ }
+ host->nh_idle_timeout = time_uptime + NLM_IDLE_TIMEOUT;
+
+ return (host);
+}
+
+/*
+ * Find the NLM host that matches the value of 'sysid'. If none
+ * exists, return NULL.
+ */
+static struct nlm_host *
+nlm_find_host_by_sysid(int sysid)
+{
+ struct nlm_host *host;
+
+ TAILQ_FOREACH(host, &nlm_hosts, nh_link) {
+ if (host->nh_sysid == sysid)
+ return (host);
+ }
+
+ return (NULL);
+}
+
+/*
+ * Unregister this NLM host with the local NSM due to idleness.
+ */
+static void
+nlm_host_unmonitor(struct nlm_host *host)
+{
+ mon_id smmonid;
+ sm_stat_res smstat;
+ struct timeval timo;
+ enum clnt_stat stat;
+
+ if (nlm_debug_level >= 1)
+ printf("NLM: unmonitoring %s (sysid %d)\n",
+ host->nh_caller_name, host->nh_sysid);
+
+ /*
+	 * Tell the local NSM to stop monitoring this host now that it
+	 * is idle.  We identify ourselves the same way as when the
+	 * original monitor request was made.
+ */
+ smmonid.mon_name = host->nh_caller_name;
+ smmonid.my_id.my_name = "localhost";
+ smmonid.my_id.my_prog = NLM_PROG;
+ smmonid.my_id.my_vers = NLM_SM;
+ smmonid.my_id.my_proc = NLM_SM_NOTIFY;
+
+ timo.tv_sec = 25;
+ timo.tv_usec = 0;
+ stat = CLNT_CALL(nlm_nsm, SM_UNMON,
+ (xdrproc_t) xdr_mon, &smmonid,
+ (xdrproc_t) xdr_sm_stat, &smstat, timo);
+
+ if (stat != RPC_SUCCESS) {
+ printf("Failed to contact local NSM - rpc error %d\n", stat);
+ return;
+ }
+ if (smstat.res_stat == stat_fail) {
+ printf("Local NSM refuses to unmonitor %s\n",
+ host->nh_caller_name);
+ return;
+ }
+
+ host->nh_monitored = FALSE;
+}
+
+/*
+ * Register this NLM host with the local NSM so that we can be
+ * notified if it reboots.
+ */
+static void
+nlm_host_monitor(struct nlm_host *host, int state)
+{
+ mon smmon;
+ sm_stat_res smstat;
+ struct timeval timo;
+ enum clnt_stat stat;
+
+ if (host->nh_state && state && host->nh_state != state) {
+ /*
+ * The host rebooted without telling us. Trash its
+ * locks.
+ */
+ nlm_host_notify(host, state, FALSE);
+ }
+
+ if (state && !host->nh_state) {
+ /*
+ * This is the first time we have seen an NSM state
+ * value for this host. We record it here to help
+ * detect host reboots.
+ */
+ host->nh_state = state;
+ if (nlm_debug_level >= 1)
+ printf("NLM: host %s (sysid %d) has NSM state %d\n",
+ host->nh_caller_name, host->nh_sysid, state);
+ }
+
+ if (host->nh_monitored)
+ return;
+
+ if (nlm_debug_level >= 1)
+ printf("NLM: monitoring %s (sysid %d)\n",
+ host->nh_caller_name, host->nh_sysid);
+
+ /*
+ * We put our assigned system ID value in the priv field to
+ * make it simpler to find the host if we are notified of a
+ * host restart.
+ */
+ smmon.mon_id.mon_name = host->nh_caller_name;
+ smmon.mon_id.my_id.my_name = "localhost";
+ smmon.mon_id.my_id.my_prog = NLM_PROG;
+ smmon.mon_id.my_id.my_vers = NLM_SM;
+ smmon.mon_id.my_id.my_proc = NLM_SM_NOTIFY;
+ memcpy(smmon.priv, &host->nh_sysid, sizeof(host->nh_sysid));
+
+ timo.tv_sec = 25;
+ timo.tv_usec = 0;
+ stat = CLNT_CALL(nlm_nsm, SM_MON,
+ (xdrproc_t) xdr_mon, &smmon,
+ (xdrproc_t) xdr_sm_stat, &smstat, timo);
+
+ if (stat != RPC_SUCCESS) {
+ printf("Failed to contact local NSM - rpc error %d\n", stat);
+ return;
+ }
+ if (smstat.res_stat == stat_fail) {
+ printf("Local NSM refuses to monitor %s\n",
+ host->nh_caller_name);
+ return;
+ }
+
+ host->nh_monitored = TRUE;
+}
+
+/*
+ * Return an RPC client handle that can be used to talk to the NLM
+ * running on the given host.
+ */
+CLIENT *
+nlm_host_get_rpc(struct nlm_host *host)
+{
+ struct timeval zero;
+
+ if (host->nh_rpc)
+ return (host->nh_rpc);
+
+ /*
+ * Set the send timeout to zero - we only use this rpc handle
+ * for sending async replies which have no return value.
+ */
+ host->nh_rpc = nlm_get_rpc((struct sockaddr *)&host->nh_addr,
+ NLM_PROG, host->nh_vers);
+
+ if (host->nh_rpc) {
+ zero.tv_sec = 0;
+ zero.tv_usec = 0;
+ CLNT_CONTROL(host->nh_rpc, CLSET_TIMEOUT, &zero);
+
+ /*
+ * Monitor the host - if it reboots, the address of
+ * its NSM might change so we must discard our RPC
+ * handle.
+ */
+ nlm_host_monitor(host, 0);
+ }
+
+ return (host->nh_rpc);
+}
+
+/**********************************************************************/
+
+/*
+ * Syscall interface with userland.
+ */
+
+extern void nlm_prog_0(struct svc_req *rqstp, SVCXPRT *transp);
+extern void nlm_prog_1(struct svc_req *rqstp, SVCXPRT *transp);
+extern void nlm_prog_3(struct svc_req *rqstp, SVCXPRT *transp);
+extern void nlm_prog_4(struct svc_req *rqstp, SVCXPRT *transp);
+
+static int
+nlm_register_services(SVCPOOL *pool, int addr_count, char **addrs)
+{
+ static rpcvers_t versions[] = {
+ NLM_SM, NLM_VERS, NLM_VERSX, NLM_VERS4
+ };
+ static void (*dispatchers[])(struct svc_req *, SVCXPRT *) = {
+ nlm_prog_0, nlm_prog_1, nlm_prog_3, nlm_prog_4
+ };
+ static const int version_count = sizeof(versions) / sizeof(versions[0]);
+
+ SVCXPRT **xprts;
+ char netid[16];
+ char uaddr[128];
+ struct netconfig *nconf;
+ int i, j, error;
+
+ if (!addr_count) {
+		printf("NLM: no service addresses given - can't start server\n");
+ return (EINVAL);
+ }
+
+ xprts = malloc(addr_count * sizeof(SVCXPRT *), M_NLM, M_WAITOK);
+ for (i = 0; i < version_count; i++) {
+ for (j = 0; j < addr_count; j++) {
+ /*
+ * Create transports for the first version and
+ * then just register everything else to the
+ * same transports.
+ */
+ if (i == 0) {
+ char *up;
+
+ error = copyin(&addrs[2*j], &up,
+ sizeof(char*));
+ if (error)
+ goto out;
+ error = copyinstr(up, netid, sizeof(netid),
+ NULL);
+ if (error)
+ goto out;
+ error = copyin(&addrs[2*j+1], &up,
+ sizeof(char*));
+ if (error)
+ goto out;
+ error = copyinstr(up, uaddr, sizeof(uaddr),
+ NULL);
+ if (error)
+ goto out;
+ nconf = getnetconfigent(netid);
+ if (!nconf) {
+ printf("Can't lookup netid %s\n",
+ netid);
+ error = EINVAL;
+ goto out;
+ }
+ xprts[j] = svc_tp_create(pool, dispatchers[i],
+ NLM_PROG, versions[i], uaddr, nconf);
+ if (!xprts[j]) {
+ printf("NLM: unable to create "
+ "(NLM_PROG, %d).\n", versions[i]);
+ error = EINVAL;
+ goto out;
+ }
+ freenetconfigent(nconf);
+ } else {
+ nconf = getnetconfigent(xprts[j]->xp_netid);
+ rpcb_unset(NLM_PROG, versions[i], nconf);
+ if (!svc_reg(xprts[j], NLM_PROG, versions[i],
+ dispatchers[i], nconf)) {
+ printf("NLM: can't register "
+ "(NLM_PROG, %d)\n", versions[i]);
+ error = EINVAL;
+ goto out;
+ }
+ }
+ }
+ }
+ error = 0;
+out:
+ free(xprts, M_NLM);
+ return (error);
+}
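+
+/*
+ * Note on the layout assumed by the copyin loop above: 'addrs' is a
+ * userland array holding addr_count flattened (netid, universal
+ * address) string pairs, so addrs[2*j] names the transport and
+ * addrs[2*j+1] gives its address.  A hypothetical example (the strings
+ * here are illustrative only):
+ *
+ *	char *addrs[] = { "udp", "0.0.0.0.0.111", "tcp", "0.0.0.0.0.111" };
+ *	... passed with addr_count = 2 via the nlm_syscall() arguments.
+ */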
+
+/*
+ * Main server entry point. Contacts the local NSM to get its current
+ * state and send SM_UNMON_ALL. Registers the NLM services and then
+ * services requests. Does not return until the server is interrupted
+ * by a signal.
+ */
+static int
+nlm_server_main(int addr_count, char **addrs)
+{
+ struct thread *td = curthread;
+ int error;
+ SVCPOOL *pool;
+ struct sockopt opt;
+ int portlow;
+#ifdef INET6
+ struct sockaddr_in6 sin6;
+#endif
+ struct sockaddr_in sin;
+ my_id id;
+ sm_stat smstat;
+ struct timeval timo;
+ enum clnt_stat stat;
+ struct nlm_host *host;
+
+ if (nlm_socket) {
+ printf("NLM: can't start server - it appears to be running already\n");
+ return (EPERM);
+ }
+
+ memset(&opt, 0, sizeof(opt));
+
+ nlm_socket = NULL;
+ error = socreate(AF_INET, &nlm_socket, SOCK_DGRAM, 0,
+ td->td_ucred, td);
+ if (error) {
+ printf("NLM: can't create IPv4 socket - error %d\n", error);
+ return (error);
+ }
+ opt.sopt_dir = SOPT_SET;
+ opt.sopt_level = IPPROTO_IP;
+ opt.sopt_name = IP_PORTRANGE;
+ portlow = IP_PORTRANGE_LOW;
+ opt.sopt_val = &portlow;
+ opt.sopt_valsize = sizeof(portlow);
+ sosetopt(nlm_socket, &opt);
+
+#ifdef INET6
+ nlm_socket6 = NULL;
+ error = socreate(AF_INET6, &nlm_socket6, SOCK_DGRAM, 0,
+ td->td_ucred, td);
+ if (error) {
+ printf("NLM: can't create IPv6 socket - error %d\n", error);
+ return (error);
+ }
+ opt.sopt_dir = SOPT_SET;
+ opt.sopt_level = IPPROTO_IPV6;
+ opt.sopt_name = IPV6_PORTRANGE;
+ portlow = IPV6_PORTRANGE_LOW;
+ opt.sopt_val = &portlow;
+ opt.sopt_valsize = sizeof(portlow);
+ sosetopt(nlm_socket6, &opt);
+#endif
+
+#ifdef INET6
+ memset(&sin6, 0, sizeof(sin6));
+ sin6.sin6_len = sizeof(sin6);
+ sin6.sin6_family = AF_INET6;
+ sin6.sin6_addr = in6addr_loopback;
+ nlm_nsm = nlm_get_rpc((struct sockaddr *) &sin6, SM_PROG, SM_VERS);
+ if (!nlm_nsm) {
+#endif
+ memset(&sin, 0, sizeof(sin));
+ sin.sin_len = sizeof(sin);
+		sin.sin_family = AF_INET;
+ sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
+ nlm_nsm = nlm_get_rpc((struct sockaddr *) &sin, SM_PROG,
+ SM_VERS);
+#ifdef INET6
+ }
+#endif
+
+ if (!nlm_nsm) {
+ printf("Can't start NLM - unable to contact NSM\n");
+ return (EINVAL);
+ }
+
+ pool = svcpool_create();
+
+ error = nlm_register_services(pool, addr_count, addrs);
+ if (error)
+ goto out;
+
+ memset(&id, 0, sizeof(id));
+ id.my_name = "NFS NLM";
+
+ timo.tv_sec = 25;
+ timo.tv_usec = 0;
+ stat = CLNT_CALL(nlm_nsm, SM_UNMON_ALL,
+ (xdrproc_t) xdr_my_id, &id,
+ (xdrproc_t) xdr_sm_stat, &smstat, timo);
+
+ if (stat != RPC_SUCCESS) {
+ struct rpc_err err;
+
+ CLNT_GETERR(nlm_nsm, &err);
+ printf("NLM: unexpected error contacting NSM, stat=%d, errno=%d\n",
+ stat, err.re_errno);
+ error = EINVAL;
+ goto out;
+ }
+
+ if (nlm_debug_level >= 1)
+ printf("NLM: local NSM state is %d\n", smstat.state);
+
+ svc_run(pool);
+ error = 0;
+
+out:
+ if (pool)
+ svcpool_destroy(pool);
+
+ /*
+ * Trash all the existing state so that if the server
+ * restarts, it gets a clean slate.
+ */
+ while ((host = TAILQ_FIRST(&nlm_hosts)) != NULL) {
+ nlm_host_notify(host, 0, TRUE);
+ }
+ if (nlm_nsm) {
+ AUTH_DESTROY(nlm_nsm->cl_auth);
+ CLNT_DESTROY(nlm_nsm);
+ nlm_nsm = NULL;
+ }
+ if (nlm_lockd) {
+ AUTH_DESTROY(nlm_lockd->cl_auth);
+ CLNT_DESTROY(nlm_lockd);
+ nlm_lockd = NULL;
+ }
+
+ soclose(nlm_socket);
+ nlm_socket = NULL;
+#ifdef INET6
+ soclose(nlm_socket6);
+ nlm_socket6 = NULL;
+#endif
+
+ return (error);
+}
+
+int
+nlm_syscall(struct thread *td, struct nlm_syscall_args *uap)
+{
+ int error;
+
+ error = priv_check(td, PRIV_NFS_LOCKD);
+ if (error)
+ return (error);
+
+ nlm_debug_level = uap->debug_level;
+ nlm_grace_threshold = time_uptime + uap->grace_period;
+ nlm_next_idle_check = time_uptime + NLM_IDLE_PERIOD;
+
+ return nlm_server_main(uap->addr_count, uap->addrs);
+}
+
+/**********************************************************************/
+
+/*
+ * NLM implementation details, called from the RPC stubs.
+ */
+
+
+void
+nlm_sm_notify(struct nlm_sm_status *argp)
+{
+ uint32_t sysid;
+ struct nlm_host *host;
+
+ if (nlm_debug_level >= 3)
+ printf("nlm_sm_notify(): mon_name = %s\n", argp->mon_name);
+ memcpy(&sysid, &argp->priv, sizeof(sysid));
+ host = nlm_find_host_by_sysid(sysid);
+ if (host)
+ nlm_host_notify(host, argp->state, FALSE);
+}
+
+static void
+nlm_convert_to_fhandle_t(fhandle_t *fhp, struct netobj *p)
+{
+ memcpy(fhp, p->n_bytes, sizeof(fhandle_t));
+}
+
+struct vfs_state {
+ struct mount *vs_mp;
+ struct vnode *vs_vp;
+ int vs_vfslocked;
+};
+
+static int
+nlm_get_vfs_state(struct nlm_host *host, struct svc_req *rqstp,
+ fhandle_t *fhp, struct vfs_state *vs)
+{
+ int error, exflags, freecred;
+ struct ucred *cred = NULL, *credanon;
+
+ memset(vs, 0, sizeof(*vs));
+ freecred = FALSE;
+
+ vs->vs_mp = vfs_getvfs(&fhp->fh_fsid);
+ if (!vs->vs_mp) {
+ return (ESTALE);
+ }
+ vs->vs_vfslocked = VFS_LOCK_GIANT(vs->vs_mp);
+
+ error = VFS_CHECKEXP(vs->vs_mp, (struct sockaddr *)&host->nh_addr,
+ &exflags, &credanon);
+ if (error)
+ goto out;
+
+ if (exflags & MNT_EXRDONLY || (vs->vs_mp->mnt_flag & MNT_RDONLY)) {
+ error = EROFS;
+ goto out;
+ }
+
+ error = VFS_FHTOVP(vs->vs_mp, &fhp->fh_fid, &vs->vs_vp);
+ if (error)
+ goto out;
+
+ cred = crget();
+ freecred = TRUE;
+ if (!svc_getcred(rqstp, cred, NULL)) {
+ error = EINVAL;
+ goto out;
+ }
+ if (cred->cr_uid == 0 || (exflags & MNT_EXPORTANON)) {
+ crfree(cred);
+ cred = credanon;
+ freecred = FALSE;
+ }
+#if __FreeBSD_version < 800011
+ VOP_UNLOCK(vs->vs_vp, 0, curthread);
+#else
+ VOP_UNLOCK(vs->vs_vp, 0);
+#endif
+
+ /*
+ * Check cred.
+ */
+ error = VOP_ACCESS(vs->vs_vp, VWRITE, cred, curthread);
+ if (error)
+ goto out;
+
+out:
+ if (freecred)
+ crfree(cred);
+
+ return (error);
+}
+
+static void
+nlm_release_vfs_state(struct vfs_state *vs)
+{
+
+ if (vs->vs_vp)
+ vrele(vs->vs_vp);
+ if (vs->vs_mp)
+ vfs_rel(vs->vs_mp);
+ VFS_UNLOCK_GIANT(vs->vs_vfslocked);
+}
+
+static nlm4_stats
+nlm_convert_error(int error)
+{
+
+ if (error == ESTALE)
+ return nlm4_stale_fh;
+ else if (error == EROFS)
+ return nlm4_rofs;
+ else
+ return nlm4_failed;
+}
+
+struct nlm_host *
+nlm_do_test(nlm4_testargs *argp, nlm4_testres *result, struct svc_req *rqstp)
+{
+ fhandle_t fh;
+ struct vfs_state vs;
+ struct nlm_host *host, *bhost;
+ int error, sysid;
+ struct flock fl;
+
+ memset(result, 0, sizeof(*result));
+
+ host = nlm_find_host_by_name(argp->alock.caller_name, rqstp);
+ if (!host) {
+ result->stat.stat = nlm4_denied_nolocks;
+ return (NULL);
+ }
+
+ if (nlm_debug_level >= 3)
+ printf("nlm_do_test(): caller_name = %s (sysid = %d)\n",
+ host->nh_caller_name, host->nh_sysid);
+
+ nlm_free_finished_locks(host);
+ sysid = host->nh_sysid;
+
+ nlm_convert_to_fhandle_t(&fh, &argp->alock.fh);
+ nlm_copy_netobj(&result->cookie, &argp->cookie, M_RPC);
+
+ if (time_uptime < nlm_grace_threshold) {
+ result->stat.stat = nlm4_denied_grace_period;
+ return (host);
+ }
+
+ error = nlm_get_vfs_state(host, rqstp, &fh, &vs);
+ if (error) {
+ result->stat.stat = nlm_convert_error(error);
+ goto out;
+ }
+
+ fl.l_start = argp->alock.l_offset;
+ fl.l_len = argp->alock.l_len;
+ fl.l_pid = argp->alock.svid;
+ fl.l_sysid = sysid;
+ fl.l_whence = SEEK_SET;
+ if (argp->exclusive)
+ fl.l_type = F_WRLCK;
+ else
+ fl.l_type = F_RDLCK;
+ error = VOP_ADVLOCK(vs.vs_vp, NULL, F_GETLK, &fl, F_REMOTE);
+ if (error) {
+ result->stat.stat = nlm4_failed;
+ goto out;
+ }
+
+ if (fl.l_type == F_UNLCK) {
+ result->stat.stat = nlm4_granted;
+ } else {
+ result->stat.stat = nlm4_denied;
+ result->stat.nlm4_testrply_u.holder.exclusive =
+ (fl.l_type == F_WRLCK);
+ result->stat.nlm4_testrply_u.holder.svid = fl.l_pid;
+ bhost = nlm_find_host_by_sysid(fl.l_sysid);
+ if (bhost) {
+ /*
+ * We don't have any useful way of recording
+ * the value of oh used in the original lock
+ * request. Ideally, the test reply would have
+ * a space for the owning host's name allowing
+ * our caller's NLM to keep track.
+ *
+ * As far as I can see, Solaris uses an eight
+ * byte structure for oh which contains a four
+ * byte pid encoded in local byte order and
+ * the first four bytes of the host
+ * name. Linux uses a variable length string
+			 * 'pid@hostname' in ASCII but doesn't even
+ * return that in test replies.
+ *
+ * For the moment, return nothing in oh
+ * (already zero'ed above).
+ */
+ }
+ result->stat.nlm4_testrply_u.holder.l_offset = fl.l_start;
+ result->stat.nlm4_testrply_u.holder.l_len = fl.l_len;
+ }
+
+out:
+ nlm_release_vfs_state(&vs);
+ return (host);
+}
+
+struct nlm_host *
+nlm_do_lock(nlm4_lockargs *argp, nlm4_res *result, struct svc_req *rqstp,
+ bool_t monitor)
+{
+ fhandle_t fh;
+ struct vfs_state vs;
+ struct nlm_host *host;
+ int error, sysid;
+ struct flock fl;
+
+ memset(result, 0, sizeof(*result));
+
+ host = nlm_find_host_by_name(argp->alock.caller_name, rqstp);
+ if (!host) {
+ result->stat.stat = nlm4_denied_nolocks;
+ return (NULL);
+ }
+
+ if (nlm_debug_level >= 3)
+ printf("nlm_do_lock(): caller_name = %s (sysid = %d)\n",
+ host->nh_caller_name, host->nh_sysid);
+
+ nlm_free_finished_locks(host);
+ sysid = host->nh_sysid;
+
+ nlm_convert_to_fhandle_t(&fh, &argp->alock.fh);
+ nlm_copy_netobj(&result->cookie, &argp->cookie, M_RPC);
+
+ if (time_uptime < nlm_grace_threshold && !argp->reclaim) {
+ result->stat.stat = nlm4_denied_grace_period;
+ return (host);
+ }
+
+ error = nlm_get_vfs_state(host, rqstp, &fh, &vs);
+ if (error) {
+ result->stat.stat = nlm_convert_error(error);
+ goto out;
+ }
+
+ fl.l_start = argp->alock.l_offset;
+ fl.l_len = argp->alock.l_len;
+ fl.l_pid = argp->alock.svid;
+ fl.l_sysid = sysid;
+ fl.l_whence = SEEK_SET;
+ if (argp->exclusive)
+ fl.l_type = F_WRLCK;
+ else
+ fl.l_type = F_RDLCK;
+ if (argp->block) {
+ struct nlm_async_lock *af;
+
+ /*
+ * First, make sure we can contact the host's NLM.
+ */
+ if (!nlm_host_get_rpc(host)) {
+ result->stat.stat = nlm4_failed;
+ goto out;
+ }
+
+ /*
+		 * Next we need to check whether there is an
+ * existing blocked lock that matches. This could be a
+ * badly behaved client or an RPC re-send. If we find
+ * one, just return nlm4_blocked.
+ */
+ mtx_lock(&host->nh_lock);
+ TAILQ_FOREACH(af, &host->nh_pending, af_link) {
+ if (af->af_fl.l_start == fl.l_start
+ && af->af_fl.l_len == fl.l_len
+ && af->af_fl.l_pid == fl.l_pid
+ && af->af_fl.l_type == fl.l_type) {
+ break;
+ }
+ }
+ mtx_unlock(&host->nh_lock);
+ if (af) {
+ result->stat.stat = nlm4_blocked;
+ goto out;
+ }
+
+ af = malloc(sizeof(struct nlm_async_lock), M_NLM,
+ M_WAITOK|M_ZERO);
+ TASK_INIT(&af->af_task, 0, nlm_lock_callback, af);
+ af->af_vp = vs.vs_vp;
+ af->af_fl = fl;
+ af->af_host = host;
+ /*
+ * We use M_RPC here so that we can xdr_free the thing
+ * later.
+ */
+ af->af_granted.exclusive = argp->exclusive;
+ af->af_granted.alock.caller_name =
+ strdup(argp->alock.caller_name, M_RPC);
+ nlm_copy_netobj(&af->af_granted.alock.fh,
+ &argp->alock.fh, M_RPC);
+ nlm_copy_netobj(&af->af_granted.alock.oh,
+ &argp->alock.oh, M_RPC);
+ af->af_granted.alock.svid = argp->alock.svid;
+ af->af_granted.alock.l_offset = argp->alock.l_offset;
+ af->af_granted.alock.l_len = argp->alock.l_len;
+
+ /*
+ * Put the entry on the pending list before calling
+ * VOP_ADVLOCKASYNC. We do this in case the lock
+ * request was blocked (returning EINPROGRESS) but
+ * then granted before we manage to run again. The
+ * client may receive the granted message before we
+		 * send our blocked reply but that's their problem.
+ */
+ mtx_lock(&host->nh_lock);
+ TAILQ_INSERT_TAIL(&host->nh_pending, af, af_link);
+ mtx_unlock(&host->nh_lock);
+
+ error = VOP_ADVLOCKASYNC(vs.vs_vp, NULL, F_SETLK, &fl, F_REMOTE,
+ &af->af_task, &af->af_cookie);
+
+ /*
+ * If the lock completed synchronously, just free the
+ * tracking structure now.
+ */
+ if (error != EINPROGRESS) {
+ mtx_lock(&host->nh_lock);
+ TAILQ_REMOVE(&host->nh_pending, af, af_link);
+ mtx_unlock(&host->nh_lock);
+ xdr_free((xdrproc_t) xdr_nlm4_testargs,
+ &af->af_granted);
+ free(af, M_NLM);
+ } else {
+ if (nlm_debug_level >= 2)
+ printf("NLM: pending async lock %p for %s "
+ "(sysid %d)\n",
+ af, host->nh_caller_name, sysid);
+ /*
+ * Don't vrele the vnode just yet - this must
+ * wait until either the async callback
+ * happens or the lock is cancelled.
+ */
+ vs.vs_vp = NULL;
+ }
+ } else {
+ error = VOP_ADVLOCK(vs.vs_vp, NULL, F_SETLK, &fl, F_REMOTE);
+ }
+
+ if (error) {
+ if (error == EINPROGRESS) {
+ result->stat.stat = nlm4_blocked;
+ } else if (error == EDEADLK) {
+ result->stat.stat = nlm4_deadlck;
+ } else if (error == EAGAIN) {
+ result->stat.stat = nlm4_denied;
+ } else {
+ result->stat.stat = nlm4_failed;
+ }
+ } else {
+ if (monitor)
+ nlm_host_monitor(host, argp->state);
+ result->stat.stat = nlm4_granted;
+ }
+
+out:
+ nlm_release_vfs_state(&vs);
+
+ return (host);
+}
+
+struct nlm_host *
+nlm_do_cancel(nlm4_cancargs *argp, nlm4_res *result, struct svc_req *rqstp)
+{
+ fhandle_t fh;
+ struct vfs_state vs;
+ struct nlm_host *host;
+ int error, sysid;
+ struct flock fl;
+ struct nlm_async_lock *af;
+
+ memset(result, 0, sizeof(*result));
+
+ host = nlm_find_host_by_name(argp->alock.caller_name, rqstp);
+ if (!host) {
+ result->stat.stat = nlm4_denied_nolocks;
+ return (NULL);
+ }
+
+ if (nlm_debug_level >= 3)
+ printf("nlm_do_cancel(): caller_name = %s (sysid = %d)\n",
+ host->nh_caller_name, host->nh_sysid);
+
+ nlm_free_finished_locks(host);
+ sysid = host->nh_sysid;
+
+ nlm_convert_to_fhandle_t(&fh, &argp->alock.fh);
+ nlm_copy_netobj(&result->cookie, &argp->cookie, M_RPC);
+
+ if (time_uptime < nlm_grace_threshold) {
+ result->stat.stat = nlm4_denied_grace_period;
+ return (host);
+ }
+
+ error = nlm_get_vfs_state(host, rqstp, &fh, &vs);
+ if (error) {
+ result->stat.stat = nlm_convert_error(error);
+ goto out;
+ }
+
+ fl.l_start = argp->alock.l_offset;
+ fl.l_len = argp->alock.l_len;
+ fl.l_pid = argp->alock.svid;
+ fl.l_sysid = sysid;
+ fl.l_whence = SEEK_SET;
+ if (argp->exclusive)
+ fl.l_type = F_WRLCK;
+ else
+ fl.l_type = F_RDLCK;
+
+ /*
+	 * First we need to try to find the async lock request - if
+ * there isn't one, we give up and return nlm4_denied.
+ */
+ mtx_lock(&host->nh_lock);
+
+ TAILQ_FOREACH(af, &host->nh_pending, af_link) {
+ if (af->af_fl.l_start == fl.l_start
+ && af->af_fl.l_len == fl.l_len
+ && af->af_fl.l_pid == fl.l_pid
+ && af->af_fl.l_type == fl.l_type) {
+ break;
+ }
+ }
+
+ if (!af) {
+ mtx_unlock(&host->nh_lock);
+ result->stat.stat = nlm4_denied;
+ goto out;
+ }
+
+ error = nlm_cancel_async_lock(af);
+
+ if (error) {
+ result->stat.stat = nlm4_denied;
+ } else {
+ result->stat.stat = nlm4_granted;
+ }
+
+ mtx_unlock(&host->nh_lock);
+
+out:
+ nlm_release_vfs_state(&vs);
+
+ return (host);
+}
+
+struct nlm_host *
+nlm_do_unlock(nlm4_unlockargs *argp, nlm4_res *result, struct svc_req *rqstp)
+{
+ fhandle_t fh;
+ struct vfs_state vs;
+ struct nlm_host *host;
+ int error, sysid;
+ struct flock fl;
+
+ memset(result, 0, sizeof(*result));
+
+ host = nlm_find_host_by_name(argp->alock.caller_name, rqstp);
+ if (!host) {
+ result->stat.stat = nlm4_denied_nolocks;
+ return (NULL);
+ }
+
+ if (nlm_debug_level >= 3)
+ printf("nlm_do_unlock(): caller_name = %s (sysid = %d)\n",
+ host->nh_caller_name, host->nh_sysid);
+
+ nlm_free_finished_locks(host);
+ sysid = host->nh_sysid;
+
+ nlm_convert_to_fhandle_t(&fh, &argp->alock.fh);
+ nlm_copy_netobj(&result->cookie, &argp->cookie, M_RPC);
+
+ if (time_uptime < nlm_grace_threshold) {
+ result->stat.stat = nlm4_denied_grace_period;
+ return (host);
+ }
+
+ error = nlm_get_vfs_state(host, rqstp, &fh, &vs);
+ if (error) {
+ result->stat.stat = nlm_convert_error(error);
+ goto out;
+ }
+
+ fl.l_start = argp->alock.l_offset;
+ fl.l_len = argp->alock.l_len;
+ fl.l_pid = argp->alock.svid;
+ fl.l_sysid = sysid;
+ fl.l_whence = SEEK_SET;
+ fl.l_type = F_UNLCK;
+ error = VOP_ADVLOCK(vs.vs_vp, NULL, F_UNLCK, &fl, F_REMOTE);
+
+ /*
+ * Ignore the error - there is no result code for failure,
+ * only for grace period.
+ */
+ result->stat.stat = nlm4_granted;
+
+out:
+ nlm_release_vfs_state(&vs);
+
+ return (host);
+}
+
+void
+nlm_do_free_all(nlm4_notify *argp)
+{
+ struct nlm_host *host, *thost;
+
+ TAILQ_FOREACH_SAFE(host, &nlm_hosts, nh_link, thost) {
+ if (!strcmp(host->nh_caller_name, argp->name))
+ nlm_host_notify(host, argp->state, FALSE);
+ }
+}
+
+#define _PATH_RPCLOCKDSOCK "/var/run/rpclockd.sock"
+
+/*
+ * Make a connection to the userland lockd - we push anything we can't
+ * handle out to userland.
+ */
+CLIENT *
+nlm_user_lockd(void)
+{
+ struct sockaddr_un sun;
+ struct netconfig *nconf;
+ struct timeval zero;
+
+ if (nlm_lockd)
+ return (nlm_lockd);
+
+ sun.sun_family = AF_LOCAL;
+ strcpy(sun.sun_path, _PATH_RPCLOCKDSOCK);
+ sun.sun_len = SUN_LEN(&sun);
+
+ nconf = getnetconfigent("local");
+ nlm_lockd = clnt_reconnect_create(nconf, (struct sockaddr *) &sun,
+ NLM_PROG, NLM_VERS4, RPC_MAXDATASIZE, RPC_MAXDATASIZE);
+
+ /*
+ * Set the send timeout to zero - we only use this rpc handle
+ * for sending async replies which have no return value.
+ */
+ zero.tv_sec = 0;
+ zero.tv_usec = 0;
+ CLNT_CONTROL(nlm_lockd, CLSET_TIMEOUT, &zero);
+
+ return (nlm_lockd);
+}
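+
+/*
+ * Illustrative sketch only - not a call made in this file.  A reply or
+ * granted callback that the kernel cannot handle itself can be relayed
+ * to the userland rpc.lockd through this handle using the generated
+ * client stubs, e.g.
+ *
+ *	CLIENT *lockd = nlm_user_lockd();
+ *	if (lockd != NULL)
+ *		nlm4_granted_4(argp, &res4, lockd);
+ */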
diff --git a/sys/nlm/nlm_prot_server.c b/sys/nlm/nlm_prot_server.c
new file mode 100644
index 0000000..3e4499d
--- /dev/null
+++ b/sys/nlm/nlm_prot_server.c
@@ -0,0 +1,762 @@
+/*-
+ * Copyright (c) 2008 Isilon Inc http://www.isilon.com/
+ * Authors: Doug Rabson <dfr@rabson.org>
+ * Developed with Red Inc: Alfred Perlstein <alfred@freebsd.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+#ifndef lint
+/*static char sccsid[] = "from: @(#)nlm_prot.x 1.8 87/09/21 Copyr 1987 Sun Micro";*/
+/*static char sccsid[] = "from: * @(#)nlm_prot.x 2.1 88/08/01 4.0 RPCSRC";*/
+__RCSID("$NetBSD: nlm_prot.x,v 1.6 2000/06/07 14:30:15 bouyer Exp $");
+#endif /* not lint */
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/malloc.h>
+#include <sys/systm.h>
+
+#include "nlm_prot.h"
+#include "nlm.h"
+
+/**********************************************************************/
+
+/*
+ * Convert between the old NLM version 1/3 protocol structures and
+ * their NLM version 4 equivalents. The v4 structures carry 64-bit
+ * offsets and lengths, so values are widened on the way in and
+ * truncated on the way out.
+ */
+
+static void
+nlm_convert_to_nlm4_lock(struct nlm4_lock *dst, struct nlm_lock *src)
+{
+
+ dst->caller_name = src->caller_name;
+ dst->fh = src->fh;
+ dst->oh = src->oh;
+ dst->svid = src->svid;
+ dst->l_offset = src->l_offset;
+ dst->l_len = src->l_len;
+}
+
+static void
+nlm_convert_to_nlm4_share(struct nlm4_share *dst, struct nlm_share *src)
+{
+
+ dst->caller_name = src->caller_name;
+ dst->fh = src->fh;
+ dst->oh = src->oh;
+ dst->mode = src->mode;
+ dst->access = src->access;
+}
+
+static void
+nlm_convert_to_nlm_holder(struct nlm_holder *dst, struct nlm4_holder *src)
+{
+
+ dst->exclusive = src->exclusive;
+ dst->svid = src->svid;
+ dst->oh = src->oh;
+ dst->l_offset = src->l_offset;
+ dst->l_len = src->l_len;
+}
+
+static void
+nlm_convert_to_nlm4_holder(struct nlm4_holder *dst, struct nlm_holder *src)
+{
+
+ dst->exclusive = src->exclusive;
+ dst->svid = src->svid;
+ dst->oh = src->oh;
+ dst->l_offset = src->l_offset;
+ dst->l_len = src->l_len;
+}
+
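+/*
+ * NLM4 defines status codes (nlm4_rofs, nlm4_stale_fh, nlm4_fbig,
+ * nlm4_failed) that the older protocol cannot express; collapse
+ * anything beyond nlm4_deadlck to nlm_denied.
+ */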
+static enum nlm_stats
+nlm_convert_to_nlm_stats(enum nlm4_stats src)
+{
+
+	if (src > nlm4_deadlck)
+		return (nlm_denied);
+	return ((enum nlm_stats) src);
+}
+
+static void
+nlm_convert_to_nlm_res(struct nlm_res *dst, struct nlm4_res *src)
+{
+ dst->cookie = src->cookie;
+ dst->stat.stat = nlm_convert_to_nlm_stats(src->stat.stat);
+}
+
+static void
+nlm_convert_to_nlm4_res(struct nlm4_res *dst, struct nlm_res *src)
+{
+ dst->cookie = src->cookie;
+ dst->stat.stat = (enum nlm4_stats) src->stat.stat;
+}
+
+/**********************************************************************/
+
+/*
+ * RPC server stubs.
+ */
+
+bool_t
+nlm_sm_notify_0_svc(struct nlm_sm_status *argp, void *result, struct svc_req *rqstp)
+{
+ nlm_sm_notify(argp);
+
+ return (TRUE);
+}
+
+bool_t
+nlm_test_1_svc(struct nlm_testargs *argp, nlm_testres *result, struct svc_req *rqstp)
+{
+ bool_t retval;
+ nlm4_testargs args4;
+ nlm4_testres res4;
+
+ args4.cookie = argp->cookie;
+ args4.exclusive = argp->exclusive;
+ nlm_convert_to_nlm4_lock(&args4.alock, &argp->alock);
+
+ retval = nlm4_test_4_svc(&args4, &res4, rqstp);
+ if (retval) {
+ result->cookie = res4.cookie;
+ result->stat.stat = nlm_convert_to_nlm_stats(res4.stat.stat);
+ if (result->stat.stat == nlm_denied)
+ nlm_convert_to_nlm_holder(
+ &result->stat.nlm_testrply_u.holder,
+ &res4.stat.nlm4_testrply_u.holder);
+ }
+
+ return (retval);
+}
+
+bool_t
+nlm_lock_1_svc(struct nlm_lockargs *argp, nlm_res *result, struct svc_req *rqstp)
+{
+ bool_t retval;
+ nlm4_lockargs args4;
+ nlm4_res res4;
+
+ args4.cookie = argp->cookie;
+ args4.block = argp->block;
+ args4.exclusive = argp->exclusive;
+ nlm_convert_to_nlm4_lock(&args4.alock, &argp->alock);
+ args4.reclaim = argp->reclaim;
+ args4.state = argp->state;
+
+ retval = nlm4_lock_4_svc(&args4, &res4, rqstp);
+ if (retval)
+ nlm_convert_to_nlm_res(result, &res4);
+
+ return (retval);
+}
+
+bool_t
+nlm_cancel_1_svc(struct nlm_cancargs *argp, nlm_res *result, struct svc_req *rqstp)
+{
+ bool_t retval;
+ nlm4_cancargs args4;
+ nlm4_res res4;
+
+ args4.cookie = argp->cookie;
+ args4.block = argp->block;
+ args4.exclusive = argp->exclusive;
+ nlm_convert_to_nlm4_lock(&args4.alock, &argp->alock);
+
+ retval = nlm4_cancel_4_svc(&args4, &res4, rqstp);
+ if (retval)
+ nlm_convert_to_nlm_res(result, &res4);
+
+ return (retval);
+}
+
+bool_t
+nlm_unlock_1_svc(struct nlm_unlockargs *argp, nlm_res *result, struct svc_req *rqstp)
+{
+ bool_t retval;
+ nlm4_unlockargs args4;
+ nlm4_res res4;
+
+ args4.cookie = argp->cookie;
+ nlm_convert_to_nlm4_lock(&args4.alock, &argp->alock);
+
+ retval = nlm4_unlock_4_svc(&args4, &res4, rqstp);
+ if (retval)
+ nlm_convert_to_nlm_res(result, &res4);
+
+ return (retval);
+}
+
+bool_t
+nlm_granted_1_svc(struct nlm_testargs *argp, nlm_res *result, struct svc_req *rqstp)
+{
+ bool_t retval;
+ nlm4_testargs args4;
+ nlm4_res res4;
+
+ args4.cookie = argp->cookie;
+ args4.exclusive = argp->exclusive;
+ nlm_convert_to_nlm4_lock(&args4.alock, &argp->alock);
+
+ retval = nlm4_granted_4_svc(&args4, &res4, rqstp);
+ if (retval)
+ nlm_convert_to_nlm_res(result, &res4);
+
+ return (retval);
+}
+
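+/*
+ * The *_msg variants reply via a separate callback RPC instead of a
+ * normal RPC reply, so these routines return FALSE to stop the
+ * dispatcher from calling svc_sendreply().
+ */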
+bool_t
+nlm_test_msg_1_svc(struct nlm_testargs *argp, void *result, struct svc_req *rqstp)
+{
+ nlm4_testargs args4;
+ nlm4_testres res4;
+ nlm_testres res;
+ struct nlm_host *host;
+ CLIENT *rpc;
+ char dummy;
+
+ args4.cookie = argp->cookie;
+ args4.exclusive = argp->exclusive;
+ nlm_convert_to_nlm4_lock(&args4.alock, &argp->alock);
+
+ host = nlm_do_test(&args4, &res4, rqstp);
+
+ res.cookie = res4.cookie;
+ res.stat.stat = nlm_convert_to_nlm_stats(res4.stat.stat);
+ if (res.stat.stat == nlm_denied)
+ nlm_convert_to_nlm_holder(
+ &res.stat.nlm_testrply_u.holder,
+ &res4.stat.nlm4_testrply_u.holder);
+
+ rpc = nlm_host_get_rpc(host);
+ if (rpc)
+ nlm_test_res_1(&res, &dummy, rpc);
+ xdr_free((xdrproc_t) xdr_nlm_testres, &res);
+
+ return (FALSE);
+}
+
+bool_t
+nlm_lock_msg_1_svc(struct nlm_lockargs *argp, void *result, struct svc_req *rqstp)
+{
+ nlm4_lockargs args4;
+ nlm4_res res4;
+ nlm_res res;
+ struct nlm_host *host;
+ CLIENT *rpc;
+ char dummy;
+
+ args4.cookie = argp->cookie;
+ args4.block = argp->block;
+ args4.exclusive = argp->exclusive;
+ nlm_convert_to_nlm4_lock(&args4.alock, &argp->alock);
+ args4.reclaim = argp->reclaim;
+ args4.state = argp->state;
+
+ host = nlm_do_lock(&args4, &res4, rqstp, TRUE);
+
+ nlm_convert_to_nlm_res(&res, &res4);
+
+ rpc = nlm_host_get_rpc(host);
+ if (rpc)
+ nlm_lock_res_1(&res, &dummy, rpc);
+ xdr_free((xdrproc_t) xdr_nlm_res, &res);
+
+ return (FALSE);
+}
+
+bool_t
+nlm_cancel_msg_1_svc(struct nlm_cancargs *argp, void *result, struct svc_req *rqstp)
+{
+ nlm4_cancargs args4;
+ nlm4_res res4;
+ nlm_res res;
+ struct nlm_host *host;
+ CLIENT *rpc;
+ char dummy;
+
+ args4.cookie = argp->cookie;
+ args4.block = argp->block;
+ args4.exclusive = argp->exclusive;
+ nlm_convert_to_nlm4_lock(&args4.alock, &argp->alock);
+
+ host = nlm_do_cancel(&args4, &res4, rqstp);
+
+ nlm_convert_to_nlm_res(&res, &res4);
+
+ rpc = nlm_host_get_rpc(host);
+ if (rpc)
+ nlm_cancel_res_1(&res, &dummy, rpc);
+ xdr_free((xdrproc_t) xdr_nlm_res, &res);
+
+ return (FALSE);
+}
+
+bool_t
+nlm_unlock_msg_1_svc(struct nlm_unlockargs *argp, void *result, struct svc_req *rqstp)
+{
+ nlm4_unlockargs args4;
+ nlm4_res res4;
+ nlm_res res;
+ struct nlm_host *host;
+ CLIENT *rpc;
+ char dummy;
+
+ args4.cookie = argp->cookie;
+ nlm_convert_to_nlm4_lock(&args4.alock, &argp->alock);
+
+ host = nlm_do_unlock(&args4, &res4, rqstp);
+
+ nlm_convert_to_nlm_res(&res, &res4);
+
+ rpc = nlm_host_get_rpc(host);
+ if (rpc)
+ nlm_unlock_res_1(&res, &dummy, rpc);
+ xdr_free((xdrproc_t) xdr_nlm_res, &res);
+
+ return (FALSE);
+}
+
+bool_t
+nlm_granted_msg_1_svc(struct nlm_testargs *argp, void *result, struct svc_req *rqstp)
+{
+ nlm4_testargs args4;
+ nlm4_res res4;
+ nlm_res res;
+ struct nlm_host *host;
+ CLIENT *rpc;
+ char dummy;
+
+ args4.cookie = argp->cookie;
+ args4.exclusive = argp->exclusive;
+ nlm_convert_to_nlm4_lock(&args4.alock, &argp->alock);
+
+ /*
+ * We make a synchronous call to userland and send the reply
+ * back async.
+ */
+ nlm4_granted_4_svc(&args4, &res4, rqstp);
+
+ nlm_convert_to_nlm_res(&res, &res4);
+
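+	/*
+	 * The reply goes back to whichever peer sent the granted
+	 * message, so look the host up by the request's source address.
+	 */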
+ host = nlm_find_host_by_addr(
+ (struct sockaddr *) rqstp->rq_xprt->xp_rtaddr.buf,
+ rqstp->rq_vers);
+ rpc = nlm_host_get_rpc(host);
+ if (rpc)
+ nlm_granted_res_1(&res, &dummy, rpc);
+ xdr_free((xdrproc_t) xdr_nlm_res, &res);
+
+ return (FALSE);
+}
+
+bool_t
+nlm_test_res_1_svc(nlm_testres *argp, void *result, struct svc_req *rqstp)
+{
+ nlm4_testres args4;
+
+	args4.cookie = argp->cookie;
+	/* Widen the v1 status; the holder is only valid when denied. */
+	args4.stat.stat = (enum nlm4_stats) argp->stat.stat;
+	if (argp->stat.stat == nlm_denied)
+		nlm_convert_to_nlm4_holder(
+			&args4.stat.nlm4_testrply_u.holder,
+			&argp->stat.nlm_testrply_u.holder);
+
+ return (nlm4_test_res_4_svc(&args4, result, rqstp));
+}
+
+bool_t
+nlm_lock_res_1_svc(nlm_res *argp, void *result, struct svc_req *rqstp)
+{
+ nlm4_res arg4;
+
+ nlm_convert_to_nlm4_res(&arg4, argp);
+ return (nlm4_lock_res_4_svc(&arg4, result, rqstp));
+}
+
+bool_t
+nlm_cancel_res_1_svc(nlm_res *argp, void *result, struct svc_req *rqstp)
+{
+ nlm4_res arg4;
+
+ nlm_convert_to_nlm4_res(&arg4, argp);
+ return (nlm4_cancel_res_4_svc(&arg4, result, rqstp));
+}
+
+bool_t
+nlm_unlock_res_1_svc(nlm_res *argp, void *result, struct svc_req *rqstp)
+{
+ nlm4_res arg4;
+
+ nlm_convert_to_nlm4_res(&arg4, argp);
+ return (nlm4_unlock_res_4_svc(&arg4, result, rqstp));
+}
+
+bool_t
+nlm_granted_res_1_svc(nlm_res *argp, void *result, struct svc_req *rqstp)
+{
+ nlm4_res arg4;
+
+ nlm_convert_to_nlm4_res(&arg4, argp);
+ return (nlm4_granted_res_4_svc(&arg4, result, rqstp));
+}
+
+int
+nlm_prog_1_freeresult(SVCXPRT *transp, xdrproc_t xdr_result, caddr_t result)
+{
+
+ (void) xdr_free(xdr_result, result);
+ return (TRUE);
+}
+
+bool_t
+nlm_share_3_svc(nlm_shareargs *argp, nlm_shareres *result, struct svc_req *rqstp)
+{
+ bool_t retval;
+ nlm4_shareargs args4;
+ nlm4_shareres res4;
+
+ args4.cookie = argp->cookie;
+ nlm_convert_to_nlm4_share(&args4.share, &argp->share);
+ args4.reclaim = argp->reclaim;
+
+ retval = nlm4_share_4_svc(&args4, &res4, rqstp);
+ if (retval) {
+ result->cookie = res4.cookie;
+ result->stat = nlm_convert_to_nlm_stats(res4.stat);
+ result->sequence = res4.sequence;
+ }
+
+ return (retval);
+}
+
+bool_t
+nlm_unshare_3_svc(nlm_shareargs *argp, nlm_shareres *result, struct svc_req *rqstp)
+{
+ bool_t retval;
+ nlm4_shareargs args4;
+ nlm4_shareres res4;
+
+ args4.cookie = argp->cookie;
+ nlm_convert_to_nlm4_share(&args4.share, &argp->share);
+ args4.reclaim = argp->reclaim;
+
+ retval = nlm4_unshare_4_svc(&args4, &res4, rqstp);
+ if (retval) {
+ result->cookie = res4.cookie;
+ result->stat = nlm_convert_to_nlm_stats(res4.stat);
+ result->sequence = res4.sequence;
+ }
+
+ return (retval);
+}
+
+bool_t
+nlm_nm_lock_3_svc(nlm_lockargs *argp, nlm_res *result, struct svc_req *rqstp)
+{
+ bool_t retval;
+ nlm4_lockargs args4;
+ nlm4_res res4;
+
+ args4.cookie = argp->cookie;
+ args4.block = argp->block;
+ args4.exclusive = argp->exclusive;
+ nlm_convert_to_nlm4_lock(&args4.alock, &argp->alock);
+ args4.reclaim = argp->reclaim;
+ args4.state = argp->state;
+
+ retval = nlm4_nm_lock_4_svc(&args4, &res4, rqstp);
+ if (retval)
+ nlm_convert_to_nlm_res(result, &res4);
+
+ return (retval);
+}
+
+bool_t
+nlm_free_all_3_svc(nlm_notify *argp, void *result, struct svc_req *rqstp)
+{
+ struct nlm4_notify args4;
+
+ args4.name = argp->name;
+ args4.state = argp->state;
+
+ return (nlm4_free_all_4_svc(&args4, result, rqstp));
+}
+
+int
+nlm_prog_3_freeresult(SVCXPRT *transp, xdrproc_t xdr_result, caddr_t result)
+{
+
+ (void) xdr_free(xdr_result, result);
+ return (TRUE);
+}
+
+bool_t
+nlm4_test_4_svc(nlm4_testargs *argp, nlm4_testres *result, struct svc_req *rqstp)
+{
+
+ nlm_do_test(argp, result, rqstp);
+ return (TRUE);
+}
+
+bool_t
+nlm4_lock_4_svc(nlm4_lockargs *argp, nlm4_res *result, struct svc_req *rqstp)
+{
+
+ nlm_do_lock(argp, result, rqstp, TRUE);
+ return (TRUE);
+}
+
+bool_t
+nlm4_cancel_4_svc(nlm4_cancargs *argp, nlm4_res *result, struct svc_req *rqstp)
+{
+
+ nlm_do_cancel(argp, result, rqstp);
+ return (TRUE);
+}
+
+bool_t
+nlm4_unlock_4_svc(nlm4_unlockargs *argp, nlm4_res *result, struct svc_req *rqstp)
+{
+
+ nlm_do_unlock(argp, result, rqstp);
+ return (TRUE);
+}
+
+bool_t
+nlm4_granted_4_svc(nlm4_testargs *argp, nlm4_res *result, struct svc_req *rqstp)
+{
+ CLIENT* lockd;
+ struct timeval tv;
+
+ memset(result, 0, sizeof(*result));
+ nlm_copy_netobj(&result->cookie, &argp->cookie, M_RPC);
+
+ /*
+ * Set a non-zero timeout to give the userland a chance to reply.
+ */
+ lockd = nlm_user_lockd();
+ if (!lockd) {
+ result->stat.stat = nlm4_failed;
+ return (TRUE);
+ }
+ tv.tv_sec = 20;
+ tv.tv_usec = 0;
+ CLNT_CONTROL(lockd, CLSET_TIMEOUT, &tv);
+ nlm4_granted_4(argp, result, lockd);
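+	/* Restore the zero timeout used for async replies. */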
+ tv.tv_sec = 0;
+ tv.tv_usec = 0;
+ CLNT_CONTROL(lockd, CLSET_TIMEOUT, &tv);
+
+ return (TRUE);
+}
+
+bool_t
+nlm4_test_msg_4_svc(nlm4_testargs *argp, void *result, struct svc_req *rqstp)
+{
+ nlm4_testres res4;
+ struct nlm_host *host;
+ CLIENT *rpc;
+ char dummy;
+
+ host = nlm_do_test(argp, &res4, rqstp);
+ rpc = nlm_host_get_rpc(host);
+ if (rpc)
+ nlm4_test_res_4(&res4, &dummy, rpc);
+ xdr_free((xdrproc_t) xdr_nlm4_testres, &res4);
+
+ return (FALSE);
+}
+
+bool_t
+nlm4_lock_msg_4_svc(nlm4_lockargs *argp, void *result, struct svc_req *rqstp)
+{
+ nlm4_res res4;
+ struct nlm_host *host;
+ CLIENT *rpc;
+ char dummy;
+
+ host = nlm_do_lock(argp, &res4, rqstp, TRUE);
+ rpc = nlm_host_get_rpc(host);
+ if (rpc)
+ nlm4_lock_res_4(&res4, &dummy, rpc);
+ xdr_free((xdrproc_t) xdr_nlm4_res, &res4);
+
+ return (FALSE);
+}
+
+bool_t
+nlm4_cancel_msg_4_svc(nlm4_cancargs *argp, void *result, struct svc_req *rqstp)
+{
+ nlm4_res res4;
+ struct nlm_host *host;
+ CLIENT *rpc;
+ char dummy;
+
+ host = nlm_do_cancel(argp, &res4, rqstp);
+ rpc = nlm_host_get_rpc(host);
+ if (rpc)
+ nlm4_cancel_res_4(&res4, &dummy, rpc);
+ xdr_free((xdrproc_t) xdr_nlm4_res, &res4);
+
+ return (FALSE);
+}
+
+bool_t
+nlm4_unlock_msg_4_svc(nlm4_unlockargs *argp, void *result, struct svc_req *rqstp)
+{
+ nlm4_res res4;
+ struct nlm_host *host;
+ CLIENT *rpc;
+ char dummy;
+
+ host = nlm_do_unlock(argp, &res4, rqstp);
+ rpc = nlm_host_get_rpc(host);
+ if (rpc)
+ nlm4_unlock_res_4(&res4, &dummy, rpc);
+ xdr_free((xdrproc_t) xdr_nlm4_res, &res4);
+
+ return (FALSE);
+}
+
+bool_t
+nlm4_granted_msg_4_svc(nlm4_testargs *argp, void *result, struct svc_req *rqstp)
+{
+ struct nlm_host *host;
+ CLIENT *rpc;
+ nlm4_res res4;
+ char dummy;
+
+ /*
+ * We make a synchronous call to userland and send the reply
+ * back async.
+ */
+ nlm4_granted_4_svc(argp, &res4, rqstp);
+
+ host = nlm_find_host_by_addr(
+ (struct sockaddr *) rqstp->rq_xprt->xp_rtaddr.buf,
+ rqstp->rq_vers);
+ rpc = nlm_host_get_rpc(host);
+ if (rpc)
+ nlm4_granted_res_4(&res4, &dummy, rpc);
+ xdr_free((xdrproc_t) xdr_nlm4_res, &res4);
+
+ return (FALSE);
+}
+
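+/*
+ * Incoming *_res replies are passed straight through to the userland
+ * lockd.
+ */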
+bool_t
+nlm4_test_res_4_svc(nlm4_testres *argp, void *result, struct svc_req *rqstp)
+{
+ CLIENT* lockd;
+
+ lockd = nlm_user_lockd();
+ if (lockd)
+ nlm4_test_res_4(argp, result, lockd);
+
+ return (FALSE);
+}
+
+bool_t
+nlm4_lock_res_4_svc(nlm4_res *argp, void *result, struct svc_req *rqstp)
+{
+ CLIENT* lockd;
+
+ lockd = nlm_user_lockd();
+ if (lockd)
+ nlm4_lock_res_4(argp, result, lockd);
+
+ return (FALSE);
+}
+
+bool_t
+nlm4_cancel_res_4_svc(nlm4_res *argp, void *result, struct svc_req *rqstp)
+{
+ CLIENT* lockd;
+
+ lockd = nlm_user_lockd();
+ if (lockd)
+ nlm4_cancel_res_4(argp, result, lockd);
+
+ return (FALSE);
+}
+
+bool_t
+nlm4_unlock_res_4_svc(nlm4_res *argp, void *result, struct svc_req *rqstp)
+{
+ CLIENT* lockd;
+
+ lockd = nlm_user_lockd();
+ if (lockd)
+ nlm4_unlock_res_4(argp, result, lockd);
+
+ return (FALSE);
+}
+
+bool_t
+nlm4_granted_res_4_svc(nlm4_res *argp, void *result, struct svc_req *rqstp)
+{
+
+ return (FALSE);
+}
+
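+/*
+ * Share reservations are not implemented - always deny.
+ */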
+bool_t
+nlm4_share_4_svc(nlm4_shareargs *argp, nlm4_shareres *result, struct svc_req *rqstp)
+{
+
+ memset(result, 0, sizeof(*result));
+ result->stat = nlm4_denied;
+ return (TRUE);
+}
+
+bool_t
+nlm4_unshare_4_svc(nlm4_shareargs *argp, nlm4_shareres *result, struct svc_req *rqstp)
+{
+
+ memset(result, 0, sizeof(*result));
+ result->stat = nlm4_denied;
+ return (TRUE);
+}
+
+bool_t
+nlm4_nm_lock_4_svc(nlm4_lockargs *argp, nlm4_res *result, struct svc_req *rqstp)
+{
+
+ nlm_do_lock(argp, result, rqstp, FALSE);
+ return (TRUE);
+}
+
+bool_t
+nlm4_free_all_4_svc(nlm4_notify *argp, void *result, struct svc_req *rqstp)
+{
+
+ nlm_do_free_all(argp);
+ return (TRUE);
+}
+
+int
+nlm_prog_4_freeresult(SVCXPRT *transp, xdrproc_t xdr_result, caddr_t result)
+{
+
+ (void) xdr_free(xdr_result, result);
+ return (TRUE);
+}
diff --git a/sys/nlm/nlm_prot_svc.c b/sys/nlm/nlm_prot_svc.c
new file mode 100644
index 0000000..eca6d86
--- /dev/null
+++ b/sys/nlm/nlm_prot_svc.c
@@ -0,0 +1,509 @@
+/*-
+ * Copyright (c) 2008 Isilon Inc http://www.isilon.com/
+ * Authors: Doug Rabson <dfr@rabson.org>
+ * Developed with Red Inc: Alfred Perlstein <alfred@freebsd.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+
+#include "nlm_prot.h"
+#include "nlm.h"
+
+#include <sys/cdefs.h>
+#ifndef lint
+/*static char sccsid[] = "from: @(#)nlm_prot.x 1.8 87/09/21 Copyr 1987 Sun Micro";*/
+/*static char sccsid[] = "from: * @(#)nlm_prot.x 2.1 88/08/01 4.0 RPCSRC";*/
+__RCSID("$NetBSD: nlm_prot.x,v 1.6 2000/06/07 14:30:15 bouyer Exp $");
+#endif /* not lint */
+__FBSDID("$FreeBSD$");
+
+void nlm_prog_0(struct svc_req *rqstp, SVCXPRT *transp);
+void nlm_prog_1(struct svc_req *rqstp, SVCXPRT *transp);
+void nlm_prog_3(struct svc_req *rqstp, SVCXPRT *transp);
+void nlm_prog_4(struct svc_req *rqstp, SVCXPRT *transp);
+
+void
+nlm_prog_0(struct svc_req *rqstp, SVCXPRT *transp)
+{
+ union {
+ struct nlm_sm_status nlm_sm_notify_0_arg;
+ } argument;
+ char result;
+ bool_t retval;
+ xdrproc_t xdr_argument, xdr_result;
+ bool_t (*local)(char *, void *, struct svc_req *);
+
+ switch (rqstp->rq_proc) {
+ case NULLPROC:
+ (void) svc_sendreply(transp,
+ (xdrproc_t) xdr_void, (char *)NULL);
+ return;
+
+ case NLM_SM_NOTIFY:
+ xdr_argument = (xdrproc_t) xdr_nlm_sm_status;
+ xdr_result = (xdrproc_t) xdr_void;
+ local = (bool_t (*) (char *, void *, struct svc_req *))nlm_sm_notify_0_svc;
+ break;
+
+ default:
+ svcerr_noproc(transp);
+ return;
+ }
+ (void) memset((char *)&argument, 0, sizeof (argument));
+ if (!svc_getargs(transp, xdr_argument, (char *)(caddr_t) &argument)) {
+ svcerr_decode(transp);
+ return;
+ }
+ retval = (bool_t) (*local)((char *)&argument, (void *)&result, rqstp);
+ if (retval > 0 && !svc_sendreply(transp, xdr_result, (char *)&result)) {
+ svcerr_systemerr(transp);
+ }
+ if (!svc_freeargs(transp, xdr_argument, (char *)(caddr_t) &argument)) {
+ printf("unable to free arguments");
+ //exit(1);
+ }
+
+ return;
+}
+
+void
+nlm_prog_1(struct svc_req *rqstp, SVCXPRT *transp)
+{
+ union {
+ struct nlm_testargs nlm_test_1_arg;
+ struct nlm_lockargs nlm_lock_1_arg;
+ struct nlm_cancargs nlm_cancel_1_arg;
+ struct nlm_unlockargs nlm_unlock_1_arg;
+ struct nlm_testargs nlm_granted_1_arg;
+ struct nlm_testargs nlm_test_msg_1_arg;
+ struct nlm_lockargs nlm_lock_msg_1_arg;
+ struct nlm_cancargs nlm_cancel_msg_1_arg;
+ struct nlm_unlockargs nlm_unlock_msg_1_arg;
+ struct nlm_testargs nlm_granted_msg_1_arg;
+ nlm_testres nlm_test_res_1_arg;
+ nlm_res nlm_lock_res_1_arg;
+ nlm_res nlm_cancel_res_1_arg;
+ nlm_res nlm_unlock_res_1_arg;
+ nlm_res nlm_granted_res_1_arg;
+ } argument;
+ union {
+ nlm_testres nlm_test_1_res;
+ nlm_res nlm_lock_1_res;
+ nlm_res nlm_cancel_1_res;
+ nlm_res nlm_unlock_1_res;
+ nlm_res nlm_granted_1_res;
+ } result;
+ bool_t retval;
+ xdrproc_t xdr_argument, xdr_result;
+ bool_t (*local)(char *, void *, struct svc_req *);
+
+ switch (rqstp->rq_proc) {
+ case NULLPROC:
+ (void) svc_sendreply(transp,
+ (xdrproc_t) xdr_void, (char *)NULL);
+ return;
+
+ case NLM_TEST:
+ xdr_argument = (xdrproc_t) xdr_nlm_testargs;
+ xdr_result = (xdrproc_t) xdr_nlm_testres;
+ local = (bool_t (*) (char *, void *, struct svc_req *))nlm_test_1_svc;
+ break;
+
+ case NLM_LOCK:
+ xdr_argument = (xdrproc_t) xdr_nlm_lockargs;
+ xdr_result = (xdrproc_t) xdr_nlm_res;
+ local = (bool_t (*) (char *, void *, struct svc_req *))nlm_lock_1_svc;
+ break;
+
+ case NLM_CANCEL:
+ xdr_argument = (xdrproc_t) xdr_nlm_cancargs;
+ xdr_result = (xdrproc_t) xdr_nlm_res;
+ local = (bool_t (*) (char *, void *, struct svc_req *))nlm_cancel_1_svc;
+ break;
+
+ case NLM_UNLOCK:
+ xdr_argument = (xdrproc_t) xdr_nlm_unlockargs;
+ xdr_result = (xdrproc_t) xdr_nlm_res;
+ local = (bool_t (*) (char *, void *, struct svc_req *))nlm_unlock_1_svc;
+ break;
+
+ case NLM_GRANTED:
+ xdr_argument = (xdrproc_t) xdr_nlm_testargs;
+ xdr_result = (xdrproc_t) xdr_nlm_res;
+ local = (bool_t (*) (char *, void *, struct svc_req *))nlm_granted_1_svc;
+ break;
+
+ case NLM_TEST_MSG:
+ xdr_argument = (xdrproc_t) xdr_nlm_testargs;
+ xdr_result = (xdrproc_t) xdr_void;
+ local = (bool_t (*) (char *, void *, struct svc_req *))nlm_test_msg_1_svc;
+ break;
+
+ case NLM_LOCK_MSG:
+ xdr_argument = (xdrproc_t) xdr_nlm_lockargs;
+ xdr_result = (xdrproc_t) xdr_void;
+ local = (bool_t (*) (char *, void *, struct svc_req *))nlm_lock_msg_1_svc;
+ break;
+
+ case NLM_CANCEL_MSG:
+ xdr_argument = (xdrproc_t) xdr_nlm_cancargs;
+ xdr_result = (xdrproc_t) xdr_void;
+ local = (bool_t (*) (char *, void *, struct svc_req *))nlm_cancel_msg_1_svc;
+ break;
+
+ case NLM_UNLOCK_MSG:
+ xdr_argument = (xdrproc_t) xdr_nlm_unlockargs;
+ xdr_result = (xdrproc_t) xdr_void;
+ local = (bool_t (*) (char *, void *, struct svc_req *))nlm_unlock_msg_1_svc;
+ break;
+
+ case NLM_GRANTED_MSG:
+ xdr_argument = (xdrproc_t) xdr_nlm_testargs;
+ xdr_result = (xdrproc_t) xdr_void;
+ local = (bool_t (*) (char *, void *, struct svc_req *))nlm_granted_msg_1_svc;
+ break;
+
+ case NLM_TEST_RES:
+ xdr_argument = (xdrproc_t) xdr_nlm_testres;
+ xdr_result = (xdrproc_t) xdr_void;
+ local = (bool_t (*) (char *, void *, struct svc_req *))nlm_test_res_1_svc;
+ break;
+
+ case NLM_LOCK_RES:
+ xdr_argument = (xdrproc_t) xdr_nlm_res;
+ xdr_result = (xdrproc_t) xdr_void;
+ local = (bool_t (*) (char *, void *, struct svc_req *))nlm_lock_res_1_svc;
+ break;
+
+ case NLM_CANCEL_RES:
+ xdr_argument = (xdrproc_t) xdr_nlm_res;
+ xdr_result = (xdrproc_t) xdr_void;
+ local = (bool_t (*) (char *, void *, struct svc_req *))nlm_cancel_res_1_svc;
+ break;
+
+ case NLM_UNLOCK_RES:
+ xdr_argument = (xdrproc_t) xdr_nlm_res;
+ xdr_result = (xdrproc_t) xdr_void;
+ local = (bool_t (*) (char *, void *, struct svc_req *))nlm_unlock_res_1_svc;
+ break;
+
+ case NLM_GRANTED_RES:
+ xdr_argument = (xdrproc_t) xdr_nlm_res;
+ xdr_result = (xdrproc_t) xdr_void;
+ local = (bool_t (*) (char *, void *, struct svc_req *))nlm_granted_res_1_svc;
+ break;
+
+ default:
+ svcerr_noproc(transp);
+ return;
+ }
+ (void) memset((char *)&argument, 0, sizeof (argument));
+ if (!svc_getargs(transp, xdr_argument, (char *)(caddr_t) &argument)) {
+ svcerr_decode(transp);
+ return;
+ }
+ retval = (bool_t) (*local)((char *)&argument, (void *)&result, rqstp);
+ if (retval > 0 && !svc_sendreply(transp, xdr_result, (char *)&result)) {
+ svcerr_systemerr(transp);
+ }
+ if (!svc_freeargs(transp, xdr_argument, (char *)(caddr_t) &argument)) {
+ printf("unable to free arguments");
+ //exit(1);
+ }
+ if (!nlm_prog_1_freeresult(transp, xdr_result, (caddr_t) &result))
+ printf("unable to free results");
+
+ return;
+}
+
+void
+nlm_prog_3(struct svc_req *rqstp, SVCXPRT *transp)
+{
+ union {
+ nlm_shareargs nlm_share_3_arg;
+ nlm_shareargs nlm_unshare_3_arg;
+ nlm_lockargs nlm_nm_lock_3_arg;
+ nlm_notify nlm_free_all_3_arg;
+ } argument;
+ union {
+ nlm_shareres nlm_share_3_res;
+ nlm_shareres nlm_unshare_3_res;
+ nlm_res nlm_nm_lock_3_res;
+ } result;
+ bool_t retval;
+ xdrproc_t xdr_argument, xdr_result;
+ bool_t (*local)(char *, void *, struct svc_req *);
+
+ switch (rqstp->rq_proc) {
+ case NULLPROC:
+ (void) svc_sendreply(transp,
+ (xdrproc_t) xdr_void, (char *)NULL);
+ return;
+
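+	/*
+	 * NLM versions 1 and 3 share these procedures; hand them to
+	 * the version 1 dispatcher.
+	 */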
+ case NLM_TEST:
+ case NLM_LOCK:
+ case NLM_CANCEL:
+ case NLM_UNLOCK:
+ case NLM_GRANTED:
+ case NLM_TEST_MSG:
+ case NLM_LOCK_MSG:
+ case NLM_CANCEL_MSG:
+ case NLM_UNLOCK_MSG:
+ case NLM_GRANTED_MSG:
+ case NLM_TEST_RES:
+ case NLM_LOCK_RES:
+ case NLM_CANCEL_RES:
+ case NLM_UNLOCK_RES:
+ case NLM_GRANTED_RES:
+ nlm_prog_1(rqstp, transp);
+ return;
+
+ case NLM_SHARE:
+ xdr_argument = (xdrproc_t) xdr_nlm_shareargs;
+ xdr_result = (xdrproc_t) xdr_nlm_shareres;
+ local = (bool_t (*) (char *, void *, struct svc_req *))nlm_share_3_svc;
+ break;
+
+ case NLM_UNSHARE:
+ xdr_argument = (xdrproc_t) xdr_nlm_shareargs;
+ xdr_result = (xdrproc_t) xdr_nlm_shareres;
+ local = (bool_t (*) (char *, void *, struct svc_req *))nlm_unshare_3_svc;
+ break;
+
+ case NLM_NM_LOCK:
+ xdr_argument = (xdrproc_t) xdr_nlm_lockargs;
+ xdr_result = (xdrproc_t) xdr_nlm_res;
+ local = (bool_t (*) (char *, void *, struct svc_req *))nlm_nm_lock_3_svc;
+ break;
+
+ case NLM_FREE_ALL:
+ xdr_argument = (xdrproc_t) xdr_nlm_notify;
+ xdr_result = (xdrproc_t) xdr_void;
+ local = (bool_t (*) (char *, void *, struct svc_req *))nlm_free_all_3_svc;
+ break;
+
+ default:
+ svcerr_noproc(transp);
+ return;
+ }
+ (void) memset((char *)&argument, 0, sizeof (argument));
+ if (!svc_getargs(transp, xdr_argument, (char *)(caddr_t) &argument)) {
+ svcerr_decode(transp);
+ return;
+ }
+ retval = (bool_t) (*local)((char *)&argument, (void *)&result, rqstp);
+ if (retval > 0 && !svc_sendreply(transp, xdr_result, (char *)&result)) {
+ svcerr_systemerr(transp);
+ }
+ if (!svc_freeargs(transp, xdr_argument, (char *)(caddr_t) &argument)) {
+ printf("unable to free arguments");
+ //exit(1);
+ }
+ if (!nlm_prog_3_freeresult(transp, xdr_result, (caddr_t) &result))
+ printf("unable to free results");
+
+ return;
+}
+
+void
+nlm_prog_4(struct svc_req *rqstp, SVCXPRT *transp)
+{
+ union {
+ nlm4_testargs nlm4_test_4_arg;
+ nlm4_lockargs nlm4_lock_4_arg;
+ nlm4_cancargs nlm4_cancel_4_arg;
+ nlm4_unlockargs nlm4_unlock_4_arg;
+ nlm4_testargs nlm4_granted_4_arg;
+ nlm4_testargs nlm4_test_msg_4_arg;
+ nlm4_lockargs nlm4_lock_msg_4_arg;
+ nlm4_cancargs nlm4_cancel_msg_4_arg;
+ nlm4_unlockargs nlm4_unlock_msg_4_arg;
+ nlm4_testargs nlm4_granted_msg_4_arg;
+ nlm4_testres nlm4_test_res_4_arg;
+ nlm4_res nlm4_lock_res_4_arg;
+ nlm4_res nlm4_cancel_res_4_arg;
+ nlm4_res nlm4_unlock_res_4_arg;
+ nlm4_res nlm4_granted_res_4_arg;
+ nlm4_shareargs nlm4_share_4_arg;
+ nlm4_shareargs nlm4_unshare_4_arg;
+ nlm4_lockargs nlm4_nm_lock_4_arg;
+ nlm4_notify nlm4_free_all_4_arg;
+ } argument;
+ union {
+ nlm4_testres nlm4_test_4_res;
+ nlm4_res nlm4_lock_4_res;
+ nlm4_res nlm4_cancel_4_res;
+ nlm4_res nlm4_unlock_4_res;
+ nlm4_res nlm4_granted_4_res;
+ nlm4_shareres nlm4_share_4_res;
+ nlm4_shareres nlm4_unshare_4_res;
+ nlm4_res nlm4_nm_lock_4_res;
+ } result;
+ bool_t retval;
+ xdrproc_t xdr_argument, xdr_result;
+ bool_t (*local)(char *, void *, struct svc_req *);
+
+ switch (rqstp->rq_proc) {
+ case NULLPROC:
+ (void) svc_sendreply(transp,
+ (xdrproc_t) xdr_void, (char *)NULL);
+ return;
+
+ case NLM4_TEST:
+ xdr_argument = (xdrproc_t) xdr_nlm4_testargs;
+ xdr_result = (xdrproc_t) xdr_nlm4_testres;
+ local = (bool_t (*) (char *, void *, struct svc_req *))nlm4_test_4_svc;
+ break;
+
+ case NLM4_LOCK:
+ xdr_argument = (xdrproc_t) xdr_nlm4_lockargs;
+ xdr_result = (xdrproc_t) xdr_nlm4_res;
+ local = (bool_t (*) (char *, void *, struct svc_req *))nlm4_lock_4_svc;
+ break;
+
+ case NLM4_CANCEL:
+ xdr_argument = (xdrproc_t) xdr_nlm4_cancargs;
+ xdr_result = (xdrproc_t) xdr_nlm4_res;
+ local = (bool_t (*) (char *, void *, struct svc_req *))nlm4_cancel_4_svc;
+ break;
+
+ case NLM4_UNLOCK:
+ xdr_argument = (xdrproc_t) xdr_nlm4_unlockargs;
+ xdr_result = (xdrproc_t) xdr_nlm4_res;
+ local = (bool_t (*) (char *, void *, struct svc_req *))nlm4_unlock_4_svc;
+ break;
+
+ case NLM4_GRANTED:
+ xdr_argument = (xdrproc_t) xdr_nlm4_testargs;
+ xdr_result = (xdrproc_t) xdr_nlm4_res;
+ local = (bool_t (*) (char *, void *, struct svc_req *))nlm4_granted_4_svc;
+ break;
+
+ case NLM4_TEST_MSG:
+ xdr_argument = (xdrproc_t) xdr_nlm4_testargs;
+ xdr_result = (xdrproc_t) xdr_void;
+ local = (bool_t (*) (char *, void *, struct svc_req *))nlm4_test_msg_4_svc;
+ break;
+
+ case NLM4_LOCK_MSG:
+ xdr_argument = (xdrproc_t) xdr_nlm4_lockargs;
+ xdr_result = (xdrproc_t) xdr_void;
+ local = (bool_t (*) (char *, void *, struct svc_req *))nlm4_lock_msg_4_svc;
+ break;
+
+ case NLM4_CANCEL_MSG:
+ xdr_argument = (xdrproc_t) xdr_nlm4_cancargs;
+ xdr_result = (xdrproc_t) xdr_void;
+ local = (bool_t (*) (char *, void *, struct svc_req *))nlm4_cancel_msg_4_svc;
+ break;
+
+ case NLM4_UNLOCK_MSG:
+ xdr_argument = (xdrproc_t) xdr_nlm4_unlockargs;
+ xdr_result = (xdrproc_t) xdr_void;
+ local = (bool_t (*) (char *, void *, struct svc_req *))nlm4_unlock_msg_4_svc;
+ break;
+
+ case NLM4_GRANTED_MSG:
+ xdr_argument = (xdrproc_t) xdr_nlm4_testargs;
+ xdr_result = (xdrproc_t) xdr_void;
+ local = (bool_t (*) (char *, void *, struct svc_req *))nlm4_granted_msg_4_svc;
+ break;
+
+ case NLM4_TEST_RES:
+ xdr_argument = (xdrproc_t) xdr_nlm4_testres;
+ xdr_result = (xdrproc_t) xdr_void;
+ local = (bool_t (*) (char *, void *, struct svc_req *))nlm4_test_res_4_svc;
+ break;
+
+ case NLM4_LOCK_RES:
+ xdr_argument = (xdrproc_t) xdr_nlm4_res;
+ xdr_result = (xdrproc_t) xdr_void;
+ local = (bool_t (*) (char *, void *, struct svc_req *))nlm4_lock_res_4_svc;
+ break;
+
+ case NLM4_CANCEL_RES:
+ xdr_argument = (xdrproc_t) xdr_nlm4_res;
+ xdr_result = (xdrproc_t) xdr_void;
+ local = (bool_t (*) (char *, void *, struct svc_req *))nlm4_cancel_res_4_svc;
+ break;
+
+ case NLM4_UNLOCK_RES:
+ xdr_argument = (xdrproc_t) xdr_nlm4_res;
+ xdr_result = (xdrproc_t) xdr_void;
+ local = (bool_t (*) (char *, void *, struct svc_req *))nlm4_unlock_res_4_svc;
+ break;
+
+ case NLM4_GRANTED_RES:
+ xdr_argument = (xdrproc_t) xdr_nlm4_res;
+ xdr_result = (xdrproc_t) xdr_void;
+ local = (bool_t (*) (char *, void *, struct svc_req *))nlm4_granted_res_4_svc;
+ break;
+
+ case NLM4_SHARE:
+ xdr_argument = (xdrproc_t) xdr_nlm4_shareargs;
+ xdr_result = (xdrproc_t) xdr_nlm4_shareres;
+ local = (bool_t (*) (char *, void *, struct svc_req *))nlm4_share_4_svc;
+ break;
+
+ case NLM4_UNSHARE:
+ xdr_argument = (xdrproc_t) xdr_nlm4_shareargs;
+ xdr_result = (xdrproc_t) xdr_nlm4_shareres;
+ local = (bool_t (*) (char *, void *, struct svc_req *))nlm4_unshare_4_svc;
+ break;
+
+ case NLM4_NM_LOCK:
+ xdr_argument = (xdrproc_t) xdr_nlm4_lockargs;
+ xdr_result = (xdrproc_t) xdr_nlm4_res;
+ local = (bool_t (*) (char *, void *, struct svc_req *))nlm4_nm_lock_4_svc;
+ break;
+
+ case NLM4_FREE_ALL:
+ xdr_argument = (xdrproc_t) xdr_nlm4_notify;
+ xdr_result = (xdrproc_t) xdr_void;
+ local = (bool_t (*) (char *, void *, struct svc_req *))nlm4_free_all_4_svc;
+ break;
+
+ default:
+ svcerr_noproc(transp);
+ return;
+ }
+ (void) memset((char *)&argument, 0, sizeof (argument));
+ if (!svc_getargs(transp, xdr_argument, (char *)(caddr_t) &argument)) {
+ svcerr_decode(transp);
+ return;
+ }
+ retval = (bool_t) (*local)((char *)&argument, (void *)&result, rqstp);
+ if (retval > 0 && !svc_sendreply(transp, xdr_result, (char *)&result)) {
+ svcerr_systemerr(transp);
+ }
+ if (!svc_freeargs(transp, xdr_argument, (char *)(caddr_t) &argument)) {
+ printf("unable to free arguments");
+ //exit(1);
+ }
+ if (!nlm_prog_4_freeresult(transp, xdr_result, (caddr_t) &result))
+ printf("unable to free results");
+
+ return;
+}
diff --git a/sys/nlm/nlm_prot_xdr.c b/sys/nlm/nlm_prot_xdr.c
new file mode 100644
index 0000000..034cbbc
--- /dev/null
+++ b/sys/nlm/nlm_prot_xdr.c
@@ -0,0 +1,454 @@
+/*
+ * Please do not edit this file.
+ * It was generated using rpcgen.
+ */
+
+#include "nlm_prot.h"
+#include <sys/cdefs.h>
+#ifndef lint
+/*static char sccsid[] = "from: @(#)nlm_prot.x 1.8 87/09/21 Copyr 1987 Sun Micro";*/
+/*static char sccsid[] = "from: * @(#)nlm_prot.x 2.1 88/08/01 4.0 RPCSRC";*/
+__RCSID("$NetBSD: nlm_prot.x,v 1.6 2000/06/07 14:30:15 bouyer Exp $");
+#endif /* not lint */
+__FBSDID("$FreeBSD$");
+
+bool_t
+xdr_nlm_stats(XDR *xdrs, nlm_stats *objp)
+{
+
+ if (!xdr_enum(xdrs, (enum_t *)objp))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr_nlm_holder(XDR *xdrs, nlm_holder *objp)
+{
+
+ if (!xdr_bool(xdrs, &objp->exclusive))
+ return (FALSE);
+ if (!xdr_int(xdrs, &objp->svid))
+ return (FALSE);
+ if (!xdr_netobj(xdrs, &objp->oh))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->l_offset))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->l_len))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr_nlm_testrply(XDR *xdrs, nlm_testrply *objp)
+{
+
+ if (!xdr_nlm_stats(xdrs, &objp->stat))
+ return (FALSE);
+ switch (objp->stat) {
+ case nlm_denied:
+ if (!xdr_nlm_holder(xdrs, &objp->nlm_testrply_u.holder))
+ return (FALSE);
+ break;
+ default:
+ break;
+ }
+ return (TRUE);
+}
+
+bool_t
+xdr_nlm_stat(XDR *xdrs, nlm_stat *objp)
+{
+
+ if (!xdr_nlm_stats(xdrs, &objp->stat))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr_nlm_res(XDR *xdrs, nlm_res *objp)
+{
+
+ if (!xdr_netobj(xdrs, &objp->cookie))
+ return (FALSE);
+ if (!xdr_nlm_stat(xdrs, &objp->stat))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr_nlm_testres(XDR *xdrs, nlm_testres *objp)
+{
+
+ if (!xdr_netobj(xdrs, &objp->cookie))
+ return (FALSE);
+ if (!xdr_nlm_testrply(xdrs, &objp->stat))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr_nlm_lock(XDR *xdrs, nlm_lock *objp)
+{
+
+ if (!xdr_string(xdrs, &objp->caller_name, LM_MAXSTRLEN))
+ return (FALSE);
+ if (!xdr_netobj(xdrs, &objp->fh))
+ return (FALSE);
+ if (!xdr_netobj(xdrs, &objp->oh))
+ return (FALSE);
+ if (!xdr_int(xdrs, &objp->svid))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->l_offset))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->l_len))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr_nlm_lockargs(XDR *xdrs, nlm_lockargs *objp)
+{
+
+ if (!xdr_netobj(xdrs, &objp->cookie))
+ return (FALSE);
+ if (!xdr_bool(xdrs, &objp->block))
+ return (FALSE);
+ if (!xdr_bool(xdrs, &objp->exclusive))
+ return (FALSE);
+ if (!xdr_nlm_lock(xdrs, &objp->alock))
+ return (FALSE);
+ if (!xdr_bool(xdrs, &objp->reclaim))
+ return (FALSE);
+ if (!xdr_int(xdrs, &objp->state))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr_nlm_cancargs(XDR *xdrs, nlm_cancargs *objp)
+{
+
+ if (!xdr_netobj(xdrs, &objp->cookie))
+ return (FALSE);
+ if (!xdr_bool(xdrs, &objp->block))
+ return (FALSE);
+ if (!xdr_bool(xdrs, &objp->exclusive))
+ return (FALSE);
+ if (!xdr_nlm_lock(xdrs, &objp->alock))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr_nlm_testargs(XDR *xdrs, nlm_testargs *objp)
+{
+
+ if (!xdr_netobj(xdrs, &objp->cookie))
+ return (FALSE);
+ if (!xdr_bool(xdrs, &objp->exclusive))
+ return (FALSE);
+ if (!xdr_nlm_lock(xdrs, &objp->alock))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr_nlm_unlockargs(XDR *xdrs, nlm_unlockargs *objp)
+{
+
+ if (!xdr_netobj(xdrs, &objp->cookie))
+ return (FALSE);
+ if (!xdr_nlm_lock(xdrs, &objp->alock))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr_fsh_mode(XDR *xdrs, fsh_mode *objp)
+{
+
+ if (!xdr_enum(xdrs, (enum_t *)objp))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr_fsh_access(XDR *xdrs, fsh_access *objp)
+{
+
+ if (!xdr_enum(xdrs, (enum_t *)objp))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr_nlm_share(XDR *xdrs, nlm_share *objp)
+{
+
+ if (!xdr_string(xdrs, &objp->caller_name, LM_MAXSTRLEN))
+ return (FALSE);
+ if (!xdr_netobj(xdrs, &objp->fh))
+ return (FALSE);
+ if (!xdr_netobj(xdrs, &objp->oh))
+ return (FALSE);
+ if (!xdr_fsh_mode(xdrs, &objp->mode))
+ return (FALSE);
+ if (!xdr_fsh_access(xdrs, &objp->access))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr_nlm_shareargs(XDR *xdrs, nlm_shareargs *objp)
+{
+
+ if (!xdr_netobj(xdrs, &objp->cookie))
+ return (FALSE);
+ if (!xdr_nlm_share(xdrs, &objp->share))
+ return (FALSE);
+ if (!xdr_bool(xdrs, &objp->reclaim))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr_nlm_shareres(XDR *xdrs, nlm_shareres *objp)
+{
+
+ if (!xdr_netobj(xdrs, &objp->cookie))
+ return (FALSE);
+ if (!xdr_nlm_stats(xdrs, &objp->stat))
+ return (FALSE);
+ if (!xdr_int(xdrs, &objp->sequence))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr_nlm_notify(XDR *xdrs, nlm_notify *objp)
+{
+
+ if (!xdr_string(xdrs, &objp->name, MAXNAMELEN))
+ return (FALSE);
+ if (!xdr_long(xdrs, &objp->state))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr_nlm4_stats(XDR *xdrs, nlm4_stats *objp)
+{
+
+ if (!xdr_enum(xdrs, (enum_t *)objp))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr_nlm4_stat(XDR *xdrs, nlm4_stat *objp)
+{
+
+ if (!xdr_nlm4_stats(xdrs, &objp->stat))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr_nlm4_holder(XDR *xdrs, nlm4_holder *objp)
+{
+
+ if (!xdr_bool(xdrs, &objp->exclusive))
+ return (FALSE);
+ if (!xdr_uint32_t(xdrs, &objp->svid))
+ return (FALSE);
+ if (!xdr_netobj(xdrs, &objp->oh))
+ return (FALSE);
+ if (!xdr_uint64_t(xdrs, &objp->l_offset))
+ return (FALSE);
+ if (!xdr_uint64_t(xdrs, &objp->l_len))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr_nlm4_lock(XDR *xdrs, nlm4_lock *objp)
+{
+
+ if (!xdr_string(xdrs, &objp->caller_name, MAXNAMELEN))
+ return (FALSE);
+ if (!xdr_netobj(xdrs, &objp->fh))
+ return (FALSE);
+ if (!xdr_netobj(xdrs, &objp->oh))
+ return (FALSE);
+ if (!xdr_uint32_t(xdrs, &objp->svid))
+ return (FALSE);
+ if (!xdr_uint64_t(xdrs, &objp->l_offset))
+ return (FALSE);
+ if (!xdr_uint64_t(xdrs, &objp->l_len))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr_nlm4_share(XDR *xdrs, nlm4_share *objp)
+{
+
+ if (!xdr_string(xdrs, &objp->caller_name, MAXNAMELEN))
+ return (FALSE);
+ if (!xdr_netobj(xdrs, &objp->fh))
+ return (FALSE);
+ if (!xdr_netobj(xdrs, &objp->oh))
+ return (FALSE);
+ if (!xdr_fsh_mode(xdrs, &objp->mode))
+ return (FALSE);
+ if (!xdr_fsh_access(xdrs, &objp->access))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr_nlm4_testrply(XDR *xdrs, nlm4_testrply *objp)
+{
+
+ if (!xdr_nlm4_stats(xdrs, &objp->stat))
+ return (FALSE);
+ switch (objp->stat) {
+ case nlm_denied:
+ if (!xdr_nlm4_holder(xdrs, &objp->nlm4_testrply_u.holder))
+ return (FALSE);
+ break;
+ default:
+ break;
+ }
+ return (TRUE);
+}
+
+bool_t
+xdr_nlm4_testres(XDR *xdrs, nlm4_testres *objp)
+{
+
+ if (!xdr_netobj(xdrs, &objp->cookie))
+ return (FALSE);
+ if (!xdr_nlm4_testrply(xdrs, &objp->stat))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr_nlm4_testargs(XDR *xdrs, nlm4_testargs *objp)
+{
+
+ if (!xdr_netobj(xdrs, &objp->cookie))
+ return (FALSE);
+ if (!xdr_bool(xdrs, &objp->exclusive))
+ return (FALSE);
+ if (!xdr_nlm4_lock(xdrs, &objp->alock))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr_nlm4_res(XDR *xdrs, nlm4_res *objp)
+{
+
+ if (!xdr_netobj(xdrs, &objp->cookie))
+ return (FALSE);
+ if (!xdr_nlm4_stat(xdrs, &objp->stat))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr_nlm4_lockargs(XDR *xdrs, nlm4_lockargs *objp)
+{
+
+ if (!xdr_netobj(xdrs, &objp->cookie))
+ return (FALSE);
+ if (!xdr_bool(xdrs, &objp->block))
+ return (FALSE);
+ if (!xdr_bool(xdrs, &objp->exclusive))
+ return (FALSE);
+ if (!xdr_nlm4_lock(xdrs, &objp->alock))
+ return (FALSE);
+ if (!xdr_bool(xdrs, &objp->reclaim))
+ return (FALSE);
+ if (!xdr_int(xdrs, &objp->state))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr_nlm4_cancargs(XDR *xdrs, nlm4_cancargs *objp)
+{
+
+ if (!xdr_netobj(xdrs, &objp->cookie))
+ return (FALSE);
+ if (!xdr_bool(xdrs, &objp->block))
+ return (FALSE);
+ if (!xdr_bool(xdrs, &objp->exclusive))
+ return (FALSE);
+ if (!xdr_nlm4_lock(xdrs, &objp->alock))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr_nlm4_unlockargs(XDR *xdrs, nlm4_unlockargs *objp)
+{
+
+ if (!xdr_netobj(xdrs, &objp->cookie))
+ return (FALSE);
+ if (!xdr_nlm4_lock(xdrs, &objp->alock))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr_nlm4_shareargs(XDR *xdrs, nlm4_shareargs *objp)
+{
+
+ if (!xdr_netobj(xdrs, &objp->cookie))
+ return (FALSE);
+ if (!xdr_nlm4_share(xdrs, &objp->share))
+ return (FALSE);
+ if (!xdr_bool(xdrs, &objp->reclaim))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr_nlm4_shareres(XDR *xdrs, nlm4_shareres *objp)
+{
+
+ if (!xdr_netobj(xdrs, &objp->cookie))
+ return (FALSE);
+ if (!xdr_nlm4_stats(xdrs, &objp->stat))
+ return (FALSE);
+ if (!xdr_int(xdrs, &objp->sequence))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr_nlm_sm_status(XDR *xdrs, nlm_sm_status *objp)
+{
+
+ if (!xdr_string(xdrs, &objp->mon_name, LM_MAXSTRLEN))
+ return (FALSE);
+ if (!xdr_int(xdrs, &objp->state))
+ return (FALSE);
+ if (!xdr_opaque(xdrs, objp->priv, 16))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr_nlm4_notify(XDR *xdrs, nlm4_notify *objp)
+{
+
+ if (!xdr_string(xdrs, &objp->name, MAXNAMELEN))
+ return (FALSE);
+ if (!xdr_int32_t(xdrs, &objp->state))
+ return (FALSE);
+ return (TRUE);
+}
diff --git a/sys/nlm/sm_inter.h b/sys/nlm/sm_inter.h
new file mode 100644
index 0000000..0cc240b
--- /dev/null
+++ b/sys/nlm/sm_inter.h
@@ -0,0 +1,112 @@
+/*
+ * Please do not edit this file.
+ * It was generated using rpcgen.
+ */
+/* $FreeBSD$ */
+
+#ifndef _SM_INTER_H_RPCGEN
+#define _SM_INTER_H_RPCGEN
+
+#include <rpc/rpc.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define SM_MAXSTRLEN 1024
+
+struct sm_name {
+ char *mon_name;
+};
+typedef struct sm_name sm_name;
+
+struct my_id {
+ char *my_name;
+ int my_prog;
+ int my_vers;
+ int my_proc;
+};
+typedef struct my_id my_id;
+
+struct mon_id {
+ char *mon_name;
+ struct my_id my_id;
+};
+typedef struct mon_id mon_id;
+
+struct mon {
+ struct mon_id mon_id;
+ char priv[16];
+};
+typedef struct mon mon;
+
+struct stat_chge {
+ char *mon_name;
+ int state;
+};
+typedef struct stat_chge stat_chge;
+
+struct sm_stat {
+ int state;
+};
+typedef struct sm_stat sm_stat;
+
+enum sm_res {
+ stat_succ = 0,
+ stat_fail = 1
+};
+typedef enum sm_res sm_res;
+
+struct sm_stat_res {
+ sm_res res_stat;
+ int state;
+};
+typedef struct sm_stat_res sm_stat_res;
+
+struct sm_status {
+ char *mon_name;
+ int state;
+ char priv[16];
+};
+typedef struct sm_status sm_status;
+
+#define SM_PROG ((unsigned long)(100024))
+#define SM_VERS ((unsigned long)(1))
+
+extern void sm_prog_1(struct svc_req *rqstp, SVCXPRT *transp);
+#define SM_STAT ((unsigned long)(1))
+extern struct sm_stat_res * sm_stat_1(struct sm_name *, CLIENT *);
+extern struct sm_stat_res * sm_stat_1_svc(struct sm_name *, struct svc_req *);
+#define SM_MON ((unsigned long)(2))
+extern struct sm_stat_res * sm_mon_1(struct mon *, CLIENT *);
+extern struct sm_stat_res * sm_mon_1_svc(struct mon *, struct svc_req *);
+#define SM_UNMON ((unsigned long)(3))
+extern struct sm_stat * sm_unmon_1(struct mon_id *, CLIENT *);
+extern struct sm_stat * sm_unmon_1_svc(struct mon_id *, struct svc_req *);
+#define SM_UNMON_ALL ((unsigned long)(4))
+extern struct sm_stat * sm_unmon_all_1(struct my_id *, CLIENT *);
+extern struct sm_stat * sm_unmon_all_1_svc(struct my_id *, struct svc_req *);
+#define SM_SIMU_CRASH ((unsigned long)(5))
+extern void * sm_simu_crash_1(void *, CLIENT *);
+extern void * sm_simu_crash_1_svc(void *, struct svc_req *);
+#define SM_NOTIFY ((unsigned long)(6))
+extern void * sm_notify_1(struct stat_chge *, CLIENT *);
+extern void * sm_notify_1_svc(struct stat_chge *, struct svc_req *);
+extern int sm_prog_1_freeresult(SVCXPRT *, xdrproc_t, caddr_t);
+
+/* the xdr functions */
+extern bool_t xdr_sm_name(XDR *, sm_name*);
+extern bool_t xdr_my_id(XDR *, my_id*);
+extern bool_t xdr_mon_id(XDR *, mon_id*);
+extern bool_t xdr_mon(XDR *, mon*);
+extern bool_t xdr_stat_chge(XDR *, stat_chge*);
+extern bool_t xdr_sm_stat(XDR *, sm_stat*);
+extern bool_t xdr_sm_res(XDR *, sm_res*);
+extern bool_t xdr_sm_stat_res(XDR *, sm_stat_res*);
+extern bool_t xdr_sm_status(XDR *, sm_status*);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !_SM_INTER_H_RPCGEN */
diff --git a/sys/nlm/sm_inter_xdr.c b/sys/nlm/sm_inter_xdr.c
new file mode 100644
index 0000000..5f75432
--- /dev/null
+++ b/sys/nlm/sm_inter_xdr.c
@@ -0,0 +1,107 @@
+/*
+ * Please do not edit this file.
+ * It was generated using rpcgen.
+ */
+
+#include "sm_inter.h"
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+bool_t
+xdr_sm_name(XDR *xdrs, sm_name *objp)
+{
+
+ if (!xdr_string(xdrs, &objp->mon_name, SM_MAXSTRLEN))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr_my_id(XDR *xdrs, my_id *objp)
+{
+
+ if (!xdr_string(xdrs, &objp->my_name, SM_MAXSTRLEN))
+ return (FALSE);
+ if (!xdr_int(xdrs, &objp->my_prog))
+ return (FALSE);
+ if (!xdr_int(xdrs, &objp->my_vers))
+ return (FALSE);
+ if (!xdr_int(xdrs, &objp->my_proc))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr_mon_id(XDR *xdrs, mon_id *objp)
+{
+
+ if (!xdr_string(xdrs, &objp->mon_name, SM_MAXSTRLEN))
+ return (FALSE);
+ if (!xdr_my_id(xdrs, &objp->my_id))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr_mon(XDR *xdrs, mon *objp)
+{
+
+ if (!xdr_mon_id(xdrs, &objp->mon_id))
+ return (FALSE);
+ if (!xdr_opaque(xdrs, objp->priv, 16))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr_stat_chge(XDR *xdrs, stat_chge *objp)
+{
+
+ if (!xdr_string(xdrs, &objp->mon_name, SM_MAXSTRLEN))
+ return (FALSE);
+ if (!xdr_int(xdrs, &objp->state))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr_sm_stat(XDR *xdrs, sm_stat *objp)
+{
+
+ if (!xdr_int(xdrs, &objp->state))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr_sm_res(XDR *xdrs, sm_res *objp)
+{
+
+ if (!xdr_enum(xdrs, (enum_t *)objp))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr_sm_stat_res(XDR *xdrs, sm_stat_res *objp)
+{
+
+ if (!xdr_sm_res(xdrs, &objp->res_stat))
+ return (FALSE);
+ if (!xdr_int(xdrs, &objp->state))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr_sm_status(XDR *xdrs, sm_status *objp)
+{
+
+ if (!xdr_string(xdrs, &objp->mon_name, SM_MAXSTRLEN))
+ return (FALSE);
+ if (!xdr_int(xdrs, &objp->state))
+ return (FALSE);
+ if (!xdr_opaque(xdrs, objp->priv, 16))
+ return (FALSE);
+ return (TRUE);
+}
diff --git a/sys/rpc/auth.h b/sys/rpc/auth.h
new file mode 100644
index 0000000..b919559
--- /dev/null
+++ b/sys/rpc/auth.h
@@ -0,0 +1,361 @@
+/* $NetBSD: auth.h,v 1.15 2000/06/02 22:57:55 fvdl Exp $ */
+
+/*
+ * Sun RPC is a product of Sun Microsystems, Inc. and is provided for
+ * unrestricted use provided that this legend is included on all tape
+ * media and as a part of the software program in whole or part. Users
+ * may copy or modify Sun RPC without charge, but are not authorized
+ * to license or distribute it to anyone else except as part of a product or
+ * program developed by the user.
+ *
+ * SUN RPC IS PROVIDED AS IS WITH NO WARRANTIES OF ANY KIND INCLUDING THE
+ * WARRANTIES OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE, OR ARISING FROM A COURSE OF DEALING, USAGE OR TRADE PRACTICE.
+ *
+ * Sun RPC is provided with no support and without any obligation on the
+ * part of Sun Microsystems, Inc. to assist in its use, correction,
+ * modification or enhancement.
+ *
+ * SUN MICROSYSTEMS, INC. SHALL HAVE NO LIABILITY WITH RESPECT TO THE
+ * INFRINGEMENT OF COPYRIGHTS, TRADE SECRETS OR ANY PATENTS BY SUN RPC
+ * OR ANY PART THEREOF.
+ *
+ * In no event will Sun Microsystems, Inc. be liable for any lost revenue
+ * or profits or other special, indirect and consequential damages, even if
+ * Sun has been advised of the possibility of such damages.
+ *
+ * Sun Microsystems, Inc.
+ * 2550 Garcia Avenue
+ * Mountain View, California 94043
+ *
+ * from: @(#)auth.h 1.17 88/02/08 SMI
+ * from: @(#)auth.h 2.3 88/08/07 4.0 RPCSRC
+ * from: @(#)auth.h 1.43 98/02/02 SMI
+ * $FreeBSD$
+ */
+
+/*
+ * auth.h, Authentication interface.
+ *
+ * Copyright (C) 1984, Sun Microsystems, Inc.
+ *
+ * The data structures are completely opaque to the client. The client
+ * is required to pass an AUTH * to routines that create rpc
+ * "sessions".
+ */
+
+#ifndef _RPC_AUTH_H
+#define _RPC_AUTH_H
+#include <rpc/xdr.h>
+#include <rpc/clnt_stat.h>
+#include <sys/cdefs.h>
+#include <sys/socket.h>
+
+#define MAX_AUTH_BYTES 400
+#define MAXNETNAMELEN 255 /* maximum length of network user's name */
+
+/*
+ * Client side authentication/security data
+ */
+
+typedef struct sec_data {
+ u_int secmod; /* security mode number e.g. in nfssec.conf */
+ u_int rpcflavor; /* rpc flavors:AUTH_UNIX,AUTH_DES,RPCSEC_GSS */
+ int flags; /* AUTH_F_xxx flags */
+ caddr_t data; /* opaque data per flavor */
+} sec_data_t;
+
+#ifdef _SYSCALL32_IMPL
+struct sec_data32 {
+ uint32_t secmod; /* security mode number e.g. in nfssec.conf */
+ uint32_t rpcflavor; /* rpc flavors:AUTH_UNIX,AUTH_DES,RPCSEC_GSS */
+ int32_t flags; /* AUTH_F_xxx flags */
+ caddr32_t data; /* opaque data per flavor */
+};
+#endif /* _SYSCALL32_IMPL */
+
+/*
+ * AUTH_DES flavor specific data from sec_data opaque data field.
+ * AUTH_KERB has the same structure.
+ */
+typedef struct des_clnt_data {
+ struct netbuf syncaddr; /* time sync addr */
+	struct knetconfig *knconf;	/* knetconfig info associated */
+					/* with the syncaddr. */
+ char *netname; /* server's netname */
+ int netnamelen; /* server's netname len */
+} dh_k4_clntdata_t;
+
+#ifdef _SYSCALL32_IMPL
+struct des_clnt_data32 {
+ struct netbuf32 syncaddr; /* time sync addr */
+	caddr32_t knconf;	/* knetconfig info associated */
+				/* with the syncaddr. */
+ caddr32_t netname; /* server's netname */
+ int32_t netnamelen; /* server's netname len */
+};
+#endif /* _SYSCALL32_IMPL */
+
+#ifdef KERBEROS
+/*
+ * flavor specific data to hold the data for AUTH_DES/AUTH_KERB(v4)
+ * in sec_data->data opaque field.
+ */
+typedef struct krb4_svc_data {
+ int window; /* window option value */
+} krb4_svcdata_t;
+
+typedef struct krb4_svc_data des_svcdata_t;
+#endif /* KERBEROS */
+
+/*
+ * authentication/security specific flags
+ */
+#define AUTH_F_RPCTIMESYNC 0x001 /* use RPC to do time sync */
+#define AUTH_F_TRYNONE 0x002 /* allow fall back to AUTH_NONE */
+
+
+/*
+ * Status returned from authentication check
+ */
+enum auth_stat {
+ AUTH_OK=0,
+ /*
+ * failed at remote end
+ */
+ AUTH_BADCRED=1, /* bogus credentials (seal broken) */
+ AUTH_REJECTEDCRED=2, /* client should begin new session */
+ AUTH_BADVERF=3, /* bogus verifier (seal broken) */
+ AUTH_REJECTEDVERF=4, /* verifier expired or was replayed */
+ AUTH_TOOWEAK=5, /* rejected due to security reasons */
+ /*
+ * failed locally
+ */
+ AUTH_INVALIDRESP=6, /* bogus response verifier */
+ AUTH_FAILED=7 /* some unknown reason */
+#ifdef KERBEROS
+ /*
+ * kerberos errors
+ */
+ ,
+ AUTH_KERB_GENERIC = 8, /* kerberos generic error */
+ AUTH_TIMEEXPIRE = 9, /* time of credential expired */
+ AUTH_TKT_FILE = 10, /* something wrong with ticket file */
+ AUTH_DECODE = 11, /* can't decode authenticator */
+ AUTH_NET_ADDR = 12 /* wrong net address in ticket */
+#endif /* KERBEROS */
+};
+
+union des_block {
+ struct {
+ uint32_t high;
+ uint32_t low;
+ } key;
+ char c[8];
+};
+typedef union des_block des_block;
+__BEGIN_DECLS
+extern bool_t xdr_des_block(XDR *, des_block *);
+__END_DECLS
+
+/*
+ * Authentication info. Opaque to client.
+ */
+struct opaque_auth {
+ enum_t oa_flavor; /* flavor of auth */
+ caddr_t oa_base; /* address of more auth stuff */
+ u_int oa_length; /* not to exceed MAX_AUTH_BYTES */
+};
+
+
+/*
+ * Auth handle, interface to client side authenticators.
+ */
+typedef struct __auth {
+ struct opaque_auth ah_cred;
+ struct opaque_auth ah_verf;
+ union des_block ah_key;
+ struct auth_ops {
+ void (*ah_nextverf) (struct __auth *);
+ /* nextverf & serialize */
+ int (*ah_marshal) (struct __auth *, XDR *);
+ /* validate verifier */
+ int (*ah_validate) (struct __auth *,
+ struct opaque_auth *);
+ /* refresh credentials */
+ int (*ah_refresh) (struct __auth *, void *);
+ /* destroy this structure */
+ void (*ah_destroy) (struct __auth *);
+ } *ah_ops;
+ void *ah_private;
+} AUTH;
+
+
+/*
+ * Authentication ops.
+ * The ops and the auth handle provide the interface to the authenticators.
+ *
+ * AUTH *auth;
+ * XDR *xdrs;
+ * struct opaque_auth verf;
+ */
+#define AUTH_NEXTVERF(auth) \
+ ((*((auth)->ah_ops->ah_nextverf))(auth))
+#define auth_nextverf(auth) \
+ ((*((auth)->ah_ops->ah_nextverf))(auth))
+
+#define AUTH_MARSHALL(auth, xdrs) \
+ ((*((auth)->ah_ops->ah_marshal))(auth, xdrs))
+#define auth_marshall(auth, xdrs) \
+ ((*((auth)->ah_ops->ah_marshal))(auth, xdrs))
+
+#define AUTH_VALIDATE(auth, verfp) \
+ ((*((auth)->ah_ops->ah_validate))((auth), verfp))
+#define auth_validate(auth, verfp) \
+ ((*((auth)->ah_ops->ah_validate))((auth), verfp))
+
+#define AUTH_REFRESH(auth, msg) \
+ ((*((auth)->ah_ops->ah_refresh))(auth, msg))
+#define auth_refresh(auth, msg) \
+ ((*((auth)->ah_ops->ah_refresh))(auth, msg))
+
+#define AUTH_DESTROY(auth) \
+ ((*((auth)->ah_ops->ah_destroy))(auth))
+#define auth_destroy(auth) \
+ ((*((auth)->ah_ops->ah_destroy))(auth))
+
+
+__BEGIN_DECLS
+extern struct opaque_auth _null_auth;
+__END_DECLS
+
+/*
+ * These are the various implementations of client side authenticators.
+ */
+
+/*
+ * System style authentication
+ * AUTH *authunix_create(machname, uid, gid, len, aup_gids)
+ * char *machname;
+ * int uid;
+ * int gid;
+ * int len;
+ * int *aup_gids;
+ */
+__BEGIN_DECLS
+#ifdef _KERNEL
+struct ucred;
+extern AUTH *authunix_create(struct ucred *);
+#else
+extern AUTH *authunix_create(char *, int, int, int,
+ int *);
+extern AUTH *authunix_create_default(void); /* takes no parameters */
+#endif
+extern AUTH *authnone_create(void); /* takes no parameters */
+__END_DECLS
+/*
+ * DES style authentication
+ * AUTH *authsecdes_create(servername, window, timehost, ckey)
+ * char *servername; - network name of server
+ * u_int window; - time to live
+ * const char *timehost; - optional hostname to sync with
+ * des_block *ckey; - optional conversation key to use
+ */
+__BEGIN_DECLS
+extern AUTH *authdes_create (char *, u_int, struct sockaddr *, des_block *);
+extern AUTH *authdes_seccreate (const char *, const u_int, const char *,
+ const des_block *);
+__END_DECLS
+
+__BEGIN_DECLS
+extern bool_t xdr_opaque_auth (XDR *, struct opaque_auth *);
+__END_DECLS
+
+#define authsys_create(c,i1,i2,i3,ip) authunix_create((c),(i1),(i2),(i3),(ip))
+#define authsys_create_default() authunix_create_default()
+
+/*
+ * Netname manipulation routines.
+ */
+__BEGIN_DECLS
+extern int getnetname(char *);
+extern int host2netname(char *, const char *, const char *);
+extern int user2netname(char *, const uid_t, const char *);
+extern int netname2user(char *, uid_t *, gid_t *, int *, gid_t *);
+extern int netname2host(char *, char *, const int);
+extern void passwd2des ( char *, char * );
+__END_DECLS
+
+/*
+ * These routines interface to the keyserv daemon.
+ */
+__BEGIN_DECLS
+extern int key_decryptsession(const char *, des_block *);
+extern int key_encryptsession(const char *, des_block *);
+extern int key_gendes(des_block *);
+extern int key_setsecret(const char *);
+extern int key_secretkey_is_set(void);
+__END_DECLS
+
+/*
+ * Publickey routines.
+ */
+__BEGIN_DECLS
+extern int getpublickey (const char *, char *);
+extern int getpublicandprivatekey (const char *, char *);
+extern int getsecretkey (char *, char *, char *);
+__END_DECLS
+
+#ifdef KERBEROS
+/*
+ * Kerberos style authentication
+ * AUTH *authkerb_seccreate(service, srv_inst, realm, window, timehost, status)
+ * const char *service; - service name
+ * const char *srv_inst; - server instance
+ * const char *realm; - server realm
+ * const u_int window; - time to live
+ * const char *timehost; - optional hostname to sync with
+ * int *status; - kerberos status returned
+ */
+__BEGIN_DECLS
+extern AUTH *authkerb_seccreate(const char *, const char *, const char *,
+ const u_int, const char *, int *);
+__END_DECLS
+
+/*
+ * Map a kerberos credential into a unix cred.
+ *
+ * authkerb_getucred(rqst, uid, gid, grouplen, groups)
+ * const struct svc_req *rqst; - request pointer
+ * uid_t *uid;
+ * gid_t *gid;
+ * short *grouplen;
+ * int *groups;
+ *
+ */
+__BEGIN_DECLS
+extern int authkerb_getucred(/* struct svc_req *, uid_t *, gid_t *,
+ short *, int * */);
+__END_DECLS
+#endif /* KERBEROS */
+
+__BEGIN_DECLS
+struct svc_req;
+struct rpc_msg;
+enum auth_stat _svcauth_null (struct svc_req *, struct rpc_msg *);
+enum auth_stat _svcauth_short (struct svc_req *, struct rpc_msg *);
+enum auth_stat _svcauth_unix (struct svc_req *, struct rpc_msg *);
+__END_DECLS
+
+#define AUTH_NONE 0 /* no authentication */
+#define AUTH_NULL 0 /* backward compatibility */
+#define AUTH_SYS 1 /* unix style (uid, gids) */
+#define AUTH_UNIX AUTH_SYS
+#define AUTH_SHORT 2 /* short hand unix style */
+#define AUTH_DH 3 /* for Diffie-Hellman mechanism */
+#define AUTH_DES AUTH_DH /* for backward compatibility */
+#define AUTH_KERB 4 /* kerberos style */
+
+#endif /* !_RPC_AUTH_H */
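The kernel variant of authunix_create() declared above takes a single struct ucred *
rather than the traditional machname/uid/gid argument list, so a kernel caller simply
hands over a credential reference. A minimal usage sketch (not part of this patch; the
helper name and the already-created CLIENT handle are assumed for illustration):

	#include <sys/param.h>
	#include <sys/proc.h>
	#include <sys/ucred.h>
	#include <rpc/rpc.h>

	/* Hypothetical helper: switch a kernel RPC client to AUTH_SYS. */
	static void
	example_set_authsys(CLIENT *cl)
	{
		/*
		 * Serialize the calling thread's uid/gids once; the handle
		 * is then emitted on every call via AUTH_MARSHALL().
		 */
		cl->cl_auth = authunix_create(curthread->td_ucred);
	}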
diff --git a/sys/rpc/auth_none.c b/sys/rpc/auth_none.c
new file mode 100644
index 0000000..8530437
--- /dev/null
+++ b/sys/rpc/auth_none.c
@@ -0,0 +1,148 @@
+/* $NetBSD: auth_none.c,v 1.13 2000/01/22 22:19:17 mycroft Exp $ */
+
+/*
+ * Sun RPC is a product of Sun Microsystems, Inc. and is provided for
+ * unrestricted use provided that this legend is included on all tape
+ * media and as a part of the software program in whole or part. Users
+ * may copy or modify Sun RPC without charge, but are not authorized
+ * to license or distribute it to anyone else except as part of a product or
+ * program developed by the user.
+ *
+ * SUN RPC IS PROVIDED AS IS WITH NO WARRANTIES OF ANY KIND INCLUDING THE
+ * WARRANTIES OF DESIGN, MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE, OR ARISING FROM A COURSE OF DEALING, USAGE OR TRADE PRACTICE.
+ *
+ * Sun RPC is provided with no support and without any obligation on the
+ * part of Sun Microsystems, Inc. to assist in its use, correction,
+ * modification or enhancement.
+ *
+ * SUN MICROSYSTEMS, INC. SHALL HAVE NO LIABILITY WITH RESPECT TO THE
+ * INFRINGEMENT OF COPYRIGHTS, TRADE SECRETS OR ANY PATENTS BY SUN RPC
+ * OR ANY PART THEREOF.
+ *
+ * In no event will Sun Microsystems, Inc. be liable for any lost revenue
+ * or profits or other special, indirect and consequential damages, even if
+ * Sun has been advised of the possibility of such damages.
+ *
+ * Sun Microsystems, Inc.
+ * 2550 Garcia Avenue
+ * Mountain View, California 94043
+ */
+
+#if defined(LIBC_SCCS) && !defined(lint)
+static char *sccsid2 = "@(#)auth_none.c 1.19 87/08/11 Copyr 1984 Sun Micro";
+static char *sccsid = "@(#)auth_none.c 2.1 88/07/29 4.0 RPCSRC";
+#endif
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * auth_none.c
+ * Creates a client authentication handle for passing "null"
+ * credentials and verifiers to remote systems.
+ *
+ * Copyright (C) 1984, Sun Microsystems, Inc.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/mutex.h>
+
+#include <rpc/types.h>
+#include <rpc/xdr.h>
+#include <rpc/auth.h>
+
+#define MAX_MARSHAL_SIZE 20
+
+/*
+ * Authenticator operations routines
+ */
+
+static bool_t authnone_marshal (AUTH *, XDR *);
+static void authnone_verf (AUTH *);
+static bool_t authnone_validate (AUTH *, struct opaque_auth *);
+static bool_t authnone_refresh (AUTH *, void *);
+static void authnone_destroy (AUTH *);
+
+static struct auth_ops authnone_ops = {
+ .ah_nextverf = authnone_verf,
+ .ah_marshal = authnone_marshal,
+ .ah_validate = authnone_validate,
+ .ah_refresh = authnone_refresh,
+ .ah_destroy = authnone_destroy
+};
+
+struct authnone_private {
+ AUTH no_client;
+ char mclient[MAX_MARSHAL_SIZE];
+ u_int mcnt;
+};
+
+static struct authnone_private authnone_private;
+
+static void
+authnone_init(void *dummy)
+{
+ struct authnone_private *ap = &authnone_private;
+ XDR xdrs;
+
+ ap->no_client.ah_cred = ap->no_client.ah_verf = _null_auth;
+ ap->no_client.ah_ops = &authnone_ops;
+ xdrmem_create(&xdrs, ap->mclient, MAX_MARSHAL_SIZE, XDR_ENCODE);
+ xdr_opaque_auth(&xdrs, &ap->no_client.ah_cred);
+ xdr_opaque_auth(&xdrs, &ap->no_client.ah_verf);
+ ap->mcnt = XDR_GETPOS(&xdrs);
+ XDR_DESTROY(&xdrs);
+}
+SYSINIT(authnone_init, SI_SUB_KMEM, SI_ORDER_ANY, authnone_init, NULL);
+
+AUTH *
+authnone_create()
+{
+ struct authnone_private *ap = &authnone_private;
+
+ return (&ap->no_client);
+}
+
+/*ARGSUSED*/
+static bool_t
+authnone_marshal(AUTH *client, XDR *xdrs)
+{
+ struct authnone_private *ap = &authnone_private;
+
+ KASSERT(xdrs != NULL, ("authnone_marshal: xdrs is null"));
+
+ return (xdrs->x_ops->x_putbytes(xdrs, ap->mclient, ap->mcnt));
+}
+
+/* All these unused parameters are required to keep ANSI-C from grumbling */
+/*ARGSUSED*/
+static void
+authnone_verf(AUTH *client)
+{
+}
+
+/*ARGSUSED*/
+static bool_t
+authnone_validate(AUTH *client, struct opaque_auth *opaque)
+{
+
+ return (TRUE);
+}
+
+/*ARGSUSED*/
+static bool_t
+authnone_refresh(AUTH *client, void *dummy)
+{
+
+ return (FALSE);
+}
+
+/*ARGSUSED*/
+static void
+authnone_destroy(AUTH *client)
+{
+}
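Because authnone_init() pre-marshals the null credential and verifier once at SYSINIT
time into a single static structure, authnone_create() always returns the same pointer
and nothing is ever freed. Illustrative sketch only:

	AUTH *a1 = authnone_create();
	AUTH *a2 = authnone_create();
	/* a1 == a2: both point at authnone_private.no_client; never destroyed. */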
diff --git a/sys/rpc/auth_unix.c b/sys/rpc/auth_unix.c
new file mode 100644
index 0000000..5782400
--- /dev/null
+++ b/sys/rpc/auth_unix.c
@@ -0,0 +1,299 @@
+/* $NetBSD: auth_unix.c,v 1.18 2000/07/06 03:03:30 christos Exp $ */
+
+/*
+ * Sun RPC is a product of Sun Microsystems, Inc. and is provided for
+ * unrestricted use provided that this legend is included on all tape
+ * media and as a part of the software program in whole or part. Users
+ * may copy or modify Sun RPC without charge, but are not authorized
+ * to license or distribute it to anyone else except as part of a product or
+ * program developed by the user.
+ *
+ * SUN RPC IS PROVIDED AS IS WITH NO WARRANTIES OF ANY KIND INCLUDING THE
+ * WARRANTIES OF DESIGN, MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE, OR ARISING FROM A COURSE OF DEALING, USAGE OR TRADE PRACTICE.
+ *
+ * Sun RPC is provided with no support and without any obligation on the
+ * part of Sun Microsystems, Inc. to assist in its use, correction,
+ * modification or enhancement.
+ *
+ * SUN MICROSYSTEMS, INC. SHALL HAVE NO LIABILITY WITH RESPECT TO THE
+ * INFRINGEMENT OF COPYRIGHTS, TRADE SECRETS OR ANY PATENTS BY SUN RPC
+ * OR ANY PART THEREOF.
+ *
+ * In no event will Sun Microsystems, Inc. be liable for any lost revenue
+ * or profits or other special, indirect and consequential damages, even if
+ * Sun has been advised of the possibility of such damages.
+ *
+ * Sun Microsystems, Inc.
+ * 2550 Garcia Avenue
+ * Mountain View, California 94043
+ */
+
+#if defined(LIBC_SCCS) && !defined(lint)
+static char *sccsid2 = "@(#)auth_unix.c 1.19 87/08/11 Copyr 1984 Sun Micro";
+static char *sccsid = "@(#)auth_unix.c 2.2 88/08/01 4.0 RPCSRC";
+#endif
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * auth_unix.c, Implements UNIX style authentication parameters.
+ *
+ * Copyright (C) 1984, Sun Microsystems, Inc.
+ *
+ * The system is very weak. The client uses no encryption for its
+ * credentials and only sends null verifiers. The server sends back
+ * null verifiers or optionally a verifier that suggests a new short hand

+ * for the credentials.
+ *
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/mutex.h>
+#include <sys/ucred.h>
+
+#include <rpc/types.h>
+#include <rpc/xdr.h>
+#include <rpc/auth.h>
+
+#include "rpc_com.h"
+
+/* auth_unix.c */
+static void authunix_nextverf (AUTH *);
+static bool_t authunix_marshal (AUTH *, XDR *);
+static bool_t authunix_validate (AUTH *, struct opaque_auth *);
+static bool_t authunix_refresh (AUTH *, void *);
+static void authunix_destroy (AUTH *);
+static void marshal_new_auth (AUTH *);
+
+static struct auth_ops authunix_ops = {
+ .ah_nextverf = authunix_nextverf,
+ .ah_marshal = authunix_marshal,
+ .ah_validate = authunix_validate,
+ .ah_refresh = authunix_refresh,
+ .ah_destroy = authunix_destroy
+};
+
+/*
+ * This struct is pointed to by the ah_private field of an auth_handle.
+ */
+struct audata {
+ struct opaque_auth au_origcred; /* original credentials */
+ struct opaque_auth au_shcred; /* short hand cred */
+ u_long au_shfaults; /* short hand cache faults */
+ char au_marshed[MAX_AUTH_BYTES];
+ u_int au_mpos; /* xdr pos at end of marshed */
+};
+#define AUTH_PRIVATE(auth) ((struct audata *)auth->ah_private)
+
+/*
+ * Create a unix style authenticator.
+ * Returns an auth handle with the given stuff in it.
+ */
+AUTH *
+authunix_create(struct ucred *cred)
+{
+ struct xucred xcr;
+ char mymem[MAX_AUTH_BYTES];
+ XDR xdrs;
+ AUTH *auth;
+ struct audata *au;
+ struct timeval now;
+ uint32_t time;
+ int len;
+
+ /*
+ * Allocate and set up auth handle
+ */
+ au = NULL;
+ auth = mem_alloc(sizeof(*auth));
+#ifndef _KERNEL
+ if (auth == NULL) {
+ printf("authunix_create: out of memory");
+ goto cleanup_authunix_create;
+ }
+#endif
+ au = mem_alloc(sizeof(*au));
+#ifndef _KERNEL
+ if (au == NULL) {
+ printf("authunix_create: out of memory");
+ goto cleanup_authunix_create;
+ }
+#endif
+ auth->ah_ops = &authunix_ops;
+ auth->ah_private = (caddr_t)au;
+ auth->ah_verf = au->au_shcred = _null_auth;
+ au->au_shfaults = 0;
+ au->au_origcred.oa_base = NULL;
+
+ getmicrotime(&now);
+ time = now.tv_sec;
+
+ /*
+ * Serialize the parameters into origcred
+ */
+ xdrmem_create(&xdrs, mymem, MAX_AUTH_BYTES, XDR_ENCODE);
+ cru2x(cred, &xcr);
+ if (! xdr_authunix_parms(&xdrs, &time, &xcr))
+ panic("authunix_create: failed to encode creds");
+ au->au_origcred.oa_length = len = XDR_GETPOS(&xdrs);
+ au->au_origcred.oa_flavor = AUTH_UNIX;
+#ifdef _KERNEL
+ au->au_origcred.oa_base = mem_alloc((u_int) len);
+#else
+ if ((au->au_origcred.oa_base = mem_alloc((u_int) len)) == NULL) {
+ printf("authunix_create: out of memory");
+ goto cleanup_authunix_create;
+ }
+#endif
+ memcpy(au->au_origcred.oa_base, mymem, (size_t)len);
+
+ /*
+ * set auth handle to reflect new cred.
+ */
+ auth->ah_cred = au->au_origcred;
+ marshal_new_auth(auth);
+ return (auth);
+#ifndef _KERNEL
+ cleanup_authunix_create:
+ if (auth)
+ mem_free(auth, sizeof(*auth));
+ if (au) {
+ if (au->au_origcred.oa_base)
+ mem_free(au->au_origcred.oa_base, (u_int)len);
+ mem_free(au, sizeof(*au));
+ }
+ return (NULL);
+#endif
+}
+
+/*
+ * authunix operations
+ */
+
+/* ARGSUSED */
+static void
+authunix_nextverf(AUTH *auth)
+{
+ /* no action necessary */
+}
+
+static bool_t
+authunix_marshal(AUTH *auth, XDR *xdrs)
+{
+ struct audata *au;
+
+ au = AUTH_PRIVATE(auth);
+ return (XDR_PUTBYTES(xdrs, au->au_marshed, au->au_mpos));
+}
+
+static bool_t
+authunix_validate(AUTH *auth, struct opaque_auth *verf)
+{
+ struct audata *au;
+ XDR xdrs;
+
+ if (verf->oa_flavor == AUTH_SHORT) {
+ au = AUTH_PRIVATE(auth);
+ xdrmem_create(&xdrs, verf->oa_base, verf->oa_length,
+ XDR_DECODE);
+
+ if (au->au_shcred.oa_base != NULL) {
+ mem_free(au->au_shcred.oa_base,
+ au->au_shcred.oa_length);
+ au->au_shcred.oa_base = NULL;
+ }
+ if (xdr_opaque_auth(&xdrs, &au->au_shcred)) {
+ auth->ah_cred = au->au_shcred;
+ } else {
+ xdrs.x_op = XDR_FREE;
+ (void)xdr_opaque_auth(&xdrs, &au->au_shcred);
+ au->au_shcred.oa_base = NULL;
+ auth->ah_cred = au->au_origcred;
+ }
+ marshal_new_auth(auth);
+ }
+ return (TRUE);
+}
+
+static bool_t
+authunix_refresh(AUTH *auth, void *dummy)
+{
+ struct audata *au = AUTH_PRIVATE(auth);
+ struct xucred xcr;
+ uint32_t time;
+ struct timeval now;
+ XDR xdrs;
+ int stat;
+
+ if (auth->ah_cred.oa_base == au->au_origcred.oa_base) {
+ /* there is no hope. Punt */
+ return (FALSE);
+ }
+ au->au_shfaults ++;
+
+ /* first deserialize the creds back into a struct ucred */
+ xdrmem_create(&xdrs, au->au_origcred.oa_base,
+ au->au_origcred.oa_length, XDR_DECODE);
+ stat = xdr_authunix_parms(&xdrs, &time, &xcr);
+ if (! stat)
+ goto done;
+
+ /* update the time and serialize in place */
+ getmicrotime(&now);
+ time = now.tv_sec;
+ xdrs.x_op = XDR_ENCODE;
+ XDR_SETPOS(&xdrs, 0);
+
+ stat = xdr_authunix_parms(&xdrs, &time, &xcr);
+ if (! stat)
+ goto done;
+ auth->ah_cred = au->au_origcred;
+ marshal_new_auth(auth);
+done:
+ XDR_DESTROY(&xdrs);
+ return (stat);
+}
+
+static void
+authunix_destroy(AUTH *auth)
+{
+ struct audata *au;
+
+ au = AUTH_PRIVATE(auth);
+ mem_free(au->au_origcred.oa_base, au->au_origcred.oa_length);
+
+ if (au->au_shcred.oa_base != NULL)
+ mem_free(au->au_shcred.oa_base, au->au_shcred.oa_length);
+
+ mem_free(auth->ah_private, sizeof(struct audata));
+
+ if (auth->ah_verf.oa_base != NULL)
+ mem_free(auth->ah_verf.oa_base, auth->ah_verf.oa_length);
+
+ mem_free(auth, sizeof(*auth));
+}
+
+/*
+ * Marshals (pre-serializes) an auth struct.
+ * sets private data, au_marshed and au_mpos
+ */
+static void
+marshal_new_auth(AUTH *auth)
+{
+ XDR xdr_stream;
+ XDR *xdrs = &xdr_stream;
+ struct audata *au;
+
+ au = AUTH_PRIVATE(auth);
+ xdrmem_create(xdrs, au->au_marshed, MAX_AUTH_BYTES, XDR_ENCODE);
+ if ((! xdr_opaque_auth(xdrs, &(auth->ah_cred))) ||
+ (! xdr_opaque_auth(xdrs, &(auth->ah_verf))))
+ printf("auth_none.c - Fatal marshalling problem");
+ else
+ au->au_mpos = XDR_GETPOS(xdrs);
+ XDR_DESTROY(xdrs);
+}
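The pre-serialized credential built by marshal_new_auth() is what AUTH_MARSHALL() copies
into each outgoing call; the datagram client code later in this patch uses it exactly
this way. A sketch of how a caller hand-building a request header would consume it
(buf and auth are assumed for illustration):

	char buf[MAX_AUTH_BYTES];
	XDR xdrs;
	u_int len;

	xdrmem_create(&xdrs, buf, sizeof(buf), XDR_ENCODE);
	if (!AUTH_MARSHALL(auth, &xdrs))	/* emits cred + verifier */
		printf("marshal failed\n");
	len = XDR_GETPOS(&xdrs);		/* au_mpos bytes for AUTH_SYS */
	XDR_DESTROY(&xdrs);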
diff --git a/sys/rpc/authunix_prot.c b/sys/rpc/authunix_prot.c
new file mode 100644
index 0000000..67ab7fb
--- /dev/null
+++ b/sys/rpc/authunix_prot.c
@@ -0,0 +1,122 @@
+/* $NetBSD: authunix_prot.c,v 1.12 2000/01/22 22:19:17 mycroft Exp $ */
+
+/*
+ * Sun RPC is a product of Sun Microsystems, Inc. and is provided for
+ * unrestricted use provided that this legend is included on all tape
+ * media and as a part of the software program in whole or part. Users
+ * may copy or modify Sun RPC without charge, but are not authorized
+ * to license or distribute it to anyone else except as part of a product or
+ * program developed by the user.
+ *
+ * SUN RPC IS PROVIDED AS IS WITH NO WARRANTIES OF ANY KIND INCLUDING THE
+ * WARRANTIES OF DESIGN, MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE, OR ARISING FROM A COURSE OF DEALING, USAGE OR TRADE PRACTICE.
+ *
+ * Sun RPC is provided with no support and without any obligation on the
+ * part of Sun Microsystems, Inc. to assist in its use, correction,
+ * modification or enhancement.
+ *
+ * SUN MICROSYSTEMS, INC. SHALL HAVE NO LIABILITY WITH RESPECT TO THE
+ * INFRINGEMENT OF COPYRIGHTS, TRADE SECRETS OR ANY PATENTS BY SUN RPC
+ * OR ANY PART THEREOF.
+ *
+ * In no event will Sun Microsystems, Inc. be liable for any lost revenue
+ * or profits or other special, indirect and consequential damages, even if
+ * Sun has been advised of the possibility of such damages.
+ *
+ * Sun Microsystems, Inc.
+ * 2550 Garcia Avenue
+ * Mountain View, California 94043
+ */
+
+#if defined(LIBC_SCCS) && !defined(lint)
+static char *sccsid2 = "@(#)authunix_prot.c 1.15 87/08/11 Copyr 1984 Sun Micro";
+static char *sccsid = "@(#)authunix_prot.c 2.1 88/07/29 4.0 RPCSRC";
+#endif
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * authunix_prot.c
+ * XDR for UNIX style authentication parameters for RPC
+ *
+ * Copyright (C) 1984, Sun Microsystems, Inc.
+ */
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/systm.h>
+#include <sys/ucred.h>
+
+#include <rpc/types.h>
+#include <rpc/xdr.h>
+#include <rpc/auth.h>
+
+#include "rpc_com.h"
+
+/* gids compose part of a credential; there may not be more than 16 of them */
+#define NGRPS 16
+
+/*
+ * XDR for unix authentication parameters.
+ */
+bool_t
+xdr_authunix_parms(XDR *xdrs, uint32_t *time, struct xucred *cred)
+{
+ uint32_t namelen;
+ uint32_t ngroups, i;
+ uint32_t junk;
+
+ if (xdrs->x_op == XDR_ENCODE) {
+ namelen = strlen(hostname);
+ } else {
+ namelen = 0;
+ }
+ junk = 0;
+
+ if (!xdr_uint32_t(xdrs, time)
+ || !xdr_uint32_t(xdrs, &namelen))
+ return (FALSE);
+
+ /*
+ * Ignore the hostname on decode.
+ */
+ if (xdrs->x_op == XDR_ENCODE) {
+ if (!xdr_opaque(xdrs, hostname, namelen))
+ return (FALSE);
+ } else {
+ xdr_setpos(xdrs, xdr_getpos(xdrs) + RNDUP(namelen));
+ }
+
+ if (!xdr_uint32_t(xdrs, &cred->cr_uid))
+ return (FALSE);
+ if (!xdr_uint32_t(xdrs, &cred->cr_groups[0]))
+ return (FALSE);
+
+ if (xdrs->x_op == XDR_ENCODE) {
+ ngroups = cred->cr_ngroups - 1;
+ if (ngroups > NGRPS)
+ ngroups = NGRPS;
+ }
+
+ if (!xdr_uint32_t(xdrs, &ngroups))
+ return (FALSE);
+ for (i = 0; i < ngroups; i++) {
+ if (i + 1 < NGROUPS) {
+ if (!xdr_uint32_t(xdrs, &cred->cr_groups[i + 1]))
+ return (FALSE);
+ } else {
+ if (!xdr_uint32_t(xdrs, &junk))
+ return (FALSE);
+ }
+ }
+
+ if (xdrs->x_op == XDR_DECODE) {
+ if (ngroups + 1 > NGROUPS)
+ cred->cr_ngroups = NGROUPS;
+ else
+ cred->cr_ngroups = ngroups + 1;
+ }
+
+ return (TRUE);
+}
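As a worked example of the resulting wire layout: for a host named "example.org"
(11 bytes, rounded up by RNDUP() to 12) and a credential with cr_ngroups = 5,
xdr_authunix_parms() emits 4 bytes of timestamp, 4 of name length, 12 of name,
4 of uid, 4 for cr_groups[0], 4 for the supplementary-group count (here 4) and
16 for the supplementary gids, 48 bytes in all. Anything beyond the first NGRPS (16)
supplementary groups is silently dropped on encode, and the decode side ignores the
hostname entirely, skipping over it in the stream.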
diff --git a/sys/rpc/clnt.h b/sys/rpc/clnt.h
new file mode 100644
index 0000000..4d6a778
--- /dev/null
+++ b/sys/rpc/clnt.h
@@ -0,0 +1,620 @@
+/* $NetBSD: clnt.h,v 1.14 2000/06/02 22:57:55 fvdl Exp $ */
+
+/*
+ * The contents of this file are subject to the Sun Standards
+ * License Version 1.0 the (the "License";) You may not use
+ * this file except in compliance with the License. You may
+ * obtain a copy of the License at lib/libc/rpc/LICENSE
+ *
+ * Software distributed under the License is distributed on
+ * an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either
+ * express or implied. See the License for the specific
+ * language governing rights and limitations under the License.
+ *
+ * The Original Code is Copyright 1998 by Sun Microsystems, Inc
+ *
+ * The Initial Developer of the Original Code is: Sun
+ * Microsystems, Inc.
+ *
+ * All Rights Reserved.
+ *
+ * Sun RPC is a product of Sun Microsystems, Inc. and is provided for
+ * unrestricted use provided that this legend is included on all tape
+ * media and as a part of the software program in whole or part. Users
+ * may copy or modify Sun RPC without charge, but are not authorized
+ * to license or distribute it to anyone else except as part of a product or
+ * program developed by the user.
+ *
+ * SUN RPC IS PROVIDED AS IS WITH NO WARRANTIES OF ANY KIND INCLUDING THE
+ * WARRANTIES OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE, OR ARISING FROM A COURSE OF DEALING, USAGE OR TRADE PRACTICE.
+ *
+ * Sun RPC is provided with no support and without any obligation on the
+ * part of Sun Microsystems, Inc. to assist in its use, correction,
+ * modification or enhancement.
+ *
+ * SUN MICROSYSTEMS, INC. SHALL HAVE NO LIABILITY WITH RESPECT TO THE
+ * INFRINGEMENT OF COPYRIGHTS, TRADE SECRETS OR ANY PATENTS BY SUN RPC
+ * OR ANY PART THEREOF.
+ *
+ * In no event will Sun Microsystems, Inc. be liable for any lost revenue
+ * or profits or other special, indirect and consequential damages, even if
+ * Sun has been advised of the possibility of such damages.
+ *
+ * Sun Microsystems, Inc.
+ * 2550 Garcia Avenue
+ * Mountain View, California 94043
+ *
+ * from: @(#)clnt.h 1.31 94/04/29 SMI
+ * from: @(#)clnt.h 2.1 88/07/29 4.0 RPCSRC
+ * $FreeBSD$
+ */
+
+/*
+ * clnt.h - Client side remote procedure call interface.
+ *
+ * Copyright (c) 1986-1991,1994-1999 by Sun Microsystems, Inc.
+ * All rights reserved.
+ */
+
+#ifndef _RPC_CLNT_H_
+#define _RPC_CLNT_H_
+#include <rpc/clnt_stat.h>
+#include <sys/cdefs.h>
+#ifdef _KERNEL
+#include <rpc/netconfig.h>
+#else
+#include <netconfig.h>
+#endif
+#include <sys/un.h>
+
+/*
+ * Well-known IPV6 RPC broadcast address.
+ */
+#define RPCB_MULTICAST_ADDR "ff02::202"
+
+/*
+ * the following errors are in general unrecoverable. The caller
+ * should give up rather than retry.
+ */
+#define IS_UNRECOVERABLE_RPC(s) (((s) == RPC_AUTHERROR) || \
+ ((s) == RPC_CANTENCODEARGS) || \
+ ((s) == RPC_CANTDECODERES) || \
+ ((s) == RPC_VERSMISMATCH) || \
+ ((s) == RPC_PROCUNAVAIL) || \
+ ((s) == RPC_PROGUNAVAIL) || \
+ ((s) == RPC_PROGVERSMISMATCH) || \
+ ((s) == RPC_CANTDECODEARGS))
+
+/*
+ * Error info.
+ */
+struct rpc_err {
+ enum clnt_stat re_status;
+ union {
+ int RE_errno; /* related system error */
+ enum auth_stat RE_why; /* why the auth error occurred */
+ struct {
+ rpcvers_t low; /* lowest version supported */
+ rpcvers_t high; /* highest version supported */
+ } RE_vers;
+ struct { /* maybe meaningful if RPC_FAILED */
+ int32_t s1;
+ int32_t s2;
+ } RE_lb; /* life boot & debugging only */
+ } ru;
+#define re_errno ru.RE_errno
+#define re_why ru.RE_why
+#define re_vers ru.RE_vers
+#define re_lb ru.RE_lb
+};
+
+
+/*
+ * Client rpc handle.
+ * Created by individual implementations
+ * Client is responsible for initializing auth, see e.g. auth_none.c.
+ */
+typedef struct __rpc_client {
+ AUTH *cl_auth; /* authenticator */
+ struct clnt_ops {
+ /* call remote procedure */
+ enum clnt_stat (*cl_call)(struct __rpc_client *,
+ rpcproc_t, xdrproc_t, void *, xdrproc_t,
+ void *, struct timeval);
+ /* abort a call */
+ void (*cl_abort)(struct __rpc_client *);
+ /* get specific error code */
+ void (*cl_geterr)(struct __rpc_client *,
+ struct rpc_err *);
+ /* frees results */
+ bool_t (*cl_freeres)(struct __rpc_client *,
+ xdrproc_t, void *);
+ /* destroy this structure */
+ void (*cl_destroy)(struct __rpc_client *);
+ /* the ioctl() of rpc */
+ bool_t (*cl_control)(struct __rpc_client *, u_int,
+ void *);
+ } *cl_ops;
+ void *cl_private; /* private stuff */
+ char *cl_netid; /* network token */
+ char *cl_tp; /* device name */
+} CLIENT;
+
+
+/*
+ * Timers used for the pseudo-transport protocol when using datagrams
+ */
+struct rpc_timers {
+ u_short rt_srtt; /* smoothed round-trip time */
+ u_short rt_deviate; /* estimated deviation */
+ u_long rt_rtxcur; /* current (backed-off) rto */
+};
+
+/*
+ * Feedback values used for possible congestion and rate control
+ */
+#define FEEDBACK_REXMIT1 1 /* first retransmit */
+#define FEEDBACK_OK 2 /* no retransmits */
+
+/* Used to set version of portmapper used in broadcast */
+
+#define CLCR_SET_LOWVERS 3
+#define CLCR_GET_LOWVERS 4
+
+#define RPCSMALLMSGSIZE 400 /* a more reasonable packet size */
+
+/*
+ * client side rpc interface ops
+ *
+ * Parameter types are:
+ *
+ */
+
+/*
+ * enum clnt_stat
+ * CLNT_CALL(rh, proc, xargs, argsp, xres, resp, timeout)
+ * CLIENT *rh;
+ * rpcproc_t proc;
+ * xdrproc_t xargs;
+ * void *argsp;
+ * xdrproc_t xres;
+ * void *resp;
+ * struct timeval timeout;
+ */
+#define CLNT_CALL(rh, proc, xargs, argsp, xres, resp, secs) \
+ ((*(rh)->cl_ops->cl_call)(rh, proc, xargs, \
+ argsp, xres, resp, secs))
+#define clnt_call(rh, proc, xargs, argsp, xres, resp, secs) \
+ ((*(rh)->cl_ops->cl_call)(rh, proc, xargs, \
+ argsp, xres, resp, secs))
+
+/*
+ * void
+ * CLNT_ABORT(rh);
+ * CLIENT *rh;
+ */
+#define CLNT_ABORT(rh) ((*(rh)->cl_ops->cl_abort)(rh))
+#define clnt_abort(rh) ((*(rh)->cl_ops->cl_abort)(rh))
+
+/*
+ * struct rpc_err
+ * CLNT_GETERR(rh);
+ * CLIENT *rh;
+ */
+#define CLNT_GETERR(rh,errp) ((*(rh)->cl_ops->cl_geterr)(rh, errp))
+#define clnt_geterr(rh,errp) ((*(rh)->cl_ops->cl_geterr)(rh, errp))
+
+
+/*
+ * bool_t
+ * CLNT_FREERES(rh, xres, resp);
+ * CLIENT *rh;
+ * xdrproc_t xres;
+ * void *resp;
+ */
+#define CLNT_FREERES(rh,xres,resp) ((*(rh)->cl_ops->cl_freeres)(rh,xres,resp))
+#define clnt_freeres(rh,xres,resp) ((*(rh)->cl_ops->cl_freeres)(rh,xres,resp))
+
+/*
+ * bool_t
+ * CLNT_CONTROL(cl, request, info)
+ * CLIENT *cl;
+ * u_int request;
+ * char *info;
+ */
+#define CLNT_CONTROL(cl,rq,in) ((*(cl)->cl_ops->cl_control)(cl,rq,in))
+#define clnt_control(cl,rq,in) ((*(cl)->cl_ops->cl_control)(cl,rq,in))
+
+/*
+ * control operations that apply to both udp and tcp transports
+ */
+#define CLSET_TIMEOUT 1 /* set timeout (timeval) */
+#define CLGET_TIMEOUT 2 /* get timeout (timeval) */
+#define CLGET_SERVER_ADDR 3 /* get server's address (sockaddr) */
+#define CLGET_FD 6 /* get connections file descriptor */
+#define CLGET_SVC_ADDR 7 /* get server's address (netbuf) */
+#define CLSET_FD_CLOSE 8 /* close fd while clnt_destroy */
+#define CLSET_FD_NCLOSE 9 /* Do not close fd while clnt_destroy */
+#define CLGET_XID 10 /* Get xid */
+#define CLSET_XID 11 /* Set xid */
+#define CLGET_VERS 12 /* Get version number */
+#define CLSET_VERS 13 /* Set version number */
+#define CLGET_PROG 14 /* Get program number */
+#define CLSET_PROG 15 /* Set program number */
+#define CLSET_SVC_ADDR 16 /* set server's address (netbuf) */
+#define CLSET_PUSH_TIMOD 17 /* push timod if not already present */
+#define CLSET_POP_TIMOD 18 /* pop timod */
+/*
+ * Connectionless only control operations
+ */
+#define CLSET_RETRY_TIMEOUT 4 /* set retry timeout (timeval) */
+#define CLGET_RETRY_TIMEOUT 5 /* get retry timeout (timeval) */
+#define CLSET_ASYNC 19
+#define CLSET_CONNECT 20 /* Use connect() for UDP. (int) */
+
+#ifdef _KERNEL
+/*
+ * Kernel control operations. The default msleep string is "rpcrecv",
+ * and sleeps are non-interruptible by default.
+ */
+#define CLSET_WAITCHAN 21 /* set string to use in msleep call */
+#define CLGET_WAITCHAN 22 /* get string used in msleep call */
+#define CLSET_INTERRUPTIBLE 23 /* set interruptible flag */
+#define CLGET_INTERRUPTIBLE 24 /* get interruptible flag */
+#endif
+
+
+/*
+ * void
+ * CLNT_DESTROY(rh);
+ * CLIENT *rh;
+ */
+#define CLNT_DESTROY(rh) ((*(rh)->cl_ops->cl_destroy)(rh))
+#define clnt_destroy(rh) ((*(rh)->cl_ops->cl_destroy)(rh))
+
+
+/*
+ * RPCTEST is a test program which is accessible on every rpc
+ * transport/port. It is used for testing, performance evaluation,
+ * and network administration.
+ */
+
+#define RPCTEST_PROGRAM ((rpcprog_t)1)
+#define RPCTEST_VERSION ((rpcvers_t)1)
+#define RPCTEST_NULL_PROC ((rpcproc_t)2)
+#define RPCTEST_NULL_BATCH_PROC ((rpcproc_t)3)
+
+/*
+ * By convention, procedure 0 takes null arguments and returns them
+ */
+
+#define NULLPROC ((rpcproc_t)0)
+
+/*
+ * Below are the client handle creation routines for the various
+ * implementations of client side rpc. They can return NULL if a
+ * creation failure occurs.
+ */
+
+/*
+ * Generic client creation routine. Supported protocols are those that
+ * belong to the nettype namespace (/etc/netconfig).
+ */
+__BEGIN_DECLS
+#ifdef _KERNEL
+
+/*
+ * struct socket *so; -- socket
+ * struct sockaddr *svcaddr; -- servers address
+ * rpcprog_t prog; -- program number
+ * rpcvers_t vers; -- version number
+ * size_t sendsz; -- buffer send size
+ * size_t recvsz; -- buffer recv size
+ */
+extern CLIENT *clnt_dg_create(struct socket *so,
+ struct sockaddr *svcaddr, rpcprog_t program, rpcvers_t version,
+ size_t sendsz, size_t recvsz);
+
+/*
+ * struct socket *so; -- socket
+ * struct sockaddr *svcaddr; -- servers address
+ * rpcprog_t prog; -- program number
+ * rpcvers_t vers; -- version number
+ * size_t sendsz; -- buffer send size
+ * size_t recvsz; -- buffer recv size
+ */
+extern CLIENT *clnt_vc_create(struct socket *so,
+ struct sockaddr *svcaddr, rpcprog_t program, rpcvers_t version,
+ size_t sendsz, size_t recvsz);
+
+/*
+ * struct netconfig *nconf; -- network type
+ * struct sockaddr *svcaddr; -- servers address
+ * rpcprog_t prog; -- program number
+ * rpcvers_t vers; -- version number
+ * size_t sendsz; -- buffer send size
+ * size_t recvsz; -- buffer recv size
+ */
+extern CLIENT *clnt_reconnect_create(struct netconfig *nconf,
+ struct sockaddr *svcaddr, rpcprog_t program, rpcvers_t version,
+ size_t sendsz, size_t recvsz);
+
+#else
+
+extern CLIENT *clnt_create(const char *, const rpcprog_t, const rpcvers_t,
+ const char *);
+/*
+ *
+ * const char *hostname; -- hostname
+ * const rpcprog_t prog; -- program number
+ * const rpcvers_t vers; -- version number
+ * const char *nettype; -- network type
+ */
+
+ /*
+ * Generic client creation routine. Just like clnt_create(), except
+ * it takes an additional timeout parameter.
+ */
+extern CLIENT * clnt_create_timed(const char *, const rpcprog_t,
+ const rpcvers_t, const char *, const struct timeval *);
+/*
+ *
+ * const char *hostname; -- hostname
+ * const rpcprog_t prog; -- program number
+ * const rpcvers_t vers; -- version number
+ * const char *nettype; -- network type
+ * const struct timeval *tp; -- timeout
+ */
+
+/*
+ * Generic client creation routine. Supported protocols are those which
+ * belong to the nettype namespace.
+ */
+extern CLIENT *clnt_create_vers(const char *, const rpcprog_t, rpcvers_t *,
+ const rpcvers_t, const rpcvers_t,
+ const char *);
+/*
+ * const char *host; -- hostname
+ * const rpcprog_t prog; -- program number
+ * rpcvers_t *vers_out; -- servers highest available version
+ * const rpcvers_t vers_low; -- low version number
+ * const rpcvers_t vers_high; -- high version number
+ * const char *nettype; -- network type
+ */
+
+/*
+ * Generic client creation routine. Supported protocols are those which
+ * belong to the nettype namespace.
+ */
+extern CLIENT * clnt_create_vers_timed(const char *, const rpcprog_t,
+ rpcvers_t *, const rpcvers_t, const rpcvers_t, const char *,
+ const struct timeval *);
+/*
+ * const char *host; -- hostname
+ * const rpcprog_t prog; -- program number
+ * rpcvers_t *vers_out; -- servers highest available version
+ * const rpcvers_t vers_low; -- low version number
+ * const rpcvers_t vers_high; -- high version number
+ * const char *nettype; -- network type
+ * const struct timeval *tp -- timeout
+ */
+
+/*
+ * Generic client creation routine. It takes a netconfig structure
+ * instead of nettype
+ */
+extern CLIENT *clnt_tp_create(const char *, const rpcprog_t,
+ const rpcvers_t, const struct netconfig *);
+/*
+ * const char *hostname; -- hostname
+ * const rpcprog_t prog; -- program number
+ * const rpcvers_t vers; -- version number
+ * const struct netconfig *netconf; -- network config structure
+ */
+
+/*
+ * Generic client creation routine. Just like clnt_tp_create(), except
+ * it takes an additional timeout parameter.
+ */
+extern CLIENT * clnt_tp_create_timed(const char *, const rpcprog_t,
+ const rpcvers_t, const struct netconfig *, const struct timeval *);
+/*
+ * const char *hostname; -- hostname
+ * const rpcprog_t prog; -- program number
+ * const rpcvers_t vers; -- version number
+ * const struct netconfig *netconf; -- network config structure
+ * const struct timeval *tp -- timeout
+ */
+
+/*
+ * Generic TLI create routine. Only provided for compatibility.
+ */
+
+extern CLIENT *clnt_tli_create(const int, const struct netconfig *,
+ struct netbuf *, const rpcprog_t,
+ const rpcvers_t, const u_int, const u_int);
+/*
+ * const register int fd; -- fd
+ * const struct netconfig *nconf; -- netconfig structure
+ * struct netbuf *svcaddr; -- servers address
+ * const u_long prog; -- program number
+ * const u_long vers; -- version number
+ * const u_int sendsz; -- send size
+ * const u_int recvsz; -- recv size
+ */
+
+/*
+ * Low level clnt create routine for connectionful transports, e.g. tcp.
+ */
+extern CLIENT *clnt_vc_create(const int, const struct netbuf *,
+ const rpcprog_t, const rpcvers_t,
+ u_int, u_int);
+/*
+ * Added for compatibility to old rpc 4.0. Obsoleted by clnt_vc_create().
+ */
+extern CLIENT *clntunix_create(struct sockaddr_un *,
+ u_long, u_long, int *, u_int, u_int);
+/*
+ * const int fd; -- open file descriptor
+ * const struct netbuf *svcaddr; -- servers address
+ * const rpcprog_t prog; -- program number
+ * const rpcvers_t vers; -- version number
+ * const u_int sendsz; -- buffer send size
+ * const u_int recvsz; -- buffer recv size
+ */
+
+/*
+ * Low level clnt create routine for connectionless transports, e.g. udp.
+ */
+extern CLIENT *clnt_dg_create(const int, const struct netbuf *,
+ const rpcprog_t, const rpcvers_t,
+ const u_int, const u_int);
+/*
+ * const int fd; -- open file descriptor
+ * const struct netbuf *svcaddr; -- servers address
+ * const rpcprog_t program; -- program number
+ * const rpcvers_t version; -- version number
+ * const u_int sendsz; -- buffer send size
+ * const u_int recvsz; -- buffer recv size
+ */
+
+/*
+ * Memory based rpc (for speed check and testing)
+ * CLIENT *
+ * clnt_raw_create(prog, vers)
+ * u_long prog;
+ * u_long vers;
+ */
+extern CLIENT *clnt_raw_create(rpcprog_t, rpcvers_t);
+#endif
+
+__END_DECLS
+
+
+/*
+ * Print why creation failed
+ */
+__BEGIN_DECLS
+extern void clnt_pcreateerror(const char *); /* stderr */
+extern char *clnt_spcreateerror(const char *); /* string */
+__END_DECLS
+
+/*
+ * Like clnt_perror(), but is more verbose in its output
+ */
+__BEGIN_DECLS
+extern void clnt_perrno(enum clnt_stat); /* stderr */
+extern char *clnt_sperrno(enum clnt_stat); /* string */
+__END_DECLS
+
+/*
+ * Print an English error message, given the client error code
+ */
+__BEGIN_DECLS
+extern void clnt_perror(CLIENT *, const char *); /* stderr */
+extern char *clnt_sperror(CLIENT *, const char *); /* string */
+__END_DECLS
+
+
+/*
+ * If a creation fails, the following allows the user to figure out why.
+ */
+struct rpc_createerr {
+ enum clnt_stat cf_stat;
+ struct rpc_err cf_error; /* useful when cf_stat == RPC_PMAPFAILURE */
+};
+
+#ifdef _KERNEL
+extern struct rpc_createerr rpc_createerr;
+#else
+__BEGIN_DECLS
+extern struct rpc_createerr *__rpc_createerr(void);
+__END_DECLS
+#define rpc_createerr (*(__rpc_createerr()))
+#endif
+
+/*
+ * The simplified interface:
+ * enum clnt_stat
+ * rpc_call(host, prognum, versnum, procnum, inproc, in, outproc, out, nettype)
+ * const char *host;
+ * const rpcprog_t prognum;
+ * const rpcvers_t versnum;
+ * const rpcproc_t procnum;
+ * const xdrproc_t inproc, outproc;
+ * const char *in;
+ * char *out;
+ * const char *nettype;
+ */
+__BEGIN_DECLS
+extern enum clnt_stat rpc_call(const char *, const rpcprog_t,
+ const rpcvers_t, const rpcproc_t,
+ const xdrproc_t, const char *,
+ const xdrproc_t, char *, const char *);
+__END_DECLS
+
+/*
+ * RPC broadcast interface
+ * The call is broadcasted to all locally connected nets.
+ *
+ * extern enum clnt_stat
+ * rpc_broadcast(prog, vers, proc, xargs, argsp, xresults, resultsp,
+ * eachresult, nettype)
+ * const rpcprog_t prog; -- program number
+ * const rpcvers_t vers; -- version number
+ * const rpcproc_t proc; -- procedure number
+ * const xdrproc_t xargs; -- xdr routine for args
+ * caddr_t argsp; -- pointer to args
+ * const xdrproc_t xresults; -- xdr routine for results
+ * caddr_t resultsp; -- pointer to results
+ * const resultproc_t eachresult; -- call with each result
+ * const char *nettype; -- Transport type
+ *
+ * For each valid response received, the procedure eachresult is called.
+ * Its form is:
+ * done = eachresult(resp, raddr, nconf)
+ * bool_t done;
+ * caddr_t resp;
+ * struct netbuf *raddr;
+ * struct netconfig *nconf;
+ * where resp points to the results of the call and raddr is the
+ * address of the responder to the broadcast. nconf is the transport
+ * on which the response was received.
+ *
+ * extern enum clnt_stat
+ * rpc_broadcast_exp(prog, vers, proc, xargs, argsp, xresults, resultsp,
+ * eachresult, inittime, waittime, nettype)
+ * const rpcprog_t prog; -- program number
+ * const rpcvers_t vers; -- version number
+ * const rpcproc_t proc; -- procedure number
+ * const xdrproc_t xargs; -- xdr routine for args
+ * caddr_t argsp; -- pointer to args
+ * const xdrproc_t xresults; -- xdr routine for results
+ * caddr_t resultsp; -- pointer to results
+ * const resultproc_t eachresult; -- call with each result
+ * const int inittime; -- how long to wait initially
+ * const int waittime; -- maximum time to wait
+ * const char *nettype; -- Transport type
+ */
+
+typedef bool_t (*resultproc_t)(caddr_t, ...);
+
+__BEGIN_DECLS
+extern enum clnt_stat rpc_broadcast(const rpcprog_t, const rpcvers_t,
+ const rpcproc_t, const xdrproc_t,
+ caddr_t, const xdrproc_t, caddr_t,
+ const resultproc_t, const char *);
+extern enum clnt_stat rpc_broadcast_exp(const rpcprog_t, const rpcvers_t,
+ const rpcproc_t, const xdrproc_t,
+ caddr_t, const xdrproc_t, caddr_t,
+ const resultproc_t, const int,
+ const int, const char *);
+__END_DECLS
+
+#ifndef _KERNEL
+/* For backward compatibility */
+#include <rpc/clnt_soc.h>
+#endif
+
+#endif /* !_RPC_CLNT_H_ */
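Tying the kernel-side declarations together, a caller that already has a bound UDP
socket and a server address might probe a remote program's null procedure roughly as
follows. This is a sketch only; the function name, the 25 second timeout and the
assumption that xdr_void is available through <rpc/rpc.h> are illustrative, not
mandated by this header:

	#include <sys/param.h>
	#include <sys/socket.h>
	#include <sys/time.h>
	#include <rpc/rpc.h>

	static enum clnt_stat
	example_null_call(struct socket *so, struct sockaddr *sa,
	    rpcprog_t prog, rpcvers_t vers)
	{
		CLIENT *cl;
		struct timeval timo;
		enum clnt_stat stat;
		int intr = 1;

		/* 0/0 send and receive sizes select the transport defaults. */
		cl = clnt_dg_create(so, sa, prog, vers, 0, 0);
		if (cl == NULL)
			return (rpc_createerr.cf_stat);

		/* Let signals interrupt the msleep() inside clnt_dg_call(). */
		CLNT_CONTROL(cl, CLSET_INTERRUPTIBLE, &intr);

		timo.tv_sec = 25;
		timo.tv_usec = 0;
		stat = CLNT_CALL(cl, NULLPROC, (xdrproc_t)xdr_void, NULL,
		    (xdrproc_t)xdr_void, NULL, timo);

		/* The socket stays open: cu_closeit defaults to FALSE. */
		CLNT_DESTROY(cl);
		return (stat);
	}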
diff --git a/sys/rpc/clnt_dg.c b/sys/rpc/clnt_dg.c
new file mode 100644
index 0000000..87c4aa4
--- /dev/null
+++ b/sys/rpc/clnt_dg.c
@@ -0,0 +1,865 @@
+/* $NetBSD: clnt_dg.c,v 1.4 2000/07/14 08:40:41 fvdl Exp $ */
+
+/*
+ * Sun RPC is a product of Sun Microsystems, Inc. and is provided for
+ * unrestricted use provided that this legend is included on all tape
+ * media and as a part of the software program in whole or part. Users
+ * may copy or modify Sun RPC without charge, but are not authorized
+ * to license or distribute it to anyone else except as part of a product or
+ * program developed by the user.
+ *
+ * SUN RPC IS PROVIDED AS IS WITH NO WARRANTIES OF ANY KIND INCLUDING THE
+ * WARRANTIES OF DESIGN, MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE, OR ARISING FROM A COURSE OF DEALING, USAGE OR TRADE PRACTICE.
+ *
+ * Sun RPC is provided with no support and without any obligation on the
+ * part of Sun Microsystems, Inc. to assist in its use, correction,
+ * modification or enhancement.
+ *
+ * SUN MICROSYSTEMS, INC. SHALL HAVE NO LIABILITY WITH RESPECT TO THE
+ * INFRINGEMENT OF COPYRIGHTS, TRADE SECRETS OR ANY PATENTS BY SUN RPC
+ * OR ANY PART THEREOF.
+ *
+ * In no event will Sun Microsystems, Inc. be liable for any lost revenue
+ * or profits or other special, indirect and consequential damages, even if
+ * Sun has been advised of the possibility of such damages.
+ *
+ * Sun Microsystems, Inc.
+ * 2550 Garcia Avenue
+ * Mountain View, California 94043
+ */
+/*
+ * Copyright (c) 1986-1991 by Sun Microsystems Inc.
+ */
+
+#if defined(LIBC_SCCS) && !defined(lint)
+#ident "@(#)clnt_dg.c 1.23 94/04/22 SMI"
+static char sccsid[] = "@(#)clnt_dg.c 1.19 89/03/16 Copyr 1988 Sun Micro";
+#endif
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * Implements a connectionless client side RPC.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/mutex.h>
+#include <sys/pcpu.h>
+#include <sys/proc.h>
+#include <sys/socket.h>
+#include <sys/socketvar.h>
+#include <sys/time.h>
+#include <sys/uio.h>
+
+#include <rpc/rpc.h>
+#include "rpc_com.h"
+
+
+#ifdef _FREEFALL_CONFIG
+/*
+ * Disable RPC exponential back-off for FreeBSD.org systems.
+ */
+#define RPC_MAX_BACKOFF 1 /* second */
+#else
+#define RPC_MAX_BACKOFF 30 /* seconds */
+#endif
+
+static bool_t time_not_ok(struct timeval *);
+static enum clnt_stat clnt_dg_call(CLIENT *, rpcproc_t, xdrproc_t, void *,
+ xdrproc_t, void *, struct timeval);
+static void clnt_dg_geterr(CLIENT *, struct rpc_err *);
+static bool_t clnt_dg_freeres(CLIENT *, xdrproc_t, void *);
+static void clnt_dg_abort(CLIENT *);
+static bool_t clnt_dg_control(CLIENT *, u_int, void *);
+static void clnt_dg_destroy(CLIENT *);
+static void clnt_dg_soupcall(struct socket *so, void *arg, int waitflag);
+
+static struct clnt_ops clnt_dg_ops = {
+ .cl_call = clnt_dg_call,
+ .cl_abort = clnt_dg_abort,
+ .cl_geterr = clnt_dg_geterr,
+ .cl_freeres = clnt_dg_freeres,
+ .cl_destroy = clnt_dg_destroy,
+ .cl_control = clnt_dg_control
+};
+
+static const char mem_err_clnt_dg[] = "clnt_dg_create: out of memory";
+
+/*
+ * A pending RPC request which awaits a reply.
+ */
+struct cu_request {
+ TAILQ_ENTRY(cu_request) cr_link;
+ uint32_t cr_xid; /* XID of request */
+ struct mbuf *cr_mrep; /* reply received by upcall */
+ int cr_error; /* any error from upcall */
+};
+
+TAILQ_HEAD(cu_request_list, cu_request);
+
+#define MCALL_MSG_SIZE 24
+
+/*
+ * This structure is pointed to by the socket's so_upcallarg
+ * member. It is separate from the client private data to facilitate
+ * multiple clients sharing the same socket. The cs_lock mutex is used
+ * to protect all fields of this structure; the socket's receive
+ * buffer lock (SOCKBUF_LOCK) is used to ensure that exactly one of
+ * these structures is installed on the socket.
+ */
+struct cu_socket {
+ struct mtx cs_lock;
+ int cs_refs; /* Count of clients */
+ struct cu_request_list cs_pending; /* Requests awaiting replies */
+
+};
+
+/*
+ * Private data kept per client handle
+ */
+struct cu_data {
+ struct socket *cu_socket; /* connection socket */
+ bool_t cu_closeit; /* opened by library */
+ struct sockaddr_storage cu_raddr; /* remote address */
+ int cu_rlen;
+ struct timeval cu_wait; /* retransmit interval */
+ struct timeval cu_total; /* total time for the call */
+ struct rpc_err cu_error;
+ uint32_t cu_xid;
+ char cu_mcallc[MCALL_MSG_SIZE]; /* marshalled callmsg */
+ size_t cu_mcalllen;
+ size_t cu_sendsz; /* send size */
+ size_t cu_recvsz; /* recv size */
+ int cu_async;
+ int cu_connect; /* Use connect(). */
+ int cu_connected; /* Have done connect(). */
+ const char *cu_waitchan;
+ int cu_waitflag;
+};
+
+/*
+ * Connectionless client creation returns with client handle parameters.
+ * Default options are set, which the user can change using clnt_control().
+ * The supplied socket should be open and bound.
+ * NB: The rpch->cl_auth is initialized to null authentication.
+ * Callers may wish to set this to something more useful.
+ *
+ * sendsz and recvsz are the maximum allowable packet sizes that can be
+ * sent and received. Normally they are the same, but they can be
+ * changed to improve the program efficiency and buffer allocation.
+ * If they are 0, use the transport default.
+ *
+ * If svcaddr is NULL, returns NULL.
+ */
+CLIENT *
+clnt_dg_create(
+ struct socket *so,
+ struct sockaddr *svcaddr, /* servers address */
+ rpcprog_t program, /* program number */
+ rpcvers_t version, /* version number */
+ size_t sendsz, /* buffer send size */
+ size_t recvsz) /* buffer recv size */
+{
+ CLIENT *cl = NULL; /* client handle */
+ struct cu_data *cu = NULL; /* private data */
+ struct cu_socket *cs = NULL;
+ struct timeval now;
+ struct rpc_msg call_msg;
+ struct __rpc_sockinfo si;
+ XDR xdrs;
+
+ if (svcaddr == NULL) {
+ rpc_createerr.cf_stat = RPC_UNKNOWNADDR;
+ return (NULL);
+ }
+
+ if (!__rpc_socket2sockinfo(so, &si)) {
+ rpc_createerr.cf_stat = RPC_TLIERROR;
+ rpc_createerr.cf_error.re_errno = 0;
+ return (NULL);
+ }
+
+ /*
+ * Find the receive and the send size
+ */
+ sendsz = __rpc_get_t_size(si.si_af, si.si_proto, (int)sendsz);
+ recvsz = __rpc_get_t_size(si.si_af, si.si_proto, (int)recvsz);
+ if ((sendsz == 0) || (recvsz == 0)) {
+ rpc_createerr.cf_stat = RPC_TLIERROR; /* XXX */
+ rpc_createerr.cf_error.re_errno = 0;
+ return (NULL);
+ }
+
+ cl = mem_alloc(sizeof (CLIENT));
+
+ /*
+ * Should be multiple of 4 for XDR.
+ */
+ sendsz = ((sendsz + 3) / 4) * 4;
+ recvsz = ((recvsz + 3) / 4) * 4;
+ cu = mem_alloc(sizeof (*cu));
+ (void) memcpy(&cu->cu_raddr, svcaddr, (size_t)svcaddr->sa_len);
+ cu->cu_rlen = svcaddr->sa_len;
+ /* Other values can also be set through clnt_control() */
+ cu->cu_wait.tv_sec = 15; /* heuristically chosen */
+ cu->cu_wait.tv_usec = 0;
+ cu->cu_total.tv_sec = -1;
+ cu->cu_total.tv_usec = -1;
+ cu->cu_sendsz = sendsz;
+ cu->cu_recvsz = recvsz;
+ cu->cu_async = FALSE;
+ cu->cu_connect = FALSE;
+ cu->cu_connected = FALSE;
+ cu->cu_waitchan = "rpcrecv";
+ cu->cu_waitflag = 0;
+ (void) getmicrotime(&now);
+ cu->cu_xid = __RPC_GETXID(&now);
+ call_msg.rm_xid = cu->cu_xid;
+ call_msg.rm_call.cb_prog = program;
+ call_msg.rm_call.cb_vers = version;
+ xdrmem_create(&xdrs, cu->cu_mcallc, MCALL_MSG_SIZE, XDR_ENCODE);
+ if (! xdr_callhdr(&xdrs, &call_msg)) {
+ rpc_createerr.cf_stat = RPC_CANTENCODEARGS; /* XXX */
+ rpc_createerr.cf_error.re_errno = 0;
+ goto err2;
+ }
+ cu->cu_mcalllen = XDR_GETPOS(&xdrs);
+
+ /*
+ * By default, closeit is always FALSE. It is the user's responsibility
+ * to close the socket, or the user may use clnt_control
+ * to let clnt_destroy do it.
+ */
+ cu->cu_closeit = FALSE;
+ cu->cu_socket = so;
+
+ SOCKBUF_LOCK(&so->so_rcv);
+recheck_socket:
+ if (so->so_upcall) {
+ if (so->so_upcall != clnt_dg_soupcall) {
+ SOCKBUF_UNLOCK(&so->so_rcv);
+ printf("clnt_dg_create(): socket already has an incompatible upcall\n");
+ goto err2;
+ }
+ cs = (struct cu_socket *) so->so_upcallarg;
+ mtx_lock(&cs->cs_lock);
+ cs->cs_refs++;
+ mtx_unlock(&cs->cs_lock);
+ } else {
+ /*
+ * We are the first on this socket - allocate the
+ * structure and install it in the socket.
+ */
+ SOCKBUF_UNLOCK(&cu->cu_socket->so_rcv);
+ cs = mem_alloc(sizeof(*cs));
+ SOCKBUF_LOCK(&cu->cu_socket->so_rcv);
+ if (so->so_upcall) {
+ /*
+ * We have lost a race with some other client.
+ */
+ mem_free(cs, sizeof(*cs));
+ goto recheck_socket;
+ }
+ mtx_init(&cs->cs_lock, "cs->cs_lock", NULL, MTX_DEF);
+ cs->cs_refs = 1;
+ TAILQ_INIT(&cs->cs_pending);
+ so->so_upcallarg = cs;
+ so->so_upcall = clnt_dg_soupcall;
+ so->so_rcv.sb_flags |= SB_UPCALL;
+ }
+ SOCKBUF_UNLOCK(&so->so_rcv);
+
+ cl->cl_ops = &clnt_dg_ops;
+ cl->cl_private = (caddr_t)(void *)cu;
+ cl->cl_auth = authnone_create();
+ cl->cl_tp = NULL;
+ cl->cl_netid = NULL;
+ return (cl);
+err2:
+ if (cl) {
+ mem_free(cl, sizeof (CLIENT));
+ if (cu)
+ mem_free(cu, sizeof (*cu));
+ }
+ return (NULL);
+}
+
+static enum clnt_stat
+clnt_dg_call(
+ CLIENT *cl, /* client handle */
+ rpcproc_t proc, /* procedure number */
+ xdrproc_t xargs, /* xdr routine for args */
+ void *argsp, /* pointer to args */
+ xdrproc_t xresults, /* xdr routine for results */
+ void *resultsp, /* pointer to results */
+ struct timeval utimeout) /* seconds to wait before giving up */
+{
+ struct cu_data *cu = (struct cu_data *)cl->cl_private;
+ struct cu_socket *cs = (struct cu_socket *) cu->cu_socket->so_upcallarg;
+ XDR xdrs;
+ struct rpc_msg reply_msg;
+ bool_t ok;
+ int nrefreshes = 2; /* number of times to refresh cred */
+ struct timeval timeout;
+ struct timeval retransmit_time;
+ struct timeval next_sendtime, starttime, time_waited, tv;
+ struct sockaddr *sa;
+ socklen_t salen;
+ uint32_t xid;
+ struct mbuf *mreq = NULL;
+ struct cu_request cr;
+ int error;
+
+ mtx_lock(&cs->cs_lock);
+
+ cr.cr_mrep = NULL;
+ cr.cr_error = 0;
+
+ if (cu->cu_total.tv_usec == -1) {
+ timeout = utimeout; /* use supplied timeout */
+ } else {
+ timeout = cu->cu_total; /* use default timeout */
+ }
+
+ if (cu->cu_connect && !cu->cu_connected) {
+ mtx_unlock(&cs->cs_lock);
+ error = soconnect(cu->cu_socket,
+ (struct sockaddr *)&cu->cu_raddr, curthread);
+ mtx_lock(&cs->cs_lock);
+ if (error) {
+ cu->cu_error.re_errno = error;
+ cu->cu_error.re_status = RPC_CANTSEND;
+ goto out;
+ }
+ cu->cu_connected = 1;
+ }
+ if (cu->cu_connected) {
+ sa = NULL;
+ salen = 0;
+ } else {
+ sa = (struct sockaddr *)&cu->cu_raddr;
+ salen = cu->cu_rlen;
+ }
+ time_waited.tv_sec = 0;
+ time_waited.tv_usec = 0;
+ retransmit_time = next_sendtime = cu->cu_wait;
+
+ getmicrotime(&starttime);
+
+call_again:
+ mtx_assert(&cs->cs_lock, MA_OWNED);
+
+ cu->cu_xid++;
+ xid = cu->cu_xid;
+
+send_again:
+ mtx_unlock(&cs->cs_lock);
+
+ MGETHDR(mreq, M_WAIT, MT_DATA);
+ MCLGET(mreq, M_WAIT);
+ mreq->m_len = 0;
+ m_append(mreq, cu->cu_mcalllen, cu->cu_mcallc);
+
+ /*
+ * The XID is the first thing in the request.
+ */
+ *mtod(mreq, uint32_t *) = htonl(xid);
+
+ xdrmbuf_create(&xdrs, mreq, XDR_ENCODE);
+
+ if (cu->cu_async == TRUE && xargs == NULL)
+ goto get_reply;
+
+ if ((! XDR_PUTINT32(&xdrs, &proc)) ||
+ (! AUTH_MARSHALL(cl->cl_auth, &xdrs)) ||
+ (! (*xargs)(&xdrs, argsp))) {
+ cu->cu_error.re_status = RPC_CANTENCODEARGS;
+ mtx_lock(&cs->cs_lock);
+ goto out;
+ }
+ m_fixhdr(mreq);
+
+ cr.cr_xid = xid;
+ mtx_lock(&cs->cs_lock);
+ TAILQ_INSERT_TAIL(&cs->cs_pending, &cr, cr_link);
+ mtx_unlock(&cs->cs_lock);
+
+ /*
+ * sosend consumes mreq.
+ */
+ error = sosend(cu->cu_socket, sa, NULL, mreq, NULL, 0, curthread);
+ mreq = NULL;
+
+ /*
+ * sub-optimal code appears here because we have
+ * some clock time to spare while the packets are in flight.
+ * (We assume that this is actually only executed once.)
+ */
+ reply_msg.acpted_rply.ar_verf = _null_auth;
+ reply_msg.acpted_rply.ar_results.where = resultsp;
+ reply_msg.acpted_rply.ar_results.proc = xresults;
+
+ mtx_lock(&cs->cs_lock);
+ if (error) {
+ TAILQ_REMOVE(&cs->cs_pending, &cr, cr_link);
+
+ cu->cu_error.re_errno = error;
+ cu->cu_error.re_status = RPC_CANTSEND;
+ goto out;
+ }
+
+ /*
+ * Check to see if we got an upcall while waiting for the
+ * lock, i.e. either an error was recorded or a reply arrived.
+ * In both cases, the request has been removed from cs->cs_pending.
+ */
+ if (cr.cr_error) {
+ cu->cu_error.re_errno = cr.cr_error;
+ cu->cu_error.re_status = RPC_CANTRECV;
+ goto out;
+ }
+ if (cr.cr_mrep) {
+ goto got_reply;
+ }
+
+ /*
+ * Hack to provide rpc-based message passing
+ */
+ if (timeout.tv_sec == 0 && timeout.tv_usec == 0) {
+ if (cr.cr_xid)
+ TAILQ_REMOVE(&cs->cs_pending, &cr, cr_link);
+ cu->cu_error.re_status = RPC_TIMEDOUT;
+ goto out;
+ }
+
+get_reply:
+ for (;;) {
+ /* Decide how long to wait. */
+ if (timevalcmp(&next_sendtime, &timeout, <)) {
+ tv = next_sendtime;
+ } else {
+ tv = timeout;
+ }
+ timevalsub(&tv, &time_waited);
+ if (tv.tv_sec < 0 || tv.tv_usec < 0)
+ tv.tv_sec = tv.tv_usec = 0;
+
+ error = msleep(&cr, &cs->cs_lock, cu->cu_waitflag,
+ cu->cu_waitchan, tvtohz(&tv));
+
+ if (!error) {
+ /*
+ * We were woken up by the upcall. If the
+ * upcall had a receive error, report that,
+ * otherwise we have a reply.
+ */
+ if (cr.cr_error) {
+ cu->cu_error.re_errno = cr.cr_error;
+ cu->cu_error.re_status = RPC_CANTRECV;
+ goto out;
+ }
+ break;
+ }
+
+ /*
+ * The sleep returned an error so our request is still
+ * on the list. If we got EWOULDBLOCK, we may want to
+ * re-send the request.
+ */
+ if (error != EWOULDBLOCK) {
+ if (cr.cr_xid)
+ TAILQ_REMOVE(&cs->cs_pending, &cr, cr_link);
+ cu->cu_error.re_errno = error;
+ if (error == EINTR)
+ cu->cu_error.re_status = RPC_INTR;
+ else
+ cu->cu_error.re_status = RPC_CANTRECV;
+ goto out;
+ }
+
+ getmicrotime(&tv);
+ time_waited = tv;
+ timevalsub(&time_waited, &starttime);
+
+ /* Check for timeout. */
+ if (timevalcmp(&time_waited, &timeout, >)) {
+ if (cr.cr_xid)
+ TAILQ_REMOVE(&cs->cs_pending, &cr, cr_link);
+ cu->cu_error.re_errno = EWOULDBLOCK;
+ cu->cu_error.re_status = RPC_TIMEDOUT;
+ goto out;
+ }
+
+ /* Retransmit if necessary. */
+ if (timevalcmp(&time_waited, &next_sendtime, >)) {
+ if (cr.cr_xid)
+ TAILQ_REMOVE(&cs->cs_pending, &cr, cr_link);
+ /* update retransmit_time */
+ if (retransmit_time.tv_sec < RPC_MAX_BACKOFF)
+ timevaladd(&retransmit_time, &retransmit_time);
+ timevaladd(&next_sendtime, &retransmit_time);
+ goto send_again;
+ }
+ }
+
+got_reply:
+ /*
+ * Now decode and validate the response. We need to drop the
+ * lock since xdr_replymsg may end up sleeping in malloc.
+ */
+ mtx_unlock(&cs->cs_lock);
+
+ xdrmbuf_create(&xdrs, cr.cr_mrep, XDR_DECODE);
+ ok = xdr_replymsg(&xdrs, &reply_msg);
+ XDR_DESTROY(&xdrs);
+ cr.cr_mrep = NULL;
+
+ mtx_lock(&cs->cs_lock);
+
+ if (ok) {
+ if ((reply_msg.rm_reply.rp_stat == MSG_ACCEPTED) &&
+ (reply_msg.acpted_rply.ar_stat == SUCCESS))
+ cu->cu_error.re_status = RPC_SUCCESS;
+ else
+ _seterr_reply(&reply_msg, &(cu->cu_error));
+
+ if (cu->cu_error.re_status == RPC_SUCCESS) {
+ if (! AUTH_VALIDATE(cl->cl_auth,
+ &reply_msg.acpted_rply.ar_verf)) {
+ cu->cu_error.re_status = RPC_AUTHERROR;
+ cu->cu_error.re_why = AUTH_INVALIDRESP;
+ }
+ if (reply_msg.acpted_rply.ar_verf.oa_base != NULL) {
+ xdrs.x_op = XDR_FREE;
+ (void) xdr_opaque_auth(&xdrs,
+ &(reply_msg.acpted_rply.ar_verf));
+ }
+ } /* end successful completion */
+ /*
+ * If unsuccessful AND error is an authentication error
+ * then refresh credentials and try again, else break
+ */
+ else if (cu->cu_error.re_status == RPC_AUTHERROR)
+ /* maybe our credentials need to be refreshed ... */
+ if (nrefreshes > 0 &&
+ AUTH_REFRESH(cl->cl_auth, &reply_msg)) {
+ nrefreshes--;
+ goto call_again;
+ }
+ /* end of unsuccessful completion */
+ } /* end of valid reply message */
+ else {
+ cu->cu_error.re_status = RPC_CANTDECODERES;
+
+ }
+out:
+ mtx_assert(&cs->cs_lock, MA_OWNED);
+
+ if (mreq)
+ m_freem(mreq);
+ if (cr.cr_mrep)
+ m_freem(cr.cr_mrep);
+
+ mtx_unlock(&cs->cs_lock);
+ return (cu->cu_error.re_status);
+}
+
+static void
+clnt_dg_geterr(CLIENT *cl, struct rpc_err *errp)
+{
+ struct cu_data *cu = (struct cu_data *)cl->cl_private;
+
+ *errp = cu->cu_error;
+}
+
+static bool_t
+clnt_dg_freeres(CLIENT *cl, xdrproc_t xdr_res, void *res_ptr)
+{
+ XDR xdrs;
+ bool_t dummy;
+
+ xdrs.x_op = XDR_FREE;
+ dummy = (*xdr_res)(&xdrs, res_ptr);
+
+ return (dummy);
+}
+
+/*ARGSUSED*/
+static void
+clnt_dg_abort(CLIENT *h)
+{
+}
+
+static bool_t
+clnt_dg_control(CLIENT *cl, u_int request, void *info)
+{
+ struct cu_data *cu = (struct cu_data *)cl->cl_private;
+ struct cu_socket *cs = (struct cu_socket *) cu->cu_socket->so_upcallarg;
+ struct sockaddr *addr;
+
+ mtx_lock(&cs->cs_lock);
+
+ switch (request) {
+ case CLSET_FD_CLOSE:
+ cu->cu_closeit = TRUE;
+ mtx_unlock(&cs->cs_lock);
+ return (TRUE);
+ case CLSET_FD_NCLOSE:
+ cu->cu_closeit = FALSE;
+ mtx_unlock(&cs->cs_lock);
+ return (TRUE);
+ }
+
+ /* for other requests which use info */
+ if (info == NULL) {
+ mtx_unlock(&cs->cs_lock);
+ return (FALSE);
+ }
+ switch (request) {
+ case CLSET_TIMEOUT:
+ if (time_not_ok((struct timeval *)info)) {
+ mtx_unlock(&cs->cs_lock);
+ return (FALSE);
+ }
+ cu->cu_total = *(struct timeval *)info;
+ break;
+ case CLGET_TIMEOUT:
+ *(struct timeval *)info = cu->cu_total;
+ break;
+ case CLSET_RETRY_TIMEOUT:
+ if (time_not_ok((struct timeval *)info)) {
+ mtx_unlock(&cs->cs_lock);
+ return (FALSE);
+ }
+ cu->cu_wait = *(struct timeval *)info;
+ break;
+ case CLGET_RETRY_TIMEOUT:
+ *(struct timeval *)info = cu->cu_wait;
+ break;
+ case CLGET_SVC_ADDR:
+ /*
+ * Slightly different semantics to userland - we use
+ * sockaddr instead of netbuf.
+ */
+ memcpy(info, &cu->cu_raddr, cu->cu_raddr.ss_len);
+ break;
+ case CLSET_SVC_ADDR: /* set to new address */
+ addr = (struct sockaddr *)info;
+ (void) memcpy(&cu->cu_raddr, addr, addr->sa_len);
+ break;
+ case CLGET_XID:
+ *(uint32_t *)info = cu->cu_xid;
+ break;
+
+ case CLSET_XID:
+ /* This will set the xid of the NEXT call */
+ /* decrement by 1 as clnt_dg_call() increments once */
+ cu->cu_xid = *(uint32_t *)info - 1;
+ break;
+
+ case CLGET_VERS:
+ /*
+ * This RELIES on the information that, in the call body,
+ * the version number field is the fifth field from the
+ * beginning of the RPC header. MUST be changed if the
+ * call_struct is changed
+ */
+ *(uint32_t *)info =
+ ntohl(*(uint32_t *)(void *)(cu->cu_mcallc +
+ 4 * BYTES_PER_XDR_UNIT));
+ break;
+
+ case CLSET_VERS:
+ *(uint32_t *)(void *)(cu->cu_mcallc + 4 * BYTES_PER_XDR_UNIT)
+ = htonl(*(uint32_t *)info);
+ break;
+
+ case CLGET_PROG:
+ /*
+ * This RELIES on the information that, in the call body,
+ * the program number field is the fourth field from the
+ * beginning of the RPC header. MUST be changed if the
+ * call_struct is changed
+ */
+ *(uint32_t *)info =
+ ntohl(*(uint32_t *)(void *)(cu->cu_mcallc +
+ 3 * BYTES_PER_XDR_UNIT));
+ break;
+
+ case CLSET_PROG:
+ *(uint32_t *)(void *)(cu->cu_mcallc + 3 * BYTES_PER_XDR_UNIT)
+ = htonl(*(uint32_t *)info);
+ break;
+ case CLSET_ASYNC:
+ cu->cu_async = *(int *)info;
+ break;
+ case CLSET_CONNECT:
+ cu->cu_connect = *(int *)info;
+ break;
+ case CLSET_WAITCHAN:
+ cu->cu_waitchan = *(const char **)info;
+ break;
+ case CLGET_WAITCHAN:
+ *(const char **) info = cu->cu_waitchan;
+ break;
+ case CLSET_INTERRUPTIBLE:
+ if (*(int *) info)
+ cu->cu_waitflag = PCATCH;
+ else
+ cu->cu_waitflag = 0;
+ break;
+ case CLGET_INTERRUPTIBLE:
+ if (cu->cu_waitflag)
+ *(int *) info = TRUE;
+ else
+ *(int *) info = FALSE;
+ break;
+ default:
+ mtx_unlock(&cs->cs_lock);
+ return (FALSE);
+ }
+ mtx_unlock(&cs->cs_lock);
+ return (TRUE);
+}
+
+static void
+clnt_dg_destroy(CLIENT *cl)
+{
+ struct cu_data *cu = (struct cu_data *)cl->cl_private;
+ struct cu_socket *cs = (struct cu_socket *) cu->cu_socket->so_upcallarg;
+ struct socket *so = NULL;
+ bool_t lastsocketref;
+
+ SOCKBUF_LOCK(&cu->cu_socket->so_rcv);
+
+ mtx_lock(&cs->cs_lock);
+ cs->cs_refs--;
+ if (cs->cs_refs == 0) {
+ cu->cu_socket->so_upcallarg = NULL;
+ cu->cu_socket->so_upcall = NULL;
+ cu->cu_socket->so_rcv.sb_flags &= ~SB_UPCALL;
+ mtx_destroy(&cs->cs_lock);
+ SOCKBUF_UNLOCK(&cu->cu_socket->so_rcv);
+ mem_free(cs, sizeof(*cs));
+ lastsocketref = TRUE;
+ } else {
+ mtx_unlock(&cs->cs_lock);
+ SOCKBUF_UNLOCK(&cu->cu_socket->so_rcv);
+ lastsocketref = FALSE;
+ }
+
+ if (cu->cu_closeit) {
+ KASSERT(lastsocketref, ("clnt_dg_destroy(): closing a socket "
+ "shared with other clients"));
+ so = cu->cu_socket;
+ cu->cu_socket = NULL;
+ }
+
+ if (so)
+ soclose(so);
+
+ if (cl->cl_netid && cl->cl_netid[0])
+ mem_free(cl->cl_netid, strlen(cl->cl_netid) +1);
+ if (cl->cl_tp && cl->cl_tp[0])
+ mem_free(cl->cl_tp, strlen(cl->cl_tp) +1);
+ mem_free(cu, sizeof (*cu));
+ mem_free(cl, sizeof (CLIENT));
+}
+
+/*
+ * Make sure that the time is not garbage. -1 value is allowed.
+ */
+static bool_t
+time_not_ok(struct timeval *t)
+{
+ return (t->tv_sec < -1 || t->tv_sec > 100000000 ||
+ t->tv_usec < -1 || t->tv_usec > 1000000);
+}
+
+void
+clnt_dg_soupcall(struct socket *so, void *arg, int waitflag)
+{
+ struct cu_socket *cs = (struct cu_socket *) arg;
+ struct uio uio;
+ struct mbuf *m;
+ struct mbuf *control;
+ struct cu_request *cr;
+ int error, rcvflag, foundreq;
+ uint32_t xid;
+
+ uio.uio_resid = 1000000000;
+ uio.uio_td = curthread;
+ do {
+ m = NULL;
+ control = NULL;
+ rcvflag = MSG_DONTWAIT;
+ error = soreceive(so, NULL, &uio, &m, &control, &rcvflag);
+ if (control)
+ m_freem(control);
+
+ if (error == EWOULDBLOCK)
+ break;
+
+ /*
+ * If there was an error, wake up all pending
+ * requests.
+ */
+ if (error) {
+ mtx_lock(&cs->cs_lock);
+ TAILQ_FOREACH(cr, &cs->cs_pending, cr_link) {
+ cr->cr_error = error;
+ wakeup(cr);
+ }
+ TAILQ_INIT(&cs->cs_pending);
+ mtx_unlock(&cs->cs_lock);
+ break;
+ }
+
+ /*
+ * The XID is in the first uint32_t of the reply.
+ */
+ m = m_pullup(m, sizeof(xid));
+ if (!m)
+ break;
+ xid = ntohl(*mtod(m, uint32_t *));
+
+ /*
+ * Attempt to match this reply with a pending request.
+ */
+ mtx_lock(&cs->cs_lock);
+ foundreq = 0;
+ TAILQ_FOREACH(cr, &cs->cs_pending, cr_link) {
+ if (cr->cr_xid == xid) {
+ /*
+ * This one matches. We snip it out of
+ * the pending list and leave the
+ * reply mbuf in cr->cr_mrep. Set the
+ * XID to zero so that clnt_dg_call
+ * can know not to repeat the
+ * TAILQ_REMOVE.
+ */
+ TAILQ_REMOVE(&cs->cs_pending, cr, cr_link);
+ cr->cr_xid = 0;
+ cr->cr_mrep = m;
+ cr->cr_error = 0;
+ foundreq = 1;
+ wakeup(cr);
+ break;
+ }
+ }
+ mtx_unlock(&cs->cs_lock);
+
+ /*
+ * If we didn't find the matching request, just drop
+		 * it - it's probably a repeated reply.
+ */
+ if (!foundreq)
+ m_freem(m);
+ } while (m);
+}
+
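
The upcall above demultiplexes incoming UDP replies purely by the XID carried in the first 32-bit word of the reply, waking whichever pending request matches. As a rough standalone illustration of that matching step (plain userland C, not the kernel code; struct pending and match_reply are invented for the sketch):

#include <arpa/inet.h>	/* ntohl, htonl */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical pending-request record, loosely modelled on cu_request. */
struct pending {
	uint32_t xid;		/* XID the request was sent with */
	int	 done;		/* reply matched and delivered */
};

/*
 * Extract the XID from a raw RPC reply and mark the matching pending
 * request as done.  Returns 1 if a request matched, 0 otherwise.
 */
static int
match_reply(const unsigned char *reply, size_t len,
    struct pending *table, size_t ntable)
{
	uint32_t xid;
	size_t i;

	if (len < sizeof(xid))
		return (0);	/* too short to carry an XID */
	memcpy(&xid, reply, sizeof(xid));
	xid = ntohl(xid);	/* the XID is the first word, network order */

	for (i = 0; i < ntable; i++) {
		if (!table[i].done && table[i].xid == xid) {
			table[i].done = 1;
			return (1);
		}
	}
	return (0);		/* unmatched: probably a duplicate reply */
}

int
main(void)
{
	struct pending table[2] = { { 0x1234, 0 }, { 0x5678, 0 } };
	unsigned char reply[4];
	uint32_t wire = htonl(0x5678);

	memcpy(reply, &wire, sizeof(wire));
	printf("matched: %d\n", match_reply(reply, sizeof(reply), table, 2));
	return (0);
}

Unmatched replies are simply dropped, which is what clnt_dg_soupcall() does for duplicate retransmissions.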
diff --git a/sys/rpc/clnt_rc.c b/sys/rpc/clnt_rc.c
new file mode 100644
index 0000000..e767410
--- /dev/null
+++ b/sys/rpc/clnt_rc.c
@@ -0,0 +1,307 @@
+/*-
+ * Copyright (c) 2008 Isilon Inc http://www.isilon.com/
+ * Authors: Doug Rabson <dfr@rabson.org>
+ * Developed with Red Inc: Alfred Perlstein <alfred@freebsd.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/mutex.h>
+#include <sys/pcpu.h>
+#include <sys/proc.h>
+#include <sys/socket.h>
+#include <sys/socketvar.h>
+#include <sys/time.h>
+#include <sys/uio.h>
+
+#include <rpc/rpc.h>
+#include "rpc_com.h"
+
+static enum clnt_stat clnt_reconnect_call(CLIENT *, rpcproc_t,
+ xdrproc_t, void *, xdrproc_t, void *, struct timeval);
+static void clnt_reconnect_geterr(CLIENT *, struct rpc_err *);
+static bool_t clnt_reconnect_freeres(CLIENT *, xdrproc_t, void *);
+static void clnt_reconnect_abort(CLIENT *);
+static bool_t clnt_reconnect_control(CLIENT *, u_int, void *);
+static void clnt_reconnect_destroy(CLIENT *);
+
+static struct clnt_ops clnt_reconnect_ops = {
+ .cl_call = clnt_reconnect_call,
+ .cl_abort = clnt_reconnect_abort,
+ .cl_geterr = clnt_reconnect_geterr,
+ .cl_freeres = clnt_reconnect_freeres,
+ .cl_destroy = clnt_reconnect_destroy,
+ .cl_control = clnt_reconnect_control
+};
+
+struct rc_data {
+ struct sockaddr_storage rc_addr; /* server address */
+ struct netconfig* rc_nconf; /* network type */
+ rpcprog_t rc_prog; /* program number */
+ rpcvers_t rc_vers; /* version number */
+ size_t rc_sendsz;
+ size_t rc_recvsz;
+ struct timeval rc_timeout;
+ struct timeval rc_retry;
+ const char *rc_waitchan;
+ int rc_intr;
+ CLIENT* rc_client; /* underlying RPC client */
+};
+
+CLIENT *
+clnt_reconnect_create(
+ struct netconfig *nconf, /* network type */
+	struct sockaddr *svcaddr,	/* server's address */
+ rpcprog_t program, /* program number */
+ rpcvers_t version, /* version number */
+	size_t sendsz,			/* buffer send size */
+	size_t recvsz)			/* buffer recv size */
+{
+ CLIENT *cl = NULL; /* client handle */
+ struct rc_data *rc = NULL; /* private data */
+
+ if (svcaddr == NULL) {
+ rpc_createerr.cf_stat = RPC_UNKNOWNADDR;
+ return (NULL);
+ }
+
+ cl = mem_alloc(sizeof (CLIENT));
+ rc = mem_alloc(sizeof (*rc));
+ (void) memcpy(&rc->rc_addr, svcaddr, (size_t)svcaddr->sa_len);
+ rc->rc_nconf = nconf;
+ rc->rc_prog = program;
+ rc->rc_vers = version;
+ rc->rc_sendsz = sendsz;
+ rc->rc_recvsz = recvsz;
+ rc->rc_timeout.tv_sec = -1;
+ rc->rc_timeout.tv_usec = -1;
+ rc->rc_retry.tv_sec = 15;
+ rc->rc_retry.tv_usec = 0;
+ rc->rc_waitchan = "rpcrecv";
+ rc->rc_intr = 0;
+ rc->rc_client = NULL;
+
+ cl->cl_ops = &clnt_reconnect_ops;
+ cl->cl_private = (caddr_t)(void *)rc;
+ cl->cl_auth = authnone_create();
+ cl->cl_tp = NULL;
+ cl->cl_netid = NULL;
+ return (cl);
+}
+
+static enum clnt_stat
+clnt_reconnect_connect(CLIENT *cl)
+{
+ struct rc_data *rc = (struct rc_data *)cl->cl_private;
+ struct socket *so;
+ int one = 1;
+
+ so = __rpc_nconf2socket(rc->rc_nconf);
+ if (!so) {
+ rpc_createerr.cf_stat = RPC_TLIERROR;
+ rpc_createerr.cf_error.re_errno = 0;
+ return (RPC_TLIERROR);
+ }
+
+ if (rc->rc_nconf->nc_semantics == NC_TPI_CLTS)
+ rc->rc_client = clnt_dg_create(so,
+ (struct sockaddr *) &rc->rc_addr, rc->rc_prog, rc->rc_vers,
+ rc->rc_sendsz, rc->rc_recvsz);
+ else
+ rc->rc_client = clnt_vc_create(so,
+ (struct sockaddr *) &rc->rc_addr, rc->rc_prog, rc->rc_vers,
+ rc->rc_sendsz, rc->rc_recvsz);
+
+ CLNT_CONTROL(rc->rc_client, CLSET_FD_CLOSE, 0);
+ CLNT_CONTROL(rc->rc_client, CLSET_CONNECT, &one);
+ CLNT_CONTROL(rc->rc_client, CLSET_TIMEOUT, &rc->rc_timeout);
+ CLNT_CONTROL(rc->rc_client, CLSET_RETRY_TIMEOUT, &rc->rc_retry);
+ CLNT_CONTROL(rc->rc_client, CLSET_WAITCHAN, &rc->rc_waitchan);
+ CLNT_CONTROL(rc->rc_client, CLSET_INTERRUPTIBLE, &rc->rc_intr);
+
+ return (RPC_SUCCESS);
+}
+
+static enum clnt_stat
+clnt_reconnect_call(
+ CLIENT *cl, /* client handle */
+ rpcproc_t proc, /* procedure number */
+ xdrproc_t xargs, /* xdr routine for args */
+ void *argsp, /* pointer to args */
+ xdrproc_t xresults, /* xdr routine for results */
+ void *resultsp, /* pointer to results */
+ struct timeval utimeout) /* seconds to wait before giving up */
+{
+ struct rc_data *rc = (struct rc_data *)cl->cl_private;
+ enum clnt_stat stat;
+
+ do {
+ if (!rc->rc_client)
+ clnt_reconnect_connect(cl);
+
+ stat = CLNT_CALL(rc->rc_client, proc, xargs, argsp,
+ xresults, resultsp, utimeout);
+
+ if (stat == RPC_TIMEDOUT) {
+ /*
+ * Check for async send misfeature for NLM
+ * protocol.
+ */
+ if ((rc->rc_timeout.tv_sec == 0
+ && rc->rc_timeout.tv_usec == 0)
+ || (rc->rc_timeout.tv_sec == -1
+ && utimeout.tv_sec == 0
+ && utimeout.tv_usec == 0))
+ break;
+ }
+
+ if (stat == RPC_INTR)
+ break;
+
+ if (stat != RPC_SUCCESS) {
+ CLNT_DESTROY(rc->rc_client);
+ rc->rc_client = NULL;
+ }
+ } while (stat != RPC_SUCCESS);
+
+ return (stat);
+}
+
+static void
+clnt_reconnect_geterr(CLIENT *cl, struct rpc_err *errp)
+{
+ struct rc_data *rc = (struct rc_data *)cl->cl_private;
+
+ if (rc->rc_client)
+ CLNT_GETERR(rc->rc_client, errp);
+ else
+ memset(errp, 0, sizeof(*errp));
+}
+
+static bool_t
+clnt_reconnect_freeres(CLIENT *cl, xdrproc_t xdr_res, void *res_ptr)
+{
+ struct rc_data *rc = (struct rc_data *)cl->cl_private;
+
+ return (CLNT_FREERES(rc->rc_client, xdr_res, res_ptr));
+}
+
+/*ARGSUSED*/
+static void
+clnt_reconnect_abort(CLIENT *h)
+{
+}
+
+static bool_t
+clnt_reconnect_control(CLIENT *cl, u_int request, void *info)
+{
+ struct rc_data *rc = (struct rc_data *)cl->cl_private;
+
+ if (info == NULL) {
+ return (FALSE);
+ }
+ switch (request) {
+ case CLSET_TIMEOUT:
+ rc->rc_timeout = *(struct timeval *)info;
+ if (rc->rc_client)
+ CLNT_CONTROL(rc->rc_client, request, info);
+ break;
+
+ case CLGET_TIMEOUT:
+ *(struct timeval *)info = rc->rc_timeout;
+ break;
+
+ case CLSET_RETRY_TIMEOUT:
+ rc->rc_retry = *(struct timeval *)info;
+ if (rc->rc_client)
+ CLNT_CONTROL(rc->rc_client, request, info);
+ break;
+
+ case CLGET_RETRY_TIMEOUT:
+ *(struct timeval *)info = rc->rc_retry;
+ break;
+
+ case CLGET_VERS:
+ *(uint32_t *)info = rc->rc_vers;
+ break;
+
+ case CLSET_VERS:
+ rc->rc_vers = *(uint32_t *) info;
+ if (rc->rc_client)
+ CLNT_CONTROL(rc->rc_client, CLSET_VERS, info);
+ break;
+
+ case CLGET_PROG:
+ *(uint32_t *)info = rc->rc_prog;
+ break;
+
+ case CLSET_PROG:
+ rc->rc_prog = *(uint32_t *) info;
+ if (rc->rc_client)
+ CLNT_CONTROL(rc->rc_client, request, info);
+ break;
+
+ case CLSET_WAITCHAN:
+ rc->rc_waitchan = *(const char **)info;
+ if (rc->rc_client)
+ CLNT_CONTROL(rc->rc_client, request, info);
+ break;
+
+ case CLGET_WAITCHAN:
+ *(const char **) info = rc->rc_waitchan;
+ break;
+
+ case CLSET_INTERRUPTIBLE:
+ rc->rc_intr = *(int *) info;
+ if (rc->rc_client)
+ CLNT_CONTROL(rc->rc_client, request, info);
+ break;
+
+ case CLGET_INTERRUPTIBLE:
+ *(int *) info = rc->rc_intr;
+ break;
+
+ default:
+ return (FALSE);
+ }
+
+ return (TRUE);
+}
+
+static void
+clnt_reconnect_destroy(CLIENT *cl)
+{
+ struct rc_data *rc = (struct rc_data *)cl->cl_private;
+
+ if (rc->rc_client)
+ CLNT_DESTROY(rc->rc_client);
+ mem_free(rc, sizeof(*rc));
+ mem_free(cl, sizeof (CLIENT));
+}
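
clnt_reconnect_call() simply re-creates the underlying client handle whenever a call fails for any reason other than success, interruption, or the deliberate zero-timeout "async send" case. The sketch below shows the same retry-with-reconnect shape in standalone userland C; conn_open, do_call and the status names are invented for the example and are not the kernel API.

#include <stdio.h>

/* Hypothetical status codes, mirroring the shape of enum clnt_stat. */
enum call_stat { CALL_OK, CALL_TIMEDOUT, CALL_INTR, CALL_FAILED };

struct conn { int generation; };	/* stand-in for the wrapped CLIENT */

/* Assumed helpers: (re)establish and tear down the underlying handle. */
static int  conn_open(struct conn *c) { c->generation++; return (0); }
static void conn_close(struct conn *c) { (void)c; }

/* Fail twice, then succeed - simulates a server restart. */
static enum call_stat
do_call(struct conn *c)
{
	static int attempts;

	if (++attempts < 3)
		return (CALL_FAILED);
	printf("call succeeded on connection generation %d\n", c->generation);
	return (CALL_OK);
}

int
main(void)
{
	struct conn c = { 0 };
	enum call_stat stat;
	int connected = 0;

	do {
		if (!connected) {
			conn_open(&c);	/* (re)create the handle */
			connected = 1;
		}
		stat = do_call(&c);
		if (stat == CALL_INTR)
			break;			/* caller interrupted */
		if (stat != CALL_OK) {
			conn_close(&c);		/* drop the broken handle */
			connected = 0;		/* and reconnect next pass */
		}
	} while (stat != CALL_OK);
	return (stat == CALL_OK ? 0 : 1);
}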
diff --git a/sys/rpc/clnt_stat.h b/sys/rpc/clnt_stat.h
new file mode 100644
index 0000000..6148b4e
--- /dev/null
+++ b/sys/rpc/clnt_stat.h
@@ -0,0 +1,83 @@
+/* $FreeBSD$ */
+/*
+ * Copyright (c) 1986 - 1991, 1994, 1996, 1997 by Sun Microsystems, Inc.
+ * All rights reserved.
+ */
+
+/*
+ * clnt_stat.h - Client side remote procedure call enum
+ *
+ */
+
+#ifndef _RPC_CLNT_STAT_H
+#define _RPC_CLNT_STAT_H
+
+/* #pragma ident "@(#)clnt_stat.h 1.2 97/04/28 SMI" */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+enum clnt_stat {
+ RPC_SUCCESS = 0, /* call succeeded */
+ /*
+ * local errors
+ */
+ RPC_CANTENCODEARGS = 1, /* can't encode arguments */
+ RPC_CANTDECODERES = 2, /* can't decode results */
+ RPC_CANTSEND = 3, /* failure in sending call */
+ RPC_CANTRECV = 4,
+ /* failure in receiving result */
+ RPC_TIMEDOUT = 5, /* call timed out */
+ RPC_INTR = 18, /* call interrupted */
+ RPC_UDERROR = 23, /* recv got uderr indication */
+ /*
+ * remote errors
+ */
+ RPC_VERSMISMATCH = 6, /* rpc versions not compatible */
+ RPC_AUTHERROR = 7, /* authentication error */
+ RPC_PROGUNAVAIL = 8, /* program not available */
+ RPC_PROGVERSMISMATCH = 9, /* program version mismatched */
+ RPC_PROCUNAVAIL = 10, /* procedure unavailable */
+ RPC_CANTDECODEARGS = 11, /* decode arguments error */
+ RPC_SYSTEMERROR = 12, /* generic "other problem" */
+
+ /*
+ * rpc_call & clnt_create errors
+ */
+ RPC_UNKNOWNHOST = 13, /* unknown host name */
+ RPC_UNKNOWNPROTO = 17, /* unknown protocol */
+ RPC_UNKNOWNADDR = 19, /* Remote address unknown */
+ RPC_NOBROADCAST = 21, /* Broadcasting not supported */
+
+ /*
+ * rpcbind errors
+ */
+ RPC_RPCBFAILURE = 14, /* the pmapper failed in its call */
+#define RPC_PMAPFAILURE RPC_RPCBFAILURE
+ RPC_PROGNOTREGISTERED = 15, /* remote program is not registered */
+ RPC_N2AXLATEFAILURE = 22,
+ /* Name to address translation failed */
+ /*
+ * Misc error in the TLI library
+ */
+ RPC_TLIERROR = 20,
+ /*
+ * unspecified error
+ */
+ RPC_FAILED = 16,
+ /*
+ * asynchronous errors
+ */
+ RPC_INPROGRESS = 24,
+ RPC_STALERACHANDLE = 25,
+ RPC_CANTCONNECT = 26, /* couldn't make connection (cots) */
+ RPC_XPRTFAILED = 27, /* received discon from remote (cots) */
+ RPC_CANTCREATESTREAM = 28 /* can't push rpc module (cots) */
+};
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !_RPC_CLNT_STAT_H */
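
Callers of the client code typically treat RPC_TIMEDOUT and RPC_INTR specially (retry versus stop) and report anything else as a failure. A minimal userland sketch of that pattern, assuming the TI-RPC userland headers that FreeBSD ships (clnt_sperrno() converts a status to text; link against the system RPC library if it is not in libc):

#include <rpc/rpc.h>	/* enum clnt_stat, clnt_sperrno() */
#include <stdio.h>

/* Decide what to do with an RPC result code. */
static const char *
classify(enum clnt_stat stat)
{
	switch (stat) {
	case RPC_SUCCESS:
		return ("done");
	case RPC_TIMEDOUT:
		return ("retry");	/* transient: try again */
	case RPC_INTR:
		return ("interrupted");	/* caller asked to stop */
	default:
		return ("fail");	/* report and give up */
	}
}

int
main(void)
{
	enum clnt_stat samples[] = { RPC_SUCCESS, RPC_TIMEDOUT, RPC_AUTHERROR };
	size_t i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%-30s -> %s\n", clnt_sperrno(samples[i]),
		    classify(samples[i]));
	return (0);
}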
diff --git a/sys/rpc/clnt_vc.c b/sys/rpc/clnt_vc.c
new file mode 100644
index 0000000..a92d800
--- /dev/null
+++ b/sys/rpc/clnt_vc.c
@@ -0,0 +1,827 @@
+/* $NetBSD: clnt_vc.c,v 1.4 2000/07/14 08:40:42 fvdl Exp $ */
+
+/*
+ * Sun RPC is a product of Sun Microsystems, Inc. and is provided for
+ * unrestricted use provided that this legend is included on all tape
+ * media and as a part of the software program in whole or part. Users
+ * may copy or modify Sun RPC without charge, but are not authorized
+ * to license or distribute it to anyone else except as part of a product or
+ * program developed by the user.
+ *
+ * SUN RPC IS PROVIDED AS IS WITH NO WARRANTIES OF ANY KIND INCLUDING THE
+ * WARRANTIES OF DESIGN, MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE, OR ARISING FROM A COURSE OF DEALING, USAGE OR TRADE PRACTICE.
+ *
+ * Sun RPC is provided with no support and without any obligation on the
+ * part of Sun Microsystems, Inc. to assist in its use, correction,
+ * modification or enhancement.
+ *
+ * SUN MICROSYSTEMS, INC. SHALL HAVE NO LIABILITY WITH RESPECT TO THE
+ * INFRINGEMENT OF COPYRIGHTS, TRADE SECRETS OR ANY PATENTS BY SUN RPC
+ * OR ANY PART THEREOF.
+ *
+ * In no event will Sun Microsystems, Inc. be liable for any lost revenue
+ * or profits or other special, indirect and consequential damages, even if
+ * Sun has been advised of the possibility of such damages.
+ *
+ * Sun Microsystems, Inc.
+ * 2550 Garcia Avenue
+ * Mountain View, California 94043
+ */
+
+#if defined(LIBC_SCCS) && !defined(lint)
+static char *sccsid2 = "@(#)clnt_tcp.c 1.37 87/10/05 Copyr 1984 Sun Micro";
+static char *sccsid = "@(#)clnt_tcp.c 2.2 88/08/01 4.0 RPCSRC";
+static char sccsid3[] = "@(#)clnt_vc.c 1.19 89/03/16 Copyr 1988 Sun Micro";
+#endif
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * clnt_tcp.c, Implements a TCP/IP based, client side RPC.
+ *
+ * Copyright (C) 1984, Sun Microsystems, Inc.
+ *
+ * TCP based RPC supports 'batched calls'.
+ * A sequence of calls may be batched up in a send buffer.  The rpc call
+ * returns immediately to the client even though the call was not necessarily
+ * sent.  The batching occurs if the results' xdr routine is NULL (0) AND
+ * the rpc timeout value is zero (see clnt.h, rpc).
+ *
+ * Clients should NOT casually batch calls that in fact return results; that is,
+ * the server side should be aware that a call is batched and not produce any
+ * return message. Batched calls that produce many result messages can
+ * deadlock (netlock) the client and the server....
+ *
+ * Now go hang yourself.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/mutex.h>
+#include <sys/pcpu.h>
+#include <sys/proc.h>
+#include <sys/socket.h>
+#include <sys/socketvar.h>
+#include <sys/syslog.h>
+#include <sys/time.h>
+#include <sys/uio.h>
+
+#include <rpc/rpc.h>
+#include "rpc_com.h"
+
+#define MCALL_MSG_SIZE 24
+
+struct cmessage {
+ struct cmsghdr cmsg;
+ struct cmsgcred cmcred;
+};
+
+static enum clnt_stat clnt_vc_call(CLIENT *, rpcproc_t, xdrproc_t, void *,
+ xdrproc_t, void *, struct timeval);
+static void clnt_vc_geterr(CLIENT *, struct rpc_err *);
+static bool_t clnt_vc_freeres(CLIENT *, xdrproc_t, void *);
+static void clnt_vc_abort(CLIENT *);
+static bool_t clnt_vc_control(CLIENT *, u_int, void *);
+static void clnt_vc_destroy(CLIENT *);
+static bool_t time_not_ok(struct timeval *);
+static void clnt_vc_soupcall(struct socket *so, void *arg, int waitflag);
+
+static struct clnt_ops clnt_vc_ops = {
+ .cl_call = clnt_vc_call,
+ .cl_abort = clnt_vc_abort,
+ .cl_geterr = clnt_vc_geterr,
+ .cl_freeres = clnt_vc_freeres,
+ .cl_destroy = clnt_vc_destroy,
+ .cl_control = clnt_vc_control
+};
+
+/*
+ * A pending RPC request which awaits a reply.
+ */
+struct ct_request {
+ TAILQ_ENTRY(ct_request) cr_link;
+ uint32_t cr_xid; /* XID of request */
+ struct mbuf *cr_mrep; /* reply received by upcall */
+ int cr_error; /* any error from upcall */
+};
+
+TAILQ_HEAD(ct_request_list, ct_request);
+
+struct ct_data {
+ struct mtx ct_lock;
+ struct socket *ct_socket; /* connection socket */
+ bool_t ct_closeit; /* close it on destroy */
+	struct timeval	ct_wait;	/* default call timeout */
+ struct sockaddr_storage ct_addr; /* remote addr */
+ struct rpc_err ct_error;
+ uint32_t ct_xid;
+ char ct_mcallc[MCALL_MSG_SIZE]; /* marshalled callmsg */
+ size_t ct_mpos; /* pos after marshal */
+ const char *ct_waitchan;
+ int ct_waitflag;
+ struct mbuf *ct_record; /* current reply record */
+ size_t ct_record_resid; /* how much left of reply to read */
+ bool_t ct_record_eor; /* true if reading last fragment */
+ struct ct_request_list ct_pending;
+};
+
+static const char clnt_vc_errstr[] = "%s : %s";
+static const char clnt_vc_str[] = "clnt_vc_create";
+static const char clnt_read_vc_str[] = "read_vc";
+static const char __no_mem_str[] = "out of memory";
+
+/*
+ * Create a client handle for a connection.
+ * Default options are set, which the user can change using clnt_control().
+ * The rpc/vc package does buffering similar to stdio, so the client
+ * must pick send and receive buffer sizes, 0 => use the default.
+ * NB: the socket is stored in the client handle's private data.
+ * NB: The rpch->cl_auth is set to null authentication. The caller may wish
+ * to set this to something more useful.
+ *
+ * so should be an open socket
+ */
+CLIENT *
+clnt_vc_create(
+	struct socket *so,		/* open socket */
+	struct sockaddr *raddr,		/* server's address */
+	const rpcprog_t prog,		/* program number */
+	const rpcvers_t vers,		/* version number */
+	size_t sendsz,			/* buffer send size */
+	size_t recvsz)			/* buffer recv size */
+{
+ CLIENT *cl; /* client handle */
+	struct ct_data *ct = NULL;	/* private data */
+ struct timeval now;
+ struct rpc_msg call_msg;
+ static uint32_t disrupt;
+ struct __rpc_sockinfo si;
+ XDR xdrs;
+ int error;
+
+ if (disrupt == 0)
+ disrupt = (uint32_t)(long)raddr;
+
+ cl = (CLIENT *)mem_alloc(sizeof (*cl));
+ ct = (struct ct_data *)mem_alloc(sizeof (*ct));
+
+ mtx_init(&ct->ct_lock, "ct->ct_lock", NULL, MTX_DEF);
+
+ if ((so->so_state & (SS_ISCONNECTED|SS_ISCONFIRMING)) == 0) {
+ error = soconnect(so, raddr, curthread);
+ if (error) {
+ rpc_createerr.cf_stat = RPC_SYSTEMERROR;
+ rpc_createerr.cf_error.re_errno = error;
+ goto err;
+ }
+ }
+
+ if (!__rpc_socket2sockinfo(so, &si))
+ goto err;
+
+ ct->ct_closeit = FALSE;
+
+ /*
+ * Set up private data struct
+ */
+ ct->ct_socket = so;
+ ct->ct_wait.tv_sec = -1;
+ ct->ct_wait.tv_usec = -1;
+ memcpy(&ct->ct_addr, raddr, raddr->sa_len);
+
+ /*
+ * Initialize call message
+ */
+ getmicrotime(&now);
+ ct->ct_xid = ((uint32_t)++disrupt) ^ __RPC_GETXID(&now);
+ call_msg.rm_xid = ct->ct_xid;
+ call_msg.rm_direction = CALL;
+ call_msg.rm_call.cb_rpcvers = RPC_MSG_VERSION;
+ call_msg.rm_call.cb_prog = (uint32_t)prog;
+ call_msg.rm_call.cb_vers = (uint32_t)vers;
+
+ /*
+ * pre-serialize the static part of the call msg and stash it away
+ */
+ xdrmem_create(&xdrs, ct->ct_mcallc, MCALL_MSG_SIZE,
+ XDR_ENCODE);
+ if (! xdr_callhdr(&xdrs, &call_msg)) {
+ if (ct->ct_closeit) {
+ soclose(ct->ct_socket);
+ }
+ goto err;
+ }
+ ct->ct_mpos = XDR_GETPOS(&xdrs);
+ XDR_DESTROY(&xdrs);
+ ct->ct_waitchan = "rpcrecv";
+ ct->ct_waitflag = 0;
+
+ /*
+ * Create a client handle which uses xdrrec for serialization
+ * and authnone for authentication.
+ */
+ cl->cl_ops = &clnt_vc_ops;
+ cl->cl_private = ct;
+ cl->cl_auth = authnone_create();
+ sendsz = __rpc_get_t_size(si.si_af, si.si_proto, (int)sendsz);
+ recvsz = __rpc_get_t_size(si.si_af, si.si_proto, (int)recvsz);
+
+ SOCKBUF_LOCK(&ct->ct_socket->so_rcv);
+ ct->ct_socket->so_upcallarg = ct;
+ ct->ct_socket->so_upcall = clnt_vc_soupcall;
+ ct->ct_socket->so_rcv.sb_flags |= SB_UPCALL;
+ SOCKBUF_UNLOCK(&ct->ct_socket->so_rcv);
+
+ ct->ct_record = NULL;
+ ct->ct_record_resid = 0;
+ TAILQ_INIT(&ct->ct_pending);
+ return (cl);
+
+err:
+	if (cl) {
+		if (ct) {
+			/* The mutex was initialised above; tear it down too. */
+			mtx_destroy(&ct->ct_lock);
+			mem_free(ct, sizeof (struct ct_data));
+		}
+		mem_free(cl, sizeof (CLIENT));
+	}
+	return ((CLIENT *)NULL);
+}
+
+static enum clnt_stat
+clnt_vc_call(
+ CLIENT *cl,
+ rpcproc_t proc,
+ xdrproc_t xdr_args,
+ void *args_ptr,
+ xdrproc_t xdr_results,
+ void *results_ptr,
+ struct timeval utimeout)
+{
+ struct ct_data *ct = (struct ct_data *) cl->cl_private;
+ XDR xdrs;
+ struct rpc_msg reply_msg;
+ bool_t ok;
+ int nrefreshes = 2; /* number of times to refresh cred */
+ struct timeval timeout;
+ uint32_t xid;
+ struct mbuf *mreq = NULL;
+ struct ct_request cr;
+ int error;
+
+ mtx_lock(&ct->ct_lock);
+
+ cr.cr_mrep = NULL;
+ cr.cr_error = 0;
+
+ if (ct->ct_wait.tv_usec == -1) {
+ timeout = utimeout; /* use supplied timeout */
+ } else {
+ timeout = ct->ct_wait; /* use default timeout */
+ }
+
+call_again:
+ mtx_assert(&ct->ct_lock, MA_OWNED);
+
+ ct->ct_xid++;
+ xid = ct->ct_xid;
+
+ mtx_unlock(&ct->ct_lock);
+
+ /*
+ * Leave space to pre-pend the record mark.
+ */
+ MGETHDR(mreq, M_WAIT, MT_DATA);
+ MCLGET(mreq, M_WAIT);
+ mreq->m_len = 0;
+ mreq->m_data += sizeof(uint32_t);
+ m_append(mreq, ct->ct_mpos, ct->ct_mcallc);
+
+ /*
+ * The XID is the first thing in the request.
+ */
+ *mtod(mreq, uint32_t *) = htonl(xid);
+
+ xdrmbuf_create(&xdrs, mreq, XDR_ENCODE);
+
+ ct->ct_error.re_status = RPC_SUCCESS;
+
+ if ((! XDR_PUTINT32(&xdrs, &proc)) ||
+ (! AUTH_MARSHALL(cl->cl_auth, &xdrs)) ||
+ (! (*xdr_args)(&xdrs, args_ptr))) {
+ if (ct->ct_error.re_status == RPC_SUCCESS)
+ ct->ct_error.re_status = RPC_CANTENCODEARGS;
+ m_freem(mreq);
+ return (ct->ct_error.re_status);
+ }
+ m_fixhdr(mreq);
+
+ /*
+ * Prepend a record marker containing the packet length.
+ */
+ M_PREPEND(mreq, sizeof(uint32_t), M_WAIT);
+ *mtod(mreq, uint32_t *) =
+ htonl(0x80000000 | (mreq->m_pkthdr.len - sizeof(uint32_t)));
+
+ cr.cr_xid = xid;
+ mtx_lock(&ct->ct_lock);
+ TAILQ_INSERT_TAIL(&ct->ct_pending, &cr, cr_link);
+ mtx_unlock(&ct->ct_lock);
+
+ /*
+ * sosend consumes mreq.
+ */
+ error = sosend(ct->ct_socket, NULL, NULL, mreq, NULL, 0, curthread);
+ mreq = NULL;
+
+ reply_msg.acpted_rply.ar_verf = _null_auth;
+ reply_msg.acpted_rply.ar_results.where = results_ptr;
+ reply_msg.acpted_rply.ar_results.proc = xdr_results;
+
+ mtx_lock(&ct->ct_lock);
+
+ if (error) {
+ TAILQ_REMOVE(&ct->ct_pending, &cr, cr_link);
+
+ ct->ct_error.re_errno = error;
+ ct->ct_error.re_status = RPC_CANTSEND;
+ goto out;
+ }
+
+ /*
+	 * Check to see if we got an upcall while waiting for the
+	 * lock. In either case below (error or reply), the request has
+	 * already been removed from ct->ct_pending.
+ */
+ if (cr.cr_error) {
+ ct->ct_error.re_errno = cr.cr_error;
+ ct->ct_error.re_status = RPC_CANTRECV;
+ goto out;
+ }
+ if (cr.cr_mrep) {
+ goto got_reply;
+ }
+
+ /*
+ * Hack to provide rpc-based message passing
+ */
+ if (timeout.tv_sec == 0 && timeout.tv_usec == 0) {
+ if (cr.cr_xid)
+ TAILQ_REMOVE(&ct->ct_pending, &cr, cr_link);
+ ct->ct_error.re_status = RPC_TIMEDOUT;
+ goto out;
+ }
+
+ error = msleep(&cr, &ct->ct_lock, ct->ct_waitflag, ct->ct_waitchan,
+ tvtohz(&timeout));
+
+ if (error) {
+ /*
+ * The sleep returned an error so our request is still
+ * on the list. Turn the error code into an
+ * appropriate client status.
+ */
+ if (cr.cr_xid)
+ TAILQ_REMOVE(&ct->ct_pending, &cr, cr_link);
+ ct->ct_error.re_errno = error;
+ switch (error) {
+ case EINTR:
+ ct->ct_error.re_status = RPC_INTR;
+ break;
+ case EWOULDBLOCK:
+ ct->ct_error.re_status = RPC_TIMEDOUT;
+ break;
+ default:
+ ct->ct_error.re_status = RPC_CANTRECV;
+ }
+ goto out;
+ } else {
+ /*
+ * We were woken up by the upcall. If the
+ * upcall had a receive error, report that,
+ * otherwise we have a reply.
+ */
+ if (cr.cr_error) {
+ ct->ct_error.re_errno = cr.cr_error;
+ ct->ct_error.re_status = RPC_CANTRECV;
+ goto out;
+ }
+ }
+
+got_reply:
+ /*
+ * Now decode and validate the response. We need to drop the
+ * lock since xdr_replymsg may end up sleeping in malloc.
+ */
+ mtx_unlock(&ct->ct_lock);
+
+ xdrmbuf_create(&xdrs, cr.cr_mrep, XDR_DECODE);
+ ok = xdr_replymsg(&xdrs, &reply_msg);
+ XDR_DESTROY(&xdrs);
+ cr.cr_mrep = NULL;
+
+ mtx_lock(&ct->ct_lock);
+
+ if (ok) {
+ if ((reply_msg.rm_reply.rp_stat == MSG_ACCEPTED) &&
+ (reply_msg.acpted_rply.ar_stat == SUCCESS))
+ ct->ct_error.re_status = RPC_SUCCESS;
+ else
+ _seterr_reply(&reply_msg, &(ct->ct_error));
+
+ if (ct->ct_error.re_status == RPC_SUCCESS) {
+ if (! AUTH_VALIDATE(cl->cl_auth,
+ &reply_msg.acpted_rply.ar_verf)) {
+ ct->ct_error.re_status = RPC_AUTHERROR;
+ ct->ct_error.re_why = AUTH_INVALIDRESP;
+ }
+ if (reply_msg.acpted_rply.ar_verf.oa_base != NULL) {
+ xdrs.x_op = XDR_FREE;
+ (void) xdr_opaque_auth(&xdrs,
+ &(reply_msg.acpted_rply.ar_verf));
+ }
+ } /* end successful completion */
+ /*
+ * If unsuccesful AND error is an authentication error
+		 * If unsuccessful AND the error is an authentication error,
+		 * then refresh credentials and try again, else break.
+ else if (ct->ct_error.re_status == RPC_AUTHERROR)
+ /* maybe our credentials need to be refreshed ... */
+ if (nrefreshes > 0 &&
+ AUTH_REFRESH(cl->cl_auth, &reply_msg)) {
+ nrefreshes--;
+ goto call_again;
+ }
+ /* end of unsuccessful completion */
+ } /* end of valid reply message */
+ else {
+ ct->ct_error.re_status = RPC_CANTDECODERES;
+ }
+out:
+ mtx_assert(&ct->ct_lock, MA_OWNED);
+
+ if (mreq)
+ m_freem(mreq);
+ if (cr.cr_mrep)
+ m_freem(cr.cr_mrep);
+
+ mtx_unlock(&ct->ct_lock);
+ return (ct->ct_error.re_status);
+}
+
+static void
+clnt_vc_geterr(CLIENT *cl, struct rpc_err *errp)
+{
+ struct ct_data *ct = (struct ct_data *) cl->cl_private;
+
+ *errp = ct->ct_error;
+}
+
+static bool_t
+clnt_vc_freeres(CLIENT *cl, xdrproc_t xdr_res, void *res_ptr)
+{
+ XDR xdrs;
+ bool_t dummy;
+
+ xdrs.x_op = XDR_FREE;
+ dummy = (*xdr_res)(&xdrs, res_ptr);
+
+ return (dummy);
+}
+
+/*ARGSUSED*/
+static void
+clnt_vc_abort(CLIENT *cl)
+{
+}
+
+static bool_t
+clnt_vc_control(CLIENT *cl, u_int request, void *info)
+{
+ struct ct_data *ct = (struct ct_data *)cl->cl_private;
+ void *infop = info;
+
+ mtx_lock(&ct->ct_lock);
+
+ switch (request) {
+ case CLSET_FD_CLOSE:
+ ct->ct_closeit = TRUE;
+ mtx_unlock(&ct->ct_lock);
+ return (TRUE);
+ case CLSET_FD_NCLOSE:
+ ct->ct_closeit = FALSE;
+ mtx_unlock(&ct->ct_lock);
+ return (TRUE);
+ default:
+ break;
+ }
+
+ /* for other requests which use info */
+ if (info == NULL) {
+ mtx_unlock(&ct->ct_lock);
+ return (FALSE);
+ }
+ switch (request) {
+ case CLSET_TIMEOUT:
+ if (time_not_ok((struct timeval *)info)) {
+ mtx_unlock(&ct->ct_lock);
+ return (FALSE);
+ }
+ ct->ct_wait = *(struct timeval *)infop;
+ break;
+ case CLGET_TIMEOUT:
+ *(struct timeval *)infop = ct->ct_wait;
+ break;
+ case CLGET_SERVER_ADDR:
+ (void) memcpy(info, &ct->ct_addr, (size_t)ct->ct_addr.ss_len);
+ break;
+ case CLGET_SVC_ADDR:
+ /*
+ * Slightly different semantics to userland - we use
+ * sockaddr instead of netbuf.
+ */
+ memcpy(info, &ct->ct_addr, ct->ct_addr.ss_len);
+ break;
+ case CLSET_SVC_ADDR: /* set to new address */
+ mtx_unlock(&ct->ct_lock);
+ return (FALSE);
+ case CLGET_XID:
+ *(uint32_t *)info = ct->ct_xid;
+ break;
+ case CLSET_XID:
+ /* This will set the xid of the NEXT call */
+ /* decrement by 1 as clnt_vc_call() increments once */
+ ct->ct_xid = *(uint32_t *)info - 1;
+ break;
+ case CLGET_VERS:
+ /*
+ * This RELIES on the information that, in the call body,
+ * the version number field is the fifth field from the
+		 * beginning of the RPC header. MUST be changed if the
+		 * call_struct is changed.
+ */
+ *(uint32_t *)info =
+ ntohl(*(uint32_t *)(void *)(ct->ct_mcallc +
+ 4 * BYTES_PER_XDR_UNIT));
+ break;
+
+ case CLSET_VERS:
+ *(uint32_t *)(void *)(ct->ct_mcallc +
+ 4 * BYTES_PER_XDR_UNIT) =
+ htonl(*(uint32_t *)info);
+ break;
+
+ case CLGET_PROG:
+ /*
+ * This RELIES on the information that, in the call body,
+ * the program number field is the fourth field from the
+		 * beginning of the RPC header. MUST be changed if the
+		 * call_struct is changed.
+ */
+ *(uint32_t *)info =
+ ntohl(*(uint32_t *)(void *)(ct->ct_mcallc +
+ 3 * BYTES_PER_XDR_UNIT));
+ break;
+
+ case CLSET_PROG:
+ *(uint32_t *)(void *)(ct->ct_mcallc +
+ 3 * BYTES_PER_XDR_UNIT) =
+ htonl(*(uint32_t *)info);
+ break;
+
+ case CLSET_WAITCHAN:
+ ct->ct_waitchan = *(const char **)info;
+ break;
+
+ case CLGET_WAITCHAN:
+ *(const char **) info = ct->ct_waitchan;
+ break;
+
+ case CLSET_INTERRUPTIBLE:
+ if (*(int *) info)
+ ct->ct_waitflag = PCATCH;
+ else
+ ct->ct_waitflag = 0;
+ break;
+
+ case CLGET_INTERRUPTIBLE:
+ if (ct->ct_waitflag)
+ *(int *) info = TRUE;
+ else
+ *(int *) info = FALSE;
+ break;
+
+ default:
+ mtx_unlock(&ct->ct_lock);
+ return (FALSE);
+ }
+
+ mtx_unlock(&ct->ct_lock);
+ return (TRUE);
+}
+
+static void
+clnt_vc_destroy(CLIENT *cl)
+{
+ struct ct_data *ct = (struct ct_data *) cl->cl_private;
+ struct socket *so = NULL;
+
+ mtx_lock(&ct->ct_lock);
+
+ if (ct->ct_socket) {
+ SOCKBUF_LOCK(&ct->ct_socket->so_rcv);
+ ct->ct_socket->so_upcallarg = NULL;
+ ct->ct_socket->so_upcall = NULL;
+ ct->ct_socket->so_rcv.sb_flags &= ~SB_UPCALL;
+ SOCKBUF_UNLOCK(&ct->ct_socket->so_rcv);
+
+ KASSERT(!TAILQ_FIRST(&ct->ct_pending),
+ ("Destroying RPC client with pending RPC requests"));
+
+ if (ct->ct_closeit) {
+ so = ct->ct_socket;
+ }
+ }
+
+ mtx_unlock(&ct->ct_lock);
+
+ mtx_destroy(&ct->ct_lock);
+ if (so) {
+ soshutdown(so, SHUT_WR);
+ soclose(so);
+ }
+ mem_free(ct, sizeof(struct ct_data));
+ mem_free(cl, sizeof(CLIENT));
+}
+
+/*
+ * Make sure that the time is not garbage.  A value of -1 is disallowed.
+ * Note this is different from time_not_ok in clnt_dg.c
+ */
+static bool_t
+time_not_ok(struct timeval *t)
+{
+ return (t->tv_sec <= -1 || t->tv_sec > 100000000 ||
+ t->tv_usec <= -1 || t->tv_usec > 1000000);
+}
+
+void
+clnt_vc_soupcall(struct socket *so, void *arg, int waitflag)
+{
+ struct ct_data *ct = (struct ct_data *) arg;
+ struct uio uio;
+ struct mbuf *m;
+ struct ct_request *cr;
+ int error, rcvflag, foundreq;
+ uint32_t xid, header;
+
+ uio.uio_td = curthread;
+ do {
+ /*
+ * If ct_record_resid is zero, we are waiting for a
+ * record mark.
+ */
+ if (ct->ct_record_resid == 0) {
+ bool_t do_read;
+
+ /*
+ * Make sure there is either a whole record
+ * mark in the buffer or there is some other
+ * error condition
+ */
+ do_read = FALSE;
+ SOCKBUF_LOCK(&so->so_rcv);
+ if (so->so_rcv.sb_cc >= sizeof(uint32_t)
+ || (so->so_rcv.sb_state & SBS_CANTRCVMORE)
+ || so->so_error)
+ do_read = TRUE;
+ SOCKBUF_UNLOCK(&so->so_rcv);
+
+ if (!do_read)
+ return;
+
+ uio.uio_resid = sizeof(uint32_t);
+ m = NULL;
+ rcvflag = MSG_DONTWAIT | MSG_SOCALLBCK;
+ error = soreceive(so, NULL, &uio, &m, NULL, &rcvflag);
+
+ if (error == EWOULDBLOCK)
+ break;
+
+ /*
+ * If there was an error, wake up all pending
+ * requests.
+ */
+ if (error || uio.uio_resid > 0) {
+ wakeup_all:
+ mtx_lock(&ct->ct_lock);
+ if (!error) {
+ /*
+ * We must have got EOF trying
+ * to read from the stream.
+ */
+ error = ECONNRESET;
+ }
+ ct->ct_error.re_status = RPC_CANTRECV;
+ ct->ct_error.re_errno = error;
+ TAILQ_FOREACH(cr, &ct->ct_pending, cr_link) {
+ cr->cr_error = error;
+ wakeup(cr);
+ }
+ TAILQ_INIT(&ct->ct_pending);
+ mtx_unlock(&ct->ct_lock);
+ break;
+ }
+ memcpy(&header, mtod(m, uint32_t *), sizeof(uint32_t));
+ header = ntohl(header);
+ ct->ct_record = NULL;
+ ct->ct_record_resid = header & 0x7fffffff;
+ ct->ct_record_eor = ((header & 0x80000000) != 0);
+ m_freem(m);
+ } else {
+ /*
+ * We have the record mark. Read as much as
+ * the socket has buffered up to the end of
+ * this record.
+ */
+ uio.uio_resid = ct->ct_record_resid;
+ m = NULL;
+ rcvflag = MSG_DONTWAIT | MSG_SOCALLBCK;
+ error = soreceive(so, NULL, &uio, &m, NULL, &rcvflag);
+
+ if (error == EWOULDBLOCK)
+ break;
+
+ if (error || uio.uio_resid == ct->ct_record_resid)
+ goto wakeup_all;
+
+ /*
+ * If we have part of the record already,
+ * chain this bit onto the end.
+ */
+ if (ct->ct_record)
+ m_last(ct->ct_record)->m_next = m;
+ else
+ ct->ct_record = m;
+
+ ct->ct_record_resid = uio.uio_resid;
+
+ /*
+ * If we have the entire record, see if we can
+ * match it to a request.
+ */
+ if (ct->ct_record_resid == 0
+ && ct->ct_record_eor) {
+ /*
+ * The XID is in the first uint32_t of
+ * the reply.
+ */
+ ct->ct_record =
+ m_pullup(ct->ct_record, sizeof(xid));
+ if (!ct->ct_record)
+ break;
+ memcpy(&xid,
+ mtod(ct->ct_record, uint32_t *),
+ sizeof(uint32_t));
+ xid = ntohl(xid);
+
+ mtx_lock(&ct->ct_lock);
+ foundreq = 0;
+ TAILQ_FOREACH(cr, &ct->ct_pending, cr_link) {
+ if (cr->cr_xid == xid) {
+ /*
+ * This one
+ * matches. We snip it
+ * out of the pending
+ * list and leave the
+ * reply mbuf in
+ * cr->cr_mrep. Set
+ * the XID to zero so
+ * that clnt_vc_call
+ * can know not to
+ * repeat the
+ * TAILQ_REMOVE.
+ */
+ TAILQ_REMOVE(&ct->ct_pending,
+ cr, cr_link);
+ cr->cr_xid = 0;
+ cr->cr_mrep = ct->ct_record;
+ cr->cr_error = 0;
+ foundreq = 1;
+ wakeup(cr);
+ break;
+ }
+ }
+ mtx_unlock(&ct->ct_lock);
+
+ if (!foundreq)
+ m_freem(ct->ct_record);
+ ct->ct_record = NULL;
+ }
+ }
+ } while (m);
+}
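
For the TCP transport, each RPC message is framed by a 4-byte record mark: the low 31 bits carry the fragment length and the top bit flags the final fragment of a record, exactly what clnt_vc_call() prepends on send and clnt_vc_soupcall() strips on receive. A standalone sketch of that framing arithmetic (userland C, invented helper names):

#include <arpa/inet.h>	/* htonl, ntohl */
#include <stdint.h>
#include <stdio.h>

#define LAST_FRAG	0x80000000UL	/* top bit: final fragment of record */
#define FRAG_LEN_MASK	0x7fffffffUL	/* low 31 bits: fragment length */

/* Build the on-the-wire record mark for a fragment. */
static uint32_t
make_record_mark(uint32_t len, int last)
{
	return (htonl((last ? LAST_FRAG : 0) | (len & FRAG_LEN_MASK)));
}

/* Decode a received record mark into length and end-of-record flag. */
static void
parse_record_mark(uint32_t wire, uint32_t *lenp, int *eorp)
{
	uint32_t host = ntohl(wire);

	*lenp = host & FRAG_LEN_MASK;
	*eorp = (host & LAST_FRAG) != 0;
}

int
main(void)
{
	uint32_t mark, len;
	int eor;

	mark = make_record_mark(148, 1);	/* 148-byte final fragment */
	parse_record_mark(mark, &len, &eor);
	printf("len=%u eor=%d\n", len, eor);
	return (0);
}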
diff --git a/sys/rpc/getnetconfig.c b/sys/rpc/getnetconfig.c
new file mode 100644
index 0000000..782a525
--- /dev/null
+++ b/sys/rpc/getnetconfig.c
@@ -0,0 +1,138 @@
+/*-
+ * Copyright (c) 2008 Isilon Inc http://www.isilon.com/
+ * Authors: Doug Rabson <dfr@rabson.org>
+ * Developed with Red Inc: Alfred Perlstein <alfred@freebsd.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "opt_inet6.h"
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/malloc.h>
+#include <sys/systm.h>
+
+#include <rpc/types.h>
+
+/*
+ * For in-kernel use, we use a simple compiled-in configuration.
+ */
+
+static struct netconfig netconfigs[] = {
+#ifdef INET6
+ {
+ .nc_netid = "udp6",
+ .nc_semantics = NC_TPI_CLTS,
+ .nc_flag = NC_VISIBLE,
+ .nc_protofmly = "inet6",
+ .nc_proto = "udp",
+ },
+ {
+ .nc_netid = "tcp6",
+ .nc_semantics = NC_TPI_COTS_ORD,
+ .nc_flag = NC_VISIBLE,
+ .nc_protofmly = "inet6",
+ .nc_proto = "tcp",
+ },
+#endif
+ {
+ .nc_netid = "udp",
+ .nc_semantics = NC_TPI_CLTS,
+ .nc_flag = NC_VISIBLE,
+ .nc_protofmly = "inet",
+ .nc_proto = "udp",
+ },
+ {
+ .nc_netid = "tcp",
+ .nc_semantics = NC_TPI_COTS_ORD,
+ .nc_flag = NC_VISIBLE,
+ .nc_protofmly = "inet",
+ .nc_proto = "tcp",
+ },
+ {
+ .nc_netid = "local",
+ .nc_semantics = NC_TPI_COTS_ORD,
+ .nc_flag = 0,
+ .nc_protofmly = "loopback",
+ .nc_proto = "",
+ },
+ {
+ .nc_netid = NULL,
+ }
+};
+
+void *
+setnetconfig(void)
+{
+ struct netconfig **nconfp;
+
+ nconfp = malloc(sizeof(struct netconfig *), M_RPC, M_WAITOK);
+ *nconfp = netconfigs;
+
+ return ((void *) nconfp);
+}
+
+struct netconfig *
+getnetconfig(void *handle)
+{
+ struct netconfig **nconfp = (struct netconfig **) handle;
+ struct netconfig *nconf;
+
+ nconf = *nconfp;
+ if (nconf->nc_netid == NULL)
+ return (NULL);
+
+ (*nconfp)++;
+
+ return (nconf);
+}
+
+struct netconfig *
+getnetconfigent(const char *netid)
+{
+ struct netconfig *nconf;
+
+ for (nconf = netconfigs; nconf->nc_netid; nconf++) {
+ if (!strcmp(netid, nconf->nc_netid))
+ return (nconf);
+ }
+
+ return (NULL);
+}
+
+void
+freenetconfigent(struct netconfig *nconf)
+{
+
+}
+
+int
+endnetconfig(void * handle)
+{
+ struct netconfig **nconfp = (struct netconfig **) handle;
+
+ free(nconfp, M_RPC);
+ return (0);
+}
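
The kernel keeps its netconfig table compiled in, but the iteration interface mirrors the userland netconfig(3) API, so the same loop shape works against /etc/netconfig in an ordinary program. A hedged userland sketch:

#include <netconfig.h>
#include <stdio.h>

/*
 * Walk every configured transport and print its netid, protocol family
 * and protocol - roughly what the in-kernel table above hard-codes.
 */
int
main(void)
{
	void *handle;
	struct netconfig *nconf;

	handle = setnetconfig();
	if (handle == NULL) {
		fprintf(stderr, "setnetconfig failed\n");
		return (1);
	}
	while ((nconf = getnetconfig(handle)) != NULL)
		printf("%-8s %-10s %s\n", nconf->nc_netid,
		    nconf->nc_protofmly, nconf->nc_proto);
	endnetconfig(handle);
	return (0);
}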
diff --git a/sys/rpc/inet_ntop.c b/sys/rpc/inet_ntop.c
new file mode 100644
index 0000000..2043be3
--- /dev/null
+++ b/sys/rpc/inet_ntop.c
@@ -0,0 +1,187 @@
+/*
+ * Copyright (c) 2004 by Internet Systems Consortium, Inc. ("ISC")
+ * Copyright (c) 1996-1999 by Internet Software Consortium.
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#if defined(LIBC_SCCS) && !defined(lint)
+static const char rcsid[] = "$Id: inet_ntop.c,v 1.3.18.2 2005/11/03 23:02:22 marka Exp $";
+#endif /* LIBC_SCCS and not lint */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <sys/systm.h>
+
+#include <rpc/types.h>
+#include "rpc_com.h"
+
+/*%
+ * WARNING: Don't even consider trying to compile this on a system where
+ * sizeof(int) < 4. sizeof(int) > 4 is fine; all the world's not a VAX.
+ */
+
+static const char *inet_ntop4(const u_char *src, char *dst, socklen_t size);
+static const char *inet_ntop6(const u_char *src, char *dst, socklen_t size);
+
+/* char *
+ * inet_ntop(af, src, dst, size)
+ * convert a network format address to presentation format.
+ * return:
+ * pointer to presentation format address (`dst'), or NULL (see errno).
+ * author:
+ * Paul Vixie, 1996.
+ */
+const char *
+__rpc_inet_ntop(int af, const void * __restrict src, char * __restrict dst,
+ socklen_t size)
+{
+ switch (af) {
+ case AF_INET:
+ return (inet_ntop4(src, dst, size));
+ case AF_INET6:
+ return (inet_ntop6(src, dst, size));
+ default:
+ return (NULL);
+ }
+ /* NOTREACHED */
+}
+
+/* const char *
+ * inet_ntop4(src, dst, size)
+ * format an IPv4 address
+ * return:
+ * `dst' (as a const)
+ * notes:
+ * (1) uses no statics
+ * (2) takes a u_char* not an in_addr as input
+ * author:
+ * Paul Vixie, 1996.
+ */
+static const char *
+inet_ntop4(const u_char *src, char *dst, socklen_t size)
+{
+ static const char fmt[] = "%u.%u.%u.%u";
+ char tmp[sizeof "255.255.255.255"];
+ int l;
+
+ l = snprintf(tmp, sizeof(tmp), fmt, src[0], src[1], src[2], src[3]);
+ if (l <= 0 || (socklen_t) l >= size) {
+ return (NULL);
+ }
+ strlcpy(dst, tmp, size);
+ return (dst);
+}
+
+/* const char *
+ * inet_ntop6(src, dst, size)
+ * convert IPv6 binary address into presentation (printable) format
+ * author:
+ * Paul Vixie, 1996.
+ */
+static const char *
+inet_ntop6(const u_char *src, char *dst, socklen_t size)
+{
+ /*
+ * Note that int32_t and int16_t need only be "at least" large enough
+ * to contain a value of the specified size. On some systems, like
+ * Crays, there is no such thing as an integer variable with 16 bits.
+ * Keep this in mind if you think this function should have been coded
+ * to use pointer overlays. All the world's not a VAX.
+ */
+ char tmp[sizeof "ffff:ffff:ffff:ffff:ffff:ffff:255.255.255.255"], *tp;
+ struct { int base, len; } best, cur;
+#define NS_IN6ADDRSZ 16
+#define NS_INT16SZ 2
+ u_int words[NS_IN6ADDRSZ / NS_INT16SZ];
+ int i;
+
+ /*
+ * Preprocess:
+ * Copy the input (bytewise) array into a wordwise array.
+ * Find the longest run of 0x00's in src[] for :: shorthanding.
+ */
+ memset(words, '\0', sizeof words);
+ for (i = 0; i < NS_IN6ADDRSZ; i++)
+ words[i / 2] |= (src[i] << ((1 - (i % 2)) << 3));
+ best.base = -1;
+ best.len = 0;
+ cur.base = -1;
+ cur.len = 0;
+ for (i = 0; i < (NS_IN6ADDRSZ / NS_INT16SZ); i++) {
+ if (words[i] == 0) {
+ if (cur.base == -1)
+ cur.base = i, cur.len = 1;
+ else
+ cur.len++;
+ } else {
+ if (cur.base != -1) {
+ if (best.base == -1 || cur.len > best.len)
+ best = cur;
+ cur.base = -1;
+ }
+ }
+ }
+ if (cur.base != -1) {
+ if (best.base == -1 || cur.len > best.len)
+ best = cur;
+ }
+ if (best.base != -1 && best.len < 2)
+ best.base = -1;
+
+ /*
+ * Format the result.
+ */
+ tp = tmp;
+ for (i = 0; i < (NS_IN6ADDRSZ / NS_INT16SZ); i++) {
+ /* Are we inside the best run of 0x00's? */
+ if (best.base != -1 && i >= best.base &&
+ i < (best.base + best.len)) {
+ if (i == best.base)
+ *tp++ = ':';
+ continue;
+ }
+ /* Are we following an initial run of 0x00s or any real hex? */
+ if (i != 0)
+ *tp++ = ':';
+ /* Is this address an encapsulated IPv4? */
+ if (i == 6 && best.base == 0 && (best.len == 6 ||
+ (best.len == 7 && words[7] != 0x0001) ||
+ (best.len == 5 && words[5] == 0xffff))) {
+ if (!inet_ntop4(src+12, tp, sizeof tmp - (tp - tmp)))
+ return (NULL);
+ tp += strlen(tp);
+ break;
+ }
+ tp += sprintf(tp, "%x", words[i]);
+ }
+ /* Was it a trailing run of 0x00's? */
+ if (best.base != -1 && (best.base + best.len) ==
+ (NS_IN6ADDRSZ / NS_INT16SZ))
+ *tp++ = ':';
+ *tp++ = '\0';
+
+ /*
+ * Check for overflow, copy, and we're done.
+ */
+ if ((socklen_t)(tp - tmp) > size) {
+ return (NULL);
+ }
+ strcpy(dst, tmp);
+ return (dst);
+}
+
+/*! \file */
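
The function above is a private in-kernel copy (__rpc_inet_ntop); userland code would call the standard inet_ntop(3), which behaves the same way. A small, self-contained example of the userland call:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>

int
main(void)
{
	struct in_addr v4;
	struct in6_addr v6 = IN6ADDR_LOOPBACK_INIT;
	char buf4[INET_ADDRSTRLEN], buf6[INET6_ADDRSTRLEN];

	v4.s_addr = htonl(INADDR_LOOPBACK);	/* 127.0.0.1 */

	if (inet_ntop(AF_INET, &v4, buf4, sizeof(buf4)) == NULL ||
	    inet_ntop(AF_INET6, &v6, buf6, sizeof(buf6)) == NULL) {
		perror("inet_ntop");
		return (1);
	}
	printf("%s\n%s\n", buf4, buf6);	/* prints "127.0.0.1" and "::1" */
	return (0);
}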
diff --git a/sys/rpc/inet_pton.c b/sys/rpc/inet_pton.c
new file mode 100644
index 0000000..49bf957
--- /dev/null
+++ b/sys/rpc/inet_pton.c
@@ -0,0 +1,224 @@
+/*
+ * Copyright (c) 2004 by Internet Systems Consortium, Inc. ("ISC")
+ * Copyright (c) 1996,1999 by Internet Software Consortium.
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#if defined(LIBC_SCCS) && !defined(lint)
+static const char rcsid[] = "$Id: inet_pton.c,v 1.3.18.2 2005/07/28 07:38:07 marka Exp $";
+#endif /* LIBC_SCCS and not lint */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <sys/systm.h>
+
+#include <rpc/types.h>
+#include "rpc_com.h"
+
+/*%
+ * WARNING: Don't even consider trying to compile this on a system where
+ * sizeof(int) < 4. sizeof(int) > 4 is fine; all the world's not a VAX.
+ */
+
+static int inet_pton4(const char *src, u_char *dst);
+static int inet_pton6(const char *src, u_char *dst);
+
+/* int
+ * inet_pton(af, src, dst)
+ * convert from presentation format (which usually means ASCII printable)
+ * to network format (which is usually some kind of binary format).
+ * return:
+ * 1 if the address was valid for the specified address family
+ * 0 if the address wasn't valid (`dst' is untouched in this case)
+ * -1 if some other error occurred (`dst' is untouched in this case, too)
+ * author:
+ * Paul Vixie, 1996.
+ */
+int
+__rpc_inet_pton(int af, const char * __restrict src, void * __restrict dst)
+{
+ switch (af) {
+ case AF_INET:
+ return (inet_pton4(src, dst));
+ case AF_INET6:
+ return (inet_pton6(src, dst));
+ default:
+ return (-1);
+ }
+ /* NOTREACHED */
+}
+
+/* int
+ * inet_pton4(src, dst)
+ * like inet_aton() but without all the hexadecimal and shorthand.
+ * return:
+ * 1 if `src' is a valid dotted quad, else 0.
+ * notice:
+ * does not touch `dst' unless it's returning 1.
+ * author:
+ * Paul Vixie, 1996.
+ */
+static int
+inet_pton4(const char *src, u_char *dst)
+{
+ static const char digits[] = "0123456789";
+ int saw_digit, octets, ch;
+#define NS_INADDRSZ 4
+ u_char tmp[NS_INADDRSZ], *tp;
+
+ saw_digit = 0;
+ octets = 0;
+ *(tp = tmp) = 0;
+ while ((ch = *src++) != '\0') {
+ const char *pch;
+
+ if ((pch = strchr(digits, ch)) != NULL) {
+ u_int new = *tp * 10 + (pch - digits);
+
+ if (saw_digit && *tp == 0)
+ return (0);
+ if (new > 255)
+ return (0);
+ *tp = new;
+ if (!saw_digit) {
+ if (++octets > 4)
+ return (0);
+ saw_digit = 1;
+ }
+ } else if (ch == '.' && saw_digit) {
+ if (octets == 4)
+ return (0);
+ *++tp = 0;
+ saw_digit = 0;
+ } else
+ return (0);
+ }
+ if (octets < 4)
+ return (0);
+ memcpy(dst, tmp, NS_INADDRSZ);
+ return (1);
+}
+
+/* int
+ * inet_pton6(src, dst)
+ * convert presentation level address to network order binary form.
+ * return:
+ * 1 if `src' is a valid [RFC1884 2.2] address, else 0.
+ * notice:
+ * (1) does not touch `dst' unless it's returning 1.
+ * (2) :: in a full address is silently ignored.
+ * credit:
+ * inspired by Mark Andrews.
+ * author:
+ * Paul Vixie, 1996.
+ */
+static int
+inet_pton6(const char *src, u_char *dst)
+{
+ static const char xdigits_l[] = "0123456789abcdef",
+ xdigits_u[] = "0123456789ABCDEF";
+#define NS_IN6ADDRSZ 16
+#define NS_INT16SZ 2
+ u_char tmp[NS_IN6ADDRSZ], *tp, *endp, *colonp;
+ const char *xdigits, *curtok;
+ int ch, seen_xdigits;
+ u_int val;
+
+ memset((tp = tmp), '\0', NS_IN6ADDRSZ);
+ endp = tp + NS_IN6ADDRSZ;
+ colonp = NULL;
+ /* Leading :: requires some special handling. */
+ if (*src == ':')
+ if (*++src != ':')
+ return (0);
+ curtok = src;
+ seen_xdigits = 0;
+ val = 0;
+ while ((ch = *src++) != '\0') {
+ const char *pch;
+
+ if ((pch = strchr((xdigits = xdigits_l), ch)) == NULL)
+ pch = strchr((xdigits = xdigits_u), ch);
+ if (pch != NULL) {
+ val <<= 4;
+ val |= (pch - xdigits);
+ if (++seen_xdigits > 4)
+ return (0);
+ continue;
+ }
+ if (ch == ':') {
+ curtok = src;
+ if (!seen_xdigits) {
+ if (colonp)
+ return (0);
+ colonp = tp;
+ continue;
+ } else if (*src == '\0') {
+ return (0);
+ }
+ if (tp + NS_INT16SZ > endp)
+ return (0);
+ *tp++ = (u_char) (val >> 8) & 0xff;
+ *tp++ = (u_char) val & 0xff;
+ seen_xdigits = 0;
+ val = 0;
+ continue;
+ }
+ if (ch == '.' && ((tp + NS_INADDRSZ) <= endp) &&
+ inet_pton4(curtok, tp) > 0) {
+ tp += NS_INADDRSZ;
+ seen_xdigits = 0;
+ break; /*%< '\\0' was seen by inet_pton4(). */
+ }
+ return (0);
+ }
+ if (seen_xdigits) {
+ if (tp + NS_INT16SZ > endp)
+ return (0);
+ *tp++ = (u_char) (val >> 8) & 0xff;
+ *tp++ = (u_char) val & 0xff;
+ }
+ if (colonp != NULL) {
+ /*
+ * Since some memmove()'s erroneously fail to handle
+ * overlapping regions, we'll do the shift by hand.
+ */
+ const int n = tp - colonp;
+ int i;
+
+ if (tp == endp)
+ return (0);
+ for (i = 1; i <= n; i++) {
+ endp[- i] = colonp[n - i];
+ colonp[n - i] = 0;
+ }
+ tp = endp;
+ }
+ if (tp != endp)
+ return (0);
+ memcpy(dst, tmp, NS_IN6ADDRSZ);
+ return (1);
+}
+
+/*
+ * Weak aliases for applications that use certain private entry points,
+ * and fail to include <arpa/inet.h>.
+ */
+#undef inet_pton
+__weak_reference(__inet_pton, inet_pton);
+
+/*! \file */
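
Likewise, __rpc_inet_pton is the in-kernel twin of the standard inet_pton(3). The return convention documented above (1 = valid, 0 = not valid for that family, -1 = bad family) can be exercised from userland like this; addr_family is an invented helper for the sketch:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>

/* Try a string as IPv4 first, then IPv6, using the documented return codes. */
static const char *
addr_family(const char *s)
{
	struct in_addr v4;
	struct in6_addr v6;

	if (inet_pton(AF_INET, s, &v4) == 1)
		return ("IPv4");
	if (inet_pton(AF_INET6, s, &v6) == 1)
		return ("IPv6");
	return ("not an IP address");
}

int
main(void)
{
	printf("%s\n", addr_family("192.0.2.1"));
	printf("%s\n", addr_family("2001:db8::1"));
	printf("%s\n", addr_family("example.com"));
	return (0);
}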
diff --git a/sys/rpc/netconfig.h b/sys/rpc/netconfig.h
new file mode 100644
index 0000000..d49b925
--- /dev/null
+++ b/sys/rpc/netconfig.h
@@ -0,0 +1,99 @@
+/* $NetBSD: netconfig.h,v 1.1 2000/06/02 22:57:54 fvdl Exp $ */
+/* $FreeBSD$ */
+
+
+#ifndef _NETCONFIG_H_
+#define _NETCONFIG_H_
+
+#include <sys/cdefs.h>
+
+#define NETCONFIG "/etc/netconfig"
+#define NETPATH "NETPATH"
+
+struct netconfig {
+ char *nc_netid; /* Network ID */
+ unsigned long nc_semantics; /* Semantics (see below) */
+ unsigned long nc_flag; /* Flags (see below) */
+ char *nc_protofmly; /* Protocol family */
+ char *nc_proto; /* Protocol name */
+ char *nc_device; /* Network device pathname */
+ unsigned long nc_nlookups; /* Number of directory lookup libs */
+ char **nc_lookups; /* Names of the libraries */
+ unsigned long nc_unused[9]; /* reserved */
+};
+
+typedef struct {
+ struct netconfig **nc_head;
+ struct netconfig **nc_curr;
+} NCONF_HANDLE;
+
+/*
+ * nc_semantics values
+ */
+#define NC_TPI_CLTS 1
+#define NC_TPI_COTS 2
+#define NC_TPI_COTS_ORD 3
+#define NC_TPI_RAW 4
+
+/*
+ * nc_flag values
+ */
+#define NC_NOFLAG 0x00
+#define NC_VISIBLE 0x01
+#define NC_BROADCAST 0x02
+
+/*
+ * nc_protofmly values
+ */
+#define NC_NOPROTOFMLY "-"
+#define NC_LOOPBACK "loopback"
+#define NC_INET "inet"
+#define NC_INET6 "inet6"
+#define NC_IMPLINK "implink"
+#define NC_PUP "pup"
+#define NC_CHAOS "chaos"
+#define NC_NS "ns"
+#define NC_NBS "nbs"
+#define NC_ECMA "ecma"
+#define NC_DATAKIT "datakit"
+#define NC_CCITT "ccitt"
+#define NC_SNA "sna"
+#define NC_DECNET "decnet"
+#define NC_DLI "dli"
+#define NC_LAT "lat"
+#define NC_HYLINK "hylink"
+#define NC_APPLETALK "appletalk"
+#define NC_NIT "nit"
+#define NC_IEEE802 "ieee802"
+#define NC_OSI "osi"
+#define NC_X25 "x25"
+#define NC_OSINET "osinet"
+#define NC_GOSIP "gosip"
+
+/*
+ * nc_proto values
+ */
+#define NC_NOPROTO "-"
+#define NC_TCP "tcp"
+#define NC_UDP "udp"
+#define NC_ICMP "icmp"
+
+__BEGIN_DECLS
+void *setnetconfig(void);
+struct netconfig *getnetconfig(void *);
+struct netconfig *getnetconfigent(const char *);
+void freenetconfigent(struct netconfig *);
+int endnetconfig(void *);
+
+#ifndef _KERNEL
+void *setnetpath(void);
+struct netconfig *getnetpath(void *);
+int endnetpath(void *);
+
+void nc_perror(const char *);
+char *nc_sperror(void);
+#endif
+
+__END_DECLS
+
+#endif /* _NETCONFIG_H_ */
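
getnetconfigent() is the by-name counterpart of the iteration functions declared above. A short userland sketch that fetches the "tcp" entry and checks its semantics field (in the kernel version the entry points into the static table and freenetconfigent() is a no-op; in userland it is allocated and must be freed):

#include <netconfig.h>
#include <stdio.h>

int
main(void)
{
	struct netconfig *nconf;

	nconf = getnetconfigent("tcp");	/* look up one transport by netid */
	if (nconf == NULL) {
		fprintf(stderr, "no such netid\n");
		return (1);
	}
	printf("tcp is %sconnection-oriented\n",
	    nconf->nc_semantics == NC_TPI_COTS_ORD ||
	    nconf->nc_semantics == NC_TPI_COTS ? "" : "not ");
	freenetconfigent(nconf);
	return (0);
}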
diff --git a/sys/rpc/nettype.h b/sys/rpc/nettype.h
new file mode 100644
index 0000000..c325b4f
--- /dev/null
+++ b/sys/rpc/nettype.h
@@ -0,0 +1,68 @@
+/* $NetBSD: nettype.h,v 1.2 2000/07/06 03:17:19 christos Exp $ */
+/* $FreeBSD$ */
+
+/*
+ * Sun RPC is a product of Sun Microsystems, Inc. and is provided for
+ * unrestricted use provided that this legend is included on all tape
+ * media and as a part of the software program in whole or part. Users
+ * may copy or modify Sun RPC without charge, but are not authorized
+ * to license or distribute it to anyone else except as part of a product or
+ * program developed by the user.
+ *
+ * SUN RPC IS PROVIDED AS IS WITH NO WARRANTIES OF ANY KIND INCLUDING THE
+ * WARRANTIES OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE, OR ARISING FROM A COURSE OF DEALING, USAGE OR TRADE PRACTICE.
+ *
+ * Sun RPC is provided with no support and without any obligation on the
+ * part of Sun Microsystems, Inc. to assist in its use, correction,
+ * modification or enhancement.
+ *
+ * SUN MICROSYSTEMS, INC. SHALL HAVE NO LIABILITY WITH RESPECT TO THE
+ * INFRINGEMENT OF COPYRIGHTS, TRADE SECRETS OR ANY PATENTS BY SUN RPC
+ * OR ANY PART THEREOF.
+ *
+ * In no event will Sun Microsystems, Inc. be liable for any lost revenue
+ * or profits or other special, indirect and consequential damages, even if
+ * Sun has been advised of the possibility of such damages.
+ *
+ * Sun Microsystems, Inc.
+ * 2550 Garcia Avenue
+ * Mountain View, California 94043
+ */
+/*
+ * Copyright (c) 1986 - 1991 by Sun Microsystems, Inc.
+ */
+
+/*
+ * nettype.h, Nettype definitions.
+ * All for the topmost layer of rpc
+ *
+ */
+
+#ifndef _RPC_NETTYPE_H
+#define _RPC_NETTYPE_H
+
+#ifdef _KERNEL
+#include <rpc/netconfig.h>
+#else
+#include <netconfig.h>
+#endif
+
+#define _RPC_NONE 0
+#define _RPC_NETPATH 1
+#define _RPC_VISIBLE 2
+#define _RPC_CIRCUIT_V 3
+#define _RPC_DATAGRAM_V 4
+#define _RPC_CIRCUIT_N 5
+#define _RPC_DATAGRAM_N 6
+#define _RPC_TCP 7
+#define _RPC_UDP 8
+
+__BEGIN_DECLS
+extern void *__rpc_setconf(const char *);
+extern void __rpc_endconf(void *);
+extern struct netconfig *__rpc_getconf(void *);
+extern struct netconfig *__rpc_getconfip(const char *);
+__END_DECLS
+
+#endif /* !_RPC_NETTYPE_H */
diff --git a/sys/rpc/pmap_prot.h b/sys/rpc/pmap_prot.h
new file mode 100644
index 0000000..366832a
--- /dev/null
+++ b/sys/rpc/pmap_prot.h
@@ -0,0 +1,107 @@
+/* $NetBSD: pmap_prot.h,v 1.8 2000/06/02 22:57:55 fvdl Exp $ */
+
+/*
+ * Sun RPC is a product of Sun Microsystems, Inc. and is provided for
+ * unrestricted use provided that this legend is included on all tape
+ * media and as a part of the software program in whole or part. Users
+ * may copy or modify Sun RPC without charge, but are not authorized
+ * to license or distribute it to anyone else except as part of a product or
+ * program developed by the user.
+ *
+ * SUN RPC IS PROVIDED AS IS WITH NO WARRANTIES OF ANY KIND INCLUDING THE
+ * WARRANTIES OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE, OR ARISING FROM A COURSE OF DEALING, USAGE OR TRADE PRACTICE.
+ *
+ * Sun RPC is provided with no support and without any obligation on the
+ * part of Sun Microsystems, Inc. to assist in its use, correction,
+ * modification or enhancement.
+ *
+ * SUN MICROSYSTEMS, INC. SHALL HAVE NO LIABILITY WITH RESPECT TO THE
+ * INFRINGEMENT OF COPYRIGHTS, TRADE SECRETS OR ANY PATENTS BY SUN RPC
+ * OR ANY PART THEREOF.
+ *
+ * In no event will Sun Microsystems, Inc. be liable for any lost revenue
+ * or profits or other special, indirect and consequential damages, even if
+ * Sun has been advised of the possibility of such damages.
+ *
+ * Sun Microsystems, Inc.
+ * 2550 Garcia Avenue
+ * Mountain View, California 94043
+ *
+ * from: @(#)pmap_prot.h 1.14 88/02/08 SMI
+ * from: @(#)pmap_prot.h 2.1 88/07/29 4.0 RPCSRC
+ * $FreeBSD$
+ */
+
+/*
+ * pmap_prot.h
+ * Protocol for the local binder service, or pmap.
+ *
+ * Copyright (C) 1984, Sun Microsystems, Inc.
+ *
+ * The following procedures are supported by the protocol:
+ *
+ * PMAPPROC_NULL() returns ()
+ * takes nothing, returns nothing
+ *
+ * PMAPPROC_SET(struct pmap) returns (bool_t)
+ * TRUE is success, FALSE is failure. Registers the tuple
+ * [prog, vers, prot, port].
+ *
+ * PMAPPROC_UNSET(struct pmap) returns (bool_t)
+ * TRUE is success, FALSE is failure. Un-registers pair
+ * [prog, vers]. prot and port are ignored.
+ *
+ * PMAPPROC_GETPORT(struct pmap) returns (long unsigned).
+ * 0 is failure. Otherwise returns the port number where the pair
+ * [prog, vers] is registered. It may lie!
+ *
+ * PMAPPROC_DUMP() RETURNS (struct pmaplist *)
+ *
+ * PMAPPROC_CALLIT(unsigned, unsigned, unsigned, string<>)
+ * RETURNS (port, string<>);
+ * usage: encapsulatedresults = PMAPPROC_CALLIT(prog, vers, proc, encapsulatedargs);
+ * Calls the procedure on the local machine. If it is not registered,
+ *	this procedure is quiet; i.e., it does not return error information.
+ *	This procedure is only supported on rpc/udp and calls via
+ *	rpc/udp.  This routine only passes null authentication parameters.
+ * This file has no interface to xdr routines for PMAPPROC_CALLIT.
+ *
+ * The service supports remote procedure calls on udp/ip or tcp/ip socket 111.
+ */
+
+#ifndef _RPC_PMAP_PROT_H
+#define _RPC_PMAP_PROT_H
+#include <sys/cdefs.h>
+
+#define PMAPPORT ((u_short)111)
+#define PMAPPROG ((u_long)100000)
+#define PMAPVERS ((u_long)2)
+#define PMAPVERS_PROTO ((u_long)2)
+#define PMAPVERS_ORIG ((u_long)1)
+#define PMAPPROC_NULL ((u_long)0)
+#define PMAPPROC_SET ((u_long)1)
+#define PMAPPROC_UNSET ((u_long)2)
+#define PMAPPROC_GETPORT ((u_long)3)
+#define PMAPPROC_DUMP ((u_long)4)
+#define PMAPPROC_CALLIT ((u_long)5)
+
+struct pmap {
+ long unsigned pm_prog;
+ long unsigned pm_vers;
+ long unsigned pm_prot;
+ long unsigned pm_port;
+};
+
+struct pmaplist {
+ struct pmap pml_map;
+ struct pmaplist *pml_next;
+};
+
+__BEGIN_DECLS
+extern bool_t xdr_pmap(XDR *, struct pmap *);
+extern bool_t xdr_pmaplist(XDR *, struct pmaplist **);
+extern bool_t xdr_pmaplist_ptr(XDR *, struct pmaplist *);
+__END_DECLS
+
+#endif /* !_RPC_PMAP_PROT_H */
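
PMAPPROC_GETPORT is the query the NLM code relies on to find remote lock managers. From userland the same query is available through the classic pmap_clnt(3) interface; the sketch below asks the local portmapper where NLM v4 (program 100021) is listening over UDP. The program/version pair is only an example target, and the call assumes a portmapper is running on port 111.

#include <sys/types.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <rpc/rpc.h>
#include <rpc/pmap_clnt.h>	/* pmap_getport() */
#include <rpc/pmap_prot.h>	/* PMAPPORT */
#include <stdio.h>
#include <string.h>

#define NLM_PROG	100021	/* NLM program number, used for illustration */

int
main(void)
{
	struct sockaddr_in sin;
	u_short port;

	memset(&sin, 0, sizeof(sin));
	sin.sin_family = AF_INET;
	sin.sin_port = htons(PMAPPORT);		/* portmapper listens on 111 */
	sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);

	/* Ask the local portmapper where NLM v4 is registered for UDP. */
	port = pmap_getport(&sin, NLM_PROG, 4, IPPROTO_UDP);
	if (port == 0) {
		fprintf(stderr, "not registered (or portmapper unreachable)\n");
		return (1);
	}
	printf("nlm v4/udp is on port %u\n", (unsigned)port);
	return (0);
}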
diff --git a/sys/rpc/rpc.h b/sys/rpc/rpc.h
new file mode 100644
index 0000000..32ca5fd
--- /dev/null
+++ b/sys/rpc/rpc.h
@@ -0,0 +1,125 @@
+/* $NetBSD: rpc.h,v 1.13 2000/06/02 22:57:56 fvdl Exp $ */
+
+/*
+ * Sun RPC is a product of Sun Microsystems, Inc. and is provided for
+ * unrestricted use provided that this legend is included on all tape
+ * media and as a part of the software program in whole or part. Users
+ * may copy or modify Sun RPC without charge, but are not authorized
+ * to license or distribute it to anyone else except as part of a product or
+ * program developed by the user.
+ *
+ * SUN RPC IS PROVIDED AS IS WITH NO WARRANTIES OF ANY KIND INCLUDING THE
+ * WARRANTIES OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE, OR ARISING FROM A COURSE OF DEALING, USAGE OR TRADE PRACTICE.
+ *
+ * Sun RPC is provided with no support and without any obligation on the
+ * part of Sun Microsystems, Inc. to assist in its use, correction,
+ * modification or enhancement.
+ *
+ * SUN MICROSYSTEMS, INC. SHALL HAVE NO LIABILITY WITH RESPECT TO THE
+ * INFRINGEMENT OF COPYRIGHTS, TRADE SECRETS OR ANY PATENTS BY SUN RPC
+ * OR ANY PART THEREOF.
+ *
+ * In no event will Sun Microsystems, Inc. be liable for any lost revenue
+ * or profits or other special, indirect and consequential damages, even if
+ * Sun has been advised of the possibility of such damages.
+ *
+ * Sun Microsystems, Inc.
+ * 2550 Garcia Avenue
+ * Mountain View, California 94043
+ *
+ * from: @(#)rpc.h 1.9 88/02/08 SMI
+ * from: @(#)rpc.h 2.4 89/07/11 4.0 RPCSRC
+ * $FreeBSD$
+ */
+
+/*
+ * rpc.h, Just includes the billions of rpc header files necessary to
+ * do remote procedure calling.
+ *
+ * Copyright (C) 1984, Sun Microsystems, Inc.
+ */
+#ifndef _RPC_RPC_H
+#define _RPC_RPC_H
+
+#include <rpc/types.h> /* some typedefs */
+#include <sys/socket.h>
+#include <netinet/in.h>
+
+/* external data representation interfaces */
+#include <rpc/xdr.h> /* generic (de)serializer */
+
+/* Client side only authentication */
+#include <rpc/auth.h> /* generic authenticator (client side) */
+
+/* Client side (mostly) remote procedure call */
+#include <rpc/clnt.h> /* generic rpc stuff */
+
+/* semi-private protocol headers */
+#include <rpc/rpc_msg.h> /* protocol for rpc messages */
+
+#ifndef _KERNEL
+#include <rpc/auth_unix.h> /* protocol for unix style cred */
+/*
+ * Uncomment-out the next line if you are building the rpc library with
+ * DES Authentication (see the README file in the secure_rpc/ directory).
+ */
+#include <rpc/auth_des.h> /* protocol for des style cred */
+#endif
+
+/* Server side only remote procedure callee */
+#include <rpc/svc.h> /* service manager and multiplexer */
+#include <rpc/svc_auth.h> /* service side authenticator */
+
+#ifndef _KERNEL
+/* Portmapper client, server, and protocol headers */
+#include <rpc/pmap_clnt.h>
+#endif
+#include <rpc/pmap_prot.h>
+
+#include <rpc/rpcb_clnt.h> /* rpcbind interface functions */
+
+#ifndef _KERNEL
+#include <rpc/rpcent.h>
+#endif
+
+#ifndef UDPMSGSIZE
+#define UDPMSGSIZE 8800
+#endif
+
+__BEGIN_DECLS
+extern int get_myaddress(struct sockaddr_in *);
+#ifndef _KERNEL
+extern int bindresvport(int, struct sockaddr_in *);
+#endif
+extern int registerrpc(int, int, int, char *(*)(char [UDPMSGSIZE]),
+ xdrproc_t, xdrproc_t);
+extern int callrpc(const char *, int, int, int, xdrproc_t, void *,
+ xdrproc_t , void *);
+extern int getrpcport(char *, int, int, int);
+
+char *taddr2uaddr(const struct netconfig *, const struct netbuf *);
+struct netbuf *uaddr2taddr(const struct netconfig *, const char *);
+
+struct sockaddr;
+extern int bindresvport_sa(int, struct sockaddr *);
+__END_DECLS
+
+/*
+ * The following are not exported interfaces, they are for internal library
+ * and rpcbind use only. Do not use, they may change without notice.
+ */
+__BEGIN_DECLS
+#ifndef _KERNEL
+int __rpc_nconf2fd(const struct netconfig *);
+int __rpc_nconf2sockinfo(const struct netconfig *, struct __rpc_sockinfo *);
+int __rpc_fd2sockinfo(int, struct __rpc_sockinfo *);
+#else
+struct socket *__rpc_nconf2socket(const struct netconfig *);
+int __rpc_nconf2sockinfo(const struct netconfig *, struct __rpc_sockinfo *);
+int __rpc_socket2sockinfo(struct socket *, struct __rpc_sockinfo *);
+#endif
+u_int __rpc_get_t_size(int, int, int);
+__END_DECLS
+
+#endif /* !_RPC_RPC_H */
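A userland-only sketch (nothing in this commit depends on it): the simplest consumer of the declarations above is callrpc(), which bundles client creation, the call and teardown in one step; here it pings the portmapper's NULL procedure on the local host. The return value is an enum clnt_stat cast to int, so 0 means RPC_SUCCESS.

/* Illustrative userland sketch: ping PMAPPROC_NULL via callrpc(). */
#include <rpc/rpc.h>
#include <stdio.h>

int
main(void)
{
	int stat;

	stat = callrpc("localhost", PMAPPROG, PMAPVERS, PMAPPROC_NULL,
	    (xdrproc_t)xdr_void, NULL, (xdrproc_t)xdr_void, NULL);
	if (stat != 0)
		fprintf(stderr, "portmapper ping failed: clnt_stat %d\n", stat);
	return (stat != 0);
}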
diff --git a/sys/rpc/rpc_callmsg.c b/sys/rpc/rpc_callmsg.c
new file mode 100644
index 0000000..65637c5
--- /dev/null
+++ b/sys/rpc/rpc_callmsg.c
@@ -0,0 +1,200 @@
+/* $NetBSD: rpc_callmsg.c,v 1.16 2000/07/14 08:40:42 fvdl Exp $ */
+
+/*
+ * Sun RPC is a product of Sun Microsystems, Inc. and is provided for
+ * unrestricted use provided that this legend is included on all tape
+ * media and as a part of the software program in whole or part. Users
+ * may copy or modify Sun RPC without charge, but are not authorized
+ * to license or distribute it to anyone else except as part of a product or
+ * program developed by the user.
+ *
+ * SUN RPC IS PROVIDED AS IS WITH NO WARRANTIES OF ANY KIND INCLUDING THE
+ * WARRANTIES OF DESIGN, MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE, OR ARISING FROM A COURSE OF DEALING, USAGE OR TRADE PRACTICE.
+ *
+ * Sun RPC is provided with no support and without any obligation on the
+ * part of Sun Microsystems, Inc. to assist in its use, correction,
+ * modification or enhancement.
+ *
+ * SUN MICROSYSTEMS, INC. SHALL HAVE NO LIABILITY WITH RESPECT TO THE
+ * INFRINGEMENT OF COPYRIGHTS, TRADE SECRETS OR ANY PATENTS BY SUN RPC
+ * OR ANY PART THEREOF.
+ *
+ * In no event will Sun Microsystems, Inc. be liable for any lost revenue
+ * or profits or other special, indirect and consequential damages, even if
+ * Sun has been advised of the possibility of such damages.
+ *
+ * Sun Microsystems, Inc.
+ * 2550 Garcia Avenue
+ * Mountain View, California 94043
+ */
+
+#if defined(LIBC_SCCS) && !defined(lint)
+static char *sccsid2 = "@(#)rpc_callmsg.c 1.4 87/08/11 Copyr 1984 Sun Micro";
+static char *sccsid = "@(#)rpc_callmsg.c 2.1 88/07/29 4.0 RPCSRC";
+#endif
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * rpc_callmsg.c
+ *
+ * Copyright (C) 1984, Sun Microsystems, Inc.
+ *
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/malloc.h>
+
+#include <rpc/rpc.h>
+
+/*
+ * XDR a call message
+ */
+bool_t
+xdr_callmsg(XDR *xdrs, struct rpc_msg *cmsg)
+{
+ enum msg_type *prm_direction;
+ int32_t *buf;
+ struct opaque_auth *oa;
+
+ if (xdrs->x_op == XDR_ENCODE) {
+ if (cmsg->rm_call.cb_cred.oa_length > MAX_AUTH_BYTES) {
+ return (FALSE);
+ }
+ if (cmsg->rm_call.cb_verf.oa_length > MAX_AUTH_BYTES) {
+ return (FALSE);
+ }
+ buf = XDR_INLINE(xdrs, 8 * BYTES_PER_XDR_UNIT
+ + RNDUP(cmsg->rm_call.cb_cred.oa_length)
+ + 2 * BYTES_PER_XDR_UNIT
+ + RNDUP(cmsg->rm_call.cb_verf.oa_length));
+ if (buf != NULL) {
+ IXDR_PUT_INT32(buf, cmsg->rm_xid);
+ IXDR_PUT_ENUM(buf, cmsg->rm_direction);
+ if (cmsg->rm_direction != CALL) {
+ return (FALSE);
+ }
+ IXDR_PUT_INT32(buf, cmsg->rm_call.cb_rpcvers);
+ if (cmsg->rm_call.cb_rpcvers != RPC_MSG_VERSION) {
+ return (FALSE);
+ }
+ IXDR_PUT_INT32(buf, cmsg->rm_call.cb_prog);
+ IXDR_PUT_INT32(buf, cmsg->rm_call.cb_vers);
+ IXDR_PUT_INT32(buf, cmsg->rm_call.cb_proc);
+ oa = &cmsg->rm_call.cb_cred;
+ IXDR_PUT_ENUM(buf, oa->oa_flavor);
+ IXDR_PUT_INT32(buf, oa->oa_length);
+ if (oa->oa_length) {
+ memcpy(buf, oa->oa_base, oa->oa_length);
+ buf += RNDUP(oa->oa_length) / sizeof (int32_t);
+ }
+ oa = &cmsg->rm_call.cb_verf;
+ IXDR_PUT_ENUM(buf, oa->oa_flavor);
+ IXDR_PUT_INT32(buf, oa->oa_length);
+ if (oa->oa_length) {
+ memcpy(buf, oa->oa_base, oa->oa_length);
+ /* no real need....
+ buf += RNDUP(oa->oa_length) / sizeof (int32_t);
+ */
+ }
+ return (TRUE);
+ }
+ }
+ if (xdrs->x_op == XDR_DECODE) {
+ buf = XDR_INLINE(xdrs, 8 * BYTES_PER_XDR_UNIT);
+ if (buf != NULL) {
+ cmsg->rm_xid = IXDR_GET_UINT32(buf);
+ cmsg->rm_direction = IXDR_GET_ENUM(buf, enum msg_type);
+ if (cmsg->rm_direction != CALL) {
+ return (FALSE);
+ }
+ cmsg->rm_call.cb_rpcvers = IXDR_GET_UINT32(buf);
+ if (cmsg->rm_call.cb_rpcvers != RPC_MSG_VERSION) {
+ return (FALSE);
+ }
+ cmsg->rm_call.cb_prog = IXDR_GET_UINT32(buf);
+ cmsg->rm_call.cb_vers = IXDR_GET_UINT32(buf);
+ cmsg->rm_call.cb_proc = IXDR_GET_UINT32(buf);
+ oa = &cmsg->rm_call.cb_cred;
+ oa->oa_flavor = IXDR_GET_ENUM(buf, enum_t);
+ oa->oa_length = (u_int)IXDR_GET_UINT32(buf);
+ if (oa->oa_length) {
+ if (oa->oa_length > MAX_AUTH_BYTES) {
+ return (FALSE);
+ }
+ if (oa->oa_base == NULL) {
+ oa->oa_base = (caddr_t)
+ mem_alloc(oa->oa_length);
+ if (oa->oa_base == NULL)
+ return (FALSE);
+ }
+ buf = XDR_INLINE(xdrs, RNDUP(oa->oa_length));
+ if (buf == NULL) {
+ if (xdr_opaque(xdrs, oa->oa_base,
+ oa->oa_length) == FALSE) {
+ return (FALSE);
+ }
+ } else {
+ memcpy(oa->oa_base, buf,
+ oa->oa_length);
+ /* no real need....
+ buf += RNDUP(oa->oa_length) /
+ sizeof (int32_t);
+ */
+ }
+ }
+ oa = &cmsg->rm_call.cb_verf;
+ buf = XDR_INLINE(xdrs, 2 * BYTES_PER_XDR_UNIT);
+ if (buf == NULL) {
+ if (xdr_enum(xdrs, &oa->oa_flavor) == FALSE ||
+ xdr_u_int(xdrs, &oa->oa_length) == FALSE) {
+ return (FALSE);
+ }
+ } else {
+ oa->oa_flavor = IXDR_GET_ENUM(buf, enum_t);
+ oa->oa_length = (u_int)IXDR_GET_UINT32(buf);
+ }
+ if (oa->oa_length) {
+ if (oa->oa_length > MAX_AUTH_BYTES) {
+ return (FALSE);
+ }
+ if (oa->oa_base == NULL) {
+ oa->oa_base = (caddr_t)
+ mem_alloc(oa->oa_length);
+ if (oa->oa_base == NULL)
+ return (FALSE);
+ }
+ buf = XDR_INLINE(xdrs, RNDUP(oa->oa_length));
+ if (buf == NULL) {
+ if (xdr_opaque(xdrs, oa->oa_base,
+ oa->oa_length) == FALSE) {
+ return (FALSE);
+ }
+ } else {
+ memcpy(oa->oa_base, buf,
+ oa->oa_length);
+ /* no real need...
+ buf += RNDUP(oa->oa_length) /
+ sizeof (int32_t);
+ */
+ }
+ }
+ return (TRUE);
+ }
+ }
+ prm_direction = &cmsg->rm_direction;
+ if (
+ xdr_uint32_t(xdrs, &(cmsg->rm_xid)) &&
+ xdr_enum(xdrs, (enum_t *) prm_direction) &&
+ (cmsg->rm_direction == CALL) &&
+ xdr_uint32_t(xdrs, &(cmsg->rm_call.cb_rpcvers)) &&
+ (cmsg->rm_call.cb_rpcvers == RPC_MSG_VERSION) &&
+ xdr_uint32_t(xdrs, &(cmsg->rm_call.cb_prog)) &&
+ xdr_uint32_t(xdrs, &(cmsg->rm_call.cb_vers)) &&
+ xdr_uint32_t(xdrs, &(cmsg->rm_call.cb_proc)) &&
+ xdr_opaque_auth(xdrs, &(cmsg->rm_call.cb_cred)) )
+ return (xdr_opaque_auth(xdrs, &(cmsg->rm_call.cb_verf)));
+ return (FALSE);
+}
diff --git a/sys/rpc/rpc_com.h b/sys/rpc/rpc_com.h
new file mode 100644
index 0000000..ad9cc68
--- /dev/null
+++ b/sys/rpc/rpc_com.h
@@ -0,0 +1,126 @@
+/* $NetBSD: rpc_com.h,v 1.3 2000/12/10 04:10:08 christos Exp $ */
+
+/*
+ * Sun RPC is a product of Sun Microsystems, Inc. and is provided for
+ * unrestricted use provided that this legend is included on all tape
+ * media and as a part of the software program in whole or part. Users
+ * may copy or modify Sun RPC without charge, but are not authorized
+ * to license or distribute it to anyone else except as part of a product or
+ * program developed by the user.
+ *
+ * SUN RPC IS PROVIDED AS IS WITH NO WARRANTIES OF ANY KIND INCLUDING THE
+ * WARRANTIES OF DESIGN, MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE, OR ARISING FROM A COURSE OF DEALING, USAGE OR TRADE PRACTICE.
+ *
+ * Sun RPC is provided with no support and without any obligation on the
+ * part of Sun Microsystems, Inc. to assist in its use, correction,
+ * modification or enhancement.
+ *
+ * SUN MICROSYSTEMS, INC. SHALL HAVE NO LIABILITY WITH RESPECT TO THE
+ * INFRINGEMENT OF COPYRIGHTS, TRADE SECRETS OR ANY PATENTS BY SUN RPC
+ * OR ANY PART THEREOF.
+ *
+ * In no event will Sun Microsystems, Inc. be liable for any lost revenue
+ * or profits or other special, indirect and consequential damages, even if
+ * Sun has been advised of the possibility of such damages.
+ *
+ * Sun Microsystems, Inc.
+ * 2550 Garcia Avenue
+ * Mountain View, California 94043
+ *
+ * $FreeBSD$
+ */
+/*
+ * Copyright (c) 1986 - 1991 by Sun Microsystems, Inc.
+ */
+
+/*
+ * rpc_com.h, Common definitions for both the server and client side.
+ * All for the topmost layer of rpc
+ *
+ * In Sun's tirpc distribution, this was installed as <rpc/rpc_com.h>,
+ * but as it contains only non-exported interfaces, it was moved here.
+ */
+
+#ifndef _RPC_RPCCOM_H
+#define _RPC_RPCCOM_H
+
+#include <sys/cdefs.h>
+
+/* #pragma ident "@(#)rpc_com.h 1.11 93/07/05 SMI" */
+
+/*
+ * The max size of the transport, if the size cannot be determined
+ * by other means.
+ */
+#define RPC_MAXDATASIZE 9000
+#define RPC_MAXADDRSIZE 1024
+
+#ifdef _KERNEL
+
+#define __RPC_GETXID(now) ((uint32_t)curproc->p_pid ^ (uint32_t)(now)->tv_sec ^ \
+ (uint32_t)(now)->tv_usec)
+
+#else
+
+#define __RPC_GETXID(now) ((uint32_t)getpid() ^ (uint32_t)(now)->tv_sec ^ \
+ (uint32_t)(now)->tv_usec)
+
+#endif
+
+__BEGIN_DECLS
+#ifndef _KERNEL
+extern u_int __rpc_get_a_size(int);
+extern int __rpc_dtbsize(void);
+extern struct netconfig * __rpcgettp(int);
+extern int __rpc_get_default_domain(char **);
+
+char *__rpc_taddr2uaddr_af(int, const struct netbuf *);
+struct netbuf *__rpc_uaddr2taddr_af(int, const char *);
+int __rpc_fixup_addr(struct netbuf *, const struct netbuf *);
+int __rpc_sockinfo2netid(struct __rpc_sockinfo *, const char **);
+int __rpc_seman2socktype(int);
+int __rpc_socktype2seman(int);
+void *rpc_nullproc(CLIENT *);
+int __rpc_sockisbound(int);
+
+struct netbuf *__rpcb_findaddr_timed(rpcprog_t, rpcvers_t,
+ const struct netconfig *, const char *host, CLIENT **clpp,
+ struct timeval *tp);
+
+bool_t __rpc_control(int,void *);
+
+char *_get_next_token(char *, int);
+
+bool_t __svc_clean_idle(fd_set *, int, bool_t);
+bool_t __xdrrec_setnonblock(XDR *, int);
+bool_t __xdrrec_getrec(XDR *, enum xprt_stat *, bool_t);
+void __xprt_unregister_unlocked(SVCXPRT *);
+
+SVCXPRT **__svc_xports;
+int __svc_maxrec;
+
+#else
+
+#define SUN_LEN(su) \
+ (sizeof(*(su)) - sizeof((su)->sun_path) + strlen((su)->sun_path))
+
+extern u_int __rpc_get_a_size(int);
+extern char *__rpc_taddr2uaddr_af(int, const struct netbuf *);
+extern struct netbuf *__rpc_uaddr2taddr_af(int, const char *);
+extern int __rpc_seman2socktype(int);
+extern int __rpc_socktype2seman(int);
+extern int __rpc_sockisbound(struct socket*);
+extern const char *__rpc_inet_ntop(int af, const void * __restrict src,
+ char * __restrict dst, socklen_t size);
+extern int __rpc_inet_pton(int af, const char * __restrict src,
+ void * __restrict dst);
+
+struct xucred;
+struct __rpc_xdr;
+bool_t xdr_authunix_parms(struct __rpc_xdr *xdrs, uint32_t *time, struct xucred *cred);
+#endif
+
+__END_DECLS
+
+#endif /* _RPC_RPCCOM_H */
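A quick sketch of the kernel variant of the __RPC_GETXID() macro above (illustrative only; the helper name is hypothetical): the current time is folded with curproc's pid to seed a transaction id.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/time.h>

#include "rpc_com.h"

/* Sketch: derive an initial RPC transaction id inside the kernel. */
static uint32_t
rpc_initial_xid(void)
{
	struct timeval now;

	getmicrotime(&now);
	return (__RPC_GETXID(&now));
}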
diff --git a/sys/rpc/rpc_generic.c b/sys/rpc/rpc_generic.c
new file mode 100644
index 0000000..c2a898b
--- /dev/null
+++ b/sys/rpc/rpc_generic.c
@@ -0,0 +1,716 @@
+/* $NetBSD: rpc_generic.c,v 1.4 2000/09/28 09:07:04 kleink Exp $ */
+
+/*
+ * Sun RPC is a product of Sun Microsystems, Inc. and is provided for
+ * unrestricted use provided that this legend is included on all tape
+ * media and as a part of the software program in whole or part. Users
+ * may copy or modify Sun RPC without charge, but are not authorized
+ * to license or distribute it to anyone else except as part of a product or
+ * program developed by the user.
+ *
+ * SUN RPC IS PROVIDED AS IS WITH NO WARRANTIES OF ANY KIND INCLUDING THE
+ * WARRANTIES OF DESIGN, MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE, OR ARISING FROM A COURSE OF DEALING, USAGE OR TRADE PRACTICE.
+ *
+ * Sun RPC is provided with no support and without any obligation on the
+ * part of Sun Microsystems, Inc. to assist in its use, correction,
+ * modification or enhancement.
+ *
+ * SUN MICROSYSTEMS, INC. SHALL HAVE NO LIABILITY WITH RESPECT TO THE
+ * INFRINGEMENT OF COPYRIGHTS, TRADE SECRETS OR ANY PATENTS BY SUN RPC
+ * OR ANY PART THEREOF.
+ *
+ * In no event will Sun Microsystems, Inc. be liable for any lost revenue
+ * or profits or other special, indirect and consequential damages, even if
+ * Sun has been advised of the possibility of such damages.
+ *
+ * Sun Microsystems, Inc.
+ * 2550 Garcia Avenue
+ * Mountain View, California 94043
+ */
+/*
+ * Copyright (c) 1986-1991 by Sun Microsystems Inc.
+ */
+
+/* #pragma ident "@(#)rpc_generic.c 1.17 94/04/24 SMI" */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * rpc_generic.c, Miscellaneous routines for RPC.
+ *
+ */
+
+#include "opt_inet6.h"
+
+#include <sys/param.h>
+#include <sys/malloc.h>
+#include <sys/proc.h>
+#include <sys/protosw.h>
+#include <sys/sbuf.h>
+#include <sys/systm.h>
+#include <sys/socket.h>
+#include <sys/socketvar.h>
+#include <sys/syslog.h>
+
+#include <rpc/rpc.h>
+#include <rpc/nettype.h>
+
+#include "rpc_com.h"
+
+struct handle {
+ NCONF_HANDLE *nhandle;
+ int nflag; /* Whether NETPATH or NETCONFIG */
+ int nettype;
+};
+
+static const struct _rpcnettype {
+ const char *name;
+ const int type;
+} _rpctypelist[] = {
+ { "netpath", _RPC_NETPATH },
+ { "visible", _RPC_VISIBLE },
+ { "circuit_v", _RPC_CIRCUIT_V },
+ { "datagram_v", _RPC_DATAGRAM_V },
+ { "circuit_n", _RPC_CIRCUIT_N },
+ { "datagram_n", _RPC_DATAGRAM_N },
+ { "tcp", _RPC_TCP },
+ { "udp", _RPC_UDP },
+ { 0, _RPC_NONE }
+};
+
+struct netid_af {
+ const char *netid;
+ int af;
+ int protocol;
+};
+
+static const struct netid_af na_cvt[] = {
+ { "udp", AF_INET, IPPROTO_UDP },
+ { "tcp", AF_INET, IPPROTO_TCP },
+#ifdef INET6
+ { "udp6", AF_INET6, IPPROTO_UDP },
+ { "tcp6", AF_INET6, IPPROTO_TCP },
+#endif
+ { "local", AF_LOCAL, 0 }
+};
+
+struct rpc_createerr rpc_createerr;
+
+/*
+ * Find the appropriate buffer size
+ */
+u_int
+/*ARGSUSED*/
+__rpc_get_t_size(int af, int proto, int size)
+{
+ int maxsize, defsize;
+
+ maxsize = 256 * 1024; /* XXX */
+ switch (proto) {
+ case IPPROTO_TCP:
+ defsize = 64 * 1024; /* XXX */
+ break;
+ case IPPROTO_UDP:
+ defsize = UDPMSGSIZE;
+ break;
+ default:
+ defsize = RPC_MAXDATASIZE;
+ break;
+ }
+ if (size == 0)
+ return defsize;
+
+ /* Check whether the value is within the upper max limit */
+ return (size > maxsize ? (u_int)maxsize : (u_int)size);
+}
+
+/*
+ * Find the appropriate address buffer size
+ */
+u_int
+__rpc_get_a_size(af)
+ int af;
+{
+ switch (af) {
+ case AF_INET:
+ return sizeof (struct sockaddr_in);
+#ifdef INET6
+ case AF_INET6:
+ return sizeof (struct sockaddr_in6);
+#endif
+ case AF_LOCAL:
+ return sizeof (struct sockaddr_un);
+ default:
+ break;
+ }
+ return ((u_int)RPC_MAXADDRSIZE);
+}
+
+#if 0
+
+/*
+ * Used to ping the NULL procedure for clnt handle.
+ * Returns NULL if fails, else a non-NULL pointer.
+ */
+void *
+rpc_nullproc(clnt)
+ CLIENT *clnt;
+{
+ struct timeval TIMEOUT = {25, 0};
+
+ if (clnt_call(clnt, NULLPROC, (xdrproc_t) xdr_void, NULL,
+ (xdrproc_t) xdr_void, NULL, TIMEOUT) != RPC_SUCCESS) {
+ return (NULL);
+ }
+ return ((void *) clnt);
+}
+
+#endif
+
+int
+__rpc_socket2sockinfo(struct socket *so, struct __rpc_sockinfo *sip)
+{
+ int type, proto;
+ struct sockaddr *sa;
+ sa_family_t family;
+ struct sockopt opt;
+ int error;
+
+ error = so->so_proto->pr_usrreqs->pru_sockaddr(so, &sa);
+ if (error)
+ return 0;
+
+ sip->si_alen = sa->sa_len;
+ family = sa->sa_family;
+ free(sa, M_SONAME);
+
+ opt.sopt_dir = SOPT_GET;
+ opt.sopt_level = SOL_SOCKET;
+ opt.sopt_name = SO_TYPE;
+ opt.sopt_val = &type;
+ opt.sopt_valsize = sizeof type;
+ opt.sopt_td = NULL;
+ error = sogetopt(so, &opt);
+ if (error)
+ return 0;
+
+ /* XXX */
+ if (family != AF_LOCAL) {
+ if (type == SOCK_STREAM)
+ proto = IPPROTO_TCP;
+ else if (type == SOCK_DGRAM)
+ proto = IPPROTO_UDP;
+ else
+ return 0;
+ } else
+ proto = 0;
+
+ sip->si_af = family;
+ sip->si_proto = proto;
+ sip->si_socktype = type;
+
+ return 1;
+}
+
+/*
+ * Linear search, but the number of entries is small.
+ */
+int
+__rpc_nconf2sockinfo(const struct netconfig *nconf, struct __rpc_sockinfo *sip)
+{
+ int i;
+
+ for (i = 0; i < (sizeof na_cvt) / (sizeof (struct netid_af)); i++)
+ if (strcmp(na_cvt[i].netid, nconf->nc_netid) == 0 || (
+ strcmp(nconf->nc_netid, "unix") == 0 &&
+ strcmp(na_cvt[i].netid, "local") == 0)) {
+ sip->si_af = na_cvt[i].af;
+ sip->si_proto = na_cvt[i].protocol;
+ sip->si_socktype =
+ __rpc_seman2socktype((int)nconf->nc_semantics);
+ if (sip->si_socktype == -1)
+ return 0;
+ sip->si_alen = __rpc_get_a_size(sip->si_af);
+ return 1;
+ }
+
+ return 0;
+}
+
+struct socket *
+__rpc_nconf2socket(const struct netconfig *nconf)
+{
+ struct __rpc_sockinfo si;
+ struct socket *so;
+ int error;
+
+ if (!__rpc_nconf2sockinfo(nconf, &si))
+ return 0;
+
+ so = NULL;
+ error = socreate(si.si_af, &so, si.si_socktype, si.si_proto,
+ curthread->td_ucred, curthread);
+
+ if (error)
+ return NULL;
+ else
+ return so;
+}
+
+char *
+taddr2uaddr(const struct netconfig *nconf, const struct netbuf *nbuf)
+{
+ struct __rpc_sockinfo si;
+
+ if (!__rpc_nconf2sockinfo(nconf, &si))
+ return NULL;
+ return __rpc_taddr2uaddr_af(si.si_af, nbuf);
+}
+
+struct netbuf *
+uaddr2taddr(const struct netconfig *nconf, const char *uaddr)
+{
+ struct __rpc_sockinfo si;
+
+ if (!__rpc_nconf2sockinfo(nconf, &si))
+ return NULL;
+ return __rpc_uaddr2taddr_af(si.si_af, uaddr);
+}
+
+char *
+__rpc_taddr2uaddr_af(int af, const struct netbuf *nbuf)
+{
+ char *ret;
+ struct sbuf sb;
+ struct sockaddr_in *sin;
+ struct sockaddr_un *sun;
+ char namebuf[INET_ADDRSTRLEN];
+#ifdef INET6
+ struct sockaddr_in6 *sin6;
+ char namebuf6[INET6_ADDRSTRLEN];
+#endif
+ u_int16_t port;
+
+ sbuf_new(&sb, NULL, 0, SBUF_AUTOEXTEND);
+
+ switch (af) {
+ case AF_INET:
+ sin = nbuf->buf;
+ if (__rpc_inet_ntop(af, &sin->sin_addr, namebuf, sizeof namebuf)
+ == NULL)
+ return NULL;
+ port = ntohs(sin->sin_port);
+ if (sbuf_printf(&sb, "%s.%u.%u", namebuf,
+ ((uint32_t)port) >> 8,
+ port & 0xff) < 0)
+ return NULL;
+ break;
+#ifdef INET6
+ case AF_INET6:
+ sin6 = nbuf->buf;
+ if (__rpc_inet_ntop(af, &sin6->sin6_addr, namebuf6, sizeof namebuf6)
+ == NULL)
+ return NULL;
+ port = ntohs(sin6->sin6_port);
+ if (sbuf_printf(&sb, "%s.%u.%u", namebuf6,
+ ((uint32_t)port) >> 8,
+ port & 0xff) < 0)
+ return NULL;
+ break;
+#endif
+ case AF_LOCAL:
+ sun = nbuf->buf;
+ if (sbuf_printf(&sb, "%.*s", (int)(sun->sun_len -
+ offsetof(struct sockaddr_un, sun_path)),
+ sun->sun_path) < 0)
+ return (NULL);
+ break;
+ default:
+ return NULL;
+ }
+
+ sbuf_finish(&sb);
+ ret = strdup(sbuf_data(&sb), M_RPC);
+ sbuf_delete(&sb);
+
+ return ret;
+}
+
+struct netbuf *
+__rpc_uaddr2taddr_af(int af, const char *uaddr)
+{
+ struct netbuf *ret = NULL;
+ char *addrstr, *p;
+ unsigned port, portlo, porthi;
+ struct sockaddr_in *sin;
+#ifdef INET6
+ struct sockaddr_in6 *sin6;
+#endif
+ struct sockaddr_un *sun;
+
+ port = 0;
+ sin = NULL;
+ addrstr = strdup(uaddr, M_RPC);
+ if (addrstr == NULL)
+ return NULL;
+
+ /*
+ * AF_LOCAL addresses are expected to be absolute
+	 * pathnames; anything else will be AF_INET or AF_INET6.
+ */
+ if (*addrstr != '/') {
+ p = strrchr(addrstr, '.');
+ if (p == NULL)
+ goto out;
+ portlo = (unsigned)strtol(p + 1, NULL, 10);
+ *p = '\0';
+
+ p = strrchr(addrstr, '.');
+ if (p == NULL)
+ goto out;
+ porthi = (unsigned)strtol(p + 1, NULL, 10);
+ *p = '\0';
+ port = (porthi << 8) | portlo;
+ }
+
+ ret = (struct netbuf *)malloc(sizeof *ret, M_RPC, M_WAITOK);
+ if (ret == NULL)
+ goto out;
+
+ switch (af) {
+ case AF_INET:
+ sin = (struct sockaddr_in *)malloc(sizeof *sin, M_RPC,
+ M_WAITOK);
+ if (sin == NULL)
+ goto out;
+ memset(sin, 0, sizeof *sin);
+ sin->sin_family = AF_INET;
+ sin->sin_port = htons(port);
+ if (__rpc_inet_pton(AF_INET, addrstr, &sin->sin_addr) <= 0) {
+ free(sin, M_RPC);
+ free(ret, M_RPC);
+ ret = NULL;
+ goto out;
+ }
+ sin->sin_len = ret->maxlen = ret->len = sizeof *sin;
+ ret->buf = sin;
+ break;
+#ifdef INET6
+ case AF_INET6:
+ sin6 = (struct sockaddr_in6 *)malloc(sizeof *sin6, M_RPC,
+ M_WAITOK);
+ if (sin6 == NULL)
+ goto out;
+ memset(sin6, 0, sizeof *sin6);
+ sin6->sin6_family = AF_INET6;
+ sin6->sin6_port = htons(port);
+ if (__rpc_inet_pton(AF_INET6, addrstr, &sin6->sin6_addr) <= 0) {
+ free(sin6, M_RPC);
+ free(ret, M_RPC);
+ ret = NULL;
+ goto out;
+ }
+ sin6->sin6_len = ret->maxlen = ret->len = sizeof *sin6;
+ ret->buf = sin6;
+ break;
+#endif
+ case AF_LOCAL:
+ sun = (struct sockaddr_un *)malloc(sizeof *sun, M_RPC,
+ M_WAITOK);
+ if (sun == NULL)
+ goto out;
+ memset(sun, 0, sizeof *sun);
+ sun->sun_family = AF_LOCAL;
+ strncpy(sun->sun_path, addrstr, sizeof(sun->sun_path) - 1);
+ ret->len = ret->maxlen = sun->sun_len = SUN_LEN(sun);
+ ret->buf = sun;
+ break;
+ default:
+ break;
+ }
+out:
+ free(addrstr, M_RPC);
+ return ret;
+}
+
+int
+__rpc_seman2socktype(int semantics)
+{
+ switch (semantics) {
+ case NC_TPI_CLTS:
+ return SOCK_DGRAM;
+ case NC_TPI_COTS_ORD:
+ return SOCK_STREAM;
+ case NC_TPI_RAW:
+ return SOCK_RAW;
+ default:
+ break;
+ }
+
+ return -1;
+}
+
+int
+__rpc_socktype2seman(int socktype)
+{
+ switch (socktype) {
+ case SOCK_DGRAM:
+ return NC_TPI_CLTS;
+ case SOCK_STREAM:
+ return NC_TPI_COTS_ORD;
+ case SOCK_RAW:
+ return NC_TPI_RAW;
+ default:
+ break;
+ }
+
+ return -1;
+}
+
+/*
+ * Returns the type of the network as defined in <rpc/nettype.h>.
+ * If nettype is NULL, it defaults to NETPATH.
+ */
+static int
+getnettype(const char *nettype)
+{
+ int i;
+
+ if ((nettype == NULL) || (nettype[0] == 0)) {
+ return (_RPC_NETPATH); /* Default */
+ }
+
+#if 0
+ nettype = strlocase(nettype);
+#endif
+ for (i = 0; _rpctypelist[i].name; i++)
+ if (strcasecmp(nettype, _rpctypelist[i].name) == 0) {
+ return (_rpctypelist[i].type);
+ }
+ return (_rpctypelist[i].type);
+}
+
+/*
+ * For the given nettype (tcp or udp only), return the first structure found.
+ * This should be freed by calling freenetconfigent()
+ */
+struct netconfig *
+__rpc_getconfip(const char *nettype)
+{
+ char *netid;
+ static char *netid_tcp = (char *) NULL;
+ static char *netid_udp = (char *) NULL;
+ struct netconfig *dummy;
+
+ if (!netid_udp && !netid_tcp) {
+ struct netconfig *nconf;
+ void *confighandle;
+
+ if (!(confighandle = setnetconfig())) {
+ log(LOG_ERR, "rpc: failed to open " NETCONFIG);
+ return (NULL);
+ }
+ while ((nconf = getnetconfig(confighandle)) != NULL) {
+ if (strcmp(nconf->nc_protofmly, NC_INET) == 0) {
+ if (strcmp(nconf->nc_proto, NC_TCP) == 0) {
+ netid_tcp = strdup(nconf->nc_netid,
+ M_RPC);
+ } else
+ if (strcmp(nconf->nc_proto, NC_UDP) == 0) {
+ netid_udp = strdup(nconf->nc_netid,
+ M_RPC);
+ }
+ }
+ }
+ endnetconfig(confighandle);
+ }
+ if (strcmp(nettype, "udp") == 0)
+ netid = netid_udp;
+ else if (strcmp(nettype, "tcp") == 0)
+ netid = netid_tcp;
+ else {
+ return (NULL);
+ }
+ if ((netid == NULL) || (netid[0] == 0)) {
+ return (NULL);
+ }
+ dummy = getnetconfigent(netid);
+ return (dummy);
+}
+
+/*
+ * Returns the type of the nettype, which should then be used with
+ * __rpc_getconf().
+ *
+ * For simplicity in the kernel, we don't support the NETPATH
+ * environment variable. We behave as userland would when NETPATH is
+ * unset, i.e. iterate over all visible entries in netconfig.
+ */
+void *
+__rpc_setconf(nettype)
+ const char *nettype;
+{
+ struct handle *handle;
+
+ handle = (struct handle *) malloc(sizeof (struct handle),
+ M_RPC, M_WAITOK);
+ switch (handle->nettype = getnettype(nettype)) {
+ case _RPC_NETPATH:
+ case _RPC_CIRCUIT_N:
+ case _RPC_DATAGRAM_N:
+ if (!(handle->nhandle = setnetconfig()))
+ goto failed;
+ handle->nflag = TRUE;
+ break;
+ case _RPC_VISIBLE:
+ case _RPC_CIRCUIT_V:
+ case _RPC_DATAGRAM_V:
+ case _RPC_TCP:
+ case _RPC_UDP:
+ if (!(handle->nhandle = setnetconfig())) {
+ log(LOG_ERR, "rpc: failed to open " NETCONFIG);
+ goto failed;
+ }
+ handle->nflag = FALSE;
+ break;
+ default:
+ goto failed;
+ }
+
+ return (handle);
+
+failed:
+ free(handle, M_RPC);
+ return (NULL);
+}
+
+/*
+ * Returns the next netconfig struct for the given "net" type.
+ * __rpc_setconf() should have been called previously.
+ */
+struct netconfig *
+__rpc_getconf(void *vhandle)
+{
+ struct handle *handle;
+ struct netconfig *nconf;
+
+ handle = (struct handle *)vhandle;
+ if (handle == NULL) {
+ return (NULL);
+ }
+ for (;;) {
+ if (handle->nflag) {
+ nconf = getnetconfig(handle->nhandle);
+ if (nconf && !(nconf->nc_flag & NC_VISIBLE))
+ continue;
+ } else {
+ nconf = getnetconfig(handle->nhandle);
+ }
+ if (nconf == NULL)
+ break;
+ if ((nconf->nc_semantics != NC_TPI_CLTS) &&
+ (nconf->nc_semantics != NC_TPI_COTS) &&
+ (nconf->nc_semantics != NC_TPI_COTS_ORD))
+ continue;
+ switch (handle->nettype) {
+ case _RPC_VISIBLE:
+ if (!(nconf->nc_flag & NC_VISIBLE))
+ continue;
+ /* FALLTHROUGH */
+ case _RPC_NETPATH: /* Be happy */
+ break;
+ case _RPC_CIRCUIT_V:
+ if (!(nconf->nc_flag & NC_VISIBLE))
+ continue;
+ /* FALLTHROUGH */
+ case _RPC_CIRCUIT_N:
+ if ((nconf->nc_semantics != NC_TPI_COTS) &&
+ (nconf->nc_semantics != NC_TPI_COTS_ORD))
+ continue;
+ break;
+ case _RPC_DATAGRAM_V:
+ if (!(nconf->nc_flag & NC_VISIBLE))
+ continue;
+ /* FALLTHROUGH */
+ case _RPC_DATAGRAM_N:
+ if (nconf->nc_semantics != NC_TPI_CLTS)
+ continue;
+ break;
+ case _RPC_TCP:
+ if (((nconf->nc_semantics != NC_TPI_COTS) &&
+ (nconf->nc_semantics != NC_TPI_COTS_ORD)) ||
+ (strcmp(nconf->nc_protofmly, NC_INET)
+#ifdef INET6
+ && strcmp(nconf->nc_protofmly, NC_INET6))
+#else
+ )
+#endif
+ ||
+ strcmp(nconf->nc_proto, NC_TCP))
+ continue;
+ break;
+ case _RPC_UDP:
+ if ((nconf->nc_semantics != NC_TPI_CLTS) ||
+ (strcmp(nconf->nc_protofmly, NC_INET)
+#ifdef INET6
+ && strcmp(nconf->nc_protofmly, NC_INET6))
+#else
+ )
+#endif
+ ||
+ strcmp(nconf->nc_proto, NC_UDP))
+ continue;
+ break;
+ }
+ break;
+ }
+ return (nconf);
+}
+
+void
+__rpc_endconf(vhandle)
+ void * vhandle;
+{
+ struct handle *handle;
+
+ handle = (struct handle *) vhandle;
+ if (handle == NULL) {
+ return;
+ }
+ endnetconfig(handle->nhandle);
+ free(handle, M_RPC);
+}
+
+int
+__rpc_sockisbound(struct socket *so)
+{
+ struct sockaddr *sa;
+ int error, bound;
+
+ error = so->so_proto->pr_usrreqs->pru_sockaddr(so, &sa);
+ if (error)
+ return (0);
+
+ switch (sa->sa_family) {
+ case AF_INET:
+ bound = (((struct sockaddr_in *) sa)->sin_port != 0);
+ break;
+#ifdef INET6
+ case AF_INET6:
+ bound = (((struct sockaddr_in6 *) sa)->sin6_port != 0);
+ break;
+#endif
+ case AF_LOCAL:
+ /* XXX check this */
+ bound = (((struct sockaddr_un *) sa)->sun_path[0] != '\0');
+ break;
+ default:
+ bound = FALSE;
+ break;
+ }
+
+ free(sa, M_SONAME);
+
+ return bound;
+}
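For reference, a hedged sketch (not part of the diff) of the "universal address" strings handled by __rpc_taddr2uaddr_af() and __rpc_uaddr2taddr_af() above: the port is appended as two decimal octets, so 10.0.0.1 port 2049 becomes "10.0.0.1.8.1" (8 * 256 + 1 = 2049). M_RPC is the malloc type the kernel RPC code uses for these allocations.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/socket.h>
#include <netinet/in.h>

#include <rpc/rpc.h>
#include "rpc_com.h"

/* Sketch: parse a universal address back into a sockaddr_in. */
static void
uaddr_example(void)
{
	struct netbuf *nb;

	nb = __rpc_uaddr2taddr_af(AF_INET, "10.0.0.1.8.1");
	if (nb != NULL) {
		/* nb->buf now points at a sockaddr_in for 10.0.0.1:2049. */
		free(nb->buf, M_RPC);
		free(nb, M_RPC);
	}
}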
diff --git a/sys/rpc/rpc_msg.h b/sys/rpc/rpc_msg.h
new file mode 100644
index 0000000..707250a
--- /dev/null
+++ b/sys/rpc/rpc_msg.h
@@ -0,0 +1,214 @@
+/* $NetBSD: rpc_msg.h,v 1.11 2000/06/02 22:57:56 fvdl Exp $ */
+
+/*
+ * Sun RPC is a product of Sun Microsystems, Inc. and is provided for
+ * unrestricted use provided that this legend is included on all tape
+ * media and as a part of the software program in whole or part. Users
+ * may copy or modify Sun RPC without charge, but are not authorized
+ * to license or distribute it to anyone else except as part of a product or
+ * program developed by the user.
+ *
+ * SUN RPC IS PROVIDED AS IS WITH NO WARRANTIES OF ANY KIND INCLUDING THE
+ * WARRANTIES OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE, OR ARISING FROM A COURSE OF DEALING, USAGE OR TRADE PRACTICE.
+ *
+ * Sun RPC is provided with no support and without any obligation on the
+ * part of Sun Microsystems, Inc. to assist in its use, correction,
+ * modification or enhancement.
+ *
+ * SUN MICROSYSTEMS, INC. SHALL HAVE NO LIABILITY WITH RESPECT TO THE
+ * INFRINGEMENT OF COPYRIGHTS, TRADE SECRETS OR ANY PATENTS BY SUN RPC
+ * OR ANY PART THEREOF.
+ *
+ * In no event will Sun Microsystems, Inc. be liable for any lost revenue
+ * or profits or other special, indirect and consequential damages, even if
+ * Sun has been advised of the possibility of such damages.
+ *
+ * Sun Microsystems, Inc.
+ * 2550 Garcia Avenue
+ * Mountain View, California 94043
+ *
+ * from: @(#)rpc_msg.h 1.7 86/07/16 SMI
+ * from: @(#)rpc_msg.h 2.1 88/07/29 4.0 RPCSRC
+ * $FreeBSD$
+ */
+
+/*
+ * rpc_msg.h
+ * rpc message definition
+ *
+ * Copyright (C) 1984, Sun Microsystems, Inc.
+ */
+
+#ifndef _RPC_RPC_MSG_H
+#define _RPC_RPC_MSG_H
+
+#define RPC_MSG_VERSION ((uint32_t) 2)
+#define RPC_SERVICE_PORT ((u_short) 2048)
+
+/*
+ * Bottom up definition of an rpc message.
+ * NOTE: call and reply use the same overall struct but
+ * different parts of unions within it.
+ */
+
+enum msg_type {
+ CALL=0,
+ REPLY=1
+};
+
+enum reply_stat {
+ MSG_ACCEPTED=0,
+ MSG_DENIED=1
+};
+
+enum accept_stat {
+ SUCCESS=0,
+ PROG_UNAVAIL=1,
+ PROG_MISMATCH=2,
+ PROC_UNAVAIL=3,
+ GARBAGE_ARGS=4,
+ SYSTEM_ERR=5
+};
+
+enum reject_stat {
+ RPC_MISMATCH=0,
+ AUTH_ERROR=1
+};
+
+/*
+ * Reply part of an rpc exchange
+ */
+
+/*
+ * Reply to an rpc request that was accepted by the server.
+ * Note: there could be an error even though the request was
+ * accepted.
+ */
+struct accepted_reply {
+ struct opaque_auth ar_verf;
+ enum accept_stat ar_stat;
+ union {
+ struct {
+ rpcvers_t low;
+ rpcvers_t high;
+ } AR_versions;
+ struct {
+ caddr_t where;
+ xdrproc_t proc;
+ } AR_results;
+ /* and many other null cases */
+ } ru;
+#define ar_results ru.AR_results
+#define ar_vers ru.AR_versions
+};
+
+/*
+ * Reply to an rpc request that was rejected by the server.
+ */
+struct rejected_reply {
+ enum reject_stat rj_stat;
+ union {
+ struct {
+ rpcvers_t low;
+ rpcvers_t high;
+ } RJ_versions;
+ enum auth_stat RJ_why; /* why authentication did not work */
+ } ru;
+#define rj_vers ru.RJ_versions
+#define rj_why ru.RJ_why
+};
+
+/*
+ * Body of a reply to an rpc request.
+ */
+struct reply_body {
+ enum reply_stat rp_stat;
+ union {
+ struct accepted_reply RP_ar;
+ struct rejected_reply RP_dr;
+ } ru;
+#define rp_acpt ru.RP_ar
+#define rp_rjct ru.RP_dr
+};
+
+/*
+ * Body of an rpc request call.
+ */
+struct call_body {
+ rpcvers_t cb_rpcvers; /* must be equal to two */
+ rpcprog_t cb_prog;
+ rpcvers_t cb_vers;
+ rpcproc_t cb_proc;
+ struct opaque_auth cb_cred;
+ struct opaque_auth cb_verf; /* protocol specific - provided by client */
+};
+
+/*
+ * The rpc message
+ */
+struct rpc_msg {
+ uint32_t rm_xid;
+ enum msg_type rm_direction;
+ union {
+ struct call_body RM_cmb;
+ struct reply_body RM_rmb;
+ } ru;
+#define rm_call ru.RM_cmb
+#define rm_reply ru.RM_rmb
+};
+#define acpted_rply ru.RM_rmb.ru.RP_ar
+#define rjcted_rply ru.RM_rmb.ru.RP_dr
+
+__BEGIN_DECLS
+/*
+ * XDR routine to handle a rpc message.
+ * xdr_callmsg(xdrs, cmsg)
+ * XDR *xdrs;
+ * struct rpc_msg *cmsg;
+ */
+extern bool_t xdr_callmsg(XDR *, struct rpc_msg *);
+
+/*
+ * XDR routine to pre-serialize the static part of a rpc message.
+ * xdr_callhdr(xdrs, cmsg)
+ * XDR *xdrs;
+ * struct rpc_msg *cmsg;
+ */
+extern bool_t xdr_callhdr(XDR *, struct rpc_msg *);
+
+/*
+ * XDR routine to handle a rpc reply.
+ * xdr_replymsg(xdrs, rmsg)
+ * XDR *xdrs;
+ * struct rpc_msg *rmsg;
+ */
+extern bool_t xdr_replymsg(XDR *, struct rpc_msg *);
+
+
+/*
+ * XDR routine to handle an accepted rpc reply.
+ * xdr_accepted_reply(xdrs, rej)
+ * XDR *xdrs;
+ * struct accepted_reply *rej;
+ */
+extern bool_t xdr_accepted_reply(XDR *, struct accepted_reply *);
+
+/*
+ * XDR routine to handle a rejected rpc reply.
+ * xdr_rejected_reply(xdrs, rej)
+ * XDR *xdrs;
+ * struct rejected_reply *rej;
+ */
+extern bool_t xdr_rejected_reply(XDR *, struct rejected_reply *);
+
+/*
+ * Fills in the error part of a reply message.
+ * _seterr_reply(msg, error)
+ * struct rpc_msg *msg;
+ * struct rpc_err *error;
+ */
+extern void _seterr_reply(struct rpc_msg *, struct rpc_err *);
+__END_DECLS
+
+#endif /* !_RPC_RPC_MSG_H */
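To make the message layout above concrete, here is a hedged sketch (not from this commit; the helper name is hypothetical) of filling in struct rpc_msg for a CALL before handing it to xdr_callmsg() or xdr_callhdr(); _null_auth is the empty opaque_auth exported by the auth code.

#include <rpc/types.h>
#include <rpc/xdr.h>
#include <rpc/auth.h>
#include <rpc/rpc_msg.h>

/* Sketch: build the header of a NULL-procedure call to the portmapper. */
static void
build_null_call(struct rpc_msg *msg, uint32_t xid)
{
	msg->rm_xid = xid;			/* chosen by the caller */
	msg->rm_direction = CALL;
	msg->rm_call.cb_rpcvers = RPC_MSG_VERSION;
	msg->rm_call.cb_prog = 100000;		/* portmapper program number */
	msg->rm_call.cb_vers = 2;
	msg->rm_call.cb_proc = 0;		/* NULLPROC */
	msg->rm_call.cb_cred = _null_auth;	/* AUTH_NONE credentials */
	msg->rm_call.cb_verf = _null_auth;
}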
diff --git a/sys/rpc/rpc_prot.c b/sys/rpc/rpc_prot.c
new file mode 100644
index 0000000..16f602f
--- /dev/null
+++ b/sys/rpc/rpc_prot.c
@@ -0,0 +1,348 @@
+/* $NetBSD: rpc_prot.c,v 1.16 2000/06/02 23:11:13 fvdl Exp $ */
+
+/*
+ * Sun RPC is a product of Sun Microsystems, Inc. and is provided for
+ * unrestricted use provided that this legend is included on all tape
+ * media and as a part of the software program in whole or part. Users
+ * may copy or modify Sun RPC without charge, but are not authorized
+ * to license or distribute it to anyone else except as part of a product or
+ * program developed by the user.
+ *
+ * SUN RPC IS PROVIDED AS IS WITH NO WARRANTIES OF ANY KIND INCLUDING THE
+ * WARRANTIES OF DESIGN, MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE, OR ARISING FROM A COURSE OF DEALING, USAGE OR TRADE PRACTICE.
+ *
+ * Sun RPC is provided with no support and without any obligation on the
+ * part of Sun Microsystems, Inc. to assist in its use, correction,
+ * modification or enhancement.
+ *
+ * SUN MICROSYSTEMS, INC. SHALL HAVE NO LIABILITY WITH RESPECT TO THE
+ * INFRINGEMENT OF COPYRIGHTS, TRADE SECRETS OR ANY PATENTS BY SUN RPC
+ * OR ANY PART THEREOF.
+ *
+ * In no event will Sun Microsystems, Inc. be liable for any lost revenue
+ * or profits or other special, indirect and consequential damages, even if
+ * Sun has been advised of the possibility of such damages.
+ *
+ * Sun Microsystems, Inc.
+ * 2550 Garcia Avenue
+ * Mountain View, California 94043
+ */
+
+#if defined(LIBC_SCCS) && !defined(lint)
+static char *sccsid2 = "@(#)rpc_prot.c 1.36 87/08/11 Copyr 1984 Sun Micro";
+static char *sccsid = "@(#)rpc_prot.c 2.3 88/08/07 4.0 RPCSRC";
+#endif
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * rpc_prot.c
+ *
+ * Copyright (C) 1984, Sun Microsystems, Inc.
+ *
+ * This set of routines implements the rpc message definition,
+ * its serializer and some common rpc utility routines.
+ * The routines are meant for various implementations of rpc -
+ * they are NOT for the rpc client or rpc service implementations!
+ * Because authentication stuff is easy and is part of rpc, the opaque
+ * routines are also in this program.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+
+#include <rpc/types.h>
+#include <rpc/xdr.h>
+#include <rpc/auth.h>
+#include <rpc/clnt.h>
+#include <rpc/rpc_msg.h>
+
+MALLOC_DEFINE(M_RPC, "rpc", "Remote Procedure Call");
+
+#define assert(exp) KASSERT(exp, ("bad arguments"))
+
+static void accepted(enum accept_stat, struct rpc_err *);
+static void rejected(enum reject_stat, struct rpc_err *);
+
+/* * * * * * * * * * * * * * XDR Authentication * * * * * * * * * * * */
+
+struct opaque_auth _null_auth;
+
+/*
+ * XDR an opaque authentication struct
+ * (see auth.h)
+ */
+bool_t
+xdr_opaque_auth(XDR *xdrs, struct opaque_auth *ap)
+{
+
+ assert(xdrs != NULL);
+ assert(ap != NULL);
+
+ if (xdr_enum(xdrs, &(ap->oa_flavor)))
+ return (xdr_bytes(xdrs, &ap->oa_base,
+ &ap->oa_length, MAX_AUTH_BYTES));
+ return (FALSE);
+}
+
+/* * * * * * * * * * * * * * XDR RPC MESSAGE * * * * * * * * * * * * * * * */
+
+/*
+ * XDR the MSG_ACCEPTED part of a reply message union
+ */
+bool_t
+xdr_accepted_reply(XDR *xdrs, struct accepted_reply *ar)
+{
+ enum accept_stat *par_stat;
+
+ assert(xdrs != NULL);
+ assert(ar != NULL);
+
+ par_stat = &ar->ar_stat;
+
+ /* personalized union, rather than calling xdr_union */
+ if (! xdr_opaque_auth(xdrs, &(ar->ar_verf)))
+ return (FALSE);
+ if (! xdr_enum(xdrs, (enum_t *) par_stat))
+ return (FALSE);
+ switch (ar->ar_stat) {
+
+ case SUCCESS:
+ return ((*(ar->ar_results.proc))(xdrs, ar->ar_results.where));
+
+ case PROG_MISMATCH:
+ if (! xdr_uint32_t(xdrs, &(ar->ar_vers.low)))
+ return (FALSE);
+ return (xdr_uint32_t(xdrs, &(ar->ar_vers.high)));
+
+ case GARBAGE_ARGS:
+ case SYSTEM_ERR:
+ case PROC_UNAVAIL:
+ case PROG_UNAVAIL:
+ break;
+ }
+ return (TRUE); /* TRUE => open ended set of problems */
+}
+
+/*
+ * XDR the MSG_DENIED part of a reply message union
+ */
+bool_t
+xdr_rejected_reply(XDR *xdrs, struct rejected_reply *rr)
+{
+ enum reject_stat *prj_stat;
+ enum auth_stat *prj_why;
+
+ assert(xdrs != NULL);
+ assert(rr != NULL);
+
+ prj_stat = &rr->rj_stat;
+
+ /* personalized union, rather than calling xdr_union */
+ if (! xdr_enum(xdrs, (enum_t *) prj_stat))
+ return (FALSE);
+ switch (rr->rj_stat) {
+
+ case RPC_MISMATCH:
+ if (! xdr_uint32_t(xdrs, &(rr->rj_vers.low)))
+ return (FALSE);
+ return (xdr_uint32_t(xdrs, &(rr->rj_vers.high)));
+
+ case AUTH_ERROR:
+ prj_why = &rr->rj_why;
+ return (xdr_enum(xdrs, (enum_t *) prj_why));
+ }
+ /* NOTREACHED */
+ assert(0);
+ return (FALSE);
+}
+
+static const struct xdr_discrim reply_dscrm[3] = {
+ { (int)MSG_ACCEPTED, (xdrproc_t)xdr_accepted_reply },
+ { (int)MSG_DENIED, (xdrproc_t)xdr_rejected_reply },
+ { __dontcare__, NULL_xdrproc_t } };
+
+/*
+ * XDR a reply message
+ */
+bool_t
+xdr_replymsg(XDR *xdrs, struct rpc_msg *rmsg)
+{
+ enum msg_type *prm_direction;
+ enum reply_stat *prp_stat;
+
+ assert(xdrs != NULL);
+ assert(rmsg != NULL);
+
+ prm_direction = &rmsg->rm_direction;
+ prp_stat = &rmsg->rm_reply.rp_stat;
+
+ if (
+ xdr_uint32_t(xdrs, &(rmsg->rm_xid)) &&
+ xdr_enum(xdrs, (enum_t *) prm_direction) &&
+ (rmsg->rm_direction == REPLY) )
+ return (xdr_union(xdrs, (enum_t *) prp_stat,
+ (caddr_t)(void *)&(rmsg->rm_reply.ru), reply_dscrm,
+ NULL_xdrproc_t));
+ return (FALSE);
+}
+
+
+/*
+ * Serializes the "static part" of a call message header.
+ * The fields include: rm_xid, rm_direction, rpcvers, prog, and vers.
+ * The rm_xid is not really static, but the user can easily munge it on the fly.
+ */
+bool_t
+xdr_callhdr(XDR *xdrs, struct rpc_msg *cmsg)
+{
+ enum msg_type *prm_direction;
+
+ assert(xdrs != NULL);
+ assert(cmsg != NULL);
+
+ prm_direction = &cmsg->rm_direction;
+
+ cmsg->rm_direction = CALL;
+ cmsg->rm_call.cb_rpcvers = RPC_MSG_VERSION;
+ if (
+ (xdrs->x_op == XDR_ENCODE) &&
+ xdr_uint32_t(xdrs, &(cmsg->rm_xid)) &&
+ xdr_enum(xdrs, (enum_t *) prm_direction) &&
+ xdr_uint32_t(xdrs, &(cmsg->rm_call.cb_rpcvers)) &&
+ xdr_uint32_t(xdrs, &(cmsg->rm_call.cb_prog)) )
+ return (xdr_uint32_t(xdrs, &(cmsg->rm_call.cb_vers)));
+ return (FALSE);
+}
+
+/* ************************** Client utility routine ************* */
+
+static void
+accepted(enum accept_stat acpt_stat, struct rpc_err *error)
+{
+
+ assert(error != NULL);
+
+ switch (acpt_stat) {
+
+ case PROG_UNAVAIL:
+ error->re_status = RPC_PROGUNAVAIL;
+ return;
+
+ case PROG_MISMATCH:
+ error->re_status = RPC_PROGVERSMISMATCH;
+ return;
+
+ case PROC_UNAVAIL:
+ error->re_status = RPC_PROCUNAVAIL;
+ return;
+
+ case GARBAGE_ARGS:
+ error->re_status = RPC_CANTDECODEARGS;
+ return;
+
+ case SYSTEM_ERR:
+ error->re_status = RPC_SYSTEMERROR;
+ return;
+
+ case SUCCESS:
+ error->re_status = RPC_SUCCESS;
+ return;
+ }
+ /* NOTREACHED */
+ /* something's wrong, but we don't know what ... */
+ error->re_status = RPC_FAILED;
+ error->re_lb.s1 = (int32_t)MSG_ACCEPTED;
+ error->re_lb.s2 = (int32_t)acpt_stat;
+}
+
+static void
+rejected(enum reject_stat rjct_stat, struct rpc_err *error)
+{
+
+ assert(error != NULL);
+
+ switch (rjct_stat) {
+ case RPC_MISMATCH:
+ error->re_status = RPC_VERSMISMATCH;
+ return;
+
+ case AUTH_ERROR:
+ error->re_status = RPC_AUTHERROR;
+ return;
+ }
+ /* something's wrong, but we don't know what ... */
+ /* NOTREACHED */
+ error->re_status = RPC_FAILED;
+ error->re_lb.s1 = (int32_t)MSG_DENIED;
+ error->re_lb.s2 = (int32_t)rjct_stat;
+}
+
+/*
+ * given a reply message, fills in the error
+ */
+void
+_seterr_reply(struct rpc_msg *msg, struct rpc_err *error)
+{
+
+ assert(msg != NULL);
+ assert(error != NULL);
+
+ /* optimized for normal, SUCCESSful case */
+ switch (msg->rm_reply.rp_stat) {
+
+ case MSG_ACCEPTED:
+ if (msg->acpted_rply.ar_stat == SUCCESS) {
+ error->re_status = RPC_SUCCESS;
+ return;
+ }
+ accepted(msg->acpted_rply.ar_stat, error);
+ break;
+
+ case MSG_DENIED:
+ rejected(msg->rjcted_rply.rj_stat, error);
+ break;
+
+ default:
+ error->re_status = RPC_FAILED;
+ error->re_lb.s1 = (int32_t)(msg->rm_reply.rp_stat);
+ break;
+ }
+ switch (error->re_status) {
+
+ case RPC_VERSMISMATCH:
+ error->re_vers.low = msg->rjcted_rply.rj_vers.low;
+ error->re_vers.high = msg->rjcted_rply.rj_vers.high;
+ break;
+
+ case RPC_AUTHERROR:
+ error->re_why = msg->rjcted_rply.rj_why;
+ break;
+
+ case RPC_PROGVERSMISMATCH:
+ error->re_vers.low = msg->acpted_rply.ar_vers.low;
+ error->re_vers.high = msg->acpted_rply.ar_vers.high;
+ break;
+
+ case RPC_FAILED:
+ case RPC_SUCCESS:
+ case RPC_PROGNOTREGISTERED:
+ case RPC_PMAPFAILURE:
+ case RPC_UNKNOWNPROTO:
+ case RPC_UNKNOWNHOST:
+ case RPC_SYSTEMERROR:
+ case RPC_CANTDECODEARGS:
+ case RPC_PROCUNAVAIL:
+ case RPC_PROGUNAVAIL:
+ case RPC_TIMEDOUT:
+ case RPC_CANTRECV:
+ case RPC_CANTSEND:
+ case RPC_CANTDECODERES:
+ case RPC_CANTENCODEARGS:
+ default:
+ break;
+ }
+}
diff --git a/sys/rpc/rpcb_clnt.c b/sys/rpc/rpcb_clnt.c
new file mode 100644
index 0000000..cf47f70
--- /dev/null
+++ b/sys/rpc/rpcb_clnt.c
@@ -0,0 +1,1382 @@
+/* $NetBSD: rpcb_clnt.c,v 1.6 2000/07/16 06:41:43 itojun Exp $ */
+
+/*
+ * The contents of this file are subject to the Sun Standards
+ * License Version 1.0 the (the "License";) You may not use
+ * this file except in compliance with the License. You may
+ * obtain a copy of the License at lib/libc/rpc/LICENSE
+ *
+ * Software distributed under the License is distributed on
+ * an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either
+ * express or implied. See the License for the specific
+ * language governing rights and limitations under the License.
+ *
+ * The Original Code is Copyright 1998 by Sun Microsystems, Inc
+ *
+ * The Initial Developer of the Original Code is: Sun
+ * Microsystems, Inc.
+ *
+ * All Rights Reserved.
+ *
+ * Sun RPC is a product of Sun Microsystems, Inc. and is provided for
+ * unrestricted use provided that this legend is included on all tape
+ * media and as a part of the software program in whole or part. Users
+ * may copy or modify Sun RPC without charge, but are not authorized
+ * to license or distribute it to anyone else except as part of a product or
+ * program developed by the user.
+ *
+ * SUN RPC IS PROVIDED AS IS WITH NO WARRANTIES OF ANY KIND INCLUDING THE
+ * WARRANTIES OF DESIGN, MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE, OR ARISING FROM A COURSE OF DEALING, USAGE OR TRADE PRACTICE.
+ *
+ * Sun RPC is provided with no support and without any obligation on the
+ * part of Sun Microsystems, Inc. to assist in its use, correction,
+ * modification or enhancement.
+ *
+ * SUN MICROSYSTEMS, INC. SHALL HAVE NO LIABILITY WITH RESPECT TO THE
+ * INFRINGEMENT OF COPYRIGHTS, TRADE SECRETS OR ANY PATENTS BY SUN RPC
+ * OR ANY PART THEREOF.
+ *
+ * In no event will Sun Microsystems, Inc. be liable for any lost revenue
+ * or profits or other special, indirect and consequential damages, even if
+ * Sun has been advised of the possibility of such damages.
+ *
+ * Sun Microsystems, Inc.
+ * 2550 Garcia Avenue
+ * Mountain View, California 94043
+ */
+/*
+ * Copyright (c) 1986-1991 by Sun Microsystems Inc.
+ */
+
+/* #ident "@(#)rpcb_clnt.c 1.27 94/04/24 SMI" */
+
+
+#if defined(LIBC_SCCS) && !defined(lint)
+static char sccsid[] = "@(#)rpcb_clnt.c 1.30 89/06/21 Copyr 1988 Sun Micro";
+#endif
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * rpcb_clnt.c
+ * interface to rpcbind rpc service.
+ *
+ * Copyright (C) 1988, Sun Microsystems, Inc.
+ */
+
+#include "opt_inet6.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/proc.h>
+#include <sys/socket.h>
+#include <sys/socketvar.h>
+
+#include <rpc/rpc.h>
+#include <rpc/rpcb_clnt.h>
+#include <rpc/rpcb_prot.h>
+
+#include "rpc_com.h"
+
+static struct timeval tottimeout = { 60, 0 };
+static const struct timeval rmttimeout = { 3, 0 };
+static const char nullstring[] = "\000";
+
+static CLIENT *local_rpcb(void);
+
+#if 0
+
+static struct timeval rpcbrmttime = { 15, 0 };
+
+#define CACHESIZE 6
+
+struct address_cache {
+ char *ac_host;
+ char *ac_netid;
+ char *ac_uaddr;
+ struct netbuf *ac_taddr;
+ struct address_cache *ac_next;
+};
+
+static struct address_cache *front;
+static int cachesize;
+
+#define CLCR_GET_RPCB_TIMEOUT 1
+#define CLCR_SET_RPCB_TIMEOUT 2
+
+
+extern int __rpc_lowvers;
+
+static struct address_cache *check_cache(const char *, const char *);
+static void delete_cache(struct netbuf *);
+static void add_cache(const char *, const char *, struct netbuf *, char *);
+static CLIENT *getclnthandle(const char *, const struct netconfig *, char **);
+static CLIENT *local_rpcb(void);
+static struct netbuf *got_entry(rpcb_entry_list_ptr, const struct netconfig *);
+
+/*
+ * This routine adjusts the timeout used for calls to the remote rpcbind.
+ * Also, this routine can be used to set the use of portmapper version 2
+ * only when doing rpc_broadcasts
+ * These are private routines that may not be provided in future releases.
+ */
+bool_t
+__rpc_control(request, info)
+ int request;
+ void *info;
+{
+ switch (request) {
+ case CLCR_GET_RPCB_TIMEOUT:
+ *(struct timeval *)info = tottimeout;
+ break;
+ case CLCR_SET_RPCB_TIMEOUT:
+ tottimeout = *(struct timeval *)info;
+ break;
+ case CLCR_SET_LOWVERS:
+ __rpc_lowvers = *(int *)info;
+ break;
+ case CLCR_GET_LOWVERS:
+ *(int *)info = __rpc_lowvers;
+ break;
+ default:
+ return (FALSE);
+ }
+ return (TRUE);
+}
+
+/*
+ * It might seem that a reader/writer lock would be more reasonable here.
+ * However because getclnthandle(), the only user of the cache functions,
+ * may do a delete_cache() operation if a check_cache() fails to return an
+ * address useful to clnt_tli_create(), we may as well use a mutex.
+ */
+/*
+ * As it turns out, if the cache lock is *not* a reader/writer lock, we will
+ * block all clnt_create's if we are trying to connect to a host that's down,
+ * since the lock will be held all during that time.
+ */
+
+/*
+ * The routines check_cache(), add_cache(), delete_cache() manage the
+ * cache of rpcbind addresses for (host, netid).
+ */
+
+static struct address_cache *
+check_cache(host, netid)
+ const char *host, *netid;
+{
+ struct address_cache *cptr;
+
+ /* READ LOCK HELD ON ENTRY: rpcbaddr_cache_lock */
+
+ for (cptr = front; cptr != NULL; cptr = cptr->ac_next) {
+ if (!strcmp(cptr->ac_host, host) &&
+ !strcmp(cptr->ac_netid, netid)) {
+#ifdef ND_DEBUG
+ fprintf(stderr, "Found cache entry for %s: %s\n",
+ host, netid);
+#endif
+ return (cptr);
+ }
+ }
+ return ((struct address_cache *) NULL);
+}
+
+static void
+delete_cache(addr)
+ struct netbuf *addr;
+{
+ struct address_cache *cptr, *prevptr = NULL;
+
+ /* WRITE LOCK HELD ON ENTRY: rpcbaddr_cache_lock */
+ for (cptr = front; cptr != NULL; cptr = cptr->ac_next) {
+ if (!memcmp(cptr->ac_taddr->buf, addr->buf, addr->len)) {
+ free(cptr->ac_host);
+ free(cptr->ac_netid);
+ free(cptr->ac_taddr->buf);
+ free(cptr->ac_taddr);
+ if (cptr->ac_uaddr)
+ free(cptr->ac_uaddr);
+ if (prevptr)
+ prevptr->ac_next = cptr->ac_next;
+ else
+ front = cptr->ac_next;
+ free(cptr);
+ cachesize--;
+ break;
+ }
+ prevptr = cptr;
+ }
+}
+
+static void
+add_cache(host, netid, taddr, uaddr)
+ const char *host, *netid;
+ char *uaddr;
+ struct netbuf *taddr;
+{
+ struct address_cache *ad_cache, *cptr, *prevptr;
+
+ ad_cache = (struct address_cache *)
+ malloc(sizeof (struct address_cache));
+ if (!ad_cache) {
+ return;
+ }
+ ad_cache->ac_host = strdup(host);
+ ad_cache->ac_netid = strdup(netid);
+ ad_cache->ac_uaddr = uaddr ? strdup(uaddr) : NULL;
+ ad_cache->ac_taddr = (struct netbuf *)malloc(sizeof (struct netbuf));
+ if (!ad_cache->ac_host || !ad_cache->ac_netid || !ad_cache->ac_taddr ||
+ (uaddr && !ad_cache->ac_uaddr)) {
+ goto out;
+ }
+ ad_cache->ac_taddr->len = ad_cache->ac_taddr->maxlen = taddr->len;
+ ad_cache->ac_taddr->buf = (char *) malloc(taddr->len);
+ if (ad_cache->ac_taddr->buf == NULL) {
+out:
+ if (ad_cache->ac_host)
+ free(ad_cache->ac_host);
+ if (ad_cache->ac_netid)
+ free(ad_cache->ac_netid);
+ if (ad_cache->ac_uaddr)
+ free(ad_cache->ac_uaddr);
+ if (ad_cache->ac_taddr)
+ free(ad_cache->ac_taddr);
+ free(ad_cache);
+ return;
+ }
+ memcpy(ad_cache->ac_taddr->buf, taddr->buf, taddr->len);
+#ifdef ND_DEBUG
+ fprintf(stderr, "Added to cache: %s : %s\n", host, netid);
+#endif
+
+/* VARIABLES PROTECTED BY rpcbaddr_cache_lock: cptr */
+
+ rwlock_wrlock(&rpcbaddr_cache_lock);
+ if (cachesize < CACHESIZE) {
+ ad_cache->ac_next = front;
+ front = ad_cache;
+ cachesize++;
+ } else {
+ /* Free the last entry */
+ cptr = front;
+ prevptr = NULL;
+ while (cptr->ac_next) {
+ prevptr = cptr;
+ cptr = cptr->ac_next;
+ }
+
+#ifdef ND_DEBUG
+ fprintf(stderr, "Deleted from cache: %s : %s\n",
+ cptr->ac_host, cptr->ac_netid);
+#endif
+ free(cptr->ac_host);
+ free(cptr->ac_netid);
+ free(cptr->ac_taddr->buf);
+ free(cptr->ac_taddr);
+ if (cptr->ac_uaddr)
+ free(cptr->ac_uaddr);
+
+ if (prevptr) {
+ prevptr->ac_next = NULL;
+ ad_cache->ac_next = front;
+ front = ad_cache;
+ } else {
+ front = ad_cache;
+ ad_cache->ac_next = NULL;
+ }
+ free(cptr);
+ }
+ rwlock_unlock(&rpcbaddr_cache_lock);
+}
+
+/*
+ * This routine will return a client handle that is connected to the
+ * rpcbind. If targaddr is non-NULL, the "universal address" of the
+ * host will be stored in *targaddr; the caller is responsible for
+ * freeing this string.
+ * On error, returns NULL and free's everything.
+ */
+static CLIENT *
+getclnthandle(host, nconf, targaddr)
+ const char *host;
+ const struct netconfig *nconf;
+ char **targaddr;
+{
+ CLIENT *client;
+ struct netbuf *addr, taddr;
+ struct netbuf addr_to_delete;
+ struct __rpc_sockinfo si;
+ struct addrinfo hints, *res, *tres;
+ struct address_cache *ad_cache;
+ char *tmpaddr;
+
+/* VARIABLES PROTECTED BY rpcbaddr_cache_lock: ad_cache */
+
+ /* Get the address of the rpcbind. Check cache first */
+ client = NULL;
+ addr_to_delete.len = 0;
+ rwlock_rdlock(&rpcbaddr_cache_lock);
+ ad_cache = NULL;
+ if (host != NULL)
+ ad_cache = check_cache(host, nconf->nc_netid);
+ if (ad_cache != NULL) {
+ addr = ad_cache->ac_taddr;
+ client = clnt_tli_create(RPC_ANYFD, nconf, addr,
+ (rpcprog_t)RPCBPROG, (rpcvers_t)RPCBVERS4, 0, 0);
+ if (client != NULL) {
+ if (targaddr)
+ *targaddr = strdup(ad_cache->ac_uaddr);
+ rwlock_unlock(&rpcbaddr_cache_lock);
+ return (client);
+ }
+ addr_to_delete.len = addr->len;
+ addr_to_delete.buf = (char *)malloc(addr->len);
+ if (addr_to_delete.buf == NULL) {
+ addr_to_delete.len = 0;
+ } else {
+ memcpy(addr_to_delete.buf, addr->buf, addr->len);
+ }
+ }
+ rwlock_unlock(&rpcbaddr_cache_lock);
+ if (addr_to_delete.len != 0) {
+ /*
+ * Assume this may be due to cache data being
+ * outdated
+ */
+ rwlock_wrlock(&rpcbaddr_cache_lock);
+ delete_cache(&addr_to_delete);
+ rwlock_unlock(&rpcbaddr_cache_lock);
+ free(addr_to_delete.buf);
+ }
+ if (!__rpc_nconf2sockinfo(nconf, &si)) {
+ rpc_createerr.cf_stat = RPC_UNKNOWNPROTO;
+ return NULL;
+ }
+
+ memset(&hints, 0, sizeof hints);
+ hints.ai_family = si.si_af;
+ hints.ai_socktype = si.si_socktype;
+ hints.ai_protocol = si.si_proto;
+
+#ifdef CLNT_DEBUG
+ printf("trying netid %s family %d proto %d socktype %d\n",
+ nconf->nc_netid, si.si_af, si.si_proto, si.si_socktype);
+#endif
+
+ if (nconf->nc_protofmly != NULL && strcmp(nconf->nc_protofmly, NC_LOOPBACK) == 0) {
+ client = local_rpcb();
+ if (! client) {
+#ifdef ND_DEBUG
+ clnt_pcreateerror("rpcbind clnt interface");
+#endif
+ return (NULL);
+ } else {
+ struct sockaddr_un sun;
+ if (targaddr) {
+ *targaddr = malloc(sizeof(sun.sun_path));
+ if (*targaddr == NULL) {
+ CLNT_DESTROY(client);
+ return (NULL);
+ }
+ strncpy(*targaddr, _PATH_RPCBINDSOCK,
+ sizeof(sun.sun_path));
+ }
+ return (client);
+ }
+ } else {
+ if (getaddrinfo(host, "sunrpc", &hints, &res) != 0) {
+ rpc_createerr.cf_stat = RPC_UNKNOWNHOST;
+ return NULL;
+ }
+ }
+
+ for (tres = res; tres != NULL; tres = tres->ai_next) {
+ taddr.buf = tres->ai_addr;
+ taddr.len = taddr.maxlen = tres->ai_addrlen;
+
+#ifdef ND_DEBUG
+ {
+ char *ua;
+
+ ua = taddr2uaddr(nconf, &taddr);
+ fprintf(stderr, "Got it [%s]\n", ua);
+ free(ua);
+ }
+#endif
+
+#ifdef ND_DEBUG
+ {
+ int i;
+
+ fprintf(stderr, "\tnetbuf len = %d, maxlen = %d\n",
+ taddr.len, taddr.maxlen);
+ fprintf(stderr, "\tAddress is ");
+ for (i = 0; i < taddr.len; i++)
+ fprintf(stderr, "%u.", ((char *)(taddr.buf))[i]);
+ fprintf(stderr, "\n");
+ }
+#endif
+ client = clnt_tli_create(RPC_ANYFD, nconf, &taddr,
+ (rpcprog_t)RPCBPROG, (rpcvers_t)RPCBVERS4, 0, 0);
+#ifdef ND_DEBUG
+ if (! client) {
+ clnt_pcreateerror("rpcbind clnt interface");
+ }
+#endif
+
+ if (client) {
+ tmpaddr = targaddr ? taddr2uaddr(nconf, &taddr) : NULL;
+ add_cache(host, nconf->nc_netid, &taddr, tmpaddr);
+ if (targaddr)
+ *targaddr = tmpaddr;
+ break;
+ }
+ }
+ if (res)
+ freeaddrinfo(res);
+ return (client);
+}
+
+#endif
+
+/* XXX */
+#define IN4_LOCALHOST_STRING "127.0.0.1"
+#define IN6_LOCALHOST_STRING "::1"
+
+/*
+ * This routine will return a client handle that is connected to the local
+ * rpcbind. Returns NULL on error and frees everything.
+ */
+static CLIENT *
+local_rpcb()
+{
+ CLIENT *client;
+ struct socket *so;
+ size_t tsize;
+ struct sockaddr_un sun;
+ int error;
+
+ /*
+ * Try connecting to the local rpcbind through a local socket
+ * first. If this doesn't work, try all transports defined in
+ * the netconfig file.
+ */
+ memset(&sun, 0, sizeof sun);
+ so = NULL;
+ error = socreate(AF_LOCAL, &so, SOCK_STREAM, 0, curthread->td_ucred,
+ curthread);
+ if (error)
+ goto try_nconf;
+ sun.sun_family = AF_LOCAL;
+ strcpy(sun.sun_path, _PATH_RPCBINDSOCK);
+ sun.sun_len = SUN_LEN(&sun);
+
+ tsize = __rpc_get_t_size(AF_LOCAL, 0, 0);
+ client = clnt_vc_create(so, (struct sockaddr *)&sun, (rpcprog_t)RPCBPROG,
+ (rpcvers_t)RPCBVERS, tsize, tsize);
+
+ if (client != NULL) {
+ /* Mark the socket to be closed in destructor */
+ (void) CLNT_CONTROL(client, CLSET_FD_CLOSE, NULL);
+ return client;
+ }
+
+ /* Nobody needs this socket anymore; free the descriptor. */
+ soclose(so);
+
+try_nconf:
+
+#if 0
+ static struct netconfig *loopnconf;
+ static char *hostname;
+
+/* VARIABLES PROTECTED BY loopnconf_lock: loopnconf */
+ mutex_lock(&loopnconf_lock);
+ if (loopnconf == NULL) {
+ struct netconfig *nconf, *tmpnconf = NULL;
+ void *nc_handle;
+ int fd;
+
+ nc_handle = setnetconfig();
+ if (nc_handle == NULL) {
+ /* fails to open netconfig file */
+ syslog (LOG_ERR, "rpc: failed to open " NETCONFIG);
+ rpc_createerr.cf_stat = RPC_UNKNOWNPROTO;
+ mutex_unlock(&loopnconf_lock);
+ return (NULL);
+ }
+ while ((nconf = getnetconfig(nc_handle)) != NULL) {
+ if ((
+#ifdef INET6
+ strcmp(nconf->nc_protofmly, NC_INET6) == 0 ||
+#endif
+ strcmp(nconf->nc_protofmly, NC_INET) == 0) &&
+ (nconf->nc_semantics == NC_TPI_COTS ||
+ nconf->nc_semantics == NC_TPI_COTS_ORD)) {
+ fd = __rpc_nconf2fd(nconf);
+ /*
+ * Can't create a socket, assume that
+ * this family isn't configured in the kernel.
+ */
+ if (fd < 0)
+ continue;
+ _close(fd);
+ tmpnconf = nconf;
+ if (!strcmp(nconf->nc_protofmly, NC_INET))
+ hostname = IN4_LOCALHOST_STRING;
+ else
+ hostname = IN6_LOCALHOST_STRING;
+ }
+ }
+ if (tmpnconf == NULL) {
+ rpc_createerr.cf_stat = RPC_UNKNOWNPROTO;
+ mutex_unlock(&loopnconf_lock);
+ return (NULL);
+ }
+ loopnconf = getnetconfigent(tmpnconf->nc_netid);
+ /* loopnconf is never freed */
+ endnetconfig(nc_handle);
+ }
+ mutex_unlock(&loopnconf_lock);
+ client = getclnthandle(hostname, loopnconf, NULL);
+ return (client);
+#else
+ return (NULL);
+#endif
+}
+
+/*
+ * Set a mapping between program, version and address.
+ * Calls the rpcbind service to do the mapping.
+ */
+bool_t
+rpcb_set(rpcprog_t program, rpcvers_t version,
+ const struct netconfig *nconf, /* Network structure of transport */
+ const struct netbuf *address) /* Services netconfig address */
+{
+ CLIENT *client;
+ bool_t rslt = FALSE;
+ RPCB parms;
+#if 0
+ char uidbuf[32];
+#endif
+ struct netconfig nconfcopy;
+ struct netbuf addresscopy;
+
+ /* parameter checking */
+ if (nconf == NULL) {
+ rpc_createerr.cf_stat = RPC_UNKNOWNPROTO;
+ return (FALSE);
+ }
+ if (address == NULL) {
+ rpc_createerr.cf_stat = RPC_UNKNOWNADDR;
+ return (FALSE);
+ }
+ client = local_rpcb();
+ if (! client) {
+ return (FALSE);
+ }
+
+ /* convert to universal */
+ /*LINTED const castaway*/
+ nconfcopy = *nconf;
+ addresscopy = *address;
+ parms.r_addr = taddr2uaddr(&nconfcopy, &addresscopy);
+ if (!parms.r_addr) {
+ CLNT_DESTROY(client);
+ rpc_createerr.cf_stat = RPC_N2AXLATEFAILURE;
+ return (FALSE); /* no universal address */
+ }
+ parms.r_prog = program;
+ parms.r_vers = version;
+ parms.r_netid = nconf->nc_netid;
+#if 0
+ /*
+ * Though uid is not being used directly, we still send it for
+ * completeness. For non-unix platforms, perhaps some other
+ * string or an empty string can be sent.
+ */
+ (void) snprintf(uidbuf, sizeof uidbuf, "%d", geteuid());
+ parms.r_owner = uidbuf;
+#else
+ parms.r_owner = "";
+#endif
+
+ CLNT_CALL(client, (rpcproc_t)RPCBPROC_SET, (xdrproc_t) xdr_rpcb,
+ (char *)(void *)&parms, (xdrproc_t) xdr_bool,
+ (char *)(void *)&rslt, tottimeout);
+
+ CLNT_DESTROY(client);
+ free(parms.r_addr, M_RPC);
+ return (rslt);
+}
+
+/*
+ * Remove the mapping between program, version and netbuf address.
+ * Calls the rpcbind service to do the un-mapping.
+ * If netbuf is NULL, unset for all the transports, otherwise unset
+ * only for the given transport.
+ */
+bool_t
+rpcb_unset(rpcprog_t program, rpcvers_t version, const struct netconfig *nconf)
+{
+ CLIENT *client;
+ bool_t rslt = FALSE;
+ RPCB parms;
+#if 0
+ char uidbuf[32];
+#endif
+
+ client = local_rpcb();
+ if (! client) {
+ return (FALSE);
+ }
+
+ parms.r_prog = program;
+ parms.r_vers = version;
+ if (nconf)
+ parms.r_netid = nconf->nc_netid;
+ else {
+ /*LINTED const castaway*/
+ parms.r_netid = (char *)(uintptr_t) &nullstring[0]; /* unsets all */
+ }
+ /*LINTED const castaway*/
+ parms.r_addr = (char *)(uintptr_t) &nullstring[0];
+#if 0
+ (void) snprintf(uidbuf, sizeof uidbuf, "%d", geteuid());
+ parms.r_owner = uidbuf;
+#else
+ parms.r_owner = "";
+#endif
+
+ CLNT_CALL(client, (rpcproc_t)RPCBPROC_UNSET, (xdrproc_t) xdr_rpcb,
+ (char *)(void *)&parms, (xdrproc_t) xdr_bool,
+ (char *)(void *)&rslt, tottimeout);
+
+ CLNT_DESTROY(client);
+ return (rslt);
+}
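
The two routines above are the registration half of the kernel rpcbind client. As a quick illustration, here is a hedged sketch, not taken from the patch, of how a kernel-mode service might advertise itself; the program and version numbers are placeholders, and the netconfig and bound local address are assumed to be supplied by the caller's transport setup code.

	/*
	 * Hedged sketch, not part of the patch: advertise a service binding
	 * with the local rpcbind.  EXAMPLEPROG/EXAMPLEVERS are hypothetical
	 * numbers; nconf and laddr come from the caller's transport setup.
	 */
	#include <rpc/rpc.h>
	#include <rpc/rpcb_clnt.h>

	#define EXAMPLEPROG	((rpcprog_t)0x20000001)
	#define EXAMPLEVERS	((rpcvers_t)1)

	static bool_t
	example_advertise(const struct netconfig *nconf, const struct netbuf *laddr)
	{

		/* Drop any stale registration, then register the current address. */
		(void) rpcb_unset(EXAMPLEPROG, EXAMPLEVERS, nconf);
		return (rpcb_set(EXAMPLEPROG, EXAMPLEVERS, nconf, laddr));
	}
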
+
+#if 0
+
+/*
+ * From the merged list, find the appropriate entry
+ */
+static struct netbuf *
+got_entry(relp, nconf)
+ rpcb_entry_list_ptr relp;
+ const struct netconfig *nconf;
+{
+ struct netbuf *na = NULL;
+ rpcb_entry_list_ptr sp;
+ rpcb_entry *rmap;
+
+ for (sp = relp; sp != NULL; sp = sp->rpcb_entry_next) {
+ rmap = &sp->rpcb_entry_map;
+ if ((strcmp(nconf->nc_proto, rmap->r_nc_proto) == 0) &&
+ (strcmp(nconf->nc_protofmly, rmap->r_nc_protofmly) == 0) &&
+ (nconf->nc_semantics == rmap->r_nc_semantics) &&
+ (rmap->r_maddr != NULL) && (rmap->r_maddr[0] != 0)) {
+ na = uaddr2taddr(nconf, rmap->r_maddr);
+#ifdef ND_DEBUG
+ fprintf(stderr, "\tRemote address is [%s].\n",
+ rmap->r_maddr);
+ if (!na)
+ fprintf(stderr,
+ "\tCouldn't resolve remote address!\n");
+#endif
+ break;
+ }
+ }
+ return (na);
+}
+
+/*
+ * Quick check to see if rpcbind is up. Tries to connect over
+ * local transport.
+ */
+static bool_t
+__rpcbind_is_up()
+{
+ struct netconfig *nconf;
+ struct sockaddr_un sun;
+ void *localhandle;
+ int sock;
+
+ nconf = NULL;
+ localhandle = setnetconfig();
+ while ((nconf = getnetconfig(localhandle)) != NULL) {
+ if (nconf->nc_protofmly != NULL &&
+ strcmp(nconf->nc_protofmly, NC_LOOPBACK) == 0)
+ break;
+ }
+ if (nconf == NULL)
+ return (FALSE);
+
+ endnetconfig(localhandle);
+
+ memset(&sun, 0, sizeof sun);
+ sock = _socket(AF_LOCAL, SOCK_STREAM, 0);
+ if (sock < 0)
+ return (FALSE);
+ sun.sun_family = AF_LOCAL;
+ strncpy(sun.sun_path, _PATH_RPCBINDSOCK, sizeof(sun.sun_path));
+ sun.sun_len = SUN_LEN(&sun);
+
+ if (_connect(sock, (struct sockaddr *)&sun, sun.sun_len) < 0) {
+ _close(sock);
+ return (FALSE);
+ }
+
+ _close(sock);
+ return (TRUE);
+}
+
+/*
+ * An internal function which optimizes rpcb_getaddr function. It also
+ * returns the client handle that it uses to contact the remote rpcbind.
+ *
+ * The algorithm used: If the transport is TCP or UDP, it first tries
+ * version 2 (portmap), 4 and then 3 (svr4). This order should be
+ * changed in the next OS release to 4, 2 and 3. We are assuming that by
+ * that time, version 4 would be available on many machines on the network.
+ * With this algorithm, we get performance as well as a plan for
+ * obsoleting version 2.
+ *
+ * For all other transports, the algorithm remains as 4 and then 3.
+ *
+ * XXX: Due to some problems with t_connect(), we do not reuse the same client
+ * handle for COTS cases and hence in these cases we do not return the
+ * client handle. This code will change if t_connect() ever
+ * starts working properly. Also look under clnt_vc.c.
+ */
+struct netbuf *
+__rpcb_findaddr_timed(program, version, nconf, host, clpp, tp)
+ rpcprog_t program;
+ rpcvers_t version;
+ const struct netconfig *nconf;
+ const char *host;
+ CLIENT **clpp;
+ struct timeval *tp;
+{
+ static bool_t check_rpcbind = TRUE;
+ CLIENT *client = NULL;
+ RPCB parms;
+ enum clnt_stat clnt_st;
+ char *ua = NULL;
+ rpcvers_t vers;
+ struct netbuf *address = NULL;
+ rpcvers_t start_vers = RPCBVERS4;
+ struct netbuf servaddr;
+
+ /* parameter checking */
+ if (nconf == NULL) {
+ rpc_createerr.cf_stat = RPC_UNKNOWNPROTO;
+ return (NULL);
+ }
+
+ parms.r_addr = NULL;
+
+ /*
+ * Use default total timeout if no timeout is specified.
+ */
+ if (tp == NULL)
+ tp = &tottimeout;
+
+#ifdef PORTMAP
+ /* Try version 2 for TCP or UDP */
+ if (strcmp(nconf->nc_protofmly, NC_INET) == 0) {
+ u_short port = 0;
+ struct netbuf remote;
+ rpcvers_t pmapvers = 2;
+ struct pmap pmapparms;
+
+ /*
+ * Try UDP only - there are some portmappers out
+ * there that use UDP only.
+ */
+ if (strcmp(nconf->nc_proto, NC_TCP) == 0) {
+ struct netconfig *newnconf;
+
+ if ((newnconf = getnetconfigent("udp")) == NULL) {
+ rpc_createerr.cf_stat = RPC_UNKNOWNPROTO;
+ return (NULL);
+ }
+ client = getclnthandle(host, newnconf, &parms.r_addr);
+ freenetconfigent(newnconf);
+ } else {
+ client = getclnthandle(host, nconf, &parms.r_addr);
+ }
+ if (client == NULL)
+ return (NULL);
+
+ /*
+ * Set version and retry timeout.
+ */
+ CLNT_CONTROL(client, CLSET_RETRY_TIMEOUT, (char *)&rpcbrmttime);
+ CLNT_CONTROL(client, CLSET_VERS, (char *)&pmapvers);
+
+ pmapparms.pm_prog = program;
+ pmapparms.pm_vers = version;
+ pmapparms.pm_prot = strcmp(nconf->nc_proto, NC_TCP) ?
+ IPPROTO_UDP : IPPROTO_TCP;
+ pmapparms.pm_port = 0; /* not needed */
+ clnt_st = CLNT_CALL(client, (rpcproc_t)PMAPPROC_GETPORT,
+ (xdrproc_t) xdr_pmap, (caddr_t)(void *)&pmapparms,
+ (xdrproc_t) xdr_u_short, (caddr_t)(void *)&port,
+ *tp);
+ if (clnt_st != RPC_SUCCESS) {
+ if ((clnt_st == RPC_PROGVERSMISMATCH) ||
+ (clnt_st == RPC_PROGUNAVAIL))
+ goto try_rpcbind; /* Try different versions */
+ rpc_createerr.cf_stat = RPC_PMAPFAILURE;
+ clnt_geterr(client, &rpc_createerr.cf_error);
+ goto error;
+ } else if (port == 0) {
+ address = NULL;
+ rpc_createerr.cf_stat = RPC_PROGNOTREGISTERED;
+ goto error;
+ }
+ port = htons(port);
+ CLNT_CONTROL(client, CLGET_SVC_ADDR, (char *)&remote);
+ if (((address = (struct netbuf *)
+ malloc(sizeof (struct netbuf))) == NULL) ||
+ ((address->buf = (char *)
+ malloc(remote.len)) == NULL)) {
+ rpc_createerr.cf_stat = RPC_SYSTEMERROR;
+ clnt_geterr(client, &rpc_createerr.cf_error);
+ if (address) {
+ free(address);
+ address = NULL;
+ }
+ goto error;
+ }
+ memcpy(address->buf, remote.buf, remote.len);
+ memcpy(&((char *)address->buf)[sizeof (short)],
+ (char *)(void *)&port, sizeof (short));
+ address->len = address->maxlen = remote.len;
+ goto done;
+ }
+#endif /* PORTMAP */
+
+try_rpcbind:
+ /*
+ * Check if rpcbind is up. This prevents needless delays when
+ * accessing applications such as the keyserver while booting
+ * disklessly.
+ */
+ if (check_rpcbind && strcmp(nconf->nc_protofmly, NC_LOOPBACK) == 0) {
+ if (!__rpcbind_is_up()) {
+ rpc_createerr.cf_stat = RPC_PMAPFAILURE;
+ rpc_createerr.cf_error.re_errno = 0;
+ goto error;
+ }
+ check_rpcbind = FALSE;
+ }
+
+ /*
+ * Now we try version 4 and then 3.
+ * We also send the remote system the address we used to
+ * contact it in case it can help to connect back with us
+ */
+ parms.r_prog = program;
+ parms.r_vers = version;
+ /*LINTED const castaway*/
+ parms.r_owner = (char *) &nullstring[0]; /* not needed; */
+ /* just for xdring */
+ parms.r_netid = nconf->nc_netid; /* not really needed */
+
+ /*
+ * If a COTS transport is being used, try getting address via CLTS
+ * transport. This works only with version 4.
+ */
+ if (nconf->nc_semantics == NC_TPI_COTS_ORD ||
+ nconf->nc_semantics == NC_TPI_COTS) {
+
+ void *handle;
+ struct netconfig *nconf_clts;
+ rpcb_entry_list_ptr relp = NULL;
+
+ if (client == NULL) {
+ /* This did not go through the above PORTMAP/TCP code */
+ if ((handle = __rpc_setconf("datagram_v")) != NULL) {
+ while ((nconf_clts = __rpc_getconf(handle))
+ != NULL) {
+ if (strcmp(nconf_clts->nc_protofmly,
+ nconf->nc_protofmly) != 0) {
+ continue;
+ }
+ client = getclnthandle(host, nconf_clts,
+ &parms.r_addr);
+ break;
+ }
+ __rpc_endconf(handle);
+ }
+ if (client == NULL)
+ goto regular_rpcbind; /* Go the regular way */
+ } else {
+ /* This is a UDP PORTMAP handle. Change to version 4 */
+ vers = RPCBVERS4;
+ CLNT_CONTROL(client, CLSET_VERS, (char *)(void *)&vers);
+ }
+ /*
+ * We also send the remote system the address we used to
+ * contact it in case it can help it connect back with us
+ */
+ if (parms.r_addr == NULL) {
+ /*LINTED const castaway*/
+ parms.r_addr = (char *) &nullstring[0]; /* for XDRing */
+ }
+
+ CLNT_CONTROL(client, CLSET_RETRY_TIMEOUT, (char *)&rpcbrmttime);
+
+ clnt_st = CLNT_CALL(client, (rpcproc_t)RPCBPROC_GETADDRLIST,
+ (xdrproc_t) xdr_rpcb, (char *)(void *)&parms,
+ (xdrproc_t) xdr_rpcb_entry_list_ptr,
+ (char *)(void *)&relp, *tp);
+ if (clnt_st == RPC_SUCCESS) {
+ if ((address = got_entry(relp, nconf)) != NULL) {
+ xdr_free((xdrproc_t) xdr_rpcb_entry_list_ptr,
+ (char *)(void *)&relp);
+ CLNT_CONTROL(client, CLGET_SVC_ADDR,
+ (char *)(void *)&servaddr);
+ __rpc_fixup_addr(address, &servaddr);
+ goto done;
+ }
+ /* Entry not found for this transport */
+ xdr_free((xdrproc_t) xdr_rpcb_entry_list_ptr,
+ (char *)(void *)&relp);
+ /*
+ * XXX: should have perhaps returned with error but
+ * since the remote machine might not always be able
+ * to send the address on all transports, we try the
+ * regular way with regular_rpcbind
+ */
+ goto regular_rpcbind;
+ } else if ((clnt_st == RPC_PROGVERSMISMATCH) ||
+ (clnt_st == RPC_PROGUNAVAIL)) {
+ start_vers = RPCBVERS; /* Try version 3 now */
+ goto regular_rpcbind; /* Try different versions */
+ } else {
+ rpc_createerr.cf_stat = RPC_PMAPFAILURE;
+ clnt_geterr(client, &rpc_createerr.cf_error);
+ goto error;
+ }
+ }
+
+regular_rpcbind:
+
+ /* Now the same transport is to be used to get the address */
+ if (client && ((nconf->nc_semantics == NC_TPI_COTS_ORD) ||
+ (nconf->nc_semantics == NC_TPI_COTS))) {
+ /* A CLTS type of client - destroy it */
+ CLNT_DESTROY(client);
+ client = NULL;
+ }
+
+ if (client == NULL) {
+ client = getclnthandle(host, nconf, &parms.r_addr);
+ if (client == NULL) {
+ goto error;
+ }
+ }
+ if (parms.r_addr == NULL) {
+ /*LINTED const castaway*/
+ parms.r_addr = (char *) &nullstring[0];
+ }
+
+ /* First try from start_vers and then version 3 (RPCBVERS) */
+
+ CLNT_CONTROL(client, CLSET_RETRY_TIMEOUT, (char *) &rpcbrmttime);
+ for (vers = start_vers; vers >= RPCBVERS; vers--) {
+ /* Set the version */
+ CLNT_CONTROL(client, CLSET_VERS, (char *)(void *)&vers);
+ clnt_st = CLNT_CALL(client, (rpcproc_t)RPCBPROC_GETADDR,
+ (xdrproc_t) xdr_rpcb, (char *)(void *)&parms,
+ (xdrproc_t) xdr_wrapstring, (char *)(void *) &ua, *tp);
+ if (clnt_st == RPC_SUCCESS) {
+ if ((ua == NULL) || (ua[0] == 0)) {
+ /* address unknown */
+ rpc_createerr.cf_stat = RPC_PROGNOTREGISTERED;
+ goto error;
+ }
+ address = uaddr2taddr(nconf, ua);
+#ifdef ND_DEBUG
+ fprintf(stderr, "\tRemote address is [%s]\n", ua);
+ if (!address)
+ fprintf(stderr,
+ "\tCouldn't resolve remote address!\n");
+#endif
+ xdr_free((xdrproc_t)xdr_wrapstring,
+ (char *)(void *)&ua);
+
+ if (! address) {
+ /* We don't know about your universal address */
+ rpc_createerr.cf_stat = RPC_N2AXLATEFAILURE;
+ goto error;
+ }
+ CLNT_CONTROL(client, CLGET_SVC_ADDR,
+ (char *)(void *)&servaddr);
+ __rpc_fixup_addr(address, &servaddr);
+ goto done;
+ } else if (clnt_st == RPC_PROGVERSMISMATCH) {
+ struct rpc_err rpcerr;
+
+ clnt_geterr(client, &rpcerr);
+ if (rpcerr.re_vers.low > RPCBVERS4)
+ goto error; /* a new version, can't handle */
+ } else if (clnt_st != RPC_PROGUNAVAIL) {
+			/* Can't handle this error */
+ rpc_createerr.cf_stat = clnt_st;
+ clnt_geterr(client, &rpc_createerr.cf_error);
+ goto error;
+ }
+ }
+
+error:
+ if (client) {
+ CLNT_DESTROY(client);
+ client = NULL;
+ }
+done:
+ if (nconf->nc_semantics != NC_TPI_CLTS) {
+		/* A COTS handle is not reused (it may even be the CLTS one) */
+ if (client) {
+ CLNT_DESTROY(client);
+ client = NULL;
+ }
+ }
+ if (clpp) {
+ *clpp = client;
+ } else if (client) {
+ CLNT_DESTROY(client);
+ }
+ if (parms.r_addr != NULL && parms.r_addr != nullstring)
+ free(parms.r_addr);
+ return (address);
+}
+
+
+/*
+ * Find the mapped address for program, version.
+ * Calls the rpcbind service remotely to do the lookup.
+ * Uses the transport specified in nconf.
+ * Returns FALSE (0) if no map exists, else returns TRUE (1).
+ *
+ * Assumes that the address buffer has been properly allocated.
+ */
+int
+rpcb_getaddr(program, version, nconf, address, host)
+ rpcprog_t program;
+ rpcvers_t version;
+ const struct netconfig *nconf;
+ struct netbuf *address;
+ const char *host;
+{
+ struct netbuf *na;
+
+ if ((na = __rpcb_findaddr_timed(program, version,
+ (struct netconfig *) nconf, (char *) host,
+ (CLIENT **) NULL, (struct timeval *) NULL)) == NULL)
+ return (FALSE);
+
+ if (na->len > address->maxlen) {
+ /* Too long address */
+ free(na->buf);
+ free(na);
+ rpc_createerr.cf_stat = RPC_FAILED;
+ return (FALSE);
+ }
+ memcpy(address->buf, na->buf, (size_t)na->len);
+ address->len = na->len;
+ free(na->buf);
+ free(na);
+ return (TRUE);
+}
+
+/*
+ * Get a copy of the current maps.
+ * Calls the rpcbind service remotely to get the maps.
+ *
+ * It returns only a list of the services
+ * It returns NULL on failure.
+ */
+rpcblist *
+rpcb_getmaps(nconf, host)
+ const struct netconfig *nconf;
+ const char *host;
+{
+ rpcblist_ptr head = NULL;
+ CLIENT *client;
+ enum clnt_stat clnt_st;
+ rpcvers_t vers = 0;
+
+ client = getclnthandle(host, nconf, NULL);
+ if (client == NULL) {
+ return (head);
+ }
+ clnt_st = CLNT_CALL(client, (rpcproc_t)RPCBPROC_DUMP,
+ (xdrproc_t) xdr_void, NULL, (xdrproc_t) xdr_rpcblist_ptr,
+ (char *)(void *)&head, tottimeout);
+ if (clnt_st == RPC_SUCCESS)
+ goto done;
+
+ if ((clnt_st != RPC_PROGVERSMISMATCH) &&
+ (clnt_st != RPC_PROGUNAVAIL)) {
+ rpc_createerr.cf_stat = RPC_RPCBFAILURE;
+ clnt_geterr(client, &rpc_createerr.cf_error);
+ goto done;
+ }
+
+ /* fall back to earlier version */
+ CLNT_CONTROL(client, CLGET_VERS, (char *)(void *)&vers);
+ if (vers == RPCBVERS4) {
+ vers = RPCBVERS;
+ CLNT_CONTROL(client, CLSET_VERS, (char *)(void *)&vers);
+ if (CLNT_CALL(client, (rpcproc_t)RPCBPROC_DUMP,
+ (xdrproc_t) xdr_void, NULL, (xdrproc_t) xdr_rpcblist_ptr,
+ (char *)(void *)&head, tottimeout) == RPC_SUCCESS)
+ goto done;
+ }
+ rpc_createerr.cf_stat = RPC_RPCBFAILURE;
+ clnt_geterr(client, &rpc_createerr.cf_error);
+
+done:
+ CLNT_DESTROY(client);
+ return (head);
+}
+
+/*
+ * rpcbinder remote-call-service interface.
+ * This routine is used to call the rpcbind remote call service
+ * which will look up a service program in the address maps, and then
+ * remotely call that routine with the given parameters. This allows
+ * programs to do a lookup and call in one step.
+ */
+enum clnt_stat
+rpcb_rmtcall(nconf, host, prog, vers, proc, xdrargs, argsp,
+ xdrres, resp, tout, addr_ptr)
+ const struct netconfig *nconf; /* Netconfig structure */
+ const char *host; /* Remote host name */
+ rpcprog_t prog;
+ rpcvers_t vers;
+ rpcproc_t proc; /* Remote proc identifiers */
+ xdrproc_t xdrargs, xdrres; /* XDR routines */
+ caddr_t argsp, resp; /* Argument and Result */
+ struct timeval tout; /* Timeout value for this call */
+ const struct netbuf *addr_ptr; /* Preallocated netbuf address */
+{
+ CLIENT *client;
+ enum clnt_stat stat;
+ struct r_rpcb_rmtcallargs a;
+ struct r_rpcb_rmtcallres r;
+ rpcvers_t rpcb_vers;
+
+ stat = 0;
+ client = getclnthandle(host, nconf, NULL);
+ if (client == NULL) {
+ return (RPC_FAILED);
+ }
+ /*LINTED const castaway*/
+ CLNT_CONTROL(client, CLSET_RETRY_TIMEOUT, (char *)(void *)&rmttimeout);
+ a.prog = prog;
+ a.vers = vers;
+ a.proc = proc;
+ a.args.args_val = argsp;
+ a.xdr_args = xdrargs;
+ r.addr = NULL;
+ r.results.results_val = resp;
+ r.xdr_res = xdrres;
+
+ for (rpcb_vers = RPCBVERS4; rpcb_vers >= RPCBVERS; rpcb_vers--) {
+ CLNT_CONTROL(client, CLSET_VERS, (char *)(void *)&rpcb_vers);
+ stat = CLNT_CALL(client, (rpcproc_t)RPCBPROC_CALLIT,
+ (xdrproc_t) xdr_rpcb_rmtcallargs, (char *)(void *)&a,
+ (xdrproc_t) xdr_rpcb_rmtcallres, (char *)(void *)&r, tout);
+ if ((stat == RPC_SUCCESS) && (addr_ptr != NULL)) {
+ struct netbuf *na;
+ /*LINTED const castaway*/
+ na = uaddr2taddr((struct netconfig *) nconf, r.addr);
+ if (!na) {
+ stat = RPC_N2AXLATEFAILURE;
+ /*LINTED const castaway*/
+ ((struct netbuf *) addr_ptr)->len = 0;
+ goto error;
+ }
+ if (na->len > addr_ptr->maxlen) {
+ /* Too long address */
+ stat = RPC_FAILED; /* XXX A better error no */
+ free(na->buf);
+ free(na);
+ /*LINTED const castaway*/
+ ((struct netbuf *) addr_ptr)->len = 0;
+ goto error;
+ }
+ memcpy(addr_ptr->buf, na->buf, (size_t)na->len);
+ /*LINTED const castaway*/
+ ((struct netbuf *)addr_ptr)->len = na->len;
+ free(na->buf);
+ free(na);
+ break;
+ } else if ((stat != RPC_PROGVERSMISMATCH) &&
+ (stat != RPC_PROGUNAVAIL)) {
+ goto error;
+ }
+ }
+error:
+ CLNT_DESTROY(client);
+ if (r.addr)
+ xdr_free((xdrproc_t) xdr_wrapstring, (char *)(void *)&r.addr);
+ return (stat);
+}
+
+/*
+ * Gets the time on the remote host.
+ * Returns TRUE (1) on success, FALSE (0) otherwise.
+ */
+bool_t
+rpcb_gettime(host, timep)
+ const char *host;
+ time_t *timep;
+{
+ CLIENT *client = NULL;
+ void *handle;
+ struct netconfig *nconf;
+ rpcvers_t vers;
+ enum clnt_stat st;
+
+
+ if ((host == NULL) || (host[0] == 0)) {
+ time(timep);
+ return (TRUE);
+ }
+
+ if ((handle = __rpc_setconf("netpath")) == NULL) {
+ rpc_createerr.cf_stat = RPC_UNKNOWNPROTO;
+ return (FALSE);
+ }
+ rpc_createerr.cf_stat = RPC_SUCCESS;
+ while (client == NULL) {
+ if ((nconf = __rpc_getconf(handle)) == NULL) {
+ if (rpc_createerr.cf_stat == RPC_SUCCESS)
+ rpc_createerr.cf_stat = RPC_UNKNOWNPROTO;
+ break;
+ }
+ client = getclnthandle(host, nconf, NULL);
+ if (client)
+ break;
+ }
+ __rpc_endconf(handle);
+ if (client == (CLIENT *) NULL) {
+ return (FALSE);
+ }
+
+ st = CLNT_CALL(client, (rpcproc_t)RPCBPROC_GETTIME,
+ (xdrproc_t) xdr_void, NULL,
+ (xdrproc_t) xdr_int, (char *)(void *)timep, tottimeout);
+
+ if ((st == RPC_PROGVERSMISMATCH) || (st == RPC_PROGUNAVAIL)) {
+ CLNT_CONTROL(client, CLGET_VERS, (char *)(void *)&vers);
+ if (vers == RPCBVERS4) {
+ /* fall back to earlier version */
+ vers = RPCBVERS;
+ CLNT_CONTROL(client, CLSET_VERS, (char *)(void *)&vers);
+ st = CLNT_CALL(client, (rpcproc_t)RPCBPROC_GETTIME,
+ (xdrproc_t) xdr_void, NULL,
+ (xdrproc_t) xdr_int, (char *)(void *)timep,
+ tottimeout);
+ }
+ }
+ CLNT_DESTROY(client);
+ return (st == RPC_SUCCESS? TRUE: FALSE);
+}
+
+static bool_t
+xdr_netbuf(XDR *xdrs, struct netbuf *objp)
+{
+ bool_t dummy;
+ void **pp;
+
+ if (!xdr_uint32_t(xdrs, (uint32_t *) &objp->maxlen)) {
+ return (FALSE);
+ }
+ pp = &objp->buf;
+ dummy = xdr_bytes(xdrs, (char **) pp,
+ (u_int *)&(objp->len), objp->maxlen);
+ return (dummy);
+}
+
+/*
+ * Converts taddr to universal address. This routine should never
+ * really be called because local n2a libraries are always provided.
+ */
+char *
+rpcb_taddr2uaddr(struct netconfig *nconf, struct netbuf *taddr)
+{
+ CLIENT *client;
+ char *uaddr = NULL;
+
+
+ /* parameter checking */
+ if (nconf == NULL) {
+ rpc_createerr.cf_stat = RPC_UNKNOWNPROTO;
+ return (NULL);
+ }
+ if (taddr == NULL) {
+ rpc_createerr.cf_stat = RPC_UNKNOWNADDR;
+ return (NULL);
+ }
+ client = local_rpcb();
+ if (! client) {
+ return (NULL);
+ }
+
+ CLNT_CALL(client, (rpcproc_t)RPCBPROC_TADDR2UADDR,
+ (xdrproc_t) xdr_netbuf, (char *)(void *)taddr,
+ (xdrproc_t) xdr_wrapstring, (char *)(void *)&uaddr, tottimeout);
+ CLNT_DESTROY(client);
+ return (uaddr);
+}
+
+/*
+ * Converts universal address to netbuf. This routine should never
+ * really be called because local n2a libraries are always provided.
+ */
+struct netbuf *
+rpcb_uaddr2taddr(struct netconfig *nconf, char *uaddr)
+{
+ CLIENT *client;
+ struct netbuf *taddr;
+
+
+ /* parameter checking */
+ if (nconf == NULL) {
+ rpc_createerr.cf_stat = RPC_UNKNOWNPROTO;
+ return (NULL);
+ }
+ if (uaddr == NULL) {
+ rpc_createerr.cf_stat = RPC_UNKNOWNADDR;
+ return (NULL);
+ }
+ client = local_rpcb();
+ if (! client) {
+ return (NULL);
+ }
+
+ taddr = (struct netbuf *)malloc(sizeof (struct netbuf), M_RPC, M_WAITOK|M_ZERO);
+ if (CLNT_CALL(client, (rpcproc_t)RPCBPROC_UADDR2TADDR,
+ (xdrproc_t) xdr_wrapstring, (char *)(void *)&uaddr,
+ (xdrproc_t) xdr_netbuf, (char *)(void *)taddr,
+ tottimeout) != RPC_SUCCESS) {
+ free(taddr);
+ taddr = NULL;
+ }
+ CLNT_DESTROY(client);
+ return (taddr);
+}
+
+#endif
diff --git a/sys/rpc/rpcb_clnt.h b/sys/rpc/rpcb_clnt.h
new file mode 100644
index 0000000..638bb5d
--- /dev/null
+++ b/sys/rpc/rpcb_clnt.h
@@ -0,0 +1,89 @@
+/* $NetBSD: rpcb_clnt.h,v 1.1 2000/06/02 22:57:56 fvdl Exp $ */
+/* $FreeBSD$ */
+
+/*
+ * Sun RPC is a product of Sun Microsystems, Inc. and is provided for
+ * unrestricted use provided that this legend is included on all tape
+ * media and as a part of the software program in whole or part. Users
+ * may copy or modify Sun RPC without charge, but are not authorized
+ * to license or distribute it to anyone else except as part of a product or
+ * program developed by the user.
+ *
+ * SUN RPC IS PROVIDED AS IS WITH NO WARRANTIES OF ANY KIND INCLUDING THE
+ * WARRANTIES OF DESIGN, MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE, OR ARISING FROM A COURSE OF DEALING, USAGE OR TRADE PRACTICE.
+ *
+ * Sun RPC is provided with no support and without any obligation on the
+ * part of Sun Microsystems, Inc. to assist in its use, correction,
+ * modification or enhancement.
+ *
+ * SUN MICROSYSTEMS, INC. SHALL HAVE NO LIABILITY WITH RESPECT TO THE
+ * INFRINGEMENT OF COPYRIGHTS, TRADE SECRETS OR ANY PATENTS BY SUN RPC
+ * OR ANY PART THEREOF.
+ *
+ * In no event will Sun Microsystems, Inc. be liable for any lost revenue
+ * or profits or other special, indirect and consequential damages, even if
+ * Sun has been advised of the possibility of such damages.
+ *
+ * Sun Microsystems, Inc.
+ * 2550 Garcia Avenue
+ * Mountain View, California 94043
+ */
+/*
+ * Copyright (c) 1986 - 1991 by Sun Microsystems, Inc.
+ */
+
+/*
+ * rpcb_clnt.h
+ * Supplies C routines to get to rpcbind services.
+ *
+ */
+
+/*
+ * Usage:
+ * success = rpcb_set(program, version, nconf, address);
+ * success = rpcb_unset(program, version, nconf);
+ * success = rpcb_getaddr(program, version, nconf, host);
+ * head = rpcb_getmaps(nconf, host);
+ * clnt_stat = rpcb_rmtcall(nconf, host, program, version, procedure,
+ * xdrargs, argsp, xdrres, resp, tout, addr_ptr)
+ * success = rpcb_gettime(host, timep)
+ * uaddr = rpcb_taddr2uaddr(nconf, taddr);
+ *	taddr = rpcb_uaddr2taddr(nconf, uaddr);
+ */
+
+#ifndef _RPC_RPCB_CLNT_H
+#define _RPC_RPCB_CLNT_H
+
+/* #pragma ident "@(#)rpcb_clnt.h 1.13 94/04/25 SMI" */
+/* rpcb_clnt.h 1.3 88/12/05 SMI */
+
+#include <rpc/types.h>
+#ifndef _KERNEL
+#include <rpc/rpcb_prot.h>
+#endif
+
+__BEGIN_DECLS
+extern bool_t rpcb_set(const rpcprog_t, const rpcvers_t,
+ const struct netconfig *, const struct netbuf *);
+extern bool_t rpcb_unset(const rpcprog_t, const rpcvers_t,
+ const struct netconfig *);
+#ifndef _KERNEL
+extern rpcblist *rpcb_getmaps(const struct netconfig *, const char *);
+extern enum clnt_stat rpcb_rmtcall(const struct netconfig *,
+ const char *, const rpcprog_t,
+ const rpcvers_t, const rpcproc_t,
+ const xdrproc_t, const caddr_t,
+ const xdrproc_t, const caddr_t,
+ const struct timeval,
+ const struct netbuf *);
+extern bool_t rpcb_getaddr(const rpcprog_t, const rpcvers_t,
+ const struct netconfig *, struct netbuf *,
+ const char *);
+extern bool_t rpcb_gettime(const char *, time_t *);
+extern char *rpcb_taddr2uaddr(struct netconfig *, struct netbuf *);
+extern struct netbuf *rpcb_uaddr2taddr(struct netconfig *, char *);
+#endif
+__END_DECLS
+
+#endif /* !_RPC_RPCB_CLNT_H */
diff --git a/sys/rpc/rpcb_prot.c b/sys/rpc/rpcb_prot.c
new file mode 100644
index 0000000..06a659e
--- /dev/null
+++ b/sys/rpc/rpcb_prot.c
@@ -0,0 +1,244 @@
+/* $NetBSD: rpcb_prot.c,v 1.3 2000/07/14 08:40:42 fvdl Exp $ */
+
+/*
+ * Sun RPC is a product of Sun Microsystems, Inc. and is provided for
+ * unrestricted use provided that this legend is included on all tape
+ * media and as a part of the software program in whole or part. Users
+ * may copy or modify Sun RPC without charge, but are not authorized
+ * to license or distribute it to anyone else except as part of a product or
+ * program developed by the user.
+ *
+ * SUN RPC IS PROVIDED AS IS WITH NO WARRANTIES OF ANY KIND INCLUDING THE
+ * WARRANTIES OF DESIGN, MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE, OR ARISING FROM A COURSE OF DEALING, USAGE OR TRADE PRACTICE.
+ *
+ * Sun RPC is provided with no support and without any obligation on the
+ * part of Sun Microsystems, Inc. to assist in its use, correction,
+ * modification or enhancement.
+ *
+ * SUN MICROSYSTEMS, INC. SHALL HAVE NO LIABILITY WITH RESPECT TO THE
+ * INFRINGEMENT OF COPYRIGHTS, TRADE SECRETS OR ANY PATENTS BY SUN RPC
+ * OR ANY PART THEREOF.
+ *
+ * In no event will Sun Microsystems, Inc. be liable for any lost revenue
+ * or profits or other special, indirect and consequential damages, even if
+ * Sun has been advised of the possibility of such damages.
+ *
+ * Sun Microsystems, Inc.
+ * 2550 Garcia Avenue
+ * Mountain View, California 94043
+ */
+/*
+ * Copyright (c) 1986-1991 by Sun Microsystems Inc.
+ */
+
+/* #ident "@(#)rpcb_prot.c 1.13 94/04/24 SMI" */
+
+#if defined(LIBC_SCCS) && !defined(lint)
+static char sccsid[] = "@(#)rpcb_prot.c 1.9 89/04/21 Copyr 1984 Sun Micro";
+#endif
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * rpcb_prot.c
+ * XDR routines for the rpcbinder version 3.
+ *
+ * Copyright (C) 1984, 1988, Sun Microsystems, Inc.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+
+#include <rpc/rpc.h>
+#include <rpc/rpcb_prot.h>
+
+bool_t
+xdr_pmap(XDR *xdrs, struct pmap *regs)
+{
+
+ if (xdr_u_long(xdrs, &regs->pm_prog) &&
+ xdr_u_long(xdrs, &regs->pm_vers) &&
+ xdr_u_long(xdrs, &regs->pm_prot))
+ return (xdr_u_long(xdrs, &regs->pm_port));
+ return (FALSE);
+}
+
+bool_t
+xdr_rpcb(XDR *xdrs, RPCB *objp)
+{
+ if (!xdr_uint32_t(xdrs, &objp->r_prog)) {
+ return (FALSE);
+ }
+ if (!xdr_uint32_t(xdrs, &objp->r_vers)) {
+ return (FALSE);
+ }
+ if (!xdr_string(xdrs, &objp->r_netid, (u_int)~0)) {
+ return (FALSE);
+ }
+ if (!xdr_string(xdrs, &objp->r_addr, (u_int)~0)) {
+ return (FALSE);
+ }
+ if (!xdr_string(xdrs, &objp->r_owner, (u_int)~0)) {
+ return (FALSE);
+ }
+ return (TRUE);
+}
+
+/*
+ * rpcblist_ptr implements a linked list. The RPCL definition from
+ * rpcb_prot.x is:
+ *
+ * struct rpcblist {
+ * rpcb rpcb_map;
+ * struct rpcblist *rpcb_next;
+ * };
+ * typedef rpcblist *rpcblist_ptr;
+ *
+ * Recall that "pointers" in XDR are encoded as a boolean, indicating whether
+ * there's any data behind the pointer, followed by the data (if any exists).
+ * The boolean can be interpreted as ``more data follows me''; if FALSE then
+ * nothing follows the boolean; if TRUE then the boolean is followed by an
+ * actual struct rpcb, and another rpcblist_ptr (declared in RPCL as "struct
+ * rpcblist *").
+ *
+ * This could be implemented via the xdr_pointer type, though this would
+ * result in one recursive call per element in the list. Rather than do that
+ * we can ``unwind'' the recursion into a while loop and use xdr_reference to
+ * serialize the rpcb elements.
+ */
+
+bool_t
+xdr_rpcblist_ptr(XDR *xdrs, rpcblist_ptr *rp)
+{
+ /*
+ * more_elements is pre-computed in case the direction is
+ * XDR_ENCODE or XDR_FREE. more_elements is overwritten by
+ * xdr_bool when the direction is XDR_DECODE.
+ */
+ bool_t more_elements;
+ int freeing = (xdrs->x_op == XDR_FREE);
+ rpcblist_ptr next;
+ rpcblist_ptr next_copy;
+
+ next = NULL;
+ for (;;) {
+ more_elements = (bool_t)(*rp != NULL);
+ if (! xdr_bool(xdrs, &more_elements)) {
+ return (FALSE);
+ }
+ if (! more_elements) {
+ return (TRUE); /* we are done */
+ }
+ /*
+ * the unfortunate side effect of non-recursion is that in
+ * the case of freeing we must remember the next object
+ * before we free the current object ...
+ */
+ if (freeing && *rp)
+ next = (*rp)->rpcb_next;
+ if (! xdr_reference(xdrs, (caddr_t *)rp,
+ (u_int)sizeof (RPCBLIST), (xdrproc_t)xdr_rpcb)) {
+ return (FALSE);
+ }
+ if (freeing) {
+ next_copy = next;
+ rp = &next_copy;
+ /*
+ * Note that in the subsequent iteration, next_copy
+ * gets nulled out by the xdr_reference
+ * but next itself survives.
+ */
+ } else if (*rp) {
+ rp = &((*rp)->rpcb_next);
+ }
+ }
+ /*NOTREACHED*/
+}
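
To make the boolean-prefixed layout described in the comment above concrete, here is a hedged sketch, not part of the patch, that serializes a two-entry list into a memory buffer. It assumes the xdrmem_create() and XDR_GETPOS() primitives from the XDR code imported alongside this file; the program number (100021, the NLM) and the universal address are placeholders.

	/*
	 * Hedged sketch, not part of the patch: encode a two-entry rpcblist
	 * and observe the "more data follows" boolean framing on the wire.
	 */
	#include <sys/param.h>
	#include <sys/systm.h>
	#include <rpc/rpc.h>
	#include <rpc/rpcb_prot.h>

	static void
	example_encode_rpcblist(void)
	{
		struct rpcblist a, b;
		rpcblist_ptr head;
		char buf[512];
		XDR xdrs;

		/* Two placeholder entries for the NLM (program 100021, version 4). */
		a.rpcb_map.r_prog = 100021;
		a.rpcb_map.r_vers = 4;
		a.rpcb_map.r_netid = "udp";
		a.rpcb_map.r_addr = "0.0.0.0.3.5";	/* placeholder universal address */
		a.rpcb_map.r_owner = "";
		b = a;
		b.rpcb_map.r_netid = "tcp";
		a.rpcb_next = &b;
		b.rpcb_next = NULL;
		head = &a;

		xdrmem_create(&xdrs, buf, sizeof(buf), XDR_ENCODE);
		if (xdr_rpcblist_ptr(&xdrs, &head))
			/* buf now holds: TRUE, rpcb "udp", TRUE, rpcb "tcp", FALSE. */
			printf("encoded %u bytes\n", XDR_GETPOS(&xdrs));
		XDR_DESTROY(&xdrs);
	}
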
+
+#if 0
+/*
+ * xdr_rpcblist() is specified to take a RPCBLIST **, but is identical in
+ * functionality to xdr_rpcblist_ptr().
+ */
+bool_t
+xdr_rpcblist(XDR *xdrs, RPCBLIST **rp)
+{
+ bool_t dummy;
+
+ dummy = xdr_rpcblist_ptr(xdrs, (rpcblist_ptr *)rp);
+ return (dummy);
+}
+#endif
+
+bool_t
+xdr_rpcb_entry(XDR *xdrs, rpcb_entry *objp)
+{
+ if (!xdr_string(xdrs, &objp->r_maddr, (u_int)~0)) {
+ return (FALSE);
+ }
+ if (!xdr_string(xdrs, &objp->r_nc_netid, (u_int)~0)) {
+ return (FALSE);
+ }
+ if (!xdr_uint32_t(xdrs, &objp->r_nc_semantics)) {
+ return (FALSE);
+ }
+ if (!xdr_string(xdrs, &objp->r_nc_protofmly, (u_int)~0)) {
+ return (FALSE);
+ }
+ if (!xdr_string(xdrs, &objp->r_nc_proto, (u_int)~0)) {
+ return (FALSE);
+ }
+ return (TRUE);
+}
+
+bool_t
+xdr_rpcb_entry_list_ptr(XDR *xdrs, rpcb_entry_list_ptr *rp)
+{
+ /*
+ * more_elements is pre-computed in case the direction is
+ * XDR_ENCODE or XDR_FREE. more_elements is overwritten by
+ * xdr_bool when the direction is XDR_DECODE.
+ */
+ bool_t more_elements;
+ int freeing = (xdrs->x_op == XDR_FREE);
+ rpcb_entry_list_ptr next;
+ rpcb_entry_list_ptr next_copy;
+
+ next = NULL;
+ for (;;) {
+ more_elements = (bool_t)(*rp != NULL);
+ if (! xdr_bool(xdrs, &more_elements)) {
+ return (FALSE);
+ }
+ if (! more_elements) {
+ return (TRUE); /* we are done */
+ }
+ /*
+ * the unfortunate side effect of non-recursion is that in
+ * the case of freeing we must remember the next object
+ * before we free the current object ...
+ */
+ if (freeing)
+ next = (*rp)->rpcb_entry_next;
+ if (! xdr_reference(xdrs, (caddr_t *)rp,
+ (u_int)sizeof (rpcb_entry_list),
+ (xdrproc_t)xdr_rpcb_entry)) {
+ return (FALSE);
+ }
+ if (freeing && *rp) {
+ next_copy = next;
+ rp = &next_copy;
+ /*
+ * Note that in the subsequent iteration, next_copy
+ * gets nulled out by the xdr_reference
+ * but next itself survives.
+ */
+ } else if (*rp) {
+ rp = &((*rp)->rpcb_entry_next);
+ }
+ }
+ /*NOTREACHED*/
+}
diff --git a/sys/rpc/rpcb_prot.h b/sys/rpc/rpcb_prot.h
new file mode 100644
index 0000000..ac4ef6c
--- /dev/null
+++ b/sys/rpc/rpcb_prot.h
@@ -0,0 +1,579 @@
+/*
+ * Please do not edit this file.
+ * It was generated using rpcgen.
+ */
+
+#ifndef _RPCB_PROT_H_RPCGEN
+#define _RPCB_PROT_H_RPCGEN
+
+#include <rpc/rpc.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * $FreeBSD$
+ *
+ * Sun RPC is a product of Sun Microsystems, Inc. and is provided for
+ * unrestricted use provided that this legend is included on all tape
+ * media and as a part of the software program in whole or part. Users
+ * may copy or modify Sun RPC without charge, but are not authorized
+ * to license or distribute it to anyone else except as part of a product or
+ * program developed by the user.
+ *
+ * SUN RPC IS PROVIDED AS IS WITH NO WARRANTIES OF ANY KIND INCLUDING THE
+ * WARRANTIES OF DESIGN, MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE, OR ARISING FROM A COURSE OF DEALING, USAGE OR TRADE PRACTICE.
+ *
+ * Sun RPC is provided with no support and without any obligation on the
+ * part of Sun Microsystems, Inc. to assist in its use, correction,
+ * modification or enhancement.
+ *
+ * SUN MICROSYSTEMS, INC. SHALL HAVE NO LIABILITY WITH RESPECT TO THE
+ * INFRINGEMENT OF COPYRIGHTS, TRADE SECRETS OR ANY PATENTS BY SUN RPC
+ * OR ANY PART THEREOF.
+ *
+ * In no event will Sun Microsystems, Inc. be liable for any lost revenue
+ * or profits or other special, indirect and consequential damages, even if
+ * Sun has been advised of the possibility of such damages.
+ *
+ * Sun Microsystems, Inc.
+ * 2550 Garcia Avenue
+ * Mountain View, California 94043
+ */
+/*
+ * Copyright (c) 1988 by Sun Microsystems, Inc.
+ */
+/* from rpcb_prot.x */
+
+/* #pragma ident "@(#)rpcb_prot.x 1.5 94/04/29 SMI" */
+
+#ifndef _KERNEL
+
+
+/*
+ * The following procedures are supported by the protocol in version 3:
+ *
+ * RPCBPROC_NULL() returns ()
+ * takes nothing, returns nothing
+ *
+ * RPCBPROC_SET(rpcb) returns (bool_t)
+ * TRUE is success, FALSE is failure. Registers the tuple
+ * [prog, vers, address, owner, netid].
+ * Finds out owner and netid information on its own.
+ *
+ * RPCBPROC_UNSET(rpcb) returns (bool_t)
+ * TRUE is success, FALSE is failure. Un-registers tuple
+ *	[prog, vers, netid]; the address is ignored.
+ * If netid is NULL, unregister all.
+ *
+ * RPCBPROC_GETADDR(rpcb) returns (string).
+ * 0 is failure. Otherwise returns the universal address where the
+ * triple [prog, vers, netid] is registered. Ignore address and owner.
+ *
+ * RPCBPROC_DUMP() RETURNS (rpcblist_ptr)
+ * used to dump the entire rpcbind maps
+ *
+ * RPCBPROC_CALLIT(rpcb_rmtcallargs)
+ * RETURNS (rpcb_rmtcallres);
+ * Calls the procedure on the remote machine. If it is not registered,
+ * this procedure is quiet; i.e. it does not return error information!!!
+ * This routine only passes null authentication parameters.
+ * It has no interface to xdr routines for RPCBPROC_CALLIT.
+ *
+ * RPCBPROC_GETTIME() returns (int).
+ *	Gets the remote machine's time
+ *
+ * RPCBPROC_UADDR2TADDR(string)	RETURNS (struct netbuf)
+ * Returns the netbuf address from universal address.
+ *
+ * RPCBPROC_TADDR2UADDR(struct netbuf) RETURNS (string)
+ * Returns the universal address from netbuf address.
+ *
+ * END OF RPCBIND VERSION 3 PROCEDURES
+ */
+/*
+ * Except for RPCBPROC_CALLIT, the procedures above are carried over to
+ * rpcbind version 4. Those below are added or modified for version 4.
+ * NOTE: RPCBPROC_BCAST HAS THE SAME FUNCTIONALITY AND PROCEDURE NUMBER
+ * AS RPCBPROC_CALLIT.
+ *
+ * RPCBPROC_BCAST(rpcb_rmtcallargs)
+ * RETURNS (rpcb_rmtcallres);
+ * Calls the procedure on the remote machine. If it is not registered,
+ * this procedure IS quiet; i.e. it DOES NOT return error information!!!
+ * This routine should be used for broadcasting and nothing else.
+ *
+ * RPCBPROC_GETVERSADDR(rpcb) returns (string).
+ * 0 is failure. Otherwise returns the universal address where the
+ * triple [prog, vers, netid] is registered. Ignore address and owner.
+ * Same as RPCBPROC_GETADDR except that if the given version number
+ * is not available, the address is not returned.
+ *
+ * RPCBPROC_INDIRECT(rpcb_rmtcallargs)
+ * RETURNS (rpcb_rmtcallres);
+ * Calls the procedure on the remote machine. If it is not registered,
+ * this procedure is NOT quiet; i.e. it DOES return error information!!!
+ * as any normal application would expect.
+ *
+ * RPCBPROC_GETADDRLIST(rpcb) returns (rpcb_entry_list_ptr).
+ * Same as RPCBPROC_GETADDR except that it returns a list of all the
+ * addresses registered for the combination (prog, vers) (for all
+ * transports).
+ *
+ * RPCBPROC_GETSTAT(void) returns (rpcb_stat_byvers)
+ * Returns the statistics about the kind of requests received by rpcbind.
+ */
+
+/*
+ * A mapping of (program, version, network ID) to address
+ */
+
+struct rpcb {
+ rpcprog_t r_prog;
+ rpcvers_t r_vers;
+ char *r_netid;
+ char *r_addr;
+ char *r_owner;
+};
+typedef struct rpcb rpcb;
+
+typedef rpcb RPCB;
+
+
+/*
+ * A list of mappings
+ *
+ * Below are two definitions for the rpcblist structure. This is done because
+ * xdr_rpcblist() is specified to take a struct rpcblist **, rather than a
+ * struct rpcblist * that rpcgen would produce. One version of the rpcblist
+ * structure (actually called rp__list) is used with rpcgen, and the other is
+ * defined only in the header file for compatibility with the specified
+ * interface.
+ */
+
+struct rp__list {
+ rpcb rpcb_map;
+ struct rp__list *rpcb_next;
+};
+typedef struct rp__list rp__list;
+
+typedef rp__list *rpcblist_ptr;
+
+typedef struct rp__list rpcblist;
+typedef struct rp__list RPCBLIST;
+
+#ifndef __cplusplus
+struct rpcblist {
+ RPCB rpcb_map;
+ struct rpcblist *rpcb_next;
+};
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+extern bool_t xdr_rpcblist(XDR *, rpcblist**);
+#ifdef __cplusplus
+}
+#endif
+
+
+/*
+ * Arguments of remote calls
+ */
+
+struct rpcb_rmtcallargs {
+ rpcprog_t prog;
+ rpcvers_t vers;
+ rpcproc_t proc;
+ struct {
+ u_int args_len;
+ char *args_val;
+ } args;
+};
+typedef struct rpcb_rmtcallargs rpcb_rmtcallargs;
+
+/*
+ * Client-side only representation of rpcb_rmtcallargs structure.
+ *
+ * The routine that XDRs the rpcb_rmtcallargs structure must deal with the
+ * opaque arguments in the "args" structure. xdr_rpcb_rmtcallargs() needs to
+ * be passed the XDR routine that knows the args' structure. This routine
+ * doesn't need to go over-the-wire (and it wouldn't make sense anyway) since
+ * the application being called already knows the args structure. So we use a
+ * different "XDR" structure on the client side, r_rpcb_rmtcallargs, which
+ * includes the args' XDR routine.
+ */
+struct r_rpcb_rmtcallargs {
+ rpcprog_t prog;
+ rpcvers_t vers;
+ rpcproc_t proc;
+ struct {
+ u_int args_len;
+ char *args_val;
+ } args;
+ xdrproc_t xdr_args; /* encodes args */
+};
+
+
+/*
+ * Results of the remote call
+ */
+
+struct rpcb_rmtcallres {
+ char *addr;
+ struct {
+ u_int results_len;
+ char *results_val;
+ } results;
+};
+typedef struct rpcb_rmtcallres rpcb_rmtcallres;
+
+/*
+ * Client-side only representation of rpcb_rmtcallres structure.
+ */
+struct r_rpcb_rmtcallres {
+ char *addr;
+ struct {
+ uint32_t results_len;
+ char *results_val;
+ } results;
+ xdrproc_t xdr_res; /* decodes results */
+};
+
+/*
+ * rpcb_entry contains a merged address of a service on a particular
+ * transport, plus associated netconfig information. A list of rpcb_entrys
+ * is returned by RPCBPROC_GETADDRLIST. See netconfig.h for values used
+ * in r_nc_* fields.
+ */
+
+struct rpcb_entry {
+ char *r_maddr;
+ char *r_nc_netid;
+ u_int r_nc_semantics;
+ char *r_nc_protofmly;
+ char *r_nc_proto;
+};
+typedef struct rpcb_entry rpcb_entry;
+
+/*
+ * A list of addresses supported by a service.
+ */
+
+struct rpcb_entry_list {
+ rpcb_entry rpcb_entry_map;
+ struct rpcb_entry_list *rpcb_entry_next;
+};
+typedef struct rpcb_entry_list rpcb_entry_list;
+
+typedef rpcb_entry_list *rpcb_entry_list_ptr;
+
+/*
+ * rpcbind statistics
+ */
+
+#define rpcb_highproc_2 RPCBPROC_CALLIT
+#define rpcb_highproc_3 RPCBPROC_TADDR2UADDR
+#define rpcb_highproc_4 RPCBPROC_GETSTAT
+#define RPCBSTAT_HIGHPROC 13
+#define RPCBVERS_STAT 3
+#define RPCBVERS_4_STAT 2
+#define RPCBVERS_3_STAT 1
+#define RPCBVERS_2_STAT 0
+
+/* Link list of all the stats about getport and getaddr */
+
+struct rpcbs_addrlist {
+ rpcprog_t prog;
+ rpcvers_t vers;
+ int success;
+ int failure;
+ char *netid;
+ struct rpcbs_addrlist *next;
+};
+typedef struct rpcbs_addrlist rpcbs_addrlist;
+
+/* Link list of all the stats about rmtcall */
+
+struct rpcbs_rmtcalllist {
+ rpcprog_t prog;
+ rpcvers_t vers;
+ rpcproc_t proc;
+ int success;
+ int failure;
+ int indirect;
+ char *netid;
+ struct rpcbs_rmtcalllist *next;
+};
+typedef struct rpcbs_rmtcalllist rpcbs_rmtcalllist;
+
+typedef int rpcbs_proc[RPCBSTAT_HIGHPROC];
+
+typedef rpcbs_addrlist *rpcbs_addrlist_ptr;
+
+typedef rpcbs_rmtcalllist *rpcbs_rmtcalllist_ptr;
+
+struct rpcb_stat {
+ rpcbs_proc info;
+ int setinfo;
+ int unsetinfo;
+ rpcbs_addrlist_ptr addrinfo;
+ rpcbs_rmtcalllist_ptr rmtinfo;
+};
+typedef struct rpcb_stat rpcb_stat;
+
+/*
+ * One rpcb_stat structure is returned for each version of rpcbind
+ * being monitored.
+ */
+
+typedef rpcb_stat rpcb_stat_byvers[RPCBVERS_STAT];
+
+/*
+ * We don't define netbuf in RPCL, since it would contain structure member
+ * names that would conflict with the definition of struct netbuf in
+ * <tiuser.h>. Instead we merely declare the XDR routine xdr_netbuf() here,
+ * and implement it ourselves in rpc/rpcb_prot.c.
+ */
+#ifdef __cplusplus
+extern "C" bool_t xdr_netbuf(XDR *, struct netbuf *);
+
+#else /* __STDC__ */
+extern bool_t xdr_netbuf(XDR *, struct netbuf *);
+
+#endif
+
+#define RPCBVERS_3 RPCBVERS
+#define RPCBVERS_4 RPCBVERS4
+
+#else /* ndef _KERNEL */
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * A mapping of (program, version, network ID) to address
+ */
+struct rpcb {
+ rpcprog_t r_prog; /* program number */
+ rpcvers_t r_vers; /* version number */
+ char *r_netid; /* network id */
+ char *r_addr; /* universal address */
+ char *r_owner; /* owner of the mapping */
+};
+typedef struct rpcb RPCB;
+
+/*
+ * A list of mappings
+ */
+struct rpcblist {
+ RPCB rpcb_map;
+ struct rpcblist *rpcb_next;
+};
+typedef struct rpcblist RPCBLIST;
+typedef struct rpcblist *rpcblist_ptr;
+
+/*
+ * Remote calls arguments
+ */
+struct rpcb_rmtcallargs {
+ rpcprog_t prog; /* program number */
+ rpcvers_t vers; /* version number */
+ rpcproc_t proc; /* procedure number */
+ uint32_t arglen; /* arg len */
+ caddr_t args_ptr; /* argument */
+ xdrproc_t xdr_args; /* XDR routine for argument */
+};
+typedef struct rpcb_rmtcallargs rpcb_rmtcallargs;
+
+/*
+ * Remote calls results
+ */
+struct rpcb_rmtcallres {
+ char *addr_ptr; /* remote universal address */
+ uint32_t resultslen; /* results length */
+ caddr_t results_ptr; /* results */
+ xdrproc_t xdr_results; /* XDR routine for result */
+};
+typedef struct rpcb_rmtcallres rpcb_rmtcallres;
+
+struct rpcb_entry {
+ char *r_maddr;
+ char *r_nc_netid;
+ unsigned int r_nc_semantics;
+ char *r_nc_protofmly;
+ char *r_nc_proto;
+};
+typedef struct rpcb_entry rpcb_entry;
+
+/*
+ * A list of addresses supported by a service.
+ */
+
+struct rpcb_entry_list {
+ rpcb_entry rpcb_entry_map;
+ struct rpcb_entry_list *rpcb_entry_next;
+};
+typedef struct rpcb_entry_list rpcb_entry_list;
+
+typedef rpcb_entry_list *rpcb_entry_list_ptr;
+
+/*
+ * rpcbind statistics
+ */
+
+#define rpcb_highproc_2 RPCBPROC_CALLIT
+#define rpcb_highproc_3 RPCBPROC_TADDR2UADDR
+#define rpcb_highproc_4 RPCBPROC_GETSTAT
+#define RPCBSTAT_HIGHPROC 13
+#define RPCBVERS_STAT 3
+#define RPCBVERS_4_STAT 2
+#define RPCBVERS_3_STAT 1
+#define RPCBVERS_2_STAT 0
+
+/* Link list of all the stats about getport and getaddr */
+
+struct rpcbs_addrlist {
+ rpcprog_t prog;
+ rpcvers_t vers;
+ int success;
+ int failure;
+ char *netid;
+ struct rpcbs_addrlist *next;
+};
+typedef struct rpcbs_addrlist rpcbs_addrlist;
+
+/* Link list of all the stats about rmtcall */
+
+struct rpcbs_rmtcalllist {
+ rpcprog_t prog;
+ rpcvers_t vers;
+ rpcproc_t proc;
+ int success;
+ int failure;
+ int indirect;
+ char *netid;
+ struct rpcbs_rmtcalllist *next;
+};
+typedef struct rpcbs_rmtcalllist rpcbs_rmtcalllist;
+
+typedef int rpcbs_proc[RPCBSTAT_HIGHPROC];
+
+typedef rpcbs_addrlist *rpcbs_addrlist_ptr;
+
+typedef rpcbs_rmtcalllist *rpcbs_rmtcalllist_ptr;
+
+struct rpcb_stat {
+ rpcbs_proc info;
+ int setinfo;
+ int unsetinfo;
+ rpcbs_addrlist_ptr addrinfo;
+ rpcbs_rmtcalllist_ptr rmtinfo;
+};
+typedef struct rpcb_stat rpcb_stat;
+
+/*
+ * One rpcb_stat structure is returned for each version of rpcbind
+ * being monitored.
+ */
+
+typedef rpcb_stat rpcb_stat_byvers[RPCBVERS_STAT];
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* ndef _KERNEL */
+
+#define _PATH_RPCBINDSOCK "/var/run/rpcbind.sock"
+
+#define RPCBPROG ((unsigned long)(100000))
+#define RPCBVERS ((unsigned long)(3))
+
+extern void rpcbprog_3(struct svc_req *rqstp, SVCXPRT *transp);
+#define RPCBPROC_SET ((unsigned long)(1))
+extern bool_t * rpcbproc_set_3(RPCB *, CLIENT *);
+extern bool_t * rpcbproc_set_3_svc(RPCB *, struct svc_req *);
+#define RPCBPROC_UNSET ((unsigned long)(2))
+extern bool_t * rpcbproc_unset_3(RPCB *, CLIENT *);
+extern bool_t * rpcbproc_unset_3_svc(RPCB *, struct svc_req *);
+#define RPCBPROC_GETADDR ((unsigned long)(3))
+extern char ** rpcbproc_getaddr_3(RPCB *, CLIENT *);
+extern char ** rpcbproc_getaddr_3_svc(RPCB *, struct svc_req *);
+#define RPCBPROC_DUMP ((unsigned long)(4))
+extern rpcblist_ptr * rpcbproc_dump_3(void *, CLIENT *);
+extern rpcblist_ptr * rpcbproc_dump_3_svc(void *, struct svc_req *);
+#define RPCBPROC_CALLIT ((unsigned long)(5))
+extern rpcb_rmtcallres * rpcbproc_callit_3(rpcb_rmtcallargs *, CLIENT *);
+extern rpcb_rmtcallres * rpcbproc_callit_3_svc(rpcb_rmtcallargs *, struct svc_req *);
+#define RPCBPROC_GETTIME ((unsigned long)(6))
+extern u_int * rpcbproc_gettime_3(void *, CLIENT *);
+extern u_int * rpcbproc_gettime_3_svc(void *, struct svc_req *);
+#define RPCBPROC_UADDR2TADDR ((unsigned long)(7))
+extern struct netbuf * rpcbproc_uaddr2taddr_3(char **, CLIENT *);
+extern struct netbuf * rpcbproc_uaddr2taddr_3_svc(char **, struct svc_req *);
+#define RPCBPROC_TADDR2UADDR ((unsigned long)(8))
+extern char ** rpcbproc_taddr2uaddr_3(struct netbuf *, CLIENT *);
+extern char ** rpcbproc_taddr2uaddr_3_svc(struct netbuf *, struct svc_req *);
+extern int rpcbprog_3_freeresult(SVCXPRT *, xdrproc_t, caddr_t);
+#define RPCBVERS4 ((unsigned long)(4))
+
+extern void rpcbprog_4(struct svc_req *rqstp, SVCXPRT *transp);
+extern bool_t * rpcbproc_set_4(RPCB *, CLIENT *);
+extern bool_t * rpcbproc_set_4_svc(RPCB *, struct svc_req *);
+extern bool_t * rpcbproc_unset_4(RPCB *, CLIENT *);
+extern bool_t * rpcbproc_unset_4_svc(RPCB *, struct svc_req *);
+extern char ** rpcbproc_getaddr_4(RPCB *, CLIENT *);
+extern char ** rpcbproc_getaddr_4_svc(RPCB *, struct svc_req *);
+extern rpcblist_ptr * rpcbproc_dump_4(void *, CLIENT *);
+extern rpcblist_ptr * rpcbproc_dump_4_svc(void *, struct svc_req *);
+#define RPCBPROC_BCAST ((unsigned long)(RPCBPROC_CALLIT))
+extern rpcb_rmtcallres * rpcbproc_bcast_4(rpcb_rmtcallargs *, CLIENT *);
+extern rpcb_rmtcallres * rpcbproc_bcast_4_svc(rpcb_rmtcallargs *, struct svc_req *);
+extern u_int * rpcbproc_gettime_4(void *, CLIENT *);
+extern u_int * rpcbproc_gettime_4_svc(void *, struct svc_req *);
+extern struct netbuf * rpcbproc_uaddr2taddr_4(char **, CLIENT *);
+extern struct netbuf * rpcbproc_uaddr2taddr_4_svc(char **, struct svc_req *);
+extern char ** rpcbproc_taddr2uaddr_4(struct netbuf *, CLIENT *);
+extern char ** rpcbproc_taddr2uaddr_4_svc(struct netbuf *, struct svc_req *);
+#define RPCBPROC_GETVERSADDR ((unsigned long)(9))
+extern char ** rpcbproc_getversaddr_4(RPCB *, CLIENT *);
+extern char ** rpcbproc_getversaddr_4_svc(RPCB *, struct svc_req *);
+#define RPCBPROC_INDIRECT ((unsigned long)(10))
+extern rpcb_rmtcallres * rpcbproc_indirect_4(rpcb_rmtcallargs *, CLIENT *);
+extern rpcb_rmtcallres * rpcbproc_indirect_4_svc(rpcb_rmtcallargs *, struct svc_req *);
+#define RPCBPROC_GETADDRLIST ((unsigned long)(11))
+extern rpcb_entry_list_ptr * rpcbproc_getaddrlist_4(RPCB *, CLIENT *);
+extern rpcb_entry_list_ptr * rpcbproc_getaddrlist_4_svc(RPCB *, struct svc_req *);
+#define RPCBPROC_GETSTAT ((unsigned long)(12))
+extern rpcb_stat * rpcbproc_getstat_4(void *, CLIENT *);
+extern rpcb_stat * rpcbproc_getstat_4_svc(void *, struct svc_req *);
+extern int rpcbprog_4_freeresult(SVCXPRT *, xdrproc_t, caddr_t);
+
+/* the xdr functions */
+extern bool_t xdr_rpcb(XDR *, RPCB *);
+#ifndef _KERNEL
+extern bool_t xdr_rp__list(XDR *, rp__list*);
+#endif
+extern bool_t xdr_rpcblist_ptr(XDR *, rpcblist_ptr*);
+extern bool_t xdr_rpcb_rmtcallargs(XDR *, rpcb_rmtcallargs*);
+extern bool_t xdr_rpcb_rmtcallres(XDR *, rpcb_rmtcallres*);
+extern bool_t xdr_rpcb_entry(XDR *, rpcb_entry*);
+extern bool_t xdr_rpcb_entry_list(XDR *, rpcb_entry_list*);
+extern bool_t xdr_rpcb_entry_list_ptr(XDR *, rpcb_entry_list_ptr*);
+extern bool_t xdr_rpcbs_addrlist(XDR *, rpcbs_addrlist*);
+extern bool_t xdr_rpcbs_rmtcalllist(XDR *, rpcbs_rmtcalllist*);
+extern bool_t xdr_rpcbs_proc(XDR *, rpcbs_proc);
+extern bool_t xdr_rpcbs_addrlist_ptr(XDR *, rpcbs_addrlist_ptr*);
+extern bool_t xdr_rpcbs_rmtcalllist_ptr(XDR *, rpcbs_rmtcalllist_ptr*);
+extern bool_t xdr_rpcb_stat(XDR *, rpcb_stat*);
+extern bool_t xdr_rpcb_stat_byvers(XDR *, rpcb_stat_byvers);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !_RPCB_PROT_H_RPCGEN */
diff --git a/sys/rpc/svc.c b/sys/rpc/svc.c
new file mode 100644
index 0000000..8be9805
--- /dev/null
+++ b/sys/rpc/svc.c
@@ -0,0 +1,574 @@
+/* $NetBSD: svc.c,v 1.21 2000/07/06 03:10:35 christos Exp $ */
+
+/*
+ * Sun RPC is a product of Sun Microsystems, Inc. and is provided for
+ * unrestricted use provided that this legend is included on all tape
+ * media and as a part of the software program in whole or part. Users
+ * may copy or modify Sun RPC without charge, but are not authorized
+ * to license or distribute it to anyone else except as part of a product or
+ * program developed by the user.
+ *
+ * SUN RPC IS PROVIDED AS IS WITH NO WARRANTIES OF ANY KIND INCLUDING THE
+ * WARRANTIES OF DESIGN, MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE, OR ARISING FROM A COURSE OF DEALING, USAGE OR TRADE PRACTICE.
+ *
+ * Sun RPC is provided with no support and without any obligation on the
+ * part of Sun Microsystems, Inc. to assist in its use, correction,
+ * modification or enhancement.
+ *
+ * SUN MICROSYSTEMS, INC. SHALL HAVE NO LIABILITY WITH RESPECT TO THE
+ * INFRINGEMENT OF COPYRIGHTS, TRADE SECRETS OR ANY PATENTS BY SUN RPC
+ * OR ANY PART THEREOF.
+ *
+ * In no event will Sun Microsystems, Inc. be liable for any lost revenue
+ * or profits or other special, indirect and consequential damages, even if
+ * Sun has been advised of the possibility of such damages.
+ *
+ * Sun Microsystems, Inc.
+ * 2550 Garcia Avenue
+ * Mountain View, California 94043
+ */
+
+#if defined(LIBC_SCCS) && !defined(lint)
+static char *sccsid2 = "@(#)svc.c 1.44 88/02/08 Copyr 1984 Sun Micro";
+static char *sccsid = "@(#)svc.c 2.4 88/08/11 4.0 RPCSRC";
+#endif
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * svc.c, Server-side remote procedure call interface.
+ *
+ * There are two sets of procedures here. The xprt routines are
+ * for handling transport handles. The svc routines handle the
+ * list of service routines.
+ *
+ * Copyright (C) 1984, Sun Microsystems, Inc.
+ */
+
+#include <sys/param.h>
+#include <sys/lock.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/mutex.h>
+#include <sys/queue.h>
+#include <sys/systm.h>
+#include <sys/ucred.h>
+
+#include <rpc/rpc.h>
+#include <rpc/rpcb_clnt.h>
+
+#include "rpc_com.h"
+
+#define SVC_VERSQUIET 0x0001 /* keep quiet about vers mismatch */
+#define version_keepquiet(xp) ((u_long)(xp)->xp_p3 & SVC_VERSQUIET)
+
+static struct svc_callout *svc_find(SVCPOOL *pool, rpcprog_t, rpcvers_t,
+ char *);
+static void __xprt_do_unregister (SVCXPRT *xprt, bool_t dolock);
+
+/* *************** SVCXPRT related stuff **************** */
+
+SVCPOOL*
+svcpool_create(void)
+{
+ SVCPOOL *pool;
+
+ pool = malloc(sizeof(SVCPOOL), M_RPC, M_WAITOK|M_ZERO);
+
+ mtx_init(&pool->sp_lock, "sp_lock", NULL, MTX_DEF);
+ TAILQ_INIT(&pool->sp_xlist);
+ TAILQ_INIT(&pool->sp_active);
+ TAILQ_INIT(&pool->sp_callouts);
+
+ return pool;
+}
+
+void
+svcpool_destroy(SVCPOOL *pool)
+{
+ SVCXPRT *xprt;
+ struct svc_callout *s;
+
+ mtx_lock(&pool->sp_lock);
+
+ while (TAILQ_FIRST(&pool->sp_xlist)) {
+ xprt = TAILQ_FIRST(&pool->sp_xlist);
+ mtx_unlock(&pool->sp_lock);
+ SVC_DESTROY(xprt);
+ mtx_lock(&pool->sp_lock);
+ }
+
+ while (TAILQ_FIRST(&pool->sp_callouts)) {
+ s = TAILQ_FIRST(&pool->sp_callouts);
+ mtx_unlock(&pool->sp_lock);
+ svc_unreg(pool, s->sc_prog, s->sc_vers);
+ mtx_lock(&pool->sp_lock);
+ }
+
+ mtx_destroy(&pool->sp_lock);
+ free(pool, M_RPC);
+}
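
A pool created by svcpool_create() owns the transport list, the active list and the callout list; svcpool_destroy() unwinds whatever is still attached. A hedged sketch of the expected lifetime follows, with the transport-creation step elided since it depends on the transport code that accompanies this file.

	/*
	 * Hedged sketch, not part of the patch: typical SVCPOOL lifetime.
	 */
	static void
	example_pool_lifetime(void)
	{
		SVCPOOL *pool;

		pool = svcpool_create();	/* initializes lock and empty lists */
		/*
		 * ... create transports against the pool, register services,
		 * then run the request loop ...
		 */
		svcpool_destroy(pool);		/* tears down leftover xprts and callouts */
	}
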
+
+/*
+ * Activate a transport handle.
+ */
+void
+xprt_register(SVCXPRT *xprt)
+{
+ SVCPOOL *pool = xprt->xp_pool;
+
+ mtx_lock(&pool->sp_lock);
+ xprt->xp_registered = TRUE;
+ xprt->xp_active = FALSE;
+ TAILQ_INSERT_TAIL(&pool->sp_xlist, xprt, xp_link);
+ mtx_unlock(&pool->sp_lock);
+}
+
+void
+xprt_unregister(SVCXPRT *xprt)
+{
+ __xprt_do_unregister(xprt, TRUE);
+}
+
+void
+__xprt_unregister_unlocked(SVCXPRT *xprt)
+{
+ __xprt_do_unregister(xprt, FALSE);
+}
+
+/*
+ * De-activate a transport handle.
+ */
+static void
+__xprt_do_unregister(SVCXPRT *xprt, bool_t dolock)
+{
+ SVCPOOL *pool = xprt->xp_pool;
+
+ //__svc_generic_cleanup(xprt);
+
+ if (dolock)
+ mtx_lock(&pool->sp_lock);
+
+ if (xprt->xp_active) {
+ TAILQ_REMOVE(&pool->sp_active, xprt, xp_alink);
+ xprt->xp_active = FALSE;
+ }
+ TAILQ_REMOVE(&pool->sp_xlist, xprt, xp_link);
+ xprt->xp_registered = FALSE;
+
+ if (dolock)
+ mtx_unlock(&pool->sp_lock);
+}
+
+void
+xprt_active(SVCXPRT *xprt)
+{
+ SVCPOOL *pool = xprt->xp_pool;
+
+ mtx_lock(&pool->sp_lock);
+
+ if (!xprt->xp_active) {
+ TAILQ_INSERT_TAIL(&pool->sp_active, xprt, xp_alink);
+ xprt->xp_active = TRUE;
+ }
+ wakeup(&pool->sp_active);
+
+ mtx_unlock(&pool->sp_lock);
+}
+
+void
+xprt_inactive(SVCXPRT *xprt)
+{
+ SVCPOOL *pool = xprt->xp_pool;
+
+ mtx_lock(&pool->sp_lock);
+
+ if (xprt->xp_active) {
+ TAILQ_REMOVE(&pool->sp_active, xprt, xp_alink);
+ xprt->xp_active = FALSE;
+ }
+ wakeup(&pool->sp_active);
+
+ mtx_unlock(&pool->sp_lock);
+}
+
+/*
+ * Add a service program to the callout list.
+ * The dispatch routine will be called when an RPC request for this
+ * program number comes in.
+ */
+bool_t
+svc_reg(SVCXPRT *xprt, const rpcprog_t prog, const rpcvers_t vers,
+ void (*dispatch)(struct svc_req *, SVCXPRT *),
+ const struct netconfig *nconf)
+{
+ SVCPOOL *pool = xprt->xp_pool;
+ struct svc_callout *s;
+ char *netid = NULL;
+ int flag = 0;
+
+/* VARIABLES PROTECTED BY svc_lock: s, svc_head */
+
+ if (xprt->xp_netid) {
+ netid = strdup(xprt->xp_netid, M_RPC);
+ flag = 1;
+ } else if (nconf && nconf->nc_netid) {
+ netid = strdup(nconf->nc_netid, M_RPC);
+ flag = 1;
+ } /* must have been created with svc_raw_create */
+ if ((netid == NULL) && (flag == 1)) {
+ return (FALSE);
+ }
+
+ mtx_lock(&pool->sp_lock);
+ if ((s = svc_find(pool, prog, vers, netid)) != NULL) {
+ if (netid)
+ free(netid, M_RPC);
+ if (s->sc_dispatch == dispatch)
+			goto rpcb_it; /* the caller is registering another xprt */
+ mtx_unlock(&pool->sp_lock);
+ return (FALSE);
+ }
+ s = malloc(sizeof (struct svc_callout), M_RPC, M_NOWAIT);
+ if (s == NULL) {
+ if (netid)
+ free(netid, M_RPC);
+ mtx_unlock(&pool->sp_lock);
+ return (FALSE);
+ }
+
+ s->sc_prog = prog;
+ s->sc_vers = vers;
+ s->sc_dispatch = dispatch;
+ s->sc_netid = netid;
+ TAILQ_INSERT_TAIL(&pool->sp_callouts, s, sc_link);
+
+ if ((xprt->xp_netid == NULL) && (flag == 1) && netid)
+ ((SVCXPRT *) xprt)->xp_netid = strdup(netid, M_RPC);
+
+rpcb_it:
+ mtx_unlock(&pool->sp_lock);
+ /* now register the information with the local binder service */
+ if (nconf) {
+ bool_t dummy;
+ struct netconfig tnc;
+ tnc = *nconf;
+ dummy = rpcb_set(prog, vers, &tnc,
+ &((SVCXPRT *) xprt)->xp_ltaddr);
+ return (dummy);
+ }
+ return (TRUE);
+}
+
+/*
+ * Remove a service program from the callout list.
+ */
+void
+svc_unreg(SVCPOOL *pool, const rpcprog_t prog, const rpcvers_t vers)
+{
+ struct svc_callout *s;
+
+ /* unregister the information anyway */
+ (void) rpcb_unset(prog, vers, NULL);
+ mtx_lock(&pool->sp_lock);
+ while ((s = svc_find(pool, prog, vers, NULL)) != NULL) {
+ TAILQ_REMOVE(&pool->sp_callouts, s, sc_link);
+ if (s->sc_netid)
+ mem_free(s->sc_netid, sizeof (s->sc_netid) + 1);
+ mem_free(s, sizeof (struct svc_callout));
+ }
+ mtx_unlock(&pool->sp_lock);
+}
+
+/* ********************** CALLOUT list related stuff ************* */
+
+/*
+ * Search the callout list for a program number, return the callout
+ * struct.
+ */
+static struct svc_callout *
+svc_find(SVCPOOL *pool, rpcprog_t prog, rpcvers_t vers, char *netid)
+{
+ struct svc_callout *s;
+
+ mtx_assert(&pool->sp_lock, MA_OWNED);
+ TAILQ_FOREACH(s, &pool->sp_callouts, sc_link) {
+ if (s->sc_prog == prog && s->sc_vers == vers
+ && (netid == NULL || s->sc_netid == NULL ||
+ strcmp(netid, s->sc_netid) == 0))
+ break;
+ }
+
+ return (s);
+}
+
+/* ******************* REPLY GENERATION ROUTINES ************ */
+
+/*
+ * Send a reply to an rpc request
+ */
+bool_t
+svc_sendreply(SVCXPRT *xprt, xdrproc_t xdr_results, void * xdr_location)
+{
+ struct rpc_msg rply;
+
+ rply.rm_direction = REPLY;
+ rply.rm_reply.rp_stat = MSG_ACCEPTED;
+ rply.acpted_rply.ar_verf = xprt->xp_verf;
+ rply.acpted_rply.ar_stat = SUCCESS;
+ rply.acpted_rply.ar_results.where = xdr_location;
+ rply.acpted_rply.ar_results.proc = xdr_results;
+
+ return (SVC_REPLY(xprt, &rply));
+}
+
+/*
+ * No procedure error reply
+ */
+void
+svcerr_noproc(SVCXPRT *xprt)
+{
+ struct rpc_msg rply;
+
+ rply.rm_direction = REPLY;
+ rply.rm_reply.rp_stat = MSG_ACCEPTED;
+ rply.acpted_rply.ar_verf = xprt->xp_verf;
+ rply.acpted_rply.ar_stat = PROC_UNAVAIL;
+
+ SVC_REPLY(xprt, &rply);
+}
+
+/*
+ * Can't decode args error reply
+ */
+void
+svcerr_decode(SVCXPRT *xprt)
+{
+ struct rpc_msg rply;
+
+ rply.rm_direction = REPLY;
+ rply.rm_reply.rp_stat = MSG_ACCEPTED;
+ rply.acpted_rply.ar_verf = xprt->xp_verf;
+ rply.acpted_rply.ar_stat = GARBAGE_ARGS;
+
+ SVC_REPLY(xprt, &rply);
+}
+
+/*
+ * Some system error
+ */
+void
+svcerr_systemerr(SVCXPRT *xprt)
+{
+ struct rpc_msg rply;
+
+ rply.rm_direction = REPLY;
+ rply.rm_reply.rp_stat = MSG_ACCEPTED;
+ rply.acpted_rply.ar_verf = xprt->xp_verf;
+ rply.acpted_rply.ar_stat = SYSTEM_ERR;
+
+ SVC_REPLY(xprt, &rply);
+}
+
+/*
+ * Authentication error reply
+ */
+void
+svcerr_auth(SVCXPRT *xprt, enum auth_stat why)
+{
+ struct rpc_msg rply;
+
+ rply.rm_direction = REPLY;
+ rply.rm_reply.rp_stat = MSG_DENIED;
+ rply.rjcted_rply.rj_stat = AUTH_ERROR;
+ rply.rjcted_rply.rj_why = why;
+
+ SVC_REPLY(xprt, &rply);
+}
+
+/*
+ * Auth too weak error reply
+ */
+void
+svcerr_weakauth(SVCXPRT *xprt)
+{
+
+ svcerr_auth(xprt, AUTH_TOOWEAK);
+}
+
+/*
+ * Program unavailable error reply
+ */
+void
+svcerr_noprog(SVCXPRT *xprt)
+{
+ struct rpc_msg rply;
+
+ rply.rm_direction = REPLY;
+ rply.rm_reply.rp_stat = MSG_ACCEPTED;
+ rply.acpted_rply.ar_verf = xprt->xp_verf;
+ rply.acpted_rply.ar_stat = PROG_UNAVAIL;
+
+ SVC_REPLY(xprt, &rply);
+}
+
+/*
+ * Program version mismatch error reply
+ */
+void
+svcerr_progvers(SVCXPRT *xprt, rpcvers_t low_vers, rpcvers_t high_vers)
+{
+ struct rpc_msg rply;
+
+ rply.rm_direction = REPLY;
+ rply.rm_reply.rp_stat = MSG_ACCEPTED;
+ rply.acpted_rply.ar_verf = xprt->xp_verf;
+ rply.acpted_rply.ar_stat = PROG_MISMATCH;
+ rply.acpted_rply.ar_vers.low = (uint32_t)low_vers;
+ rply.acpted_rply.ar_vers.high = (uint32_t)high_vers;
+
+ SVC_REPLY(xprt, &rply);
+}
+
+/* ******************* SERVER INPUT STUFF ******************* */
+
+/*
+ * Get server side input from some transport.
+ *
+ * Statement of authentication parameters management:
+ * This function owns and manages all authentication parameters, specifically
+ * the "raw" parameters (msg.rm_call.cb_cred and msg.rm_call.cb_verf) and
+ * the "cooked" credentials (rqst->rq_clntcred).
+ * In-kernel, we represent non-trivial cooked creds with struct xucred.
+ * In all events, all three parameters are freed upon exit from this routine.
+ * The storage is trivially managed on the call stack in user land, but
+ * is allocated with malloc() in kernel land.
+ */
+
+static void
+svc_getreq(SVCXPRT *xprt)
+{
+ SVCPOOL *pool = xprt->xp_pool;
+ struct svc_req r;
+ struct rpc_msg msg;
+ int prog_found;
+ rpcvers_t low_vers;
+ rpcvers_t high_vers;
+ enum xprt_stat stat;
+ char cred_area[2*MAX_AUTH_BYTES + sizeof(struct xucred)];
+
+ msg.rm_call.cb_cred.oa_base = cred_area;
+ msg.rm_call.cb_verf.oa_base = &cred_area[MAX_AUTH_BYTES];
+ r.rq_clntcred = &cred_area[2*MAX_AUTH_BYTES];
+
+	/* now receive msgs from xprt (support batch calls) */
+ do {
+ if (SVC_RECV(xprt, &msg)) {
+
+ /* now find the exported program and call it */
+ struct svc_callout *s;
+ enum auth_stat why;
+
+ r.rq_xprt = xprt;
+ r.rq_prog = msg.rm_call.cb_prog;
+ r.rq_vers = msg.rm_call.cb_vers;
+ r.rq_proc = msg.rm_call.cb_proc;
+ r.rq_cred = msg.rm_call.cb_cred;
+ /* first authenticate the message */
+ if ((why = _authenticate(&r, &msg)) != AUTH_OK) {
+ svcerr_auth(xprt, why);
+ goto call_done;
+ }
+			/* now match message with a registered service */
+ prog_found = FALSE;
+ low_vers = (rpcvers_t) -1L;
+ high_vers = (rpcvers_t) 0L;
+ TAILQ_FOREACH(s, &pool->sp_callouts, sc_link) {
+ if (s->sc_prog == r.rq_prog) {
+ if (s->sc_vers == r.rq_vers) {
+ (*s->sc_dispatch)(&r, xprt);
+ goto call_done;
+ } /* found correct version */
+ prog_found = TRUE;
+ if (s->sc_vers < low_vers)
+ low_vers = s->sc_vers;
+ if (s->sc_vers > high_vers)
+ high_vers = s->sc_vers;
+ } /* found correct program */
+ }
+ /*
+ * if we got here, the program or version
+ * is not served ...
+ */
+ if (prog_found)
+ svcerr_progvers(xprt, low_vers, high_vers);
+ else
+ svcerr_noprog(xprt);
+ /* Fall through to ... */
+ }
+ /*
+ * Check if the xprt has been disconnected in a
+ * recursive call in the service dispatch routine.
+ * If so, then break.
+ */
+ mtx_lock(&pool->sp_lock);
+ if (!xprt->xp_registered) {
+ mtx_unlock(&pool->sp_lock);
+ break;
+ }
+ mtx_unlock(&pool->sp_lock);
+call_done:
+ if ((stat = SVC_STAT(xprt)) == XPRT_DIED) {
+ SVC_DESTROY(xprt);
+ break;
+ }
+ } while (stat == XPRT_MOREREQS);
+}
+
+void
+svc_run(SVCPOOL *pool)
+{
+ SVCXPRT *xprt;
+ int error;
+
+ mtx_lock(&pool->sp_lock);
+
+ pool->sp_exited = FALSE;
+
+ while (!pool->sp_exited) {
+ xprt = TAILQ_FIRST(&pool->sp_active);
+ if (!xprt) {
+ error = msleep(&pool->sp_active, &pool->sp_lock, PCATCH,
+ "rpcsvc", 0);
+ if (error)
+ break;
+ continue;
+ }
+
+ /*
+ * Move this transport to the end to ensure fairness
+ * when multiple transports are active. If this was
+ * the last queued request, svc_getreq will end up
+ * calling xprt_inactive to remove from the active
+ * list.
+ */
+ TAILQ_REMOVE(&pool->sp_active, xprt, xp_alink);
+ TAILQ_INSERT_TAIL(&pool->sp_active, xprt, xp_alink);
+
+ mtx_unlock(&pool->sp_lock);
+ svc_getreq(xprt);
+ mtx_lock(&pool->sp_lock);
+ }
+
+ mtx_unlock(&pool->sp_lock);
+}
+
+void
+svc_exit(SVCPOOL *pool)
+{
+ mtx_lock(&pool->sp_lock);
+ pool->sp_exited = TRUE;
+ wakeup(&pool->sp_active);
+ mtx_unlock(&pool->sp_lock);
+}
diff --git a/sys/rpc/svc.h b/sys/rpc/svc.h
new file mode 100644
index 0000000..21c7491
--- /dev/null
+++ b/sys/rpc/svc.h
@@ -0,0 +1,614 @@
+/* $NetBSD: svc.h,v 1.17 2000/06/02 22:57:56 fvdl Exp $ */
+
+/*
+ * Sun RPC is a product of Sun Microsystems, Inc. and is provided for
+ * unrestricted use provided that this legend is included on all tape
+ * media and as a part of the software program in whole or part. Users
+ * may copy or modify Sun RPC without charge, but are not authorized
+ * to license or distribute it to anyone else except as part of a product or
+ * program developed by the user.
+ *
+ * SUN RPC IS PROVIDED AS IS WITH NO WARRANTIES OF ANY KIND INCLUDING THE
+ * WARRANTIES OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE, OR ARISING FROM A COURSE OF DEALING, USAGE OR TRADE PRACTICE.
+ *
+ * Sun RPC is provided with no support and without any obligation on the
+ * part of Sun Microsystems, Inc. to assist in its use, correction,
+ * modification or enhancement.
+ *
+ * SUN MICROSYSTEMS, INC. SHALL HAVE NO LIABILITY WITH RESPECT TO THE
+ * INFRINGEMENT OF COPYRIGHTS, TRADE SECRETS OR ANY PATENTS BY SUN RPC
+ * OR ANY PART THEREOF.
+ *
+ * In no event will Sun Microsystems, Inc. be liable for any lost revenue
+ * or profits or other special, indirect and consequential damages, even if
+ * Sun has been advised of the possibility of such damages.
+ *
+ * Sun Microsystems, Inc.
+ * 2550 Garcia Avenue
+ * Mountain View, California 94043
+ *
+ * from: @(#)svc.h 1.35 88/12/17 SMI
+ * from: @(#)svc.h 1.27 94/04/25 SMI
+ * $FreeBSD$
+ */
+
+/*
+ * svc.h, Server-side remote procedure call interface.
+ *
+ * Copyright (C) 1986-1993 by Sun Microsystems, Inc.
+ */
+
+#ifndef _RPC_SVC_H
+#define _RPC_SVC_H
+#include <sys/cdefs.h>
+
+#ifdef _KERNEL
+#include <sys/queue.h>
+#include <sys/_lock.h>
+#include <sys/_mutex.h>
+#endif
+
+/*
+ * This interface must manage two items concerning remote procedure calling:
+ *
+ * 1) An arbitrary number of transport connections upon which rpc requests
+ * are received. The two most notable transports are TCP and UDP; they are
 * created and registered by routines in svc_vc.c and svc_dg.c, respectively;
+ * they in turn call xprt_register and xprt_unregister.
+ *
+ * 2) An arbitrary number of locally registered services. Services are
+ * described by the following data: program number, version number,
+ * "service dispatch" function, a transport handle, and a boolean that
+ * indicates whether or not the exported program should be registered with a
+ * local binder service; if true the program's number and version and the
+ * port number from the transport handle are registered with the binder.
+ * These data are registered with the rpc svc system via svc_reg.
+ *
+ * A service's dispatch function is called whenever an rpc request comes in
+ * on a transport. The request's program and version numbers must match
+ * those of the registered service. The dispatch function is passed two
+ * parameters, struct svc_req * and SVCXPRT *, defined below.
+ */
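+
+/*
+ * As an illustrative sketch only (not a normative part of this
+ * interface): a kernel service built on these two items might be wired
+ * up roughly as below.  The names MYPROG, MYVERS and myprog_dispatch
+ * are hypothetical stand-ins for a real protocol's definitions.
+ *
+ *	static void
+ *	myprog_dispatch(struct svc_req *rqstp, SVCXPRT *xprt)
+ *	{
+ *		switch (rqstp->rq_proc) {
+ *		case 0:				(NULLPROC ping)
+ *			svc_sendreply(xprt, (xdrproc_t) xdr_void, NULL);
+ *			break;
+ *		default:
+ *			svcerr_noproc(xprt);
+ *			break;
+ *		}
+ *	}
+ *
+ *	SVCPOOL *pool = svcpool_create();
+ *	svc_create(pool, myprog_dispatch, MYPROG, MYVERS, "netpath");
+ *	svc_run(pool);		(blocks until svc_exit(pool))
+ */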
+
+/*
+ * Service control requests
+ */
+#define SVCGET_VERSQUIET 1
+#define SVCSET_VERSQUIET 2
+#define SVCGET_CONNMAXREC 3
+#define SVCSET_CONNMAXREC 4
+
+/*
+ * Operations for rpc_control().
+ */
+#define RPC_SVC_CONNMAXREC_SET 0 /* set max rec size, enable nonblock */
+#define RPC_SVC_CONNMAXREC_GET 1
+
+enum xprt_stat {
+ XPRT_DIED,
+ XPRT_MOREREQS,
+ XPRT_IDLE
+};
+
+struct __rpc_svcxprt;
+
+struct xp_ops {
+ /* receive incoming requests */
+ bool_t (*xp_recv)(struct __rpc_svcxprt *, struct rpc_msg *);
+ /* get transport status */
+ enum xprt_stat (*xp_stat)(struct __rpc_svcxprt *);
+ /* get arguments */
+ bool_t (*xp_getargs)(struct __rpc_svcxprt *, xdrproc_t, void *);
+ /* send reply */
+ bool_t (*xp_reply)(struct __rpc_svcxprt *, struct rpc_msg *);
+ /* free mem allocated for args */
+ bool_t (*xp_freeargs)(struct __rpc_svcxprt *, xdrproc_t, void *);
+ /* destroy this struct */
+ void (*xp_destroy)(struct __rpc_svcxprt *);
+#ifdef _KERNEL
+ /* catch-all function */
+ bool_t (*xp_control)(struct __rpc_svcxprt *, const u_int, void *);
+#endif
+};
+
+#ifndef _KERNEL
+struct xp_ops2 {
+ /* catch-all function */
+ bool_t (*xp_control)(struct __rpc_svcxprt *, const u_int, void *);
+};
+#endif
+
+#ifdef _KERNEL
+struct __rpc_svcpool;
+#endif
+
+/*
+ * Server side transport handle
+ */
+typedef struct __rpc_svcxprt {
+#ifdef _KERNEL
+ struct mtx xp_lock;
+ struct __rpc_svcpool *xp_pool; /* owning pool (see below) */
+ TAILQ_ENTRY(__rpc_svcxprt) xp_link;
+ TAILQ_ENTRY(__rpc_svcxprt) xp_alink;
+ bool_t xp_registered; /* xprt_register has been called */
+ bool_t xp_active; /* xprt_active has been called */
+ struct socket* xp_socket;
+ const struct xp_ops *xp_ops;
+ char *xp_netid; /* network token */
+ struct netbuf xp_ltaddr; /* local transport address */
+ struct netbuf xp_rtaddr; /* remote transport address */
+ struct opaque_auth xp_verf; /* raw response verifier */
+ uint32_t xp_xid; /* current transaction ID */
+ XDR xp_xdrreq; /* xdr stream for decoding request */
+ XDR xp_xdrrep; /* xdr stream for encoding reply */
+ void *xp_p1; /* private: for use by svc ops */
+ void *xp_p2; /* private: for use by svc ops */
+ void *xp_p3; /* private: for use by svc lib */
+ int xp_type; /* transport type */
+#else
+ int xp_fd;
+ u_short xp_port; /* associated port number */
+ const struct xp_ops *xp_ops;
+ int xp_addrlen; /* length of remote address */
+ struct sockaddr_in xp_raddr; /* remote addr. (backward ABI compat) */
+ /* XXX - fvdl stick this here for ABI backward compat reasons */
+ const struct xp_ops2 *xp_ops2;
+ char *xp_tp; /* transport provider device name */
+ char *xp_netid; /* network token */
+ struct netbuf xp_ltaddr; /* local transport address */
+ struct netbuf xp_rtaddr; /* remote transport address */
+ struct opaque_auth xp_verf; /* raw response verifier */
+ void *xp_p1; /* private: for use by svc ops */
+ void *xp_p2; /* private: for use by svc ops */
+ void *xp_p3; /* private: for use by svc lib */
+ int xp_type; /* transport type */
+#endif
+} SVCXPRT;
+
+#ifdef _KERNEL
+
+/*
+ * The services list
+ * Each entry represents a set of procedures (an rpc program).
+ * The dispatch routine takes request structs and runs the
+ * appropriate procedure.
+ */
+struct svc_callout {
+ TAILQ_ENTRY(svc_callout) sc_link;
+ rpcprog_t sc_prog;
+ rpcvers_t sc_vers;
+ char *sc_netid;
+ void (*sc_dispatch)(struct svc_req *, SVCXPRT *);
+};
+TAILQ_HEAD(svc_callout_list, svc_callout);
+
+/*
+ * In the kernel, we can't use global variables to store lists of
+ * transports etc. since otherwise we could not have two unrelated RPC
+ * services running, each on its own thread. We solve this by
+ * importing a tiny part of a Solaris kernel concept, SVCPOOL.
+ *
+ * A service pool contains a set of transports and service callbacks
+ * for a set of related RPC services. The pool handle should be passed
+ * when creating new transports etc. Future work may include extending
+ * this to support something similar to the Solaris multi-threaded RPC
+ * server.
+ */
+TAILQ_HEAD(svcxprt_list, __rpc_svcxprt);
+typedef struct __rpc_svcpool {
+ struct mtx sp_lock; /* protect the transport lists */
+ struct svcxprt_list sp_xlist; /* all transports in the pool */
+ struct svcxprt_list sp_active; /* transports needing service */
+ struct svc_callout_list sp_callouts; /* (prog,vers)->dispatch list */
+ bool_t sp_exited; /* true if shutting down */
+} SVCPOOL;
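+
+/*
+ * A minimal sketch of the intended pool life cycle, assuming the
+ * service loop and the shutdown path run in separate contexts:
+ *
+ *	SVCPOOL *pool = svcpool_create();
+ *	... create transports with svc_dg_create()/svc_vc_create()
+ *	    or svc_tp_create() and register dispatchers ...
+ *	svc_run(pool);			(service thread blocks here)
+ *
+ *	(later, from the shutdown path)
+ *	svc_exit(pool);			(makes svc_run return)
+ *	svcpool_destroy(pool);		(destroys remaining transports)
+ */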
+
+#endif
+
+/*
+ * Service request
+ */
+struct svc_req {
+ uint32_t rq_prog; /* service program number */
+ uint32_t rq_vers; /* service protocol version */
+ uint32_t rq_proc; /* the desired procedure */
+ struct opaque_auth rq_cred; /* raw creds from the wire */
+ void *rq_clntcred; /* read only cooked cred */
+ SVCXPRT *rq_xprt; /* associated transport */
+};
+
+/*
+ * Approved way of getting address of caller
+ */
+#define svc_getrpccaller(x) (&(x)->xp_rtaddr)
+
+/*
+ * Operations defined on an SVCXPRT handle
+ *
+ * SVCXPRT *xprt;
+ * struct rpc_msg *msg;
+ * xdrproc_t xargs;
+ * void * argsp;
+ */
+#define SVC_RECV(xprt, msg) \
+ (*(xprt)->xp_ops->xp_recv)((xprt), (msg))
+#define svc_recv(xprt, msg) \
+ (*(xprt)->xp_ops->xp_recv)((xprt), (msg))
+
+#define SVC_STAT(xprt) \
+ (*(xprt)->xp_ops->xp_stat)(xprt)
+#define svc_stat(xprt) \
+ (*(xprt)->xp_ops->xp_stat)(xprt)
+
+#define SVC_GETARGS(xprt, xargs, argsp) \
+ (*(xprt)->xp_ops->xp_getargs)((xprt), (xargs), (argsp))
+#define svc_getargs(xprt, xargs, argsp) \
+ (*(xprt)->xp_ops->xp_getargs)((xprt), (xargs), (argsp))
+
+#define SVC_REPLY(xprt, msg) \
+ (*(xprt)->xp_ops->xp_reply) ((xprt), (msg))
+#define svc_reply(xprt, msg) \
+ (*(xprt)->xp_ops->xp_reply) ((xprt), (msg))
+
+#define SVC_FREEARGS(xprt, xargs, argsp) \
+ (*(xprt)->xp_ops->xp_freeargs)((xprt), (xargs), (argsp))
+#define svc_freeargs(xprt, xargs, argsp) \
+ (*(xprt)->xp_ops->xp_freeargs)((xprt), (xargs), (argsp))
+
+#define SVC_DESTROY(xprt) \
+ (*(xprt)->xp_ops->xp_destroy)(xprt)
+#define svc_destroy(xprt) \
+ (*(xprt)->xp_ops->xp_destroy)(xprt)
+
+#ifdef _KERNEL
+#define SVC_CONTROL(xprt, rq, in) \
+ (*(xprt)->xp_ops->xp_control)((xprt), (rq), (in))
+#else
+#define SVC_CONTROL(xprt, rq, in) \
+ (*(xprt)->xp_ops2->xp_control)((xprt), (rq), (in))
+#endif
+
+/*
+ * Service registration
+ *
+ * svc_reg(xprt, prog, vers, dispatch, nconf)
+ * const SVCXPRT *xprt;
+ * const rpcprog_t prog;
+ * const rpcvers_t vers;
+ * const void (*dispatch)();
+ * const struct netconfig *nconf;
+ */
+
+__BEGIN_DECLS
+extern bool_t svc_reg(SVCXPRT *, const rpcprog_t, const rpcvers_t,
+ void (*)(struct svc_req *, SVCXPRT *),
+ const struct netconfig *);
+__END_DECLS
+
+/*
+ * Service un-registration
+ *
+ * svc_unreg(prog, vers)
+ * const rpcprog_t prog;
+ * const rpcvers_t vers;
+ */
+
+__BEGIN_DECLS
+#ifdef _KERNEL
+extern void svc_unreg(SVCPOOL *, const rpcprog_t, const rpcvers_t);
+#else
+extern void svc_unreg(const rpcprog_t, const rpcvers_t);
+#endif
+__END_DECLS
+
+/*
+ * Transport registration.
+ *
+ * xprt_register(xprt)
+ * SVCXPRT *xprt;
+ */
+__BEGIN_DECLS
+extern void xprt_register(SVCXPRT *);
+__END_DECLS
+
+/*
+ * Transport un-register
+ *
+ * xprt_unregister(xprt)
+ * SVCXPRT *xprt;
+ */
+__BEGIN_DECLS
+extern void xprt_unregister(SVCXPRT *);
+extern void __xprt_unregister_unlocked(SVCXPRT *);
+__END_DECLS
+
+#ifdef _KERNEL
+
+/*
+ * Called when a transport has pending requests.
+ */
+__BEGIN_DECLS
+extern void xprt_active(SVCXPRT *);
+extern void xprt_inactive(SVCXPRT *);
+__END_DECLS
+
+#endif
+
+/*
+ * When the service routine is called, it must first check to see if it
+ * knows about the procedure; if not, it should call svcerr_noproc
+ * and return. If so, it should deserialize its arguments via
+ * SVC_GETARGS (defined above). If the deserialization does not work,
+ * svcerr_decode should be called followed by a return. Successful
+ * decoding of the arguments should be followed by the execution of the
+ * procedure's code and a call to svc_sendreply.
+ *
+ * Also, if the service refuses to execute the procedure due to too-
+ * weak authentication parameters, svcerr_weakauth should be called.
+ * Note: do not confuse access-control failure with weak authentication!
+ *
+ * NB: In pure implementations of rpc, the caller always waits for a reply
+ * msg. This message is sent when svc_sendreply is called.
+ * Therefore pure service implementations should always call
+ * svc_sendreply even if the function logically returns void; use
+ * xdr.h - xdr_void for the xdr routine. HOWEVER, tcp based rpc allows
+ * for the abuse of pure rpc via batched calling or pipelining. In the
+ * case of a batched call, svc_sendreply should NOT be called since
+ * this would send a return message, which is what batching tries to avoid.
+ * It is the service/protocol writer's responsibility to know which calls are
+ * batched and which are not. Warning: responding to batch calls may
+ * deadlock the caller and server processes!
+ */
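+
+/*
+ * A hedged sketch of that flow for one procedure inside a dispatch
+ * function; MYPROC, myproc_args, myproc_res, xdr_myproc_args,
+ * xdr_myproc_res and myproc_execute are hypothetical names used only
+ * for illustration:
+ *
+ *	case MYPROC: {
+ *		struct myproc_args args;
+ *		struct myproc_res res;
+ *
+ *		memset(&args, 0, sizeof(args));
+ *		if (!SVC_GETARGS(xprt, (xdrproc_t) xdr_myproc_args, &args)) {
+ *			svcerr_decode(xprt);
+ *			break;
+ *		}
+ *		myproc_execute(&args, &res);	(run the procedure)
+ *		if (!svc_sendreply(xprt, (xdrproc_t) xdr_myproc_res, &res))
+ *			svcerr_systemerr(xprt);
+ *		SVC_FREEARGS(xprt, (xdrproc_t) xdr_myproc_args, &args);
+ *		break;
+ *	}
+ */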
+
+__BEGIN_DECLS
+extern bool_t svc_sendreply(SVCXPRT *, xdrproc_t, void *);
+extern void svcerr_decode(SVCXPRT *);
+extern void svcerr_weakauth(SVCXPRT *);
+extern void svcerr_noproc(SVCXPRT *);
+extern void svcerr_progvers(SVCXPRT *, rpcvers_t, rpcvers_t);
+extern void svcerr_auth(SVCXPRT *, enum auth_stat);
+extern void svcerr_noprog(SVCXPRT *);
+extern void svcerr_systemerr(SVCXPRT *);
+extern int rpc_reg(rpcprog_t, rpcvers_t, rpcproc_t,
+ char *(*)(char *), xdrproc_t, xdrproc_t,
+ char *);
+__END_DECLS
+
+/*
+ * Lowest level dispatching -OR- who owns this process anyway.
+ * Somebody has to wait for incoming requests and then call the correct
+ * service routine. The kernel routine svc_run does this waiting; it
+ * does not return until svc_exit is called on the pool.
+ * Since another (co-existent) package may wish to selectively wait for
+ * incoming calls or other events outside of the rpc architecture, the
+ * userland routine svc_getreq is provided. It must be passed readfds,
+ * the "in-place" results of a select system call (see select, section 2).
+ */
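+
+/*
+ * In the kernel a natural pattern (a sketch only; the thread KPI and
+ * names shown are assumptions about the surrounding kernel, not part
+ * of this interface) is to dedicate a kernel thread to the pool:
+ *
+ *	static void
+ *	myprog_svc_thread(void *arg)
+ *	{
+ *		SVCPOOL *pool = arg;
+ *
+ *		svc_run(pool);		(returns once svc_exit is called)
+ *		kproc_exit(0);
+ *	}
+ *
+ *	kproc_create(myprog_svc_thread, pool, NULL, 0, 0, "myprog_svc");
+ */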
+
+#ifndef _KERNEL
+/*
+ * Global keeper of rpc service descriptors in use
+ * dynamic; must be inspected before each call to select
+ */
+extern int svc_maxfd;
+#ifdef FD_SETSIZE
+extern fd_set svc_fdset;
+#define svc_fds svc_fdset.fds_bits[0] /* compatibility */
+#else
+extern int svc_fds;
+#endif /* def FD_SETSIZE */
+#endif
+
+/*
+ * a small program implemented by the svc_rpc implementation itself;
+ * also see clnt.h for protocol numbers.
+ */
+__BEGIN_DECLS
+extern void rpctest_service(void);
+__END_DECLS
+
+__BEGIN_DECLS
+#ifndef _KERNEL
+extern void svc_getreq(int);
+extern void svc_getreqset(fd_set *);
+extern void svc_getreq_common(int);
+struct pollfd;
+extern void svc_getreq_poll(struct pollfd *, int);
+extern void svc_run(void);
+extern void svc_exit(void);
+#else
+extern void svc_run(SVCPOOL *);
+extern void svc_exit(SVCPOOL *);
+#endif
+__END_DECLS
+
+/*
+ * Socket to use on svcxxx_create call to get default socket
+ */
+#define RPC_ANYSOCK -1
+#define RPC_ANYFD RPC_ANYSOCK
+
+/*
+ * These are the existing service side transport implementations
+ */
+
+__BEGIN_DECLS
+
+#ifdef _KERNEL
+
+/*
+ * Create a new service pool.
+ */
+extern SVCPOOL* svcpool_create(void);
+
+/*
+ * Destroy a service pool, including all registered transports.
+ */
+extern void svcpool_destroy(SVCPOOL *pool);
+
+/*
+ * Transport independent svc_create routine.
+ */
+extern int svc_create(SVCPOOL *, void (*)(struct svc_req *, SVCXPRT *),
+ const rpcprog_t, const rpcvers_t, const char *);
+/*
+ * void (*dispatch)(); -- dispatch routine
+ * const rpcprog_t prognum; -- program number
+ * const rpcvers_t versnum; -- version number
+ * const char *nettype; -- network type
+ */
+
+
+/*
+ * Generic server creation routine. It takes a netconfig structure
+ * instead of a nettype.
+ */
+
+extern SVCXPRT *svc_tp_create(SVCPOOL *, void (*)(struct svc_req *, SVCXPRT *),
+ const rpcprog_t, const rpcvers_t, const char *uaddr,
+ const struct netconfig *);
+ /*
+ * void (*dispatch)(); -- dispatch routine
+ * const rpcprog_t prognum; -- program number
+ * const rpcvers_t versnum; -- version number
+ * const char *uaddr; -- universal address of service
+ * const struct netconfig *nconf; -- netconfig structure
+ */
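+
+/*
+ * Illustrative use only: given a struct netconfig for the desired
+ * transport (obtained here with getnetconfigent()/freenetconfigent(),
+ * which are assumptions of this sketch, as are MYPROG, MYVERS and
+ * myprog_dispatch), a single transport can be created and registered
+ * with:
+ *
+ *	struct netconfig *nconf = getnetconfigent("udp");
+ *	SVCXPRT *xprt = svc_tp_create(pool, myprog_dispatch,
+ *	    MYPROG, MYVERS, NULL, nconf);
+ *	freenetconfigent(nconf);
+ */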
+
+extern SVCXPRT *svc_dg_create(SVCPOOL *, struct socket *,
+ const size_t, const size_t);
+ /*
+ * struct socket *; -- open connection
+ * const size_t sendsize; -- max send size
+ * const size_t recvsize; -- max recv size
+ */
+
+extern SVCXPRT *svc_vc_create(SVCPOOL *, struct socket *,
+ const size_t, const size_t);
+ /*
+ * struct socket *; -- open connection
+ * const size_t sendsize; -- max send size
+ * const size_t recvsize; -- max recv size
+ */
+
+/*
+ * Generic TLI create routine
+ */
+extern SVCXPRT *svc_tli_create(SVCPOOL *, struct socket *,
+ const struct netconfig *, const struct t_bind *, const size_t, const size_t);
+/*
+ * struct socket * so; -- connection end point
+ * const struct netconfig *nconf; -- netconfig structure for network
+ * const struct t_bind *bindaddr; -- local bind address
+ * const size_t sendsz; -- max sendsize
+ * const size_t recvsz; -- max recvsize
+ */
+
+#else /* !_KERNEL */
+
+/*
+ * Transport independent svc_create routine.
+ */
+extern int svc_create(void (*)(struct svc_req *, SVCXPRT *),
+ const rpcprog_t, const rpcvers_t, const char *);
+/*
+ * void (*dispatch)(); -- dispatch routine
+ * const rpcprog_t prognum; -- program number
+ * const rpcvers_t versnum; -- version number
+ * const char *nettype; -- network type
+ */
+
+
+/*
+ * Generic server creation routine. It takes a netconfig structure
+ * instead of a nettype.
+ */
+
+extern SVCXPRT *svc_tp_create(void (*)(struct svc_req *, SVCXPRT *),
+ const rpcprog_t, const rpcvers_t,
+ const struct netconfig *);
+ /*
+ * void (*dispatch)(); -- dispatch routine
+ * const rpcprog_t prognum; -- program number
+ * const rpcvers_t versnum; -- version number
+ * const struct netconfig *nconf; -- netconfig structure
+ */
+
+/*
+ * Generic TLI create routine
+ */
+extern SVCXPRT *svc_tli_create(const int, const struct netconfig *,
+ const struct t_bind *, const u_int,
+ const u_int);
+/*
+ * const int fd; -- connection end point
+ * const struct netconfig *nconf; -- netconfig structure for network
+ * const struct t_bind *bindaddr; -- local bind address
+ * const u_int sendsz; -- max sendsize
+ * const u_int recvsz; -- max recvsize
+ */
+
+/*
+ * Connectionless and connectionful create routines
+ */
+
+extern SVCXPRT *svc_vc_create(const int, const u_int, const u_int);
+/*
+ * const int fd; -- open connection end point
+ * const u_int sendsize; -- max send size
+ * const u_int recvsize; -- max recv size
+ */
+
+/*
+ * Added for compatibility with old rpc 4.0. Obsoleted by svc_vc_create().
+ */
+extern SVCXPRT *svcunix_create(int, u_int, u_int, char *);
+
+extern SVCXPRT *svc_dg_create(const int, const u_int, const u_int);
+ /*
+ * const int fd; -- open connection
+ * const u_int sendsize; -- max send size
+ * const u_int recvsize; -- max recv size
+ */
+
+
+/*
+ * The routine takes any *open* connection
+ * descriptor as its first input.
+ */
+extern SVCXPRT *svc_fd_create(const int, const u_int, const u_int);
+/*
+ * const int fd; -- open connection end point
+ * const u_int sendsize; -- max send size
+ * const u_int recvsize; -- max recv size
+ */
+
+/*
+ * Added for compatibility with old rpc 4.0. Obsoleted by svc_fd_create().
+ */
+extern SVCXPRT *svcunixfd_create(int, u_int, u_int);
+
+/*
+ * Memory based rpc (for speed check and testing)
+ */
+extern SVCXPRT *svc_raw_create(void);
+
+/*
+ * svc_dg_enablecache() enables the cache on dg transports.
+ */
+int svc_dg_enablecache(SVCXPRT *, const u_int);
+
+int __rpc_get_local_uid(SVCXPRT *_transp, uid_t *_uid);
+
+#endif /* !_KERNEL */
+
+__END_DECLS
+
+#ifndef _KERNEL
+/* for backward compatibility */
+#include <rpc/svc_soc.h>
+#endif
+
+#endif /* !_RPC_SVC_H */
diff --git a/sys/rpc/svc_auth.c b/sys/rpc/svc_auth.c
new file mode 100644
index 0000000..22d4e61
--- /dev/null
+++ b/sys/rpc/svc_auth.c
@@ -0,0 +1,133 @@
+/* $NetBSD: svc_auth.c,v 1.12 2000/07/06 03:10:35 christos Exp $ */
+
+/*
+ * Sun RPC is a product of Sun Microsystems, Inc. and is provided for
+ * unrestricted use provided that this legend is included on all tape
+ * media and as a part of the software program in whole or part. Users
+ * may copy or modify Sun RPC without charge, but are not authorized
+ * to license or distribute it to anyone else except as part of a product or
+ * program developed by the user.
+ *
+ * SUN RPC IS PROVIDED AS IS WITH NO WARRANTIES OF ANY KIND INCLUDING THE
+ * WARRANTIES OF DESIGN, MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE, OR ARISING FROM A COURSE OF DEALING, USAGE OR TRADE PRACTICE.
+ *
+ * Sun RPC is provided with no support and without any obligation on the
+ * part of Sun Microsystems, Inc. to assist in its use, correction,
+ * modification or enhancement.
+ *
+ * SUN MICROSYSTEMS, INC. SHALL HAVE NO LIABILITY WITH RESPECT TO THE
+ * INFRINGEMENT OF COPYRIGHTS, TRADE SECRETS OR ANY PATENTS BY SUN RPC
+ * OR ANY PART THEREOF.
+ *
+ * In no event will Sun Microsystems, Inc. be liable for any lost revenue
+ * or profits or other special, indirect and consequential damages, even if
+ * Sun has been advised of the possibility of such damages.
+ *
+ * Sun Microsystems, Inc.
+ * 2550 Garcia Avenue
+ * Mountain View, California 94043
+ */
+/*
+ * Copyright (c) 1986-1991 by Sun Microsystems Inc.
+ */
+
+#if defined(LIBC_SCCS) && !defined(lint)
+#ident "@(#)svc_auth.c 1.16 94/04/24 SMI"
+static char sccsid[] = "@(#)svc_auth.c 1.26 89/02/07 Copyr 1984 Sun Micro";
+#endif
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * svc_auth.c, Server-side rpc authenticator interface.
+ *
+ */
+
+#include <sys/param.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/systm.h>
+#include <sys/ucred.h>
+
+#include <rpc/rpc.h>
+
+/*
+ * The call rpc message, msg, has been obtained from the wire.  The msg
+ * contains the raw form of credentials and verifiers.  _authenticate
+ * returns AUTH_OK if the msg is successfully authenticated.  If AUTH_OK
+ * then the routine also does the following things:
+ * sets rqst->rq_xprt->xp_verf to the appropriate response verifier;
+ * sets rqst->rq_clntcred to the "cooked" form of the credentials.
+ *
+ * NB: rqst->rq_xprt->xp_verf must be pre-allocated;
+ * its length is set appropriately.
+ *
+ * The caller still owns and is responsible for msg->rm_call.cb_cred and
+ * msg->rm_call.cb_verf.  The authentication system retains ownership of
+ * rqst->rq_clntcred, the cooked credentials.
+ *
+ * There is an assumption that any flavour less than AUTH_NULL is
+ * invalid.
+ */
+enum auth_stat
+_authenticate(struct svc_req *rqst, struct rpc_msg *msg)
+{
+ int cred_flavor;
+ enum auth_stat dummy;
+
+ rqst->rq_cred = msg->rm_call.cb_cred;
+ rqst->rq_xprt->xp_verf.oa_flavor = _null_auth.oa_flavor;
+ rqst->rq_xprt->xp_verf.oa_length = 0;
+ cred_flavor = rqst->rq_cred.oa_flavor;
+ switch (cred_flavor) {
+ case AUTH_NULL:
+ dummy = _svcauth_null(rqst, msg);
+ return (dummy);
+ case AUTH_SYS:
+ dummy = _svcauth_unix(rqst, msg);
+ return (dummy);
+ case AUTH_SHORT:
+ dummy = _svcauth_short(rqst, msg);
+ return (dummy);
+ default:
+ break;
+ }
+
+ return (AUTH_REJECTEDCRED);
+}
+
+/*ARGSUSED*/
+enum auth_stat
+_svcauth_null(struct svc_req *rqst, struct rpc_msg *msg)
+{
+ return (AUTH_OK);
+}
+
+int
+svc_getcred(struct svc_req *rqst, struct ucred *cr, int *flavorp)
+{
+ int flavor, i;
+ struct xucred *xcr;
+
+ KASSERT(!crshared(cr), ("svc_getcred with shared cred"));
+
+ flavor = rqst->rq_cred.oa_flavor;
+ if (flavorp)
+ *flavorp = flavor;
+
+ switch (flavor) {
+ case AUTH_UNIX:
+ xcr = (struct xucred *) rqst->rq_clntcred;
+ cr->cr_uid = cr->cr_ruid = cr->cr_svuid = xcr->cr_uid;
+ cr->cr_ngroups = xcr->cr_ngroups;
+ for (i = 0; i < xcr->cr_ngroups; i++)
+ cr->cr_groups[i] = xcr->cr_groups[i];
+ cr->cr_rgid = cr->cr_groups[0];
+ return (TRUE);
+
+ default:
+ return (FALSE);
+ }
+}
+
diff --git a/sys/rpc/svc_auth.h b/sys/rpc/svc_auth.h
new file mode 100644
index 0000000..26c191a
--- /dev/null
+++ b/sys/rpc/svc_auth.h
@@ -0,0 +1,67 @@
+/* $NetBSD: svc_auth.h,v 1.8 2000/06/02 22:57:57 fvdl Exp $ */
+
+/*
+ * Sun RPC is a product of Sun Microsystems, Inc. and is provided for
+ * unrestricted use provided that this legend is included on all tape
+ * media and as a part of the software program in whole or part. Users
+ * may copy or modify Sun RPC without charge, but are not authorized
+ * to license or distribute it to anyone else except as part of a product or
+ * program developed by the user.
+ *
+ * SUN RPC IS PROVIDED AS IS WITH NO WARRANTIES OF ANY KIND INCLUDING THE
+ * WARRANTIES OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE, OR ARISING FROM A COURSE OF DEALING, USAGE OR TRADE PRACTICE.
+ *
+ * Sun RPC is provided with no support and without any obligation on the
+ * part of Sun Microsystems, Inc. to assist in its use, correction,
+ * modification or enhancement.
+ *
+ * SUN MICROSYSTEMS, INC. SHALL HAVE NO LIABILITY WITH RESPECT TO THE
+ * INFRINGEMENT OF COPYRIGHTS, TRADE SECRETS OR ANY PATENTS BY SUN RPC
+ * OR ANY PART THEREOF.
+ *
+ * In no event will Sun Microsystems, Inc. be liable for any lost revenue
+ * or profits or other special, indirect and consequential damages, even if
+ * Sun has been advised of the possibility of such damages.
+ *
+ * Sun Microsystems, Inc.
+ * 2550 Garcia Avenue
+ * Mountain View, California 94043
+ *
+ * from: @(#)svc_auth.h 1.6 86/07/16 SMI
+ * @(#)svc_auth.h 2.1 88/07/29 4.0 RPCSRC
+ * $FreeBSD$
+ */
+
+/*
+ * svc_auth.h, Service side of rpc authentication.
+ *
+ * Copyright (C) 1984, Sun Microsystems, Inc.
+ */
+
+#ifndef _RPC_SVC_AUTH_H
+#define _RPC_SVC_AUTH_H
+
+/*
+ * Server side authenticator
+ */
+__BEGIN_DECLS
+extern enum auth_stat _authenticate(struct svc_req *, struct rpc_msg *);
+
+extern int svc_getcred(struct svc_req *, struct ucred *, int *);
+/*
+ * struct svc_req *req; -- RPC request
+ * struct ucred *cr;	-- Kernel cred to modify
+ * int *flavorp;	-- Return RPC auth flavor
+ *
+ * Retrieve unix creds corresponding to an RPC request, if
+ * possible. The auth flavor (AUTH_NONE or AUTH_UNIX) is returned in
+ * *flavorp. If the flavor is AUTH_UNIX the caller's ucred structure
+ * will be modified to reflect the values from the request. Returns
+ * non-zero if credentials were retrieved from the request, otherwise
+ * zero.
+ */
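+
+/*
+ * Illustrative use only (crget()/crfree() and the error handling shown
+ * are assumptions of this sketch, not requirements of the interface):
+ *
+ *	struct ucred *cr = crget();
+ *	int flavor;
+ *
+ *	if (!svc_getcred(rqstp, cr, &flavor)) {
+ *		crfree(cr);
+ *		svcerr_weakauth(rqstp->rq_xprt);
+ *		return;
+ *	}
+ *	... perform the operation using cr as the caller's credential ...
+ *	crfree(cr);
+ */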
+
+__END_DECLS
+
+#endif /* !_RPC_SVC_AUTH_H */
diff --git a/sys/rpc/svc_auth_unix.c b/sys/rpc/svc_auth_unix.c
new file mode 100644
index 0000000..3b6969d
--- /dev/null
+++ b/sys/rpc/svc_auth_unix.c
@@ -0,0 +1,144 @@
+/*
+ * Sun RPC is a product of Sun Microsystems, Inc. and is provided for
+ * unrestricted use provided that this legend is included on all tape
+ * media and as a part of the software program in whole or part. Users
+ * may copy or modify Sun RPC without charge, but are not authorized
+ * to license or distribute it to anyone else except as part of a product or
+ * program developed by the user.
+ *
+ * SUN RPC IS PROVIDED AS IS WITH NO WARRANTIES OF ANY KIND INCLUDING THE
+ * WARRANTIES OF DESIGN, MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE, OR ARISING FROM A COURSE OF DEALING, USAGE OR TRADE PRACTICE.
+ *
+ * Sun RPC is provided with no support and without any obligation on the
+ * part of Sun Microsystems, Inc. to assist in its use, correction,
+ * modification or enhancement.
+ *
+ * SUN MICROSYSTEMS, INC. SHALL HAVE NO LIABILITY WITH RESPECT TO THE
+ * INFRINGEMENT OF COPYRIGHTS, TRADE SECRETS OR ANY PATENTS BY SUN RPC
+ * OR ANY PART THEREOF.
+ *
+ * In no event will Sun Microsystems, Inc. be liable for any lost revenue
+ * or profits or other special, indirect and consequential damages, even if
+ * Sun has been advised of the possibility of such damages.
+ *
+ * Sun Microsystems, Inc.
+ * 2550 Garcia Avenue
+ * Mountain View, California 94043
+ */
+
+#if defined(LIBC_SCCS) && !defined(lint)
+static char *sccsid2 = "@(#)svc_auth_unix.c 1.28 88/02/08 Copyr 1984 Sun Micro";
+static char *sccsid = "@(#)svc_auth_unix.c 2.3 88/08/01 4.0 RPCSRC";
+#endif
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * svc_auth_unix.c
+ * Handles UNIX flavor authentication parameters on the service side of rpc.
+ * There are two svc auth implementations here: AUTH_UNIX and AUTH_SHORT.
+ * _svcauth_unix does full blown unix style uid,gid+gids auth,
+ * _svcauth_short uses a shorthand auth to index into a cache of longhand auths.
+ * Note: the shorthand has been gutted for efficiency.
+ *
+ * Copyright (C) 1984, Sun Microsystems, Inc.
+ */
+
+#include <sys/param.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/systm.h>
+#include <sys/ucred.h>
+
+#include <rpc/rpc.h>
+
+#include "rpc_com.h"
+
+#define MAX_MACHINE_NAME 255
+#define NGRPS 16
+
+/*
+ * Unix longhand authenticator
+ */
+enum auth_stat
+_svcauth_unix(struct svc_req *rqst, struct rpc_msg *msg)
+{
+ enum auth_stat stat;
+ XDR xdrs;
+ int32_t *buf;
+ uint32_t time;
+ struct xucred *xcr;
+ u_int auth_len;
+ size_t str_len, gid_len;
+ u_int i;
+
+ xcr = rqst->rq_clntcred;
+ auth_len = (u_int)msg->rm_call.cb_cred.oa_length;
+ xdrmem_create(&xdrs, msg->rm_call.cb_cred.oa_base, auth_len,
+ XDR_DECODE);
+ buf = XDR_INLINE(&xdrs, auth_len);
+ if (buf != NULL) {
+ time = IXDR_GET_UINT32(buf);
+ str_len = (size_t)IXDR_GET_UINT32(buf);
+ if (str_len > MAX_MACHINE_NAME) {
+ stat = AUTH_BADCRED;
+ goto done;
+ }
+ str_len = RNDUP(str_len);
+ buf += str_len / sizeof (int32_t);
+ xcr->cr_uid = IXDR_GET_UINT32(buf);
+ xcr->cr_groups[0] = IXDR_GET_UINT32(buf);
+ gid_len = (size_t)IXDR_GET_UINT32(buf);
+ if (gid_len > NGRPS) {
+ stat = AUTH_BADCRED;
+ goto done;
+ }
+ for (i = 0; i < gid_len; i++) {
+ if (i + 1 < NGROUPS)
+ xcr->cr_groups[i + 1] = IXDR_GET_INT32(buf);
+ else
+ buf++;
+ }
+ if (gid_len + 1 > NGROUPS)
+ xcr->cr_ngroups = NGROUPS;
+ else
+ xcr->cr_ngroups = gid_len + 1;
+
+ /*
+ * five is the smallest unix credentials structure -
+ * timestamp, hostname len (0), uid, gid, and gids len (0).
+ */
+ if ((5 + gid_len) * BYTES_PER_XDR_UNIT + str_len > auth_len) {
+ (void) printf("bad auth_len gid %ld str %ld auth %u\n",
+ (long)gid_len, (long)str_len, auth_len);
+ stat = AUTH_BADCRED;
+ goto done;
+ }
+ } else if (! xdr_authunix_parms(&xdrs, &time, xcr)) {
+ stat = AUTH_BADCRED;
+ goto done;
+ }
+
+ rqst->rq_xprt->xp_verf.oa_flavor = AUTH_NULL;
+ rqst->rq_xprt->xp_verf.oa_length = 0;
+ stat = AUTH_OK;
+done:
+ XDR_DESTROY(&xdrs);
+
+ return (stat);
+}
+
+
+/*
+ * Shorthand unix authenticator
+ * Looks up longhand in a cache.
+ */
+/*ARGSUSED*/
+enum auth_stat
+_svcauth_short(struct svc_req *rqst, struct rpc_msg *msg)
+{
+ return (AUTH_REJECTEDCRED);
+}
diff --git a/sys/rpc/svc_dg.c b/sys/rpc/svc_dg.c
new file mode 100644
index 0000000..08d5947
--- /dev/null
+++ b/sys/rpc/svc_dg.c
@@ -0,0 +1,334 @@
+/* $NetBSD: svc_dg.c,v 1.4 2000/07/06 03:10:35 christos Exp $ */
+
+/*
+ * Sun RPC is a product of Sun Microsystems, Inc. and is provided for
+ * unrestricted use provided that this legend is included on all tape
+ * media and as a part of the software program in whole or part. Users
+ * may copy or modify Sun RPC without charge, but are not authorized
+ * to license or distribute it to anyone else except as part of a product or
+ * program developed by the user.
+ *
+ * SUN RPC IS PROVIDED AS IS WITH NO WARRANTIES OF ANY KIND INCLUDING THE
+ * WARRANTIES OF DESIGN, MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE, OR ARISING FROM A COURSE OF DEALING, USAGE OR TRADE PRACTICE.
+ *
+ * Sun RPC is provided with no support and without any obligation on the
+ * part of Sun Microsystems, Inc. to assist in its use, correction,
+ * modification or enhancement.
+ *
+ * SUN MICROSYSTEMS, INC. SHALL HAVE NO LIABILITY WITH RESPECT TO THE
+ * INFRINGEMENT OF COPYRIGHTS, TRADE SECRETS OR ANY PATENTS BY SUN RPC
+ * OR ANY PART THEREOF.
+ *
+ * In no event will Sun Microsystems, Inc. be liable for any lost revenue
+ * or profits or other special, indirect and consequential damages, even if
+ * Sun has been advised of the possibility of such damages.
+ *
+ * Sun Microsystems, Inc.
+ * 2550 Garcia Avenue
+ * Mountain View, California 94043
+ */
+
+/*
+ * Copyright (c) 1986-1991 by Sun Microsystems Inc.
+ */
+
+#if defined(LIBC_SCCS) && !defined(lint)
+#ident "@(#)svc_dg.c 1.17 94/04/24 SMI"
+#endif
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * svc_dg.c, Server side for connectionless RPC.
+ */
+
+#include <sys/param.h>
+#include <sys/lock.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/mutex.h>
+#include <sys/protosw.h>
+#include <sys/queue.h>
+#include <sys/socket.h>
+#include <sys/socketvar.h>
+#include <sys/systm.h>
+#include <sys/uio.h>
+
+#include <rpc/rpc.h>
+
+#include "rpc_com.h"
+
+static enum xprt_stat svc_dg_stat(SVCXPRT *);
+static bool_t svc_dg_recv(SVCXPRT *, struct rpc_msg *);
+static bool_t svc_dg_reply(SVCXPRT *, struct rpc_msg *);
+static bool_t svc_dg_getargs(SVCXPRT *, xdrproc_t, void *);
+static bool_t svc_dg_freeargs(SVCXPRT *, xdrproc_t, void *);
+static void svc_dg_destroy(SVCXPRT *);
+static bool_t svc_dg_control(SVCXPRT *, const u_int, void *);
+static void svc_dg_soupcall(struct socket *so, void *arg, int waitflag);
+
+static struct xp_ops svc_dg_ops = {
+ .xp_recv = svc_dg_recv,
+ .xp_stat = svc_dg_stat,
+ .xp_getargs = svc_dg_getargs,
+ .xp_reply = svc_dg_reply,
+ .xp_freeargs = svc_dg_freeargs,
+ .xp_destroy = svc_dg_destroy,
+ .xp_control = svc_dg_control,
+};
+
+/*
+ * Usage:
+ *	xprt = svc_dg_create(pool, sock, sendsize, recvsize);
+ * Does other connectionless-specific initializations.
+ * Once *xprt is initialized, it is registered
+ * (see svc.h, xprt_register). If recvsize or sendsize are 0, suitable
+ * system defaults are chosen.
+ * The routine returns NULL if a problem occurred.
+ */
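+
+/*
+ * A rough kernel-side sketch of the expected usage, with error
+ * handling omitted; the socket-creation call and the names pool,
+ * MYPROG, MYVERS, myprog_dispatch and nconf are assumptions for
+ * illustration only:
+ *
+ *	struct socket *so;
+ *	SVCXPRT *xprt;
+ *
+ *	socreate(AF_INET, &so, SOCK_DGRAM, IPPROTO_UDP,
+ *	    curthread->td_ucred, curthread);
+ *	(optionally bind so to a well-known or reserved port here)
+ *	xprt = svc_dg_create(pool, so, 0, 0);
+ *	svc_reg(xprt, MYPROG, MYVERS, myprog_dispatch, nconf);
+ */
+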
+static const char svc_dg_str[] = "svc_dg_create: %s";
+static const char svc_dg_err1[] = "could not get transport information";
+static const char svc_dg_err2[] = "transport does not support data transfer";
+static const char __no_mem_str[] = "out of memory";
+
+SVCXPRT *
+svc_dg_create(SVCPOOL *pool, struct socket *so, size_t sendsize,
+ size_t recvsize)
+{
+ SVCXPRT *xprt;
+ struct __rpc_sockinfo si;
+ struct sockaddr* sa;
+ int error;
+
+ if (!__rpc_socket2sockinfo(so, &si)) {
+ printf(svc_dg_str, svc_dg_err1);
+ return (NULL);
+ }
+ /*
+ * Find the receive and the send size
+ */
+ sendsize = __rpc_get_t_size(si.si_af, si.si_proto, (int)sendsize);
+ recvsize = __rpc_get_t_size(si.si_af, si.si_proto, (int)recvsize);
+ if ((sendsize == 0) || (recvsize == 0)) {
+ printf(svc_dg_str, svc_dg_err2);
+ return (NULL);
+ }
+
+ xprt = mem_alloc(sizeof (SVCXPRT));
+ memset(xprt, 0, sizeof (SVCXPRT));
+ mtx_init(&xprt->xp_lock, "xprt->xp_lock", NULL, MTX_DEF);
+ xprt->xp_pool = pool;
+ xprt->xp_socket = so;
+ xprt->xp_p1 = NULL;
+ xprt->xp_p2 = NULL;
+ xprt->xp_ops = &svc_dg_ops;
+
+ error = so->so_proto->pr_usrreqs->pru_sockaddr(so, &sa);
+ if (error)
+ goto freedata;
+
+ xprt->xp_ltaddr.buf = mem_alloc(sizeof (struct sockaddr_storage));
+ xprt->xp_ltaddr.maxlen = sizeof (struct sockaddr_storage);
+ xprt->xp_ltaddr.len = sa->sa_len;
+ memcpy(xprt->xp_ltaddr.buf, sa, sa->sa_len);
+ free(sa, M_SONAME);
+
+ xprt->xp_rtaddr.buf = mem_alloc(sizeof (struct sockaddr_storage));
+ xprt->xp_rtaddr.maxlen = sizeof (struct sockaddr_storage);
+ xprt->xp_rtaddr.len = 0;
+
+ xprt_register(xprt);
+
+ SOCKBUF_LOCK(&so->so_rcv);
+ so->so_upcallarg = xprt;
+ so->so_upcall = svc_dg_soupcall;
+ so->so_rcv.sb_flags |= SB_UPCALL;
+ SOCKBUF_UNLOCK(&so->so_rcv);
+
+ return (xprt);
+freedata:
+ (void) printf(svc_dg_str, __no_mem_str);
+ if (xprt) {
+ (void) mem_free(xprt, sizeof (SVCXPRT));
+ }
+ return (NULL);
+}
+
+/*ARGSUSED*/
+static enum xprt_stat
+svc_dg_stat(SVCXPRT *xprt)
+{
+
+ return (XPRT_IDLE);
+}
+
+static bool_t
+svc_dg_recv(SVCXPRT *xprt, struct rpc_msg *msg)
+{
+ struct uio uio;
+ struct sockaddr *raddr;
+ struct mbuf *mreq;
+ int error, rcvflag;
+
+ /*
+ * The socket upcall calls xprt_active() which will eventually
+ * cause the server to call us here. We attempt to read a
+ * packet from the socket and process it. If the read fails,
+ * we have drained all pending requests so we call
+ * xprt_inactive().
+ *
+ * The lock protects us in the case where a new packet arrives
+ * on the socket after our call to soreceive fails with
+ * EWOULDBLOCK - the call to xprt_active() in the upcall will
+ * happen only after our call to xprt_inactive() which ensures
+ * that we will remain active. It might be possible to use
+	 * SOCKBUF_LOCK for this - it's not clear to me what locks are
+ * held during the upcall.
+ */
+ mtx_lock(&xprt->xp_lock);
+
+ uio.uio_resid = 1000000000;
+ uio.uio_td = curthread;
+ mreq = NULL;
+ rcvflag = MSG_DONTWAIT;
+ error = soreceive(xprt->xp_socket, &raddr, &uio, &mreq, NULL, &rcvflag);
+
+ if (error == EWOULDBLOCK) {
+ xprt_inactive(xprt);
+ mtx_unlock(&xprt->xp_lock);
+ return (FALSE);
+ }
+
+ if (error) {
+ SOCKBUF_LOCK(&xprt->xp_socket->so_rcv);
+ xprt->xp_socket->so_upcallarg = NULL;
+ xprt->xp_socket->so_upcall = NULL;
+ xprt->xp_socket->so_rcv.sb_flags &= ~SB_UPCALL;
+ SOCKBUF_UNLOCK(&xprt->xp_socket->so_rcv);
+ xprt_inactive(xprt);
+ mtx_unlock(&xprt->xp_lock);
+ return (FALSE);
+ }
+
+ mtx_unlock(&xprt->xp_lock);
+
+ KASSERT(raddr->sa_len < xprt->xp_rtaddr.maxlen,
+ ("Unexpected remote address length"));
+ memcpy(xprt->xp_rtaddr.buf, raddr, raddr->sa_len);
+ xprt->xp_rtaddr.len = raddr->sa_len;
+ free(raddr, M_SONAME);
+
+ xdrmbuf_create(&xprt->xp_xdrreq, mreq, XDR_DECODE);
+ if (! xdr_callmsg(&xprt->xp_xdrreq, msg)) {
+ XDR_DESTROY(&xprt->xp_xdrreq);
+ return (FALSE);
+ }
+ xprt->xp_xid = msg->rm_xid;
+
+ return (TRUE);
+}
+
+static bool_t
+svc_dg_reply(SVCXPRT *xprt, struct rpc_msg *msg)
+{
+ struct mbuf *mrep;
+ bool_t stat = FALSE;
+ int error;
+
+ MGETHDR(mrep, M_WAIT, MT_DATA);
+ MCLGET(mrep, M_WAIT);
+ mrep->m_len = 0;
+
+ xdrmbuf_create(&xprt->xp_xdrrep, mrep, XDR_ENCODE);
+ msg->rm_xid = xprt->xp_xid;
+ if (xdr_replymsg(&xprt->xp_xdrrep, msg)) {
+ m_fixhdr(mrep);
+ error = sosend(xprt->xp_socket,
+ (struct sockaddr *) xprt->xp_rtaddr.buf, NULL, mrep, NULL,
+ 0, curthread);
+ if (!error) {
+ stat = TRUE;
+ }
+ } else {
+ m_freem(mrep);
+ }
+
+ /*
+ * This frees the request mbuf chain as well. The reply mbuf
+ * chain was consumed by sosend.
+ */
+ XDR_DESTROY(&xprt->xp_xdrreq);
+ XDR_DESTROY(&xprt->xp_xdrrep);
+ xprt->xp_p2 = NULL;
+
+ return (stat);
+}
+
+static bool_t
+svc_dg_getargs(SVCXPRT *xprt, xdrproc_t xdr_args, void *args_ptr)
+{
+
+ return (xdr_args(&xprt->xp_xdrreq, args_ptr));
+}
+
+static bool_t
+svc_dg_freeargs(SVCXPRT *xprt, xdrproc_t xdr_args, void *args_ptr)
+{
+ XDR xdrs;
+
+ /*
+ * Free the request mbuf here - this allows us to handle
+ * protocols where not all requests have replies
+	 * (e.g. NLM). Note that xdrmbuf_destroy handles being called
+ * twice correctly - the mbuf will only be freed once.
+ */
+ XDR_DESTROY(&xprt->xp_xdrreq);
+
+ xdrs.x_op = XDR_FREE;
+ return (xdr_args(&xdrs, args_ptr));
+}
+
+static void
+svc_dg_destroy(SVCXPRT *xprt)
+{
+ SOCKBUF_LOCK(&xprt->xp_socket->so_rcv);
+ xprt->xp_socket->so_upcallarg = NULL;
+ xprt->xp_socket->so_upcall = NULL;
+ xprt->xp_socket->so_rcv.sb_flags &= ~SB_UPCALL;
+ SOCKBUF_UNLOCK(&xprt->xp_socket->so_rcv);
+
+ xprt_unregister(xprt);
+
+ mtx_destroy(&xprt->xp_lock);
+ if (xprt->xp_socket)
+ (void)soclose(xprt->xp_socket);
+
+ if (xprt->xp_rtaddr.buf)
+ (void) mem_free(xprt->xp_rtaddr.buf, xprt->xp_rtaddr.maxlen);
+ if (xprt->xp_ltaddr.buf)
+ (void) mem_free(xprt->xp_ltaddr.buf, xprt->xp_ltaddr.maxlen);
+ (void) mem_free(xprt, sizeof (SVCXPRT));
+}
+
+static bool_t
+/*ARGSUSED*/
+svc_dg_control(SVCXPRT *xprt, const u_int rq, void *in)
+{
+
+ return (FALSE);
+}
+
+static void
+svc_dg_soupcall(struct socket *so, void *arg, int waitflag)
+{
+ SVCXPRT *xprt = (SVCXPRT *) arg;
+
+ mtx_lock(&xprt->xp_lock);
+ xprt_active(xprt);
+ mtx_unlock(&xprt->xp_lock);
+}
diff --git a/sys/rpc/svc_generic.c b/sys/rpc/svc_generic.c
new file mode 100644
index 0000000..522b413
--- /dev/null
+++ b/sys/rpc/svc_generic.c
@@ -0,0 +1,407 @@
+/* $NetBSD: svc_generic.c,v 1.3 2000/07/06 03:10:35 christos Exp $ */
+
+/*
+ * Sun RPC is a product of Sun Microsystems, Inc. and is provided for
+ * unrestricted use provided that this legend is included on all tape
+ * media and as a part of the software program in whole or part. Users
+ * may copy or modify Sun RPC without charge, but are not authorized
+ * to license or distribute it to anyone else except as part of a product or
+ * program developed by the user.
+ *
+ * SUN RPC IS PROVIDED AS IS WITH NO WARRANTIES OF ANY KIND INCLUDING THE
+ * WARRANTIES OF DESIGN, MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE, OR ARISING FROM A COURSE OF DEALING, USAGE OR TRADE PRACTICE.
+ *
+ * Sun RPC is provided with no support and without any obligation on the
+ * part of Sun Microsystems, Inc. to assist in its use, correction,
+ * modification or enhancement.
+ *
+ * SUN MICROSYSTEMS, INC. SHALL HAVE NO LIABILITY WITH RESPECT TO THE
+ * INFRINGEMENT OF COPYRIGHTS, TRADE SECRETS OR ANY PATENTS BY SUN RPC
+ * OR ANY PART THEREOF.
+ *
+ * In no event will Sun Microsystems, Inc. be liable for any lost revenue
+ * or profits or other special, indirect and consequential damages, even if
+ * Sun has been advised of the possibility of such damages.
+ *
+ * Sun Microsystems, Inc.
+ * 2550 Garcia Avenue
+ * Mountain View, California 94043
+ */
+
+/*
+ * Copyright (c) 1986-1991 by Sun Microsystems Inc.
+ */
+
+#if defined(LIBC_SCCS) && !defined(lint)
+#ident "@(#)svc_generic.c 1.19 94/04/24 SMI"
+static char sccsid[] = "@(#)svc_generic.c 1.21 89/02/28 Copyr 1988 Sun Micro";
+#endif
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * svc_generic.c, Server side for RPC.
+ *
+ */
+
+#include "opt_inet6.h"
+
+#include <sys/param.h>
+#include <sys/lock.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/mutex.h>
+#include <sys/protosw.h>
+#include <sys/queue.h>
+#include <sys/socket.h>
+#include <sys/socketvar.h>
+#include <sys/systm.h>
+#include <sys/sx.h>
+#include <sys/ucred.h>
+
+#include <rpc/rpc.h>
+#include <rpc/rpcb_clnt.h>
+#include <rpc/nettype.h>
+
+#include "rpc_com.h"
+
+extern int __svc_vc_setflag(SVCXPRT *, int);
+
+/*
+ * The highest level interface for server creation.
+ * It tries for all the nettokens in that particular class of token
+ * and returns the number of handles it can create and/or find.
+ *
+ * It creates a linked list of all the handles it could create.
+ * If svc_create() is called multiple times, it uses the handle
+ * created earlier instead of creating a new handle every time.
+ */
+int
+svc_create(
+ SVCPOOL *pool,
+ void (*dispatch)(struct svc_req *, SVCXPRT *),
+ rpcprog_t prognum, /* Program number */
+ rpcvers_t versnum, /* Version number */
+ const char *nettype) /* Networktype token */
+{
+ int num = 0;
+ SVCXPRT *xprt;
+ struct netconfig *nconf;
+ void *handle;
+
+ if ((handle = __rpc_setconf(nettype)) == NULL) {
+ printf("svc_create: unknown protocol");
+ return (0);
+ }
+ while ((nconf = __rpc_getconf(handle)) != NULL) {
+ mtx_lock(&pool->sp_lock);
+ TAILQ_FOREACH(xprt, &pool->sp_xlist, xp_link) {
+ if (strcmp(xprt->xp_netid, nconf->nc_netid) == 0) {
+ /* Found an old one, use it */
+ mtx_unlock(&pool->sp_lock);
+ (void) rpcb_unset(prognum, versnum, nconf);
+ if (svc_reg(xprt, prognum, versnum,
+ dispatch, nconf) == FALSE) {
+ printf(
+ "svc_create: could not register prog %u vers %u on %s\n",
+ (unsigned)prognum, (unsigned)versnum,
+ nconf->nc_netid);
+ mtx_lock(&pool->sp_lock);
+ } else {
+ num++;
+ mtx_lock(&pool->sp_lock);
+ break;
+ }
+ }
+ }
+ mtx_unlock(&pool->sp_lock);
+ if (xprt == NULL) {
+ /* It was not found. Now create a new one */
+ xprt = svc_tp_create(pool, dispatch, prognum, versnum,
+ NULL, nconf);
+ if (xprt)
+ num++;
+ }
+ }
+ __rpc_endconf(handle);
+ /*
+	 * If num == 0, the error messages are generated by the
+	 * underlying layers and hence are not needed here.
+ */
+ return (num);
+}
+
+/*
+ * The high level interface to svc_tli_create().
+ * It tries to create a server for "nconf" and registers the service
+ * with rpcbind. It calls svc_tli_create().
+ */
+SVCXPRT *
+svc_tp_create(
+ SVCPOOL *pool,
+ void (*dispatch)(struct svc_req *, SVCXPRT *),
+ rpcprog_t prognum, /* Program number */
+ rpcvers_t versnum, /* Version number */
+ const char *uaddr, /* Address (or null for default) */
+ const struct netconfig *nconf) /* Netconfig structure for the network */
+{
+ struct netconfig nconfcopy;
+ struct netbuf *taddr;
+ struct t_bind bind;
+ SVCXPRT *xprt;
+
+ if (nconf == NULL) {
+ printf(
+ "svc_tp_create: invalid netconfig structure for prog %u vers %u\n",
+ (unsigned)prognum, (unsigned)versnum);
+ return (NULL);
+ }
+ if (uaddr) {
+ taddr = uaddr2taddr(nconf, uaddr);
+ bind.addr = *taddr;
+ free(taddr, M_RPC);
+ bind.qlen = SOMAXCONN;
+ xprt = svc_tli_create(pool, NULL, nconf, &bind, 0, 0);
+ free(bind.addr.buf, M_RPC);
+ } else {
+ xprt = svc_tli_create(pool, NULL, nconf, NULL, 0, 0);
+ }
+ if (xprt == NULL) {
+ return (NULL);
+ }
+ /*LINTED const castaway*/
+ nconfcopy = *nconf;
+ (void) rpcb_unset(prognum, versnum, &nconfcopy);
+ if (svc_reg(xprt, prognum, versnum, dispatch, nconf) == FALSE) {
+ printf(
+ "svc_tp_create: Could not register prog %u vers %u on %s\n",
+ (unsigned)prognum, (unsigned)versnum,
+ nconf->nc_netid);
+ SVC_DESTROY(xprt);
+ return (NULL);
+ }
+ return (xprt);
+}
+
+/*
+ * Bind a socket to a privileged IP port
+ */
+int bindresvport(struct socket *so, struct sockaddr *sa);
+int
+bindresvport(struct socket *so, struct sockaddr *sa)
+{
+ int old, error, af;
+ bool_t freesa = FALSE;
+ struct sockaddr_in *sin;
+#ifdef INET6
+ struct sockaddr_in6 *sin6;
+#endif
+ struct sockopt opt;
+ int proto, portrange, portlow;
+ u_int16_t *portp;
+ socklen_t salen;
+
+ if (sa == NULL) {
+ error = so->so_proto->pr_usrreqs->pru_sockaddr(so, &sa);
+ if (error)
+ return (error);
+ freesa = TRUE;
+ af = sa->sa_family;
+ salen = sa->sa_len;
+ memset(sa, 0, sa->sa_len);
+ } else {
+ af = sa->sa_family;
+ salen = sa->sa_len;
+ }
+
+ switch (af) {
+ case AF_INET:
+ proto = IPPROTO_IP;
+ portrange = IP_PORTRANGE;
+ portlow = IP_PORTRANGE_LOW;
+ sin = (struct sockaddr_in *)sa;
+ portp = &sin->sin_port;
+ break;
+#ifdef INET6
+ case AF_INET6:
+ proto = IPPROTO_IPV6;
+ portrange = IPV6_PORTRANGE;
+ portlow = IPV6_PORTRANGE_LOW;
+ sin6 = (struct sockaddr_in6 *)sa;
+ portp = &sin6->sin6_port;
+ break;
+#endif
+ default:
+ return (EPFNOSUPPORT);
+ }
+
+ sa->sa_family = af;
+ sa->sa_len = salen;
+
+ if (*portp == 0) {
+ bzero(&opt, sizeof(opt));
+ opt.sopt_dir = SOPT_GET;
+ opt.sopt_level = proto;
+ opt.sopt_name = portrange;
+ opt.sopt_val = &old;
+ opt.sopt_valsize = sizeof(old);
+ error = sogetopt(so, &opt);
+ if (error)
+ goto out;
+
+ opt.sopt_dir = SOPT_SET;
+ opt.sopt_val = &portlow;
+ error = sosetopt(so, &opt);
+ if (error)
+ goto out;
+ }
+
+ error = sobind(so, sa, curthread);
+
+ if (*portp == 0) {
+ if (error) {
+ opt.sopt_dir = SOPT_SET;
+ opt.sopt_val = &old;
+ sosetopt(so, &opt);
+ }
+ }
+out:
+ if (freesa)
+ free(sa, M_SONAME);
+
+ return (error);
+}
+
+/*
+ * If so is NULL, then it opens a socket for the given transport
+ * provider (nconf cannot be NULL then). If the socket is unbound and
+ * bindaddr is non-NULL, it binds the socket to that address. For a
+ * NULL bindaddr on connection oriented transports, the listen queue
+ * length is set to SOMAXCONN.
+ *
+ * If sendsz or recvsz are zero, their default values are chosen.
+ */
+SVCXPRT *
+svc_tli_create(
+ SVCPOOL *pool,
+ struct socket *so, /* Connection end point */
+ const struct netconfig *nconf, /* Netconfig struct for nettoken */
+ const struct t_bind *bindaddr, /* Local bind address */
+ size_t sendsz, /* Max sendsize */
+ size_t recvsz) /* Max recvsize */
+{
+ SVCXPRT *xprt = NULL; /* service handle */
+ bool_t madeso = FALSE; /* whether so opened here */
+ struct __rpc_sockinfo si;
+ struct sockaddr_storage ss;
+
+ if (!so) {
+ if (nconf == NULL) {
+ printf("svc_tli_create: invalid netconfig\n");
+ return (NULL);
+ }
+ so = __rpc_nconf2socket(nconf);
+ if (!so) {
+ printf(
+ "svc_tli_create: could not open connection for %s\n",
+ nconf->nc_netid);
+ return (NULL);
+ }
+ __rpc_nconf2sockinfo(nconf, &si);
+ madeso = TRUE;
+ } else {
+ /*
+ * It is an open socket. Get the transport info.
+ */
+ if (!__rpc_socket2sockinfo(so, &si)) {
+ printf(
+ "svc_tli_create: could not get transport information\n");
+ return (NULL);
+ }
+ }
+
+ /*
+ * If the socket is unbound, try to bind it.
+ */
+ if (madeso || !__rpc_sockisbound(so)) {
+ if (bindaddr == NULL) {
+ if (bindresvport(so, NULL)) {
+ memset(&ss, 0, sizeof ss);
+ ss.ss_family = si.si_af;
+ ss.ss_len = si.si_alen;
+ if (sobind(so, (struct sockaddr *)&ss,
+ curthread)) {
+ printf(
+ "svc_tli_create: could not bind to anonymous port\n");
+ goto freedata;
+ }
+ }
+ solisten(so, SOMAXCONN, curthread);
+ } else {
+ if (bindresvport(so,
+ (struct sockaddr *)bindaddr->addr.buf)) {
+ printf(
+ "svc_tli_create: could not bind to requested address\n");
+ goto freedata;
+ }
+ solisten(so, (int)bindaddr->qlen, curthread);
+ }
+
+ }
+ /*
+ * call transport specific function.
+ */
+ switch (si.si_socktype) {
+ case SOCK_STREAM:
+#if 0
+ slen = sizeof ss;
+ if (_getpeername(fd, (struct sockaddr *)(void *)&ss, &slen)
+ == 0) {
+ /* accepted socket */
+ xprt = svc_fd_create(fd, sendsz, recvsz);
+ } else
+#endif
+ xprt = svc_vc_create(pool, so, sendsz, recvsz);
+ if (!nconf || !xprt)
+ break;
+#if 0
+ /* XXX fvdl */
+ if (strcmp(nconf->nc_protofmly, "inet") == 0 ||
+ strcmp(nconf->nc_protofmly, "inet6") == 0)
+ (void) __svc_vc_setflag(xprt, TRUE);
+#endif
+ break;
+ case SOCK_DGRAM:
+ xprt = svc_dg_create(pool, so, sendsz, recvsz);
+ break;
+ default:
+ printf("svc_tli_create: bad service type");
+ goto freedata;
+ }
+
+ if (xprt == NULL)
+ /*
+		 * The error messages here are printed by the lower layers:
+ * svc_vc_create(), svc_fd_create() and svc_dg_create().
+ */
+ goto freedata;
+
+ /* Fill in type of service */
+ xprt->xp_type = __rpc_socktype2seman(si.si_socktype);
+
+ if (nconf) {
+ xprt->xp_netid = strdup(nconf->nc_netid, M_RPC);
+ }
+ return (xprt);
+
+freedata:
+ if (madeso)
+ (void)soclose(so);
+ if (xprt) {
+		if (!madeso) /* so that svc_destroy() doesn't close the socket */
+ xprt->xp_socket = NULL;
+ SVC_DESTROY(xprt);
+ }
+ return (NULL);
+}
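
For orientation, here is a minimal sketch (not part of this patch) of how an in-kernel RPC service might use svc_tli_create() to set up a listening transport from a netconfig entry. The pool and netconfig pointers are assumed to come from the caller's own setup code; the function name is hypothetical.

	/*
	 * Hypothetical caller: open and bind a socket for the given
	 * transport and wrap it in a server transport.  Passing a NULL
	 * socket makes svc_tli_create() open one from nconf and bind it
	 * to a reserved port.
	 */
	static SVCXPRT *
	example_create_transport(SVCPOOL *pool, struct netconfig *nconf)
	{
		SVCXPRT *xprt;

		xprt = svc_tli_create(pool, NULL, nconf, NULL, 0, 0);
		if (xprt == NULL)
			printf("example: cannot create transport for %s\n",
			    nconf->nc_netid);
		return (xprt);
	}
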
diff --git a/sys/rpc/svc_vc.c b/sys/rpc/svc_vc.c
new file mode 100644
index 0000000..cada252
--- /dev/null
+++ b/sys/rpc/svc_vc.c
@@ -0,0 +1,746 @@
+/* $NetBSD: svc_vc.c,v 1.7 2000/08/03 00:01:53 fvdl Exp $ */
+
+/*
+ * Sun RPC is a product of Sun Microsystems, Inc. and is provided for
+ * unrestricted use provided that this legend is included on all tape
+ * media and as a part of the software program in whole or part. Users
+ * may copy or modify Sun RPC without charge, but are not authorized
+ * to license or distribute it to anyone else except as part of a product or
+ * program developed by the user.
+ *
+ * SUN RPC IS PROVIDED AS IS WITH NO WARRANTIES OF ANY KIND INCLUDING THE
+ * WARRANTIES OF DESIGN, MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE, OR ARISING FROM A COURSE OF DEALING, USAGE OR TRADE PRACTICE.
+ *
+ * Sun RPC is provided with no support and without any obligation on the
+ * part of Sun Microsystems, Inc. to assist in its use, correction,
+ * modification or enhancement.
+ *
+ * SUN MICROSYSTEMS, INC. SHALL HAVE NO LIABILITY WITH RESPECT TO THE
+ * INFRINGEMENT OF COPYRIGHTS, TRADE SECRETS OR ANY PATENTS BY SUN RPC
+ * OR ANY PART THEREOF.
+ *
+ * In no event will Sun Microsystems, Inc. be liable for any lost revenue
+ * or profits or other special, indirect and consequential damages, even if
+ * Sun has been advised of the possibility of such damages.
+ *
+ * Sun Microsystems, Inc.
+ * 2550 Garcia Avenue
+ * Mountain View, California 94043
+ */
+
+#if defined(LIBC_SCCS) && !defined(lint)
+static char *sccsid2 = "@(#)svc_tcp.c 1.21 87/08/11 Copyr 1984 Sun Micro";
+static char *sccsid = "@(#)svc_tcp.c 2.2 88/08/01 4.0 RPCSRC";
+#endif
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * svc_vc.c, Server side for connection-oriented RPC.
+ *
+ * Actually implements two flavors of transporter -
+ * a tcp rendezvouser (a listener and connection establisher)
+ * and a record/tcp stream.
+ */
+
+#include <sys/param.h>
+#include <sys/lock.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/mutex.h>
+#include <sys/protosw.h>
+#include <sys/queue.h>
+#include <sys/socket.h>
+#include <sys/socketvar.h>
+#include <sys/systm.h>
+#include <sys/uio.h>
+#include <netinet/tcp.h>
+
+#include <rpc/rpc.h>
+
+#include "rpc_com.h"
+
+static bool_t svc_vc_rendezvous_recv(SVCXPRT *, struct rpc_msg *);
+static enum xprt_stat svc_vc_rendezvous_stat(SVCXPRT *);
+static void svc_vc_rendezvous_destroy(SVCXPRT *);
+static bool_t svc_vc_null(void);
+static void svc_vc_destroy(SVCXPRT *);
+static enum xprt_stat svc_vc_stat(SVCXPRT *);
+static bool_t svc_vc_recv(SVCXPRT *, struct rpc_msg *);
+static bool_t svc_vc_getargs(SVCXPRT *, xdrproc_t, void *);
+static bool_t svc_vc_freeargs(SVCXPRT *, xdrproc_t, void *);
+static bool_t svc_vc_reply(SVCXPRT *, struct rpc_msg *);
+static bool_t svc_vc_control(SVCXPRT *xprt, const u_int rq, void *in);
+static bool_t svc_vc_rendezvous_control (SVCXPRT *xprt, const u_int rq,
+ void *in);
+static SVCXPRT *svc_vc_create_conn(SVCPOOL *pool, struct socket *so,
+ struct sockaddr *raddr);
+static int svc_vc_accept(struct socket *head, struct socket **sop);
+static void svc_vc_soupcall(struct socket *so, void *arg, int waitflag);
+
+static struct xp_ops svc_vc_rendezvous_ops = {
+ .xp_recv = svc_vc_rendezvous_recv,
+ .xp_stat = svc_vc_rendezvous_stat,
+ .xp_getargs = (bool_t (*)(SVCXPRT *, xdrproc_t, void *))svc_vc_null,
+ .xp_reply = (bool_t (*)(SVCXPRT *, struct rpc_msg *))svc_vc_null,
+ .xp_freeargs = (bool_t (*)(SVCXPRT *, xdrproc_t, void *))svc_vc_null,
+ .xp_destroy = svc_vc_rendezvous_destroy,
+ .xp_control = svc_vc_rendezvous_control
+};
+
+static struct xp_ops svc_vc_ops = {
+ .xp_recv = svc_vc_recv,
+ .xp_stat = svc_vc_stat,
+ .xp_getargs = svc_vc_getargs,
+ .xp_reply = svc_vc_reply,
+ .xp_freeargs = svc_vc_freeargs,
+ .xp_destroy = svc_vc_destroy,
+ .xp_control = svc_vc_control
+};
+
+struct cf_conn { /* kept in xprt->xp_p1 for actual connection */
+ enum xprt_stat strm_stat;
+ struct mbuf *mpending; /* unparsed data read from the socket */
+ struct mbuf *mreq; /* current record being built from mpending */
+ uint32_t resid; /* number of bytes needed for fragment */
+ bool_t eor; /* reading last fragment of current record */
+};
+
+/*
+ * Usage:
+ *	xprt = svc_vc_create(pool, so, send_buf_size, recv_buf_size);
+ *
+ * Creates, registers, and returns an (rpc) tcp based transport.
+ * Once *xprt is initialized, it is registered as a transport;
+ * see (svc.h, xprt_register).  This routine returns
+ * NULL if a problem occurred.
+ *
+ * The socket passed in is expected to refer to a bound, but
+ * not yet connected socket.
+ *
+ * Since streams do buffered I/O similar to stdio, the caller can specify
+ * how big the send and receive buffers are via the third and fourth parms;
+ * 0 => use the system default.
+ */
+SVCXPRT *
+svc_vc_create(SVCPOOL *pool, struct socket *so, size_t sendsize,
+ size_t recvsize)
+{
+ SVCXPRT *xprt;
+ struct sockaddr* sa;
+ int error;
+
+ xprt = mem_alloc(sizeof(SVCXPRT));
+ mtx_init(&xprt->xp_lock, "xprt->xp_lock", NULL, MTX_DEF);
+ xprt->xp_pool = pool;
+ xprt->xp_socket = so;
+ xprt->xp_p1 = NULL;
+ xprt->xp_p2 = NULL;
+ xprt->xp_p3 = NULL;
+ xprt->xp_verf = _null_auth;
+ xprt->xp_ops = &svc_vc_rendezvous_ops;
+
+ error = so->so_proto->pr_usrreqs->pru_sockaddr(so, &sa);
+ if (error)
+ goto cleanup_svc_vc_create;
+
+ xprt->xp_ltaddr.buf = mem_alloc(sizeof (struct sockaddr_storage));
+ xprt->xp_ltaddr.maxlen = sizeof (struct sockaddr_storage);
+ xprt->xp_ltaddr.len = sa->sa_len;
+ memcpy(xprt->xp_ltaddr.buf, sa, sa->sa_len);
+ free(sa, M_SONAME);
+
+ xprt->xp_rtaddr.maxlen = 0;
+
+ xprt_register(xprt);
+
+ solisten(so, SOMAXCONN, curthread);
+
+ SOCKBUF_LOCK(&so->so_rcv);
+ so->so_upcallarg = xprt;
+ so->so_upcall = svc_vc_soupcall;
+ so->so_rcv.sb_flags |= SB_UPCALL;
+ SOCKBUF_UNLOCK(&so->so_rcv);
+
+ return (xprt);
+cleanup_svc_vc_create:
+ if (xprt)
+ mem_free(xprt, sizeof(*xprt));
+ return (NULL);
+}
+
+/*
+ * Create a new transport for a socket obtained via soaccept().
+ */
+SVCXPRT *
+svc_vc_create_conn(SVCPOOL *pool, struct socket *so, struct sockaddr *raddr)
+{
+ SVCXPRT *xprt = NULL;
+ struct cf_conn *cd = NULL;
+ struct sockaddr* sa = NULL;
+ int error;
+
+ cd = mem_alloc(sizeof(*cd));
+ cd->strm_stat = XPRT_IDLE;
+
+ xprt = mem_alloc(sizeof(SVCXPRT));
+ mtx_init(&xprt->xp_lock, "xprt->xp_lock", NULL, MTX_DEF);
+ xprt->xp_pool = pool;
+ xprt->xp_socket = so;
+ xprt->xp_p1 = cd;
+ xprt->xp_p2 = NULL;
+ xprt->xp_p3 = NULL;
+ xprt->xp_verf = _null_auth;
+ xprt->xp_ops = &svc_vc_ops;
+
+ xprt->xp_rtaddr.buf = mem_alloc(sizeof (struct sockaddr_storage));
+ xprt->xp_rtaddr.maxlen = sizeof (struct sockaddr_storage);
+ xprt->xp_rtaddr.len = raddr->sa_len;
+ memcpy(xprt->xp_rtaddr.buf, raddr, raddr->sa_len);
+
+ error = so->so_proto->pr_usrreqs->pru_sockaddr(so, &sa);
+ if (error)
+ goto cleanup_svc_vc_create;
+
+ xprt->xp_ltaddr.buf = mem_alloc(sizeof (struct sockaddr_storage));
+ xprt->xp_ltaddr.maxlen = sizeof (struct sockaddr_storage);
+ xprt->xp_ltaddr.len = sa->sa_len;
+ memcpy(xprt->xp_ltaddr.buf, sa, sa->sa_len);
+ free(sa, M_SONAME);
+
+ xprt_register(xprt);
+
+ SOCKBUF_LOCK(&so->so_rcv);
+ so->so_upcallarg = xprt;
+ so->so_upcall = svc_vc_soupcall;
+ so->so_rcv.sb_flags |= SB_UPCALL;
+ SOCKBUF_UNLOCK(&so->so_rcv);
+
+ /*
+ * Throw the transport into the active list in case it already
+ * has some data buffered.
+ */
+ mtx_lock(&xprt->xp_lock);
+ xprt_active(xprt);
+ mtx_unlock(&xprt->xp_lock);
+
+ return (xprt);
+cleanup_svc_vc_create:
+ if (xprt) {
+ if (xprt->xp_ltaddr.buf)
+ mem_free(xprt->xp_ltaddr.buf,
+ sizeof(struct sockaddr_storage));
+ if (xprt->xp_rtaddr.buf)
+ mem_free(xprt->xp_rtaddr.buf,
+ sizeof(struct sockaddr_storage));
+ mem_free(xprt, sizeof(*xprt));
+ }
+ if (cd)
+ mem_free(cd, sizeof(*cd));
+ return (NULL);
+}
+
+/*
+ * This does all of the accept except the final call to soaccept. The
+ * caller will call soaccept after dropping its locks (soaccept may
+ * call malloc).
+ */
+int
+svc_vc_accept(struct socket *head, struct socket **sop)
+{
+ int error = 0;
+ struct socket *so;
+
+ if ((head->so_options & SO_ACCEPTCONN) == 0) {
+ error = EINVAL;
+ goto done;
+ }
+#ifdef MAC
+ SOCK_LOCK(head);
+	error = mac_socket_check_accept(curthread->td_ucred, head);
+ SOCK_UNLOCK(head);
+ if (error != 0)
+ goto done;
+#endif
+ ACCEPT_LOCK();
+ if (TAILQ_EMPTY(&head->so_comp)) {
+ ACCEPT_UNLOCK();
+ error = EWOULDBLOCK;
+ goto done;
+ }
+ so = TAILQ_FIRST(&head->so_comp);
+ KASSERT(!(so->so_qstate & SQ_INCOMP), ("svc_vc_accept: so SQ_INCOMP"));
+ KASSERT(so->so_qstate & SQ_COMP, ("svc_vc_accept: so not SQ_COMP"));
+
+ /*
+ * Before changing the flags on the socket, we have to bump the
+ * reference count. Otherwise, if the protocol calls sofree(),
+ * the socket will be released due to a zero refcount.
+ * XXX might not need soref() since this is simpler than kern_accept.
+ */
+ SOCK_LOCK(so); /* soref() and so_state update */
+	soref(so);			/* reference for the new transport */
+
+ TAILQ_REMOVE(&head->so_comp, so, so_list);
+ head->so_qlen--;
+ so->so_state |= (head->so_state & SS_NBIO);
+ so->so_qstate &= ~SQ_COMP;
+ so->so_head = NULL;
+
+ SOCK_UNLOCK(so);
+ ACCEPT_UNLOCK();
+
+ *sop = so;
+
+ /* connection has been removed from the listen queue */
+ KNOTE_UNLOCKED(&head->so_rcv.sb_sel.si_note, 0);
+done:
+ return (error);
+}
+
+/*ARGSUSED*/
+static bool_t
+svc_vc_rendezvous_recv(SVCXPRT *xprt, struct rpc_msg *msg)
+{
+ struct socket *so = NULL;
+ struct sockaddr *sa = NULL;
+ struct sockopt opt;
+ int one = 1;
+ int error;
+
+ /*
+ * The socket upcall calls xprt_active() which will eventually
+ * cause the server to call us here. We attempt to accept a
+ * connection from the socket and turn it into a new
+ * transport. If the accept fails, we have drained all pending
+ * connections so we call xprt_inactive().
+ *
+ * The lock protects us in the case where a new connection arrives
+ * on the socket after our call to accept fails with
+ * EWOULDBLOCK - the call to xprt_active() in the upcall will
+ * happen only after our call to xprt_inactive() which ensures
+ * that we will remain active. It might be possible to use
+	 * SOCKBUF_LOCK for this - it's not clear to me what locks are
+ * held during the upcall.
+ */
+ mtx_lock(&xprt->xp_lock);
+
+ error = svc_vc_accept(xprt->xp_socket, &so);
+
+ if (error == EWOULDBLOCK) {
+ xprt_inactive(xprt);
+ mtx_unlock(&xprt->xp_lock);
+ return (FALSE);
+ }
+
+ if (error) {
+ SOCKBUF_LOCK(&xprt->xp_socket->so_rcv);
+ xprt->xp_socket->so_upcallarg = NULL;
+ xprt->xp_socket->so_upcall = NULL;
+ xprt->xp_socket->so_rcv.sb_flags &= ~SB_UPCALL;
+ SOCKBUF_UNLOCK(&xprt->xp_socket->so_rcv);
+ xprt_inactive(xprt);
+ mtx_unlock(&xprt->xp_lock);
+ return (FALSE);
+ }
+
+ mtx_unlock(&xprt->xp_lock);
+
+	sa = NULL;
+ error = soaccept(so, &sa);
+
+ if (!error) {
+ bzero(&opt, sizeof(struct sockopt));
+ opt.sopt_dir = SOPT_SET;
+ opt.sopt_level = IPPROTO_TCP;
+ opt.sopt_name = TCP_NODELAY;
+ opt.sopt_val = &one;
+ opt.sopt_valsize = sizeof(one);
+ error = sosetopt(so, &opt);
+ }
+
+ if (error) {
+ /*
+ * XXX not sure if I need to call sofree or soclose here.
+ */
+ if (sa)
+ free(sa, M_SONAME);
+ return (FALSE);
+ }
+
+ /*
+ * svc_vc_create_conn will call xprt_register - we don't need
+ * to do anything with the new connection.
+ */
+ svc_vc_create_conn(xprt->xp_pool, so, sa);
+ free(sa, M_SONAME);
+
+ return (FALSE); /* there is never an rpc msg to be processed */
+}
+
+/*ARGSUSED*/
+static enum xprt_stat
+svc_vc_rendezvous_stat(SVCXPRT *xprt)
+{
+
+ return (XPRT_IDLE);
+}
+
+static void
+svc_vc_destroy_common(SVCXPRT *xprt)
+{
+ SOCKBUF_LOCK(&xprt->xp_socket->so_rcv);
+ xprt->xp_socket->so_upcallarg = NULL;
+ xprt->xp_socket->so_upcall = NULL;
+ xprt->xp_socket->so_rcv.sb_flags &= ~SB_UPCALL;
+ SOCKBUF_UNLOCK(&xprt->xp_socket->so_rcv);
+
+ xprt_unregister(xprt);
+
+ mtx_destroy(&xprt->xp_lock);
+ if (xprt->xp_socket)
+ (void)soclose(xprt->xp_socket);
+
+ if (xprt->xp_rtaddr.buf)
+ (void) mem_free(xprt->xp_rtaddr.buf, xprt->xp_rtaddr.maxlen);
+ if (xprt->xp_ltaddr.buf)
+ (void) mem_free(xprt->xp_ltaddr.buf, xprt->xp_ltaddr.maxlen);
+ (void) mem_free(xprt, sizeof (SVCXPRT));
+
+}
+
+static void
+svc_vc_rendezvous_destroy(SVCXPRT *xprt)
+{
+
+ svc_vc_destroy_common(xprt);
+}
+
+static void
+svc_vc_destroy(SVCXPRT *xprt)
+{
+ struct cf_conn *cd = (struct cf_conn *)xprt->xp_p1;
+
+ svc_vc_destroy_common(xprt);
+
+ if (cd->mreq)
+ m_freem(cd->mreq);
+ if (cd->mpending)
+ m_freem(cd->mpending);
+ mem_free(cd, sizeof(*cd));
+}
+
+/*ARGSUSED*/
+static bool_t
+svc_vc_control(SVCXPRT *xprt, const u_int rq, void *in)
+{
+ return (FALSE);
+}
+
+static bool_t
+svc_vc_rendezvous_control(SVCXPRT *xprt, const u_int rq, void *in)
+{
+
+ return (FALSE);
+}
+
+static enum xprt_stat
+svc_vc_stat(SVCXPRT *xprt)
+{
+ struct cf_conn *cd;
+ struct mbuf *m;
+ size_t n;
+
+ cd = (struct cf_conn *)(xprt->xp_p1);
+
+ if (cd->strm_stat == XPRT_DIED)
+ return (XPRT_DIED);
+
+ /*
+ * Return XPRT_MOREREQS if we have buffered data and we are
+ * mid-record or if we have enough data for a record marker.
+ */
+ if (cd->mpending) {
+ if (cd->resid)
+ return (XPRT_MOREREQS);
+ n = 0;
+ m = cd->mpending;
+ while (m && n < sizeof(uint32_t)) {
+ n += m->m_len;
+ m = m->m_next;
+ }
+ if (n >= sizeof(uint32_t))
+ return (XPRT_MOREREQS);
+ }
+
+ return (XPRT_IDLE);
+}
+
+static bool_t
+svc_vc_recv(SVCXPRT *xprt, struct rpc_msg *msg)
+{
+ struct cf_conn *cd = (struct cf_conn *) xprt->xp_p1;
+ struct uio uio;
+ struct mbuf *m;
+ int error, rcvflag;
+
+ for (;;) {
+ /*
+ * If we have an mbuf chain in cd->mpending, try to parse a
+ * record from it, leaving the result in cd->mreq. If we don't
+ * have a complete record, leave the partial result in
+ * cd->mreq and try to read more from the socket.
+ */
+ if (cd->mpending) {
+ /*
+ * If cd->resid is non-zero, we have part of the
+ * record already, otherwise we are expecting a record
+ * marker.
+ */
+ if (!cd->resid) {
+ /*
+ * See if there is enough data buffered to
+ * make up a record marker. Make sure we can
+ * handle the case where the record marker is
+ * split across more than one mbuf.
+ */
+ size_t n = 0;
+ uint32_t header;
+
+ m = cd->mpending;
+ while (n < sizeof(uint32_t) && m) {
+ n += m->m_len;
+ m = m->m_next;
+ }
+ if (n < sizeof(uint32_t))
+ goto readmore;
+ cd->mpending = m_pullup(cd->mpending, sizeof(uint32_t));
+ memcpy(&header, mtod(cd->mpending, uint32_t *),
+ sizeof(header));
+ header = ntohl(header);
+ cd->eor = (header & 0x80000000) != 0;
+ cd->resid = header & 0x7fffffff;
+ m_adj(cd->mpending, sizeof(uint32_t));
+ }
+
+ /*
+ * Start pulling off mbufs from cd->mpending
+ * until we either have a complete record or
+ * we run out of data. We use m_split to pull
+ * data - it will pull as much as possible and
+ * split the last mbuf if necessary.
+ */
+ while (cd->mpending && cd->resid) {
+ m = cd->mpending;
+ cd->mpending = m_split(cd->mpending, cd->resid,
+ M_WAIT);
+ if (cd->mreq)
+ m_last(cd->mreq)->m_next = m;
+ else
+ cd->mreq = m;
+ while (m) {
+ cd->resid -= m->m_len;
+ m = m->m_next;
+ }
+ }
+
+ /*
+ * If cd->resid is zero now, we have managed to
+ * receive a record fragment from the stream. Check
+ * for the end-of-record mark to see if we need more.
+ */
+ if (cd->resid == 0) {
+ if (!cd->eor)
+ continue;
+
+ /*
+ * Success - we have a complete record in
+ * cd->mreq.
+ */
+ xdrmbuf_create(&xprt->xp_xdrreq, cd->mreq, XDR_DECODE);
+ cd->mreq = NULL;
+ if (! xdr_callmsg(&xprt->xp_xdrreq, msg)) {
+ XDR_DESTROY(&xprt->xp_xdrreq);
+ return (FALSE);
+ }
+ xprt->xp_xid = msg->rm_xid;
+
+ return (TRUE);
+ }
+ }
+
+ readmore:
+ /*
+ * The socket upcall calls xprt_active() which will eventually
+ * cause the server to call us here. We attempt to
+ * read as much as possible from the socket and put
+ * the result in cd->mpending. If the read fails,
+ * we have drained both cd->mpending and the socket so
+ * we can call xprt_inactive().
+ *
+ * The lock protects us in the case where a new packet arrives
+ * on the socket after our call to soreceive fails with
+ * EWOULDBLOCK - the call to xprt_active() in the upcall will
+ * happen only after our call to xprt_inactive() which ensures
+ * that we will remain active. It might be possible to use
+		 * SOCKBUF_LOCK for this - it's not clear to me what locks are
+ * held during the upcall.
+ */
+ mtx_lock(&xprt->xp_lock);
+
+ uio.uio_resid = 1000000000;
+ uio.uio_td = curthread;
+ m = NULL;
+ rcvflag = MSG_DONTWAIT;
+ error = soreceive(xprt->xp_socket, NULL, &uio, &m, NULL,
+ &rcvflag);
+
+ if (error == EWOULDBLOCK) {
+ xprt_inactive(xprt);
+ mtx_unlock(&xprt->xp_lock);
+ return (FALSE);
+ }
+
+ if (error) {
+ SOCKBUF_LOCK(&xprt->xp_socket->so_rcv);
+ xprt->xp_socket->so_upcallarg = NULL;
+ xprt->xp_socket->so_upcall = NULL;
+ xprt->xp_socket->so_rcv.sb_flags &= ~SB_UPCALL;
+ SOCKBUF_UNLOCK(&xprt->xp_socket->so_rcv);
+ xprt_inactive(xprt);
+ cd->strm_stat = XPRT_DIED;
+ mtx_unlock(&xprt->xp_lock);
+ return (FALSE);
+ }
+
+ if (!m) {
+ /*
+ * EOF - the other end has closed the socket.
+ */
+ cd->strm_stat = XPRT_DIED;
+ mtx_unlock(&xprt->xp_lock);
+ return (FALSE);
+ }
+
+ if (cd->mpending)
+ m_last(cd->mpending)->m_next = m;
+ else
+ cd->mpending = m;
+
+ mtx_unlock(&xprt->xp_lock);
+ }
+}
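
For reference, the framing handled by svc_vc_recv() above and svc_vc_reply() below is the standard RPC record-marking header: the most significant bit flags the last fragment of a record and the low 31 bits carry the fragment length. A small illustrative helper (not part of this patch, names are hypothetical) that decodes one header:

	/*
	 * Decode a network-order record-marking word into its
	 * end-of-record flag and fragment length, mirroring the
	 * parsing done in svc_vc_recv().
	 */
	static __inline void
	example_parse_record_mark(uint32_t header_be, bool_t *eor,
	    uint32_t *len)
	{
		uint32_t header = ntohl(header_be);

		*eor = (header & 0x80000000) != 0;
		*len = header & 0x7fffffff;
	}
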
+
+static bool_t
+svc_vc_getargs(SVCXPRT *xprt, xdrproc_t xdr_args, void *args_ptr)
+{
+
+ return (xdr_args(&xprt->xp_xdrreq, args_ptr));
+}
+
+static bool_t
+svc_vc_freeargs(SVCXPRT *xprt, xdrproc_t xdr_args, void *args_ptr)
+{
+ XDR xdrs;
+
+ /*
+ * Free the request mbuf here - this allows us to handle
+ * protocols where not all requests have replies
+ * (i.e. NLM). Note that xdrmbuf_destroy handles being called
+ * twice correctly - the mbuf will only be freed once.
+ */
+ XDR_DESTROY(&xprt->xp_xdrreq);
+
+ xdrs.x_op = XDR_FREE;
+ return (xdr_args(&xdrs, args_ptr));
+}
+
+static bool_t
+svc_vc_reply(SVCXPRT *xprt, struct rpc_msg *msg)
+{
+ struct mbuf *mrep;
+ bool_t stat = FALSE;
+ int error;
+
+ /*
+ * Leave space for record mark.
+ */
+ MGETHDR(mrep, M_WAIT, MT_DATA);
+ MCLGET(mrep, M_WAIT);
+ mrep->m_len = 0;
+ mrep->m_data += sizeof(uint32_t);
+
+ xdrmbuf_create(&xprt->xp_xdrrep, mrep, XDR_ENCODE);
+ msg->rm_xid = xprt->xp_xid;
+ if (xdr_replymsg(&xprt->xp_xdrrep, msg)) {
+ m_fixhdr(mrep);
+
+ /*
+ * Prepend a record marker containing the reply length.
+ */
+ M_PREPEND(mrep, sizeof(uint32_t), M_WAIT);
+ *mtod(mrep, uint32_t *) =
+ htonl(0x80000000 | (mrep->m_pkthdr.len
+ - sizeof(uint32_t)));
+ error = sosend(xprt->xp_socket, NULL, NULL, mrep, NULL,
+ 0, curthread);
+ if (!error) {
+ stat = TRUE;
+ }
+ } else {
+ m_freem(mrep);
+ }
+
+ /*
+ * This frees the request mbuf chain as well. The reply mbuf
+ * chain was consumed by sosend.
+ */
+ XDR_DESTROY(&xprt->xp_xdrreq);
+ XDR_DESTROY(&xprt->xp_xdrrep);
+ xprt->xp_p2 = NULL;
+
+ return (stat);
+}
+
+static bool_t
+svc_vc_null()
+{
+
+ return (FALSE);
+}
+
+static void
+svc_vc_soupcall(struct socket *so, void *arg, int waitflag)
+{
+ SVCXPRT *xprt = (SVCXPRT *) arg;
+
+ mtx_lock(&xprt->xp_lock);
+ xprt_active(xprt);
+ mtx_unlock(&xprt->xp_lock);
+}
+
+#if 0
+/*
+ * Get the effective UID of the sending process. Used by rpcbind, keyserv
+ * and rpc.yppasswdd on AF_LOCAL.
+ */
+int
+__rpc_get_local_uid(SVCXPRT *transp, uid_t *uid) {
+ int sock, ret;
+ gid_t egid;
+ uid_t euid;
+ struct sockaddr *sa;
+
+ sock = transp->xp_fd;
+ sa = (struct sockaddr *)transp->xp_rtaddr.buf;
+ if (sa->sa_family == AF_LOCAL) {
+ ret = getpeereid(sock, &euid, &egid);
+ if (ret == 0)
+ *uid = euid;
+ return (ret);
+ } else
+ return (-1);
+}
+#endif
diff --git a/sys/rpc/types.h b/sys/rpc/types.h
index 4be9cc8..b476ada 100644
--- a/sys/rpc/types.h
+++ b/sys/rpc/types.h
@@ -45,12 +45,12 @@
typedef int32_t bool_t;
typedef int32_t enum_t;
-typedef u_int32_t rpcprog_t;
-typedef u_int32_t rpcvers_t;
-typedef u_int32_t rpcproc_t;
-typedef u_int32_t rpcprot_t;
-typedef u_int32_t rpcport_t;
-typedef int32_t rpc_inline_t;
+typedef uint32_t rpcprog_t;
+typedef uint32_t rpcvers_t;
+typedef uint32_t rpcproc_t;
+typedef uint32_t rpcprot_t;
+typedef uint32_t rpcport_t;
+typedef int32_t rpc_inline_t;
#define __dontcare__ -1
@@ -61,11 +61,21 @@ typedef int32_t rpc_inline_t;
# define TRUE (1)
#endif
+#ifdef _KERNEL
+#ifdef _SYS_MALLOC_H_
+MALLOC_DECLARE(M_RPC);
+#endif
+#define mem_alloc(bsize) malloc(bsize, M_RPC, M_WAITOK|M_ZERO)
+#define mem_free(ptr, bsize) free(ptr, M_RPC)
+#else
#define mem_alloc(bsize) calloc(1, bsize)
#define mem_free(ptr, bsize) free(ptr)
+#endif
#include <sys/time.h>
-#ifndef _KERNEL
+#ifdef _KERNEL
+#include <rpc/netconfig.h>
+#else
#include <netconfig.h>
#endif
diff --git a/sys/rpc/xdr.h b/sys/rpc/xdr.h
new file mode 100644
index 0000000..bebd448
--- /dev/null
+++ b/sys/rpc/xdr.h
@@ -0,0 +1,368 @@
+/* $NetBSD: xdr.h,v 1.19 2000/07/17 05:00:45 matt Exp $ */
+
+/*
+ * Sun RPC is a product of Sun Microsystems, Inc. and is provided for
+ * unrestricted use provided that this legend is included on all tape
+ * media and as a part of the software program in whole or part. Users
+ * may copy or modify Sun RPC without charge, but are not authorized
+ * to license or distribute it to anyone else except as part of a product or
+ * program developed by the user.
+ *
+ * SUN RPC IS PROVIDED AS IS WITH NO WARRANTIES OF ANY KIND INCLUDING THE
+ * WARRANTIES OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE, OR ARISING FROM A COURSE OF DEALING, USAGE OR TRADE PRACTICE.
+ *
+ * Sun RPC is provided with no support and without any obligation on the
+ * part of Sun Microsystems, Inc. to assist in its use, correction,
+ * modification or enhancement.
+ *
+ * SUN MICROSYSTEMS, INC. SHALL HAVE NO LIABILITY WITH RESPECT TO THE
+ * INFRINGEMENT OF COPYRIGHTS, TRADE SECRETS OR ANY PATENTS BY SUN RPC
+ * OR ANY PART THEREOF.
+ *
+ * In no event will Sun Microsystems, Inc. be liable for any lost revenue
+ * or profits or other special, indirect and consequential damages, even if
+ * Sun has been advised of the possibility of such damages.
+ *
+ * Sun Microsystems, Inc.
+ * 2550 Garcia Avenue
+ * Mountain View, California 94043
+ *
+ * from: @(#)xdr.h 1.19 87/04/22 SMI
+ * from: @(#)xdr.h 2.2 88/07/29 4.0 RPCSRC
+ * $FreeBSD$
+ */
+
+/*
+ * xdr.h, External Data Representation Serialization Routines.
+ *
+ * Copyright (C) 1984, Sun Microsystems, Inc.
+ */
+
+#ifndef _KRPC_XDR_H
+#define _KRPC_XDR_H
+#include <sys/cdefs.h>
+
+/*
+ * XDR provides a conventional way for converting between C data
+ * types and an external bit-string representation. Library supplied
+ * routines provide for the conversion on built-in C data types. These
+ * routines and utility routines defined here are used to help implement
+ * a type encode/decode routine for each user-defined type.
+ *
+ * Each data type provides a single procedure which takes two arguments:
+ *
+ * bool_t
+ * xdrproc(xdrs, argresp)
+ * XDR *xdrs;
+ * <type> *argresp;
+ *
+ * xdrs is an instance of an XDR handle, to which or from which the data
+ * type is to be converted.  argresp is a pointer to the structure to be
+ * converted.  The XDR handle contains an operation field which indicates
+ * which of the operations (ENCODE, DECODE or FREE) is to be performed.
+ *
+ * XDR_DECODE may allocate space if the pointer argresp is null. This
+ * data can be freed with the XDR_FREE operation.
+ *
+ * We write only one procedure per data type to make it easy
+ * to keep the encode and decode procedures for a data type consistent.
+ * In many cases the same code performs all operations on a user defined type,
+ * because all the hard work is done in the component type routines;
+ * encode and decode reduce to a series of calls on the nested data types.
+ */
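
As an illustration of the convention described above, a hypothetical xdrproc for a simple two-field structure might look like the sketch below (example names only; it just composes the primitive filters declared later in this header):

	struct example_pair {
		int	ep_id;
		char	*ep_name;
	};

	/*
	 * One routine serves ENCODE, DECODE and FREE: each component
	 * filter inspects xdrs->x_op and does the right thing.
	 */
	static bool_t
	xdr_example_pair(XDR *xdrs, struct example_pair *objp)
	{

		if (!xdr_int(xdrs, &objp->ep_id))
			return (FALSE);
		if (!xdr_string(xdrs, &objp->ep_name, 1024))
			return (FALSE);
		return (TRUE);
	}
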
+
+/*
+ * Xdr operations. XDR_ENCODE causes the type to be encoded into the
+ * stream. XDR_DECODE causes the type to be extracted from the stream.
+ * XDR_FREE can be used to release the space allocated by an XDR_DECODE
+ * request.
+ */
+enum xdr_op {
+ XDR_ENCODE=0,
+ XDR_DECODE=1,
+ XDR_FREE=2
+};
+
+/*
+ * This is the number of bytes per unit of external data.
+ */
+#define BYTES_PER_XDR_UNIT (4)
+#define RNDUP(x) ((((x) + BYTES_PER_XDR_UNIT - 1) / BYTES_PER_XDR_UNIT) \
+ * BYTES_PER_XDR_UNIT)
+
+/*
+ * The XDR handle.
+ * Contains operation which is being applied to the stream,
+ * an operations vector for the particular implementation (e.g. see xdr_mem.c),
+ * and two private fields for the use of the particular implementation.
+ */
+typedef struct __rpc_xdr {
+ enum xdr_op x_op; /* operation; fast additional param */
+ const struct xdr_ops {
+ /* get a long from underlying stream */
+ bool_t (*x_getlong)(struct __rpc_xdr *, long *);
+ /* put a long to " */
+ bool_t (*x_putlong)(struct __rpc_xdr *, const long *);
+ /* get some bytes from " */
+ bool_t (*x_getbytes)(struct __rpc_xdr *, char *, u_int);
+ /* put some bytes to " */
+ bool_t (*x_putbytes)(struct __rpc_xdr *, const char *, u_int);
+ /* returns bytes off from beginning */
+ u_int (*x_getpostn)(struct __rpc_xdr *);
+ /* lets you reposition the stream */
+ bool_t (*x_setpostn)(struct __rpc_xdr *, u_int);
+ /* buf quick ptr to buffered data */
+ int32_t *(*x_inline)(struct __rpc_xdr *, u_int);
+ /* free privates of this xdr_stream */
+ void (*x_destroy)(struct __rpc_xdr *);
+ bool_t (*x_control)(struct __rpc_xdr *, int, void *);
+ } *x_ops;
+ char * x_public; /* users' data */
+ void * x_private; /* pointer to private data */
+ char * x_base; /* private used for position info */
+ u_int x_handy; /* extra private word */
+} XDR;
+
+/*
+ * A xdrproc_t exists for each data type which is to be encoded or decoded.
+ *
+ * The second argument to the xdrproc_t is a pointer to an opaque pointer.
+ * The opaque pointer generally points to a structure of the data type
+ * to be decoded. If this pointer is 0, then the type routines should
+ * allocate dynamic storage of the appropriate size and return it.
+ */
+#ifdef _KERNEL
+typedef bool_t (*xdrproc_t)(XDR *, void *, ...);
+#else
+/*
+ * XXX can't actually prototype it, because some take three args!!!
+ */
+typedef bool_t (*xdrproc_t)(XDR *, ...);
+#endif
+
+/*
+ * Operations defined on a XDR handle
+ *
+ * XDR *xdrs;
+ * long *longp;
+ * char * addr;
+ * u_int len;
+ * u_int pos;
+ */
+#define XDR_GETLONG(xdrs, longp) \
+ (*(xdrs)->x_ops->x_getlong)(xdrs, longp)
+#define xdr_getlong(xdrs, longp) \
+ (*(xdrs)->x_ops->x_getlong)(xdrs, longp)
+
+#define XDR_PUTLONG(xdrs, longp) \
+ (*(xdrs)->x_ops->x_putlong)(xdrs, longp)
+#define xdr_putlong(xdrs, longp) \
+ (*(xdrs)->x_ops->x_putlong)(xdrs, longp)
+
+static __inline int
+xdr_getint32(XDR *xdrs, int32_t *ip)
+{
+ long l;
+
+ if (!xdr_getlong(xdrs, &l))
+ return (FALSE);
+ *ip = (int32_t)l;
+ return (TRUE);
+}
+
+static __inline int
+xdr_putint32(XDR *xdrs, int32_t *ip)
+{
+ long l;
+
+ l = (long)*ip;
+ return xdr_putlong(xdrs, &l);
+}
+
+#define XDR_GETINT32(xdrs, int32p) xdr_getint32(xdrs, int32p)
+#define XDR_PUTINT32(xdrs, int32p) xdr_putint32(xdrs, int32p)
+
+#define XDR_GETBYTES(xdrs, addr, len) \
+ (*(xdrs)->x_ops->x_getbytes)(xdrs, addr, len)
+#define xdr_getbytes(xdrs, addr, len) \
+ (*(xdrs)->x_ops->x_getbytes)(xdrs, addr, len)
+
+#define XDR_PUTBYTES(xdrs, addr, len) \
+ (*(xdrs)->x_ops->x_putbytes)(xdrs, addr, len)
+#define xdr_putbytes(xdrs, addr, len) \
+ (*(xdrs)->x_ops->x_putbytes)(xdrs, addr, len)
+
+#define XDR_GETPOS(xdrs) \
+ (*(xdrs)->x_ops->x_getpostn)(xdrs)
+#define xdr_getpos(xdrs) \
+ (*(xdrs)->x_ops->x_getpostn)(xdrs)
+
+#define XDR_SETPOS(xdrs, pos) \
+ (*(xdrs)->x_ops->x_setpostn)(xdrs, pos)
+#define xdr_setpos(xdrs, pos) \
+ (*(xdrs)->x_ops->x_setpostn)(xdrs, pos)
+
+#define XDR_INLINE(xdrs, len) \
+ (*(xdrs)->x_ops->x_inline)(xdrs, len)
+#define xdr_inline(xdrs, len) \
+ (*(xdrs)->x_ops->x_inline)(xdrs, len)
+
+#define XDR_DESTROY(xdrs) \
+ if ((xdrs)->x_ops->x_destroy) \
+ (*(xdrs)->x_ops->x_destroy)(xdrs)
+#define xdr_destroy(xdrs) \
+ if ((xdrs)->x_ops->x_destroy) \
+ (*(xdrs)->x_ops->x_destroy)(xdrs)
+
+#define XDR_CONTROL(xdrs, req, op) \
+ if ((xdrs)->x_ops->x_control) \
+ (*(xdrs)->x_ops->x_control)(xdrs, req, op)
+#define xdr_control(xdrs, req, op) XDR_CONTROL(xdrs, req, op)
+
+/*
+ * Solaris strips the '_t' from these types -- not sure why.
+ * But, let's be compatible.
+ */
+#define xdr_rpcvers(xdrs, versp) xdr_uint32_t(xdrs, versp)
+#define xdr_rpcprog(xdrs, progp) xdr_uint32_t(xdrs, progp)
+#define xdr_rpcproc(xdrs, procp) xdr_uint32_t(xdrs, procp)
+#define xdr_rpcprot(xdrs, protp) xdr_uint32_t(xdrs, protp)
+#define xdr_rpcport(xdrs, portp) xdr_uint32_t(xdrs, portp)
+
+/*
+ * Support struct for discriminated unions.
+ * You create an array of xdrdiscrim structures, terminated with
+ * an entry with a null procedure pointer. The xdr_union routine gets
+ * the discriminant value and then searches the array of structures
+ * for a matching value. If a match is found the associated xdr routine
+ * is called to handle that part of the union. If there is
+ * no match, then a default routine may be called.
+ * If there is no match and no default routine it is an error.
+ */
+#define NULL_xdrproc_t ((xdrproc_t)0)
+struct xdr_discrim {
+ int value;
+ xdrproc_t proc;
+};
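
A hypothetical example (not part of this patch) of the xdr_discrim pattern described above: a result union discriminated by a status code, with xdr_int handling both arms and no default routine.

	enum example_status { EX_OK = 0, EX_ERR = 1 };

	struct example_res {
		enum_t	ex_status;
		union {
			int	ex_value;	/* EX_OK */
			int	ex_errno;	/* EX_ERR */
		} ex_u;
	};

	/* Terminated by an entry with a NULL procedure pointer. */
	static const struct xdr_discrim example_choices[] = {
		{ EX_OK,  (xdrproc_t)xdr_int },
		{ EX_ERR, (xdrproc_t)xdr_int },
		{ __dontcare__, NULL_xdrproc_t }
	};

	static bool_t
	xdr_example_res(XDR *xdrs, struct example_res *resp)
	{

		return (xdr_union(xdrs, &resp->ex_status,
		    (char *)&resp->ex_u, example_choices, NULL_xdrproc_t));
	}
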
+
+/*
+ * In-line routines for fast encode/decode of primitive data types.
+ * Caveat emptor: these use single memory cycles to get the
+ * data from the underlying buffer, and will fail to operate
+ * properly if the data is not aligned. The standard way to use these
+ * is to say:
+ * if ((buf = XDR_INLINE(xdrs, count)) == NULL)
+ * return (FALSE);
+ * <<< macro calls >>>
+ * where ``count'' is the number of bytes of data occupied
+ * by the primitive data types.
+ *
+ * N.B. and frozen for all time: each data type here uses 4 bytes
+ * of external representation.
+ */
+#define IXDR_GET_INT32(buf) ((int32_t)__ntohl((uint32_t)*(buf)++))
+#define IXDR_PUT_INT32(buf, v) (*(buf)++ =(int32_t)__htonl((uint32_t)v))
+#define IXDR_GET_U_INT32(buf) ((uint32_t)IXDR_GET_INT32(buf))
+#define IXDR_PUT_U_INT32(buf, v) IXDR_PUT_INT32((buf), ((int32_t)(v)))
+
+#define IXDR_GET_UINT32(buf) ((uint32_t)IXDR_GET_INT32(buf))
+#define IXDR_PUT_UINT32(buf, v) IXDR_PUT_INT32((buf), ((int32_t)(v)))
+
+#define IXDR_GET_LONG(buf) ((long)__ntohl((uint32_t)*(buf)++))
+#define IXDR_PUT_LONG(buf, v) (*(buf)++ =(int32_t)__htonl((uint32_t)v))
+
+#define IXDR_GET_BOOL(buf) ((bool_t)IXDR_GET_LONG(buf))
+#define IXDR_GET_ENUM(buf, t) ((t)IXDR_GET_LONG(buf))
+#define IXDR_GET_U_LONG(buf) ((u_long)IXDR_GET_LONG(buf))
+#define IXDR_GET_SHORT(buf) ((short)IXDR_GET_LONG(buf))
+#define IXDR_GET_U_SHORT(buf) ((u_short)IXDR_GET_LONG(buf))
+
+#define IXDR_PUT_BOOL(buf, v) IXDR_PUT_LONG((buf), (v))
+#define IXDR_PUT_ENUM(buf, v) IXDR_PUT_LONG((buf), (v))
+#define IXDR_PUT_U_LONG(buf, v) IXDR_PUT_LONG((buf), (v))
+#define IXDR_PUT_SHORT(buf, v) IXDR_PUT_LONG((buf), (v))
+#define IXDR_PUT_U_SHORT(buf, v) IXDR_PUT_LONG((buf), (v))
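
The inline macros above are typically used as sketched below (decode direction only; the helper name is hypothetical): reserve space for two XDR units and fall back to the generic filters if the stream cannot supply an aligned buffer.

	static __inline bool_t
	example_get_two_uint32(XDR *xdrs, uint32_t *a, uint32_t *b)
	{
		int32_t *buf;

		buf = XDR_INLINE(xdrs, 2 * BYTES_PER_XDR_UNIT);
		if (buf != NULL) {
			/* Each IXDR_GET_* advances buf by one unit. */
			*a = IXDR_GET_UINT32(buf);
			*b = IXDR_GET_UINT32(buf);
			return (TRUE);
		}
		return (xdr_uint32_t(xdrs, a) && xdr_uint32_t(xdrs, b));
	}
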
+
+/*
+ * These are the "generic" xdr routines.
+ */
+__BEGIN_DECLS
+extern bool_t xdr_void(void);
+extern bool_t xdr_int(XDR *, int *);
+extern bool_t xdr_u_int(XDR *, u_int *);
+extern bool_t xdr_long(XDR *, long *);
+extern bool_t xdr_u_long(XDR *, u_long *);
+extern bool_t xdr_short(XDR *, short *);
+extern bool_t xdr_u_short(XDR *, u_short *);
+extern bool_t xdr_int16_t(XDR *, int16_t *);
+extern bool_t xdr_uint16_t(XDR *, uint16_t *);
+extern bool_t xdr_int32_t(XDR *, int32_t *);
+extern bool_t xdr_uint32_t(XDR *, uint32_t *);
+extern bool_t xdr_int64_t(XDR *, int64_t *);
+extern bool_t xdr_uint64_t(XDR *, uint64_t *);
+extern bool_t xdr_bool(XDR *, bool_t *);
+extern bool_t xdr_enum(XDR *, enum_t *);
+extern bool_t xdr_array(XDR *, char **, u_int *, u_int, u_int, xdrproc_t);
+extern bool_t xdr_bytes(XDR *, char **, u_int *, u_int);
+extern bool_t xdr_opaque(XDR *, char *, u_int);
+extern bool_t xdr_string(XDR *, char **, u_int);
+extern bool_t xdr_union(XDR *, enum_t *, char *, const struct xdr_discrim *, xdrproc_t);
+extern bool_t xdr_char(XDR *, char *);
+extern bool_t xdr_u_char(XDR *, u_char *);
+extern bool_t xdr_vector(XDR *, char *, u_int, u_int, xdrproc_t);
+extern bool_t xdr_float(XDR *, float *);
+extern bool_t xdr_double(XDR *, double *);
+extern bool_t xdr_quadruple(XDR *, long double *);
+extern bool_t xdr_reference(XDR *, char **, u_int, xdrproc_t);
+extern bool_t xdr_pointer(XDR *, char **, u_int, xdrproc_t);
+extern bool_t xdr_wrapstring(XDR *, char **);
+extern void xdr_free(xdrproc_t, void *);
+extern bool_t xdr_hyper(XDR *, quad_t *);
+extern bool_t xdr_u_hyper(XDR *, u_quad_t *);
+extern bool_t xdr_longlong_t(XDR *, quad_t *);
+extern bool_t xdr_u_longlong_t(XDR *, u_quad_t *);
+extern unsigned long xdr_sizeof(xdrproc_t func, void *data);
+__END_DECLS
+
+/*
+ * Common opaque bytes objects used by many rpc protocols;
+ * declared here due to commonality.
+ */
+#define MAX_NETOBJ_SZ 1024
+struct netobj {
+ u_int n_len;
+ char *n_bytes;
+};
+typedef struct netobj netobj;
+extern bool_t xdr_netobj(XDR *, struct netobj *);
+
+/*
+ * These are the public routines for the various implementations of
+ * xdr streams.
+ */
+__BEGIN_DECLS
+/* XDR using memory buffers */
+extern void xdrmem_create(XDR *, char *, u_int, enum xdr_op);
+
+/* XDR using mbufs */
+struct mbuf;
+extern void xdrmbuf_create(XDR *, struct mbuf *, enum xdr_op);
+
+/* XDR pseudo records for tcp */
+extern void xdrrec_create(XDR *, u_int, u_int, void *,
+ int (*)(void *, void *, int),
+ int (*)(void *, void *, int));
+
+/* make end of xdr record */
+extern bool_t xdrrec_endofrecord(XDR *, int);
+
+/* move to beginning of next record */
+extern bool_t xdrrec_skiprecord(XDR *);
+
+/* true if no more input */
+extern bool_t xdrrec_eof(XDR *);
+extern u_int xdrrec_readbytes(XDR *, caddr_t, u_int);
+__END_DECLS
+
+#endif /* !_KRPC_XDR_H */
diff --git a/sys/sys/fcntl.h b/sys/sys/fcntl.h
index ea8531a..29c7796 100644
--- a/sys/sys/fcntl.h
+++ b/sys/sys/fcntl.h
@@ -178,10 +178,14 @@ typedef __pid_t pid_t;
#define F_GETOWN 5 /* get SIGIO/SIGURG proc/pgrp */
#define F_SETOWN 6 /* set SIGIO/SIGURG proc/pgrp */
#endif
-#define F_GETLK 7 /* get record locking information */
-#define F_SETLK 8 /* set record locking information */
-#define F_SETLKW 9 /* F_SETLK; wait if blocked */
+#define F_OGETLK 7 /* get record locking information */
+#define F_OSETLK 8 /* set record locking information */
+#define F_OSETLKW 9 /* F_SETLK; wait if blocked */
#define F_DUP2FD 10 /* duplicate file descriptor to arg */
+#define F_GETLK 11 /* get record locking information */
+#define F_SETLK 12 /* set record locking information */
+#define F_SETLKW 13 /* F_SETLK; wait if blocked */
+#define F_SETLK_REMOTE 14 /* debugging support for remote locks */
/* file descriptor flags (F_GETFD, F_SETFD) */
#define FD_CLOEXEC 1 /* close-on-exec flag */
@@ -190,10 +194,13 @@ typedef __pid_t pid_t;
#define F_RDLCK 1 /* shared or read lock */
#define F_UNLCK 2 /* unlock */
#define F_WRLCK 3 /* exclusive or write lock */
+#define F_UNLCKSYS 4 /* purge locks for a given system ID */
+#define F_CANCEL 5 /* cancel an async lock request */
#ifdef _KERNEL
#define F_WAIT 0x010 /* Wait until lock is granted */
#define F_FLOCK 0x020 /* Use flock(2) semantics for lock */
#define F_POSIX 0x040 /* Use POSIX semantics for lock */
+#define F_REMOTE 0x080 /* Lock owner is remote NFS client */
#endif
/*
@@ -206,6 +213,19 @@ struct flock {
pid_t l_pid; /* lock owner */
short l_type; /* lock type: read/write, etc. */
short l_whence; /* type of l_start */
+ int l_sysid; /* remote system id or zero for local */
+};
+
+/*
+ * Old advisory file segment locking data type,
+ * before adding l_sysid.
+ */
+struct oflock {
+ off_t l_start; /* starting offset */
+ off_t l_len; /* len = 0 means until end of file */
+ pid_t l_pid; /* lock owner */
+ short l_type; /* lock type: read/write, etc. */
+ short l_whence; /* type of l_start */
};
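
For userland callers the only visible change is the extra l_sysid field, which local processes leave as zero; only the kernel NLM fills it in for remote lock owners. A minimal userland sketch (not part of this change) of taking a blocking write lock with the extended structure:

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>

	static int
	example_lock_prefix(int fd)
	{
		struct flock fl;

		memset(&fl, 0, sizeof(fl));	/* l_sysid stays 0 locally */
		fl.l_start = 0;
		fl.l_len = 100;			/* first 100 bytes */
		fl.l_type = F_WRLCK;
		fl.l_whence = SEEK_SET;
		return (fcntl(fd, F_SETLKW, &fl));
	}
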
diff --git a/sys/sys/lockf.h b/sys/sys/lockf.h
index 880b487..11e2573 100644
--- a/sys/sys/lockf.h
+++ b/sys/sys/lockf.h
@@ -37,33 +37,85 @@
#define _SYS_LOCKF_H_
#include <sys/queue.h>
+#include <sys/_lock.h>
+#include <sys/_sx.h>
struct vop_advlock_args;
+struct vop_advlockasync_args;
/*
- * The lockf structure is a kernel structure which contains the information
- * associated with a byte range lock. The lockf structures are linked into
- * the inode structure. Locks are sorted by the starting byte of the lock for
- * efficiency.
+ * The lockf_entry structure is a kernel structure which contains the
+ * information associated with a byte range lock. The lockf_entry
+ * structures are linked into the inode structure. Locks are sorted by
+ * the starting byte of the lock for efficiency.
+ *
+ * Active and pending locks on a vnode are organised into a
+ * graph. Each pending lock has an out-going edge to each active lock
+ * that blocks it.
+ *
+ * Locks:
+ * (i) locked by the vnode interlock
+ * (s) locked by state->ls_lock
+ * (S) locked by lf_lock_states_lock
+ * (c) const until freeing
*/
-TAILQ_HEAD(locklist, lockf);
+struct lockf_edge {
+	LIST_ENTRY(lockf_edge) le_outlink; /* (s) link in le_from's out-edge list */
+	LIST_ENTRY(lockf_edge) le_inlink;  /* (s) link in le_to's in-edge list */
+ struct lockf_entry *le_from; /* (c) out-going from here */
+ struct lockf_entry *le_to; /* (s) in-coming to here */
+};
+LIST_HEAD(lockf_edge_list, lockf_edge);
-struct lockf {
- short lf_flags; /* Semantics: F_POSIX, F_FLOCK, F_WAIT */
- short lf_type; /* Lock type: F_RDLCK, F_WRLCK */
- off_t lf_start; /* Byte # of the start of the lock */
- off_t lf_end; /* Byte # of the end of the lock (-1=EOF) */
- caddr_t lf_id; /* Id of the resource holding the lock */
- struct lockf **lf_head; /* Back pointer to the head of the lockf list */
- struct inode *lf_inode; /* Back pointer to the inode */
- struct lockf *lf_next; /* Pointer to the next lock on this inode */
- struct locklist lf_blkhd; /* List of requests blocked on this lock */
- TAILQ_ENTRY(lockf) lf_block;/* A request waiting for a lock */
+struct lockf_entry {
+ short lf_flags; /* (c) Semantics: F_POSIX, F_FLOCK, F_WAIT */
+ short lf_type; /* (s) Lock type: F_RDLCK, F_WRLCK */
+ off_t lf_start; /* (s) Byte # of the start of the lock */
+ off_t lf_end; /* (s) Byte # of the end of the lock (OFF_MAX=EOF) */
+ struct lock_owner *lf_owner; /* (c) Owner of the lock */
+ struct vnode *lf_vnode; /* (c) File being locked (only valid for active lock) */
+ struct inode *lf_inode; /* (c) Back pointer to the inode */
+ struct task *lf_async_task;/* (c) Async lock callback */
+ LIST_ENTRY(lockf_entry) lf_link; /* (s) Linkage for lock lists */
+ struct lockf_edge_list lf_outedges; /* (s) list of out-edges */
+	struct lockf_edge_list lf_inedges; /* (s) list of in-edges */
};
+LIST_HEAD(lockf_entry_list, lockf_entry);
-/* Maximum length of sleep chains to traverse to try and detect deadlock. */
-#define MAXDEPTH 50
+/*
+ * Filesystem private node structures should include space for a
+ * pointer to a struct lockf. This pointer is used by the lock
+ * manager to track the locking state for a file.
+ *
+ * The ls_active list contains the set of active locks on the file. It
+ * is strictly ordered by the lock's lf_start value. Each active lock
+ * will have in-coming edges to any pending lock which it blocks.
+ *
+ * Lock requests which are blocked by some other active lock are
+ * listed in ls_pending with newer requests first in the list. Lock
+ * requests in this list will have out-going edges to each active lock
+ * that blocks them. They will also have out-going edges to each
+ * pending lock that is older in the queue - this helps to ensure
+ * fairness when several processes are contending to lock the same
+ * record.
+ *
+ * The value of ls_threads is the number of threads currently using
+ * the state structure (typically either setting/clearing locks or
+ * sleeping waiting to do so). This is used to defer freeing the
+ * structure while some thread is still using it.
+ */
+struct lockf {
+ LIST_ENTRY(lockf) ls_link; /* (S) all active lockf states */
+ struct sx ls_lock;
+ struct lockf_entry_list ls_active; /* (s) Active locks */
+ struct lockf_entry_list ls_pending; /* (s) Pending locks */
+ int ls_threads; /* (i) Thread count */
+};
+LIST_HEAD(lockf_list, lockf);
int lf_advlock(struct vop_advlock_args *, struct lockf **, u_quad_t);
+int lf_advlockasync(struct vop_advlockasync_args *, struct lockf **, u_quad_t);
+int lf_countlocks(int sysid);
+void lf_clearremotesys(int sysid);
#endif /* !_SYS_LOCKF_H_ */
diff --git a/sys/ufs/ufs/ufs_vnops.c b/sys/ufs/ufs/ufs_vnops.c
index 57763bc..d65d1fb 100644
--- a/sys/ufs/ufs/ufs_vnops.c
+++ b/sys/ufs/ufs/ufs_vnops.c
@@ -92,6 +92,7 @@ __FBSDID("$FreeBSD$");
static vop_access_t ufs_access;
static vop_advlock_t ufs_advlock;
+static vop_advlockasync_t ufs_advlockasync;
static int ufs_chmod(struct vnode *, int, struct ucred *, struct thread *);
static int ufs_chown(struct vnode *, uid_t, gid_t, struct ucred *, struct thread *);
static vop_close_t ufs_close;
@@ -2182,6 +2183,25 @@ ufs_advlock(ap)
}
/*
+ * Advisory record locking support
+ */
+static int
+ufs_advlockasync(ap)
+ struct vop_advlockasync_args /* {
+ struct vnode *a_vp;
+ caddr_t a_id;
+ int a_op;
+ struct flock *a_fl;
+ int a_flags;
+ struct task *a_task;
+ } */ *ap;
+{
+ struct inode *ip = VTOI(ap->a_vp);
+
+ return (lf_advlockasync(ap, &(ip->i_lockf), ip->i_size));
+}
+
+/*
* Initialize the vnode associated with a new inode, handle aliased
* vnodes.
*/
@@ -2449,6 +2469,7 @@ struct vop_vector ufs_vnodeops = {
.vop_write = VOP_PANIC,
.vop_access = ufs_access,
.vop_advlock = ufs_advlock,
+ .vop_advlockasync = ufs_advlockasync,
.vop_bmap = ufs_bmap,
.vop_cachedlookup = ufs_lookup,
.vop_close = ufs_close,
diff --git a/sys/xdr/xdr.c b/sys/xdr/xdr.c
new file mode 100644
index 0000000..69d9e4a
--- /dev/null
+++ b/sys/xdr/xdr.c
@@ -0,0 +1,816 @@
+/* $NetBSD: xdr.c,v 1.22 2000/07/06 03:10:35 christos Exp $ */
+
+/*
+ * Sun RPC is a product of Sun Microsystems, Inc. and is provided for
+ * unrestricted use provided that this legend is included on all tape
+ * media and as a part of the software program in whole or part. Users
+ * may copy or modify Sun RPC without charge, but are not authorized
+ * to license or distribute it to anyone else except as part of a product or
+ * program developed by the user.
+ *
+ * SUN RPC IS PROVIDED AS IS WITH NO WARRANTIES OF ANY KIND INCLUDING THE
+ * WARRANTIES OF DESIGN, MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE, OR ARISING FROM A COURSE OF DEALING, USAGE OR TRADE PRACTICE.
+ *
+ * Sun RPC is provided with no support and without any obligation on the
+ * part of Sun Microsystems, Inc. to assist in its use, correction,
+ * modification or enhancement.
+ *
+ * SUN MICROSYSTEMS, INC. SHALL HAVE NO LIABILITY WITH RESPECT TO THE
+ * INFRINGEMENT OF COPYRIGHTS, TRADE SECRETS OR ANY PATENTS BY SUN RPC
+ * OR ANY PART THEREOF.
+ *
+ * In no event will Sun Microsystems, Inc. be liable for any lost revenue
+ * or profits or other special, indirect and consequential damages, even if
+ * Sun has been advised of the possibility of such damages.
+ *
+ * Sun Microsystems, Inc.
+ * 2550 Garcia Avenue
+ * Mountain View, California 94043
+ */
+
+#if defined(LIBC_SCCS) && !defined(lint)
+static char *sccsid2 = "@(#)xdr.c 1.35 87/08/12";
+static char *sccsid = "@(#)xdr.c 2.1 88/07/29 4.0 RPCSRC";
+#endif
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * xdr.c, Generic XDR routines implementation.
+ *
+ * Copyright (C) 1986, Sun Microsystems, Inc.
+ *
+ * These are the "generic" xdr routines used to serialize and de-serialize
+ * most common data items. See xdr.h for more info on the interface to
+ * xdr.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+
+#include <rpc/types.h>
+#include <rpc/xdr.h>
+
+typedef quad_t longlong_t; /* ANSI long long type */
+typedef u_quad_t u_longlong_t; /* ANSI unsigned long long type */
+
+/*
+ * constants specific to the xdr "protocol"
+ */
+#define XDR_FALSE ((long) 0)
+#define XDR_TRUE ((long) 1)
+#define LASTUNSIGNED ((u_int) 0-1)
+
+/*
+ * for unit alignment
+ */
+static const char xdr_zero[BYTES_PER_XDR_UNIT] = { 0, 0, 0, 0 };
+
+/*
+ * Free a data structure using XDR
+ * Not a filter, but a convenient utility nonetheless
+ */
+void
+xdr_free(xdrproc_t proc, void *objp)
+{
+ XDR x;
+
+ x.x_op = XDR_FREE;
+ (*proc)(&x, objp);
+}
+
+/*
+ * XDR nothing
+ */
+bool_t
+xdr_void(void)
+{
+
+ return (TRUE);
+}
+
+
+/*
+ * XDR integers
+ */
+bool_t
+xdr_int(XDR *xdrs, int *ip)
+{
+ long l;
+
+ switch (xdrs->x_op) {
+
+ case XDR_ENCODE:
+ l = (long) *ip;
+ return (XDR_PUTLONG(xdrs, &l));
+
+ case XDR_DECODE:
+ if (!XDR_GETLONG(xdrs, &l)) {
+ return (FALSE);
+ }
+ *ip = (int) l;
+ return (TRUE);
+
+ case XDR_FREE:
+ return (TRUE);
+ }
+ /* NOTREACHED */
+ return (FALSE);
+}
+
+/*
+ * XDR unsigned integers
+ */
+bool_t
+xdr_u_int(XDR *xdrs, u_int *up)
+{
+ u_long l;
+
+ switch (xdrs->x_op) {
+
+ case XDR_ENCODE:
+ l = (u_long) *up;
+ return (XDR_PUTLONG(xdrs, (long *)&l));
+
+ case XDR_DECODE:
+ if (!XDR_GETLONG(xdrs, (long *)&l)) {
+ return (FALSE);
+ }
+ *up = (u_int) l;
+ return (TRUE);
+
+ case XDR_FREE:
+ return (TRUE);
+ }
+ /* NOTREACHED */
+ return (FALSE);
+}
+
+
+/*
+ * XDR long integers
+ * same as xdr_u_long - open coded to save a proc call!
+ */
+bool_t
+xdr_long(XDR *xdrs, long *lp)
+{
+ switch (xdrs->x_op) {
+ case XDR_ENCODE:
+ return (XDR_PUTLONG(xdrs, lp));
+ case XDR_DECODE:
+ return (XDR_GETLONG(xdrs, lp));
+ case XDR_FREE:
+ return (TRUE);
+ }
+ /* NOTREACHED */
+ return (FALSE);
+}
+
+/*
+ * XDR unsigned long integers
+ * same as xdr_long - open coded to save a proc call!
+ */
+bool_t
+xdr_u_long(XDR *xdrs, u_long *ulp)
+{
+ switch (xdrs->x_op) {
+ case XDR_ENCODE:
+ return (XDR_PUTLONG(xdrs, (long *)ulp));
+ case XDR_DECODE:
+ return (XDR_GETLONG(xdrs, (long *)ulp));
+ case XDR_FREE:
+ return (TRUE);
+ }
+ /* NOTREACHED */
+ return (FALSE);
+}
+
+
+/*
+ * XDR 32-bit integers
+ * same as xdr_uint32_t - open coded to save a proc call!
+ */
+bool_t
+xdr_int32_t(XDR *xdrs, int32_t *int32_p)
+{
+ long l;
+
+ switch (xdrs->x_op) {
+
+ case XDR_ENCODE:
+ l = (long) *int32_p;
+ return (XDR_PUTLONG(xdrs, &l));
+
+ case XDR_DECODE:
+ if (!XDR_GETLONG(xdrs, &l)) {
+ return (FALSE);
+ }
+ *int32_p = (int32_t) l;
+ return (TRUE);
+
+ case XDR_FREE:
+ return (TRUE);
+ }
+ /* NOTREACHED */
+ return (FALSE);
+}
+
+/*
+ * XDR unsigned 32-bit integers
+ * same as xdr_int32_t - open coded to save a proc call!
+ */
+bool_t
+xdr_uint32_t(XDR *xdrs, uint32_t *uint32_p)
+{
+ u_long l;
+
+ switch (xdrs->x_op) {
+
+ case XDR_ENCODE:
+ l = (u_long) *uint32_p;
+ return (XDR_PUTLONG(xdrs, (long *)&l));
+
+ case XDR_DECODE:
+ if (!XDR_GETLONG(xdrs, (long *)&l)) {
+ return (FALSE);
+ }
+ *uint32_p = (uint32_t) l;
+ return (TRUE);
+
+ case XDR_FREE:
+ return (TRUE);
+ }
+ /* NOTREACHED */
+ return (FALSE);
+}
+
+
+/*
+ * XDR short integers
+ */
+bool_t
+xdr_short(XDR *xdrs, short *sp)
+{
+ long l;
+
+ switch (xdrs->x_op) {
+
+ case XDR_ENCODE:
+ l = (long) *sp;
+ return (XDR_PUTLONG(xdrs, &l));
+
+ case XDR_DECODE:
+ if (!XDR_GETLONG(xdrs, &l)) {
+ return (FALSE);
+ }
+ *sp = (short) l;
+ return (TRUE);
+
+ case XDR_FREE:
+ return (TRUE);
+ }
+ /* NOTREACHED */
+ return (FALSE);
+}
+
+/*
+ * XDR unsigned short integers
+ */
+bool_t
+xdr_u_short(XDR *xdrs, u_short *usp)
+{
+ u_long l;
+
+ switch (xdrs->x_op) {
+
+ case XDR_ENCODE:
+ l = (u_long) *usp;
+ return (XDR_PUTLONG(xdrs, (long *)&l));
+
+ case XDR_DECODE:
+ if (!XDR_GETLONG(xdrs, (long *)&l)) {
+ return (FALSE);
+ }
+ *usp = (u_short) l;
+ return (TRUE);
+
+ case XDR_FREE:
+ return (TRUE);
+ }
+ /* NOTREACHED */
+ return (FALSE);
+}
+
+
+/*
+ * XDR 16-bit integers
+ */
+bool_t
+xdr_int16_t(XDR *xdrs, int16_t *int16_p)
+{
+ long l;
+
+ switch (xdrs->x_op) {
+
+ case XDR_ENCODE:
+ l = (long) *int16_p;
+ return (XDR_PUTLONG(xdrs, &l));
+
+ case XDR_DECODE:
+ if (!XDR_GETLONG(xdrs, &l)) {
+ return (FALSE);
+ }
+ *int16_p = (int16_t) l;
+ return (TRUE);
+
+ case XDR_FREE:
+ return (TRUE);
+ }
+ /* NOTREACHED */
+ return (FALSE);
+}
+
+/*
+ * XDR unsigned 16-bit integers
+ */
+bool_t
+xdr_uint16_t(XDR *xdrs, uint16_t *uint16_p)
+{
+ u_long l;
+
+ switch (xdrs->x_op) {
+
+ case XDR_ENCODE:
+ l = (u_long) *uint16_p;
+ return (XDR_PUTLONG(xdrs, (long *)&l));
+
+ case XDR_DECODE:
+ if (!XDR_GETLONG(xdrs, (long *)&l)) {
+ return (FALSE);
+ }
+ *uint16_p = (uint16_t) l;
+ return (TRUE);
+
+ case XDR_FREE:
+ return (TRUE);
+ }
+ /* NOTREACHED */
+ return (FALSE);
+}
+
+
+/*
+ * XDR a char
+ */
+bool_t
+xdr_char(XDR *xdrs, char *cp)
+{
+ int i;
+
+ i = (*cp);
+ if (!xdr_int(xdrs, &i)) {
+ return (FALSE);
+ }
+ *cp = i;
+ return (TRUE);
+}
+
+/*
+ * XDR an unsigned char
+ */
+bool_t
+xdr_u_char(XDR *xdrs, u_char *cp)
+{
+ u_int u;
+
+ u = (*cp);
+ if (!xdr_u_int(xdrs, &u)) {
+ return (FALSE);
+ }
+ *cp = u;
+ return (TRUE);
+}
+
+/*
+ * XDR booleans
+ */
+bool_t
+xdr_bool(XDR *xdrs, bool_t *bp)
+{
+ long lb;
+
+ switch (xdrs->x_op) {
+
+ case XDR_ENCODE:
+ lb = *bp ? XDR_TRUE : XDR_FALSE;
+ return (XDR_PUTLONG(xdrs, &lb));
+
+ case XDR_DECODE:
+ if (!XDR_GETLONG(xdrs, &lb)) {
+ return (FALSE);
+ }
+ *bp = (lb == XDR_FALSE) ? FALSE : TRUE;
+ return (TRUE);
+
+ case XDR_FREE:
+ return (TRUE);
+ }
+ /* NOTREACHED */
+ return (FALSE);
+}
+
+/*
+ * XDR enumerations
+ */
+bool_t
+xdr_enum(XDR *xdrs, enum_t *ep)
+{
+ enum sizecheck { SIZEVAL }; /* used to find the size of an enum */
+
+ /*
+ * enums are treated as ints
+ */
+ /* LINTED */ if (sizeof (enum sizecheck) == sizeof (long)) {
+ return (xdr_long(xdrs, (long *)(void *)ep));
+ } else /* LINTED */ if (sizeof (enum sizecheck) == sizeof (int)) {
+ return (xdr_int(xdrs, (int *)(void *)ep));
+ } else /* LINTED */ if (sizeof (enum sizecheck) == sizeof (short)) {
+ return (xdr_short(xdrs, (short *)(void *)ep));
+ } else {
+ return (FALSE);
+ }
+}
+
+/*
+ * XDR opaque data
+ * Allows the specification of a fixed size sequence of opaque bytes.
+ * cp points to the opaque object and cnt gives the byte length.
+ */
+bool_t
+xdr_opaque(XDR *xdrs, caddr_t cp, u_int cnt)
+{
+ u_int rndup;
+ static int crud[BYTES_PER_XDR_UNIT];
+
+ /*
+ * if no data we are done
+ */
+ if (cnt == 0)
+ return (TRUE);
+
+ /*
+ * round byte count to full xdr units
+ */
+ rndup = cnt % BYTES_PER_XDR_UNIT;
+ if (rndup > 0)
+ rndup = BYTES_PER_XDR_UNIT - rndup;
+
+ if (xdrs->x_op == XDR_DECODE) {
+ if (!XDR_GETBYTES(xdrs, cp, cnt)) {
+ return (FALSE);
+ }
+ if (rndup == 0)
+ return (TRUE);
+ return (XDR_GETBYTES(xdrs, (caddr_t)(void *)crud, rndup));
+ }
+
+ if (xdrs->x_op == XDR_ENCODE) {
+ if (!XDR_PUTBYTES(xdrs, cp, cnt)) {
+ return (FALSE);
+ }
+ if (rndup == 0)
+ return (TRUE);
+ return (XDR_PUTBYTES(xdrs, xdr_zero, rndup));
+ }
+
+ if (xdrs->x_op == XDR_FREE) {
+ return (TRUE);
+ }
+
+ return (FALSE);
+}
+
+/*
+ * XDR counted bytes
+ * *cpp is a pointer to the bytes, *sizep is the count.
+ * If *cpp is NULL maxsize bytes are allocated
+ */
+bool_t
+xdr_bytes(XDR *xdrs, char **cpp, u_int *sizep, u_int maxsize)
+{
+ char *sp = *cpp; /* sp is the actual string pointer */
+ u_int nodesize;
+
+ /*
+ * first deal with the length since xdr bytes are counted
+ */
+ if (! xdr_u_int(xdrs, sizep)) {
+ return (FALSE);
+ }
+ nodesize = *sizep;
+ if ((nodesize > maxsize) && (xdrs->x_op != XDR_FREE)) {
+ return (FALSE);
+ }
+
+ /*
+ * now deal with the actual bytes
+ */
+ switch (xdrs->x_op) {
+
+ case XDR_DECODE:
+ if (nodesize == 0) {
+ return (TRUE);
+ }
+ if (sp == NULL) {
+ *cpp = sp = mem_alloc(nodesize);
+ }
+ if (sp == NULL) {
+ printf("xdr_bytes: out of memory");
+ return (FALSE);
+ }
+ /* FALLTHROUGH */
+
+ case XDR_ENCODE:
+ return (xdr_opaque(xdrs, sp, nodesize));
+
+ case XDR_FREE:
+ if (sp != NULL) {
+ mem_free(sp, nodesize);
+ *cpp = NULL;
+ }
+ return (TRUE);
+ }
+ /* NOTREACHED */
+ return (FALSE);
+}
+
+/*
+ * Implemented here due to commonality of the object.
+ */
+bool_t
+xdr_netobj(XDR *xdrs, struct netobj *np)
+{
+
+ return (xdr_bytes(xdrs, &np->n_bytes, &np->n_len, MAX_NETOBJ_SZ));
+}
+
+/*
+ * XDR a discriminated union
+ * Support routine for discriminated unions.
+ * You create an array of xdrdiscrim structures, terminated with
+ * an entry with a null procedure pointer. The routine gets
+ * the discriminant value and then searches the array of xdrdiscrims
+ * looking for that value. It calls the procedure given in the xdrdiscrim
+ * to handle the discriminant. If there is no specific routine a default
+ * routine may be called.
+ * If there is no specific or default routine an error is returned.
+ */
+bool_t
+xdr_union(XDR *xdrs,
+ enum_t *dscmp, /* enum to decide which arm to work on */
+ char *unp, /* the union itself */
+ const struct xdr_discrim *choices, /* [value, xdr proc] for each arm */
+ xdrproc_t dfault) /* default xdr routine */
+{
+ enum_t dscm;
+
+ /*
+ * we deal with the discriminator; it's an enum
+ */
+ if (! xdr_enum(xdrs, dscmp)) {
+ return (FALSE);
+ }
+ dscm = *dscmp;
+
+ /*
+ * search choices for a value that matches the discriminator.
+ * if we find one, execute the xdr routine for that value.
+ */
+ for (; choices->proc != NULL_xdrproc_t; choices++) {
+ if (choices->value == dscm)
+ return ((*(choices->proc))(xdrs, unp));
+ }
+
+ /*
+ * no match - execute the default xdr routine if there is one
+ */
+ return ((dfault == NULL_xdrproc_t) ? FALSE :
+ (*dfault)(xdrs, unp));
+}
+
+
+/*
+ * Non-portable xdr primitives.
+ * Care should be taken when moving these routines to new architectures.
+ */
+
+
+/*
+ * XDR null terminated ASCII strings
+ * xdr_string deals with "C strings" - arrays of bytes that are
+ * terminated by a NUL character. The parameter cpp references a
+ * pointer to storage; if the pointer is null, then the necessary
+ * storage is allocated. The last parameter is the max allowed length
+ * of the string as specified by a protocol.
+ */
+bool_t
+xdr_string(XDR *xdrs, char **cpp, u_int maxsize)
+{
+ char *sp = *cpp; /* sp is the actual string pointer */
+ u_int size;
+ u_int nodesize;
+
+ /*
+ * first deal with the length since xdr strings are counted-strings
+ */
+ switch (xdrs->x_op) {
+ case XDR_FREE:
+ if (sp == NULL) {
+ return(TRUE); /* already free */
+ }
+ /* FALLTHROUGH */
+ case XDR_ENCODE:
+ size = strlen(sp);
+ break;
+ case XDR_DECODE:
+ break;
+ }
+ if (! xdr_u_int(xdrs, &size)) {
+ return (FALSE);
+ }
+ if (size > maxsize) {
+ return (FALSE);
+ }
+ nodesize = size + 1;
+
+ /*
+ * now deal with the actual bytes
+ */
+ switch (xdrs->x_op) {
+
+ case XDR_DECODE:
+ if (nodesize == 0) {
+ return (TRUE);
+ }
+ if (sp == NULL)
+ *cpp = sp = mem_alloc(nodesize);
+ if (sp == NULL) {
+ printf("xdr_string: out of memory");
+ return (FALSE);
+ }
+ sp[size] = 0;
+ /* FALLTHROUGH */
+
+ case XDR_ENCODE:
+ return (xdr_opaque(xdrs, sp, size));
+
+ case XDR_FREE:
+ mem_free(sp, nodesize);
+ *cpp = NULL;
+ return (TRUE);
+ }
+ /* NOTREACHED */
+ return (FALSE);
+}
+
+/*
+ * Wrapper for xdr_string that can be called directly from
+ * routines like clnt_call
+ */
+bool_t
+xdr_wrapstring(XDR *xdrs, char **cpp)
+{
+ return xdr_string(xdrs, cpp, LASTUNSIGNED);
+}
+
+/*
+ * NOTE: xdr_hyper(), xdr_u_hyper(), xdr_longlong_t(), and xdr_u_longlong_t()
+ * are in the "non-portable" section because they require that a `long long'
+ * be a 64-bit type.
+ *
+ * --thorpej@netbsd.org, November 30, 1999
+ */
+
+/*
+ * XDR 64-bit integers
+ */
+bool_t
+xdr_int64_t(XDR *xdrs, int64_t *llp)
+{
+ u_long ul[2];
+
+ switch (xdrs->x_op) {
+ case XDR_ENCODE:
+ ul[0] = (u_long)((uint64_t)*llp >> 32) & 0xffffffff;
+ ul[1] = (u_long)((uint64_t)*llp) & 0xffffffff;
+ if (XDR_PUTLONG(xdrs, (long *)&ul[0]) == FALSE)
+ return (FALSE);
+ return (XDR_PUTLONG(xdrs, (long *)&ul[1]));
+ case XDR_DECODE:
+ if (XDR_GETLONG(xdrs, (long *)&ul[0]) == FALSE)
+ return (FALSE);
+ if (XDR_GETLONG(xdrs, (long *)&ul[1]) == FALSE)
+ return (FALSE);
+ *llp = (int64_t)
+ (((uint64_t)ul[0] << 32) | ((uint64_t)ul[1]));
+ return (TRUE);
+ case XDR_FREE:
+ return (TRUE);
+ }
+ /* NOTREACHED */
+ return (FALSE);
+}
+
+
+/*
+ * XDR unsigned 64-bit integers
+ */
+bool_t
+xdr_uint64_t(XDR *xdrs, uint64_t *ullp)
+{
+ u_long ul[2];
+
+ switch (xdrs->x_op) {
+ case XDR_ENCODE:
+ ul[0] = (u_long)(*ullp >> 32) & 0xffffffff;
+ ul[1] = (u_long)(*ullp) & 0xffffffff;
+ if (XDR_PUTLONG(xdrs, (long *)&ul[0]) == FALSE)
+ return (FALSE);
+ return (XDR_PUTLONG(xdrs, (long *)&ul[1]));
+ case XDR_DECODE:
+ if (XDR_GETLONG(xdrs, (long *)&ul[0]) == FALSE)
+ return (FALSE);
+ if (XDR_GETLONG(xdrs, (long *)&ul[1]) == FALSE)
+ return (FALSE);
+ *ullp = (uint64_t)
+ (((uint64_t)ul[0] << 32) | ((uint64_t)ul[1]));
+ return (TRUE);
+ case XDR_FREE:
+ return (TRUE);
+ }
+ /* NOTREACHED */
+ return (FALSE);
+}
+
+
+/*
+ * XDR hypers
+ */
+bool_t
+xdr_hyper(XDR *xdrs, longlong_t *llp)
+{
+
+ /*
+ * Don't bother open-coding this; it's a fair amount of code. Just
+ * call xdr_int64_t().
+ */
+ return (xdr_int64_t(xdrs, (int64_t *)llp));
+}
+
+
+/*
+ * XDR unsigned hypers
+ */
+bool_t
+xdr_u_hyper(XDR *xdrs, u_longlong_t *ullp)
+{
+
+ /*
+ * Don't bother open-coding this; it's a fair amount of code. Just
+ * call xdr_uint64_t().
+ */
+ return (xdr_uint64_t(xdrs, (uint64_t *)ullp));
+}
+
+
+/*
+ * XDR longlong_t's
+ */
+bool_t
+xdr_longlong_t(XDR *xdrs, longlong_t *llp)
+{
+
+ /*
+ * Don't bother open-coding this; it's a fair amount of code. Just
+ * call xdr_int64_t().
+ */
+ return (xdr_int64_t(xdrs, (int64_t *)llp));
+}
+
+
+/*
+ * XDR u_longlong_t's
+ */
+bool_t
+xdr_u_longlong_t(XDR *xdrs, u_longlong_t *ullp)
+{
+
+ /*
+ * Don't bother open-coding this; it's a fair amount of code. Just
+ * call xdr_uint64_t().
+ */
+ return (xdr_uint64_t(xdrs, (uint64_t *)ullp));
+}
diff --git a/sys/xdr/xdr_array.c b/sys/xdr/xdr_array.c
new file mode 100644
index 0000000..31fe934
--- /dev/null
+++ b/sys/xdr/xdr_array.c
@@ -0,0 +1,155 @@
+/* $NetBSD: xdr_array.c,v 1.12 2000/01/22 22:19:18 mycroft Exp $ */
+
+/*
+ * Sun RPC is a product of Sun Microsystems, Inc. and is provided for
+ * unrestricted use provided that this legend is included on all tape
+ * media and as a part of the software program in whole or part. Users
+ * may copy or modify Sun RPC without charge, but are not authorized
+ * to license or distribute it to anyone else except as part of a product or
+ * program developed by the user.
+ *
+ * SUN RPC IS PROVIDED AS IS WITH NO WARRANTIES OF ANY KIND INCLUDING THE
+ * WARRANTIES OF DESIGN, MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE, OR ARISING FROM A COURSE OF DEALING, USAGE OR TRADE PRACTICE.
+ *
+ * Sun RPC is provided with no support and without any obligation on the
+ * part of Sun Microsystems, Inc. to assist in its use, correction,
+ * modification or enhancement.
+ *
+ * SUN MICROSYSTEMS, INC. SHALL HAVE NO LIABILITY WITH RESPECT TO THE
+ * INFRINGEMENT OF COPYRIGHTS, TRADE SECRETS OR ANY PATENTS BY SUN RPC
+ * OR ANY PART THEREOF.
+ *
+ * In no event will Sun Microsystems, Inc. be liable for any lost revenue
+ * or profits or other special, indirect and consequential damages, even if
+ * Sun has been advised of the possibility of such damages.
+ *
+ * Sun Microsystems, Inc.
+ * 2550 Garcia Avenue
+ * Mountain View, California 94043
+ */
+
+#if defined(LIBC_SCCS) && !defined(lint)
+static char *sccsid2 = "@(#)xdr_array.c 1.10 87/08/11 Copyr 1984 Sun Micro";
+static char *sccsid = "@(#)xdr_array.c 2.1 88/07/29 4.0 RPCSRC";
+#endif
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * xdr_array.c, Generic XDR routines implementation.
+ *
+ * Copyright (C) 1984, Sun Microsystems, Inc.
+ *
+ * These are the "non-trivial" xdr primitives used to serialize and de-serialize
+ * arrays. See xdr.h for more info on the interface to xdr.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/limits.h>
+#include <sys/malloc.h>
+
+#include <rpc/types.h>
+#include <rpc/xdr.h>
+
+/*
+ * XDR an array of arbitrary elements
+ * *addrp is a pointer to the array, *sizep is the number of elements.
+ * If *addrp is NULL, (*sizep * elsize) bytes are allocated.
+ * elsize is the size (in bytes) of each element, and elproc is the
+ * xdr procedure to call to handle each element of the array.
+ */
+bool_t
+xdr_array(XDR *xdrs,
+ caddr_t *addrp, /* array pointer */
+ u_int *sizep, /* number of elements */
+ u_int maxsize, /* max number of elements */
+ u_int elsize, /* size in bytes of each element */
+ xdrproc_t elproc) /* xdr routine to handle each element */
+{
+ u_int i;
+ caddr_t target = *addrp;
+ u_int c; /* the actual element count */
+ bool_t stat = TRUE;
+ u_int nodesize;
+
+ /* like strings, arrays are really counted arrays */
+ if (!xdr_u_int(xdrs, sizep)) {
+ return (FALSE);
+ }
+ c = *sizep;
+ if ((c > maxsize || UINT_MAX/elsize < c) &&
+ (xdrs->x_op != XDR_FREE)) {
+ return (FALSE);
+ }
+ nodesize = c * elsize;
+
+ /*
+ * if we are deserializing, we may need to allocate an array.
+ * We also save time by checking for a null array if we are freeing.
+ */
+ if (target == NULL)
+ switch (xdrs->x_op) {
+ case XDR_DECODE:
+ if (c == 0)
+ return (TRUE);
+ *addrp = target = mem_alloc(nodesize);
+ if (target == NULL) {
+ printf("xdr_array: out of memory");
+ return (FALSE);
+ }
+ memset(target, 0, nodesize);
+ break;
+
+ case XDR_FREE:
+ return (TRUE);
+
+ case XDR_ENCODE:
+ break;
+ }
+
+ /*
+ * now we xdr each element of array
+ */
+ for (i = 0; (i < c) && stat; i++) {
+ stat = (*elproc)(xdrs, target);
+ target += elsize;
+ }
+
+ /*
+ * the array may need freeing
+ */
+ if (xdrs->x_op == XDR_FREE) {
+ mem_free(*addrp, nodesize);
+ *addrp = NULL;
+ }
+ return (stat);
+}
+
+/*
+ * xdr_vector():
+ *
+ * XDR a fixed length array. Unlike variable-length arrays,
+ * the storage of fixed length arrays is static and unfreeable.
+ * > basep: base of the array
+ * > nelem: number of elements in the array
+ * > elemsize: size of each element
+ * > xdr_elem: routine to XDR each element
+ */
+bool_t
+xdr_vector(XDR *xdrs, char *basep, u_int nelem, u_int elemsize,
+ xdrproc_t xdr_elem)
+{
+ u_int i;
+ char *elptr;
+
+ elptr = basep;
+ for (i = 0; i < nelem; i++) {
+ if (!(*xdr_elem)(xdrs, elptr)) {
+ return(FALSE);
+ }
+ elptr += elemsize;
+ }
+ return(TRUE);
+}
diff --git a/sys/xdr/xdr_mbuf.c b/sys/xdr/xdr_mbuf.c
new file mode 100644
index 0000000..770dfc3
--- /dev/null
+++ b/sys/xdr/xdr_mbuf.c
@@ -0,0 +1,238 @@
+/*-
+ * Copyright (c) 2008 Isilon Inc http://www.isilon.com/
+ * Authors: Doug Rabson <dfr@rabson.org>
+ * Developed with Red Inc: Alfred Perlstein <alfred@freebsd.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+
+#include <rpc/types.h>
+#include <rpc/xdr.h>
+
+static void xdrmbuf_destroy(XDR *);
+static bool_t xdrmbuf_getlong(XDR *, long *);
+static bool_t xdrmbuf_putlong(XDR *, const long *);
+static bool_t xdrmbuf_getbytes(XDR *, char *, u_int);
+static bool_t xdrmbuf_putbytes(XDR *, const char *, u_int);
+/* XXX: w/64-bit pointers, u_int not enough! */
+static u_int xdrmbuf_getpos(XDR *);
+static bool_t xdrmbuf_setpos(XDR *, u_int);
+static int32_t *xdrmbuf_inline(XDR *, u_int);
+
+static const struct xdr_ops xdrmbuf_ops = {
+ xdrmbuf_getlong,
+ xdrmbuf_putlong,
+ xdrmbuf_getbytes,
+ xdrmbuf_putbytes,
+ xdrmbuf_getpos,
+ xdrmbuf_setpos,
+ xdrmbuf_inline,
+ xdrmbuf_destroy
+};
+
+/*
+ * The procedure xdrmbuf_create initializes a stream descriptor for an
+ * mbuf chain.
+ */
+void
+xdrmbuf_create(XDR *xdrs, struct mbuf *m, enum xdr_op op)
+{
+
+ xdrs->x_op = op;
+ xdrs->x_ops = &xdrmbuf_ops;
+ xdrs->x_base = (char *) m;
+ if (op == XDR_ENCODE) {
+ m = m_last(m);
+ xdrs->x_private = m;
+ xdrs->x_handy = m->m_len;
+ } else {
+ xdrs->x_private = m;
+ xdrs->x_handy = 0;
+ }
+}
+
+static void
+xdrmbuf_destroy(XDR *xdrs)
+{
+
+ if (xdrs->x_op == XDR_DECODE && xdrs->x_base) {
+ m_freem((struct mbuf *) xdrs->x_base);
+ xdrs->x_base = NULL;
+ xdrs->x_private = NULL;
+ }
+}
+
+static bool_t
+xdrmbuf_getlong(XDR *xdrs, long *lp)
+{
+ int32_t t;
+
+ xdrmbuf_getbytes(xdrs, (char *) &t, sizeof(int32_t));
+ *lp = ntohl(t);
+ return (TRUE);
+}
+
+static bool_t
+xdrmbuf_putlong(XDR *xdrs, const long *lp)
+{
+ int32_t t = htonl(*lp);
+
+ xdrmbuf_putbytes(xdrs, (char *) &t, sizeof(int32_t));
+ return (TRUE);
+}
+
+static bool_t
+xdrmbuf_getbytes(XDR *xdrs, char *addr, u_int len)
+{
+ struct mbuf *m = (struct mbuf *) xdrs->x_private;
+ size_t sz;
+
+ while (len > 0) {
+ /*
+ * Make sure we haven't hit the end.
+ */
+ if (!m) {
+ return (FALSE);
+ }
+
+ /*
+ * See how much we can get from this mbuf.
+ */
+ sz = m->m_len - xdrs->x_handy;
+ if (sz > len)
+ sz = len;
+ memcpy(addr, mtod(m, const char *) + xdrs->x_handy, sz);
+
+ addr += sz;
+ xdrs->x_handy += sz;
+ len -= sz;
+
+ if (xdrs->x_handy == m->m_len) {
+ m = m->m_next;
+ xdrs->x_private = (void *) m;
+ xdrs->x_handy = 0;
+ }
+ }
+
+ return (TRUE);
+}
+
+static bool_t
+xdrmbuf_putbytes(XDR *xdrs, const char *addr, u_int len)
+{
+ struct mbuf *m = (struct mbuf *) xdrs->x_private;
+ struct mbuf *n;
+ size_t sz;
+
+ while (len > 0) {
+ sz = M_TRAILINGSPACE(m) + (m->m_len - xdrs->x_handy);
+ if (sz > len)
+ sz = len;
+ memcpy(mtod(m, char *) + xdrs->x_handy, addr, sz);
+ addr += sz;
+ xdrs->x_handy += sz;
+ if (xdrs->x_handy > m->m_len)
+ m->m_len = xdrs->x_handy;
+ len -= sz;
+
+ if (xdrs->x_handy == m->m_len && M_TRAILINGSPACE(m) == 0) {
+ if (!m->m_next) {
+ MGET(n, M_TRYWAIT, m->m_type);
+ m->m_next = n;
+ }
+ m = m->m_next;
+ xdrs->x_private = (void *) m;
+ xdrs->x_handy = 0;
+ }
+ }
+
+ return (TRUE);
+}
+
+static u_int
+xdrmbuf_getpos(XDR *xdrs)
+{
+ struct mbuf *m0 = (struct mbuf *) xdrs->x_base;
+ struct mbuf *m = (struct mbuf *) xdrs->x_private;
+ u_int pos = 0;
+
+ while (m0 && m0 != m) {
+ pos += m0->m_len;
+ m0 = m0->m_next;
+ }
+ KASSERT(m0, ("Corrupted mbuf chain"));
+
+ return (pos + xdrs->x_handy);
+}
+
+static bool_t
+xdrmbuf_setpos(XDR *xdrs, u_int pos)
+{
+ struct mbuf *m = (struct mbuf *) xdrs->x_base;
+
+ while (m && pos > m->m_len) {
+ pos -= m->m_len;
+ m = m->m_next;
+ }
+ KASSERT(m, ("Corrupted mbuf chain"));
+
+ xdrs->x_private = (void *) m;
+ xdrs->x_handy = pos;
+
+ return (TRUE);
+}
+
+static int32_t *
+xdrmbuf_inline(XDR *xdrs, u_int len)
+{
+ struct mbuf *m = (struct mbuf *) xdrs->x_private;
+ size_t available;
+ char *p;
+
+ if (xdrs->x_op == XDR_ENCODE) {
+ available = M_TRAILINGSPACE(m) + (m->m_len - xdrs->x_handy);
+ } else {
+ available = m->m_len - xdrs->x_handy;
+ }
+
+ if (available >= len) {
+ p = mtod(m, char *) + xdrs->x_handy;
+ if (((uintptr_t) p) & (sizeof(int32_t) - 1))
+ return (0);
+ xdrs->x_handy += len;
+ if (xdrs->x_handy > m->m_len)
+ m->m_len = xdrs->x_handy;
+ return ((int32_t *) p);
+ }
+
+ return (0);
+}
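For orientation only (not part of the patch), here is a sketch of how a kernel caller might encode a single word into a fresh mbuf with xdrmbuf_create(); example_encode() is an invented name and the error handling is deliberately minimal.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>

#include <rpc/types.h>
#include <rpc/xdr.h>

static struct mbuf *
example_encode(u_int value)
{
	struct mbuf *m;
	XDR xdrs;

	MGET(m, M_TRYWAIT, MT_DATA);	/* same allocation style as above */
	if (m == NULL)
		return (NULL);
	m->m_len = 0;
	xdrmbuf_create(&xdrs, m, XDR_ENCODE);
	if (!xdr_u_int(&xdrs, &value)) {
		m_freem(m);
		return (NULL);
	}
	/* Destroying an encode-side stream does not free the chain. */
	XDR_DESTROY(&xdrs);
	return (m);
}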
diff --git a/sys/xdr/xdr_mem.c b/sys/xdr/xdr_mem.c
new file mode 100644
index 0000000..121d5d8
--- /dev/null
+++ b/sys/xdr/xdr_mem.c
@@ -0,0 +1,232 @@
+/* $NetBSD: xdr_mem.c,v 1.15 2000/01/22 22:19:18 mycroft Exp $ */
+
+/*
+ * Sun RPC is a product of Sun Microsystems, Inc. and is provided for
+ * unrestricted use provided that this legend is included on all tape
+ * media and as a part of the software program in whole or part. Users
+ * may copy or modify Sun RPC without charge, but are not authorized
+ * to license or distribute it to anyone else except as part of a product or
+ * program developed by the user.
+ *
+ * SUN RPC IS PROVIDED AS IS WITH NO WARRANTIES OF ANY KIND INCLUDING THE
+ * WARRANTIES OF DESIGN, MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE, OR ARISING FROM A COURSE OF DEALING, USAGE OR TRADE PRACTICE.
+ *
+ * Sun RPC is provided with no support and without any obligation on the
+ * part of Sun Microsystems, Inc. to assist in its use, correction,
+ * modification or enhancement.
+ *
+ * SUN MICROSYSTEMS, INC. SHALL HAVE NO LIABILITY WITH RESPECT TO THE
+ * INFRINGEMENT OF COPYRIGHTS, TRADE SECRETS OR ANY PATENTS BY SUN RPC
+ * OR ANY PART THEREOF.
+ *
+ * In no event will Sun Microsystems, Inc. be liable for any lost revenue
+ * or profits or other special, indirect and consequential damages, even if
+ * Sun has been advised of the possibility of such damages.
+ *
+ * Sun Microsystems, Inc.
+ * 2550 Garcia Avenue
+ * Mountain View, California 94043
+ */
+
+#if defined(LIBC_SCCS) && !defined(lint)
+static char *sccsid2 = "@(#)xdr_mem.c 1.19 87/08/11 Copyr 1984 Sun Micro";
+static char *sccsid = "@(#)xdr_mem.c 2.1 88/07/29 4.0 RPCSRC";
+#endif
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * xdr_mem.c, XDR implementation using memory buffers.
+ *
+ * Copyright (C) 1984, Sun Microsystems, Inc.
+ *
+ * If you have some data to be interpreted as external data representation
+ * or to be converted to external data representation in a memory buffer,
+ * then this is the package for you.
+ *
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/malloc.h>
+
+#include <rpc/types.h>
+#include <rpc/xdr.h>
+
+#define memmove(dst, src, len) bcopy(src, dst, len)
+
+static void xdrmem_destroy(XDR *);
+static bool_t xdrmem_getlong_aligned(XDR *, long *);
+static bool_t xdrmem_putlong_aligned(XDR *, const long *);
+static bool_t xdrmem_getlong_unaligned(XDR *, long *);
+static bool_t xdrmem_putlong_unaligned(XDR *, const long *);
+static bool_t xdrmem_getbytes(XDR *, char *, u_int);
+static bool_t xdrmem_putbytes(XDR *, const char *, u_int);
+/* XXX: w/64-bit pointers, u_int not enough! */
+static u_int xdrmem_getpos(XDR *);
+static bool_t xdrmem_setpos(XDR *, u_int);
+static int32_t *xdrmem_inline_aligned(XDR *, u_int);
+static int32_t *xdrmem_inline_unaligned(XDR *, u_int);
+
+static const struct xdr_ops xdrmem_ops_aligned = {
+ xdrmem_getlong_aligned,
+ xdrmem_putlong_aligned,
+ xdrmem_getbytes,
+ xdrmem_putbytes,
+ xdrmem_getpos,
+ xdrmem_setpos,
+ xdrmem_inline_aligned,
+ xdrmem_destroy
+};
+
+static const struct xdr_ops xdrmem_ops_unaligned = {
+ xdrmem_getlong_unaligned,
+ xdrmem_putlong_unaligned,
+ xdrmem_getbytes,
+ xdrmem_putbytes,
+ xdrmem_getpos,
+ xdrmem_setpos,
+ xdrmem_inline_unaligned,
+ xdrmem_destroy
+};
+
+/*
+ * The procedure xdrmem_create initializes a stream descriptor for a
+ * memory buffer.
+ */
+void
+xdrmem_create(XDR *xdrs, char *addr, u_int size, enum xdr_op op)
+{
+
+ xdrs->x_op = op;
+ xdrs->x_ops = ((unsigned long)addr & (sizeof(int32_t) - 1))
+ ? &xdrmem_ops_unaligned : &xdrmem_ops_aligned;
+ xdrs->x_private = xdrs->x_base = addr;
+ xdrs->x_handy = size;
+}
+
+/*ARGSUSED*/
+static void
+xdrmem_destroy(XDR *xdrs)
+{
+
+}
+
+static bool_t
+xdrmem_getlong_aligned(XDR *xdrs, long *lp)
+{
+
+ if (xdrs->x_handy < sizeof(int32_t))
+ return (FALSE);
+ xdrs->x_handy -= sizeof(int32_t);
+ *lp = ntohl(*(u_int32_t *)xdrs->x_private);
+ xdrs->x_private = (char *)xdrs->x_private + sizeof(int32_t);
+ return (TRUE);
+}
+
+static bool_t
+xdrmem_putlong_aligned(XDR *xdrs, const long *lp)
+{
+
+ if (xdrs->x_handy < sizeof(int32_t))
+ return (FALSE);
+ xdrs->x_handy -= sizeof(int32_t);
+ *(u_int32_t *)xdrs->x_private = htonl((u_int32_t)*lp);
+ xdrs->x_private = (char *)xdrs->x_private + sizeof(int32_t);
+ return (TRUE);
+}
+
+static bool_t
+xdrmem_getlong_unaligned(XDR *xdrs, long *lp)
+{
+ u_int32_t l;
+
+ if (xdrs->x_handy < sizeof(int32_t))
+ return (FALSE);
+ xdrs->x_handy -= sizeof(int32_t);
+ memmove(&l, xdrs->x_private, sizeof(int32_t));
+ *lp = ntohl(l);
+ xdrs->x_private = (char *)xdrs->x_private + sizeof(int32_t);
+ return (TRUE);
+}
+
+static bool_t
+xdrmem_putlong_unaligned(XDR *xdrs, const long *lp)
+{
+ u_int32_t l;
+
+ if (xdrs->x_handy < sizeof(int32_t))
+ return (FALSE);
+ xdrs->x_handy -= sizeof(int32_t);
+ l = htonl((u_int32_t)*lp);
+ memmove(xdrs->x_private, &l, sizeof(int32_t));
+ xdrs->x_private = (char *)xdrs->x_private + sizeof(int32_t);
+ return (TRUE);
+}
+
+static bool_t
+xdrmem_getbytes(XDR *xdrs, char *addr, u_int len)
+{
+
+ if (xdrs->x_handy < len)
+ return (FALSE);
+ xdrs->x_handy -= len;
+ memmove(addr, xdrs->x_private, len);
+ xdrs->x_private = (char *)xdrs->x_private + len;
+ return (TRUE);
+}
+
+static bool_t
+xdrmem_putbytes(XDR *xdrs, const char *addr, u_int len)
+{
+
+ if (xdrs->x_handy < len)
+ return (FALSE);
+ xdrs->x_handy -= len;
+ memmove(xdrs->x_private, addr, len);
+ xdrs->x_private = (char *)xdrs->x_private + len;
+ return (TRUE);
+}
+
+static u_int
+xdrmem_getpos(XDR *xdrs)
+{
+
+ /* XXX w/64-bit pointers, u_int not enough! */
+ return (u_int)((u_long)xdrs->x_private - (u_long)xdrs->x_base);
+}
+
+static bool_t
+xdrmem_setpos(XDR *xdrs, u_int pos)
+{
+ char *newaddr = xdrs->x_base + pos;
+ char *lastaddr = (char *)xdrs->x_private + xdrs->x_handy;
+
+ if (newaddr > lastaddr)
+ return (FALSE);
+ xdrs->x_private = newaddr;
+ xdrs->x_handy = (u_int)(lastaddr - newaddr); /* XXX sizeof(u_int) <? sizeof(ptrdiff_t) */
+ return (TRUE);
+}
+
+static int32_t *
+xdrmem_inline_aligned(XDR *xdrs, u_int len)
+{
+ int32_t *buf = 0;
+
+ if (xdrs->x_handy >= len) {
+ xdrs->x_handy -= len;
+ buf = (int32_t *)xdrs->x_private;
+ xdrs->x_private = (char *)xdrs->x_private + len;
+ }
+ return (buf);
+}
+
+/* ARGSUSED */
+static int32_t *
+xdrmem_inline_unaligned(XDR *xdrs, u_int len)
+{
+
+ return (0);
+}
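A brief sketch (not part of the patch) of the usual xdrmem_create() pattern: encode into a fixed buffer, note the stream position, then decode the same bytes back. The function name is invented.

#include <sys/param.h>
#include <sys/systm.h>

#include <rpc/types.h>
#include <rpc/xdr.h>

static bool_t
example_roundtrip(void)
{
	char buf[64];
	u_int in = 42, out = 0, len;
	XDR xdrs;

	xdrmem_create(&xdrs, buf, sizeof(buf), XDR_ENCODE);
	if (!xdr_u_int(&xdrs, &in))
		return (FALSE);
	len = XDR_GETPOS(&xdrs);	/* bytes actually encoded */

	xdrmem_create(&xdrs, buf, len, XDR_DECODE);
	if (!xdr_u_int(&xdrs, &out))
		return (FALSE);
	return (out == in ? TRUE : FALSE);
}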
diff --git a/sys/xdr/xdr_reference.c b/sys/xdr/xdr_reference.c
new file mode 100644
index 0000000..16ff848
--- /dev/null
+++ b/sys/xdr/xdr_reference.c
@@ -0,0 +1,135 @@
+/* $NetBSD: xdr_reference.c,v 1.13 2000/01/22 22:19:18 mycroft Exp $ */
+
+/*
+ * Sun RPC is a product of Sun Microsystems, Inc. and is provided for
+ * unrestricted use provided that this legend is included on all tape
+ * media and as a part of the software program in whole or part. Users
+ * may copy or modify Sun RPC without charge, but are not authorized
+ * to license or distribute it to anyone else except as part of a product or
+ * program developed by the user.
+ *
+ * SUN RPC IS PROVIDED AS IS WITH NO WARRANTIES OF ANY KIND INCLUDING THE
+ * WARRANTIES OF DESIGN, MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE, OR ARISING FROM A COURSE OF DEALING, USAGE OR TRADE PRACTICE.
+ *
+ * Sun RPC is provided with no support and without any obligation on the
+ * part of Sun Microsystems, Inc. to assist in its use, correction,
+ * modification or enhancement.
+ *
+ * SUN MICROSYSTEMS, INC. SHALL HAVE NO LIABILITY WITH RESPECT TO THE
+ * INFRINGEMENT OF COPYRIGHTS, TRADE SECRETS OR ANY PATENTS BY SUN RPC
+ * OR ANY PART THEREOF.
+ *
+ * In no event will Sun Microsystems, Inc. be liable for any lost revenue
+ * or profits or other special, indirect and consequential damages, even if
+ * Sun has been advised of the possibility of such damages.
+ *
+ * Sun Microsystems, Inc.
+ * 2550 Garcia Avenue
+ * Mountain View, California 94043
+ */
+
+#if defined(LIBC_SCCS) && !defined(lint)
+static char *sccsid2 = "@(#)xdr_reference.c 1.11 87/08/11 SMI";
+static char *sccsid = "@(#)xdr_reference.c 2.1 88/07/29 4.0 RPCSRC";
+#endif
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * xdr_reference.c, Generic XDR routines implementation.
+ *
+ * Copyright (C) 1987, Sun Microsystems, Inc.
+ *
+ * These are the "non-trivial" xdr primitives used to serialize and de-serialize
+ * "pointers". See xdr.h for more info on the interface to xdr.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/malloc.h>
+
+#include <rpc/types.h>
+#include <rpc/xdr.h>
+
+/*
+ * XDR an indirect pointer
+ * xdr_reference is for recursively translating a structure that is
+ * referenced by a pointer inside the structure that is currently being
+ * translated. pp references a pointer to storage. If *pp is null
+ * the necessary storage is allocated.
+ * size is the size of the referenced structure.
+ * proc is the routine to handle the referenced structure.
+ */
+bool_t
+xdr_reference(XDR *xdrs,
+ caddr_t *pp, /* the pointer to work on */
+ u_int size, /* size of the object pointed to */
+ xdrproc_t proc) /* xdr routine to handle the object */
+{
+ caddr_t loc = *pp;
+ bool_t stat;
+
+ if (loc == NULL)
+ switch (xdrs->x_op) {
+ case XDR_FREE:
+ return (TRUE);
+
+ case XDR_DECODE:
+ *pp = loc = (caddr_t) mem_alloc(size);
+ if (loc == NULL) {
+ printf("xdr_reference: out of memory");
+ return (FALSE);
+ }
+ memset(loc, 0, size);
+ break;
+
+ case XDR_ENCODE:
+ break;
+ }
+
+ stat = (*proc)(xdrs, loc);
+
+ if (xdrs->x_op == XDR_FREE) {
+ mem_free(loc, size);
+ *pp = NULL;
+ }
+ return (stat);
+}
+
+
+/*
+ * xdr_pointer():
+ *
+ * XDR a pointer to a possibly recursive data structure. This
+ * differs from xdr_reference in that it can serialize/deserialize
+ * trees correctly.
+ *
+ * What's sent is actually a union:
+ *
+ * union object_pointer switch (boolean b) {
+ * case TRUE: object_data data;
+ * case FALSE: void nothing;
+ * }
+ *
+ * > objpp: Pointer to the pointer to the object.
+ * > obj_size: size of the object.
+ * > xdr_obj: routine to XDR an object.
+ *
+ */
+bool_t
+xdr_pointer(XDR *xdrs, char **objpp, u_int obj_size, xdrproc_t xdr_obj)
+{
+
+ bool_t more_data;
+
+ more_data = (*objpp != NULL);
+ if (! xdr_bool(xdrs,&more_data)) {
+ return (FALSE);
+ }
+ if (! more_data) {
+ *objpp = NULL;
+ return (TRUE);
+ }
+ return (xdr_reference(xdrs,objpp,obj_size,xdr_obj));
+}
diff --git a/sys/xdr/xdr_sizeof.c b/sys/xdr/xdr_sizeof.c
new file mode 100644
index 0000000..5452834
--- /dev/null
+++ b/sys/xdr/xdr_sizeof.c
@@ -0,0 +1,162 @@
+/*
+ * Sun RPC is a product of Sun Microsystems, Inc. and is provided for
+ * unrestricted use provided that this legend is included on all tape
+ * media and as a part of the software program in whole or part. Users
+ * may copy or modify Sun RPC without charge, but are not authorized
+ * to license or distribute it to anyone else except as part of a product or
+ * program developed by the user.
+ *
+ * SUN RPC IS PROVIDED AS IS WITH NO WARRANTIES OF ANY KIND INCLUDING THE
+ * WARRANTIES OF DESIGN, MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE, OR ARISING FROM A COURSE OF DEALING, USAGE OR TRADE PRACTICE.
+ *
+ * Sun RPC is provided with no support and without any obligation on the
+ * part of Sun Microsystems, Inc. to assist in its use, correction,
+ * modification or enhancement.
+ *
+ * SUN MICROSYSTEMS, INC. SHALL HAVE NO LIABILITY WITH RESPECT TO THE
+ * INFRINGEMENT OF COPYRIGHTS, TRADE SECRETS OR ANY PATENTS BY SUN RPC
+ * OR ANY PART THEREOF.
+ *
+ * In no event will Sun Microsystems, Inc. be liable for any lost revenue
+ * or profits or other special, indirect and consequential damages, even if
+ * Sun has been advised of the possibility of such damages.
+ *
+ * Sun Microsystems, Inc.
+ * 2550 Garcia Avenue
+ * Mountain View, California 94043
+ */
+/*
+ * xdr_sizeof.c
+ *
+ * Copyright 1990 Sun Microsystems, Inc.
+ *
+ * General purpose routine to see how much space something will use
+ * when serialized using XDR.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/malloc.h>
+
+#include <rpc/types.h>
+#include <rpc/xdr.h>
+
+/* ARGSUSED */
+static bool_t
+x_putlong(XDR *xdrs, const long *longp)
+{
+
+ xdrs->x_handy += BYTES_PER_XDR_UNIT;
+ return (TRUE);
+}
+
+/* ARGSUSED */
+static bool_t
+x_putbytes(XDR *xdrs, const char *bp, u_int len)
+{
+
+ xdrs->x_handy += len;
+ return (TRUE);
+}
+
+static u_int
+x_getpostn(XDR *xdrs)
+{
+
+ return (xdrs->x_handy);
+}
+
+/* ARGSUSED */
+static bool_t
+x_setpostn(XDR *xdrs, u_int pos)
+{
+
+ /* This is not allowed */
+ return (FALSE);
+}
+
+static int32_t *
+x_inline(XDR *xdrs, u_int len)
+{
+
+ if (len == 0) {
+ return (NULL);
+ }
+ if (xdrs->x_op != XDR_ENCODE) {
+ return (NULL);
+ }
+ if (len < (u_int)(uintptr_t)xdrs->x_base) {
+ /* x_private was already allocated */
+ xdrs->x_handy += len;
+ return ((int32_t *) xdrs->x_private);
+ } else {
+ /* Free the earlier space and allocate new area */
+ if (xdrs->x_private)
+ free(xdrs->x_private, M_RPC);
+ if ((xdrs->x_private = (caddr_t) malloc(len, M_RPC, M_WAITOK)) == NULL) {
+ xdrs->x_base = 0;
+ return (NULL);
+ }
+ xdrs->x_base = (caddr_t)(uintptr_t) len;
+ xdrs->x_handy += len;
+ return ((int32_t *) xdrs->x_private);
+ }
+}
+
+static int
+harmless(void)
+{
+
+ /* Always return FALSE/NULL, as the case may be */
+ return (0);
+}
+
+static void
+x_destroy(XDR *xdrs)
+{
+
+ xdrs->x_handy = 0;
+ xdrs->x_base = 0;
+ if (xdrs->x_private) {
+ free(xdrs->x_private, M_RPC);
+ xdrs->x_private = NULL;
+ }
+ return;
+}
+
+unsigned long
+xdr_sizeof(xdrproc_t func, void *data)
+{
+ XDR x;
+ struct xdr_ops ops;
+ bool_t stat;
+ /* to stop ANSI-C compiler from complaining */
+ typedef bool_t (* dummyfunc1)(XDR *, long *);
+ typedef bool_t (* dummyfunc2)(XDR *, caddr_t, u_int);
+
+ ops.x_putlong = x_putlong;
+ ops.x_putbytes = x_putbytes;
+ ops.x_inline = x_inline;
+ ops.x_getpostn = x_getpostn;
+ ops.x_setpostn = x_setpostn;
+ ops.x_destroy = x_destroy;
+
+ /* the other harmless ones */
+ ops.x_getlong = (dummyfunc1) harmless;
+ ops.x_getbytes = (dummyfunc2) harmless;
+
+ x.x_op = XDR_ENCODE;
+ x.x_ops = &ops;
+ x.x_handy = 0;
+ x.x_private = (caddr_t) NULL;
+ x.x_base = (caddr_t) 0;
+
+ stat = func(&x, data);
+ if (x.x_private)
+ free(x.x_private, M_RPC);
+ return (stat == TRUE ? (unsigned) x.x_handy: 0);
+}
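As a usage note (not part of the patch), xdr_sizeof() is typically called to size a buffer before the real encode pass; the wrapper name below is invented and simply measures a counted string via xdr_wrapstring().

#include <sys/param.h>
#include <sys/systm.h>

#include <rpc/types.h>
#include <rpc/xdr.h>

static unsigned long
example_encoded_size(char *str)
{

	/* Returns the XDR-encoded length in bytes, or 0 if encoding fails. */
	return (xdr_sizeof((xdrproc_t) xdr_wrapstring, &str));
}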
diff --git a/usr.sbin/Makefile b/usr.sbin/Makefile
index 4606f10..36094cf 100644
--- a/usr.sbin/Makefile
+++ b/usr.sbin/Makefile
@@ -32,6 +32,7 @@ SUBDIR= ac \
chown \
chroot \
ckdist \
+ clear_locks \
config \
cron \
crunch \
diff --git a/usr.sbin/clear_locks/Makefile b/usr.sbin/clear_locks/Makefile
new file mode 100644
index 0000000..8f28915
--- /dev/null
+++ b/usr.sbin/clear_locks/Makefile
@@ -0,0 +1,8 @@
+# $FreeBSD$
+
+PROG= clear_locks
+MAN= clear_locks.8
+LDADD= -lrpcsvc
+WARNS= 6
+
+.include <bsd.prog.mk>
diff --git a/usr.sbin/clear_locks/clear_locks.8 b/usr.sbin/clear_locks/clear_locks.8
new file mode 100644
index 0000000..9f6cafe
--- /dev/null
+++ b/usr.sbin/clear_locks/clear_locks.8
@@ -0,0 +1,51 @@
+.\" Copyright (c) 2008 Isilon Inc http://www.isilon.com/
+.\" Authors: Doug Rabson <dfr@rabson.org>
+.\" Developed with Red Inc: Alfred Perlstein <alfred@freebsd.org>
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\" $FreeBSD$
+.\"
+.Dd March 19, 2008
+.Dt CLEAR_LOCKS 8
+.Os
+.Sh NAME
+.Nm clear_locks
+.Nd clear locks held on behalf of an NFS client
+.Sh SYNOPSIS
+.Nm
+.Ar hostname
+.Sh DESCRIPTION
+The
+.Nm
+command can be used to clear file locks held by an NFS client.
+This should only be used to handle problems caused by an NFS client
+crashing while holding locks and failing to clear them itself when it
+reboots.
+.Sh SEE ALSO
+.Xr rpc.lockd 8
+.Sh HISTORY
+A version of
+.Nm
+appeared in
+.Tn SunOS
+4.
diff --git a/usr.sbin/clear_locks/clear_locks.c b/usr.sbin/clear_locks/clear_locks.c
new file mode 100644
index 0000000..1249c12
--- /dev/null
+++ b/usr.sbin/clear_locks/clear_locks.c
@@ -0,0 +1,70 @@
+/*-
+ * Copyright (c) 2008 Isilon Inc http://www.isilon.com/
+ * Authors: Doug Rabson <dfr@rabson.org>
+ * Developed with Red Inc: Alfred Perlstein <alfred@freebsd.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <rpc/rpc.h>
+#include <rpcsvc/nlm_prot.h>
+
+int
+main(int argc, char **argv)
+{
+ enum clnt_stat stat;
+ char *hostname;
+ nlm4_notify notify;
+
+ if (argc != 2) {
+ fprintf(stderr, "Usage: clear_locks <hostname>\n");
+ exit(1);
+ }
+ hostname = argv[1];
+
+ if (geteuid() != 0) {
+ fprintf(stderr, "clear_locks: must be root\n");
+ exit(1);
+ }
+
+ notify.name = hostname;
+ notify.state = 0;
+ stat = rpc_call("localhost", NLM_PROG, NLM_VERS4, NLM4_FREE_ALL,
+ (xdrproc_t) xdr_nlm4_notify, (void *) &notify,
+ (xdrproc_t) xdr_void, NULL, NULL);
+
+ if (stat != RPC_SUCCESS) {
+ clnt_perrno(stat);
+ exit(1);
+ }
+ fprintf(stderr, "clear_locks: cleared locks for hostname %s\n",
+ hostname);
+
+ return (0);
+}
diff --git a/usr.sbin/rpc.lockd/lockd.c b/usr.sbin/rpc.lockd/lockd.c
index 99081d4..3b1572e 100644
--- a/usr.sbin/rpc.lockd/lockd.c
+++ b/usr.sbin/rpc.lockd/lockd.c
@@ -48,6 +48,7 @@ __RCSID("$NetBSD: lockd.c,v 1.7 2000/08/12 18:08:44 thorpej Exp $");
#include <sys/types.h>
#include <sys/socket.h>
+#include <sys/stat.h>
#include <netinet/in.h>
#include <arpa/inet.h>
@@ -76,13 +77,17 @@ int _rpcsvcdirty = 0;
int grace_expired;
int nsm_state;
+int kernel_lockd;
pid_t client_pid;
struct mon mon_host;
char **hosts, *svcport_str = NULL;
int nhosts = 0;
int xcreated = 0;
+char **addrs; /* actually (netid, uaddr) pairs */
+int naddrs; /* count of how many (netid, uaddr) pairs */
void create_service(struct netconfig *nconf);
+void lookup_addresses(struct netconfig *nconf);
void init_nsm(void);
void nlm_prog_0(struct svc_req *, SVCXPRT *);
void nlm_prog_1(struct svc_req *, SVCXPRT *);
@@ -93,6 +98,11 @@ void usage(void);
void sigalarm_handler(void);
+/*
+ * XXX move to some header file.
+ */
+#define _PATH_RPCLOCKDSOCK "/var/run/rpclockd.sock"
+
int
main(int argc, char **argv)
{
@@ -106,7 +116,7 @@ main(int argc, char **argv)
int maxrec = RPC_MAXDATASIZE;
in_port_t svcport = 0;
- while ((ch = getopt(argc, argv, "d:g:h:p:")) != (-1)) {
+ while ((ch = getopt(argc, argv, "d:g:h:kp:")) != (-1)) {
switch (ch) {
case 'd':
debug_level = atoi(optarg);
@@ -143,6 +153,9 @@ main(int argc, char **argv)
out_of_mem();
}
break;
+ case 'k':
+ kernel_lockd = TRUE;
+ break;
case 'p':
endptr = NULL;
svcport = (in_port_t)strtoul(optarg, &endptr, 10);
@@ -221,19 +234,77 @@ main(int argc, char **argv)
hosts[nhosts - 1] = "127.0.0.1";
}
- nc_handle = setnetconfig();
- while ((nconf = getnetconfig(nc_handle))) {
- /* We want to listen only on udp6, tcp6, udp, tcp transports */
- if (nconf->nc_flag & NC_VISIBLE) {
- /* Skip if there's no IPv6 support */
- if (have_v6 == 0 && strcmp(nconf->nc_protofmly, "inet6") == 0) {
- /* DO NOTHING */
- } else {
- create_service(nconf);
+ if (kernel_lockd) {
+ /*
+ * For the kernel lockd case, we run a cut-down RPC
+ * service on a local-domain socket. The kernel's RPC
+ * server will pass what it can't handle (mainly
+ * client replies) down to us. This can go away
+ * entirely if/when we move the client side of NFS
+ * locking into the kernel.
+ */
+ struct sockaddr_un sun;
+ int fd, oldmask;
+ SVCXPRT *xprt;
+
+ memset(&sun, 0, sizeof sun);
+ sun.sun_family = AF_LOCAL;
+ unlink(_PATH_RPCLOCKDSOCK);
+ strcpy(sun.sun_path, _PATH_RPCLOCKDSOCK);
+ sun.sun_len = SUN_LEN(&sun);
+ fd = socket(AF_LOCAL, SOCK_STREAM, 0);
+ if (fd < 0) {
+ err(1, "Can't create local lockd socket");
+ }
+ oldmask = umask(S_IXUSR|S_IRWXG|S_IRWXO);
+ if (bind(fd, (struct sockaddr *) &sun, sun.sun_len) < 0) {
+ err(1, "Can't bind local lockd socket");
+ }
+ umask(oldmask);
+ if (listen(fd, SOMAXCONN) < 0) {
+ err(1, "Can't listen on local lockd socket");
+ }
+ xprt = svc_vc_create(fd, RPC_MAXDATASIZE, RPC_MAXDATASIZE);
+ if (!xprt) {
+ err(1, "Can't create transport for local lockd socket");
+ }
+ if (!svc_reg(xprt, NLM_PROG, NLM_VERS4, nlm_prog_4, NULL)) {
+ err(1, "Can't register service for local lockd socket");
+ }
+
+ /*
+ * We need to look up the addresses so that we can
+ * hand uaddrs (ascii encoded address+port strings) to
+ * the kernel.
+ */
+ nc_handle = setnetconfig();
+ while ((nconf = getnetconfig(nc_handle))) {
+ /* We want to listen only on udp6, tcp6, udp, tcp transports */
+ if (nconf->nc_flag & NC_VISIBLE) {
+ /* Skip if there's no IPv6 support */
+ if (have_v6 == 0 && strcmp(nconf->nc_protofmly, "inet6") == 0) {
+ /* DO NOTHING */
+ } else {
+ lookup_addresses(nconf);
+ }
+ }
+ }
+ endnetconfig(nc_handle);
+ } else {
+ nc_handle = setnetconfig();
+ while ((nconf = getnetconfig(nc_handle))) {
+ /* We want to listen only on udp6, tcp6, udp, tcp transports */
+ if (nconf->nc_flag & NC_VISIBLE) {
+ /* Skip if there's no IPv6 support */
+ if (have_v6 == 0 && strcmp(nconf->nc_protofmly, "inet6") == 0) {
+ /* DO NOTHING */
+ } else {
+ create_service(nconf);
+ }
}
}
+ endnetconfig(nc_handle);
}
- endnetconfig(nc_handle);
/*
* Note that it is NOT sensible to run this program from inetd - the
@@ -259,14 +330,28 @@ main(int argc, char **argv)
strerror(errno));
exit(1);
}
- grace_expired = 0;
- alarm(grace_period);
- init_nsm();
+ if (kernel_lockd) {
+ client_pid = client_request();
+
+ /*
+ * Create a child process to enter the kernel and then
+ * wait for RPCs on our local domain socket.
+ */
+ if (!fork())
+ nlm_syscall(debug_level, grace_period, naddrs, addrs);
+ else
+ svc_run();
+ } else {
+ grace_expired = 0;
+ alarm(grace_period);
+
+ init_nsm();
- client_pid = client_request();
+ client_pid = client_request();
- svc_run(); /* Should never return */
+ svc_run(); /* Should never return */
+ }
exit(1);
}
@@ -499,6 +584,155 @@ create_service(struct netconfig *nconf)
} /* end while */
}
+/*
+ * Look up addresses for the kernel to create transports for.
+ */
+void
+lookup_addresses(struct netconfig *nconf)
+{
+ struct addrinfo hints, *res = NULL;
+ struct sockaddr_in *sin;
+ struct sockaddr_in6 *sin6;
+ struct __rpc_sockinfo si;
+ struct netbuf servaddr;
+ SVCXPRT *transp = NULL;
+ int aicode;
+ int nhostsbak;
+ int r;
+ int registered = 0;
+ u_int32_t host_addr[4]; /* IPv4 or IPv6 */
+ char *uaddr;
+
+ if ((nconf->nc_semantics != NC_TPI_CLTS) &&
+ (nconf->nc_semantics != NC_TPI_COTS) &&
+ (nconf->nc_semantics != NC_TPI_COTS_ORD))
+ return; /* not my type */
+
+ /*
+ * XXX - using RPC library internal functions.
+ */
+ if (!__rpc_nconf2sockinfo(nconf, &si)) {
+ syslog(LOG_ERR, "cannot get information for %s",
+ nconf->nc_netid);
+ return;
+ }
+
+ /* Get rpc.lockd's address on this transport */
+ memset(&hints, 0, sizeof hints);
+ hints.ai_flags = AI_PASSIVE;
+ hints.ai_family = si.si_af;
+ hints.ai_socktype = si.si_socktype;
+ hints.ai_protocol = si.si_proto;
+
+ /*
+ * Bind to specific IPs if asked to
+ */
+ nhostsbak = nhosts;
+ while (nhostsbak > 0) {
+ --nhostsbak;
+
+ switch (hints.ai_family) {
+ case AF_INET:
+ if (inet_pton(AF_INET, hosts[nhostsbak],
+ host_addr) == 1) {
+ hints.ai_flags |= AI_NUMERICHOST;
+ } else {
+ /*
+ * Skip if we have an AF_INET6 address.
+ */
+ if (inet_pton(AF_INET6, hosts[nhostsbak],
+ host_addr) == 1) {
+ continue;
+ }
+ }
+ break;
+ case AF_INET6:
+ if (inet_pton(AF_INET6, hosts[nhostsbak],
+ host_addr) == 1) {
+ hints.ai_flags |= AI_NUMERICHOST;
+ } else {
+ /*
+ * Skip if we have an AF_INET address.
+ */
+ if (inet_pton(AF_INET, hosts[nhostsbak],
+ host_addr) == 1) {
+ continue;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * If no hosts were specified, just bind to INADDR_ANY
+ */
+ if (strcmp("*", hosts[nhostsbak]) == 0) {
+ if (svcport_str == NULL) {
+ res = malloc(sizeof(struct addrinfo));
+ if (res == NULL)
+ out_of_mem();
+ res->ai_flags = hints.ai_flags;
+ res->ai_family = hints.ai_family;
+ res->ai_protocol = hints.ai_protocol;
+ switch (res->ai_family) {
+ case AF_INET:
+ sin = malloc(sizeof(struct sockaddr_in));
+ if (sin == NULL)
+ out_of_mem();
+ sin->sin_family = AF_INET;
+ sin->sin_port = htons(0);
+ sin->sin_addr.s_addr = htonl(INADDR_ANY);
+ res->ai_addr = (struct sockaddr*) sin;
+ res->ai_addrlen = (socklen_t)
+ sizeof(struct sockaddr_in);
+ break;
+ case AF_INET6:
+ sin6 = malloc(sizeof(struct sockaddr_in6));
+ if (sin6 == NULL)
+ out_of_mem();
+ sin6->sin6_family = AF_INET6;
+ sin6->sin6_port = htons(0);
+ sin6->sin6_addr = in6addr_any;
+ res->ai_addr = (struct sockaddr*) sin6;
+ res->ai_addrlen = (socklen_t) sizeof(struct sockaddr_in6);
+ break;
+ default:
+ break;
+ }
+ } else {
+ if ((aicode = getaddrinfo(NULL, svcport_str,
+ &hints, &res)) != 0) {
+ syslog(LOG_ERR,
+ "cannot get local address for %s: %s",
+ nconf->nc_netid,
+ gai_strerror(aicode));
+ continue;
+ }
+ }
+ } else {
+ if ((aicode = getaddrinfo(hosts[nhostsbak], svcport_str,
+ &hints, &res)) != 0) {
+ syslog(LOG_ERR,
+ "cannot get local address for %s: %s",
+ nconf->nc_netid, gai_strerror(aicode));
+ continue;
+ }
+ }
+
+ servaddr.len = servaddr.maxlen = res->ai_addr->sa_len;
+ servaddr.buf = res->ai_addr;
+ uaddr = taddr2uaddr(nconf, &servaddr);
+
+ addrs = realloc(addrs, 2 * (naddrs + 1) * sizeof(char *));
+ if (!addrs)
+ out_of_mem();
+ addrs[2 * naddrs] = strdup(nconf->nc_netid);
+ addrs[2 * naddrs + 1] = uaddr;
+ naddrs++;
+ } /* end while */
+}
+
void
sigalarm_handler(void)
{
@@ -509,7 +743,7 @@ sigalarm_handler(void)
void
usage()
{
- errx(1, "usage: rpc.lockd [-d <debuglevel>]"
+ errx(1, "usage: rpc.lockd [-k] [-d <debuglevel>]"
" [-g <grace period>] [-h <bindip>] [-p <port>]");
}
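Purely for illustration (not part of the patch): the addrs array that lookup_addresses() builds, and which main() passes to nlm_syscall(), alternates netid and universal-address strings. The values below are made up; for IPv4 the uaddr format is "a.b.c.d.p1.p2", where p1 and p2 encode the port.

static const char *example_addrs[] = {
	"udp", "10.0.0.1.3.232",	/* 10.0.0.1, port 1000 (3*256+232) */
	"tcp", "10.0.0.1.3.232"
};
/* ...handed over with naddrs == 2, i.e. two (netid, uaddr) pairs. */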
diff --git a/usr.sbin/rpc.lockd/rpc.lockd.8 b/usr.sbin/rpc.lockd/rpc.lockd.8
index 4fabe5d..9cd429b 100644
--- a/usr.sbin/rpc.lockd/rpc.lockd.8
+++ b/usr.sbin/rpc.lockd/rpc.lockd.8
@@ -41,6 +41,7 @@
.Nd NFS file locking daemon
.Sh SYNOPSIS
.Nm
+.Op Fl k
.Op Fl d Ar debug_level
.Op Fl g Ar grace period
.Op Fl h Ar bindip
@@ -58,6 +59,11 @@ with
Options and operands available for
.Nm :
.Bl -tag -width indent
+.It Fl k
+The
+.Fl k
+option specifies the use of the kernel-resident NFS lock manager, if
+possible.
.It Fl d
The
.Fl d