summaryrefslogtreecommitdiffstats
path: root/sys/nlm
diff options
context:
space:
mode:
authordfr <dfr@FreeBSD.org>2008-03-26 15:23:12 +0000
committerdfr <dfr@FreeBSD.org>2008-03-26 15:23:12 +0000
commit79d2dfdaa69db38c43daed9744a6dbd0568189b5 (patch)
treeeaf6a0fa52bc76253126814ddab4cbf78722a8a5 /sys/nlm
parent632e5d39f769e15274575347233e9d7aa364c0d6 (diff)
downloadFreeBSD-src-79d2dfdaa69db38c43daed9744a6dbd0568189b5.zip
FreeBSD-src-79d2dfdaa69db38c43daed9744a6dbd0568189b5.tar.gz
Add the new kernel-mode NFS Lock Manager. To use it instead of the
user-mode lock manager, build a kernel with the NFSLOCKD option and add '-k' to 'rpc_lockd_flags' in rc.conf. Highlights include: * Thread-safe kernel RPC client - many threads can use the same RPC client handle safely with replies being de-multiplexed at the socket upcall (typically driven directly by the NIC interrupt) and handed off to whichever thread matches the reply. For UDP sockets, many RPC clients can share the same socket. This allows the use of a single privileged UDP port number to talk to an arbitrary number of remote hosts. * Single-threaded kernel RPC server. Adding support for multi-threaded server would be relatively straightforward and would follow approximately the Solaris KPI. A single thread should be sufficient for the NLM since it should rarely block in normal operation. * Kernel mode NLM server supporting cancel requests and granted callbacks. I've tested the NLM server reasonably extensively - it passes both my own tests and the NFS Connectathon locking tests running on Solaris, Mac OS X and Ubuntu Linux. * Userland NLM client supported. While the NLM server doesn't have support for the local NFS client's locking needs, it does have to field async replies and granted callbacks from remote NLMs that the local client has contacted. We relay these replies to the userland rpc.lockd over a local domain RPC socket. * Robust deadlock detection for the local lock manager. In particular it will detect deadlocks caused by a lock request that covers more than one blocking request. As required by the NLM protocol, all deadlock detection happens synchronously - a user is guaranteed that if a lock request isn't rejected immediately, the lock will eventually be granted. The old system allowed for a 'deferred deadlock' condition where a blocked lock request could wake up and find that some other deadlock-causing lock owner had beaten them to the lock. 
* Since both local and remote locks are managed by the same kernel locking code, local and remote processes can safely use file locks for mutual exclusion. Local processes have no fairness advantage compared to remote processes when contending to lock a region that has just been unlocked - the local lock manager enforces a strict first-come first-served model for both local and remote lockers. Sponsored by: Isilon Systems PR: 95247 107555 115524 116679 MFC after: 2 weeks
Diffstat (limited to 'sys/nlm')
-rw-r--r--sys/nlm/nlm.h119
-rw-r--r--sys/nlm/nlm_prot.h448
-rw-r--r--sys/nlm/nlm_prot_clnt.c372
-rw-r--r--sys/nlm/nlm_prot_impl.c1783
-rw-r--r--sys/nlm/nlm_prot_server.c762
-rw-r--r--sys/nlm/nlm_prot_svc.c509
-rw-r--r--sys/nlm/nlm_prot_xdr.c454
-rw-r--r--sys/nlm/sm_inter.h112
-rw-r--r--sys/nlm/sm_inter_xdr.c107
9 files changed, 4666 insertions, 0 deletions
diff --git a/sys/nlm/nlm.h b/sys/nlm/nlm.h
new file mode 100644
index 0000000..32bb974
--- /dev/null
+++ b/sys/nlm/nlm.h
@@ -0,0 +1,119 @@
+/*-
+ * Copyright (c) 2008 Isilon Inc http://www.isilon.com/
+ * Authors: Doug Rabson <dfr@rabson.org>
+ * Developed with Red Inc: Alfred Perlstein <alfred@freebsd.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _NLM_NLM_H_
+#define _NLM_NLM_H_
+
+#ifdef _KERNEL
+
+#ifdef _SYS_MALLOC_H_
+MALLOC_DECLARE(M_NLM);
+#endif
+
+struct nlm_host;
+
+/*
+ * Copy a struct netobj.
+ */
+extern void nlm_copy_netobj(struct netobj *dst, struct netobj *src,
+ struct malloc_type *type);
+
+/*
+ * Search for an existing NLM host that matches the given name
+ * (typically the caller_name element of an nlm4_lock). If none is
+ * found, create a new host. If 'rqstp' is non-NULL, record the remote
+ * address of the host so that we can call it back for async
+ * responses.
+ */
+extern struct nlm_host *nlm_find_host_by_name(const char *name,
+ struct svc_req *rqstp);
+
+/*
+ * Search for an existing NLM host that matches the given remote
+ * address. If none is found, create a new host with the requested
+ * address and remember 'vers' as the NLM protocol version to use for
+ * that host.
+ */
+extern struct nlm_host *nlm_find_host_by_addr(const struct sockaddr *addr,
+ int vers);
+
+/*
+ * Return an RPC client handle that can be used to talk to the NLM
+ * running on the given host.
+ */
+extern CLIENT *nlm_host_get_rpc(struct nlm_host *host);
+
+/*
+ * Called when a host restarts.
+ */
+extern void nlm_sm_notify(nlm_sm_status *argp);
+
+/*
+ * Implementation for lock testing RPCs. Returns the NLM host that
+ * matches the RPC arguments.
+ */
+extern struct nlm_host *nlm_do_test(nlm4_testargs *argp,
+ nlm4_testres *result, struct svc_req *rqstp);
+
+/*
+ * Implementation for lock setting RPCs. Returns the NLM host that
+ * matches the RPC arguments. If monitor is TRUE, set up an NSM
+ * monitor for this host.
+ */
+extern struct nlm_host *nlm_do_lock(nlm4_lockargs *argp,
+ nlm4_res *result, struct svc_req *rqstp, bool_t monitor);
+
+/*
+ * Implementation for cancelling a pending lock request. Returns the
+ * NLM host that matches the RPC arguments.
+ */
+extern struct nlm_host *nlm_do_cancel(nlm4_cancargs *argp,
+ nlm4_res *result, struct svc_req *rqstp);
+
+/*
+ * Implementation for unlocking RPCs. Returns the NLM host that
+ * matches the RPC arguments.
+ */
+extern struct nlm_host *nlm_do_unlock(nlm4_unlockargs *argp,
+ nlm4_res *result, struct svc_req *rqstp);
+
+/*
+ * Free all locks associated with the hostname argp->name.
+ */
+extern void nlm_do_free_all(nlm4_notify *argp);
+
+/*
+ * Find an RPC transport that can be used to communicate with the
+ * userland part of lockd.
+ */
+extern CLIENT *nlm_user_lockd(void);
+
+#endif
+
+#endif
diff --git a/sys/nlm/nlm_prot.h b/sys/nlm/nlm_prot.h
new file mode 100644
index 0000000..6197189
--- /dev/null
+++ b/sys/nlm/nlm_prot.h
@@ -0,0 +1,448 @@
+/*
+ * Please do not edit this file.
+ * It was generated using rpcgen.
+ */
+/* $FreeBSD$ */
+
+#ifndef _NLM_PROT_H_RPCGEN
+#define _NLM_PROT_H_RPCGEN
+
+#include <rpc/rpc.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define LM_MAXSTRLEN 1024
+#define MAXNAMELEN LM_MAXSTRLEN+1
+
+enum nlm_stats {
+ nlm_granted = 0,
+ nlm_denied = 1,
+ nlm_denied_nolocks = 2,
+ nlm_blocked = 3,
+ nlm_denied_grace_period = 4,
+ nlm_deadlck = 5
+};
+typedef enum nlm_stats nlm_stats;
+
+struct nlm_holder {
+ bool_t exclusive;
+ int svid;
+ netobj oh;
+ u_int l_offset;
+ u_int l_len;
+};
+typedef struct nlm_holder nlm_holder;
+
+struct nlm_testrply {
+ nlm_stats stat;
+ union {
+ struct nlm_holder holder;
+ } nlm_testrply_u;
+};
+typedef struct nlm_testrply nlm_testrply;
+
+struct nlm_stat {
+ nlm_stats stat;
+};
+typedef struct nlm_stat nlm_stat;
+
+struct nlm_res {
+ netobj cookie;
+ nlm_stat stat;
+};
+typedef struct nlm_res nlm_res;
+
+struct nlm_testres {
+ netobj cookie;
+ nlm_testrply stat;
+};
+typedef struct nlm_testres nlm_testres;
+
+struct nlm_lock {
+ char *caller_name;
+ netobj fh;
+ netobj oh;
+ int svid;
+ u_int l_offset;
+ u_int l_len;
+};
+typedef struct nlm_lock nlm_lock;
+
+struct nlm_lockargs {
+ netobj cookie;
+ bool_t block;
+ bool_t exclusive;
+ struct nlm_lock alock;
+ bool_t reclaim;
+ int state;
+};
+typedef struct nlm_lockargs nlm_lockargs;
+
+struct nlm_cancargs {
+ netobj cookie;
+ bool_t block;
+ bool_t exclusive;
+ struct nlm_lock alock;
+};
+typedef struct nlm_cancargs nlm_cancargs;
+
+struct nlm_testargs {
+ netobj cookie;
+ bool_t exclusive;
+ struct nlm_lock alock;
+};
+typedef struct nlm_testargs nlm_testargs;
+
+struct nlm_unlockargs {
+ netobj cookie;
+ struct nlm_lock alock;
+};
+typedef struct nlm_unlockargs nlm_unlockargs;
+/*
+ * The following enums are actually bit encoded for efficient
+ * boolean algebra.... DON'T change them.....
+ */
+
+enum fsh_mode {
+ fsm_DN = 0,
+ fsm_DR = 1,
+ fsm_DW = 2,
+ fsm_DRW = 3
+};
+typedef enum fsh_mode fsh_mode;
+
+enum fsh_access {
+ fsa_NONE = 0,
+ fsa_R = 1,
+ fsa_W = 2,
+ fsa_RW = 3
+};
+typedef enum fsh_access fsh_access;
+
+struct nlm_share {
+ char *caller_name;
+ netobj fh;
+ netobj oh;
+ fsh_mode mode;
+ fsh_access access;
+};
+typedef struct nlm_share nlm_share;
+
+struct nlm_shareargs {
+ netobj cookie;
+ nlm_share share;
+ bool_t reclaim;
+};
+typedef struct nlm_shareargs nlm_shareargs;
+
+struct nlm_shareres {
+ netobj cookie;
+ nlm_stats stat;
+ int sequence;
+};
+typedef struct nlm_shareres nlm_shareres;
+
+struct nlm_notify {
+ char *name;
+ long state;
+};
+typedef struct nlm_notify nlm_notify;
+/* definitions for NLM version 4 */
+
+enum nlm4_stats {
+ nlm4_granted = 0,
+ nlm4_denied = 1,
+ nlm4_denied_nolocks = 2,
+ nlm4_blocked = 3,
+ nlm4_denied_grace_period = 4,
+ nlm4_deadlck = 5,
+ nlm4_rofs = 6,
+ nlm4_stale_fh = 7,
+ nlm4_fbig = 8,
+ nlm4_failed = 9
+};
+typedef enum nlm4_stats nlm4_stats;
+
+struct nlm4_stat {
+ nlm4_stats stat;
+};
+typedef struct nlm4_stat nlm4_stat;
+
+struct nlm4_holder {
+ bool_t exclusive;
+ u_int32_t svid;
+ netobj oh;
+ u_int64_t l_offset;
+ u_int64_t l_len;
+};
+typedef struct nlm4_holder nlm4_holder;
+
+struct nlm4_lock {
+ char *caller_name;
+ netobj fh;
+ netobj oh;
+ u_int32_t svid;
+ u_int64_t l_offset;
+ u_int64_t l_len;
+};
+typedef struct nlm4_lock nlm4_lock;
+
+struct nlm4_share {
+ char *caller_name;
+ netobj fh;
+ netobj oh;
+ fsh_mode mode;
+ fsh_access access;
+};
+typedef struct nlm4_share nlm4_share;
+
+struct nlm4_testrply {
+ nlm4_stats stat;
+ union {
+ struct nlm4_holder holder;
+ } nlm4_testrply_u;
+};
+typedef struct nlm4_testrply nlm4_testrply;
+
+struct nlm4_testres {
+ netobj cookie;
+ nlm4_testrply stat;
+};
+typedef struct nlm4_testres nlm4_testres;
+
+struct nlm4_testargs {
+ netobj cookie;
+ bool_t exclusive;
+ struct nlm4_lock alock;
+};
+typedef struct nlm4_testargs nlm4_testargs;
+
+struct nlm4_res {
+ netobj cookie;
+ nlm4_stat stat;
+};
+typedef struct nlm4_res nlm4_res;
+
+struct nlm4_lockargs {
+ netobj cookie;
+ bool_t block;
+ bool_t exclusive;
+ struct nlm4_lock alock;
+ bool_t reclaim;
+ int state;
+};
+typedef struct nlm4_lockargs nlm4_lockargs;
+
+struct nlm4_cancargs {
+ netobj cookie;
+ bool_t block;
+ bool_t exclusive;
+ struct nlm4_lock alock;
+};
+typedef struct nlm4_cancargs nlm4_cancargs;
+
+struct nlm4_unlockargs {
+ netobj cookie;
+ struct nlm4_lock alock;
+};
+typedef struct nlm4_unlockargs nlm4_unlockargs;
+
+struct nlm4_shareargs {
+ netobj cookie;
+ nlm4_share share;
+ bool_t reclaim;
+};
+typedef struct nlm4_shareargs nlm4_shareargs;
+
+struct nlm4_shareres {
+ netobj cookie;
+ nlm4_stats stat;
+ int sequence;
+};
+typedef struct nlm4_shareres nlm4_shareres;
+
+struct nlm_sm_status {
+ char *mon_name;
+ int state;
+ char priv[16];
+};
+typedef struct nlm_sm_status nlm_sm_status;
+
+struct nlm4_notify {
+ char *name;
+ int32_t state;
+};
+typedef struct nlm4_notify nlm4_notify;
+
+#define NLM_PROG ((unsigned long)(100021))
+#define NLM_SM ((unsigned long)(0))
+
+#define NLM_SM_NOTIFY ((unsigned long)(1))
+extern enum clnt_stat nlm_sm_notify_0(struct nlm_sm_status *, void *, CLIENT *);
+extern bool_t nlm_sm_notify_0_svc(struct nlm_sm_status *, void *, struct svc_req *);
+#define NLM_VERS ((unsigned long)(1))
+
+#define NLM_TEST ((unsigned long)(1))
+extern enum clnt_stat nlm_test_1(struct nlm_testargs *, nlm_testres *, CLIENT *);
+extern bool_t nlm_test_1_svc(struct nlm_testargs *, nlm_testres *, struct svc_req *);
+#define NLM_LOCK ((unsigned long)(2))
+extern enum clnt_stat nlm_lock_1(struct nlm_lockargs *, nlm_res *, CLIENT *);
+extern bool_t nlm_lock_1_svc(struct nlm_lockargs *, nlm_res *, struct svc_req *);
+#define NLM_CANCEL ((unsigned long)(3))
+extern enum clnt_stat nlm_cancel_1(struct nlm_cancargs *, nlm_res *, CLIENT *);
+extern bool_t nlm_cancel_1_svc(struct nlm_cancargs *, nlm_res *, struct svc_req *);
+#define NLM_UNLOCK ((unsigned long)(4))
+extern enum clnt_stat nlm_unlock_1(struct nlm_unlockargs *, nlm_res *, CLIENT *);
+extern bool_t nlm_unlock_1_svc(struct nlm_unlockargs *, nlm_res *, struct svc_req *);
+#define NLM_GRANTED ((unsigned long)(5))
+extern enum clnt_stat nlm_granted_1(struct nlm_testargs *, nlm_res *, CLIENT *);
+extern bool_t nlm_granted_1_svc(struct nlm_testargs *, nlm_res *, struct svc_req *);
+#define NLM_TEST_MSG ((unsigned long)(6))
+extern enum clnt_stat nlm_test_msg_1(struct nlm_testargs *, void *, CLIENT *);
+extern bool_t nlm_test_msg_1_svc(struct nlm_testargs *, void *, struct svc_req *);
+#define NLM_LOCK_MSG ((unsigned long)(7))
+extern enum clnt_stat nlm_lock_msg_1(struct nlm_lockargs *, void *, CLIENT *);
+extern bool_t nlm_lock_msg_1_svc(struct nlm_lockargs *, void *, struct svc_req *);
+#define NLM_CANCEL_MSG ((unsigned long)(8))
+extern enum clnt_stat nlm_cancel_msg_1(struct nlm_cancargs *, void *, CLIENT *);
+extern bool_t nlm_cancel_msg_1_svc(struct nlm_cancargs *, void *, struct svc_req *);
+#define NLM_UNLOCK_MSG ((unsigned long)(9))
+extern enum clnt_stat nlm_unlock_msg_1(struct nlm_unlockargs *, void *, CLIENT *);
+extern bool_t nlm_unlock_msg_1_svc(struct nlm_unlockargs *, void *, struct svc_req *);
+#define NLM_GRANTED_MSG ((unsigned long)(10))
+extern enum clnt_stat nlm_granted_msg_1(struct nlm_testargs *, void *, CLIENT *);
+extern bool_t nlm_granted_msg_1_svc(struct nlm_testargs *, void *, struct svc_req *);
+#define NLM_TEST_RES ((unsigned long)(11))
+extern enum clnt_stat nlm_test_res_1(nlm_testres *, void *, CLIENT *);
+extern bool_t nlm_test_res_1_svc(nlm_testres *, void *, struct svc_req *);
+#define NLM_LOCK_RES ((unsigned long)(12))
+extern enum clnt_stat nlm_lock_res_1(nlm_res *, void *, CLIENT *);
+extern bool_t nlm_lock_res_1_svc(nlm_res *, void *, struct svc_req *);
+#define NLM_CANCEL_RES ((unsigned long)(13))
+extern enum clnt_stat nlm_cancel_res_1(nlm_res *, void *, CLIENT *);
+extern bool_t nlm_cancel_res_1_svc(nlm_res *, void *, struct svc_req *);
+#define NLM_UNLOCK_RES ((unsigned long)(14))
+extern enum clnt_stat nlm_unlock_res_1(nlm_res *, void *, CLIENT *);
+extern bool_t nlm_unlock_res_1_svc(nlm_res *, void *, struct svc_req *);
+#define NLM_GRANTED_RES ((unsigned long)(15))
+extern enum clnt_stat nlm_granted_res_1(nlm_res *, void *, CLIENT *);
+extern bool_t nlm_granted_res_1_svc(nlm_res *, void *, struct svc_req *);
+extern int nlm_prog_1_freeresult(SVCXPRT *, xdrproc_t, caddr_t);
+#define NLM_VERSX ((unsigned long)(3))
+
+#define NLM_SHARE ((unsigned long)(20))
+extern enum clnt_stat nlm_share_3(nlm_shareargs *, nlm_shareres *, CLIENT *);
+extern bool_t nlm_share_3_svc(nlm_shareargs *, nlm_shareres *, struct svc_req *);
+#define NLM_UNSHARE ((unsigned long)(21))
+extern enum clnt_stat nlm_unshare_3(nlm_shareargs *, nlm_shareres *, CLIENT *);
+extern bool_t nlm_unshare_3_svc(nlm_shareargs *, nlm_shareres *, struct svc_req *);
+#define NLM_NM_LOCK ((unsigned long)(22))
+extern enum clnt_stat nlm_nm_lock_3(nlm_lockargs *, nlm_res *, CLIENT *);
+extern bool_t nlm_nm_lock_3_svc(nlm_lockargs *, nlm_res *, struct svc_req *);
+#define NLM_FREE_ALL ((unsigned long)(23))
+extern enum clnt_stat nlm_free_all_3(nlm_notify *, void *, CLIENT *);
+extern bool_t nlm_free_all_3_svc(nlm_notify *, void *, struct svc_req *);
+extern int nlm_prog_3_freeresult(SVCXPRT *, xdrproc_t, caddr_t);
+#define NLM_VERS4 ((unsigned long)(4))
+
+#define NLM4_TEST ((unsigned long)(1))
+extern enum clnt_stat nlm4_test_4(nlm4_testargs *, nlm4_testres *, CLIENT *);
+extern bool_t nlm4_test_4_svc(nlm4_testargs *, nlm4_testres *, struct svc_req *);
+#define NLM4_LOCK ((unsigned long)(2))
+extern enum clnt_stat nlm4_lock_4(nlm4_lockargs *, nlm4_res *, CLIENT *);
+extern bool_t nlm4_lock_4_svc(nlm4_lockargs *, nlm4_res *, struct svc_req *);
+#define NLM4_CANCEL ((unsigned long)(3))
+extern enum clnt_stat nlm4_cancel_4(nlm4_cancargs *, nlm4_res *, CLIENT *);
+extern bool_t nlm4_cancel_4_svc(nlm4_cancargs *, nlm4_res *, struct svc_req *);
+#define NLM4_UNLOCK ((unsigned long)(4))
+extern enum clnt_stat nlm4_unlock_4(nlm4_unlockargs *, nlm4_res *, CLIENT *);
+extern bool_t nlm4_unlock_4_svc(nlm4_unlockargs *, nlm4_res *, struct svc_req *);
+#define NLM4_GRANTED ((unsigned long)(5))
+extern enum clnt_stat nlm4_granted_4(nlm4_testargs *, nlm4_res *, CLIENT *);
+extern bool_t nlm4_granted_4_svc(nlm4_testargs *, nlm4_res *, struct svc_req *);
+#define NLM4_TEST_MSG ((unsigned long)(6))
+extern enum clnt_stat nlm4_test_msg_4(nlm4_testargs *, void *, CLIENT *);
+extern bool_t nlm4_test_msg_4_svc(nlm4_testargs *, void *, struct svc_req *);
+#define NLM4_LOCK_MSG ((unsigned long)(7))
+extern enum clnt_stat nlm4_lock_msg_4(nlm4_lockargs *, void *, CLIENT *);
+extern bool_t nlm4_lock_msg_4_svc(nlm4_lockargs *, void *, struct svc_req *);
+#define NLM4_CANCEL_MSG ((unsigned long)(8))
+extern enum clnt_stat nlm4_cancel_msg_4(nlm4_cancargs *, void *, CLIENT *);
+extern bool_t nlm4_cancel_msg_4_svc(nlm4_cancargs *, void *, struct svc_req *);
+#define NLM4_UNLOCK_MSG ((unsigned long)(9))
+extern enum clnt_stat nlm4_unlock_msg_4(nlm4_unlockargs *, void *, CLIENT *);
+extern bool_t nlm4_unlock_msg_4_svc(nlm4_unlockargs *, void *, struct svc_req *);
+#define NLM4_GRANTED_MSG ((unsigned long)(10))
+extern enum clnt_stat nlm4_granted_msg_4(nlm4_testargs *, void *, CLIENT *);
+extern bool_t nlm4_granted_msg_4_svc(nlm4_testargs *, void *, struct svc_req *);
+#define NLM4_TEST_RES ((unsigned long)(11))
+extern enum clnt_stat nlm4_test_res_4(nlm4_testres *, void *, CLIENT *);
+extern bool_t nlm4_test_res_4_svc(nlm4_testres *, void *, struct svc_req *);
+#define NLM4_LOCK_RES ((unsigned long)(12))
+extern enum clnt_stat nlm4_lock_res_4(nlm4_res *, void *, CLIENT *);
+extern bool_t nlm4_lock_res_4_svc(nlm4_res *, void *, struct svc_req *);
+#define NLM4_CANCEL_RES ((unsigned long)(13))
+extern enum clnt_stat nlm4_cancel_res_4(nlm4_res *, void *, CLIENT *);
+extern bool_t nlm4_cancel_res_4_svc(nlm4_res *, void *, struct svc_req *);
+#define NLM4_UNLOCK_RES ((unsigned long)(14))
+extern enum clnt_stat nlm4_unlock_res_4(nlm4_res *, void *, CLIENT *);
+extern bool_t nlm4_unlock_res_4_svc(nlm4_res *, void *, struct svc_req *);
+#define NLM4_GRANTED_RES ((unsigned long)(15))
+extern enum clnt_stat nlm4_granted_res_4(nlm4_res *, void *, CLIENT *);
+extern bool_t nlm4_granted_res_4_svc(nlm4_res *, void *, struct svc_req *);
+#define NLM4_SHARE ((unsigned long)(20))
+extern enum clnt_stat nlm4_share_4(nlm4_shareargs *, nlm4_shareres *, CLIENT *);
+extern bool_t nlm4_share_4_svc(nlm4_shareargs *, nlm4_shareres *, struct svc_req *);
+#define NLM4_UNSHARE ((unsigned long)(21))
+extern enum clnt_stat nlm4_unshare_4(nlm4_shareargs *, nlm4_shareres *, CLIENT *);
+extern bool_t nlm4_unshare_4_svc(nlm4_shareargs *, nlm4_shareres *, struct svc_req *);
+#define NLM4_NM_LOCK ((unsigned long)(22))
+extern enum clnt_stat nlm4_nm_lock_4(nlm4_lockargs *, nlm4_res *, CLIENT *);
+extern bool_t nlm4_nm_lock_4_svc(nlm4_lockargs *, nlm4_res *, struct svc_req *);
+#define NLM4_FREE_ALL ((unsigned long)(23))
+extern enum clnt_stat nlm4_free_all_4(nlm4_notify *, void *, CLIENT *);
+extern bool_t nlm4_free_all_4_svc(nlm4_notify *, void *, struct svc_req *);
+extern int nlm_prog_4_freeresult(SVCXPRT *, xdrproc_t, caddr_t);
+
+/* the xdr functions */
+extern bool_t xdr_nlm_stats(XDR *, nlm_stats*);
+extern bool_t xdr_nlm_holder(XDR *, nlm_holder*);
+extern bool_t xdr_nlm_testrply(XDR *, nlm_testrply*);
+extern bool_t xdr_nlm_stat(XDR *, nlm_stat*);
+extern bool_t xdr_nlm_res(XDR *, nlm_res*);
+extern bool_t xdr_nlm_testres(XDR *, nlm_testres*);
+extern bool_t xdr_nlm_lock(XDR *, nlm_lock*);
+extern bool_t xdr_nlm_lockargs(XDR *, nlm_lockargs*);
+extern bool_t xdr_nlm_cancargs(XDR *, nlm_cancargs*);
+extern bool_t xdr_nlm_testargs(XDR *, nlm_testargs*);
+extern bool_t xdr_nlm_unlockargs(XDR *, nlm_unlockargs*);
+extern bool_t xdr_fsh_mode(XDR *, fsh_mode*);
+extern bool_t xdr_fsh_access(XDR *, fsh_access*);
+extern bool_t xdr_nlm_share(XDR *, nlm_share*);
+extern bool_t xdr_nlm_shareargs(XDR *, nlm_shareargs*);
+extern bool_t xdr_nlm_shareres(XDR *, nlm_shareres*);
+extern bool_t xdr_nlm_notify(XDR *, nlm_notify*);
+extern bool_t xdr_nlm4_stats(XDR *, nlm4_stats*);
+extern bool_t xdr_nlm4_stat(XDR *, nlm4_stat*);
+extern bool_t xdr_nlm4_holder(XDR *, nlm4_holder*);
+extern bool_t xdr_nlm4_lock(XDR *, nlm4_lock*);
+extern bool_t xdr_nlm4_share(XDR *, nlm4_share*);
+extern bool_t xdr_nlm4_testrply(XDR *, nlm4_testrply*);
+extern bool_t xdr_nlm4_testres(XDR *, nlm4_testres*);
+extern bool_t xdr_nlm4_testargs(XDR *, nlm4_testargs*);
+extern bool_t xdr_nlm4_res(XDR *, nlm4_res*);
+extern bool_t xdr_nlm4_lockargs(XDR *, nlm4_lockargs*);
+extern bool_t xdr_nlm4_cancargs(XDR *, nlm4_cancargs*);
+extern bool_t xdr_nlm4_unlockargs(XDR *, nlm4_unlockargs*);
+extern bool_t xdr_nlm4_shareargs(XDR *, nlm4_shareargs*);
+extern bool_t xdr_nlm4_shareres(XDR *, nlm4_shareres*);
+extern bool_t xdr_nlm_sm_status(XDR *, nlm_sm_status*);
+extern bool_t xdr_nlm4_notify(XDR *, nlm4_notify*);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !_NLM_PROT_H_RPCGEN */
diff --git a/sys/nlm/nlm_prot_clnt.c b/sys/nlm/nlm_prot_clnt.c
new file mode 100644
index 0000000..b3ae5d8
--- /dev/null
+++ b/sys/nlm/nlm_prot_clnt.c
@@ -0,0 +1,372 @@
+/*
+ * Please do not edit this file.
+ * It was generated using rpcgen.
+ */
+
+#include <sys/param.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/systm.h>
+
+#include "nlm_prot.h"
+#include <sys/cdefs.h>
+#ifndef lint
+/*static char sccsid[] = "from: @(#)nlm_prot.x 1.8 87/09/21 Copyr 1987 Sun Micro";*/
+/*static char sccsid[] = "from: * @(#)nlm_prot.x 2.1 88/08/01 4.0 RPCSRC";*/
+__RCSID("$NetBSD: nlm_prot.x,v 1.6 2000/06/07 14:30:15 bouyer Exp $");
+#endif /* not lint */
+__FBSDID("$FreeBSD$");
+
+/* Default timeout can be changed using clnt_control() */
+static struct timeval TIMEOUT = { 25, 0 };
+
+enum clnt_stat
+nlm_sm_notify_0(struct nlm_sm_status *argp, void *clnt_res, CLIENT *clnt)
+{
+ return (clnt_call(clnt, NLM_SM_NOTIFY,
+ (xdrproc_t) xdr_nlm_sm_status, (caddr_t) argp,
+ (xdrproc_t) xdr_void, (caddr_t) clnt_res,
+ TIMEOUT));
+}
+
+enum clnt_stat
+nlm_test_1(struct nlm_testargs *argp, nlm_testres *clnt_res, CLIENT *clnt)
+{
+ return (clnt_call(clnt, NLM_TEST,
+ (xdrproc_t) xdr_nlm_testargs, (caddr_t) argp,
+ (xdrproc_t) xdr_nlm_testres, (caddr_t) clnt_res,
+ TIMEOUT));
+}
+
+enum clnt_stat
+nlm_lock_1(struct nlm_lockargs *argp, nlm_res *clnt_res, CLIENT *clnt)
+{
+ return (clnt_call(clnt, NLM_LOCK,
+ (xdrproc_t) xdr_nlm_lockargs, (caddr_t) argp,
+ (xdrproc_t) xdr_nlm_res, (caddr_t) clnt_res,
+ TIMEOUT));
+}
+
+enum clnt_stat
+nlm_cancel_1(struct nlm_cancargs *argp, nlm_res *clnt_res, CLIENT *clnt)
+{
+ return (clnt_call(clnt, NLM_CANCEL,
+ (xdrproc_t) xdr_nlm_cancargs, (caddr_t) argp,
+ (xdrproc_t) xdr_nlm_res, (caddr_t) clnt_res,
+ TIMEOUT));
+}
+
+enum clnt_stat
+nlm_unlock_1(struct nlm_unlockargs *argp, nlm_res *clnt_res, CLIENT *clnt)
+{
+ return (clnt_call(clnt, NLM_UNLOCK,
+ (xdrproc_t) xdr_nlm_unlockargs, (caddr_t) argp,
+ (xdrproc_t) xdr_nlm_res, (caddr_t) clnt_res,
+ TIMEOUT));
+}
+
+enum clnt_stat
+nlm_granted_1(struct nlm_testargs *argp, nlm_res *clnt_res, CLIENT *clnt)
+{
+ return (clnt_call(clnt, NLM_GRANTED,
+ (xdrproc_t) xdr_nlm_testargs, (caddr_t) argp,
+ (xdrproc_t) xdr_nlm_res, (caddr_t) clnt_res,
+ TIMEOUT));
+}
+
+enum clnt_stat
+nlm_test_msg_1(struct nlm_testargs *argp, void *clnt_res, CLIENT *clnt)
+{
+ return (clnt_call(clnt, NLM_TEST_MSG,
+ (xdrproc_t) xdr_nlm_testargs, (caddr_t) argp,
+ (xdrproc_t) xdr_void, (caddr_t) clnt_res,
+ TIMEOUT));
+}
+
+enum clnt_stat
+nlm_lock_msg_1(struct nlm_lockargs *argp, void *clnt_res, CLIENT *clnt)
+{
+ return (clnt_call(clnt, NLM_LOCK_MSG,
+ (xdrproc_t) xdr_nlm_lockargs, (caddr_t) argp,
+ (xdrproc_t) xdr_void, (caddr_t) clnt_res,
+ TIMEOUT));
+}
+
+enum clnt_stat
+nlm_cancel_msg_1(struct nlm_cancargs *argp, void *clnt_res, CLIENT *clnt)
+{
+ return (clnt_call(clnt, NLM_CANCEL_MSG,
+ (xdrproc_t) xdr_nlm_cancargs, (caddr_t) argp,
+ (xdrproc_t) xdr_void, (caddr_t) clnt_res,
+ TIMEOUT));
+}
+
+enum clnt_stat
+nlm_unlock_msg_1(struct nlm_unlockargs *argp, void *clnt_res, CLIENT *clnt)
+{
+ return (clnt_call(clnt, NLM_UNLOCK_MSG,
+ (xdrproc_t) xdr_nlm_unlockargs, (caddr_t) argp,
+ (xdrproc_t) xdr_void, (caddr_t) clnt_res,
+ TIMEOUT));
+}
+
+enum clnt_stat
+nlm_granted_msg_1(struct nlm_testargs *argp, void *clnt_res, CLIENT *clnt)
+{
+ return (clnt_call(clnt, NLM_GRANTED_MSG,
+ (xdrproc_t) xdr_nlm_testargs, (caddr_t) argp,
+ (xdrproc_t) xdr_void, (caddr_t) clnt_res,
+ TIMEOUT));
+}
+
+enum clnt_stat
+nlm_test_res_1(nlm_testres *argp, void *clnt_res, CLIENT *clnt)
+{
+ return (clnt_call(clnt, NLM_TEST_RES,
+ (xdrproc_t) xdr_nlm_testres, (caddr_t) argp,
+ (xdrproc_t) xdr_void, (caddr_t) clnt_res,
+ TIMEOUT));
+}
+
+enum clnt_stat
+nlm_lock_res_1(nlm_res *argp, void *clnt_res, CLIENT *clnt)
+{
+ return (clnt_call(clnt, NLM_LOCK_RES,
+ (xdrproc_t) xdr_nlm_res, (caddr_t) argp,
+ (xdrproc_t) xdr_void, (caddr_t) clnt_res,
+ TIMEOUT));
+}
+
+enum clnt_stat
+nlm_cancel_res_1(nlm_res *argp, void *clnt_res, CLIENT *clnt)
+{
+ return (clnt_call(clnt, NLM_CANCEL_RES,
+ (xdrproc_t) xdr_nlm_res, (caddr_t) argp,
+ (xdrproc_t) xdr_void, (caddr_t) clnt_res,
+ TIMEOUT));
+}
+
+enum clnt_stat
+nlm_unlock_res_1(nlm_res *argp, void *clnt_res, CLIENT *clnt)
+{
+ return (clnt_call(clnt, NLM_UNLOCK_RES,
+ (xdrproc_t) xdr_nlm_res, (caddr_t) argp,
+ (xdrproc_t) xdr_void, (caddr_t) clnt_res,
+ TIMEOUT));
+}
+
+enum clnt_stat
+nlm_granted_res_1(nlm_res *argp, void *clnt_res, CLIENT *clnt)
+{
+ return (clnt_call(clnt, NLM_GRANTED_RES,
+ (xdrproc_t) xdr_nlm_res, (caddr_t) argp,
+ (xdrproc_t) xdr_void, (caddr_t) clnt_res,
+ TIMEOUT));
+}
+
+enum clnt_stat
+nlm_share_3(nlm_shareargs *argp, nlm_shareres *clnt_res, CLIENT *clnt)
+{
+ return (clnt_call(clnt, NLM_SHARE,
+ (xdrproc_t) xdr_nlm_shareargs, (caddr_t) argp,
+ (xdrproc_t) xdr_nlm_shareres, (caddr_t) clnt_res,
+ TIMEOUT));
+}
+
+enum clnt_stat
+nlm_unshare_3(nlm_shareargs *argp, nlm_shareres *clnt_res, CLIENT *clnt)
+{
+ return (clnt_call(clnt, NLM_UNSHARE,
+ (xdrproc_t) xdr_nlm_shareargs, (caddr_t) argp,
+ (xdrproc_t) xdr_nlm_shareres, (caddr_t) clnt_res,
+ TIMEOUT));
+}
+
+enum clnt_stat
+nlm_nm_lock_3(nlm_lockargs *argp, nlm_res *clnt_res, CLIENT *clnt)
+{
+ return (clnt_call(clnt, NLM_NM_LOCK,
+ (xdrproc_t) xdr_nlm_lockargs, (caddr_t) argp,
+ (xdrproc_t) xdr_nlm_res, (caddr_t) clnt_res,
+ TIMEOUT));
+}
+
+enum clnt_stat
+nlm_free_all_3(nlm_notify *argp, void *clnt_res, CLIENT *clnt)
+{
+ return (clnt_call(clnt, NLM_FREE_ALL,
+ (xdrproc_t) xdr_nlm_notify, (caddr_t) argp,
+ (xdrproc_t) xdr_void, (caddr_t) clnt_res,
+ TIMEOUT));
+}
+
+enum clnt_stat
+nlm4_test_4(nlm4_testargs *argp, nlm4_testres *clnt_res, CLIENT *clnt)
+{
+ return (clnt_call(clnt, NLM4_TEST,
+ (xdrproc_t) xdr_nlm4_testargs, (caddr_t) argp,
+ (xdrproc_t) xdr_nlm4_testres, (caddr_t) clnt_res,
+ TIMEOUT));
+}
+
+enum clnt_stat
+nlm4_lock_4(nlm4_lockargs *argp, nlm4_res *clnt_res, CLIENT *clnt)
+{
+ return (clnt_call(clnt, NLM4_LOCK,
+ (xdrproc_t) xdr_nlm4_lockargs, (caddr_t) argp,
+ (xdrproc_t) xdr_nlm4_res, (caddr_t) clnt_res,
+ TIMEOUT));
+}
+
+enum clnt_stat
+nlm4_cancel_4(nlm4_cancargs *argp, nlm4_res *clnt_res, CLIENT *clnt)
+{
+ return (clnt_call(clnt, NLM4_CANCEL,
+ (xdrproc_t) xdr_nlm4_cancargs, (caddr_t) argp,
+ (xdrproc_t) xdr_nlm4_res, (caddr_t) clnt_res,
+ TIMEOUT));
+}
+
+enum clnt_stat
+nlm4_unlock_4(nlm4_unlockargs *argp, nlm4_res *clnt_res, CLIENT *clnt)
+{
+ return (clnt_call(clnt, NLM4_UNLOCK,
+ (xdrproc_t) xdr_nlm4_unlockargs, (caddr_t) argp,
+ (xdrproc_t) xdr_nlm4_res, (caddr_t) clnt_res,
+ TIMEOUT));
+}
+
+enum clnt_stat
+nlm4_granted_4(nlm4_testargs *argp, nlm4_res *clnt_res, CLIENT *clnt)
+{
+ return (clnt_call(clnt, NLM4_GRANTED,
+ (xdrproc_t) xdr_nlm4_testargs, (caddr_t) argp,
+ (xdrproc_t) xdr_nlm4_res, (caddr_t) clnt_res,
+ TIMEOUT));
+}
+
+enum clnt_stat
+nlm4_test_msg_4(nlm4_testargs *argp, void *clnt_res, CLIENT *clnt)
+{
+ return (clnt_call(clnt, NLM4_TEST_MSG,
+ (xdrproc_t) xdr_nlm4_testargs, (caddr_t) argp,
+ (xdrproc_t) xdr_void, (caddr_t) clnt_res,
+ TIMEOUT));
+}
+
+enum clnt_stat
+nlm4_lock_msg_4(nlm4_lockargs *argp, void *clnt_res, CLIENT *clnt)
+{
+ return (clnt_call(clnt, NLM4_LOCK_MSG,
+ (xdrproc_t) xdr_nlm4_lockargs, (caddr_t) argp,
+ (xdrproc_t) xdr_void, (caddr_t) clnt_res,
+ TIMEOUT));
+}
+
+enum clnt_stat
+nlm4_cancel_msg_4(nlm4_cancargs *argp, void *clnt_res, CLIENT *clnt)
+{
+ return (clnt_call(clnt, NLM4_CANCEL_MSG,
+ (xdrproc_t) xdr_nlm4_cancargs, (caddr_t) argp,
+ (xdrproc_t) xdr_void, (caddr_t) clnt_res,
+ TIMEOUT));
+}
+
+enum clnt_stat
+nlm4_unlock_msg_4(nlm4_unlockargs *argp, void *clnt_res, CLIENT *clnt)
+{
+ return (clnt_call(clnt, NLM4_UNLOCK_MSG,
+ (xdrproc_t) xdr_nlm4_unlockargs, (caddr_t) argp,
+ (xdrproc_t) xdr_void, (caddr_t) clnt_res,
+ TIMEOUT));
+}
+
+enum clnt_stat
+nlm4_granted_msg_4(nlm4_testargs *argp, void *clnt_res, CLIENT *clnt)
+{
+ return (clnt_call(clnt, NLM4_GRANTED_MSG,
+ (xdrproc_t) xdr_nlm4_testargs, (caddr_t) argp,
+ (xdrproc_t) xdr_void, (caddr_t) clnt_res,
+ TIMEOUT));
+}
+
+enum clnt_stat
+nlm4_test_res_4(nlm4_testres *argp, void *clnt_res, CLIENT *clnt)
+{
+ return (clnt_call(clnt, NLM4_TEST_RES,
+ (xdrproc_t) xdr_nlm4_testres, (caddr_t) argp,
+ (xdrproc_t) xdr_void, (caddr_t) clnt_res,
+ TIMEOUT));
+}
+
+enum clnt_stat
+nlm4_lock_res_4(nlm4_res *argp, void *clnt_res, CLIENT *clnt)
+{
+ return (clnt_call(clnt, NLM4_LOCK_RES,
+ (xdrproc_t) xdr_nlm4_res, (caddr_t) argp,
+ (xdrproc_t) xdr_void, (caddr_t) clnt_res,
+ TIMEOUT));
+}
+
+enum clnt_stat
+nlm4_cancel_res_4(nlm4_res *argp, void *clnt_res, CLIENT *clnt)
+{
+ return (clnt_call(clnt, NLM4_CANCEL_RES,
+ (xdrproc_t) xdr_nlm4_res, (caddr_t) argp,
+ (xdrproc_t) xdr_void, (caddr_t) clnt_res,
+ TIMEOUT));
+}
+
+enum clnt_stat
+nlm4_unlock_res_4(nlm4_res *argp, void *clnt_res, CLIENT *clnt)
+{
+ return (clnt_call(clnt, NLM4_UNLOCK_RES,
+ (xdrproc_t) xdr_nlm4_res, (caddr_t) argp,
+ (xdrproc_t) xdr_void, (caddr_t) clnt_res,
+ TIMEOUT));
+}
+
+enum clnt_stat
+nlm4_granted_res_4(nlm4_res *argp, void *clnt_res, CLIENT *clnt)
+{
+ return (clnt_call(clnt, NLM4_GRANTED_RES,
+ (xdrproc_t) xdr_nlm4_res, (caddr_t) argp,
+ (xdrproc_t) xdr_void, (caddr_t) clnt_res,
+ TIMEOUT));
+}
+
+enum clnt_stat
+nlm4_share_4(nlm4_shareargs *argp, nlm4_shareres *clnt_res, CLIENT *clnt)
+{
+ return (clnt_call(clnt, NLM4_SHARE,
+ (xdrproc_t) xdr_nlm4_shareargs, (caddr_t) argp,
+ (xdrproc_t) xdr_nlm4_shareres, (caddr_t) clnt_res,
+ TIMEOUT));
+}
+
+enum clnt_stat
+nlm4_unshare_4(nlm4_shareargs *argp, nlm4_shareres *clnt_res, CLIENT *clnt)
+{
+ return (clnt_call(clnt, NLM4_UNSHARE,
+ (xdrproc_t) xdr_nlm4_shareargs, (caddr_t) argp,
+ (xdrproc_t) xdr_nlm4_shareres, (caddr_t) clnt_res,
+ TIMEOUT));
+}
+
+enum clnt_stat
+nlm4_nm_lock_4(nlm4_lockargs *argp, nlm4_res *clnt_res, CLIENT *clnt)
+{
+ return (clnt_call(clnt, NLM4_NM_LOCK,
+ (xdrproc_t) xdr_nlm4_lockargs, (caddr_t) argp,
+ (xdrproc_t) xdr_nlm4_res, (caddr_t) clnt_res,
+ TIMEOUT));
+}
+
+enum clnt_stat
+nlm4_free_all_4(nlm4_notify *argp, void *clnt_res, CLIENT *clnt)
+{
+ return (clnt_call(clnt, NLM4_FREE_ALL,
+ (xdrproc_t) xdr_nlm4_notify, (caddr_t) argp,
+ (xdrproc_t) xdr_void, (caddr_t) clnt_res,
+ TIMEOUT));
+}
diff --git a/sys/nlm/nlm_prot_impl.c b/sys/nlm/nlm_prot_impl.c
new file mode 100644
index 0000000..106f4b5
--- /dev/null
+++ b/sys/nlm/nlm_prot_impl.c
@@ -0,0 +1,1783 @@
+/*-
+ * Copyright (c) 2008 Isilon Inc http://www.isilon.com/
+ * Authors: Doug Rabson <dfr@rabson.org>
+ * Developed with Red Inc: Alfred Perlstein <alfred@freebsd.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "opt_inet6.h"
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/fcntl.h>
+#include <sys/kernel.h>
+#include <sys/lockf.h>
+#include <sys/malloc.h>
+#include <sys/mount.h>
+#include <sys/priv.h>
+#include <sys/proc.h>
+#include <sys/socket.h>
+#include <sys/socketvar.h>
+#include <sys/syscall.h>
+#include <sys/sysctl.h>
+#include <sys/sysent.h>
+#include <sys/sysproto.h>
+#include <sys/systm.h>
+#include <sys/taskqueue.h>
+#include <sys/unistd.h>
+#include <sys/vnode.h>
+
+#include "nlm_prot.h"
+#include "sm_inter.h"
+#include "nlm.h"
+#include <rpc/rpc_com.h>
+#include <rpc/rpcb_prot.h>
+
+MALLOC_DEFINE(M_NLM, "NLM", "Network Lock Manager");
+
+/*
+ * If a host is inactive (and holds no locks) for this amount of
+ * seconds, we consider it idle and stop tracking it.
+ */
+#define NLM_IDLE_TIMEOUT 30
+
+/*
+ * We check the host list for idle every few seconds.
+ */
+#define NLM_IDLE_PERIOD 5
+
+/*
+ * Support for sysctl vfs.nlm.sysid
+ */
+SYSCTL_NODE(_vfs, OID_AUTO, nlm, CTLFLAG_RW, NULL, "Network Lock Manager");
+SYSCTL_NODE(_vfs_nlm, OID_AUTO, sysid, CTLFLAG_RW, NULL, "");
+
+/*
+ * Syscall hooks
+ */
+static int nlm_syscall_offset = SYS_nlm_syscall;
+static struct sysent nlm_syscall_prev_sysent;
+MAKE_SYSENT(nlm_syscall);
+static bool_t nlm_syscall_registered = FALSE;
+
+/*
+ * Debug level passed in from userland. We also support a sysctl hook
+ * so that it can be changed on a live system.
+ */
+static int nlm_debug_level;
+SYSCTL_INT(_debug, OID_AUTO, nlm_debug, CTLFLAG_RW, &nlm_debug_level, 0, "");
+
+/*
+ * Grace period handling. The value of nlm_grace_threshold is the
+ * value of time_uptime after which we are serving requests normally.
+ */
+static time_t nlm_grace_threshold;
+
+/*
+ * We check for idle hosts if time_uptime is greater than
+ * nlm_next_idle_check.
+ */
+static time_t nlm_next_idle_check;
+
+/*
+ * A socket to use for RPC - shared by all IPv4 RPC clients.
+ */
+static struct socket *nlm_socket;
+
+#ifdef INET6
+
+/*
+ * A socket to use for RPC - shared by all IPv6 RPC clients.
+ */
+static struct socket *nlm_socket6;
+
+#endif
+
+/*
+ * An RPC client handle that can be used to communicate with the local
+ * NSM.
+ */
+static CLIENT *nlm_nsm;
+
+/*
+ * An RPC client handle that can be used to communicate with the
+ * userland part of lockd.
+ */
+static CLIENT *nlm_lockd;
+
+/*
+ * Locks:
+ * (l) locked by nh_lock
+ * (s) only accessed via server RPC which is single threaded
+ * (c) const until freeing
+ */
+
+/*
+ * A pending asynchronous lock request, stored on the nc_pending list
+ * of the NLM host.
+ */
+struct nlm_async_lock {
+    TAILQ_ENTRY(nlm_async_lock) af_link; /* (l) host's list of locks */
+    struct task af_task; /* (c) async callback details */
+    void *af_cookie; /* (l) lock manager cancel token */
+    struct vnode *af_vp; /* (l) vnode to lock */
+    struct flock af_fl; /* (c) lock details */
+    struct nlm_host *af_host; /* (c) host which is locking */
+    nlm4_testargs af_granted; /* (c) notification details */
+};
+/* List head type for nh_pending / nh_finished below. */
+TAILQ_HEAD(nlm_async_lock_list, nlm_async_lock);
+
+/*
+ * NLM host.
+ */
+struct nlm_host {
+    struct mtx nh_lock;
+    TAILQ_ENTRY(nlm_host) nh_link; /* (s) global list of hosts */
+    char *nh_caller_name; /* (c) printable name of host */
+    uint32_t nh_sysid; /* (c) our allocated system ID */
+    char nh_sysid_string[10]; /* (c) string rep. of sysid */
+    struct sockaddr_storage nh_addr; /* (s) remote address of host */
+    CLIENT *nh_rpc; /* (s) RPC handle to send to host */
+    rpcvers_t nh_vers; /* (s) NLM version of host */
+    int nh_state; /* (s) last seen NSM state of host */
+    bool_t nh_monitored; /* (s) TRUE if local NSM is monitoring */
+    time_t nh_idle_timeout; /* (s) Time at which host is idle */
+    struct sysctl_ctx_list nh_sysctl; /* (c) vfs.nlm.sysid nodes */
+    struct nlm_async_lock_list nh_pending; /* (l) pending async locks */
+    struct nlm_async_lock_list nh_finished; /* (l) finished async locks */
+};
+/* Head type for the global nlm_hosts list. */
+TAILQ_HEAD(nlm_host_list, nlm_host);
+
+static struct nlm_host_list nlm_hosts;
+static uint32_t nlm_next_sysid = 1;
+
+static void nlm_host_unmonitor(struct nlm_host *);
+
+/**********************************************************************/
+
+/*
+ * Initialise NLM globals.
+ */
+/*
+ * SYSINIT hook: initialise the global host list and register the
+ * nlm_syscall entry point, remembering the previous sysent so it can
+ * be restored on unload. Registration failure is non-fatal - the
+ * module simply remains unreachable from userland.
+ */
+static void
+nlm_init(void *dummy)
+{
+    int error;
+
+    TAILQ_INIT(&nlm_hosts);
+
+    error = syscall_register(&nlm_syscall_offset, &nlm_syscall_sysent,
+        &nlm_syscall_prev_sysent);
+    if (error)
+        printf("Can't register NLM syscall\n");
+    else
+        nlm_syscall_registered = TRUE;
+}
+SYSINIT(nlm_init, SI_SUB_LOCK, SI_ORDER_FIRST, nlm_init, NULL);
+
+/*
+ * SYSUNINIT hook: undo nlm_init by restoring the previous syscall
+ * table entry, if we managed to register one.
+ */
+static void
+nlm_uninit(void *dummy)
+{
+
+    if (nlm_syscall_registered)
+        syscall_deregister(&nlm_syscall_offset,
+            &nlm_syscall_prev_sysent);
+}
+SYSUNINIT(nlm_uninit, SI_SUB_LOCK, SI_ORDER_FIRST, nlm_uninit, NULL);
+
+/*
+ * Copy a struct netobj.
+ */
+/*
+ * Deep-copy one netobj into another. The destination buffer is
+ * allocated from 'type' with M_WAITOK, so this cannot fail; the
+ * caller owns (and must eventually free) dst->n_bytes.
+ */
+void
+nlm_copy_netobj(struct netobj *dst, struct netobj *src,
+    struct malloc_type *type)
+{
+    unsigned int len;
+
+    len = src->n_len;
+    dst->n_len = len;
+    dst->n_bytes = malloc(len, type, M_WAITOK);
+    memcpy(dst->n_bytes, src->n_bytes, len);
+}
+
+/*
+ * Create an RPC client handle for the given (address,prog,vers)
+ * triple using UDP.
+ */
+/*
+ * Create a UDP RPC client handle for (sa, prog, vers). We first ask
+ * the remote rpcbind/portmap for the service's port (trying RPCBIND
+ * v4, then v3, then the old portmap protocol) and then retarget the
+ * rpcbind client handle at the service itself rather than creating a
+ * second handle. Returns NULL on any failure. May sleep.
+ */
+static CLIENT *
+nlm_get_rpc(struct sockaddr *sa, rpcprog_t prog, rpcvers_t vers)
+{
+    const char *wchan = "nlmrcv";
+    const char* protofmly;
+    struct sockaddr_storage ss;
+    struct socket *so;
+    CLIENT *rpcb;
+    struct timeval timo;
+    RPCB parms;
+    char *uaddr;
+    enum clnt_stat stat;
+    int rpcvers;
+
+    /*
+     * First we need to contact the remote RPCBIND service to find
+     * the right port.
+     */
+    memcpy(&ss, sa, sa->sa_len);
+    switch (ss.ss_family) {
+    case AF_INET:
+        /* 111 is the well-known rpcbind/portmapper port. */
+        ((struct sockaddr_in *)&ss)->sin_port = htons(111);
+        protofmly = "inet";
+        so = nlm_socket;
+        break;
+
+#ifdef INET6
+    case AF_INET6:
+        ((struct sockaddr_in6 *)&ss)->sin6_port = htons(111);
+        protofmly = "inet6";
+        so = nlm_socket6;
+        break;
+#endif
+
+    default:
+        /*
+         * Unsupported address family - fail.
+         */
+        return (NULL);
+    }
+
+    rpcb = clnt_dg_create(so, (struct sockaddr *)&ss,
+        RPCBPROG, RPCBVERS4, 0, 0);
+    if (!rpcb)
+        return (NULL);
+
+    /* Describe the service we are looking up. */
+    parms.r_prog = prog;
+    parms.r_vers = vers;
+    parms.r_netid = "udp";
+    parms.r_addr = "";
+    parms.r_owner = "";
+
+    /*
+     * Use the default timeout.
+     */
+    timo.tv_sec = 25;
+    timo.tv_usec = 0;
+again:
+    uaddr = NULL;
+    stat = CLNT_CALL(rpcb, (rpcprog_t) RPCBPROC_GETADDR,
+        (xdrproc_t) xdr_rpcb, &parms,
+        (xdrproc_t) xdr_wrapstring, &uaddr, timo);
+    if (stat == RPC_PROGVERSMISMATCH) {
+        /*
+         * Try RPCBIND version 3 if we haven't already.
+         *
+         * XXX fall back to portmap?
+         */
+        CLNT_CONTROL(rpcb, CLGET_VERS, &rpcvers);
+        if (rpcvers == RPCBVERS4) {
+            rpcvers = RPCBVERS;
+            CLNT_CONTROL(rpcb, CLSET_VERS, &rpcvers);
+            goto again;
+        }
+    }
+
+    if (stat == RPC_SUCCESS) {
+        /*
+         * We have a reply from the remote RPCBIND - turn it into an
+         * appropriate address and make a new client that can talk to
+         * the remote NLM.
+         *
+         * XXX fixup IPv6 scope ID.
+         */
+        struct netbuf *a;
+        a = __rpc_uaddr2taddr_af(ss.ss_family, uaddr);
+        memcpy(&ss, a->buf, a->len);
+        free(a->buf, M_RPC);
+        free(a, M_RPC);
+        xdr_free((xdrproc_t) xdr_wrapstring, &uaddr);
+    } else if (stat == RPC_PROGVERSMISMATCH) {
+        /*
+         * Try portmap.
+         */
+        struct pmap mapping;
+        u_short port;
+
+        rpcvers = PMAPVERS;
+        CLNT_CONTROL(rpcb, CLSET_VERS, &rpcvers);
+
+
+        mapping.pm_prog = parms.r_prog;
+        mapping.pm_vers = parms.r_vers;
+        mapping.pm_prot = IPPROTO_UDP;
+        mapping.pm_port = 0;
+
+        stat = CLNT_CALL(rpcb, (rpcprog_t) PMAPPROC_GETPORT,
+            (xdrproc_t) xdr_pmap, &mapping,
+            (xdrproc_t) xdr_u_short, &port, timo);
+
+        if (stat == RPC_SUCCESS) {
+            /* Patch the looked-up port into the target address. */
+            switch (ss.ss_family) {
+            case AF_INET:
+                ((struct sockaddr_in *)&ss)->sin_port =
+                    htons(port);
+                break;
+
+#ifdef INET6
+            case AF_INET6:
+                ((struct sockaddr_in6 *)&ss)->sin6_port =
+                    htons(port);
+                break;
+#endif
+            }
+        }
+    }
+    if (stat != RPC_SUCCESS) {
+        /*
+         * NOTE(review): rpcb is not destroyed on this path -
+         * looks like a CLIENT handle leak; confirm against
+         * clnt_dg_create ownership rules.
+         */
+        printf("NLM: failed to contact remote rpcbind, stat = %d\n",
+            (int) stat);
+        return (NULL);
+    }
+
+    /*
+     * Re-use the client we used to speak to rpcbind.
+     */
+    CLNT_CONTROL(rpcb, CLSET_SVC_ADDR, &ss);
+    CLNT_CONTROL(rpcb, CLSET_PROG, &prog);
+    CLNT_CONTROL(rpcb, CLSET_VERS, &vers);
+    CLNT_CONTROL(rpcb, CLSET_WAITCHAN, &wchan);
+    rpcb->cl_auth = authunix_create(curthread->td_ucred);
+
+    return (rpcb);
+}
+
+/*
+ * This async callback runs after an async lock request has been
+ * granted. We notify the host which initiated the request.
+ */
+/*
+ * Taskqueue handler invoked (via af_task) when the local lock manager
+ * grants a blocked async lock; notifies the remote host that
+ * requested it. 'pending' is the standard taskqueue argument and is
+ * unused here.
+ */
+static void
+nlm_lock_callback(void *arg, int pending)
+{
+    struct nlm_async_lock *af = (struct nlm_async_lock *) arg;
+
+    if (nlm_debug_level >= 2)
+        printf("NLM: async lock %p for %s (sysid %d) granted\n",
+            af, af->af_host->nh_caller_name,
+            af->af_host->nh_sysid);
+
+    /*
+     * Send the results back to the host.
+     *
+     * Note: there is a possible race here with nlm_host_notify
+     * destroying the RPC client. To avoid problems, the first
+     * thing nlm_host_notify does is to cancel pending async lock
+     * requests.
+     */
+    if (af->af_host->nh_vers == NLM_VERS4) {
+        nlm4_granted_msg_4(&af->af_granted,
+            NULL, af->af_host->nh_rpc);
+    } else {
+        /*
+         * Back-convert to legacy protocol
+         */
+        nlm_testargs granted;
+        granted.cookie = af->af_granted.cookie;
+        granted.exclusive = af->af_granted.exclusive;
+        granted.alock.caller_name =
+            af->af_granted.alock.caller_name;
+        granted.alock.fh = af->af_granted.alock.fh;
+        granted.alock.oh = af->af_granted.alock.oh;
+        granted.alock.svid = af->af_granted.alock.svid;
+        granted.alock.l_offset =
+            af->af_granted.alock.l_offset;
+        granted.alock.l_len =
+            af->af_granted.alock.l_len;
+
+        nlm_granted_msg_1(&granted,
+            NULL, af->af_host->nh_rpc);
+    }
+
+    /*
+     * Move this entry to the nh_finished list. Someone else will
+     * free it later - its too hard to do it here safely without
+     * racing with cancel.
+     *
+     * XXX possibly we should have a third "granted sent but not
+     * ack'ed" list so that we can re-send the granted message.
+     */
+    mtx_lock(&af->af_host->nh_lock);
+    TAILQ_REMOVE(&af->af_host->nh_pending, af, af_link);
+    TAILQ_INSERT_TAIL(&af->af_host->nh_finished, af, af_link);
+    mtx_unlock(&af->af_host->nh_lock);
+}
+
+/*
+ * Free an async lock request. The request must have been removed from
+ * any list.
+ */
+/*
+ * Free an async lock request. The request must have been removed
+ * from any list (caller's responsibility) and no lock may be held.
+ * Releases the XDR-allocated notification data and the vnode
+ * reference taken when the request was created.
+ */
+static void
+nlm_free_async_lock(struct nlm_async_lock *af)
+{
+    /*
+     * Free an async lock.
+     */
+    xdr_free((xdrproc_t) xdr_nlm4_testargs, &af->af_granted);
+    if (af->af_vp)
+        vrele(af->af_vp);
+    free(af, M_NLM);
+}
+
+/*
+ * Cancel our async request - this must be called with
+ * af->nh_host->nh_lock held. This is slightly complicated by a
+ * potential race with our own callback. If we fail to cancel the
+ * lock, it must already have been granted - we make sure our async
+ * task has completed by calling taskqueue_drain in this case.
+ */
+/*
+ * Cancel our async request - this must be called with
+ * af->af_host->nh_lock held. This is slightly complicated by a
+ * potential race with our own callback. If we fail to cancel the
+ * lock, it must already have been granted - we make sure our async
+ * task has completed by calling taskqueue_drain in this case.
+ *
+ * Returns 0 if the request was cancelled and freed, or the
+ * VOP_ADVLOCKASYNC error if the lock had already been granted (in
+ * which case the callback has moved the entry to nh_finished).
+ * The lock is dropped and re-taken internally, so the caller must
+ * not rely on list state across this call.
+ */
+static int
+nlm_cancel_async_lock(struct nlm_async_lock *af)
+{
+    struct nlm_host *host = af->af_host;
+    int error;
+
+    mtx_assert(&host->nh_lock, MA_OWNED);
+
+    /* Drop the mutex: VOP_ADVLOCKASYNC and taskqueue_drain can sleep. */
+    mtx_unlock(&host->nh_lock);
+
+    error = VOP_ADVLOCKASYNC(af->af_vp, NULL, F_CANCEL, &af->af_fl,
+        F_REMOTE, NULL, &af->af_cookie);
+
+    if (error) {
+        /*
+         * We failed to cancel - make sure our callback has
+         * completed before we continue.
+         */
+        taskqueue_drain(taskqueue_thread, &af->af_task);
+    }
+
+    mtx_lock(&host->nh_lock);
+
+    if (!error) {
+        if (nlm_debug_level >= 2)
+            printf("NLM: async lock %p for %s (sysid %d) "
+                "cancelled\n",
+                af, host->nh_caller_name, host->nh_sysid);
+
+        /*
+         * Remove from the nh_pending list and free now that
+         * we are safe from the callback.
+         */
+        TAILQ_REMOVE(&host->nh_pending, af, af_link);
+        mtx_unlock(&host->nh_lock);
+        nlm_free_async_lock(af);
+        mtx_lock(&host->nh_lock);
+    }
+
+    return (error);
+}
+
+/*
+ * Drain and free the host's nh_finished list (async locks whose
+ * granted message has been sent by nlm_lock_callback). The mutex is
+ * dropped around each nlm_free_async_lock call because freeing may
+ * sleep (vrele); re-reading TAILQ_FIRST each iteration keeps this
+ * safe against concurrent list changes.
+ */
+static void
+nlm_free_finished_locks(struct nlm_host *host)
+{
+    struct nlm_async_lock *af;
+
+    mtx_lock(&host->nh_lock);
+    while ((af = TAILQ_FIRST(&host->nh_finished)) != NULL) {
+        TAILQ_REMOVE(&host->nh_finished, af, af_link);
+        mtx_unlock(&host->nh_lock);
+        nlm_free_async_lock(af);
+        mtx_lock(&host->nh_lock);
+    }
+    mtx_unlock(&host->nh_lock);
+}
+
+/*
+ * This is called when we receive a host state change
+ * notification. We unlock any active locks owned by the host.
+ */
+/*
+ * This is called when we receive a host state change
+ * notification (or when tearing the server down). We cancel any
+ * pending async locks, discard all locks the host holds, and drop
+ * our RPC handle for it. A non-zero 'newstate' is the host's new NSM
+ * state number (i.e. it rebooted); 'destroy' additionally frees the
+ * host structure. Must be called without nh_lock held.
+ */
+static void
+nlm_host_notify(struct nlm_host *host, int newstate, bool_t destroy)
+{
+    struct nlm_async_lock *af;
+
+    if (newstate) {
+        if (nlm_debug_level >= 1)
+            printf("NLM: host %s (sysid %d) rebooted, new "
+                "state is %d\n",
+                host->nh_caller_name, host->nh_sysid, newstate);
+    }
+
+    /*
+     * Cancel any pending async locks for this host. Note: if a
+     * cancel fails, the callback has already run and moved the
+     * entry to nh_finished, so nh_pending still shrinks and this
+     * loop terminates.
+     */
+    mtx_lock(&host->nh_lock);
+    while ((af = TAILQ_FIRST(&host->nh_pending)) != NULL) {
+        /*
+         * nlm_cancel_async_lock will remove the entry from
+         * nh_pending and free it.
+         */
+        nlm_cancel_async_lock(af);
+    }
+    mtx_unlock(&host->nh_lock);
+    nlm_free_finished_locks(host);
+
+    /*
+     * The host just rebooted - trash its locks and forget any
+     * RPC client handle that we may have for it.
+     */
+    lf_clearremotesys(host->nh_sysid);
+    if (host->nh_rpc) {
+        AUTH_DESTROY(host->nh_rpc->cl_auth);
+        CLNT_DESTROY(host->nh_rpc);
+        host->nh_rpc = NULL;
+    }
+    host->nh_state = newstate;
+
+    /*
+     * Destroy the host if the caller believes that it won't be
+     * used again. This is safe enough - if we see the same name
+     * again, we will just create a new host.
+     */
+    if (destroy) {
+        TAILQ_REMOVE(&nlm_hosts, host, nh_link);
+        mtx_destroy(&host->nh_lock);
+        sysctl_ctx_free(&host->nh_sysctl);
+        free(host->nh_caller_name, M_NLM);
+        free(host, M_NLM);
+    }
+}
+
+/*
+ * Sysctl handler to count the number of locks for a sysid.
+ */
+/*
+ * Sysctl handler backing vfs.nlm.sysid.<N>.lock_count: reports the
+ * number of active locks held for the sysid of the nlm_host passed
+ * in oid_arg1.
+ */
+static int
+nlm_host_lock_count_sysctl(SYSCTL_HANDLER_ARGS)
+{
+    struct nlm_host *h = oidp->oid_arg1;
+    int nlocks;
+
+    nlocks = lf_countlocks(h->nh_sysid);
+    return (sysctl_handle_int(oidp, &nlocks, 0, req));
+}
+
+/*
+ * Create a new NLM host.
+ */
+/*
+ * Create a new NLM host, assign it the next sysid, link it onto the
+ * global nlm_hosts list and publish its vfs.nlm.sysid.<N> sysctl
+ * subtree. Address, version and NSM state are filled in later by the
+ * callers. Runs in the single-threaded server context (see the lock
+ * legend above), so the unlocked list/sysid updates are safe.
+ */
+static struct nlm_host *
+nlm_create_host(const char* caller_name)
+{
+    struct nlm_host *host;
+    struct sysctl_oid *oid;
+
+    if (nlm_debug_level >= 1)
+        printf("NLM: new host %s (sysid %d)\n",
+            caller_name, nlm_next_sysid);
+    host = malloc(sizeof(struct nlm_host), M_NLM, M_WAITOK|M_ZERO);
+    mtx_init(&host->nh_lock, "nh_lock", NULL, MTX_DEF);
+    host->nh_caller_name = strdup(caller_name, M_NLM);
+    host->nh_sysid = nlm_next_sysid++;
+    snprintf(host->nh_sysid_string, sizeof(host->nh_sysid_string),
+        "%d", host->nh_sysid);
+    host->nh_rpc = NULL;
+    host->nh_vers = 0;
+    host->nh_state = 0;
+    host->nh_monitored = FALSE;
+    TAILQ_INIT(&host->nh_pending);
+    TAILQ_INIT(&host->nh_finished);
+    TAILQ_INSERT_TAIL(&nlm_hosts, host, nh_link);
+
+    /* Per-host read-only sysctl nodes under vfs.nlm.sysid.<N>. */
+    sysctl_ctx_init(&host->nh_sysctl);
+    oid = SYSCTL_ADD_NODE(&host->nh_sysctl,
+        SYSCTL_STATIC_CHILDREN(_vfs_nlm_sysid),
+        OID_AUTO, host->nh_sysid_string, CTLFLAG_RD, NULL, "");
+    SYSCTL_ADD_STRING(&host->nh_sysctl, SYSCTL_CHILDREN(oid), OID_AUTO,
+        "hostname", CTLFLAG_RD, host->nh_caller_name, 0, "");
+    SYSCTL_ADD_INT(&host->nh_sysctl, SYSCTL_CHILDREN(oid), OID_AUTO,
+        "version", CTLFLAG_RD, &host->nh_vers, 0, "");
+    SYSCTL_ADD_INT(&host->nh_sysctl, SYSCTL_CHILDREN(oid), OID_AUTO,
+        "monitored", CTLFLAG_RD, &host->nh_monitored, 0, "");
+    SYSCTL_ADD_PROC(&host->nh_sysctl, SYSCTL_CHILDREN(oid), OID_AUTO,
+        "lock_count", CTLTYPE_INT | CTLFLAG_RD, host, 0,
+        nlm_host_lock_count_sysctl, "I", "");
+
+    return (host);
+}
+
+/*
+ * Return non-zero if the address parts of the two sockaddrs are the
+ * same.
+ */
+/*
+ * Return non-zero if the address parts of the two sockaddrs are the
+ * same; port numbers are ignored. Mismatched or unsupported address
+ * families compare unequal.
+ */
+static int
+nlm_compare_addr(const struct sockaddr *a, const struct sockaddr *b)
+{
+
+    if (a->sa_family != b->sa_family)
+        return (FALSE);
+
+    switch (a->sa_family) {
+    case AF_INET:
+        return (memcmp(&((const struct sockaddr_in *) a)->sin_addr,
+            &((const struct sockaddr_in *) b)->sin_addr,
+            sizeof(struct in_addr)) == 0);
+#ifdef INET6
+    case AF_INET6:
+        return (memcmp(&((const struct sockaddr_in6 *) a)->sin6_addr,
+            &((const struct sockaddr_in6 *) b)->sin6_addr,
+            sizeof(struct in6_addr)) == 0);
+#endif
+    default:
+        return (0);
+    }
+}
+
+/*
+ * Check for idle hosts and stop monitoring them. We could also free
+ * the host structure here, possibly after a larger timeout but that
+ * would require some care to avoid races with
+ * e.g. nlm_host_lock_count_sysctl.
+ */
+/*
+ * Scan the host list (at most once per NLM_IDLE_PERIOD seconds) and
+ * stop NSM-monitoring any host that has been idle longer than
+ * NLM_IDLE_TIMEOUT and holds no locks. Hosts still holding locks get
+ * their idle deadline pushed out instead. We deliberately do not
+ * free the host structure here, to avoid races with
+ * e.g. nlm_host_lock_count_sysctl.
+ */
+static void
+nlm_check_idle(void)
+{
+    struct nlm_host *host;
+
+    if (time_uptime <= nlm_next_idle_check)
+        return;
+
+    nlm_next_idle_check = time_uptime + NLM_IDLE_PERIOD;
+
+    TAILQ_FOREACH(host, &nlm_hosts, nh_link) {
+        if (!host->nh_monitored)
+            continue;
+        if (time_uptime <= host->nh_idle_timeout)
+            continue;
+        if (lf_countlocks(host->nh_sysid) > 0) {
+            /* Still busy - give it another grace interval. */
+            host->nh_idle_timeout =
+                time_uptime + NLM_IDLE_TIMEOUT;
+            continue;
+        }
+        nlm_host_unmonitor(host);
+    }
+}
+
+/*
+ * Search for an existing NLM host that matches the given name
+ * (typically the caller_name element of an nlm4_lock). If none is
+ * found, create a new host. If 'rqstp' is non-NULL, record the remote
+ * address of the host so that we can call it back for async
+ * responses.
+ */
+/*
+ * Search for an existing NLM host that matches the given name
+ * (typically the caller_name element of an nlm4_lock). If none is
+ * found, create a new host. If 'rqstp' is non-NULL, record the remote
+ * address of the host so that we can call it back for async
+ * responses. Runs in the single-threaded server context, so the
+ * unlocked list walk is safe.
+ */
+struct nlm_host *
+nlm_find_host_by_name(const char *name, struct svc_req *rqstp)
+{
+    struct nlm_host *host;
+
+    /* Opportunistically expire idle hosts on every lookup. */
+    nlm_check_idle();
+
+    /*
+     * The remote host is determined by caller_name.
+     */
+    TAILQ_FOREACH(host, &nlm_hosts, nh_link) {
+        if (!strcmp(host->nh_caller_name, name))
+            break;
+    }
+
+    if (!host)
+        host = nlm_create_host(name);
+    host->nh_idle_timeout = time_uptime + NLM_IDLE_TIMEOUT;
+
+    /*
+     * If we have an RPC request, record the remote address so
+     * that can send async replies etc.
+     */
+    if (rqstp) {
+        struct netbuf *addr = &rqstp->rq_xprt->xp_rtaddr;
+
+        KASSERT(addr->len < sizeof(struct sockaddr_storage),
+            ("Strange remote transport address length"));
+
+        /*
+         * If we have seen an address before and we currently
+         * have an RPC client handle, make sure the address is
+         * the same, otherwise discard the client handle.
+         */
+        if (host->nh_addr.ss_len && host->nh_rpc) {
+            if (!nlm_compare_addr(
+                (struct sockaddr *) &host->nh_addr,
+                (struct sockaddr *) addr->buf)
+                || host->nh_vers != rqstp->rq_vers) {
+                AUTH_DESTROY(host->nh_rpc->cl_auth);
+                CLNT_DESTROY(host->nh_rpc);
+                host->nh_rpc = NULL;
+            }
+        }
+        memcpy(&host->nh_addr, addr->buf, addr->len);
+        host->nh_vers = rqstp->rq_vers;
+    }
+
+    return (host);
+}
+
+/*
+ * Search for an existing NLM host that matches the given remote
+ * address. If none is found, create a new host with the requested
+ * address and remember 'vers' as the NLM protocol version to use for
+ * that host.
+ */
+/*
+ * Search for an existing NLM host that matches the given remote
+ * address. If none is found, create a new host with the requested
+ * address and remember 'vers' as the NLM protocol version to use for
+ * that host.
+ */
+struct nlm_host *
+nlm_find_host_by_addr(const struct sockaddr *addr, int vers)
+{
+    struct nlm_host *host;
+
+    /* Opportunistically expire idle hosts on every lookup. */
+    nlm_check_idle();
+
+    /*
+     * The remote host is determined by its address.
+     */
+    TAILQ_FOREACH(host, &nlm_hosts, nh_link) {
+        if (nlm_compare_addr(addr,
+            (const struct sockaddr *) &host->nh_addr))
+            break;
+    }
+
+    if (!host) {
+        /*
+         * Fake up a name using inet_ntop. This buffer is
+         * large enough for an IPv6 address.
+         */
+        char tmp[sizeof "ffff:ffff:ffff:ffff:ffff:ffff:255.255.255.255"];
+        switch (addr->sa_family) {
+        case AF_INET:
+            __rpc_inet_ntop(AF_INET,
+                &((const struct sockaddr_in *) addr)->sin_addr,
+                tmp, sizeof tmp);
+            break;
+#ifdef INET6
+        case AF_INET6:
+            __rpc_inet_ntop(AF_INET6,
+                &((const struct sockaddr_in6 *) addr)->sin6_addr,
+                tmp, sizeof tmp);
+            break;
+#endif
+        default:
+            /*
+             * Fix: this used to call strcmp(), which
+             * compared instead of copying and left tmp
+             * uninitialized.
+             */
+            strcpy(tmp, "<unknown>");
+        }
+        host = nlm_create_host(tmp);
+        memcpy(&host->nh_addr, addr, addr->sa_len);
+        host->nh_vers = vers;
+    }
+    host->nh_idle_timeout = time_uptime + NLM_IDLE_TIMEOUT;
+
+    return (host);
+}
+
+/*
+ * Find the NLM host that matches the value of 'sysid'. If none
+ * exists, return NULL.
+ */
+/*
+ * Find the NLM host that matches the value of 'sysid'. If none
+ * exists, return NULL.
+ */
+static struct nlm_host *
+nlm_find_host_by_sysid(int sysid)
+{
+    struct nlm_host *host;
+
+    /* TAILQ_FOREACH leaves host NULL if the list is exhausted. */
+    TAILQ_FOREACH(host, &nlm_hosts, nh_link)
+        if (host->nh_sysid == sysid)
+            break;
+
+    return (host);
+}
+
+/*
+ * Unregister this NLM host with the local NSM due to idleness.
+ */
+/*
+ * Unregister this NLM host with the local NSM due to idleness, via a
+ * synchronous SM_UNMON call. On success clears nh_monitored so a
+ * later request will re-monitor the host.
+ */
+static void
+nlm_host_unmonitor(struct nlm_host *host)
+{
+    mon_id smmonid;
+    sm_stat_res smstat;
+    struct timeval timo;
+    enum clnt_stat stat;
+
+    if (nlm_debug_level >= 1)
+        printf("NLM: unmonitoring %s (sysid %d)\n",
+            host->nh_caller_name, host->nh_sysid);
+
+    /*
+     * The mon_id must match the identity we registered in
+     * nlm_host_monitor so that the NSM can find the entry to
+     * remove.
+     */
+    smmonid.mon_name = host->nh_caller_name;
+    smmonid.my_id.my_name = "localhost";
+    smmonid.my_id.my_prog = NLM_PROG;
+    smmonid.my_id.my_vers = NLM_SM;
+    smmonid.my_id.my_proc = NLM_SM_NOTIFY;
+
+    timo.tv_sec = 25;
+    timo.tv_usec = 0;
+    /*
+     * Fix: encode the argument with xdr_mon_id (smmonid is a
+     * mon_id, not a mon - xdr_mon would read past the structure)
+     * and decode the result with xdr_sm_stat_res to match the
+     * declared type of smstat, whose res_stat we check below.
+     */
+    stat = CLNT_CALL(nlm_nsm, SM_UNMON,
+        (xdrproc_t) xdr_mon_id, &smmonid,
+        (xdrproc_t) xdr_sm_stat_res, &smstat, timo);
+
+    if (stat != RPC_SUCCESS) {
+        printf("Failed to contact local NSM - rpc error %d\n", stat);
+        return;
+    }
+    if (smstat.res_stat == stat_fail) {
+        printf("Local NSM refuses to unmonitor %s\n",
+            host->nh_caller_name);
+        return;
+    }
+
+    host->nh_monitored = FALSE;
+}
+
+/*
+ * Register this NLM host with the local NSM so that we can be
+ * notified if it reboots.
+ */
+/*
+ * Register this NLM host with the local NSM so that we can be
+ * notified if it reboots. Also detects a silent reboot (a changed
+ * NSM state value) and trashes the host's locks in that case.
+ * 'state' is the host's NSM state number, or 0 if unknown.
+ */
+static void
+nlm_host_monitor(struct nlm_host *host, int state)
+{
+    mon smmon;
+    sm_stat_res smstat;
+    struct timeval timo;
+    enum clnt_stat stat;
+
+    if (host->nh_state && state && host->nh_state != state) {
+        /*
+         * The host rebooted without telling us. Trash its
+         * locks.
+         */
+        nlm_host_notify(host, state, FALSE);
+    }
+
+    if (state && !host->nh_state) {
+        /*
+         * This is the first time we have seen an NSM state
+         * value for this host. We record it here to help
+         * detect host reboots.
+         */
+        host->nh_state = state;
+        if (nlm_debug_level >= 1)
+            printf("NLM: host %s (sysid %d) has NSM state %d\n",
+                host->nh_caller_name, host->nh_sysid, state);
+    }
+
+    /* Already registered with the NSM - nothing more to do. */
+    if (host->nh_monitored)
+        return;
+
+    if (nlm_debug_level >= 1)
+        printf("NLM: monitoring %s (sysid %d)\n",
+            host->nh_caller_name, host->nh_sysid);
+
+    /*
+     * We put our assigned system ID value in the priv field to
+     * make it simpler to find the host if we are notified of a
+     * host restart.
+     */
+    smmon.mon_id.mon_name = host->nh_caller_name;
+    smmon.mon_id.my_id.my_name = "localhost";
+    smmon.mon_id.my_id.my_prog = NLM_PROG;
+    smmon.mon_id.my_id.my_vers = NLM_SM;
+    smmon.mon_id.my_id.my_proc = NLM_SM_NOTIFY;
+    memcpy(smmon.priv, &host->nh_sysid, sizeof(host->nh_sysid));
+
+    timo.tv_sec = 25;
+    timo.tv_usec = 0;
+    /*
+     * Fix: decode the result with xdr_sm_stat_res - smstat is an
+     * sm_stat_res (SM_MON's result type) and its res_stat field
+     * is checked below; xdr_sm_stat would leave res_stat stale.
+     */
+    stat = CLNT_CALL(nlm_nsm, SM_MON,
+        (xdrproc_t) xdr_mon, &smmon,
+        (xdrproc_t) xdr_sm_stat_res, &smstat, timo);
+
+    if (stat != RPC_SUCCESS) {
+        printf("Failed to contact local NSM - rpc error %d\n", stat);
+        return;
+    }
+    if (smstat.res_stat == stat_fail) {
+        printf("Local NSM refuses to monitor %s\n",
+            host->nh_caller_name);
+        return;
+    }
+
+    host->nh_monitored = TRUE;
+}
+
+/*
+ * Return an RPC client handle that can be used to talk to the NLM
+ * running on the given host.
+ */
+/*
+ * Return an RPC client handle that can be used to talk to the NLM
+ * running on the given host. The handle is cached in nh_rpc and is
+ * created lazily from the address recorded by
+ * nlm_find_host_by_name/addr. Returns NULL if the remote rpcbind
+ * cannot be contacted.
+ */
+CLIENT *
+nlm_host_get_rpc(struct nlm_host *host)
+{
+    struct timeval zero;
+
+    if (host->nh_rpc)
+        return (host->nh_rpc);
+
+    /*
+     * Set the send timeout to zero - we only use this rpc handle
+     * for sending async replies which have no return value.
+     */
+    host->nh_rpc = nlm_get_rpc((struct sockaddr *)&host->nh_addr,
+        NLM_PROG, host->nh_vers);
+
+    if (host->nh_rpc) {
+        /* Zero timeout: fire-and-forget, don't wait for replies. */
+        zero.tv_sec = 0;
+        zero.tv_usec = 0;
+        CLNT_CONTROL(host->nh_rpc, CLSET_TIMEOUT, &zero);
+
+        /*
+         * Monitor the host - if it reboots, the address of
+         * its NSM might change so we must discard our RPC
+         * handle.
+         */
+        nlm_host_monitor(host, 0);
+    }
+
+    return (host->nh_rpc);
+}
+
+/**********************************************************************/
+
+/*
+ * Syscall interface with userland.
+ */
+
+extern void nlm_prog_0(struct svc_req *rqstp, SVCXPRT *transp);
+extern void nlm_prog_1(struct svc_req *rqstp, SVCXPRT *transp);
+extern void nlm_prog_3(struct svc_req *rqstp, SVCXPRT *transp);
+extern void nlm_prog_4(struct svc_req *rqstp, SVCXPRT *transp);
+
+/*
+ * Register the NLM dispatchers with the RPC service pool. 'addrs' is
+ * a userland array of 2*addr_count string pointers, laid out as
+ * (netid, universal address) pairs; transports are created for the
+ * first NLM version and the remaining versions are registered on the
+ * same transports. Returns 0 on success or an errno value.
+ */
+static int
+nlm_register_services(SVCPOOL *pool, int addr_count, char **addrs)
+{
+    static rpcvers_t versions[] = {
+        NLM_SM, NLM_VERS, NLM_VERSX, NLM_VERS4
+    };
+    static void (*dispatchers[])(struct svc_req *, SVCXPRT *) = {
+        nlm_prog_0, nlm_prog_1, nlm_prog_3, nlm_prog_4
+    };
+    static const int version_count = sizeof(versions) / sizeof(versions[0]);
+
+    SVCXPRT **xprts;
+    char netid[16];
+    char uaddr[128];
+    struct netconfig *nconf;
+    int i, j, error;
+
+    if (!addr_count) {
+        printf("NLM: no service addresses given - can't start server");
+        return (EINVAL);
+    }
+
+    xprts = malloc(addr_count * sizeof(SVCXPRT *), M_NLM, M_WAITOK);
+    for (i = 0; i < version_count; i++) {
+        for (j = 0; j < addr_count; j++) {
+            /*
+             * Create transports for the first version and
+             * then just register everything else to the
+             * same transports.
+             */
+            if (i == 0) {
+                char *up;
+
+                /* Copy the (netid, uaddr) pair in from userland. */
+                error = copyin(&addrs[2*j], &up,
+                    sizeof(char*));
+                if (error)
+                    goto out;
+                error = copyinstr(up, netid, sizeof(netid),
+                    NULL);
+                if (error)
+                    goto out;
+                error = copyin(&addrs[2*j+1], &up,
+                    sizeof(char*));
+                if (error)
+                    goto out;
+                error = copyinstr(up, uaddr, sizeof(uaddr),
+                    NULL);
+                if (error)
+                    goto out;
+                nconf = getnetconfigent(netid);
+                if (!nconf) {
+                    printf("Can't lookup netid %s\n",
+                        netid);
+                    error = EINVAL;
+                    goto out;
+                }
+                xprts[j] = svc_tp_create(pool, dispatchers[i],
+                    NLM_PROG, versions[i], uaddr, nconf);
+                if (!xprts[j]) {
+                    printf("NLM: unable to create "
+                        "(NLM_PROG, %d).\n", versions[i]);
+                    /* Fix: don't leak the netconfig entry. */
+                    freenetconfigent(nconf);
+                    error = EINVAL;
+                    goto out;
+                }
+                freenetconfigent(nconf);
+            } else {
+                nconf = getnetconfigent(xprts[j]->xp_netid);
+                rpcb_unset(NLM_PROG, versions[i], nconf);
+                if (!svc_reg(xprts[j], NLM_PROG, versions[i],
+                    dispatchers[i], nconf)) {
+                    printf("NLM: can't register "
+                        "(NLM_PROG, %d)\n", versions[i]);
+                    /* Fix: don't leak the netconfig entry. */
+                    if (nconf)
+                        freenetconfigent(nconf);
+                    error = EINVAL;
+                    goto out;
+                }
+                /* Fix: this branch never freed nconf before. */
+                if (nconf)
+                    freenetconfigent(nconf);
+            }
+        }
+    }
+    error = 0;
+out:
+    free(xprts, M_NLM);
+    return (error);
+}
+
+/*
+ * Main server entry point. Contacts the local NSM to get its current
+ * state and send SM_UNMON_ALL. Registers the NLM services and then
+ * services requests. Does not return until the server is interrupted
+ * by a signal.
+ */
+/*
+ * Main server entry point. Contacts the local NSM to get its current
+ * state and send SM_UNMON_ALL. Registers the NLM services and then
+ * services requests. Does not return until the server is interrupted
+ * by a signal.
+ */
+static int
+nlm_server_main(int addr_count, char **addrs)
+{
+    struct thread *td = curthread;
+    int error;
+    SVCPOOL *pool = NULL;    /* NULL so the 'out' path is safe early */
+    struct sockopt opt;
+    int portlow;
+#ifdef INET6
+    struct sockaddr_in6 sin6;
+#endif
+    struct sockaddr_in sin;
+    my_id id;
+    sm_stat smstat;
+    struct timeval timo;
+    enum clnt_stat stat;
+    struct nlm_host *host;
+
+    if (nlm_socket) {
+        printf("NLM: can't start server - it appears to be running already\n");
+        return (EPERM);
+    }
+
+    memset(&opt, 0, sizeof(opt));
+
+    /*
+     * Create the shared IPv4 (and IPv6) datagram sockets, bound to
+     * a low (privileged) port so remote NLMs will trust us.
+     */
+    nlm_socket = NULL;
+    error = socreate(AF_INET, &nlm_socket, SOCK_DGRAM, 0,
+        td->td_ucred, td);
+    if (error) {
+        printf("NLM: can't create IPv4 socket - error %d\n", error);
+        return (error);
+    }
+    opt.sopt_dir = SOPT_SET;
+    opt.sopt_level = IPPROTO_IP;
+    opt.sopt_name = IP_PORTRANGE;
+    portlow = IP_PORTRANGE_LOW;
+    opt.sopt_val = &portlow;
+    opt.sopt_valsize = sizeof(portlow);
+    sosetopt(nlm_socket, &opt);
+
+#ifdef INET6
+    nlm_socket6 = NULL;
+    error = socreate(AF_INET6, &nlm_socket6, SOCK_DGRAM, 0,
+        td->td_ucred, td);
+    if (error) {
+        printf("NLM: can't create IPv6 socket - error %d\n", error);
+        /*
+         * Fix: close the IPv4 socket, otherwise a retry would
+         * fail forever with EPERM ("appears to be running").
+         */
+        soclose(nlm_socket);
+        nlm_socket = NULL;
+        return (error);
+    }
+    opt.sopt_dir = SOPT_SET;
+    opt.sopt_level = IPPROTO_IPV6;
+    opt.sopt_name = IPV6_PORTRANGE;
+    portlow = IPV6_PORTRANGE_LOW;
+    opt.sopt_val = &portlow;
+    opt.sopt_valsize = sizeof(portlow);
+    sosetopt(nlm_socket6, &opt);
+#endif
+
+    /*
+     * Contact the local NSM (rpc.statd) over loopback, preferring
+     * IPv6 and falling back to IPv4.
+     */
+#ifdef INET6
+    memset(&sin6, 0, sizeof(sin6));
+    sin6.sin6_len = sizeof(sin6);
+    sin6.sin6_family = AF_INET6;
+    sin6.sin6_addr = in6addr_loopback;
+    nlm_nsm = nlm_get_rpc((struct sockaddr *) &sin6, SM_PROG, SM_VERS);
+    if (!nlm_nsm) {
+#endif
+        memset(&sin, 0, sizeof(sin));
+        sin.sin_len = sizeof(sin);
+        /* Fix: this is the IPv4 fallback; it said AF_INET6. */
+        sin.sin_family = AF_INET;
+        sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
+        nlm_nsm = nlm_get_rpc((struct sockaddr *) &sin, SM_PROG,
+            SM_VERS);
+#ifdef INET6
+    }
+#endif
+
+    if (!nlm_nsm) {
+        printf("Can't start NLM - unable to contact NSM\n");
+        /* Fix: route through 'out' so the sockets are closed. */
+        error = EINVAL;
+        goto out;
+    }
+
+    pool = svcpool_create();
+
+    error = nlm_register_services(pool, addr_count, addrs);
+    if (error)
+        goto out;
+
+    /* Forget any stale monitor registrations from a previous run. */
+    memset(&id, 0, sizeof(id));
+    id.my_name = "NFS NLM";
+
+    timo.tv_sec = 25;
+    timo.tv_usec = 0;
+    stat = CLNT_CALL(nlm_nsm, SM_UNMON_ALL,
+        (xdrproc_t) xdr_my_id, &id,
+        (xdrproc_t) xdr_sm_stat, &smstat, timo);
+
+    if (stat != RPC_SUCCESS) {
+        struct rpc_err err;
+
+        CLNT_GETERR(nlm_nsm, &err);
+        printf("NLM: unexpected error contacting NSM, stat=%d, errno=%d\n",
+            stat, err.re_errno);
+        error = EINVAL;
+        goto out;
+    }
+
+    if (nlm_debug_level >= 1)
+        printf("NLM: local NSM state is %d\n", smstat.state);
+
+    /* Serve requests until interrupted by a signal. */
+    svc_run(pool);
+    error = 0;
+
+out:
+    if (pool)
+        svcpool_destroy(pool);
+
+    /*
+     * Trash all the existing state so that if the server
+     * restarts, it gets a clean slate.
+     */
+    while ((host = TAILQ_FIRST(&nlm_hosts)) != NULL) {
+        nlm_host_notify(host, 0, TRUE);
+    }
+    if (nlm_nsm) {
+        AUTH_DESTROY(nlm_nsm->cl_auth);
+        CLNT_DESTROY(nlm_nsm);
+        nlm_nsm = NULL;
+    }
+    if (nlm_lockd) {
+        AUTH_DESTROY(nlm_lockd->cl_auth);
+        CLNT_DESTROY(nlm_lockd);
+        nlm_lockd = NULL;
+    }
+
+    soclose(nlm_socket);
+    nlm_socket = NULL;
+#ifdef INET6
+    soclose(nlm_socket6);
+    nlm_socket6 = NULL;
+#endif
+
+    return (error);
+}
+
+/*
+ * The nlm_syscall entry point (registered in nlm_init). Requires the
+ * PRIV_NFS_LOCKD privilege; records the debug level and grace period
+ * supplied by rpc.lockd and then runs the server loop, not returning
+ * until the server terminates.
+ */
+int
+nlm_syscall(struct thread *td, struct nlm_syscall_args *uap)
+{
+    int error;
+
+    error = priv_check(td, PRIV_NFS_LOCKD);
+    if (error)
+        return (error);
+
+    nlm_debug_level = uap->debug_level;
+    nlm_grace_threshold = time_uptime + uap->grace_period;
+    nlm_next_idle_check = time_uptime + NLM_IDLE_PERIOD;
+
+    return nlm_server_main(uap->addr_count, uap->addrs);
+}
+
+/**********************************************************************/
+
+/*
+ * NLM implementation details, called from the RPC stubs.
+ */
+
+
+/*
+ * Handle an NLM_SM_NOTIFY callback from the local NSM: a monitored
+ * host changed state (rebooted). We recover our sysid from the priv
+ * blob we registered in nlm_host_monitor and trash that host's
+ * locks. Notifications for unknown sysids are silently ignored.
+ */
+void
+nlm_sm_notify(struct nlm_sm_status *argp)
+{
+    uint32_t sysid;
+    struct nlm_host *host;
+
+    if (nlm_debug_level >= 3)
+        printf("nlm_sm_notify(): mon_name = %s\n", argp->mon_name);
+    memcpy(&sysid, &argp->priv, sizeof(sysid));
+    host = nlm_find_host_by_sysid(sysid);
+    if (host)
+        nlm_host_notify(host, argp->state, FALSE);
+}
+
+/*
+ * Extract a local fhandle_t from the opaque NFS file handle in a
+ * netobj. NOTE(review): copies exactly sizeof(fhandle_t) bytes with
+ * no check of p->n_len - presumably callers guarantee the handle is
+ * at least that long; confirm.
+ */
+static void
+nlm_convert_to_fhandle_t(fhandle_t *fhp, struct netobj *p)
+{
+    memcpy(fhp, p->n_bytes, sizeof(fhandle_t));
+}
+
+/*
+ * Bundle of references acquired by nlm_get_vfs_state and released by
+ * nlm_release_vfs_state.
+ */
+struct vfs_state {
+    struct mount *vs_mp;    /* referenced mount point, or NULL */
+    struct vnode *vs_vp;    /* referenced (unlocked) vnode, or NULL */
+    int vs_vfslocked;    /* token for VFS_UNLOCK_GIANT */
+};
+
+/*
+ * Resolve an NFS file handle to a vnode and verify that 'host' is
+ * allowed to lock it: the filesystem must be exported to the host's
+ * address, writable, and the request's credentials (or the export's
+ * anonymous credentials, for root or MNT_EXPORTANON exports) must
+ * grant write access. On success *vs holds the references, which the
+ * caller must release with nlm_release_vfs_state even on failure.
+ * Returns 0 or an errno (ESTALE for an unknown filesystem).
+ */
+static int
+nlm_get_vfs_state(struct nlm_host *host, struct svc_req *rqstp,
+    fhandle_t *fhp, struct vfs_state *vs)
+{
+    int error, exflags, freecred;
+    struct ucred *cred = NULL, *credanon;
+
+    memset(vs, 0, sizeof(*vs));
+    freecred = FALSE;
+
+    vs->vs_mp = vfs_getvfs(&fhp->fh_fsid);
+    if (!vs->vs_mp) {
+        return (ESTALE);
+    }
+    vs->vs_vfslocked = VFS_LOCK_GIANT(vs->vs_mp);
+
+    error = VFS_CHECKEXP(vs->vs_mp, (struct sockaddr *)&host->nh_addr,
+        &exflags, &credanon);
+    if (error)
+        goto out;
+
+    if (exflags & MNT_EXRDONLY || (vs->vs_mp->mnt_flag & MNT_RDONLY)) {
+        error = EROFS;
+        goto out;
+    }
+
+    error = VFS_FHTOVP(vs->vs_mp, &fhp->fh_fid, &vs->vs_vp);
+    if (error)
+        goto out;
+
+    cred = crget();
+    freecred = TRUE;
+    if (!svc_getcred(rqstp, cred, NULL)) {
+        error = EINVAL;
+        goto out;
+    }
+    if (cred->cr_uid == 0 || (exflags & MNT_EXPORTANON)) {
+        /*
+         * NOTE(review): cred is replaced by credanon without
+         * taking an extra reference, and credanon is never
+         * crfree'd here - confirm VFS_CHECKEXP's ownership
+         * convention for credanon.
+         */
+        crfree(cred);
+        cred = credanon;
+        freecred = FALSE;
+    }
+    /* VFS_FHTOVP returned the vnode locked; we only need the ref. */
+#if __FreeBSD_version < 800011
+    VOP_UNLOCK(vs->vs_vp, 0, curthread);
+#else
+    VOP_UNLOCK(vs->vs_vp, 0);
+#endif
+
+    /*
+     * Check cred. (The goto below is redundant - 'out' follows
+     * immediately - but harmless.)
+     */
+    error = VOP_ACCESS(vs->vs_vp, VWRITE, cred, curthread);
+    if (error)
+        goto out;
+
+out:
+    if (freecred)
+        crfree(cred);
+
+    return (error);
+}
+
+/*
+ * Undo nlm_get_vfs_state(): drop the vnode and mount references (if
+ * acquired) and release Giant.  Safe on partially-initialized state
+ * since both pointers are NULL-checked.
+ */
+static void
+nlm_release_vfs_state(struct vfs_state *vs)
+{
+
+ if (vs->vs_vp)
+ vrele(vs->vs_vp);
+ if (vs->vs_mp)
+ vfs_rel(vs->vs_mp);
+ VFS_UNLOCK_GIANT(vs->vs_vfslocked);
+}
+
+/*
+ * Map an errno from the VFS layer to the closest NLM4 status code.
+ */
+static nlm4_stats
+nlm_convert_error(int error)
+{
+
+ switch (error) {
+ case ESTALE:
+ return nlm4_stale_fh;
+ case EROFS:
+ return nlm4_rofs;
+ default:
+ return nlm4_failed;
+ }
+}
+
+/*
+ * Common implementation of NLM_TEST/NLM4_TEST: probe for a lock that
+ * would conflict with the one described in argp and report the holder
+ * if one exists.  Fills in *result and returns the host structure for
+ * the caller, or NULL if no host could be found or created.
+ */
+struct nlm_host *
+nlm_do_test(nlm4_testargs *argp, nlm4_testres *result, struct svc_req *rqstp)
+{
+ fhandle_t fh;
+ struct vfs_state vs;
+ struct nlm_host *host, *bhost;
+ int error, sysid;
+ struct flock fl;
+
+ memset(result, 0, sizeof(*result));
+
+ host = nlm_find_host_by_name(argp->alock.caller_name, rqstp);
+ if (!host) {
+ result->stat.stat = nlm4_denied_nolocks;
+ return (NULL);
+ }
+
+ if (nlm_debug_level >= 3)
+ printf("nlm_do_test(): caller_name = %s (sysid = %d)\n",
+ host->nh_caller_name, host->nh_sysid);
+
+ nlm_free_finished_locks(host);
+ sysid = host->nh_sysid;
+
+ nlm_convert_to_fhandle_t(&fh, &argp->alock.fh);
+ nlm_copy_netobj(&result->cookie, &argp->cookie, M_RPC);
+
+ /* During the grace period only reclaims are serviced; tests wait. */
+ if (time_uptime < nlm_grace_threshold) {
+ result->stat.stat = nlm4_denied_grace_period;
+ return (host);
+ }
+
+ error = nlm_get_vfs_state(host, rqstp, &fh, &vs);
+ if (error) {
+ result->stat.stat = nlm_convert_error(error);
+ goto out;
+ }
+
+ /* Describe the wire lock as a struct flock keyed by sysid. */
+ fl.l_start = argp->alock.l_offset;
+ fl.l_len = argp->alock.l_len;
+ fl.l_pid = argp->alock.svid;
+ fl.l_sysid = sysid;
+ fl.l_whence = SEEK_SET;
+ if (argp->exclusive)
+ fl.l_type = F_WRLCK;
+ else
+ fl.l_type = F_RDLCK;
+ /* F_GETLK only probes; it never acquires anything. */
+ error = VOP_ADVLOCK(vs.vs_vp, NULL, F_GETLK, &fl, F_REMOTE);
+ if (error) {
+ result->stat.stat = nlm4_failed;
+ goto out;
+ }
+
+ if (fl.l_type == F_UNLCK) {
+ result->stat.stat = nlm4_granted;
+ } else {
+ result->stat.stat = nlm4_denied;
+ result->stat.nlm4_testrply_u.holder.exclusive =
+ (fl.l_type == F_WRLCK);
+ result->stat.nlm4_testrply_u.holder.svid = fl.l_pid;
+ bhost = nlm_find_host_by_sysid(fl.l_sysid);
+ if (bhost) {
+ /*
+ * We don't have any useful way of recording
+ * the value of oh used in the original lock
+ * request. Ideally, the test reply would have
+ * a space for the owning host's name allowing
+ * our caller's NLM to keep track.
+ *
+ * As far as I can see, Solaris uses an eight
+ * byte structure for oh which contains a four
+ * byte pid encoded in local byte order and
+ * the first four bytes of the host
+ * name. Linux uses a variable length string
+ * 'pid@hostname' in ascii but doesn't even
+ * return that in test replies.
+ *
+ * For the moment, return nothing in oh
+ * (already zero'ed above).
+ */
+ }
+ result->stat.nlm4_testrply_u.holder.l_offset = fl.l_start;
+ result->stat.nlm4_testrply_u.holder.l_len = fl.l_len;
+ }
+
+out:
+ nlm_release_vfs_state(&vs);
+ return (host);
+}
+
+/*
+ * Common implementation of NLM_LOCK/NLM4_LOCK and the NM_LOCK
+ * variants (which pass monitor == FALSE so nlm_host_monitor() is
+ * skipped).  Handles both blocking (argp->block) and non-blocking
+ * requests; blocking requests that cannot be granted immediately are
+ * parked on the host's nh_pending list until the granted callback
+ * fires.  Fills in *result and returns the host structure, or NULL if
+ * no host could be found or created.
+ */
+struct nlm_host *
+nlm_do_lock(nlm4_lockargs *argp, nlm4_res *result, struct svc_req *rqstp,
+ bool_t monitor)
+{
+ fhandle_t fh;
+ struct vfs_state vs;
+ struct nlm_host *host;
+ int error, sysid;
+ struct flock fl;
+
+ memset(result, 0, sizeof(*result));
+
+ host = nlm_find_host_by_name(argp->alock.caller_name, rqstp);
+ if (!host) {
+ result->stat.stat = nlm4_denied_nolocks;
+ return (NULL);
+ }
+
+ if (nlm_debug_level >= 3)
+ printf("nlm_do_lock(): caller_name = %s (sysid = %d)\n",
+ host->nh_caller_name, host->nh_sysid);
+
+ nlm_free_finished_locks(host);
+ sysid = host->nh_sysid;
+
+ nlm_convert_to_fhandle_t(&fh, &argp->alock.fh);
+ nlm_copy_netobj(&result->cookie, &argp->cookie, M_RPC);
+
+ /* During the grace period only reclaim requests are allowed. */
+ if (time_uptime < nlm_grace_threshold && !argp->reclaim) {
+ result->stat.stat = nlm4_denied_grace_period;
+ return (host);
+ }
+
+ error = nlm_get_vfs_state(host, rqstp, &fh, &vs);
+ if (error) {
+ result->stat.stat = nlm_convert_error(error);
+ goto out;
+ }
+
+ /* Describe the wire lock as a struct flock keyed by sysid. */
+ fl.l_start = argp->alock.l_offset;
+ fl.l_len = argp->alock.l_len;
+ fl.l_pid = argp->alock.svid;
+ fl.l_sysid = sysid;
+ fl.l_whence = SEEK_SET;
+ if (argp->exclusive)
+ fl.l_type = F_WRLCK;
+ else
+ fl.l_type = F_RDLCK;
+ if (argp->block) {
+ struct nlm_async_lock *af;
+
+ /*
+ * First, make sure we can contact the host's NLM.
+ */
+ if (!nlm_host_get_rpc(host)) {
+ result->stat.stat = nlm4_failed;
+ goto out;
+ }
+
+ /*
+ * First we need to check and see if there is an
+ * existing blocked lock that matches. This could be a
+ * badly behaved client or an RPC re-send. If we find
+ * one, just return nlm4_blocked.
+ */
+ mtx_lock(&host->nh_lock);
+ TAILQ_FOREACH(af, &host->nh_pending, af_link) {
+ if (af->af_fl.l_start == fl.l_start
+ && af->af_fl.l_len == fl.l_len
+ && af->af_fl.l_pid == fl.l_pid
+ && af->af_fl.l_type == fl.l_type) {
+ break;
+ }
+ }
+ mtx_unlock(&host->nh_lock);
+ if (af) {
+ result->stat.stat = nlm4_blocked;
+ goto out;
+ }
+
+ af = malloc(sizeof(struct nlm_async_lock), M_NLM,
+ M_WAITOK|M_ZERO);
+ /* The task fires when the blocked lock is finally granted. */
+ TASK_INIT(&af->af_task, 0, nlm_lock_callback, af);
+ af->af_vp = vs.vs_vp;
+ af->af_fl = fl;
+ af->af_host = host;
+ /*
+ * We use M_RPC here so that we can xdr_free the thing
+ * later.
+ */
+ af->af_granted.exclusive = argp->exclusive;
+ af->af_granted.alock.caller_name =
+ strdup(argp->alock.caller_name, M_RPC);
+ nlm_copy_netobj(&af->af_granted.alock.fh,
+ &argp->alock.fh, M_RPC);
+ nlm_copy_netobj(&af->af_granted.alock.oh,
+ &argp->alock.oh, M_RPC);
+ af->af_granted.alock.svid = argp->alock.svid;
+ af->af_granted.alock.l_offset = argp->alock.l_offset;
+ af->af_granted.alock.l_len = argp->alock.l_len;
+
+ /*
+ * Put the entry on the pending list before calling
+ * VOP_ADVLOCKASYNC. We do this in case the lock
+ * request was blocked (returning EINPROGRESS) but
+ * then granted before we manage to run again. The
+ * client may receive the granted message before we
+ * send our blocked reply but thats their problem.
+ */
+ mtx_lock(&host->nh_lock);
+ TAILQ_INSERT_TAIL(&host->nh_pending, af, af_link);
+ mtx_unlock(&host->nh_lock);
+
+ error = VOP_ADVLOCKASYNC(vs.vs_vp, NULL, F_SETLK, &fl, F_REMOTE,
+ &af->af_task, &af->af_cookie);
+
+ /*
+ * If the lock completed synchronously, just free the
+ * tracking structure now.
+ */
+ if (error != EINPROGRESS) {
+ mtx_lock(&host->nh_lock);
+ TAILQ_REMOVE(&host->nh_pending, af, af_link);
+ mtx_unlock(&host->nh_lock);
+ xdr_free((xdrproc_t) xdr_nlm4_testargs,
+ &af->af_granted);
+ free(af, M_NLM);
+ } else {
+ if (nlm_debug_level >= 2)
+ printf("NLM: pending async lock %p for %s "
+ "(sysid %d)\n",
+ af, host->nh_caller_name, sysid);
+ /*
+ * Don't vrele the vnode just yet - this must
+ * wait until either the async callback
+ * happens or the lock is cancelled.
+ */
+ vs.vs_vp = NULL;
+ }
+ } else {
+ error = VOP_ADVLOCK(vs.vs_vp, NULL, F_SETLK, &fl, F_REMOTE);
+ }
+
+ /* Translate the advlock result to an NLM4 status. */
+ if (error) {
+ if (error == EINPROGRESS) {
+ result->stat.stat = nlm4_blocked;
+ } else if (error == EDEADLK) {
+ result->stat.stat = nlm4_deadlck;
+ } else if (error == EAGAIN) {
+ result->stat.stat = nlm4_denied;
+ } else {
+ result->stat.stat = nlm4_failed;
+ }
+ } else {
+ /* Only monitored (non-NM) locks register with statd. */
+ if (monitor)
+ nlm_host_monitor(host, argp->state);
+ result->stat.stat = nlm4_granted;
+ }
+
+out:
+ nlm_release_vfs_state(&vs);
+
+ return (host);
+}
+
+/*
+ * Common implementation of NLM_CANCEL/NLM4_CANCEL: cancel a pending
+ * (blocked) asynchronous lock matching argp.  Fills in *result and
+ * returns the host structure, or NULL if no host could be found.
+ */
+struct nlm_host *
+nlm_do_cancel(nlm4_cancargs *argp, nlm4_res *result, struct svc_req *rqstp)
+{
+ fhandle_t fh;
+ struct vfs_state vs;
+ struct nlm_host *host;
+ int error, sysid;
+ struct flock fl;
+ struct nlm_async_lock *af;
+
+ memset(result, 0, sizeof(*result));
+
+ host = nlm_find_host_by_name(argp->alock.caller_name, rqstp);
+ if (!host) {
+ result->stat.stat = nlm4_denied_nolocks;
+ return (NULL);
+ }
+
+ if (nlm_debug_level >= 3)
+ printf("nlm_do_cancel(): caller_name = %s (sysid = %d)\n",
+ host->nh_caller_name, host->nh_sysid);
+
+ nlm_free_finished_locks(host);
+ sysid = host->nh_sysid;
+
+ nlm_convert_to_fhandle_t(&fh, &argp->alock.fh);
+ nlm_copy_netobj(&result->cookie, &argp->cookie, M_RPC);
+
+ if (time_uptime < nlm_grace_threshold) {
+ result->stat.stat = nlm4_denied_grace_period;
+ return (host);
+ }
+
+ error = nlm_get_vfs_state(host, rqstp, &fh, &vs);
+ if (error) {
+ result->stat.stat = nlm_convert_error(error);
+ goto out;
+ }
+
+ /* Reconstruct the flock the original request would have used. */
+ fl.l_start = argp->alock.l_offset;
+ fl.l_len = argp->alock.l_len;
+ fl.l_pid = argp->alock.svid;
+ fl.l_sysid = sysid;
+ fl.l_whence = SEEK_SET;
+ if (argp->exclusive)
+ fl.l_type = F_WRLCK;
+ else
+ fl.l_type = F_RDLCK;
+
+ /*
+ * First we need to try and find the async lock request - if
+ * there isn't one, we give up and return nlm4_denied.
+ */
+ mtx_lock(&host->nh_lock);
+
+ TAILQ_FOREACH(af, &host->nh_pending, af_link) {
+ if (af->af_fl.l_start == fl.l_start
+ && af->af_fl.l_len == fl.l_len
+ && af->af_fl.l_pid == fl.l_pid
+ && af->af_fl.l_type == fl.l_type) {
+ break;
+ }
+ }
+
+ if (!af) {
+ mtx_unlock(&host->nh_lock);
+ result->stat.stat = nlm4_denied;
+ goto out;
+ }
+
+ /* nh_lock is held across the cancel to keep af on the list. */
+ error = nlm_cancel_async_lock(af);
+
+ if (error) {
+ result->stat.stat = nlm4_denied;
+ } else {
+ result->stat.stat = nlm4_granted;
+ }
+
+ mtx_unlock(&host->nh_lock);
+
+out:
+ nlm_release_vfs_state(&vs);
+
+ return (host);
+}
+
+/*
+ * Common implementation of NLM_UNLOCK/NLM4_UNLOCK: release the byte
+ * range described by argp.  Fills in *result and returns the host
+ * structure, or NULL if no host could be found.
+ */
+struct nlm_host *
+nlm_do_unlock(nlm4_unlockargs *argp, nlm4_res *result, struct svc_req *rqstp)
+{
+ fhandle_t fh;
+ struct vfs_state vs;
+ struct nlm_host *host;
+ int error, sysid;
+ struct flock fl;
+
+ memset(result, 0, sizeof(*result));
+
+ host = nlm_find_host_by_name(argp->alock.caller_name, rqstp);
+ if (!host) {
+ result->stat.stat = nlm4_denied_nolocks;
+ return (NULL);
+ }
+
+ if (nlm_debug_level >= 3)
+ printf("nlm_do_unlock(): caller_name = %s (sysid = %d)\n",
+ host->nh_caller_name, host->nh_sysid);
+
+ nlm_free_finished_locks(host);
+ sysid = host->nh_sysid;
+
+ nlm_convert_to_fhandle_t(&fh, &argp->alock.fh);
+ nlm_copy_netobj(&result->cookie, &argp->cookie, M_RPC);
+
+ if (time_uptime < nlm_grace_threshold) {
+ result->stat.stat = nlm4_denied_grace_period;
+ return (host);
+ }
+
+ error = nlm_get_vfs_state(host, rqstp, &fh, &vs);
+ if (error) {
+ result->stat.stat = nlm_convert_error(error);
+ goto out;
+ }
+
+ fl.l_start = argp->alock.l_offset;
+ fl.l_len = argp->alock.l_len;
+ fl.l_pid = argp->alock.svid;
+ fl.l_sysid = sysid;
+ fl.l_whence = SEEK_SET;
+ fl.l_type = F_UNLCK;
+ error = VOP_ADVLOCK(vs.vs_vp, NULL, F_UNLCK, &fl, F_REMOTE);
+
+ /*
+ * Ignore the error - there is no result code for failure,
+ * only for grace period.
+ */
+ result->stat.stat = nlm4_granted;
+
+out:
+ nlm_release_vfs_state(&vs);
+
+ return (host);
+}
+
+/*
+ * Common implementation of NLM_FREE_ALL/NLM4_FREE_ALL: drop all state
+ * held for every host record matching the named client.  The SAFE
+ * iterator is required because nlm_host_notify may unlink the entry.
+ */
+void
+nlm_do_free_all(nlm4_notify *argp)
+{
+ struct nlm_host *host, *thost;
+
+ TAILQ_FOREACH_SAFE(host, &nlm_hosts, nh_link, thost) {
+ if (!strcmp(host->nh_caller_name, argp->name))
+ nlm_host_notify(host, argp->state, FALSE);
+ }
+}
+
+#define _PATH_RPCLOCKDSOCK "/var/run/rpclockd.sock"
+
+/*
+ * Make a connection to the userland lockd - we push anything we can't
+ * handle out to userland.
+ */
+/*
+ * Return the cached RPC handle to the userland rpc.lockd, creating it
+ * on first use.  Returns NULL if the client handle could not be
+ * created; callers already check for a NULL return.
+ */
+CLIENT *
+nlm_user_lockd(void)
+{
+ struct sockaddr_un sun;
+ struct netconfig *nconf;
+ struct timeval zero;
+
+ if (nlm_lockd)
+ return (nlm_lockd);
+
+ sun.sun_family = AF_LOCAL;
+ strcpy(sun.sun_path, _PATH_RPCLOCKDSOCK);
+ sun.sun_len = SUN_LEN(&sun);
+
+ nconf = getnetconfigent("local");
+ nlm_lockd = clnt_reconnect_create(nconf, (struct sockaddr *) &sun,
+ NLM_PROG, NLM_VERS4, RPC_MAXDATASIZE, RPC_MAXDATASIZE);
+ /*
+ * If the handle could not be created, fail cleanly instead of
+ * passing a NULL client to CLNT_CONTROL below.
+ */
+ if (!nlm_lockd)
+ return (NULL);
+
+ /*
+ * Set the send timeout to zero - we only use this rpc handle
+ * for sending async replies which have no return value.
+ */
+ zero.tv_sec = 0;
+ zero.tv_usec = 0;
+ CLNT_CONTROL(nlm_lockd, CLSET_TIMEOUT, &zero);
+
+ return (nlm_lockd);
+}
diff --git a/sys/nlm/nlm_prot_server.c b/sys/nlm/nlm_prot_server.c
new file mode 100644
index 0000000..3e4499d
--- /dev/null
+++ b/sys/nlm/nlm_prot_server.c
@@ -0,0 +1,762 @@
+/*-
+ * Copyright (c) 2008 Isilon Inc http://www.isilon.com/
+ * Authors: Doug Rabson <dfr@rabson.org>
+ * Developed with Red Inc: Alfred Perlstein <alfred@freebsd.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+#ifndef lint
+/*static char sccsid[] = "from: @(#)nlm_prot.x 1.8 87/09/21 Copyr 1987 Sun Micro";*/
+/*static char sccsid[] = "from: * @(#)nlm_prot.x 2.1 88/08/01 4.0 RPCSRC";*/
+__RCSID("$NetBSD: nlm_prot.x,v 1.6 2000/06/07 14:30:15 bouyer Exp $");
+#endif /* not lint */
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/malloc.h>
+#include <sys/systm.h>
+
+#include "nlm_prot.h"
+#include "nlm.h"
+
+/**********************************************************************/
+
+/*
+ * Convert between various versions of the protocol structures.
+ */
+
+/*
+ * Field-for-field copy of a v1/v3 lock description into the v4 form.
+ */
+static void
+nlm_convert_to_nlm4_lock(struct nlm4_lock *dst, struct nlm_lock *src)
+{
+
+ *dst = (struct nlm4_lock) {
+ .caller_name = src->caller_name,
+ .fh = src->fh,
+ .oh = src->oh,
+ .svid = src->svid,
+ .l_offset = src->l_offset,
+ .l_len = src->l_len,
+ };
+}
+
+/*
+ * Field-for-field copy of a v1/v3 share description into the v4 form.
+ */
+static void
+nlm_convert_to_nlm4_share(struct nlm4_share *dst, struct nlm_share *src)
+{
+
+ *dst = (struct nlm4_share) {
+ .caller_name = src->caller_name,
+ .fh = src->fh,
+ .oh = src->oh,
+ .mode = src->mode,
+ .access = src->access,
+ };
+}
+
+/*
+ * Narrow a v4 lock-holder description to the v1/v3 form.
+ */
+static void
+nlm_convert_to_nlm_holder(struct nlm_holder *dst, struct nlm4_holder *src)
+{
+
+ *dst = (struct nlm_holder) {
+ .exclusive = src->exclusive,
+ .svid = src->svid,
+ .oh = src->oh,
+ .l_offset = src->l_offset,
+ .l_len = src->l_len,
+ };
+}
+
+/*
+ * Widen a v1/v3 lock-holder description to the v4 form.
+ */
+static void
+nlm_convert_to_nlm4_holder(struct nlm4_holder *dst, struct nlm_holder *src)
+{
+
+ *dst = (struct nlm4_holder) {
+ .exclusive = src->exclusive,
+ .svid = src->svid,
+ .oh = src->oh,
+ .l_offset = src->l_offset,
+ .l_len = src->l_len,
+ };
+}
+
+/*
+ * Narrow a v4 status to the v1/v3 range.  Codes beyond nlm4_deadlck
+ * have no older equivalent and collapse to a plain denial.
+ */
+static enum nlm_stats
+nlm_convert_to_nlm_stats(enum nlm4_stats src)
+{
+ return (src > nlm4_deadlck ? nlm_denied : (enum nlm_stats) src);
+}
+
+/*
+ * Narrow a v4 result (cookie + status) to the v1/v3 form.
+ */
+static void
+nlm_convert_to_nlm_res(struct nlm_res *dst, struct nlm4_res *src)
+{
+ dst->cookie = src->cookie;
+ dst->stat.stat = nlm_convert_to_nlm_stats(src->stat.stat);
+}
+
+/*
+ * Widen a v1/v3 result (cookie + status) to the v4 form.
+ */
+static void
+nlm_convert_to_nlm4_res(struct nlm4_res *dst, struct nlm_res *src)
+{
+ dst->cookie = src->cookie;
+ dst->stat.stat = (enum nlm4_stats) src->stat.stat;
+}
+
+/**********************************************************************/
+
+/*
+ * RPC server stubs.
+ */
+
+/*
+ * SM_NOTIFY callback from statd: dispatch to the common handler.
+ */
+bool_t
+nlm_sm_notify_0_svc(struct nlm_sm_status *argp, void *result, struct svc_req *rqstp)
+{
+ nlm_sm_notify(argp);
+
+ return (TRUE);
+}
+
+/*
+ * NLM_TEST (v1): widen the arguments to v4, run the common code and
+ * narrow the results back for the reply.
+ */
+bool_t
+nlm_test_1_svc(struct nlm_testargs *argp, nlm_testres *result, struct svc_req *rqstp)
+{
+ bool_t retval;
+ nlm4_testargs args4;
+ nlm4_testres res4;
+
+ args4.cookie = argp->cookie;
+ args4.exclusive = argp->exclusive;
+ nlm_convert_to_nlm4_lock(&args4.alock, &argp->alock);
+
+ retval = nlm4_test_4_svc(&args4, &res4, rqstp);
+ if (retval) {
+ result->cookie = res4.cookie;
+ result->stat.stat = nlm_convert_to_nlm_stats(res4.stat.stat);
+ /* Only denied replies carry a holder description. */
+ if (result->stat.stat == nlm_denied)
+ nlm_convert_to_nlm_holder(
+ &result->stat.nlm_testrply_u.holder,
+ &res4.stat.nlm4_testrply_u.holder);
+ }
+
+ return (retval);
+}
+
+/*
+ * NLM_LOCK (v1): widen to v4, run the common code, narrow the result.
+ */
+bool_t
+nlm_lock_1_svc(struct nlm_lockargs *argp, nlm_res *result, struct svc_req *rqstp)
+{
+ bool_t retval;
+ nlm4_lockargs args4;
+ nlm4_res res4;
+
+ args4.cookie = argp->cookie;
+ args4.block = argp->block;
+ args4.exclusive = argp->exclusive;
+ nlm_convert_to_nlm4_lock(&args4.alock, &argp->alock);
+ args4.reclaim = argp->reclaim;
+ args4.state = argp->state;
+
+ retval = nlm4_lock_4_svc(&args4, &res4, rqstp);
+ if (retval)
+ nlm_convert_to_nlm_res(result, &res4);
+
+ return (retval);
+}
+
+/*
+ * NLM_CANCEL (v1): widen to v4, run the common code, narrow the result.
+ */
+bool_t
+nlm_cancel_1_svc(struct nlm_cancargs *argp, nlm_res *result, struct svc_req *rqstp)
+{
+ bool_t retval;
+ nlm4_cancargs args4;
+ nlm4_res res4;
+
+ args4.cookie = argp->cookie;
+ args4.block = argp->block;
+ args4.exclusive = argp->exclusive;
+ nlm_convert_to_nlm4_lock(&args4.alock, &argp->alock);
+
+ retval = nlm4_cancel_4_svc(&args4, &res4, rqstp);
+ if (retval)
+ nlm_convert_to_nlm_res(result, &res4);
+
+ return (retval);
+}
+
+/*
+ * NLM_UNLOCK (v1): widen to v4, run the common code, narrow the result.
+ */
+bool_t
+nlm_unlock_1_svc(struct nlm_unlockargs *argp, nlm_res *result, struct svc_req *rqstp)
+{
+ bool_t retval;
+ nlm4_unlockargs args4;
+ nlm4_res res4;
+
+ args4.cookie = argp->cookie;
+ nlm_convert_to_nlm4_lock(&args4.alock, &argp->alock);
+
+ retval = nlm4_unlock_4_svc(&args4, &res4, rqstp);
+ if (retval)
+ nlm_convert_to_nlm_res(result, &res4);
+
+ return (retval);
+}
+
+/*
+ * NLM_GRANTED (v1): a granted callback from a remote NLM; widen to v4
+ * and hand to the common code, narrowing the result for the reply.
+ */
+bool_t
+nlm_granted_1_svc(struct nlm_testargs *argp, nlm_res *result, struct svc_req *rqstp)
+{
+ bool_t retval;
+ nlm4_testargs args4;
+ nlm4_res res4;
+
+ args4.cookie = argp->cookie;
+ args4.exclusive = argp->exclusive;
+ nlm_convert_to_nlm4_lock(&args4.alock, &argp->alock);
+
+ retval = nlm4_granted_4_svc(&args4, &res4, rqstp);
+ if (retval)
+ nlm_convert_to_nlm_res(result, &res4);
+
+ return (retval);
+}
+
+/*
+ * NLM_TEST_MSG (v1): async variant - perform the test and send the
+ * result back via a separate NLM_TEST_RES call.  Always returns FALSE
+ * so no synchronous RPC reply is generated.
+ */
+bool_t
+nlm_test_msg_1_svc(struct nlm_testargs *argp, void *result, struct svc_req *rqstp)
+{
+ nlm4_testargs args4;
+ nlm4_testres res4;
+ nlm_testres res;
+ struct nlm_host *host;
+ CLIENT *rpc;
+ char dummy;
+
+ args4.cookie = argp->cookie;
+ args4.exclusive = argp->exclusive;
+ nlm_convert_to_nlm4_lock(&args4.alock, &argp->alock);
+
+ host = nlm_do_test(&args4, &res4, rqstp);
+
+ res.cookie = res4.cookie;
+ res.stat.stat = nlm_convert_to_nlm_stats(res4.stat.stat);
+ if (res.stat.stat == nlm_denied)
+ nlm_convert_to_nlm_holder(
+ &res.stat.nlm_testrply_u.holder,
+ &res4.stat.nlm4_testrply_u.holder);
+
+ /*
+ * nlm_do_test() returns NULL when no host structure could be
+ * found or created; don't dereference it in that case - there
+ * is nowhere to send the async reply anyway.
+ */
+ if (host) {
+ rpc = nlm_host_get_rpc(host);
+ if (rpc)
+ nlm_test_res_1(&res, &dummy, rpc);
+ }
+ xdr_free((xdrproc_t) xdr_nlm_testres, &res);
+
+ return (FALSE);
+}
+
+/*
+ * NLM_LOCK_MSG (v1): async variant - perform the lock and send the
+ * result back via a separate NLM_LOCK_RES call.  Always returns FALSE
+ * so no synchronous RPC reply is generated.
+ */
+bool_t
+nlm_lock_msg_1_svc(struct nlm_lockargs *argp, void *result, struct svc_req *rqstp)
+{
+ nlm4_lockargs args4;
+ nlm4_res res4;
+ nlm_res res;
+ struct nlm_host *host;
+ CLIENT *rpc;
+ char dummy;
+
+ args4.cookie = argp->cookie;
+ args4.block = argp->block;
+ args4.exclusive = argp->exclusive;
+ nlm_convert_to_nlm4_lock(&args4.alock, &argp->alock);
+ args4.reclaim = argp->reclaim;
+ args4.state = argp->state;
+
+ host = nlm_do_lock(&args4, &res4, rqstp, TRUE);
+
+ nlm_convert_to_nlm_res(&res, &res4);
+
+ /* nlm_do_lock() may return NULL; avoid a NULL dereference. */
+ if (host) {
+ rpc = nlm_host_get_rpc(host);
+ if (rpc)
+ nlm_lock_res_1(&res, &dummy, rpc);
+ }
+ xdr_free((xdrproc_t) xdr_nlm_res, &res);
+
+ return (FALSE);
+}
+
+/*
+ * NLM_CANCEL_MSG (v1): async variant - perform the cancel and send the
+ * result back via a separate NLM_CANCEL_RES call.  Always returns
+ * FALSE so no synchronous RPC reply is generated.
+ */
+bool_t
+nlm_cancel_msg_1_svc(struct nlm_cancargs *argp, void *result, struct svc_req *rqstp)
+{
+ nlm4_cancargs args4;
+ nlm4_res res4;
+ nlm_res res;
+ struct nlm_host *host;
+ CLIENT *rpc;
+ char dummy;
+
+ args4.cookie = argp->cookie;
+ args4.block = argp->block;
+ args4.exclusive = argp->exclusive;
+ nlm_convert_to_nlm4_lock(&args4.alock, &argp->alock);
+
+ host = nlm_do_cancel(&args4, &res4, rqstp);
+
+ nlm_convert_to_nlm_res(&res, &res4);
+
+ /* nlm_do_cancel() may return NULL; avoid a NULL dereference. */
+ if (host) {
+ rpc = nlm_host_get_rpc(host);
+ if (rpc)
+ nlm_cancel_res_1(&res, &dummy, rpc);
+ }
+ xdr_free((xdrproc_t) xdr_nlm_res, &res);
+
+ return (FALSE);
+}
+
+/*
+ * NLM_UNLOCK_MSG (v1): async variant - perform the unlock and send the
+ * result back via a separate NLM_UNLOCK_RES call.  Always returns
+ * FALSE so no synchronous RPC reply is generated.
+ */
+bool_t
+nlm_unlock_msg_1_svc(struct nlm_unlockargs *argp, void *result, struct svc_req *rqstp)
+{
+ nlm4_unlockargs args4;
+ nlm4_res res4;
+ nlm_res res;
+ struct nlm_host *host;
+ CLIENT *rpc;
+ char dummy;
+
+ args4.cookie = argp->cookie;
+ nlm_convert_to_nlm4_lock(&args4.alock, &argp->alock);
+
+ host = nlm_do_unlock(&args4, &res4, rqstp);
+
+ nlm_convert_to_nlm_res(&res, &res4);
+
+ /* nlm_do_unlock() may return NULL; avoid a NULL dereference. */
+ if (host) {
+ rpc = nlm_host_get_rpc(host);
+ if (rpc)
+ nlm_unlock_res_1(&res, &dummy, rpc);
+ }
+ xdr_free((xdrproc_t) xdr_nlm_res, &res);
+
+ return (FALSE);
+}
+
+/*
+ * NLM_GRANTED_MSG (v1): async granted callback from a remote NLM.
+ * The reply for the userland client is obtained synchronously and
+ * then sent back via a separate NLM_GRANTED_RES call.
+ */
+bool_t
+nlm_granted_msg_1_svc(struct nlm_testargs *argp, void *result, struct svc_req *rqstp)
+{
+ nlm4_testargs args4;
+ nlm4_res res4;
+ nlm_res res;
+ struct nlm_host *host;
+ CLIENT *rpc;
+ char dummy;
+
+ args4.cookie = argp->cookie;
+ args4.exclusive = argp->exclusive;
+ nlm_convert_to_nlm4_lock(&args4.alock, &argp->alock);
+
+ /*
+ * We make a synchronous call to userland and send the reply
+ * back async.
+ */
+ nlm4_granted_4_svc(&args4, &res4, rqstp);
+
+ nlm_convert_to_nlm_res(&res, &res4);
+
+ /* Guard against a NULL host before using its RPC handle. */
+ host = nlm_find_host_by_addr(
+ (struct sockaddr *) rqstp->rq_xprt->xp_rtaddr.buf,
+ rqstp->rq_vers);
+ if (host) {
+ rpc = nlm_host_get_rpc(host);
+ if (rpc)
+ nlm_granted_res_1(&res, &dummy, rpc);
+ }
+ xdr_free((xdrproc_t) xdr_nlm_res, &res);
+
+ return (FALSE);
+}
+
+/*
+ * NLM_TEST_RES (v1): async test reply from a remote NLM; widen to the
+ * v4 form and forward to the common handler.
+ */
+bool_t
+nlm_test_res_1_svc(nlm_testres *argp, void *result, struct svc_req *rqstp)
+{
+ nlm4_testres args4;
+
+ args4.cookie = argp->cookie;
+ /*
+ * The status must be converted unconditionally - previously it
+ * was left uninitialized, forwarding a garbage stack value.
+ */
+ args4.stat.stat = (enum nlm4_stats) argp->stat.stat;
+ if (argp->stat.stat == nlm_denied)
+ nlm_convert_to_nlm4_holder(
+ &args4.stat.nlm4_testrply_u.holder,
+ &argp->stat.nlm_testrply_u.holder);
+
+ return (nlm4_test_res_4_svc(&args4, result, rqstp));
+}
+
+/*
+ * NLM_LOCK_RES (v1): async lock reply; widen to v4 and forward.
+ */
+bool_t
+nlm_lock_res_1_svc(nlm_res *argp, void *result, struct svc_req *rqstp)
+{
+ nlm4_res arg4;
+
+ nlm_convert_to_nlm4_res(&arg4, argp);
+ return (nlm4_lock_res_4_svc(&arg4, result, rqstp));
+}
+
+/*
+ * NLM_CANCEL_RES (v1): async cancel reply; widen to v4 and forward.
+ */
+bool_t
+nlm_cancel_res_1_svc(nlm_res *argp, void *result, struct svc_req *rqstp)
+{
+ nlm4_res arg4;
+
+ nlm_convert_to_nlm4_res(&arg4, argp);
+ return (nlm4_cancel_res_4_svc(&arg4, result, rqstp));
+}
+
+/*
+ * NLM_UNLOCK_RES (v1): async unlock reply; widen to v4 and forward.
+ */
+bool_t
+nlm_unlock_res_1_svc(nlm_res *argp, void *result, struct svc_req *rqstp)
+{
+ nlm4_res arg4;
+
+ nlm_convert_to_nlm4_res(&arg4, argp);
+ return (nlm4_unlock_res_4_svc(&arg4, result, rqstp));
+}
+
+/*
+ * NLM_GRANTED_RES (v1): async granted reply; widen to v4 and forward.
+ */
+bool_t
+nlm_granted_res_1_svc(nlm_res *argp, void *result, struct svc_req *rqstp)
+{
+ nlm4_res arg4;
+
+ nlm_convert_to_nlm4_res(&arg4, argp);
+ return (nlm4_granted_res_4_svc(&arg4, result, rqstp));
+}
+
+/*
+ * Free XDR-allocated memory in a version 1 result after the reply has
+ * been sent.
+ */
+int
+nlm_prog_1_freeresult(SVCXPRT *transp, xdrproc_t xdr_result, caddr_t result)
+{
+
+ (void) xdr_free(xdr_result, result);
+ return (TRUE);
+}
+
+/*
+ * NLM_SHARE (v3): widen to v4, run the common code, narrow the result.
+ */
+bool_t
+nlm_share_3_svc(nlm_shareargs *argp, nlm_shareres *result, struct svc_req *rqstp)
+{
+ bool_t retval;
+ nlm4_shareargs args4;
+ nlm4_shareres res4;
+
+ args4.cookie = argp->cookie;
+ nlm_convert_to_nlm4_share(&args4.share, &argp->share);
+ args4.reclaim = argp->reclaim;
+
+ retval = nlm4_share_4_svc(&args4, &res4, rqstp);
+ if (retval) {
+ result->cookie = res4.cookie;
+ result->stat = nlm_convert_to_nlm_stats(res4.stat);
+ result->sequence = res4.sequence;
+ }
+
+ return (retval);
+}
+
+/*
+ * NLM_UNSHARE (v3): widen to v4, run the common code, narrow the
+ * result.
+ */
+bool_t
+nlm_unshare_3_svc(nlm_shareargs *argp, nlm_shareres *result, struct svc_req *rqstp)
+{
+ bool_t retval;
+ nlm4_shareargs args4;
+ nlm4_shareres res4;
+
+ args4.cookie = argp->cookie;
+ nlm_convert_to_nlm4_share(&args4.share, &argp->share);
+ args4.reclaim = argp->reclaim;
+
+ retval = nlm4_unshare_4_svc(&args4, &res4, rqstp);
+ if (retval) {
+ result->cookie = res4.cookie;
+ result->stat = nlm_convert_to_nlm_stats(res4.stat);
+ result->sequence = res4.sequence;
+ }
+
+ return (retval);
+}
+
+/*
+ * NLM_NM_LOCK (v3): non-monitored lock request; widen to v4, run the
+ * common code, narrow the result.
+ */
+bool_t
+nlm_nm_lock_3_svc(nlm_lockargs *argp, nlm_res *result, struct svc_req *rqstp)
+{
+ bool_t retval;
+ nlm4_lockargs args4;
+ nlm4_res res4;
+
+ args4.cookie = argp->cookie;
+ args4.block = argp->block;
+ args4.exclusive = argp->exclusive;
+ nlm_convert_to_nlm4_lock(&args4.alock, &argp->alock);
+ args4.reclaim = argp->reclaim;
+ args4.state = argp->state;
+
+ retval = nlm4_nm_lock_4_svc(&args4, &res4, rqstp);
+ if (retval)
+ nlm_convert_to_nlm_res(result, &res4);
+
+ return (retval);
+}
+
+/*
+ * NLM_FREE_ALL (v3): widen to v4 and forward to the common handler.
+ */
+bool_t
+nlm_free_all_3_svc(nlm_notify *argp, void *result, struct svc_req *rqstp)
+{
+ struct nlm4_notify args4;
+
+ args4.name = argp->name;
+ args4.state = argp->state;
+
+ return (nlm4_free_all_4_svc(&args4, result, rqstp));
+}
+
+/*
+ * Free XDR-allocated memory in a version 3 result after the reply has
+ * been sent.
+ */
+int
+nlm_prog_3_freeresult(SVCXPRT *transp, xdrproc_t xdr_result, caddr_t result)
+{
+
+ (void) xdr_free(xdr_result, result);
+ return (TRUE);
+}
+
+/*
+ * NLM4_TEST: synchronous entry point for the common test code.
+ */
+bool_t
+nlm4_test_4_svc(nlm4_testargs *argp, nlm4_testres *result, struct svc_req *rqstp)
+{
+
+ nlm_do_test(argp, result, rqstp);
+ return (TRUE);
+}
+
+/*
+ * NLM4_LOCK: synchronous entry point; monitored (TRUE) locking.
+ */
+bool_t
+nlm4_lock_4_svc(nlm4_lockargs *argp, nlm4_res *result, struct svc_req *rqstp)
+{
+
+ nlm_do_lock(argp, result, rqstp, TRUE);
+ return (TRUE);
+}
+
+/*
+ * NLM4_CANCEL: synchronous entry point for the common cancel code.
+ */
+bool_t
+nlm4_cancel_4_svc(nlm4_cancargs *argp, nlm4_res *result, struct svc_req *rqstp)
+{
+
+ nlm_do_cancel(argp, result, rqstp);
+ return (TRUE);
+}
+
+/*
+ * NLM4_UNLOCK: synchronous entry point for the common unlock code.
+ */
+bool_t
+nlm4_unlock_4_svc(nlm4_unlockargs *argp, nlm4_res *result, struct svc_req *rqstp)
+{
+
+ nlm_do_unlock(argp, result, rqstp);
+ return (TRUE);
+}
+
+/*
+ * NLM4_GRANTED: a remote NLM is granting a lock the local (userland)
+ * NFS client was blocked on.  Relay the callback synchronously to the
+ * userland rpc.lockd over the local-domain RPC socket.
+ */
+bool_t
+nlm4_granted_4_svc(nlm4_testargs *argp, nlm4_res *result, struct svc_req *rqstp)
+{
+ CLIENT* lockd;
+ struct timeval tv;
+
+ memset(result, 0, sizeof(*result));
+ nlm_copy_netobj(&result->cookie, &argp->cookie, M_RPC);
+
+ /*
+ * Set a non-zero timeout to give the userland a chance to reply.
+ * NOTE(review): the timeout is toggled on the shared lockd
+ * handle - confirm concurrent users of the handle cannot observe
+ * the temporary 20s setting.
+ */
+ lockd = nlm_user_lockd();
+ if (!lockd) {
+ result->stat.stat = nlm4_failed;
+ return (TRUE);
+ }
+ tv.tv_sec = 20;
+ tv.tv_usec = 0;
+ CLNT_CONTROL(lockd, CLSET_TIMEOUT, &tv);
+ nlm4_granted_4(argp, result, lockd);
+ tv.tv_sec = 0;
+ tv.tv_usec = 0;
+ CLNT_CONTROL(lockd, CLSET_TIMEOUT, &tv);
+
+ return (TRUE);
+}
+
+/*
+ * NLM4_TEST_MSG: async variant - perform the test and send the result
+ * back via a separate NLM4_TEST_RES call.  Always returns FALSE so no
+ * synchronous RPC reply is generated.
+ */
+bool_t
+nlm4_test_msg_4_svc(nlm4_testargs *argp, void *result, struct svc_req *rqstp)
+{
+ nlm4_testres res4;
+ struct nlm_host *host;
+ CLIENT *rpc;
+ char dummy;
+
+ host = nlm_do_test(argp, &res4, rqstp);
+ /* nlm_do_test() may return NULL; avoid a NULL dereference. */
+ if (host) {
+ rpc = nlm_host_get_rpc(host);
+ if (rpc)
+ nlm4_test_res_4(&res4, &dummy, rpc);
+ }
+ xdr_free((xdrproc_t) xdr_nlm4_testres, &res4);
+
+ return (FALSE);
+}
+
+/*
+ * NLM4_LOCK_MSG: async variant - perform the lock and send the result
+ * back via a separate NLM4_LOCK_RES call.  Always returns FALSE so no
+ * synchronous RPC reply is generated.
+ */
+bool_t
+nlm4_lock_msg_4_svc(nlm4_lockargs *argp, void *result, struct svc_req *rqstp)
+{
+ nlm4_res res4;
+ struct nlm_host *host;
+ CLIENT *rpc;
+ char dummy;
+
+ host = nlm_do_lock(argp, &res4, rqstp, TRUE);
+ /* nlm_do_lock() may return NULL; avoid a NULL dereference. */
+ if (host) {
+ rpc = nlm_host_get_rpc(host);
+ if (rpc)
+ nlm4_lock_res_4(&res4, &dummy, rpc);
+ }
+ xdr_free((xdrproc_t) xdr_nlm4_res, &res4);
+
+ return (FALSE);
+}
+
+/*
+ * NLM4_CANCEL_MSG: async variant - perform the cancel and send the
+ * result back via a separate NLM4_CANCEL_RES call.  Always returns
+ * FALSE so no synchronous RPC reply is generated.
+ */
+bool_t
+nlm4_cancel_msg_4_svc(nlm4_cancargs *argp, void *result, struct svc_req *rqstp)
+{
+ nlm4_res res4;
+ struct nlm_host *host;
+ CLIENT *rpc;
+ char dummy;
+
+ host = nlm_do_cancel(argp, &res4, rqstp);
+ /* nlm_do_cancel() may return NULL; avoid a NULL dereference. */
+ if (host) {
+ rpc = nlm_host_get_rpc(host);
+ if (rpc)
+ nlm4_cancel_res_4(&res4, &dummy, rpc);
+ }
+ xdr_free((xdrproc_t) xdr_nlm4_res, &res4);
+
+ return (FALSE);
+}
+
+/*
+ * NLM4_UNLOCK_MSG: async variant - perform the unlock and send the
+ * result back via a separate NLM4_UNLOCK_RES call.  Always returns
+ * FALSE so no synchronous RPC reply is generated.
+ */
+bool_t
+nlm4_unlock_msg_4_svc(nlm4_unlockargs *argp, void *result, struct svc_req *rqstp)
+{
+ nlm4_res res4;
+ struct nlm_host *host;
+ CLIENT *rpc;
+ char dummy;
+
+ host = nlm_do_unlock(argp, &res4, rqstp);
+ /* nlm_do_unlock() may return NULL; avoid a NULL dereference. */
+ if (host) {
+ rpc = nlm_host_get_rpc(host);
+ if (rpc)
+ nlm4_unlock_res_4(&res4, &dummy, rpc);
+ }
+ xdr_free((xdrproc_t) xdr_nlm4_res, &res4);
+
+ return (FALSE);
+}
+
+/*
+ * NLM4_GRANTED_MSG: async granted callback from a remote NLM.  The
+ * reply for the userland client is obtained synchronously and then
+ * sent back via a separate NLM4_GRANTED_RES call.
+ */
+bool_t
+nlm4_granted_msg_4_svc(nlm4_testargs *argp, void *result, struct svc_req *rqstp)
+{
+ struct nlm_host *host;
+ CLIENT *rpc;
+ nlm4_res res4;
+ char dummy;
+
+ /*
+ * We make a synchronous call to userland and send the reply
+ * back async.
+ */
+ nlm4_granted_4_svc(argp, &res4, rqstp);
+
+ /* Guard against a NULL host before using its RPC handle. */
+ host = nlm_find_host_by_addr(
+ (struct sockaddr *) rqstp->rq_xprt->xp_rtaddr.buf,
+ rqstp->rq_vers);
+ if (host) {
+ rpc = nlm_host_get_rpc(host);
+ if (rpc)
+ nlm4_granted_res_4(&res4, &dummy, rpc);
+ }
+ xdr_free((xdrproc_t) xdr_nlm4_res, &res4);
+
+ return (FALSE);
+}
+
+/*
+ * NLM4_TEST_RES: async test reply destined for the local NFS client;
+ * relay it to the userland rpc.lockd.  Returns FALSE - no reply is
+ * generated for one-way messages.
+ */
+bool_t
+nlm4_test_res_4_svc(nlm4_testres *argp, void *result, struct svc_req *rqstp)
+{
+ CLIENT* lockd;
+
+ lockd = nlm_user_lockd();
+ if (lockd)
+ nlm4_test_res_4(argp, result, lockd);
+
+ return (FALSE);
+}
+
+/*
+ * NLM4_LOCK_RES: async lock reply; relay to the userland rpc.lockd.
+ */
+bool_t
+nlm4_lock_res_4_svc(nlm4_res *argp, void *result, struct svc_req *rqstp)
+{
+ CLIENT* lockd;
+
+ lockd = nlm_user_lockd();
+ if (lockd)
+ nlm4_lock_res_4(argp, result, lockd);
+
+ return (FALSE);
+}
+
+/*
+ * NLM4_CANCEL_RES: async cancel reply; relay to the userland rpc.lockd.
+ */
+bool_t
+nlm4_cancel_res_4_svc(nlm4_res *argp, void *result, struct svc_req *rqstp)
+{
+ CLIENT* lockd;
+
+ lockd = nlm_user_lockd();
+ if (lockd)
+ nlm4_cancel_res_4(argp, result, lockd);
+
+ return (FALSE);
+}
+
+/*
+ * NLM4_UNLOCK_RES: async unlock reply; relay to the userland rpc.lockd.
+ */
+bool_t
+nlm4_unlock_res_4_svc(nlm4_res *argp, void *result, struct svc_req *rqstp)
+{
+ CLIENT* lockd;
+
+ lockd = nlm_user_lockd();
+ if (lockd)
+ nlm4_unlock_res_4(argp, result, lockd);
+
+ return (FALSE);
+}
+
+/*
+ * NLM4_GRANTED_RES: reply to our own granted callback; nothing to do,
+ * and no reply is generated.
+ */
+bool_t
+nlm4_granted_res_4_svc(nlm4_res *argp, void *result, struct svc_req *rqstp)
+{
+
+ return (FALSE);
+}
+
+/*
+ * NLM4_SHARE: DOS-style share reservations are not supported; always
+ * deny.
+ */
+bool_t
+nlm4_share_4_svc(nlm4_shareargs *argp, nlm4_shareres *result, struct svc_req *rqstp)
+{
+
+ memset(result, 0, sizeof(*result));
+ result->stat = nlm4_denied;
+ return (TRUE);
+}
+
+/*
+ * NLM4_UNSHARE: share reservations are not supported; always deny.
+ */
+bool_t
+nlm4_unshare_4_svc(nlm4_shareargs *argp, nlm4_shareres *result, struct svc_req *rqstp)
+{
+
+ memset(result, 0, sizeof(*result));
+ result->stat = nlm4_denied;
+ return (TRUE);
+}
+
+/*
+ * NLM4_NM_LOCK: non-monitored lock - same as NLM4_LOCK but the client
+ * is not registered with statd (monitor == FALSE).
+ */
+bool_t
+nlm4_nm_lock_4_svc(nlm4_lockargs *argp, nlm4_res *result, struct svc_req *rqstp)
+{
+
+ nlm_do_lock(argp, result, rqstp, FALSE);
+ return (TRUE);
+}
+
+/*
+ * NLM4_FREE_ALL: drop all state held for the named client.
+ */
+bool_t
+nlm4_free_all_4_svc(nlm4_notify *argp, void *result, struct svc_req *rqstp)
+{
+
+ nlm_do_free_all(argp);
+ return (TRUE);
+}
+
+/*
+ * Free XDR-allocated memory in a version 4 result after the reply has
+ * been sent.
+ */
+int
+nlm_prog_4_freeresult(SVCXPRT *transp, xdrproc_t xdr_result, caddr_t result)
+{
+
+ (void) xdr_free(xdr_result, result);
+ return (TRUE);
+}
diff --git a/sys/nlm/nlm_prot_svc.c b/sys/nlm/nlm_prot_svc.c
new file mode 100644
index 0000000..eca6d86
--- /dev/null
+++ b/sys/nlm/nlm_prot_svc.c
@@ -0,0 +1,509 @@
+/*-
+ * Copyright (c) 2008 Isilon Inc http://www.isilon.com/
+ * Authors: Doug Rabson <dfr@rabson.org>
+ * Developed with Red Inc: Alfred Perlstein <alfred@freebsd.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+
+#include "nlm_prot.h"
+#include "nlm.h"
+
+#include <sys/cdefs.h>
+#ifndef lint
+/*static char sccsid[] = "from: @(#)nlm_prot.x 1.8 87/09/21 Copyr 1987 Sun Micro";*/
+/*static char sccsid[] = "from: * @(#)nlm_prot.x 2.1 88/08/01 4.0 RPCSRC";*/
+__RCSID("$NetBSD: nlm_prot.x,v 1.6 2000/06/07 14:30:15 bouyer Exp $");
+#endif /* not lint */
+__FBSDID("$FreeBSD$");
+
+void nlm_prog_0(struct svc_req *rqstp, SVCXPRT *transp);
+void nlm_prog_1(struct svc_req *rqstp, SVCXPRT *transp);
+void nlm_prog_3(struct svc_req *rqstp, SVCXPRT *transp);
+void nlm_prog_4(struct svc_req *rqstp, SVCXPRT *transp);
+
+void
+nlm_prog_0(struct svc_req *rqstp, SVCXPRT *transp)
+{
+ union {
+ struct nlm_sm_status nlm_sm_notify_0_arg;
+ } argument;
+ char result;
+ bool_t retval;
+ xdrproc_t xdr_argument, xdr_result;
+ bool_t (*local)(char *, void *, struct svc_req *);
+
+ switch (rqstp->rq_proc) {
+ case NULLPROC:
+ (void) svc_sendreply(transp,
+ (xdrproc_t) xdr_void, (char *)NULL);
+ return;
+
+ case NLM_SM_NOTIFY:
+ xdr_argument = (xdrproc_t) xdr_nlm_sm_status;
+ xdr_result = (xdrproc_t) xdr_void;
+ local = (bool_t (*) (char *, void *, struct svc_req *))nlm_sm_notify_0_svc;
+ break;
+
+ default:
+ svcerr_noproc(transp);
+ return;
+ }
+ (void) memset((char *)&argument, 0, sizeof (argument));
+ if (!svc_getargs(transp, xdr_argument, (char *)(caddr_t) &argument)) {
+ svcerr_decode(transp);
+ return;
+ }
+ retval = (bool_t) (*local)((char *)&argument, (void *)&result, rqstp);
+ if (retval > 0 && !svc_sendreply(transp, xdr_result, (char *)&result)) {
+ svcerr_systemerr(transp);
+ }
+ if (!svc_freeargs(transp, xdr_argument, (char *)(caddr_t) &argument)) {
+ printf("unable to free arguments");
+ //exit(1);
+ }
+
+ return;
+}
+
+void
+nlm_prog_1(struct svc_req *rqstp, SVCXPRT *transp)
+{
+ union {
+ struct nlm_testargs nlm_test_1_arg;
+ struct nlm_lockargs nlm_lock_1_arg;
+ struct nlm_cancargs nlm_cancel_1_arg;
+ struct nlm_unlockargs nlm_unlock_1_arg;
+ struct nlm_testargs nlm_granted_1_arg;
+ struct nlm_testargs nlm_test_msg_1_arg;
+ struct nlm_lockargs nlm_lock_msg_1_arg;
+ struct nlm_cancargs nlm_cancel_msg_1_arg;
+ struct nlm_unlockargs nlm_unlock_msg_1_arg;
+ struct nlm_testargs nlm_granted_msg_1_arg;
+ nlm_testres nlm_test_res_1_arg;
+ nlm_res nlm_lock_res_1_arg;
+ nlm_res nlm_cancel_res_1_arg;
+ nlm_res nlm_unlock_res_1_arg;
+ nlm_res nlm_granted_res_1_arg;
+ } argument;
+ union {
+ nlm_testres nlm_test_1_res;
+ nlm_res nlm_lock_1_res;
+ nlm_res nlm_cancel_1_res;
+ nlm_res nlm_unlock_1_res;
+ nlm_res nlm_granted_1_res;
+ } result;
+ bool_t retval;
+ xdrproc_t xdr_argument, xdr_result;
+ bool_t (*local)(char *, void *, struct svc_req *);
+
+ switch (rqstp->rq_proc) {
+ case NULLPROC:
+ (void) svc_sendreply(transp,
+ (xdrproc_t) xdr_void, (char *)NULL);
+ return;
+
+ case NLM_TEST:
+ xdr_argument = (xdrproc_t) xdr_nlm_testargs;
+ xdr_result = (xdrproc_t) xdr_nlm_testres;
+ local = (bool_t (*) (char *, void *, struct svc_req *))nlm_test_1_svc;
+ break;
+
+ case NLM_LOCK:
+ xdr_argument = (xdrproc_t) xdr_nlm_lockargs;
+ xdr_result = (xdrproc_t) xdr_nlm_res;
+ local = (bool_t (*) (char *, void *, struct svc_req *))nlm_lock_1_svc;
+ break;
+
+ case NLM_CANCEL:
+ xdr_argument = (xdrproc_t) xdr_nlm_cancargs;
+ xdr_result = (xdrproc_t) xdr_nlm_res;
+ local = (bool_t (*) (char *, void *, struct svc_req *))nlm_cancel_1_svc;
+ break;
+
+ case NLM_UNLOCK:
+ xdr_argument = (xdrproc_t) xdr_nlm_unlockargs;
+ xdr_result = (xdrproc_t) xdr_nlm_res;
+ local = (bool_t (*) (char *, void *, struct svc_req *))nlm_unlock_1_svc;
+ break;
+
+ case NLM_GRANTED:
+ xdr_argument = (xdrproc_t) xdr_nlm_testargs;
+ xdr_result = (xdrproc_t) xdr_nlm_res;
+ local = (bool_t (*) (char *, void *, struct svc_req *))nlm_granted_1_svc;
+ break;
+
+ case NLM_TEST_MSG:
+ xdr_argument = (xdrproc_t) xdr_nlm_testargs;
+ xdr_result = (xdrproc_t) xdr_void;
+ local = (bool_t (*) (char *, void *, struct svc_req *))nlm_test_msg_1_svc;
+ break;
+
+ case NLM_LOCK_MSG:
+ xdr_argument = (xdrproc_t) xdr_nlm_lockargs;
+ xdr_result = (xdrproc_t) xdr_void;
+ local = (bool_t (*) (char *, void *, struct svc_req *))nlm_lock_msg_1_svc;
+ break;
+
+ case NLM_CANCEL_MSG:
+ xdr_argument = (xdrproc_t) xdr_nlm_cancargs;
+ xdr_result = (xdrproc_t) xdr_void;
+ local = (bool_t (*) (char *, void *, struct svc_req *))nlm_cancel_msg_1_svc;
+ break;
+
+ case NLM_UNLOCK_MSG:
+ xdr_argument = (xdrproc_t) xdr_nlm_unlockargs;
+ xdr_result = (xdrproc_t) xdr_void;
+ local = (bool_t (*) (char *, void *, struct svc_req *))nlm_unlock_msg_1_svc;
+ break;
+
+ case NLM_GRANTED_MSG:
+ xdr_argument = (xdrproc_t) xdr_nlm_testargs;
+ xdr_result = (xdrproc_t) xdr_void;
+ local = (bool_t (*) (char *, void *, struct svc_req *))nlm_granted_msg_1_svc;
+ break;
+
+ case NLM_TEST_RES:
+ xdr_argument = (xdrproc_t) xdr_nlm_testres;
+ xdr_result = (xdrproc_t) xdr_void;
+ local = (bool_t (*) (char *, void *, struct svc_req *))nlm_test_res_1_svc;
+ break;
+
+ case NLM_LOCK_RES:
+ xdr_argument = (xdrproc_t) xdr_nlm_res;
+ xdr_result = (xdrproc_t) xdr_void;
+ local = (bool_t (*) (char *, void *, struct svc_req *))nlm_lock_res_1_svc;
+ break;
+
+ case NLM_CANCEL_RES:
+ xdr_argument = (xdrproc_t) xdr_nlm_res;
+ xdr_result = (xdrproc_t) xdr_void;
+ local = (bool_t (*) (char *, void *, struct svc_req *))nlm_cancel_res_1_svc;
+ break;
+
+ case NLM_UNLOCK_RES:
+ xdr_argument = (xdrproc_t) xdr_nlm_res;
+ xdr_result = (xdrproc_t) xdr_void;
+ local = (bool_t (*) (char *, void *, struct svc_req *))nlm_unlock_res_1_svc;
+ break;
+
+ case NLM_GRANTED_RES:
+ xdr_argument = (xdrproc_t) xdr_nlm_res;
+ xdr_result = (xdrproc_t) xdr_void;
+ local = (bool_t (*) (char *, void *, struct svc_req *))nlm_granted_res_1_svc;
+ break;
+
+ default:
+ svcerr_noproc(transp);
+ return;
+ }
+ (void) memset((char *)&argument, 0, sizeof (argument));
+ if (!svc_getargs(transp, xdr_argument, (char *)(caddr_t) &argument)) {
+ svcerr_decode(transp);
+ return;
+ }
+ retval = (bool_t) (*local)((char *)&argument, (void *)&result, rqstp);
+ if (retval > 0 && !svc_sendreply(transp, xdr_result, (char *)&result)) {
+ svcerr_systemerr(transp);
+ }
+ if (!svc_freeargs(transp, xdr_argument, (char *)(caddr_t) &argument)) {
+ printf("unable to free arguments");
+ //exit(1);
+ }
+ if (!nlm_prog_1_freeresult(transp, xdr_result, (caddr_t) &result))
+ printf("unable to free results");
+
+ return;
+}
+
+void
+nlm_prog_3(struct svc_req *rqstp, SVCXPRT *transp)
+{
+ union {
+ nlm_shareargs nlm_share_3_arg;
+ nlm_shareargs nlm_unshare_3_arg;
+ nlm_lockargs nlm_nm_lock_3_arg;
+ nlm_notify nlm_free_all_3_arg;
+ } argument;
+ union {
+ nlm_shareres nlm_share_3_res;
+ nlm_shareres nlm_unshare_3_res;
+ nlm_res nlm_nm_lock_3_res;
+ } result;
+ bool_t retval;
+ xdrproc_t xdr_argument, xdr_result;
+ bool_t (*local)(char *, void *, struct svc_req *);
+
+ switch (rqstp->rq_proc) {
+ case NULLPROC:
+ (void) svc_sendreply(transp,
+ (xdrproc_t) xdr_void, (char *)NULL);
+ return;
+
+ case NLM_TEST:
+ case NLM_LOCK:
+ case NLM_CANCEL:
+ case NLM_UNLOCK:
+ case NLM_GRANTED:
+ case NLM_TEST_MSG:
+ case NLM_LOCK_MSG:
+ case NLM_CANCEL_MSG:
+ case NLM_UNLOCK_MSG:
+ case NLM_GRANTED_MSG:
+ case NLM_TEST_RES:
+ case NLM_LOCK_RES:
+ case NLM_CANCEL_RES:
+ case NLM_UNLOCK_RES:
+ case NLM_GRANTED_RES:
+ nlm_prog_1(rqstp, transp);
+ return;
+
+ case NLM_SHARE:
+ xdr_argument = (xdrproc_t) xdr_nlm_shareargs;
+ xdr_result = (xdrproc_t) xdr_nlm_shareres;
+ local = (bool_t (*) (char *, void *, struct svc_req *))nlm_share_3_svc;
+ break;
+
+ case NLM_UNSHARE:
+ xdr_argument = (xdrproc_t) xdr_nlm_shareargs;
+ xdr_result = (xdrproc_t) xdr_nlm_shareres;
+ local = (bool_t (*) (char *, void *, struct svc_req *))nlm_unshare_3_svc;
+ break;
+
+ case NLM_NM_LOCK:
+ xdr_argument = (xdrproc_t) xdr_nlm_lockargs;
+ xdr_result = (xdrproc_t) xdr_nlm_res;
+ local = (bool_t (*) (char *, void *, struct svc_req *))nlm_nm_lock_3_svc;
+ break;
+
+ case NLM_FREE_ALL:
+ xdr_argument = (xdrproc_t) xdr_nlm_notify;
+ xdr_result = (xdrproc_t) xdr_void;
+ local = (bool_t (*) (char *, void *, struct svc_req *))nlm_free_all_3_svc;
+ break;
+
+ default:
+ svcerr_noproc(transp);
+ return;
+ }
+ (void) memset((char *)&argument, 0, sizeof (argument));
+ if (!svc_getargs(transp, xdr_argument, (char *)(caddr_t) &argument)) {
+ svcerr_decode(transp);
+ return;
+ }
+ retval = (bool_t) (*local)((char *)&argument, (void *)&result, rqstp);
+ if (retval > 0 && !svc_sendreply(transp, xdr_result, (char *)&result)) {
+ svcerr_systemerr(transp);
+ }
+ if (!svc_freeargs(transp, xdr_argument, (char *)(caddr_t) &argument)) {
+ printf("unable to free arguments");
+ //exit(1);
+ }
+ if (!nlm_prog_3_freeresult(transp, xdr_result, (caddr_t) &result))
+ printf("unable to free results");
+
+ return;
+}
+
+void
+nlm_prog_4(struct svc_req *rqstp, SVCXPRT *transp)
+{
+ union {
+ nlm4_testargs nlm4_test_4_arg;
+ nlm4_lockargs nlm4_lock_4_arg;
+ nlm4_cancargs nlm4_cancel_4_arg;
+ nlm4_unlockargs nlm4_unlock_4_arg;
+ nlm4_testargs nlm4_granted_4_arg;
+ nlm4_testargs nlm4_test_msg_4_arg;
+ nlm4_lockargs nlm4_lock_msg_4_arg;
+ nlm4_cancargs nlm4_cancel_msg_4_arg;
+ nlm4_unlockargs nlm4_unlock_msg_4_arg;
+ nlm4_testargs nlm4_granted_msg_4_arg;
+ nlm4_testres nlm4_test_res_4_arg;
+ nlm4_res nlm4_lock_res_4_arg;
+ nlm4_res nlm4_cancel_res_4_arg;
+ nlm4_res nlm4_unlock_res_4_arg;
+ nlm4_res nlm4_granted_res_4_arg;
+ nlm4_shareargs nlm4_share_4_arg;
+ nlm4_shareargs nlm4_unshare_4_arg;
+ nlm4_lockargs nlm4_nm_lock_4_arg;
+ nlm4_notify nlm4_free_all_4_arg;
+ } argument;
+ union {
+ nlm4_testres nlm4_test_4_res;
+ nlm4_res nlm4_lock_4_res;
+ nlm4_res nlm4_cancel_4_res;
+ nlm4_res nlm4_unlock_4_res;
+ nlm4_res nlm4_granted_4_res;
+ nlm4_shareres nlm4_share_4_res;
+ nlm4_shareres nlm4_unshare_4_res;
+ nlm4_res nlm4_nm_lock_4_res;
+ } result;
+ bool_t retval;
+ xdrproc_t xdr_argument, xdr_result;
+ bool_t (*local)(char *, void *, struct svc_req *);
+
+ switch (rqstp->rq_proc) {
+ case NULLPROC:
+ (void) svc_sendreply(transp,
+ (xdrproc_t) xdr_void, (char *)NULL);
+ return;
+
+ case NLM4_TEST:
+ xdr_argument = (xdrproc_t) xdr_nlm4_testargs;
+ xdr_result = (xdrproc_t) xdr_nlm4_testres;
+ local = (bool_t (*) (char *, void *, struct svc_req *))nlm4_test_4_svc;
+ break;
+
+ case NLM4_LOCK:
+ xdr_argument = (xdrproc_t) xdr_nlm4_lockargs;
+ xdr_result = (xdrproc_t) xdr_nlm4_res;
+ local = (bool_t (*) (char *, void *, struct svc_req *))nlm4_lock_4_svc;
+ break;
+
+ case NLM4_CANCEL:
+ xdr_argument = (xdrproc_t) xdr_nlm4_cancargs;
+ xdr_result = (xdrproc_t) xdr_nlm4_res;
+ local = (bool_t (*) (char *, void *, struct svc_req *))nlm4_cancel_4_svc;
+ break;
+
+ case NLM4_UNLOCK:
+ xdr_argument = (xdrproc_t) xdr_nlm4_unlockargs;
+ xdr_result = (xdrproc_t) xdr_nlm4_res;
+ local = (bool_t (*) (char *, void *, struct svc_req *))nlm4_unlock_4_svc;
+ break;
+
+ case NLM4_GRANTED:
+ xdr_argument = (xdrproc_t) xdr_nlm4_testargs;
+ xdr_result = (xdrproc_t) xdr_nlm4_res;
+ local = (bool_t (*) (char *, void *, struct svc_req *))nlm4_granted_4_svc;
+ break;
+
+ case NLM4_TEST_MSG:
+ xdr_argument = (xdrproc_t) xdr_nlm4_testargs;
+ xdr_result = (xdrproc_t) xdr_void;
+ local = (bool_t (*) (char *, void *, struct svc_req *))nlm4_test_msg_4_svc;
+ break;
+
+ case NLM4_LOCK_MSG:
+ xdr_argument = (xdrproc_t) xdr_nlm4_lockargs;
+ xdr_result = (xdrproc_t) xdr_void;
+ local = (bool_t (*) (char *, void *, struct svc_req *))nlm4_lock_msg_4_svc;
+ break;
+
+ case NLM4_CANCEL_MSG:
+ xdr_argument = (xdrproc_t) xdr_nlm4_cancargs;
+ xdr_result = (xdrproc_t) xdr_void;
+ local = (bool_t (*) (char *, void *, struct svc_req *))nlm4_cancel_msg_4_svc;
+ break;
+
+ case NLM4_UNLOCK_MSG:
+ xdr_argument = (xdrproc_t) xdr_nlm4_unlockargs;
+ xdr_result = (xdrproc_t) xdr_void;
+ local = (bool_t (*) (char *, void *, struct svc_req *))nlm4_unlock_msg_4_svc;
+ break;
+
+ case NLM4_GRANTED_MSG:
+ xdr_argument = (xdrproc_t) xdr_nlm4_testargs;
+ xdr_result = (xdrproc_t) xdr_void;
+ local = (bool_t (*) (char *, void *, struct svc_req *))nlm4_granted_msg_4_svc;
+ break;
+
+ case NLM4_TEST_RES:
+ xdr_argument = (xdrproc_t) xdr_nlm4_testres;
+ xdr_result = (xdrproc_t) xdr_void;
+ local = (bool_t (*) (char *, void *, struct svc_req *))nlm4_test_res_4_svc;
+ break;
+
+ case NLM4_LOCK_RES:
+ xdr_argument = (xdrproc_t) xdr_nlm4_res;
+ xdr_result = (xdrproc_t) xdr_void;
+ local = (bool_t (*) (char *, void *, struct svc_req *))nlm4_lock_res_4_svc;
+ break;
+
+ case NLM4_CANCEL_RES:
+ xdr_argument = (xdrproc_t) xdr_nlm4_res;
+ xdr_result = (xdrproc_t) xdr_void;
+ local = (bool_t (*) (char *, void *, struct svc_req *))nlm4_cancel_res_4_svc;
+ break;
+
+ case NLM4_UNLOCK_RES:
+ xdr_argument = (xdrproc_t) xdr_nlm4_res;
+ xdr_result = (xdrproc_t) xdr_void;
+ local = (bool_t (*) (char *, void *, struct svc_req *))nlm4_unlock_res_4_svc;
+ break;
+
+ case NLM4_GRANTED_RES:
+ xdr_argument = (xdrproc_t) xdr_nlm4_res;
+ xdr_result = (xdrproc_t) xdr_void;
+ local = (bool_t (*) (char *, void *, struct svc_req *))nlm4_granted_res_4_svc;
+ break;
+
+ case NLM4_SHARE:
+ xdr_argument = (xdrproc_t) xdr_nlm4_shareargs;
+ xdr_result = (xdrproc_t) xdr_nlm4_shareres;
+ local = (bool_t (*) (char *, void *, struct svc_req *))nlm4_share_4_svc;
+ break;
+
+ case NLM4_UNSHARE:
+ xdr_argument = (xdrproc_t) xdr_nlm4_shareargs;
+ xdr_result = (xdrproc_t) xdr_nlm4_shareres;
+ local = (bool_t (*) (char *, void *, struct svc_req *))nlm4_unshare_4_svc;
+ break;
+
+ case NLM4_NM_LOCK:
+ xdr_argument = (xdrproc_t) xdr_nlm4_lockargs;
+ xdr_result = (xdrproc_t) xdr_nlm4_res;
+ local = (bool_t (*) (char *, void *, struct svc_req *))nlm4_nm_lock_4_svc;
+ break;
+
+ case NLM4_FREE_ALL:
+ xdr_argument = (xdrproc_t) xdr_nlm4_notify;
+ xdr_result = (xdrproc_t) xdr_void;
+ local = (bool_t (*) (char *, void *, struct svc_req *))nlm4_free_all_4_svc;
+ break;
+
+ default:
+ svcerr_noproc(transp);
+ return;
+ }
+ (void) memset((char *)&argument, 0, sizeof (argument));
+ if (!svc_getargs(transp, xdr_argument, (char *)(caddr_t) &argument)) {
+ svcerr_decode(transp);
+ return;
+ }
+ retval = (bool_t) (*local)((char *)&argument, (void *)&result, rqstp);
+ if (retval > 0 && !svc_sendreply(transp, xdr_result, (char *)&result)) {
+ svcerr_systemerr(transp);
+ }
+ if (!svc_freeargs(transp, xdr_argument, (char *)(caddr_t) &argument)) {
+ printf("unable to free arguments");
+ //exit(1);
+ }
+ if (!nlm_prog_4_freeresult(transp, xdr_result, (caddr_t) &result))
+ printf("unable to free results");
+
+ return;
+}
diff --git a/sys/nlm/nlm_prot_xdr.c b/sys/nlm/nlm_prot_xdr.c
new file mode 100644
index 0000000..034cbbc
--- /dev/null
+++ b/sys/nlm/nlm_prot_xdr.c
@@ -0,0 +1,454 @@
+/*
+ * Please do not edit this file.
+ * It was generated using rpcgen.
+ */
+
+#include "nlm_prot.h"
+#include <sys/cdefs.h>
+#ifndef lint
+/*static char sccsid[] = "from: @(#)nlm_prot.x 1.8 87/09/21 Copyr 1987 Sun Micro";*/
+/*static char sccsid[] = "from: * @(#)nlm_prot.x 2.1 88/08/01 4.0 RPCSRC";*/
+__RCSID("$NetBSD: nlm_prot.x,v 1.6 2000/06/07 14:30:15 bouyer Exp $");
+#endif /* not lint */
+__FBSDID("$FreeBSD$");
+
+bool_t
+xdr_nlm_stats(XDR *xdrs, nlm_stats *objp)
+{
+
+ if (!xdr_enum(xdrs, (enum_t *)objp))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr_nlm_holder(XDR *xdrs, nlm_holder *objp)
+{
+
+ if (!xdr_bool(xdrs, &objp->exclusive))
+ return (FALSE);
+ if (!xdr_int(xdrs, &objp->svid))
+ return (FALSE);
+ if (!xdr_netobj(xdrs, &objp->oh))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->l_offset))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->l_len))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr_nlm_testrply(XDR *xdrs, nlm_testrply *objp)
+{
+
+ if (!xdr_nlm_stats(xdrs, &objp->stat))
+ return (FALSE);
+ switch (objp->stat) {
+ case nlm_denied:
+ if (!xdr_nlm_holder(xdrs, &objp->nlm_testrply_u.holder))
+ return (FALSE);
+ break;
+ default:
+ break;
+ }
+ return (TRUE);
+}
+
+bool_t
+xdr_nlm_stat(XDR *xdrs, nlm_stat *objp)
+{
+
+ if (!xdr_nlm_stats(xdrs, &objp->stat))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr_nlm_res(XDR *xdrs, nlm_res *objp)
+{
+
+ if (!xdr_netobj(xdrs, &objp->cookie))
+ return (FALSE);
+ if (!xdr_nlm_stat(xdrs, &objp->stat))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr_nlm_testres(XDR *xdrs, nlm_testres *objp)
+{
+
+ if (!xdr_netobj(xdrs, &objp->cookie))
+ return (FALSE);
+ if (!xdr_nlm_testrply(xdrs, &objp->stat))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr_nlm_lock(XDR *xdrs, nlm_lock *objp)
+{
+
+ if (!xdr_string(xdrs, &objp->caller_name, LM_MAXSTRLEN))
+ return (FALSE);
+ if (!xdr_netobj(xdrs, &objp->fh))
+ return (FALSE);
+ if (!xdr_netobj(xdrs, &objp->oh))
+ return (FALSE);
+ if (!xdr_int(xdrs, &objp->svid))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->l_offset))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->l_len))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr_nlm_lockargs(XDR *xdrs, nlm_lockargs *objp)
+{
+
+ if (!xdr_netobj(xdrs, &objp->cookie))
+ return (FALSE);
+ if (!xdr_bool(xdrs, &objp->block))
+ return (FALSE);
+ if (!xdr_bool(xdrs, &objp->exclusive))
+ return (FALSE);
+ if (!xdr_nlm_lock(xdrs, &objp->alock))
+ return (FALSE);
+ if (!xdr_bool(xdrs, &objp->reclaim))
+ return (FALSE);
+ if (!xdr_int(xdrs, &objp->state))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr_nlm_cancargs(XDR *xdrs, nlm_cancargs *objp)
+{
+
+ if (!xdr_netobj(xdrs, &objp->cookie))
+ return (FALSE);
+ if (!xdr_bool(xdrs, &objp->block))
+ return (FALSE);
+ if (!xdr_bool(xdrs, &objp->exclusive))
+ return (FALSE);
+ if (!xdr_nlm_lock(xdrs, &objp->alock))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr_nlm_testargs(XDR *xdrs, nlm_testargs *objp)
+{
+
+ if (!xdr_netobj(xdrs, &objp->cookie))
+ return (FALSE);
+ if (!xdr_bool(xdrs, &objp->exclusive))
+ return (FALSE);
+ if (!xdr_nlm_lock(xdrs, &objp->alock))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr_nlm_unlockargs(XDR *xdrs, nlm_unlockargs *objp)
+{
+
+ if (!xdr_netobj(xdrs, &objp->cookie))
+ return (FALSE);
+ if (!xdr_nlm_lock(xdrs, &objp->alock))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr_fsh_mode(XDR *xdrs, fsh_mode *objp)
+{
+
+ if (!xdr_enum(xdrs, (enum_t *)objp))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr_fsh_access(XDR *xdrs, fsh_access *objp)
+{
+
+ if (!xdr_enum(xdrs, (enum_t *)objp))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr_nlm_share(XDR *xdrs, nlm_share *objp)
+{
+
+ if (!xdr_string(xdrs, &objp->caller_name, LM_MAXSTRLEN))
+ return (FALSE);
+ if (!xdr_netobj(xdrs, &objp->fh))
+ return (FALSE);
+ if (!xdr_netobj(xdrs, &objp->oh))
+ return (FALSE);
+ if (!xdr_fsh_mode(xdrs, &objp->mode))
+ return (FALSE);
+ if (!xdr_fsh_access(xdrs, &objp->access))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr_nlm_shareargs(XDR *xdrs, nlm_shareargs *objp)
+{
+
+ if (!xdr_netobj(xdrs, &objp->cookie))
+ return (FALSE);
+ if (!xdr_nlm_share(xdrs, &objp->share))
+ return (FALSE);
+ if (!xdr_bool(xdrs, &objp->reclaim))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr_nlm_shareres(XDR *xdrs, nlm_shareres *objp)
+{
+
+ if (!xdr_netobj(xdrs, &objp->cookie))
+ return (FALSE);
+ if (!xdr_nlm_stats(xdrs, &objp->stat))
+ return (FALSE);
+ if (!xdr_int(xdrs, &objp->sequence))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr_nlm_notify(XDR *xdrs, nlm_notify *objp)
+{
+
+ if (!xdr_string(xdrs, &objp->name, MAXNAMELEN))
+ return (FALSE);
+ if (!xdr_long(xdrs, &objp->state))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr_nlm4_stats(XDR *xdrs, nlm4_stats *objp)
+{
+
+ if (!xdr_enum(xdrs, (enum_t *)objp))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr_nlm4_stat(XDR *xdrs, nlm4_stat *objp)
+{
+
+ if (!xdr_nlm4_stats(xdrs, &objp->stat))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr_nlm4_holder(XDR *xdrs, nlm4_holder *objp)
+{
+
+ if (!xdr_bool(xdrs, &objp->exclusive))
+ return (FALSE);
+ if (!xdr_uint32_t(xdrs, &objp->svid))
+ return (FALSE);
+ if (!xdr_netobj(xdrs, &objp->oh))
+ return (FALSE);
+ if (!xdr_uint64_t(xdrs, &objp->l_offset))
+ return (FALSE);
+ if (!xdr_uint64_t(xdrs, &objp->l_len))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr_nlm4_lock(XDR *xdrs, nlm4_lock *objp)
+{
+
+ if (!xdr_string(xdrs, &objp->caller_name, MAXNAMELEN))
+ return (FALSE);
+ if (!xdr_netobj(xdrs, &objp->fh))
+ return (FALSE);
+ if (!xdr_netobj(xdrs, &objp->oh))
+ return (FALSE);
+ if (!xdr_uint32_t(xdrs, &objp->svid))
+ return (FALSE);
+ if (!xdr_uint64_t(xdrs, &objp->l_offset))
+ return (FALSE);
+ if (!xdr_uint64_t(xdrs, &objp->l_len))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr_nlm4_share(XDR *xdrs, nlm4_share *objp)
+{
+
+ if (!xdr_string(xdrs, &objp->caller_name, MAXNAMELEN))
+ return (FALSE);
+ if (!xdr_netobj(xdrs, &objp->fh))
+ return (FALSE);
+ if (!xdr_netobj(xdrs, &objp->oh))
+ return (FALSE);
+ if (!xdr_fsh_mode(xdrs, &objp->mode))
+ return (FALSE);
+ if (!xdr_fsh_access(xdrs, &objp->access))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr_nlm4_testrply(XDR *xdrs, nlm4_testrply *objp)
+{
+
+ if (!xdr_nlm4_stats(xdrs, &objp->stat))
+ return (FALSE);
+ switch (objp->stat) {
+ case nlm_denied:
+ if (!xdr_nlm4_holder(xdrs, &objp->nlm4_testrply_u.holder))
+ return (FALSE);
+ break;
+ default:
+ break;
+ }
+ return (TRUE);
+}
+
+bool_t
+xdr_nlm4_testres(XDR *xdrs, nlm4_testres *objp)
+{
+
+ if (!xdr_netobj(xdrs, &objp->cookie))
+ return (FALSE);
+ if (!xdr_nlm4_testrply(xdrs, &objp->stat))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr_nlm4_testargs(XDR *xdrs, nlm4_testargs *objp)
+{
+
+ if (!xdr_netobj(xdrs, &objp->cookie))
+ return (FALSE);
+ if (!xdr_bool(xdrs, &objp->exclusive))
+ return (FALSE);
+ if (!xdr_nlm4_lock(xdrs, &objp->alock))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr_nlm4_res(XDR *xdrs, nlm4_res *objp)
+{
+
+ if (!xdr_netobj(xdrs, &objp->cookie))
+ return (FALSE);
+ if (!xdr_nlm4_stat(xdrs, &objp->stat))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr_nlm4_lockargs(XDR *xdrs, nlm4_lockargs *objp)
+{
+
+ if (!xdr_netobj(xdrs, &objp->cookie))
+ return (FALSE);
+ if (!xdr_bool(xdrs, &objp->block))
+ return (FALSE);
+ if (!xdr_bool(xdrs, &objp->exclusive))
+ return (FALSE);
+ if (!xdr_nlm4_lock(xdrs, &objp->alock))
+ return (FALSE);
+ if (!xdr_bool(xdrs, &objp->reclaim))
+ return (FALSE);
+ if (!xdr_int(xdrs, &objp->state))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr_nlm4_cancargs(XDR *xdrs, nlm4_cancargs *objp)
+{
+
+ if (!xdr_netobj(xdrs, &objp->cookie))
+ return (FALSE);
+ if (!xdr_bool(xdrs, &objp->block))
+ return (FALSE);
+ if (!xdr_bool(xdrs, &objp->exclusive))
+ return (FALSE);
+ if (!xdr_nlm4_lock(xdrs, &objp->alock))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr_nlm4_unlockargs(XDR *xdrs, nlm4_unlockargs *objp)
+{
+
+ if (!xdr_netobj(xdrs, &objp->cookie))
+ return (FALSE);
+ if (!xdr_nlm4_lock(xdrs, &objp->alock))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr_nlm4_shareargs(XDR *xdrs, nlm4_shareargs *objp)
+{
+
+ if (!xdr_netobj(xdrs, &objp->cookie))
+ return (FALSE);
+ if (!xdr_nlm4_share(xdrs, &objp->share))
+ return (FALSE);
+ if (!xdr_bool(xdrs, &objp->reclaim))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr_nlm4_shareres(XDR *xdrs, nlm4_shareres *objp)
+{
+
+ if (!xdr_netobj(xdrs, &objp->cookie))
+ return (FALSE);
+ if (!xdr_nlm4_stats(xdrs, &objp->stat))
+ return (FALSE);
+ if (!xdr_int(xdrs, &objp->sequence))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr_nlm_sm_status(XDR *xdrs, nlm_sm_status *objp)
+{
+
+ if (!xdr_string(xdrs, &objp->mon_name, LM_MAXSTRLEN))
+ return (FALSE);
+ if (!xdr_int(xdrs, &objp->state))
+ return (FALSE);
+ if (!xdr_opaque(xdrs, objp->priv, 16))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr_nlm4_notify(XDR *xdrs, nlm4_notify *objp)
+{
+
+ if (!xdr_string(xdrs, &objp->name, MAXNAMELEN))
+ return (FALSE);
+ if (!xdr_int32_t(xdrs, &objp->state))
+ return (FALSE);
+ return (TRUE);
+}
diff --git a/sys/nlm/sm_inter.h b/sys/nlm/sm_inter.h
new file mode 100644
index 0000000..0cc240b
--- /dev/null
+++ b/sys/nlm/sm_inter.h
@@ -0,0 +1,112 @@
+/*
+ * Please do not edit this file.
+ * It was generated using rpcgen.
+ */
+/* $FreeBSD$ */
+
+#ifndef _SM_INTER_H_RPCGEN
+#define _SM_INTER_H_RPCGEN
+
+#include <rpc/rpc.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define SM_MAXSTRLEN 1024
+
+struct sm_name {
+ char *mon_name;
+};
+typedef struct sm_name sm_name;
+
+struct my_id {
+ char *my_name;
+ int my_prog;
+ int my_vers;
+ int my_proc;
+};
+typedef struct my_id my_id;
+
+struct mon_id {
+ char *mon_name;
+ struct my_id my_id;
+};
+typedef struct mon_id mon_id;
+
+struct mon {
+ struct mon_id mon_id;
+ char priv[16];
+};
+typedef struct mon mon;
+
+struct stat_chge {
+ char *mon_name;
+ int state;
+};
+typedef struct stat_chge stat_chge;
+
+struct sm_stat {
+ int state;
+};
+typedef struct sm_stat sm_stat;
+
+enum sm_res {
+ stat_succ = 0,
+ stat_fail = 1
+};
+typedef enum sm_res sm_res;
+
+struct sm_stat_res {
+ sm_res res_stat;
+ int state;
+};
+typedef struct sm_stat_res sm_stat_res;
+
+struct sm_status {
+ char *mon_name;
+ int state;
+ char priv[16];
+};
+typedef struct sm_status sm_status;
+
+#define SM_PROG ((unsigned long)(100024))
+#define SM_VERS ((unsigned long)(1))
+
+extern void sm_prog_1(struct svc_req *rqstp, SVCXPRT *transp);
+#define SM_STAT ((unsigned long)(1))
+extern struct sm_stat_res * sm_stat_1(struct sm_name *, CLIENT *);
+extern struct sm_stat_res * sm_stat_1_svc(struct sm_name *, struct svc_req *);
+#define SM_MON ((unsigned long)(2))
+extern struct sm_stat_res * sm_mon_1(struct mon *, CLIENT *);
+extern struct sm_stat_res * sm_mon_1_svc(struct mon *, struct svc_req *);
+#define SM_UNMON ((unsigned long)(3))
+extern struct sm_stat * sm_unmon_1(struct mon_id *, CLIENT *);
+extern struct sm_stat * sm_unmon_1_svc(struct mon_id *, struct svc_req *);
+#define SM_UNMON_ALL ((unsigned long)(4))
+extern struct sm_stat * sm_unmon_all_1(struct my_id *, CLIENT *);
+extern struct sm_stat * sm_unmon_all_1_svc(struct my_id *, struct svc_req *);
+#define SM_SIMU_CRASH ((unsigned long)(5))
+extern void * sm_simu_crash_1(void *, CLIENT *);
+extern void * sm_simu_crash_1_svc(void *, struct svc_req *);
+#define SM_NOTIFY ((unsigned long)(6))
+extern void * sm_notify_1(struct stat_chge *, CLIENT *);
+extern void * sm_notify_1_svc(struct stat_chge *, struct svc_req *);
+extern int sm_prog_1_freeresult(SVCXPRT *, xdrproc_t, caddr_t);
+
+/* the xdr functions */
+extern bool_t xdr_sm_name(XDR *, sm_name*);
+extern bool_t xdr_my_id(XDR *, my_id*);
+extern bool_t xdr_mon_id(XDR *, mon_id*);
+extern bool_t xdr_mon(XDR *, mon*);
+extern bool_t xdr_stat_chge(XDR *, stat_chge*);
+extern bool_t xdr_sm_stat(XDR *, sm_stat*);
+extern bool_t xdr_sm_res(XDR *, sm_res*);
+extern bool_t xdr_sm_stat_res(XDR *, sm_stat_res*);
+extern bool_t xdr_sm_status(XDR *, sm_status*);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !_SM_INTER_H_RPCGEN */
diff --git a/sys/nlm/sm_inter_xdr.c b/sys/nlm/sm_inter_xdr.c
new file mode 100644
index 0000000..5f75432
--- /dev/null
+++ b/sys/nlm/sm_inter_xdr.c
@@ -0,0 +1,107 @@
+/*
+ * Please do not edit this file.
+ * It was generated using rpcgen.
+ */
+
+#include "sm_inter.h"
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+bool_t
+xdr_sm_name(XDR *xdrs, sm_name *objp)
+{
+
+ if (!xdr_string(xdrs, &objp->mon_name, SM_MAXSTRLEN))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr_my_id(XDR *xdrs, my_id *objp)
+{
+
+ if (!xdr_string(xdrs, &objp->my_name, SM_MAXSTRLEN))
+ return (FALSE);
+ if (!xdr_int(xdrs, &objp->my_prog))
+ return (FALSE);
+ if (!xdr_int(xdrs, &objp->my_vers))
+ return (FALSE);
+ if (!xdr_int(xdrs, &objp->my_proc))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr_mon_id(XDR *xdrs, mon_id *objp)
+{
+
+ if (!xdr_string(xdrs, &objp->mon_name, SM_MAXSTRLEN))
+ return (FALSE);
+ if (!xdr_my_id(xdrs, &objp->my_id))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr_mon(XDR *xdrs, mon *objp)
+{
+
+ if (!xdr_mon_id(xdrs, &objp->mon_id))
+ return (FALSE);
+ if (!xdr_opaque(xdrs, objp->priv, 16))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr_stat_chge(XDR *xdrs, stat_chge *objp)
+{
+
+ if (!xdr_string(xdrs, &objp->mon_name, SM_MAXSTRLEN))
+ return (FALSE);
+ if (!xdr_int(xdrs, &objp->state))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr_sm_stat(XDR *xdrs, sm_stat *objp)
+{
+
+ if (!xdr_int(xdrs, &objp->state))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr_sm_res(XDR *xdrs, sm_res *objp)
+{
+
+ if (!xdr_enum(xdrs, (enum_t *)objp))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr_sm_stat_res(XDR *xdrs, sm_stat_res *objp)
+{
+
+ if (!xdr_sm_res(xdrs, &objp->res_stat))
+ return (FALSE);
+ if (!xdr_int(xdrs, &objp->state))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr_sm_status(XDR *xdrs, sm_status *objp)
+{
+
+ if (!xdr_string(xdrs, &objp->mon_name, SM_MAXSTRLEN))
+ return (FALSE);
+ if (!xdr_int(xdrs, &objp->state))
+ return (FALSE);
+ if (!xdr_opaque(xdrs, objp->priv, 16))
+ return (FALSE);
+ return (TRUE);
+}
OpenPOWER on IntegriCloud