Diffstat (limited to 'sys/nlm')
-rw-r--r--  sys/nlm/nlm.h              144
-rw-r--r--  sys/nlm/nlm_advlock.c     1235
-rw-r--r--  sys/nlm/nlm_prot.h          78
-rw-r--r--  sys/nlm/nlm_prot_clnt.c    237
-rw-r--r--  sys/nlm/nlm_prot_impl.c    723
-rw-r--r--  sys/nlm/nlm_prot_server.c  178
6 files changed, 2125 insertions, 470 deletions
diff --git a/sys/nlm/nlm.h b/sys/nlm/nlm.h
index 32bb97465aca..addd07e6b5ee 100644
--- a/sys/nlm/nlm.h
+++ b/sys/nlm/nlm.h
@@ -36,7 +36,17 @@
MALLOC_DECLARE(M_NLM);
#endif
+/*
+ * This value is added to host system IDs when recording NFS client
+ * locks in the local lock manager.
+ */
+#define NLM_SYSID_CLIENT 0x1000000
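(Aside, illustrative only and not part of the patch: the constant is OR-ed with a host's NLM sysid before a client-side lock is handed to the local lock manager, mirroring what nlm_record_lock() in nlm_advlock.c does further down in this change.)

	/* Illustrative composition of the local-lock-manager sysid. */
	fl.l_sysid = NLM_SYSID_CLIENT | nlm_host_get_sysid(host);
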
+
struct nlm_host;
+struct vnode;
+
+extern struct timeval nlm_zero_tv;
+extern int nlm_nsm_state;
/*
* Copy a struct netobj.
@@ -47,61 +57,140 @@ extern void nlm_copy_netobj(struct netobj *dst, struct netobj *src,
/*
* Search for an existing NLM host that matches the given name
* (typically the caller_name element of an nlm4_lock). If none is
- * found, create a new host. If 'rqstp' is non-NULL, record the remote
+ * found, create a new host. If 'addr' is non-NULL, record the remote
* address of the host so that we can call it back for async
- * responses.
+ * responses. If 'vers' is greater than zero then record the NLM
+ * program version to use to communicate with this client. The host
+ * reference count is incremented - the caller must call
+ * nlm_host_release when it has finished using it.
*/
extern struct nlm_host *nlm_find_host_by_name(const char *name,
- struct svc_req *rqstp);
+ const struct sockaddr *addr, rpcvers_t vers);
/*
* Search for an existing NLM host that matches the given remote
* address. If none is found, create a new host with the requested
* address and remember 'vers' as the NLM protocol version to use for
- * that host.
+ * that host. The host reference count is incremented - the caller
+ * must call nlm_host_release when it has finished using it.
*/
extern struct nlm_host *nlm_find_host_by_addr(const struct sockaddr *addr,
int vers);
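
(Illustrative only, not part of the patch: a minimal sketch of the reference-counted lookup API, following the pattern nlm_advlock_internal() uses later in this change; 'servername', 'sa' and 'vers' are assumed to come from the caller.)

	struct nlm_host *host;

	host = nlm_find_host_by_name(servername, sa, vers);
	/* ... issue NLM RPCs against 'host' ... */
	nlm_host_release(host);
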
/*
+ * Register this NLM host with the local NSM so that we can be
+ * notified if it reboots.
+ */
+extern void nlm_host_monitor(struct nlm_host *host, int state);
+
+/*
+ * Decrement the host reference count, freeing resources if the
+ * reference count reaches zero.
+ */
+extern void nlm_host_release(struct nlm_host *host);
+
+/*
* Return an RPC client handle that can be used to talk to the NLM
* running on the given host.
*/
extern CLIENT *nlm_host_get_rpc(struct nlm_host *host);
/*
+ * Return the system ID for a host.
+ */
+extern int nlm_host_get_sysid(struct nlm_host *host);
+
+/*
+ * Return the remote NSM state value for a host.
+ */
+extern int nlm_host_get_state(struct nlm_host *host);
+
+/*
+ * When sending a blocking lock request, we need to track the request
+ * in our waiting lock list. We add an entry to the waiting list
+ * before we send the lock RPC so that we can cope with a granted
+ * message arriving at any time. Call this function before sending the
+ * lock RPC. If the lock succeeds, call nlm_deregister_wait_lock with
+ * the handle this function returns; otherwise call nlm_wait_lock. Both
+ * will remove the entry from the waiting list.
+ */
+extern void *nlm_register_wait_lock(struct nlm4_lock *lock, struct vnode *vp);
+
+/*
+ * Deregister a blocking lock request. Call this if the lock succeeded
+ * without blocking.
+ */
+extern void nlm_deregister_wait_lock(void *handle);
+
+/*
+ * Wait for a granted callback for a blocked lock request, waiting at
+ * most timo ticks. If no granted message is received within the
+ * timeout, return EWOULDBLOCK. If a signal interrupted the wait,
+ * return EINTR - the caller must arrange to send a cancellation to
+ * the server. In both cases, the request is removed from the waiting
+ * list.
+ */
+extern int nlm_wait_lock(void *handle, int timo);
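
(Illustrative only, not part of the patch: the intended calling sequence around a blocking lock RPC. 'args', 'res', 'vp', 'client', 'ext' and 'timo' are assumed from the surrounding caller, and nlm_lock_rpc() is the version-dispatch helper added in nlm_advlock.c below.)

	void *wh;
	enum clnt_stat stat;
	int error;

	wh = nlm_register_wait_lock(&args.alock, vp);	/* before sending */
	stat = nlm_lock_rpc(vers, &args, &res, client, ext, timo);
	if (stat == RPC_SUCCESS && res.stat.stat == nlm4_blocked)
		error = nlm_wait_lock(wh, 5*hz);	/* wait for granted callback */
	else
		nlm_deregister_wait_lock(wh);
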
+
+/*
+ * Cancel any pending waits for this vnode - called on forcible unmounts.
+ */
+extern void nlm_cancel_wait(struct vnode *vp);
+
+/*
* Called when a host restarts.
*/
extern void nlm_sm_notify(nlm_sm_status *argp);
/*
- * Implementation for lock testing RPCs. Returns the NLM host that
- * matches the RPC arguments.
+ * Implementation for lock testing RPCs. If the request was handled
+ * successfully and rpcp is non-NULL, *rpcp is set to an RPC client
+ * handle which can be used to send an async rpc reply. Returns zero
+ * if the request was handled, or a suitable unix error code
+ * otherwise.
+ */
+extern int nlm_do_test(nlm4_testargs *argp, nlm4_testres *result,
+ struct svc_req *rqstp, CLIENT **rpcp);
+
+/*
+ * Implementation for lock setting RPCs. If the request was handled
+ * successfully and rpcp is non-NULL, *rpcp is set to an RPC client
+ * handle which can be used to send an async rpc reply. Returns zero
+ * if the request was handled, or a suitable unix error code
+ * otherwise.
*/
-extern struct nlm_host *nlm_do_test(nlm4_testargs *argp,
- nlm4_testres *result, struct svc_req *rqstp);
+extern int nlm_do_lock(nlm4_lockargs *argp, nlm4_res *result,
+ struct svc_req *rqstp, bool_t monitor, CLIENT **rpcp);
/*
- * Implementation for lock setting RPCs. Returns the NLM host that
- * matches the RPC arguments. If monitor is TRUE, set up an NSM
- * monitor for this host.
+ * Implementation for cancelling a pending lock request. If the
+ * request was handled successfully and rpcp is non-NULL, *rpcp is set
+ * to an RPC client handle which can be used to send an async rpc
+ * reply. Returns zero if the request was handled, or a suitable unix
+ * error code otherwise.
*/
-extern struct nlm_host *nlm_do_lock(nlm4_lockargs *argp,
- nlm4_res *result, struct svc_req *rqstp, bool_t monitor);
+extern int nlm_do_cancel(nlm4_cancargs *argp, nlm4_res *result,
+ struct svc_req *rqstp, CLIENT **rpcp);
/*
- * Implementation for cancelling a pending lock request. Returns the
- * NLM host that matches the RPC arguments.
+ * Implementation for unlocking RPCs. If the request was handled
+ * successfully and rpcp is non-NULL, *rpcp is set to an RPC client
+ * handle which can be used to send an async rpc reply. Returns zero
+ * if the request was handled, or a suitable unix error code
+ * otherwise.
*/
-extern struct nlm_host *nlm_do_cancel(nlm4_cancargs *argp,
- nlm4_res *result, struct svc_req *rqstp);
+extern int nlm_do_unlock(nlm4_unlockargs *argp, nlm4_res *result,
+ struct svc_req *rqstp, CLIENT **rpcp);
/*
- * Implementation for unlocking RPCs. Returns the NLM host that
- * matches the RPC arguments.
+ * Implementation for granted RPCs. If the request was handled
+ * successfully and rpcp is non-NULL, *rpcp is set to an RPC client
+ * handle which can be used to send an async rpc reply. Returns zero
+ * if the request was handled, or a suitable unix error code
+ * otherwise.
*/
-extern struct nlm_host *nlm_do_unlock(nlm4_unlockargs *argp,
- nlm4_res *result, struct svc_req *rqstp);
+extern int nlm_do_granted(nlm4_testargs *argp, nlm4_res *result,
+ struct svc_req *rqstp, CLIENT **rpcp);
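
(The comments above describe the calling convention shared by all nlm_do_* entry points. The following is a rough sketch only; the real async-reply path lives in nlm_prot_server.c/nlm_prot_impl.c, which are not shown in this excerpt, and the 25-second timeout and NULL rpc_callextra are assumptions.)

	nlm4_testres res;
	CLIENT *rpc;
	struct timeval timo = { 25, 0 };
	int error;

	memset(&res, 0, sizeof(res));
	error = nlm_do_test(argp, &res, rqstp, &rpc);
	if (error == 0 && rpc != NULL) {
		/* Reply asynchronously with the result. */
		nlm4_test_res_4(&res, NULL, rpc, NULL, timo);
		CLNT_RELEASE(rpc);
	}
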
/*
* Free all locks associated with the hostname argp->name.
@@ -109,10 +198,17 @@ extern struct nlm_host *nlm_do_unlock(nlm4_unlockargs *argp,
extern void nlm_do_free_all(nlm4_notify *argp);
/*
- * Find an RPC transport that can be used to communicate with the
- * userland part of lockd.
+ * Recover client lock state after a server reboot.
+ */
+extern void nlm_client_recovery(struct nlm_host *);
+
+/*
+ * Interface from NFS client code to the NLM.
*/
-extern CLIENT *nlm_user_lockd(void);
+struct vop_advlock_args;
+struct vop_reclaim_args;
+extern int nlm_advlock(struct vop_advlock_args *ap);
+extern int nlm_reclaim(struct vop_reclaim_args *ap);
#endif
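
(By way of illustration only; the NFS client changes are outside sys/nlm and not shown here. A vnode op implementation can forward advisory locks to the kernel NLM like this; real code would first decide between local locking and the NLM based on mount options.)

	static int
	example_nfs_advlock(struct vop_advlock_args *ap)
	{

		/* Hypothetical wrapper around the new entry point. */
		return (nlm_advlock(ap));
	}
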
diff --git a/sys/nlm/nlm_advlock.c b/sys/nlm/nlm_advlock.c
new file mode 100644
index 000000000000..fb8b5a7d51c8
--- /dev/null
+++ b/sys/nlm/nlm_advlock.c
@@ -0,0 +1,1235 @@
+/*-
+ * Copyright (c) 2008 Isilon Inc http://www.isilon.com/
+ * Authors: Doug Rabson <dfr@rabson.org>
+ * Developed with Red Inc: Alfred Perlstein <alfred@freebsd.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/fcntl.h>
+#include <sys/kernel.h>
+#include <sys/limits.h>
+#include <sys/lock.h>
+#include <sys/lockf.h>
+#include <sys/malloc.h>
+#include <sys/mount.h>
+#include <sys/mutex.h>
+#include <sys/proc.h>
+#include <sys/syslog.h>
+#include <sys/systm.h>
+#include <sys/unistd.h>
+#include <sys/vnode.h>
+
+#include <rpc/rpcclnt.h>
+#include <nfs/nfsproto.h>
+#include <nfsclient/nfs.h>
+#include <nfsclient/nfsnode.h>
+#include <nfsclient/nfsmount.h>
+
+#include <nlm/nlm_prot.h>
+#include <nlm/nlm.h>
+
+/*
+ * We need to keep track of the svid values used for F_FLOCK locks.
+ */
+struct nlm_file_svid {
+ int ns_refs; /* thread count + 1 if active */
+ int ns_svid; /* on-the-wire SVID for this file */
+ struct ucred *ns_ucred; /* creds to use for lock recovery */
+ void *ns_id; /* local struct file pointer */
+ bool_t ns_active; /* TRUE if we own a lock */
+ LIST_ENTRY(nlm_file_svid) ns_link;
+};
+LIST_HEAD(nlm_file_svid_list, nlm_file_svid);
+
+#define NLM_SVID_HASH_SIZE 256
+struct nlm_file_svid_list nlm_file_svids[NLM_SVID_HASH_SIZE];
+
+struct mtx nlm_svid_lock;
+static struct unrhdr *nlm_svid_allocator;
+static volatile u_int nlm_xid = 1;
+
+static int nlm_setlock(struct nlm_host *host, struct rpc_callextra *ext,
+ rpcvers_t vers, struct timeval *timo, int retries,
+ struct vnode *vp, int op, struct flock *fl, int flags,
+ int svid, size_t fhlen, void *fh, off_t size, bool_t reclaim);
+static int nlm_clearlock(struct nlm_host *host, struct rpc_callextra *ext,
+ rpcvers_t vers, struct timeval *timo, int retries,
+ struct vnode *vp, int op, struct flock *fl, int flags,
+ int svid, size_t fhlen, void *fh, off_t size);
+static int nlm_getlock(struct nlm_host *host, struct rpc_callextra *ext,
+ rpcvers_t vers, struct timeval *timo, int retries,
+ struct vnode *vp, int op, struct flock *fl, int flags,
+ int svid, size_t fhlen, void *fh, off_t size);
+static int nlm_map_status(nlm4_stats stat);
+static struct nlm_file_svid *nlm_find_svid(void *id);
+static void nlm_free_svid(struct nlm_file_svid *nf);
+static int nlm_init_lock(struct flock *fl, int flags, int svid,
+ rpcvers_t vers, size_t fhlen, void *fh, off_t size,
+ struct nlm4_lock *lock, char oh_space[32]);
+
+static void
+nlm_client_init(void *dummy)
+{
+ int i;
+
+ mtx_init(&nlm_svid_lock, "NLM svid lock", NULL, MTX_DEF);
+ nlm_svid_allocator = new_unrhdr(PID_MAX + 2, INT_MAX, &nlm_svid_lock);
+ for (i = 0; i < NLM_SVID_HASH_SIZE; i++)
+ LIST_INIT(&nlm_file_svids[i]);
+}
+SYSINIT(nlm_client_init, SI_SUB_LOCK, SI_ORDER_FIRST, nlm_client_init, NULL);
+
+static int
+nlm_msg(struct thread *td, const char *server, const char *msg, int error)
+{
+ struct proc *p;
+
+ p = td ? td->td_proc : NULL;
+ if (error) {
+ tprintf(p, LOG_INFO, "nfs server %s: %s, error %d\n", server,
+ msg, error);
+ } else {
+ tprintf(p, LOG_INFO, "nfs server %s: %s\n", server, msg);
+ }
+ return (0);
+}
+
+struct nlm_feedback_arg {
+ bool_t nf_printed;
+ struct nfsmount *nf_nmp;
+};
+
+static void
+nlm_down(struct nlm_feedback_arg *nf, struct thread *td,
+ const char *msg, int error)
+{
+ struct nfsmount *nmp = nf->nf_nmp;
+
+ if (nmp == NULL)
+ return;
+ mtx_lock(&nmp->nm_mtx);
+ if (!(nmp->nm_state & NFSSTA_LOCKTIMEO)) {
+ nmp->nm_state |= NFSSTA_LOCKTIMEO;
+ mtx_unlock(&nmp->nm_mtx);
+ vfs_event_signal(&nmp->nm_mountp->mnt_stat.f_fsid,
+ VQ_NOTRESPLOCK, 0);
+ } else {
+ mtx_unlock(&nmp->nm_mtx);
+ }
+
+ nf->nf_printed = TRUE;
+ nlm_msg(td, nmp->nm_mountp->mnt_stat.f_mntfromname, msg, error);
+}
+
+static void
+nlm_up(struct nlm_feedback_arg *nf, struct thread *td,
+ const char *msg)
+{
+ struct nfsmount *nmp = nf->nf_nmp;
+
+ if (!nf->nf_printed)
+ return;
+
+ nlm_msg(td, nmp->nm_mountp->mnt_stat.f_mntfromname, msg, 0);
+
+ mtx_lock(&nmp->nm_mtx);
+ if (nmp->nm_state & NFSSTA_LOCKTIMEO) {
+ nmp->nm_state &= ~NFSSTA_LOCKTIMEO;
+ mtx_unlock(&nmp->nm_mtx);
+ vfs_event_signal(&nmp->nm_mountp->mnt_stat.f_fsid,
+ VQ_NOTRESPLOCK, 1);
+ } else {
+ mtx_unlock(&nmp->nm_mtx);
+ }
+}
+
+static void
+nlm_feedback(int type, int proc, void *arg)
+{
+ struct thread *td = curthread;
+ struct nlm_feedback_arg *nf = (struct nlm_feedback_arg *) arg;
+
+ switch (type) {
+ case FEEDBACK_REXMIT2:
+ case FEEDBACK_RECONNECT:
+ nlm_down(nf, td, "lockd not responding", 0);
+ break;
+
+ case FEEDBACK_OK:
+ nlm_up(nf, td, "lockd is alive again");
+ break;
+ }
+}
+
+/*
+ * nlm_advlock --
+ * NFS advisory byte-level locks.
+ */
+static int
+nlm_advlock_internal(struct vnode *vp, void *id, int op, struct flock *fl,
+ int flags, bool_t reclaim, bool_t unlock_vp)
+{
+ struct thread *td = curthread;
+ struct nfsmount *nmp;
+ struct nfsnode *np;
+ off_t size;
+ size_t fhlen;
+ union nfsfh fh;
+ struct sockaddr *sa;
+ struct sockaddr_storage ss;
+ char servername[MNAMELEN];
+ struct timeval timo;
+ int retries;
+ rpcvers_t vers;
+ struct nlm_host *host;
+ struct rpc_callextra ext;
+ struct nlm_feedback_arg nf;
+ AUTH *auth;
+ struct ucred *cred;
+ struct nlm_file_svid *ns;
+ int svid;
+ int error;
+
+ ASSERT_VOP_LOCKED(vp, "nlm_advlock_1");
+
+ /*
+ * Push any pending writes to the server and flush our cache
+ * so that if we are contending with another machine for a
+ * file, we get whatever they wrote and vice-versa.
+ */
+ if (op == F_SETLK || op == F_UNLCK)
+ nfs_vinvalbuf(vp, V_SAVE, td, 1);
+
+ np = VTONFS(vp);
+ nmp = VFSTONFS(vp->v_mount);
+ size = np->n_size;
+ sa = nmp->nm_nam;
+ memcpy(&ss, sa, sa->sa_len);
+ sa = (struct sockaddr *) &ss;
+ strcpy(servername, nmp->nm_hostname);
+ fhlen = np->n_fhsize;
+ memcpy(&fh.fh_bytes, np->n_fhp, fhlen);
+ timo.tv_sec = nmp->nm_timeo / NFS_HZ;
+ timo.tv_usec = (nmp->nm_timeo % NFS_HZ) * (1000000 / NFS_HZ);
+ if (NFS_ISV3(vp))
+ vers = NLM_VERS4;
+ else
+ vers = NLM_VERS;
+
+ if (nmp->nm_flag & NFSMNT_SOFT)
+ retries = nmp->nm_retry;
+ else
+ retries = INT_MAX;
+
+ if (unlock_vp)
+ VOP_UNLOCK(vp, 0);
+
+ /*
+ * We need to switch to mount-point creds so that we can send
+ * packets from a privileged port.
+ */
+ cred = td->td_ucred;
+ td->td_ucred = vp->v_mount->mnt_cred;
+
+ host = nlm_find_host_by_name(servername, sa, vers);
+ auth = authunix_create(cred);
+ memset(&ext, 0, sizeof(ext));
+
+ nf.nf_printed = FALSE;
+ nf.nf_nmp = nmp;
+ ext.rc_auth = auth;
+
+ ext.rc_feedback = nlm_feedback;
+ ext.rc_feedback_arg = &nf;
+
+ ns = NULL;
+ if (flags & F_FLOCK) {
+ ns = nlm_find_svid(id);
+ KASSERT(fl->l_start == 0 && fl->l_len == 0,
+ ("F_FLOCK lock requests must be whole-file locks"));
+ if (!ns->ns_ucred) {
+ /*
+ * Remember the creds used for locking in case
+ * we need to recover the lock later.
+ */
+ ns->ns_ucred = crdup(cred);
+ }
+ svid = ns->ns_svid;
+ } else if (flags & F_REMOTE) {
+ /*
+ * If we are recovering after a server restart or
+ * trashing locks on a force unmount, use the same
+ * svid as last time.
+ */
+ svid = fl->l_pid;
+ } else {
+ svid = ((struct proc *) id)->p_pid;
+ }
+
+ switch(op) {
+ case F_SETLK:
+ if ((flags & (F_FLOCK|F_WAIT)) == (F_FLOCK|F_WAIT)
+ && fl->l_type == F_WRLCK) {
+ /*
+ * The semantics for flock(2) require that any
+ * shared lock on the file must be released
+ * before an exclusive lock is granted. The
+ * local locking code interprets this by
+ * unlocking the file before sleeping on a
+ * blocked exclusive lock request. We
+ * approximate this by first attempting
+ * non-blocking and if that fails, we unlock
+ * the file and block.
+ */
+ error = nlm_setlock(host, &ext, vers, &timo, retries,
+ vp, F_SETLK, fl, flags & ~F_WAIT,
+ svid, fhlen, &fh.fh_bytes, size, reclaim);
+ if (error == EAGAIN) {
+ fl->l_type = F_UNLCK;
+ error = nlm_clearlock(host, &ext, vers, &timo,
+ retries, vp, F_UNLCK, fl, flags,
+ svid, fhlen, &fh.fh_bytes, size);
+ fl->l_type = F_WRLCK;
+ if (!error) {
+ mtx_lock(&nlm_svid_lock);
+ if (ns->ns_active) {
+ ns->ns_refs--;
+ ns->ns_active = FALSE;
+ }
+ mtx_unlock(&nlm_svid_lock);
+ flags |= F_WAIT;
+ error = nlm_setlock(host, &ext, vers,
+ &timo, retries, vp, F_SETLK, fl,
+ flags, svid, fhlen, &fh.fh_bytes,
+ size, reclaim);
+ }
+ }
+ } else {
+ error = nlm_setlock(host, &ext, vers, &timo, retries,
+ vp, op, fl, flags, svid, fhlen, &fh.fh_bytes,
+ size, reclaim);
+ }
+ if (!error && ns) {
+ mtx_lock(&nlm_svid_lock);
+ if (!ns->ns_active) {
+ /*
+ * Add one to the reference count to
+ * hold onto the SVID for the lifetime
+ * of the lock. Note that since
+ * F_FLOCK only supports whole-file
+ * locks, there can only be one active
+ * lock for this SVID.
+ */
+ ns->ns_refs++;
+ ns->ns_active = TRUE;
+ }
+ mtx_unlock(&nlm_svid_lock);
+ }
+ break;
+
+ case F_UNLCK:
+ error = nlm_clearlock(host, &ext, vers, &timo, retries,
+ vp, op, fl, flags, svid, fhlen, &fh.fh_bytes, size);
+ if (!error && ns) {
+ mtx_lock(&nlm_svid_lock);
+ if (ns->ns_active) {
+ ns->ns_refs--;
+ ns->ns_active = FALSE;
+ }
+ mtx_unlock(&nlm_svid_lock);
+ }
+ break;
+
+ case F_GETLK:
+ error = nlm_getlock(host, &ext, vers, &timo, retries,
+ vp, op, fl, flags, svid, fhlen, &fh.fh_bytes, size);
+ break;
+
+ default:
+ error = EINVAL;
+ break;
+ }
+
+ if (ns)
+ nlm_free_svid(ns);
+
+ td->td_ucred = cred;
+ AUTH_DESTROY(auth);
+
+ nlm_host_release(host);
+
+ return (error);
+}
+
+int
+nlm_advlock(struct vop_advlock_args *ap)
+{
+
+ return (nlm_advlock_internal(ap->a_vp, ap->a_id, ap->a_op, ap->a_fl,
+ ap->a_flags, FALSE, TRUE));
+}
+
+/*
+ * Set the creds of td to the creds of the given lock's owner. The new
+ * creds reference count will be incremented via crhold. The caller is
+ * responsible for calling crfree and restoring td's original creds.
+ */
+static void
+nlm_set_creds_for_lock(struct thread *td, struct flock *fl)
+{
+ int i;
+ struct nlm_file_svid *ns;
+ struct proc *p;
+ struct ucred *cred;
+
+ cred = NULL;
+ if (fl->l_pid > PID_MAX) {
+ /*
+ * If this was originally a F_FLOCK-style lock, we
+ * recorded the creds used when it was originally
+ * locked in the nlm_file_svid structure.
+ */
+ mtx_lock(&nlm_svid_lock);
+ for (i = 0; i < NLM_SVID_HASH_SIZE; i++) {
+ for (ns = LIST_FIRST(&nlm_file_svids[i]); ns;
+ ns = LIST_NEXT(ns, ns_link)) {
+ if (ns->ns_svid == fl->l_pid) {
+ cred = crhold(ns->ns_ucred);
+ break;
+ }
+ }
+ }
+ mtx_unlock(&nlm_svid_lock);
+ } else {
+ /*
+ * This lock is owned by a process. Get a reference to
+ * the process creds.
+ */
+ p = pfind(fl->l_pid);
+ if (p) {
+ cred = crhold(p->p_ucred);
+ PROC_UNLOCK(p);
+ }
+ }
+
+ /*
+ * If we can't find a cred, fall back on the recovery
+ * thread's cred.
+ */
+ if (!cred) {
+ cred = crhold(td->td_ucred);
+ }
+
+ td->td_ucred = cred;
+}
+
+static int
+nlm_reclaim_free_lock(struct vnode *vp, struct flock *fl, void *arg)
+{
+ struct flock newfl;
+ struct thread *td = curthread;
+ struct ucred *oldcred;
+ int error;
+
+ newfl = *fl;
+ newfl.l_type = F_UNLCK;
+
+ oldcred = td->td_ucred;
+ nlm_set_creds_for_lock(td, &newfl);
+
+ error = nlm_advlock_internal(vp, NULL, F_UNLCK, &newfl, F_REMOTE,
+ FALSE, FALSE);
+
+ crfree(td->td_ucred);
+ td->td_ucred = oldcred;
+
+ return (error);
+}
+
+int
+nlm_reclaim(struct vop_reclaim_args *ap)
+{
+
+ nlm_cancel_wait(ap->a_vp);
+ lf_iteratelocks_vnode(ap->a_vp, nlm_reclaim_free_lock, NULL);
+ return (0);
+}
+
+struct nlm_recovery_context {
+ struct nlm_host *nr_host; /* host we are recovering */
+ int nr_state; /* remote NSM state for recovery */
+};
+
+static int
+nlm_client_recover_lock(struct vnode *vp, struct flock *fl, void *arg)
+{
+ struct nlm_recovery_context *nr = (struct nlm_recovery_context *) arg;
+ struct thread *td = curthread;
+ struct ucred *oldcred;
+ int state, error;
+
+ /*
+ * If the remote NSM state changes during recovery, the host
+ * must have rebooted a second time. In that case, we must
+ * restart the recovery.
+ */
+ state = nlm_host_get_state(nr->nr_host);
+ if (nr->nr_state != state)
+ return (ERESTART);
+
+ error = vn_lock(vp, LK_SHARED);
+ if (error)
+ return (error);
+
+ oldcred = td->td_ucred;
+ nlm_set_creds_for_lock(td, fl);
+
+ error = nlm_advlock_internal(vp, NULL, F_SETLK, fl, F_REMOTE,
+ TRUE, TRUE);
+
+ crfree(td->td_ucred);
+ td->td_ucred = oldcred;
+
+ return (error);
+}
+
+void
+nlm_client_recovery(struct nlm_host *host)
+{
+ struct nlm_recovery_context nr;
+ int sysid, error;
+
+ sysid = NLM_SYSID_CLIENT | nlm_host_get_sysid(host);
+ do {
+ nr.nr_host = host;
+ nr.nr_state = nlm_host_get_state(host);
+ error = lf_iteratelocks_sysid(sysid,
+ nlm_client_recover_lock, &nr);
+ } while (error == ERESTART);
+}
+
+static void
+nlm_convert_to_nlm_lock(struct nlm_lock *dst, struct nlm4_lock *src)
+{
+
+ dst->caller_name = src->caller_name;
+ dst->fh = src->fh;
+ dst->oh = src->oh;
+ dst->svid = src->svid;
+ dst->l_offset = src->l_offset;
+ dst->l_len = src->l_len;
+}
+
+static void
+nlm_convert_to_nlm4_holder(struct nlm4_holder *dst, struct nlm_holder *src)
+{
+
+ dst->exclusive = src->exclusive;
+ dst->svid = src->svid;
+ dst->oh = src->oh;
+ dst->l_offset = src->l_offset;
+ dst->l_len = src->l_len;
+}
+
+static void
+nlm_convert_to_nlm4_res(struct nlm4_res *dst, struct nlm_res *src)
+{
+ dst->cookie = src->cookie;
+ dst->stat.stat = (enum nlm4_stats) src->stat.stat;
+}
+
+static enum clnt_stat
+nlm_test_rpc(rpcvers_t vers, nlm4_testargs *args, nlm4_testres *res, CLIENT *client,
+ struct rpc_callextra *ext, struct timeval timo)
+{
+ if (vers == NLM_VERS4) {
+ return nlm4_test_4(args, res, client, ext, timo);
+ } else {
+ nlm_testargs args1;
+ nlm_testres res1;
+ enum clnt_stat stat;
+
+ args1.cookie = args->cookie;
+ args1.exclusive = args->exclusive;
+ nlm_convert_to_nlm_lock(&args1.alock, &args->alock);
+ memset(&res1, 0, sizeof(res1));
+
+ stat = nlm_test_1(&args1, &res1, client, ext, timo);
+
+ if (stat == RPC_SUCCESS) {
+ res->cookie = res1.cookie;
+ res->stat.stat = (enum nlm4_stats) res1.stat.stat;
+ if (res1.stat.stat == nlm_denied)
+ nlm_convert_to_nlm4_holder(
+ &res->stat.nlm4_testrply_u.holder,
+ &res1.stat.nlm_testrply_u.holder);
+ }
+
+ return (stat);
+ }
+}
+
+static enum clnt_stat
+nlm_lock_rpc(rpcvers_t vers, nlm4_lockargs *args, nlm4_res *res, CLIENT *client,
+ struct rpc_callextra *ext, struct timeval timo)
+{
+ if (vers == NLM_VERS4) {
+ return nlm4_lock_4(args, res, client, ext, timo);
+ } else {
+ nlm_lockargs args1;
+ nlm_res res1;
+ enum clnt_stat stat;
+
+ args1.cookie = args->cookie;
+ args1.block = args->block;
+ args1.exclusive = args->exclusive;
+ nlm_convert_to_nlm_lock(&args1.alock, &args->alock);
+ args1.reclaim = args->reclaim;
+ args1.state = args->state;
+ memset(&res1, 0, sizeof(res1));
+
+ stat = nlm_lock_1(&args1, &res1, client, ext, timo);
+
+ if (stat == RPC_SUCCESS) {
+ nlm_convert_to_nlm4_res(res, &res1);
+ }
+
+ return (stat);
+ }
+}
+
+static enum clnt_stat
+nlm_cancel_rpc(rpcvers_t vers, nlm4_cancargs *args, nlm4_res *res, CLIENT *client,
+ struct rpc_callextra *ext, struct timeval timo)
+{
+ if (vers == NLM_VERS4) {
+ return nlm4_cancel_4(args, res, client, ext, timo);
+ } else {
+ nlm_cancargs args1;
+ nlm_res res1;
+ enum clnt_stat stat;
+
+ args1.cookie = args->cookie;
+ args1.block = args->block;
+ args1.exclusive = args->exclusive;
+ nlm_convert_to_nlm_lock(&args1.alock, &args->alock);
+ memset(&res1, 0, sizeof(res1));
+
+ stat = nlm_cancel_1(&args1, &res1, client, ext, timo);
+
+ if (stat == RPC_SUCCESS) {
+ nlm_convert_to_nlm4_res(res, &res1);
+ }
+
+ return (stat);
+ }
+}
+
+static enum clnt_stat
+nlm_unlock_rpc(rpcvers_t vers, nlm4_unlockargs *args, nlm4_res *res, CLIENT *client,
+ struct rpc_callextra *ext, struct timeval timo)
+{
+ if (vers == NLM_VERS4) {
+ return nlm4_unlock_4(args, res, client, ext, timo);
+ } else {
+ nlm_unlockargs args1;
+ nlm_res res1;
+ enum clnt_stat stat;
+
+ args1.cookie = args->cookie;
+ nlm_convert_to_nlm_lock(&args1.alock, &args->alock);
+ memset(&res1, 0, sizeof(res1));
+
+ stat = nlm_unlock_1(&args1, &res1, client, ext, timo);
+
+ if (stat == RPC_SUCCESS) {
+ nlm_convert_to_nlm4_res(res, &res1);
+ }
+
+ return (stat);
+ }
+}
+
+/*
+ * Called after a lock request (set or clear) succeeded. We record the
+ * details in the local lock manager. Note that since the remote
+ * server has granted the lock, we can be sure that it doesn't
+ * conflict with any other locks we have in the local lock manager.
+ *
+ * Since it is possible that host may also make NLM client requests to
+ * our NLM server, we use a different sysid value to record our own
+ * client locks.
+ *
+ * Note that since it is possible for us to receive replies from the
+ * server in a different order than the locks were granted (e.g. if
+ * many local threads are contending for the same lock), we must use a
+ * blocking operation when registering with the local lock manager.
+ * We expect that any actual wait will be rare and short hence we
+ * ignore signals for this.
+ */
+static void
+nlm_record_lock(struct vnode *vp, int op, struct flock *fl,
+ int svid, int sysid, off_t size)
+{
+ struct vop_advlockasync_args a;
+ struct flock newfl;
+ int error;
+
+ a.a_vp = vp;
+ a.a_id = NULL;
+ a.a_op = op;
+ a.a_fl = &newfl;
+ a.a_flags = F_REMOTE|F_WAIT|F_NOINTR;
+ a.a_task = NULL;
+ a.a_cookiep = NULL;
+ newfl.l_start = fl->l_start;
+ newfl.l_len = fl->l_len;
+ newfl.l_type = fl->l_type;
+ newfl.l_whence = fl->l_whence;
+ newfl.l_pid = svid;
+ newfl.l_sysid = NLM_SYSID_CLIENT | sysid;
+
+ error = lf_advlockasync(&a, &vp->v_lockf, size);
+ KASSERT(error == 0, ("Failed to register NFS lock locally - error=%d",
+ error));
+}
+
+static int
+nlm_setlock(struct nlm_host *host, struct rpc_callextra *ext,
+ rpcvers_t vers, struct timeval *timo, int retries,
+ struct vnode *vp, int op, struct flock *fl, int flags,
+ int svid, size_t fhlen, void *fh, off_t size, bool_t reclaim)
+{
+ struct nlm4_lockargs args;
+ char oh_space[32];
+ struct nlm4_res res;
+ u_int xid;
+ CLIENT *client;
+ enum clnt_stat stat;
+ int retry, block, exclusive;
+ void *wait_handle = NULL;
+ int error;
+
+ memset(&args, 0, sizeof(args));
+ memset(&res, 0, sizeof(res));
+
+ block = (flags & F_WAIT) ? TRUE : FALSE;
+ exclusive = (fl->l_type == F_WRLCK);
+
+ error = nlm_init_lock(fl, flags, svid, vers, fhlen, fh, size,
+ &args.alock, oh_space);
+ if (error)
+ return (error);
+ args.block = block;
+ args.exclusive = exclusive;
+ args.reclaim = reclaim;
+ args.state = nlm_nsm_state;
+
+ retry = 5*hz;
+ for (;;) {
+ client = nlm_host_get_rpc(host);
+ if (!client)
+ return (ENOLCK); /* XXX retry? */
+
+ if (block)
+ wait_handle = nlm_register_wait_lock(&args.alock, vp);
+
+ xid = atomic_fetchadd_int(&nlm_xid, 1);
+ args.cookie.n_len = sizeof(xid);
+ args.cookie.n_bytes = (char*) &xid;
+
+ stat = nlm_lock_rpc(vers, &args, &res, client, ext, *timo);
+
+ CLNT_RELEASE(client);
+
+ if (stat != RPC_SUCCESS) {
+ if (block)
+ nlm_deregister_wait_lock(wait_handle);
+ if (retries) {
+ retries--;
+ continue;
+ }
+ return (EINVAL);
+ }
+
+ /*
+ * Free res.cookie.
+ */
+ xdr_free((xdrproc_t) xdr_nlm4_res, &res);
+
+ if (block && res.stat.stat != nlm4_blocked)
+ nlm_deregister_wait_lock(wait_handle);
+
+ if (res.stat.stat == nlm4_denied_grace_period) {
+ /*
+ * The server has recently rebooted and is
+			 * giving old clients a chance to reclaim
+ * their locks. Wait for a few seconds and try
+ * again.
+ */
+ error = tsleep(&args, PCATCH, "nlmgrace", retry);
+ if (error && error != EWOULDBLOCK)
+ return (error);
+ retry = 2*retry;
+ if (retry > 30*hz)
+ retry = 30*hz;
+ continue;
+ }
+
+ if (block && res.stat.stat == nlm4_blocked) {
+ /*
+ * The server should call us back with a
+ * granted message when the lock succeeds. In
+ * order to deal with broken servers, lost
+ * granted messages and server reboots, we
+ * will also re-try every few seconds.
+ */
+ error = nlm_wait_lock(wait_handle, retry);
+ if (error == EWOULDBLOCK) {
+ retry = 2*retry;
+ if (retry > 30*hz)
+ retry = 30*hz;
+ continue;
+ }
+ if (error) {
+ /*
+ * We need to call the server to
+ * cancel our lock request.
+ */
+ nlm4_cancargs cancel;
+
+ memset(&cancel, 0, sizeof(cancel));
+
+ xid = atomic_fetchadd_int(&nlm_xid, 1);
+ cancel.cookie.n_len = sizeof(xid);
+ cancel.cookie.n_bytes = (char*) &xid;
+ cancel.block = block;
+ cancel.exclusive = exclusive;
+ cancel.alock = args.alock;
+
+ do {
+ client = nlm_host_get_rpc(host);
+ if (!client)
+ /* XXX retry? */
+ return (ENOLCK);
+
+ stat = nlm_cancel_rpc(vers, &cancel,
+ &res, client, ext, *timo);
+
+ CLNT_RELEASE(client);
+
+ if (stat != RPC_SUCCESS) {
+ /*
+ * We need to cope
+ * with temporary
+ * network partitions
+ * as well as server
+ * reboots. This means
+ * we have to keep
+ * trying to cancel
+ * until the server
+ * wakes up again.
+ */
+ pause("nlmcancel", 10*hz);
+ }
+ } while (stat != RPC_SUCCESS);
+
+ /*
+ * Free res.cookie.
+ */
+ xdr_free((xdrproc_t) xdr_nlm4_res, &res);
+
+ switch (res.stat.stat) {
+ case nlm_denied:
+ /*
+ * There was nothing
+ * to cancel. We are
+ * going to go ahead
+ * and assume we got
+ * the lock.
+ */
+ error = 0;
+ break;
+
+ case nlm4_denied_grace_period:
+ /*
+ * The server has
+ * recently rebooted -
+ * treat this as a
+ * successful
+ * cancellation.
+ */
+ break;
+
+ case nlm4_granted:
+ /*
+ * We managed to
+ * cancel.
+ */
+ break;
+
+ default:
+ /*
+ * Broken server
+ * implementation -
+ * can't really do
+ * anything here.
+ */
+ break;
+ }
+
+ }
+ } else {
+ error = nlm_map_status(res.stat.stat);
+ }
+
+ if (!error && !reclaim) {
+ nlm_record_lock(vp, op, fl, args.alock.svid,
+ nlm_host_get_sysid(host), size);
+ nlm_host_monitor(host, 0);
+ }
+
+ return (error);
+ }
+}
+
+static int
+nlm_clearlock(struct nlm_host *host, struct rpc_callextra *ext,
+ rpcvers_t vers, struct timeval *timo, int retries,
+ struct vnode *vp, int op, struct flock *fl, int flags,
+ int svid, size_t fhlen, void *fh, off_t size)
+{
+ struct nlm4_unlockargs args;
+ char oh_space[32];
+ struct nlm4_res res;
+ u_int xid;
+ CLIENT *client;
+ enum clnt_stat stat;
+ int error;
+
+ memset(&args, 0, sizeof(args));
+ memset(&res, 0, sizeof(res));
+
+ error = nlm_init_lock(fl, flags, svid, vers, fhlen, fh, size,
+ &args.alock, oh_space);
+ if (error)
+ return (error);
+
+ for (;;) {
+ client = nlm_host_get_rpc(host);
+ if (!client)
+ return (ENOLCK); /* XXX retry? */
+
+ xid = atomic_fetchadd_int(&nlm_xid, 1);
+ args.cookie.n_len = sizeof(xid);
+ args.cookie.n_bytes = (char*) &xid;
+
+ stat = nlm_unlock_rpc(vers, &args, &res, client, ext, *timo);
+
+ CLNT_RELEASE(client);
+
+ if (stat != RPC_SUCCESS) {
+ if (retries) {
+ retries--;
+ continue;
+ }
+ return (EINVAL);
+ }
+
+ /*
+ * Free res.cookie.
+ */
+ xdr_free((xdrproc_t) xdr_nlm4_res, &res);
+
+ if (res.stat.stat == nlm4_denied_grace_period) {
+ /*
+ * The server has recently rebooted and is
+			 * giving old clients a chance to reclaim
+ * their locks. Wait for a few seconds and try
+ * again.
+ */
+ error = tsleep(&args, PCATCH, "nlmgrace", 5*hz);
+ if (error && error != EWOULDBLOCK)
+ return (error);
+ continue;
+ }
+
+ /*
+ * If we are being called via nlm_reclaim (which will
+ * use the F_REMOTE flag), don't record the lock
+ * operation in the local lock manager since the vnode
+ * is going away.
+ */
+ if (!(flags & F_REMOTE))
+ nlm_record_lock(vp, op, fl, args.alock.svid,
+ nlm_host_get_sysid(host), size);
+
+ return (0);
+ }
+}
+
+static int
+nlm_getlock(struct nlm_host *host, struct rpc_callextra *ext,
+ rpcvers_t vers, struct timeval *timo, int retries,
+ struct vnode *vp, int op, struct flock *fl, int flags,
+ int svid, size_t fhlen, void *fh, off_t size)
+{
+ struct nlm4_testargs args;
+ char oh_space[32];
+ struct nlm4_testres res;
+ u_int xid;
+ CLIENT *client;
+ enum clnt_stat stat;
+ int exclusive;
+ int error;
+
+ KASSERT(!(flags & F_FLOCK), ("unexpected F_FLOCK for F_GETLK"));
+
+ memset(&args, 0, sizeof(args));
+ memset(&res, 0, sizeof(res));
+
+ exclusive = (fl->l_type == F_WRLCK);
+
+ error = nlm_init_lock(fl, flags, svid, vers, fhlen, fh, size,
+ &args.alock, oh_space);
+ if (error)
+ return (error);
+ args.exclusive = exclusive;
+
+ for (;;) {
+ client = nlm_host_get_rpc(host);
+ if (!client)
+ return (ENOLCK); /* XXX retry? */
+
+ xid = atomic_fetchadd_int(&nlm_xid, 1);
+ args.cookie.n_len = sizeof(xid);
+ args.cookie.n_bytes = (char*) &xid;
+
+ stat = nlm_test_rpc(vers, &args, &res, client, ext, *timo);
+
+ CLNT_RELEASE(client);
+
+ if (stat != RPC_SUCCESS) {
+ if (retries) {
+ retries--;
+ continue;
+ }
+ return (EINVAL);
+ }
+
+ if (res.stat.stat == nlm4_denied_grace_period) {
+ /*
+ * The server has recently rebooted and is
+			 * giving old clients a chance to reclaim
+ * their locks. Wait for a few seconds and try
+ * again.
+ */
+ xdr_free((xdrproc_t) xdr_nlm4_testres, &res);
+ error = tsleep(&args, PCATCH, "nlmgrace", 5*hz);
+ if (error && error != EWOULDBLOCK)
+ return (error);
+ continue;
+ }
+
+ if (res.stat.stat == nlm4_denied) {
+ struct nlm4_holder *h =
+ &res.stat.nlm4_testrply_u.holder;
+ fl->l_start = h->l_offset;
+ fl->l_len = h->l_len;
+ fl->l_pid = h->svid;
+ if (h->exclusive)
+ fl->l_type = F_WRLCK;
+ else
+ fl->l_type = F_RDLCK;
+ fl->l_whence = SEEK_SET;
+ fl->l_sysid = 0;
+ } else {
+ fl->l_type = F_UNLCK;
+ }
+
+ xdr_free((xdrproc_t) xdr_nlm4_testres, &res);
+
+ return (0);
+ }
+}
+
+static int
+nlm_map_status(nlm4_stats stat)
+{
+ switch (stat) {
+ case nlm4_granted:
+ return (0);
+
+ case nlm4_denied:
+ return (EAGAIN);
+
+ case nlm4_denied_nolocks:
+ return (ENOLCK);
+
+ case nlm4_deadlck:
+ return (EDEADLK);
+
+ case nlm4_rofs:
+ return (EROFS);
+
+ case nlm4_stale_fh:
+ return (ESTALE);
+
+ case nlm4_fbig:
+ return (EFBIG);
+
+ case nlm4_failed:
+ return (EACCES);
+
+ default:
+ return (EINVAL);
+ }
+}
+
+static struct nlm_file_svid *
+nlm_find_svid(void *id)
+{
+ struct nlm_file_svid *ns, *newns;
+ int h;
+
+ h = (((uintptr_t) id) >> 7) % NLM_SVID_HASH_SIZE;
+
+ mtx_lock(&nlm_svid_lock);
+ LIST_FOREACH(ns, &nlm_file_svids[h], ns_link) {
+ if (ns->ns_id == id) {
+ ns->ns_refs++;
+ break;
+ }
+ }
+ mtx_unlock(&nlm_svid_lock);
+ if (!ns) {
+ int svid = alloc_unr(nlm_svid_allocator);
+ newns = malloc(sizeof(struct nlm_file_svid), M_NLM,
+ M_WAITOK);
+ newns->ns_refs = 1;
+ newns->ns_id = id;
+ newns->ns_svid = svid;
+ newns->ns_ucred = NULL;
+ newns->ns_active = FALSE;
+
+ /*
+ * We need to check for a race with some other
+ * thread allocating a svid for this file.
+ */
+ mtx_lock(&nlm_svid_lock);
+ LIST_FOREACH(ns, &nlm_file_svids[h], ns_link) {
+ if (ns->ns_id == id) {
+ ns->ns_refs++;
+ break;
+ }
+ }
+ if (ns) {
+ mtx_unlock(&nlm_svid_lock);
+ free_unr(nlm_svid_allocator, newns->ns_svid);
+ free(newns, M_NLM);
+ } else {
+ LIST_INSERT_HEAD(&nlm_file_svids[h], newns,
+ ns_link);
+ ns = newns;
+ mtx_unlock(&nlm_svid_lock);
+ }
+ }
+
+ return (ns);
+}
+
+static void
+nlm_free_svid(struct nlm_file_svid *ns)
+{
+
+ mtx_lock(&nlm_svid_lock);
+ ns->ns_refs--;
+ if (!ns->ns_refs) {
+ KASSERT(!ns->ns_active, ("Freeing active SVID"));
+ LIST_REMOVE(ns, ns_link);
+ mtx_unlock(&nlm_svid_lock);
+ free_unr(nlm_svid_allocator, ns->ns_svid);
+ if (ns->ns_ucred)
+ crfree(ns->ns_ucred);
+ free(ns, M_NLM);
+ } else {
+ mtx_unlock(&nlm_svid_lock);
+ }
+}
+
+static int
+nlm_init_lock(struct flock *fl, int flags, int svid,
+ rpcvers_t vers, size_t fhlen, void *fh, off_t size,
+ struct nlm4_lock *lock, char oh_space[32])
+{
+ size_t oh_len;
+ off_t start, len;
+
+ if (fl->l_whence == SEEK_END) {
+ if (size > OFF_MAX
+ || (fl->l_start > 0 && size > OFF_MAX - fl->l_start))
+ return (EOVERFLOW);
+ start = size + fl->l_start;
+ } else if (fl->l_whence == SEEK_SET || fl->l_whence == SEEK_CUR) {
+ start = fl->l_start;
+ } else {
+ return (EINVAL);
+ }
+ if (start < 0)
+ return (EINVAL);
+ if (fl->l_len < 0) {
+ len = -fl->l_len;
+ start -= len;
+ if (start < 0)
+ return (EINVAL);
+ } else {
+ len = fl->l_len;
+ }
+
+ if (vers == NLM_VERS) {
+ /*
+ * Enforce range limits on V1 locks
+ */
+ if (start > 0xffffffffLL || len > 0xffffffffLL)
+ return (EOVERFLOW);
+ }
+
+ snprintf(oh_space, 32, "%d@%s", svid, hostname);
+ oh_len = strlen(oh_space);
+
+ memset(lock, 0, sizeof(*lock));
+ lock->caller_name = hostname;
+ lock->fh.n_len = fhlen;
+ lock->fh.n_bytes = fh;
+ lock->oh.n_len = oh_len;
+ lock->oh.n_bytes = oh_space;
+ lock->svid = svid;
+ lock->l_offset = start;
+ lock->l_len = len;
+
+ return (0);
+}
diff --git a/sys/nlm/nlm_prot.h b/sys/nlm/nlm_prot.h
index 6197189f8fe6..98c5688e49f5 100644
--- a/sys/nlm/nlm_prot.h
+++ b/sys/nlm/nlm_prot.h
@@ -280,129 +280,129 @@ typedef struct nlm4_notify nlm4_notify;
#define NLM_SM ((unsigned long)(0))
#define NLM_SM_NOTIFY ((unsigned long)(1))
-extern enum clnt_stat nlm_sm_notify_0(struct nlm_sm_status *, void *, CLIENT *);
+extern enum clnt_stat nlm_sm_notify_0(struct nlm_sm_status *, void *, CLIENT *, struct rpc_callextra *, struct timeval);
extern bool_t nlm_sm_notify_0_svc(struct nlm_sm_status *, void *, struct svc_req *);
#define NLM_VERS ((unsigned long)(1))
#define NLM_TEST ((unsigned long)(1))
-extern enum clnt_stat nlm_test_1(struct nlm_testargs *, nlm_testres *, CLIENT *);
+extern enum clnt_stat nlm_test_1(struct nlm_testargs *, nlm_testres *, CLIENT *, struct rpc_callextra *, struct timeval);
extern bool_t nlm_test_1_svc(struct nlm_testargs *, nlm_testres *, struct svc_req *);
#define NLM_LOCK ((unsigned long)(2))
-extern enum clnt_stat nlm_lock_1(struct nlm_lockargs *, nlm_res *, CLIENT *);
+extern enum clnt_stat nlm_lock_1(struct nlm_lockargs *, nlm_res *, CLIENT *, struct rpc_callextra *, struct timeval);
extern bool_t nlm_lock_1_svc(struct nlm_lockargs *, nlm_res *, struct svc_req *);
#define NLM_CANCEL ((unsigned long)(3))
-extern enum clnt_stat nlm_cancel_1(struct nlm_cancargs *, nlm_res *, CLIENT *);
+extern enum clnt_stat nlm_cancel_1(struct nlm_cancargs *, nlm_res *, CLIENT *, struct rpc_callextra *, struct timeval);
extern bool_t nlm_cancel_1_svc(struct nlm_cancargs *, nlm_res *, struct svc_req *);
#define NLM_UNLOCK ((unsigned long)(4))
-extern enum clnt_stat nlm_unlock_1(struct nlm_unlockargs *, nlm_res *, CLIENT *);
+extern enum clnt_stat nlm_unlock_1(struct nlm_unlockargs *, nlm_res *, CLIENT *, struct rpc_callextra *, struct timeval);
extern bool_t nlm_unlock_1_svc(struct nlm_unlockargs *, nlm_res *, struct svc_req *);
#define NLM_GRANTED ((unsigned long)(5))
-extern enum clnt_stat nlm_granted_1(struct nlm_testargs *, nlm_res *, CLIENT *);
+extern enum clnt_stat nlm_granted_1(struct nlm_testargs *, nlm_res *, CLIENT *, struct rpc_callextra *, struct timeval);
extern bool_t nlm_granted_1_svc(struct nlm_testargs *, nlm_res *, struct svc_req *);
#define NLM_TEST_MSG ((unsigned long)(6))
-extern enum clnt_stat nlm_test_msg_1(struct nlm_testargs *, void *, CLIENT *);
+extern enum clnt_stat nlm_test_msg_1(struct nlm_testargs *, void *, CLIENT *, struct rpc_callextra *, struct timeval);
extern bool_t nlm_test_msg_1_svc(struct nlm_testargs *, void *, struct svc_req *);
#define NLM_LOCK_MSG ((unsigned long)(7))
-extern enum clnt_stat nlm_lock_msg_1(struct nlm_lockargs *, void *, CLIENT *);
+extern enum clnt_stat nlm_lock_msg_1(struct nlm_lockargs *, void *, CLIENT *, struct rpc_callextra *, struct timeval);
extern bool_t nlm_lock_msg_1_svc(struct nlm_lockargs *, void *, struct svc_req *);
#define NLM_CANCEL_MSG ((unsigned long)(8))
-extern enum clnt_stat nlm_cancel_msg_1(struct nlm_cancargs *, void *, CLIENT *);
+extern enum clnt_stat nlm_cancel_msg_1(struct nlm_cancargs *, void *, CLIENT *, struct rpc_callextra *, struct timeval);
extern bool_t nlm_cancel_msg_1_svc(struct nlm_cancargs *, void *, struct svc_req *);
#define NLM_UNLOCK_MSG ((unsigned long)(9))
-extern enum clnt_stat nlm_unlock_msg_1(struct nlm_unlockargs *, void *, CLIENT *);
+extern enum clnt_stat nlm_unlock_msg_1(struct nlm_unlockargs *, void *, CLIENT *, struct rpc_callextra *, struct timeval);
extern bool_t nlm_unlock_msg_1_svc(struct nlm_unlockargs *, void *, struct svc_req *);
#define NLM_GRANTED_MSG ((unsigned long)(10))
-extern enum clnt_stat nlm_granted_msg_1(struct nlm_testargs *, void *, CLIENT *);
+extern enum clnt_stat nlm_granted_msg_1(struct nlm_testargs *, void *, CLIENT *, struct rpc_callextra *, struct timeval);
extern bool_t nlm_granted_msg_1_svc(struct nlm_testargs *, void *, struct svc_req *);
#define NLM_TEST_RES ((unsigned long)(11))
-extern enum clnt_stat nlm_test_res_1(nlm_testres *, void *, CLIENT *);
+extern enum clnt_stat nlm_test_res_1(nlm_testres *, void *, CLIENT *, struct rpc_callextra *, struct timeval);
extern bool_t nlm_test_res_1_svc(nlm_testres *, void *, struct svc_req *);
#define NLM_LOCK_RES ((unsigned long)(12))
-extern enum clnt_stat nlm_lock_res_1(nlm_res *, void *, CLIENT *);
+extern enum clnt_stat nlm_lock_res_1(nlm_res *, void *, CLIENT *, struct rpc_callextra *, struct timeval);
extern bool_t nlm_lock_res_1_svc(nlm_res *, void *, struct svc_req *);
#define NLM_CANCEL_RES ((unsigned long)(13))
-extern enum clnt_stat nlm_cancel_res_1(nlm_res *, void *, CLIENT *);
+extern enum clnt_stat nlm_cancel_res_1(nlm_res *, void *, CLIENT *, struct rpc_callextra *, struct timeval);
extern bool_t nlm_cancel_res_1_svc(nlm_res *, void *, struct svc_req *);
#define NLM_UNLOCK_RES ((unsigned long)(14))
-extern enum clnt_stat nlm_unlock_res_1(nlm_res *, void *, CLIENT *);
+extern enum clnt_stat nlm_unlock_res_1(nlm_res *, void *, CLIENT *, struct rpc_callextra *, struct timeval);
extern bool_t nlm_unlock_res_1_svc(nlm_res *, void *, struct svc_req *);
#define NLM_GRANTED_RES ((unsigned long)(15))
-extern enum clnt_stat nlm_granted_res_1(nlm_res *, void *, CLIENT *);
+extern enum clnt_stat nlm_granted_res_1(nlm_res *, void *, CLIENT *, struct rpc_callextra *, struct timeval);
extern bool_t nlm_granted_res_1_svc(nlm_res *, void *, struct svc_req *);
extern int nlm_prog_1_freeresult(SVCXPRT *, xdrproc_t, caddr_t);
#define NLM_VERSX ((unsigned long)(3))
#define NLM_SHARE ((unsigned long)(20))
-extern enum clnt_stat nlm_share_3(nlm_shareargs *, nlm_shareres *, CLIENT *);
+extern enum clnt_stat nlm_share_3(nlm_shareargs *, nlm_shareres *, CLIENT *, struct rpc_callextra *, struct timeval);
extern bool_t nlm_share_3_svc(nlm_shareargs *, nlm_shareres *, struct svc_req *);
#define NLM_UNSHARE ((unsigned long)(21))
-extern enum clnt_stat nlm_unshare_3(nlm_shareargs *, nlm_shareres *, CLIENT *);
+extern enum clnt_stat nlm_unshare_3(nlm_shareargs *, nlm_shareres *, CLIENT *, struct rpc_callextra *, struct timeval);
extern bool_t nlm_unshare_3_svc(nlm_shareargs *, nlm_shareres *, struct svc_req *);
#define NLM_NM_LOCK ((unsigned long)(22))
-extern enum clnt_stat nlm_nm_lock_3(nlm_lockargs *, nlm_res *, CLIENT *);
+extern enum clnt_stat nlm_nm_lock_3(nlm_lockargs *, nlm_res *, CLIENT *, struct rpc_callextra *, struct timeval);
extern bool_t nlm_nm_lock_3_svc(nlm_lockargs *, nlm_res *, struct svc_req *);
#define NLM_FREE_ALL ((unsigned long)(23))
-extern enum clnt_stat nlm_free_all_3(nlm_notify *, void *, CLIENT *);
+extern enum clnt_stat nlm_free_all_3(nlm_notify *, void *, CLIENT *, struct rpc_callextra *, struct timeval);
extern bool_t nlm_free_all_3_svc(nlm_notify *, void *, struct svc_req *);
extern int nlm_prog_3_freeresult(SVCXPRT *, xdrproc_t, caddr_t);
#define NLM_VERS4 ((unsigned long)(4))
#define NLM4_TEST ((unsigned long)(1))
-extern enum clnt_stat nlm4_test_4(nlm4_testargs *, nlm4_testres *, CLIENT *);
+extern enum clnt_stat nlm4_test_4(nlm4_testargs *, nlm4_testres *, CLIENT *, struct rpc_callextra *, struct timeval);
extern bool_t nlm4_test_4_svc(nlm4_testargs *, nlm4_testres *, struct svc_req *);
#define NLM4_LOCK ((unsigned long)(2))
-extern enum clnt_stat nlm4_lock_4(nlm4_lockargs *, nlm4_res *, CLIENT *);
+extern enum clnt_stat nlm4_lock_4(nlm4_lockargs *, nlm4_res *, CLIENT *, struct rpc_callextra *, struct timeval);
extern bool_t nlm4_lock_4_svc(nlm4_lockargs *, nlm4_res *, struct svc_req *);
#define NLM4_CANCEL ((unsigned long)(3))
-extern enum clnt_stat nlm4_cancel_4(nlm4_cancargs *, nlm4_res *, CLIENT *);
+extern enum clnt_stat nlm4_cancel_4(nlm4_cancargs *, nlm4_res *, CLIENT *, struct rpc_callextra *, struct timeval);
extern bool_t nlm4_cancel_4_svc(nlm4_cancargs *, nlm4_res *, struct svc_req *);
#define NLM4_UNLOCK ((unsigned long)(4))
-extern enum clnt_stat nlm4_unlock_4(nlm4_unlockargs *, nlm4_res *, CLIENT *);
+extern enum clnt_stat nlm4_unlock_4(nlm4_unlockargs *, nlm4_res *, CLIENT *, struct rpc_callextra *, struct timeval);
extern bool_t nlm4_unlock_4_svc(nlm4_unlockargs *, nlm4_res *, struct svc_req *);
#define NLM4_GRANTED ((unsigned long)(5))
-extern enum clnt_stat nlm4_granted_4(nlm4_testargs *, nlm4_res *, CLIENT *);
+extern enum clnt_stat nlm4_granted_4(nlm4_testargs *, nlm4_res *, CLIENT *, struct rpc_callextra *, struct timeval);
extern bool_t nlm4_granted_4_svc(nlm4_testargs *, nlm4_res *, struct svc_req *);
#define NLM4_TEST_MSG ((unsigned long)(6))
-extern enum clnt_stat nlm4_test_msg_4(nlm4_testargs *, void *, CLIENT *);
+extern enum clnt_stat nlm4_test_msg_4(nlm4_testargs *, void *, CLIENT *, struct rpc_callextra *, struct timeval);
extern bool_t nlm4_test_msg_4_svc(nlm4_testargs *, void *, struct svc_req *);
#define NLM4_LOCK_MSG ((unsigned long)(7))
-extern enum clnt_stat nlm4_lock_msg_4(nlm4_lockargs *, void *, CLIENT *);
+extern enum clnt_stat nlm4_lock_msg_4(nlm4_lockargs *, void *, CLIENT *, struct rpc_callextra *, struct timeval);
extern bool_t nlm4_lock_msg_4_svc(nlm4_lockargs *, void *, struct svc_req *);
#define NLM4_CANCEL_MSG ((unsigned long)(8))
-extern enum clnt_stat nlm4_cancel_msg_4(nlm4_cancargs *, void *, CLIENT *);
+extern enum clnt_stat nlm4_cancel_msg_4(nlm4_cancargs *, void *, CLIENT *, struct rpc_callextra *, struct timeval);
extern bool_t nlm4_cancel_msg_4_svc(nlm4_cancargs *, void *, struct svc_req *);
#define NLM4_UNLOCK_MSG ((unsigned long)(9))
-extern enum clnt_stat nlm4_unlock_msg_4(nlm4_unlockargs *, void *, CLIENT *);
+extern enum clnt_stat nlm4_unlock_msg_4(nlm4_unlockargs *, void *, CLIENT *, struct rpc_callextra *, struct timeval);
extern bool_t nlm4_unlock_msg_4_svc(nlm4_unlockargs *, void *, struct svc_req *);
#define NLM4_GRANTED_MSG ((unsigned long)(10))
-extern enum clnt_stat nlm4_granted_msg_4(nlm4_testargs *, void *, CLIENT *);
+extern enum clnt_stat nlm4_granted_msg_4(nlm4_testargs *, void *, CLIENT *, struct rpc_callextra *, struct timeval);
extern bool_t nlm4_granted_msg_4_svc(nlm4_testargs *, void *, struct svc_req *);
#define NLM4_TEST_RES ((unsigned long)(11))
-extern enum clnt_stat nlm4_test_res_4(nlm4_testres *, void *, CLIENT *);
+extern enum clnt_stat nlm4_test_res_4(nlm4_testres *, void *, CLIENT *, struct rpc_callextra *, struct timeval);
extern bool_t nlm4_test_res_4_svc(nlm4_testres *, void *, struct svc_req *);
#define NLM4_LOCK_RES ((unsigned long)(12))
-extern enum clnt_stat nlm4_lock_res_4(nlm4_res *, void *, CLIENT *);
+extern enum clnt_stat nlm4_lock_res_4(nlm4_res *, void *, CLIENT *, struct rpc_callextra *, struct timeval);
extern bool_t nlm4_lock_res_4_svc(nlm4_res *, void *, struct svc_req *);
#define NLM4_CANCEL_RES ((unsigned long)(13))
-extern enum clnt_stat nlm4_cancel_res_4(nlm4_res *, void *, CLIENT *);
+extern enum clnt_stat nlm4_cancel_res_4(nlm4_res *, void *, CLIENT *, struct rpc_callextra *, struct timeval);
extern bool_t nlm4_cancel_res_4_svc(nlm4_res *, void *, struct svc_req *);
#define NLM4_UNLOCK_RES ((unsigned long)(14))
-extern enum clnt_stat nlm4_unlock_res_4(nlm4_res *, void *, CLIENT *);
+extern enum clnt_stat nlm4_unlock_res_4(nlm4_res *, void *, CLIENT *, struct rpc_callextra *, struct timeval);
extern bool_t nlm4_unlock_res_4_svc(nlm4_res *, void *, struct svc_req *);
#define NLM4_GRANTED_RES ((unsigned long)(15))
-extern enum clnt_stat nlm4_granted_res_4(nlm4_res *, void *, CLIENT *);
+extern enum clnt_stat nlm4_granted_res_4(nlm4_res *, void *, CLIENT *, struct rpc_callextra *, struct timeval);
extern bool_t nlm4_granted_res_4_svc(nlm4_res *, void *, struct svc_req *);
#define NLM4_SHARE ((unsigned long)(20))
-extern enum clnt_stat nlm4_share_4(nlm4_shareargs *, nlm4_shareres *, CLIENT *);
+extern enum clnt_stat nlm4_share_4(nlm4_shareargs *, nlm4_shareres *, CLIENT *, struct rpc_callextra *, struct timeval);
extern bool_t nlm4_share_4_svc(nlm4_shareargs *, nlm4_shareres *, struct svc_req *);
#define NLM4_UNSHARE ((unsigned long)(21))
-extern enum clnt_stat nlm4_unshare_4(nlm4_shareargs *, nlm4_shareres *, CLIENT *);
+extern enum clnt_stat nlm4_unshare_4(nlm4_shareargs *, nlm4_shareres *, CLIENT *, struct rpc_callextra *, struct timeval);
extern bool_t nlm4_unshare_4_svc(nlm4_shareargs *, nlm4_shareres *, struct svc_req *);
#define NLM4_NM_LOCK ((unsigned long)(22))
-extern enum clnt_stat nlm4_nm_lock_4(nlm4_lockargs *, nlm4_res *, CLIENT *);
+extern enum clnt_stat nlm4_nm_lock_4(nlm4_lockargs *, nlm4_res *, CLIENT *, struct rpc_callextra *, struct timeval);
extern bool_t nlm4_nm_lock_4_svc(nlm4_lockargs *, nlm4_res *, struct svc_req *);
#define NLM4_FREE_ALL ((unsigned long)(23))
-extern enum clnt_stat nlm4_free_all_4(nlm4_notify *, void *, CLIENT *);
+extern enum clnt_stat nlm4_free_all_4(nlm4_notify *, void *, CLIENT *, struct rpc_callextra *, struct timeval);
extern bool_t nlm4_free_all_4_svc(nlm4_notify *, void *, struct svc_req *);
extern int nlm_prog_4_freeresult(SVCXPRT *, xdrproc_t, caddr_t);
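
(For reference, illustrative and not part of the generated code: callers that previously relied on the stubs' built-in 25-second timeout now supply their own timeout plus an optional rpc_callextra; passing NULL for the extras is assumed to be acceptable to the transport.)

	struct timeval timo = { 25, 0 };
	enum clnt_stat stat;

	/* old: stat = nlm4_test_4(&args, &res, client); */
	stat = nlm4_test_4(&args, &res, client, NULL, timo);
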
diff --git a/sys/nlm/nlm_prot_clnt.c b/sys/nlm/nlm_prot_clnt.c
index 9a16e32a9106..a268e63f6c3c 100644
--- a/sys/nlm/nlm_prot_clnt.c
+++ b/sys/nlm/nlm_prot_clnt.c
@@ -17,356 +17,353 @@ __RCSID("$NetBSD: nlm_prot.x,v 1.6 2000/06/07 14:30:15 bouyer Exp $");
#endif /* not lint */
__FBSDID("$FreeBSD$");
-/* Default timeout can be changed using clnt_control() */
-static struct timeval TIMEOUT = { 25, 0 };
-
enum clnt_stat
-nlm_sm_notify_0(struct nlm_sm_status *argp, void *clnt_res, CLIENT *clnt)
+nlm_sm_notify_0(struct nlm_sm_status *argp, void *clnt_res, CLIENT *clnt, struct rpc_callextra *ext, struct timeval timo)
{
- return (clnt_call(clnt, NLM_SM_NOTIFY,
+ return (CLNT_CALL_EXT(clnt, ext, NLM_SM_NOTIFY,
(xdrproc_t) xdr_nlm_sm_status, (caddr_t) argp,
(xdrproc_t) xdr_void, (caddr_t) clnt_res,
- TIMEOUT));
+ timo));
}
enum clnt_stat
-nlm_test_1(struct nlm_testargs *argp, nlm_testres *clnt_res, CLIENT *clnt)
+nlm_test_1(struct nlm_testargs *argp, nlm_testres *clnt_res, CLIENT *clnt, struct rpc_callextra *ext, struct timeval timo)
{
- return (clnt_call(clnt, NLM_TEST,
+ return (CLNT_CALL_EXT(clnt, ext, NLM_TEST,
(xdrproc_t) xdr_nlm_testargs, (caddr_t) argp,
(xdrproc_t) xdr_nlm_testres, (caddr_t) clnt_res,
- TIMEOUT));
+ timo));
}
enum clnt_stat
-nlm_lock_1(struct nlm_lockargs *argp, nlm_res *clnt_res, CLIENT *clnt)
+nlm_lock_1(struct nlm_lockargs *argp, nlm_res *clnt_res, CLIENT *clnt, struct rpc_callextra *ext, struct timeval timo)
{
- return (clnt_call(clnt, NLM_LOCK,
+ return (CLNT_CALL_EXT(clnt, ext, NLM_LOCK,
(xdrproc_t) xdr_nlm_lockargs, (caddr_t) argp,
(xdrproc_t) xdr_nlm_res, (caddr_t) clnt_res,
- TIMEOUT));
+ timo));
}
enum clnt_stat
-nlm_cancel_1(struct nlm_cancargs *argp, nlm_res *clnt_res, CLIENT *clnt)
+nlm_cancel_1(struct nlm_cancargs *argp, nlm_res *clnt_res, CLIENT *clnt, struct rpc_callextra *ext, struct timeval timo)
{
- return (clnt_call(clnt, NLM_CANCEL,
+ return (CLNT_CALL_EXT(clnt, ext, NLM_CANCEL,
(xdrproc_t) xdr_nlm_cancargs, (caddr_t) argp,
(xdrproc_t) xdr_nlm_res, (caddr_t) clnt_res,
- TIMEOUT));
+ timo));
}
enum clnt_stat
-nlm_unlock_1(struct nlm_unlockargs *argp, nlm_res *clnt_res, CLIENT *clnt)
+nlm_unlock_1(struct nlm_unlockargs *argp, nlm_res *clnt_res, CLIENT *clnt, struct rpc_callextra *ext, struct timeval timo)
{
- return (clnt_call(clnt, NLM_UNLOCK,
+ return (CLNT_CALL_EXT(clnt, ext, NLM_UNLOCK,
(xdrproc_t) xdr_nlm_unlockargs, (caddr_t) argp,
(xdrproc_t) xdr_nlm_res, (caddr_t) clnt_res,
- TIMEOUT));
+ timo));
}
enum clnt_stat
-nlm_granted_1(struct nlm_testargs *argp, nlm_res *clnt_res, CLIENT *clnt)
+nlm_granted_1(struct nlm_testargs *argp, nlm_res *clnt_res, CLIENT *clnt, struct rpc_callextra *ext, struct timeval timo)
{
- return (clnt_call(clnt, NLM_GRANTED,
+ return (CLNT_CALL_EXT(clnt, ext, NLM_GRANTED,
(xdrproc_t) xdr_nlm_testargs, (caddr_t) argp,
(xdrproc_t) xdr_nlm_res, (caddr_t) clnt_res,
- TIMEOUT));
+ timo));
}
enum clnt_stat
-nlm_test_msg_1(struct nlm_testargs *argp, void *clnt_res, CLIENT *clnt)
+nlm_test_msg_1(struct nlm_testargs *argp, void *clnt_res, CLIENT *clnt, struct rpc_callextra *ext, struct timeval timo)
{
- return (clnt_call(clnt, NLM_TEST_MSG,
+ return (CLNT_CALL_EXT(clnt, ext, NLM_TEST_MSG,
(xdrproc_t) xdr_nlm_testargs, (caddr_t) argp,
(xdrproc_t) xdr_void, (caddr_t) clnt_res,
- TIMEOUT));
+ timo));
}
enum clnt_stat
-nlm_lock_msg_1(struct nlm_lockargs *argp, void *clnt_res, CLIENT *clnt)
+nlm_lock_msg_1(struct nlm_lockargs *argp, void *clnt_res, CLIENT *clnt, struct rpc_callextra *ext, struct timeval timo)
{
- return (clnt_call(clnt, NLM_LOCK_MSG,
+ return (CLNT_CALL_EXT(clnt, ext, NLM_LOCK_MSG,
(xdrproc_t) xdr_nlm_lockargs, (caddr_t) argp,
(xdrproc_t) xdr_void, (caddr_t) clnt_res,
- TIMEOUT));
+ timo));
}
enum clnt_stat
-nlm_cancel_msg_1(struct nlm_cancargs *argp, void *clnt_res, CLIENT *clnt)
+nlm_cancel_msg_1(struct nlm_cancargs *argp, void *clnt_res, CLIENT *clnt, struct rpc_callextra *ext, struct timeval timo)
{
- return (clnt_call(clnt, NLM_CANCEL_MSG,
+ return (CLNT_CALL_EXT(clnt, ext, NLM_CANCEL_MSG,
(xdrproc_t) xdr_nlm_cancargs, (caddr_t) argp,
(xdrproc_t) xdr_void, (caddr_t) clnt_res,
- TIMEOUT));
+ timo));
}
enum clnt_stat
-nlm_unlock_msg_1(struct nlm_unlockargs *argp, void *clnt_res, CLIENT *clnt)
+nlm_unlock_msg_1(struct nlm_unlockargs *argp, void *clnt_res, CLIENT *clnt, struct rpc_callextra *ext, struct timeval timo)
{
- return (clnt_call(clnt, NLM_UNLOCK_MSG,
+ return (CLNT_CALL_EXT(clnt, ext, NLM_UNLOCK_MSG,
(xdrproc_t) xdr_nlm_unlockargs, (caddr_t) argp,
(xdrproc_t) xdr_void, (caddr_t) clnt_res,
- TIMEOUT));
+ timo));
}
enum clnt_stat
-nlm_granted_msg_1(struct nlm_testargs *argp, void *clnt_res, CLIENT *clnt)
+nlm_granted_msg_1(struct nlm_testargs *argp, void *clnt_res, CLIENT *clnt, struct rpc_callextra *ext, struct timeval timo)
{
- return (clnt_call(clnt, NLM_GRANTED_MSG,
+ return (CLNT_CALL_EXT(clnt, ext, NLM_GRANTED_MSG,
(xdrproc_t) xdr_nlm_testargs, (caddr_t) argp,
(xdrproc_t) xdr_void, (caddr_t) clnt_res,
- TIMEOUT));
+ timo));
}
enum clnt_stat
-nlm_test_res_1(nlm_testres *argp, void *clnt_res, CLIENT *clnt)
+nlm_test_res_1(nlm_testres *argp, void *clnt_res, CLIENT *clnt, struct rpc_callextra *ext, struct timeval timo)
{
- return (clnt_call(clnt, NLM_TEST_RES,
+ return (CLNT_CALL_EXT(clnt, ext, NLM_TEST_RES,
(xdrproc_t) xdr_nlm_testres, (caddr_t) argp,
(xdrproc_t) xdr_void, (caddr_t) clnt_res,
- TIMEOUT));
+ timo));
}
enum clnt_stat
-nlm_lock_res_1(nlm_res *argp, void *clnt_res, CLIENT *clnt)
+nlm_lock_res_1(nlm_res *argp, void *clnt_res, CLIENT *clnt, struct rpc_callextra *ext, struct timeval timo)
{
- return (clnt_call(clnt, NLM_LOCK_RES,
+ return (CLNT_CALL_EXT(clnt, ext, NLM_LOCK_RES,
(xdrproc_t) xdr_nlm_res, (caddr_t) argp,
(xdrproc_t) xdr_void, (caddr_t) clnt_res,
- TIMEOUT));
+ timo));
}
enum clnt_stat
-nlm_cancel_res_1(nlm_res *argp, void *clnt_res, CLIENT *clnt)
+nlm_cancel_res_1(nlm_res *argp, void *clnt_res, CLIENT *clnt, struct rpc_callextra *ext, struct timeval timo)
{
- return (clnt_call(clnt, NLM_CANCEL_RES,
+ return (CLNT_CALL_EXT(clnt, ext, NLM_CANCEL_RES,
(xdrproc_t) xdr_nlm_res, (caddr_t) argp,
(xdrproc_t) xdr_void, (caddr_t) clnt_res,
- TIMEOUT));
+ timo));
}
enum clnt_stat
-nlm_unlock_res_1(nlm_res *argp, void *clnt_res, CLIENT *clnt)
+nlm_unlock_res_1(nlm_res *argp, void *clnt_res, CLIENT *clnt, struct rpc_callextra *ext, struct timeval timo)
{
- return (clnt_call(clnt, NLM_UNLOCK_RES,
+ return (CLNT_CALL_EXT(clnt, ext, NLM_UNLOCK_RES,
(xdrproc_t) xdr_nlm_res, (caddr_t) argp,
(xdrproc_t) xdr_void, (caddr_t) clnt_res,
- TIMEOUT));
+ timo));
}
enum clnt_stat
-nlm_granted_res_1(nlm_res *argp, void *clnt_res, CLIENT *clnt)
+nlm_granted_res_1(nlm_res *argp, void *clnt_res, CLIENT *clnt, struct rpc_callextra *ext, struct timeval timo)
{
- return (clnt_call(clnt, NLM_GRANTED_RES,
+ return (CLNT_CALL_EXT(clnt, ext, NLM_GRANTED_RES,
(xdrproc_t) xdr_nlm_res, (caddr_t) argp,
(xdrproc_t) xdr_void, (caddr_t) clnt_res,
- TIMEOUT));
+ timo));
}
enum clnt_stat
-nlm_share_3(nlm_shareargs *argp, nlm_shareres *clnt_res, CLIENT *clnt)
+nlm_share_3(nlm_shareargs *argp, nlm_shareres *clnt_res, CLIENT *clnt, struct rpc_callextra *ext, struct timeval timo)
{
- return (clnt_call(clnt, NLM_SHARE,
+ return (CLNT_CALL_EXT(clnt, ext, NLM_SHARE,
(xdrproc_t) xdr_nlm_shareargs, (caddr_t) argp,
(xdrproc_t) xdr_nlm_shareres, (caddr_t) clnt_res,
- TIMEOUT));
+ timo));
}
enum clnt_stat
-nlm_unshare_3(nlm_shareargs *argp, nlm_shareres *clnt_res, CLIENT *clnt)
+nlm_unshare_3(nlm_shareargs *argp, nlm_shareres *clnt_res, CLIENT *clnt, struct rpc_callextra *ext, struct timeval timo)
{
- return (clnt_call(clnt, NLM_UNSHARE,
+ return (CLNT_CALL_EXT(clnt, ext, NLM_UNSHARE,
(xdrproc_t) xdr_nlm_shareargs, (caddr_t) argp,
(xdrproc_t) xdr_nlm_shareres, (caddr_t) clnt_res,
- TIMEOUT));
+ timo));
}
enum clnt_stat
-nlm_nm_lock_3(nlm_lockargs *argp, nlm_res *clnt_res, CLIENT *clnt)
+nlm_nm_lock_3(nlm_lockargs *argp, nlm_res *clnt_res, CLIENT *clnt, struct rpc_callextra *ext, struct timeval timo)
{
- return (clnt_call(clnt, NLM_NM_LOCK,
+ return (CLNT_CALL_EXT(clnt, ext, NLM_NM_LOCK,
(xdrproc_t) xdr_nlm_lockargs, (caddr_t) argp,
(xdrproc_t) xdr_nlm_res, (caddr_t) clnt_res,
- TIMEOUT));
+ timo));
}
enum clnt_stat
-nlm_free_all_3(nlm_notify *argp, void *clnt_res, CLIENT *clnt)
+nlm_free_all_3(nlm_notify *argp, void *clnt_res, CLIENT *clnt, struct rpc_callextra *ext, struct timeval timo)
{
- return (clnt_call(clnt, NLM_FREE_ALL,
+ return (CLNT_CALL_EXT(clnt, ext, NLM_FREE_ALL,
(xdrproc_t) xdr_nlm_notify, (caddr_t) argp,
(xdrproc_t) xdr_void, (caddr_t) clnt_res,
- TIMEOUT));
+ timo));
}
enum clnt_stat
-nlm4_test_4(nlm4_testargs *argp, nlm4_testres *clnt_res, CLIENT *clnt)
+nlm4_test_4(nlm4_testargs *argp, nlm4_testres *clnt_res, CLIENT *clnt, struct rpc_callextra *ext, struct timeval timo)
{
- return (clnt_call(clnt, NLM4_TEST,
+ return (CLNT_CALL_EXT(clnt, ext, NLM4_TEST,
(xdrproc_t) xdr_nlm4_testargs, (caddr_t) argp,
(xdrproc_t) xdr_nlm4_testres, (caddr_t) clnt_res,
- TIMEOUT));
+ timo));
}
enum clnt_stat
-nlm4_lock_4(nlm4_lockargs *argp, nlm4_res *clnt_res, CLIENT *clnt)
+nlm4_lock_4(nlm4_lockargs *argp, nlm4_res *clnt_res, CLIENT *clnt, struct rpc_callextra *ext, struct timeval timo)
{
- return (clnt_call(clnt, NLM4_LOCK,
+ return (CLNT_CALL_EXT(clnt, ext, NLM4_LOCK,
(xdrproc_t) xdr_nlm4_lockargs, (caddr_t) argp,
(xdrproc_t) xdr_nlm4_res, (caddr_t) clnt_res,
- TIMEOUT));
+ timo));
}
enum clnt_stat
-nlm4_cancel_4(nlm4_cancargs *argp, nlm4_res *clnt_res, CLIENT *clnt)
+nlm4_cancel_4(nlm4_cancargs *argp, nlm4_res *clnt_res, CLIENT *clnt, struct rpc_callextra *ext, struct timeval timo)
{
- return (clnt_call(clnt, NLM4_CANCEL,
+ return (CLNT_CALL_EXT(clnt, ext, NLM4_CANCEL,
(xdrproc_t) xdr_nlm4_cancargs, (caddr_t) argp,
(xdrproc_t) xdr_nlm4_res, (caddr_t) clnt_res,
- TIMEOUT));
+ timo));
}
enum clnt_stat
-nlm4_unlock_4(nlm4_unlockargs *argp, nlm4_res *clnt_res, CLIENT *clnt)
+nlm4_unlock_4(nlm4_unlockargs *argp, nlm4_res *clnt_res, CLIENT *clnt, struct rpc_callextra *ext, struct timeval timo)
{
- return (clnt_call(clnt, NLM4_UNLOCK,
+ return (CLNT_CALL_EXT(clnt, ext, NLM4_UNLOCK,
(xdrproc_t) xdr_nlm4_unlockargs, (caddr_t) argp,
(xdrproc_t) xdr_nlm4_res, (caddr_t) clnt_res,
- TIMEOUT));
+ timo));
}
enum clnt_stat
-nlm4_granted_4(nlm4_testargs *argp, nlm4_res *clnt_res, CLIENT *clnt)
+nlm4_granted_4(nlm4_testargs *argp, nlm4_res *clnt_res, CLIENT *clnt, struct rpc_callextra *ext, struct timeval timo)
{
- return (clnt_call(clnt, NLM4_GRANTED,
+ return (CLNT_CALL_EXT(clnt, ext, NLM4_GRANTED,
(xdrproc_t) xdr_nlm4_testargs, (caddr_t) argp,
(xdrproc_t) xdr_nlm4_res, (caddr_t) clnt_res,
- TIMEOUT));
+ timo));
}
enum clnt_stat
-nlm4_test_msg_4(nlm4_testargs *argp, void *clnt_res, CLIENT *clnt)
+nlm4_test_msg_4(nlm4_testargs *argp, void *clnt_res, CLIENT *clnt, struct rpc_callextra *ext, struct timeval timo)
{
- return (clnt_call(clnt, NLM4_TEST_MSG,
+ return (CLNT_CALL_EXT(clnt, ext, NLM4_TEST_MSG,
(xdrproc_t) xdr_nlm4_testargs, (caddr_t) argp,
(xdrproc_t) xdr_void, (caddr_t) clnt_res,
- TIMEOUT));
+ timo));
}
enum clnt_stat
-nlm4_lock_msg_4(nlm4_lockargs *argp, void *clnt_res, CLIENT *clnt)
+nlm4_lock_msg_4(nlm4_lockargs *argp, void *clnt_res, CLIENT *clnt, struct rpc_callextra *ext, struct timeval timo)
{
- return (clnt_call(clnt, NLM4_LOCK_MSG,
+ return (CLNT_CALL_EXT(clnt, ext, NLM4_LOCK_MSG,
(xdrproc_t) xdr_nlm4_lockargs, (caddr_t) argp,
(xdrproc_t) xdr_void, (caddr_t) clnt_res,
- TIMEOUT));
+ timo));
}
enum clnt_stat
-nlm4_cancel_msg_4(nlm4_cancargs *argp, void *clnt_res, CLIENT *clnt)
+nlm4_cancel_msg_4(nlm4_cancargs *argp, void *clnt_res, CLIENT *clnt, struct rpc_callextra *ext, struct timeval timo)
{
- return (clnt_call(clnt, NLM4_CANCEL_MSG,
+ return (CLNT_CALL_EXT(clnt, ext, NLM4_CANCEL_MSG,
(xdrproc_t) xdr_nlm4_cancargs, (caddr_t) argp,
(xdrproc_t) xdr_void, (caddr_t) clnt_res,
- TIMEOUT));
+ timo));
}
enum clnt_stat
-nlm4_unlock_msg_4(nlm4_unlockargs *argp, void *clnt_res, CLIENT *clnt)
+nlm4_unlock_msg_4(nlm4_unlockargs *argp, void *clnt_res, CLIENT *clnt, struct rpc_callextra *ext, struct timeval timo)
{
- return (clnt_call(clnt, NLM4_UNLOCK_MSG,
+ return (CLNT_CALL_EXT(clnt, ext, NLM4_UNLOCK_MSG,
(xdrproc_t) xdr_nlm4_unlockargs, (caddr_t) argp,
(xdrproc_t) xdr_void, (caddr_t) clnt_res,
- TIMEOUT));
+ timo));
}
enum clnt_stat
-nlm4_granted_msg_4(nlm4_testargs *argp, void *clnt_res, CLIENT *clnt)
+nlm4_granted_msg_4(nlm4_testargs *argp, void *clnt_res, CLIENT *clnt, struct rpc_callextra *ext, struct timeval timo)
{
- return (clnt_call(clnt, NLM4_GRANTED_MSG,
+ return (CLNT_CALL_EXT(clnt, ext, NLM4_GRANTED_MSG,
(xdrproc_t) xdr_nlm4_testargs, (caddr_t) argp,
(xdrproc_t) xdr_void, (caddr_t) clnt_res,
- TIMEOUT));
+ timo));
}
enum clnt_stat
-nlm4_test_res_4(nlm4_testres *argp, void *clnt_res, CLIENT *clnt)
+nlm4_test_res_4(nlm4_testres *argp, void *clnt_res, CLIENT *clnt, struct rpc_callextra *ext, struct timeval timo)
{
- return (clnt_call(clnt, NLM4_TEST_RES,
+ return (CLNT_CALL_EXT(clnt, ext, NLM4_TEST_RES,
(xdrproc_t) xdr_nlm4_testres, (caddr_t) argp,
(xdrproc_t) xdr_void, (caddr_t) clnt_res,
- TIMEOUT));
+ timo));
}
enum clnt_stat
-nlm4_lock_res_4(nlm4_res *argp, void *clnt_res, CLIENT *clnt)
+nlm4_lock_res_4(nlm4_res *argp, void *clnt_res, CLIENT *clnt, struct rpc_callextra *ext, struct timeval timo)
{
- return (clnt_call(clnt, NLM4_LOCK_RES,
+ return (CLNT_CALL_EXT(clnt, ext, NLM4_LOCK_RES,
(xdrproc_t) xdr_nlm4_res, (caddr_t) argp,
(xdrproc_t) xdr_void, (caddr_t) clnt_res,
- TIMEOUT));
+ timo));
}
enum clnt_stat
-nlm4_cancel_res_4(nlm4_res *argp, void *clnt_res, CLIENT *clnt)
+nlm4_cancel_res_4(nlm4_res *argp, void *clnt_res, CLIENT *clnt, struct rpc_callextra *ext, struct timeval timo)
{
- return (clnt_call(clnt, NLM4_CANCEL_RES,
+ return (CLNT_CALL_EXT(clnt, ext, NLM4_CANCEL_RES,
(xdrproc_t) xdr_nlm4_res, (caddr_t) argp,
(xdrproc_t) xdr_void, (caddr_t) clnt_res,
- TIMEOUT));
+ timo));
}
enum clnt_stat
-nlm4_unlock_res_4(nlm4_res *argp, void *clnt_res, CLIENT *clnt)
+nlm4_unlock_res_4(nlm4_res *argp, void *clnt_res, CLIENT *clnt, struct rpc_callextra *ext, struct timeval timo)
{
- return (clnt_call(clnt, NLM4_UNLOCK_RES,
+ return (CLNT_CALL_EXT(clnt, ext, NLM4_UNLOCK_RES,
(xdrproc_t) xdr_nlm4_res, (caddr_t) argp,
(xdrproc_t) xdr_void, (caddr_t) clnt_res,
- TIMEOUT));
+ timo));
}
enum clnt_stat
-nlm4_granted_res_4(nlm4_res *argp, void *clnt_res, CLIENT *clnt)
+nlm4_granted_res_4(nlm4_res *argp, void *clnt_res, CLIENT *clnt, struct rpc_callextra *ext, struct timeval timo)
{
- return (clnt_call(clnt, NLM4_GRANTED_RES,
+ return (CLNT_CALL_EXT(clnt, ext, NLM4_GRANTED_RES,
(xdrproc_t) xdr_nlm4_res, (caddr_t) argp,
(xdrproc_t) xdr_void, (caddr_t) clnt_res,
- TIMEOUT));
+ timo));
}
enum clnt_stat
-nlm4_share_4(nlm4_shareargs *argp, nlm4_shareres *clnt_res, CLIENT *clnt)
+nlm4_share_4(nlm4_shareargs *argp, nlm4_shareres *clnt_res, CLIENT *clnt, struct rpc_callextra *ext, struct timeval timo)
{
- return (clnt_call(clnt, NLM4_SHARE,
+ return (CLNT_CALL_EXT(clnt, ext, NLM4_SHARE,
(xdrproc_t) xdr_nlm4_shareargs, (caddr_t) argp,
(xdrproc_t) xdr_nlm4_shareres, (caddr_t) clnt_res,
- TIMEOUT));
+ timo));
}
enum clnt_stat
-nlm4_unshare_4(nlm4_shareargs *argp, nlm4_shareres *clnt_res, CLIENT *clnt)
+nlm4_unshare_4(nlm4_shareargs *argp, nlm4_shareres *clnt_res, CLIENT *clnt, struct rpc_callextra *ext, struct timeval timo)
{
- return (clnt_call(clnt, NLM4_UNSHARE,
+ return (CLNT_CALL_EXT(clnt, ext, NLM4_UNSHARE,
(xdrproc_t) xdr_nlm4_shareargs, (caddr_t) argp,
(xdrproc_t) xdr_nlm4_shareres, (caddr_t) clnt_res,
- TIMEOUT));
+ timo));
}
enum clnt_stat
-nlm4_nm_lock_4(nlm4_lockargs *argp, nlm4_res *clnt_res, CLIENT *clnt)
+nlm4_nm_lock_4(nlm4_lockargs *argp, nlm4_res *clnt_res, CLIENT *clnt, struct rpc_callextra *ext, struct timeval timo)
{
- return (clnt_call(clnt, NLM4_NM_LOCK,
+ return (CLNT_CALL_EXT(clnt, ext, NLM4_NM_LOCK,
(xdrproc_t) xdr_nlm4_lockargs, (caddr_t) argp,
(xdrproc_t) xdr_nlm4_res, (caddr_t) clnt_res,
- TIMEOUT));
+ timo));
}
enum clnt_stat
-nlm4_free_all_4(nlm4_notify *argp, void *clnt_res, CLIENT *clnt)
+nlm4_free_all_4(nlm4_notify *argp, void *clnt_res, CLIENT *clnt, struct rpc_callextra *ext, struct timeval timo)
{
- return (clnt_call(clnt, NLM4_FREE_ALL,
+ return (CLNT_CALL_EXT(clnt, ext, NLM4_FREE_ALL,
(xdrproc_t) xdr_nlm4_notify, (caddr_t) argp,
(xdrproc_t) xdr_void, (caddr_t) clnt_res,
- TIMEOUT));
+ timo));
}
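Every regenerated stub above now takes the same two extra arguments: an optional struct rpc_callextra (used in this patch mainly to carry an AUTH handle in rc_auth) and an explicit struct timeval timeout in place of the old compiled-in TIMEOUT. A minimal sketch of the calling convention, assuming the kernel RPC headers and the declarations added elsewhere in this patch (nlm_test_res_1, nlm_zero_tv); the function name is illustrative:

#include <sys/param.h>
#include <sys/systm.h>
#include <rpc/rpc.h>
#include <nlm/nlm_prot.h>
#include <nlm/nlm.h>

/*
 * Illustrative only: send an asynchronous NLM_TEST_RES reply.  The zero
 * timeout tells the RPC layer not to wait for an answer; ext.rc_auth
 * makes the call carry the server's credentials.
 */
static void
example_send_test_res(CLIENT *rpc, nlm_testres *res, AUTH *auth)
{
	struct rpc_callextra ext;
	char dummy;

	memset(&ext, 0, sizeof(ext));
	ext.rc_auth = auth;
	nlm_test_res_1(res, &dummy, rpc, &ext, nlm_zero_tv);
}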
diff --git a/sys/nlm/nlm_prot_impl.c b/sys/nlm/nlm_prot_impl.c
index 4baa48f77668..7647ae520c27 100644
--- a/sys/nlm/nlm_prot_impl.c
+++ b/sys/nlm/nlm_prot_impl.c
@@ -33,6 +33,7 @@ __FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
+#include <sys/kthread.h>
#include <sys/lockf.h>
#include <sys/malloc.h>
#include <sys/mount.h>
@@ -51,6 +52,10 @@ __FBSDID("$FreeBSD$");
#include <sys/unistd.h>
#include <sys/vnode.h>
+#include <nfs/nfsproto.h>
+#include <nfsclient/nfs.h>
+#include <nfsclient/nfsnode.h>
+
#include <nlm/nlm_prot.h>
#include <nlm/sm_inter.h>
#include <nlm/nlm.h>
@@ -131,21 +136,53 @@ static struct socket *nlm_socket6;
static CLIENT *nlm_nsm;
/*
- * An RPC client handle that can be used to communicate with the
- * userland part of lockd.
+ * An AUTH handle for the server's creds.
*/
-static CLIENT *nlm_lockd;
+static AUTH *nlm_auth;
+
+/*
+ * A zero timeval for sending async RPC messages.
+ */
+struct timeval nlm_zero_tv = { 0, 0 };
+
+/*
+ * The local NSM state number
+ */
+int nlm_nsm_state;
+
+
+/*
+ * A lock to protect the host list and waiting lock list.
+ */
+static struct mtx nlm_global_lock;
/*
* Locks:
* (l) locked by nh_lock
* (s) only accessed via server RPC which is single threaded
+ * (g) locked by nlm_global_lock
* (c) const until freeing
+ * (a) modified using atomic ops
+ */
+
+/*
+ * A pending client-side lock request, stored on the nlm_waiting_locks
+ * list.
*/
+struct nlm_waiting_lock {
+ TAILQ_ENTRY(nlm_waiting_lock) nw_link; /* (g) */
+ bool_t nw_waiting; /* (g) */
+ nlm4_lock nw_lock; /* (c) */
+ union nfsfh nw_fh; /* (c) */
+ struct vnode *nw_vp; /* (c) */
+};
+TAILQ_HEAD(nlm_waiting_lock_list, nlm_waiting_lock);
+
+struct nlm_waiting_lock_list nlm_waiting_locks; /* (g) */
/*
- * A pending asynchronous lock request, stored on the nh_pending list
- * of the NLM host.
+ * A pending server-side asynchronous lock request, stored on the
+ * nh_pending list of the NLM host.
*/
struct nlm_async_lock {
TAILQ_ENTRY(nlm_async_lock) af_link; /* (l) host's list of locks */
@@ -154,6 +191,7 @@ struct nlm_async_lock {
struct vnode *af_vp; /* (l) vnode to lock */
struct flock af_fl; /* (c) lock details */
struct nlm_host *af_host; /* (c) host which is locking */
+ CLIENT *af_rpc; /* (c) rpc client to send message */
nlm4_testargs af_granted; /* (c) notification details */
};
TAILQ_HEAD(nlm_async_lock_list, nlm_async_lock);
@@ -164,19 +202,21 @@ TAILQ_HEAD(nlm_async_lock_list, nlm_async_lock);
enum nlm_host_state {
NLM_UNMONITORED,
NLM_MONITORED,
- NLM_MONITOR_FAILED
+ NLM_MONITOR_FAILED,
+ NLM_RECOVERING
};
struct nlm_host {
struct mtx nh_lock;
- TAILQ_ENTRY(nlm_host) nh_link; /* (s) global list of hosts */
- char *nh_caller_name; /* (c) printable name of host */
+ volatile u_int nh_refs; /* (a) reference count */
+ TAILQ_ENTRY(nlm_host) nh_link; /* (g) global list of hosts */
+ char nh_caller_name[MAXNAMELEN]; /* (c) printable name of host */
uint32_t nh_sysid; /* (c) our allocated system ID */
char nh_sysid_string[10]; /* (c) string rep. of sysid */
struct sockaddr_storage nh_addr; /* (s) remote address of host */
- CLIENT *nh_rpc; /* (s) RPC handle to send to host */
+ CLIENT *nh_rpc; /* (l) RPC handle to send to host */
rpcvers_t nh_vers; /* (s) NLM version of host */
int nh_state; /* (s) last seen NSM state of host */
- enum nlm_host_state nh_monstate; /* (s) local NSM monitoring state */
+ enum nlm_host_state nh_monstate; /* (l) local NSM monitoring state */
time_t nh_idle_timeout; /* (s) Time at which host is idle */
time_t nh_rpc_create_time; /* (s) Time we create RPC client */
struct sysctl_ctx_list nh_sysctl; /* (c) vfs.nlm.sysid nodes */
@@ -185,8 +225,8 @@ struct nlm_host {
};
TAILQ_HEAD(nlm_host_list, nlm_host);
-static struct nlm_host_list nlm_hosts;
-static uint32_t nlm_next_sysid = 1;
+static struct nlm_host_list nlm_hosts; /* (g) */
+static uint32_t nlm_next_sysid = 1; /* (g) */
static void nlm_host_unmonitor(struct nlm_host *);
@@ -200,6 +240,8 @@ nlm_init(void *dummy)
{
int error;
+ mtx_init(&nlm_global_lock, "nlm_global_lock", NULL, MTX_DEF);
+ TAILQ_INIT(&nlm_waiting_locks);
TAILQ_INIT(&nlm_hosts);
error = syscall_register(&nlm_syscall_offset, &nlm_syscall_sysent,
@@ -381,7 +423,7 @@ again:
CLNT_CONTROL(rpcb, CLSET_PROG, &prog);
CLNT_CONTROL(rpcb, CLSET_VERS, &vers);
CLNT_CONTROL(rpcb, CLSET_WAITCHAN, &wchan);
- rpcb->cl_auth = authunix_create(curthread->td_ucred);
+ rpcb->cl_auth = nlm_auth;
return (rpcb);
}
@@ -394,6 +436,7 @@ static void
nlm_lock_callback(void *arg, int pending)
{
struct nlm_async_lock *af = (struct nlm_async_lock *) arg;
+ struct rpc_callextra ext;
if (nlm_debug_level >= 2)
printf("NLM: async lock %p for %s (sysid %d) granted\n",
@@ -408,9 +451,11 @@ nlm_lock_callback(void *arg, int pending)
* thing nlm_host_notify does is to cancel pending async lock
* requests.
*/
+ memset(&ext, 0, sizeof(ext));
+ ext.rc_auth = nlm_auth;
if (af->af_host->nh_vers == NLM_VERS4) {
nlm4_granted_msg_4(&af->af_granted,
- NULL, af->af_host->nh_rpc);
+ NULL, af->af_rpc, &ext, nlm_zero_tv);
} else {
/*
* Back-convert to legacy protocol
@@ -429,7 +474,7 @@ nlm_lock_callback(void *arg, int pending)
af->af_granted.alock.l_len;
nlm_granted_msg_1(&granted,
- NULL, af->af_host->nh_rpc);
+ NULL, af->af_rpc, &ext, nlm_zero_tv);
}
/*
@@ -456,6 +501,8 @@ nlm_free_async_lock(struct nlm_async_lock *af)
/*
* Free an async lock.
*/
+ if (af->af_rpc)
+ CLNT_RELEASE(af->af_rpc);
xdr_free((xdrproc_t) xdr_nlm4_testargs, &af->af_granted);
if (af->af_vp)
vrele(af->af_vp);
@@ -527,11 +574,57 @@ nlm_free_finished_locks(struct nlm_host *host)
}
/*
- * This is called when we receive a host state change
- * notification. We unlock any active locks owned by the host.
+ * Free resources used by a host. This is called after the reference
+ * count has reached zero so it doesn't need to worry about locks.
*/
static void
-nlm_host_notify(struct nlm_host *host, int newstate, bool_t destroy)
+nlm_host_destroy(struct nlm_host *host)
+{
+
+ mtx_lock(&nlm_global_lock);
+ TAILQ_REMOVE(&nlm_hosts, host, nh_link);
+ mtx_unlock(&nlm_global_lock);
+
+ if (host->nh_rpc)
+ CLNT_RELEASE(host->nh_rpc);
+ mtx_destroy(&host->nh_lock);
+ sysctl_ctx_free(&host->nh_sysctl);
+ free(host, M_NLM);
+}
+
+/*
+ * Thread start callback for client lock recovery
+ */
+static void
+nlm_client_recovery_start(void *arg)
+{
+ struct nlm_host *host = (struct nlm_host *) arg;
+
+ if (nlm_debug_level >= 1)
+ printf("NLM: client lock recovery for %s started\n",
+ host->nh_caller_name);
+
+ nlm_client_recovery(host);
+
+ if (nlm_debug_level >= 1)
+ printf("NLM: client lock recovery for %s completed\n",
+ host->nh_caller_name);
+
+ host->nh_monstate = NLM_MONITORED;
+ nlm_host_release(host);
+
+ kthread_exit();
+}
+
+/*
+ * This is called when we receive a host state change notification. We
+ * unlock any active locks owned by the host. When rpc.lockd is
+ * shutting down, this function is called with newstate set to zero
+ * which allows us to cancel any pending async locks and clear the
+ * locking state.
+ */
+static void
+nlm_host_notify(struct nlm_host *host, int newstate)
{
struct nlm_async_lock *af;
@@ -557,28 +650,24 @@ nlm_host_notify(struct nlm_host *host, int newstate, bool_t destroy)
nlm_free_finished_locks(host);
/*
- * The host just rebooted - trash its locks and forget any
- * RPC client handle that we may have for it.
+ * The host just rebooted - trash its locks.
*/
lf_clearremotesys(host->nh_sysid);
- if (host->nh_rpc) {
- AUTH_DESTROY(host->nh_rpc->cl_auth);
- CLNT_DESTROY(host->nh_rpc);
- host->nh_rpc = NULL;
- }
host->nh_state = newstate;
/*
- * Destroy the host if the caller believes that it won't be
- * used again. This is safe enough - if we see the same name
- * again, we will just create a new host.
+ * If we have any remote locks for this host (i.e. it
+ * represents a remote NFS server that our local NFS client
+ * has locks for), start a recovery thread.
*/
- if (destroy) {
- TAILQ_REMOVE(&nlm_hosts, host, nh_link);
- mtx_destroy(&host->nh_lock);
- sysctl_ctx_free(&host->nh_sysctl);
- free(host->nh_caller_name, M_NLM);
- free(host, M_NLM);
+ if (newstate != 0
+ && host->nh_monstate != NLM_RECOVERING
+ && lf_countlocks(NLM_SYSID_CLIENT | host->nh_sysid) > 0) {
+ struct thread *td;
+ host->nh_monstate = NLM_RECOVERING;
+ refcount_acquire(&host->nh_refs);
+ kthread_add(nlm_client_recovery_start, host, curproc, &td, 0, 0,
+ "NFS lock recovery for %s", host->nh_caller_name);
}
}
@@ -597,6 +686,20 @@ nlm_host_lock_count_sysctl(SYSCTL_HANDLER_ARGS)
}
/*
+ * Sysctl handler to count the number of client locks for a sysid.
+ */
+static int
+nlm_host_client_lock_count_sysctl(SYSCTL_HANDLER_ARGS)
+{
+ struct nlm_host *host;
+ int count;
+
+ host = oidp->oid_arg1;
+ count = lf_countlocks(NLM_SYSID_CLIENT | host->nh_sysid);
+ return sysctl_handle_int(oidp, &count, 0, req);
+}
+
+/*
* Create a new NLM host.
*/
static struct nlm_host *
@@ -605,12 +708,17 @@ nlm_create_host(const char* caller_name)
struct nlm_host *host;
struct sysctl_oid *oid;
+ mtx_assert(&nlm_global_lock, MA_OWNED);
+
if (nlm_debug_level >= 1)
printf("NLM: new host %s (sysid %d)\n",
caller_name, nlm_next_sysid);
- host = malloc(sizeof(struct nlm_host), M_NLM, M_WAITOK|M_ZERO);
+ host = malloc(sizeof(struct nlm_host), M_NLM, M_NOWAIT|M_ZERO);
+ if (!host)
+ return (NULL);
mtx_init(&host->nh_lock, "nh_lock", NULL, MTX_DEF);
- host->nh_caller_name = strdup(caller_name, M_NLM);
+ host->nh_refs = 1;
+ strlcpy(host->nh_caller_name, caller_name, MAXNAMELEN);
host->nh_sysid = nlm_next_sysid++;
snprintf(host->nh_sysid_string, sizeof(host->nh_sysid_string),
"%d", host->nh_sysid);
@@ -622,6 +730,8 @@ nlm_create_host(const char* caller_name)
TAILQ_INIT(&host->nh_finished);
TAILQ_INSERT_TAIL(&nlm_hosts, host, nh_link);
+ mtx_unlock(&nlm_global_lock);
+
sysctl_ctx_init(&host->nh_sysctl);
oid = SYSCTL_ADD_NODE(&host->nh_sysctl,
SYSCTL_STATIC_CHILDREN(_vfs_nlm_sysid),
@@ -635,6 +745,11 @@ nlm_create_host(const char* caller_name)
SYSCTL_ADD_PROC(&host->nh_sysctl, SYSCTL_CHILDREN(oid), OID_AUTO,
"lock_count", CTLTYPE_INT | CTLFLAG_RD, host, 0,
nlm_host_lock_count_sysctl, "I", "");
+ SYSCTL_ADD_PROC(&host->nh_sysctl, SYSCTL_CHILDREN(oid), OID_AUTO,
+ "client_lock_count", CTLTYPE_INT | CTLFLAG_RD, host, 0,
+ nlm_host_client_lock_count_sysctl, "I", "");
+
+ mtx_lock(&nlm_global_lock);
return (host);
}
@@ -683,6 +798,8 @@ nlm_check_idle(void)
{
struct nlm_host *host;
+ mtx_assert(&nlm_global_lock, MA_OWNED);
+
if (time_uptime <= nlm_next_idle_check)
return;
@@ -691,12 +808,17 @@ nlm_check_idle(void)
TAILQ_FOREACH(host, &nlm_hosts, nh_link) {
if (host->nh_monstate == NLM_MONITORED
&& time_uptime > host->nh_idle_timeout) {
- if (lf_countlocks(host->nh_sysid) > 0) {
+ mtx_unlock(&nlm_global_lock);
+ if (lf_countlocks(host->nh_sysid) > 0
+ || lf_countlocks(NLM_SYSID_CLIENT
+ + host->nh_sysid)) {
host->nh_idle_timeout =
time_uptime + NLM_IDLE_TIMEOUT;
+ mtx_lock(&nlm_global_lock);
continue;
}
nlm_host_unmonitor(host);
+ mtx_lock(&nlm_global_lock);
}
}
}
@@ -704,16 +826,18 @@ nlm_check_idle(void)
/*
* Search for an existing NLM host that matches the given name
* (typically the caller_name element of an nlm4_lock). If none is
- * found, create a new host. If 'rqstp' is non-NULL, record the remote
+ * found, create a new host. If 'addr' is non-NULL, record the remote
* address of the host so that we can call it back for async
- * responses.
+ * responses. If 'vers' is greater than zero then record the NLM
+ * program version to use to communicate with this client.
*/
struct nlm_host *
-nlm_find_host_by_name(const char *name, struct svc_req *rqstp)
+nlm_find_host_by_name(const char *name, const struct sockaddr *addr,
+ rpcvers_t vers)
{
struct nlm_host *host;
- nlm_check_idle();
+ mtx_lock(&nlm_global_lock);
/*
* The remote host is determined by caller_name.
@@ -723,18 +847,24 @@ nlm_find_host_by_name(const char *name, struct svc_req *rqstp)
break;
}
- if (!host)
+ if (!host) {
host = nlm_create_host(name);
+ if (!host) {
+ mtx_unlock(&nlm_global_lock);
+ return (NULL);
+ }
+ }
+ refcount_acquire(&host->nh_refs);
+
host->nh_idle_timeout = time_uptime + NLM_IDLE_TIMEOUT;
/*
- * If we have an RPC request, record the remote address so
- * that can send async replies etc.
+ * If we have an address for the host, record it so that we
+ * can send async replies etc.
*/
- if (rqstp) {
- struct netbuf *addr = &rqstp->rq_xprt->xp_rtaddr;
+ if (addr) {
- KASSERT(addr->len < sizeof(struct sockaddr_storage),
+ KASSERT(addr->sa_len < sizeof(struct sockaddr_storage),
("Strange remote transport address length"));
/*
@@ -745,17 +875,26 @@ nlm_find_host_by_name(const char *name, struct svc_req *rqstp)
if (host->nh_addr.ss_len && host->nh_rpc) {
if (!nlm_compare_addr(
(struct sockaddr *) &host->nh_addr,
- (struct sockaddr *) addr->buf)
- || host->nh_vers != rqstp->rq_vers) {
- AUTH_DESTROY(host->nh_rpc->cl_auth);
- CLNT_DESTROY(host->nh_rpc);
+ addr)
+ || host->nh_vers != vers) {
+ CLIENT *client;
+ mtx_lock(&host->nh_lock);
+ client = host->nh_rpc;
host->nh_rpc = NULL;
+ mtx_unlock(&host->nh_lock);
+ if (client) {
+ CLNT_RELEASE(client);
+ }
}
}
- memcpy(&host->nh_addr, addr->buf, addr->len);
- host->nh_vers = rqstp->rq_vers;
+ memcpy(&host->nh_addr, addr, addr->sa_len);
+ host->nh_vers = vers;
}
+ nlm_check_idle();
+
+ mtx_unlock(&nlm_global_lock);
+
return (host);
}
@@ -768,9 +907,32 @@ nlm_find_host_by_name(const char *name, struct svc_req *rqstp)
struct nlm_host *
nlm_find_host_by_addr(const struct sockaddr *addr, int vers)
{
+ /*
+ * Fake up a name using inet_ntop. This buffer is
+ * large enough for an IPv6 address.
+ */
+ char tmp[sizeof "ffff:ffff:ffff:ffff:ffff:ffff:255.255.255.255"];
struct nlm_host *host;
- nlm_check_idle();
+ switch (addr->sa_family) {
+ case AF_INET:
+ __rpc_inet_ntop(AF_INET,
+ &((const struct sockaddr_in *) addr)->sin_addr,
+ tmp, sizeof tmp);
+ break;
+#ifdef INET6
+ case AF_INET6:
+ __rpc_inet_ntop(AF_INET6,
+ &((const struct sockaddr_in6 *) addr)->sin6_addr,
+ tmp, sizeof tmp);
+ break;
+#endif
+ default:
+ strlcpy(tmp, "<unknown>", sizeof tmp);
+ }
+
+
+ mtx_lock(&nlm_global_lock);
/*
* The remote host is determined by caller_name.
@@ -782,33 +944,22 @@ nlm_find_host_by_addr(const struct sockaddr *addr, int vers)
}
if (!host) {
- /*
- * Fake up a name using inet_ntop. This buffer is
- * large enough for an IPv6 address.
- */
- char tmp[sizeof "ffff:ffff:ffff:ffff:ffff:ffff:255.255.255.255"];
- switch (addr->sa_family) {
- case AF_INET:
- __rpc_inet_ntop(AF_INET,
- &((const struct sockaddr_in *) addr)->sin_addr,
- tmp, sizeof tmp);
- break;
-#ifdef INET6
- case AF_INET6:
- __rpc_inet_ntop(AF_INET6,
- &((const struct sockaddr_in6 *) addr)->sin6_addr,
- tmp, sizeof tmp);
- break;
-#endif
- default:
- strcmp(tmp, "<unknown>");
- }
host = nlm_create_host(tmp);
+ if (!host) {
+ mtx_unlock(&nlm_global_lock);
+ return (NULL);
+ }
memcpy(&host->nh_addr, addr, addr->sa_len);
host->nh_vers = vers;
}
+ refcount_acquire(&host->nh_refs);
+
host->nh_idle_timeout = time_uptime + NLM_IDLE_TIMEOUT;
+ nlm_check_idle();
+
+ mtx_unlock(&nlm_global_lock);
+
return (host);
}
@@ -822,13 +973,25 @@ nlm_find_host_by_sysid(int sysid)
struct nlm_host *host;
TAILQ_FOREACH(host, &nlm_hosts, nh_link) {
- if (host->nh_sysid == sysid)
+ if (host->nh_sysid == sysid) {
+ refcount_acquire(&host->nh_refs);
return (host);
+ }
}
return (NULL);
}
+void
+nlm_host_release(struct nlm_host *host)
+{
+ if (refcount_release(&host->nh_refs)) {
+ /*
+ * Free the host
+ */
+ nlm_host_destroy(host);
+ }
+}
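Because the lookup functions now hand back a referenced host, every caller must pair the lookup with nlm_host_release(), as the server-side code below does. A minimal sketch of that discipline, assuming the same headers as the earlier sketch plus <sys/socket.h>; the function name and the printf are illustrative:

static int
example_lookup_host(const char *name, const struct sockaddr *addr,
    rpcvers_t vers)
{
	struct nlm_host *host;

	host = nlm_find_host_by_name(name, addr, vers);
	if (host == NULL)
		return (ENOMEM);	/* M_NOWAIT allocation failed */

	/* The reference keeps the host from being destroyed while in use. */
	printf("NLM: %s has sysid %d\n", name, nlm_host_get_sysid(host));

	nlm_host_release(host);		/* drop the lookup's reference */
	return (0);
}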
+
/*
* Unregister this NLM host with the local NSM due to idleness.
*/
@@ -878,7 +1041,7 @@ nlm_host_unmonitor(struct nlm_host *host)
* Register this NLM host with the local NSM so that we can be
* notified if it reboots.
*/
-static void
+void
nlm_host_monitor(struct nlm_host *host, int state)
{
mon smmon;
@@ -898,8 +1061,13 @@ nlm_host_monitor(struct nlm_host *host, int state)
host->nh_caller_name, host->nh_sysid, state);
}
- if (host->nh_monstate != NLM_UNMONITORED)
+ mtx_lock(&host->nh_lock);
+ if (host->nh_monstate != NLM_UNMONITORED) {
+ mtx_unlock(&host->nh_lock);
return;
+ }
+ host->nh_monstate = NLM_MONITORED;
+ mtx_unlock(&host->nh_lock);
if (nlm_debug_level >= 1)
printf("NLM: monitoring %s (sysid %d)\n",
@@ -930,7 +1098,9 @@ nlm_host_monitor(struct nlm_host *host, int state)
if (smstat.res_stat == stat_fail) {
printf("Local NSM refuses to monitor %s\n",
host->nh_caller_name);
+ mtx_lock(&host->nh_lock);
host->nh_monstate = NLM_MONITOR_FAILED;
+ mtx_unlock(&host->nh_lock);
return;
}
@@ -944,10 +1114,12 @@ nlm_host_monitor(struct nlm_host *host, int state)
CLIENT *
nlm_host_get_rpc(struct nlm_host *host)
{
- struct timeval zero;
+ CLIENT *client;
+
+ mtx_lock(&host->nh_lock);
/*
- * We can't hold onto RPC handles for too long - the async
+ * We can't hold onto RPC handles for too long - the async
* call/reply protocol used by some NLM clients makes it hard
* to tell when they change port numbers (e.g. after a
* reboot). Note that if a client reboots while it isn't
@@ -955,33 +1127,138 @@ nlm_host_get_rpc(struct nlm_host *host)
* expire the RPC handles after two minutes.
*/
if (host->nh_rpc && time_uptime > host->nh_rpc_create_time + 2*60) {
- CLIENT *client;
client = host->nh_rpc;
host->nh_rpc = NULL;
- CLNT_DESTROY(client);
+ mtx_unlock(&host->nh_lock);
+ CLNT_RELEASE(client);
+ mtx_lock(&host->nh_lock);
}
- if (host->nh_rpc)
- return (host->nh_rpc);
+ if (!host->nh_rpc) {
+ mtx_unlock(&host->nh_lock);
+ client = nlm_get_rpc((struct sockaddr *)&host->nh_addr,
+ NLM_PROG, host->nh_vers);
+ mtx_lock(&host->nh_lock);
+
+ if (client) {
+ if (host->nh_rpc) {
+ mtx_unlock(&host->nh_lock);
+ CLNT_DESTROY(client);
+ mtx_lock(&host->nh_lock);
+ } else {
+ host->nh_rpc = client;
+ host->nh_rpc_create_time = time_uptime;
+ }
+ }
+ }
+
+ client = host->nh_rpc;
+ if (client)
+ CLNT_ACQUIRE(client);
+ mtx_unlock(&host->nh_lock);
+
+ return (client);
+
+}
+
+int
+nlm_host_get_sysid(struct nlm_host *host)
+{
+
+ return (host->nh_sysid);
+}
+
+int
+nlm_host_get_state(struct nlm_host *host)
+{
+
+ return (host->nh_state);
+}
+
+void *
+nlm_register_wait_lock(struct nlm4_lock *lock, struct vnode *vp)
+{
+ struct nlm_waiting_lock *nw;
+
+ nw = malloc(sizeof(struct nlm_waiting_lock), M_NLM, M_WAITOK);
+ nw->nw_lock = *lock;
+ memcpy(&nw->nw_fh.fh_bytes, nw->nw_lock.fh.n_bytes,
+ nw->nw_lock.fh.n_len);
+ nw->nw_lock.fh.n_bytes = nw->nw_fh.fh_bytes;
+ nw->nw_waiting = TRUE;
+ nw->nw_vp = vp;
+ mtx_lock(&nlm_global_lock);
+ TAILQ_INSERT_TAIL(&nlm_waiting_locks, nw, nw_link);
+ mtx_unlock(&nlm_global_lock);
+
+ return nw;
+}
+
+void
+nlm_deregister_wait_lock(void *handle)
+{
+ struct nlm_waiting_lock *nw = handle;
+
+ mtx_lock(&nlm_global_lock);
+ TAILQ_REMOVE(&nlm_waiting_locks, nw, nw_link);
+ mtx_unlock(&nlm_global_lock);
+
+ free(nw, M_NLM);
+}
+
+int
+nlm_wait_lock(void *handle, int timo)
+{
+ struct nlm_waiting_lock *nw = handle;
+ int error;
/*
- * Set the send timeout to zero - we only use this rpc handle
- * for sending async replies which have no return value.
+ * If the granted message arrived before we got here,
+ * nw->nw_waiting will be FALSE - in that case, don't sleep.
*/
- host->nh_rpc = nlm_get_rpc((struct sockaddr *)&host->nh_addr,
- NLM_PROG, host->nh_vers);
+ mtx_lock(&nlm_global_lock);
+ error = 0;
+ if (nw->nw_waiting)
+ error = msleep(nw, &nlm_global_lock, PCATCH, "nlmlock", timo);
+ TAILQ_REMOVE(&nlm_waiting_locks, nw, nw_link);
+ if (error) {
+ /*
+ * The granted message may arrive after the
+ * interrupt/timeout but before we manage to lock the
+ * mutex. Detect this by examining nw_waiting.
+ */
+ if (!nw->nw_waiting)
+ error = 0;
+ } else {
+ /*
+ * If nlm_cancel_wait is called, then error will be
+ * zero but nw_waiting will still be TRUE. We
+ * translate this into EINTR.
+ */
+ if (nw->nw_waiting)
+ error = EINTR;
+ }
+ mtx_unlock(&nlm_global_lock);
- if (host->nh_rpc) {
- zero.tv_sec = 0;
- zero.tv_usec = 0;
- CLNT_CONTROL(host->nh_rpc, CLSET_TIMEOUT, &zero);
+ free(nw, M_NLM);
- host->nh_rpc_create_time = time_uptime;
- }
+ return (error);
+}
+
+void
+nlm_cancel_wait(struct vnode *vp)
+{
+ struct nlm_waiting_lock *nw;
- return (host->nh_rpc);
+ mtx_lock(&nlm_global_lock);
+ TAILQ_FOREACH(nw, &nlm_waiting_locks, nw_link) {
+ if (nw->nw_vp == vp) {
+ wakeup(nw);
+ }
+ }
+ mtx_unlock(&nlm_global_lock);
}
+
/**********************************************************************/
/*
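The waiting-lock helpers above are meant to bracket a blocking client-side lock: register the wait entry before sending the lock RPC so an early granted callback cannot be lost, then either sleep in nlm_wait_lock() or, if the server granted the lock synchronously, discard the entry with nlm_deregister_wait_lock(). A rough sketch of that sequence with error handling and cookie setup trimmed; only the helpers and the nlm4_lock_4() stub come from this patch, everything else (names, the 10-second sleep, the EACCES mapping) is illustrative:

static int
example_blocking_lock(struct nlm4_lock *lock, struct vnode *vp,
    CLIENT *rpc, struct rpc_callextra *ext, struct timeval timo)
{
	nlm4_lockargs args;
	nlm4_res res;
	void *wait_handle;
	int error;

	/* Register first so nlm_do_granted() can find and wake us. */
	wait_handle = nlm_register_wait_lock(lock, vp);

	memset(&args, 0, sizeof(args));
	memset(&res, 0, sizeof(res));
	args.block = TRUE;
	args.exclusive = TRUE;
	args.alock = *lock;
	nlm4_lock_4(&args, &res, rpc, ext, timo);

	if (res.stat.stat == nlm4_blocked) {
		/* Sleep until the granted callback, a timeout or a signal. */
		error = nlm_wait_lock(wait_handle, 10 * hz);
	} else {
		nlm_deregister_wait_lock(wait_handle);
		error = (res.stat.stat == nlm4_granted) ? 0 : EACCES;
	}
	return (error);
}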
@@ -1099,7 +1376,11 @@ nlm_server_main(int addr_count, char **addrs)
sm_stat smstat;
struct timeval timo;
enum clnt_stat stat;
- struct nlm_host *host;
+ struct nlm_host *host, *nhost;
+ struct nlm_waiting_lock *nw;
+ vop_advlock_t *old_nfs_advlock;
+ vop_reclaim_t *old_nfs_reclaim;
+ int v4_used, v6_used;
if (nlm_socket) {
printf("NLM: can't start server - it appears to be running already\n");
@@ -1129,6 +1410,7 @@ nlm_server_main(int addr_count, char **addrs)
td->td_ucred, td);
if (error) {
printf("NLM: can't create IPv6 socket - error %d\n", error);
+ goto out;
return (error);
}
opt.sopt_dir = SOPT_SET;
@@ -1140,6 +1422,8 @@ nlm_server_main(int addr_count, char **addrs)
sosetopt(nlm_socket6, &opt);
#endif
+ nlm_auth = authunix_create(curthread->td_ucred);
+
#ifdef INET6
memset(&sin6, 0, sizeof(sin6));
sin6.sin6_len = sizeof(sin6);
@@ -1191,36 +1475,88 @@ nlm_server_main(int addr_count, char **addrs)
if (nlm_debug_level >= 1)
printf("NLM: local NSM state is %d\n", smstat.state);
+ nlm_nsm_state = smstat.state;
+
+ old_nfs_advlock = nfs_advlock_p;
+ nfs_advlock_p = nlm_advlock;
+ old_nfs_reclaim = nfs_reclaim_p;
+ nfs_reclaim_p = nlm_reclaim;
svc_run(pool);
error = 0;
+ nfs_advlock_p = old_nfs_advlock;
+ nfs_reclaim_p = old_nfs_reclaim;
+
out:
if (pool)
svcpool_destroy(pool);
/*
- * Trash all the existing state so that if the server
- * restarts, it gets a clean slate.
+ * We are finished communicating with the NSM.
*/
- while ((host = TAILQ_FIRST(&nlm_hosts)) != NULL) {
- nlm_host_notify(host, 0, TRUE);
- }
if (nlm_nsm) {
- AUTH_DESTROY(nlm_nsm->cl_auth);
- CLNT_DESTROY(nlm_nsm);
+ CLNT_RELEASE(nlm_nsm);
nlm_nsm = NULL;
}
- if (nlm_lockd) {
- AUTH_DESTROY(nlm_lockd->cl_auth);
- CLNT_DESTROY(nlm_lockd);
- nlm_lockd = NULL;
+
+ /*
+ * Trash all the existing state so that if the server
+ * restarts, it gets a clean slate. This is complicated by the
+ * possibility that there may be other threads trying to make
+ * client locking requests.
+ *
+ * First we fake a client reboot notification which will
+ * cancel any pending async locks and purge remote lock state
+ * from the local lock manager. We release the reference from
+ * nlm_hosts to the host (which may remove it from the list
+ * and free it). After this phase, the only entries in the
+ * nlm_host list should be from other threads performing
+ * client lock requests. We arrange to defer closing the
+ * sockets until the last RPC client handle is released.
+ */
+ v4_used = 0;
+#ifdef INET6
+ v6_used = 0;
+#endif
+ mtx_lock(&nlm_global_lock);
+ TAILQ_FOREACH(nw, &nlm_waiting_locks, nw_link) {
+ wakeup(nw);
+ }
+ TAILQ_FOREACH_SAFE(host, &nlm_hosts, nh_link, nhost) {
+ mtx_unlock(&nlm_global_lock);
+ nlm_host_notify(host, 0);
+ nlm_host_release(host);
+ mtx_lock(&nlm_global_lock);
+ }
+ TAILQ_FOREACH_SAFE(host, &nlm_hosts, nh_link, nhost) {
+ mtx_lock(&host->nh_lock);
+ if (host->nh_rpc) {
+ if (host->nh_addr.ss_family == AF_INET)
+ v4_used++;
+#ifdef INET6
+ if (host->nh_addr.ss_family == AF_INET6)
+ v6_used++;
+#endif
+ /*
+ * Note that the rpc over udp code copes
+ * correctly with the fact that a socket may
+ * be used by many rpc handles.
+ */
+ CLNT_CONTROL(host->nh_rpc, CLSET_FD_CLOSE, 0);
+ }
+ mtx_unlock(&host->nh_lock);
}
+ mtx_unlock(&nlm_global_lock);
+
+ AUTH_DESTROY(nlm_auth);
- soclose(nlm_socket);
+ if (!v4_used)
+ soclose(nlm_socket);
nlm_socket = NULL;
#ifdef INET6
- soclose(nlm_socket6);
+ if (!v6_used)
+ soclose(nlm_socket6);
nlm_socket6 = NULL;
#endif
@@ -1264,8 +1600,10 @@ nlm_sm_notify(struct nlm_sm_status *argp)
printf("nlm_sm_notify(): mon_name = %s\n", argp->mon_name);
memcpy(&sysid, &argp->priv, sizeof(sysid));
host = nlm_find_host_by_sysid(sysid);
- if (host)
- nlm_host_notify(host, argp->state, FALSE);
+ if (host) {
+ nlm_host_notify(host, argp->state);
+ nlm_host_release(host);
+ }
}
static void
@@ -1372,8 +1710,9 @@ nlm_convert_error(int error)
return nlm4_failed;
}
-struct nlm_host *
-nlm_do_test(nlm4_testargs *argp, nlm4_testres *result, struct svc_req *rqstp)
+int
+nlm_do_test(nlm4_testargs *argp, nlm4_testres *result, struct svc_req *rqstp,
+ CLIENT **rpcp)
{
fhandle_t fh;
struct vfs_state vs;
@@ -1382,11 +1721,13 @@ nlm_do_test(nlm4_testargs *argp, nlm4_testres *result, struct svc_req *rqstp)
struct flock fl;
memset(result, 0, sizeof(*result));
+ memset(&vs, 0, sizeof(vs));
- host = nlm_find_host_by_name(argp->alock.caller_name, rqstp);
+ host = nlm_find_host_by_name(argp->alock.caller_name,
+ (struct sockaddr *) rqstp->rq_xprt->xp_rtaddr.buf, rqstp->rq_vers);
if (!host) {
result->stat.stat = nlm4_denied_nolocks;
- return (NULL);
+ return (ENOMEM);
}
if (nlm_debug_level >= 3)
@@ -1401,7 +1742,7 @@ nlm_do_test(nlm4_testargs *argp, nlm4_testres *result, struct svc_req *rqstp)
if (time_uptime < nlm_grace_threshold) {
result->stat.stat = nlm4_denied_grace_period;
- return (host);
+ goto out;
}
error = nlm_get_vfs_state(host, rqstp, &fh, &vs);
@@ -1452,6 +1793,7 @@ nlm_do_test(nlm4_testargs *argp, nlm4_testres *result, struct svc_req *rqstp)
* For the moment, return nothing in oh
* (already zero'ed above).
*/
+ nlm_host_release(bhost);
}
result->stat.nlm4_testrply_u.holder.l_offset = fl.l_start;
result->stat.nlm4_testrply_u.holder.l_len = fl.l_len;
@@ -1459,12 +1801,15 @@ nlm_do_test(nlm4_testargs *argp, nlm4_testres *result, struct svc_req *rqstp)
out:
nlm_release_vfs_state(&vs);
- return (host);
+ if (rpcp)
+ *rpcp = nlm_host_get_rpc(host);
+ nlm_host_release(host);
+ return (0);
}
-struct nlm_host *
+int
nlm_do_lock(nlm4_lockargs *argp, nlm4_res *result, struct svc_req *rqstp,
- bool_t monitor)
+ bool_t monitor, CLIENT **rpcp)
{
fhandle_t fh;
struct vfs_state vs;
@@ -1473,11 +1818,13 @@ nlm_do_lock(nlm4_lockargs *argp, nlm4_res *result, struct svc_req *rqstp,
struct flock fl;
memset(result, 0, sizeof(*result));
+ memset(&vs, 0, sizeof(vs));
- host = nlm_find_host_by_name(argp->alock.caller_name, rqstp);
+ host = nlm_find_host_by_name(argp->alock.caller_name,
+ (struct sockaddr *) rqstp->rq_xprt->xp_rtaddr.buf, rqstp->rq_vers);
if (!host) {
result->stat.stat = nlm4_denied_nolocks;
- return (NULL);
+ return (ENOMEM);
}
if (nlm_debug_level >= 3)
@@ -1490,7 +1837,7 @@ nlm_do_lock(nlm4_lockargs *argp, nlm4_res *result, struct svc_req *rqstp,
* The host rebooted without telling us. Trash its
* locks.
*/
- nlm_host_notify(host, argp->state, FALSE);
+ nlm_host_notify(host, argp->state);
}
nlm_free_finished_locks(host);
@@ -1501,7 +1848,7 @@ nlm_do_lock(nlm4_lockargs *argp, nlm4_res *result, struct svc_req *rqstp,
if (time_uptime < nlm_grace_threshold && !argp->reclaim) {
result->stat.stat = nlm4_denied_grace_period;
- return (host);
+ goto out;
}
error = nlm_get_vfs_state(host, rqstp, &fh, &vs);
@@ -1521,11 +1868,13 @@ nlm_do_lock(nlm4_lockargs *argp, nlm4_res *result, struct svc_req *rqstp,
fl.l_type = F_RDLCK;
if (argp->block) {
struct nlm_async_lock *af;
+ CLIENT *client;
/*
* First, make sure we can contact the host's NLM.
*/
- if (!nlm_host_get_rpc(host)) {
+ client = nlm_host_get_rpc(host);
+ if (!client) {
result->stat.stat = nlm4_failed;
goto out;
}
@@ -1547,6 +1896,7 @@ nlm_do_lock(nlm4_lockargs *argp, nlm4_res *result, struct svc_req *rqstp,
}
mtx_unlock(&host->nh_lock);
if (af) {
+ CLNT_RELEASE(client);
result->stat.stat = nlm4_blocked;
goto out;
}
@@ -1557,6 +1907,7 @@ nlm_do_lock(nlm4_lockargs *argp, nlm4_res *result, struct svc_req *rqstp,
af->af_vp = vs.vs_vp;
af->af_fl = fl;
af->af_host = host;
+ af->af_rpc = client;
/*
* We use M_RPC here so that we can xdr_free the thing
* later.
@@ -1592,6 +1943,7 @@ nlm_do_lock(nlm4_lockargs *argp, nlm4_res *result, struct svc_req *rqstp,
* tracking structure now.
*/
if (error != EINPROGRESS) {
+ CLNT_RELEASE(af->af_rpc);
mtx_lock(&host->nh_lock);
TAILQ_REMOVE(&host->nh_pending, af, af_link);
mtx_unlock(&host->nh_lock);
@@ -1632,12 +1984,15 @@ nlm_do_lock(nlm4_lockargs *argp, nlm4_res *result, struct svc_req *rqstp,
out:
nlm_release_vfs_state(&vs);
-
- return (host);
+ if (rpcp)
+ *rpcp = nlm_host_get_rpc(host);
+ nlm_host_release(host);
+ return (0);
}
-struct nlm_host *
-nlm_do_cancel(nlm4_cancargs *argp, nlm4_res *result, struct svc_req *rqstp)
+int
+nlm_do_cancel(nlm4_cancargs *argp, nlm4_res *result, struct svc_req *rqstp,
+ CLIENT **rpcp)
{
fhandle_t fh;
struct vfs_state vs;
@@ -1647,11 +2002,13 @@ nlm_do_cancel(nlm4_cancargs *argp, nlm4_res *result, struct svc_req *rqstp)
struct nlm_async_lock *af;
memset(result, 0, sizeof(*result));
+ memset(&vs, 0, sizeof(vs));
- host = nlm_find_host_by_name(argp->alock.caller_name, rqstp);
+ host = nlm_find_host_by_name(argp->alock.caller_name,
+ (struct sockaddr *) rqstp->rq_xprt->xp_rtaddr.buf, rqstp->rq_vers);
if (!host) {
result->stat.stat = nlm4_denied_nolocks;
- return (NULL);
+ return (ENOMEM);
}
if (nlm_debug_level >= 3)
@@ -1666,7 +2023,7 @@ nlm_do_cancel(nlm4_cancargs *argp, nlm4_res *result, struct svc_req *rqstp)
if (time_uptime < nlm_grace_threshold) {
result->stat.stat = nlm4_denied_grace_period;
- return (host);
+ goto out;
}
error = nlm_get_vfs_state(host, rqstp, &fh, &vs);
@@ -1718,12 +2075,15 @@ nlm_do_cancel(nlm4_cancargs *argp, nlm4_res *result, struct svc_req *rqstp)
out:
nlm_release_vfs_state(&vs);
-
- return (host);
+ if (rpcp)
+ *rpcp = nlm_host_get_rpc(host);
+ nlm_host_release(host);
+ return (0);
}
-struct nlm_host *
-nlm_do_unlock(nlm4_unlockargs *argp, nlm4_res *result, struct svc_req *rqstp)
+int
+nlm_do_unlock(nlm4_unlockargs *argp, nlm4_res *result, struct svc_req *rqstp,
+ CLIENT **rpcp)
{
fhandle_t fh;
struct vfs_state vs;
@@ -1732,11 +2092,13 @@ nlm_do_unlock(nlm4_unlockargs *argp, nlm4_res *result, struct svc_req *rqstp)
struct flock fl;
memset(result, 0, sizeof(*result));
+ memset(&vs, 0, sizeof(vs));
- host = nlm_find_host_by_name(argp->alock.caller_name, rqstp);
+ host = nlm_find_host_by_name(argp->alock.caller_name,
+ (struct sockaddr *) rqstp->rq_xprt->xp_rtaddr.buf, rqstp->rq_vers);
if (!host) {
result->stat.stat = nlm4_denied_nolocks;
- return (NULL);
+ return (ENOMEM);
}
if (nlm_debug_level >= 3)
@@ -1751,7 +2113,7 @@ nlm_do_unlock(nlm4_unlockargs *argp, nlm4_res *result, struct svc_req *rqstp)
if (time_uptime < nlm_grace_threshold) {
result->stat.stat = nlm4_denied_grace_period;
- return (host);
+ goto out;
}
error = nlm_get_vfs_state(host, rqstp, &fh, &vs);
@@ -1776,8 +2138,54 @@ nlm_do_unlock(nlm4_unlockargs *argp, nlm4_res *result, struct svc_req *rqstp)
out:
nlm_release_vfs_state(&vs);
+ if (rpcp)
+ *rpcp = nlm_host_get_rpc(host);
+ nlm_host_release(host);
+ return (0);
+}
- return (host);
+int
+nlm_do_granted(nlm4_testargs *argp, nlm4_res *result, struct svc_req *rqstp,
+ CLIENT **rpcp)
+{
+ struct nlm_host *host;
+ struct nlm_waiting_lock *nw;
+
+ memset(result, 0, sizeof(*result));
+
+ host = nlm_find_host_by_addr(
+ (struct sockaddr *) rqstp->rq_xprt->xp_rtaddr.buf,
+ rqstp->rq_vers);
+ if (!host) {
+ result->stat.stat = nlm4_denied_nolocks;
+ return (ENOMEM);
+ }
+
+ nlm_copy_netobj(&result->cookie, &argp->cookie, M_RPC);
+ result->stat.stat = nlm4_denied;
+
+ mtx_lock(&nlm_global_lock);
+ TAILQ_FOREACH(nw, &nlm_waiting_locks, nw_link) {
+ if (!nw->nw_waiting)
+ continue;
+ if (argp->alock.svid == nw->nw_lock.svid
+ && argp->alock.l_offset == nw->nw_lock.l_offset
+ && argp->alock.l_len == nw->nw_lock.l_len
+ && argp->alock.fh.n_len == nw->nw_lock.fh.n_len
+ && !memcmp(argp->alock.fh.n_bytes, nw->nw_lock.fh.n_bytes,
+ nw->nw_lock.fh.n_len)) {
+ nw->nw_waiting = FALSE;
+ wakeup(nw);
+ result->stat.stat = nlm4_granted;
+ break;
+ }
+ }
+ mtx_unlock(&nlm_global_lock);
+ if (rpcp)
+ *rpcp = nlm_host_get_rpc(host);
+ nlm_host_release(host);
+ return (0);
}
void
@@ -1787,45 +2195,10 @@ nlm_do_free_all(nlm4_notify *argp)
TAILQ_FOREACH_SAFE(host, &nlm_hosts, nh_link, thost) {
if (!strcmp(host->nh_caller_name, argp->name))
- nlm_host_notify(host, argp->state, FALSE);
+ nlm_host_notify(host, argp->state);
}
}
-#define _PATH_RPCLOCKDSOCK "/var/run/rpclockd.sock"
-
-/*
- * Make a connection to the userland lockd - we push anything we can't
- * handle out to userland.
- */
-CLIENT *
-nlm_user_lockd(void)
-{
- struct sockaddr_un sun;
- struct netconfig *nconf;
- struct timeval zero;
-
- if (nlm_lockd)
- return (nlm_lockd);
-
- sun.sun_family = AF_LOCAL;
- strcpy(sun.sun_path, _PATH_RPCLOCKDSOCK);
- sun.sun_len = SUN_LEN(&sun);
-
- nconf = getnetconfigent("local");
- nlm_lockd = clnt_reconnect_create(nconf, (struct sockaddr *) &sun,
- NLM_PROG, NLM_VERS4, RPC_MAXDATASIZE, RPC_MAXDATASIZE);
-
- /*
- * Set the send timeout to zero - we only use this rpc handle
- * for sending async replies which have no return value.
- */
- zero.tv_sec = 0;
- zero.tv_usec = 0;
- CLNT_CONTROL(nlm_lockd, CLSET_TIMEOUT, &zero);
-
- return (nlm_lockd);
-}
-
/*
* Kernel module glue
*/
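The server stubs in the next file are all reworked around the new nlm_do_* contract: the helper returns an errno (non-zero only when no host could be set up), and when the caller passes a non-NULL rpcp it gets back a referenced CLIENT from nlm_host_get_rpc() that it must drop with CLNT_RELEASE() after sending the asynchronous reply. Condensed into a template (the routine name is illustrative; the real versions follow in nlm_prot_server.c):

static bool_t
example_unlock_msg_svc(nlm4_unlockargs *argp, struct svc_req *rqstp)
{
	nlm4_res res;
	CLIENT *rpc;
	char dummy;

	/* Non-zero (e.g. ENOMEM) means no nlm_host could be found or created. */
	if (nlm_do_unlock(argp, &res, rqstp, &rpc))
		return (FALSE);

	if (rpc) {
		/* Fire-and-forget reply, then drop our CLIENT reference. */
		nlm4_unlock_res_4(&res, &dummy, rpc, NULL, nlm_zero_tv);
		CLNT_RELEASE(rpc);
	}
	xdr_free((xdrproc_t) xdr_nlm4_res, &res);
	return (FALSE);
}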
diff --git a/sys/nlm/nlm_prot_server.c b/sys/nlm/nlm_prot_server.c
index 320680ad5e6b..fd6b4491e9a0 100644
--- a/sys/nlm/nlm_prot_server.c
+++ b/sys/nlm/nlm_prot_server.c
@@ -232,7 +232,6 @@ nlm_test_msg_1_svc(struct nlm_testargs *argp, void *result, struct svc_req *rqst
nlm4_testargs args4;
nlm4_testres res4;
nlm_testres res;
- struct nlm_host *host;
CLIENT *rpc;
char dummy;
@@ -240,7 +239,8 @@ nlm_test_msg_1_svc(struct nlm_testargs *argp, void *result, struct svc_req *rqst
args4.exclusive = argp->exclusive;
nlm_convert_to_nlm4_lock(&args4.alock, &argp->alock);
- host = nlm_do_test(&args4, &res4, rqstp);
+ if (nlm_do_test(&args4, &res4, rqstp, &rpc))
+ return (FALSE);
res.cookie = res4.cookie;
res.stat.stat = nlm_convert_to_nlm_stats(res4.stat.stat);
@@ -249,9 +249,10 @@ nlm_test_msg_1_svc(struct nlm_testargs *argp, void *result, struct svc_req *rqst
&res.stat.nlm_testrply_u.holder,
&res4.stat.nlm4_testrply_u.holder);
- rpc = nlm_host_get_rpc(host);
- if (rpc)
- nlm_test_res_1(&res, &dummy, rpc);
+ if (rpc) {
+ nlm_test_res_1(&res, &dummy, rpc, NULL, nlm_zero_tv);
+ CLNT_RELEASE(rpc);
+ }
xdr_free((xdrproc_t) xdr_nlm_testres, &res);
return (FALSE);
@@ -263,7 +264,6 @@ nlm_lock_msg_1_svc(struct nlm_lockargs *argp, void *result, struct svc_req *rqst
nlm4_lockargs args4;
nlm4_res res4;
nlm_res res;
- struct nlm_host *host;
CLIENT *rpc;
char dummy;
@@ -274,13 +274,15 @@ nlm_lock_msg_1_svc(struct nlm_lockargs *argp, void *result, struct svc_req *rqst
args4.reclaim = argp->reclaim;
args4.state = argp->state;
- host = nlm_do_lock(&args4, &res4, rqstp, TRUE);
+ if (nlm_do_lock(&args4, &res4, rqstp, TRUE, &rpc))
+ return (FALSE);
nlm_convert_to_nlm_res(&res, &res4);
- rpc = nlm_host_get_rpc(host);
- if (rpc)
- nlm_lock_res_1(&res, &dummy, rpc);
+ if (rpc) {
+ nlm_lock_res_1(&res, &dummy, rpc, NULL, nlm_zero_tv);
+ CLNT_RELEASE(rpc);
+ }
xdr_free((xdrproc_t) xdr_nlm_res, &res);
return (FALSE);
@@ -292,7 +294,6 @@ nlm_cancel_msg_1_svc(struct nlm_cancargs *argp, void *result, struct svc_req *rq
nlm4_cancargs args4;
nlm4_res res4;
nlm_res res;
- struct nlm_host *host;
CLIENT *rpc;
char dummy;
@@ -301,13 +302,15 @@ nlm_cancel_msg_1_svc(struct nlm_cancargs *argp, void *result, struct svc_req *rq
args4.exclusive = argp->exclusive;
nlm_convert_to_nlm4_lock(&args4.alock, &argp->alock);
- host = nlm_do_cancel(&args4, &res4, rqstp);
+ if (nlm_do_cancel(&args4, &res4, rqstp, &rpc))
+ return (FALSE);
nlm_convert_to_nlm_res(&res, &res4);
- rpc = nlm_host_get_rpc(host);
- if (rpc)
- nlm_cancel_res_1(&res, &dummy, rpc);
+ if (rpc) {
+ nlm_cancel_res_1(&res, &dummy, rpc, NULL, nlm_zero_tv);
+ CLNT_RELEASE(rpc);
+ }
xdr_free((xdrproc_t) xdr_nlm_res, &res);
return (FALSE);
@@ -319,20 +322,21 @@ nlm_unlock_msg_1_svc(struct nlm_unlockargs *argp, void *result, struct svc_req *
nlm4_unlockargs args4;
nlm4_res res4;
nlm_res res;
- struct nlm_host *host;
CLIENT *rpc;
char dummy;
args4.cookie = argp->cookie;
nlm_convert_to_nlm4_lock(&args4.alock, &argp->alock);
- host = nlm_do_unlock(&args4, &res4, rqstp);
+ if (nlm_do_unlock(&args4, &res4, rqstp, &rpc))
+ return (FALSE);
nlm_convert_to_nlm_res(&res, &res4);
- rpc = nlm_host_get_rpc(host);
- if (rpc)
- nlm_unlock_res_1(&res, &dummy, rpc);
+ if (rpc) {
+ nlm_unlock_res_1(&res, &dummy, rpc, NULL, nlm_zero_tv);
+ CLNT_RELEASE(rpc);
+ }
xdr_free((xdrproc_t) xdr_nlm_res, &res);
return (FALSE);
@@ -344,7 +348,6 @@ nlm_granted_msg_1_svc(struct nlm_testargs *argp, void *result, struct svc_req *r
nlm4_testargs args4;
nlm4_res res4;
nlm_res res;
- struct nlm_host *host;
CLIENT *rpc;
char dummy;
@@ -352,20 +355,15 @@ nlm_granted_msg_1_svc(struct nlm_testargs *argp, void *result, struct svc_req *r
args4.exclusive = argp->exclusive;
nlm_convert_to_nlm4_lock(&args4.alock, &argp->alock);
- /*
- * We make a synchronous call to userland and send the reply
- * back async.
- */
- nlm4_granted_4_svc(&args4, &res4, rqstp);
+ if (nlm_do_granted(&args4, &res4, rqstp, &rpc))
+ return (FALSE);
nlm_convert_to_nlm_res(&res, &res4);
- host = nlm_find_host_by_addr(
- (struct sockaddr *) rqstp->rq_xprt->xp_rtaddr.buf,
- rqstp->rq_vers);
- rpc = nlm_host_get_rpc(host);
- if (rpc)
- nlm_granted_res_1(&res, &dummy, rpc);
+ if (rpc) {
+ nlm_granted_res_1(&res, &dummy, rpc, NULL, nlm_zero_tv);
+ CLNT_RELEASE(rpc);
+ }
xdr_free((xdrproc_t) xdr_nlm_res, &res);
return (FALSE);
@@ -515,7 +513,7 @@ bool_t
nlm4_test_4_svc(nlm4_testargs *argp, nlm4_testres *result, struct svc_req *rqstp)
{
- nlm_do_test(argp, result, rqstp);
+ nlm_do_test(argp, result, rqstp, NULL);
return (TRUE);
}
@@ -523,7 +521,7 @@ bool_t
nlm4_lock_4_svc(nlm4_lockargs *argp, nlm4_res *result, struct svc_req *rqstp)
{
- nlm_do_lock(argp, result, rqstp, TRUE);
+ nlm_do_lock(argp, result, rqstp, TRUE, NULL);
return (TRUE);
}
@@ -531,7 +529,7 @@ bool_t
nlm4_cancel_4_svc(nlm4_cancargs *argp, nlm4_res *result, struct svc_req *rqstp)
{
- nlm_do_cancel(argp, result, rqstp);
+ nlm_do_cancel(argp, result, rqstp, NULL);
return (TRUE);
}
@@ -539,35 +537,15 @@ bool_t
nlm4_unlock_4_svc(nlm4_unlockargs *argp, nlm4_res *result, struct svc_req *rqstp)
{
- nlm_do_unlock(argp, result, rqstp);
+ nlm_do_unlock(argp, result, rqstp, NULL);
return (TRUE);
}
bool_t
nlm4_granted_4_svc(nlm4_testargs *argp, nlm4_res *result, struct svc_req *rqstp)
{
- CLIENT* lockd;
- struct timeval tv;
-
- memset(result, 0, sizeof(*result));
- nlm_copy_netobj(&result->cookie, &argp->cookie, M_RPC);
-
- /*
- * Set a non-zero timeout to give the userland a chance to reply.
- */
- lockd = nlm_user_lockd();
- if (!lockd) {
- result->stat.stat = nlm4_failed;
- return (TRUE);
- }
- tv.tv_sec = 20;
- tv.tv_usec = 0;
- CLNT_CONTROL(lockd, CLSET_TIMEOUT, &tv);
- nlm4_granted_4(argp, result, lockd);
- tv.tv_sec = 0;
- tv.tv_usec = 0;
- CLNT_CONTROL(lockd, CLSET_TIMEOUT, &tv);
+ nlm_do_granted(argp, result, rqstp, NULL);
return (TRUE);
}
@@ -575,14 +553,15 @@ bool_t
nlm4_test_msg_4_svc(nlm4_testargs *argp, void *result, struct svc_req *rqstp)
{
nlm4_testres res4;
- struct nlm_host *host;
CLIENT *rpc;
char dummy;
- host = nlm_do_test(argp, &res4, rqstp);
- rpc = nlm_host_get_rpc(host);
- if (rpc)
- nlm4_test_res_4(&res4, &dummy, rpc);
+ if (nlm_do_test(argp, &res4, rqstp, &rpc))
+ return (FALSE);
+ if (rpc) {
+ nlm4_test_res_4(&res4, &dummy, rpc, NULL, nlm_zero_tv);
+ CLNT_RELEASE(rpc);
+ }
xdr_free((xdrproc_t) xdr_nlm4_testres, &res4);
return (FALSE);
@@ -592,14 +571,15 @@ bool_t
nlm4_lock_msg_4_svc(nlm4_lockargs *argp, void *result, struct svc_req *rqstp)
{
nlm4_res res4;
- struct nlm_host *host;
CLIENT *rpc;
char dummy;
- host = nlm_do_lock(argp, &res4, rqstp, TRUE);
- rpc = nlm_host_get_rpc(host);
- if (rpc)
- nlm4_lock_res_4(&res4, &dummy, rpc);
+ if (nlm_do_lock(argp, &res4, rqstp, TRUE, &rpc))
+ return (FALSE);
+ if (rpc) {
+ nlm4_lock_res_4(&res4, &dummy, rpc, NULL, nlm_zero_tv);
+ CLNT_RELEASE(rpc);
+ }
xdr_free((xdrproc_t) xdr_nlm4_res, &res4);
return (FALSE);
@@ -609,14 +589,15 @@ bool_t
nlm4_cancel_msg_4_svc(nlm4_cancargs *argp, void *result, struct svc_req *rqstp)
{
nlm4_res res4;
- struct nlm_host *host;
CLIENT *rpc;
char dummy;
- host = nlm_do_cancel(argp, &res4, rqstp);
- rpc = nlm_host_get_rpc(host);
- if (rpc)
- nlm4_cancel_res_4(&res4, &dummy, rpc);
+ if (nlm_do_cancel(argp, &res4, rqstp, &rpc))
+ return (FALSE);
+ if (rpc) {
+ nlm4_cancel_res_4(&res4, &dummy, rpc, NULL, nlm_zero_tv);
+ CLNT_RELEASE(rpc);
+ }
xdr_free((xdrproc_t) xdr_nlm4_res, &res4);
return (FALSE);
@@ -626,14 +607,14 @@ bool_t
nlm4_unlock_msg_4_svc(nlm4_unlockargs *argp, void *result, struct svc_req *rqstp)
{
nlm4_res res4;
- struct nlm_host *host;
CLIENT *rpc;
char dummy;
- host = nlm_do_unlock(argp, &res4, rqstp);
- rpc = nlm_host_get_rpc(host);
- if (rpc)
- nlm4_unlock_res_4(&res4, &dummy, rpc);
+ if (nlm_do_unlock(argp, &res4, rqstp, &rpc))
+ return (FALSE);
+ if (rpc) {
+ nlm4_unlock_res_4(&res4, &dummy, rpc, NULL, nlm_zero_tv);
+ CLNT_RELEASE(rpc);
+ }
xdr_free((xdrproc_t) xdr_nlm4_res, &res4);
return (FALSE);
@@ -642,23 +623,16 @@ nlm4_unlock_msg_4_svc(nlm4_unlockargs *argp, void *result, struct svc_req *rqstp
bool_t
nlm4_granted_msg_4_svc(nlm4_testargs *argp, void *result, struct svc_req *rqstp)
{
- struct nlm_host *host;
- CLIENT *rpc;
nlm4_res res4;
+ CLIENT *rpc;
char dummy;
- /*
- * We make a synchronous call to userland and send the reply
- * back async.
- */
- nlm4_granted_4_svc(argp, &res4, rqstp);
-
- host = nlm_find_host_by_addr(
- (struct sockaddr *) rqstp->rq_xprt->xp_rtaddr.buf,
- rqstp->rq_vers);
- rpc = nlm_host_get_rpc(host);
- if (rpc)
- nlm4_granted_res_4(&res4, &dummy, rpc);
+ if (nlm_do_granted(argp, &res4, rqstp, &rpc))
+ return (FALSE);
+ if (rpc) {
+ nlm4_granted_res_4(&res4, &dummy, rpc, NULL, nlm_zero_tv);
+ CLNT_RELEASE(rpc);
+ }
xdr_free((xdrproc_t) xdr_nlm4_res, &res4);
return (FALSE);
@@ -667,11 +641,6 @@ nlm4_granted_msg_4_svc(nlm4_testargs *argp, void *result, struct svc_req *rqstp)
bool_t
nlm4_test_res_4_svc(nlm4_testres *argp, void *result, struct svc_req *rqstp)
{
- CLIENT* lockd;
-
- lockd = nlm_user_lockd();
- if (lockd)
- nlm4_test_res_4(argp, result, lockd);
return (FALSE);
}
@@ -679,11 +648,6 @@ nlm4_test_res_4_svc(nlm4_testres *argp, void *result, struct svc_req *rqstp)
bool_t
nlm4_lock_res_4_svc(nlm4_res *argp, void *result, struct svc_req *rqstp)
{
- CLIENT* lockd;
-
- lockd = nlm_user_lockd();
- if (lockd)
- nlm4_lock_res_4(argp, result, lockd);
return (FALSE);
}
@@ -691,11 +655,6 @@ nlm4_lock_res_4_svc(nlm4_res *argp, void *result, struct svc_req *rqstp)
bool_t
nlm4_cancel_res_4_svc(nlm4_res *argp, void *result, struct svc_req *rqstp)
{
- CLIENT* lockd;
-
- lockd = nlm_user_lockd();
- if (lockd)
- nlm4_cancel_res_4(argp, result, lockd);
return (FALSE);
}
@@ -703,11 +662,6 @@ nlm4_cancel_res_4_svc(nlm4_res *argp, void *result, struct svc_req *rqstp)
bool_t
nlm4_unlock_res_4_svc(nlm4_res *argp, void *result, struct svc_req *rqstp)
{
- CLIENT* lockd;
-
- lockd = nlm_user_lockd();
- if (lockd)
- nlm4_unlock_res_4(argp, result, lockd);
return (FALSE);
}
@@ -741,7 +695,7 @@ bool_t
nlm4_nm_lock_4_svc(nlm4_lockargs *argp, nlm4_res *result, struct svc_req *rqstp)
{
- nlm_do_lock(argp, result, rqstp, FALSE);
+ nlm_do_lock(argp, result, rqstp, FALSE, NULL);
return (TRUE);
}