author    Chuck Lever <cel@citi.umich.edu>	2005-08-11 16:25:32 -0400
committer Trond Myklebust <Trond.Myklebust@netapp.com>	2005-09-23 12:38:17 -0400
commit    4a0f8c04f2ece949d54a0c4fd7490259cf23a58a (patch)
tree      6c6e142cfa37b984dcba6f785a4f886374a307c6 /net/sunrpc
parent    b4b5cc85ed4ecbe4adbfbc4df028850de67a9f09 (diff)
[PATCH] RPC: Rename sock_lock
Clean-up: replace a name reference to sockets in the generic parts of the
RPC client by renaming sock_lock in the rpc_xprt structure.

Test-plan:
Compile kernel with CONFIG_NFS enabled.

Version: Thu, 11 Aug 2005 16:05:00 -0400
Signed-off-by: Chuck Lever <cel@netapp.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
Diffstat (limited to 'net/sunrpc')
-rw-r--r--  net/sunrpc/xprt.c      44
-rw-r--r--  net/sunrpc/xprtsock.c  22
2 files changed, 33 insertions, 33 deletions
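The diffstat above is limited to net/sunrpc, so the rename of the field itself (declared in include/linux/sunrpc/xprt.h) does not appear on this page. As a rough, illustrative sketch of what the corresponding structure change presumably looks like — the placeholder comments stand in for the many other members of the real structure:

/* Illustrative sketch only -- the real declaration lives in
 * include/linux/sunrpc/xprt.h and carries many more members. */
struct rpc_xprt {
	/* ... other transport state ... */
	spinlock_t	transport_lock;	/* formerly sock_lock: guards transport state */
	/* ... other transport state ... */
};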
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 589195e..1f0da8c 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -106,9 +106,9 @@ xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
int retval;
- spin_lock_bh(&xprt->sock_lock);
+ spin_lock_bh(&xprt->transport_lock);
retval = __xprt_lock_write(xprt, task);
- spin_unlock_bh(&xprt->sock_lock);
+ spin_unlock_bh(&xprt->transport_lock);
return retval;
}
@@ -161,9 +161,9 @@ __xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
static inline void
xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
- spin_lock_bh(&xprt->sock_lock);
+ spin_lock_bh(&xprt->transport_lock);
__xprt_release_write(xprt, task);
- spin_unlock_bh(&xprt->sock_lock);
+ spin_unlock_bh(&xprt->transport_lock);
}
/*
@@ -266,9 +266,9 @@ int xprt_adjust_timeout(struct rpc_rqst *req)
req->rq_retries = 0;
xprt_reset_majortimeo(req);
/* Reset the RTT counters == "slow start" */
- spin_lock_bh(&xprt->sock_lock);
+ spin_lock_bh(&xprt->transport_lock);
rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
- spin_unlock_bh(&xprt->sock_lock);
+ spin_unlock_bh(&xprt->transport_lock);
pprintk("RPC: %lu timeout\n", jiffies);
status = -ETIMEDOUT;
}
@@ -298,10 +298,10 @@ xprt_socket_autoclose(void *args)
void xprt_disconnect(struct rpc_xprt *xprt)
{
dprintk("RPC: disconnected transport %p\n", xprt);
- spin_lock_bh(&xprt->sock_lock);
+ spin_lock_bh(&xprt->transport_lock);
xprt_clear_connected(xprt);
rpc_wake_up_status(&xprt->pending, -ENOTCONN);
- spin_unlock_bh(&xprt->sock_lock);
+ spin_unlock_bh(&xprt->transport_lock);
}
static void
@@ -309,12 +309,12 @@ xprt_init_autodisconnect(unsigned long data)
{
struct rpc_xprt *xprt = (struct rpc_xprt *)data;
- spin_lock(&xprt->sock_lock);
+ spin_lock(&xprt->transport_lock);
if (!list_empty(&xprt->recv) || xprt->shutdown)
goto out_abort;
if (test_and_set_bit(XPRT_LOCKED, &xprt->sockstate))
goto out_abort;
- spin_unlock(&xprt->sock_lock);
+ spin_unlock(&xprt->transport_lock);
/* Let keventd close the socket */
if (test_bit(XPRT_CONNECTING, &xprt->sockstate) != 0)
xprt_release_write(xprt, NULL);
@@ -322,7 +322,7 @@ xprt_init_autodisconnect(unsigned long data)
schedule_work(&xprt->task_cleanup);
return;
out_abort:
- spin_unlock(&xprt->sock_lock);
+ spin_unlock(&xprt->transport_lock);
}
/**
@@ -482,7 +482,7 @@ xprt_timer(struct rpc_task *task)
struct rpc_rqst *req = task->tk_rqstp;
struct rpc_xprt *xprt = req->rq_xprt;
- spin_lock(&xprt->sock_lock);
+ spin_lock(&xprt->transport_lock);
if (req->rq_received)
goto out;
@@ -496,7 +496,7 @@ xprt_timer(struct rpc_task *task)
out:
task->tk_timeout = 0;
rpc_wake_up_task(task);
- spin_unlock(&xprt->sock_lock);
+ spin_unlock(&xprt->transport_lock);
}
/**
@@ -515,7 +515,7 @@ int xprt_prepare_transmit(struct rpc_task *task)
if (xprt->shutdown)
return -EIO;
- spin_lock_bh(&xprt->sock_lock);
+ spin_lock_bh(&xprt->transport_lock);
if (req->rq_received && !req->rq_bytes_sent) {
err = req->rq_received;
goto out_unlock;
@@ -530,7 +530,7 @@ int xprt_prepare_transmit(struct rpc_task *task)
goto out_unlock;
}
out_unlock:
- spin_unlock_bh(&xprt->sock_lock);
+ spin_unlock_bh(&xprt->transport_lock);
return err;
}
@@ -552,13 +552,13 @@ void xprt_transmit(struct rpc_task *task)
smp_rmb();
if (!req->rq_received) {
if (list_empty(&req->rq_list)) {
- spin_lock_bh(&xprt->sock_lock);
+ spin_lock_bh(&xprt->transport_lock);
/* Update the softirq receive buffer */
memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
sizeof(req->rq_private_buf));
/* Add request to the receive list */
list_add_tail(&req->rq_list, &xprt->recv);
- spin_unlock_bh(&xprt->sock_lock);
+ spin_unlock_bh(&xprt->transport_lock);
xprt_reset_majortimeo(req);
/* Turn off autodisconnect */
del_singleshot_timer_sync(&xprt->timer);
@@ -592,7 +592,7 @@ void xprt_transmit(struct rpc_task *task)
out_receive:
dprintk("RPC: %4d xmit complete\n", task->tk_pid);
/* Set the task's receive timeout value */
- spin_lock_bh(&xprt->sock_lock);
+ spin_lock_bh(&xprt->transport_lock);
if (!xprt->nocong) {
int timer = task->tk_msg.rpc_proc->p_timer;
task->tk_timeout = rpc_calc_rto(clnt->cl_rtt, timer);
@@ -607,7 +607,7 @@ void xprt_transmit(struct rpc_task *task)
else if (!req->rq_received)
rpc_sleep_on(&xprt->pending, task, NULL, xprt_timer);
__xprt_release_write(xprt, task);
- spin_unlock_bh(&xprt->sock_lock);
+ spin_unlock_bh(&xprt->transport_lock);
}
static inline void do_xprt_reserve(struct rpc_task *task)
@@ -683,7 +683,7 @@ void xprt_release(struct rpc_task *task)
if (!(req = task->tk_rqstp))
return;
- spin_lock_bh(&xprt->sock_lock);
+ spin_lock_bh(&xprt->transport_lock);
__xprt_release_write(xprt, task);
__xprt_put_cong(xprt, req);
if (!list_empty(&req->rq_list))
@@ -692,7 +692,7 @@ void xprt_release(struct rpc_task *task)
if (list_empty(&xprt->recv) && !xprt->shutdown)
mod_timer(&xprt->timer,
xprt->last_used + RPC_IDLE_DISCONNECT_TIMEOUT);
- spin_unlock_bh(&xprt->sock_lock);
+ spin_unlock_bh(&xprt->transport_lock);
task->tk_rqstp = NULL;
memset(req, 0, sizeof(*req)); /* mark unused */
@@ -750,7 +750,7 @@ static struct rpc_xprt *xprt_setup(int proto, struct sockaddr_in *ap, struct rpc
return ERR_PTR(result);
}
- spin_lock_init(&xprt->sock_lock);
+ spin_lock_init(&xprt->transport_lock);
spin_lock_init(&xprt->xprt_lock);
init_waitqueue_head(&xprt->cong_wait);
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index a5a0420..bc90caa 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -307,7 +307,7 @@ static int xs_send_request(struct rpc_task *task)
if (status == -EAGAIN) {
if (test_bit(SOCK_ASYNC_NOSPACE, &xprt->sock->flags)) {
/* Protect against races with xs_write_space */
- spin_lock_bh(&xprt->sock_lock);
+ spin_lock_bh(&xprt->transport_lock);
/* Don't race with disconnect */
if (!xprt_connected(xprt))
task->tk_status = -ENOTCONN;
@@ -315,7 +315,7 @@ static int xs_send_request(struct rpc_task *task)
task->tk_timeout = req->rq_timeout;
rpc_sleep_on(&xprt->pending, task, NULL, NULL);
}
- spin_unlock_bh(&xprt->sock_lock);
+ spin_unlock_bh(&xprt->transport_lock);
return status;
}
/* Keep holding the socket if it is blocked */
@@ -415,7 +415,7 @@ static void xs_udp_data_ready(struct sock *sk, int len)
goto dropit;
/* Look up and lock the request corresponding to the given XID */
- spin_lock(&xprt->sock_lock);
+ spin_lock(&xprt->transport_lock);
rovr = xprt_lookup_rqst(xprt, *xp);
if (!rovr)
goto out_unlock;
@@ -436,7 +436,7 @@ static void xs_udp_data_ready(struct sock *sk, int len)
xprt_complete_rqst(xprt, rovr, copied);
out_unlock:
- spin_unlock(&xprt->sock_lock);
+ spin_unlock(&xprt->transport_lock);
dropit:
skb_free_datagram(sk, skb);
out:
@@ -531,13 +531,13 @@ static inline void xs_tcp_read_request(struct rpc_xprt *xprt, skb_reader_t *desc
ssize_t r;
/* Find and lock the request corresponding to this xid */
- spin_lock(&xprt->sock_lock);
+ spin_lock(&xprt->transport_lock);
req = xprt_lookup_rqst(xprt, xprt->tcp_xid);
if (!req) {
xprt->tcp_flags &= ~XPRT_COPY_DATA;
dprintk("RPC: XID %08x request not found!\n",
ntohl(xprt->tcp_xid));
- spin_unlock(&xprt->sock_lock);
+ spin_unlock(&xprt->transport_lock);
return;
}
@@ -597,7 +597,7 @@ out:
req->rq_task->tk_pid);
xprt_complete_rqst(xprt, req, xprt->tcp_copied);
}
- spin_unlock(&xprt->sock_lock);
+ spin_unlock(&xprt->transport_lock);
xs_tcp_check_recm(xprt);
}
@@ -696,7 +696,7 @@ static void xs_tcp_state_change(struct sock *sk)
switch (sk->sk_state) {
case TCP_ESTABLISHED:
- spin_lock_bh(&xprt->sock_lock);
+ spin_lock_bh(&xprt->transport_lock);
if (!xprt_test_and_set_connected(xprt)) {
/* Reset TCP record info */
xprt->tcp_offset = 0;
@@ -705,7 +705,7 @@ static void xs_tcp_state_change(struct sock *sk)
xprt->tcp_flags = XPRT_COPY_RECM | XPRT_COPY_XID;
rpc_wake_up(&xprt->pending);
}
- spin_unlock_bh(&xprt->sock_lock);
+ spin_unlock_bh(&xprt->transport_lock);
break;
case TCP_SYN_SENT:
case TCP_SYN_RECV:
@@ -753,10 +753,10 @@ static void xs_write_space(struct sock *sk)
if (!test_and_clear_bit(SOCK_NOSPACE, &sock->flags))
goto out;
- spin_lock_bh(&xprt->sock_lock);
+ spin_lock_bh(&xprt->transport_lock);
if (xprt->snd_task)
rpc_wake_up_task(xprt->snd_task);
- spin_unlock_bh(&xprt->sock_lock);
+ spin_unlock_bh(&xprt->transport_lock);
out:
read_unlock(&sk->sk_callback_lock);
}
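A note on the locking pattern visible in the diff: process-context callers take the renamed lock with spin_lock_bh(), while the timer and socket callbacks, which already run in bottom-half context, use plain spin_lock(). A minimal, hypothetical caller (example_frob_transport() is not a real kernel function) might look like this:

#include <linux/spinlock.h>
#include <linux/sunrpc/xprt.h>

/* Hypothetical example, not part of the patch: take transport_lock from
 * process context, disabling bottom halves so the softirq-side users
 * (timer and socket callbacks) cannot deadlock against this path. */
static void example_frob_transport(struct rpc_xprt *xprt)
{
	spin_lock_bh(&xprt->transport_lock);
	/* ... inspect or update transport state here ... */
	spin_unlock_bh(&xprt->transport_lock);
}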