summaryrefslogtreecommitdiffstats
path: root/sys/rpc
diff options
context:
space:
mode:
authormav <mav@FreeBSD.org>2014-01-04 15:51:31 +0000
committermav <mav@FreeBSD.org>2014-01-04 15:51:31 +0000
commit048eda83eb56efd0ddfd66e87d6885d60c22183a (patch)
tree48aaf31009a42278ab34ef1b8546560d3099ebd6 /sys/rpc
parent47c52965eeacfb141118834c3ccc691a738bddcf (diff)
downloadFreeBSD-src-048eda83eb56efd0ddfd66e87d6885d60c22183a.zip
FreeBSD-src-048eda83eb56efd0ddfd66e87d6885d60c22183a.tar.gz
Replace locks added in r260229 to protect sequence counters with atomics.
The new algorithm does not create additional lock congestion, and the races it includes should not be a problem. Those races may keep requests in the DRC cache for some more time by returning an ACK position smaller than the actual one, but it should still be able to drop them when the proper ACK is finally read. Races in the original algorithm, based on the TCP sequence number, were worse because they happened when the reply sequence number was recorded. After that, even correctly read ACKs sometimes could not clean the DRC.
Diffstat (limited to 'sys/rpc')
-rw-r--r--sys/rpc/svc.h6
-rw-r--r--sys/rpc/svc_vc.c18
2 files changed, 9 insertions, 15 deletions
diff --git a/sys/rpc/svc.h b/sys/rpc/svc.h
index bcd2d79..6ae380b 100644
--- a/sys/rpc/svc.h
+++ b/sys/rpc/svc.h
@@ -168,8 +168,8 @@ typedef struct __rpc_svcxprt {
time_t xp_lastactive; /* time of last RPC */
u_int64_t xp_sockref; /* set by nfsv4 to identify socket */
int xp_upcallset; /* socket upcall is set up */
- uint32_t xp_snd_cnt; /* # of bytes sent to socket */
- struct sx xp_snd_lock; /* protects xp_snd_cnt & sb_cc */
+ uint32_t xp_snd_cnt; /* # of bytes to send to socket */
+ uint32_t xp_snt_cnt; /* # of bytes sent to socket */
#else
int xp_fd;
u_short xp_port; /* associated port number */
@@ -327,7 +327,7 @@ enum svcpool_state {
typedef SVCTHREAD *pool_assign_fn(SVCTHREAD *, struct svc_req *);
typedef void pool_done_fn(SVCTHREAD *, struct svc_req *);
typedef struct __rpc_svcpool {
- struct mtx sp_lock; /* protect the transport lists */
+ struct mtx_padalign sp_lock; /* protect the transport lists */
const char *sp_name; /* pool name (e.g. "nfsd", "NLM" */
enum svcpool_state sp_state; /* current pool state */
struct proc *sp_proc; /* process which is in svc_run */
diff --git a/sys/rpc/svc_vc.c b/sys/rpc/svc_vc.c
index c10224e..5fe6488 100644
--- a/sys/rpc/svc_vc.c
+++ b/sys/rpc/svc_vc.c
@@ -161,7 +161,6 @@ svc_vc_create(SVCPOOL *pool, struct socket *so, size_t sendsize,
xprt = svc_xprt_alloc();
sx_init(&xprt->xp_lock, "xprt->xp_lock");
- sx_init(&xprt->xp_snd_lock, "xprt->xp_snd_lock");
xprt->xp_pool = pool;
xprt->xp_socket = so;
xprt->xp_p1 = NULL;
@@ -188,7 +187,6 @@ svc_vc_create(SVCPOOL *pool, struct socket *so, size_t sendsize,
return (xprt);
cleanup_svc_vc_create:
if (xprt) {
- sx_destroy(&xprt->xp_snd_lock);
sx_destroy(&xprt->xp_lock);
svc_xprt_free(xprt);
}
@@ -237,7 +235,6 @@ svc_vc_create_conn(SVCPOOL *pool, struct socket *so, struct sockaddr *raddr)
xprt = svc_xprt_alloc();
sx_init(&xprt->xp_lock, "xprt->xp_lock");
- sx_init(&xprt->xp_snd_lock, "xprt->xp_snd_lock");
xprt->xp_pool = pool;
xprt->xp_socket = so;
xprt->xp_p1 = cd;
@@ -277,7 +274,6 @@ svc_vc_create_conn(SVCPOOL *pool, struct socket *so, struct sockaddr *raddr)
return (xprt);
cleanup_svc_vc_create:
if (xprt) {
- sx_destroy(&xprt->xp_snd_lock);
sx_destroy(&xprt->xp_lock);
svc_xprt_free(xprt);
}
@@ -300,7 +296,6 @@ svc_vc_create_backchannel(SVCPOOL *pool)
xprt = svc_xprt_alloc();
sx_init(&xprt->xp_lock, "xprt->xp_lock");
- sx_init(&xprt->xp_snd_lock, "xprt->xp_snd_lock");
xprt->xp_pool = pool;
xprt->xp_socket = NULL;
xprt->xp_p1 = cd;
@@ -550,9 +545,8 @@ static bool_t
svc_vc_ack(SVCXPRT *xprt, uint32_t *ack)
{
- sx_slock(&xprt->xp_snd_lock);
- *ack = xprt->xp_snd_cnt - xprt->xp_socket->so_snd.sb_cc;
- sx_sunlock(&xprt->xp_snd_lock);
+ *ack = atomic_load_acq_32(&xprt->xp_snt_cnt);
+ *ack -= xprt->xp_socket->so_snd.sb_cc;
return (TRUE);
}
@@ -839,16 +833,16 @@ svc_vc_reply(SVCXPRT *xprt, struct rpc_msg *msg,
len = mrep->m_pkthdr.len;
*mtod(mrep, uint32_t *) =
htonl(0x80000000 | (len - sizeof(uint32_t)));
- sx_xlock(&xprt->xp_snd_lock);
+ atomic_add_acq_32(&xprt->xp_snd_cnt, len);
error = sosend(xprt->xp_socket, NULL, NULL, mrep, NULL,
0, curthread);
if (!error) {
- xprt->xp_snd_cnt += len;
+ atomic_add_rel_32(&xprt->xp_snt_cnt, len);
if (seq)
*seq = xprt->xp_snd_cnt;
stat = TRUE;
- }
- sx_xunlock(&xprt->xp_snd_lock);
+ } else
+ atomic_subtract_32(&xprt->xp_snd_cnt, len);
} else {
m_freem(mrep);
}
OpenPOWER on IntegriCloud