summary | refs | log | tree | commit | diff | stats
path: root/net/sunrpc
diff options
context:
space:
mode:
author: Trond Myklebust <Trond.Myklebust@netapp.com> 2011-12-01 14:16:17 -0500
committer: Trond Myklebust <Trond.Myklebust@netapp.com> 2011-12-01 14:16:17 -0500
commitc25573b5134294c0be82bfaecc6d08136835b271 (patch)
treee3ac5119ad559cc32d391e8384d83a6ac2c06371 /net/sunrpc
parent7fdcf13b292e8b2e38e42de24be2503e37b2cf97 (diff)
downloadop-kernel-dev-c25573b5134294c0be82bfaecc6d08136835b271.zip
op-kernel-dev-c25573b5134294c0be82bfaecc6d08136835b271.tar.gz
SUNRPC: Ensure we always bump the backlog queue in xprt_free_slot
Whenever we free a slot, we know that the resulting xprt->num_reqs will be less than xprt->max_reqs, so we know that we can release at least one backlogged rpc_task.

Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
Cc: stable@vger.kernel.org [>=3.1]
Diffstat (limited to 'net/sunrpc')
-rw-r--r--net/sunrpc/xprt.c10
1 file changed, 4 insertions(+), 6 deletions(-)
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index f4385e4..c64c0ef 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -995,13 +995,11 @@ out_init_req:
static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
- if (xprt_dynamic_free_slot(xprt, req))
- return;
-
- memset(req, 0, sizeof(*req)); /* mark unused */
-
spin_lock(&xprt->reserve_lock);
- list_add(&req->rq_list, &xprt->free);
+ if (!xprt_dynamic_free_slot(xprt, req)) {
+ memset(req, 0, sizeof(*req)); /* mark unused */
+ list_add(&req->rq_list, &xprt->free);
+ }
rpc_wake_up_next(&xprt->backlog);
spin_unlock(&xprt->reserve_lock);
}
OpenPOWER on IntegriCloud