author:    Tom Tucker <tom@opengridcomputing.com>  2008-05-28 13:17:44 -0500
committer: Tom Tucker <tom@opengridcomputing.com>  2008-07-02 15:01:56 -0500
commit:    87295b6c5c7fd7bbc0ce3e7f42d2adbbac7352b9
tree:      11a273d3fd2c61bbbf102b80c0b45a0f0021d499 /net
parent:    e6ab9143719ff76f0b95f0866c4d0f6c743ad2e0
svcrdma: Add dma map count and WARN_ON
Add a dma map count in order to verify that all DMA mapping resources
have been freed when the transport is closed.
Signed-off-by: Tom Tucker <tom@opengridcomputing.com>
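
For context, the pattern the patch applies is straightforward: increment an atomic counter next to every ib_dma_map_*() call, decrement it next to every unmap, and WARN_ON() a non-zero count when the transport is torn down, so a leaked mapping is flagged at close time. Below is a minimal sketch of that pattern, not code from the patch: the wrapper helpers svc_rdma_dma_map_single()/svc_rdma_dma_unmap_single() and the simplified struct are hypothetical illustrations; the patch itself open-codes the atomic_inc()/atomic_dec() at each map/unmap site, as the diff below shows.

```c
/*
 * Sketch of the DMA-map accounting pattern (hypothetical wrappers;
 * the real patch open-codes the atomic ops at each call site).
 */
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <rdma/ib_verbs.h>

struct svcxprt_rdma_sketch {
	struct ib_device *sc_device;
	atomic_t sc_dma_used;		/* outstanding DMA mappings */
};

static u64 svc_rdma_dma_map_single(struct svcxprt_rdma_sketch *xprt,
				   void *va, size_t len,
				   enum dma_data_direction dir)
{
	atomic_inc(&xprt->sc_dma_used);	/* account for the new mapping */
	return ib_dma_map_single(xprt->sc_device, va, len, dir);
}

static void svc_rdma_dma_unmap_single(struct svcxprt_rdma_sketch *xprt,
				      u64 addr, size_t len,
				      enum dma_data_direction dir)
{
	atomic_dec(&xprt->sc_dma_used);	/* release the accounting */
	ib_dma_unmap_single(xprt->sc_device, addr, len, dir);
}

static void svc_rdma_free_sketch(struct svcxprt_rdma_sketch *xprt)
{
	/* A non-zero count at teardown means a mapping leaked somewhere. */
	WARN_ON(atomic_read(&xprt->sc_dma_used) != 0);
}
```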
Diffstat (limited to 'net')
 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c  | 1 +
 net/sunrpc/xprtrdma/svc_rdma_sendto.c    | 3 +++
 net/sunrpc/xprtrdma/svc_rdma_transport.c | 5 +++++
 3 files changed, 9 insertions(+), 0 deletions(-)
```diff
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index d25971b..b4b17f4 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -222,6 +222,7 @@ static void rdma_set_ctxt_sge(struct svcxprt_rdma *xprt,
 	ctxt->count = count;
 	ctxt->direction = DMA_FROM_DEVICE;
 	for (i = 0; i < count; i++) {
+		atomic_inc(&xprt->sc_dma_used);
 		ctxt->sge[i].addr =
 			ib_dma_map_single(xprt->sc_cm_id->device,
 					  vec[i].iov_base, vec[i].iov_len,
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index bdc11a30..a19b22b 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -163,6 +163,7 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
 		sge_bytes = min((size_t)bc,
 				(size_t)(vec->sge[xdr_sge_no].iov_len-sge_off));
 		sge[sge_no].length = sge_bytes;
+		atomic_inc(&xprt->sc_dma_used);
 		sge[sge_no].addr =
 			ib_dma_map_single(xprt->sc_cm_id->device,
 					  (void *)
@@ -385,6 +386,7 @@ static int send_reply(struct svcxprt_rdma *rdma,
 	ctxt->count = 1;
 
 	/* Prepare the SGE for the RPCRDMA Header */
+	atomic_inc(&rdma->sc_dma_used);
 	ctxt->sge[0].addr =
 		ib_dma_map_page(rdma->sc_cm_id->device,
 				page, 0, PAGE_SIZE, DMA_TO_DEVICE);
@@ -396,6 +398,7 @@ static int send_reply(struct svcxprt_rdma *rdma,
 	for (sge_no = 1; byte_count && sge_no < vec->count; sge_no++) {
 		sge_bytes = min_t(size_t, vec->sge[sge_no].iov_len, byte_count);
 		byte_count -= sge_bytes;
+		atomic_inc(&rdma->sc_dma_used);
 		ctxt->sge[sge_no].addr =
 			ib_dma_map_single(rdma->sc_cm_id->device,
 					  vec->sge[sge_no].iov_base,
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 7e8ee66..6fddd58 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -155,6 +155,7 @@ static void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt)
 	struct svcxprt_rdma *xprt = ctxt->xprt;
 	int i;
 	for (i = 0; i < ctxt->count && ctxt->sge[i].length; i++) {
+		atomic_dec(&xprt->sc_dma_used);
 		ib_dma_unmap_single(xprt->sc_cm_id->device,
 				    ctxt->sge[i].addr,
 				    ctxt->sge[i].length,
@@ -519,6 +520,7 @@ static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *serv,
 	cma_xprt->sc_max_requests = svcrdma_max_requests;
 	cma_xprt->sc_sq_depth = svcrdma_max_requests * RPCRDMA_SQ_DEPTH_MULT;
 	atomic_set(&cma_xprt->sc_sq_count, 0);
+	atomic_set(&cma_xprt->sc_ctxt_used, 0);
 
 	if (!listener) {
 		int reqs = cma_xprt->sc_max_requests;
@@ -569,6 +571,7 @@ int svc_rdma_post_recv(struct svcxprt_rdma *xprt)
 		BUG_ON(sge_no >= xprt->sc_max_sge);
 		page = svc_rdma_get_page();
 		ctxt->pages[sge_no] = page;
+		atomic_inc(&xprt->sc_dma_used);
 		pa = ib_dma_map_page(xprt->sc_cm_id->device,
 				     page, 0, PAGE_SIZE,
 				     DMA_FROM_DEVICE);
@@ -1049,6 +1052,7 @@ static void __svc_rdma_free(struct work_struct *work)
 
 	/* Warn if we leaked a resource or under-referenced */
 	WARN_ON(atomic_read(&rdma->sc_ctxt_used) != 0);
+	WARN_ON(atomic_read(&rdma->sc_dma_used) != 0);
 
 	/* Destroy the QP if present (not a listener) */
 	if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
@@ -1169,6 +1173,7 @@ void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
 	length = svc_rdma_xdr_encode_error(xprt, rmsgp, err, va);
 
 	/* Prepare SGE for local address */
+	atomic_inc(&xprt->sc_dma_used);
 	sge.addr = ib_dma_map_page(xprt->sc_cm_id->device,
 				   p, 0, PAGE_SIZE, DMA_FROM_DEVICE);
 	sge.lkey = xprt->sc_phys_mr->lkey;
```