author    Mike Marciniszyn <mike.marciniszyn@intel.com>  2016-02-04 11:03:28 -0800
committer Doug Ledford <dledford@redhat.com>  2016-03-10 20:38:04 -0500
commit    ee84541ad11e70d372670160e727680051801517 (patch)
tree      abbdb47297d51ea17a4c2a10323d2ef6325651c1 /drivers/infiniband
parent    6c2ab0b857d1b674c5f710d2cbf06a0f3ac52313 (diff)
IB/qib: Ensure last cursor is updated prior to complete
This patch is a prerequisite for adding a separate lock for post send.
s_last must be updated before any send completion is returned, to avoid
a race between a poll cq seeing the completion and the post send
checking for a full queue.

Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
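For context, a minimal userspace sketch of the ordering this patch
establishes. The struct, the s_head field, the barrier macro, and the
function names here are hypothetical illustrations; the real code paths
are qib_rc_send_complete(), do_rc_completion(), and qib_send_complete()
in the diff below, racing against the post-send path's full-queue check.

#include <stdint.h>

/* Compiler barrier, standing in for the kernel's barrier(). */
#define sketch_barrier() __asm__ __volatile__("" ::: "memory")

/* Hypothetical send-queue ring, modeled on qp->s_last / qp->s_size. */
struct sketch_sq {
	uint32_t s_head;	/* next slot the post side fills */
	uint32_t s_last;	/* oldest slot not yet completed */
	uint32_t s_size;	/* number of slots in the ring */
};

/*
 * Completion side: advance s_last through a local copy and fence
 * before the completion is published, so the post side can never
 * see the completion while the ring still appears full.
 */
static void sketch_complete(struct sketch_sq *sq)
{
	uint32_t s_last = sq->s_last;

	if (++s_last >= sq->s_size)
		s_last = 0;
	sq->s_last = s_last;	/* free the slot first... */
	sketch_barrier();
	/* ...then post the CQ entry (rvt_cq_enter() in the patch). */
}

/* Post side: the full-queue check the completion races against. */
static int sketch_post_send(struct sketch_sq *sq)
{
	uint32_t next = sq->s_head + 1;

	if (next >= sq->s_size)
		next = 0;
	if (next == sq->s_last)	/* full: slot not retired yet */
		return -1;
	sq->s_head = next;
	return 0;
}

If the completion were published before s_last advanced, a consumer
could observe the CQ entry, repost, and still find next == s_last.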
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/hw/qib/qib_rc.c  | 20 ++++++++++++++++----
-rw-r--r--  drivers/infiniband/hw/qib/qib_ruc.c | 12 +++++++-----
2 files changed, 23 insertions(+), 9 deletions(-)
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index 044525d9..ce886b2 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -1008,10 +1008,18 @@ void qib_rc_send_complete(struct rvt_qp *qp, struct qib_ib_header *hdr)
 		start_timer(qp);
 
 	while (qp->s_last != qp->s_acked) {
+		u32 s_last;
+
 		wqe = rvt_get_swqe_ptr(qp, qp->s_last);
 		if (qib_cmp24(wqe->lpsn, qp->s_sending_psn) >= 0 &&
 		    qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
 			break;
+		s_last = qp->s_last;
+		if (++s_last >= qp->s_size)
+			s_last = 0;
+		qp->s_last = s_last;
+		/* see post_send() */
+		barrier();
 		for (i = 0; i < wqe->wr.num_sge; i++) {
 			struct rvt_sge *sge = &wqe->sg_list[i];
 
@@ -1028,8 +1036,6 @@ void qib_rc_send_complete(struct rvt_qp *qp, struct qib_ib_header *hdr)
 			wc.qp = &qp->ibqp;
 			rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc, 0);
 		}
-		if (++qp->s_last >= qp->s_size)
-			qp->s_last = 0;
 	}
 	/*
 	 * If we were waiting for sends to complete before resending,
@@ -1068,11 +1074,19 @@ static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
 	 */
 	if (qib_cmp24(wqe->lpsn, qp->s_sending_psn) < 0 ||
 	    qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
+		u32 s_last;
+
 		for (i = 0; i < wqe->wr.num_sge; i++) {
 			struct rvt_sge *sge = &wqe->sg_list[i];
 
 			rvt_put_mr(sge->mr);
 		}
+		s_last = qp->s_last;
+		if (++s_last >= qp->s_size)
+			s_last = 0;
+		qp->s_last = s_last;
+		/* see post_send() */
+		barrier();
 		/* Post a send completion queue entry if requested. */
 		if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
 		    (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
@@ -1084,8 +1098,6 @@ static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
 			wc.qp = &qp->ibqp;
 			rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc, 0);
 		}
-		if (++qp->s_last >= qp->s_size)
-			qp->s_last = 0;
 	} else
 		this_cpu_inc(*ibp->rvp.rc_delayed_comp);
 
diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c
index 56668cb..2623684 100644
--- a/drivers/infiniband/hw/qib/qib_ruc.c
+++ b/drivers/infiniband/hw/qib/qib_ruc.c
@@ -795,6 +795,13 @@ void qib_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
 	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
 		return;
 
+	last = qp->s_last;
+	old_last = last;
+	if (++last >= qp->s_size)
+		last = 0;
+	qp->s_last = last;
+	/* See post_send() */
+	barrier();
 	for (i = 0; i < wqe->wr.num_sge; i++) {
 		struct rvt_sge *sge = &wqe->sg_list[i];
 
@@ -822,11 +829,6 @@ void qib_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
 			     status != IB_WC_SUCCESS);
 	}
 
-	last = qp->s_last;
-	old_last = last;
-	if (++last >= qp->s_size)
-		last = 0;
-	qp->s_last = last;
 	if (qp->s_acked == old_last)
 		qp->s_acked = last;
 	if (qp->s_cur == old_last)