author    Ralph Campbell <ralph.campbell@qlogic.com>  2007-07-25 11:08:28 -0700
committer Roland Dreier <rolandd@cisco.com>  2007-10-09 20:05:49 -0700
commit    4ee97180ac76deb5a715ac45b7d7516e6ee82ae7 (patch)
tree      6683d1c34d3f36271a9d8275a645ce67222ffc56 /drivers/infiniband/hw/ipath/ipath_rc.c
parent    210d6ca3db058cd1d6e6fd235ee3e25d6ac221cd (diff)
IB/ipath: Change UD to queue work requests like RC & UC
The code to post UD sends tried to process work requests at the time
ib_post_send() is called without using a WQE queue.  This was fine as long
as HW resources were available for sending a packet.  This patch changes UD
to be handled more like RC and UC and shares more code.

Signed-off-by: Ralph Campbell <ralph.campbell@qlogic.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
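As a rough illustration of the queuing model the commit message describes (not the ipath driver's actual code), the sketch below queues each posted work request on a small software ring at post time and drains it later from a separate send path, e.g. a tasklet.  All names in it (struct sketch_qp, sketch_post_send(), sketch_drain_one(), SKETCH_QP_DEPTH) are hypothetical.

/*
 * Illustrative sketch only: post-side code queues a work request on a
 * software WQE ring instead of building and sending a packet immediately;
 * a separate send path drains the ring when resources are available.
 */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define SKETCH_QP_DEPTH 16

struct sketch_swqe {                    /* one queued work request */
	uint64_t wr_id;
	uint32_t length;
};

struct sketch_qp {
	struct sketch_swqe s_wq[SKETCH_QP_DEPTH];
	uint32_t s_head;                /* next free slot (producer) */
	uint32_t s_tail;                /* next WQE to send (consumer) */
};

/* Post side: just queue the request; no packet is built here. */
static int sketch_post_send(struct sketch_qp *qp, uint64_t wr_id, uint32_t len)
{
	uint32_t next = (qp->s_head + 1) % SKETCH_QP_DEPTH;

	if (next == qp->s_tail)
		return -1;              /* send queue full */
	qp->s_wq[qp->s_head].wr_id = wr_id;
	qp->s_wq[qp->s_head].length = len;
	qp->s_head = next;
	return 0;
}

/* Send side: called later, when a packet can actually go out. */
static int sketch_drain_one(struct sketch_qp *qp)
{
	struct sketch_swqe *wqe;

	if (qp->s_tail == qp->s_head)
		return 0;               /* nothing queued */
	wqe = &qp->s_wq[qp->s_tail];
	printf("sending wr_id %llu, %u bytes\n",
	       (unsigned long long)wqe->wr_id, (unsigned)wqe->length);
	qp->s_tail = (qp->s_tail + 1) % SKETCH_QP_DEPTH;
	return 1;
}

int main(void)
{
	struct sketch_qp qp;

	memset(&qp, 0, sizeof(qp));
	sketch_post_send(&qp, 1, 256);  /* queued at post time */
	sketch_post_send(&qp, 2, 512);
	while (sketch_drain_one(&qp))   /* drained later by the send path */
		;
	return 0;
}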
Diffstat (limited to 'drivers/infiniband/hw/ipath/ipath_rc.c')
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_rc.c  |  61
1 file changed, 41 insertions(+), 20 deletions(-)
diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
index 46744ea..53259da 100644
--- a/drivers/infiniband/hw/ipath/ipath_rc.c
+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
@@ -81,9 +81,8 @@ static void ipath_init_restart(struct ipath_qp *qp, struct ipath_swqe *wqe)
* Note that we are in the responder's side of the QP context.
* Note the QP s_lock must be held.
*/
-static int ipath_make_rc_ack(struct ipath_qp *qp,
- struct ipath_other_headers *ohdr,
- u32 pmtu, u32 *bth0p, u32 *bth2p)
+static int ipath_make_rc_ack(struct ipath_ibdev *dev, struct ipath_qp *qp,
+ struct ipath_other_headers *ohdr, u32 pmtu)
{
struct ipath_ack_entry *e;
u32 hwords;
@@ -192,8 +191,7 @@ static int ipath_make_rc_ack(struct ipath_qp *qp,
}
qp->s_hdrwords = hwords;
qp->s_cur_size = len;
- *bth0p = bth0 | (1 << 22); /* Set M bit */
- *bth2p = bth2;
+ ipath_make_ruc_header(dev, qp, ohdr, bth0, bth2);
return 1;
bail:
@@ -203,32 +201,39 @@ bail:
/**
* ipath_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC)
* @qp: a pointer to the QP
- * @ohdr: a pointer to the IB header being constructed
- * @pmtu: the path MTU
- * @bth0p: pointer to the BTH opcode word
- * @bth2p: pointer to the BTH PSN word
*
* Return 1 if constructed; otherwise, return 0.
- * Note the QP s_lock must be held and interrupts disabled.
*/
-int ipath_make_rc_req(struct ipath_qp *qp,
- struct ipath_other_headers *ohdr,
- u32 pmtu, u32 *bth0p, u32 *bth2p)
+int ipath_make_rc_req(struct ipath_qp *qp)
{
struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
+ struct ipath_other_headers *ohdr;
struct ipath_sge_state *ss;
struct ipath_swqe *wqe;
u32 hwords;
u32 len;
u32 bth0;
u32 bth2;
+ u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
char newreq;
+ unsigned long flags;
+ int ret = 0;
+
+ ohdr = &qp->s_hdr.u.oth;
+ if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
+ ohdr = &qp->s_hdr.u.l.oth;
+
+ /*
+ * The lock is needed to synchronize between the sending tasklet,
+ * the receive interrupt handler, and timeout resends.
+ */
+ spin_lock_irqsave(&qp->s_lock, flags);
/* Sending responses has higher priority over sending requests. */
if ((qp->r_head_ack_queue != qp->s_tail_ack_queue ||
(qp->s_flags & IPATH_S_ACK_PENDING) ||
qp->s_ack_state != OP(ACKNOWLEDGE)) &&
- ipath_make_rc_ack(qp, ohdr, pmtu, bth0p, bth2p))
+ ipath_make_rc_ack(dev, qp, ohdr, pmtu))
goto done;
if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK) ||
@@ -560,13 +565,12 @@ int ipath_make_rc_req(struct ipath_qp *qp,
qp->s_hdrwords = hwords;
qp->s_cur_sge = ss;
qp->s_cur_size = len;
- *bth0p = bth0 | (qp->s_state << 24);
- *bth2p = bth2;
+ ipath_make_ruc_header(dev, qp, ohdr, bth0 | (qp->s_state << 24), bth2);
done:
- return 1;
-
+ ret = 1;
bail:
- return 0;
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ return ret;
}
/**
@@ -627,7 +631,7 @@ static void send_rc_ack(struct ipath_qp *qp)
/*
* If we can send the ACK, clear the ACK state.
*/
- if (ipath_verbs_send(dev->dd, hwords, (u32 *) &hdr, 0, NULL) == 0) {
+ if (ipath_verbs_send(qp, &hdr, hwords, NULL, 0) == 0) {
dev->n_unicast_xmit++;
goto done;
}
@@ -757,7 +761,9 @@ void ipath_restart_rc(struct ipath_qp *qp, u32 psn, struct ib_wc *wc)
wc->vendor_err = 0;
wc->byte_len = 0;
wc->qp = &qp->ibqp;
+ wc->imm_data = 0;
wc->src_qp = qp->remote_qpn;
+ wc->wc_flags = 0;
wc->pkey_index = 0;
wc->slid = qp->remote_ah_attr.dlid;
wc->sl = qp->remote_ah_attr.sl;
@@ -1041,7 +1047,9 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode,
wc.vendor_err = 0;
wc.byte_len = 0;
wc.qp = &qp->ibqp;
+ wc.imm_data = 0;
wc.src_qp = qp->remote_qpn;
+ wc.wc_flags = 0;
wc.pkey_index = 0;
wc.slid = qp->remote_ah_attr.dlid;
wc.sl = qp->remote_ah_attr.sl;
@@ -1454,6 +1462,19 @@ static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev,
goto send_ack;
}
/*
+ * Try to send a simple ACK to work around a Mellanox bug
+ * which doesn't accept a RDMA read response or atomic
+ * response as an ACK for earlier SENDs or RDMA writes.
+ */
+ if (qp->r_head_ack_queue == qp->s_tail_ack_queue &&
+ !(qp->s_flags & IPATH_S_ACK_PENDING) &&
+ qp->s_ack_state == OP(ACKNOWLEDGE)) {
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ qp->r_nak_state = 0;
+ qp->r_ack_psn = qp->s_ack_queue[i].psn - 1;
+ goto send_ack;
+ }
+ /*
* Resend the RDMA read or atomic op which
* ACKs this duplicate request.
*/