Diffstat (limited to 'drivers/infiniband/hw/ehca/ehca_qp.c')
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_qp.c  169
1 file changed, 115 insertions, 54 deletions
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index 84d435a..e2bd62b 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -273,6 +273,7 @@ static inline void queue2resp(struct ipzu_queue_resp *resp,
resp->queue_length = queue->queue_length;
resp->pagesize = queue->pagesize;
resp->toggle_state = queue->toggle_state;
+ resp->offset = queue->offset;
}
/*
@@ -309,7 +310,7 @@ static inline int init_qp_queue(struct ehca_shca *shca,
}
if (!ipz_rc) {
- ehca_err(ib_dev, "Cannot allocate page for queue. ipz_rc=%x",
+ ehca_err(ib_dev, "Cannot allocate page for queue. ipz_rc=%i",
ipz_rc);
return -EBUSY;
}
@@ -333,7 +334,7 @@ static inline int init_qp_queue(struct ehca_shca *shca,
if (cnt == (nr_q_pages - 1)) { /* last page! */
if (h_ret != expected_hret) {
ehca_err(ib_dev, "hipz_qp_register_rpage() "
- "h_ret= %lx ", h_ret);
+ "h_ret=%li", h_ret);
ret = ehca2ib_return_code(h_ret);
goto init_qp_queue1;
}
@@ -347,7 +348,7 @@ static inline int init_qp_queue(struct ehca_shca *shca,
} else {
if (h_ret != H_PAGE_REGISTERED) {
ehca_err(ib_dev, "hipz_qp_register_rpage() "
- "h_ret= %lx ", h_ret);
+ "h_ret=%li", h_ret);
ret = ehca2ib_return_code(h_ret);
goto init_qp_queue1;
}
@@ -512,7 +513,7 @@ static struct ehca_qp *internal_create_qp(
} else if (init_attr->cap.max_send_wr > 255) {
ehca_err(pd->device,
"Invalid Number of "
- "ax_send_wr=%x for UD QP_TYPE=%x",
+ "max_send_wr=%x for UD QP_TYPE=%x",
init_attr->cap.max_send_wr, qp_type);
return ERR_PTR(-EINVAL);
}
@@ -523,6 +524,18 @@ static struct ehca_qp *internal_create_qp(
return ERR_PTR(-EINVAL);
break;
}
+ } else {
+ int max_sge = (qp_type == IB_QPT_UD || qp_type == IB_QPT_SMI
+ || qp_type == IB_QPT_GSI) ? 250 : 252;
+
+ if (init_attr->cap.max_send_sge > max_sge
+ || init_attr->cap.max_recv_sge > max_sge) {
+ ehca_err(pd->device, "Invalid number of SGEs requested "
+ "send_sge=%x recv_sge=%x max_sge=%x",
+ init_attr->cap.max_send_sge,
+ init_attr->cap.max_recv_sge, max_sge);
+ return ERR_PTR(-EINVAL);
+ }
}
if (pd->uobject && udata)
@@ -556,7 +569,6 @@ static struct ehca_qp *internal_create_qp(
write_lock_irqsave(&ehca_qp_idr_lock, flags);
ret = idr_get_new(&ehca_qp_idr, my_qp, &my_qp->token);
write_unlock_irqrestore(&ehca_qp_idr_lock, flags);
-
} while (ret == -EAGAIN);
if (ret) {
@@ -565,11 +577,17 @@ static struct ehca_qp *internal_create_qp(
goto create_qp_exit0;
}
+ if (my_qp->token > 0x1FFFFFF) {
+ ret = -EINVAL;
+ ehca_err(pd->device, "Invalid number of qp");
+ goto create_qp_exit1;
+ }
+
parms.servicetype = ibqptype2servicetype(qp_type);
if (parms.servicetype < 0) {
ret = -EINVAL;
ehca_err(pd->device, "Invalid qp_type=%x", qp_type);
- goto create_qp_exit0;
+ goto create_qp_exit1;
}
if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
@@ -598,8 +616,7 @@ static struct ehca_qp *internal_create_qp(
parms.squeue.max_sge = max_send_sge;
parms.rqueue.max_sge = max_recv_sge;
- if (EHCA_BMASK_GET(HCA_CAP_MINI_QP, shca->hca_cap)
- && !(context && udata)) { /* no small QP support in userspace ATM */
+ if (EHCA_BMASK_GET(HCA_CAP_MINI_QP, shca->hca_cap)) {
if (HAS_SQ(my_qp))
ehca_determine_small_queue(
&parms.squeue, max_send_sge, is_llqp);
@@ -612,7 +629,7 @@ static struct ehca_qp *internal_create_qp(
h_ret = hipz_h_alloc_resource_qp(shca->ipz_hca_handle, &parms);
if (h_ret != H_SUCCESS) {
- ehca_err(pd->device, "h_alloc_resource_qp() failed h_ret=%lx",
+ ehca_err(pd->device, "h_alloc_resource_qp() failed h_ret=%li",
h_ret);
ret = ehca2ib_return_code(h_ret);
goto create_qp_exit1;
@@ -666,7 +683,7 @@ static struct ehca_qp *internal_create_qp(
&parms.squeue, swqe_size);
if (ret) {
ehca_err(pd->device, "Couldn't initialize squeue "
- "and pages ret=%x", ret);
+ "and pages ret=%i", ret);
goto create_qp_exit2;
}
}
@@ -677,7 +694,7 @@ static struct ehca_qp *internal_create_qp(
H_SUCCESS, &parms.rqueue, rwqe_size);
if (ret) {
ehca_err(pd->device, "Couldn't initialize rqueue "
- "and pages ret=%x", ret);
+ "and pages ret=%i", ret);
goto create_qp_exit3;
}
}
@@ -714,8 +731,6 @@ static struct ehca_qp *internal_create_qp(
if (qp_type == IB_QPT_GSI) {
h_ret = ehca_define_sqp(shca, my_qp, init_attr);
if (h_ret != H_SUCCESS) {
- ehca_err(pd->device, "ehca_define_sqp() failed rc=%lx",
- h_ret);
ret = ehca2ib_return_code(h_ret);
goto create_qp_exit4;
}
@@ -725,7 +740,7 @@ static struct ehca_qp *internal_create_qp(
ret = ehca_cq_assign_qp(my_qp->send_cq, my_qp);
if (ret) {
ehca_err(pd->device,
- "Couldn't assign qp to send_cq ret=%x", ret);
+ "Couldn't assign qp to send_cq ret=%i", ret);
goto create_qp_exit4;
}
}
@@ -741,12 +756,13 @@ static struct ehca_qp *internal_create_qp(
resp.ext_type = my_qp->ext_type;
resp.qkey = my_qp->qkey;
resp.real_qp_num = my_qp->real_qp_num;
- resp.ipz_rqueue.offset = my_qp->ipz_rqueue.offset;
- resp.ipz_squeue.offset = my_qp->ipz_squeue.offset;
+
if (HAS_SQ(my_qp))
queue2resp(&resp.ipz_squeue, &my_qp->ipz_squeue);
if (HAS_RQ(my_qp))
queue2resp(&resp.ipz_rqueue, &my_qp->ipz_rqueue);
+ resp.fw_handle_ofs = (u32)
+ (my_qp->galpas.user.fw_handle & (PAGE_SIZE - 1));
if (ib_copy_to_udata(udata, &resp, sizeof resp)) {
ehca_err(pd->device, "Copy to udata failed");
@@ -841,7 +857,7 @@ struct ib_srq *ehca_create_srq(struct ib_pd *pd,
mqpcb, my_qp->galpas.kernel);
if (hret != H_SUCCESS) {
ehca_err(pd->device, "Could not modify SRQ to INIT"
- "ehca_qp=%p qp_num=%x hret=%lx",
+ "ehca_qp=%p qp_num=%x h_ret=%li",
my_qp, my_qp->real_qp_num, hret);
goto create_srq2;
}
@@ -855,7 +871,7 @@ struct ib_srq *ehca_create_srq(struct ib_pd *pd,
mqpcb, my_qp->galpas.kernel);
if (hret != H_SUCCESS) {
ehca_err(pd->device, "Could not enable SRQ"
- "ehca_qp=%p qp_num=%x hret=%lx",
+ "ehca_qp=%p qp_num=%x h_ret=%li",
my_qp, my_qp->real_qp_num, hret);
goto create_srq2;
}
@@ -869,11 +885,13 @@ struct ib_srq *ehca_create_srq(struct ib_pd *pd,
mqpcb, my_qp->galpas.kernel);
if (hret != H_SUCCESS) {
ehca_err(pd->device, "Could not modify SRQ to RTR"
- "ehca_qp=%p qp_num=%x hret=%lx",
+ "ehca_qp=%p qp_num=%x h_ret=%li",
my_qp, my_qp->real_qp_num, hret);
goto create_srq2;
}
+ ehca_free_fw_ctrlblock(mqpcb);
+
return &my_qp->ib_srq;
create_srq2:
@@ -907,7 +925,7 @@ static int prepare_sqe_rts(struct ehca_qp *my_qp, struct ehca_shca *shca,
&bad_send_wqe_p, NULL, 2);
if (h_ret != H_SUCCESS) {
ehca_err(&shca->ib_device, "hipz_h_disable_and_get_wqe() failed"
- " ehca_qp=%p qp_num=%x h_ret=%lx",
+ " ehca_qp=%p qp_num=%x h_ret=%li",
my_qp, qp_num, h_ret);
return ehca2ib_return_code(h_ret);
}
@@ -985,7 +1003,7 @@ static int internal_modify_qp(struct ib_qp *ibqp,
mqpcb, my_qp->galpas.kernel);
if (h_ret != H_SUCCESS) {
ehca_err(ibqp->device, "hipz_h_query_qp() failed "
- "ehca_qp=%p qp_num=%x h_ret=%lx",
+ "ehca_qp=%p qp_num=%x h_ret=%li",
my_qp, ibqp->qp_num, h_ret);
ret = ehca2ib_return_code(h_ret);
goto modify_qp_exit1;
@@ -1021,7 +1039,7 @@ static int internal_modify_qp(struct ib_qp *ibqp,
ibqp, &smiqp_attr, smiqp_attr_mask, 1);
if (smirc) {
ehca_err(ibqp->device, "SMI RESET -> INIT failed. "
- "ehca_modify_qp() rc=%x", smirc);
+ "ehca_modify_qp() rc=%i", smirc);
ret = H_PARAMETER;
goto modify_qp_exit1;
}
@@ -1123,7 +1141,7 @@ static int internal_modify_qp(struct ib_qp *ibqp,
ret = prepare_sqe_rts(my_qp, shca, &bad_wqe_cnt);
if (ret) {
ehca_err(ibqp->device, "prepare_sqe_rts() failed "
- "ehca_qp=%p qp_num=%x ret=%x",
+ "ehca_qp=%p qp_num=%x ret=%i",
my_qp, ibqp->qp_num, ret);
goto modify_qp_exit2;
}
@@ -1149,6 +1167,13 @@ static int internal_modify_qp(struct ib_qp *ibqp,
}
if (attr_mask & IB_QP_PKEY_INDEX) {
+ if (attr->pkey_index >= 16) {
+ ret = -EINVAL;
+ ehca_err(ibqp->device, "Invalid pkey_index=%x. "
+ "ehca_qp=%p qp_num=%x max_pkey_index=f",
+ attr->pkey_index, my_qp, ibqp->qp_num);
+ goto modify_qp_exit2;
+ }
mqpcb->prim_p_key_idx = attr->pkey_index;
update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PRIM_P_KEY_IDX, 1);
}
@@ -1257,50 +1282,78 @@ static int internal_modify_qp(struct ib_qp *ibqp,
int ehca_mult = ib_rate_to_mult(
shca->sport[my_qp->init_attr.port_num].rate);
+ if (attr->alt_port_num < 1
+ || attr->alt_port_num > shca->num_ports) {
+ ret = -EINVAL;
+ ehca_err(ibqp->device, "Invalid alt_port=%x. "
+ "ehca_qp=%p qp_num=%x num_ports=%x",
+ attr->alt_port_num, my_qp, ibqp->qp_num,
+ shca->num_ports);
+ goto modify_qp_exit2;
+ }
+ mqpcb->alt_phys_port = attr->alt_port_num;
+
+ if (attr->alt_pkey_index >= 16) {
+ ret = -EINVAL;
+ ehca_err(ibqp->device, "Invalid alt_pkey_index=%x. "
+ "ehca_qp=%p qp_num=%x max_pkey_index=f",
+ attr->pkey_index, my_qp, ibqp->qp_num);
+ goto modify_qp_exit2;
+ }
+ mqpcb->alt_p_key_idx = attr->alt_pkey_index;
+
+ mqpcb->timeout_al = attr->alt_timeout;
mqpcb->dlid_al = attr->alt_ah_attr.dlid;
- update_mask |= EHCA_BMASK_SET(MQPCB_MASK_DLID_AL, 1);
mqpcb->source_path_bits_al = attr->alt_ah_attr.src_path_bits;
- update_mask |=
- EHCA_BMASK_SET(MQPCB_MASK_SOURCE_PATH_BITS_AL, 1);
mqpcb->service_level_al = attr->alt_ah_attr.sl;
- update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SERVICE_LEVEL_AL, 1);
- if (ah_mult < ehca_mult)
- mqpcb->max_static_rate = (ah_mult > 0) ?
- ((ehca_mult - 1) / ah_mult) : 0;
+ if (ah_mult > 0 && ah_mult < ehca_mult)
+ mqpcb->max_static_rate_al = (ehca_mult - 1) / ah_mult;
else
mqpcb->max_static_rate_al = 0;
- update_mask |= EHCA_BMASK_SET(MQPCB_MASK_MAX_STATIC_RATE_AL, 1);
+ /* OpenIB doesn't support alternate retry counts - copy them */
+ mqpcb->retry_count_al = mqpcb->retry_count;
+ mqpcb->rnr_retry_count_al = mqpcb->rnr_retry_count;
+
+ update_mask |= EHCA_BMASK_SET(MQPCB_MASK_ALT_PHYS_PORT, 1)
+ | EHCA_BMASK_SET(MQPCB_MASK_ALT_P_KEY_IDX, 1)
+ | EHCA_BMASK_SET(MQPCB_MASK_TIMEOUT_AL, 1)
+ | EHCA_BMASK_SET(MQPCB_MASK_DLID_AL, 1)
+ | EHCA_BMASK_SET(MQPCB_MASK_SOURCE_PATH_BITS_AL, 1)
+ | EHCA_BMASK_SET(MQPCB_MASK_SERVICE_LEVEL_AL, 1)
+ | EHCA_BMASK_SET(MQPCB_MASK_MAX_STATIC_RATE_AL, 1)
+ | EHCA_BMASK_SET(MQPCB_MASK_RETRY_COUNT_AL, 1)
+ | EHCA_BMASK_SET(MQPCB_MASK_RNR_RETRY_COUNT_AL, 1);
+
+ /*
+ * Always supply the GRH flag, even if it's zero, to give the
+ * hypervisor a clear "yes" or "no" instead of a "perhaps"
+ */
+ update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SEND_GRH_FLAG_AL, 1);
/*
* only if GRH is TRUE we might consider SOURCE_GID_IDX
* and DEST_GID otherwise phype will return H_ATTR_PARM!!!
*/
if (attr->alt_ah_attr.ah_flags == IB_AH_GRH) {
- mqpcb->send_grh_flag_al = 1 << 31;
- update_mask |=
- EHCA_BMASK_SET(MQPCB_MASK_SEND_GRH_FLAG_AL, 1);
- mqpcb->source_gid_idx_al =
- attr->alt_ah_attr.grh.sgid_index;
- update_mask |=
- EHCA_BMASK_SET(MQPCB_MASK_SOURCE_GID_IDX_AL, 1);
+ mqpcb->send_grh_flag_al = 1;
for (cnt = 0; cnt < 16; cnt++)
mqpcb->dest_gid_al.byte[cnt] =
attr->alt_ah_attr.grh.dgid.raw[cnt];
-
- update_mask |=
- EHCA_BMASK_SET(MQPCB_MASK_DEST_GID_AL, 1);
+ mqpcb->source_gid_idx_al =
+ attr->alt_ah_attr.grh.sgid_index;
mqpcb->flow_label_al = attr->alt_ah_attr.grh.flow_label;
- update_mask |=
- EHCA_BMASK_SET(MQPCB_MASK_FLOW_LABEL_AL, 1);
mqpcb->hop_limit_al = attr->alt_ah_attr.grh.hop_limit;
- update_mask |=
- EHCA_BMASK_SET(MQPCB_MASK_HOP_LIMIT_AL, 1);
mqpcb->traffic_class_al =
attr->alt_ah_attr.grh.traffic_class;
+
update_mask |=
+ EHCA_BMASK_SET(MQPCB_MASK_SOURCE_GID_IDX_AL, 1)
+ | EHCA_BMASK_SET(MQPCB_MASK_DEST_GID_AL, 1)
+ | EHCA_BMASK_SET(MQPCB_MASK_FLOW_LABEL_AL, 1)
+ | EHCA_BMASK_SET(MQPCB_MASK_HOP_LIMIT_AL, 1) |
EHCA_BMASK_SET(MQPCB_MASK_TRAFFIC_CLASS_AL, 1);
}
}
@@ -1322,7 +1375,14 @@ static int internal_modify_qp(struct ib_qp *ibqp,
}
if (attr_mask & IB_QP_PATH_MIG_STATE) {
- mqpcb->path_migration_state = attr->path_mig_state;
+ if (attr->path_mig_state != IB_MIG_REARM
+ && attr->path_mig_state != IB_MIG_MIGRATED) {
+ ret = -EINVAL;
+ ehca_err(ibqp->device, "Invalid mig_state=%x",
+ attr->path_mig_state);
+ goto modify_qp_exit2;
+ }
+ mqpcb->path_migration_state = attr->path_mig_state + 1;
update_mask |=
EHCA_BMASK_SET(MQPCB_MASK_PATH_MIGRATION_STATE, 1);
}
@@ -1348,7 +1408,7 @@ static int internal_modify_qp(struct ib_qp *ibqp,
if (h_ret != H_SUCCESS) {
ret = ehca2ib_return_code(h_ret);
- ehca_err(ibqp->device, "hipz_h_modify_qp() failed rc=%lx "
+ ehca_err(ibqp->device, "hipz_h_modify_qp() failed h_ret=%li "
"ehca_qp=%p qp_num=%x", h_ret, my_qp, ibqp->qp_num);
goto modify_qp_exit2;
}
@@ -1381,7 +1441,7 @@ static int internal_modify_qp(struct ib_qp *ibqp,
ret = ehca2ib_return_code(h_ret);
ehca_err(ibqp->device, "ENABLE in context of "
"RESET_2_INIT failed! Maybe you didn't get "
- "a LID h_ret=%lx ehca_qp=%p qp_num=%x",
+ "a LID h_ret=%li ehca_qp=%p qp_num=%x",
h_ret, my_qp, ibqp->qp_num);
goto modify_qp_exit2;
}
@@ -1469,7 +1529,7 @@ int ehca_query_qp(struct ib_qp *qp,
if (h_ret != H_SUCCESS) {
ret = ehca2ib_return_code(h_ret);
ehca_err(qp->device, "hipz_h_query_qp() failed "
- "ehca_qp=%p qp_num=%x h_ret=%lx",
+ "ehca_qp=%p qp_num=%x h_ret=%li",
my_qp, qp->qp_num, h_ret);
goto query_qp_exit1;
}
@@ -1490,7 +1550,7 @@ int ehca_query_qp(struct ib_qp *qp,
qp_attr->qkey = qpcb->qkey;
qp_attr->path_mtu = qpcb->path_mtu;
- qp_attr->path_mig_state = qpcb->path_migration_state;
+ qp_attr->path_mig_state = qpcb->path_migration_state - 1;
qp_attr->rq_psn = qpcb->receive_psn;
qp_attr->sq_psn = qpcb->send_psn;
qp_attr->min_rnr_timer = qpcb->min_rnr_nak_timer_field;
@@ -1644,7 +1704,7 @@ int ehca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
if (h_ret != H_SUCCESS) {
ret = ehca2ib_return_code(h_ret);
- ehca_err(ibsrq->device, "hipz_h_modify_qp() failed rc=%lx "
+ ehca_err(ibsrq->device, "hipz_h_modify_qp() failed h_ret=%li "
"ehca_qp=%p qp_num=%x",
h_ret, my_qp, my_qp->real_qp_num);
}
@@ -1687,12 +1747,13 @@ int ehca_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr)
if (h_ret != H_SUCCESS) {
ret = ehca2ib_return_code(h_ret);
ehca_err(srq->device, "hipz_h_query_qp() failed "
- "ehca_qp=%p qp_num=%x h_ret=%lx",
+ "ehca_qp=%p qp_num=%x h_ret=%li",
my_qp, my_qp->real_qp_num, h_ret);
goto query_srq_exit1;
}
srq_attr->max_wr = qpcb->max_nr_outst_recv_wr - 1;
+ srq_attr->max_sge = qpcb->actual_nr_sges_in_rq_wqe;
srq_attr->srq_limit = EHCA_BMASK_GET(
MQPCB_CURR_SRQ_LIMIT, qpcb->curr_srq_limit);
@@ -1737,7 +1798,7 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
ret = ehca_cq_unassign_qp(my_qp->send_cq, qp_num);
if (ret) {
ehca_err(dev, "Couldn't unassign qp from "
- "send_cq ret=%x qp_num=%x cq_num=%x", ret,
+ "send_cq ret=%i qp_num=%x cq_num=%x", ret,
qp_num, my_qp->send_cq->cq_number);
return ret;
}
@@ -1749,7 +1810,7 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
h_ret = hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp);
if (h_ret != H_SUCCESS) {
- ehca_err(dev, "hipz_h_destroy_qp() failed rc=%lx "
+ ehca_err(dev, "hipz_h_destroy_qp() failed h_ret=%li "
"ehca_qp=%p qp_num=%x", h_ret, my_qp, qp_num);
return ehca2ib_return_code(h_ret);
}