From 5f068992a1bccda5574b4f6d33458ef806686d7f Mon Sep 17 00:00:00 2001
From: Roland Dreier
Date: Fri, 11 Nov 2005 14:06:01 -0800
Subject: [IB] srp: increase max_luns

Increase SRP max_luns to 512 to match the kernel's default, since SRP
storage targets can have lots of LUNs and the SRP initiator itself
doesn't have any particular limit.

Signed-off-by: Roland Dreier
---
 drivers/infiniband/ulp/srp/ib_srp.c | 2 ++
 drivers/infiniband/ulp/srp/ib_srp.h | 1 +
 2 files changed, 3 insertions(+)

diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 321a3a1..a364530 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -1417,6 +1417,8 @@ static ssize_t srp_create_target(struct class_device *class_dev,
 	if (!target_host)
 		return -ENOMEM;
 
+	target_host->max_lun = SRP_MAX_LUN;
+
 	target = host_to_target(target_host);
 	memset(target, 0, sizeof *target);
 
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index 4fec28a..b564f18 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -54,6 +54,7 @@ enum {
 	SRP_PORT_REDIRECT	= 1,
 	SRP_DLID_REDIRECT	= 2,
 
+	SRP_MAX_LUN		= 512,
 	SRP_MAX_IU_LEN		= 256,
 
 	SRP_RQ_SHIFT		= 6,
--
cgit v1.1

From 47f2bce9021b4974ed33b072ebb8348c8145c946 Mon Sep 17 00:00:00 2001
From: Roland Dreier
Date: Tue, 15 Nov 2005 00:19:21 -0800
Subject: [IB] srp: don't post receive if no send buf available

Have __srp_get_tx_iu() fail if the target port's request limit will not
allow the initiator to post a send.  This avoids posting a receive and
then failing to post the corresponding send.  If that happens, the
initiator ends up with an extra receive posted, and if this happens too
often, the receive queue will overflow.

Signed-off-by: Roland Dreier
---
 drivers/infiniband/ulp/srp/ib_srp.c | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)

diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index a364530..ee9fe22 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -802,13 +802,21 @@ static int srp_post_recv(struct srp_target_port *target)
 
 /*
  * Must be called with target->scsi_host->host_lock held to protect
- * req_lim and tx_head.
+ * req_lim and tx_head.  Lock cannot be dropped between call here and
+ * call to __srp_post_send().
  */
 static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target)
 {
 	if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE)
 		return NULL;
 
+	if (unlikely(target->req_lim < 1)) {
+		if (printk_ratelimit())
+			printk(KERN_DEBUG PFX "Target has req_lim %d\n",
+			       target->req_lim);
+		return NULL;
+	}
+
 	return target->tx_ring[target->tx_head & SRP_SQ_SIZE];
 }
 
@@ -823,11 +831,6 @@ static int __srp_post_send(struct srp_target_port *target,
 	struct ib_send_wr wr, *bad_wr;
 	int ret = 0;
 
-	if (target->req_lim < 1) {
-		printk(KERN_ERR PFX "Target has req_lim %d\n", target->req_lim);
-		return -EAGAIN;
-	}
-
 	list.addr   = iu->dma;
 	list.length = len;
 	list.lkey   = target->srp_host->mr->lkey;
--
cgit v1.1
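The ordering that the second patch enforces can be modeled in ordinary
user-space C.  The sketch below is illustrative only -- the structure, the
names (toy_target, SQ_SIZE), and the sizes are invented for the example, not
taken from the driver -- but the check order mirrors __srp_get_tx_iu(): a
transmit slot is handed out only when both a local send-queue slot and a
remote req_lim credit are available, so the caller never posts a receive
buffer it cannot pair with a send.

/*
 * Toy model of SRP-style credit flow control; not kernel code.
 */
#include <stdio.h>

#define SQ_SIZE 64

struct toy_target {
	unsigned tx_head;
	unsigned tx_tail;
	int	 req_lim;	/* credits granted by the target */
	int	 rx_posted;	/* receives currently outstanding */
};

/* Mirror of __srp_get_tx_iu(): fail early if either resource is gone. */
static int toy_get_tx_slot(struct toy_target *t)
{
	if (t->tx_head - t->tx_tail >= SQ_SIZE)
		return -1;		/* local send queue full */
	if (t->req_lim < 1)
		return -1;		/* no credit: do NOT post a receive */
	return t->tx_head & (SQ_SIZE - 1);
}

static int toy_queue_command(struct toy_target *t)
{
	int slot = toy_get_tx_slot(t);

	if (slot < 0)
		return -1;	/* caller retries later; nothing was posted */

	t->rx_posted++;		/* receive posted only after slot reserved */
	t->tx_head++;
	t->req_lim--;
	return 0;
}

int main(void)
{
	struct toy_target t = { .req_lim = 2 };
	int i;

	for (i = 0; i < 4; i++)
		printf("cmd %d -> %s (rx_posted=%d)\n", i,
		       toy_queue_command(&t) ? "deferred" : "sent", t.rx_posted);
	return 0;
}

With an initial credit of 2, the last two commands are deferred instead of
leaking posted receives, which is the failure mode the patch closes off.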
From cbc5b2bb9e226c2b2b981836d2289912e2ef3c1c Mon Sep 17 00:00:00 2001
From: Roland Dreier
Date: Tue, 15 Nov 2005 00:24:23 -0800
Subject: [IB] mthca: don't disable RDMA writes if no responder resources

Responder resources are only required to handle RDMA reads and atomic
operations, not RDMA writes.  So the driver should allow RDMA writes
even if responder resources are set to 0.

This is especially important for the UC transport -- with the old code,
it was impossible to enable RDMA writes for UC QPs.

Signed-off-by: Roland Dreier
---
 drivers/infiniband/hw/mthca/mthca_qp.c | 27 ++++++++++++---------------
 1 file changed, 12 insertions(+), 15 deletions(-)

diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 760c418d..5899f0c 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -730,15 +730,16 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 	}
 
 	if (attr_mask & IB_QP_ACCESS_FLAGS) {
+		qp_context->params2 |=
+			cpu_to_be32(attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE ?
+				    MTHCA_QP_BIT_RWE : 0);
+
 		/*
-		 * Only enable RDMA/atomics if we have responder
-		 * resources set to a non-zero value.
+		 * Only enable RDMA reads and atomics if we have
+		 * responder resources set to a non-zero value.
 		 */
 		if (qp->resp_depth) {
 			qp_context->params2 |=
-				cpu_to_be32(attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE ?
-					    MTHCA_QP_BIT_RWE : 0);
-			qp_context->params2 |=
 				cpu_to_be32(attr->qp_access_flags & IB_ACCESS_REMOTE_READ ?
 					    MTHCA_QP_BIT_RRE : 0);
 			qp_context->params2 |=
@@ -759,31 +760,27 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 		if (qp->resp_depth && !attr->max_dest_rd_atomic) {
 			/*
 			 * Lowering our responder resources to zero.
-			 * Turn off RDMA/atomics as responder.
-			 * (RWE/RRE/RAE in params2 already zero)
+			 * Turn off RDMA reads and atomics as responder.
+			 * (RRE/RAE in params2 already zero)
 			 */
-			qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE |
-								MTHCA_QP_OPTPAR_RRE |
+			qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RRE |
 								MTHCA_QP_OPTPAR_RAE);
 		}
 
 		if (!qp->resp_depth && attr->max_dest_rd_atomic) {
 			/*
 			 * Increasing our responder resources from
-			 * zero. Turn on RDMA/atomics as appropriate.
+			 * zero. Turn on RDMA reads and atomics as
+			 * appropriate.
 			 */
 			qp_context->params2 |=
-				cpu_to_be32(qp->atomic_rd_en & IB_ACCESS_REMOTE_WRITE ?
-					    MTHCA_QP_BIT_RWE : 0);
-			qp_context->params2 |=
 				cpu_to_be32(qp->atomic_rd_en & IB_ACCESS_REMOTE_READ ?
 					    MTHCA_QP_BIT_RRE : 0);
 			qp_context->params2 |=
 				cpu_to_be32(qp->atomic_rd_en & IB_ACCESS_REMOTE_ATOMIC ?
 					    MTHCA_QP_BIT_RAE : 0);
-			qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE |
-								MTHCA_QP_OPTPAR_RRE |
+			qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RRE |
 								MTHCA_QP_OPTPAR_RAE);
 		}
--
cgit v1.1
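From the verbs consumer's side, the case this mthca patch fixes looks like
the fragment below.  It is a minimal libibverbs sketch, not part of the
patch, and it assumes an already-created UC queue pair on port 1 with error
handling stripped down: remote writes are requested purely through the
access flags of the INIT transition, with no IBV_QP_MAX_DEST_RD_ATOMIC
involved, which is exactly the combination the old driver code dropped.

/*
 * Enable remote RDMA writes on a UC QP; responder resources never enter
 * the picture.  Assumes <infiniband/verbs.h> from libibverbs.
 */
#include <infiniband/verbs.h>

int enable_remote_write(struct ibv_qp *qp)
{
	struct ibv_qp_attr attr = {
		.qp_state	 = IBV_QPS_INIT,
		.pkey_index	 = 0,
		.port_num	 = 1,
		.qp_access_flags = IBV_ACCESS_REMOTE_WRITE,
	};

	/*
	 * No IBV_QP_MAX_DEST_RD_ATOMIC here: a UC QP never has responder
	 * resources, yet the RWE bit must still reach the hardware.
	 */
	return ibv_modify_qp(qp, &attr,
			     IBV_QP_STATE      |
			     IBV_QP_PKEY_INDEX |
			     IBV_QP_PORT       |
			     IBV_QP_ACCESS_FLAGS);
}

With the patched driver, the RWE bit is programmed even though the QP never
gets responder resources.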
From 48fd0d1fdd357caa2de8cb4ce6af810df7535f43 Mon Sep 17 00:00:00 2001
From: "Michael S. Tsirkin"
Date: Fri, 18 Nov 2005 14:11:17 -0800
Subject: IB/mthca: Safer max_send_sge/max_recv_sge calculation

Calculation of QP capabilities still isn't exactly right in mthca: the
max_send_sge/max_recv_sge fields returned in create_qp can exceed the
hardware-supported limits.

Signed-off-by: Michael S. Tsirkin
Signed-off-by: Roland Dreier
---
 drivers/infiniband/hw/mthca/mthca_qp.c | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 5899f0c..dd4e133 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -918,10 +918,12 @@ static void mthca_adjust_qp_caps(struct mthca_dev *dev,
 	else
 		qp->max_inline_data = max_data_size - MTHCA_INLINE_HEADER_SIZE;
 
-	qp->sq.max_gs = max_data_size / sizeof (struct mthca_data_seg);
-	qp->rq.max_gs = (min(dev->limits.max_desc_sz, 1 << qp->rq.wqe_shift) -
-			 sizeof (struct mthca_next_seg)) /
-			sizeof (struct mthca_data_seg);
+	qp->sq.max_gs = min_t(int, dev->limits.max_sg,
+			      max_data_size / sizeof (struct mthca_data_seg));
+	qp->rq.max_gs = min_t(int, dev->limits.max_sg,
+			      (min(dev->limits.max_desc_sz, 1 << qp->rq.wqe_shift) -
+			       sizeof (struct mthca_next_seg)) /
+			       sizeof (struct mthca_data_seg));
 }
 
 /*
--
cgit v1.1

From eabc77935d8d2a761c88b9cbb6313bd54b6ddbb3 Mon Sep 17 00:00:00 2001
From: Roland Dreier
Date: Fri, 18 Nov 2005 14:18:26 -0800
Subject: IB/umad: make sure write()s have sufficient data

Make sure that userspace passes in enough data when sending a MAD.  We
always copy at least sizeof (struct ib_user_mad) + IB_MGMT_RMPP_HDR
bytes from userspace, so anything less is definitely invalid.  Also, if
the length is less than this limit, it's possible for the second
copy_from_user() to get a negative length and trigger a BUG().

Signed-off-by: Roland Dreier
---
 drivers/infiniband/core/user_mad.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index 5ea741f..e73f81c 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -312,7 +312,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
 	int ret, length, hdr_len, copy_offset;
 	int rmpp_active = 0;
 
-	if (count < sizeof (struct ib_user_mad))
+	if (count < sizeof (struct ib_user_mad) + IB_MGMT_RMPP_HDR)
 		return -EINVAL;
 
 	length = count - sizeof (struct ib_user_mad);
--
cgit v1.1
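For completeness, here is what the new limit means for a userspace MAD
sender.  This is an illustrative sketch, not part of the patch: it assumes
libibumad's umad_size(), which reports sizeof (struct ib_user_mad), and it
spells out the RMPP header length locally because the kernel's
IB_MGMT_RMPP_HDR constant is not exported to userspace.

#include <stdio.h>
#include <infiniband/umad.h>	/* libibumad: umad_size() */

/*
 * Assumed value mirroring the kernel's IB_MGMT_RMPP_HDR:
 * 24-byte MAD header + 12-byte RMPP header.
 */
#define MAD_RMPP_HDR_LEN 36

/*
 * After the patch above, a write() to /dev/infiniband/umadN shorter than
 * this is rejected with EINVAL before any MAD parsing happens.
 */
static size_t umad_min_write_len(void)
{
	return umad_size() + MAD_RMPP_HDR_LEN;
}

int main(void)
{
	printf("minimum umad write length: %zu bytes\n", umad_min_write_len());
	return 0;
}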