From d1e09ebf426ff34b4b6bbd6212b820edeb992bd4 Mon Sep 17 00:00:00 2001 From: Roland Dreier Date: Sat, 7 Jul 2012 15:13:47 -0700 Subject: RDMA/ocrdma: Fix assignment of max_srq_sge in device query We want to set attr->max_srq_sge to dev->attr.max_srq_sge, not to itself. This was detected by Coverity (CID 709210). Signed-off-by: Roland Dreier --- drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c index 2e2e7ae..b2f9784b 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c @@ -97,7 +97,7 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr) min(dev->attr.max_ord_per_qp, dev->attr.max_ird_per_qp); attr->max_qp_init_rd_atom = dev->attr.max_ord_per_qp; attr->max_srq = (dev->attr.max_qp - 1); - attr->max_srq_sge = attr->max_srq_sge; + attr->max_srq_sge = dev->attr.max_srq_sge; attr->max_srq_wr = dev->attr.max_rqe; attr->local_ca_ack_delay = dev->attr.local_ca_ack_delay; attr->max_fast_reg_page_list_len = 0; -- cgit v1.1 From f747c34af4f56cc239e04505bd583dd3bdcfe49d Mon Sep 17 00:00:00 2001 From: Roland Dreier Date: Thu, 5 Jul 2012 14:16:54 -0700 Subject: RDMA/cxgb4: Fix endianness of addition to mpa->private_data_size sparse correctly warns that if mpa->private_data_size is __be16, then doing += on it is wrong, even if we do += htons() -- on a little endian system, carries will go the wrong way. Fix this up by doing the addition in native byte order. Acked-by: Steve Wise Signed-off-by: Roland Dreier --- drivers/infiniband/hw/cxgb4/cm.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index b18870c..51f4206 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c @@ -548,8 +548,8 @@ static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb, } if (mpa_rev_to_use == 2) { - mpa->private_data_size += - htons(sizeof(struct mpa_v2_conn_params)); + mpa->private_data_size = htons(ntohs(mpa->private_data_size) + + sizeof (struct mpa_v2_conn_params)); mpa_v2_params.ird = htons((u16)ep->ird); mpa_v2_params.ord = htons((u16)ep->ord); @@ -635,8 +635,8 @@ static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen) if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { mpa->flags |= MPA_ENHANCED_RDMA_CONN; - mpa->private_data_size += - htons(sizeof(struct mpa_v2_conn_params)); + mpa->private_data_size = htons(ntohs(mpa->private_data_size) + + sizeof (struct mpa_v2_conn_params)); mpa_v2_params.ird = htons(((u16)ep->ird) | (peer2peer ? 
MPA_V2_PEER2PEER_MODEL : 0)); @@ -715,8 +715,8 @@ static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen) if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { mpa->flags |= MPA_ENHANCED_RDMA_CONN; - mpa->private_data_size += - htons(sizeof(struct mpa_v2_conn_params)); + mpa->private_data_size = htons(ntohs(mpa->private_data_size) + + sizeof (struct mpa_v2_conn_params)); mpa_v2_params.ird = htons((u16)ep->ird); mpa_v2_params.ord = htons((u16)ep->ord); if (peer2peer && (ep->mpa_attr.p2p_type != -- cgit v1.1 From d90f9b3591b3b5fa86178e318008fc1c531a84dc Mon Sep 17 00:00:00 2001 From: Roland Dreier Date: Thu, 5 Jul 2012 22:39:34 -0700 Subject: IB: Use IS_ENABLED(CONFIG_IPV6) Instead of testing defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) Signed-off-by: Roland Dreier --- drivers/infiniband/core/addr.c | 4 ++-- drivers/infiniband/core/cma.c | 2 +- drivers/infiniband/hw/ocrdma/ocrdma_main.c | 7 +++---- drivers/infiniband/ulp/ipoib/ipoib_cm.c | 2 +- 4 files changed, 7 insertions(+), 8 deletions(-) diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c index 6ef660c..28058ae 100644 --- a/drivers/infiniband/core/addr.c +++ b/drivers/infiniband/core/addr.c @@ -129,7 +129,7 @@ int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr) dev_put(dev); break; -#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) +#if IS_ENABLED(CONFIG_IPV6) case AF_INET6: rcu_read_lock(); for_each_netdev_rcu(&init_net, dev) { @@ -243,7 +243,7 @@ out: return ret; } -#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) +#if IS_ENABLED(CONFIG_IPV6) static int addr6_resolve(struct sockaddr_in6 *src_in, struct sockaddr_in6 *dst_in, struct rdma_dev_addr *addr) diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 2e826f9..4d8f592 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -2278,7 +2278,7 @@ static int cma_get_port(struct rdma_id_private *id_priv) static int cma_check_linklocal(struct rdma_dev_addr *dev_addr, struct sockaddr *addr) { -#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) +#if IS_ENABLED(CONFIG_IPV6) struct sockaddr_in6 *sin6; if (addr->sa_family != AF_INET6) diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c index b050e62..5a04452 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c @@ -202,8 +202,7 @@ static int ocrdma_build_sgid_tbl(struct ocrdma_dev *dev) return 0; } -#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) || \ -defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) +#if IS_ENABLED(CONFIG_IPV6) || IS_ENABLED(CONFIG_VLAN_8021Q) static int ocrdma_inet6addr_event(struct notifier_block *notifier, unsigned long event, void *ptr) @@ -549,7 +548,7 @@ static struct ocrdma_driver ocrdma_drv = { static void ocrdma_unregister_inet6addr_notifier(void) { -#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) +#if IS_ENABLED(CONFIG_IPV6) unregister_inet6addr_notifier(&ocrdma_inet6addr_notifier); #endif } @@ -558,7 +557,7 @@ static int __init ocrdma_init_module(void) { int status; -#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) +#if IS_ENABLED(CONFIG_IPV6) status = register_inet6addr_notifier(&ocrdma_inet6addr_notifier); if (status) return status; diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index 014504d..fd3871e 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ 
b/drivers/infiniband/ulp/ipoib/ipoib_cm.c @@ -1376,7 +1376,7 @@ static void ipoib_cm_skb_reap(struct work_struct *work) if (skb->protocol == htons(ETH_P_IP)) icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu)); -#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) +#if IS_ENABLED(CONFIG_IPV6) else if (skb->protocol == htons(ETH_P_IPV6)) icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); #endif -- cgit v1.1 From 752a50cab600c6d46c5a1921c6a6d2fb116c8a4b Mon Sep 17 00:00:00 2001 From: Jack Morgenstein Date: Tue, 19 Jun 2012 11:21:33 +0300 Subject: mlx4_core: Pass an invalid PCI id number to VFs Currently, VFs have 0 in their dev->caps.function field. This is a valid pci id (usually of the PF). Instead, pass an invalid PCI id to the VF via QUERY_FW, so that if the value gets accessed in the VF driver, we'll catch the problem. Signed-off-by: Jack Morgenstein Signed-off-by: Roland Dreier --- drivers/net/ethernet/mellanox/mlx4/fw.c | 10 +++++++--- include/linux/mlx4/device.h | 2 ++ 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index 9c83bb8..4281ce0 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -881,11 +881,12 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev) ((fw_ver & 0xffff0000ull) >> 16) | ((fw_ver & 0x0000ffffull) << 16); + MLX4_GET(lg, outbox, QUERY_FW_PPF_ID); + dev->caps.function = lg; + if (mlx4_is_slave(dev)) goto out; - MLX4_GET(lg, outbox, QUERY_FW_PPF_ID); - dev->caps.function = lg; MLX4_GET(cmd_if_rev, outbox, QUERY_FW_CMD_IF_REV_OFFSET); if (cmd_if_rev < MLX4_COMMAND_INTERFACE_MIN_REV || @@ -966,9 +967,12 @@ int mlx4_QUERY_FW_wrapper(struct mlx4_dev *dev, int slave, if (err) return err; - /* for slaves, zero out everything except FW version */ + /* for slaves, set pci PPF ID to invalid and zero out everything + * else except FW version */ outbuf[0] = outbuf[1] = 0; memset(&outbuf[8], 0, QUERY_FW_OUT_SIZE - 8); + outbuf[QUERY_FW_PPF_ID] = MLX4_INVALID_SLAVE_ID; + return 0; } diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 6a8f002..8eadf0f1 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -534,6 +534,8 @@ struct mlx4_init_port_param { if (((dev)->caps.port_mask[port] == MLX4_PORT_TYPE_IB) || \ ((dev)->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)) +#define MLX4_INVALID_SLAVE_ID 0xFF + static inline int mlx4_is_master(struct mlx4_dev *dev) { return dev->flags & MLX4_FLAG_MASTER; -- cgit v1.1 From b1d8eb5a213640f1be98a90e73a241d15b70045c Mon Sep 17 00:00:00 2001 From: Jack Morgenstein Date: Tue, 19 Jun 2012 11:21:35 +0300 Subject: IB/mlx4: Add debug prints Define pr_fmt and add some pr_debug prints. 
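For reference, a minimal standalone sketch of the mechanism (illustrative only, not part of the diff below): pr_fmt() is expanded inside every pr_*() macro at its call site, so redefining it prefixes all messages in the translation unit. printk.h installs a default pr_fmt, which is why the patch #undef's it first in mlx4_ib.h; the trailing __func__ in the definition becomes an extra printk argument:

	#include <linux/printk.h>

	#ifdef pr_fmt			/* printk.h provides a default definition */
	#undef pr_fmt
	#endif
	#define pr_fmt(fmt) "<mlx4_ib> %s: " fmt, __func__

	static void sketch(void)
	{
		/*
		 * Expands to:
		 *   printk(KERN_DEBUG "<mlx4_ib> %s: " "qpn 0x%x\n", __func__, 0x40);
		 * (emitted only with -DDEBUG or CONFIG_DYNAMIC_DEBUG)
		 */
		pr_debug("qpn 0x%x\n", 0x40);
	}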
Signed-off-by: Jack Morgenstein Signed-off-by: Roland Dreier --- drivers/infiniband/hw/mlx4/mad.c | 19 +++++++++++++++++++ drivers/infiniband/hw/mlx4/main.c | 2 +- drivers/infiniband/hw/mlx4/mlx4_ib.h | 10 ++++++++++ drivers/infiniband/hw/mlx4/qp.c | 27 +++++++++++++++++++++++++-- 4 files changed, 55 insertions(+), 3 deletions(-) diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c index 259b067..84786a9 100644 --- a/drivers/infiniband/hw/mlx4/mad.c +++ b/drivers/infiniband/hw/mlx4/mad.c @@ -242,6 +242,25 @@ static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, int err; struct ib_port_attr pattr; + if (in_wc && in_wc->qp->qp_num) { + pr_debug("received MAD: slid:%d sqpn:%d " + "dlid_bits:%d dqpn:%d wc_flags:0x%x, cls %x, mtd %x, atr %x\n", + in_wc->slid, in_wc->src_qp, + in_wc->dlid_path_bits, + in_wc->qp->qp_num, + in_wc->wc_flags, + in_mad->mad_hdr.mgmt_class, in_mad->mad_hdr.method, + be16_to_cpu(in_mad->mad_hdr.attr_id)); + if (in_wc->wc_flags & IB_WC_GRH) { + pr_debug("sgid_hi:0x%016llx sgid_lo:0x%016llx\n", + be64_to_cpu(in_grh->sgid.global.subnet_prefix), + be64_to_cpu(in_grh->sgid.global.interface_id)); + pr_debug("dgid_hi:0x%016llx dgid_lo:0x%016llx\n", + be64_to_cpu(in_grh->dgid.global.subnet_prefix), + be64_to_cpu(in_grh->dgid.global.interface_id)); + } + } + slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE); if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && slid == 0) { diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 3530c41..5266b49 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -50,7 +50,7 @@ #include "mlx4_ib.h" #include "user.h" -#define DRV_NAME "mlx4_ib" +#define DRV_NAME MLX4_IB_DRV_NAME #define DRV_VERSION "1.0" #define DRV_RELDATE "April 4, 2008" diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h index ff36655..5f298af 100644 --- a/drivers/infiniband/hw/mlx4/mlx4_ib.h +++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h @@ -44,6 +44,16 @@ #include #include +#define MLX4_IB_DRV_NAME "mlx4_ib" + +#ifdef pr_fmt +#undef pr_fmt +#endif +#define pr_fmt(fmt) "<" MLX4_IB_DRV_NAME "> %s: " fmt, __func__ + +#define mlx4_ib_warn(ibdev, format, arg...) \ + dev_warn((ibdev)->dma_device, MLX4_IB_DRV_NAME ": " format, ## arg) + enum { MLX4_IB_SQ_MIN_WQE_SHIFT = 6, MLX4_IB_MAX_HEADROOM = 2048 diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index 8d4ed24..84b2696 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c @@ -1335,11 +1335,21 @@ int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state; new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state; - if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask)) + if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask)) { + pr_debug("qpn 0x%x: invalid attribute mask specified " + "for transition %d to %d. qp_type %d," + " attr_mask 0x%x\n", + ibqp->qp_num, cur_state, new_state, + ibqp->qp_type, attr_mask); goto out; + } if ((attr_mask & IB_QP_PORT) && (attr->port_num == 0 || attr->port_num > dev->dev->caps.num_ports)) { + pr_debug("qpn 0x%x: invalid port number (%d) specified " + "for transition %d to %d. 
qp_type %d\n", + ibqp->qp_num, attr->port_num, cur_state, + new_state, ibqp->qp_type); goto out; } @@ -1350,17 +1360,30 @@ int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, if (attr_mask & IB_QP_PKEY_INDEX) { int p = attr_mask & IB_QP_PORT ? attr->port_num : qp->port; - if (attr->pkey_index >= dev->dev->caps.pkey_table_len[p]) + if (attr->pkey_index >= dev->dev->caps.pkey_table_len[p]) { + pr_debug("qpn 0x%x: invalid pkey index (%d) specified " + "for transition %d to %d. qp_type %d\n", + ibqp->qp_num, attr->pkey_index, cur_state, + new_state, ibqp->qp_type); goto out; + } } if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC && attr->max_rd_atomic > dev->dev->caps.max_qp_init_rdma) { + pr_debug("qpn 0x%x: max_rd_atomic (%d) too large. " + "Transition %d to %d. qp_type %d\n", + ibqp->qp_num, attr->max_rd_atomic, cur_state, + new_state, ibqp->qp_type); goto out; } if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC && attr->max_dest_rd_atomic > dev->dev->caps.max_qp_dest_rdma) { + pr_debug("qpn 0x%x: max_dest_rd_atomic (%d) too large. " + "Transition %d to %d. qp_type %d\n", + ibqp->qp_num, attr->max_dest_rd_atomic, cur_state, + new_state, ibqp->qp_type); goto out; } -- cgit v1.1 From aeab97ed1503bedbe14d1e1c5ab7b90253a67664 Mon Sep 17 00:00:00 2001 From: Erez Shitrit Date: Tue, 19 Jun 2012 11:21:38 +0300 Subject: IB/sa: Add GuidInfoRecord query support This query is needed for SRIOV alias GUID support. The query is implemented per the IB Spec definition in section 15.2.5.18 (GuidInfoRecord). Signed-off-by: Erez Shitrit Signed-off-by: Jack Morgenstein Signed-off-by: Or Gerlitz Signed-off-by: Roland Dreier --- drivers/infiniband/core/sa_query.c | 133 +++++++++++++++++++++++++++++++++++++ include/rdma/ib_sa.h | 33 +++++++++ 2 files changed, 166 insertions(+) diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c index fbbfa24..a8905ab 100644 --- a/drivers/infiniband/core/sa_query.c +++ b/drivers/infiniband/core/sa_query.c @@ -94,6 +94,12 @@ struct ib_sa_path_query { struct ib_sa_query sa_query; }; +struct ib_sa_guidinfo_query { + void (*callback)(int, struct ib_sa_guidinfo_rec *, void *); + void *context; + struct ib_sa_query sa_query; +}; + struct ib_sa_mcmember_query { void (*callback)(int, struct ib_sa_mcmember_rec *, void *); void *context; @@ -347,6 +353,34 @@ static const struct ib_field service_rec_table[] = { .size_bits = 2*64 }, }; +#define GUIDINFO_REC_FIELD(field) \ + .struct_offset_bytes = offsetof(struct ib_sa_guidinfo_rec, field), \ + .struct_size_bytes = sizeof((struct ib_sa_guidinfo_rec *) 0)->field, \ + .field_name = "sa_guidinfo_rec:" #field + +static const struct ib_field guidinfo_rec_table[] = { + { GUIDINFO_REC_FIELD(lid), + .offset_words = 0, + .offset_bits = 0, + .size_bits = 16 }, + { GUIDINFO_REC_FIELD(block_num), + .offset_words = 0, + .offset_bits = 16, + .size_bits = 8 }, + { GUIDINFO_REC_FIELD(res1), + .offset_words = 0, + .offset_bits = 24, + .size_bits = 8 }, + { GUIDINFO_REC_FIELD(res2), + .offset_words = 1, + .offset_bits = 0, + .size_bits = 32 }, + { GUIDINFO_REC_FIELD(guid_info_list), + .offset_words = 2, + .offset_bits = 0, + .size_bits = 512 }, +}; + static void free_sm_ah(struct kref *kref) { struct ib_sa_sm_ah *sm_ah = container_of(kref, struct ib_sa_sm_ah, ref); @@ -945,6 +979,105 @@ err1: return ret; } +/* Support GuidInfoRecord */ +static void ib_sa_guidinfo_rec_callback(struct ib_sa_query *sa_query, + int status, + struct ib_sa_mad *mad) +{ + struct ib_sa_guidinfo_query *query = + container_of(sa_query, struct 
ib_sa_guidinfo_query, sa_query); + + if (mad) { + struct ib_sa_guidinfo_rec rec; + + ib_unpack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table), + mad->data, &rec); + query->callback(status, &rec, query->context); + } else + query->callback(status, NULL, query->context); +} + +static void ib_sa_guidinfo_rec_release(struct ib_sa_query *sa_query) +{ + kfree(container_of(sa_query, struct ib_sa_guidinfo_query, sa_query)); +} + +int ib_sa_guid_info_rec_query(struct ib_sa_client *client, + struct ib_device *device, u8 port_num, + struct ib_sa_guidinfo_rec *rec, + ib_sa_comp_mask comp_mask, u8 method, + int timeout_ms, gfp_t gfp_mask, + void (*callback)(int status, + struct ib_sa_guidinfo_rec *resp, + void *context), + void *context, + struct ib_sa_query **sa_query) +{ + struct ib_sa_guidinfo_query *query; + struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client); + struct ib_sa_port *port; + struct ib_mad_agent *agent; + struct ib_sa_mad *mad; + int ret; + + if (!sa_dev) + return -ENODEV; + + if (method != IB_MGMT_METHOD_GET && + method != IB_MGMT_METHOD_SET && + method != IB_SA_METHOD_DELETE) { + return -EINVAL; + } + + port = &sa_dev->port[port_num - sa_dev->start_port]; + agent = port->agent; + + query = kmalloc(sizeof *query, gfp_mask); + if (!query) + return -ENOMEM; + + query->sa_query.port = port; + ret = alloc_mad(&query->sa_query, gfp_mask); + if (ret) + goto err1; + + ib_sa_client_get(client); + query->sa_query.client = client; + query->callback = callback; + query->context = context; + + mad = query->sa_query.mad_buf->mad; + init_mad(mad, agent); + + query->sa_query.callback = callback ? ib_sa_guidinfo_rec_callback : NULL; + query->sa_query.release = ib_sa_guidinfo_rec_release; + + mad->mad_hdr.method = method; + mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_GUID_INFO_REC); + mad->sa_hdr.comp_mask = comp_mask; + + ib_pack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table), rec, + mad->data); + + *sa_query = &query->sa_query; + + ret = send_mad(&query->sa_query, timeout_ms, gfp_mask); + if (ret < 0) + goto err2; + + return ret; + +err2: + *sa_query = NULL; + ib_sa_client_put(query->sa_query.client); + free_mad(&query->sa_query); + +err1: + kfree(query); + return ret; +} +EXPORT_SYMBOL(ib_sa_guid_info_rec_query); + static void send_handler(struct ib_mad_agent *agent, struct ib_mad_send_wc *mad_send_wc) { diff --git a/include/rdma/ib_sa.h b/include/rdma/ib_sa.h index d44a563..8275e53 100644 --- a/include/rdma/ib_sa.h +++ b/include/rdma/ib_sa.h @@ -251,6 +251,28 @@ struct ib_sa_service_rec { u64 data64[2]; }; +#define IB_SA_GUIDINFO_REC_LID IB_SA_COMP_MASK(0) +#define IB_SA_GUIDINFO_REC_BLOCK_NUM IB_SA_COMP_MASK(1) +#define IB_SA_GUIDINFO_REC_RES1 IB_SA_COMP_MASK(2) +#define IB_SA_GUIDINFO_REC_RES2 IB_SA_COMP_MASK(3) +#define IB_SA_GUIDINFO_REC_GID0 IB_SA_COMP_MASK(4) +#define IB_SA_GUIDINFO_REC_GID1 IB_SA_COMP_MASK(5) +#define IB_SA_GUIDINFO_REC_GID2 IB_SA_COMP_MASK(6) +#define IB_SA_GUIDINFO_REC_GID3 IB_SA_COMP_MASK(7) +#define IB_SA_GUIDINFO_REC_GID4 IB_SA_COMP_MASK(8) +#define IB_SA_GUIDINFO_REC_GID5 IB_SA_COMP_MASK(9) +#define IB_SA_GUIDINFO_REC_GID6 IB_SA_COMP_MASK(10) +#define IB_SA_GUIDINFO_REC_GID7 IB_SA_COMP_MASK(11) + +struct ib_sa_guidinfo_rec { + __be16 lid; + u8 block_num; + /* reserved */ + u8 res1; + __be32 res2; + u8 guid_info_list[64]; +}; + struct ib_sa_client { atomic_t users; struct completion comp; @@ -385,4 +407,15 @@ int ib_init_ah_from_path(struct ib_device *device, u8 port_num, */ void ib_sa_unpack_path(void *attribute, struct ib_sa_path_rec *rec); 
+/* Support GuidInfoRecord */ +int ib_sa_guid_info_rec_query(struct ib_sa_client *client, + struct ib_device *device, u8 port_num, + struct ib_sa_guidinfo_rec *rec, + ib_sa_comp_mask comp_mask, u8 method, + int timeout_ms, gfp_t gfp_mask, + void (*callback)(int status, + struct ib_sa_guidinfo_rec *resp, + void *context), + void *context, + struct ib_sa_query **sa_query); #endif /* IB_SA_H */ -- cgit v1.1 From 3045f0920367e625bbec7d66fadb444e673515af Mon Sep 17 00:00:00 2001 From: Jack Morgenstein Date: Tue, 19 Jun 2012 11:21:39 +0300 Subject: IB/core: Move CM_xxx_ATTR_ID macros from cm_msgs.h to ib_cm.h These macros will be reused by the mlx4 SRIOV-IB CM paravirtualization code, and there is no reason to have them declared both in the IB core and in the mlx4 IB driver. Signed-off-by: Jack Morgenstein Signed-off-by: Or Gerlitz Signed-off-by: Roland Dreier --- drivers/infiniband/core/cm_msgs.h | 12 ------------ include/rdma/ib_cm.h | 12 ++++++++++++ 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/drivers/infiniband/core/cm_msgs.h b/drivers/infiniband/core/cm_msgs.h index 7da9b21..be068f4 100644 --- a/drivers/infiniband/core/cm_msgs.h +++ b/drivers/infiniband/core/cm_msgs.h @@ -44,18 +44,6 @@ #define IB_CM_CLASS_VERSION 2 /* IB specification 1.2 */ -#define CM_REQ_ATTR_ID cpu_to_be16(0x0010) -#define CM_MRA_ATTR_ID cpu_to_be16(0x0011) -#define CM_REJ_ATTR_ID cpu_to_be16(0x0012) -#define CM_REP_ATTR_ID cpu_to_be16(0x0013) -#define CM_RTU_ATTR_ID cpu_to_be16(0x0014) -#define CM_DREQ_ATTR_ID cpu_to_be16(0x0015) -#define CM_DREP_ATTR_ID cpu_to_be16(0x0016) -#define CM_SIDR_REQ_ATTR_ID cpu_to_be16(0x0017) -#define CM_SIDR_REP_ATTR_ID cpu_to_be16(0x0018) -#define CM_LAP_ATTR_ID cpu_to_be16(0x0019) -#define CM_APR_ATTR_ID cpu_to_be16(0x001A) - enum cm_msg_sequence { CM_MSG_SEQUENCE_REQ, CM_MSG_SEQUENCE_LAP, diff --git a/include/rdma/ib_cm.h b/include/rdma/ib_cm.h index 83f77ac..0e3ff30 100644 --- a/include/rdma/ib_cm.h +++ b/include/rdma/ib_cm.h @@ -262,6 +262,18 @@ struct ib_cm_event { void *private_data; }; +#define CM_REQ_ATTR_ID cpu_to_be16(0x0010) +#define CM_MRA_ATTR_ID cpu_to_be16(0x0011) +#define CM_REJ_ATTR_ID cpu_to_be16(0x0012) +#define CM_REP_ATTR_ID cpu_to_be16(0x0013) +#define CM_RTU_ATTR_ID cpu_to_be16(0x0014) +#define CM_DREQ_ATTR_ID cpu_to_be16(0x0015) +#define CM_DREP_ATTR_ID cpu_to_be16(0x0016) +#define CM_SIDR_REQ_ATTR_ID cpu_to_be16(0x0017) +#define CM_SIDR_REP_ATTR_ID cpu_to_be16(0x0018) +#define CM_LAP_ATTR_ID cpu_to_be16(0x0019) +#define CM_APR_ATTR_ID cpu_to_be16(0x001A) + /** * ib_cm_handler - User-defined callback to process communication events. * @cm_id: Communication identifier associated with the reported event. -- cgit v1.1 From 354dff1bd8ccd41b6e8421226d586d35e7fb8920 Mon Sep 17 00:00:00 2001 From: Mike Marciniszyn Date: Wed, 27 Jun 2012 18:33:05 -0400 Subject: IB/qib: Fix UC MR refs for immediate operations An MR reference leak exists when handling UC RDMA writes with immediate data because we manipulate the reference counts as if the operation had been a send. This patch moves the last_imm label so that RDMA write operations with immediate data converge at the CQ-building code. The copy/mr deref code is now done correctly prior to the branch to last_imm.
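In outline, each path now drops its own SGE references with a loop of the following shape before branching to last_imm (a sketch distilled from the diff below; put_sge_refs() is an illustrative name, and a later patch in this series consolidates exactly this loop into qib_put_ss()):

	#include <linux/atomic.h>

	/* Drop the MR reference held by each remaining SGE in the state. */
	static void put_sge_refs(struct qib_sge_state *ss)
	{
		while (ss->num_sge) {
			atomic_dec(&ss->sge.mr->refcount);
			if (--ss->num_sge)
				ss->sge = *ss->sg_list++;
		}
	}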
Reviewed-by: Edward Mascarenhas Signed-off-by: Mike Marciniszyn Signed-off-by: Roland Dreier --- drivers/infiniband/hw/qib/qib_uc.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c index ce7387f..70b4cb7 100644 --- a/drivers/infiniband/hw/qib/qib_uc.c +++ b/drivers/infiniband/hw/qib/qib_uc.c @@ -403,7 +403,6 @@ send_last: if (unlikely(wc.byte_len > qp->r_len)) goto rewind; wc.opcode = IB_WC_RECV; -last_imm: qib_copy_sge(&qp->r_sge, data, tlen, 0); while (qp->s_rdma_read_sge.num_sge) { atomic_dec(&qp->s_rdma_read_sge.sge.mr->refcount); @@ -411,6 +410,7 @@ last_imm: qp->s_rdma_read_sge.sge = *qp->s_rdma_read_sge.sg_list++; } +last_imm: wc.wr_id = qp->r_wr_id; wc.status = IB_WC_SUCCESS; wc.qp = &qp->ibqp; @@ -509,6 +509,12 @@ rdma_last_imm: } wc.byte_len = qp->r_len; wc.opcode = IB_WC_RECV_RDMA_WITH_IMM; + qib_copy_sge(&qp->r_sge, data, tlen, 1); + while (qp->r_sge.num_sge) { + atomic_dec(&qp->r_sge.sge.mr->refcount); + if (--qp->r_sge.num_sge) + qp->r_sge.sge = *qp->r_sge.sg_list++; + } goto last_imm; case OP(RDMA_WRITE_LAST): -- cgit v1.1 From 6a82649f217023863d6b1740017e6c3dd6685327 Mon Sep 17 00:00:00 2001 From: Mike Marciniszyn Date: Wed, 27 Jun 2012 18:33:12 -0400 Subject: IB/qib: Avoid returning EBUSY from MR deregister A timing issue can occur where qib_dereg_mr() can return -EBUSY if the MR use count is not zero. This can occur if the MR is de-registered while RDMA read response packets are being progressed from the SDMA ring. The suspicion is that the peer sent an RDMA read request whose response data has already been copied across to the peer. The peer sees the completion of its request and then communicates to the responder that the MR is not needed any longer. The responder tries to de-register the MR, catching some responses remaining in the SDMA ring holding the MR use count. The code now uses a get/put paradigm to track MR use counts and coordinates with the MR de-registration process using a completion when the count has reached zero. A timeout on the delay is in place to catch other EBUSY issues. The reference count protocol is as follows: - The return to the user counts as 1. - A reference from the lk_table or the qib_ibdev counts as 1. - Transient I/O operations increase/decrease the count as necessary. A lot of code duplication has been folded into the new routines init_qib_mregion() and deinit_qib_mregion(). Additionally, explicit initialization of fields to zero is now handled by kzalloc(). Also, the duplicated 'while.*num_sge' code that decrements reference counts has been consolidated in qib_put_ss().
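Reduced to its essentials, the paradigm looks like the sketch below ('obj' and the function names are illustrative; the driver's versions are qib_get_mr(), qib_put_mr(), and qib_dereg_mr() in the diff that follows). The initial reference set by init_qib_mregion() represents the pointer returned to the user, so the count can only reach zero once deregistration has begun:

	#include <linux/atomic.h>
	#include <linux/completion.h>
	#include <linux/errno.h>
	#include <linux/jiffies.h>

	struct obj {
		atomic_t refcount;
		struct completion comp;	/* completed when refcount hits zero */
	};

	static inline void obj_get(struct obj *o)
	{
		atomic_inc(&o->refcount);
	}

	static inline void obj_put(struct obj *o)
	{
		if (atomic_dec_and_test(&o->refcount))
			complete(&o->comp);	/* last reference dropped */
	}

	static int obj_destroy(struct obj *o)
	{
		obj_put(o);	/* drop the reference handed to the user */
		if (!wait_for_completion_timeout(&o->comp, 5 * HZ)) {
			obj_get(o);	/* still in use: restore and report busy */
			return -EBUSY;
		}
		/* no references remain: safe to free */
		return 0;
	}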
Reviewed-by: Ramkrishna Vepa Signed-off-by: Mike Marciniszyn Signed-off-by: Roland Dreier --- drivers/infiniband/hw/qib/qib_keys.c | 84 +++++++----- drivers/infiniband/hw/qib/qib_mr.c | 242 +++++++++++++++++++--------------- drivers/infiniband/hw/qib/qib_qp.c | 21 +-- drivers/infiniband/hw/qib/qib_rc.c | 24 ++-- drivers/infiniband/hw/qib/qib_ruc.c | 14 +- drivers/infiniband/hw/qib/qib_uc.c | 33 +---- drivers/infiniband/hw/qib/qib_ud.c | 12 +- drivers/infiniband/hw/qib/qib_verbs.c | 10 +- drivers/infiniband/hw/qib/qib_verbs.h | 28 +++- 9 files changed, 244 insertions(+), 224 deletions(-) diff --git a/drivers/infiniband/hw/qib/qib_keys.c b/drivers/infiniband/hw/qib/qib_keys.c index 8fd19a4..8b5ee3a 100644 --- a/drivers/infiniband/hw/qib/qib_keys.c +++ b/drivers/infiniband/hw/qib/qib_keys.c @@ -35,21 +35,40 @@ /** * qib_alloc_lkey - allocate an lkey - * @rkt: lkey table in which to allocate the lkey * @mr: memory region that this lkey protects + * @dma_region: 0->normal key, 1->restricted DMA key + * + * Returns 0 if successful, otherwise returns -errno. + * + * Increments mr reference count and sets published + * as required. + * + * Sets the lkey field mr for non-dma regions. * - * Returns 1 if successful, otherwise returns 0. */ -int qib_alloc_lkey(struct qib_lkey_table *rkt, struct qib_mregion *mr) +int qib_alloc_lkey(struct qib_mregion *mr, int dma_region) { unsigned long flags; u32 r; u32 n; - int ret; + int ret = 0; + struct qib_ibdev *dev = to_idev(mr->pd->device); + struct qib_lkey_table *rkt = &dev->lk_table; spin_lock_irqsave(&rkt->lock, flags); + /* special case for dma_mr lkey == 0 */ + if (dma_region) { + /* should the dma_mr be relative to the pd? */ + if (!dev->dma_mr) { + qib_get_mr(mr); + dev->dma_mr = mr; + mr->lkey_published = 1; + } + goto success; + } + /* Find the next available LKEY */ r = rkt->next; n = r; @@ -57,11 +76,8 @@ int qib_alloc_lkey(struct qib_lkey_table *rkt, struct qib_mregion *mr) if (rkt->table[r] == NULL) break; r = (r + 1) & (rkt->max - 1); - if (r == n) { - spin_unlock_irqrestore(&rkt->lock, flags); - ret = 0; + if (r == n) goto bail; - } } rkt->next = (r + 1) & (rkt->max - 1); /* @@ -76,46 +92,50 @@ int qib_alloc_lkey(struct qib_lkey_table *rkt, struct qib_mregion *mr) mr->lkey |= 1 << 8; rkt->gen++; } + qib_get_mr(mr); rkt->table[r] = mr; + mr->lkey_published = 1; +success: spin_unlock_irqrestore(&rkt->lock, flags); - - ret = 1; - -bail: +out: return ret; +bail: + spin_unlock_irqrestore(&rkt->lock, flags); + ret = -ENOMEM; + goto out; } /** * qib_free_lkey - free an lkey - * @rkt: table from which to free the lkey - * @lkey: lkey id to free + * @mr: mr to free from tables */ -int qib_free_lkey(struct qib_ibdev *dev, struct qib_mregion *mr) +void qib_free_lkey(struct qib_mregion *mr) { unsigned long flags; u32 lkey = mr->lkey; u32 r; - int ret; + struct qib_ibdev *dev = to_idev(mr->pd->device); + struct qib_lkey_table *rkt = &dev->lk_table; + + spin_lock_irqsave(&rkt->lock, flags); + if (!mr->lkey_published) + goto out; + mr->lkey_published = 0; + spin_lock_irqsave(&dev->lk_table.lock, flags); if (lkey == 0) { if (dev->dma_mr && dev->dma_mr == mr) { - ret = atomic_read(&dev->dma_mr->refcount); - if (!ret) - dev->dma_mr = NULL; - } else - ret = 0; + qib_put_mr(dev->dma_mr); + dev->dma_mr = NULL; + } } else { r = lkey >> (32 - ib_qib_lkey_table_size); - ret = atomic_read(&dev->lk_table.table[r]->refcount); - if (!ret) - dev->lk_table.table[r] = NULL; + qib_put_mr(dev->dma_mr); + rkt->table[r] = NULL; } +out: spin_unlock_irqrestore(&dev->lk_table.lock, 
flags); - - if (ret) - ret = -EBUSY; - return ret; } /** @@ -150,7 +170,7 @@ int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd, goto bail; if (!dev->dma_mr) goto bail; - atomic_inc(&dev->dma_mr->refcount); + qib_get_mr(dev->dma_mr); spin_unlock_irqrestore(&rkt->lock, flags); isge->mr = dev->dma_mr; @@ -171,7 +191,7 @@ int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd, off + sge->length > mr->length || (mr->access_flags & acc) != acc)) goto bail; - atomic_inc(&mr->refcount); + qib_get_mr(mr); spin_unlock_irqrestore(&rkt->lock, flags); off += mr->offset; @@ -245,7 +265,7 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge, goto bail; if (!dev->dma_mr) goto bail; - atomic_inc(&dev->dma_mr->refcount); + qib_get_mr(dev->dma_mr); spin_unlock_irqrestore(&rkt->lock, flags); sge->mr = dev->dma_mr; @@ -265,7 +285,7 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge, if (unlikely(vaddr < mr->iova || off + len > mr->length || (mr->access_flags & acc) == 0)) goto bail; - atomic_inc(&mr->refcount); + qib_get_mr(mr); spin_unlock_irqrestore(&rkt->lock, flags); off += mr->offset; diff --git a/drivers/infiniband/hw/qib/qib_mr.c b/drivers/infiniband/hw/qib/qib_mr.c index 08944e2..6a2028a 100644 --- a/drivers/infiniband/hw/qib/qib_mr.c +++ b/drivers/infiniband/hw/qib/qib_mr.c @@ -47,6 +47,43 @@ static inline struct qib_fmr *to_ifmr(struct ib_fmr *ibfmr) return container_of(ibfmr, struct qib_fmr, ibfmr); } +static int init_qib_mregion(struct qib_mregion *mr, struct ib_pd *pd, + int count) +{ + int m, i = 0; + int rval = 0; + + m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ; + for (; i < m; i++) { + mr->map[i] = kzalloc(sizeof *mr->map[0], GFP_KERNEL); + if (!mr->map[i]) + goto bail; + } + mr->mapsz = m; + init_completion(&mr->comp); + /* count returning the ptr to user */ + atomic_set(&mr->refcount, 1); + mr->pd = pd; + mr->max_segs = count; +out: + return rval; +bail: + while (i) + kfree(mr->map[--i]); + rval = -ENOMEM; + goto out; +} + +static void deinit_qib_mregion(struct qib_mregion *mr) +{ + int i = mr->mapsz; + + mr->mapsz = 0; + while (i) + kfree(mr->map[--i]); +} + + /** * qib_get_dma_mr - get a DMA memory region * @pd: protection domain for this memory region @@ -58,10 +95,9 @@ static inline struct qib_fmr *to_ifmr(struct ib_fmr *ibfmr) */ struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc) { - struct qib_ibdev *dev = to_idev(pd->device); - struct qib_mr *mr; + struct qib_mr *mr = NULL; struct ib_mr *ret; - unsigned long flags; + int rval; if (to_ipd(pd)->user) { ret = ERR_PTR(-EPERM); @@ -74,61 +110,64 @@ struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc) goto bail; } - mr->mr.access_flags = acc; - atomic_set(&mr->mr.refcount, 0); + rval = init_qib_mregion(&mr->mr, pd, 0); + if (rval) { + ret = ERR_PTR(rval); + goto bail; + } - spin_lock_irqsave(&dev->lk_table.lock, flags); - if (!dev->dma_mr) - dev->dma_mr = &mr->mr; - spin_unlock_irqrestore(&dev->lk_table.lock, flags); + rval = qib_alloc_lkey(&mr->mr, 1); + if (rval) { + ret = ERR_PTR(rval); + goto bail_mregion; + } + + mr->mr.access_flags = acc; ret = &mr->ibmr; +done: + return ret; +bail_mregion: + deinit_qib_mregion(&mr->mr); bail: - return ret; + kfree(mr); + goto done; } -static struct qib_mr *alloc_mr(int count, struct qib_lkey_table *lk_table) +static struct qib_mr *alloc_mr(int count, struct ib_pd *pd) { struct qib_mr *mr; - int m, i = 0; + int rval = -ENOMEM; + int m; /* Allocate struct plus pointers to first level page tables. 
*/ m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ; - mr = kmalloc(sizeof *mr + m * sizeof mr->mr.map[0], GFP_KERNEL); + mr = kzalloc(sizeof *mr + m * sizeof mr->mr.map[0], GFP_KERNEL); if (!mr) - goto done; - - /* Allocate first level page tables. */ - for (; i < m; i++) { - mr->mr.map[i] = kmalloc(sizeof *mr->mr.map[0], GFP_KERNEL); - if (!mr->mr.map[i]) - goto bail; - } - mr->mr.mapsz = m; - mr->mr.page_shift = 0; - mr->mr.max_segs = count; + goto bail; + rval = init_qib_mregion(&mr->mr, pd, count); + if (rval) + goto bail; /* * ib_reg_phys_mr() will initialize mr->ibmr except for * lkey and rkey. */ - if (!qib_alloc_lkey(lk_table, &mr->mr)) - goto bail; + rval = qib_alloc_lkey(&mr->mr, 0); + if (rval) + goto bail_mregion; mr->ibmr.lkey = mr->mr.lkey; mr->ibmr.rkey = mr->mr.lkey; +done: + return mr; - atomic_set(&mr->mr.refcount, 0); - goto done; - +bail_mregion: + deinit_qib_mregion(&mr->mr); bail: - while (i) - kfree(mr->mr.map[--i]); kfree(mr); - mr = NULL; - -done: - return mr; + mr = ERR_PTR(rval); + goto done; } /** @@ -148,19 +187,15 @@ struct ib_mr *qib_reg_phys_mr(struct ib_pd *pd, int n, m, i; struct ib_mr *ret; - mr = alloc_mr(num_phys_buf, &to_idev(pd->device)->lk_table); - if (mr == NULL) { - ret = ERR_PTR(-ENOMEM); + mr = alloc_mr(num_phys_buf, pd); + if (IS_ERR(mr)) { + ret = (struct ib_mr *)mr; goto bail; } - mr->mr.pd = pd; mr->mr.user_base = *iova_start; mr->mr.iova = *iova_start; - mr->mr.length = 0; - mr->mr.offset = 0; mr->mr.access_flags = acc; - mr->umem = NULL; m = 0; n = 0; @@ -186,7 +221,6 @@ bail: * @pd: protection domain for this memory region * @start: starting userspace address * @length: length of region to register - * @virt_addr: virtual address to use (from HCA's point of view) * @mr_access_flags: access flags for this memory region * @udata: unused by the QLogic_IB driver * @@ -216,14 +250,13 @@ struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, list_for_each_entry(chunk, &umem->chunk_list, list) n += chunk->nents; - mr = alloc_mr(n, &to_idev(pd->device)->lk_table); - if (!mr) { - ret = ERR_PTR(-ENOMEM); + mr = alloc_mr(n, pd); + if (IS_ERR(mr)) { + ret = (struct ib_mr *)mr; ib_umem_release(umem); goto bail; } - mr->mr.pd = pd; mr->mr.user_base = start; mr->mr.iova = virt_addr; mr->mr.length = length; @@ -271,21 +304,25 @@ bail: int qib_dereg_mr(struct ib_mr *ibmr) { struct qib_mr *mr = to_imr(ibmr); - struct qib_ibdev *dev = to_idev(ibmr->device); - int ret; - int i; - - ret = qib_free_lkey(dev, &mr->mr); - if (ret) - return ret; - - i = mr->mr.mapsz; - while (i) - kfree(mr->mr.map[--i]); + int ret = 0; + unsigned long timeout; + + qib_free_lkey(&mr->mr); + + qib_put_mr(&mr->mr); /* will set completion if last */ + timeout = wait_for_completion_timeout(&mr->mr.comp, + 5 * HZ); + if (!timeout) { + qib_get_mr(&mr->mr); + ret = -EBUSY; + goto out; + } + deinit_qib_mregion(&mr->mr); if (mr->umem) ib_umem_release(mr->umem); kfree(mr); - return 0; +out: + return ret; } /* @@ -298,17 +335,9 @@ struct ib_mr *qib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len) { struct qib_mr *mr; - mr = alloc_mr(max_page_list_len, &to_idev(pd->device)->lk_table); - if (mr == NULL) - return ERR_PTR(-ENOMEM); - - mr->mr.pd = pd; - mr->mr.user_base = 0; - mr->mr.iova = 0; - mr->mr.length = 0; - mr->mr.offset = 0; - mr->mr.access_flags = 0; - mr->umem = NULL; + mr = alloc_mr(max_page_list_len, pd); + if (IS_ERR(mr)) + return (struct ib_mr *)mr; return &mr->ibmr; } @@ -322,11 +351,11 @@ qib_alloc_fast_reg_page_list(struct ib_device *ibdev, int page_list_len) 
if (size > PAGE_SIZE) return ERR_PTR(-EINVAL); - pl = kmalloc(sizeof *pl, GFP_KERNEL); + pl = kzalloc(sizeof *pl, GFP_KERNEL); if (!pl) return ERR_PTR(-ENOMEM); - pl->page_list = kmalloc(size, GFP_KERNEL); + pl->page_list = kzalloc(size, GFP_KERNEL); if (!pl->page_list) goto err_free; @@ -355,57 +384,47 @@ struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags, struct ib_fmr_attr *fmr_attr) { struct qib_fmr *fmr; - int m, i = 0; + int m; struct ib_fmr *ret; + int rval = -ENOMEM; /* Allocate struct plus pointers to first level page tables. */ m = (fmr_attr->max_pages + QIB_SEGSZ - 1) / QIB_SEGSZ; - fmr = kmalloc(sizeof *fmr + m * sizeof fmr->mr.map[0], GFP_KERNEL); + fmr = kzalloc(sizeof *fmr + m * sizeof fmr->mr.map[0], GFP_KERNEL); if (!fmr) goto bail; - /* Allocate first level page tables. */ - for (; i < m; i++) { - fmr->mr.map[i] = kmalloc(sizeof *fmr->mr.map[0], - GFP_KERNEL); - if (!fmr->mr.map[i]) - goto bail; - } - fmr->mr.mapsz = m; + rval = init_qib_mregion(&fmr->mr, pd, fmr_attr->max_pages); + if (rval) + goto bail; /* * ib_alloc_fmr() will initialize fmr->ibfmr except for lkey & * rkey. */ - if (!qib_alloc_lkey(&to_idev(pd->device)->lk_table, &fmr->mr)) - goto bail; + rval = qib_alloc_lkey(&fmr->mr, 0); + if (rval) + goto bail_mregion; fmr->ibfmr.rkey = fmr->mr.lkey; fmr->ibfmr.lkey = fmr->mr.lkey; /* * Resources are allocated but no valid mapping (RKEY can't be * used). */ - fmr->mr.pd = pd; - fmr->mr.user_base = 0; - fmr->mr.iova = 0; - fmr->mr.length = 0; - fmr->mr.offset = 0; fmr->mr.access_flags = mr_access_flags; fmr->mr.max_segs = fmr_attr->max_pages; fmr->mr.page_shift = fmr_attr->page_shift; - atomic_set(&fmr->mr.refcount, 0); ret = &fmr->ibfmr; - goto done; +done: + return ret; +bail_mregion: + deinit_qib_mregion(&fmr->mr); bail: - while (i) - kfree(fmr->mr.map[--i]); kfree(fmr); - ret = ERR_PTR(-ENOMEM); - -done: - return ret; + ret = ERR_PTR(rval); + goto done; } /** @@ -428,7 +447,8 @@ int qib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, u32 ps; int ret; - if (atomic_read(&fmr->mr.refcount)) + i = atomic_read(&fmr->mr.refcount); + if (i > 2) return -EBUSY; if (list_len > fmr->mr.max_segs) { @@ -490,16 +510,20 @@ int qib_unmap_fmr(struct list_head *fmr_list) int qib_dealloc_fmr(struct ib_fmr *ibfmr) { struct qib_fmr *fmr = to_ifmr(ibfmr); - int ret; - int i; - - ret = qib_free_lkey(to_idev(ibfmr->device), &fmr->mr); - if (ret) - return ret; - - i = fmr->mr.mapsz; - while (i) - kfree(fmr->mr.map[--i]); + int ret = 0; + unsigned long timeout; + + qib_free_lkey(&fmr->mr); + qib_put_mr(&fmr->mr); /* will set completion if last */ + timeout = wait_for_completion_timeout(&fmr->mr.comp, + 5 * HZ); + if (!timeout) { + qib_get_mr(&fmr->mr); + ret = -EBUSY; + goto out; + } + deinit_qib_mregion(&fmr->mr); kfree(fmr); - return 0; +out: + return ret; } diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c index 1ce56b5..693041b 100644 --- a/drivers/infiniband/hw/qib/qib_qp.c +++ b/drivers/infiniband/hw/qib/qib_qp.c @@ -406,18 +406,9 @@ static void clear_mr_refs(struct qib_qp *qp, int clr_sends) unsigned n; if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags)) - while (qp->s_rdma_read_sge.num_sge) { - atomic_dec(&qp->s_rdma_read_sge.sge.mr->refcount); - if (--qp->s_rdma_read_sge.num_sge) - qp->s_rdma_read_sge.sge = - *qp->s_rdma_read_sge.sg_list++; - } + qib_put_ss(&qp->s_rdma_read_sge); - while (qp->r_sge.num_sge) { - atomic_dec(&qp->r_sge.sge.mr->refcount); - if (--qp->r_sge.num_sge) - qp->r_sge.sge = *qp->r_sge.sg_list++; - } 
+ qib_put_ss(&qp->r_sge); if (clr_sends) { while (qp->s_last != qp->s_head) { @@ -427,7 +418,7 @@ static void clear_mr_refs(struct qib_qp *qp, int clr_sends) for (i = 0; i < wqe->wr.num_sge; i++) { struct qib_sge *sge = &wqe->sg_list[i]; - atomic_dec(&sge->mr->refcount); + qib_put_mr(sge->mr); } if (qp->ibqp.qp_type == IB_QPT_UD || qp->ibqp.qp_type == IB_QPT_SMI || @@ -437,7 +428,7 @@ static void clear_mr_refs(struct qib_qp *qp, int clr_sends) qp->s_last = 0; } if (qp->s_rdma_mr) { - atomic_dec(&qp->s_rdma_mr->refcount); + qib_put_mr(qp->s_rdma_mr); qp->s_rdma_mr = NULL; } } @@ -450,7 +441,7 @@ static void clear_mr_refs(struct qib_qp *qp, int clr_sends) if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST && e->rdma_sge.mr) { - atomic_dec(&e->rdma_sge.mr->refcount); + qib_put_mr(e->rdma_sge.mr); e->rdma_sge.mr = NULL; } } @@ -495,7 +486,7 @@ int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err) if (!(qp->s_flags & QIB_S_BUSY)) { qp->s_hdrwords = 0; if (qp->s_rdma_mr) { - atomic_dec(&qp->s_rdma_mr->refcount); + qib_put_mr(qp->s_rdma_mr); qp->s_rdma_mr = NULL; } if (qp->s_tx) { diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c index b641416..3ab3413 100644 --- a/drivers/infiniband/hw/qib/qib_rc.c +++ b/drivers/infiniband/hw/qib/qib_rc.c @@ -95,7 +95,7 @@ static int qib_make_rc_ack(struct qib_ibdev *dev, struct qib_qp *qp, case OP(RDMA_READ_RESPONSE_ONLY): e = &qp->s_ack_queue[qp->s_tail_ack_queue]; if (e->rdma_sge.mr) { - atomic_dec(&e->rdma_sge.mr->refcount); + qib_put_mr(e->rdma_sge.mr); e->rdma_sge.mr = NULL; } /* FALLTHROUGH */ @@ -133,7 +133,7 @@ static int qib_make_rc_ack(struct qib_ibdev *dev, struct qib_qp *qp, /* Copy SGE state in case we need to resend */ qp->s_rdma_mr = e->rdma_sge.mr; if (qp->s_rdma_mr) - atomic_inc(&qp->s_rdma_mr->refcount); + qib_get_mr(qp->s_rdma_mr); qp->s_ack_rdma_sge.sge = e->rdma_sge; qp->s_ack_rdma_sge.num_sge = 1; qp->s_cur_sge = &qp->s_ack_rdma_sge; @@ -172,7 +172,7 @@ static int qib_make_rc_ack(struct qib_ibdev *dev, struct qib_qp *qp, qp->s_cur_sge = &qp->s_ack_rdma_sge; qp->s_rdma_mr = qp->s_ack_rdma_sge.sge.mr; if (qp->s_rdma_mr) - atomic_inc(&qp->s_rdma_mr->refcount); + qib_get_mr(qp->s_rdma_mr); len = qp->s_ack_rdma_sge.sge.sge_length; if (len > pmtu) len = pmtu; @@ -1012,7 +1012,7 @@ void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr) for (i = 0; i < wqe->wr.num_sge; i++) { struct qib_sge *sge = &wqe->sg_list[i]; - atomic_dec(&sge->mr->refcount); + qib_put_mr(sge->mr); } /* Post a send completion queue entry if requested. */ if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) || @@ -1068,7 +1068,7 @@ static struct qib_swqe *do_rc_completion(struct qib_qp *qp, for (i = 0; i < wqe->wr.num_sge; i++) { struct qib_sge *sge = &wqe->sg_list[i]; - atomic_dec(&sge->mr->refcount); + qib_put_mr(sge->mr); } /* Post a send completion queue entry if requested. 
*/ if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) || @@ -1730,7 +1730,7 @@ static int qib_rc_rcv_error(struct qib_other_headers *ohdr, if (unlikely(offset + len != e->rdma_sge.sge_length)) goto unlock_done; if (e->rdma_sge.mr) { - atomic_dec(&e->rdma_sge.mr->refcount); + qib_put_mr(e->rdma_sge.mr); e->rdma_sge.mr = NULL; } if (len != 0) { @@ -2024,11 +2024,7 @@ send_last: if (unlikely(wc.byte_len > qp->r_len)) goto nack_inv; qib_copy_sge(&qp->r_sge, data, tlen, 1); - while (qp->r_sge.num_sge) { - atomic_dec(&qp->r_sge.sge.mr->refcount); - if (--qp->r_sge.num_sge) - qp->r_sge.sge = *qp->r_sge.sg_list++; - } + qib_put_ss(&qp->r_sge); qp->r_msn++; if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) break; @@ -2116,7 +2112,7 @@ send_last: } e = &qp->s_ack_queue[qp->r_head_ack_queue]; if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) { - atomic_dec(&e->rdma_sge.mr->refcount); + qib_put_mr(e->rdma_sge.mr); e->rdma_sge.mr = NULL; } reth = &ohdr->u.rc.reth; @@ -2188,7 +2184,7 @@ send_last: } e = &qp->s_ack_queue[qp->r_head_ack_queue]; if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) { - atomic_dec(&e->rdma_sge.mr->refcount); + qib_put_mr(e->rdma_sge.mr); e->rdma_sge.mr = NULL; } ateth = &ohdr->u.atomic_eth; @@ -2210,7 +2206,7 @@ send_last: (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr, be64_to_cpu(ateth->compare_data), sdata); - atomic_dec(&qp->r_sge.sge.mr->refcount); + qib_put_mr(qp->r_sge.sge.mr); qp->r_sge.num_sge = 0; e->opcode = opcode; e->sent = 0; diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c index c0ee7e0..357b6cfc 100644 --- a/drivers/infiniband/hw/qib/qib_ruc.c +++ b/drivers/infiniband/hw/qib/qib_ruc.c @@ -110,7 +110,7 @@ bad_lkey: while (j) { struct qib_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge; - atomic_dec(&sge->mr->refcount); + qib_put_mr(sge->mr); } ss->num_sge = 0; memset(&wc, 0, sizeof(wc)); @@ -501,7 +501,7 @@ again: (u64) atomic64_add_return(sdata, maddr) - sdata : (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr, sdata, wqe->wr.wr.atomic.swap); - atomic_dec(&qp->r_sge.sge.mr->refcount); + qib_put_mr(qp->r_sge.sge.mr); qp->r_sge.num_sge = 0; goto send_comp; @@ -525,7 +525,7 @@ again: sge->sge_length -= len; if (sge->sge_length == 0) { if (!release) - atomic_dec(&sge->mr->refcount); + qib_put_mr(sge->mr); if (--sqp->s_sge.num_sge) *sge = *sqp->s_sge.sg_list++; } else if (sge->length == 0 && sge->mr->lkey) { @@ -542,11 +542,7 @@ again: sqp->s_len -= len; } if (release) - while (qp->r_sge.num_sge) { - atomic_dec(&qp->r_sge.sge.mr->refcount); - if (--qp->r_sge.num_sge) - qp->r_sge.sge = *qp->r_sge.sg_list++; - } + qib_put_ss(&qp->r_sge); if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) goto send_comp; @@ -782,7 +778,7 @@ void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe, for (i = 0; i < wqe->wr.num_sge; i++) { struct qib_sge *sge = &wqe->sg_list[i]; - atomic_dec(&sge->mr->refcount); + qib_put_mr(sge->mr); } if (qp->ibqp.qp_type == IB_QPT_UD || qp->ibqp.qp_type == IB_QPT_SMI || diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c index 70b4cb7..aa3a803 100644 --- a/drivers/infiniband/hw/qib/qib_uc.c +++ b/drivers/infiniband/hw/qib/qib_uc.c @@ -281,11 +281,7 @@ inv: set_bit(QIB_R_REWIND_SGE, &qp->r_aflags); qp->r_sge.num_sge = 0; } else - while (qp->r_sge.num_sge) { - atomic_dec(&qp->r_sge.sge.mr->refcount); - if (--qp->r_sge.num_sge) - qp->r_sge.sge = *qp->r_sge.sg_list++; - } + qib_put_ss(&qp->r_sge); qp->r_state = OP(SEND_LAST); switch (opcode) { case OP(SEND_FIRST): @@ 
-404,12 +400,7 @@ send_last: goto rewind; wc.opcode = IB_WC_RECV; qib_copy_sge(&qp->r_sge, data, tlen, 0); - while (qp->s_rdma_read_sge.num_sge) { - atomic_dec(&qp->s_rdma_read_sge.sge.mr->refcount); - if (--qp->s_rdma_read_sge.num_sge) - qp->s_rdma_read_sge.sge = - *qp->s_rdma_read_sge.sg_list++; - } + qib_put_ss(&qp->s_rdma_read_sge); last_imm: wc.wr_id = qp->r_wr_id; wc.status = IB_WC_SUCCESS; @@ -493,13 +484,7 @@ rdma_last_imm: if (unlikely(tlen + qp->r_rcv_len != qp->r_len)) goto drop; if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags)) - while (qp->s_rdma_read_sge.num_sge) { - atomic_dec(&qp->s_rdma_read_sge.sge.mr-> - refcount); - if (--qp->s_rdma_read_sge.num_sge) - qp->s_rdma_read_sge.sge = - *qp->s_rdma_read_sge.sg_list++; - } + qib_put_ss(&qp->s_rdma_read_sge); else { ret = qib_get_rwqe(qp, 1); if (ret < 0) @@ -510,11 +495,7 @@ rdma_last_imm: wc.byte_len = qp->r_len; wc.opcode = IB_WC_RECV_RDMA_WITH_IMM; qib_copy_sge(&qp->r_sge, data, tlen, 1); - while (qp->r_sge.num_sge) { - atomic_dec(&qp->r_sge.sge.mr->refcount); - if (--qp->r_sge.num_sge) - qp->r_sge.sge = *qp->r_sge.sg_list++; - } + qib_put_ss(&qp->r_sge); goto last_imm; case OP(RDMA_WRITE_LAST): @@ -530,11 +511,7 @@ rdma_last: if (unlikely(tlen + qp->r_rcv_len != qp->r_len)) goto drop; qib_copy_sge(&qp->r_sge, data, tlen, 1); - while (qp->r_sge.num_sge) { - atomic_dec(&qp->r_sge.sge.mr->refcount); - if (--qp->r_sge.num_sge) - qp->r_sge.sge = *qp->r_sge.sg_list++; - } + qib_put_ss(&qp->r_sge); break; default: diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c index a468bf2..d6c7fe7 100644 --- a/drivers/infiniband/hw/qib/qib_ud.c +++ b/drivers/infiniband/hw/qib/qib_ud.c @@ -194,11 +194,7 @@ static void qib_ud_loopback(struct qib_qp *sqp, struct qib_swqe *swqe) } length -= len; } - while (qp->r_sge.num_sge) { - atomic_dec(&qp->r_sge.sge.mr->refcount); - if (--qp->r_sge.num_sge) - qp->r_sge.sge = *qp->r_sge.sg_list++; - } + qib_put_ss(&qp->r_sge); if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) goto bail_unlock; wc.wr_id = qp->r_wr_id; @@ -556,11 +552,7 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, } else qib_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1); qib_copy_sge(&qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh), 1); - while (qp->r_sge.num_sge) { - atomic_dec(&qp->r_sge.sge.mr->refcount); - if (--qp->r_sge.num_sge) - qp->r_sge.sge = *qp->r_sge.sg_list++; - } + qib_put_ss(&qp->r_sge); if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) return; wc.wr_id = qp->r_wr_id; diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c index 7b6c3bf..76d7ce8 100644 --- a/drivers/infiniband/hw/qib/qib_verbs.c +++ b/drivers/infiniband/hw/qib/qib_verbs.c @@ -183,7 +183,7 @@ void qib_copy_sge(struct qib_sge_state *ss, void *data, u32 length, int release) sge->sge_length -= len; if (sge->sge_length == 0) { if (release) - atomic_dec(&sge->mr->refcount); + qib_put_mr(sge->mr); if (--ss->num_sge) *sge = *ss->sg_list++; } else if (sge->length == 0 && sge->mr->lkey) { @@ -224,7 +224,7 @@ void qib_skip_sge(struct qib_sge_state *ss, u32 length, int release) sge->sge_length -= len; if (sge->sge_length == 0) { if (release) - atomic_dec(&sge->mr->refcount); + qib_put_mr(sge->mr); if (--ss->num_sge) *sge = *ss->sg_list++; } else if (sge->length == 0 && sge->mr->lkey) { @@ -435,7 +435,7 @@ bail_inval_free: while (j) { struct qib_sge *sge = &wqe->sg_list[--j]; - atomic_dec(&sge->mr->refcount); + qib_put_mr(sge->mr); } bail_inval: ret = 
-EINVAL; @@ -978,7 +978,7 @@ void qib_put_txreq(struct qib_verbs_txreq *tx) if (atomic_dec_and_test(&qp->refcount)) wake_up(&qp->wait); if (tx->mr) { - atomic_dec(&tx->mr->refcount); + qib_put_mr(tx->mr); tx->mr = NULL; } if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF) { @@ -1336,7 +1336,7 @@ done: } qib_sendbuf_done(dd, pbufn); if (qp->s_rdma_mr) { - atomic_dec(&qp->s_rdma_mr->refcount); + qib_put_mr(qp->s_rdma_mr); qp->s_rdma_mr = NULL; } if (qp->s_wqe) { diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h index 4876060..4a2277b 100644 --- a/drivers/infiniband/hw/qib/qib_verbs.h +++ b/drivers/infiniband/hw/qib/qib_verbs.h @@ -41,6 +41,7 @@ #include #include #include +#include #include #include @@ -302,6 +303,8 @@ struct qib_mregion { u32 max_segs; /* number of qib_segs in all the arrays */ u32 mapsz; /* size of the map array */ u8 page_shift; /* 0 - non unform/non powerof2 sizes */ + u8 lkey_published; /* in global table */ + struct completion comp; /* complete when refcount goes to zero */ atomic_t refcount; struct qib_segarray *map[0]; /* the segments */ }; @@ -944,9 +947,9 @@ int qib_post_ud_send(struct qib_qp *qp, struct ib_send_wr *wr); void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, int has_grh, void *data, u32 tlen, struct qib_qp *qp); -int qib_alloc_lkey(struct qib_lkey_table *rkt, struct qib_mregion *mr); +int qib_alloc_lkey(struct qib_mregion *mr, int dma_region); -int qib_free_lkey(struct qib_ibdev *dev, struct qib_mregion *mr); +void qib_free_lkey(struct qib_mregion *mr); int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd, struct qib_sge *isge, struct ib_sge *sge, int acc); @@ -1014,6 +1017,27 @@ int qib_unmap_fmr(struct list_head *fmr_list); int qib_dealloc_fmr(struct ib_fmr *ibfmr); +static inline void qib_get_mr(struct qib_mregion *mr) +{ + atomic_inc(&mr->refcount); +} + +static inline void qib_put_mr(struct qib_mregion *mr) +{ + if (unlikely(atomic_dec_and_test(&mr->refcount))) + complete(&mr->comp); +} + +static inline void qib_put_ss(struct qib_sge_state *ss) +{ + while (ss->num_sge) { + qib_put_mr(ss->sge.mr); + if (--ss->num_sge) + ss->sge = *ss->sg_list++; + } +} + + void qib_release_mmap_info(struct kref *ref); struct qib_mmap_info *qib_create_mmap_info(struct qib_ibdev *dev, u32 size, -- cgit v1.1 From 8aac4cc3a9d7d7c2f203b7a8db521b604cfb5dc9 Mon Sep 17 00:00:00 2001 From: Mike Marciniszyn Date: Wed, 27 Jun 2012 18:33:19 -0400 Subject: IB/qib: RCU locking for MR validation Profiling indicates that MR validation locking is expensive. The MR table is largely read-only and is a suitable candidate for RCU locking. The patch uses RCU locking during validation to eliminate one lock/unlock during that validation. Reviewed-by: Mike Heinz Signed-off-by: Mike Marciniszyn Signed-off-by: Roland Dreier --- drivers/infiniband/hw/qib/qib_keys.c | 98 ++++++++++++++++++----------------- drivers/infiniband/hw/qib/qib_mr.c | 7 +++ drivers/infiniband/hw/qib/qib_verbs.c | 4 +- drivers/infiniband/hw/qib/qib_verbs.h | 7 ++- 4 files changed, 66 insertions(+), 50 deletions(-) diff --git a/drivers/infiniband/hw/qib/qib_keys.c b/drivers/infiniband/hw/qib/qib_keys.c index 8b5ee3a..970165b 100644 --- a/drivers/infiniband/hw/qib/qib_keys.c +++ b/drivers/infiniband/hw/qib/qib_keys.c @@ -40,8 +40,7 @@ * * Returns 0 if successful, otherwise returns -errno. * - * Increments mr reference count and sets published - * as required. + * Increments mr reference count as required. * * Sets the lkey field mr for non-dma regions. 
* @@ -60,10 +59,12 @@ int qib_alloc_lkey(struct qib_mregion *mr, int dma_region) /* special case for dma_mr lkey == 0 */ if (dma_region) { - /* should the dma_mr be relative to the pd? */ - if (!dev->dma_mr) { + struct qib_mregion *tmr; + + tmr = rcu_dereference(dev->dma_mr); + if (!tmr) { qib_get_mr(mr); - dev->dma_mr = mr; + rcu_assign_pointer(dev->dma_mr, mr); mr->lkey_published = 1; } goto success; @@ -93,7 +94,7 @@ int qib_alloc_lkey(struct qib_mregion *mr, int dma_region) rkt->gen++; } qib_get_mr(mr); - rkt->table[r] = mr; + rcu_assign_pointer(rkt->table[r], mr); mr->lkey_published = 1; success: spin_unlock_irqrestore(&rkt->lock, flags); @@ -120,33 +121,30 @@ void qib_free_lkey(struct qib_mregion *mr) spin_lock_irqsave(&rkt->lock, flags); if (!mr->lkey_published) goto out; - mr->lkey_published = 0; - - - spin_lock_irqsave(&dev->lk_table.lock, flags); - if (lkey == 0) { - if (dev->dma_mr && dev->dma_mr == mr) { - qib_put_mr(dev->dma_mr); - dev->dma_mr = NULL; - } - } else { + if (lkey == 0) + rcu_assign_pointer(dev->dma_mr, NULL); + else { r = lkey >> (32 - ib_qib_lkey_table_size); - qib_put_mr(dev->dma_mr); - rkt->table[r] = NULL; + rcu_assign_pointer(rkt->table[r], NULL); } + qib_put_mr(mr); + mr->lkey_published = 0; out: - spin_unlock_irqrestore(&dev->lk_table.lock, flags); + spin_unlock_irqrestore(&rkt->lock, flags); } /** * qib_lkey_ok - check IB SGE for validity and initialize * @rkt: table containing lkey to check SGE against + * @pd: protection domain * @isge: outgoing internal SGE * @sge: SGE to check * @acc: access flags * * Return 1 if valid and successful, otherwise returns 0. * + * increments the reference count upon success + * * Check the IB SGE for validity and initialize our internal version * of it. */ @@ -156,24 +154,25 @@ int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd, struct qib_mregion *mr; unsigned n, m; size_t off; - unsigned long flags; /* * We use LKEY == zero for kernel virtual addresses * (see qib_get_dma_mr and qib_dma.c). 
*/ - spin_lock_irqsave(&rkt->lock, flags); + rcu_read_lock(); if (sge->lkey == 0) { struct qib_ibdev *dev = to_idev(pd->ibpd.device); if (pd->user) goto bail; - if (!dev->dma_mr) + mr = rcu_dereference(dev->dma_mr); + if (!mr) + goto bail; + if (unlikely(!atomic_inc_not_zero(&mr->refcount))) goto bail; - qib_get_mr(dev->dma_mr); - spin_unlock_irqrestore(&rkt->lock, flags); + rcu_read_unlock(); - isge->mr = dev->dma_mr; + isge->mr = mr; isge->vaddr = (void *) sge->addr; isge->length = sge->length; isge->sge_length = sge->length; @@ -181,18 +180,18 @@ int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd, isge->n = 0; goto ok; } - mr = rkt->table[(sge->lkey >> (32 - ib_qib_lkey_table_size))]; - if (unlikely(mr == NULL || mr->lkey != sge->lkey || - mr->pd != &pd->ibpd)) + mr = rcu_dereference( + rkt->table[(sge->lkey >> (32 - ib_qib_lkey_table_size))]); + if (unlikely(!mr || mr->lkey != sge->lkey || mr->pd != &pd->ibpd)) goto bail; off = sge->addr - mr->user_base; - if (unlikely(sge->addr < mr->user_base || - off + sge->length > mr->length || - (mr->access_flags & acc) != acc)) + if (unlikely(sge->addr < mr->iova || off + sge->length > mr->length || + (mr->access_flags & acc) == 0)) goto bail; - qib_get_mr(mr); - spin_unlock_irqrestore(&rkt->lock, flags); + if (unlikely(!atomic_inc_not_zero(&mr->refcount))) + goto bail; + rcu_read_unlock(); off += mr->offset; if (mr->page_shift) { @@ -228,20 +227,22 @@ int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd, ok: return 1; bail: - spin_unlock_irqrestore(&rkt->lock, flags); + rcu_read_unlock(); return 0; } /** * qib_rkey_ok - check the IB virtual address, length, and RKEY - * @dev: infiniband device - * @ss: SGE state + * @qp: qp for validation + * @sge: SGE state * @len: length of data * @vaddr: virtual address to place data * @rkey: rkey to check * @acc: access flags * * Return 1 if successful, otherwise 0. + * + * increments the reference count upon success */ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge, u32 len, u64 vaddr, u32 rkey, int acc) @@ -250,25 +251,26 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge, struct qib_mregion *mr; unsigned n, m; size_t off; - unsigned long flags; /* * We use RKEY == zero for kernel virtual addresses * (see qib_get_dma_mr and qib_dma.c). 
*/ - spin_lock_irqsave(&rkt->lock, flags); + rcu_read_lock(); if (rkey == 0) { struct qib_pd *pd = to_ipd(qp->ibqp.pd); struct qib_ibdev *dev = to_idev(pd->ibpd.device); if (pd->user) goto bail; - if (!dev->dma_mr) + mr = rcu_dereference(dev->dma_mr); + if (!mr) goto bail; - qib_get_mr(dev->dma_mr); - spin_unlock_irqrestore(&rkt->lock, flags); + if (unlikely(!atomic_inc_not_zero(&mr->refcount))) + goto bail; + rcu_read_unlock(); - sge->mr = dev->dma_mr; + sge->mr = mr; sge->vaddr = (void *) vaddr; sge->length = len; sge->sge_length = len; @@ -277,16 +279,18 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge, goto ok; } - mr = rkt->table[(rkey >> (32 - ib_qib_lkey_table_size))]; - if (unlikely(mr == NULL || mr->lkey != rkey || qp->ibqp.pd != mr->pd)) + mr = rcu_dereference( + rkt->table[(rkey >> (32 - ib_qib_lkey_table_size))]); + if (unlikely(!mr || mr->lkey != rkey || qp->ibqp.pd != mr->pd)) goto bail; off = vaddr - mr->iova; if (unlikely(vaddr < mr->iova || off + len > mr->length || (mr->access_flags & acc) == 0)) goto bail; - qib_get_mr(mr); - spin_unlock_irqrestore(&rkt->lock, flags); + if (unlikely(!atomic_inc_not_zero(&mr->refcount))) + goto bail; + rcu_read_unlock(); off += mr->offset; if (mr->page_shift) { @@ -322,7 +326,7 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge, ok: return 1; bail: - spin_unlock_irqrestore(&rkt->lock, flags); + rcu_read_unlock(); return 0; } diff --git a/drivers/infiniband/hw/qib/qib_mr.c b/drivers/infiniband/hw/qib/qib_mr.c index 6a2028a..e6687de 100644 --- a/drivers/infiniband/hw/qib/qib_mr.c +++ b/drivers/infiniband/hw/qib/qib_mr.c @@ -527,3 +527,10 @@ int qib_dealloc_fmr(struct ib_fmr *ibfmr) out: return ret; } + +void mr_rcu_callback(struct rcu_head *list) +{ + struct qib_mregion *mr = container_of(list, struct qib_mregion, list); + + complete(&mr->comp); +} diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c index 76d7ce8..59cdea3 100644 --- a/drivers/infiniband/hw/qib/qib_verbs.c +++ b/drivers/infiniband/hw/qib/qib_verbs.c @@ -2066,7 +2066,9 @@ int qib_register_ib_device(struct qib_devdata *dd) ret = -ENOMEM; goto err_lk; } - memset(dev->lk_table.table, 0, lk_tab_size); + RCU_INIT_POINTER(dev->dma_mr, NULL); + for (i = 0; i < dev->lk_table.max; i++) + RCU_INIT_POINTER(dev->lk_table.table[i], NULL); INIT_LIST_HEAD(&dev->pending_mmaps); spin_lock_init(&dev->pending_lock); dev->mmap_offset = PAGE_SIZE; diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h index 4a2277b..85751fd 100644 --- a/drivers/infiniband/hw/qib/qib_verbs.h +++ b/drivers/infiniband/hw/qib/qib_verbs.h @@ -303,8 +303,9 @@ struct qib_mregion { u32 max_segs; /* number of qib_segs in all the arrays */ u32 mapsz; /* size of the map array */ u8 page_shift; /* 0 - non unform/non powerof2 sizes */ - u8 lkey_published; /* in global table */ + u8 lkey_published; /* in global table */ struct completion comp; /* complete when refcount goes to zero */ + struct rcu_head list; atomic_t refcount; struct qib_segarray *map[0]; /* the segments */ }; @@ -1022,10 +1023,12 @@ static inline void qib_get_mr(struct qib_mregion *mr) atomic_inc(&mr->refcount); } +void mr_rcu_callback(struct rcu_head *list); + static inline void qib_put_mr(struct qib_mregion *mr) { if (unlikely(atomic_dec_and_test(&mr->refcount))) - complete(&mr->comp); + call_rcu(&mr->list, mr_rcu_callback); } static inline void qib_put_ss(struct qib_sge_state *ss) -- cgit v1.1 From 00f5ce99dc6ee46c3113393cc8fa12173f9bbcd7 Mon Sep 17 00:00:00 2001 
From: Jack Morgenstein Date: Tue, 19 Jun 2012 11:21:40 +0300 Subject: mlx4: Use port management change event instead of smp_snoop The port management change event can replace smp_snoop. If the capability bit for this event is set in dev->caps, the event is used (by the driver setting the PORT_MNG_CHG_EVENT bit in the async event mask in the MAP_EQ fw command). In this case, when the driver passes incoming SMP PORT_INFO SET MADs to the FW, the FW generates port management change events to signal any changes to the driver. If the FW generates these events, smp_snoop shouldn't be invoked in ib_process_mad(), or duplicate events will occur (once from the FW-generated event, and once from smp_snoop). In the case where the FW does not generate port management change events, smp_snoop needs to be invoked to create these events. The flow in smp_snoop has been modified to make use of the same procedures as in the fw-generated-event case to generate the port management events (LID change, Client-rereg, P_Key change, and/or GID change). Port management change event handling required changing the mlx4_ib_event and mlx4_dispatch_event prototypes; the "param" argument (last argument) had to be changed to unsigned long in order to accommodate passing the EQE pointer. We also needed to move the definition of struct mlx4_eqe from net/mlx4.h to device.h, to make it available to the IB driver for handling port management change events. Signed-off-by: Jack Morgenstein Signed-off-by: Or Gerlitz Signed-off-by: Roland Dreier --- drivers/infiniband/hw/mlx4/mad.c | 118 ++++++++++++++++++++------- drivers/infiniband/hw/mlx4/main.c | 29 +++++-- drivers/infiniband/hw/mlx4/mlx4_ib.h | 9 ++ drivers/net/ethernet/mellanox/mlx4/en_main.c | 5 +- drivers/net/ethernet/mellanox/mlx4/eq.c | 22 ++++- drivers/net/ethernet/mellanox/mlx4/fw.c | 1 + drivers/net/ethernet/mellanox/mlx4/intf.c | 5 +- drivers/net/ethernet/mellanox/mlx4/mlx4.h | 63 +------------- include/linux/mlx4/device.h | 99 +++++++++++++++++++++- include/linux/mlx4/driver.h | 3 +- 10 files changed, 249 insertions(+), 105 deletions(-) diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c index 84786a9..58c45fb 100644 --- a/drivers/infiniband/hw/mlx4/mad.c +++ b/drivers/infiniband/hw/mlx4/mad.c @@ -147,47 +147,49 @@ static void update_sm_ah(struct mlx4_ib_dev *dev, u8 port_num, u16 lid, u8 sl) } /* - * Snoop SM MADs for port info and P_Key table sets, so we can - * synthesize LID change and P_Key change events. + * Snoop SM MADs for port info, GUID info, and P_Key table sets, so we can + * synthesize LID change, Client-Rereg, GID change, and P_Key change events.
*/ static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad, - u16 prev_lid) + u16 prev_lid) { - struct ib_event event; + struct ib_port_info *pinfo; + u16 lid; + struct mlx4_ib_dev *dev = to_mdev(ibdev); if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED || mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) && - mad->mad_hdr.method == IB_MGMT_METHOD_SET) { - if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO) { - struct ib_port_info *pinfo = - (struct ib_port_info *) ((struct ib_smp *) mad)->data; - u16 lid = be16_to_cpu(pinfo->lid); + mad->mad_hdr.method == IB_MGMT_METHOD_SET) + switch (mad->mad_hdr.attr_id) { + case IB_SMP_ATTR_PORT_INFO: + pinfo = (struct ib_port_info *) ((struct ib_smp *) mad)->data; + lid = be16_to_cpu(pinfo->lid); - update_sm_ah(to_mdev(ibdev), port_num, + update_sm_ah(dev, port_num, be16_to_cpu(pinfo->sm_lid), pinfo->neighbormtu_mastersmsl & 0xf); - event.device = ibdev; - event.element.port_num = port_num; + if (pinfo->clientrereg_resv_subnetto & 0x80) + mlx4_ib_dispatch_event(dev, port_num, + IB_EVENT_CLIENT_REREGISTER); - if (pinfo->clientrereg_resv_subnetto & 0x80) { - event.event = IB_EVENT_CLIENT_REREGISTER; - ib_dispatch_event(&event); - } + if (prev_lid != lid) + mlx4_ib_dispatch_event(dev, port_num, + IB_EVENT_LID_CHANGE); + break; - if (prev_lid != lid) { - event.event = IB_EVENT_LID_CHANGE; - ib_dispatch_event(&event); - } - } + case IB_SMP_ATTR_PKEY_TABLE: + mlx4_ib_dispatch_event(dev, port_num, + IB_EVENT_PKEY_CHANGE); + break; - if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PKEY_TABLE) { - event.device = ibdev; - event.event = IB_EVENT_PKEY_CHANGE; - event.element.port_num = port_num; - ib_dispatch_event(&event); + case IB_SMP_ATTR_GUID_INFO: + mlx4_ib_dispatch_event(dev, port_num, + IB_EVENT_GID_CHANGE); + break; + default: + break; } - } } static void node_desc_override(struct ib_device *dev, @@ -305,7 +307,8 @@ static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, return IB_MAD_RESULT_FAILURE; if (!out_mad->mad_hdr.status) { - smp_snoop(ibdev, port_num, in_mad, prev_lid); + if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV)) + smp_snoop(ibdev, port_num, in_mad, prev_lid); node_desc_override(ibdev, out_mad); } @@ -446,3 +449,62 @@ void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev) ib_destroy_ah(dev->sm_ah[p]); } } + +void handle_port_mgmt_change_event(struct work_struct *work) +{ + struct ib_event_work *ew = container_of(work, struct ib_event_work, work); + struct mlx4_ib_dev *dev = ew->ib_dev; + struct mlx4_eqe *eqe = &(ew->ib_eqe); + u8 port = eqe->event.port_mgmt_change.port; + u32 changed_attr; + + switch (eqe->subtype) { + case MLX4_DEV_PMC_SUBTYPE_PORT_INFO: + changed_attr = be32_to_cpu(eqe->event.port_mgmt_change.params.port_info.changed_attr); + + /* Update the SM ah - This should be done before handling + the other changed attributes so that MADs can be sent to the SM */ + if (changed_attr & MSTR_SM_CHANGE_MASK) { + u16 lid = be16_to_cpu(eqe->event.port_mgmt_change.params.port_info.mstr_sm_lid); + u8 sl = eqe->event.port_mgmt_change.params.port_info.mstr_sm_sl & 0xf; + update_sm_ah(dev, port, lid, sl); + } + + /* Check if it is a lid change event */ + if (changed_attr & MLX4_EQ_PORT_INFO_LID_CHANGE_MASK) + mlx4_ib_dispatch_event(dev, port, IB_EVENT_LID_CHANGE); + + /* Generate GUID changed event */ + if (changed_attr & MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK) + mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE); + + if (changed_attr & 
MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK) + mlx4_ib_dispatch_event(dev, port, + IB_EVENT_CLIENT_REREGISTER); + break; + + case MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE: + mlx4_ib_dispatch_event(dev, port, IB_EVENT_PKEY_CHANGE); + break; + case MLX4_DEV_PMC_SUBTYPE_GUID_INFO: + mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE); + break; + default: + pr_warn("Unsupported subtype 0x%x for " + "Port Management Change event\n", eqe->subtype); + } + + kfree(ew); +} + +void mlx4_ib_dispatch_event(struct mlx4_ib_dev *dev, u8 port_num, + enum ib_event_type type) +{ + struct ib_event event; + + event.device = &dev->ib_dev; + event.element.port_num = port_num; + event.event = type; + + ib_dispatch_event(&event); +} diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 5266b49..4f230c2 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -898,7 +898,6 @@ static void update_gids_task(struct work_struct *work) union ib_gid *gids; int err; struct mlx4_dev *dev = gw->dev->dev; - struct ib_event event; mailbox = mlx4_alloc_cmd_mailbox(dev); if (IS_ERR(mailbox)) { @@ -916,10 +915,7 @@ static void update_gids_task(struct work_struct *work) pr_warn("set port command failed\n"); else { memcpy(gw->dev->iboe.gid_table[gw->port - 1], gw->gids, sizeof gw->gids); - event.device = &gw->dev->ib_dev; - event.element.port_num = gw->port; - event.event = IB_EVENT_GID_CHANGE; - ib_dispatch_event(&event); + mlx4_ib_dispatch_event(gw->dev, gw->port, IB_EVENT_GID_CHANGE); } mlx4_free_cmd_mailbox(dev, mailbox); @@ -1383,10 +1379,18 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr) } static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr, - enum mlx4_dev_event event, int port) + enum mlx4_dev_event event, unsigned long param) { struct ib_event ibev; struct mlx4_ib_dev *ibdev = to_mdev((struct ib_device *) ibdev_ptr); + struct mlx4_eqe *eqe = NULL; + struct ib_event_work *ew; + int port = 0; + + if (event == MLX4_DEV_EVENT_PORT_MGMT_CHANGE) + eqe = (struct mlx4_eqe *)param; + else + port = (u8)param; if (port > ibdev->num_ports) return; @@ -1405,6 +1409,19 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr, ibev.event = IB_EVENT_DEVICE_FATAL; break; + case MLX4_DEV_EVENT_PORT_MGMT_CHANGE: + ew = kmalloc(sizeof *ew, GFP_ATOMIC); + if (!ew) { + pr_err("failed to allocate memory for events work\n"); + break; + } + + INIT_WORK(&ew->work, handle_port_mgmt_change_event); + memcpy(&ew->ib_eqe, eqe, sizeof *eqe); + ew->ib_dev = ibdev; + handle_port_mgmt_change_event(&ew->work); + return; + default: return; } diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h index 5f298af..23bfbf9 100644 --- a/drivers/infiniband/hw/mlx4/mlx4_ib.h +++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h @@ -224,6 +224,12 @@ struct mlx4_ib_dev { int eq_added; }; +struct ib_event_work { + struct work_struct work; + struct mlx4_ib_dev *ib_dev; + struct mlx4_eqe ib_eqe; +}; + static inline struct mlx4_ib_dev *to_mdev(struct ib_device *ibdev) { return container_of(ibdev, struct mlx4_ib_dev, ib_dev); @@ -381,4 +387,7 @@ static inline int mlx4_ib_ah_grh_present(struct mlx4_ib_ah *ah) int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp, union ib_gid *gid); +void mlx4_ib_dispatch_event(struct mlx4_ib_dev *dev, u8 port_num, + enum ib_event_type type); + #endif /* MLX4_IB_H */ diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c index 69ba572..a52922e 100644 --- 
a/drivers/net/ethernet/mellanox/mlx4/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c @@ -131,7 +131,7 @@ static void *mlx4_en_get_netdev(struct mlx4_dev *dev, void *ctx, u8 port) } static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr, - enum mlx4_dev_event event, int port) + enum mlx4_dev_event event, unsigned long port) { struct mlx4_en_dev *mdev = (struct mlx4_en_dev *) endev_ptr; struct mlx4_en_priv *priv; @@ -156,7 +156,8 @@ static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr, if (port < 1 || port > dev->caps.num_ports || !mdev->pndev[port]) return; - mlx4_warn(mdev, "Unhandled event %d for port %d\n", event, port); + mlx4_warn(mdev, "Unhandled event %d for port %d\n", event, + (int) port); } } diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c index bce98d9..9b15d02 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c @@ -82,6 +82,15 @@ enum { (1ull << MLX4_EVENT_TYPE_FLR_EVENT) | \ (1ull << MLX4_EVENT_TYPE_FATAL_WARNING)) +static u64 get_async_ev_mask(struct mlx4_dev *dev) +{ + u64 async_ev_mask = MLX4_ASYNC_EVENT_MASK; + if (dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV) + async_ev_mask |= (1ull << MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT); + + return async_ev_mask; +} + static void eq_set_ci(struct mlx4_eq *eq, int req_not) { __raw_writel((__force u32) cpu_to_be32((eq->cons_index & 0xffffff) | @@ -473,6 +482,11 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) break; + case MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT: + mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_MGMT_CHANGE, + (unsigned long) eqe); + break; + case MLX4_EVENT_TYPE_EEC_CATAS_ERROR: case MLX4_EVENT_TYPE_ECC_DETECT: default: @@ -956,7 +970,7 @@ int mlx4_init_eq_table(struct mlx4_dev *dev) priv->eq_table.have_irq = 1; } - err = mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0, + err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0, priv->eq_table.eq[dev->caps.num_comp_vectors].eqn); if (err) mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n", @@ -996,7 +1010,7 @@ void mlx4_cleanup_eq_table(struct mlx4_dev *dev) struct mlx4_priv *priv = mlx4_priv(dev); int i; - mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 1, + mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 1, priv->eq_table.eq[dev->caps.num_comp_vectors].eqn); mlx4_free_irqs(dev); @@ -1040,7 +1054,7 @@ int mlx4_test_interrupts(struct mlx4_dev *dev) mlx4_cmd_use_polling(dev); /* Map the new eq to handle all asyncronous events */ - err = mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0, + err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0, priv->eq_table.eq[i].eqn); if (err) { mlx4_warn(dev, "Failed mapping eq for interrupt test\n"); @@ -1054,7 +1068,7 @@ int mlx4_test_interrupts(struct mlx4_dev *dev) } /* Return to default */ - mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0, + mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0, priv->eq_table.eq[dev->caps.num_comp_vectors].eqn); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index 4281ce0..ee9d6b0 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -109,6 +109,7 @@ static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags) [41] = "Unicast VEP steering support", [42] = "Multicast VEP steering support", [48] = "Counters support", + [59] = "Port management change event support", }; int i; diff --git a/drivers/net/ethernet/mellanox/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c index b4e9f6f..116895a 100644 
--- a/drivers/net/ethernet/mellanox/mlx4/intf.c +++ b/drivers/net/ethernet/mellanox/mlx4/intf.c @@ -115,7 +115,8 @@ void mlx4_unregister_interface(struct mlx4_interface *intf) } EXPORT_SYMBOL_GPL(mlx4_unregister_interface); -void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type, int port) +void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type, + unsigned long param) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_device_context *dev_ctx; @@ -125,7 +126,7 @@ void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type, int por list_for_each_entry(dev_ctx, &priv->ctx_list, list) if (dev_ctx->intf->event) - dev_ctx->intf->event(dev, dev_ctx->context, type, port); + dev_ctx->intf->event(dev, dev_ctx->context, type, param); spin_unlock_irqrestore(&priv->ctx_lock, flags); } diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index e5d2022..4d11d12 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -338,66 +338,6 @@ struct mlx4_srq_context { __be64 db_rec_addr; }; -struct mlx4_eqe { - u8 reserved1; - u8 type; - u8 reserved2; - u8 subtype; - union { - u32 raw[6]; - struct { - __be32 cqn; - } __packed comp; - struct { - u16 reserved1; - __be16 token; - u32 reserved2; - u8 reserved3[3]; - u8 status; - __be64 out_param; - } __packed cmd; - struct { - __be32 qpn; - } __packed qp; - struct { - __be32 srqn; - } __packed srq; - struct { - __be32 cqn; - u32 reserved1; - u8 reserved2[3]; - u8 syndrome; - } __packed cq_err; - struct { - u32 reserved1[2]; - __be32 port; - } __packed port_change; - struct { - #define COMM_CHANNEL_BIT_ARRAY_SIZE 4 - u32 reserved; - u32 bit_vec[COMM_CHANNEL_BIT_ARRAY_SIZE]; - } __packed comm_channel_arm; - struct { - u8 port; - u8 reserved[3]; - __be64 mac; - } __packed mac_update; - struct { - u8 port; - } __packed sw_event; - struct { - __be32 slave_id; - } __packed flr_event; - struct { - __be16 current_temperature; - __be16 warning_threshold; - } __packed warming; - } event; - u8 slave_id; - u8 reserved3[2]; - u8 owner; -} __packed; - struct mlx4_eq { struct mlx4_dev *dev; void __iomem *doorbell; @@ -887,7 +827,8 @@ void mlx4_catas_init(void); int mlx4_restart_one(struct pci_dev *pdev); int mlx4_register_device(struct mlx4_dev *dev); void mlx4_unregister_device(struct mlx4_dev *dev); -void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type, int port); +void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type, + unsigned long param); struct mlx4_dev_cap; struct mlx4_init_hca_param; diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 8eadf0f1..560b220 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -96,7 +96,8 @@ enum { MLX4_DEV_CAP_FLAG_VEP_UC_STEER = 1LL << 41, MLX4_DEV_CAP_FLAG_VEP_MC_STEER = 1LL << 42, MLX4_DEV_CAP_FLAG_COUNTERS = 1LL << 48, - MLX4_DEV_CAP_FLAG_SENSE_SUPPORT = 1LL << 55 + MLX4_DEV_CAP_FLAG_SENSE_SUPPORT = 1LL << 55, + MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV = 1LL << 59, }; enum { @@ -138,6 +139,7 @@ enum mlx4_event { MLX4_EVENT_TYPE_COMM_CHANNEL = 0x18, MLX4_EVENT_TYPE_FATAL_WARNING = 0x1b, MLX4_EVENT_TYPE_FLR_EVENT = 0x1c, + MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT = 0x1d, MLX4_EVENT_TYPE_NONE = 0xff, }; @@ -235,6 +237,24 @@ enum { MLX4_MAX_FAST_REG_PAGES = 511, }; +enum { + MLX4_DEV_PMC_SUBTYPE_GUID_INFO = 0x14, + MLX4_DEV_PMC_SUBTYPE_PORT_INFO = 0x15, + MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE = 0x16, +}; + +/* Port mgmt change event 
handling */ +enum { + MLX4_EQ_PORT_INFO_MSTR_SM_LID_CHANGE_MASK = 1 << 0, + MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK = 1 << 1, + MLX4_EQ_PORT_INFO_LID_CHANGE_MASK = 1 << 2, + MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK = 1 << 3, + MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK = 1 << 4, +}; + +#define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \ + MLX4_EQ_PORT_INFO_MSTR_SM_LID_CHANGE_MASK) + static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor) { return (major << 32) | (minor << 16) | subminor; @@ -511,6 +531,81 @@ struct mlx4_dev { int num_vfs; }; +struct mlx4_eqe { + u8 reserved1; + u8 type; + u8 reserved2; + u8 subtype; + union { + u32 raw[6]; + struct { + __be32 cqn; + } __packed comp; + struct { + u16 reserved1; + __be16 token; + u32 reserved2; + u8 reserved3[3]; + u8 status; + __be64 out_param; + } __packed cmd; + struct { + __be32 qpn; + } __packed qp; + struct { + __be32 srqn; + } __packed srq; + struct { + __be32 cqn; + u32 reserved1; + u8 reserved2[3]; + u8 syndrome; + } __packed cq_err; + struct { + u32 reserved1[2]; + __be32 port; + } __packed port_change; + struct { + #define COMM_CHANNEL_BIT_ARRAY_SIZE 4 + u32 reserved; + u32 bit_vec[COMM_CHANNEL_BIT_ARRAY_SIZE]; + } __packed comm_channel_arm; + struct { + u8 port; + u8 reserved[3]; + __be64 mac; + } __packed mac_update; + struct { + __be32 slave_id; + } __packed flr_event; + struct { + __be16 current_temperature; + __be16 warning_threshold; + } __packed warming; + struct { + u8 reserved[3]; + u8 port; + union { + struct { + __be16 mstr_sm_lid; + __be16 port_lid; + __be32 changed_attr; + u8 reserved[3]; + u8 mstr_sm_sl; + __be64 gid_prefix; + } __packed port_info; + struct { + __be32 block_ptr; + __be32 tbl_entries_mask; + } __packed tbl_change_info; + } params; + } __packed port_mgmt_change; + } event; + u8 slave_id; + u8 reserved3[2]; + u8 owner; +} __packed; + struct mlx4_init_port_param { int set_guid0; int set_node_guid; @@ -536,6 +631,8 @@ struct mlx4_init_port_param { #define MLX4_INVALID_SLAVE_ID 0xFF +void handle_port_mgmt_change_event(struct work_struct *work); + static inline int mlx4_is_master(struct mlx4_dev *dev) { return dev->flags & MLX4_FLAG_MASTER; diff --git a/include/linux/mlx4/driver.h b/include/linux/mlx4/driver.h index 5f1298b..0f509229 100644 --- a/include/linux/mlx4/driver.h +++ b/include/linux/mlx4/driver.h @@ -42,13 +42,14 @@ enum mlx4_dev_event { MLX4_DEV_EVENT_PORT_UP, MLX4_DEV_EVENT_PORT_DOWN, MLX4_DEV_EVENT_PORT_REINIT, + MLX4_DEV_EVENT_PORT_MGMT_CHANGE, }; struct mlx4_interface { void * (*add) (struct mlx4_dev *dev); void (*remove)(struct mlx4_dev *dev, void *context); void (*event) (struct mlx4_dev *dev, void *context, - enum mlx4_dev_event event, int port); + enum mlx4_dev_event event, unsigned long param); void * (*get_dev)(struct mlx4_dev *dev, void *context, u8 port); struct list_head list; enum mlx4_protocol protocol; -- cgit v1.1 From 2aca1172c2f5b27fbc37297574f716c1c15f4153 Mon Sep 17 00:00:00 2001 From: Jack Morgenstein Date: Tue, 19 Jun 2012 11:21:41 +0300 Subject: net/mlx4_core: Initialize IB port capabilities for all slaves With IB SR-IOV, each slave has its own separate copy of the port capabilities flags. For example, the master can run a subnet manager (which causes the IsSM bit to be set in the master's port capabilities) without affecting the port capabilities seen by the slaves (the IsSM bit will be seen as cleared in the slaves). Also add a static inline mlx4_master_func_num() to enhance readability of the code. 
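For reference, the per-slave initialization this patch adds to mlx4_setup_hca() (full context in the diff below) condenses to the following sketch; priv and ib_port_default_caps are locals already in scope at that point:

    if (mlx4_is_master(dev)) {
            int i;

            for (i = 0; i < dev->num_slaves; i++) {
                    if (i == mlx4_master_func_num(dev))
                            continue;       /* the master keeps its own, live copy */
                    priv->mfunc.master.slave_state[i].ib_cap_mask[port] =
                            ib_port_default_caps;
            }
    }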
Signed-off-by: Jack Morgenstein Signed-off-by: Or Gerlitz Signed-off-by: Roland Dreier --- drivers/net/ethernet/mellanox/mlx4/main.c | 11 +++++++++++ include/linux/mlx4/device.h | 5 +++++ 2 files changed, 16 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index a0313de..83afb15 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -1477,6 +1477,17 @@ static int mlx4_setup_hca(struct mlx4_dev *dev) "with caps = 0\n", port, err); dev->caps.ib_port_def_cap[port] = ib_port_default_caps; + /* initialize per-slave default ib port capabilities */ + if (mlx4_is_master(dev)) { + int i; + for (i = 0; i < dev->num_slaves; i++) { + if (i == mlx4_master_func_num(dev)) + continue; + priv->mfunc.master.slave_state[i].ib_cap_mask[port] = + ib_port_default_caps; + } + } + if (mlx4_is_mfunc(dev)) dev->caps.port_ib_mtu[port] = IB_MTU_2048; else diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 560b220..7fbdc89 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -633,6 +633,11 @@ struct mlx4_init_port_param { void handle_port_mgmt_change_event(struct work_struct *work); +static inline int mlx4_master_func_num(struct mlx4_dev *dev) +{ + return dev->caps.function; +} + static inline int mlx4_is_master(struct mlx4_dev *dev) { return dev->flags & MLX4_FLAG_MASTER; -- cgit v1.1 From 7e23017704172cb6508cb365eb2cae7335e5da71 Mon Sep 17 00:00:00 2001 From: Mike Marciniszyn Date: Fri, 6 Jul 2012 18:29:45 +0000 Subject: IB/qib: Fix sparse RCU warnings in qib_keys.c Commit 8aac4cc3a9d7 ("IB/qib: RCU locking for MR validation") introduced new sparse warnings in qib_keys.c. Acked-by: Fengguang Wu Signed-off-by: Mike Marciniszyn Signed-off-by: Roland Dreier --- drivers/infiniband/hw/qib/qib_keys.c | 4 +++- drivers/infiniband/hw/qib/qib_verbs.h | 4 ++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/infiniband/hw/qib/qib_keys.c b/drivers/infiniband/hw/qib/qib_keys.c index 970165b..e9486c7 100644 --- a/drivers/infiniband/hw/qib/qib_keys.c +++ b/drivers/infiniband/hw/qib/qib_keys.c @@ -349,7 +349,9 @@ int qib_fast_reg_mr(struct qib_qp *qp, struct ib_send_wr *wr) if (pd->user || rkey == 0) goto bail; - mr = rkt->table[(rkey >> (32 - ib_qib_lkey_table_size))]; + mr = rcu_dereference_protected( + rkt->table[(rkey >> (32 - ib_qib_lkey_table_size))], + lockdep_is_held(&rkt->lock)); if (unlikely(mr == NULL || qp->ibqp.pd != mr->pd)) goto bail; diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h index 85751fd..1293133 100644 --- a/drivers/infiniband/hw/qib/qib_verbs.h +++ b/drivers/infiniband/hw/qib/qib_verbs.h @@ -650,7 +650,7 @@ struct qib_lkey_table { u32 next; /* next unused index (speeds search) */ u32 gen; /* generation count */ u32 max; /* size of the table */ - struct qib_mregion **table; + struct qib_mregion __rcu **table; }; struct qib_opcode_stats { @@ -732,7 +732,7 @@ struct qib_ibdev { struct list_head pending_mmaps; spinlock_t mmap_offset_lock; /* protect mmap_offset */ u32 mmap_offset; - struct qib_mregion *dma_mr; + struct qib_mregion __rcu *dma_mr; /* QP numbers are shared by all IB ports */ struct qib_qpn_table qpn_table; -- cgit v1.1 From 9bbeb6663ea2f069bd3bb6387fe7a824def21064 Mon Sep 17 00:00:00 2001 From: Dotan Barak Date: Wed, 11 Jul 2012 15:39:27 +0000 Subject: IB/mthca: Warning about event for non-existent QPs should show event type Events received for non-existent QPs should 
generate a warning that includes the event type that was received. Signed-off-by: Dotan Barak Reviewed-by: Jack Morgenstein Signed-off-by: Or Gerlitz Signed-off-by: Roland Dreier --- drivers/infiniband/hw/mthca/mthca_qp.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c index 9601049..c3074a1 100644 --- a/drivers/infiniband/hw/mthca/mthca_qp.c +++ b/drivers/infiniband/hw/mthca/mthca_qp.c @@ -247,7 +247,8 @@ void mthca_qp_event(struct mthca_dev *dev, u32 qpn, spin_unlock(&dev->qp_table.lock); if (!qp) { - mthca_warn(dev, "Async event for bogus QP %08x\n", qpn); + mthca_warn(dev, "Async event %d for bogus QP %08x\n", + event_type, qpn); return; } -- cgit v1.1 From 16551d450146eeba29ae3933560a2ed4968b23ce Mon Sep 17 00:00:00 2001 From: Dotan Barak Date: Wed, 11 Jul 2012 15:39:28 +0000 Subject: IB/mthca: Fill in sq_sig_type in query QP The query QP code didn't fill in that attribute; fill it in. Signed-off-by: Dotan Barak Reviewed-by: Jack Morgenstein Signed-off-by: Or Gerlitz Signed-off-by: Roland Dreier --- drivers/infiniband/hw/mthca/mthca_qp.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c index c3074a1..26a6845 100644 --- a/drivers/infiniband/hw/mthca/mthca_qp.c +++ b/drivers/infiniband/hw/mthca/mthca_qp.c @@ -502,6 +502,7 @@ done: qp_attr->cap.max_inline_data = qp->max_inline_data; qp_init_attr->cap = qp_attr->cap; + qp_init_attr->sq_sig_type = qp->sq_policy; out_mailbox: mthca_free_mailbox(dev, mailbox); -- cgit v1.1 From 47e956b2a6a4539f948ff2b85dfd259704dc5021 Mon Sep 17 00:00:00 2001 From: Dotan Barak Date: Wed, 11 Jul 2012 15:39:29 +0000 Subject: IB/mlx4: Fill the masked_atomic_cap attribute in query device When the user queries for device capabilities, fill in the masked_atomic_cap attribute with the real support level of atomic capabilities instead of using a hard-coded value. Signed-off-by: Dotan Barak Reviewed-by: Eli Cohen Signed-off-by: Or Gerlitz Signed-off-by: Roland Dreier --- drivers/infiniband/hw/mlx4/main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 3530c41..07efe4b 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -157,7 +157,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev, props->local_ca_ack_delay = dev->dev->caps.local_ca_ack_delay; props->atomic_cap = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ? IB_ATOMIC_HCA : IB_ATOMIC_NONE; - props->masked_atomic_cap = IB_ATOMIC_HCA; + props->masked_atomic_cap = props->atomic_cap; props->max_pkeys = dev->dev->caps.pkey_table_len[1]; props->max_mcast_grp = dev->dev->caps.num_mgms + dev->dev->caps.num_amgms; props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm; -- cgit v1.1 From f457ce471c522cadf697b873e2cf46e458e90bef Mon Sep 17 00:00:00 2001 From: Dotan Barak Date: Wed, 11 Jul 2012 15:39:30 +0000 Subject: mlx4_core: Remove double function declarations Spotted four duplicate declarations in icm.h; remove them.
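The duplicates were harmless -- C accepts repeated identical declarations, so the build never noticed -- but they invite drift: if only one copy is later edited, the redundancy turns into a compile error. A contrived illustration, with hypothetical names rather than the real icm.h prototypes:

    /* hypothetical header, not from this driver */
    int foo_table_get(struct foo_dev *dev, int obj);
    /* ... other prototypes ... */
    int foo_table_get(struct foo_dev *dev, int obj);    /* duplicate: legal while
                                                         * identical, a build error
                                                         * the moment they diverge */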
Signed-off-by: Dotan Barak Signed-off-by: Or Gerlitz Signed-off-by: Roland Dreier --- drivers/net/ethernet/mellanox/mlx4/icm.h | 6 ------ 1 file changed, 6 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.h b/drivers/net/ethernet/mellanox/mlx4/icm.h index b10c07a..19e4efc 100644 --- a/drivers/net/ethernet/mellanox/mlx4/icm.h +++ b/drivers/net/ethernet/mellanox/mlx4/icm.h @@ -81,13 +81,7 @@ int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table, u64 virt, int obj_size, int nobj, int reserved, int use_lowmem, int use_coherent); void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table); -int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj); -void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj); void *mlx4_table_find(struct mlx4_icm_table *table, int obj, dma_addr_t *dma_handle); -int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table, - int start, int end); -void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table, - int start, int end); static inline void mlx4_icm_first(struct mlx4_icm *icm, struct mlx4_icm_iter *iter) -- cgit v1.1 From 87d4abda83a125dec65181a6ecce3035db76d46a Mon Sep 17 00:00:00 2001 From: Dotan Barak Date: Wed, 11 Jul 2012 15:39:31 +0000 Subject: IB/cm: Destroy idr as part of the module init error flow Clean the idr as part of the error flow since it is a resource too. Signed-off-by: Dotan Barak Reviewed-by: Sean Hefty Signed-off-by: Or Gerlitz Signed-off-by: Roland Dreier --- drivers/infiniband/core/cm.c | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index c889aae..d67999f 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -3848,24 +3848,28 @@ static int __init ib_cm_init(void) INIT_LIST_HEAD(&cm.timewait_list); ret = class_register(&cm_class); - if (ret) - return -ENOMEM; + if (ret) { + ret = -ENOMEM; + goto error1; + } cm.wq = create_workqueue("ib_cm"); if (!cm.wq) { ret = -ENOMEM; - goto error1; + goto error2; } ret = ib_register_client(&cm_client); if (ret) - goto error2; + goto error3; return 0; -error2: +error3: destroy_workqueue(cm.wq); -error1: +error2: class_unregister(&cm_class); +error1: + idr_destroy(&cm.local_id_table); return ret; } -- cgit v1.1 From 240a9207aae24916dba7070aa7047c3732102cb8 Mon Sep 17 00:00:00 2001 From: Dotan Barak Date: Wed, 11 Jul 2012 15:39:32 +0000 Subject: net/mlx4_core: Free ICM table in case of error In mlx4_init_icm_table(), free the allocated table if we failed to allocate memory to its entries. Signed-off-by: Dotan Barak Reviewed-by: Yevgeny Petrilin Signed-off-by: Or Gerlitz Signed-off-by: Roland Dreier --- drivers/net/ethernet/mellanox/mlx4/icm.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c index a9ade1c..88b7b3e7 100644 --- a/drivers/net/ethernet/mellanox/mlx4/icm.c +++ b/drivers/net/ethernet/mellanox/mlx4/icm.c @@ -413,6 +413,8 @@ err: mlx4_free_icm(dev, table->icm[i], use_coherent); } + kfree(table->icm); + return -ENOMEM; } -- cgit v1.1 From 396f2feb05d7cc5549c611c05abfb4108cd1c6d6 Mon Sep 17 00:00:00 2001 From: Jack Morgenstein Date: Tue, 19 Jun 2012 11:21:42 +0300 Subject: mlx4_core: Implement mechanism for reserved Q_Keys The SR-IOV special QP tunneling mechanism uses proxy special QPs (instead of the real special QPs) for MADs on guests. 
These proxy QPs send their packets to a "tunnel" QP owned by the master. The master then forwards the MAD (after any required paravirtualization) to the real special QP, which sends out the MAD. For security reasons (i.e., to prevent guests from sending MADs to tunnel QPs belonging to other guests), each proxy-tunnel QP pair is assigned a unique, reserved, Q_Key. These Q_Keys are available only for proxy and tunnel QPs -- if the guest tries to use these Q_Keys with other QPs, it will fail. This patch introduces a mechanism for reserving a block of 64K Q_Keys for proxy/tunneling use. The patch also introduces two new fields, base_sqpn and base_tunnel_sqpn, in struct mlx4_caps. In SR-IOV mode, the QP numbers for the "real," proxy, and tunnel SQPs are added to the reserved QPN area (so that they will not change). There are 8 special QPs per port in the HCA, and each of them is assigned both a proxy and a tunnel QP, for each VF and for the PF as well in SR-IOV mode. The QPNs for these QPs are arranged as follows: 1. The real SQP numbers (8) 2. The proxy SQPs (8 * (max number of VFs + max number of PFs)) 3. The tunnel SQPs (8 * (max number of VFs + max number of PFs)) The two new fields are used as follows: base_sqpn: the QP number of the first of the real SQPs. base_tunnel_sqpn: the QP number of the first QP in the tunnel SQP region (on guests, this is the first tunnel SQP of the 8 which are assigned to that guest). In addition, in SR-IOV mode, sqp_start is the number of the first proxy SQP in the proxy SQP region (in guests, this is the first proxy SQP of the 8 which are assigned to that guest). Note that in non-SR-IOV mode, there are no proxies and no tunnels; in this case, sqp_start is set to base_sqpn -- which minimizes code changes. Signed-off-by: Jack Morgenstein Signed-off-by: Or Gerlitz Signed-off-by: Roland Dreier --- drivers/net/ethernet/mellanox/mlx4/main.c | 17 +++++++++++++++++ include/linux/mlx4/device.h | 11 +++++++++++ 2 files changed, 28 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 83afb15..81154a1 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -391,6 +391,23 @@ static int mlx4_how_many_lives_vf(struct mlx4_dev *dev) return ret; } +int mlx4_get_parav_qkey(struct mlx4_dev *dev, u32 qpn, u32 *qkey) +{ + u32 qk = MLX4_RESERVED_QKEY_BASE; + if (qpn >= dev->caps.base_tunnel_sqpn + 8 * MLX4_MFUNC_MAX || + qpn < dev->caps.sqp_start) + return -EINVAL; + + if (qpn >= dev->caps.base_tunnel_sqpn) + /* tunnel qp */ + qk += qpn - dev->caps.base_tunnel_sqpn; + else + qk += qpn - dev->caps.sqp_start; + *qkey = qk; + return 0; +} +EXPORT_SYMBOL(mlx4_get_parav_qkey); + int mlx4_is_slave_active(struct mlx4_dev *dev, int slave) { struct mlx4_priv *priv = mlx4_priv(dev); diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 7fbdc89..c30a314 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -56,6 +56,13 @@ enum { MLX4_MAX_PORTS = 2 }; +/* base qkey for use in sriov tunnel-qp/proxy-qp communication. + * These qkeys must not be allowed for general use. This is a 64k range, + * and to test for violation, we use the mask (protect against future chg).
+ */ +#define MLX4_RESERVED_QKEY_BASE (0xFFFF0000) +#define MLX4_RESERVED_QKEY_MASK (0xFFFF0000) + enum { MLX4_BOARD_ID_LEN = 64 }; @@ -293,6 +300,8 @@ struct mlx4_caps { int max_qp_init_rdma; int max_qp_dest_rdma; int sqp_start; + u32 base_sqpn; + u32 base_tunnel_sqpn; int num_srqs; int max_srq_wqes; int max_srq_sge; @@ -772,4 +781,6 @@ int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port); int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx); void mlx4_counter_free(struct mlx4_dev *dev, u32 idx); +int mlx4_get_parav_qkey(struct mlx4_dev *dev, u32 qpn, u32 *qkey); + #endif /* MLX4_DEVICE_H */ -- cgit v1.1 From 105c320f6ac37af30252577d419e47b39edb5843 Mon Sep 17 00:00:00 2001 From: Jack Morgenstein Date: Tue, 19 Jun 2012 11:21:43 +0300 Subject: mlx4_core: Allow guests to have IB ports Modify mlx4_dev_cap to allow IB support when SR-IOV is active. Modify mlx4_slave_cap to set the "rdma-supported" bit in its flags area, and pass that to the guests (this is done in QUERY_FUNC_CAP and its wrapper). However, we don't activate IB support quite yet -- we leave the error return at the start of mlx4_ib_add in the mlx4_ib driver. In addition, set "protected fmr supported" bit to zero in the QUERY_FUNC_CAP wrapper. Finally, in the QUERY_FUNC_CAP wrapper, we needed to add code which checks for the port type (IB or Ethernet). Previously, this was not an issue, since only Ethernet ports were supported. Signed-off-by: Jack Morgenstein Signed-off-by: Or Gerlitz Signed-off-by: Roland Dreier --- drivers/net/ethernet/mellanox/mlx4/fw.c | 78 ++++++++++++++++++++----------- drivers/net/ethernet/mellanox/mlx4/main.c | 26 ++++------- 2 files changed, 59 insertions(+), 45 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index ee9d6b0..5549f6b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -174,6 +174,7 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave, #define QUERY_FUNC_CAP_FLAGS_OFFSET 0x0 #define QUERY_FUNC_CAP_NUM_PORTS_OFFSET 0x1 #define QUERY_FUNC_CAP_PF_BHVR_OFFSET 0x4 +#define QUERY_FUNC_CAP_FMR_OFFSET 0x8 #define QUERY_FUNC_CAP_QP_QUOTA_OFFSET 0x10 #define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET 0x14 #define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET 0x18 @@ -183,25 +184,44 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave, #define QUERY_FUNC_CAP_MAX_EQ_OFFSET 0x2c #define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET 0X30 +#define QUERY_FUNC_CAP_FMR_FLAG 0x80 +#define QUERY_FUNC_CAP_FLAG_RDMA 0x40 +#define QUERY_FUNC_CAP_FLAG_ETH 0x80 + +/* when opcode modifier = 1 */ #define QUERY_FUNC_CAP_PHYS_PORT_OFFSET 0x3 +#define QUERY_FUNC_CAP_RDMA_PROPS_OFFSET 0x8 #define QUERY_FUNC_CAP_ETH_PROPS_OFFSET 0xc +#define QUERY_FUNC_CAP_ETH_PROPS_FORCE_MAC 0x40 +#define QUERY_FUNC_CAP_ETH_PROPS_FORCE_VLAN 0x80 + +#define QUERY_FUNC_CAP_RDMA_PROPS_FORCE_PHY_WQE_GID 0x80 + if (vhcr->op_modifier == 1) { field = vhcr->in_modifier; MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_PHYS_PORT_OFFSET); - field = 0; /* ensure fvl bit is not set */ + field = 0; + /* ensure force vlan and force mac bits are not set */ MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_ETH_PROPS_OFFSET); + /* ensure that phy_wqe_gid bit is not set */ + MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_RDMA_PROPS_OFFSET); + } else if (vhcr->op_modifier == 0) { - field = 1 << 7; /* enable only ethernet interface */ + /* enable rdma and ethernet interfaces */ + field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA); 
MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET); field = dev->caps.num_ports; MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET); - size = 0; /* no PF behavious is set for now */ + size = 0; /* no PF behaviour is set for now */ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_PF_BHVR_OFFSET); + field = 0; /* protected FMR support not available as yet */ + MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FMR_OFFSET); + size = dev->caps.num_qps; MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET); @@ -254,11 +274,12 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, struct mlx4_func_cap *func_cap) outbox = mailbox->buf; MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS_OFFSET); - if (!(field & (1 << 7))) { - mlx4_err(dev, "The host doesn't support eth interface\n"); + if (!(field & (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA))) { + mlx4_err(dev, "The host supports neither eth nor rdma interfaces\n"); err = -EPROTONOSUPPORT; goto out; } + func_cap->flags = field; MLX4_GET(field, outbox, QUERY_FUNC_CAP_NUM_PORTS_OFFSET); func_cap->num_ports = field; @@ -297,17 +318,27 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, struct mlx4_func_cap *func_cap) if (err) goto out; - MLX4_GET(field, outbox, QUERY_FUNC_CAP_ETH_PROPS_OFFSET); - if (field & (1 << 7)) { - mlx4_err(dev, "VLAN is enforced on this port\n"); - err = -EPROTONOSUPPORT; - goto out; - } + if (dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH) { + MLX4_GET(field, outbox, QUERY_FUNC_CAP_ETH_PROPS_OFFSET); + if (field & QUERY_FUNC_CAP_ETH_PROPS_FORCE_VLAN) { + mlx4_err(dev, "VLAN is enforced on this port\n"); + err = -EPROTONOSUPPORT; + goto out; + } - if (field & (1 << 6)) { - mlx4_err(dev, "Force mac is enabled on this port\n"); - err = -EPROTONOSUPPORT; - goto out; + if (field & QUERY_FUNC_CAP_ETH_PROPS_FORCE_MAC) { + mlx4_err(dev, "Force mac is enabled on this port\n"); + err = -EPROTONOSUPPORT; + goto out; + } + } else if (dev->caps.port_type[i] == MLX4_PORT_TYPE_IB) { + MLX4_GET(field, outbox, QUERY_FUNC_CAP_RDMA_PROPS_OFFSET); + if (field & QUERY_FUNC_CAP_RDMA_PROPS_FORCE_PHY_WQE_GID) { + mlx4_err(dev, "phy_wqe_gid is " + "enforced on this ib port\n"); + err = -EPROTONOSUPPORT; + goto out; + } } MLX4_GET(field, outbox, QUERY_FUNC_CAP_PHYS_PORT_OFFSET); @@ -701,12 +732,7 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave, u8 port_type; int err; -#define MLX4_PORT_SUPPORT_IB (1 << 0) -#define MLX4_PORT_SUGGEST_TYPE (1 << 3) -#define MLX4_PORT_DEFAULT_SENSE (1 << 4) -#define MLX4_VF_PORT_ETH_ONLY_MASK (0xff & ~MLX4_PORT_SUPPORT_IB & \ - ~MLX4_PORT_SUGGEST_TYPE & \ - ~MLX4_PORT_DEFAULT_SENSE) +#define MLX4_VF_PORT_NO_LINK_SENSE_MASK 0xE0 err = mlx4_cmd_box(dev, 0, outbox->dma, vhcr->in_modifier, 0, MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B, @@ -722,12 +748,10 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave, MLX4_GET(port_type, outbox->buf, QUERY_PORT_SUPPORTED_TYPE_OFFSET); - /* Allow only Eth port, no link sensing allowed */ - port_type &= MLX4_VF_PORT_ETH_ONLY_MASK; - - /* check eth is enabled for this port */ - if (!(port_type & 2)) - mlx4_dbg(dev, "QUERY PORT: eth not supported by host"); + /* No link sensing allowed */ + port_type &= MLX4_VF_PORT_NO_LINK_SENSE_MASK; + /* set port type to currently operating port type */ + port_type |= (dev->caps.port_type[vhcr->in_modifier] & 0x3); MLX4_PUT(outbox->buf, port_type, QUERY_PORT_SUPPORTED_TYPE_OFFSET); diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 81154a1..58544b7 100644 --- 
a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -288,29 +288,19 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) /* if only ETH is supported - assign ETH */ if (dev->caps.supported_type[i] == MLX4_PORT_TYPE_ETH) dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH; - /* if only IB is supported, - * assign IB only if SRIOV is off*/ + /* if only IB is supported, assign IB */ else if (dev->caps.supported_type[i] == - MLX4_PORT_TYPE_IB) { - if (dev->flags & MLX4_FLAG_SRIOV) - dev->caps.port_type[i] = - MLX4_PORT_TYPE_NONE; - else - dev->caps.port_type[i] = - MLX4_PORT_TYPE_IB; - /* if IB and ETH are supported, - * first of all check if SRIOV is on */ - } else if (dev->flags & MLX4_FLAG_SRIOV) - dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH; + MLX4_PORT_TYPE_IB) + dev->caps.port_type[i] = MLX4_PORT_TYPE_IB; else { - /* In non-SRIOV mode, we set the port type - * according to user selection of port type, - * if usere selected none, take the FW hint */ - if (port_type_array[i-1] == MLX4_PORT_TYPE_NONE) + /* if IB and ETH are supported, we set the port + * type according to user selection of port type; + * if user selected none, take the FW hint */ + if (port_type_array[i - 1] == MLX4_PORT_TYPE_NONE) dev->caps.port_type[i] = dev->caps.suggested_type[i] ? MLX4_PORT_TYPE_ETH : MLX4_PORT_TYPE_IB; else - dev->caps.port_type[i] = port_type_array[i-1]; + dev->caps.port_type[i] = port_type_array[i - 1]; } } /* -- cgit v1.1 From 6634961c14d38ef64ec284c07aecb03d3dd03b4a Mon Sep 17 00:00:00 2001 From: Jack Morgenstein Date: Tue, 19 Jun 2012 11:21:44 +0300 Subject: mlx4: Put physical GID and P_Key table sizes in mlx4_phys_caps struct and paravirtualize them To allow easy paravirtualization of P_Key and GID table sizes, keep paravirtualized sizes in mlx4_dev->caps, but save the actual physical sizes from FW in mlx4_dev->phys_caps. In addition, in SR-IOV mode, do the following: 1. Reduce the reported P_Key table size by 1. This is done to reserve the highest P_Key index for internal use, for declaring an invalid P_Key in P_Key paravirtualization. We require a P_Key index which always contains an invalid P_Key value for this purpose (i.e., one which cannot be modified by the subnet manager). The way to do this is to reduce the P_Key table size reported to the subnet manager by 1, so that it will not attempt to access the P_Key at index #127. 2. Paravirtualize the GID table size to 1. Thus, each guest sees only a single GID (at its paravirtualized index 0). In addition, since we are paravirtualizing the GID table size to 1, we add paravirtualization of the master GID event here (i.e., we do not do ib_dispatch_event() for the GUID change event on the master, since its (only) GUID never changes).
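The master-side trimming is done by a small helper added to main.c (shown in full in the diff below); in essence:

    /* SR-IOV master: operate with one GID per port and a P_Key table
     * shortened by one, keeping the top index free to hold a permanently
     * invalid P_Key */
    static void mlx4_parav_master_pf_caps(struct mlx4_dev *dev)
    {
            int i;

            for (i = 1; i <= dev->caps.num_ports; i++) {
                    dev->caps.gid_table_len[i] = 1;
                    dev->caps.pkey_table_len[i] =
                            dev->phys_caps.pkey_phys_table_len[i] - 1;
            }
    }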
Signed-off-by: Jack Morgenstein Signed-off-by: Roland Dreier --- drivers/infiniband/hw/mlx4/mad.c | 10 ++++--- drivers/net/ethernet/mellanox/mlx4/fw.c | 43 +++++++++++++++++++++++++++++++ drivers/net/ethernet/mellanox/mlx4/main.c | 32 ++++++++++++++++++++--- drivers/net/ethernet/mellanox/mlx4/mlx4.h | 4 ++- drivers/net/ethernet/mellanox/mlx4/port.c | 11 ++++++-- include/linux/mlx4/device.h | 2 ++ 6 files changed, 92 insertions(+), 10 deletions(-) diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c index 58c45fb..c27141f 100644 --- a/drivers/infiniband/hw/mlx4/mad.c +++ b/drivers/infiniband/hw/mlx4/mad.c @@ -184,8 +184,10 @@ static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad, break; case IB_SMP_ATTR_GUID_INFO: - mlx4_ib_dispatch_event(dev, port_num, - IB_EVENT_GID_CHANGE); + /* paravirtualized master's guid is guid 0 -- does not change */ + if (!mlx4_is_master(dev->dev)) + mlx4_ib_dispatch_event(dev, port_num, + IB_EVENT_GID_CHANGE); break; default: break; @@ -487,7 +489,9 @@ void handle_port_mgmt_change_event(struct work_struct *work) mlx4_ib_dispatch_event(dev, port, IB_EVENT_PKEY_CHANGE); break; case MLX4_DEV_PMC_SUBTYPE_GUID_INFO: - mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE); + /* paravirtualized master's guid is guid 0 -- does not change */ + if (!mlx4_is_master(dev->dev)) + mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE); break; default: pr_warn("Unsupported subtype 0x%x for " diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index 5549f6b..473d63b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -730,9 +730,12 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave, { u64 def_mac; u8 port_type; + u16 short_field; int err; #define MLX4_VF_PORT_NO_LINK_SENSE_MASK 0xE0 +#define QUERY_PORT_CUR_MAX_PKEY_OFFSET 0x0c +#define QUERY_PORT_CUR_MAX_GID_OFFSET 0x0e err = mlx4_cmd_box(dev, 0, outbox->dma, vhcr->in_modifier, 0, MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B, @@ -755,11 +758,51 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave, MLX4_PUT(outbox->buf, port_type, QUERY_PORT_SUPPORTED_TYPE_OFFSET); + + short_field = 1; /* slave max gids */ + MLX4_PUT(outbox->buf, short_field, + QUERY_PORT_CUR_MAX_GID_OFFSET); + + short_field = dev->caps.pkey_table_len[vhcr->in_modifier]; + MLX4_PUT(outbox->buf, short_field, + QUERY_PORT_CUR_MAX_PKEY_OFFSET); } return err; } +int mlx4_get_slave_pkey_gid_tbl_len(struct mlx4_dev *dev, u8 port, + int *gid_tbl_len, int *pkey_tbl_len) +{ + struct mlx4_cmd_mailbox *mailbox; + u32 *outbox; + u16 field; + int err; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + err = mlx4_cmd_box(dev, 0, mailbox->dma, port, 0, + MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_WRAPPED); + if (err) + goto out; + + outbox = mailbox->buf; + + MLX4_GET(field, outbox, QUERY_PORT_CUR_MAX_GID_OFFSET); + *gid_tbl_len = field; + + MLX4_GET(field, outbox, QUERY_PORT_CUR_MAX_PKEY_OFFSET); + *pkey_tbl_len = field; + +out: + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} +EXPORT_SYMBOL(mlx4_get_slave_pkey_gid_tbl_len); + int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt) { struct mlx4_cmd_mailbox *mailbox; diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 58544b7..5df3ac4 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ 
b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -215,6 +215,10 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) for (i = 1; i <= dev->caps.num_ports; ++i) { dev->caps.vl_cap[i] = dev_cap->max_vl[i]; dev->caps.ib_mtu_cap[i] = dev_cap->ib_mtu[i]; + dev->phys_caps.gid_phys_table_len[i] = dev_cap->max_gids[i]; + dev->phys_caps.pkey_phys_table_len[i] = dev_cap->max_pkeys[i]; + /* set gid and pkey table operating lengths by default + * to non-sriov values */ dev->caps.gid_table_len[i] = dev_cap->max_gids[i]; dev->caps.pkey_table_len[i] = dev_cap->max_pkeys[i]; dev->caps.port_width_cap[i] = dev_cap->max_port_width[i]; @@ -498,8 +502,13 @@ static int mlx4_slave_cap(struct mlx4_dev *dev) return -ENODEV; } - for (i = 1; i <= dev->caps.num_ports; ++i) + for (i = 1; i <= dev->caps.num_ports; ++i) { dev->caps.port_mask[i] = dev->caps.port_type[i]; + if (mlx4_get_slave_pkey_gid_tbl_len(dev, i, + &dev->caps.gid_table_len[i], + &dev->caps.pkey_table_len[i])) + return -ENODEV; + } if (dev->caps.uar_page_size * (dev->caps.num_uars - dev->caps.reserved_uars) > @@ -536,7 +545,7 @@ int mlx4_change_port_types(struct mlx4_dev *dev, for (port = 1; port <= dev->caps.num_ports; port++) { mlx4_CLOSE_PORT(dev, port); dev->caps.port_type[port] = port_types[port - 1]; - err = mlx4_SET_PORT(dev, port); + err = mlx4_SET_PORT(dev, port, -1); if (err) { mlx4_err(dev, "Failed to set port %d, " "aborting\n", port); @@ -722,7 +731,7 @@ static ssize_t set_port_ib_mtu(struct device *dev, mlx4_unregister_device(mdev); for (port = 1; port <= mdev->caps.num_ports; port++) { mlx4_CLOSE_PORT(mdev, port); - err = mlx4_SET_PORT(mdev, port); + err = mlx4_SET_PORT(mdev, port, -1); if (err) { mlx4_err(mdev, "Failed to set port %d, " "aborting\n", port); @@ -1173,6 +1182,17 @@ err: return -EIO; } +static void mlx4_parav_master_pf_caps(struct mlx4_dev *dev) +{ + int i; + + for (i = 1; i <= dev->caps.num_ports; i++) { + dev->caps.gid_table_len[i] = 1; + dev->caps.pkey_table_len[i] = + dev->phys_caps.pkey_phys_table_len[i] - 1; + } +} + static int mlx4_init_hca(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); @@ -1212,6 +1232,9 @@ static int mlx4_init_hca(struct mlx4_dev *dev) goto err_stop_fw; } + if (mlx4_is_master(dev)) + mlx4_parav_master_pf_caps(dev); + profile = default_profile; icm_size = mlx4_make_profile(dev, &profile, &dev_cap, @@ -1500,7 +1523,8 @@ static int mlx4_setup_hca(struct mlx4_dev *dev) else dev->caps.port_ib_mtu[port] = IB_MTU_4096; - err = mlx4_SET_PORT(dev, port); + err = mlx4_SET_PORT(dev, port, mlx4_is_master(dev) ? 
+ dev->caps.pkey_table_len[port] : -1); if (err) { mlx4_err(dev, "Failed to set port %d, aborting\n", port); diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index 4d11d12..cde6e51 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -969,7 +969,7 @@ int mlx4_change_port_types(struct mlx4_dev *dev, void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table); void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table); -int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port); +int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz); /* resource tracker functions*/ int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev, enum mlx4_resource resource_type, @@ -1012,6 +1012,8 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_cmd_info *cmd); int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps); +int mlx4_get_slave_pkey_gid_tbl_len(struct mlx4_dev *dev, u8 port, + int *gid_tbl_len, int *pkey_tbl_len); int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c index a8fb529..90dc475 100644 --- a/drivers/net/ethernet/mellanox/mlx4/port.c +++ b/drivers/net/ethernet/mellanox/mlx4/port.c @@ -726,14 +726,15 @@ int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave, enum { MLX4_SET_PORT_VL_CAP = 4, /* bits 7:4 */ MLX4_SET_PORT_MTU_CAP = 12, /* bits 15:12 */ + MLX4_CHANGE_PORT_PKEY_TBL_SZ = 20, MLX4_CHANGE_PORT_VL_CAP = 21, MLX4_CHANGE_PORT_MTU_CAP = 22, }; -int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port) +int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz) { struct mlx4_cmd_mailbox *mailbox; - int err, vl_cap; + int err, vl_cap, pkey_tbl_flag = 0; if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) return 0; @@ -746,11 +747,17 @@ int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port) ((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port]; + if (pkey_tbl_sz >= 0 && mlx4_is_master(dev)) { + pkey_tbl_flag = 1; + ((__be16 *) mailbox->buf)[20] = cpu_to_be16(pkey_tbl_sz); + } + /* IB VL CAP enum isn't used by the firmware, just numerical values */ for (vl_cap = 8; vl_cap >= 1; vl_cap >>= 1) { ((__be32 *) mailbox->buf)[0] = cpu_to_be32( (1 << MLX4_CHANGE_PORT_MTU_CAP) | (1 << MLX4_CHANGE_PORT_VL_CAP) | + (pkey_tbl_flag << MLX4_CHANGE_PORT_PKEY_TBL_SZ) | (dev->caps.port_ib_mtu[port] << MLX4_SET_PORT_MTU_CAP) | (vl_cap << MLX4_SET_PORT_VL_CAP)); err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT, diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index c30a314..441caf1 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -268,6 +268,8 @@ static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor) } struct mlx4_phys_caps { + u32 gid_phys_table_len[MLX4_MAX_PORTS + 1]; + u32 pkey_phys_table_len[MLX4_MAX_PORTS + 1]; u32 num_phys_eqs; }; -- cgit v1.1 From 1fb9fed6d48960fec3ad8c97fed9aa16c9557091 Mon Sep 17 00:00:00 2001 From: Mike Marciniszyn Date: Mon, 16 Jul 2012 17:11:06 +0000 Subject: IB/qib: Fix QP RCU sparse warnings Commit af061a644a0e ("IB/qib: Use RCU for qpn lookup") introduced sparse warnings. This patch corrects those issues. 
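The recipe that satisfies sparse, applied throughout the hunks below, is to annotate each shared pointer with __rcu and then state, at every access, how that access is protected. A condensed sketch using the qib names (fragments, not a complete function):

    struct qib_qp __rcu *next;          /* shared pointers gain the __rcu tag */

    /* reader side -- inside an RCU read-side critical section */
    rcu_read_lock();
    qp = rcu_dereference(dev->qp_table[n]);
    /* ... use qp ... */
    rcu_read_unlock();

    /* updater side -- qpt_lock held, so tell sparse/lockdep exactly that */
    q = rcu_dereference_protected(dev->qp_table[n],
                                  lockdep_is_held(&dev->qpt_lock));
    rcu_assign_pointer(dev->qp_table[n], NULL);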
Signed-off-by: Mike Marciniszyn Signed-off-by: Roland Dreier --- drivers/infiniband/hw/qib/qib_iba7322.c | 12 +++++------ drivers/infiniband/hw/qib/qib_mad.c | 12 ++++------- drivers/infiniband/hw/qib/qib_qp.c | 35 ++++++++++++++++++++++----------- drivers/infiniband/hw/qib/qib_verbs.c | 23 +++++++++++++++++++--- drivers/infiniband/hw/qib/qib_verbs.h | 14 +++++++------ 5 files changed, 61 insertions(+), 35 deletions(-) diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c index c881e74..78e8550 100644 --- a/drivers/infiniband/hw/qib/qib_iba7322.c +++ b/drivers/infiniband/hw/qib/qib_iba7322.c @@ -1,5 +1,6 @@ /* - * Copyright (c) 2008, 2009, 2010 QLogic Corporation. All rights reserved. + * Copyright (c) 2012 Intel Corporation. All rights reserved. + * Copyright (c) 2008 - 2012 QLogic Corporation. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -49,6 +50,7 @@ #include "qib_qsfp.h" #include "qib_mad.h" +#include "qib_verbs.h" static void qib_setup_7322_setextled(struct qib_pportdata *, u32); static void qib_7322_handle_hwerrors(struct qib_devdata *, char *, size_t); @@ -5151,15 +5153,11 @@ static void try_7322_ipg(struct qib_pportdata *ppd) goto retry; if (!ibp->smi_ah) { - struct ib_ah_attr attr; struct ib_ah *ah; - memset(&attr, 0, sizeof attr); - attr.dlid = be16_to_cpu(IB_LID_PERMISSIVE); - attr.port_num = ppd->port; - ah = ib_create_ah(ibp->qp0->ibqp.pd, &attr); + ah = qib_create_qp0_ah(ibp, be16_to_cpu(IB_LID_PERMISSIVE)); if (IS_ERR(ah)) - ret = -EINVAL; + ret = PTR_ERR(ah); else { send_buf->ah = ah; ibp->smi_ah = to_iah(ah); diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c index 4339021..6e20b58 100644 --- a/drivers/infiniband/hw/qib/qib_mad.c +++ b/drivers/infiniband/hw/qib/qib_mad.c @@ -1,6 +1,6 @@ /* - * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation. - * All rights reserved. + * Copyright (c) 2012 Intel Corporation. All rights reserved. + * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved. * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. * * This software is available to you under a choice of one of two @@ -90,14 +90,10 @@ static void qib_send_trap(struct qib_ibport *ibp, void *data, unsigned len) if (!ibp->sm_ah) { if (ibp->sm_lid != be16_to_cpu(IB_LID_PERMISSIVE)) { struct ib_ah *ah; - struct ib_ah_attr attr; - memset(&attr, 0, sizeof attr); - attr.dlid = ibp->sm_lid; - attr.port_num = ppd_from_ibp(ibp)->port; - ah = ib_create_ah(ibp->qp0->ibqp.pd, &attr); + ah = qib_create_qp0_ah(ibp, ibp->sm_lid); if (IS_ERR(ah)) - ret = -EINVAL; + ret = PTR_ERR(ah); else { send_buf->ah = ah; ibp->sm_ah = to_iah(ah); diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c index 693041b..4850d03 100644 --- a/drivers/infiniband/hw/qib/qib_qp.c +++ b/drivers/infiniband/hw/qib/qib_qp.c @@ -1,6 +1,6 @@ /* - * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation. - * All rights reserved. + * Copyright (c) 2012 Intel Corporation. All rights reserved. + * Copyright (c) 2006 - 2012 QLogic Corporation. * All rights reserved. * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. 
* * This software is available to you under a choice of one of two @@ -250,23 +250,33 @@ static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp) spin_lock_irqsave(&dev->qpt_lock, flags); - if (ibp->qp0 == qp) { + if (rcu_dereference_protected(ibp->qp0, + lockdep_is_held(&dev->qpt_lock)) == qp) { atomic_dec(&qp->refcount); rcu_assign_pointer(ibp->qp0, NULL); - } else if (ibp->qp1 == qp) { + } else if (rcu_dereference_protected(ibp->qp1, + lockdep_is_held(&dev->qpt_lock)) == qp) { atomic_dec(&qp->refcount); rcu_assign_pointer(ibp->qp1, NULL); } else { - struct qib_qp *q, **qpp; + struct qib_qp *q; + struct qib_qp __rcu **qpp; qpp = &dev->qp_table[n]; - for (; (q = *qpp) != NULL; qpp = &q->next) + q = rcu_dereference_protected(*qpp, + lockdep_is_held(&dev->qpt_lock)); + for (; q; qpp = &q->next) { if (q == qp) { atomic_dec(&qp->refcount); - rcu_assign_pointer(*qpp, qp->next); - qp->next = NULL; + *qpp = qp->next; + rcu_assign_pointer(qp->next, NULL); + q = rcu_dereference_protected(*qpp, + lockdep_is_held(&dev->qpt_lock)); break; } + q = rcu_dereference_protected(*qpp, + lockdep_is_held(&dev->qpt_lock)); + } } spin_unlock_irqrestore(&dev->qpt_lock, flags); @@ -302,10 +312,12 @@ unsigned qib_free_all_qps(struct qib_devdata *dd) spin_lock_irqsave(&dev->qpt_lock, flags); for (n = 0; n < dev->qp_table_size; n++) { - qp = dev->qp_table[n]; + qp = rcu_dereference_protected(dev->qp_table[n], + lockdep_is_held(&dev->qpt_lock)); rcu_assign_pointer(dev->qp_table[n], NULL); - for (; qp; qp = qp->next) + for (; qp; qp = rcu_dereference_protected(qp->next, + lockdep_is_held(&dev->qpt_lock))) qp_inuse++; } spin_unlock_irqrestore(&dev->qpt_lock, flags); @@ -337,7 +349,8 @@ struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn) unsigned n = qpn_hash(dev, qpn); rcu_read_lock(); - for (qp = dev->qp_table[n]; rcu_dereference(qp); qp = qp->next) + for (qp = rcu_dereference(dev->qp_table[n]); qp; + qp = rcu_dereference(qp->next)) if (qp->ibqp.qp_num == qpn) break; } diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c index 59cdea3..03ace06 100644 --- a/drivers/infiniband/hw/qib/qib_verbs.c +++ b/drivers/infiniband/hw/qib/qib_verbs.c @@ -1,6 +1,6 @@ /* - * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation. - * All rights reserved. + * Copyright (c) 2012 Intel Corporation. All rights reserved. + * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved. * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. 
* * This software is available to you under a choice of one of two @@ -1845,6 +1845,23 @@ bail: return ret; } +struct ib_ah *qib_create_qp0_ah(struct qib_ibport *ibp, u16 dlid) +{ + struct ib_ah_attr attr; + struct ib_ah *ah = ERR_PTR(-EINVAL); + struct qib_qp *qp0; + + memset(&attr, 0, sizeof attr); + attr.dlid = dlid; + attr.port_num = ppd_from_ibp(ibp)->port; + rcu_read_lock(); + qp0 = rcu_dereference(ibp->qp0); + if (qp0) + ah = ib_create_ah(qp0->ibqp.pd, &attr); + rcu_read_unlock(); + return ah; +} + /** * qib_destroy_ah - destroy an address handle * @ibah: the AH to destroy @@ -2060,7 +2077,7 @@ int qib_register_ib_device(struct qib_devdata *dd) spin_lock_init(&dev->lk_table.lock); dev->lk_table.max = 1 << ib_qib_lkey_table_size; lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table); - dev->lk_table.table = (struct qib_mregion **) + dev->lk_table.table = (struct qib_mregion __rcu **) __get_free_pages(GFP_KERNEL, get_order(lk_tab_size)); if (dev->lk_table.table == NULL) { ret = -ENOMEM; diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h index 1293133..61fad05 100644 --- a/drivers/infiniband/hw/qib/qib_verbs.h +++ b/drivers/infiniband/hw/qib/qib_verbs.h @@ -1,6 +1,6 @@ /* - * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation. - * All rights reserved. + * Copyright (c) 2012 Intel Corporation. All rights reserved. + * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved. * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. * * This software is available to you under a choice of one of two @@ -420,7 +420,7 @@ struct qib_qp { /* read mostly fields above and below */ struct ib_ah_attr remote_ah_attr; struct ib_ah_attr alt_ah_attr; - struct qib_qp *next; /* link list for QPN hash table */ + struct qib_qp __rcu *next; /* link list for QPN hash table */ struct qib_swqe *s_wq; /* send work queue */ struct qib_mmap_info *ip; struct qib_ib_header *s_hdr; /* next packet header to send */ @@ -659,8 +659,8 @@ struct qib_opcode_stats { }; struct qib_ibport { - struct qib_qp *qp0; - struct qib_qp *qp1; + struct qib_qp __rcu *qp0; + struct qib_qp __rcu *qp1; struct ib_mad_agent *send_agent; /* agent for SMI (traps) */ struct qib_ah *sm_ah; struct qib_ah *smi_ah; @@ -743,7 +743,7 @@ struct qib_ibdev { struct list_head memwait; /* list for wait kernel memory */ struct list_head txreq_free; struct timer_list mem_timer; - struct qib_qp **qp_table; + struct qib_qp __rcu **qp_table; struct qib_pio_header *pio_hdrs; dma_addr_t pio_hdrs_phys; /* list of QPs waiting for RNR timer */ @@ -937,6 +937,8 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr, int qib_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr); +struct ib_ah *qib_create_qp0_ah(struct qib_ibport *ibp, u16 dlid); + void qib_rc_rnr_retry(unsigned long arg); void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr); -- cgit v1.1 From f3331f88a4b97530b7acd3112902524d9dc0688c Mon Sep 17 00:00:00 2001 From: Betty Dall Date: Thu, 19 Jul 2012 19:34:19 +0000 Subject: IB/qib: Fix an incorrect log message There is a cut-and-paste typo in the function qib_pci_slot_reset() where it prints that the "link_reset" function is called rather than the "slot_reset" function. This makes the message misleading. 
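The slip is easy to make because the two callbacks sit side by side in struct pci_error_handlers and have identical signatures. A minimal sketch of how a driver of this vintage wires them up (handler bodies here are illustrative placeholders, not qib's actual code):

#include <linux/pci.h>

static pci_ers_result_t demo_link_reset(struct pci_dev *pdev)
{
	dev_info(&pdev->dev, "link_reset function called, ignored\n");
	return PCI_ERS_RESULT_CAN_RECOVER;
}

static pci_ers_result_t demo_slot_reset(struct pci_dev *pdev)
{
	/* After the fix, each handler reports its own name. */
	dev_info(&pdev->dev, "slot_reset function called, ignored\n");
	return PCI_ERS_RESULT_CAN_RECOVER;
}

static struct pci_error_handlers demo_err_handlers = {
	.link_reset = demo_link_reset,
	.slot_reset = demo_slot_reset,
};

Duplicating one handler to create the other, as the identical shapes invite, is how the stale "link_reset" string survived in qib_pci_slot_reset().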
Signed-off-by: Betty Dall Signed-off-by: Roland Dreier --- drivers/infiniband/hw/qib/qib_pcie.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c index 790646e..4f9d918 100644 --- a/drivers/infiniband/hw/qib/qib_pcie.c +++ b/drivers/infiniband/hw/qib/qib_pcie.c @@ -725,7 +725,7 @@ qib_pci_mmio_enabled(struct pci_dev *pdev) static pci_ers_result_t qib_pci_slot_reset(struct pci_dev *pdev) { - qib_devinfo(pdev, "QIB link_reset function called, ignored\n"); + qib_devinfo(pdev, "QIB slot_reset function called, ignored\n"); return PCI_ERS_RESULT_CAN_RECOVER; } -- cgit v1.1 From 551ace124d0ef471e8a5fee3ef9e5bb7460251be Mon Sep 17 00:00:00 2001 From: Mike Marciniszyn Date: Thu, 19 Jul 2012 13:03:56 +0000 Subject: IB/qib: Reduce sdma_lock contention Profiling has shown that sdma_lock is proving a bottleneck for performance. The situations include: - RDMA reads when krcvqs > 1 - post sends from multiple threads For RDMA reads, the current global qib_wq mechanism runs on all CPUs and contends for the sdma_lock when multiple RDMA read requests are fielded on different CPUs. For post sends, the direct call to qib_do_send() from multiple threads causes the contention. Since the sdma mechanism is per port, this fix converts the existing workqueue to a per-port single-thread workqueue to reduce the lock contention in the RDMA read case, and for any other case where the QP is scheduled via the workqueue mechanism from more than one CPU. For the post send case, this patch modifies the post send code to test for a non-empty sdma engine. If the sdma engine is not idle, the (now single-thread) workqueue will be used to trigger the send engine instead of the direct call to qib_do_send(). Signed-off-by: Mike Marciniszyn Signed-off-by: Roland Dreier --- drivers/infiniband/hw/qib/qib.h | 10 +++++-- drivers/infiniband/hw/qib/qib_init.c | 51 +++++++++++++++++++++++++++++++++-- drivers/infiniband/hw/qib/qib_verbs.c | 29 +++++++++++++++++--- drivers/infiniband/hw/qib/qib_verbs.h | 7 ++--- 4 files changed, 85 insertions(+), 12 deletions(-) diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h index 7e62f41..cbe5771 100644 --- a/drivers/infiniband/hw/qib/qib.h +++ b/drivers/infiniband/hw/qib/qib.h @@ -1,8 +1,8 @@ #ifndef _QIB_KERNEL_H #define _QIB_KERNEL_H /* - * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation. - * All rights reserved. + * Copyright (c) 2012 Intel Corporation. All rights reserved. + * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved. * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. * * This software is available to you under a choice of one of two @@ -544,6 +544,7 @@ struct qib_pportdata { /* read mostly */ struct qib_sdma_desc *sdma_descq; + struct workqueue_struct *qib_wq; struct qib_sdma_state sdma_state; dma_addr_t sdma_descq_phys; volatile __le64 *sdma_head_dma; /* DMA'ed by chip */ @@ -1267,6 +1268,11 @@ int qib_sdma_verbs_send(struct qib_pportdata *, struct qib_sge_state *, /* ppd->sdma_lock should be locked before calling this.
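 *
 * (Illustrative aside, not part of the original patch: the fast path
 * enabled by the new qib_sdma_empty() helper below amounts to
 *
 *	if (!qib_sdma_empty(ppd))
 *		qib_schedule_send(qp);     deferred to the per-port workqueue
 *	else
 *		qib_do_send(&qp->s_work);  engine idle, send in caller context
 *
 * so posts only funnel through the single-thread workqueue, and thus
 * serialize on sdma_lock, while the engine is actually busy.)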
*/ int qib_sdma_make_progress(struct qib_pportdata *dd); +static inline int qib_sdma_empty(const struct qib_pportdata *ppd) +{ + return ppd->sdma_descq_added == ppd->sdma_descq_removed; +} + /* must be called under qib_sdma_lock */ static inline u16 qib_sdma_descq_freecnt(const struct qib_pportdata *ppd) { diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c index dc14e10..306e65e 100644 --- a/drivers/infiniband/hw/qib/qib_init.c +++ b/drivers/infiniband/hw/qib/qib_init.c @@ -1,6 +1,6 @@ /* - * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation. - * All rights reserved. + * Copyright (c) 2012 Intel Corporation. All rights reserved. + * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved. * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. * * This software is available to you under a choice of one of two @@ -210,6 +210,8 @@ void qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd, init_timer(&ppd->symerr_clear_timer); ppd->symerr_clear_timer.function = qib_clear_symerror_on_linkup; ppd->symerr_clear_timer.data = (unsigned long)ppd; + + ppd->qib_wq = NULL; } static int init_pioavailregs(struct qib_devdata *dd) @@ -483,6 +485,42 @@ static void init_piobuf_state(struct qib_devdata *dd) } /** + * qib_create_workqueues - create per port workqueues + * @dd: the qlogic_ib device + */ +static int qib_create_workqueues(struct qib_devdata *dd) +{ + int pidx; + struct qib_pportdata *ppd; + + for (pidx = 0; pidx < dd->num_pports; ++pidx) { + ppd = dd->pport + pidx; + if (!ppd->qib_wq) { + char wq_name[8]; /* 3 + 2 + 1 + 1 + 1 */ + snprintf(wq_name, sizeof(wq_name), "qib%d_%d", + dd->unit, pidx); + ppd->qib_wq = + create_singlethread_workqueue(wq_name); + if (!ppd->qib_wq) + goto wq_error; + } + } + return 0; +wq_error: + pr_err( + QIB_DRV_NAME ": create_singlethread_workqueue failed for port %d\n", + pidx + 1); + for (pidx = 0; pidx < dd->num_pports; ++pidx) { + ppd = dd->pport + pidx; + if (ppd->qib_wq) { + destroy_workqueue(ppd->qib_wq); + ppd->qib_wq = NULL; + } + } + return -ENOMEM; +} + +/** * qib_init - do the actual initialization sequence on the chip * @dd: the qlogic_ib device * @reinit: reinitializing, so don't allocate new memory @@ -764,6 +802,11 @@ static void qib_shutdown_device(struct qib_devdata *dd) * We can't count on interrupts since we are stopping. 
*/ dd->f_quiet_serdes(ppd); + + if (ppd->qib_wq) { + destroy_workqueue(ppd->qib_wq); + ppd->qib_wq = NULL; + } } qib_update_eeprom_log(dd); @@ -1249,6 +1292,10 @@ static int __devinit qib_init_one(struct pci_dev *pdev, if (ret) goto bail; /* error already printed */ + ret = qib_create_workqueues(dd); + if (ret) + goto bail; + /* do the generic initialization */ initfail = qib_init(dd, 0); diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c index 03ace06..fc9b205 100644 --- a/drivers/infiniband/hw/qib/qib_verbs.c +++ b/drivers/infiniband/hw/qib/qib_verbs.c @@ -333,7 +333,8 @@ static void qib_copy_from_sge(void *data, struct qib_sge_state *ss, u32 length) * @qp: the QP to post on * @wr: the work request to send */ -static int qib_post_one_send(struct qib_qp *qp, struct ib_send_wr *wr) +static int qib_post_one_send(struct qib_qp *qp, struct ib_send_wr *wr, + int *scheduled) { struct qib_swqe *wqe; u32 next; @@ -440,6 +441,12 @@ bail_inval_free: bail_inval: ret = -EINVAL; bail: + if (!ret && !wr->next && + !qib_sdma_empty( + dd_from_ibdev(qp->ibqp.device)->pport + qp->port_num - 1)) { + qib_schedule_send(qp); + *scheduled = 1; + } spin_unlock_irqrestore(&qp->s_lock, flags); return ret; } @@ -457,9 +464,10 @@ static int qib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, { struct qib_qp *qp = to_iqp(ibqp); int err = 0; + int scheduled = 0; for (; wr; wr = wr->next) { - err = qib_post_one_send(qp, wr); + err = qib_post_one_send(qp, wr, &scheduled); if (err) { *bad_wr = wr; goto bail; @@ -467,7 +475,8 @@ static int qib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, } /* Try to do the send work in the caller's context. */ - qib_do_send(&qp->s_work); + if (!scheduled) + qib_do_send(&qp->s_work); bail: return err; @@ -2308,3 +2317,17 @@ void qib_unregister_ib_device(struct qib_devdata *dd) get_order(lk_tab_size)); kfree(dev->qp_table); } + +/* + * This must be called with s_lock held. + */ +void qib_schedule_send(struct qib_qp *qp) +{ + if (qib_send_ok(qp)) { + struct qib_ibport *ibp = + to_iport(qp->ibqp.device, qp->port_num); + struct qib_pportdata *ppd = ppd_from_ibp(ibp); + + queue_work(ppd->qib_wq, &qp->s_work); + } +} diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h index 61fad05..aff8b2c 100644 --- a/drivers/infiniband/hw/qib/qib_verbs.h +++ b/drivers/infiniband/hw/qib/qib_verbs.h @@ -727,6 +727,7 @@ struct qib_ibport { struct qib_opcode_stats opstats[128]; }; + struct qib_ibdev { struct ib_device ibdev; struct list_head pending_mmaps; @@ -836,11 +837,7 @@ extern struct workqueue_struct *qib_cq_wq; /* * This must be called with s_lock held. */ -static inline void qib_schedule_send(struct qib_qp *qp) -{ - if (qib_send_ok(qp)) - queue_work(ib_wq, &qp->s_work); -} +void qib_schedule_send(struct qib_qp *qp); static inline int qib_pkey_ok(u16 pkey1, u16 pkey2) { -- cgit v1.1 From 36a8f01cd24b125aa027c71c1288588edde5322d Mon Sep 17 00:00:00 2001 From: Mike Marciniszyn Date: Thu, 19 Jul 2012 13:04:04 +0000 Subject: IB/qib: Add congestion control agent implementation Add a congestion control agent in the driver that handles gets and sets from the congestion control manager in the fabric for the Performance Scale Messaging (PSM) library. 
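One piece of the diff worth checking by hand is the table sizing behind the new cc_table_size module parameter. This stand-alone sketch (constants copied from the qib_mad.h hunk below: 64 entries per CC MAD, a default cap of 31 blocks, a minimum of two blocks) reproduces the clamping done in qib_init_pportdata() and the advertised "min = 128, max = 1984" range:

/* Stand-alone model of the CCA table sizing in qib_init_pportdata().
 * Constants are copied from the qib_mad.h hunk in this patch; note that
 * in the driver a nonzero cc_table_size below IB_CCT_MIN_ENTRIES is not
 * rounded up but disables the agent, as the bail path below shows.
 */
#include <stdio.h>

#define IB_CCT_ENTRIES          64                   /* entries per CC MAD */
#define IB_CCT_MIN_ENTRIES      (IB_CCT_ENTRIES * 2) /* 128 */
#define IB_CC_TABLE_CAP_DEFAULT 31                   /* 64-entry blocks */

static int supported_entries(int cc_table_size)
{
	int lo = cc_table_size > IB_CCT_MIN_ENTRIES ?
		 cc_table_size : IB_CCT_MIN_ENTRIES;
	int cap = IB_CCT_ENTRIES * IB_CC_TABLE_CAP_DEFAULT;	/* 1984 */

	return lo < cap ? lo : cap;
}

int main(void)
{
	int sizes[] = { 128, 500, 1984, 4096 };
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		int n = supported_entries(sizes[i]);

		printf("cc_table_size=%4d -> supported=%4d, cc_max_table_entries=%2d\n",
		       sizes[i], n, n / IB_CCT_ENTRIES);
	}
	return 0;
}

Run, it shows 128 and 1984 as the effective bounds (4096 is clamped to 1984, i.e. 31 blocks of 64 entries), matching the module parameter description in the qib_init.c hunk.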
Signed-off-by: Mike Marciniszyn Signed-off-by: Roland Dreier --- drivers/infiniband/hw/qib/qib.h | 35 ++++ drivers/infiniband/hw/qib/qib_init.c | 96 ++++++++++- drivers/infiniband/hw/qib/qib_mad.c | 315 ++++++++++++++++++++++++++++++++++ drivers/infiniband/hw/qib/qib_mad.h | 198 ++++++++++++++++++++- drivers/infiniband/hw/qib/qib_sysfs.c | 158 ++++++++++++++++- 5 files changed, 790 insertions(+), 12 deletions(-) diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h index cbe5771..6e19ec8 100644 --- a/drivers/infiniband/hw/qib/qib.h +++ b/drivers/infiniband/hw/qib/qib.h @@ -519,6 +519,7 @@ struct qib_pportdata { struct qib_devdata *dd; struct qib_chippport_specific *cpspec; /* chip-specific per-port */ struct kobject pport_kobj; + struct kobject pport_cc_kobj; struct kobject sl2vl_kobj; struct kobject diagc_kobj; @@ -638,6 +639,39 @@ struct qib_pportdata { struct timer_list led_override_timer; struct xmit_wait cong_stats; struct timer_list symerr_clear_timer; + + /* Synchronize access between driver writes and sysfs reads */ + spinlock_t cc_shadow_lock + ____cacheline_aligned_in_smp; + + /* Shadow copy of the congestion control table */ + struct cc_table_shadow *ccti_entries_shadow; + + /* Shadow copy of the congestion control entries */ + struct ib_cc_congestion_setting_attr_shadow *congestion_entries_shadow; + + /* List of congestion control table entries */ + struct ib_cc_table_entry_shadow *ccti_entries; + + /* 16 congestion entries with each entry corresponding to a SL */ + struct ib_cc_congestion_entry_shadow *congestion_entries; + + /* Total number of congestion control table entries */ + u16 total_cct_entry; + + /* Bit map identifying service level */ + u16 cc_sl_control_map; + + /* maximum congestion control table index */ + u16 ccti_limit; + + /* CA's max number of 64 entry units in the congestion control table */ + u8 cc_max_table_entries; + + /* Maximum number of congestion control entries that the agent expects + * the manager to send. + */ + u8 cc_supported_table_entries; }; /* Observers. Not to be taken lightly, possibly not to ship. 
*/ @@ -1078,6 +1112,7 @@ extern u32 qib_cpulist_count; extern unsigned long *qib_cpulist; extern unsigned qib_wc_pat; +extern unsigned qib_cc_table_size; int qib_init(struct qib_devdata *, int); int init_chip_wc_pat(struct qib_devdata *dd, u32); int qib_enable_wc(struct qib_devdata *dd); diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c index 306e65e..24ad901 100644 --- a/drivers/infiniband/hw/qib/qib_init.c +++ b/drivers/infiniband/hw/qib/qib_init.c @@ -41,6 +41,7 @@ #include "qib.h" #include "qib_common.h" +#include "qib_mad.h" /* * min buffers we want to have per context, after driver @@ -71,6 +72,9 @@ unsigned qib_n_krcv_queues; module_param_named(krcvqs, qib_n_krcv_queues, uint, S_IRUGO); MODULE_PARM_DESC(krcvqs, "number of kernel receive queues per IB port"); +unsigned qib_cc_table_size; +module_param_named(cc_table_size, qib_cc_table_size, uint, S_IRUGO); +MODULE_PARM_DESC(cc_table_size, "Congestion control table entries 0 (CCA disabled - default), min = 128, max = 1984"); /* * qib_wc_pat parameter: * 0 is WC via MTRR @@ -199,6 +203,7 @@ struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *ppd, u32 ctxt) void qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd, u8 hw_pidx, u8 port) { + int size; ppd->dd = dd; ppd->hw_pidx = hw_pidx; ppd->port = port; /* IB port number, not index */ @@ -212,6 +217,81 @@ void qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd, ppd->symerr_clear_timer.data = (unsigned long)ppd; ppd->qib_wq = NULL; + + spin_lock_init(&ppd->cc_shadow_lock); + + if (qib_cc_table_size < IB_CCT_MIN_ENTRIES) + goto bail; + + ppd->cc_supported_table_entries = min(max_t(int, qib_cc_table_size, + IB_CCT_MIN_ENTRIES), IB_CCT_ENTRIES*IB_CC_TABLE_CAP_DEFAULT); + + ppd->cc_max_table_entries = + ppd->cc_supported_table_entries/IB_CCT_ENTRIES; + + size = IB_CC_TABLE_CAP_DEFAULT * sizeof(struct ib_cc_table_entry) + * IB_CCT_ENTRIES; + ppd->ccti_entries = kzalloc(size, GFP_KERNEL); + if (!ppd->ccti_entries) { + qib_dev_err(dd, + "failed to allocate congestion control table for port %d!\n", + port); + goto bail; + } + + size = IB_CC_CCS_ENTRIES * sizeof(struct ib_cc_congestion_entry); + ppd->congestion_entries = kzalloc(size, GFP_KERNEL); + if (!ppd->congestion_entries) { + qib_dev_err(dd, + "failed to allocate congestion setting list for port %d!\n", + port); + goto bail_1; + } + + size = sizeof(struct cc_table_shadow); + ppd->ccti_entries_shadow = kzalloc(size, GFP_KERNEL); + if (!ppd->ccti_entries_shadow) { + qib_dev_err(dd, + "failed to allocate shadow ccti list for port %d!\n", + port); + goto bail_2; + } + + size = sizeof(struct ib_cc_congestion_setting_attr); + ppd->congestion_entries_shadow = kzalloc(size, GFP_KERNEL); + if (!ppd->congestion_entries_shadow) { + qib_dev_err(dd, + "failed to allocate shadow congestion setting list for port %d!\n", + port); + goto bail_3; + } + + return; + +bail_3: + kfree(ppd->ccti_entries_shadow); + ppd->ccti_entries_shadow = NULL; +bail_2: + kfree(ppd->congestion_entries); + ppd->congestion_entries = NULL; +bail_1: + kfree(ppd->ccti_entries); + ppd->ccti_entries = NULL; +bail: + /* User is intentionally disabling the congestion control agent */ + if (!qib_cc_table_size) + return; + + if (qib_cc_table_size < IB_CCT_MIN_ENTRIES) { + qib_cc_table_size = 0; + qib_dev_err(dd, + "Congestion Control table size %d less than minimum %d for port %d\n", + qib_cc_table_size, IB_CCT_MIN_ENTRIES, port); + } + + qib_dev_err(dd, "Congestion Control Agent disabled for port %d\n", 
+ port); + return; } static int init_pioavailregs(struct qib_devdata *dd) @@ -1164,10 +1244,24 @@ static void cleanup_device_data(struct qib_devdata *dd) unsigned long flags; /* users can't do anything more with chip */ - for (pidx = 0; pidx < dd->num_pports; ++pidx) + for (pidx = 0; pidx < dd->num_pports; ++pidx) { if (dd->pport[pidx].statusp) *dd->pport[pidx].statusp &= ~QIB_STATUS_CHIP_PRESENT; + spin_lock(&dd->pport[pidx].cc_shadow_lock); + + kfree(dd->pport[pidx].congestion_entries); + dd->pport[pidx].congestion_entries = NULL; + kfree(dd->pport[pidx].ccti_entries); + dd->pport[pidx].ccti_entries = NULL; + kfree(dd->pport[pidx].ccti_entries_shadow); + dd->pport[pidx].ccti_entries_shadow = NULL; + kfree(dd->pport[pidx].congestion_entries_shadow); + dd->pport[pidx].congestion_entries_shadow = NULL; + + spin_unlock(&dd->pport[pidx].cc_shadow_lock); + } + if (!qib_wc_pat) qib_disable_wc(dd); diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c index 6e20b58..19f1e6c 100644 --- a/drivers/infiniband/hw/qib/qib_mad.c +++ b/drivers/infiniband/hw/qib/qib_mad.c @@ -49,6 +49,18 @@ static int reply(struct ib_smp *smp) return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY; } +static int reply_failure(struct ib_smp *smp) +{ + /* + * The verbs framework will handle the directed/LID route + * packet changes. + */ + smp->method = IB_MGMT_METHOD_GET_RESP; + if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) + smp->status |= IB_SMP_DIRECTION; + return IB_MAD_RESULT_FAILURE | IB_MAD_RESULT_REPLY; +} + static void qib_send_trap(struct qib_ibport *ibp, void *data, unsigned len) { struct ib_mad_send_buf *send_buf; @@ -2047,6 +2059,298 @@ bail: return ret; } +static int cc_get_classportinfo(struct ib_cc_mad *ccp, + struct ib_device *ibdev) +{ + struct ib_cc_classportinfo_attr *p = + (struct ib_cc_classportinfo_attr *)ccp->mgmt_data; + + memset(ccp->mgmt_data, 0, sizeof(ccp->mgmt_data)); + + p->base_version = 1; + p->class_version = 1; + p->cap_mask = 0; + + /* + * Expected response time is 4.096 usec. * 2^18 == 1.073741824 sec. 
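+ *
+ * (Illustrative: the 5-bit field encodes an exponent N with response
+ * time = 4.096 usec * 2^N; 4.096 usec is 2^12 ns, so N = 18 gives
+ * 2^30 ns = 1.073741824 sec, the figure quoted above.)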
+ */ + p->resp_time_value = 18; + + return reply((struct ib_smp *) ccp); +} + +static int cc_get_congestion_info(struct ib_cc_mad *ccp, + struct ib_device *ibdev, u8 port) +{ + struct ib_cc_info_attr *p = + (struct ib_cc_info_attr *)ccp->mgmt_data; + struct qib_ibport *ibp = to_iport(ibdev, port); + struct qib_pportdata *ppd = ppd_from_ibp(ibp); + + memset(ccp->mgmt_data, 0, sizeof(ccp->mgmt_data)); + + p->congestion_info = 0; + p->control_table_cap = ppd->cc_max_table_entries; + + return reply((struct ib_smp *) ccp); +} + +static int cc_get_congestion_setting(struct ib_cc_mad *ccp, + struct ib_device *ibdev, u8 port) +{ + int i; + struct ib_cc_congestion_setting_attr *p = + (struct ib_cc_congestion_setting_attr *)ccp->mgmt_data; + struct qib_ibport *ibp = to_iport(ibdev, port); + struct qib_pportdata *ppd = ppd_from_ibp(ibp); + struct ib_cc_congestion_entry_shadow *entries; + + memset(ccp->mgmt_data, 0, sizeof(ccp->mgmt_data)); + + spin_lock(&ppd->cc_shadow_lock); + + entries = ppd->congestion_entries_shadow->entries; + p->port_control = cpu_to_be16( + ppd->congestion_entries_shadow->port_control); + p->control_map = cpu_to_be16( + ppd->congestion_entries_shadow->control_map); + for (i = 0; i < IB_CC_CCS_ENTRIES; i++) { + p->entries[i].ccti_increase = entries[i].ccti_increase; + p->entries[i].ccti_timer = cpu_to_be16(entries[i].ccti_timer); + p->entries[i].trigger_threshold = entries[i].trigger_threshold; + p->entries[i].ccti_min = entries[i].ccti_min; + } + + spin_unlock(&ppd->cc_shadow_lock); + + return reply((struct ib_smp *) ccp); +} + +static int cc_get_congestion_control_table(struct ib_cc_mad *ccp, + struct ib_device *ibdev, u8 port) +{ + struct ib_cc_table_attr *p = + (struct ib_cc_table_attr *)ccp->mgmt_data; + struct qib_ibport *ibp = to_iport(ibdev, port); + struct qib_pportdata *ppd = ppd_from_ibp(ibp); + u32 cct_block_index = be32_to_cpu(ccp->attr_mod); + u32 max_cct_block; + u32 cct_entry; + struct ib_cc_table_entry_shadow *entries; + int i; + + /* Is the table index more than what is supported? */ + if (cct_block_index > IB_CC_TABLE_CAP_DEFAULT - 1) + goto bail; + + memset(ccp->mgmt_data, 0, sizeof(ccp->mgmt_data)); + + spin_lock(&ppd->cc_shadow_lock); + + max_cct_block = + (ppd->ccti_entries_shadow->ccti_last_entry + 1)/IB_CCT_ENTRIES; + max_cct_block = max_cct_block ? 
max_cct_block - 1 : 0; + + if (cct_block_index > max_cct_block) { + spin_unlock(&ppd->cc_shadow_lock); + goto bail; + } + + ccp->attr_mod = cpu_to_be32(cct_block_index); + + cct_entry = IB_CCT_ENTRIES * (cct_block_index + 1); + + cct_entry--; + + p->ccti_limit = cpu_to_be16(cct_entry); + + entries = &ppd->ccti_entries_shadow-> + entries[IB_CCT_ENTRIES * cct_block_index]; + cct_entry %= IB_CCT_ENTRIES; + + for (i = 0; i <= cct_entry; i++) + p->ccti_entries[i].entry = cpu_to_be16(entries[i].entry); + + spin_unlock(&ppd->cc_shadow_lock); + + return reply((struct ib_smp *) ccp); + +bail: + return reply_failure((struct ib_smp *) ccp); +} + +static int cc_set_congestion_setting(struct ib_cc_mad *ccp, + struct ib_device *ibdev, u8 port) +{ + struct ib_cc_congestion_setting_attr *p = + (struct ib_cc_congestion_setting_attr *)ccp->mgmt_data; + struct qib_ibport *ibp = to_iport(ibdev, port); + struct qib_pportdata *ppd = ppd_from_ibp(ibp); + int i; + + ppd->cc_sl_control_map = be16_to_cpu(p->control_map); + + for (i = 0; i < IB_CC_CCS_ENTRIES; i++) { + ppd->congestion_entries[i].ccti_increase = + p->entries[i].ccti_increase; + + ppd->congestion_entries[i].ccti_timer = + be16_to_cpu(p->entries[i].ccti_timer); + + ppd->congestion_entries[i].trigger_threshold = + p->entries[i].trigger_threshold; + + ppd->congestion_entries[i].ccti_min = + p->entries[i].ccti_min; + } + + return reply((struct ib_smp *) ccp); +} + +static int cc_set_congestion_control_table(struct ib_cc_mad *ccp, + struct ib_device *ibdev, u8 port) +{ + struct ib_cc_table_attr *p = + (struct ib_cc_table_attr *)ccp->mgmt_data; + struct qib_ibport *ibp = to_iport(ibdev, port); + struct qib_pportdata *ppd = ppd_from_ibp(ibp); + u32 cct_block_index = be32_to_cpu(ccp->attr_mod); + u32 cct_entry; + struct ib_cc_table_entry_shadow *entries; + int i; + + /* Is the table index more than what is supported? */ + if (cct_block_index > IB_CC_TABLE_CAP_DEFAULT - 1) + goto bail; + + /* If this packet is the first in the sequence then + * zero the total table entry count. 
+ */ + if (be16_to_cpu(p->ccti_limit) < IB_CCT_ENTRIES) + ppd->total_cct_entry = 0; + + cct_entry = (be16_to_cpu(p->ccti_limit))%IB_CCT_ENTRIES; + + /* ccti_limit is 0 to 63 */ + ppd->total_cct_entry += (cct_entry + 1); + + if (ppd->total_cct_entry > ppd->cc_supported_table_entries) + goto bail; + + ppd->ccti_limit = be16_to_cpu(p->ccti_limit); + + entries = ppd->ccti_entries + (IB_CCT_ENTRIES * cct_block_index); + + for (i = 0; i <= cct_entry; i++) + entries[i].entry = be16_to_cpu(p->ccti_entries[i].entry); + + spin_lock(&ppd->cc_shadow_lock); + + ppd->ccti_entries_shadow->ccti_last_entry = ppd->total_cct_entry - 1; + memcpy(ppd->ccti_entries_shadow->entries, ppd->ccti_entries, + (ppd->total_cct_entry * sizeof(struct ib_cc_table_entry))); + + ppd->congestion_entries_shadow->port_control = IB_CC_CCS_PC_SL_BASED; + ppd->congestion_entries_shadow->control_map = ppd->cc_sl_control_map; + memcpy(ppd->congestion_entries_shadow->entries, ppd->congestion_entries, + IB_CC_CCS_ENTRIES * sizeof(struct ib_cc_congestion_entry)); + + spin_unlock(&ppd->cc_shadow_lock); + + return reply((struct ib_smp *) ccp); + +bail: + return reply_failure((struct ib_smp *) ccp); +} + +static int check_cc_key(struct qib_ibport *ibp, + struct ib_cc_mad *ccp, int mad_flags) +{ + return 0; +} + +static int process_cc(struct ib_device *ibdev, int mad_flags, + u8 port, struct ib_mad *in_mad, + struct ib_mad *out_mad) +{ + struct ib_cc_mad *ccp = (struct ib_cc_mad *)out_mad; + struct qib_ibport *ibp = to_iport(ibdev, port); + int ret; + + *out_mad = *in_mad; + + if (ccp->class_version != 2) { + ccp->status |= IB_SMP_UNSUP_VERSION; + ret = reply((struct ib_smp *)ccp); + goto bail; + } + + ret = check_cc_key(ibp, ccp, mad_flags); + if (ret) + goto bail; + + switch (ccp->method) { + case IB_MGMT_METHOD_GET: + switch (ccp->attr_id) { + case IB_CC_ATTR_CLASSPORTINFO: + ret = cc_get_classportinfo(ccp, ibdev); + goto bail; + + case IB_CC_ATTR_CONGESTION_INFO: + ret = cc_get_congestion_info(ccp, ibdev, port); + goto bail; + + case IB_CC_ATTR_CA_CONGESTION_SETTING: + ret = cc_get_congestion_setting(ccp, ibdev, port); + goto bail; + + case IB_CC_ATTR_CONGESTION_CONTROL_TABLE: + ret = cc_get_congestion_control_table(ccp, ibdev, port); + goto bail; + + /* FALLTHROUGH */ + default: + ccp->status |= IB_SMP_UNSUP_METH_ATTR; + ret = reply((struct ib_smp *) ccp); + goto bail; + } + + case IB_MGMT_METHOD_SET: + switch (ccp->attr_id) { + case IB_CC_ATTR_CA_CONGESTION_SETTING: + ret = cc_set_congestion_setting(ccp, ibdev, port); + goto bail; + + case IB_CC_ATTR_CONGESTION_CONTROL_TABLE: + ret = cc_set_congestion_control_table(ccp, ibdev, port); + goto bail; + + /* FALLTHROUGH */ + default: + ccp->status |= IB_SMP_UNSUP_METH_ATTR; + ret = reply((struct ib_smp *) ccp); + goto bail; + } + + case IB_MGMT_METHOD_GET_RESP: + /* + * The ib_mad module will call us to process responses + * before checking for other consumers. + * Just tell the caller to process it normally. 
+ */ + ret = IB_MAD_RESULT_SUCCESS; + goto bail; + + case IB_MGMT_METHOD_TRAP: + default: + ccp->status |= IB_SMP_UNSUP_METHOD; + ret = reply((struct ib_smp *) ccp); + } + +bail: + return ret; +} + /** * qib_process_mad - process an incoming MAD packet * @ibdev: the infiniband device this packet came in on @@ -2071,6 +2375,8 @@ int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port, struct ib_mad *in_mad, struct ib_mad *out_mad) { int ret; + struct qib_ibport *ibp = to_iport(ibdev, port); + struct qib_pportdata *ppd = ppd_from_ibp(ibp); switch (in_mad->mad_hdr.mgmt_class) { case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE: @@ -2082,6 +2388,15 @@ int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port, ret = process_perf(ibdev, port, in_mad, out_mad); goto bail; + case IB_MGMT_CLASS_CONG_MGMT: + if (!ppd->congestion_entries_shadow || + !qib_cc_table_size) { + ret = IB_MAD_RESULT_SUCCESS; + goto bail; + } + ret = process_cc(ibdev, mad_flags, port, in_mad, out_mad); + goto bail; + default: ret = IB_MAD_RESULT_SUCCESS; } diff --git a/drivers/infiniband/hw/qib/qib_mad.h b/drivers/infiniband/hw/qib/qib_mad.h index ecc416c..57bd3fa 100644 --- a/drivers/infiniband/hw/qib/qib_mad.h +++ b/drivers/infiniband/hw/qib/qib_mad.h @@ -1,6 +1,6 @@ /* - * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation. - * All rights reserved. + * Copyright (c) 2012 Intel Corporation. All rights reserved. + * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved. * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. * * This software is available to you under a choice of one of two @@ -31,6 +31,8 @@ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ +#ifndef _QIB_MAD_H +#define _QIB_MAD_H #include @@ -223,6 +225,198 @@ struct ib_pma_portcounters_cong { #define IB_PMA_SEL_CONG_ROUTING 0x08 /* + * Congestion control class attributes + */ +#define IB_CC_ATTR_CLASSPORTINFO cpu_to_be16(0x0001) +#define IB_CC_ATTR_NOTICE cpu_to_be16(0x0002) +#define IB_CC_ATTR_CONGESTION_INFO cpu_to_be16(0x0011) +#define IB_CC_ATTR_CONGESTION_KEY_INFO cpu_to_be16(0x0012) +#define IB_CC_ATTR_CONGESTION_LOG cpu_to_be16(0x0013) +#define IB_CC_ATTR_SWITCH_CONGESTION_SETTING cpu_to_be16(0x0014) +#define IB_CC_ATTR_SWITCH_PORT_CONGESTION_SETTING cpu_to_be16(0x0015) +#define IB_CC_ATTR_CA_CONGESTION_SETTING cpu_to_be16(0x0016) +#define IB_CC_ATTR_CONGESTION_CONTROL_TABLE cpu_to_be16(0x0017) +#define IB_CC_ATTR_TIME_STAMP cpu_to_be16(0x0018) + +/* generalizations for threshold values */ +#define IB_CC_THRESHOLD_NONE 0x0 +#define IB_CC_THRESHOLD_MIN 0x1 +#define IB_CC_THRESHOLD_MAX 0xf + +/* CCA MAD header constants */ +#define IB_CC_MAD_LOGDATA_LEN 32 +#define IB_CC_MAD_MGMTDATA_LEN 192 + +struct ib_cc_mad { + u8 base_version; + u8 mgmt_class; + u8 class_version; + u8 method; + __be16 status; + __be16 class_specific; + __be64 tid; + __be16 attr_id; + __be16 resv; + __be32 attr_mod; + __be64 cckey; + + /* For CongestionLog attribute only */ + u8 log_data[IB_CC_MAD_LOGDATA_LEN]; + + u8 mgmt_data[IB_CC_MAD_MGMTDATA_LEN]; +} __packed; + +/* + * Congestion Control class portinfo capability mask bits + */ +#define IB_CC_CPI_CM_TRAP_GEN cpu_to_be16(1 << 0) +#define IB_CC_CPI_CM_GET_SET_NOTICE cpu_to_be16(1 << 1) +#define IB_CC_CPI_CM_CAP2 cpu_to_be16(1 << 2) +#define IB_CC_CPI_CM_ENHANCEDPORT0_CC cpu_to_be16(1 << 8) + +struct ib_cc_classportinfo_attr { + u8 base_version; + u8 class_version; + __be16 cap_mask; + u8 reserved[3]; + u8 resp_time_value; /* only lower 5 bits */ + union 
ib_gid redirect_gid; + __be32 redirect_tc_sl_fl; /* 8, 4, 20 bits respectively */ + __be16 redirect_lid; + __be16 redirect_pkey; + __be32 redirect_qp; /* only lower 24 bits */ + __be32 redirect_qkey; + union ib_gid trap_gid; + __be32 trap_tc_sl_fl; /* 8, 4, 20 bits respectively */ + __be16 trap_lid; + __be16 trap_pkey; + __be32 trap_hl_qp; /* 8, 24 bits respectively */ + __be32 trap_qkey; +} __packed; + +/* Congestion control traps */ +#define IB_CC_TRAP_KEY_VIOLATION 0x0000 + +struct ib_cc_trap_key_violation_attr { + __be16 source_lid; + u8 method; + u8 reserved1; + __be16 attrib_id; + __be32 attrib_mod; + __be32 qp; + __be64 cckey; + u8 sgid[16]; + u8 padding[24]; +} __packed; + +/* Congestion info flags */ +#define IB_CC_CI_FLAGS_CREDIT_STARVATION 0x1 +#define IB_CC_TABLE_CAP_DEFAULT 31 + +struct ib_cc_info_attr { + __be16 congestion_info; + u8 control_table_cap; /* Multiple of 64 entry unit CCTs */ +} __packed; + +struct ib_cc_key_info_attr { + __be64 cckey; + u8 protect; + __be16 lease_period; + __be16 violations; +} __packed; + +#define IB_CC_CL_CA_LOGEVENTS_LEN 208 + +struct ib_cc_log_attr { + u8 log_type; + u8 congestion_flags; + __be16 threshold_event_counter; + __be16 threshold_congestion_event_map; + __be16 current_time_stamp; + u8 log_events[IB_CC_CL_CA_LOGEVENTS_LEN]; +} __packed; + +#define IB_CC_CLEC_SERVICETYPE_RC 0x0 +#define IB_CC_CLEC_SERVICETYPE_UC 0x1 +#define IB_CC_CLEC_SERVICETYPE_RD 0x2 +#define IB_CC_CLEC_SERVICETYPE_UD 0x3 + +struct ib_cc_log_event { + u8 local_qp_cn_entry; + u8 remote_qp_number_cn_entry[3]; + u8 sl_cn_entry:4; + u8 service_type_cn_entry:4; + __be32 remote_lid_cn_entry; + __be32 timestamp_cn_entry; +} __packed; + +/* Sixteen congestion entries */ +#define IB_CC_CCS_ENTRIES 16 + +/* Port control flags */ +#define IB_CC_CCS_PC_SL_BASED 0x01 + +struct ib_cc_congestion_entry { + u8 ccti_increase; + __be16 ccti_timer; + u8 trigger_threshold; + u8 ccti_min; /* min CCTI for cc table */ +} __packed; + +struct ib_cc_congestion_entry_shadow { + u8 ccti_increase; + u16 ccti_timer; + u8 trigger_threshold; + u8 ccti_min; /* min CCTI for cc table */ +} __packed; + +struct ib_cc_congestion_setting_attr { + __be16 port_control; + __be16 control_map; + struct ib_cc_congestion_entry entries[IB_CC_CCS_ENTRIES]; +} __packed; + +struct ib_cc_congestion_setting_attr_shadow { + u16 port_control; + u16 control_map; + struct ib_cc_congestion_entry_shadow entries[IB_CC_CCS_ENTRIES]; +} __packed; + +#define IB_CC_TABLE_ENTRY_INCREASE_DEFAULT 1 +#define IB_CC_TABLE_ENTRY_TIMER_DEFAULT 1 + +/* 64 Congestion Control table entries in a single MAD */ +#define IB_CCT_ENTRIES 64 +#define IB_CCT_MIN_ENTRIES (IB_CCT_ENTRIES * 2) + +struct ib_cc_table_entry { + __be16 entry; /* shift:2, multiplier:14 */ +}; + +struct ib_cc_table_entry_shadow { + u16 entry; /* shift:2, multiplier:14 */ +}; + +struct ib_cc_table_attr { + __be16 ccti_limit; /* max CCTI for cc table */ + struct ib_cc_table_entry ccti_entries[IB_CCT_ENTRIES]; +} __packed; + +struct ib_cc_table_attr_shadow { + u16 ccti_limit; /* max CCTI for cc table */ + struct ib_cc_table_entry_shadow ccti_entries[IB_CCT_ENTRIES]; +} __packed; + +#define CC_TABLE_SHADOW_MAX \ + (IB_CC_TABLE_CAP_DEFAULT * IB_CCT_ENTRIES) + +struct cc_table_shadow { + u16 ccti_last_entry; + struct ib_cc_table_entry_shadow entries[CC_TABLE_SHADOW_MAX]; +} __packed; + +#endif /* _QIB_MAD_H */ +/* * The PortSamplesControl.CounterMasks field is an array of 3 bit fields * which specify the N'th counter's capabilities. See ch. 16.1.3.2. 
* We support 5 counters which only count the mandatory quantities. diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c index dd9cd49..ae78305 100644 --- a/drivers/infiniband/hw/qib/qib_sysfs.c +++ b/drivers/infiniband/hw/qib/qib_sysfs.c @@ -1,5 +1,6 @@ /* - * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved. + * Copyright (c) 2012 Intel Corporation. All rights reserved. + * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved. * Copyright (c) 2006 PathScale, Inc. All rights reserved. * * This software is available to you under a choice of one of two @@ -33,6 +34,7 @@ #include #include "qib.h" +#include "qib_mad.h" /** * qib_parse_ushort - parse an unsigned short value in an arbitrary base @@ -231,6 +233,98 @@ static struct attribute *port_default_attributes[] = { NULL }; +/* + * Start of per-port congestion control structures and support code + */ + +/* + * Congestion control table size followed by table entries + */ +static ssize_t read_cc_table_bin(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buf, loff_t pos, size_t count) +{ + int ret; + struct qib_pportdata *ppd = + container_of(kobj, struct qib_pportdata, pport_cc_kobj); + + if (!qib_cc_table_size || !ppd->ccti_entries_shadow) + return -EINVAL; + + ret = ppd->total_cct_entry * sizeof(struct ib_cc_table_entry_shadow) + + sizeof(__be16); + + if (pos > ret) + return -EINVAL; + + if (count > ret - pos) + count = ret - pos; + + if (!count) + return count; + + spin_lock(&ppd->cc_shadow_lock); + memcpy(buf, ppd->ccti_entries_shadow, count); + spin_unlock(&ppd->cc_shadow_lock); + + return count; +} + +static void qib_port_release(struct kobject *kobj) +{ + /* nothing to do since memory is freed by qib_free_devdata() */ +} + +static struct kobj_type qib_port_cc_ktype = { + .release = qib_port_release, +}; + +static struct bin_attribute cc_table_bin_attr = { + .attr = {.name = "cc_table_bin", .mode = 0444}, + .read = read_cc_table_bin, + .size = PAGE_SIZE, +}; + +/* + * Congestion settings: port control, control map and an array of 16 + * entries for the congestion entries - increase, timer, event log + * trigger threshold and the minimum injection rate delay. 
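+ *
+ * (Illustrative: user space reads these as binary sysfs attributes,
+ * e.g. a plain read(2) of cc_settings_bin under the port's CCMgtA
+ * kobject returns the packed ib_cc_congestion_setting_attr_shadow
+ * structure; the exact sysfs path depends on where the IB device's
+ * port directory lives.)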
+ */ +static ssize_t read_cc_setting_bin(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buf, loff_t pos, size_t count) +{ + int ret; + struct qib_pportdata *ppd = + container_of(kobj, struct qib_pportdata, pport_cc_kobj); + + if (!qib_cc_table_size || !ppd->congestion_entries_shadow) + return -EINVAL; + + ret = sizeof(struct ib_cc_congestion_setting_attr_shadow); + + if (pos > ret) + return -EINVAL; + if (count > ret - pos) + count = ret - pos; + + if (!count) + return count; + + spin_lock(&ppd->cc_shadow_lock); + memcpy(buf, ppd->congestion_entries_shadow, count); + spin_unlock(&ppd->cc_shadow_lock); + + return count; +} + +static struct bin_attribute cc_setting_bin_attr = { + .attr = {.name = "cc_settings_bin", .mode = 0444}, + .read = read_cc_setting_bin, + .size = PAGE_SIZE, +}; + + static ssize_t qib_portattr_show(struct kobject *kobj, struct attribute *attr, char *buf) { @@ -253,10 +347,6 @@ static ssize_t qib_portattr_store(struct kobject *kobj, return pattr->store(ppd, buf, len); } -static void qib_port_release(struct kobject *kobj) -{ - /* nothing to do since memory is freed by qib_free_devdata() */ -} static const struct sysfs_ops qib_port_ops = { .show = qib_portattr_show, @@ -670,7 +760,7 @@ int qib_create_port_files(struct ib_device *ibdev, u8 port_num, if (ret) { qib_dev_err(dd, "Skipping sl2vl sysfs info, " "(err %d) port %u\n", ret, port_num); - goto bail_sl; + goto bail_link; } kobject_uevent(&ppd->sl2vl_kobj, KOBJ_ADD); @@ -679,15 +769,57 @@ int qib_create_port_files(struct ib_device *ibdev, u8 port_num, if (ret) { qib_dev_err(dd, "Skipping diag_counters sysfs info, " "(err %d) port %u\n", ret, port_num); - goto bail_diagc; + goto bail_sl; } kobject_uevent(&ppd->diagc_kobj, KOBJ_ADD); + if (!qib_cc_table_size || !ppd->congestion_entries_shadow) + return 0; + + ret = kobject_init_and_add(&ppd->pport_cc_kobj, &qib_port_cc_ktype, + kobj, "CCMgtA"); + if (ret) { + qib_dev_err(dd, + "Skipping Congestion Control sysfs info, (err %d) port %u\n", + ret, port_num); + goto bail_diagc; + } + + kobject_uevent(&ppd->pport_cc_kobj, KOBJ_ADD); + + ret = sysfs_create_bin_file(&ppd->pport_cc_kobj, + &cc_setting_bin_attr); + if (ret) { + qib_dev_err(dd, + "Skipping Congestion Control setting sysfs info, (err %d) port %u\n", + ret, port_num); + goto bail_cc; + } + + ret = sysfs_create_bin_file(&ppd->pport_cc_kobj, + &cc_table_bin_attr); + if (ret) { + qib_dev_err(dd, + "Skipping Congestion Control table sysfs info, (err %d) port %u\n", + ret, port_num); + goto bail_cc_entry_bin; + } + + qib_devinfo(dd->pcidev, + "IB%u: Congestion Control Agent enabled for port %d\n", + dd->unit, port_num); + return 0; +bail_cc_entry_bin: + sysfs_remove_bin_file(&ppd->pport_cc_kobj, &cc_setting_bin_attr); +bail_cc: + kobject_put(&ppd->pport_cc_kobj); bail_diagc: - kobject_put(&ppd->sl2vl_kobj); + kobject_put(&ppd->diagc_kobj); bail_sl: + kobject_put(&ppd->sl2vl_kobj); +bail_link: kobject_put(&ppd->pport_kobj); bail: return ret; @@ -720,7 +852,15 @@ void qib_verbs_unregister_sysfs(struct qib_devdata *dd) for (i = 0; i < dd->num_pports; i++) { ppd = &dd->pport[i]; - kobject_put(&ppd->pport_kobj); + if (qib_cc_table_size && + ppd->congestion_entries_shadow) { + sysfs_remove_bin_file(&ppd->pport_cc_kobj, + &cc_setting_bin_attr); + sysfs_remove_bin_file(&ppd->pport_cc_kobj, + &cc_table_bin_attr); + kobject_put(&ppd->pport_cc_kobj); + } kobject_put(&ppd->sl2vl_kobj); + kobject_put(&ppd->pport_kobj); } } -- cgit v1.1 From 7fac33014f54c26bb1b1b4282b27c7988116d639 Mon Sep 17 
00:00:00 2001 From: Mike Marciniszyn Date: Thu, 19 Jul 2012 13:04:25 +0000 Subject: IB/qib: checkpatch fixes Eliminate some simple_strto* usage. checkpatch also noted pr_* conversions, which have been done as recommended. The pr_fmt() define is used to shorten line length. Other multi-line string warnings are also eliminated. Reviewed-by: Dean Luick Signed-off-by: Mike Marciniszyn Signed-off-by: Roland Dreier --- drivers/infiniband/hw/qib/qib_diag.c | 13 ++- drivers/infiniband/hw/qib/qib_driver.c | 15 +-- drivers/infiniband/hw/qib/qib_eeprom.c | 41 +++++---- drivers/infiniband/hw/qib/qib_file_ops.c | 63 ++++++------- drivers/infiniband/hw/qib/qib_fs.c | 21 +++-- drivers/infiniband/hw/qib/qib_iba6120.c | 91 +++++++++--------- drivers/infiniband/hw/qib/qib_iba7220.c | 92 ++++++++++--------- drivers/infiniband/hw/qib/qib_iba7322.c | 148 ++++++++++++++++-------------- drivers/infiniband/hw/qib/qib_init.c | 97 ++++++++++---------- drivers/infiniband/hw/qib/qib_intr.c | 8 +- drivers/infiniband/hw/qib/qib_pcie.c | 23 +++-- drivers/infiniband/hw/qib/qib_sd7220.c | 41 +++++---- drivers/infiniband/hw/qib/qib_sdma.c | 11 ++- drivers/infiniband/hw/qib/qib_sysfs.c | 88 ++++++------------ drivers/infiniband/hw/qib/qib_twsi.c | 8 +- drivers/infiniband/hw/qib/qib_wc_x86_64.c | 14 +-- 16 files changed, 394 insertions(+), 380 deletions(-) diff --git a/drivers/infiniband/hw/qib/qib_diag.c b/drivers/infiniband/hw/qib/qib_diag.c index 9892456..1686fd4 100644 --- a/drivers/infiniband/hw/qib/qib_diag.c +++ b/drivers/infiniband/hw/qib/qib_diag.c @@ -1,6 +1,6 @@ /* - * Copyright (c) 2010 QLogic Corporation. All rights reserved. - * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved. + * Copyright (c) 2012 Intel Corporation. All rights reserved. + * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved. * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. * * This software is available to you under a choice of one of two @@ -53,6 +53,9 @@ #include "qib.h" #include "qib_common.h" +#undef pr_fmt +#define pr_fmt(fmt) QIB_DRV_NAME ": " fmt + /* * Each client that opens the diag device must read then write * offset 0, to prevent lossage from random cat or od.
diag_state @@ -598,8 +601,8 @@ static ssize_t qib_diagpkt_write(struct file *fp, } tmpbuf = vmalloc(plen); if (!tmpbuf) { - qib_devinfo(dd->pcidev, "Unable to allocate tmp buffer, " - "failing\n"); + qib_devinfo(dd->pcidev, + "Unable to allocate tmp buffer, failing\n"); ret = -ENOMEM; goto bail; } @@ -693,7 +696,7 @@ int qib_register_observer(struct qib_devdata *dd, ret = -ENOMEM; olp = vmalloc(sizeof *olp); if (!olp) { - printk(KERN_ERR QIB_DRV_NAME ": vmalloc for observer failed\n"); + pr_err("vmalloc for observer failed\n"); goto bail; } if (olp) { diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c index 8895cfe..e41e7f7 100644 --- a/drivers/infiniband/hw/qib/qib_driver.c +++ b/drivers/infiniband/hw/qib/qib_driver.c @@ -764,8 +764,9 @@ int qib_reset_device(int unit) qib_devinfo(dd->pcidev, "Reset on unit %u requested\n", unit); if (!dd->kregbase || !(dd->flags & QIB_PRESENT)) { - qib_devinfo(dd->pcidev, "Invalid unit number %u or " - "not initialized or not present\n", unit); + qib_devinfo(dd->pcidev, + "Invalid unit number %u or not initialized or not present\n", + unit); ret = -ENXIO; goto bail; } @@ -802,11 +803,13 @@ int qib_reset_device(int unit) else ret = -EAGAIN; if (ret) - qib_dev_err(dd, "Reinitialize unit %u after " - "reset failed with %d\n", unit, ret); + qib_dev_err(dd, + "Reinitialize unit %u after reset failed with %d\n", + unit, ret); else - qib_devinfo(dd->pcidev, "Reinitialized unit %u after " - "resetting\n", unit); + qib_devinfo(dd->pcidev, + "Reinitialized unit %u after resetting\n", + unit); bail: return ret; diff --git a/drivers/infiniband/hw/qib/qib_eeprom.c b/drivers/infiniband/hw/qib/qib_eeprom.c index 92d9cfe..4d5d71a 100644 --- a/drivers/infiniband/hw/qib/qib_eeprom.c +++ b/drivers/infiniband/hw/qib/qib_eeprom.c @@ -1,5 +1,6 @@ /* - * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved. + * Copyright (c) 2012 Intel Corporation. All rights reserved. + * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved. * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. 
* * This software is available to you under a choice of one of two @@ -160,10 +161,9 @@ void qib_get_eeprom_info(struct qib_devdata *dd) if (oguid > bguid[7]) { if (bguid[6] == 0xff) { if (bguid[5] == 0xff) { - qib_dev_err(dd, "Can't set %s GUID" - " from base, wraps to" - " OUI!\n", - qib_get_unit_name(t)); + qib_dev_err(dd, + "Can't set %s GUID from base, wraps to OUI!\n", + qib_get_unit_name(t)); dd->base_guid = 0; goto bail; } @@ -182,8 +182,9 @@ void qib_get_eeprom_info(struct qib_devdata *dd) len = sizeof(struct qib_flash); buf = vmalloc(len); if (!buf) { - qib_dev_err(dd, "Couldn't allocate memory to read %u " - "bytes from eeprom for GUID\n", len); + qib_dev_err(dd, + "Couldn't allocate memory to read %u bytes from eeprom for GUID\n", + len); goto bail; } @@ -201,23 +202,25 @@ void qib_get_eeprom_info(struct qib_devdata *dd) csum = flash_csum(ifp, 0); if (csum != ifp->if_csum) { - qib_devinfo(dd->pcidev, "Bad I2C flash checksum: " - "0x%x, not 0x%x\n", csum, ifp->if_csum); + qib_devinfo(dd->pcidev, + "Bad I2C flash checksum: 0x%x, not 0x%x\n", + csum, ifp->if_csum); goto done; } if (*(__be64 *) ifp->if_guid == cpu_to_be64(0) || *(__be64 *) ifp->if_guid == ~cpu_to_be64(0)) { - qib_dev_err(dd, "Invalid GUID %llx from flash; ignoring\n", - *(unsigned long long *) ifp->if_guid); + qib_dev_err(dd, + "Invalid GUID %llx from flash; ignoring\n", + *(unsigned long long *) ifp->if_guid); /* don't allow GUID if all 0 or all 1's */ goto done; } /* complain, but allow it */ if (*(u64 *) ifp->if_guid == 0x100007511000000ULL) - qib_devinfo(dd->pcidev, "Warning, GUID %llx is " - "default, probably not correct!\n", - *(unsigned long long *) ifp->if_guid); + qib_devinfo(dd->pcidev, + "Warning, GUID %llx is default, probably not correct!\n", + *(unsigned long long *) ifp->if_guid); bguid = ifp->if_guid; if (!bguid[0] && !bguid[1] && !bguid[2]) { @@ -260,8 +263,9 @@ void qib_get_eeprom_info(struct qib_devdata *dd) memcpy(dd->serial, ifp->if_serial, sizeof ifp->if_serial); if (!strstr(ifp->if_comment, "Tested successfully")) - qib_dev_err(dd, "Board SN %s did not pass functional " - "test: %s\n", dd->serial, ifp->if_comment); + qib_dev_err(dd, + "Board SN %s did not pass functional test: %s\n", + dd->serial, ifp->if_comment); memcpy(&dd->eep_st_errs, &ifp->if_errcntp, QIB_EEP_LOG_CNT); /* @@ -323,8 +327,9 @@ int qib_update_eeprom_log(struct qib_devdata *dd) buf = vmalloc(len); ret = 1; if (!buf) { - qib_dev_err(dd, "Couldn't allocate memory to read %u " - "bytes from eeprom for logging\n", len); + qib_dev_err(dd, + "Couldn't allocate memory to read %u bytes from eeprom for logging\n", + len); goto bail; } diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c index a740324..faa44cb 100644 --- a/drivers/infiniband/hw/qib/qib_file_ops.c +++ b/drivers/infiniband/hw/qib/qib_file_ops.c @@ -1,6 +1,6 @@ /* - * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation. - * All rights reserved. + * Copyright (c) 2012 Intel Corporation. All rights reserved. + * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved. * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. 
* * This software is available to you under a choice of one of two @@ -49,6 +49,9 @@ #include "qib_common.h" #include "qib_user_sdma.h" +#undef pr_fmt +#define pr_fmt(fmt) QIB_DRV_NAME ": " fmt + static int qib_open(struct inode *, struct file *); static int qib_close(struct inode *, struct file *); static ssize_t qib_write(struct file *, const char __user *, size_t, loff_t *); @@ -315,8 +318,9 @@ static int qib_tid_update(struct qib_ctxtdata *rcd, struct file *fp, } if (cnt > tidcnt) { /* make sure it all fits in tid_pg_list */ - qib_devinfo(dd->pcidev, "Process tried to allocate %u " - "TIDs, only trying max (%u)\n", cnt, tidcnt); + qib_devinfo(dd->pcidev, + "Process tried to allocate %u TIDs, only trying max (%u)\n", + cnt, tidcnt); cnt = tidcnt; } pagep = (struct page **) rcd->tid_pg_list; @@ -750,9 +754,9 @@ static int qib_mmap_mem(struct vm_area_struct *vma, struct qib_ctxtdata *rcd, ret = remap_pfn_range(vma, vma->vm_start, pfn, len, vma->vm_page_prot); if (ret) - qib_devinfo(dd->pcidev, "%s ctxt%u mmap of %lx, %x " - "bytes failed: %d\n", what, rcd->ctxt, - pfn, len, ret); + qib_devinfo(dd->pcidev, + "%s ctxt%u mmap of %lx, %x bytes failed: %d\n", + what, rcd->ctxt, pfn, len, ret); bail: return ret; } @@ -771,8 +775,9 @@ static int mmap_ureg(struct vm_area_struct *vma, struct qib_devdata *dd, */ sz = dd->flags & QIB_HAS_HDRSUPP ? 2 * PAGE_SIZE : PAGE_SIZE; if ((vma->vm_end - vma->vm_start) > sz) { - qib_devinfo(dd->pcidev, "FAIL mmap userreg: reqlen " - "%lx > PAGE\n", vma->vm_end - vma->vm_start); + qib_devinfo(dd->pcidev, + "FAIL mmap userreg: reqlen %lx > PAGE\n", + vma->vm_end - vma->vm_start); ret = -EFAULT; } else { phys = dd->physaddr + ureg; @@ -802,8 +807,8 @@ static int mmap_piobufs(struct vm_area_struct *vma, * for it. */ if ((vma->vm_end - vma->vm_start) > (piocnt * dd->palign)) { - qib_devinfo(dd->pcidev, "FAIL mmap piobufs: " - "reqlen %lx > PAGE\n", + qib_devinfo(dd->pcidev, + "FAIL mmap piobufs: reqlen %lx > PAGE\n", vma->vm_end - vma->vm_start); ret = -EINVAL; goto bail; @@ -847,8 +852,8 @@ static int mmap_rcvegrbufs(struct vm_area_struct *vma, size = rcd->rcvegrbuf_size; total_size = rcd->rcvegrbuf_chunks * size; if ((vma->vm_end - vma->vm_start) > total_size) { - qib_devinfo(dd->pcidev, "FAIL on egr bufs: " - "reqlen %lx > actual %lx\n", + qib_devinfo(dd->pcidev, + "FAIL on egr bufs: reqlen %lx > actual %lx\n", vma->vm_end - vma->vm_start, (unsigned long) total_size); ret = -EINVAL; @@ -856,8 +861,9 @@ static int mmap_rcvegrbufs(struct vm_area_struct *vma, } if (vma->vm_flags & VM_WRITE) { - qib_devinfo(dd->pcidev, "Can't map eager buffers as " - "writable (flags=%lx)\n", vma->vm_flags); + qib_devinfo(dd->pcidev, + "Can't map eager buffers as writable (flags=%lx)\n", + vma->vm_flags); ret = -EPERM; goto bail; } @@ -1270,8 +1276,8 @@ static int setup_ctxt(struct qib_pportdata *ppd, int ctxt, GFP_KERNEL); if (!rcd || !ptmp) { - qib_dev_err(dd, "Unable to allocate ctxtdata " - "memory, failing open\n"); + qib_dev_err(dd, + "Unable to allocate ctxtdata memory, failing open\n"); ret = -ENOMEM; goto bailerr; } @@ -1560,10 +1566,10 @@ done_chk_sdma: } else if (weight == 1 && test_bit(cpumask_first(tsk_cpus_allowed(current)), qib_cpulist)) - qib_devinfo(dd->pcidev, "%s PID %u affinity " - "set to cpu %d; already allocated\n", - current->comm, current->pid, - cpumask_first(tsk_cpus_allowed(current))); + qib_devinfo(dd->pcidev, + "%s PID %u affinity set to cpu %d; already allocated\n", + current->comm, current->pid, + cpumask_first(tsk_cpus_allowed(current))); } 
mutex_unlock(&qib_mutex); @@ -2185,8 +2191,7 @@ int qib_cdev_init(int minor, const char *name, cdev = cdev_alloc(); if (!cdev) { - printk(KERN_ERR QIB_DRV_NAME - ": Could not allocate cdev for minor %d, %s\n", + pr_err("Could not allocate cdev for minor %d, %s\n", minor, name); ret = -ENOMEM; goto done; @@ -2198,8 +2203,7 @@ int qib_cdev_init(int minor, const char *name, ret = cdev_add(cdev, dev, 1); if (ret < 0) { - printk(KERN_ERR QIB_DRV_NAME - ": Could not add cdev for minor %d, %s (err %d)\n", + pr_err("Could not add cdev for minor %d, %s (err %d)\n", minor, name, -ret); goto err_cdev; } @@ -2209,8 +2213,7 @@ int qib_cdev_init(int minor, const char *name, goto done; ret = PTR_ERR(device); device = NULL; - printk(KERN_ERR QIB_DRV_NAME ": Could not create " - "device for minor %d, %s (err %d)\n", + pr_err("Could not create device for minor %d, %s (err %d)\n", minor, name, -ret); err_cdev: cdev_del(cdev); @@ -2245,16 +2248,14 @@ int __init qib_dev_init(void) ret = alloc_chrdev_region(&qib_dev, 0, QIB_NMINORS, QIB_DRV_NAME); if (ret < 0) { - printk(KERN_ERR QIB_DRV_NAME ": Could not allocate " - "chrdev region (err %d)\n", -ret); + pr_err("Could not allocate chrdev region (err %d)\n", -ret); goto done; } qib_class = class_create(THIS_MODULE, "ipath"); if (IS_ERR(qib_class)) { ret = PTR_ERR(qib_class); - printk(KERN_ERR QIB_DRV_NAME ": Could not create " - "device class (err %d)\n", -ret); + pr_err("Could not create device class (err %d)\n", -ret); unregister_chrdev_region(qib_dev, QIB_NMINORS); } diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c index 05e0f17..cff8a6c 100644 --- a/drivers/infiniband/hw/qib/qib_fs.c +++ b/drivers/infiniband/hw/qib/qib_fs.c @@ -1,5 +1,6 @@ /* - * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved. + * Copyright (c) 2012 Intel Corporation. All rights reserved. + * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved. * Copyright (c) 2006 PathScale, Inc. All rights reserved. 
* * This software is available to you under a choice of one of two @@ -382,7 +383,7 @@ static int add_cntr_files(struct super_block *sb, struct qib_devdata *dd) ret = create_file(unit, S_IFDIR|S_IRUGO|S_IXUGO, sb->s_root, &dir, &simple_dir_operations, dd); if (ret) { - printk(KERN_ERR "create_file(%s) failed: %d\n", unit, ret); + pr_err("create_file(%s) failed: %d\n", unit, ret); goto bail; } @@ -390,21 +391,21 @@ static int add_cntr_files(struct super_block *sb, struct qib_devdata *dd) ret = create_file("counters", S_IFREG|S_IRUGO, dir, &tmp, &cntr_ops[0], dd); if (ret) { - printk(KERN_ERR "create_file(%s/counters) failed: %d\n", + pr_err("create_file(%s/counters) failed: %d\n", unit, ret); goto bail; } ret = create_file("counter_names", S_IFREG|S_IRUGO, dir, &tmp, &cntr_ops[1], dd); if (ret) { - printk(KERN_ERR "create_file(%s/counter_names) failed: %d\n", + pr_err("create_file(%s/counter_names) failed: %d\n", unit, ret); goto bail; } ret = create_file("portcounter_names", S_IFREG|S_IRUGO, dir, &tmp, &portcntr_ops[0], dd); if (ret) { - printk(KERN_ERR "create_file(%s/%s) failed: %d\n", + pr_err("create_file(%s/%s) failed: %d\n", unit, "portcounter_names", ret); goto bail; } @@ -416,7 +417,7 @@ static int add_cntr_files(struct super_block *sb, struct qib_devdata *dd) ret = create_file(fname, S_IFREG|S_IRUGO, dir, &tmp, &portcntr_ops[i], dd); if (ret) { - printk(KERN_ERR "create_file(%s/%s) failed: %d\n", + pr_err("create_file(%s/%s) failed: %d\n", unit, fname, ret); goto bail; } @@ -426,7 +427,7 @@ static int add_cntr_files(struct super_block *sb, struct qib_devdata *dd) ret = create_file(fname, S_IFREG|S_IRUGO, dir, &tmp, &qsfp_ops[i - 1], dd); if (ret) { - printk(KERN_ERR "create_file(%s/%s) failed: %d\n", + pr_err("create_file(%s/%s) failed: %d\n", unit, fname, ret); goto bail; } @@ -435,7 +436,7 @@ static int add_cntr_files(struct super_block *sb, struct qib_devdata *dd) ret = create_file("flash", S_IFREG|S_IWUSR|S_IRUGO, dir, &tmp, &flash_ops, dd); if (ret) - printk(KERN_ERR "create_file(%s/flash) failed: %d\n", + pr_err("create_file(%s/flash) failed: %d\n", unit, ret); bail: return ret; @@ -486,7 +487,7 @@ static int remove_device_files(struct super_block *sb, if (IS_ERR(dir)) { ret = PTR_ERR(dir); - printk(KERN_ERR "Lookup of %s failed\n", unit); + pr_err("Lookup of %s failed\n", unit); goto bail; } @@ -532,7 +533,7 @@ static int qibfs_fill_super(struct super_block *sb, void *data, int silent) ret = simple_fill_super(sb, QIBFS_MAGIC, files); if (ret) { - printk(KERN_ERR "simple_fill_super failed: %d\n", ret); + pr_err("simple_fill_super failed: %d\n", ret); goto bail; } diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c index 4d352b9..a099ac1 100644 --- a/drivers/infiniband/hw/qib/qib_iba6120.c +++ b/drivers/infiniband/hw/qib/qib_iba6120.c @@ -753,8 +753,8 @@ static void qib_handle_6120_hwerrors(struct qib_devdata *dd, char *msg, if (!hwerrs) return; if (hwerrs == ~0ULL) { - qib_dev_err(dd, "Read of hardware error status failed " - "(all bits set); ignoring\n"); + qib_dev_err(dd, + "Read of hardware error status failed (all bits set); ignoring\n"); return; } qib_stats.sps_hwerrs++; @@ -779,13 +779,14 @@ static void qib_handle_6120_hwerrors(struct qib_devdata *dd, char *msg, * or it's occurred within the last 5 seconds. 
*/ if (hwerrs & ~(TXE_PIO_PARITY | RXEMEMPARITYERR_EAGERTID)) - qib_devinfo(dd->pcidev, "Hardware error: hwerr=0x%llx " - "(cleared)\n", (unsigned long long) hwerrs); + qib_devinfo(dd->pcidev, + "Hardware error: hwerr=0x%llx (cleared)\n", + (unsigned long long) hwerrs); if (hwerrs & ~IB_HWE_BITSEXTANT) - qib_dev_err(dd, "hwerror interrupt with unknown errors " - "%llx set\n", (unsigned long long) - (hwerrs & ~IB_HWE_BITSEXTANT)); + qib_dev_err(dd, + "hwerror interrupt with unknown errors %llx set\n", + (unsigned long long)(hwerrs & ~IB_HWE_BITSEXTANT)); ctrl = qib_read_kreg32(dd, kr_control); if ((ctrl & QLOGIC_IB_C_FREEZEMODE) && !dd->diag_client) { @@ -815,8 +816,9 @@ static void qib_handle_6120_hwerrors(struct qib_devdata *dd, char *msg, if (hwerrs & HWE_MASK(PowerOnBISTFailed)) { isfatal = 1; - strlcat(msg, "[Memory BIST test failed, InfiniPath hardware" - " unusable]", msgl); + strlcat(msg, + "[Memory BIST test failed, InfiniPath hardware unusable]", + msgl); /* ignore from now on, so disable until driver reloaded */ dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed); qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask); @@ -868,8 +870,9 @@ static void qib_handle_6120_hwerrors(struct qib_devdata *dd, char *msg, *msg = 0; /* recovered from all of them */ if (isfatal && !dd->diag_client) { - qib_dev_err(dd, "Fatal Hardware Error, no longer" - " usable, SN %.16s\n", dd->serial); + qib_dev_err(dd, + "Fatal Hardware Error, no longer usable, SN %.16s\n", + dd->serial); /* * for /sys status file and user programs to print; if no * trailing brace is copied, we'll know it was truncated. @@ -1017,9 +1020,9 @@ static void handle_6120_errors(struct qib_devdata *dd, u64 errs) qib_inc_eeprom_err(dd, log_idx, 1); if (errs & ~IB_E_BITSEXTANT) - qib_dev_err(dd, "error interrupt with unknown errors " - "%llx set\n", - (unsigned long long) (errs & ~IB_E_BITSEXTANT)); + qib_dev_err(dd, + "error interrupt with unknown errors %llx set\n", + (unsigned long long) (errs & ~IB_E_BITSEXTANT)); if (errs & E_SUM_ERRS) { qib_disarm_6120_senderrbufs(ppd); @@ -1089,8 +1092,8 @@ static void handle_6120_errors(struct qib_devdata *dd, u64 errs) } if (errs & ERR_MASK(ResetNegated)) { - qib_dev_err(dd, "Got reset, requires re-init " - "(unload and reload driver)\n"); + qib_dev_err(dd, + "Got reset, requires re-init (unload and reload driver)\n"); dd->flags &= ~QIB_INITTED; /* needs re-init */ /* mark as having had error */ *dd->devstatusp |= QIB_STATUS_HWERROR; @@ -1541,8 +1544,9 @@ static noinline void unlikely_6120_intr(struct qib_devdata *dd, u64 istat) qib_stats.sps_errints++; estat = qib_read_kreg64(dd, kr_errstatus); if (!estat) - qib_devinfo(dd->pcidev, "error interrupt (%Lx), " - "but no error bits set!\n", istat); + qib_devinfo(dd->pcidev, + "error interrupt (%Lx), but no error bits set!\n", + istat); handle_6120_errors(dd, estat); } @@ -1715,16 +1719,16 @@ static void qib_setup_6120_interrupt(struct qib_devdata *dd) } if (!dd->cspec->irq) - qib_dev_err(dd, "irq is 0, BIOS error? Interrupts won't " - "work\n"); + qib_dev_err(dd, + "irq is 0, BIOS error? 
Interrupts won't work\n"); else { int ret; ret = request_irq(dd->cspec->irq, qib_6120intr, 0, QIB_DRV_NAME, dd); if (ret) - qib_dev_err(dd, "Couldn't setup interrupt " - "(irq=%d): %d\n", dd->cspec->irq, - ret); + qib_dev_err(dd, + "Couldn't setup interrupt (irq=%d): %d\n", + dd->cspec->irq, ret); } } @@ -1759,8 +1763,9 @@ static void pe_boardname(struct qib_devdata *dd) snprintf(dd->boardname, namelen, "%s", n); if (dd->majrev != 4 || !dd->minrev || dd->minrev > 2) - qib_dev_err(dd, "Unsupported InfiniPath hardware revision " - "%u.%u!\n", dd->majrev, dd->minrev); + qib_dev_err(dd, + "Unsupported InfiniPath hardware revision %u.%u!\n", + dd->majrev, dd->minrev); snprintf(dd->boardversion, sizeof(dd->boardversion), "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n", @@ -1833,8 +1838,8 @@ static int qib_6120_setup_reset(struct qib_devdata *dd) bail: if (ret) { if (qib_pcie_params(dd, dd->lbus_width, NULL, NULL)) - qib_dev_err(dd, "Reset failed to setup PCIe or " - "interrupts; continuing anyway\n"); + qib_dev_err(dd, + "Reset failed to setup PCIe or interrupts; continuing anyway\n"); /* clear the reset error, init error/hwerror mask */ qib_6120_init_hwerrors(dd); /* for Rev2 error interrupts; nop for rev 1 */ @@ -1876,8 +1881,9 @@ static void qib_6120_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr, } pa >>= 11; if (pa & ~QLOGIC_IB_RT_ADDR_MASK) { - qib_dev_err(dd, "Physical page address 0x%lx " - "larger than supported\n", pa); + qib_dev_err(dd, + "Physical page address 0x%lx larger than supported\n", + pa); return; } @@ -1941,8 +1947,9 @@ static void qib_6120_put_tid_2(struct qib_devdata *dd, u64 __iomem *tidptr, } pa >>= 11; if (pa & ~QLOGIC_IB_RT_ADDR_MASK) { - qib_dev_err(dd, "Physical page address 0x%lx " - "larger than supported\n", pa); + qib_dev_err(dd, + "Physical page address 0x%lx larger than supported\n", + pa); return; } @@ -2928,8 +2935,9 @@ static int qib_6120_set_loopback(struct qib_pportdata *ppd, const char *what) ppd->dd->unit, ppd->port); } else if (!strncmp(what, "off", 3)) { ppd->dd->cspec->ibcctrl &= ~SYM_MASK(IBCCtrl, Loopback); - qib_devinfo(ppd->dd->pcidev, "Disabling IB%u:%u IBC loopback " - "(normal)\n", ppd->dd->unit, ppd->port); + qib_devinfo(ppd->dd->pcidev, + "Disabling IB%u:%u IBC loopback (normal)\n", + ppd->dd->unit, ppd->port); } else ret = -EINVAL; if (!ret) { @@ -3186,11 +3194,10 @@ static int qib_late_6120_initreg(struct qib_devdata *dd) qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys); val = qib_read_kreg64(dd, kr_sendpioavailaddr); if (val != dd->pioavailregs_phys) { - qib_dev_err(dd, "Catastrophic software error, " - "SendPIOAvailAddr written as %lx, " - "read back as %llx\n", - (unsigned long) dd->pioavailregs_phys, - (unsigned long long) val); + qib_dev_err(dd, + "Catastrophic software error, SendPIOAvailAddr written as %lx, read back as %llx\n", + (unsigned long) dd->pioavailregs_phys, + (unsigned long long) val); ret = -EINVAL; } return ret; @@ -3218,8 +3225,8 @@ static int init_6120_variables(struct qib_devdata *dd) dd->revision = readq(&dd->kregbase[kr_revision]); if ((dd->revision & 0xffffffffU) == 0xffffffffU) { - qib_dev_err(dd, "Revision register read failure, " - "giving up initialization\n"); + qib_dev_err(dd, + "Revision register read failure, giving up initialization\n"); ret = -ENODEV; goto bail; } @@ -3551,8 +3558,8 @@ struct qib_devdata *qib_init_iba6120_funcs(struct pci_dev *pdev, goto bail; if (qib_pcie_params(dd, 8, NULL, NULL)) - qib_dev_err(dd, "Failed to setup PCIe or interrupts; " - "continuing 
anyway\n"); + qib_dev_err(dd, + "Failed to setup PCIe or interrupts; continuing anyway\n"); dd->cspec->irq = pdev->irq; /* save IRQ */ /* clear diagctrl register, in case diags were running and crashed */ diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c index 86a0ba7..64d0ecb 100644 --- a/drivers/infiniband/hw/qib/qib_iba7220.c +++ b/drivers/infiniband/hw/qib/qib_iba7220.c @@ -1111,9 +1111,9 @@ static void handle_7220_errors(struct qib_devdata *dd, u64 errs) sdma_7220_errors(ppd, errs); if (errs & ~IB_E_BITSEXTANT) - qib_dev_err(dd, "error interrupt with unknown errors " - "%llx set\n", (unsigned long long) - (errs & ~IB_E_BITSEXTANT)); + qib_dev_err(dd, + "error interrupt with unknown errors %llx set\n", + (unsigned long long) (errs & ~IB_E_BITSEXTANT)); if (errs & E_SUM_ERRS) { qib_disarm_7220_senderrbufs(ppd); @@ -1192,8 +1192,8 @@ static void handle_7220_errors(struct qib_devdata *dd, u64 errs) } if (errs & ERR_MASK(ResetNegated)) { - qib_dev_err(dd, "Got reset, requires re-init " - "(unload and reload driver)\n"); + qib_dev_err(dd, + "Got reset, requires re-init (unload and reload driver)\n"); dd->flags &= ~QIB_INITTED; /* needs re-init */ /* mark as having had error */ *dd->devstatusp |= QIB_STATUS_HWERROR; @@ -1305,8 +1305,8 @@ static void qib_7220_handle_hwerrors(struct qib_devdata *dd, char *msg, if (!hwerrs) goto bail; if (hwerrs == ~0ULL) { - qib_dev_err(dd, "Read of hardware error status failed " - "(all bits set); ignoring\n"); + qib_dev_err(dd, + "Read of hardware error status failed (all bits set); ignoring\n"); goto bail; } qib_stats.sps_hwerrs++; @@ -1329,13 +1329,14 @@ static void qib_7220_handle_hwerrors(struct qib_devdata *dd, char *msg, qib_inc_eeprom_err(dd, log_idx, 1); if (hwerrs & ~(TXEMEMPARITYERR_PIOBUF | TXEMEMPARITYERR_PIOPBC | RXE_PARITY)) - qib_devinfo(dd->pcidev, "Hardware error: hwerr=0x%llx " - "(cleared)\n", (unsigned long long) hwerrs); + qib_devinfo(dd->pcidev, + "Hardware error: hwerr=0x%llx (cleared)\n", + (unsigned long long) hwerrs); if (hwerrs & ~IB_HWE_BITSEXTANT) - qib_dev_err(dd, "hwerror interrupt with unknown errors " - "%llx set\n", (unsigned long long) - (hwerrs & ~IB_HWE_BITSEXTANT)); + qib_dev_err(dd, + "hwerror interrupt with unknown errors %llx set\n", + (unsigned long long) (hwerrs & ~IB_HWE_BITSEXTANT)); if (hwerrs & QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR) qib_sd7220_clr_ibpar(dd); @@ -1362,8 +1363,9 @@ static void qib_7220_handle_hwerrors(struct qib_devdata *dd, char *msg, if (hwerrs & HWE_MASK(PowerOnBISTFailed)) { isfatal = 1; - strlcat(msg, "[Memory BIST test failed, " - "InfiniPath hardware unusable]", msgl); + strlcat(msg, + "[Memory BIST test failed, InfiniPath hardware unusable]", + msgl); /* ignore from now on, so disable until driver reloaded */ dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed); qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask); @@ -1409,8 +1411,9 @@ static void qib_7220_handle_hwerrors(struct qib_devdata *dd, char *msg, qib_dev_err(dd, "%s hardware error\n", msg); if (isfatal && !dd->diag_client) { - qib_dev_err(dd, "Fatal Hardware Error, no longer" - " usable, SN %.16s\n", dd->serial); + qib_dev_err(dd, + "Fatal Hardware Error, no longer usable, SN %.16s\n", + dd->serial); /* * For /sys status file and user programs to print; if no * trailing brace is copied, we'll know it was truncated. 
@@ -1918,8 +1921,9 @@ static noinline void unlikely_7220_intr(struct qib_devdata *dd, u64 istat) qib_stats.sps_errints++; estat = qib_read_kreg64(dd, kr_errstatus); if (!estat) - qib_devinfo(dd->pcidev, "error interrupt (%Lx), " - "but no error bits set!\n", istat); + qib_devinfo(dd->pcidev, + "error interrupt (%Lx), but no error bits set!\n", + istat); else handle_7220_errors(dd, estat); } @@ -2023,17 +2027,18 @@ bail: static void qib_setup_7220_interrupt(struct qib_devdata *dd) { if (!dd->cspec->irq) - qib_dev_err(dd, "irq is 0, BIOS error? Interrupts won't " - "work\n"); + qib_dev_err(dd, + "irq is 0, BIOS error? Interrupts won't work\n"); else { int ret = request_irq(dd->cspec->irq, qib_7220intr, dd->msi_lo ? 0 : IRQF_SHARED, QIB_DRV_NAME, dd); if (ret) - qib_dev_err(dd, "Couldn't setup %s interrupt " - "(irq=%d): %d\n", dd->msi_lo ? - "MSI" : "INTx", dd->cspec->irq, ret); + qib_dev_err(dd, + "Couldn't setup %s interrupt (irq=%d): %d\n", + dd->msi_lo ? "MSI" : "INTx", + dd->cspec->irq, ret); } } @@ -2072,9 +2077,9 @@ static void qib_7220_boardname(struct qib_devdata *dd) snprintf(dd->boardname, namelen, "%s", n); if (dd->majrev != 5 || !dd->minrev || dd->minrev > 2) - qib_dev_err(dd, "Unsupported InfiniPath hardware " - "revision %u.%u!\n", - dd->majrev, dd->minrev); + qib_dev_err(dd, + "Unsupported InfiniPath hardware revision %u.%u!\n", + dd->majrev, dd->minrev); snprintf(dd->boardversion, sizeof(dd->boardversion), "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n", @@ -2146,8 +2151,8 @@ static int qib_setup_7220_reset(struct qib_devdata *dd) bail: if (ret) { if (qib_pcie_params(dd, dd->lbus_width, NULL, NULL)) - qib_dev_err(dd, "Reset failed to setup PCIe or " - "interrupts; continuing anyway\n"); + qib_dev_err(dd, + "Reset failed to setup PCIe or interrupts; continuing anyway\n"); /* hold IBC in reset, no sends, etc till later */ qib_write_kreg(dd, kr_control, 0ULL); @@ -2187,8 +2192,9 @@ static void qib_7220_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr, return; } if (chippa >= (1UL << IBA7220_TID_SZ_SHIFT)) { - qib_dev_err(dd, "Physical page address 0x%lx " - "larger than supported\n", pa); + qib_dev_err(dd, + "Physical page address 0x%lx larger than supported\n", + pa); return; } @@ -2706,8 +2712,9 @@ static int qib_7220_set_loopback(struct qib_pportdata *ppd, const char *what) ppd->cpspec->ibcctrl &= ~SYM_MASK(IBCCtrl, Loopback); /* enable heart beat again */ val = IBA7220_IBC_HRTBT_MASK << IBA7220_IBC_HRTBT_SHIFT; - qib_devinfo(ppd->dd->pcidev, "Disabling IB%u:%u IBC loopback " - "(normal)\n", ppd->dd->unit, ppd->port); + qib_devinfo(ppd->dd->pcidev, + "Disabling IB%u:%u IBC loopback (normal)\n", + ppd->dd->unit, ppd->port); } else ret = -EINVAL; if (!ret) { @@ -3307,8 +3314,8 @@ static int qib_7220_intr_fallback(struct qib_devdata *dd) if (!dd->msi_lo) return 0; - qib_devinfo(dd->pcidev, "MSI interrupt not detected," - " trying INTx interrupts\n"); + qib_devinfo(dd->pcidev, + "MSI interrupt not detected, trying INTx interrupts\n"); qib_7220_free_irq(dd); qib_enable_intx(dd->pcidev); /* @@ -3980,11 +3987,10 @@ static int qib_late_7220_initreg(struct qib_devdata *dd) qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys); val = qib_read_kreg64(dd, kr_sendpioavailaddr); if (val != dd->pioavailregs_phys) { - qib_dev_err(dd, "Catastrophic software error, " - "SendPIOAvailAddr written as %lx, " - "read back as %llx\n", - (unsigned long) dd->pioavailregs_phys, - (unsigned long long) val); + qib_dev_err(dd, + "Catastrophic software error, SendPIOAvailAddr written 
as %lx, read back as %llx\n", + (unsigned long) dd->pioavailregs_phys, + (unsigned long long) val); ret = -EINVAL; } qib_register_observer(dd, &sendctrl_observer); @@ -4014,8 +4020,8 @@ static int qib_init_7220_variables(struct qib_devdata *dd) dd->revision = readq(&dd->kregbase[kr_revision]); if ((dd->revision & 0xffffffffU) == 0xffffffffU) { - qib_dev_err(dd, "Revision register read failure, " - "giving up initialization\n"); + qib_dev_err(dd, + "Revision register read failure, giving up initialization\n"); ret = -ENODEV; goto bail; } @@ -4613,8 +4619,8 @@ struct qib_devdata *qib_init_iba7220_funcs(struct pci_dev *pdev, break; } if (qib_pcie_params(dd, minwidth, NULL, NULL)) - qib_dev_err(dd, "Failed to setup PCIe or interrupts; " - "continuing anyway\n"); + qib_dev_err(dd, + "Failed to setup PCIe or interrupts; continuing anyway\n"); /* save IRQ for possible later use */ dd->cspec->irq = pdev->irq; diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c index 78e8550..0d7280a 100644 --- a/drivers/infiniband/hw/qib/qib_iba7322.c +++ b/drivers/infiniband/hw/qib/qib_iba7322.c @@ -52,6 +52,9 @@ #include "qib_mad.h" #include "qib_verbs.h" +#undef pr_fmt +#define pr_fmt(fmt) QIB_DRV_NAME " " fmt + static void qib_setup_7322_setextled(struct qib_pportdata *, u32); static void qib_7322_handle_hwerrors(struct qib_devdata *, char *, size_t); static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op); @@ -1577,8 +1580,8 @@ static noinline void handle_7322_errors(struct qib_devdata *dd) qib_stats.sps_errints++; errs = qib_read_kreg64(dd, kr_errstatus); if (!errs) { - qib_devinfo(dd->pcidev, "device error interrupt, " - "but no error bits set!\n"); + qib_devinfo(dd->pcidev, + "device error interrupt, but no error bits set!\n"); goto done; } @@ -1624,8 +1627,8 @@ static noinline void handle_7322_errors(struct qib_devdata *dd) if (errs & QIB_E_RESET) { int pidx; - qib_dev_err(dd, "Got reset, requires re-init " - "(unload and reload driver)\n"); + qib_dev_err(dd, + "Got reset, requires re-init (unload and reload driver)\n"); dd->flags &= ~QIB_INITTED; /* needs re-init */ /* mark as having had error */ *dd->devstatusp |= QIB_STATUS_HWERROR; @@ -1762,9 +1765,9 @@ static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst) ppd->dd->cspec->r1 ? 
QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN); - printk(KERN_INFO QIB_DRV_NAME - " IB%u:%u re-enabled QDR adaptation " - "ibclt %x\n", ppd->dd->unit, ppd->port, ibclt); + pr_info( + "IB%u:%u re-enabled QDR adaptation ibclt %x\n", + ppd->dd->unit, ppd->port, ibclt); } } } @@ -1806,9 +1809,9 @@ static noinline void handle_7322_p_errors(struct qib_pportdata *ppd) if (!*msg) snprintf(msg, sizeof ppd->cpspec->epmsgbuf, "no others"); - qib_dev_porterr(dd, ppd->port, "error interrupt with unknown" - " errors 0x%016Lx set (and %s)\n", - (errs & ~QIB_E_P_BITSEXTANT), msg); + qib_dev_porterr(dd, ppd->port, + "error interrupt with unknown errors 0x%016Lx set (and %s)\n", + (errs & ~QIB_E_P_BITSEXTANT), msg); *msg = '\0'; } @@ -2026,8 +2029,8 @@ static void qib_7322_handle_hwerrors(struct qib_devdata *dd, char *msg, if (!hwerrs) goto bail; if (hwerrs == ~0ULL) { - qib_dev_err(dd, "Read of hardware error status failed " - "(all bits set); ignoring\n"); + qib_dev_err(dd, + "Read of hardware error status failed (all bits set); ignoring\n"); goto bail; } qib_stats.sps_hwerrs++; @@ -2041,8 +2044,9 @@ static void qib_7322_handle_hwerrors(struct qib_devdata *dd, char *msg, /* no EEPROM logging, yet */ if (hwerrs) - qib_devinfo(dd->pcidev, "Hardware error: hwerr=0x%llx " - "(cleared)\n", (unsigned long long) hwerrs); + qib_devinfo(dd->pcidev, + "Hardware error: hwerr=0x%llx (cleared)\n", + (unsigned long long) hwerrs); ctrl = qib_read_kreg32(dd, kr_control); if ((ctrl & SYM_MASK(Control, FreezeMode)) && !dd->diag_client) { @@ -2066,8 +2070,9 @@ static void qib_7322_handle_hwerrors(struct qib_devdata *dd, char *msg, if (hwerrs & HWE_MASK(PowerOnBISTFailed)) { isfatal = 1; - strlcpy(msg, "[Memory BIST test failed, " - "InfiniPath hardware unusable]", msgl); + strlcpy(msg, + "[Memory BIST test failed, InfiniPath hardware unusable]", + msgl); /* ignore from now on, so disable until driver reloaded */ dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed); qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask); @@ -2080,8 +2085,9 @@ static void qib_7322_handle_hwerrors(struct qib_devdata *dd, char *msg, qib_dev_err(dd, "%s hardware error\n", msg); if (isfatal && !dd->diag_client) { - qib_dev_err(dd, "Fatal Hardware Error, no longer" - " usable, SN %.16s\n", dd->serial); + qib_dev_err(dd, + "Fatal Hardware Error, no longer usable, SN %.16s\n", + dd->serial); /* * for /sys status file and user programs to print; if no * trailing brace is copied, we'll know it was truncated. @@ -2669,8 +2675,9 @@ static noinline void unknown_7322_ibits(struct qib_devdata *dd, u64 istat) char msg[128]; kills = istat & ~QIB_I_BITSEXTANT; - qib_dev_err(dd, "Clearing reserved interrupt(s) 0x%016llx:" - " %s\n", (unsigned long long) kills, msg); + qib_dev_err(dd, + "Clearing reserved interrupt(s) 0x%016llx: %s\n", + (unsigned long long) kills, msg); qib_write_kreg(dd, kr_intmask, (dd->cspec->int_enable_mask & ~kills)); } @@ -3103,16 +3110,16 @@ static void qib_setup_7322_interrupt(struct qib_devdata *dd, int clearpend) /* Try to get INTx interrupt */ try_intx: if (!dd->pcidev->irq) { - qib_dev_err(dd, "irq is 0, BIOS error? " - "Interrupts won't work\n"); + qib_dev_err(dd, + "irq is 0, BIOS error? 
Interrupts won't work\n"); goto bail; } ret = request_irq(dd->pcidev->irq, qib_7322intr, IRQF_SHARED, QIB_DRV_NAME, dd); if (ret) { - qib_dev_err(dd, "Couldn't setup INTx " - "interrupt (irq=%d): %d\n", - dd->pcidev->irq, ret); + qib_dev_err(dd, + "Couldn't setup INTx interrupt (irq=%d): %d\n", + dd->pcidev->irq, ret); goto bail; } dd->cspec->irq = dd->pcidev->irq; @@ -3187,8 +3194,9 @@ try_intx: * Shouldn't happen since the enable said we could * have as many as we are trying to setup here. */ - qib_dev_err(dd, "Couldn't setup MSIx " - "interrupt (vec=%d, irq=%d): %d\n", msixnum, + qib_dev_err(dd, + "Couldn't setup MSIx interrupt (vec=%d, irq=%d): %d\n", + msixnum, dd->cspec->msix_entries[msixnum].msix.vector, ret); qib_7322_nomsix(dd); @@ -3307,8 +3315,9 @@ static unsigned qib_7322_boardname(struct qib_devdata *dd) (unsigned)SYM_FIELD(dd->revision, Revision_R, SW)); if (qib_singleport && (features >> PORT_SPD_CAP_SHIFT) & PORT_SPD_CAP) { - qib_devinfo(dd->pcidev, "IB%u: Forced to single port mode" - " by module parameter\n", dd->unit); + qib_devinfo(dd->pcidev, + "IB%u: Forced to single port mode by module parameter\n", + dd->unit); features &= PORT_SPD_CAP; } @@ -3402,8 +3411,8 @@ static int qib_do_7322_reset(struct qib_devdata *dd) if (val == dd->revision) break; if (i == 5) { - qib_dev_err(dd, "Failed to initialize after reset, " - "unusable\n"); + qib_dev_err(dd, + "Failed to initialize after reset, unusable\n"); ret = 0; goto bail; } @@ -3434,8 +3443,8 @@ static int qib_do_7322_reset(struct qib_devdata *dd) if (qib_pcie_params(dd, dd->lbus_width, &dd->cspec->num_msix_entries, dd->cspec->msix_entries)) - qib_dev_err(dd, "Reset failed to setup PCIe or interrupts; " - "continuing anyway\n"); + qib_dev_err(dd, + "Reset failed to setup PCIe or interrupts; continuing anyway\n"); qib_setup_7322_interrupt(dd, 1); @@ -3476,8 +3485,9 @@ static void qib_7322_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr, return; } if (chippa >= (1UL << IBA7322_TID_SZ_SHIFT)) { - qib_dev_err(dd, "Physical page address 0x%lx " - "larger than supported\n", pa); + qib_dev_err(dd, + "Physical page address 0x%lx larger than supported\n", + pa); return; } @@ -4031,8 +4041,9 @@ static int qib_7322_set_loopback(struct qib_pportdata *ppd, const char *what) Loopback); /* enable heart beat again */ val = IBA7322_IBC_HRTBT_RMASK << IBA7322_IBC_HRTBT_LSB; - qib_devinfo(ppd->dd->pcidev, "Disabling IB%u:%u IBC loopback " - "(normal)\n", ppd->dd->unit, ppd->port); + qib_devinfo(ppd->dd->pcidev, + "Disabling IB%u:%u IBC loopback (normal)\n", + ppd->dd->unit, ppd->port); } else ret = -EINVAL; if (!ret) { @@ -4716,8 +4727,8 @@ static void init_7322_cntrnames(struct qib_devdata *dd) dd->pport[i].cpspec->portcntrs = kmalloc(dd->cspec->nportcntrs * sizeof(u64), GFP_KERNEL); if (!dd->pport[i].cpspec->portcntrs) - qib_dev_err(dd, "Failed allocation for" - " portcounters\n"); + qib_dev_err(dd, + "Failed allocation for portcounters\n"); } } @@ -4867,8 +4878,8 @@ static int qib_7322_intr_fallback(struct qib_devdata *dd) if (!dd->cspec->num_msix_entries) return 0; /* already using INTx */ - qib_devinfo(dd->pcidev, "MSIx interrupt not detected," - " trying INTx interrupts\n"); + qib_devinfo(dd->pcidev, + "MSIx interrupt not detected, trying INTx interrupts\n"); qib_7322_nomsix(dd); qib_enable_intx(dd->pcidev); qib_setup_7322_interrupt(dd, 0); @@ -5842,22 +5853,21 @@ static int setup_txselect(const char *str, struct kernel_param *kp) { struct qib_devdata *dd; unsigned long val; - char *n; + int ret; + if (strlen(str) >= MAX_ATTEN_LEN) 
{ - printk(KERN_INFO QIB_DRV_NAME " txselect_values string " - "too long\n"); + pr_info("txselect_values string too long\n"); return -ENOSPC; } - val = simple_strtoul(str, &n, 0); - if (n == str || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + + ret = kstrtoul(str, 0, &val); + if (ret || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + TXDDS_MFG_SZ)) { - printk(KERN_INFO QIB_DRV_NAME - "txselect_values must start with a number < %d\n", + pr_info("txselect_values must start with a number < %d\n", TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + TXDDS_MFG_SZ); - return -EINVAL; + return ret ? ret : -EINVAL; } - strcpy(txselect_list, str); + strcpy(txselect_list, str); list_for_each_entry(dd, &qib_dev_list, list) if (dd->deviceid == PCI_DEVICE_ID_QLOGIC_IB_7322) set_no_qsfp_atten(dd, 1); @@ -5880,11 +5890,10 @@ static int qib_late_7322_initreg(struct qib_devdata *dd) qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys); val = qib_read_kreg64(dd, kr_sendpioavailaddr); if (val != dd->pioavailregs_phys) { - qib_dev_err(dd, "Catastrophic software error, " - "SendPIOAvailAddr written as %lx, " - "read back as %llx\n", - (unsigned long) dd->pioavailregs_phys, - (unsigned long long) val); + qib_dev_err(dd, + "Catastrophic software error, SendPIOAvailAddr written as %lx, read back as %llx\n", + (unsigned long) dd->pioavailregs_phys, + (unsigned long long) val); ret = -EINVAL; } @@ -6096,8 +6105,8 @@ static int qib_init_7322_variables(struct qib_devdata *dd) dd->revision = readq(&dd->kregbase[kr_revision]); if ((dd->revision & 0xffffffffU) == 0xffffffffU) { - qib_dev_err(dd, "Revision register read failure, " - "giving up initialization\n"); + qib_dev_err(dd, + "Revision register read failure, giving up initialization\n"); ret = -ENODEV; goto bail; } @@ -6263,9 +6272,9 @@ static int qib_init_7322_variables(struct qib_devdata *dd) */ if (!(dd->flags & QIB_HAS_QSFP)) { if (!IS_QMH(dd) && !IS_QME(dd)) - qib_devinfo(dd->pcidev, "IB%u:%u: " - "Unknown mezzanine card type\n", - dd->unit, ppd->port); + qib_devinfo(dd->pcidev, + "IB%u:%u: Unknown mezzanine card type\n", + dd->unit, ppd->port); cp->h1_val = IS_QMH(dd) ? 
H1_FORCE_QMH : H1_FORCE_QME; /* * Choose center value as default tx serdes setting @@ -6920,8 +6929,8 @@ struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *pdev, dd->cspec->msix_entries[i].msix.entry = i; if (qib_pcie_params(dd, 8, &tabsize, dd->cspec->msix_entries)) - qib_dev_err(dd, "Failed to setup PCIe or interrupts; " - "continuing anyway\n"); + qib_dev_err(dd, + "Failed to setup PCIe or interrupts; continuing anyway\n"); /* may be less than we wanted, if not enough available */ dd->cspec->num_msix_entries = tabsize; @@ -7274,8 +7283,7 @@ static void find_best_ent(struct qib_pportdata *ppd, ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + TXDDS_MFG_SZ)) { idx = ppd->cpspec->no_eep - (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ); - printk(KERN_INFO QIB_DRV_NAME - " IB%u:%u use idx %u into txdds_mfg\n", + pr_info("IB%u:%u use idx %u into txdds_mfg\n", ppd->dd->unit, ppd->port, idx); *sdr_dds = &txdds_extra_mfg[idx]; *ddr_dds = &txdds_extra_mfg[idx]; @@ -7430,11 +7438,11 @@ static void serdes_7322_los_enable(struct qib_pportdata *ppd, int enable) u8 state = SYM_FIELD(data, IBSerdesCtrl_0, RXLOSEN); if (enable && !state) { - printk(KERN_INFO QIB_DRV_NAME " IB%u:%u Turning LOS on\n", + pr_info("IB%u:%u Turning LOS on\n", ppd->dd->unit, ppd->port); data |= SYM_MASK(IBSerdesCtrl_0, RXLOSEN); } else if (!enable && state) { - printk(KERN_INFO QIB_DRV_NAME " IB%u:%u Turning LOS off\n", + pr_info("IB%u:%u Turning LOS off\n", ppd->dd->unit, ppd->port); data &= ~SYM_MASK(IBSerdesCtrl_0, RXLOSEN); } @@ -7670,8 +7678,7 @@ static int serdes_7322_init_new(struct qib_pportdata *ppd) } } if (chan_done) { - printk(KERN_INFO QIB_DRV_NAME - " Serdes %d calibration not done after .5 sec: 0x%x\n", + pr_info("Serdes %d calibration not done after .5 sec: 0x%x\n", IBSD(ppd->hw_pidx), chan_done); } else { for (chan = 0; chan < SERDES_CHANS; ++chan) { @@ -7679,9 +7686,8 @@ static int serdes_7322_init_new(struct qib_pportdata *ppd) (chan + (chan >> 1)), 25, 0, 0); if ((~rxcaldone & (u32)BMASK(10, 10)) == 0) - printk(KERN_INFO QIB_DRV_NAME - " Serdes %d chan %d calibration " - "failed\n", IBSD(ppd->hw_pidx), chan); + pr_info("Serdes %d chan %d calibration failed\n", + IBSD(ppd->hw_pidx), chan); } } diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c index 24ad901..4443adf 100644 --- a/drivers/infiniband/hw/qib/qib_init.c +++ b/drivers/infiniband/hw/qib/qib_init.c @@ -38,11 +38,15 @@ #include #include #include +#include #include "qib.h" #include "qib_common.h" #include "qib_mad.h" +#undef pr_fmt +#define pr_fmt(fmt) QIB_DRV_NAME ": " fmt + /* * min buffers we want to have per context, after driver */ @@ -124,8 +128,8 @@ int qib_create_ctxts(struct qib_devdata *dd) */ dd->rcd = kzalloc(sizeof(*dd->rcd) * dd->ctxtcnt, GFP_KERNEL); if (!dd->rcd) { - qib_dev_err(dd, "Unable to allocate ctxtdata array, " - "failing\n"); + qib_dev_err(dd, + "Unable to allocate ctxtdata array, failing\n"); ret = -ENOMEM; goto done; } @@ -141,8 +145,8 @@ int qib_create_ctxts(struct qib_devdata *dd) ppd = dd->pport + (i % dd->num_pports); rcd = qib_create_ctxtdata(ppd, i); if (!rcd) { - qib_dev_err(dd, "Unable to allocate ctxtdata" - " for Kernel ctxt, failing\n"); + qib_dev_err(dd, + "Unable to allocate ctxtdata for Kernel ctxt, failing\n"); ret = -ENOMEM; goto done; } @@ -303,8 +307,8 @@ static int init_pioavailregs(struct qib_devdata *dd) &dd->pcidev->dev, PAGE_SIZE, &dd->pioavailregs_phys, GFP_KERNEL); if (!dd->pioavailregs_dma) { - qib_dev_err(dd, "failed to allocate PIOavail reg area " - "in 
memory\n"); + qib_dev_err(dd, + "failed to allocate PIOavail reg area in memory\n"); ret = -ENOMEM; goto done; } @@ -359,15 +363,15 @@ static void init_shadow_tids(struct qib_devdata *dd) pages = vzalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *)); if (!pages) { - qib_dev_err(dd, "failed to allocate shadow page * " - "array, no expected sends!\n"); + qib_dev_err(dd, + "failed to allocate shadow page * array, no expected sends!\n"); goto bail; } addrs = vzalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t)); if (!addrs) { - qib_dev_err(dd, "failed to allocate shadow dma handle " - "array, no expected sends!\n"); + qib_dev_err(dd, + "failed to allocate shadow dma handle array, no expected sends!\n"); goto bail_free; } @@ -391,13 +395,13 @@ static int loadtime_init(struct qib_devdata *dd) if (((dd->revision >> QLOGIC_IB_R_SOFTWARE_SHIFT) & QLOGIC_IB_R_SOFTWARE_MASK) != QIB_CHIP_SWVERSION) { - qib_dev_err(dd, "Driver only handles version %d, " - "chip swversion is %d (%llx), failng\n", - QIB_CHIP_SWVERSION, - (int)(dd->revision >> + qib_dev_err(dd, + "Driver only handles version %d, chip swversion is %d (%llx), failng\n", + QIB_CHIP_SWVERSION, + (int)(dd->revision >> QLOGIC_IB_R_SOFTWARE_SHIFT) & - QLOGIC_IB_R_SOFTWARE_MASK, - (unsigned long long) dd->revision); + QLOGIC_IB_R_SOFTWARE_MASK, + (unsigned long long) dd->revision); ret = -ENOSYS; goto done; } @@ -501,8 +505,8 @@ static void verify_interrupt(unsigned long opaque) */ if (dd->int_counter == 0) { if (!dd->f_intr_fallback(dd)) - dev_err(&dd->pcidev->dev, "No interrupts detected, " - "not usable.\n"); + dev_err(&dd->pcidev->dev, + "No interrupts detected, not usable.\n"); else /* re-arm the timer to see if fallback works */ mod_timer(&dd->intrchk_timer, jiffies + HZ/2); } @@ -587,9 +591,8 @@ static int qib_create_workqueues(struct qib_devdata *dd) } return 0; wq_error: - pr_err( - QIB_DRV_NAME ": create_singlethread_workqueue failed for port %d\n", - pidx + 1); + pr_err("create_singlethread_workqueue failed for port %d\n", + pidx + 1); for (pidx = 0; pidx < dd->num_pports; ++pidx) { ppd = dd->pport + pidx; if (ppd->qib_wq) { @@ -665,8 +668,8 @@ int qib_init(struct qib_devdata *dd, int reinit) if (!lastfail) lastfail = qib_setup_eagerbufs(rcd); if (lastfail) { - qib_dev_err(dd, "failed to allocate kernel ctxt's " - "rcvhdrq and/or egr bufs\n"); + qib_dev_err(dd, + "failed to allocate kernel ctxt's rcvhdrq and/or egr bufs\n"); continue; } } @@ -1016,8 +1019,7 @@ static void qib_verify_pioperf(struct qib_devdata *dd) /* 1 GiB/sec, slightly over IB SDR line rate */ if (lcnt < (emsecs * 1024U)) qib_dev_err(dd, - "Performance problem: bandwidth to PIO buffers is " - "only %u MiB/sec\n", + "Performance problem: bandwidth to PIO buffers is only %u MiB/sec\n", lcnt / (u32) emsecs); preempt_enable(); @@ -1090,8 +1092,8 @@ struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra) if (qib_cpulist) qib_cpulist_count = count; else - qib_early_err(&pdev->dev, "Could not alloc cpulist " - "info, cpu affinity might be wrong\n"); + qib_early_err(&pdev->dev, + "Could not alloc cpulist info, cpu affinity might be wrong\n"); } bail: @@ -1180,21 +1182,20 @@ static int __init qlogic_ib_init(void) */ idr_init(&qib_unit_table); if (!idr_pre_get(&qib_unit_table, GFP_KERNEL)) { - printk(KERN_ERR QIB_DRV_NAME ": idr_pre_get() failed\n"); + pr_err("idr_pre_get() failed\n"); ret = -ENOMEM; goto bail_cq_wq; } ret = pci_register_driver(&qib_driver); if (ret < 0) { - printk(KERN_ERR QIB_DRV_NAME - ": Unable to register driver: error %d\n", 
-ret); + pr_err("Unable to register driver: error %d\n", -ret); goto bail_unit; } /* not fatal if it doesn't work */ if (qib_init_qibfs()) - printk(KERN_ERR QIB_DRV_NAME ": Unable to register ipathfs\n"); + pr_err("Unable to register ipathfs\n"); goto bail; /* all OK */ bail_unit: @@ -1218,9 +1219,9 @@ static void __exit qlogic_ib_cleanup(void) ret = qib_exit_qibfs(); if (ret) - printk(KERN_ERR QIB_DRV_NAME ": " - "Unable to cleanup counter filesystem: " - "error %d\n", -ret); + pr_err( + "Unable to cleanup counter filesystem: error %d\n", + -ret); pci_unregister_driver(&qib_driver); @@ -1360,9 +1361,9 @@ static int __devinit qib_init_one(struct pci_dev *pdev, #ifdef CONFIG_PCI_MSI dd = qib_init_iba6120_funcs(pdev, ent); #else - qib_early_err(&pdev->dev, "QLogic PCIE device 0x%x cannot " - "work if CONFIG_PCI_MSI is not enabled\n", - ent->device); + qib_early_err(&pdev->dev, + "QLogic PCIE device 0x%x cannot work if CONFIG_PCI_MSI is not enabled\n", + ent->device); dd = ERR_PTR(-ENODEV); #endif break; @@ -1376,8 +1377,9 @@ static int __devinit qib_init_one(struct pci_dev *pdev, break; default: - qib_early_err(&pdev->dev, "Failing on unknown QLogic " - "deviceid 0x%x\n", ent->device); + qib_early_err(&pdev->dev, + "Failing on unknown QLogic deviceid 0x%x\n", + ent->device); ret = -ENODEV; } @@ -1434,9 +1436,9 @@ static int __devinit qib_init_one(struct pci_dev *pdev, if (!qib_wc_pat) { ret = qib_enable_wc(dd); if (ret) { - qib_dev_err(dd, "Write combining not enabled " - "(err %d): performance may be poor\n", - -ret); + qib_dev_err(dd, + "Write combining not enabled (err %d): performance may be poor\n", + -ret); ret = 0; } } @@ -1502,9 +1504,9 @@ int qib_create_rcvhdrq(struct qib_devdata *dd, struct qib_ctxtdata *rcd) gfp_flags | __GFP_COMP); if (!rcd->rcvhdrq) { - qib_dev_err(dd, "attempt to allocate %d bytes " - "for ctxt %u rcvhdrq failed\n", - amt, rcd->ctxt); + qib_dev_err(dd, + "attempt to allocate %d bytes for ctxt %u rcvhdrq failed\n", + amt, rcd->ctxt); goto bail; } @@ -1533,8 +1535,9 @@ int qib_create_rcvhdrq(struct qib_devdata *dd, struct qib_ctxtdata *rcd) return 0; bail_free: - qib_dev_err(dd, "attempt to allocate 1 page for ctxt %u " - "rcvhdrqtailaddr failed\n", rcd->ctxt); + qib_dev_err(dd, + "attempt to allocate 1 page for ctxt %u rcvhdrqtailaddr failed\n", + rcd->ctxt); vfree(rcd->user_event_mask); rcd->user_event_mask = NULL; bail_free_hdrq: diff --git a/drivers/infiniband/hw/qib/qib_intr.c b/drivers/infiniband/hw/qib/qib_intr.c index 6ae57d2..f4918f2 100644 --- a/drivers/infiniband/hw/qib/qib_intr.c +++ b/drivers/infiniband/hw/qib/qib_intr.c @@ -224,15 +224,15 @@ void qib_bad_intrstatus(struct qib_devdata *dd) * We print the message and disable interrupts, in hope of * having a better chance of debugging the problem. 
*/ - qib_dev_err(dd, "Read of chip interrupt status failed" - " disabling interrupts\n"); + qib_dev_err(dd, + "Read of chip interrupt status failed disabling interrupts\n"); if (allbits++) { /* disable interrupt delivery, something is very wrong */ if (allbits == 2) dd->f_set_intr_state(dd, 0); if (allbits == 3) { - qib_dev_err(dd, "2nd bad interrupt status, " - "unregistering interrupts\n"); + qib_dev_err(dd, + "2nd bad interrupt status, unregistering interrupts\n"); dd->flags |= QIB_BADINTR; dd->flags &= ~QIB_INITTED; dd->f_free_irq(dd); diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c index 4f9d918..062c301eb 100644 --- a/drivers/infiniband/hw/qib/qib_pcie.c +++ b/drivers/infiniband/hw/qib/qib_pcie.c @@ -224,8 +224,9 @@ static void qib_msix_setup(struct qib_devdata *dd, int pos, u32 *msixcnt, } do_intx: if (ret) { - qib_dev_err(dd, "pci_enable_msix %d vectors failed: %d, " - "falling back to INTx\n", tabsize, ret); + qib_dev_err(dd, + "pci_enable_msix %d vectors failed: %d, falling back to INTx\n", + tabsize, ret); tabsize = 0; } for (i = 0; i < tabsize; i++) @@ -251,8 +252,9 @@ static int qib_msi_setup(struct qib_devdata *dd, int pos) ret = pci_enable_msi(pdev); if (ret) - qib_dev_err(dd, "pci_enable_msi failed: %d, " - "interrupts may not work\n", ret); + qib_dev_err(dd, + "pci_enable_msi failed: %d, interrupts may not work\n", + ret); /* continue even if it fails, we may still be OK... */ pci_read_config_dword(pdev, pos + PCI_MSI_ADDRESS_LO, @@ -358,8 +360,8 @@ int qib_reinit_intr(struct qib_devdata *dd) pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI); if (!pos) { - qib_dev_err(dd, "Can't find MSI capability, " - "can't restore MSI settings\n"); + qib_dev_err(dd, + "Can't find MSI capability, can't restore MSI settings\n"); ret = 0; /* nothing special for MSIx, just MSI */ goto bail; @@ -471,8 +473,8 @@ void qib_pcie_reenable(struct qib_devdata *dd, u16 cmd, u8 iline, u8 cline) pci_write_config_byte(dd->pcidev, PCI_CACHE_LINE_SIZE, cline); r = pci_enable_device(dd->pcidev); if (r) - qib_dev_err(dd, "pci_enable_device failed after " - "reset: %d\n", r); + qib_dev_err(dd, + "pci_enable_device failed after reset: %d\n", r); } /* code to adjust PCIe capabilities. */ @@ -717,8 +719,9 @@ qib_pci_mmio_enabled(struct pci_dev *pdev) if (words == ~0ULL) ret = PCI_ERS_RESULT_NEED_RESET; } - qib_devinfo(pdev, "QIB mmio_enabled function called, " - "read wordscntr %Lx, returning %d\n", words, ret); + qib_devinfo(pdev, + "QIB mmio_enabled function called, read wordscntr %Lx, returning %d\n", + words, ret); return ret; } diff --git a/drivers/infiniband/hw/qib/qib_sd7220.c b/drivers/infiniband/hw/qib/qib_sd7220.c index ac065dd..a322d51 100644 --- a/drivers/infiniband/hw/qib/qib_sd7220.c +++ b/drivers/infiniband/hw/qib/qib_sd7220.c @@ -1,6 +1,6 @@ /* - * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation. - * All rights reserved. + * Copyright (c) 2012 Intel Corporation. All rights reserved. + * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved. * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. 
* * This software is available to you under a choice of one of two @@ -342,15 +342,17 @@ static void qib_sd_trimdone_monitor(struct qib_devdata *dd, ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_CTRL2(chn), 0, 0); if (ret < 0) - qib_dev_err(dd, "Failed checking TRIMDONE, chn %d" - " (%s)\n", chn, where); + qib_dev_err(dd, + "Failed checking TRIMDONE, chn %d (%s)\n", + chn, where); if (!(ret & 0x10)) { int probe; baduns |= (1 << chn); - qib_dev_err(dd, "TRIMDONE cleared on chn %d (%02X)." - " (%s)\n", chn, ret, where); + qib_dev_err(dd, + "TRIMDONE cleared on chn %d (%02X). (%s)\n", + chn, ret, where); probe = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_PGUDP(0), 0, 0); qib_dev_err(dd, "probe is %d (%02X)\n", @@ -375,8 +377,8 @@ static void qib_sd_trimdone_monitor(struct qib_devdata *dd, ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_CTRL2(chn), 0x10, 0x10); if (ret < 0) - qib_dev_err(dd, "Failed re-setting " - "TRIMDONE, chn %d (%s)\n", + qib_dev_err(dd, + "Failed re-setting TRIMDONE, chn %d (%s)\n", chn, where); } } @@ -1144,10 +1146,10 @@ static int ibsd_mod_allchnls(struct qib_devdata *dd, int loc, int val, if (ret < 0) { int sloc = loc >> EPB_ADDR_SHF; - qib_dev_err(dd, "pre-read failed: elt %d," - " addr 0x%X, chnl %d\n", - (sloc & 0xF), - (sloc >> 9) & 0x3f, chnl); + qib_dev_err(dd, + "pre-read failed: elt %d, addr 0x%X, chnl %d\n", + (sloc & 0xF), + (sloc >> 9) & 0x3f, chnl); return ret; } val = (ret & ~mask) | (val & mask); @@ -1157,9 +1159,9 @@ static int ibsd_mod_allchnls(struct qib_devdata *dd, int loc, int val, if (ret < 0) { int sloc = loc >> EPB_ADDR_SHF; - qib_dev_err(dd, "Global WR failed: elt %d," - " addr 0x%X, val %02X\n", - (sloc & 0xF), (sloc >> 9) & 0x3f, val); + qib_dev_err(dd, + "Global WR failed: elt %d, addr 0x%X, val %02X\n", + (sloc & 0xF), (sloc >> 9) & 0x3f, val); } return ret; } @@ -1173,11 +1175,10 @@ static int ibsd_mod_allchnls(struct qib_devdata *dd, int loc, int val, if (ret < 0) { int sloc = loc >> EPB_ADDR_SHF; - qib_dev_err(dd, "Write failed: elt %d," - " addr 0x%X, chnl %d, val 0x%02X," - " mask 0x%02X\n", - (sloc & 0xF), (sloc >> 9) & 0x3f, chnl, - val & 0xFF, mask & 0xFF); + qib_dev_err(dd, + "Write failed: elt %d, addr 0x%X, chnl %d, val 0x%02X, mask 0x%02X\n", + (sloc & 0xF), (sloc >> 9) & 0x3f, chnl, + val & 0xFF, mask & 0xFF); break; } } diff --git a/drivers/infiniband/hw/qib/qib_sdma.c b/drivers/infiniband/hw/qib/qib_sdma.c index 12a9604..3fc5144 100644 --- a/drivers/infiniband/hw/qib/qib_sdma.c +++ b/drivers/infiniband/hw/qib/qib_sdma.c @@ -1,5 +1,6 @@ /* - * Copyright (c) 2007, 2008, 2009, 2010 QLogic Corporation. All rights reserved. + * Copyright (c) 2012 Intel Corporation. All rights reserved. + * Copyright (c) 2007 - 2012 QLogic Corporation. All rights reserved. * * This software is available to you under a choice of one of two * licenses. 
You may choose to be licensed under the terms of the GNU @@ -276,8 +277,8 @@ static int alloc_sdma(struct qib_pportdata *ppd) GFP_KERNEL); if (!ppd->sdma_descq) { - qib_dev_err(ppd->dd, "failed to allocate SendDMA descriptor " - "FIFO memory\n"); + qib_dev_err(ppd->dd, + "failed to allocate SendDMA descriptor FIFO memory\n"); goto bail; } @@ -285,8 +286,8 @@ static int alloc_sdma(struct qib_pportdata *ppd) ppd->sdma_head_dma = dma_alloc_coherent(&ppd->dd->pcidev->dev, PAGE_SIZE, &ppd->sdma_head_phys, GFP_KERNEL); if (!ppd->sdma_head_dma) { - qib_dev_err(ppd->dd, "failed to allocate SendDMA " - "head memory\n"); + qib_dev_err(ppd->dd, + "failed to allocate SendDMA head memory\n"); goto cleanup_descq; } ppd->sdma_head_dma[0] = 0; diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c index ae78305..034cc82 100644 --- a/drivers/infiniband/hw/qib/qib_sysfs.c +++ b/drivers/infiniband/hw/qib/qib_sysfs.c @@ -36,41 +36,6 @@ #include "qib.h" #include "qib_mad.h" -/** - * qib_parse_ushort - parse an unsigned short value in an arbitrary base - * @str: the string containing the number - * @valp: where to put the result - * - * Returns the number of bytes consumed, or negative value on error. - */ -static int qib_parse_ushort(const char *str, unsigned short *valp) -{ - unsigned long val; - char *end; - int ret; - - if (!isdigit(str[0])) { - ret = -EINVAL; - goto bail; - } - - val = simple_strtoul(str, &end, 0); - - if (val > 0xffff) { - ret = -EINVAL; - goto bail; - } - - *valp = val; - - ret = end + 1 - str; - if (ret == 0) - ret = -EINVAL; - -bail: - return ret; -} - /* start of per-port functions */ /* * Get/Set heartbeat enable. OR of 1=enabled, 2=auto @@ -92,7 +57,11 @@ static ssize_t store_hrtbt_enb(struct qib_pportdata *ppd, const char *buf, int ret; u16 val; - ret = qib_parse_ushort(buf, &val); + ret = kstrtou16(buf, 0, &val); + if (ret) { + qib_dev_err(dd, "attempt to set invalid Heartbeat enable\n"); + return ret; + } /* * Set the "intentional" heartbeat enable per either of @@ -101,10 +70,7 @@ static ssize_t store_hrtbt_enb(struct qib_pportdata *ppd, const char *buf, * because entering loopback mode overrides it and automatically * disables heartbeat. */ - if (ret >= 0) - ret = dd->f_set_ib_cfg(ppd, QIB_IB_CFG_HRTBT, val); - if (ret < 0) - qib_dev_err(dd, "attempt to set invalid Heartbeat enable\n"); + ret = dd->f_set_ib_cfg(ppd, QIB_IB_CFG_HRTBT, val); return ret < 0 ? ret : count; } @@ -128,12 +94,14 @@ static ssize_t store_led_override(struct qib_pportdata *ppd, const char *buf, int ret; u16 val; - ret = qib_parse_ushort(buf, &val); - if (ret > 0) - qib_set_led_override(ppd, val); - else + ret = kstrtou16(buf, 0, &val); + if (ret) { qib_dev_err(dd, "attempt to set invalid LED override\n"); - return ret < 0 ? 
ret : count; + return ret; + } + + qib_set_led_override(ppd, val); + return count; } static ssize_t show_status(struct qib_pportdata *ppd, char *buf) @@ -501,12 +469,12 @@ static ssize_t diagc_attr_store(struct kobject *kobj, struct attribute *attr, struct qib_pportdata *ppd = container_of(kobj, struct qib_pportdata, diagc_kobj); struct qib_ibport *qibp = &ppd->ibport_data; - char *endp; - long val = simple_strtol(buf, &endp, 0); - - if (val < 0 || endp == buf) - return -EINVAL; + u32 val; + int ret; + ret = kstrtou32(buf, 0, &val); + if (ret) + return ret; *(u32 *)((char *) qibp + dattr->counter) = val; return size; } @@ -739,8 +707,9 @@ int qib_create_port_files(struct ib_device *ibdev, u8 port_num, int ret; if (!port_num || port_num > dd->num_pports) { - qib_dev_err(dd, "Skipping infiniband class with " - "invalid port %u\n", port_num); + qib_dev_err(dd, + "Skipping infiniband class with invalid port %u\n", + port_num); ret = -ENODEV; goto bail; } @@ -749,8 +718,9 @@ int qib_create_port_files(struct ib_device *ibdev, u8 port_num, ret = kobject_init_and_add(&ppd->pport_kobj, &qib_port_ktype, kobj, "linkcontrol"); if (ret) { - qib_dev_err(dd, "Skipping linkcontrol sysfs info, " - "(err %d) port %u\n", ret, port_num); + qib_dev_err(dd, + "Skipping linkcontrol sysfs info, (err %d) port %u\n", + ret, port_num); goto bail; } kobject_uevent(&ppd->pport_kobj, KOBJ_ADD); @@ -758,8 +728,9 @@ int qib_create_port_files(struct ib_device *ibdev, u8 port_num, ret = kobject_init_and_add(&ppd->sl2vl_kobj, &qib_sl2vl_ktype, kobj, "sl2vl"); if (ret) { - qib_dev_err(dd, "Skipping sl2vl sysfs info, " - "(err %d) port %u\n", ret, port_num); + qib_dev_err(dd, + "Skipping sl2vl sysfs info, (err %d) port %u\n", + ret, port_num); goto bail_link; } kobject_uevent(&ppd->sl2vl_kobj, KOBJ_ADD); @@ -767,8 +738,9 @@ int qib_create_port_files(struct ib_device *ibdev, u8 port_num, ret = kobject_init_and_add(&ppd->diagc_kobj, &qib_diagc_ktype, kobj, "diag_counters"); if (ret) { - qib_dev_err(dd, "Skipping diag_counters sysfs info, " - "(err %d) port %u\n", ret, port_num); + qib_dev_err(dd, + "Skipping diag_counters sysfs info, (err %d) port %u\n", + ret, port_num); goto bail_sl; } kobject_uevent(&ppd->diagc_kobj, KOBJ_ADD); diff --git a/drivers/infiniband/hw/qib/qib_twsi.c b/drivers/infiniband/hw/qib/qib_twsi.c index ddde72e..647f7be 100644 --- a/drivers/infiniband/hw/qib/qib_twsi.c +++ b/drivers/infiniband/hw/qib/qib_twsi.c @@ -1,5 +1,6 @@ /* - * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved. + * Copyright (c) 2012 Intel Corporation. All rights reserved. + * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved. * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. * * This software is available to you under a choice of one of two @@ -449,8 +450,9 @@ int qib_twsi_blk_wr(struct qib_devdata *dd, int dev, int addr, goto failed_write; ret = qib_twsi_wr(dd, addr, 0); if (ret) { - qib_dev_err(dd, "Failed to write interface" - " write addr %02X\n", addr); + qib_dev_err(dd, + "Failed to write interface write addr %02X\n", + addr); goto failed_write; } } diff --git a/drivers/infiniband/hw/qib/qib_wc_x86_64.c b/drivers/infiniband/hw/qib/qib_wc_x86_64.c index 561b8bc..1d7281c 100644 --- a/drivers/infiniband/hw/qib/qib_wc_x86_64.c +++ b/drivers/infiniband/hw/qib/qib_wc_x86_64.c @@ -1,5 +1,6 @@ /* - * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved. + * Copyright (c) 2012 Intel Corporation. All rights reserved. 
+ * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved. * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. * * This software is available to you under a choice of one of two @@ -102,10 +103,10 @@ int qib_enable_wc(struct qib_devdata *dd) u64 atmp; atmp = pioaddr & ~(piolen - 1); if (atmp < addr || (atmp + piolen) > (addr + len)) { - qib_dev_err(dd, "No way to align address/size " - "(%llx/%llx), no WC mtrr\n", - (unsigned long long) atmp, - (unsigned long long) piolen << 1); + qib_dev_err(dd, + "No way to align address/size (%llx/%llx), no WC mtrr\n", + (unsigned long long) atmp, + (unsigned long long) piolen << 1); ret = -ENODEV; } else { pioaddr = atmp; @@ -120,8 +121,7 @@ int qib_enable_wc(struct qib_devdata *dd) if (cookie < 0) { { qib_devinfo(dd->pcidev, - "mtrr_add() WC for PIO bufs " - "failed (%d)\n", + "mtrr_add() WC for PIO bufs failed (%d)\n", cookie); ret = -EINVAL; } -- cgit v1.1
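A note on the mechanism behind the printk-to-pr_err conversions above: the pr_<level> macros in <linux/printk.h> expand to printk(KERN_<LEVEL> pr_fmt(fmt), ...), so once a file defines pr_fmt() (as the qib_iba7322.c and qib_init.c hunks do with QIB_DRV_NAME), the driver prefix is prepended to every message automatically instead of being repeated in each format string. A minimal self-contained sketch of the same idiom; the module name "prfmt_demo" and its messages are illustrative, not taken from the patch:

/* Define pr_fmt before any pr_* macro is used; qib_iba7322.c does the
 * same with "#undef pr_fmt" followed by a QIB_DRV_NAME-based definition. */
#undef pr_fmt
#define pr_fmt(fmt) "prfmt_demo: " fmt

#include <linux/module.h>
#include <linux/printk.h>

static int __init prfmt_demo_init(void)
{
	/* Expands to printk(KERN_ERR "prfmt_demo: " "probe failed (err %d)\n", -19),
	 * so the prefix never has to appear in the string itself. */
	pr_err("probe failed (err %d)\n", -ENODEV);
	return 0;
}

static void __exit prfmt_demo_exit(void)
{
	pr_info("unloaded\n");	/* logs "prfmt_demo: unloaded" */
}

module_init(prfmt_demo_init);
module_exit(prfmt_demo_exit);
MODULE_LICENSE("GPL");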
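The other change repeated throughout these hunks, joining user-visible format strings that had been split across source lines, exists so that a message copied out of the kernel log can be found again by grepping the source; checkpatch.pl warns about quoted strings split across lines for this reason, and coalesced message strings are conventionally allowed to exceed the usual 80-column limit. The qib_iba6120.c hunk above, reduced to its before/after shape:

	/* before: "(all bits set)" can never be grepped together
	 * with the first half of the message */
	qib_dev_err(dd, "Read of hardware error status failed "
		    "(all bits set); ignoring\n");

	/* after: one searchable string, long line accepted */
	qib_dev_err(dd,
		"Read of hardware error status failed (all bits set); ignoring\n");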
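Finally, the setup_txselect() and qib_sysfs.c hunks replace simple_strtoul() (and the removed qib_parse_ushort() helper) with the kstrto* family. kstrtoul()/kstrtou16() report failure through their return value (-EINVAL for malformed input, -ERANGE for overflow) and accept no trailing characters other than a single newline, so the end-pointer and manual range checks of the old code fall away; note that the helpers are stricter, since simple_strtoul() silently ignored trailing text. A sketch of the resulting shape under those semantics, with MAX_IDX standing in for the TXDDS_* bound used in the driver:

#include <linux/kernel.h>	/* declares kstrtoul() on kernels of this era */
#include <linux/errno.h>

#define MAX_IDX 100	/* illustrative stand-in for TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + TXDDS_MFG_SZ */

static int parse_txselect_index(const char *str, unsigned long *out)
{
	unsigned long val;
	int ret;

	ret = kstrtoul(str, 0, &val);	/* no end pointer: "12junk" yields -EINVAL */
	if (ret)
		return ret;
	if (val >= MAX_IDX)
		return -EINVAL;
	*out = val;
	return 0;
}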